#include "thread/Scheduler.h"
#include "Log.h"
#include "arch/CPU.h"
#include "arch/MMU.h"
#include "binfmt/ELF.h"
#include "memory/MemoryManager.h"
#include "thread/ThreadImage.h"
#include <luna/Alignment.h>
#include <luna/ScopeGuard.h>
#include <luna/Stack.h>

static Thread g_idle;
static Thread* g_current = nullptr;
static Thread* g_init = nullptr;
static Thread* g_reap = nullptr;

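// Number of timer ticks a thread gets to run before it is preempted.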
static const usize TICKS_PER_TIMESLICE = 20;

namespace Scheduler
{
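    // Set up the idle thread (id 0), which runs CPU::idle_loop() whenever nothing else
    // is runnable, and make it the current thread.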
    void init()
    {
        g_idle.id = 0;
        g_idle.init_regs_kernel();
        g_idle.set_ip((u64)CPU::idle_loop);
        g_idle.state = ThreadState::Idle;
        g_idle.is_kernel = true;
        g_idle.parent = nullptr;
        g_idle.name = "[idle]";
        g_idle.active_directory = nullptr;

        g_idle.ticks_left = 1;

        // Map a single page of stack for the idle thread.
        u64 idle_stack_vm = mark_critical(MemoryManager::alloc_for_kernel(1, MMU::NoExecute | MMU::ReadWrite),
                                          "Failed to allocate stack memory for the CPU idle thread");

        Stack idle_stack { idle_stack_vm, ARCH_PAGE_SIZE };
        g_idle.set_sp(idle_stack.top());

        g_idle.stack = idle_stack;

        kinfoln("Created idle thread: id %d with ip %#lx and sp %#lx", g_idle.id, g_idle.ip(), g_idle.sp());

        g_current = &g_idle;
    }

    Thread* current()
    {
        return g_current;
    }

    Thread* idle()
    {
        return &g_idle;
    }

    Thread* init_thread()
    {
        return g_init;
    }

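    // Register the thread that reaps dying threads; it is woken up whenever there is cleanup work to do.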
    void set_reap_thread(Thread* thread)
    {
        g_reap = thread;
    }

    void signal_reap_thread()
    {
        if (g_reap) g_reap->wake_up();
    }

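    // Common setup shared by all kernel thread constructors: allocate a 4-page stack, run with
    // root credentials in the kernel page directory, and add the thread to the list as runnable.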
    Result<Thread*> new_kernel_thread_impl(Thread* thread, const char* name)
    {
        // If anything fails, make sure to clean up.
        auto guard = make_scope_guard([&] { delete thread; });

        const u64 thread_stack_vm = TRY(MemoryManager::alloc_for_kernel(4, MMU::NoExecute | MMU::ReadWrite));

        guard.deactivate();

        const Stack thread_stack { thread_stack_vm, ARCH_PAGE_SIZE * 4 };
        thread->set_sp(thread_stack.top());

        thread->stack = thread_stack;

        thread->name = name;

        thread->is_kernel = true;
        thread->active_directory = MMU::kernel_page_directory();

        thread->auth = Credentials { .uid = 0, .euid = 0, .suid = 0, .gid = 0, .egid = 0, .sgid = 0 };

        g_threads.append(thread);

        thread->state = ThreadState::Runnable;

        kinfoln("Created kernel thread: id %d with ip %#lx and sp %#lx", thread->id, thread->ip(), thread->sp());

        return thread;
    }

    Result<Thread*> new_kernel_thread(u64 address, const char* name)
    {
        Thread* const thread = TRY(new_thread());
        thread->init_regs_kernel();
        thread->set_ip(address);

        return new_kernel_thread_impl(thread, name);
    }

    Result<Thread*> new_kernel_thread(void (*func)(void), const char* name)
    {
        Thread* const thread = TRY(new_thread());
        thread->init_regs_kernel();
        thread->set_ip((u64)func);

        return new_kernel_thread_impl(thread, name);
    }

    Result<Thread*> new_kernel_thread(void (*func)(void*), void* arg, const char* name)
    {
        Thread* const thread = TRY(new_thread());
        thread->init_regs_kernel();
        thread->set_ip((u64)func);
        thread->set_arguments((u64)arg, 0, 0, 0);

        return new_kernel_thread_impl(thread, name);
    }

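    // Create the initial userspace thread (init, id 1) by loading an ELF executable from the
    // given inode and setting up its stack with argv and envp.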
    Result<Thread*> new_userspace_thread(SharedPtr<VFS::Inode> inode, const char* name)
    {
        Thread* const thread = TRY(make<Thread>());

        // If anything below fails, make sure the partially constructed thread is freed.
        auto guard = make_scope_guard([&] { delete thread; });

        thread->state = ThreadState::None;
        thread->is_kernel = false;
        thread->id = 1;
        thread->pgid = 1;
        thread->name = name;
        thread->auth = Credentials { .uid = 0, .euid = 0, .suid = 0, .gid = 0, .egid = 0, .sgid = 0 };

        Vector<String> args;
        auto name_string = TRY(String::from_cstring(name));
        TRY(args.try_append(move(name_string)));

        Vector<String> env;

        // Unlike other programs, which use BinaryFormat::create_loader(), init must be a native ELF executable.
        auto loader = TRY(ELFLoader::create(inode, nullptr, 0));

        auto image = TRY(ThreadImage::try_load_from_binary(loader));
        u64 argv = TRY(image->push_string_vector_on_stack(args));
        u64 envp = TRY(image->push_string_vector_on_stack(env));

        guard.deactivate();

        image->apply(thread);
        thread->set_arguments(args.size(), argv, env.size(), envp);

        for (int i = 0; i < NSIG; i++)
        {
            thread->signal_handlers[i] = { .sa_handler = SIG_DFL, .sa_mask = 0, .sa_flags = 0 };
        }

        kinfoln("Created userspace thread: id %d with ip %#.16lx and sp %#.16lx (ksp %#lx)", thread->id, thread->ip(),
                thread->sp(), thread->kernel_stack.top());

        g_threads.append(thread);
        g_init = thread;

        return thread;
    }

    void add_thread(Thread* thread)
    {
        g_threads.append(thread);
    }

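    // Free a dead thread's remaining resources: unmap its kernel-side stack (for kernel threads,
    // its only stack) and delete the Thread object itself.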
    void reap_thread(Thread* thread)
    {
        CPU::disable_interrupts();

#ifdef REAP_DEBUG
        kdbgln("reap: reaping thread with id %d", thread->id);
#endif

        if (thread->is_kernel)
        {
            auto stack = thread->stack;
            MemoryManager::unmap_owned_and_free_vm(stack.bottom(), stack.bytes() / ARCH_PAGE_SIZE).release_value();
        }
        else
        {
            auto stack = thread->kernel_stack;
            MemoryManager::unmap_owned_and_free_vm(stack.bottom(), stack.bytes() / ARCH_PAGE_SIZE).release_value();
        }

        delete thread;

        CPU::enable_interrupts();
    }

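    // Round-robin scheduling: walk the thread list starting after the current thread (wrapping
    // around) and return the first runnable one, falling back to the idle thread if none is found.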
    Thread* pick_task()
    {
        Thread* old = g_current;
        if (old->is_idle())
        {
            auto maybe_last = g_threads.last();
            if (!maybe_last.has_value()) // There are no threads at all; keep running the idle thread.
                return &g_idle;
            g_current = old = maybe_last.value();
        }

        bool has_found_thread = false;

        do {
            auto maybe_next = g_threads.next(g_current);
            if (!maybe_next.has_value()) g_current = g_threads.expect_first();
            else
                g_current = maybe_next.value();

            if (g_current->state == ThreadState::Runnable)
            {
                has_found_thread = true;
                break;
            }
        } while (g_current != old);

        if (!has_found_thread) g_current = &g_idle;

        return g_current;
    }

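    // Perform the actual switch between two threads: swap register state, save and restore FPU
    // data for userspace threads, switch to the incoming thread's page directory and kernel
    // stack, and hand it a fresh timeslice.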
    void generic_switch_context(Thread* old_thread, Thread* new_thread, Registers* regs)
    {
        if (old_thread != new_thread)
        {
            switch_context(old_thread, new_thread, regs);
            if (!old_thread->is_kernel) old_thread->fp_data.save();
            if (old_thread->state != ThreadState::Idle && MMU::get_page_directory() != MMU::kernel_page_directory())
                old_thread->active_directory = MMU::get_page_directory();
            if (new_thread->active_directory) MMU::switch_page_directory(new_thread->active_directory);
            if (!new_thread->is_kernel)
            {
                CPU::switch_kernel_stack(new_thread->kernel_stack.top());
                new_thread->fp_data.restore();
            }
        }

        if (new_thread->is_idle())
        {
            new_thread->ticks_left = 1; // The idle task only runs for 1 tick so we can check for new runnable tasks
                                        // as fast as possible.
        }
        else
            new_thread->ticks_left = TICKS_PER_TIMESLICE;
    }

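    // Pick the next thread to run and switch to it, delivering any pending signals to the new
    // thread if we are returning to userspace.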
    void switch_task(Registers* regs)
    {
        Thread* old_thread = g_current;
        Thread* new_thread = pick_task();
        generic_switch_context(old_thread, new_thread, regs);
        if (!is_in_kernel(regs)) new_thread->process_pending_signals(regs);
    }

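    // Scheduler tick: account the elapsed tick to the current thread, tick down every thread's
    // sleep and alarm counters, and switch tasks once the current timeslice is used up.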
    void invoke(Registers* regs)
    {
        CPU::disable_interrupts();

        if (is_in_kernel(regs)) g_current->kernel_ticks_self++;
        else
            g_current->user_ticks_self++;

        g_current->ticks_left--;

        for (auto* const thread : g_threads)
        {
            if (thread->state == ThreadState::Sleeping)
            {
                if (thread->sleep_ticks_left == 0 || --thread->sleep_ticks_left == 0) thread->wake_up();
            }

            if (thread->alarm_ticks_left && --thread->alarm_ticks_left == 0) thread->send_signal(SIGALRM);
        }

        if (!g_current->ticks_left) switch_task(regs);
    }

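    // Detach every thread in the Dying state from the scheduler's list and return them so the
    // reap thread can free them.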
    LinkedList<Thread> check_for_dying_threads()
    {
        LinkedList<Thread> result;

        g_threads.delayed_for_each([&](Thread* thread) {
            if (thread->state == ThreadState::Dying)
            {
                g_threads.remove(thread);
                result.append(thread);
            }
        });

        return result;
    }

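    // Find a live (non-dying) thread by its id.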
    Option<Thread*> find_by_pid(pid_t pid)
    {
        for (auto* const thread : g_threads)
        {
            if (thread->id == pid && thread->state != ThreadState::Dying) return thread;
        }

        return {};
    }

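    // Returns true if the given thread has at least one child (the callback returns false to
    // stop iterating after the first one).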
    bool has_children(Thread* thread)
    {
        bool result { false };

        for_each_child(thread, [&](Thread*) {
            result = true;
            return false;
        });

        return result;
    }

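    // Returns the first child of the given thread that has already exited, if any.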
    Option<Thread*> find_exited_child(Thread* thread)
    {
        Option<Thread*> result;

        for_each_child(thread, [&](Thread* child) {
            if (!result.has_value() && child->state == ThreadState::Exited)
            {
                result = child;
                return false;
            }
            return true;
        });

        return result;
    }

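    // Dump the state of every thread to the debug log.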
    void dump_state()
    {
        CPU::disable_interrupts();

        kdbgln("--- BEGIN SCHEDULER DUMP ---");
        kdbgln("current at %p, id = %d", g_current, g_current->id);

        for (const auto* thread : g_threads)
        {
            kdbgln("%p %c [%-20s] %4d, parent = (%-18p,%d), state = %d, ticks: (k:%04zu,u:%04zu), status = "
                   "%d, cwd = %s",
                   thread, thread->is_kernel ? 'k' : 'u', thread->name.chars(), thread->id, thread->parent,
                   thread->parent ? thread->parent->id : 0, (int)thread->state, thread->kernel_ticks_self,
                   thread->user_ticks_self, thread->status,
                   thread->current_directory_path.is_empty() ? "/" : thread->current_directory_path.chars());
        }

        kdbgln("--- END SCHEDULER DUMP ---");

        CPU::enable_interrupts();
    }
}

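// Put the current thread to sleep; the scheduler ticks its sleep counter down and wakes it up
// once the counter reaches zero.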
void kernel_sleep(u64 ms)
{
    g_current->sleep_ticks_left = ms;
    g_current->state = ThreadState::Sleeping;
    kernel_yield();
}

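// Block the current thread, recording which child it is waiting for.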
void kernel_wait(pid_t pid)
{
    g_current->child_being_waited_for = pid;
    g_current->state = ThreadState::Waiting;
    kernel_yield();
}

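// Block the current thread until some other code wakes it up again.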
void kernel_wait_for_event()
{
    g_current->state = ThreadState::Waiting;
    kernel_yield();
}

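// Mark the current thread as dying, tell the reap thread there is work to do, and yield away for
// the last time.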
[[noreturn]] void kernel_exit()
{
    g_current->state = ThreadState::Dying;
    Scheduler::signal_reap_thread();
    kernel_yield();
    unreachable();
}