axmol/core/base/JobSystem.cpp

#include "base/JobSystem.h"
#include "base/Director.h"
#include "yasio/thread_name.hpp"
#include <queue>
#include <thread>
#include <mutex>
#include <condition_variable>
#include <future>
#include <functional>
#include <stdexcept>
#include <span>
NS_AX_BEGIN
#pragma region JobExecutor
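// JobExecutor: a minimal fixed-size thread pool. One worker thread is spawned per
// JobThreadData; all workers drain a single mutex-protected task queue. Each worker
// runs JobThreadData::init() and names its thread before entering its loop, and
// calls JobThreadData::finz() once the pool is stopped.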
class JobExecutor
{
public:
    explicit JobExecutor(std::span<std::shared_ptr<JobThreadData>> tdds) : stop(false)
    {
        for (auto thread_data : tdds)
            workers.emplace_back([this, thread_data] {
                thread_data->init();
                yasio::set_thread_name(thread_data->name());
                for (;;)
                {
                    std::function<void(JobThreadData*)> task;
                    {
                        std::unique_lock<std::mutex> lock(this->queue_mutex);
                        this->condition.wait(lock, [this] { return this->stop || !this->tasks.empty(); });
                        if (this->stop && this->tasks.empty())
                            break;
                        task = std::move(this->tasks.front());
                        this->tasks.pop();
                    }
                    task(thread_data.get());
                }
                thread_data->finz();
            });
    }

    template <class F, class... Args>
    auto enqueue(F&& f, Args&&... args) -> std::future<typename std::invoke_result<F, JobThreadData*, Args...>::type>
    {
        // the callable receives the executing worker's JobThreadData* as its first argument
        using return_type = typename std::invoke_result<F, JobThreadData*, Args...>::type;
        auto task = std::make_shared<std::packaged_task<return_type(JobThreadData*)>>(
            std::bind(std::forward<F>(f), std::placeholders::_1, std::forward<Args>(args)...));
        std::future<return_type> res = task->get_future();
        {
            std::unique_lock<std::mutex> lock(queue_mutex);
            // don't allow enqueueing after stopping the pool
            if (stop)
                throw std::runtime_error("enqueue on stopped executor");
            tasks.emplace([task](JobThreadData* thread_data) { (*task)(thread_data); });
        }
        condition.notify_one();
        return res;
    }

    template <class F, class... Args>
    void enqueue_v(F&& f, Args&&... args)
    {
        auto task = std::bind(std::forward<F>(f), std::placeholders::_1, std::forward<Args>(args)...);
        {
            std::unique_lock<std::mutex> lock(queue_mutex);
            // don't allow enqueueing after stopping the pool
            if (stop)
                throw std::runtime_error("enqueue on stopped executor");
            tasks.emplace(std::move(task));
        }
        condition.notify_one();
    }

    ~JobExecutor()
    {
        {
            std::unique_lock<std::mutex> lock(queue_mutex);
            stop = true;
        }
        condition.notify_all();
        for (std::thread& worker : workers)
            worker.join();
    }

private:
    // need to keep track of threads so we can join them
    std::vector<std::thread> workers;
    // the task queue
    std::queue<std::function<void(JobThreadData*)>> tasks;
    // synchronization
    std::mutex queue_mutex;
    std::condition_variable condition;
    bool stop;
};
#pragma endregion
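
// Usage sketch for the future-returning JobExecutor::enqueue above (illustrative
// only, not part of this translation unit; `executor` and its worker data are
// assumed to exist). The callable takes the worker's JobThreadData* first:
//
//   auto fut = executor.enqueue([](JobThreadData* td, int x) { return x * 2; }, 21);
//   int doubled = fut.get();  // blocks until a worker has executed the task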
#pragma region JobSystem
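// Thread-count policy: a non-positive request is replaced by a heuristic value:
// 1.5x hardware_concurrency() (at least 2) on native targets, hardware_concurrency()
// clamped to [2, 8] under Emscripten with pthreads, and 0 (no workers) when
// Emscripten pthreads are unavailable.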
static int clampThreads(int nThreads)
{
    if (nThreads <= 0)
    {
#if !defined(__EMSCRIPTEN__) || defined(__EMSCRIPTEN_PTHREADS__)
# if !defined(__EMSCRIPTEN__)
        nThreads = (std::max)(static_cast<int>(std::thread::hardware_concurrency() * 3 / 2), 2);
# else
        nThreads = (std::clamp)(static_cast<int>(std::thread::hardware_concurrency()), 2, 8);
# endif
#else
        AXLOGW("Emscripten pthreads are not enabled, JobSystem will execute tasks inline on the calling thread");
        nThreads = 0;
#endif
    }
    return nThreads;
}

class MainThreadData : public JobThreadData
{
public:
    const char* name() override { return "axmol-main"; }
};

JobSystem::JobSystem(int nThreads)
{
    nThreads = clampThreads(nThreads);
    std::vector<std::shared_ptr<JobThreadData>> tdds;
    for (auto i = 0; i < nThreads; ++i)
        tdds.emplace_back(std::make_shared<JobThreadData>());
    init(tdds);
}

JobSystem::JobSystem(std::span<std::shared_ptr<JobThreadData>> tdds)
{
    init(tdds);
}

void JobSystem::init(const std::span<std::shared_ptr<JobThreadData>>& tdds)
{
    _mainThreadData = new MainThreadData();
    if (!tdds.empty())
        _executor = new JobExecutor(tdds);
}

JobSystem::~JobSystem()
{
    if (_executor)
        delete _executor;
    delete _mainThreadData;
}
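
// All enqueue overloads below share the same fallback: when no executor exists
// (zero worker threads), the task is executed synchronously on the caller's
// thread against _mainThreadData instead of being queued.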
void JobSystem::enqueue_v(std::function<void(JobThreadData*)> task)
{
    if (_executor)
        _executor->enqueue_v(std::move(task));
    else
        task(_mainThreadData);
}

void JobSystem::enqueue(std::function<void()> task)
{
    if (_executor)
        this->enqueue(task, nullptr);
    else
        task();
}
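
// Wraps a JobThreadTask so the worker skips it when cancellation has been
// requested, and otherwise records the executing thread's data and tracks the
// Inprogress -> Idle state transition around execute().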
void JobSystem::enqueue(std::shared_ptr<JobThreadTask> task)
{
    auto taskw = [task](JobThreadData* thread_data) {
        if (!task->isRequestCancel())
        {
            task->setThreadData(thread_data);
            task->setState(JobThreadTask::State::Inprogress);
            task->execute();
            task->setState(JobThreadTask::State::Idle);
        }
    };
    if (_executor)
        _executor->enqueue_v(std::move(taskw));
    else
        taskw(_mainThreadData);
}
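
// Runs `task` on a worker and, when a `done` callback is supplied, marshals it
// back to the axmol (main) thread through the scheduler, so completion code runs
// on the engine's update thread.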
void JobSystem::enqueue(std::function<void()> task, std::function<void()> done)
{
    if (!task)
        return;
    auto taskw = [task_ = std::move(task), done_ = std::move(done)](JobThreadData*) {
        task_();
        if (done_)
            Director::getInstance()->getScheduler()->runOnAxmolThread(done_);
    };
    if (_executor)
        _executor->enqueue_v(taskw);
    else
        taskw(_mainThreadData);
}
#pragma endregion
NS_AX_END
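
// Usage sketch (illustrative only, not part of this translation unit; assumes the
// caller owns the JobSystem instance):
//
//   ax::JobSystem jobs(0);  // non-positive count lets clampThreads pick the size
//   jobs.enqueue(
//       [] { /* heavy work off the axmol thread */ },
//       [] { /* runs afterwards on the axmol thread */ });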