added per-thread metrics
This commit is contained in:
parent 2a72cb42d8
commit b37eb0ed62
@@ -16,6 +16,8 @@
#include <functional>
#include <condition_variable>

#define UF_THREAD_METRICS 1

namespace uf {
namespace thread {
extern UF_API uf::stl::string mainThreadName;
@@ -29,6 +31,19 @@ namespace pod {
typedef uint16_t id_t;
typedef std::function<void()> function_t;
typedef uf::stl::vector<pod::Thread::function_t> container_t;

struct UF_API Tasks {
uf::stl::string name = uf::thread::workerThreadName;
bool waits = true;

pod::Thread::container_t container;

inline void add( const pod::Thread::function_t& fun ) { container.emplace_back(fun); }
inline void emplace( const pod::Thread::function_t& fun ) { container.emplace_back(fun); }
inline void queue( const pod::Thread::function_t& fun ) { container.emplace_back(fun); }
inline bool empty() { return container.empty(); }
inline void clear() { container = {}; }
};

pod::Thread::id_t uid;
uf::stl::string name;
@@ -51,19 +66,20 @@ namespace pod {
pod::Thread::container_t container;

uint32_t affinity = 0;
#if UF_THREAD_METRICS
struct Performance {
typedef std::tuple<float, float, float, uint32_t> tuple_t;

struct UF_API Tasks {
uf::stl::string name = uf::thread::workerThreadName;
bool waits = true;
std::atomic<float> activeTimeMs{0.0f};
std::atomic<float> idleTimeMs{0.0f};
std::atomic<float> totalFrameTimeMs{0.0f};
std::atomic<uint32_t> tasksProcessed{0};

pod::Thread::container_t container;

inline void add( const pod::Thread::function_t& fun ) { container.emplace_back(fun); }
inline void emplace( const pod::Thread::function_t& fun ) { container.emplace_back(fun); }
inline void queue( const pod::Thread::function_t& fun ) { container.emplace_back(fun); }
inline bool empty() { return container.empty(); }
inline void clear() { container = {}; }
};
inline tuple_t collect() {
return std::make_tuple( activeTimeMs.load(), idleTimeMs.load(), totalFrameTimeMs.load(), tasksProcessed.load() );
}
} metrics;
#endif
};
}

@@ -133,5 +149,9 @@ namespace uf {
std::thread::id UF_API id( const pod::Thread& );
pod::Thread::id_t UF_API uid( const pod::Thread& );
bool UF_API running( const pod::Thread& );

#if UF_THREAD_METRICS
uf::stl::unordered_map<uf::stl::string, pod::Thread::Performance::tuple_t> collectStats();
#endif
}
}
@@ -873,6 +873,11 @@ void UF_API uf::tick() {
#if UF_ENV_DREAMCAST
DC_STATS();
#endif
#if UF_THREAD_METRICS
auto metrics = uf::thread::collectStats();
for ( auto& [ name, stats ] : metrics ) UF_MSG_DEBUG("Thread {}: active={}, idle={}, total={}, tasks={}", name, std::get<0>(stats), std::get<1>(stats), std::get<2>(stats), std::get<3>(stats) );
#endif

/*global*/::times.frames = 0;
}
}

@@ -1475,23 +1475,24 @@ bool uf::graph::tick( pod::Graph::Storage& storage ) {
UF_MSG_DEBUG("Graph buffers requesting renderer update");
uf::renderer::states::rebuild = true;

#if UF_USE_VULKAN
if ( uf::renderer::hasRenderMode("", true) ) {
auto& renderMode = uf::renderer::getRenderMode("", true);

#if UF_USE_VULKAN
auto& blitter = renderMode.getBlitter();
auto& shader = blitter.material.getShader(blitter.material.hasShader("compute", "deferred") ? "compute" : "fragment", "deferred");
if ( blitter.material.hasShader("compute", "deferred") || blitter.material.hasShader("fragment", "deferred") ) {
auto& shader = blitter.material.getShader(blitter.material.hasShader("compute", "deferred") ? "compute" : "fragment", "deferred");

shader.metadata.aliases.buffers.clear();
shader.metadata.aliases.buffers.clear();

shader.aliasBuffer( "drawCommands", storage.buffers.drawCommands );
shader.aliasBuffer( "instance", storage.buffers.instance );
shader.aliasBuffer( "instanceAddresses", storage.buffers.instanceAddresses );
shader.aliasBuffer( "material", storage.buffers.material );
shader.aliasBuffer( "texture", storage.buffers.texture );
shader.aliasBuffer( "light", storage.buffers.light );
#endif
shader.aliasBuffer( "drawCommands", storage.buffers.drawCommands );
shader.aliasBuffer( "instance", storage.buffers.instance );
shader.aliasBuffer( "instanceAddresses", storage.buffers.instanceAddresses );
shader.aliasBuffer( "material", storage.buffers.material );
shader.aliasBuffer( "texture", storage.buffers.texture );
shader.aliasBuffer( "light", storage.buffers.light );
}
}
#endif
}

return rebuild;

@@ -119,6 +119,13 @@ void uf::thread::process( pod::Thread& thread ) { if ( !uf::thread::has(thread.n
local_queue.clear();
local_container.clear();

#if UF_THREAD_METRICS
uint32_t tasksThisFrame = 0;
auto frameStart = std::chrono::high_resolution_clock::now();
auto idleStart = std::chrono::high_resolution_clock::now();
#endif

// wait for work
{
std::unique_lock<std::mutex> lock(thread.mutex);
if ( thread.limiter > 0 ) {
@@ -139,6 +146,16 @@ void uf::thread::process( pod::Thread& thread ) { if ( !uf::thread::has(thread.n
std::swap( local_queue, thread.queue );
}

// update stats
#if UF_THREAD_METRICS
{
std::chrono::duration<float, std::milli> idleTime = std::chrono::high_resolution_clock::now() - idleStart;
thread.metrics.idleTimeMs.store(idleTime.count(), std::memory_order_relaxed);
}
auto activeStart = std::chrono::high_resolution_clock::now();
#endif

// iterate through queued work
for ( auto& function : local_queue ) {
#if UF_EXCEPTIONS
try {
@@ -149,17 +166,21 @@ void uf::thread::process( pod::Thread& thread ) { if ( !uf::thread::has(thread.n
UF_MSG_ERROR("Thread {} (UID: {}) caught exception: {}", thread.name, thread.uid, e.what());
}
#endif

if ( thread.pending.fetch_sub(1) == 1 ) {
thread.conditions.finished.notify_all();
}
#if UF_THREAD_METRICS
++tasksThisFrame;
#endif
}

// buffer persistent work
{
std::lock_guard<std::mutex> lock(thread.mutex);
local_container = thread.container;
}

// iterate through persistent work
for ( auto& function : local_container ) {
#if UF_EXCEPTIONS
try {
@@ -170,12 +191,27 @@ void uf::thread::process( pod::Thread& thread ) { if ( !uf::thread::has(thread.n
UF_MSG_ERROR("Thread {} (UID: {}) caught exception: {}", thread.name, thread.uid, e.what());
}
#endif
#if UF_THREAD_METRICS
++tasksThisFrame;
#endif
}

{
std::lock_guard<std::mutex> lock(thread.mutex);
thread.conditions.finished.notify_all();
}

// update metrics
#if UF_THREAD_METRICS
{
std::chrono::duration<float, std::milli> activeTime = std::chrono::high_resolution_clock::now() - activeStart;
std::chrono::duration<float, std::milli> frameTime = std::chrono::high_resolution_clock::now() - frameStart;

thread.metrics.activeTimeMs.store(activeTime.count(), std::memory_order_relaxed);
thread.metrics.totalFrameTimeMs.store(frameTime.count(), std::memory_order_relaxed);
thread.metrics.tasksProcessed.store(tasksThisFrame, std::memory_order_relaxed);
}
#endif
}
void uf::thread::wait( pod::Thread& thread ) {
std::unique_lock<std::mutex> lock(thread.mutex);
@@ -204,7 +240,7 @@ void uf::thread::terminate() {
std::swap( local_threads, uf::thread::threads );
}

for ( auto& [ key, thread ] : local_threads ) {
for ( auto& [ key, thread ] : local_threads ) {
uf::thread::quit( *thread );
delete thread;
}
@@ -252,4 +288,13 @@ bool uf::thread::has( const uf::stl::string& name ) {
pod::Thread& uf::thread::get( const uf::stl::string& name ) {
if ( !uf::thread::has(name) ) return uf::thread::create(name);
return *uf::thread::threads[name];
}
}

#if UF_THREAD_METRICS
uf::stl::unordered_map<uf::stl::string, pod::Thread::Performance::tuple_t> uf::thread::collectStats() {
uf::stl::unordered_map<uf::stl::string, pod::Thread::Performance::tuple_t> stats;
// possible mutex issue: iterates uf::thread::threads without holding a lock, so a concurrent create()/terminate() could race with this read
for ( auto& [ key, thread ] : uf::thread::threads ) stats[thread->name] = thread->metrics.collect();
return stats;
}
#endif
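
As a usage note, here is a minimal sketch of how the new per-thread metrics could be consumed from application code, assuming UF_THREAD_METRICS is enabled and building only on the collectStats() call and UF_MSG_DEBUG macro shown above; the logThreadLoad helper and the derived load percentage are illustrative and not part of this commit.

#if UF_THREAD_METRICS
// Illustrative only: unpack the (active, idle, total, tasks) tuple reported
// per thread and log a rough load percentage for the last frame.
void logThreadLoad() {
	auto metrics = uf::thread::collectStats();
	for ( auto& [ name, stats ] : metrics ) {
		auto [ activeMs, idleMs, frameMs, tasks ] = stats;
		// Guard against a zero-length frame before computing the ratio.
		float load = frameMs > 0.0f ? ( activeMs / frameMs ) * 100.0f : 0.0f;
		UF_MSG_DEBUG("Thread {}: load={}%, idle={}ms, tasks={}", name, load, idleMs, tasks);
	}
}
#endif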