diff options
author | Vito Caputo <vcaputo@pengaru.com> | 2022-04-15 11:26:24 -0700 |
---|---|---|
committer | Vito Caputo <vcaputo@pengaru.com> | 2022-04-15 11:26:24 -0700 |
commit | 4e52861bb580ac41eaf8df2dd4c99029c3706cf3 (patch) | |
tree | 1e65261f7e0fbf4c51d03dc1d4ae1e84d61ecfdb | |
parent | 292e7e85d7e0a02fb19a3ec6f8245627c41c8f62 (diff) |
til_threads: propagate threaded til_fb_fragment.zeroed
Currently, when a threaded renderer performs
til_fb_fragment_zero() in render_fragment() rather than in
prepare_frame(), the til_fb_fragment.zeroed maintenance stays
isolated to the ephemeral fragment generated by the fragmenter.
With this commit, when all ephemeral fragments rendered in a
threaded fashion for a given frame return with their .zeroed
member set, the outer frame's .zeroed member gets set as well.
This should enable proper threaded zeroing of the frame in
render_fragment().
Note that since it's careful to actually count the number of
zeroed ephemeral subfragments and only propagates when that count
matches the number of subfragments rendered in the entire frame,
it's also supported to use til_fb_fragment_zero() conditionally
on just some fragments while leaving others un-zeroed; in that
case the entire frame will not get its .zeroed member set.
Imagine a renderer which randomly zeroes out some fragments while
drawing into others: this will be honored as a non-zeroed frame
on the whole.
-rw-r--r-- | src/til_threads.c | 15 |
1 files changed, 13 insertions, 2 deletions
diff --git a/src/til_threads.c b/src/til_threads.c index 7c68d52..6f01db1 100644 --- a/src/til_threads.c +++ b/src/til_threads.c @@ -18,6 +18,8 @@ typedef struct til_threads_t { pthread_mutex_t idle_mutex; pthread_cond_t idle_cond; + unsigned idle_n_fragments; + unsigned idle_n_zeroed; unsigned n_idle; pthread_mutex_t frame_mutex; @@ -45,6 +47,7 @@ static void * thread_func(void *_thread) pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL); for (;;) { + unsigned n_fragments = 0, n_zeroed = 0; /* wait for a new frame */ pthread_mutex_lock(&threads->frame_mutex); @@ -65,14 +68,22 @@ static void * thread_func(void *_thread) break; threads->render_fragment_func(threads->context, threads->ticks, thread->id, &fragment); + n_zeroed += fragment.zeroed; + n_fragments++; } /* report as idle */ pthread_mutex_lock(&threads->idle_mutex); pthread_cleanup_push((void (*)(void *))pthread_mutex_unlock, &threads->idle_mutex); + threads->idle_n_fragments += n_fragments; + threads->idle_n_zeroed += n_zeroed; threads->n_idle++; - if (threads->n_idle == threads->n_threads) /* Frame finished! Notify potential waiter. */ + if (threads->n_idle == threads->n_threads) { /* Frame finished! Notify potential waiter. */ + if (threads->idle_n_zeroed == threads->idle_n_fragments) + threads->fragment->zeroed = 1; + pthread_cond_signal(&threads->idle_cond); + } pthread_cleanup_pop(1); } @@ -104,7 +115,7 @@ void til_threads_frame_submit(til_threads_t *threads, til_fb_fragment_t *fragmen threads->context = context; threads->ticks = ticks; threads->frame_num++; - threads->n_idle = threads->next_fragment = 0; + threads->n_idle = threads->idle_n_zeroed = threads->idle_n_fragments = threads->next_fragment = 0; pthread_cond_broadcast(&threads->frame_cond); pthread_cleanup_pop(1); } |