author    Vito Caputo <vcaputo@pengaru.com>  2022-06-10 20:11:23 -0700
committer Vito Caputo <vcaputo@pengaru.com>  2022-06-10 21:22:09 -0700
commit    d5db150801023c307fcbe1cd98b4fd8e2d27b55e (patch)
tree      13657dafe98f8e707760fe176a9fa0bd05866b1f /src/modules/roto/roto.c
parent    852ccfb6ffef113003378526c422e591d9339b85 (diff)
til: introduce til_frame_plan_t and .cpu_affinity
modules/checkers w/fill_module=$module requires a consistent mapping of cpu to fragnum, since it creates a per-cpu til_module_context_t for the fill_module. The existing implementation of threaded rendering maximizes performance by letting *any* thread scheduled to run atomically advance fragnum and render the acquired fragnum indiscriminately. A side effect is that any given frame, even when rendered by the same module, will have a random mapping of cpus/threads to fragnums.

With this change, the simple til_module_t.prepare_frame() API of returning a bare fragmenter function instead returns a "frame plan" in til_frame_plan_t. Right now til_frame_plan_t just contains the same fragmenter as before, plus a .cpu_affinity member to set when the frame requires a stable relationship of cpu/thread to fragnum.

Setting .cpu_affinity should be avoided unless necessary, and leaving it unset is the default when initializing the plan in the ergonomic manner w/designated initializers. This is because the way .cpu_affinity is implemented leaves threads spinning while they poll for *their* next fragnum using atomic intrinsics. There's probably some room for improvement here, but this is good enough for now to get things working and correct.
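As a sketch only, the plan type presumably looks something like the following; only the member names .fragmenter and .cpu_affinity (and the til_frame_plan_t/til_fragmenter_t type names) come from this commit, the layout and member types are assumed:

/* assumed layout; only the member names are from this commit */
typedef struct til_frame_plan_t {
	unsigned		cpu_affinity:1;	/* require a stable cpu/thread -> fragnum mapping */
	til_fragmenter_t	fragmenter;	/* breaks the frame into renderable fragments */
} til_frame_plan_t;

The spinning the message describes could be realized along these lines: each rendering thread polls a shared atomic cursor and only claims a fragnum when it is that cpu's turn, so cpu N always renders fragnums N, N+n_cpus, N+2*n_cpus, and so on. This is a hypothetical helper, not the actual til implementation:

#include <stdatomic.h>

static _Atomic unsigned til_next_fragnum;	/* reset to 0 at the start of every frame */

/* returns 1 with *res_fragnum set when this cpu claims its next fragnum, 0 when the frame is exhausted */
static int fragnum_acquire_affine(unsigned cpu, unsigned n_cpus, unsigned n_frags, unsigned *res_fragnum)
{
	for (;;) {
		unsigned fragnum = atomic_load(&til_next_fragnum);

		if (fragnum >= n_frags)
			return 0;

		if (fragnum % n_cpus == cpu) {	/* this fragnum belongs to this cpu */
			*res_fragnum = fragnum;
			/* only the owning cpu ever advances the cursor, so a plain store suffices */
			atomic_store(&til_next_fragnum, fragnum + 1);
			return 1;
		}
		/* not ours; keep polling until the owning cpu advances the cursor */
	}
}

Without .cpu_affinity set, the non-affine path can remain a single atomic_fetch_add() on the cursor, which is why leaving the flag unset is the cheaper default.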
Diffstat (limited to 'src/modules/roto/roto.c')
-rw-r--r--  src/modules/roto/roto.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/src/modules/roto/roto.c b/src/modules/roto/roto.c
index 1831d2b..a8ee45d 100644
--- a/src/modules/roto/roto.c
+++ b/src/modules/roto/roto.c
@@ -174,7 +174,7 @@ static void init_roto(uint8_t texture[256][256], int32_t *costab, int32_t *sinta
 /* prepare a frame for concurrent rendering */
-static void roto_prepare_frame(til_module_context_t *context, unsigned ticks, til_fb_fragment_t *fragment, til_fragmenter_t *res_fragmenter)
+static void roto_prepare_frame(til_module_context_t *context, unsigned ticks, til_fb_fragment_t *fragment, til_frame_plan_t *res_frame_plan)
 {
 	roto_context_t	*ctxt = (roto_context_t *)context;
 	static int	initialized;
@@ -185,7 +185,7 @@ static void roto_prepare_frame(til_module_context_t *context, unsigned ticks, ti
 		init_roto(texture, costab, sintab);
 	}
-	*res_fragmenter = til_fragmenter_slice_per_cpu;
+	*res_frame_plan = (til_frame_plan_t){ .fragmenter = til_fragmenter_slice_per_cpu };
 	// This governs the rotation and color cycle.
 	if (ticks != context->ticks) {
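For contrast with the roto hunk above, which leaves .cpu_affinity unset, a module that does need the stable mapping (the checkers w/fill_module case motivating this commit) would presumably opt in explicitly, along these lines (fragmenter name hypothetical):

	*res_frame_plan = (til_frame_plan_t){
		.fragmenter	= checkers_fragmenter,	/* hypothetical name */
		.cpu_affinity	= 1,			/* per-cpu fill_module contexts need cpu->fragnum stability */
	};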