summaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
authorVito Caputo <vcaputo@pengaru.com>2022-09-05 22:50:36 -0700
committerVito Caputo <vcaputo@pengaru.com>2022-09-05 22:50:36 -0700
commit0fb1ebed2f4ed97d2b5789b4cb0d564c5ca19bf5 (patch)
tree638632b34c434da3162f456661b33c8b99f4fff7 /src
parent237d717f0989e9e3adc51b0088d07db9b8cac6d6 (diff)
modules/roto: move tables init to context create
Mechanical rearrangement, but ultimately there probably needs to be an initialize function added to til_module_t. With all the threading chaos going on, this approach to implicit initialization with a static flag is racy without using atomics. For now it's probably marginally better to do this in context create vs. prepare frame. Context creates *tend* to happen in single-threaded phases of execution, and infrequently. Prepare frame is a serialized phase of the rendering for a given context, but there can be many contexts in-flight simultaneously now with all the forms of compositing happening, sometimes from multiple threads. So that assumption no longer holds...
Diffstat (limited to 'src')
-rw-r--r--src/modules/roto/roto.c71
1 file changed, 36 insertions, 35 deletions
diff --git a/src/modules/roto/roto.c b/src/modules/roto/roto.c
index e2abea3..1427947 100644
--- a/src/modules/roto/roto.c
+++ b/src/modules/roto/roto.c
@@ -33,10 +33,46 @@ typedef struct roto_context_t {
static int32_t costab[FIXED_TRIG_LUT_SIZE], sintab[FIXED_TRIG_LUT_SIZE];
static uint8_t texture[256][256];
+
+static void init_roto(uint8_t texture[256][256], int32_t *costab, int32_t *sintab)
+{
+ int x, y, i;
+
+ /* Generate simple checker pattern texture, nothing clever, feel free to play! */
+ /* If you modify texture on every frame instead of only @ initialization you can
+ * produce some neat output. These values are indexed into palette[] below. */
+ for (y = 0; y < 128; y++) {
+ for (x = 0; x < 128; x++)
+ texture[y][x] = 1;
+ for (; x < 256; x++)
+ texture[y][x] = 0;
+ }
+ for (; y < 256; y++) {
+ for (x = 0; x < 128; x++)
+ texture[y][x] = 0;
+ for (; x < 256; x++)
+ texture[y][x] = 1;
+ }
+
+ /* Generate fixed-point cos & sin LUTs. */
+ for (i = 0; i < FIXED_TRIG_LUT_SIZE; i++) {
+ costab[i] = ((cos((double)2*M_PI*i/FIXED_TRIG_LUT_SIZE))*FIXED_EXP);
+ sintab[i] = ((sin((double)2*M_PI*i/FIXED_TRIG_LUT_SIZE))*FIXED_EXP);
+ }
+}
+
+
static til_module_context_t * roto_create_context(unsigned seed, unsigned ticks, unsigned n_cpus, til_setup_t *setup)
{
+ static int initialized;
roto_context_t *ctxt;
+ if (!initialized) {
+ initialized = 1;
+
+ init_roto(texture, costab, sintab);
+ }
+
ctxt = til_module_context_new(sizeof(roto_context_t), seed, ticks, n_cpus);
if (!ctxt)
return NULL;
@@ -145,45 +181,10 @@ static uint32_t bilerp_color(uint8_t texture[256][256], color_t *palette, int tx
}
-static void init_roto(uint8_t texture[256][256], int32_t *costab, int32_t *sintab)
-{
- int x, y, i;
-
- /* Generate simple checker pattern texture, nothing clever, feel free to play! */
- /* If you modify texture on every frame instead of only @ initialization you can
- * produce some neat output. These values are indexed into palette[] below. */
- for (y = 0; y < 128; y++) {
- for (x = 0; x < 128; x++)
- texture[y][x] = 1;
- for (; x < 256; x++)
- texture[y][x] = 0;
- }
- for (; y < 256; y++) {
- for (x = 0; x < 128; x++)
- texture[y][x] = 0;
- for (; x < 256; x++)
- texture[y][x] = 1;
- }
-
- /* Generate fixed-point cos & sin LUTs. */
- for (i = 0; i < FIXED_TRIG_LUT_SIZE; i++) {
- costab[i] = ((cos((double)2*M_PI*i/FIXED_TRIG_LUT_SIZE))*FIXED_EXP);
- sintab[i] = ((sin((double)2*M_PI*i/FIXED_TRIG_LUT_SIZE))*FIXED_EXP);
- }
-}
-
-
/* prepare a frame for concurrent rendering */
static void roto_prepare_frame(til_module_context_t *context, unsigned ticks, til_fb_fragment_t **fragment_ptr, til_frame_plan_t *res_frame_plan)
{
roto_context_t *ctxt = (roto_context_t *)context;
- static int initialized;
-
- if (!initialized) {
- initialized = 1;
-
- init_roto(texture, costab, sintab);
- }
*res_frame_plan = (til_frame_plan_t){ .fragmenter = til_fragmenter_slice_per_cpu };
© All Rights Reserved