Diffstat (limited to 'src/modules/sparkler')
-rw-r--r--  src/modules/sparkler/Makefile.am   |   4
-rw-r--r--  src/modules/sparkler/bsp.c         | 584
-rw-r--r--  src/modules/sparkler/bsp.h         |  28
-rw-r--r--  src/modules/sparkler/burst.c       | 111
-rw-r--r--  src/modules/sparkler/chunker.c     | 225
-rw-r--r--  src/modules/sparkler/chunker.h     |  11
-rw-r--r--  src/modules/sparkler/container.h   |  11
-rw-r--r--  src/modules/sparkler/draw.h        |  32
-rw-r--r--  src/modules/sparkler/list.h        | 252
-rw-r--r--  src/modules/sparkler/particle.c    |  14
-rw-r--r--  src/modules/sparkler/particle.h    |  79
-rw-r--r--  src/modules/sparkler/particles.c   | 342
-rw-r--r--  src/modules/sparkler/particles.h   |  21
-rw-r--r--  src/modules/sparkler/rocket.c      | 144
-rw-r--r--  src/modules/sparkler/simple.c      | 113
-rw-r--r--  src/modules/sparkler/spark.c       |  63
-rw-r--r--  src/modules/sparkler/sparkler.c    |  53
-rw-r--r--  src/modules/sparkler/sparkler.h    |   8
-rw-r--r--  src/modules/sparkler/v3f.h         | 157
-rw-r--r--  src/modules/sparkler/xplode.c      |  82
20 files changed, 2334 insertions, 0 deletions
diff --git a/src/modules/sparkler/Makefile.am b/src/modules/sparkler/Makefile.am
new file mode 100644
index 0000000..ed85f36
--- /dev/null
+++ b/src/modules/sparkler/Makefile.am
@@ -0,0 +1,4 @@
+noinst_LIBRARIES = libsparkler.a
+libsparkler_a_SOURCES = bsp.c bsp.h burst.c chunker.c chunker.h container.h draw.h list.h particle.c particle.h particles.c particles.h rocket.c simple.c spark.c sparkler.c sparkler.h v3f.h xplode.c
+libsparkler_a_CFLAGS = @ROTOTILLER_CFLAGS@ -ffast-math
+libsparkler_a_CPPFLAGS = @ROTOTILLER_CFLAGS@ -I@top_srcdir@/src
diff --git a/src/modules/sparkler/bsp.c b/src/modules/sparkler/bsp.c
new file mode 100644
index 0000000..381e922
--- /dev/null
+++ b/src/modules/sparkler/bsp.c
@@ -0,0 +1,584 @@
+#include <assert.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+
+#include "bsp.h"
+
+
+/* octree-based bsp for faster proximity searches */
+/* meanings:
+ * octrant = "octo" analog of a quadrant, an octree is a quadtree with an additional dimension (Z/3d)
+ * bv = bounding volume
+ * bsp = binary space partition
+ * occupant = the things being indexed by the bsp (e.g. a particle, or its position)
+ */
+
+
+/* FIXME: these are not tuned at all, and should really all be parameters to bsp_new() instead */
+#define BSP_GROWBY 16
+#define BSP_MAX_OCCUPANTS 64
+#define BSP_MAX_DEPTH 16
+
+#define MAX(_a, _b) ((_a) > (_b) ? (_a) : (_b))
+#define MIN(_a, _b) ((_a) < (_b) ? (_a) : (_b))
+
+
+struct bsp_node_t {
+ v3f_t center; /* center point about which the bounding volume's 3d-space is divided */
+ bsp_node_t *parent; /* parent bounding volume, NULL when root node */
+ bsp_node_t *octrants; /* NULL when a leaf, otherwise an array of 8 bsp_node_t's */
+ list_head_t occupants; /* list of occupants in this volume when a leaf node */
+ unsigned n_occupants; /* number of ^^ */
+};
+
+#define OCTRANTS \
+ octrant(OCT_XL_YL_ZL, (1 << 2 | 1 << 1 | 1)) \
+ octrant(OCT_XR_YL_ZL, ( 1 << 1 | 1)) \
+ octrant(OCT_XL_YR_ZL, (1 << 2 | 1)) \
+ octrant(OCT_XR_YR_ZL, ( 1)) \
+ octrant(OCT_XL_YL_ZR, (1 << 2 | 1 << 1 )) \
+ octrant(OCT_XR_YL_ZR, ( 1 << 1 )) \
+ octrant(OCT_XL_YR_ZR, (1 << 2 )) \
+ octrant(OCT_XR_YR_ZR, 0)
+
+#define octrant(_sym, _val) _sym = _val,
+typedef enum _octrant_idx_t {
+ OCTRANTS
+} octrant_idx_t;
+#undef octrant
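+
+/* The octrant index encodes which side of each splitting plane a point falls
+ * on, one bit per dimension: bit 2 set means "left" of X (x <= center.x),
+ * bit 1 "left" of Y, bit 0 "left" of Z, all bits clear being OCT_XR_YR_ZR.
+ * e.g. OCT_XL_YR_ZL = (1 << 2 | 1) = 5: left of X, right of Y, left of Z.
+ */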
+
+/* bsp lookup state, encapsulated for preservation across composite
+ * lookup-dependent operations, so they can potentially avoid having
+ * to redo the lookup. i.e. lookup caching.
+ */
+typedef struct _bsp_lookup_t {
+ int depth;
+ v3f_t left;
+ v3f_t right;
+ bsp_node_t *bv;
+ octrant_idx_t oidx;
+} bsp_lookup_t;
+
+struct bsp_t {
+ bsp_node_t root;
+ list_head_t free;
+ bsp_lookup_t lookup_cache;
+};
+
+
+static inline const char * octstr(octrant_idx_t oidx)
+{
+#define octrant(_sym, _val) #_sym,
+ static const char *octrant_strs[] = {
+ OCTRANTS
+ };
+#undef octrant
+
+ return octrant_strs[oidx];
+}
+
+
+static inline void _bsp_print(bsp_node_t *node)
+{
+ static int depth = 0;
+
+ fprintf(stderr, "%-*s %i: %p\n", depth, " ", depth, node);
+ if (node->octrants) {
+ int i;
+
+ for (i = 0; i < 8; i++) {
+ fprintf(stderr, "%-*s %i: %s: %p\n", depth, " ", depth, octstr(i), &node->octrants[i]);
+ depth++;
+ _bsp_print(&node->octrants[i]);
+ depth--;
+ }
+ }
+}
+
+
+/* Print a bsp tree to stderr (debugging) */
+void bsp_print(bsp_t *bsp)
+{
+ _bsp_print(&bsp->root);
+}
+
+
+/* Initialize the lookup cache to the root */
+static inline void bsp_init_lookup_cache(bsp_t *bsp) {
+ bsp->lookup_cache.bv = &bsp->root;
+ bsp->lookup_cache.depth = 0;
+ v3f_set(&bsp->lookup_cache.left, -1.0, -1.0, -1.0); /* TODO: the bsp AABB should be supplied to bsp_new() */
+ v3f_set(&bsp->lookup_cache.right, 1.0, 1.0, 1.0);
+}
+
+
+/* Invalidate/reset the bsp's lookup cache TODO: make conditional on a supplied node being cached? */
+static inline void bsp_invalidate_lookup_cache(bsp_t *bsp) {
+ if (bsp->lookup_cache.bv != &bsp->root) {
+ bsp_init_lookup_cache(bsp);
+ }
+}
+
+
+/* Create a new bsp octree. */
+bsp_t * bsp_new(void)
+{
+ bsp_t *bsp;
+
+ bsp = calloc(1, sizeof(bsp_t));
+ if (!bsp) {
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&bsp->root.occupants);
+ INIT_LIST_HEAD(&bsp->free);
+ bsp_init_lookup_cache(bsp);
+
+ return bsp;
+}
+
+
+/* Free a bsp octree */
+void bsp_free(bsp_t *bsp)
+{
+ /* TODO: free everything ... */
+ free(bsp);
+}
+
+
+/* lookup a position's containing leaf node in the bsp tree, store resultant lookup state in *lookup_res */
+static inline void bsp_lookup_position(bsp_t *bsp, bsp_node_t *root, v3f_t *position, bsp_lookup_t *lookup_res)
+{
+ bsp_lookup_t res = bsp->lookup_cache;
+
+ if (res.bv->parent) {
+ /* When starting from a cached (non-root) lookup, we must verify our position falls within the cached bv */
+ if (position->x < res.left.x || position->x > res.right.x ||
+ position->y < res.left.y || position->y > res.right.y ||
+ position->z < res.left.z || position->z > res.right.z) {
+ bsp_invalidate_lookup_cache(bsp);
+ res = bsp->lookup_cache;
+ }
+ }
+
+ while (res.bv->octrants) {
+ res.oidx = OCT_XR_YR_ZR;
+ if (position->x <= res.bv->center.x) {
+ res.oidx |= (1 << 2);
+ res.right.x = res.bv->center.x;
+ } else {
+ res.left.x = res.bv->center.x;
+ }
+
+ if (position->y <= res.bv->center.y) {
+ res.oidx |= (1 << 1);
+ res.right.y = res.bv->center.y;
+ } else {
+ res.left.y = res.bv->center.y;
+ }
+
+ if (position->z <= res.bv->center.z) {
+ res.oidx |= 1;
+ res.right.z = res.bv->center.z;
+ } else {
+ res.left.z = res.bv->center.z;
+ }
+
+ res.bv = &res.bv->octrants[res.oidx];
+ res.depth++;
+ }
+
+ *lookup_res = bsp->lookup_cache = res;
+}
+
+
+/* Add an occupant to a bsp tree, use provided node lookup *l if supplied */
+static inline void _bsp_add_occupant(bsp_t *bsp, bsp_occupant_t *occupant, v3f_t *position, bsp_lookup_t *l)
+{
+ bsp_lookup_t _lookup;
+
+ /* if no explicitly cached lookup result was provided, perform the lookup now (which may still be cached). */
+ if (!l) {
+ l = &_lookup;
+ bsp_lookup_position(bsp, &bsp->root, position, l);
+ }
+
+ assert(l);
+ assert(l->bv);
+
+ occupant->position = position;
+
+#define map_occupant2octrant(_occupant, _bv, _octrant) \
+ _octrant = OCT_XR_YR_ZR; \
+ if (_occupant->position->x <= _bv->center.x) { \
+ _octrant |= (1 << 2); \
+ } \
+ if (_occupant->position->y <= _bv->center.y) { \
+ _octrant |= (1 << 1); \
+ } \
+ if (_occupant->position->z <= _bv->center.z) { \
+ _octrant |= 1; \
+ }
+
+ if (l->bv->n_occupants >= BSP_MAX_OCCUPANTS && l->depth < BSP_MAX_DEPTH) {
+ int i;
+ list_head_t *t, *_t;
+ bsp_node_t *bv = l->bv;
+
+ /* bv is full and shallow enough, subdivide it. */
+
+ /* ensure the free list has something for us */
+ if (list_empty(&bsp->free)) {
+ bsp_node_t *t;
+
+ /* TODO: does using the chunker instead make sense here? */
+			t = calloc(8 * BSP_GROWBY, sizeof(bsp_node_t));
+			assert(t); /* XXX: allocation failure isn't gracefully handled */
+ for (i = 0; i < 8 * BSP_GROWBY; i += 8) {
+ list_add(&t[i].occupants, &bsp->free);
+ }
+ }
+
+ /* take an octrants array from the free list */
+ bv->octrants = list_entry(bsp->free.next, bsp_node_t, occupants);
+ list_del(&bv->octrants[0].occupants);
+
+ /* initialize the octrants */
+ for (i = 0; i < 8; i++) {
+ INIT_LIST_HEAD(&bv->octrants[i].occupants);
+ bv->octrants[i].n_occupants = 0;
+ bv->octrants[i].parent = bv;
+ bv->octrants[i].octrants = NULL;
+ }
+
+ /* set the center point in each octrant which places the partitioning hyperplane */
+ /* XXX: note this is pretty unreadable due to reusing the earlier computed values
+ * where the identical computation is required.
+ */
+ bv->octrants[OCT_XR_YR_ZR].center.x = (l->right.x - bv->center.x) * .5f + bv->center.x;
+ bv->octrants[OCT_XR_YR_ZR].center.y = (l->right.y - bv->center.y) * .5f + bv->center.y;
+ bv->octrants[OCT_XR_YR_ZR].center.z = (l->right.z - bv->center.z) * .5f + bv->center.z;
+
+ bv->octrants[OCT_XR_YR_ZL].center.x = bv->octrants[OCT_XR_YR_ZR].center.x;
+ bv->octrants[OCT_XR_YR_ZL].center.y = bv->octrants[OCT_XR_YR_ZR].center.y;
+ bv->octrants[OCT_XR_YR_ZL].center.z = (bv->center.z - l->left.z) * .5f + l->left.z;
+
+ bv->octrants[OCT_XR_YL_ZR].center.x = bv->octrants[OCT_XR_YR_ZR].center.x;
+ bv->octrants[OCT_XR_YL_ZR].center.y = (bv->center.y - l->left.y) * .5f + l->left.y;
+ bv->octrants[OCT_XR_YL_ZR].center.z = bv->octrants[OCT_XR_YR_ZR].center.z;
+
+ bv->octrants[OCT_XR_YL_ZL].center.x = bv->octrants[OCT_XR_YR_ZR].center.x;
+ bv->octrants[OCT_XR_YL_ZL].center.y = bv->octrants[OCT_XR_YL_ZR].center.y;
+ bv->octrants[OCT_XR_YL_ZL].center.z = bv->octrants[OCT_XR_YR_ZL].center.z;
+
+ bv->octrants[OCT_XL_YR_ZR].center.x = (bv->center.x - l->left.x) * .5f + l->left.x;
+ bv->octrants[OCT_XL_YR_ZR].center.y = bv->octrants[OCT_XR_YR_ZR].center.y;
+ bv->octrants[OCT_XL_YR_ZR].center.z = bv->octrants[OCT_XR_YR_ZR].center.z;
+
+ bv->octrants[OCT_XL_YR_ZL].center.x = bv->octrants[OCT_XL_YR_ZR].center.x;
+ bv->octrants[OCT_XL_YR_ZL].center.y = bv->octrants[OCT_XR_YR_ZR].center.y;
+ bv->octrants[OCT_XL_YR_ZL].center.z = bv->octrants[OCT_XR_YR_ZL].center.z;
+
+ bv->octrants[OCT_XL_YL_ZR].center.x = bv->octrants[OCT_XL_YR_ZR].center.x;
+ bv->octrants[OCT_XL_YL_ZR].center.y = bv->octrants[OCT_XR_YL_ZR].center.y;
+ bv->octrants[OCT_XL_YL_ZR].center.z = bv->octrants[OCT_XR_YR_ZR].center.z;
+
+ bv->octrants[OCT_XL_YL_ZL].center.x = bv->octrants[OCT_XL_YR_ZR].center.x;
+ bv->octrants[OCT_XL_YL_ZL].center.y = bv->octrants[OCT_XR_YL_ZR].center.y;
+ bv->octrants[OCT_XL_YL_ZL].center.z = bv->octrants[OCT_XR_YR_ZL].center.z;
+
+ /* migrate the occupants into the appropriate octrants */
+ list_for_each_safe(t, _t, &bv->occupants) {
+ octrant_idx_t oidx;
+ bsp_occupant_t *o = list_entry(t, bsp_occupant_t, occupants);
+
+ map_occupant2octrant(o, bv, oidx);
+ list_move(t, &bv->octrants[oidx].occupants);
+ o->leaf = &bv->octrants[oidx];
+ bv->octrants[oidx].n_occupants++;
+ }
+ bv->n_occupants = 0;
+
+		/* map the occupant being added to its new child leaf, and point the lookup at it */
+ map_occupant2octrant(occupant, bv, l->oidx);
+ l->bv = &bv->octrants[l->oidx];
+ l->depth++;
+ }
+
+#undef map_occupant2octrant
+
+ occupant->leaf = l->bv;
+ list_add(&occupant->occupants, &l->bv->occupants);
+ l->bv->n_occupants++;
+
+ assert(occupant->leaf);
+}
+
+
+/* add an occupant to a bsp tree */
+void bsp_add_occupant(bsp_t *bsp, bsp_occupant_t *occupant, v3f_t *position)
+{
+ _bsp_add_occupant(bsp, occupant, position, NULL);
+}
+
+
+/* Delete an occupant from a bsp tree.
+ * Set reservation to prevent potentially freeing a node made empty by our delete that
+ * we have a reference to (i.e. a cached lookup result, see bsp_move_occupant()).
+ */
+static inline void _bsp_delete_occupant(bsp_t *bsp, bsp_occupant_t *occupant, bsp_node_t *reservation)
+{
+ if (occupant->leaf->octrants) {
+ fprintf(stderr, "BUG: deleting occupant(%p) from non-leaf bv(%p)\n", occupant, occupant->leaf);
+ }
+
+ /* delete the occupant */
+ list_del(&occupant->occupants);
+ occupant->leaf->n_occupants--;
+
+ if (list_empty(&occupant->leaf->occupants)) {
+ bsp_node_t *parent_bv;
+
+ if (occupant->leaf->n_occupants) {
+ fprintf(stderr, "BUG: bv_occupants empty but n_occupants=%u\n", occupant->leaf->n_occupants);
+ }
+
+		/* The leaf is now empty.  Since nodes are allocated as clusters of 8, they aren't freed unless all nodes in the cluster are empty.
+		 * Determine if they're all empty, and free the parent's octrants as a set.
+		 * Repeat this process up the chain of parents, repeatedly converting empty parents into leaf nodes.
+		 * TODO: maybe just use the chunker instead?
+		 */
+
+ for (parent_bv = occupant->leaf->parent; parent_bv && parent_bv != reservation; parent_bv = parent_bv->parent) {
+ int i;
+
+ /* are _all_ the parent's octrants freeable? */
+ for (i = 0; i < 8; i++) {
+ if (&parent_bv->octrants[i] == reservation ||
+ parent_bv->octrants[i].octrants ||
+ !list_empty(&parent_bv->octrants[i].occupants)) {
+ goto _out;
+ }
+ }
+
+ /* "freeing" really just entails putting the octrants cluster of nodes onto the free list */
+ list_add(&parent_bv->octrants[0].occupants, &bsp->free);
+ parent_bv->octrants = NULL;
+ bsp_invalidate_lookup_cache(bsp);
+ }
+ }
+
+_out:
+ occupant->leaf = NULL;
+}
+
+
+/* Delete an occupant from a bsp tree. */
+void bsp_delete_occupant(bsp_t *bsp, bsp_occupant_t *occupant)
+{
+ _bsp_delete_occupant(bsp, occupant, NULL);
+}
+
+
+/* Move an occupant within a bsp tree to a new position */
+void bsp_move_occupant(bsp_t *bsp, bsp_occupant_t *occupant, v3f_t *position)
+{
+ bsp_lookup_t lookup_res;
+
+ if (v3f_equal(occupant->position, position)) {
+ return;
+ }
+
+	/* TODO: now that there's a cache maintained in bsp->lookup_cache as well,
+	 * this feels a bit vestigial, see about consolidating things.  We still
+	 * need to be able to pin lookup_res.bv in the delete, but why not just use
+	 * the one in bsp->lookup_cache.bv and stop having lookup_position return
+	 * a result at all?  This bsp isn't concurrent/threaded, so it doesn't
+	 * really matter.
+	 */
+ bsp_lookup_position(bsp, &bsp->root, occupant->position, &lookup_res);
+ if (lookup_res.bv == occupant->leaf) {
+ /* leaf unchanged, do nothing past lookup. */
+ occupant->position = position;
+ return;
+ }
+
+ _bsp_delete_occupant(bsp, occupant, lookup_res.bv);
+ _bsp_add_occupant(bsp, occupant, position, &lookup_res);
+}
+
+
+static inline float square(float v)
+{
+ return v * v;
+}
+
+
+typedef enum overlaps_t {
+ OVERLAPS_NONE, /* objects are completely separated */
+	OVERLAPS_PARTIALLY,	/* the objects' surfaces intersect one another */
+ OVERLAPS_A_IN_B, /* first object is fully within the second */
+ OVERLAPS_B_IN_A, /* second object is fully within the first */
+} overlaps_t;
+
+
+/* Returns whether the axis-aligned bounding box (AABB) overlaps the sphere.
+ * Absolute vs. partial overlaps are distinguished, since it's an important optimization
+ * to know if the sphere falls entirely within one partition of the octree.
+ */
+static inline overlaps_t aabb_overlaps_sphere(v3f_t *aabb_min, v3f_t *aabb_max, v3f_t *sphere_center, float sphere_radius)
+{
+ /* This implementation is based on James Arvo's from Graphics Gems pg. 335 */
+ float r2 = square(sphere_radius);
+ float dface = INFINITY;
+ float dmin = 0;
+ float dmax = 0;
+ float a, b;
+
+#define per_dimension(_center, _box_max, _box_min) \
+ a = square(_center - _box_min); \
+ b = square(_center - _box_max); \
+ \
+ dmax += MAX(a, b); \
+ if (_center >= _box_min && _center <= _box_max) { \
+ /* sphere center within box */ \
+ dface = MIN(dface, MIN(a, b)); \
+ } else { \
+ /* sphere center outside the box */ \
+ dface = 0; \
+ dmin += MIN(a, b); \
+ }
+
+ per_dimension(sphere_center->x, aabb_max->x, aabb_min->x);
+ per_dimension(sphere_center->y, aabb_max->y, aabb_min->y);
+ per_dimension(sphere_center->z, aabb_max->z, aabb_min->z);
+
+	if (dmax < r2) {
+		/* maximum squared distance to the box is smaller than the
+		 * squared radius, the box is entirely inside the sphere */
+		return OVERLAPS_A_IN_B;
+	}
+
+	if (dface > r2) {
+		/* the sphere's center is inside the box (dface only remains
+		 * non-zero in that case), and the squared distance to the
+		 * nearest face exceeds the squared radius, so the sphere is
+		 * entirely inside the box. */
+		return OVERLAPS_B_IN_A;
+	}
+
+	if (dmin <= r2) {
+		/* minimum squared distance from the sphere's center to the box
+		 * is within the squared radius, the surfaces intersect */
+ return OVERLAPS_PARTIALLY;
+ }
+
+ return OVERLAPS_NONE;
+}
+
+
+typedef struct bsp_search_sphere_t {
+ v3f_t *center;
+ float radius_min;
+ float radius_max;
+ void (*cb)(bsp_t *, list_head_t *, void *);
+ void *cb_data;
+} bsp_search_sphere_t;
+
+
+static overlaps_t _bsp_search_sphere(bsp_t *bsp, bsp_node_t *node, bsp_search_sphere_t *search, v3f_t *aabb_min, v3f_t *aabb_max)
+{
+ overlaps_t res;
+ v3f_t oaabb_min, oaabb_max;
+
+ /* if the radius_max search doesn't overlap aabb_min:aabb_max at all, simply return. */
+ res = aabb_overlaps_sphere(aabb_min, aabb_max, search->center, search->radius_max);
+ if (res == OVERLAPS_NONE) {
+ return res;
+ }
+
+ /* if the radius_max absolutely overlaps the AABB, we must see if the AABB falls entirely within radius_min so we can skip it. */
+ if (res == OVERLAPS_A_IN_B) {
+ res = aabb_overlaps_sphere(aabb_min, aabb_max, search->center, search->radius_min);
+ if (res == OVERLAPS_A_IN_B) {
+ /* AABB is entirely within radius_min, skip it. */
+ return OVERLAPS_NONE;
+ }
+
+ if (res == OVERLAPS_NONE) {
+			/* radius_min didn't overlap at all while radius_max contained the aabb entirely, so the aabb falls entirely within the searched shell. */
+ res = OVERLAPS_A_IN_B;
+ } else {
+ /* radius_min overlapped partially.. */
+ res = OVERLAPS_PARTIALLY;
+ }
+ }
+
+ /* if node is a leaf, call search->cb with the occupants, then return. */
+ if (!node->octrants) {
+ search->cb(bsp, &node->occupants, search->cb_data);
+ return res;
+ }
+
+ /* node is a parent, recur on each octrant with appropriately adjusted aabb_min:aabb_max values */
+ /* if any of the octrants absolutely overlaps the search sphere, skip the others by returning. */
+#define search_octrant(_oid, _aabb_min, _aabb_max) \
+ res = _bsp_search_sphere(bsp, &node->octrants[_oid], search, _aabb_min, _aabb_max); \
+ if (res == OVERLAPS_B_IN_A) { \
+ return res; \
+ }
+
+ /* OCT_XL_YL_ZL and OCT_XR_YR_ZR AABBs don't require tedious composition */
+ search_octrant(OCT_XL_YL_ZL, aabb_min, &node->center);
+ search_octrant(OCT_XR_YR_ZR, &node->center, aabb_max);
+
+ /* the rest are stitched together requiring temp storage and tedium */
+ v3f_set(&oaabb_min, node->center.x, aabb_min->y, aabb_min->z);
+ v3f_set(&oaabb_max, aabb_max->x, node->center.y, node->center.z);
+ search_octrant(OCT_XR_YL_ZL, &oaabb_min, &oaabb_max);
+
+ v3f_set(&oaabb_min, aabb_min->x, node->center.y, aabb_min->z);
+ v3f_set(&oaabb_max, node->center.x, aabb_max->y, node->center.z);
+ search_octrant(OCT_XL_YR_ZL, &oaabb_min, &oaabb_max);
+
+ v3f_set(&oaabb_min, node->center.x, node->center.y, aabb_min->z);
+ v3f_set(&oaabb_max, aabb_max->x, aabb_max->y, node->center.z);
+ search_octrant(OCT_XR_YR_ZL, &oaabb_min, &oaabb_max);
+
+ v3f_set(&oaabb_min, aabb_min->x, aabb_min->y, node->center.z);
+ v3f_set(&oaabb_max, node->center.x, node->center.y, aabb_max->z);
+ search_octrant(OCT_XL_YL_ZR, &oaabb_min, &oaabb_max);
+
+ v3f_set(&oaabb_min, node->center.x, aabb_min->y, node->center.z);
+ v3f_set(&oaabb_max, aabb_max->x, node->center.y, aabb_max->z);
+ search_octrant(OCT_XR_YL_ZR, &oaabb_min, &oaabb_max);
+
+ v3f_set(&oaabb_min, aabb_min->x, node->center.y, node->center.z);
+ v3f_set(&oaabb_max, node->center.x, aabb_max->y, aabb_max->z);
+ search_octrant(OCT_XL_YR_ZR, &oaabb_min, &oaabb_max);
+
+#undef search_octrant
+
+	/* since an OVERLAPS_NONE short-circuits the function early on, and
+	 * OVERLAPS_B_IN_A also causes short-circuits, arriving here means it's
+	 * a partial overlap
+	 */
+ return OVERLAPS_PARTIALLY;
+}
+
+
+/* search the bsp tree for leaf nodes which intersect the space between radius_min and radius_max of a sphere @ center */
+/* for every leaf node found to intersect the sphere, cb is called with the leaf node's occupants list head */
+/* the callback cb must then further filter the occupants as necessary. */
+void bsp_search_sphere(bsp_t *bsp, v3f_t *center, float radius_min, float radius_max, void (*cb)(bsp_t *, list_head_t *, void *), void *cb_data)
+{
+ bsp_search_sphere_t search = {
+ .center = center,
+ .radius_min = radius_min,
+ .radius_max = radius_max,
+ .cb = cb,
+ .cb_data = cb_data,
+ };
+ v3f_t aabb_min = v3f_init(-1.0f, -1.0f, -1.0f);
+ v3f_t aabb_max = v3f_init(1.0f, 1.0f, 1.0f);
+
+ _bsp_search_sphere(bsp, &bsp->root, &search, &aabb_min, &aabb_max);
+}
diff --git a/src/modules/sparkler/bsp.h b/src/modules/sparkler/bsp.h
new file mode 100644
index 0000000..f5ce303
--- /dev/null
+++ b/src/modules/sparkler/bsp.h
@@ -0,0 +1,28 @@
+#ifndef _BSP_H
+#define _BSP_H
+
+#include <stdint.h>
+
+#include "list.h"
+#include "v3f.h"
+
+typedef struct bsp_t bsp_t;
+typedef struct bsp_node_t bsp_node_t;
+
+/* Embed this in anything you want spatially indexed by the bsp tree. */
+/* TODO: it would be nice to make this opaque, but it's a little annoying. */
+typedef struct bsp_occupant_t {
+ bsp_node_t *leaf; /* leaf node containing this occupant */
+ list_head_t occupants; /* node on containing leaf node's list of occupants */
+ v3f_t *position; /* position of occupant to be partitioned */
+} bsp_occupant_t;
+
+bsp_t * bsp_new(void);
+void bsp_free(bsp_t *bsp);
+void bsp_print(bsp_t *bsp);
+void bsp_add_occupant(bsp_t *bsp, bsp_occupant_t *occupant, v3f_t *position);
+void bsp_delete_occupant(bsp_t *bsp, bsp_occupant_t *occupant);
+void bsp_move_occupant(bsp_t *bsp, bsp_occupant_t *occupant, v3f_t *position);
+void bsp_search_sphere(bsp_t *bsp, v3f_t *center, float radius_min, float radius_max, void (*cb)(bsp_t *, list_head_t *, void *), void *cb_data);
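+
+/* Usage sketch (hypothetical caller, for illustration only; burst.c is the
+ * in-tree example of a sphere search):
+ *
+ *	typedef struct thing_t {
+ *		bsp_occupant_t	occupant;
+ *		v3f_t		position;
+ *	} thing_t;
+ *
+ *	static void cb(bsp_t *bsp, list_head_t *occupants, void *data)
+ *	{
+ *		bsp_occupant_t	*o;
+ *
+ *		list_for_each_entry(o, occupants, occupants) {
+ *			thing_t *thing = container_of(o, thing_t, occupant);
+ *			... filter thing by exact distance here ...
+ *		}
+ *	}
+ *
+ *	bsp_add_occupant(bsp, &thing->occupant, &thing->position);
+ *	bsp_search_sphere(bsp, &center, .0f, .05f, cb, NULL);
+ *	bsp_delete_occupant(bsp, &thing->occupant);
+ */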
+
+#endif
diff --git a/src/modules/sparkler/burst.c b/src/modules/sparkler/burst.c
new file mode 100644
index 0000000..828ca02
--- /dev/null
+++ b/src/modules/sparkler/burst.c
@@ -0,0 +1,111 @@
+#include <stdlib.h>
+
+#include "bsp.h"
+#include "container.h"
+#include "particle.h"
+#include "particles.h"
+
+
+/* a "burst" (shockwave) particle type */
+/* this doesn't draw anything, it just pushes neighbors away in an increasing radius */
+
+#define BURST_FORCE 0.01f
+#define BURST_MAX_LIFETIME 8
+
+typedef struct _burst_ctxt_t {
+ int longevity;
+ int lifetime;
+} burst_ctxt_t;
+
+
+static int burst_init(particles_t *particles, particle_t *p)
+{
+ burst_ctxt_t *ctxt = p->ctxt;
+
+ ctxt->longevity = ctxt->lifetime = BURST_MAX_LIFETIME;
+ p->props->velocity = 0; /* burst should be stationary */
+ p->props->mass = 0; /* no mass prevents gravity's effects */
+
+ return 1;
+}
+
+
+static inline void thrust_part(particle_t *burst, particle_t *victim, float distance_sq)
+{
+ v3f_t direction = v3f_sub(&victim->props->position, &burst->props->position);
+
+ /* TODO: normalize is expensive, see about removing these. */
+ direction = v3f_normalize(&direction);
+ victim->props->direction = v3f_add(&victim->props->direction, &direction);
+ victim->props->direction = v3f_normalize(&victim->props->direction);
+
+ victim->props->velocity += BURST_FORCE;
+}
+
+
+typedef struct burst_sphere_t {
+ particle_t *center;
+ float radius_min;
+ float radius_max;
+} burst_sphere_t;
+
+
+static void burst_cb(bsp_t *bsp, list_head_t *occupants, void *_s)
+{
+ burst_sphere_t *s = _s;
+ bsp_occupant_t *o;
+ float rmin_sq = s->radius_min * s->radius_min;
+ float rmax_sq = s->radius_max * s->radius_max;
+
+ /* XXX: to avoid having a callback per-particle, bsp_occupant_t was
+ * moved to the public particle, and the particle-specific
+ * implementations directly perform bsp-accelerated searches. Another
+ * wart caused by this is particles_bsp().
+ */
+ list_for_each_entry(o, occupants, occupants) {
+ particle_t *p = container_of(o, particle_t, occupant);
+ float d_sq;
+
+ if (p == s->center) {
+ /* leave ourselves alone */
+ continue;
+ }
+
+ d_sq = v3f_distance_sq(&s->center->props->position, &p->props->position);
+
+ if (d_sq > rmin_sq && d_sq < rmax_sq) {
+ /* displace the part relative to the burst origin */
+ thrust_part(s->center, p, d_sq);
+ }
+
+ }
+}
+
+
+static particle_status_t burst_sim(particles_t *particles, particle_t *p)
+{
+ burst_ctxt_t *ctxt = p->ctxt;
+ bsp_t *bsp = particles_bsp(particles); /* XXX see note above about bsp_occupant_t */
+ burst_sphere_t s;
+
+ if (!ctxt->longevity || (ctxt->longevity--) <= 0) {
+ return PARTICLE_DEAD;
+ }
+
+ /* affect neighbors for the shock-wave */
+ s.radius_min = (1.0f - ((float)ctxt->longevity / ctxt->lifetime)) * 0.075f;
+ s.radius_max = s.radius_min + .01f;
+ s.center = p;
+ bsp_search_sphere(bsp, &p->props->position, s.radius_min, s.radius_max, burst_cb, &s);
+
+ return PARTICLE_ALIVE;
+}
+
+
+particle_ops_t burst_ops = {
+ .context_size = sizeof(burst_ctxt_t),
+ .sim = burst_sim,
+ .init = burst_init,
+ .draw = NULL,
+ .cleanup = NULL,
+ };
diff --git a/src/modules/sparkler/chunker.c b/src/modules/sparkler/chunker.c
new file mode 100644
index 0000000..ca072eb
--- /dev/null
+++ b/src/modules/sparkler/chunker.c
@@ -0,0 +1,225 @@
+#include <assert.h>
+#include <stddef.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <string.h>
+
+#include "chunker.h"
+#include "container.h"
+#include "list.h"
+
+/* Everything associated with the particles tends to be short-lived.
+ *
+ * They come and go frequently in large numbers. This implements a very basic
+ * chunked allocator which prioritizes efficient allocation and freeing over
+ * low waste of memory. We malloc chunks at a time, doling out elements from
+ * the chunk sequentially as requested until the chunk cannot fulfill an
+ * allocation, then we just retire the chunk, add a new chunk, and continue.
+ *
+ * When allocations are freed, we simply decrement the refcount for its chunk,
+ * leaving the chunk pinned with holes accumulating until its refcount reaches
+ * zero, at which point the chunk is made available for allocations again.
+ *
+ * This requires a reference to the chunk be returned with every allocation.
+ * It may be possible to reduce the footprint of this by using a relative
+ * offset to the chunk start instead, but that would probably be more harmful
+ * to the alignment.
+ *
+ * This has some similarities to a slab allocator...
+ */
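+
+/* Usage sketch (hypothetical, for illustration only):
+ *
+ *	chunker_t *c = chunker_new(sizeof(foo_t) * 128);
+ *	foo_t *foo = chunker_alloc(c, sizeof(foo_t));
+ *
+ *	chunker_free(foo);		-- frees go by pointer alone
+ *	chunker_free_chunker(c);	-- all allocations must be freed beforehand
+ */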
+
+#define CHUNK_ALIGNMENT 8192 /* XXX: this may be unnecessary, callers should ideally be able to size their chunkers appropriately */
+#define ALLOC_ALIGNMENT 8 /* allocations within the chunk need to be aligned since their size affects subsequent allocation offsets */
+#define ALIGN(_size, _alignment) (((_size) + (_alignment) - 1) & ~((_alignment) - 1))
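+/* e.g. ALIGN(13, 8) == 16, ALIGN(16, 8) == 16 */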
+
+typedef struct chunk_t {
+ chunker_t *chunker; /* chunker chunk belongs to */
+ list_head_t chunks; /* node on free/pinned list */
+ uint32_t n_refs; /* number of references (allocations) to this chunk */
+ unsigned next_offset; /* next available offset for allocation */
+ uint8_t mem[]; /* usable memory from this chunk */
+} chunk_t;
+
+typedef struct allocation_t {
+ chunk_t *chunk; /* chunk this allocation came from */
+ uint8_t mem[]; /* usable memory from this allocation */
+} allocation_t;
+
+struct chunker_t {
+ chunk_t *chunk; /* current chunk allocations come from */
+ unsigned chunk_size; /* size chunks are allocated in */
+ list_head_t free_chunks; /* list of completely free chunks */
+ list_head_t pinned_chunks; /* list of chunks pinned because they have an outstanding allocation */
+};
+
+
+/* Add a reference to a chunk. */
+static inline void chunk_ref(chunk_t *chunk)
+{
+ assert(chunk);
+ assert(chunk->chunker);
+
+ chunk->n_refs++;
+
+ assert(chunk->n_refs != 0);
+}
+
+
+/* Remove reference from a chunk, move to free list when no references remain. */
+static inline void chunk_unref(chunk_t *chunk)
+{
+ assert(chunk);
+ assert(chunk->chunker);
+ assert(chunk->n_refs > 0);
+
+ chunk->n_refs--;
+ if (chunk->n_refs == 0) {
+ list_move(&chunk->chunks, &chunk->chunker->free_chunks);
+ }
+}
+
+
+/* Return allocated size of the chunk */
+static inline unsigned chunk_alloc_size(chunker_t *chunker)
+{
+ assert(chunker);
+
+ return (sizeof(chunk_t) + chunker->chunk_size);
+}
+
+
+/* Get a new working chunk, retiring and replacing chunker->chunk. */
+static void chunker_new_chunk(chunker_t *chunker)
+{
+ chunk_t *chunk;
+
+ assert(chunker);
+
+ if (chunker->chunk) {
+ chunk_unref(chunker->chunk);
+ chunker->chunk = NULL;
+ }
+
+ if (!list_empty(&chunker->free_chunks)) {
+ chunk = list_entry(chunker->free_chunks.next, chunk_t, chunks);
+ list_del(&chunk->chunks);
+	} else {
+		/* No free chunks, must ask libc for memory */
+		chunk = malloc(chunk_alloc_size(chunker));
+		if (!chunk) {
+			/* chunker->chunk stays NULL; chunker_alloc() will return NULL */
+			return;
+		}
+	}
+
+ /* Note a chunk is pinned from the moment it's created, and a reference
+ * is added to represent chunker->chunk, even though no allocations
+ * occurred yet.
+ */
+ chunk->n_refs = 1;
+ chunk->next_offset = 0;
+ chunk->chunker = chunker;
+ chunker->chunk = chunk;
+ list_add(&chunk->chunks, &chunker->pinned_chunks);
+}
+
+
+/* Create a new chunker. */
+chunker_t * chunker_new(unsigned chunk_size)
+{
+ chunker_t *chunker;
+
+ chunker = calloc(1, sizeof(chunker_t));
+ if (!chunker) {
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&chunker->free_chunks);
+ INIT_LIST_HEAD(&chunker->pinned_chunks);
+
+ /* XXX: chunker->chunk_size does not include the size of the chunk_t container */
+ chunker->chunk_size = ALIGN(chunk_size, CHUNK_ALIGNMENT);
+
+ return chunker;
+}
+
+
+/* Allocate non-zeroed memory from a chunker. */
+void * chunker_alloc(chunker_t *chunker, unsigned size)
+{
+ allocation_t *allocation;
+
+ assert(chunker);
+ assert(size <= chunker->chunk_size);
+
+ size = ALIGN(sizeof(allocation_t) + size, ALLOC_ALIGNMENT);
+
+ if (!chunker->chunk || size + chunker->chunk->next_offset > chunker->chunk_size) {
+ /* Retire this chunk, time for a new one */
+ chunker_new_chunk(chunker);
+ }
+
+ if (!chunker->chunk) {
+ return NULL;
+ }
+
+ chunk_ref(chunker->chunk);
+ allocation = (allocation_t *)&chunker->chunk->mem[chunker->chunk->next_offset];
+ chunker->chunk->next_offset += size;
+ allocation->chunk = chunker->chunk;
+
+ assert(chunker->chunk->next_offset <= chunker->chunk_size);
+
+ return allocation->mem;
+}
+
+
+/* Free memory allocated from a chunker. */
+void chunker_free(void *ptr)
+{
+ allocation_t *allocation = container_of(ptr, allocation_t, mem);
+
+ assert(ptr);
+
+ chunk_unref(allocation->chunk);
+}
+
+
+/* Free a chunker and its associated allocations. */
+void chunker_free_chunker(chunker_t *chunker)
+{
+ chunk_t *chunk, *_chunk;
+
+ assert(chunker);
+
+ if (chunker->chunk) {
+ chunk_unref(chunker->chunk);
+ }
+
+ assert(list_empty(&chunker->pinned_chunks));
+
+ list_for_each_entry_safe(chunk, _chunk, &chunker->free_chunks, chunks) {
+ free(chunk);
+ }
+
+ free(chunker);
+}
+
+/* TODO: add pinned chunk iterator interface for cache-friendly iterating across
+ * chunk contents.
+ * The idea is that at times when the performance is really important, the
+ * chunks will be full of active particles, because it's the large numbers
+ * which slows us down. At those times, it's beneficial to not walk linked
+ * lists of structs to process them, instead we just process all the elements
+ * of the chunk as an array and assume everything is active. The type of
+ * processing being done in this fashion is benign to perform on an unused
+ * element, as long as there's no dangling pointers being dereferenced. If
+ * there's references, a status field could be maintained in the entry to say
+ * if it's active, then simply skip processing of the inactive elements. This
+ * tends to be more cache-friendly than chasing pointers. A linked list
+ * hierarchy of particles is still maintained for the parent:child
+ * relationships under the assumption that some particles will make use of the
+ * tracked descendants, though nothing has been done with it yet.
+ *
+ * The current implementation of the _particle_t is variable length, which precludes
+ * this optimization. However, breaking out the particle_props_t into a separate
+ * chunker would allow running particles_age() across the props alone directly
+ * within the pinned chunks.  The other passes are still done hierarchically,
+ * and require the full particle context.
+ */
diff --git a/src/modules/sparkler/chunker.h b/src/modules/sparkler/chunker.h
new file mode 100644
index 0000000..ac53cec
--- /dev/null
+++ b/src/modules/sparkler/chunker.h
@@ -0,0 +1,11 @@
+#ifndef _CHUNKER_H
+#define _CHUNKER_H
+
+typedef struct chunker_t chunker_t;
+
+chunker_t * chunker_new(unsigned chunk_size);
+void * chunker_alloc(chunker_t *chunker, unsigned size);
+void chunker_free(void *mem);
+void chunker_free_chunker(chunker_t *chunker);
+
+#endif
diff --git a/src/modules/sparkler/container.h b/src/modules/sparkler/container.h
new file mode 100644
index 0000000..a3779e8
--- /dev/null
+++ b/src/modules/sparkler/container.h
@@ -0,0 +1,11 @@
+#ifndef _CONTAINER_H
+#define _CONTAINER_H
+
+#include <stddef.h>
+
+#ifndef container_of
+#define container_of(_ptr, _type, _member) \
+ (_type *)((void *)(_ptr) - offsetof(_type, _member))
+#endif
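+
+/* e.g. given:
+ *	typedef struct foo_t { int a; list_head_t node; } foo_t;
+ * a list_head_t *n known to be embedded in a foo_t is recovered with:
+ *	foo_t *f = container_of(n, foo_t, node);
+ */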
+
+#endif
diff --git a/src/modules/sparkler/draw.h b/src/modules/sparkler/draw.h
new file mode 100644
index 0000000..5010374
--- /dev/null
+++ b/src/modules/sparkler/draw.h
@@ -0,0 +1,32 @@
+#ifndef _DRAW_H
+#define _DRAW_H
+
+#include <stdint.h>
+
+#include "fb.h"
+
+/* helper for scaling rgb colors and packing them into a pixel */
+static inline uint32_t makergb(uint32_t r, uint32_t g, uint32_t b, float intensity)
+{
+ r = (((float)intensity) * r);
+ g = (((float)intensity) * g);
+ b = (((float)intensity) * b);
+
+ return (((r & 0xff) << 16) | ((g & 0xff) << 8) | (b & 0xff));
+}
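+
+/* e.g. makergb(0xff, 0x80, 0x00, .5f) == 0x7f4000 */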
+
+static inline int draw_pixel(fb_fragment_t *f, int x, int y, uint32_t pixel)
+{
+ uint32_t *pixels = f->buf;
+
+ if (y < 0 || y >= f->height || x < 0 || x >= f->width) {
+ return 0;
+ }
+
+ /* FIXME this assumes stride is aligned to 4 */
+ pixels[(y * (f->width + (f->stride >> 2))) + x] = pixel;
+
+ return 1;
+}
+
+#endif
diff --git a/src/modules/sparkler/list.h b/src/modules/sparkler/list.h
new file mode 100644
index 0000000..48bca36
--- /dev/null
+++ b/src/modules/sparkler/list.h
@@ -0,0 +1,252 @@
+#ifndef __LIST_H
+#define __LIST_H
+
+/* linux kernel linked list interface */
+
+/*
+ * Simple doubly linked list implementation.
+ *
+ * Some of the internal functions ("__xxx") are useful when
+ * manipulating whole lists rather than single entries, as
+ * sometimes we already know the next/prev entries and we can
+ * generate better code by using them directly rather than
+ * using the generic single-entry routines.
+ */
+
+typedef struct list_head {
+ struct list_head *next, *prev;
+} list_head_t;
+
+#define LIST_HEAD_INIT(name) { &(name), &(name) }
+
+#define LIST_HEAD(name) \
+ struct list_head name = LIST_HEAD_INIT(name)
+
+#define INIT_LIST_HEAD(ptr) do { \
+ (ptr)->next = (ptr); (ptr)->prev = (ptr); \
+} while (0)
+
+/*
+ * Insert a new entry between two known consecutive entries.
+ *
+ * This is only for internal list manipulation where we know
+ * the prev/next entries already!
+ */
+static inline void __list_add(struct list_head *new,
+ struct list_head *prev,
+ struct list_head *next)
+{
+ next->prev = new;
+ new->next = next;
+ new->prev = prev;
+ prev->next = new;
+}
+
+/**
+ * list_add - add a new entry
+ * @new: new entry to be added
+ * @head: list head to add it after
+ *
+ * Insert a new entry after the specified head.
+ * This is good for implementing stacks.
+ */
+static inline void list_add(struct list_head *new, struct list_head *head)
+{
+ __list_add(new, head, head->next);
+}
+
+/**
+ * list_add_tail - add a new entry
+ * @new: new entry to be added
+ * @head: list head to add it before
+ *
+ * Insert a new entry before the specified head.
+ * This is useful for implementing queues.
+ */
+static inline void list_add_tail(struct list_head *new, struct list_head *head)
+{
+ __list_add(new, head->prev, head);
+}
+
+/*
+ * Delete a list entry by making the prev/next entries
+ * point to each other.
+ *
+ * This is only for internal list manipulation where we know
+ * the prev/next entries already!
+ */
+static inline void __list_del(struct list_head *prev, struct list_head *next)
+{
+ next->prev = prev;
+ prev->next = next;
+}
+
+/**
+ * list_del - deletes entry from list.
+ * @entry: the element to delete from the list.
+ * Note: list_empty on entry does not return true after this, the entry is in an undefined state.
+ */
+static inline void list_del(struct list_head *entry)
+{
+ __list_del(entry->prev, entry->next);
+ entry->next = (void *) 0;
+ entry->prev = (void *) 0;
+}
+
+/**
+ * list_del_init - deletes entry from list and reinitialize it.
+ * @entry: the element to delete from the list.
+ */
+static inline void list_del_init(struct list_head *entry)
+{
+ __list_del(entry->prev, entry->next);
+ INIT_LIST_HEAD(entry);
+}
+
+/**
+ * list_move - delete from one list and add as another's head
+ * @list: the entry to move
+ * @head: the head that will precede our entry
+ */
+static inline void list_move(struct list_head *list, struct list_head *head)
+{
+ __list_del(list->prev, list->next);
+ list_add(list, head);
+}
+
+/**
+ * list_move_tail - delete from one list and add as another's tail
+ * @list: the entry to move
+ * @head: the head that will follow our entry
+ */
+static inline void list_move_tail(struct list_head *list,
+ struct list_head *head)
+{
+ __list_del(list->prev, list->next);
+ list_add_tail(list, head);
+}
+
+/**
+ * list_empty - tests whether a list is empty
+ * @head: the list to test.
+ */
+static inline int list_empty(struct list_head *head)
+{
+ return head->next == head;
+}
+
+static inline void __list_splice(struct list_head *list,
+ struct list_head *head)
+{
+ struct list_head *first = list->next;
+ struct list_head *last = list->prev;
+ struct list_head *at = head->next;
+
+ first->prev = head;
+ head->next = first;
+
+ last->next = at;
+ at->prev = last;
+}
+
+/**
+ * list_splice - join two lists
+ * @list: the new list to add.
+ * @head: the place to add it in the first list.
+ */
+static inline void list_splice(struct list_head *list, struct list_head *head)
+{
+ if (!list_empty(list))
+ __list_splice(list, head);
+}
+
+/**
+ * list_splice_init - join two lists and reinitialise the emptied list.
+ * @list: the new list to add.
+ * @head: the place to add it in the first list.
+ *
+ * The list at @list is reinitialised
+ */
+static inline void list_splice_init(struct list_head *list,
+ struct list_head *head)
+{
+ if (!list_empty(list)) {
+ __list_splice(list, head);
+ INIT_LIST_HEAD(list);
+ }
+}
+
+/**
+ * list_entry - get the struct for this entry
+ * @ptr: the &struct list_head pointer.
+ * @type: the type of the struct this is embedded in.
+ * @member: the name of the list_struct within the struct.
+ */
+#define list_entry(ptr, type, member) \
+ ((type *)((char *)(ptr)-(unsigned long)(&((type *)0)->member)))
+
+/**
+ * list_for_each - iterate over a list
+ * @pos: the &struct list_head to use as a loop counter.
+ * @head: the head for your list.
+ */
+#define list_for_each(pos, head) \
+ for (pos = (head)->next; pos != (head); \
+ pos = pos->next)
+/**
+ * list_for_each_prev - iterate over a list backwards
+ * @pos: the &struct list_head to use as a loop counter.
+ * @head: the head for your list.
+ */
+#define list_for_each_prev(pos, head) \
+ for (pos = (head)->prev; pos != (head); \
+ pos = pos->prev)
+
+/**
+ * list_for_each_safe - iterate over a list safe against removal of list entry
+ * @pos: the &struct list_head to use as a loop counter.
+ * @n: another &struct list_head to use as temporary storage
+ * @head: the head for your list.
+ */
+#define list_for_each_safe(pos, n, head) \
+ for (pos = (head)->next, n = pos->next; pos != (head); \
+ pos = n, n = pos->next)
+
+/**
+ * list_for_each_entry - iterate over list of given type
+ * @pos: the type * to use as a loop counter.
+ * @head: the head for your list.
+ * @member: the name of the list_struct within the struct.
+ */
+#define list_for_each_entry(pos, head, member) \
+ for (pos = list_entry((head)->next, typeof(*pos), member); \
+ &pos->member != (head); \
+ pos = list_entry(pos->member.next, typeof(*pos), member))
+
+/**
+ * list_for_each_entry_prev - iterate over list of given type backwards
+ * @pos: the type * to use as a loop counter.
+ * @head: the head for your list.
+ * @member: the name of the list_struct within the struct.
+ */
+#define list_for_each_entry_prev(pos, head, member) \
+ for (pos = list_entry((head)->prev, typeof(*pos), member); \
+ &pos->member != (head); \
+ pos = list_entry(pos->member.prev, typeof(*pos), member))
+
+
+/**
+ * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
+ * @pos: the type * to use as a loop counter.
+ * @n: another type * to use as temporary storage
+ * @head: the head for your list.
+ * @member: the name of the list_struct within the struct.
+ */
+#define list_for_each_entry_safe(pos, n, head, member) \
+ for (pos = list_entry((head)->next, typeof(*pos), member), \
+ n = list_entry(pos->member.next, typeof(*pos), member); \
+ &pos->member != (head); \
+ pos = n, n = list_entry(n->member.next, typeof(*n), member))
+
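+/* Usage sketch (hypothetical, for illustration only):
+ *
+ *	typedef struct item_t {
+ *		list_head_t	node;
+ *		int		value;
+ *	} item_t;
+ *
+ *	LIST_HEAD(items);
+ *	item_t *pos, *_pos;
+ *
+ *	list_add_tail(&some_item->node, &items);
+ *	list_for_each_entry_safe(pos, _pos, &items, node) {
+ *		list_del(&pos->node);	-- the _safe variant tolerates removal
+ *	}
+ */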
+
+#endif
diff --git a/src/modules/sparkler/particle.c b/src/modules/sparkler/particle.c
new file mode 100644
index 0000000..0e3d2c8
--- /dev/null
+++ b/src/modules/sparkler/particle.c
@@ -0,0 +1,14 @@
+#include "particle.h"
+
+/* convert a particle to a new type */
+void particle_convert(particles_t *particles, particle_t *p, particle_props_t *props, particle_ops_t *ops)
+{
+ particle_cleanup(particles, p);
+ if (props) {
+ *p->props = *props;
+ }
+ if (ops) {
+ p->ops = ops;
+ }
+ particle_init(particles, p);
+}
diff --git a/src/modules/sparkler/particle.h b/src/modules/sparkler/particle.h
new file mode 100644
index 0000000..95c117e
--- /dev/null
+++ b/src/modules/sparkler/particle.h
@@ -0,0 +1,79 @@
+#ifndef _PARTICLE_H
+#define _PARTICLE_H
+
+#include "bsp.h"
+#include "fb.h"
+#include "v3f.h"
+
+typedef struct particle_props_t {
+ v3f_t position; /* position in 3d space */
+ v3f_t direction; /* trajectory in 3d space */
+ float velocity; /* linear velocity */
+ float mass; /* mass of particle */
+ float drag; /* drag of particle */
+ int of_use:1; /* are these properties of use/meaningful? */
+} particle_props_t;
+
+typedef enum particle_status_t {
+ PARTICLE_ALIVE,
+ PARTICLE_DEAD
+} particle_status_t;
+
+typedef struct particle_t particle_t;
+typedef struct particles_t particles_t;
+
+typedef struct particle_ops_t {
+ unsigned context_size; /* size of the particle context (0 for none) */
+ int (*init)(particles_t *, particle_t *); /* initialize the particle, called after allocating context (optional) */
+ void (*cleanup)(particles_t *, particle_t *); /* cleanup function, called before freeing context (optional) */
+ particle_status_t (*sim)(particles_t *, particle_t *); /* simulate the particle for another cycle (required) */
+ void (*draw)(particles_t *, particle_t *, int, int, fb_fragment_t *); /* draw the particle, 3d->2d projection has been done already (optional) */
+} particle_ops_t;
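+
+/* Lifecycle sketch (as used by particles.c): the type-specific context
+ * (context_size bytes) is allocated with the particle, then init() is called;
+ * sim() runs once per simulation cycle until it returns PARTICLE_DEAD; draw()
+ * is called per frame with the projected x,y if present; cleanup() runs
+ * before the particle is freed.
+ */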
+
+struct particle_t {
+ bsp_occupant_t occupant; /* occupant node in the bsp tree */
+ particle_props_t *props;
+ particle_ops_t *ops;
+ void *ctxt;
+};
+
+
+//#define rand_within_range(_min, _max) ((rand() % (_max - _min)) + _min)
+/* The style of random number generator used by C libraries has less entropy
+ * in the lower bits, meaning one shouldn't just use modulo.  While this is
+ * slower, the results do seem a little different.
+ */
+#define rand_within_range(_min, _max) (int)(((float)_min) + ((float)rand() / (float)RAND_MAX) * (_max - _min))
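+/* e.g. rand_within_range(10, 50) produces an int roughly uniform over [10, 50] */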
+
+#define INHERIT_OPS NULL
+#define INHERIT_PROPS NULL
+
+
+static inline int particle_init(particles_t *particles, particle_t *p) {
+ if (p->ops->init) {
+ return p->ops->init(particles, p);
+ }
+
+ return 1;
+}
+
+
+static inline void particle_cleanup(particles_t *particles, particle_t *p) {
+ if (p->ops->cleanup) {
+ p->ops->cleanup(particles, p);
+ }
+}
+
+
+static inline particle_status_t particle_sim(particles_t *particles, particle_t *p) {
+ return p->ops->sim(particles, p);
+}
+
+
+static inline void particle_draw(particles_t *particles, particle_t *p, int x, int y, fb_fragment_t *f) {
+ if (p->ops->draw) {
+ p->ops->draw(particles, p, x, y, f);
+ }
+}
+
+
+void particle_convert(particles_t *particles, particle_t *p, particle_props_t *props, particle_ops_t *ops);
+
+#endif
diff --git a/src/modules/sparkler/particles.c b/src/modules/sparkler/particles.c
new file mode 100644
index 0000000..0eb260e
--- /dev/null
+++ b/src/modules/sparkler/particles.c
@@ -0,0 +1,342 @@
+#include <assert.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <math.h>
+#include <stdlib.h>
+#include <time.h>
+
+#include "fb.h"
+
+#include "chunker.h"
+#include "container.h"
+#include "bsp.h"
+#include "list.h"
+#include "particle.h"
+#include "particles.h"
+#include "v3f.h"
+
+#define ZCONST 0.4f
+
+/* private particle with all the particles bookkeeping... */
+typedef struct _particle_t {
+ list_head_t siblings; /* sibling particles */
+ list_head_t children; /* children particles */
+
+	particle_props_t	props;		/* we reference this in the public particle; I might change
+						 * the way props are allocated, so everything is coded to use a
+						 * reference for now.  It may make sense to have props allocated
+						 * separately via their own chunker, and to perform some mass
+						 * operations against the list of chunks rather than chasing the
+						 * pointers of the particle hierarchy. TODO
+						 */
+ particle_t public; /* the public particle_t is embedded */
+
+ uint8_t context[]; /* particle type-specific context [public.ops.context_size] */
+} _particle_t;
+
+struct particles_t {
+ chunker_t *chunker; /* chunker for variably-sized particle allocation (includes context) */
+	list_head_t	active;		/* top-level active list of the particles hierarchy */
+ bsp_t *bsp; /* bsp spatial index of the particles */
+};
+
+
+/* create a new particle system */
+particles_t * particles_new(void)
+{
+ particles_t *particles;
+
+ particles = calloc(1, sizeof(particles_t));
+ if (!particles) {
+ return NULL;
+ }
+
+ particles->chunker = chunker_new(sizeof(_particle_t) * 128);
+ if (!particles->chunker) {
+ return NULL;
+ }
+
+ particles->bsp = bsp_new();
+ if (!particles->bsp) {
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&particles->active);
+
+ return particles;
+}
+
+
+/* TODO: add a public interface for destroying particles? for now we just return PARTICLE_DEAD in the sim */
+static inline void _particles_free_particle(particles_t *particles, _particle_t *p)
+{
+ assert(p);
+
+ particle_cleanup(particles, &p->public);
+ chunker_free(p);
+}
+
+
+static inline void _particles_free(particles_t *particles, list_head_t *list)
+{
+ _particle_t *p, *_p;
+
+ assert(particles);
+ assert(list);
+
+ list_for_each_entry_safe(p, _p, list, siblings) {
+ _particles_free(particles, &p->children);
+ _particles_free_particle(particles, p);
+ }
+}
+
+
+/* free up all the particles */
+void particles_free(particles_t *particles)
+{
+ assert(particles);
+
+ _particles_free(particles, &particles->active);
+}
+
+
+/* reclaim a dead particle, moving it to the free list */
+static void particles_reap_particle(particles_t *particles, _particle_t *particle)
+{
+ assert(particles);
+ assert(particle);
+
+ if (!list_empty(&particle->children)) {
+ /* adopt any orphaned children using the global parts list */
+ list_splice(&particle->children, &particles->active);
+ }
+
+ list_del(&particle->siblings);
+ bsp_delete_occupant(particles->bsp, &particle->public.occupant);
+ _particles_free_particle(particles, particle);
+}
+
+
+/* add a particle to the specified list */
+static inline int _particles_add_particle(particles_t *particles, list_head_t *list, particle_props_t *props, particle_ops_t *ops)
+{
+ _particle_t *p;
+
+ assert(particles);
+ assert(ops);
+ assert(list);
+
+ p = chunker_alloc(particles->chunker, sizeof(_particle_t) + ops->context_size);
+ if (!p) {
+ return 0;
+ }
+
+ INIT_LIST_HEAD(&p->children);
+ INIT_LIST_HEAD(&p->siblings);
+
+ /* inherit the parent's properties and ops if they're not explicitly provided */
+ if (props) {
+ p->props = *props;
+ } else {
+ p->props.of_use = 0;
+ }
+
+ p->public.props = &p->props;
+ p->public.ops = ops;
+
+ if (ops->context_size) {
+ p->public.ctxt = p->context;
+ }
+
+ if (!particle_init(particles, &p->public)) {
+ /* XXX FIXME this shouldn't be normal, we don't want to allocate
+ * particles that cannot be initialized. the rockets today set a cap
+ * by failing initialization, that's silly. */
+ chunker_free(p);
+ return 0;
+ }
+
+ p->public.props->of_use = 1;
+ list_add(&p->siblings, list);
+ bsp_add_occupant(particles->bsp, &p->public.occupant, &p->props.position);
+
+ return 1;
+}
+
+
+/* add a new "top-level" particle of the specified props and ops */
+int particles_add_particle(particles_t *particles, particle_props_t *props, particle_ops_t *ops)
+{
+ assert(particles);
+
+ return _particles_add_particle(particles, &particles->active, props, ops);
+}
+
+
+/* spawn a new child particle from a parent, initializing it via inheritance if desired */
+void particles_spawn_particle(particles_t *particles, particle_t *parent, particle_props_t *props, particle_ops_t *ops)
+{
+ _particle_t *p = container_of(parent, _particle_t, public);
+
+ assert(particles);
+ assert(parent);
+
+ _particles_add_particle(particles, &p->children, props ? props : parent->props, ops ? ops : parent->ops);
+}
+
+
+/* plural version of particles_add_particle(); adds multiple "top-level" particles of uniform props and ops */
+void particles_add_particles(particles_t *particles, particle_props_t *props, particle_ops_t *ops, int num)
+{
+ int i;
+
+ assert(particles);
+
+ for (i = 0; i < num; i++) {
+ _particles_add_particle(particles, &particles->active, props, ops);
+ }
+}
+
+
+/* Simple accessor to get the bsp pointer, the bsp is special because we don't want to do
+ * callbacks per-occupant, so the bsp_occupant_t and search functions are used directly by
+ * the per-particle code needing nearest-neighbor search. that requires an accessor since
+ * particles_t is opaque. This seemed less shitty than opening up particles_t.
+ */
+bsp_t * particles_bsp(particles_t *particles)
+{
+ assert(particles);
+ assert(particles->bsp);
+
+ return particles->bsp;
+}
+
+
+static inline void _particles_draw(particles_t *particles, list_head_t *list, fb_fragment_t *fragment)
+{
+ float w2 = fragment->width * .5f, h2 = fragment->height * .5f;
+ _particle_t *p;
+
+ assert(particles);
+ assert(list);
+ assert(fragment);
+
+ list_for_each_entry(p, list, siblings) {
+ int x, y;
+
+		/* project the 3d coordinates onto the 2d plane: a simple
+		 * perspective divide by (z - ZCONST), scaled and shifted so
+		 * x,y of [-1,1] map onto the fragment's width/height */
+ x = (p->props.position.x / (p->props.position.z - ZCONST) * w2) + w2;
+ y = (p->props.position.y / (p->props.position.z - ZCONST) * h2) + h2;
+
+ particle_draw(particles, &p->public, x, y, fragment);
+
+ if (!list_empty(&p->children)) {
+ _particles_draw(particles, &p->children, fragment);
+ }
+ }
+}
+
+
+/* draw all of the particles, currently called in hierarchical order */
+void particles_draw(particles_t *particles, fb_fragment_t *fragment)
+{
+ assert(particles);
+
+ _particles_draw(particles, &particles->active, fragment);
+}
+
+
+static inline particle_status_t _particles_sim(particles_t *particles, list_head_t *list)
+{
+ particle_status_t ret = PARTICLE_DEAD, s;
+ _particle_t *p, *_p;
+
+ assert(particles);
+ assert(list);
+
+ list_for_each_entry_safe(p, _p, list, siblings) {
+ if ((s = particle_sim(particles, &p->public)) == PARTICLE_ALIVE) {
+ ret = PARTICLE_ALIVE;
+
+ if (!list_empty(&p->children) &&
+ _particles_sim(particles, &p->children) == PARTICLE_ALIVE) {
+ ret = PARTICLE_ALIVE;
+ }
+ } else {
+ particles_reap_particle(particles, p);
+ }
+ }
+
+ return ret;
+}
+
+
+/* simulate the particles, calling the sim method of every particle in the hierarchy; this is what makes the particles dynamic */
+/* if any particle is still living, we return PARTICLE_ALIVE, to inform the caller when everything's dead */
+particle_status_t particles_sim(particles_t *particles)
+{
+ assert(particles);
+
+ return _particles_sim(particles, &particles->active);
+}
+
+
+static inline void _particles_age(particles_t *particles, list_head_t *list)
+{
+ _particle_t *p;
+
+ assert(particles);
+ assert(list);
+
+ /* TODO: since this *only* involves the properties struct, if they were
+ * allocated from a separate slab containing only properties, it'd be
+ * more efficient to iterate across property arrays and skip inactive
+	 * entries.  This hierarchical pointer-chasing recursion isn't
+ * particularly good for cache utilization.
+ */
+ list_for_each_entry(p, list, siblings) {
+#if 1
+ if (p->props.mass > 0.0f) {
+ /* gravity, TODO: mass isn't applied. */
+ static v3f_t gravity = v3f_init(0.0f, -0.05f, 0.0f);
+
+ p->props.direction = v3f_add(&p->props.direction, &gravity);
+ p->props.direction = v3f_normalize(&p->props.direction);
+ }
+#endif
+
+#if 1
+ /* some drag/resistance proportional to velocity TODO: integrate mass */
+ if (p->props.velocity > 0.0f) {
+ p->props.velocity -= ((p->props.velocity * p->props.velocity * p->props.drag));
+ if (p->props.velocity < 0.0f) {
+ p->props.velocity = 0;
+ }
+ }
+#endif
+
+ /* regular movement */
+ if (p->props.velocity > 0.0f) {
+ v3f_t movement = v3f_mult_scalar(&p->props.direction, p->props.velocity);
+
+ p->props.position = v3f_add(&p->props.position, &movement);
+ bsp_move_occupant(particles->bsp, &p->public.occupant, &p->props.position);
+ }
+
+ if (!list_empty(&p->children)) {
+ _particles_age(particles, &p->children);
+ }
+ }
+}
+
+
+/* advance time for all the particles (move them); this doesn't currently invoke any particle-specific helpers, it just applies
+ * physics-type stuff, moving particles according to their velocities, directions, mass, drag, gravity, etc... */
+void particles_age(particles_t *particles)
+{
+ assert(particles);
+
+ _particles_age(particles, &particles->active);
+}
diff --git a/src/modules/sparkler/particles.h b/src/modules/sparkler/particles.h
new file mode 100644
index 0000000..689934b
--- /dev/null
+++ b/src/modules/sparkler/particles.h
@@ -0,0 +1,21 @@
+#ifndef _PARTICLES_H
+#define _PARTICLES_H
+
+#include "bsp.h"
+#include "fb.h"
+#include "list.h"
+#include "particle.h"
+
+typedef struct particles_t particles_t;
+
+particles_t * particles_new(void);
+void particles_draw(particles_t *particles, fb_fragment_t *fragment);
+particle_status_t particles_sim(particles_t *particles);
+void particles_age(particles_t *particles);
+void particles_free(particles_t *particles);
+int particles_add_particle(particles_t *particles, particle_props_t *props, particle_ops_t *ops);
+void particles_spawn_particle(particles_t *particles, particle_t *parent, particle_props_t *props, particle_ops_t *ops);
+void particles_add_particles(particles_t *particles, particle_props_t *props, particle_ops_t *ops, int num);
+bsp_t * particles_bsp(particles_t *particles);
+
+#endif
diff --git a/src/modules/sparkler/rocket.c b/src/modules/sparkler/rocket.c
new file mode 100644
index 0000000..6b9dc5e
--- /dev/null
+++ b/src/modules/sparkler/rocket.c
@@ -0,0 +1,144 @@
+#include <stdlib.h>
+
+#include "draw.h"
+#include "particle.h"
+#include "particles.h"
+
+/* a "rocket" particle type */
+#define ROCKET_MAX_DECAY_RATE 20
+#define ROCKET_MIN_DECAY_RATE 2
+#define ROCKET_MAX_LIFETIME 500
+#define ROCKET_MIN_LIFETIME 300
+#define ROCKETS_MAX 20
+#define ROCKETS_XPLODE_MIN_SIZE 2000
+#define ROCKETS_XPLODE_MAX_SIZE 8000
+
+extern particle_ops_t burst_ops;
+extern particle_ops_t spark_ops;
+extern particle_ops_t xplode_ops;
+
+static unsigned rockets_cnt;
+
+typedef struct rocket_ctxt_t {
+ int decay_rate;
+ int longevity;
+ v3f_t wander;
+ float last_velocity; /* cache velocity to sense violent accelerations and explode when they happen */
+} rocket_ctxt_t;
+
+
+static int rocket_init(particles_t *particles, particle_t *p)
+{
+ rocket_ctxt_t *ctxt = p->ctxt;
+
+ if (rockets_cnt >= ROCKETS_MAX) {
+ return 0;
+ }
+ rockets_cnt++;
+
+ ctxt->decay_rate = rand_within_range(ROCKET_MIN_DECAY_RATE, ROCKET_MAX_DECAY_RATE);
+ ctxt->longevity = rand_within_range(ROCKET_MIN_LIFETIME, ROCKET_MAX_LIFETIME);
+
+ ctxt->wander.x = (float)(rand_within_range(0, 628) - 314) / 10000.0f;
+ ctxt->wander.y = (float)(rand_within_range(0, 628) - 314) / 10000.0f;
+ ctxt->wander.z = (float)(rand_within_range(0, 628) - 314) / 10000.0f;
+ ctxt->wander = v3f_normalize(&ctxt->wander);
+
+ ctxt->last_velocity = p->props->velocity;
+ p->props->drag = 0.4;
+ p->props->mass = 0.8;
+
+ return 1;
+}
+
+
+static particle_status_t rocket_sim(particles_t *particles, particle_t *p)
+{
+ rocket_ctxt_t *ctxt = p->ctxt;
+ int i, n_sparks;
+
+ if (!ctxt->longevity ||
+ (ctxt->longevity -= ctxt->decay_rate) <= 0 ||
+ p->props->velocity - ctxt->last_velocity > p->props->velocity * .05) { /* explode if accelerated too hard (burst) */
+ int n_xplode;
+ /* on death we explode */
+
+ ctxt->longevity = 0;
+
+ /* add a burst shockwave particle at our location
+ * TODO: need way to supply particle-type-specific parameters at spawn (burst size should derive from n_xplode)
+ */
+ particles_spawn_particle(particles, p, NULL, &burst_ops);
+
+ /* add a bunch of new explosion particles */
+ /* TODO: also particle-type-specific parameters, colors! rocket bursts should be able to vary the color. */
+ n_xplode = rand_within_range(ROCKETS_XPLODE_MIN_SIZE, ROCKETS_XPLODE_MAX_SIZE);
+ for (i = 0; i < n_xplode; i++) {
+ particle_props_t props = *p->props;
+ particle_ops_t *ops = &xplode_ops;
+
+ props.direction.x = ((float)(rand_within_range(0, 314159 * 2) - 314159) / 100000.0);
+ props.direction.y = ((float)(rand_within_range(0, 314159 * 2) - 314159) / 100000.0);
+ props.direction.z = ((float)(rand_within_range(0, 314159 * 2) - 314159) / 100000.0);
+ props.direction = v3f_normalize(&props.direction);
+
+ //props->velocity = ((float)rand_within_range(100, 200) / 100000.0);
+ props.velocity = ((float)rand_within_range(100, 300) / 100000.0);
+ particles_spawn_particle(particles, p, &props, ops);
+ }
+ return PARTICLE_DEAD;
+ }
+
+#if 1
+ /* FIXME: this isn't behaving as intended */
+ p->props->direction = v3f_add(&p->props->direction, &ctxt->wander);
+ p->props->direction = v3f_normalize(&p->props->direction);
+#endif
+ p->props->velocity += .00003;
+
+ /* spray some sparks behind the rocket */
+ n_sparks = rand_within_range(10, 40);
+ for (i = 0; i < n_sparks; i++) {
+ particle_props_t props = *p->props;
+
+ props.direction = v3f_negate(&props.direction);
+
+ props.direction.x += (float)(rand_within_range(0, 40) - 20) / 100.0;
+ props.direction.y += (float)(rand_within_range(0, 40) - 20) / 100.0;
+ props.direction.z += (float)(rand_within_range(0, 40) - 20) / 100.0;
+ props.direction = v3f_normalize(&props.direction);
+
+ props.velocity = (float)rand_within_range(10, 50) / 100000.0;
+ particles_spawn_particle(particles, p, &props, &spark_ops);
+ }
+
+ ctxt->last_velocity = p->props->velocity;
+
+ return PARTICLE_ALIVE;
+}
+
+
+static void rocket_draw(particles_t *particles, particle_t *p, int x, int y, fb_fragment_t *f)
+{
+ rocket_ctxt_t *ctxt = p->ctxt;
+
+ if (!draw_pixel(f, x, y, 0xff0000)) {
+ /* kill off parts that wander off screen */
+ ctxt->longevity = 0;
+ }
+}
+
+
+static void rocket_cleanup(particles_t *particles, particle_t *p)
+{
+ rockets_cnt--;
+}
+
+
+particle_ops_t rocket_ops = {
+ .context_size = sizeof(rocket_ctxt_t),
+ .sim = rocket_sim,
+ .init = rocket_init,
+ .draw = rocket_draw,
+ .cleanup = rocket_cleanup,
+ };
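rocket_sim's explosion directions come from normalizing a vector whose components are uniform in roughly [-π, π]; that is cheap, but normalizing a uniform cube sample slightly concentrates directions toward the cube's diagonals. If uniform coverage of the sphere ever matters, the standard uniform-z/uniform-azimuth method is a drop-in alternative — a sketch only, not what rocket.c does:

#include <math.h>
#include <stdlib.h>

#include "v3f.h"

/* return a direction uniformly distributed over the unit sphere */
static v3f_t random_unit_v3f(void)
{
	float z = 2.0f * ((float)rand() / RAND_MAX) - 1.0f;	/* uniform in [-1, 1] */
	float phi = 6.2831853f * ((float)rand() / RAND_MAX);	/* uniform in [0, 2π) */
	float r = sqrtf(1.0f - z * z);
	v3f_t v = v3f_init(r * cosf(phi), r * sinf(phi), z);

	return v;
}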
diff --git a/src/modules/sparkler/simple.c b/src/modules/sparkler/simple.c
new file mode 100644
index 0000000..e453e46
--- /dev/null
+++ b/src/modules/sparkler/simple.c
@@ -0,0 +1,113 @@
+#include <stdlib.h>
+
+#include "draw.h"
+#include "particle.h"
+#include "particles.h"
+
+
+/* a "simple" particle type */
+#define SIMPLE_MAX_DECAY_RATE 20
+#define SIMPLE_MIN_DECAY_RATE 2
+#define SIMPLE_MAX_LIFETIME 110
+#define SIMPLE_MIN_LIFETIME 30
+#define SIMPLE_MAX_SPAWN 15
+#define SIMPLE_MIN_SPAWN 2
+
+extern particle_ops_t rocket_ops;
+
+typedef struct _simple_ctxt_t {
+ int decay_rate;
+ int longevity;
+ int lifetime;
+} simple_ctxt_t;
+
+
+static int simple_init(particles_t *particles, particle_t *p)
+{
+ simple_ctxt_t *ctxt = p->ctxt;
+
+ ctxt->decay_rate = rand_within_range(SIMPLE_MIN_DECAY_RATE, SIMPLE_MAX_DECAY_RATE);
+ ctxt->lifetime = ctxt->longevity = rand_within_range(SIMPLE_MIN_LIFETIME, SIMPLE_MAX_LIFETIME);
+
+ if (!p->props->of_use) {
+ /* everything starts from the bottom center */
+ p->props->position.x = 0;
+ p->props->position.y = 0;
+ p->props->position.z = 0;
+
+ /* TODO: direction random-ish within the range of a narrow upward facing cone */
+ p->props->direction.x = (float)(rand_within_range(0, 6) - 3) * .1f;
+ p->props->direction.y = 1.0f + (float)(rand_within_range(0, 6) - 3) * .1f;
+ p->props->direction.z = (float)(rand_within_range(0, 6) - 3) * .1f;
+ p->props->direction = v3f_normalize(&p->props->direction);
+
+ p->props->velocity = (float)rand_within_range(300, 800) / 100000.0;
+
+ p->props->drag = 0.03;
+ p->props->mass = 0.3;
+ p->props->of_use = 1;
+ } /* else { we've been given properties, manipulate them or run with them? } */
+
+ return 1;
+}
+
+
+static particle_status_t simple_sim(particles_t *particles, particle_t *p)
+{
+ simple_ctxt_t *ctxt = p->ctxt;
+
+ /* a particle is free to manipulate its children list when aging, but not itself or its siblings */
+	/* return PARTICLE_DEAD to remove yourself; do not age children here, the age pass will recurse
+	 * into children and age them independently _after_ their parents have been aged
+	 */
+ if (!ctxt->longevity || (ctxt->longevity -= ctxt->decay_rate) <= 0) {
+ ctxt->longevity = 0;
+ return PARTICLE_DEAD;
+ }
+
+ /* create particles inheriting our type based on some silly conditions, with some tweaks to their direction */
+ if (ctxt->longevity == 42 || (ctxt->longevity > 500 && !(ctxt->longevity % 50))) {
+ int i, num = rand_within_range(SIMPLE_MIN_SPAWN, SIMPLE_MAX_SPAWN);
+
+ for (i = 0; i < num; i++) {
+ particle_props_t props = *p->props;
+ particle_ops_t *ops = INHERIT_OPS;
+
+ if (i == (SIMPLE_MAX_SPAWN - 2)) {
+ ops = &rocket_ops;
+ props.velocity = (float)rand_within_range(60, 100) / 1000000.0;
+ } else {
+ props.velocity = (float)rand_within_range(30, 100) / 10000.0;
+ }
+
+ props.direction.x += (float)(rand_within_range(0, 315 * 2) - 315) / 100.0;
+ props.direction.y += (float)(rand_within_range(0, 315 * 2) - 315) / 100.0;
+ props.direction.z += (float)(rand_within_range(0, 315 * 2) - 315) / 100.0;
+ props.direction = v3f_normalize(&props.direction);
+
+ particles_spawn_particle(particles, p, &props, ops); // XXX
+ }
+ }
+
+ return PARTICLE_ALIVE;
+}
+
+
+static void simple_draw(particles_t *particles, particle_t *p, int x, int y, fb_fragment_t *f)
+{
+ simple_ctxt_t *ctxt = p->ctxt;
+
+ if (!draw_pixel(f, x, y, makergb(0xff, 0xff, 0xff, ((float)ctxt->longevity / ctxt->lifetime)))) {
+ /* immediately kill off stars that wander off screen */
+ ctxt->longevity = 0;
+ }
+}
+
+
+particle_ops_t simple_ops = {
+ .context_size = sizeof(simple_ctxt_t),
+ .sim = simple_sim,
+ .init = simple_init,
+ .draw = simple_draw,
+ .cleanup = NULL,
+ };
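simple.c, rocket.c, spark.c, and xplode.c all implement the same particle_ops_t vtable: context_size tells the particle machinery how much per-particle state to reserve, and init/sim/draw/cleanup are the hooks. A skeletal new type would look like this (the "ember" type is hypothetical, purely to show the shape):

#include "draw.h"
#include "particle.h"
#include "particles.h"

typedef struct ember_ctxt_t {
	int longevity;
} ember_ctxt_t;

static int ember_init(particles_t *particles, particle_t *p)
{
	ember_ctxt_t *ctxt = p->ctxt;	/* context_size bytes reserved per particle */

	ctxt->longevity = 100;

	return 1;			/* nonzero indicates success */
}

static particle_status_t ember_sim(particles_t *particles, particle_t *p)
{
	ember_ctxt_t *ctxt = p->ctxt;

	if ((ctxt->longevity -= 1) <= 0)
		return PARTICLE_DEAD;

	return PARTICLE_ALIVE;
}

static void ember_draw(particles_t *particles, particle_t *p, int x, int y, fb_fragment_t *f)
{
	draw_pixel(f, x, y, 0xff4000);
}

particle_ops_t ember_ops = {
	.context_size = sizeof(ember_ctxt_t),
	.sim = ember_sim,
	.init = ember_init,
	.draw = ember_draw,
	.cleanup = NULL,	/* optional, judging by simple_ops and spark_ops */
};

Instances would then be seeded with particles_add_particles(particles, NULL, &ember_ops, n), exactly as sparkler.c does with simple_ops.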
diff --git a/src/modules/sparkler/spark.c b/src/modules/sparkler/spark.c
new file mode 100644
index 0000000..ea68ac2
--- /dev/null
+++ b/src/modules/sparkler/spark.c
@@ -0,0 +1,63 @@
+#include <stdlib.h>
+
+#include "draw.h"
+#include "particle.h"
+#include "particles.h"
+
+/* a "spark" particle type, emitted from behind rockets */
+#define SPARK_MAX_DECAY_RATE 20
+#define SPARK_MIN_DECAY_RATE 2
+#define SPARK_MAX_LIFETIME 150
+#define SPARK_MIN_LIFETIME 1
+
+typedef struct _spark_ctxt_t {
+ int decay_rate;
+ int longevity;
+ int lifetime;
+} spark_ctxt_t;
+
+
+static int spark_init(particles_t *particles, particle_t *p)
+{
+ spark_ctxt_t *ctxt = p->ctxt;
+
+ p->props->drag = 20.0;
+ p->props->mass = 0.1;
+ ctxt->decay_rate = rand_within_range(SPARK_MIN_DECAY_RATE, SPARK_MAX_DECAY_RATE);
+ ctxt->lifetime = ctxt->longevity = rand_within_range(SPARK_MIN_LIFETIME, SPARK_MAX_LIFETIME);
+
+ return 1;
+}
+
+
+static particle_status_t spark_sim(particles_t *particles, particle_t *p)
+{
+ spark_ctxt_t *ctxt = p->ctxt;
+
+ if (!ctxt->longevity || (ctxt->longevity -= ctxt->decay_rate) <= 0) {
+ ctxt->longevity = 0;
+ return PARTICLE_DEAD;
+ }
+
+ return PARTICLE_ALIVE;
+}
+
+
+static void spark_draw(particles_t *particles, particle_t *p, int x, int y, fb_fragment_t *f)
+{
+ spark_ctxt_t *ctxt = p->ctxt;
+
+ if (!draw_pixel(f, x, y, makergb(0xff, 0xa0, 0x20, ((float)ctxt->longevity / ctxt->lifetime)))) {
+ /* offscreen */
+ ctxt->longevity = 0;
+ }
+}
+
+
+particle_ops_t spark_ops = {
+ .context_size = sizeof(spark_ctxt_t),
+ .sim = spark_sim,
+ .init = spark_init,
+ .draw = spark_draw,
+ .cleanup = NULL,
+ };
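spark_draw and simple_draw share the same fade rule: brightness is the remaining fraction of life, longevity / lifetime. A spark at longevity 75 of lifetime 150, for example, draws at half intensity. As a tiny sketch (spark_color() is a hypothetical helper; makergb() is the real draw.h routine used above):

#include <stdint.h>

#include "draw.h"

static uint32_t spark_color(int longevity, int lifetime)
{
	return makergb(0xff, 0xa0, 0x20, (float)longevity / lifetime);	/* orange, dimmed by remaining life */
}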
diff --git a/src/modules/sparkler/sparkler.c b/src/modules/sparkler/sparkler.c
new file mode 100644
index 0000000..0bb0fcf
--- /dev/null
+++ b/src/modules/sparkler/sparkler.c
@@ -0,0 +1,53 @@
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+#include <time.h>
+#include <unistd.h>
+
+#include "fb.h"
+#include "rototiller.h"
+#include "util.h"
+
+#include "particles.h"
+
+/* particle system gadget (C) Vito Caputo <vcaputo@pengaru.com> 2/15/2014 */
+/* 1/10/2015 added octree bsp (though not yet leveraged) */
+/* 11/25/2016 refactor and begun adapting to rototiller */
+
+#define INIT_PARTS 100
+
+extern particle_ops_t simple_ops;
+
+
+/* Render a 3D particle system */
+static void sparkler(fb_fragment_t *fragment)
+{
+ static particles_t *particles;
+ static int initialized;
+ uint32_t *buf = fragment->buf;
+
+ if (!initialized) {
+ srand(time(NULL) + getpid());
+
+ particles = particles_new();
+ particles_add_particles(particles, NULL, &simple_ops, INIT_PARTS);
+
+ initialized = 1;
+ }
+
+ memset(buf, 0, ((fragment->width << 2) + fragment->stride) * fragment->height);
+
+ particles_age(particles);
+ particles_draw(particles, fragment);
+ particles_sim(particles);
+ particles_add_particles(particles, NULL, &simple_ops, INIT_PARTS / 4);
+}
+
+
+rototiller_renderer_t sparkler_renderer = {
+ .render = sparkler,
+ .name = "sparkler",
+ .description = "Particle system with spatial interactions",
+ .author = "Vito Caputo <vcaputo@pengaru.com>",
+ .license = "GPLv2",
+};
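The memset in sparkler() clears the frame in one call by treating the fragment as ((width << 2) + stride) bytes per row, which implies stride is the per-row padding beyond the width * 4 pixel bytes. Under that same assumption, an equivalent row-at-a-time clear that skips the padding would look like this (fragment_clear() is hypothetical):

#include <stdint.h>
#include <string.h>

#include "fb.h"

static void fragment_clear(fb_fragment_t *fragment)
{
	unsigned pitch = (fragment->width << 2) + fragment->stride;	/* bytes from row to row */
	uint8_t *row = (uint8_t *)fragment->buf;
	unsigned y;

	for (y = 0; y < fragment->height; y++, row += pitch)
		memset(row, 0, fragment->width << 2);	/* clear pixels only, skip padding */
}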
diff --git a/src/modules/sparkler/sparkler.h b/src/modules/sparkler/sparkler.h
new file mode 100644
index 0000000..3beb610
--- /dev/null
+++ b/src/modules/sparkler/sparkler.h
@@ -0,0 +1,8 @@
+#ifndef _SPARKLER_H
+#define _SPARKLER_H
+
+#include "fb.h"
+
+void sparkler(fb_fragment_t *fragment);
+
+#endif
diff --git a/src/modules/sparkler/v3f.h b/src/modules/sparkler/v3f.h
new file mode 100644
index 0000000..8bf7e24
--- /dev/null
+++ b/src/modules/sparkler/v3f.h
@@ -0,0 +1,157 @@
+#ifndef _V3F_H
+#define _V3F_H
+
+#include <math.h>
+
+typedef struct v3f_t {
+ float x, y, z;
+} v3f_t;
+
+#define v3f_set(_v3f, _x, _y, _z) \
+ (_v3f)->x = _x; \
+ (_v3f)->y = _y; \
+ (_v3f)->z = _z;
+
+#define v3f_init(_x, _y, _z) \
+ { \
+ .x = _x, \
+ .y = _y, \
+ .z = _z, \
+ }
+
+/* return nonzero if a and b are equal */
+static inline int v3f_equal(v3f_t *a, v3f_t *b)
+{
+ return (a->x == b->x && a->y == b->y && a->z == b->z);
+}
+
+
+/* return the result of (a + b) */
+static inline v3f_t v3f_add(v3f_t *a, v3f_t *b)
+{
+ v3f_t res = v3f_init(a->x + b->x, a->y + b->y, a->z + b->z);
+
+ return res;
+}
+
+
+/* return the result of (a - b) */
+static inline v3f_t v3f_sub(v3f_t *a, v3f_t *b)
+{
+ v3f_t res = v3f_init(a->x - b->x, a->y - b->y, a->z - b->z);
+
+ return res;
+}
+
+
+/* return the result of (-v) */
+static inline v3f_t v3f_negate(v3f_t *v)
+{
+ v3f_t res = v3f_init(-v->x, -v->y, -v->z);
+
+ return res;
+}
+
+
+/* return the result of (a * b) */
+static inline v3f_t v3f_mult(v3f_t *a, v3f_t *b)
+{
+ v3f_t res = v3f_init(a->x * b->x, a->y * b->y, a->z * b->z);
+
+ return res;
+}
+
+
+/* return the result of (v * scalar) */
+static inline v3f_t v3f_mult_scalar(v3f_t *v, float scalar)
+{
+	v3f_t res = v3f_init(v->x * scalar, v->y * scalar, v->z * scalar);
+
+ return res;
+}
+
+
+/* return the result of (v / scalar) */
+static inline v3f_t v3f_div_scalar(v3f_t *v, float scalar)
+{
+ v3f_t res = v3f_init(v->x / scalar, v->y / scalar, v->z / scalar);
+
+ return res;
+}
+
+
+/* return the result of (a . b) */
+static inline float v3f_dot(v3f_t *a, v3f_t *b)
+{
+ return a->x * b->x + a->y * b->y + a->z * b->z;
+}
+
+
+/* return the length of the supplied vector */
+static inline float v3f_length(v3f_t *v)
+{
+ return sqrtf(v3f_dot(v, v));
+}
+
+
+/* return the normalized form of the supplied vector */
+static inline v3f_t v3f_normalize(v3f_t *v)
+{
+ v3f_t nv;
+ float f;
+
+ f = 1.0f / v3f_length(v);
+
+ v3f_set(&nv, f * v->x, f * v->y, f * v->z);
+
+ return nv;
+}
+
+
+/* return the distance squared between two arbitrary points */
+static inline float v3f_distance_sq(v3f_t *a, v3f_t *b)
+{
+ return powf(a->x - b->x, 2) + powf(a->y - b->y, 2) + powf(a->z - b->z, 2);
+}
+
+
+/* return the distance between two arbitrary points */
+/* (consider using v3f_distance_sq() instead if possible, sqrtf() is slow) */
+static inline float v3f_distance(v3f_t *a, v3f_t *b)
+{
+ return sqrtf(v3f_distance_sq(a, b));
+}
+
+
+/* return the cross product (a x b) of two vectors */
+static inline v3f_t v3f_cross(v3f_t *a, v3f_t *b)
+{
+ v3f_t product = v3f_init(a->y * b->z - a->z * b->y, a->z * b->x - a->x * b->z, a->x * b->y - a->y * b->x);
+
+ return product;
+}
+
+
+/* return the linearly interpolated vector between the two vectors at point alpha (0-1.0) */
+static inline v3f_t v3f_lerp(v3f_t *a, v3f_t *b, float alpha)
+{
+ v3f_t lerp_a, lerp_b;
+
+ lerp_a = v3f_mult_scalar(a, 1.0f - alpha);
+ lerp_b = v3f_mult_scalar(b, alpha);
+
+ return v3f_add(&lerp_a, &lerp_b);
+}
+
+
+/* return the normalized linearly interpolated vector between the two vectors at point alpha (0-1.0) */
+static inline v3f_t v3f_nlerp(v3f_t *a, v3f_t *b, float alpha)
+{
+ v3f_t lerp;
+
+ lerp = v3f_lerp(a, b, alpha);
+
+ return v3f_normalize(&lerp);
+}
+
+#endif
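A quick worked example of why v3f_nlerp() exists alongside v3f_lerp(): interpolating halfway between two unit axes gives (.5, .5, 0), whose length is only √.5 ≈ .707, so directions interpolated with plain lerp shrink; nlerp renormalizes the result. A throwaway demonstration (hypothetical main(), not part of the module):

#include <stdio.h>

#include "v3f.h"

int main(void)
{
	v3f_t x = v3f_init(1.0f, 0.0f, 0.0f);
	v3f_t y = v3f_init(0.0f, 1.0f, 0.0f);
	v3f_t mid = v3f_lerp(&x, &y, 0.5f);	/* (.5, .5, 0), length ≈ .707 */
	v3f_t dir = v3f_nlerp(&x, &y, 0.5f);	/* renormalized to unit length */

	printf("lerp length=%f nlerp length=%f\n", v3f_length(&mid), v3f_length(&dir));

	return 0;
}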
diff --git a/src/modules/sparkler/xplode.c b/src/modules/sparkler/xplode.c
new file mode 100644
index 0000000..24a436e
--- /dev/null
+++ b/src/modules/sparkler/xplode.c
@@ -0,0 +1,82 @@
+#include <stdlib.h>
+
+#include "draw.h"
+#include "particle.h"
+#include "particles.h"
+
+/* an "xplode" particle type, emitted by rockets in large numbers at the end of their lifetime */
+#define XPLODE_MAX_DECAY_RATE 10
+#define XPLODE_MIN_DECAY_RATE 5
+#define XPLODE_MAX_LIFETIME 150
+#define XPLODE_MIN_LIFETIME 5
+
+extern particle_ops_t spark_ops;
+particle_ops_t xplode_ops;
+
+typedef struct _xplode_ctxt_t {
+ int decay_rate;
+ int longevity;
+ int lifetime;
+} xplode_ctxt_t;
+
+
+static int xplode_init(particles_t *particles, particle_t *p)
+{
+ xplode_ctxt_t *ctxt = p->ctxt;
+
+ ctxt->decay_rate = rand_within_range(XPLODE_MIN_DECAY_RATE, XPLODE_MAX_DECAY_RATE);
+ ctxt->lifetime = ctxt->longevity = rand_within_range(XPLODE_MIN_LIFETIME, XPLODE_MAX_LIFETIME);
+
+ p->props->drag = 10.9;
+ p->props->mass = 0.3;
+
+ return 1;
+}
+
+
+static particle_status_t xplode_sim(particles_t *particles, particle_t *p)
+{
+ xplode_ctxt_t *ctxt = p->ctxt;
+
+ if (!ctxt->longevity || (ctxt->longevity -= ctxt->decay_rate) <= 0) {
+ ctxt->longevity = 0;
+ return PARTICLE_DEAD;
+ }
+
+	/* litter some small sparks behind the explosion particle
+	 * (lifetime never changes after init, so this selects the ~1/30 of
+	 * particles whose lifetime is divisible by 30 to trail a child every frame)
+	 */
+	if (!(ctxt->lifetime % 30)) {
+ particle_props_t props = *p->props;
+
+ props.velocity = (float)rand_within_range(10, 50) / 10000.0;
+ particles_spawn_particle(particles, p, &props, &xplode_ops);
+ }
+
+ return PARTICLE_ALIVE;
+}
+
+
+static void xplode_draw(particles_t *particles, particle_t *p, int x, int y, fb_fragment_t *f)
+{
+ xplode_ctxt_t *ctxt = p->ctxt;
+ uint32_t color;
+
+ if (ctxt->longevity == ctxt->lifetime) {
+ color = makergb(0xff, 0xff, 0xa0, 1.0);
+ } else {
+ color = makergb(0xff, 0xff, 0x00, ((float)ctxt->longevity / ctxt->lifetime));
+ }
+
+ if (!draw_pixel(f, x, y, color)) {
+ /* offscreen */
+ ctxt->longevity = 0;
+ }
+}
+
+
+particle_ops_t xplode_ops = {
+ .context_size = sizeof(xplode_ctxt_t),
+ .sim = xplode_sim,
+ .init = xplode_init,
+ .draw = xplode_draw,
+ .cleanup = NULL,
+ };
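makergb() itself isn't part of this diff (it lives in draw.h, listed in the diffstat); from the call sites above it takes three 8-bit channels plus a 0..1 intensity and yields a packed pixel. A guess at an equivalent, explicitly an assumption rather than the actual draw.h definition:

#include <stdint.h>

/* hypothetical stand-in for draw.h's makergb(): scale each 8-bit channel
 * by a 0..1 intensity and pack as 0x00RRGGBB; the real routine may differ */
static uint32_t makergb_sketch(uint32_t r, uint32_t g, uint32_t b, float intensity)
{
	r = (uint32_t)((float)r * intensity);
	g = (uint32_t)((float)g * intensity);
	b = (uint32_t)((float)b * intensity);

	return (r << 16) | (g << 8) | b;
}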