summaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
authorVito Caputo <vcaputo@pengaru.com>2018-11-26 23:46:55 -0800
committerVito Caputo <vcaputo@pengaru.com>2018-11-26 23:46:55 -0800
commit3abb513923adb1f5238a62dd7c64c433a08d9d62 (patch)
tree58ce04e6d82548ea8a93446bd64a06af01e0749a /src
*: initial commit
This is a simple allocator derived from the chunker in the sparkler rototiller particle system module. It's useful for pooling allocations of varying size with a lot of churn. It also gives a convenient way of discarding all allocations through a reset mechanism obviating the need for granular freeing, where applicable.
Diffstat (limited to 'src')
-rw-r--r--src/Makefile.am2
-rw-r--r--src/container.h27
-rw-r--r--src/example.c35
-rw-r--r--src/list.h252
-rw-r--r--src/pad.c243
-rw-r--r--src/pad.h28
6 files changed, 587 insertions, 0 deletions
diff --git a/src/Makefile.am b/src/Makefile.am
new file mode 100644
index 0000000..e1ee752
--- /dev/null
+++ b/src/Makefile.am
@@ -0,0 +1,2 @@
+noinst_LIBRARIES = libpad.a
+libpad_a_SOURCES = container.h list.h pad.c pad.h
diff --git a/src/container.h b/src/container.h
new file mode 100644
index 0000000..c286074
--- /dev/null
+++ b/src/container.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2018 - Vito Caputo - <vcaputo@pengaru.com>
+ *
+ * This program is free software: you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 3 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
#ifndef CONTAINER_H
#define CONTAINER_H

#include <stddef.h>

/* Resolve a pointer to an embedded member back to a pointer to its
 * containing struct.
 *
 * Fixes vs. the original:
 * - arithmetic on void * is a GNU extension; use char * for portable,
 *   well-defined byte arithmetic.
 * - the whole expansion is parenthesized so postfix operators applied to
 *   the macro result (e.g. container_of(p, T, m)->field) parse correctly.
 * - _CONTAINER_H is a reserved identifier (leading underscore + uppercase).
 */
#ifndef container_of
#define container_of(_ptr, _type, _member) \
	((_type *)((char *)(_ptr) - offsetof(_type, _member)))
#endif

#endif
diff --git a/src/example.c b/src/example.c
new file mode 100644
index 0000000..12383fa
--- /dev/null
+++ b/src/example.c
@@ -0,0 +1,35 @@
+#include <assert.h>
+
+#include "pad.h"
+
+typedef struct foo_t {
+ int x, y, z;
+} foo_t;
+
+#define CHUNK_CNT 256
+
+int main(int argc, char *argv[])
+{
+ foo_t *f[10 * CHUNK_CNT];
+ pad_t *p;
+
+ p = pad_new(sizeof(foo_t) * 256);
+ assert(p);
+
+ for (int n = 0; n < 10; n++) {
+ for (int i = 0; i < 10 * CHUNK_CNT; i++)
+ assert(f[i] = pad_get(p, sizeof(foo_t)));
+
+ for (int i = 0; i < 10 * CHUNK_CNT; i++)
+ pad_put(f[i]);
+
+ for (int i = 0; i < 10 * CHUNK_CNT; i++)
+ assert(f[i] = pad_get(p, sizeof(foo_t)));
+
+ pad_reset(p);
+ }
+
+ pad_free(p);
+
+ return 0;
+}
diff --git a/src/list.h b/src/list.h
new file mode 100644
index 0000000..48bca36
--- /dev/null
+++ b/src/list.h
@@ -0,0 +1,252 @@
#ifndef LIST_H
#define LIST_H

#include <stddef.h>

/* linux kernel linked list interface */

/*
 * Simple doubly linked list implementation.
 *
 * Some of the internal functions ("__xxx") are useful when
 * manipulating whole lists rather than single entries, as
 * sometimes we already know the next/prev entries and we can
 * generate better code by using them directly rather than
 * using the generic single-entry routines.
 *
 * Fixes vs. the original:
 * - __LIST_H is a reserved identifier; guard renamed to LIST_H.
 * - list_entry() now uses offsetof() instead of the null-pointer
 *   dereference trick cast through (unsigned long), which both invites
 *   UB and truncates pointers on LLP64 targets (64-bit Windows).
 */

typedef struct list_head {
	struct list_head *next, *prev;
} list_head_t;

#define LIST_HEAD_INIT(name) { &(name), &(name) }

#define LIST_HEAD(name) \
	struct list_head name = LIST_HEAD_INIT(name)

#define INIT_LIST_HEAD(ptr) do { \
	(ptr)->next = (ptr); (ptr)->prev = (ptr); \
} while (0)

/*
 * Insert a new entry between two known consecutive entries.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
static inline void __list_add(struct list_head *new,
			      struct list_head *prev,
			      struct list_head *next)
{
	next->prev = new;
	new->next = next;
	new->prev = prev;
	prev->next = new;
}

/**
 * list_add - add a new entry
 * @new: new entry to be added
 * @head: list head to add it after
 *
 * Insert a new entry after the specified head.
 * This is good for implementing stacks.
 */
static inline void list_add(struct list_head *new, struct list_head *head)
{
	__list_add(new, head, head->next);
}

/**
 * list_add_tail - add a new entry
 * @new: new entry to be added
 * @head: list head to add it before
 *
 * Insert a new entry before the specified head.
 * This is useful for implementing queues.
 */
static inline void list_add_tail(struct list_head *new, struct list_head *head)
{
	__list_add(new, head->prev, head);
}

/*
 * Delete a list entry by making the prev/next entries
 * point to each other.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
static inline void __list_del(struct list_head *prev, struct list_head *next)
{
	next->prev = prev;
	prev->next = next;
}

/**
 * list_del - deletes entry from list.
 * @entry: the element to delete from the list.
 * Note: list_empty on entry does not return true after this, the entry is
 * in an undefined state (next/prev are poisoned with NULL).
 */
static inline void list_del(struct list_head *entry)
{
	__list_del(entry->prev, entry->next);
	entry->next = (void *) 0;
	entry->prev = (void *) 0;
}

/**
 * list_del_init - deletes entry from list and reinitialize it.
 * @entry: the element to delete from the list.
 */
static inline void list_del_init(struct list_head *entry)
{
	__list_del(entry->prev, entry->next);
	INIT_LIST_HEAD(entry);
}

/**
 * list_move - delete from one list and add as another's head
 * @list: the entry to move
 * @head: the head that will precede our entry
 */
static inline void list_move(struct list_head *list, struct list_head *head)
{
	__list_del(list->prev, list->next);
	list_add(list, head);
}

/**
 * list_move_tail - delete from one list and add as another's tail
 * @list: the entry to move
 * @head: the head that will follow our entry
 */
static inline void list_move_tail(struct list_head *list,
				  struct list_head *head)
{
	__list_del(list->prev, list->next);
	list_add_tail(list, head);
}

/**
 * list_empty - tests whether a list is empty
 * @head: the list to test.
 */
static inline int list_empty(struct list_head *head)
{
	return head->next == head;
}

/* Splice @list's entries after @head (internal; @list must be non-empty). */
static inline void __list_splice(struct list_head *list,
				 struct list_head *head)
{
	struct list_head *first = list->next;
	struct list_head *last = list->prev;
	struct list_head *at = head->next;

	first->prev = head;
	head->next = first;

	last->next = at;
	at->prev = last;
}

/**
 * list_splice - join two lists
 * @list: the new list to add.
 * @head: the place to add it in the first list.
 */
static inline void list_splice(struct list_head *list, struct list_head *head)
{
	if (!list_empty(list))
		__list_splice(list, head);
}

/**
 * list_splice_init - join two lists and reinitialise the emptied list.
 * @list: the new list to add.
 * @head: the place to add it in the first list.
 *
 * The list at @list is reinitialised
 */
static inline void list_splice_init(struct list_head *list,
				    struct list_head *head)
{
	if (!list_empty(list)) {
		__list_splice(list, head);
		INIT_LIST_HEAD(list);
	}
}

/**
 * list_entry - get the struct for this entry
 * @ptr: the &struct list_head pointer.
 * @type: the type of the struct this is embedded in.
 * @member: the name of the list_struct within the struct.
 */
#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/**
 * list_for_each - iterate over a list
 * @pos: the &struct list_head to use as a loop counter.
 * @head: the head for your list.
 */
#define list_for_each(pos, head) \
	for (pos = (head)->next; pos != (head); \
	     pos = pos->next)

/**
 * list_for_each_prev - iterate over a list backwards
 * @pos: the &struct list_head to use as a loop counter.
 * @head: the head for your list.
 */
#define list_for_each_prev(pos, head) \
	for (pos = (head)->prev; pos != (head); \
	     pos = pos->prev)

/**
 * list_for_each_safe - iterate over a list safe against removal of list entry
 * @pos: the &struct list_head to use as a loop counter.
 * @n: another &struct list_head to use as temporary storage
 * @head: the head for your list.
 */
#define list_for_each_safe(pos, n, head) \
	for (pos = (head)->next, n = pos->next; pos != (head); \
	     pos = n, n = pos->next)

/**
 * list_for_each_entry - iterate over list of given type
 * @pos: the type * to use as a loop counter.
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 */
#define list_for_each_entry(pos, head, member) \
	for (pos = list_entry((head)->next, typeof(*pos), member); \
	     &pos->member != (head); \
	     pos = list_entry(pos->member.next, typeof(*pos), member))

/**
 * list_for_each_entry_prev - iterate over list of given type backwards
 * @pos: the type * to use as a loop counter.
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 */
#define list_for_each_entry_prev(pos, head, member) \
	for (pos = list_entry((head)->prev, typeof(*pos), member); \
	     &pos->member != (head); \
	     pos = list_entry(pos->member.prev, typeof(*pos), member))

/**
 * list_for_each_entry_safe - iterate over list of given type safe against
 * removal of list entry
 * @pos: the type * to use as a loop counter.
 * @n: another type * to use as temporary storage
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 */
#define list_for_each_entry_safe(pos, n, head, member) \
	for (pos = list_entry((head)->next, typeof(*pos), member), \
	     n = list_entry(pos->member.next, typeof(*pos), member); \
	     &pos->member != (head); \
	     pos = n, n = list_entry(n->member.next, typeof(*n), member))

#endif
diff --git a/src/pad.c b/src/pad.c
new file mode 100644
index 0000000..e0bc56d
--- /dev/null
+++ b/src/pad.c
@@ -0,0 +1,243 @@
+/*
+ * Copyright 2018 - Vito Caputo - <vcaputo@pengaru.com>
+ *
+ * This program is free software: you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 3 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <assert.h>
+#include <stddef.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <string.h>
+
+#include "pad.h"
+#include "container.h"
+#include "list.h"
+
+/* This is adapted from the particle system in my rototiller project:
+ * git://git.pengaru.com/rototiller
+ *
+ * This implements a very basic chunked allocator which prioritizes efficient
+ * allocation and freeing over low waste of memory. It mallocs a chunk at a
+ * time, doling out elements from the chunk sequentially as requested until the
+ * chunk cannot fulfill an allocation. At that point, the current chunk is
+ * retired, a new chunk is allocated, and the cycle repeats.
+ *
+ * When allocations are freed, the refcount for its chunk is decremented,
+ * leaving the chunk pinned with holes accumulating until the refcount reaches
+ * zero, at which point the chunk is made available for allocations again.
+ *
+ * This requires a pointer to the chunk be returned with every allocation. It
+ * may be possible to reduce the footprint of this by using a relative offset
+ * to the chunk start instead, but that would probably be more harmful to the
+ * alignment.
+ *
+ * Note that the allocations may vary in size, they just can't exceed the chunk
+ * size on a given pad instance. A TODO item is to add a pad variant with a
+ * fixed allocation size specified at pad creation, with the addition of a free
+ * list enabling hole-filling and a fast cache-friendly iterator for quickly
+ * visiting all the elements in pinned chunks.
+ */
+
#define CHUNK_ALIGNMENT	8192	/* XXX: this may be unnecessary, callers should be able to ideally size their pads */
#define ALLOC_ALIGNMENT	8	/* allocations within the chunk need to be aligned since their size affects subsequent allocation offsets */
/* Round _size up to the next multiple of _alignment (power of two).
 * Fix: _alignment is now fully parenthesized; the original expanded
 * e.g. ALIGN(8, 1 << 3) as (8 + 1 << 3 - 1) & ~(1 << 2) == 32.
 */
#define ALIGN(_size, _alignment)	(((_size) + (_alignment) - 1) & ~((_alignment) - 1))
+
/* One contiguous malloc'd region that allocations are carved from
 * sequentially via next_offset; pinned while n_refs > 0.
 */
typedef struct chunk_t {
	pad_t		*pad;		/* pad chunk belongs to */
	list_head_t	chunks;		/* node on free/pinned list */
	uint32_t	n_refs;		/* number of references (active allocations) to this chunk */
	unsigned	next_offset;	/* next available offset for allocation */
	uint8_t		mem[];		/* usable memory from this chunk */
} chunk_t;


/* Header prepended to every allocation so pad_put() can find the
 * owning chunk from the user pointer (via container_of on mem).
 */
typedef struct alloc_t {
	chunk_t		*chunk;		/* chunk this allocation came from */
	uint8_t		mem[];		/* usable memory from this allocation */
} alloc_t;


/* Allocator instance; opaque to callers (forward-declared in pad.h). */
struct pad_t {
	chunk_t		*chunk;		/* current chunk allocations come from */
	unsigned	chunk_size;	/* size chunks are allocated in */
	list_head_t	free_chunks;	/* list of completely free chunks */
	list_head_t	pinned_chunks;	/* list of chunks pinned because they have an outstanding allocation */
};
+
+
+/* Add a reference to a chunk. */
+static inline void chunk_ref(chunk_t *chunk)
+{
+ assert(chunk);
+ assert(chunk->pad);
+
+ chunk->n_refs++;
+
+ assert(chunk->n_refs != 0);
+}
+
+
+/* Remove reference from a chunk, move to free list if last reference. */
+static inline void chunk_unref(chunk_t *chunk)
+{
+ assert(chunk);
+ assert(chunk->pad);
+ assert(chunk->n_refs > 0);
+
+ if (chunk->n_refs == 1) {
+ list_move(&chunk->chunks, &chunk->pad->free_chunks);
+
+ return;
+ }
+
+ chunk->n_refs--;
+}
+
+
+/* Return allocated size of the chunk */
+static inline unsigned chunk_alloc_size(pad_t *pad)
+{
+ assert(pad);
+
+ return (sizeof(chunk_t) + pad->chunk_size);
+}
+
+
+/* Get a new working chunk */
+static chunk_t * chunk_get(pad_t *pad)
+{
+ chunk_t *chunk;
+
+ assert(pad);
+
+ if (!list_empty(&pad->free_chunks)) {
+ chunk = list_entry(pad->free_chunks.next, chunk_t, chunks);
+ list_del(&chunk->chunks);
+ } else {
+ /* No free chunks, must ask libc for memory */
+ chunk = malloc(chunk_alloc_size(pad));
+ if (!chunk)
+ return NULL;
+ }
+
+ /* Note a chunk is pinned from the moment it's created, and a reference
+ * is added to represent pad->chunk, even though no allocations
+ * occurred yet.
+ */
+ chunk->n_refs = 1;
+ chunk->next_offset = 0;
+ chunk->pad = pad;
+ list_add(&chunk->chunks, &pad->pinned_chunks);
+
+ return chunk;
+}
+
+
+/* Create a new pad. */
+pad_t * pad_new(unsigned chunk_size)
+{
+ pad_t *pad;
+
+ pad = calloc(1, sizeof(pad_t));
+ if (!pad)
+ return NULL;
+
+ INIT_LIST_HEAD(&pad->free_chunks);
+ INIT_LIST_HEAD(&pad->pinned_chunks);
+
+ /* XXX: pad->chunk_size does not include the size of the chunk_t container */
+ pad->chunk_size = ALIGN(chunk_size, CHUNK_ALIGNMENT);
+
+ return pad;
+}
+
+
+/* Reset a pad making all chunks free/available for reuse, deleting all
+ * existing allocations in one swoop without giving the memory back to
+ * libc.
+ */
+void pad_reset(pad_t *pad)
+{
+ assert(pad);
+
+ pad->chunk = NULL;
+ list_splice_init(&pad->pinned_chunks, &pad->free_chunks);
+}
+
+
+/* Free a pad and it's associated allocations. */
+void pad_free(pad_t *pad)
+{
+ chunk_t *chunk, *_chunk;
+
+ assert(pad);
+
+ pad_reset(pad);
+
+ list_for_each_entry_safe(chunk, _chunk, &pad->free_chunks, chunks)
+ free(chunk);
+
+ free(pad);
+}
+
+
+/* Get uninitialized memory from a pad. (malloc()).
+ * May return NULL if malloc() fails
+ */
+void * pad_get(pad_t *pad, unsigned size)
+{
+ alloc_t *alloc;
+
+ assert(pad);
+ assert(size <= pad->chunk_size);
+
+ size = ALIGN(sizeof(alloc_t) + size, ALLOC_ALIGNMENT);
+
+ if (!pad->chunk || size + pad->chunk->next_offset > pad->chunk_size) {
+ /* Retire this chunk, time for a new one */
+ if (pad->chunk)
+ chunk_unref(pad->chunk);
+
+ pad->chunk = chunk_get(pad);
+ }
+
+ if (!pad->chunk)
+ return NULL;
+
+ chunk_ref(pad->chunk);
+ alloc = (alloc_t *)&pad->chunk->mem[pad->chunk->next_offset];
+ pad->chunk->next_offset += size;
+ alloc->chunk = pad->chunk;
+
+ assert(pad->chunk->next_offset <= pad->chunk_size);
+
+ return alloc->mem;
+}
+
+
+/* Put memory taken from a pad back. (free()) */
+void pad_put(void *ptr)
+{
+ alloc_t *alloc = container_of(ptr, alloc_t, mem);
+
+ assert(ptr);
+
+ chunk_unref(alloc->chunk);
+}
+
+/* TODO: add a chunk iterator interface for cache-friendly iterating across
+ * chunk contents. This is really only viable for uniformly sized chunks,
+ * which the api doesn't currently enforce/differentiate. So that would
+ * probably be a special pad mode the iterator would only work with.
+ */
diff --git a/src/pad.h b/src/pad.h
new file mode 100644
index 0000000..2efb0f2
--- /dev/null
+++ b/src/pad.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2018 - Vito Caputo - <vcaputo@pengaru.com>
+ *
+ * This program is free software: you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 3 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
#ifndef PAD_H	/* fix: _PAD_H is a reserved identifier */
#define PAD_H

typedef struct pad_t pad_t;	/* opaque allocator handle */

/* Create a pad serving allocations from chunks of chunk_size bytes;
 * returns NULL on allocation failure.
 */
pad_t * pad_new(unsigned chunk_size);
/* Discard all outstanding allocations at once, keeping chunks for reuse. */
void pad_reset(pad_t *pad);
/* Destroy the pad and return all of its memory to libc. */
void pad_free(pad_t *pad);
/* Allocate size bytes (must not exceed the pad's chunk size); may return
 * NULL on failure.  Memory is uninitialized, like malloc().
 */
void * pad_get(pad_t *pad, unsigned size);
/* Release a single allocation previously returned by pad_get(). */
void pad_put(void *mem);

#endif
© All Rights Reserved