author    Vito Caputo <vcaputo@pengaru.com>    2023-08-13 23:12:15 -0700
committer Vito Caputo <vcaputo@pengaru.com>    2023-08-13 23:12:15 -0700
commit    a41d4e72dc915a8cb79aa8f1f2177def06d75844 (patch)
tree      5d538547de4d83dea9f40edbddb2c0aea926aefe /src
parent    0cd861d195cf2f021f9e5c5c49b6f82dd9dfa993 (diff)
til_fb: track all pages and free them in til_fb_free()
Pages handed out from the inactive list didn't live on any list until they were submitted. This commit keeps all pages on a separate list, making cleanup of outstanding pages trivial in til_fb_free(), which is also introduced here. This is just tidying up the shutdown to get rid of a bunch of harmless leaks, in an effort to make ASAN more useful for detecting real leaks on graceful exits.
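For context, the bookkeeping this commit adds is a conventional intrusive doubly-linked list: every page gets linked onto an all_pages list at allocation time and unlinked when freed, so til_fb_free() can simply drain the list. A minimal standalone sketch of the same pattern, using hypothetical node_t/list_t stand-ins rather than the actual _til_fb_page_t/til_fb_t types:

#include <assert.h>
#include <stddef.h>

/* hypothetical stand-ins for _til_fb_page_t and til_fb_t */
typedef struct node_t {
	struct node_t	*all_next, *all_previous;
} node_t;

typedef struct list_t {
	node_t	*head, *tail;
} list_t;

/* push at the head, mirroring what _til_fb_page_alloc() does */
static void list_push(list_t *list, node_t *node)
{
	node->all_previous = NULL;
	node->all_next = list->head;
	list->head = node;
	if (node->all_next)
		node->all_next->all_previous = node;
	else
		list->tail = node;	/* first node is also the tail */
}

/* unlink from anywhere in the list, mirroring _til_fb_page_free() */
static void list_remove(list_t *list, node_t *node)
{
	if (node->all_next)
		node->all_next->all_previous = node->all_previous;
	else
		list->tail = node->all_previous;	/* node was the tail */

	if (node->all_previous)
		node->all_previous->all_next = node->all_next;
	else
		list->head = node->all_next;		/* node was the head */
}

int main(void)
{
	list_t	list = {0};
	node_t	a = {0}, b = {0};

	list_push(&list, &a);
	list_push(&list, &b);
	list_remove(&list, &a);	/* unlink the tail */
	assert(list.head == &b && list.tail == &b);
	list_remove(&list, &b);	/* list drains to empty */
	assert(!list.head && !list.tail);

	return 0;
}

Note the two symmetric else branches: they are what keep head and tail consistent when the node being linked or unlinked sits at either end, which is exactly the logic visible in the diff below.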
Diffstat (limited to 'src')
-rw-r--r--    src/til_fb.c    44
1 file changed, 42 insertions(+), 2 deletions(-)
diff --git a/src/til_fb.c b/src/til_fb.c
index 5de7035..10a5931 100644
--- a/src/til_fb.c
+++ b/src/til_fb.c
@@ -114,6 +114,7 @@ struct _til_fb_page_t {
til_fb_t *fb;
void *fb_ops_page;
+ _til_fb_page_t *all_next, *all_previous;
_til_fb_page_t *next, *previous;
_til_fb_fragment_t fragment;
};
@@ -138,6 +139,9 @@ typedef struct til_fb_t {
_til_fb_page_t *inactive_pages_head; /* finished pages available for (re)use */
_til_fb_page_t *inactive_pages_tail;
+ _til_fb_page_t *all_pages_head; /* all pages allocated */
+ _til_fb_page_t *all_pages_tail;
+
unsigned put_pages_count;
} til_fb_t;
@@ -226,6 +230,9 @@ static int til_fb_acquire(til_fb_t *fb, _til_fb_page_t *page)
/* release the fb, making the visible page inactive */
static void til_fb_release(til_fb_t *fb)
{
+ assert(fb);
+ assert(fb->active_page);
+
fb->ops->release(fb, fb->ops_context);
/* XXX: this is getting silly, either add a doubly linked list header or
@@ -245,6 +252,16 @@ static void til_fb_release(til_fb_t *fb)
static void _til_fb_page_free(til_fb_t *fb, _til_fb_page_t *page)
{
+ if (page->all_next)
+ page->all_next->all_previous = page->all_previous;
+ else
+ fb->all_pages_tail = page->all_previous;
+
+ if (page->all_previous)
+ page->all_previous->all_next = page->all_next;
+ else
+ fb->all_pages_head = page->all_next;
+
fb->ops->page_free(fb, fb->ops_context, page->fb_ops_page);
free(page);
@@ -291,7 +308,7 @@ static void _til_fb_fragment_memcpy_buf(til_fb_fragment_t *dest, til_fb_fragment
}
-/* snapshot the contents of fragment */
+/* snapshot the contents of whole-page fragment */
static til_fb_fragment_t * _til_fb_page_snapshot(til_fb_fragment_t **fragment_ptr, int preserve_original)
{
_til_fb_page_t *page;
@@ -299,6 +316,15 @@ static til_fb_fragment_t * _til_fb_page_snapshot(til_fb_fragment_t **fragment_pt
assert(fragment_ptr && *fragment_ptr);
+ /* XXX: note that nothing serializes this _til_fb_page_alloc(), so if anything actually
+ * tried to do whole-page snapshotting in parallel things would likely explode.
+ * But as of now, all parallel snapshots of fragments occur on subfragments, not on the
+ * top-level page, so they don't enter this page allocation code path.
+ *
+ * If things started doing threaded page allocations/snapshots, it'd break some assumptions
+ * all the way down to the $foo_fb backends where they maintain spare pages lists without
+ * any locking. XXX
+ */
page = container_of(*fragment_ptr, _til_fb_page_t, fragment);
new_page = _til_fb_page_alloc(page->fb);
*fragment_ptr = &new_page->fragment.public;
@@ -326,6 +352,13 @@ static _til_fb_page_t * _til_fb_page_alloc(til_fb_t *fb)
page->fragment.ops.reclaim = _til_fb_page_reclaim;
page->fragment.public.ops = &page->fragment.ops;
+ page->all_next = fb->all_pages_head;
+ fb->all_pages_head = page;
+ if (page->all_next)
+ page->all_next->all_previous = page;
+ else
+ fb->all_pages_tail = page;
+
return page;
}
@@ -465,10 +498,17 @@ void til_fb_get_put_pages_count(til_fb_t *fb, unsigned *count)
til_fb_t * til_fb_free(til_fb_t *fb)
{
if (fb) {
+ int count = 0;
+
if (fb->active_page)
til_fb_release(fb);
- /* TODO: free all the pages */
+ while (fb->all_pages_head) {
+ _til_fb_page_free(fb, fb->all_pages_head);
+ count++;
+ }
+
+ assert(count == fb->n_pages);
if (fb->ops->shutdown && fb->ops_context)
fb->ops->shutdown(fb, fb->ops_context);
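One further note on the XXX comment in _til_fb_page_snapshot() above: leaving page allocation unserialized is safe only because whole-page snapshots never currently happen in parallel. If that ever changed, the conventional remedy would be a mutex guarding the list manipulation (and, as the comment says, the same treatment for the $foo_fb backends' spare-pages lists). A hypothetical sketch building on the node_t/list_t stand-ins above; the lock is invented here for illustration, not something til_fb actually has:

#include <pthread.h>

/* hypothetical: the list grows a lock serializing every link/unlink */
typedef struct locked_list_t {
	pthread_mutex_t	mutex;	/* init with PTHREAD_MUTEX_INITIALIZER */
	list_t		list;	/* list_t/node_t from the sketch above */
} locked_list_t;

static void locked_list_push(locked_list_t *llist, node_t *node)
{
	pthread_mutex_lock(&llist->mutex);
	list_push(&llist->list, node);
	pthread_mutex_unlock(&llist->mutex);
}

static void locked_list_remove(locked_list_t *llist, node_t *node)
{
	pthread_mutex_lock(&llist->mutex);
	list_remove(&llist->list, node);
	pthread_mutex_unlock(&llist->mutex);
}

Removal from the middle of a doubly-linked list is the part that genuinely needs mutual exclusion; the head-push alone could be made lock-free, but a single mutex keeps the sketch simple.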