| author | Vito Caputo <vcaputo@pengaru.com> | 2021-02-18 03:17:50 -0800 |
| --- | --- | --- |
| committer | Vito Caputo <vcaputo@pengaru.com> | 2021-02-18 03:36:51 -0800 |
| commit | f2d13f0b8cee3c68fd63e060facbf62c16e24ba2 (patch) | |
| tree | 71166ea429fb18b012afea00259d0f8257dd5df3 /src | |
| parent | 6661cb3d0cbbc170b385d1ff66f525aac899df6a (diff) | |
fb: introduce fb_rebuild()
When an fb has been resized, all the pages can be forced to go
through a backend free->alloc cycle by calling this function.
This process gets queued and realized as the pages pass through
the inactive state.
This does mean that whatever pages are already queued as ready
still get displayed, so it isn't entirely free of
racing/flickering at the fringes on window growth.
It's possible to do something with the pages in the ready queue
to combat this, but for now I'm just leaving them be to keep it
simple. The ready pages could be stretched or something...
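For illustration only (not part of this commit), a windowed backend would typically call the new function from its resize handling. The handler below is a hypothetical sketch assuming only the fb.h API added here; the function name and arguments are invented for the example:

```c
#include "fb.h"	/* declares fb_t and fb_rebuild() as of this commit */

/* Hypothetical resize handler for a windowed backend; not code from the tree. */
static void handle_window_resize(fb_t *fb, unsigned new_width, unsigned new_height)
{
	/* ...update whatever backend state tracks the window dimensions... */

	/* Queue a free->alloc cycle for every page; each page actually gets
	 * rebuilt later as it passes through the inactive state in fb_flip().
	 */
	fb_rebuild(fb);
}
```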
Diffstat (limited to 'src')
| -rw-r--r-- | src/fb.c | 34 |
| -rw-r--r-- | src/fb.h | 1 |
2 files changed, 35 insertions, 0 deletions
diff --git a/src/fb.c b/src/fb.c
--- a/src/fb.c
+++ b/src/fb.c
@@ -67,6 +67,9 @@ typedef struct fb_t {
 	void		*ops_context;
 	int		n_pages;
 
+	pthread_mutex_t	rebuild_mutex;
+	int		rebuild_pages;	/* counter of pages needing a rebuild */
+
 	_fb_page_t	*active_page;	/* page currently displayed */
 
 	pthread_mutex_t	ready_mutex;
@@ -123,6 +126,20 @@ int fb_flip(fb_t *fb)
 		fb->inactive_pages_head->next->previous = fb->inactive_pages_head;
 	else
 		fb->inactive_pages_tail = fb->inactive_pages_head;
+
+	/* before setting the renderer loose, check if there's more page rebuilding needed,
+	 * and if there is do as much as possible here in the inactive set.  Note it's important
+	 * that the renderer take pages from the tail, and we always replenish inactive at the
+	 * head, as well as rebuild pages from the head.
+	 */
+	pthread_mutex_lock(&fb->rebuild_mutex);
+	for (_fb_page_t *p = fb->inactive_pages_head; p && fb->rebuild_pages > 0; p = p->next) {
+		fb->ops->page_free(fb, fb->ops_context, p->ops_page);
+		p->ops_page = fb->ops->page_alloc(fb, fb->ops_context, &p->public_page);
+		fb->rebuild_pages--;
+	}
+	pthread_mutex_unlock(&fb->rebuild_mutex);
+
 	pthread_cond_signal(&fb->inactive_cond);
 	pthread_mutex_unlock(&fb->inactive_mutex);
 
@@ -325,6 +342,7 @@ int fb_new(const fb_ops_t *ops, settings_t *settings, int n_pages, fb_t **res_fb)
 	pthread_cond_init(&fb->ready_cond, NULL);
 	pthread_mutex_init(&fb->inactive_mutex, NULL);
 	pthread_cond_init(&fb->inactive_cond, NULL);
+	pthread_mutex_init(&fb->rebuild_mutex, NULL);
 
 	page = _fb_page_get(fb);
 	if (!page) {
@@ -347,6 +365,22 @@ fail:
 }
 
 
+/* This informs the fb to reconstruct its pages as they become invalid,
+ * giving the backend an opportunity to reconfigure them before they get
+ * rendered to again.  It's intended to be used in response to window
+ * resizes.
+ */
+void fb_rebuild(fb_t *fb)
+{
+	assert(fb);
+
+	/* TODO: this could easily be an atomic counter since we have no need for waiting */
+	pthread_mutex_lock(&fb->rebuild_mutex);
+	fb->rebuild_pages = fb->n_pages;
+	pthread_mutex_unlock(&fb->rebuild_mutex);
+}
+
+
 /* helpers for fragmenting incrementally */
 int fb_fragment_slice_single(const fb_fragment_t *fragment, unsigned n_fragments, unsigned number, fb_fragment_t *res_fragment)
 {
diff --git a/src/fb.h b/src/fb.h
--- a/src/fb.h
+++ b/src/fb.h
@@ -50,6 +50,7 @@ void fb_page_put(fb_t *fb, fb_page_t *page);
 fb_t * fb_free(fb_t *fb);
 void fb_get_put_pages_count(fb_t *fb, unsigned *count);
 int fb_new(const fb_ops_t *ops, settings_t *settings, int n_pages, fb_t **res_fb);
+void fb_rebuild(fb_t *fb);
 void * fb_context(fb_t *fb);
 int fb_flip(fb_t *fb);
 void fb_fragment_divide(fb_fragment_t *fragment, unsigned n_fragments, fb_fragment_t fragments[]);
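The TODO inside fb_rebuild() points out that nothing ever waits on rebuild_pages, so a mutex isn't strictly required. A rough sketch of that alternative using C11 atomics follows; it is not part of this commit, and the stand-in struct only carries the fields relevant to the example:

```c
#include <assert.h>
#include <stdatomic.h>

/* Minimal stand-in for the relevant fb_t fields; the real struct lives in fb.c. */
typedef struct fb_t {
	int		n_pages;
	atomic_int	rebuild_pages;	/* counter of pages needing a rebuild */
} fb_t;

/* Atomic variant of fb_rebuild(): a plain store suffices since no thread waits
 * on the counter; it's only polled and decremented by fb_flip() while it walks
 * the inactive pages.
 */
void fb_rebuild(fb_t *fb)
{
	assert(fb);

	atomic_store(&fb->rebuild_pages, fb->n_pages);
}
```

The consumer side in fb_flip() would then read the counter with atomic_load() and decrement it with atomic_fetch_sub() in the inactive-page loop, dropping rebuild_mutex entirely.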