author     Vito Caputo <vcaputo@pengaru.com>  2021-08-24 17:15:25 -0700
committer  Vito Caputo <vcaputo@pengaru.com>  2021-08-24 21:30:32 -0700
commit     b4a6a215f5a0a044b56081ee9e4632e40a6ed4d4 (patch)
tree       9c1108e72abfc30f38795bd99d42378bf2601e4d
parent     5dc0dff11ab1ff9ec0fc1edf22458942462fe5e3 (diff)
verify-hashed-object: threaded offload via iou_async()
When a hashed object > 16KiB is encountered, hand it off to a worker thread via the newly added iou_async(). The idea is to avoid holding up the serialized iou_run() machinery while grinding on a large object.

The 16KiB threshold was pulled out of thin air; I haven't done any profiling to tune it, this was all just slapped together last night. I still need to get some journals with larger objects for testing, maybe some coredump.conf::Storage=journal situations, then do some tuning and timing vs. journalctl --verify.
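For readers unfamiliar with the shape of the handoff, here is a minimal, self-contained sketch of the offload-with-continuation pattern in plain pthreads. It is not the libiou API: offload_async(), offload_thread(), and struct offload are hypothetical stand-ins that mirror the work/completion argument pairs iou_async() is called with in the diff below, and unlike the real thing the completion here runs on the worker thread rather than being queued back to the serialized iou_run() loop.

/* offload.c - hypothetical sketch, not libiou: run expensive work on a
 * detached worker thread, then invoke a continuation, so the caller's
 * serialized event loop isn't blocked.  In the real iou_async() the
 * completion would presumably be queued back to the iou_run() loop; here
 * it simply runs on the worker thread for brevity.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

struct offload {
	int	(*work)(void *);	/* expensive part, runs off-loop */
	void	*work_arg;
	int	(*done)(void *);	/* continuation, runs after work completes */
	void	*done_arg;
};

static void * offload_thread(void *arg)
{
	struct offload	*o = arg;

	if (o->work(o->work_arg) == 0)	/* grind on the large object off-loop */
		o->done(o->done_arg);	/* then dispatch the continuation */

	free(o);

	return NULL;
}

/* hypothetical stand-in for iou_async(): same argument shape, none of the io_uring plumbing */
static int offload_async(int (*work)(void *), void *work_arg, int (*done)(void *), void *done_arg)
{
	struct offload	*o;
	pthread_t	t;

	o = malloc(sizeof(*o));
	if (!o)
		return -1;

	*o = (struct offload){ .work = work, .work_arg = work_arg, .done = done, .done_arg = done_arg };

	if (pthread_create(&t, NULL, offload_thread, o)) {
		free(o);
		return -1;
	}

	return pthread_detach(t);
}

static int verify_large_object(void *arg)
{
	printf("verifying \"%s\" on a worker thread\n", (char *)arg);
	sleep(1);	/* stand-in for hashing a large payload */

	return 0;
}

static int continue_iteration(void *arg)
{
	printf("continuation: %s\n", (char *)arg);

	return 0;
}

int main(void)
{
	if (offload_async(verify_large_object, "big object", continue_iteration, "advance to the next object") < 0)
		return 1;

	printf("event loop keeps running while the worker grinds\n");
	sleep(2);	/* stand-in for iou_run() pumping other completions */

	return 0;
}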
-rw-r--r--  src/verify-hashed-objects.c  28
1 file changed, 25 insertions, 3 deletions
diff --git a/src/verify-hashed-objects.c b/src/verify-hashed-objects.c
index 5e2f404..a75e082 100644
--- a/src/verify-hashed-objects.c
+++ b/src/verify-hashed-objects.c
@@ -116,7 +116,7 @@ static int decompress(int compression, void *src, uint64_t src_size, void **dest
}
-THUNK_DEFINE_STATIC(per_hashed_object, journal_t *, journal, Header *, header, Object **, iter_object, void **, decompressed, thunk_t *, closure)
+THUNK_DEFINE_STATIC(verify_hashed_object, journal_t *, journal, Header *, header, Object **, iter_object, void **, decompressed)
{
	int compression;
	uint64_t payload_size, h;
@@ -171,7 +171,29 @@ THUNK_DEFINE_STATIC(per_hashed_object, journal_t *, journal, Header *, header, O
		return -EBADMSG;
	}
-	return thunk_end(thunk_dispatch(closure));
+	return 0;
+}
+
+
+THUNK_DEFINE_STATIC(per_hashed_object, iou_t *, iou, journal_t *, journal, Header *, header, Object **, iter_object, void **, decompressed, thunk_t *, closure)
+{
+	assert(iter_object && *iter_object);
+
+	/* smallish objects verify synchronously here */
+	if ((*iter_object)->object.size <= 16 * 1024) {
+		int r;
+
+		r = verify_hashed_object(journal, header, iter_object, decompressed);
+		if (r < 0)
+			return r;
+
+		return thunk_end(thunk_dispatch(closure));
+	}
+
+	/* handoff larger objects to an async worker thread, with the supplied closure for continuation @ completion */
+	return thunk_end(iou_async(iou, (int(*)(void *))thunk_dispatch, THUNK(
+			verify_hashed_object(journal, header, iter_object, decompressed)),
+			(int(*)(void *))thunk_dispatch, closure));
}
@@ -201,7 +223,7 @@ THUNK_DEFINE_STATIC(per_object, thunk_t *, self, iou_t *, iou, journal_t **, jou
}
	return thunk_mid(journal_get_object(iou, journal, iter_offset, &iter_object_header->size, iter_object, THUNK(
-			per_hashed_object(*journal, header, iter_object, decompressed, THUNK(
+			per_hashed_object(iou, *journal, header, iter_object, decompressed, THUNK(
				journal_iter_next_object(iou, journal, header, iter_offset, iter_object_header, self))))));
}
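An aside on the THUNK()/thunk_dispatch() idiom this diff leans on: a thunk packages up a call and its arguments so it can be dispatched later as a continuation. The sketch below is not libthunk, just a hypothetical single-argument stand-in (thunkish_t, thunkish(), thunkish_dispatch()) showing the capture-now, call-later shape.

/* thunkish.c - hypothetical sketch, not libthunk: capture a function and
 * its argument now, dispatch the call later as a continuation.
 */
#include <stdio.h>
#include <stdlib.h>

typedef struct thunkish {
	int	(*fn)(void *);
	void	*arg;
} thunkish_t;

/* capture fn(arg) for later dispatch */
static thunkish_t * thunkish(int (*fn)(void *), void *arg)
{
	thunkish_t	*t = malloc(sizeof(*t));

	if (t)
		*t = (thunkish_t){ .fn = fn, .arg = arg };

	return t;
}

/* invoke the captured call and free the capture */
static int thunkish_dispatch(thunkish_t *t)
{
	int	r = t->fn(t->arg);

	free(t);

	return r;
}

static int next_object(void *arg)
{
	printf("continuing iteration at %s\n", (char *)arg);

	return 0;
}

int main(void)
{
	/* capture the continuation now... */
	thunkish_t	*closure = thunkish(next_object, "offset 0x1234");

	if (!closure)
		return 1;

	/* ...and dispatch it later, e.g. when a verification completes */
	return thunkish_dispatch(closure);
}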