| author | Vito Caputo <vcaputo@pengaru.com> | 2020-11-25 17:57:35 -0800 |
|---|---|---|
| committer | Vito Caputo <vcaputo@pengaru.com> | 2020-11-25 17:57:35 -0800 |
| commit | 7b15a68d12e2df7f45c4311d0281ff9a78693e81 (patch) | |
| tree | 79a9e097888e3458366e5dbf68895036007878d1 /src/upstream/unaligned.h | |
| parent | 9ab253b33805bb7416e458992ffed73e9883a52b (diff) | |
upstream: import some hacked systemd headers
This brings in the journal-file data structures and byte-swapping
helpers I may as well just reuse.
I've had to do some minor messing about to make these workable in
isolation, outside the systemd tree, without pulling in too much.
I've also added a new HashedObjectHeader type to encompass the
slightly larger common header shared by FieldObject and
DataObject.  This facilitates a generic hash-table iterator that
can load just the HashedObjectHeader when nothing more than the
object size is needed for accounting.  Basically the
HashedObjectHeader is ObjectHeader + hash + next_hash_offset.
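
For illustration only, a minimal sketch of that layout in C. This is not the definition from the imported headers; the `le64_t` stand-in and exact field names are assumptions following systemd's journal-def.h conventions, and the real type embeds ObjectHeader rather than repeating its fields:

```c
#include <stdint.h>

typedef uint64_t le64_t;  /* stand-in for systemd's little-endian on-disk type */

/* Hypothetical flattened view of the common "hashed object" prefix shared by
 * FieldObject and DataObject.  The first four fields mirror journal-def.h's
 * ObjectHeader; hash and next_hash_offset are the extra common members. */
typedef struct HashedObjectHeader {
        uint8_t type;              /* OBJECT_DATA, OBJECT_FIELD, ... */
        uint8_t flags;
        uint8_t reserved[6];
        le64_t  size;              /* total object size: all the iterator needs for accounting */
        le64_t  hash;              /* payload hash, common to FieldObject and DataObject */
        le64_t  next_hash_offset;  /* next object in the same hash-table bucket */
} __attribute__((__packed__)) HashedObjectHeader;
```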
Diffstat (limited to 'src/upstream/unaligned.h')
-rw-r--r-- | src/upstream/unaligned.h | 99
1 file changed, 99 insertions, 0 deletions
```diff
diff --git a/src/upstream/unaligned.h b/src/upstream/unaligned.h
new file mode 100644
index 0000000..00c17f8
--- /dev/null
+++ b/src/upstream/unaligned.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+#pragma once
+
+#include <endian.h>
+#include <stdint.h>
+
+/* BE */
+
+static inline uint16_t unaligned_read_be16(const void *_u) {
+        const struct __attribute__((__packed__, __may_alias__)) { uint16_t x; } *u = _u;
+
+        return be16toh(u->x);
+}
+
+static inline uint32_t unaligned_read_be32(const void *_u) {
+        const struct __attribute__((__packed__, __may_alias__)) { uint32_t x; } *u = _u;
+
+        return be32toh(u->x);
+}
+
+static inline uint64_t unaligned_read_be64(const void *_u) {
+        const struct __attribute__((__packed__, __may_alias__)) { uint64_t x; } *u = _u;
+
+        return be64toh(u->x);
+}
+
+static inline void unaligned_write_be16(void *_u, uint16_t a) {
+        struct __attribute__((__packed__, __may_alias__)) { uint16_t x; } *u = _u;
+
+        u->x = be16toh(a);
+}
+
+static inline void unaligned_write_be32(void *_u, uint32_t a) {
+        struct __attribute__((__packed__, __may_alias__)) { uint32_t x; } *u = _u;
+
+        u->x = be32toh(a);
+}
+
+static inline void unaligned_write_be64(void *_u, uint64_t a) {
+        struct __attribute__((__packed__, __may_alias__)) { uint64_t x; } *u = _u;
+
+        u->x = be64toh(a);
+}
+
+/* LE */
+
+static inline uint16_t unaligned_read_le16(const void *_u) {
+        const struct __attribute__((__packed__, __may_alias__)) { uint16_t x; } *u = _u;
+
+        return le16toh(u->x);
+}
+
+static inline uint32_t unaligned_read_le32(const void *_u) {
+        const struct __attribute__((__packed__, __may_alias__)) { uint32_t x; } *u = _u;
+
+        return le32toh(u->x);
+}
+
+static inline uint64_t unaligned_read_le64(const void *_u) {
+        const struct __attribute__((__packed__, __may_alias__)) { uint64_t x; } *u = _u;
+
+        return le64toh(u->x);
+}
+
+static inline void unaligned_write_le16(void *_u, uint16_t a) {
+        struct __attribute__((__packed__, __may_alias__)) { uint16_t x; } *u = _u;
+
+        u->x = le16toh(a);
+}
+
+static inline void unaligned_write_le32(void *_u, uint32_t a) {
+        struct __attribute__((__packed__, __may_alias__)) { uint32_t x; } *u = _u;
+
+        u->x = le32toh(a);
+}
+
+static inline void unaligned_write_le64(void *_u, uint64_t a) {
+        struct __attribute__((__packed__, __may_alias__)) { uint64_t x; } *u = _u;
+
+        u->x = le64toh(a);
+}
+
+#if __BYTE_ORDER == __BIG_ENDIAN
+#define unaligned_read_ne16 unaligned_read_be16
+#define unaligned_read_ne32 unaligned_read_be32
+#define unaligned_read_ne64 unaligned_read_be64
+
+#define unaligned_write_ne16 unaligned_write_be16
+#define unaligned_write_ne32 unaligned_write_be32
+#define unaligned_write_ne64 unaligned_write_be64
+#else
+#define unaligned_read_ne16 unaligned_read_le16
+#define unaligned_read_ne32 unaligned_read_le32
+#define unaligned_read_ne64 unaligned_read_le64
+
+#define unaligned_write_ne16 unaligned_write_le16
+#define unaligned_write_ne32 unaligned_write_le32
+#define unaligned_write_ne64 unaligned_write_le64
+#endif
```
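
Why these helpers matter here (a hedged usage sketch, not code from this commit): journal objects sit at arbitrary byte offsets inside an mmap'd file and store their integers little-endian, so a field like an object's size can't simply be dereferenced on strict-alignment targets. Something along these lines is the intended pattern; the buffer, offsets, and include path are made up for illustration:

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#include "upstream/unaligned.h"  /* assumes src/ is on the include path */

/* Read the le64 "size" field of a journal object header from a raw byte
 * buffer at an arbitrary (possibly unaligned) offset.  Offset 8 matches
 * journal-def.h's ObjectHeader: type(1) + flags(1) + reserved(6) + size. */
static uint64_t object_size_at(const uint8_t *map, uint64_t offset) {
        return unaligned_read_le64(map + offset + 8);
}

int main(void) {
        uint8_t buf[24] = { 0 };  /* fake buffer standing in for an mmap'd journal */

        unaligned_write_le64(buf + 8, 0x30);  /* pretend the object is 48 bytes long */
        printf("object size: %" PRIu64 "\n", object_size_at(buf, 0));
        return 0;
}
```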