author	Vito Caputo <vcaputo@pengaru.com>	2019-05-06 18:33:51 -0700
committer	Vito Caputo <vcaputo@pengaru.com>	2019-05-06 19:06:57 -0700
commit	f4cc590a4896b0a1159f13f3c367d90b7b28ade2 (patch)
tree	704897de5812718e02364bc6e4ffcce3e77ea7cd
parent	8fe73d31e71f2648661deceb46bb6456d3cb46f1 (diff)
v3f: mechanical rename and handle NULL res ptrs
The return-by-value variants of the vector-returning functions have been renamed to carry a leading _. The return-by-pointer variants now lack any prefix, as it's expected they'll be more commonly used in practice. The return-by-pointer variants also now take the result pointer as the first argument, and a NULL result pointer is handled by dynamically allocating the result memory.
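For illustration, a minimal sketch of the resulting calling convention (using v3f_add; the caller-supplied vs. NULL "res" split follows the header comment below, and the OOM check only matters for the NULL case):

	#include <stdio.h>
	#include <stdlib.h>
	#include "v3f.h"

	int main(void)
	{
		v3f_t	a = {1.f, 2.f, 3.f}, b = {4.f, 5.f, 6.f};
		v3f_t	sum, *dsum;

		/* value-returning variant, now prefixed with _ */
		sum = _v3f_add(&a, &b);

		/* pointer variant with caller-supplied result memory; returns &sum */
		v3f_add(&sum, &a, &b);

		/* pointer variant with NULL res: result is malloc()d, may be NULL on OOM */
		dsum = v3f_add(NULL, &a, &b);
		if (!dsum)
			return 1;

		printf("%f %f %f\n", dsum->x, dsum->y, dsum->z);
		free(dsum);

		return 0;
	}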
-rw-r--r--	v3f.h	117
1 file changed, 86 insertions, 31 deletions
diff --git a/v3f.h b/v3f.h
index 0a7bc5b..7996364 100644
--- a/v3f.h
+++ b/v3f.h
@@ -14,81 +14,132 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+/*
+ * 3D vector operations header
+ *
+ * Variants prefixed with _ return a result vector struct by value.
+ *
+ * Variants lacking the _ prefix return a pointer to the result. They must
+ * either be supplied the result memory as the first "res" argument, which is
+ * then returned after being populated with the result's value, or be passed
+ * NULL, in which case space for the result is allocated and that pointer is
+ * returned after being populated. When supplying a NULL result pointer the
+ * functions must allocate memory and thus may return NULL on OOM, so callers
+ * should check for NULL returns when supplying NULL for res.
+ *
+ * Example:
+ * v3f_t foo, *foop;
+ *
+ * foo = _v3f_mult(&(v3f_t){1.f,1.f,1.f}, &(v3f_t){2.f,2.f,2.f});
+ *
+ * is equivalent to:
+ *
+ * v3f_mult(&foo, &(v3f_t){1.f,1.f,1.f}, &(v3f_t){2.f,2.f,2.f});
+ *
+ * or dynamically allocated:
+ *
+ * foop = v3f_mult(NULL, &(v3f_t){1.f,1.f,1.f}, &(v3f_t){2.f,2.f,2.f});
+ * free(foop);
+ *
+ * is equivalent to:
+ *
+ * foop = malloc(sizeof(v3f_t));
+ * v3f_mult(foop, &(v3f_t){1.f,1.f,1.f}, &(v3f_t){2.f,2.f,2.f});
+ * free(foop);
+ */
+
#ifndef _V3F_H
#define _V3F_H
+
#include <math.h>
+#include <stdlib.h>
+
typedef struct v3f_t {
float x, y, z;
} v3f_t;
-static inline v3f_t v3f_add(const v3f_t *a, const v3f_t *b)
+static inline v3f_t * _v3f_allocated(v3f_t **ptr)
+{
+ if (!*ptr)
+ *ptr = malloc(sizeof(v3f_t));
+
+ return *ptr;
+}
+
+
+static inline v3f_t _v3f_add(const v3f_t *a, const v3f_t *b)
{
return (v3f_t){a->x + b->x, a->y + b->y, a->z + b->z};
}
-static inline v3f_t * pv3f_add(const v3f_t *a, const v3f_t *b, v3f_t *res)
+static inline v3f_t * v3f_add(v3f_t *res, const v3f_t *a, const v3f_t *b)
{
- *res = v3f_add(a, b);
+ if (_v3f_allocated(&res))
+ *res = _v3f_add(a, b);
return res;
}
-static inline v3f_t v3f_sub(const v3f_t *a, const v3f_t *b)
+static inline v3f_t _v3f_sub(const v3f_t *a, const v3f_t *b)
{
return (v3f_t){a->x - b->x, a->y - b->y, a->z - b->z};
}
-static inline v3f_t * pv3f_sub(const v3f_t *a, const v3f_t *b, v3f_t *res)
+static inline v3f_t * v3f_sub(v3f_t *res, const v3f_t *a, const v3f_t *b)
{
- *res = v3f_sub(a, b);
+ if (_v3f_allocated(&res))
+ *res = _v3f_sub(a, b);
return res;
}
-static inline v3f_t v3f_mult(const v3f_t *a, const v3f_t *b)
+static inline v3f_t _v3f_mult(const v3f_t *a, const v3f_t *b)
{
return (v3f_t){a->x * b->x, a->y * b->y, a->z * b->z};
}
-static inline v3f_t * pv3f_mult(const v3f_t *a, const v3f_t *b, v3f_t *res)
+static inline v3f_t * v3f_mult(v3f_t *res, const v3f_t *a, const v3f_t *b)
{
- *res = v3f_mult(a, b);
+ if (_v3f_allocated(&res))
+ *res = _v3f_mult(a, b);
return res;
}
-static inline v3f_t v3f_mult_scalar(const v3f_t *v, float scalar)
+static inline v3f_t _v3f_mult_scalar(const v3f_t *v, float scalar)
{
return (v3f_t){v->x * scalar, v->y * scalar, v->z * scalar};
}
-static inline v3f_t * pv3f_mult_scalar(const v3f_t *v, float scalar, v3f_t *res)
+static inline v3f_t * v3f_mult_scalar(v3f_t *res, const v3f_t *v, float scalar)
{
- *res = v3f_mult_scalar(v, scalar);
+ if (_v3f_allocated(&res))
+ *res = _v3f_mult_scalar(v, scalar);
return res;
}
-static inline v3f_t v3f_div_scalar(const v3f_t *v, float scalar)
+static inline v3f_t _v3f_div_scalar(const v3f_t *v, float scalar)
{
- return v3f_mult_scalar(v, 1.f / scalar);
+ return _v3f_mult_scalar(v, 1.f / scalar);
}
-static inline v3f_t * pv3f_div_scalar(const v3f_t *v, float scalar, v3f_t *res)
+static inline v3f_t * v3f_div_scalar(v3f_t *res, const v3f_t *v, float scalar)
{
- *res = v3f_div_scalar(v, scalar);
+ if (_v3f_allocated(&res))
+ *res = _v3f_div_scalar(v, scalar);
return res;
}
@@ -106,52 +157,56 @@ static inline float v3f_length(const v3f_t *v)
}
-static inline v3f_t v3f_normalize(const v3f_t *v)
+static inline v3f_t _v3f_normalize(const v3f_t *v)
{
- return v3f_mult_scalar(v, 1.0f / v3f_length(v));
+ return _v3f_mult_scalar(v, 1.0f / v3f_length(v));
}
-static inline v3f_t * pv3f_normalize(const v3f_t *v, v3f_t *res)
+static inline v3f_t * v3f_normalize(v3f_t *res, const v3f_t *v)
{
- *res = v3f_normalize(v);
+ if (_v3f_allocated(&res))
+ *res = _v3f_normalize(v);
return res;
}
-static inline v3f_t v3f_lerp(const v3f_t *a, const v3f_t *b, float t)
+static inline v3f_t _v3f_lerp(const v3f_t *a, const v3f_t *b, float t)
{
v3f_t lerp_a, lerp_b;
- lerp_a = v3f_mult_scalar(a, 1.0f - t);
- lerp_b = v3f_mult_scalar(b, t);
+ lerp_a = _v3f_mult_scalar(a, 1.0f - t);
+ lerp_b = _v3f_mult_scalar(b, t);
- return v3f_add(&lerp_a, &lerp_b);
+ return _v3f_add(&lerp_a, &lerp_b);
}
-static inline v3f_t * pv3f_lerp(const v3f_t *a, const v3f_t *b, float t, v3f_t *res)
+static inline v3f_t * v3f_lerp(v3f_t *res, const v3f_t *a, const v3f_t *b, float t)
{
- *res = v3f_lerp(a, b, t);
+ if (_v3f_allocated(&res))
+ *res = _v3f_lerp(a, b, t);
return res;
}
-static inline v3f_t v3f_nlerp(const v3f_t *a, const v3f_t *b, float t)
+static inline v3f_t _v3f_nlerp(const v3f_t *a, const v3f_t *b, float t)
{
- v3f_t lerp = v3f_lerp(a, b, t);
+ v3f_t lerp = _v3f_lerp(a, b, t);
- return v3f_normalize(&lerp);
+ return _v3f_normalize(&lerp);
}
-static inline v3f_t * pv3f_nlerp(const v3f_t *a, const v3f_t *b, float t, v3f_t *res)
+static inline v3f_t * v3f_nlerp(v3f_t *res, const v3f_t *a, const v3f_t *b, float t)
{
- *res = v3f_nlerp(a, b, t);
+ if (_v3f_allocated(&res))
+ *res = _v3f_nlerp(a, b, t);
return res;
}
+
#endif