author	Vito Caputo <vcaputo@pengaru.com>	2019-05-06 22:17:49 -0700
committer	Vito Caputo <vcaputo@pengaru.com>	2019-05-06 22:17:49 -0700
commit	7de752ffcc3f357001207c80dcef2fa12608a806 (patch)
tree	061b996d40db0a48a635a6cd231586b5f52d504a /v2f.h
parent	4d2cdb780fc99befd4fb0542ef0756965b13af3e (diff)
v2f: mechanical rename and handle NULL res ptrs
The return-by-value variants of the vector-returning functions have been renamed with a leading _. The return-by-pointer variants now carry no prefix, since they're expected to be the more commonly used forms in practice.

The return-by-pointer variants also now take the result pointer as the first argument, and a NULL result pointer is handled by dynamically allocating the result memory.
Diffstat (limited to 'v2f.h')
-rw-r--r--	v2f.h	116
1 file changed, 85 insertions(+), 31 deletions(-)
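
For illustration, here is a minimal caller sketch against the renamed interface (not part of the commit; the include path, variable names, and values are assumed, and any of the renamed vector-result functions behaves the same way):

#include <stdio.h>
#include <stdlib.h>

#include "v2f.h"

int main(void)
{
	v2f_t	diff, *dynamic;

	/* caller-supplied result memory: "res" is now the first argument and is returned */
	v2f_sub(&diff, &(v2f_t){3.f, 4.f}, &(v2f_t){1.f, 1.f});

	/* NULL "res": the result is heap-allocated, so check for OOM and free it */
	dynamic = v2f_sub(NULL, &(v2f_t){3.f, 4.f}, &(v2f_t){1.f, 1.f});
	if (!dynamic)
		return 1;

	printf("%f,%f %f,%f\n", diff.x, diff.y, dynamic->x, dynamic->y);
	free(dynamic);

	return 0;
}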
diff --git a/v2f.h b/v2f.h
index 46731bd..9fd3b3f 100644
--- a/v2f.h
+++ b/v2f.h
@@ -14,81 +14,132 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+/*
+ * 2D vector operations header
+ *
+ * Variants prefixed with _ return a result vector struct by value.
+ *
+ * Variants without the _ prefix return a result pointer for vector-result
+ * operations.  They take the result memory as the first "res" argument and
+ * return it after populating it with the result, or, when "res" is NULL,
+ * they allocate the result memory themselves and return that pointer after
+ * populating it.  Because the NULL-"res" case must allocate memory, it may
+ * return NULL on OOM, so callers supplying NULL for "res" should check for
+ * a NULL return.
+ *
+ * Example:
+ * v2f_t foo, *foop;
+ *
+ * foo = _v2f_mult(&(v2f_t){1.f,1.f}, &(v2f_t){2.f,2.f});
+ *
+ * is equivalent to:
+ *
+ * v2f_mult(&foo, &(v2f_t){1.f,1.f}, &(v2f_t){2.f,2.f});
+ *
+ * or dynamically allocated:
+ *
+ * foop = v2f_mult(NULL, &(v2f_t){1.f,1.f}, &(v2f_t){2.f,2.f});
+ * free(foop);
+ *
+ * is equivalent to:
+ *
+ * foop = malloc(sizeof(v2f_t));
+ * v2f_mult(foop, &(v2f_t){1.f,1.f}, &(v2f_t){2.f,2.f});
+ * free(foop);
+ */
+
#ifndef _V2F_H
#define _V2F_H
+
#include <math.h>
+#include <stdlib.h>
+
typedef struct v2f_t {
float x, y;
} v2f_t;
-static inline v2f_t v2f_add(const v2f_t *a, const v2f_t *b)
+static inline v2f_t * _v2f_allocated(v2f_t **ptr)
+{
+ if (!*ptr)
+ *ptr = malloc(sizeof(v2f_t));
+
+ return *ptr;
+}
+
+
+static inline v2f_t _v2f_add(const v2f_t *a, const v2f_t *b)
{
return (v2f_t){a->x + b->x, a->y + b->y};
}
-static inline v2f_t * pv2f_add(const v2f_t *a, const v2f_t *b, v2f_t *res)
+static inline v2f_t * v2f_add(v2f_t *res, const v2f_t *a, const v2f_t *b)
{
- *res = v2f_add(a, b);
+ if (_v2f_allocated(&res))
+ *res = _v2f_add(a, b);
return res;
}
-static inline v2f_t v2f_sub(const v2f_t *a, const v2f_t *b)
+static inline v2f_t _v2f_sub(const v2f_t *a, const v2f_t *b)
{
return (v2f_t){a->x - b->x, a->y - b->y};
}
-static inline v2f_t * pv2f_sub(const v2f_t *a, const v2f_t *b, v2f_t *res)
+static inline v2f_t * v2f_sub(v2f_t *res, const v2f_t *a, const v2f_t *b)
{
- *res = v2f_sub(a, b);
+ if (_v2f_allocated(&res))
+ *res = _v2f_sub(a, b);
return res;
}
-static inline v2f_t v2f_mult(const v2f_t *a, const v2f_t *b)
+static inline v2f_t _v2f_mult(const v2f_t *a, const v2f_t *b)
{
return (v2f_t){a->x * b->x, a->y * b->y};
}
-static inline v2f_t * pv2f_mult(const v2f_t *a, const v2f_t *b, v2f_t *res)
+static inline v2f_t * v2f_mult(v2f_t *res, const v2f_t *a, const v2f_t *b)
{
- *res = v2f_mult(a, b);
+ if (_v2f_allocated(&res))
+ *res = _v2f_mult(a, b);
return res;
}
-static inline v2f_t v2f_mult_scalar(const v2f_t *v, float scalar)
+static inline v2f_t _v2f_mult_scalar(const v2f_t *v, float scalar)
{
return (v2f_t){ v->x * scalar, v->y * scalar };
}
-static inline v2f_t * pv2f_mult_scalar(const v2f_t *v, float scalar, v2f_t *res)
+static inline v2f_t * v2f_mult_scalar(v2f_t *res, const v2f_t *v, float scalar)
{
- *res = v2f_mult_scalar(v, scalar);
+ if (_v2f_allocated(&res))
+ *res = _v2f_mult_scalar(v, scalar);
return res;
}
-static inline v2f_t v2f_div_scalar(const v2f_t *v, float scalar)
+static inline v2f_t _v2f_div_scalar(const v2f_t *v, float scalar)
{
- return v2f_mult_scalar(v, 1.f / scalar);
+ return _v2f_mult_scalar(v, 1.f / scalar);
}
-static inline v2f_t * pv2f_div_scalar(const v2f_t *v, float scalar, v2f_t *res)
+static inline v2f_t * v2f_div_scalar(v2f_t *res, const v2f_t *v, float scalar)
{
- *res = v2f_div_scalar(v, scalar);
+ if (_v2f_allocated(&res))
+ *res = _v2f_div_scalar(v, scalar);
return res;
}
@@ -106,50 +157,53 @@ static inline float v2f_length(const v2f_t *v)
}
-static inline v2f_t v2f_normalize(const v2f_t *v)
+static inline v2f_t _v2f_normalize(const v2f_t *v)
{
- return v2f_mult_scalar(v, 1.0f / v2f_length(v));
+ return _v2f_mult_scalar(v, 1.0f / v2f_length(v));
}
-static inline v2f_t * pv2f_normalize(const v2f_t *v, v2f_t *res)
+static inline v2f_t * v2f_normalize(v2f_t *res, const v2f_t *v)
{
- *res = v2f_normalize(v);
+ if (_v2f_allocated(&res))
+ *res = _v2f_normalize(v);
return res;
}
-static inline v2f_t v2f_lerp(const v2f_t *a, const v2f_t *b, float t)
+static inline v2f_t _v2f_lerp(const v2f_t *a, const v2f_t *b, float t)
{
v2f_t lerp_a, lerp_b;
- lerp_a = v2f_mult_scalar(a, 1.0f - t);
- lerp_b = v2f_mult_scalar(b, t);
+ lerp_a = _v2f_mult_scalar(a, 1.0f - t);
+ lerp_b = _v2f_mult_scalar(b, t);
- return v2f_add(&lerp_a, &lerp_b);
+ return _v2f_add(&lerp_a, &lerp_b);
}
-static inline v2f_t * pv2f_lerp(const v2f_t *a, const v2f_t *b, float t, v2f_t *res)
+static inline v2f_t * v2f_lerp(v2f_t *res, const v2f_t *a, const v2f_t *b, float t)
{
- *res = v2f_lerp(a, b, t);
+ if (_v2f_allocated(&res))
+ *res = _v2f_lerp(a, b, t);
return res;
}
-static inline v2f_t v2f_nlerp(const v2f_t *a, const v2f_t *b, float t)
+static inline v2f_t _v2f_nlerp(const v2f_t *a, const v2f_t *b, float t)
{
- v2f_t lerp = v2f_lerp(a, b, t);
+ v2f_t lerp = _v2f_lerp(a, b, t);
- return v2f_normalize(&lerp);
+ return _v2f_normalize(&lerp);
}
-static inline v2f_t * pv2f_nlerp(const v2f_t *a, const v2f_t *b, float t, v2f_t *res)
+static inline v2f_t * v2f_nlerp(v2f_t *res, const v2f_t *a, const v2f_t *b, float t)
{
- *res = v2f_nlerp(a, b, t);
+ if (_v2f_allocated(&res))
+ *res = _v2f_nlerp(a, b, t);
return res;
}
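
As a closing usage note (a hedged sketch, illustrative only and not part of the commit; the variable names and values below are assumed), the two variant families compose as follows: the _-prefixed forms return plain values, while the pointer forms hand back their populated "res", which lets calls nest directly.

#include "v2f.h"

/* illustrative only: normalize the sum of two vectors, both ways */
static v2f_t example(void)
{
	v2f_t	a = {1.f, 2.f}, b = {3.f, 4.f}, out, sum, unit;

	/* _-prefixed, return-by-value forms: results are plain stack values */
	sum = _v2f_add(&a, &b);
	unit = _v2f_normalize(&sum);

	/* pointer forms: the returned "res" can feed the next call directly */
	v2f_normalize(&out, v2f_add(&out, &a, &b));

	/* out and unit now hold the same normalized vector */
	(void)unit;

	return out;
}

Returning "res" from the pointer variants is what makes the nested call above possible without naming an intermediate.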