diff --git a/test/include/common.h b/test/include/common.h index d3d03a9..ac7059a 100644 --- a/test/include/common.h +++ b/test/include/common.h @@ -10,6 +10,7 @@ #include #include +#include #include #include @@ -102,13 +103,13 @@ typedef struct test_entry_t { } while(0); #if defined(_WIN32) -# define drand48() ((float)(rand() / (RAND_MAX + 1.0))) -# define OK_TEXT "ok:" -# define FAIL_TEXT "fail:" +# define drand48() ((float)(rand() / (RAND_MAX + 1.0))) +# define OK_TEXT "ok:" +# define FAIL_TEXT "fail:" # define FINAL_TEXT "^_^" #else -# define OK_TEXT "✔︎" -# define FAIL_TEXT "𐄂" +# define OK_TEXT "✔︎" +# define FAIL_TEXT "𐄂" # define FINAL_TEXT "🎉" #endif diff --git a/test/src/test_vec3.c b/test/src/test_vec3.c index 5db3b32..318d306 100644 --- a/test/src/test_vec3.c +++ b/test/src/test_vec3.c @@ -5,110 +5,22 @@ * Full license can be found in the LICENSE file */ -#include "test_common.h" +/* test inline mat3 */ -TEST_IMPL(vec3) { - mat3 rot1m3; - mat4 rot1; - vec3 v, v1, v2; - vec3s vs1, vs2, vs3, vs4; +#define GLM_PREFIX glm_ +#define GLM(X) (glm_ ## X) - /* test zero */ - glm_vec3_zero(v); - ASSERTIFY(test_assert_vec3_eq(GLM_VEC3_ZERO, v)) +#include "test_vec3.h" - /* test one */ - glm_vec3_one(v); - ASSERTIFY(test_assert_vec3_eq(GLM_VEC3_ONE, v)) +#undef GLM +#undef GLM_PREFIX - /* adds, subs, div, divs, mul */ - glm_vec3_add(v, GLM_VEC3_ONE, v); - ASSERT(glmc_vec3_eq_eps(v, 2)) +/* test pre-compiled mat3 */ - glm_vec3_adds(v, 10, v); - ASSERT(glmc_vec3_eq_eps(v, 12)) +#define GLM_PREFIX glmc_ +#define GLM(X) (glmc_ ## X) - glm_vec3_sub(v, GLM_VEC3_ONE, v); - ASSERT(glmc_vec3_eq_eps(v, 11)) +#include "test_vec3.h" - glm_vec3_subs(v, 1, v); - ASSERT(glmc_vec3_eq_eps(v, 10)) - - glm_vec3_broadcast(2, v1); - glm_vec3_div(v, v1, v); - ASSERT(glmc_vec3_eq_eps(v, 5)) - - glm_vec3_divs(v, 0.5, v); - ASSERT(glmc_vec3_eq_eps(v, 10)) - - glm_vec3_mul(v, v1, v); - ASSERT(glmc_vec3_eq_eps(v, 20)) - - glm_vec3_scale(v, 0.5, v); - ASSERT(glmc_vec3_eq_eps(v, 10)) - - 
glm_vec3_normalize_to(v, v1); - glm_vec3_scale(v1, 0.8f, v1); - glm_vec3_scale_as(v, 0.8f, v); - ASSERTIFY(test_assert_vec3_eq(v1, v)) - - /* addadd, subadd, muladd */ - glm_vec3_one(v); - - glm_vec3_addadd(GLM_VEC3_ONE, GLM_VEC3_ONE, v); - ASSERT(glmc_vec3_eq_eps(v, 3)) - - glm_vec3_subadd(GLM_VEC3_ONE, GLM_VEC3_ZERO, v); - ASSERT(glmc_vec3_eq_eps(v, 4)) - - glm_vec3_broadcast(2, v1); - glm_vec3_broadcast(3, v2); - glm_vec3_muladd(v1, v2, v); - ASSERT(glmc_vec3_eq_eps(v, 10)) - - /* rotate */ - glm_vec3_copy(GLM_YUP, v); - glm_rotate_make(rot1, glm_rad(90), GLM_XUP); - glm_vec3_rotate_m4(rot1, v, v1); - glm_mat4_pick3(rot1, rot1m3); - glm_vec3_rotate_m3(rot1m3, v, v2); - - ASSERTIFY(test_assert_vec3_eq(v1, v2)) - ASSERTIFY(test_assert_vec3_eq(v1, GLM_ZUP)) - - /* structs */ - vs1 = test_rand_vec3s(); - vs2 = test_rand_vec3s(); - - vs3 = glms_vec3_add(vs1, vs2); - vs4 = glms_vec3_maxv(vs1, vs3); - ASSERTIFY(test_assert_vec3s_eq(vs3, vs4)) - - /* swizzle */ - - /* ZYX */ - v1[0] = 1; - v1[1] = 2; - v1[2] = 3; - glm_vec3_swizzle(v1, GLM_ZYX, v1); - ASSERTIFY(test_assert_vec3_eq(v1, (vec3){3, 2, 1})) - - glm_vec3_swizzle(v1, GLM_XXX, v1); - ASSERTIFY(test_assert_vec3_eq(v1, (vec3){3, 3, 3})) - - v1[0] = 1; - v1[1] = 2; - v1[2] = 3; - - glm_vec3_swizzle(v1, GLM_YYY, v1); - ASSERTIFY(test_assert_vec3_eq(v1, (vec3){2, 2, 2})) - - v1[0] = 1; - v1[1] = 2; - v1[2] = 3; - - glm_vec3_swizzle(v1, GLM_ZZZ, v1); - ASSERTIFY(test_assert_vec3_eq(v1, (vec3){3, 3, 3})) - - TEST_SUCCESS -} +#undef GLM +#undef GLM_PREFIX diff --git a/test/src/test_vec3.h b/test/src/test_vec3.h new file mode 100644 index 0000000..ff60497 --- /dev/null +++ b/test/src/test_vec3.h @@ -0,0 +1,581 @@ +/* + * Copyright (c), Recep Aslantas. 
+ * + * MIT License (MIT), http://opensource.org/licenses/MIT + * Full license can be found in the LICENSE file + */ + +#include "test_common.h" + +#define TEST_GLM_SHUFFLE3(z, y, x) (((z) << 4) | ((y) << 2) | (x)) + +#ifndef CGLM_TEST_VEC3_ONCE +#define CGLM_TEST_VEC3_ONCE + +/* Macros */ + +TEST_IMPL(MACRO_GLM_VEC3_ONE_INIT) { + vec3 v = GLM_VEC3_ONE_INIT; + + ASSERT(glm_eq(v[0], 1.0f)) + ASSERT(glm_eq(v[1], 1.0f)) + ASSERT(glm_eq(v[2], 1.0f)) + + TEST_SUCCESS +} + +TEST_IMPL(MACRO_GLM_VEC3_ZERO_INIT) { + vec3 v = GLM_VEC3_ZERO_INIT; + + ASSERT(glm_eq(v[0], 0.0f)) + ASSERT(glm_eq(v[1], 0.0f)) + ASSERT(glm_eq(v[2], 0.0f)) + + TEST_SUCCESS +} + +TEST_IMPL(MACRO_GLM_VEC3_ONE) { + vec3 v = GLM_VEC3_ONE; + + ASSERT(glm_eq(v[0], 1.0f)) + ASSERT(glm_eq(v[1], 1.0f)) + ASSERT(glm_eq(v[2], 1.0f)) + + TEST_SUCCESS +} + +TEST_IMPL(MACRO_GLM_VEC3_ZERO) { + vec3 v = GLM_VEC3_ZERO; + + ASSERT(glm_eq(v[0], 0.0f)) + ASSERT(glm_eq(v[1], 0.0f)) + ASSERT(glm_eq(v[2], 0.0f)) + + TEST_SUCCESS +} + +TEST_IMPL(MACRO_GLM_YUP) { + vec3 v = GLM_YUP; + + ASSERT(glm_eq(v[0], 0.0f)) + ASSERT(glm_eq(v[1], 1.0f)) + ASSERT(glm_eq(v[2], 0.0f)) + + TEST_SUCCESS +} + +TEST_IMPL(MACRO_GLM_ZUP) { + vec3 v = GLM_ZUP; + + ASSERT(glm_eq(v[0], 0.0f)) + ASSERT(glm_eq(v[1], 0.0f)) + ASSERT(glm_eq(v[2], 1.0f)) + + TEST_SUCCESS +} + +TEST_IMPL(MACRO_GLM_XUP) { + vec3 v = GLM_XUP; + + ASSERT(glm_eq(v[0], 1.0f)) + ASSERT(glm_eq(v[1], 0.0f)) + ASSERT(glm_eq(v[2], 0.0f)) + + TEST_SUCCESS +} + +TEST_IMPL(MACRO_GLM_SHUFFLE3) { + ASSERT(TEST_GLM_SHUFFLE3(1, 0, 0) == GLM_SHUFFLE3(1, 0, 0)) + ASSERT(TEST_GLM_SHUFFLE3(0, 1, 0) == GLM_SHUFFLE3(0, 1, 0)) + ASSERT(TEST_GLM_SHUFFLE3(0, 0, 1) == GLM_SHUFFLE3(0, 0, 1)) + ASSERT(TEST_GLM_SHUFFLE3(1, 0, 0) == GLM_SHUFFLE3(1, 0, 0)) + ASSERT(TEST_GLM_SHUFFLE3(1, 0, 1) == GLM_SHUFFLE3(1, 0, 1)) + + TEST_SUCCESS +} + +TEST_IMPL(MACRO_GLM_XXX) { + ASSERT(TEST_GLM_SHUFFLE3(0, 0, 0) == GLM_XXX) + TEST_SUCCESS +} + +TEST_IMPL(MACRO_GLM_YYY) { + ASSERT(TEST_GLM_SHUFFLE3(1, 1, 1) == 
GLM_YYY) + TEST_SUCCESS +} + +TEST_IMPL(MACRO_GLM_ZZZ) { + ASSERT(TEST_GLM_SHUFFLE3(2, 2, 2) == GLM_ZZZ) + TEST_SUCCESS +} + +TEST_IMPL(MACRO_GLM_ZYX) { + ASSERT(TEST_GLM_SHUFFLE3(0, 1, 2) == GLM_ZYX) + TEST_SUCCESS +} + +/* Deprecated */ + +TEST_IMPL(MACRO_glm_vec3_dup) { + vec3 v1 = {13.0f, 12.0f, 11.0f}, v2; + + glm_vec3_dup(v1, v2); + + ASSERTIFY(test_assert_vec3_eq(v1, v2)) + + TEST_SUCCESS +} + +TEST_IMPL(MACRO_glm_vec3_flipsign) { + vec3 v1 = {13.0f, -12.0f, 11.0f}, + v2 = {-13.0f, 12.0f, -11.0f}; + + glm_vec3_flipsign(v1); + + ASSERTIFY(test_assert_vec3_eq(v1, v2)) + + TEST_SUCCESS +} + +TEST_IMPL(MACRO_glm_vec3_flipsign_to) { + vec3 v1 = {13.0f, -12.0f, 11.0f}, + v2 = {-13.0f, 12.0f, -11.0f}, + v3; + + glm_vec3_flipsign_to(v1, v3); + + ASSERTIFY(test_assert_vec3_eq(v2, v3)) + + TEST_SUCCESS +} + +TEST_IMPL(MACRO_glm_vec3_inv) { + vec3 v1 = {13.0f, -12.0f, 11.0f}, + v2 = {-13.0f, 12.0f, -11.0f}; + + glm_vec3_inv(v1); + + ASSERTIFY(test_assert_vec3_eq(v1, v2)) + + TEST_SUCCESS +} + +TEST_IMPL(MACRO_glm_vec3_inv_to) { + vec3 v1 = {13.0f, -12.0f, 11.0f}, + v2 = {-13.0f, 12.0f, -11.0f}, + v3; + + glm_vec3_inv_to(v1, v3); + + ASSERTIFY(test_assert_vec3_eq(v2, v3)) + + TEST_SUCCESS +} + +TEST_IMPL(MACRO_glm_vec3_mulv) { + vec3 v1 = {2.0f, -3.0f, 4.0f}, + v2 = {-3.0f, 4.0f, -5.0f}, + v3; + + glm_vec3_mulv(v1, v2, v3); + + ASSERT(glm_eq(v1[0] * v2[0], v3[0])) + ASSERT(glm_eq(v1[1] * v2[1], v3[1])) + ASSERT(glm_eq(v1[2] * v2[2], v3[2])) + + TEST_SUCCESS +} + +#endif /* CGLM_TEST_VEC3_ONCE */ + +/* --- */ + +TEST_IMPL(GLM_PREFIX, vec3) { + vec4 v4 = {10.0f, 9.0f, 8.0f, 7.0f}; + vec3 v3; + + GLM(vec3)(v4, v3); + + ASSERT(glm_eq(v3[0], v4[0])) + ASSERT(glm_eq(v3[1], v4[1])) + ASSERT(glm_eq(v3[2], v4[2])) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_copy) { + vec3 v1 = {10.0f, 9.0f, 8.0f}; + vec3 v2 = {1.0f, 2.0f, 3.0f}; + + GLM(vec3_copy)(v1, v2); + + ASSERTIFY(test_assert_vec3_eq(v1, v2)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_zero) { + vec3 v1 = 
{10.0f, 9.0f, 8.0f}; + vec3 v2 = {1.0f, 2.0f, 3.0f}; + + GLM(vec3_zero)(v1); + GLM(vec3_zero)(v2); + + ASSERTIFY(test_assert_vec3_eq(v1, GLM_VEC3_ZERO)) + ASSERTIFY(test_assert_vec3_eq(v2, GLM_VEC3_ZERO)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_one) { + vec3 v1 = {10.0f, 9.0f, 8.0f}; + vec3 v2 = {1.0f, 2.0f, 3.0f}; + + GLM(vec3_one)(v1); + GLM(vec3_one)(v2); + + ASSERTIFY(test_assert_vec3_eq(v1, GLM_VEC3_ONE)) + ASSERTIFY(test_assert_vec3_eq(v2, GLM_VEC3_ONE)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_dot) { + vec3 a = {10.0f, 9.0f, 8.0f}; + vec3 b = {1.0f, 2.0f, 3.0f}; + float dot1, dot2; + + dot1 = GLM(vec3_dot)(a, b); + dot2 = a[0] * b[0] + a[1] * b[1] + a[2] * b[2]; + + ASSERT(glm_eq(dot1, dot2)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_norm2) { + vec3 a = {10.0f, 9.0f, 8.0f}; + float n1, n2; + + n1 = GLM(vec3_norm2)(a); + n2 = a[0] * a[0] + a[1] * a[1] + a[2] * a[2]; + + ASSERT(glm_eq(n1, n2)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_norm) { + vec3 a = {10.0f, 9.0f, 8.0f}; + float n1, n2; + + n1 = GLM(vec3_norm)(a); + n2 = sqrtf(a[0] * a[0] + a[1] * a[1] + a[2] * a[2]); + + ASSERT(glm_eq(n1, n2)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_norm_one) { + vec3 a = {-10.0f, 9.0f, -8.0f}; + float n1, n2; + + n1 = GLM(vec3_norm_one)(a); + n2 = fabsf(a[0]) + fabsf(a[1]) + fabsf(a[2]); + + ASSERT(glm_eq(n1, n2)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_norm_inf) { + vec3 a = {-10.0f, 9.0f, -8.0f}; + float n1, n2; + + n1 = GLM(vec3_norm_inf)(a); + n2 = fabsf(a[0]); + + if (n2 < fabsf(a[1])) + n2 = fabsf(a[1]); + + if (n2 < fabsf(a[2])) + n2 = fabsf(a[2]); + + ASSERT(glm_eq(n1, n2)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_add) { + vec4 a = {-10.0f, 9.0f, -8.0f}; + vec4 b = {12.0f, 19.0f, -18.0f}; + vec4 c, d; + + c[0] = a[0] + b[0]; + c[1] = a[1] + b[1]; + c[2] = a[2] + b[2]; + + GLM(vec3_add)(a, b, d); + + ASSERTIFY(test_assert_vec3_eq(c, d)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX,
vec3_adds) { + vec4 a = {-10.0f, 9.0f, -8.0f}; + vec4 c, d; + float s = 7.0f; + + c[0] = a[0] + s; + c[1] = a[1] + s; + c[2] = a[2] + s; + + GLM(vec3_adds)(a, s, d); + + ASSERTIFY(test_assert_vec3_eq(c, d)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_sub) { + vec4 a = {-10.0f, 9.0f, -8.0f}; + vec4 b = {12.0f, 19.0f, -18.0f}; + vec4 c, d; + + c[0] = a[0] - b[0]; + c[1] = a[1] - b[1]; + c[2] = a[2] - b[2]; + + GLM(vec3_sub)(a, b, d); + + ASSERTIFY(test_assert_vec3_eq(c, d)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_subs) { + vec4 a = {-10.0f, 9.0f, -8.0f}; + vec4 c, d; + float s = 7.0f; + + c[0] = a[0] - s; + c[1] = a[1] - s; + c[2] = a[2] - s; + + GLM(vec3_subs)(a, s, d); + + ASSERTIFY(test_assert_vec3_eq(c, d)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_mul) { + vec3 v1 = {2.0f, -3.0f, 4.0f}, + v2 = {-3.0f, 4.0f, -5.0f}, + v3; + + GLM(vec3_mul)(v1, v2, v3); + + ASSERT(glm_eq(v1[0] * v2[0], v3[0])) + ASSERT(glm_eq(v1[1] * v2[1], v3[1])) + ASSERT(glm_eq(v1[2] * v2[2], v3[2])) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_scale) { + vec3 v1 = {2.0f, -3.0f, 4.0f}, v2; + float s = 7.0f; + + GLM(vec3_scale)(v1, s, v2); + + ASSERT(glm_eq(v1[0] * s, v2[0])) + ASSERT(glm_eq(v1[1] * s, v2[1])) + ASSERT(glm_eq(v1[2] * s, v2[2])) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_scale_as) { + vec3 v1 = {2.0f, -3.0f, 4.0f}, v2; + float s = 7.0f; + float norm; + + GLM(vec3_scale_as)(v1, s, v2); + + norm = sqrtf(v1[0] * v1[0] + v1[1] * v1[1] + v1[2] * v1[2]); + if (norm == 0.0f) { + ASSERT(glm_eq(v1[0], 0.0f)) + ASSERT(glm_eq(v1[1], 0.0f)) + ASSERT(glm_eq(v1[2], 0.0f)) + + TEST_SUCCESS + } + + norm = s / norm; + + ASSERT(glm_eq(v1[0] * norm, v2[0])) + ASSERT(glm_eq(v1[1] * norm, v2[1])) + ASSERT(glm_eq(v1[2] * norm, v2[2])) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_div) { + vec3 v1 = {2.0f, -3.0f, 4.0f}, + v2 = {-3.0f, 4.0f, -5.0f}, + v3; + + GLM(vec3_div)(v1, v2, v3); + + ASSERT(glm_eq(v1[0] / v2[0], v3[0])) + ASSERT(glm_eq(v1[1] / v2[1], v3[1])) 
+ ASSERT(glm_eq(v1[2] / v2[2], v3[2])) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_divs) { + vec3 v1 = {2.0f, -3.0f, 4.0f}, v2; + float s = 7.0f; + + GLM(vec3_divs)(v1, s, v2); + + ASSERT(glm_eq(v1[0] / s, v2[0])) + ASSERT(glm_eq(v1[1] / s, v2[1])) + ASSERT(glm_eq(v1[2] / s, v2[2])) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_addadd) { + vec3 v1 = {2.0f, -3.0f, 4.0f}, + v2 = {-3.0f, 4.0f, -5.0f}, + v3 = {1.0f, 2.0f, 3.0f}, + v4 = {1.0f, 2.0f, 3.0f}; + + GLM(vec3_addadd)(v1, v2, v4); + + ASSERT(glm_eq(v3[0] + v1[0] + v2[0], v4[0])) + ASSERT(glm_eq(v3[1] + v1[1] + v2[1], v4[1])) + ASSERT(glm_eq(v3[2] + v1[2] + v2[2], v4[2])) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_subadd) { + vec3 v1 = {2.0f, -3.0f, 4.0f}, + v2 = {-3.0f, 4.0f, -5.0f}, + v3 = {1.0f, 2.0f, 3.0f}, + v4 = {1.0f, 2.0f, 3.0f}; + + GLM(vec3_subadd)(v1, v2, v4); + + ASSERT(glm_eq(v3[0] + v1[0] - v2[0], v4[0])) + ASSERT(glm_eq(v3[1] + v1[1] - v2[1], v4[1])) + ASSERT(glm_eq(v3[2] + v1[2] - v2[2], v4[2])) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_muladd) { + vec3 v1 = {2.0f, -3.0f, 4.0f}, + v2 = {-3.0f, 4.0f, -5.0f}, + v3 = {1.0f, 2.0f, 3.0f}, + v4 = {1.0f, 2.0f, 3.0f}; + + GLM(vec3_muladd)(v1, v2, v4); + + ASSERT(glm_eq(v3[0] + v1[0] * v2[0], v4[0])) + ASSERT(glm_eq(v3[1] + v1[1] * v2[1], v4[1])) + ASSERT(glm_eq(v3[2] + v1[2] * v2[2], v4[2])) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_muladds) { + vec3 v1 = {2.0f, -3.0f, 4.0f}, + v2 = {1.0f, 2.0f, 3.0f}, + v3 = {1.0f, 2.0f, 3.0f}; + float s = 9.0f; + + GLM(vec3_muladds)(v1, s, v3); + + ASSERT(glm_eq(v2[0] + v1[0] * s, v3[0])) + ASSERT(glm_eq(v2[1] + v1[1] * s, v3[1])) + ASSERT(glm_eq(v2[2] + v1[2] * s, v3[2])) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_maxadd) { + vec3 v1 = {2.0f, -3.0f, 4.0f}, + v2 = {-3.0f, 4.0f, -5.0f}, + v3 = {1.0f, 2.0f, 3.0f}, + v4 = {1.0f, 2.0f, 3.0f}; + + GLM(vec3_maxadd)(v1, v2, v4); + + ASSERT(glm_eq(v3[0] + glm_max(v1[0], v2[0]), v4[0])) + ASSERT(glm_eq(v3[1] + glm_max(v1[1], v2[1]), 
v4[1])) + ASSERT(glm_eq(v3[2] + glm_max(v1[2], v2[2]), v4[2])) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_minadd) { + vec3 v1 = {2.0f, -3.0f, 4.0f}, + v2 = {-3.0f, 4.0f, -5.0f}, + v3 = {1.0f, 2.0f, 3.0f}, + v4 = {1.0f, 2.0f, 3.0f}; + + GLM(vec3_minadd)(v1, v2, v4); + + ASSERT(glm_eq(v3[0] + glm_min(v1[0], v2[0]), v4[0])) + ASSERT(glm_eq(v3[1] + glm_min(v1[1], v2[1]), v4[1])) + ASSERT(glm_eq(v3[2] + glm_min(v1[2], v2[2]), v4[2])) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_negate_to) { + vec3 v1 = {2.0f, -3.0f, 4.0f}, + v2 = {-3.0f, 4.0f, -5.0f}, + v3, v4; + + GLM(vec3_negate_to)(v1, v3); + GLM(vec3_negate_to)(v2, v4); + + ASSERT(glm_eq(-v1[0], v3[0])) + ASSERT(glm_eq(-v1[1], v3[1])) + ASSERT(glm_eq(-v1[2], v3[2])) + + ASSERT(glm_eq(-v2[0], v4[0])) + ASSERT(glm_eq(-v2[1], v4[1])) + ASSERT(glm_eq(-v2[2], v4[2])) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_negate) { + vec3 v1 = {2.0f, -3.0f, 4.0f}, + v2 = {-3.0f, 4.0f, -5.0f}, + v3 = {2.0f, -3.0f, 4.0f}, + v4 = {-3.0f, 4.0f, -5.0f}; + + GLM(vec3_negate)(v1); + GLM(vec3_negate)(v2); + + ASSERT(glm_eq(-v1[0], v3[0])) + ASSERT(glm_eq(-v1[1], v3[1])) + ASSERT(glm_eq(-v1[2], v3[2])) + + ASSERT(glm_eq(-v2[0], v4[0])) + ASSERT(glm_eq(-v2[1], v4[1])) + ASSERT(glm_eq(-v2[2], v4[2])) + + TEST_SUCCESS +} + diff --git a/test/src/tests-test_mat4.gcno b/test/src/tests-test_mat4.gcno deleted file mode 100644 index d58d103..0000000 Binary files a/test/src/tests-test_mat4.gcno and /dev/null differ diff --git a/test/tests.h b/test/tests.h index a3bf582..1cded8e 100644 --- a/test/tests.h +++ b/test/tests.h @@ -127,7 +127,79 @@ TEST_DECLARE(quat) TEST_DECLARE(bezier) /* vec3 */ -TEST_DECLARE(vec3) +TEST_DECLARE(MACRO_GLM_VEC3_ONE_INIT) +TEST_DECLARE(MACRO_GLM_VEC3_ZERO_INIT) +TEST_DECLARE(MACRO_GLM_VEC3_ONE) +TEST_DECLARE(MACRO_GLM_VEC3_ZERO) +TEST_DECLARE(MACRO_GLM_YUP) +TEST_DECLARE(MACRO_GLM_ZUP) +TEST_DECLARE(MACRO_GLM_XUP) +TEST_DECLARE(MACRO_GLM_SHUFFLE3) +TEST_DECLARE(MACRO_GLM_XXX) +TEST_DECLARE(MACRO_GLM_YYY) 
+TEST_DECLARE(MACRO_GLM_ZZZ) +TEST_DECLARE(MACRO_GLM_ZYX) + +TEST_DECLARE(MACRO_glm_vec3_dup) +TEST_DECLARE(MACRO_glm_vec3_flipsign) +TEST_DECLARE(MACRO_glm_vec3_flipsign_to) +TEST_DECLARE(MACRO_glm_vec3_inv) +TEST_DECLARE(MACRO_glm_vec3_inv_to) +TEST_DECLARE(MACRO_glm_vec3_mulv) + +TEST_DECLARE(glm_vec3) +TEST_DECLARE(glm_vec3_copy) +TEST_DECLARE(glm_vec3_zero) +TEST_DECLARE(glm_vec3_one) +TEST_DECLARE(glm_vec3_dot) +TEST_DECLARE(glm_vec3_norm2) +TEST_DECLARE(glm_vec3_norm) +TEST_DECLARE(glm_vec3_norm_one) +TEST_DECLARE(glm_vec3_norm_inf) +TEST_DECLARE(glm_vec3_add) +TEST_DECLARE(glm_vec3_adds) +TEST_DECLARE(glm_vec3_sub) +TEST_DECLARE(glm_vec3_subs) +TEST_DECLARE(glm_vec3_mul) +TEST_DECLARE(glm_vec3_scale) +TEST_DECLARE(glm_vec3_scale_as) +TEST_DECLARE(glm_vec3_div) +TEST_DECLARE(glm_vec3_divs) +TEST_DECLARE(glm_vec3_addadd) +TEST_DECLARE(glm_vec3_subadd) +TEST_DECLARE(glm_vec3_muladd) +TEST_DECLARE(glm_vec3_muladds) +TEST_DECLARE(glm_vec3_maxadd) +TEST_DECLARE(glm_vec3_minadd) +TEST_DECLARE(glm_vec3_negate_to) +TEST_DECLARE(glm_vec3_negate) + +TEST_DECLARE(glmc_vec3) +TEST_DECLARE(glmc_vec3_copy) +TEST_DECLARE(glmc_vec3_zero) +TEST_DECLARE(glmc_vec3_one) +TEST_DECLARE(glmc_vec3_dot) +TEST_DECLARE(glmc_vec3_norm2) +TEST_DECLARE(glmc_vec3_norm) +TEST_DECLARE(glmc_vec3_norm_one) +TEST_DECLARE(glmc_vec3_norm_inf) +TEST_DECLARE(glmc_vec3_add) +TEST_DECLARE(glmc_vec3_adds) +TEST_DECLARE(glmc_vec3_sub) +TEST_DECLARE(glmc_vec3_subs) +TEST_DECLARE(glmc_vec3_mul) +TEST_DECLARE(glmc_vec3_scale) +TEST_DECLARE(glmc_vec3_scale_as) +TEST_DECLARE(glmc_vec3_div) +TEST_DECLARE(glmc_vec3_divs) +TEST_DECLARE(glmc_vec3_addadd) +TEST_DECLARE(glmc_vec3_subadd) +TEST_DECLARE(glmc_vec3_muladd) +TEST_DECLARE(glmc_vec3_muladds) +TEST_DECLARE(glmc_vec3_maxadd) +TEST_DECLARE(glmc_vec3_minadd) +TEST_DECLARE(glmc_vec3_negate_to) +TEST_DECLARE(glmc_vec3_negate) /* vec4 */ TEST_DECLARE(vec4) @@ -246,8 +318,84 @@ TEST_LIST { TEST_ENTRY(bezier) /* vec3 */ - TEST_ENTRY(vec3) + /* Macros */ + + 
TEST_ENTRY(MACRO_GLM_VEC3_ONE_INIT) + TEST_ENTRY(MACRO_GLM_VEC3_ZERO_INIT) + TEST_ENTRY(MACRO_GLM_VEC3_ONE) + TEST_ENTRY(MACRO_GLM_VEC3_ZERO) + TEST_ENTRY(MACRO_GLM_YUP) + TEST_ENTRY(MACRO_GLM_ZUP) + TEST_ENTRY(MACRO_GLM_XUP) + TEST_ENTRY(MACRO_GLM_SHUFFLE3) + TEST_ENTRY(MACRO_GLM_XXX) + TEST_ENTRY(MACRO_GLM_YYY) + TEST_ENTRY(MACRO_GLM_ZZZ) + TEST_ENTRY(MACRO_GLM_ZYX) + + TEST_ENTRY(MACRO_glm_vec3_dup) + TEST_ENTRY(MACRO_glm_vec3_flipsign) + TEST_ENTRY(MACRO_glm_vec3_flipsign_to) + TEST_ENTRY(MACRO_glm_vec3_inv) + TEST_ENTRY(MACRO_glm_vec3_inv_to) + TEST_ENTRY(MACRO_glm_vec3_mulv) + + TEST_ENTRY(glm_vec3) + TEST_ENTRY(glm_vec3_copy) + TEST_ENTRY(glm_vec3_zero) + TEST_ENTRY(glm_vec3_one) + TEST_ENTRY(glm_vec3_dot) + TEST_ENTRY(glm_vec3_norm2) + TEST_ENTRY(glm_vec3_norm) + TEST_ENTRY(glm_vec3_norm_one) + TEST_ENTRY(glm_vec3_norm_inf) + TEST_ENTRY(glm_vec3_add) + TEST_ENTRY(glm_vec3_adds) + TEST_ENTRY(glm_vec3_sub) + TEST_ENTRY(glm_vec3_subs) + TEST_ENTRY(glm_vec3_mul) + TEST_ENTRY(glm_vec3_scale) + TEST_ENTRY(glm_vec3_scale_as) + TEST_ENTRY(glm_vec3_div) + TEST_ENTRY(glm_vec3_divs) + TEST_ENTRY(glm_vec3_addadd) + TEST_ENTRY(glm_vec3_subadd) + TEST_ENTRY(glm_vec3_muladd) + TEST_ENTRY(glm_vec3_muladds) + TEST_ENTRY(glm_vec3_maxadd) + TEST_ENTRY(glm_vec3_minadd) + TEST_ENTRY(glm_vec3_negate_to) + TEST_ENTRY(glm_vec3_negate) + + + TEST_ENTRY(glmc_vec3) + TEST_ENTRY(glmc_vec3_copy) + TEST_ENTRY(glmc_vec3_zero) + TEST_ENTRY(glmc_vec3_one) + TEST_ENTRY(glmc_vec3_dot) + TEST_ENTRY(glmc_vec3_norm2) + TEST_ENTRY(glmc_vec3_norm) + TEST_ENTRY(glmc_vec3_norm_one) + TEST_ENTRY(glmc_vec3_norm_inf) + TEST_ENTRY(glmc_vec3_add) + TEST_ENTRY(glmc_vec3_adds) + TEST_ENTRY(glmc_vec3_sub) + TEST_ENTRY(glmc_vec3_subs) + TEST_ENTRY(glmc_vec3_mul) + TEST_ENTRY(glmc_vec3_scale) + TEST_ENTRY(glmc_vec3_scale_as) + TEST_ENTRY(glmc_vec3_div) + TEST_ENTRY(glmc_vec3_divs) + TEST_ENTRY(glmc_vec3_addadd) + TEST_ENTRY(glmc_vec3_subadd) + TEST_ENTRY(glmc_vec3_muladd) + TEST_ENTRY(glmc_vec3_muladds) + 
TEST_ENTRY(glmc_vec3_maxadd) + TEST_ENTRY(glmc_vec3_minadd) + TEST_ENTRY(glmc_vec3_negate_to) + TEST_ENTRY(glmc_vec3_negate) + /* vec4 */ TEST_ENTRY(vec4) };