simd, sse2: make alignment optional for store operations

Author:  Recep Aslantas
Date:    2018-05-08 15:31:09 +03:00
Parent:  252bf925fc
Commit:  568001d26a
8 changed files with 143 additions and 144 deletions
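
The change routes every SSE store below through a glmm_store wrapper, so alignment becomes a single compile-time choice instead of being hard-coded at each call site via the aligned _mm_store_ps. A minimal sketch of the idea follows; the CGLM_ALL_UNALIGNED switch name and the exact macro shape are assumptions for illustration, not taken from this diff:

    /* sketch only: the real definitions live in cglm's SIMD intrinsics header */
    #include <xmmintrin.h>  /* SSE: _mm_store_ps, _mm_storeu_ps */

    #ifdef CGLM_ALL_UNALIGNED
    /* unaligned store: valid for any float* destination */
    #  define glmm_store(p, a) _mm_storeu_ps(p, a)
    #else
    /* aligned store: destination must be 16-byte aligned */
    #  define glmm_store(p, a) _mm_store_ps(p, a)
    #endif

With a wrapper like this in place, each call site changes mechanically from _mm_store_ps(dst, x) to glmm_store(dst, x), mirroring the glmm_load wrapper the code already uses for loads.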

View File

@@ -58,19 +58,19 @@ glm_translate_to(mat4 m, vec3 v, mat4 dest) {
   mat4 t = GLM_MAT4_IDENTITY_INIT;
 #if defined( __SSE__ ) || defined( __SSE2__ )
-  _mm_store_ps(dest[3],
-               _mm_add_ps(_mm_add_ps(_mm_mul_ps(glmm_load(t[0]),
-                                                _mm_set1_ps(v[0])),
-                                     _mm_mul_ps(glmm_load(t[1]),
-                                                _mm_set1_ps(v[1]))),
-                          _mm_add_ps(_mm_mul_ps(glmm_load(t[2]),
-                                                _mm_set1_ps(v[2])),
-                                     glmm_load(t[3]))))
+  glmm_store(dest[3],
+             _mm_add_ps(_mm_add_ps(_mm_mul_ps(glmm_load(t[0]),
+                                              _mm_set1_ps(v[0])),
+                                   _mm_mul_ps(glmm_load(t[1]),
+                                              _mm_set1_ps(v[1]))),
+                        _mm_add_ps(_mm_mul_ps(glmm_load(t[2]),
+                                              _mm_set1_ps(v[2])),
+                                   glmm_load(t[3]))))
   ;
-  _mm_store_ps(dest[0], glmm_load(m[0]));
-  _mm_store_ps(dest[1], glmm_load(m[1]));
-  _mm_store_ps(dest[2], glmm_load(m[2]));
+  glmm_store(dest[0], glmm_load(m[0]));
+  glmm_store(dest[1], glmm_load(m[1]));
+  glmm_store(dest[2], glmm_load(m[2]));
 #else
   vec4 v1, v2, v3;
@@ -97,14 +97,14 @@ CGLM_INLINE
 void
 glm_translate(mat4 m, vec3 v) {
 #if defined( __SSE__ ) || defined( __SSE2__ )
-  _mm_store_ps(m[3],
-               _mm_add_ps(_mm_add_ps(_mm_mul_ps(glmm_load(m[0]),
-                                                _mm_set1_ps(v[0])),
-                                     _mm_mul_ps(glmm_load(m[1]),
-                                                _mm_set1_ps(v[1]))),
-                          _mm_add_ps(_mm_mul_ps(glmm_load(m[2]),
-                                                _mm_set1_ps(v[2])),
-                                     glmm_load(m[3]))))
+  glmm_store(m[3],
+             _mm_add_ps(_mm_add_ps(_mm_mul_ps(glmm_load(m[0]),
+                                              _mm_set1_ps(v[0])),
+                                   _mm_mul_ps(glmm_load(m[1]),
+                                              _mm_set1_ps(v[1]))),
+                        _mm_add_ps(_mm_mul_ps(glmm_load(m[2]),
+                                              _mm_set1_ps(v[2])),
+                                   glmm_load(m[3]))))
   ;
 #else
   vec4 v1, v2, v3;
@@ -129,10 +129,10 @@ CGLM_INLINE
 void
 glm_translate_x(mat4 m, float x) {
 #if defined( __SSE__ ) || defined( __SSE2__ )
-  _mm_store_ps(m[3],
-               _mm_add_ps(_mm_mul_ps(glmm_load(m[0]),
-                                     _mm_set1_ps(x)),
-                          glmm_load(m[3])))
+  glmm_store(m[3],
+             _mm_add_ps(_mm_mul_ps(glmm_load(m[0]),
+                                   _mm_set1_ps(x)),
+                        glmm_load(m[3])))
   ;
 #else
   vec4 v1;
@@ -151,10 +151,10 @@ CGLM_INLINE
 void
 glm_translate_y(mat4 m, float y) {
 #if defined( __SSE__ ) || defined( __SSE2__ )
-  _mm_store_ps(m[3],
-               _mm_add_ps(_mm_mul_ps(glmm_load(m[1]),
-                                     _mm_set1_ps(y)),
-                          glmm_load(m[3])))
+  glmm_store(m[3],
+             _mm_add_ps(_mm_mul_ps(glmm_load(m[1]),
+                                   _mm_set1_ps(y)),
+                        glmm_load(m[3])))
   ;
 #else
   vec4 v1;
@@ -173,10 +173,10 @@ CGLM_INLINE
 void
 glm_translate_z(mat4 m, float z) {
 #if defined( __SSE__ ) || defined( __SSE2__ )
-  _mm_store_ps(m[3],
-               _mm_add_ps(_mm_mul_ps(glmm_load(m[2]),
-                                     _mm_set1_ps(z)),
-                          glmm_load(m[3])))
+  glmm_store(m[3],
+             _mm_add_ps(_mm_mul_ps(glmm_load(m[2]),
+                                   _mm_set1_ps(z)),
+                        glmm_load(m[3])))
   ;
 #else
   vec4 v1;

View File

@@ -113,10 +113,10 @@ glm_mat4_copy(mat4 mat, mat4 dest) {
   _mm256_store_ps(dest[0], _mm256_load_ps(mat[0]));
   _mm256_store_ps(dest[2], _mm256_load_ps(mat[2]));
 #elif defined( __SSE__ ) || defined( __SSE2__ )
-  _mm_store_ps(dest[0], glmm_load(mat[0]));
-  _mm_store_ps(dest[1], glmm_load(mat[1]));
-  _mm_store_ps(dest[2], glmm_load(mat[2]));
-  _mm_store_ps(dest[3], glmm_load(mat[3]));
+  glmm_store(dest[0], glmm_load(mat[0]));
+  glmm_store(dest[1], glmm_load(mat[1]));
+  glmm_store(dest[2], glmm_load(mat[2]));
+  glmm_store(dest[3], glmm_load(mat[3]));
 #else
   glm_mat4_ucopy(mat, dest);
 #endif

View File

@@ -207,7 +207,7 @@ glm_quat_normalize_to(versor q, versor dest) {
     return;
   }
-  _mm_store_ps(dest, _mm_div_ps(x0, _mm_sqrt_ps(xdot)));
+  glmm_store(dest, _mm_div_ps(x0, _mm_sqrt_ps(xdot)));
 #else
   float dot;

View File

@@ -24,29 +24,29 @@ glm_mul_sse2(mat4 m1, mat4 m2, mat4 dest) {
   l3 = glmm_load(m1[3]);
   r = glmm_load(m2[0]);
-  _mm_store_ps(dest[0],
-               _mm_add_ps(_mm_add_ps(_mm_mul_ps(_mm_shuffle1_ps1(r, 0), l0),
-                                     _mm_mul_ps(_mm_shuffle1_ps1(r, 1), l1)),
-                          _mm_mul_ps(_mm_shuffle1_ps1(r, 2), l2)));
+  glmm_store(dest[0],
+             _mm_add_ps(_mm_add_ps(_mm_mul_ps(_mm_shuffle1_ps1(r, 0), l0),
+                                   _mm_mul_ps(_mm_shuffle1_ps1(r, 1), l1)),
+                        _mm_mul_ps(_mm_shuffle1_ps1(r, 2), l2)));
   r = glmm_load(m2[1]);
-  _mm_store_ps(dest[1],
-               _mm_add_ps(_mm_add_ps(_mm_mul_ps(_mm_shuffle1_ps1(r, 0), l0),
-                                     _mm_mul_ps(_mm_shuffle1_ps1(r, 1), l1)),
-                          _mm_mul_ps(_mm_shuffle1_ps1(r, 2), l2)));
+  glmm_store(dest[1],
+             _mm_add_ps(_mm_add_ps(_mm_mul_ps(_mm_shuffle1_ps1(r, 0), l0),
+                                   _mm_mul_ps(_mm_shuffle1_ps1(r, 1), l1)),
+                        _mm_mul_ps(_mm_shuffle1_ps1(r, 2), l2)));
   r = glmm_load(m2[2]);
-  _mm_store_ps(dest[2],
-               _mm_add_ps(_mm_add_ps(_mm_mul_ps(_mm_shuffle1_ps1(r, 0), l0),
-                                     _mm_mul_ps(_mm_shuffle1_ps1(r, 1), l1)),
-                          _mm_mul_ps(_mm_shuffle1_ps1(r, 2), l2)));
+  glmm_store(dest[2],
+             _mm_add_ps(_mm_add_ps(_mm_mul_ps(_mm_shuffle1_ps1(r, 0), l0),
+                                   _mm_mul_ps(_mm_shuffle1_ps1(r, 1), l1)),
+                        _mm_mul_ps(_mm_shuffle1_ps1(r, 2), l2)));
   r = glmm_load(m2[3]);
-  _mm_store_ps(dest[3],
-               _mm_add_ps(_mm_add_ps(_mm_mul_ps(_mm_shuffle1_ps1(r, 0), l0),
-                                     _mm_mul_ps(_mm_shuffle1_ps1(r, 1), l1)),
-                          _mm_add_ps(_mm_mul_ps(_mm_shuffle1_ps1(r, 2), l2),
-                                     _mm_mul_ps(_mm_shuffle1_ps1(r, 3), l3))));
+  glmm_store(dest[3],
+             _mm_add_ps(_mm_add_ps(_mm_mul_ps(_mm_shuffle1_ps1(r, 0), l0),
+                                   _mm_mul_ps(_mm_shuffle1_ps1(r, 1), l1)),
+                        _mm_add_ps(_mm_mul_ps(_mm_shuffle1_ps1(r, 2), l2),
+                                   _mm_mul_ps(_mm_shuffle1_ps1(r, 3), l3))));
 }
CGLM_INLINE
@@ -61,24 +61,24 @@ glm_mul_rot_sse2(mat4 m1, mat4 m2, mat4 dest) {
   l3 = glmm_load(m1[3]);
   r = glmm_load(m2[0]);
-  _mm_store_ps(dest[0],
-               _mm_add_ps(_mm_add_ps(_mm_mul_ps(_mm_shuffle1_ps1(r, 0), l0),
-                                     _mm_mul_ps(_mm_shuffle1_ps1(r, 1), l1)),
-                          _mm_mul_ps(_mm_shuffle1_ps1(r, 2), l2)));
+  glmm_store(dest[0],
+             _mm_add_ps(_mm_add_ps(_mm_mul_ps(_mm_shuffle1_ps1(r, 0), l0),
+                                   _mm_mul_ps(_mm_shuffle1_ps1(r, 1), l1)),
+                        _mm_mul_ps(_mm_shuffle1_ps1(r, 2), l2)));
   r = glmm_load(m2[1]);
-  _mm_store_ps(dest[1],
-               _mm_add_ps(_mm_add_ps(_mm_mul_ps(_mm_shuffle1_ps1(r, 0), l0),
-                                     _mm_mul_ps(_mm_shuffle1_ps1(r, 1), l1)),
-                          _mm_mul_ps(_mm_shuffle1_ps1(r, 2), l2)));
+  glmm_store(dest[1],
+             _mm_add_ps(_mm_add_ps(_mm_mul_ps(_mm_shuffle1_ps1(r, 0), l0),
+                                   _mm_mul_ps(_mm_shuffle1_ps1(r, 1), l1)),
+                        _mm_mul_ps(_mm_shuffle1_ps1(r, 2), l2)));
   r = glmm_load(m2[2]);
-  _mm_store_ps(dest[2],
-               _mm_add_ps(_mm_add_ps(_mm_mul_ps(_mm_shuffle1_ps1(r, 0), l0),
-                                     _mm_mul_ps(_mm_shuffle1_ps1(r, 1), l1)),
-                          _mm_mul_ps(_mm_shuffle1_ps1(r, 2), l2)));
+  glmm_store(dest[2],
+             _mm_add_ps(_mm_add_ps(_mm_mul_ps(_mm_shuffle1_ps1(r, 0), l0),
+                                   _mm_mul_ps(_mm_shuffle1_ps1(r, 1), l1)),
+                        _mm_mul_ps(_mm_shuffle1_ps1(r, 2), l2)));
-  _mm_store_ps(dest[3], l3);
+  glmm_store(dest[3], l3);
 }
CGLM_INLINE
@@ -101,10 +101,10 @@ glm_inv_tr_sse2(mat4 mat) {
   x0 = _mm_add_ps(x0, x1);
-  _mm_store_ps(mat[0], r0);
-  _mm_store_ps(mat[1], r1);
-  _mm_store_ps(mat[2], r2);
-  _mm_store_ps(mat[3], x0);
+  glmm_store(mat[0], r0);
+  glmm_store(mat[1], r1);
+  glmm_store(mat[2], r2);
+  glmm_store(mat[3], x0);
 }
 #endif

View File

@@ -20,10 +20,10 @@ glm_mat4_scale_sse2(mat4 m, float s){
   __m128 x0;
   x0 = _mm_set1_ps(s);
-  _mm_store_ps(m[0], _mm_mul_ps(glmm_load(m[0]), x0));
-  _mm_store_ps(m[1], _mm_mul_ps(glmm_load(m[1]), x0));
-  _mm_store_ps(m[2], _mm_mul_ps(glmm_load(m[2]), x0));
-  _mm_store_ps(m[3], _mm_mul_ps(glmm_load(m[3]), x0));
+  glmm_store(m[0], _mm_mul_ps(glmm_load(m[0]), x0));
+  glmm_store(m[1], _mm_mul_ps(glmm_load(m[1]), x0));
+  glmm_store(m[2], _mm_mul_ps(glmm_load(m[2]), x0));
+  glmm_store(m[3], _mm_mul_ps(glmm_load(m[3]), x0));
 }
CGLM_INLINE
@@ -38,10 +38,10 @@ glm_mat4_transp_sse2(mat4 m, mat4 dest){
   _MM_TRANSPOSE4_PS(r0, r1, r2, r3);
-  _mm_store_ps(dest[0], r0);
-  _mm_store_ps(dest[1], r1);
-  _mm_store_ps(dest[2], r2);
-  _mm_store_ps(dest[3], r3);
+  glmm_store(dest[0], r0);
+  glmm_store(dest[1], r1);
+  glmm_store(dest[2], r2);
+  glmm_store(dest[3], r3);
 }
CGLM_INLINE
@@ -57,30 +57,30 @@ glm_mat4_mul_sse2(mat4 m1, mat4 m2, mat4 dest) {
   l3 = glmm_load(m1[3]);
   r = glmm_load(m2[0]);
-  _mm_store_ps(dest[0],
-               _mm_add_ps(_mm_add_ps(_mm_mul_ps(_mm_shuffle1_ps1(r, 0), l0),
-                                     _mm_mul_ps(_mm_shuffle1_ps1(r, 1), l1)),
-                          _mm_add_ps(_mm_mul_ps(_mm_shuffle1_ps1(r, 2), l2),
-                                     _mm_mul_ps(_mm_shuffle1_ps1(r, 3), l3))));
+  glmm_store(dest[0],
+             _mm_add_ps(_mm_add_ps(_mm_mul_ps(_mm_shuffle1_ps1(r, 0), l0),
+                                   _mm_mul_ps(_mm_shuffle1_ps1(r, 1), l1)),
+                        _mm_add_ps(_mm_mul_ps(_mm_shuffle1_ps1(r, 2), l2),
+                                   _mm_mul_ps(_mm_shuffle1_ps1(r, 3), l3))));
   r = glmm_load(m2[1]);
-  _mm_store_ps(dest[1],
-               _mm_add_ps(_mm_add_ps(_mm_mul_ps(_mm_shuffle1_ps1(r, 0), l0),
-                                     _mm_mul_ps(_mm_shuffle1_ps1(r, 1), l1)),
-                          _mm_add_ps(_mm_mul_ps(_mm_shuffle1_ps1(r, 2), l2),
-                                     _mm_mul_ps(_mm_shuffle1_ps1(r, 3), l3))));
+  glmm_store(dest[1],
+             _mm_add_ps(_mm_add_ps(_mm_mul_ps(_mm_shuffle1_ps1(r, 0), l0),
+                                   _mm_mul_ps(_mm_shuffle1_ps1(r, 1), l1)),
+                        _mm_add_ps(_mm_mul_ps(_mm_shuffle1_ps1(r, 2), l2),
+                                   _mm_mul_ps(_mm_shuffle1_ps1(r, 3), l3))));
   r = glmm_load(m2[2]);
-  _mm_store_ps(dest[2],
-               _mm_add_ps(_mm_add_ps(_mm_mul_ps(_mm_shuffle1_ps1(r, 0), l0),
-                                     _mm_mul_ps(_mm_shuffle1_ps1(r, 1), l1)),
-                          _mm_add_ps(_mm_mul_ps(_mm_shuffle1_ps1(r, 2), l2),
-                                     _mm_mul_ps(_mm_shuffle1_ps1(r, 3), l3))));
+  glmm_store(dest[2],
+             _mm_add_ps(_mm_add_ps(_mm_mul_ps(_mm_shuffle1_ps1(r, 0), l0),
+                                   _mm_mul_ps(_mm_shuffle1_ps1(r, 1), l1)),
+                        _mm_add_ps(_mm_mul_ps(_mm_shuffle1_ps1(r, 2), l2),
+                                   _mm_mul_ps(_mm_shuffle1_ps1(r, 3), l3))));
   r = glmm_load(m2[3]);
-  _mm_store_ps(dest[3],
-               _mm_add_ps(_mm_add_ps(_mm_mul_ps(_mm_shuffle1_ps1(r, 0), l0),
-                                     _mm_mul_ps(_mm_shuffle1_ps1(r, 1), l1)),
-                          _mm_add_ps(_mm_mul_ps(_mm_shuffle1_ps1(r, 2), l2),
-                                     _mm_mul_ps(_mm_shuffle1_ps1(r, 3), l3))));
+  glmm_store(dest[3],
+             _mm_add_ps(_mm_add_ps(_mm_mul_ps(_mm_shuffle1_ps1(r, 0), l0),
+                                   _mm_mul_ps(_mm_shuffle1_ps1(r, 1), l1)),
+                        _mm_add_ps(_mm_mul_ps(_mm_shuffle1_ps1(r, 2), l2),
+                                   _mm_mul_ps(_mm_shuffle1_ps1(r, 3), l3))));
 }
CGLM_INLINE
@@ -99,7 +99,7 @@ glm_mat4_mulv_sse2(mat4 m, vec4 v, vec4 dest) {
              _mm_mul_ps(glmm_load(m[3]),
                         _mm_shuffle1_ps1(x0, 3)));
-  _mm_store_ps(dest, _mm_add_ps(x1, x2));
+  glmm_store(dest, _mm_add_ps(x1, x2));
 }
CGLM_INLINE
@@ -275,10 +275,10 @@ glm_mat4_inv_fast_sse2(mat4 mat, mat4 dest) {
   x0 = _mm_add_ps(x0, _mm_shuffle1_ps(x0, 1, 0, 0, 1));
   x0 = _mm_rcp_ps(x0);
-  _mm_store_ps(dest[0], _mm_mul_ps(v0, x0));
-  _mm_store_ps(dest[1], _mm_mul_ps(v1, x0));
-  _mm_store_ps(dest[2], _mm_mul_ps(v2, x0));
-  _mm_store_ps(dest[3], _mm_mul_ps(v3, x0));
+  glmm_store(dest[0], _mm_mul_ps(v0, x0));
+  glmm_store(dest[1], _mm_mul_ps(v1, x0));
+  glmm_store(dest[2], _mm_mul_ps(v2, x0));
+  glmm_store(dest[3], _mm_mul_ps(v3, x0));
 }
CGLM_INLINE
@@ -399,10 +399,10 @@ glm_mat4_inv_sse2(mat4 mat, mat4 dest) {
   x0 = _mm_add_ps(x0, _mm_shuffle1_ps(x0, 1, 0, 0, 1));
   x0 = _mm_div_ps(_mm_set1_ps(1.0f), x0);
-  _mm_store_ps(dest[0], _mm_mul_ps(v0, x0));
-  _mm_store_ps(dest[1], _mm_mul_ps(v1, x0));
-  _mm_store_ps(dest[2], _mm_mul_ps(v2, x0));
-  _mm_store_ps(dest[3], _mm_mul_ps(v3, x0));
+  glmm_store(dest[0], _mm_mul_ps(v0, x0));
+  glmm_store(dest[1], _mm_mul_ps(v1, x0));
+  glmm_store(dest[2], _mm_mul_ps(v2, x0));
+  glmm_store(dest[3], _mm_mul_ps(v3, x0));
 }
 #endif

View File

@@ -38,7 +38,7 @@ glm_quat_mul_sse2(versor p, versor q, versor dest) {
   x0 = _mm_xor_ps(_mm_shuffle1_ps1(xp, 2), _mm_set_ps(-0.f, 0.f, 0.f, -0.f));
   r = _mm_add_ps(r, _mm_mul_ps(x0, _mm_shuffle1_ps(xq, 2, 3, 0, 1)));
-  _mm_store_ps(dest, r);
+  glmm_store(dest, r);
 }

View File

@@ -42,7 +42,7 @@ CGLM_INLINE
 void
 glm_vec4_mulv(vec4 a, vec4 b, vec4 d) {
 #if defined( __SSE__ ) || defined( __SSE2__ )
-  _mm_store_ps(d, _mm_mul_ps(glmm_load(a), glmm_load(b)));
+  glmm_store(d, _mm_mul_ps(glmm_load(a), glmm_load(b)));
 #else
   d[0] = a[0] * b[0];
   d[1] = a[1] * b[1];
@@ -61,7 +61,7 @@ CGLM_INLINE
 void
 glm_vec4_broadcast(float val, vec4 d) {
 #if defined( __SSE__ ) || defined( __SSE2__ )
-  _mm_store_ps(d, _mm_set1_ps(val));
+  glmm_store(d, _mm_set1_ps(val));
 #else
   d[0] = d[1] = d[2] = d[3] = val;
 #endif
@@ -230,7 +230,7 @@ glm_vec4_sign(vec4 v, vec4 dest) {
   x3 = _mm_and_ps(_mm_cmpgt_ps(x0, x2), _mm_shuffle1_ps1(x1, 1));
   x4 = _mm_and_ps(_mm_cmplt_ps(x0, x2), _mm_shuffle1_ps1(x1, 0));
-  _mm_store_ps(dest, _mm_or_ps(x3, x4));
+  glmm_store(dest, _mm_or_ps(x3, x4));
 #else
   dest[0] = glm_signf(v[0]);
   dest[1] = glm_signf(v[1]);
@@ -249,7 +249,7 @@ CGLM_INLINE
 void
 glm_vec4_sqrt(vec4 v, vec4 dest) {
 #if defined( __SSE__ ) || defined( __SSE2__ )
-  _mm_store_ps(dest, _mm_sqrt_ps(glmm_load(v)));
+  glmm_store(dest, _mm_sqrt_ps(glmm_load(v)));
 #else
   dest[0] = sqrtf(v[0]);
   dest[1] = sqrtf(v[1]);

View File

@@ -111,7 +111,7 @@ CGLM_INLINE
 void
 glm_vec4_copy(vec4 v, vec4 dest) {
 #if defined( __SSE__ ) || defined( __SSE2__ )
-  _mm_store_ps(dest, glmm_load(v));
+  glmm_store(dest, glmm_load(v));
 #else
   dest[0] = v[0];
   dest[1] = v[1];
@@ -129,7 +129,7 @@ CGLM_INLINE
 void
 glm_vec4_zero(vec4 v) {
 #if defined( __SSE__ ) || defined( __SSE2__ )
-  _mm_store_ps(v, _mm_setzero_ps());
+  glmm_store(v, _mm_setzero_ps());
 #else
   v[0] = 0.0f;
   v[1] = 0.0f;
@@ -147,7 +147,7 @@ CGLM_INLINE
 void
 glm_vec4_one(vec4 v) {
 #if defined( __SSE__ ) || defined( __SSE2__ )
-  _mm_store_ps(v, _mm_set1_ps(1.0f));
+  glmm_store(v, _mm_set1_ps(1.0f));
 #else
   v[0] = 1.0f;
   v[1] = 1.0f;
@@ -232,7 +232,7 @@ CGLM_INLINE
 void
 glm_vec4_add(vec4 a, vec4 b, vec4 dest) {
 #if defined( __SSE__ ) || defined( __SSE2__ )
-  _mm_store_ps(dest, _mm_add_ps(glmm_load(a), glmm_load(b)));
+  glmm_store(dest, _mm_add_ps(glmm_load(a), glmm_load(b)));
 #else
   dest[0] = a[0] + b[0];
   dest[1] = a[1] + b[1];
@@ -252,7 +252,7 @@ CGLM_INLINE
 void
 glm_vec4_adds(vec4 v, float s, vec4 dest) {
 #if defined( __SSE__ ) || defined( __SSE2__ )
-  _mm_store_ps(dest, _mm_add_ps(glmm_load(v), _mm_set1_ps(s)));
+  glmm_store(dest, _mm_add_ps(glmm_load(v), _mm_set1_ps(s)));
 #else
   dest[0] = v[0] + s;
   dest[1] = v[1] + s;
@@ -272,7 +272,7 @@ CGLM_INLINE
 void
 glm_vec4_sub(vec4 a, vec4 b, vec4 dest) {
 #if defined( __SSE__ ) || defined( __SSE2__ )
-  _mm_store_ps(dest, _mm_sub_ps(glmm_load(a), glmm_load(b)));
+  glmm_store(dest, _mm_sub_ps(glmm_load(a), glmm_load(b)));
 #else
   dest[0] = a[0] - b[0];
   dest[1] = a[1] - b[1];
@@ -292,7 +292,7 @@ CGLM_INLINE
 void
 glm_vec4_subs(vec4 v, float s, vec4 dest) {
 #if defined( __SSE__ ) || defined( __SSE2__ )
-  _mm_store_ps(dest, _mm_sub_ps(glmm_load(v), _mm_set1_ps(s)));
+  glmm_store(dest, _mm_sub_ps(glmm_load(v), _mm_set1_ps(s)));
 #else
   dest[0] = v[0] - s;
   dest[1] = v[1] - s;
@@ -312,7 +312,7 @@ CGLM_INLINE
 void
 glm_vec4_mul(vec4 a, vec4 b, vec4 d) {
 #if defined( __SSE__ ) || defined( __SSE2__ )
-  _mm_store_ps(d, _mm_mul_ps(glmm_load(a), glmm_load(b)));
+  glmm_store(d, _mm_mul_ps(glmm_load(a), glmm_load(b)));
 #else
   d[0] = a[0] * b[0];
   d[1] = a[1] * b[1];
@@ -332,7 +332,7 @@ CGLM_INLINE
 void
 glm_vec4_scale(vec4 v, float s, vec4 dest) {
 #if defined( __SSE__ ) || defined( __SSE2__ )
-  _mm_store_ps(dest, _mm_mul_ps(glmm_load(v), _mm_set1_ps(s)));
+  glmm_store(dest, _mm_mul_ps(glmm_load(v), _mm_set1_ps(s)));
 #else
   dest[0] = v[0] * s;
   dest[1] = v[1] * s;
@@ -373,7 +373,7 @@ CGLM_INLINE
 void
 glm_vec4_div(vec4 a, vec4 b, vec4 dest) {
 #if defined( __SSE__ ) || defined( __SSE2__ )
-  _mm_store_ps(dest, _mm_div_ps(glmm_load(a), glmm_load(b)));
+  glmm_store(dest, _mm_div_ps(glmm_load(a), glmm_load(b)));
 #else
   dest[0] = a[0] / b[0];
   dest[1] = a[1] / b[1];
@@ -393,7 +393,7 @@ CGLM_INLINE
 void
 glm_vec4_divs(vec4 v, float s, vec4 dest) {
 #if defined( __SSE__ ) || defined( __SSE2__ )
-  _mm_store_ps(dest, _mm_div_ps(glmm_load(v), _mm_set1_ps(s)));
+  glmm_store(dest, _mm_div_ps(glmm_load(v), _mm_set1_ps(s)));
 #else
   glm_vec4_scale(v, 1.0f / s, dest);
 #endif
@@ -413,9 +413,9 @@ CGLM_INLINE
 void
 glm_vec4_addadd(vec4 a, vec4 b, vec4 dest) {
 #if defined( __SSE__ ) || defined( __SSE2__ )
-  _mm_store_ps(dest, _mm_add_ps(glmm_load(dest),
-                                _mm_add_ps(glmm_load(a),
-                                           glmm_load(b))));
+  glmm_store(dest, _mm_add_ps(glmm_load(dest),
+                              _mm_add_ps(glmm_load(a),
+                                         glmm_load(b))));
 #else
   dest[0] += a[0] + b[0];
   dest[1] += a[1] + b[1];
@@ -437,9 +437,9 @@ CGLM_INLINE
 void
 glm_vec4_subadd(vec4 a, vec4 b, vec4 dest) {
 #if defined( __SSE__ ) || defined( __SSE2__ )
-  _mm_store_ps(dest, _mm_add_ps(glmm_load(dest),
-                                _mm_sub_ps(glmm_load(a),
-                                           glmm_load(b))));
+  glmm_store(dest, _mm_add_ps(glmm_load(dest),
+                              _mm_sub_ps(glmm_load(a),
+                                         glmm_load(b))));
 #else
   dest[0] += a[0] - b[0];
   dest[1] += a[1] - b[1];
@@ -461,9 +461,9 @@ CGLM_INLINE
 void
 glm_vec4_muladd(vec4 a, vec4 b, vec4 dest) {
 #if defined( __SSE__ ) || defined( __SSE2__ )
-  _mm_store_ps(dest, _mm_add_ps(glmm_load(dest),
-                                _mm_mul_ps(glmm_load(a),
-                                           glmm_load(b))));
+  glmm_store(dest, _mm_add_ps(glmm_load(dest),
+                              _mm_mul_ps(glmm_load(a),
+                                         glmm_load(b))));
 #else
   dest[0] += a[0] * b[0];
   dest[1] += a[1] * b[1];
@@ -485,9 +485,9 @@ CGLM_INLINE
 void
 glm_vec4_muladds(vec4 a, float s, vec4 dest) {
 #if defined( __SSE__ ) || defined( __SSE2__ )
-  _mm_store_ps(dest, _mm_add_ps(glmm_load(dest),
-                                _mm_mul_ps(glmm_load(a),
-                                           _mm_set1_ps(s))));
+  glmm_store(dest, _mm_add_ps(glmm_load(dest),
+                              _mm_mul_ps(glmm_load(a),
+                                         _mm_set1_ps(s))));
 #else
   dest[0] += a[0] * s;
   dest[1] += a[1] * s;
@@ -505,7 +505,7 @@ CGLM_INLINE
 void
 glm_vec4_flipsign(vec4 v) {
 #if defined( __SSE__ ) || defined( __SSE2__ )
-  _mm_store_ps(v, _mm_xor_ps(glmm_load(v), _mm_set1_ps(-0.0f)));
+  glmm_store(v, _mm_xor_ps(glmm_load(v), _mm_set1_ps(-0.0f)));
 #else
   v[0] = -v[0];
   v[1] = -v[1];
@@ -524,8 +524,7 @@ CGLM_INLINE
 void
 glm_vec4_flipsign_to(vec4 v, vec4 dest) {
 #if defined( __SSE__ ) || defined( __SSE2__ )
-  _mm_store_ps(dest, _mm_xor_ps(glmm_load(v),
-                                _mm_set1_ps(-0.0f)));
+  glmm_store(dest, _mm_xor_ps(glmm_load(v), _mm_set1_ps(-0.0f)));
 #else
   dest[0] = -v[0];
   dest[1] = -v[1];
@@ -576,11 +575,11 @@ glm_vec4_normalize_to(vec4 vec, vec4 dest) {
   dot = _mm_cvtss_f32(xdot);
   if (dot == 0.0f) {
-    _mm_store_ps(dest, _mm_setzero_ps());
+    glmm_store(dest, _mm_setzero_ps());
     return;
   }
-  _mm_store_ps(dest, _mm_div_ps(x0, _mm_sqrt_ps(xdot)));
+  glmm_store(dest, _mm_div_ps(x0, _mm_sqrt_ps(xdot)));
 #else
   float norm;
@@ -633,7 +632,7 @@ CGLM_INLINE
 void
 glm_vec4_maxv(vec4 v1, vec4 v2, vec4 dest) {
 #if defined( __SSE__ ) || defined( __SSE2__ )
-  _mm_store_ps(dest, _mm_max_ps(glmm_load(v1), glmm_load(v2)));
+  glmm_store(dest, _mm_max_ps(glmm_load(v1), glmm_load(v2)));
 #else
   dest[0] = glm_max(v1[0], v2[0]);
   dest[1] = glm_max(v1[1], v2[1]);
@@ -653,7 +652,7 @@ CGLM_INLINE
 void
 glm_vec4_minv(vec4 v1, vec4 v2, vec4 dest) {
 #if defined( __SSE__ ) || defined( __SSE2__ )
-  _mm_store_ps(dest, _mm_min_ps(glmm_load(v1), glmm_load(v2)));
+  glmm_store(dest, _mm_min_ps(glmm_load(v1), glmm_load(v2)));
 #else
   dest[0] = glm_min(v1[0], v2[0]);
   dest[1] = glm_min(v1[1], v2[1]);
@@ -673,8 +672,8 @@ CGLM_INLINE
 void
 glm_vec4_clamp(vec4 v, float minVal, float maxVal) {
 #if defined( __SSE__ ) || defined( __SSE2__ )
-  _mm_store_ps(v, _mm_min_ps(_mm_max_ps(glmm_load(v), _mm_set1_ps(minVal)),
-                             _mm_set1_ps(maxVal)));
+  glmm_store(v, _mm_min_ps(_mm_max_ps(glmm_load(v), _mm_set1_ps(minVal)),
+                           _mm_set1_ps(maxVal)));
 #else
   v[0] = glm_clamp(v[0], minVal, maxVal);
   v[1] = glm_clamp(v[1], minVal, maxVal);