Merge branch 'master' into optimizations

Recep Aslantas, 2018-05-10 13:59:10 +03:00 (committed by GitHub)
64 changed files with 4736 additions and 770 deletions


@@ -21,11 +21,11 @@ glm_mul_avx(mat4 m1, mat4 m2, mat4 dest) {
   __m256 y0, y1, y2, y3, y4, y5, y6, y7, y8, y9;
 
-  y0 = _mm256_load_ps(m2[0]); /* h g f e d c b a */
-  y1 = _mm256_load_ps(m2[2]); /* p o n m l k j i */
+  y0 = glmm_load256(m2[0]); /* h g f e d c b a */
+  y1 = glmm_load256(m2[2]); /* p o n m l k j i */
 
-  y2 = _mm256_load_ps(m1[0]); /* h g f e d c b a */
-  y3 = _mm256_load_ps(m1[2]); /* p o n m l k j i */
+  y2 = glmm_load256(m1[0]); /* h g f e d c b a */
+  y3 = glmm_load256(m1[2]); /* p o n m l k j i */
 
   y4 = _mm256_permute2f128_ps(y2, y2, 0b00000011); /* d c b a h g f e */
   y5 = _mm256_permute2f128_ps(y3, y3, 0b00000000); /* l k j i l k j i */
@@ -37,10 +37,10 @@ glm_mul_avx(mat4 m1, mat4 m2, mat4 dest) {
   y6 = _mm256_permutevar_ps(y0, _mm256_set_epi32(1, 1, 1, 1, 0, 0, 0, 0));
   y8 = _mm256_permutevar_ps(y0, _mm256_set_epi32(0, 0, 0, 0, 1, 1, 1, 1));
 
-  _mm256_store_ps(dest[0],
-                  _mm256_add_ps(_mm256_add_ps(_mm256_mul_ps(y2, y6),
-                                              _mm256_mul_ps(y4, y8)),
-                                _mm256_mul_ps(y5, y7)));
+  glmm_store256(dest[0],
+                _mm256_add_ps(_mm256_add_ps(_mm256_mul_ps(y2, y6),
+                                            _mm256_mul_ps(y4, y8)),
+                              _mm256_mul_ps(y5, y7)));
 
   /* n n n n i i i i */
@@ -52,11 +52,11 @@ glm_mul_avx(mat4 m1, mat4 m2, mat4 dest) {
   y8 = _mm256_permutevar_ps(y1, _mm256_set_epi32(0, 0, 0, 0, 1, 1, 1, 1));
   y9 = _mm256_permutevar_ps(y1, _mm256_set_epi32(2, 2, 2, 2, 3, 3, 3, 3));
 
-  _mm256_store_ps(dest[2],
-                  _mm256_add_ps(_mm256_add_ps(_mm256_mul_ps(y2, y6),
-                                              _mm256_mul_ps(y3, y7)),
-                                _mm256_add_ps(_mm256_mul_ps(y4, y8),
-                                              _mm256_mul_ps(y5, y9))));
+  glmm_store256(dest[2],
+                _mm256_add_ps(_mm256_add_ps(_mm256_mul_ps(y2, y6),
+                                            _mm256_mul_ps(y3, y7)),
+                              _mm256_add_ps(_mm256_mul_ps(y4, y8),
+                                            _mm256_mul_ps(y5, y9))));
 }
 #endif
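A quick standalone sketch (not part of the commit) of the _mm256_permute2f128_ps control bytes the AVX code above leans on; each pair of selector bits picks a whole 128-bit lane, which is how one register ends up holding two mat4 columns in swapped or broadcast order. Hex immediates stand in for the diff's binary literals (0x03 == 0b00000011), since binary literals are a compiler extension in C.

#include <stdio.h>
#include <immintrin.h>

/* compile with AVX enabled, e.g. gcc -mavx (flag is an assumption) */
int main(void) {
  float in[8] = {0, 1, 2, 3, 4, 5, 6, 7}; /* a b c d e f g h */
  float out[8];
  __m256 y = _mm256_loadu_ps(in);

  /* 0x03: bits[1:0] = 3 -> low result lane = high source lane,
           bits[5:4] = 0 -> high result lane = low source lane  */
  _mm256_storeu_ps(out, _mm256_permute2f128_ps(y, y, 0x03));
  printf("%g %g %g %g | %g %g %g %g\n", out[0], out[1], out[2], out[3],
         out[4], out[5], out[6], out[7]); /* 4 5 6 7 | 0 1 2 3 */

  /* 0x00: both result lanes = low source lane (broadcast) */
  _mm256_storeu_ps(out, _mm256_permute2f128_ps(y, y, 0x00));
  printf("%g %g %g %g | %g %g %g %g\n", out[0], out[1], out[2], out[3],
         out[4], out[5], out[6], out[7]); /* 0 1 2 3 | 0 1 2 3 */
  return 0;
}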


@@ -21,11 +21,11 @@ glm_mat4_mul_avx(mat4 m1, mat4 m2, mat4 dest) {
   __m256 y0, y1, y2, y3, y4, y5, y6, y7, y8, y9;
 
-  y0 = _mm256_load_ps(m2[0]); /* h g f e d c b a */
-  y1 = _mm256_load_ps(m2[2]); /* p o n m l k j i */
+  y0 = glmm_load256(m2[0]); /* h g f e d c b a */
+  y1 = glmm_load256(m2[2]); /* p o n m l k j i */
 
-  y2 = _mm256_load_ps(m1[0]); /* h g f e d c b a */
-  y3 = _mm256_load_ps(m1[2]); /* p o n m l k j i */
+  y2 = glmm_load256(m1[0]); /* h g f e d c b a */
+  y3 = glmm_load256(m1[2]); /* p o n m l k j i */
 
   y4 = _mm256_permute2f128_ps(y2, y2, 0b00000011); /* d c b a h g f e */
   y5 = _mm256_permute2f128_ps(y3, y3, 0b00000011); /* l k j i p o n m */
@@ -39,11 +39,11 @@ glm_mat4_mul_avx(mat4 m1, mat4 m2, mat4 dest) {
   y8 = _mm256_permutevar_ps(y0, _mm256_set_epi32(0, 0, 0, 0, 1, 1, 1, 1));
   y9 = _mm256_permutevar_ps(y0, _mm256_set_epi32(2, 2, 2, 2, 3, 3, 3, 3));
 
-  _mm256_store_ps(dest[0],
-                  _mm256_add_ps(_mm256_add_ps(_mm256_mul_ps(y2, y6),
-                                              _mm256_mul_ps(y3, y7)),
-                                _mm256_add_ps(_mm256_mul_ps(y4, y8),
-                                              _mm256_mul_ps(y5, y9))));
+  glmm_store256(dest[0],
+                _mm256_add_ps(_mm256_add_ps(_mm256_mul_ps(y2, y6),
+                                            _mm256_mul_ps(y3, y7)),
+                              _mm256_add_ps(_mm256_mul_ps(y4, y8),
+                                            _mm256_mul_ps(y5, y9))));
 
   /* n n n n i i i i */
   /* p p p p k k k k */
@@ -54,11 +54,11 @@ glm_mat4_mul_avx(mat4 m1, mat4 m2, mat4 dest) {
   y8 = _mm256_permutevar_ps(y1, _mm256_set_epi32(0, 0, 0, 0, 1, 1, 1, 1));
   y9 = _mm256_permutevar_ps(y1, _mm256_set_epi32(2, 2, 2, 2, 3, 3, 3, 3));
 
-  _mm256_store_ps(dest[2],
-                  _mm256_add_ps(_mm256_add_ps(_mm256_mul_ps(y2, y6),
-                                              _mm256_mul_ps(y3, y7)),
-                                _mm256_add_ps(_mm256_mul_ps(y4, y8),
-                                              _mm256_mul_ps(y5, y9))));
+  glmm_store256(dest[2],
+                _mm256_add_ps(_mm256_add_ps(_mm256_mul_ps(y2, y6),
+                                            _mm256_mul_ps(y3, y7)),
+                              _mm256_add_ps(_mm256_mul_ps(y4, y8),
+                                            _mm256_mul_ps(y5, y9))));
 }
 #endif
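Both AVX variants (and the SSE2 ones below) compute the same column-major product. A scalar reference, my sketch rather than anything in the commit, for checking the lane bookkeeping against:

#include <stdio.h>

typedef float mat4[4][4]; /* mat4[col][row], stand-in for cglm's type */

/* dest = m1 * m2; dest must not alias m1 or m2 in this scalar version */
static void mat4_mul_ref(mat4 m1, mat4 m2, mat4 dest) {
  for (int c = 0; c < 4; c++)
    for (int r = 0; r < 4; r++)
      dest[c][r] = m1[0][r] * m2[c][0] + m1[1][r] * m2[c][1]
                 + m1[2][r] * m2[c][2] + m1[3][r] * m2[c][3];
}

int main(void) {
  mat4 id = {{1,0,0,0}, {0,1,0,0}, {0,0,1,0}, {0,0,0,1}};
  mat4 a, out;
  for (int c = 0; c < 4; c++)
    for (int r = 0; r < 4; r++)
      a[c][r] = (float)(4 * c + r);
  mat4_mul_ref(a, id, out);              /* a * I == a */
  printf("%g %g\n", a[1][2], out[1][2]); /* 6 6 */
  return 0;
}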


@@ -8,11 +8,19 @@
 #ifndef cglm_intrin_h
 #define cglm_intrin_h
 
-#if defined( _WIN32 )
+#if defined( _MSC_VER )
 #  if (defined(_M_AMD64) || defined(_M_X64)) || _M_IX86_FP == 2
-#    define __SSE2__
+#    ifndef __SSE2__
+#      define __SSE2__
+#    endif
 #  elif _M_IX86_FP == 1
-#    define __SSE__
+#    ifndef __SSE__
+#      define __SSE__
+#    endif
 #  endif
+
+/* do not use alignment for older visual studio versions */
+#  if _MSC_VER < 1913 /* Visual Studio 2017 version 15.6 */
+#    define CGLM_ALL_UNALIGNED
+#  endif
 #endif
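MSVC never predefines __SSE__/__SSE2__, so the block above derives them from _M_X64/_M_IX86_FP, and the new #ifndef guards avoid redefinition warnings under clang-cl or explicit /D flags. CGLM_ALL_UNALIGNED, defined here for toolchains older than VS2017 15.6, makes the glmm_load/glmm_store wrappers introduced below fall back to unaligned instructions. A minimal probe, assuming the header path "cglm/simd/intrin.h" of a cglm checkout is on the include path:

#include <stdio.h>
#include "cglm/simd/intrin.h" /* path is an assumption */

int main(void) {
#if defined(__SSE2__)
  puts("SSE2 code paths enabled");
#endif
#if defined(CGLM_ALL_UNALIGNED)
  puts("unaligned SIMD loads/stores selected");
#else
  puts("aligned SIMD loads/stores selected");
#endif
  return 0;
}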
@@ -36,6 +44,49 @@
 #  define _mm_shuffle2_ps(a, b, z0, y0, x0, w0, z1, y1, x1, w1)              \
      _mm_shuffle1_ps(_mm_shuffle_ps(a, b, _MM_SHUFFLE(z0, y0, x0, w0)),      \
                      z1, y1, x1, w1)
+
+static inline
+__m128
+glmm_dot(__m128 a, __m128 b) {
+  __m128 x0;
+  x0 = _mm_mul_ps(a, b);
+  x0 = _mm_add_ps(x0, _mm_shuffle1_ps(x0, 1, 0, 3, 2));
+  return _mm_add_ps(x0, _mm_shuffle1_ps(x0, 0, 1, 0, 1));
+}
+
+static inline
+__m128
+glmm_norm(__m128 a) {
+  return _mm_sqrt_ps(glmm_dot(a, a));
+}
+
+static inline
+__m128
+glmm_load3(float v[3]) {
+  __m128i xy;
+  __m128  z;
+
+  xy = _mm_loadl_epi64((const __m128i *)v);
+  z  = _mm_load_ss(&v[2]);
+
+  return _mm_movelh_ps(_mm_castsi128_ps(xy), z);
+}
+
+static inline
+void
+glmm_store3(__m128 vx, float v[3]) {
+  _mm_storel_pi((__m64 *)&v[0], vx);
+  _mm_store_ss(&v[2], _mm_shuffle1_ps(vx, 2, 2, 2, 2));
+}
+
+#ifdef CGLM_ALL_UNALIGNED
+#  define glmm_load(p)     _mm_loadu_ps(p)
+#  define glmm_store(p, a) _mm_storeu_ps(p, a)
+#else
+#  define glmm_load(p)     _mm_load_ps(p)
+#  define glmm_store(p, a) _mm_store_ps(p, a)
+#endif
+
 #endif
 
 /* x86, x64 */
@@ -45,6 +96,15 @@
 #ifdef __AVX__
 #  define CGLM_AVX_FP 1
 
+#  ifdef CGLM_ALL_UNALIGNED
+#    define glmm_load256(p)     _mm256_loadu_ps(p)
+#    define glmm_store256(p, a) _mm256_storeu_ps(p, a)
+#  else
+#    define glmm_load256(p)     _mm256_load_ps(p)
+#    define glmm_store256(p, a) _mm256_store_ps(p, a)
+#  endif
+
 #endif
 
 /* ARM Neon */
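A small sanity sketch (mine, not from the commit) for the new helpers, assuming an SSE2-capable target with the header above in scope. glmm_dot leaves the full dot product in every lane: the first shuffle-add folds lanes {0,2} and {1,3}, the second adds the two partial sums. glmm_load3/glmm_store3 touch exactly three floats, so a vec3 at the very end of an allocation cannot fault on a phantom fourth element.

#include <stdio.h>
#include "cglm/simd/intrin.h" /* path is an assumption */

int main(void) {
  float a[4] = {1, 2, 3, 4}, b[4] = {5, 6, 7, 8}, out[4];
  float v[3] = {9, 10, 11}, w[3];

  _mm_storeu_ps(out, glmm_dot(_mm_loadu_ps(a), _mm_loadu_ps(b)));
  printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]); /* 70 70 70 70 */

  glmm_store3(glmm_load3(v), w);
  printf("%g %g %g\n", w[0], w[1], w[2]); /* 9 10 11 */
  return 0;
}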


@@ -18,35 +18,67 @@ glm_mul_sse2(mat4 m1, mat4 m2, mat4 dest) {
   /* D = R * L (Column-Major) */
   __m128 l0, l1, l2, l3, r;
 
-  l0 = _mm_load_ps(m1[0]);
-  l1 = _mm_load_ps(m1[1]);
-  l2 = _mm_load_ps(m1[2]);
-  l3 = _mm_load_ps(m1[3]);
+  l0 = glmm_load(m1[0]);
+  l1 = glmm_load(m1[1]);
+  l2 = glmm_load(m1[2]);
+  l3 = glmm_load(m1[3]);
 
-  r = _mm_load_ps(m2[0]);
-  _mm_store_ps(dest[0],
-               _mm_add_ps(_mm_add_ps(_mm_mul_ps(_mm_shuffle1_ps1(r, 0), l0),
-                                     _mm_mul_ps(_mm_shuffle1_ps1(r, 1), l1)),
-                          _mm_mul_ps(_mm_shuffle1_ps1(r, 2), l2)));
+  r = glmm_load(m2[0]);
+  glmm_store(dest[0],
+             _mm_add_ps(_mm_add_ps(_mm_mul_ps(_mm_shuffle1_ps1(r, 0), l0),
+                                   _mm_mul_ps(_mm_shuffle1_ps1(r, 1), l1)),
+                        _mm_mul_ps(_mm_shuffle1_ps1(r, 2), l2)));
 
-  r = _mm_load_ps(m2[1]);
-  _mm_store_ps(dest[1],
-               _mm_add_ps(_mm_add_ps(_mm_mul_ps(_mm_shuffle1_ps1(r, 0), l0),
-                                     _mm_mul_ps(_mm_shuffle1_ps1(r, 1), l1)),
-                          _mm_mul_ps(_mm_shuffle1_ps1(r, 2), l2)));
+  r = glmm_load(m2[1]);
+  glmm_store(dest[1],
+             _mm_add_ps(_mm_add_ps(_mm_mul_ps(_mm_shuffle1_ps1(r, 0), l0),
+                                   _mm_mul_ps(_mm_shuffle1_ps1(r, 1), l1)),
+                        _mm_mul_ps(_mm_shuffle1_ps1(r, 2), l2)));
 
-  r = _mm_load_ps(m2[2]);
-  _mm_store_ps(dest[2],
-               _mm_add_ps(_mm_add_ps(_mm_mul_ps(_mm_shuffle1_ps1(r, 0), l0),
-                                     _mm_mul_ps(_mm_shuffle1_ps1(r, 1), l1)),
-                          _mm_mul_ps(_mm_shuffle1_ps1(r, 2), l2)));
+  r = glmm_load(m2[2]);
+  glmm_store(dest[2],
+             _mm_add_ps(_mm_add_ps(_mm_mul_ps(_mm_shuffle1_ps1(r, 0), l0),
+                                   _mm_mul_ps(_mm_shuffle1_ps1(r, 1), l1)),
+                        _mm_mul_ps(_mm_shuffle1_ps1(r, 2), l2)));
 
-  r = _mm_load_ps(m2[3]);
-  _mm_store_ps(dest[3],
-               _mm_add_ps(_mm_add_ps(_mm_mul_ps(_mm_shuffle1_ps1(r, 0), l0),
-                                     _mm_mul_ps(_mm_shuffle1_ps1(r, 1), l1)),
-                          _mm_add_ps(_mm_mul_ps(_mm_shuffle1_ps1(r, 2), l2),
-                                     _mm_mul_ps(_mm_shuffle1_ps1(r, 3), l3))));
+  r = glmm_load(m2[3]);
+  glmm_store(dest[3],
+             _mm_add_ps(_mm_add_ps(_mm_mul_ps(_mm_shuffle1_ps1(r, 0), l0),
+                                   _mm_mul_ps(_mm_shuffle1_ps1(r, 1), l1)),
+                        _mm_add_ps(_mm_mul_ps(_mm_shuffle1_ps1(r, 2), l2),
+                                   _mm_mul_ps(_mm_shuffle1_ps1(r, 3), l3))));
 }
+
+CGLM_INLINE
+void
+glm_mul_rot_sse2(mat4 m1, mat4 m2, mat4 dest) {
+  /* D = R * L (Column-Major) */
+  __m128 l0, l1, l2, l3, r;
+
+  l0 = glmm_load(m1[0]);
+  l1 = glmm_load(m1[1]);
+  l2 = glmm_load(m1[2]);
+  l3 = glmm_load(m1[3]);
+
+  r = glmm_load(m2[0]);
+  glmm_store(dest[0],
+             _mm_add_ps(_mm_add_ps(_mm_mul_ps(_mm_shuffle1_ps1(r, 0), l0),
+                                   _mm_mul_ps(_mm_shuffle1_ps1(r, 1), l1)),
+                        _mm_mul_ps(_mm_shuffle1_ps1(r, 2), l2)));
+
+  r = glmm_load(m2[1]);
+  glmm_store(dest[1],
+             _mm_add_ps(_mm_add_ps(_mm_mul_ps(_mm_shuffle1_ps1(r, 0), l0),
+                                   _mm_mul_ps(_mm_shuffle1_ps1(r, 1), l1)),
+                        _mm_mul_ps(_mm_shuffle1_ps1(r, 2), l2)));
+
+  r = glmm_load(m2[2]);
+  glmm_store(dest[2],
+             _mm_add_ps(_mm_add_ps(_mm_mul_ps(_mm_shuffle1_ps1(r, 0), l0),
+                                   _mm_mul_ps(_mm_shuffle1_ps1(r, 1), l1)),
+                        _mm_mul_ps(_mm_shuffle1_ps1(r, 2), l2)));
+
+  glmm_store(dest[3], l3);
+}
 
 CGLM_INLINE
@@ -54,11 +86,11 @@ void
 glm_inv_tr_sse2(mat4 mat) {
   __m128 r0, r1, r2, r3, x0, x1;
 
-  r0 = _mm_load_ps(mat[0]);
-  r1 = _mm_load_ps(mat[1]);
-  r2 = _mm_load_ps(mat[2]);
-  r3 = _mm_load_ps(mat[3]);
+  r0 = glmm_load(mat[0]);
+  r1 = glmm_load(mat[1]);
+  r2 = glmm_load(mat[2]);
+  r3 = glmm_load(mat[3]);
   x1 = _mm_set_ps(1.0f, 0.0f, 0.0f, 0.0f);
 
   _MM_TRANSPOSE4_PS(r0, r1, r2, x1);
@@ -69,10 +101,10 @@ glm_inv_tr_sse2(mat4 mat) {
   x0 = _mm_add_ps(x0, x1);
 
-  _mm_store_ps(mat[0], r0);
-  _mm_store_ps(mat[1], r1);
-  _mm_store_ps(mat[2], r2);
-  _mm_store_ps(mat[3], x0);
+  glmm_store(mat[0], r0);
+  glmm_store(mat[1], r1);
+  glmm_store(mat[2], r2);
+  glmm_store(mat[3], x0);
 }
 #endif
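The added glm_mul_rot_sse2 is a trimmed multiply for the case where m2 is rotation-only: columns 0-2 of m2 have w == 0, so the l3 terms vanish, and m2's last column is (0,0,0,1), so dest[3] is simply m1's last column. A scalar sketch of that shortcut (mine, not from the commit):

#include <stdio.h>

typedef float mat4[4][4]; /* column-major */

/* valid only when m2[c][3] == 0 for c < 3 and m2[3] == (0,0,0,1) */
static void mul_rot_ref(mat4 m1, mat4 m2, mat4 dest) {
  for (int c = 0; c < 3; c++)
    for (int r = 0; r < 4; r++)
      dest[c][r] = m1[0][r] * m2[c][0]
                 + m1[1][r] * m2[c][1]
                 + m1[2][r] * m2[c][2]; /* l3 term dropped */
  for (int r = 0; r < 4; r++)
    dest[3][r] = m1[3][r];              /* translation passes through */
}

int main(void) {
  mat4 t    = {{1,0,0,0}, {0,1,0,0}, {0,0,1,0}, {5,6,7,1}};  /* translate */
  mat4 rotz = {{0,1,0,0}, {-1,0,0,0}, {0,0,1,0}, {0,0,0,1}}; /* 90 deg z */
  mat4 out;
  mul_rot_ref(t, rotz, out);
  printf("%g %g %g\n", out[3][0], out[3][1], out[3][2]); /* 5 6 7 */
  return 0;
}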


@@ -20,10 +20,10 @@ glm_mat4_scale_sse2(mat4 m, float s){
   __m128 x0;
   x0 = _mm_set1_ps(s);
 
-  _mm_store_ps(m[0], _mm_mul_ps(_mm_load_ps(m[0]), x0));
-  _mm_store_ps(m[1], _mm_mul_ps(_mm_load_ps(m[1]), x0));
-  _mm_store_ps(m[2], _mm_mul_ps(_mm_load_ps(m[2]), x0));
-  _mm_store_ps(m[3], _mm_mul_ps(_mm_load_ps(m[3]), x0));
+  glmm_store(m[0], _mm_mul_ps(glmm_load(m[0]), x0));
+  glmm_store(m[1], _mm_mul_ps(glmm_load(m[1]), x0));
+  glmm_store(m[2], _mm_mul_ps(glmm_load(m[2]), x0));
+  glmm_store(m[3], _mm_mul_ps(glmm_load(m[3]), x0));
 }
 
 CGLM_INLINE
@@ -31,17 +31,17 @@ void
 glm_mat4_transp_sse2(mat4 m, mat4 dest){
   __m128 r0, r1, r2, r3;
 
-  r0 = _mm_load_ps(m[0]);
-  r1 = _mm_load_ps(m[1]);
-  r2 = _mm_load_ps(m[2]);
-  r3 = _mm_load_ps(m[3]);
+  r0 = glmm_load(m[0]);
+  r1 = glmm_load(m[1]);
+  r2 = glmm_load(m[2]);
+  r3 = glmm_load(m[3]);
 
   _MM_TRANSPOSE4_PS(r0, r1, r2, r3);
 
-  _mm_store_ps(dest[0], r0);
-  _mm_store_ps(dest[1], r1);
-  _mm_store_ps(dest[2], r2);
-  _mm_store_ps(dest[3], r3);
+  glmm_store(dest[0], r0);
+  glmm_store(dest[1], r1);
+  glmm_store(dest[2], r2);
+  glmm_store(dest[3], r3);
 }
 
 CGLM_INLINE
@@ -51,36 +51,36 @@ glm_mat4_mul_sse2(mat4 m1, mat4 m2, mat4 dest) {
   __m128 l0, l1, l2, l3, r;
 
-  l0 = _mm_load_ps(m1[0]);
-  l1 = _mm_load_ps(m1[1]);
-  l2 = _mm_load_ps(m1[2]);
-  l3 = _mm_load_ps(m1[3]);
+  l0 = glmm_load(m1[0]);
+  l1 = glmm_load(m1[1]);
+  l2 = glmm_load(m1[2]);
+  l3 = glmm_load(m1[3]);
 
-  r = _mm_load_ps(m2[0]);
-  _mm_store_ps(dest[0],
-               _mm_add_ps(_mm_add_ps(_mm_mul_ps(_mm_shuffle1_ps1(r, 0), l0),
-                                     _mm_mul_ps(_mm_shuffle1_ps1(r, 1), l1)),
-                          _mm_add_ps(_mm_mul_ps(_mm_shuffle1_ps1(r, 2), l2),
-                                     _mm_mul_ps(_mm_shuffle1_ps1(r, 3), l3))));
+  r = glmm_load(m2[0]);
+  glmm_store(dest[0],
+             _mm_add_ps(_mm_add_ps(_mm_mul_ps(_mm_shuffle1_ps1(r, 0), l0),
+                                   _mm_mul_ps(_mm_shuffle1_ps1(r, 1), l1)),
+                        _mm_add_ps(_mm_mul_ps(_mm_shuffle1_ps1(r, 2), l2),
+                                   _mm_mul_ps(_mm_shuffle1_ps1(r, 3), l3))));
 
-  r = _mm_load_ps(m2[1]);
-  _mm_store_ps(dest[1],
-               _mm_add_ps(_mm_add_ps(_mm_mul_ps(_mm_shuffle1_ps1(r, 0), l0),
-                                     _mm_mul_ps(_mm_shuffle1_ps1(r, 1), l1)),
-                          _mm_add_ps(_mm_mul_ps(_mm_shuffle1_ps1(r, 2), l2),
-                                     _mm_mul_ps(_mm_shuffle1_ps1(r, 3), l3))));
+  r = glmm_load(m2[1]);
+  glmm_store(dest[1],
+             _mm_add_ps(_mm_add_ps(_mm_mul_ps(_mm_shuffle1_ps1(r, 0), l0),
+                                   _mm_mul_ps(_mm_shuffle1_ps1(r, 1), l1)),
+                        _mm_add_ps(_mm_mul_ps(_mm_shuffle1_ps1(r, 2), l2),
+                                   _mm_mul_ps(_mm_shuffle1_ps1(r, 3), l3))));
 
-  r = _mm_load_ps(m2[2]);
-  _mm_store_ps(dest[2],
-               _mm_add_ps(_mm_add_ps(_mm_mul_ps(_mm_shuffle1_ps1(r, 0), l0),
-                                     _mm_mul_ps(_mm_shuffle1_ps1(r, 1), l1)),
-                          _mm_add_ps(_mm_mul_ps(_mm_shuffle1_ps1(r, 2), l2),
-                                     _mm_mul_ps(_mm_shuffle1_ps1(r, 3), l3))));
+  r = glmm_load(m2[2]);
+  glmm_store(dest[2],
+             _mm_add_ps(_mm_add_ps(_mm_mul_ps(_mm_shuffle1_ps1(r, 0), l0),
+                                   _mm_mul_ps(_mm_shuffle1_ps1(r, 1), l1)),
+                        _mm_add_ps(_mm_mul_ps(_mm_shuffle1_ps1(r, 2), l2),
+                                   _mm_mul_ps(_mm_shuffle1_ps1(r, 3), l3))));
 
-  r = _mm_load_ps(m2[3]);
-  _mm_store_ps(dest[3],
-               _mm_add_ps(_mm_add_ps(_mm_mul_ps(_mm_shuffle1_ps1(r, 0), l0),
-                                     _mm_mul_ps(_mm_shuffle1_ps1(r, 1), l1)),
-                          _mm_add_ps(_mm_mul_ps(_mm_shuffle1_ps1(r, 2), l2),
-                                     _mm_mul_ps(_mm_shuffle1_ps1(r, 3), l3))));
+  r = glmm_load(m2[3]);
+  glmm_store(dest[3],
+             _mm_add_ps(_mm_add_ps(_mm_mul_ps(_mm_shuffle1_ps1(r, 0), l0),
+                                   _mm_mul_ps(_mm_shuffle1_ps1(r, 1), l1)),
+                        _mm_add_ps(_mm_mul_ps(_mm_shuffle1_ps1(r, 2), l2),
+                                   _mm_mul_ps(_mm_shuffle1_ps1(r, 3), l3))));
 }
 
 CGLM_INLINE
@@ -88,18 +88,18 @@ void
 glm_mat4_mulv_sse2(mat4 m, vec4 v, vec4 dest) {
   __m128 x0, x1, x2;
 
-  x0 = _mm_load_ps(v);
-  x1 = _mm_add_ps(_mm_mul_ps(_mm_load_ps(m[0]),
+  x0 = glmm_load(v);
+  x1 = _mm_add_ps(_mm_mul_ps(glmm_load(m[0]),
                              _mm_shuffle1_ps1(x0, 0)),
-                  _mm_mul_ps(_mm_load_ps(m[1]),
+                  _mm_mul_ps(glmm_load(m[1]),
                              _mm_shuffle1_ps1(x0, 1)));
-  x2 = _mm_add_ps(_mm_mul_ps(_mm_load_ps(m[2]),
+  x2 = _mm_add_ps(_mm_mul_ps(glmm_load(m[2]),
                              _mm_shuffle1_ps1(x0, 2)),
-                  _mm_mul_ps(_mm_load_ps(m[3]),
+                  _mm_mul_ps(glmm_load(m[3]),
                              _mm_shuffle1_ps1(x0, 3)));
 
-  _mm_store_ps(dest, _mm_add_ps(x1, x2));
+  glmm_store(dest, _mm_add_ps(x1, x2));
 }
 
 CGLM_INLINE
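glm_mat4_mulv_sse2 evaluates m * v as a linear combination of columns: _mm_shuffle1_ps1 splats one component of v, which then scales one column of m. The scalar equivalent, as a sketch of mine:

typedef float vec4[4];
typedef float mat4c[4][4]; /* column-major; name chosen to avoid clashes */

static void mat4_mulv_ref(mat4c m, vec4 v, vec4 dest) {
  for (int r = 0; r < 4; r++)
    dest[r] = m[0][r] * v[0] + m[1][r] * v[1]
            + m[2][r] * v[2] + m[3][r] * v[3];
}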
@@ -108,10 +108,10 @@ glm_mat4_det_sse2(mat4 mat) {
   __m128 r0, r1, r2, r3, x0, x1, x2;
 
   /* 127 <- 0, [square] det(A) = det(At) */
-  r0 = _mm_load_ps(mat[0]); /* d c b a */
-  r1 = _mm_load_ps(mat[1]); /* h g f e */
-  r2 = _mm_load_ps(mat[2]); /* l k j i */
-  r3 = _mm_load_ps(mat[3]); /* p o n m */
+  r0 = glmm_load(mat[0]); /* d c b a */
+  r1 = glmm_load(mat[1]); /* h g f e */
+  r2 = glmm_load(mat[2]); /* l k j i */
+  r3 = glmm_load(mat[3]); /* p o n m */
 
   /*
    t[1] = j * p - n * l;
@@ -166,10 +166,10 @@ glm_mat4_inv_fast_sse2(mat4 mat, mat4 dest) {
          x0, x1, x2, x3, x4, x5, x6, x7;
 
   /* 127 <- 0 */
-  r0 = _mm_load_ps(mat[0]); /* d c b a */
-  r1 = _mm_load_ps(mat[1]); /* h g f e */
-  r2 = _mm_load_ps(mat[2]); /* l k j i */
-  r3 = _mm_load_ps(mat[3]); /* p o n m */
+  r0 = glmm_load(mat[0]); /* d c b a */
+  r1 = glmm_load(mat[1]); /* h g f e */
+  r2 = glmm_load(mat[2]); /* l k j i */
+  r3 = glmm_load(mat[3]); /* p o n m */
 
   x0 = _mm_shuffle_ps(r2, r3, _MM_SHUFFLE(3, 2, 3, 2)); /* p o l k */
   x1 = _mm_shuffle1_ps(x0, 1, 3, 3, 3);                 /* l p p p */
@@ -275,10 +275,10 @@ glm_mat4_inv_fast_sse2(mat4 mat, mat4 dest) {
   x0 = _mm_add_ps(x0, _mm_shuffle1_ps(x0, 1, 0, 0, 1));
   x0 = _mm_rcp_ps(x0);
 
-  _mm_store_ps(dest[0], _mm_mul_ps(v0, x0));
-  _mm_store_ps(dest[1], _mm_mul_ps(v1, x0));
-  _mm_store_ps(dest[2], _mm_mul_ps(v2, x0));
-  _mm_store_ps(dest[3], _mm_mul_ps(v3, x0));
+  glmm_store(dest[0], _mm_mul_ps(v0, x0));
+  glmm_store(dest[1], _mm_mul_ps(v1, x0));
+  glmm_store(dest[2], _mm_mul_ps(v2, x0));
+  glmm_store(dest[3], _mm_mul_ps(v3, x0));
 }
 
 CGLM_INLINE
@@ -290,10 +290,10 @@ glm_mat4_inv_sse2(mat4 mat, mat4 dest) {
          x0, x1, x2, x3, x4, x5, x6, x7;
 
   /* 127 <- 0 */
-  r0 = _mm_load_ps(mat[0]); /* d c b a */
-  r1 = _mm_load_ps(mat[1]); /* h g f e */
-  r2 = _mm_load_ps(mat[2]); /* l k j i */
-  r3 = _mm_load_ps(mat[3]); /* p o n m */
+  r0 = glmm_load(mat[0]); /* d c b a */
+  r1 = glmm_load(mat[1]); /* h g f e */
+  r2 = glmm_load(mat[2]); /* l k j i */
+  r3 = glmm_load(mat[3]); /* p o n m */
 
   x0 = _mm_shuffle_ps(r2, r3, _MM_SHUFFLE(3, 2, 3, 2)); /* p o l k */
   x1 = _mm_shuffle1_ps(x0, 1, 3, 3, 3);                 /* l p p p */
@@ -399,10 +399,10 @@ glm_mat4_inv_sse2(mat4 mat, mat4 dest) {
   x0 = _mm_add_ps(x0, _mm_shuffle1_ps(x0, 1, 0, 0, 1));
   x0 = _mm_div_ps(_mm_set1_ps(1.0f), x0);
 
-  _mm_store_ps(dest[0], _mm_mul_ps(v0, x0));
-  _mm_store_ps(dest[1], _mm_mul_ps(v1, x0));
-  _mm_store_ps(dest[2], _mm_mul_ps(v2, x0));
-  _mm_store_ps(dest[3], _mm_mul_ps(v3, x0));
+  glmm_store(dest[0], _mm_mul_ps(v0, x0));
+  glmm_store(dest[1], _mm_mul_ps(v1, x0));
+  glmm_store(dest[2], _mm_mul_ps(v2, x0));
+  glmm_store(dest[3], _mm_mul_ps(v3, x0));
 }
 #endif
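Apart from the rename, these hunks also show the one place glm_mat4_inv_fast_sse2 and glm_mat4_inv_sse2 differ: the fast path multiplies by _mm_rcp_ps(det), an approximate reciprocal with roughly 12 bits of precision, while the exact path uses a true division. A quick comparison sketch (mine; the printed rcp value is hardware-dependent):

#include <stdio.h>
#include <xmmintrin.h>

int main(void) {
  float out[4];
  __m128 x = _mm_set1_ps(3.0f);

  _mm_storeu_ps(out, _mm_rcp_ps(x));
  printf("rcp: %.9f\n", out[0]); /* approximately 0.333251953 */

  _mm_storeu_ps(out, _mm_div_ps(_mm_set1_ps(1.0f), x));
  printf("div: %.9f\n", out[0]); /* 0.333333343, nearest float to 1/3 */
  return 0;
}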


@@ -14,56 +14,33 @@
 CGLM_INLINE
 void
-glm_quat_slerp_sse2(versor q,
-                    versor r,
-                    float  t,
-                    versor dest) {
-  /* https://en.wikipedia.org/wiki/Slerp */
-  float cosTheta, sinTheta, angle, a, b, c;
-  __m128 xmm_q;
-
-  xmm_q = _mm_load_ps(q);
-
-  cosTheta = glm_vec4_dot(q, r);
-  if (cosTheta < 0.0f) {
-    _mm_store_ps(q,
-                 _mm_xor_ps(xmm_q,
-                            _mm_set1_ps(-0.f)));
-    cosTheta = -cosTheta;
-  }
-
-  if (cosTheta >= 1.0f) {
-    _mm_store_ps(dest, xmm_q);
-    return;
-  }
-
-  sinTheta = sqrtf(1.0f - cosTheta * cosTheta);
-
-  c = 1.0f - t;
-
-  /* LERP */
-  if (sinTheta < 0.001f) {
-    _mm_store_ps(dest, _mm_add_ps(_mm_mul_ps(_mm_set1_ps(c),
-                                             xmm_q),
-                                  _mm_mul_ps(_mm_set1_ps(t),
-                                             _mm_load_ps(r))));
-    return;
-  }
-
-  /* SLERP */
-  angle = acosf(cosTheta);
-  a = sinf(c * angle);
-  b = sinf(t * angle);
-
-  _mm_store_ps(dest,
-               _mm_div_ps(_mm_add_ps(_mm_mul_ps(_mm_set1_ps(a),
-                                                xmm_q),
-                                     _mm_mul_ps(_mm_set1_ps(b),
-                                                _mm_load_ps(r))),
-                          _mm_set1_ps(sinTheta)));
+glm_quat_mul_sse2(versor p, versor q, versor dest) {
+  /*
+   + (a1 b2 + b1 a2 + c1 d2 - d1 c2)i
+   + (a1 c2 - b1 d2 + c1 a2 + d1 b2)j
+   + (a1 d2 + b1 c2 - c1 b2 + d1 a2)k
+     a1 a2 - b1 b2 - c1 c2 - d1 d2
+   */
+  __m128 xp, xq, x0, r;
+
+  xp = glmm_load(p); /* 3 2 1 0 */
+  xq = glmm_load(q);
+
+  r  = _mm_mul_ps(_mm_shuffle1_ps1(xp, 3), xq);
+
+  x0 = _mm_xor_ps(_mm_shuffle1_ps1(xp, 0), _mm_set_ps(-0.f, 0.f, -0.f, 0.f));
+  r  = _mm_add_ps(r, _mm_mul_ps(x0, _mm_shuffle1_ps(xq, 0, 1, 2, 3)));
+
+  x0 = _mm_xor_ps(_mm_shuffle1_ps1(xp, 1), _mm_set_ps(-0.f, -0.f, 0.f, 0.f));
+  r  = _mm_add_ps(r, _mm_mul_ps(x0, _mm_shuffle1_ps(xq, 1, 0, 3, 2)));
+
+  x0 = _mm_xor_ps(_mm_shuffle1_ps1(xp, 2), _mm_set_ps(-0.f, 0.f, 0.f, -0.f));
+  r  = _mm_add_ps(r, _mm_mul_ps(x0, _mm_shuffle1_ps(xq, 2, 3, 0, 1)));
+
+  glmm_store(dest, r);
 }
 #endif
 
 #endif /* cglm_quat_simd_h */
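The sign masks in the new glm_quat_mul_sse2 encode exactly the Hamilton product spelled out in its header comment (the minus signs of that comment were lost in rendering and are restored above), with cglm's x,y,z,w component order: w, i.e. lane 3, is the real part a. A scalar reference (mine, not part of the commit):

#include <stdio.h>

typedef float versor[4]; /* x y z w, stand-in for cglm's type */

static void quat_mul_ref(versor p, versor q, versor dest) {
  float a1 = p[3], b1 = p[0], c1 = p[1], d1 = p[2];
  float a2 = q[3], b2 = q[0], c2 = q[1], d2 = q[2];

  dest[0] = a1 * b2 + b1 * a2 + c1 * d2 - d1 * c2; /* i */
  dest[1] = a1 * c2 - b1 * d2 + c1 * a2 + d1 * b2; /* j */
  dest[2] = a1 * d2 + b1 * c2 - c1 * b2 + d1 * a2; /* k */
  dest[3] = a1 * a2 - b1 * b2 - c1 * c2 - d1 * d2; /* real */
}

int main(void) {
  versor p = {1, 0, 0, 0}, q = {0, 0, 0, 1}, dest; /* p = i, q = 1 */
  quat_mul_ref(p, q, dest);
  printf("%g %g %g %g\n", dest[0], dest[1], dest[2], dest[3]); /* 1 0 0 0 */
  return 0;
}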