sse: optimize affine with sse

* re-order instructions for ILP
This commit is contained in:
Recep Aslantas
2021-05-01 02:58:14 +03:00
parent 376cf31ee7
commit 5b7bc522ac

View File

@@ -16,76 +16,80 @@ CGLM_INLINE
void void
glm_mul_sse2(mat4 m1, mat4 m2, mat4 dest) { glm_mul_sse2(mat4 m1, mat4 m2, mat4 dest) {
/* D = R * L (Column-Major) */ /* D = R * L (Column-Major) */
__m128 l0, l1, l2, l3, r; glmm_128 l, r0, r1, r2, r3, v0, v1, v2, v3;
l0 = glmm_load(m1[0]); l = glmm_load(m1[0]);
l1 = glmm_load(m1[1]); r0 = glmm_load(m2[0]);
l2 = glmm_load(m1[2]); r1 = glmm_load(m2[1]);
l3 = glmm_load(m1[3]); r2 = glmm_load(m2[2]);
r3 = glmm_load(m2[3]);
r = glmm_load(m2[0]); v0 = _mm_mul_ps(glmm_splat_x(r0), l);
glmm_store(dest[0], v1 = _mm_mul_ps(glmm_splat_x(r1), l);
glmm_fmadd(glmm_splat(r, 0), l0, v2 = _mm_mul_ps(glmm_splat_x(r2), l);
glmm_fmadd(glmm_splat(r, 1), l1, v3 = _mm_mul_ps(glmm_splat_x(r3), l);
_mm_mul_ps(glmm_splat(r, 2), l2))));
r = glmm_load(m2[1]); l = glmm_load(m1[1]);
glmm_store(dest[1], v0 = glmm_fmadd(glmm_splat_y(r0), l, v0);
glmm_fmadd(glmm_splat(r, 0), l0, v1 = glmm_fmadd(glmm_splat_y(r1), l, v1);
glmm_fmadd(glmm_splat(r, 1), l1, v2 = glmm_fmadd(glmm_splat_y(r2), l, v2);
_mm_mul_ps(glmm_splat(r, 2), l2)))); v3 = glmm_fmadd(glmm_splat_y(r3), l, v3);
r = glmm_load(m2[2]); l = glmm_load(m1[2]);
glmm_store(dest[2], v0 = glmm_fmadd(glmm_splat_z(r0), l, v0);
glmm_fmadd(glmm_splat(r, 0), l0, v1 = glmm_fmadd(glmm_splat_z(r1), l, v1);
glmm_fmadd(glmm_splat(r, 1), l1, v2 = glmm_fmadd(glmm_splat_z(r2), l, v2);
_mm_mul_ps(glmm_splat(r, 2), l2)))); v3 = glmm_fmadd(glmm_splat_z(r3), l, v3);
r = glmm_load(m2[3]); l = glmm_load(m1[3]);
glmm_store(dest[3], v3 = glmm_fmadd(glmm_splat_w(r3), l, v3);
glmm_fmadd(glmm_splat(r, 0), l0,
glmm_fmadd(glmm_splat(r, 1), l1, glmm_store(dest[0], v0);
glmm_fmadd(glmm_splat(r, 2), l2, glmm_store(dest[1], v1);
_mm_mul_ps(glmm_splat(r, 3), l3))))); glmm_store(dest[2], v2);
glmm_store(dest[3], v3);
} }
CGLM_INLINE CGLM_INLINE
void void
glm_mul_rot_sse2(mat4 m1, mat4 m2, mat4 dest) { glm_mul_rot_sse2(mat4 m1, mat4 m2, mat4 dest) {
/* D = R * L (Column-Major) */ /* D = R * L (Column-Major) */
__m128 l0, l1, l2, l3, r;
l0 = glmm_load(m1[0]); glmm_128 l, r0, r1, r2, r3, v0, v1, v2, v3;
l1 = glmm_load(m1[1]);
l2 = glmm_load(m1[2]);
l3 = glmm_load(m1[3]);
r = glmm_load(m2[0]); l = glmm_load(m1[0]);
glmm_store(dest[0], r0 = glmm_load(m2[0]);
glmm_fmadd(glmm_splat(r, 0), l0, r1 = glmm_load(m2[1]);
glmm_fmadd(glmm_splat(r, 1), l1, r2 = glmm_load(m2[2]);
_mm_mul_ps(glmm_splat(r, 2), l2)))); r3 = glmm_load(m2[3]);
r = glmm_load(m2[1]); v0 = _mm_mul_ps(glmm_splat_x(r0), l);
glmm_store(dest[1], v1 = _mm_mul_ps(glmm_splat_x(r1), l);
glmm_fmadd(glmm_splat(r, 0), l0, v2 = _mm_mul_ps(glmm_splat_x(r2), l);
glmm_fmadd(glmm_splat(r, 1), l1, v3 = _mm_mul_ps(glmm_splat_x(r3), l);
_mm_mul_ps(glmm_splat(r, 2), l2))));
l = glmm_load(m1[1]);
v0 = glmm_fmadd(glmm_splat_y(r0), l, v0);
v1 = glmm_fmadd(glmm_splat_y(r1), l, v1);
v2 = glmm_fmadd(glmm_splat_y(r2), l, v2);
v3 = glmm_fmadd(glmm_splat_y(r3), l, v3);
r = glmm_load(m2[2]); l = glmm_load(m1[2]);
glmm_store(dest[2], v0 = glmm_fmadd(glmm_splat_z(r0), l, v0);
glmm_fmadd(glmm_splat(r, 0), l0, v1 = glmm_fmadd(glmm_splat_z(r1), l, v1);
glmm_fmadd(glmm_splat(r, 1), l1, v2 = glmm_fmadd(glmm_splat_z(r2), l, v2);
_mm_mul_ps(glmm_splat(r, 2), l2)))); v3 = glmm_fmadd(glmm_splat_z(r3), l, v3);
glmm_store(dest[3], l3); glmm_store(dest[0], v0);
glmm_store(dest[1], v1);
glmm_store(dest[2], v2);
glmm_store(dest[3], glmm_load(m1[3]));
} }
CGLM_INLINE CGLM_INLINE
void void
glm_inv_tr_sse2(mat4 mat) { glm_inv_tr_sse2(mat4 mat) {
__m128 r0, r1, r2, r3, x0, x1; __m128 r0, r1, r2, r3, x0, x1, x2, x3, x4, x5;
r0 = glmm_load(mat[0]); r0 = glmm_load(mat[0]);
r1 = glmm_load(mat[1]); r1 = glmm_load(mat[1]);
@@ -95,10 +99,13 @@ glm_inv_tr_sse2(mat4 mat) {
_MM_TRANSPOSE4_PS(r0, r1, r2, x1); _MM_TRANSPOSE4_PS(r0, r1, r2, x1);
x0 = glmm_fmadd(r0, glmm_shuff1(r3, 0, 0, 0, 0), x2 = glmm_shuff1(r3, 0, 0, 0, 0);
glmm_fmadd(r1, glmm_shuff1(r3, 1, 1, 1, 1), x3 = glmm_shuff1(r3, 1, 1, 1, 1);
_mm_mul_ps(r2, glmm_shuff1(r3, 2, 2, 2, 2)))); x4 = glmm_shuff1(r3, 2, 2, 2, 2);
x0 = _mm_xor_ps(x0, _mm_set1_ps(-0.f)); x5 = _mm_set1_ps(-0.f);
x0 = glmm_fmadd(r0, x2, glmm_fmadd(r1, x3, _mm_mul_ps(r2, x4)));
x0 = _mm_xor_ps(x0, x5);
x0 = _mm_add_ps(x0, x1); x0 = _mm_add_ps(x0, x1);