From 253f5ba5f6e654607765365c5b3417158e129c95 Mon Sep 17 00:00:00 2001
From: Recep Aslantas
Date: Fri, 16 Sep 2016 20:24:55 +0300
Subject: [PATCH] optimize matrix4x4 inverse for SSE2

---
 include/cglm-intrin.h   | 11 +++++
 include/cglm-mat-simd.h | 92 +++++++++++++++++++++++++++++++++++++++++
 include/cglm-mat.h      |  4 ++
 3 files changed, 107 insertions(+)

diff --git a/include/cglm-intrin.h b/include/cglm-intrin.h
index 79ebd3c..a41a518 100644
--- a/include/cglm-intrin.h
+++ b/include/cglm-intrin.h
@@ -16,6 +16,17 @@
    _mm_add_ps(_mm_mul_ps(_mm_set1_ps(*(L)), R0),                             \
               _mm_mul_ps(_mm_set1_ps(*(L + 1)), R1))
 
+#define _mm_msub_ps(M00, M01, M10, M11)                                      \
+  _mm_sub_ps(_mm_mul_ps(M00, M01),                                           \
+             _mm_mul_ps(M10, M11))
+
+#define _mm_shuffle1_ps(a, z, y, x, w)                                       \
+  _mm_shuffle_ps(a, a, _MM_SHUFFLE(z, y, x, w))
+
+#define _mm_shuffle2_ps(a, b, z0, y0, x0, w0, z1, y1, x1, w1)                \
+  _mm_shuffle1_ps(_mm_shuffle_ps(a, b, _MM_SHUFFLE(z0, y0, x0, w0)),         \
+                  z1, y1, x1, w1)
+
 #define _mm_madd4_ps(L, R0, R1, R2, R3)                                      \
   _mm_add_ps(_mm_madd_ps(L, R0, R1),                                         \
              _mm_madd_ps(L + 2, R2, R3))
diff --git a/include/cglm-mat-simd.h b/include/cglm-mat-simd.h
index a2535a7..f140efd 100644
--- a/include/cglm-mat-simd.h
+++ b/include/cglm-mat-simd.h
@@ -58,4 +58,96 @@
     _mm_store_ps(M[3], _mm_mul_ps(_mm_load_ps(M[3]), xmm0));                 \
   } while (0)
 
+CGLM_INLINE
+void
+glm_mat4_inv_sse2(mat4 mat, mat4 dest) {
+  __m128 v0, v1, v2, v3, dt, t0, t1, t2, t3, t4;
+  float a, b, c, d,
+        e, f, g, h,
+        i, j, k, l,
+        m, n, o, p;
+
+  a = mat[0][0], b = mat[0][1], c = mat[0][2], d = mat[0][3],
+  e = mat[1][0], f = mat[1][1], g = mat[1][2], h = mat[1][3],
+  i = mat[2][0], j = mat[2][1], k = mat[2][2], l = mat[2][3],
+  m = mat[3][0], n = mat[3][1], o = mat[3][2], p = mat[3][3];
+
+  t3 = _mm_sub_ps(_mm_mul_ps(_mm_set_ps(k, j, j, i),
+                             _mm_set_ps(p, p, o, p)),
+                  _mm_mul_ps(_mm_set_ps(o, n, n, m),
+                             _mm_set_ps(l, l, k, l)));
+
+  t4 = _mm_sub_ps(_mm_mul_ps(_mm_set1_ps(i),
+                             _mm_set_ps(0.0f, 0.0f, o, n)),
+                  _mm_mul_ps(_mm_set1_ps(m),
+                             _mm_set_ps(0.0f, 0.0f, k, j)));
+
+  t0 = _mm_shuffle1_ps(t3, 3, 3, 2, 1);
+  t1 = _mm_shuffle2_ps(t3, t4, 1, 1, 2, 0, 1, 0, 0, 3);
+  t2 = _mm_shuffle2_ps(t3, t4, 0, 1, 1, 1, 0, 2, 3, 3);
+
+  v0 = _mm_mul_ps(_mm_set_ps(f, e, e, e), t0);
+  v0 = _mm_sub_ps(v0, _mm_mul_ps(_mm_set_ps(g, g, f, f), t1));
+  v0 = _mm_add_ps(v0, _mm_mul_ps(_mm_set_ps(h, h, h, g), t2));
+  v0 = _mm_xor_ps(v0, _mm_set_ps(0.f, -0.f, 0.f, -0.f));
+
+  v1 = _mm_mul_ps(_mm_set_ps(b, a, a, a), t0);
+  v1 = _mm_sub_ps(v1, _mm_mul_ps(_mm_set_ps(c, c, b, b), t1));
+  v1 = _mm_add_ps(v1, _mm_mul_ps(_mm_set_ps(d, d, d, c), t2));
+  v1 = _mm_xor_ps(v1, _mm_set_ps(-0.f, 0.f, -0.f, 0.f));
+
+  t3 = _mm_sub_ps(_mm_mul_ps(_mm_set_ps(g, f, f, e),
+                             _mm_set_ps(p, p, o, p)),
+                  _mm_mul_ps(_mm_set_ps(o, n, n, m),
+                             _mm_set_ps(h, h, g, h)));
+
+  t4 = _mm_sub_ps(_mm_mul_ps(_mm_set1_ps(e),
+                             _mm_set_ps(0.0f, 0.0f, o, n)),
+                  _mm_mul_ps(_mm_set1_ps(m),
+                             _mm_set_ps(0.0f, 0.0f, g, f)));
+
+  t0 = _mm_shuffle1_ps(t3, 3, 3, 2, 1);
+  t1 = _mm_shuffle2_ps(t3, t4, 1, 1, 2, 0, 1, 0, 0, 3);
+  t2 = _mm_shuffle2_ps(t3, t4, 0, 1, 1, 1, 0, 2, 3, 3);
+
+  v2 = _mm_mul_ps(_mm_set_ps(b, a, a, a), t0);
+  v2 = _mm_sub_ps(v2, _mm_mul_ps(_mm_set_ps(c, c, b, b), t1));
+  v2 = _mm_add_ps(v2, _mm_mul_ps(_mm_set_ps(d, d, d, c), t2));
+  v2 = _mm_xor_ps(v2, _mm_set_ps(0.f, -0.f, 0.f, -0.f));
+
+  t3 = _mm_sub_ps(_mm_mul_ps(_mm_set_ps(g, f, f, e),
+                             _mm_set_ps(l, l, k, l)),
+                  _mm_mul_ps(_mm_set_ps(k, j, j, i),
+                             _mm_set_ps(h, h, g, h)));
+
+  t4 = _mm_sub_ps(_mm_mul_ps(_mm_set1_ps(e),
+                             _mm_set_ps(0.0f, 0.0f, k, j)),
+                  _mm_mul_ps(_mm_set1_ps(i),
+                             _mm_set_ps(0.0f, 0.0f, g, f)));
+
+  t0 = _mm_shuffle1_ps(t3, 3, 3, 2, 1);
+  t1 = _mm_shuffle2_ps(t3, t4, 1, 1, 2, 0, 1, 0, 0, 3);
+  t2 = _mm_shuffle2_ps(t3, t4, 0, 1, 1, 1, 0, 2, 3, 3);
+
+  v3 = _mm_mul_ps(_mm_set_ps(b, a, a, a), t0);
+  v3 = _mm_sub_ps(v3, _mm_mul_ps(_mm_set_ps(c, c, b, b), t1));
+  v3 = _mm_add_ps(v3, _mm_mul_ps(_mm_set_ps(d, d, d, c), t2));
+  v3 = _mm_xor_ps(v3, _mm_set_ps(0.f, -0.f, 0.f, -0.f));
+
+  dt = _mm_mul_ps(_mm_set_ps(a, b, c, d), v0);
+  dt = _mm_add_ps(dt, _mm_shuffle1_ps(dt, 0, 1, 2, 3));
+  dt = _mm_add_ps(dt, _mm_shuffle1_ps(dt, 1, 3, 3, 1));
+  dt = _mm_rcp_ps(dt);
+
+  v0 = _mm_shuffle1_ps(v0, 0, 1, 2, 3);
+  v1 = _mm_shuffle1_ps(v1, 0, 1, 2, 3);
+  v2 = _mm_shuffle1_ps(v2, 0, 1, 2, 3);
+  v3 = _mm_shuffle1_ps(v3, 0, 1, 2, 3);
+
+  _mm_store_ps(dest[0], _mm_mul_ps(v0, dt));
+  _mm_store_ps(dest[1], _mm_mul_ps(v1, dt));
+  _mm_store_ps(dest[2], _mm_mul_ps(v2, dt));
+  _mm_store_ps(dest[3], _mm_mul_ps(v3, dt));
+}
+
 #endif /* cglm_mat_sse_h */
diff --git a/include/cglm-mat.h b/include/cglm-mat.h
index 0c62581..7ffd691 100644
--- a/include/cglm-mat.h
+++ b/include/cglm-mat.h
@@ -178,6 +178,9 @@ glm_mat4_det(mat4 mat) {
 CGLM_INLINE
 void
 glm_mat4_inv(mat4 mat, mat4 dest) {
+#if defined( __SSE__ ) || defined( __SSE2__ )
+  glm_mat4_inv_sse2(mat, dest);
+#else
   float t[6];
   float det;
   float a, b, c, d,
@@ -223,6 +226,7 @@ glm_mat4_inv(mat4 mat, mat4 dest) {
                  + c * dest[2][0] + d * dest[3][0]);
 
   glm_mat4_scale_p(dest, det);
+#endif
 }
 
 #endif /* cglm_mat_h */
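
Not part of the patch: a minimal sanity-check sketch for the new path. It assumes the umbrella header cglm.h, that glm_mat4_mul from cglm-mat.h is available, and that mat4 storage is 16-byte aligned as _mm_load_ps/_mm_store_ps require; the tolerance is deliberately loose because _mm_rcp_ps only approximates 1/det.

#include <stdio.h>
#include <math.h>
#include "cglm.h"   /* assumed umbrella header; adjust to your include setup */

int
main(void) {
  /* a simple invertible transform: non-uniform scale plus translation */
  mat4 m = {
    {2.0f, 0.0f, 0.0f, 0.0f},
    {0.0f, 4.0f, 0.0f, 0.0f},
    {0.0f, 0.0f, 8.0f, 0.0f},
    {1.0f, 2.0f, 3.0f, 1.0f}
  };
  mat4 inv, prod;
  int  i, j, ok;

  /* dispatches to glm_mat4_inv_sse2() when __SSE__/__SSE2__ is defined;
     mat4 data must be 16-byte aligned for _mm_load_ps/_mm_store_ps */
  glm_mat4_inv(m, inv);

  /* m * inv should be close to identity */
  glm_mat4_mul(m, inv, prod);

  ok = 1;
  for (i = 0; i < 4; i++)
    for (j = 0; j < 4; j++)
      if (fabsf(prod[i][j] - (i == j ? 1.0f : 0.0f)) > 1e-2f)
        ok = 0;

  printf("mat4 inverse: %s\n", ok ? "ok" : "FAILED");
  return 0;
}

The check multiplies the matrix by its computed inverse and compares against identity, which exercises the SSE path end to end without depending on exact cofactor values.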