arm neon: optimize affine with neon

commit 376cf31ee7
parent d28b381dd6
Author: Recep Aslantas
Date:   2021-05-01 02:46:14 +03:00


@@ -17,29 +17,32 @@ void
 glm_mul_neon(mat4 m1, mat4 m2, mat4 dest) {
   /* D = R * L (Column-Major) */
 
-  glmm_128 l0, l1, l2, l3, r0, r1, r2, r3, v0, v1, v2, v3;
+  glmm_128 l, r0, r1, r2, r3, v0, v1, v2, v3;
 
-  l0 = glmm_load(m1[0]); r0 = glmm_load(m2[0]);
-  l1 = glmm_load(m1[1]); r1 = glmm_load(m2[1]);
-  l2 = glmm_load(m1[2]); r2 = glmm_load(m2[2]);
-  l3 = glmm_load(m1[3]); r3 = glmm_load(m2[3]);
+  l  = glmm_load(m1[0]);
+  r0 = glmm_load(m2[0]);
+  r1 = glmm_load(m2[1]);
+  r2 = glmm_load(m2[2]);
+  r3 = glmm_load(m2[3]);
 
-  v0 = vmulq_f32(glmm_splat_x(r0), l0);
-  v1 = vmulq_f32(glmm_splat_x(r1), l0);
-  v2 = vmulq_f32(glmm_splat_x(r2), l0);
-  v3 = vmulq_f32(glmm_splat_x(r3), l0);
+  v0 = vmulq_f32(glmm_splat_x(r0), l);
+  v1 = vmulq_f32(glmm_splat_x(r1), l);
+  v2 = vmulq_f32(glmm_splat_x(r2), l);
+  v3 = vmulq_f32(glmm_splat_x(r3), l);
 
-  v0 = glmm_fmadd(glmm_splat_y(r0), l1, v0);
-  v1 = glmm_fmadd(glmm_splat_y(r1), l1, v1);
-  v2 = glmm_fmadd(glmm_splat_y(r2), l1, v2);
-  v3 = glmm_fmadd(glmm_splat_y(r3), l1, v3);
+  l  = glmm_load(m1[1]);
+  v0 = glmm_fmadd(glmm_splat_y(r0), l, v0);
+  v1 = glmm_fmadd(glmm_splat_y(r1), l, v1);
+  v2 = glmm_fmadd(glmm_splat_y(r2), l, v2);
+  v3 = glmm_fmadd(glmm_splat_y(r3), l, v3);
 
-  v0 = glmm_fmadd(glmm_splat_z(r0), l2, v0);
-  v1 = glmm_fmadd(glmm_splat_z(r1), l2, v1);
-  v2 = glmm_fmadd(glmm_splat_z(r2), l2, v2);
-  v3 = glmm_fmadd(glmm_splat_z(r3), l2, v3);
+  l  = glmm_load(m1[2]);
+  v0 = glmm_fmadd(glmm_splat_z(r0), l, v0);
+  v1 = glmm_fmadd(glmm_splat_z(r1), l, v1);
+  v2 = glmm_fmadd(glmm_splat_z(r2), l, v2);
+  v3 = glmm_fmadd(glmm_splat_z(r3), l, v3);
 
-  v3 = glmm_fmadd(glmm_splat_w(r3), l3, v3);
+  v3 = glmm_fmadd(glmm_splat_w(r3), glmm_load(m1[3]), v3);
 
   glmm_store(dest[0], v0);
   glmm_store(dest[1], v1);
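
Note: the change above (repeated for glm_mul_rot_neon below) is one idea applied twice. Instead of keeping every column of m1 live in its own register (l0..l3) for the whole function, a single register l is reused, and the next column of m1 is reloaded just before the group of fmadds that consumes it. This should reduce NEON register pressure, which matters most on 32-bit ARM where only sixteen 128-bit q registers are available, and lets each reload overlap the latency of the preceding multiply-adds. The last column m1[3] is needed only once, so it is folded directly into the final fmadd.
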
@@ -52,23 +55,26 @@ void
 glm_mul_rot_neon(mat4 m1, mat4 m2, mat4 dest) {
   /* D = R * L (Column-Major) */
 
-  glmm_128 l0, l1, l2, r0, r1, r2, v0, v1, v2;
+  glmm_128 l, r0, r1, r2, v0, v1, v2;
 
-  l0 = glmm_load(m1[0]); r0 = glmm_load(m2[0]);
-  l1 = glmm_load(m1[1]); r1 = glmm_load(m2[1]);
-  l2 = glmm_load(m1[2]); r2 = glmm_load(m2[2]);
+  l  = glmm_load(m1[0]);
+  r0 = glmm_load(m2[0]);
+  r1 = glmm_load(m2[1]);
+  r2 = glmm_load(m2[2]);
 
-  v0 = vmulq_f32(glmm_splat_x(r0), l0);
-  v1 = vmulq_f32(glmm_splat_x(r1), l0);
-  v2 = vmulq_f32(glmm_splat_x(r2), l0);
+  v0 = vmulq_f32(glmm_splat_x(r0), l);
+  v1 = vmulq_f32(glmm_splat_x(r1), l);
+  v2 = vmulq_f32(glmm_splat_x(r2), l);
 
-  v0 = glmm_fmadd(glmm_splat_y(r0), l1, v0);
-  v1 = glmm_fmadd(glmm_splat_y(r1), l1, v1);
-  v2 = glmm_fmadd(glmm_splat_y(r2), l1, v2);
+  l  = glmm_load(m1[1]);
+  v0 = glmm_fmadd(glmm_splat_y(r0), l, v0);
+  v1 = glmm_fmadd(glmm_splat_y(r1), l, v1);
+  v2 = glmm_fmadd(glmm_splat_y(r2), l, v2);
 
-  v0 = glmm_fmadd(glmm_splat_z(r0), l2, v0);
-  v1 = glmm_fmadd(glmm_splat_z(r1), l2, v1);
-  v2 = glmm_fmadd(glmm_splat_z(r2), l2, v2);
+  l  = glmm_load(m1[2]);
+  v0 = glmm_fmadd(glmm_splat_z(r0), l, v0);
+  v1 = glmm_fmadd(glmm_splat_z(r1), l, v1);
+  v2 = glmm_fmadd(glmm_splat_z(r2), l, v2);
 
   glmm_store(dest[0], v0);
   glmm_store(dest[1], v1);
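
For readers following the math, here is a minimal scalar sketch of what glm_mul_neon computes, assuming cglm's column-major mat4 layout (m[column][row]); mul_affine_sketch is an illustrative name, not part of cglm's API. Each destination column j is splat_x(rj) * l0 + splat_y(rj) * l1 + splat_z(rj) * l2, with the w term added only for the last column, because an affine m2 stores zeros in the w slots of its first three columns:

    #include <string.h>

    typedef float mat4[4][4]; /* column-major, m[col][row], as in cglm */

    /* mul_affine_sketch: illustrative name, not cglm API */
    static void
    mul_affine_sketch(mat4 m1, mat4 m2, mat4 dest) {
      float v[4][4];
      int   i, j;

      for (j = 0; j < 4; j++) {
        for (i = 0; i < 4; i++) {
          v[j][i]  = m2[j][0] * m1[0][i]; /* v  = splat_x(rj) * l, l = m1 col 0 */
          v[j][i] += m2[j][1] * m1[1][i]; /* v += splat_y(rj) * l, l = m1 col 1 */
          v[j][i] += m2[j][2] * m1[2][i]; /* v += splat_z(rj) * l, l = m1 col 2 */
        }
      }

      /* affine shortcut: m2[0..2][3] == 0, so only the last destination
         column needs the w term: v3 += splat_w(r3) * (m1 col 3) */
      for (i = 0; i < 4; i++)
        v[3][i] += m2[3][3] * m1[3][i];

      memcpy(dest, v, sizeof(v)); /* temp copy, so dest may alias m1/m2 */
    }

glm_mul_rot_neon applies the same splat/fmadd pattern to just the three rotation columns, which is why it has no trailing w step.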