Compare commits

...

26 Commits

Author SHA1 Message Date
Recep Aslantas
04eaf9c535 arm, neon: neon/fma support for glm_quat_mul() 2021-04-29 01:12:00 +03:00
Recep Aslantas
bd6641bd0a build: add missing files to build files 2021-04-28 22:45:03 +03:00
Recep Aslantas
4e4bff418d arm, neon: neon/fma support for glm_mat2_mul() 2021-04-28 22:06:46 +03:00
Recep Aslantas
55ebbdbe40 arm, neon: neon/fma support for glm_inv_tr() 2021-04-28 14:46:14 +03:00
Recep Aslantas
e4c35e32fc Merge pull request #190 from ylecuyer/patch-3
Minor typo in doc
2021-04-27 23:52:40 +03:00
Yoann Lecuyer
ec467fef1f Minor typo in doc
I stumbled upon it while reading the doc
2021-04-27 22:09:13 +02:00
Recep Aslantas
1e8865233b Merge pull request #189 from recp/simd-2
ARM Neon Update
2021-04-25 15:20:24 +03:00
Recep Aslantas
155eb109a8 arm, neon: neon/fma support for glm_mul_rot() 2021-04-25 03:49:35 +03:00
Recep Aslantas
2903813765 arm, neon: neon/fma support for glm_mul() 2021-04-25 03:45:00 +03:00
Recep Aslantas
0ab50f7208 arm, neon: update mat4_mul to use FMA 2021-04-25 03:41:39 +03:00
Recep Aslantas
701e015bfd avoid loading vec3 by glmm_load()
* use glmm_set1() for each for now
2021-04-25 02:36:06 +03:00
Recep Aslantas
1fb941a41b drop swizzling helpers for now for simplicity 2021-04-25 02:35:55 +03:00
Recep Aslantas
92151c6328 arm, neon: use div instead of mul by 1 / det for mat4_inv 2021-04-24 18:02:47 +03:00
Recep Aslantas
afac887850 arm, neon: implement mat4 inv with neon 2021-04-24 17:54:01 +03:00
Recep Aslantas
a111693b6b arm, neon: implement mat4 determinant with neon 2021-04-24 15:45:36 +03:00
Recep Aslantas
ce9e5f5575 arm: update glmm swizzling func names 2021-04-24 01:38:04 +03:00
Recep Aslantas
d13842e7de arm: optimize vec4 div with NEON 2021-04-24 00:51:09 +03:00
Recep Aslantas
059e5010e6 arm: define CGLM_ARM64 to identify arm64 2021-04-24 00:44:07 +03:00
Recep Aslantas
65292a94a6 swizzling functions for NEON 2021-04-24 00:00:00 +03:00
Recep Aslantas
f303984aad use unified glmm api for vec4 2021-04-23 23:34:36 +03:00
Recep Aslantas
7d5c4da7cf optimize translate functions 2021-04-23 23:32:48 +03:00
Recep Aslantas
63988a515c glmm: new defines for splat 2021-04-23 23:32:21 +03:00
Recep Aslantas
9725b60d46 rename glmm_shuff1x() to glmm_splat()
* mark glmm_shuff1x() as DEPRECATED
2021-04-23 22:12:57 +03:00
Recep Aslantas
50c93f3d30 Merge pull request #188 from wdouglass/cmake-pkgconfig
configure and install cglm.pc with cmake
2021-04-23 17:12:04 +03:00
Recep Aslantas
f14ca0c3f2 now working on v0.8.2 2021-04-23 17:03:39 +03:00
Woodrow Douglass
0c165cba76 configure and install cglm.pc with cmake 2021-04-23 09:28:55 -04:00
28 changed files with 667 additions and 127 deletions

View File

@@ -1,5 +1,5 @@
cmake_minimum_required(VERSION 3.8.2)
project(cglm VERSION 0.8.1 LANGUAGES C)
project(cglm VERSION 0.8.2 LANGUAGES C)
set(CMAKE_C_STANDARD 11)
set(CMAKE_C_STANDARD_REQUIRED YES)
@@ -126,3 +126,12 @@ install(EXPORT ${PROJECT_NAME}
NAMESPACE ${PROJECT_NAME}::
DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME})
set(PACKAGE_NAME ${PROJECT_NAME})
set(prefix ${CMAKE_INSTALL_PREFIX})
set(exec_prefix ${CMAKE_INSTALL_PREFIX})
set(includedir "\${prefix}/${CMAKE_INSTALL_INCLUDEDIR}")
set(libdir "\${prefix}/${CMAKE_INSTALL_LIBDIR}")
configure_file(${CMAKE_CURRENT_LIST_DIR}/cglm.pc.in ${CMAKE_BINARY_DIR}/cglm.pc @ONLY)
install(FILES ${CMAKE_BINARY_DIR}/cglm.pc
DESTINATION ${CMAKE_INSTALL_LIBDIR}/pkgconfig)

View File

@@ -74,3 +74,7 @@ Link to paper: http://webserver2.tecgraf.puc-rio.br/~mgattass/cg/trbRR/Fast%20Mi
14. ARM NEON: Matrix Vector Multiplication
https://stackoverflow.com/a/57793352/2676533
16. ARM NEON Div
http://github.com/microsoft/DirectXMath

View File

@@ -109,7 +109,10 @@ cglm_simd_avx_HEADERS = include/cglm/simd/avx/mat4.h \
include/cglm/simd/avx/affine.h
cglm_simd_neondir=$(includedir)/cglm/simd/neon
cglm_simd_neon_HEADERS = include/cglm/simd/neon/mat4.h
cglm_simd_neon_HEADERS = include/cglm/simd/neon/mat4.h \
include/cglm/simd/neon/mat2.h \
include/cglm/simd/neon/affine.h \
include/cglm/simd/neon/quat.h
cglm_structdir=$(includedir)/cglm/struct
cglm_struct_HEADERS = include/cglm/struct/mat4.h \

View File

@@ -2,7 +2,7 @@ Pod::Spec.new do |s|
# Description
s.name = "cglm"
s.version = "0.8.0"
s.version = "0.8.1"
s.summary = "📽 Highly Optimized Graphics Math (glm) for C"
s.description = <<-DESC
cglm is math library for graphics programming for C. See the documentation or README for all features.

View File

@@ -7,7 +7,7 @@
#*****************************************************************************
AC_PREREQ([2.69])
AC_INIT([cglm], [0.8.1], [info@recp.me])
AC_INIT([cglm], [0.8.2], [info@recp.me])
AM_INIT_AUTOMAKE([-Wall -Werror foreign subdir-objects serial-tests])
# Don't use the default cflags (-O2 -g), we set ours manually in Makefile.am.

View File

@@ -62,9 +62,9 @@ author = u'Recep Aslantas'
# built documents.
#
# The short X.Y version.
version = u'0.8.1'
version = u'0.8.2'
# The full version, including alpha/beta/rc tags.
release = u'0.8.1'
release = u'0.8.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.

View File

@@ -2,7 +2,7 @@ How to send vector or matrix to OpenGL like API
==================================================
*cglm*'s vector and matrix types are arrays. So you can send them directly to a
function which accecpts pointer. But you may got warnings for matrix because it is
function which accepts pointer. But you may got warnings for matrix because it is
two dimensional array.
Passing / Uniforming Matrix to OpenGL:

View File

@@ -26,6 +26,10 @@
# include "simd/avx/affine.h"
#endif
#ifdef CGLM_NEON_FP
# include "simd/neon/affine.h"
#endif
/*!
* @brief this is similar to glm_mat4_mul but specialized to affine transform
*
@@ -49,6 +53,8 @@ glm_mul(mat4 m1, mat4 m2, mat4 dest) {
glm_mul_avx(m1, m2, dest);
#elif defined( __SSE__ ) || defined( __SSE2__ )
glm_mul_sse2(m1, m2, dest);
#elif defined(CGLM_NEON_FP)
glm_mul_neon(m1, m2, dest);
#else
float a00 = m1[0][0], a01 = m1[0][1], a02 = m1[0][2], a03 = m1[0][3],
a10 = m1[1][0], a11 = m1[1][1], a12 = m1[1][2], a13 = m1[1][3],
@@ -103,6 +109,8 @@ void
glm_mul_rot(mat4 m1, mat4 m2, mat4 dest) {
#if defined( __SSE__ ) || defined( __SSE2__ )
glm_mul_rot_sse2(m1, m2, dest);
#elif defined(CGLM_NEON_FP)
glm_mul_rot_neon(m1, m2, dest);
#else
float a00 = m1[0][0], a01 = m1[0][1], a02 = m1[0][2], a03 = m1[0][3],
a10 = m1[1][0], a11 = m1[1][1], a12 = m1[1][2], a13 = m1[1][3],
@@ -150,6 +158,8 @@ void
glm_inv_tr(mat4 mat) {
#if defined( __SSE__ ) || defined( __SSE2__ )
glm_inv_tr_sse2(mat);
#elif defined(CGLM_NEON_FP)
glm_inv_tr_neon(mat);
#else
CGLM_ALIGN_MAT mat3 r;
CGLM_ALIGN(8) vec3 t;
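The pattern added throughout this change set is a three-way dispatch: NEON when CGLM_NEON_FP is defined, AVX/SSE2 on x86, and the portable C fallback otherwise. A minimal sketch for checking which path a given build compiles, under the assumption that cglm.h pulls in simd/intrin.h (which defines CGLM_NEON_FP and CGLM_SIMD):

#include <cglm/cglm.h>
#include <stdio.h>

int main(void) {
#if defined(CGLM_NEON_FP)
  puts("NEON path: glm_mul_neon / glm_mul_rot_neon / glm_inv_tr_neon");
#elif defined(__AVX__)
  puts("AVX path: glm_mul_avx");
#elif defined(__SSE__) || defined(__SSE2__)
  puts("SSE2 path: glm_mul_sse2");
#else
  puts("portable C path");
#endif
  return 0;
}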

View File

@@ -50,26 +50,22 @@
CGLM_INLINE
void
glm_translate(mat4 m, vec3 v) {
#if defined( __SSE__ ) || defined( __SSE2__ )
#if defined(CGLM_SIMD)
glmm_128 m0, m1, m2, m3;
m0 = glmm_load(m[0]);
m1 = glmm_load(m[1]);
m2 = glmm_load(m[2]);
m3 = glmm_load(m[3]);
glmm_store(m[3],
_mm_add_ps(_mm_add_ps(_mm_mul_ps(glmm_load(m[0]),
_mm_set1_ps(v[0])),
_mm_mul_ps(glmm_load(m[1]),
_mm_set1_ps(v[1]))),
_mm_add_ps(_mm_mul_ps(glmm_load(m[2]),
_mm_set1_ps(v[2])),
glmm_load(m[3]))))
;
glmm_fmadd(m0, glmm_set1(v[0]),
glmm_fmadd(m1, glmm_set1(v[1]),
glmm_fmadd(m2, glmm_set1(v[2]), m3))));
#else
vec4 v1, v2, v3;
glm_vec4_scale(m[0], v[0], v1);
glm_vec4_scale(m[1], v[1], v2);
glm_vec4_scale(m[2], v[2], v3);
glm_vec4_add(v1, m[3], m[3]);
glm_vec4_add(v2, m[3], m[3]);
glm_vec4_add(v3, m[3], m[3]);
glm_vec4_muladds(m[0], v[0], m[3]);
glm_vec4_muladds(m[1], v[1], m[3]);
glm_vec4_muladds(m[2], v[2], m[3]);
#endif
}
@@ -99,12 +95,8 @@ glm_translate_to(mat4 m, vec3 v, mat4 dest) {
CGLM_INLINE
void
glm_translate_x(mat4 m, float x) {
#if defined( __SSE__ ) || defined( __SSE2__ )
glmm_store(m[3],
_mm_add_ps(_mm_mul_ps(glmm_load(m[0]),
_mm_set1_ps(x)),
glmm_load(m[3])))
;
#if defined(CGLM_SIMD)
glmm_store(m[3], glmm_fmadd(glmm_load(m[0]), glmm_set1(x), glmm_load(m[3])));
#else
vec4 v1;
glm_vec4_scale(m[0], x, v1);
@@ -121,12 +113,8 @@ glm_translate_x(mat4 m, float x) {
CGLM_INLINE
void
glm_translate_y(mat4 m, float y) {
#if defined( __SSE__ ) || defined( __SSE2__ )
glmm_store(m[3],
_mm_add_ps(_mm_mul_ps(glmm_load(m[1]),
_mm_set1_ps(y)),
glmm_load(m[3])))
;
#if defined(CGLM_SIMD)
glmm_store(m[3], glmm_fmadd(glmm_load(m[1]), glmm_set1(y), glmm_load(m[3])));
#else
vec4 v1;
glm_vec4_scale(m[1], y, v1);
@@ -143,12 +131,8 @@ glm_translate_y(mat4 m, float y) {
CGLM_INLINE
void
glm_translate_z(mat4 m, float z) {
#if defined( __SSE__ ) || defined( __SSE2__ )
glmm_store(m[3],
_mm_add_ps(_mm_mul_ps(glmm_load(m[2]),
_mm_set1_ps(z)),
glmm_load(m[3])))
;
#if defined(CGLM_SIMD)
glmm_store(m[3], glmm_fmadd(glmm_load(m[2]), glmm_set1(z), glmm_load(m[3])));
#else
vec4 v1;
glm_vec4_scale(m[2], z, v1);
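Both new branches compute the same thing: the translated fourth column is a fused multiply-add chain over the first three columns. A scalar sketch of that equation (illustrative only, not cglm code):

/* m[3] = m[0]*v[0] + m[1]*v[1] + m[2]*v[2] + m[3], one component at a time */
static void translate_ref(float m[4][4], const float v[3]) {
  for (int i = 0; i < 4; i++)
    m[3][i] += m[0][i] * v[0] + m[1][i] * v[1] + m[2][i] * v[2];
}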

View File

@@ -40,6 +40,10 @@
# include "simd/sse2/mat2.h"
#endif
#ifdef CGLM_NEON_FP
# include "simd/neon/mat2.h"
#endif
#define GLM_MAT2_IDENTITY_INIT {{1.0f, 0.0f}, {0.0f, 1.0f}}
#define GLM_MAT2_ZERO_INIT {{0.0f, 0.0f}, {0.0f, 0.0f}}
@@ -130,6 +134,8 @@ void
glm_mat2_mul(mat2 m1, mat2 m2, mat2 dest) {
#if defined( __SSE__ ) || defined( __SSE2__ )
glm_mat2_mul_sse2(m1, m2, dest);
#elif defined(CGLM_NEON_FP)
glm_mat2_mul_neon(m1, m2, dest);
#else
float a00 = m1[0][0], a01 = m1[0][1],
a10 = m1[1][0], a11 = m1[1][1],

View File

@@ -562,6 +562,8 @@ float
glm_mat4_det(mat4 mat) {
#if defined( __SSE__ ) || defined( __SSE2__ )
return glm_mat4_det_sse2(mat);
#elif defined(CGLM_NEON_FP)
return glm_mat4_det_neon(mat);
#else
/* [square] det(A) = det(At) */
float t[6];
@@ -595,6 +597,8 @@ void
glm_mat4_inv(mat4 mat, mat4 dest) {
#if defined( __SSE__ ) || defined( __SSE2__ )
glm_mat4_inv_sse2(mat, dest);
#elif defined(CGLM_NEON_FP)
glm_mat4_inv_neon(mat, dest);
#else
float t[6];
float det;

View File

@@ -63,6 +63,10 @@
# include "simd/sse2/quat.h"
#endif
#ifdef CGLM_NEON_FP
# include "simd/neon/quat.h"
#endif
CGLM_INLINE
void
glm_mat4_mulv(mat4 m, vec4 v, vec4 dest);
@@ -412,6 +416,8 @@ glm_quat_mul(versor p, versor q, versor dest) {
*/
#if defined( __SSE__ ) || defined( __SSE2__ )
glm_quat_mul_sse2(p, q, dest);
#elif defined(CGLM_NEON_FP)
glm_quat_mul_neon(p, q, dest);
#else
dest[0] = p[3] * q[0] + p[0] * q[3] + p[1] * q[2] - p[2] * q[1];
dest[1] = p[3] * q[1] - p[0] * q[2] + p[1] * q[3] + p[2] * q[0];

View File

@@ -10,19 +10,42 @@
#include "intrin.h"
#ifdef CGLM_SIMD_ARM
#if defined(_M_ARM64) || defined(_M_HYBRID_X86_ARM64) || defined(_M_ARM64EC) || defined(__aarch64__)
# define CGLM_ARM64 1
#endif
#define glmm_load(p) vld1q_f32(p)
#define glmm_store(p, a) vst1q_f32(p, a)
#define glmm_set1(x) vdupq_n_f32(x)
#define glmm_128 float32x4_t
#define glmm_splat_x(x) vdupq_lane_f32(vget_low_f32(x), 0)
#define glmm_splat_y(x) vdupq_lane_f32(vget_low_f32(x), 1)
#define glmm_splat_z(x) vdupq_lane_f32(vget_high_f32(x), 0)
#define glmm_splat_w(x) vdupq_lane_f32(vget_high_f32(x), 1)
#define glmm_xor(a, b) \
vreinterpretq_f32_s32(veorq_s32(vreinterpretq_s32_f32(a), \
vreinterpretq_s32_f32(b)))
static inline
float32x4_t
glmm_abs(float32x4_t v) {
return vabsq_f32(v);
}
static inline
float32x4_t
glmm_vhadd(float32x4_t v) {
v = vaddq_f32(v, vrev64q_f32(v));
return vaddq_f32(v, vcombine_f32(vget_high_f32(v), vget_low_f32(v)));
}
static inline
float
glmm_hadd(float32x4_t v) {
#if defined(__aarch64__)
#if CGLM_ARM64
return vaddvq_f32(v);
#else
v = vaddq_f32(v, vrev64q_f32(v));
@@ -79,11 +102,28 @@ glmm_norm_inf(float32x4_t a) {
return glmm_hmax(glmm_abs(a));
}
static inline
float32x4_t
glmm_div(float32x4_t a, float32x4_t b) {
#if CGLM_ARM64
return vdivq_f32(a, b);
#else
/* 2 iterations of Newton-Raphson refinement of reciprocal */
float32x4_t r0, r1;
r0 = vrecpeq_f32(b);
r1 = vrecpsq_f32(r0, b);
r0 = vmulq_f32(r1, r0);
r1 = vrecpsq_f32(r0, b);
r0 = vmulq_f32(r1, r0);
return vmulq_f32(a, r0);
#endif
}
static inline
float32x4_t
glmm_fmadd(float32x4_t a, float32x4_t b, float32x4_t c) {
#if defined(__aarch64__)
return vfmaq_f32(c, a, b);
#if CGLM_ARM64
return vfmaq_f32(c, a, b); /* why vfmaq_f32 is slower than vmlaq_f32 ??? */
#else
return vmlaq_f32(c, a, b);
#endif
@@ -92,7 +132,7 @@ glmm_fmadd(float32x4_t a, float32x4_t b, float32x4_t c) {
static inline
float32x4_t
glmm_fnmadd(float32x4_t a, float32x4_t b, float32x4_t c) {
#if defined(__aarch64__)
#if CGLM_ARM64
return vfmsq_f32(c, a, b);
#else
return vmlsq_f32(c, a, b);
@@ -102,7 +142,7 @@ glmm_fnmadd(float32x4_t a, float32x4_t b, float32x4_t c) {
static inline
float32x4_t
glmm_fmsub(float32x4_t a, float32x4_t b, float32x4_t c) {
#if defined(__aarch64__)
#if CGLM_ARM64
return vfmsq_f32(c, a, b);
#else
return vmlsq_f32(c, a, b);
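On ARMv7 there is no lane-wise divide, so glmm_div() above refines vrecpeq_f32()'s rough reciprocal estimate with two Newton-Raphson steps; vrecpsq_f32(r, b) returns (2 - r*b), so each step computes r = r * (2 - r*b). A scalar sketch of the same refinement (illustrative only; the hard-coded starting value stands in for vrecpeq_f32):

#include <stdio.h>

static float refine_recip(float r, float b) {
  return r * (2.0f - r * b);   /* one Newton-Raphson step toward 1/b */
}

int main(void) {
  float b = 3.0f;
  float r = 0.3f;              /* coarse estimate, as vrecpeq_f32 would give */
  r = refine_recip(r, b);      /* 1st refinement */
  r = refine_recip(r, b);      /* 2nd refinement, matching glmm_div()'s fallback */
  printf("12/b ~= %f (exact %f)\n", 12.0f * r, 12.0f / b);
  return 0;
}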

View File

@@ -0,0 +1,116 @@
/*
* Copyright (c), Recep Aslantas.
*
* MIT License (MIT), http://opensource.org/licenses/MIT
* Full license can be found in the LICENSE file
*/
#ifndef cglm_affine_neon_h
#define cglm_affine_neon_h
#if defined(__ARM_NEON_FP)
#include "../../common.h"
#include "../intrin.h"
CGLM_INLINE
void
glm_mul_neon(mat4 m1, mat4 m2, mat4 dest) {
/* D = R * L (Column-Major) */
glmm_128 l0, l1, l2, l3, r0, r1, r2, r3, v0, v1, v2, v3;
l0 = glmm_load(m1[0]); r0 = glmm_load(m2[0]);
l1 = glmm_load(m1[1]); r1 = glmm_load(m2[1]);
l2 = glmm_load(m1[2]); r2 = glmm_load(m2[2]);
l3 = glmm_load(m1[3]); r3 = glmm_load(m2[3]);
v0 = vmulq_f32(glmm_splat_x(r0), l0);
v1 = vmulq_f32(glmm_splat_x(r1), l0);
v2 = vmulq_f32(glmm_splat_x(r2), l0);
v3 = vmulq_f32(glmm_splat_x(r3), l0);
v0 = glmm_fmadd(glmm_splat_y(r0), l1, v0);
v1 = glmm_fmadd(glmm_splat_y(r1), l1, v1);
v2 = glmm_fmadd(glmm_splat_y(r2), l1, v2);
v3 = glmm_fmadd(glmm_splat_y(r3), l1, v3);
v0 = glmm_fmadd(glmm_splat_z(r0), l2, v0);
v1 = glmm_fmadd(glmm_splat_z(r1), l2, v1);
v2 = glmm_fmadd(glmm_splat_z(r2), l2, v2);
v3 = glmm_fmadd(glmm_splat_z(r3), l2, v3);
v3 = glmm_fmadd(glmm_splat_w(r3), l3, v3);
glmm_store(dest[0], v0);
glmm_store(dest[1], v1);
glmm_store(dest[2], v2);
glmm_store(dest[3], v3);
}
CGLM_INLINE
void
glm_mul_rot_neon(mat4 m1, mat4 m2, mat4 dest) {
/* D = R * L (Column-Major) */
glmm_128 l0, l1, l2, r0, r1, r2, v0, v1, v2;
l0 = glmm_load(m1[0]); r0 = glmm_load(m2[0]);
l1 = glmm_load(m1[1]); r1 = glmm_load(m2[1]);
l2 = glmm_load(m1[2]); r2 = glmm_load(m2[2]);
v0 = vmulq_f32(glmm_splat_x(r0), l0);
v1 = vmulq_f32(glmm_splat_x(r1), l0);
v2 = vmulq_f32(glmm_splat_x(r2), l0);
v0 = glmm_fmadd(glmm_splat_y(r0), l1, v0);
v1 = glmm_fmadd(glmm_splat_y(r1), l1, v1);
v2 = glmm_fmadd(glmm_splat_y(r2), l1, v2);
v0 = glmm_fmadd(glmm_splat_z(r0), l2, v0);
v1 = glmm_fmadd(glmm_splat_z(r1), l2, v1);
v2 = glmm_fmadd(glmm_splat_z(r2), l2, v2);
glmm_store(dest[0], v0);
glmm_store(dest[1], v1);
glmm_store(dest[2], v2);
glmm_store(dest[3], glmm_load(m1[3]));
}
CGLM_INLINE
void
glm_inv_tr_neon(mat4 mat) {
float32x4x4_t vmat;
glmm_128 r0, r1, r2, r3, x0;
vmat = vld4q_f32(mat[0]);
r0 = vmat.val[0];
r1 = vmat.val[1];
r2 = vmat.val[2];
r3 = vmat.val[3];
x0 = glmm_fmadd(r0, glmm_splat_w(r0),
glmm_fmadd(r1, glmm_splat_w(r1),
vmulq_f32(r2, glmm_splat_w(r2))));
x0 = glmm_xor(x0, glmm_set1(-0.f));
glmm_store(mat[0], r0);
glmm_store(mat[1], r1);
glmm_store(mat[2], r2);
glmm_store(mat[3], x0);
mat[0][3] = 0.0f;
mat[1][3] = 0.0f;
mat[2][3] = 0.0f;
mat[3][3] = 1.0f;
/* TODO: ?
zo = vget_high_f32(r3);
vst1_lane_f32(&mat[0][3], zo, 0);
vst1_lane_f32(&mat[1][3], zo, 0);
vst1_lane_f32(&mat[2][3], zo, 0);
vst1_lane_f32(&mat[3][3], zo, 1);
*/
}
#endif
#endif /* cglm_affine_neon_h */
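glm_mul_neon() builds each destination column as a linear combination of m1's columns, weighted by the matching column of m2; because an affine m2 has m2[j][3] == 0 for its first three columns, the splat_w(r)*l3 term is only added for column 3 (and glm_mul_rot_neon() drops it entirely). A scalar reference for one column (illustrative sketch, not cglm code):

/* dest column j = m1[0]*m2[j][0] + m1[1]*m2[j][1] + m1[2]*m2[j][2]
 *                 (+ m1[3]*m2[j][3], only needed when m2[j][3] != 0) */
static void mul_col_ref(const float m1[4][4], const float m2j[4],
                        int include_w, float out[4]) {
  for (int i = 0; i < 4; i++) {
    out[i] = m1[0][i] * m2j[0] + m1[1][i] * m2j[1] + m1[2][i] * m2j[2];
    if (include_w)
      out[i] += m1[3][i] * m2j[3];
  }
}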

View File

@@ -0,0 +1,44 @@
/*
* Copyright (c), Recep Aslantas.
*
* MIT License (MIT), http://opensource.org/licenses/MIT
* Full license can be found in the LICENSE file
*/
#ifndef cglm_mat2_neon_h
#define cglm_mat2_neon_h
#if defined(__ARM_NEON_FP)
#include "../../common.h"
#include "../intrin.h"
CGLM_INLINE
void
glm_mat2_mul_neon(mat2 m1, mat2 m2, mat2 dest) {
float32x4x2_t a1;
glmm_128 x0, x1, x2;
float32x2_t dc, ba;
x1 = glmm_load(m1[0]); /* d c b a */
x2 = glmm_load(m2[0]); /* h g f e */
dc = vget_high_f32(x1);
ba = vget_low_f32(x1);
/* g g e e, h h f f */
a1 = vtrnq_f32(x2, x2);
/*
dest[0][0] = a * e + c * f;
dest[0][1] = b * e + d * f;
dest[1][0] = a * g + c * h;
dest[1][1] = b * g + d * h;
*/
x0 = glmm_fmadd(vcombine_f32(ba, ba), a1.val[0],
vmulq_f32(vcombine_f32(dc, dc), a1.val[1]));
glmm_store(dest[0], x0);
}
#endif
#endif /* cglm_mat2_neon_h */
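The vtrnq/vcombine shuffles above simply line the operands up so that a single glmm_fmadd produces all four products of the standard column-major 2x2 multiply. The scalar reference it mirrors (illustrative sketch):

static void mat2_mul_ref(const float m1[2][2], const float m2[2][2],
                         float dest[2][2]) {
  float a = m1[0][0], b = m1[0][1], c = m1[1][0], d = m1[1][1];
  float e = m2[0][0], f = m2[0][1], g = m2[1][0], h = m2[1][1];
  dest[0][0] = a * e + c * f;
  dest[0][1] = b * e + d * f;
  dest[1][0] = a * g + c * h;
  dest[1][1] = b * g + d * h;
}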

View File

@@ -42,41 +42,38 @@ CGLM_INLINE
void
glm_mat4_mul_neon(mat4 m1, mat4 m2, mat4 dest) {
/* D = R * L (Column-Major) */
float32x4_t l0, l1, l2, l3, r, d0, d1, d2, d3;
l0 = vld1q_f32(m2[0]);
l1 = vld1q_f32(m2[1]);
l2 = vld1q_f32(m2[2]);
l3 = vld1q_f32(m2[3]);
glmm_128 l0, l1, l2, l3, r0, r1, r2, r3, v0, v1, v2, v3;
r = vld1q_f32(m1[0]);
d0 = vmulq_lane_f32(r, vget_low_f32(l0), 0);
d1 = vmulq_lane_f32(r, vget_low_f32(l1), 0);
d2 = vmulq_lane_f32(r, vget_low_f32(l2), 0);
d3 = vmulq_lane_f32(r, vget_low_f32(l3), 0);
l0 = glmm_load(m1[0]); r0 = glmm_load(m2[0]);
l1 = glmm_load(m1[1]); r1 = glmm_load(m2[1]);
l2 = glmm_load(m1[2]); r2 = glmm_load(m2[2]);
l3 = glmm_load(m1[3]); r3 = glmm_load(m2[3]);
r = vld1q_f32(m1[1]);
d0 = vmlaq_lane_f32(d0, r, vget_low_f32(l0), 1);
d1 = vmlaq_lane_f32(d1, r, vget_low_f32(l1), 1);
d2 = vmlaq_lane_f32(d2, r, vget_low_f32(l2), 1);
d3 = vmlaq_lane_f32(d3, r, vget_low_f32(l3), 1);
v0 = vmulq_f32(glmm_splat_x(r0), l0);
v1 = vmulq_f32(glmm_splat_x(r1), l0);
v2 = vmulq_f32(glmm_splat_x(r2), l0);
v3 = vmulq_f32(glmm_splat_x(r3), l0);
r = vld1q_f32(m1[2]);
d0 = vmlaq_lane_f32(d0, r, vget_high_f32(l0), 0);
d1 = vmlaq_lane_f32(d1, r, vget_high_f32(l1), 0);
d2 = vmlaq_lane_f32(d2, r, vget_high_f32(l2), 0);
d3 = vmlaq_lane_f32(d3, r, vget_high_f32(l3), 0);
v0 = glmm_fmadd(glmm_splat_y(r0), l1, v0);
v1 = glmm_fmadd(glmm_splat_y(r1), l1, v1);
v2 = glmm_fmadd(glmm_splat_y(r2), l1, v2);
v3 = glmm_fmadd(glmm_splat_y(r3), l1, v3);
r = vld1q_f32(m1[3]);
d0 = vmlaq_lane_f32(d0, r, vget_high_f32(l0), 1);
d1 = vmlaq_lane_f32(d1, r, vget_high_f32(l1), 1);
d2 = vmlaq_lane_f32(d2, r, vget_high_f32(l2), 1);
d3 = vmlaq_lane_f32(d3, r, vget_high_f32(l3), 1);
v0 = glmm_fmadd(glmm_splat_z(r0), l2, v0);
v1 = glmm_fmadd(glmm_splat_z(r1), l2, v1);
v2 = glmm_fmadd(glmm_splat_z(r2), l2, v2);
v3 = glmm_fmadd(glmm_splat_z(r3), l2, v3);
vst1q_f32(dest[0], d0);
vst1q_f32(dest[1], d1);
vst1q_f32(dest[2], d2);
vst1q_f32(dest[3], d3);
v0 = glmm_fmadd(glmm_splat_w(r0), l3, v0);
v1 = glmm_fmadd(glmm_splat_w(r1), l3, v1);
v2 = glmm_fmadd(glmm_splat_w(r2), l3, v2);
v3 = glmm_fmadd(glmm_splat_w(r3), l3, v3);
glmm_store(dest[0], v0);
glmm_store(dest[1], v1);
glmm_store(dest[2], v2);
glmm_store(dest[3], v3);
}
CGLM_INLINE
@@ -101,5 +98,216 @@ glm_mat4_mulv_neon(mat4 m, vec4 v, vec4 dest) {
vst1q_f32(dest, l0);
}
CGLM_INLINE
float
glm_mat4_det_neon(mat4 mat) {
float32x4_t r0, r1, r2, r3, x0, x1, x2;
float32x2_t ij, op, mn, kl, nn, mm, jj, ii, gh, ef, t12, t34;
float32x4x2_t a1;
float32x4_t x3 = { 0.f, -0.f, 0.f, -0.f };
/* 127 <- 0, [square] det(A) = det(At) */
r0 = glmm_load(mat[0]); /* d c b a */
r1 = vrev64q_f32(glmm_load(mat[1])); /* g h e f */
r2 = vrev64q_f32(glmm_load(mat[2])); /* l k i j */
r3 = vrev64q_f32(glmm_load(mat[3])); /* o p m n */
gh = vget_high_f32(r1);
ef = vget_low_f32(r1);
kl = vget_high_f32(r2);
ij = vget_low_f32(r2);
op = vget_high_f32(r3);
mn = vget_low_f32(r3);
mm = vdup_lane_f32(mn, 1);
nn = vdup_lane_f32(mn, 0);
ii = vdup_lane_f32(ij, 1);
jj = vdup_lane_f32(ij, 0);
/*
t[1] = j * p - n * l;
t[2] = j * o - n * k;
t[3] = i * p - m * l;
t[4] = i * o - m * k;
*/
x0 = glmm_fnmadd(vcombine_f32(kl, kl), vcombine_f32(nn, mm),
vmulq_f32(vcombine_f32(op, op), vcombine_f32(jj, ii)));
t12 = vget_low_f32(x0);
t34 = vget_high_f32(x0);
/* 1 3 1 3 2 4 2 4 */
a1 = vuzpq_f32(x0, x0);
/*
t[0] = k * p - o * l;
t[0] = k * p - o * l;
t[5] = i * n - m * j;
t[5] = i * n - m * j;
*/
x1 = glmm_fnmadd(vcombine_f32(vdup_lane_f32(kl, 0), jj),
vcombine_f32(vdup_lane_f32(op, 1), mm),
vmulq_f32(vcombine_f32(vdup_lane_f32(op, 0), nn),
vcombine_f32(vdup_lane_f32(kl, 1), ii)));
/*
a * (f * t[0] - g * t[1] + h * t[2])
- b * (e * t[0] - g * t[3] + h * t[4])
+ c * (e * t[1] - f * t[3] + h * t[5])
- d * (e * t[2] - f * t[4] + g * t[5])
*/
x2 = glmm_fnmadd(vcombine_f32(vdup_lane_f32(gh, 1), vdup_lane_f32(ef, 0)),
vcombine_f32(vget_low_f32(a1.val[0]), t34),
vmulq_f32(vcombine_f32(ef, vdup_lane_f32(ef, 1)),
vcombine_f32(vget_low_f32(x1), t12)));
x2 = glmm_fmadd(vcombine_f32(vdup_lane_f32(gh, 0), gh),
vcombine_f32(vget_low_f32(a1.val[1]), vget_high_f32(x1)), x2);
x2 = glmm_xor(x2, x3);
return glmm_hadd(vmulq_f32(x2, r0));
}
CGLM_INLINE
void
glm_mat4_inv_neon(mat4 mat, mat4 dest) {
float32x4_t r0, r1, r2, r3,
v0, v1, v2, v3,
t0, t1, t2, t3, t4, t5,
x0, x1, x2, x3, x4, x5, x6, x7, x8;
float32x4x2_t a1;
float32x2_t lp, ko, hg, jn, im, fe, ae, bf, cg, dh;
float32x4_t x9 = { -0.f, 0.f, -0.f, 0.f };
x8 = vrev64q_f32(x9);
/* 127 <- 0 */
r0 = glmm_load(mat[0]); /* d c b a */
r1 = glmm_load(mat[1]); /* h g f e */
r2 = glmm_load(mat[2]); /* l k j i */
r3 = glmm_load(mat[3]); /* p o n m */
/* l p k o, j n i m */
a1 = vzipq_f32(r3, r2);
jn = vget_high_f32(a1.val[0]);
im = vget_low_f32(a1.val[0]);
lp = vget_high_f32(a1.val[1]);
ko = vget_low_f32(a1.val[1]);
hg = vget_high_f32(r1);
x1 = vcombine_f32(vdup_lane_f32(lp, 0), lp); /* l p p p */
x2 = vcombine_f32(vdup_lane_f32(ko, 0), ko); /* k o o o */
x0 = vcombine_f32(vdup_lane_f32(lp, 1), vdup_lane_f32(hg, 1)); /* h h l l */
x3 = vcombine_f32(vdup_lane_f32(ko, 1), vdup_lane_f32(hg, 0)); /* g g k k */
/* t1[0] = k * p - o * l;
t1[0] = k * p - o * l;
t2[0] = g * p - o * h;
t3[0] = g * l - k * h; */
t0 = glmm_fnmadd(x2, x0, vmulq_f32(x3, x1));
fe = vget_low_f32(r1);
x4 = vcombine_f32(vdup_lane_f32(jn, 0), jn); /* j n n n */
x5 = vcombine_f32(vdup_lane_f32(jn, 1), vdup_lane_f32(fe, 1)); /* f f j j */
/* t1[1] = j * p - n * l;
t1[1] = j * p - n * l;
t2[1] = f * p - n * h;
t3[1] = f * l - j * h; */
t1 = glmm_fnmadd(x4, x0, vmulq_f32(x5, x1));
/* t1[2] = j * o - n * k
t1[2] = j * o - n * k;
t2[2] = f * o - n * g;
t3[2] = f * k - j * g; */
t2 = glmm_fnmadd(x4, x3, vmulq_f32(x5, x2));
x6 = vcombine_f32(vdup_lane_f32(im, 1), vdup_lane_f32(fe, 0)); /* e e i i */
x7 = vcombine_f32(vdup_lane_f32(im, 0), im); /* i m m m */
/* t1[3] = i * p - m * l;
t1[3] = i * p - m * l;
t2[3] = e * p - m * h;
t3[3] = e * l - i * h; */
t3 = glmm_fnmadd(x7, x0, vmulq_f32(x6, x1));
/* t1[4] = i * o - m * k;
t1[4] = i * o - m * k;
t2[4] = e * o - m * g;
t3[4] = e * k - i * g; */
t4 = glmm_fnmadd(x7, x3, vmulq_f32(x6, x2));
/* t1[5] = i * n - m * j;
t1[5] = i * n - m * j;
t2[5] = e * n - m * f;
t3[5] = e * j - i * f; */
t5 = glmm_fnmadd(x7, x5, vmulq_f32(x6, x4));
/* h d f b, g c e a */
a1 = vtrnq_f32(r0, r1);
x4 = vrev64q_f32(a1.val[0]); /* c g a e */
x5 = vrev64q_f32(a1.val[1]); /* d h b f */
ae = vget_low_f32(x4);
cg = vget_high_f32(x4);
bf = vget_low_f32(x5);
dh = vget_high_f32(x5);
x0 = vcombine_f32(ae, vdup_lane_f32(ae, 1)); /* a a a e */
x1 = vcombine_f32(bf, vdup_lane_f32(bf, 1)); /* b b b f */
x2 = vcombine_f32(cg, vdup_lane_f32(cg, 1)); /* c c c g */
x3 = vcombine_f32(dh, vdup_lane_f32(dh, 1)); /* d d d h */
/*
dest[0][0] = f * t1[0] - g * t1[1] + h * t1[2];
dest[0][1] =-(b * t1[0] - c * t1[1] + d * t1[2]);
dest[0][2] = b * t2[0] - c * t2[1] + d * t2[2];
dest[0][3] =-(b * t3[0] - c * t3[1] + d * t3[2]); */
v0 = glmm_xor(glmm_fmadd(x3, t2, glmm_fnmadd(x2, t1, vmulq_f32(x1, t0))), x8);
/*
dest[2][0] = e * t1[1] - f * t1[3] + h * t1[5];
dest[2][1] =-(a * t1[1] - b * t1[3] + d * t1[5]);
dest[2][2] = a * t2[1] - b * t2[3] + d * t2[5];
dest[2][3] =-(a * t3[1] - b * t3[3] + d * t3[5]);*/
v2 = glmm_xor(glmm_fmadd(x3, t5, glmm_fnmadd(x1, t3, vmulq_f32(x0, t1))), x8);
/*
dest[1][0] =-(e * t1[0] - g * t1[3] + h * t1[4]);
dest[1][1] = a * t1[0] - c * t1[3] + d * t1[4];
dest[1][2] =-(a * t2[0] - c * t2[3] + d * t2[4]);
dest[1][3] = a * t3[0] - c * t3[3] + d * t3[4]; */
v1 = glmm_xor(glmm_fmadd(x3, t4, glmm_fnmadd(x2, t3, vmulq_f32(x0, t0))), x9);
/*
dest[3][0] =-(e * t1[2] - f * t1[4] + g * t1[5]);
dest[3][1] = a * t1[2] - b * t1[4] + c * t1[5];
dest[3][2] =-(a * t2[2] - b * t2[4] + c * t2[5]);
dest[3][3] = a * t3[2] - b * t3[4] + c * t3[5]; */
v3 = glmm_xor(glmm_fmadd(x2, t5, glmm_fnmadd(x1, t4, vmulq_f32(x0, t2))), x9);
/* determinant */
x0 = vcombine_f32(vget_low_f32(vzipq_f32(v0, v1).val[0]),
vget_low_f32(vzipq_f32(v2, v3).val[0]));
/*
x0 = glmm_div(glmm_set1(1.0f), glmm_vhadd(vmulq_f32(x0, r0)));
glmm_store(dest[0], vmulq_f32(v0, x0));
glmm_store(dest[1], vmulq_f32(v1, x0));
glmm_store(dest[2], vmulq_f32(v2, x0));
glmm_store(dest[3], vmulq_f32(v3, x0));
*/
x0 = glmm_vhadd(vmulq_f32(x0, r0));
glmm_store(dest[0], glmm_div(v0, x0));
glmm_store(dest[1], glmm_div(v1, x0));
glmm_store(dest[2], glmm_div(v2, x0));
glmm_store(dest[3], glmm_div(v3, x0));
}
#endif
#endif /* cglm_mat4_neon_h */
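Since glm_mat4_det() and glm_mat4_inv() now route to these NEON kernels, a quick round-trip check is an easy way to validate a NEON build. This sketch only assumes the public cglm API; the epsilon is loose enough for the ARMv7 reciprocal-based glmm_div path:

#include <cglm/cglm.h>
#include <assert.h>
#include <math.h>

int main(void) {
  mat4 m, inv, prod;
  glm_translate_make(m, (vec3){1.0f, 2.0f, 3.0f});
  glm_rotate(m, 0.785398f, (vec3){0.0f, 1.0f, 0.0f});
  glm_mat4_inv(m, inv);              /* glm_mat4_inv_neon on NEON builds */
  glm_mat4_mul(m, inv, prod);
  for (int i = 0; i < 4; i++)
    for (int j = 0; j < 4; j++)
      assert(fabsf(prod[i][j] - (i == j ? 1.0f : 0.0f)) < 1e-4f);
  return 0;
}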

View File

@@ -0,0 +1,56 @@
/*
* Copyright (c), Recep Aslantas.
*
* MIT License (MIT), http://opensource.org/licenses/MIT
* Full license can be found in the LICENSE file
*/
#ifndef cglm_quat_neon_h
#define cglm_quat_neon_h
#if defined(__ARM_NEON_FP)
#include "../../common.h"
#include "../intrin.h"
CGLM_INLINE
void
glm_quat_mul_neon(versor p, versor q, versor dest) {
/*
  + (a1 b2 + b1 a2 + c1 d2 - d1 c2)i
  + (a1 c2 - b1 d2 + c1 a2 + d1 b2)j
  + (a1 d2 + b1 c2 - c1 b2 + d1 a2)k
    a1 a2 - b1 b2 - c1 c2 - d1 d2
*/
glmm_128 xp, xq, xqr, r, x, y, z, s2, s3;
glmm_128 s1 = {-0.f, 0.f, 0.f, -0.f};
float32x2_t qh, ql;
xp = glmm_load(p); /* 3 2 1 0 */
xq = glmm_load(q);
r = vmulq_f32(glmm_splat_w(xp), xq);
x = glmm_splat_x(xp);
y = glmm_splat_y(xp);
z = glmm_splat_z(xp);
ql = vget_high_f32(s1);
s3 = vcombine_f32(ql, ql);
s2 = vzipq_f32(s3, s3).val[0];
xqr = vrev64q_f32(xq);
qh = vget_high_f32(xqr);
ql = vget_low_f32(xqr);
r = glmm_fmadd(glmm_xor(x, s3), vcombine_f32(qh, ql), r);
r = glmm_fmadd(glmm_xor(y, s2), vcombine_f32(vget_high_f32(xq),
vget_low_f32(xq)), r);
r = glmm_fmadd(glmm_xor(z, s1), vcombine_f32(ql, qh), r);
glmm_store(dest, r);
}
#endif
#endif /* cglm_quat_neon_h */
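The xor masks s1/s2/s3 encode the sign pattern of the Hamilton product for cglm's {x, y, z, w} storage (w is the real part, so it lives in lane 3). The scalar formulas the NEON code reproduces, shown here as an illustrative reference:

static void quat_mul_ref(const float p[4], const float q[4], float dest[4]) {
  dest[0] = p[3] * q[0] + p[0] * q[3] + p[1] * q[2] - p[2] * q[1];
  dest[1] = p[3] * q[1] - p[0] * q[2] + p[1] * q[3] + p[2] * q[0];
  dest[2] = p[3] * q[2] + p[0] * q[1] - p[1] * q[0] + p[2] * q[3];
  dest[3] = p[3] * q[3] - p[0] * q[0] - p[1] * q[1] - p[2] * q[2];
}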

View File

@@ -25,29 +25,28 @@ glm_mul_sse2(mat4 m1, mat4 m2, mat4 dest) {
r = glmm_load(m2[0]);
glmm_store(dest[0],
glmm_fmadd(glmm_shuff1x(r, 0), l0,
glmm_fmadd(glmm_shuff1x(r, 1), l1,
_mm_mul_ps(glmm_shuff1x(r, 2), l2))));
glmm_fmadd(glmm_splat(r, 0), l0,
glmm_fmadd(glmm_splat(r, 1), l1,
_mm_mul_ps(glmm_splat(r, 2), l2))));
r = glmm_load(m2[1]);
glmm_store(dest[1],
glmm_fmadd(glmm_shuff1x(r, 0), l0,
glmm_fmadd(glmm_shuff1x(r, 1), l1,
_mm_mul_ps(glmm_shuff1x(r, 2), l2))));
glmm_fmadd(glmm_splat(r, 0), l0,
glmm_fmadd(glmm_splat(r, 1), l1,
_mm_mul_ps(glmm_splat(r, 2), l2))));
r = glmm_load(m2[2]);
glmm_store(dest[2],
glmm_fmadd(glmm_shuff1x(r, 0), l0,
glmm_fmadd(glmm_shuff1x(r, 1), l1,
_mm_mul_ps(glmm_shuff1x(r, 2), l2))));
glmm_fmadd(glmm_splat(r, 0), l0,
glmm_fmadd(glmm_splat(r, 1), l1,
_mm_mul_ps(glmm_splat(r, 2), l2))));
r = glmm_load(m2[3]);
glmm_store(dest[3],
glmm_fmadd(glmm_shuff1x(r, 0), l0,
glmm_fmadd(glmm_shuff1x(r, 1), l1,
glmm_fmadd(glmm_shuff1x(r, 2), l2,
_mm_mul_ps(glmm_shuff1x(r, 3),
l3)))));
glmm_fmadd(glmm_splat(r, 0), l0,
glmm_fmadd(glmm_splat(r, 1), l1,
glmm_fmadd(glmm_splat(r, 2), l2,
_mm_mul_ps(glmm_splat(r, 3), l3)))));
}
CGLM_INLINE
@@ -63,22 +62,22 @@ glm_mul_rot_sse2(mat4 m1, mat4 m2, mat4 dest) {
r = glmm_load(m2[0]);
glmm_store(dest[0],
glmm_fmadd(glmm_shuff1x(r, 0), l0,
glmm_fmadd(glmm_shuff1x(r, 1), l1,
_mm_mul_ps(glmm_shuff1x(r, 2), l2))));
glmm_fmadd(glmm_splat(r, 0), l0,
glmm_fmadd(glmm_splat(r, 1), l1,
_mm_mul_ps(glmm_splat(r, 2), l2))));
r = glmm_load(m2[1]);
glmm_store(dest[1],
glmm_fmadd(glmm_shuff1x(r, 0), l0,
glmm_fmadd(glmm_shuff1x(r, 1), l1,
_mm_mul_ps(glmm_shuff1x(r, 2), l2))));
glmm_fmadd(glmm_splat(r, 0), l0,
glmm_fmadd(glmm_splat(r, 1), l1,
_mm_mul_ps(glmm_splat(r, 2), l2))));
r = glmm_load(m2[2]);
glmm_store(dest[2],
glmm_fmadd(glmm_shuff1x(r, 0), l0,
glmm_fmadd(glmm_shuff1x(r, 1), l1,
_mm_mul_ps(glmm_shuff1x(r, 2), l2))));
glmm_fmadd(glmm_splat(r, 0), l0,
glmm_fmadd(glmm_splat(r, 1), l1,
_mm_mul_ps(glmm_splat(r, 2), l2))));
glmm_store(dest[3], l3);
}

View File

@@ -60,11 +60,10 @@ glm_mat4_mul_sse2(mat4 m1, mat4 m2, mat4 dest) {
\
r = glmm_load(m2[C]); \
glmm_store(dest[C], \
glmm_fmadd(glmm_shuff1x(r, 0), l0, \
glmm_fmadd(glmm_shuff1x(r, 1), l1, \
glmm_fmadd(glmm_shuff1x(r, 2), l2, \
_mm_mul_ps(glmm_shuff1x(r, 3), \
l3)))));
glmm_fmadd(glmm_splat(r, 0), l0, \
glmm_fmadd(glmm_splat(r, 1), l1, \
glmm_fmadd(glmm_splat(r, 2), l2, \
_mm_mul_ps(glmm_splat(r, 3), l3)))));
XX(0);
XX(1);
@@ -80,11 +79,11 @@ glm_mat4_mulv_sse2(mat4 m, vec4 v, vec4 dest) {
__m128 x0, x1;
x0 = glmm_load(v);
x1 = glmm_fmadd(glmm_load(m[0]), glmm_shuff1x(x0, 0),
glmm_fmadd(glmm_load(m[1]), glmm_shuff1x(x0, 1),
glmm_fmadd(glmm_load(m[2]), glmm_shuff1x(x0, 2),
x1 = glmm_fmadd(glmm_load(m[0]), glmm_splat(x0, 0),
glmm_fmadd(glmm_load(m[1]), glmm_splat(x0, 1),
glmm_fmadd(glmm_load(m[2]), glmm_splat(x0, 2),
_mm_mul_ps(glmm_load(m[3]),
glmm_shuff1x(x0, 3)))));
glmm_splat(x0, 3)))));
glmm_store(dest, x1);
}

View File

@@ -27,20 +27,19 @@ glm_quat_mul_sse2(versor p, versor q, versor dest) {
xp = glmm_load(p); /* 3 2 1 0 */
xq = glmm_load(q);
r = _mm_mul_ps(glmm_shuff1x(xp, 3), xq);
r = _mm_mul_ps(glmm_splat(xp, 3), xq);
x0 = _mm_xor_ps(glmm_shuff1x(xp, 0), _mm_set_ps(-0.f, 0.f, -0.f, 0.f));
x0 = _mm_xor_ps(glmm_splat(xp, 0), _mm_set_ps(-0.f, 0.f, -0.f, 0.f));
r = _mm_add_ps(r, _mm_mul_ps(x0, glmm_shuff1(xq, 0, 1, 2, 3)));
x0 = _mm_xor_ps(glmm_shuff1x(xp, 1), _mm_set_ps(-0.f, -0.f, 0.f, 0.f));
x0 = _mm_xor_ps(glmm_splat(xp, 1), _mm_set_ps(-0.f, -0.f, 0.f, 0.f));
r = _mm_add_ps(r, _mm_mul_ps(x0, glmm_shuff1(xq, 1, 0, 3, 2)));
x0 = _mm_xor_ps(glmm_shuff1x(xp, 2), _mm_set_ps(-0.f, 0.f, 0.f, -0.f));
x0 = _mm_xor_ps(glmm_splat(xp, 2), _mm_set_ps(-0.f, 0.f, 0.f, -0.f));
r = _mm_add_ps(r, _mm_mul_ps(x0, glmm_shuff1(xq, 2, 3, 0, 1)));
glmm_store(dest, r);
}
#endif
#endif /* cglm_quat_simd_h */

View File

@@ -18,6 +18,9 @@
# define glmm_store(p, a) _mm_store_ps(p, a)
#endif
#define glmm_set1(x) _mm_set1_ps(x)
#define glmm_128 __m128
#ifdef CGLM_USE_INT_DOMAIN
# define glmm_shuff1(xmm, z, y, x, w) \
_mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(xmm), \
@@ -27,7 +30,16 @@
_mm_shuffle_ps(xmm, xmm, _MM_SHUFFLE(z, y, x, w))
#endif
#define glmm_splat(x, lane) glmm_shuff1(x, lane, lane, lane, lane)
#define glmm_splat_x(x) glmm_splat(x, 0)
#define glmm_splat_y(x) glmm_splat(x, 1)
#define glmm_splat_z(x) glmm_splat(x, 2)
#define glmm_splat_w(x) glmm_splat(x, 3)
/* glmm_shuff1x() is DEPRECATED!, use glmm_splat() */
#define glmm_shuff1x(xmm, x) glmm_shuff1(xmm, x, x, x, x)
#define glmm_shuff2(a, b, z0, y0, x0, w0, z1, y1, x1, w1) \
glmm_shuff1(_mm_shuffle_ps(a, b, _MM_SHUFFLE(z0, y0, x0, w0)), \
z1, y1, x1, w1)
@@ -89,7 +101,7 @@ glmm_vhmin(__m128 v) {
__m128 x0, x1, x2;
x0 = _mm_movehl_ps(v, v); /* [2, 3, 2, 3] */
x1 = _mm_min_ps(x0, v); /* [0|2, 1|3, 2|2, 3|3] */
x2 = glmm_shuff1x(x1, 1); /* [1|3, 1|3, 1|3, 1|3] */
x2 = glmm_splat(x1, 1); /* [1|3, 1|3, 1|3, 1|3] */
return _mm_min_ss(x1, x2);
}
@@ -105,7 +117,7 @@ glmm_vhmax(__m128 v) {
__m128 x0, x1, x2;
x0 = _mm_movehl_ps(v, v); /* [2, 3, 2, 3] */
x1 = _mm_max_ps(x0, v); /* [0|2, 1|3, 2|2, 3|3] */
x2 = glmm_shuff1x(x1, 1); /* [1|3, 1|3, 1|3, 1|3] */
x2 = glmm_splat(x1, 1); /* [1|3, 1|3, 1|3, 1|3] */
return _mm_max_ss(x1, x2);
}
@@ -197,6 +209,12 @@ glmm_store3(float v[3], __m128 vx) {
_mm_store_ss(&v[2], glmm_shuff1(vx, 2, 2, 2, 2));
}
static inline
__m128
glmm_div(__m128 a, __m128 b) {
return _mm_div_ps(a, b);
}
/* enable FMA macro for MSVC? */
#if defined(_MSC_VER) && !defined(__FMA__) && defined(__AVX2__)
# define __FMA__ 1

View File

@@ -224,10 +224,10 @@ glm_vec4_sign(vec4 v, vec4 dest) {
x0 = glmm_load(v);
x1 = _mm_set_ps(0.0f, 0.0f, 1.0f, -1.0f);
x2 = glmm_shuff1x(x1, 2);
x2 = glmm_splat(x1, 2);
x3 = _mm_and_ps(_mm_cmpgt_ps(x0, x2), glmm_shuff1x(x1, 1));
x4 = _mm_and_ps(_mm_cmplt_ps(x0, x2), glmm_shuff1x(x1, 0));
x3 = _mm_and_ps(_mm_cmpgt_ps(x0, x2), glmm_splat(x1, 1));
x4 = _mm_and_ps(_mm_cmplt_ps(x0, x2), glmm_splat(x1, 0));
glmm_store(dest, _mm_or_ps(x3, x4));
#else

View File

@@ -473,8 +473,8 @@ glm_vec4_scale_as(vec4 v, float s, vec4 dest) {
CGLM_INLINE
void
glm_vec4_div(vec4 a, vec4 b, vec4 dest) {
#if defined( __SSE__ ) || defined( __SSE2__ )
glmm_store(dest, _mm_div_ps(glmm_load(a), glmm_load(b)));
#if defined(CGLM_SIMD)
glmm_store(dest, glmm_div(glmm_load(a), glmm_load(b)));
#else
dest[0] = a[0] / b[0];
dest[1] = a[1] / b[1];
@@ -590,10 +590,8 @@ glm_vec4_muladd(vec4 a, vec4 b, vec4 dest) {
CGLM_INLINE
void
glm_vec4_muladds(vec4 a, float s, vec4 dest) {
#if defined( __SSE__ ) || defined( __SSE2__ )
glmm_store(dest, glmm_fmadd(glmm_load(a), _mm_set1_ps(s), glmm_load(dest)));
#elif defined(CGLM_NEON_FP)
glmm_store(dest, glmm_fmadd(glmm_load(a), vdupq_n_f32(s), glmm_load(dest)));
#if defined(CGLM_SIMD)
glmm_store(dest, glmm_fmadd(glmm_load(a), glmm_set1(s), glmm_load(dest)));
#else
dest[0] += a[0] * s;
dest[1] += a[1] * s;
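With glmm_load/glmm_store/glmm_set1/glmm_fmadd/glmm_div now defined identically for SSE and NEON, code that previously needed separate SSE and NEON branches collapses into one CGLM_SIMD branch, as in glm_vec4_muladds() above. A minimal sketch of writing such a helper against the unified layer (assuming cglm's simd/intrin.h is in scope, as it is inside cglm's own headers):

/* y += a * x, vectorized on both SSE and NEON, scalar otherwise */
static inline void axpy4(float y[4], float a, const float x[4]) {
#if defined(CGLM_SIMD)
  glmm_store(y, glmm_fmadd(glmm_load(x), glmm_set1(a), glmm_load(y)));
#else
  for (int i = 0; i < 4; i++)
    y[i] += a * x[i];
#endif
}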

View File

@@ -10,6 +10,6 @@
#define CGLM_VERSION_MAJOR 0
#define CGLM_VERSION_MINOR 8
#define CGLM_VERSION_PATCH 1
#define CGLM_VERSION_PATCH 2
#endif /* cglm_version_h */

View File

@@ -1,5 +1,5 @@
project('cglm', 'c',
version : '0.8.1',
version : '0.8.2',
license : 'mit',
default_options : [
'c_std=c11',

View File

@@ -7,6 +7,25 @@
#include "test_common.h"
#ifndef glm_affine_mat_test_guard
#define glm_affine_mat_test_guard
CGLM_INLINE
void
glm_inv_tr_raw(mat4 mat) {
CGLM_ALIGN_MAT mat3 r;
CGLM_ALIGN(8) vec3 t;
/* rotate */
glm_mat4_pick3t(mat, r);
glm_mat4_ins3(r, mat);
/* translate */
glm_mat3_mulv(r, mat[3], t);
glm_vec3_negate(t);
glm_vec3_copy(t, mat[3]);
}
#endif
TEST_IMPL(GLM_PREFIX, mul) {
mat4 m1 = GLM_MAT4_IDENTITY_INIT;
mat4 m2 = GLM_MAT4_IDENTITY_INIT;
@@ -81,6 +100,12 @@ TEST_IMPL(GLM_PREFIX, inv_tr) {
GLM(mat4_inv)(m1, m2);
GLM(inv_tr)(m2);
ASSERTIFY(test_assert_mat4_eq(m1, m2))
/* test with raw */
glm_mat4_copy(m1, m2);
glm_inv_tr_raw(m2);
GLM(inv_tr)(m1);
ASSERTIFY(test_assert_mat4_eq(m1, m2))
}
TEST_SUCCESS

View File

@@ -90,7 +90,10 @@
<ClInclude Include="..\include\cglm\simd\avx\affine.h" />
<ClInclude Include="..\include\cglm\simd\avx\mat4.h" />
<ClInclude Include="..\include\cglm\simd\intrin.h" />
<ClInclude Include="..\include\cglm\simd\neon\affine.h" />
<ClInclude Include="..\include\cglm\simd\neon\mat2.h" />
<ClInclude Include="..\include\cglm\simd\neon\mat4.h" />
<ClInclude Include="..\include\cglm\simd\neon\quat.h" />
<ClInclude Include="..\include\cglm\simd\sse2\affine.h" />
<ClInclude Include="..\include\cglm\simd\sse2\mat2.h" />
<ClInclude Include="..\include\cglm\simd\sse2\mat3.h" />

View File

@@ -370,5 +370,14 @@
<ClInclude Include="..\include\cglm\struct\affine2d.h">
<Filter>include\cglm\struct</Filter>
</ClInclude>
<ClInclude Include="..\include\cglm\simd\neon\affine.h">
<Filter>include\cglm\simd\neon</Filter>
</ClInclude>
<ClInclude Include="..\include\cglm\simd\neon\mat2.h">
<Filter>include\cglm\simd\neon</Filter>
</ClInclude>
<ClInclude Include="..\include\cglm\simd\neon\quat.h">
<Filter>include\cglm\simd\neon</Filter>
</ClInclude>
</ItemGroup>
</Project>