Compare commits


4 Commits

Author SHA1 Message Date
Recep Aslantas
6626d2b74f Update affine.h 2020-11-22 01:25:34 +03:00
Recep Aslantas
5bda762df6 use epsilon to compare results in glm_uniscaled() 2020-11-22 01:14:18 +03:00
Recep Aslantas
f9824a8dc6 test: use custom epsilon to compare 2020-11-22 00:59:48 +03:00
Recep Aslantas
ccd3058adc remove CGLM_USE_DEFAULT_EPSILON
* to override float epsilon we just need to define GLM_FLT_EPSILON
* CGLM_USE_DEFAULT_EPSILON was redundant; it also forced overriding the system default epsilon, which may not be a good idea because not all systems support smaller epsilon values
2020-11-22 00:38:57 +03:00
29 changed files with 316 additions and 793 deletions
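A quick sketch of the override path that the ccd3058adc commit message describes: defining GLM_FLT_EPSILON before including any cglm header replaces the comparison epsilon. The 1e-4f value below is purely illustrative, and it assumes glm_vec3_eq_eps (used by glm_uniscaled in this diff) compares against that macro.

```c
/* sketch only: override cglm's comparison epsilon before including it */
#define GLM_FLT_EPSILON 1e-4f   /* illustrative value, not a recommendation */

#include <stdio.h>
#include <cglm/cglm.h>

int main(void) {
  vec3 v = {1.0f, 1.00001f, 0.99999f};

  /* glm_vec3_eq_eps checks every component against a value using the
     comparison epsilon; with the loose 1e-4f above this reports true */
  printf("all components ~= 1.0: %d\n", glm_vec3_eq_eps(v, 1.0f));
  return 0;
}
```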

View File

@@ -1,5 +1,5 @@
cmake_minimum_required(VERSION 3.8.2) cmake_minimum_required(VERSION 3.8.2)
project(cglm VERSION 0.8.2 LANGUAGES C) project(cglm VERSION 0.8.0 LANGUAGES C)
set(CMAKE_C_STANDARD 11) set(CMAKE_C_STANDARD 11)
set(CMAKE_C_STANDARD_REQUIRED YES) set(CMAKE_C_STANDARD_REQUIRED YES)
@@ -18,7 +18,7 @@ else(CGLM_STATIC)
endif() endif()
if(CGLM_USE_C99) if(CGLM_USE_C99)
set(CMAKE_C_STANDARD 99) set(C_STANDARD 99)
endif() endif()
if(MSVC) if(MSVC)
@@ -93,11 +93,6 @@ target_include_directories(${PROJECT_NAME}
${CMAKE_CURRENT_SOURCE_DIR}/src ${CMAKE_CURRENT_SOURCE_DIR}/src
) )
# Target for header-only usage
add_library(${PROJECT_NAME}_headers INTERFACE)
target_include_directories(${PROJECT_NAME}_headers INTERFACE
${CMAKE_CURRENT_SOURCE_DIR}/include)
# Test Configuration # Test Configuration
if(CGLM_USE_TEST) if(CGLM_USE_TEST)
include(CTest) include(CTest)
@@ -122,16 +117,6 @@ export(TARGETS ${PROJECT_NAME}
) )
install(EXPORT ${PROJECT_NAME} install(EXPORT ${PROJECT_NAME}
FILE "${PROJECT_NAME}Config.cmake"
NAMESPACE ${PROJECT_NAME}:: NAMESPACE ${PROJECT_NAME}::
DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}) DESTINATION ${CMAKE_INSTALL_LIBDIR}/${PROJECT_NAME}/cmake)
set(PACKAGE_NAME ${PROJECT_NAME})
set(prefix ${CMAKE_INSTALL_PREFIX})
set(exec_prefix ${CMAKE_INSTALL_PREFIX})
set(includedir "\${prefix}/${CMAKE_INSTALL_INCLUDEDIR}")
set(libdir "\${prefix}/${CMAKE_INSTALL_LIBDIR}")
configure_file(${CMAKE_CURRENT_LIST_DIR}/cglm.pc.in ${CMAKE_BINARY_DIR}/cglm.pc @ONLY)
install(FILES ${CMAKE_BINARY_DIR}/cglm.pc
DESTINATION ${CMAKE_INSTALL_LIBDIR}/pkgconfig)

View File

@@ -74,7 +74,3 @@ Link to paper: http://webserver2.tecgraf.puc-rio.br/~mgattass/cg/trbRR/Fast%20Mi
14. ARM NEON: Matrix Vector Multiplication 14. ARM NEON: Matrix Vector Multiplication
https://stackoverflow.com/a/57793352/2676533 https://stackoverflow.com/a/57793352/2676533
16. ARM NEON Div
http://github.com/microsoft/DirectXMath

View File

@@ -168,24 +168,6 @@ option(CGLM_USE_C99 "" OFF) # C11
option(CGLM_USE_TEST "Enable Tests" OFF) # for make check - make test option(CGLM_USE_TEST "Enable Tests" OFF) # for make check - make test
``` ```
#### Use as header-only library with your CMake project
This requires no building or installation of cglm.
* Example:
``` cmake
cmake_minimum_required(VERSION 3.8.2)
project(<Your Project Name>)
add_executable(${PROJECT_NAME} src/main.c)
target_link_libraries(${LIBRARY_NAME} PRIVATE
cglm_headers)
add_subdirectory(external/cglm/ EXCLUDE_FROM_ALL)
```
#### Use with your CMake project #### Use with your CMake project
* Example: * Example:
```cmake ```cmake

View File

@@ -2,7 +2,7 @@ Pod::Spec.new do |s|
# Description # Description
s.name = "cglm" s.name = "cglm"
s.version = "0.8.1" s.version = "0.7.9"
s.summary = "📽 Highly Optimized Graphics Math (glm) for C" s.summary = "📽 Highly Optimized Graphics Math (glm) for C"
s.description = <<-DESC s.description = <<-DESC
cglm is math library for graphics programming for C. See the documentation or README for all features. cglm is math library for graphics programming for C. See the documentation or README for all features.

View File

@@ -7,7 +7,7 @@
#***************************************************************************** #*****************************************************************************
AC_PREREQ([2.69]) AC_PREREQ([2.69])
AC_INIT([cglm], [0.8.2], [info@recp.me]) AC_INIT([cglm], [0.8.0], [info@recp.me])
AM_INIT_AUTOMAKE([-Wall -Werror foreign subdir-objects serial-tests]) AM_INIT_AUTOMAKE([-Wall -Werror foreign subdir-objects serial-tests])
# Don't use the default cflags (-O2 -g), we set ours manually in Makefile.am. # Don't use the default cflags (-O2 -g), we set ours manually in Makefile.am.

View File

@@ -32,22 +32,6 @@ If you don't want to install **cglm** to your system's folder you can get static
option(CGLM_USE_C99 "" OFF) # C11 option(CGLM_USE_C99 "" OFF) # C11
option(CGLM_USE_TEST "Enable Tests" OFF) # for make check - make test option(CGLM_USE_TEST "Enable Tests" OFF) # for make check - make test
**Use as header-only library with your CMake project example**
This requires no building or installation of cglm.
.. code-block:: CMake
:linenos:
cmake_minimum_required(VERSION 3.8.2)
project(<Your Project Name>)
add_executable(${PROJECT_NAME} src/main.c)
target_link_libraries(${LIBRARY_NAME} PRIVATE
cglm_headers)
add_subdirectory(external/cglm/ EXCLUDE_FROM_ALL)
**Use with your CMake project example** **Use with your CMake project example**
.. code-block:: CMake .. code-block:: CMake

View File

@@ -62,9 +62,9 @@ author = u'Recep Aslantas'
# built documents. # built documents.
# #
# The short X.Y version. # The short X.Y version.
version = u'0.8.2' version = u'0.8.0'
# The full version, including alpha/beta/rc tags. # The full version, including alpha/beta/rc tags.
release = u'0.8.2' release = u'0.8.0'
# The language for content autogenerated by Sphinx. Refer to documentation # The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages. # for a list of supported languages.

View File

@@ -26,10 +26,6 @@
# include "simd/avx/affine.h" # include "simd/avx/affine.h"
#endif #endif
#ifdef CGLM_NEON_FP
# include "simd/neon/affine.h"
#endif
/*! /*!
* @brief this is similar to glm_mat4_mul but specialized to affine transform * @brief this is similar to glm_mat4_mul but specialized to affine transform
* *
@@ -53,8 +49,6 @@ glm_mul(mat4 m1, mat4 m2, mat4 dest) {
glm_mul_avx(m1, m2, dest); glm_mul_avx(m1, m2, dest);
#elif defined( __SSE__ ) || defined( __SSE2__ ) #elif defined( __SSE__ ) || defined( __SSE2__ )
glm_mul_sse2(m1, m2, dest); glm_mul_sse2(m1, m2, dest);
#elif defined(CGLM_NEON_FP)
glm_mul_neon(m1, m2, dest);
#else #else
float a00 = m1[0][0], a01 = m1[0][1], a02 = m1[0][2], a03 = m1[0][3], float a00 = m1[0][0], a01 = m1[0][1], a02 = m1[0][2], a03 = m1[0][3],
a10 = m1[1][0], a11 = m1[1][1], a12 = m1[1][2], a13 = m1[1][3], a10 = m1[1][0], a11 = m1[1][1], a12 = m1[1][2], a13 = m1[1][3],
@@ -109,8 +103,6 @@ void
glm_mul_rot(mat4 m1, mat4 m2, mat4 dest) { glm_mul_rot(mat4 m1, mat4 m2, mat4 dest) {
#if defined( __SSE__ ) || defined( __SSE2__ ) #if defined( __SSE__ ) || defined( __SSE2__ )
glm_mul_rot_sse2(m1, m2, dest); glm_mul_rot_sse2(m1, m2, dest);
#elif defined(CGLM_NEON_FP)
glm_mul_rot_neon(m1, m2, dest);
#else #else
float a00 = m1[0][0], a01 = m1[0][1], a02 = m1[0][2], a03 = m1[0][3], float a00 = m1[0][0], a01 = m1[0][1], a02 = m1[0][2], a03 = m1[0][3],
a10 = m1[1][0], a11 = m1[1][1], a12 = m1[1][2], a13 = m1[1][3], a10 = m1[1][0], a11 = m1[1][1], a12 = m1[1][2], a13 = m1[1][3],
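For reference, a minimal usage sketch of glm_mul as documented in the hunk above: both operands are affine (last row 0 0 0 1), so the specialized multiply can replace glm_mat4_mul. The helpers glm_translate_make, glm_rotate_make and glm_rad are existing cglm functions; the example itself is not part of this change.

```c
#include <stdio.h>
#include <cglm/cglm.h>

int main(void) {
  mat4 T, R, M;

  glm_translate_make(T, (vec3){1.0f, 2.0f, 3.0f});              /* pure translation */
  glm_rotate_make(R, glm_rad(45.0f), (vec3){0.0f, 0.0f, 1.0f}); /* pure rotation    */

  /* both matrices are affine, so the specialized multiply applies */
  glm_mul(T, R, M);

  printf("translation column: %f %f %f\n", M[3][0], M[3][1], M[3][2]);
  return 0;
}
```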

View File

@@ -50,22 +50,26 @@
CGLM_INLINE CGLM_INLINE
void void
glm_translate(mat4 m, vec3 v) { glm_translate(mat4 m, vec3 v) {
#if defined(CGLM_SIMD) #if defined( __SSE__ ) || defined( __SSE2__ )
glmm_128 m0, m1, m2, m3;
m0 = glmm_load(m[0]);
m1 = glmm_load(m[1]);
m2 = glmm_load(m[2]);
m3 = glmm_load(m[3]);
glmm_store(m[3], glmm_store(m[3],
glmm_fmadd(m0, glmm_set1(v[0]), _mm_add_ps(_mm_add_ps(_mm_mul_ps(glmm_load(m[0]),
glmm_fmadd(m1, glmm_set1(v[1]), _mm_set1_ps(v[0])),
glmm_fmadd(m2, glmm_set1(v[2]), m3)))); _mm_mul_ps(glmm_load(m[1]),
_mm_set1_ps(v[1]))),
_mm_add_ps(_mm_mul_ps(glmm_load(m[2]),
_mm_set1_ps(v[2])),
glmm_load(m[3]))))
;
#else #else
glm_vec4_muladds(m[0], v[0], m[3]); vec4 v1, v2, v3;
glm_vec4_muladds(m[1], v[1], m[3]);
glm_vec4_muladds(m[2], v[2], m[3]); glm_vec4_scale(m[0], v[0], v1);
glm_vec4_scale(m[1], v[1], v2);
glm_vec4_scale(m[2], v[2], v3);
glm_vec4_add(v1, m[3], m[3]);
glm_vec4_add(v2, m[3], m[3]);
glm_vec4_add(v3, m[3], m[3]);
#endif #endif
} }
@@ -95,8 +99,12 @@ glm_translate_to(mat4 m, vec3 v, mat4 dest) {
CGLM_INLINE CGLM_INLINE
void void
glm_translate_x(mat4 m, float x) { glm_translate_x(mat4 m, float x) {
#if defined(CGLM_SIMD) #if defined( __SSE__ ) || defined( __SSE2__ )
glmm_store(m[3], glmm_fmadd(glmm_load(m[0]), glmm_set1(x), glmm_load(m[3]))); glmm_store(m[3],
_mm_add_ps(_mm_mul_ps(glmm_load(m[0]),
_mm_set1_ps(x)),
glmm_load(m[3])))
;
#else #else
vec4 v1; vec4 v1;
glm_vec4_scale(m[0], x, v1); glm_vec4_scale(m[0], x, v1);
@@ -113,8 +121,12 @@ glm_translate_x(mat4 m, float x) {
CGLM_INLINE CGLM_INLINE
void void
glm_translate_y(mat4 m, float y) { glm_translate_y(mat4 m, float y) {
#if defined(CGLM_SIMD) #if defined( __SSE__ ) || defined( __SSE2__ )
glmm_store(m[3], glmm_fmadd(glmm_load(m[1]), glmm_set1(y), glmm_load(m[3]))); glmm_store(m[3],
_mm_add_ps(_mm_mul_ps(glmm_load(m[1]),
_mm_set1_ps(y)),
glmm_load(m[3])))
;
#else #else
vec4 v1; vec4 v1;
glm_vec4_scale(m[1], y, v1); glm_vec4_scale(m[1], y, v1);
@@ -131,8 +143,12 @@ glm_translate_y(mat4 m, float y) {
CGLM_INLINE CGLM_INLINE
void void
glm_translate_z(mat4 m, float z) { glm_translate_z(mat4 m, float z) {
#if defined(CGLM_SIMD) #if defined( __SSE__ ) || defined( __SSE2__ )
glmm_store(m[3], glmm_fmadd(glmm_load(m[2]), glmm_set1(z), glmm_load(m[3]))); glmm_store(m[3],
_mm_add_ps(_mm_mul_ps(glmm_load(m[2]),
_mm_set1_ps(z)),
glmm_load(m[3])))
;
#else #else
vec4 v1; vec4 v1;
glm_vec4_scale(m[2], z, v1); glm_vec4_scale(m[2], z, v1);
@@ -409,7 +425,7 @@ bool
glm_uniscaled(mat4 m) { glm_uniscaled(mat4 m) {
CGLM_ALIGN(8) vec3 s; CGLM_ALIGN(8) vec3 s;
glm_decompose_scalev(m, s); glm_decompose_scalev(m, s);
return glm_vec3_eq_all(s); return glm_vec3_eq_eps(s, s[0]);
} }
/*! /*!
@@ -439,7 +455,7 @@ glm_decompose_rs(mat4 m, mat4 r, vec3 s) {
glm_vec4_scale(r[1], 1.0f/s[1], r[1]); glm_vec4_scale(r[1], 1.0f/s[1], r[1]);
glm_vec4_scale(r[2], 1.0f/s[2], r[2]); glm_vec4_scale(r[2], 1.0f/s[2], r[2]);
/* Note from Apple Open Source (assume that the matrix is orthonormal): /* Note from Apple Open Source (asume that the matrix is orthonormal):
check for a coordinate system flip. If the determinant check for a coordinate system flip. If the determinant
is -1, then negate the matrix and the scaling factors. */ is -1, then negate the matrix and the scaling factors. */
glm_vec3_cross(m[0], m[1], v); glm_vec3_cross(m[0], m[1], v);
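A short sanity-check sketch of the glm_uniscaled change above (not part of the diff): the scale vector is decomposed and every component is now compared against s[0] within the epsilon tolerance, so a uniformly scaled matrix reports true and a non-uniform one false. glm_scale_make is an existing cglm helper.

```c
#include <stdio.h>
#include <cglm/cglm.h>

int main(void) {
  mat4 u, n;

  glm_scale_make(u, (vec3){2.0f, 2.0f, 2.0f});   /* uniform scale     */
  glm_scale_make(n, (vec3){2.0f, 3.0f, 2.0f});   /* non-uniform scale */

  /* glm_uniscaled decomposes the scale and compares the components
     against s[0] with the epsilon, per the hunk above */
  printf("uniform:     %d\n", glm_uniscaled(u));   /* expect 1 */
  printf("non-uniform: %d\n", glm_uniscaled(n));   /* expect 0 */
  return 0;
}
```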

View File

@@ -228,8 +228,6 @@ glm_aabb_aabb(vec3 box[2], vec3 other[2]) {
* https://github.com/erich666/GraphicsGems/blob/master/gems/BoxSphere.c * https://github.com/erich666/GraphicsGems/blob/master/gems/BoxSphere.c
* Solid Box - Solid Sphere test. * Solid Box - Solid Sphere test.
* *
* Sphere Representation in cglm: [center.x, center.y, center.z, radii]
*
* @param[in] box solid bounding box * @param[in] box solid bounding box
* @param[in] s solid sphere * @param[in] s solid sphere
*/ */
@@ -239,13 +237,13 @@ glm_aabb_sphere(vec3 box[2], vec4 s) {
float dmin; float dmin;
int a, b, c; int a, b, c;
a = (s[0] < box[0][0]) + (s[0] > box[1][0]); a = s[0] >= box[0][0];
b = (s[1] < box[0][1]) + (s[1] > box[1][1]); b = s[1] >= box[0][1];
c = (s[2] < box[0][2]) + (s[2] > box[1][2]); c = s[2] >= box[0][2];
dmin = glm_pow2((s[0] - box[!(a - 1)][0]) * (a != 0)) dmin = glm_pow2(s[0] - box[a][0])
+ glm_pow2((s[1] - box[!(b - 1)][1]) * (b != 0)) + glm_pow2(s[1] - box[b][1])
+ glm_pow2((s[2] - box[!(c - 1)][2]) * (c != 0)); + glm_pow2(s[2] - box[c][2]);
return dmin <= glm_pow2(s[3]); return dmin <= glm_pow2(s[3]);
} }
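For context, a usage sketch of glm_aabb_sphere with the sphere layout the removed doc comment described ([center.x, center.y, center.z, radius]); the sample values are ours, not from the change.

```c
#include <stdio.h>
#include <cglm/cglm.h>

int main(void) {
  /* axis-aligned box as {min corner, max corner} */
  vec3 box[2] = {{0.0f, 0.0f, 0.0f}, {1.0f, 1.0f, 1.0f}};

  /* sphere layout: [center.x, center.y, center.z, radius] */
  vec4 hit  = {1.4f, 0.5f, 0.5f, 0.5f};   /* 0.4 away from the box face */
  vec4 miss = {3.0f, 3.0f, 3.0f, 0.5f};   /* well outside               */

  printf("hit:  %d\n", glm_aabb_sphere(box, hit));    /* expect 1 */
  printf("miss: %d\n", glm_aabb_sphere(box, miss));   /* expect 0 */
  return 0;
}
```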

View File

@@ -42,12 +42,18 @@
#include "types.h" #include "types.h"
#include "simd/intrin.h" #include "simd/intrin.h"
#ifndef CGLM_USE_DEFAULT_EPSILON /** CGLM_USE_DEFAULT_EPSILON is removed, to override float epsilon,
# ifndef GLM_FLT_EPSILON * just define GLM_FLT_EPSILON with epsilon value like below
# define GLM_FLT_EPSILON 1e-6 *
* #define GLM_FLT_EPSILON 1e-6f
*/
#ifndef GLM_FLT_EPSILON
# ifndef FLT_EPSILON
# define GLM_FLT_EPSILON 1e-6f
# else
# define GLM_FLT_EPSILON FLT_EPSILON
# endif # endif
#else
# define GLM_FLT_EPSILON FLT_EPSILON
#endif #endif
#endif /* cglm_common_h */ #endif /* cglm_common_h */

View File

@@ -539,9 +539,7 @@ glm_mat4_scale_p(mat4 m, float s) {
CGLM_INLINE CGLM_INLINE
void void
glm_mat4_scale(mat4 m, float s) { glm_mat4_scale(mat4 m, float s) {
#ifdef __AVX__ #if defined( __SSE__ ) || defined( __SSE2__ )
glm_mat4_scale_avx(m, s);
#elif defined( __SSE__ ) || defined( __SSE2__ )
glm_mat4_scale_sse2(m, s); glm_mat4_scale_sse2(m, s);
#elif defined(CGLM_NEON_FP) #elif defined(CGLM_NEON_FP)
glm_mat4_scale_neon(m, s); glm_mat4_scale_neon(m, s);
@@ -562,8 +560,6 @@ float
glm_mat4_det(mat4 mat) { glm_mat4_det(mat4 mat) {
#if defined( __SSE__ ) || defined( __SSE2__ ) #if defined( __SSE__ ) || defined( __SSE2__ )
return glm_mat4_det_sse2(mat); return glm_mat4_det_sse2(mat);
#elif defined(CGLM_NEON_FP)
return glm_mat4_det_neon(mat);
#else #else
/* [square] det(A) = det(At) */ /* [square] det(A) = det(At) */
float t[6]; float t[6];
@@ -597,8 +593,6 @@ void
glm_mat4_inv(mat4 mat, mat4 dest) { glm_mat4_inv(mat4 mat, mat4 dest) {
#if defined( __SSE__ ) || defined( __SSE2__ ) #if defined( __SSE__ ) || defined( __SSE2__ )
glm_mat4_inv_sse2(mat, dest); glm_mat4_inv_sse2(mat, dest);
#elif defined(CGLM_NEON_FP)
glm_mat4_inv_neon(mat, dest);
#else #else
float t[6]; float t[6];
float det; float det;

View File

@@ -10,42 +10,19 @@
#include "intrin.h" #include "intrin.h"
#ifdef CGLM_SIMD_ARM #ifdef CGLM_SIMD_ARM
#if defined(_M_ARM64) || defined(_M_HYBRID_X86_ARM64) || defined(_M_ARM64EC) || defined(__aarch64__)
# define CGLM_ARM64 1
#endif
#define glmm_load(p) vld1q_f32(p) #define glmm_load(p) vld1q_f32(p)
#define glmm_store(p, a) vst1q_f32(p, a) #define glmm_store(p, a) vst1q_f32(p, a)
#define glmm_set1(x) vdupq_n_f32(x)
#define glmm_128 float32x4_t
#define glmm_splat_x(x) vdupq_lane_f32(vget_low_f32(x), 0)
#define glmm_splat_y(x) vdupq_lane_f32(vget_low_f32(x), 1)
#define glmm_splat_z(x) vdupq_lane_f32(vget_high_f32(x), 0)
#define glmm_splat_w(x) vdupq_lane_f32(vget_high_f32(x), 1)
#define glmm_xor(a, b) \
vreinterpretq_f32_s32(veorq_s32(vreinterpretq_s32_f32(a), \
vreinterpretq_s32_f32(b)))
static inline static inline
float32x4_t float32x4_t
glmm_abs(float32x4_t v) { glmm_abs(float32x4_t v) {
return vabsq_f32(v); return vabsq_f32(v);
} }
static inline
float32x4_t
glmm_vhadd(float32x4_t v) {
v = vaddq_f32(v, vrev64q_f32(v));
return vaddq_f32(v, vcombine_f32(vget_high_f32(v), vget_low_f32(v)));
}
static inline static inline
float float
glmm_hadd(float32x4_t v) { glmm_hadd(float32x4_t v) {
#if CGLM_ARM64 #if defined(__aarch64__)
return vaddvq_f32(v); return vaddvq_f32(v);
#else #else
v = vaddq_f32(v, vrev64q_f32(v)); v = vaddq_f32(v, vrev64q_f32(v));
@@ -102,58 +79,5 @@ glmm_norm_inf(float32x4_t a) {
return glmm_hmax(glmm_abs(a)); return glmm_hmax(glmm_abs(a));
} }
static inline
float32x4_t
glmm_div(float32x4_t a, float32x4_t b) {
#if CGLM_ARM64
return vdivq_f32(a, b);
#else
/* 2 iterations of Newton-Raphson refinement of reciprocal */
float32x4_t r0, r1;
r0 = vrecpeq_f32(b);
r1 = vrecpsq_f32(r0, b);
r0 = vmulq_f32(r1, r0);
r1 = vrecpsq_f32(r0, b);
r0 = vmulq_f32(r1, r0);
return vmulq_f32(a, r0);
#endif
}
static inline
float32x4_t
glmm_fmadd(float32x4_t a, float32x4_t b, float32x4_t c) {
#if CGLM_ARM64
return vfmaq_f32(c, a, b); /* why vfmaq_f32 is slower than vmlaq_f32 ??? */
#else
return vmlaq_f32(c, a, b);
#endif
}
static inline
float32x4_t
glmm_fnmadd(float32x4_t a, float32x4_t b, float32x4_t c) {
#if CGLM_ARM64
return vfmsq_f32(c, a, b);
#else
return vmlsq_f32(c, a, b);
#endif
}
static inline
float32x4_t
glmm_fmsub(float32x4_t a, float32x4_t b, float32x4_t c) {
#if CGLM_ARM64
return vfmsq_f32(c, a, b);
#else
return vmlsq_f32(c, a, b);
#endif
}
static inline
float32x4_t
glmm_fnmsub(float32x4_t a, float32x4_t b, float32x4_t c) {
return vsubq_f32(vdupq_n_f32(0.0f), glmm_fmadd(a, b, c));
}
#endif #endif
#endif /* cglm_simd_arm_h */ #endif /* cglm_simd_arm_h */

View File

@@ -14,16 +14,6 @@
#include <immintrin.h> #include <immintrin.h>
CGLM_INLINE
void
glm_mat4_scale_avx(mat4 m, float s) {
__m256 y0;
y0 = _mm256_set1_ps(s);
glmm_store256(m[0], _mm256_mul_ps(y0, glmm_load256(m[0])));
glmm_store256(m[2], _mm256_mul_ps(y0, glmm_load256(m[2])));
}
CGLM_INLINE CGLM_INLINE
void void
glm_mat4_mul_avx(mat4 m1, mat4 m2, mat4 dest) { glm_mat4_mul_avx(mat4 m1, mat4 m2, mat4 dest) {

View File

@@ -1,80 +0,0 @@
/*
* Copyright (c), Recep Aslantas.
*
* MIT License (MIT), http://opensource.org/licenses/MIT
* Full license can be found in the LICENSE file
*/
#ifndef cglm_affine_neon_h
#define cglm_affine_neon_h
#if defined(__ARM_NEON_FP)
#include "../../common.h"
#include "../intrin.h"
CGLM_INLINE
void
glm_mul_neon(mat4 m1, mat4 m2, mat4 dest) {
/* D = R * L (Column-Major) */
glmm_128 l0, l1, l2, l3, r0, r1, r2, r3, v0, v1, v2, v3;
l0 = glmm_load(m1[0]); r0 = glmm_load(m2[0]);
l1 = glmm_load(m1[1]); r1 = glmm_load(m2[1]);
l2 = glmm_load(m1[2]); r2 = glmm_load(m2[2]);
l3 = glmm_load(m1[3]); r3 = glmm_load(m2[3]);
v0 = vmulq_f32(glmm_splat_x(r0), l0);
v1 = vmulq_f32(glmm_splat_x(r1), l0);
v2 = vmulq_f32(glmm_splat_x(r2), l0);
v3 = vmulq_f32(glmm_splat_x(r3), l0);
v0 = glmm_fmadd(glmm_splat_y(r0), l1, v0);
v1 = glmm_fmadd(glmm_splat_y(r1), l1, v1);
v2 = glmm_fmadd(glmm_splat_y(r2), l1, v2);
v3 = glmm_fmadd(glmm_splat_y(r3), l1, v3);
v0 = glmm_fmadd(glmm_splat_z(r0), l2, v0);
v1 = glmm_fmadd(glmm_splat_z(r1), l2, v1);
v2 = glmm_fmadd(glmm_splat_z(r2), l2, v2);
v3 = glmm_fmadd(glmm_splat_z(r3), l2, v3);
v3 = glmm_fmadd(glmm_splat_w(r3), l3, v3);
glmm_store(dest[0], v0);
glmm_store(dest[1], v1);
glmm_store(dest[2], v2);
glmm_store(dest[3], v3);
}
CGLM_INLINE
void
glm_mul_rot_neon(mat4 m1, mat4 m2, mat4 dest) {
/* D = R * L (Column-Major) */
glmm_128 l0, l1, l2, r0, r1, r2, v0, v1, v2;
l0 = glmm_load(m1[0]); r0 = glmm_load(m2[0]);
l1 = glmm_load(m1[1]); r1 = glmm_load(m2[1]);
l2 = glmm_load(m1[2]); r2 = glmm_load(m2[2]);
v0 = vmulq_f32(glmm_splat_x(r0), l0);
v1 = vmulq_f32(glmm_splat_x(r1), l0);
v2 = vmulq_f32(glmm_splat_x(r2), l0);
v0 = glmm_fmadd(glmm_splat_y(r0), l1, v0);
v1 = glmm_fmadd(glmm_splat_y(r1), l1, v1);
v2 = glmm_fmadd(glmm_splat_y(r2), l1, v2);
v0 = glmm_fmadd(glmm_splat_z(r0), l2, v0);
v1 = glmm_fmadd(glmm_splat_z(r1), l2, v1);
v2 = glmm_fmadd(glmm_splat_z(r2), l2, v2);
glmm_store(dest[0], v0);
glmm_store(dest[1], v1);
glmm_store(dest[2], v2);
glmm_store(dest[3], glmm_load(m1[3]));
}
#endif
#endif /* cglm_affine_neon_h */

View File

@@ -42,38 +42,41 @@ CGLM_INLINE
void void
glm_mat4_mul_neon(mat4 m1, mat4 m2, mat4 dest) { glm_mat4_mul_neon(mat4 m1, mat4 m2, mat4 dest) {
/* D = R * L (Column-Major) */ /* D = R * L (Column-Major) */
float32x4_t l0, l1, l2, l3, r, d0, d1, d2, d3;
glmm_128 l0, l1, l2, l3, r0, r1, r2, r3, v0, v1, v2, v3; l0 = vld1q_f32(m2[0]);
l1 = vld1q_f32(m2[1]);
l2 = vld1q_f32(m2[2]);
l3 = vld1q_f32(m2[3]);
l0 = glmm_load(m1[0]); r0 = glmm_load(m2[0]); r = vld1q_f32(m1[0]);
l1 = glmm_load(m1[1]); r1 = glmm_load(m2[1]); d0 = vmulq_lane_f32(r, vget_low_f32(l0), 0);
l2 = glmm_load(m1[2]); r2 = glmm_load(m2[2]); d1 = vmulq_lane_f32(r, vget_low_f32(l1), 0);
l3 = glmm_load(m1[3]); r3 = glmm_load(m2[3]); d2 = vmulq_lane_f32(r, vget_low_f32(l2), 0);
d3 = vmulq_lane_f32(r, vget_low_f32(l3), 0);
v0 = vmulq_f32(glmm_splat_x(r0), l0); r = vld1q_f32(m1[1]);
v1 = vmulq_f32(glmm_splat_x(r1), l0); d0 = vmlaq_lane_f32(d0, r, vget_low_f32(l0), 1);
v2 = vmulq_f32(glmm_splat_x(r2), l0); d1 = vmlaq_lane_f32(d1, r, vget_low_f32(l1), 1);
v3 = vmulq_f32(glmm_splat_x(r3), l0); d2 = vmlaq_lane_f32(d2, r, vget_low_f32(l2), 1);
d3 = vmlaq_lane_f32(d3, r, vget_low_f32(l3), 1);
v0 = glmm_fmadd(glmm_splat_y(r0), l1, v0); r = vld1q_f32(m1[2]);
v1 = glmm_fmadd(glmm_splat_y(r1), l1, v1); d0 = vmlaq_lane_f32(d0, r, vget_high_f32(l0), 0);
v2 = glmm_fmadd(glmm_splat_y(r2), l1, v2); d1 = vmlaq_lane_f32(d1, r, vget_high_f32(l1), 0);
v3 = glmm_fmadd(glmm_splat_y(r3), l1, v3); d2 = vmlaq_lane_f32(d2, r, vget_high_f32(l2), 0);
d3 = vmlaq_lane_f32(d3, r, vget_high_f32(l3), 0);
v0 = glmm_fmadd(glmm_splat_z(r0), l2, v0); r = vld1q_f32(m1[3]);
v1 = glmm_fmadd(glmm_splat_z(r1), l2, v1); d0 = vmlaq_lane_f32(d0, r, vget_high_f32(l0), 1);
v2 = glmm_fmadd(glmm_splat_z(r2), l2, v2); d1 = vmlaq_lane_f32(d1, r, vget_high_f32(l1), 1);
v3 = glmm_fmadd(glmm_splat_z(r3), l2, v3); d2 = vmlaq_lane_f32(d2, r, vget_high_f32(l2), 1);
d3 = vmlaq_lane_f32(d3, r, vget_high_f32(l3), 1);
v0 = glmm_fmadd(glmm_splat_w(r0), l3, v0); vst1q_f32(dest[0], d0);
v1 = glmm_fmadd(glmm_splat_w(r1), l3, v1); vst1q_f32(dest[1], d1);
v2 = glmm_fmadd(glmm_splat_w(r2), l3, v2); vst1q_f32(dest[2], d2);
v3 = glmm_fmadd(glmm_splat_w(r3), l3, v3); vst1q_f32(dest[3], d3);
glmm_store(dest[0], v0);
glmm_store(dest[1], v1);
glmm_store(dest[2], v2);
glmm_store(dest[3], v3);
} }
CGLM_INLINE CGLM_INLINE
@@ -98,216 +101,5 @@ glm_mat4_mulv_neon(mat4 m, vec4 v, vec4 dest) {
vst1q_f32(dest, l0); vst1q_f32(dest, l0);
} }
CGLM_INLINE
float
glm_mat4_det_neon(mat4 mat) {
float32x4_t r0, r1, r2, r3, x0, x1, x2;
float32x2_t ij, op, mn, kl, nn, mm, jj, ii, gh, ef, t12, t34;
float32x4x2_t a1;
float32x4_t x3 = { 0.f, -0.f, 0.f, -0.f };
/* 127 <- 0, [square] det(A) = det(At) */
r0 = glmm_load(mat[0]); /* d c b a */
r1 = vrev64q_f32(glmm_load(mat[1])); /* g h e f */
r2 = vrev64q_f32(glmm_load(mat[2])); /* l k i j */
r3 = vrev64q_f32(glmm_load(mat[3])); /* o p m n */
gh = vget_high_f32(r1);
ef = vget_low_f32(r1);
kl = vget_high_f32(r2);
ij = vget_low_f32(r2);
op = vget_high_f32(r3);
mn = vget_low_f32(r3);
mm = vdup_lane_f32(mn, 1);
nn = vdup_lane_f32(mn, 0);
ii = vdup_lane_f32(ij, 1);
jj = vdup_lane_f32(ij, 0);
/*
t[1] = j * p - n * l;
t[2] = j * o - n * k;
t[3] = i * p - m * l;
t[4] = i * o - m * k;
*/
x0 = glmm_fnmadd(vcombine_f32(kl, kl), vcombine_f32(nn, mm),
vmulq_f32(vcombine_f32(op, op), vcombine_f32(jj, ii)));
t12 = vget_low_f32(x0);
t34 = vget_high_f32(x0);
/* 1 3 1 3 2 4 2 4 */
a1 = vuzpq_f32(x0, x0);
/*
t[0] = k * p - o * l;
t[0] = k * p - o * l;
t[5] = i * n - m * j;
t[5] = i * n - m * j;
*/
x1 = glmm_fnmadd(vcombine_f32(vdup_lane_f32(kl, 0), jj),
vcombine_f32(vdup_lane_f32(op, 1), mm),
vmulq_f32(vcombine_f32(vdup_lane_f32(op, 0), nn),
vcombine_f32(vdup_lane_f32(kl, 1), ii)));
/*
a * (f * t[0] - g * t[1] + h * t[2])
- b * (e * t[0] - g * t[3] + h * t[4])
+ c * (e * t[1] - f * t[3] + h * t[5])
- d * (e * t[2] - f * t[4] + g * t[5])
*/
x2 = glmm_fnmadd(vcombine_f32(vdup_lane_f32(gh, 1), vdup_lane_f32(ef, 0)),
vcombine_f32(vget_low_f32(a1.val[0]), t34),
vmulq_f32(vcombine_f32(ef, vdup_lane_f32(ef, 1)),
vcombine_f32(vget_low_f32(x1), t12)));
x2 = glmm_fmadd(vcombine_f32(vdup_lane_f32(gh, 0), gh),
vcombine_f32(vget_low_f32(a1.val[1]), vget_high_f32(x1)), x2);
x2 = glmm_xor(x2, x3);
return glmm_hadd(vmulq_f32(x2, r0));
}
CGLM_INLINE
void
glm_mat4_inv_neon(mat4 mat, mat4 dest) {
float32x4_t r0, r1, r2, r3,
v0, v1, v2, v3,
t0, t1, t2, t3, t4, t5,
x0, x1, x2, x3, x4, x5, x6, x7, x8;
float32x4x2_t a1;
float32x2_t lp, ko, hg, jn, im, fe, ae, bf, cg, dh;
float32x4_t x9 = { -0.f, 0.f, -0.f, 0.f };
x8 = vrev64q_f32(x9);
/* 127 <- 0 */
r0 = glmm_load(mat[0]); /* d c b a */
r1 = glmm_load(mat[1]); /* h g f e */
r2 = glmm_load(mat[2]); /* l k j i */
r3 = glmm_load(mat[3]); /* p o n m */
/* l p k o, j n i m */
a1 = vzipq_f32(r3, r2);
jn = vget_high_f32(a1.val[0]);
im = vget_low_f32(a1.val[0]);
lp = vget_high_f32(a1.val[1]);
ko = vget_low_f32(a1.val[1]);
hg = vget_high_f32(r1);
x1 = vcombine_f32(vdup_lane_f32(lp, 0), lp); /* l p p p */
x2 = vcombine_f32(vdup_lane_f32(ko, 0), ko); /* k o o o */
x0 = vcombine_f32(vdup_lane_f32(lp, 1), vdup_lane_f32(hg, 1)); /* h h l l */
x3 = vcombine_f32(vdup_lane_f32(ko, 1), vdup_lane_f32(hg, 0)); /* g g k k */
/* t1[0] = k * p - o * l;
t1[0] = k * p - o * l;
t2[0] = g * p - o * h;
t3[0] = g * l - k * h; */
t0 = glmm_fnmadd(x2, x0, vmulq_f32(x3, x1));
fe = vget_low_f32(r1);
x4 = vcombine_f32(vdup_lane_f32(jn, 0), jn); /* j n n n */
x5 = vcombine_f32(vdup_lane_f32(jn, 1), vdup_lane_f32(fe, 1)); /* f f j j */
/* t1[1] = j * p - n * l;
t1[1] = j * p - n * l;
t2[1] = f * p - n * h;
t3[1] = f * l - j * h; */
t1 = glmm_fnmadd(x4, x0, vmulq_f32(x5, x1));
/* t1[2] = j * o - n * k
t1[2] = j * o - n * k;
t2[2] = f * o - n * g;
t3[2] = f * k - j * g; */
t2 = glmm_fnmadd(x4, x3, vmulq_f32(x5, x2));
x6 = vcombine_f32(vdup_lane_f32(im, 1), vdup_lane_f32(fe, 0)); /* e e i i */
x7 = vcombine_f32(vdup_lane_f32(im, 0), im); /* i m m m */
/* t1[3] = i * p - m * l;
t1[3] = i * p - m * l;
t2[3] = e * p - m * h;
t3[3] = e * l - i * h; */
t3 = glmm_fnmadd(x7, x0, vmulq_f32(x6, x1));
/* t1[4] = i * o - m * k;
t1[4] = i * o - m * k;
t2[4] = e * o - m * g;
t3[4] = e * k - i * g; */
t4 = glmm_fnmadd(x7, x3, vmulq_f32(x6, x2));
/* t1[5] = i * n - m * j;
t1[5] = i * n - m * j;
t2[5] = e * n - m * f;
t3[5] = e * j - i * f; */
t5 = glmm_fnmadd(x7, x5, vmulq_f32(x6, x4));
/* h d f b, g c e a */
a1 = vtrnq_f32(r0, r1);
x4 = vrev64q_f32(a1.val[0]); /* c g a e */
x5 = vrev64q_f32(a1.val[1]); /* d h b f */
ae = vget_low_f32(x4);
cg = vget_high_f32(x4);
bf = vget_low_f32(x5);
dh = vget_high_f32(x5);
x0 = vcombine_f32(ae, vdup_lane_f32(ae, 1)); /* a a a e */
x1 = vcombine_f32(bf, vdup_lane_f32(bf, 1)); /* b b b f */
x2 = vcombine_f32(cg, vdup_lane_f32(cg, 1)); /* c c c g */
x3 = vcombine_f32(dh, vdup_lane_f32(dh, 1)); /* d d d h */
/*
dest[0][0] = f * t1[0] - g * t1[1] + h * t1[2];
dest[0][1] =-(b * t1[0] - c * t1[1] + d * t1[2]);
dest[0][2] = b * t2[0] - c * t2[1] + d * t2[2];
dest[0][3] =-(b * t3[0] - c * t3[1] + d * t3[2]); */
v0 = glmm_xor(glmm_fmadd(x3, t2, glmm_fnmadd(x2, t1, vmulq_f32(x1, t0))), x8);
/*
dest[2][0] = e * t1[1] - f * t1[3] + h * t1[5];
dest[2][1] =-(a * t1[1] - b * t1[3] + d * t1[5]);
dest[2][2] = a * t2[1] - b * t2[3] + d * t2[5];
dest[2][3] =-(a * t3[1] - b * t3[3] + d * t3[5]);*/
v2 = glmm_xor(glmm_fmadd(x3, t5, glmm_fnmadd(x1, t3, vmulq_f32(x0, t1))), x8);
/*
dest[1][0] =-(e * t1[0] - g * t1[3] + h * t1[4]);
dest[1][1] = a * t1[0] - c * t1[3] + d * t1[4];
dest[1][2] =-(a * t2[0] - c * t2[3] + d * t2[4]);
dest[1][3] = a * t3[0] - c * t3[3] + d * t3[4]; */
v1 = glmm_xor(glmm_fmadd(x3, t4, glmm_fnmadd(x2, t3, vmulq_f32(x0, t0))), x9);
/*
dest[3][0] =-(e * t1[2] - f * t1[4] + g * t1[5]);
dest[3][1] = a * t1[2] - b * t1[4] + c * t1[5];
dest[3][2] =-(a * t2[2] - b * t2[4] + c * t2[5]);
dest[3][3] = a * t3[2] - b * t3[4] + c * t3[5]; */
v3 = glmm_xor(glmm_fmadd(x2, t5, glmm_fnmadd(x1, t4, vmulq_f32(x0, t2))), x9);
/* determinant */
x0 = vcombine_f32(vget_low_f32(vzipq_f32(v0, v1).val[0]),
vget_low_f32(vzipq_f32(v2, v3).val[0]));
/*
x0 = glmm_div(glmm_set1(1.0f), glmm_vhadd(vmulq_f32(x0, r0)));
glmm_store(dest[0], vmulq_f32(v0, x0));
glmm_store(dest[1], vmulq_f32(v1, x0));
glmm_store(dest[2], vmulq_f32(v2, x0));
glmm_store(dest[3], vmulq_f32(v3, x0));
*/
x0 = glmm_vhadd(vmulq_f32(x0, r0));
glmm_store(dest[0], glmm_div(v0, x0));
glmm_store(dest[1], glmm_div(v1, x0));
glmm_store(dest[2], glmm_div(v2, x0));
glmm_store(dest[3], glmm_div(v3, x0));
}
#endif #endif
#endif /* cglm_mat4_neon_h */ #endif /* cglm_mat4_neon_h */
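The NEON kernels above back glm_mat4_det and glm_mat4_inv, which dispatch per platform (see the mat4.h hunk earlier in this compare). A small sanity-check sketch, independent of which SIMD path the build selects; the transform values are arbitrary.

```c
#include <stdio.h>
#include <cglm/cglm.h>

int main(void) {
  mat4 m, inv, id;

  /* an invertible transform: rotation about Y, then a translation */
  glm_rotate_make(m, glm_rad(30.0f), (vec3){0.0f, 1.0f, 0.0f});
  glm_translate(m, (vec3){1.0f, 2.0f, 3.0f});

  glm_mat4_inv(m, inv);       /* SSE2/NEON/scalar, depending on the build */
  glm_mat4_mul(m, inv, id);   /* should come back as ~identity            */

  printf("det(m)   = %f\n", glm_mat4_det(m));   /* ~1 for a rigid transform */
  printf("id[0][0] = %f  id[3][3] = %f\n", id[0][0], id[3][3]);
  return 0;
}
```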

View File

@@ -25,28 +25,28 @@ glm_mul_sse2(mat4 m1, mat4 m2, mat4 dest) {
r = glmm_load(m2[0]); r = glmm_load(m2[0]);
glmm_store(dest[0], glmm_store(dest[0],
glmm_fmadd(glmm_splat(r, 0), l0, _mm_add_ps(_mm_add_ps(_mm_mul_ps(glmm_shuff1x(r, 0), l0),
glmm_fmadd(glmm_splat(r, 1), l1, _mm_mul_ps(glmm_shuff1x(r, 1), l1)),
_mm_mul_ps(glmm_splat(r, 2), l2)))); _mm_mul_ps(glmm_shuff1x(r, 2), l2)));
r = glmm_load(m2[1]); r = glmm_load(m2[1]);
glmm_store(dest[1], glmm_store(dest[1],
glmm_fmadd(glmm_splat(r, 0), l0, _mm_add_ps(_mm_add_ps(_mm_mul_ps(glmm_shuff1x(r, 0), l0),
glmm_fmadd(glmm_splat(r, 1), l1, _mm_mul_ps(glmm_shuff1x(r, 1), l1)),
_mm_mul_ps(glmm_splat(r, 2), l2)))); _mm_mul_ps(glmm_shuff1x(r, 2), l2)));
r = glmm_load(m2[2]); r = glmm_load(m2[2]);
glmm_store(dest[2], glmm_store(dest[2],
glmm_fmadd(glmm_splat(r, 0), l0, _mm_add_ps(_mm_add_ps(_mm_mul_ps(glmm_shuff1x(r, 0), l0),
glmm_fmadd(glmm_splat(r, 1), l1, _mm_mul_ps(glmm_shuff1x(r, 1), l1)),
_mm_mul_ps(glmm_splat(r, 2), l2)))); _mm_mul_ps(glmm_shuff1x(r, 2), l2)));
r = glmm_load(m2[3]); r = glmm_load(m2[3]);
glmm_store(dest[3], glmm_store(dest[3],
glmm_fmadd(glmm_splat(r, 0), l0, _mm_add_ps(_mm_add_ps(_mm_mul_ps(glmm_shuff1x(r, 0), l0),
glmm_fmadd(glmm_splat(r, 1), l1, _mm_mul_ps(glmm_shuff1x(r, 1), l1)),
glmm_fmadd(glmm_splat(r, 2), l2, _mm_add_ps(_mm_mul_ps(glmm_shuff1x(r, 2), l2),
_mm_mul_ps(glmm_splat(r, 3), l3))))); _mm_mul_ps(glmm_shuff1x(r, 3), l3))));
} }
CGLM_INLINE CGLM_INLINE
@@ -62,22 +62,21 @@ glm_mul_rot_sse2(mat4 m1, mat4 m2, mat4 dest) {
r = glmm_load(m2[0]); r = glmm_load(m2[0]);
glmm_store(dest[0], glmm_store(dest[0],
glmm_fmadd(glmm_splat(r, 0), l0, _mm_add_ps(_mm_add_ps(_mm_mul_ps(glmm_shuff1x(r, 0), l0),
glmm_fmadd(glmm_splat(r, 1), l1, _mm_mul_ps(glmm_shuff1x(r, 1), l1)),
_mm_mul_ps(glmm_splat(r, 2), l2)))); _mm_mul_ps(glmm_shuff1x(r, 2), l2)));
r = glmm_load(m2[1]); r = glmm_load(m2[1]);
glmm_store(dest[1], glmm_store(dest[1],
glmm_fmadd(glmm_splat(r, 0), l0, _mm_add_ps(_mm_add_ps(_mm_mul_ps(glmm_shuff1x(r, 0), l0),
glmm_fmadd(glmm_splat(r, 1), l1, _mm_mul_ps(glmm_shuff1x(r, 1), l1)),
_mm_mul_ps(glmm_splat(r, 2), l2)))); _mm_mul_ps(glmm_shuff1x(r, 2), l2)));
r = glmm_load(m2[2]); r = glmm_load(m2[2]);
glmm_store(dest[2], glmm_store(dest[2],
glmm_fmadd(glmm_splat(r, 0), l0, _mm_add_ps(_mm_add_ps(_mm_mul_ps(glmm_shuff1x(r, 0), l0),
glmm_fmadd(glmm_splat(r, 1), l1, _mm_mul_ps(glmm_shuff1x(r, 1), l1)),
_mm_mul_ps(glmm_splat(r, 2), l2)))); _mm_mul_ps(glmm_shuff1x(r, 2), l2)));
glmm_store(dest[3], l3); glmm_store(dest[3], l3);
} }
@@ -95,9 +94,9 @@ glm_inv_tr_sse2(mat4 mat) {
_MM_TRANSPOSE4_PS(r0, r1, r2, x1); _MM_TRANSPOSE4_PS(r0, r1, r2, x1);
x0 = glmm_fmadd(r0, glmm_shuff1(r3, 0, 0, 0, 0), x0 = _mm_add_ps(_mm_mul_ps(r0, glmm_shuff1(r3, 0, 0, 0, 0)),
glmm_fmadd(r1, glmm_shuff1(r3, 1, 1, 1, 1), _mm_mul_ps(r1, glmm_shuff1(r3, 1, 1, 1, 1)));
_mm_mul_ps(r2, glmm_shuff1(r3, 2, 2, 2, 2)))); x0 = _mm_add_ps(x0, _mm_mul_ps(r2, glmm_shuff1(r3, 2, 2, 2, 2)));
x0 = _mm_xor_ps(x0, _mm_set1_ps(-0.f)); x0 = _mm_xor_ps(x0, _mm_set1_ps(-0.f));
x0 = _mm_add_ps(x0, x1); x0 = _mm_add_ps(x0, x1);

View File

@@ -26,11 +26,11 @@ glm_mat2_mul_sse2(mat2 m1, mat2 m2, mat2 dest) {
dest[1][0] = a * g + c * h; dest[1][0] = a * g + c * h;
dest[1][1] = b * g + d * h; dest[1][1] = b * g + d * h;
*/ */
x0 = glmm_fmadd(_mm_movelh_ps(x1, x1), glmm_shuff1(x2, 2, 2, 0, 0), x0 = _mm_mul_ps(_mm_movelh_ps(x1, x1), glmm_shuff1(x2, 2, 2, 0, 0));
_mm_mul_ps(_mm_movehl_ps(x1, x1), x1 = _mm_mul_ps(_mm_movehl_ps(x1, x1), glmm_shuff1(x2, 3, 3, 1, 1));
glmm_shuff1(x2, 3, 3, 1, 1))); x1 = _mm_add_ps(x0, x1);
glmm_store(dest[0], x0); glmm_store(dest[0], x1);
} }
CGLM_INLINE CGLM_INLINE

View File

@@ -30,16 +30,23 @@ glm_mat3_mul_sse2(mat3 m1, mat3 m2, mat3 dest) {
x1 = glmm_shuff2(l0, l1, 1, 0, 3, 3, 0, 3, 2, 0); x1 = glmm_shuff2(l0, l1, 1, 0, 3, 3, 0, 3, 2, 0);
x2 = glmm_shuff2(l1, l2, 0, 0, 3, 2, 0, 2, 1, 0); x2 = glmm_shuff2(l1, l2, 0, 0, 3, 2, 0, 2, 1, 0);
x0 = glmm_fmadd(glmm_shuff1(l0, 0, 2, 1, 0), glmm_shuff1(r0, 3, 0, 0, 0), x0 = _mm_add_ps(_mm_mul_ps(glmm_shuff1(l0, 0, 2, 1, 0),
glmm_fmadd(x1, glmm_shuff2(r0, r1, 0, 0, 1, 1, 2, 0, 0, 0), glmm_shuff1(r0, 3, 0, 0, 0)),
_mm_mul_ps(x2, glmm_shuff2(r0, r1, 1, 1, 2, 2, 2, 0, 0, 0)))); _mm_mul_ps(x1, glmm_shuff2(r0, r1, 0, 0, 1, 1, 2, 0, 0, 0)));
x0 = _mm_add_ps(x0,
_mm_mul_ps(x2, glmm_shuff2(r0, r1, 1, 1, 2, 2, 2, 0, 0, 0)));
_mm_storeu_ps(dest[0], x0); _mm_storeu_ps(dest[0], x0);
x0 = glmm_fmadd(glmm_shuff1(l0, 1, 0, 2, 1), _mm_shuffle_ps(r0, r1, _MM_SHUFFLE(2, 2, 3, 3)), x0 = _mm_add_ps(_mm_mul_ps(glmm_shuff1(l0, 1, 0, 2, 1),
glmm_fmadd(glmm_shuff1(x1, 1, 0, 2, 1), glmm_shuff1(r1, 3, 3, 0, 0), _mm_shuffle_ps(r0, r1, _MM_SHUFFLE(2, 2, 3, 3))),
_mm_mul_ps(glmm_shuff1(x2, 1, 0, 2, 1), _mm_mul_ps(glmm_shuff1(x1, 1, 0, 2, 1),
_mm_shuffle_ps(r1, r2, _MM_SHUFFLE(0, 0, 1, 1))))); glmm_shuff1(r1, 3, 3, 0, 0)));
x0 = _mm_add_ps(x0,
_mm_mul_ps(glmm_shuff1(x2, 1, 0, 2, 1),
_mm_shuffle_ps(r1, r2, _MM_SHUFFLE(0, 0, 1, 1))));
_mm_storeu_ps(&dest[1][1], x0); _mm_storeu_ps(&dest[1][1], x0);

View File

@@ -56,36 +56,46 @@ glm_mat4_mul_sse2(mat4 m1, mat4 m2, mat4 dest) {
l2 = glmm_load(m1[2]); l2 = glmm_load(m1[2]);
l3 = glmm_load(m1[3]); l3 = glmm_load(m1[3]);
#define XX(C) \ r = glmm_load(m2[0]);
\ glmm_store(dest[0],
r = glmm_load(m2[C]); \ _mm_add_ps(_mm_add_ps(_mm_mul_ps(glmm_shuff1x(r, 0), l0),
glmm_store(dest[C], \ _mm_mul_ps(glmm_shuff1x(r, 1), l1)),
glmm_fmadd(glmm_splat(r, 0), l0, \ _mm_add_ps(_mm_mul_ps(glmm_shuff1x(r, 2), l2),
glmm_fmadd(glmm_splat(r, 1), l1, \ _mm_mul_ps(glmm_shuff1x(r, 3), l3))));
glmm_fmadd(glmm_splat(r, 2), l2, \ r = glmm_load(m2[1]);
_mm_mul_ps(glmm_splat(r, 3), l3))))); glmm_store(dest[1],
_mm_add_ps(_mm_add_ps(_mm_mul_ps(glmm_shuff1x(r, 0), l0),
_mm_mul_ps(glmm_shuff1x(r, 1), l1)),
_mm_add_ps(_mm_mul_ps(glmm_shuff1x(r, 2), l2),
_mm_mul_ps(glmm_shuff1x(r, 3), l3))));
r = glmm_load(m2[2]);
glmm_store(dest[2],
_mm_add_ps(_mm_add_ps(_mm_mul_ps(glmm_shuff1x(r, 0), l0),
_mm_mul_ps(glmm_shuff1x(r, 1), l1)),
_mm_add_ps(_mm_mul_ps(glmm_shuff1x(r, 2), l2),
_mm_mul_ps(glmm_shuff1x(r, 3), l3))));
XX(0); r = glmm_load(m2[3]);
XX(1); glmm_store(dest[3],
XX(2); _mm_add_ps(_mm_add_ps(_mm_mul_ps(glmm_shuff1x(r, 0), l0),
XX(3); _mm_mul_ps(glmm_shuff1x(r, 1), l1)),
_mm_add_ps(_mm_mul_ps(glmm_shuff1x(r, 2), l2),
#undef XX _mm_mul_ps(glmm_shuff1x(r, 3), l3))));
} }
CGLM_INLINE CGLM_INLINE
void void
glm_mat4_mulv_sse2(mat4 m, vec4 v, vec4 dest) { glm_mat4_mulv_sse2(mat4 m, vec4 v, vec4 dest) {
__m128 x0, x1; __m128 x0, x1, x2;
x0 = glmm_load(v); x0 = glmm_load(v);
x1 = glmm_fmadd(glmm_load(m[0]), glmm_splat(x0, 0), x1 = _mm_add_ps(_mm_mul_ps(glmm_load(m[0]), glmm_shuff1x(x0, 0)),
glmm_fmadd(glmm_load(m[1]), glmm_splat(x0, 1), _mm_mul_ps(glmm_load(m[1]), glmm_shuff1x(x0, 1)));
glmm_fmadd(glmm_load(m[2]), glmm_splat(x0, 2),
_mm_mul_ps(glmm_load(m[3]),
glmm_splat(x0, 3)))));
glmm_store(dest, x1); x2 = _mm_add_ps(_mm_mul_ps(glmm_load(m[2]), glmm_shuff1x(x0, 2)),
_mm_mul_ps(glmm_load(m[3]), glmm_shuff1x(x0, 3)));
glmm_store(dest, _mm_add_ps(x1, x2));
} }
CGLM_INLINE CGLM_INLINE
@@ -105,18 +115,20 @@ glm_mat4_det_sse2(mat4 mat) {
t[3] = i * p - m * l; t[3] = i * p - m * l;
t[4] = i * o - m * k; t[4] = i * o - m * k;
*/ */
x0 = glmm_fnmadd(glmm_shuff1(r3, 0, 0, 1, 1), glmm_shuff1(r2, 2, 3, 2, 3), x0 = _mm_sub_ps(_mm_mul_ps(glmm_shuff1(r2, 0, 0, 1, 1),
_mm_mul_ps(glmm_shuff1(r2, 0, 0, 1, 1), glmm_shuff1(r3, 2, 3, 2, 3)),
glmm_shuff1(r3, 2, 3, 2, 3))); _mm_mul_ps(glmm_shuff1(r3, 0, 0, 1, 1),
glmm_shuff1(r2, 2, 3, 2, 3)));
/* /*
t[0] = k * p - o * l; t[0] = k * p - o * l;
t[0] = k * p - o * l; t[0] = k * p - o * l;
t[5] = i * n - m * j; t[5] = i * n - m * j;
t[5] = i * n - m * j; t[5] = i * n - m * j;
*/ */
x1 = glmm_fnmadd(glmm_shuff1(r3, 0, 0, 2, 2), glmm_shuff1(r2, 1, 1, 3, 3), x1 = _mm_sub_ps(_mm_mul_ps(glmm_shuff1(r2, 0, 0, 2, 2),
_mm_mul_ps(glmm_shuff1(r2, 0, 0, 2, 2), glmm_shuff1(r3, 1, 1, 3, 3)),
glmm_shuff1(r3, 1, 1, 3, 3))); _mm_mul_ps(glmm_shuff1(r3, 0, 0, 2, 2),
glmm_shuff1(r2, 1, 1, 3, 3)));
/* /*
a * (f * t[0] - g * t[1] + h * t[2]) a * (f * t[0] - g * t[1] + h * t[2])
@@ -124,16 +136,21 @@ glm_mat4_det_sse2(mat4 mat) {
+ c * (e * t[1] - f * t[3] + h * t[5]) + c * (e * t[1] - f * t[3] + h * t[5])
- d * (e * t[2] - f * t[4] + g * t[5]) - d * (e * t[2] - f * t[4] + g * t[5])
*/ */
x2 = glmm_fnmadd(glmm_shuff1(r1, 1, 1, 2, 2), glmm_shuff1(x0, 3, 2, 2, 0), x2 = _mm_sub_ps(_mm_mul_ps(glmm_shuff1(r1, 0, 0, 0, 1),
_mm_mul_ps(glmm_shuff1(r1, 0, 0, 0, 1), _mm_shuffle_ps(x1, x0, _MM_SHUFFLE(1, 0, 0, 0))),
_mm_shuffle_ps(x1, x0, _MM_SHUFFLE(1, 0, 0, 0)))); _mm_mul_ps(glmm_shuff1(r1, 1, 1, 2, 2),
x2 = glmm_fmadd(glmm_shuff1(r1, 2, 3, 3, 3), glmm_shuff1(x0, 3, 2, 2, 0)));
_mm_shuffle_ps(x0, x1, _MM_SHUFFLE(2, 2, 3, 1)),
x2);
x2 = _mm_add_ps(x2,
_mm_mul_ps(glmm_shuff1(r1, 2, 3, 3, 3),
_mm_shuffle_ps(x0, x1, _MM_SHUFFLE(2, 2, 3, 1))));
x2 = _mm_xor_ps(x2, _mm_set_ps(-0.f, 0.f, -0.f, 0.f)); x2 = _mm_xor_ps(x2, _mm_set_ps(-0.f, 0.f, -0.f, 0.f));
return glmm_hadd(_mm_mul_ps(x2, r0)); x0 = _mm_mul_ps(r0, x2);
x0 = _mm_add_ps(x0, glmm_shuff1(x0, 0, 1, 2, 3));
x0 = _mm_add_ps(x0, glmm_shuff1(x0, 1, 3, 3, 1));
return _mm_cvtss_f32(x0);
} }
CGLM_INLINE CGLM_INLINE
@@ -142,10 +159,7 @@ glm_mat4_inv_fast_sse2(mat4 mat, mat4 dest) {
__m128 r0, r1, r2, r3, __m128 r0, r1, r2, r3,
v0, v1, v2, v3, v0, v1, v2, v3,
t0, t1, t2, t3, t4, t5, t0, t1, t2, t3, t4, t5,
x0, x1, x2, x3, x4, x5, x6, x7, x8, x9; x0, x1, x2, x3, x4, x5, x6, x7;
x8 = _mm_set_ps(-0.f, 0.f, -0.f, 0.f);
x9 = glmm_shuff1(x8, 2, 1, 2, 1);
/* 127 <- 0 */ /* 127 <- 0 */
r0 = glmm_load(mat[0]); /* d c b a */ r0 = glmm_load(mat[0]); /* d c b a */
@@ -163,7 +177,7 @@ glm_mat4_inv_fast_sse2(mat4 mat, mat4 dest) {
t1[0] = k * p - o * l; t1[0] = k * p - o * l;
t2[0] = g * p - o * h; t2[0] = g * p - o * h;
t3[0] = g * l - k * h; */ t3[0] = g * l - k * h; */
t0 = glmm_fnmadd(x2, x0, _mm_mul_ps(x3, x1)); t0 = _mm_sub_ps(_mm_mul_ps(x3, x1), _mm_mul_ps(x2, x0));
x4 = _mm_shuffle_ps(r2, r3, _MM_SHUFFLE(2, 1, 2, 1)); /* o n k j */ x4 = _mm_shuffle_ps(r2, r3, _MM_SHUFFLE(2, 1, 2, 1)); /* o n k j */
x4 = glmm_shuff1(x4, 0, 2, 2, 2); /* j n n n */ x4 = glmm_shuff1(x4, 0, 2, 2, 2); /* j n n n */
@@ -173,13 +187,13 @@ glm_mat4_inv_fast_sse2(mat4 mat, mat4 dest) {
t1[1] = j * p - n * l; t1[1] = j * p - n * l;
t2[1] = f * p - n * h; t2[1] = f * p - n * h;
t3[1] = f * l - j * h; */ t3[1] = f * l - j * h; */
t1 = glmm_fnmadd(x4, x0, _mm_mul_ps(x5, x1)); t1 = _mm_sub_ps(_mm_mul_ps(x5, x1), _mm_mul_ps(x4, x0));
/* t1[2] = j * o - n * k /* t1[2] = j * o - n * k
t1[2] = j * o - n * k; t1[2] = j * o - n * k;
t2[2] = f * o - n * g; t2[2] = f * o - n * g;
t3[2] = f * k - j * g; */ t3[2] = f * k - j * g; */
t2 = glmm_fnmadd(x4, x3, _mm_mul_ps(x5, x2)); t2 = _mm_sub_ps(_mm_mul_ps(x5, x2), _mm_mul_ps(x4, x3));
x6 = _mm_shuffle_ps(r2, r1, _MM_SHUFFLE(0, 0, 0, 0)); /* e e i i */ x6 = _mm_shuffle_ps(r2, r1, _MM_SHUFFLE(0, 0, 0, 0)); /* e e i i */
x7 = glmm_shuff2(r3, r2, 0, 0, 0, 0, 2, 0, 0, 0); /* i m m m */ x7 = glmm_shuff2(r3, r2, 0, 0, 0, 0, 2, 0, 0, 0); /* i m m m */
@@ -188,19 +202,19 @@ glm_mat4_inv_fast_sse2(mat4 mat, mat4 dest) {
t1[3] = i * p - m * l; t1[3] = i * p - m * l;
t2[3] = e * p - m * h; t2[3] = e * p - m * h;
t3[3] = e * l - i * h; */ t3[3] = e * l - i * h; */
t3 = glmm_fnmadd(x7, x0, _mm_mul_ps(x6, x1)); t3 = _mm_sub_ps(_mm_mul_ps(x6, x1), _mm_mul_ps(x7, x0));
/* t1[4] = i * o - m * k; /* t1[4] = i * o - m * k;
t1[4] = i * o - m * k; t1[4] = i * o - m * k;
t2[4] = e * o - m * g; t2[4] = e * o - m * g;
t3[4] = e * k - i * g; */ t3[4] = e * k - i * g; */
t4 = glmm_fnmadd(x7, x3, _mm_mul_ps(x6, x2)); t4 = _mm_sub_ps(_mm_mul_ps(x6, x2), _mm_mul_ps(x7, x3));
/* t1[5] = i * n - m * j; /* t1[5] = i * n - m * j;
t1[5] = i * n - m * j; t1[5] = i * n - m * j;
t2[5] = e * n - m * f; t2[5] = e * n - m * f;
t3[5] = e * j - i * f; */ t3[5] = e * j - i * f; */
t5 = glmm_fnmadd(x7, x5, _mm_mul_ps(x6, x4)); t5 = _mm_sub_ps(_mm_mul_ps(x6, x4), _mm_mul_ps(x7, x5));
x0 = glmm_shuff2(r1, r0, 0, 0, 0, 0, 2, 2, 2, 0); /* a a a e */ x0 = glmm_shuff2(r1, r0, 0, 0, 0, 0, 2, 2, 2, 0); /* a a a e */
x1 = glmm_shuff2(r1, r0, 1, 1, 1, 1, 2, 2, 2, 0); /* b b b f */ x1 = glmm_shuff2(r1, r0, 1, 1, 1, 1, 2, 2, 2, 0); /* b b b f */
@@ -212,35 +226,50 @@ glm_mat4_inv_fast_sse2(mat4 mat, mat4 dest) {
dest[0][1] =-(b * t1[0] - c * t1[1] + d * t1[2]); dest[0][1] =-(b * t1[0] - c * t1[1] + d * t1[2]);
dest[0][2] = b * t2[0] - c * t2[1] + d * t2[2]; dest[0][2] = b * t2[0] - c * t2[1] + d * t2[2];
dest[0][3] =-(b * t3[0] - c * t3[1] + d * t3[2]); */ dest[0][3] =-(b * t3[0] - c * t3[1] + d * t3[2]); */
v0 = _mm_xor_ps(glmm_fmadd(x3, t2, glmm_fnmadd(x2, t1, _mm_mul_ps(x1, t0))), x8); v0 = _mm_add_ps(_mm_mul_ps(x3, t2),
_mm_sub_ps(_mm_mul_ps(x1, t0),
/* _mm_mul_ps(x2, t1)));
dest[2][0] = e * t1[1] - f * t1[3] + h * t1[5]; v0 = _mm_xor_ps(v0, _mm_set_ps(-0.f, 0.f, -0.f, 0.f));
dest[2][1] =-(a * t1[1] - b * t1[3] + d * t1[5]);
dest[2][2] = a * t2[1] - b * t2[3] + d * t2[5];
dest[2][3] =-(a * t3[1] - b * t3[3] + d * t3[5]);*/
v2 = _mm_xor_ps(glmm_fmadd(x3, t5, glmm_fnmadd(x1, t3, _mm_mul_ps(x0, t1))), x8);
/* /*
dest[1][0] =-(e * t1[0] - g * t1[3] + h * t1[4]); dest[1][0] =-(e * t1[0] - g * t1[3] + h * t1[4]);
dest[1][1] = a * t1[0] - c * t1[3] + d * t1[4]; dest[1][1] = a * t1[0] - c * t1[3] + d * t1[4];
dest[1][2] =-(a * t2[0] - c * t2[3] + d * t2[4]); dest[1][2] =-(a * t2[0] - c * t2[3] + d * t2[4]);
dest[1][3] = a * t3[0] - c * t3[3] + d * t3[4]; */ dest[1][3] = a * t3[0] - c * t3[3] + d * t3[4]; */
v1 = _mm_xor_ps(glmm_fmadd(x3, t4, glmm_fnmadd(x2, t3, _mm_mul_ps(x0, t0))), x9); v1 = _mm_add_ps(_mm_mul_ps(x3, t4),
_mm_sub_ps(_mm_mul_ps(x0, t0),
_mm_mul_ps(x2, t3)));
v1 = _mm_xor_ps(v1, _mm_set_ps(0.f, -0.f, 0.f, -0.f));
/*
dest[2][0] = e * t1[1] - f * t1[3] + h * t1[5];
dest[2][1] =-(a * t1[1] - b * t1[3] + d * t1[5]);
dest[2][2] = a * t2[1] - b * t2[3] + d * t2[5];
dest[2][3] =-(a * t3[1] - b * t3[3] + d * t3[5]);*/
v2 = _mm_add_ps(_mm_mul_ps(x3, t5),
_mm_sub_ps(_mm_mul_ps(x0, t1),
_mm_mul_ps(x1, t3)));
v2 = _mm_xor_ps(v2, _mm_set_ps(-0.f, 0.f, -0.f, 0.f));
/* /*
dest[3][0] =-(e * t1[2] - f * t1[4] + g * t1[5]); dest[3][0] =-(e * t1[2] - f * t1[4] + g * t1[5]);
dest[3][1] = a * t1[2] - b * t1[4] + c * t1[5]; dest[3][1] = a * t1[2] - b * t1[4] + c * t1[5];
dest[3][2] =-(a * t2[2] - b * t2[4] + c * t2[5]); dest[3][2] =-(a * t2[2] - b * t2[4] + c * t2[5]);
dest[3][3] = a * t3[2] - b * t3[4] + c * t3[5]; */ dest[3][3] = a * t3[2] - b * t3[4] + c * t3[5]; */
v3 = _mm_xor_ps(glmm_fmadd(x2, t5, glmm_fnmadd(x1, t4, _mm_mul_ps(x0, t2))), x9); v3 = _mm_add_ps(_mm_mul_ps(x2, t5),
_mm_sub_ps(_mm_mul_ps(x0, t2),
_mm_mul_ps(x1, t4)));
v3 = _mm_xor_ps(v3, _mm_set_ps(0.f, -0.f, 0.f, -0.f));
/* determinant */ /* determinant */
x0 = _mm_shuffle_ps(v0, v1, _MM_SHUFFLE(0, 0, 0, 0)); x0 = _mm_shuffle_ps(v0, v1, _MM_SHUFFLE(0, 0, 0, 0));
x1 = _mm_shuffle_ps(v2, v3, _MM_SHUFFLE(0, 0, 0, 0)); x1 = _mm_shuffle_ps(v2, v3, _MM_SHUFFLE(0, 0, 0, 0));
x0 = _mm_shuffle_ps(x0, x1, _MM_SHUFFLE(2, 0, 2, 0)); x0 = _mm_shuffle_ps(x0, x1, _MM_SHUFFLE(2, 0, 2, 0));
x0 = _mm_rcp_ps(glmm_vhadd(_mm_mul_ps(x0, r0))); x0 = _mm_mul_ps(x0, r0);
x0 = _mm_add_ps(x0, glmm_shuff1(x0, 0, 1, 2, 3));
x0 = _mm_add_ps(x0, glmm_shuff1(x0, 1, 0, 0, 1));
x0 = _mm_rcp_ps(x0);
glmm_store(dest[0], _mm_mul_ps(v0, x0)); glmm_store(dest[0], _mm_mul_ps(v0, x0));
glmm_store(dest[1], _mm_mul_ps(v1, x0)); glmm_store(dest[1], _mm_mul_ps(v1, x0));
@@ -254,10 +283,7 @@ glm_mat4_inv_sse2(mat4 mat, mat4 dest) {
__m128 r0, r1, r2, r3, __m128 r0, r1, r2, r3,
v0, v1, v2, v3, v0, v1, v2, v3,
t0, t1, t2, t3, t4, t5, t0, t1, t2, t3, t4, t5,
x0, x1, x2, x3, x4, x5, x6, x7, x8, x9; x0, x1, x2, x3, x4, x5, x6, x7;
x8 = _mm_set_ps(-0.f, 0.f, -0.f, 0.f);
x9 = glmm_shuff1(x8, 2, 1, 2, 1);
/* 127 <- 0 */ /* 127 <- 0 */
r0 = glmm_load(mat[0]); /* d c b a */ r0 = glmm_load(mat[0]); /* d c b a */
@@ -275,7 +301,7 @@ glm_mat4_inv_sse2(mat4 mat, mat4 dest) {
t1[0] = k * p - o * l; t1[0] = k * p - o * l;
t2[0] = g * p - o * h; t2[0] = g * p - o * h;
t3[0] = g * l - k * h; */ t3[0] = g * l - k * h; */
t0 = glmm_fnmadd(x2, x0, _mm_mul_ps(x3, x1)); t0 = _mm_sub_ps(_mm_mul_ps(x3, x1), _mm_mul_ps(x2, x0));
x4 = _mm_shuffle_ps(r2, r3, _MM_SHUFFLE(2, 1, 2, 1)); /* o n k j */ x4 = _mm_shuffle_ps(r2, r3, _MM_SHUFFLE(2, 1, 2, 1)); /* o n k j */
x4 = glmm_shuff1(x4, 0, 2, 2, 2); /* j n n n */ x4 = glmm_shuff1(x4, 0, 2, 2, 2); /* j n n n */
@@ -285,13 +311,13 @@ glm_mat4_inv_sse2(mat4 mat, mat4 dest) {
t1[1] = j * p - n * l; t1[1] = j * p - n * l;
t2[1] = f * p - n * h; t2[1] = f * p - n * h;
t3[1] = f * l - j * h; */ t3[1] = f * l - j * h; */
t1 = glmm_fnmadd(x4, x0, _mm_mul_ps(x5, x1)); t1 = _mm_sub_ps(_mm_mul_ps(x5, x1), _mm_mul_ps(x4, x0));
/* t1[2] = j * o - n * k /* t1[2] = j * o - n * k
t1[2] = j * o - n * k; t1[2] = j * o - n * k;
t2[2] = f * o - n * g; t2[2] = f * o - n * g;
t3[2] = f * k - j * g; */ t3[2] = f * k - j * g; */
t2 = glmm_fnmadd(x4, x3, _mm_mul_ps(x5, x2)); t2 = _mm_sub_ps(_mm_mul_ps(x5, x2), _mm_mul_ps(x4, x3));
x6 = _mm_shuffle_ps(r2, r1, _MM_SHUFFLE(0, 0, 0, 0)); /* e e i i */ x6 = _mm_shuffle_ps(r2, r1, _MM_SHUFFLE(0, 0, 0, 0)); /* e e i i */
x7 = glmm_shuff2(r3, r2, 0, 0, 0, 0, 2, 0, 0, 0); /* i m m m */ x7 = glmm_shuff2(r3, r2, 0, 0, 0, 0, 2, 0, 0, 0); /* i m m m */
@@ -300,19 +326,19 @@ glm_mat4_inv_sse2(mat4 mat, mat4 dest) {
t1[3] = i * p - m * l; t1[3] = i * p - m * l;
t2[3] = e * p - m * h; t2[3] = e * p - m * h;
t3[3] = e * l - i * h; */ t3[3] = e * l - i * h; */
t3 = glmm_fnmadd(x7, x0, _mm_mul_ps(x6, x1)); t3 = _mm_sub_ps(_mm_mul_ps(x6, x1), _mm_mul_ps(x7, x0));
/* t1[4] = i * o - m * k; /* t1[4] = i * o - m * k;
t1[4] = i * o - m * k; t1[4] = i * o - m * k;
t2[4] = e * o - m * g; t2[4] = e * o - m * g;
t3[4] = e * k - i * g; */ t3[4] = e * k - i * g; */
t4 = glmm_fnmadd(x7, x3, _mm_mul_ps(x6, x2)); t4 = _mm_sub_ps(_mm_mul_ps(x6, x2), _mm_mul_ps(x7, x3));
/* t1[5] = i * n - m * j; /* t1[5] = i * n - m * j;
t1[5] = i * n - m * j; t1[5] = i * n - m * j;
t2[5] = e * n - m * f; t2[5] = e * n - m * f;
t3[5] = e * j - i * f; */ t3[5] = e * j - i * f; */
t5 = glmm_fnmadd(x7, x5, _mm_mul_ps(x6, x4)); t5 = _mm_sub_ps(_mm_mul_ps(x6, x4), _mm_mul_ps(x7, x5));
x0 = glmm_shuff2(r1, r0, 0, 0, 0, 0, 2, 2, 2, 0); /* a a a e */ x0 = glmm_shuff2(r1, r0, 0, 0, 0, 0, 2, 2, 2, 0); /* a a a e */
x1 = glmm_shuff2(r1, r0, 1, 1, 1, 1, 2, 2, 2, 0); /* b b b f */ x1 = glmm_shuff2(r1, r0, 1, 1, 1, 1, 2, 2, 2, 0); /* b b b f */
@@ -324,35 +350,50 @@ glm_mat4_inv_sse2(mat4 mat, mat4 dest) {
dest[0][1] =-(b * t1[0] - c * t1[1] + d * t1[2]); dest[0][1] =-(b * t1[0] - c * t1[1] + d * t1[2]);
dest[0][2] = b * t2[0] - c * t2[1] + d * t2[2]; dest[0][2] = b * t2[0] - c * t2[1] + d * t2[2];
dest[0][3] =-(b * t3[0] - c * t3[1] + d * t3[2]); */ dest[0][3] =-(b * t3[0] - c * t3[1] + d * t3[2]); */
v0 = _mm_xor_ps(glmm_fmadd(x3, t2, glmm_fnmadd(x2, t1, _mm_mul_ps(x1, t0))), x8); v0 = _mm_add_ps(_mm_mul_ps(x3, t2),
_mm_sub_ps(_mm_mul_ps(x1, t0),
/* _mm_mul_ps(x2, t1)));
dest[2][0] = e * t1[1] - f * t1[3] + h * t1[5]; v0 = _mm_xor_ps(v0, _mm_set_ps(-0.f, 0.f, -0.f, 0.f));
dest[2][1] =-(a * t1[1] - b * t1[3] + d * t1[5]);
dest[2][2] = a * t2[1] - b * t2[3] + d * t2[5];
dest[2][3] =-(a * t3[1] - b * t3[3] + d * t3[5]);*/
v2 = _mm_xor_ps(glmm_fmadd(x3, t5, glmm_fnmadd(x1, t3, _mm_mul_ps(x0, t1))), x8);
/* /*
dest[1][0] =-(e * t1[0] - g * t1[3] + h * t1[4]); dest[1][0] =-(e * t1[0] - g * t1[3] + h * t1[4]);
dest[1][1] = a * t1[0] - c * t1[3] + d * t1[4]; dest[1][1] = a * t1[0] - c * t1[3] + d * t1[4];
dest[1][2] =-(a * t2[0] - c * t2[3] + d * t2[4]); dest[1][2] =-(a * t2[0] - c * t2[3] + d * t2[4]);
dest[1][3] = a * t3[0] - c * t3[3] + d * t3[4]; */ dest[1][3] = a * t3[0] - c * t3[3] + d * t3[4]; */
v1 = _mm_xor_ps(glmm_fmadd(x3, t4, glmm_fnmadd(x2, t3, _mm_mul_ps(x0, t0))), x9); v1 = _mm_add_ps(_mm_mul_ps(x3, t4),
_mm_sub_ps(_mm_mul_ps(x0, t0),
_mm_mul_ps(x2, t3)));
v1 = _mm_xor_ps(v1, _mm_set_ps(0.f, -0.f, 0.f, -0.f));
/*
dest[2][0] = e * t1[1] - f * t1[3] + h * t1[5];
dest[2][1] =-(a * t1[1] - b * t1[3] + d * t1[5]);
dest[2][2] = a * t2[1] - b * t2[3] + d * t2[5];
dest[2][3] =-(a * t3[1] - b * t3[3] + d * t3[5]);*/
v2 = _mm_add_ps(_mm_mul_ps(x3, t5),
_mm_sub_ps(_mm_mul_ps(x0, t1),
_mm_mul_ps(x1, t3)));
v2 = _mm_xor_ps(v2, _mm_set_ps(-0.f, 0.f, -0.f, 0.f));
/* /*
dest[3][0] =-(e * t1[2] - f * t1[4] + g * t1[5]); dest[3][0] =-(e * t1[2] - f * t1[4] + g * t1[5]);
dest[3][1] = a * t1[2] - b * t1[4] + c * t1[5]; dest[3][1] = a * t1[2] - b * t1[4] + c * t1[5];
dest[3][2] =-(a * t2[2] - b * t2[4] + c * t2[5]); dest[3][2] =-(a * t2[2] - b * t2[4] + c * t2[5]);
dest[3][3] = a * t3[2] - b * t3[4] + c * t3[5]; */ dest[3][3] = a * t3[2] - b * t3[4] + c * t3[5]; */
v3 = _mm_xor_ps(glmm_fmadd(x2, t5, glmm_fnmadd(x1, t4, _mm_mul_ps(x0, t2))), x9); v3 = _mm_add_ps(_mm_mul_ps(x2, t5),
_mm_sub_ps(_mm_mul_ps(x0, t2),
_mm_mul_ps(x1, t4)));
v3 = _mm_xor_ps(v3, _mm_set_ps(0.f, -0.f, 0.f, -0.f));
/* determinant */ /* determinant */
x0 = _mm_shuffle_ps(v0, v1, _MM_SHUFFLE(0, 0, 0, 0)); x0 = _mm_shuffle_ps(v0, v1, _MM_SHUFFLE(0, 0, 0, 0));
x1 = _mm_shuffle_ps(v2, v3, _MM_SHUFFLE(0, 0, 0, 0)); x1 = _mm_shuffle_ps(v2, v3, _MM_SHUFFLE(0, 0, 0, 0));
x0 = _mm_shuffle_ps(x0, x1, _MM_SHUFFLE(2, 0, 2, 0)); x0 = _mm_shuffle_ps(x0, x1, _MM_SHUFFLE(2, 0, 2, 0));
x0 = _mm_div_ps(_mm_set1_ps(1.0f), glmm_vhadd(_mm_mul_ps(x0, r0))); x0 = _mm_mul_ps(x0, r0);
x0 = _mm_add_ps(x0, glmm_shuff1(x0, 0, 1, 2, 3));
x0 = _mm_add_ps(x0, glmm_shuff1(x0, 1, 0, 0, 1));
x0 = _mm_div_ps(_mm_set1_ps(1.0f), x0);
glmm_store(dest[0], _mm_mul_ps(v0, x0)); glmm_store(dest[0], _mm_mul_ps(v0, x0));
glmm_store(dest[1], _mm_mul_ps(v1, x0)); glmm_store(dest[1], _mm_mul_ps(v1, x0));
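As a quick reference for the kernels above, a scalar-level usage sketch of glm_mat4_mulv (column-major matrix times column vector); the input values are illustrative only.

```c
#include <stdio.h>
#include <cglm/cglm.h>

int main(void) {
  mat4 m;
  vec4 v = {1.0f, 0.0f, 0.0f, 1.0f}, out;

  glm_translate_make(m, (vec3){0.0f, 5.0f, 0.0f});

  /* out = m * v; uses glm_mat4_mulv_sse2 on SSE builds */
  glm_mat4_mulv(m, v, out);

  printf("out = %f %f %f %f\n", out[0], out[1], out[2], out[3]);  /* 1 5 0 1 */
  return 0;
}
```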

View File

@@ -27,15 +27,15 @@ glm_quat_mul_sse2(versor p, versor q, versor dest) {
xp = glmm_load(p); /* 3 2 1 0 */ xp = glmm_load(p); /* 3 2 1 0 */
xq = glmm_load(q); xq = glmm_load(q);
r = _mm_mul_ps(glmm_splat(xp, 3), xq); r = _mm_mul_ps(glmm_shuff1x(xp, 3), xq);
x0 = _mm_xor_ps(glmm_splat(xp, 0), _mm_set_ps(-0.f, 0.f, -0.f, 0.f)); x0 = _mm_xor_ps(glmm_shuff1x(xp, 0), _mm_set_ps(-0.f, 0.f, -0.f, 0.f));
r = _mm_add_ps(r, _mm_mul_ps(x0, glmm_shuff1(xq, 0, 1, 2, 3))); r = _mm_add_ps(r, _mm_mul_ps(x0, glmm_shuff1(xq, 0, 1, 2, 3)));
x0 = _mm_xor_ps(glmm_splat(xp, 1), _mm_set_ps(-0.f, -0.f, 0.f, 0.f)); x0 = _mm_xor_ps(glmm_shuff1x(xp, 1), _mm_set_ps(-0.f, -0.f, 0.f, 0.f));
r = _mm_add_ps(r, _mm_mul_ps(x0, glmm_shuff1(xq, 1, 0, 3, 2))); r = _mm_add_ps(r, _mm_mul_ps(x0, glmm_shuff1(xq, 1, 0, 3, 2)));
x0 = _mm_xor_ps(glmm_splat(xp, 2), _mm_set_ps(-0.f, 0.f, 0.f, -0.f)); x0 = _mm_xor_ps(glmm_shuff1x(xp, 2), _mm_set_ps(-0.f, 0.f, 0.f, -0.f));
r = _mm_add_ps(r, _mm_mul_ps(x0, glmm_shuff1(xq, 2, 3, 0, 1))); r = _mm_add_ps(r, _mm_mul_ps(x0, glmm_shuff1(xq, 2, 3, 0, 1)));
glmm_store(dest, r); glmm_store(dest, r);
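A usage sketch of glm_quat_mul, which the SSE2 kernel above implements; glm_quatv and glm_quat_angle are existing cglm helpers, and the angles are arbitrary examples.

```c
#include <stdio.h>
#include <cglm/cglm.h>

int main(void) {
  versor p, q, r;

  /* two rotations about Y: 30 and 60 degrees */
  glm_quatv(p, glm_rad(30.0f), (vec3){0.0f, 1.0f, 0.0f});
  glm_quatv(q, glm_rad(60.0f), (vec3){0.0f, 1.0f, 0.0f});

  glm_quat_mul(p, q, r);   /* compose; same axis, so order does not matter here */

  printf("angle of r = %f deg\n", glm_deg(glm_quat_angle(r)));  /* ~90 */
  return 0;
}
```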

View File

@@ -18,9 +18,6 @@
# define glmm_store(p, a) _mm_store_ps(p, a) # define glmm_store(p, a) _mm_store_ps(p, a)
#endif #endif
#define glmm_set1(x) _mm_set1_ps(x)
#define glmm_128 __m128
#ifdef CGLM_USE_INT_DOMAIN #ifdef CGLM_USE_INT_DOMAIN
# define glmm_shuff1(xmm, z, y, x, w) \ # define glmm_shuff1(xmm, z, y, x, w) \
_mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(xmm), \ _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(xmm), \
@@ -30,16 +27,7 @@
_mm_shuffle_ps(xmm, xmm, _MM_SHUFFLE(z, y, x, w)) _mm_shuffle_ps(xmm, xmm, _MM_SHUFFLE(z, y, x, w))
#endif #endif
#define glmm_splat(x, lane) glmm_shuff1(x, lane, lane, lane, lane)
#define glmm_splat_x(x) glmm_splat(x, 0)
#define glmm_splat_y(x) glmm_splat(x, 1)
#define glmm_splat_z(x) glmm_splat(x, 2)
#define glmm_splat_w(x) glmm_splat(x, 3)
/* glmm_shuff1x() is DEPRECATED!, use glmm_splat() */
#define glmm_shuff1x(xmm, x) glmm_shuff1(xmm, x, x, x, x) #define glmm_shuff1x(xmm, x) glmm_shuff1(xmm, x, x, x, x)
#define glmm_shuff2(a, b, z0, y0, x0, w0, z1, y1, x1, w1) \ #define glmm_shuff2(a, b, z0, y0, x0, w0, z1, y1, x1, w1) \
glmm_shuff1(_mm_shuffle_ps(a, b, _MM_SHUFFLE(z0, y0, x0, w0)), \ glmm_shuff1(_mm_shuffle_ps(a, b, _MM_SHUFFLE(z0, y0, x0, w0)), \
z1, y1, x1, w1) z1, y1, x1, w1)
@@ -60,15 +48,6 @@ glmm_abs(__m128 x) {
return _mm_andnot_ps(_mm_set1_ps(-0.0f), x); return _mm_andnot_ps(_mm_set1_ps(-0.0f), x);
} }
static inline
__m128
glmm_vhadd(__m128 v) {
__m128 x0;
x0 = _mm_add_ps(v, glmm_shuff1(v, 0, 1, 2, 3));
x0 = _mm_add_ps(x0, glmm_shuff1(x0, 1, 0, 0, 1));
return x0;
}
static inline static inline
__m128 __m128
glmm_vhadds(__m128 v) { glmm_vhadds(__m128 v) {
@@ -101,7 +80,7 @@ glmm_vhmin(__m128 v) {
__m128 x0, x1, x2; __m128 x0, x1, x2;
x0 = _mm_movehl_ps(v, v); /* [2, 3, 2, 3] */ x0 = _mm_movehl_ps(v, v); /* [2, 3, 2, 3] */
x1 = _mm_min_ps(x0, v); /* [0|2, 1|3, 2|2, 3|3] */ x1 = _mm_min_ps(x0, v); /* [0|2, 1|3, 2|2, 3|3] */
x2 = glmm_splat(x1, 1); /* [1|3, 1|3, 1|3, 1|3] */ x2 = glmm_shuff1x(x1, 1); /* [1|3, 1|3, 1|3, 1|3] */
return _mm_min_ss(x1, x2); return _mm_min_ss(x1, x2);
} }
@@ -117,7 +96,7 @@ glmm_vhmax(__m128 v) {
__m128 x0, x1, x2; __m128 x0, x1, x2;
x0 = _mm_movehl_ps(v, v); /* [2, 3, 2, 3] */ x0 = _mm_movehl_ps(v, v); /* [2, 3, 2, 3] */
x1 = _mm_max_ps(x0, v); /* [0|2, 1|3, 2|2, 3|3] */ x1 = _mm_max_ps(x0, v); /* [0|2, 1|3, 2|2, 3|3] */
x2 = glmm_splat(x1, 1); /* [1|3, 1|3, 1|3, 1|3] */ x2 = glmm_shuff1x(x1, 1); /* [1|3, 1|3, 1|3, 1|3] */
return _mm_max_ss(x1, x2); return _mm_max_ss(x1, x2);
} }
@@ -209,99 +188,5 @@ glmm_store3(float v[3], __m128 vx) {
_mm_store_ss(&v[2], glmm_shuff1(vx, 2, 2, 2, 2)); _mm_store_ss(&v[2], glmm_shuff1(vx, 2, 2, 2, 2));
} }
static inline
__m128
glmm_div(__m128 a, __m128 b) {
return _mm_div_ps(a, b);
}
/* enable FMA macro for MSVC? */
#if defined(_MSC_VER) && !defined(__FMA__) && defined(__AVX2__)
# define __FMA__ 1
#endif
static inline
__m128
glmm_fmadd(__m128 a, __m128 b, __m128 c) {
#ifdef __FMA__
return _mm_fmadd_ps(a, b, c);
#else
return _mm_add_ps(c, _mm_mul_ps(a, b));
#endif
}
static inline
__m128
glmm_fnmadd(__m128 a, __m128 b, __m128 c) {
#ifdef __FMA__
return _mm_fnmadd_ps(a, b, c);
#else
return _mm_sub_ps(c, _mm_mul_ps(a, b));
#endif
}
static inline
__m128
glmm_fmsub(__m128 a, __m128 b, __m128 c) {
#ifdef __FMA__
return _mm_fmsub_ps(a, b, c);
#else
return _mm_sub_ps(_mm_mul_ps(a, b), c);
#endif
}
static inline
__m128
glmm_fnmsub(__m128 a, __m128 b, __m128 c) {
#ifdef __FMA__
return _mm_fnmsub_ps(a, b, c);
#else
return _mm_xor_ps(_mm_add_ps(_mm_mul_ps(a, b), c), _mm_set1_ps(-0.0f));
#endif
}
#if defined(__AVX__)
static inline
__m256
glmm256_fmadd(__m256 a, __m256 b, __m256 c) {
#ifdef __FMA__
return _mm256_fmadd_ps(a, b, c);
#else
return _mm256_add_ps(c, _mm256_mul_ps(a, b));
#endif
}
static inline
__m256
glmm256_fnmadd(__m256 a, __m256 b, __m256 c) {
#ifdef __FMA__
return _mm256_fnmadd_ps(a, b, c);
#else
return _mm256_sub_ps(c, _mm256_mul_ps(a, b));
#endif
}
static inline
__m256
glmm256_fmsub(__m256 a, __m256 b, __m256 c) {
#ifdef __FMA__
return _mm256_fmsub_ps(a, b, c);
#else
return _mm256_sub_ps(_mm256_mul_ps(a, b), c);
#endif
}
static inline
__m256
glmm256_fnmsub(__m256 a, __m256 b, __m256 c) {
#ifdef __FMA__
return _mm256_fmsub_ps(a, b, c);
#else
return _mm256_xor_ps(_mm256_sub_ps(_mm256_mul_ps(a, b), c),
_mm256_set1_ps(-0.0f));
#endif
}
#endif
#endif #endif
#endif /* cglm_simd_x86_h */ #endif /* cglm_simd_x86_h */
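The glmm_fmadd family in the hunk above wraps a hardware FMA with a plain SSE fallback. A self-contained sketch of the same pattern outside cglm; the name fma4 is ours, purely illustrative.

```c
#include <stdio.h>
#include <immintrin.h>

/* a*b + c: one fused instruction when FMA is available, otherwise
   mul + add with two roundings; mirrors glmm_fmadd above */
static inline __m128 fma4(__m128 a, __m128 b, __m128 c) {
#ifdef __FMA__
  return _mm_fmadd_ps(a, b, c);
#else
  return _mm_add_ps(_mm_mul_ps(a, b), c);
#endif
}

int main(void) {
  float out[4];
  __m128 r = fma4(_mm_set1_ps(2.0f), _mm_set1_ps(3.0f), _mm_set1_ps(1.0f));
  _mm_storeu_ps(out, r);
  printf("%f %f %f %f\n", out[0], out[1], out[2], out[3]);  /* 7 7 7 7 */
  return 0;
}
```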

View File

@@ -237,9 +237,9 @@ glm_vec3_abs(vec3 v, vec3 dest) {
CGLM_INLINE CGLM_INLINE
void void
glm_vec3_fract(vec3 v, vec3 dest) { glm_vec3_fract(vec3 v, vec3 dest) {
dest[0] = fminf(v[0] - floorf(v[0]), 0.999999940395355224609375f); dest[0] = fminf(v[0] - floorf(v[0]), 0x1.fffffep-1f);
dest[1] = fminf(v[1] - floorf(v[1]), 0.999999940395355224609375f); dest[1] = fminf(v[1] - floorf(v[1]), 0x1.fffffep-1f);
dest[2] = fminf(v[2] - floorf(v[2]), 0.999999940395355224609375f); dest[2] = fminf(v[2] - floorf(v[2]), 0x1.fffffep-1f);
} }
/*! /*!

View File

@@ -224,10 +224,10 @@ glm_vec4_sign(vec4 v, vec4 dest) {
x0 = glmm_load(v); x0 = glmm_load(v);
x1 = _mm_set_ps(0.0f, 0.0f, 1.0f, -1.0f); x1 = _mm_set_ps(0.0f, 0.0f, 1.0f, -1.0f);
x2 = glmm_splat(x1, 2); x2 = glmm_shuff1x(x1, 2);
x3 = _mm_and_ps(_mm_cmpgt_ps(x0, x2), glmm_splat(x1, 1)); x3 = _mm_and_ps(_mm_cmpgt_ps(x0, x2), glmm_shuff1x(x1, 1));
x4 = _mm_and_ps(_mm_cmplt_ps(x0, x2), glmm_splat(x1, 0)); x4 = _mm_and_ps(_mm_cmplt_ps(x0, x2), glmm_shuff1x(x1, 0));
glmm_store(dest, _mm_or_ps(x3, x4)); glmm_store(dest, _mm_or_ps(x3, x4));
#else #else
@@ -268,10 +268,10 @@ glm_vec4_abs(vec4 v, vec4 dest) {
CGLM_INLINE CGLM_INLINE
void void
glm_vec4_fract(vec4 v, vec4 dest) { glm_vec4_fract(vec4 v, vec4 dest) {
dest[0] = fminf(v[0] - floorf(v[0]), 0.999999940395355224609375f); dest[0] = fminf(v[0] - floorf(v[0]), 0x1.fffffep-1f);
dest[1] = fminf(v[1] - floorf(v[1]), 0.999999940395355224609375f); dest[1] = fminf(v[1] - floorf(v[1]), 0x1.fffffep-1f);
dest[2] = fminf(v[2] - floorf(v[2]), 0.999999940395355224609375f); dest[2] = fminf(v[2] - floorf(v[2]), 0x1.fffffep-1f);
dest[3] = fminf(v[3] - floorf(v[3]), 0.999999940395355224609375f); dest[3] = fminf(v[3] - floorf(v[3]), 0x1.fffffep-1f);
} }
/*! /*!
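Both literals in the fract hunks spell the same constant, the largest float strictly below 1.0 (i.e. nextafterf(1.0f, 0.0f)); only the notation differs. A small check plus a glm_vec4_fract call, with illustrative inputs:

```c
#include <stdio.h>
#include <math.h>
#include <cglm/cglm.h>

int main(void) {
  /* hex and decimal spellings of the largest float below 1.0 */
  printf("%d\n", 0x1.fffffep-1f == 0.999999940395355224609375f);  /* 1 */
  printf("%d\n", 0x1.fffffep-1f == nextafterf(1.0f, 0.0f));       /* 1 */

  vec4 v = {1.25f, -0.75f, 2.0f, 0.5f}, out;
  glm_vec4_fract(v, out);   /* fractional part, clamped just below 1.0 */
  printf("%f %f %f %f\n", out[0], out[1], out[2], out[3]);  /* 0.25 0.25 0 0.5 */
  return 0;
}
```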

View File

@@ -473,8 +473,8 @@ glm_vec4_scale_as(vec4 v, float s, vec4 dest) {
CGLM_INLINE CGLM_INLINE
void void
glm_vec4_div(vec4 a, vec4 b, vec4 dest) { glm_vec4_div(vec4 a, vec4 b, vec4 dest) {
#if defined(CGLM_SIMD) #if defined( __SSE__ ) || defined( __SSE2__ )
glmm_store(dest, glmm_div(glmm_load(a), glmm_load(b))); glmm_store(dest, _mm_div_ps(glmm_load(a), glmm_load(b)));
#else #else
dest[0] = a[0] / b[0]; dest[0] = a[0] / b[0];
dest[1] = a[1] / b[1]; dest[1] = a[1] / b[1];
@@ -568,8 +568,14 @@ glm_vec4_subadd(vec4 a, vec4 b, vec4 dest) {
CGLM_INLINE CGLM_INLINE
void void
glm_vec4_muladd(vec4 a, vec4 b, vec4 dest) { glm_vec4_muladd(vec4 a, vec4 b, vec4 dest) {
#if defined(CGLM_SIMD) #if defined( __SSE__ ) || defined( __SSE2__ )
glmm_store(dest, glmm_fmadd(glmm_load(a), glmm_load(b), glmm_load(dest))); glmm_store(dest, _mm_add_ps(glmm_load(dest),
_mm_mul_ps(glmm_load(a),
glmm_load(b))));
#elif defined(CGLM_NEON_FP)
vst1q_f32(dest, vaddq_f32(vld1q_f32(dest),
vmulq_f32(vld1q_f32(a),
vld1q_f32(b))));
#else #else
dest[0] += a[0] * b[0]; dest[0] += a[0] * b[0];
dest[1] += a[1] * b[1]; dest[1] += a[1] * b[1];
@@ -590,8 +596,14 @@ glm_vec4_muladd(vec4 a, vec4 b, vec4 dest) {
CGLM_INLINE CGLM_INLINE
void void
glm_vec4_muladds(vec4 a, float s, vec4 dest) { glm_vec4_muladds(vec4 a, float s, vec4 dest) {
#if defined(CGLM_SIMD) #if defined( __SSE__ ) || defined( __SSE2__ )
glmm_store(dest, glmm_fmadd(glmm_load(a), glmm_set1(s), glmm_load(dest))); glmm_store(dest, _mm_add_ps(glmm_load(dest),
_mm_mul_ps(glmm_load(a),
_mm_set1_ps(s))));
#elif defined(CGLM_NEON_FP)
vst1q_f32(dest, vaddq_f32(vld1q_f32(dest),
vmulq_f32(vld1q_f32(a),
vdupq_n_f32(s))));
#else #else
dest[0] += a[0] * s; dest[0] += a[0] * s;
dest[1] += a[1] * s; dest[1] += a[1] * s;
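A usage sketch for glm_vec4_muladds as changed above (dest += a * s); which SIMD branch runs depends on the build, but the result is the same. The values are illustrative.

```c
#include <stdio.h>
#include <cglm/cglm.h>

int main(void) {
  vec4 acc = {0.0f, 0.0f, 0.0f, 0.0f};
  vec4 a   = {1.0f, 2.0f, 3.0f, 4.0f};

  glm_vec4_muladds(a, 0.5f, acc);   /* acc += a * 0.5 */
  glm_vec4_muladds(a, 0.5f, acc);   /* acc += a * 0.5 again */

  printf("%f %f %f %f\n", acc[0], acc[1], acc[2], acc[3]);  /* 1 2 3 4 */
  return 0;
}
```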

View File

@@ -10,6 +10,6 @@
#define CGLM_VERSION_MAJOR 0 #define CGLM_VERSION_MAJOR 0
#define CGLM_VERSION_MINOR 8 #define CGLM_VERSION_MINOR 8
#define CGLM_VERSION_PATCH 2 #define CGLM_VERSION_PATCH 0
#endif /* cglm_version_h */ #endif /* cglm_version_h */

View File

@@ -1,5 +1,5 @@
project('cglm', 'c', project('cglm', 'c',
version : '0.8.2', version : '0.8.0',
license : 'mit', license : 'mit',
default_options : [ default_options : [
'c_std=c11', 'c_std=c11',

View File

@@ -35,9 +35,9 @@ TEST_IMPL(camera_decomp) {
farVal = 100.0f; farVal = 100.0f;
glm_perspective(fovy, aspect, nearVal, farVal, proj); glm_perspective(fovy, aspect, nearVal, farVal, proj);
ASSERT(fabsf(aspect - glm_persp_aspect(proj)) < GLM_FLT_EPSILON) ASSERT(fabsf(aspect - glm_persp_aspect(proj)) < 1e-5f)
ASSERT(fabsf(fovy - glm_persp_fovy(proj)) < GLM_FLT_EPSILON) ASSERT(fabsf(fovy - glm_persp_fovy(proj)) < 1e-5f)
ASSERT(fabsf(49.984f - glm_deg(glm_persp_fovy(proj))) < GLM_FLT_EPSILON) ASSERT(fabsf(49.984f - glm_deg(glm_persp_fovy(proj))) < 1e-5f)
glm_persp_sizes(proj, fovy, sizes); glm_persp_sizes(proj, fovy, sizes);

View File

@@ -106,7 +106,7 @@ test_rand_quat(versor q);
CGLM_INLINE CGLM_INLINE
bool bool
test_eq(float a, float b) { test_eq(float a, float b) {
return fabsf(a - b) <= GLM_FLT_EPSILON * 10; return fabsf(a - b) <= 1e-5f;
} }
CGLM_INLINE CGLM_INLINE