Compare commits

...

18 Commits
v0.8.0 ... fma

Author SHA1 Message Date
Recep Aslantas
ebba4eea8e win, msvc: enable FMA macro for MSVC 2021-04-19 04:14:14 +03:00
Recep Aslantas
aa2fa89e6c arm: fma msub and nmsub 2021-04-19 00:35:19 +03:00
Recep Aslantas
7b0eee497e arm: fix fmadd parameter order 2021-04-19 00:28:07 +03:00
Recep Aslantas
04008d9c3f arm: fix fma for glm_vec4_muladds 2021-04-19 00:21:04 +03:00
Recep Aslantas
11b1588105 glmm: missing FMA funcs for SSE and AVX 2021-04-19 00:20:47 +03:00
Recep Aslantas
7c81482248 avx: implement scale matrix using AVX 2021-04-19 00:11:43 +03:00
Recep Aslantas
f3f29bd383 vec4: optimize muladd and muladds with fma 2021-04-18 16:24:29 +03:00
Recep Aslantas
0d0d22f96c optimize affine matrix operations with fma 2021-04-18 13:51:22 +03:00
Recep Aslantas
7df5aa2e26 optimize mat2 operations with fma 2021-04-18 13:51:09 +03:00
Recep Aslantas
7cc4c37afb optimize mat3 operations with fma 2021-04-18 13:51:03 +03:00
Recep Aslantas
abe29a788a optimize mat4 operations with fma 2021-04-18 13:50:51 +03:00
Recep Aslantas
c5655bbd2e glmm: define fma functions 2021-04-18 13:49:50 +03:00
Recep Aslantas
47e0045015 glmm, x86: define hadd function 2021-04-18 13:49:36 +03:00
Recep Aslantas
8f09cc8583 Merge pull request #183 from legends2k/master
Add CMake interface library target
2021-03-26 20:36:56 +03:00
Sundaram Ramaswamy
d6a0ac320b Update docs on CMake header-only usage 2021-03-26 12:32:36 +05:30
Sundaram Ramaswamy
616d38c13a Remove redundant header listing 2021-03-26 12:23:56 +05:30
Sundaram Ramaswamy
9e12908556 Add CMake interface library target
Projects using cglm as a header-only library needn’t build files under
src/. Provide a target which allows them to skip compiling them by

add_subdirectory(external/cglm EXCLUDE_FROM_ALL)
target_link_libraries(MyExe PRIVATE cglm_headers)
2021-03-25 17:47:43 +05:30
Recep Aslantas
405cda6ee9 now working on v0.8.1 2021-03-02 23:36:33 +03:00
17 changed files with 334 additions and 205 deletions

View File: CMakeLists.txt

@@ -1,5 +1,5 @@
 cmake_minimum_required(VERSION 3.8.2)
-project(cglm VERSION 0.8.0 LANGUAGES C)
+project(cglm VERSION 0.8.1 LANGUAGES C)
 set(CMAKE_C_STANDARD 11)
 set(CMAKE_C_STANDARD_REQUIRED YES)
@@ -93,6 +93,11 @@ target_include_directories(${PROJECT_NAME}
     ${CMAKE_CURRENT_SOURCE_DIR}/src
 )
+# Target for header-only usage
+add_library(${PROJECT_NAME}_headers INTERFACE)
+target_include_directories(${PROJECT_NAME}_headers INTERFACE
+                           ${CMAKE_CURRENT_SOURCE_DIR}/include)
+
 # Test Configuration
 if(CGLM_USE_TEST)
   include(CTest)

View File: README.md

@@ -168,6 +168,24 @@ option(CGLM_USE_C99 "" OFF) # C11
 option(CGLM_USE_TEST "Enable Tests" OFF) # for make check - make test
 ```
+
+#### Use as header-only library with your CMake project
+This requires no building or installation of cglm.
+
+* Example:
+``` cmake
+cmake_minimum_required(VERSION 3.8.2)
+project(<Your Project Name>)
+add_executable(${PROJECT_NAME} src/main.c)
+target_link_libraries(${PROJECT_NAME} PRIVATE
+  cglm_headers)
+add_subdirectory(external/cglm/ EXCLUDE_FROM_ALL)
+```
+
 #### Use with your CMake project
 * Example:
 ```cmake

View File: cglm.podspec

@@ -2,7 +2,7 @@ Pod::Spec.new do |s|
   # Description
   s.name        = "cglm"
-  s.version     = "0.7.9"
+  s.version     = "0.8.0"
   s.summary     = "📽 Highly Optimized Graphics Math (glm) for C"
   s.description = <<-DESC
   cglm is math library for graphics programming for C. See the documentation or README for all features.

View File: configure.ac

@@ -7,7 +7,7 @@
 #*****************************************************************************
 AC_PREREQ([2.69])
-AC_INIT([cglm], [0.8.0], [info@recp.me])
+AC_INIT([cglm], [0.8.1], [info@recp.me])
 AM_INIT_AUTOMAKE([-Wall -Werror foreign subdir-objects serial-tests])
 # Don't use the default cflags (-O2 -g), we set ours manually in Makefile.am.

View File: docs/source/build.rst

@@ -32,6 +32,22 @@ If you don't want to install **cglm** to your system's folder you can get static
 option(CGLM_USE_C99 "" OFF) # C11
 option(CGLM_USE_TEST "Enable Tests" OFF) # for make check - make test
+
+**Use as header-only library with your CMake project example**
+
+This requires no building or installation of cglm.
+
+.. code-block:: CMake
+  :linenos:
+
+  cmake_minimum_required(VERSION 3.8.2)
+  project(<Your Project Name>)
+  add_executable(${PROJECT_NAME} src/main.c)
+  target_link_libraries(${PROJECT_NAME} PRIVATE
+    cglm_headers)
+  add_subdirectory(external/cglm/ EXCLUDE_FROM_ALL)
+
 **Use with your CMake project example**
 
 .. code-block:: CMake

View File: docs/source/conf.py

@@ -62,9 +62,9 @@ author = u'Recep Aslantas'
 # built documents.
 #
 # The short X.Y version.
-version = u'0.8.0'
+version = u'0.8.1'
 # The full version, including alpha/beta/rc tags.
-release = u'0.8.0'
+release = u'0.8.1'
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.

View File: include/cglm/mat4.h

@@ -539,7 +539,9 @@ glm_mat4_scale_p(mat4 m, float s) {
 CGLM_INLINE
 void
 glm_mat4_scale(mat4 m, float s) {
-#if defined( __SSE__ ) || defined( __SSE2__ )
+#ifdef __AVX__
+  glm_mat4_scale_avx(m, s);
+#elif defined( __SSE__ ) || defined( __SSE2__ )
   glm_mat4_scale_sse2(m, s);
 #elif defined(CGLM_NEON_FP)
   glm_mat4_scale_neon(m, s);
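
The dispatch is resolved entirely at compile time: callers invoke the generic glm_mat4_scale and the preprocessor picks the AVX, SSE2, NEON, or scalar body from compiler flags such as -mavx. A minimal usage sketch (not from this diff), assuming cglm is on the include path:

```c
#include <cglm/cglm.h>
#include <stdio.h>

int main(void) {
  mat4 m = GLM_MAT4_IDENTITY_INIT;
  glm_mat4_scale(m, 2.0f);  /* built with -mavx this runs glm_mat4_scale_avx */
  printf("%f\n", m[0][0]);  /* prints 2.000000 */
  return 0;
}
```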

View File: include/cglm/simd/arm.h

@@ -79,5 +79,41 @@ glmm_norm_inf(float32x4_t a) {
   return glmm_hmax(glmm_abs(a));
 }
+
+static inline
+float32x4_t
+glmm_fmadd(float32x4_t a, float32x4_t b, float32x4_t c) {
+#if defined(__aarch64__)
+  return vfmaq_f32(c, a, b);
+#else
+  return vmlaq_f32(c, a, b);
+#endif
+}
+
+static inline
+float32x4_t
+glmm_fnmadd(float32x4_t a, float32x4_t b, float32x4_t c) {
+#if defined(__aarch64__)
+  return vfmsq_f32(c, a, b);
+#else
+  return vmlsq_f32(c, a, b);
+#endif
+}
+
+static inline
+float32x4_t
+glmm_fmsub(float32x4_t a, float32x4_t b, float32x4_t c) {
+#if defined(__aarch64__)
+  return vfmsq_f32(c, a, b);
+#else
+  return vmlsq_f32(c, a, b);
+#endif
+}
+
+static inline
+float32x4_t
+glmm_fnmsub(float32x4_t a, float32x4_t b, float32x4_t c) {
+  return vsubq_f32(vdupq_n_f32(0.0f), glmm_fmadd(a, b, c));
+}
+
 #endif
 
 #endif /* cglm_simd_arm_h */
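
Per lane these wrappers compute c + a*b and c - a*b; on AArch64 vfmaq_f32/vfmsq_f32 are fused (single rounding), while the ARMv7 vmlaq_f32/vmlsq_f32 fallbacks are not guaranteed to fuse. Note that glmm_fmsub here shares glmm_fnmadd's body (c - a*b), whereas the x86 glmm_fmsub added later in this compare computes a*b - c. A scalar sketch of the semantics (illustrative names, not cglm API):

```c
/* scalar per-lane semantics of the NEON wrappers above (sketch) */
float fmadd_lane (float a, float b, float c) { return c + a * b; } /* vfmaq_f32(c, a, b) */
float fnmadd_lane(float a, float b, float c) { return c - a * b; } /* vfmsq_f32(c, a, b) */
float fnmsub_lane(float a, float b, float c) { return 0.0f - (c + a * b); }
```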

View File: include/cglm/simd/avx/mat4.h

@@ -14,6 +14,16 @@
 #include <immintrin.h>
+
+CGLM_INLINE
+void
+glm_mat4_scale_avx(mat4 m, float s) {
+  __m256 y0;
+  y0 = _mm256_set1_ps(s);
+
+  glmm_store256(m[0], _mm256_mul_ps(y0, glmm_load256(m[0])));
+  glmm_store256(m[2], _mm256_mul_ps(y0, glmm_load256(m[2])));
+}
+
 CGLM_INLINE
 void
 glm_mat4_mul_avx(mat4 m1, mat4 m2, mat4 dest) {
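
Since mat4 is a contiguous 4x4 float block, each 256-bit load/store above covers two adjacent columns (eight floats), so two multiplies scale all sixteen elements. A scalar reference for comparison (a sketch, not part of the diff):

```c
/* scalar equivalent of glm_mat4_scale_avx: multiply all 16 elements by s */
void mat4_scale_ref(float m[4][4], float s) {
  for (int i = 0; i < 4; i++)
    for (int j = 0; j < 4; j++)
      m[i][j] *= s;
}
```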

View File: include/cglm/simd/sse2/affine.h

@@ -25,28 +25,29 @@ glm_mul_sse2(mat4 m1, mat4 m2, mat4 dest) {
   r = glmm_load(m2[0]);
   glmm_store(dest[0],
-             _mm_add_ps(_mm_add_ps(_mm_mul_ps(glmm_shuff1x(r, 0), l0),
-                                   _mm_mul_ps(glmm_shuff1x(r, 1), l1)),
-                        _mm_mul_ps(glmm_shuff1x(r, 2), l2)));
+             glmm_fmadd(glmm_shuff1x(r, 0), l0,
+                        glmm_fmadd(glmm_shuff1x(r, 1), l1,
+                                   _mm_mul_ps(glmm_shuff1x(r, 2), l2))));
 
   r = glmm_load(m2[1]);
   glmm_store(dest[1],
-             _mm_add_ps(_mm_add_ps(_mm_mul_ps(glmm_shuff1x(r, 0), l0),
-                                   _mm_mul_ps(glmm_shuff1x(r, 1), l1)),
-                        _mm_mul_ps(glmm_shuff1x(r, 2), l2)));
+             glmm_fmadd(glmm_shuff1x(r, 0), l0,
+                        glmm_fmadd(glmm_shuff1x(r, 1), l1,
+                                   _mm_mul_ps(glmm_shuff1x(r, 2), l2))));
 
   r = glmm_load(m2[2]);
   glmm_store(dest[2],
-             _mm_add_ps(_mm_add_ps(_mm_mul_ps(glmm_shuff1x(r, 0), l0),
-                                   _mm_mul_ps(glmm_shuff1x(r, 1), l1)),
-                        _mm_mul_ps(glmm_shuff1x(r, 2), l2)));
+             glmm_fmadd(glmm_shuff1x(r, 0), l0,
+                        glmm_fmadd(glmm_shuff1x(r, 1), l1,
+                                   _mm_mul_ps(glmm_shuff1x(r, 2), l2))));
 
   r = glmm_load(m2[3]);
   glmm_store(dest[3],
-             _mm_add_ps(_mm_add_ps(_mm_mul_ps(glmm_shuff1x(r, 0), l0),
-                                   _mm_mul_ps(glmm_shuff1x(r, 1), l1)),
-                        _mm_add_ps(_mm_mul_ps(glmm_shuff1x(r, 2), l2),
-                                   _mm_mul_ps(glmm_shuff1x(r, 3), l3))));
+             glmm_fmadd(glmm_shuff1x(r, 0), l0,
+                        glmm_fmadd(glmm_shuff1x(r, 1), l1,
+                                   glmm_fmadd(glmm_shuff1x(r, 2), l2,
+                                              _mm_mul_ps(glmm_shuff1x(r, 3),
+                                                         l3)))));
 }
 
 CGLM_INLINE
@@ -62,21 +63,22 @@ glm_mul_rot_sse2(mat4 m1, mat4 m2, mat4 dest) {
   r = glmm_load(m2[0]);
   glmm_store(dest[0],
-             _mm_add_ps(_mm_add_ps(_mm_mul_ps(glmm_shuff1x(r, 0), l0),
-                                   _mm_mul_ps(glmm_shuff1x(r, 1), l1)),
-                        _mm_mul_ps(glmm_shuff1x(r, 2), l2)));
+             glmm_fmadd(glmm_shuff1x(r, 0), l0,
+                        glmm_fmadd(glmm_shuff1x(r, 1), l1,
+                                   _mm_mul_ps(glmm_shuff1x(r, 2), l2))));
 
   r = glmm_load(m2[1]);
   glmm_store(dest[1],
-             _mm_add_ps(_mm_add_ps(_mm_mul_ps(glmm_shuff1x(r, 0), l0),
-                                   _mm_mul_ps(glmm_shuff1x(r, 1), l1)),
-                        _mm_mul_ps(glmm_shuff1x(r, 2), l2)));
+             glmm_fmadd(glmm_shuff1x(r, 0), l0,
+                        glmm_fmadd(glmm_shuff1x(r, 1), l1,
+                                   _mm_mul_ps(glmm_shuff1x(r, 2), l2))));
 
   r = glmm_load(m2[2]);
   glmm_store(dest[2],
-             _mm_add_ps(_mm_add_ps(_mm_mul_ps(glmm_shuff1x(r, 0), l0),
-                                   _mm_mul_ps(glmm_shuff1x(r, 1), l1)),
-                        _mm_mul_ps(glmm_shuff1x(r, 2), l2)));
+             glmm_fmadd(glmm_shuff1x(r, 0), l0,
+                        glmm_fmadd(glmm_shuff1x(r, 1), l1,
+                                   _mm_mul_ps(glmm_shuff1x(r, 2), l2))));
 
   glmm_store(dest[3], l3);
 }
@@ -94,9 +96,9 @@ glm_inv_tr_sse2(mat4 mat) {
   _MM_TRANSPOSE4_PS(r0, r1, r2, x1);
 
-  x0 = _mm_add_ps(_mm_mul_ps(r0, glmm_shuff1(r3, 0, 0, 0, 0)),
-                  _mm_mul_ps(r1, glmm_shuff1(r3, 1, 1, 1, 1)));
-  x0 = _mm_add_ps(x0, _mm_mul_ps(r2, glmm_shuff1(r3, 2, 2, 2, 2)));
+  x0 = glmm_fmadd(r0, glmm_shuff1(r3, 0, 0, 0, 0),
+                  glmm_fmadd(r1, glmm_shuff1(r3, 1, 1, 1, 1),
+                             _mm_mul_ps(r2, glmm_shuff1(r3, 2, 2, 2, 2))));
 
   x0 = _mm_xor_ps(x0, _mm_set1_ps(-0.f));
   x0 = _mm_add_ps(x0, x1);
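
Each of these rewrites folds an add-of-products tree into a right-nested glmm_fmadd chain. Per lane the value is the same linear combination as before, up to floating-point associativity and the single rounding of a fused operation, so results can differ from the old code in the last bits. A scalar sketch of the pattern (helper names are illustrative, not cglm API):

```c
/* illustrative scalar model of glmm_fmadd: a * b + c */
static float fmadd(float a, float b, float c) { return a * b + c; }

/* old: (r0*l0 + r1*l1) + r2*l2                 -- three muls, two adds
 * new: fmadd(r0, l0, fmadd(r1, l1, r2 * l2))   -- one mul, two fmadds */
static float column3(float r0, float l0, float r1, float l1,
                     float r2, float l2) {
  return fmadd(r0, l0, fmadd(r1, l1, r2 * l2));
}
```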

View File: include/cglm/simd/sse2/mat2.h

@@ -26,11 +26,11 @@ glm_mat2_mul_sse2(mat2 m1, mat2 m2, mat2 dest) {
   dest[1][0] = a * g + c * h;
   dest[1][1] = b * g + d * h;
   */
-  x0 = _mm_mul_ps(_mm_movelh_ps(x1, x1), glmm_shuff1(x2, 2, 2, 0, 0));
-  x1 = _mm_mul_ps(_mm_movehl_ps(x1, x1), glmm_shuff1(x2, 3, 3, 1, 1));
-  x1 = _mm_add_ps(x0, x1);
-
-  glmm_store(dest[0], x1);
+  x0 = glmm_fmadd(_mm_movelh_ps(x1, x1), glmm_shuff1(x2, 2, 2, 0, 0),
+                  _mm_mul_ps(_mm_movehl_ps(x1, x1),
+                             glmm_shuff1(x2, 3, 3, 1, 1)));
+
+  glmm_store(dest[0], x0);
 }
 
 CGLM_INLINE
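
For reference, the scalar computation the SSE2 code implements, matching the lane comment in the diff (cglm mat2 is column-major; the function name below is illustrative):

```c
/* scalar reference for glm_mat2_mul_sse2 */
void mat2_mul_ref(const float m1[2][2], const float m2[2][2],
                  float dest[2][2]) {
  for (int c = 0; c < 2; c++)                  /* column of m2 / dest */
    for (int r = 0; r < 2; r++)                /* row within column   */
      dest[c][r] = m1[0][r] * m2[c][0] + m1[1][r] * m2[c][1];
}
```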

View File: include/cglm/simd/sse2/mat3.h

@@ -30,23 +30,16 @@ glm_mat3_mul_sse2(mat3 m1, mat3 m2, mat3 dest) {
   x1 = glmm_shuff2(l0, l1, 1, 0, 3, 3, 0, 3, 2, 0);
   x2 = glmm_shuff2(l1, l2, 0, 0, 3, 2, 0, 2, 1, 0);
 
-  x0 = _mm_add_ps(_mm_mul_ps(glmm_shuff1(l0, 0, 2, 1, 0),
-                             glmm_shuff1(r0, 3, 0, 0, 0)),
-                  _mm_mul_ps(x1, glmm_shuff2(r0, r1, 0, 0, 1, 1, 2, 0, 0, 0)));
-  x0 = _mm_add_ps(x0,
-                  _mm_mul_ps(x2, glmm_shuff2(r0, r1, 1, 1, 2, 2, 2, 0, 0, 0)));
+  x0 = glmm_fmadd(glmm_shuff1(l0, 0, 2, 1, 0), glmm_shuff1(r0, 3, 0, 0, 0),
+                  glmm_fmadd(x1, glmm_shuff2(r0, r1, 0, 0, 1, 1, 2, 0, 0, 0),
+                             _mm_mul_ps(x2, glmm_shuff2(r0, r1, 1, 1, 2, 2, 2, 0, 0, 0))));
 
   _mm_storeu_ps(dest[0], x0);
 
-  x0 = _mm_add_ps(_mm_mul_ps(glmm_shuff1(l0, 1, 0, 2, 1),
-                             _mm_shuffle_ps(r0, r1, _MM_SHUFFLE(2, 2, 3, 3))),
-                  _mm_mul_ps(glmm_shuff1(x1, 1, 0, 2, 1),
-                             glmm_shuff1(r1, 3, 3, 0, 0)));
-  x0 = _mm_add_ps(x0,
-                  _mm_mul_ps(glmm_shuff1(x2, 1, 0, 2, 1),
-                             _mm_shuffle_ps(r1, r2, _MM_SHUFFLE(0, 0, 1, 1))));
+  x0 = glmm_fmadd(glmm_shuff1(l0, 1, 0, 2, 1),
+                  _mm_shuffle_ps(r0, r1, _MM_SHUFFLE(2, 2, 3, 3)),
+                  glmm_fmadd(glmm_shuff1(x1, 1, 0, 2, 1), glmm_shuff1(r1, 3, 3, 0, 0),
+                             _mm_mul_ps(glmm_shuff1(x2, 1, 0, 2, 1),
+                                        _mm_shuffle_ps(r1, r2, _MM_SHUFFLE(0, 0, 1, 1)))));
 
   _mm_storeu_ps(&dest[1][1], x0);

View File: include/cglm/simd/sse2/mat4.h

@@ -56,46 +56,37 @@ glm_mat4_mul_sse2(mat4 m1, mat4 m2, mat4 dest) {
   l2 = glmm_load(m1[2]);
   l3 = glmm_load(m1[3]);
 
-  r = glmm_load(m2[0]);
-  glmm_store(dest[0],
-             _mm_add_ps(_mm_add_ps(_mm_mul_ps(glmm_shuff1x(r, 0), l0),
-                                   _mm_mul_ps(glmm_shuff1x(r, 1), l1)),
-                        _mm_add_ps(_mm_mul_ps(glmm_shuff1x(r, 2), l2),
-                                   _mm_mul_ps(glmm_shuff1x(r, 3), l3))));
-
-  r = glmm_load(m2[1]);
-  glmm_store(dest[1],
-             _mm_add_ps(_mm_add_ps(_mm_mul_ps(glmm_shuff1x(r, 0), l0),
-                                   _mm_mul_ps(glmm_shuff1x(r, 1), l1)),
-                        _mm_add_ps(_mm_mul_ps(glmm_shuff1x(r, 2), l2),
-                                   _mm_mul_ps(glmm_shuff1x(r, 3), l3))));
-
-  r = glmm_load(m2[2]);
-  glmm_store(dest[2],
-             _mm_add_ps(_mm_add_ps(_mm_mul_ps(glmm_shuff1x(r, 0), l0),
-                                   _mm_mul_ps(glmm_shuff1x(r, 1), l1)),
-                        _mm_add_ps(_mm_mul_ps(glmm_shuff1x(r, 2), l2),
-                                   _mm_mul_ps(glmm_shuff1x(r, 3), l3))));
-
-  r = glmm_load(m2[3]);
-  glmm_store(dest[3],
-             _mm_add_ps(_mm_add_ps(_mm_mul_ps(glmm_shuff1x(r, 0), l0),
-                                   _mm_mul_ps(glmm_shuff1x(r, 1), l1)),
-                        _mm_add_ps(_mm_mul_ps(glmm_shuff1x(r, 2), l2),
-                                   _mm_mul_ps(glmm_shuff1x(r, 3), l3))));
+#define XX(C)                                                                \
+  r = glmm_load(m2[C]);                                                      \
+  glmm_store(dest[C],                                                        \
+             glmm_fmadd(glmm_shuff1x(r, 0), l0,                              \
+                        glmm_fmadd(glmm_shuff1x(r, 1), l1,                   \
+                                   glmm_fmadd(glmm_shuff1x(r, 2), l2,        \
+                                              _mm_mul_ps(glmm_shuff1x(r, 3), \
+                                                         l3)))));
+
+  XX(0);
+  XX(1);
+  XX(2);
+  XX(3);
+
+#undef XX
 }
 
 CGLM_INLINE
 void
 glm_mat4_mulv_sse2(mat4 m, vec4 v, vec4 dest) {
-  __m128 x0, x1, x2;
+  __m128 x0, x1;
 
   x0 = glmm_load(v);
-  x1 = _mm_add_ps(_mm_mul_ps(glmm_load(m[0]), glmm_shuff1x(x0, 0)),
-                  _mm_mul_ps(glmm_load(m[1]), glmm_shuff1x(x0, 1)));
-
-  x2 = _mm_add_ps(_mm_mul_ps(glmm_load(m[2]), glmm_shuff1x(x0, 2)),
-                  _mm_mul_ps(glmm_load(m[3]), glmm_shuff1x(x0, 3)));
-
-  glmm_store(dest, _mm_add_ps(x1, x2));
+  x1 = glmm_fmadd(glmm_load(m[0]), glmm_shuff1x(x0, 0),
+                  glmm_fmadd(glmm_load(m[1]), glmm_shuff1x(x0, 1),
+                             glmm_fmadd(glmm_load(m[2]), glmm_shuff1x(x0, 2),
+                                        _mm_mul_ps(glmm_load(m[3]),
+                                                   glmm_shuff1x(x0, 3)))));
+
+  glmm_store(dest, x1);
 }
 
 CGLM_INLINE
@@ -115,20 +106,18 @@ glm_mat4_det_sse2(mat4 mat) {
      t[3] = i * p - m * l;
      t[4] = i * o - m * k;
    */
-  x0 = _mm_sub_ps(_mm_mul_ps(glmm_shuff1(r2, 0, 0, 1, 1),
-                             glmm_shuff1(r3, 2, 3, 2, 3)),
-                  _mm_mul_ps(glmm_shuff1(r3, 0, 0, 1, 1),
-                             glmm_shuff1(r2, 2, 3, 2, 3)));
+  x0 = glmm_fnmadd(glmm_shuff1(r3, 0, 0, 1, 1), glmm_shuff1(r2, 2, 3, 2, 3),
+                   _mm_mul_ps(glmm_shuff1(r2, 0, 0, 1, 1),
+                              glmm_shuff1(r3, 2, 3, 2, 3)));
 
   /*
      t[0] = k * p - o * l;
      t[0] = k * p - o * l;
      t[5] = i * n - m * j;
      t[5] = i * n - m * j;
    */
-  x1 = _mm_sub_ps(_mm_mul_ps(glmm_shuff1(r2, 0, 0, 2, 2),
-                             glmm_shuff1(r3, 1, 1, 3, 3)),
-                  _mm_mul_ps(glmm_shuff1(r3, 0, 0, 2, 2),
-                             glmm_shuff1(r2, 1, 1, 3, 3)));
+  x1 = glmm_fnmadd(glmm_shuff1(r3, 0, 0, 2, 2), glmm_shuff1(r2, 1, 1, 3, 3),
+                   _mm_mul_ps(glmm_shuff1(r2, 0, 0, 2, 2),
+                              glmm_shuff1(r3, 1, 1, 3, 3)));
 
   /*
     a * (f * t[0] - g * t[1] + h * t[2])
@@ -136,21 +125,16 @@ glm_mat4_det_sse2(mat4 mat) {
      + c * (e * t[1] - f * t[3] + h * t[5])
      - d * (e * t[2] - f * t[4] + g * t[5])
    */
-  x2 = _mm_sub_ps(_mm_mul_ps(glmm_shuff1(r1, 0, 0, 0, 1),
-                             _mm_shuffle_ps(x1, x0, _MM_SHUFFLE(1, 0, 0, 0))),
-                  _mm_mul_ps(glmm_shuff1(r1, 1, 1, 2, 2),
-                             glmm_shuff1(x0, 3, 2, 2, 0)));
-
-  x2 = _mm_add_ps(x2,
-                  _mm_mul_ps(glmm_shuff1(r1, 2, 3, 3, 3),
-                             _mm_shuffle_ps(x0, x1, _MM_SHUFFLE(2, 2, 3, 1))));
+  x2 = glmm_fnmadd(glmm_shuff1(r1, 1, 1, 2, 2), glmm_shuff1(x0, 3, 2, 2, 0),
+                   _mm_mul_ps(glmm_shuff1(r1, 0, 0, 0, 1),
+                              _mm_shuffle_ps(x1, x0, _MM_SHUFFLE(1, 0, 0, 0))));
+  x2 = glmm_fmadd(glmm_shuff1(r1, 2, 3, 3, 3),
+                  _mm_shuffle_ps(x0, x1, _MM_SHUFFLE(2, 2, 3, 1)),
+                  x2);
 
   x2 = _mm_xor_ps(x2, _mm_set_ps(-0.f, 0.f, -0.f, 0.f));
 
-  x0 = _mm_mul_ps(r0, x2);
-  x0 = _mm_add_ps(x0, glmm_shuff1(x0, 0, 1, 2, 3));
-  x0 = _mm_add_ps(x0, glmm_shuff1(x0, 1, 3, 3, 1));
-
-  return _mm_cvtss_f32(x0);
+  return glmm_hadd(_mm_mul_ps(x2, r0));
 }
 
 CGLM_INLINE
@@ -159,7 +143,10 @@ glm_mat4_inv_fast_sse2(mat4 mat, mat4 dest) {
   __m128 r0, r1, r2, r3,
          v0, v1, v2, v3,
          t0, t1, t2, t3, t4, t5,
-         x0, x1, x2, x3, x4, x5, x6, x7;
+         x0, x1, x2, x3, x4, x5, x6, x7, x8, x9;
+
+  x8 = _mm_set_ps(-0.f, 0.f, -0.f, 0.f);
+  x9 = glmm_shuff1(x8, 2, 1, 2, 1);
 
   /* 127 <- 0 */
   r0 = glmm_load(mat[0]); /* d c b a */
@@ -177,7 +164,7 @@ glm_mat4_inv_fast_sse2(mat4 mat, mat4 dest) {
      t1[0] = k * p - o * l;
      t2[0] = g * p - o * h;
      t3[0] = g * l - k * h; */
-  t0 = _mm_sub_ps(_mm_mul_ps(x3, x1), _mm_mul_ps(x2, x0));
+  t0 = glmm_fnmadd(x2, x0, _mm_mul_ps(x3, x1));
 
   x4 = _mm_shuffle_ps(r2, r3, _MM_SHUFFLE(2, 1, 2, 1)); /* o n k j */
   x4 = glmm_shuff1(x4, 0, 2, 2, 2);                     /* j n n n */
@@ -187,13 +174,13 @@ glm_mat4_inv_fast_sse2(mat4 mat, mat4 dest) {
     t1[1] = j * p - n * l;
     t2[1] = f * p - n * h;
     t3[1] = f * l - j * h; */
-  t1 = _mm_sub_ps(_mm_mul_ps(x5, x1), _mm_mul_ps(x4, x0));
+  t1 = glmm_fnmadd(x4, x0, _mm_mul_ps(x5, x1));
 
   /* t1[2] = j * o - n * k
     t1[2] = j * o - n * k;
     t2[2] = f * o - n * g;
    t3[2] = f * k - j * g; */
-  t2 = _mm_sub_ps(_mm_mul_ps(x5, x2), _mm_mul_ps(x4, x3));
+  t2 = glmm_fnmadd(x4, x3, _mm_mul_ps(x5, x2));
 
   x6 = _mm_shuffle_ps(r2, r1, _MM_SHUFFLE(0, 0, 0, 0)); /* e e i i */
   x7 = glmm_shuff2(r3, r2, 0, 0, 0, 0, 2, 0, 0, 0);     /* i m m m */
@@ -202,19 +189,19 @@ glm_mat4_inv_fast_sse2(mat4 mat, mat4 dest) {
    t1[3] = i * p - m * l;
    t2[3] = e * p - m * h;
    t3[3] = e * l - i * h; */
-  t3 = _mm_sub_ps(_mm_mul_ps(x6, x1), _mm_mul_ps(x7, x0));
+  t3 = glmm_fnmadd(x7, x0, _mm_mul_ps(x6, x1));
 
   /* t1[4] = i * o - m * k;
    t1[4] = i * o - m * k;
    t2[4] = e * o - m * g;
    t3[4] = e * k - i * g; */
-  t4 = _mm_sub_ps(_mm_mul_ps(x6, x2), _mm_mul_ps(x7, x3));
+  t4 = glmm_fnmadd(x7, x3, _mm_mul_ps(x6, x2));
 
   /* t1[5] = i * n - m * j;
    t1[5] = i * n - m * j;
    t2[5] = e * n - m * f;
    t3[5] = e * j - i * f; */
-  t5 = _mm_sub_ps(_mm_mul_ps(x6, x4), _mm_mul_ps(x7, x5));
+  t5 = glmm_fnmadd(x7, x5, _mm_mul_ps(x6, x4));
 
   x0 = glmm_shuff2(r1, r0, 0, 0, 0, 0, 2, 2, 2, 0); /* a a a e */
   x1 = glmm_shuff2(r1, r0, 1, 1, 1, 1, 2, 2, 2, 0); /* b b b f */
@@ -226,50 +213,35 @@ glm_mat4_inv_fast_sse2(mat4 mat, mat4 dest) {
    dest[0][1] =-(b * t1[0] - c * t1[1] + d * t1[2]);
    dest[0][2] = b * t2[0] - c * t2[1] + d * t2[2];
    dest[0][3] =-(b * t3[0] - c * t3[1] + d * t3[2]); */
-  v0 = _mm_add_ps(_mm_mul_ps(x3, t2),
-                  _mm_sub_ps(_mm_mul_ps(x1, t0),
-                             _mm_mul_ps(x2, t1)));
-  v0 = _mm_xor_ps(v0, _mm_set_ps(-0.f, 0.f, -0.f, 0.f));
-
-  /*
-    dest[1][0] =-(e * t1[0] - g * t1[3] + h * t1[4]);
-    dest[1][1] = a * t1[0] - c * t1[3] + d * t1[4];
-    dest[1][2] =-(a * t2[0] - c * t2[3] + d * t2[4]);
-    dest[1][3] = a * t3[0] - c * t3[3] + d * t3[4]; */
-  v1 = _mm_add_ps(_mm_mul_ps(x3, t4),
-                  _mm_sub_ps(_mm_mul_ps(x0, t0),
-                             _mm_mul_ps(x2, t3)));
-  v1 = _mm_xor_ps(v1, _mm_set_ps(0.f, -0.f, 0.f, -0.f));
+  v0 = _mm_xor_ps(glmm_fmadd(x3, t2, glmm_fnmadd(x2, t1, _mm_mul_ps(x1, t0))), x8);
 
   /*
    dest[2][0] = e * t1[1] - f * t1[3] + h * t1[5];
    dest[2][1] =-(a * t1[1] - b * t1[3] + d * t1[5]);
    dest[2][2] = a * t2[1] - b * t2[3] + d * t2[5];
    dest[2][3] =-(a * t3[1] - b * t3[3] + d * t3[5]);*/
-  v2 = _mm_add_ps(_mm_mul_ps(x3, t5),
-                  _mm_sub_ps(_mm_mul_ps(x0, t1),
-                             _mm_mul_ps(x1, t3)));
-  v2 = _mm_xor_ps(v2, _mm_set_ps(-0.f, 0.f, -0.f, 0.f));
+  v2 = _mm_xor_ps(glmm_fmadd(x3, t5, glmm_fnmadd(x1, t3, _mm_mul_ps(x0, t1))), x8);
+
+  /*
+   dest[1][0] =-(e * t1[0] - g * t1[3] + h * t1[4]);
+   dest[1][1] = a * t1[0] - c * t1[3] + d * t1[4];
+   dest[1][2] =-(a * t2[0] - c * t2[3] + d * t2[4]);
+   dest[1][3] = a * t3[0] - c * t3[3] + d * t3[4]; */
+  v1 = _mm_xor_ps(glmm_fmadd(x3, t4, glmm_fnmadd(x2, t3, _mm_mul_ps(x0, t0))), x9);
 
   /*
    dest[3][0] =-(e * t1[2] - f * t1[4] + g * t1[5]);
    dest[3][1] = a * t1[2] - b * t1[4] + c * t1[5];
    dest[3][2] =-(a * t2[2] - b * t2[4] + c * t2[5]);
    dest[3][3] = a * t3[2] - b * t3[4] + c * t3[5]; */
-  v3 = _mm_add_ps(_mm_mul_ps(x2, t5),
-                  _mm_sub_ps(_mm_mul_ps(x0, t2),
-                             _mm_mul_ps(x1, t4)));
-  v3 = _mm_xor_ps(v3, _mm_set_ps(0.f, -0.f, 0.f, -0.f));
+  v3 = _mm_xor_ps(glmm_fmadd(x2, t5, glmm_fnmadd(x1, t4, _mm_mul_ps(x0, t2))), x9);
 
   /* determinant */
   x0 = _mm_shuffle_ps(v0, v1, _MM_SHUFFLE(0, 0, 0, 0));
   x1 = _mm_shuffle_ps(v2, v3, _MM_SHUFFLE(0, 0, 0, 0));
   x0 = _mm_shuffle_ps(x0, x1, _MM_SHUFFLE(2, 0, 2, 0));
 
-  x0 = _mm_mul_ps(x0, r0);
-  x0 = _mm_add_ps(x0, glmm_shuff1(x0, 0, 1, 2, 3));
-  x0 = _mm_add_ps(x0, glmm_shuff1(x0, 1, 0, 0, 1));
-  x0 = _mm_rcp_ps(x0);
+  x0 = _mm_rcp_ps(glmm_vhadd(_mm_mul_ps(x0, r0)));
 
   glmm_store(dest[0], _mm_mul_ps(v0, x0));
   glmm_store(dest[1], _mm_mul_ps(v1, x0));
@@ -283,7 +255,10 @@ glm_mat4_inv_sse2(mat4 mat, mat4 dest) {
   __m128 r0, r1, r2, r3,
          v0, v1, v2, v3,
         t0, t1, t2, t3, t4, t5,
-         x0, x1, x2, x3, x4, x5, x6, x7;
+         x0, x1, x2, x3, x4, x5, x6, x7, x8, x9;
+
+  x8 = _mm_set_ps(-0.f, 0.f, -0.f, 0.f);
+  x9 = glmm_shuff1(x8, 2, 1, 2, 1);
 
   /* 127 <- 0 */
   r0 = glmm_load(mat[0]); /* d c b a */
@@ -301,7 +276,7 @@ glm_mat4_inv_sse2(mat4 mat, mat4 dest) {
    t1[0] = k * p - o * l;
    t2[0] = g * p - o * h;
    t3[0] = g * l - k * h; */
-  t0 = _mm_sub_ps(_mm_mul_ps(x3, x1), _mm_mul_ps(x2, x0));
+  t0 = glmm_fnmadd(x2, x0, _mm_mul_ps(x3, x1));
 
   x4 = _mm_shuffle_ps(r2, r3, _MM_SHUFFLE(2, 1, 2, 1)); /* o n k j */
   x4 = glmm_shuff1(x4, 0, 2, 2, 2);                     /* j n n n */
@@ -311,13 +286,13 @@ glm_mat4_inv_sse2(mat4 mat, mat4 dest) {
    t1[1] = j * p - n * l;
    t2[1] = f * p - n * h;
    t3[1] = f * l - j * h; */
-  t1 = _mm_sub_ps(_mm_mul_ps(x5, x1), _mm_mul_ps(x4, x0));
+  t1 = glmm_fnmadd(x4, x0, _mm_mul_ps(x5, x1));
 
   /* t1[2] = j * o - n * k
    t1[2] = j * o - n * k;
    t2[2] = f * o - n * g;
    t3[2] = f * k - j * g; */
-  t2 = _mm_sub_ps(_mm_mul_ps(x5, x2), _mm_mul_ps(x4, x3));
+  t2 = glmm_fnmadd(x4, x3, _mm_mul_ps(x5, x2));
 
   x6 = _mm_shuffle_ps(r2, r1, _MM_SHUFFLE(0, 0, 0, 0)); /* e e i i */
   x7 = glmm_shuff2(r3, r2, 0, 0, 0, 0, 2, 0, 0, 0);     /* i m m m */
@@ -326,19 +301,19 @@ glm_mat4_inv_sse2(mat4 mat, mat4 dest) {
    t1[3] = i * p - m * l;
    t2[3] = e * p - m * h;
    t3[3] = e * l - i * h; */
-  t3 = _mm_sub_ps(_mm_mul_ps(x6, x1), _mm_mul_ps(x7, x0));
+  t3 = glmm_fnmadd(x7, x0, _mm_mul_ps(x6, x1));
 
   /* t1[4] = i * o - m * k;
    t1[4] = i * o - m * k;
    t2[4] = e * o - m * g;
    t3[4] = e * k - i * g; */
-  t4 = _mm_sub_ps(_mm_mul_ps(x6, x2), _mm_mul_ps(x7, x3));
+  t4 = glmm_fnmadd(x7, x3, _mm_mul_ps(x6, x2));
 
   /* t1[5] = i * n - m * j;
    t1[5] = i * n - m * j;
    t2[5] = e * n - m * f;
    t3[5] = e * j - i * f; */
-  t5 = _mm_sub_ps(_mm_mul_ps(x6, x4), _mm_mul_ps(x7, x5));
+  t5 = glmm_fnmadd(x7, x5, _mm_mul_ps(x6, x4));
 
   x0 = glmm_shuff2(r1, r0, 0, 0, 0, 0, 2, 2, 2, 0); /* a a a e */
   x1 = glmm_shuff2(r1, r0, 1, 1, 1, 1, 2, 2, 2, 0); /* b b b f */
@@ -350,50 +325,35 @@ glm_mat4_inv_sse2(mat4 mat, mat4 dest) {
    dest[0][1] =-(b * t1[0] - c * t1[1] + d * t1[2]);
    dest[0][2] = b * t2[0] - c * t2[1] + d * t2[2];
    dest[0][3] =-(b * t3[0] - c * t3[1] + d * t3[2]); */
-  v0 = _mm_add_ps(_mm_mul_ps(x3, t2),
-                  _mm_sub_ps(_mm_mul_ps(x1, t0),
-                             _mm_mul_ps(x2, t1)));
-  v0 = _mm_xor_ps(v0, _mm_set_ps(-0.f, 0.f, -0.f, 0.f));
-
-  /*
-    dest[1][0] =-(e * t1[0] - g * t1[3] + h * t1[4]);
-    dest[1][1] = a * t1[0] - c * t1[3] + d * t1[4];
-    dest[1][2] =-(a * t2[0] - c * t2[3] + d * t2[4]);
-    dest[1][3] = a * t3[0] - c * t3[3] + d * t3[4]; */
-  v1 = _mm_add_ps(_mm_mul_ps(x3, t4),
-                  _mm_sub_ps(_mm_mul_ps(x0, t0),
-                             _mm_mul_ps(x2, t3)));
-  v1 = _mm_xor_ps(v1, _mm_set_ps(0.f, -0.f, 0.f, -0.f));
+  v0 = _mm_xor_ps(glmm_fmadd(x3, t2, glmm_fnmadd(x2, t1, _mm_mul_ps(x1, t0))), x8);
 
   /*
    dest[2][0] = e * t1[1] - f * t1[3] + h * t1[5];
    dest[2][1] =-(a * t1[1] - b * t1[3] + d * t1[5]);
    dest[2][2] = a * t2[1] - b * t2[3] + d * t2[5];
    dest[2][3] =-(a * t3[1] - b * t3[3] + d * t3[5]);*/
-  v2 = _mm_add_ps(_mm_mul_ps(x3, t5),
-                  _mm_sub_ps(_mm_mul_ps(x0, t1),
-                             _mm_mul_ps(x1, t3)));
-  v2 = _mm_xor_ps(v2, _mm_set_ps(-0.f, 0.f, -0.f, 0.f));
+  v2 = _mm_xor_ps(glmm_fmadd(x3, t5, glmm_fnmadd(x1, t3, _mm_mul_ps(x0, t1))), x8);
+
+  /*
+   dest[1][0] =-(e * t1[0] - g * t1[3] + h * t1[4]);
+   dest[1][1] = a * t1[0] - c * t1[3] + d * t1[4];
+   dest[1][2] =-(a * t2[0] - c * t2[3] + d * t2[4]);
+   dest[1][3] = a * t3[0] - c * t3[3] + d * t3[4]; */
+  v1 = _mm_xor_ps(glmm_fmadd(x3, t4, glmm_fnmadd(x2, t3, _mm_mul_ps(x0, t0))), x9);
 
   /*
    dest[3][0] =-(e * t1[2] - f * t1[4] + g * t1[5]);
    dest[3][1] = a * t1[2] - b * t1[4] + c * t1[5];
    dest[3][2] =-(a * t2[2] - b * t2[4] + c * t2[5]);
   dest[3][3] = a * t3[2] - b * t3[4] + c * t3[5]; */
-  v3 = _mm_add_ps(_mm_mul_ps(x2, t5),
-                  _mm_sub_ps(_mm_mul_ps(x0, t2),
-                             _mm_mul_ps(x1, t4)));
-  v3 = _mm_xor_ps(v3, _mm_set_ps(0.f, -0.f, 0.f, -0.f));
+  v3 = _mm_xor_ps(glmm_fmadd(x2, t5, glmm_fnmadd(x1, t4, _mm_mul_ps(x0, t2))), x9);
 
   /* determinant */
   x0 = _mm_shuffle_ps(v0, v1, _MM_SHUFFLE(0, 0, 0, 0));
   x1 = _mm_shuffle_ps(v2, v3, _MM_SHUFFLE(0, 0, 0, 0));
   x0 = _mm_shuffle_ps(x0, x1, _MM_SHUFFLE(2, 0, 2, 0));
 
-  x0 = _mm_mul_ps(x0, r0);
-  x0 = _mm_add_ps(x0, glmm_shuff1(x0, 0, 1, 2, 3));
-  x0 = _mm_add_ps(x0, glmm_shuff1(x0, 1, 0, 0, 1));
-  x0 = _mm_div_ps(_mm_set1_ps(1.0f), x0);
+  x0 = _mm_div_ps(_mm_set1_ps(1.0f), glmm_vhadd(_mm_mul_ps(x0, r0)));
 
   glmm_store(dest[0], _mm_mul_ps(v0, x0));
   glmm_store(dest[1], _mm_mul_ps(v1, x0));
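
Beyond the fmadd folding, two details stand out in this file: glm_mat4_mulv_sse2 now evaluates the whole matrix-vector product as one right-nested chain, and the inverse routines pull the horizontal sum through the new glmm_vhadd helper, with glm_mat4_inv_fast_sse2 keeping _mm_rcp_ps, an approximate reciprocal (about 12 bits of precision), while glm_mat4_inv_sse2 divides exactly. A scalar reference for the mulv nesting (a sketch, not cglm code; cglm matrices are column-major):

```c
/* dest = M * v, associated the same way as the fmadd chain:
 * m0*v0 + (m1*v1 + (m2*v2 + m3*v3)) */
void mat4_mulv_ref(const float m[4][4], const float v[4], float dest[4]) {
  for (int i = 0; i < 4; i++)
    dest[i] = m[0][i] * v[0]
            + (m[1][i] * v[1] + (m[2][i] * v[2] + m[3][i] * v[3]));
}
```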

View File: include/cglm/simd/x86.h

@@ -48,6 +48,15 @@ glmm_abs(__m128 x) {
   return _mm_andnot_ps(_mm_set1_ps(-0.0f), x);
 }
 
+static inline
+__m128
+glmm_vhadd(__m128 v) {
+  __m128 x0;
+  x0 = _mm_add_ps(v,  glmm_shuff1(v, 0, 1, 2, 3));
+  x0 = _mm_add_ps(x0, glmm_shuff1(x0, 1, 0, 0, 1));
+  return x0;
+}
+
 static inline
 __m128
 glmm_vhadds(__m128 v) {
@@ -188,5 +197,93 @@ glmm_store3(float v[3], __m128 vx) {
   _mm_store_ss(&v[2], glmm_shuff1(vx, 2, 2, 2, 2));
 }
 
+/* enable FMA macro for MSVC? */
+#if !defined(__FMA__) && defined(__AVX2__)
+#  define __FMA__ 1
+#endif
+
+static inline
+__m128
+glmm_fmadd(__m128 a, __m128 b, __m128 c) {
+#ifdef __FMA__
+  return _mm_fmadd_ps(a, b, c);
+#else
+  return _mm_add_ps(c, _mm_mul_ps(a, b));
+#endif
+}
+
+static inline
+__m128
+glmm_fnmadd(__m128 a, __m128 b, __m128 c) {
+#ifdef __FMA__
+  return _mm_fnmadd_ps(a, b, c);
+#else
+  return _mm_sub_ps(c, _mm_mul_ps(a, b));
+#endif
+}
+
+static inline
+__m128
+glmm_fmsub(__m128 a, __m128 b, __m128 c) {
+#ifdef __FMA__
+  return _mm_fmsub_ps(a, b, c);
+#else
+  return _mm_sub_ps(_mm_mul_ps(a, b), c);
+#endif
+}
+
+static inline
+__m128
+glmm_fnmsub(__m128 a, __m128 b, __m128 c) {
+#ifdef __FMA__
+  return _mm_fnmsub_ps(a, b, c);
+#else
+  return _mm_xor_ps(_mm_add_ps(_mm_mul_ps(a, b), c), _mm_set1_ps(-0.0f));
+#endif
+}
+
+#if defined(__AVX__)
+static inline
+__m256
+glmm256_fmadd(__m256 a, __m256 b, __m256 c) {
+#ifdef __FMA__
+  return _mm256_fmadd_ps(a, b, c);
+#else
+  return _mm256_add_ps(c, _mm256_mul_ps(a, b));
+#endif
+}
+
+static inline
+__m256
+glmm256_fnmadd(__m256 a, __m256 b, __m256 c) {
+#ifdef __FMA__
+  return _mm256_fnmadd_ps(a, b, c);
+#else
+  return _mm256_sub_ps(c, _mm256_mul_ps(a, b));
+#endif
+}
+
+static inline
+__m256
+glmm256_fmsub(__m256 a, __m256 b, __m256 c) {
+#ifdef __FMA__
+  return _mm256_fmsub_ps(a, b, c);
+#else
+  return _mm256_sub_ps(_mm256_mul_ps(a, b), c);
+#endif
+}
+
+static inline
+__m256
+glmm256_fnmsub(__m256 a, __m256 b, __m256 c) {
+#ifdef __FMA__
+  return _mm256_fmsub_ps(a, b, c);
+#else
+  return _mm256_xor_ps(_mm256_sub_ps(_mm256_mul_ps(a, b), c),
+                       _mm256_set1_ps(-0.0f));
+#endif
+}
+#endif
+
 #endif
 #endif /* cglm_simd_x86_h */
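
With __FMA__ defined (it is force-enabled above whenever __AVX2__ is set, which is what the MSVC commit targets, since MSVC defines __AVX2__ under /arch:AVX2 but never defines __FMA__ itself), each wrapper becomes a single fused instruction; the fallbacks use a separate multiply and add/sub and therefore round twice. glmm_vhadd broadcasts the sum of all four lanes into every lane, which is what lets the mat4 inverse scale all four cofactor rows by the reciprocal determinant directly. A scalar sketch of the per-lane semantics (illustrative names, not cglm API); note that the __FMA__ branch of glmm256_fnmsub calls _mm256_fmsub_ps as committed here, which does not match its own fallback:

```c
/* per-lane semantics of the x86 wrappers (scalar sketch) */
float fmadd_lane (float a, float b, float c) { return   a * b + c;  } /* _mm_fmadd_ps  */
float fnmadd_lane(float a, float b, float c) { return  c - a * b;   } /* _mm_fnmadd_ps */
float fmsub_lane (float a, float b, float c) { return  a * b - c;   } /* _mm_fmsub_ps  */
float fnmsub_lane(float a, float b, float c) { return -(a * b) - c; } /* _mm_fnmsub_ps */

/* glmm_vhadd(v): every output lane holds v0 + v1 + v2 + v3,
 * e.g. [1, 2, 3, 4] -> [10, 10, 10, 10] */
```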

View File: include/cglm/vec4.h

@@ -568,14 +568,8 @@ glm_vec4_subadd(vec4 a, vec4 b, vec4 dest) {
 CGLM_INLINE
 void
 glm_vec4_muladd(vec4 a, vec4 b, vec4 dest) {
-#if defined( __SSE__ ) || defined( __SSE2__ )
-  glmm_store(dest, _mm_add_ps(glmm_load(dest),
-                              _mm_mul_ps(glmm_load(a),
-                                         glmm_load(b))));
-#elif defined(CGLM_NEON_FP)
-  vst1q_f32(dest, vaddq_f32(vld1q_f32(dest),
-                            vmulq_f32(vld1q_f32(a),
-                                      vld1q_f32(b))));
+#if defined(CGLM_SIMD)
+  glmm_store(dest, glmm_fmadd(glmm_load(a), glmm_load(b), glmm_load(dest)));
 #else
   dest[0] += a[0] * b[0];
   dest[1] += a[1] * b[1];
@@ -597,13 +591,9 @@ CGLM_INLINE
 void
 glm_vec4_muladds(vec4 a, float s, vec4 dest) {
 #if defined( __SSE__ ) || defined( __SSE2__ )
-  glmm_store(dest, _mm_add_ps(glmm_load(dest),
-                              _mm_mul_ps(glmm_load(a),
-                                         _mm_set1_ps(s))));
+  glmm_store(dest, glmm_fmadd(glmm_load(a), _mm_set1_ps(s), glmm_load(dest)));
 #elif defined(CGLM_NEON_FP)
-  vst1q_f32(dest, vaddq_f32(vld1q_f32(dest),
-                            vmulq_f32(vld1q_f32(a),
-                                      vdupq_n_f32(s))));
+  glmm_store(dest, glmm_fmadd(glmm_load(a), vdupq_n_f32(s), glmm_load(dest)));
 #else
   dest[0] += a[0] * s;
   dest[1] += a[1] * s;
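
Both functions accumulate into dest, so callers initialize it first. A small usage sketch (not from this diff), assuming cglm is on the include path:

```c
#include <cglm/cglm.h>
#include <stdio.h>

int main(void) {
  vec4 a    = { 1.0f,  2.0f,  3.0f,  4.0f};
  vec4 b    = {10.0f, 10.0f, 10.0f, 10.0f};
  vec4 dest = { 0.5f,  0.5f,  0.5f,  0.5f};

  glm_vec4_muladd(a, b, dest);  /* dest += a * b, componentwise */
  printf("%.1f %.1f %.1f %.1f\n",
         dest[0], dest[1], dest[2], dest[3]);  /* 10.5 20.5 30.5 40.5 */
  return 0;
}
```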

View File: include/cglm/version.h

@@ -10,6 +10,6 @@
 #define CGLM_VERSION_MAJOR 0
 #define CGLM_VERSION_MINOR 8
-#define CGLM_VERSION_PATCH 0
+#define CGLM_VERSION_PATCH 1
 
 #endif /* cglm_version_h */

View File: meson.build

@@ -1,5 +1,5 @@
 project('cglm', 'c',
-  version : '0.8.0',
+  version : '0.8.1',
   license : 'mit',
   default_options : [
     'c_std=c11',