Mirror of https://github.com/rehlds/rehlds.git

commit f5d536ed4b (parent ac413a748e)

Added SSE mathfun implementation of sin, cos, sincos
Written parallel SSE versions of AngleVectors and AngleVectorsTranspose
@@ -28,6 +28,9 @@

#include "precompiled.h"

// Intrinsics guide: https://software.intel.com/sites/landingpage/IntrinsicsGuide/
// Shufps calculator: http://wurstcaptures.untergrund.net/assembler_tricks.html

vec3_t vec3_origin;
//int nanmask;
//short int new_cw;
@@ -36,6 +39,7 @@ vec3_t vec3_origin;

// aligned vec4_t
typedef ALIGN16 vec4_t avec4_t;
typedef ALIGN16 int aivec4_t[4];

// conversion multiplier
const avec4_t deg2rad =
@@ -46,8 +50,24 @@ const avec4_t deg2rad =
	M_PI / 180.f
};

const aivec4_t negmask[4] =
{
	0x80000000,
	0x80000000,
	0x80000000,
	0x80000000
};

const aivec4_t negmask_1001 =
{
	0x80000000,
	0,
	0,
	0x80000000
};

// save 4d xmm to 3d vector. we can't optimize many simple vector3 functions because saving back to 3d is slow.
void xmm2vec(vec_t *v, const __m128 m)
static inline void xmm2vec(vec_t *v, const __m128 m)
{
	_mm_store_ss(v, m);
	_mm_storel_pi((__m64*)(v + 1), _mm_shuffle_ps(m, m, _MM_SHUFFLE(3, 2, 2, 1)));
@@ -145,6 +165,53 @@ void EXT_FUNC AngleVectors_ext(const vec_t *angles, vec_t *forward, vec_t *right
	AngleVectors(angles, forward, right, up);
}

#ifdef REHLDS_FIXES
// parallel SSE version
void AngleVectors(const vec_t *angles, vec_t *forward, vec_t *right, vec_t *up)
{
#ifndef SWDS
	g_engdstAddrs.pfnAngleVectors(&angles, &forward, &right, &up);
#endif // SWDS

	__m128 s, c;
	sincos_ps(_mm_mul_ps(_mm_loadu_ps(angles), _mm_load_ps(deg2rad)), &s, &c);

	__m128 m1 = _mm_shuffle_ps(c, s, 0x90); // [cp][cp][sy][sr]
	__m128 m2 = _mm_shuffle_ps(c, c, 0x09); // [cy][cr][cp][cp]
	__m128 cp_mults = _mm_mul_ps(m1, m2); // [cp * cy][cp * cr][cp * sy][cp * sr];

	m1 = _mm_shuffle_ps(c, s, 0x15); // [cy][cy][sy][sp]
	m2 = _mm_shuffle_ps(s, c, 0xA0); // [sp][sp][cr][cr]
	m1 = _mm_shuffle_ps(m1, m1, 0xC8); // [cy][sy][cy][sp]

	__m128 m3 = _mm_shuffle_ps(s, s, 0x4A); // [sr][sr][sp][sy];
	m3 = _mm_mul_ps(m3, _mm_mul_ps(m1, m2)); // [sp*cy*sr][sp*sy*sr][cr*cy*sp][cr*sp*sy]

	m2 = _mm_shuffle_ps(s, c, 0x65); // [sy][sy][cr][cy]
	m1 = _mm_shuffle_ps(c, s, 0xA6); // [cr][cy][sr][sr]
	m2 = _mm_shuffle_ps(m2, m2, 0xD8); // [sy][cr][sy][cy]
	m1 = _mm_xor_ps(m1, _mm_load_ps((float *)&negmask_1001)); // [-cr][cy][sr][-sr]
	m1 = _mm_mul_ps(m1, m2); // [-cr*sy][cy*cr][sr*sy][-sr*cy]

	m3 = _mm_add_ps(m3, m1);

	if (forward)
	{
		_mm_storel_pi((__m64 *)forward, _mm_shuffle_ps(cp_mults, cp_mults, 0x08));
		forward[2] = -s.m128_f32[PITCH];
	}
	if (right)
	{
		__m128 r = _mm_shuffle_ps(m3, cp_mults, 0xF4); // [m3(0)][m3(1)][cp(3)][cp(3)]
		xmm2vec(right, _mm_xor_ps(r, _mm_load_ps((float *)&negmask)));
	}
	if (up)
	{
		_mm_storel_pi((__m64 *)up, _mm_shuffle_ps(m3, m3, 0x0E));
		up[2] = cp_mults.m128_f32[1];
	}
}
#else // REHLDS_FIXES
/* <47067> ../engine/mathlib.c:267 */
void AngleVectors(const vec_t *angles, vec_t *forward, vec_t *right, vec_t *up)
{
@@ -154,18 +221,6 @@ void AngleVectors(const vec_t *angles, vec_t *forward, vec_t *right, vec_t *up)
	g_engdstAddrs.pfnAngleVectors(&angles, &forward, &right, &up);
#endif // SWDS

#ifdef REHLDS_FIXES
	// convert to radians
	avec4_t rad_angles;
	_mm_store_ps(rad_angles, _mm_mul_ps(_mm_loadu_ps(angles), _mm_load_ps(deg2rad)));

	sy = sin(rad_angles[YAW]);
	cy = cos(rad_angles[YAW]);
	sp = sin(rad_angles[PITCH]);
	cp = cos(rad_angles[PITCH]);
	sr = sin(rad_angles[ROLL]);
	cr = cos(rad_angles[ROLL]);
#else
	float angle;
	angle = (float)(angles[YAW] * (M_PI * 2 / 360));
	sy = sin(angle);
@@ -176,7 +231,6 @@ void AngleVectors(const vec_t *angles, vec_t *forward, vec_t *right, vec_t *up)
	angle = (float)(angles[ROLL] * (M_PI * 2 / 360));
	sr = sin(angle);
	cr = cos(angle);
#endif

	if (forward)
	{
@@ -197,7 +251,56 @@ void AngleVectors(const vec_t *angles, vec_t *forward, vec_t *right, vec_t *up)
		up[2] = cr*cp;
	}
}
#endif // REHLDS_FIXES
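
The shuffle immediates above (0x90, 0x09, 0x15, ...) are what make the listing hard to read. As a reading aid only (not part of the commit), here is a tiny decoder for _mm_shuffle_ps(a, b, imm): the two low 2-bit fields of imm pick lanes of a, the two high fields pick lanes of b. It assumes the usual engine lane order PITCH=0, YAW=1, ROLL=2, so after sincos_ps() lane i of s and c holds the sine and cosine of that angle.

	// illustrative only; not engine code
	#include <cstdio>

	static void decode_shuffle(unsigned imm)
	{
		std::printf("result = [a%u][a%u][b%u][b%u]\n",
		            imm & 3, (imm >> 2) & 3, (imm >> 4) & 3, (imm >> 6) & 3);
	}

	int main()
	{
		decode_shuffle(0x90); // [a0][a0][b1][b2] -> with a = c, b = s: [cp][cp][sy][sr]
		decode_shuffle(0x09); // [a1][a2][b0][b0] -> with a = b = c:   [cy][cr][cp][cp]
		return 0;
	}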

#ifdef REHLDS_FIXES
// parallel SSE version
void AngleVectorsTranspose(const vec_t *angles, vec_t *forward, vec_t *right, vec_t *up)
{
#ifndef SWDS
	g_engdstAddrs.pfnAngleVectors(&angles, &forward, &right, &up);
#endif // SWDS

	__m128 s, c;
	sincos_ps(_mm_mul_ps(_mm_loadu_ps(angles), _mm_load_ps(deg2rad)), &s, &c);

	__m128 m1 = _mm_shuffle_ps(c, s, 0x90); // [cp][cp][sy][sr]
	__m128 m2 = _mm_shuffle_ps(c, c, 0x09); // [cy][cr][cp][cp]
	__m128 cp_mults = _mm_mul_ps(m1, m2); // [cp * cy][cp * cr][cp * sy][cp * sr];

	m1 = _mm_shuffle_ps(s, s, 0x50); // [sp][sp][sy][sy]
	m2 = _mm_shuffle_ps(c, s, 0x05); // [cy][cy][sp][sp]

	__m128 m3 = _mm_shuffle_ps(s, c, 0xAA); // [sr][sr][cr][cr]
	m1 = _mm_mul_ps(m1, m2);
	m3 = _mm_shuffle_ps(m3, m3, 0xD8); // [sr][cr][sr][cr]
	m3 = _mm_mul_ps(m3, m1); // [sp*cy*sr][sp*cy*cr][sy*sp*sr][sy*sp*cr]

	m2 = _mm_shuffle_ps(c, s, 0xA6); // [cr][cy][sr][sr]
	m1 = _mm_shuffle_ps(s, c, 0x65); // [sy][sy][cr][cy]
	m2 = _mm_shuffle_ps(m2, m2, 0xD8); // [cr][sr][cy][sr]
	m1 = _mm_xor_ps(m1, _mm_load_ps((float *)&negmask_1001)); // [-sy][sy][cr][-cy]
	m1 = _mm_mul_ps(m1, m2); // [-cr*sy][sr*sy][cy*cr][-sr*cy]

	m3 = _mm_add_ps(m3, m1);

	if (forward)
	{
		forward[0] = cp_mults.m128_f32[0];
		_mm_storel_pi((__m64*)(forward + 1), m3); // (sr*sp*cy + cr*-sy);
	}
	if (right)
	{
		right[0] = cp_mults.m128_f32[2];
		_mm_storel_pi((__m64*)(right + 1), _mm_shuffle_ps(m3, m3, 0x0E));
	}
	if (up)
	{
		up[0] = -s.m128_f32[PITCH];
		_mm_storel_pi((__m64 *)&up[1], _mm_shuffle_ps(cp_mults, cp_mults, 0x07));
	}
}
#else // REHLDS_FIXES
/* <4712e> ../engine/mathlib.c:304 */
void AngleVectorsTranspose(const vec_t *angles, vec_t *forward, vec_t *right, vec_t *up)
{
@@ -246,6 +349,7 @@ void AngleVectorsTranspose(const vec_t *angles, vec_t *forward, vec_t *right, ve
		up[2] = cr*cp;
	}
}
#endif

/* <471e9> ../engine/mathlib.c:340 */
void AngleMatrix(const vec_t *angles, float(*matrix)[4])
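
For readers cross-checking the shuffled lanes above, the scalar results the two SSE routines reproduce are the classic GoldSrc basis vectors. The sketch below is a reading aid derived from the lane comments, not engine code: sp/cp, sy/cy and sr/cr are the sines and cosines of pitch, yaw and roll in radians, and AngleVectorsTranspose() fills its vectors with the same nine products read down the columns instead of along the rows (with the right vector negated).

	// illustrative scalar reference, not part of the commit
	void AngleVectors_scalar_ref(float sp, float cp, float sy, float cy, float sr, float cr,
	                             float forward[3], float right[3], float up[3])
	{
		forward[0] = cp * cy;                 forward[1] = cp * sy;                 forward[2] = -sp;
		right[0] = -sr * sp * cy + cr * sy;   right[1] = -sr * sp * sy - cr * cy;   right[2] = -sr * cp;
		up[0] = cr * sp * cy + sr * sy;       up[1] = cr * sp * sy - sr * cy;       up[2] = cr * cp;
	}

	void AngleVectorsTranspose_scalar_ref(float sp, float cp, float sy, float cy, float sr, float cr,
	                                      float forward[3], float right[3], float up[3])
	{
		forward[0] = cp * cy;   forward[1] = sr * sp * cy - cr * sy;   forward[2] = cr * sp * cy + sr * sy;
		right[0] = cp * sy;     right[1] = sr * sp * sy + cr * cy;     right[2] = cr * sp * sy - sr * cy;
		up[0] = -sp;            up[1] = sr * cp;                       up[2] = cr * cp;
	}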
rehlds/engine/sse_mathfun.cpp (new file, 447 lines)
@@ -0,0 +1,447 @@
/* SIMD (SSE1+MMX or SSE2) implementation of sin, cos, exp and log

   Inspired by Intel Approximate Math library, and based on the
   corresponding algorithms of the cephes math library

   The default is to use the SSE1 version. If you define USE_SSE2,
   the SSE2 intrinsics will be used in place of the MMX intrinsics. Do
   not expect any significant performance improvement with SSE2.
*/

/* Copyright (C) 2007 Julien Pommier

   This software is provided 'as-is', without any express or implied
   warranty. In no event will the authors be held liable for any damages
   arising from the use of this software.

   Permission is granted to anyone to use this software for any purpose,
   including commercial applications, and to alter it and redistribute it
   freely, subject to the following restrictions:

   1. The origin of this software must not be misrepresented; you must not
      claim that you wrote the original software. If you use this software
      in a product, an acknowledgment in the product documentation would be
      appreciated but is not required.
   2. Altered source versions must be plainly marked as such, and must not be
      misrepresented as being the original software.
   3. This notice may not be removed or altered from any source distribution.

   (this is the zlib license)
*/

#include "precompiled.h"

/* natural logarithm computed for 4 simultaneous floats
   return NaN for x <= 0
*/
v4sf log_ps(v4sf x) {
	v4si emm0;

	v4sf one = *(v4sf*)_ps_1;

	v4sf invalid_mask = _mm_cmple_ps(x, _mm_setzero_ps());

	x = _mm_max_ps(x, *(v4sf*)_ps_min_norm_pos); /* cut off denormalized stuff */

	emm0 = _mm_srli_epi32(_mm_castps_si128(x), 23);

	/* keep only the fractional part */
	x = _mm_and_ps(x, *(v4sf*)_ps_inv_mant_mask);
	x = _mm_or_ps(x, *(v4sf*)_ps_0p5);

	emm0 = _mm_sub_epi32(emm0, *(v4si*)_pi32_0x7f);
	v4sf e = _mm_cvtepi32_ps(emm0);

	e = _mm_add_ps(e, one);

	/* part2:
	   if( x < SQRTHF ) {
	     e -= 1;
	     x = x + x - 1.0;
	   } else { x = x - 1.0; }
	*/
	v4sf mask = _mm_cmplt_ps(x, *(v4sf*)_ps_cephes_SQRTHF);
	v4sf tmp = _mm_and_ps(x, mask);
	x = _mm_sub_ps(x, one);
	e = _mm_sub_ps(e, _mm_and_ps(one, mask));
	x = _mm_add_ps(x, tmp);

	v4sf z = _mm_mul_ps(x, x);

	v4sf y = *(v4sf*)_ps_cephes_log_p0;
	y = _mm_mul_ps(y, x);
	y = _mm_add_ps(y, *(v4sf*)_ps_cephes_log_p1);
	y = _mm_mul_ps(y, x);
	y = _mm_add_ps(y, *(v4sf*)_ps_cephes_log_p2);
	y = _mm_mul_ps(y, x);
	y = _mm_add_ps(y, *(v4sf*)_ps_cephes_log_p3);
	y = _mm_mul_ps(y, x);
	y = _mm_add_ps(y, *(v4sf*)_ps_cephes_log_p4);
	y = _mm_mul_ps(y, x);
	y = _mm_add_ps(y, *(v4sf*)_ps_cephes_log_p5);
	y = _mm_mul_ps(y, x);
	y = _mm_add_ps(y, *(v4sf*)_ps_cephes_log_p6);
	y = _mm_mul_ps(y, x);
	y = _mm_add_ps(y, *(v4sf*)_ps_cephes_log_p7);
	y = _mm_mul_ps(y, x);
	y = _mm_add_ps(y, *(v4sf*)_ps_cephes_log_p8);
	y = _mm_mul_ps(y, x);

	y = _mm_mul_ps(y, z);

	tmp = _mm_mul_ps(e, *(v4sf*)_ps_cephes_log_q1);
	y = _mm_add_ps(y, tmp);

	tmp = _mm_mul_ps(z, *(v4sf*)_ps_0p5);
	y = _mm_sub_ps(y, tmp);

	tmp = _mm_mul_ps(e, *(v4sf*)_ps_cephes_log_q2);
	x = _mm_add_ps(x, y);
	x = _mm_add_ps(x, tmp);
	x = _mm_or_ps(x, invalid_mask); // negative arg will be NAN
	return x;
}
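
The comment above log_ps() states the contract: four logarithms at once, NaN for non-positive input. The exponent/mantissa split it performs (shift by 23, mask off the exponent, OR in 0.5) is the usual log identity; a scalar illustration, not part of the commit:

	// log(x) = log(m) + e * log(2), with x = m * 2^e and m in [0.5, 1)
	#include <cmath>
	#include <cstdio>

	int main()
	{
		float x = 42.0f;
		int e;
		float m = std::frexp(x, &e);   // m in [0.5, 1), x == m * 2^e
		std::printf("%f vs %f\n", std::log(m) + e * std::log(2.0f), std::log(x));
		return 0;
	}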

v4sf exp_ps(v4sf x) {
	v4sf tmp = _mm_setzero_ps(), fx;
	v4si emm0;
	v4sf one = *(v4sf*)_ps_1;

	x = _mm_min_ps(x, *(v4sf*)_ps_exp_hi);
	x = _mm_max_ps(x, *(v4sf*)_ps_exp_lo);

	/* express exp(x) as exp(g + n*log(2)) */
	fx = _mm_mul_ps(x, *(v4sf*)_ps_cephes_LOG2EF);
	fx = _mm_add_ps(fx, *(v4sf*)_ps_0p5);

	/* how to perform a floorf with SSE: just below */
	emm0 = _mm_cvttps_epi32(fx);
	tmp = _mm_cvtepi32_ps(emm0);

	/* if greater, subtract 1 */
	v4sf mask = _mm_cmpgt_ps(tmp, fx);
	mask = _mm_and_ps(mask, one);
	fx = _mm_sub_ps(tmp, mask);

	tmp = _mm_mul_ps(fx, *(v4sf*)_ps_cephes_exp_C1);
	v4sf z = _mm_mul_ps(fx, *(v4sf*)_ps_cephes_exp_C2);
	x = _mm_sub_ps(x, tmp);
	x = _mm_sub_ps(x, z);

	z = _mm_mul_ps(x, x);

	v4sf y = *(v4sf*)_ps_cephes_exp_p0;
	y = _mm_mul_ps(y, x);
	y = _mm_add_ps(y, *(v4sf*)_ps_cephes_exp_p1);
	y = _mm_mul_ps(y, x);
	y = _mm_add_ps(y, *(v4sf*)_ps_cephes_exp_p2);
	y = _mm_mul_ps(y, x);
	y = _mm_add_ps(y, *(v4sf*)_ps_cephes_exp_p3);
	y = _mm_mul_ps(y, x);
	y = _mm_add_ps(y, *(v4sf*)_ps_cephes_exp_p4);
	y = _mm_mul_ps(y, x);
	y = _mm_add_ps(y, *(v4sf*)_ps_cephes_exp_p5);
	y = _mm_mul_ps(y, z);
	y = _mm_add_ps(y, x);
	y = _mm_add_ps(y, one);

	/* build 2^n */
	emm0 = _mm_cvttps_epi32(fx);
	emm0 = _mm_add_epi32(emm0, *(v4si*)_pi32_0x7f);
	emm0 = _mm_slli_epi32(emm0, 23);
	v4sf pow2n = _mm_castsi128_ps(emm0);

	y = _mm_mul_ps(y, pow2n);
	return y;
}
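
The "express exp(x) as exp(g + n*log(2))" comment is the whole trick in exp_ps: peel off n = round(x / log 2) so that exp(x) = 2^n * exp(g) with a small remainder g, and only exp(g) needs the polynomial. A scalar illustration (not part of the commit) using the same two-part constant the code uses for precision:

	#include <cmath>
	#include <cstdio>

	int main()
	{
		float x = 5.3f;
		float n = std::floor(x * 1.44269504f + 0.5f);            // round(x / log(2))
		float g = x - n * 0.693359375f + n * 2.12194440e-4f;     // C1 + C2 ~= log(2), split in two
		float e = std::ldexp(std::exp(g), (int)n);               // 2^n * exp(g)
		std::printf("exp(%g): %f vs %f\n", x, e, std::exp(x));
		return 0;
	}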

/* evaluation of 4 sines at once, using only SSE1+MMX intrinsics so
   it runs also on old Athlon XPs and the Pentium III of your
   grandmother.

   The code is the exact rewriting of the cephes sinf function.
   Precision is excellent as long as x < 8192 (I did not bother to
   take into account the special handling they have for greater values
   -- it does not return garbage for arguments over 8192, though, but
   the extra precision is missing).

   Note that it is such that sinf((float)M_PI) = 8.74e-8, which is the
   surprising but correct result.

   Performance is also surprisingly good, 1.33 times faster than the
   macOS vsinf SSE2 function, and 1.5 times faster than the
   __vrs4_sinf of AMD's ACML (which is only available in 64 bits). Not
   too bad for an SSE1 function (with no special tuning)!
   However the latter libraries probably have a much better handling of NaN,
   Inf, denormalized and other special arguments.

   On my Core 1 Duo, the execution of this function takes approximately 95 cycles.

   From what I have observed on the experiments with the Intel AMath lib, switching to an
   SSE2 version would improve the perf by only 10%.

   Since it is based on SSE intrinsics, it has to be compiled at -O2 to
   deliver full speed.
*/
v4sf sin_ps(v4sf x) { // any x
	v4sf xmm1, xmm2 = _mm_setzero_ps(), xmm3, sign_bit, y;

	v4si emm0, emm2;

	sign_bit = x;
	/* take the absolute value */
	x = _mm_and_ps(x, *(v4sf*)_ps_inv_sign_mask);
	/* extract the sign bit (upper one) */
	sign_bit = _mm_and_ps(sign_bit, *(v4sf*)_ps_sign_mask);

	/* scale by 4/Pi */
	y = _mm_mul_ps(x, *(v4sf*)_ps_cephes_FOPI);

	/* store the integer part of y in mm0 */
	emm2 = _mm_cvttps_epi32(y);
	/* j=(j+1) & (~1) (see the cephes sources) */
	emm2 = _mm_add_epi32(emm2, *(v4si*)_pi32_1);
	emm2 = _mm_and_si128(emm2, *(v4si*)_pi32_inv1);
	y = _mm_cvtepi32_ps(emm2);

	/* get the swap sign flag */
	emm0 = _mm_and_si128(emm2, *(v4si*)_pi32_4);
	emm0 = _mm_slli_epi32(emm0, 29);
	/* get the polynomial selection mask
	   there is one polynomial for 0 <= x <= Pi/4
	   and another one for Pi/4 < x <= Pi/2

	   Both branches will be computed.
	*/
	emm2 = _mm_and_si128(emm2, *(v4si*)_pi32_2);
	emm2 = _mm_cmpeq_epi32(emm2, _mm_setzero_si128());

	v4sf swap_sign_bit = _mm_castsi128_ps(emm0);
	v4sf poly_mask = _mm_castsi128_ps(emm2);
	sign_bit = _mm_xor_ps(sign_bit, swap_sign_bit);

	/* The magic pass: "Extended precision modular arithmetic"
	   x = ((x - y * DP1) - y * DP2) - y * DP3; */
	xmm1 = *(v4sf*)_ps_minus_cephes_DP1;
	xmm2 = *(v4sf*)_ps_minus_cephes_DP2;
	xmm3 = *(v4sf*)_ps_minus_cephes_DP3;
	xmm1 = _mm_mul_ps(y, xmm1);
	xmm2 = _mm_mul_ps(y, xmm2);
	xmm3 = _mm_mul_ps(y, xmm3);
	x = _mm_add_ps(x, xmm1);
	x = _mm_add_ps(x, xmm2);
	x = _mm_add_ps(x, xmm3);

	/* Evaluate the first polynomial (0 <= x <= Pi/4) */
	y = *(v4sf*)_ps_coscof_p0;
	v4sf z = _mm_mul_ps(x, x);

	y = _mm_mul_ps(y, z);
	y = _mm_add_ps(y, *(v4sf*)_ps_coscof_p1);
	y = _mm_mul_ps(y, z);
	y = _mm_add_ps(y, *(v4sf*)_ps_coscof_p2);
	y = _mm_mul_ps(y, z);
	y = _mm_mul_ps(y, z);
	v4sf tmp = _mm_mul_ps(z, *(v4sf*)_ps_0p5);
	y = _mm_sub_ps(y, tmp);
	y = _mm_add_ps(y, *(v4sf*)_ps_1);

	/* Evaluate the second polynomial (Pi/4 <= x <= Pi/2) */

	v4sf y2 = *(v4sf*)_ps_sincof_p0;
	y2 = _mm_mul_ps(y2, z);
	y2 = _mm_add_ps(y2, *(v4sf*)_ps_sincof_p1);
	y2 = _mm_mul_ps(y2, z);
	y2 = _mm_add_ps(y2, *(v4sf*)_ps_sincof_p2);
	y2 = _mm_mul_ps(y2, z);
	y2 = _mm_mul_ps(y2, x);
	y2 = _mm_add_ps(y2, x);

	/* select the correct result from the two polynomials */
	xmm3 = poly_mask;
	y2 = _mm_and_ps(xmm3, y2); //, xmm3);
	y = _mm_andnot_ps(xmm3, y);
	y = _mm_add_ps(y, y2);
	/* update the sign */
	y = _mm_xor_ps(y, sign_bit);
	return y;
}
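
The header comment quantifies the accuracy claim (sinf((float)M_PI) = 8.74e-8, good precision below 8192). A small spot-check one could build against sse_mathfun.h/.cpp, comparing lanes of sin_ps with the scalar library sine; purely illustrative, not part of the commit:

	#include <cmath>
	#include <cstdio>
	#include "sse_mathfun.h"

	int main()
	{
		// lanes are (x0, x1, x2, x3); _mm_set_ps takes them highest lane first
		v4sf x = _mm_set_ps(100.0f, 3.14159265f, 0.5f, 0.0f);
		v4sf s = sin_ps(x);

		float in[4], out[4];
		_mm_storeu_ps(in, x);
		_mm_storeu_ps(out, s);
		for (int i = 0; i < 4; ++i)
			std::printf("sin(%g) = %.8f (libm: %.8f)\n", in[i], out[i], std::sin(in[i]));
		return 0;
	}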

/* almost the same as sin_ps */
v4sf cos_ps(v4sf x) { // any x
	v4sf xmm1, xmm2 = _mm_setzero_ps(), xmm3, y;

	v4si emm0, emm2;

	/* take the absolute value */
	x = _mm_and_ps(x, *(v4sf*)_ps_inv_sign_mask);

	/* scale by 4/Pi */
	y = _mm_mul_ps(x, *(v4sf*)_ps_cephes_FOPI);

	/* store the integer part of y in mm0 */
	emm2 = _mm_cvttps_epi32(y);
	/* j=(j+1) & (~1) (see the cephes sources) */
	emm2 = _mm_add_epi32(emm2, *(v4si*)_pi32_1);
	emm2 = _mm_and_si128(emm2, *(v4si*)_pi32_inv1);
	y = _mm_cvtepi32_ps(emm2);

	emm2 = _mm_sub_epi32(emm2, *(v4si*)_pi32_2);

	/* get the swap sign flag */
	emm0 = _mm_andnot_si128(emm2, *(v4si*)_pi32_4);
	emm0 = _mm_slli_epi32(emm0, 29);
	/* get the polynomial selection mask */
	emm2 = _mm_and_si128(emm2, *(v4si*)_pi32_2);
	emm2 = _mm_cmpeq_epi32(emm2, _mm_setzero_si128());

	v4sf sign_bit = _mm_castsi128_ps(emm0);
	v4sf poly_mask = _mm_castsi128_ps(emm2);

	/* The magic pass: "Extended precision modular arithmetic"
	   x = ((x - y * DP1) - y * DP2) - y * DP3; */
	xmm1 = *(v4sf*)_ps_minus_cephes_DP1;
	xmm2 = *(v4sf*)_ps_minus_cephes_DP2;
	xmm3 = *(v4sf*)_ps_minus_cephes_DP3;
	xmm1 = _mm_mul_ps(y, xmm1);
	xmm2 = _mm_mul_ps(y, xmm2);
	xmm3 = _mm_mul_ps(y, xmm3);
	x = _mm_add_ps(x, xmm1);
	x = _mm_add_ps(x, xmm2);
	x = _mm_add_ps(x, xmm3);

	/* Evaluate the first polynomial (0 <= x <= Pi/4) */
	y = *(v4sf*)_ps_coscof_p0;
	v4sf z = _mm_mul_ps(x, x);

	y = _mm_mul_ps(y, z);
	y = _mm_add_ps(y, *(v4sf*)_ps_coscof_p1);
	y = _mm_mul_ps(y, z);
	y = _mm_add_ps(y, *(v4sf*)_ps_coscof_p2);
	y = _mm_mul_ps(y, z);
	y = _mm_mul_ps(y, z);
	v4sf tmp = _mm_mul_ps(z, *(v4sf*)_ps_0p5);
	y = _mm_sub_ps(y, tmp);
	y = _mm_add_ps(y, *(v4sf*)_ps_1);

	/* Evaluate the second polynomial (Pi/4 <= x <= Pi/2) */

	v4sf y2 = *(v4sf*)_ps_sincof_p0;
	y2 = _mm_mul_ps(y2, z);
	y2 = _mm_add_ps(y2, *(v4sf*)_ps_sincof_p1);
	y2 = _mm_mul_ps(y2, z);
	y2 = _mm_add_ps(y2, *(v4sf*)_ps_sincof_p2);
	y2 = _mm_mul_ps(y2, z);
	y2 = _mm_mul_ps(y2, x);
	y2 = _mm_add_ps(y2, x);

	/* select the correct result from the two polynomials */
	xmm3 = poly_mask;
	y2 = _mm_and_ps(xmm3, y2); //, xmm3);
	y = _mm_andnot_ps(xmm3, y);
	y = _mm_add_ps(y, y2);
	/* update the sign */
	y = _mm_xor_ps(y, sign_bit);

	return y;
}


/* since sin_ps and cos_ps are almost identical, sincos_ps could replace both of them;
   it is almost as fast, and gives you a free cosine with your sine */
void sincos_ps(v4sf x, v4sf *s, v4sf *c) {
	v4sf xmm1, xmm2, xmm3 = _mm_setzero_ps(), sign_bit_sin, y;
	v4si emm0, emm2, emm4;

	sign_bit_sin = x;
	/* take the absolute value */
	x = _mm_and_ps(x, *(v4sf*)_ps_inv_sign_mask);
	/* extract the sign bit (upper one) */
	sign_bit_sin = _mm_and_ps(sign_bit_sin, *(v4sf*)_ps_sign_mask);

	/* scale by 4/Pi */
	y = _mm_mul_ps(x, *(v4sf*)_ps_cephes_FOPI);

	/* store the integer part of y in emm2 */
	emm2 = _mm_cvttps_epi32(y);

	/* j=(j+1) & (~1) (see the cephes sources) */
	emm2 = _mm_add_epi32(emm2, *(v4si*)_pi32_1);
	emm2 = _mm_and_si128(emm2, *(v4si*)_pi32_inv1);
	y = _mm_cvtepi32_ps(emm2);

	emm4 = emm2;

	/* get the swap sign flag for the sine */
	emm0 = _mm_and_si128(emm2, *(v4si*)_pi32_4);
	emm0 = _mm_slli_epi32(emm0, 29);
	v4sf swap_sign_bit_sin = _mm_castsi128_ps(emm0);

	/* get the polynomial selection mask for the sine */
	emm2 = _mm_and_si128(emm2, *(v4si*)_pi32_2);
	emm2 = _mm_cmpeq_epi32(emm2, _mm_setzero_si128());
	v4sf poly_mask = _mm_castsi128_ps(emm2);

	/* The magic pass: "Extended precision modular arithmetic"
	   x = ((x - y * DP1) - y * DP2) - y * DP3; */
	xmm1 = *(v4sf*)_ps_minus_cephes_DP1;
	xmm2 = *(v4sf*)_ps_minus_cephes_DP2;
	xmm3 = *(v4sf*)_ps_minus_cephes_DP3;
	xmm1 = _mm_mul_ps(y, xmm1);
	xmm2 = _mm_mul_ps(y, xmm2);
	xmm3 = _mm_mul_ps(y, xmm3);
	x = _mm_add_ps(x, xmm1);
	x = _mm_add_ps(x, xmm2);
	x = _mm_add_ps(x, xmm3);

	emm4 = _mm_sub_epi32(emm4, *(v4si*)_pi32_2);
	emm4 = _mm_andnot_si128(emm4, *(v4si*)_pi32_4);
	emm4 = _mm_slli_epi32(emm4, 29);
	v4sf sign_bit_cos = _mm_castsi128_ps(emm4);

	sign_bit_sin = _mm_xor_ps(sign_bit_sin, swap_sign_bit_sin);

	/* Evaluate the first polynomial (0 <= x <= Pi/4) */
	v4sf z = _mm_mul_ps(x, x);
	y = *(v4sf*)_ps_coscof_p0;

	y = _mm_mul_ps(y, z);
	y = _mm_add_ps(y, *(v4sf*)_ps_coscof_p1);
	y = _mm_mul_ps(y, z);
	y = _mm_add_ps(y, *(v4sf*)_ps_coscof_p2);
	y = _mm_mul_ps(y, z);
	y = _mm_mul_ps(y, z);
	v4sf tmp = _mm_mul_ps(z, *(v4sf*)_ps_0p5);
	y = _mm_sub_ps(y, tmp);
	y = _mm_add_ps(y, *(v4sf*)_ps_1);

	/* Evaluate the second polynomial (Pi/4 <= x <= Pi/2) */

	v4sf y2 = *(v4sf*)_ps_sincof_p0;
	y2 = _mm_mul_ps(y2, z);
	y2 = _mm_add_ps(y2, *(v4sf*)_ps_sincof_p1);
	y2 = _mm_mul_ps(y2, z);
	y2 = _mm_add_ps(y2, *(v4sf*)_ps_sincof_p2);
	y2 = _mm_mul_ps(y2, z);
	y2 = _mm_mul_ps(y2, x);
	y2 = _mm_add_ps(y2, x);

	/* select the correct result from the two polynomials */
	xmm3 = poly_mask;
	v4sf ysin2 = _mm_and_ps(xmm3, y2);
	v4sf ysin1 = _mm_andnot_ps(xmm3, y);
	y2 = _mm_sub_ps(y2, ysin2);
	y = _mm_sub_ps(y, ysin1);

	xmm1 = _mm_add_ps(ysin1, ysin2);
	xmm2 = _mm_add_ps(y, y2);

	/* update the sign */
	*s = _mm_xor_ps(xmm1, sign_bit_sin);
	*c = _mm_xor_ps(xmm2, sign_bit_cos);
}
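
sincos_ps() is the entry point the new AngleVectors code relies on: degrees are converted to radians with one packed multiply and all three sines and cosines come back in a single call. A standalone sketch of that usage pattern, not part of the commit (the angle order pitch/yaw/roll and the unused fourth lane are assumptions for the illustration):

	#include <cstdio>
	#include "sse_mathfun.h"

	int main()
	{
		const float d2r = 3.14159265358979f / 180.0f;
		ALIGN16_BEG float deg2rad[4] ALIGN16_END = { d2r, d2r, d2r, d2r };
		ALIGN16_BEG float angles[4] ALIGN16_END = { 30.0f, 60.0f, 15.0f, 0.0f }; // pitch, yaw, roll, unused

		v4sf s, c;
		sincos_ps(_mm_mul_ps(_mm_load_ps(angles), _mm_load_ps(deg2rad)), &s, &c);

		float sines[4], cosines[4];
		_mm_storeu_ps(sines, s);
		_mm_storeu_ps(cosines, c);
		std::printf("pitch: sin %f cos %f\n", sines[0], cosines[0]);
		std::printf("yaw:   sin %f cos %f\n", sines[1], cosines[1]);
		std::printf("roll:  sin %f cos %f\n", sines[2], cosines[2]);
		return 0;
	}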

rehlds/engine/sse_mathfun.h (new file, 120 lines)
@@ -0,0 +1,120 @@
/* SIMD (SSE1+MMX or SSE2) implementation of sin, cos, exp and log

   Inspired by Intel Approximate Math library, and based on the
   corresponding algorithms of the cephes math library

   The default is to use the SSE1 version. If you define USE_SSE2,
   the SSE2 intrinsics will be used in place of the MMX intrinsics. Do
   not expect any significant performance improvement with SSE2.
*/

/* Copyright (C) 2007 Julien Pommier

   This software is provided 'as-is', without any express or implied
   warranty. In no event will the authors be held liable for any damages
   arising from the use of this software.

   Permission is granted to anyone to use this software for any purpose,
   including commercial applications, and to alter it and redistribute it
   freely, subject to the following restrictions:

   1. The origin of this software must not be misrepresented; you must not
      claim that you wrote the original software. If you use this software
      in a product, an acknowledgment in the product documentation would be
      appreciated but is not required.
   2. Altered source versions must be plainly marked as such, and must not be
      misrepresented as being the original software.
   3. This notice may not be removed or altered from any source distribution.

   (this is the zlib license)
*/
#pragma once

#include <xmmintrin.h>

/* yes I know, the top of this file is quite ugly */

#ifdef _MSC_VER /* visual c++ */
# define ALIGN16_BEG __declspec(align(16))
# define ALIGN16_END
#else /* gcc or icc */
# define ALIGN16_BEG
# define ALIGN16_END __attribute__((aligned(16)))
#endif

/* __m128 is ugly to write */
typedef __m128 v4sf; // vector of 4 float (sse1)

#include <emmintrin.h>
typedef __m128i v4si; // vector of 4 int (sse2)

/* declare some SSE constants -- why can't I figure a better way to do that? */
#define _PS_CONST(Name, Val) \
	static const ALIGN16_BEG float _ps_##Name[4] ALIGN16_END = { Val, Val, Val, Val }
#define _PI32_CONST(Name, Val) \
	static const ALIGN16_BEG int _pi32_##Name[4] ALIGN16_END = { Val, Val, Val, Val }
#define _PS_CONST_TYPE(Name, Type, Val) \
	static const ALIGN16_BEG Type _ps_##Name[4] ALIGN16_END = { Val, Val, Val, Val }

_PS_CONST(1, 1.0f);
_PS_CONST(0p5, 0.5f);
/* the smallest non denormalized float number */
_PS_CONST_TYPE(min_norm_pos, int, 0x00800000);
_PS_CONST_TYPE(mant_mask, int, 0x7f800000);
_PS_CONST_TYPE(inv_mant_mask, int, ~0x7f800000);

_PS_CONST_TYPE(sign_mask, int, (int)0x80000000);
_PS_CONST_TYPE(inv_sign_mask, int, ~0x80000000);

_PI32_CONST(1, 1);
_PI32_CONST(inv1, ~1);
_PI32_CONST(2, 2);
_PI32_CONST(4, 4);
_PI32_CONST(0x7f, 0x7f);

_PS_CONST(cephes_SQRTHF, 0.707106781186547524f);
_PS_CONST(cephes_log_p0, 7.0376836292E-2f);
_PS_CONST(cephes_log_p1, -1.1514610310E-1f);
_PS_CONST(cephes_log_p2, 1.1676998740E-1f);
_PS_CONST(cephes_log_p3, -1.2420140846E-1f);
_PS_CONST(cephes_log_p4, +1.4249322787E-1f);
_PS_CONST(cephes_log_p5, -1.6668057665E-1f);
_PS_CONST(cephes_log_p6, +2.0000714765E-1f);
_PS_CONST(cephes_log_p7, -2.4999993993E-1f);
_PS_CONST(cephes_log_p8, +3.3333331174E-1f);
_PS_CONST(cephes_log_q1, -2.12194440e-4f);
_PS_CONST(cephes_log_q2, 0.693359375f);

_PS_CONST(exp_hi, 88.3762626647949f);
_PS_CONST(exp_lo, -88.3762626647949f);

_PS_CONST(cephes_LOG2EF, 1.44269504088896341f);
_PS_CONST(cephes_exp_C1, 0.693359375f);
_PS_CONST(cephes_exp_C2, -2.12194440e-4f);

_PS_CONST(cephes_exp_p0, 1.9875691500E-4f);
_PS_CONST(cephes_exp_p1, 1.3981999507E-3f);
_PS_CONST(cephes_exp_p2, 8.3334519073E-3f);
_PS_CONST(cephes_exp_p3, 4.1665795894E-2f);
_PS_CONST(cephes_exp_p4, 1.6666665459E-1f);
_PS_CONST(cephes_exp_p5, 5.0000001201E-1f);

_PS_CONST(minus_cephes_DP1, -0.78515625f);
_PS_CONST(minus_cephes_DP2, -2.4187564849853515625e-4f);
_PS_CONST(minus_cephes_DP3, -3.77489497744594108e-8f);
_PS_CONST(sincof_p0, -1.9515295891E-4f);
_PS_CONST(sincof_p1, 8.3321608736E-3f);
_PS_CONST(sincof_p2, -1.6666654611E-1f);
_PS_CONST(coscof_p0, 2.443315711809948E-005f);
_PS_CONST(coscof_p1, -1.388731625493765E-003f);
_PS_CONST(coscof_p2, 4.166664568298827E-002f);
_PS_CONST(cephes_FOPI, 1.27323954473516f); // 4 / M_PI

extern v4sf log_ps(v4sf x);
extern v4sf exp_ps(v4sf x);
extern v4sf sin_ps(v4sf x);
extern v4sf cos_ps(v4sf x);
extern void sincos_ps(v4sf x, v4sf *s, v4sf *c);
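
The _PS_CONST family above simply bakes 16-byte-aligned broadcast constants into static arrays, so the implementation can load them with a plain dereference instead of building them at runtime. A purely illustrative check (not part of the commit) of what one of them provides:

	// _PS_CONST(0p5, 0.5f) expands (on MSVC) to:
	//   static const __declspec(align(16)) float _ps_0p5[4] = { 0.5f, 0.5f, 0.5f, 0.5f };
	#include <cstdio>
	#include "sse_mathfun.h"

	int main()
	{
		v4sf half = *(v4sf*)_ps_0p5;   // same value as _mm_set1_ps(0.5f), loaded from static storage
		float out[4];
		_mm_storeu_ps(out, half);
		std::printf("%f %f %f %f\n", out[0], out[1], out[2], out[3]);
		return 0;
	}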

@@ -77,6 +77,7 @@
    <ClCompile Include="..\engine\public_amalgamation.cpp" />
    <ClCompile Include="..\engine\r_studio.cpp" />
    <ClCompile Include="..\engine\snd_null.cpp" />
    <ClCompile Include="..\engine\sse_mathfun.cpp" />
    <ClCompile Include="..\engine\sv_log.cpp" />
    <ClCompile Include="..\engine\sv_main.cpp" />
    <ClCompile Include="..\engine\sv_move.cpp" />
@@ -440,6 +441,7 @@
    <ClInclude Include="..\engine\server.h" />
    <ClInclude Include="..\engine\server_static.h" />
    <ClInclude Include="..\engine\sound.h" />
    <ClInclude Include="..\engine\sse_mathfun.h" />
    <ClInclude Include="..\engine\studio_rehlds.h" />
    <ClInclude Include="..\engine\sv_log.h" />
    <ClInclude Include="..\engine\sv_move.h" />

@@ -346,6 +346,9 @@
    <ClCompile Include="..\rehlds\rehlds_security.cpp">
      <Filter>rehlds</Filter>
    </ClCompile>
    <ClCompile Include="..\engine\sse_mathfun.cpp">
      <Filter>engine</Filter>
    </ClCompile>
  </ItemGroup>
  <ItemGroup>
    <ClInclude Include="..\hookers\memory.h">
@@ -1068,6 +1071,9 @@
    <ClInclude Include="..\common\qlimits.h">
      <Filter>common</Filter>
    </ClInclude>
    <ClInclude Include="..\engine\sse_mathfun.h">
      <Filter>engine</Filter>
    </ClInclude>
  </ItemGroup>
  <ItemGroup>
    <None Include="..\linux\appversion.sh">

@@ -6,6 +6,7 @@

#include "archtypes.h"
#include "asmlib.h"
#include "sse_mathfun.h"
#include "mathlib.h"

#include "sys_shared.h"