A collection of basic/generally desirable code I use across multiple C++ projects.

/* SIMD (SSE1+MMX or SSE2) implementation of sin, cos, exp and log

   Inspired by the Intel Approximate Math library, and based on the
   corresponding algorithms of the cephes math library.

   The default is to use the SSE1 version. If you define USE_SSE2, the
   SSE2 intrinsics will be used in place of the MMX intrinsics. Do
   not expect any significant performance improvement with SSE2.
*/

/* Copyright (C) 2007 Julien Pommier

   This software is provided 'as-is', without any express or implied
   warranty. In no event will the authors be held liable for any damages
   arising from the use of this software.

   Permission is granted to anyone to use this software for any purpose,
   including commercial applications, and to alter it and redistribute it
   freely, subject to the following restrictions:

   1. The origin of this software must not be misrepresented; you must not
      claim that you wrote the original software. If you use this software
      in a product, an acknowledgment in the product documentation would be
      appreciated but is not required.
   2. Altered source versions must be plainly marked as such, and must not be
      misrepresented as being the original software.
   3. This notice may not be removed or altered from any source distribution.

   (this is the zlib license)
*/

#include <xmmintrin.h>

/* yes I know, the top of this file is quite ugly */
#ifdef _MSC_VER /* visual c++ */
# define ALIGN16_BEG __declspec(align(16))
# define ALIGN16_END
#else /* gcc or icc */
# define ALIGN16_BEG
# define ALIGN16_END __attribute__((aligned(16)))
#endif

/* __m128 is ugly to write */
typedef __m128 v4sf;  // vector of 4 floats (sse1)

#ifdef USE_SSE2
# include <emmintrin.h>
typedef __m128i v4si; // vector of 4 ints (sse2)
#else
typedef __m64 v2si;   // vector of 2 ints (mmx)
#endif

/* declare some SSE constants -- why can't I figure out a better way to do that? */
#define _PS_CONST(Name, Val) \
  static const ALIGN16_BEG float _ps_##Name[4] ALIGN16_END = { Val, Val, Val, Val }
#define _PI32_CONST(Name, Val) \
  static const ALIGN16_BEG int _pi32_##Name[4] ALIGN16_END = { Val, Val, Val, Val }
#define _PS_CONST_TYPE(Name, Type, Val) \
  static const ALIGN16_BEG Type _ps_##Name[4] ALIGN16_END = { Val, Val, Val, Val }
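/* Editor's note (not part of the original file): with gcc, for example,
   _PS_CONST(1, 1.0f) below expands to
     static const float _ps_1[4] __attribute__((aligned(16))) = { 1.0f, 1.0f, 1.0f, 1.0f };
   i.e. a 16-byte-aligned broadcast constant that can be loaded as a whole
   vector via *(v4sf*)_ps_1. */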
_PS_CONST(1 , 1.0f);
_PS_CONST(0p5, 0.5f);
/* the smallest non-denormalized float number */
_PS_CONST_TYPE(min_norm_pos, int, 0x00800000);
_PS_CONST_TYPE(mant_mask, int, 0x7f800000);
_PS_CONST_TYPE(inv_mant_mask, int, ~0x7f800000);
_PS_CONST_TYPE(sign_mask, int, (int)0x80000000);
_PS_CONST_TYPE(inv_sign_mask, int, ~0x80000000);

_PI32_CONST(1, 1);
_PI32_CONST(inv1, ~1);
_PI32_CONST(2, 2);
_PI32_CONST(4, 4);
_PI32_CONST(0x7f, 0x7f);

_PS_CONST(cephes_SQRTHF, 0.707106781186547524);
_PS_CONST(cephes_log_p0, 7.0376836292E-2);
_PS_CONST(cephes_log_p1, -1.1514610310E-1);
_PS_CONST(cephes_log_p2, 1.1676998740E-1);
_PS_CONST(cephes_log_p3, -1.2420140846E-1);
_PS_CONST(cephes_log_p4, +1.4249322787E-1);
_PS_CONST(cephes_log_p5, -1.6668057665E-1);
_PS_CONST(cephes_log_p6, +2.0000714765E-1);
_PS_CONST(cephes_log_p7, -2.4999993993E-1);
_PS_CONST(cephes_log_p8, +3.3333331174E-1);
_PS_CONST(cephes_log_q1, -2.12194440e-4);
_PS_CONST(cephes_log_q2, 0.693359375);

#ifndef USE_SSE2
typedef union xmm_mm_union {
  __m128 xmm;
  __m64 mm[2];
} xmm_mm_union;

#define COPY_XMM_TO_MM(xmm_, mm0_, mm1_) { \
    xmm_mm_union u; u.xmm = xmm_;          \
    mm0_ = u.mm[0];                        \
    mm1_ = u.mm[1];                        \
}

#define COPY_MM_TO_XMM(mm0_, mm1_, xmm_) {                    \
    xmm_mm_union u; u.mm[0]=mm0_; u.mm[1]=mm1_; xmm_ = u.xmm; \
}
#endif // USE_SSE2

/* natural logarithm computed for 4 simultaneous floats;
   returns NaN for x <= 0
*/
v4sf log_ps(v4sf x) {
#ifdef USE_SSE2
  v4si emm0;
#else
  v2si mm0, mm1;
#endif
  v4sf one = *(v4sf*)_ps_1;
  v4sf invalid_mask = _mm_cmple_ps(x, _mm_setzero_ps());

  x = _mm_max_ps(x, *(v4sf*)_ps_min_norm_pos);  /* cut off denormalized stuff */

#ifndef USE_SSE2
  /* part 1: x = frexpf(x, &e); */
  COPY_XMM_TO_MM(x, mm0, mm1);
  mm0 = _mm_srli_pi32(mm0, 23);
  mm1 = _mm_srli_pi32(mm1, 23);
#else
  emm0 = _mm_srli_epi32(_mm_castps_si128(x), 23);
#endif
  /* keep only the fractional part */
  x = _mm_and_ps(x, *(v4sf*)_ps_inv_mant_mask);
  x = _mm_or_ps(x, *(v4sf*)_ps_0p5);

#ifndef USE_SSE2
  /* now mm0:mm1 contain the real base-2 exponent */
  mm0 = _mm_sub_pi32(mm0, *(v2si*)_pi32_0x7f);
  mm1 = _mm_sub_pi32(mm1, *(v2si*)_pi32_0x7f);
  v4sf e = _mm_cvtpi32x2_ps(mm0, mm1);
  _mm_empty(); /* bye bye mmx */
#else
  emm0 = _mm_sub_epi32(emm0, *(v4si*)_pi32_0x7f);
  v4sf e = _mm_cvtepi32_ps(emm0);
#endif

  e = _mm_add_ps(e, one);

  /* part 2:
     if( x < SQRTHF ) {
       e -= 1;
       x = x + x - 1.0;
     } else { x = x - 1.0; }
  */
  v4sf mask = _mm_cmplt_ps(x, *(v4sf*)_ps_cephes_SQRTHF);
  v4sf tmp = _mm_and_ps(x, mask);
  x = _mm_sub_ps(x, one);
  e = _mm_sub_ps(e, _mm_and_ps(one, mask));
  x = _mm_add_ps(x, tmp);

  v4sf z = _mm_mul_ps(x,x);

  v4sf y = *(v4sf*)_ps_cephes_log_p0;
  y = _mm_mul_ps(y, x);
  y = _mm_add_ps(y, *(v4sf*)_ps_cephes_log_p1);
  y = _mm_mul_ps(y, x);
  y = _mm_add_ps(y, *(v4sf*)_ps_cephes_log_p2);
  y = _mm_mul_ps(y, x);
  y = _mm_add_ps(y, *(v4sf*)_ps_cephes_log_p3);
  y = _mm_mul_ps(y, x);
  y = _mm_add_ps(y, *(v4sf*)_ps_cephes_log_p4);
  y = _mm_mul_ps(y, x);
  y = _mm_add_ps(y, *(v4sf*)_ps_cephes_log_p5);
  y = _mm_mul_ps(y, x);
  y = _mm_add_ps(y, *(v4sf*)_ps_cephes_log_p6);
  y = _mm_mul_ps(y, x);
  y = _mm_add_ps(y, *(v4sf*)_ps_cephes_log_p7);
  y = _mm_mul_ps(y, x);
  y = _mm_add_ps(y, *(v4sf*)_ps_cephes_log_p8);
  y = _mm_mul_ps(y, x);

  y = _mm_mul_ps(y, z);

  tmp = _mm_mul_ps(e, *(v4sf*)_ps_cephes_log_q1);
  y = _mm_add_ps(y, tmp);

  tmp = _mm_mul_ps(z, *(v4sf*)_ps_0p5);
  y = _mm_sub_ps(y, tmp);

  tmp = _mm_mul_ps(e, *(v4sf*)_ps_cephes_log_q2);
  x = _mm_add_ps(x, y);
  x = _mm_add_ps(x, tmp);
  x = _mm_or_ps(x, invalid_mask); // negative arg will be NaN
  return x;
}
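/* A minimal usage sketch (editor's addition, not part of the original
   sse_mathfun.h): compute the natural log of four floats in one call.
   The helper name log_ps_example is hypothetical. */
static inline void log_ps_example(void) {
  float in[4]  = { 0.5f, 1.0f, 2.0f, 10.0f };
  float out[4];
  v4sf vx = _mm_loadu_ps(in);   /* load 4 floats (unaligned load) */
  v4sf vy = log_ps(vx);         /* 4 natural logs at once */
  _mm_storeu_ps(out, vy);       /* out ~= { -0.693f, 0.0f, 0.693f, 2.303f } */
}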
_PS_CONST(exp_hi, 88.3762626647949f);
_PS_CONST(exp_lo, -88.3762626647949f);

_PS_CONST(cephes_LOG2EF, 1.44269504088896341);
_PS_CONST(cephes_exp_C1, 0.693359375);
_PS_CONST(cephes_exp_C2, -2.12194440e-4);

_PS_CONST(cephes_exp_p0, 1.9875691500E-4);
_PS_CONST(cephes_exp_p1, 1.3981999507E-3);
_PS_CONST(cephes_exp_p2, 8.3334519073E-3);
_PS_CONST(cephes_exp_p3, 4.1665795894E-2);
_PS_CONST(cephes_exp_p4, 1.6666665459E-1);
_PS_CONST(cephes_exp_p5, 5.0000001201E-1);

v4sf exp_ps(v4sf x) {
  v4sf tmp = _mm_setzero_ps(), fx;
#ifdef USE_SSE2
  v4si emm0;
#else
  v2si mm0, mm1;
#endif
  v4sf one = *(v4sf*)_ps_1;

  x = _mm_min_ps(x, *(v4sf*)_ps_exp_hi);
  x = _mm_max_ps(x, *(v4sf*)_ps_exp_lo);

  /* express exp(x) as exp(g + n*log(2)) */
  fx = _mm_mul_ps(x, *(v4sf*)_ps_cephes_LOG2EF);
  fx = _mm_add_ps(fx, *(v4sf*)_ps_0p5);

  /* how to perform a floorf with SSE: just below */
#ifndef USE_SSE2
  /* step 1 : cast to int */
  tmp = _mm_movehl_ps(tmp, fx);
  mm0 = _mm_cvttps_pi32(fx);
  mm1 = _mm_cvttps_pi32(tmp);
  /* step 2 : cast back to float */
  tmp = _mm_cvtpi32x2_ps(mm0, mm1);
#else
  emm0 = _mm_cvttps_epi32(fx);
  tmp = _mm_cvtepi32_ps(emm0);
#endif
  /* if greater, subtract 1 */
  v4sf mask = _mm_cmpgt_ps(tmp, fx);
  mask = _mm_and_ps(mask, one);
  fx = _mm_sub_ps(tmp, mask);

  tmp = _mm_mul_ps(fx, *(v4sf*)_ps_cephes_exp_C1);
  v4sf z = _mm_mul_ps(fx, *(v4sf*)_ps_cephes_exp_C2);
  x = _mm_sub_ps(x, tmp);
  x = _mm_sub_ps(x, z);

  z = _mm_mul_ps(x,x);

  v4sf y = *(v4sf*)_ps_cephes_exp_p0;
  y = _mm_mul_ps(y, x);
  y = _mm_add_ps(y, *(v4sf*)_ps_cephes_exp_p1);
  y = _mm_mul_ps(y, x);
  y = _mm_add_ps(y, *(v4sf*)_ps_cephes_exp_p2);
  y = _mm_mul_ps(y, x);
  y = _mm_add_ps(y, *(v4sf*)_ps_cephes_exp_p3);
  y = _mm_mul_ps(y, x);
  y = _mm_add_ps(y, *(v4sf*)_ps_cephes_exp_p4);
  y = _mm_mul_ps(y, x);
  y = _mm_add_ps(y, *(v4sf*)_ps_cephes_exp_p5);
  y = _mm_mul_ps(y, z);
  y = _mm_add_ps(y, x);
  y = _mm_add_ps(y, one);

  /* build 2^n */
#ifndef USE_SSE2
  z = _mm_movehl_ps(z, fx);
  mm0 = _mm_cvttps_pi32(fx);
  mm1 = _mm_cvttps_pi32(z);
  mm0 = _mm_add_pi32(mm0, *(v2si*)_pi32_0x7f);
  mm1 = _mm_add_pi32(mm1, *(v2si*)_pi32_0x7f);
  mm0 = _mm_slli_pi32(mm0, 23);
  mm1 = _mm_slli_pi32(mm1, 23);

  v4sf pow2n;
  COPY_MM_TO_XMM(mm0, mm1, pow2n);
  _mm_empty();
#else
  emm0 = _mm_cvttps_epi32(fx);
  emm0 = _mm_add_epi32(emm0, *(v4si*)_pi32_0x7f);
  emm0 = _mm_slli_epi32(emm0, 23);
  v4sf pow2n = _mm_castsi128_ps(emm0);
#endif
  y = _mm_mul_ps(y, pow2n);
  return y;
}
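/* Usage sketch for exp_ps (editor's addition, not part of the original file):
   exponentiate four values in one call. Inputs are clamped internally to
   roughly +/-88.38, so very large arguments saturate to a large finite value
   rather than producing +inf. The helper name exp_ps_example is hypothetical. */
static inline void exp_ps_example(void) {
  float out[4];
  v4sf vx = _mm_setr_ps(-1.0f, 0.0f, 1.0f, 2.0f); /* lanes in memory order */
  _mm_storeu_ps(out, exp_ps(vx));                 /* out ~= { 0.368f, 1.0f, 2.718f, 7.389f } */
}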
_PS_CONST(minus_cephes_DP1, -0.78515625);
_PS_CONST(minus_cephes_DP2, -2.4187564849853515625e-4);
_PS_CONST(minus_cephes_DP3, -3.77489497744594108e-8);
_PS_CONST(sincof_p0, -1.9515295891E-4);
_PS_CONST(sincof_p1,  8.3321608736E-3);
_PS_CONST(sincof_p2, -1.6666654611E-1);
_PS_CONST(coscof_p0,  2.443315711809948E-005);
_PS_CONST(coscof_p1, -1.388731625493765E-003);
_PS_CONST(coscof_p2,  4.166664568298827E-002);
_PS_CONST(cephes_FOPI, 1.27323954473516); // 4 / M_PI

/* evaluation of 4 sines at once, using only SSE1+MMX intrinsics, so
   it also runs on old Athlon XPs and the Pentium III of your
   grandmother.

   The code is an exact rewriting of the cephes sinf function.
   Precision is excellent as long as x < 8192 (I did not bother to
   take into account the special handling they have for larger values
   -- it does not return garbage for arguments over 8192, but the
   extra precision is missing).

   Note that it is such that sinf((float)M_PI) = 8.74e-8, which is the
   surprising but correct result.

   Performance is also surprisingly good: 1.33 times faster than the
   macos vsinf SSE2 function, and 1.5 times faster than the
   __vrs4_sinf of AMD's ACML (which is only available in 64-bit). Not
   too bad for an SSE1 function (with no special tuning)!
   However, the latter libraries probably have much better handling of NaN,
   Inf, denormalized and other special arguments.

   On my Core 1 Duo, the execution of this function takes approximately 95 cycles.

   From what I have observed in experiments with the Intel AMath lib, switching to
   an SSE2 version would improve the performance by only 10%.

   Since it is based on SSE intrinsics, it has to be compiled at -O2 to
   deliver full speed.
*/
v4sf sin_ps(v4sf x) { // any x
  v4sf xmm1, xmm2 = _mm_setzero_ps(), xmm3, sign_bit, y;
#ifdef USE_SSE2
  v4si emm0, emm2;
#else
  v2si mm0, mm1, mm2, mm3;
#endif
  sign_bit = x;
  /* take the absolute value */
  x = _mm_and_ps(x, *(v4sf*)_ps_inv_sign_mask);
  /* extract the sign bit (upper one) */
  sign_bit = _mm_and_ps(sign_bit, *(v4sf*)_ps_sign_mask);

  /* scale by 4/Pi */
  y = _mm_mul_ps(x, *(v4sf*)_ps_cephes_FOPI);

#ifdef USE_SSE2
  /* store the integer part of y in emm2 */
  emm2 = _mm_cvttps_epi32(y);
  /* j=(j+1) & (~1) (see the cephes sources) */
  emm2 = _mm_add_epi32(emm2, *(v4si*)_pi32_1);
  emm2 = _mm_and_si128(emm2, *(v4si*)_pi32_inv1);
  y = _mm_cvtepi32_ps(emm2);

  /* get the swap sign flag */
  emm0 = _mm_and_si128(emm2, *(v4si*)_pi32_4);
  emm0 = _mm_slli_epi32(emm0, 29);
  /* get the polynomial selection mask:
     there is one polynomial for 0 <= x <= Pi/4
     and another one for Pi/4 < x <= Pi/2.
     Both branches will be computed.
  */
  emm2 = _mm_and_si128(emm2, *(v4si*)_pi32_2);
  emm2 = _mm_cmpeq_epi32(emm2, _mm_setzero_si128());

  v4sf swap_sign_bit = _mm_castsi128_ps(emm0);
  v4sf poly_mask = _mm_castsi128_ps(emm2);
  sign_bit = _mm_xor_ps(sign_bit, swap_sign_bit);
#else
  /* store the integer part of y in mm2:mm3 */
  xmm2 = _mm_movehl_ps(xmm2, y);
  mm2 = _mm_cvttps_pi32(y);
  mm3 = _mm_cvttps_pi32(xmm2);
  /* j=(j+1) & (~1) (see the cephes sources) */
  mm2 = _mm_add_pi32(mm2, *(v2si*)_pi32_1);
  mm3 = _mm_add_pi32(mm3, *(v2si*)_pi32_1);
  mm2 = _mm_and_si64(mm2, *(v2si*)_pi32_inv1);
  mm3 = _mm_and_si64(mm3, *(v2si*)_pi32_inv1);
  y = _mm_cvtpi32x2_ps(mm2, mm3);
  /* get the swap sign flag */
  mm0 = _mm_and_si64(mm2, *(v2si*)_pi32_4);
  mm1 = _mm_and_si64(mm3, *(v2si*)_pi32_4);
  mm0 = _mm_slli_pi32(mm0, 29);
  mm1 = _mm_slli_pi32(mm1, 29);
  /* get the polynomial selection mask */
  mm2 = _mm_and_si64(mm2, *(v2si*)_pi32_2);
  mm3 = _mm_and_si64(mm3, *(v2si*)_pi32_2);
  mm2 = _mm_cmpeq_pi32(mm2, _mm_setzero_si64());
  mm3 = _mm_cmpeq_pi32(mm3, _mm_setzero_si64());
  v4sf swap_sign_bit, poly_mask;
  COPY_MM_TO_XMM(mm0, mm1, swap_sign_bit);
  COPY_MM_TO_XMM(mm2, mm3, poly_mask);
  sign_bit = _mm_xor_ps(sign_bit, swap_sign_bit);
  _mm_empty(); /* good-bye mmx */
#endif

  /* The magic pass: "Extended precision modular arithmetic"
     x = ((x - y * DP1) - y * DP2) - y * DP3; */
  xmm1 = *(v4sf*)_ps_minus_cephes_DP1;
  xmm2 = *(v4sf*)_ps_minus_cephes_DP2;
  xmm3 = *(v4sf*)_ps_minus_cephes_DP3;
  xmm1 = _mm_mul_ps(y, xmm1);
  xmm2 = _mm_mul_ps(y, xmm2);
  xmm3 = _mm_mul_ps(y, xmm3);
  x = _mm_add_ps(x, xmm1);
  x = _mm_add_ps(x, xmm2);
  x = _mm_add_ps(x, xmm3);

  /* Evaluate the first polynomial (0 <= x <= Pi/4) */
  y = *(v4sf*)_ps_coscof_p0;
  v4sf z = _mm_mul_ps(x,x);

  y = _mm_mul_ps(y, z);
  y = _mm_add_ps(y, *(v4sf*)_ps_coscof_p1);
  y = _mm_mul_ps(y, z);
  y = _mm_add_ps(y, *(v4sf*)_ps_coscof_p2);
  y = _mm_mul_ps(y, z);
  y = _mm_mul_ps(y, z);
  v4sf tmp = _mm_mul_ps(z, *(v4sf*)_ps_0p5);
  y = _mm_sub_ps(y, tmp);
  y = _mm_add_ps(y, *(v4sf*)_ps_1);

  /* Evaluate the second polynomial (Pi/4 <= x <= Pi/2) */
  v4sf y2 = *(v4sf*)_ps_sincof_p0;
  y2 = _mm_mul_ps(y2, z);
  y2 = _mm_add_ps(y2, *(v4sf*)_ps_sincof_p1);
  y2 = _mm_mul_ps(y2, z);
  y2 = _mm_add_ps(y2, *(v4sf*)_ps_sincof_p2);
  y2 = _mm_mul_ps(y2, z);
  y2 = _mm_mul_ps(y2, x);
  y2 = _mm_add_ps(y2, x);

  /* select the correct result from the two polynomials */
  xmm3 = poly_mask;
  y2 = _mm_and_ps(xmm3, y2);
  y = _mm_andnot_ps(xmm3, y);
  y = _mm_add_ps(y, y2);
  /* update the sign */
  y = _mm_xor_ps(y, sign_bit);
  return y;
}
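/* Usage sketch for sin_ps (editor's addition, not part of the original file),
   illustrating the sinf((float)M_PI) remark in the comment above.
   The helper name sin_ps_example is hypothetical. */
static inline void sin_ps_example(void) {
  float out[4];
  v4sf vx = _mm_setr_ps(0.0f, 0.5235988f /* pi/6 */, 1.5707964f /* pi/2 */,
                        3.1415927f /* (float)M_PI */);
  _mm_storeu_ps(out, sin_ps(vx));
  /* out ~= { 0.0f, 0.5f, 1.0f, last lane of magnitude ~8.74e-8f } */
}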
/* almost the same as sin_ps */
v4sf cos_ps(v4sf x) { // any x
  v4sf xmm1, xmm2 = _mm_setzero_ps(), xmm3, y;
#ifdef USE_SSE2
  v4si emm0, emm2;
#else
  v2si mm0, mm1, mm2, mm3;
#endif
  /* take the absolute value */
  x = _mm_and_ps(x, *(v4sf*)_ps_inv_sign_mask);

  /* scale by 4/Pi */
  y = _mm_mul_ps(x, *(v4sf*)_ps_cephes_FOPI);

#ifdef USE_SSE2
  /* store the integer part of y in emm2 */
  emm2 = _mm_cvttps_epi32(y);
  /* j=(j+1) & (~1) (see the cephes sources) */
  emm2 = _mm_add_epi32(emm2, *(v4si*)_pi32_1);
  emm2 = _mm_and_si128(emm2, *(v4si*)_pi32_inv1);
  y = _mm_cvtepi32_ps(emm2);

  emm2 = _mm_sub_epi32(emm2, *(v4si*)_pi32_2);

  /* get the swap sign flag */
  emm0 = _mm_andnot_si128(emm2, *(v4si*)_pi32_4);
  emm0 = _mm_slli_epi32(emm0, 29);
  /* get the polynomial selection mask */
  emm2 = _mm_and_si128(emm2, *(v4si*)_pi32_2);
  emm2 = _mm_cmpeq_epi32(emm2, _mm_setzero_si128());

  v4sf sign_bit = _mm_castsi128_ps(emm0);
  v4sf poly_mask = _mm_castsi128_ps(emm2);
#else
  /* store the integer part of y in mm2:mm3 */
  xmm2 = _mm_movehl_ps(xmm2, y);
  mm2 = _mm_cvttps_pi32(y);
  mm3 = _mm_cvttps_pi32(xmm2);

  /* j=(j+1) & (~1) (see the cephes sources) */
  mm2 = _mm_add_pi32(mm2, *(v2si*)_pi32_1);
  mm3 = _mm_add_pi32(mm3, *(v2si*)_pi32_1);
  mm2 = _mm_and_si64(mm2, *(v2si*)_pi32_inv1);
  mm3 = _mm_and_si64(mm3, *(v2si*)_pi32_inv1);

  y = _mm_cvtpi32x2_ps(mm2, mm3);

  mm2 = _mm_sub_pi32(mm2, *(v2si*)_pi32_2);
  mm3 = _mm_sub_pi32(mm3, *(v2si*)_pi32_2);

  /* get the swap sign flag in mm0:mm1 and the
     polynomial selection mask in mm2:mm3 */
  mm0 = _mm_andnot_si64(mm2, *(v2si*)_pi32_4);
  mm1 = _mm_andnot_si64(mm3, *(v2si*)_pi32_4);
  mm0 = _mm_slli_pi32(mm0, 29);
  mm1 = _mm_slli_pi32(mm1, 29);

  mm2 = _mm_and_si64(mm2, *(v2si*)_pi32_2);
  mm3 = _mm_and_si64(mm3, *(v2si*)_pi32_2);

  mm2 = _mm_cmpeq_pi32(mm2, _mm_setzero_si64());
  mm3 = _mm_cmpeq_pi32(mm3, _mm_setzero_si64());

  v4sf sign_bit, poly_mask;
  COPY_MM_TO_XMM(mm0, mm1, sign_bit);
  COPY_MM_TO_XMM(mm2, mm3, poly_mask);
  _mm_empty(); /* good-bye mmx */
#endif

  /* The magic pass: "Extended precision modular arithmetic"
     x = ((x - y * DP1) - y * DP2) - y * DP3; */
  xmm1 = *(v4sf*)_ps_minus_cephes_DP1;
  xmm2 = *(v4sf*)_ps_minus_cephes_DP2;
  xmm3 = *(v4sf*)_ps_minus_cephes_DP3;
  xmm1 = _mm_mul_ps(y, xmm1);
  xmm2 = _mm_mul_ps(y, xmm2);
  xmm3 = _mm_mul_ps(y, xmm3);
  x = _mm_add_ps(x, xmm1);
  x = _mm_add_ps(x, xmm2);
  x = _mm_add_ps(x, xmm3);

  /* Evaluate the first polynomial (0 <= x <= Pi/4) */
  y = *(v4sf*)_ps_coscof_p0;
  v4sf z = _mm_mul_ps(x,x);

  y = _mm_mul_ps(y, z);
  y = _mm_add_ps(y, *(v4sf*)_ps_coscof_p1);
  y = _mm_mul_ps(y, z);
  y = _mm_add_ps(y, *(v4sf*)_ps_coscof_p2);
  y = _mm_mul_ps(y, z);
  y = _mm_mul_ps(y, z);
  v4sf tmp = _mm_mul_ps(z, *(v4sf*)_ps_0p5);
  y = _mm_sub_ps(y, tmp);
  y = _mm_add_ps(y, *(v4sf*)_ps_1);

  /* Evaluate the second polynomial (Pi/4 <= x <= Pi/2) */
  v4sf y2 = *(v4sf*)_ps_sincof_p0;
  y2 = _mm_mul_ps(y2, z);
  y2 = _mm_add_ps(y2, *(v4sf*)_ps_sincof_p1);
  y2 = _mm_mul_ps(y2, z);
  y2 = _mm_add_ps(y2, *(v4sf*)_ps_sincof_p2);
  y2 = _mm_mul_ps(y2, z);
  y2 = _mm_mul_ps(y2, x);
  y2 = _mm_add_ps(y2, x);

  /* select the correct result from the two polynomials */
  xmm3 = poly_mask;
  y2 = _mm_and_ps(xmm3, y2);
  y = _mm_andnot_ps(xmm3, y);
  y = _mm_add_ps(y, y2);
  /* update the sign */
  y = _mm_xor_ps(y, sign_bit);
  return y;
}
/* since sin_ps and cos_ps are almost identical, sincos_ps could replace both of them..
   it is almost as fast, and gives you a free cosine with your sine */
void sincos_ps(v4sf x, v4sf *s, v4sf *c) {
  v4sf xmm1, xmm2, xmm3 = _mm_setzero_ps(), sign_bit_sin, y;
#ifdef USE_SSE2
  v4si emm0, emm2, emm4;
#else
  v2si mm0, mm1, mm2, mm3, mm4, mm5;
#endif
  sign_bit_sin = x;
  /* take the absolute value */
  x = _mm_and_ps(x, *(v4sf*)_ps_inv_sign_mask);
  /* extract the sign bit (upper one) */
  sign_bit_sin = _mm_and_ps(sign_bit_sin, *(v4sf*)_ps_sign_mask);

  /* scale by 4/Pi */
  y = _mm_mul_ps(x, *(v4sf*)_ps_cephes_FOPI);

#ifdef USE_SSE2
  /* store the integer part of y in emm2 */
  emm2 = _mm_cvttps_epi32(y);
  /* j=(j+1) & (~1) (see the cephes sources) */
  emm2 = _mm_add_epi32(emm2, *(v4si*)_pi32_1);
  emm2 = _mm_and_si128(emm2, *(v4si*)_pi32_inv1);
  y = _mm_cvtepi32_ps(emm2);

  emm4 = emm2;

  /* get the swap sign flag for the sine */
  emm0 = _mm_and_si128(emm2, *(v4si*)_pi32_4);
  emm0 = _mm_slli_epi32(emm0, 29);
  v4sf swap_sign_bit_sin = _mm_castsi128_ps(emm0);

  /* get the polynomial selection mask for the sine */
  emm2 = _mm_and_si128(emm2, *(v4si*)_pi32_2);
  emm2 = _mm_cmpeq_epi32(emm2, _mm_setzero_si128());
  v4sf poly_mask = _mm_castsi128_ps(emm2);
#else
  /* store the integer part of y in mm2:mm3 */
  xmm3 = _mm_movehl_ps(xmm3, y);
  mm2 = _mm_cvttps_pi32(y);
  mm3 = _mm_cvttps_pi32(xmm3);

  /* j=(j+1) & (~1) (see the cephes sources) */
  mm2 = _mm_add_pi32(mm2, *(v2si*)_pi32_1);
  mm3 = _mm_add_pi32(mm3, *(v2si*)_pi32_1);
  mm2 = _mm_and_si64(mm2, *(v2si*)_pi32_inv1);
  mm3 = _mm_and_si64(mm3, *(v2si*)_pi32_inv1);

  y = _mm_cvtpi32x2_ps(mm2, mm3);

  mm4 = mm2;
  mm5 = mm3;

  /* get the swap sign flag for the sine */
  mm0 = _mm_and_si64(mm2, *(v2si*)_pi32_4);
  mm1 = _mm_and_si64(mm3, *(v2si*)_pi32_4);
  mm0 = _mm_slli_pi32(mm0, 29);
  mm1 = _mm_slli_pi32(mm1, 29);
  v4sf swap_sign_bit_sin;
  COPY_MM_TO_XMM(mm0, mm1, swap_sign_bit_sin);

  /* get the polynomial selection mask for the sine */
  mm2 = _mm_and_si64(mm2, *(v2si*)_pi32_2);
  mm3 = _mm_and_si64(mm3, *(v2si*)_pi32_2);
  mm2 = _mm_cmpeq_pi32(mm2, _mm_setzero_si64());
  mm3 = _mm_cmpeq_pi32(mm3, _mm_setzero_si64());
  v4sf poly_mask;
  COPY_MM_TO_XMM(mm2, mm3, poly_mask);
#endif

  /* The magic pass: "Extended precision modular arithmetic"
     x = ((x - y * DP1) - y * DP2) - y * DP3; */
  xmm1 = *(v4sf*)_ps_minus_cephes_DP1;
  xmm2 = *(v4sf*)_ps_minus_cephes_DP2;
  xmm3 = *(v4sf*)_ps_minus_cephes_DP3;
  xmm1 = _mm_mul_ps(y, xmm1);
  xmm2 = _mm_mul_ps(y, xmm2);
  xmm3 = _mm_mul_ps(y, xmm3);
  x = _mm_add_ps(x, xmm1);
  x = _mm_add_ps(x, xmm2);
  x = _mm_add_ps(x, xmm3);

#ifdef USE_SSE2
  /* get the sign flag for the cosine */
  emm4 = _mm_sub_epi32(emm4, *(v4si*)_pi32_2);
  emm4 = _mm_andnot_si128(emm4, *(v4si*)_pi32_4);
  emm4 = _mm_slli_epi32(emm4, 29);
  v4sf sign_bit_cos = _mm_castsi128_ps(emm4);
#else
  /* get the sign flag for the cosine */
  mm4 = _mm_sub_pi32(mm4, *(v2si*)_pi32_2);
  mm5 = _mm_sub_pi32(mm5, *(v2si*)_pi32_2);
  mm4 = _mm_andnot_si64(mm4, *(v2si*)_pi32_4);
  mm5 = _mm_andnot_si64(mm5, *(v2si*)_pi32_4);
  mm4 = _mm_slli_pi32(mm4, 29);
  mm5 = _mm_slli_pi32(mm5, 29);
  v4sf sign_bit_cos;
  COPY_MM_TO_XMM(mm4, mm5, sign_bit_cos);
  _mm_empty(); /* good-bye mmx */
#endif

  sign_bit_sin = _mm_xor_ps(sign_bit_sin, swap_sign_bit_sin);

  /* Evaluate the first polynomial (0 <= x <= Pi/4) */
  v4sf z = _mm_mul_ps(x,x);
  y = *(v4sf*)_ps_coscof_p0;
  y = _mm_mul_ps(y, z);
  y = _mm_add_ps(y, *(v4sf*)_ps_coscof_p1);
  y = _mm_mul_ps(y, z);
  y = _mm_add_ps(y, *(v4sf*)_ps_coscof_p2);
  y = _mm_mul_ps(y, z);
  y = _mm_mul_ps(y, z);
  v4sf tmp = _mm_mul_ps(z, *(v4sf*)_ps_0p5);
  y = _mm_sub_ps(y, tmp);
  y = _mm_add_ps(y, *(v4sf*)_ps_1);

  /* Evaluate the second polynomial (Pi/4 <= x <= Pi/2) */
  v4sf y2 = *(v4sf*)_ps_sincof_p0;
  y2 = _mm_mul_ps(y2, z);
  y2 = _mm_add_ps(y2, *(v4sf*)_ps_sincof_p1);
  y2 = _mm_mul_ps(y2, z);
  y2 = _mm_add_ps(y2, *(v4sf*)_ps_sincof_p2);
  y2 = _mm_mul_ps(y2, z);
  y2 = _mm_mul_ps(y2, x);
  y2 = _mm_add_ps(y2, x);

  /* select the correct result from the two polynomials */
  xmm3 = poly_mask;
  v4sf ysin2 = _mm_and_ps(xmm3, y2);
  v4sf ysin1 = _mm_andnot_ps(xmm3, y);
  y2 = _mm_sub_ps(y2, ysin2);
  y = _mm_sub_ps(y, ysin1);

  xmm1 = _mm_add_ps(ysin1, ysin2);
  xmm2 = _mm_add_ps(y, y2);

  /* update the sign */
  *s = _mm_xor_ps(xmm1, sign_bit_sin);
  *c = _mm_xor_ps(xmm2, sign_bit_cos);
}
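/* Usage sketch for sincos_ps (editor's addition, not part of the original file):
   one call yields both the sine and the cosine of each lane, which is the
   cheaper option whenever both are needed (e.g. when building rotations).
   The helper name sincos_ps_example is hypothetical. */
static inline void sincos_ps_example(void) {
  float s[4], c[4];
  v4sf vs, vc;
  v4sf angles = _mm_setr_ps(0.0f, 0.7853982f /* pi/4 */,
                            1.5707964f /* pi/2 */, 3.1415927f /* pi */);
  sincos_ps(angles, &vs, &vc);
  _mm_storeu_ps(s, vs);  /* s ~= { 0.0f, 0.707f, 1.0f, ~0.0f } */
  _mm_storeu_ps(c, vc);  /* c ~= { 1.0f, 0.707f, ~0.0f, -1.0f } */
}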