bid_sqrt_macros.h
/* Copyright (C) 2007-2022 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.

Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.

You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
  18. #ifndef _SQRT_MACROS_H_
  19. #define _SQRT_MACROS_H_
  20. #define FENCE __fence
  21. #if DOUBLE_EXTENDED_ON
  22. extern BINARY80 SQRT80 (BINARY80);
  23. __BID_INLINE__ UINT64
  24. short_sqrt128 (UINT128 A10) {
  25. BINARY80 lx, ly, l64;
  26. int_float f64;
  27. // 2^64
  28. f64.i = 0x5f800000;
  29. l64 = (BINARY80) f64.d;
  30. lx = (BINARY80) A10.w[1] * l64 + (BINARY80) A10.w[0];
  31. ly = SQRT80 (lx);
  32. return (UINT64) ly;
  33. }
  34. __BID_INLINE__ void
  35. long_sqrt128 (UINT128 * pCS, UINT256 C256) {
  36. UINT256 C4;
  37. UINT128 CS;
  38. UINT64 X;
  39. SINT64 SE;
  40. BINARY80 l64, lm64, l128, lxL, lx, ly, lS, lSH, lSL, lE, l3, l2,
  41. l1, l0, lp, lCl;
  42. int_float fx, f64, fm64;
  43. int *ple = (int *) &lx;
  44. // 2^64
  45. f64.i = 0x5f800000;
  46. l64 = (BINARY80) f64.d;
  47. l128 = l64 * l64;
  48. lx = l3 = (BINARY80) C256.w[3] * l64 * l128;
  49. l2 = (BINARY80) C256.w[2] * l128;
  50. lx = FENCE (lx + l2);
  51. l1 = (BINARY80) C256.w[1] * l64;
  52. lx = FENCE (lx + l1);
  53. l0 = (BINARY80) C256.w[0];
  54. lx = FENCE (lx + l0);
  55. // sqrt(C256)
  56. lS = SQRT80 (lx);
  57. // get coefficient
  58. // 2^(-64)
  59. fm64.i = 0x1f800000;
  60. lm64 = (BINARY80) fm64.d;
  61. CS.w[1] = (UINT64) (lS * lm64);
  62. CS.w[0] = (UINT64) (lS - (BINARY80) CS.w[1] * l64);
  63. ///////////////////////////////////////
  64. // CAUTION!
  65. // little endian code only
  66. // add solution for big endian
  67. //////////////////////////////////////
  68. lSH = lS;
  69. *((UINT64 *) & lSH) &= 0xffffffff00000000ull;
  70. // correction for C256 rounding
  71. lCl = FENCE (l3 - lx);
  72. lCl = FENCE (lCl + l2);
  73. lCl = FENCE (lCl + l1);
  74. lCl = FENCE (lCl + l0);
  75. lSL = lS - lSH;
  76. //////////////////////////////////////////
  77. // Watch for compiler re-ordering
  78. //
  79. /////////////////////////////////////////
  80. // C256-S^2
  81. lxL = FENCE (lx - lSH * lSH);
  82. lp = lSH * lSL;
  83. lp += lp;
  84. lxL = FENCE (lxL - lp);
  85. lSL *= lSL;
  86. lxL = FENCE (lxL - lSL);
  87. lCl += lxL;
  88. // correction term
  89. lE = lCl / (lS + lS);
  90. // get low part of coefficient
  91. X = CS.w[0];
  92. if (lCl >= 0) {
  93. SE = (SINT64) (lE);
  94. CS.w[0] += SE;
  95. if (CS.w[0] < X)
  96. CS.w[1]++;
  97. } else {
  98. SE = (SINT64) (-lE);
  99. CS.w[0] -= SE;
  100. if (CS.w[0] > X)
  101. CS.w[1]--;
  102. }
  103. pCS->w[0] = CS.w[0];
  104. pCS->w[1] = CS.w[1];
  105. }
  106. #else
  107. extern double sqrt (double);
// Truncated 64-bit square root of the 128-bit integer A10 (binary64
// fallback path, used when no 80-bit extended type is available).
//
// Method: seed with a double-precision reciprocal square root
// RS ~= 1/sqrt(A10), then refine in fixed-point integer arithmetic:
// sqrt(A) ~= A*RS * (1 - eps/2) where eps = A*RS^2 - 1.  The shift
// amounts below depend on the exact scaling of the multi-word
// products; the statement order is load-bearing.
__BID_INLINE__ UINT64
short_sqrt128 (UINT128 A10) {
  UINT256 ARS, ARS0, AE0, AE, S;
  UINT64 MY, ES, CY;
  double lx, l64;
  int_double f64, ly;
  int ey, k;
  // 2^64 (0x43f0000000000000 is the binary64 encoding of 2^64)
  f64.i = 0x43f0000000000000ull;
  l64 = f64.d;
  // lx ~= A10 = hi*2^64 + lo, as a double
  lx = (double) A10.w[1] * l64 + (double) A10.w[0];
  // RS ~= 1/sqrt(A10)
  ly.d = 1.0 / sqrt (lx);
  // MY: 52-bit stored significand of RS with the implicit bit OR'd
  // back in; ey: negated unbiased exponent of RS
  MY = (ly.i & 0x000fffffffffffffull) | 0x0010000000000000ull;
  ey = 0x3ff - (ly.i >> 52);
  // A10*RS^2 (close to 1 up to scaling; its deviation is eps)
  __mul_64x128_to_192 (ARS0, MY, A10);
  __mul_64x192_to_256 (ARS, MY, ARS0);
  // shr by 2*ey+40, to get a 64-bit value
  k = (ey << 1) + 104 - 64;
  if (k >= 128) {
    if (k > 128)
      ES = (ARS.w[2] >> (k - 128)) | (ARS.w[3] << (192 - k));
    else
      ES = ARS.w[2];
  } else {
    // shift amount below 128: drop a limb first if k >= 64, then do
    // the remaining sub-64-bit shift
    if (k >= 64) {
      ARS.w[0] = ARS.w[1];
      ARS.w[1] = ARS.w[2];
      k -= 64;
    }
    if (k) {
      __shr_128 (ARS, ARS, k);
    }
    ES = ARS.w[0];
  }
  // signed halving: ES now holds eps/2
  ES = ((SINT64) ES) >> 1;
  if (((SINT64) ES) < 0) {
    // eps < 0 (RS slightly large in magnitude terms): correction is
    // added — S = A*RS + A*RS*|eps/2|
    ES = -ES;
    // A*RS*eps (scaled by 2^64)
    __mul_64x192_to_256 (AE0, ES, ARS0);
    AE.w[0] = AE0.w[1];
    AE.w[1] = AE0.w[2];
    AE.w[2] = AE0.w[3];
    __add_carry_out (S.w[0], CY, ARS0.w[0], AE.w[0]);
    __add_carry_in_out (S.w[1], CY, ARS0.w[1], AE.w[1], CY);
    S.w[2] = ARS0.w[2] + AE.w[2] + CY;
  } else {
    // eps >= 0: S = A*RS - A*RS*(eps/2)
    // A*RS*eps (scaled by 2^64)
    __mul_64x192_to_256 (AE0, ES, ARS0);
    AE.w[0] = AE0.w[1];
    AE.w[1] = AE0.w[2];
    AE.w[2] = AE0.w[3];
    __sub_borrow_out (S.w[0], CY, ARS0.w[0], AE.w[0]);
    __sub_borrow_in_out (S.w[1], CY, ARS0.w[1], AE.w[1], CY);
    S.w[2] = ARS0.w[2] - AE.w[2] - CY;
  }
  // undo the 2^(ey+51) scaling of S (limb moves for multiples of 64,
  // __shr_128 for the remainder)
  k = ey + 51;
  if (k >= 64) {
    if (k >= 128) {
      S.w[0] = S.w[2];
      S.w[1] = 0;
      k -= 128;
    } else {
      S.w[0] = S.w[1];
      S.w[1] = S.w[2];
    }
    k -= 64;
  }
  if (k) {
    __shr_128 (S, S, k);
  }
  // round to nearest on the final guard bit: (S + 1) >> 1
  return (UINT64) ((S.w[0] + 1) >> 1);
}
// Compute the integer square root of the 256-bit integer C256 and
// return the 128-bit result through pCS (binary64 fallback path).
//
// Method: seed with RS ~= 1/sqrt(C256) in double precision, then
// refine in multi-word fixed point with both a first-order term
// (A*RS*eps/2) and a second-order term (A*RS * 3/2 * (eps/2)^2).
// Shift counts encode the exact scaling of each product; the comment
// on k below records the range the caller is expected to supply
// (result in [10^33, 10^34)).
__BID_INLINE__ void
long_sqrt128 (UINT128 * pCS, UINT256 C256) {
  UINT512 ARS0, ARS;
  UINT256 ARS00, AE, AE2, S;
  UINT128 ES, ES2, ARS1;
  UINT64 ES32, CY, MY;
  double l64, l128, lx, l2, l1, l0;
  int_double f64, ly;
  int ey, k, k2;
  // 2^64 (0x43f0000000000000 is the binary64 encoding of 2^64)
  f64.i = 0x43f0000000000000ull;
  l64 = f64.d;
  l128 = l64 * l64;
  // lx ~= C256, limbs summed high-to-low; FENCE pins the summation
  // order against compiler re-association
  lx = (double) C256.w[3] * l64 * l128;
  l2 = (double) C256.w[2] * l128;
  lx = FENCE (lx + l2);
  l1 = (double) C256.w[1] * l64;
  lx = FENCE (lx + l1);
  l0 = (double) C256.w[0];
  lx = FENCE (lx + l0);
  // sqrt(C256): RS ~= 1/sqrt(C256)
  ly.d = 1.0 / sqrt (lx);
  // significand (implicit bit restored) and negated exponent of RS
  MY = (ly.i & 0x000fffffffffffffull) | 0x0010000000000000ull;
  ey = 0x3ff - (ly.i >> 52);
  // A10*RS^2, scaled by 2^(2*ey+104)
  __mul_64x256_to_320 (ARS0, MY, C256);
  __mul_64x320_to_384 (ARS, MY, ARS0);
  // shr by k=(2*ey+104)-128
  // expect k is in the range (192, 256) if result in [10^33, 10^34)
  // apply an additional signed shift by 1 at the same time (to get eps=eps0/2)
  k = (ey << 1) + 104 - 128 - 192;
  k2 = 64 - k;
  // ES = eps/2 as a signed 128-bit fixed-point value
  ES.w[0] = (ARS.w[3] >> (k + 1)) | (ARS.w[4] << (k2 - 1));
  ES.w[1] = (ARS.w[4] >> k) | (ARS.w[5] << k2);
  ES.w[1] = ((SINT64) ES.w[1]) >> 1;
  // A*RS >> 192 (for error term computation)
  ARS1.w[0] = ARS0.w[3];
  ARS1.w[1] = ARS0.w[4];
  // A*RS>>64
  ARS00.w[0] = ARS0.w[1];
  ARS00.w[1] = ARS0.w[2];
  ARS00.w[2] = ARS0.w[3];
  ARS00.w[3] = ARS0.w[4];
  if (((SINT64) ES.w[1]) < 0) {
    // eps < 0: 128-bit two's-complement negation of ES, then the
    // first-order correction is ADDED
    ES.w[0] = -ES.w[0];
    ES.w[1] = -ES.w[1];
    if (ES.w[0])
      ES.w[1]--;
    // A*RS*eps
    __mul_128x128_to_256 (AE, ES, ARS1);
    __add_carry_out (S.w[0], CY, ARS00.w[0], AE.w[0]);
    __add_carry_in_out (S.w[1], CY, ARS00.w[1], AE.w[1], CY);
    __add_carry_in_out (S.w[2], CY, ARS00.w[2], AE.w[2], CY);
    S.w[3] = ARS00.w[3] + AE.w[3] + CY;
  } else {
    // eps >= 0: first-order correction is SUBTRACTED
    // A*RS*eps
    __mul_128x128_to_256 (AE, ES, ARS1);
    __sub_borrow_out (S.w[0], CY, ARS00.w[0], AE.w[0]);
    __sub_borrow_in_out (S.w[1], CY, ARS00.w[1], AE.w[1], CY);
    __sub_borrow_in_out (S.w[2], CY, ARS00.w[2], AE.w[2], CY);
    S.w[3] = ARS00.w[3] - AE.w[3] - CY;
  }
  // 3/2*eps^2, scaled by 2^128 (second-order term, always added since
  // eps^2 >= 0)
  ES32 = ES.w[1] + (ES.w[1] >> 1);
  __mul_64x64_to_128 (ES2, ES32, ES.w[1]);
  // A*RS*3/2*eps^2
  __mul_128x128_to_256 (AE2, ES2, ARS1);
  // result, scaled by 2^(ey+52-64)
  __add_carry_out (S.w[0], CY, S.w[0], AE2.w[0]);
  __add_carry_in_out (S.w[1], CY, S.w[1], AE2.w[1], CY);
  __add_carry_in_out (S.w[2], CY, S.w[2], AE2.w[2], CY);
  S.w[3] = S.w[3] + AE2.w[3] + CY;
  // undo the remaining scaling; k in (0, 64)
  k = ey + 51 - 128;
  k2 = 64 - k;
  S.w[0] = (S.w[1] >> k) | (S.w[2] << k2);
  S.w[1] = (S.w[2] >> k) | (S.w[3] << k2);
  // round to nearest: add 1 in the guard position (with carry), then
  // drop the guard bit while packing the 128-bit result
  S.w[0]++;
  if (!S.w[0])
    S.w[1]++;
  pCS->w[0] = (S.w[1] << 63) | (S.w[0] >> 1);
  pCS->w[1] = S.w[1] >> 1;
}
  265. #endif
  266. #endif