
// +build arm64,!noasm

#include "textflag.h"
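
// fp503ConditionalSwap swaps x and y in constant time when choice is 1
// and leaves them unchanged when it is 0: every limb is selected via
// CSEL from flags set once, so the instruction and memory-access
// sequence does not depend on choice.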
TEXT ·fp503ConditionalSwap(SB), NOSPLIT, $0-17
	MOVD x+0(FP), R0
	MOVD y+8(FP), R1
	MOVB choice+16(FP), R2
	// Set flags
	// Any non-zero choice (not only 1) will swap completely
	CMP $0, R2
	LDP 0(R0), (R3, R4)
	LDP 0(R1), (R5, R6)
	CSEL EQ, R3, R5, R7
	CSEL EQ, R4, R6, R8
	STP (R7, R8), 0(R0)
	CSEL NE, R3, R5, R9
	CSEL NE, R4, R6, R10
	STP (R9, R10), 0(R1)
	LDP 16(R0), (R3, R4)
	LDP 16(R1), (R5, R6)
	CSEL EQ, R3, R5, R7
	CSEL EQ, R4, R6, R8
	STP (R7, R8), 16(R0)
	CSEL NE, R3, R5, R9
	CSEL NE, R4, R6, R10
	STP (R9, R10), 16(R1)
	LDP 32(R0), (R3, R4)
	LDP 32(R1), (R5, R6)
	CSEL EQ, R3, R5, R7
	CSEL EQ, R4, R6, R8
	STP (R7, R8), 32(R0)
	CSEL NE, R3, R5, R9
	CSEL NE, R4, R6, R10
	STP (R9, R10), 32(R1)
	LDP 48(R0), (R3, R4)
	LDP 48(R1), (R5, R6)
	CSEL EQ, R3, R5, R7
	CSEL EQ, R4, R6, R8
	STP (R7, R8), 48(R0)
	CSEL NE, R3, R5, R9
	CSEL NE, R4, R6, R10
	STP (R9, R10), 48(R1)
	RET
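
// fp503AddReduced computes z = x + y, with the result in [0, 2*p503):
// the operands are added, 2*p503 is subtracted, and 2*p503 is added
// back masked by the borrow, all without branching.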
TEXT ·fp503AddReduced(SB), NOSPLIT, $0-24
	MOVD z+0(FP), R2
	MOVD x+8(FP), R0
	MOVD y+16(FP), R1
	// Load first summand into R3-R10
	// Add first summand and second summand and store result in R3-R10
	LDP 0(R0), (R3, R4)
	LDP 0(R1), (R11, R12)
	LDP 16(R0), (R5, R6)
	LDP 16(R1), (R13, R14)
	ADDS R11, R3
	ADCS R12, R4
	ADCS R13, R5
	ADCS R14, R6
	LDP 32(R0), (R7, R8)
	LDP 32(R1), (R11, R12)
	LDP 48(R0), (R9, R10)
	LDP 48(R1), (R13, R14)
	ADCS R11, R7
	ADCS R12, R8
	ADCS R13, R9
	ADC R14, R10
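	// Limbs 1 and 2 of 2*p503 are both 2^64-1, so limb 2 is never
	// loaded: R12 stands in for both and the constant is read at
	// offsets 0, 24, 40 and 56 only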
	// Subtract 2 * p503 in R11-R17 from the result in R3-R10
	LDP ·p503x2+0(SB), (R11, R12)
	LDP ·p503x2+24(SB), (R13, R14)
	SUBS R11, R3
	SBCS R12, R4
	LDP ·p503x2+40(SB), (R15, R16)
	SBCS R12, R5
	SBCS R13, R6
	MOVD ·p503x2+56(SB), R17
	SBCS R14, R7
	SBCS R15, R8
	SBCS R16, R9
	SBCS R17, R10
	SBC ZR, ZR, R19
	// If x + y - 2 * p503 < 0, R19 is an all-ones mask and 2 * p503 is added back
	AND R19, R11
	AND R19, R12
	AND R19, R13
	AND R19, R14
	AND R19, R15
	AND R19, R16
	AND R19, R17
	ADDS R11, R3
	ADCS R12, R4
	STP (R3, R4), 0(R2)
	ADCS R12, R5
	ADCS R13, R6
	STP (R5, R6), 16(R2)
	ADCS R14, R7
	ADCS R15, R8
	STP (R7, R8), 32(R2)
	ADCS R16, R9
	ADC R17, R10
	STP (R9, R10), 48(R2)
	RET
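
// fp503SubReduced computes z = x - y, with the result in [0, 2*p503):
// after the 8-limb subtraction, 2*p503 masked by the borrow is added
// back, so no branch is needed.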
TEXT ·fp503SubReduced(SB), NOSPLIT, $0-24
	MOVD z+0(FP), R2
	MOVD x+8(FP), R0
	MOVD y+16(FP), R1
	// Load x into R3-R10
	// Subtract y from x and store result in R3-R10
	LDP 0(R0), (R3, R4)
	LDP 0(R1), (R11, R12)
	LDP 16(R0), (R5, R6)
	LDP 16(R1), (R13, R14)
	SUBS R11, R3
	SBCS R12, R4
	SBCS R13, R5
	SBCS R14, R6
	LDP 32(R0), (R7, R8)
	LDP 32(R1), (R11, R12)
	LDP 48(R0), (R9, R10)
	LDP 48(R1), (R13, R14)
	SBCS R11, R7
	SBCS R12, R8
	SBCS R13, R9
	SBCS R14, R10
	SBC ZR, ZR, R19
	// If x - y < 0, R19 is an all-ones mask and 2 * p503 is added back
	LDP ·p503x2+0(SB), (R11, R12)
	LDP ·p503x2+24(SB), (R13, R14)
	AND R19, R11
	AND R19, R12
	LDP ·p503x2+40(SB), (R15, R16)
	AND R19, R13
	AND R19, R14
	MOVD ·p503x2+56(SB), R17
	AND R19, R15
	AND R19, R16
	AND R19, R17
	ADDS R11, R3
	ADCS R12, R4
	STP (R3, R4), 0(R2)
	ADCS R12, R5
	ADCS R13, R6
	STP (R5, R6), 16(R2)
	ADCS R14, R7
	ADCS R15, R8
	STP (R7, R8), 32(R2)
	ADCS R16, R9
	ADC R17, R10
	STP (R9, R10), 48(R2)
	RET
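
// fp503AddLazy computes z = x + y with no reduction step. The reduced
// routines above keep field elements below 2*p503 < 2^504, so the
// 512-bit sum cannot overflow.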
TEXT ·fp503AddLazy(SB), NOSPLIT, $0-24
	MOVD z+0(FP), R2
	MOVD x+8(FP), R0
	MOVD y+16(FP), R1
	// Load first summand into R3-R10
	// Add first summand and second summand and store result in R3-R10
	LDP 0(R0), (R3, R4)
	LDP 0(R1), (R11, R12)
	LDP 16(R0), (R5, R6)
	LDP 16(R1), (R13, R14)
	ADDS R11, R3
	ADCS R12, R4
	STP (R3, R4), 0(R2)
	ADCS R13, R5
	ADCS R14, R6
	STP (R5, R6), 16(R2)
	LDP 32(R0), (R7, R8)
	LDP 32(R1), (R11, R12)
	LDP 48(R0), (R9, R10)
	LDP 48(R1), (R13, R14)
	ADCS R11, R7
	ADCS R12, R8
	STP (R7, R8), 32(R2)
	ADCS R13, R9
	ADC R14, R10
	STP (R9, R10), 48(R2)
	RET
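
// fp503X2AddLazy computes z = x + y for 16-limb (double-width) values,
// e.g. unreduced products awaiting Montgomery reduction; no reduction
// is performed.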
TEXT ·fp503X2AddLazy(SB), NOSPLIT, $0-24
	MOVD z+0(FP), R2
	MOVD x+8(FP), R0
	MOVD y+16(FP), R1
	LDP 0(R0), (R3, R4)
	LDP 0(R1), (R11, R12)
	LDP 16(R0), (R5, R6)
	LDP 16(R1), (R13, R14)
	ADDS R11, R3
	ADCS R12, R4
	STP (R3, R4), 0(R2)
	ADCS R13, R5
	ADCS R14, R6
	STP (R5, R6), 16(R2)
	LDP 32(R0), (R7, R8)
	LDP 32(R1), (R11, R12)
	LDP 48(R0), (R9, R10)
	LDP 48(R1), (R13, R14)
	ADCS R11, R7
	ADCS R12, R8
	STP (R7, R8), 32(R2)
	ADCS R13, R9
	ADCS R14, R10
	STP (R9, R10), 48(R2)
	LDP 64(R0), (R3, R4)
	LDP 64(R1), (R11, R12)
	LDP 80(R0), (R5, R6)
	LDP 80(R1), (R13, R14)
	ADCS R11, R3
	ADCS R12, R4
	STP (R3, R4), 64(R2)
	ADCS R13, R5
	ADCS R14, R6
	STP (R5, R6), 80(R2)
	LDP 96(R0), (R7, R8)
	LDP 96(R1), (R11, R12)
	LDP 112(R0), (R9, R10)
	LDP 112(R1), (R13, R14)
	ADCS R11, R7
	ADCS R12, R8
	STP (R7, R8), 96(R2)
	ADCS R13, R9
	ADC R14, R10
	STP (R9, R10), 112(R2)
	RET
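
// fp503X2SubLazy computes z = x - y for 16-limb values. A borrow out of
// the top limb is corrected by adding p503 * 2^512, i.e. the masked
// p503 is added to the upper eight limbs only. The three lowest limbs
// of p503 are all 2^64-1, so the constant is read starting at offset 16
// and R16 stands in for limbs 0-2.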
TEXT ·fp503X2SubLazy(SB), NOSPLIT, $0-24
	MOVD z+0(FP), R2
	MOVD x+8(FP), R0
	MOVD y+16(FP), R1
	LDP 0(R0), (R3, R4)
	LDP 0(R1), (R11, R12)
	LDP 16(R0), (R5, R6)
	LDP 16(R1), (R13, R14)
	SUBS R11, R3
	SBCS R12, R4
	STP (R3, R4), 0(R2)
	SBCS R13, R5
	SBCS R14, R6
	STP (R5, R6), 16(R2)
	LDP 32(R0), (R7, R8)
	LDP 32(R1), (R11, R12)
	LDP 48(R0), (R9, R10)
	LDP 48(R1), (R13, R14)
	SBCS R11, R7
	SBCS R12, R8
	STP (R7, R8), 32(R2)
	SBCS R13, R9
	SBCS R14, R10
	STP (R9, R10), 48(R2)
	LDP 64(R0), (R3, R4)
	LDP 64(R1), (R11, R12)
	LDP 80(R0), (R5, R6)
	LDP 80(R1), (R13, R14)
	SBCS R11, R3
	SBCS R12, R4
	SBCS R13, R5
	SBCS R14, R6
	LDP 96(R0), (R7, R8)
	LDP 96(R1), (R11, R12)
	LDP 112(R0), (R9, R10)
	LDP 112(R1), (R13, R14)
	SBCS R11, R7
	SBCS R12, R8
	SBCS R13, R9
	SBCS R14, R10
	SBC ZR, ZR, R15
	// If x - y < 0, R15 is an all-ones mask and p503 is added to the upper half
	LDP ·p503+16(SB), (R16, R17)
	LDP ·p503+32(SB), (R19, R20)
	AND R15, R16
	AND R15, R17
	LDP ·p503+48(SB), (R21, R22)
	AND R15, R19
	AND R15, R20
	AND R15, R21
	AND R15, R22
	ADDS R16, R3
	ADCS R16, R4
	STP (R3, R4), 64(R2)
	ADCS R16, R5
	ADCS R17, R6
	STP (R5, R6), 80(R2)
	ADCS R19, R7
	ADCS R20, R8
	STP (R7, R8), 96(R2)
	ADCS R21, R9
	ADC R22, R10
	STP (R9, R10), 112(R2)
	RET
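
// The Comba (product-scanning) multipliers below accumulate partial
// products column by column, so carries live in at most three registers
// at a time and no separate full-width carry propagation is needed.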
// Expects that X0*Y0 is already in Z0(low),Z3(high) and X0*Y1 in Z1(low),Z2(high)
// Z0 is not actually touched
// Result of (X0-X1) * (Y0-Y1) will be in Z0-Z3
// Inputs get overwritten, except for X1
#define mul128x128comba(X0, X1, Y0, Y1, Z0, Z1, Z2, Z3, T0) \
	MUL X1, Y0, X0 \
	UMULH X1, Y0, Y0 \
	ADDS Z3, Z1 \
	ADC ZR, Z2 \
	\
	MUL Y1, X1, T0 \
	UMULH Y1, X1, Y1 \
	ADDS X0, Z1 \
	ADCS Y0, Z2 \
	ADC ZR, ZR, Z3 \
	\
	ADDS T0, Z2 \
	ADC Y1, Z3
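
// One Karatsuba level: writing x = xH*2^128 + xL and y = yH*2^128 + yL,
//   x*y = xH*yH*2^256 + ((xH+xL)*(yH+yL) - xH*yH - xL*yL)*2^128 + xL*yL
// The masks derived from the carries of xH+xL and yH+yL below account
// for the 129th bit of each sum when forming (xH+xL)*(yH+yL).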
// Expects that X points to (X0-X1)
// Result of (X0-X3) * (Y0-Y3) will be in Z0-Z7
// Inputs get overwritten, except X2-X3 and Y2-Y3
#define mul256x256karatsuba(X, X0, X1, X2, X3, Y0, Y1, Y2, Y3, Z0, Z1, Z2, Z3, Z4, Z5, Z6, Z7, T0, T1) \
	ADDS X2, X0 \ // xH + xL, destroys xL
	ADCS X3, X1 \
	ADCS ZR, ZR, T0 \
	\
	ADDS Y2, Y0, Z6 \ // yH + yL
	ADCS Y3, Y1, T1 \
	ADC ZR, ZR, Z7 \
	\
	SUB T0, ZR, Z2 \
	SUB Z7, ZR, Z3 \
	AND Z7, T0 \ // combined carry
	\
	AND Z2, Z6, Z0 \ // masked(yH + yL)
	AND Z2, T1, Z1 \
	\
	AND Z3, X0, Z4 \ // masked(xH + xL)
	AND Z3, X1, Z5 \
	\
	MUL Z6, X0, Z2 \
	MUL T1, X0, Z3 \
	\
	ADDS Z4, Z0 \
	UMULH T1, X0, Z4 \
	ADCS Z5, Z1 \
	UMULH Z6, X0, Z5 \
	ADC ZR, T0 \
	\ // (xH + xL) * (yH + yL)
	mul128x128comba(X0, X1, Z6, T1, Z2, Z3, Z4, Z5, Z7) \
	\
	LDP 0+X, (X0, X1) \
	\
	ADDS Z0, Z4 \
	UMULH Y0, X0, Z7 \
	UMULH Y1, X0, T1 \
	ADCS Z1, Z5 \
	MUL Y0, X0, Z0 \
	MUL Y1, X0, Z1 \
	ADC ZR, T0 \
	\ // xL * yL
	mul128x128comba(X0, X1, Y0, Y1, Z0, Z1, T1, Z7, Z6) \
	\
	MUL Y2, X2, X0 \
	UMULH Y2, X2, Y0 \
	SUBS Z0, Z2 \ // (xH + xL) * (yH + yL) - xL * yL
	SBCS Z1, Z3 \
	SBCS T1, Z4 \
	MUL Y3, X2, X1 \
	UMULH Y3, X2, Z6 \
	SBCS Z7, Z5 \
	SBCS ZR, T0 \
	\ // xH * yH
	mul128x128comba(X2, X3, Y2, Y3, X0, X1, Z6, Y0, Y1) \
	\
	SUBS X0, Z2 \ // (xH + xL) * (yH + yL) - xL * yL - xH * yH
	SBCS X1, Z3 \
	SBCS Z6, Z4 \
	SBCS Y0, Z5 \
	SBCS ZR, T0 \
	\
	ADDS T1, Z2 \ // (xH * yH) * 2^256 + ((xH + xL) * (yH + yL) - xL * yL - xH * yH) * 2^128 + xL * yL
	ADCS Z7, Z3 \
	ADCS X0, Z4 \
	ADCS X1, Z5 \
	ADCS T0, Z6 \
	ADC Y0, ZR, Z7

// This implements two-level Karatsuba with a 128x128 Comba multiplier
// at the bottom
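// fp503Mul computes the full 16-limb product z = x * y of two 8-limb
// operands; no reduction is performed (see fp503MontgomeryReduce). The
// top level splits the operands into 256-bit halves and handles the
// 257-bit sums xH+xL and yH+yL with the same masked-carry trick as
// mul256x256karatsuba.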
TEXT ·fp503Mul(SB), NOSPLIT, $0-24
	MOVD z+0(FP), R2
	MOVD x+8(FP), R0
	MOVD y+16(FP), R1
	// Load xL in R3-R6, xH in R7-R10
	// (xH + xL) in R25-R27, R29
	LDP 0(R0), (R3, R4)
	LDP 32(R0), (R7, R8)
	ADDS R3, R7, R25
	ADCS R4, R8, R26
	LDP 16(R0), (R5, R6)
	LDP 48(R0), (R9, R10)
	ADCS R5, R9, R27
	ADCS R6, R10, R29
	ADC ZR, ZR, R7
	// Load yL in R11-R14, yH in R15-R17, R19
	// (yH + yL) in R11-R14, destroys yL
	LDP 0(R1), (R11, R12)
	LDP 32(R1), (R15, R16)
	ADDS R15, R11
	ADCS R16, R12
	LDP 16(R1), (R13, R14)
	LDP 48(R1), (R17, R19)
	ADCS R17, R13
	ADCS R19, R14
	ADC ZR, ZR, R8
	// Compute masks and combined carry
	SUB R7, ZR, R9
	SUB R8, ZR, R10
	AND R8, R7
	// masked(yH + yL)
	AND R9, R11, R15
	AND R9, R12, R16
	AND R9, R13, R17
	AND R9, R14, R19
	// masked(xH + xL)
	AND R10, R25, R20
	AND R10, R26, R21
	AND R10, R27, R22
	AND R10, R29, R23
	// masked(xH + xL) + masked(yH + yL) in R15-R17, R19
	ADDS R20, R15
	ADCS R21, R16
	ADCS R22, R17
	ADCS R23, R19
	ADC ZR, R7
	// Use z as temporary storage
	STP (R25, R26), 0(R2)
	// (xH + xL) * (yH + yL)
	mul256x256karatsuba(0(R2), R25, R26, R27, R29, R11, R12, R13, R14, R8, R9, R10, R20, R21, R22, R23, R24, R0, R1)
	MOVD x+8(FP), R0
	MOVD y+16(FP), R1
	ADDS R21, R15
	ADCS R22, R16
	ADCS R23, R17
	ADCS R24, R19
	ADC ZR, R7
	// Load yL in R11-R14
	LDP 0(R1), (R11, R12)
	LDP 16(R1), (R13, R14)
	// xL * yL
	mul256x256karatsuba(0(R0), R3, R4, R5, R6, R11, R12, R13, R14, R21, R22, R23, R24, R25, R26, R27, R29, R1, R2)
	MOVD z+0(FP), R2
	MOVD y+16(FP), R1
	// (xH + xL) * (yH + yL) - xL * yL
	SUBS R21, R8
	SBCS R22, R9
	STP (R21, R22), 0(R2)
	SBCS R23, R10
	SBCS R24, R20
	STP (R23, R24), 16(R2)
	SBCS R25, R15
	SBCS R26, R16
	SBCS R27, R17
	SBCS R29, R19
	SBC ZR, R7
	// Load xH in R3-R6, yH in R11-R14
	LDP 32(R0), (R3, R4)
	LDP 48(R0), (R5, R6)
	LDP 32(R1), (R11, R12)
	LDP 48(R1), (R13, R14)
	ADDS R25, R8
	ADCS R26, R9
	ADCS R27, R10
	ADCS R29, R20
	ADC ZR, ZR, R1
	MOVD R20, 32(R2)
	// xH * yH
	mul256x256karatsuba(32(R0), R3, R4, R5, R6, R11, R12, R13, R14, R21, R22, R23, R24, R25, R26, R27, R29, R2, R20)
	NEG R1, R1
	MOVD z+0(FP), R2
	MOVD 32(R2), R20
	// (xH + xL) * (yH + yL) - xL * yL - xH * yH in R8-R10, R20, R15-R17, R19
	// Store lower half in z, that's done
	SUBS R21, R8
	SBCS R22, R9
	STP (R8, R9), 32(R2)
	SBCS R23, R10
	SBCS R24, R20
	STP (R10, R20), 48(R2)
	SBCS R25, R15
	SBCS R26, R16
	SBCS R27, R17
	SBCS R29, R19
	SBC ZR, R7
	// (xH * yH) * 2^512 + ((xH + xL) * (yH + yL) - xL * yL - xH * yH) * 2^256 + xL * yL
	// Store remaining limbs in z
	ADDS $1, R1
	ADCS R21, R15
	ADCS R22, R16
	STP (R15, R16), 64(R2)
	ADCS R23, R17
	ADCS R24, R19
	STP (R17, R19), 80(R2)
	ADCS R7, R25
	ADCS ZR, R26
	STP (R25, R26), 96(R2)
	ADCS ZR, R27
	ADC ZR, R29
	STP (R27, R29), 112(R2)
	RET
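
// 128 x 256 -> 384-bit product-scanning multiplier. As with
// mul128x128comba, the caller precomputes X0*Y0 and X0*Y1 into Z0-Z3,
// which allows those first multiplies to be interleaved with other work
// (see the MUL/UMULH pairs ahead of each use in fp503MontgomeryReduce).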
// Expects that X0*Y0 is already in Z0(low),Z3(high) and X0*Y1 in Z1(low),Z2(high)
// Z0 is not actually touched
// Result of (X0-X1) * (Y0-Y3) will be in Z0-Z5
// Inputs remain intact
#define mul128x256comba(X0, X1, Y0, Y1, Y2, Y3, Z0, Z1, Z2, Z3, Z4, Z5, T0, T1, T2, T3) \
	MUL X1, Y0, T0 \
	UMULH X1, Y0, T1 \
	ADDS Z3, Z1 \
	ADC ZR, Z2 \
	\
	MUL X0, Y2, T2 \
	UMULH X0, Y2, T3 \
	ADDS T0, Z1 \
	ADCS T1, Z2 \
	ADC ZR, ZR, Z3 \
	\
	MUL X1, Y1, T0 \
	UMULH X1, Y1, T1 \
	ADDS T2, Z2 \
	ADCS T3, Z3 \
	ADC ZR, ZR, Z4 \
	\
	MUL X0, Y3, T2 \
	UMULH X0, Y3, T3 \
	ADDS T0, Z2 \
	ADCS T1, Z3 \
	ADC ZR, Z4 \
	\
	MUL X1, Y2, T0 \
	UMULH X1, Y2, T1 \
	ADDS T2, Z3 \
	ADCS T3, Z4 \
	ADC ZR, ZR, Z5 \
	\
	MUL X1, Y3, T2 \
	UMULH X1, Y3, T3 \
	ADDS T0, Z3 \
	ADCS T1, Z4 \
	ADC ZR, Z5 \
	ADDS T2, Z4 \
	ADC T3, Z5

// This implements the shifted 2^(B*w) Montgomery reduction from
// https://eprint.iacr.org/2016/986.pdf, Section 3.2, with B = 4 and
// w = 64. Performance results were reported in
// https://eprint.iacr.org/2018/700.pdf, Section 6.
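// Since p503 + 1 = 2^250 * 3^159, the low 250 bits of p503+1 are zero.
// p503p1s8 holds p503+1 pre-shifted left by 8 bits (hence the name), so
// its low four limbs vanish and each reduction round is a single
// 128x256-bit multiply by its four non-zero limbs; the 56-bit left
// shifts below realign each partial product, which is then added back
// 248 bits (three limbs plus 56 bits) above the limb pair it eliminates.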
TEXT ·fp503MontgomeryReduce(SB), NOSPLIT, $0-16
	MOVD x+8(FP), R0
	// Load x0-x1
	LDP 0(R0), (R2, R3)
	// Load the prime constant into R25-R27, R29
	LDP ·p503p1s8+32(SB), (R25, R26)
	LDP ·p503p1s8+48(SB), (R27, R29)
	// [x0,x1] * p503p1s8 to R4-R9
	MUL R2, R25, R4 // x0 * p503p1s8[0]
	UMULH R2, R25, R7
	MUL R2, R26, R5 // x0 * p503p1s8[1]
	UMULH R2, R26, R6
	mul128x256comba(R2, R3, R25, R26, R27, R29, R4, R5, R6, R7, R8, R9, R10, R11, R12, R13)
	LDP 16(R0), (R3, R11) // x2
	LDP 32(R0), (R12, R13)
	LDP 48(R0), (R14, R15)
	// Left-shift result in R4-R9 by 56 to R4-R10
	ORR R9>>8, ZR, R10
	LSL $56, R9
	ORR R8>>8, R9
	LSL $56, R8
	ORR R7>>8, R8
	LSL $56, R7
	ORR R6>>8, R7
	LSL $56, R6
	ORR R5>>8, R6
	LSL $56, R5
	ORR R4>>8, R5
	LSL $56, R4
	ADDS R4, R11 // x3
	ADCS R5, R12 // x4
	ADCS R6, R13
	ADCS R7, R14
	ADCS R8, R15
	LDP 64(R0), (R16, R17)
	LDP 80(R0), (R19, R20)
	MUL R3, R25, R4 // x2 * p503p1s8[0]
	UMULH R3, R25, R7
	ADCS R9, R16
	ADCS R10, R17
	ADCS ZR, R19
	ADCS ZR, R20
	LDP 96(R0), (R21, R22)
	LDP 112(R0), (R23, R24)
	MUL R3, R26, R5 // x2 * p503p1s8[1]
	UMULH R3, R26, R6
	ADCS ZR, R21
	ADCS ZR, R22
	ADCS ZR, R23
	ADC ZR, R24
	// [x2,x3] * p503p1s8 to R4-R9
	mul128x256comba(R3, R11, R25, R26, R27, R29, R4, R5, R6, R7, R8, R9, R10, R0, R1, R2)
	ORR R9>>8, ZR, R10
	LSL $56, R9
	ORR R8>>8, R9
	LSL $56, R8
	ORR R7>>8, R8
	LSL $56, R7
	ORR R6>>8, R7
	LSL $56, R6
	ORR R5>>8, R6
	LSL $56, R5
	ORR R4>>8, R5
	LSL $56, R4
	ADDS R4, R13 // x5
	ADCS R5, R14 // x6
	ADCS R6, R15
	ADCS R7, R16
	MUL R12, R25, R4 // x4 * p503p1s8[0]
	UMULH R12, R25, R7
	ADCS R8, R17
	ADCS R9, R19
	ADCS R10, R20
	ADCS ZR, R21
	MUL R12, R26, R5 // x4 * p503p1s8[1]
	UMULH R12, R26, R6
	ADCS ZR, R22
	ADCS ZR, R23
	ADC ZR, R24
	// [x4,x5] * p503p1s8 to R4-R9
	mul128x256comba(R12, R13, R25, R26, R27, R29, R4, R5, R6, R7, R8, R9, R10, R0, R1, R2)
	ORR R9>>8, ZR, R10
	LSL $56, R9
	ORR R8>>8, R9
	LSL $56, R8
	ORR R7>>8, R8
	LSL $56, R7
	ORR R6>>8, R7
	LSL $56, R6
	ORR R5>>8, R6
	LSL $56, R5
	ORR R4>>8, R5
	LSL $56, R4
	ADDS R4, R15 // x7
	ADCS R5, R16 // x8
	ADCS R6, R17
	ADCS R7, R19
	MUL R14, R25, R4 // x6 * p503p1s8[0]
	UMULH R14, R25, R7
	ADCS R8, R20
	ADCS R9, R21
	ADCS R10, R22
	MUL R14, R26, R5 // x6 * p503p1s8[1]
	UMULH R14, R26, R6
	ADCS ZR, R23
	ADC ZR, R24
	// [x6,x7] * p503p1s8 to R4-R9
	mul128x256comba(R14, R15, R25, R26, R27, R29, R4, R5, R6, R7, R8, R9, R10, R0, R1, R2)
	ORR R9>>8, ZR, R10
	LSL $56, R9
	ORR R8>>8, R9
	LSL $56, R8
	ORR R7>>8, R8
	LSL $56, R7
	ORR R6>>8, R7
	LSL $56, R6
	ORR R5>>8, R6
	LSL $56, R5
	ORR R4>>8, R5
	LSL $56, R4
	MOVD z+0(FP), R0
	ADDS R4, R17
	ADCS R5, R19
	STP (R16, R17), 0(R0) // Store final result to z
	ADCS R6, R20
	ADCS R7, R21
	STP (R19, R20), 16(R0)
	ADCS R8, R22
	ADCS R9, R23
	STP (R21, R22), 32(R0)
	ADC R10, R24
	STP (R23, R24), 48(R0)
	RET
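
// fp503StrongReduce reduces x in place from [0, 2*p503) to [0, p503):
// p503 is subtracted and, on borrow, added back under the all-ones
// mask. The three lowest limbs of p503 all equal 2^64-1, so R9 (loaded
// from offset 16) stands in for limbs 0-2.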
TEXT ·fp503StrongReduce(SB), NOSPLIT, $0-8
	MOVD x+0(FP), R0
	// Keep x in R1-R8, p503 in R9-R14, subtract to R1-R8
	LDP ·p503+16(SB), (R9, R10)
	LDP 0(R0), (R1, R2)
	LDP 16(R0), (R3, R4)
	SUBS R9, R1
	SBCS R9, R2
	LDP 32(R0), (R5, R6)
	LDP ·p503+32(SB), (R11, R12)
	SBCS R9, R3
	SBCS R10, R4
	LDP 48(R0), (R7, R8)
	LDP ·p503+48(SB), (R13, R14)
	SBCS R11, R5
	SBCS R12, R6
	SBCS R13, R7
	SBCS R14, R8
	SBC ZR, ZR, R15
	// Mask with the borrow and add p503
	AND R15, R9
	AND R15, R10
	AND R15, R11
	AND R15, R12
	AND R15, R13
	AND R15, R14
	ADDS R9, R1
	ADCS R9, R2
	STP (R1, R2), 0(R0)
	ADCS R9, R3
	ADCS R10, R4
	STP (R3, R4), 16(R0)
	ADCS R11, R5
	ADCS R12, R6
	STP (R5, R6), 32(R0)
	ADCS R13, R7
	ADCS R14, R8
	STP (R7, R8), 48(R0)
	RET