/*
 * Loongson MMI optimizations for libjpeg-turbo
 *
 * Copyright (C) 2014-2015, 2018, D. R. Commander.  All Rights Reserved.
 * Copyright (C) 2016-2017, Loongson Technology Corporation Limited, BeiJing.
 *                          All Rights Reserved.
 * Authors:  ZhuChen     <zhuchen@loongson.cn>
 *           CaiWanwei   <caiwanwei@loongson.cn>
 *           SunZhangzhi <sunzhangzhi-cq@loongson.cn>
 *
 * Based on the x86 SIMD extension for IJG JPEG library
 * Copyright (C) 1999-2006, MIYASAKA Masaru.
 *
 * This software is provided 'as-is', without any express or implied
 * warranty.  In no event will the authors be held liable for any damages
 * arising from the use of this software.
 *
 * Permission is granted to anyone to use this software for any purpose,
 * including commercial applications, and to alter it and redistribute it
 * freely, subject to the following restrictions:
 *
 * 1. The origin of this software must not be misrepresented; you must not
 *    claim that you wrote the original software. If you use this software
 *    in a product, an acknowledgment in the product documentation would be
 *    appreciated but is not required.
 * 2. Altered source versions must be plainly marked as such, and must not be
 *    misrepresented as being the original software.
 * 3. This notice may not be removed or altered from any source distribution.
 */

/* SLOW INTEGER INVERSE DCT */

#include "jsimd_mmi.h"

#define CONST_BITS  13
#define PASS1_BITS  2

#define DESCALE_P1  (CONST_BITS - PASS1_BITS)
#define DESCALE_P2  (CONST_BITS + PASS1_BITS + 3)

#define CENTERJSAMPLE  128
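
/* Note: the 1-D transforms work on data scaled up by 2^CONST_BITS.  Pass 1
 * descales by CONST_BITS - PASS1_BITS, leaving PASS1_BITS extra fractional
 * bits in the workspace; pass 2 descales by CONST_BITS + PASS1_BITS + 3,
 * removing those bits plus three more to undo the factor of 8 introduced by
 * the scaled transforms (as in jidctint.c).
 */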

#define FIX_0_298  ((short)2446)   /* FIX(0.298631336) */
#define FIX_0_390  ((short)3196)   /* FIX(0.390180644) */
#define FIX_0_899  ((short)7373)   /* FIX(0.899976223) */
#define FIX_0_541  ((short)4433)   /* FIX(0.541196100) */
#define FIX_0_765  ((short)6270)   /* FIX(0.765366865) */
#define FIX_1_175  ((short)9633)   /* FIX(1.175875602) */
#define FIX_1_501  ((short)12299)  /* FIX(1.501321110) */
#define FIX_1_847  ((short)15137)  /* FIX(1.847759065) */
#define FIX_1_961  ((short)16069)  /* FIX(1.961570560) */
#define FIX_2_053  ((short)16819)  /* FIX(2.053119869) */
#define FIX_2_562  ((short)20995)  /* FIX(2.562915447) */
#define FIX_3_072  ((short)25172)  /* FIX(3.072711026) */
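
/* FIX(x) denotes x scaled up by 2^CONST_BITS and rounded to the nearest
 * integer; for example, FIX(0.541196100) = round(0.541196100 * 8192) = 4433
 * and FIX(1.175875602) = round(1.175875602 * 8192) = 9633.
 */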

enum const_index {
  index_PW_F130_F054,
  index_PW_F054_MF130,
  index_PW_MF078_F117,
  index_PW_F117_F078,
  index_PW_MF060_MF089,
  index_PW_MF089_F060,
  index_PW_MF050_MF256,
  index_PW_MF256_F050,
  index_PD_DESCALE_P1,
  index_PD_DESCALE_P2,
  index_PB_CENTERJSAMP
};

static uint64_t const_value[] = {
  _uint64_set_pi16(FIX_0_541, (FIX_0_541 + FIX_0_765),
                   FIX_0_541, (FIX_0_541 + FIX_0_765)),
  _uint64_set_pi16((FIX_0_541 - FIX_1_847), FIX_0_541,
                   (FIX_0_541 - FIX_1_847), FIX_0_541),
  _uint64_set_pi16(FIX_1_175, (FIX_1_175 - FIX_1_961),
                   FIX_1_175, (FIX_1_175 - FIX_1_961)),
  _uint64_set_pi16((FIX_1_175 - FIX_0_390), FIX_1_175,
                   (FIX_1_175 - FIX_0_390), FIX_1_175),
  _uint64_set_pi16(-FIX_0_899, (FIX_0_298 - FIX_0_899),
                   -FIX_0_899, (FIX_0_298 - FIX_0_899)),
  _uint64_set_pi16((FIX_1_501 - FIX_0_899), -FIX_0_899,
                   (FIX_1_501 - FIX_0_899), -FIX_0_899),
  _uint64_set_pi16(-FIX_2_562, (FIX_2_053 - FIX_2_562),
                   -FIX_2_562, (FIX_2_053 - FIX_2_562)),
  _uint64_set_pi16((FIX_3_072 - FIX_2_562), -FIX_2_562,
                   (FIX_3_072 - FIX_2_562), -FIX_2_562),
  _uint64_set_pi32((1 << (DESCALE_P1 - 1)), (1 << (DESCALE_P1 - 1))),
  _uint64_set_pi32((1 << (DESCALE_P2 - 1)), (1 << (DESCALE_P2 - 1))),
  _uint64_set_pi8(CENTERJSAMPLE, CENTERJSAMPLE, CENTERJSAMPLE, CENTERJSAMPLE,
                  CENTERJSAMPLE, CENTERJSAMPLE, CENTERJSAMPLE, CENTERJSAMPLE)
};
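
/* Each 64-bit word constant above packs two 16-bit multipliers per 32-bit
 * lane, so that _mm_madd_pi16() can apply them to a pair of interleaved
 * coefficients with a single multiply-accumulate: for interleaved words
 * (a, b) and constants (c0, c1), each 32-bit result lane holds a*c0 + b*c1.
 */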

#define PW_F130_F054    get_const_value(index_PW_F130_F054)
#define PW_F054_MF130   get_const_value(index_PW_F054_MF130)
#define PW_MF078_F117   get_const_value(index_PW_MF078_F117)
#define PW_F117_F078    get_const_value(index_PW_F117_F078)
#define PW_MF060_MF089  get_const_value(index_PW_MF060_MF089)
#define PW_MF089_F060   get_const_value(index_PW_MF089_F060)
#define PW_MF050_MF256  get_const_value(index_PW_MF050_MF256)
#define PW_MF256_F050   get_const_value(index_PW_MF256_F050)
#define PD_DESCALE_P1   get_const_value(index_PD_DESCALE_P1)
#define PD_DESCALE_P2   get_const_value(index_PD_DESCALE_P2)
#define PB_CENTERJSAMP  get_const_value(index_PB_CENTERJSAMP)

#define test_m32_zero(mm32)  (!(*(uint32_t *)&mm32))
#define test_m64_zero(mm64)  (!(*(uint64_t *)&mm64))
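
/* test_m32_zero()/test_m64_zero() reinterpret a packed value as an integer in
 * order to check whether every coefficient in it is zero (used for the
 * DC-only shortcut in pass 1).
 *
 * DO_IDCT_COMMON() below performs the odd-part rotations on tmp0..tmp3,
 * combines them with the even-part sums tmp10..tmp13 in the final butterfly,
 * and descales each result by DESCALE_P1 or DESCALE_P2 depending on the PASS
 * argument.
 */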

#define DO_IDCT_COMMON(PASS) { \
  __m64 tmp0_3l, tmp0_3h, tmp1_2l, tmp1_2h; \
  __m64 tmp0l, tmp0h, tmp1l, tmp1h, tmp2l, tmp2h, tmp3l, tmp3h; \
  __m64 z34l, z34h, z3l, z3h, z4l, z4h, z3, z4; \
  __m64 out0l, out0h, out1l, out1h, out2l, out2h, out3l, out3h; \
  __m64 out4l, out4h, out5l, out5h, out6l, out6h, out7l, out7h; \
  \
  z3 = _mm_add_pi16(tmp0, tmp2); \
  z4 = _mm_add_pi16(tmp1, tmp3); \
  \
  /* (Original) \
   * z5 = (z3 + z4) * 1.175875602; \
   * z3 = z3 * -1.961570560;  z4 = z4 * -0.390180644; \
   * z3 += z5;  z4 += z5; \
   * \
   * (This implementation) \
   * z3 = z3 * (1.175875602 - 1.961570560) + z4 * 1.175875602; \
   * z4 = z3 * 1.175875602 + z4 * (1.175875602 - 0.390180644); \
   */ \
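  /* The rotated form follows from substituting \
   * z5 = (z3 + z4) * 1.175875602 into z3 += z5 and z4 += z5, so each output \
   * needs only one multiply-accumulate over the interleaved (z3, z4) pair. \
   */ \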
  \
  z34l = _mm_unpacklo_pi16(z3, z4); \
  z34h = _mm_unpackhi_pi16(z3, z4); \
  z3l = _mm_madd_pi16(z34l, PW_MF078_F117); \
  z3h = _mm_madd_pi16(z34h, PW_MF078_F117); \
  z4l = _mm_madd_pi16(z34l, PW_F117_F078); \
  z4h = _mm_madd_pi16(z34h, PW_F117_F078); \
  \
  /* (Original) \
   * z1 = tmp0 + tmp3;  z2 = tmp1 + tmp2; \
   * tmp0 = tmp0 * 0.298631336;  tmp1 = tmp1 * 2.053119869; \
   * tmp2 = tmp2 * 3.072711026;  tmp3 = tmp3 * 1.501321110; \
   * z1 = z1 * -0.899976223;  z2 = z2 * -2.562915447; \
   * tmp0 += z1 + z3;  tmp1 += z2 + z4; \
   * tmp2 += z2 + z3;  tmp3 += z1 + z4; \
   * \
   * (This implementation) \
   * tmp0 = tmp0 * (0.298631336 - 0.899976223) + tmp3 * -0.899976223; \
   * tmp1 = tmp1 * (2.053119869 - 2.562915447) + tmp2 * -2.562915447; \
   * tmp2 = tmp1 * -2.562915447 + tmp2 * (3.072711026 - 2.562915447); \
   * tmp3 = tmp0 * -0.899976223 + tmp3 * (1.501321110 - 0.899976223); \
   * tmp0 += z3;  tmp1 += z4; \
   * tmp2 += z3;  tmp3 += z4; \
   */ \
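  /* Again the z1/z2 terms are folded into the multipliers, e.g. \
   * tmp0 * 0.298631336 + z1 * -0.899976223, with z1 = tmp0 + tmp3, equals \
   * tmp0 * (0.298631336 - 0.899976223) + tmp3 * -0.899976223. \
   */ \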
  \
  tmp0_3l = _mm_unpacklo_pi16(tmp0, tmp3); \
  tmp0_3h = _mm_unpackhi_pi16(tmp0, tmp3); \
  \
  tmp0l = _mm_madd_pi16(tmp0_3l, PW_MF060_MF089); \
  tmp0h = _mm_madd_pi16(tmp0_3h, PW_MF060_MF089); \
  tmp3l = _mm_madd_pi16(tmp0_3l, PW_MF089_F060); \
  tmp3h = _mm_madd_pi16(tmp0_3h, PW_MF089_F060); \
  \
  tmp0l = _mm_add_pi32(tmp0l, z3l); \
  tmp0h = _mm_add_pi32(tmp0h, z3h); \
  tmp3l = _mm_add_pi32(tmp3l, z4l); \
  tmp3h = _mm_add_pi32(tmp3h, z4h); \
  \
  tmp1_2l = _mm_unpacklo_pi16(tmp1, tmp2); \
  tmp1_2h = _mm_unpackhi_pi16(tmp1, tmp2); \
  \
  tmp1l = _mm_madd_pi16(tmp1_2l, PW_MF050_MF256); \
  tmp1h = _mm_madd_pi16(tmp1_2h, PW_MF050_MF256); \
  tmp2l = _mm_madd_pi16(tmp1_2l, PW_MF256_F050); \
  tmp2h = _mm_madd_pi16(tmp1_2h, PW_MF256_F050); \
  \
  tmp1l = _mm_add_pi32(tmp1l, z4l); \
  tmp1h = _mm_add_pi32(tmp1h, z4h); \
  tmp2l = _mm_add_pi32(tmp2l, z3l); \
  tmp2h = _mm_add_pi32(tmp2h, z3h); \
  \
  /* Final output stage */ \
  \
  out0l = _mm_add_pi32(tmp10l, tmp3l); \
  out0h = _mm_add_pi32(tmp10h, tmp3h); \
  out7l = _mm_sub_pi32(tmp10l, tmp3l); \
  out7h = _mm_sub_pi32(tmp10h, tmp3h); \
  \
  out0l = _mm_add_pi32(out0l, PD_DESCALE_P##PASS); \
  out0h = _mm_add_pi32(out0h, PD_DESCALE_P##PASS); \
  out0l = _mm_srai_pi32(out0l, DESCALE_P##PASS); \
  out0h = _mm_srai_pi32(out0h, DESCALE_P##PASS); \
  \
  out7l = _mm_add_pi32(out7l, PD_DESCALE_P##PASS); \
  out7h = _mm_add_pi32(out7h, PD_DESCALE_P##PASS); \
  out7l = _mm_srai_pi32(out7l, DESCALE_P##PASS); \
  out7h = _mm_srai_pi32(out7h, DESCALE_P##PASS); \
  \
  out0 = _mm_packs_pi32(out0l, out0h); \
  out7 = _mm_packs_pi32(out7l, out7h); \
  \
  out1l = _mm_add_pi32(tmp11l, tmp2l); \
  out1h = _mm_add_pi32(tmp11h, tmp2h); \
  out6l = _mm_sub_pi32(tmp11l, tmp2l); \
  out6h = _mm_sub_pi32(tmp11h, tmp2h); \
  \
  out1l = _mm_add_pi32(out1l, PD_DESCALE_P##PASS); \
  out1h = _mm_add_pi32(out1h, PD_DESCALE_P##PASS); \
  out1l = _mm_srai_pi32(out1l, DESCALE_P##PASS); \
  out1h = _mm_srai_pi32(out1h, DESCALE_P##PASS); \
  \
  out6l = _mm_add_pi32(out6l, PD_DESCALE_P##PASS); \
  out6h = _mm_add_pi32(out6h, PD_DESCALE_P##PASS); \
  out6l = _mm_srai_pi32(out6l, DESCALE_P##PASS); \
  out6h = _mm_srai_pi32(out6h, DESCALE_P##PASS); \
  \
  out1 = _mm_packs_pi32(out1l, out1h); \
  out6 = _mm_packs_pi32(out6l, out6h); \
  \
  out2l = _mm_add_pi32(tmp12l, tmp1l); \
  out2h = _mm_add_pi32(tmp12h, tmp1h); \
  out5l = _mm_sub_pi32(tmp12l, tmp1l); \
  out5h = _mm_sub_pi32(tmp12h, tmp1h); \
  \
  out2l = _mm_add_pi32(out2l, PD_DESCALE_P##PASS); \
  out2h = _mm_add_pi32(out2h, PD_DESCALE_P##PASS); \
  out2l = _mm_srai_pi32(out2l, DESCALE_P##PASS); \
  out2h = _mm_srai_pi32(out2h, DESCALE_P##PASS); \
  \
  out5l = _mm_add_pi32(out5l, PD_DESCALE_P##PASS); \
  out5h = _mm_add_pi32(out5h, PD_DESCALE_P##PASS); \
  out5l = _mm_srai_pi32(out5l, DESCALE_P##PASS); \
  out5h = _mm_srai_pi32(out5h, DESCALE_P##PASS); \
  \
  out2 = _mm_packs_pi32(out2l, out2h); \
  out5 = _mm_packs_pi32(out5l, out5h); \
  \
  out3l = _mm_add_pi32(tmp13l, tmp0l); \
  out3h = _mm_add_pi32(tmp13h, tmp0h); \
  \
  out4l = _mm_sub_pi32(tmp13l, tmp0l); \
  out4h = _mm_sub_pi32(tmp13h, tmp0h); \
  \
  out3l = _mm_add_pi32(out3l, PD_DESCALE_P##PASS); \
  out3h = _mm_add_pi32(out3h, PD_DESCALE_P##PASS); \
  out3l = _mm_srai_pi32(out3l, DESCALE_P##PASS); \
  out3h = _mm_srai_pi32(out3h, DESCALE_P##PASS); \
  \
  out4l = _mm_add_pi32(out4l, PD_DESCALE_P##PASS); \
  out4h = _mm_add_pi32(out4h, PD_DESCALE_P##PASS); \
  out4l = _mm_srai_pi32(out4l, DESCALE_P##PASS); \
  out4h = _mm_srai_pi32(out4h, DESCALE_P##PASS); \
  \
  out3 = _mm_packs_pi32(out3l, out3h); \
  out4 = _mm_packs_pi32(out4l, out4h); \
}

#define DO_IDCT_PASS1(iter) { \
  __m64 col0l, col1l, col2l, col3l, col4l, col5l, col6l, col7l; \
  __m64 quant0l, quant1l, quant2l, quant3l; \
  __m64 quant4l, quant5l, quant6l, quant7l; \
  __m64 z23, z2, z3, z23l, z23h; \
  __m64 row01a, row01b, row01c, row01d, row23a, row23b, row23c, row23d; \
  __m64 row0l, row0h, row1l, row1h, row2l, row2h, row3l, row3h; \
  __m64 tmp0l, tmp0h, tmp1l, tmp1h, tmp2l, tmp2h, tmp3l, tmp3h; \
  __m64 tmp10l, tmp10h, tmp11l, tmp11h, tmp12l, tmp12h, tmp13l, tmp13h; \
  __m32 col0a, col1a, mm0; \
  \
  col0a = _mm_load_si32((__m32 *)&inptr[DCTSIZE * 1]); \
  col1a = _mm_load_si32((__m32 *)&inptr[DCTSIZE * 2]); \
  mm0 = _mm_or_si32(col0a, col1a); \
  \
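  /* Cheap pre-test: only if the leading coefficients of rows 1 and 2 are \
   * zero is it worth loading all of the AC rows to try the DC-only \
   * shortcut below. \
   */ \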
  if (test_m32_zero(mm0)) { \
    __m64 mm1, mm2; \
    \
    col0l = _mm_load_si64((__m64 *)&inptr[DCTSIZE * 0]); \
    col1l = _mm_load_si64((__m64 *)&inptr[DCTSIZE * 1]); \
    col2l = _mm_load_si64((__m64 *)&inptr[DCTSIZE * 2]); \
    col3l = _mm_load_si64((__m64 *)&inptr[DCTSIZE * 3]); \
    col4l = _mm_load_si64((__m64 *)&inptr[DCTSIZE * 4]); \
    col5l = _mm_load_si64((__m64 *)&inptr[DCTSIZE * 5]); \
    col6l = _mm_load_si64((__m64 *)&inptr[DCTSIZE * 6]); \
    col7l = _mm_load_si64((__m64 *)&inptr[DCTSIZE * 7]); \
    \
    mm1 = _mm_or_si64(col1l, col3l); \
    mm2 = _mm_or_si64(col2l, col4l); \
    mm1 = _mm_or_si64(mm1, col5l); \
    mm2 = _mm_or_si64(mm2, col6l); \
    mm1 = _mm_or_si64(mm1, col7l); \
    mm1 = _mm_or_si64(mm1, mm2); \
    \
    if (test_m64_zero(mm1)) { \
      __m64 dcval, dcvall, dcvalh, row0, row1, row2, row3; \
      \
      /* AC terms all zero */ \
      \
      quant0l = _mm_load_si64((__m64 *)&quantptr[DCTSIZE * 0]); \
      \
      dcval = _mm_mullo_pi16(col0l, quant0l); \
      dcval = _mm_slli_pi16(dcval, PASS1_BITS);  /* dcval=(00 10 20 30) */ \
      \
      dcvall = _mm_unpacklo_pi16(dcval, dcval);  /* dcvall=(00 00 10 10) */ \
      dcvalh = _mm_unpackhi_pi16(dcval, dcval);  /* dcvalh=(20 20 30 30) */ \
      \
      row0 = _mm_unpacklo_pi32(dcvall, dcvall);  /* row0=(00 00 00 00) */ \
      row1 = _mm_unpackhi_pi32(dcvall, dcvall);  /* row1=(10 10 10 10) */ \
      row2 = _mm_unpacklo_pi32(dcvalh, dcvalh);  /* row2=(20 20 20 20) */ \
      row3 = _mm_unpackhi_pi32(dcvalh, dcvalh);  /* row3=(30 30 30 30) */ \
      \
      _mm_store_si64((__m64 *)&wsptr[DCTSIZE * 0], row0); \
      _mm_store_si64((__m64 *)&wsptr[DCTSIZE * 0 + 4], row0); \
      _mm_store_si64((__m64 *)&wsptr[DCTSIZE * 1], row1); \
      _mm_store_si64((__m64 *)&wsptr[DCTSIZE * 1 + 4], row1); \
      _mm_store_si64((__m64 *)&wsptr[DCTSIZE * 2], row2); \
      _mm_store_si64((__m64 *)&wsptr[DCTSIZE * 2 + 4], row2); \
      _mm_store_si64((__m64 *)&wsptr[DCTSIZE * 3], row3); \
      _mm_store_si64((__m64 *)&wsptr[DCTSIZE * 3 + 4], row3); \
      \
      goto nextcolumn##iter; \
    } \
  } \
  \
  /* Even part \
   * \
   * (Original) \
   * z1 = (z2 + z3) * 0.541196100; \
   * tmp2 = z1 + z3 * -1.847759065; \
   * tmp3 = z1 + z2 * 0.765366865; \
   * \
   * (This implementation) \
   * tmp2 = z2 * 0.541196100 + z3 * (0.541196100 - 1.847759065); \
   * tmp3 = z2 * (0.541196100 + 0.765366865) + z3 * 0.541196100; \
   */ \
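  /* As in DO_IDCT_COMMON(), this follows from substituting \
   * z1 = (z2 + z3) * 0.541196100 into tmp2 and tmp3, leaving one \
   * multiply-accumulate per output over the interleaved (z2, z3) pair. \
   */ \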
  \
  col0l = _mm_load_si64((__m64 *)&inptr[DCTSIZE * 0]);  /* (00 10 20 30) */ \
  col2l = _mm_load_si64((__m64 *)&inptr[DCTSIZE * 2]);  /* (02 12 22 32) */ \
  col4l = _mm_load_si64((__m64 *)&inptr[DCTSIZE * 4]);  /* (04 14 24 34) */ \
  col6l = _mm_load_si64((__m64 *)&inptr[DCTSIZE * 6]);  /* (06 16 26 36) */ \
  \
  quant0l = _mm_load_si64((__m64 *)&quantptr[DCTSIZE * 0]); \
  quant2l = _mm_load_si64((__m64 *)&quantptr[DCTSIZE * 2]); \
  quant4l = _mm_load_si64((__m64 *)&quantptr[DCTSIZE * 4]); \
  quant6l = _mm_load_si64((__m64 *)&quantptr[DCTSIZE * 6]); \
  \
  z2 = _mm_mullo_pi16(col2l, quant2l); \
  z3 = _mm_mullo_pi16(col6l, quant6l); \
  \
  z23l = _mm_unpacklo_pi16(z2, z3); \
  z23h = _mm_unpackhi_pi16(z2, z3); \
  tmp3l = _mm_madd_pi16(z23l, PW_F130_F054); \
  tmp3h = _mm_madd_pi16(z23h, PW_F130_F054); \
  tmp2l = _mm_madd_pi16(z23l, PW_F054_MF130); \
  tmp2h = _mm_madd_pi16(z23h, PW_F054_MF130); \
  \
  z2 = _mm_mullo_pi16(col0l, quant0l); \
  z3 = _mm_mullo_pi16(col4l, quant4l); \
  \
  z23 = _mm_add_pi16(z2, z3); \
  tmp0l = _mm_loadlo_pi16_f(z23); \
  tmp0h = _mm_loadhi_pi16_f(z23); \
  tmp0l = _mm_srai_pi32(tmp0l, (16 - CONST_BITS)); \
  tmp0h = _mm_srai_pi32(tmp0h, (16 - CONST_BITS)); \
  \
  tmp10l = _mm_add_pi32(tmp0l, tmp3l); \
  tmp10h = _mm_add_pi32(tmp0h, tmp3h); \
  tmp13l = _mm_sub_pi32(tmp0l, tmp3l); \
  tmp13h = _mm_sub_pi32(tmp0h, tmp3h); \
  \
  z23 = _mm_sub_pi16(z2, z3); \
  tmp1l = _mm_loadlo_pi16_f(z23); \
  tmp1h = _mm_loadhi_pi16_f(z23); \
  tmp1l = _mm_srai_pi32(tmp1l, (16 - CONST_BITS)); \
  tmp1h = _mm_srai_pi32(tmp1h, (16 - CONST_BITS)); \
  \
  tmp11l = _mm_add_pi32(tmp1l, tmp2l); \
  tmp11h = _mm_add_pi32(tmp1h, tmp2h); \
  tmp12l = _mm_sub_pi32(tmp1l, tmp2l); \
  tmp12h = _mm_sub_pi32(tmp1h, tmp2h); \
  \
  /* Odd part */ \
  \
  col1l = _mm_load_si64((__m64 *)&inptr[DCTSIZE * 1]);  /* (01 11 21 31) */ \
  col3l = _mm_load_si64((__m64 *)&inptr[DCTSIZE * 3]);  /* (03 13 23 33) */ \
  col5l = _mm_load_si64((__m64 *)&inptr[DCTSIZE * 5]);  /* (05 15 25 35) */ \
  col7l = _mm_load_si64((__m64 *)&inptr[DCTSIZE * 7]);  /* (07 17 27 37) */ \
  \
  quant1l = _mm_load_si64((__m64 *)&quantptr[DCTSIZE * 1]); \
  quant3l = _mm_load_si64((__m64 *)&quantptr[DCTSIZE * 3]); \
  quant5l = _mm_load_si64((__m64 *)&quantptr[DCTSIZE * 5]); \
  quant7l = _mm_load_si64((__m64 *)&quantptr[DCTSIZE * 7]); \
  \
  tmp0 = _mm_mullo_pi16(col7l, quant7l); \
  tmp1 = _mm_mullo_pi16(col5l, quant5l); \
  tmp2 = _mm_mullo_pi16(col3l, quant3l); \
  tmp3 = _mm_mullo_pi16(col1l, quant1l); \
  \
  DO_IDCT_COMMON(1) \
  \
  /* out0=(00 10 20 30), out1=(01 11 21 31) */ \
  /* out2=(02 12 22 32), out3=(03 13 23 33) */ \
  /* out4=(04 14 24 34), out5=(05 15 25 35) */ \
  /* out6=(06 16 26 36), out7=(07 17 27 37) */ \
  \
  /* Transpose coefficients */ \
  \
  row01a = _mm_unpacklo_pi16(out0, out1);     /* row01a=(00 01 10 11) */ \
  row23a = _mm_unpackhi_pi16(out0, out1);     /* row23a=(20 21 30 31) */ \
  row01d = _mm_unpacklo_pi16(out6, out7);     /* row01d=(06 07 16 17) */ \
  row23d = _mm_unpackhi_pi16(out6, out7);     /* row23d=(26 27 36 37) */ \
  \
  row01b = _mm_unpacklo_pi16(out2, out3);     /* row01b=(02 03 12 13) */ \
  row23b = _mm_unpackhi_pi16(out2, out3);     /* row23b=(22 23 32 33) */ \
  row01c = _mm_unpacklo_pi16(out4, out5);     /* row01c=(04 05 14 15) */ \
  row23c = _mm_unpackhi_pi16(out4, out5);     /* row23c=(24 25 34 35) */ \
  \
  row0l = _mm_unpacklo_pi32(row01a, row01b);  /* row0l=(00 01 02 03) */ \
  row1l = _mm_unpackhi_pi32(row01a, row01b);  /* row1l=(10 11 12 13) */ \
  row2l = _mm_unpacklo_pi32(row23a, row23b);  /* row2l=(20 21 22 23) */ \
  row3l = _mm_unpackhi_pi32(row23a, row23b);  /* row3l=(30 31 32 33) */ \
  \
  row0h = _mm_unpacklo_pi32(row01c, row01d);  /* row0h=(04 05 06 07) */ \
  row1h = _mm_unpackhi_pi32(row01c, row01d);  /* row1h=(14 15 16 17) */ \
  row2h = _mm_unpacklo_pi32(row23c, row23d);  /* row2h=(24 25 26 27) */ \
  row3h = _mm_unpackhi_pi32(row23c, row23d);  /* row3h=(34 35 36 37) */ \
  \
  _mm_store_si64((__m64 *)&wsptr[DCTSIZE * 0], row0l); \
  _mm_store_si64((__m64 *)&wsptr[DCTSIZE * 0 + 4], row0h); \
  _mm_store_si64((__m64 *)&wsptr[DCTSIZE * 1], row1l); \
  _mm_store_si64((__m64 *)&wsptr[DCTSIZE * 1 + 4], row1h); \
  _mm_store_si64((__m64 *)&wsptr[DCTSIZE * 2], row2l); \
  _mm_store_si64((__m64 *)&wsptr[DCTSIZE * 2 + 4], row2h); \
  _mm_store_si64((__m64 *)&wsptr[DCTSIZE * 3], row3l); \
  _mm_store_si64((__m64 *)&wsptr[DCTSIZE * 3 + 4], row3h); \
}
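
/* Each invocation of DO_IDCT_PASS1() handles four columns of coefficients and
 * stores the already-transposed results into the workspace, so that
 * DO_IDCT_PASS2() can read whole rows contiguously.
 */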

#define DO_IDCT_PASS2(ctr) { \
  __m64 row0l, row1l, row2l, row3l, row4l, row5l, row6l, row7l; \
  __m64 z23, z23l, z23h; \
  __m64 col0123a, col0123b, col0123c, col0123d; \
  __m64 col01l, col01h, col23l, col23h, row06, row17, row24, row35; \
  __m64 col0, col1, col2, col3; \
  __m64 tmp0l, tmp0h, tmp1l, tmp1h, tmp2l, tmp2h, tmp3l, tmp3h; \
  __m64 tmp10l, tmp10h, tmp11l, tmp11h, tmp12l, tmp12h, tmp13l, tmp13h; \
  \
  row0l = _mm_load_si64((__m64 *)&wsptr[DCTSIZE * 0]);  /* (00 01 02 03) */ \
  row1l = _mm_load_si64((__m64 *)&wsptr[DCTSIZE * 1]);  /* (10 11 12 13) */ \
  row2l = _mm_load_si64((__m64 *)&wsptr[DCTSIZE * 2]);  /* (20 21 22 23) */ \
  row3l = _mm_load_si64((__m64 *)&wsptr[DCTSIZE * 3]);  /* (30 31 32 33) */ \
  row4l = _mm_load_si64((__m64 *)&wsptr[DCTSIZE * 4]);  /* (40 41 42 43) */ \
  row5l = _mm_load_si64((__m64 *)&wsptr[DCTSIZE * 5]);  /* (50 51 52 53) */ \
  row6l = _mm_load_si64((__m64 *)&wsptr[DCTSIZE * 6]);  /* (60 61 62 63) */ \
  row7l = _mm_load_si64((__m64 *)&wsptr[DCTSIZE * 7]);  /* (70 71 72 73) */ \
  \
  /* Even part \
   * \
   * (Original) \
   * z1 = (z2 + z3) * 0.541196100; \
   * tmp2 = z1 + z3 * -1.847759065; \
   * tmp3 = z1 + z2 * 0.765366865; \
   * \
   * (This implementation) \
   * tmp2 = z2 * 0.541196100 + z3 * (0.541196100 - 1.847759065); \
   * tmp3 = z2 * (0.541196100 + 0.765366865) + z3 * 0.541196100; \
   */ \
  \
  z23l = _mm_unpacklo_pi16(row2l, row6l); \
  z23h = _mm_unpackhi_pi16(row2l, row6l); \
  \
  tmp3l = _mm_madd_pi16(z23l, PW_F130_F054); \
  tmp3h = _mm_madd_pi16(z23h, PW_F130_F054); \
  tmp2l = _mm_madd_pi16(z23l, PW_F054_MF130); \
  tmp2h = _mm_madd_pi16(z23h, PW_F054_MF130); \
  \
  z23 = _mm_add_pi16(row0l, row4l); \
  tmp0l = _mm_loadlo_pi16_f(z23); \
  tmp0h = _mm_loadhi_pi16_f(z23); \
  tmp0l = _mm_srai_pi32(tmp0l, (16 - CONST_BITS)); \
  tmp0h = _mm_srai_pi32(tmp0h, (16 - CONST_BITS)); \
  \
  tmp10l = _mm_add_pi32(tmp0l, tmp3l); \
  tmp10h = _mm_add_pi32(tmp0h, tmp3h); \
  tmp13l = _mm_sub_pi32(tmp0l, tmp3l); \
  tmp13h = _mm_sub_pi32(tmp0h, tmp3h); \
  \
  z23 = _mm_sub_pi16(row0l, row4l); \
  tmp1l = _mm_loadlo_pi16_f(z23); \
  tmp1h = _mm_loadhi_pi16_f(z23); \
  tmp1l = _mm_srai_pi32(tmp1l, (16 - CONST_BITS)); \
  tmp1h = _mm_srai_pi32(tmp1h, (16 - CONST_BITS)); \
  \
  tmp11l = _mm_add_pi32(tmp1l, tmp2l); \
  tmp11h = _mm_add_pi32(tmp1h, tmp2h); \
  tmp12l = _mm_sub_pi32(tmp1l, tmp2l); \
  tmp12h = _mm_sub_pi32(tmp1h, tmp2h); \
  \
  /* Odd part */ \
  \
  tmp0 = row7l; \
  tmp1 = row5l; \
  tmp2 = row3l; \
  tmp3 = row1l; \
  \
  DO_IDCT_COMMON(2) \
  \
  /* out0=(00 01 02 03), out1=(10 11 12 13) */ \
  /* out2=(20 21 22 23), out3=(30 31 32 33) */ \
  /* out4=(40 41 42 43), out5=(50 51 52 53) */ \
  /* out6=(60 61 62 63), out7=(70 71 72 73) */ \
  \
  row06 = _mm_packs_pi16(out0, out6);  /* row06=(00 01 02 03 60 61 62 63) */ \
  row17 = _mm_packs_pi16(out1, out7);  /* row17=(10 11 12 13 70 71 72 73) */ \
  row24 = _mm_packs_pi16(out2, out4);  /* row24=(20 21 22 23 40 41 42 43) */ \
  row35 = _mm_packs_pi16(out3, out5);  /* row35=(30 31 32 33 50 51 52 53) */ \
  \
  row06 = _mm_add_pi8(row06, PB_CENTERJSAMP); \
  row17 = _mm_add_pi8(row17, PB_CENTERJSAMP); \
  row24 = _mm_add_pi8(row24, PB_CENTERJSAMP); \
  row35 = _mm_add_pi8(row35, PB_CENTERJSAMP); \
  \
  /* Transpose coefficients */ \
  \
  col0123a = _mm_unpacklo_pi8(row06, row17);  /* col0123a=(00 10 01 11 02 12 03 13) */ \
  col0123d = _mm_unpackhi_pi8(row06, row17);  /* col0123d=(60 70 61 71 62 72 63 73) */ \
  col0123b = _mm_unpacklo_pi8(row24, row35);  /* col0123b=(20 30 21 31 22 32 23 33) */ \
  col0123c = _mm_unpackhi_pi8(row24, row35);  /* col0123c=(40 50 41 51 42 52 43 53) */ \
  \
  col01l = _mm_unpacklo_pi16(col0123a, col0123b);  /* col01l=(00 10 20 30 01 11 21 31) */ \
  col23l = _mm_unpackhi_pi16(col0123a, col0123b);  /* col23l=(02 12 22 32 03 13 23 33) */ \
  col01h = _mm_unpacklo_pi16(col0123c, col0123d);  /* col01h=(40 50 60 70 41 51 61 71) */ \
  col23h = _mm_unpackhi_pi16(col0123c, col0123d);  /* col23h=(42 52 62 72 43 53 63 73) */ \
  \
  col0 = _mm_unpacklo_pi32(col01l, col01h);  /* col0=(00 10 20 30 40 50 60 70) */ \
  col1 = _mm_unpackhi_pi32(col01l, col01h);  /* col1=(01 11 21 31 41 51 61 71) */ \
  col2 = _mm_unpacklo_pi32(col23l, col23h);  /* col2=(02 12 22 32 42 52 62 72) */ \
  col3 = _mm_unpackhi_pi32(col23l, col23h);  /* col3=(03 13 23 33 43 53 63 73) */ \
  \
  _mm_store_si64((__m64 *)(output_buf[ctr + 0] + output_col), col0); \
  _mm_store_si64((__m64 *)(output_buf[ctr + 1] + output_col), col1); \
  _mm_store_si64((__m64 *)(output_buf[ctr + 2] + output_col), col2); \
  _mm_store_si64((__m64 *)(output_buf[ctr + 3] + output_col), col3); \
}
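
/* The function below runs pass 1 twice (columns 0-3, then columns 4-7) and
 * pass 2 twice (rows 0-3, then rows 4-7), writing the final samples to
 * output_buf[ctr..ctr + 3] + output_col.
 */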

void jsimd_idct_islow_mmi(void *dct_table, JCOEFPTR coef_block,
                          JSAMPARRAY output_buf, JDIMENSION output_col)
{
  __m64 tmp0, tmp1, tmp2, tmp3;
  __m64 out0, out1, out2, out3, out4, out5, out6, out7;
  JCOEFPTR inptr;
  ISLOW_MULT_TYPE *quantptr;
  JCOEF *wsptr;
  JCOEF workspace[DCTSIZE2];  /* buffers data between passes */

  /* Pass 1: process columns. */

  inptr = coef_block;
  quantptr = (ISLOW_MULT_TYPE *)dct_table;
  wsptr = workspace;

  DO_IDCT_PASS1(1)
nextcolumn1:
  inptr += 4;
  quantptr += 4;
  wsptr += DCTSIZE * 4;
  DO_IDCT_PASS1(2)
nextcolumn2:

  /* Pass 2: process rows. */

  wsptr = workspace;

  DO_IDCT_PASS2(0)
  wsptr += 4;
  DO_IDCT_PASS2(4)
}