jfdctint-mmi.c 17 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398
  1. /*
  2. * Loongson MMI optimizations for libjpeg-turbo
  3. *
  4. * Copyright (C) 2014, 2018, D. R. Commander. All Rights Reserved.
  5. * Copyright (C) 2016-2017, Loongson Technology Corporation Limited, BeiJing.
  6. * All Rights Reserved.
  7. * Authors: ZhuChen <zhuchen@loongson.cn>
  8. * CaiWanwei <caiwanwei@loongson.cn>
  9. * SunZhangzhi <sunzhangzhi-cq@loongson.cn>
  10. *
  11. * Based on the x86 SIMD extension for IJG JPEG library
  12. * Copyright (C) 1999-2006, MIYASAKA Masaru.
  13. *
  14. * This software is provided 'as-is', without any express or implied
  15. * warranty. In no event will the authors be held liable for any damages
  16. * arising from the use of this software.
  17. *
  18. * Permission is granted to anyone to use this software for any purpose,
  19. * including commercial applications, and to alter it and redistribute it
  20. * freely, subject to the following restrictions:
  21. *
  22. * 1. The origin of this software must not be misrepresented; you must not
  23. * claim that you wrote the original software. If you use this software
  24. * in a product, an acknowledgment in the product documentation would be
  25. * appreciated but is not required.
  26. * 2. Altered source versions must be plainly marked as such, and must not be
  27. * misrepresented as being the original software.
  28. * 3. This notice may not be removed or altered from any source distribution.
  29. */
  30. /* SLOW INTEGER FORWARD DCT */
  31. #include "jsimd_mmi.h"
  32. #define CONST_BITS 13
  33. #define PASS1_BITS 2
  34. #define DESCALE_P1 (CONST_BITS - PASS1_BITS)
  35. #define DESCALE_P2 (CONST_BITS + PASS1_BITS)
  36. #define FIX_0_298 ((short)2446) /* FIX(0.298631336) */
  37. #define FIX_0_390 ((short)3196) /* FIX(0.390180644) */
  38. #define FIX_0_541 ((short)4433) /* FIX(0.541196100) */
  39. #define FIX_0_765 ((short)6270) /* FIX(0.765366865) */
  40. #define FIX_0_899 ((short)7373) /* FIX(0.899976223) */
  41. #define FIX_1_175 ((short)9633) /* FIX(1.175875602) */
  42. #define FIX_1_501 ((short)12299) /* FIX(1.501321110) */
  43. #define FIX_1_847 ((short)15137) /* FIX(1.847759065) */
  44. #define FIX_1_961 ((short)16069) /* FIX(1.961570560) */
  45. #define FIX_2_053 ((short)16819) /* FIX(2.053119869) */
  46. #define FIX_2_562 ((short)20995) /* FIX(2.562915447) */
  47. #define FIX_3_072 ((short)25172) /* FIX(3.072711026) */
  48. enum const_index {
  49. index_PW_F130_F054,
  50. index_PW_F054_MF130,
  51. index_PW_MF078_F117,
  52. index_PW_F117_F078,
  53. index_PW_MF060_MF089,
  54. index_PW_MF089_F060,
  55. index_PW_MF050_MF256,
  56. index_PW_MF256_F050,
  57. index_PD_DESCALE_P1,
  58. index_PD_DESCALE_P2,
  59. index_PW_DESCALE_P2X
  60. };
  61. static uint64_t const_value[] = {
  62. _uint64_set_pi16(FIX_0_541, (FIX_0_541 + FIX_0_765),
  63. FIX_0_541, (FIX_0_541 + FIX_0_765)),
  64. _uint64_set_pi16((FIX_0_541 - FIX_1_847), FIX_0_541,
  65. (FIX_0_541 - FIX_1_847), FIX_0_541),
  66. _uint64_set_pi16(FIX_1_175, (FIX_1_175 - FIX_1_961),
  67. FIX_1_175, (FIX_1_175 - FIX_1_961)),
  68. _uint64_set_pi16((FIX_1_175 - FIX_0_390), FIX_1_175,
  69. (FIX_1_175 - FIX_0_390), FIX_1_175),
  70. _uint64_set_pi16(-FIX_0_899, (FIX_0_298 - FIX_0_899),
  71. -FIX_0_899, (FIX_0_298 - FIX_0_899)),
  72. _uint64_set_pi16((FIX_1_501 - FIX_0_899), -FIX_0_899,
  73. (FIX_1_501 - FIX_0_899), -FIX_0_899),
  74. _uint64_set_pi16(-FIX_2_562, (FIX_2_053 - FIX_2_562),
  75. -FIX_2_562, (FIX_2_053 - FIX_2_562)),
  76. _uint64_set_pi16((FIX_3_072 - FIX_2_562), -FIX_2_562,
  77. (FIX_3_072 - FIX_2_562), -FIX_2_562),
  78. _uint64_set_pi32((1 << (DESCALE_P1 - 1)), (1 << (DESCALE_P1 - 1))),
  79. _uint64_set_pi32((1 << (DESCALE_P2 - 1)), (1 << (DESCALE_P2 - 1))),
  80. _uint64_set_pi16((1 << (PASS1_BITS - 1)), (1 << (PASS1_BITS - 1)),
  81. (1 << (PASS1_BITS - 1)), (1 << (PASS1_BITS - 1)))
  82. };
  83. #define PW_F130_F054 get_const_value(index_PW_F130_F054)
  84. #define PW_F054_MF130 get_const_value(index_PW_F054_MF130)
  85. #define PW_MF078_F117 get_const_value(index_PW_MF078_F117)
  86. #define PW_F117_F078 get_const_value(index_PW_F117_F078)
  87. #define PW_MF060_MF089 get_const_value(index_PW_MF060_MF089)
  88. #define PW_MF089_F060 get_const_value(index_PW_MF089_F060)
  89. #define PW_MF050_MF256 get_const_value(index_PW_MF050_MF256)
  90. #define PW_MF256_F050 get_const_value(index_PW_MF256_F050)
  91. #define PD_DESCALE_P1 get_const_value(index_PD_DESCALE_P1)
  92. #define PD_DESCALE_P2 get_const_value(index_PD_DESCALE_P2)
  93. #define PW_DESCALE_P2X get_const_value(index_PW_DESCALE_P2X)
  94. #define DO_FDCT_COMMON(PASS) { \
  95. __m64 tmp1312l, tmp1312h, tmp47l, tmp47h, tmp4l, tmp4h, tmp7l, tmp7h; \
  96. __m64 tmp56l, tmp56h, tmp5l, tmp5h, tmp6l, tmp6h; \
  97. __m64 out1l, out1h, out2l, out2h, out3l, out3h; \
  98. __m64 out5l, out5h, out6l, out6h, out7l, out7h; \
  99. __m64 z34l, z34h, z3l, z3h, z4l, z4h, z3, z4; \
  100. \
  101. /* (Original) \
  102. * z1 = (tmp12 + tmp13) * 0.541196100; \
  103. * out2 = z1 + tmp13 * 0.765366865; \
  104. * out6 = z1 + tmp12 * -1.847759065; \
  105. * \
  106. * (This implementation) \
  107. * out2 = tmp13 * (0.541196100 + 0.765366865) + tmp12 * 0.541196100; \
  108. * out6 = tmp13 * 0.541196100 + tmp12 * (0.541196100 - 1.847759065); \
  109. */ \
  110. \
  111. tmp1312l = _mm_unpacklo_pi16(tmp13, tmp12); \
  112. tmp1312h = _mm_unpackhi_pi16(tmp13, tmp12); \
  113. \
  114. out2l = _mm_madd_pi16(tmp1312l, PW_F130_F054); \
  115. out2h = _mm_madd_pi16(tmp1312h, PW_F130_F054); \
  116. out6l = _mm_madd_pi16(tmp1312l, PW_F054_MF130); \
  117. out6h = _mm_madd_pi16(tmp1312h, PW_F054_MF130); \
  118. \
  119. out2l = _mm_add_pi32(out2l, PD_DESCALE_P##PASS); \
  120. out2h = _mm_add_pi32(out2h, PD_DESCALE_P##PASS); \
  121. out2l = _mm_srai_pi32(out2l, DESCALE_P##PASS); \
  122. out2h = _mm_srai_pi32(out2h, DESCALE_P##PASS); \
  123. \
  124. out6l = _mm_add_pi32(out6l, PD_DESCALE_P##PASS); \
  125. out6h = _mm_add_pi32(out6h, PD_DESCALE_P##PASS); \
  126. out6l = _mm_srai_pi32(out6l, DESCALE_P##PASS); \
  127. out6h = _mm_srai_pi32(out6h, DESCALE_P##PASS); \
  128. \
  129. out2 = _mm_packs_pi32(out2l, out2h); \
  130. out6 = _mm_packs_pi32(out6l, out6h); \
  131. \
  132. /* Odd part */ \
  133. \
  134. z3 = _mm_add_pi16(tmp4, tmp6); \
  135. z4 = _mm_add_pi16(tmp5, tmp7); \
  136. \
  137. /* (Original) \
  138. * z5 = (z3 + z4) * 1.175875602; \
  139. * z3 = z3 * -1.961570560; z4 = z4 * -0.390180644; \
  140. * z3 += z5; z4 += z5; \
  141. * \
  142. * (This implementation) \
  143. * z3 = z3 * (1.175875602 - 1.961570560) + z4 * 1.175875602; \
  144. * z4 = z3 * 1.175875602 + z4 * (1.175875602 - 0.390180644); \
  145. */ \
  146. \
  147. z34l = _mm_unpacklo_pi16(z3, z4); \
  148. z34h = _mm_unpackhi_pi16(z3, z4); \
  149. z3l = _mm_madd_pi16(z34l, PW_MF078_F117); \
  150. z3h = _mm_madd_pi16(z34h, PW_MF078_F117); \
  151. z4l = _mm_madd_pi16(z34l, PW_F117_F078); \
  152. z4h = _mm_madd_pi16(z34h, PW_F117_F078); \
  153. \
  154. /* (Original) \
  155. * z1 = tmp4 + tmp7; z2 = tmp5 + tmp6; \
  156. * tmp4 = tmp4 * 0.298631336; tmp5 = tmp5 * 2.053119869; \
  157. * tmp6 = tmp6 * 3.072711026; tmp7 = tmp7 * 1.501321110; \
  158. * z1 = z1 * -0.899976223; z2 = z2 * -2.562915447; \
  159. * out7 = tmp4 + z1 + z3; out5 = tmp5 + z2 + z4; \
  160. * out3 = tmp6 + z2 + z3; out1 = tmp7 + z1 + z4; \
  161. * \
  162. * (This implementation) \
  163. * tmp4 = tmp4 * (0.298631336 - 0.899976223) + tmp7 * -0.899976223; \
  164. * tmp5 = tmp5 * (2.053119869 - 2.562915447) + tmp6 * -2.562915447; \
  165. * tmp6 = tmp5 * -2.562915447 + tmp6 * (3.072711026 - 2.562915447); \
  166. * tmp7 = tmp4 * -0.899976223 + tmp7 * (1.501321110 - 0.899976223); \
  167. * out7 = tmp4 + z3; out5 = tmp5 + z4; \
  168. * out3 = tmp6 + z3; out1 = tmp7 + z4; \
  169. */ \
  170. \
  171. tmp47l = _mm_unpacklo_pi16(tmp4, tmp7); \
  172. tmp47h = _mm_unpackhi_pi16(tmp4, tmp7); \
  173. \
  174. tmp4l = _mm_madd_pi16(tmp47l, PW_MF060_MF089); \
  175. tmp4h = _mm_madd_pi16(tmp47h, PW_MF060_MF089); \
  176. tmp7l = _mm_madd_pi16(tmp47l, PW_MF089_F060); \
  177. tmp7h = _mm_madd_pi16(tmp47h, PW_MF089_F060); \
  178. \
  179. out7l = _mm_add_pi32(tmp4l, z3l); \
  180. out7h = _mm_add_pi32(tmp4h, z3h); \
  181. out1l = _mm_add_pi32(tmp7l, z4l); \
  182. out1h = _mm_add_pi32(tmp7h, z4h); \
  183. \
  184. out7l = _mm_add_pi32(out7l, PD_DESCALE_P##PASS); \
  185. out7h = _mm_add_pi32(out7h, PD_DESCALE_P##PASS); \
  186. out7l = _mm_srai_pi32(out7l, DESCALE_P##PASS); \
  187. out7h = _mm_srai_pi32(out7h, DESCALE_P##PASS); \
  188. \
  189. out1l = _mm_add_pi32(out1l, PD_DESCALE_P##PASS); \
  190. out1h = _mm_add_pi32(out1h, PD_DESCALE_P##PASS); \
  191. out1l = _mm_srai_pi32(out1l, DESCALE_P##PASS); \
  192. out1h = _mm_srai_pi32(out1h, DESCALE_P##PASS); \
  193. \
  194. out7 = _mm_packs_pi32(out7l, out7h); \
  195. out1 = _mm_packs_pi32(out1l, out1h); \
  196. \
  197. tmp56l = _mm_unpacklo_pi16(tmp5, tmp6); \
  198. tmp56h = _mm_unpackhi_pi16(tmp5, tmp6); \
  199. \
  200. tmp5l = _mm_madd_pi16(tmp56l, PW_MF050_MF256); \
  201. tmp5h = _mm_madd_pi16(tmp56h, PW_MF050_MF256); \
  202. tmp6l = _mm_madd_pi16(tmp56l, PW_MF256_F050); \
  203. tmp6h = _mm_madd_pi16(tmp56h, PW_MF256_F050); \
  204. \
  205. out5l = _mm_add_pi32(tmp5l, z4l); \
  206. out5h = _mm_add_pi32(tmp5h, z4h); \
  207. out3l = _mm_add_pi32(tmp6l, z3l); \
  208. out3h = _mm_add_pi32(tmp6h, z3h); \
  209. \
  210. out5l = _mm_add_pi32(out5l, PD_DESCALE_P##PASS); \
  211. out5h = _mm_add_pi32(out5h, PD_DESCALE_P##PASS); \
  212. out5l = _mm_srai_pi32(out5l, DESCALE_P##PASS); \
  213. out5h = _mm_srai_pi32(out5h, DESCALE_P##PASS); \
  214. \
  215. out3l = _mm_add_pi32(out3l, PD_DESCALE_P##PASS); \
  216. out3h = _mm_add_pi32(out3h, PD_DESCALE_P##PASS); \
  217. out3l = _mm_srai_pi32(out3l, DESCALE_P##PASS); \
  218. out3h = _mm_srai_pi32(out3h, DESCALE_P##PASS); \
  219. \
  220. out5 = _mm_packs_pi32(out5l, out5h); \
  221. out3 = _mm_packs_pi32(out3l, out3h); \
  222. }
  223. #define DO_FDCT_PASS1() { \
  224. __m64 row0l, row0h, row1l, row1h, row2l, row2h, row3l, row3h; \
  225. __m64 row01a, row01b, row01c, row01d, row23a, row23b, row23c, row23d; \
  226. __m64 col0, col1, col2, col3, col4, col5, col6, col7; \
  227. __m64 tmp10, tmp11; \
  228. \
  229. row0l = _mm_load_si64((__m64 *)&dataptr[DCTSIZE * 0]); /* (00 01 02 03) */ \
  230. row0h = _mm_load_si64((__m64 *)&dataptr[DCTSIZE * 0 + 4]); /* (04 05 06 07) */ \
  231. row1l = _mm_load_si64((__m64 *)&dataptr[DCTSIZE * 1]); /* (10 11 12 13) */ \
  232. row1h = _mm_load_si64((__m64 *)&dataptr[DCTSIZE * 1 + 4]); /* (14 15 16 17) */ \
  233. row2l = _mm_load_si64((__m64 *)&dataptr[DCTSIZE * 2]); /* (20 21 22 23) */ \
  234. row2h = _mm_load_si64((__m64 *)&dataptr[DCTSIZE * 2 + 4]); /* (24 25 26 27) */ \
  235. row3l = _mm_load_si64((__m64 *)&dataptr[DCTSIZE * 3]); /* (30 31 32 33) */ \
  236. row3h = _mm_load_si64((__m64 *)&dataptr[DCTSIZE * 3 + 4]); /* (34 35 36 37) */ \
  237. \
  238. /* Transpose coefficients */ \
  239. \
  240. row23a = _mm_unpacklo_pi16(row2l, row3l); /* row23a=(20 30 21 31) */ \
  241. row23b = _mm_unpackhi_pi16(row2l, row3l); /* row23b=(22 32 23 33) */ \
  242. row23c = _mm_unpacklo_pi16(row2h, row3h); /* row23c=(24 34 25 35) */ \
  243. row23d = _mm_unpackhi_pi16(row2h, row3h); /* row23d=(26 36 27 37) */ \
  244. \
  245. row01a = _mm_unpacklo_pi16(row0l, row1l); /* row01a=(00 10 01 11) */ \
  246. row01b = _mm_unpackhi_pi16(row0l, row1l); /* row01b=(02 12 03 13) */ \
  247. row01c = _mm_unpacklo_pi16(row0h, row1h); /* row01c=(04 14 05 15) */ \
  248. row01d = _mm_unpackhi_pi16(row0h, row1h); /* row01d=(06 16 07 17) */ \
  249. \
  250. col0 = _mm_unpacklo_pi32(row01a, row23a); /* col0=(00 10 20 30) */ \
  251. col1 = _mm_unpackhi_pi32(row01a, row23a); /* col1=(01 11 21 31) */ \
  252. col6 = _mm_unpacklo_pi32(row01d, row23d); /* col6=(06 16 26 36) */ \
  253. col7 = _mm_unpackhi_pi32(row01d, row23d); /* col7=(07 17 27 37) */ \
  254. \
  255. tmp6 = _mm_sub_pi16(col1, col6); /* tmp6=col1-col6 */ \
  256. tmp7 = _mm_sub_pi16(col0, col7); /* tmp7=col0-col7 */ \
  257. tmp1 = _mm_add_pi16(col1, col6); /* tmp1=col1+col6 */ \
  258. tmp0 = _mm_add_pi16(col0, col7); /* tmp0=col0+col7 */ \
  259. \
  260. col2 = _mm_unpacklo_pi32(row01b, row23b); /* col2=(02 12 22 32) */ \
  261. col3 = _mm_unpackhi_pi32(row01b, row23b); /* col3=(03 13 23 33) */ \
  262. col4 = _mm_unpacklo_pi32(row01c, row23c); /* col4=(04 14 24 34) */ \
  263. col5 = _mm_unpackhi_pi32(row01c, row23c); /* col5=(05 15 25 35) */ \
  264. \
  265. tmp3 = _mm_add_pi16(col3, col4); /* tmp3=col3+col4 */ \
  266. tmp2 = _mm_add_pi16(col2, col5); /* tmp2=col2+col5 */ \
  267. tmp4 = _mm_sub_pi16(col3, col4); /* tmp4=col3-col4 */ \
  268. tmp5 = _mm_sub_pi16(col2, col5); /* tmp5=col2-col5 */ \
  269. \
  270. /* Even part */ \
  271. \
  272. tmp10 = _mm_add_pi16(tmp0, tmp3); /* tmp10=tmp0+tmp3 */ \
  273. tmp13 = _mm_sub_pi16(tmp0, tmp3); /* tmp13=tmp0-tmp3 */ \
  274. tmp11 = _mm_add_pi16(tmp1, tmp2); /* tmp11=tmp1+tmp2 */ \
  275. tmp12 = _mm_sub_pi16(tmp1, tmp2); /* tmp12=tmp1-tmp2 */ \
  276. \
  277. out0 = _mm_add_pi16(tmp10, tmp11); /* out0=tmp10+tmp11 */ \
  278. out4 = _mm_sub_pi16(tmp10, tmp11); /* out4=tmp10-tmp11 */ \
  279. out0 = _mm_slli_pi16(out0, PASS1_BITS); \
  280. out4 = _mm_slli_pi16(out4, PASS1_BITS); \
  281. \
  282. DO_FDCT_COMMON(1) \
  283. \
  284. _mm_store_si64((__m64 *)&dataptr[DCTSIZE * 0], out0); \
  285. _mm_store_si64((__m64 *)&dataptr[DCTSIZE * 0 + 4], out4); \
  286. _mm_store_si64((__m64 *)&dataptr[DCTSIZE * 1], out1); \
  287. _mm_store_si64((__m64 *)&dataptr[DCTSIZE * 1 + 4], out5); \
  288. _mm_store_si64((__m64 *)&dataptr[DCTSIZE * 2], out2); \
  289. _mm_store_si64((__m64 *)&dataptr[DCTSIZE * 2 + 4], out6); \
  290. _mm_store_si64((__m64 *)&dataptr[DCTSIZE * 3], out3); \
  291. _mm_store_si64((__m64 *)&dataptr[DCTSIZE * 3 + 4], out7); \
  292. }
  293. #define DO_FDCT_PASS2() { \
  294. __m64 col0l, col0h, col1l, col1h, col2l, col2h, col3l, col3h; \
  295. __m64 col01a, col01b, col01c, col01d, col23a, col23b, col23c, col23d; \
  296. __m64 row0, row1, row2, row3, row4, row5, row6, row7; \
  297. __m64 tmp10, tmp11; \
  298. \
  299. col0l = _mm_load_si64((__m64 *)&dataptr[DCTSIZE * 0]); /* (00 10 20 30) */ \
  300. col1l = _mm_load_si64((__m64 *)&dataptr[DCTSIZE * 1]); /* (01 11 21 31) */ \
  301. col2l = _mm_load_si64((__m64 *)&dataptr[DCTSIZE * 2]); /* (02 12 22 32) */ \
  302. col3l = _mm_load_si64((__m64 *)&dataptr[DCTSIZE * 3]); /* (03 13 23 33) */ \
  303. col0h = _mm_load_si64((__m64 *)&dataptr[DCTSIZE * 4]); /* (40 50 60 70) */ \
  304. col1h = _mm_load_si64((__m64 *)&dataptr[DCTSIZE * 5]); /* (41 51 61 71) */ \
  305. col2h = _mm_load_si64((__m64 *)&dataptr[DCTSIZE * 6]); /* (42 52 62 72) */ \
  306. col3h = _mm_load_si64((__m64 *)&dataptr[DCTSIZE * 7]); /* (43 53 63 73) */ \
  307. \
  308. /* Transpose coefficients */ \
  309. \
  310. col23a = _mm_unpacklo_pi16(col2l, col3l); /* col23a=(02 03 12 13) */ \
  311. col23b = _mm_unpackhi_pi16(col2l, col3l); /* col23b=(22 23 32 33) */ \
  312. col23c = _mm_unpacklo_pi16(col2h, col3h); /* col23c=(42 43 52 53) */ \
  313. col23d = _mm_unpackhi_pi16(col2h, col3h); /* col23d=(62 63 72 73) */ \
  314. \
  315. col01a = _mm_unpacklo_pi16(col0l, col1l); /* col01a=(00 01 10 11) */ \
  316. col01b = _mm_unpackhi_pi16(col0l, col1l); /* col01b=(20 21 30 31) */ \
  317. col01c = _mm_unpacklo_pi16(col0h, col1h); /* col01c=(40 41 50 51) */ \
  318. col01d = _mm_unpackhi_pi16(col0h, col1h); /* col01d=(60 61 70 71) */ \
  319. \
  320. row0 = _mm_unpacklo_pi32(col01a, col23a); /* row0=(00 01 02 03) */ \
  321. row1 = _mm_unpackhi_pi32(col01a, col23a); /* row1=(10 11 12 13) */ \
  322. row6 = _mm_unpacklo_pi32(col01d, col23d); /* row6=(60 61 62 63) */ \
  323. row7 = _mm_unpackhi_pi32(col01d, col23d); /* row7=(70 71 72 73) */ \
  324. \
  325. tmp6 = _mm_sub_pi16(row1, row6); /* tmp6=row1-row6 */ \
  326. tmp7 = _mm_sub_pi16(row0, row7); /* tmp7=row0-row7 */ \
  327. tmp1 = _mm_add_pi16(row1, row6); /* tmp1=row1+row6 */ \
  328. tmp0 = _mm_add_pi16(row0, row7); /* tmp0=row0+row7 */ \
  329. \
  330. row2 = _mm_unpacklo_pi32(col01b, col23b); /* row2=(20 21 22 23) */ \
  331. row3 = _mm_unpackhi_pi32(col01b, col23b); /* row3=(30 31 32 33) */ \
  332. row4 = _mm_unpacklo_pi32(col01c, col23c); /* row4=(40 41 42 43) */ \
  333. row5 = _mm_unpackhi_pi32(col01c, col23c); /* row5=(50 51 52 53) */ \
  334. \
  335. tmp3 = _mm_add_pi16(row3, row4); /* tmp3=row3+row4 */ \
  336. tmp2 = _mm_add_pi16(row2, row5); /* tmp2=row2+row5 */ \
  337. tmp4 = _mm_sub_pi16(row3, row4); /* tmp4=row3-row4 */ \
  338. tmp5 = _mm_sub_pi16(row2, row5); /* tmp5=row2-row5 */ \
  339. \
  340. /* Even part */ \
  341. \
  342. tmp10 = _mm_add_pi16(tmp0, tmp3); /* tmp10=tmp0+tmp3 */ \
  343. tmp13 = _mm_sub_pi16(tmp0, tmp3); /* tmp13=tmp0-tmp3 */ \
  344. tmp11 = _mm_add_pi16(tmp1, tmp2); /* tmp11=tmp1+tmp2 */ \
  345. tmp12 = _mm_sub_pi16(tmp1, tmp2); /* tmp12=tmp1-tmp2 */ \
  346. \
  347. out0 = _mm_add_pi16(tmp10, tmp11); /* out0=tmp10+tmp11 */ \
  348. out4 = _mm_sub_pi16(tmp10, tmp11); /* out4=tmp10-tmp11 */ \
  349. \
  350. out0 = _mm_add_pi16(out0, PW_DESCALE_P2X); \
  351. out4 = _mm_add_pi16(out4, PW_DESCALE_P2X); \
  352. out0 = _mm_srai_pi16(out0, PASS1_BITS); \
  353. out4 = _mm_srai_pi16(out4, PASS1_BITS); \
  354. \
  355. DO_FDCT_COMMON(2) \
  356. \
  357. _mm_store_si64((__m64 *)&dataptr[DCTSIZE * 0], out0); \
  358. _mm_store_si64((__m64 *)&dataptr[DCTSIZE * 1], out1); \
  359. _mm_store_si64((__m64 *)&dataptr[DCTSIZE * 2], out2); \
  360. _mm_store_si64((__m64 *)&dataptr[DCTSIZE * 3], out3); \
  361. _mm_store_si64((__m64 *)&dataptr[DCTSIZE * 4], out4); \
  362. _mm_store_si64((__m64 *)&dataptr[DCTSIZE * 5], out5); \
  363. _mm_store_si64((__m64 *)&dataptr[DCTSIZE * 6], out6); \
  364. _mm_store_si64((__m64 *)&dataptr[DCTSIZE * 7], out7); \
  365. }
  366. void jsimd_fdct_islow_mmi(DCTELEM *data)
  367. {
  368. __m64 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
  369. __m64 out0, out1, out2, out3, out4, out5, out6, out7;
  370. __m64 tmp12, tmp13;
  371. DCTELEM *dataptr = data;
  372. /* Pass 1: process rows. */
  373. DO_FDCT_PASS1()
  374. dataptr += DCTSIZE * 4;
  375. DO_FDCT_PASS1()
  376. /* Pass 2: process columns. */
  377. dataptr = data;
  378. DO_FDCT_PASS2()
  379. dataptr += 4;
  380. DO_FDCT_PASS2()
  381. }