/*
 * Loongson MMI optimizations for libjpeg-turbo
 *
 * Copyright 2009 Pierre Ossman <ossman@cendio.se> for Cendio AB
 * Copyright (C) 2015, D. R. Commander. All Rights Reserved.
 * Copyright (C) 2016-2017, Loongson Technology Corporation Limited, BeiJing.
 *                          All Rights Reserved.
 * Authors:  ZhuChen     <zhuchen@loongson.cn>
 *           SunZhangzhi <sunzhangzhi-cq@loongson.cn>
 *           CaiWanwei   <caiwanwei@loongson.cn>
 *
 * Based on the x86 SIMD extension for IJG JPEG library
 * Copyright (C) 1999-2006, MIYASAKA Masaru.
 *
 * This software is provided 'as-is', without any express or implied
 * warranty. In no event will the authors be held liable for any damages
 * arising from the use of this software.
 *
 * Permission is granted to anyone to use this software for any purpose,
 * including commercial applications, and to alter it and redistribute it
 * freely, subject to the following restrictions:
 *
 * 1. The origin of this software must not be misrepresented; you must not
 *    claim that you wrote the original software. If you use this software
 *    in a product, an acknowledgment in the product documentation would be
 *    appreciated but is not required.
 * 2. Altered source versions must be plainly marked as such, and must not be
 *    misrepresented as being the original software.
 * 3. This notice may not be removed or altered from any source distribution.
 */

/* This file is included by jdcolor-mmi.c */


#if RGB_RED == 0
#define mmA  mm0
#define mmB  mm1
#elif RGB_GREEN == 0
#define mmA  mm2
#define mmB  mm3
#elif RGB_BLUE == 0
#define mmA  mm4
#define mmB  mm5
#else
#define mmA  mm6
#define mmB  mm7
#endif

#if RGB_RED == 1
#define mmC  mm0
#define mmD  mm1
#elif RGB_GREEN == 1
#define mmC  mm2
#define mmD  mm3
#elif RGB_BLUE == 1
#define mmC  mm4
#define mmD  mm5
#else
#define mmC  mm6
#define mmD  mm7
#endif

#if RGB_RED == 2
#define mmE  mm0
#define mmF  mm1
#elif RGB_GREEN == 2
#define mmE  mm2
#define mmF  mm3
#elif RGB_BLUE == 2
#define mmE  mm4
#define mmF  mm5
#else
#define mmE  mm6
#define mmF  mm7
#endif

#if RGB_RED == 3
#define mmG  mm0
#define mmH  mm1
#elif RGB_GREEN == 3
#define mmG  mm2
#define mmH  mm3
#elif RGB_BLUE == 3
#define mmG  mm4
#define mmH  mm5
#else
#define mmG  mm6
#define mmH  mm7
#endif

void jsimd_ycc_rgb_convert_mmi(JDIMENSION out_width, JSAMPIMAGE input_buf,
                               JDIMENSION input_row, JSAMPARRAY output_buf,
                               int num_rows)
{
  JSAMPROW outptr, inptr0, inptr1, inptr2;
  int num_cols, col;
  __m64 mm0, mm1, mm2, mm3, mm4, mm5, mm6, mm7;
  __m64 mm8, wk[2];

  while (--num_rows >= 0) {
    inptr0 = input_buf[0][input_row];
    inptr1 = input_buf[1][input_row];
    inptr2 = input_buf[2][input_row];
    input_row++;
    outptr = *output_buf++;

    for (num_cols = out_width; num_cols > 0; num_cols -= 8,
         inptr0 += 8, inptr1 += 8, inptr2 += 8) {

      mm5 = _mm_load_si64((__m64 *)inptr1);
      mm1 = _mm_load_si64((__m64 *)inptr2);
      mm8 = _mm_load_si64((__m64 *)inptr0);
      mm4 = 0;
      mm7 = 0;
      mm4 = _mm_cmpeq_pi16(mm4, mm4);
      mm7 = _mm_cmpeq_pi16(mm7, mm7);
      mm4 = _mm_srli_pi16(mm4, BYTE_BIT);
      mm7 = _mm_slli_pi16(mm7, 7);           /* mm7={0xFF80 0xFF80 0xFF80 0xFF80} */
      mm0 = mm4;                             /* mm0=mm4={0xFF 0x00 0xFF 0x00 ..} */

      mm4 = _mm_and_si64(mm4, mm5);          /* mm4=Cb(0246)=CbE */
      mm5 = _mm_srli_pi16(mm5, BYTE_BIT);    /* mm5=Cb(1357)=CbO */
      mm0 = _mm_and_si64(mm0, mm1);          /* mm0=Cr(0246)=CrE */
      mm1 = _mm_srli_pi16(mm1, BYTE_BIT);    /* mm1=Cr(1357)=CrO */

      mm4 = _mm_add_pi16(mm4, mm7);
      mm5 = _mm_add_pi16(mm5, mm7);
      mm0 = _mm_add_pi16(mm0, mm7);
      mm1 = _mm_add_pi16(mm1, mm7);
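
      /* Each Cb/Cr byte now occupies the low half of a 16-bit lane; adding
       * 0xFF80 (-128 as a signed word) removes the 128 chroma bias, so the
       * even/odd Cb and Cr lanes hold signed values in the range -128..127.
       */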

      /* (Original)
       * R = Y                + 1.40200 * Cr
       * G = Y - 0.34414 * Cb - 0.71414 * Cr
       * B = Y + 1.77200 * Cb
       *
       * (This implementation)
       * R = Y                + 0.40200 * Cr + Cr
       * G = Y - 0.34414 * Cb + 0.28586 * Cr - Cr
       * B = Y - 0.22800 * Cb + Cb + Cb
       */
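      /* The rewritten form keeps every fractional multiplier below 1.0:
       * 1.40200 = 1 + 0.40200, -0.71414 = 0.28586 - 1, and
       * 1.77200 = 2 - 0.22800.  Scaled by 2^16, a constant such as
       * FIX(1.40200) would overflow a signed 16-bit word, whereas
       * FIX(0.40200) etc. fit, so the whole-number parts are applied as
       * plain additions/subtractions of Cb and Cr.
       */
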
      mm2 = mm4;                             /* mm2 = CbE */
      mm3 = mm5;                             /* mm3 = CbO */
      mm4 = _mm_add_pi16(mm4, mm4);          /* mm4 = 2*CbE */
      mm5 = _mm_add_pi16(mm5, mm5);          /* mm5 = 2*CbO */
      mm6 = mm0;                             /* mm6 = CrE */
      mm7 = mm1;                             /* mm7 = CrO */
      mm0 = _mm_add_pi16(mm0, mm0);          /* mm0 = 2*CrE */
      mm1 = _mm_add_pi16(mm1, mm1);          /* mm1 = 2*CrO */
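
      /* _mm_mulhi_pi16 keeps only the upper 16 bits of each 32-bit product,
       * i.e. (x * FIX(c)) >> 16.  Multiplying the doubled operand and then
       * descaling with "+ PW_ONE; >> 1" preserves one extra bit of precision,
       * rounding the product instead of simply truncating it.
       */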
      mm4 = _mm_mulhi_pi16(mm4, PW_MF0228);  /* mm4=(2*CbE * -FIX(0.22800)) */
      mm5 = _mm_mulhi_pi16(mm5, PW_MF0228);  /* mm5=(2*CbO * -FIX(0.22800)) */
      mm0 = _mm_mulhi_pi16(mm0, PW_F0402);   /* mm0=(2*CrE * FIX(0.40200)) */
      mm1 = _mm_mulhi_pi16(mm1, PW_F0402);   /* mm1=(2*CrO * FIX(0.40200)) */

      mm4 = _mm_add_pi16(mm4, PW_ONE);
      mm5 = _mm_add_pi16(mm5, PW_ONE);
      mm4 = _mm_srai_pi16(mm4, 1);           /* mm4=(CbE * -FIX(0.22800)) */
      mm5 = _mm_srai_pi16(mm5, 1);           /* mm5=(CbO * -FIX(0.22800)) */

      mm0 = _mm_add_pi16(mm0, PW_ONE);
      mm1 = _mm_add_pi16(mm1, PW_ONE);
      mm0 = _mm_srai_pi16(mm0, 1);           /* mm0=(CrE * FIX(0.40200)) */
      mm1 = _mm_srai_pi16(mm1, 1);           /* mm1=(CrO * FIX(0.40200)) */

      mm4 = _mm_add_pi16(mm4, mm2);
      mm5 = _mm_add_pi16(mm5, mm3);
      mm4 = _mm_add_pi16(mm4, mm2);          /* mm4=(CbE * FIX(1.77200))=(B-Y)E */
      mm5 = _mm_add_pi16(mm5, mm3);          /* mm5=(CbO * FIX(1.77200))=(B-Y)O */
      mm0 = _mm_add_pi16(mm0, mm6);          /* mm0=(CrE * FIX(1.40200))=(R-Y)E */
      mm1 = _mm_add_pi16(mm1, mm7);          /* mm1=(CrO * FIX(1.40200))=(R-Y)O */

      wk[0] = mm4;                           /* wk(0)=(B-Y)E */
      wk[1] = mm5;                           /* wk(1)=(B-Y)O */

      mm4 = mm2;
      mm5 = mm3;
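
      /* Interleave the Cb and Cr words so that _mm_madd_pi16 with the packed
       * constant pair (-FIX(0.34414), FIX(0.28586)) accumulates
       * Cb*-0.34414 + Cr*0.28586 in each 32-bit lane for the green channel;
       * PD_ONEHALF and the SCALEBITS shift then perform a rounding descale
       * back to 16-bit words.
       */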
      mm2 = _mm_unpacklo_pi16(mm2, mm6);
      mm4 = _mm_unpackhi_pi16(mm4, mm6);
      mm2 = _mm_madd_pi16(mm2, PW_MF0344_F0285);
      mm4 = _mm_madd_pi16(mm4, PW_MF0344_F0285);
      mm3 = _mm_unpacklo_pi16(mm3, mm7);
      mm5 = _mm_unpackhi_pi16(mm5, mm7);
      mm3 = _mm_madd_pi16(mm3, PW_MF0344_F0285);
      mm5 = _mm_madd_pi16(mm5, PW_MF0344_F0285);

      mm2 = _mm_add_pi32(mm2, PD_ONEHALF);
      mm4 = _mm_add_pi32(mm4, PD_ONEHALF);
      mm2 = _mm_srai_pi32(mm2, SCALEBITS);
      mm4 = _mm_srai_pi32(mm4, SCALEBITS);
      mm3 = _mm_add_pi32(mm3, PD_ONEHALF);
      mm5 = _mm_add_pi32(mm5, PD_ONEHALF);
      mm3 = _mm_srai_pi32(mm3, SCALEBITS);
      mm5 = _mm_srai_pi32(mm5, SCALEBITS);

      mm2 = _mm_packs_pi32(mm2, mm4);     /* mm2=CbE*-FIX(0.344)+CrE*FIX(0.285) */
      mm3 = _mm_packs_pi32(mm3, mm5);     /* mm3=CbO*-FIX(0.344)+CrO*FIX(0.285) */
      mm2 = _mm_sub_pi16(mm2, mm6);  /* mm2=CbE*-FIX(0.344)+CrE*-FIX(0.714)=(G-Y)E */
      mm3 = _mm_sub_pi16(mm3, mm7);  /* mm3=CbO*-FIX(0.344)+CrO*-FIX(0.714)=(G-Y)O */

      mm5 = mm8;                             /* mm5=Y(01234567) */

      mm4 = _mm_cmpeq_pi16(mm4, mm4);
      mm4 = _mm_srli_pi16(mm4, BYTE_BIT);    /* mm4={0xFF 0x00 0xFF 0x00 ..} */
      mm4 = _mm_and_si64(mm4, mm5);          /* mm4=Y(0246)=YE */
      mm5 = _mm_srli_pi16(mm5, BYTE_BIT);    /* mm5=Y(1357)=YO */

      mm0 = _mm_add_pi16(mm0, mm4);      /* mm0=((R-Y)E+YE)=RE=(R0 R2 R4 R6) */
      mm1 = _mm_add_pi16(mm1, mm5);      /* mm1=((R-Y)O+YO)=RO=(R1 R3 R5 R7) */
      mm0 = _mm_packs_pu16(mm0, mm0);    /* mm0=(R0 R2 R4 R6 ** ** ** **) */
      mm1 = _mm_packs_pu16(mm1, mm1);    /* mm1=(R1 R3 R5 R7 ** ** ** **) */

      mm2 = _mm_add_pi16(mm2, mm4);      /* mm2=((G-Y)E+YE)=GE=(G0 G2 G4 G6) */
      mm3 = _mm_add_pi16(mm3, mm5);      /* mm3=((G-Y)O+YO)=GO=(G1 G3 G5 G7) */
      mm2 = _mm_packs_pu16(mm2, mm2);    /* mm2=(G0 G2 G4 G6 ** ** ** **) */
      mm3 = _mm_packs_pu16(mm3, mm3);    /* mm3=(G1 G3 G5 G7 ** ** ** **) */

      mm4 = _mm_add_pi16(mm4, wk[0]);    /* mm4=(YE+(B-Y)E)=BE=(B0 B2 B4 B6) */
      mm5 = _mm_add_pi16(mm5, wk[1]);    /* mm5=(YO+(B-Y)O)=BO=(B1 B3 B5 B7) */
      mm4 = _mm_packs_pu16(mm4, mm4);    /* mm4=(B0 B2 B4 B6 ** ** ** **) */
      mm5 = _mm_packs_pu16(mm5, mm5);    /* mm5=(B1 B3 B5 B7 ** ** ** **) */
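
      /* _mm_packs_pu16 saturates each word to an unsigned 8-bit value, so
       * every channel is clamped to 0..255.  The mmA..mmH aliases defined at
       * the top of this file map these R/G/B even/odd registers into the byte
       * order requested by RGB_RED/RGB_GREEN/RGB_BLUE; the code below only
       * transposes them into interleaved output pixels.
       */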
#if RGB_PIXELSIZE == 3

      /* mmA=(00 02 04 06 ** ** ** **), mmB=(01 03 05 07 ** ** ** **) */
      /* mmC=(10 12 14 16 ** ** ** **), mmD=(11 13 15 17 ** ** ** **) */
      mmA = _mm_unpacklo_pi8(mmA, mmC);      /* mmA=(00 10 02 12 04 14 06 16) */
      mmE = _mm_unpacklo_pi8(mmE, mmB);      /* mmE=(20 01 22 03 24 05 26 07) */
      mmD = _mm_unpacklo_pi8(mmD, mmF);      /* mmD=(11 21 13 23 15 25 17 27) */

      mmG = mmA;
      mmH = mmA;
      mmA = _mm_unpacklo_pi16(mmA, mmE);     /* mmA=(00 10 20 01 02 12 22 03) */
      mmG = _mm_unpackhi_pi16(mmG, mmE);     /* mmG=(04 14 24 05 06 16 26 07) */

      mmH = _mm_srli_si64(mmH, 2 * BYTE_BIT);
      mmE = _mm_srli_si64(mmE, 2 * BYTE_BIT);

      mmC = mmD;
      mmB = mmD;
      mmD = _mm_unpacklo_pi16(mmD, mmH);     /* mmD=(11 21 02 12 13 23 04 14) */
      mmC = _mm_unpackhi_pi16(mmC, mmH);     /* mmC=(15 25 06 16 17 27 -- --) */

      mmB = _mm_srli_si64(mmB, 2 * BYTE_BIT);  /* mmB=(13 23 15 25 17 27 -- --) */

      mmF = mmE;
      mmE = _mm_unpacklo_pi16(mmE, mmB);     /* mmE=(22 03 13 23 24 05 15 25) */
      mmF = _mm_unpackhi_pi16(mmF, mmB);     /* mmF=(26 07 17 27 -- -- -- --) */

      mmA = _mm_unpacklo_pi32(mmA, mmD);     /* mmA=(00 10 20 01 11 21 02 12) */
      mmE = _mm_unpacklo_pi32(mmE, mmG);     /* mmE=(22 03 13 23 04 14 24 05) */
      mmC = _mm_unpacklo_pi32(mmC, mmF);     /* mmC=(15 25 06 16 26 07 17 27) */

      if (num_cols >= 8) {
        _mm_store_si64((__m64 *)outptr, mmA);
        _mm_store_si64((__m64 *)(outptr + 8), mmE);
        _mm_store_si64((__m64 *)(outptr + 16), mmC);
        outptr += RGB_PIXELSIZE * 8;
      } else {
        col = num_cols * 3;
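
        /* Fewer than 8 pixels remain: store the last col = num_cols * 3
         * bytes with Loongson inline asm, writing 16-, 8-, 4-, 2- and
         * 1-byte chunks as needed (gssdlc1/gssdrc1, swl/swr, ush and sb
         * handle the unaligned destination).
         */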
        asm(".set noreorder\r\n"

            "li       $8, 16\r\n"
            "move     $9, %4\r\n"
            "mov.s    $f4, %1\r\n"
            "mov.s    $f6, %3\r\n"
            "move     $10, %5\r\n"
            "bltu     $9, $8, 1f\r\n"
            "nop      \r\n"
            "gssdlc1  $f4, 7($10)\r\n"
            "gssdrc1  $f4, 0($10)\r\n"
            "gssdlc1  $f6, 7+8($10)\r\n"
            "gssdrc1  $f6, 8($10)\r\n"
            "mov.s    $f4, %2\r\n"
            "subu     $9, $9, 16\r\n"
            "daddu    $10, $10, 16\r\n"
            "b        2f\r\n"
            "nop      \r\n"

            "1:       \r\n"
            "li       $8, 8\r\n"              /* st8 */
            "bltu     $9, $8, 2f\r\n"
            "nop      \r\n"
            "gssdlc1  $f4, 7($10)\r\n"
            "gssdrc1  $f4, ($10)\r\n"
            "mov.s    $f4, %3\r\n"
            "subu     $9, $9, 8\r\n"
            "daddu    $10, $10, 8\r\n"

            "2:       \r\n"
            "li       $8, 4\r\n"              /* st4 */
            "mfc1     $11, $f4\r\n"
            "bltu     $9, $8, 3f\r\n"
            "nop      \r\n"
            "swl      $11, 3($10)\r\n"
            "swr      $11, 0($10)\r\n"
            "li       $8, 32\r\n"
            "mtc1     $8, $f6\r\n"
            "dsrl     $f4, $f4, $f6\r\n"
            "mfc1     $11, $f4\r\n"
            "subu     $9, $9, 4\r\n"
            "daddu    $10, $10, 4\r\n"

            "3:       \r\n"
            "li       $8, 2\r\n"              /* st2 */
            "bltu     $9, $8, 4f\r\n"
            "nop      \r\n"
            "ush      $11, 0($10)\r\n"
            "srl      $11, 16\r\n"
            "subu     $9, $9, 2\r\n"
            "daddu    $10, $10, 2\r\n"

            "4:       \r\n"
            "li       $8, 1\r\n"              /* st1 */
            "bltu     $9, $8, 5f\r\n"
            "nop      \r\n"
            "sb       $11, 0($10)\r\n"

            "5:       \r\n"
            "nop      \r\n"                   /* end */
            : "=m" (*outptr)
            : "f" (mmA), "f" (mmC), "f" (mmE), "r" (col), "r" (outptr)
            : "$f4", "$f6", "$8", "$9", "$10", "$11", "memory"
           );
      }

#else  /* RGB_PIXELSIZE == 4 */

#ifdef RGBX_FILLER_0XFF
      mm6 = _mm_cmpeq_pi8(mm6, mm6);
      mm7 = _mm_cmpeq_pi8(mm7, mm7);
#else
      mm6 = _mm_xor_si64(mm6, mm6);
      mm7 = _mm_xor_si64(mm7, mm7);
#endif
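      /* mm6/mm7 supply the filler (X) byte for the 4-byte pixel format:
       * all ones (0xFF) when RGBX_FILLER_0XFF is defined, zero otherwise.
       */
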
      /* mmA=(00 02 04 06 ** ** ** **), mmB=(01 03 05 07 ** ** ** **) */
      /* mmC=(10 12 14 16 ** ** ** **), mmD=(11 13 15 17 ** ** ** **) */
      /* mmE=(20 22 24 26 ** ** ** **), mmF=(21 23 25 27 ** ** ** **) */
      /* mmG=(30 32 34 36 ** ** ** **), mmH=(31 33 35 37 ** ** ** **) */
      mmA = _mm_unpacklo_pi8(mmA, mmC);      /* mmA=(00 10 02 12 04 14 06 16) */
      mmE = _mm_unpacklo_pi8(mmE, mmG);      /* mmE=(20 30 22 32 24 34 26 36) */
      mmB = _mm_unpacklo_pi8(mmB, mmD);      /* mmB=(01 11 03 13 05 15 07 17) */
      mmF = _mm_unpacklo_pi8(mmF, mmH);      /* mmF=(21 31 23 33 25 35 27 37) */

      mmC = mmA;
      mmA = _mm_unpacklo_pi16(mmA, mmE);     /* mmA=(00 10 20 30 02 12 22 32) */
      mmC = _mm_unpackhi_pi16(mmC, mmE);     /* mmC=(04 14 24 34 06 16 26 36) */
      mmG = mmB;
      mmB = _mm_unpacklo_pi16(mmB, mmF);     /* mmB=(01 11 21 31 03 13 23 33) */
      mmG = _mm_unpackhi_pi16(mmG, mmF);     /* mmG=(05 15 25 35 07 17 27 37) */

      mmD = mmA;
      mmA = _mm_unpacklo_pi32(mmA, mmB);     /* mmA=(00 10 20 30 01 11 21 31) */
      mmD = _mm_unpackhi_pi32(mmD, mmB);     /* mmD=(02 12 22 32 03 13 23 33) */
      mmH = mmC;
      mmC = _mm_unpacklo_pi32(mmC, mmG);     /* mmC=(04 14 24 34 05 15 25 35) */
      mmH = _mm_unpackhi_pi32(mmH, mmG);     /* mmH=(06 16 26 36 07 17 27 37) */

      if (num_cols >= 8) {
        _mm_store_si64((__m64 *)outptr, mmA);
        _mm_store_si64((__m64 *)(outptr + 8), mmD);
        _mm_store_si64((__m64 *)(outptr + 16), mmC);
        _mm_store_si64((__m64 *)(outptr + 24), mmH);
        outptr += RGB_PIXELSIZE * 8;
      } else {
        col = num_cols;
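
        /* Fewer than 8 pixels remain: col holds the remaining pixel count,
         * and the inline asm below stores the last 4-byte pixels in 16-, 8-
         * and 4-byte chunks using unaligned Loongson stores.
         */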
        asm(".set noreorder\r\n"              /* st16 */

            "li       $8, 4\r\n"
            "move     $9, %6\r\n"
            "move     $10, %7\r\n"
            "mov.s    $f4, %2\r\n"
            "mov.s    $f6, %4\r\n"
            "bltu     $9, $8, 1f\r\n"
            "nop      \r\n"
            "gssdlc1  $f4, 7($10)\r\n"
            "gssdrc1  $f4, ($10)\r\n"
            "gssdlc1  $f6, 7+8($10)\r\n"
            "gssdrc1  $f6, 8($10)\r\n"
            "mov.s    $f4, %3\r\n"
            "mov.s    $f6, %5\r\n"
            "subu     $9, $9, 4\r\n"
            "daddu    $10, $10, 16\r\n"

            "1:       \r\n"
            "li       $8, 2\r\n"              /* st8 */
            "bltu     $9, $8, 2f\r\n"
            "nop      \r\n"
            "gssdlc1  $f4, 7($10)\r\n"
            "gssdrc1  $f4, 0($10)\r\n"
            "mov.s    $f4, $f6\r\n"
            "subu     $9, $9, 2\r\n"
            "daddu    $10, $10, 8\r\n"

            "2:       \r\n"
            "li       $8, 1\r\n"              /* st4 */
            "bltu     $9, $8, 3f\r\n"
            "nop      \r\n"
            "gsswlc1  $f4, 3($10)\r\n"
            "gsswrc1  $f4, 0($10)\r\n"

            "3:       \r\n"
            "li       %1, 0\r\n"              /* end */
            : "=m" (*outptr), "=r" (col)
            : "f" (mmA), "f" (mmC), "f" (mmD), "f" (mmH), "r" (col),
              "r" (outptr)
            : "$f4", "$f6", "$8", "$9", "$10", "memory"
           );
      }

#endif

    }
  }
}

#undef mmA
#undef mmB
#undef mmC
#undef mmD
#undef mmE
#undef mmF
#undef mmG
#undef mmH