jccolext-altivec.c
/*
 * AltiVec optimizations for libjpeg-turbo
 *
 * Copyright (C) 2014-2015, D. R. Commander. All Rights Reserved.
 * Copyright (C) 2014, Jay Foad. All Rights Reserved.
 *
 * This software is provided 'as-is', without any express or implied
 * warranty. In no event will the authors be held liable for any damages
 * arising from the use of this software.
 *
 * Permission is granted to anyone to use this software for any purpose,
 * including commercial applications, and to alter it and redistribute it
 * freely, subject to the following restrictions:
 *
 * 1. The origin of this software must not be misrepresented; you must not
 *    claim that you wrote the original software. If you use this software
 *    in a product, an acknowledgment in the product documentation would be
 *    appreciated but is not required.
 * 2. Altered source versions must be plainly marked as such, and must not be
 *    misrepresented as being the original software.
 * 3. This notice may not be removed or altered from any source distribution.
 */

/* This file is included by jccolor-altivec.c */
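/* NOTE: RGB_PIXELSIZE, the RGBG_INDEX* permutation constants, and the
 * F_0_*/__4X*()/VEC_*() helpers used below are assumed to be provided by the
 * including file and its headers; jccolor-altivec.c presumably includes this
 * file once per supported RGB pixel format.
 */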

void jsimd_rgb_ycc_convert_altivec(JDIMENSION img_width, JSAMPARRAY input_buf,
                                   JSAMPIMAGE output_buf,
                                   JDIMENSION output_row, int num_rows)
{
  JSAMPROW inptr, outptr0, outptr1, outptr2;
  int pitch = img_width * RGB_PIXELSIZE, num_cols;
#if __BIG_ENDIAN__
  int offset;
#endif
  unsigned char __attribute__((aligned(16))) tmpbuf[RGB_PIXELSIZE * 16];

  __vector unsigned char rgb0, rgb1 = { 0 }, rgb2 = { 0 },
    rgbg0, rgbg1, rgbg2, rgbg3, y, cb, cr;
#if __BIG_ENDIAN__ || RGB_PIXELSIZE == 4
  __vector unsigned char rgb3 = { 0 };
#endif
#if __BIG_ENDIAN__ && RGB_PIXELSIZE == 4
  __vector unsigned char rgb4 = { 0 };
#endif
  __vector short rg0, rg1, rg2, rg3, bg0, bg1, bg2, bg3;
  __vector unsigned short yl, yh, crl, crh, cbl, cbh;
  __vector int y0, y1, y2, y3, cr0, cr1, cr2, cr3, cb0, cb1, cb2, cb3;

  /* Constants */
  __vector short pw_f0299_f0337 = { __4X2(F_0_299, F_0_337) },
    pw_f0114_f0250 = { __4X2(F_0_114, F_0_250) },
    pw_mf016_mf033 = { __4X2(-F_0_168, -F_0_331) },
    pw_mf008_mf041 = { __4X2(-F_0_081, -F_0_418) };
  __vector unsigned short pw_f050_f000 = { __4X2(F_0_500, 0) };
  __vector int pd_onehalf = { __4X(ONE_HALF) },
    pd_onehalfm1_cj = { __4X(ONE_HALF - 1 + (CENTERJSAMPLE << SCALEBITS)) };
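  /* pd_onehalf provides the fixed-point rounding term for the Y accumulators.
   * pd_onehalfm1_cj folds both the rounding term and the +CENTERJSAMPLE bias
   * into the initial Cb/Cr accumulators, so no separate add is needed later;
   * the "- 1" presumably makes chroma fractions of exactly 0.5 round down,
   * matching the scalar implementation.
   */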
  __vector unsigned char pb_zero = { __16X(0) },
#if __BIG_ENDIAN__
    shift_pack_index =
      { 0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29 };
#else
    shift_pack_index =
      { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 };
#endif

  while (--num_rows >= 0) {
    inptr = *input_buf++;
    outptr0 = output_buf[0][output_row];
    outptr1 = output_buf[1][output_row];
    outptr2 = output_buf[2][output_row];
    output_row++;

    for (num_cols = pitch; num_cols > 0;
         num_cols -= RGB_PIXELSIZE * 16, inptr += RGB_PIXELSIZE * 16,
         outptr0 += 16, outptr1 += 16, outptr2 += 16) {
#if __BIG_ENDIAN__
      /* Load 16 pixels == 48 or 64 bytes */
      offset = (size_t)inptr & 15;
      if (offset) {
        __vector unsigned char unaligned_shift_index;
        int bytes = num_cols + offset;

        if (bytes < (RGB_PIXELSIZE + 1) * 16 && (bytes & 15)) {
          /* Slow path to prevent buffer overread. Since there is no way to
           * read a partial AltiVec register, overread would occur on the last
           * chunk of the last image row if the right edge is not on a 16-byte
           * boundary. It could also occur on other rows if the bytes per row
           * is low enough. Since we can't determine whether we're on the last
           * image row, we have to assume every row is the last.
           */
          memcpy(tmpbuf, inptr, min(num_cols, RGB_PIXELSIZE * 16));
          rgb0 = vec_ld(0, tmpbuf);
          rgb1 = vec_ld(16, tmpbuf);
          rgb2 = vec_ld(32, tmpbuf);
#if RGB_PIXELSIZE == 4
          rgb3 = vec_ld(48, tmpbuf);
#endif
        } else {
          /* Fast path */
          rgb0 = vec_ld(0, inptr);
          if (bytes > 16)
            rgb1 = vec_ld(16, inptr);
          if (bytes > 32)
            rgb2 = vec_ld(32, inptr);
          if (bytes > 48)
            rgb3 = vec_ld(48, inptr);
#if RGB_PIXELSIZE == 4
          if (bytes > 64)
            rgb4 = vec_ld(64, inptr);
#endif
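          /* vec_lvsl() builds a permutation map from the low four bits of the
           * address; applying it with vec_perm() to each pair of adjacent
           * aligned loads shifts the misaligned pixel data into register
           * alignment.
           */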
          unaligned_shift_index = vec_lvsl(0, inptr);
          rgb0 = vec_perm(rgb0, rgb1, unaligned_shift_index);
          rgb1 = vec_perm(rgb1, rgb2, unaligned_shift_index);
          rgb2 = vec_perm(rgb2, rgb3, unaligned_shift_index);
#if RGB_PIXELSIZE == 4
          rgb3 = vec_perm(rgb3, rgb4, unaligned_shift_index);
#endif
        }
      } else {
#endif /* __BIG_ENDIAN__ */
        if (num_cols < RGB_PIXELSIZE * 16 && (num_cols & 15)) {
          /* Slow path */
          memcpy(tmpbuf, inptr, min(num_cols, RGB_PIXELSIZE * 16));
          rgb0 = VEC_LD(0, tmpbuf);
          rgb1 = VEC_LD(16, tmpbuf);
          rgb2 = VEC_LD(32, tmpbuf);
#if RGB_PIXELSIZE == 4
          rgb3 = VEC_LD(48, tmpbuf);
#endif
        } else {
          /* Fast path */
          rgb0 = VEC_LD(0, inptr);
          if (num_cols > 16)
            rgb1 = VEC_LD(16, inptr);
          if (num_cols > 32)
            rgb2 = VEC_LD(32, inptr);
#if RGB_PIXELSIZE == 4
          if (num_cols > 48)
            rgb3 = VEC_LD(48, inptr);
#endif
        }
#if __BIG_ENDIAN__
      }
#endif

#if RGB_PIXELSIZE == 3
      /* rgb0 = R0 G0 B0 R1 G1 B1 R2 G2 B2 R3 G3 B3 R4 G4 B4 R5
       * rgb1 = G5 B5 R6 G6 B6 R7 G7 B7 R8 G8 B8 R9 G9 B9 Ra Ga
       * rgb2 = Ba Rb Gb Bb Rc Gc Bc Rd Gd Bd Re Ge Be Rf Gf Bf
       *
       * rgbg0 = R0 G0 R1 G1 R2 G2 R3 G3 B0 G0 B1 G1 B2 G2 B3 G3
       * rgbg1 = R4 G4 R5 G5 R6 G6 R7 G7 B4 G4 B5 G5 B6 G6 B7 G7
       * rgbg2 = R8 G8 R9 G9 Ra Ga Rb Gb B8 G8 B9 G9 Ba Ga Bb Gb
       * rgbg3 = Rc Gc Rd Gd Re Ge Rf Gf Bc Gc Bd Gd Be Ge Bf Gf
       */
      rgbg0 = vec_perm(rgb0, rgb0, (__vector unsigned char)RGBG_INDEX0);
      rgbg1 = vec_perm(rgb0, rgb1, (__vector unsigned char)RGBG_INDEX1);
      rgbg2 = vec_perm(rgb1, rgb2, (__vector unsigned char)RGBG_INDEX2);
      rgbg3 = vec_perm(rgb2, rgb2, (__vector unsigned char)RGBG_INDEX3);
#else
      /* rgb0 = R0 G0 B0 X0 R1 G1 B1 X1 R2 G2 B2 X2 R3 G3 B3 X3
       * rgb1 = R4 G4 B4 X4 R5 G5 B5 X5 R6 G6 B6 X6 R7 G7 B7 X7
       * rgb2 = R8 G8 B8 X8 R9 G9 B9 X9 Ra Ga Ba Xa Rb Gb Bb Xb
       * rgb3 = Rc Gc Bc Xc Rd Gd Bd Xd Re Ge Be Xe Rf Gf Bf Xf
       *
       * rgbg0 = R0 G0 R1 G1 R2 G2 R3 G3 B0 G0 B1 G1 B2 G2 B3 G3
       * rgbg1 = R4 G4 R5 G5 R6 G6 R7 G7 B4 G4 B5 G5 B6 G6 B7 G7
       * rgbg2 = R8 G8 R9 G9 Ra Ga Rb Gb B8 G8 B9 G9 Ba Ga Bb Gb
       * rgbg3 = Rc Gc Rd Gd Re Ge Rf Gf Bc Gc Bd Gd Be Ge Bf Gf
       */
      rgbg0 = vec_perm(rgb0, rgb0, (__vector unsigned char)RGBG_INDEX);
      rgbg1 = vec_perm(rgb1, rgb1, (__vector unsigned char)RGBG_INDEX);
      rgbg2 = vec_perm(rgb2, rgb2, (__vector unsigned char)RGBG_INDEX);
      rgbg3 = vec_perm(rgb3, rgb3, (__vector unsigned char)RGBG_INDEX);
#endif

      /* rg0 = R0 G0 R1 G1 R2 G2 R3 G3
       * bg0 = B0 G0 B1 G1 B2 G2 B3 G3
       * ...
       *
       * NOTE: We have to use vec_merge*() here because vec_unpack*() doesn't
       * support unsigned vectors.
       */
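      /* VEC_UNPACKHU()/VEC_UNPACKLU() are presumably wrappers around
       * vec_mergeh()/vec_mergel() with the zero vector (pb_zero), which
       * interleaves zero bytes with the data bytes -- i.e. a zero-extending
       * unpack of unsigned bytes to 16-bit words.
       */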
      rg0 = (__vector signed short)VEC_UNPACKHU(rgbg0);
      bg0 = (__vector signed short)VEC_UNPACKLU(rgbg0);
      rg1 = (__vector signed short)VEC_UNPACKHU(rgbg1);
      bg1 = (__vector signed short)VEC_UNPACKLU(rgbg1);
      rg2 = (__vector signed short)VEC_UNPACKHU(rgbg2);
      bg2 = (__vector signed short)VEC_UNPACKLU(rgbg2);
      rg3 = (__vector signed short)VEC_UNPACKHU(rgbg3);
      bg3 = (__vector signed short)VEC_UNPACKLU(rgbg3);

      /* (Original)
       * Y  =  0.29900 * R + 0.58700 * G + 0.11400 * B
       * Cb = -0.16874 * R - 0.33126 * G + 0.50000 * B + CENTERJSAMPLE
       * Cr =  0.50000 * R - 0.41869 * G - 0.08131 * B + CENTERJSAMPLE
       *
       * (This implementation)
       * Y  =  0.29900 * R + 0.33700 * G + 0.11400 * B + 0.25000 * G
       * Cb = -0.16874 * R - 0.33126 * G + 0.50000 * B + CENTERJSAMPLE
       * Cr =  0.50000 * R - 0.41869 * G - 0.08131 * B + CENTERJSAMPLE
       */
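      /* 0.58700 * G is split into 0.33700 * G + 0.25000 * G because, assuming
       * SCALEBITS == 16 as in the scalar libjpeg code, FIX(0.58700) == 38470
       * does not fit in the signed 16-bit words that vec_msums() requires,
       * whereas FIX(0.33700) and FIX(0.25000) both do.  A scalar sketch of
       * the same fixed-point math for one pixel:
       *
       *   y = (F_0_299 * r + F_0_337 * g + F_0_114 * b + F_0_250 * g +
       *        ONE_HALF) >> SCALEBITS;
       */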

      /* Calculate Y values */
      y0 = vec_msums(rg0, pw_f0299_f0337, pd_onehalf);
      y1 = vec_msums(rg1, pw_f0299_f0337, pd_onehalf);
      y2 = vec_msums(rg2, pw_f0299_f0337, pd_onehalf);
      y3 = vec_msums(rg3, pw_f0299_f0337, pd_onehalf);
      y0 = vec_msums(bg0, pw_f0114_f0250, y0);
      y1 = vec_msums(bg1, pw_f0114_f0250, y1);
      y2 = vec_msums(bg2, pw_f0114_f0250, y2);
      y3 = vec_msums(bg3, pw_f0114_f0250, y3);

      /* Clever way to avoid 4 shifts + 2 packs. This packs the high word from
       * each dword into a new 16-bit vector, which is the equivalent of
       * descaling the 32-bit results (right-shifting by 16 bits) and then
       * packing them.
       */
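      /* For example, on a big-endian machine, bytes {0, 1} of each dword are
       * its high-order 16 bits, so the index {0, 1, 4, 5, ...} above gathers
       * (value >> 16) for all eight lanes in a single vec_perm().
       */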
      yl = vec_perm((__vector unsigned short)y0, (__vector unsigned short)y1,
                    shift_pack_index);
      yh = vec_perm((__vector unsigned short)y2, (__vector unsigned short)y3,
                    shift_pack_index);
      y = vec_pack(yl, yh);
      vec_st(y, 0, outptr0);

      /* Calculate Cb values */
      cb0 = vec_msums(rg0, pw_mf016_mf033, pd_onehalfm1_cj);
      cb1 = vec_msums(rg1, pw_mf016_mf033, pd_onehalfm1_cj);
      cb2 = vec_msums(rg2, pw_mf016_mf033, pd_onehalfm1_cj);
      cb3 = vec_msums(rg3, pw_mf016_mf033, pd_onehalfm1_cj);
      cb0 = (__vector int)vec_msum((__vector unsigned short)bg0, pw_f050_f000,
                                   (__vector unsigned int)cb0);
      cb1 = (__vector int)vec_msum((__vector unsigned short)bg1, pw_f050_f000,
                                   (__vector unsigned int)cb1);
      cb2 = (__vector int)vec_msum((__vector unsigned short)bg2, pw_f050_f000,
                                   (__vector unsigned int)cb2);
      cb3 = (__vector int)vec_msum((__vector unsigned short)bg3, pw_f050_f000,
                                   (__vector unsigned int)cb3);
      cbl = vec_perm((__vector unsigned short)cb0,
                     (__vector unsigned short)cb1, shift_pack_index);
      cbh = vec_perm((__vector unsigned short)cb2,
                     (__vector unsigned short)cb3, shift_pack_index);
      cb = vec_pack(cbl, cbh);
      vec_st(cb, 0, outptr1);

      /* Calculate Cr values */
      cr0 = vec_msums(bg0, pw_mf008_mf041, pd_onehalfm1_cj);
      cr1 = vec_msums(bg1, pw_mf008_mf041, pd_onehalfm1_cj);
      cr2 = vec_msums(bg2, pw_mf008_mf041, pd_onehalfm1_cj);
      cr3 = vec_msums(bg3, pw_mf008_mf041, pd_onehalfm1_cj);
      cr0 = (__vector int)vec_msum((__vector unsigned short)rg0, pw_f050_f000,
                                   (__vector unsigned int)cr0);
      cr1 = (__vector int)vec_msum((__vector unsigned short)rg1, pw_f050_f000,
                                   (__vector unsigned int)cr1);
      cr2 = (__vector int)vec_msum((__vector unsigned short)rg2, pw_f050_f000,
                                   (__vector unsigned int)cr2);
      cr3 = (__vector int)vec_msum((__vector unsigned short)rg3, pw_f050_f000,
                                   (__vector unsigned int)cr3);
      crl = vec_perm((__vector unsigned short)cr0,
                     (__vector unsigned short)cr1, shift_pack_index);
      crh = vec_perm((__vector unsigned short)cr2,
                     (__vector unsigned short)cr3, shift_pack_index);
      cr = vec_pack(crl, crh);
      vec_st(cr, 0, outptr2);
    }
  }
}