// Copyright 2011 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// SSE2 version of YUV to RGB upsampling functions.
//
// Author: somnath@google.com (Somnath Banerjee)

#include "./dsp.h"

#if defined(WEBP_USE_SSE2)

#include <assert.h>
#include <emmintrin.h>
#include <string.h>
#include "./yuv.h"

#ifdef FANCY_UPSAMPLING

// We compute (9*a + 3*b + 3*c + d + 8) / 16 as follows
// u = (9*a + 3*b + 3*c + d + 8) / 16
//   = (a + (a + 3*b + 3*c + d) / 8 + 1) / 2
//   = (a + m + 1) / 2
// where m = (a + 3*b + 3*c + d) / 8
//         = ((a + b + c + d) / 2 + b + c) / 4
//
// Let's say k = (a + b + c + d) / 4.
// We can compute k as
// k = (s + t + 1) / 2 - ((a^d) | (b^c) | (s^t)) & 1
// where s = (a + d + 1) / 2 and t = (b + c + 1) / 2
//
// Then m can be written as
// m = (k + t + 1) / 2 - (((b^c) & (s^t)) | (k^t)) & 1
// Computes out = (k + in + 1) / 2 - ((ij & (s^t)) | (k^in)) & 1
#define GET_M(ij, in, out) do { \
  const __m128i tmp0 = _mm_avg_epu8(k, (in));     /* (k + in + 1) / 2 */ \
  const __m128i tmp1 = _mm_and_si128((ij), st);   /* (ij) & (s^t) */ \
  const __m128i tmp2 = _mm_xor_si128(k, (in));    /* (k^in) */ \
  const __m128i tmp3 = _mm_or_si128(tmp1, tmp2);  /* ((ij) & (s^t)) | (k^in) */ \
  const __m128i tmp4 = _mm_and_si128(tmp3, one);  /* & 1 -> lsb_correction */ \
  (out) = _mm_sub_epi8(tmp0, tmp4);    /* (k + in + 1) / 2 - lsb_correction */ \
} while (0)

// pack and store two alternating pixel rows
#define PACK_AND_STORE(a, b, da, db, out) do { \
  const __m128i t_a = _mm_avg_epu8(a, da);  /* (9a + 3b + 3c + d + 8) / 16 */ \
  const __m128i t_b = _mm_avg_epu8(b, db);  /* (3a + 9b + c + 3d + 8) / 16 */ \
  const __m128i t_1 = _mm_unpacklo_epi8(t_a, t_b); \
  const __m128i t_2 = _mm_unpackhi_epi8(t_a, t_b); \
  _mm_store_si128(((__m128i*)(out)) + 0, t_1); \
  _mm_store_si128(((__m128i*)(out)) + 1, t_2); \
} while (0)
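
// Note: the two unpacks interleave the averaged rows byte by byte, so the
// 32 bytes written at 'out' form one horizontally 2x-upsampled output row.
// _mm_store_si128() requires 'out' to be 16-byte aligned; the callers below
// guarantee this by passing the aligned r_u/r_v scratch buffers.
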
// Loads 17 pixels each from rows r1 and r2 and generates 32 pixels.
#define UPSAMPLE_32PIXELS(r1, r2, out) { \
  const __m128i one = _mm_set1_epi8(1); \
  const __m128i a = _mm_loadu_si128((__m128i*)&(r1)[0]); \
  const __m128i b = _mm_loadu_si128((__m128i*)&(r1)[1]); \
  const __m128i c = _mm_loadu_si128((__m128i*)&(r2)[0]); \
  const __m128i d = _mm_loadu_si128((__m128i*)&(r2)[1]); \
  \
  const __m128i s = _mm_avg_epu8(a, d);        /* s = (a + d + 1) / 2 */ \
  const __m128i t = _mm_avg_epu8(b, c);        /* t = (b + c + 1) / 2 */ \
  const __m128i st = _mm_xor_si128(s, t);      /* st = s^t */ \
  \
  const __m128i ad = _mm_xor_si128(a, d);      /* ad = a^d */ \
  const __m128i bc = _mm_xor_si128(b, c);      /* bc = b^c */ \
  \
  const __m128i t1 = _mm_or_si128(ad, bc);     /* (a^d) | (b^c) */ \
  const __m128i t2 = _mm_or_si128(t1, st);     /* (a^d) | (b^c) | (s^t) */ \
  const __m128i t3 = _mm_and_si128(t2, one);   /* (a^d) | (b^c) | (s^t) & 1 */ \
  const __m128i t4 = _mm_avg_epu8(s, t); \
  const __m128i k = _mm_sub_epi8(t4, t3);      /* k = (a + b + c + d) / 4 */ \
  __m128i diag1, diag2; \
  \
  GET_M(bc, t, diag1);                   /* diag1 = (a + 3b + 3c + d) / 8 */ \
  GET_M(ad, s, diag2);                   /* diag2 = (3a + b + c + 3d) / 8 */ \
  \
  /* pack the alternate pixels */ \
  PACK_AND_STORE(a, b, diag1, diag2, out + 0);       /* store top */ \
  PACK_AND_STORE(c, d, diag2, diag1, out + 2 * 32);  /* store bottom */ \
}
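
// Output layout: out[0..31] receives the upsampled row nearer to r1 ("top")
// and out[64..95] the row nearer to r2 ("bottom"); out[32..63] is left
// untouched so the caller can place a second channel there (see the r_u/r_v
// layout in SSE2_UPSAMPLE_FUNC below).
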
// Turn the macro into a function for reducing code-size when non-critical
static void Upsample32Pixels(const uint8_t r1[], const uint8_t r2[],
                             uint8_t* const out) {
  UPSAMPLE_32PIXELS(r1, r2, out);
}

#define UPSAMPLE_LAST_BLOCK(tb, bb, num_pixels, out) { \
  uint8_t r1[17], r2[17]; \
  memcpy(r1, (tb), (num_pixels)); \
  memcpy(r2, (bb), (num_pixels)); \
  /* replicate last byte */ \
  memset(r1 + (num_pixels), r1[(num_pixels) - 1], 17 - (num_pixels)); \
  memset(r2 + (num_pixels), r2[(num_pixels) - 1], 17 - (num_pixels)); \
  /* using the shared function instead of the macro saves ~3k code size */ \
  Upsample32Pixels(r1, r2, out); \
}

#define CONVERT2RGB(FUNC, XSTEP, top_y, bottom_y, \
                    top_dst, bottom_dst, cur_x, num_pixels) { \
  int n; \
  for (n = 0; n < (num_pixels); ++n) { \
    FUNC(top_y[(cur_x) + n], r_u[n], r_v[n], \
         top_dst + ((cur_x) + n) * XSTEP); \
  } \
  if (bottom_y != NULL) { \
    for (n = 0; n < (num_pixels); ++n) { \
      FUNC(bottom_y[(cur_x) + n], r_u[64 + n], r_v[64 + n], \
           bottom_dst + ((cur_x) + n) * XSTEP); \
    } \
  } \
}

#define CONVERT2RGB_32(FUNC, XSTEP, top_y, bottom_y, \
                       top_dst, bottom_dst, cur_x) do { \
  FUNC##32(top_y + (cur_x), r_u, r_v, top_dst + (cur_x) * XSTEP); \
  if (bottom_y != NULL) { \
    FUNC##32(bottom_y + (cur_x), r_u + 64, r_v + 64, \
             bottom_dst + (cur_x) * XSTEP); \
  } \
} while (0)
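
// FUNC##32 pastes "32" onto the per-pixel converter name to select its
// 32-pixel batch variant (e.g. VP8YuvToRgb becomes VP8YuvToRgb32, provided
// by yuv.h for the SSE2 build), avoiding the per-pixel call overhead.
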
#define SSE2_UPSAMPLE_FUNC(FUNC_NAME, FUNC, XSTEP) \
static void FUNC_NAME(const uint8_t* top_y, const uint8_t* bottom_y, \
                      const uint8_t* top_u, const uint8_t* top_v, \
                      const uint8_t* cur_u, const uint8_t* cur_v, \
                      uint8_t* top_dst, uint8_t* bottom_dst, int len) { \
  int uv_pos, pos; \
  /* 16byte-aligned array to cache reconstructed u and v */ \
  uint8_t uv_buf[4 * 32 + 15]; \
  uint8_t* const r_u = (uint8_t*)((uintptr_t)(uv_buf + 15) & ~15); \
  uint8_t* const r_v = r_u + 32; \
  \
  assert(top_y != NULL); \
  {   /* Treat the first pixel in regular way */ \
    const int u_diag = ((top_u[0] + cur_u[0]) >> 1) + 1; \
    const int v_diag = ((top_v[0] + cur_v[0]) >> 1) + 1; \
    const int u0_t = (top_u[0] + u_diag) >> 1; \
    const int v0_t = (top_v[0] + v_diag) >> 1; \
    FUNC(top_y[0], u0_t, v0_t, top_dst); \
    if (bottom_y != NULL) { \
      const int u0_b = (cur_u[0] + u_diag) >> 1; \
      const int v0_b = (cur_v[0] + v_diag) >> 1; \
      FUNC(bottom_y[0], u0_b, v0_b, bottom_dst); \
    } \
  } \
  /* For UPSAMPLE_32PIXELS, 17 u/v values must be read-able for each block */ \
  for (pos = 1, uv_pos = 0; pos + 32 + 1 <= len; pos += 32, uv_pos += 16) { \
    UPSAMPLE_32PIXELS(top_u + uv_pos, cur_u + uv_pos, r_u); \
    UPSAMPLE_32PIXELS(top_v + uv_pos, cur_v + uv_pos, r_v); \
    CONVERT2RGB_32(FUNC, XSTEP, top_y, bottom_y, top_dst, bottom_dst, pos); \
  } \
  if (len > 1) { \
    const int left_over = ((len + 1) >> 1) - (pos >> 1); \
    assert(left_over > 0); \
    UPSAMPLE_LAST_BLOCK(top_u + uv_pos, cur_u + uv_pos, left_over, r_u); \
    UPSAMPLE_LAST_BLOCK(top_v + uv_pos, cur_v + uv_pos, left_over, r_v); \
    CONVERT2RGB(FUNC, XSTEP, top_y, bottom_y, top_dst, bottom_dst, \
                pos, len - pos); \
  } \
}
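
// Each generated FUNC_NAME converts one pair of output rows: the first pixel
// is handled in scalar form, then the main loop emits 32 output pixels per
// iteration (pos advances by 32 in luma/output space while uv_pos advances by
// 16 in the half-resolution chroma planes), and any tail shorter than a full
// block is padded and finished through UPSAMPLE_LAST_BLOCK.
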
// SSE2 variants of the fancy upsampler.
SSE2_UPSAMPLE_FUNC(UpsampleRgbLinePair,  VP8YuvToRgb,  3)
SSE2_UPSAMPLE_FUNC(UpsampleBgrLinePair,  VP8YuvToBgr,  3)
SSE2_UPSAMPLE_FUNC(UpsampleRgbaLinePair, VP8YuvToRgba, 4)
SSE2_UPSAMPLE_FUNC(UpsampleBgraLinePair, VP8YuvToBgra, 4)

#undef GET_M
#undef PACK_AND_STORE
#undef UPSAMPLE_32PIXELS
#undef UPSAMPLE_LAST_BLOCK
#undef CONVERT2RGB
#undef CONVERT2RGB_32
#undef SSE2_UPSAMPLE_FUNC

#endif  // FANCY_UPSAMPLING

#endif  // WEBP_USE_SSE2

//------------------------------------------------------------------------------

extern void WebPInitUpsamplersSSE2(void);
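
// This is the SSE2 initialization hook; in the rest of libwebp it is expected
// to be invoked from the generic upsampler setup (upsampling.c) once SSE2
// support has been detected at runtime.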

#ifdef FANCY_UPSAMPLING

extern WebPUpsampleLinePairFunc WebPUpsamplers[/* MODE_LAST */];

void WebPInitUpsamplersSSE2(void) {
#if defined(WEBP_USE_SSE2)
  VP8YUVInitSSE2();
  WebPUpsamplers[MODE_RGB]  = UpsampleRgbLinePair;
  WebPUpsamplers[MODE_RGBA] = UpsampleRgbaLinePair;
  WebPUpsamplers[MODE_BGR]  = UpsampleBgrLinePair;
  WebPUpsamplers[MODE_BGRA] = UpsampleBgraLinePair;
  WebPUpsamplers[MODE_rgbA] = UpsampleRgbaLinePair;
  WebPUpsamplers[MODE_bgrA] = UpsampleBgraLinePair;
#endif  // WEBP_USE_SSE2
}

#else

// this empty function is to avoid an empty .o
void WebPInitUpsamplersSSE2(void) {}

#endif  // FANCY_UPSAMPLING