dec_neon.c 53 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292
  1. // Copyright 2012 Google Inc. All Rights Reserved.
  2. //
  3. // Use of this source code is governed by a BSD-style license
  4. // that can be found in the COPYING file in the root of the source
  5. // tree. An additional intellectual property rights grant can be found
  6. // in the file PATENTS. All contributing project authors may
  7. // be found in the AUTHORS file in the root of the source tree.
  8. // -----------------------------------------------------------------------------
  9. //
  10. // ARM NEON version of dsp functions and loop filtering.
  11. //
  12. // Authors: Somnath Banerjee (somnath@google.com)
  13. // Johann Koenig (johannkoenig@google.com)
  14. #include "./dsp.h"
  15. #if defined(WEBP_USE_NEON)
  16. #include "./neon.h"
  17. #include "../dec/vp8i.h"
  18. //------------------------------------------------------------------------------
  19. // NxM Loading functions
  20. // Load/Store vertical edge
// Loads a 4-byte-wide, 8-row-tall vertical strip with de-interleaving:
// each vld4.8 splits one row's 4 bytes across registers c1..c4 (row i goes
// into lane i). 'b1' and 'b2' are two address registers pointing at
// consecutive rows; each post-increments by 'stride' (which must hold
// 2 * row-stride), so the two alternate through rows 0..7.
#define LOAD8x4(c1, c2, c3, c4, b1, b2, stride) \
"vld4.8 {" #c1"[0], " #c2"[0], " #c3"[0], " #c4"[0]}," #b1 "," #stride"\n" \
"vld4.8 {" #c1"[1], " #c2"[1], " #c3"[1], " #c4"[1]}," #b2 "," #stride"\n" \
"vld4.8 {" #c1"[2], " #c2"[2], " #c3"[2], " #c4"[2]}," #b1 "," #stride"\n" \
"vld4.8 {" #c1"[3], " #c2"[3], " #c3"[3], " #c4"[3]}," #b2 "," #stride"\n" \
"vld4.8 {" #c1"[4], " #c2"[4], " #c3"[4], " #c4"[4]}," #b1 "," #stride"\n" \
"vld4.8 {" #c1"[5], " #c2"[5], " #c3"[5], " #c4"[5]}," #b2 "," #stride"\n" \
"vld4.8 {" #c1"[6], " #c2"[6], " #c3"[6], " #c4"[6]}," #b1 "," #stride"\n" \
"vld4.8 {" #c1"[7], " #c2"[7], " #c3"[7], " #c4"[7]}," #b2 "," #stride"\n"
// Stores a 2-byte-wide, 8-row-tall vertical strip: each vst2.8 writes the
// pair (c1[i], c2[i]) to one row. 'p' post-increments by 'stride' per row.
#define STORE8x2(c1, c2, p, stride) \
"vst2.8 {" #c1"[0], " #c2"[0]}," #p "," #stride " \n" \
"vst2.8 {" #c1"[1], " #c2"[1]}," #p "," #stride " \n" \
"vst2.8 {" #c1"[2], " #c2"[2]}," #p "," #stride " \n" \
"vst2.8 {" #c1"[3], " #c2"[3]}," #p "," #stride " \n" \
"vst2.8 {" #c1"[4], " #c2"[4]}," #p "," #stride " \n" \
"vst2.8 {" #c1"[5], " #c2"[5]}," #p "," #stride " \n" \
"vst2.8 {" #c1"[6], " #c2"[6]}," #p "," #stride " \n" \
"vst2.8 {" #c1"[7], " #c2"[7]}," #p "," #stride " \n"
  39. #if !defined(WORK_AROUND_GCC)
  40. // This intrinsics version makes gcc-4.6.3 crash during Load4x??() compilation
  41. // (register alloc, probably). The variants somewhat mitigate the problem, but
  42. // not quite. HFilter16i() remains problematic.
// Loads a 4-byte-wide, 8-row-tall block starting at 'src', de-interleaving
// each row's 4 bytes into out.val[0..3]; row i lands in lane i.
static WEBP_INLINE uint8x8x4_t Load4x8(const uint8_t* const src, int stride) {
  const uint8x8_t zero = vdup_n_u8(0);
  uint8x8x4_t out;
  // vld4_lane_u8 merges into its input operand, so the registers must be
  // fully defined before the first lane insertion.
  INIT_VECTOR4(out, zero, zero, zero, zero);
  out = vld4_lane_u8(src + 0 * stride, out, 0);
  out = vld4_lane_u8(src + 1 * stride, out, 1);
  out = vld4_lane_u8(src + 2 * stride, out, 2);
  out = vld4_lane_u8(src + 3 * stride, out, 3);
  out = vld4_lane_u8(src + 4 * stride, out, 4);
  out = vld4_lane_u8(src + 5 * stride, out, 5);
  out = vld4_lane_u8(src + 6 * stride, out, 6);
  out = vld4_lane_u8(src + 7 * stride, out, 7);
  return out;
}
// Loads the four columns p1,p0,q0,q1 (bytes at src-2 .. src+1) for 16 rows,
// as two stacked Load4x8() reads combined into 128-bit registers.
static WEBP_INLINE void Load4x16(const uint8_t* const src, int stride,
                                 uint8x16_t* const p1, uint8x16_t* const p0,
                                 uint8x16_t* const q0, uint8x16_t* const q1) {
  // row0 = p1[0..7]|p0[0..7]|q0[0..7]|q1[0..7]
  // row8 = p1[8..15]|p0[8..15]|q0[8..15]|q1[8..15]
  const uint8x8x4_t row0 = Load4x8(src - 2 + 0 * stride, stride);
  const uint8x8x4_t row8 = Load4x8(src - 2 + 8 * stride, stride);
  *p1 = vcombine_u8(row0.val[0], row8.val[0]);
  *p0 = vcombine_u8(row0.val[1], row8.val[1]);
  *q0 = vcombine_u8(row0.val[2], row8.val[2]);
  *q1 = vcombine_u8(row0.val[3], row8.val[3]);
}
  69. #else // WORK_AROUND_GCC
// Loads 4 bytes from 'src' (captured from the enclosing scope) into 32-bit
// lane LANE of VALUE, then advances 'src' by one row.
// NOTE(review): the uint32_t* cast assumes 32-bit lane loads from byte data
// are acceptable on the target (alignment/aliasing) — confirm for new ports.
#define LOADQ_LANE_32b(VALUE, LANE) do { \
  (VALUE) = vld1q_lane_u32((const uint32_t*)src, (VALUE), (LANE)); \
  src += stride; \
} while (0)
// Workaround variant of Load4x16(): reads sixteen 4-byte rows as 32-bit
// lanes spread over four q-registers, then untangles them with two levels
// of transposes so that each output register holds one column (p1/p0/q0/q1)
// for all 16 rows.
static WEBP_INLINE void Load4x16(const uint8_t* src, int stride,
                                 uint8x16_t* const p1, uint8x16_t* const p0,
                                 uint8x16_t* const q0, uint8x16_t* const q1) {
  const uint32x4_t zero = vdupq_n_u32(0);
  uint32x4x4_t in;
  INIT_VECTOR4(in, zero, zero, zero, zero);
  src -= 2;  // start two pixels left of the edge
  // Rows 0..15 are distributed round-robin over in.val[0..3], lane by lane:
  // in.val[k] lane j holds row (4 * j + k).
  LOADQ_LANE_32b(in.val[0], 0);
  LOADQ_LANE_32b(in.val[1], 0);
  LOADQ_LANE_32b(in.val[2], 0);
  LOADQ_LANE_32b(in.val[3], 0);
  LOADQ_LANE_32b(in.val[0], 1);
  LOADQ_LANE_32b(in.val[1], 1);
  LOADQ_LANE_32b(in.val[2], 1);
  LOADQ_LANE_32b(in.val[3], 1);
  LOADQ_LANE_32b(in.val[0], 2);
  LOADQ_LANE_32b(in.val[1], 2);
  LOADQ_LANE_32b(in.val[2], 2);
  LOADQ_LANE_32b(in.val[3], 2);
  LOADQ_LANE_32b(in.val[0], 3);
  LOADQ_LANE_32b(in.val[1], 3);
  LOADQ_LANE_32b(in.val[2], 3);
  LOADQ_LANE_32b(in.val[3], 3);
  // Transpose four 4x4 parts:
  {
    // 8-bit transpose pass pairs adjacent rows, 16-bit pass pairs the pairs;
    // after both passes each register holds one of the four byte columns.
    const uint8x16x2_t row01 = vtrnq_u8(vreinterpretq_u8_u32(in.val[0]),
                                        vreinterpretq_u8_u32(in.val[1]));
    const uint8x16x2_t row23 = vtrnq_u8(vreinterpretq_u8_u32(in.val[2]),
                                        vreinterpretq_u8_u32(in.val[3]));
    const uint16x8x2_t row02 = vtrnq_u16(vreinterpretq_u16_u8(row01.val[0]),
                                         vreinterpretq_u16_u8(row23.val[0]));
    const uint16x8x2_t row13 = vtrnq_u16(vreinterpretq_u16_u8(row01.val[1]),
                                         vreinterpretq_u16_u8(row23.val[1]));
    *p1 = vreinterpretq_u8_u16(row02.val[0]);
    *p0 = vreinterpretq_u8_u16(row13.val[0]);
    *q0 = vreinterpretq_u8_u16(row02.val[1]);
    *q1 = vreinterpretq_u8_u16(row13.val[1]);
  }
}
  113. #undef LOADQ_LANE_32b
  114. #endif // !WORK_AROUND_GCC
  115. static WEBP_INLINE void Load8x16(const uint8_t* const src, int stride,
  116. uint8x16_t* const p3, uint8x16_t* const p2,
  117. uint8x16_t* const p1, uint8x16_t* const p0,
  118. uint8x16_t* const q0, uint8x16_t* const q1,
  119. uint8x16_t* const q2, uint8x16_t* const q3) {
  120. Load4x16(src - 2, stride, p3, p2, p1, p0);
  121. Load4x16(src + 2, stride, q0, q1, q2, q3);
  122. }
  123. static WEBP_INLINE void Load16x4(const uint8_t* const src, int stride,
  124. uint8x16_t* const p1, uint8x16_t* const p0,
  125. uint8x16_t* const q0, uint8x16_t* const q1) {
  126. *p1 = vld1q_u8(src - 2 * stride);
  127. *p0 = vld1q_u8(src - 1 * stride);
  128. *q0 = vld1q_u8(src + 0 * stride);
  129. *q1 = vld1q_u8(src + 1 * stride);
  130. }
  131. static WEBP_INLINE void Load16x8(const uint8_t* const src, int stride,
  132. uint8x16_t* const p3, uint8x16_t* const p2,
  133. uint8x16_t* const p1, uint8x16_t* const p0,
  134. uint8x16_t* const q0, uint8x16_t* const q1,
  135. uint8x16_t* const q2, uint8x16_t* const q3) {
  136. Load16x4(src - 2 * stride, stride, p3, p2, p1, p0);
  137. Load16x4(src + 2 * stride, stride, q0, q1, q2, q3);
  138. }
  139. static WEBP_INLINE void Load8x8x2(const uint8_t* const u,
  140. const uint8_t* const v,
  141. int stride,
  142. uint8x16_t* const p3, uint8x16_t* const p2,
  143. uint8x16_t* const p1, uint8x16_t* const p0,
  144. uint8x16_t* const q0, uint8x16_t* const q1,
  145. uint8x16_t* const q2, uint8x16_t* const q3) {
  146. // We pack the 8x8 u-samples in the lower half of the uint8x16_t destination
  147. // and the v-samples on the higher half.
  148. *p3 = vcombine_u8(vld1_u8(u - 4 * stride), vld1_u8(v - 4 * stride));
  149. *p2 = vcombine_u8(vld1_u8(u - 3 * stride), vld1_u8(v - 3 * stride));
  150. *p1 = vcombine_u8(vld1_u8(u - 2 * stride), vld1_u8(v - 2 * stride));
  151. *p0 = vcombine_u8(vld1_u8(u - 1 * stride), vld1_u8(v - 1 * stride));
  152. *q0 = vcombine_u8(vld1_u8(u + 0 * stride), vld1_u8(v + 0 * stride));
  153. *q1 = vcombine_u8(vld1_u8(u + 1 * stride), vld1_u8(v + 1 * stride));
  154. *q2 = vcombine_u8(vld1_u8(u + 2 * stride), vld1_u8(v + 2 * stride));
  155. *q3 = vcombine_u8(vld1_u8(u + 3 * stride), vld1_u8(v + 3 * stride));
  156. }
  157. #if !defined(WORK_AROUND_GCC)
// Loads one 8-pixel u-row and the matching v-row into a single q-register
// (u in the low half, v in the high half), starting 4 pixels left of the
// vertical edge.
#define LOAD_UV_8(ROW) \
  vcombine_u8(vld1_u8(u - 4 + (ROW) * stride), vld1_u8(v - 4 + (ROW) * stride))

// Transposing variant of Load8x8x2() for a *vertical* chroma edge: reads
// 8 rows of 8 u- and 8 v-samples and transposes them so each output
// register holds one pixel column (p3..q3) across all rows.
static WEBP_INLINE void Load8x8x2T(const uint8_t* const u,
                                   const uint8_t* const v,
                                   int stride,
                                   uint8x16_t* const p3, uint8x16_t* const p2,
                                   uint8x16_t* const p1, uint8x16_t* const p0,
                                   uint8x16_t* const q0, uint8x16_t* const q1,
                                   uint8x16_t* const q2, uint8x16_t* const q3) {
  // We pack the 8x8 u-samples in the lower half of the uint8x16_t destination
  // and the v-samples on the higher half.
  const uint8x16_t row0 = LOAD_UV_8(0);
  const uint8x16_t row1 = LOAD_UV_8(1);
  const uint8x16_t row2 = LOAD_UV_8(2);
  const uint8x16_t row3 = LOAD_UV_8(3);
  const uint8x16_t row4 = LOAD_UV_8(4);
  const uint8x16_t row5 = LOAD_UV_8(5);
  const uint8x16_t row6 = LOAD_UV_8(6);
  const uint8x16_t row7 = LOAD_UV_8(7);
  // Perform two side-by-side 8x8 transposes
  // u00 u01 u02 u03 u04 u05 u06 u07 | v00 v01 v02 v03 v04 v05 v06 v07
  // u10 u11 u12 u13 u14 u15 u16 u17 | v10 v11 v12 ...
  // u20 u21 u22 u23 u24 u25 u26 u27 | v20 v21 ...
  // u30 u31 u32 u33 u34 u35 u36 u37 | ...
  // u40 u41 u42 u43 u44 u45 u46 u47 | ...
  // u50 u51 u52 u53 u54 u55 u56 u57 | ...
  // u60 u61 u62 u63 u64 u65 u66 u67 | v60 ...
  // u70 u71 u72 u73 u74 u75 u76 u77 | v70 v71 v72 ...
  const uint8x16x2_t row01 = vtrnq_u8(row0, row1);  // u00 u10 u02 u12 ...
                                                    // u01 u11 u03 u13 ...
  const uint8x16x2_t row23 = vtrnq_u8(row2, row3);  // u20 u30 u22 u32 ...
                                                    // u21 u31 u23 u33 ...
  const uint8x16x2_t row45 = vtrnq_u8(row4, row5);  // ...
  const uint8x16x2_t row67 = vtrnq_u8(row6, row7);  // ...
  // 16-bit pass: pairs the byte pairs; 32-bit pass: completes the 8x8
  // transpose by swapping the 4-byte quadrants.
  const uint16x8x2_t row02 = vtrnq_u16(vreinterpretq_u16_u8(row01.val[0]),
                                       vreinterpretq_u16_u8(row23.val[0]));
  const uint16x8x2_t row13 = vtrnq_u16(vreinterpretq_u16_u8(row01.val[1]),
                                       vreinterpretq_u16_u8(row23.val[1]));
  const uint16x8x2_t row46 = vtrnq_u16(vreinterpretq_u16_u8(row45.val[0]),
                                       vreinterpretq_u16_u8(row67.val[0]));
  const uint16x8x2_t row57 = vtrnq_u16(vreinterpretq_u16_u8(row45.val[1]),
                                       vreinterpretq_u16_u8(row67.val[1]));
  const uint32x4x2_t row04 = vtrnq_u32(vreinterpretq_u32_u16(row02.val[0]),
                                       vreinterpretq_u32_u16(row46.val[0]));
  const uint32x4x2_t row26 = vtrnq_u32(vreinterpretq_u32_u16(row02.val[1]),
                                       vreinterpretq_u32_u16(row46.val[1]));
  const uint32x4x2_t row15 = vtrnq_u32(vreinterpretq_u32_u16(row13.val[0]),
                                       vreinterpretq_u32_u16(row57.val[0]));
  const uint32x4x2_t row37 = vtrnq_u32(vreinterpretq_u32_u16(row13.val[1]),
                                       vreinterpretq_u32_u16(row57.val[1]));
  *p3 = vreinterpretq_u8_u32(row04.val[0]);
  *p2 = vreinterpretq_u8_u32(row15.val[0]);
  *p1 = vreinterpretq_u8_u32(row26.val[0]);
  *p0 = vreinterpretq_u8_u32(row37.val[0]);
  *q0 = vreinterpretq_u8_u32(row04.val[1]);
  *q1 = vreinterpretq_u8_u32(row15.val[1]);
  *q2 = vreinterpretq_u8_u32(row26.val[1]);
  *q3 = vreinterpretq_u8_u32(row37.val[1]);
}
#undef LOAD_UV_8
  218. #endif // !WORK_AROUND_GCC
  219. static WEBP_INLINE void Store2x8(const uint8x8x2_t v,
  220. uint8_t* const dst, int stride) {
  221. vst2_lane_u8(dst + 0 * stride, v, 0);
  222. vst2_lane_u8(dst + 1 * stride, v, 1);
  223. vst2_lane_u8(dst + 2 * stride, v, 2);
  224. vst2_lane_u8(dst + 3 * stride, v, 3);
  225. vst2_lane_u8(dst + 4 * stride, v, 4);
  226. vst2_lane_u8(dst + 5 * stride, v, 5);
  227. vst2_lane_u8(dst + 6 * stride, v, 6);
  228. vst2_lane_u8(dst + 7 * stride, v, 7);
  229. }
  230. static WEBP_INLINE void Store2x16(const uint8x16_t p0, const uint8x16_t q0,
  231. uint8_t* const dst, int stride) {
  232. uint8x8x2_t lo, hi;
  233. lo.val[0] = vget_low_u8(p0);
  234. lo.val[1] = vget_low_u8(q0);
  235. hi.val[0] = vget_high_u8(p0);
  236. hi.val[1] = vget_high_u8(q0);
  237. Store2x8(lo, dst - 1 + 0 * stride, stride);
  238. Store2x8(hi, dst - 1 + 8 * stride, stride);
  239. }
  240. #if !defined(WORK_AROUND_GCC)
  241. static WEBP_INLINE void Store4x8(const uint8x8x4_t v,
  242. uint8_t* const dst, int stride) {
  243. vst4_lane_u8(dst + 0 * stride, v, 0);
  244. vst4_lane_u8(dst + 1 * stride, v, 1);
  245. vst4_lane_u8(dst + 2 * stride, v, 2);
  246. vst4_lane_u8(dst + 3 * stride, v, 3);
  247. vst4_lane_u8(dst + 4 * stride, v, 4);
  248. vst4_lane_u8(dst + 5 * stride, v, 5);
  249. vst4_lane_u8(dst + 6 * stride, v, 6);
  250. vst4_lane_u8(dst + 7 * stride, v, 7);
  251. }
  252. static WEBP_INLINE void Store4x16(const uint8x16_t p1, const uint8x16_t p0,
  253. const uint8x16_t q0, const uint8x16_t q1,
  254. uint8_t* const dst, int stride) {
  255. uint8x8x4_t lo, hi;
  256. INIT_VECTOR4(lo,
  257. vget_low_u8(p1), vget_low_u8(p0),
  258. vget_low_u8(q0), vget_low_u8(q1));
  259. INIT_VECTOR4(hi,
  260. vget_high_u8(p1), vget_high_u8(p0),
  261. vget_high_u8(q0), vget_high_u8(q1));
  262. Store4x8(lo, dst - 2 + 0 * stride, stride);
  263. Store4x8(hi, dst - 2 + 8 * stride, stride);
  264. }
  265. #endif // !WORK_AROUND_GCC
  266. static WEBP_INLINE void Store16x2(const uint8x16_t p0, const uint8x16_t q0,
  267. uint8_t* const dst, int stride) {
  268. vst1q_u8(dst - stride, p0);
  269. vst1q_u8(dst, q0);
  270. }
  271. static WEBP_INLINE void Store16x4(const uint8x16_t p1, const uint8x16_t p0,
  272. const uint8x16_t q0, const uint8x16_t q1,
  273. uint8_t* const dst, int stride) {
  274. Store16x2(p1, p0, dst - stride, stride);
  275. Store16x2(q0, q1, dst + stride, stride);
  276. }
  277. static WEBP_INLINE void Store8x2x2(const uint8x16_t p0, const uint8x16_t q0,
  278. uint8_t* const u, uint8_t* const v,
  279. int stride) {
  280. // p0 and q0 contain the u+v samples packed in low/high halves.
  281. vst1_u8(u - stride, vget_low_u8(p0));
  282. vst1_u8(u, vget_low_u8(q0));
  283. vst1_u8(v - stride, vget_high_u8(p0));
  284. vst1_u8(v, vget_high_u8(q0));
  285. }
  286. static WEBP_INLINE void Store8x4x2(const uint8x16_t p1, const uint8x16_t p0,
  287. const uint8x16_t q0, const uint8x16_t q1,
  288. uint8_t* const u, uint8_t* const v,
  289. int stride) {
  290. // The p1...q1 registers contain the u+v samples packed in low/high halves.
  291. Store8x2x2(p1, p0, u - stride, v - stride, stride);
  292. Store8x2x2(q0, q1, u + stride, v + stride, stride);
  293. }
  294. #if !defined(WORK_AROUND_GCC)
// Writes one 6-pixel row (3 bytes left of the edge, 3 at/right of it) from
// lane LANE of VAL0/VAL1, then advances DST by 'stride' (captured from the
// enclosing scope). Must be a macro: DST is modified in place.
#define STORE6_LANE(DST, VAL0, VAL1, LANE) do { \
  vst3_lane_u8((DST) - 3, (VAL0), (LANE)); \
  vst3_lane_u8((DST) + 0, (VAL1), (LANE)); \
  (DST) += stride; \
} while (0)
// Stores the six filtered columns p2..q2 (6 pixels wide, 8 rows) for the
// u-plane, then for the v-plane. Input registers pack u-samples in their
// low halves and v-samples in their high halves.
static WEBP_INLINE void Store6x8x2(const uint8x16_t p2, const uint8x16_t p1,
                                   const uint8x16_t p0, const uint8x16_t q0,
                                   const uint8x16_t q1, const uint8x16_t q2,
                                   uint8_t* u, uint8_t* v,
                                   int stride) {
  uint8x8x3_t u0, u1, v0, v1;
  INIT_VECTOR3(u0, vget_low_u8(p2), vget_low_u8(p1), vget_low_u8(p0));
  INIT_VECTOR3(u1, vget_low_u8(q0), vget_low_u8(q1), vget_low_u8(q2));
  INIT_VECTOR3(v0, vget_high_u8(p2), vget_high_u8(p1), vget_high_u8(p0));
  INIT_VECTOR3(v1, vget_high_u8(q0), vget_high_u8(q1), vget_high_u8(q2));
  STORE6_LANE(u, u0, u1, 0);
  STORE6_LANE(u, u0, u1, 1);
  STORE6_LANE(u, u0, u1, 2);
  STORE6_LANE(u, u0, u1, 3);
  STORE6_LANE(u, u0, u1, 4);
  STORE6_LANE(u, u0, u1, 5);
  STORE6_LANE(u, u0, u1, 6);
  STORE6_LANE(u, u0, u1, 7);
  STORE6_LANE(v, v0, v1, 0);
  STORE6_LANE(v, v0, v1, 1);
  STORE6_LANE(v, v0, v1, 2);
  STORE6_LANE(v, v0, v1, 3);
  STORE6_LANE(v, v0, v1, 4);
  STORE6_LANE(v, v0, v1, 5);
  STORE6_LANE(v, v0, v1, 6);
  STORE6_LANE(v, v0, v1, 7);
}
#undef STORE6_LANE
  328. static WEBP_INLINE void Store4x8x2(const uint8x16_t p1, const uint8x16_t p0,
  329. const uint8x16_t q0, const uint8x16_t q1,
  330. uint8_t* const u, uint8_t* const v,
  331. int stride) {
  332. uint8x8x4_t u0, v0;
  333. INIT_VECTOR4(u0,
  334. vget_low_u8(p1), vget_low_u8(p0),
  335. vget_low_u8(q0), vget_low_u8(q1));
  336. INIT_VECTOR4(v0,
  337. vget_high_u8(p1), vget_high_u8(p0),
  338. vget_high_u8(q0), vget_high_u8(q1));
  339. vst4_lane_u8(u - 2 + 0 * stride, u0, 0);
  340. vst4_lane_u8(u - 2 + 1 * stride, u0, 1);
  341. vst4_lane_u8(u - 2 + 2 * stride, u0, 2);
  342. vst4_lane_u8(u - 2 + 3 * stride, u0, 3);
  343. vst4_lane_u8(u - 2 + 4 * stride, u0, 4);
  344. vst4_lane_u8(u - 2 + 5 * stride, u0, 5);
  345. vst4_lane_u8(u - 2 + 6 * stride, u0, 6);
  346. vst4_lane_u8(u - 2 + 7 * stride, u0, 7);
  347. vst4_lane_u8(v - 2 + 0 * stride, v0, 0);
  348. vst4_lane_u8(v - 2 + 1 * stride, v0, 1);
  349. vst4_lane_u8(v - 2 + 2 * stride, v0, 2);
  350. vst4_lane_u8(v - 2 + 3 * stride, v0, 3);
  351. vst4_lane_u8(v - 2 + 4 * stride, v0, 4);
  352. vst4_lane_u8(v - 2 + 5 * stride, v0, 5);
  353. vst4_lane_u8(v - 2 + 6 * stride, v0, 6);
  354. vst4_lane_u8(v - 2 + 7 * stride, v0, 7);
  355. }
  356. #endif // !WORK_AROUND_GCC
  357. // Treats 'v' as an uint8x8_t and zero extends to an int16x8_t.
  358. static WEBP_INLINE int16x8_t ConvertU8ToS16(uint32x2_t v) {
  359. return vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(v)));
  360. }
// Performs unsigned 8b saturation on 'dst01' and 'dst23' storing the result
// to the corresponding rows of 'dst'.
static WEBP_INLINE void SaturateAndStore4x4(uint8_t* const dst,
                                            const int16x8_t dst01,
                                            const int16x8_t dst23) {
  // Unsigned saturate to 8b.
  const uint8x8_t dst01_u8 = vqmovun_s16(dst01);
  const uint8x8_t dst23_u8 = vqmovun_s16(dst23);
  // Store the results, one 4-byte row per 32-bit lane.
  // NOTE(review): the uint32_t* casts assume rows at dst + k * BPS can be
  // accessed as 32-bit units — confirm alignment expectations for BPS.
  vst1_lane_u32((uint32_t*)(dst + 0 * BPS), vreinterpret_u32_u8(dst01_u8), 0);
  vst1_lane_u32((uint32_t*)(dst + 1 * BPS), vreinterpret_u32_u8(dst01_u8), 1);
  vst1_lane_u32((uint32_t*)(dst + 2 * BPS), vreinterpret_u32_u8(dst23_u8), 0);
  vst1_lane_u32((uint32_t*)(dst + 3 * BPS), vreinterpret_u32_u8(dst23_u8), 1);
}
// Adds the rounded, descaled (>> 3 with rounding) residual rows 'row01' and
// 'row23' to the 4x4 block of pixels at 'dst', saturating to [0,255].
static WEBP_INLINE void Add4x4(const int16x8_t row01, const int16x8_t row23,
                               uint8_t* const dst) {
  uint32x2_t dst01 = vdup_n_u32(0);
  uint32x2_t dst23 = vdup_n_u32(0);
  // Load the source pixels, one 4-byte row per 32-bit lane: rows 0/1 go into
  // dst01, rows 2/3 into dst23 (matching the row01/row23 packing).
  dst01 = vld1_lane_u32((uint32_t*)(dst + 0 * BPS), dst01, 0);
  dst23 = vld1_lane_u32((uint32_t*)(dst + 2 * BPS), dst23, 0);
  dst01 = vld1_lane_u32((uint32_t*)(dst + 1 * BPS), dst01, 1);
  dst23 = vld1_lane_u32((uint32_t*)(dst + 3 * BPS), dst23, 1);
  {
    // Convert to 16b.
    const int16x8_t dst01_s16 = ConvertU8ToS16(dst01);
    const int16x8_t dst23_s16 = ConvertU8ToS16(dst23);
    // Descale with rounding: out = dst + ((row + 4) >> 3), fused by vrsra.
    const int16x8_t out01 = vrsraq_n_s16(dst01_s16, row01, 3);
    const int16x8_t out23 = vrsraq_n_s16(dst23_s16, row23, 3);
    // Add the inverse transform.
    SaturateAndStore4x4(dst, out01, out23);
  }
}
  395. //-----------------------------------------------------------------------------
  396. // Simple In-loop filtering (Paragraph 15.2)
  397. static uint8x16_t NeedsFilter(const uint8x16_t p1, const uint8x16_t p0,
  398. const uint8x16_t q0, const uint8x16_t q1,
  399. int thresh) {
  400. const uint8x16_t thresh_v = vdupq_n_u8((uint8_t)thresh);
  401. const uint8x16_t a_p0_q0 = vabdq_u8(p0, q0); // abs(p0-q0)
  402. const uint8x16_t a_p1_q1 = vabdq_u8(p1, q1); // abs(p1-q1)
  403. const uint8x16_t a_p0_q0_2 = vqaddq_u8(a_p0_q0, a_p0_q0); // 2 * abs(p0-q0)
  404. const uint8x16_t a_p1_q1_2 = vshrq_n_u8(a_p1_q1, 1); // abs(p1-q1) / 2
  405. const uint8x16_t sum = vqaddq_u8(a_p0_q0_2, a_p1_q1_2);
  406. const uint8x16_t mask = vcgeq_u8(thresh_v, sum);
  407. return mask;
  408. }
  409. static int8x16_t FlipSign(const uint8x16_t v) {
  410. const uint8x16_t sign_bit = vdupq_n_u8(0x80);
  411. return vreinterpretq_s8_u8(veorq_u8(v, sign_bit));
  412. }
  413. static uint8x16_t FlipSignBack(const int8x16_t v) {
  414. const int8x16_t sign_bit = vdupq_n_s8(0x80);
  415. return vreinterpretq_u8_s8(veorq_s8(v, sign_bit));
  416. }
  417. static int8x16_t GetBaseDelta(const int8x16_t p1, const int8x16_t p0,
  418. const int8x16_t q0, const int8x16_t q1) {
  419. const int8x16_t q0_p0 = vqsubq_s8(q0, p0); // (q0-p0)
  420. const int8x16_t p1_q1 = vqsubq_s8(p1, q1); // (p1-q1)
  421. const int8x16_t s1 = vqaddq_s8(p1_q1, q0_p0); // (p1-q1) + 1 * (q0 - p0)
  422. const int8x16_t s2 = vqaddq_s8(q0_p0, s1); // (p1-q1) + 2 * (q0 - p0)
  423. const int8x16_t s3 = vqaddq_s8(q0_p0, s2); // (p1-q1) + 3 * (q0 - p0)
  424. return s3;
  425. }
  426. static int8x16_t GetBaseDelta0(const int8x16_t p0, const int8x16_t q0) {
  427. const int8x16_t q0_p0 = vqsubq_s8(q0, p0); // (q0-p0)
  428. const int8x16_t s1 = vqaddq_s8(q0_p0, q0_p0); // 2 * (q0 - p0)
  429. const int8x16_t s2 = vqaddq_s8(q0_p0, s1); // 3 * (q0 - p0)
  430. return s2;
  431. }
  432. //------------------------------------------------------------------------------
  433. static void ApplyFilter2(const int8x16_t p0s, const int8x16_t q0s,
  434. const int8x16_t delta,
  435. uint8x16_t* const op0, uint8x16_t* const oq0) {
  436. const int8x16_t kCst3 = vdupq_n_s8(0x03);
  437. const int8x16_t kCst4 = vdupq_n_s8(0x04);
  438. const int8x16_t delta_p3 = vqaddq_s8(delta, kCst3);
  439. const int8x16_t delta_p4 = vqaddq_s8(delta, kCst4);
  440. const int8x16_t delta3 = vshrq_n_s8(delta_p3, 3);
  441. const int8x16_t delta4 = vshrq_n_s8(delta_p4, 3);
  442. const int8x16_t sp0 = vqaddq_s8(p0s, delta3);
  443. const int8x16_t sq0 = vqsubq_s8(q0s, delta4);
  444. *op0 = FlipSignBack(sp0);
  445. *oq0 = FlipSignBack(sq0);
  446. }
  447. #if defined(USE_INTRINSICS)
  448. static void DoFilter2(const uint8x16_t p1, const uint8x16_t p0,
  449. const uint8x16_t q0, const uint8x16_t q1,
  450. const uint8x16_t mask,
  451. uint8x16_t* const op0, uint8x16_t* const oq0) {
  452. const int8x16_t p1s = FlipSign(p1);
  453. const int8x16_t p0s = FlipSign(p0);
  454. const int8x16_t q0s = FlipSign(q0);
  455. const int8x16_t q1s = FlipSign(q1);
  456. const int8x16_t delta0 = GetBaseDelta(p1s, p0s, q0s, q1s);
  457. const int8x16_t delta1 = vandq_s8(delta0, vreinterpretq_s8_u8(mask));
  458. ApplyFilter2(p0s, q0s, delta1, op0, oq0);
  459. }
  460. static void SimpleVFilter16(uint8_t* p, int stride, int thresh) {
  461. uint8x16_t p1, p0, q0, q1, op0, oq0;
  462. Load16x4(p, stride, &p1, &p0, &q0, &q1);
  463. {
  464. const uint8x16_t mask = NeedsFilter(p1, p0, q0, q1, thresh);
  465. DoFilter2(p1, p0, q0, q1, mask, &op0, &oq0);
  466. }
  467. Store16x2(op0, oq0, p, stride);
  468. }
  469. static void SimpleHFilter16(uint8_t* p, int stride, int thresh) {
  470. uint8x16_t p1, p0, q0, q1, oq0, op0;
  471. Load4x16(p, stride, &p1, &p0, &q0, &q1);
  472. {
  473. const uint8x16_t mask = NeedsFilter(p1, p0, q0, q1, thresh);
  474. DoFilter2(p1, p0, q0, q1, mask, &op0, &oq0);
  475. }
  476. Store2x16(op0, oq0, p, stride);
  477. }
  478. #else
// Q-registers used (and clobbered) by the asm filter bodies below.
#define QRegs "q0", "q1", "q2", "q3", \
"q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"

// XORs the sign bit (register 's' holds 0x80) to convert between the
// unsigned [0,255] and signed [-128,127] pixel domains.
#define FLIP_SIGN_BIT2(a, b, s) \
"veor " #a "," #a "," #s " \n" \
"veor " #b "," #b "," #s " \n" \

#define FLIP_SIGN_BIT4(a, b, c, d, s) \
FLIP_SIGN_BIT2(a, b, s) \
FLIP_SIGN_BIT2(c, d, s) \

// Computes the simple-filter mask into 'mask':
// all-ones where 2 * |p0 - q0| + |p1 - q1| / 2 <= thresh. Clobbers q14/q15.
#define NEEDS_FILTER(p1, p0, q0, q1, thresh, mask) \
"vabd.u8 q15," #p0 "," #q0 " \n" /* abs(p0 - q0) */ \
"vabd.u8 q14," #p1 "," #q1 " \n" /* abs(p1 - q1) */ \
"vqadd.u8 q15, q15, q15 \n" /* abs(p0 - q0) * 2 */ \
"vshr.u8 q14, q14, #1 \n" /* abs(p1 - q1) / 2 */ \
"vqadd.u8 q15, q15, q14 \n" /* abs(p0 - q0) * 2 + abs(p1 - q1) / 2 */ \
"vdup.8 q14, " #thresh " \n" \
"vcge.u8 " #mask ", q14, q15 \n" /* mask = (sum <= thresh) */
// Computes o = (p1 - q1) + 3 * (q0 - p0), saturating. Clobbers q15.
#define GET_BASE_DELTA(p1, p0, q0, q1, o) \
"vqsub.s8 q15," #q0 "," #p0 " \n" /* (q0 - p0) */ \
"vqsub.s8 " #o "," #p1 "," #q1 " \n" /* (p1 - q1) */ \
"vqadd.s8 " #o "," #o ", q15 \n" /* (p1 - q1) + 1 * (q0 - p0) */ \
"vqadd.s8 " #o "," #o ", q15 \n" /* (p1 - q1) + 2 * (q0 - p0) */ \
"vqadd.s8 " #o "," #o ", q15 \n" /* (p1 - q1) + 3 * (q0 - p0) */
// Applies the filter amount 'fl': p0 += (fl + 3) >> 3, q0 -= (fl + 4) >> 3,
// all saturating. Clobbers q15.
#define DO_SIMPLE_FILTER(p0, q0, fl) \
"vmov.i8 q15, #0x03 \n" \
"vqadd.s8 q15, q15, " #fl " \n" /* filter1 = filter + 3 */ \
"vshr.s8 q15, q15, #3 \n" /* filter1 >> 3 */ \
"vqadd.s8 " #p0 "," #p0 ", q15 \n" /* p0 += filter1 */ \
\
"vmov.i8 q15, #0x04 \n" \
"vqadd.s8 q15, q15, " #fl " \n" /* filter2 = filter + 4 */ \
"vshr.s8 q15, q15, #3 \n" /* filter2 >> 3 */ \
"vqsub.s8 " #q0 "," #q0 ", q15 \n" /* q0 -= filter2 */
// Applies filter on 2 pixels (p0 and q0)
#define DO_FILTER2(p1, p0, q0, q1, thresh) \
NEEDS_FILTER(p1, p0, q0, q1, thresh, q9) /* filter mask in q9 */ \
"vmov.i8 q10, #0x80 \n" /* sign bit */ \
FLIP_SIGN_BIT4(p1, p0, q0, q1, q10) /* convert to signed value */ \
GET_BASE_DELTA(p1, p0, q0, q1, q11) /* get filter level */ \
"vand q9, q9, q11 \n" /* apply filter mask */ \
DO_SIMPLE_FILTER(p0, q0, q9) /* apply filter */ \
FLIP_SIGN_BIT2(p0, q0, q10)
// Simple in-loop filter on a horizontal edge (asm variant): loads the two
// rows above and below 'p', filters p0/q0 in place and stores them back.
static void SimpleVFilter16(uint8_t* p, int stride, int thresh) {
  __asm__ volatile (
    "sub %[p], %[p], %[stride], lsl #1 \n" // p -= 2 * stride
    "vld1.u8 {q1}, [%[p]], %[stride] \n" // p1
    "vld1.u8 {q2}, [%[p]], %[stride] \n" // p0
    "vld1.u8 {q3}, [%[p]], %[stride] \n" // q0
    "vld1.u8 {q12}, [%[p]] \n" // q1
    DO_FILTER2(q1, q2, q3, q12, %[thresh])
    "sub %[p], %[p], %[stride], lsl #1 \n" // p -= 2 * stride
    "vst1.u8 {q2}, [%[p]], %[stride] \n" // store op0
    "vst1.u8 {q3}, [%[p]] \n" // store oq0
    : [p] "+r"(p)
    : [stride] "r"(stride), [thresh] "r"(thresh)
    : "memory", QRegs
  );
}
// Simple in-loop filter on a vertical edge (asm variant): gathers the
// p1..q1 columns with transposing LOAD8x4 reads, filters, and scatters
// p0/q0 back with STORE8x2. Uses r4-r6 as fixed scratch registers
// (declared in the clobber list).
static void SimpleHFilter16(uint8_t* p, int stride, int thresh) {
  __asm__ volatile (
    "sub r4, %[p], #2 \n" // base1 = p - 2
    "lsl r6, %[stride], #1 \n" // r6 = 2 * stride
    "add r5, r4, %[stride] \n" // base2 = base1 + stride
    LOAD8x4(d2, d3, d4, d5, [r4], [r5], r6)
    LOAD8x4(d24, d25, d26, d27, [r4], [r5], r6)
    "vswp d3, d24 \n" // p1:q1 p0:q3
    "vswp d5, d26 \n" // q0:q2 q1:q4
    "vswp q2, q12 \n" // p1:q1 p0:q2 q0:q3 q1:q4
    DO_FILTER2(q1, q2, q12, q13, %[thresh])
    "sub %[p], %[p], #1 \n" // p - 1
    "vswp d5, d24 \n"
    STORE8x2(d4, d5, [%[p]], %[stride])
    STORE8x2(d24, d25, [%[p]], %[stride])
    : [p] "+r"(p)
    : [stride] "r"(stride), [thresh] "r"(thresh)
    : "memory", "r4", "r5", "r6", QRegs
  );
}
  556. #endif // USE_INTRINSICS
  557. static void SimpleVFilter16i(uint8_t* p, int stride, int thresh) {
  558. uint32_t k;
  559. for (k = 3; k != 0; --k) {
  560. p += 4 * stride;
  561. SimpleVFilter16(p, stride, thresh);
  562. }
  563. }
  564. static void SimpleHFilter16i(uint8_t* p, int stride, int thresh) {
  565. uint32_t k;
  566. for (k = 3; k != 0; --k) {
  567. p += 4;
  568. SimpleHFilter16(p, stride, thresh);
  569. }
  570. }
  571. //------------------------------------------------------------------------------
  572. // Complex In-loop filtering (Paragraph 15.3)
  573. static uint8x16_t NeedsHev(const uint8x16_t p1, const uint8x16_t p0,
  574. const uint8x16_t q0, const uint8x16_t q1,
  575. int hev_thresh) {
  576. const uint8x16_t hev_thresh_v = vdupq_n_u8((uint8_t)hev_thresh);
  577. const uint8x16_t a_p1_p0 = vabdq_u8(p1, p0); // abs(p1 - p0)
  578. const uint8x16_t a_q1_q0 = vabdq_u8(q1, q0); // abs(q1 - q0)
  579. const uint8x16_t mask1 = vcgtq_u8(a_p1_p0, hev_thresh_v);
  580. const uint8x16_t mask2 = vcgtq_u8(a_q1_q0, hev_thresh_v);
  581. const uint8x16_t mask = vorrq_u8(mask1, mask2);
  582. return mask;
  583. }
  584. static uint8x16_t NeedsFilter2(const uint8x16_t p3, const uint8x16_t p2,
  585. const uint8x16_t p1, const uint8x16_t p0,
  586. const uint8x16_t q0, const uint8x16_t q1,
  587. const uint8x16_t q2, const uint8x16_t q3,
  588. int ithresh, int thresh) {
  589. const uint8x16_t ithresh_v = vdupq_n_u8((uint8_t)ithresh);
  590. const uint8x16_t a_p3_p2 = vabdq_u8(p3, p2); // abs(p3 - p2)
  591. const uint8x16_t a_p2_p1 = vabdq_u8(p2, p1); // abs(p2 - p1)
  592. const uint8x16_t a_p1_p0 = vabdq_u8(p1, p0); // abs(p1 - p0)
  593. const uint8x16_t a_q3_q2 = vabdq_u8(q3, q2); // abs(q3 - q2)
  594. const uint8x16_t a_q2_q1 = vabdq_u8(q2, q1); // abs(q2 - q1)
  595. const uint8x16_t a_q1_q0 = vabdq_u8(q1, q0); // abs(q1 - q0)
  596. const uint8x16_t max1 = vmaxq_u8(a_p3_p2, a_p2_p1);
  597. const uint8x16_t max2 = vmaxq_u8(a_p1_p0, a_q3_q2);
  598. const uint8x16_t max3 = vmaxq_u8(a_q2_q1, a_q1_q0);
  599. const uint8x16_t max12 = vmaxq_u8(max1, max2);
  600. const uint8x16_t max123 = vmaxq_u8(max12, max3);
  601. const uint8x16_t mask2 = vcgeq_u8(ithresh_v, max123);
  602. const uint8x16_t mask1 = NeedsFilter(p1, p0, q0, q1, thresh);
  603. const uint8x16_t mask = vandq_u8(mask1, mask2);
  604. return mask;
  605. }
  606. // 4-points filter
  607. static void ApplyFilter4(
  608. const int8x16_t p1, const int8x16_t p0,
  609. const int8x16_t q0, const int8x16_t q1,
  610. const int8x16_t delta0,
  611. uint8x16_t* const op1, uint8x16_t* const op0,
  612. uint8x16_t* const oq0, uint8x16_t* const oq1) {
  613. const int8x16_t kCst3 = vdupq_n_s8(0x03);
  614. const int8x16_t kCst4 = vdupq_n_s8(0x04);
  615. const int8x16_t delta1 = vqaddq_s8(delta0, kCst4);
  616. const int8x16_t delta2 = vqaddq_s8(delta0, kCst3);
  617. const int8x16_t a1 = vshrq_n_s8(delta1, 3);
  618. const int8x16_t a2 = vshrq_n_s8(delta2, 3);
  619. const int8x16_t a3 = vrshrq_n_s8(a1, 1); // a3 = (a1 + 1) >> 1
  620. *op0 = FlipSignBack(vqaddq_s8(p0, a2)); // clip(p0 + a2)
  621. *oq0 = FlipSignBack(vqsubq_s8(q0, a1)); // clip(q0 - a1)
  622. *op1 = FlipSignBack(vqaddq_s8(p1, a3)); // clip(p1 + a3)
  623. *oq1 = FlipSignBack(vqsubq_s8(q1, a3)); // clip(q1 - a3)
  624. }
// Applies the complex in-loop filter on 4 pixels around an inner edge.
// Lanes selected by (mask & hev_mask) get the 2-point simple filter;
// lanes selected by (mask & !hev_mask) get the 4-point filter.
static void DoFilter4(
    const uint8x16_t p1, const uint8x16_t p0,
    const uint8x16_t q0, const uint8x16_t q1,
    const uint8x16_t mask, const uint8x16_t hev_mask,
    uint8x16_t* const op1, uint8x16_t* const op0,
    uint8x16_t* const oq0, uint8x16_t* const oq1) {
  // This is a fused version of DoFilter2() calling ApplyFilter2 directly
  const int8x16_t p1s = FlipSign(p1);
  int8x16_t p0s = FlipSign(p0);
  int8x16_t q0s = FlipSign(q0);
  const int8x16_t q1s = FlipSign(q1);
  const uint8x16_t simple_lf_mask = vandq_u8(mask, hev_mask);

  // do_filter2 part (simple loopfilter on pixels with hev)
  {
    const int8x16_t delta = GetBaseDelta(p1s, p0s, q0s, q1s);
    const int8x16_t simple_lf_delta =
        vandq_s8(delta, vreinterpretq_s8_u8(simple_lf_mask));
    uint8x16_t tmp_p0, tmp_q0;
    ApplyFilter2(p0s, q0s, simple_lf_delta, &tmp_p0, &tmp_q0);
    // TODO(skal): avoid the double FlipSign() in ApplyFilter2() and here
    p0s = FlipSign(tmp_p0);
    q0s = FlipSign(tmp_q0);
  }

  // do_filter4 part (complex loopfilter on pixels without hev)
  {
    // delta0 is recomputed from the possibly-updated p0s/q0s.
    const int8x16_t delta0 = GetBaseDelta0(p0s, q0s);
    // we use: (mask & hev_mask) ^ mask = mask & !hev_mask
    const uint8x16_t complex_lf_mask = veorq_u8(simple_lf_mask, mask);
    const int8x16_t complex_lf_delta =
        vandq_s8(delta0, vreinterpretq_s8_u8(complex_lf_mask));
    ApplyFilter4(p1s, p0s, q0s, q1s, complex_lf_delta, op1, op0, oq0, oq1);
  }
}
// 6-points filter
// Computes the strong-filter taps in 16-bit precision (to avoid overflow):
//   a1 = (27 * delta + 63) >> 7, a2 = (18 * delta + 63) >> 7,
//   a3 = (9 * delta + 63) >> 7   (saturating narrowing shifts),
// then updates three pixels on each side of the edge.
static void ApplyFilter6(
    const int8x16_t p2, const int8x16_t p1, const int8x16_t p0,
    const int8x16_t q0, const int8x16_t q1, const int8x16_t q2,
    const int8x16_t delta,
    uint8x16_t* const op2, uint8x16_t* const op1, uint8x16_t* const op0,
    uint8x16_t* const oq0, uint8x16_t* const oq1, uint8x16_t* const oq2) {
  const int16x8_t kCst63 = vdupq_n_s16(63);
  const int8x8_t kCst27 = vdup_n_s8(27);
  const int8x8_t kCst18 = vdup_n_s8(18);
  const int8x8_t kCst9 = vdup_n_s8(9);
  const int8x8_t delta_lo = vget_low_s8(delta);
  const int8x8_t delta_hi = vget_high_s8(delta);
  const int16x8_t s1_lo = vmlal_s8(kCst63, kCst27, delta_lo);  // 63 + 27 * a
  const int16x8_t s1_hi = vmlal_s8(kCst63, kCst27, delta_hi);  // 63 + 27 * a
  const int16x8_t s2_lo = vmlal_s8(kCst63, kCst18, delta_lo);  // 63 + 18 * a
  const int16x8_t s2_hi = vmlal_s8(kCst63, kCst18, delta_hi);  // 63 + 18 * a
  const int16x8_t s3_lo = vmlal_s8(kCst63, kCst9, delta_lo);   // 63 + 9 * a
  const int16x8_t s3_hi = vmlal_s8(kCst63, kCst9, delta_hi);   // 63 + 9 * a
  const int8x8_t a1_lo = vqshrn_n_s16(s1_lo, 7);
  const int8x8_t a1_hi = vqshrn_n_s16(s1_hi, 7);
  const int8x8_t a2_lo = vqshrn_n_s16(s2_lo, 7);
  const int8x8_t a2_hi = vqshrn_n_s16(s2_hi, 7);
  const int8x8_t a3_lo = vqshrn_n_s16(s3_lo, 7);
  const int8x8_t a3_hi = vqshrn_n_s16(s3_hi, 7);
  const int8x16_t a1 = vcombine_s8(a1_lo, a1_hi);
  const int8x16_t a2 = vcombine_s8(a2_lo, a2_hi);
  const int8x16_t a3 = vcombine_s8(a3_lo, a3_hi);
  *op0 = FlipSignBack(vqaddq_s8(p0, a1));  // clip(p0 + a1)
  *oq0 = FlipSignBack(vqsubq_s8(q0, a1));  // clip(q0 - a1)
  *oq1 = FlipSignBack(vqsubq_s8(q1, a2));  // clip(q1 - a2)
  *op1 = FlipSignBack(vqaddq_s8(p1, a2));  // clip(p1 + a2)
  *oq2 = FlipSignBack(vqsubq_s8(q2, a3));  // clip(q2 - a3)
  *op2 = FlipSignBack(vqaddq_s8(p2, a3));  // clip(p2 + a3)
}
// Applies the complex in-loop filter on 6 pixels around a macroblock edge.
// Lanes selected by (mask & hev_mask) get the 2-point simple filter;
// lanes selected by (mask & !hev_mask) get the strong 6-point filter.
static void DoFilter6(
    const uint8x16_t p2, const uint8x16_t p1, const uint8x16_t p0,
    const uint8x16_t q0, const uint8x16_t q1, const uint8x16_t q2,
    const uint8x16_t mask, const uint8x16_t hev_mask,
    uint8x16_t* const op2, uint8x16_t* const op1, uint8x16_t* const op0,
    uint8x16_t* const oq0, uint8x16_t* const oq1, uint8x16_t* const oq2) {
  // This is a fused version of DoFilter2() calling ApplyFilter2 directly
  const int8x16_t p2s = FlipSign(p2);
  const int8x16_t p1s = FlipSign(p1);
  int8x16_t p0s = FlipSign(p0);
  int8x16_t q0s = FlipSign(q0);
  const int8x16_t q1s = FlipSign(q1);
  const int8x16_t q2s = FlipSign(q2);
  const uint8x16_t simple_lf_mask = vandq_u8(mask, hev_mask);
  // Unlike DoFilter4(), delta0 is computed once, before p0s/q0s are updated.
  const int8x16_t delta0 = GetBaseDelta(p1s, p0s, q0s, q1s);

  // do_filter2 part (simple loopfilter on pixels with hev)
  {
    const int8x16_t simple_lf_delta =
        vandq_s8(delta0, vreinterpretq_s8_u8(simple_lf_mask));
    uint8x16_t tmp_p0, tmp_q0;
    ApplyFilter2(p0s, q0s, simple_lf_delta, &tmp_p0, &tmp_q0);
    // TODO(skal): avoid the double FlipSign() in ApplyFilter2() and here
    p0s = FlipSign(tmp_p0);
    q0s = FlipSign(tmp_q0);
  }

  // do_filter6 part (complex loopfilter on pixels without hev)
  {
    // we use: (mask & hev_mask) ^ mask = mask & !hev_mask
    const uint8x16_t complex_lf_mask = veorq_u8(simple_lf_mask, mask);
    const int8x16_t complex_lf_delta =
        vandq_s8(delta0, vreinterpretq_s8_u8(complex_lf_mask));
    ApplyFilter6(p2s, p1s, p0s, q0s, q1s, q2s, complex_lf_delta,
                 op2, op1, op0, oq0, oq1, oq2);
  }
}
  728. // on macroblock edges
  729. static void VFilter16(uint8_t* p, int stride,
  730. int thresh, int ithresh, int hev_thresh) {
  731. uint8x16_t p3, p2, p1, p0, q0, q1, q2, q3;
  732. Load16x8(p, stride, &p3, &p2, &p1, &p0, &q0, &q1, &q2, &q3);
  733. {
  734. const uint8x16_t mask = NeedsFilter2(p3, p2, p1, p0, q0, q1, q2, q3,
  735. ithresh, thresh);
  736. const uint8x16_t hev_mask = NeedsHev(p1, p0, q0, q1, hev_thresh);
  737. uint8x16_t op2, op1, op0, oq0, oq1, oq2;
  738. DoFilter6(p2, p1, p0, q0, q1, q2, mask, hev_mask,
  739. &op2, &op1, &op0, &oq0, &oq1, &oq2);
  740. Store16x2(op2, op1, p - 2 * stride, stride);
  741. Store16x2(op0, oq0, p + 0 * stride, stride);
  742. Store16x2(oq1, oq2, p + 2 * stride, stride);
  743. }
  744. }
  745. static void HFilter16(uint8_t* p, int stride,
  746. int thresh, int ithresh, int hev_thresh) {
  747. uint8x16_t p3, p2, p1, p0, q0, q1, q2, q3;
  748. Load8x16(p, stride, &p3, &p2, &p1, &p0, &q0, &q1, &q2, &q3);
  749. {
  750. const uint8x16_t mask = NeedsFilter2(p3, p2, p1, p0, q0, q1, q2, q3,
  751. ithresh, thresh);
  752. const uint8x16_t hev_mask = NeedsHev(p1, p0, q0, q1, hev_thresh);
  753. uint8x16_t op2, op1, op0, oq0, oq1, oq2;
  754. DoFilter6(p2, p1, p0, q0, q1, q2, mask, hev_mask,
  755. &op2, &op1, &op0, &oq0, &oq1, &oq2);
  756. Store2x16(op2, op1, p - 2, stride);
  757. Store2x16(op0, oq0, p + 0, stride);
  758. Store2x16(oq1, oq2, p + 2, stride);
  759. }
  760. }
// on three inner edges
// Complex vertical filter on the three inner horizontal edges (rows 4, 8,
// 12) of a 16x16 macroblock. The four rows above each edge are carried over
// from the previous iteration to avoid reloading them.
static void VFilter16i(uint8_t* p, int stride,
                       int thresh, int ithresh, int hev_thresh) {
  uint32_t k;
  uint8x16_t p3, p2, p1, p0;
  Load16x4(p + 2 * stride, stride, &p3, &p2, &p1, &p0);
  for (k = 3; k != 0; --k) {
    uint8x16_t q0, q1, q2, q3;
    p += 4 * stride;
    Load16x4(p + 2 * stride, stride, &q0, &q1, &q2, &q3);
    {
      const uint8x16_t mask =
          NeedsFilter2(p3, p2, p1, p0, q0, q1, q2, q3, ithresh, thresh);
      const uint8x16_t hev_mask = NeedsHev(p1, p0, q0, q1, hev_thresh);
      // p3 and p2 are not just temporary variables here: they will be
      // re-used for next span. And q2/q3 will become p1/p0 accordingly.
      DoFilter4(p1, p0, q0, q1, mask, hev_mask, &p1, &p0, &p3, &p2);
      Store16x4(p1, p0, p3, p2, p, stride);
      p1 = q2;
      p0 = q3;
    }
  }
}
  784. #if !defined(WORK_AROUND_GCC)
// Horizontal counterpart of VFilter16i: complex filter on the three inner
// vertical edges (columns 4, 8, 12) of a 16x16 macroblock. The four columns
// left of each edge are carried over between iterations.
static void HFilter16i(uint8_t* p, int stride,
                       int thresh, int ithresh, int hev_thresh) {
  uint32_t k;
  uint8x16_t p3, p2, p1, p0;
  Load4x16(p + 2, stride, &p3, &p2, &p1, &p0);
  for (k = 3; k != 0; --k) {
    uint8x16_t q0, q1, q2, q3;
    p += 4;
    Load4x16(p + 2, stride, &q0, &q1, &q2, &q3);
    {
      const uint8x16_t mask =
          NeedsFilter2(p3, p2, p1, p0, q0, q1, q2, q3, ithresh, thresh);
      const uint8x16_t hev_mask = NeedsHev(p1, p0, q0, q1, hev_thresh);
      // p3/p2 receive two of the outputs here and are re-used for the next
      // span; q2/q3 become the next span's p1/p0 (same trick as VFilter16i).
      DoFilter4(p1, p0, q0, q1, mask, hev_mask, &p1, &p0, &p3, &p2);
      Store4x16(p1, p0, p3, p2, p, stride);
      p1 = q2;
      p0 = q3;
    }
  }
}
  805. #endif // !WORK_AROUND_GCC
  806. // 8-pixels wide variant, for chroma filtering
  807. static void VFilter8(uint8_t* u, uint8_t* v, int stride,
  808. int thresh, int ithresh, int hev_thresh) {
  809. uint8x16_t p3, p2, p1, p0, q0, q1, q2, q3;
  810. Load8x8x2(u, v, stride, &p3, &p2, &p1, &p0, &q0, &q1, &q2, &q3);
  811. {
  812. const uint8x16_t mask = NeedsFilter2(p3, p2, p1, p0, q0, q1, q2, q3,
  813. ithresh, thresh);
  814. const uint8x16_t hev_mask = NeedsHev(p1, p0, q0, q1, hev_thresh);
  815. uint8x16_t op2, op1, op0, oq0, oq1, oq2;
  816. DoFilter6(p2, p1, p0, q0, q1, q2, mask, hev_mask,
  817. &op2, &op1, &op0, &oq0, &oq1, &oq2);
  818. Store8x2x2(op2, op1, u - 2 * stride, v - 2 * stride, stride);
  819. Store8x2x2(op0, oq0, u + 0 * stride, v + 0 * stride, stride);
  820. Store8x2x2(oq1, oq2, u + 2 * stride, v + 2 * stride, stride);
  821. }
  822. }
  823. static void VFilter8i(uint8_t* u, uint8_t* v, int stride,
  824. int thresh, int ithresh, int hev_thresh) {
  825. uint8x16_t p3, p2, p1, p0, q0, q1, q2, q3;
  826. u += 4 * stride;
  827. v += 4 * stride;
  828. Load8x8x2(u, v, stride, &p3, &p2, &p1, &p0, &q0, &q1, &q2, &q3);
  829. {
  830. const uint8x16_t mask = NeedsFilter2(p3, p2, p1, p0, q0, q1, q2, q3,
  831. ithresh, thresh);
  832. const uint8x16_t hev_mask = NeedsHev(p1, p0, q0, q1, hev_thresh);
  833. uint8x16_t op1, op0, oq0, oq1;
  834. DoFilter4(p1, p0, q0, q1, mask, hev_mask, &op1, &op0, &oq0, &oq1);
  835. Store8x4x2(op1, op0, oq0, oq1, u, v, stride);
  836. }
  837. }
  838. #if !defined(WORK_AROUND_GCC)
  839. static void HFilter8(uint8_t* u, uint8_t* v, int stride,
  840. int thresh, int ithresh, int hev_thresh) {
  841. uint8x16_t p3, p2, p1, p0, q0, q1, q2, q3;
  842. Load8x8x2T(u, v, stride, &p3, &p2, &p1, &p0, &q0, &q1, &q2, &q3);
  843. {
  844. const uint8x16_t mask = NeedsFilter2(p3, p2, p1, p0, q0, q1, q2, q3,
  845. ithresh, thresh);
  846. const uint8x16_t hev_mask = NeedsHev(p1, p0, q0, q1, hev_thresh);
  847. uint8x16_t op2, op1, op0, oq0, oq1, oq2;
  848. DoFilter6(p2, p1, p0, q0, q1, q2, mask, hev_mask,
  849. &op2, &op1, &op0, &oq0, &oq1, &oq2);
  850. Store6x8x2(op2, op1, op0, oq0, oq1, oq2, u, v, stride);
  851. }
  852. }
  853. static void HFilter8i(uint8_t* u, uint8_t* v, int stride,
  854. int thresh, int ithresh, int hev_thresh) {
  855. uint8x16_t p3, p2, p1, p0, q0, q1, q2, q3;
  856. u += 4;
  857. v += 4;
  858. Load8x8x2T(u, v, stride, &p3, &p2, &p1, &p0, &q0, &q1, &q2, &q3);
  859. {
  860. const uint8x16_t mask = NeedsFilter2(p3, p2, p1, p0, q0, q1, q2, q3,
  861. ithresh, thresh);
  862. const uint8x16_t hev_mask = NeedsHev(p1, p0, q0, q1, hev_thresh);
  863. uint8x16_t op1, op0, oq0, oq1;
  864. DoFilter4(p1, p0, q0, q1, mask, hev_mask, &op1, &op0, &oq0, &oq1);
  865. Store4x8x2(op1, op0, oq0, oq1, u, v, stride);
  866. }
  867. }
  868. #endif // !WORK_AROUND_GCC
  869. //-----------------------------------------------------------------------------
  870. // Inverse transforms (Paragraph 14.4)
  871. // Technically these are unsigned but vqdmulh is only available in signed.
  872. // vqdmulh returns high half (effectively >> 16) but also doubles the value,
  873. // changing the >> 16 to >> 15 and requiring an additional >> 1.
  874. // We use this to our advantage with kC2. The canonical value is 35468.
  875. // However, the high bit is set so treating it as signed will give incorrect
  876. // results. We avoid this by down shifting by 1 here to clear the highest bit.
  877. // Combined with the doubling effect of vqdmulh we get >> 16.
  878. // This can not be applied to kC1 because the lowest bit is set. Down shifting
  879. // the constant would reduce precision.
  880. // libwebp uses a trick to avoid some extra addition that libvpx does.
  881. // Instead of:
  882. // temp2 = ip[12] + ((ip[12] * cospi8sqrt2minus1) >> 16);
  883. // libwebp adds 1 << 16 to cospi8sqrt2minus1 (kC1). However, this causes the
  884. // same issue with kC1 and vqdmulh that we work around by down shifting kC2
// Inverse-transform multipliers; see the block comment above for why kC2
// is stored pre-halved while kC1 cannot be.
static const int16_t kC1 = 20091;
static const int16_t kC2 = 17734;  // half of kC2, actually. See comment above.
  887. #if defined(USE_INTRINSICS)
  888. static WEBP_INLINE void Transpose8x2(const int16x8_t in0, const int16x8_t in1,
  889. int16x8x2_t* const out) {
  890. // a0 a1 a2 a3 | b0 b1 b2 b3 => a0 b0 c0 d0 | a1 b1 c1 d1
  891. // c0 c1 c2 c3 | d0 d1 d2 d3 a2 b2 c2 d2 | a3 b3 c3 d3
  892. const int16x8x2_t tmp0 = vzipq_s16(in0, in1); // a0 c0 a1 c1 a2 c2 ...
  893. // b0 d0 b1 d1 b2 d2 ...
  894. *out = vzipq_s16(tmp0.val[0], tmp0.val[1]);
  895. }
// One pass of the 4x4 inverse DCT on the two packed row-pairs in 'rows'.
// The result is left transposed, so calling this twice performs both the
// column and the row passes.
static WEBP_INLINE void TransformPass(int16x8x2_t* const rows) {
  // {rows} = in0 | in4
  //          in8 | in12
  // B1 = in4 | in12
  const int16x8_t B1 =
      vcombine_s16(vget_high_s16(rows->val[0]), vget_high_s16(rows->val[1]));
  // C0 = kC1 * in4 | kC1 * in12   (vsraq adds B1 back in, giving the
  //                                effective (1 << 16) + kC1 multiplier)
  // C1 = kC2 * in4 | kC2 * in12
  const int16x8_t C0 = vsraq_n_s16(B1, vqdmulhq_n_s16(B1, kC1), 1);
  const int16x8_t C1 = vqdmulhq_n_s16(B1, kC2);
  const int16x4_t a = vqadd_s16(vget_low_s16(rows->val[0]),
                                vget_low_s16(rows->val[1]));   // in0 + in8
  const int16x4_t b = vqsub_s16(vget_low_s16(rows->val[0]),
                                vget_low_s16(rows->val[1]));   // in0 - in8
  // c = kC2 * in4 - kC1 * in12
  // d = kC1 * in4 + kC2 * in12
  const int16x4_t c = vqsub_s16(vget_low_s16(C1), vget_high_s16(C0));
  const int16x4_t d = vqadd_s16(vget_low_s16(C0), vget_high_s16(C1));
  const int16x8_t D0 = vcombine_s16(a, b);      // D0 = a | b
  const int16x8_t D1 = vcombine_s16(d, c);      // D1 = d | c
  const int16x8_t E0 = vqaddq_s16(D0, D1);      // a+d | b+c
  const int16x8_t E_tmp = vqsubq_s16(D0, D1);   // a-d | b-c
  const int16x8_t E1 = vcombine_s16(vget_high_s16(E_tmp), vget_low_s16(E_tmp));
  Transpose8x2(E0, E1, rows);
}
// Full 4x4 inverse transform (intrinsics version): two transposing passes
// over the 16 coefficients, then add the residual to the destination block.
static void TransformOne(const int16_t* in, uint8_t* dst) {
  int16x8x2_t rows;
  INIT_VECTOR2(rows, vld1q_s16(in + 0), vld1q_s16(in + 8));
  TransformPass(&rows);
  TransformPass(&rows);
  Add4x4(rows.val[0], rows.val[1], dst);
}
  928. #else
// Full 4x4 inverse transform (inline-assembly version): performs the two
// passes of the VP8 iDCT on the 16 coefficients at 'in', then adds the
// rounded result to the BPS-strided 4x4 block at 'dst' with unsigned
// saturation.
static void TransformOne(const int16_t* in, uint8_t* dst) {
  const int kBPS = BPS;
  // kC1, kC2. Padded because vld1.16 loads 8 bytes
  const int16_t constants[4] = { kC1, kC2, 0, 0 };
  /* Adapted from libvpx: vp8/common/arm/neon/shortidct4x4llm_neon.asm */
  __asm__ volatile (
    "vld1.16         {q1, q2}, [%[in]]           \n"
    "vld1.16         {d0}, [%[constants]]        \n"

    /* d2: in[0]
     * d3: in[8]
     * d4: in[4]
     * d5: in[12]
     */
    "vswp            d3, d4                      \n"

    /* q8 = {in[4], in[12]} * kC1 * 2 >> 16
     * q9 = {in[4], in[12]} * kC2 >> 16
     */
    "vqdmulh.s16     q8, q2, d0[0]               \n"
    "vqdmulh.s16     q9, q2, d0[1]               \n"

    /* d22 = a = in[0] + in[8]
     * d23 = b = in[0] - in[8]
     */
    "vqadd.s16       d22, d2, d3                 \n"
    "vqsub.s16       d23, d2, d3                 \n"

    /* The multiplication should be x * kC1 >> 16
     * However, with vqdmulh we get x * kC1 * 2 >> 16
     * (multiply, double, return high half)
     * We avoided this in kC2 by pre-shifting the constant.
     * q8 = in[4]/[12] * kC1 >> 16
     */
    "vshr.s16        q8, q8, #1                  \n"

    /* Add {in[4], in[12]} back after the multiplication. This is handled by
     * adding 1 << 16 to kC1 in the libwebp C code.
     */
    "vqadd.s16       q8, q2, q8                  \n"

    /* d20 = c = in[4]*kC2 - in[12]*kC1
     * d21 = d = in[4]*kC1 + in[12]*kC2
     */
    "vqsub.s16       d20, d18, d17               \n"
    "vqadd.s16       d21, d19, d16               \n"

    /* d2 = tmp[0] = a + d
     * d3 = tmp[1] = b + c
     * d4 = tmp[2] = b - c
     * d5 = tmp[3] = a - d
     */
    "vqadd.s16       d2, d22, d21                \n"
    "vqadd.s16       d3, d23, d20                \n"
    "vqsub.s16       d4, d23, d20                \n"
    "vqsub.s16       d5, d22, d21                \n"

    /* Two zips + a swap transpose the 4x4 block for the second pass. */
    "vzip.16         q1, q2                      \n"
    "vzip.16         q1, q2                      \n"

    "vswp            d3, d4                      \n"

    /* q8 = {tmp[4], tmp[12]} * kC1 * 2 >> 16
     * q9 = {tmp[4], tmp[12]} * kC2 >> 16
     */
    "vqdmulh.s16     q8, q2, d0[0]               \n"
    "vqdmulh.s16     q9, q2, d0[1]               \n"

    /* d22 = a = tmp[0] + tmp[8]
     * d23 = b = tmp[0] - tmp[8]
     */
    "vqadd.s16       d22, d2, d3                 \n"
    "vqsub.s16       d23, d2, d3                 \n"

    /* See long winded explanations prior */
    "vshr.s16        q8, q8, #1                  \n"
    "vqadd.s16       q8, q2, q8                  \n"

    /* d20 = c = in[4]*kC2 - in[12]*kC1
     * d21 = d = in[4]*kC1 + in[12]*kC2
     */
    "vqsub.s16       d20, d18, d17               \n"
    "vqadd.s16       d21, d19, d16               \n"

    /* d2 = tmp[0] = a + d
     * d3 = tmp[1] = b + c
     * d4 = tmp[2] = b - c
     * d5 = tmp[3] = a - d
     */
    "vqadd.s16       d2, d22, d21                \n"
    "vqadd.s16       d3, d23, d20                \n"
    "vqsub.s16       d4, d23, d20                \n"
    "vqsub.s16       d5, d22, d21                \n"

    /* Load the 4x4 destination rows, then rewind dst. */
    "vld1.32         d6[0], [%[dst]], %[kBPS]    \n"
    "vld1.32         d6[1], [%[dst]], %[kBPS]    \n"
    "vld1.32         d7[0], [%[dst]], %[kBPS]    \n"
    "vld1.32         d7[1], [%[dst]], %[kBPS]    \n"

    "sub         %[dst], %[dst], %[kBPS], lsl #2 \n"

    /* (val) + 4 >> 3 */
    "vrshr.s16       d2, d2, #3                  \n"
    "vrshr.s16       d3, d3, #3                  \n"
    "vrshr.s16       d4, d4, #3                  \n"
    "vrshr.s16       d5, d5, #3                  \n"

    "vzip.16         q1, q2                      \n"
    "vzip.16         q1, q2                      \n"

    /* Must accumulate before saturating */
    "vmovl.u8        q8, d6                      \n"
    "vmovl.u8        q9, d7                      \n"

    "vqadd.s16       q1, q1, q8                  \n"
    "vqadd.s16       q2, q2, q9                  \n"

    "vqmovun.s16     d0, q1                      \n"
    "vqmovun.s16     d1, q2                      \n"

    "vst1.32         d0[0], [%[dst]], %[kBPS]    \n"
    "vst1.32         d0[1], [%[dst]], %[kBPS]    \n"
    "vst1.32         d1[0], [%[dst]], %[kBPS]    \n"
    "vst1.32         d1[1], [%[dst]]             \n"

    : [in] "+r"(in), [dst] "+r"(dst)               /* modified registers */
    : [kBPS] "r"(kBPS), [constants] "r"(constants) /* constants */
    : "memory", "q0", "q1", "q2", "q8", "q9", "q10", "q11"  /* clobbered */
  );
}
  1036. #endif // USE_INTRINSICS
  1037. static void TransformTwo(const int16_t* in, uint8_t* dst, int do_two) {
  1038. TransformOne(in, dst);
  1039. if (do_two) {
  1040. TransformOne(in + 16, dst + 4);
  1041. }
  1042. }
  1043. static void TransformDC(const int16_t* in, uint8_t* dst) {
  1044. const int16x8_t DC = vdupq_n_s16(in[0]);
  1045. Add4x4(DC, DC, dst);
  1046. }
  1047. //------------------------------------------------------------------------------
// Writes one column of the 4x4 WHT result into the DC slot of four
// consecutive coefficient blocks (each block is 16 int16's apart).
#define STORE_WHT(dst, col, rows) do {                  \
  *dst = vgetq_lane_s32(rows.val[0], col); (dst) += 16; \
  *dst = vgetq_lane_s32(rows.val[1], col); (dst) += 16; \
  *dst = vgetq_lane_s32(rows.val[2], col); (dst) += 16; \
  *dst = vgetq_lane_s32(rows.val[3], col); (dst) += 16; \
} while (0)

// Inverse Walsh-Hadamard transform of the 16 DC coefficients at 'in';
// the results are scattered into the DC positions of the 16 coefficient
// blocks at 'out' via STORE_WHT.
static void TransformWHT(const int16_t* in, int16_t* out) {
  int32x4x4_t tmp;

  {
    // Load the source.
    const int16x4_t in00_03 = vld1_s16(in + 0);
    const int16x4_t in04_07 = vld1_s16(in + 4);
    const int16x4_t in08_11 = vld1_s16(in + 8);
    const int16x4_t in12_15 = vld1_s16(in + 12);
    const int32x4_t a0 = vaddl_s16(in00_03, in12_15);  // in[0..3] + in[12..15]
    const int32x4_t a1 = vaddl_s16(in04_07, in08_11);  // in[4..7] + in[8..11]
    const int32x4_t a2 = vsubl_s16(in04_07, in08_11);  // in[4..7] - in[8..11]
    const int32x4_t a3 = vsubl_s16(in00_03, in12_15);  // in[0..3] - in[12..15]
    tmp.val[0] = vaddq_s32(a0, a1);
    tmp.val[1] = vaddq_s32(a3, a2);
    tmp.val[2] = vsubq_s32(a0, a1);
    tmp.val[3] = vsubq_s32(a3, a2);
    // Arrange the temporary results column-wise.
    tmp = Transpose4x4(tmp);
  }

  {
    const int32x4_t kCst3 = vdupq_n_s32(3);
    const int32x4_t dc = vaddq_s32(tmp.val[0], kCst3);  // add rounder
    const int32x4_t a0 = vaddq_s32(dc, tmp.val[3]);
    const int32x4_t a1 = vaddq_s32(tmp.val[1], tmp.val[2]);
    const int32x4_t a2 = vsubq_s32(tmp.val[1], tmp.val[2]);
    const int32x4_t a3 = vsubq_s32(dc, tmp.val[3]);

    tmp.val[0] = vaddq_s32(a0, a1);
    tmp.val[1] = vaddq_s32(a3, a2);
    tmp.val[2] = vsubq_s32(a0, a1);
    tmp.val[3] = vsubq_s32(a3, a2);

    // right shift the results by 3.
    tmp.val[0] = vshrq_n_s32(tmp.val[0], 3);
    tmp.val[1] = vshrq_n_s32(tmp.val[1], 3);
    tmp.val[2] = vshrq_n_s32(tmp.val[2], 3);
    tmp.val[3] = vshrq_n_s32(tmp.val[3], 3);

    STORE_WHT(out, 0, tmp);
    STORE_WHT(out, 1, tmp);
    STORE_WHT(out, 2, tmp);
    STORE_WHT(out, 3, tmp);
  }
}

#undef STORE_WHT
  1096. //------------------------------------------------------------------------------
#define MUL(a, b) (((a) * (b)) >> 16)

// Inverse transform for the case where only in[0], in[1] and in[4] are
// non-zero: the row and column passes collapse to constant contributions
// c/d per row and per column.
static void TransformAC3(const int16_t* in, uint8_t* dst) {
  static const int kC1_full = 20091 + (1 << 16);  // kC1 with the implicit 1<<16
  static const int kC2_full = 35468;              // un-halved kC2
  const int16x4_t A = vdup_n_s16(in[0]);
  const int16x4_t c4 = vdup_n_s16(MUL(in[4], kC2_full));
  const int16x4_t d4 = vdup_n_s16(MUL(in[4], kC1_full));
  const int c1 = MUL(in[1], kC2_full);
  const int d1 = MUL(in[1], kC1_full);
  // Pack the horizontal contribution {d1, c1, -c1, -d1} into a single
  // 64-bit lane (16 bits per value).
  const uint64_t cd = (uint64_t)( d1 & 0xffff) <<  0 |
                      (uint64_t)( c1 & 0xffff) << 16 |
                      (uint64_t)(-c1 & 0xffff) << 32 |
                      (uint64_t)(-d1 & 0xffff) << 48;
  const int16x4_t CD = vcreate_s16(cd);
  const int16x4_t B = vqadd_s16(A, CD);
  const int16x8_t m0_m1 = vcombine_s16(vqadd_s16(B, d4), vqadd_s16(B, c4));
  const int16x8_t m2_m3 = vcombine_s16(vqsub_s16(B, c4), vqsub_s16(B, d4));
  Add4x4(m0_m1, m2_m3, dst);
}
#undef MUL
  1117. #endif // WEBP_USE_NEON
  1118. //------------------------------------------------------------------------------
  1119. // Entry point
extern void VP8DspInitNEON(void);

// Installs the NEON implementations into the global VP8 decoder function
// pointers. Compiles to a no-op when WEBP_USE_NEON is not defined; the
// HFilter16i/HFilter8/HFilter8i variants are skipped under WORK_AROUND_GCC.
void VP8DspInitNEON(void) {
#if defined(WEBP_USE_NEON)
  VP8Transform = TransformTwo;
  VP8TransformAC3 = TransformAC3;
  VP8TransformDC = TransformDC;
  VP8TransformWHT = TransformWHT;

  VP8VFilter16 = VFilter16;
  VP8VFilter16i = VFilter16i;
  VP8HFilter16 = HFilter16;
#if !defined(WORK_AROUND_GCC)
  VP8HFilter16i = HFilter16i;
#endif
  VP8VFilter8 = VFilter8;
  VP8VFilter8i = VFilter8i;
#if !defined(WORK_AROUND_GCC)
  VP8HFilter8 = HFilter8;
  VP8HFilter8i = HFilter8i;
#endif
  VP8SimpleVFilter16 = SimpleVFilter16;
  VP8SimpleHFilter16 = SimpleHFilter16;
  VP8SimpleVFilter16i = SimpleVFilter16i;
  VP8SimpleHFilter16i = SimpleHFilter16i;
#endif   // WEBP_USE_NEON
}