  1. /*
  2. * ARMv7 NEON optimizations for libjpeg-turbo
  3. *
  4. * Copyright (C) 2009-2011, Nokia Corporation and/or its subsidiary(-ies).
  5. * All Rights Reserved.
  6. * Author: Siarhei Siamashka <siarhei.siamashka@nokia.com>
  7. * Copyright (C) 2014, Siarhei Siamashka. All Rights Reserved.
  8. * Copyright (C) 2014, Linaro Limited. All Rights Reserved.
  9. * Copyright (C) 2015, D. R. Commander. All Rights Reserved.
  10. * Copyright (C) 2015-2016, 2018, Matthieu Darbois. All Rights Reserved.
  11. *
  12. * This software is provided 'as-is', without any express or implied
  13. * warranty. In no event will the authors be held liable for any damages
  14. * arising from the use of this software.
  15. *
  16. * Permission is granted to anyone to use this software for any purpose,
  17. * including commercial applications, and to alter it and redistribute it
  18. * freely, subject to the following restrictions:
  19. *
  20. * 1. The origin of this software must not be misrepresented; you must not
  21. * claim that you wrote the original software. If you use this software
  22. * in a product, an acknowledgment in the product documentation would be
  23. * appreciated but is not required.
  24. * 2. Altered source versions must be plainly marked as such, and must not be
  25. * misrepresented as being the original software.
  26. * 3. This notice may not be removed or altered from any source distribution.
  27. */
  28. #if defined(__linux__) && defined(__ELF__)
  29. .section .note.GNU-stack, "", %progbits /* mark stack as non-executable */
  30. #endif
  31. .text
  32. .fpu neon
  33. .arch armv7a
  34. .object_arch armv4
  35. .arm
  36. .syntax unified
  37. #define RESPECT_STRICT_ALIGNMENT 1
  38. /*****************************************************************************/
  39. /* Supplementary macro for setting function attributes */
  40. .macro asm_function fname
  41. #ifdef __APPLE__
  42. .private_extern _\fname
  43. .globl _\fname
  44. _\fname:
  45. #else
  46. .global \fname
  47. #ifdef __ELF__
  48. .hidden \fname
  49. .type \fname, %function
  50. #endif
  51. \fname:
  52. #endif
  53. .endm
  54. /* Transpose a block of 4x4 coefficients in four 64-bit registers */
  55. .macro transpose_4x4 x0, x1, x2, x3
  56. vtrn.16 \x0, \x1
  57. vtrn.16 \x2, \x3
  58. vtrn.32 \x0, \x2
  59. vtrn.32 \x1, \x3
  60. .endm
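/* For reference: each vtrn.16 transposes the 2x2 sub-blocks of 16-bit elements
 * shared by its register pair, and the following vtrn.32 pair swaps the 32-bit
 * halves, so the net effect is a full 4x4 transpose of the block held in
 * x0..x3.  A plain C equivalent (an illustrative sketch only, not used by the
 * code below):
 */
#if 0  /* illustrative C sketch, discarded by the preprocessor */
static void transpose_4x4_ref(short m[4][4])
{
  int i, j;
  for (i = 0; i < 4; i++)
    for (j = i + 1; j < 4; j++) {
      short t = m[i][j];  /* swap m[i][j] and m[j][i] */
      m[i][j] = m[j][i];
      m[j][i] = t;
    }
}
#endif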
  61. #define CENTERJSAMPLE 128
  62. /*****************************************************************************/
  63. /*
  64. * Perform dequantization and inverse DCT on one block of coefficients.
  65. *
  66. * GLOBAL(void)
  67. * jsimd_idct_islow_neon(void *dct_table, JCOEFPTR coef_block,
  68. * JSAMPARRAY output_buf, JDIMENSION output_col)
  69. */
  70. #define FIX_0_298631336 (2446)
  71. #define FIX_0_390180644 (3196)
  72. #define FIX_0_541196100 (4433)
  73. #define FIX_0_765366865 (6270)
  74. #define FIX_0_899976223 (7373)
  75. #define FIX_1_175875602 (9633)
  76. #define FIX_1_501321110 (12299)
  77. #define FIX_1_847759065 (15137)
  78. #define FIX_1_961570560 (16069)
  79. #define FIX_2_053119869 (16819)
  80. #define FIX_2_562915447 (20995)
  81. #define FIX_3_072711026 (25172)
  82. #define FIX_1_175875602_MINUS_1_961570560 (FIX_1_175875602 - FIX_1_961570560)
  83. #define FIX_1_175875602_MINUS_0_390180644 (FIX_1_175875602 - FIX_0_390180644)
  84. #define FIX_0_541196100_MINUS_1_847759065 (FIX_0_541196100 - FIX_1_847759065)
  85. #define FIX_3_072711026_MINUS_2_562915447 (FIX_3_072711026 - FIX_2_562915447)
  86. #define FIX_0_298631336_MINUS_0_899976223 (FIX_0_298631336 - FIX_0_899976223)
  87. #define FIX_1_501321110_MINUS_0_899976223 (FIX_1_501321110 - FIX_0_899976223)
  88. #define FIX_2_053119869_MINUS_2_562915447 (FIX_2_053119869 - FIX_2_562915447)
  89. #define FIX_0_541196100_PLUS_0_765366865 (FIX_0_541196100 + FIX_0_765366865)
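/* The FIX_* values above are the jidctint.c constants in the usual libjpeg
 * 13-bit fixed-point form, FIX(x) = round(x * 2^13); for example
 * 0.541196100 * 8192 = 4433.48 -> 4433 and 1.175875602 * 8192 = 9632.77 -> 9633.
 * A minimal C sketch of this convention (the REF_* names are illustrative and
 * not part of the library):
 */
#if 0  /* illustrative C sketch, discarded by the preprocessor */
#define REF_CONST_BITS  13
#define REF_FIX(x)      ((int)((x) * (1 << REF_CONST_BITS) + 0.5))
static int ref_scale_0_541196100(int coef)
{
  /* multiply by the scaled constant, then descale back with rounding */
  return (coef * REF_FIX(0.541196100) + (1 << (REF_CONST_BITS - 1))) >> REF_CONST_BITS;
}
#endif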
  90. /*
  91. * Reference SIMD-friendly 1-D ISLOW iDCT C implementation.
  92. * Uses some ideas from the comments in 'simd/jiss2int-64.asm'
  93. */
  94. #define REF_1D_IDCT(xrow0, xrow1, xrow2, xrow3, xrow4, xrow5, xrow6, xrow7) { \
  95. DCTELEM row0, row1, row2, row3, row4, row5, row6, row7; \
  96. JLONG q1, q2, q3, q4, q5, q6, q7; \
  97. JLONG tmp11_plus_tmp2, tmp11_minus_tmp2; \
  98. \
  99. /* 1-D iDCT input data */ \
  100. row0 = xrow0; \
  101. row1 = xrow1; \
  102. row2 = xrow2; \
  103. row3 = xrow3; \
  104. row4 = xrow4; \
  105. row5 = xrow5; \
  106. row6 = xrow6; \
  107. row7 = xrow7; \
  108. \
  109. q5 = row7 + row3; \
  110. q4 = row5 + row1; \
  111. q6 = MULTIPLY(q5, FIX_1_175875602_MINUS_1_961570560) + \
  112. MULTIPLY(q4, FIX_1_175875602); \
  113. q7 = MULTIPLY(q5, FIX_1_175875602) + \
  114. MULTIPLY(q4, FIX_1_175875602_MINUS_0_390180644); \
  115. q2 = MULTIPLY(row2, FIX_0_541196100) + \
  116. MULTIPLY(row6, FIX_0_541196100_MINUS_1_847759065); \
  117. q4 = q6; \
  118. q3 = ((JLONG)row0 - (JLONG)row4) << 13; \
  119. q6 += MULTIPLY(row5, -FIX_2_562915447) + \
  120. MULTIPLY(row3, FIX_3_072711026_MINUS_2_562915447); \
  121. /* now we can use q1 (reloadable constants have been used up) */ \
  122. q1 = q3 + q2; \
  123. q4 += MULTIPLY(row7, FIX_0_298631336_MINUS_0_899976223) + \
  124. MULTIPLY(row1, -FIX_0_899976223); \
  125. q5 = q7; \
  126. q1 = q1 + q6; \
  127. q7 += MULTIPLY(row7, -FIX_0_899976223) + \
  128. MULTIPLY(row1, FIX_1_501321110_MINUS_0_899976223); \
  129. \
  130. /* (tmp11 + tmp2) has been calculated (out_row1 before descale) */ \
  131. tmp11_plus_tmp2 = q1; \
  132. row1 = 0; \
  133. \
  134. q1 = q1 - q6; \
  135. q5 += MULTIPLY(row5, FIX_2_053119869_MINUS_2_562915447) + \
  136. MULTIPLY(row3, -FIX_2_562915447); \
  137. q1 = q1 - q6; \
  138. q6 = MULTIPLY(row2, FIX_0_541196100_PLUS_0_765366865) + \
  139. MULTIPLY(row6, FIX_0_541196100); \
  140. q3 = q3 - q2; \
  141. \
  142. /* (tmp11 - tmp2) has been calculated (out_row6 before descale) */ \
  143. tmp11_minus_tmp2 = q1; \
  144. \
  145. q1 = ((JLONG)row0 + (JLONG)row4) << 13; \
  146. q2 = q1 + q6; \
  147. q1 = q1 - q6; \
  148. \
  149. /* pick up the results */ \
  150. tmp0 = q4; \
  151. tmp1 = q5; \
  152. tmp2 = (tmp11_plus_tmp2 - tmp11_minus_tmp2) / 2; \
  153. tmp3 = q7; \
  154. tmp10 = q2; \
  155. tmp11 = (tmp11_plus_tmp2 + tmp11_minus_tmp2) / 2; \
  156. tmp12 = q3; \
  157. tmp13 = q1; \
  158. }
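/* Note how tmp2 and tmp11 are recovered at the end of the reference macro
 * above: it first forms S = tmp11 + tmp2 (out_row1 before descale) and
 * D = tmp11 - tmp2 (out_row6 before descale), and then
 *   tmp2  = (S - D) / 2,   tmp11 = (S + D) / 2.
 */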
  159. #define XFIX_0_899976223 d0[0]
  160. #define XFIX_0_541196100 d0[1]
  161. #define XFIX_2_562915447 d0[2]
  162. #define XFIX_0_298631336_MINUS_0_899976223 d0[3]
  163. #define XFIX_1_501321110_MINUS_0_899976223 d1[0]
  164. #define XFIX_2_053119869_MINUS_2_562915447 d1[1]
  165. #define XFIX_0_541196100_PLUS_0_765366865 d1[2]
  166. #define XFIX_1_175875602 d1[3]
  167. #define XFIX_1_175875602_MINUS_0_390180644 d2[0]
  168. #define XFIX_0_541196100_MINUS_1_847759065 d2[1]
  169. #define XFIX_3_072711026_MINUS_2_562915447 d2[2]
  170. #define XFIX_1_175875602_MINUS_1_961570560 d2[3]
  171. .balign 16
  172. jsimd_idct_islow_neon_consts:
  173. .short FIX_0_899976223 /* d0[0] */
  174. .short FIX_0_541196100 /* d0[1] */
  175. .short FIX_2_562915447 /* d0[2] */
  176. .short FIX_0_298631336_MINUS_0_899976223 /* d0[3] */
  177. .short FIX_1_501321110_MINUS_0_899976223 /* d1[0] */
  178. .short FIX_2_053119869_MINUS_2_562915447 /* d1[1] */
  179. .short FIX_0_541196100_PLUS_0_765366865 /* d1[2] */
  180. .short FIX_1_175875602 /* d1[3] */
  181. /* reloadable constants */
  182. .short FIX_1_175875602_MINUS_0_390180644 /* d2[0] */
  183. .short FIX_0_541196100_MINUS_1_847759065 /* d2[1] */
  184. .short FIX_3_072711026_MINUS_2_562915447 /* d2[2] */
  185. .short FIX_1_175875602_MINUS_1_961570560 /* d2[3] */
  186. asm_function jsimd_idct_islow_neon
  187. DCT_TABLE .req r0
  188. COEF_BLOCK .req r1
  189. OUTPUT_BUF .req r2
  190. OUTPUT_COL .req r3
  191. TMP1 .req r0
  192. TMP2 .req r1
  193. TMP3 .req r2
  194. TMP4 .req ip
  195. ROW0L .req d16
  196. ROW0R .req d17
  197. ROW1L .req d18
  198. ROW1R .req d19
  199. ROW2L .req d20
  200. ROW2R .req d21
  201. ROW3L .req d22
  202. ROW3R .req d23
  203. ROW4L .req d24
  204. ROW4R .req d25
  205. ROW5L .req d26
  206. ROW5R .req d27
  207. ROW6L .req d28
  208. ROW6R .req d29
  209. ROW7L .req d30
  210. ROW7R .req d31
  211. /* Load and dequantize coefficients into NEON registers
  212. * with the following allocation:
  213. * 0 1 2 3 | 4 5 6 7
  214. * ---------+--------
  215. * 0 | d16 | d17 ( q8 )
  216. * 1 | d18 | d19 ( q9 )
  217. * 2 | d20 | d21 ( q10 )
  218. * 3 | d22 | d23 ( q11 )
  219. * 4 | d24 | d25 ( q12 )
  220. * 5 | d26 | d27 ( q13 )
  221. * 6 | d28 | d29 ( q14 )
  222. * 7 | d30 | d31 ( q15 )
  223. */
  224. adr ip, jsimd_idct_islow_neon_consts
  225. vld1.16 {d16, d17, d18, d19}, [COEF_BLOCK, :128]!
  226. vld1.16 {d0, d1, d2, d3}, [DCT_TABLE, :128]!
  227. vld1.16 {d20, d21, d22, d23}, [COEF_BLOCK, :128]!
  228. vmul.s16 q8, q8, q0
  229. vld1.16 {d4, d5, d6, d7}, [DCT_TABLE, :128]!
  230. vmul.s16 q9, q9, q1
  231. vld1.16 {d24, d25, d26, d27}, [COEF_BLOCK, :128]!
  232. vmul.s16 q10, q10, q2
  233. vld1.16 {d0, d1, d2, d3}, [DCT_TABLE, :128]!
  234. vmul.s16 q11, q11, q3
  235. vld1.16 {d28, d29, d30, d31}, [COEF_BLOCK, :128]
  236. vmul.s16 q12, q12, q0
  237. vld1.16 {d4, d5, d6, d7}, [DCT_TABLE, :128]!
  238. vmul.s16 q14, q14, q2
  239. vmul.s16 q13, q13, q1
  240. vld1.16 {d0, d1, d2, d3}, [ip, :128] /* load constants */
  241. add ip, ip, #16
  242. vmul.s16 q15, q15, q3
  243. vpush {d8-d15} /* save NEON registers */
  244. /* 1-D IDCT, pass 1, left 4x8 half */
  245. vadd.s16 d4, ROW7L, ROW3L
  246. vadd.s16 d5, ROW5L, ROW1L
  247. vmull.s16 q6, d4, XFIX_1_175875602_MINUS_1_961570560
  248. vmlal.s16 q6, d5, XFIX_1_175875602
  249. vmull.s16 q7, d4, XFIX_1_175875602
  250. /* Check for the zero coefficients in the right 4x8 half */
  251. push {r4, r5}
  252. vmlal.s16 q7, d5, XFIX_1_175875602_MINUS_0_390180644
  253. vsubl.s16 q3, ROW0L, ROW4L
  254. ldrd r4, [COEF_BLOCK, #(-96 + 2 * (4 + 1 * 8))]
  255. vmull.s16 q2, ROW2L, XFIX_0_541196100
  256. vmlal.s16 q2, ROW6L, XFIX_0_541196100_MINUS_1_847759065
  257. orr r0, r4, r5
  258. vmov q4, q6
  259. vmlsl.s16 q6, ROW5L, XFIX_2_562915447
  260. ldrd r4, [COEF_BLOCK, #(-96 + 2 * (4 + 2 * 8))]
  261. vmlal.s16 q6, ROW3L, XFIX_3_072711026_MINUS_2_562915447
  262. vshl.s32 q3, q3, #13
  263. orr r0, r0, r4
  264. vmlsl.s16 q4, ROW1L, XFIX_0_899976223
  265. orr r0, r0, r5
  266. vadd.s32 q1, q3, q2
  267. ldrd r4, [COEF_BLOCK, #(-96 + 2 * (4 + 3 * 8))]
  268. vmov q5, q7
  269. vadd.s32 q1, q1, q6
  270. orr r0, r0, r4
  271. vmlsl.s16 q7, ROW7L, XFIX_0_899976223
  272. orr r0, r0, r5
  273. vmlal.s16 q7, ROW1L, XFIX_1_501321110_MINUS_0_899976223
  274. vrshrn.s32 ROW1L, q1, #11
  275. ldrd r4, [COEF_BLOCK, #(-96 + 2 * (4 + 4 * 8))]
  276. vsub.s32 q1, q1, q6
  277. vmlal.s16 q5, ROW5L, XFIX_2_053119869_MINUS_2_562915447
  278. orr r0, r0, r4
  279. vmlsl.s16 q5, ROW3L, XFIX_2_562915447
  280. orr r0, r0, r5
  281. vsub.s32 q1, q1, q6
  282. vmull.s16 q6, ROW2L, XFIX_0_541196100_PLUS_0_765366865
  283. ldrd r4, [COEF_BLOCK, #(-96 + 2 * (4 + 5 * 8))]
  284. vmlal.s16 q6, ROW6L, XFIX_0_541196100
  285. vsub.s32 q3, q3, q2
  286. orr r0, r0, r4
  287. vrshrn.s32 ROW6L, q1, #11
  288. orr r0, r0, r5
  289. vadd.s32 q1, q3, q5
  290. ldrd r4, [COEF_BLOCK, #(-96 + 2 * (4 + 6 * 8))]
  291. vsub.s32 q3, q3, q5
  292. vaddl.s16 q5, ROW0L, ROW4L
  293. orr r0, r0, r4
  294. vrshrn.s32 ROW2L, q1, #11
  295. orr r0, r0, r5
  296. vrshrn.s32 ROW5L, q3, #11
  297. ldrd r4, [COEF_BLOCK, #(-96 + 2 * (4 + 7 * 8))]
  298. vshl.s32 q5, q5, #13
  299. vmlal.s16 q4, ROW7L, XFIX_0_298631336_MINUS_0_899976223
  300. orr r0, r0, r4
  301. vadd.s32 q2, q5, q6
  302. orrs r0, r0, r5
  303. vsub.s32 q1, q5, q6
  304. vadd.s32 q6, q2, q7
  305. ldrd r4, [COEF_BLOCK, #(-96 + 2 * (4 + 0 * 8))]
  306. vsub.s32 q2, q2, q7
  307. vadd.s32 q5, q1, q4
  308. orr r0, r4, r5
  309. vsub.s32 q3, q1, q4
  310. pop {r4, r5}
  311. vrshrn.s32 ROW7L, q2, #11
  312. vrshrn.s32 ROW3L, q5, #11
  313. vrshrn.s32 ROW0L, q6, #11
  314. vrshrn.s32 ROW4L, q3, #11
  315. beq 3f /* Go to the special handling for the sparse
  316. right 4x8 half */
  317. /* 1-D IDCT, pass 1, right 4x8 half */
  318. vld1.s16 {d2}, [ip, :64] /* reload constants */
  319. vadd.s16 d10, ROW7R, ROW3R
  320. vadd.s16 d8, ROW5R, ROW1R
  321. /* Transpose left 4x8 half */
  322. vtrn.16 ROW6L, ROW7L
  323. vmull.s16 q6, d10, XFIX_1_175875602_MINUS_1_961570560
  324. vmlal.s16 q6, d8, XFIX_1_175875602
  325. vtrn.16 ROW2L, ROW3L
  326. vmull.s16 q7, d10, XFIX_1_175875602
  327. vmlal.s16 q7, d8, XFIX_1_175875602_MINUS_0_390180644
  328. vtrn.16 ROW0L, ROW1L
  329. vsubl.s16 q3, ROW0R, ROW4R
  330. vmull.s16 q2, ROW2R, XFIX_0_541196100
  331. vmlal.s16 q2, ROW6R, XFIX_0_541196100_MINUS_1_847759065
  332. vtrn.16 ROW4L, ROW5L
  333. vmov q4, q6
  334. vmlsl.s16 q6, ROW5R, XFIX_2_562915447
  335. vmlal.s16 q6, ROW3R, XFIX_3_072711026_MINUS_2_562915447
  336. vtrn.32 ROW1L, ROW3L
  337. vshl.s32 q3, q3, #13
  338. vmlsl.s16 q4, ROW1R, XFIX_0_899976223
  339. vtrn.32 ROW4L, ROW6L
  340. vadd.s32 q1, q3, q2
  341. vmov q5, q7
  342. vadd.s32 q1, q1, q6
  343. vtrn.32 ROW0L, ROW2L
  344. vmlsl.s16 q7, ROW7R, XFIX_0_899976223
  345. vmlal.s16 q7, ROW1R, XFIX_1_501321110_MINUS_0_899976223
  346. vrshrn.s32 ROW1R, q1, #11
  347. vtrn.32 ROW5L, ROW7L
  348. vsub.s32 q1, q1, q6
  349. vmlal.s16 q5, ROW5R, XFIX_2_053119869_MINUS_2_562915447
  350. vmlsl.s16 q5, ROW3R, XFIX_2_562915447
  351. vsub.s32 q1, q1, q6
  352. vmull.s16 q6, ROW2R, XFIX_0_541196100_PLUS_0_765366865
  353. vmlal.s16 q6, ROW6R, XFIX_0_541196100
  354. vsub.s32 q3, q3, q2
  355. vrshrn.s32 ROW6R, q1, #11
  356. vadd.s32 q1, q3, q5
  357. vsub.s32 q3, q3, q5
  358. vaddl.s16 q5, ROW0R, ROW4R
  359. vrshrn.s32 ROW2R, q1, #11
  360. vrshrn.s32 ROW5R, q3, #11
  361. vshl.s32 q5, q5, #13
  362. vmlal.s16 q4, ROW7R, XFIX_0_298631336_MINUS_0_899976223
  363. vadd.s32 q2, q5, q6
  364. vsub.s32 q1, q5, q6
  365. vadd.s32 q6, q2, q7
  366. vsub.s32 q2, q2, q7
  367. vadd.s32 q5, q1, q4
  368. vsub.s32 q3, q1, q4
  369. vrshrn.s32 ROW7R, q2, #11
  370. vrshrn.s32 ROW3R, q5, #11
  371. vrshrn.s32 ROW0R, q6, #11
  372. vrshrn.s32 ROW4R, q3, #11
  373. /* Transpose right 4x8 half */
  374. vtrn.16 ROW6R, ROW7R
  375. vtrn.16 ROW2R, ROW3R
  376. vtrn.16 ROW0R, ROW1R
  377. vtrn.16 ROW4R, ROW5R
  378. vtrn.32 ROW1R, ROW3R
  379. vtrn.32 ROW4R, ROW6R
  380. vtrn.32 ROW0R, ROW2R
  381. vtrn.32 ROW5R, ROW7R
  382. 1: /* 1-D IDCT, pass 2 (normal variant), left 4x8 half */
  383. vld1.s16 {d2}, [ip, :64] /* reload constants */
  384. vmull.s16 q6, ROW1R, XFIX_1_175875602 /* ROW5L <-> ROW1R */
  385. vmlal.s16 q6, ROW1L, XFIX_1_175875602
  386. vmlal.s16 q6, ROW3R, XFIX_1_175875602_MINUS_1_961570560 /* ROW7L <-> ROW3R */
  387. vmlal.s16 q6, ROW3L, XFIX_1_175875602_MINUS_1_961570560
  388. vmull.s16 q7, ROW3R, XFIX_1_175875602 /* ROW7L <-> ROW3R */
  389. vmlal.s16 q7, ROW3L, XFIX_1_175875602
  390. vmlal.s16 q7, ROW1R, XFIX_1_175875602_MINUS_0_390180644 /* ROW5L <-> ROW1R */
  391. vmlal.s16 q7, ROW1L, XFIX_1_175875602_MINUS_0_390180644
  392. vsubl.s16 q3, ROW0L, ROW0R /* ROW4L <-> ROW0R */
  393. vmull.s16 q2, ROW2L, XFIX_0_541196100
  394. vmlal.s16 q2, ROW2R, XFIX_0_541196100_MINUS_1_847759065 /* ROW6L <-> ROW2R */
  395. vmov q4, q6
  396. vmlsl.s16 q6, ROW1R, XFIX_2_562915447 /* ROW5L <-> ROW1R */
  397. vmlal.s16 q6, ROW3L, XFIX_3_072711026_MINUS_2_562915447
  398. vshl.s32 q3, q3, #13
  399. vmlsl.s16 q4, ROW1L, XFIX_0_899976223
  400. vadd.s32 q1, q3, q2
  401. vmov q5, q7
  402. vadd.s32 q1, q1, q6
  403. vmlsl.s16 q7, ROW3R, XFIX_0_899976223 /* ROW7L <-> ROW3R */
  404. vmlal.s16 q7, ROW1L, XFIX_1_501321110_MINUS_0_899976223
  405. vshrn.s32 ROW1L, q1, #16
  406. vsub.s32 q1, q1, q6
  407. vmlal.s16 q5, ROW1R, XFIX_2_053119869_MINUS_2_562915447 /* ROW5L <-> ROW1R */
  408. vmlsl.s16 q5, ROW3L, XFIX_2_562915447
  409. vsub.s32 q1, q1, q6
  410. vmull.s16 q6, ROW2L, XFIX_0_541196100_PLUS_0_765366865
  411. vmlal.s16 q6, ROW2R, XFIX_0_541196100 /* ROW6L <-> ROW2R */
  412. vsub.s32 q3, q3, q2
  413. vshrn.s32 ROW2R, q1, #16 /* ROW6L <-> ROW2R */
  414. vadd.s32 q1, q3, q5
  415. vsub.s32 q3, q3, q5
  416. vaddl.s16 q5, ROW0L, ROW0R /* ROW4L <-> ROW0R */
  417. vshrn.s32 ROW2L, q1, #16
  418. vshrn.s32 ROW1R, q3, #16 /* ROW5L <-> ROW1R */
  419. vshl.s32 q5, q5, #13
  420. vmlal.s16 q4, ROW3R, XFIX_0_298631336_MINUS_0_899976223 /* ROW7L <-> ROW3R */
  421. vadd.s32 q2, q5, q6
  422. vsub.s32 q1, q5, q6
  423. vadd.s32 q6, q2, q7
  424. vsub.s32 q2, q2, q7
  425. vadd.s32 q5, q1, q4
  426. vsub.s32 q3, q1, q4
  427. vshrn.s32 ROW3R, q2, #16 /* ROW7L <-> ROW3R */
  428. vshrn.s32 ROW3L, q5, #16
  429. vshrn.s32 ROW0L, q6, #16
  430. vshrn.s32 ROW0R, q3, #16 /* ROW4L <-> ROW0R */
  431. /* 1-D IDCT, pass 2, right 4x8 half */
  432. vld1.s16 {d2}, [ip, :64] /* reload constants */
  433. vmull.s16 q6, ROW5R, XFIX_1_175875602
  434. vmlal.s16 q6, ROW5L, XFIX_1_175875602 /* ROW5L <-> ROW1R */
  435. vmlal.s16 q6, ROW7R, XFIX_1_175875602_MINUS_1_961570560
  436. vmlal.s16 q6, ROW7L, XFIX_1_175875602_MINUS_1_961570560 /* ROW7L <-> ROW3R */
  437. vmull.s16 q7, ROW7R, XFIX_1_175875602
  438. vmlal.s16 q7, ROW7L, XFIX_1_175875602 /* ROW7L <-> ROW3R */
  439. vmlal.s16 q7, ROW5R, XFIX_1_175875602_MINUS_0_390180644
  440. vmlal.s16 q7, ROW5L, XFIX_1_175875602_MINUS_0_390180644 /* ROW5L <-> ROW1R */
  441. vsubl.s16 q3, ROW4L, ROW4R /* ROW4L <-> ROW0R */
  442. vmull.s16 q2, ROW6L, XFIX_0_541196100 /* ROW6L <-> ROW2R */
  443. vmlal.s16 q2, ROW6R, XFIX_0_541196100_MINUS_1_847759065
  444. vmov q4, q6
  445. vmlsl.s16 q6, ROW5R, XFIX_2_562915447
  446. vmlal.s16 q6, ROW7L, XFIX_3_072711026_MINUS_2_562915447 /* ROW7L <-> ROW3R */
  447. vshl.s32 q3, q3, #13
  448. vmlsl.s16 q4, ROW5L, XFIX_0_899976223 /* ROW5L <-> ROW1R */
  449. vadd.s32 q1, q3, q2
  450. vmov q5, q7
  451. vadd.s32 q1, q1, q6
  452. vmlsl.s16 q7, ROW7R, XFIX_0_899976223
  453. vmlal.s16 q7, ROW5L, XFIX_1_501321110_MINUS_0_899976223 /* ROW5L <-> ROW1R */
  454. vshrn.s32 ROW5L, q1, #16 /* ROW5L <-> ROW1R */
  455. vsub.s32 q1, q1, q6
  456. vmlal.s16 q5, ROW5R, XFIX_2_053119869_MINUS_2_562915447
  457. vmlsl.s16 q5, ROW7L, XFIX_2_562915447 /* ROW7L <-> ROW3R */
  458. vsub.s32 q1, q1, q6
  459. vmull.s16 q6, ROW6L, XFIX_0_541196100_PLUS_0_765366865 /* ROW6L <-> ROW2R */
  460. vmlal.s16 q6, ROW6R, XFIX_0_541196100
  461. vsub.s32 q3, q3, q2
  462. vshrn.s32 ROW6R, q1, #16
  463. vadd.s32 q1, q3, q5
  464. vsub.s32 q3, q3, q5
  465. vaddl.s16 q5, ROW4L, ROW4R /* ROW4L <-> ROW0R */
  466. vshrn.s32 ROW6L, q1, #16 /* ROW6L <-> ROW2R */
  467. vshrn.s32 ROW5R, q3, #16
  468. vshl.s32 q5, q5, #13
  469. vmlal.s16 q4, ROW7R, XFIX_0_298631336_MINUS_0_899976223
  470. vadd.s32 q2, q5, q6
  471. vsub.s32 q1, q5, q6
  472. vadd.s32 q6, q2, q7
  473. vsub.s32 q2, q2, q7
  474. vadd.s32 q5, q1, q4
  475. vsub.s32 q3, q1, q4
  476. vshrn.s32 ROW7R, q2, #16
  477. vshrn.s32 ROW7L, q5, #16 /* ROW7L <-> ROW3R */
  478. vshrn.s32 ROW4L, q6, #16 /* ROW4L <-> ROW0R */
  479. vshrn.s32 ROW4R, q3, #16
  480. 2: /* Descale to 8-bit and range limit */
  481. vqrshrn.s16 d16, q8, #2
  482. vqrshrn.s16 d17, q9, #2
  483. vqrshrn.s16 d18, q10, #2
  484. vqrshrn.s16 d19, q11, #2
  485. vpop {d8-d15} /* restore NEON registers */
  486. vqrshrn.s16 d20, q12, #2
  487. /* Transpose the final 8-bit samples and do signed->unsigned conversion */
  488. vtrn.16 q8, q9
  489. vqrshrn.s16 d21, q13, #2
  490. vqrshrn.s16 d22, q14, #2
  491. vmov.u8 q0, #(CENTERJSAMPLE)
  492. vqrshrn.s16 d23, q15, #2
  493. vtrn.8 d16, d17
  494. vtrn.8 d18, d19
  495. vadd.u8 q8, q8, q0
  496. vadd.u8 q9, q9, q0
  497. vtrn.16 q10, q11
  498. /* Store results to the output buffer */
  499. ldmia OUTPUT_BUF!, {TMP1, TMP2}
  500. add TMP1, TMP1, OUTPUT_COL
  501. add TMP2, TMP2, OUTPUT_COL
  502. vst1.8 {d16}, [TMP1]
  503. vtrn.8 d20, d21
  504. vst1.8 {d17}, [TMP2]
  505. ldmia OUTPUT_BUF!, {TMP1, TMP2}
  506. add TMP1, TMP1, OUTPUT_COL
  507. add TMP2, TMP2, OUTPUT_COL
  508. vst1.8 {d18}, [TMP1]
  509. vadd.u8 q10, q10, q0
  510. vst1.8 {d19}, [TMP2]
  511. ldmia OUTPUT_BUF, {TMP1, TMP2, TMP3, TMP4}
  512. add TMP1, TMP1, OUTPUT_COL
  513. add TMP2, TMP2, OUTPUT_COL
  514. add TMP3, TMP3, OUTPUT_COL
  515. add TMP4, TMP4, OUTPUT_COL
  516. vtrn.8 d22, d23
  517. vst1.8 {d20}, [TMP1]
  518. vadd.u8 q11, q11, q0
  519. vst1.8 {d21}, [TMP2]
  520. vst1.8 {d22}, [TMP3]
  521. vst1.8 {d23}, [TMP4]
  522. bx lr
  523. 3: /* Left 4x8 half is done, right 4x8 half contains mostly zeros */
  524. /* Transpose left 4x8 half */
  525. vtrn.16 ROW6L, ROW7L
  526. vtrn.16 ROW2L, ROW3L
  527. vtrn.16 ROW0L, ROW1L
  528. vtrn.16 ROW4L, ROW5L
  529. vshl.s16 ROW0R, ROW0R, #2 /* PASS1_BITS */
  530. vtrn.32 ROW1L, ROW3L
  531. vtrn.32 ROW4L, ROW6L
  532. vtrn.32 ROW0L, ROW2L
  533. vtrn.32 ROW5L, ROW7L
  534. cmp r0, #0
  535. beq 4f /* Right 4x8 half has all zeros, go to 'sparse' second
  536. pass */
  537. /* Only row 0 is non-zero for the right 4x8 half */
  538. vdup.s16 ROW1R, ROW0R[1]
  539. vdup.s16 ROW2R, ROW0R[2]
  540. vdup.s16 ROW3R, ROW0R[3]
  541. vdup.s16 ROW4R, ROW0R[0]
  542. vdup.s16 ROW5R, ROW0R[1]
  543. vdup.s16 ROW6R, ROW0R[2]
  544. vdup.s16 ROW7R, ROW0R[3]
  545. vdup.s16 ROW0R, ROW0R[0]
  546. b 1b /* Go to 'normal' second pass */
  547. 4: /* 1-D IDCT, pass 2 (sparse variant with zero rows 4-7), left 4x8 half */
  548. vld1.s16 {d2}, [ip, :64] /* reload constants */
  549. vmull.s16 q6, ROW1L, XFIX_1_175875602
  550. vmlal.s16 q6, ROW3L, XFIX_1_175875602_MINUS_1_961570560
  551. vmull.s16 q7, ROW3L, XFIX_1_175875602
  552. vmlal.s16 q7, ROW1L, XFIX_1_175875602_MINUS_0_390180644
  553. vmull.s16 q2, ROW2L, XFIX_0_541196100
  554. vshll.s16 q3, ROW0L, #13
  555. vmov q4, q6
  556. vmlal.s16 q6, ROW3L, XFIX_3_072711026_MINUS_2_562915447
  557. vmlsl.s16 q4, ROW1L, XFIX_0_899976223
  558. vadd.s32 q1, q3, q2
  559. vmov q5, q7
  560. vmlal.s16 q7, ROW1L, XFIX_1_501321110_MINUS_0_899976223
  561. vadd.s32 q1, q1, q6
  562. vadd.s32 q6, q6, q6
  563. vmlsl.s16 q5, ROW3L, XFIX_2_562915447
  564. vshrn.s32 ROW1L, q1, #16
  565. vsub.s32 q1, q1, q6
  566. vmull.s16 q6, ROW2L, XFIX_0_541196100_PLUS_0_765366865
  567. vsub.s32 q3, q3, q2
  568. vshrn.s32 ROW2R, q1, #16 /* ROW6L <-> ROW2R */
  569. vadd.s32 q1, q3, q5
  570. vsub.s32 q3, q3, q5
  571. vshll.s16 q5, ROW0L, #13
  572. vshrn.s32 ROW2L, q1, #16
  573. vshrn.s32 ROW1R, q3, #16 /* ROW5L <-> ROW1R */
  574. vadd.s32 q2, q5, q6
  575. vsub.s32 q1, q5, q6
  576. vadd.s32 q6, q2, q7
  577. vsub.s32 q2, q2, q7
  578. vadd.s32 q5, q1, q4
  579. vsub.s32 q3, q1, q4
  580. vshrn.s32 ROW3R, q2, #16 /* ROW7L <-> ROW3R */
  581. vshrn.s32 ROW3L, q5, #16
  582. vshrn.s32 ROW0L, q6, #16
  583. vshrn.s32 ROW0R, q3, #16 /* ROW4L <-> ROW0R */
  584. /* 1-D IDCT, pass 2 (sparse variant with zero rows 4-7), right 4x8 half */
  585. vld1.s16 {d2}, [ip, :64] /* reload constants */
  586. vmull.s16 q6, ROW5L, XFIX_1_175875602
  587. vmlal.s16 q6, ROW7L, XFIX_1_175875602_MINUS_1_961570560
  588. vmull.s16 q7, ROW7L, XFIX_1_175875602
  589. vmlal.s16 q7, ROW5L, XFIX_1_175875602_MINUS_0_390180644
  590. vmull.s16 q2, ROW6L, XFIX_0_541196100
  591. vshll.s16 q3, ROW4L, #13
  592. vmov q4, q6
  593. vmlal.s16 q6, ROW7L, XFIX_3_072711026_MINUS_2_562915447
  594. vmlsl.s16 q4, ROW5L, XFIX_0_899976223
  595. vadd.s32 q1, q3, q2
  596. vmov q5, q7
  597. vmlal.s16 q7, ROW5L, XFIX_1_501321110_MINUS_0_899976223
  598. vadd.s32 q1, q1, q6
  599. vadd.s32 q6, q6, q6
  600. vmlsl.s16 q5, ROW7L, XFIX_2_562915447
  601. vshrn.s32 ROW5L, q1, #16 /* ROW5L <-> ROW1R */
  602. vsub.s32 q1, q1, q6
  603. vmull.s16 q6, ROW6L, XFIX_0_541196100_PLUS_0_765366865
  604. vsub.s32 q3, q3, q2
  605. vshrn.s32 ROW6R, q1, #16
  606. vadd.s32 q1, q3, q5
  607. vsub.s32 q3, q3, q5
  608. vshll.s16 q5, ROW4L, #13
  609. vshrn.s32 ROW6L, q1, #16 /* ROW6L <-> ROW2R */
  610. vshrn.s32 ROW5R, q3, #16
  611. vadd.s32 q2, q5, q6
  612. vsub.s32 q1, q5, q6
  613. vadd.s32 q6, q2, q7
  614. vsub.s32 q2, q2, q7
  615. vadd.s32 q5, q1, q4
  616. vsub.s32 q3, q1, q4
  617. vshrn.s32 ROW7R, q2, #16
  618. vshrn.s32 ROW7L, q5, #16 /* ROW7L <-> ROW3R */
  619. vshrn.s32 ROW4L, q6, #16 /* ROW4L <-> ROW0R */
  620. vshrn.s32 ROW4R, q3, #16
  621. b 2b /* Go to epilogue */
  622. .unreq DCT_TABLE
  623. .unreq COEF_BLOCK
  624. .unreq OUTPUT_BUF
  625. .unreq OUTPUT_COL
  626. .unreq TMP1
  627. .unreq TMP2
  628. .unreq TMP3
  629. .unreq TMP4
  630. .unreq ROW0L
  631. .unreq ROW0R
  632. .unreq ROW1L
  633. .unreq ROW1R
  634. .unreq ROW2L
  635. .unreq ROW2R
  636. .unreq ROW3L
  637. .unreq ROW3R
  638. .unreq ROW4L
  639. .unreq ROW4R
  640. .unreq ROW5L
  641. .unreq ROW5R
  642. .unreq ROW6L
  643. .unreq ROW6R
  644. .unreq ROW7L
  645. .unreq ROW7R
  646. /*****************************************************************************/
  647. /*
  648. * jsimd_idct_ifast_neon
  649. *
  650. * This function contains a fast, less accurate integer implementation of
  651. * the inverse DCT (Discrete Cosine Transform). It uses the same calculations
  652. * and produces exactly the same output as IJG's original 'jpeg_idct_ifast'
  653. * function from jidctfst.c.
  654. *
  655. * Normally a 1-D AAN DCT needs 5 multiplications and 29 additions.
  656. * But in the ARM NEON case some extra additions are required because the
  657. * VQDMULH instruction can't handle constants larger than 1. So expressions
  658. * like "x * 1.082392200" have to be converted to "x * 0.082392200 + x",
  659. * which introduces an extra addition. Overall, there are 6 extra additions
  660. * per 1-D IDCT pass, for a total of 5 VQDMULH and 35 VADD/VSUB instructions.
  661. */
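/* As a rough scalar model of the trick described above (an illustrative
 * sketch only, not part of the library): VQDMULH.S16 computes
 * (a * b * 2) >> 16 with saturation, so an expression like x * 1.082392200
 * is evaluated as x + vqdmulh(x, XFIX_1_082392200), with the fractional part
 * stored in Q15 form.  The constant table just below stores
 * 277 * 128 - 256 * 128 = 2688, i.e. (277/256 - 1) in Q15, which approximates
 * 0.082392200 * 32768 = 2700.
 */
#if 0  /* illustrative C sketch, discarded by the preprocessor */
static short ref_mul_1_082392200(short x)
{
  /* (x * 2688 * 2) >> 16 == x * 0.08203125; saturation is ignored here */
  short frac = (short)(((int)x * 2688 * 2) >> 16);
  return (short)(x + frac);  /* approximately x * 1.082 */
}
#endif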
  662. #define XFIX_1_082392200 d0[0]
  663. #define XFIX_1_414213562 d0[1]
  664. #define XFIX_1_847759065 d0[2]
  665. #define XFIX_2_613125930 d0[3]
  666. .balign 16
  667. jsimd_idct_ifast_neon_consts:
  668. .short (277 * 128 - 256 * 128) /* XFIX_1_082392200 */
  669. .short (362 * 128 - 256 * 128) /* XFIX_1_414213562 */
  670. .short (473 * 128 - 256 * 128) /* XFIX_1_847759065 */
  671. .short (669 * 128 - 512 * 128) /* XFIX_2_613125930 */
  672. asm_function jsimd_idct_ifast_neon
  673. DCT_TABLE .req r0
  674. COEF_BLOCK .req r1
  675. OUTPUT_BUF .req r2
  676. OUTPUT_COL .req r3
  677. TMP1 .req r0
  678. TMP2 .req r1
  679. TMP3 .req r2
  680. TMP4 .req ip
  681. /* Load and dequantize coefficients into NEON registers
  682. * with the following allocation:
  683. * 0 1 2 3 | 4 5 6 7
  684. * ---------+--------
  685. * 0 | d16 | d17 ( q8 )
  686. * 1 | d18 | d19 ( q9 )
  687. * 2 | d20 | d21 ( q10 )
  688. * 3 | d22 | d23 ( q11 )
  689. * 4 | d24 | d25 ( q12 )
  690. * 5 | d26 | d27 ( q13 )
  691. * 6 | d28 | d29 ( q14 )
  692. * 7 | d30 | d31 ( q15 )
  693. */
  694. adr ip, jsimd_idct_ifast_neon_consts
  695. vld1.16 {d16, d17, d18, d19}, [COEF_BLOCK, :128]!
  696. vld1.16 {d0, d1, d2, d3}, [DCT_TABLE, :128]!
  697. vld1.16 {d20, d21, d22, d23}, [COEF_BLOCK, :128]!
  698. vmul.s16 q8, q8, q0
  699. vld1.16 {d4, d5, d6, d7}, [DCT_TABLE, :128]!
  700. vmul.s16 q9, q9, q1
  701. vld1.16 {d24, d25, d26, d27}, [COEF_BLOCK, :128]!
  702. vmul.s16 q10, q10, q2
  703. vld1.16 {d0, d1, d2, d3}, [DCT_TABLE, :128]!
  704. vmul.s16 q11, q11, q3
  705. vld1.16 {d28, d29, d30, d31}, [COEF_BLOCK, :128]
  706. vmul.s16 q12, q12, q0
  707. vld1.16 {d4, d5, d6, d7}, [DCT_TABLE, :128]!
  708. vmul.s16 q14, q14, q2
  709. vmul.s16 q13, q13, q1
  710. vld1.16 {d0}, [ip, :64] /* load constants */
  711. vmul.s16 q15, q15, q3
  712. vpush {d8-d13} /* save NEON registers */
  713. /* 1-D IDCT, pass 1 */
  714. vsub.s16 q2, q10, q14
  715. vadd.s16 q14, q10, q14
  716. vsub.s16 q1, q11, q13
  717. vadd.s16 q13, q11, q13
  718. vsub.s16 q5, q9, q15
  719. vadd.s16 q15, q9, q15
  720. vqdmulh.s16 q4, q2, XFIX_1_414213562
  721. vqdmulh.s16 q6, q1, XFIX_2_613125930
  722. vadd.s16 q3, q1, q1
  723. vsub.s16 q1, q5, q1
  724. vadd.s16 q10, q2, q4
  725. vqdmulh.s16 q4, q1, XFIX_1_847759065
  726. vsub.s16 q2, q15, q13
  727. vadd.s16 q3, q3, q6
  728. vqdmulh.s16 q6, q2, XFIX_1_414213562
  729. vadd.s16 q1, q1, q4
  730. vqdmulh.s16 q4, q5, XFIX_1_082392200
  731. vsub.s16 q10, q10, q14
  732. vadd.s16 q2, q2, q6
  733. vsub.s16 q6, q8, q12
  734. vadd.s16 q12, q8, q12
  735. vadd.s16 q9, q5, q4
  736. vadd.s16 q5, q6, q10
  737. vsub.s16 q10, q6, q10
  738. vadd.s16 q6, q15, q13
  739. vadd.s16 q8, q12, q14
  740. vsub.s16 q3, q6, q3
  741. vsub.s16 q12, q12, q14
  742. vsub.s16 q3, q3, q1
  743. vsub.s16 q1, q9, q1
  744. vadd.s16 q2, q3, q2
  745. vsub.s16 q15, q8, q6
  746. vadd.s16 q1, q1, q2
  747. vadd.s16 q8, q8, q6
  748. vadd.s16 q14, q5, q3
  749. vsub.s16 q9, q5, q3
  750. vsub.s16 q13, q10, q2
  751. vadd.s16 q10, q10, q2
  752. /* Transpose */
  753. vtrn.16 q8, q9
  754. vsub.s16 q11, q12, q1
  755. vtrn.16 q14, q15
  756. vadd.s16 q12, q12, q1
  757. vtrn.16 q10, q11
  758. vtrn.16 q12, q13
  759. vtrn.32 q9, q11
  760. vtrn.32 q12, q14
  761. vtrn.32 q8, q10
  762. vtrn.32 q13, q15
  763. vswp d28, d21
  764. vswp d26, d19
  765. /* 1-D IDCT, pass 2 */
  766. vsub.s16 q2, q10, q14
  767. vswp d30, d23
  768. vadd.s16 q14, q10, q14
  769. vswp d24, d17
  770. vsub.s16 q1, q11, q13
  771. vadd.s16 q13, q11, q13
  772. vsub.s16 q5, q9, q15
  773. vadd.s16 q15, q9, q15
  774. vqdmulh.s16 q4, q2, XFIX_1_414213562
  775. vqdmulh.s16 q6, q1, XFIX_2_613125930
  776. vadd.s16 q3, q1, q1
  777. vsub.s16 q1, q5, q1
  778. vadd.s16 q10, q2, q4
  779. vqdmulh.s16 q4, q1, XFIX_1_847759065
  780. vsub.s16 q2, q15, q13
  781. vadd.s16 q3, q3, q6
  782. vqdmulh.s16 q6, q2, XFIX_1_414213562
  783. vadd.s16 q1, q1, q4
  784. vqdmulh.s16 q4, q5, XFIX_1_082392200
  785. vsub.s16 q10, q10, q14
  786. vadd.s16 q2, q2, q6
  787. vsub.s16 q6, q8, q12
  788. vadd.s16 q12, q8, q12
  789. vadd.s16 q9, q5, q4
  790. vadd.s16 q5, q6, q10
  791. vsub.s16 q10, q6, q10
  792. vadd.s16 q6, q15, q13
  793. vadd.s16 q8, q12, q14
  794. vsub.s16 q3, q6, q3
  795. vsub.s16 q12, q12, q14
  796. vsub.s16 q3, q3, q1
  797. vsub.s16 q1, q9, q1
  798. vadd.s16 q2, q3, q2
  799. vsub.s16 q15, q8, q6
  800. vadd.s16 q1, q1, q2
  801. vadd.s16 q8, q8, q6
  802. vadd.s16 q14, q5, q3
  803. vsub.s16 q9, q5, q3
  804. vsub.s16 q13, q10, q2
  805. vpop {d8-d13} /* restore NEON registers */
  806. vadd.s16 q10, q10, q2
  807. vsub.s16 q11, q12, q1
  808. vadd.s16 q12, q12, q1
  809. /* Descale to 8-bit and range limit */
  810. vmov.u8 q0, #0x80
  811. vqshrn.s16 d16, q8, #5
  812. vqshrn.s16 d17, q9, #5
  813. vqshrn.s16 d18, q10, #5
  814. vqshrn.s16 d19, q11, #5
  815. vqshrn.s16 d20, q12, #5
  816. vqshrn.s16 d21, q13, #5
  817. vqshrn.s16 d22, q14, #5
  818. vqshrn.s16 d23, q15, #5
  819. vadd.u8 q8, q8, q0
  820. vadd.u8 q9, q9, q0
  821. vadd.u8 q10, q10, q0
  822. vadd.u8 q11, q11, q0
  823. /* Transpose the final 8-bit samples */
  824. vtrn.16 q8, q9
  825. vtrn.16 q10, q11
  826. vtrn.32 q8, q10
  827. vtrn.32 q9, q11
  828. vtrn.8 d16, d17
  829. vtrn.8 d18, d19
  830. /* Store results to the output buffer */
  831. ldmia OUTPUT_BUF!, {TMP1, TMP2}
  832. add TMP1, TMP1, OUTPUT_COL
  833. add TMP2, TMP2, OUTPUT_COL
  834. vst1.8 {d16}, [TMP1]
  835. vst1.8 {d17}, [TMP2]
  836. ldmia OUTPUT_BUF!, {TMP1, TMP2}
  837. add TMP1, TMP1, OUTPUT_COL
  838. add TMP2, TMP2, OUTPUT_COL
  839. vst1.8 {d18}, [TMP1]
  840. vtrn.8 d20, d21
  841. vst1.8 {d19}, [TMP2]
  842. ldmia OUTPUT_BUF, {TMP1, TMP2, TMP3, TMP4}
  843. add TMP1, TMP1, OUTPUT_COL
  844. add TMP2, TMP2, OUTPUT_COL
  845. add TMP3, TMP3, OUTPUT_COL
  846. add TMP4, TMP4, OUTPUT_COL
  847. vst1.8 {d20}, [TMP1]
  848. vtrn.8 d22, d23
  849. vst1.8 {d21}, [TMP2]
  850. vst1.8 {d22}, [TMP3]
  851. vst1.8 {d23}, [TMP4]
  852. bx lr
  853. .unreq DCT_TABLE
  854. .unreq COEF_BLOCK
  855. .unreq OUTPUT_BUF
  856. .unreq OUTPUT_COL
  857. .unreq TMP1
  858. .unreq TMP2
  859. .unreq TMP3
  860. .unreq TMP4
  861. /*****************************************************************************/
  862. /*
  863. * jsimd_idct_4x4_neon
  864. *
  865. * This function contains inverse-DCT code for producing reduced-size
  866. * 4x4 pixel output from an 8x8 DCT block. It uses the same calculations
  867. * and produces exactly the same output as IJG's original 'jpeg_idct_4x4'
  868. * function from jpeg-6b (jidctred.c).
  869. *
  870. * NOTE: jpeg-8 has an improved implementation of the 4x4 inverse DCT, which
  871. * requires far fewer arithmetic operations and hence should be faster.
  872. * The primary purpose of this particular NEON-optimized function is
  873. * bit-exact compatibility with jpeg-6b.
  874. *
  875. * TODO: slightly better instruction scheduling can be achieved by expanding
  876. * the idct_helper/transpose_4x4 macros and reordering instructions,
  877. * but readability will suffer somewhat.
  878. */
  879. #define CONST_BITS 13
  880. #define FIX_0_211164243 (1730) /* FIX(0.211164243) */
  881. #define FIX_0_509795579 (4176) /* FIX(0.509795579) */
  882. #define FIX_0_601344887 (4926) /* FIX(0.601344887) */
  883. #define FIX_0_720959822 (5906) /* FIX(0.720959822) */
  884. #define FIX_0_765366865 (6270) /* FIX(0.765366865) */
  885. #define FIX_0_850430095 (6967) /* FIX(0.850430095) */
  886. #define FIX_0_899976223 (7373) /* FIX(0.899976223) */
  887. #define FIX_1_061594337 (8697) /* FIX(1.061594337) */
  888. #define FIX_1_272758580 (10426) /* FIX(1.272758580) */
  889. #define FIX_1_451774981 (11893) /* FIX(1.451774981) */
  890. #define FIX_1_847759065 (15137) /* FIX(1.847759065) */
  891. #define FIX_2_172734803 (17799) /* FIX(2.172734803) */
  892. #define FIX_2_562915447 (20995) /* FIX(2.562915447) */
  893. #define FIX_3_624509785 (29692) /* FIX(3.624509785) */
  894. .balign 16
  895. jsimd_idct_4x4_neon_consts:
  896. .short FIX_1_847759065 /* d0[0] */
  897. .short -FIX_0_765366865 /* d0[1] */
  898. .short -FIX_0_211164243 /* d0[2] */
  899. .short FIX_1_451774981 /* d0[3] */
  900. .short -FIX_2_172734803 /* d1[0] */
  901. .short FIX_1_061594337 /* d1[1] */
  902. .short -FIX_0_509795579 /* d1[2] */
  903. .short -FIX_0_601344887 /* d1[3] */
  904. .short FIX_0_899976223 /* d2[0] */
  905. .short FIX_2_562915447 /* d2[1] */
  906. .short 1 << (CONST_BITS + 1) /* d2[2] */
  907. .short 0 /* d2[3] */
  908. .macro idct_helper x4, x6, x8, x10, x12, x14, x16, shift, y26, y27, y28, y29
  909. vmull.s16 q14, \x4, d2[2]
  910. vmlal.s16 q14, \x8, d0[0]
  911. vmlal.s16 q14, \x14, d0[1]
  912. vmull.s16 q13, \x16, d1[2]
  913. vmlal.s16 q13, \x12, d1[3]
  914. vmlal.s16 q13, \x10, d2[0]
  915. vmlal.s16 q13, \x6, d2[1]
  916. vmull.s16 q15, \x4, d2[2]
  917. vmlsl.s16 q15, \x8, d0[0]
  918. vmlsl.s16 q15, \x14, d0[1]
  919. vmull.s16 q12, \x16, d0[2]
  920. vmlal.s16 q12, \x12, d0[3]
  921. vmlal.s16 q12, \x10, d1[0]
  922. vmlal.s16 q12, \x6, d1[1]
  923. vadd.s32 q10, q14, q13
  924. vsub.s32 q14, q14, q13
  925. .if \shift > 16
  926. vrshr.s32 q10, q10, #\shift
  927. vrshr.s32 q14, q14, #\shift
  928. vmovn.s32 \y26, q10
  929. vmovn.s32 \y29, q14
  930. .else
  931. vrshrn.s32 \y26, q10, #\shift
  932. vrshrn.s32 \y29, q14, #\shift
  933. .endif
  934. vadd.s32 q10, q15, q12
  935. vsub.s32 q15, q15, q12
  936. .if \shift > 16
  937. vrshr.s32 q10, q10, #\shift
  938. vrshr.s32 q15, q15, #\shift
  939. vmovn.s32 \y27, q10
  940. vmovn.s32 \y28, q15
  941. .else
  942. vrshrn.s32 \y27, q10, #\shift
  943. vrshrn.s32 \y28, q15, #\shift
  944. .endif
  945. .endm
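/* In the idct_helper invocations below, shift is 12 in pass 1 and 19 in
 * pass 2, matching jpeg_idct_4x4() in jidctred.c:
 *   12 = CONST_BITS - PASS1_BITS + 1,   19 = CONST_BITS + PASS1_BITS + 3 + 1
 * with CONST_BITS = 13 and PASS1_BITS = 2, while d2[2] = 1 << (CONST_BITS + 1)
 * provides the scaling of the first (DC) coefficient in the even part.
 */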
  946. asm_function jsimd_idct_4x4_neon
  947. DCT_TABLE .req r0
  948. COEF_BLOCK .req r1
  949. OUTPUT_BUF .req r2
  950. OUTPUT_COL .req r3
  951. TMP1 .req r0
  952. TMP2 .req r1
  953. TMP3 .req r2
  954. TMP4 .req ip
  955. vpush {d8-d15}
  956. /* Load constants (d3 is just used for padding) */
  957. adr TMP4, jsimd_idct_4x4_neon_consts
  958. vld1.16 {d0, d1, d2, d3}, [TMP4, :128]
  959. /* Load all COEF_BLOCK into NEON registers with the following allocation:
  960. * 0 1 2 3 | 4 5 6 7
  961. * ---------+--------
  962. * 0 | d4 | d5
  963. * 1 | d6 | d7
  964. * 2 | d8 | d9
  965. * 3 | d10 | d11
  966. * 4 | - | -
  967. * 5 | d12 | d13
  968. * 6 | d14 | d15
  969. * 7 | d16 | d17
  970. */
  971. vld1.16 {d4, d5, d6, d7}, [COEF_BLOCK, :128]!
  972. vld1.16 {d8, d9, d10, d11}, [COEF_BLOCK, :128]!
  973. add COEF_BLOCK, COEF_BLOCK, #16
  974. vld1.16 {d12, d13, d14, d15}, [COEF_BLOCK, :128]!
  975. vld1.16 {d16, d17}, [COEF_BLOCK, :128]!
  976. /* dequantize */
  977. vld1.16 {d18, d19, d20, d21}, [DCT_TABLE, :128]!
  978. vmul.s16 q2, q2, q9
  979. vld1.16 {d22, d23, d24, d25}, [DCT_TABLE, :128]!
  980. vmul.s16 q3, q3, q10
  981. vmul.s16 q4, q4, q11
  982. add DCT_TABLE, DCT_TABLE, #16
  983. vld1.16 {d26, d27, d28, d29}, [DCT_TABLE, :128]!
  984. vmul.s16 q5, q5, q12
  985. vmul.s16 q6, q6, q13
  986. vld1.16 {d30, d31}, [DCT_TABLE, :128]!
  987. vmul.s16 q7, q7, q14
  988. vmul.s16 q8, q8, q15
  989. /* Pass 1 */
  990. idct_helper d4, d6, d8, d10, d12, d14, d16, 12, d4, d6, d8, d10
  991. transpose_4x4 d4, d6, d8, d10
  992. idct_helper d5, d7, d9, d11, d13, d15, d17, 12, d5, d7, d9, d11
  993. transpose_4x4 d5, d7, d9, d11
  994. /* Pass 2 */
  995. idct_helper d4, d6, d8, d10, d7, d9, d11, 19, d26, d27, d28, d29
  996. transpose_4x4 d26, d27, d28, d29
  997. /* Range limit */
  998. vmov.u16 q15, #0x80
  999. vadd.s16 q13, q13, q15
  1000. vadd.s16 q14, q14, q15
  1001. vqmovun.s16 d26, q13
  1002. vqmovun.s16 d27, q14
  1003. /* Store results to the output buffer */
  1004. ldmia OUTPUT_BUF, {TMP1, TMP2, TMP3, TMP4}
  1005. add TMP1, TMP1, OUTPUT_COL
  1006. add TMP2, TMP2, OUTPUT_COL
  1007. add TMP3, TMP3, OUTPUT_COL
  1008. add TMP4, TMP4, OUTPUT_COL
  1009. #if defined(__ARMEL__) && !RESPECT_STRICT_ALIGNMENT
  1010. /* We can use far fewer instructions on little-endian systems if the
  1011. * OS kernel is not configured to trap unaligned memory accesses.
  1012. */
  1013. vst1.32 {d26[0]}, [TMP1]!
  1014. vst1.32 {d27[0]}, [TMP3]!
  1015. vst1.32 {d26[1]}, [TMP2]!
  1016. vst1.32 {d27[1]}, [TMP4]!
  1017. #else
  1018. vst1.8 {d26[0]}, [TMP1]!
  1019. vst1.8 {d27[0]}, [TMP3]!
  1020. vst1.8 {d26[1]}, [TMP1]!
  1021. vst1.8 {d27[1]}, [TMP3]!
  1022. vst1.8 {d26[2]}, [TMP1]!
  1023. vst1.8 {d27[2]}, [TMP3]!
  1024. vst1.8 {d26[3]}, [TMP1]!
  1025. vst1.8 {d27[3]}, [TMP3]!
  1026. vst1.8 {d26[4]}, [TMP2]!
  1027. vst1.8 {d27[4]}, [TMP4]!
  1028. vst1.8 {d26[5]}, [TMP2]!
  1029. vst1.8 {d27[5]}, [TMP4]!
  1030. vst1.8 {d26[6]}, [TMP2]!
  1031. vst1.8 {d27[6]}, [TMP4]!
  1032. vst1.8 {d26[7]}, [TMP2]!
  1033. vst1.8 {d27[7]}, [TMP4]!
  1034. #endif
  1035. vpop {d8-d15}
  1036. bx lr
  1037. .unreq DCT_TABLE
  1038. .unreq COEF_BLOCK
  1039. .unreq OUTPUT_BUF
  1040. .unreq OUTPUT_COL
  1041. .unreq TMP1
  1042. .unreq TMP2
  1043. .unreq TMP3
  1044. .unreq TMP4
  1045. .purgem idct_helper
  1046. /*****************************************************************************/
  1047. /*
  1048. * jsimd_idct_2x2_neon
  1049. *
  1050. * This function contains inverse-DCT code for producing reduced-size
  1051. * 2x2 pixel output from an 8x8 DCT block. It uses the same calculations
  1052. * and produces exactly the same output as IJG's original 'jpeg_idct_2x2'
  1053. * function from jpeg-6b (jidctred.c).
  1054. *
  1055. * NOTE: jpeg-8 has an improved implementation of the 2x2 inverse DCT, which
  1056. * requires far fewer arithmetic operations and hence should be faster.
  1057. * The primary purpose of this particular NEON-optimized function is
  1058. * bit-exact compatibility with jpeg-6b.
  1059. */
  1060. .balign 8
  1061. jsimd_idct_2x2_neon_consts:
  1062. .short -FIX_0_720959822 /* d0[0] */
  1063. .short FIX_0_850430095 /* d0[1] */
  1064. .short -FIX_1_272758580 /* d0[2] */
  1065. .short FIX_3_624509785 /* d0[3] */
  1066. .macro idct_helper x4, x6, x10, x12, x16, shift, y26, y27
  1067. vshll.s16 q14, \x4, #15
  1068. vmull.s16 q13, \x6, d0[3]
  1069. vmlal.s16 q13, \x10, d0[2]
  1070. vmlal.s16 q13, \x12, d0[1]
  1071. vmlal.s16 q13, \x16, d0[0]
  1072. vadd.s32 q10, q14, q13
  1073. vsub.s32 q14, q14, q13
  1074. .if \shift > 16
  1075. vrshr.s32 q10, q10, #\shift
  1076. vrshr.s32 q14, q14, #\shift
  1077. vmovn.s32 \y26, q10
  1078. vmovn.s32 \y27, q14
  1079. .else
  1080. vrshrn.s32 \y26, q10, #\shift
  1081. vrshrn.s32 \y27, q14, #\shift
  1082. .endif
  1083. .endm
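/* Here shift is 13 (CONST_BITS) in pass 1 and 20 in pass 2, and the
 * vshll #15 corresponds to the z1 << (CONST_BITS + 2) term, following
 * jpeg_idct_2x2() in jidctred.c (20 = CONST_BITS + PASS1_BITS + 3 + 2,
 * assuming the same CONST_BITS = 13 / PASS1_BITS = 2 convention as above).
 */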
  1084. asm_function jsimd_idct_2x2_neon
  1085. DCT_TABLE .req r0
  1086. COEF_BLOCK .req r1
  1087. OUTPUT_BUF .req r2
  1088. OUTPUT_COL .req r3
  1089. TMP1 .req r0
  1090. TMP2 .req ip
  1091. vpush {d8-d15}
  1092. /* Load constants */
  1093. adr TMP2, jsimd_idct_2x2_neon_consts
  1094. vld1.16 {d0}, [TMP2, :64]
  1095. /* Load all COEF_BLOCK into NEON registers with the following allocation:
  1096. * 0 1 2 3 | 4 5 6 7
  1097. * ---------+--------
  1098. * 0 | d4 | d5
  1099. * 1 | d6 | d7
  1100. * 2 | - | -
  1101. * 3 | d10 | d11
  1102. * 4 | - | -
  1103. * 5 | d12 | d13
  1104. * 6 | - | -
  1105. * 7 | d16 | d17
  1106. */
  1107. vld1.16 {d4, d5, d6, d7}, [COEF_BLOCK, :128]!
  1108. add COEF_BLOCK, COEF_BLOCK, #16
  1109. vld1.16 {d10, d11}, [COEF_BLOCK, :128]!
  1110. add COEF_BLOCK, COEF_BLOCK, #16
  1111. vld1.16 {d12, d13}, [COEF_BLOCK, :128]!
  1112. add COEF_BLOCK, COEF_BLOCK, #16
  1113. vld1.16 {d16, d17}, [COEF_BLOCK, :128]!
  1114. /* Dequantize */
  1115. vld1.16 {d18, d19, d20, d21}, [DCT_TABLE, :128]!
  1116. vmul.s16 q2, q2, q9
  1117. vmul.s16 q3, q3, q10
  1118. add DCT_TABLE, DCT_TABLE, #16
  1119. vld1.16 {d24, d25}, [DCT_TABLE, :128]!
  1120. vmul.s16 q5, q5, q12
  1121. add DCT_TABLE, DCT_TABLE, #16
  1122. vld1.16 {d26, d27}, [DCT_TABLE, :128]!
  1123. vmul.s16 q6, q6, q13
  1124. add DCT_TABLE, DCT_TABLE, #16
  1125. vld1.16 {d30, d31}, [DCT_TABLE, :128]!
  1126. vmul.s16 q8, q8, q15
  1127. /* Pass 1 */
  1128. #if 0
  1129. idct_helper d4, d6, d10, d12, d16, 13, d4, d6
  1130. transpose_4x4 d4, d6, d8, d10
  1131. idct_helper d5, d7, d11, d13, d17, 13, d5, d7
  1132. transpose_4x4 d5, d7, d9, d11
  1133. #else
  1134. vmull.s16 q13, d6, d0[3]
  1135. vmlal.s16 q13, d10, d0[2]
  1136. vmlal.s16 q13, d12, d0[1]
  1137. vmlal.s16 q13, d16, d0[0]
  1138. vmull.s16 q12, d7, d0[3]
  1139. vmlal.s16 q12, d11, d0[2]
  1140. vmlal.s16 q12, d13, d0[1]
  1141. vmlal.s16 q12, d17, d0[0]
  1142. vshll.s16 q14, d4, #15
  1143. vshll.s16 q15, d5, #15
  1144. vadd.s32 q10, q14, q13
  1145. vsub.s32 q14, q14, q13
  1146. vrshrn.s32 d4, q10, #13
  1147. vrshrn.s32 d6, q14, #13
  1148. vadd.s32 q10, q15, q12
  1149. vsub.s32 q14, q15, q12
  1150. vrshrn.s32 d5, q10, #13
  1151. vrshrn.s32 d7, q14, #13
  1152. vtrn.16 q2, q3
  1153. vtrn.32 q3, q5
  1154. #endif
  1155. /* Pass 2 */
  1156. idct_helper d4, d6, d10, d7, d11, 20, d26, d27
  1157. /* Range limit */
  1158. vmov.u16 q15, #0x80
  1159. vadd.s16 q13, q13, q15
  1160. vqmovun.s16 d26, q13
  1161. vqmovun.s16 d27, q13
  1162. /* Store results to the output buffer */
  1163. ldmia OUTPUT_BUF, {TMP1, TMP2}
  1164. add TMP1, TMP1, OUTPUT_COL
  1165. add TMP2, TMP2, OUTPUT_COL
  1166. vst1.8 {d26[0]}, [TMP1]!
  1167. vst1.8 {d27[4]}, [TMP1]!
  1168. vst1.8 {d26[1]}, [TMP2]!
  1169. vst1.8 {d27[5]}, [TMP2]!
  1170. vpop {d8-d15}
  1171. bx lr
  1172. .unreq DCT_TABLE
  1173. .unreq COEF_BLOCK
  1174. .unreq OUTPUT_BUF
  1175. .unreq OUTPUT_COL
  1176. .unreq TMP1
  1177. .unreq TMP2
  1178. .purgem idct_helper
  1179. /*****************************************************************************/
  1180. /*
  1181. * jsimd_ycc_extrgb_convert_neon
  1182. * jsimd_ycc_extbgr_convert_neon
  1183. * jsimd_ycc_extrgbx_convert_neon
  1184. * jsimd_ycc_extbgrx_convert_neon
  1185. * jsimd_ycc_extxbgr_convert_neon
  1186. * jsimd_ycc_extxrgb_convert_neon
  1187. *
  1188. * Colorspace conversion YCbCr -> RGB
  1189. */
  1190. .macro do_load size
  1191. .if \size == 8
  1192. vld1.8 {d4}, [U, :64]!
  1193. vld1.8 {d5}, [V, :64]!
  1194. vld1.8 {d0}, [Y, :64]!
  1195. pld [U, #64]
  1196. pld [V, #64]
  1197. pld [Y, #64]
  1198. .elseif \size == 4
  1199. vld1.8 {d4[0]}, [U]!
  1200. vld1.8 {d4[1]}, [U]!
  1201. vld1.8 {d4[2]}, [U]!
  1202. vld1.8 {d4[3]}, [U]!
  1203. vld1.8 {d5[0]}, [V]!
  1204. vld1.8 {d5[1]}, [V]!
  1205. vld1.8 {d5[2]}, [V]!
  1206. vld1.8 {d5[3]}, [V]!
  1207. vld1.8 {d0[0]}, [Y]!
  1208. vld1.8 {d0[1]}, [Y]!
  1209. vld1.8 {d0[2]}, [Y]!
  1210. vld1.8 {d0[3]}, [Y]!
  1211. .elseif \size == 2
  1212. vld1.8 {d4[4]}, [U]!
  1213. vld1.8 {d4[5]}, [U]!
  1214. vld1.8 {d5[4]}, [V]!
  1215. vld1.8 {d5[5]}, [V]!
  1216. vld1.8 {d0[4]}, [Y]!
  1217. vld1.8 {d0[5]}, [Y]!
  1218. .elseif \size == 1
  1219. vld1.8 {d4[6]}, [U]!
  1220. vld1.8 {d5[6]}, [V]!
  1221. vld1.8 {d0[6]}, [Y]!
  1222. .else
  1223. .error unsupported macroblock size
  1224. .endif
  1225. .endm
  1226. .macro do_store bpp, size
  1227. .if \bpp == 24
  1228. .if \size == 8
  1229. vst3.8 {d10, d11, d12}, [RGB]!
  1230. .elseif \size == 4
  1231. vst3.8 {d10[0], d11[0], d12[0]}, [RGB]!
  1232. vst3.8 {d10[1], d11[1], d12[1]}, [RGB]!
  1233. vst3.8 {d10[2], d11[2], d12[2]}, [RGB]!
  1234. vst3.8 {d10[3], d11[3], d12[3]}, [RGB]!
  1235. .elseif \size == 2
  1236. vst3.8 {d10[4], d11[4], d12[4]}, [RGB]!
  1237. vst3.8 {d10[5], d11[5], d12[5]}, [RGB]!
  1238. .elseif \size == 1
  1239. vst3.8 {d10[6], d11[6], d12[6]}, [RGB]!
  1240. .else
  1241. .error unsupported macroblock size
  1242. .endif
  1243. .elseif \bpp == 32
  1244. .if \size == 8
  1245. vst4.8 {d10, d11, d12, d13}, [RGB]!
  1246. .elseif \size == 4
  1247. vst4.8 {d10[0], d11[0], d12[0], d13[0]}, [RGB]!
  1248. vst4.8 {d10[1], d11[1], d12[1], d13[1]}, [RGB]!
  1249. vst4.8 {d10[2], d11[2], d12[2], d13[2]}, [RGB]!
  1250. vst4.8 {d10[3], d11[3], d12[3], d13[3]}, [RGB]!
  1251. .elseif \size == 2
  1252. vst4.8 {d10[4], d11[4], d12[4], d13[4]}, [RGB]!
  1253. vst4.8 {d10[5], d11[5], d12[5], d13[5]}, [RGB]!
  1254. .elseif \size == 1
  1255. vst4.8 {d10[6], d11[6], d12[6], d13[6]}, [RGB]!
  1256. .else
  1257. .error unsupported macroblock size
  1258. .endif
  1259. .elseif \bpp == 16
  1260. .if \size == 8
  1261. vst1.16 {q15}, [RGB]!
  1262. .elseif \size == 4
  1263. vst1.16 {d30}, [RGB]!
  1264. .elseif \size == 2
  1265. vst1.16 {d31[0]}, [RGB]!
  1266. vst1.16 {d31[1]}, [RGB]!
  1267. .elseif \size == 1
  1268. vst1.16 {d31[2]}, [RGB]!
  1269. .else
  1270. .error unsupported macroblock size
  1271. .endif
  1272. .else
.error "unsupported bpp"
  1274. .endif
  1275. .endm
  1276. .macro generate_jsimd_ycc_rgb_convert_neon colorid, bpp, r_offs, g_offs, b_offs
/*
 * 2-stage pipelined YCbCr->RGB conversion
 */
  1280. .macro do_yuv_to_rgb_stage1
  1281. vaddw.u8 q3, q1, d4 /* q3 = u - 128 */
vaddw.u8 q4, q1, d5 /* q4 = v - 128 */
  1283. vmull.s16 q10, d6, d1[1] /* multiply by -11277 */
  1284. vmlal.s16 q10, d8, d1[2] /* multiply by -23401 */
  1285. vmull.s16 q11, d7, d1[1] /* multiply by -11277 */
  1286. vmlal.s16 q11, d9, d1[2] /* multiply by -23401 */
  1287. vmull.s16 q12, d8, d1[0] /* multiply by 22971 */
  1288. vmull.s16 q13, d9, d1[0] /* multiply by 22971 */
  1289. vmull.s16 q14, d6, d1[3] /* multiply by 29033 */
  1290. vmull.s16 q15, d7, d1[3] /* multiply by 29033 */
  1291. .endm
  1292. .macro do_yuv_to_rgb_stage2
  1293. vrshrn.s32 d20, q10, #15
  1294. vrshrn.s32 d21, q11, #15
  1295. vrshrn.s32 d24, q12, #14
  1296. vrshrn.s32 d25, q13, #14
  1297. vrshrn.s32 d28, q14, #14
  1298. vrshrn.s32 d29, q15, #14
  1299. vaddw.u8 q11, q10, d0
  1300. vaddw.u8 q12, q12, d0
  1301. vaddw.u8 q14, q14, d0
  1302. .if \bpp != 16
  1303. vqmovun.s16 d1\g_offs, q11
  1304. vqmovun.s16 d1\r_offs, q12
  1305. vqmovun.s16 d1\b_offs, q14
  1306. .else /* rgb565 */
  1307. vqshlu.s16 q13, q11, #8
  1308. vqshlu.s16 q15, q12, #8
  1309. vqshlu.s16 q14, q14, #8
  1310. vsri.u16 q15, q13, #5
  1311. vsri.u16 q15, q14, #11
  1312. .endif
  1313. .endm
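/*
 * In the rgb565 branch above, VQSHLU saturates each 16-bit channel value to
 * the 0..255 range and moves it into the high byte of its lane; the two VSRI
 * instructions then pack the three channels into 5:6:5 format. A scalar
 * sketch of the resulting layout (assuming r, g, b are already clamped to
 * 0..255):
 *
 *   unsigned short rgb565 = (unsigned short)(((r & 0xF8) << 8) |
 *                                            ((g & 0xFC) << 3) |
 *                                             (b >> 3));
 */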
  1314. .macro do_yuv_to_rgb_stage2_store_load_stage1
  1315. /* "do_yuv_to_rgb_stage2" and "store" */
  1316. vrshrn.s32 d20, q10, #15
  1317. /* "load" and "do_yuv_to_rgb_stage1" */
  1318. pld [U, #64]
  1319. vrshrn.s32 d21, q11, #15
  1320. pld [V, #64]
  1321. vrshrn.s32 d24, q12, #14
  1322. vrshrn.s32 d25, q13, #14
  1323. vld1.8 {d4}, [U, :64]!
  1324. vrshrn.s32 d28, q14, #14
  1325. vld1.8 {d5}, [V, :64]!
  1326. vrshrn.s32 d29, q15, #14
  1327. vaddw.u8 q3, q1, d4 /* q3 = u - 128 */
vaddw.u8 q4, q1, d5 /* q4 = v - 128 */
  1329. vaddw.u8 q11, q10, d0
  1330. vmull.s16 q10, d6, d1[1] /* multiply by -11277 */
  1331. vmlal.s16 q10, d8, d1[2] /* multiply by -23401 */
  1332. vaddw.u8 q12, q12, d0
  1333. vaddw.u8 q14, q14, d0
  1334. .if \bpp != 16 /**************** rgb24/rgb32 ******************************/
  1335. vqmovun.s16 d1\g_offs, q11
  1336. pld [Y, #64]
  1337. vqmovun.s16 d1\r_offs, q12
  1338. vld1.8 {d0}, [Y, :64]!
  1339. vqmovun.s16 d1\b_offs, q14
  1340. vmull.s16 q11, d7, d1[1] /* multiply by -11277 */
  1341. vmlal.s16 q11, d9, d1[2] /* multiply by -23401 */
  1342. do_store \bpp, 8
  1343. vmull.s16 q12, d8, d1[0] /* multiply by 22971 */
  1344. vmull.s16 q13, d9, d1[0] /* multiply by 22971 */
  1345. vmull.s16 q14, d6, d1[3] /* multiply by 29033 */
  1346. vmull.s16 q15, d7, d1[3] /* multiply by 29033 */
  1347. .else /**************************** rgb565 ********************************/
  1348. vqshlu.s16 q13, q11, #8
  1349. pld [Y, #64]
  1350. vqshlu.s16 q15, q12, #8
  1351. vqshlu.s16 q14, q14, #8
  1352. vld1.8 {d0}, [Y, :64]!
  1353. vmull.s16 q11, d7, d1[1]
  1354. vmlal.s16 q11, d9, d1[2]
  1355. vsri.u16 q15, q13, #5
  1356. vmull.s16 q12, d8, d1[0]
  1357. vsri.u16 q15, q14, #11
  1358. vmull.s16 q13, d9, d1[0]
  1359. vmull.s16 q14, d6, d1[3]
  1360. do_store \bpp, 8
  1361. vmull.s16 q15, d7, d1[3]
  1362. .endif
  1363. .endm
  1364. .macro do_yuv_to_rgb
  1365. do_yuv_to_rgb_stage1
  1366. do_yuv_to_rgb_stage2
  1367. .endm
/* Apple gas crashes on adrl; work around that by using adr instead.
 * This requires a copy of these constants for each function.
 */
  1371. .balign 16
  1372. jsimd_ycc_\colorid\()_neon_consts:
  1373. .short 0, 0, 0, 0
  1374. .short 22971, -11277, -23401, 29033
  1375. .short -128, -128, -128, -128
  1376. .short -128, -128, -128, -128
  1377. asm_function jsimd_ycc_\colorid\()_convert_neon
  1378. OUTPUT_WIDTH .req r0
  1379. INPUT_BUF .req r1
  1380. INPUT_ROW .req r2
  1381. OUTPUT_BUF .req r3
  1382. NUM_ROWS .req r4
  1383. INPUT_BUF0 .req r5
  1384. INPUT_BUF1 .req r6
  1385. INPUT_BUF2 .req INPUT_BUF
  1386. RGB .req r7
  1387. Y .req r8
  1388. U .req r9
  1389. V .req r10
  1390. N .req ip
  1391. /* Load constants to d1, d2, d3 (d0 is just used for padding) */
  1392. adr ip, jsimd_ycc_\colorid\()_neon_consts
  1393. vld1.16 {d0, d1, d2, d3}, [ip, :128]
  1394. /* Save ARM registers and handle input arguments */
  1395. push {r4, r5, r6, r7, r8, r9, r10, lr}
  1396. ldr NUM_ROWS, [sp, #(4 * 8)]
  1397. ldr INPUT_BUF0, [INPUT_BUF]
  1398. ldr INPUT_BUF1, [INPUT_BUF, #4]
  1399. ldr INPUT_BUF2, [INPUT_BUF, #8]
  1400. .unreq INPUT_BUF
  1401. /* Save NEON registers */
  1402. vpush {d8-d15}
  1403. /* Initially set d10, d11, d12, d13 to 0xFF */
  1404. vmov.u8 q5, #255
  1405. vmov.u8 q6, #255
  1406. /* Outer loop over scanlines */
  1407. cmp NUM_ROWS, #1
  1408. blt 9f
  1409. 0:
  1410. ldr Y, [INPUT_BUF0, INPUT_ROW, lsl #2]
  1411. ldr U, [INPUT_BUF1, INPUT_ROW, lsl #2]
  1412. mov N, OUTPUT_WIDTH
  1413. ldr V, [INPUT_BUF2, INPUT_ROW, lsl #2]
  1414. add INPUT_ROW, INPUT_ROW, #1
  1415. ldr RGB, [OUTPUT_BUF], #4
  1416. /* Inner loop over pixels */
  1417. subs N, N, #8
  1418. blt 3f
  1419. do_load 8
  1420. do_yuv_to_rgb_stage1
  1421. subs N, N, #8
  1422. blt 2f
  1423. 1:
  1424. do_yuv_to_rgb_stage2_store_load_stage1
  1425. subs N, N, #8
  1426. bge 1b
  1427. 2:
  1428. do_yuv_to_rgb_stage2
  1429. do_store \bpp, 8
  1430. tst N, #7
  1431. beq 8f
  1432. 3:
  1433. tst N, #4
  1434. beq 3f
  1435. do_load 4
  1436. 3:
  1437. tst N, #2
  1438. beq 4f
  1439. do_load 2
  1440. 4:
  1441. tst N, #1
  1442. beq 5f
  1443. do_load 1
  1444. 5:
  1445. do_yuv_to_rgb
  1446. tst N, #4
  1447. beq 6f
  1448. do_store \bpp, 4
  1449. 6:
  1450. tst N, #2
  1451. beq 7f
  1452. do_store \bpp, 2
  1453. 7:
  1454. tst N, #1
  1455. beq 8f
  1456. do_store \bpp, 1
  1457. 8:
  1458. subs NUM_ROWS, NUM_ROWS, #1
  1459. bgt 0b
  1460. 9:
  1461. /* Restore all registers and return */
  1462. vpop {d8-d15}
  1463. pop {r4, r5, r6, r7, r8, r9, r10, pc}
  1464. .unreq OUTPUT_WIDTH
  1465. .unreq INPUT_ROW
  1466. .unreq OUTPUT_BUF
  1467. .unreq NUM_ROWS
  1468. .unreq INPUT_BUF0
  1469. .unreq INPUT_BUF1
  1470. .unreq INPUT_BUF2
  1471. .unreq RGB
  1472. .unreq Y
  1473. .unreq U
  1474. .unreq V
  1475. .unreq N
  1476. .purgem do_yuv_to_rgb
  1477. .purgem do_yuv_to_rgb_stage1
  1478. .purgem do_yuv_to_rgb_stage2
  1479. .purgem do_yuv_to_rgb_stage2_store_load_stage1
  1480. .endm
  1481. /*--------------------------------- id ----- bpp R G B */
  1482. generate_jsimd_ycc_rgb_convert_neon extrgb, 24, 0, 1, 2
  1483. generate_jsimd_ycc_rgb_convert_neon extbgr, 24, 2, 1, 0
  1484. generate_jsimd_ycc_rgb_convert_neon extrgbx, 32, 0, 1, 2
  1485. generate_jsimd_ycc_rgb_convert_neon extbgrx, 32, 2, 1, 0
  1486. generate_jsimd_ycc_rgb_convert_neon extxbgr, 32, 3, 2, 1
  1487. generate_jsimd_ycc_rgb_convert_neon extxrgb, 32, 1, 2, 3
  1488. generate_jsimd_ycc_rgb_convert_neon rgb565, 16, 0, 0, 0
  1489. .purgem do_load
  1490. .purgem do_store
/*****************************************************************************/

/*
 * jsimd_extrgb_ycc_convert_neon
 * jsimd_extbgr_ycc_convert_neon
 * jsimd_extrgbx_ycc_convert_neon
 * jsimd_extbgrx_ycc_convert_neon
 * jsimd_extxbgr_ycc_convert_neon
 * jsimd_extxrgb_ycc_convert_neon
 *
 * Colorspace conversion RGB -> YCbCr
 */
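/*
 * For reference, a minimal scalar C sketch of the fixed-point arithmetic
 * used below (illustrative helper name, not part of the library; no clamping
 * is needed because the coefficients keep the results in 0..255):
 *
 *   static void rgb_to_ycc_pixel(int r, int g, int b,
 *                                unsigned char *y, unsigned char *cb,
 *                                unsigned char *cr)
 *   {
 *     *y  = (unsigned char)((19595 * r + 38470 * g + 7471 * b
 *                            + 32768) >> 16);
 *     *cb = (unsigned char)(((128 << 16) + 32767
 *                            - 11059 * r - 21709 * g + 32768 * b) >> 16);
 *     *cr = (unsigned char)(((128 << 16) + 32767
 *                            + 32768 * r - 27439 * g - 5329 * b) >> 16);
 *   }
 *
 * The (128 << 16) term is the chroma bias; the +32768 and +32767 terms
 * reproduce the rounding behaviour of the rounding/truncating narrowing
 * shifts used by the NEON code.
 */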
  1502. .macro do_store size
  1503. .if \size == 8
  1504. vst1.8 {d20}, [Y]!
  1505. vst1.8 {d21}, [U]!
  1506. vst1.8 {d22}, [V]!
  1507. .elseif \size == 4
  1508. vst1.8 {d20[0]}, [Y]!
  1509. vst1.8 {d20[1]}, [Y]!
  1510. vst1.8 {d20[2]}, [Y]!
  1511. vst1.8 {d20[3]}, [Y]!
  1512. vst1.8 {d21[0]}, [U]!
  1513. vst1.8 {d21[1]}, [U]!
  1514. vst1.8 {d21[2]}, [U]!
  1515. vst1.8 {d21[3]}, [U]!
  1516. vst1.8 {d22[0]}, [V]!
  1517. vst1.8 {d22[1]}, [V]!
  1518. vst1.8 {d22[2]}, [V]!
  1519. vst1.8 {d22[3]}, [V]!
  1520. .elseif \size == 2
  1521. vst1.8 {d20[4]}, [Y]!
  1522. vst1.8 {d20[5]}, [Y]!
  1523. vst1.8 {d21[4]}, [U]!
  1524. vst1.8 {d21[5]}, [U]!
  1525. vst1.8 {d22[4]}, [V]!
  1526. vst1.8 {d22[5]}, [V]!
  1527. .elseif \size == 1
  1528. vst1.8 {d20[6]}, [Y]!
  1529. vst1.8 {d21[6]}, [U]!
  1530. vst1.8 {d22[6]}, [V]!
  1531. .else
.error "unsupported macroblock size"
  1533. .endif
  1534. .endm
  1535. .macro do_load bpp, size
  1536. .if \bpp == 24
  1537. .if \size == 8
  1538. vld3.8 {d10, d11, d12}, [RGB]!
  1539. pld [RGB, #128]
  1540. .elseif \size == 4
  1541. vld3.8 {d10[0], d11[0], d12[0]}, [RGB]!
  1542. vld3.8 {d10[1], d11[1], d12[1]}, [RGB]!
  1543. vld3.8 {d10[2], d11[2], d12[2]}, [RGB]!
  1544. vld3.8 {d10[3], d11[3], d12[3]}, [RGB]!
  1545. .elseif \size == 2
  1546. vld3.8 {d10[4], d11[4], d12[4]}, [RGB]!
  1547. vld3.8 {d10[5], d11[5], d12[5]}, [RGB]!
  1548. .elseif \size == 1
  1549. vld3.8 {d10[6], d11[6], d12[6]}, [RGB]!
  1550. .else
.error "unsupported macroblock size"
  1552. .endif
  1553. .elseif \bpp == 32
  1554. .if \size == 8
  1555. vld4.8 {d10, d11, d12, d13}, [RGB]!
  1556. pld [RGB, #128]
  1557. .elseif \size == 4
  1558. vld4.8 {d10[0], d11[0], d12[0], d13[0]}, [RGB]!
  1559. vld4.8 {d10[1], d11[1], d12[1], d13[1]}, [RGB]!
  1560. vld4.8 {d10[2], d11[2], d12[2], d13[2]}, [RGB]!
  1561. vld4.8 {d10[3], d11[3], d12[3], d13[3]}, [RGB]!
  1562. .elseif \size == 2
  1563. vld4.8 {d10[4], d11[4], d12[4], d13[4]}, [RGB]!
  1564. vld4.8 {d10[5], d11[5], d12[5], d13[5]}, [RGB]!
  1565. .elseif \size == 1
  1566. vld4.8 {d10[6], d11[6], d12[6], d13[6]}, [RGB]!
  1567. .else
.error "unsupported macroblock size"
  1569. .endif
  1570. .else
.error "unsupported bpp"
  1572. .endif
  1573. .endm
  1574. .macro generate_jsimd_rgb_ycc_convert_neon colorid, bpp, r_offs, g_offs, b_offs
/*
 * 2-stage pipelined RGB->YCbCr conversion
 */
  1578. .macro do_rgb_to_yuv_stage1
  1579. vmovl.u8 q2, d1\r_offs /* r = { d4, d5 } */
  1580. vmovl.u8 q3, d1\g_offs /* g = { d6, d7 } */
  1581. vmovl.u8 q4, d1\b_offs /* b = { d8, d9 } */
  1582. vmull.u16 q7, d4, d0[0]
  1583. vmlal.u16 q7, d6, d0[1]
  1584. vmlal.u16 q7, d8, d0[2]
  1585. vmull.u16 q8, d5, d0[0]
  1586. vmlal.u16 q8, d7, d0[1]
  1587. vmlal.u16 q8, d9, d0[2]
  1588. vrev64.32 q9, q1
  1589. vrev64.32 q13, q1
  1590. vmlsl.u16 q9, d4, d0[3]
  1591. vmlsl.u16 q9, d6, d1[0]
  1592. vmlal.u16 q9, d8, d1[1]
  1593. vmlsl.u16 q13, d5, d0[3]
  1594. vmlsl.u16 q13, d7, d1[0]
  1595. vmlal.u16 q13, d9, d1[1]
  1596. vrev64.32 q14, q1
  1597. vrev64.32 q15, q1
  1598. vmlal.u16 q14, d4, d1[1]
  1599. vmlsl.u16 q14, d6, d1[2]
  1600. vmlsl.u16 q14, d8, d1[3]
  1601. vmlal.u16 q15, d5, d1[1]
  1602. vmlsl.u16 q15, d7, d1[2]
  1603. vmlsl.u16 q15, d9, d1[3]
  1604. .endm
  1605. .macro do_rgb_to_yuv_stage2
  1606. vrshrn.u32 d20, q7, #16
  1607. vrshrn.u32 d21, q8, #16
  1608. vshrn.u32 d22, q9, #16
  1609. vshrn.u32 d23, q13, #16
  1610. vshrn.u32 d24, q14, #16
  1611. vshrn.u32 d25, q15, #16
  1612. vmovn.u16 d20, q10 /* d20 = y */
  1613. vmovn.u16 d21, q11 /* d21 = u */
  1614. vmovn.u16 d22, q12 /* d22 = v */
  1615. .endm
  1616. .macro do_rgb_to_yuv
  1617. do_rgb_to_yuv_stage1
  1618. do_rgb_to_yuv_stage2
  1619. .endm
  1620. .macro do_rgb_to_yuv_stage2_store_load_stage1
  1621. vrshrn.u32 d20, q7, #16
  1622. vrshrn.u32 d21, q8, #16
  1623. vshrn.u32 d22, q9, #16
  1624. vrev64.32 q9, q1
  1625. vshrn.u32 d23, q13, #16
  1626. vrev64.32 q13, q1
  1627. vshrn.u32 d24, q14, #16
  1628. vshrn.u32 d25, q15, #16
  1629. do_load \bpp, 8
  1630. vmovn.u16 d20, q10 /* d20 = y */
  1631. vmovl.u8 q2, d1\r_offs /* r = { d4, d5 } */
  1632. vmovn.u16 d21, q11 /* d21 = u */
  1633. vmovl.u8 q3, d1\g_offs /* g = { d6, d7 } */
  1634. vmovn.u16 d22, q12 /* d22 = v */
  1635. vmovl.u8 q4, d1\b_offs /* b = { d8, d9 } */
  1636. vmull.u16 q7, d4, d0[0]
  1637. vmlal.u16 q7, d6, d0[1]
  1638. vmlal.u16 q7, d8, d0[2]
  1639. vst1.8 {d20}, [Y]!
  1640. vmull.u16 q8, d5, d0[0]
  1641. vmlal.u16 q8, d7, d0[1]
  1642. vmlal.u16 q8, d9, d0[2]
  1643. vmlsl.u16 q9, d4, d0[3]
  1644. vmlsl.u16 q9, d6, d1[0]
  1645. vmlal.u16 q9, d8, d1[1]
  1646. vst1.8 {d21}, [U]!
  1647. vmlsl.u16 q13, d5, d0[3]
  1648. vmlsl.u16 q13, d7, d1[0]
  1649. vmlal.u16 q13, d9, d1[1]
  1650. vrev64.32 q14, q1
  1651. vrev64.32 q15, q1
  1652. vmlal.u16 q14, d4, d1[1]
  1653. vmlsl.u16 q14, d6, d1[2]
  1654. vmlsl.u16 q14, d8, d1[3]
  1655. vst1.8 {d22}, [V]!
  1656. vmlal.u16 q15, d5, d1[1]
  1657. vmlsl.u16 q15, d7, d1[2]
  1658. vmlsl.u16 q15, d9, d1[3]
  1659. .endm
  1660. .balign 16
  1661. jsimd_\colorid\()_ycc_neon_consts:
  1662. .short 19595, 38470, 7471, 11059
  1663. .short 21709, 32768, 27439, 5329
  1664. .short 32767, 128, 32767, 128
  1665. .short 32767, 128, 32767, 128
  1666. asm_function jsimd_\colorid\()_ycc_convert_neon
  1667. OUTPUT_WIDTH .req r0
  1668. INPUT_BUF .req r1
  1669. OUTPUT_BUF .req r2
  1670. OUTPUT_ROW .req r3
  1671. NUM_ROWS .req r4
  1672. OUTPUT_BUF0 .req r5
  1673. OUTPUT_BUF1 .req r6
  1674. OUTPUT_BUF2 .req OUTPUT_BUF
  1675. RGB .req r7
  1676. Y .req r8
  1677. U .req r9
  1678. V .req r10
  1679. N .req ip
  1680. /* Load constants to d0, d1, d2, d3 */
  1681. adr ip, jsimd_\colorid\()_ycc_neon_consts
  1682. vld1.16 {d0, d1, d2, d3}, [ip, :128]
  1683. /* Save ARM registers and handle input arguments */
  1684. push {r4, r5, r6, r7, r8, r9, r10, lr}
  1685. ldr NUM_ROWS, [sp, #(4 * 8)]
  1686. ldr OUTPUT_BUF0, [OUTPUT_BUF]
  1687. ldr OUTPUT_BUF1, [OUTPUT_BUF, #4]
  1688. ldr OUTPUT_BUF2, [OUTPUT_BUF, #8]
  1689. .unreq OUTPUT_BUF
  1690. /* Save NEON registers */
  1691. vpush {d8-d15}
  1692. /* Outer loop over scanlines */
  1693. cmp NUM_ROWS, #1
  1694. blt 9f
  1695. 0:
  1696. ldr Y, [OUTPUT_BUF0, OUTPUT_ROW, lsl #2]
  1697. ldr U, [OUTPUT_BUF1, OUTPUT_ROW, lsl #2]
  1698. mov N, OUTPUT_WIDTH
  1699. ldr V, [OUTPUT_BUF2, OUTPUT_ROW, lsl #2]
  1700. add OUTPUT_ROW, OUTPUT_ROW, #1
  1701. ldr RGB, [INPUT_BUF], #4
  1702. /* Inner loop over pixels */
  1703. subs N, N, #8
  1704. blt 3f
  1705. do_load \bpp, 8
  1706. do_rgb_to_yuv_stage1
  1707. subs N, N, #8
  1708. blt 2f
  1709. 1:
  1710. do_rgb_to_yuv_stage2_store_load_stage1
  1711. subs N, N, #8
  1712. bge 1b
  1713. 2:
  1714. do_rgb_to_yuv_stage2
  1715. do_store 8
  1716. tst N, #7
  1717. beq 8f
  1718. 3:
  1719. tst N, #4
  1720. beq 3f
  1721. do_load \bpp, 4
  1722. 3:
  1723. tst N, #2
  1724. beq 4f
  1725. do_load \bpp, 2
  1726. 4:
  1727. tst N, #1
  1728. beq 5f
  1729. do_load \bpp, 1
  1730. 5:
  1731. do_rgb_to_yuv
  1732. tst N, #4
  1733. beq 6f
  1734. do_store 4
  1735. 6:
  1736. tst N, #2
  1737. beq 7f
  1738. do_store 2
  1739. 7:
  1740. tst N, #1
  1741. beq 8f
  1742. do_store 1
  1743. 8:
  1744. subs NUM_ROWS, NUM_ROWS, #1
  1745. bgt 0b
  1746. 9:
  1747. /* Restore all registers and return */
  1748. vpop {d8-d15}
  1749. pop {r4, r5, r6, r7, r8, r9, r10, pc}
  1750. .unreq OUTPUT_WIDTH
  1751. .unreq OUTPUT_ROW
  1752. .unreq INPUT_BUF
  1753. .unreq NUM_ROWS
  1754. .unreq OUTPUT_BUF0
  1755. .unreq OUTPUT_BUF1
  1756. .unreq OUTPUT_BUF2
  1757. .unreq RGB
  1758. .unreq Y
  1759. .unreq U
  1760. .unreq V
  1761. .unreq N
  1762. .purgem do_rgb_to_yuv
  1763. .purgem do_rgb_to_yuv_stage1
  1764. .purgem do_rgb_to_yuv_stage2
  1765. .purgem do_rgb_to_yuv_stage2_store_load_stage1
  1766. .endm
  1767. /*--------------------------------- id ----- bpp R G B */
  1768. generate_jsimd_rgb_ycc_convert_neon extrgb, 24, 0, 1, 2
  1769. generate_jsimd_rgb_ycc_convert_neon extbgr, 24, 2, 1, 0
  1770. generate_jsimd_rgb_ycc_convert_neon extrgbx, 32, 0, 1, 2
  1771. generate_jsimd_rgb_ycc_convert_neon extbgrx, 32, 2, 1, 0
  1772. generate_jsimd_rgb_ycc_convert_neon extxbgr, 32, 3, 2, 1
  1773. generate_jsimd_rgb_ycc_convert_neon extxrgb, 32, 1, 2, 3
  1774. .purgem do_load
  1775. .purgem do_store
/*****************************************************************************/

/*
 * Load data into workspace, applying unsigned->signed conversion
 *
 * TODO: can be combined with 'jsimd_fdct_ifast_neon' to get
 *       rid of VST1.16 instructions
 */
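/*
 * A scalar C sketch of what this routine computes (variable names mirror
 * the register aliases below and are illustrative only):
 *
 *   // Convert an 8x8 block of unsigned 8-bit samples into signed 16-bit
 *   // values centred around zero.
 *   for (int row = 0; row < 8; row++)
 *     for (int col = 0; col < 8; col++)
 *       workspace[row * 8 + col] =
 *           (short)sample_data[row][start_col + col] - 128;
 */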
  1783. asm_function jsimd_convsamp_neon
  1784. SAMPLE_DATA .req r0
  1785. START_COL .req r1
  1786. WORKSPACE .req r2
  1787. TMP1 .req r3
  1788. TMP2 .req r4
  1789. TMP3 .req r5
  1790. TMP4 .req ip
  1791. push {r4, r5}
  1792. vmov.u8 d0, #128
  1793. ldmia SAMPLE_DATA!, {TMP1, TMP2, TMP3, TMP4}
  1794. add TMP1, TMP1, START_COL
  1795. add TMP2, TMP2, START_COL
  1796. add TMP3, TMP3, START_COL
  1797. add TMP4, TMP4, START_COL
  1798. vld1.8 {d16}, [TMP1]
  1799. vsubl.u8 q8, d16, d0
  1800. vld1.8 {d18}, [TMP2]
  1801. vsubl.u8 q9, d18, d0
  1802. vld1.8 {d20}, [TMP3]
  1803. vsubl.u8 q10, d20, d0
  1804. vld1.8 {d22}, [TMP4]
  1805. ldmia SAMPLE_DATA!, {TMP1, TMP2, TMP3, TMP4}
  1806. vsubl.u8 q11, d22, d0
  1807. vst1.16 {d16, d17, d18, d19}, [WORKSPACE, :128]!
  1808. add TMP1, TMP1, START_COL
  1809. add TMP2, TMP2, START_COL
  1810. vst1.16 {d20, d21, d22, d23}, [WORKSPACE, :128]!
  1811. add TMP3, TMP3, START_COL
  1812. add TMP4, TMP4, START_COL
  1813. vld1.8 {d24}, [TMP1]
  1814. vsubl.u8 q12, d24, d0
  1815. vld1.8 {d26}, [TMP2]
  1816. vsubl.u8 q13, d26, d0
  1817. vld1.8 {d28}, [TMP3]
  1818. vsubl.u8 q14, d28, d0
  1819. vld1.8 {d30}, [TMP4]
  1820. vsubl.u8 q15, d30, d0
  1821. vst1.16 {d24, d25, d26, d27}, [WORKSPACE, :128]!
  1822. vst1.16 {d28, d29, d30, d31}, [WORKSPACE, :128]!
  1823. pop {r4, r5}
  1824. bx lr
  1825. .unreq SAMPLE_DATA
  1826. .unreq START_COL
  1827. .unreq WORKSPACE
  1828. .unreq TMP1
  1829. .unreq TMP2
  1830. .unreq TMP3
  1831. .unreq TMP4
/*****************************************************************************/

/*
 * jsimd_fdct_ifast_neon
 *
 * This function contains a fast, not so accurate integer implementation of
 * the forward DCT (Discrete Cosine Transform). It uses the same calculations
 * and produces exactly the same output as IJG's original 'jpeg_fdct_ifast'
 * function from jfdctfst.c.
 *
 * TODO: can be combined with 'jsimd_convsamp_neon' to get
 *       rid of a bunch of VLD1.16 instructions
 */
  1844. #define XFIX_0_382683433 d0[0]
  1845. #define XFIX_0_541196100 d0[1]
  1846. #define XFIX_0_707106781 d0[2]
  1847. #define XFIX_1_306562965 d0[3]
  1848. .balign 16
  1849. jsimd_fdct_ifast_neon_consts:
  1850. .short (98 * 128) /* XFIX_0_382683433 */
  1851. .short (139 * 128) /* XFIX_0_541196100 */
  1852. .short (181 * 128) /* XFIX_0_707106781 */
  1853. .short (334 * 128 - 256 * 128) /* XFIX_1_306562965 */
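/*
 * Note on the constants above: each value is the 8-bit fixed-point constant
 * from jfdctfst.c multiplied by 128 (e.g. 98 = round(0.382683433 * 256),
 * stored here as 98 * 128). VQDMULH.S16 returns the high half of the doubled
 * product, i.e. (a * b * 2) >> 16 ignoring saturation, so multiplying by
 * (c * 128) evaluates (a * c) >> 8, matching the 8-bit MULTIPLY()/DESCALE()
 * of the C code. XFIX_1_306562965 is stored minus 1.0 (334 * 128 - 256 * 128)
 * because the full value does not fit in a signed 16-bit lane; the code adds
 * the input back in separately. A scalar C sketch (illustrative helper name):
 *
 *   static short multiply_fix8(short a, short c8)  // c8 = constant * 256
 *   {
 *     return (short)(((int)a * (int)c8) >> 8);
 *   }
 */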
  1854. asm_function jsimd_fdct_ifast_neon
  1855. DATA .req r0
  1856. TMP .req ip
  1857. vpush {d8-d15}
  1858. /* Load constants */
  1859. adr TMP, jsimd_fdct_ifast_neon_consts
  1860. vld1.16 {d0}, [TMP, :64]
/* Load all DATA into NEON registers with the following allocation:
 *       0 1 2 3 | 4 5 6 7
 *      ---------+--------
 *   0  | d16    | d17    | q8
 *   1  | d18    | d19    | q9
 *   2  | d20    | d21    | q10
 *   3  | d22    | d23    | q11
 *   4  | d24    | d25    | q12
 *   5  | d26    | d27    | q13
 *   6  | d28    | d29    | q14
 *   7  | d30    | d31    | q15
 */
  1873. vld1.16 {d16, d17, d18, d19}, [DATA, :128]!
  1874. vld1.16 {d20, d21, d22, d23}, [DATA, :128]!
  1875. vld1.16 {d24, d25, d26, d27}, [DATA, :128]!
  1876. vld1.16 {d28, d29, d30, d31}, [DATA, :128]
  1877. sub DATA, DATA, #(128 - 32)
  1878. mov TMP, #2
  1879. 1:
  1880. /* Transpose */
  1881. vtrn.16 q12, q13
  1882. vtrn.16 q10, q11
  1883. vtrn.16 q8, q9
  1884. vtrn.16 q14, q15
  1885. vtrn.32 q9, q11
  1886. vtrn.32 q13, q15
  1887. vtrn.32 q8, q10
  1888. vtrn.32 q12, q14
  1889. vswp d30, d23
  1890. vswp d24, d17
  1891. vswp d26, d19
  1892. /* 1-D FDCT */
  1893. vadd.s16 q2, q11, q12
  1894. vswp d28, d21
  1895. vsub.s16 q12, q11, q12
  1896. vsub.s16 q6, q10, q13
  1897. vadd.s16 q10, q10, q13
  1898. vsub.s16 q7, q9, q14
  1899. vadd.s16 q9, q9, q14
  1900. vsub.s16 q1, q8, q15
  1901. vadd.s16 q8, q8, q15
  1902. vsub.s16 q4, q9, q10
  1903. vsub.s16 q5, q8, q2
  1904. vadd.s16 q3, q9, q10
  1905. vadd.s16 q4, q4, q5
  1906. vadd.s16 q2, q8, q2
  1907. vqdmulh.s16 q4, q4, XFIX_0_707106781
  1908. vadd.s16 q11, q12, q6
  1909. vadd.s16 q8, q2, q3
  1910. vsub.s16 q12, q2, q3
  1911. vadd.s16 q3, q6, q7
  1912. vadd.s16 q7, q7, q1
  1913. vqdmulh.s16 q3, q3, XFIX_0_707106781
  1914. vsub.s16 q6, q11, q7
  1915. vadd.s16 q10, q5, q4
  1916. vqdmulh.s16 q6, q6, XFIX_0_382683433
  1917. vsub.s16 q14, q5, q4
  1918. vqdmulh.s16 q11, q11, XFIX_0_541196100
  1919. vqdmulh.s16 q5, q7, XFIX_1_306562965
  1920. vadd.s16 q4, q1, q3
  1921. vsub.s16 q3, q1, q3
  1922. vadd.s16 q7, q7, q6
  1923. vadd.s16 q11, q11, q6
  1924. vadd.s16 q7, q7, q5
  1925. vadd.s16 q13, q3, q11
  1926. vsub.s16 q11, q3, q11
  1927. vadd.s16 q9, q4, q7
  1928. vsub.s16 q15, q4, q7
  1929. subs TMP, TMP, #1
  1930. bne 1b
  1931. /* store results */
  1932. vst1.16 {d16, d17, d18, d19}, [DATA, :128]!
  1933. vst1.16 {d20, d21, d22, d23}, [DATA, :128]!
  1934. vst1.16 {d24, d25, d26, d27}, [DATA, :128]!
  1935. vst1.16 {d28, d29, d30, d31}, [DATA, :128]
  1936. vpop {d8-d15}
  1937. bx lr
  1938. .unreq DATA
  1939. .unreq TMP
/*****************************************************************************/

/*
 * GLOBAL(void)
 * jsimd_quantize_neon(JCOEFPTR coef_block, DCTELEM *divisors,
 *                     DCTELEM *workspace);
 *
 * Note: the code uses 2-stage pipelining in order to improve instruction
 *       scheduling and eliminate stalls (this provides ~15% better
 *       performance for this function on both ARM Cortex-A8 and
 *       ARM Cortex-A9 when compared to the non-pipelined variant).
 *       The instructions which belong to the second stage use different
 *       indentation for better readability.
 */
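/*
 * A scalar C sketch of the per-coefficient computation (illustrative; the
 * divisor tables are addressed exactly as the assembly below does, i.e.
 * reciprocal at divisors[0..63], correction at divisors[64..127] and the
 * final shift amount at divisors[192..255]):
 *
 *   for (int i = 0; i < 64; i++) {
 *     int sign = workspace[i] >> 15;                      // 0 or -1
 *     unsigned temp = (unsigned short)(sign ? -workspace[i] : workspace[i]);
 *     temp += (unsigned short)divisors[64 + i];           // add correction
 *     temp = (temp * (unsigned short)divisors[i]) >> 16;  // * reciprocal
 *     temp >>= (unsigned short)divisors[192 + i];         // final descale
 *     coef_block[i] = (short)((temp ^ sign) - sign);      // restore sign
 *   }
 */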
  1953. asm_function jsimd_quantize_neon
  1954. COEF_BLOCK .req r0
  1955. DIVISORS .req r1
  1956. WORKSPACE .req r2
  1957. RECIPROCAL .req DIVISORS
  1958. CORRECTION .req r3
  1959. SHIFT .req ip
  1960. LOOP_COUNT .req r4
  1961. vld1.16 {d0, d1, d2, d3}, [WORKSPACE, :128]!
  1962. vabs.s16 q12, q0
  1963. add CORRECTION, DIVISORS, #(64 * 2)
  1964. add SHIFT, DIVISORS, #(64 * 6)
  1965. vld1.16 {d20, d21, d22, d23}, [CORRECTION, :128]!
  1966. vabs.s16 q13, q1
  1967. vld1.16 {d16, d17, d18, d19}, [RECIPROCAL, :128]!
  1968. vadd.u16 q12, q12, q10 /* add correction */
  1969. vadd.u16 q13, q13, q11
  1970. vmull.u16 q10, d24, d16 /* multiply by reciprocal */
  1971. vmull.u16 q11, d25, d17
  1972. vmull.u16 q8, d26, d18
  1973. vmull.u16 q9, d27, d19
  1974. vld1.16 {d24, d25, d26, d27}, [SHIFT, :128]!
  1975. vshrn.u32 d20, q10, #16
  1976. vshrn.u32 d21, q11, #16
  1977. vshrn.u32 d22, q8, #16
  1978. vshrn.u32 d23, q9, #16
  1979. vneg.s16 q12, q12
  1980. vneg.s16 q13, q13
  1981. vshr.s16 q2, q0, #15 /* extract sign */
  1982. vshr.s16 q3, q1, #15
  1983. vshl.u16 q14, q10, q12 /* shift */
  1984. vshl.u16 q15, q11, q13
  1985. push {r4, r5}
  1986. mov LOOP_COUNT, #3
  1987. 1:
  1988. vld1.16 {d0, d1, d2, d3}, [WORKSPACE, :128]!
  1989. veor.u16 q14, q14, q2 /* restore sign */
  1990. vabs.s16 q12, q0
  1991. vld1.16 {d20, d21, d22, d23}, [CORRECTION, :128]!
  1992. vabs.s16 q13, q1
  1993. veor.u16 q15, q15, q3
  1994. vld1.16 {d16, d17, d18, d19}, [RECIPROCAL, :128]!
  1995. vadd.u16 q12, q12, q10 /* add correction */
  1996. vadd.u16 q13, q13, q11
  1997. vmull.u16 q10, d24, d16 /* multiply by reciprocal */
  1998. vmull.u16 q11, d25, d17
  1999. vmull.u16 q8, d26, d18
  2000. vmull.u16 q9, d27, d19
  2001. vsub.u16 q14, q14, q2
  2002. vld1.16 {d24, d25, d26, d27}, [SHIFT, :128]!
  2003. vsub.u16 q15, q15, q3
  2004. vshrn.u32 d20, q10, #16
  2005. vshrn.u32 d21, q11, #16
  2006. vst1.16 {d28, d29, d30, d31}, [COEF_BLOCK, :128]!
  2007. vshrn.u32 d22, q8, #16
  2008. vshrn.u32 d23, q9, #16
  2009. vneg.s16 q12, q12
  2010. vneg.s16 q13, q13
  2011. vshr.s16 q2, q0, #15 /* extract sign */
  2012. vshr.s16 q3, q1, #15
  2013. vshl.u16 q14, q10, q12 /* shift */
  2014. vshl.u16 q15, q11, q13
  2015. subs LOOP_COUNT, LOOP_COUNT, #1
  2016. bne 1b
  2017. pop {r4, r5}
  2018. veor.u16 q14, q14, q2 /* restore sign */
  2019. veor.u16 q15, q15, q3
  2020. vsub.u16 q14, q14, q2
  2021. vsub.u16 q15, q15, q3
  2022. vst1.16 {d28, d29, d30, d31}, [COEF_BLOCK, :128]!
  2023. bx lr /* return */
  2024. .unreq COEF_BLOCK
  2025. .unreq DIVISORS
  2026. .unreq WORKSPACE
  2027. .unreq RECIPROCAL
  2028. .unreq CORRECTION
  2029. .unreq SHIFT
  2030. .unreq LOOP_COUNT
/*****************************************************************************/

/*
 * GLOBAL(void)
 * jsimd_h2v1_fancy_upsample_neon(int max_v_samp_factor,
 *                                JDIMENSION downsampled_width,
 *                                JSAMPARRAY input_data,
 *                                JSAMPARRAY *output_data_ptr);
 *
 * Note: the use of unaligned writes is the main remaining bottleneck in
 *       this code, which could potentially be addressed to gain up to tens
 *       of percent in performance on Cortex-A8/Cortex-A9.
 */
/*
 * Upsample 16 source pixels to 32 destination pixels. The new 16 source
 * pixels are loaded to q0. The previous 16 source pixels are in q1. The
 * shifted-by-one source pixels are constructed in q2 by using q0 and q1.
 * Register d28 is used for multiplication by 3. Register q15 is used
 * for adding +1 bias.
 */
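/*
 * For reference, the interpolation computed here in scalar C form (the same
 * 3/4 nearer + 1/4 further weighting as the C h2v1 fancy upsampler; out[0]
 * and out[2 * w - 1] simply copy the first and last source pixels and are
 * handled separately in 'upsample_row'):
 *
 *   out[2 * i]     = (3 * in[i] + in[i - 1] + 1) >> 2;
 *   out[2 * i + 1] = (3 * in[i] + in[i + 1] + 2) >> 2;
 *
 * The "+ 1" term comes from the q15 bias combined with the truncating
 * VSHRN #2, while the "+ 2" term comes from the rounding VRSHRN #2.
 */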
  2050. .macro upsample16 OUTPTR, INPTR
  2051. vld1.8 {q0}, [\INPTR]!
  2052. vmovl.u8 q8, d0
  2053. vext.8 q2, q1, q0, #15
  2054. vmovl.u8 q9, d1
  2055. vaddw.u8 q10, q15, d4
  2056. vaddw.u8 q11, q15, d5
  2057. vmlal.u8 q8, d4, d28
  2058. vmlal.u8 q9, d5, d28
  2059. vmlal.u8 q10, d0, d28
  2060. vmlal.u8 q11, d1, d28
  2061. vmov q1, q0 /* backup source pixels to q1 */
  2062. vrshrn.u16 d6, q8, #2
  2063. vrshrn.u16 d7, q9, #2
  2064. vshrn.u16 d8, q10, #2
  2065. vshrn.u16 d9, q11, #2
  2066. vst2.8 {d6, d7, d8, d9}, [\OUTPTR]!
  2067. .endm
/*
 * Upsample 32 source pixels to 64 destination pixels. Compared to the
 * 'upsample16' macro, the roles of the q0 and q1 registers are swapped for
 * the even and odd groups of 16 pixels, which is why the "vmov q1, q0"
 * instruction is not needed. This unrolling also allows loads and stores to
 * be reordered to hide the multiplication latency and reduce stalls.
 */
  2075. .macro upsample32 OUTPTR, INPTR
  2076. /* even 16 pixels group */
  2077. vld1.8 {q0}, [\INPTR]!
  2078. vmovl.u8 q8, d0
  2079. vext.8 q2, q1, q0, #15
  2080. vmovl.u8 q9, d1
  2081. vaddw.u8 q10, q15, d4
  2082. vaddw.u8 q11, q15, d5
  2083. vmlal.u8 q8, d4, d28
  2084. vmlal.u8 q9, d5, d28
  2085. vmlal.u8 q10, d0, d28
  2086. vmlal.u8 q11, d1, d28
  2087. /* odd 16 pixels group */
  2088. vld1.8 {q1}, [\INPTR]!
  2089. vrshrn.u16 d6, q8, #2
  2090. vrshrn.u16 d7, q9, #2
  2091. vshrn.u16 d8, q10, #2
  2092. vshrn.u16 d9, q11, #2
  2093. vmovl.u8 q8, d2
  2094. vext.8 q2, q0, q1, #15
  2095. vmovl.u8 q9, d3
  2096. vaddw.u8 q10, q15, d4
  2097. vaddw.u8 q11, q15, d5
  2098. vmlal.u8 q8, d4, d28
  2099. vmlal.u8 q9, d5, d28
  2100. vmlal.u8 q10, d2, d28
  2101. vmlal.u8 q11, d3, d28
  2102. vst2.8 {d6, d7, d8, d9}, [\OUTPTR]!
  2103. vrshrn.u16 d6, q8, #2
  2104. vrshrn.u16 d7, q9, #2
  2105. vshrn.u16 d8, q10, #2
  2106. vshrn.u16 d9, q11, #2
  2107. vst2.8 {d6, d7, d8, d9}, [\OUTPTR]!
  2108. .endm
/*
 * Upsample a row of WIDTH pixels from INPTR to OUTPTR.
 */
  2112. .macro upsample_row OUTPTR, INPTR, WIDTH, TMP1
  2113. /* special case for the first and last pixels */
  2114. sub \WIDTH, \WIDTH, #1
  2115. add \OUTPTR, \OUTPTR, #1
  2116. ldrb \TMP1, [\INPTR, \WIDTH]
  2117. strb \TMP1, [\OUTPTR, \WIDTH, asl #1]
  2118. ldrb \TMP1, [\INPTR], #1
  2119. strb \TMP1, [\OUTPTR, #-1]
  2120. vmov.8 d3[7], \TMP1
  2121. subs \WIDTH, \WIDTH, #32
  2122. blt 5f
  2123. 0: /* process 32 pixels per iteration */
  2124. upsample32 \OUTPTR, \INPTR
  2125. subs \WIDTH, \WIDTH, #32
  2126. bge 0b
  2127. 5:
  2128. adds \WIDTH, \WIDTH, #16
  2129. blt 1f
  2130. 0: /* process 16 pixels if needed */
  2131. upsample16 \OUTPTR, \INPTR
  2132. subs \WIDTH, \WIDTH, #16
  2133. 1:
  2134. adds \WIDTH, \WIDTH, #16
  2135. beq 9f
  2136. /* load the remaining 1-15 pixels */
  2137. add \INPTR, \INPTR, \WIDTH
  2138. tst \WIDTH, #1
  2139. beq 2f
  2140. sub \INPTR, \INPTR, #1
  2141. vld1.8 {d0[0]}, [\INPTR]
  2142. 2:
  2143. tst \WIDTH, #2
  2144. beq 2f
  2145. vext.8 d0, d0, d0, #6
  2146. sub \INPTR, \INPTR, #1
  2147. vld1.8 {d0[1]}, [\INPTR]
  2148. sub \INPTR, \INPTR, #1
  2149. vld1.8 {d0[0]}, [\INPTR]
  2150. 2:
  2151. tst \WIDTH, #4
  2152. beq 2f
  2153. vrev64.32 d0, d0
  2154. sub \INPTR, \INPTR, #1
  2155. vld1.8 {d0[3]}, [\INPTR]
  2156. sub \INPTR, \INPTR, #1
  2157. vld1.8 {d0[2]}, [\INPTR]
  2158. sub \INPTR, \INPTR, #1
  2159. vld1.8 {d0[1]}, [\INPTR]
  2160. sub \INPTR, \INPTR, #1
  2161. vld1.8 {d0[0]}, [\INPTR]
  2162. 2:
  2163. tst \WIDTH, #8
  2164. beq 2f
  2165. vmov d1, d0
  2166. sub \INPTR, \INPTR, #8
  2167. vld1.8 {d0}, [\INPTR]
  2168. 2: /* upsample the remaining pixels */
  2169. vmovl.u8 q8, d0
  2170. vext.8 q2, q1, q0, #15
  2171. vmovl.u8 q9, d1
  2172. vaddw.u8 q10, q15, d4
  2173. vaddw.u8 q11, q15, d5
  2174. vmlal.u8 q8, d4, d28
  2175. vmlal.u8 q9, d5, d28
  2176. vmlal.u8 q10, d0, d28
  2177. vmlal.u8 q11, d1, d28
  2178. vrshrn.u16 d10, q8, #2
  2179. vrshrn.u16 d12, q9, #2
  2180. vshrn.u16 d11, q10, #2
  2181. vshrn.u16 d13, q11, #2
  2182. vzip.8 d10, d11
  2183. vzip.8 d12, d13
  2184. /* store the remaining pixels */
  2185. tst \WIDTH, #8
  2186. beq 2f
  2187. vst1.8 {d10, d11}, [\OUTPTR]!
  2188. vmov q5, q6
  2189. 2:
  2190. tst \WIDTH, #4
  2191. beq 2f
  2192. vst1.8 {d10}, [\OUTPTR]!
  2193. vmov d10, d11
  2194. 2:
  2195. tst \WIDTH, #2
  2196. beq 2f
  2197. vst1.8 {d10[0]}, [\OUTPTR]!
  2198. vst1.8 {d10[1]}, [\OUTPTR]!
  2199. vst1.8 {d10[2]}, [\OUTPTR]!
  2200. vst1.8 {d10[3]}, [\OUTPTR]!
  2201. vext.8 d10, d10, d10, #4
  2202. 2:
  2203. tst \WIDTH, #1
  2204. beq 2f
  2205. vst1.8 {d10[0]}, [\OUTPTR]!
  2206. vst1.8 {d10[1]}, [\OUTPTR]!
  2207. 2:
  2208. 9:
  2209. .endm
  2210. asm_function jsimd_h2v1_fancy_upsample_neon
  2211. MAX_V_SAMP_FACTOR .req r0
  2212. DOWNSAMPLED_WIDTH .req r1
  2213. INPUT_DATA .req r2
  2214. OUTPUT_DATA_PTR .req r3
  2215. OUTPUT_DATA .req OUTPUT_DATA_PTR
  2216. OUTPTR .req r4
  2217. INPTR .req r5
  2218. WIDTH .req ip
  2219. TMP .req lr
  2220. push {r4, r5, r6, lr}
  2221. vpush {d8-d15}
  2222. ldr OUTPUT_DATA, [OUTPUT_DATA_PTR]
  2223. cmp MAX_V_SAMP_FACTOR, #0
  2224. ble 99f
  2225. /* initialize constants */
  2226. vmov.u8 d28, #3
  2227. vmov.u16 q15, #1
  2228. 11:
  2229. ldr INPTR, [INPUT_DATA], #4
  2230. ldr OUTPTR, [OUTPUT_DATA], #4
  2231. mov WIDTH, DOWNSAMPLED_WIDTH
  2232. upsample_row OUTPTR, INPTR, WIDTH, TMP
  2233. subs MAX_V_SAMP_FACTOR, MAX_V_SAMP_FACTOR, #1
  2234. bgt 11b
  2235. 99:
  2236. vpop {d8-d15}
  2237. pop {r4, r5, r6, pc}
  2238. .unreq MAX_V_SAMP_FACTOR
  2239. .unreq DOWNSAMPLED_WIDTH
  2240. .unreq INPUT_DATA
  2241. .unreq OUTPUT_DATA_PTR
  2242. .unreq OUTPUT_DATA
  2243. .unreq OUTPTR
  2244. .unreq INPTR
  2245. .unreq WIDTH
  2246. .unreq TMP
  2247. .purgem upsample16
  2248. .purgem upsample32
  2249. .purgem upsample_row
/*****************************************************************************/

/*
 * GLOBAL(JOCTET *)
 * jsimd_huff_encode_one_block(working_state *state, JOCTET *buffer,
 *                             JCOEFPTR block, int last_dc_val,
 *                             c_derived_tbl *dctbl, c_derived_tbl *actbl)
 */
  2258. .macro emit_byte BUFFER, PUT_BUFFER, PUT_BITS, ZERO, TMP
  2259. sub \PUT_BITS, \PUT_BITS, #0x8
  2260. lsr \TMP, \PUT_BUFFER, \PUT_BITS
  2261. uxtb \TMP, \TMP
  2262. strb \TMP, [\BUFFER, #1]!
  2263. cmp \TMP, #0xff
  2264. /*it eq*/
  2265. strbeq \ZERO, [\BUFFER, #1]!
  2266. .endm
  2267. .macro put_bits PUT_BUFFER, PUT_BITS, CODE, SIZE
  2268. /*lsl \PUT_BUFFER, \PUT_BUFFER, \SIZE*/
  2269. add \PUT_BITS, \SIZE
  2270. /*orr \PUT_BUFFER, \PUT_BUFFER, \CODE*/
  2271. orr \PUT_BUFFER, \CODE, \PUT_BUFFER, lsl \SIZE
  2272. .endm
  2273. .macro checkbuf15 BUFFER, PUT_BUFFER, PUT_BITS, ZERO, TMP
  2274. cmp \PUT_BITS, #0x10
  2275. blt 15f
  2276. eor \ZERO, \ZERO, \ZERO
  2277. emit_byte \BUFFER, \PUT_BUFFER, \PUT_BITS, \ZERO, \TMP
  2278. emit_byte \BUFFER, \PUT_BUFFER, \PUT_BITS, \ZERO, \TMP
  2279. 15:
  2280. .endm
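/*
 * A scalar C sketch of the three helper macros above (names mirror the
 * macro arguments; illustrative only):
 *
 *   // put_bits: append a SIZE-bit Huffman CODE to the bit buffer
 *   put_buffer = (put_buffer << size) | code;
 *   put_bits  += size;
 *
 *   // emit_byte: write the top 8 buffered bits, stuffing a zero byte
 *   // after any 0xFF as required by the JPEG bitstream
 *   put_bits -= 8;
 *   unsigned char c = (unsigned char)(put_buffer >> put_bits);
 *   *(++buffer) = c;
 *   if (c == 0xFF)
 *     *(++buffer) = 0;
 *
 *   // checkbuf15: keep no more than 15 bits buffered
 *   if (put_bits >= 16) {
 *     emit_byte(...);
 *     emit_byte(...);
 *   }
 */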
  2281. .balign 16
  2282. jsimd_huff_encode_one_block_neon_consts:
  2283. .byte 0x01
  2284. .byte 0x02
  2285. .byte 0x04
  2286. .byte 0x08
  2287. .byte 0x10
  2288. .byte 0x20
  2289. .byte 0x40
  2290. .byte 0x80
  2291. asm_function jsimd_huff_encode_one_block_neon
  2292. push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
  2293. add r7, sp, #0x1c
  2294. sub r4, sp, #0x40
  2295. bfc r4, #0, #5
  2296. mov sp, r4 /* align sp on 32 bytes */
  2297. vst1.64 {d8, d9, d10, d11}, [r4, :128]!
  2298. vst1.64 {d12, d13, d14, d15}, [r4, :128]
  2299. sub sp, #0x140 /* reserve 320 bytes */
str r0, [sp, #0x18] /* working state -> sp + 0x18 */
  2301. add r4, sp, #0x20 /* r4 = t1 */
  2302. ldr lr, [r7, #0x8] /* lr = dctbl */
sub r10, r1, #0x1 /* r10 = buffer - 1 */
  2304. ldrsh r1, [r2]
  2305. mov r9, #0x10
  2306. mov r8, #0x1
  2307. adr r5, jsimd_huff_encode_one_block_neon_consts
/* prepare data: gather the coefficients in zigzag scan order and compute
 * their bit lengths (nbits) and magnitude bits for Huffman coding */
  2309. vld1.8 {d26}, [r5, :64]
  2310. veor q8, q8, q8
  2311. veor q9, q9, q9
  2312. vdup.16 q14, r9
  2313. vdup.16 q15, r8
  2314. veor q10, q10, q10
  2315. veor q11, q11, q11
  2316. sub r1, r1, r3
  2317. add r9, r2, #0x22
  2318. add r8, r2, #0x18
  2319. add r3, r2, #0x36
  2320. vmov.16 d0[0], r1
  2321. vld1.16 {d2[0]}, [r9, :16]
  2322. vld1.16 {d4[0]}, [r8, :16]
  2323. vld1.16 {d6[0]}, [r3, :16]
  2324. add r1, r2, #0x2
  2325. add r9, r2, #0x30
  2326. add r8, r2, #0x26
  2327. add r3, r2, #0x28
  2328. vld1.16 {d0[1]}, [r1, :16]
  2329. vld1.16 {d2[1]}, [r9, :16]
  2330. vld1.16 {d4[1]}, [r8, :16]
  2331. vld1.16 {d6[1]}, [r3, :16]
  2332. add r1, r2, #0x10
  2333. add r9, r2, #0x40
  2334. add r8, r2, #0x34
  2335. add r3, r2, #0x1a
  2336. vld1.16 {d0[2]}, [r1, :16]
  2337. vld1.16 {d2[2]}, [r9, :16]
  2338. vld1.16 {d4[2]}, [r8, :16]
  2339. vld1.16 {d6[2]}, [r3, :16]
  2340. add r1, r2, #0x20
  2341. add r9, r2, #0x32
  2342. add r8, r2, #0x42
  2343. add r3, r2, #0xc
  2344. vld1.16 {d0[3]}, [r1, :16]
  2345. vld1.16 {d2[3]}, [r9, :16]
  2346. vld1.16 {d4[3]}, [r8, :16]
  2347. vld1.16 {d6[3]}, [r3, :16]
  2348. add r1, r2, #0x12
  2349. add r9, r2, #0x24
  2350. add r8, r2, #0x50
  2351. add r3, r2, #0xe
  2352. vld1.16 {d1[0]}, [r1, :16]
  2353. vld1.16 {d3[0]}, [r9, :16]
  2354. vld1.16 {d5[0]}, [r8, :16]
  2355. vld1.16 {d7[0]}, [r3, :16]
  2356. add r1, r2, #0x4
  2357. add r9, r2, #0x16
  2358. add r8, r2, #0x60
  2359. add r3, r2, #0x1c
  2360. vld1.16 {d1[1]}, [r1, :16]
  2361. vld1.16 {d3[1]}, [r9, :16]
  2362. vld1.16 {d5[1]}, [r8, :16]
  2363. vld1.16 {d7[1]}, [r3, :16]
  2364. add r1, r2, #0x6
  2365. add r9, r2, #0x8
  2366. add r8, r2, #0x52
  2367. add r3, r2, #0x2a
  2368. vld1.16 {d1[2]}, [r1, :16]
  2369. vld1.16 {d3[2]}, [r9, :16]
  2370. vld1.16 {d5[2]}, [r8, :16]
  2371. vld1.16 {d7[2]}, [r3, :16]
  2372. add r1, r2, #0x14
  2373. add r9, r2, #0xa
  2374. add r8, r2, #0x44
  2375. add r3, r2, #0x38
  2376. vld1.16 {d1[3]}, [r1, :16]
  2377. vld1.16 {d3[3]}, [r9, :16]
  2378. vld1.16 {d5[3]}, [r8, :16]
  2379. vld1.16 {d7[3]}, [r3, :16]
  2380. vcgt.s16 q8, q8, q0
  2381. vcgt.s16 q9, q9, q1
  2382. vcgt.s16 q10, q10, q2
  2383. vcgt.s16 q11, q11, q3
  2384. vabs.s16 q0, q0
  2385. vabs.s16 q1, q1
  2386. vabs.s16 q2, q2
  2387. vabs.s16 q3, q3
  2388. veor q8, q8, q0
  2389. veor q9, q9, q1
  2390. veor q10, q10, q2
  2391. veor q11, q11, q3
  2392. add r9, r4, #0x20
  2393. add r8, r4, #0x80
  2394. add r3, r4, #0xa0
  2395. vclz.i16 q0, q0
  2396. vclz.i16 q1, q1
  2397. vclz.i16 q2, q2
  2398. vclz.i16 q3, q3
  2399. vsub.i16 q0, q14, q0
  2400. vsub.i16 q1, q14, q1
  2401. vsub.i16 q2, q14, q2
  2402. vsub.i16 q3, q14, q3
  2403. vst1.16 {d0, d1, d2, d3}, [r4, :256]
  2404. vst1.16 {d4, d5, d6, d7}, [r9, :256]
  2405. vshl.s16 q0, q15, q0
  2406. vshl.s16 q1, q15, q1
  2407. vshl.s16 q2, q15, q2
  2408. vshl.s16 q3, q15, q3
  2409. vsub.i16 q0, q0, q15
  2410. vsub.i16 q1, q1, q15
  2411. vsub.i16 q2, q2, q15
  2412. vsub.i16 q3, q3, q15
  2413. vand q8, q8, q0
  2414. vand q9, q9, q1
  2415. vand q10, q10, q2
  2416. vand q11, q11, q3
  2417. vst1.16 {d16, d17, d18, d19}, [r8, :256]
  2418. vst1.16 {d20, d21, d22, d23}, [r3, :256]
  2419. add r1, r2, #0x46
  2420. add r9, r2, #0x3a
  2421. add r8, r2, #0x74
  2422. add r3, r2, #0x6a
  2423. vld1.16 {d8[0]}, [r1, :16]
  2424. vld1.16 {d10[0]}, [r9, :16]
  2425. vld1.16 {d12[0]}, [r8, :16]
  2426. vld1.16 {d14[0]}, [r3, :16]
  2427. veor q8, q8, q8
  2428. veor q9, q9, q9
  2429. veor q10, q10, q10
  2430. veor q11, q11, q11
  2431. add r1, r2, #0x54
  2432. add r9, r2, #0x2c
  2433. add r8, r2, #0x76
  2434. add r3, r2, #0x78
  2435. vld1.16 {d8[1]}, [r1, :16]
  2436. vld1.16 {d10[1]}, [r9, :16]
  2437. vld1.16 {d12[1]}, [r8, :16]
  2438. vld1.16 {d14[1]}, [r3, :16]
  2439. add r1, r2, #0x62
  2440. add r9, r2, #0x1e
  2441. add r8, r2, #0x68
  2442. add r3, r2, #0x7a
  2443. vld1.16 {d8[2]}, [r1, :16]
  2444. vld1.16 {d10[2]}, [r9, :16]
  2445. vld1.16 {d12[2]}, [r8, :16]
  2446. vld1.16 {d14[2]}, [r3, :16]
  2447. add r1, r2, #0x70
  2448. add r9, r2, #0x2e
  2449. add r8, r2, #0x5a
  2450. add r3, r2, #0x6c
  2451. vld1.16 {d8[3]}, [r1, :16]
  2452. vld1.16 {d10[3]}, [r9, :16]
  2453. vld1.16 {d12[3]}, [r8, :16]
  2454. vld1.16 {d14[3]}, [r3, :16]
  2455. add r1, r2, #0x72
  2456. add r9, r2, #0x3c
  2457. add r8, r2, #0x4c
  2458. add r3, r2, #0x5e
  2459. vld1.16 {d9[0]}, [r1, :16]
  2460. vld1.16 {d11[0]}, [r9, :16]
  2461. vld1.16 {d13[0]}, [r8, :16]
  2462. vld1.16 {d15[0]}, [r3, :16]
  2463. add r1, r2, #0x64
  2464. add r9, r2, #0x4a
  2465. add r8, r2, #0x3e
  2466. add r3, r2, #0x6e
  2467. vld1.16 {d9[1]}, [r1, :16]
  2468. vld1.16 {d11[1]}, [r9, :16]
  2469. vld1.16 {d13[1]}, [r8, :16]
  2470. vld1.16 {d15[1]}, [r3, :16]
  2471. add r1, r2, #0x56
  2472. add r9, r2, #0x58
  2473. add r8, r2, #0x4e
  2474. add r3, r2, #0x7c
  2475. vld1.16 {d9[2]}, [r1, :16]
  2476. vld1.16 {d11[2]}, [r9, :16]
  2477. vld1.16 {d13[2]}, [r8, :16]
  2478. vld1.16 {d15[2]}, [r3, :16]
  2479. add r1, r2, #0x48
  2480. add r9, r2, #0x66
  2481. add r8, r2, #0x5c
  2482. add r3, r2, #0x7e
  2483. vld1.16 {d9[3]}, [r1, :16]
  2484. vld1.16 {d11[3]}, [r9, :16]
  2485. vld1.16 {d13[3]}, [r8, :16]
  2486. vld1.16 {d15[3]}, [r3, :16]
  2487. vcgt.s16 q8, q8, q4
  2488. vcgt.s16 q9, q9, q5
  2489. vcgt.s16 q10, q10, q6
  2490. vcgt.s16 q11, q11, q7
  2491. vabs.s16 q4, q4
  2492. vabs.s16 q5, q5
  2493. vabs.s16 q6, q6
  2494. vabs.s16 q7, q7
  2495. veor q8, q8, q4
  2496. veor q9, q9, q5
  2497. veor q10, q10, q6
  2498. veor q11, q11, q7
  2499. add r1, r4, #0x40
  2500. add r9, r4, #0x60
  2501. add r8, r4, #0xc0
  2502. add r3, r4, #0xe0
  2503. vclz.i16 q4, q4
  2504. vclz.i16 q5, q5
  2505. vclz.i16 q6, q6
  2506. vclz.i16 q7, q7
  2507. vsub.i16 q4, q14, q4
  2508. vsub.i16 q5, q14, q5
  2509. vsub.i16 q6, q14, q6
  2510. vsub.i16 q7, q14, q7
  2511. vst1.16 {d8, d9, d10, d11}, [r1, :256]
  2512. vst1.16 {d12, d13, d14, d15}, [r9, :256]
  2513. vshl.s16 q4, q15, q4
  2514. vshl.s16 q5, q15, q5
  2515. vshl.s16 q6, q15, q6
  2516. vshl.s16 q7, q15, q7
  2517. vsub.i16 q4, q4, q15
  2518. vsub.i16 q5, q5, q15
  2519. vsub.i16 q6, q6, q15
  2520. vsub.i16 q7, q7, q15
  2521. vand q8, q8, q4
  2522. vand q9, q9, q5
  2523. vand q10, q10, q6
  2524. vand q11, q11, q7
  2525. vst1.16 {d16, d17, d18, d19}, [r8, :256]
  2526. vst1.16 {d20, d21, d22, d23}, [r3, :256]
  2527. ldr r12, [r7, #0xc] /* r12 = actbl */
  2528. add r1, lr, #0x400 /* r1 = dctbl->ehufsi */
  2529. mov r9, r12 /* r9 = actbl */
  2530. add r6, r4, #0x80 /* r6 = t2 */
  2531. ldr r11, [r0, #0x8] /* r11 = put_buffer */
  2532. ldr r4, [r0, #0xc] /* r4 = put_bits */
  2533. ldrh r2, [r6, #-128] /* r2 = nbits */
  2534. ldrh r3, [r6] /* r3 = temp2 & (((JLONG)1)<<nbits) - 1; */
  2535. ldr r0, [lr, r2, lsl #2]
  2536. ldrb r5, [r1, r2]
  2537. put_bits r11, r4, r0, r5
  2538. checkbuf15 r10, r11, r4, r5, r0
  2539. put_bits r11, r4, r3, r2
  2540. checkbuf15 r10, r11, r4, r5, r0
  2541. mov lr, r6 /* lr = t2 */
  2542. add r5, r9, #0x400 /* r5 = actbl->ehufsi */
  2543. ldrsb r6, [r5, #0xf0] /* r6 = actbl->ehufsi[0xf0] */
  2544. veor q8, q8, q8
  2545. vceq.i16 q0, q0, q8
  2546. vceq.i16 q1, q1, q8
  2547. vceq.i16 q2, q2, q8
  2548. vceq.i16 q3, q3, q8
  2549. vceq.i16 q4, q4, q8
  2550. vceq.i16 q5, q5, q8
  2551. vceq.i16 q6, q6, q8
  2552. vceq.i16 q7, q7, q8
  2553. vmovn.i16 d0, q0
  2554. vmovn.i16 d2, q1
  2555. vmovn.i16 d4, q2
  2556. vmovn.i16 d6, q3
  2557. vmovn.i16 d8, q4
  2558. vmovn.i16 d10, q5
  2559. vmovn.i16 d12, q6
  2560. vmovn.i16 d14, q7
  2561. vand d0, d0, d26
  2562. vand d2, d2, d26
  2563. vand d4, d4, d26
  2564. vand d6, d6, d26
  2565. vand d8, d8, d26
  2566. vand d10, d10, d26
  2567. vand d12, d12, d26
  2568. vand d14, d14, d26
  2569. vpadd.i8 d0, d0, d2
  2570. vpadd.i8 d4, d4, d6
  2571. vpadd.i8 d8, d8, d10
  2572. vpadd.i8 d12, d12, d14
  2573. vpadd.i8 d0, d0, d4
  2574. vpadd.i8 d8, d8, d12
  2575. vpadd.i8 d0, d0, d8
  2576. vmov.32 r1, d0[1]
  2577. vmov.32 r8, d0[0]
  2578. mvn r1, r1
  2579. mvn r8, r8
  2580. lsrs r1, r1, #0x1
  2581. rrx r8, r8 /* shift in last r1 bit while shifting out DC bit */
  2582. rbit r1, r1 /* r1 = index1 */
  2583. rbit r8, r8 /* r8 = index0 */
  2584. ldr r0, [r9, #0x3c0] /* r0 = actbl->ehufco[0xf0] */
str r1, [sp, #0x14] /* index1 -> sp + 0x14 */
  2586. cmp r8, #0x0
  2587. beq 6f
  2588. 1:
  2589. clz r2, r8
  2590. add lr, lr, r2, lsl #1
  2591. lsl r8, r8, r2
  2592. ldrh r1, [lr, #-126]
  2593. 2:
  2594. cmp r2, #0x10
  2595. blt 3f
  2596. sub r2, r2, #0x10
  2597. put_bits r11, r4, r0, r6
  2598. cmp r4, #0x10
  2599. blt 2b
  2600. eor r3, r3, r3
  2601. emit_byte r10, r11, r4, r3, r12
  2602. emit_byte r10, r11, r4, r3, r12
  2603. b 2b
  2604. 3:
  2605. add r2, r1, r2, lsl #4
  2606. ldrh r3, [lr, #2]!
  2607. ldr r12, [r9, r2, lsl #2]
  2608. ldrb r2, [r5, r2]
  2609. put_bits r11, r4, r12, r2
  2610. checkbuf15 r10, r11, r4, r2, r12
  2611. put_bits r11, r4, r3, r1
  2612. checkbuf15 r10, r11, r4, r2, r12
  2613. lsls r8, r8, #0x1
  2614. bne 1b
  2615. 6:
  2616. add r12, sp, #0x20 /* r12 = t1 */
  2617. ldr r8, [sp, #0x14] /* r8 = index1 */
  2618. adds r12, #0xc0 /* r12 = t2 + (DCTSIZE2/2) */
  2619. cmp r8, #0x0
  2620. beq 6f
  2621. clz r2, r8
  2622. sub r12, r12, lr
  2623. lsl r8, r8, r2
  2624. add r2, r2, r12, lsr #1
  2625. add lr, lr, r2, lsl #1
  2626. b 7f
  2627. 1:
  2628. clz r2, r8
  2629. add lr, lr, r2, lsl #1
  2630. lsl r8, r8, r2
  2631. 7:
  2632. ldrh r1, [lr, #-126]
  2633. 2:
  2634. cmp r2, #0x10
  2635. blt 3f
  2636. sub r2, r2, #0x10
  2637. put_bits r11, r4, r0, r6
  2638. cmp r4, #0x10
  2639. blt 2b
  2640. eor r3, r3, r3
  2641. emit_byte r10, r11, r4, r3, r12
  2642. emit_byte r10, r11, r4, r3, r12
  2643. b 2b
  2644. 3:
  2645. add r2, r1, r2, lsl #4
  2646. ldrh r3, [lr, #2]!
  2647. ldr r12, [r9, r2, lsl #2]
  2648. ldrb r2, [r5, r2]
  2649. put_bits r11, r4, r12, r2
  2650. checkbuf15 r10, r11, r4, r2, r12
  2651. put_bits r11, r4, r3, r1
  2652. checkbuf15 r10, r11, r4, r2, r12
  2653. lsls r8, r8, #0x1
  2654. bne 1b
  2655. 6:
  2656. add r0, sp, #0x20
  2657. add r0, #0xfe
  2658. cmp lr, r0
  2659. bhs 1f
  2660. ldr r1, [r9]
  2661. ldrb r0, [r5]
  2662. put_bits r11, r4, r1, r0
  2663. checkbuf15 r10, r11, r4, r0, r1
  2664. 1:
  2665. ldr r12, [sp, #0x18]
  2666. str r11, [r12, #0x8]
  2667. str r4, [r12, #0xc]
  2668. add r0, r10, #0x1
  2669. add r4, sp, #0x140
  2670. vld1.64 {d8, d9, d10, d11}, [r4, :128]!
  2671. vld1.64 {d12, d13, d14, d15}, [r4, :128]
  2672. sub r4, r7, #0x1c
  2673. mov sp, r4
  2674. pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
  2675. .purgem emit_byte
  2676. .purgem put_bits
  2677. .purgem checkbuf15