jquantf-sse2.asm

;
; jquantf.asm - sample data conversion and quantization (64-bit SSE & SSE2)
;
; Copyright 2009 Pierre Ossman <ossman@cendio.se> for Cendio AB
; Copyright (C) 2009, 2016, D. R. Commander.
;
; Based on the x86 SIMD extension for IJG JPEG library
; Copyright (C) 1999-2006, MIYASAKA Masaru.
; For conditions of distribution and use, see copyright notice in jsimdext.inc
;
; This file should be assembled with NASM (Netwide Assembler) and
; can *not* be assembled with Microsoft's MASM or any compatible
; assembler (including Borland's Turbo Assembler).
; NASM is available from http://nasm.sourceforge.net/ or
; http://sourceforge.net/project/showfiles.php?group_id=6208

%include "jsimdext.inc"
%include "jdct.inc"
; --------------------------------------------------------------------------
    SECTION     SEG_TEXT
    BITS        64
;
; Load data into workspace, applying unsigned->signed conversion
;
; GLOBAL(void)
; jsimd_convsamp_float_sse2(JSAMPARRAY sample_data, JDIMENSION start_col,
;                           FAST_FLOAT *workspace);
;

; r10 = JSAMPARRAY sample_data
; r11d = JDIMENSION start_col
; r12 = FAST_FLOAT *workspace
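;
; A rough C-level sketch of what this routine computes (a hypothetical
; reference only, not part of the library; assumes an 8-bit JSAMPLE build,
; where CENTERJSAMPLE is 128 and FAST_FLOAT is typically float):
;
;   void convsamp_float_ref(JSAMPARRAY sample_data, JDIMENSION start_col,
;                           FAST_FLOAT *workspace)
;   {
;     int row, col;
;     for (row = 0; row < DCTSIZE; row++) {
;       JSAMPROW elem = sample_data[row] + start_col;
;       for (col = 0; col < DCTSIZE; col++)
;         *workspace++ = (FAST_FLOAT)(elem[col] - CENTERJSAMPLE);
;     }
;   }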
    align       32
    GLOBAL_FUNCTION(jsimd_convsamp_float_sse2)

EXTN(jsimd_convsamp_float_sse2):
    push        rbp
    mov         rax, rsp
    mov         rbp, rsp
    collect_args 3
    push        rbx

    pcmpeqw     xmm7, xmm7
    psllw       xmm7, 7
    packsswb    xmm7, xmm7              ; xmm7 = PB_CENTERJSAMPLE (0x808080..)
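    ; The three instructions above build the level-shift constant without a
    ; memory load: pcmpeqw sets xmm7 to all ones, psllw turns each word into
    ; 0xFF80 (-128), and packsswb saturates each word to the signed byte
    ; 0x80, i.e. 16 copies of CENTERJSAMPLE (128).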
    mov         rsi, r10
    mov         eax, r11d
    mov         rdi, r12
    mov         rcx, DCTSIZE/2
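    ; Each iteration converts two 8-sample rows, so the loop below runs
    ; DCTSIZE/2 = 4 times for an 8x8 block.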
.convloop:
    mov         rbx, JSAMPROW [rsi+0*SIZEOF_JSAMPROW]  ; (JSAMPLE *)
    mov         rdx, JSAMPROW [rsi+1*SIZEOF_JSAMPROW]  ; (JSAMPLE *)

    movq        xmm0, XMM_MMWORD [rbx+rax*SIZEOF_JSAMPLE]
    movq        xmm1, XMM_MMWORD [rdx+rax*SIZEOF_JSAMPLE]

    psubb       xmm0, xmm7              ; xmm0=(01234567)
    psubb       xmm1, xmm7              ; xmm1=(89ABCDEF)

    punpcklbw   xmm0, xmm0              ; xmm0=(*0*1*2*3*4*5*6*7)
    punpcklbw   xmm1, xmm1              ; xmm1=(*8*9*A*B*C*D*E*F)

    punpcklwd   xmm2, xmm0              ; xmm2=(***0***1***2***3)
    punpckhwd   xmm0, xmm0              ; xmm0=(***4***5***6***7)
    punpcklwd   xmm3, xmm1              ; xmm3=(***8***9***A***B)
    punpckhwd   xmm1, xmm1              ; xmm1=(***C***D***E***F)
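    ; Note: xmm2 and xmm3 are deliberately used uninitialized above.
    ; punpcklwd only places the sample words in the upper half of each
    ; dword; whatever garbage the destination held ends up in the lower
    ; half and is discarded by the arithmetic right shift below, which
    ; also sign-extends each sample to a full dword.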
    psrad       xmm2, (DWORD_BIT-BYTE_BIT)  ; xmm2=(0123)
    psrad       xmm0, (DWORD_BIT-BYTE_BIT)  ; xmm0=(4567)
    cvtdq2ps    xmm2, xmm2                  ; xmm2=(0123)
    cvtdq2ps    xmm0, xmm0                  ; xmm0=(4567)
    psrad       xmm3, (DWORD_BIT-BYTE_BIT)  ; xmm3=(89AB)
    psrad       xmm1, (DWORD_BIT-BYTE_BIT)  ; xmm1=(CDEF)
    cvtdq2ps    xmm3, xmm3                  ; xmm3=(89AB)
    cvtdq2ps    xmm1, xmm1                  ; xmm1=(CDEF)

    movaps      XMMWORD [XMMBLOCK(0,0,rdi,SIZEOF_FAST_FLOAT)], xmm2
    movaps      XMMWORD [XMMBLOCK(0,1,rdi,SIZEOF_FAST_FLOAT)], xmm0
    movaps      XMMWORD [XMMBLOCK(1,0,rdi,SIZEOF_FAST_FLOAT)], xmm3
    movaps      XMMWORD [XMMBLOCK(1,1,rdi,SIZEOF_FAST_FLOAT)], xmm1

    add         rsi, byte 2*SIZEOF_JSAMPROW
    add         rdi, byte 2*DCTSIZE*SIZEOF_FAST_FLOAT
    dec         rcx
    jnz         short .convloop

    pop         rbx
    uncollect_args 3
    pop         rbp
    ret
; --------------------------------------------------------------------------
;
; Quantize/descale the coefficients, and store into coef_block
;
; GLOBAL(void)
; jsimd_quantize_float_sse2(JCOEFPTR coef_block, FAST_FLOAT *divisors,
;                           FAST_FLOAT *workspace);
;

; r10 = JCOEFPTR coef_block
; r11 = FAST_FLOAT *divisors
; r12 = FAST_FLOAT *workspace
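;
; A rough C-level sketch of what this routine computes (hypothetical
; reference only, not part of the library; assumes FAST_FLOAT is float and
; ignores the signed 16-bit saturation applied by packssdw):
;
;   void quantize_float_ref(JCOEFPTR coef_block, FAST_FLOAT *divisors,
;                           FAST_FLOAT *workspace)
;   {
;     int i;
;     for (i = 0; i < DCTSIZE2; i++)
;       coef_block[i] = (JCOEF)lrintf(workspace[i] * divisors[i]);
;   }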
    align       32
    GLOBAL_FUNCTION(jsimd_quantize_float_sse2)

EXTN(jsimd_quantize_float_sse2):
    push        rbp
    mov         rax, rsp
    mov         rbp, rsp
    collect_args 3

    mov         rsi, r12
    mov         rdx, r11
    mov         rdi, r10
    mov         rax, DCTSIZE2/16
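    ; Each iteration quantizes 16 coefficients (four XMM registers of four
    ; floats each), so the loop below runs DCTSIZE2/16 = 4 times per block.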
.quantloop:
    movaps      xmm0, XMMWORD [XMMBLOCK(0,0,rsi,SIZEOF_FAST_FLOAT)]
    movaps      xmm1, XMMWORD [XMMBLOCK(0,1,rsi,SIZEOF_FAST_FLOAT)]
    mulps       xmm0, XMMWORD [XMMBLOCK(0,0,rdx,SIZEOF_FAST_FLOAT)]
    mulps       xmm1, XMMWORD [XMMBLOCK(0,1,rdx,SIZEOF_FAST_FLOAT)]
    movaps      xmm2, XMMWORD [XMMBLOCK(1,0,rsi,SIZEOF_FAST_FLOAT)]
    movaps      xmm3, XMMWORD [XMMBLOCK(1,1,rsi,SIZEOF_FAST_FLOAT)]
    mulps       xmm2, XMMWORD [XMMBLOCK(1,0,rdx,SIZEOF_FAST_FLOAT)]
    mulps       xmm3, XMMWORD [XMMBLOCK(1,1,rdx,SIZEOF_FAST_FLOAT)]
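    ; cvtps2dq rounds each product to a 32-bit integer using the current
    ; MXCSR rounding mode (round-to-nearest-even by default); packssdw then
    ; packs the dwords to 16-bit JCOEFs with signed saturation.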
    cvtps2dq    xmm0, xmm0
    cvtps2dq    xmm1, xmm1
    cvtps2dq    xmm2, xmm2
    cvtps2dq    xmm3, xmm3

    packssdw    xmm0, xmm1
    packssdw    xmm2, xmm3

    movdqa      XMMWORD [XMMBLOCK(0,0,rdi,SIZEOF_JCOEF)], xmm0
    movdqa      XMMWORD [XMMBLOCK(1,0,rdi,SIZEOF_JCOEF)], xmm2

    add         rsi, byte 16*SIZEOF_FAST_FLOAT
    add         rdx, byte 16*SIZEOF_FAST_FLOAT
    add         rdi, byte 16*SIZEOF_JCOEF
    dec         rax
    jnz         short .quantloop

    uncollect_args 3
    pop         rbp
    ret

; For some reason, the OS X linker does not honor the request to align the
; segment unless we do this.
    align       32