/*
 * Loongson MMI optimizations for libjpeg-turbo
 *
 * Copyright (C) 2016-2017, Loongson Technology Corporation Limited, BeiJing.
 *                          All Rights Reserved.
 * Authors:  ZhuChen     <zhuchen@loongson.cn>
 *           CaiWanwei   <caiwanwei@loongson.cn>
 *           SunZhangzhi <sunzhangzhi-cq@loongson.cn>
 * Copyright (C) 2018, D. R. Commander.  All Rights Reserved.
 *
 * Based on the x86 SIMD extension for IJG JPEG library
 * Copyright (C) 1999-2006, MIYASAKA Masaru.
 *
 * This software is provided 'as-is', without any express or implied
 * warranty.  In no event will the authors be held liable for any damages
 * arising from the use of this software.
 *
 * Permission is granted to anyone to use this software for any purpose,
 * including commercial applications, and to alter it and redistribute it
 * freely, subject to the following restrictions:
 *
 * 1. The origin of this software must not be misrepresented; you must not
 *    claim that you wrote the original software.  If you use this software
 *    in a product, an acknowledgment in the product documentation would be
 *    appreciated but is not required.
 * 2. Altered source versions must be plainly marked as such, and must not be
 *    misrepresented as being the original software.
 * 3. This notice may not be removed or altered from any source distribution.
 */

/* INTEGER QUANTIZATION AND SAMPLE CONVERSION */

#include "jsimd_mmi.h"
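
/*
 * DO_QUANT() assumes the divisor table layout implied by the loads below:
 * divisors[0..DCTSIZE2-1] holds the 16-bit reciprocals of the quantization
 * steps, divisors[DCTSIZE2..2*DCTSIZE2-1] the rounding corrections, and
 * divisors[2*DCTSIZE2..3*DCTSIZE2-1] the post-multiply scales.  (In
 * libjpeg-turbo this table is built by compute_reciprocal() in jcdctmgr.c.)
 * Each coefficient is converted to its absolute value so that the division
 * can be carried out as an unsigned reciprocal multiplication; the sign is
 * restored at the end of the macro.
 */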

#define DO_QUANT() { \
  mm2 = _mm_load_si64((__m64 *)&workspace[0]); \
  mm3 = _mm_load_si64((__m64 *)&workspace[4]); \
  \
  mm0 = mm2; \
  mm1 = mm3; \
  \
  mm2 = _mm_srai_pi16(mm2, (WORD_BIT - 1));  /* -1 if value < 0, */ \
                                             /* 0 otherwise */ \
  mm3 = _mm_srai_pi16(mm3, (WORD_BIT - 1)); \
  \
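  /* Branchless absolute value:  (x ^ mask) - mask == |x|, where */ \
  /* mask = x >> (WORD_BIT - 1) is all ones for negative x and all */ \
  /* zeros otherwise. */ \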
  mm0 = _mm_xor_si64(mm0, mm2);              /* val = -val */ \
  mm1 = _mm_xor_si64(mm1, mm3); \
  mm0 = _mm_sub_pi16(mm0, mm2); \
  mm1 = _mm_sub_pi16(mm1, mm3); \
  \
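  /* Add the rounding correction, then take the high 16 bits of the */ \
  /* product with the reciprocal of the quantization step. */ \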
  corr0 = _mm_load_si64((__m64 *)&divisors[DCTSIZE2 * 1]);  /* correction */ \
  corr1 = _mm_load_si64((__m64 *)&divisors[DCTSIZE2 * 1 + 4]); \
  \
  mm0 = _mm_add_pi16(mm0, corr0);            /* correction + roundfactor */ \
  mm1 = _mm_add_pi16(mm1, corr1); \
  \
  mm4 = mm0; \
  mm5 = mm1; \
  \
  recip0 = _mm_load_si64((__m64 *)&divisors[DCTSIZE2 * 0]);  /* reciprocal */ \
  recip1 = _mm_load_si64((__m64 *)&divisors[DCTSIZE2 * 0 + 4]); \
  \
  mm0 = _mm_mulhi_pi16(mm0, recip0); \
  mm1 = _mm_mulhi_pi16(mm1, recip1); \
  \
  mm0 = _mm_add_pi16(mm0, mm4);    /* reciprocal is always negative */ \
  mm1 = _mm_add_pi16(mm1, mm5);    /* (MSB=1), so we always need to add the */ \
                                   /* initial value (input value is never */ \
                                   /* negative as we inverted it at the */ \
                                   /* start of this routine) */ \
  \
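  /* Scale the result.  _mm_mulhi_pi16() is a signed multiply, so an */ \
  /* unsigned high multiply is emulated by adding the other operand */ \
  /* whenever an operand has its sign (MSB) bit set. */ \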
  scale0 = _mm_load_si64((__m64 *)&divisors[DCTSIZE2 * 2]);  /* scale */ \
  scale1 = _mm_load_si64((__m64 *)&divisors[DCTSIZE2 * 2 + 4]); \
  \
  mm6 = scale0; \
  mm7 = scale1; \
  mm4 = mm0; \
  mm5 = mm1; \
  \
  mm0 = _mm_mulhi_pi16(mm0, mm6); \
  mm1 = _mm_mulhi_pi16(mm1, mm7); \
  \
  mm6 = _mm_srai_pi16(mm6, (WORD_BIT - 1));  /* determine if scale... */ \
                                             /* is negative */ \
  mm7 = _mm_srai_pi16(mm7, (WORD_BIT - 1)); \
  \
  mm6 = _mm_and_si64(mm6, mm4);              /* and add input if it is */ \
  mm7 = _mm_and_si64(mm7, mm5); \
  mm0 = _mm_add_pi16(mm0, mm6); \
  mm1 = _mm_add_pi16(mm1, mm7); \
  \
  mm4 = _mm_srai_pi16(mm4, (WORD_BIT - 1));  /* then check if... */ \
  mm5 = _mm_srai_pi16(mm5, (WORD_BIT - 1));  /* negative input */ \
  \
  mm4 = _mm_and_si64(mm4, scale0);           /* and add scale if it is */ \
  mm5 = _mm_and_si64(mm5, scale1); \
  mm0 = _mm_add_pi16(mm0, mm4); \
  mm1 = _mm_add_pi16(mm1, mm5); \
  \
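  /* Reapply the original sign (undoing the absolute value taken above) */ \
  /* and store the quantized coefficients. */ \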
  mm0 = _mm_xor_si64(mm0, mm2);              /* val = -val */ \
  mm1 = _mm_xor_si64(mm1, mm3); \
  mm0 = _mm_sub_pi16(mm0, mm2); \
  mm1 = _mm_sub_pi16(mm1, mm3); \
  \
  _mm_store_si64((__m64 *)&output_ptr[0], mm0); \
  _mm_store_si64((__m64 *)&output_ptr[4], mm1); \
  \
  workspace += DCTSIZE; \
  divisors += DCTSIZE; \
  output_ptr += DCTSIZE; \
}
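
/*
 * Quantize one 8x8 block of DCT coefficients.  Each DO_QUANT() expansion
 * handles one row of eight 16-bit coefficients (two 64-bit MMI registers),
 * so the eight expansions below cover all DCTSIZE2 = 64 coefficients of
 * the block.
 */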
void jsimd_quantize_mmi(JCOEFPTR coef_block, DCTELEM *divisors,
                        DCTELEM *workspace)
{
  JCOEFPTR output_ptr = coef_block;
  __m64 mm0, mm1, mm2, mm3, mm4, mm5, mm6, mm7;
  __m64 corr0, corr1, recip0, recip1, scale0, scale1;

  DO_QUANT()
  DO_QUANT()
  DO_QUANT()
  DO_QUANT()
  DO_QUANT()
  DO_QUANT()
  DO_QUANT()
  DO_QUANT()
}
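
/*
 * Usage sketch (illustrative, based on this file's signature): the SIMD
 * dispatch layer invokes this routine once per block after the forward DCT,
 * e.g.
 *
 *   jsimd_quantize_mmi(coef_block, divisors, workspace);
 *
 * where workspace holds the forward-DCT output for one 8x8 block and
 * divisors points at the per-component divisor table described above.
 */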