SIMD AlphaComposite: AVX2 implementation

This commit is contained in:
homm 2016-06-12 20:41:53 +03:00 committed by Alexander Karpinsky
parent c7ce11f0f5
commit e716f2125c

View File

@ -38,6 +38,17 @@ ImagingAlphaComposite(Imaging imDst, Imaging imSrc) {
-1,-1, 5,4, 5,4, 5,4, -1,-1, 1,0, 1,0, 1,0); -1,-1, 5,4, 5,4, 5,4, -1,-1, 1,0, 1,0, 1,0);
__m128i mm_get_hi = _mm_set_epi8( __m128i mm_get_hi = _mm_set_epi8(
-1,-1, 13,12, 13,12, 13,12, -1,-1, 9,8, 9,8, 9,8); -1,-1, 13,12, 13,12, 13,12, -1,-1, 9,8, 9,8, 9,8);
#if defined(__AVX2__)
__m256i vmm_max_alpha = _mm256_set1_epi32(255);
__m256i vmm_max_alpha2 = _mm256_set1_epi32(255 * 255);
__m256i vmm_get_lo = _mm256_set_epi8(
-1,-1, 5,4, 5,4, 5,4, -1,-1, 1,0, 1,0, 1,0,
-1,-1, 5,4, 5,4, 5,4, -1,-1, 1,0, 1,0, 1,0);
__m256i vmm_get_hi = _mm256_set_epi8(
-1,-1, 13,12, 13,12, 13,12, -1,-1, 9,8, 9,8, 9,8,
-1,-1, 13,12, 13,12, 13,12, -1,-1, 9,8, 9,8, 9,8);
#endif
/* Check arguments */ /* Check arguments */
if (!imDst || !imSrc || strcmp(imDst->mode, "RGBA") || if (!imDst || !imSrc || strcmp(imDst->mode, "RGBA") ||
@ -56,9 +67,6 @@ ImagingAlphaComposite(Imaging imDst, Imaging imSrc) {
return NULL; return NULL;
} }
#define MM_SHIFTDIV255_epi16(src)\
_mm_srli_epi16(_mm_add_epi16(src, _mm_srli_epi16(src, 8)), 8)
for (y = 0; y < imDst->ysize; y++) { for (y = 0; y < imDst->ysize; y++) {
rgba8 *dst = (rgba8 *)imDst->image[y]; rgba8 *dst = (rgba8 *)imDst->image[y];
rgba8 *src = (rgba8 *)imSrc->image[y]; rgba8 *src = (rgba8 *)imSrc->image[y];
@ -66,7 +74,59 @@ ImagingAlphaComposite(Imaging imDst, Imaging imSrc) {
x = 0; x = 0;
for (; x < xsize - 4; x += 4) { #if defined(__AVX2__)
#define MM_SHIFTDIV255_epi16(src)\
_mm256_srli_epi16(_mm256_add_epi16(src, _mm256_srli_epi16(src, 8)), 8)
for (; x < xsize - 7; x += 8) {
__m256i mm_dst = _mm256_loadu_si256((__m256i *) &dst[x]);
__m256i mm_dst_lo = _mm256_unpacklo_epi8(mm_dst, _mm256_setzero_si256());
__m256i mm_dst_hi = _mm256_unpackhi_epi8(mm_dst, _mm256_setzero_si256());
__m256i mm_src = _mm256_loadu_si256((__m256i *) &src[x]);
__m256i mm_src_lo = _mm256_unpacklo_epi8(mm_src, _mm256_setzero_si256());
__m256i mm_src_hi = _mm256_unpackhi_epi8(mm_src, _mm256_setzero_si256());
__m256i mm_dst_a = _mm256_srli_epi32(mm_dst, 24);
__m256i mm_src_a = _mm256_srli_epi32(mm_src, 24);
// Compute coefficients
// blend = dst->a * (255 - src->a); 16 bits
__m256i mm_blend = _mm256_mullo_epi32(mm_dst_a, _mm256_sub_epi32(vmm_max_alpha, mm_src_a));
// outa = src->a * 255 + dst->a * (255 - src->a); 16 bits
__m256i mm_outa = _mm256_add_epi32(_mm256_mullo_epi32(mm_src_a, vmm_max_alpha), mm_blend);
__m256i mm_coef1 = _mm256_mullo_epi32(mm_src_a, vmm_max_alpha2);
// 8 bits
mm_coef1 = _mm256_cvtps_epi32(_mm256_div_ps(_mm256_cvtepi32_ps(mm_coef1), _mm256_cvtepi32_ps(mm_outa)));
// 8 bits
__m256i mm_coef2 = _mm256_sub_epi32(vmm_max_alpha, mm_coef1);
__m256i mm_out_lo = _mm256_add_epi16(
_mm256_mullo_epi16(mm_src_lo, _mm256_shuffle_epi8(mm_coef1, vmm_get_lo)),
_mm256_mullo_epi16(mm_dst_lo, _mm256_shuffle_epi8(mm_coef2, vmm_get_lo)));
mm_out_lo = _mm256_or_si256(mm_out_lo, _mm256_slli_epi64(
_mm256_unpacklo_epi32(mm_outa, _mm256_setzero_si256()), 48));
mm_out_lo = MM_SHIFTDIV255_epi16(mm_out_lo);
__m256i mm_out_hi = _mm256_add_epi16(
_mm256_mullo_epi16(mm_src_hi, _mm256_shuffle_epi8(mm_coef1, vmm_get_hi)),
_mm256_mullo_epi16(mm_dst_hi, _mm256_shuffle_epi8(mm_coef2, vmm_get_hi)));
mm_out_hi = _mm256_or_si256(mm_out_hi, _mm256_slli_epi64(
_mm256_unpackhi_epi32(mm_outa, _mm256_setzero_si256()), 48));
mm_out_hi = MM_SHIFTDIV255_epi16(mm_out_hi);
_mm256_storeu_si256((__m256i *) &out[x],
_mm256_packus_epi16(mm_out_lo, mm_out_hi));
}
#undef MM_SHIFTDIV255_epi16
#endif
#define MM_SHIFTDIV255_epi16(src)\
_mm_srli_epi16(_mm_add_epi16(src, _mm_srli_epi16(src, 8)), 8)
for (; x < xsize - 3; x += 4) {
__m128i mm_dst = _mm_loadu_si128((__m128i *) &dst[x]); __m128i mm_dst = _mm_loadu_si128((__m128i *) &dst[x]);
__m128i mm_dst_lo = _mm_unpacklo_epi8(mm_dst, _mm_setzero_si128()); __m128i mm_dst_lo = _mm_unpacklo_epi8(mm_dst, _mm_setzero_si128());
__m128i mm_dst_hi = _mm_unpackhi_epi8(mm_dst, _mm_setzero_si128()); __m128i mm_dst_hi = _mm_unpackhi_epi8(mm_dst, _mm_setzero_si128());
@ -106,6 +166,8 @@ ImagingAlphaComposite(Imaging imDst, Imaging imSrc) {
_mm_packus_epi16(mm_out_lo, mm_out_hi)); _mm_packus_epi16(mm_out_lo, mm_out_hi));
} }
#undef MM_SHIFTDIV255_epi16
for (; x < xsize; x += 1) { for (; x < xsize; x += 1) {
if (src[x].a == 0) { if (src[x].a == 0) {
// Copy 4 bytes at once. // Copy 4 bytes at once.