From 51f5d86bf4dd9e39eceb80cefb06f4f61e0ceb2f Mon Sep 17 00:00:00 2001
From: homm
Date: Sun, 12 Jun 2016 19:40:56 +0300
Subject: [PATCH 1/8] sse4 implementation

---
 src/libImaging/AlphaComposite.c | 88 +++++++++++++++++++++++++++------
 1 file changed, 73 insertions(+), 15 deletions(-)

diff --git a/src/libImaging/AlphaComposite.c b/src/libImaging/AlphaComposite.c
index a074334aa..aff38e759 100644
--- a/src/libImaging/AlphaComposite.c
+++ b/src/libImaging/AlphaComposite.c
@@ -11,6 +11,14 @@
 
 #include "Imaging.h"
 
+#include <mmintrin.h>
+#include <emmintrin.h>
+#include <smmintrin.h>
+#if defined(__AVX2__)
+    #include <immintrin.h>
+#endif
+
+
 #define PRECISION_BITS 7
 
 typedef struct
@@ -28,6 +36,13 @@ ImagingAlphaComposite(Imaging imDst, Imaging imSrc)
 {
     Imaging imOut;
     int x, y;
+    int xsize = imDst->xsize;
+    __m128i mm_max_alpha = _mm_set1_epi32(255);
+    __m128i mm_max_alpha2 = _mm_set1_epi32(255 * 255);
+    __m128i mm_get_lo = _mm_set_epi8(
+        -1,-1, 5,4, 5,4, 5,4, -1,-1, 1,0, 1,0, 1,0);
+    __m128i mm_get_hi = _mm_set_epi8(
+        -1,-1, 13,12, 13,12, 13,12, -1,-1, 9,8, 9,8, 9,8);
 
     /* Check arguments */
     if (!imDst || !imSrc ||
@@ -47,38 +62,81 @@ ImagingAlphaComposite(Imaging imDst, Imaging imSrc)
     if (!imOut)
         return NULL;
 
+    #define MM_SHIFTDIV255_epi16(src)\
+        _mm_srli_epi16(_mm_add_epi16(src, _mm_srli_epi16(src, 8)), 8)
+
     for (y = 0; y < imDst->ysize; y++) {
         rgba8* dst = (rgba8*) imDst->image[y];
         rgba8* src = (rgba8*) imSrc->image[y];
         rgba8* out = (rgba8*) imOut->image[y];
 
-        for (x = 0; x < imDst->xsize; x ++) {
-            if (src->a == 0) {
+        x = 0;
+
+        for (; x < xsize - 4; x += 4) {
+            __m128i mm_dst = _mm_loadu_si128((__m128i *) &dst[x]);
+            __m128i mm_dst_lo = _mm_unpacklo_epi8(mm_dst, _mm_setzero_si128());
+            __m128i mm_dst_hi = _mm_unpackhi_epi8(mm_dst, _mm_setzero_si128());
+            __m128i mm_src = _mm_loadu_si128((__m128i *) &src[x]);
+            __m128i mm_src_lo = _mm_unpacklo_epi8(mm_src, _mm_setzero_si128());
+            __m128i mm_src_hi = _mm_unpackhi_epi8(mm_src, _mm_setzero_si128());
+
+            __m128i mm_dst_a = _mm_srli_epi32(mm_dst, 24);
+            __m128i mm_src_a = _mm_srli_epi32(mm_src, 24);
+
+            // Compute coefficients
+            // blend = dst->a * (255 - src->a); 16 bits
+            __m128i mm_blend = _mm_mullo_epi32(mm_dst_a, _mm_sub_epi32(mm_max_alpha, mm_src_a));
+            // outa = src->a * 255 + dst->a * (255 - src->a); 16 bits
+            __m128i mm_outa = _mm_add_epi32(_mm_mullo_epi32(mm_src_a, mm_max_alpha), mm_blend);
+            __m128i mm_coef1 = _mm_mullo_epi32(mm_src_a, mm_max_alpha2);
+            // 8 bits
+            mm_coef1 = _mm_cvtps_epi32(_mm_div_ps(_mm_cvtepi32_ps(mm_coef1), _mm_cvtepi32_ps(mm_outa)));
+            // 8 bits
+            __m128i mm_coef2 = _mm_sub_epi32(mm_max_alpha, mm_coef1);
+
+            __m128i mm_out_lo = _mm_add_epi16(
+                _mm_mullo_epi16(mm_src_lo, _mm_shuffle_epi8(mm_coef1, mm_get_lo)),
+                _mm_mullo_epi16(mm_dst_lo, _mm_shuffle_epi8(mm_coef2, mm_get_lo)));
+            mm_out_lo = _mm_or_si128(mm_out_lo, _mm_slli_epi64(
+                _mm_unpacklo_epi32(mm_outa, _mm_setzero_si128()), 48));
+            mm_out_lo = MM_SHIFTDIV255_epi16(mm_out_lo);
+
+            __m128i mm_out_hi = _mm_add_epi16(
+                _mm_mullo_epi16(mm_src_hi, _mm_shuffle_epi8(mm_coef1, mm_get_hi)),
+                _mm_mullo_epi16(mm_dst_hi, _mm_shuffle_epi8(mm_coef2, mm_get_hi)));
+            mm_out_hi = _mm_or_si128(mm_out_hi, _mm_slli_epi64(
+                _mm_unpackhi_epi32(mm_outa, _mm_setzero_si128()), 48));
+            mm_out_hi = MM_SHIFTDIV255_epi16(mm_out_hi);
+
+            _mm_storeu_si128((__m128i *) &out[x],
+                _mm_packus_epi16(mm_out_lo, mm_out_hi));
+        }
+
+        for (; x < xsize; x += 1) {
+            if (src[x].a == 0) {
                 // Copy 4 bytes at once.
-                *out = *dst;
+                out[x] = dst[x];
             } else {
                 // Integer implementation with increased precision.
                 // Each variable has extra meaningful bits.
                 // Divisions are rounded.
                 UINT32 tmpr, tmpg, tmpb;
-                UINT32 blend = dst->a * (255 - src->a);
-                UINT32 outa255 = src->a * 255 + blend;
+                UINT32 blend = dst[x].a * (255 - src[x].a);
+                UINT32 outa255 = src[x].a * 255 + blend;
                 // There we use 7 bits for precision.
                 // We could use more, but we go beyond 32 bits.
-                UINT32 coef1 = src->a * 255 * 255 * (1<<PRECISION_BITS) / outa255;
-                UINT32 coef2 = 255 * (1<<PRECISION_BITS) - coef1;
+                UINT32 coef1 = src[x].a * 255 * 255 * (1<<PRECISION_BITS) / outa255;
+                UINT32 coef2 = 255 * (1<<PRECISION_BITS) - coef1;
 
-                tmpr = src->r * coef1 + dst->r * coef2;
-                tmpg = src->g * coef1 + dst->g * coef2;
-                tmpb = src->b * coef1 + dst->b * coef2;
-                out->r = SHIFTFORDIV255(tmpr + (0x80<<PRECISION_BITS)) >> PRECISION_BITS;
-                out->g = SHIFTFORDIV255(tmpg + (0x80<<PRECISION_BITS)) >> PRECISION_BITS;
-                out->b = SHIFTFORDIV255(tmpb + (0x80<<PRECISION_BITS)) >> PRECISION_BITS;
-                out->a = SHIFTFORDIV255(outa255 + 0x80);
+                tmpr = src[x].r * coef1 + dst[x].r * coef2;
+                tmpg = src[x].g * coef1 + dst[x].g * coef2;
+                tmpb = src[x].b * coef1 + dst[x].b * coef2;
+                out[x].r = SHIFTFORDIV255(tmpr + (0x80<<PRECISION_BITS)) >> PRECISION_BITS;
+                out[x].g = SHIFTFORDIV255(tmpg + (0x80<<PRECISION_BITS)) >> PRECISION_BITS;
+                out[x].b = SHIFTFORDIV255(tmpb + (0x80<<PRECISION_BITS)) >> PRECISION_BITS;
+                out[x].a = SHIFTFORDIV255(outa255 + 0x80);
             }
-
-            dst++; src++; out++;
         }
     }
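
Note: the arithmetic the SSE4 loop above performs for every pixel can be written in scalar C roughly as follows. This is only a sketch; the helper names (pixel_t, div255, composite_pixel) are illustrative and not part of the patch, and unlike the scalar fallback the vector path keeps the blend coefficients in 8 bits rather than 8 + PRECISION_BITS.

#include <stdint.h>

typedef struct { uint8_t r, g, b, a; } pixel_t;   /* stand-in for rgba8 */

/* Same trick as MM_SHIFTDIV255_epi16: (v + (v >> 8)) >> 8 is a cheap
   substitute for v / 255 on 16-bit values.  It is biased slightly low
   (for example 255 -> 0); patch 3 in this series adds a +128 bias before
   the shift to get a rounded result. */
static uint32_t div255(uint32_t v) {
    return (v + (v >> 8)) >> 8;
}

static pixel_t composite_pixel(pixel_t dst, pixel_t src) {
    pixel_t out;
    uint32_t blend = dst.a * (255 - src.a);
    uint32_t outa255 = src.a * 255 + blend;      /* output alpha, scaled by 255 */
    if (outa255 == 0) {
        /* Both inputs fully transparent; the vector path has no special
           case for this and simply produces a zero alpha lane. */
        out.r = out.g = out.b = out.a = 0;
        return out;
    }
    /* coef1 = src.a * 255 * 255 / outa255, the Porter-Duff "over" weight of
       the source scaled to 0..255; the vector code does this division in
       float and rounds to nearest. */
    uint32_t coef1 = (src.a * 255 * 255 + outa255 / 2) / outa255;
    uint32_t coef2 = 255 - coef1;
    out.r = (uint8_t) div255(src.r * coef1 + dst.r * coef2);
    out.g = (uint8_t) div255(src.g * coef1 + dst.g * coef2);
    out.b = (uint8_t) div255(src.b * coef1 + dst.b * coef2);
    out.a = (uint8_t) div255(outa255);
    return out;
}
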
From 34ebee4d96aea58218a1c28b38ce30f341ae8f36 Mon Sep 17 00:00:00 2001
From: homm
Date: Sun, 12 Jun 2016 20:41:53 +0300
Subject: [PATCH 2/8] avx2 implementation

---
 src/libImaging/AlphaComposite.c | 70 +++++++++++++++++++++++++++++++--
 1 file changed, 66 insertions(+), 4 deletions(-)

diff --git a/src/libImaging/AlphaComposite.c b/src/libImaging/AlphaComposite.c
index aff38e759..c65405afd 100644
--- a/src/libImaging/AlphaComposite.c
+++ b/src/libImaging/AlphaComposite.c
@@ -43,6 +43,17 @@ ImagingAlphaComposite(Imaging imDst, Imaging imSrc)
         -1,-1, 5,4, 5,4, 5,4, -1,-1, 1,0, 1,0, 1,0);
     __m128i mm_get_hi = _mm_set_epi8(
         -1,-1, 13,12, 13,12, 13,12, -1,-1, 9,8, 9,8, 9,8);
+#if defined(__AVX2__)
+    __m256i vmm_max_alpha = _mm256_set1_epi32(255);
+    __m256i vmm_max_alpha2 = _mm256_set1_epi32(255 * 255);
+    __m256i vmm_get_lo = _mm256_set_epi8(
+        -1,-1, 5,4, 5,4, 5,4, -1,-1, 1,0, 1,0, 1,0,
+        -1,-1, 5,4, 5,4, 5,4, -1,-1, 1,0, 1,0, 1,0);
+    __m256i vmm_get_hi = _mm256_set_epi8(
+        -1,-1, 13,12, 13,12, 13,12, -1,-1, 9,8, 9,8, 9,8,
+        -1,-1, 13,12, 13,12, 13,12, -1,-1, 9,8, 9,8, 9,8);
+#endif
+
 
     /* Check arguments */
     if (!imDst || !imSrc ||
@@ -62,9 +73,6 @@ ImagingAlphaComposite(Imaging imDst, Imaging imSrc)
     if (!imOut)
         return NULL;
 
-    #define MM_SHIFTDIV255_epi16(src)\
-        _mm_srli_epi16(_mm_add_epi16(src, _mm_srli_epi16(src, 8)), 8)
-
     for (y = 0; y < imDst->ysize; y++) {
         rgba8* dst = (rgba8*) imDst->image[y];
         rgba8* src = (rgba8*) imSrc->image[y];
@@ -72,7 +80,59 @@ ImagingAlphaComposite(Imaging imDst, Imaging imSrc)
 
         x = 0;
 
-        for (; x < xsize - 4; x += 4) {
+#if defined(__AVX2__)
+
+    #define MM_SHIFTDIV255_epi16(src)\
+        _mm256_srli_epi16(_mm256_add_epi16(src, _mm256_srli_epi16(src, 8)), 8)
+
+        for (; x < xsize - 7; x += 8) {
+            __m256i mm_dst = _mm256_loadu_si256((__m256i *) &dst[x]);
+            __m256i mm_dst_lo = _mm256_unpacklo_epi8(mm_dst, _mm256_setzero_si256());
+            __m256i mm_dst_hi = _mm256_unpackhi_epi8(mm_dst, _mm256_setzero_si256());
+            __m256i mm_src = _mm256_loadu_si256((__m256i *) &src[x]);
+            __m256i mm_src_lo = _mm256_unpacklo_epi8(mm_src, _mm256_setzero_si256());
+            __m256i mm_src_hi = _mm256_unpackhi_epi8(mm_src, _mm256_setzero_si256());
+
+            __m256i mm_dst_a = _mm256_srli_epi32(mm_dst, 24);
+            __m256i mm_src_a = _mm256_srli_epi32(mm_src, 24);
+
+            // Compute coefficients
+            // blend = dst->a * (255 - src->a); 16 bits
+            __m256i mm_blend = _mm256_mullo_epi32(mm_dst_a, _mm256_sub_epi32(vmm_max_alpha, mm_src_a));
+            // outa
= src->a * 255 + dst->a * (255 - src->a); 16 bits + __m256i mm_outa = _mm256_add_epi32(_mm256_mullo_epi32(mm_src_a, vmm_max_alpha), mm_blend); + __m256i mm_coef1 = _mm256_mullo_epi32(mm_src_a, vmm_max_alpha2); + // 8 bits + mm_coef1 = _mm256_cvtps_epi32(_mm256_div_ps(_mm256_cvtepi32_ps(mm_coef1), _mm256_cvtepi32_ps(mm_outa))); + // 8 bits + __m256i mm_coef2 = _mm256_sub_epi32(vmm_max_alpha, mm_coef1); + + __m256i mm_out_lo = _mm256_add_epi16( + _mm256_mullo_epi16(mm_src_lo, _mm256_shuffle_epi8(mm_coef1, vmm_get_lo)), + _mm256_mullo_epi16(mm_dst_lo, _mm256_shuffle_epi8(mm_coef2, vmm_get_lo))); + mm_out_lo = _mm256_or_si256(mm_out_lo, _mm256_slli_epi64( + _mm256_unpacklo_epi32(mm_outa, _mm256_setzero_si256()), 48)); + mm_out_lo = MM_SHIFTDIV255_epi16(mm_out_lo); + + __m256i mm_out_hi = _mm256_add_epi16( + _mm256_mullo_epi16(mm_src_hi, _mm256_shuffle_epi8(mm_coef1, vmm_get_hi)), + _mm256_mullo_epi16(mm_dst_hi, _mm256_shuffle_epi8(mm_coef2, vmm_get_hi))); + mm_out_hi = _mm256_or_si256(mm_out_hi, _mm256_slli_epi64( + _mm256_unpackhi_epi32(mm_outa, _mm256_setzero_si256()), 48)); + mm_out_hi = MM_SHIFTDIV255_epi16(mm_out_hi); + + _mm256_storeu_si256((__m256i *) &out[x], + _mm256_packus_epi16(mm_out_lo, mm_out_hi)); + } + + #undef MM_SHIFTDIV255_epi16 + +#endif + + #define MM_SHIFTDIV255_epi16(src)\ + _mm_srli_epi16(_mm_add_epi16(src, _mm_srli_epi16(src, 8)), 8) + + for (; x < xsize - 3; x += 4) { __m128i mm_dst = _mm_loadu_si128((__m128i *) &dst[x]); __m128i mm_dst_lo = _mm_unpacklo_epi8(mm_dst, _mm_setzero_si128()); __m128i mm_dst_hi = _mm_unpackhi_epi8(mm_dst, _mm_setzero_si128()); @@ -112,6 +172,8 @@ ImagingAlphaComposite(Imaging imDst, Imaging imSrc) _mm_packus_epi16(mm_out_lo, mm_out_hi)); } + #undef MM_SHIFTDIV255_epi16 + for (; x < xsize; x += 1) { if (src[x].a == 0) { // Copy 4 bytes at once. 
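
Note: the AVX2 path above mirrors the SSE4 loop but handles 8 pixels per iteration. Two details are worth spelling out: the 256-bit unpack and byte-shuffle instructions operate within each 128-bit lane, which is why vmm_get_lo and vmm_get_hi simply repeat the 16-byte SSE4 masks in both halves; and a row is consumed widest-blocks-first, with the SSE4 loop and then the scalar loop taking whatever remains (this patch also tightens the SSE4 bound from x < xsize - 4 to x < xsize - 3, so a width that is an exact multiple of four no longer falls through to the scalar tail). Below is a minimal, self-contained model of that tail splitting, using a hypothetical helper that is not part of the patch.

#include <stdio.h>

/* Count how many pixels of a row of width xsize each loop would consume. */
static void split_row(int xsize) {
    int x = 0, by8 = 0, by4 = 0, by1 = 0;
    for (; x < xsize - 7; x += 8) by8 += 8;   /* AVX2 loop: 8 pixels per step */
    for (; x < xsize - 3; x += 4) by4 += 4;   /* SSE4 loop: 4 pixels per step */
    for (; x < xsize; x += 1)     by1 += 1;   /* scalar fallback              */
    printf("width %2d -> avx2 %2d + sse4 %d + scalar %d\n", xsize, by8, by4, by1);
}

int main(void) {
    for (int w = 0; w <= 12; w++)
        split_row(w);
    return 0;
}
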
From ab46181de5ddc368584f200628461dea4feda602 Mon Sep 17 00:00:00 2001 From: homm Date: Sun, 12 Jun 2016 21:05:06 +0300 Subject: [PATCH 3/8] increase precision --- src/libImaging/AlphaComposite.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/libImaging/AlphaComposite.c b/src/libImaging/AlphaComposite.c index c65405afd..7750dcb23 100644 --- a/src/libImaging/AlphaComposite.c +++ b/src/libImaging/AlphaComposite.c @@ -39,6 +39,7 @@ ImagingAlphaComposite(Imaging imDst, Imaging imSrc) int xsize = imDst->xsize; __m128i mm_max_alpha = _mm_set1_epi32(255); __m128i mm_max_alpha2 = _mm_set1_epi32(255 * 255); + __m256i mm_half = _mm_set1_epi32(128); __m128i mm_get_lo = _mm_set_epi8( -1,-1, 5,4, 5,4, 5,4, -1,-1, 1,0, 1,0, 1,0); __m128i mm_get_hi = _mm_set_epi8( @@ -46,6 +47,7 @@ ImagingAlphaComposite(Imaging imDst, Imaging imSrc) #if defined(__AVX2__) __m256i vmm_max_alpha = _mm256_set1_epi32(255); __m256i vmm_max_alpha2 = _mm256_set1_epi32(255 * 255); + __m256i vmm_half = _mm256_set1_epi32(128); __m256i vmm_get_lo = _mm256_set_epi8( -1,-1, 5,4, 5,4, 5,4, -1,-1, 1,0, 1,0, 1,0, -1,-1, 5,4, 5,4, 5,4, -1,-1, 1,0, 1,0, 1,0); @@ -112,6 +114,7 @@ ImagingAlphaComposite(Imaging imDst, Imaging imSrc) _mm256_mullo_epi16(mm_dst_lo, _mm256_shuffle_epi8(mm_coef2, vmm_get_lo))); mm_out_lo = _mm256_or_si256(mm_out_lo, _mm256_slli_epi64( _mm256_unpacklo_epi32(mm_outa, _mm256_setzero_si256()), 48)); + mm_out_lo = _mm256_add_epi32(mm_out_lo, vmm_half); mm_out_lo = MM_SHIFTDIV255_epi16(mm_out_lo); __m256i mm_out_hi = _mm256_add_epi16( @@ -119,6 +122,7 @@ ImagingAlphaComposite(Imaging imDst, Imaging imSrc) _mm256_mullo_epi16(mm_dst_hi, _mm256_shuffle_epi8(mm_coef2, vmm_get_hi))); mm_out_hi = _mm256_or_si256(mm_out_hi, _mm256_slli_epi64( _mm256_unpackhi_epi32(mm_outa, _mm256_setzero_si256()), 48)); + mm_out_hi = _mm256_add_epi32(mm_out_hi, vmm_half); mm_out_hi = MM_SHIFTDIV255_epi16(mm_out_hi); _mm256_storeu_si256((__m256i *) &out[x], @@ -159,6 +163,7 @@ ImagingAlphaComposite(Imaging imDst, Imaging imSrc) _mm_mullo_epi16(mm_dst_lo, _mm_shuffle_epi8(mm_coef2, mm_get_lo))); mm_out_lo = _mm_or_si128(mm_out_lo, _mm_slli_epi64( _mm_unpacklo_epi32(mm_outa, _mm_setzero_si128()), 48)); + mm_out_lo = _mm_add_epi32(mm_out_lo, mm_half); mm_out_lo = MM_SHIFTDIV255_epi16(mm_out_lo); __m128i mm_out_hi = _mm_add_epi16( @@ -166,6 +171,7 @@ ImagingAlphaComposite(Imaging imDst, Imaging imSrc) _mm_mullo_epi16(mm_dst_hi, _mm_shuffle_epi8(mm_coef2, mm_get_hi))); mm_out_hi = _mm_or_si128(mm_out_hi, _mm_slli_epi64( _mm_unpackhi_epi32(mm_outa, _mm_setzero_si128()), 48)); + mm_out_hi = _mm_add_epi32(mm_out_hi, mm_half); mm_out_hi = MM_SHIFTDIV255_epi16(mm_out_hi); _mm_storeu_si128((__m128i *) &out[x], From 46d274a7d9e0e48bd8954e0a10d51cf6ab8fd619 Mon Sep 17 00:00:00 2001 From: homm Date: Sun, 19 Jun 2016 15:55:58 +0300 Subject: [PATCH 4/8] speedup sse4 by using _mm_mullo_epi16 instead of _mm_mullo_epi32 --- src/libImaging/AlphaComposite.c | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/src/libImaging/AlphaComposite.c b/src/libImaging/AlphaComposite.c index 7750dcb23..aca749cf9 100644 --- a/src/libImaging/AlphaComposite.c +++ b/src/libImaging/AlphaComposite.c @@ -39,7 +39,7 @@ ImagingAlphaComposite(Imaging imDst, Imaging imSrc) int xsize = imDst->xsize; __m128i mm_max_alpha = _mm_set1_epi32(255); __m128i mm_max_alpha2 = _mm_set1_epi32(255 * 255); - __m256i mm_half = _mm_set1_epi32(128); + __m128i mm_half = _mm_set1_epi32(128); __m128i mm_get_lo = _mm_set_epi8( -1,-1, 5,4, 5,4, 5,4, -1,-1, 1,0, 
1,0, 1,0); __m128i mm_get_hi = _mm_set_epi8( @@ -137,22 +137,29 @@ ImagingAlphaComposite(Imaging imDst, Imaging imSrc) _mm_srli_epi16(_mm_add_epi16(src, _mm_srli_epi16(src, 8)), 8) for (; x < xsize - 3; x += 4) { + // [8] a3 b3 g3 r3 a2 b2 g2 r2 a1 b1 g1 r1 a0 b0 g0 r0 __m128i mm_dst = _mm_loadu_si128((__m128i *) &dst[x]); + // [16] a1 b1 g1 r1 a0 b0 g0 r0 __m128i mm_dst_lo = _mm_unpacklo_epi8(mm_dst, _mm_setzero_si128()); + // [16] a3 b3 g3 r3 a2 b2 g2 r2 __m128i mm_dst_hi = _mm_unpackhi_epi8(mm_dst, _mm_setzero_si128()); + // [8] a3 b3 g3 r3 a2 b2 g2 r2 a1 b1 g1 r1 a0 b0 g0 r0 __m128i mm_src = _mm_loadu_si128((__m128i *) &src[x]); __m128i mm_src_lo = _mm_unpacklo_epi8(mm_src, _mm_setzero_si128()); __m128i mm_src_hi = _mm_unpackhi_epi8(mm_src, _mm_setzero_si128()); + // [32] a3 a2 a1 a0 __m128i mm_dst_a = _mm_srli_epi32(mm_dst, 24); __m128i mm_src_a = _mm_srli_epi32(mm_src, 24); // Compute coefficients - // blend = dst->a * (255 - src->a); 16 bits - __m128i mm_blend = _mm_mullo_epi32(mm_dst_a, _mm_sub_epi32(mm_max_alpha, mm_src_a)); - // outa = src->a * 255 + dst->a * (255 - src->a); 16 bits - __m128i mm_outa = _mm_add_epi32(_mm_mullo_epi32(mm_src_a, mm_max_alpha), mm_blend); - __m128i mm_coef1 = _mm_mullo_epi32(mm_src_a, mm_max_alpha2); + // blend = dst->a * (255 - src->a) + // [16] xx b3 xx b2 xx b1 xx b0 + __m128i mm_blend = _mm_mullo_epi16(mm_dst_a, _mm_sub_epi32(mm_max_alpha, mm_src_a)); + // outa = src->a * 255 + dst->a * 255 - src->a * dst->a + // [16] xx a3 xx a2 xx a1 xx a0 + __m128i mm_outa = _mm_add_epi32(_mm_mullo_epi16(mm_src_a, mm_max_alpha), mm_blend); + __m128i mm_coef1 = _mm_mullo_epi16(mm_src_a, mm_max_alpha2); // 8 bits mm_coef1 = _mm_cvtps_epi32(_mm_div_ps(_mm_cvtepi32_ps(mm_coef1), _mm_cvtepi32_ps(mm_outa))); // 8 bits From 01563e732e75ee88521f564268a4ae435be5c93c Mon Sep 17 00:00:00 2001 From: homm Date: Sun, 19 Jun 2016 16:46:10 +0300 Subject: [PATCH 5/8] speedup avx2 by using _mm256_mullo_epi16 instead of _mm256_mullo_epi32 --- src/libImaging/AlphaComposite.c | 36 +++++++++++++++++---------------- 1 file changed, 19 insertions(+), 17 deletions(-) diff --git a/src/libImaging/AlphaComposite.c b/src/libImaging/AlphaComposite.c index aca749cf9..5a9ce195e 100644 --- a/src/libImaging/AlphaComposite.c +++ b/src/libImaging/AlphaComposite.c @@ -39,6 +39,7 @@ ImagingAlphaComposite(Imaging imDst, Imaging imSrc) int xsize = imDst->xsize; __m128i mm_max_alpha = _mm_set1_epi32(255); __m128i mm_max_alpha2 = _mm_set1_epi32(255 * 255); + __m128i mm_zero = _mm_setzero_si128(); __m128i mm_half = _mm_set1_epi32(128); __m128i mm_get_lo = _mm_set_epi8( -1,-1, 5,4, 5,4, 5,4, -1,-1, 1,0, 1,0, 1,0); @@ -47,6 +48,7 @@ ImagingAlphaComposite(Imaging imDst, Imaging imSrc) #if defined(__AVX2__) __m256i vmm_max_alpha = _mm256_set1_epi32(255); __m256i vmm_max_alpha2 = _mm256_set1_epi32(255 * 255); + __m256i vmm_zero = _mm256_setzero_si256(); __m256i vmm_half = _mm256_set1_epi32(128); __m256i vmm_get_lo = _mm256_set_epi8( -1,-1, 5,4, 5,4, 5,4, -1,-1, 1,0, 1,0, 1,0, @@ -89,21 +91,21 @@ ImagingAlphaComposite(Imaging imDst, Imaging imSrc) for (; x < xsize - 7; x += 8) { __m256i mm_dst = _mm256_loadu_si256((__m256i *) &dst[x]); - __m256i mm_dst_lo = _mm256_unpacklo_epi8(mm_dst, _mm256_setzero_si256()); - __m256i mm_dst_hi = _mm256_unpackhi_epi8(mm_dst, _mm256_setzero_si256()); + __m256i mm_dst_lo = _mm256_unpacklo_epi8(mm_dst, vmm_zero); + __m256i mm_dst_hi = _mm256_unpackhi_epi8(mm_dst, vmm_zero); __m256i mm_src = _mm256_loadu_si256((__m256i *) &src[x]); - __m256i mm_src_lo = 
_mm256_unpacklo_epi8(mm_src, _mm256_setzero_si256()); - __m256i mm_src_hi = _mm256_unpackhi_epi8(mm_src, _mm256_setzero_si256()); + __m256i mm_src_lo = _mm256_unpacklo_epi8(mm_src, vmm_zero); + __m256i mm_src_hi = _mm256_unpackhi_epi8(mm_src, vmm_zero); __m256i mm_dst_a = _mm256_srli_epi32(mm_dst, 24); __m256i mm_src_a = _mm256_srli_epi32(mm_src, 24); // Compute coefficients // blend = dst->a * (255 - src->a); 16 bits - __m256i mm_blend = _mm256_mullo_epi32(mm_dst_a, _mm256_sub_epi32(vmm_max_alpha, mm_src_a)); + __m256i mm_blend = _mm256_mullo_epi16(mm_dst_a, _mm256_sub_epi32(vmm_max_alpha, mm_src_a)); // outa = src->a * 255 + dst->a * (255 - src->a); 16 bits - __m256i mm_outa = _mm256_add_epi32(_mm256_mullo_epi32(mm_src_a, vmm_max_alpha), mm_blend); - __m256i mm_coef1 = _mm256_mullo_epi32(mm_src_a, vmm_max_alpha2); + __m256i mm_outa = _mm256_add_epi32(_mm256_mullo_epi16(mm_src_a, vmm_max_alpha), mm_blend); + __m256i mm_coef1 = _mm256_mullo_epi16(mm_src_a, vmm_max_alpha2); // 8 bits mm_coef1 = _mm256_cvtps_epi32(_mm256_div_ps(_mm256_cvtepi32_ps(mm_coef1), _mm256_cvtepi32_ps(mm_outa))); // 8 bits @@ -113,7 +115,7 @@ ImagingAlphaComposite(Imaging imDst, Imaging imSrc) _mm256_mullo_epi16(mm_src_lo, _mm256_shuffle_epi8(mm_coef1, vmm_get_lo)), _mm256_mullo_epi16(mm_dst_lo, _mm256_shuffle_epi8(mm_coef2, vmm_get_lo))); mm_out_lo = _mm256_or_si256(mm_out_lo, _mm256_slli_epi64( - _mm256_unpacklo_epi32(mm_outa, _mm256_setzero_si256()), 48)); + _mm256_unpacklo_epi32(mm_outa, vmm_zero), 48)); mm_out_lo = _mm256_add_epi32(mm_out_lo, vmm_half); mm_out_lo = MM_SHIFTDIV255_epi16(mm_out_lo); @@ -121,7 +123,7 @@ ImagingAlphaComposite(Imaging imDst, Imaging imSrc) _mm256_mullo_epi16(mm_src_hi, _mm256_shuffle_epi8(mm_coef1, vmm_get_hi)), _mm256_mullo_epi16(mm_dst_hi, _mm256_shuffle_epi8(mm_coef2, vmm_get_hi))); mm_out_hi = _mm256_or_si256(mm_out_hi, _mm256_slli_epi64( - _mm256_unpackhi_epi32(mm_outa, _mm256_setzero_si256()), 48)); + _mm256_unpackhi_epi32(mm_outa, vmm_zero), 48)); mm_out_hi = _mm256_add_epi32(mm_out_hi, vmm_half); mm_out_hi = MM_SHIFTDIV255_epi16(mm_out_hi); @@ -140,13 +142,13 @@ ImagingAlphaComposite(Imaging imDst, Imaging imSrc) // [8] a3 b3 g3 r3 a2 b2 g2 r2 a1 b1 g1 r1 a0 b0 g0 r0 __m128i mm_dst = _mm_loadu_si128((__m128i *) &dst[x]); // [16] a1 b1 g1 r1 a0 b0 g0 r0 - __m128i mm_dst_lo = _mm_unpacklo_epi8(mm_dst, _mm_setzero_si128()); + __m128i mm_dst_lo = _mm_unpacklo_epi8(mm_dst, mm_zero); // [16] a3 b3 g3 r3 a2 b2 g2 r2 - __m128i mm_dst_hi = _mm_unpackhi_epi8(mm_dst, _mm_setzero_si128()); + __m128i mm_dst_hi = _mm_unpackhi_epi8(mm_dst, mm_zero); // [8] a3 b3 g3 r3 a2 b2 g2 r2 a1 b1 g1 r1 a0 b0 g0 r0 __m128i mm_src = _mm_loadu_si128((__m128i *) &src[x]); - __m128i mm_src_lo = _mm_unpacklo_epi8(mm_src, _mm_setzero_si128()); - __m128i mm_src_hi = _mm_unpackhi_epi8(mm_src, _mm_setzero_si128()); + __m128i mm_src_lo = _mm_unpacklo_epi8(mm_src, mm_zero); + __m128i mm_src_hi = _mm_unpackhi_epi8(mm_src, mm_zero); // [32] a3 a2 a1 a0 __m128i mm_dst_a = _mm_srli_epi32(mm_dst, 24); @@ -160,16 +162,16 @@ ImagingAlphaComposite(Imaging imDst, Imaging imSrc) // [16] xx a3 xx a2 xx a1 xx a0 __m128i mm_outa = _mm_add_epi32(_mm_mullo_epi16(mm_src_a, mm_max_alpha), mm_blend); __m128i mm_coef1 = _mm_mullo_epi16(mm_src_a, mm_max_alpha2); - // 8 bits + // [8] xx xx xx c3 xx xx xx c2 xx xx xx c1 xx xx xx c0 mm_coef1 = _mm_cvtps_epi32(_mm_div_ps(_mm_cvtepi32_ps(mm_coef1), _mm_cvtepi32_ps(mm_outa))); - // 8 bits + // [8] xx xx xx c3 xx xx xx c2 xx xx xx c1 xx xx xx c0 __m128i mm_coef2 = 
_mm_sub_epi32(mm_max_alpha, mm_coef1); __m128i mm_out_lo = _mm_add_epi16( _mm_mullo_epi16(mm_src_lo, _mm_shuffle_epi8(mm_coef1, mm_get_lo)), _mm_mullo_epi16(mm_dst_lo, _mm_shuffle_epi8(mm_coef2, mm_get_lo))); mm_out_lo = _mm_or_si128(mm_out_lo, _mm_slli_epi64( - _mm_unpacklo_epi32(mm_outa, _mm_setzero_si128()), 48)); + _mm_unpacklo_epi32(mm_outa, mm_zero), 48)); mm_out_lo = _mm_add_epi32(mm_out_lo, mm_half); mm_out_lo = MM_SHIFTDIV255_epi16(mm_out_lo); @@ -177,7 +179,7 @@ ImagingAlphaComposite(Imaging imDst, Imaging imSrc) _mm_mullo_epi16(mm_src_hi, _mm_shuffle_epi8(mm_coef1, mm_get_hi)), _mm_mullo_epi16(mm_dst_hi, _mm_shuffle_epi8(mm_coef2, mm_get_hi))); mm_out_hi = _mm_or_si128(mm_out_hi, _mm_slli_epi64( - _mm_unpackhi_epi32(mm_outa, _mm_setzero_si128()), 48)); + _mm_unpackhi_epi32(mm_outa, mm_zero), 48)); mm_out_hi = _mm_add_epi32(mm_out_hi, mm_half); mm_out_hi = MM_SHIFTDIV255_epi16(mm_out_hi); From 786fd3d64d5cfa887be260327c646981a66c93ce Mon Sep 17 00:00:00 2001 From: homm Date: Sun, 3 Jul 2016 23:46:33 +0300 Subject: [PATCH 6/8] fix bugs --- src/libImaging/AlphaComposite.c | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/src/libImaging/AlphaComposite.c b/src/libImaging/AlphaComposite.c index 5a9ce195e..5e67b13c5 100644 --- a/src/libImaging/AlphaComposite.c +++ b/src/libImaging/AlphaComposite.c @@ -40,7 +40,7 @@ ImagingAlphaComposite(Imaging imDst, Imaging imSrc) __m128i mm_max_alpha = _mm_set1_epi32(255); __m128i mm_max_alpha2 = _mm_set1_epi32(255 * 255); __m128i mm_zero = _mm_setzero_si128(); - __m128i mm_half = _mm_set1_epi32(128); + __m128i mm_half = _mm_set1_epi16(128); __m128i mm_get_lo = _mm_set_epi8( -1,-1, 5,4, 5,4, 5,4, -1,-1, 1,0, 1,0, 1,0); __m128i mm_get_hi = _mm_set_epi8( @@ -49,7 +49,7 @@ ImagingAlphaComposite(Imaging imDst, Imaging imSrc) __m256i vmm_max_alpha = _mm256_set1_epi32(255); __m256i vmm_max_alpha2 = _mm256_set1_epi32(255 * 255); __m256i vmm_zero = _mm256_setzero_si256(); - __m256i vmm_half = _mm256_set1_epi32(128); + __m256i vmm_half = _mm256_set1_epi16(128); __m256i vmm_get_lo = _mm256_set_epi8( -1,-1, 5,4, 5,4, 5,4, -1,-1, 1,0, 1,0, 1,0, -1,-1, 5,4, 5,4, 5,4, -1,-1, 1,0, 1,0, 1,0); @@ -105,7 +105,7 @@ ImagingAlphaComposite(Imaging imDst, Imaging imSrc) __m256i mm_blend = _mm256_mullo_epi16(mm_dst_a, _mm256_sub_epi32(vmm_max_alpha, mm_src_a)); // outa = src->a * 255 + dst->a * (255 - src->a); 16 bits __m256i mm_outa = _mm256_add_epi32(_mm256_mullo_epi16(mm_src_a, vmm_max_alpha), mm_blend); - __m256i mm_coef1 = _mm256_mullo_epi16(mm_src_a, vmm_max_alpha2); + __m256i mm_coef1 = _mm256_mullo_epi32(mm_src_a, vmm_max_alpha2); // 8 bits mm_coef1 = _mm256_cvtps_epi32(_mm256_div_ps(_mm256_cvtepi32_ps(mm_coef1), _mm256_cvtepi32_ps(mm_outa))); // 8 bits @@ -116,7 +116,7 @@ ImagingAlphaComposite(Imaging imDst, Imaging imSrc) _mm256_mullo_epi16(mm_dst_lo, _mm256_shuffle_epi8(mm_coef2, vmm_get_lo))); mm_out_lo = _mm256_or_si256(mm_out_lo, _mm256_slli_epi64( _mm256_unpacklo_epi32(mm_outa, vmm_zero), 48)); - mm_out_lo = _mm256_add_epi32(mm_out_lo, vmm_half); + mm_out_lo = _mm256_add_epi16(mm_out_lo, vmm_half); mm_out_lo = MM_SHIFTDIV255_epi16(mm_out_lo); __m256i mm_out_hi = _mm256_add_epi16( @@ -124,7 +124,7 @@ ImagingAlphaComposite(Imaging imDst, Imaging imSrc) _mm256_mullo_epi16(mm_dst_hi, _mm256_shuffle_epi8(mm_coef2, vmm_get_hi))); mm_out_hi = _mm256_or_si256(mm_out_hi, _mm256_slli_epi64( _mm256_unpackhi_epi32(mm_outa, vmm_zero), 48)); - mm_out_hi = _mm256_add_epi32(mm_out_hi, vmm_half); + mm_out_hi = _mm256_add_epi16(mm_out_hi, 
vmm_half); mm_out_hi = MM_SHIFTDIV255_epi16(mm_out_hi); _mm256_storeu_si256((__m256i *) &out[x], @@ -158,10 +158,11 @@ ImagingAlphaComposite(Imaging imDst, Imaging imSrc) // blend = dst->a * (255 - src->a) // [16] xx b3 xx b2 xx b1 xx b0 __m128i mm_blend = _mm_mullo_epi16(mm_dst_a, _mm_sub_epi32(mm_max_alpha, mm_src_a)); - // outa = src->a * 255 + dst->a * 255 - src->a * dst->a + // outa = src->a * 255 + blend // [16] xx a3 xx a2 xx a1 xx a0 __m128i mm_outa = _mm_add_epi32(_mm_mullo_epi16(mm_src_a, mm_max_alpha), mm_blend); - __m128i mm_coef1 = _mm_mullo_epi16(mm_src_a, mm_max_alpha2); + // coef1 = src->a * 255 * 255 / outa + __m128i mm_coef1 = _mm_mullo_epi32(mm_src_a, mm_max_alpha2); // [8] xx xx xx c3 xx xx xx c2 xx xx xx c1 xx xx xx c0 mm_coef1 = _mm_cvtps_epi32(_mm_div_ps(_mm_cvtepi32_ps(mm_coef1), _mm_cvtepi32_ps(mm_outa))); // [8] xx xx xx c3 xx xx xx c2 xx xx xx c1 xx xx xx c0 @@ -172,7 +173,7 @@ ImagingAlphaComposite(Imaging imDst, Imaging imSrc) _mm_mullo_epi16(mm_dst_lo, _mm_shuffle_epi8(mm_coef2, mm_get_lo))); mm_out_lo = _mm_or_si128(mm_out_lo, _mm_slli_epi64( _mm_unpacklo_epi32(mm_outa, mm_zero), 48)); - mm_out_lo = _mm_add_epi32(mm_out_lo, mm_half); + mm_out_lo = _mm_add_epi16(mm_out_lo, mm_half); mm_out_lo = MM_SHIFTDIV255_epi16(mm_out_lo); __m128i mm_out_hi = _mm_add_epi16( @@ -180,7 +181,7 @@ ImagingAlphaComposite(Imaging imDst, Imaging imSrc) _mm_mullo_epi16(mm_dst_hi, _mm_shuffle_epi8(mm_coef2, mm_get_hi))); mm_out_hi = _mm_or_si128(mm_out_hi, _mm_slli_epi64( _mm_unpackhi_epi32(mm_outa, mm_zero), 48)); - mm_out_hi = _mm_add_epi32(mm_out_hi, mm_half); + mm_out_hi = _mm_add_epi16(mm_out_hi, mm_half); mm_out_hi = MM_SHIFTDIV255_epi16(mm_out_hi); _mm_storeu_si128((__m128i *) &out[x], From cae99973db7ff16c4de7a6c42477ca3fb7544f54 Mon Sep 17 00:00:00 2001 From: homm Date: Fri, 23 Sep 2016 14:52:54 +0300 Subject: [PATCH 7/8] move declarations to beginning of the blocks --- src/libImaging/AlphaComposite.c | 80 +++++++++++++++++++-------------- 1 file changed, 46 insertions(+), 34 deletions(-) diff --git a/src/libImaging/AlphaComposite.c b/src/libImaging/AlphaComposite.c index 5e67b13c5..e968c9a8e 100644 --- a/src/libImaging/AlphaComposite.c +++ b/src/libImaging/AlphaComposite.c @@ -90,40 +90,46 @@ ImagingAlphaComposite(Imaging imDst, Imaging imSrc) _mm256_srli_epi16(_mm256_add_epi16(src, _mm256_srli_epi16(src, 8)), 8) for (; x < xsize - 7; x += 8) { - __m256i mm_dst = _mm256_loadu_si256((__m256i *) &dst[x]); - __m256i mm_dst_lo = _mm256_unpacklo_epi8(mm_dst, vmm_zero); - __m256i mm_dst_hi = _mm256_unpackhi_epi8(mm_dst, vmm_zero); - __m256i mm_src = _mm256_loadu_si256((__m256i *) &src[x]); - __m256i mm_src_lo = _mm256_unpacklo_epi8(mm_src, vmm_zero); - __m256i mm_src_hi = _mm256_unpackhi_epi8(mm_src, vmm_zero); + __m256i mm_dst, mm_dst_lo, mm_dst_hi; + __m256i mm_src, mm_src_lo, mm_src_hi; + __m256i mm_dst_a, mm_src_a, mm_out_a, mm_blend; + __m256i mm_coef1, mm_coef2, mm_out_lo, mm_out_hi; - __m256i mm_dst_a = _mm256_srli_epi32(mm_dst, 24); - __m256i mm_src_a = _mm256_srli_epi32(mm_src, 24); + mm_dst = _mm256_loadu_si256((__m256i *) &dst[x]); + mm_dst_lo = _mm256_unpacklo_epi8(mm_dst, vmm_zero); + mm_dst_hi = _mm256_unpackhi_epi8(mm_dst, vmm_zero); + mm_src = _mm256_loadu_si256((__m256i *) &src[x]); + mm_src_lo = _mm256_unpacklo_epi8(mm_src, vmm_zero); + mm_src_hi = _mm256_unpackhi_epi8(mm_src, vmm_zero); + + mm_dst_a = _mm256_srli_epi32(mm_dst, 24); + mm_src_a = _mm256_srli_epi32(mm_src, 24); // Compute coefficients // blend = dst->a * (255 - src->a); 16 bits - __m256i mm_blend 
= _mm256_mullo_epi16(mm_dst_a, _mm256_sub_epi32(vmm_max_alpha, mm_src_a)); + mm_blend = _mm256_mullo_epi16(mm_dst_a, _mm256_sub_epi32(vmm_max_alpha, mm_src_a)); // outa = src->a * 255 + dst->a * (255 - src->a); 16 bits - __m256i mm_outa = _mm256_add_epi32(_mm256_mullo_epi16(mm_src_a, vmm_max_alpha), mm_blend); - __m256i mm_coef1 = _mm256_mullo_epi32(mm_src_a, vmm_max_alpha2); + mm_out_a = _mm256_add_epi32(_mm256_mullo_epi16(mm_src_a, vmm_max_alpha), mm_blend); + mm_coef1 = _mm256_mullo_epi32(mm_src_a, vmm_max_alpha2); // 8 bits - mm_coef1 = _mm256_cvtps_epi32(_mm256_div_ps(_mm256_cvtepi32_ps(mm_coef1), _mm256_cvtepi32_ps(mm_outa))); + mm_coef1 = _mm256_cvtps_epi32(_mm256_div_ps(_mm256_cvtepi32_ps(mm_coef1), + _mm256_cvtepi32_ps(mm_out_a))); // 8 bits - __m256i mm_coef2 = _mm256_sub_epi32(vmm_max_alpha, mm_coef1); + mm_coef2 = _mm256_sub_epi32(vmm_max_alpha, mm_coef1); - __m256i mm_out_lo = _mm256_add_epi16( + mm_out_lo = _mm256_add_epi16( _mm256_mullo_epi16(mm_src_lo, _mm256_shuffle_epi8(mm_coef1, vmm_get_lo)), _mm256_mullo_epi16(mm_dst_lo, _mm256_shuffle_epi8(mm_coef2, vmm_get_lo))); mm_out_lo = _mm256_or_si256(mm_out_lo, _mm256_slli_epi64( - _mm256_unpacklo_epi32(mm_outa, vmm_zero), 48)); + _mm256_unpacklo_epi32(mm_out_a, vmm_zero), 48)); mm_out_lo = _mm256_add_epi16(mm_out_lo, vmm_half); mm_out_lo = MM_SHIFTDIV255_epi16(mm_out_lo); - __m256i mm_out_hi = _mm256_add_epi16( + mm_out_hi = _mm256_add_epi16( _mm256_mullo_epi16(mm_src_hi, _mm256_shuffle_epi8(mm_coef1, vmm_get_hi)), _mm256_mullo_epi16(mm_dst_hi, _mm256_shuffle_epi8(mm_coef2, vmm_get_hi))); mm_out_hi = _mm256_or_si256(mm_out_hi, _mm256_slli_epi64( - _mm256_unpackhi_epi32(mm_outa, vmm_zero), 48)); + _mm256_unpackhi_epi32(mm_out_a, vmm_zero), 48)); mm_out_hi = _mm256_add_epi16(mm_out_hi, vmm_half); mm_out_hi = MM_SHIFTDIV255_epi16(mm_out_hi); @@ -139,48 +145,54 @@ ImagingAlphaComposite(Imaging imDst, Imaging imSrc) _mm_srli_epi16(_mm_add_epi16(src, _mm_srli_epi16(src, 8)), 8) for (; x < xsize - 3; x += 4) { + __m128i mm_dst, mm_dst_lo, mm_dst_hi; + __m128i mm_src, mm_src_hi, mm_src_lo; + __m128i mm_dst_a, mm_src_a, mm_out_a, mm_blend; + __m128i mm_coef1, mm_coef2, mm_out_lo, mm_out_hi; + // [8] a3 b3 g3 r3 a2 b2 g2 r2 a1 b1 g1 r1 a0 b0 g0 r0 - __m128i mm_dst = _mm_loadu_si128((__m128i *) &dst[x]); + mm_dst = _mm_loadu_si128((__m128i *) &dst[x]); // [16] a1 b1 g1 r1 a0 b0 g0 r0 - __m128i mm_dst_lo = _mm_unpacklo_epi8(mm_dst, mm_zero); + mm_dst_lo = _mm_unpacklo_epi8(mm_dst, mm_zero); // [16] a3 b3 g3 r3 a2 b2 g2 r2 - __m128i mm_dst_hi = _mm_unpackhi_epi8(mm_dst, mm_zero); + mm_dst_hi = _mm_unpackhi_epi8(mm_dst, mm_zero); // [8] a3 b3 g3 r3 a2 b2 g2 r2 a1 b1 g1 r1 a0 b0 g0 r0 - __m128i mm_src = _mm_loadu_si128((__m128i *) &src[x]); - __m128i mm_src_lo = _mm_unpacklo_epi8(mm_src, mm_zero); - __m128i mm_src_hi = _mm_unpackhi_epi8(mm_src, mm_zero); + mm_src = _mm_loadu_si128((__m128i *) &src[x]); + mm_src_lo = _mm_unpacklo_epi8(mm_src, mm_zero); + mm_src_hi = _mm_unpackhi_epi8(mm_src, mm_zero); // [32] a3 a2 a1 a0 - __m128i mm_dst_a = _mm_srli_epi32(mm_dst, 24); - __m128i mm_src_a = _mm_srli_epi32(mm_src, 24); + mm_dst_a = _mm_srli_epi32(mm_dst, 24); + mm_src_a = _mm_srli_epi32(mm_src, 24); // Compute coefficients // blend = dst->a * (255 - src->a) // [16] xx b3 xx b2 xx b1 xx b0 - __m128i mm_blend = _mm_mullo_epi16(mm_dst_a, _mm_sub_epi32(mm_max_alpha, mm_src_a)); + mm_blend = _mm_mullo_epi16(mm_dst_a, _mm_sub_epi32(mm_max_alpha, mm_src_a)); // outa = src->a * 255 + blend // [16] xx a3 xx a2 xx a1 xx a0 - __m128i mm_outa = 
_mm_add_epi32(_mm_mullo_epi16(mm_src_a, mm_max_alpha), mm_blend); + mm_out_a = _mm_add_epi32(_mm_mullo_epi16(mm_src_a, mm_max_alpha), mm_blend); // coef1 = src->a * 255 * 255 / outa - __m128i mm_coef1 = _mm_mullo_epi32(mm_src_a, mm_max_alpha2); + mm_coef1 = _mm_mullo_epi32(mm_src_a, mm_max_alpha2); // [8] xx xx xx c3 xx xx xx c2 xx xx xx c1 xx xx xx c0 - mm_coef1 = _mm_cvtps_epi32(_mm_div_ps(_mm_cvtepi32_ps(mm_coef1), _mm_cvtepi32_ps(mm_outa))); + mm_coef1 = _mm_cvtps_epi32(_mm_div_ps(_mm_cvtepi32_ps(mm_coef1), + _mm_cvtepi32_ps(mm_out_a))); // [8] xx xx xx c3 xx xx xx c2 xx xx xx c1 xx xx xx c0 - __m128i mm_coef2 = _mm_sub_epi32(mm_max_alpha, mm_coef1); + mm_coef2 = _mm_sub_epi32(mm_max_alpha, mm_coef1); - __m128i mm_out_lo = _mm_add_epi16( + mm_out_lo = _mm_add_epi16( _mm_mullo_epi16(mm_src_lo, _mm_shuffle_epi8(mm_coef1, mm_get_lo)), _mm_mullo_epi16(mm_dst_lo, _mm_shuffle_epi8(mm_coef2, mm_get_lo))); mm_out_lo = _mm_or_si128(mm_out_lo, _mm_slli_epi64( - _mm_unpacklo_epi32(mm_outa, mm_zero), 48)); + _mm_unpacklo_epi32(mm_out_a, mm_zero), 48)); mm_out_lo = _mm_add_epi16(mm_out_lo, mm_half); mm_out_lo = MM_SHIFTDIV255_epi16(mm_out_lo); - __m128i mm_out_hi = _mm_add_epi16( + mm_out_hi = _mm_add_epi16( _mm_mullo_epi16(mm_src_hi, _mm_shuffle_epi8(mm_coef1, mm_get_hi)), _mm_mullo_epi16(mm_dst_hi, _mm_shuffle_epi8(mm_coef2, mm_get_hi))); mm_out_hi = _mm_or_si128(mm_out_hi, _mm_slli_epi64( - _mm_unpackhi_epi32(mm_outa, mm_zero), 48)); + _mm_unpackhi_epi32(mm_out_a, mm_zero), 48)); mm_out_hi = _mm_add_epi16(mm_out_hi, mm_half); mm_out_hi = MM_SHIFTDIV255_epi16(mm_out_hi); From c76f541dad8a95ed696af7a94c3c0bfaa7479a5e Mon Sep 17 00:00:00 2001 From: Alexander Date: Mon, 23 Jan 2017 04:56:37 +0300 Subject: [PATCH 8/8] fast div aproximation --- src/libImaging/AlphaComposite.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/src/libImaging/AlphaComposite.c b/src/libImaging/AlphaComposite.c index e968c9a8e..df1b59b75 100644 --- a/src/libImaging/AlphaComposite.c +++ b/src/libImaging/AlphaComposite.c @@ -112,8 +112,10 @@ ImagingAlphaComposite(Imaging imDst, Imaging imSrc) mm_out_a = _mm256_add_epi32(_mm256_mullo_epi16(mm_src_a, vmm_max_alpha), mm_blend); mm_coef1 = _mm256_mullo_epi32(mm_src_a, vmm_max_alpha2); // 8 bits - mm_coef1 = _mm256_cvtps_epi32(_mm256_div_ps(_mm256_cvtepi32_ps(mm_coef1), - _mm256_cvtepi32_ps(mm_out_a))); + mm_coef1 = _mm256_cvtps_epi32( + _mm256_mul_ps(_mm256_cvtepi32_ps(mm_coef1), + _mm256_rcp_ps(_mm256_cvtepi32_ps(mm_out_a))) + ); // 8 bits mm_coef2 = _mm256_sub_epi32(vmm_max_alpha, mm_coef1); @@ -175,8 +177,10 @@ ImagingAlphaComposite(Imaging imDst, Imaging imSrc) // coef1 = src->a * 255 * 255 / outa mm_coef1 = _mm_mullo_epi32(mm_src_a, mm_max_alpha2); // [8] xx xx xx c3 xx xx xx c2 xx xx xx c1 xx xx xx c0 - mm_coef1 = _mm_cvtps_epi32(_mm_div_ps(_mm_cvtepi32_ps(mm_coef1), - _mm_cvtepi32_ps(mm_out_a))); + mm_coef1 = _mm_cvtps_epi32( + _mm_mul_ps(_mm_cvtepi32_ps(mm_coef1), + _mm_rcp_ps(_mm_cvtepi32_ps(mm_out_a))) + ); // [8] xx xx xx c3 xx xx xx c2 xx xx xx c1 xx xx xx c0 mm_coef2 = _mm_sub_epi32(mm_max_alpha, mm_coef1);
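
Note: the last patch swaps the exact single-precision division for a multiply by the hardware reciprocal estimate (_mm_rcp_ps / _mm256_rcp_ps). The estimate carries a relative error of at most 1.5 * 2^-12, so the rounded coefficient can occasionally land one unit away from the exact quotient; since coef1 is an 8-bit weight, that shifts an output channel by roughly one level at most. If exact results were ever required, one Newton-Raphson step on the reciprocal would restore close to full single precision. The sketch below shows the three variants side by side for the SSE path; the helper names and the sample values in main are illustrative and not part of the patch.

#include <immintrin.h>
#include <stdio.h>

/* Exact: round(a / b) per 32-bit lane, as in patches 1-7. */
static __m128i coef_div_exact(__m128i a, __m128i b) {
    return _mm_cvtps_epi32(_mm_div_ps(_mm_cvtepi32_ps(a), _mm_cvtepi32_ps(b)));
}

/* Approximate: round(a * rcp(b)), the form introduced by this patch. */
static __m128i coef_div_approx(__m128i a, __m128i b) {
    __m128 rcp = _mm_rcp_ps(_mm_cvtepi32_ps(b));
    return _mm_cvtps_epi32(_mm_mul_ps(_mm_cvtepi32_ps(a), rcp));
}

/* Reciprocal estimate refined by one Newton-Raphson step:
   rcp' = rcp * (2 - b * rcp). */
static __m128i coef_div_refined(__m128i a, __m128i b) {
    __m128 bf  = _mm_cvtepi32_ps(b);
    __m128 rcp = _mm_rcp_ps(bf);
    rcp = _mm_mul_ps(rcp, _mm_sub_ps(_mm_set1_ps(2.0f), _mm_mul_ps(bf, rcp)));
    return _mm_cvtps_epi32(_mm_mul_ps(_mm_cvtepi32_ps(a), rcp));
}

int main(void) {
    /* Example: src->a = 200, dst->a = 100, so outa = 200*255 + 100*(255-200). */
    __m128i a = _mm_set1_epi32(200 * 255 * 255);
    __m128i b = _mm_set1_epi32(200 * 255 + 100 * (255 - 200));
    printf("exact %d  approx %d  refined %d\n",
           _mm_cvtsi128_si32(coef_div_exact(a, b)),
           _mm_cvtsi128_si32(coef_div_approx(a, b)),
           _mm_cvtsi128_si32(coef_div_refined(a, b)));
    return 0;
}
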