From f7f4e9e448909ce7e68208c9c761293ed048f2a9 Mon Sep 17 00:00:00 2001
From: homm
Date: Sun, 3 Jul 2016 23:46:33 +0300
Subject: [PATCH] SIMD AlphaComposite. fix bugs

---
 src/libImaging/AlphaComposite.c | 19 ++++++++++---------
 1 file changed, 10 insertions(+), 9 deletions(-)

diff --git a/src/libImaging/AlphaComposite.c b/src/libImaging/AlphaComposite.c
index effa69829..52fce4e47 100644
--- a/src/libImaging/AlphaComposite.c
+++ b/src/libImaging/AlphaComposite.c
@@ -35,7 +35,7 @@ ImagingAlphaComposite(Imaging imDst, Imaging imSrc) {
     __m128i mm_max_alpha = _mm_set1_epi32(255);
     __m128i mm_max_alpha2 = _mm_set1_epi32(255 * 255);
     __m128i mm_zero = _mm_setzero_si128();
-    __m128i mm_half = _mm_set1_epi32(128);
+    __m128i mm_half = _mm_set1_epi16(128);
     __m128i mm_get_lo = _mm_set_epi8(
         -1,-1, 5,4, 5,4, 5,4, -1,-1, 1,0, 1,0, 1,0);
     __m128i mm_get_hi = _mm_set_epi8(
@@ -44,7 +44,7 @@ ImagingAlphaComposite(Imaging imDst, Imaging imSrc) {
     __m256i vmm_max_alpha = _mm256_set1_epi32(255);
     __m256i vmm_max_alpha2 = _mm256_set1_epi32(255 * 255);
     __m256i vmm_zero = _mm256_setzero_si256();
-    __m256i vmm_half = _mm256_set1_epi32(128);
+    __m256i vmm_half = _mm256_set1_epi16(128);
     __m256i vmm_get_lo = _mm256_set_epi8(
         -1,-1, 5,4, 5,4, 5,4, -1,-1, 1,0, 1,0, 1,0,
         -1,-1, 5,4, 5,4, 5,4, -1,-1, 1,0, 1,0, 1,0);
@@ -99,7 +99,7 @@ ImagingAlphaComposite(Imaging imDst, Imaging imSrc) {
             __m256i mm_blend = _mm256_mullo_epi16(mm_dst_a, _mm256_sub_epi32(vmm_max_alpha, mm_src_a));
             // outa = src->a * 255 + dst->a * (255 - src->a); 16 bits
             __m256i mm_outa = _mm256_add_epi32(_mm256_mullo_epi16(mm_src_a, vmm_max_alpha), mm_blend);
-            __m256i mm_coef1 = _mm256_mullo_epi16(mm_src_a, vmm_max_alpha2);
+            __m256i mm_coef1 = _mm256_mullo_epi32(mm_src_a, vmm_max_alpha2);
             // 8 bits
             mm_coef1 = _mm256_cvtps_epi32(_mm256_div_ps(_mm256_cvtepi32_ps(mm_coef1), _mm256_cvtepi32_ps(mm_outa)));
             // 8 bits
@@ -110,7 +110,7 @@ ImagingAlphaComposite(Imaging imDst, Imaging imSrc) {
                 _mm256_mullo_epi16(mm_dst_lo, _mm256_shuffle_epi8(mm_coef2, vmm_get_lo)));
             mm_out_lo = _mm256_or_si256(mm_out_lo, _mm256_slli_epi64(
                 _mm256_unpacklo_epi32(mm_outa, vmm_zero), 48));
-            mm_out_lo = _mm256_add_epi32(mm_out_lo, vmm_half);
+            mm_out_lo = _mm256_add_epi16(mm_out_lo, vmm_half);
             mm_out_lo = MM_SHIFTDIV255_epi16(mm_out_lo);

             __m256i mm_out_hi = _mm256_add_epi16(
@@ -118,7 +118,7 @@ ImagingAlphaComposite(Imaging imDst, Imaging imSrc) {
                 _mm256_mullo_epi16(mm_dst_hi, _mm256_shuffle_epi8(mm_coef2, vmm_get_hi)));
             mm_out_hi = _mm256_or_si256(mm_out_hi, _mm256_slli_epi64(
                 _mm256_unpackhi_epi32(mm_outa, vmm_zero), 48));
-            mm_out_hi = _mm256_add_epi32(mm_out_hi, vmm_half);
+            mm_out_hi = _mm256_add_epi16(mm_out_hi, vmm_half);
             mm_out_hi = MM_SHIFTDIV255_epi16(mm_out_hi);

             _mm256_storeu_si256((__m256i *) &out[x],
@@ -152,10 +152,11 @@ ImagingAlphaComposite(Imaging imDst, Imaging imSrc) {
             // blend = dst->a * (255 - src->a)
             // [16] xx b3 xx b2 xx b1 xx b0
             __m128i mm_blend = _mm_mullo_epi16(mm_dst_a, _mm_sub_epi32(mm_max_alpha, mm_src_a));
-            // outa = src->a * 255 + dst->a * 255 - src->a * dst->a
+            // outa = src->a * 255 + blend
             // [16] xx a3 xx a2 xx a1 xx a0
             __m128i mm_outa = _mm_add_epi32(_mm_mullo_epi16(mm_src_a, mm_max_alpha), mm_blend);
-            __m128i mm_coef1 = _mm_mullo_epi16(mm_src_a, mm_max_alpha2);
+            // coef1 = src->a * 255 * 255 / outa
+            __m128i mm_coef1 = _mm_mullo_epi32(mm_src_a, mm_max_alpha2);
             // [8] xx xx xx c3 xx xx xx c2 xx xx xx c1 xx xx xx c0
             mm_coef1 = _mm_cvtps_epi32(_mm_div_ps(_mm_cvtepi32_ps(mm_coef1), _mm_cvtepi32_ps(mm_outa)));
             // [8] xx xx xx c3 xx xx xx c2 xx xx xx c1 xx xx xx c0
@@ -166,7 +167,7 @@ ImagingAlphaComposite(Imaging imDst, Imaging imSrc) {
                 _mm_mullo_epi16(mm_dst_lo, _mm_shuffle_epi8(mm_coef2, mm_get_lo)));
             mm_out_lo = _mm_or_si128(mm_out_lo, _mm_slli_epi64(
                 _mm_unpacklo_epi32(mm_outa, mm_zero), 48));
-            mm_out_lo = _mm_add_epi32(mm_out_lo, mm_half);
+            mm_out_lo = _mm_add_epi16(mm_out_lo, mm_half);
             mm_out_lo = MM_SHIFTDIV255_epi16(mm_out_lo);

             __m128i mm_out_hi = _mm_add_epi16(
@@ -174,7 +175,7 @@ ImagingAlphaComposite(Imaging imDst, Imaging imSrc) {
                 _mm_mullo_epi16(mm_dst_hi, _mm_shuffle_epi8(mm_coef2, mm_get_hi)));
             mm_out_hi = _mm_or_si128(mm_out_hi, _mm_slli_epi64(
                 _mm_unpackhi_epi32(mm_outa, mm_zero), 48));
-            mm_out_hi = _mm_add_epi32(mm_out_hi, mm_half);
+            mm_out_hi = _mm_add_epi16(mm_out_hi, mm_half);
             mm_out_hi = MM_SHIFTDIV255_epi16(mm_out_hi);

             _mm_storeu_si128((__m128i *) &out[x],
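
For reference, below is a minimal scalar sketch of the per-pixel math the vector code above implements, following the formulas already spelled out in the patch comments (blend, outa, coef1). The rgba8 struct, the alpha_composite_pixel name, the SHIFTDIV255 macro, and the zero-alpha early return are illustrative assumptions, not the library's actual scalar path; the sketch mainly shows why coef1 needs a 32-bit multiply and why the +128 rounding bias has to land in every 16-bit lane.

#include <stdint.h>

/* Illustrative scalar sketch only; names and structure are assumptions. */
typedef struct { uint8_t r, g, b, a; } rgba8;

/* Divide by 255 with rounding: same idea as MM_SHIFTDIV255_epi16. */
#define SHIFTDIV255(a) (((a) + ((a) >> 8)) >> 8)

static rgba8
alpha_composite_pixel(rgba8 dst, rgba8 src)
{
    rgba8 out = {0, 0, 0, 0};

    /* blend = dst->a * (255 - src->a), fits in 16 bits */
    uint32_t blend = dst.a * (255 - src.a);
    /* outa = src->a * 255 + blend == 255 * resulting alpha */
    uint32_t outa = src.a * 255 + blend;
    if (outa == 0) {
        /* both inputs fully transparent (sketch-only guard) */
        return out;
    }

    /* coef1 = src->a * 255 * 255 / outa.  The product src->a * 255 * 255
     * can reach 255^3 and needs 32-bit lanes, which is why the patch
     * replaces _mm*_mullo_epi16 with _mm*_mullo_epi32 for this step. */
    uint32_t coef1 = (uint32_t)src.a * 255 * 255 / outa;
    uint32_t coef2 = 255 - coef1;

    /* Blend each channel, add the 128 rounding bias (the mm_half/vmm_half
     * constant, which must be a 16-bit splat so every lane gets the bias),
     * then divide by 255. */
    out.r = (uint8_t)SHIFTDIV255(src.r * coef1 + dst.r * coef2 + 128);
    out.g = (uint8_t)SHIFTDIV255(src.g * coef1 + dst.g * coef2 + 128);
    out.b = (uint8_t)SHIFTDIV255(src.b * coef1 + dst.b * coef2 + 128);
    out.a = (uint8_t)SHIFTDIV255(outa + 128);
    return out;
}

Since coef1 + coef2 == 255, every biased sum stays below 65536, so the 16-bit lane arithmetic used by the SIMD path does not overflow once the constants and adds use the correct element width.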