Apply clang-format

Aleksandr Karpinskii 2024-08-11 21:06:37 +04:00
parent d1717f3ffc
commit b6fb4d8ff8


@@ -49,148 +49,235 @@ ImagingAlphaComposite(Imaging imDst, Imaging imSrc) {
        x = 0;
#if defined(__AVX2__)
        {
            __m256i vmm_max_alpha = _mm256_set1_epi32(255);
            __m256i vmm_max_alpha2 = _mm256_set1_epi32(255 * 255);
            __m256i vmm_zero = _mm256_setzero_si256();
            __m256i vmm_half = _mm256_set1_epi16(128);
            __m256i vmm_get_lo = _mm256_set_epi8(
                -1,
                -1,
                5,
                4,
                5,
                4,
                5,
                4,
                -1,
                -1,
                1,
                0,
                1,
                0,
                1,
                0,
                -1,
                -1,
                5,
                4,
                5,
                4,
                5,
                4,
                -1,
                -1,
                1,
                0,
                1,
                0,
                1,
                0
            );
            __m256i vmm_get_hi = _mm256_set_epi8(
                -1,
                -1,
                13,
                12,
                13,
                12,
                13,
                12,
                -1,
                -1,
                9,
                8,
                9,
                8,
                9,
                8,
                -1,
                -1,
                13,
                12,
                13,
                12,
                13,
                12,
                -1,
                -1,
                9,
                8,
                9,
                8,
                9,
                8
            );

#define MM_SHIFTDIV255_epi16(src) \
    _mm256_srli_epi16(_mm256_add_epi16(src, _mm256_srli_epi16(src, 8)), 8)

            for (; x < imDst->xsize - 7; x += 8) {
                __m256i mm_dst, mm_dst_lo, mm_dst_hi;
                __m256i mm_src, mm_src_lo, mm_src_hi;
                __m256i mm_dst_a, mm_src_a, mm_out_a, mm_blend;
                __m256i mm_coef1, mm_coef2, mm_out_lo, mm_out_hi;

                mm_dst = _mm256_loadu_si256((__m256i *)&dst[x]);
                mm_dst_lo = _mm256_unpacklo_epi8(mm_dst, vmm_zero);
                mm_dst_hi = _mm256_unpackhi_epi8(mm_dst, vmm_zero);
                mm_src = _mm256_loadu_si256((__m256i *)&src[x]);
                mm_src_lo = _mm256_unpacklo_epi8(mm_src, vmm_zero);
                mm_src_hi = _mm256_unpackhi_epi8(mm_src, vmm_zero);

                mm_dst_a = _mm256_srli_epi32(mm_dst, 24);
                mm_src_a = _mm256_srli_epi32(mm_src, 24);

                // Compute coefficients
                // blend = dst->a * (255 - src->a); 16 bits
                mm_blend = _mm256_mullo_epi16(
                    mm_dst_a, _mm256_sub_epi32(vmm_max_alpha, mm_src_a)
                );
                // outa = src->a * 255 + dst->a * (255 - src->a); 16 bits
                mm_out_a = _mm256_add_epi32(
                    _mm256_mullo_epi16(mm_src_a, vmm_max_alpha), mm_blend
                );
                mm_coef1 = _mm256_mullo_epi32(mm_src_a, vmm_max_alpha2);
                // 8 bits
                mm_coef1 = _mm256_cvtps_epi32(_mm256_mul_ps(
                    _mm256_cvtepi32_ps(mm_coef1),
                    _mm256_rcp_ps(_mm256_cvtepi32_ps(mm_out_a))
                ));
                // 8 bits
                mm_coef2 = _mm256_sub_epi32(vmm_max_alpha, mm_coef1);

                mm_out_lo = _mm256_add_epi16(
                    _mm256_mullo_epi16(
                        mm_src_lo, _mm256_shuffle_epi8(mm_coef1, vmm_get_lo)
                    ),
                    _mm256_mullo_epi16(
                        mm_dst_lo, _mm256_shuffle_epi8(mm_coef2, vmm_get_lo)
                    )
                );
                mm_out_lo = _mm256_or_si256(
                    mm_out_lo,
                    _mm256_slli_epi64(_mm256_unpacklo_epi32(mm_out_a, vmm_zero), 48)
                );
                mm_out_lo = _mm256_add_epi16(mm_out_lo, vmm_half);
                mm_out_lo = MM_SHIFTDIV255_epi16(mm_out_lo);

                mm_out_hi = _mm256_add_epi16(
                    _mm256_mullo_epi16(
                        mm_src_hi, _mm256_shuffle_epi8(mm_coef1, vmm_get_hi)
                    ),
                    _mm256_mullo_epi16(
                        mm_dst_hi, _mm256_shuffle_epi8(mm_coef2, vmm_get_hi)
                    )
                );
                mm_out_hi = _mm256_or_si256(
                    mm_out_hi,
                    _mm256_slli_epi64(_mm256_unpackhi_epi32(mm_out_a, vmm_zero), 48)
                );
                mm_out_hi = _mm256_add_epi16(mm_out_hi, vmm_half);
                mm_out_hi = MM_SHIFTDIV255_epi16(mm_out_hi);

                _mm256_storeu_si256(
                    (__m256i *)&out[x], _mm256_packus_epi16(mm_out_lo, mm_out_hi)
                );
            }

#undef MM_SHIFTDIV255_epi16
        }
#endif
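
        /*
         * The AVX2 path above and the SSE4 path below avoid an integer
         * division when computing coef1 = src->a * 255 * 255 / outa: the
         * operands are converted to float and multiplied by an approximate
         * reciprocal (_mm256_rcp_ps / _mm_rcp_ps, maximum relative error
         * 1.5 * 2^-12 per Intel's documentation), which is accurate enough
         * for a coefficient that only has to fit in 8 bits.
         */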
#if defined(__SSE4__)
        {
            __m128i mm_max_alpha = _mm_set1_epi32(255);
            __m128i mm_max_alpha2 = _mm_set1_epi32(255 * 255);
            __m128i mm_zero = _mm_setzero_si128();
            __m128i mm_half = _mm_set1_epi16(128);
            __m128i mm_get_lo =
                _mm_set_epi8(-1, -1, 5, 4, 5, 4, 5, 4, -1, -1, 1, 0, 1, 0, 1, 0);
            __m128i mm_get_hi =
                _mm_set_epi8(-1, -1, 13, 12, 13, 12, 13, 12, -1, -1, 9, 8, 9, 8, 9, 8);

#define MM_SHIFTDIV255_epi16(src) \
    _mm_srli_epi16(_mm_add_epi16(src, _mm_srli_epi16(src, 8)), 8)

            for (; x < imDst->xsize - 3; x += 4) {
                __m128i mm_dst, mm_dst_lo, mm_dst_hi;
                __m128i mm_src, mm_src_hi, mm_src_lo;
                __m128i mm_dst_a, mm_src_a, mm_out_a, mm_blend;
                __m128i mm_coef1, mm_coef2, mm_out_lo, mm_out_hi;

                // [8] a3 b3 g3 r3 a2 b2 g2 r2 a1 b1 g1 r1 a0 b0 g0 r0
                mm_dst = _mm_loadu_si128((__m128i *)&dst[x]);
                // [16] a1 b1 g1 r1 a0 b0 g0 r0
                mm_dst_lo = _mm_unpacklo_epi8(mm_dst, mm_zero);
                // [16] a3 b3 g3 r3 a2 b2 g2 r2
                mm_dst_hi = _mm_unpackhi_epi8(mm_dst, mm_zero);
                // [8] a3 b3 g3 r3 a2 b2 g2 r2 a1 b1 g1 r1 a0 b0 g0 r0
                mm_src = _mm_loadu_si128((__m128i *)&src[x]);
                mm_src_lo = _mm_unpacklo_epi8(mm_src, mm_zero);
                mm_src_hi = _mm_unpackhi_epi8(mm_src, mm_zero);

                // [32] a3 a2 a1 a0
                mm_dst_a = _mm_srli_epi32(mm_dst, 24);
                mm_src_a = _mm_srli_epi32(mm_src, 24);

                // Compute coefficients
                // blend = dst->a * (255 - src->a)
                // [16] xx b3 xx b2 xx b1 xx b0
                mm_blend =
                    _mm_mullo_epi16(mm_dst_a, _mm_sub_epi32(mm_max_alpha, mm_src_a));
                // outa = src->a * 255 + blend
                // [16] xx a3 xx a2 xx a1 xx a0
                mm_out_a =
                    _mm_add_epi32(_mm_mullo_epi16(mm_src_a, mm_max_alpha), mm_blend);
                // coef1 = src->a * 255 * 255 / outa
                mm_coef1 = _mm_mullo_epi32(mm_src_a, mm_max_alpha2);
                // [8] xx xx xx c3 xx xx xx c2 xx xx xx c1 xx xx xx c0
                mm_coef1 = _mm_cvtps_epi32(_mm_mul_ps(
                    _mm_cvtepi32_ps(mm_coef1), _mm_rcp_ps(_mm_cvtepi32_ps(mm_out_a))
                ));
                // [8] xx xx xx c3 xx xx xx c2 xx xx xx c1 xx xx xx c0
                mm_coef2 = _mm_sub_epi32(mm_max_alpha, mm_coef1);

                mm_out_lo = _mm_add_epi16(
                    _mm_mullo_epi16(mm_src_lo, _mm_shuffle_epi8(mm_coef1, mm_get_lo)),
                    _mm_mullo_epi16(mm_dst_lo, _mm_shuffle_epi8(mm_coef2, mm_get_lo))
                );
                mm_out_lo = _mm_or_si128(
                    mm_out_lo, _mm_slli_epi64(_mm_unpacklo_epi32(mm_out_a, mm_zero), 48)
                );
                mm_out_lo = _mm_add_epi16(mm_out_lo, mm_half);
                mm_out_lo = MM_SHIFTDIV255_epi16(mm_out_lo);

                mm_out_hi = _mm_add_epi16(
                    _mm_mullo_epi16(mm_src_hi, _mm_shuffle_epi8(mm_coef1, mm_get_hi)),
                    _mm_mullo_epi16(mm_dst_hi, _mm_shuffle_epi8(mm_coef2, mm_get_hi))
                );
                mm_out_hi = _mm_or_si128(
                    mm_out_hi, _mm_slli_epi64(_mm_unpackhi_epi32(mm_out_a, mm_zero), 48)
                );
                mm_out_hi = _mm_add_epi16(mm_out_hi, mm_half);
                mm_out_hi = MM_SHIFTDIV255_epi16(mm_out_hi);

                _mm_storeu_si128(
                    (__m128i *)&out[x], _mm_packus_epi16(mm_out_lo, mm_out_hi)
                );
            }
#undef MM_SHIFTDIV255_epi16
        }
#endif
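
        /*
         * In both vector paths the un-normalized alpha (outa = src->a * 255 +
         * blend) is shifted into the 16-bit alpha slot of each pixel and
         * OR-ed in before the rounding half is added; the alpha lanes of the
         * get_lo/get_hi shuffle masks are -1 (zeroed), so the slot is free,
         * and a single MM_SHIFTDIV255_epi16 pass divides the color channels
         * and the alpha by 255 at once, mirroring the scalar
         * SHIFTFORDIV255(outa255 + 0x80) below.
         */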
        for (; x < imDst->xsize; x += 1) {
@@ -207,15 +294,18 @@ ImagingAlphaComposite(Imaging imDst, Imaging imSrc) {
            UINT32 outa255 = src[x].a * 255 + blend;
            // There we use 7 bits for precision.
            // We could use more, but we go beyond 32 bits.
            UINT32 coef1 = src[x].a * 255 * 255 * (1 << PRECISION_BITS) / outa255;
            UINT32 coef2 = 255 * (1 << PRECISION_BITS) - coef1;

            tmpr = src[x].r * coef1 + dst[x].r * coef2;
            tmpg = src[x].g * coef1 + dst[x].g * coef2;
            tmpb = src[x].b * coef1 + dst[x].b * coef2;
            out[x].r =
                SHIFTFORDIV255(tmpr + (0x80 << PRECISION_BITS)) >> PRECISION_BITS;
            out[x].g =
                SHIFTFORDIV255(tmpg + (0x80 << PRECISION_BITS)) >> PRECISION_BITS;
            out[x].b =
                SHIFTFORDIV255(tmpb + (0x80 << PRECISION_BITS)) >> PRECISION_BITS;
            out[x].a = SHIFTFORDIV255(outa255 + 0x80);
        }
    }
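
Two details in the arithmetic above can be verified independently. SHIFTFORDIV255 and the MM_SHIFTDIV255_epi16 macros replace division by 255 with the shift trick (v + (v >> 8)) >> 8; with the 0x80 rounding bias added first, this matches round-to-nearest division exactly over the full range a product of two 8-bit values can take. A minimal standalone check (a hypothetical test harness, not part of the commit; the macro is restated from the shift pattern in the diff):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

// Same shift pattern as SHIFTFORDIV255 / MM_SHIFTDIV255_epi16 above:
// divide by 255 with an add and two shifts instead of a division.
#define SHIFTFORDIV255(a) ((((a) >> 8) + (a)) >> 8)

int main(void) {
    for (uint32_t x = 0; x <= 255 * 255; x++) {
        uint32_t approx = SHIFTFORDIV255(x + 0x80);
        uint32_t exact = (x + 127) / 255;  // round-to-nearest of x / 255
        assert(approx == exact);
    }
    printf("shift-based /255 is exact on [0, 255 * 255]\n");
    return 0;
}

The precision comment in the scalar tail also checks out: with the 7 bits it mentions, the largest intermediate, 255 * 255 * 255 << 7 = 2,122,416,000, still fits in a UINT32, while two more bits (<< 9) would overflow 32 bits.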