Mirror of https://github.com/python-pillow/Pillow.git (synced 2025-08-25 22:54:46 +03:00)

Merge branch 'simd/rgba-convert' into simd/5.3.x

Commit d9cc462106
@@ -35,6 +35,14 @@
 #include "Imaging.h"

+#include <emmintrin.h>
+#include <mmintrin.h>
+#include <smmintrin.h>
+#if defined(__AVX2__)
+    #include <immintrin.h>
+#endif
+
+
 #define MAX(a, b) (a)>(b) ? (a) : (b)
 #define MIN(a, b) (a)<(b) ? (a) : (b)
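Note: the new includes set the baseline for everything below. <smmintrin.h> (SSE4.1) provides _mm_packus_epi32 and _mm_blendv_epi8 and transitively pulls in the SSSE3 intrinsics (_mm_hadd_epi32, _mm_shuffle_epi8) and the SSE2/MMX ones; <immintrin.h> adds the optional AVX2 paths. A sketch of the implied feature ladder, assuming gcc/clang's predefined macros (illustration only, not part of the commit):

    /* gcc and clang define __SSE4_1__ under -msse4.1 and __AVX2__
       under -mavx2; this guard only spells out what this file now
       assumes at compile time. */
    #if defined(__AVX2__)
        /* 256-bit loops compiled in */
    #elif defined(__SSE4_1__)
        /* 128-bit loops only */
    #else
        #error "these conversions require at least SSE4.1"
    #endif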
@@ -208,17 +216,48 @@ rgb2bit(UINT8* out, const UINT8* in, int xsize)
 static void
 rgb2l(UINT8* out, const UINT8* in, int xsize)
 {
-    int x;
-    for (x = 0; x < xsize; x++, in += 4)
+    int x = 0;
+    __m128i coeff = _mm_set_epi16(
+        0, 3735, 19235, 9798, 0, 3735, 19235, 9798);
+    for (; x < xsize - 3; x += 4, in += 16) {
+        __m128i pix0, pix1;
+        __m128i source = _mm_loadu_si128((__m128i*)in);
+        pix0 = _mm_unpacklo_epi8(source, _mm_setzero_si128());
+        pix1 = _mm_unpackhi_epi8(source, _mm_setzero_si128());
+        pix0 = _mm_madd_epi16(pix0, coeff);
+        pix1 = _mm_madd_epi16(pix1, coeff);
+        pix0 = _mm_hadd_epi32(pix0, pix1);
+        pix0 = _mm_srli_epi32(pix0, 15);
+        pix0 = _mm_packus_epi32(pix0, pix0);
+        pix0 = _mm_packus_epi16(pix0, pix0);
+        *(UINT32*)&out[x] = _mm_cvtsi128_si32(pix0);
+    }
+    for (; x < xsize; x++, in += 4)
         /* ITU-R Recommendation 601-2 (assuming nonlinear RGB) */
-        *out++ = L24(in) >> 16;
+        out[x] = L24(in) >> 16;
 }

 static void
 rgb2la(UINT8* out, const UINT8* in, int xsize)
 {
-    int x;
-    for (x = 0; x < xsize; x++, in += 4, out += 4) {
+    int x = 0;
+    __m128i coeff = _mm_set_epi16(
+        0, 3735, 19235, 9798, 0, 3735, 19235, 9798);
+    for (; x < xsize - 3; x += 4, in += 16, out += 16) {
+        __m128i pix0, pix1;
+        __m128i source = _mm_loadu_si128((__m128i*)in);
+        pix0 = _mm_unpacklo_epi8(source, _mm_setzero_si128());
+        pix1 = _mm_unpackhi_epi8(source, _mm_setzero_si128());
+        pix0 = _mm_madd_epi16(pix0, coeff);
+        pix1 = _mm_madd_epi16(pix1, coeff);
+        pix0 = _mm_hadd_epi32(pix0, pix1);
+        pix0 = _mm_srli_epi32(pix0, 15);
+        pix0 = _mm_shuffle_epi8(pix0, _mm_set_epi8(
+            -1,12,12,12, -1,8,8,8, -1,4,4,4, -1,0,0,0));
+        pix0 = _mm_or_si128(pix0, _mm_set1_epi32(0xff000000));
+        _mm_storeu_si128((__m128i*)out, pix0);
+    }
+    for (; x < xsize; x++, in += 4, out += 4) {
         /* ITU-R Recommendation 601-2 (assuming nonlinear RGB) */
         out[0] = out[1] = out[2] = L24(in) >> 16;
         out[3] = 255;
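Note: in rgb2l/rgb2la, _mm_madd_epi16 multiplies the unpacked R,G,B,A words by (9798, 19235, 3735, 0) and sums them pairwise, and _mm_hadd_epi32 completes the per-pixel dot product, so each 32-bit lane holds R*9798 + G*19235 + B*3735 before the shift. The constants are the ITU-R 601-2 weights 0.299/0.587/0.114 scaled by 2^15 and nudged so they sum to exactly 32768; white therefore maps to exactly 255 and the >> 15 result never needs clamping. A scalar sketch of one lane (hypothetical helper, not in the patch):

    #include <stdint.h>

    /* Same arithmetic as one SIMD lane above. Since
       9798 + 19235 + 3735 == 32768, the result stays in 0..255. */
    static uint8_t luma601(const uint8_t px[4])   /* px = R,G,B,A */
    {
        return (uint8_t) ((px[0] * 9798 + px[1] * 19235 + px[2] * 3735) >> 15);
    }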
@@ -228,9 +267,22 @@ rgb2la(UINT8* out, const UINT8* in, int xsize)
 static void
 rgb2i(UINT8* out_, const UINT8* in, int xsize)
 {
-    int x;
+    int x = 0;
     INT32* out = (INT32*) out_;
-    for (x = 0; x < xsize; x++, in += 4)
+    __m128i coeff = _mm_set_epi16(
+        0, 3735, 19235, 9798, 0, 3735, 19235, 9798);
+    for (; x < xsize - 3; x += 4, in += 16, out += 4) {
+        __m128i pix0, pix1;
+        __m128i source = _mm_loadu_si128((__m128i*)in);
+        pix0 = _mm_unpacklo_epi8(source, _mm_setzero_si128());
+        pix1 = _mm_unpackhi_epi8(source, _mm_setzero_si128());
+        pix0 = _mm_madd_epi16(pix0, coeff);
+        pix1 = _mm_madd_epi16(pix1, coeff);
+        pix0 = _mm_hadd_epi32(pix0, pix1);
+        pix0 = _mm_srli_epi32(pix0, 15);
+        _mm_storeu_si128((__m128i*)out, pix0);
+    }
+    for (; x < xsize; x++, in += 4)
         *out++ = L24(in) >> 16;
 }
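Note: rgb2i differs from rgb2l only in its output type. Pillow's "I" mode keeps one 32-bit value per pixel, so the four dot products coming out of _mm_hadd_epi32/_mm_srli_epi32 are written with a single _mm_storeu_si128 and no pack/saturate steps. An equivalent scalar loop (sketch, not in the patch):

    #include <stdint.h>

    /* Same luma as rgb2l, kept at full 32-bit width per pixel. */
    static void rgb2i_ref(int32_t* out, const uint8_t* in, int xsize)
    {
        int x;
        for (x = 0; x < xsize; x++, in += 4)
            *out++ = (in[0] * 9798 + in[1] * 19235 + in[2] * 3735) >> 15;
    }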
@@ -417,8 +469,25 @@ rgb2rgba(UINT8* out, const UINT8* in, int xsize)
 static void
 rgba2la(UINT8* out, const UINT8* in, int xsize)
 {
-    int x;
-    for (x = 0; x < xsize; x++, in += 4, out += 4) {
+    int x = 0;
+    __m128i coeff = _mm_set_epi16(
+        0, 3735, 19235, 9798, 0, 3735, 19235, 9798);
+    for (; x < xsize - 3; x += 4, in += 16, out += 16) {
+        __m128i pix0, pix1;
+        __m128i source = _mm_loadu_si128((__m128i*)in);
+        __m128i alpha = _mm_and_si128(source, _mm_set1_epi32(0xff000000));
+        pix0 = _mm_unpacklo_epi8(source, _mm_setzero_si128());
+        pix1 = _mm_unpackhi_epi8(source, _mm_setzero_si128());
+        pix0 = _mm_madd_epi16(pix0, coeff);
+        pix1 = _mm_madd_epi16(pix1, coeff);
+        pix0 = _mm_hadd_epi32(pix0, pix1);
+        pix0 = _mm_srli_epi32(pix0, 15);
+        pix0 = _mm_shuffle_epi8(pix0, _mm_set_epi8(
+            -1,12,12,12, -1,8,8,8, -1,4,4,4, -1,0,0,0));
+        pix0 = _mm_or_si128(pix0, alpha);
+        _mm_storeu_si128((__m128i*)out, pix0);
+    }
+    for (; x < xsize; x++, in += 4, out += 4) {
         /* ITU-R Recommendation 601-2 (assuming nonlinear RGB) */
         out[0] = out[1] = out[2] = L24(in) >> 16;
         out[3] = in[3];
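Note: the _mm_shuffle_epi8 mask shared by rgb2la and rgba2la broadcasts each 32-bit luma into the three color bytes of its pixel and zeroes the fourth (a pshufb index with the high bit set, written -1 here, always produces 0). The alpha byte is then OR-ed in: a constant 0xff000000 in rgb2la, or the alpha bytes masked out of the source in rgba2la. A standalone demo of the trick (assumed scaffolding, not from the patch):

    #include <smmintrin.h>
    #include <stdio.h>

    int main(void)
    {
        /* four fake luma values in the four 32-bit lanes */
        __m128i luma = _mm_set_epi32(40, 30, 20, 10);
        __m128i pix = _mm_shuffle_epi8(luma, _mm_set_epi8(
            -1,12,12,12, -1,8,8,8, -1,4,4,4, -1,0,0,0));
        unsigned char b[16];
        int i;
        pix = _mm_or_si128(pix, _mm_set1_epi32(0xff000000));
        _mm_storeu_si128((__m128i*)b, pix);
        for (i = 0; i < 16; i += 4)   /* "10 10 10 255" ... "40 40 40 255" */
            printf("%u %u %u %u\n", b[i], b[i+1], b[i+2], b[i+3]);
        return 0;
    }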
@@ -440,14 +509,81 @@ rgba2rgb(UINT8* out, const UINT8* in, int xsize)
 static void
 rgbA2rgba(UINT8* out, const UINT8* in, int xsize)
 {
-    int x;
-    unsigned int alpha, tmp;
-    for (x = 0; x < xsize; x++) {
-        alpha = in[3];
-        *out++ = MULDIV255(*in++, alpha, tmp);
-        *out++ = MULDIV255(*in++, alpha, tmp);
-        *out++ = MULDIV255(*in++, alpha, tmp);
-        *out++ = *in++;
+    unsigned int tmp;
+    unsigned char alpha;
+    int x = 0;
+
+#if defined(__AVX2__)
+
+    __m256i zero = _mm256_setzero_si256();
+    __m256i half = _mm256_set1_epi16(128);
+    __m256i maxalpha = _mm256_set_epi32(
+        0xff000000, 0xff000000, 0xff000000, 0xff000000,
+        0xff000000, 0xff000000, 0xff000000, 0xff000000);
+    __m256i factormask = _mm256_set_epi8(
+        15,15,15,15, 11,11,11,11, 7,7,7,7, 3,3,3,3,
+        15,15,15,15, 11,11,11,11, 7,7,7,7, 3,3,3,3);
+    __m256i factorsource, source, pix1, pix2, factors;
+
+    for (; x < xsize - 7; x += 8) {
+        source = _mm256_loadu_si256((__m256i *) &in[x * 4]);
+        factorsource = _mm256_shuffle_epi8(source, factormask);
+        factorsource = _mm256_or_si256(factorsource, maxalpha);
+
+        pix1 = _mm256_unpacklo_epi8(source, zero);
+        factors = _mm256_unpacklo_epi8(factorsource, zero);
+        pix1 = _mm256_add_epi16(_mm256_mullo_epi16(pix1, factors), half);
+        pix1 = _mm256_add_epi16(pix1, _mm256_srli_epi16(pix1, 8));
+        pix1 = _mm256_srli_epi16(pix1, 8);
+
+        pix2 = _mm256_unpackhi_epi8(source, zero);
+        factors = _mm256_unpackhi_epi8(factorsource, zero);
+        pix2 = _mm256_add_epi16(_mm256_mullo_epi16(pix2, factors), half);
+        pix2 = _mm256_add_epi16(pix2, _mm256_srli_epi16(pix2, 8));
+        pix2 = _mm256_srli_epi16(pix2, 8);
+
+        source = _mm256_packus_epi16(pix1, pix2);
+        _mm256_storeu_si256((__m256i *) &out[x * 4], source);
+    }
+
+#else
+
+    __m128i zero = _mm_setzero_si128();
+    __m128i half = _mm_set1_epi16(128);
+    __m128i maxalpha = _mm_set1_epi32(0xff000000);
+    __m128i factormask = _mm_set_epi8(
+        15,15,15,15, 11,11,11,11, 7,7,7,7, 3,3,3,3);
+    __m128i factorsource, source, pix1, pix2, factors;
+
+    for (; x < xsize - 3; x += 4) {
+        source = _mm_loadu_si128((__m128i *) &in[x * 4]);
+        factorsource = _mm_shuffle_epi8(source, factormask);
+        factorsource = _mm_or_si128(factorsource, maxalpha);
+
+        pix1 = _mm_unpacklo_epi8(source, zero);
+        factors = _mm_unpacklo_epi8(factorsource, zero);
+        pix1 = _mm_add_epi16(_mm_mullo_epi16(pix1, factors), half);
+        pix1 = _mm_add_epi16(pix1, _mm_srli_epi16(pix1, 8));
+        pix1 = _mm_srli_epi16(pix1, 8);
+
+        pix2 = _mm_unpackhi_epi8(source, zero);
+        factors = _mm_unpackhi_epi8(factorsource, zero);
+        pix2 = _mm_add_epi16(_mm_mullo_epi16(pix2, factors), half);
+        pix2 = _mm_add_epi16(pix2, _mm_srli_epi16(pix2, 8));
+        pix2 = _mm_srli_epi16(pix2, 8);
+
+        source = _mm_packus_epi16(pix1, pix2);
+        _mm_storeu_si128((__m128i *) &out[x * 4], source);
+    }
+
+#endif
+
+    for (; x < xsize; x++) {
+        alpha = in[x * 4 + 3];
+        out[x * 4 + 0] = MULDIV255(in[x * 4 + 0], alpha, tmp);
+        out[x * 4 + 1] = MULDIV255(in[x * 4 + 1], alpha, tmp);
+        out[x * 4 + 2] = MULDIV255(in[x * 4 + 2], alpha, tmp);
+        out[x * 4 + 3] = alpha;
+    }
 }
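Note: rgbA2rgba premultiplies each channel by its alpha. The factormask shuffle broadcasts every pixel's alpha byte across all four of its byte slots, and the OR with maxalpha turns the slot aligned with alpha itself into 255, so the factor register reads (a, a, a, 255) per pixel and alpha passes through the multiply unchanged. The mullo/add/srli sequence matches the MULDIV255 macro used in the scalar tail: t = v*a + 128, then (t + (t >> 8)) >> 8, an exact integer rounding of v*a/255. A brute-force check of that identity (standalone sketch, not part of the patch):

    #include <assert.h>

    /* Same arithmetic as the vector body: multiply, add 128, then the
       (t + (t >> 8)) >> 8 trick, exact for all v*a <= 255*255. */
    static unsigned muldiv255(unsigned v, unsigned a)
    {
        unsigned t = v * a + 128;
        return (t + (t >> 8)) >> 8;
    }

    int main(void)
    {
        unsigned v, a;
        for (v = 0; v < 256; v++)
            for (a = 0; a < 256; a++)
                assert(muldiv255(v, a) == (v * a + 127) / 255);
        return 0;
    }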
@@ -456,9 +592,81 @@ rgbA2rgba(UINT8* out, const UINT8* in, int xsize)
 static void
 rgba2rgbA(UINT8* out, const UINT8* in, int xsize)
 {
-    int x;
+    int x = 0;
     unsigned int alpha;
-    for (x = 0; x < xsize; x++, in+=4) {
+
+#if defined(__AVX2__)
+
+    for (; x < xsize - 7; x += 8) {
+        __m256 mmaf;
+        __m256i pix0, pix1, mma;
+        __m256i mma0, mma1;
+        __m256i source = _mm256_loadu_si256((__m256i *) &in[x * 4]);
+
+        mma = _mm256_and_si256(source, _mm256_set_epi8(
+            0xff,0,0,0, 0xff,0,0,0, 0xff,0,0,0, 0xff,0,0,0,
+            0xff,0,0,0, 0xff,0,0,0, 0xff,0,0,0, 0xff,0,0,0));
+
+        mmaf = _mm256_cvtepi32_ps(_mm256_srli_epi32(source, 24));
+        mmaf = _mm256_mul_ps(_mm256_set1_ps(255.5 * 256), _mm256_rcp_ps(mmaf));
+        mma1 = _mm256_cvtps_epi32(mmaf);
+
+        mma0 = _mm256_shuffle_epi8(mma1, _mm256_set_epi8(
+            5,4,5,4, 5,4,5,4, 1,0,1,0, 1,0,1,0,
+            5,4,5,4, 5,4,5,4, 1,0,1,0, 1,0,1,0));
+        mma1 = _mm256_shuffle_epi8(mma1, _mm256_set_epi8(
+            13,12,13,12, 13,12,13,12, 9,8,9,8, 9,8,9,8,
+            13,12,13,12, 13,12,13,12, 9,8,9,8, 9,8,9,8));
+
+        pix0 = _mm256_unpacklo_epi8(_mm256_setzero_si256(), source);
+        pix1 = _mm256_unpackhi_epi8(_mm256_setzero_si256(), source);
+
+        pix0 = _mm256_mulhi_epu16(pix0, mma0);
+        pix1 = _mm256_mulhi_epu16(pix1, mma1);
+
+        source = _mm256_packus_epi16(pix0, pix1);
+        source = _mm256_blendv_epi8(source, mma, _mm256_set_epi8(
+            0xff,0,0,0, 0xff,0,0,0, 0xff,0,0,0, 0xff,0,0,0,
+            0xff,0,0,0, 0xff,0,0,0, 0xff,0,0,0, 0xff,0,0,0));
+        _mm256_storeu_si256((__m256i *) &out[x * 4], source);
+    }
+
+#endif
+
+    for (; x < xsize - 3; x += 4) {
+        __m128 mmaf;
+        __m128i pix0, pix1, mma;
+        __m128i mma0, mma1;
+        __m128i source = _mm_loadu_si128((__m128i *) &in[x * 4]);
+
+        mma = _mm_and_si128(source, _mm_set_epi8(
+            0xff,0,0,0, 0xff,0,0,0, 0xff,0,0,0, 0xff,0,0,0));
+
+        mmaf = _mm_cvtepi32_ps(_mm_srli_epi32(source, 24));
+        mmaf = _mm_mul_ps(_mm_set1_ps(255.5 * 256), _mm_rcp_ps(mmaf));
+        mma1 = _mm_cvtps_epi32(mmaf);
+
+        mma0 = _mm_shuffle_epi8(mma1, _mm_set_epi8(
+            5,4,5,4, 5,4,5,4, 1,0,1,0, 1,0,1,0));
+        mma1 = _mm_shuffle_epi8(mma1, _mm_set_epi8(
+            13,12,13,12, 13,12,13,12, 9,8,9,8, 9,8,9,8));
+
+        pix0 = _mm_unpacklo_epi8(_mm_setzero_si128(), source);
+        pix1 = _mm_unpackhi_epi8(_mm_setzero_si128(), source);
+
+        pix0 = _mm_mulhi_epu16(pix0, mma0);
+        pix1 = _mm_mulhi_epu16(pix1, mma1);
+
+        source = _mm_packus_epi16(pix0, pix1);
+        source = _mm_blendv_epi8(source, mma, _mm_set_epi8(
+            0xff,0,0,0, 0xff,0,0,0, 0xff,0,0,0, 0xff,0,0,0));
+        _mm_storeu_si128((__m128i *) &out[x * 4], source);
+    }
+
+    in = &in[x * 4];
+    out = &out[x * 4];
+
+    for (; x < xsize; x++, in += 4) {
         alpha = in[3];
         if (alpha == 255 || alpha == 0) {
             *out++ = in[0];
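Note: rgba2rgbA un-premultiplies (divides each channel by alpha). A per-pixel factor ≈ 255.5 * 256 / alpha is computed in float via the fast approximate reciprocal _mm_rcp_ps, rounded to int by _mm_cvtps_epi32, and broadcast to the color words; the pixel bytes are unpacked into the high byte of each 16-bit lane (i.e. pre-scaled by 256), so _mm_mulhi_epu16 yields (v*256)*factor >> 16 ≈ v*255.5/alpha, with the extra 0.5 absorbing rcp_ps's ~2^-12 relative error. _mm_blendv_epi8 then restores the original alpha bytes, and the scalar tail (with its exact alpha == 255 || alpha == 0 shortcuts) finishes the leftover pixels. A scalar sketch of one channel (hypothetical helper, not from the patch):

    #include <stdint.h>

    /* Mirrors the vector math: factor = 255.5*256/alpha as a 16-bit
       integer, result = (v << 8) * factor >> 16, saturated to 255
       (the real code saturates in _mm_packus_epi16). */
    static uint8_t unpremultiply_approx(uint8_t v, uint8_t alpha)
    {
        uint32_t factor, r;
        if (alpha == 0)   /* the vector path also degenerates to 0 here */
            return 0;
        factor = (uint32_t) (255.5f * 256.0f / alpha + 0.5f);
        r = (((uint32_t) v << 8) * factor) >> 16;
        return r > 255 ? 255 : (uint8_t) r;
    }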