Merge b6fb4d8ff8 into 09d75436f3
commit c28c668767
.github/workflows/test.yml (13 changed lines; vendored)
@@ -51,15 +51,19 @@ jobs:
         include:
           - { python-version: "3.11", PYTHONOPTIMIZE: 1, REVERSE: "--reverse" }
           - { python-version: "3.10", PYTHONOPTIMIZE: 2 }
+          # SIMD-accelerated builds for x86
+          - { os: "ubuntu-latest", python-version: "3.9", acceleration: "sse4"}
+          - { os: "ubuntu-latest", python-version: "3.12", acceleration: "avx2"}
           # Free-threaded
           - { os: "ubuntu-latest", python-version: "3.13-dev", disable-gil: true }
           # M1 only available for 3.10+
           - { os: "macos-13", python-version: "3.9" }
+          - { os: "macos-13", python-version: "3.9", acceleration: "avx2"}
         exclude:
           - { os: "macos-latest", python-version: "3.9" }

     runs-on: ${{ matrix.os }}
-    name: ${{ matrix.os }} Python ${{ matrix.python-version }} ${{ matrix.disable-gil && 'free-threaded' || '' }}
+    name: ${{ matrix.os }} Python ${{ matrix.python-version }} ${{ matrix.acceleration }} ${{ matrix.disable-gil && 'free-threaded' || '' }}

     steps:
       - uses: actions/checkout@v4
@@ -109,7 +113,7 @@ jobs:
           GHA_LIBIMAGEQUANT_CACHE_HIT: ${{ steps.cache-libimagequant.outputs.cache-hit }}

       - name: Install macOS dependencies
-        if: startsWith(matrix.os, 'macOS')
+        if: startsWith(matrix.os, 'macos')
        run: |
          .github/workflows/macos-install.sh
        env:
@@ -119,6 +123,11 @@ jobs:
         if: "matrix.os == 'ubuntu-latest' && matrix.python-version == '3.13'"
         run: echo "::add-matcher::.github/problem-matchers/gcc.json"

+      - name: Set compiler options for optimization
+        if: ${{ matrix.acceleration }}
+        run: |
+          echo "CC=cc -m${{ matrix.acceleration }}" >> $GITHUB_ENV
+
       - name: Build
         run: |
           .ci/build.sh
@@ -35,7 +35,9 @@ def test_version() -> None:
             assert version is None
         else:
             assert function(name) == version
-            if name != "PIL":
+            if name == "acceleration":
+                assert version in ("avx2", "sse4", "sse2", "neon", None)
+            elif name != "PIL":
                 if name == "zlib" and version is not None:
                     version = re.sub(".zlib-ng$", "", version)
                 elif name == "libtiff" and version is not None:
@@ -129,6 +129,7 @@ features: dict[str, tuple[str, str | bool, str | None]] = {
     "libjpeg_turbo": ("PIL._imaging", "HAVE_LIBJPEGTURBO", "libjpeg_turbo_version"),
     "libimagequant": ("PIL._imaging", "HAVE_LIBIMAGEQUANT", "imagequant_version"),
     "xcb": ("PIL._imaging", "HAVE_XCB", None),
+    "acceleration": ("PIL._imaging", "acceleration", "acceleration"),
 }


@@ -282,6 +283,7 @@ def pilinfo(out: IO[str] | None = None, supported_formats: bool = True) -> None:

     for name, feature in [
         ("pil", "PIL CORE"),
+        ("acceleration", "Acceleration"),
         ("tkinter", "TKINTER"),
         ("freetype2", "FREETYPE2"),
         ("littlecms2", "LITTLECMS2"),
@@ -303,7 +305,7 @@ def pilinfo(out: IO[str] | None = None, supported_formats: bool = True) -> None:
             if v is None:
                 v = version(name)
             if v is not None:
-                version_static = name in ("pil", "jpg")
+                version_static = name in ("pil", "jpg", "acceleration")
                 if name == "littlecms2":
                     # this check is also in src/_imagingcms.c:setup_module()
                     version_static = tuple(int(x) for x in v.split(".")) < (2, 7)
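With the "acceleration" entry registered above, the feature can be queried through the
public PIL.features API. A minimal sketch, assuming a build of this branch is installed
(the reported string depends entirely on the compiler flags used for the build):

    from PIL import features

    # The entry maps to the PIL._imaging.acceleration constant set in
    # setup_module(); version() should return the SIMD instruction set the
    # C extension was compiled for, or None on a plain build.
    print(features.version("acceleration"))  # e.g. "avx2", "sse4", "sse2", "neon" or None

    # pilinfo() now prints an "Acceleration" line alongside the other features.
    features.pilinfo(supported_formats=False)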
@@ -4425,6 +4425,19 @@ setup_module(PyObject *m) {
     Py_INCREF(have_xcb);
     PyModule_AddObject(m, "HAVE_XCB", have_xcb);

+#ifdef __AVX2__
+    PyModule_AddStringConstant(m, "acceleration", "avx2");
+#elif defined(__SSE4__)
+    PyModule_AddStringConstant(m, "acceleration", "sse4");
+#elif defined(__SSE2__)
+    PyModule_AddStringConstant(m, "acceleration", "sse2");
+#elif defined(__NEON__)
+    PyModule_AddStringConstant(m, "acceleration", "neon");
+#else
+    Py_INCREF(Py_False);
+    PyModule_AddObject(m, "acceleration", Py_False);
+#endif
+
     PyObject *pillow_version = PyUnicode_FromString(version);
     PyDict_SetItemString(
         d, "PILLOW_VERSION", pillow_version ? pillow_version : Py_None
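The same constant can also be read straight off the private C module, which is what the
features registry above resolves to. Illustration only; PIL._imaging is not public API:

    from PIL import _imaging

    # setup_module() exposes whichever value was chosen at compile time: a
    # string such as "avx2", "sse4", "sse2" or "neon", or False when no SIMD
    # instruction set was enabled for the build.
    print(_imaging.acceleration)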
@@ -46,38 +46,268 @@ ImagingAlphaComposite(Imaging imDst, Imaging imSrc) {
         rgba8 *src = (rgba8 *)imSrc->image[y];
         rgba8 *out = (rgba8 *)imOut->image[y];

-        for (x = 0; x < imDst->xsize; x++) {
-            if (src->a == 0) {
+        x = 0;
+
+#if defined(__AVX2__)
+        {
+            __m256i vmm_max_alpha = _mm256_set1_epi32(255);
+            __m256i vmm_max_alpha2 = _mm256_set1_epi32(255 * 255);
+            __m256i vmm_zero = _mm256_setzero_si256();
+            __m256i vmm_half = _mm256_set1_epi16(128);
+            __m256i vmm_get_lo = _mm256_set_epi8(
+                -1, -1, 5, 4, 5, 4, 5, 4, -1, -1, 1, 0, 1, 0, 1, 0,
+                -1, -1, 5, 4, 5, 4, 5, 4, -1, -1, 1, 0, 1, 0, 1, 0
+            );
+            __m256i vmm_get_hi = _mm256_set_epi8(
+                -1, -1, 13, 12, 13, 12, 13, 12, -1, -1, 9, 8, 9, 8, 9, 8,
+                -1, -1, 13, 12, 13, 12, 13, 12, -1, -1, 9, 8, 9, 8, 9, 8
+            );
+
+#define MM_SHIFTDIV255_epi16(src) \
+    _mm256_srli_epi16(_mm256_add_epi16(src, _mm256_srli_epi16(src, 8)), 8)
+
+            for (; x < imDst->xsize - 7; x += 8) {
+                __m256i mm_dst, mm_dst_lo, mm_dst_hi;
+                __m256i mm_src, mm_src_lo, mm_src_hi;
+                __m256i mm_dst_a, mm_src_a, mm_out_a, mm_blend;
+                __m256i mm_coef1, mm_coef2, mm_out_lo, mm_out_hi;
+
+                mm_dst = _mm256_loadu_si256((__m256i *)&dst[x]);
+                mm_dst_lo = _mm256_unpacklo_epi8(mm_dst, vmm_zero);
+                mm_dst_hi = _mm256_unpackhi_epi8(mm_dst, vmm_zero);
+                mm_src = _mm256_loadu_si256((__m256i *)&src[x]);
+                mm_src_lo = _mm256_unpacklo_epi8(mm_src, vmm_zero);
+                mm_src_hi = _mm256_unpackhi_epi8(mm_src, vmm_zero);
+
+                mm_dst_a = _mm256_srli_epi32(mm_dst, 24);
+                mm_src_a = _mm256_srli_epi32(mm_src, 24);
+
+                // Compute coefficients
+                // blend = dst->a * (255 - src->a); 16 bits
+                mm_blend = _mm256_mullo_epi16(
+                    mm_dst_a, _mm256_sub_epi32(vmm_max_alpha, mm_src_a)
+                );
+                // outa = src->a * 255 + dst->a * (255 - src->a); 16 bits
+                mm_out_a = _mm256_add_epi32(
+                    _mm256_mullo_epi16(mm_src_a, vmm_max_alpha), mm_blend
+                );
+                mm_coef1 = _mm256_mullo_epi32(mm_src_a, vmm_max_alpha2);
+                // 8 bits
+                mm_coef1 = _mm256_cvtps_epi32(_mm256_mul_ps(
+                    _mm256_cvtepi32_ps(mm_coef1),
+                    _mm256_rcp_ps(_mm256_cvtepi32_ps(mm_out_a))
+                ));
+                // 8 bits
+                mm_coef2 = _mm256_sub_epi32(vmm_max_alpha, mm_coef1);
+
+                mm_out_lo = _mm256_add_epi16(
+                    _mm256_mullo_epi16(
+                        mm_src_lo, _mm256_shuffle_epi8(mm_coef1, vmm_get_lo)
+                    ),
+                    _mm256_mullo_epi16(
+                        mm_dst_lo, _mm256_shuffle_epi8(mm_coef2, vmm_get_lo)
+                    )
+                );
+                mm_out_lo = _mm256_or_si256(
+                    mm_out_lo,
+                    _mm256_slli_epi64(_mm256_unpacklo_epi32(mm_out_a, vmm_zero), 48)
+                );
+                mm_out_lo = _mm256_add_epi16(mm_out_lo, vmm_half);
+                mm_out_lo = MM_SHIFTDIV255_epi16(mm_out_lo);
+
+                mm_out_hi = _mm256_add_epi16(
+                    _mm256_mullo_epi16(
+                        mm_src_hi, _mm256_shuffle_epi8(mm_coef1, vmm_get_hi)
+                    ),
+                    _mm256_mullo_epi16(
+                        mm_dst_hi, _mm256_shuffle_epi8(mm_coef2, vmm_get_hi)
+                    )
+                );
+                mm_out_hi = _mm256_or_si256(
+                    mm_out_hi,
+                    _mm256_slli_epi64(_mm256_unpackhi_epi32(mm_out_a, vmm_zero), 48)
+                );
+                mm_out_hi = _mm256_add_epi16(mm_out_hi, vmm_half);
+                mm_out_hi = MM_SHIFTDIV255_epi16(mm_out_hi);
+
+                _mm256_storeu_si256(
+                    (__m256i *)&out[x], _mm256_packus_epi16(mm_out_lo, mm_out_hi)
+                );
+            }
+
+#undef MM_SHIFTDIV255_epi16
+        }
+#endif
+#if defined(__SSE4__)
+        {
+            __m128i mm_max_alpha = _mm_set1_epi32(255);
+            __m128i mm_max_alpha2 = _mm_set1_epi32(255 * 255);
+            __m128i mm_zero = _mm_setzero_si128();
+            __m128i mm_half = _mm_set1_epi16(128);
+            __m128i mm_get_lo =
+                _mm_set_epi8(-1, -1, 5, 4, 5, 4, 5, 4, -1, -1, 1, 0, 1, 0, 1, 0);
+            __m128i mm_get_hi =
+                _mm_set_epi8(-1, -1, 13, 12, 13, 12, 13, 12, -1, -1, 9, 8, 9, 8, 9, 8);
+
+#define MM_SHIFTDIV255_epi16(src) \
+    _mm_srli_epi16(_mm_add_epi16(src, _mm_srli_epi16(src, 8)), 8)
+
+            for (; x < imDst->xsize - 3; x += 4) {
+                __m128i mm_dst, mm_dst_lo, mm_dst_hi;
+                __m128i mm_src, mm_src_hi, mm_src_lo;
+                __m128i mm_dst_a, mm_src_a, mm_out_a, mm_blend;
+                __m128i mm_coef1, mm_coef2, mm_out_lo, mm_out_hi;
+
+                // [8] a3 b3 g3 r3 a2 b2 g2 r2 a1 b1 g1 r1 a0 b0 g0 r0
+                mm_dst = _mm_loadu_si128((__m128i *)&dst[x]);
+                // [16] a1 b1 g1 r1 a0 b0 g0 r0
+                mm_dst_lo = _mm_unpacklo_epi8(mm_dst, mm_zero);
+                // [16] a3 b3 g3 r3 a2 b2 g2 r2
+                mm_dst_hi = _mm_unpackhi_epi8(mm_dst, mm_zero);
+                // [8] a3 b3 g3 r3 a2 b2 g2 r2 a1 b1 g1 r1 a0 b0 g0 r0
+                mm_src = _mm_loadu_si128((__m128i *)&src[x]);
+                mm_src_lo = _mm_unpacklo_epi8(mm_src, mm_zero);
+                mm_src_hi = _mm_unpackhi_epi8(mm_src, mm_zero);
+
+                // [32] a3 a2 a1 a0
+                mm_dst_a = _mm_srli_epi32(mm_dst, 24);
+                mm_src_a = _mm_srli_epi32(mm_src, 24);
+
+                // Compute coefficients
+                // blend = dst->a * (255 - src->a)
+                // [16] xx b3 xx b2 xx b1 xx b0
+                mm_blend =
+                    _mm_mullo_epi16(mm_dst_a, _mm_sub_epi32(mm_max_alpha, mm_src_a));
+                // outa = src->a * 255 + blend
+                // [16] xx a3 xx a2 xx a1 xx a0
+                mm_out_a =
+                    _mm_add_epi32(_mm_mullo_epi16(mm_src_a, mm_max_alpha), mm_blend);
+                // coef1 = src->a * 255 * 255 / outa
+                mm_coef1 = _mm_mullo_epi32(mm_src_a, mm_max_alpha2);
+                // [8] xx xx xx c3 xx xx xx c2 xx xx xx c1 xx xx xx c0
+                mm_coef1 = _mm_cvtps_epi32(_mm_mul_ps(
+                    _mm_cvtepi32_ps(mm_coef1), _mm_rcp_ps(_mm_cvtepi32_ps(mm_out_a))
+                ));
+                // [8] xx xx xx c3 xx xx xx c2 xx xx xx c1 xx xx xx c0
+                mm_coef2 = _mm_sub_epi32(mm_max_alpha, mm_coef1);
+
+                mm_out_lo = _mm_add_epi16(
+                    _mm_mullo_epi16(mm_src_lo, _mm_shuffle_epi8(mm_coef1, mm_get_lo)),
+                    _mm_mullo_epi16(mm_dst_lo, _mm_shuffle_epi8(mm_coef2, mm_get_lo))
+                );
+                mm_out_lo = _mm_or_si128(
+                    mm_out_lo, _mm_slli_epi64(_mm_unpacklo_epi32(mm_out_a, mm_zero), 48)
+                );
+                mm_out_lo = _mm_add_epi16(mm_out_lo, mm_half);
+                mm_out_lo = MM_SHIFTDIV255_epi16(mm_out_lo);
+
+                mm_out_hi = _mm_add_epi16(
+                    _mm_mullo_epi16(mm_src_hi, _mm_shuffle_epi8(mm_coef1, mm_get_hi)),
+                    _mm_mullo_epi16(mm_dst_hi, _mm_shuffle_epi8(mm_coef2, mm_get_hi))
+                );
+                mm_out_hi = _mm_or_si128(
+                    mm_out_hi, _mm_slli_epi64(_mm_unpackhi_epi32(mm_out_a, mm_zero), 48)
+                );
+                mm_out_hi = _mm_add_epi16(mm_out_hi, mm_half);
+                mm_out_hi = MM_SHIFTDIV255_epi16(mm_out_hi);
+
+                _mm_storeu_si128(
+                    (__m128i *)&out[x], _mm_packus_epi16(mm_out_lo, mm_out_hi)
+                );
+            }
+
+#undef MM_SHIFTDIV255_epi16
+        }
+#endif
+
+        for (; x < imDst->xsize; x += 1) {
+            if (src[x].a == 0) {
                 // Copy 4 bytes at once.
-                *out = *dst;
+                out[x] = dst[x];
             } else {
                 // Integer implementation with increased precision.
                 // Each variable has extra meaningful bits.
                 // Divisions are rounded.

                 UINT32 tmpr, tmpg, tmpb;
-                UINT32 blend = dst->a * (255 - src->a);
-                UINT32 outa255 = src->a * 255 + blend;
+                UINT32 blend = dst[x].a * (255 - src[x].a);
+                UINT32 outa255 = src[x].a * 255 + blend;
                 // There we use 7 bits for precision.
                 // We could use more, but we go beyond 32 bits.
-                UINT32 coef1 = src->a * 255 * 255 * (1 << PRECISION_BITS) / outa255;
+                UINT32 coef1 = src[x].a * 255 * 255 * (1 << PRECISION_BITS) / outa255;
                 UINT32 coef2 = 255 * (1 << PRECISION_BITS) - coef1;

-                tmpr = src->r * coef1 + dst->r * coef2;
-                tmpg = src->g * coef1 + dst->g * coef2;
-                tmpb = src->b * coef1 + dst->b * coef2;
-                out->r =
+                tmpr = src[x].r * coef1 + dst[x].r * coef2;
+                tmpg = src[x].g * coef1 + dst[x].g * coef2;
+                tmpb = src[x].b * coef1 + dst[x].b * coef2;
+                out[x].r =
                     SHIFTFORDIV255(tmpr + (0x80 << PRECISION_BITS)) >> PRECISION_BITS;
-                out->g =
+                out[x].g =
                     SHIFTFORDIV255(tmpg + (0x80 << PRECISION_BITS)) >> PRECISION_BITS;
-                out->b =
+                out[x].b =
                     SHIFTFORDIV255(tmpb + (0x80 << PRECISION_BITS)) >> PRECISION_BITS;
-                out->a = SHIFTFORDIV255(outa255 + 0x80);
+                out[x].a = SHIFTFORDIV255(outa255 + 0x80);
             }
-
-            dst++;
-            src++;
-            out++;
         }
     }
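Both the scalar tail above and the MM_SHIFTDIV255_epi16 macro lean on the same trick:
adding a 0x80 bias and then folding the high byte back in approximates a rounded
division by 255 without an integer divide. A small Python check of the identity
(a sketch; SHIFTFORDIV255 is assumed to be ((a) + ((a) >> 8)) >> 8, as defined
elsewhere in libImaging and not shown in this diff):

    def div255_rounded(v: int) -> int:
        v += 0x80                   # rounding bias (vmm_half / mm_half in the SIMD code)
        return (v + (v >> 8)) >> 8  # cheap substitute for v / 255

    # The 16-bit lanes the SIMD macro operates on, and the scalar alpha term,
    # never exceed 255 * 255; over that range the shortcut matches true
    # rounded division exactly.
    assert all(div255_rounded(v) == (v + 127) // 255 for v in range(255 * 255 + 1))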
@@ -21,6 +21,9 @@ Imaging
 ImagingGetBand(Imaging imIn, int band) {
     Imaging imOut;
     int x, y;
+#ifdef __SSE4__
+    __m128i shuffle_mask;
+#endif

     /* Check arguments */
     if (!imIn || imIn->type != IMAGING_TYPE_UINT8) {
@@ -46,14 +49,41 @@ ImagingGetBand(Imaging imIn, int band) {
         return NULL;
     }

+#ifdef __SSE4__
+    shuffle_mask = _mm_set_epi8(
+        -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+        12 + band, 8 + band, 4 + band, 0 + band
+    );
+#endif
+
     /* Extract band from image */
     for (y = 0; y < imIn->ysize; y++) {
         UINT8 *in = (UINT8 *)imIn->image[y] + band;
         UINT8 *out = imOut->image8[y];
         x = 0;
         for (; x < imIn->xsize - 3; x += 4) {
+#ifdef __SSE4__
+            __m128i source = _mm_loadu_si128((__m128i *)(in - band));
+            *((UINT32 *)(out + x)) =
+                _mm_cvtsi128_si32(_mm_shuffle_epi8(source, shuffle_mask));
+#else
             UINT32 v = MAKE_UINT32(in[0], in[4], in[8], in[12]);
             memcpy(out + x, &v, sizeof(v));
+#endif
             in += 16;
         }
         for (; x < imIn->xsize; x++) {
@@ -99,10 +129,20 @@ ImagingSplit(Imaging imIn, Imaging bands[4]) {
             UINT8 *out1 = bands[1]->image8[y];
             x = 0;
             for (; x < imIn->xsize - 3; x += 4) {
+#ifdef __SSE4__
+                __m128i source = _mm_loadu_si128((__m128i *)in);
+                source = _mm_shuffle_epi8(
+                    source,
+                    _mm_set_epi8(15, 11, 7, 3, 14, 10, 6, 2, 13, 9, 5, 1, 12, 8, 4, 0)
+                );
+                *((UINT32 *)(out0 + x)) = _mm_cvtsi128_si32(source);
+                *((UINT32 *)(out1 + x)) = _mm_cvtsi128_si32(_mm_srli_si128(source, 12));
+#else
                 UINT32 v = MAKE_UINT32(in[0], in[4], in[8], in[12]);
                 memcpy(out0 + x, &v, sizeof(v));
                 v = MAKE_UINT32(in[0 + 3], in[4 + 3], in[8 + 3], in[12 + 3]);
                 memcpy(out1 + x, &v, sizeof(v));
+#endif
                 in += 16;
             }
             for (; x < imIn->xsize; x++) {
@@ -119,12 +159,23 @@ ImagingSplit(Imaging imIn, Imaging bands[4]) {
             UINT8 *out2 = bands[2]->image8[y];
             x = 0;
             for (; x < imIn->xsize - 3; x += 4) {
+#ifdef __SSE4__
+                __m128i source = _mm_loadu_si128((__m128i *)in);
+                source = _mm_shuffle_epi8(
+                    source,
+                    _mm_set_epi8(15, 11, 7, 3, 14, 10, 6, 2, 13, 9, 5, 1, 12, 8, 4, 0)
+                );
+                *((UINT32 *)(out0 + x)) = _mm_cvtsi128_si32(source);
+                *((UINT32 *)(out1 + x)) = _mm_cvtsi128_si32(_mm_srli_si128(source, 4));
+                *((UINT32 *)(out2 + x)) = _mm_cvtsi128_si32(_mm_srli_si128(source, 8));
+#else
                 UINT32 v = MAKE_UINT32(in[0], in[4], in[8], in[12]);
                 memcpy(out0 + x, &v, sizeof(v));
                 v = MAKE_UINT32(in[0 + 1], in[4 + 1], in[8 + 1], in[12 + 1]);
                 memcpy(out1 + x, &v, sizeof(v));
                 v = MAKE_UINT32(in[0 + 2], in[4 + 2], in[8 + 2], in[12 + 2]);
                 memcpy(out2 + x, &v, sizeof(v));
+#endif
                 in += 16;
             }
             for (; x < imIn->xsize; x++) {
@@ -143,6 +194,17 @@ ImagingSplit(Imaging imIn, Imaging bands[4]) {
             UINT8 *out3 = bands[3]->image8[y];
             x = 0;
             for (; x < imIn->xsize - 3; x += 4) {
+#ifdef __SSE4__
+                __m128i source = _mm_loadu_si128((__m128i *)in);
+                source = _mm_shuffle_epi8(
+                    source,
+                    _mm_set_epi8(15, 11, 7, 3, 14, 10, 6, 2, 13, 9, 5, 1, 12, 8, 4, 0)
+                );
+                *((UINT32 *)(out0 + x)) = _mm_cvtsi128_si32(source);
+                *((UINT32 *)(out1 + x)) = _mm_cvtsi128_si32(_mm_srli_si128(source, 4));
+                *((UINT32 *)(out2 + x)) = _mm_cvtsi128_si32(_mm_srli_si128(source, 8));
+                *((UINT32 *)(out3 + x)) = _mm_cvtsi128_si32(_mm_srli_si128(source, 12));
+#else
                 UINT32 v = MAKE_UINT32(in[0], in[4], in[8], in[12]);
                 memcpy(out0 + x, &v, sizeof(v));
                 v = MAKE_UINT32(in[0 + 1], in[4 + 1], in[8 + 1], in[12 + 1]);
@@ -151,6 +213,7 @@ ImagingSplit(Imaging imIn, Imaging bands[4]) {
                 memcpy(out2 + x, &v, sizeof(v));
                 v = MAKE_UINT32(in[0 + 3], in[4 + 3], in[8 + 3], in[12 + 3]);
                 memcpy(out3 + x, &v, sizeof(v));
+#endif
                 in += 16;
             }
             for (; x < imIn->xsize; x++) {
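All of the ImagingSplit loops above reuse one shuffle constant; it moves every fourth
byte of a 16-byte block together so that each band's four samples land in one 32-bit
lane, mirroring what the scalar MAKE_UINT32 fallback assembles by hand. A pure-Python
illustration of that byte reordering (not part of the patch):

    def deinterleave(block: bytes) -> list[bytes]:
        assert len(block) == 16  # four pixels, four bands each
        # _mm_set_epi8 lists arguments from the most significant byte down, so the
        # mask ending in 12, 8, 4, 0 gathers bytes 0, 4, 8, 12 into the low lane.
        order = [0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15]
        shuffled = bytes(block[i] for i in order)
        return [shuffled[i : i + 4] for i in range(0, 16, 4)]  # one chunk per band

    r, g, b, a = deinterleave(bytes.fromhex("112233ff" * 4))  # four RGBA pixels
    assert r == b"\x11" * 4 and a == b"\xff" * 4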
@@ -97,3 +97,5 @@ typedef signed __int64 int64_t;
 #ifdef __GNUC__
 #define GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
 #endif
+
+#include "ImagingSIMD.h"
src/libImaging/ImagingSIMD.h (new file, 49 lines)
@@ -0,0 +1,49 @@
+/* Microsoft compiler doesn't limit intrinsics for an architecture.
+   This macro is set only on x86 and means SSE2 and above including AVX2. */
+#if defined(_M_X64) || _M_IX86_FP == 2
+#define __SSE2__
+/* However, Microsoft compiler set __AVX2__ if /arch:AVX2 option is set */
+#ifdef __AVX2__
+#define __SSE4_2__
+#endif
+#endif
+
+/* For better readability */
+#ifdef __SSE4_2__
+#define __SSE4__
+#endif
+#ifdef __aarch64__
+#define __NEON__
+#endif
+
+#ifdef __SSE2__
+#include <mmintrin.h>   // MMX
+#include <xmmintrin.h>  // SSE
+#include <emmintrin.h>  // SSE2
+#endif
+#ifdef __SSE4__
+#include <pmmintrin.h>  // SSE3
+#include <tmmintrin.h>  // SSSE3
+#include <smmintrin.h>  // SSE4.1
+#include <nmmintrin.h>  // SSE4.2
+#endif
+#ifdef __AVX2__
+#include <immintrin.h>  // AVX, AVX2
+#endif
+#ifdef __NEON__
+#include <arm_neon.h>  // ARM NEON
+#endif
+
+#ifdef __SSE4__
+static inline __m128i
+mm_cvtepu8_epi32(void *ptr) {
+    return _mm_cvtepu8_epi32(_mm_cvtsi32_si128(*(INT32 *)ptr));
+}
+#endif
+
+#ifdef __AVX2__
+static inline __m256i
+mm256_cvtepu8_epi32(void *ptr) {
+    return _mm256_cvtepu8_epi32(_mm_loadl_epi64((__m128i *)ptr));
+}
+#endif