From e0649632ff53c883b684fd7f4f7591174695d301 Mon Sep 17 00:00:00 2001
From: Aleksandr Karpinskii
Date: Sat, 6 Jul 2024 15:30:14 +0400
Subject: [PATCH 01/12] Add SIMD headers

---
 src/libImaging/ImPlatform.h  |  2 ++
 src/libImaging/ImagingSIMD.h | 41 ++++++++++++++++++++++++++++++++++++
 2 files changed, 43 insertions(+)
 create mode 100644 src/libImaging/ImagingSIMD.h

diff --git a/src/libImaging/ImPlatform.h b/src/libImaging/ImPlatform.h
index f6e7fb6b9..e5fb4595e 100644
--- a/src/libImaging/ImPlatform.h
+++ b/src/libImaging/ImPlatform.h
@@ -96,3 +96,5 @@ typedef signed __int64 int64_t;
 #ifdef __GNUC__
 #define GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
 #endif
+
+#include "ImagingSIMD.h"
diff --git a/src/libImaging/ImagingSIMD.h b/src/libImaging/ImagingSIMD.h
new file mode 100644
index 000000000..3d80c79d8
--- /dev/null
+++ b/src/libImaging/ImagingSIMD.h
@@ -0,0 +1,41 @@
+/* Microsoft compiler doesn't limit intrinsics for an architecture.
+   This macro is set only on x86 and means SSE2 and above including AVX2. */
+#if defined(_M_X64) || _M_IX86_FP == 2
+    #define __SSE2__
+#endif
+
+#ifdef __SSE4_2__
+    #define __SSE4__
+#endif
+
+#ifdef __SSE2__
+    #include <mmintrin.h>   // MMX
+    #include <xmmintrin.h>  // SSE
+    #include <emmintrin.h>  // SSE2
+#endif
+#ifdef __SSE4__
+    #include <pmmintrin.h>  // SSE3
+    #include <tmmintrin.h>  // SSSE3
+    #include <smmintrin.h>  // SSE4.1
+    #include <nmmintrin.h>  // SSE4.2
+#endif
+#ifdef __AVX2__
+    #include <immintrin.h>  // AVX, AVX2
+#endif
+#ifdef __aarch64__
+    #include <arm_neon.h>   // ARM NEON
+#endif
+
+#ifdef __SSE4__
+static __m128i inline
+mm_cvtepu8_epi32(void *ptr) {
+    return _mm_cvtepu8_epi32(_mm_cvtsi32_si128(*(INT32 *) ptr));
+}
+#endif
+
+#ifdef __AVX2__
+static __m256i inline
+mm256_cvtepu8_epi32(void *ptr) {
+    return _mm256_cvtepu8_epi32(_mm_loadl_epi64((__m128i *) ptr));
+}
+#endif

From 73f76e55c36d13f16802d07e86c4906be91c5fe6 Mon Sep 17 00:00:00 2001
From: Aleksandr Karpinskii
Date: Sat, 6 Jul 2024 15:30:22 +0400
Subject: [PATCH 02/12] Add SIMD example

---
 src/libImaging/Bands.c | 41 +++++++++++++++++++++++++++++++++++++++++
 1 file changed, 41 insertions(+)

diff --git a/src/libImaging/Bands.c b/src/libImaging/Bands.c
index e1b16b34a..1cfeb04bf 100644
--- a/src/libImaging/Bands.c
+++ b/src/libImaging/Bands.c
@@ -21,6 +21,9 @@ Imaging
 ImagingGetBand(Imaging imIn, int band) {
     Imaging imOut;
     int x, y;
+#ifdef __SSE4__
+    __m128i shuffle_mask;
+#endif
 
     /* Check arguments */
     if (!imIn || imIn->type != IMAGING_TYPE_UINT8) {
@@ -46,14 +49,25 @@ ImagingGetBand(Imaging imIn, int band) {
         return NULL;
     }
 
+#ifdef __SSE4__
+    shuffle_mask = _mm_set_epi8(
+        -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, 12+band,8+band,4+band,0+band);
+#endif
+
     /* Extract band from image */
     for (y = 0; y < imIn->ysize; y++) {
         UINT8 *in = (UINT8 *)imIn->image[y] + band;
         UINT8 *out = imOut->image8[y];
         x = 0;
         for (; x < imIn->xsize - 3; x += 4) {
+#ifdef __SSE4__
+            __m128i source = _mm_loadu_si128((__m128i *)(in - band));
+            *((UINT32 *)(out + x)) = _mm_cvtsi128_si32(
+                _mm_shuffle_epi8(source, shuffle_mask));
+#else
             UINT32 v = MAKE_UINT32(in[0], in[4], in[8], in[12]);
             memcpy(out + x, &v, sizeof(v));
+#endif
             in += 16;
         }
         for (; x < imIn->xsize; x++) {
@@ -99,10 +113,18 @@ ImagingSplit(Imaging imIn, Imaging bands[4]) {
             UINT8 *out1 = bands[1]->image8[y];
             x = 0;
             for (; x < imIn->xsize - 3; x += 4) {
+#ifdef __SSE4__
+                __m128i source = _mm_loadu_si128((__m128i *)in);
+                source = _mm_shuffle_epi8(source, _mm_set_epi8(
+                    15,11,7,3, 14,10,6,2, 13,9,5,1, 12,8,4,0));
+                *((UINT32 *)(out0 + x)) = _mm_cvtsi128_si32(source);
+                *((UINT32 *)(out1 + x)) = _mm_cvtsi128_si32(_mm_srli_si128(source, 12));
+#else
                 UINT32 v = MAKE_UINT32(in[0], in[4], in[8], in[12]);
                 memcpy(out0 + x, &v, sizeof(v));
                 v = MAKE_UINT32(in[0 + 3], in[4 + 3], in[8 + 3], in[12 + 3]);
                 memcpy(out1 + x, &v, sizeof(v));
+#endif
                 in += 16;
             }
             for (; x < imIn->xsize; x++) {
@@ -119,12 +141,21 @@ ImagingSplit(Imaging imIn, Imaging bands[4]) {
             UINT8 *out2 = bands[2]->image8[y];
             x = 0;
             for (; x < imIn->xsize - 3; x += 4) {
+#ifdef __SSE4__
+                __m128i source = _mm_loadu_si128((__m128i *)in);
+                source = _mm_shuffle_epi8(source, _mm_set_epi8(
+                    15,11,7,3, 14,10,6,2, 13,9,5,1, 12,8,4,0));
+                *((UINT32 *)(out0 + x)) = _mm_cvtsi128_si32(source);
+                *((UINT32 *)(out1 + x)) = _mm_cvtsi128_si32(_mm_srli_si128(source, 4));
+                *((UINT32 *)(out2 + x)) = _mm_cvtsi128_si32(_mm_srli_si128(source, 8));
+#else
                 UINT32 v = MAKE_UINT32(in[0], in[4], in[8], in[12]);
                 memcpy(out0 + x, &v, sizeof(v));
                 v = MAKE_UINT32(in[0 + 1], in[4 + 1], in[8 + 1], in[12 + 1]);
                 memcpy(out1 + x, &v, sizeof(v));
                 v = MAKE_UINT32(in[0 + 2], in[4 + 2], in[8 + 2], in[12 + 2]);
                 memcpy(out2 + x, &v, sizeof(v));
+#endif
                 in += 16;
             }
             for (; x < imIn->xsize; x++) {
@@ -143,6 +174,15 @@ ImagingSplit(Imaging imIn, Imaging bands[4]) {
             UINT8 *out3 = bands[3]->image8[y];
             x = 0;
             for (; x < imIn->xsize - 3; x += 4) {
+#ifdef __SSE4__
+                __m128i source = _mm_loadu_si128((__m128i *)in);
+                source = _mm_shuffle_epi8(source, _mm_set_epi8(
+                    15,11,7,3, 14,10,6,2, 13,9,5,1, 12,8,4,0));
+                *((UINT32 *)(out0 + x)) = _mm_cvtsi128_si32(source);
+                *((UINT32 *)(out1 + x)) = _mm_cvtsi128_si32(_mm_srli_si128(source, 4));
+                *((UINT32 *)(out2 + x)) = _mm_cvtsi128_si32(_mm_srli_si128(source, 8));
+                *((UINT32 *)(out3 + x)) = _mm_cvtsi128_si32(_mm_srli_si128(source, 12));
+#else
                 UINT32 v = MAKE_UINT32(in[0], in[4], in[8], in[12]);
                 memcpy(out0 + x, &v, sizeof(v));
                 v = MAKE_UINT32(in[0 + 1], in[4 + 1], in[8 + 1], in[12 + 1]);
@@ -151,6 +191,7 @@ ImagingSplit(Imaging imIn, Imaging bands[4]) {
                 memcpy(out2 + x, &v, sizeof(v));
                 v = MAKE_UINT32(in[0 + 3], in[4 + 3], in[8 + 3], in[12 + 3]);
                 memcpy(out3 + x, &v, sizeof(v));
+#endif
                 in += 16;
             }
             for (; x < imIn->xsize; x++) {

From 910911061fda520ba3a9d346cc83d9fa0bfea2b4 Mon Sep 17 00:00:00 2001
From: Aleksandr Karpinskii
Date: Sat, 6 Jul 2024 15:40:30 +0400
Subject: [PATCH 03/12] Add __SSE4_2__ for MSCC

---
 src/libImaging/ImagingSIMD.h | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/src/libImaging/ImagingSIMD.h b/src/libImaging/ImagingSIMD.h
index 3d80c79d8..ed259cebe 100644
--- a/src/libImaging/ImagingSIMD.h
+++ b/src/libImaging/ImagingSIMD.h
@@ -2,8 +2,13 @@
    This macro is set only on x86 and means SSE2 and above including AVX2. */
 #if defined(_M_X64) || _M_IX86_FP == 2
     #define __SSE2__
+    /* However, Microsoft compiler set __AVX2__ if /arch:AVX2 option is set */
+    #ifdef __AVX2__
+        #define __SSE4_2__
+    #endif
 #endif
 
+/* For better readability */
 #ifdef __SSE4_2__
     #define __SSE4__
 #endif

From 2db9cd34991a06ec01943a5a78c7dba9e7d67d9b Mon Sep 17 00:00:00 2001
From: Aleksandr Karpinskii
Date: Sun, 7 Jul 2024 17:13:44 +0400
Subject: [PATCH 04/12] Add core.acceleration attribute

---
 src/PIL/features.py |  4 +++-
 src/_imaging.c      | 13 +++++++++++++
 2 files changed, 16 insertions(+), 1 deletion(-)

diff --git a/src/PIL/features.py b/src/PIL/features.py
index 13908c4eb..9f4735aee 100644
--- a/src/PIL/features.py
+++ b/src/PIL/features.py
@@ -128,6 +128,7 @@ features = {
     "libjpeg_turbo": ("PIL._imaging", "HAVE_LIBJPEGTURBO", "libjpeg_turbo_version"),
     "libimagequant": ("PIL._imaging", "HAVE_LIBIMAGEQUANT", "imagequant_version"),
     "xcb": ("PIL._imaging", "HAVE_XCB", None),
+    "acceleration": ("PIL._imaging", "acceleration", "acceleration"),
 }
 
 
@@ -267,6 +268,7 @@ def pilinfo(out: IO[str] | None = None, supported_formats: bool = True) -> None:
 
     for name, feature in [
         ("pil", "PIL CORE"),
+        ("acceleration", "Acceleration"),
         ("tkinter", "TKINTER"),
         ("freetype2", "FREETYPE2"),
         ("littlecms2", "LITTLECMS2"),
@@ -291,7 +293,7 @@ def pilinfo(out: IO[str] | None = None, supported_formats: bool = True) -> None:
         if v is None:
             v = version(name)
         if v is not None:
-            version_static = name in ("pil", "jpg")
+            version_static = name in ("pil", "jpg", "acceleration")
             if name == "littlecms2":
                 # this check is also in src/_imagingcms.c:setup_module()
                 version_static = tuple(int(x) for x in v.split(".")) < (2, 7)
diff --git a/src/_imaging.c b/src/_imaging.c
index 07d9a64cc..84c1d55c3 100644
--- a/src/_imaging.c
+++ b/src/_imaging.c
@@ -4407,6 +4407,19 @@ setup_module(PyObject *m) {
     Py_INCREF(have_xcb);
     PyModule_AddObject(m, "HAVE_XCB", have_xcb);
 
+#ifdef __AVX2__
+    PyModule_AddStringConstant(m, "acceleration", "avx2");
+#elif defined(__SSE4__)
+    PyModule_AddStringConstant(m, "acceleration", "sse4");
+#elif defined(__SSE2__)
+    PyModule_AddStringConstant(m, "acceleration", "sse2");
+#elif defined(__NEON__)
+    PyModule_AddStringConstant(m, "acceleration", "neon");
+#else
+    Py_INCREF(Py_False);
+    PyModule_AddObject(m, "acceleration", Py_False);
+#endif
+
     PyObject *pillow_version = PyUnicode_FromString(version);
     PyDict_SetItemString(
         d, "PILLOW_VERSION", pillow_version ?
pillow_version : Py_None From 5df34a24f88e9e68f18011ddeb6cdca2f435612b Mon Sep 17 00:00:00 2001 From: Aleksandr Karpinskii Date: Sun, 7 Jul 2024 19:09:35 +0400 Subject: [PATCH 05/12] clang-format --- src/libImaging/Bands.c | 36 +++++++++++++++++++++++++--------- src/libImaging/ImagingSIMD.h | 38 ++++++++++++++++++------------------ 2 files changed, 46 insertions(+), 28 deletions(-) diff --git a/src/libImaging/Bands.c b/src/libImaging/Bands.c index 1cfeb04bf..6b3f8c03a 100644 --- a/src/libImaging/Bands.c +++ b/src/libImaging/Bands.c @@ -51,7 +51,22 @@ ImagingGetBand(Imaging imIn, int band) { #ifdef __SSE4__ shuffle_mask = _mm_set_epi8( - -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, 12+band,8+band,4+band,0+band); + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + 12 + band, + 8 + band, + 4 + band, + 0 + band); #endif /* Extract band from image */ @@ -62,8 +77,8 @@ ImagingGetBand(Imaging imIn, int band) { for (; x < imIn->xsize - 3; x += 4) { #ifdef __SSE4__ __m128i source = _mm_loadu_si128((__m128i *)(in - band)); - *((UINT32 *)(out + x)) = _mm_cvtsi128_si32( - _mm_shuffle_epi8(source, shuffle_mask)); + *((UINT32 *)(out + x)) = + _mm_cvtsi128_si32(_mm_shuffle_epi8(source, shuffle_mask)); #else UINT32 v = MAKE_UINT32(in[0], in[4], in[8], in[12]); memcpy(out + x, &v, sizeof(v)); @@ -115,8 +130,9 @@ ImagingSplit(Imaging imIn, Imaging bands[4]) { for (; x < imIn->xsize - 3; x += 4) { #ifdef __SSE4__ __m128i source = _mm_loadu_si128((__m128i *)in); - source = _mm_shuffle_epi8(source, _mm_set_epi8( - 15,11,7,3, 14,10,6,2, 13,9,5,1, 12,8,4,0)); + source = _mm_shuffle_epi8( + source, + _mm_set_epi8(15, 11, 7, 3, 14, 10, 6, 2, 13, 9, 5, 1, 12, 8, 4, 0)); *((UINT32 *)(out0 + x)) = _mm_cvtsi128_si32(source); *((UINT32 *)(out1 + x)) = _mm_cvtsi128_si32(_mm_srli_si128(source, 12)); #else @@ -143,8 +159,9 @@ ImagingSplit(Imaging imIn, Imaging bands[4]) { for (; x < imIn->xsize - 3; x += 4) { #ifdef __SSE4__ __m128i source = _mm_loadu_si128((__m128i *)in); - source = _mm_shuffle_epi8(source, _mm_set_epi8( - 15,11,7,3, 14,10,6,2, 13,9,5,1, 12,8,4,0)); + source = _mm_shuffle_epi8( + source, + _mm_set_epi8(15, 11, 7, 3, 14, 10, 6, 2, 13, 9, 5, 1, 12, 8, 4, 0)); *((UINT32 *)(out0 + x)) = _mm_cvtsi128_si32(source); *((UINT32 *)(out1 + x)) = _mm_cvtsi128_si32(_mm_srli_si128(source, 4)); *((UINT32 *)(out2 + x)) = _mm_cvtsi128_si32(_mm_srli_si128(source, 8)); @@ -176,8 +193,9 @@ ImagingSplit(Imaging imIn, Imaging bands[4]) { for (; x < imIn->xsize - 3; x += 4) { #ifdef __SSE4__ __m128i source = _mm_loadu_si128((__m128i *)in); - source = _mm_shuffle_epi8(source, _mm_set_epi8( - 15,11,7,3, 14,10,6,2, 13,9,5,1, 12,8,4,0)); + source = _mm_shuffle_epi8( + source, + _mm_set_epi8(15, 11, 7, 3, 14, 10, 6, 2, 13, 9, 5, 1, 12, 8, 4, 0)); *((UINT32 *)(out0 + x)) = _mm_cvtsi128_si32(source); *((UINT32 *)(out1 + x)) = _mm_cvtsi128_si32(_mm_srli_si128(source, 4)); *((UINT32 *)(out2 + x)) = _mm_cvtsi128_si32(_mm_srli_si128(source, 8)); diff --git a/src/libImaging/ImagingSIMD.h b/src/libImaging/ImagingSIMD.h index ed259cebe..a63f3b0aa 100644 --- a/src/libImaging/ImagingSIMD.h +++ b/src/libImaging/ImagingSIMD.h @@ -1,46 +1,46 @@ /* Microsoft compiler doesn't limit intrinsics for an architecture. This macro is set only on x86 and means SSE2 and above including AVX2. 
*/ #if defined(_M_X64) || _M_IX86_FP == 2 - #define __SSE2__ - /* However, Microsoft compiler set __AVX2__ if /arch:AVX2 option is set */ - #ifdef __AVX2__ - #define __SSE4_2__ - #endif +#define __SSE2__ +/* However, Microsoft compiler set __AVX2__ if /arch:AVX2 option is set */ +#ifdef __AVX2__ +#define __SSE4_2__ +#endif #endif /* For better readability */ #ifdef __SSE4_2__ - #define __SSE4__ +#define __SSE4__ #endif #ifdef __SSE2__ - #include // MMX - #include // SSE - #include // SSE2 +#include // MMX +#include // SSE +#include // SSE2 #endif #ifdef __SSE4__ - #include // SSE3 - #include // SSSE3 - #include // SSE4.1 - #include // SSE4.2 +#include // SSE3 +#include // SSSE3 +#include // SSE4.1 +#include // SSE4.2 #endif #ifdef __AVX2__ - #include // AVX, AVX2 +#include // AVX, AVX2 #endif #ifdef __aarch64__ - #include // ARM NEON +#include // ARM NEON #endif #ifdef __SSE4__ -static __m128i inline +static inline __m128i mm_cvtepu8_epi32(void *ptr) { - return _mm_cvtepu8_epi32(_mm_cvtsi32_si128(*(INT32 *) ptr)); + return _mm_cvtepu8_epi32(_mm_cvtsi32_si128(*(INT32 *)ptr)); } #endif #ifdef __AVX2__ -static __m256i inline +static inline __m256i mm256_cvtepu8_epi32(void *ptr) { - return _mm256_cvtepu8_epi32(_mm_loadl_epi64((__m128i *) ptr)); + return _mm256_cvtepu8_epi32(_mm_loadl_epi64((__m128i *)ptr)); } #endif From f891043101f8f890776b079cf42b1b1411047575 Mon Sep 17 00:00:00 2001 From: Aleksandr Karpinskii Date: Mon, 8 Jul 2024 11:13:21 +0400 Subject: [PATCH 06/12] Define better name __NEON__ --- src/libImaging/ImagingSIMD.h | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/libImaging/ImagingSIMD.h b/src/libImaging/ImagingSIMD.h index a63f3b0aa..fbf612868 100644 --- a/src/libImaging/ImagingSIMD.h +++ b/src/libImaging/ImagingSIMD.h @@ -12,6 +12,9 @@ #ifdef __SSE4_2__ #define __SSE4__ #endif +#ifdef __aarch64__ +#define __NEON__ +#endif #ifdef __SSE2__ #include // MMX @@ -27,7 +30,7 @@ #ifdef __AVX2__ #include // AVX, AVX2 #endif -#ifdef __aarch64__ +#ifdef __NEON__ #include // ARM NEON #endif From 4d10b3c0ab9e5b1f8173ecf89896fdee98417dd3 Mon Sep 17 00:00:00 2001 From: Andrew Murray Date: Mon, 8 Jul 2024 18:32:03 +1000 Subject: [PATCH 07/12] Updated test --- Tests/test_features.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/Tests/test_features.py b/Tests/test_features.py index b7eefa09a..dbe53ee29 100644 --- a/Tests/test_features.py +++ b/Tests/test_features.py @@ -36,7 +36,9 @@ def test_version() -> None: assert version is None else: assert function(name) == version - if name != "PIL": + if name == "acceleration": + assert version in ("avx2", "sse4", "sse2", "neon", None) + elif name != "PIL": if name == "zlib" and version is not None: version = re.sub(".zlib-ng$", "", version) elif name == "libtiff" and version is not None: From a410dcbe9a9c0db5d2e254302876e8e9b0af3a40 Mon Sep 17 00:00:00 2001 From: Aleksandr Karpinskii Date: Tue, 16 Jul 2024 22:45:27 +0400 Subject: [PATCH 08/12] Add accelerated test builds --- .github/workflows/test.yml | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 6e63333b0..9e6fcab32 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -52,15 +52,19 @@ jobs: include: - { python-version: "3.11", PYTHONOPTIMIZE: 1, REVERSE: "--reverse" } - { python-version: "3.10", PYTHONOPTIMIZE: 2 } + # SIMD-accelerated builds for x86 + - { os: "ubuntu-latest", python-version: "3.9", acceleration: "sse4"} + - { os: 
"ubuntu-latest", python-version: "3.12", acceleration: "avx2"} # Free-threaded - { os: "ubuntu-latest", python-version: "3.13-dev", disable-gil: true } # M1 only available for 3.10+ - { os: "macos-13", python-version: "3.9" } + - { os: "macos-13", python-version: "3.9", acceleration: "avx2"} exclude: - { os: "macos-14", python-version: "3.9" } runs-on: ${{ matrix.os }} - name: ${{ matrix.os }} Python ${{ matrix.python-version }} ${{ matrix.disable-gil && 'free-threaded' || '' }} + name: ${{ matrix.os }} Python ${{ matrix.python-version }} ${{ matrix.acceleration }} ${{ matrix.disable-gil && 'free-threaded' || '' }} steps: - uses: actions/checkout@v4 @@ -108,7 +112,7 @@ jobs: GHA_LIBIMAGEQUANT_CACHE_HIT: ${{ steps.cache-libimagequant.outputs.cache-hit }} - name: Install macOS dependencies - if: startsWith(matrix.os, 'macOS') + if: startsWith(matrix.os, 'macos') run: | .github/workflows/macos-install.sh env: @@ -118,6 +122,11 @@ jobs: if: "matrix.os == 'ubuntu-latest' && matrix.python-version == '3.12'" run: echo "::add-matcher::.github/problem-matchers/gcc.json" + - name: Set compiler options for optimization + if: ${{ matrix.acceleration }} + run: | + echo "CC=cc -m${{ matrix.acceleration }}" >> $GITHUB_ENV + - name: Build run: | .ci/build.sh From cb55ee64b50327721b630fd6be98e1d170b67083 Mon Sep 17 00:00:00 2001 From: Aleksandr Karpinskii Date: Sun, 11 Aug 2024 21:11:46 +0400 Subject: [PATCH 09/12] update clang formatting --- src/libImaging/Bands.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/src/libImaging/Bands.c b/src/libImaging/Bands.c index 6b3f8c03a..0ab3156ed 100644 --- a/src/libImaging/Bands.c +++ b/src/libImaging/Bands.c @@ -66,7 +66,8 @@ ImagingGetBand(Imaging imIn, int band) { 12 + band, 8 + band, 4 + band, - 0 + band); + 0 + band + ); #endif /* Extract band from image */ @@ -132,7 +133,8 @@ ImagingSplit(Imaging imIn, Imaging bands[4]) { __m128i source = _mm_loadu_si128((__m128i *)in); source = _mm_shuffle_epi8( source, - _mm_set_epi8(15, 11, 7, 3, 14, 10, 6, 2, 13, 9, 5, 1, 12, 8, 4, 0)); + _mm_set_epi8(15, 11, 7, 3, 14, 10, 6, 2, 13, 9, 5, 1, 12, 8, 4, 0) + ); *((UINT32 *)(out0 + x)) = _mm_cvtsi128_si32(source); *((UINT32 *)(out1 + x)) = _mm_cvtsi128_si32(_mm_srli_si128(source, 12)); #else @@ -161,7 +163,8 @@ ImagingSplit(Imaging imIn, Imaging bands[4]) { __m128i source = _mm_loadu_si128((__m128i *)in); source = _mm_shuffle_epi8( source, - _mm_set_epi8(15, 11, 7, 3, 14, 10, 6, 2, 13, 9, 5, 1, 12, 8, 4, 0)); + _mm_set_epi8(15, 11, 7, 3, 14, 10, 6, 2, 13, 9, 5, 1, 12, 8, 4, 0) + ); *((UINT32 *)(out0 + x)) = _mm_cvtsi128_si32(source); *((UINT32 *)(out1 + x)) = _mm_cvtsi128_si32(_mm_srli_si128(source, 4)); *((UINT32 *)(out2 + x)) = _mm_cvtsi128_si32(_mm_srli_si128(source, 8)); @@ -195,7 +198,8 @@ ImagingSplit(Imaging imIn, Imaging bands[4]) { __m128i source = _mm_loadu_si128((__m128i *)in); source = _mm_shuffle_epi8( source, - _mm_set_epi8(15, 11, 7, 3, 14, 10, 6, 2, 13, 9, 5, 1, 12, 8, 4, 0)); + _mm_set_epi8(15, 11, 7, 3, 14, 10, 6, 2, 13, 9, 5, 1, 12, 8, 4, 0) + ); *((UINT32 *)(out0 + x)) = _mm_cvtsi128_si32(source); *((UINT32 *)(out1 + x)) = _mm_cvtsi128_si32(_mm_srli_si128(source, 4)); *((UINT32 *)(out2 + x)) = _mm_cvtsi128_si32(_mm_srli_si128(source, 8)); From 2ef6424b554e0a9450d4f08bd9fae69b940810ba Mon Sep 17 00:00:00 2001 From: homm Date: Sun, 12 Jun 2016 19:40:56 +0300 Subject: [PATCH 10/12] SIMD AlphaComposite. sse4 implementation SIMD AlphaComposite. avx2 implementation SIMD AlphaComposite. increase precision SIMD AlphaComposite. 
speedup sse4 by using _mm_mullo_epi16 instead of _mm_mullo_epi32 SIMD AlphaComposite. speedup avx2 by using _mm256_mullo_epi16 instead of _mm256_mullo_epi32 SIMD AlphaComposite. fix bugs SIMD AlphaComposite. move declarations to beginning of the blocks SIMD AlphaComposite. fast div aproximation --- src/libImaging/AlphaComposite.c | 181 ++++++++++++++++++++++++++++---- 1 file changed, 160 insertions(+), 21 deletions(-) diff --git a/src/libImaging/AlphaComposite.c b/src/libImaging/AlphaComposite.c index 6d728f908..1e5576345 100644 --- a/src/libImaging/AlphaComposite.c +++ b/src/libImaging/AlphaComposite.c @@ -23,6 +23,28 @@ Imaging ImagingAlphaComposite(Imaging imDst, Imaging imSrc) { Imaging imOut; int x, y; + int xsize = imDst->xsize; + __m128i mm_max_alpha = _mm_set1_epi32(255); + __m128i mm_max_alpha2 = _mm_set1_epi32(255 * 255); + __m128i mm_zero = _mm_setzero_si128(); + __m128i mm_half = _mm_set1_epi16(128); + __m128i mm_get_lo = _mm_set_epi8( + -1,-1, 5,4, 5,4, 5,4, -1,-1, 1,0, 1,0, 1,0); + __m128i mm_get_hi = _mm_set_epi8( + -1,-1, 13,12, 13,12, 13,12, -1,-1, 9,8, 9,8, 9,8); +#if defined(__AVX2__) + __m256i vmm_max_alpha = _mm256_set1_epi32(255); + __m256i vmm_max_alpha2 = _mm256_set1_epi32(255 * 255); + __m256i vmm_zero = _mm256_setzero_si256(); + __m256i vmm_half = _mm256_set1_epi16(128); + __m256i vmm_get_lo = _mm256_set_epi8( + -1,-1, 5,4, 5,4, 5,4, -1,-1, 1,0, 1,0, 1,0, + -1,-1, 5,4, 5,4, 5,4, -1,-1, 1,0, 1,0, 1,0); + __m256i vmm_get_hi = _mm256_set_epi8( + -1,-1, 13,12, 13,12, 13,12, -1,-1, 9,8, 9,8, 9,8, + -1,-1, 13,12, 13,12, 13,12, -1,-1, 9,8, 9,8, 9,8); +#endif + /* Check arguments */ if (!imDst || !imSrc || strcmp(imDst->mode, "RGBA") || @@ -46,38 +68,155 @@ ImagingAlphaComposite(Imaging imDst, Imaging imSrc) { rgba8 *src = (rgba8 *)imSrc->image[y]; rgba8 *out = (rgba8 *)imOut->image[y]; - for (x = 0; x < imDst->xsize; x++) { - if (src->a == 0) { + x = 0; + +#if defined(__AVX2__) + + #define MM_SHIFTDIV255_epi16(src)\ + _mm256_srli_epi16(_mm256_add_epi16(src, _mm256_srli_epi16(src, 8)), 8) + + for (; x < xsize - 7; x += 8) { + __m256i mm_dst, mm_dst_lo, mm_dst_hi; + __m256i mm_src, mm_src_lo, mm_src_hi; + __m256i mm_dst_a, mm_src_a, mm_out_a, mm_blend; + __m256i mm_coef1, mm_coef2, mm_out_lo, mm_out_hi; + + mm_dst = _mm256_loadu_si256((__m256i *) &dst[x]); + mm_dst_lo = _mm256_unpacklo_epi8(mm_dst, vmm_zero); + mm_dst_hi = _mm256_unpackhi_epi8(mm_dst, vmm_zero); + mm_src = _mm256_loadu_si256((__m256i *) &src[x]); + mm_src_lo = _mm256_unpacklo_epi8(mm_src, vmm_zero); + mm_src_hi = _mm256_unpackhi_epi8(mm_src, vmm_zero); + + mm_dst_a = _mm256_srli_epi32(mm_dst, 24); + mm_src_a = _mm256_srli_epi32(mm_src, 24); + + // Compute coefficients + // blend = dst->a * (255 - src->a); 16 bits + mm_blend = _mm256_mullo_epi16(mm_dst_a, _mm256_sub_epi32(vmm_max_alpha, mm_src_a)); + // outa = src->a * 255 + dst->a * (255 - src->a); 16 bits + mm_out_a = _mm256_add_epi32(_mm256_mullo_epi16(mm_src_a, vmm_max_alpha), mm_blend); + mm_coef1 = _mm256_mullo_epi32(mm_src_a, vmm_max_alpha2); + // 8 bits + mm_coef1 = _mm256_cvtps_epi32( + _mm256_mul_ps(_mm256_cvtepi32_ps(mm_coef1), + _mm256_rcp_ps(_mm256_cvtepi32_ps(mm_out_a))) + ); + // 8 bits + mm_coef2 = _mm256_sub_epi32(vmm_max_alpha, mm_coef1); + + mm_out_lo = _mm256_add_epi16( + _mm256_mullo_epi16(mm_src_lo, _mm256_shuffle_epi8(mm_coef1, vmm_get_lo)), + _mm256_mullo_epi16(mm_dst_lo, _mm256_shuffle_epi8(mm_coef2, vmm_get_lo))); + mm_out_lo = _mm256_or_si256(mm_out_lo, _mm256_slli_epi64( + _mm256_unpacklo_epi32(mm_out_a, vmm_zero), 48)); + 
mm_out_lo = _mm256_add_epi16(mm_out_lo, vmm_half); + mm_out_lo = MM_SHIFTDIV255_epi16(mm_out_lo); + + mm_out_hi = _mm256_add_epi16( + _mm256_mullo_epi16(mm_src_hi, _mm256_shuffle_epi8(mm_coef1, vmm_get_hi)), + _mm256_mullo_epi16(mm_dst_hi, _mm256_shuffle_epi8(mm_coef2, vmm_get_hi))); + mm_out_hi = _mm256_or_si256(mm_out_hi, _mm256_slli_epi64( + _mm256_unpackhi_epi32(mm_out_a, vmm_zero), 48)); + mm_out_hi = _mm256_add_epi16(mm_out_hi, vmm_half); + mm_out_hi = MM_SHIFTDIV255_epi16(mm_out_hi); + + _mm256_storeu_si256((__m256i *) &out[x], + _mm256_packus_epi16(mm_out_lo, mm_out_hi)); + } + + #undef MM_SHIFTDIV255_epi16 + +#endif + + #define MM_SHIFTDIV255_epi16(src)\ + _mm_srli_epi16(_mm_add_epi16(src, _mm_srli_epi16(src, 8)), 8) + + for (; x < xsize - 3; x += 4) { + __m128i mm_dst, mm_dst_lo, mm_dst_hi; + __m128i mm_src, mm_src_hi, mm_src_lo; + __m128i mm_dst_a, mm_src_a, mm_out_a, mm_blend; + __m128i mm_coef1, mm_coef2, mm_out_lo, mm_out_hi; + + // [8] a3 b3 g3 r3 a2 b2 g2 r2 a1 b1 g1 r1 a0 b0 g0 r0 + mm_dst = _mm_loadu_si128((__m128i *) &dst[x]); + // [16] a1 b1 g1 r1 a0 b0 g0 r0 + mm_dst_lo = _mm_unpacklo_epi8(mm_dst, mm_zero); + // [16] a3 b3 g3 r3 a2 b2 g2 r2 + mm_dst_hi = _mm_unpackhi_epi8(mm_dst, mm_zero); + // [8] a3 b3 g3 r3 a2 b2 g2 r2 a1 b1 g1 r1 a0 b0 g0 r0 + mm_src = _mm_loadu_si128((__m128i *) &src[x]); + mm_src_lo = _mm_unpacklo_epi8(mm_src, mm_zero); + mm_src_hi = _mm_unpackhi_epi8(mm_src, mm_zero); + + // [32] a3 a2 a1 a0 + mm_dst_a = _mm_srli_epi32(mm_dst, 24); + mm_src_a = _mm_srli_epi32(mm_src, 24); + + // Compute coefficients + // blend = dst->a * (255 - src->a) + // [16] xx b3 xx b2 xx b1 xx b0 + mm_blend = _mm_mullo_epi16(mm_dst_a, _mm_sub_epi32(mm_max_alpha, mm_src_a)); + // outa = src->a * 255 + blend + // [16] xx a3 xx a2 xx a1 xx a0 + mm_out_a = _mm_add_epi32(_mm_mullo_epi16(mm_src_a, mm_max_alpha), mm_blend); + // coef1 = src->a * 255 * 255 / outa + mm_coef1 = _mm_mullo_epi32(mm_src_a, mm_max_alpha2); + // [8] xx xx xx c3 xx xx xx c2 xx xx xx c1 xx xx xx c0 + mm_coef1 = _mm_cvtps_epi32( + _mm_mul_ps(_mm_cvtepi32_ps(mm_coef1), + _mm_rcp_ps(_mm_cvtepi32_ps(mm_out_a))) + ); + // [8] xx xx xx c3 xx xx xx c2 xx xx xx c1 xx xx xx c0 + mm_coef2 = _mm_sub_epi32(mm_max_alpha, mm_coef1); + + mm_out_lo = _mm_add_epi16( + _mm_mullo_epi16(mm_src_lo, _mm_shuffle_epi8(mm_coef1, mm_get_lo)), + _mm_mullo_epi16(mm_dst_lo, _mm_shuffle_epi8(mm_coef2, mm_get_lo))); + mm_out_lo = _mm_or_si128(mm_out_lo, _mm_slli_epi64( + _mm_unpacklo_epi32(mm_out_a, mm_zero), 48)); + mm_out_lo = _mm_add_epi16(mm_out_lo, mm_half); + mm_out_lo = MM_SHIFTDIV255_epi16(mm_out_lo); + + mm_out_hi = _mm_add_epi16( + _mm_mullo_epi16(mm_src_hi, _mm_shuffle_epi8(mm_coef1, mm_get_hi)), + _mm_mullo_epi16(mm_dst_hi, _mm_shuffle_epi8(mm_coef2, mm_get_hi))); + mm_out_hi = _mm_or_si128(mm_out_hi, _mm_slli_epi64( + _mm_unpackhi_epi32(mm_out_a, mm_zero), 48)); + mm_out_hi = _mm_add_epi16(mm_out_hi, mm_half); + mm_out_hi = MM_SHIFTDIV255_epi16(mm_out_hi); + + _mm_storeu_si128((__m128i *) &out[x], + _mm_packus_epi16(mm_out_lo, mm_out_hi)); + } + + #undef MM_SHIFTDIV255_epi16 + + for (; x < xsize; x += 1) { + if (src[x].a == 0) { // Copy 4 bytes at once. - *out = *dst; + out[x] = dst[x]; } else { // Integer implementation with increased precision. // Each variable has extra meaningful bits. // Divisions are rounded. 
UINT32 tmpr, tmpg, tmpb; - UINT32 blend = dst->a * (255 - src->a); - UINT32 outa255 = src->a * 255 + blend; + UINT32 blend = dst[x].a * (255 - src[x].a); + UINT32 outa255 = src[x].a * 255 + blend; // There we use 7 bits for precision. // We could use more, but we go beyond 32 bits. - UINT32 coef1 = src->a * 255 * 255 * (1 << PRECISION_BITS) / outa255; - UINT32 coef2 = 255 * (1 << PRECISION_BITS) - coef1; + UINT32 coef1 = src[x].a * 255 * 255 * (1<r * coef1 + dst->r * coef2; - tmpg = src->g * coef1 + dst->g * coef2; - tmpb = src->b * coef1 + dst->b * coef2; - out->r = - SHIFTFORDIV255(tmpr + (0x80 << PRECISION_BITS)) >> PRECISION_BITS; - out->g = - SHIFTFORDIV255(tmpg + (0x80 << PRECISION_BITS)) >> PRECISION_BITS; - out->b = - SHIFTFORDIV255(tmpb + (0x80 << PRECISION_BITS)) >> PRECISION_BITS; - out->a = SHIFTFORDIV255(outa255 + 0x80); + tmpr = src[x].r * coef1 + dst[x].r * coef2; + tmpg = src[x].g * coef1 + dst[x].g * coef2; + tmpb = src[x].b * coef1 + dst[x].b * coef2; + out[x].r = SHIFTFORDIV255(tmpr + (0x80<> PRECISION_BITS; + out[x].g = SHIFTFORDIV255(tmpg + (0x80<> PRECISION_BITS; + out[x].b = SHIFTFORDIV255(tmpb + (0x80<> PRECISION_BITS; + out[x].a = SHIFTFORDIV255(outa255 + 0x80); } - - dst++; - src++; - out++; } } From d1717f3ffc210047443794c7e43c5da0e76ec235 Mon Sep 17 00:00:00 2001 From: Aleksandr Karpinskii Date: Sun, 11 Aug 2024 21:03:53 +0400 Subject: [PATCH 11/12] Make SSE4 optional --- src/libImaging/AlphaComposite.c | 53 +++++++++++++++++---------------- 1 file changed, 27 insertions(+), 26 deletions(-) diff --git a/src/libImaging/AlphaComposite.c b/src/libImaging/AlphaComposite.c index 1e5576345..d1c29aadc 100644 --- a/src/libImaging/AlphaComposite.c +++ b/src/libImaging/AlphaComposite.c @@ -23,28 +23,6 @@ Imaging ImagingAlphaComposite(Imaging imDst, Imaging imSrc) { Imaging imOut; int x, y; - int xsize = imDst->xsize; - __m128i mm_max_alpha = _mm_set1_epi32(255); - __m128i mm_max_alpha2 = _mm_set1_epi32(255 * 255); - __m128i mm_zero = _mm_setzero_si128(); - __m128i mm_half = _mm_set1_epi16(128); - __m128i mm_get_lo = _mm_set_epi8( - -1,-1, 5,4, 5,4, 5,4, -1,-1, 1,0, 1,0, 1,0); - __m128i mm_get_hi = _mm_set_epi8( - -1,-1, 13,12, 13,12, 13,12, -1,-1, 9,8, 9,8, 9,8); -#if defined(__AVX2__) - __m256i vmm_max_alpha = _mm256_set1_epi32(255); - __m256i vmm_max_alpha2 = _mm256_set1_epi32(255 * 255); - __m256i vmm_zero = _mm256_setzero_si256(); - __m256i vmm_half = _mm256_set1_epi16(128); - __m256i vmm_get_lo = _mm256_set_epi8( - -1,-1, 5,4, 5,4, 5,4, -1,-1, 1,0, 1,0, 1,0, - -1,-1, 5,4, 5,4, 5,4, -1,-1, 1,0, 1,0, 1,0); - __m256i vmm_get_hi = _mm256_set_epi8( - -1,-1, 13,12, 13,12, 13,12, -1,-1, 9,8, 9,8, 9,8, - -1,-1, 13,12, 13,12, 13,12, -1,-1, 9,8, 9,8, 9,8); -#endif - /* Check arguments */ if (!imDst || !imSrc || strcmp(imDst->mode, "RGBA") || @@ -71,11 +49,22 @@ ImagingAlphaComposite(Imaging imDst, Imaging imSrc) { x = 0; #if defined(__AVX2__) + { + __m256i vmm_max_alpha = _mm256_set1_epi32(255); + __m256i vmm_max_alpha2 = _mm256_set1_epi32(255 * 255); + __m256i vmm_zero = _mm256_setzero_si256(); + __m256i vmm_half = _mm256_set1_epi16(128); + __m256i vmm_get_lo = _mm256_set_epi8( + -1,-1, 5,4, 5,4, 5,4, -1,-1, 1,0, 1,0, 1,0, + -1,-1, 5,4, 5,4, 5,4, -1,-1, 1,0, 1,0, 1,0); + __m256i vmm_get_hi = _mm256_set_epi8( + -1,-1, 13,12, 13,12, 13,12, -1,-1, 9,8, 9,8, 9,8, + -1,-1, 13,12, 13,12, 13,12, -1,-1, 9,8, 9,8, 9,8); #define MM_SHIFTDIV255_epi16(src)\ _mm256_srli_epi16(_mm256_add_epi16(src, _mm256_srli_epi16(src, 8)), 8) - for (; x < xsize - 7; x += 8) { + for (; x < imDst->xsize - 
7; x += 8) { __m256i mm_dst, mm_dst_lo, mm_dst_hi; __m256i mm_src, mm_src_lo, mm_src_hi; __m256i mm_dst_a, mm_src_a, mm_out_a, mm_blend; @@ -126,13 +115,23 @@ ImagingAlphaComposite(Imaging imDst, Imaging imSrc) { } #undef MM_SHIFTDIV255_epi16 - + } #endif +#if defined(__SSE4__) + { + __m128i mm_max_alpha = _mm_set1_epi32(255); + __m128i mm_max_alpha2 = _mm_set1_epi32(255 * 255); + __m128i mm_zero = _mm_setzero_si128(); + __m128i mm_half = _mm_set1_epi16(128); + __m128i mm_get_lo = _mm_set_epi8( + -1,-1, 5,4, 5,4, 5,4, -1,-1, 1,0, 1,0, 1,0); + __m128i mm_get_hi = _mm_set_epi8( + -1,-1, 13,12, 13,12, 13,12, -1,-1, 9,8, 9,8, 9,8); #define MM_SHIFTDIV255_epi16(src)\ _mm_srli_epi16(_mm_add_epi16(src, _mm_srli_epi16(src, 8)), 8) - for (; x < xsize - 3; x += 4) { + for (; x < imDst->xsize - 3; x += 4) { __m128i mm_dst, mm_dst_lo, mm_dst_hi; __m128i mm_src, mm_src_hi, mm_src_lo; __m128i mm_dst_a, mm_src_a, mm_out_a, mm_blend; @@ -191,8 +190,10 @@ ImagingAlphaComposite(Imaging imDst, Imaging imSrc) { } #undef MM_SHIFTDIV255_epi16 + } +#endif - for (; x < xsize; x += 1) { + for (; x < imDst->xsize; x += 1) { if (src[x].a == 0) { // Copy 4 bytes at once. out[x] = dst[x]; From b6fb4d8ff85bba584c669ce5f21044daa59fe438 Mon Sep 17 00:00:00 2001 From: Aleksandr Karpinskii Date: Sun, 11 Aug 2024 21:06:37 +0400 Subject: [PATCH 12/12] Apply clang-format --- src/libImaging/AlphaComposite.c | 352 ++++++++++++++++++++------------ 1 file changed, 221 insertions(+), 131 deletions(-) diff --git a/src/libImaging/AlphaComposite.c b/src/libImaging/AlphaComposite.c index d1c29aadc..6ef15e91b 100644 --- a/src/libImaging/AlphaComposite.c +++ b/src/libImaging/AlphaComposite.c @@ -49,148 +49,235 @@ ImagingAlphaComposite(Imaging imDst, Imaging imSrc) { x = 0; #if defined(__AVX2__) - { - __m256i vmm_max_alpha = _mm256_set1_epi32(255); - __m256i vmm_max_alpha2 = _mm256_set1_epi32(255 * 255); - __m256i vmm_zero = _mm256_setzero_si256(); - __m256i vmm_half = _mm256_set1_epi16(128); - __m256i vmm_get_lo = _mm256_set_epi8( - -1,-1, 5,4, 5,4, 5,4, -1,-1, 1,0, 1,0, 1,0, - -1,-1, 5,4, 5,4, 5,4, -1,-1, 1,0, 1,0, 1,0); - __m256i vmm_get_hi = _mm256_set_epi8( - -1,-1, 13,12, 13,12, 13,12, -1,-1, 9,8, 9,8, 9,8, - -1,-1, 13,12, 13,12, 13,12, -1,-1, 9,8, 9,8, 9,8); - - #define MM_SHIFTDIV255_epi16(src)\ - _mm256_srli_epi16(_mm256_add_epi16(src, _mm256_srli_epi16(src, 8)), 8) - - for (; x < imDst->xsize - 7; x += 8) { - __m256i mm_dst, mm_dst_lo, mm_dst_hi; - __m256i mm_src, mm_src_lo, mm_src_hi; - __m256i mm_dst_a, mm_src_a, mm_out_a, mm_blend; - __m256i mm_coef1, mm_coef2, mm_out_lo, mm_out_hi; - - mm_dst = _mm256_loadu_si256((__m256i *) &dst[x]); - mm_dst_lo = _mm256_unpacklo_epi8(mm_dst, vmm_zero); - mm_dst_hi = _mm256_unpackhi_epi8(mm_dst, vmm_zero); - mm_src = _mm256_loadu_si256((__m256i *) &src[x]); - mm_src_lo = _mm256_unpacklo_epi8(mm_src, vmm_zero); - mm_src_hi = _mm256_unpackhi_epi8(mm_src, vmm_zero); - - mm_dst_a = _mm256_srli_epi32(mm_dst, 24); - mm_src_a = _mm256_srli_epi32(mm_src, 24); - - // Compute coefficients - // blend = dst->a * (255 - src->a); 16 bits - mm_blend = _mm256_mullo_epi16(mm_dst_a, _mm256_sub_epi32(vmm_max_alpha, mm_src_a)); - // outa = src->a * 255 + dst->a * (255 - src->a); 16 bits - mm_out_a = _mm256_add_epi32(_mm256_mullo_epi16(mm_src_a, vmm_max_alpha), mm_blend); - mm_coef1 = _mm256_mullo_epi32(mm_src_a, vmm_max_alpha2); - // 8 bits - mm_coef1 = _mm256_cvtps_epi32( - _mm256_mul_ps(_mm256_cvtepi32_ps(mm_coef1), - _mm256_rcp_ps(_mm256_cvtepi32_ps(mm_out_a))) + { + __m256i vmm_max_alpha = 
_mm256_set1_epi32(255); + __m256i vmm_max_alpha2 = _mm256_set1_epi32(255 * 255); + __m256i vmm_zero = _mm256_setzero_si256(); + __m256i vmm_half = _mm256_set1_epi16(128); + __m256i vmm_get_lo = _mm256_set_epi8( + -1, + -1, + 5, + 4, + 5, + 4, + 5, + 4, + -1, + -1, + 1, + 0, + 1, + 0, + 1, + 0, + -1, + -1, + 5, + 4, + 5, + 4, + 5, + 4, + -1, + -1, + 1, + 0, + 1, + 0, + 1, + 0 + ); + __m256i vmm_get_hi = _mm256_set_epi8( + -1, + -1, + 13, + 12, + 13, + 12, + 13, + 12, + -1, + -1, + 9, + 8, + 9, + 8, + 9, + 8, + -1, + -1, + 13, + 12, + 13, + 12, + 13, + 12, + -1, + -1, + 9, + 8, + 9, + 8, + 9, + 8 ); - // 8 bits - mm_coef2 = _mm256_sub_epi32(vmm_max_alpha, mm_coef1); - mm_out_lo = _mm256_add_epi16( - _mm256_mullo_epi16(mm_src_lo, _mm256_shuffle_epi8(mm_coef1, vmm_get_lo)), - _mm256_mullo_epi16(mm_dst_lo, _mm256_shuffle_epi8(mm_coef2, vmm_get_lo))); - mm_out_lo = _mm256_or_si256(mm_out_lo, _mm256_slli_epi64( - _mm256_unpacklo_epi32(mm_out_a, vmm_zero), 48)); - mm_out_lo = _mm256_add_epi16(mm_out_lo, vmm_half); - mm_out_lo = MM_SHIFTDIV255_epi16(mm_out_lo); +#define MM_SHIFTDIV255_epi16(src) \ + _mm256_srli_epi16(_mm256_add_epi16(src, _mm256_srli_epi16(src, 8)), 8) - mm_out_hi = _mm256_add_epi16( - _mm256_mullo_epi16(mm_src_hi, _mm256_shuffle_epi8(mm_coef1, vmm_get_hi)), - _mm256_mullo_epi16(mm_dst_hi, _mm256_shuffle_epi8(mm_coef2, vmm_get_hi))); - mm_out_hi = _mm256_or_si256(mm_out_hi, _mm256_slli_epi64( - _mm256_unpackhi_epi32(mm_out_a, vmm_zero), 48)); - mm_out_hi = _mm256_add_epi16(mm_out_hi, vmm_half); - mm_out_hi = MM_SHIFTDIV255_epi16(mm_out_hi); + for (; x < imDst->xsize - 7; x += 8) { + __m256i mm_dst, mm_dst_lo, mm_dst_hi; + __m256i mm_src, mm_src_lo, mm_src_hi; + __m256i mm_dst_a, mm_src_a, mm_out_a, mm_blend; + __m256i mm_coef1, mm_coef2, mm_out_lo, mm_out_hi; - _mm256_storeu_si256((__m256i *) &out[x], - _mm256_packus_epi16(mm_out_lo, mm_out_hi)); + mm_dst = _mm256_loadu_si256((__m256i *)&dst[x]); + mm_dst_lo = _mm256_unpacklo_epi8(mm_dst, vmm_zero); + mm_dst_hi = _mm256_unpackhi_epi8(mm_dst, vmm_zero); + mm_src = _mm256_loadu_si256((__m256i *)&src[x]); + mm_src_lo = _mm256_unpacklo_epi8(mm_src, vmm_zero); + mm_src_hi = _mm256_unpackhi_epi8(mm_src, vmm_zero); + + mm_dst_a = _mm256_srli_epi32(mm_dst, 24); + mm_src_a = _mm256_srli_epi32(mm_src, 24); + + // Compute coefficients + // blend = dst->a * (255 - src->a); 16 bits + mm_blend = _mm256_mullo_epi16( + mm_dst_a, _mm256_sub_epi32(vmm_max_alpha, mm_src_a) + ); + // outa = src->a * 255 + dst->a * (255 - src->a); 16 bits + mm_out_a = _mm256_add_epi32( + _mm256_mullo_epi16(mm_src_a, vmm_max_alpha), mm_blend + ); + mm_coef1 = _mm256_mullo_epi32(mm_src_a, vmm_max_alpha2); + // 8 bits + mm_coef1 = _mm256_cvtps_epi32(_mm256_mul_ps( + _mm256_cvtepi32_ps(mm_coef1), + _mm256_rcp_ps(_mm256_cvtepi32_ps(mm_out_a)) + )); + // 8 bits + mm_coef2 = _mm256_sub_epi32(vmm_max_alpha, mm_coef1); + + mm_out_lo = _mm256_add_epi16( + _mm256_mullo_epi16( + mm_src_lo, _mm256_shuffle_epi8(mm_coef1, vmm_get_lo) + ), + _mm256_mullo_epi16( + mm_dst_lo, _mm256_shuffle_epi8(mm_coef2, vmm_get_lo) + ) + ); + mm_out_lo = _mm256_or_si256( + mm_out_lo, + _mm256_slli_epi64(_mm256_unpacklo_epi32(mm_out_a, vmm_zero), 48) + ); + mm_out_lo = _mm256_add_epi16(mm_out_lo, vmm_half); + mm_out_lo = MM_SHIFTDIV255_epi16(mm_out_lo); + + mm_out_hi = _mm256_add_epi16( + _mm256_mullo_epi16( + mm_src_hi, _mm256_shuffle_epi8(mm_coef1, vmm_get_hi) + ), + _mm256_mullo_epi16( + mm_dst_hi, _mm256_shuffle_epi8(mm_coef2, vmm_get_hi) + ) + ); + mm_out_hi = _mm256_or_si256( + mm_out_hi, + 
_mm256_slli_epi64(_mm256_unpackhi_epi32(mm_out_a, vmm_zero), 48) + ); + mm_out_hi = _mm256_add_epi16(mm_out_hi, vmm_half); + mm_out_hi = MM_SHIFTDIV255_epi16(mm_out_hi); + + _mm256_storeu_si256( + (__m256i *)&out[x], _mm256_packus_epi16(mm_out_lo, mm_out_hi) + ); + } + +#undef MM_SHIFTDIV255_epi16 } - - #undef MM_SHIFTDIV255_epi16 - } #endif #if defined(__SSE4__) - { - __m128i mm_max_alpha = _mm_set1_epi32(255); - __m128i mm_max_alpha2 = _mm_set1_epi32(255 * 255); - __m128i mm_zero = _mm_setzero_si128(); - __m128i mm_half = _mm_set1_epi16(128); - __m128i mm_get_lo = _mm_set_epi8( - -1,-1, 5,4, 5,4, 5,4, -1,-1, 1,0, 1,0, 1,0); - __m128i mm_get_hi = _mm_set_epi8( - -1,-1, 13,12, 13,12, 13,12, -1,-1, 9,8, 9,8, 9,8); + { + __m128i mm_max_alpha = _mm_set1_epi32(255); + __m128i mm_max_alpha2 = _mm_set1_epi32(255 * 255); + __m128i mm_zero = _mm_setzero_si128(); + __m128i mm_half = _mm_set1_epi16(128); + __m128i mm_get_lo = + _mm_set_epi8(-1, -1, 5, 4, 5, 4, 5, 4, -1, -1, 1, 0, 1, 0, 1, 0); + __m128i mm_get_hi = + _mm_set_epi8(-1, -1, 13, 12, 13, 12, 13, 12, -1, -1, 9, 8, 9, 8, 9, 8); - #define MM_SHIFTDIV255_epi16(src)\ - _mm_srli_epi16(_mm_add_epi16(src, _mm_srli_epi16(src, 8)), 8) +#define MM_SHIFTDIV255_epi16(src) \ + _mm_srli_epi16(_mm_add_epi16(src, _mm_srli_epi16(src, 8)), 8) - for (; x < imDst->xsize - 3; x += 4) { - __m128i mm_dst, mm_dst_lo, mm_dst_hi; - __m128i mm_src, mm_src_hi, mm_src_lo; - __m128i mm_dst_a, mm_src_a, mm_out_a, mm_blend; - __m128i mm_coef1, mm_coef2, mm_out_lo, mm_out_hi; + for (; x < imDst->xsize - 3; x += 4) { + __m128i mm_dst, mm_dst_lo, mm_dst_hi; + __m128i mm_src, mm_src_hi, mm_src_lo; + __m128i mm_dst_a, mm_src_a, mm_out_a, mm_blend; + __m128i mm_coef1, mm_coef2, mm_out_lo, mm_out_hi; - // [8] a3 b3 g3 r3 a2 b2 g2 r2 a1 b1 g1 r1 a0 b0 g0 r0 - mm_dst = _mm_loadu_si128((__m128i *) &dst[x]); - // [16] a1 b1 g1 r1 a0 b0 g0 r0 - mm_dst_lo = _mm_unpacklo_epi8(mm_dst, mm_zero); - // [16] a3 b3 g3 r3 a2 b2 g2 r2 - mm_dst_hi = _mm_unpackhi_epi8(mm_dst, mm_zero); - // [8] a3 b3 g3 r3 a2 b2 g2 r2 a1 b1 g1 r1 a0 b0 g0 r0 - mm_src = _mm_loadu_si128((__m128i *) &src[x]); - mm_src_lo = _mm_unpacklo_epi8(mm_src, mm_zero); - mm_src_hi = _mm_unpackhi_epi8(mm_src, mm_zero); + // [8] a3 b3 g3 r3 a2 b2 g2 r2 a1 b1 g1 r1 a0 b0 g0 r0 + mm_dst = _mm_loadu_si128((__m128i *)&dst[x]); + // [16] a1 b1 g1 r1 a0 b0 g0 r0 + mm_dst_lo = _mm_unpacklo_epi8(mm_dst, mm_zero); + // [16] a3 b3 g3 r3 a2 b2 g2 r2 + mm_dst_hi = _mm_unpackhi_epi8(mm_dst, mm_zero); + // [8] a3 b3 g3 r3 a2 b2 g2 r2 a1 b1 g1 r1 a0 b0 g0 r0 + mm_src = _mm_loadu_si128((__m128i *)&src[x]); + mm_src_lo = _mm_unpacklo_epi8(mm_src, mm_zero); + mm_src_hi = _mm_unpackhi_epi8(mm_src, mm_zero); - // [32] a3 a2 a1 a0 - mm_dst_a = _mm_srli_epi32(mm_dst, 24); - mm_src_a = _mm_srli_epi32(mm_src, 24); + // [32] a3 a2 a1 a0 + mm_dst_a = _mm_srli_epi32(mm_dst, 24); + mm_src_a = _mm_srli_epi32(mm_src, 24); - // Compute coefficients - // blend = dst->a * (255 - src->a) - // [16] xx b3 xx b2 xx b1 xx b0 - mm_blend = _mm_mullo_epi16(mm_dst_a, _mm_sub_epi32(mm_max_alpha, mm_src_a)); - // outa = src->a * 255 + blend - // [16] xx a3 xx a2 xx a1 xx a0 - mm_out_a = _mm_add_epi32(_mm_mullo_epi16(mm_src_a, mm_max_alpha), mm_blend); - // coef1 = src->a * 255 * 255 / outa - mm_coef1 = _mm_mullo_epi32(mm_src_a, mm_max_alpha2); - // [8] xx xx xx c3 xx xx xx c2 xx xx xx c1 xx xx xx c0 - mm_coef1 = _mm_cvtps_epi32( - _mm_mul_ps(_mm_cvtepi32_ps(mm_coef1), - _mm_rcp_ps(_mm_cvtepi32_ps(mm_out_a))) - ); - // [8] xx xx xx c3 xx xx xx c2 xx xx xx c1 xx xx xx c0 
- mm_coef2 = _mm_sub_epi32(mm_max_alpha, mm_coef1); + // Compute coefficients + // blend = dst->a * (255 - src->a) + // [16] xx b3 xx b2 xx b1 xx b0 + mm_blend = + _mm_mullo_epi16(mm_dst_a, _mm_sub_epi32(mm_max_alpha, mm_src_a)); + // outa = src->a * 255 + blend + // [16] xx a3 xx a2 xx a1 xx a0 + mm_out_a = + _mm_add_epi32(_mm_mullo_epi16(mm_src_a, mm_max_alpha), mm_blend); + // coef1 = src->a * 255 * 255 / outa + mm_coef1 = _mm_mullo_epi32(mm_src_a, mm_max_alpha2); + // [8] xx xx xx c3 xx xx xx c2 xx xx xx c1 xx xx xx c0 + mm_coef1 = _mm_cvtps_epi32(_mm_mul_ps( + _mm_cvtepi32_ps(mm_coef1), _mm_rcp_ps(_mm_cvtepi32_ps(mm_out_a)) + )); + // [8] xx xx xx c3 xx xx xx c2 xx xx xx c1 xx xx xx c0 + mm_coef2 = _mm_sub_epi32(mm_max_alpha, mm_coef1); - mm_out_lo = _mm_add_epi16( - _mm_mullo_epi16(mm_src_lo, _mm_shuffle_epi8(mm_coef1, mm_get_lo)), - _mm_mullo_epi16(mm_dst_lo, _mm_shuffle_epi8(mm_coef2, mm_get_lo))); - mm_out_lo = _mm_or_si128(mm_out_lo, _mm_slli_epi64( - _mm_unpacklo_epi32(mm_out_a, mm_zero), 48)); - mm_out_lo = _mm_add_epi16(mm_out_lo, mm_half); - mm_out_lo = MM_SHIFTDIV255_epi16(mm_out_lo); + mm_out_lo = _mm_add_epi16( + _mm_mullo_epi16(mm_src_lo, _mm_shuffle_epi8(mm_coef1, mm_get_lo)), + _mm_mullo_epi16(mm_dst_lo, _mm_shuffle_epi8(mm_coef2, mm_get_lo)) + ); + mm_out_lo = _mm_or_si128( + mm_out_lo, _mm_slli_epi64(_mm_unpacklo_epi32(mm_out_a, mm_zero), 48) + ); + mm_out_lo = _mm_add_epi16(mm_out_lo, mm_half); + mm_out_lo = MM_SHIFTDIV255_epi16(mm_out_lo); - mm_out_hi = _mm_add_epi16( - _mm_mullo_epi16(mm_src_hi, _mm_shuffle_epi8(mm_coef1, mm_get_hi)), - _mm_mullo_epi16(mm_dst_hi, _mm_shuffle_epi8(mm_coef2, mm_get_hi))); - mm_out_hi = _mm_or_si128(mm_out_hi, _mm_slli_epi64( - _mm_unpackhi_epi32(mm_out_a, mm_zero), 48)); - mm_out_hi = _mm_add_epi16(mm_out_hi, mm_half); - mm_out_hi = MM_SHIFTDIV255_epi16(mm_out_hi); + mm_out_hi = _mm_add_epi16( + _mm_mullo_epi16(mm_src_hi, _mm_shuffle_epi8(mm_coef1, mm_get_hi)), + _mm_mullo_epi16(mm_dst_hi, _mm_shuffle_epi8(mm_coef2, mm_get_hi)) + ); + mm_out_hi = _mm_or_si128( + mm_out_hi, _mm_slli_epi64(_mm_unpackhi_epi32(mm_out_a, mm_zero), 48) + ); + mm_out_hi = _mm_add_epi16(mm_out_hi, mm_half); + mm_out_hi = MM_SHIFTDIV255_epi16(mm_out_hi); - _mm_storeu_si128((__m128i *) &out[x], - _mm_packus_epi16(mm_out_lo, mm_out_hi)); + _mm_storeu_si128( + (__m128i *)&out[x], _mm_packus_epi16(mm_out_lo, mm_out_hi) + ); + } + +#undef MM_SHIFTDIV255_epi16 } - - #undef MM_SHIFTDIV255_epi16 - } #endif for (; x < imDst->xsize; x += 1) { @@ -207,15 +294,18 @@ ImagingAlphaComposite(Imaging imDst, Imaging imSrc) { UINT32 outa255 = src[x].a * 255 + blend; // There we use 7 bits for precision. // We could use more, but we go beyond 32 bits. - UINT32 coef1 = src[x].a * 255 * 255 * (1<> PRECISION_BITS; - out[x].g = SHIFTFORDIV255(tmpg + (0x80<> PRECISION_BITS; - out[x].b = SHIFTFORDIV255(tmpb + (0x80<> PRECISION_BITS; + out[x].r = + SHIFTFORDIV255(tmpr + (0x80 << PRECISION_BITS)) >> PRECISION_BITS; + out[x].g = + SHIFTFORDIV255(tmpg + (0x80 << PRECISION_BITS)) >> PRECISION_BITS; + out[x].b = + SHIFTFORDIV255(tmpb + (0x80 << PRECISION_BITS)) >> PRECISION_BITS; out[x].a = SHIFTFORDIV255(outa255 + 0x80); } }
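With the series applied, the compiled-in SIMD level is exposed through the new
"acceleration" feature registered in PATCH 04/12 and exercised by PATCH 07/12.
A minimal sketch of how a build could be checked from Python, assuming Pillow
was built from these patches:

    from PIL import features

    # Expected to print "avx2", "sse4", "sse2" or "neon" on an accelerated
    # build, and None when no SIMD code path was compiled in.
    print(features.version("acceleration"))

features.pilinfo() reports the same value on its "Acceleration" line.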