Merge branch 'main' into fp

Andrew Murray 2025-01-31 23:18:21 +11:00 committed by GitHub
commit 0d733fe056
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
93 changed files with 997 additions and 1052 deletions


@ -1,99 +0,0 @@
skip_commits:
files:
- ".github/**/*"
- ".gitmodules"
- "docs/**/*"
- "wheels/**/*"
version: '{build}'
clone_folder: c:\pillow
init:
- ECHO %PYTHON%
#- ps: iex ((new-object net.webclient).DownloadString('https://raw.githubusercontent.com/appveyor/ci/master/scripts/enable-rdp.ps1'))
# Uncomment previous line to get RDP access during the build.
environment:
COVERAGE_CORE: sysmon
EXECUTABLE: python.exe
TEST_OPTIONS:
DEPLOY: YES
matrix:
- PYTHON: C:/Python313
ARCHITECTURE: x86
APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2022
- PYTHON: C:/Python39-x64
ARCHITECTURE: AMD64
APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2019
install:
- '%PYTHON%\%EXECUTABLE% --version'
- '%PYTHON%\%EXECUTABLE% -m pip install --upgrade pip'
- curl -fsSL -o pillow-test-images.zip https://github.com/python-pillow/test-images/archive/main.zip
- 7z x pillow-test-images.zip -oc:\
- xcopy /S /Y c:\test-images-main\* c:\pillow\tests\images
- curl -fsSL -o nasm-win64.zip https://raw.githubusercontent.com/python-pillow/pillow-depends/main/nasm-2.16.03-win64.zip
- 7z x nasm-win64.zip -oc:\
- choco install ghostscript --version=10.4.0
- path c:\nasm-2.16.03;C:\Program Files\gs\gs10.04.0\bin;%PATH%
- cd c:\pillow\winbuild\
- ps: |
c:\python39\python.exe c:\pillow\winbuild\build_prepare.py -v --depends=C:\pillow-depends\
c:\pillow\winbuild\build\build_dep_all.cmd
$host.SetShouldExit(0)
- path C:\pillow\winbuild\build\bin;%PATH%
build_script:
- cd c:\pillow
- winbuild\build\build_env.cmd
- '%PYTHON%\%EXECUTABLE% -m pip install -v -C raqm=vendor -C fribidi=vendor .'
- '%PYTHON%\%EXECUTABLE% selftest.py --installed'
test_script:
- cd c:\pillow
- '%PYTHON%\%EXECUTABLE% -m pip install pytest pytest-cov pytest-timeout defusedxml ipython numpy olefile pyroma'
- c:\"Program Files (x86)"\"Windows Kits"\10\Debuggers\x86\gflags.exe /p /enable %PYTHON%\%EXECUTABLE%
- path %PYTHON%;%PATH%
- .ci\test.cmd
after_test:
- curl -Os https://uploader.codecov.io/latest/windows/codecov.exe
- .\codecov.exe --file coverage.xml --name %PYTHON% --flags AppVeyor
matrix:
fast_finish: true
cache:
- '%LOCALAPPDATA%\pip\Cache'
artifacts:
- path: pillow\*.egg
name: egg
- path: pillow\*.whl
name: wheel
before_deploy:
- cd c:\pillow
- '%PYTHON%\%EXECUTABLE% -m pip wheel -v -C raqm=vendor -C fribidi=vendor .'
- ps: Get-ChildItem .\*.whl | % { Push-AppveyorArtifact $_.FullName -FileName $_.Name }
deploy:
provider: S3
region: us-west-2
access_key_id: AKIAIRAXC62ZNTVQJMOQ
secret_access_key:
secure: Hwb6klTqtBeMgxAjRoDltiiqpuH8xbwD4UooDzBSiCWXjuFj1lyl4kHgHwTCCGqi
bucket: pillow-nightly
folder: win/$(APPVEYOR_BUILD_NUMBER)/
artifact: /.*egg|wheel/
on:
APPVEYOR_REPO_NAME: python-pillow/Pillow
branch: main
deploy: YES
# Uncomment the following lines to get RDP access after the build/test and block for
# up to the timeout limit (~1hr)
#
#on_finish:
#- ps: $blockRdp = $true; iex ((new-object net.webclient).DownloadString('https://raw.githubusercontent.com/appveyor/ci/master/scripts/enable-rdp.ps1'))


@ -2,8 +2,4 @@
# gather the coverage data
python3 -m pip install coverage
if [[ $MATRIX_DOCKER ]]; then
python3 -m coverage xml --ignore-errors
else
python3 -m coverage xml
fi
python3 -m coverage xml


@ -3,8 +3,5 @@
set -e
python3 -m coverage erase
if [ $(uname) == "Darwin" ]; then
export CPPFLAGS="-I/usr/local/miniconda/include";
fi
make clean
make install-coverage


@ -1,4 +1,4 @@
mypy==1.14.0
mypy==1.14.1
IceSpringPySideStubs-PyQt6
IceSpringPySideStubs-PySide6
ipython


@ -9,7 +9,7 @@ Please send a pull request to the `main` branch. Please include [documentation](
- Fork the Pillow repository.
- Create a branch from `main`.
- Develop bug fixes, features, tests, etc.
- Run the test suite. You can enable GitHub Actions (https://github.com/MY-USERNAME/Pillow/actions) and [AppVeyor](https://ci.appveyor.com/projects/new) on your repo to catch test failures prior to the pull request, and [Codecov](https://codecov.io/gh) to see if the changed code is covered by tests.
- Run the test suite. You can enable GitHub Actions (https://github.com/MY-USERNAME/Pillow/actions) on your repo to catch test failures prior to the pull request, and [Codecov](https://codecov.io/gh) to see if the changed code is covered by tests.
- Create a pull request to pull the changes from your branch to the Pillow `main`.
### Guidelines
@ -17,7 +17,7 @@ Please send a pull request to the `main` branch. Please include [documentation](
- Separate code commits from reformatting commits.
- Provide tests for any newly added code.
- Follow PEP 8.
- When committing only documentation changes please include `[ci skip]` in the commit message to avoid running tests on AppVeyor.
- When committing only documentation changes please include `[ci skip]` in the commit message to avoid running extra tests.
- Include [release notes](https://github.com/python-pillow/Pillow/tree/main/docs/releasenotes) as needed or appropriate with your bug fixes, feature additions and tests.
## Reporting Issues

.github/mergify.yml vendored (1 line changed)

@ -9,7 +9,6 @@ pull_request_rules:
- status-success=Windows Test Successful
- status-success=MinGW
- status-success=Cygwin Test Successful
- status-success=continuous-integration/appveyor/pr
actions:
merge:
method: merge


@ -52,7 +52,7 @@ jobs:
persist-credentials: false
- name: Install Cygwin
uses: cygwin/cygwin-install-action@v4
uses: cygwin/cygwin-install-action@v5
with:
packages: >
gcc-g++


@ -29,21 +29,18 @@ concurrency:
jobs:
build:
runs-on: ubuntu-latest
runs-on: ${{ matrix.os }}
strategy:
fail-fast: false
matrix:
os: ["ubuntu-latest"]
docker: [
# Run slower jobs first to give them a headstart and reduce waiting time
ubuntu-22.04-jammy-arm64v8,
ubuntu-24.04-noble-ppc64le,
ubuntu-24.04-noble-s390x,
# Then run the remainder
alpine,
amazon-2-amd64,
amazon-2023-amd64,
arch,
centos-stream-9-amd64,
centos-stream-10-amd64,
debian-12-bookworm-x86,
debian-12-bookworm-amd64,
fedora-40-amd64,
@ -54,12 +51,17 @@ jobs:
]
dockerTag: [main]
include:
- docker: "ubuntu-22.04-jammy-arm64v8"
qemu-arch: "aarch64"
- docker: "ubuntu-24.04-noble-ppc64le"
os: "ubuntu-22.04"
qemu-arch: "ppc64le"
dockerTag: main
- docker: "ubuntu-24.04-noble-s390x"
os: "ubuntu-22.04"
qemu-arch: "s390x"
dockerTag: main
- docker: "ubuntu-24.04-noble-arm64v8"
os: "ubuntu-24.04-arm"
dockerTag: main
name: ${{ matrix.docker }}
@ -89,15 +91,15 @@ jobs:
- name: After success
run: |
PATH="$PATH:~/.local/bin"
docker start pillow_container
sudo docker cp pillow_container:/Pillow /Pillow
sudo chown -R runner /Pillow
pil_path=`docker exec pillow_container /vpy3/bin/python -c 'import os, PIL;print(os.path.realpath(os.path.dirname(PIL.__file__)))'`
docker stop pillow_container
sudo mkdir -p $pil_path
sudo cp src/PIL/*.py $pil_path
cd /Pillow
.ci/after_success.sh
env:
MATRIX_DOCKER: ${{ matrix.docker }}
- name: Upload coverage
uses: codecov/codecov-action@v5


@ -66,9 +66,9 @@ jobs:
mingw-w64-x86_64-libtiff \
mingw-w64-x86_64-libwebp \
mingw-w64-x86_64-openjpeg2 \
mingw-w64-x86_64-python3-numpy \
mingw-w64-x86_64-python3-olefile \
mingw-w64-x86_64-python3-pip \
mingw-w64-x86_64-python-numpy \
mingw-w64-x86_64-python-olefile \
mingw-w64-x86_64-python-pip \
mingw-w64-x86_64-python-pytest \
mingw-w64-x86_64-python-pytest-cov \
mingw-w64-x86_64-python-pytest-timeout \


@ -31,15 +31,20 @@ env:
jobs:
build:
runs-on: windows-latest
runs-on: ${{ matrix.os }}
strategy:
fail-fast: false
matrix:
python-version: ["pypy3.10", "3.9", "3.10", "3.11", "3.12", "3.13"]
python-version: ["pypy3.10", "3.10", "3.11", "3.12", "3.13", "3.14"]
architecture: ["x64"]
os: ["windows-latest"]
include:
# Test the oldest Python on 32-bit
- { python-version: "3.9", architecture: "x86", os: "windows-2019" }
timeout-minutes: 30
name: Python ${{ matrix.python-version }}
name: Python ${{ matrix.python-version }} (${{ matrix.architecture }})
steps:
- name: Checkout Pillow
@ -67,6 +72,7 @@ jobs:
with:
python-version: ${{ matrix.python-version }}
allow-prereleases: true
architecture: ${{ matrix.architecture }}
cache: pip
cache-dependency-path: ".github/workflows/test-windows.yml"
@ -78,7 +84,7 @@ jobs:
python3 -m pip install --upgrade pip
- name: Install CPython dependencies
if: "!contains(matrix.python-version, 'pypy')"
if: "!contains(matrix.python-version, 'pypy') && matrix.architecture != 'x86'"
run: |
python3 -m pip install PyQt6


@ -42,6 +42,7 @@ jobs:
]
python-version: [
"pypy3.10",
"3.14",
"3.13t",
"3.13",
"3.12",


@ -37,20 +37,15 @@ fi
ARCHIVE_SDIR=pillow-depends-main
# Package versions for fresh source builds
FREETYPE_VERSION=2.13.2
HARFBUZZ_VERSION=10.1.0
LIBPNG_VERSION=1.6.44
FREETYPE_VERSION=2.13.3
HARFBUZZ_VERSION=10.2.0
LIBPNG_VERSION=1.6.46
JPEGTURBO_VERSION=3.1.0
OPENJPEG_VERSION=2.5.3
XZ_VERSION=5.6.3
XZ_VERSION=5.6.4
TIFF_VERSION=4.6.0
LCMS2_VERSION=2.16
if [[ -n "$IS_MACOS" ]]; then
GIFLIB_VERSION=5.2.2
else
GIFLIB_VERSION=5.2.1
fi
ZLIB_NG_VERSION=2.2.2
ZLIB_NG_VERSION=2.2.3
LIBWEBP_VERSION=1.5.0
BZIP2_VERSION=1.0.8
LIBXCB_VERSION=1.17.0
@ -103,7 +98,7 @@ function build_harfbuzz {
function build {
build_xz
if [ -z "$IS_ALPINE" ] && [ -z "$IS_MACOS" ]; then
if [ -z "$IS_ALPINE" ] && [ -z "$SANITIZER" ] && [ -z "$IS_MACOS" ]; then
yum remove -y zlib-devel
fi
build_zlib_ng
@ -140,7 +135,9 @@ function build {
if [[ -n "$IS_MACOS" ]]; then
CFLAGS="$CFLAGS -Wl,-headerpad_max_install_names"
fi
build_libwebp
build_simple libwebp $LIBWEBP_VERSION \
https://storage.googleapis.com/downloads.webmproject.org/releases/webp tar.gz \
--enable-libwebpmux --enable-libwebpdemux
CFLAGS=$ORIGINAL_CFLAGS
build_brotli


@ -11,6 +11,9 @@ if ("$venv" -like "*\cibw-run-*\pp*-win_amd64\*") {
$env:path += ";$pillow\winbuild\build\bin\"
& "$venv\Scripts\activate.ps1"
& reg add "HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Image File Execution Options\python.exe" /v "GlobalFlag" /t REG_SZ /d "0x02000000" /f
if ("$venv" -like "*\cibw-run-*-win_amd64\*") {
& python -m pip install numpy
}
cd $pillow
& python -VV
if (!$?) { exit $LASTEXITCODE }


@ -13,6 +13,7 @@ on:
paths:
- ".ci/requirements-cibw.txt"
- ".github/workflows/wheel*"
- "pyproject.toml"
- "setup.py"
- "wheels/*"
- "winbuild/build_prepare.py"
@ -23,6 +24,7 @@ on:
paths:
- ".ci/requirements-cibw.txt"
- ".github/workflows/wheel*"
- "pyproject.toml"
- "setup.py"
- "wheels/*"
- "winbuild/build_prepare.py"
@ -40,62 +42,7 @@ env:
FORCE_COLOR: 1
jobs:
build-1-QEMU-emulated-wheels:
if: github.event_name != 'schedule'
name: aarch64 ${{ matrix.python-version }} ${{ matrix.spec }}
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
python-version:
- pp310
- cp3{9,10,11}
- cp3{12,13}
spec:
- manylinux2014
- manylinux_2_28
- musllinux
exclude:
- { python-version: pp310, spec: musllinux }
steps:
- uses: actions/checkout@v4
with:
persist-credentials: false
submodules: true
- uses: actions/setup-python@v5
with:
python-version: "3.x"
# https://github.com/docker/setup-qemu-action
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
- name: Install cibuildwheel
run: |
python3 -m pip install -r .ci/requirements-cibw.txt
- name: Build wheels
run: |
python3 -m cibuildwheel --output-dir wheelhouse
env:
# Build only the currently selected Linux architecture (so we can
# parallelise for speed).
CIBW_ARCHS: "aarch64"
# Likewise, select only one Python version per job to speed this up.
CIBW_BUILD: "${{ matrix.python-version }}-${{ matrix.spec == 'musllinux' && 'musllinux' || 'manylinux' }}*"
CIBW_ENABLE: cpython-prerelease
# Extra options for manylinux.
CIBW_MANYLINUX_AARCH64_IMAGE: ${{ matrix.spec }}
CIBW_MANYLINUX_PYPY_AARCH64_IMAGE: ${{ matrix.spec }}
- uses: actions/upload-artifact@v4
with:
name: dist-qemu-${{ matrix.python-version }}-${{ matrix.spec }}
path: ./wheelhouse/*.whl
build-2-native-wheels:
build-native-wheels:
if: github.event_name != 'schedule' || github.repository_owner == 'python-pillow'
name: ${{ matrix.name }}
runs-on: ${{ matrix.os }}
@ -130,6 +77,14 @@ jobs:
cibw_arch: x86_64
build: "*manylinux*"
manylinux: "manylinux_2_28"
- name: "manylinux2014 and musllinux aarch64"
os: ubuntu-24.04-arm
cibw_arch: aarch64
- name: "manylinux_2_28 aarch64"
os: ubuntu-24.04-arm
cibw_arch: aarch64
build: "*manylinux*"
manylinux: "manylinux_2_28"
steps:
- uses: actions/checkout@v4
with:
@ -150,7 +105,9 @@ jobs:
env:
CIBW_ARCHS: ${{ matrix.cibw_arch }}
CIBW_BUILD: ${{ matrix.build }}
CIBW_ENABLE: cpython-prerelease cpython-freethreading
CIBW_ENABLE: cpython-prerelease cpython-freethreading pypy
CIBW_MANYLINUX_AARCH64_IMAGE: ${{ matrix.manylinux }}
CIBW_MANYLINUX_PYPY_AARCH64_IMAGE: ${{ matrix.manylinux }}
CIBW_MANYLINUX_PYPY_X86_64_IMAGE: ${{ matrix.manylinux }}
CIBW_MANYLINUX_X86_64_IMAGE: ${{ matrix.manylinux }}
CIBW_SKIP: pp39-*
@ -227,7 +184,7 @@ jobs:
CIBW_ARCHS: ${{ matrix.cibw_arch }}
CIBW_BEFORE_ALL: "{package}\\winbuild\\build\\build_dep_all.cmd"
CIBW_CACHE_PATH: "C:\\cibw"
CIBW_ENABLE: cpython-prerelease cpython-freethreading
CIBW_ENABLE: cpython-prerelease cpython-freethreading pypy
CIBW_SKIP: pp39-*
CIBW_TEST_SKIP: "*-win_arm64"
CIBW_TEST_COMMAND: 'docker run --rm
@ -263,8 +220,6 @@ jobs:
uses: actions/setup-python@v5
with:
python-version: "3.x"
cache: pip
cache-dependency-path: "Makefile"
- run: make sdist
@ -275,7 +230,7 @@ jobs:
scientific-python-nightly-wheels-publish:
if: github.repository_owner == 'python-pillow' && (github.event_name == 'schedule' || github.event_name == 'workflow_dispatch')
needs: [build-2-native-wheels, windows]
needs: [build-native-wheels, windows]
runs-on: ubuntu-latest
name: Upload wheels to scientific-python-nightly-wheels
steps:
@ -292,7 +247,7 @@ jobs:
pypi-publish:
if: github.repository_owner == 'python-pillow' && github.event_name == 'push' && startsWith(github.ref, 'refs/tags')
needs: [build-1-QEMU-emulated-wheels, build-2-native-wheels, windows, sdist]
needs: [build-native-wheels, windows, sdist]
runs-on: ubuntu-latest
name: Upload release to PyPI
environment:


@ -1,6 +1,6 @@
repos:
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.8.1
rev: v0.8.6
hooks:
- id: ruff
args: [--exit-non-zero-on-fix]
@ -24,7 +24,7 @@ repos:
exclude: (Makefile$|\.bat$|\.cmake$|\.eps$|\.fits$|\.gd$|\.opt$)
- repo: https://github.com/pre-commit/mirrors-clang-format
rev: v19.1.4
rev: v19.1.6
hooks:
- id: clang-format
types: [c]
@ -56,6 +56,11 @@ repos:
- id: check-readthedocs
- id: check-renovate
- repo: https://github.com/woodruffw/zizmor-pre-commit
rev: v1.0.0
hooks:
- id: zizmor
- repo: https://github.com/sphinx-contrib/sphinx-lint
rev: v1.0.0
hooks:


@ -1,5 +1,8 @@
version: 2
sphinx:
configuration: docs/conf.py
formats: [pdf]
build:


@ -20,7 +20,6 @@ graft docs
graft _custom_build
# build/src control detritus
exclude .appveyor.yml
exclude .clang-format
exclude .coveragerc
exclude .editorconfig


@ -42,9 +42,6 @@ As of 2019, Pillow development is
<a href="https://github.com/python-pillow/Pillow/actions/workflows/test-docker.yml"><img
alt="GitHub Actions build status (Test Docker)"
src="https://github.com/python-pillow/Pillow/workflows/Test%20Docker/badge.svg"></a>
<a href="https://ci.appveyor.com/project/python-pillow/Pillow"><img
alt="AppVeyor CI build status (Windows)"
src="https://img.shields.io/appveyor/build/python-pillow/Pillow/main.svg?label=Windows%20build"></a>
<a href="https://github.com/python-pillow/Pillow/actions/workflows/wheels.yml"><img
alt="GitHub Actions build status (Wheels)"
src="https://github.com/python-pillow/Pillow/workflows/Wheels/badge.svg"></a>


@ -9,7 +9,7 @@ Released quarterly on January 2nd, April 1st, July 1st and October 15th.
* [ ] Open a release ticket e.g. https://github.com/python-pillow/Pillow/issues/3154
* [ ] Develop and prepare release in `main` branch.
* [ ] Check [GitHub Actions](https://github.com/python-pillow/Pillow/actions) and [AppVeyor](https://ci.appveyor.com/project/python-pillow/Pillow) to confirm passing tests in `main` branch.
* [ ] Check [GitHub Actions](https://github.com/python-pillow/Pillow/actions) to confirm passing tests in `main` branch.
* [ ] Check that all the wheel builds pass the tests in the [GitHub Actions "Wheels" workflow](https://github.com/python-pillow/Pillow/actions/workflows/wheels.yml) jobs by manually triggering them.
* [ ] In compliance with [PEP 440](https://peps.python.org/pep-0440/), update version identifier in `src/PIL/_version.py`
* [ ] Run pre-release check via `make release-test` in a freshly cloned repo.
@ -38,7 +38,7 @@ Released as needed for security, installation or critical bug fixes.
git checkout -t remotes/origin/5.2.x
```
* [ ] Cherry pick individual commits from `main` branch to release branch e.g. `5.2.x`, then `git push`.
* [ ] Check [GitHub Actions](https://github.com/python-pillow/Pillow/actions) and [AppVeyor](https://ci.appveyor.com/project/python-pillow/Pillow) to confirm passing tests in release branch e.g. `5.2.x`.
* [ ] Check [GitHub Actions](https://github.com/python-pillow/Pillow/actions) to confirm passing tests in release branch e.g. `5.2.x`.
* [ ] In compliance with [PEP 440](https://peps.python.org/pep-0440/), update version identifier in `src/PIL/_version.py`
* [ ] Run pre-release check via `make release-test`.
* [ ] Create tag for release e.g.:


@ -3,26 +3,25 @@ from __future__ import annotations
import zlib
from io import BytesIO
import pytest
from PIL import Image, ImageFile, PngImagePlugin
TEST_FILE = "Tests/images/png_decompression_dos.png"
def test_ignore_dos_text() -> None:
ImageFile.LOAD_TRUNCATED_IMAGES = True
def test_ignore_dos_text(monkeypatch: pytest.MonkeyPatch) -> None:
monkeypatch.setattr(ImageFile, "LOAD_TRUNCATED_IMAGES", True)
try:
im = Image.open(TEST_FILE)
with Image.open(TEST_FILE) as im:
im.load()
finally:
ImageFile.LOAD_TRUNCATED_IMAGES = False
assert isinstance(im, PngImagePlugin.PngImageFile)
for s in im.text.values():
assert len(s) < 1024 * 1024, "Text chunk larger than 1M"
assert isinstance(im, PngImagePlugin.PngImageFile)
for s in im.text.values():
assert len(s) < 1024 * 1024, "Text chunk larger than 1M"
for s in im.info.values():
assert len(s) < 1024 * 1024, "Text chunk larger than 1M"
for s in im.info.values():
assert len(s) < 1024 * 1024, "Text chunk larger than 1M"
def test_dos_text() -> None:


@ -140,18 +140,11 @@ def assert_image_similar_tofile(
filename: str,
epsilon: float,
msg: str | None = None,
mode: str | None = None,
) -> None:
with Image.open(filename) as img:
if mode:
img = img.convert(mode)
assert_image_similar(a, img, epsilon, msg)
def assert_all_same(items: Sequence[Any], msg: str | None = None) -> None:
assert items.count(items[0]) == len(items), msg
def assert_not_all_same(items: Sequence[Any], msg: str | None = None) -> None:
assert items.count(items[0]) != len(items), msg
@ -327,16 +320,7 @@ def magick_command() -> list[str] | None:
return None
def on_appveyor() -> bool:
return "APPVEYOR" in os.environ
def on_github_actions() -> bool:
return "GITHUB_ACTIONS" in os.environ
def on_ci() -> bool:
# GitHub Actions and AppVeyor have "CI"
return "CI" in os.environ

Binary image file added (391 B, not shown).


@ -7,7 +7,7 @@ import fuzzers
import packaging
import pytest
from PIL import Image, UnidentifiedImageError, features
from PIL import Image, features
from Tests.helper import skip_unless_feature
if sys.platform.startswith("win32"):
@ -32,21 +32,17 @@ def test_fuzz_images(path: str) -> None:
fuzzers.fuzz_image(f.read())
assert True
except (
# Known exceptions from Pillow
OSError,
SyntaxError,
MemoryError,
ValueError,
NotImplementedError,
OverflowError,
):
# Known exceptions that are thrown from Pillow
assert True
except (
# Known Image.* exceptions
Image.DecompressionBombError,
Image.DecompressionBombWarning,
UnidentifiedImageError,
):
# Known Image.* exceptions
assert True
finally:
fuzzers.disable_decompressionbomb_error()


@ -12,19 +12,16 @@ ORIGINAL_LIMIT = Image.MAX_IMAGE_PIXELS
class TestDecompressionBomb:
def teardown_method(self) -> None:
Image.MAX_IMAGE_PIXELS = ORIGINAL_LIMIT
def test_no_warning_small_file(self) -> None:
# Implicit assert: no warning.
# A warning would cause a failure.
with Image.open(TEST_FILE):
pass
def test_no_warning_no_limit(self) -> None:
def test_no_warning_no_limit(self, monkeypatch: pytest.MonkeyPatch) -> None:
# Arrange
# Turn limit off
Image.MAX_IMAGE_PIXELS = None
monkeypatch.setattr(Image, "MAX_IMAGE_PIXELS", None)
assert Image.MAX_IMAGE_PIXELS is None
# Act / Assert
@ -33,18 +30,18 @@ class TestDecompressionBomb:
with Image.open(TEST_FILE):
pass
def test_warning(self) -> None:
def test_warning(self, monkeypatch: pytest.MonkeyPatch) -> None:
# Set limit to trigger warning on the test file
Image.MAX_IMAGE_PIXELS = 128 * 128 - 1
monkeypatch.setattr(Image, "MAX_IMAGE_PIXELS", 128 * 128 - 1)
assert Image.MAX_IMAGE_PIXELS == 128 * 128 - 1
with pytest.warns(Image.DecompressionBombWarning):
with Image.open(TEST_FILE):
pass
def test_exception(self) -> None:
def test_exception(self, monkeypatch: pytest.MonkeyPatch) -> None:
# Set limit to trigger exception on the test file
Image.MAX_IMAGE_PIXELS = 64 * 128 - 1
monkeypatch.setattr(Image, "MAX_IMAGE_PIXELS", 64 * 128 - 1)
assert Image.MAX_IMAGE_PIXELS == 64 * 128 - 1
with pytest.raises(Image.DecompressionBombError):
@ -66,9 +63,9 @@ class TestDecompressionBomb:
with pytest.raises(Image.DecompressionBombError):
im.seek(1)
def test_exception_gif_zero_width(self) -> None:
def test_exception_gif_zero_width(self, monkeypatch: pytest.MonkeyPatch) -> None:
# Set limit to trigger exception on the test file
Image.MAX_IMAGE_PIXELS = 4 * 64 * 128
monkeypatch.setattr(Image, "MAX_IMAGE_PIXELS", 4 * 64 * 128)
assert Image.MAX_IMAGE_PIXELS == 4 * 64 * 128
with pytest.raises(Image.DecompressionBombError):


@ -307,13 +307,8 @@ def test_apng_syntax_errors() -> None:
im.load()
# we can handle this case gracefully
exception = None
with Image.open("Tests/images/apng/syntax_num_frames_low.png") as im:
try:
im.seek(im.n_frames - 1)
except Exception as e:
exception = e
assert exception is None
im.seek(im.n_frames - 1)
with pytest.raises(OSError):
with Image.open("Tests/images/apng/syntax_num_frames_high.png") as im:
@ -405,13 +400,8 @@ def test_apng_save_split_fdat(tmp_path: Path) -> None:
append_images=frames,
)
with Image.open(test_file) as im:
exception = None
try:
im.seek(im.n_frames - 1)
im.load()
except Exception as e:
exception = e
assert exception is None
im.seek(im.n_frames - 1)
im.load()
def test_apng_save_duration_loop(tmp_path: Path) -> None:


@ -4,7 +4,7 @@ from pathlib import Path
import pytest
from PIL import Image
from PIL import BlpImagePlugin, Image
from .helper import (
assert_image_equal,
@ -19,6 +19,7 @@ def test_load_blp1() -> None:
assert_image_equal_tofile(im, "Tests/images/blp/blp1_jpeg.png")
with Image.open("Tests/images/blp/blp1_jpeg2.blp") as im:
assert im.mode == "RGBA"
im.load()
@ -37,6 +38,13 @@ def test_load_blp2_dxt1a() -> None:
assert_image_equal_tofile(im, "Tests/images/blp/blp2_dxt1a.png")
def test_invalid_file() -> None:
invalid_file = "Tests/images/flower.jpg"
with pytest.raises(BlpImagePlugin.BLPFormatError):
BlpImagePlugin.BlpImageFile(invalid_file)
def test_save(tmp_path: Path) -> None:
f = str(tmp_path / "temp.blp")


@ -35,22 +35,19 @@ def test_sanity() -> None:
assert im.is_animated
def test_prefix_chunk() -> None:
ImageFile.LOAD_TRUNCATED_IMAGES = True
try:
with Image.open(animated_test_file_with_prefix_chunk) as im:
assert im.mode == "P"
assert im.size == (320, 200)
assert im.format == "FLI"
assert im.info["duration"] == 171
assert im.is_animated
def test_prefix_chunk(monkeypatch: pytest.MonkeyPatch) -> None:
monkeypatch.setattr(ImageFile, "LOAD_TRUNCATED_IMAGES", True)
with Image.open(animated_test_file_with_prefix_chunk) as im:
assert im.mode == "P"
assert im.size == (320, 200)
assert im.format == "FLI"
assert im.info["duration"] == 171
assert im.is_animated
palette = im.getpalette()
assert palette[3:6] == [255, 255, 255]
assert palette[381:384] == [204, 204, 12]
assert palette[765:] == [252, 0, 0]
finally:
ImageFile.LOAD_TRUNCATED_IMAGES = False
palette = im.getpalette()
assert palette[3:6] == [255, 255, 255]
assert palette[381:384] == [204, 204, 12]
assert palette[765:] == [252, 0, 0]
@pytest.mark.skipif(is_pypy(), reason="Requires CPython")


@ -109,7 +109,7 @@ def test_palette_not_needed_for_second_frame() -> None:
assert_image_similar(im, hopper("L").convert("RGB"), 8)
def test_strategy() -> None:
def test_strategy(monkeypatch: pytest.MonkeyPatch) -> None:
with Image.open("Tests/images/iss634.gif") as im:
expected_rgb_always = im.convert("RGB")
@ -119,35 +119,36 @@ def test_strategy() -> None:
im.seek(1)
expected_different = im.convert("RGB")
try:
GifImagePlugin.LOADING_STRATEGY = GifImagePlugin.LoadingStrategy.RGB_ALWAYS
with Image.open("Tests/images/iss634.gif") as im:
assert im.mode == "RGB"
assert_image_equal(im, expected_rgb_always)
monkeypatch.setattr(
GifImagePlugin, "LOADING_STRATEGY", GifImagePlugin.LoadingStrategy.RGB_ALWAYS
)
with Image.open("Tests/images/iss634.gif") as im:
assert im.mode == "RGB"
assert_image_equal(im, expected_rgb_always)
with Image.open("Tests/images/chi.gif") as im:
assert im.mode == "RGBA"
assert_image_equal(im, expected_rgb_always_rgba)
with Image.open("Tests/images/chi.gif") as im:
assert im.mode == "RGBA"
assert_image_equal(im, expected_rgb_always_rgba)
GifImagePlugin.LOADING_STRATEGY = (
GifImagePlugin.LoadingStrategy.RGB_AFTER_DIFFERENT_PALETTE_ONLY
)
# Stay in P mode with only a global palette
with Image.open("Tests/images/chi.gif") as im:
assert im.mode == "P"
monkeypatch.setattr(
GifImagePlugin,
"LOADING_STRATEGY",
GifImagePlugin.LoadingStrategy.RGB_AFTER_DIFFERENT_PALETTE_ONLY,
)
# Stay in P mode with only a global palette
with Image.open("Tests/images/chi.gif") as im:
assert im.mode == "P"
im.seek(1)
assert im.mode == "P"
assert_image_equal(im.convert("RGB"), expected_different)
im.seek(1)
assert im.mode == "P"
assert_image_equal(im.convert("RGB"), expected_different)
# Change to RGB mode when a frame has an individual palette
with Image.open("Tests/images/iss634.gif") as im:
assert im.mode == "P"
# Change to RGB mode when a frame has an individual palette
with Image.open("Tests/images/iss634.gif") as im:
assert im.mode == "P"
im.seek(1)
assert im.mode == "RGB"
finally:
GifImagePlugin.LOADING_STRATEGY = GifImagePlugin.LoadingStrategy.RGB_AFTER_FIRST
im.seek(1)
assert im.mode == "RGB"
def test_optimize() -> None:
@ -555,17 +556,15 @@ def test_dispose_background_transparency() -> None:
def test_transparent_dispose(
loading_strategy: GifImagePlugin.LoadingStrategy,
expected_colors: tuple[tuple[int | tuple[int, int, int, int], ...]],
monkeypatch: pytest.MonkeyPatch,
) -> None:
GifImagePlugin.LOADING_STRATEGY = loading_strategy
try:
with Image.open("Tests/images/transparent_dispose.gif") as img:
for frame in range(3):
img.seek(frame)
for x in range(3):
color = img.getpixel((x, 0))
assert color == expected_colors[frame][x]
finally:
GifImagePlugin.LOADING_STRATEGY = GifImagePlugin.LoadingStrategy.RGB_AFTER_FIRST
monkeypatch.setattr(GifImagePlugin, "LOADING_STRATEGY", loading_strategy)
with Image.open("Tests/images/transparent_dispose.gif") as img:
for frame in range(3):
img.seek(frame)
for x in range(3):
color = img.getpixel((x, 0))
assert color == expected_colors[frame][x]
def test_dispose_previous() -> None:
@ -1398,24 +1397,23 @@ def test_lzw_bits() -> None:
),
)
def test_extents(
test_file: str, loading_strategy: GifImagePlugin.LoadingStrategy
test_file: str,
loading_strategy: GifImagePlugin.LoadingStrategy,
monkeypatch: pytest.MonkeyPatch,
) -> None:
GifImagePlugin.LOADING_STRATEGY = loading_strategy
try:
with Image.open("Tests/images/" + test_file) as im:
assert im.size == (100, 100)
monkeypatch.setattr(GifImagePlugin, "LOADING_STRATEGY", loading_strategy)
with Image.open("Tests/images/" + test_file) as im:
assert im.size == (100, 100)
# Check that n_frames does not change the size
assert im.n_frames == 2
assert im.size == (100, 100)
# Check that n_frames does not change the size
assert im.n_frames == 2
assert im.size == (100, 100)
im.seek(1)
assert im.size == (150, 150)
im.seek(1)
assert im.size == (150, 150)
im.load()
assert im.im.size == (150, 150)
finally:
GifImagePlugin.LOADING_STRATEGY = GifImagePlugin.LoadingStrategy.RGB_AFTER_FIRST
im.load()
assert im.im.size == (150, 150)
def test_missing_background() -> None:


@ -243,27 +243,23 @@ def test_draw_reloaded(tmp_path: Path) -> None:
assert_image_equal_tofile(im, "Tests/images/hopper_draw.ico")
def test_truncated_mask() -> None:
def test_truncated_mask(monkeypatch: pytest.MonkeyPatch) -> None:
# 1 bpp
with open("Tests/images/hopper_mask.ico", "rb") as fp:
data = fp.read()
ImageFile.LOAD_TRUNCATED_IMAGES = True
monkeypatch.setattr(ImageFile, "LOAD_TRUNCATED_IMAGES", True)
data = data[:-3]
try:
with Image.open(io.BytesIO(data)) as im:
with Image.open("Tests/images/hopper_mask.png") as expected:
assert im.mode == "1"
with Image.open(io.BytesIO(data)) as im:
assert im.mode == "1"
# 32 bpp
output = io.BytesIO()
expected = hopper("RGBA")
expected.save(output, "ico", bitmap_format="bmp")
# 32 bpp
output = io.BytesIO()
expected = hopper("RGBA")
expected.save(output, "ico", bitmap_format="bmp")
data = output.getvalue()[:-1]
data = output.getvalue()[:-1]
with Image.open(io.BytesIO(data)) as im:
assert im.mode == "RGB"
finally:
ImageFile.LOAD_TRUNCATED_IMAGES = False
with Image.open(io.BytesIO(data)) as im:
assert im.mode == "RGB"


@ -58,10 +58,7 @@ def test_getiptcinfo_fotostation() -> None:
# Assert
assert iptc is not None
for tag in iptc.keys():
if tag[0] == 240:
return
pytest.fail("FotoStation tag not found")
assert 240 in (tag[0] for tag in iptc.keys()), "FotoStation tag not found"
def test_getiptcinfo_zero_padding() -> None:


@ -181,6 +181,10 @@ class TestFileJpeg:
assert test(100, 200) == (100, 200)
assert test(0) is None # square pixels
def test_dpi_jfif_cm(self) -> None:
with Image.open("Tests/images/jfif_unit_cm.jpg") as im:
assert im.info["dpi"] == (2.54, 5.08)
@mark_if_feature_version(
pytest.mark.valgrind_known_error, "libjpeg_turbo", "2.0", reason="Known Failing"
)
@ -277,7 +281,10 @@ class TestFileJpeg:
assert not im2.info.get("progressive")
assert im3.info.get("progressive")
assert_image_equal(im1, im3)
if features.check_feature("mozjpeg"):
assert_image_similar(im1, im3, 9.39)
else:
assert_image_equal(im1, im3)
assert im1_bytes >= im3_bytes
def test_progressive_large_buffer(self, tmp_path: Path) -> None:
@ -349,7 +356,6 @@ class TestFileJpeg:
assert exif.get_ifd(0x8825) == {}
transposed = ImageOps.exif_transpose(im)
assert transposed is not None
exif = transposed.getexif()
assert exif.get_ifd(0x8825) == {}
@ -420,8 +426,12 @@ class TestFileJpeg:
im2 = self.roundtrip(hopper(), progressive=1)
im3 = self.roundtrip(hopper(), progression=1) # compatibility
assert_image_equal(im1, im2)
assert_image_equal(im1, im3)
if features.check_feature("mozjpeg"):
assert_image_similar(im1, im2, 9.39)
assert_image_similar(im1, im3, 9.39)
else:
assert_image_equal(im1, im2)
assert_image_equal(im1, im3)
assert im2.info.get("progressive")
assert im2.info.get("progression")
assert im3.info.get("progressive")
@ -520,12 +530,13 @@ class TestFileJpeg:
@mark_if_feature_version(
pytest.mark.valgrind_known_error, "libjpeg_turbo", "2.0", reason="Known Failing"
)
def test_truncated_jpeg_should_read_all_the_data(self) -> None:
def test_truncated_jpeg_should_read_all_the_data(
self, monkeypatch: pytest.MonkeyPatch
) -> None:
filename = "Tests/images/truncated_jpeg.jpg"
ImageFile.LOAD_TRUNCATED_IMAGES = True
monkeypatch.setattr(ImageFile, "LOAD_TRUNCATED_IMAGES", True)
with Image.open(filename) as im:
im.load()
ImageFile.LOAD_TRUNCATED_IMAGES = False
assert im.getbbox() is not None
def test_truncated_jpeg_throws_oserror(self) -> None:
@ -1014,7 +1025,7 @@ class TestFileJpeg:
im.save(f, xmp=b"1" * 65505)
@pytest.mark.timeout(timeout=1)
def test_eof(self) -> None:
def test_eof(self, monkeypatch: pytest.MonkeyPatch) -> None:
# Even though this decoder never says that it is finished
# the image should still end when there is no new data
class InfiniteMockPyDecoder(ImageFile.PyDecoder):
@ -1027,11 +1038,10 @@ class TestFileJpeg:
with Image.open(TEST_FILE) as im:
im.tile = [
("INFINITE", (0, 0, 128, 128), 0, ("RGB", 0, 1)),
ImageFile._Tile("INFINITE", (0, 0, 128, 128), 0, ("RGB", 0, 1)),
]
ImageFile.LOAD_TRUNCATED_IMAGES = True
monkeypatch.setattr(ImageFile, "LOAD_TRUNCATED_IMAGES", True)
im.load()
ImageFile.LOAD_TRUNCATED_IMAGES = False
def test_separate_tables(self) -> None:
im = hopper()


@ -181,14 +181,11 @@ def test_load_dpi() -> None:
assert "dpi" not in im.info
def test_restricted_icc_profile() -> None:
ImageFile.LOAD_TRUNCATED_IMAGES = True
try:
# JPEG2000 image with a restricted ICC profile and a known colorspace
with Image.open("Tests/images/balloon_eciRGBv2_aware.jp2") as im:
assert im.mode == "RGB"
finally:
ImageFile.LOAD_TRUNCATED_IMAGES = False
def test_restricted_icc_profile(monkeypatch: pytest.MonkeyPatch) -> None:
monkeypatch.setattr(ImageFile, "LOAD_TRUNCATED_IMAGES", True)
# JPEG2000 image with a restricted ICC profile and a known colorspace
with Image.open("Tests/images/balloon_eciRGBv2_aware.jp2") as im:
assert im.mode == "RGB"
@pytest.mark.skipif(
@ -325,6 +322,18 @@ def test_cmyk() -> None:
assert im.getpixel((0, 0)) == (185, 134, 0, 0)
@pytest.mark.skipif(
not os.path.exists(EXTRA_DIR), reason="Extra image files not installed"
)
@skip_unless_feature_version("jpg_2000", "2.5.3")
def test_cmyk_save() -> None:
with Image.open(f"{EXTRA_DIR}/issue205.jp2") as jp2:
assert jp2.mode == "CMYK"
im = roundtrip(jp2)
assert_image_equal(im, jp2)
@pytest.mark.parametrize("ext", (".j2k", ".jp2"))
def test_16bit_monochrome_has_correct_mode(ext: str) -> None:
with Image.open("Tests/images/16bit.cropped" + ext) as im:
@ -480,8 +489,7 @@ def test_plt_marker(card: ImageFile.ImageFile) -> None:
out.seek(0)
while True:
marker = out.read(2)
if not marker:
pytest.fail("End of stream without PLT")
assert marker, "End of stream without PLT"
jp2_boxid = _binary.i16be(marker)
if jp2_boxid == 0xFF4F:


@ -44,11 +44,7 @@ class LibTiffTestCase:
im.load()
im.getdata()
try:
assert im._compression == "group4"
except AttributeError:
print("No _compression")
print(dir(im))
assert im._compression == "group4"
# can we write it back out, in a different form.
out = str(tmp_path / "temp.png")
@ -1116,13 +1112,15 @@ class TestFileLibTiff(LibTiffTestCase):
)
def test_buffering(self, test_file: str) -> None:
# load exif first
with Image.open(open(test_file, "rb", buffering=1048576)) as im:
exif = dict(im.getexif())
with open(test_file, "rb", buffering=1048576) as f:
with Image.open(f) as im:
exif = dict(im.getexif())
# load image before exif
with Image.open(open(test_file, "rb", buffering=1048576)) as im2:
im2.load()
exif_after_load = dict(im2.getexif())
with open(test_file, "rb", buffering=1048576) as f:
with Image.open(f) as im2:
im2.load()
exif_after_load = dict(im2.getexif())
assert exif == exif_after_load
@ -1155,7 +1153,7 @@ class TestFileLibTiff(LibTiffTestCase):
im.load()
# Assert that the error code is IMAGING_CODEC_MEMORY
assert str(e.value) == "-9"
assert str(e.value) == "decoder error -9"
@pytest.mark.parametrize("compression", ("tiff_adobe_deflate", "jpeg"))
def test_save_multistrip(self, compression: str, tmp_path: Path) -> None:
@ -1169,23 +1167,22 @@ class TestFileLibTiff(LibTiffTestCase):
assert len(im.tag_v2[STRIPOFFSETS]) > 1
@pytest.mark.parametrize("argument", (True, False))
def test_save_single_strip(self, argument: bool, tmp_path: Path) -> None:
def test_save_single_strip(
self, argument: bool, tmp_path: Path, monkeypatch: pytest.MonkeyPatch
) -> None:
im = hopper("RGB").resize((256, 256))
out = str(tmp_path / "temp.tif")
if not argument:
TiffImagePlugin.STRIP_SIZE = 2**18
try:
arguments: dict[str, str | int] = {"compression": "tiff_adobe_deflate"}
if argument:
arguments["strip_size"] = 2**18
im.save(out, "TIFF", **arguments)
monkeypatch.setattr(TiffImagePlugin, "STRIP_SIZE", 2**18)
arguments: dict[str, str | int] = {"compression": "tiff_adobe_deflate"}
if argument:
arguments["strip_size"] = 2**18
im.save(out, "TIFF", **arguments)
with Image.open(out) as im:
assert isinstance(im, TiffImagePlugin.TiffImageFile)
assert len(im.tag_v2[STRIPOFFSETS]) == 1
finally:
TiffImagePlugin.STRIP_SIZE = 65536
with Image.open(out) as im:
assert isinstance(im, TiffImagePlugin.TiffImageFile)
assert len(im.tag_v2[STRIPOFFSETS]) == 1
@pytest.mark.parametrize("compression", ("tiff_adobe_deflate", None))
def test_save_zero(self, compression: str | None, tmp_path: Path) -> None:


@ -363,7 +363,7 @@ class TestFilePng:
with pytest.raises((OSError, SyntaxError)):
im.verify()
def test_verify_ignores_crc_error(self) -> None:
def test_verify_ignores_crc_error(self, monkeypatch: pytest.MonkeyPatch) -> None:
# check ignores crc errors in ancillary chunks
chunk_data = chunk(b"tEXt", b"spam")
@ -373,24 +373,20 @@ class TestFilePng:
with pytest.raises(SyntaxError):
PngImagePlugin.PngImageFile(BytesIO(image_data))
ImageFile.LOAD_TRUNCATED_IMAGES = True
try:
im = load(image_data)
assert im is not None
finally:
ImageFile.LOAD_TRUNCATED_IMAGES = False
monkeypatch.setattr(ImageFile, "LOAD_TRUNCATED_IMAGES", True)
im = load(image_data)
assert im is not None
def test_verify_not_ignores_crc_error_in_required_chunk(self) -> None:
def test_verify_not_ignores_crc_error_in_required_chunk(
self, monkeypatch: pytest.MonkeyPatch
) -> None:
# check does not ignore crc errors in required chunks
image_data = MAGIC + IHDR[:-1] + b"q" + TAIL
ImageFile.LOAD_TRUNCATED_IMAGES = True
try:
with pytest.raises(SyntaxError):
PngImagePlugin.PngImageFile(BytesIO(image_data))
finally:
ImageFile.LOAD_TRUNCATED_IMAGES = False
monkeypatch.setattr(ImageFile, "LOAD_TRUNCATED_IMAGES", True)
with pytest.raises(SyntaxError):
PngImagePlugin.PngImageFile(BytesIO(image_data))
def test_roundtrip_dpi(self) -> None:
# Check dpi roundtripping
@ -600,7 +596,7 @@ class TestFilePng:
(b"prIV", b"VALUE3", True),
]
def test_textual_chunks_after_idat(self) -> None:
def test_textual_chunks_after_idat(self, monkeypatch: pytest.MonkeyPatch) -> None:
with Image.open("Tests/images/hopper.png") as im:
assert "comment" in im.text
for k, v in {
@ -614,18 +610,17 @@ class TestFilePng:
with pytest.raises(OSError):
assert isinstance(im.text, dict)
# Raises an EOFError in load_end
with Image.open("Tests/images/hopper_idat_after_image_end.png") as im:
assert im.text == {"TXT": "VALUE", "ZIP": "VALUE"}
# Raises a UnicodeDecodeError in load_end
with Image.open("Tests/images/truncated_image.png") as im:
# The file is truncated
with pytest.raises(OSError):
im.text()
ImageFile.LOAD_TRUNCATED_IMAGES = True
im.text
monkeypatch.setattr(ImageFile, "LOAD_TRUNCATED_IMAGES", True)
assert isinstance(im.text, dict)
ImageFile.LOAD_TRUNCATED_IMAGES = False
# Raises an EOFError in load_end
with Image.open("Tests/images/hopper_idat_after_image_end.png") as im:
assert im.text == {"TXT": "VALUE", "ZIP": "VALUE"}
def test_unknown_compression_method(self) -> None:
with pytest.raises(SyntaxError, match="Unknown compression method"):
@ -651,15 +646,16 @@ class TestFilePng:
@pytest.mark.parametrize(
"cid", (b"IHDR", b"sRGB", b"pHYs", b"acTL", b"fcTL", b"fdAT")
)
def test_truncated_chunks(self, cid: bytes) -> None:
def test_truncated_chunks(
self, cid: bytes, monkeypatch: pytest.MonkeyPatch
) -> None:
fp = BytesIO()
with PngImagePlugin.PngStream(fp) as png:
with pytest.raises(ValueError):
png.call(cid, 0, 0)
ImageFile.LOAD_TRUNCATED_IMAGES = True
monkeypatch.setattr(ImageFile, "LOAD_TRUNCATED_IMAGES", True)
png.call(cid, 0, 0)
ImageFile.LOAD_TRUNCATED_IMAGES = False
@pytest.mark.parametrize("save_all", (True, False))
def test_specify_bits(self, save_all: bool, tmp_path: Path) -> None:
@ -789,17 +785,14 @@ class TestFilePng:
with Image.open(mystdout) as reloaded:
assert_image_equal_tofile(reloaded, TEST_PNG_FILE)
def test_truncated_end_chunk(self) -> None:
def test_truncated_end_chunk(self, monkeypatch: pytest.MonkeyPatch) -> None:
with Image.open("Tests/images/truncated_end_chunk.png") as im:
with pytest.raises(OSError):
im.load()
ImageFile.LOAD_TRUNCATED_IMAGES = True
try:
with Image.open("Tests/images/truncated_end_chunk.png") as im:
assert_image_equal_tofile(im, "Tests/images/hopper.png")
finally:
ImageFile.LOAD_TRUNCATED_IMAGES = False
monkeypatch.setattr(ImageFile, "LOAD_TRUNCATED_IMAGES", True)
with Image.open("Tests/images/truncated_end_chunk.png") as im:
assert_image_equal_tofile(im, "Tests/images/hopper.png")
@pytest.mark.skipif(is_win32(), reason="Requires Unix or macOS")
@ -808,11 +801,11 @@ class TestTruncatedPngPLeaks(PillowLeakTestCase):
mem_limit = 2 * 1024 # max increase in K
iterations = 100 # Leak is 56k/iteration, this will leak 5.6megs
def test_leak_load(self) -> None:
def test_leak_load(self, monkeypatch: pytest.MonkeyPatch) -> None:
with open("Tests/images/hopper.png", "rb") as f:
DATA = BytesIO(f.read(16 * 1024))
ImageFile.LOAD_TRUNCATED_IMAGES = True
monkeypatch.setattr(ImageFile, "LOAD_TRUNCATED_IMAGES", True)
with Image.open(DATA) as im:
im.load()
@ -820,7 +813,4 @@ class TestTruncatedPngPLeaks(PillowLeakTestCase):
with Image.open(DATA) as im:
im.load()
try:
self._test_leak(core)
finally:
ImageFile.LOAD_TRUNCATED_IMAGES = False
self._test_leak(core)


@ -7,7 +7,7 @@ from pathlib import Path
import pytest
from PIL import Image, ImageSequence, SpiderImagePlugin
from PIL import Image, SpiderImagePlugin
from .helper import assert_image_equal, hopper, is_pypy
@ -153,8 +153,8 @@ def test_nonstack_file() -> None:
def test_nonstack_dos() -> None:
with Image.open(TEST_FILE) as im:
for i, frame in enumerate(ImageSequence.Iterator(im)):
assert i <= 1, "Non-stack DOS file test failed"
with pytest.raises(EOFError):
im.seek(0)
# for issue #4093


@ -115,6 +115,19 @@ class TestFileTiff:
outfile = str(tmp_path / "temp.tif")
im.save(outfile, save_all=True, append_images=[im], tiffinfo=im.tag_v2)
def test_bigtiff_save(self, tmp_path: Path) -> None:
outfile = str(tmp_path / "temp.tif")
im = hopper()
im.save(outfile, big_tiff=True)
with Image.open(outfile) as reloaded:
assert reloaded.tag_v2._bigtiff is True
im.save(outfile, save_all=True, append_images=[im], big_tiff=True)
with Image.open(outfile) as reloaded:
assert reloaded.tag_v2._bigtiff is True
def test_seek_too_large(self) -> None:
with pytest.raises(ValueError, match="Unable to seek to frame"):
Image.open("Tests/images/seek_too_large.tif")
@ -733,7 +746,7 @@ class TestFileTiff:
assert reread.n_frames == 3
def test_fixoffsets(self) -> None:
b = BytesIO(b"II\x2a\x00\x00\x00\x00\x00")
b = BytesIO(b"II\x2A\x00\x00\x00\x00\x00")
with TiffImagePlugin.AppendingTiffWriter(b) as a:
b.seek(0)
a.fixOffsets(1, isShort=True)
@ -746,6 +759,37 @@ class TestFileTiff:
with pytest.raises(RuntimeError):
a.fixOffsets(1)
b = BytesIO(b"II\x2A\x00\x00\x00\x00\x00")
with TiffImagePlugin.AppendingTiffWriter(b) as a:
a.offsetOfNewPage = 2**16
b.seek(0)
a.fixOffsets(1, isShort=True)
b = BytesIO(b"II\x2B\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00")
with TiffImagePlugin.AppendingTiffWriter(b) as a:
a.offsetOfNewPage = 2**32
b.seek(0)
a.fixOffsets(1, isShort=True)
b.seek(0)
a.fixOffsets(1, isLong=True)
def test_appending_tiff_writer_writelong(self) -> None:
data = b"II\x2A\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b = BytesIO(data)
with TiffImagePlugin.AppendingTiffWriter(b) as a:
a.writeLong(2**32 - 1)
assert b.getvalue() == data + b"\xff\xff\xff\xff"
def test_appending_tiff_writer_rewritelastshorttolong(self) -> None:
data = b"II\x2A\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b = BytesIO(data)
with TiffImagePlugin.AppendingTiffWriter(b) as a:
a.rewriteLastShortToLong(2**32 - 1)
assert b.getvalue() == data[:-2] + b"\xff\xff\xff\xff"
def test_saving_icc_profile(self, tmp_path: Path) -> None:
# Tests saving TIFF with icc_profile set.
# At the time of writing this will only work for non-compressed tiffs
@ -897,11 +941,10 @@ class TestFileTiff:
@pytest.mark.timeout(6)
@pytest.mark.filterwarnings("ignore:Truncated File Read")
def test_timeout(self) -> None:
def test_timeout(self, monkeypatch: pytest.MonkeyPatch) -> None:
with Image.open("Tests/images/timeout-6646305047838720") as im:
ImageFile.LOAD_TRUNCATED_IMAGES = True
monkeypatch.setattr(ImageFile, "LOAD_TRUNCATED_IMAGES", True)
im.load()
ImageFile.LOAD_TRUNCATED_IMAGES = False
@pytest.mark.parametrize(
"test_file",


@ -28,9 +28,9 @@ except ImportError:
class TestUnsupportedWebp:
def test_unsupported(self) -> None:
def test_unsupported(self, monkeypatch: pytest.MonkeyPatch) -> None:
if HAVE_WEBP:
WebPImagePlugin.SUPPORTED = False
monkeypatch.setattr(WebPImagePlugin, "SUPPORTED", False)
file_path = "Tests/images/hopper.webp"
with pytest.warns(UserWarning):
@ -38,9 +38,6 @@ class TestUnsupportedWebp:
with Image.open(file_path):
pass
if HAVE_WEBP:
WebPImagePlugin.SUPPORTED = True
@skip_unless_feature("webp")
class TestFileWebp:


@ -189,8 +189,6 @@ class TestImage:
if ext == ".jp2" and not features.check_codec("jpg_2000"):
pytest.skip("jpg_2000 not available")
temp_file = str(tmp_path / ("temp." + ext))
if os.path.exists(temp_file):
os.remove(temp_file)
im.save(Path(temp_file))
def test_fp_name(self, tmp_path: Path) -> None:
@ -667,7 +665,7 @@ class TestImage:
# Test illegal image mode
with hopper() as im:
with pytest.raises(ValueError):
im.remap_palette(None)
im.remap_palette([])
def test_remap_palette_transparency(self) -> None:
im = Image.new("P", (1, 2), (0, 0, 0))
@ -770,7 +768,7 @@ class TestImage:
assert dict(exif)
# Test that exif data is cleared after another load
exif.load(None)
exif.load(b"")
assert not dict(exif)
# Test loading just the EXIF header
@ -793,6 +791,10 @@ class TestImage:
ifd[36864] = b"0220"
assert exif.get_ifd(0x8769) == {36864: b"0220"}
reloaded_exif = Image.Exif()
reloaded_exif.load(exif.tobytes())
assert reloaded_exif.get_ifd(0x8769) == {36864: b"0220"}
@mark_if_feature_version(
pytest.mark.valgrind_known_error, "libjpeg_turbo", "2.0", reason="Known Failing"
)
@ -987,6 +989,11 @@ class TestImage:
else:
assert im.getxmp() == {"xmpmeta": None}
def test_get_child_images(self) -> None:
im = Image.new("RGB", (1, 1))
with pytest.warns(DeprecationWarning):
assert im.get_child_images() == []
@pytest.mark.parametrize("size", ((1, 0), (0, 1), (0, 0)))
def test_zero_tobytes(self, size: tuple[int, int]) -> None:
im = Image.new("RGB", size)


@ -271,13 +271,25 @@ class TestImagePutPixelError:
class TestEmbeddable:
@pytest.mark.xfail(reason="failing test")
@pytest.mark.xfail(not (sys.version_info >= (3, 13)), reason="failing test")
@pytest.mark.skipif(not is_win32(), reason="requires Windows")
def test_embeddable(self) -> None:
import ctypes
from setuptools.command import build_ext
compiler = getattr(build_ext, "new_compiler")()
compiler.add_include_dir(sysconfig.get_config_var("INCLUDEPY"))
libdir = sysconfig.get_config_var("LIBDIR") or sysconfig.get_config_var(
"INCLUDEPY"
).replace("include", "libs")
compiler.add_library_dir(libdir)
try:
compiler.initialize()
except Exception:
pytest.skip("Compiler could not be initialized")
with open("embed_pil.c", "w", encoding="utf-8") as fh:
home = sys.prefix.replace("\\", "\\\\")
fh.write(
@ -305,13 +317,6 @@ int main(int argc, char* argv[])
"""
)
compiler = getattr(build_ext, "new_compiler")()
compiler.add_include_dir(sysconfig.get_config_var("INCLUDEPY"))
libdir = sysconfig.get_config_var("LIBDIR") or sysconfig.get_config_var(
"INCLUDEPY"
).replace("include", "libs")
compiler.add_library_dir(libdir)
objects = compiler.compile(["embed_pil.c"])
compiler.link_executable(objects, "embed_pil")


@ -309,7 +309,7 @@ class TestImageResize:
# Test unknown resampling filter
with hopper() as im:
with pytest.raises(ValueError):
im.resize((10, 10), "unknown")
im.resize((10, 10), -1)
@skip_unless_feature("libtiff")
def test_transposed(self) -> None:


@ -1396,6 +1396,28 @@ def test_stroke_descender() -> None:
assert_image_similar_tofile(im, "Tests/images/imagedraw_stroke_descender.png", 6.76)
@skip_unless_feature("freetype2")
def test_stroke_inside_gap() -> None:
# Arrange
im = Image.new("RGB", (120, 130))
draw = ImageDraw.Draw(im)
font = ImageFont.truetype("Tests/fonts/FreeMono.ttf", 120)
# Act
draw.text((12, 12), "i", "#f00", font, stroke_width=20)
# Assert
for y in range(im.height):
glyph = ""
for x in range(im.width):
if im.getpixel((x, y)) == (0, 0, 0):
if glyph == "started":
glyph = "ended"
else:
assert glyph != "ended", "Gap inside stroked glyph"
glyph = "started"
@skip_unless_feature("freetype2")
def test_split_word() -> None:
# Arrange


@ -191,13 +191,10 @@ class TestImageFile:
im.load()
@skip_unless_feature("zlib")
def test_truncated_without_errors(self) -> None:
def test_truncated_without_errors(self, monkeypatch: pytest.MonkeyPatch) -> None:
with Image.open("Tests/images/truncated_image.png") as im:
ImageFile.LOAD_TRUNCATED_IMAGES = True
try:
im.load()
finally:
ImageFile.LOAD_TRUNCATED_IMAGES = False
monkeypatch.setattr(ImageFile, "LOAD_TRUNCATED_IMAGES", True)
im.load()
@skip_unless_feature("zlib")
def test_broken_datastream_with_errors(self) -> None:
@ -206,13 +203,12 @@ class TestImageFile:
im.load()
@skip_unless_feature("zlib")
def test_broken_datastream_without_errors(self) -> None:
def test_broken_datastream_without_errors(
self, monkeypatch: pytest.MonkeyPatch
) -> None:
with Image.open("Tests/images/broken_data_stream.png") as im:
ImageFile.LOAD_TRUNCATED_IMAGES = True
try:
im.load()
finally:
ImageFile.LOAD_TRUNCATED_IMAGES = False
monkeypatch.setattr(ImageFile, "LOAD_TRUNCATED_IMAGES", True)
im.load()
class MockPyDecoder(ImageFile.PyDecoder):


@ -461,6 +461,20 @@ def test_free_type_font_get_mask(font: ImageFont.FreeTypeFont) -> None:
assert mask.size == (108, 13)
def test_stroke_mask() -> None:
# Arrange
text = "i"
# Act
font = ImageFont.truetype(FONT_PATH, 128)
mask = font.getmask(text, stroke_width=2)
# Assert
assert mask.getpixel((34, 5)) == 255
assert mask.getpixel((38, 5)) == 0
assert mask.getpixel((42, 5)) == 255
def test_load_when_image_not_found() -> None:
with tempfile.NamedTemporaryFile(delete=False) as tmp:
pass


@ -405,7 +405,6 @@ def test_exif_transpose() -> None:
else:
original_exif = im.info["exif"]
transposed_im = ImageOps.exif_transpose(im)
assert transposed_im is not None
assert_image_similar(base_im, transposed_im, 17)
if orientation_im is base_im:
assert "exif" not in im.info
@ -417,7 +416,6 @@ def test_exif_transpose() -> None:
# Repeat the operation to test that it does not keep transposing
transposed_im2 = ImageOps.exif_transpose(transposed_im)
assert transposed_im2 is not None
assert_image_equal(transposed_im2, transposed_im)
check(base_im)
@ -433,7 +431,6 @@ def test_exif_transpose() -> None:
assert im.getexif()[0x0112] == 3
transposed_im = ImageOps.exif_transpose(im)
assert transposed_im is not None
assert 0x0112 not in transposed_im.getexif()
transposed_im._reload_exif()
@ -446,14 +443,12 @@ def test_exif_transpose() -> None:
assert im.getexif()[0x0112] == 3
transposed_im = ImageOps.exif_transpose(im)
assert transposed_im is not None
assert 0x0112 not in transposed_im.getexif()
# Orientation set directly on Image.Exif
im = hopper()
im.getexif()[0x0112] = 3
transposed_im = ImageOps.exif_transpose(im)
assert transposed_im is not None
assert 0x0112 not in transposed_im.getexif()
@ -464,7 +459,6 @@ def test_exif_transpose_xml_without_xmp() -> None:
del im.info["xmp"]
transposed_im = ImageOps.exif_transpose(im)
assert transposed_im is not None
assert 0x0112 not in transposed_im.getexif()


@ -7,36 +7,30 @@ import pytest
from PIL import Image
def test_overflow() -> None:
def test_overflow(monkeypatch: pytest.MonkeyPatch) -> None:
# There is the potential to overflow comparisons in map.c
# if there are > SIZE_MAX bytes in the image or if
# the file encodes an offset that makes
# (offset + size(bytes)) > SIZE_MAX
# Note that this image triggers the decompression bomb warning:
max_pixels = Image.MAX_IMAGE_PIXELS
Image.MAX_IMAGE_PIXELS = None
monkeypatch.setattr(Image, "MAX_IMAGE_PIXELS", None)
# This image hits the offset test.
with Image.open("Tests/images/l2rgb_read.bmp") as im:
with pytest.raises((ValueError, MemoryError, OSError)):
im.load()
Image.MAX_IMAGE_PIXELS = max_pixels
def test_tobytes() -> None:
def test_tobytes(monkeypatch: pytest.MonkeyPatch) -> None:
# Note that this image triggers the decompression bomb warning:
max_pixels = Image.MAX_IMAGE_PIXELS
Image.MAX_IMAGE_PIXELS = None
monkeypatch.setattr(Image, "MAX_IMAGE_PIXELS", None)
# Previously raised an access violation on Windows
with Image.open("Tests/images/l2rgb_read.bmp") as im:
with pytest.raises((ValueError, MemoryError, OSError)):
im.tobytes()
Image.MAX_IMAGE_PIXELS = max_pixels
@pytest.mark.skipif(sys.maxsize <= 2**32, reason="Requires 64-bit system")
def test_ysize() -> None:


@ -2,7 +2,7 @@
# install libimagequant
archive_name=libimagequant
archive_version=4.3.3
archive_version=4.3.4
archive=$archive_name-$archive_version


@ -6,12 +6,11 @@ Goals
The fork author's goal is to foster and support active development of PIL through:
- Continuous integration testing via `GitHub Actions`_ and `AppVeyor`_
- Continuous integration testing via `GitHub Actions`_
- Publicized development activity on `GitHub`_
- Regular releases to the `Python Package Index`_
.. _GitHub Actions: https://github.com/python-pillow/Pillow/actions
.. _AppVeyor: https://ci.appveyor.com/project/Python-pillow/pillow
.. _GitHub: https://github.com/python-pillow/Pillow
.. _Python Package Index: https://pypi.org/project/pillow/


@ -175,6 +175,24 @@ deprecated and will be removed in Pillow 12 (2025-10-15). They were used for obt
raw pointers to ``ImagingCore`` internals. To interact with C code, you can use
``Image.Image.getim()``, which returns a ``Capsule`` object.
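As a minimal illustrative sketch (not part of this commit), the replacement call looks like this; the image here is a throwaway example:

    from PIL import Image

    im = Image.new("RGB", (8, 8))
    capsule = im.getim()  # Capsule object wrapping the ImagingCore pointer, for use from C extension code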
ExifTags.IFD.Makernote
^^^^^^^^^^^^^^^^^^^^^^
.. deprecated:: 11.1.0
``ExifTags.IFD.Makernote`` has been deprecated. Instead, use
``ExifTags.IFD.MakerNote``.
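For example, a hedged sketch of reading the MakerNote IFD with the new spelling (the file path is hypothetical):

    from PIL import ExifTags, Image

    with Image.open("photo.jpg") as im:  # hypothetical file containing EXIF data
        exif = im.getexif()
        makernote = exif.get_ifd(ExifTags.IFD.MakerNote)  # use MakerNote instead of the deprecated Makernote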
Image.Image.get_child_images()
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. deprecated:: 11.2.0
``Image.Image.get_child_images()`` has been deprecated and will be removed in Pillow
13 (2026-10-15). It will be moved to ``ImageFile.ImageFile.get_child_images()``. The
method uses an image's file pointer, and so child images could only be retrieved from
an :py:class:`PIL.ImageFile.ImageFile` instance.
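Until the move happens, calling the method on an Image emits a DeprecationWarning; a small sketch mirroring the test added in this commit:

    import warnings

    from PIL import Image

    im = Image.new("RGB", (1, 1))
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", DeprecationWarning)
        assert im.get_child_images() == []  # a plain new image has no embedded child images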
Removed features
----------------


@ -572,10 +572,19 @@ JPEG 2000
Pillow reads and writes JPEG 2000 files containing ``L``, ``LA``, ``RGB``,
``RGBA``, or ``YCbCr`` data. When reading, ``YCbCr`` data is converted to
``RGB`` or ``RGBA`` depending on whether or not there is an alpha channel.
Beginning with version 8.3.0, Pillow can read (but not write) ``RGB``,
``RGBA``, and ``YCbCr`` images with subsampled components. Pillow supports
JPEG 2000 raw codestreams (``.j2k`` files), as well as boxed JPEG 2000 files
(``.jp2`` or ``.jpx`` files).
.. versionadded:: 8.3.0
Pillow can read (but not write) ``RGB``, ``RGBA``, and ``YCbCr`` images with
subsampled components.
.. versionadded:: 10.4.0
Pillow can read ``CMYK`` images with OpenJPEG 2.5.1 and later.
.. versionadded:: 11.1.0
Pillow can write ``CMYK`` images with OpenJPEG 2.5.3 and later.
Pillow supports JPEG 2000 raw codestreams (``.j2k`` files), as well as boxed
JPEG 2000 files (``.jp2`` or ``.jpx`` files).
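As a short illustration of the CMYK support described above (file names are hypothetical; reading CMYK needs OpenJPEG 2.5.1 or later, writing needs 2.5.3 or later):

    from PIL import Image

    with Image.open("scan_cmyk.jp2") as im:  # hypothetical CMYK JPEG 2000 file
        assert im.mode == "CMYK"             # read support needs OpenJPEG 2.5.1+
        im.save("scan_cmyk_copy.jp2")        # write support needs OpenJPEG 2.5.3+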
When loading, if you set the ``mode`` on the image prior to the
:py:meth:`~PIL.Image.Image.load` method being invoked, you can ask Pillow to
@ -1199,6 +1208,11 @@ The :py:meth:`~PIL.Image.Image.save` method can take the following keyword argum
.. versionadded:: 8.4.0
**big_tiff**
If true, the image will be saved as a BigTIFF.
.. versionadded:: 11.1.0
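An illustrative use of the new keyword, mirroring the test_bigtiff_save test added in this commit (paths are hypothetical):

    from PIL import Image

    im = Image.open("input.tif")          # hypothetical source image
    im.save("output.tif", big_tiff=True)  # saved as BigTIFF, which uses 64-bit offsets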
**compression**
A string containing the desired compression method for the
file. (valid only with libtiff installed) Valid compression


@ -33,10 +33,6 @@ Pillow for enterprise is available via the Tidelift Subscription. `Learn more <h
:target: https://github.com/python-pillow/Pillow/actions/workflows/test-cygwin.yml
:alt: GitHub Actions build status (Test Cygwin)
.. image:: https://img.shields.io/appveyor/build/python-pillow/Pillow/main.svg?label=Windows%20build
:target: https://ci.appveyor.com/project/python-pillow/Pillow
:alt: AppVeyor CI build status (Windows)
.. image:: https://github.com/python-pillow/Pillow/workflows/Wheels/badge.svg
:target: https://github.com/python-pillow/Pillow/actions/workflows/wheels.yml
:alt: GitHub Actions build status (Wheels)

View File

@ -64,7 +64,7 @@ Many of Pillow's features require external libraries:
* **libimagequant** provides improved color quantization
* Pillow has been tested with libimagequant **2.6-4.3.3**
* Pillow has been tested with libimagequant **2.6-4.3.4**
* Libimagequant is licensed GPLv3, which is more restrictive than
the Pillow license, therefore we will not be distributing binaries
with libimagequant support enabled.

View File

@ -27,6 +27,8 @@ These platforms are built and tested for every change.
+----------------------------------+----------------------------+---------------------+
| CentOS Stream 9 | 3.9 | x86-64 |
+----------------------------------+----------------------------+---------------------+
| CentOS Stream 10 | 3.12 | x86-64 |
+----------------------------------+----------------------------+---------------------+
| Debian 12 Bookworm | 3.11 | x86, x86-64 |
+----------------------------------+----------------------------+---------------------+
| Fedora 40 | 3.12 | x86-64 |
@ -42,18 +44,14 @@ These platforms are built and tested for every change.
+----------------------------------+----------------------------+---------------------+
| Ubuntu Linux 22.04 LTS (Jammy) | 3.9, 3.10, 3.11, | x86-64 |
| | 3.12, 3.13, PyPy3 | |
| +----------------------------+---------------------+
| | 3.10 | arm64v8 |
+----------------------------------+----------------------------+---------------------+
| Ubuntu Linux 24.04 LTS (Noble) | 3.12 | x86-64, ppc64le, |
| | | s390x |
| Ubuntu Linux 24.04 LTS (Noble) | 3.12 | x86-64, arm64v8, |
| | | ppc64le, s390x |
+----------------------------------+----------------------------+---------------------+
| Windows Server 2019 | 3.9 | x86-64 |
| Windows Server 2019 | 3.9 | x86 |
+----------------------------------+----------------------------+---------------------+
| Windows Server 2022 | 3.9, 3.10, 3.11, | x86-64 |
| | 3.12, 3.13, PyPy3 | |
| +----------------------------+---------------------+
| | 3.13 | x86 |
| Windows Server 2022 | 3.10, 3.11, 3.12, 3.13, | x86-64 |
| | PyPy3 | |
| +----------------------------+---------------------+
| | 3.12 (MinGW) | x86-64 |
| +----------------------------+---------------------+
@ -75,7 +73,7 @@ These platforms have been reported to work at the versions mentioned.
| Operating system | | Tested Python | | Latest tested | | Tested |
| | | versions | | Pillow version | | processors |
+==================================+============================+==================+==============+
| macOS 15 Sequoia | 3.9, 3.10, 3.11, 3.12, 3.13| 11.0.0 |arm |
| macOS 15 Sequoia | 3.9, 3.10, 3.11, 3.12, 3.13| 11.1.0 |arm |
| +----------------------------+------------------+ |
| | 3.8 | 10.4.0 | |
+----------------------------------+----------------------------+------------------+--------------+

View File

@ -54,6 +54,7 @@ Feature version numbers are available only where stated.
Support for the following features can be checked:
* ``libjpeg_turbo``: (compile time) Whether Pillow was compiled against the libjpeg-turbo version of libjpeg. Compile-time version number is available.
* ``mozjpeg``: (compile time) Whether Pillow was compiled against the MozJPEG version of libjpeg. Compile-time version number is available.
* ``zlib_ng``: (compile time) Whether Pillow was compiled against the zlib-ng version of zlib. Compile-time version number is available.
* ``raqm``: Raqm library, required for ``ImageFont.Layout.RAQM`` in :py:func:`PIL.ImageFont.truetype`. Run-time version number is available for Raqm 0.7.0 or newer.
* ``libimagequant``: (compile time) ImageQuant quantization support in :py:func:`PIL.Image.Image.quantize`. Run-time version number is available.
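A short sketch of querying these flags at run time (the reported values depend on how
the local Pillow binary was built)::

    from PIL import features

    for name in ("libjpeg_turbo", "mozjpeg", "zlib_ng", "raqm", "libimagequant"):
        print(name, features.check_feature(name), features.version_feature(name))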

View File

@ -1,40 +1,38 @@
11.1.0
------
Security
========
TODO
^^^^
TODO
:cve:`YYYY-XXXXX`: TODO
^^^^^^^^^^^^^^^^^^^^^^^
TODO
Backwards Incompatible Changes
==============================
TODO
^^^^
Deprecations
============
TODO
^^^^
ExifTags.IFD.Makernote
^^^^^^^^^^^^^^^^^^^^^^
TODO
``ExifTags.IFD.Makernote`` has been deprecated. Instead, use
``ExifTags.IFD.MakerNote``.
API Changes
===========
TODO
^^^^
Writing XMP bytes to JPEG and MPO
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
TODO
Pillow 11.0.0 added writing XMP data to JPEG and MPO images::
im.info["xmp"] = b"test"
im.save("out.jpg")
However, this meant that XMP data was automatically kept from an opened image,
which is inconsistent with the rest of Pillow's behaviour. This functionality
has been removed. To write XMP data, the ``xmp`` argument can still be used for
JPEG files::
im.save("out.jpg", xmp=b"test")
To save XMP data to the second frame of an MPO image, ``encoderinfo`` can now
be used::
second_im.encoderinfo = {"xmp": b"test"}
im.save("out.mpo", save_all=True, append_images=[second_im])
API Additions
=============
@ -49,6 +47,13 @@ zlib library, and what version of zlib-ng is being used::
features.check_feature("zlib_ng") # True or False
features.version_feature("zlib_ng") # "2.2.2" for example, or None
Saving TIFF as BigTIFF
^^^^^^^^^^^^^^^^^^^^^^
TIFF images can now be saved as BigTIFF using a ``big_tiff`` argument::
im.save("out.tiff", big_tiff=True)
Other Changes
=============
@ -58,6 +63,16 @@ Reading JPEG 2000 comments
When opening a JPEG 2000 image, the comment may now be read into
:py:attr:`~PIL.Image.Image.info` for J2K images, not just JP2 images.
Saving JPEG 2000 CMYK images
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
With OpenJPEG 2.5.3 or later, Pillow can now save CMYK images as JPEG 2000 files.
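A minimal sketch, assuming Pillow was built against OpenJPEG 2.5.3 or later (the
output filename is a placeholder)::

    im.convert("CMYK").save("out.jp2")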
Minimum C version
^^^^^^^^^^^^^^^^^
C99 is now the minimum version of C required to compile Pillow from source.
zlib-ng in wheels
^^^^^^^^^^^^^^^^^

View File

@ -0,0 +1,63 @@
11.2.0
------
Security
========
TODO
^^^^
TODO
:cve:`YYYY-XXXXX`: TODO
^^^^^^^^^^^^^^^^^^^^^^^
TODO
Backwards Incompatible Changes
==============================
TODO
^^^^
Deprecations
============
Image.Image.get_child_images()
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. deprecated:: 11.2.0
``Image.Image.get_child_images()`` has been deprecated and will be removed in Pillow
13 (2026-10-15). It will be moved to ``ImageFile.ImageFile.get_child_images()``. The
method uses an image's file pointer, and so child images can only be retrieved from
an :py:class:`PIL.ImageFile.ImageFile` instance.
API Changes
===========
TODO
^^^^
TODO
API Additions
=============
Check for MozJPEG
^^^^^^^^^^^^^^^^^
You can check if Pillow has been built against the MozJPEG version of the
libjpeg library, and what version of MozJPEG is being used::
from PIL import features
features.check_feature("mozjpeg") # True or False
features.version_feature("mozjpeg") # "4.1.1" for example, or None
Other Changes
=============
TODO
^^^^
TODO

View File

@ -14,6 +14,7 @@ expected to be backported to earlier versions.
.. toctree::
:maxdepth: 2
11.2.0
11.1.0
11.0.0
10.4.0

View File

@ -260,21 +260,36 @@ class BlpImageFile(ImageFile.ImageFile):
def _open(self) -> None:
assert self.fp is not None
self.magic = self.fp.read(4)
self.fp.seek(5, os.SEEK_CUR)
(self._blp_alpha_depth,) = struct.unpack("<b", self.fp.read(1))
self.fp.seek(2, os.SEEK_CUR)
self._size = struct.unpack("<II", self.fp.read(8))
if self.magic in (b"BLP1", b"BLP2"):
decoder = self.magic.decode()
else:
if not _accept(self.magic):
msg = f"Bad BLP magic {repr(self.magic)}"
raise BLPFormatError(msg)
self._mode = "RGBA" if self._blp_alpha_depth else "RGB"
self.tile = [ImageFile._Tile(decoder, (0, 0) + self.size, 0, self.mode)]
compression = struct.unpack("<i", self.fp.read(4))[0]
if self.magic == b"BLP1":
alpha = struct.unpack("<I", self.fp.read(4))[0] != 0
else:
encoding = struct.unpack("<b", self.fp.read(1))[0]
alpha = struct.unpack("<b", self.fp.read(1))[0] != 0
alpha_encoding = struct.unpack("<b", self.fp.read(1))[0]
self.fp.seek(1, os.SEEK_CUR) # mips
self._size = struct.unpack("<II", self.fp.read(8))
args: tuple[int, int, bool] | tuple[int, int, bool, int]
if self.magic == b"BLP1":
encoding = struct.unpack("<i", self.fp.read(4))[0]
self.fp.seek(4, os.SEEK_CUR) # subtype
args = (compression, encoding, alpha)
offset = 28
else:
args = (compression, encoding, alpha, alpha_encoding)
offset = 20
decoder = self.magic.decode()
self._mode = "RGBA" if alpha else "RGB"
self.tile = [ImageFile._Tile(decoder, (0, 0) + self.size, offset, args)]
class _BLPBaseDecoder(ImageFile.PyDecoder):
@ -282,7 +297,7 @@ class _BLPBaseDecoder(ImageFile.PyDecoder):
def decode(self, buffer: bytes | Image.SupportsArrayInterface) -> tuple[int, int]:
try:
self._read_blp_header()
self._read_header()
self._load()
except struct.error as e:
msg = "Truncated BLP file"
@ -293,25 +308,9 @@ class _BLPBaseDecoder(ImageFile.PyDecoder):
def _load(self) -> None:
pass
def _read_blp_header(self) -> None:
assert self.fd is not None
self.fd.seek(4)
(self._blp_compression,) = struct.unpack("<i", self._safe_read(4))
(self._blp_encoding,) = struct.unpack("<b", self._safe_read(1))
(self._blp_alpha_depth,) = struct.unpack("<b", self._safe_read(1))
(self._blp_alpha_encoding,) = struct.unpack("<b", self._safe_read(1))
self.fd.seek(1, os.SEEK_CUR) # mips
self.size = struct.unpack("<II", self._safe_read(8))
if isinstance(self, BLP1Decoder):
# Only present for BLP1
(self._blp_encoding,) = struct.unpack("<i", self._safe_read(4))
self.fd.seek(4, os.SEEK_CUR) # subtype
self._blp_offsets = struct.unpack("<16I", self._safe_read(16 * 4))
self._blp_lengths = struct.unpack("<16I", self._safe_read(16 * 4))
def _read_header(self) -> None:
self._offsets = struct.unpack("<16I", self._safe_read(16 * 4))
self._lengths = struct.unpack("<16I", self._safe_read(16 * 4))
def _safe_read(self, length: int) -> bytes:
assert self.fd is not None
@ -327,9 +326,11 @@ class _BLPBaseDecoder(ImageFile.PyDecoder):
ret.append((b, g, r, a))
return ret
def _read_bgra(self, palette: list[tuple[int, int, int, int]]) -> bytearray:
def _read_bgra(
self, palette: list[tuple[int, int, int, int]], alpha: bool
) -> bytearray:
data = bytearray()
_data = BytesIO(self._safe_read(self._blp_lengths[0]))
_data = BytesIO(self._safe_read(self._lengths[0]))
while True:
try:
(offset,) = struct.unpack("<B", _data.read(1))
@ -337,7 +338,7 @@ class _BLPBaseDecoder(ImageFile.PyDecoder):
break
b, g, r, a = palette[offset]
d: tuple[int, ...] = (r, g, b)
if self._blp_alpha_depth:
if alpha:
d += (a,)
data.extend(d)
return data
@ -345,19 +346,21 @@ class _BLPBaseDecoder(ImageFile.PyDecoder):
class BLP1Decoder(_BLPBaseDecoder):
def _load(self) -> None:
if self._blp_compression == Format.JPEG:
self._compression, self._encoding, alpha = self.args
if self._compression == Format.JPEG:
self._decode_jpeg_stream()
elif self._blp_compression == 1:
if self._blp_encoding in (4, 5):
elif self._compression == 1:
if self._encoding in (4, 5):
palette = self._read_palette()
data = self._read_bgra(palette)
data = self._read_bgra(palette, alpha)
self.set_as_raw(data)
else:
msg = f"Unsupported BLP encoding {repr(self._blp_encoding)}"
msg = f"Unsupported BLP encoding {repr(self._encoding)}"
raise BLPFormatError(msg)
else:
msg = f"Unsupported BLP compression {repr(self._blp_encoding)}"
msg = f"Unsupported BLP compression {repr(self._encoding)}"
raise BLPFormatError(msg)
def _decode_jpeg_stream(self) -> None:
@ -366,65 +369,61 @@ class BLP1Decoder(_BLPBaseDecoder):
(jpeg_header_size,) = struct.unpack("<I", self._safe_read(4))
jpeg_header = self._safe_read(jpeg_header_size)
assert self.fd is not None
self._safe_read(self._blp_offsets[0] - self.fd.tell()) # What IS this?
data = self._safe_read(self._blp_lengths[0])
self._safe_read(self._offsets[0] - self.fd.tell()) # What IS this?
data = self._safe_read(self._lengths[0])
data = jpeg_header + data
image = JpegImageFile(BytesIO(data))
Image._decompression_bomb_check(image.size)
if image.mode == "CMYK":
decoder_name, extents, offset, args = image.tile[0]
args = image.tile[0].args
assert isinstance(args, tuple)
image.tile = [
ImageFile._Tile(decoder_name, extents, offset, (args[0], "CMYK"))
]
r, g, b = image.convert("RGB").split()
reversed_image = Image.merge("RGB", (b, g, r))
self.set_as_raw(reversed_image.tobytes())
image.tile = [image.tile[0]._replace(args=(args[0], "CMYK"))]
self.set_as_raw(image.convert("RGB").tobytes(), "BGR")
class BLP2Decoder(_BLPBaseDecoder):
def _load(self) -> None:
self._compression, self._encoding, alpha, self._alpha_encoding = self.args
palette = self._read_palette()
assert self.fd is not None
self.fd.seek(self._blp_offsets[0])
self.fd.seek(self._offsets[0])
if self._blp_compression == 1:
if self._compression == 1:
# Uncompressed or DirectX compression
if self._blp_encoding == Encoding.UNCOMPRESSED:
data = self._read_bgra(palette)
if self._encoding == Encoding.UNCOMPRESSED:
data = self._read_bgra(palette, alpha)
elif self._blp_encoding == Encoding.DXT:
elif self._encoding == Encoding.DXT:
data = bytearray()
if self._blp_alpha_encoding == AlphaEncoding.DXT1:
linesize = (self.size[0] + 3) // 4 * 8
for yb in range((self.size[1] + 3) // 4):
for d in decode_dxt1(
self._safe_read(linesize), alpha=bool(self._blp_alpha_depth)
):
if self._alpha_encoding == AlphaEncoding.DXT1:
linesize = (self.state.xsize + 3) // 4 * 8
for yb in range((self.state.ysize + 3) // 4):
for d in decode_dxt1(self._safe_read(linesize), alpha):
data += d
elif self._blp_alpha_encoding == AlphaEncoding.DXT3:
linesize = (self.size[0] + 3) // 4 * 16
for yb in range((self.size[1] + 3) // 4):
elif self._alpha_encoding == AlphaEncoding.DXT3:
linesize = (self.state.xsize + 3) // 4 * 16
for yb in range((self.state.ysize + 3) // 4):
for d in decode_dxt3(self._safe_read(linesize)):
data += d
elif self._blp_alpha_encoding == AlphaEncoding.DXT5:
linesize = (self.size[0] + 3) // 4 * 16
for yb in range((self.size[1] + 3) // 4):
elif self._alpha_encoding == AlphaEncoding.DXT5:
linesize = (self.state.xsize + 3) // 4 * 16
for yb in range((self.state.ysize + 3) // 4):
for d in decode_dxt5(self._safe_read(linesize)):
data += d
else:
msg = f"Unsupported alpha encoding {repr(self._blp_alpha_encoding)}"
msg = f"Unsupported alpha encoding {repr(self._alpha_encoding)}"
raise BLPFormatError(msg)
else:
msg = f"Unknown BLP encoding {repr(self._blp_encoding)}"
msg = f"Unknown BLP encoding {repr(self._encoding)}"
raise BLPFormatError(msg)
else:
msg = f"Unknown BLP compression {repr(self._blp_compression)}"
msg = f"Unknown BLP compression {repr(self._compression)}"
raise BLPFormatError(msg)
self.set_as_raw(data)
@ -473,10 +472,15 @@ def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
assert im.palette is not None
fp.write(struct.pack("<i", 1)) # Uncompressed or DirectX compression
fp.write(struct.pack("<b", Encoding.UNCOMPRESSED))
fp.write(struct.pack("<b", 1 if im.palette.mode == "RGBA" else 0))
fp.write(struct.pack("<b", 0)) # alpha encoding
fp.write(struct.pack("<b", 0)) # mips
alpha_depth = 1 if im.palette.mode == "RGBA" else 0
if magic == b"BLP1":
fp.write(struct.pack("<L", alpha_depth))
else:
fp.write(struct.pack("<b", Encoding.UNCOMPRESSED))
fp.write(struct.pack("<b", alpha_depth))
fp.write(struct.pack("<b", 0)) # alpha encoding
fp.write(struct.pack("<b", 0)) # mips
fp.write(struct.pack("<II", *im.size))
if magic == b"BLP1":
fp.write(struct.pack("<i", 5))

View File

@ -10,6 +10,7 @@
#
from __future__ import annotations
import os
from typing import IO
from . import Image, ImageFile
@ -41,13 +42,11 @@ class BufrStubImageFile(ImageFile.StubImageFile):
def _open(self) -> None:
assert self.fp is not None
offset = self.fp.tell()
if not _accept(self.fp.read(4)):
msg = "Not a BUFR file"
raise SyntaxError(msg)
self.fp.seek(offset)
self.fp.seek(-4, os.SEEK_CUR)
# make something up
self._mode = "F"

View File

@ -353,6 +353,7 @@ class IFD(IntEnum):
Exif = 0x8769
GPSInfo = 0x8825
MakerNote = 0x927C
Makernote = 0x927C # Deprecated
Interop = 0xA005
IFD1 = -1

View File

@ -10,6 +10,7 @@
#
from __future__ import annotations
import os
from typing import IO
from . import Image, ImageFile
@ -41,13 +42,11 @@ class GribStubImageFile(ImageFile.StubImageFile):
def _open(self) -> None:
assert self.fp is not None
offset = self.fp.tell()
if not _accept(self.fp.read(8)):
msg = "Not a GRIB file"
raise SyntaxError(msg)
self.fp.seek(offset)
self.fp.seek(-8, os.SEEK_CUR)
# make something up
self._mode = "F"

View File

@ -10,6 +10,7 @@
#
from __future__ import annotations
import os
from typing import IO
from . import Image, ImageFile
@ -41,13 +42,11 @@ class HDF5StubImageFile(ImageFile.StubImageFile):
def _open(self) -> None:
assert self.fp is not None
offset = self.fp.tell()
if not _accept(self.fp.read(8)):
msg = "Not an HDF file"
raise SyntaxError(msg)
self.fp.seek(offset)
self.fp.seek(-8, os.SEEK_CUR)
# make something up
self._mode = "F"

View File

@ -603,24 +603,16 @@ class Image:
def __enter__(self):
return self
def _close_fp(self):
if getattr(self, "_fp", False):
if self._fp != self.fp:
self._fp.close()
self._fp = DeferredError(ValueError("Operation on closed image"))
if self.fp:
self.fp.close()
def __exit__(self, *args):
if hasattr(self, "fp"):
from . import ImageFile
if isinstance(self, ImageFile.ImageFile):
if getattr(self, "_exclusive_fp", False):
self._close_fp()
self.fp = None
def close(self) -> None:
"""
Closes the file pointer, if possible.
This operation will destroy the image core and release its memory.
The image data will be unusable afterward.
@ -629,13 +621,6 @@ class Image:
:py:meth:`~PIL.Image.Image.load` method. See :ref:`file-handling` for
more information.
"""
if hasattr(self, "fp"):
try:
self._close_fp()
self.fp = None
except Exception as msg:
logger.debug("Error closing: %s", msg)
if getattr(self, "map", None):
self.map: mmap.mmap | None = None
@ -1554,50 +1539,10 @@ class Image:
self.getexif()
def get_child_images(self) -> list[ImageFile.ImageFile]:
child_images = []
exif = self.getexif()
ifds = []
if ExifTags.Base.SubIFDs in exif:
subifd_offsets = exif[ExifTags.Base.SubIFDs]
if subifd_offsets:
if not isinstance(subifd_offsets, tuple):
subifd_offsets = (subifd_offsets,)
for subifd_offset in subifd_offsets:
ifds.append((exif._get_ifd_dict(subifd_offset), subifd_offset))
ifd1 = exif.get_ifd(ExifTags.IFD.IFD1)
if ifd1 and ifd1.get(ExifTags.Base.JpegIFOffset):
assert exif._info is not None
ifds.append((ifd1, exif._info.next))
from . import ImageFile
offset = None
for ifd, ifd_offset in ifds:
current_offset = self.fp.tell()
if offset is None:
offset = current_offset
fp = self.fp
if ifd is not None:
thumbnail_offset = ifd.get(ExifTags.Base.JpegIFOffset)
if thumbnail_offset is not None:
thumbnail_offset += getattr(self, "_exif_offset", 0)
self.fp.seek(thumbnail_offset)
data = self.fp.read(ifd.get(ExifTags.Base.JpegIFByteCount))
fp = io.BytesIO(data)
with open(fp) as im:
from . import TiffImagePlugin
if thumbnail_offset is None and isinstance(
im, TiffImagePlugin.TiffImageFile
):
im._frame_pos = [ifd_offset]
im._seek(0)
im.load()
child_images.append(im)
if offset is not None:
self.fp.seek(offset)
return child_images
deprecate("Image.Image.get_child_images", 13)
return ImageFile.ImageFile.get_child_images(self) # type: ignore[arg-type]
def getim(self) -> CapsuleType:
"""
@ -4023,6 +3968,9 @@ class Exif(_ExifBase):
head = self._get_head()
ifd = TiffImagePlugin.ImageFileDirectory_v2(ifh=head)
for tag, ifd_dict in self._ifds.items():
if tag not in self:
ifd[tag] = ifd_dict
for tag, value in self.items():
if tag in [
ExifTags.IFD.Exif,

View File

@ -643,6 +643,7 @@ class ImageDraw:
features=features,
language=language,
stroke_width=stroke_width,
stroke_filled=True,
anchor=anchor,
ink=ink,
start=start,
@ -692,7 +693,8 @@ class ImageDraw:
draw_text(stroke_ink, stroke_width)
# Draw normal text
draw_text(ink, 0)
if ink != stroke_ink:
draw_text(ink)
else:
# Only draw normal text
draw_text(ink)
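A brief usage sketch of the case this branch distinguishes, where the stroke colour
matches the fill (the image size and text are placeholders)::

    from PIL import Image, ImageDraw

    im = Image.new("RGB", (200, 80), "white")
    draw = ImageDraw.Draw(im)
    draw.text((10, 10), "Pillow", fill="black", stroke_width=2, stroke_fill="black")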

View File

@ -31,18 +31,21 @@ from __future__ import annotations
import abc
import io
import itertools
import logging
import os
import struct
import sys
from typing import IO, TYPE_CHECKING, Any, NamedTuple, cast
from . import Image
from . import ExifTags, Image
from ._deprecate import deprecate
from ._util import is_path
from ._util import DeferredError, is_path
if TYPE_CHECKING:
from ._typing import StrOrBytesPath
logger = logging.getLogger(__name__)
MAXBLOCK = 65536
SAFEBLOCK = 1024 * 1024
@ -163,6 +166,85 @@ class ImageFile(Image.Image):
def _open(self) -> None:
pass
def _close_fp(self):
if getattr(self, "_fp", False):
if self._fp != self.fp:
self._fp.close()
self._fp = DeferredError(ValueError("Operation on closed image"))
if self.fp:
self.fp.close()
def close(self) -> None:
"""
Closes the file pointer, if possible.
This operation will destroy the image core and release its memory.
The image data will be unusable afterward.
This function is required to close images that have multiple frames or
have not had their file read and closed by the
:py:meth:`~PIL.Image.Image.load` method. See :ref:`file-handling` for
more information.
"""
try:
self._close_fp()
self.fp = None
except Exception as msg:
logger.debug("Error closing: %s", msg)
super().close()
def get_child_images(self) -> list[ImageFile]:
child_images = []
exif = self.getexif()
ifds = []
if ExifTags.Base.SubIFDs in exif:
subifd_offsets = exif[ExifTags.Base.SubIFDs]
if subifd_offsets:
if not isinstance(subifd_offsets, tuple):
subifd_offsets = (subifd_offsets,)
for subifd_offset in subifd_offsets:
ifds.append((exif._get_ifd_dict(subifd_offset), subifd_offset))
ifd1 = exif.get_ifd(ExifTags.IFD.IFD1)
if ifd1 and ifd1.get(ExifTags.Base.JpegIFOffset):
assert exif._info is not None
ifds.append((ifd1, exif._info.next))
offset = None
for ifd, ifd_offset in ifds:
assert self.fp is not None
current_offset = self.fp.tell()
if offset is None:
offset = current_offset
fp = self.fp
if ifd is not None:
thumbnail_offset = ifd.get(ExifTags.Base.JpegIFOffset)
if thumbnail_offset is not None:
thumbnail_offset += getattr(self, "_exif_offset", 0)
self.fp.seek(thumbnail_offset)
length = ifd.get(ExifTags.Base.JpegIFByteCount)
assert isinstance(length, int)
data = self.fp.read(length)
fp = io.BytesIO(data)
with Image.open(fp) as im:
from . import TiffImagePlugin
if thumbnail_offset is None and isinstance(
im, TiffImagePlugin.TiffImageFile
):
im._frame_pos = [ifd_offset]
im._seek(0)
im.load()
child_images.append(im)
if offset is not None:
assert self.fp is not None
self.fp.seek(offset)
return child_images
def get_format_mimetype(self) -> str | None:
if self.custom_mimetype:
return self.custom_mimetype

View File

@ -644,6 +644,7 @@ class FreeTypeFont:
features,
language,
stroke_width,
kwargs.get("stroke_filled", False),
anchor,
ink,
start[0],

View File

@ -22,7 +22,7 @@ import functools
import operator
import re
from collections.abc import Sequence
from typing import Protocol, cast
from typing import Literal, Protocol, cast, overload
from . import ExifTags, Image, ImagePalette
@ -673,6 +673,16 @@ def solarize(image: Image.Image, threshold: int = 128) -> Image.Image:
return _lut(image, lut)
@overload
def exif_transpose(image: Image.Image, *, in_place: Literal[True]) -> None: ...
@overload
def exif_transpose(
image: Image.Image, *, in_place: Literal[False] = False
) -> Image.Image: ...
def exif_transpose(image: Image.Image, *, in_place: bool = False) -> Image.Image | None:
"""
If an image has an EXIF Orientation tag, other than 1, transpose the image

View File

@ -92,6 +92,9 @@ def APP(self: JpegImageFile, marker: int) -> None:
else:
if jfif_unit == 1:
self.info["dpi"] = jfif_density
elif jfif_unit == 2: # cm
# 1 dpcm = 2.54 dpi
self.info["dpi"] = tuple(d * 2.54 for d in jfif_density)
self.info["jfif_unit"] = jfif_unit
self.info["jfif_density"] = jfif_density
elif marker == 0xFFE1 and s[:6] == b"Exif\0\0":

View File

@ -1385,7 +1385,7 @@ def _save(
b"\0", # 12: interlace flag
)
chunks = [b"cHRM", b"gAMA", b"sBIT", b"sRGB", b"tIME"]
chunks = [b"cHRM", b"cICP", b"gAMA", b"sBIT", b"sRGB", b"tIME"]
icc = im.encoderinfo.get("icc_profile", im.info.get("icc_profile"))
if icc:

View File

@ -268,7 +268,7 @@ def makeSpiderHeader(im: Image.Image) -> list[bytes]:
def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
if im.mode[0] != "F":
if im.mode != "F":
im = im.convert("F")
hdr = makeSpiderHeader(im)

View File

@ -582,7 +582,7 @@ class ImageFileDirectory_v2(_IFDv2Base):
def __init__(
self,
ifh: bytes = b"II\052\0\0\0\0\0",
ifh: bytes = b"II\x2A\x00\x00\x00\x00\x00",
prefix: bytes | None = None,
group: int | None = None,
) -> None:
@ -949,12 +949,25 @@ class ImageFileDirectory_v2(_IFDv2Base):
warnings.warn(str(msg))
return
def _get_ifh(self) -> bytes:
ifh = self._prefix + self._pack("H", 43 if self._bigtiff else 42)
if self._bigtiff:
ifh += self._pack("HH", 8, 0)
ifh += self._pack("Q", 16) if self._bigtiff else self._pack("L", 8)
return ifh
def tobytes(self, offset: int = 0) -> bytes:
# FIXME What about tagdata?
result = self._pack("H", len(self._tags_v2))
result = self._pack("Q" if self._bigtiff else "H", len(self._tags_v2))
entries: list[tuple[int, int, int, bytes, bytes]] = []
offset = offset + len(result) + len(self._tags_v2) * 12 + 4
fmt = "Q" if self._bigtiff else "L"
fmt_size = 8 if self._bigtiff else 4
offset += (
len(result) + len(self._tags_v2) * (20 if self._bigtiff else 12) + fmt_size
)
stripoffsets = None
# pass 1: convert tags to binary format
@ -966,11 +979,7 @@ class ImageFileDirectory_v2(_IFDv2Base):
logger.debug("Tag %s, Type: %s, Value: %s", tag, typ, repr(value))
is_ifd = typ == TiffTags.LONG and isinstance(value, dict)
if is_ifd:
if self._endian == "<":
ifh = b"II\x2A\x00\x08\x00\x00\x00"
else:
ifh = b"MM\x00\x2A\x00\x00\x00\x08"
ifd = ImageFileDirectory_v2(ifh, group=tag)
ifd = ImageFileDirectory_v2(self._get_ifh(), group=tag)
values = self._tags_v2[tag]
for ifd_tag, ifd_value in values.items():
ifd[ifd_tag] = ifd_value
@ -993,10 +1002,10 @@ class ImageFileDirectory_v2(_IFDv2Base):
else:
count = len(values)
# figure out if data fits into the entry
if len(data) <= 4:
entries.append((tag, typ, count, data.ljust(4, b"\0"), b""))
if len(data) <= fmt_size:
entries.append((tag, typ, count, data.ljust(fmt_size, b"\0"), b""))
else:
entries.append((tag, typ, count, self._pack("L", offset), data))
entries.append((tag, typ, count, self._pack(fmt, offset), data))
offset += (len(data) + 1) // 2 * 2 # pad to word
# update strip offset data to point beyond auxiliary data
@ -1007,16 +1016,18 @@ class ImageFileDirectory_v2(_IFDv2Base):
values = [val + offset for val in handler(self, data, self.legacy_api)]
data = self._write_dispatch[typ](self, *values)
else:
value = self._pack("L", self._unpack("L", value)[0] + offset)
value = self._pack(fmt, self._unpack(fmt, value)[0] + offset)
entries[stripoffsets] = tag, typ, count, value, data
# pass 2: write entries to file
for tag, typ, count, value, data in entries:
logger.debug("%s %s %s %s %s", tag, typ, count, repr(value), repr(data))
result += self._pack("HHL4s", tag, typ, count, value)
result += self._pack(
"HHQ8s" if self._bigtiff else "HHL4s", tag, typ, count, value
)
# -- overwrite here for multi-page --
result += b"\0\0\0\0" # end of entries
result += self._pack(fmt, 0) # end of entries
# pass 3: write auxiliary data to file
for tag, typ, count, value, data in entries:
@ -1028,8 +1039,7 @@ class ImageFileDirectory_v2(_IFDv2Base):
def save(self, fp: IO[bytes]) -> int:
if fp.tell() == 0: # skip TIFF header on subsequent pages
# tiff header -- PIL always starts the first IFD at offset 8
fp.write(self._prefix + self._pack("HL", 42, 8))
fp.write(self._get_ifh())
offset = fp.tell()
result = self.tobytes(offset)
@ -1401,7 +1411,8 @@ class TiffImageFile(ImageFile.ImageFile):
self.fp = None # might be shared
if err < 0:
raise OSError(err)
msg = f"decoder error {err}"
raise OSError(msg)
return Image.Image.load(self)
@ -1561,17 +1572,6 @@ class TiffImageFile(ImageFile.ImageFile):
# fillorder==2 modes have a corresponding
# fillorder=1 mode
self._mode, rawmode = OPEN_INFO[key]
# libtiff always returns the bytes in native order.
# we're expecting image byte order. So, if the rawmode
# contains I;16, we need to convert from native to image
# byte order.
if rawmode == "I;16":
rawmode = "I;16N"
if ";16B" in rawmode:
rawmode = rawmode.replace(";16B", ";16N")
if ";16L" in rawmode:
rawmode = rawmode.replace(";16L", ";16N")
# YCbCr images with new jpeg compression with pixels in one plane
# unpacked straight into RGB values
if (
@ -1580,6 +1580,14 @@ class TiffImageFile(ImageFile.ImageFile):
and self._planar_configuration == 1
):
rawmode = "RGB"
# libtiff always returns the bytes in native order.
# we're expecting image byte order. So, if the rawmode
# contains I;16, we need to convert from native to image
# byte order.
elif rawmode == "I;16":
rawmode = "I;16N"
elif rawmode.endswith(";16B") or rawmode.endswith(";16L"):
rawmode = rawmode[:-1] + "N"
# Offset in the tile tuple is 0, we go from 0,0 to
# w,h, and we only do this once -- eds
@ -1685,10 +1693,13 @@ def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
msg = f"cannot write mode {im.mode} as TIFF"
raise OSError(msg) from e
ifd = ImageFileDirectory_v2(prefix=prefix)
encoderinfo = im.encoderinfo
encoderconfig = im.encoderconfig
ifd = ImageFileDirectory_v2(prefix=prefix)
if encoderinfo.get("big_tiff"):
ifd._bigtiff = True
try:
compression = encoderinfo["compression"]
except KeyError:
@ -2038,20 +2049,21 @@ class AppendingTiffWriter(io.BytesIO):
self.offsetOfNewPage = 0
self.IIMM = iimm = self.f.read(4)
self._bigtiff = b"\x2B" in iimm
if not iimm:
# empty file - first page
self.isFirst = True
return
self.isFirst = False
if iimm == b"II\x2a\x00":
self.setEndian("<")
elif iimm == b"MM\x00\x2a":
self.setEndian(">")
else:
if iimm not in PREFIXES:
msg = "Invalid TIFF file header"
raise RuntimeError(msg)
self.setEndian("<" if iimm.startswith(II) else ">")
if self._bigtiff:
self.f.seek(4, os.SEEK_CUR)
self.skipIFDs()
self.goToEnd()
@ -2071,11 +2083,13 @@ class AppendingTiffWriter(io.BytesIO):
msg = "IIMM of new page doesn't match IIMM of first page"
raise RuntimeError(msg)
ifd_offset = self.readLong()
if self._bigtiff:
self.f.seek(4, os.SEEK_CUR)
ifd_offset = self._read(8 if self._bigtiff else 4)
ifd_offset += self.offsetOfNewPage
assert self.whereToWriteNewIFDOffset is not None
self.f.seek(self.whereToWriteNewIFDOffset)
self.writeLong(ifd_offset)
self._write(ifd_offset, 8 if self._bigtiff else 4)
self.f.seek(ifd_offset)
self.fixIFD()
@ -2121,18 +2135,20 @@ class AppendingTiffWriter(io.BytesIO):
self.endian = endian
self.longFmt = f"{self.endian}L"
self.shortFmt = f"{self.endian}H"
self.tagFormat = f"{self.endian}HHL"
self.tagFormat = f"{self.endian}HH" + ("Q" if self._bigtiff else "L")
def skipIFDs(self) -> None:
while True:
ifd_offset = self.readLong()
ifd_offset = self._read(8 if self._bigtiff else 4)
if ifd_offset == 0:
self.whereToWriteNewIFDOffset = self.f.tell() - 4
self.whereToWriteNewIFDOffset = self.f.tell() - (
8 if self._bigtiff else 4
)
break
self.f.seek(ifd_offset)
num_tags = self.readShort()
self.f.seek(num_tags * 12, os.SEEK_CUR)
num_tags = self._read(8 if self._bigtiff else 2)
self.f.seek(num_tags * (20 if self._bigtiff else 12), os.SEEK_CUR)
def write(self, data: Buffer, /) -> int:
return self.f.write(data)
@ -2162,17 +2178,19 @@ class AppendingTiffWriter(io.BytesIO):
msg = f"wrote only {bytes_written} bytes but wanted {expected}"
raise RuntimeError(msg)
def rewriteLastShortToLong(self, value: int) -> None:
self.f.seek(-2, os.SEEK_CUR)
bytes_written = self.f.write(struct.pack(self.longFmt, value))
self._verify_bytes_written(bytes_written, 4)
def _rewriteLast(self, value: int, field_size: int) -> None:
def _rewriteLast(
self, value: int, field_size: int, new_field_size: int = 0
) -> None:
self.f.seek(-field_size, os.SEEK_CUR)
if not new_field_size:
new_field_size = field_size
bytes_written = self.f.write(
struct.pack(self.endian + self._fmt(field_size), value)
struct.pack(self.endian + self._fmt(new_field_size), value)
)
self._verify_bytes_written(bytes_written, field_size)
self._verify_bytes_written(bytes_written, new_field_size)
def rewriteLastShortToLong(self, value: int) -> None:
self._rewriteLast(value, 2, 4)
def rewriteLastShort(self, value: int) -> None:
return self._rewriteLast(value, 2)
@ -2180,13 +2198,17 @@ class AppendingTiffWriter(io.BytesIO):
def rewriteLastLong(self, value: int) -> None:
return self._rewriteLast(value, 4)
def _write(self, value: int, field_size: int) -> None:
bytes_written = self.f.write(
struct.pack(self.endian + self._fmt(field_size), value)
)
self._verify_bytes_written(bytes_written, field_size)
def writeShort(self, value: int) -> None:
bytes_written = self.f.write(struct.pack(self.shortFmt, value))
self._verify_bytes_written(bytes_written, 2)
self._write(value, 2)
def writeLong(self, value: int) -> None:
bytes_written = self.f.write(struct.pack(self.longFmt, value))
self._verify_bytes_written(bytes_written, 4)
self._write(value, 4)
def close(self) -> None:
self.finalize()
@ -2194,24 +2216,37 @@ class AppendingTiffWriter(io.BytesIO):
self.f.close()
def fixIFD(self) -> None:
num_tags = self.readShort()
num_tags = self._read(8 if self._bigtiff else 2)
for i in range(num_tags):
tag, field_type, count = struct.unpack(self.tagFormat, self.f.read(8))
tag, field_type, count = struct.unpack(
self.tagFormat, self.f.read(12 if self._bigtiff else 8)
)
field_size = self.fieldSizes[field_type]
total_size = field_size * count
is_local = total_size <= 4
fmt_size = 8 if self._bigtiff else 4
is_local = total_size <= fmt_size
if not is_local:
offset = self.readLong() + self.offsetOfNewPage
self.rewriteLastLong(offset)
offset = self._read(fmt_size) + self.offsetOfNewPage
self._rewriteLast(offset, fmt_size)
if tag in self.Tags:
cur_pos = self.f.tell()
logger.debug(
"fixIFD: %s (%d) - type: %s (%d) - type size: %d - count: %d",
TiffTags.lookup(tag).name,
tag,
TYPES.get(field_type, "unknown"),
field_type,
field_size,
count,
)
if is_local:
self._fixOffsets(count, field_size)
self.f.seek(cur_pos + 4)
self.f.seek(cur_pos + fmt_size)
else:
self.f.seek(offset)
self._fixOffsets(count, field_size)
@ -2219,24 +2254,33 @@ class AppendingTiffWriter(io.BytesIO):
elif is_local:
# skip the locally stored value that is not an offset
self.f.seek(4, os.SEEK_CUR)
self.f.seek(fmt_size, os.SEEK_CUR)
def _fixOffsets(self, count: int, field_size: int) -> None:
for i in range(count):
offset = self._read(field_size)
offset += self.offsetOfNewPage
if field_size == 2 and offset >= 65536:
# offset is now too large - we must convert shorts to longs
new_field_size = 0
if self._bigtiff and field_size in (2, 4) and offset >= 2**32:
# offset is now too large - we must convert long to long8
new_field_size = 8
elif field_size == 2 and offset >= 2**16:
# offset is now too large - we must convert short to long
new_field_size = 4
if new_field_size:
if count != 1:
msg = "not implemented"
raise RuntimeError(msg) # XXX TODO
# simple case - the offset is just one and therefore it is
# local (not referenced with another offset)
self.rewriteLastShortToLong(offset)
self.f.seek(-10, os.SEEK_CUR)
self.writeShort(TiffTags.LONG) # rewrite the type to LONG
self.f.seek(8, os.SEEK_CUR)
self._rewriteLast(offset, field_size, new_field_size)
# Move back past the new offset, past 'count', and before 'field_type'
rewind = -new_field_size - 4 - 2
self.f.seek(rewind, os.SEEK_CUR)
self.writeShort(new_field_size) # rewrite the type
self.f.seek(2 - rewind, os.SEEK_CUR)
else:
self._rewriteLast(offset, field_size)

View File

@ -47,6 +47,8 @@ def deprecate(
raise RuntimeError(msg)
elif when == 12:
removed = "Pillow 12 (2025-10-15)"
elif when == 13:
removed = "Pillow 13 (2026-10-15)"
else:
msg = f"Unknown removal version: {when}. Update {__name__}?"
raise ValueError(msg)

View File

@ -28,6 +28,7 @@ class Font:
features: list[str] | None,
lang: str | None,
stroke_width: float,
stroke_filled: bool,
anchor: str | None,
foreground_ink_long: int,
x_start: float,

View File

@ -1,4 +1,4 @@
# Master version for Pillow
from __future__ import annotations
__version__ = "11.1.0.dev0"
__version__ = "11.2.0.dev0"

View File

@ -127,6 +127,7 @@ features: dict[str, tuple[str, str | bool, str | None]] = {
"fribidi": ("PIL._imagingft", "HAVE_FRIBIDI", "fribidi_version"),
"harfbuzz": ("PIL._imagingft", "HAVE_HARFBUZZ", "harfbuzz_version"),
"libjpeg_turbo": ("PIL._imaging", "HAVE_LIBJPEGTURBO", "libjpeg_turbo_version"),
"mozjpeg": ("PIL._imaging", "HAVE_MOZJPEG", "libjpeg_turbo_version"),
"zlib_ng": ("PIL._imaging", "HAVE_ZLIBNG", "zlib_ng_version"),
"libimagequant": ("PIL._imaging", "HAVE_LIBIMAGEQUANT", "imagequant_version"),
"xcb": ("PIL._imaging", "HAVE_XCB", None),
@ -300,7 +301,8 @@ def pilinfo(out: IO[str] | None = None, supported_formats: bool = True) -> None:
if name == "jpg":
libjpeg_turbo_version = version_feature("libjpeg_turbo")
if libjpeg_turbo_version is not None:
v = "libjpeg-turbo " + libjpeg_turbo_version
v = "mozjpeg" if check_feature("mozjpeg") else "libjpeg-turbo"
v += " " + libjpeg_turbo_version
if v is None:
v = version(name)
if v is not None:

View File

@ -76,6 +76,13 @@
#ifdef HAVE_LIBJPEG
#include "jconfig.h"
#ifdef LIBJPEG_TURBO_VERSION
#define JCONFIG_INCLUDED
#ifdef __CYGWIN__
#define _BASETSD_H
#endif
#include "jpeglib.h"
#endif
#endif
#ifdef HAVE_LIBZ
@ -466,8 +473,7 @@ getpixel(Imaging im, ImagingAccess access, int x, int y) {
}
/* unknown type */
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
static char *
@ -958,8 +964,7 @@ _convert2(ImagingObject *self, PyObject *args) {
return NULL;
}
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
static PyObject *
@ -1207,8 +1212,7 @@ _getpixel(ImagingObject *self, PyObject *args) {
}
if (self->access == NULL) {
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
return getpixel(self->image, self->access, x, y);
@ -1410,8 +1414,7 @@ _paste(ImagingObject *self, PyObject *args) {
return NULL;
}
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
static PyObject *
@ -1684,8 +1687,7 @@ _putdata(ImagingObject *self, PyObject *args) {
Py_XDECREF(seq);
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
static PyObject *
@ -1745,8 +1747,7 @@ _putpalette(ImagingObject *self, PyObject *args) {
self->image->palette->size = palettesize * 8 / bits;
unpack(self->image->palette->palette, palette, self->image->palette->size);
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
static PyObject *
@ -1770,8 +1771,7 @@ _putpalettealpha(ImagingObject *self, PyObject *args) {
strcpy(self->image->palette->mode, "RGBA");
self->image->palette->palette[index * 4 + 3] = (UINT8)alpha;
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
static PyObject *
@ -1798,8 +1798,7 @@ _putpalettealphas(ImagingObject *self, PyObject *args) {
self->image->palette->palette[i * 4 + 3] = (UINT8)values[i];
}
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
static PyObject *
@ -1835,8 +1834,7 @@ _putpixel(ImagingObject *self, PyObject *args) {
self->access->put_pixel(im, x, y, ink);
}
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
static PyObject *
@ -2003,8 +2001,7 @@ im_setmode(ImagingObject *self, PyObject *args) {
}
self->access = ImagingAccessNew(im);
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
static PyObject *
@ -2067,8 +2064,7 @@ _transform(ImagingObject *self, PyObject *args) {
return NULL;
}
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
static PyObject *
@ -2195,8 +2191,7 @@ _getbbox(ImagingObject *self, PyObject *args) {
}
if (!ImagingGetBBox(self->image, bbox, alpha_only)) {
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
return Py_BuildValue("iiii", bbox[0], bbox[1], bbox[2], bbox[3]);
@ -2276,8 +2271,7 @@ _getextrema(ImagingObject *self) {
}
}
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
static PyObject *
@ -2340,8 +2334,7 @@ _fillband(ImagingObject *self, PyObject *args) {
return NULL;
}
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
static PyObject *
@ -2356,8 +2349,7 @@ _putband(ImagingObject *self, PyObject *args) {
return NULL;
}
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
static PyObject *
@ -2943,8 +2935,7 @@ _draw_arc(ImagingDrawObject *self, PyObject *args) {
return NULL;
}
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
static PyObject *
@ -2981,8 +2972,7 @@ _draw_bitmap(ImagingDrawObject *self, PyObject *args) {
return NULL;
}
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
static PyObject *
@ -3038,8 +3028,7 @@ _draw_chord(ImagingDrawObject *self, PyObject *args) {
return NULL;
}
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
static PyObject *
@ -3093,8 +3082,7 @@ _draw_ellipse(ImagingDrawObject *self, PyObject *args) {
return NULL;
}
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
static PyObject *
@ -3157,8 +3145,7 @@ _draw_lines(ImagingDrawObject *self, PyObject *args) {
free(xy);
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
static PyObject *
@ -3189,8 +3176,7 @@ _draw_points(ImagingDrawObject *self, PyObject *args) {
free(xy);
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
/* from outline.c */
@ -3218,8 +3204,7 @@ _draw_outline(ImagingDrawObject *self, PyObject *args) {
return NULL;
}
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
static PyObject *
@ -3275,8 +3260,7 @@ _draw_pieslice(ImagingDrawObject *self, PyObject *args) {
return NULL;
}
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
static PyObject *
@ -3327,8 +3311,7 @@ _draw_polygon(ImagingDrawObject *self, PyObject *args) {
free(ixy);
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
static PyObject *
@ -3382,8 +3365,7 @@ _draw_rectangle(ImagingDrawObject *self, PyObject *args) {
return NULL;
}
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
static struct PyMethodDef _draw_methods[] = {
@ -3588,8 +3570,7 @@ _save_ppm(ImagingObject *self, PyObject *args) {
return NULL;
}
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
/* -------------------------------------------------------------------- */
@ -3977,8 +3958,7 @@ _reset_stats(PyObject *self, PyObject *args) {
arena->stats_freed_blocks = 0;
MUTEX_UNLOCK(&ImagingDefaultArena.mutex);
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
static PyObject *
@ -4038,8 +4018,7 @@ _set_alignment(PyObject *self, PyObject *args) {
ImagingDefaultArena.alignment = alignment;
MUTEX_UNLOCK(&ImagingDefaultArena.mutex);
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
static PyObject *
@ -4063,8 +4042,7 @@ _set_block_size(PyObject *self, PyObject *args) {
ImagingDefaultArena.block_size = block_size;
MUTEX_UNLOCK(&ImagingDefaultArena.mutex);
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
static PyObject *
@ -4092,8 +4070,7 @@ _set_blocks_max(PyObject *self, PyObject *args) {
return ImagingError_MemoryError();
}
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
static PyObject *
@ -4108,8 +4085,7 @@ _clear_cache(PyObject *self, PyObject *args) {
ImagingMemoryClearCache(&ImagingDefaultArena, i);
MUTEX_UNLOCK(&ImagingDefaultArena.mutex);
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
/* -------------------------------------------------------------------- */
@ -4367,6 +4343,15 @@ setup_module(PyObject *m) {
Py_INCREF(have_libjpegturbo);
PyModule_AddObject(m, "HAVE_LIBJPEGTURBO", have_libjpegturbo);
PyObject *have_mozjpeg;
#ifdef JPEG_C_PARAM_SUPPORTED
have_mozjpeg = Py_True;
#else
have_mozjpeg = Py_False;
#endif
Py_INCREF(have_mozjpeg);
PyModule_AddObject(m, "HAVE_MOZJPEG", have_mozjpeg);
PyObject *have_libimagequant;
#ifdef HAVE_LIBIMAGEQUANT
have_libimagequant = Py_True;

View File

@ -654,8 +654,7 @@ cms_get_display_profile_win32(PyObject *self, PyObject *args) {
return PyUnicode_FromStringAndSize(filename, filename_size - 1);
}
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
#endif
@ -672,20 +671,17 @@ _profile_read_mlu(CmsProfileObject *self, cmsTagSignature info) {
wchar_t *buf;
if (!cmsIsTag(self->profile, info)) {
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
mlu = cmsReadTag(self->profile, info);
if (!mlu) {
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
len = cmsMLUgetWide(mlu, lc, cc, NULL, 0);
if (len == 0) {
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
buf = malloc(len);
@ -723,14 +719,12 @@ _profile_read_signature(CmsProfileObject *self, cmsTagSignature info) {
unsigned int *sig;
if (!cmsIsTag(self->profile, info)) {
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
sig = (unsigned int *)cmsReadTag(self->profile, info);
if (!sig) {
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
return _profile_read_int_as_string(*sig);
@ -780,14 +774,12 @@ _profile_read_ciexyz(CmsProfileObject *self, cmsTagSignature info, int multi) {
cmsCIEXYZ *XYZ;
if (!cmsIsTag(self->profile, info)) {
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
XYZ = (cmsCIEXYZ *)cmsReadTag(self->profile, info);
if (!XYZ) {
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
if (multi) {
return _xyz3_py(XYZ);
@ -801,14 +793,12 @@ _profile_read_ciexyy_triple(CmsProfileObject *self, cmsTagSignature info) {
cmsCIExyYTRIPLE *triple;
if (!cmsIsTag(self->profile, info)) {
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
triple = (cmsCIExyYTRIPLE *)cmsReadTag(self->profile, info);
if (!triple) {
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
/* Note: lcms does all the heavy lifting and error checking (nr of
@ -835,21 +825,18 @@ _profile_read_named_color_list(CmsProfileObject *self, cmsTagSignature info) {
PyObject *result;
if (!cmsIsTag(self->profile, info)) {
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
ncl = (cmsNAMEDCOLORLIST *)cmsReadTag(self->profile, info);
if (ncl == NULL) {
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
n = cmsNamedColorCount(ncl);
result = PyList_New(n);
if (!result) {
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
for (i = 0; i < n; i++) {
@ -858,8 +845,7 @@ _profile_read_named_color_list(CmsProfileObject *self, cmsTagSignature info) {
str = PyUnicode_FromString(name);
if (str == NULL) {
Py_DECREF(result);
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
PyList_SET_ITEM(result, i, str);
}
@ -926,8 +912,7 @@ _is_intent_supported(CmsProfileObject *self, int clut) {
result = PyDict_New();
if (result == NULL) {
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
n = cmsGetSupportedIntents(INTENTS, intent_ids, intent_descs);
@ -957,8 +942,7 @@ _is_intent_supported(CmsProfileObject *self, int clut) {
Py_XDECREF(id);
Py_XDECREF(entry);
Py_XDECREF(result);
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
PyDict_SetItem(result, id, entry);
Py_DECREF(id);
@ -1042,8 +1026,7 @@ cms_profile_getattr_creation_date(CmsProfileObject *self, void *closure) {
result = cmsGetHeaderCreationDateTime(self->profile, &ct);
if (!result) {
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
return PyDateTime_FromDateAndTime(
@ -1141,8 +1124,7 @@ cms_profile_getattr_saturation_rendering_intent_gamut(
static PyObject *
cms_profile_getattr_red_colorant(CmsProfileObject *self, void *closure) {
if (!cmsIsMatrixShaper(self->profile)) {
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
return _profile_read_ciexyz(self, cmsSigRedColorantTag, 0);
}
@ -1150,8 +1132,7 @@ cms_profile_getattr_red_colorant(CmsProfileObject *self, void *closure) {
static PyObject *
cms_profile_getattr_green_colorant(CmsProfileObject *self, void *closure) {
if (!cmsIsMatrixShaper(self->profile)) {
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
return _profile_read_ciexyz(self, cmsSigGreenColorantTag, 0);
}
@ -1159,8 +1140,7 @@ cms_profile_getattr_green_colorant(CmsProfileObject *self, void *closure) {
static PyObject *
cms_profile_getattr_blue_colorant(CmsProfileObject *self, void *closure) {
if (!cmsIsMatrixShaper(self->profile)) {
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
return _profile_read_ciexyz(self, cmsSigBlueColorantTag, 0);
}
@ -1176,21 +1156,18 @@ cms_profile_getattr_media_white_point_temperature(
cmsBool result;
if (!cmsIsTag(self->profile, info)) {
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
XYZ = (cmsCIEXYZ *)cmsReadTag(self->profile, info);
if (XYZ == NULL || XYZ->X == 0) {
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
cmsXYZ2xyY(&xyY, XYZ);
result = cmsTempFromWhitePoint(&tempK, &xyY);
if (!result) {
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
return PyFloat_FromDouble(tempK);
}
@ -1229,8 +1206,7 @@ cms_profile_getattr_red_primary(CmsProfileObject *self, void *closure) {
result = _calculate_rgb_primaries(self, &primaries);
}
if (!result) {
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
return _xyz_py(&primaries.Red);
@ -1245,8 +1221,7 @@ cms_profile_getattr_green_primary(CmsProfileObject *self, void *closure) {
result = _calculate_rgb_primaries(self, &primaries);
}
if (!result) {
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
return _xyz_py(&primaries.Green);
@ -1261,8 +1236,7 @@ cms_profile_getattr_blue_primary(CmsProfileObject *self, void *closure) {
result = _calculate_rgb_primaries(self, &primaries);
}
if (!result) {
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
return _xyz_py(&primaries.Blue);
@ -1321,14 +1295,12 @@ cms_profile_getattr_icc_measurement_condition(CmsProfileObject *self, void *clos
const char *geo;
if (!cmsIsTag(self->profile, info)) {
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
mc = (cmsICCMeasurementConditions *)cmsReadTag(self->profile, info);
if (!mc) {
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
if (mc->Geometry == 1) {
@ -1362,14 +1334,12 @@ cms_profile_getattr_icc_viewing_condition(CmsProfileObject *self, void *closure)
cmsTagSignature info = cmsSigViewingConditionsTag;
if (!cmsIsTag(self->profile, info)) {
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
vc = (cmsICCViewingConditions *)cmsReadTag(self->profile, info);
if (!vc) {
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
return Py_BuildValue(

View File

@ -339,29 +339,23 @@ text_layout_raqm(
len = PySequence_Fast_GET_SIZE(seq);
for (j = 0; j < len; j++) {
PyObject *item = PySequence_Fast_GET_ITEM(seq, j);
char *feature = NULL;
Py_ssize_t size = 0;
PyObject *bytes;
if (!PyUnicode_Check(item)) {
Py_DECREF(seq);
PyErr_SetString(PyExc_TypeError, "expected a string");
goto failed;
}
bytes = PyUnicode_AsUTF8String(item);
if (bytes == NULL) {
Py_ssize_t size;
const char *feature = PyUnicode_AsUTF8AndSize(item, &size);
if (feature == NULL) {
Py_DECREF(seq);
goto failed;
}
feature = PyBytes_AS_STRING(bytes);
size = PyBytes_GET_SIZE(bytes);
if (!raqm_add_font_feature(rq, feature, size)) {
Py_DECREF(seq);
Py_DECREF(bytes);
PyErr_SetString(PyExc_ValueError, "raqm_add_font_feature() failed");
goto failed;
}
Py_DECREF(bytes);
}
Py_DECREF(seq);
}
@ -840,6 +834,7 @@ font_render(FontObject *self, PyObject *args) {
int mask = 0; /* is FT_LOAD_TARGET_MONO enabled? */
int color = 0; /* is FT_LOAD_COLOR enabled? */
float stroke_width = 0;
int stroke_filled = 0;
PY_LONG_LONG foreground_ink_long = 0;
unsigned int foreground_ink;
const char *mode = NULL;
@ -859,7 +854,7 @@ font_render(FontObject *self, PyObject *args) {
if (!PyArg_ParseTuple(
args,
"OO|zzOzfzLffO:render",
"OO|zzOzfpzLffO:render",
&string,
&fill,
&mode,
@ -867,6 +862,7 @@ font_render(FontObject *self, PyObject *args) {
&features,
&lang,
&stroke_width,
&stroke_filled,
&anchor,
&foreground_ink_long,
&x_start,
@ -1011,7 +1007,8 @@ font_render(FontObject *self, PyObject *args) {
if (stroker != NULL) {
error = FT_Get_Glyph(glyph_slot, &glyph);
if (!error) {
error = FT_Glyph_Stroke(&glyph, stroker, 1);
error = stroke_filled ? FT_Glyph_StrokeBorder(&glyph, stroker, 0, 1)
: FT_Glyph_Stroke(&glyph, stroker, 1);
}
if (!error) {
FT_Vector origin = {0, 0};
@ -1377,8 +1374,7 @@ font_setvarname(FontObject *self, PyObject *args) {
return geterror(error);
}
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
static PyObject *
@ -1432,8 +1428,7 @@ font_setvaraxes(FontObject *self, PyObject *args) {
return geterror(error);
}
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
#endif

View File

@ -192,8 +192,7 @@ _unop(PyObject *self, PyObject *args) {
unop(out, im1);
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
static PyObject *
@ -226,8 +225,7 @@ _binop(PyObject *self, PyObject *args) {
binop(out, im1, im2);
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
static PyMethodDef _functions[] = {

View File

@ -37,8 +37,7 @@ _tkinit(PyObject *self, PyObject *args) {
/* This will bomb if interp is invalid... */
TkImaging_Init(interp);
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
static PyMethodDef functions[] = {

View File

@ -213,8 +213,7 @@ _setimage(ImagingDecoderObject *decoder, PyObject *args) {
Py_XDECREF(decoder->lock);
decoder->lock = op;
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
static PyObject *
@ -231,8 +230,7 @@ _setfd(ImagingDecoderObject *decoder, PyObject *args) {
Py_XINCREF(fd);
state->fd = fd;
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
static PyObject *

View File

@ -85,8 +85,7 @@ _expose(ImagingDisplayObject *display, PyObject *args) {
ImagingExposeDIB(display->dib, hdc);
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
static PyObject *
@ -112,8 +111,7 @@ _draw(ImagingDisplayObject *display, PyObject *args) {
ImagingDrawDIB(display->dib, hdc, dst, src);
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
extern Imaging
@ -143,8 +141,7 @@ _paste(ImagingDisplayObject *display, PyObject *args) {
ImagingPasteDIB(display->dib, im, xy);
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
static PyObject *
@ -190,8 +187,7 @@ _releasedc(ImagingDisplayObject *display, PyObject *args) {
ReleaseDC(window, dc);
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
static PyObject *
@ -211,8 +207,7 @@ _frombytes(ImagingDisplayObject *display, PyObject *args) {
memcpy(display->dib->bits, buffer.buf, buffer.len);
PyBuffer_Release(&buffer);
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
static PyObject *
@ -709,8 +704,7 @@ PyImaging_EventLoopWin32(PyObject *self, PyObject *args) {
}
Py_END_ALLOW_THREADS;
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
/* -------------------------------------------------------------------- */

View File

@ -278,8 +278,7 @@ _setimage(ImagingEncoderObject *encoder, PyObject *args) {
Py_XDECREF(encoder->lock);
encoder->lock = op;
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
static PyObject *
@ -296,8 +295,7 @@ _setfd(ImagingEncoderObject *encoder, PyObject *args) {
Py_XINCREF(fd);
state->fd = fd;
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
static PyObject *

View File

@ -1,72 +0,0 @@
/*
* The Python Imaging Library
* $Id$
*
* default exception handling
*
* This module is usually overridden by application code (e.g.
* _imaging.c for PIL's standard Python bindings). If you get
* linking errors, remove this file from your project/library.
*
* history:
* 1995-06-15 fl Created
* 1998-12-29 fl Minor tweaks
* 2003-09-13 fl Added ImagingEnter/LeaveSection()
*
* Copyright (c) 1997-2003 by Secret Labs AB.
* Copyright (c) 1995-2003 by Fredrik Lundh.
*
* See the README file for information on usage and redistribution.
*/
#include "Imaging.h"
/* exception state */
void *
ImagingError_OSError(void) {
fprintf(stderr, "*** exception: file access error\n");
return NULL;
}
void *
ImagingError_MemoryError(void) {
fprintf(stderr, "*** exception: out of memory\n");
return NULL;
}
void *
ImagingError_ModeError(void) {
return ImagingError_ValueError("bad image mode");
}
void *
ImagingError_Mismatch(void) {
return ImagingError_ValueError("images don't match");
}
void *
ImagingError_ValueError(const char *message) {
if (!message) {
message = "exception: bad argument to function";
}
fprintf(stderr, "*** %s\n", message);
return NULL;
}
void
ImagingError_Clear(void) {
/* nop */;
}
/* thread state */
void
ImagingSectionEnter(ImagingSectionCookie *cookie) {
/* pass */
}
void
ImagingSectionLeave(ImagingSectionCookie *cookie) {
/* pass */
}

View File

@ -44,8 +44,6 @@
defines their own types with the same names, so we need to be able to undef
ours before including the JPEG code. */
#if __STDC_VERSION__ >= 199901L /* C99+ */
#include <stdint.h>
#define INT8 int8_t
@ -55,34 +53,6 @@
#define INT32 int32_t
#define UINT32 uint32_t
#else /* < C99 */
#define INT8 signed char
#if SIZEOF_SHORT == 2
#define INT16 short
#elif SIZEOF_INT == 2
#define INT16 int
#else
#error Cannot find required 16-bit integer type
#endif
#if SIZEOF_SHORT == 4
#define INT32 short
#elif SIZEOF_INT == 4
#define INT32 int
#elif SIZEOF_LONG == 4
#define INT32 long
#else
#error Cannot find required 32-bit integer type
#endif
#define UINT8 unsigned char
#define UINT16 unsigned INT16
#define UINT32 unsigned INT32
#endif /* < C99 */
#endif /* not WIN */
/* assume IEEE; tweak if necessary (patches are welcome) */

View File

@ -609,10 +609,6 @@ ImagingLibTiffDecode(
extern int
ImagingLibTiffEncode(Imaging im, ImagingCodecState state, UINT8 *buffer, int bytes);
#endif
#ifdef HAVE_LIBMPEG
extern int
ImagingMpegDecode(Imaging im, ImagingCodecState state, UINT8 *buffer, Py_ssize_t bytes);
#endif
extern int
ImagingMspDecode(Imaging im, ImagingCodecState state, UINT8 *buffer, Py_ssize_t bytes);
extern int

View File

@ -330,6 +330,13 @@ j2k_encode_entry(Imaging im, ImagingCodecState state) {
components = 4;
color_space = OPJ_CLRSPC_SRGB;
pack = j2k_pack_rgba;
#if ((OPJ_VERSION_MAJOR == 2 && OPJ_VERSION_MINOR == 5 && OPJ_VERSION_BUILD >= 3) || \
(OPJ_VERSION_MAJOR == 2 && OPJ_VERSION_MINOR > 5) || OPJ_VERSION_MAJOR > 2)
} else if (strcmp(im->mode, "CMYK") == 0) {
components = 4;
color_space = OPJ_CLRSPC_CMYK;
pack = j2k_pack_rgba;
#endif
} else {
state->errcode = IMAGING_CODEC_BROKEN;
state->state = J2K_STATE_FAILED;
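The branch added above only compiles when the OpenJPEG headers report version 2.5.3 or newer; with older headers, CMYK input still falls through to the IMAGING_CODEC_BROKEN error path. The three-part preprocessor test is simply an "at least 2.5.3" check; an equivalent helper macro (name hypothetical, assuming opj_config.h provides the OPJ_VERSION_* constants as in current OpenJPEG releases) would be:

```c
#include <opj_config.h>  /* OPJ_VERSION_MAJOR / MINOR / BUILD */

/* Hypothetical restatement of the guard in the hunk above: true when the
   OpenJPEG headers report version 2.5.3 or newer. */
#define J2K_OPJ_AT_LEAST_2_5_3                                \
    (OPJ_VERSION_MAJOR > 2 ||                                 \
     (OPJ_VERSION_MAJOR == 2 &&                               \
      (OPJ_VERSION_MINOR > 5 ||                               \
       (OPJ_VERSION_MINOR == 5 && OPJ_VERSION_BUILD >= 3))))
```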

View File

@ -134,7 +134,16 @@ ImagingJpegEncode(Imaging im, ImagingCodecState state, UINT8 *buf, int bytes) {
return -1;
}
/* Compressor configuration */
#ifdef JPEG_C_PARAM_SUPPORTED
/* MozJPEG */
if (!context->progressive) {
/* Do not use MozJPEG progressive default */
jpeg_c_set_int_param(
&context->cinfo, JINT_COMPRESS_PROFILE, JCP_FASTEST
);
}
#endif
jpeg_set_defaults(&context->cinfo);
/* Prevent RGB -> YCbCr conversion */

View File

@ -1664,6 +1664,7 @@ static struct {
{"RGBA", "RGBaXX", 48, unpackRGBaskip2},
{"RGBA", "RGBa;16L", 64, unpackRGBa16L},
{"RGBA", "RGBa;16B", 64, unpackRGBa16B},
{"RGBA", "BGR", 24, ImagingUnpackBGR},
{"RGBA", "BGRa", 32, unpackBGRa},
{"RGBA", "RGBA;I", 32, unpackRGBAI},
{"RGBA", "RGBA;L", 32, unpackRGBAL},
@ -1695,6 +1696,7 @@ static struct {
#ifdef WORDS_BIGENDIAN
{"RGB", "RGB;16N", 48, unpackRGB16B},
{"RGB", "RGBX;16N", 64, unpackRGBA16B},
{"RGBA", "RGBa;16N", 64, unpackRGBa16B},
{"RGBA", "RGBA;16N", 64, unpackRGBA16B},
{"RGBX", "RGBX;16N", 64, unpackRGBA16B},
@ -1708,6 +1710,7 @@ static struct {
{"RGBA", "A;16N", 16, band316B},
#else
{"RGB", "RGB;16N", 48, unpackRGB16L},
{"RGB", "RGBX;16N", 64, unpackRGBA16L},
{"RGBA", "RGBa;16N", 64, unpackRGBa16L},
{"RGBA", "RGBA;16N", 64, unpackRGBA16L},
{"RGBX", "RGBX;16N", 64, unpackRGBA16L},

View File

@ -89,8 +89,7 @@ _outline_move(OutlineObject *self, PyObject *args) {
ImagingOutlineMove(self->outline, x0, y0);
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
static PyObject *
@ -102,8 +101,7 @@ _outline_line(OutlineObject *self, PyObject *args) {
ImagingOutlineLine(self->outline, x1, y1);
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
static PyObject *
@ -115,8 +113,7 @@ _outline_curve(OutlineObject *self, PyObject *args) {
ImagingOutlineCurve(self->outline, x1, y1, x2, y2, x3, y3);
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
static PyObject *
@ -127,8 +124,7 @@ _outline_close(OutlineObject *self, PyObject *args) {
ImagingOutlineClose(self->outline);
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
static PyObject *
@ -140,8 +136,7 @@ _outline_transform(OutlineObject *self, PyObject *args) {
ImagingOutlineTransform(self->outline, a);
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
static struct PyMethodDef _outline_methods[] = {

View File

@ -415,8 +415,7 @@ path_map(PyPathObject *self, PyObject *args) {
}
self->mapping = 0;
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
static int
@ -528,8 +527,7 @@ path_transform(PyPathObject *self, PyObject *args) {
}
}
Py_INCREF(Py_None);
return Py_None;
Py_RETURN_NONE;
}
static struct PyMethodDef methods[] = {

View File

@ -11,10 +11,11 @@ For more extensive info, see the [Windows build instructions](build.rst).
* Requires Microsoft Visual Studio 2017 or newer with C++ component.
* Requires NASM for libjpeg-turbo, a required dependency when using this script.
* Requires CMake 3.15 or newer (available as Visual Studio component).
* Tested on Windows Server 2019 with Visual Studio 2019 Community and Visual Studio 2022 Community (AppVeyor).
* Tested on Windows Server 2022 with Visual Studio 2022 Enterprise (GitHub Actions).
* Tested on Windows Server 2022 with Visual Studio 2022 Enterprise and Windows Server
2019 with Visual Studio 2019 Enterprise (GitHub Actions).
Here's an example script to build on Windows:
The following is a simplified version of the script used on AppVeyor:
```
set PYTHON=C:\Python39\bin
cd /D C:\Pillow\winbuild

View File

@ -6,7 +6,7 @@ Building Pillow on Windows
be sufficient.
This page describes the steps necessary to build Pillow using the same
scripts used on GitHub Actions and AppVeyor CIs.
scripts used on GitHub Actions CI.
Prerequisites
-------------
@ -112,7 +112,7 @@ directory.
Example
-------
The following is a simplified version of the script used on AppVeyor::
Here's an example script to build on Windows::
set PYTHON=C:\Python39\bin
cd /D C:\Pillow\winbuild

View File

@ -113,17 +113,16 @@ V = {
"BROTLI": "1.1.0",
"FREETYPE": "2.13.3",
"FRIBIDI": "1.0.16",
"HARFBUZZ": "10.1.0",
"HARFBUZZ": "10.2.0",
"JPEGTURBO": "3.1.0",
"LCMS2": "2.16",
"LIBPNG": "1.6.44",
"LIBPNG": "1.6.46",
"LIBWEBP": "1.5.0",
"OPENJPEG": "2.5.3",
"TIFF": "4.6.0",
"XZ": "5.6.3",
"ZLIBNG": "2.2.2",
"XZ": "5.6.4",
"ZLIBNG": "2.2.3",
}
V["LIBPNG_DOTLESS"] = V["LIBPNG"].replace(".", "")
V["LIBPNG_XY"] = "".join(V["LIBPNG"].split(".")[:2])
@ -241,8 +240,8 @@ DEPS: dict[str, dict[str, Any]] = {
},
"libpng": {
"url": f"{SF_PROJECTS}/libpng/files/libpng{V['LIBPNG_XY']}/{V['LIBPNG']}/"
f"lpng{V['LIBPNG_DOTLESS']}.zip/download",
"filename": f"lpng{V['LIBPNG_DOTLESS']}.zip",
f"FILENAME/download",
"filename": f"libpng-{V['LIBPNG']}.tar.gz",
"license": "LICENSE",
"build": [
*cmds_cmake("png_static", "-DPNG_SHARED:BOOL=OFF", "-DPNG_TESTS:BOOL=OFF"),