Merge branch 'main' into main

Andrew Murray 2025-03-04 08:49:12 +11:00, committed by GitHub
commit b16b579797 (GPG key ID: B5690EEEBB952194; no known key found for this signature in database)
249 changed files with 4121 additions and 2971 deletions


@ -1,99 +0,0 @@
skip_commits:
files:
- ".github/**/*"
- ".gitmodules"
- "docs/**/*"
- "wheels/**/*"
version: '{build}'
clone_folder: c:\pillow
init:
- ECHO %PYTHON%
#- ps: iex ((new-object net.webclient).DownloadString('https://raw.githubusercontent.com/appveyor/ci/master/scripts/enable-rdp.ps1'))
# Uncomment previous line to get RDP access during the build.
environment:
COVERAGE_CORE: sysmon
EXECUTABLE: python.exe
TEST_OPTIONS:
DEPLOY: YES
matrix:
- PYTHON: C:/Python312
ARCHITECTURE: x86
APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2022
- PYTHON: C:/Python39-x64
ARCHITECTURE: AMD64
APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2019
install:
- '%PYTHON%\%EXECUTABLE% --version'
- '%PYTHON%\%EXECUTABLE% -m pip install --upgrade pip'
- curl -fsSL -o pillow-test-images.zip https://github.com/python-pillow/test-images/archive/main.zip
- 7z x pillow-test-images.zip -oc:\
- xcopy /S /Y c:\test-images-main\* c:\pillow\tests\images
- curl -fsSL -o nasm-win64.zip https://raw.githubusercontent.com/python-pillow/pillow-depends/main/nasm-2.16.03-win64.zip
- 7z x nasm-win64.zip -oc:\
- choco install ghostscript --version=10.4.0
- path c:\nasm-2.16.03;C:\Program Files\gs\gs10.04.0\bin;%PATH%
- cd c:\pillow\winbuild\
- ps: |
c:\python39\python.exe c:\pillow\winbuild\build_prepare.py -v --depends=C:\pillow-depends\
c:\pillow\winbuild\build\build_dep_all.cmd
$host.SetShouldExit(0)
- path C:\pillow\winbuild\build\bin;%PATH%
build_script:
- cd c:\pillow
- winbuild\build\build_env.cmd
- '%PYTHON%\%EXECUTABLE% -m pip install -v -C raqm=vendor -C fribidi=vendor .'
- '%PYTHON%\%EXECUTABLE% selftest.py --installed'
test_script:
- cd c:\pillow
- '%PYTHON%\%EXECUTABLE% -m pip install pytest pytest-cov pytest-timeout defusedxml ipython numpy olefile pyroma'
- c:\"Program Files (x86)"\"Windows Kits"\10\Debuggers\x86\gflags.exe /p /enable %PYTHON%\%EXECUTABLE%
- path %PYTHON%;%PATH%
- .ci\test.cmd
after_test:
- curl -Os https://uploader.codecov.io/latest/windows/codecov.exe
- .\codecov.exe --file coverage.xml --name %PYTHON% --flags AppVeyor
matrix:
fast_finish: true
cache:
- '%LOCALAPPDATA%\pip\Cache'
artifacts:
- path: pillow\*.egg
name: egg
- path: pillow\*.whl
name: wheel
before_deploy:
- cd c:\pillow
- '%PYTHON%\%EXECUTABLE% -m pip wheel -v -C raqm=vendor -C fribidi=vendor .'
- ps: Get-ChildItem .\*.whl | % { Push-AppveyorArtifact $_.FullName -FileName $_.Name }
deploy:
provider: S3
region: us-west-2
access_key_id: AKIAIRAXC62ZNTVQJMOQ
secret_access_key:
secure: Hwb6klTqtBeMgxAjRoDltiiqpuH8xbwD4UooDzBSiCWXjuFj1lyl4kHgHwTCCGqi
bucket: pillow-nightly
folder: win/$(APPVEYOR_BUILD_NUMBER)/
artifact: /.*egg|wheel/
on:
APPVEYOR_REPO_NAME: python-pillow/Pillow
branch: main
deploy: YES
# Uncomment the following lines to get RDP access after the build/test and block for
# up to the timeout limit (~1hr)
#
#on_finish:
#- ps: $blockRdp = $true; iex ((new-object net.webclient).DownloadString('https://raw.githubusercontent.com/appveyor/ci/master/scripts/enable-rdp.ps1'))


@ -2,8 +2,4 @@
# gather the coverage data
python3 -m pip install coverage
if [[ $MATRIX_DOCKER ]]; then
python3 -m coverage xml --ignore-errors
else
python3 -m coverage xml
fi
python3 -m coverage xml


@ -3,8 +3,5 @@
set -e
python3 -m coverage erase
if [ $(uname) == "Darwin" ]; then
export CPPFLAGS="-I/usr/local/miniconda/include";
fi
make clean
make install-coverage


@ -2,12 +2,12 @@
aptget_update()
{
if [ ! -z $1 ]; then
if [ -n "$1" ]; then
echo ""
echo "Retrying apt-get update..."
echo ""
fi
output=`sudo apt-get update 2>&1`
output=$(sudo apt-get update 2>&1)
echo "$output"
if [[ $output == *[WE]:\ * ]]; then
return 1
@ -21,7 +21,7 @@ set -e
if [[ $(uname) != CYGWIN* ]]; then
sudo apt-get -qq install libfreetype6-dev liblcms2-dev python3-tk\
ghostscript libjpeg-turbo-progs libopenjp2-7-dev\
ghostscript libjpeg-turbo8-dev libopenjp2-7-dev\
cmake meson imagemagick libharfbuzz-dev libfribidi-dev\
sway wl-clipboard libopenblas-dev
fi


@ -1 +1 @@
cibuildwheel==2.21.2
cibuildwheel==2.23.0


@ -1,4 +1,4 @@
mypy==1.11.2
mypy==1.15.0
IceSpringPySideStubs-PyQt6
IceSpringPySideStubs-PySide6
ipython


@ -9,7 +9,7 @@ Please send a pull request to the `main` branch. Please include [documentation](
- Fork the Pillow repository.
- Create a branch from `main`.
- Develop bug fixes, features, tests, etc.
- Run the test suite. You can enable GitHub Actions (https://github.com/MY-USERNAME/Pillow/actions) and [AppVeyor](https://ci.appveyor.com/projects/new) on your repo to catch test failures prior to the pull request, and [Codecov](https://codecov.io/gh) to see if the changed code is covered by tests.
- Run the test suite. You can enable GitHub Actions (https://github.com/MY-USERNAME/Pillow/actions) on your repo to catch test failures prior to the pull request, and [Codecov](https://codecov.io/gh) to see if the changed code is covered by tests.
- Create a pull request to pull the changes from your branch to the Pillow `main`.
### Guidelines
@ -17,9 +17,8 @@ Please send a pull request to the `main` branch. Please include [documentation](
- Separate code commits from reformatting commits.
- Provide tests for any newly added code.
- Follow PEP 8.
- When committing only documentation changes please include `[ci skip]` in the commit message to avoid running tests on AppVeyor.
- When committing only documentation changes please include `[ci skip]` in the commit message to avoid running extra tests.
- Include [release notes](https://github.com/python-pillow/Pillow/tree/main/docs/releasenotes) as needed or appropriate with your bug fixes, feature additions and tests.
- Do not add to the [changelog](https://github.com/python-pillow/Pillow/blob/main/CHANGES.rst) for proposed changes, as that is updated after changes are merged.
## Reporting Issues

.github/mergify.yml vendored

@ -9,7 +9,6 @@ pull_request_rules:
- status-success=Windows Test Successful
- status-success=MinGW
- status-success=Cygwin Test Successful
- status-success=continuous-integration/appveyor/pr
actions:
merge:
method: merge


@ -3,18 +3,19 @@ tag-template: "$NEXT_MINOR_VERSION"
change-template: '- $TITLE #$NUMBER [@$AUTHOR]'
categories:
- title: "Dependencies"
label: "Dependency"
- title: "Removals"
label: "Removal"
- title: "Deprecations"
label: "Deprecation"
- title: "Documentation"
label: "Documentation"
- title: "Removals"
label: "Removal"
- title: "Dependencies"
label: "Dependency"
- title: "Testing"
label: "Testing"
- title: "Type hints"
label: "Type hints"
- title: "Other changes"
exclude-labels:
- "changelog: skip"
@ -23,6 +24,4 @@ template: |
https://pillow.readthedocs.io/en/stable/releasenotes/$NEXT_MINOR_VERSION.html
## Changes
$CHANGES

.github/renovate.json vendored

@ -1,7 +1,7 @@
{
"$schema": "https://docs.renovatebot.com/renovate-schema.json",
"extends": [
"config:base"
"config:recommended"
],
"labels": [
"Dependency"
@ -9,9 +9,13 @@
"packageRules": [
{
"groupName": "github-actions",
"matchManagers": ["github-actions"],
"separateMajorMinor": "false"
"matchManagers": [
"github-actions"
],
"separateMajorMinor": false
}
],
"schedule": ["on the 3rd day of the month"]
"schedule": [
"on the 3rd day of the month"
]
}


@ -33,6 +33,8 @@ jobs:
steps:
- uses: actions/checkout@v4
with:
persist-credentials: false
- name: Set up Python
uses: actions/setup-python@v5


@ -21,6 +21,8 @@ jobs:
steps:
- uses: actions/checkout@v4
with:
persist-credentials: false
- name: pre-commit cache
uses: actions/cache@v4


@ -8,17 +8,13 @@ fi
brew install \
freetype \
ghostscript \
jpeg-turbo \
libimagequant \
libjpeg \
libraqm \
libtiff \
little-cms2 \
openjpeg \
webp
if [[ "$ImageOS" == "macos13" ]]; then
brew install --ignore-dependencies libraqm
else
brew install libraqm
fi
export PKG_CONFIG_PATH="/usr/local/opt/openblas/lib/pkgconfig"
python3 -m pip install coverage


@ -6,7 +6,7 @@ on:
workflow_dispatch:
permissions:
issues: write
contents: read
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
@ -15,6 +15,8 @@ concurrency:
jobs:
stale:
if: github.repository_owner == 'python-pillow'
permissions:
issues: write
runs-on: ubuntu-latest


@ -48,9 +48,11 @@ jobs:
- name: Checkout Pillow
uses: actions/checkout@v4
with:
persist-credentials: false
- name: Install Cygwin
uses: cygwin/cygwin-install-action@v4
uses: cygwin/cygwin-install-action@v5
with:
packages: >
gcc-g++
@ -131,11 +133,12 @@ jobs:
- name: After success
run: |
bash.exe .ci/after_success.sh
rm C:\cygwin\bin\bash.EXE
- name: Upload coverage
uses: codecov/codecov-action@v4
uses: codecov/codecov-action@v5
with:
file: ./coverage.xml
files: ./coverage.xml
flags: GHA_Cygwin
name: Cygwin Python 3.${{ matrix.python-minor-version }}
token: ${{ secrets.CODECOV_ORG_TOKEN }}


@ -29,42 +29,46 @@ concurrency:
jobs:
build:
runs-on: ubuntu-latest
runs-on: ${{ matrix.os }}
strategy:
fail-fast: false
matrix:
os: ["ubuntu-latest"]
docker: [
# Run slower jobs first to give them a headstart and reduce waiting time
ubuntu-22.04-jammy-arm64v8,
ubuntu-24.04-noble-ppc64le,
ubuntu-24.04-noble-s390x,
# Then run the remainder
alpine,
amazon-2-amd64,
amazon-2023-amd64,
arch,
centos-stream-9-amd64,
centos-stream-10-amd64,
debian-12-bookworm-x86,
debian-12-bookworm-amd64,
fedora-39-amd64,
fedora-40-amd64,
fedora-41-amd64,
gentoo,
ubuntu-22.04-jammy-amd64,
ubuntu-24.04-noble-amd64,
]
dockerTag: [main]
include:
- docker: "ubuntu-22.04-jammy-arm64v8"
qemu-arch: "aarch64"
- docker: "ubuntu-24.04-noble-ppc64le"
os: "ubuntu-22.04"
qemu-arch: "ppc64le"
dockerTag: main
- docker: "ubuntu-24.04-noble-s390x"
os: "ubuntu-22.04"
qemu-arch: "s390x"
dockerTag: main
- docker: "ubuntu-24.04-noble-arm64v8"
os: "ubuntu-24.04-arm"
dockerTag: main
name: ${{ matrix.docker }}
steps:
- uses: actions/checkout@v4
with:
persist-credentials: false
- name: Build system information
run: python3 .github/workflows/system-info.py
@ -87,22 +91,21 @@ jobs:
- name: After success
run: |
PATH="$PATH:~/.local/bin"
docker start pillow_container
sudo docker cp pillow_container:/Pillow /Pillow
sudo chown -R runner /Pillow
pil_path=`docker exec pillow_container /vpy3/bin/python -c 'import os, PIL;print(os.path.realpath(os.path.dirname(PIL.__file__)))'`
docker stop pillow_container
sudo mkdir -p $pil_path
sudo cp src/PIL/*.py $pil_path
cd /Pillow
.ci/after_success.sh
env:
MATRIX_DOCKER: ${{ matrix.docker }}
- name: Upload coverage
uses: codecov/codecov-action@v4
uses: codecov/codecov-action@v5
with:
flags: GHA_Docker
name: ${{ matrix.docker }}
gcov: true
token: ${{ secrets.CODECOV_ORG_TOKEN }}
success:


@ -46,6 +46,8 @@ jobs:
steps:
- name: Checkout Pillow
uses: actions/checkout@v4
with:
persist-credentials: false
- name: Set up shell
run: echo "C:\msys64\usr\bin\" >> $env:GITHUB_PATH
@ -64,18 +66,18 @@ jobs:
mingw-w64-x86_64-libtiff \
mingw-w64-x86_64-libwebp \
mingw-w64-x86_64-openjpeg2 \
mingw-w64-x86_64-python3-numpy \
mingw-w64-x86_64-python3-olefile \
mingw-w64-x86_64-python3-setuptools \
mingw-w64-x86_64-python-numpy \
mingw-w64-x86_64-python-olefile \
mingw-w64-x86_64-python-pip \
mingw-w64-x86_64-python-pytest \
mingw-w64-x86_64-python-pytest-cov \
mingw-w64-x86_64-python-pytest-timeout \
mingw-w64-x86_64-python-pyqt6
python3 -m ensurepip
python3 -m pip install pyroma pytest pytest-cov pytest-timeout
pushd depends && ./install_extra_test_images.sh && popd
- name: Build Pillow
run: SETUPTOOLS_USE_DISTUTILS="stdlib" CFLAGS="-coverage" python3 -m pip install .
run: CFLAGS="-coverage" python3 -m pip install .
- name: Test Pillow
run: |
@ -83,9 +85,9 @@ jobs:
.ci/test.sh
- name: Upload coverage
uses: codecov/codecov-action@v4
uses: codecov/codecov-action@v5
with:
file: ./coverage.xml
files: ./coverage.xml
flags: GHA_Windows
name: "MSYS2 MinGW"
token: ${{ secrets.CODECOV_ORG_TOKEN }}


@ -40,6 +40,8 @@ jobs:
steps:
- uses: actions/checkout@v4
with:
persist-credentials: false
- name: Build system information
run: python3 .github/workflows/system-info.py


@ -31,29 +31,38 @@ env:
jobs:
build:
runs-on: windows-latest
runs-on: ${{ matrix.os }}
strategy:
fail-fast: false
matrix:
python-version: ["pypy3.10", "3.9", "3.10", "3.11", "3.12", "3.13"]
python-version: ["pypy3.11", "pypy3.10", "3.10", "3.11", "3.12", "3.13", "3.14"]
architecture: ["x64"]
os: ["windows-latest"]
include:
# Test the oldest Python on 32-bit
- { python-version: "3.9", architecture: "x86", os: "windows-2019" }
timeout-minutes: 30
name: Python ${{ matrix.python-version }}
name: Python ${{ matrix.python-version }} (${{ matrix.architecture }})
steps:
- name: Checkout Pillow
uses: actions/checkout@v4
with:
persist-credentials: false
- name: Checkout cached dependencies
uses: actions/checkout@v4
with:
persist-credentials: false
repository: python-pillow/pillow-depends
path: winbuild\depends
- name: Checkout extra test images
uses: actions/checkout@v4
with:
persist-credentials: false
repository: python-pillow/test-images
path: Tests\test-images
@ -63,22 +72,21 @@ jobs:
with:
python-version: ${{ matrix.python-version }}
allow-prereleases: true
architecture: ${{ matrix.architecture }}
cache: pip
cache-dependency-path: ".github/workflows/test-windows.yml"
- name: Print build system information
run: python3 .github/workflows/system-info.py
- name: Install Python dependencies
run: >
python3 -m pip install
coverage>=7.4.2
defusedxml
olefile
pyroma
pytest
pytest-cov
pytest-timeout
- name: Upgrade pip
run: |
python3 -m pip install --upgrade pip
- name: Install CPython dependencies
if: "!contains(matrix.python-version, 'pypy') && matrix.architecture != 'x86'"
run: |
python3 -m pip install PyQt6
- name: Install dependencies
id: install
@ -178,7 +186,7 @@ jobs:
- name: Build Pillow
run: |
$FLAGS="-C raqm=vendor -C fribidi=vendor"
cmd /c "winbuild\build\build_env.cmd && $env:pythonLocation\python.exe -m pip install -v $FLAGS ."
cmd /c "winbuild\build\build_env.cmd && $env:pythonLocation\python.exe -m pip install -v $FLAGS .[tests]"
& $env:pythonLocation\python.exe selftest.py --installed
shell: pwsh
@ -213,9 +221,9 @@ jobs:
shell: pwsh
- name: Upload coverage
uses: codecov/codecov-action@v4
uses: codecov/codecov-action@v5
with:
file: ./coverage.xml
files: ./coverage.xml
flags: GHA_Windows
name: ${{ runner.os }} Python ${{ matrix.python-version }}
token: ${{ secrets.CODECOV_ORG_TOKEN }}


@ -41,7 +41,10 @@ jobs:
"ubuntu-latest",
]
python-version: [
"pypy3.11",
"pypy3.10",
"3.14",
"3.13t",
"3.13",
"3.12",
"3.11",
@ -52,21 +55,22 @@ jobs:
- { python-version: "3.11", PYTHONOPTIMIZE: 1, REVERSE: "--reverse" }
- { python-version: "3.10", PYTHONOPTIMIZE: 2 }
# Free-threaded
- { os: "ubuntu-latest", python-version: "3.13-dev", disable-gil: true }
- { python-version: "3.13t", disable-gil: true }
# M1 only available for 3.10+
- { os: "macos-13", python-version: "3.9" }
exclude:
- { os: "macos-latest", python-version: "3.9" }
runs-on: ${{ matrix.os }}
name: ${{ matrix.os }} Python ${{ matrix.python-version }} ${{ matrix.disable-gil && 'free-threaded' || '' }}
name: ${{ matrix.os }} Python ${{ matrix.python-version }}
steps:
- uses: actions/checkout@v4
with:
persist-credentials: false
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
if: "${{ !matrix.disable-gil }}"
uses: Quansight-Labs/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
allow-prereleases: true
@ -75,13 +79,6 @@ jobs:
".ci/*.sh"
"pyproject.toml"
- name: Set up Python ${{ matrix.python-version }} (free-threaded)
uses: deadsnakes/action@v3.2.0
if: "${{ matrix.disable-gil }}"
with:
python-version: ${{ matrix.python-version }}
nogil: ${{ matrix.disable-gil }}
- name: Set PYTHON_GIL
if: "${{ matrix.disable-gil }}"
run: |
@ -114,7 +111,7 @@ jobs:
GHA_PYTHON_VERSION: ${{ matrix.python-version }}
- name: Register gcc problem matcher
if: "matrix.os == 'ubuntu-latest' && matrix.python-version == '3.12'"
if: "matrix.os == 'ubuntu-latest' && matrix.python-version == '3.13'"
run: echo "::add-matcher::.github/problem-matchers/gcc.json"
- name: Build
@ -154,11 +151,10 @@ jobs:
.ci/after_success.sh
- name: Upload coverage
uses: codecov/codecov-action@v4
uses: codecov/codecov-action@v5
with:
flags: ${{ matrix.os == 'ubuntu-latest' && 'GHA_Ubuntu' || 'GHA_macOS' }}
name: ${{ matrix.os }} Python ${{ matrix.python-version }}
gcov: true
token: ${{ secrets.CODECOV_ORG_TOKEN }}
success:


@ -1,11 +1,33 @@
#!/bin/bash
# Define custom utilities
# Test for macOS with [ -n "$IS_MACOS" ]
if [ -z "$IS_MACOS" ]; then
export MB_ML_LIBC=${AUDITWHEEL_POLICY::9}
export MB_ML_VER=${AUDITWHEEL_POLICY:9}
# Setup that needs to be done before multibuild utils are invoked
PROJECTDIR=$(pwd)
if [[ "$(uname -s)" == "Darwin" ]]; then
# Safety check - macOS builds require that CIBW_ARCHS is set, and that it
# only contains a single value (even though cibuildwheel allows multiple
# values in CIBW_ARCHS).
if [[ -z "$CIBW_ARCHS" ]]; then
echo "ERROR: Pillow macOS builds require CIBW_ARCHS be defined."
exit 1
fi
if [[ "$CIBW_ARCHS" == *" "* ]]; then
echo "ERROR: Pillow macOS builds only support a single architecture in CIBW_ARCHS."
exit 1
fi
# Build macOS dependencies in `build/darwin`
# Install them into `build/deps/darwin`
WORKDIR=$(pwd)/build/darwin
BUILD_PREFIX=$(pwd)/build/deps/darwin
else
# Build prefix will default to /usr/local
WORKDIR=$(pwd)/build
MB_ML_LIBC=${AUDITWHEEL_POLICY::9}
MB_ML_VER=${AUDITWHEEL_POLICY:9}
fi
export PLAT=$CIBW_ARCHS
PLAT=$CIBW_ARCHS
# Define custom utilities
source wheels/multibuild/common_utils.sh
source wheels/multibuild/library_builders.sh
if [ -z "$IS_MACOS" ]; then
@ -15,103 +37,111 @@ fi
ARCHIVE_SDIR=pillow-depends-main
# Package versions for fresh source builds
FREETYPE_VERSION=2.13.2
HARFBUZZ_VERSION=10.0.1
LIBPNG_VERSION=1.6.44
JPEGTURBO_VERSION=3.0.4
OPENJPEG_VERSION=2.5.2
XZ_VERSION=5.6.3
FREETYPE_VERSION=2.13.3
HARFBUZZ_VERSION=10.4.0
LIBPNG_VERSION=1.6.47
JPEGTURBO_VERSION=3.1.0
OPENJPEG_VERSION=2.5.3
XZ_VERSION=5.6.4
TIFF_VERSION=4.6.0
LCMS2_VERSION=2.16
if [[ -n "$IS_MACOS" ]]; then
GIFLIB_VERSION=5.2.2
else
GIFLIB_VERSION=5.2.1
fi
if [[ -n "$IS_MACOS" ]] || [[ "$MB_ML_VER" != 2014 ]]; then
ZLIB_VERSION=1.3.1
else
ZLIB_VERSION=1.2.8
fi
LIBWEBP_VERSION=1.4.0
LCMS2_VERSION=2.17
ZLIB_NG_VERSION=2.2.4
LIBWEBP_VERSION=1.5.0
BZIP2_VERSION=1.0.8
LIBXCB_VERSION=1.17.0
BROTLI_VERSION=1.1.0
if [[ -n "$IS_MACOS" ]] && [[ "$CIBW_ARCHS" == "x86_64" ]]; then
function build_openjpeg {
local out_dir=$(fetch_unpack https://github.com/uclouvain/openjpeg/archive/v$OPENJPEG_VERSION.tar.gz openjpeg-$OPENJPEG_VERSION.tar.gz)
(cd $out_dir \
&& cmake -DCMAKE_INSTALL_PREFIX=$BUILD_PREFIX -DCMAKE_INSTALL_NAME_DIR=$BUILD_PREFIX/lib . \
&& make install)
touch openjpeg-stamp
}
fi
function build_pkg_config {
if [ -e pkg-config-stamp ]; then return; fi
# This essentially duplicates the Homebrew recipe
CFLAGS="$CFLAGS -Wno-int-conversion" build_simple pkg-config 0.29.2 https://pkg-config.freedesktop.org/releases tar.gz \
--disable-debug --disable-host-tool --with-internal-glib \
--with-pc-path=$BUILD_PREFIX/share/pkgconfig:$BUILD_PREFIX/lib/pkgconfig \
--with-system-include-path=$(xcrun --show-sdk-path --sdk macosx)/usr/include
export PKG_CONFIG=$BUILD_PREFIX/bin/pkg-config
touch pkg-config-stamp
}
function build_zlib_ng {
if [ -e zlib-stamp ]; then return; fi
fetch_unpack https://github.com/zlib-ng/zlib-ng/archive/$ZLIB_NG_VERSION.tar.gz zlib-ng-$ZLIB_NG_VERSION.tar.gz
(cd zlib-ng-$ZLIB_NG_VERSION \
&& ./configure --prefix=$BUILD_PREFIX --zlib-compat \
&& make -j4 \
&& make install)
if [ -n "$IS_MACOS" ]; then
# Ensure that on macOS, the library name is an absolute path, not an
# @rpath, so that delocate picks up the right library (and doesn't need
# DYLD_LIBRARY_PATH to be set). The default Makefile doesn't have an
# option to control the install_name.
install_name_tool -id $BUILD_PREFIX/lib/libz.1.dylib $BUILD_PREFIX/lib/libz.1.dylib
fi
touch zlib-stamp
}
function build_brotli {
local cmake=$(get_modern_cmake)
if [ -e brotli-stamp ]; then return; fi
local out_dir=$(fetch_unpack https://github.com/google/brotli/archive/v$BROTLI_VERSION.tar.gz brotli-$BROTLI_VERSION.tar.gz)
(cd $out_dir \
&& $cmake -DCMAKE_INSTALL_PREFIX=$BUILD_PREFIX -DCMAKE_INSTALL_NAME_DIR=$BUILD_PREFIX/lib . \
&& cmake -DCMAKE_INSTALL_PREFIX=$BUILD_PREFIX -DCMAKE_INSTALL_LIBDIR=$BUILD_PREFIX/lib -DCMAKE_INSTALL_NAME_DIR=$BUILD_PREFIX/lib . \
&& make install)
if [[ "$MB_ML_LIBC" == "manylinux" ]]; then
cp /usr/local/lib64/libbrotli* /usr/local/lib
cp /usr/local/lib64/pkgconfig/libbrotli* /usr/local/lib/pkgconfig
fi
touch brotli-stamp
}
function build_harfbuzz {
if [ -e harfbuzz-stamp ]; then return; fi
python3 -m pip install meson ninja
local out_dir=$(fetch_unpack https://github.com/harfbuzz/harfbuzz/releases/download/$HARFBUZZ_VERSION/$HARFBUZZ_VERSION.tar.xz harfbuzz-$HARFBUZZ_VERSION.tar.xz)
local out_dir=$(fetch_unpack https://github.com/harfbuzz/harfbuzz/releases/download/$HARFBUZZ_VERSION/harfbuzz-$HARFBUZZ_VERSION.tar.xz harfbuzz-$HARFBUZZ_VERSION.tar.xz)
(cd $out_dir \
&& meson setup build --buildtype=release -Dfreetype=enabled -Dglib=disabled)
&& meson setup build --prefix=$BUILD_PREFIX --libdir=$BUILD_PREFIX/lib --buildtype=release -Dfreetype=enabled -Dglib=disabled)
(cd $out_dir/build \
&& meson install)
if [[ "$MB_ML_LIBC" == "manylinux" ]]; then
cp /usr/local/lib64/libharfbuzz* /usr/local/lib
fi
touch harfbuzz-stamp
}
function build {
if [[ -n "$IS_MACOS" ]] && [[ "$CIBW_ARCHS" == "arm64" ]]; then
sudo chown -R runner /usr/local
fi
build_xz
if [ -z "$IS_ALPINE" ] && [ -z "$IS_MACOS" ]; then
if [ -z "$IS_ALPINE" ] && [ -z "$SANITIZER" ] && [ -z "$IS_MACOS" ]; then
yum remove -y zlib-devel
fi
build_new_zlib
build_zlib_ng
build_simple xcb-proto 1.17.0 https://xorg.freedesktop.org/archive/individual/proto
if [ -n "$IS_MACOS" ]; then
build_simple xorgproto 2024.1 https://www.x.org/pub/individual/proto
build_simple libXau 1.0.11 https://www.x.org/pub/individual/lib
build_simple libXau 1.0.12 https://www.x.org/pub/individual/lib
build_simple libpthread-stubs 0.5 https://xcb.freedesktop.org/dist
if [[ "$CIBW_ARCHS" == "arm64" ]]; then
cp /usr/local/share/pkgconfig/xcb-proto.pc /usr/local/lib/pkgconfig
fi
else
sed s/\${pc_sysrootdir\}// /usr/local/share/pkgconfig/xcb-proto.pc > /usr/local/lib/pkgconfig/xcb-proto.pc
sed s/\${pc_sysrootdir\}// $BUILD_PREFIX/share/pkgconfig/xcb-proto.pc > $BUILD_PREFIX/lib/pkgconfig/xcb-proto.pc
fi
build_simple libxcb $LIBXCB_VERSION https://www.x.org/releases/individual/lib
build_libjpeg_turbo
build_tiff
if [ -n "$IS_MACOS" ]; then
# Custom tiff build to include jpeg; by default, configure won't include
# headers/libs in the custom macOS prefix. Explicitly disable webp,
# libdeflate and zstd, because on x86_64 macs, it will pick up the
# Homebrew versions of those libraries from /usr/local.
build_simple tiff $TIFF_VERSION https://download.osgeo.org/libtiff tar.gz \
--with-jpeg-include-dir=$BUILD_PREFIX/include --with-jpeg-lib-dir=$BUILD_PREFIX/lib \
--disable-webp --disable-libdeflate --disable-zstd
else
build_tiff
fi
build_libpng
build_lcms2
build_openjpeg
if [ -f /usr/local/lib64/libopenjp2.so ]; then
cp /usr/local/lib64/libopenjp2.so /usr/local/lib
fi
ORIGINAL_CFLAGS=$CFLAGS
CFLAGS="$CFLAGS -O3 -DNDEBUG"
webp_cflags="-O3 -DNDEBUG"
if [[ -n "$IS_MACOS" ]]; then
CFLAGS="$CFLAGS -Wl,-headerpad_max_install_names"
webp_cflags="$webp_cflags -Wl,-headerpad_max_install_names"
fi
build_libwebp
CFLAGS=$ORIGINAL_CFLAGS
CFLAGS="$CFLAGS $webp_cflags" build_simple libwebp $LIBWEBP_VERSION \
https://storage.googleapis.com/downloads.webmproject.org/releases/webp tar.gz \
--enable-libwebpmux --enable-libwebpdemux
build_brotli
@ -125,31 +155,47 @@ function build {
build_harfbuzz
}
# Perform all dependency builds in the build subfolder.
mkdir -p $WORKDIR
pushd $WORKDIR > /dev/null
# Any stuff that you need to do before you start building the wheels
# Runs in the root directory of this repository.
curl -fsSL -o pillow-depends-main.zip https://github.com/python-pillow/pillow-depends/archive/main.zip
untar pillow-depends-main.zip
if [[ ! -d $WORKDIR/pillow-depends-main ]]; then
if [[ ! -f $PROJECTDIR/pillow-depends-main.zip ]]; then
echo "Download pillow dependency sources..."
curl -fSL -o $PROJECTDIR/pillow-depends-main.zip https://github.com/python-pillow/pillow-depends/archive/main.zip
fi
echo "Unpacking pillow dependency sources..."
untar $PROJECTDIR/pillow-depends-main.zip
fi
if [[ -n "$IS_MACOS" ]]; then
# libtiff and libxcb cause a conflict with building libtiff and libxcb
# libxau and libxdmcp cause an issue on macOS < 11
# remove cairo to fix building harfbuzz on arm64
# remove lcms2 and libpng to fix building openjpeg on arm64
# remove jpeg-turbo to avoid inclusion on arm64
# remove webp and zstd to avoid inclusion on x86_64
# curl from brew requires zstd, use system curl
brew remove --ignore-dependencies libpng libtiff libxcb libxau libxdmcp curl cairo lcms2 zstd
if [[ "$CIBW_ARCHS" == "arm64" ]]; then
brew remove --ignore-dependencies jpeg-turbo
else
brew remove --ignore-dependencies webp
fi
# Homebrew (or similar packaging environments) install can contain some of
# the libraries that we're going to build. However, they may be compiled
# with a MACOSX_DEPLOYMENT_TARGET that doesn't match what we want to use,
# and they may bring in other dependencies that we don't want. The same will
# be true of any other locations on the path. To avoid conflicts, strip the
# path down to the bare minimum (which, on macOS, won't include any
# development dependencies).
export PATH="$BUILD_PREFIX/bin:$(dirname $(which python3)):/usr/bin:/bin:/usr/sbin:/sbin:/Library/Apple/usr/bin"
export CMAKE_PREFIX_PATH=$BUILD_PREFIX
brew install pkg-config
# Ensure the basic structure of the build prefix directory exists.
mkdir -p "$BUILD_PREFIX/bin"
mkdir -p "$BUILD_PREFIX/lib"
# Ensure pkg-config is available
build_pkg_config
# Ensure cmake is available
python3 -m pip install cmake
fi
wrap_wheel_builder build
# Return to the project root to finish the build
popd > /dev/null
# Append licenses
for filename in wheels/dependency_licenses/*; do
echo -e "\n\n----\n\n$(basename $filename | cut -f 1 -d '.')\n" | cat >> LICENSE


@ -11,6 +11,9 @@ if ("$venv" -like "*\cibw-run-*\pp*-win_amd64\*") {
$env:path += ";$pillow\winbuild\build\bin\"
& "$venv\Scripts\activate.ps1"
& reg add "HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Image File Execution Options\python.exe" /v "GlobalFlag" /t REG_SZ /d "0x02000000" /f
if ("$venv" -like "*\cibw-run-*-win_amd64\*") {
& python -m pip install numpy
}
cd $pillow
& python -VV
if (!$?) { exit $LASTEXITCODE }


@ -1,12 +1,24 @@
#!/bin/bash
set -e
# Ensure fribidi is installed by the system.
if [[ "$OSTYPE" == "darwin"* ]]; then
brew install fribidi
export PKG_CONFIG_PATH="/usr/local/opt/openblas/lib/pkgconfig"
if [ -f /opt/homebrew/lib/libfribidi.dylib ]; then
sudo cp /opt/homebrew/lib/libfribidi.dylib /usr/local/lib
# If Homebrew is on the path during the build, it may leak into the wheels.
# However, we *do* need Homebrew to provide a copy of fribidi for
# testing purposes so that we can verify the fribidi shim works as expected.
if [[ "$(uname -m)" == "x86_64" ]]; then
HOMEBREW_PREFIX=/usr/local
else
HOMEBREW_PREFIX=/opt/homebrew
fi
$HOMEBREW_PREFIX/bin/brew install fribidi
# Add the lib folder for fribidi so that the vendored library can be found.
# Don't use $HOMEWBREW_PREFIX/lib directly - use the lib folder where the
# installed copy of fribidi is cellared. This ensures we don't pick up the
# Homebrew version of any other library that we're dependent on (most notably,
# freetype).
export DYLD_LIBRARY_PATH=$(dirname $(realpath $HOMEBREW_PREFIX/lib/libfribidi.dylib))
elif [ "${AUDITWHEEL_POLICY::9}" == "musllinux" ]; then
apk add curl fribidi
else


@ -13,6 +13,7 @@ on:
paths:
- ".ci/requirements-cibw.txt"
- ".github/workflows/wheel*"
- "pyproject.toml"
- "setup.py"
- "wheels/*"
- "winbuild/build_prepare.py"
@ -23,6 +24,7 @@ on:
paths:
- ".ci/requirements-cibw.txt"
- ".github/workflows/wheel*"
- "pyproject.toml"
- "setup.py"
- "wheels/*"
- "winbuild/build_prepare.py"
@ -40,61 +42,7 @@ env:
FORCE_COLOR: 1
jobs:
build-1-QEMU-emulated-wheels:
if: github.event_name != 'schedule' && github.event_name != 'workflow_dispatch'
name: aarch64 ${{ matrix.python-version }} ${{ matrix.spec }}
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
python-version:
- pp310
- cp3{9,10,11}
- cp3{12,13}
spec:
- manylinux2014
- manylinux_2_28
- musllinux
exclude:
- { python-version: pp310, spec: musllinux }
steps:
- uses: actions/checkout@v4
with:
submodules: true
- uses: actions/setup-python@v5
with:
python-version: "3.x"
# https://github.com/docker/setup-qemu-action
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
- name: Install cibuildwheel
run: |
python3 -m pip install -r .ci/requirements-cibw.txt
- name: Build wheels
run: |
python3 -m cibuildwheel --output-dir wheelhouse
env:
# Build only the currently selected Linux architecture (so we can
# parallelise for speed).
CIBW_ARCHS: "aarch64"
# Likewise, select only one Python version per job to speed this up.
CIBW_BUILD: "${{ matrix.python-version }}-${{ matrix.spec == 'musllinux' && 'musllinux' || 'manylinux' }}*"
CIBW_PRERELEASE_PYTHONS: True
# Extra options for manylinux.
CIBW_MANYLINUX_AARCH64_IMAGE: ${{ matrix.spec }}
CIBW_MANYLINUX_PYPY_AARCH64_IMAGE: ${{ matrix.spec }}
- uses: actions/upload-artifact@v4
with:
name: dist-qemu-${{ matrix.python-version }}-${{ matrix.spec }}
path: ./wheelhouse/*.whl
build-2-native-wheels:
build-native-wheels:
if: github.event_name != 'schedule' || github.repository_owner == 'python-pillow'
name: ${{ matrix.name }}
runs-on: ${{ matrix.os }}
@ -115,7 +63,7 @@ jobs:
- name: "macOS 10.15 x86_64"
os: macos-13
cibw_arch: x86_64
build: "pp310*"
build: "pp3*"
macosx_deployment_target: "10.15"
- name: "macOS arm64"
os: macos-latest
@ -129,9 +77,18 @@ jobs:
cibw_arch: x86_64
build: "*manylinux*"
manylinux: "manylinux_2_28"
- name: "manylinux2014 and musllinux aarch64"
os: ubuntu-24.04-arm
cibw_arch: aarch64
- name: "manylinux_2_28 aarch64"
os: ubuntu-24.04-arm
cibw_arch: aarch64
build: "*manylinux*"
manylinux: "manylinux_2_28"
steps:
- uses: actions/checkout@v4
with:
persist-credentials: false
submodules: true
- uses: actions/setup-python@v5
@ -148,10 +105,12 @@ jobs:
env:
CIBW_ARCHS: ${{ matrix.cibw_arch }}
CIBW_BUILD: ${{ matrix.build }}
CIBW_FREE_THREADED_SUPPORT: True
CIBW_ENABLE: cpython-prerelease cpython-freethreading pypy
CIBW_MANYLINUX_AARCH64_IMAGE: ${{ matrix.manylinux }}
CIBW_MANYLINUX_PYPY_AARCH64_IMAGE: ${{ matrix.manylinux }}
CIBW_MANYLINUX_PYPY_X86_64_IMAGE: ${{ matrix.manylinux }}
CIBW_MANYLINUX_X86_64_IMAGE: ${{ matrix.manylinux }}
CIBW_PRERELEASE_PYTHONS: True
CIBW_SKIP: pp39-*
MACOSX_DEPLOYMENT_TARGET: ${{ matrix.macosx_deployment_target }}
- uses: actions/upload-artifact@v4
@ -172,10 +131,13 @@ jobs:
- cibw_arch: ARM64
steps:
- uses: actions/checkout@v4
with:
persist-credentials: false
- name: Checkout extra test images
uses: actions/checkout@v4
with:
persist-credentials: false
repository: python-pillow/test-images
path: Tests\test-images
@ -222,8 +184,8 @@ jobs:
CIBW_ARCHS: ${{ matrix.cibw_arch }}
CIBW_BEFORE_ALL: "{package}\\winbuild\\build\\build_dep_all.cmd"
CIBW_CACHE_PATH: "C:\\cibw"
CIBW_FREE_THREADED_SUPPORT: True
CIBW_PRERELEASE_PYTHONS: True
CIBW_ENABLE: cpython-prerelease cpython-freethreading pypy
CIBW_SKIP: pp39-*
CIBW_TEST_SKIP: "*-win_arm64"
CIBW_TEST_COMMAND: 'docker run --rm
-v {project}:C:\pillow
@ -251,13 +213,13 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
persist-credentials: false
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: "3.x"
cache: pip
cache-dependency-path: "Makefile"
- run: make sdist
@ -268,7 +230,7 @@ jobs:
scientific-python-nightly-wheels-publish:
if: github.repository_owner == 'python-pillow' && (github.event_name == 'schedule' || github.event_name == 'workflow_dispatch')
needs: [build-2-native-wheels, windows]
needs: [build-native-wheels, windows]
runs-on: ubuntu-latest
name: Upload wheels to scientific-python-nightly-wheels
steps:
@ -285,7 +247,7 @@ jobs:
pypi-publish:
if: github.repository_owner == 'python-pillow' && github.event_name == 'push' && startsWith(github.ref, 'refs/tags')
needs: [build-1-QEMU-emulated-wheels, build-2-native-wheels, windows, sdist]
needs: [build-native-wheels, windows, sdist]
runs-on: ubuntu-latest
name: Upload release to PyPI
environment:
@ -301,3 +263,5 @@ jobs:
merge-multiple: true
- name: Publish to PyPI
uses: pypa/gh-action-pypi-publish@release/v1
with:
attestations: true

.gitignore vendored

@ -19,6 +19,7 @@ lib64/
parts/
sdist/
var/
wheelhouse/
*.egg-info/
.installed.cfg
*.egg
@ -90,5 +91,9 @@ Tests/images/msp
Tests/images/picins
Tests/images/sunraster
# Test and dependency downloads
pillow-depends-main.zip
pillow-test-images.zip
# pyinstaller
*.spec


@ -1,17 +1,17 @@
repos:
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.6.3
rev: v0.9.9
hooks:
- id: ruff
args: [--exit-non-zero-on-fix]
- repo: https://github.com/psf/black-pre-commit-mirror
rev: 24.8.0
rev: 25.1.0
hooks:
- id: black
- repo: https://github.com/PyCQA/bandit
rev: 1.7.9
rev: 1.8.3
hooks:
- id: bandit
args: [--severity-level=high]
@ -24,7 +24,7 @@ repos:
exclude: (Makefile$|\.bat$|\.cmake$|\.eps$|\.fits$|\.gd$|\.opt$)
- repo: https://github.com/pre-commit/mirrors-clang-format
rev: v18.1.8
rev: v19.1.7
hooks:
- id: clang-format
types: [c]
@ -36,7 +36,7 @@ repos:
- id: rst-backticks
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.6.0
rev: v5.0.0
hooks:
- id: check-executables-have-shebangs
- id: check-shebang-scripts-are-executable
@ -50,29 +50,35 @@ repos:
exclude: ^.github/.*TEMPLATE|^Tests/(fonts|images)/
- repo: https://github.com/python-jsonschema/check-jsonschema
rev: 0.29.2
rev: 0.31.2
hooks:
- id: check-github-workflows
- id: check-readthedocs
- id: check-renovate
- repo: https://github.com/woodruffw/zizmor-pre-commit
rev: v1.4.1
hooks:
- id: zizmor
- repo: https://github.com/sphinx-contrib/sphinx-lint
rev: v0.9.1
rev: v1.0.0
hooks:
- id: sphinx-lint
- repo: https://github.com/tox-dev/pyproject-fmt
rev: 2.2.1
rev: v2.5.1
hooks:
- id: pyproject-fmt
- repo: https://github.com/abravalheri/validate-pyproject
rev: v0.19
rev: v0.23
hooks:
- id: validate-pyproject
additional_dependencies: [trove-classifiers>=2024.10.12]
- repo: https://github.com/tox-dev/tox-ini-fmt
rev: 1.3.1
rev: 1.5.0
hooks:
- id: tox-ini-fmt


@ -1,5 +1,8 @@
version: 2
sphinx:
configuration: docs/conf.py
formats: [pdf]
build:


@ -2,9 +2,43 @@
Changelog (Pillow)
==================
11.0.0 (unreleased)
11.1.0 and newer
----------------
See GitHub Releases:
- https://github.com/python-pillow/Pillow/releases
11.0.0 (2024-10-15)
-------------------
- Update licence to MIT-CMU #8460
[hugovk]
- Conditionally define ImageCms type hint to avoid requiring core #8197
[radarhere]
- Support writing LONG8 offsets in AppendingTiffWriter #8417
[radarhere]
- Use ImageFile.MAXBLOCK when saving TIFF images #8461
[radarhere]
- Do not close provided file handles with libtiff when saving #8458
[radarhere]
- Support ImageFilter.BuiltinFilter for I;16* images #8438
[radarhere]
- Use ImagingCore.ptr instead of ImagingCore.id #8341
[homm, radarhere, hugovk]
- Updated EPS mode when opening images without transparency #8281
[Yay295, radarhere]
- Use transparency when combining P frames from APNGs #8443
[radarhere]
- Support all resampling filters when resizing I;16* images #8422
[radarhere]


@ -5,9 +5,9 @@ The Python Imaging Library (PIL) is
Pillow is the friendly PIL fork. It is
Copyright © 2010-2024 by Jeffrey A. Clark and contributors
Copyright © 2010 by Jeffrey A. Clark and contributors
Like PIL, Pillow is licensed under the open source HPND License:
Like PIL, Pillow is licensed under the open source MIT-CMU License:
By obtaining, using, and/or copying this software and/or its associated
documentation, you agree that you have read, understood, and will comply


@ -20,7 +20,6 @@ graft docs
graft _custom_build
# build/src control detritus
exclude .appveyor.yml
exclude .clang-format
exclude .coveragerc
exclude .editorconfig


@ -17,12 +17,10 @@ coverage:
.PHONY: doc
.PHONY: html
doc html:
python3 -c "import PIL" > /dev/null 2>&1 || python3 -m pip install .
$(MAKE) -C docs html
.PHONY: htmlview
htmlview:
python3 -c "import PIL" > /dev/null 2>&1 || python3 -m pip install .
$(MAKE) -C docs htmlview
.PHONY: doccheck


@ -42,9 +42,6 @@ As of 2019, Pillow development is
<a href="https://github.com/python-pillow/Pillow/actions/workflows/test-docker.yml"><img
alt="GitHub Actions build status (Test Docker)"
src="https://github.com/python-pillow/Pillow/workflows/Test%20Docker/badge.svg"></a>
<a href="https://ci.appveyor.com/project/python-pillow/Pillow"><img
alt="AppVeyor CI build status (Windows)"
src="https://img.shields.io/appveyor/build/python-pillow/Pillow/main.svg?label=Windows%20build"></a>
<a href="https://github.com/python-pillow/Pillow/actions/workflows/wheels.yml"><img
alt="GitHub Actions build status (Wheels)"
src="https://github.com/python-pillow/Pillow/workflows/Wheels/badge.svg"></a>
@ -107,7 +104,7 @@ The core image library is designed for fast access to data stored in a few basic
- [Issues](https://github.com/python-pillow/Pillow/issues)
- [Pull requests](https://github.com/python-pillow/Pillow/pulls)
- [Release notes](https://pillow.readthedocs.io/en/stable/releasenotes/index.html)
- [Changelog](https://github.com/python-pillow/Pillow/blob/main/CHANGES.rst)
- [Changelog](https://github.com/python-pillow/Pillow/releases)
- [Pre-fork](https://github.com/python-pillow/Pillow/blob/main/CHANGES.rst#pre-fork)
## Report a Vulnerability


@ -9,10 +9,9 @@ Released quarterly on January 2nd, April 1st, July 1st and October 15th.
* [ ] Open a release ticket e.g. https://github.com/python-pillow/Pillow/issues/3154
* [ ] Develop and prepare release in `main` branch.
* [ ] Check [GitHub Actions](https://github.com/python-pillow/Pillow/actions) and [AppVeyor](https://ci.appveyor.com/project/python-pillow/Pillow) to confirm passing tests in `main` branch.
* [ ] Check [GitHub Actions](https://github.com/python-pillow/Pillow/actions) to confirm passing tests in `main` branch.
* [ ] Check that all the wheel builds pass the tests in the [GitHub Actions "Wheels" workflow](https://github.com/python-pillow/Pillow/actions/workflows/wheels.yml) jobs by manually triggering them.
* [ ] In compliance with [PEP 440](https://peps.python.org/pep-0440/), update version identifier in `src/PIL/_version.py`
* [ ] Update `CHANGES.rst`.
* [ ] Run pre-release check via `make release-test` in a freshly cloned repo.
* [ ] Create branch and tag for release e.g.:
```bash
@ -34,13 +33,12 @@ Released quarterly on January 2nd, April 1st, July 1st and October 15th.
Released as needed for security, installation or critical bug fixes.
* [ ] Make necessary changes in `main` branch.
* [ ] Update `CHANGES.rst`.
* [ ] Check out release branch e.g.:
```bash
git checkout -t remotes/origin/5.2.x
```
* [ ] Cherry pick individual commits from `main` branch to release branch e.g. `5.2.x`, then `git push`.
* [ ] Check [GitHub Actions](https://github.com/python-pillow/Pillow/actions) and [AppVeyor](https://ci.appveyor.com/project/python-pillow/Pillow) to confirm passing tests in release branch e.g. `5.2.x`.
* [ ] Check [GitHub Actions](https://github.com/python-pillow/Pillow/actions) to confirm passing tests in release branch e.g. `5.2.x`.
* [ ] In compliance with [PEP 440](https://peps.python.org/pep-0440/), update version identifier in `src/PIL/_version.py`
* [ ] Run pre-release check via `make release-test`.
* [ ] Create tag for release e.g.:


@ -3,26 +3,25 @@ from __future__ import annotations
import zlib
from io import BytesIO
import pytest
from PIL import Image, ImageFile, PngImagePlugin
TEST_FILE = "Tests/images/png_decompression_dos.png"
def test_ignore_dos_text() -> None:
ImageFile.LOAD_TRUNCATED_IMAGES = True
def test_ignore_dos_text(monkeypatch: pytest.MonkeyPatch) -> None:
monkeypatch.setattr(ImageFile, "LOAD_TRUNCATED_IMAGES", True)
try:
im = Image.open(TEST_FILE)
with Image.open(TEST_FILE) as im:
im.load()
finally:
ImageFile.LOAD_TRUNCATED_IMAGES = False
assert isinstance(im, PngImagePlugin.PngImageFile)
for s in im.text.values():
assert len(s) < 1024 * 1024, "Text chunk larger than 1M"
assert isinstance(im, PngImagePlugin.PngImageFile)
for s in im.text.values():
assert len(s) < 1024 * 1024, "Text chunk larger than 1M"
for s in im.info.values():
assert len(s) < 1024 * 1024, "Text chunk larger than 1M"
for s in im.info.values():
assert len(s) < 1024 * 1024, "Text chunk larger than 1M"
def test_dos_text() -> None:

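The hunk above replaces the manual try/finally reset of ImageFile.LOAD_TRUNCATED_IMAGES with pytest's monkeypatch fixture, which undoes the change automatically once the test finishes. A minimal sketch of that pattern, reusing the TEST_FILE path from the hunk:

```python
import pytest
from PIL import Image, ImageFile

TEST_FILE = "Tests/images/png_decompression_dos.png"  # path taken from the hunk above


def test_loads_truncated_png(monkeypatch: pytest.MonkeyPatch) -> None:
    # monkeypatch restores the original attribute value when the test ends,
    # so no try/finally bookkeeping is needed.
    monkeypatch.setattr(ImageFile, "LOAD_TRUNCATED_IMAGES", True)
    with Image.open(TEST_FILE) as im:
        im.load()
```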

@ -34,6 +34,7 @@ def test_wheel_features() -> None:
"fribidi",
"harfbuzz",
"libjpeg_turbo",
"zlib_ng",
"xcb",
}


@ -9,7 +9,6 @@ import os
import shutil
import subprocess
import sys
import sysconfig
import tempfile
from collections.abc import Sequence
from functools import lru_cache
@ -140,18 +139,11 @@ def assert_image_similar_tofile(
filename: str,
epsilon: float,
msg: str | None = None,
mode: str | None = None,
) -> None:
with Image.open(filename) as img:
if mode:
img = img.convert(mode)
assert_image_similar(a, img, epsilon, msg)
def assert_all_same(items: Sequence[Any], msg: str | None = None) -> None:
assert items.count(items[0]) == len(items), msg
def assert_not_all_same(items: Sequence[Any], msg: str | None = None) -> None:
assert items.count(items[0]) != len(items), msg
@ -327,16 +319,7 @@ def magick_command() -> list[str] | None:
return None
def on_appveyor() -> bool:
return "APPVEYOR" in os.environ
def on_github_actions() -> bool:
return "GITHUB_ACTIONS" in os.environ
def on_ci() -> bool:
# GitHub Actions and AppVeyor have "CI"
return "CI" in os.environ
@ -358,10 +341,6 @@ def is_pypy() -> bool:
return hasattr(sys, "pypy_translation_info")
def is_mingw() -> bool:
return sysconfig.get_platform() == "mingw"
class CachedProperty:
def __init__(self, func: Callable[[Any], Any]) -> None:
self.func = func

[Binary test images changed (contents not shown): Tests/images/eps/1.bmp added (1.2 KiB); several other binary test images were added, removed, or moved.]


@ -7,7 +7,7 @@ import fuzzers
import packaging
import pytest
from PIL import Image, UnidentifiedImageError, features
from PIL import Image, features
from Tests.helper import skip_unless_feature
if sys.platform.startswith("win32"):
@ -32,21 +32,17 @@ def test_fuzz_images(path: str) -> None:
fuzzers.fuzz_image(f.read())
assert True
except (
# Known exceptions from Pillow
OSError,
SyntaxError,
MemoryError,
ValueError,
NotImplementedError,
OverflowError,
):
# Known exceptions that are through from Pillow
assert True
except (
# Known Image.* exceptions
Image.DecompressionBombError,
Image.DecompressionBombWarning,
UnidentifiedImageError,
):
# Known Image.* exceptions
assert True
finally:
fuzzers.disable_decompressionbomb_error()


@ -22,6 +22,8 @@ def test_bad() -> None:
for f in get_files("b"):
# Assert that there is no unclosed file warning
with warnings.catch_warnings():
warnings.simplefilter("error")
try:
with Image.open(f) as im:
im.load()


@ -19,7 +19,7 @@ except ImportError:
class TestColorLut3DCoreAPI:
def generate_identity_table(
self, channels: int, size: int | tuple[int, int, int]
) -> tuple[int, int, int, int, list[float]]:
) -> tuple[int, tuple[int, int, int], list[float]]:
if isinstance(size, tuple):
size_1d, size_2d, size_3d = size
else:
@ -39,9 +39,7 @@ class TestColorLut3DCoreAPI:
]
return (
channels,
size_1d,
size_2d,
size_3d,
(size_1d, size_2d, size_3d),
[item for sublist in table for item in sublist],
)
@ -89,21 +87,21 @@ class TestColorLut3DCoreAPI:
with pytest.raises(ValueError, match=r"size1D \* size2D \* size3D"):
im.im.color_lut_3d(
"RGB", Image.Resampling.BILINEAR, 3, 2, 2, 2, [0, 0, 0] * 7
"RGB", Image.Resampling.BILINEAR, 3, (2, 2, 2), [0, 0, 0] * 7
)
with pytest.raises(ValueError, match=r"size1D \* size2D \* size3D"):
im.im.color_lut_3d(
"RGB", Image.Resampling.BILINEAR, 3, 2, 2, 2, [0, 0, 0] * 9
"RGB", Image.Resampling.BILINEAR, 3, (2, 2, 2), [0, 0, 0] * 9
)
with pytest.raises(TypeError):
im.im.color_lut_3d(
"RGB", Image.Resampling.BILINEAR, 3, 2, 2, 2, [0, 0, "0"] * 8
"RGB", Image.Resampling.BILINEAR, 3, (2, 2, 2), [0, 0, "0"] * 8
)
with pytest.raises(TypeError):
im.im.color_lut_3d("RGB", Image.Resampling.BILINEAR, 3, 2, 2, 2, 16)
im.im.color_lut_3d("RGB", Image.Resampling.BILINEAR, 3, (2, 2, 2), 16)
@pytest.mark.parametrize(
"lut_mode, table_channels, table_size",
@ -264,7 +262,7 @@ class TestColorLut3DCoreAPI:
assert_image_equal(
Image.merge('RGB', im.split()[::-1]),
im._new(im.im.color_lut_3d('RGB', Image.Resampling.BILINEAR,
3, 2, 2, 2, [
3, (2, 2, 2), [
0, 0, 0, 0, 0, 1,
0, 1, 0, 0, 1, 1,
@ -286,7 +284,7 @@ class TestColorLut3DCoreAPI:
# fmt: off
transformed = im._new(im.im.color_lut_3d('RGB', Image.Resampling.BILINEAR,
3, 2, 2, 2,
3, (2, 2, 2),
[
-1, -1, -1, 2, -1, -1,
-1, 2, -1, 2, 2, -1,
@ -307,7 +305,7 @@ class TestColorLut3DCoreAPI:
# fmt: off
transformed = im._new(im.im.color_lut_3d('RGB', Image.Resampling.BILINEAR,
3, 2, 2, 2,
3, (2, 2, 2),
[
-3, -3, -3, 5, -3, -3,
-3, 5, -3, 5, 5, -3,
@ -388,10 +386,12 @@ class TestColorLut3DFilter:
table = numpy.ones((7 * 6 * 5, 3), dtype=numpy.float16)
lut = ImageFilter.Color3DLUT((5, 6, 7), table)
assert isinstance(lut.table, numpy.ndarray)
assert lut.table.shape == (table.size,)
table = numpy.ones((7 * 6 * 5 * 3), dtype=numpy.float16)
lut = ImageFilter.Color3DLUT((5, 6, 7), table)
assert isinstance(lut.table, numpy.ndarray)
assert lut.table.shape == (table.size,)
# Check application

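The hunks above change the internal im.im.color_lut_3d call so the three table dimensions are passed as a single tuple rather than three separate integers. The public entry point, ImageFilter.Color3DLUT, is unaffected; a small sketch of that API (image size and colour chosen arbitrarily):

```python
from PIL import Image, ImageFilter

# Identity LUT: every lattice point maps back to its own (r, g, b) coordinate,
# built with the Color3DLUT.generate() helper.
lut = ImageFilter.Color3DLUT.generate((5, 6, 7), lambda r, g, b: (r, g, b))

im = Image.new("RGB", (16, 16), (255, 128, 0))
out = im.filter(lut)  # an identity table leaves colours essentially unchanged
print(out.getpixel((0, 0)))
```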

@ -12,19 +12,16 @@ ORIGINAL_LIMIT = Image.MAX_IMAGE_PIXELS
class TestDecompressionBomb:
def teardown_method(self) -> None:
Image.MAX_IMAGE_PIXELS = ORIGINAL_LIMIT
def test_no_warning_small_file(self) -> None:
# Implicit assert: no warning.
# A warning would cause a failure.
with Image.open(TEST_FILE):
pass
def test_no_warning_no_limit(self) -> None:
def test_no_warning_no_limit(self, monkeypatch: pytest.MonkeyPatch) -> None:
# Arrange
# Turn limit off
Image.MAX_IMAGE_PIXELS = None
monkeypatch.setattr(Image, "MAX_IMAGE_PIXELS", None)
assert Image.MAX_IMAGE_PIXELS is None
# Act / Assert
@ -33,18 +30,18 @@ class TestDecompressionBomb:
with Image.open(TEST_FILE):
pass
def test_warning(self) -> None:
def test_warning(self, monkeypatch: pytest.MonkeyPatch) -> None:
# Set limit to trigger warning on the test file
Image.MAX_IMAGE_PIXELS = 128 * 128 - 1
monkeypatch.setattr(Image, "MAX_IMAGE_PIXELS", 128 * 128 - 1)
assert Image.MAX_IMAGE_PIXELS == 128 * 128 - 1
with pytest.warns(Image.DecompressionBombWarning):
with Image.open(TEST_FILE):
pass
def test_exception(self) -> None:
def test_exception(self, monkeypatch: pytest.MonkeyPatch) -> None:
# Set limit to trigger exception on the test file
Image.MAX_IMAGE_PIXELS = 64 * 128 - 1
monkeypatch.setattr(Image, "MAX_IMAGE_PIXELS", 64 * 128 - 1)
assert Image.MAX_IMAGE_PIXELS == 64 * 128 - 1
with pytest.raises(Image.DecompressionBombError):
@ -66,9 +63,9 @@ class TestDecompressionBomb:
with pytest.raises(Image.DecompressionBombError):
im.seek(1)
def test_exception_gif_zero_width(self) -> None:
def test_exception_gif_zero_width(self, monkeypatch: pytest.MonkeyPatch) -> None:
# Set limit to trigger exception on the test file
Image.MAX_IMAGE_PIXELS = 4 * 64 * 128
monkeypatch.setattr(Image, "MAX_IMAGE_PIXELS", 4 * 64 * 128)
assert Image.MAX_IMAGE_PIXELS == 4 * 64 * 128
with pytest.raises(Image.DecompressionBombError):


@ -36,10 +36,11 @@ def test_version() -> None:
else:
assert function(name) == version
if name != "PIL":
if name == "zlib" and version is not None:
version = re.sub(".zlib-ng$", "", version)
elif name == "libtiff" and version is not None:
version = re.sub("t$", "", version)
if version is not None:
if name == "zlib" and features.check_feature("zlib_ng"):
version = re.sub(".zlib-ng$", "", version)
elif name == "libtiff":
version = re.sub("t$", "", version)
assert version is None or re.search(r"\d+(\.\d+)*$", version)
for module in features.modules:
@ -56,17 +57,17 @@ def test_version() -> None:
def test_webp_transparency() -> None:
with pytest.warns(DeprecationWarning):
assert features.check("transp_webp") == features.check_module("webp")
assert (features.check("transp_webp") or False) == features.check_module("webp")
def test_webp_mux() -> None:
with pytest.warns(DeprecationWarning):
assert features.check("webp_mux") == features.check_module("webp")
assert (features.check("webp_mux") or False) == features.check_module("webp")
def test_webp_anim() -> None:
with pytest.warns(DeprecationWarning):
assert features.check("webp_anim") == features.check_module("webp")
assert (features.check("webp_anim") or False) == features.check_module("webp")
@skip_unless_feature("libjpeg_turbo")

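The hunk above reflects that the deprecated WebP sub-feature names now warn and may return None, while build details such as zlib-ng support are reported through check_feature(). A brief sketch of the features API as exercised here (the printed values depend on how the local Pillow binary was built):

```python
from PIL import features

# check_module() reports whether an optional compiled module is available.
print("webp module:", features.check_module("webp"))
print("webp version:", features.version_module("webp"))

# check_feature() reports flags baked into the build, such as whether
# zlib-ng or libjpeg-turbo was used.
print("zlib_ng:", features.check_feature("zlib_ng"))
print("libjpeg_turbo:", features.check_feature("libjpeg_turbo"))

# features.check("transp_webp") and the other WebP sub-features are deprecated:
# they emit a DeprecationWarning and may return None, hence the "or False"
# coercion in the hunk above.
```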

@ -258,8 +258,8 @@ def test_apng_mode() -> None:
assert im.mode == "P"
im.seek(im.n_frames - 1)
im = im.convert("RGBA")
assert im.getpixel((0, 0)) == (255, 0, 0, 0)
assert im.getpixel((64, 32)) == (255, 0, 0, 0)
assert im.getpixel((0, 0)) == (0, 255, 0, 255)
assert im.getpixel((64, 32)) == (0, 255, 0, 255)
with Image.open("Tests/images/apng/mode_palette_1bit_alpha.png") as im:
assert im.mode == "P"
@ -307,13 +307,8 @@ def test_apng_syntax_errors() -> None:
im.load()
# we can handle this case gracefully
exception = None
with Image.open("Tests/images/apng/syntax_num_frames_low.png") as im:
try:
im.seek(im.n_frames - 1)
except Exception as e:
exception = e
assert exception is None
im.seek(im.n_frames - 1)
with pytest.raises(OSError):
with Image.open("Tests/images/apng/syntax_num_frames_high.png") as im:
@ -405,13 +400,8 @@ def test_apng_save_split_fdat(tmp_path: Path) -> None:
append_images=frames,
)
with Image.open(test_file) as im:
exception = None
try:
im.seek(im.n_frames - 1)
im.load()
except Exception as e:
exception = e
assert exception is None
im.seek(im.n_frames - 1)
im.load()
def test_apng_save_duration_loop(tmp_path: Path) -> None:

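The hunks above drop the try/except wrappers so that a failing seek() or load() surfaces directly instead of being collected into an exception variable. A minimal sketch of stepping through an animated PNG's frames, using one of the test files named in the hunk:

```python
from PIL import Image

with Image.open("Tests/images/apng/mode_palette_1bit_alpha.png") as im:
    # n_frames gives the frame count; seek() selects a frame and load()
    # decodes it, raising if the frame data is malformed.
    for frame in range(im.n_frames):
        im.seek(frame)
        im.load()
    print(im.convert("RGBA").getpixel((0, 0)))
```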

@ -4,7 +4,7 @@ from pathlib import Path
import pytest
from PIL import Image
from PIL import BlpImagePlugin, Image
from .helper import (
assert_image_equal,
@ -19,6 +19,7 @@ def test_load_blp1() -> None:
assert_image_equal_tofile(im, "Tests/images/blp/blp1_jpeg.png")
with Image.open("Tests/images/blp/blp1_jpeg2.blp") as im:
assert im.mode == "RGBA"
im.load()
@ -37,6 +38,13 @@ def test_load_blp2_dxt1a() -> None:
assert_image_equal_tofile(im, "Tests/images/blp/blp2_dxt1a.png")
def test_invalid_file() -> None:
invalid_file = "Tests/images/flower.jpg"
with pytest.raises(BlpImagePlugin.BLPFormatError):
BlpImagePlugin.BlpImageFile(invalid_file)
def test_save(tmp_path: Path) -> None:
f = str(tmp_path / "temp.blp")


@ -83,4 +83,4 @@ def test_handler(tmp_path: Path) -> None:
im.save(temp_file)
assert handler.saved
BufrStubImagePlugin._handler = None
BufrStubImagePlugin.register_handler(None)


@ -4,8 +4,6 @@ import pytest
from PIL import ContainerIO, Image
from .helper import hopper
TEST_FILE = "Tests/images/dummy.container"
@ -15,15 +13,15 @@ def test_sanity() -> None:
def test_isatty() -> None:
with hopper() as im:
container = ContainerIO.ContainerIO(im, 0, 0)
with open(TEST_FILE, "rb") as fh:
container = ContainerIO.ContainerIO(fh, 0, 0)
assert container.isatty() is False
def test_seekable() -> None:
with hopper() as im:
container = ContainerIO.ContainerIO(im, 0, 0)
with open(TEST_FILE, "rb") as fh:
container = ContainerIO.ContainerIO(fh, 0, 0)
assert container.seekable() is True

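The hunk above switches these tests to construct ContainerIO from a real binary file handle rather than an Image object. A short sketch of that usage, with the offset/length values taken from the hunk:

```python
from PIL import ContainerIO

# ContainerIO exposes a byte range of an already-open file as a file-like object.
with open("Tests/images/dummy.container", "rb") as fh:  # path taken from the hunk
    container = ContainerIO.ContainerIO(fh, 0, 0)  # offset 0, zero-length region
    assert container.isatty() is False
    assert container.seekable() is True
```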

@ -26,16 +26,18 @@ def test_sanity() -> None:
@pytest.mark.skipif(is_pypy(), reason="Requires CPython")
def test_unclosed_file() -> None:
def open() -> None:
def open_test_image() -> None:
im = Image.open(TEST_FILE)
im.load()
with pytest.warns(ResourceWarning):
open()
open_test_image()
def test_closed_file() -> None:
with warnings.catch_warnings():
warnings.simplefilter("error")
im = Image.open(TEST_FILE)
im.load()
im.close()
@ -43,6 +45,8 @@ def test_closed_file() -> None:
def test_context_manager() -> None:
with warnings.catch_warnings():
warnings.simplefilter("error")
with Image.open(TEST_FILE) as im:
im.load()

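The hunk above adds warnings.simplefilter("error") so that any unexpected warning, such as the ResourceWarning Pillow emits for an unclosed file, fails the test immediately. A small sketch of the pattern; the GIF path is an illustrative test image, not one named in this hunk:

```python
import warnings

from PIL import Image

# The context manager closes the underlying file, so promoting warnings to
# errors confirms that no ResourceWarning escapes.
with warnings.catch_warnings():
    warnings.simplefilter("error")
    with Image.open("Tests/images/hopper.gif") as im:
        im.load()
```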

@ -331,11 +331,13 @@ def test_dxt5_colorblock_alpha_issue_4142() -> None:
with Image.open("Tests/images/dxt5-colorblock-alpha-issue-4142.dds") as im:
px = im.getpixel((0, 0))
assert isinstance(px, tuple)
assert px[0] != 0
assert px[1] != 0
assert px[2] != 0
px = im.getpixel((1, 0))
assert isinstance(px, tuple)
assert px[0] != 0
assert px[1] != 0
assert px[2] != 0

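The hunk above adds isinstance checks because Image.getpixel() is annotated as possibly returning a scalar, a tuple, or None depending on the image mode, so the result has to be narrowed before indexing. A tiny illustration of the same pattern on a synthetic RGB image:

```python
from PIL import Image

im = Image.new("RGB", (4, 4), (10, 20, 30))
px = im.getpixel((0, 0))
# Narrow the union type before indexing, mirroring the assertions in the hunk.
assert isinstance(px, tuple)
assert px[:3] == (10, 20, 30)
```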

@ -8,6 +8,7 @@ import pytest
from PIL import EpsImagePlugin, Image, UnidentifiedImageError, features
from .helper import (
assert_image_equal_tofile,
assert_image_similar,
assert_image_similar_tofile,
hopper,
@ -19,18 +20,18 @@ from .helper import (
HAS_GHOSTSCRIPT = EpsImagePlugin.has_ghostscript()
# Our two EPS test files (they are identical except for their bounding boxes)
FILE1 = "Tests/images/zero_bb.eps"
FILE2 = "Tests/images/non_zero_bb.eps"
FILE1 = "Tests/images/eps/zero_bb.eps"
FILE2 = "Tests/images/eps/non_zero_bb.eps"
# Due to palletization, we'll need to convert these to RGB after load
FILE1_COMPARE = "Tests/images/zero_bb.png"
FILE1_COMPARE_SCALE2 = "Tests/images/zero_bb_scale2.png"
FILE1_COMPARE = "Tests/images/eps/zero_bb.png"
FILE1_COMPARE_SCALE2 = "Tests/images/eps/zero_bb_scale2.png"
FILE2_COMPARE = "Tests/images/non_zero_bb.png"
FILE2_COMPARE_SCALE2 = "Tests/images/non_zero_bb_scale2.png"
FILE2_COMPARE = "Tests/images/eps/non_zero_bb.png"
FILE2_COMPARE_SCALE2 = "Tests/images/eps/non_zero_bb_scale2.png"
# EPS test files with binary preview
FILE3 = "Tests/images/binary_preview_map.eps"
FILE3 = "Tests/images/eps/binary_preview_map.eps"
# Three unsigned 32bit little-endian values:
# 0xC6D3D0C5 magic number
@ -94,10 +95,14 @@ def test_sanity(filename: str, size: tuple[int, int], scale: int) -> None:
@pytest.mark.skipif(not HAS_GHOSTSCRIPT, reason="Ghostscript not available")
def test_load() -> None:
with Image.open(FILE1) as im:
assert im.load()[0, 0] == (255, 255, 255)
px = im.load()
assert px is not None
assert px[0, 0] == (255, 255, 255)
# Test again now that it has already been loaded once
assert im.load()[0, 0] == (255, 255, 255)
px = im.load()
assert px is not None
assert px[0, 0] == (255, 255, 255)
def test_binary() -> None:
@ -126,6 +131,15 @@ def test_binary_header_only() -> None:
EpsImagePlugin.EpsImageFile(data)
@pytest.mark.parametrize("prefix", (b"", simple_binary_header))
def test_simple_eps_file(prefix: bytes) -> None:
data = io.BytesIO(prefix + b"\n".join(simple_eps_file))
with Image.open(data) as img:
assert img.mode == "RGB"
assert img.size == (100, 100)
assert img.format == "EPS"
@pytest.mark.parametrize("prefix", (b"", simple_binary_header))
def test_missing_version_comment(prefix: bytes) -> None:
data = io.BytesIO(prefix + b"\n".join(simple_eps_file_without_version))
@ -141,23 +155,21 @@ def test_missing_boundingbox_comment(prefix: bytes) -> None:
@pytest.mark.parametrize("prefix", (b"", simple_binary_header))
def test_invalid_boundingbox_comment(prefix: bytes) -> None:
data = io.BytesIO(prefix + b"\n".join(simple_eps_file_with_invalid_boundingbox))
@pytest.mark.parametrize(
"file_lines",
(
simple_eps_file_with_invalid_boundingbox,
simple_eps_file_with_invalid_boundingbox_valid_imagedata,
),
)
def test_invalid_boundingbox_comment(
prefix: bytes, file_lines: tuple[bytes, ...]
) -> None:
data = io.BytesIO(prefix + b"\n".join(file_lines))
with pytest.raises(OSError, match="cannot determine EPS bounding box"):
EpsImagePlugin.EpsImageFile(data)
@pytest.mark.parametrize("prefix", (b"", simple_binary_header))
def test_invalid_boundingbox_comment_valid_imagedata_comment(prefix: bytes) -> None:
data = io.BytesIO(
prefix + b"\n".join(simple_eps_file_with_invalid_boundingbox_valid_imagedata)
)
with Image.open(data) as img:
assert img.mode == "RGB"
assert img.size == (100, 100)
assert img.format == "EPS"
@pytest.mark.parametrize("prefix", (b"", simple_binary_header))
def test_ascii_comment_too_long(prefix: bytes) -> None:
data = io.BytesIO(prefix + b"\n".join(simple_eps_file_with_long_ascii_comment))
@ -177,7 +189,7 @@ def test_load_long_binary_data(prefix: bytes) -> None:
data = io.BytesIO(prefix + b"\n".join(simple_eps_file_with_long_binary_data))
with Image.open(data) as img:
img.load()
assert img.mode == "RGB"
assert img.mode == "1"
assert img.size == (100, 100)
assert img.format == "EPS"
@ -187,7 +199,7 @@ def test_load_long_binary_data(prefix: bytes) -> None:
)
@pytest.mark.skipif(not HAS_GHOSTSCRIPT, reason="Ghostscript not available")
def test_cmyk() -> None:
with Image.open("Tests/images/pil_sample_cmyk.eps") as cmyk_image:
with Image.open("Tests/images/eps/pil_sample_cmyk.eps") as cmyk_image:
assert cmyk_image.mode == "CMYK"
assert cmyk_image.size == (100, 100)
assert cmyk_image.format == "EPS"
@ -204,8 +216,8 @@ def test_cmyk() -> None:
@pytest.mark.skipif(not HAS_GHOSTSCRIPT, reason="Ghostscript not available")
def test_showpage() -> None:
# See https://github.com/python-pillow/Pillow/issues/2615
with Image.open("Tests/images/reqd_showpage.eps") as plot_image:
with Image.open("Tests/images/reqd_showpage.png") as target:
with Image.open("Tests/images/eps/reqd_showpage.eps") as plot_image:
with Image.open("Tests/images/eps/reqd_showpage.png") as target:
# should not crash/hang
plot_image.load()
# fonts could be slightly different
@ -214,11 +226,11 @@ def test_showpage() -> None:
@pytest.mark.skipif(not HAS_GHOSTSCRIPT, reason="Ghostscript not available")
def test_transparency() -> None:
with Image.open("Tests/images/reqd_showpage.eps") as plot_image:
with Image.open("Tests/images/eps/reqd_showpage.eps") as plot_image:
plot_image.load(transparency=True)
assert plot_image.mode == "RGBA"
with Image.open("Tests/images/reqd_showpage_transparency.png") as target:
with Image.open("Tests/images/eps/reqd_showpage_transparency.png") as target:
# fonts could be slightly different
assert_image_similar(plot_image, target, 6)
@ -245,9 +257,19 @@ def test_bytesio_object() -> None:
assert_image_similar(img, image1_scale1_compare, 5)
def test_1_mode() -> None:
with Image.open("Tests/images/1.eps") as im:
assert im.mode == "1"
@pytest.mark.skipif(not HAS_GHOSTSCRIPT, reason="Ghostscript not available")
@pytest.mark.parametrize(
# These images have an "ImageData" descriptor.
"filename",
(
"Tests/images/eps/1.eps",
"Tests/images/eps/1_boundingbox_after_imagedata.eps",
"Tests/images/eps/1_second_imagedata.eps",
),
)
def test_1(filename: str) -> None:
with Image.open(filename) as im:
assert_image_equal_tofile(im, "Tests/images/eps/1.bmp")
def test_image_mode_not_supported(tmp_path: Path) -> None:
@ -302,7 +324,9 @@ def test_render_scale2() -> None:
@pytest.mark.skipif(not HAS_GHOSTSCRIPT, reason="Ghostscript not available")
@pytest.mark.parametrize("filename", (FILE1, FILE2, "Tests/images/illu10_preview.eps"))
@pytest.mark.parametrize(
"filename", (FILE1, FILE2, "Tests/images/eps/illu10_preview.eps")
)
def test_resize(filename: str) -> None:
with Image.open(filename) as im:
new_size = (100, 100)
@ -344,10 +368,10 @@ def test_readline(prefix: bytes, line_ending: bytes) -> None:
@pytest.mark.parametrize(
"filename",
(
"Tests/images/illu10_no_preview.eps",
"Tests/images/illu10_preview.eps",
"Tests/images/illuCS6_no_preview.eps",
"Tests/images/illuCS6_preview.eps",
"Tests/images/eps/illu10_no_preview.eps",
"Tests/images/eps/illu10_preview.eps",
"Tests/images/eps/illuCS6_no_preview.eps",
"Tests/images/eps/illuCS6_preview.eps",
),
)
def test_open_eps(filename: str) -> None:
@ -359,7 +383,7 @@ def test_open_eps(filename: str) -> None:
@pytest.mark.skipif(not HAS_GHOSTSCRIPT, reason="Ghostscript not available")
def test_emptyline() -> None:
# Test file includes an empty line in the header data
emptyline_file = "Tests/images/zero_bb_emptyline.eps"
emptyline_file = "Tests/images/eps/zero_bb_emptyline.eps"
with Image.open(emptyline_file) as image:
image.load()
@ -371,7 +395,7 @@ def test_emptyline() -> None:
@pytest.mark.timeout(timeout=5)
@pytest.mark.parametrize(
"test_file",
["Tests/images/timeout-d675703545fee17acab56e5fec644c19979175de.eps"],
["Tests/images/eps/timeout-d675703545fee17acab56e5fec644c19979175de.eps"],
)
def test_timeout(test_file: str) -> None:
with open(test_file, "rb") as f:
@ -384,7 +408,7 @@ def test_bounding_box_in_trailer() -> None:
# Check bounding boxes are parsed in the same way
# when specified in the header and the trailer
with (
Image.open("Tests/images/zero_bb_trailer.eps") as trailer_image,
Image.open("Tests/images/eps/zero_bb_trailer.eps") as trailer_image,
Image.open(FILE1) as header_image,
):
assert trailer_image.size == header_image.size
@ -392,12 +416,12 @@ def test_bounding_box_in_trailer() -> None:
def test_eof_before_bounding_box() -> None:
with pytest.raises(OSError):
with Image.open("Tests/images/zero_bb_eof_before_boundingbox.eps"):
with Image.open("Tests/images/eps/zero_bb_eof_before_boundingbox.eps"):
pass
def test_invalid_data_after_eof() -> None:
with open("Tests/images/illuCS6_preview.eps", "rb") as f:
with open("Tests/images/eps/illuCS6_preview.eps", "rb") as f:
img_bytes = io.BytesIO(f.read() + b"\r\n%" + (b" " * 255))
with Image.open(img_bytes) as img:


@ -35,36 +35,35 @@ def test_sanity() -> None:
assert im.is_animated
def test_prefix_chunk() -> None:
ImageFile.LOAD_TRUNCATED_IMAGES = True
try:
with Image.open(animated_test_file_with_prefix_chunk) as im:
assert im.mode == "P"
assert im.size == (320, 200)
assert im.format == "FLI"
assert im.info["duration"] == 171
assert im.is_animated
def test_prefix_chunk(monkeypatch: pytest.MonkeyPatch) -> None:
monkeypatch.setattr(ImageFile, "LOAD_TRUNCATED_IMAGES", True)
with Image.open(animated_test_file_with_prefix_chunk) as im:
assert im.mode == "P"
assert im.size == (320, 200)
assert im.format == "FLI"
assert im.info["duration"] == 171
assert im.is_animated
palette = im.getpalette()
assert palette[3:6] == [255, 255, 255]
assert palette[381:384] == [204, 204, 12]
assert palette[765:] == [252, 0, 0]
finally:
ImageFile.LOAD_TRUNCATED_IMAGES = False
palette = im.getpalette()
assert palette[3:6] == [255, 255, 255]
assert palette[381:384] == [204, 204, 12]
assert palette[765:] == [252, 0, 0]
@pytest.mark.skipif(is_pypy(), reason="Requires CPython")
def test_unclosed_file() -> None:
def open() -> None:
def open_test_image() -> None:
im = Image.open(static_test_file)
im.load()
with pytest.warns(ResourceWarning):
open()
open_test_image()
def test_closed_file() -> None:
with warnings.catch_warnings():
warnings.simplefilter("error")
im = Image.open(static_test_file)
im.load()
im.close()
@ -81,6 +80,8 @@ def test_seek_after_close() -> None:
def test_context_manager() -> None:
with warnings.catch_warnings():
warnings.simplefilter("error")
with Image.open(static_test_file) as im:
im.load()
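
The FLI hunk above swaps a try/finally reset of ImageFile.LOAD_TRUNCATED_IMAGES for pytest's monkeypatch fixture, which restores the attribute automatically when the test finishes. A minimal sketch of that pattern; the test name and image path are placeholders:

from __future__ import annotations

import pytest

from PIL import Image, ImageFile


def test_load_truncated(monkeypatch: pytest.MonkeyPatch) -> None:
    # monkeypatch undoes the setattr at teardown, even if the test fails,
    # so no try/finally bookkeeping is needed
    monkeypatch.setattr(ImageFile, "LOAD_TRUNCATED_IMAGES", True)
    with Image.open("Tests/images/hopper.png") as im:
        im.load()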


@ -1,5 +1,8 @@
from __future__ import annotations
import io
import struct
import pytest
from PIL import FtexImagePlugin, Image
@ -23,3 +26,15 @@ def test_invalid_file() -> None:
with pytest.raises(SyntaxError):
FtexImagePlugin.FtexImageFile(invalid_file)
def test_invalid_texture() -> None:
with open("Tests/images/ftex_dxt1.ftc", "rb") as fp:
data = fp.read()
# Change texture compression format
data = data[:24] + struct.pack("<i", 2) + data[28:]
with pytest.raises(ValueError, match="Invalid texture compression format: 2"):
with Image.open(io.BytesIO(data)):
pass
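
The FTEX hunk above patches a few header bytes in memory with struct.pack to reach a specific error path. A small generic sketch of that byte-patching idea on an arbitrary buffer; the helper name and offset are illustrative only:

from __future__ import annotations

import struct


def patch_le_int32(data: bytes, offset: int, value: int) -> bytes:
    # replace the 4-byte little-endian integer at `offset` with `value`
    return data[:offset] + struct.pack("<i", value) + data[offset + 4 :]


# e.g. force a bogus compression id at byte offset 24
patched = patch_le_int32(b"\x00" * 32, 24, 2)
assert struct.unpack_from("<i", patched, 24)[0] == 2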


@ -14,10 +14,14 @@ def test_gbr_file() -> None:
def test_load() -> None:
with Image.open("Tests/images/gbr.gbr") as im:
assert im.load()[0, 0] == (0, 0, 0, 0)
px = im.load()
assert px is not None
assert px[0, 0] == (0, 0, 0, 0)
# Test again now that it has already been loaded once
assert im.load()[0, 0] == (0, 0, 0, 0)
px = im.load()
assert px is not None
assert px[0, 0] == (0, 0, 0, 0)
def test_multiple_load_operations() -> None:


@ -4,6 +4,8 @@ import pytest
from PIL import GdImageFile, UnidentifiedImageError
from .helper import assert_image_similar_tofile
TEST_GD_FILE = "Tests/images/hopper.gd"
@ -11,6 +13,7 @@ def test_sanity() -> None:
with GdImageFile.open(TEST_GD_FILE) as im:
assert im.size == (128, 128)
assert im.format == "GD"
assert_image_similar_tofile(im.convert("RGB"), "Tests/images/hopper.jpg", 14)
def test_bad_mode() -> None:


@ -4,6 +4,7 @@ import warnings
from collections.abc import Generator
from io import BytesIO
from pathlib import Path
from typing import Any
import pytest
@ -21,9 +22,6 @@ from .helper import (
# sample gif stream
TEST_GIF = "Tests/images/hopper.gif"
with open(TEST_GIF, "rb") as f:
data = f.read()
def test_sanity() -> None:
with Image.open(TEST_GIF) as im:
@ -36,16 +34,18 @@ def test_sanity() -> None:
@pytest.mark.skipif(is_pypy(), reason="Requires CPython")
def test_unclosed_file() -> None:
def open() -> None:
def open_test_image() -> None:
im = Image.open(TEST_GIF)
im.load()
with pytest.warns(ResourceWarning):
open()
open_test_image()
def test_closed_file() -> None:
with warnings.catch_warnings():
warnings.simplefilter("error")
im = Image.open(TEST_GIF)
im.load()
im.close()
@ -67,6 +67,8 @@ def test_seek_after_close() -> None:
def test_context_manager() -> None:
with warnings.catch_warnings():
warnings.simplefilter("error")
with Image.open(TEST_GIF) as im:
im.load()
@ -81,12 +83,12 @@ def test_invalid_file() -> None:
def test_l_mode_transparency() -> None:
with Image.open("Tests/images/no_palette_with_transparency.gif") as im:
assert im.mode == "L"
assert im.load()[0, 0] == 128
assert im.getpixel((0, 0)) == 128
assert im.info["transparency"] == 255
im.seek(1)
assert im.mode == "L"
assert im.load()[0, 0] == 128
assert im.getpixel((0, 0)) == 128
def test_l_mode_after_rgb() -> None:
@ -104,7 +106,7 @@ def test_palette_not_needed_for_second_frame() -> None:
assert_image_similar(im, hopper("L").convert("RGB"), 8)
def test_strategy() -> None:
def test_strategy(monkeypatch: pytest.MonkeyPatch) -> None:
with Image.open("Tests/images/iss634.gif") as im:
expected_rgb_always = im.convert("RGB")
@ -114,35 +116,36 @@ def test_strategy() -> None:
im.seek(1)
expected_different = im.convert("RGB")
try:
GifImagePlugin.LOADING_STRATEGY = GifImagePlugin.LoadingStrategy.RGB_ALWAYS
with Image.open("Tests/images/iss634.gif") as im:
assert im.mode == "RGB"
assert_image_equal(im, expected_rgb_always)
monkeypatch.setattr(
GifImagePlugin, "LOADING_STRATEGY", GifImagePlugin.LoadingStrategy.RGB_ALWAYS
)
with Image.open("Tests/images/iss634.gif") as im:
assert im.mode == "RGB"
assert_image_equal(im, expected_rgb_always)
with Image.open("Tests/images/chi.gif") as im:
assert im.mode == "RGBA"
assert_image_equal(im, expected_rgb_always_rgba)
with Image.open("Tests/images/chi.gif") as im:
assert im.mode == "RGBA"
assert_image_equal(im, expected_rgb_always_rgba)
GifImagePlugin.LOADING_STRATEGY = (
GifImagePlugin.LoadingStrategy.RGB_AFTER_DIFFERENT_PALETTE_ONLY
)
# Stay in P mode with only a global palette
with Image.open("Tests/images/chi.gif") as im:
assert im.mode == "P"
monkeypatch.setattr(
GifImagePlugin,
"LOADING_STRATEGY",
GifImagePlugin.LoadingStrategy.RGB_AFTER_DIFFERENT_PALETTE_ONLY,
)
# Stay in P mode with only a global palette
with Image.open("Tests/images/chi.gif") as im:
assert im.mode == "P"
im.seek(1)
assert im.mode == "P"
assert_image_equal(im.convert("RGB"), expected_different)
im.seek(1)
assert im.mode == "P"
assert_image_equal(im.convert("RGB"), expected_different)
# Change to RGB mode when a frame has an individual palette
with Image.open("Tests/images/iss634.gif") as im:
assert im.mode == "P"
# Change to RGB mode when a frame has an individual palette
with Image.open("Tests/images/iss634.gif") as im:
assert im.mode == "P"
im.seek(1)
assert im.mode == "RGB"
finally:
GifImagePlugin.LOADING_STRATEGY = GifImagePlugin.LoadingStrategy.RGB_AFTER_FIRST
im.seek(1)
assert im.mode == "RGB"
def test_optimize() -> None:
@ -304,8 +307,9 @@ def test_roundtrip_save_all_1(tmp_path: Path) -> None:
def test_loading_multiple_palettes(path: str, mode: str) -> None:
with Image.open(path) as im:
assert im.mode == "P"
assert im.palette is not None
first_frame_colors = im.palette.colors.keys()
original_color = im.convert("RGB").load()[0, 0]
original_color = im.convert("RGB").getpixel((0, 0))
im.seek(1)
assert im.mode == mode
@ -313,10 +317,10 @@ def test_loading_multiple_palettes(path: str, mode: str) -> None:
im = im.convert("RGB")
# Check a color only from the old palette
assert im.load()[0, 0] == original_color
assert im.getpixel((0, 0)) == original_color
# Check a color from the new palette
assert im.load()[24, 24] not in first_frame_colors
assert im.getpixel((24, 24)) not in first_frame_colors
def test_headers_saving_for_animated_gifs(tmp_path: Path) -> None:
@ -482,8 +486,7 @@ def test_eoferror() -> None:
def test_first_frame_transparency() -> None:
with Image.open("Tests/images/first_frame_transparency.gif") as im:
px = im.load()
assert px[0, 0] == im.info["transparency"]
assert im.getpixel((0, 0)) == im.info["transparency"]
def test_dispose_none() -> None:
@ -523,6 +526,7 @@ def test_dispose_background_transparency() -> None:
with Image.open("Tests/images/dispose_bgnd_transparency.gif") as img:
img.seek(2)
px = img.load()
assert px is not None
assert px[35, 30][3] == 0
@ -550,17 +554,15 @@ def test_dispose_background_transparency() -> None:
def test_transparent_dispose(
loading_strategy: GifImagePlugin.LoadingStrategy,
expected_colors: tuple[tuple[int | tuple[int, int, int, int], ...]],
monkeypatch: pytest.MonkeyPatch,
) -> None:
GifImagePlugin.LOADING_STRATEGY = loading_strategy
try:
with Image.open("Tests/images/transparent_dispose.gif") as img:
for frame in range(3):
img.seek(frame)
for x in range(3):
color = img.getpixel((x, 0))
assert color == expected_colors[frame][x]
finally:
GifImagePlugin.LOADING_STRATEGY = GifImagePlugin.LoadingStrategy.RGB_AFTER_FIRST
monkeypatch.setattr(GifImagePlugin, "LOADING_STRATEGY", loading_strategy)
with Image.open("Tests/images/transparent_dispose.gif") as img:
for frame in range(3):
img.seek(frame)
for x in range(3):
color = img.getpixel((x, 0))
assert color == expected_colors[frame][x]
def test_dispose_previous() -> None:
@ -759,6 +761,21 @@ def test_dispose2_previous_frame(tmp_path: Path) -> None:
assert im.getpixel((0, 0)) == (0, 0, 0, 255)
def test_dispose2_without_transparency(tmp_path: Path) -> None:
out = str(tmp_path / "temp.gif")
im = Image.new("P", (100, 100))
im2 = Image.new("P", (100, 100), (0, 0, 0))
im2.putpixel((50, 50), (255, 0, 0))
im.save(out, save_all=True, append_images=[im2], disposal=2)
with Image.open(out) as reloaded:
reloaded.seek(1)
assert reloaded.tile[0].extents == (0, 0, 100, 100)
def test_transparency_in_second_frame(tmp_path: Path) -> None:
out = str(tmp_path / "temp.gif")
with Image.open("Tests/images/different_transparency.gif") as im:
@ -1308,6 +1325,7 @@ def test_palette_save_all_P(tmp_path: Path) -> None:
with Image.open(out) as im:
# Assert that the frames are correct, and each frame has the same palette
assert_image_equal(im.convert("RGB"), frames[0].convert("RGB"))
assert im.palette is not None
assert im.palette.palette == im.global_palette.palette
im.seek(1)
@ -1342,32 +1360,30 @@ def test_save_I(tmp_path: Path) -> None:
assert_image_equal(reloaded.convert("L"), im.convert("L"))
def test_getdata() -> None:
def test_getdata(monkeypatch: pytest.MonkeyPatch) -> None:
# Test getheader/getdata against legacy values.
# Create a 'P' image with holes in the palette.
im = Image._wedge().resize((16, 16), Image.Resampling.NEAREST)
im = Image.linear_gradient(mode="L").resize((16, 16), Image.Resampling.NEAREST)
im.putpalette(ImagePalette.ImagePalette("RGB"))
im.info = {"background": 0}
passed_palette = bytes(255 - i // 3 for i in range(768))
GifImagePlugin._FORCE_OPTIMIZE = True
try:
h = GifImagePlugin.getheader(im, passed_palette)
d = GifImagePlugin.getdata(im)
monkeypatch.setattr(GifImagePlugin, "_FORCE_OPTIMIZE", True)
import pickle
h = GifImagePlugin.getheader(im, passed_palette)
d = GifImagePlugin.getdata(im)
# Enable to get target values on pre-refactor version
# with open('Tests/images/gif_header_data.pkl', 'wb') as f:
# pickle.dump((h, d), f, 1)
with open("Tests/images/gif_header_data.pkl", "rb") as f:
(h_target, d_target) = pickle.load(f)
import pickle
assert h == h_target
assert d == d_target
finally:
GifImagePlugin._FORCE_OPTIMIZE = False
# Enable to get target values on pre-refactor version
# with open('Tests/images/gif_header_data.pkl', 'wb') as f:
# pickle.dump((h, d), f, 1)
with open("Tests/images/gif_header_data.pkl", "rb") as f:
(h_target, d_target) = pickle.load(f)
assert h == h_target
assert d == d_target
def test_lzw_bits() -> None:
@ -1393,24 +1409,23 @@ def test_lzw_bits() -> None:
),
)
def test_extents(
test_file: str, loading_strategy: GifImagePlugin.LoadingStrategy
test_file: str,
loading_strategy: GifImagePlugin.LoadingStrategy,
monkeypatch: pytest.MonkeyPatch,
) -> None:
GifImagePlugin.LOADING_STRATEGY = loading_strategy
try:
with Image.open("Tests/images/" + test_file) as im:
assert im.size == (100, 100)
monkeypatch.setattr(GifImagePlugin, "LOADING_STRATEGY", loading_strategy)
with Image.open("Tests/images/" + test_file) as im:
assert im.size == (100, 100)
# Check that n_frames does not change the size
assert im.n_frames == 2
assert im.size == (100, 100)
# Check that n_frames does not change the size
assert im.n_frames == 2
assert im.size == (100, 100)
im.seek(1)
assert im.size == (150, 150)
im.seek(1)
assert im.size == (150, 150)
im.load()
assert im.im.size == (150, 150)
finally:
GifImagePlugin.LOADING_STRATEGY = GifImagePlugin.LoadingStrategy.RGB_AFTER_FIRST
im.load()
assert im.im.size == (150, 150)
def test_missing_background() -> None:
@ -1431,7 +1446,8 @@ def test_saving_rgba(tmp_path: Path) -> None:
assert reloaded_rgba.load()[0, 0][3] == 0
def test_optimizing_p_rgba(tmp_path: Path) -> None:
@pytest.mark.parametrize("params", ({}, {"disposal": 2, "optimize": False}))
def test_p_rgba(tmp_path: Path, params: dict[str, Any]) -> None:
out = str(tmp_path / "temp.gif")
im1 = Image.new("P", (100, 100))
@ -1443,7 +1459,7 @@ def test_optimizing_p_rgba(tmp_path: Path) -> None:
im2 = Image.new("P", (100, 100))
im2.putpalette(data, "RGBA")
im1.save(out, save_all=True, append_images=[im2])
im1.save(out, save_all=True, append_images=[im2], **params)
with Image.open(out) as reloaded:
assert reloaded.n_frames == 2
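
Several GIF hunks above replace im.load()[x, y] with im.getpixel((x, y)), which returns the pixel value directly and sidesteps the optional pixel-access object. A short sketch of the two styles side by side, using the stock hopper.gif image as an assumed path:

from __future__ import annotations

from PIL import Image


def test_single_pixel_read() -> None:
    with Image.open("Tests/images/hopper.gif") as im:
        # getpixel() loads on demand and returns the value directly
        value = im.getpixel((0, 0))

        # the older load()[x, y] style needs a None guard for type checkers
        px = im.load()
        assert px is not None
        assert px[0, 0] == value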


@ -83,4 +83,4 @@ def test_handler(tmp_path: Path) -> None:
im.save(temp_file)
assert handler.saved
GribStubImagePlugin._handler = None
GribStubImagePlugin.register_handler(None)


@ -85,4 +85,4 @@ def test_handler(tmp_path: Path) -> None:
im.save(temp_file)
assert handler.saved
Hdf5StubImagePlugin._handler = None
Hdf5StubImagePlugin.register_handler(None)


@ -21,6 +21,8 @@ def test_sanity() -> None:
with Image.open(TEST_FILE) as im:
# Assert that there is no unclosed file warning
with warnings.catch_warnings():
warnings.simplefilter("error")
im.load()
assert im.mode == "RGBA"
@ -30,10 +32,14 @@ def test_sanity() -> None:
def test_load() -> None:
with Image.open(TEST_FILE) as im:
assert im.load()[0, 0] == (0, 0, 0, 0)
px = im.load()
assert px is not None
assert px[0, 0] == (0, 0, 0, 0)
# Test again now that it has already been loaded once
assert im.load()[0, 0] == (0, 0, 0, 0)
px = im.load()
assert px is not None
assert px[0, 0] == (0, 0, 0, 0)
def test_save(tmp_path: Path) -> None:


@ -24,7 +24,9 @@ def test_sanity() -> None:
def test_load() -> None:
with Image.open(TEST_ICO_FILE) as im:
assert im.load()[0, 0] == (1, 1, 9, 255)
px = im.load()
assert px is not None
assert px[0, 0] == (1, 1, 9, 255)
def test_mask() -> None:
@ -243,27 +245,23 @@ def test_draw_reloaded(tmp_path: Path) -> None:
assert_image_equal_tofile(im, "Tests/images/hopper_draw.ico")
def test_truncated_mask() -> None:
def test_truncated_mask(monkeypatch: pytest.MonkeyPatch) -> None:
# 1 bpp
with open("Tests/images/hopper_mask.ico", "rb") as fp:
data = fp.read()
ImageFile.LOAD_TRUNCATED_IMAGES = True
monkeypatch.setattr(ImageFile, "LOAD_TRUNCATED_IMAGES", True)
data = data[:-3]
try:
with Image.open(io.BytesIO(data)) as im:
with Image.open("Tests/images/hopper_mask.png") as expected:
assert im.mode == "1"
with Image.open(io.BytesIO(data)) as im:
assert im.mode == "1"
# 32 bpp
output = io.BytesIO()
expected = hopper("RGBA")
expected.save(output, "ico", bitmap_format="bmp")
# 32 bpp
output = io.BytesIO()
expected = hopper("RGBA")
expected.save(output, "ico", bitmap_format="bmp")
data = output.getvalue()[:-1]
data = output.getvalue()[:-1]
with Image.open(io.BytesIO(data)) as im:
assert im.mode == "RGB"
finally:
ImageFile.LOAD_TRUNCATED_IMAGES = False
with Image.open(io.BytesIO(data)) as im:
assert im.mode == "RGB"


@ -31,16 +31,18 @@ def test_name_limit(tmp_path: Path) -> None:
@pytest.mark.skipif(is_pypy(), reason="Requires CPython")
def test_unclosed_file() -> None:
def open() -> None:
def open_test_image() -> None:
im = Image.open(TEST_IM)
im.load()
with pytest.warns(ResourceWarning):
open()
open_test_image()
def test_closed_file() -> None:
with warnings.catch_warnings():
warnings.simplefilter("error")
im = Image.open(TEST_IM)
im.load()
im.close()
@ -48,6 +50,8 @@ def test_closed_file() -> None:
def test_context_manager() -> None:
with warnings.catch_warnings():
warnings.simplefilter("error")
with Image.open(TEST_IM) as im:
im.load()


@ -58,10 +58,7 @@ def test_getiptcinfo_fotostation() -> None:
# Assert
assert iptc is not None
for tag in iptc.keys():
if tag[0] == 240:
return
pytest.fail("FotoStation tag not found")
assert 240 in (tag[0] for tag in iptc.keys()), "FotoStation tag not found"
def test_getiptcinfo_zero_padding() -> None:


@ -181,6 +181,10 @@ class TestFileJpeg:
assert test(100, 200) == (100, 200)
assert test(0) is None # square pixels
def test_dpi_jfif_cm(self) -> None:
with Image.open("Tests/images/jfif_unit_cm.jpg") as im:
assert im.info["dpi"] == (2.54, 5.08)
@mark_if_feature_version(
pytest.mark.valgrind_known_error, "libjpeg_turbo", "2.0", reason="Known Failing"
)
@ -277,7 +281,10 @@ class TestFileJpeg:
assert not im2.info.get("progressive")
assert im3.info.get("progressive")
assert_image_equal(im1, im3)
if features.check_feature("mozjpeg"):
assert_image_similar(im1, im3, 9.39)
else:
assert_image_equal(im1, im3)
assert im1_bytes >= im3_bytes
def test_progressive_large_buffer(self, tmp_path: Path) -> None:
@ -349,7 +356,6 @@ class TestFileJpeg:
assert exif.get_ifd(0x8825) == {}
transposed = ImageOps.exif_transpose(im)
assert transposed is not None
exif = transposed.getexif()
assert exif.get_ifd(0x8825) == {}
@ -420,8 +426,12 @@ class TestFileJpeg:
im2 = self.roundtrip(hopper(), progressive=1)
im3 = self.roundtrip(hopper(), progression=1) # compatibility
assert_image_equal(im1, im2)
assert_image_equal(im1, im3)
if features.check_feature("mozjpeg"):
assert_image_similar(im1, im2, 9.39)
assert_image_similar(im1, im3, 9.39)
else:
assert_image_equal(im1, im2)
assert_image_equal(im1, im3)
assert im2.info.get("progressive")
assert im2.info.get("progression")
assert im3.info.get("progressive")
@ -520,12 +530,13 @@ class TestFileJpeg:
@mark_if_feature_version(
pytest.mark.valgrind_known_error, "libjpeg_turbo", "2.0", reason="Known Failing"
)
def test_truncated_jpeg_should_read_all_the_data(self) -> None:
def test_truncated_jpeg_should_read_all_the_data(
self, monkeypatch: pytest.MonkeyPatch
) -> None:
filename = "Tests/images/truncated_jpeg.jpg"
ImageFile.LOAD_TRUNCATED_IMAGES = True
monkeypatch.setattr(ImageFile, "LOAD_TRUNCATED_IMAGES", True)
with Image.open(filename) as im:
im.load()
ImageFile.LOAD_TRUNCATED_IMAGES = False
assert im.getbbox() is not None
def test_truncated_jpeg_throws_oserror(self) -> None:
@ -541,12 +552,12 @@ class TestFileJpeg:
@mark_if_feature_version(
pytest.mark.valgrind_known_error, "libjpeg_turbo", "2.0", reason="Known Failing"
)
def test_qtables(self, tmp_path: Path) -> None:
def test_qtables(self) -> None:
def _n_qtables_helper(n: int, test_file: str) -> None:
b = BytesIO()
with Image.open(test_file) as im:
f = str(tmp_path / "temp.jpg")
im.save(f, qtables=[[n] * 64] * n)
with Image.open(f) as im:
im.save(b, "JPEG", qtables=[[n] * 64] * n)
with Image.open(b) as im:
assert len(im.quantization) == n
reloaded = self.roundtrip(im, qtables="keep")
assert im.quantization == reloaded.quantization
@ -850,6 +861,8 @@ class TestFileJpeg:
out = str(tmp_path / "out.jpg")
with warnings.catch_warnings():
warnings.simplefilter("error")
im.save(out, exif=exif)
with Image.open(out) as reloaded:
@ -921,7 +934,7 @@ class TestFileJpeg:
def test_jpeg_magic_number(self, monkeypatch: pytest.MonkeyPatch) -> None:
size = 4097
buffer = BytesIO(b"\xFF" * size) # Many xFF bytes
buffer = BytesIO(b"\xff" * size) # Many xff bytes
max_pos = 0
orig_read = buffer.read
@ -998,8 +1011,13 @@ class TestFileJpeg:
with Image.open(f) as reloaded:
assert reloaded.info["xmp"] == b"XMP test"
im.info["xmp"] = b"1" * 65504
im.save(f)
# Check that XMP is not saved from image info
reloaded.save(f)
with Image.open(f) as reloaded:
assert "xmp" not in reloaded.info
im.save(f, xmp=b"1" * 65504)
with Image.open(f) as reloaded:
assert reloaded.info["xmp"] == b"1" * 65504
@ -1007,7 +1025,7 @@ class TestFileJpeg:
im.save(f, xmp=b"1" * 65505)
@pytest.mark.timeout(timeout=1)
def test_eof(self) -> None:
def test_eof(self, monkeypatch: pytest.MonkeyPatch) -> None:
# Even though this decoder never says that it is finished
# the image should still end when there is no new data
class InfiniteMockPyDecoder(ImageFile.PyDecoder):
@ -1020,11 +1038,10 @@ class TestFileJpeg:
with Image.open(TEST_FILE) as im:
im.tile = [
("INFINITE", (0, 0, 128, 128), 0, ("RGB", 0, 1)),
ImageFile._Tile("INFINITE", (0, 0, 128, 128), 0, ("RGB", 0, 1)),
]
ImageFile.LOAD_TRUNCATED_IMAGES = True
monkeypatch.setattr(ImageFile, "LOAD_TRUNCATED_IMAGES", True)
im.load()
ImageFile.LOAD_TRUNCATED_IMAGES = False
def test_separate_tables(self) -> None:
im = hopper()


@ -2,6 +2,7 @@ from __future__ import annotations
import os
import re
from collections.abc import Generator
from io import BytesIO
from pathlib import Path
from typing import Any
@ -29,8 +30,16 @@ EXTRA_DIR = "Tests/images/jpeg2000"
pytestmark = skip_unless_feature("jpg_2000")
test_card = Image.open("Tests/images/test-card.png")
test_card.load()
@pytest.fixture
def card() -> Generator[ImageFile.ImageFile, None, None]:
with Image.open("Tests/images/test-card.png") as im:
im.load()
try:
yield im
finally:
im.close()
# OpenJPEG 2.0.0 outputs this debugging message sometimes; we should
# ignore it---it doesn't represent a test failure.
@ -54,6 +63,7 @@ def test_sanity() -> None:
with Image.open("Tests/images/test-card-lossless.jp2") as im:
px = im.load()
assert px is not None
assert px[0, 0] == (0, 0, 0)
assert im.mode == "RGB"
assert im.size == (640, 480)
@ -74,76 +84,76 @@ def test_invalid_file() -> None:
Jpeg2KImagePlugin.Jpeg2KImageFile(invalid_file)
def test_bytesio() -> None:
def test_bytesio(card: ImageFile.ImageFile) -> None:
with open("Tests/images/test-card-lossless.jp2", "rb") as f:
data = BytesIO(f.read())
with Image.open(data) as im:
im.load()
assert_image_similar(im, test_card, 1.0e-3)
assert_image_similar(im, card, 1.0e-3)
# These two test pre-written JPEG 2000 files that were not written with
# PIL (they were made using Adobe Photoshop)
def test_lossless(tmp_path: Path) -> None:
def test_lossless(card: ImageFile.ImageFile, tmp_path: Path) -> None:
with Image.open("Tests/images/test-card-lossless.jp2") as im:
im.load()
outfile = str(tmp_path / "temp_test-card.png")
im.save(outfile)
assert_image_similar(im, test_card, 1.0e-3)
assert_image_similar(im, card, 1.0e-3)
def test_lossy_tiled() -> None:
assert_image_similar_tofile(
test_card, "Tests/images/test-card-lossy-tiled.jp2", 2.0
)
def test_lossy_tiled(card: ImageFile.ImageFile) -> None:
assert_image_similar_tofile(card, "Tests/images/test-card-lossy-tiled.jp2", 2.0)
def test_lossless_rt() -> None:
im = roundtrip(test_card)
assert_image_equal(im, test_card)
def test_lossless_rt(card: ImageFile.ImageFile) -> None:
im = roundtrip(card)
assert_image_equal(im, card)
def test_lossy_rt() -> None:
im = roundtrip(test_card, quality_layers=[20])
assert_image_similar(im, test_card, 2.0)
def test_lossy_rt(card: ImageFile.ImageFile) -> None:
im = roundtrip(card, quality_layers=[20])
assert_image_similar(im, card, 2.0)
def test_tiled_rt() -> None:
im = roundtrip(test_card, tile_size=(128, 128))
assert_image_equal(im, test_card)
def test_tiled_rt(card: ImageFile.ImageFile) -> None:
im = roundtrip(card, tile_size=(128, 128))
assert_image_equal(im, card)
def test_tiled_offset_rt() -> None:
im = roundtrip(test_card, tile_size=(128, 128), tile_offset=(0, 0), offset=(32, 32))
assert_image_equal(im, test_card)
def test_tiled_offset_rt(card: ImageFile.ImageFile) -> None:
im = roundtrip(card, tile_size=(128, 128), tile_offset=(0, 0), offset=(32, 32))
assert_image_equal(im, card)
def test_tiled_offset_too_small() -> None:
def test_tiled_offset_too_small(card: ImageFile.ImageFile) -> None:
with pytest.raises(ValueError):
roundtrip(test_card, tile_size=(128, 128), tile_offset=(0, 0), offset=(128, 32))
roundtrip(card, tile_size=(128, 128), tile_offset=(0, 0), offset=(128, 32))
def test_irreversible_rt() -> None:
im = roundtrip(test_card, irreversible=True, quality_layers=[20])
assert_image_similar(im, test_card, 2.0)
def test_irreversible_rt(card: ImageFile.ImageFile) -> None:
im = roundtrip(card, irreversible=True, quality_layers=[20])
assert_image_similar(im, card, 2.0)
def test_prog_qual_rt() -> None:
im = roundtrip(test_card, quality_layers=[60, 40, 20], progression="LRCP")
assert_image_similar(im, test_card, 2.0)
def test_prog_qual_rt(card: ImageFile.ImageFile) -> None:
im = roundtrip(card, quality_layers=[60, 40, 20], progression="LRCP")
assert_image_similar(im, card, 2.0)
def test_prog_res_rt() -> None:
im = roundtrip(test_card, num_resolutions=8, progression="RLCP")
assert_image_equal(im, test_card)
def test_prog_res_rt(card: ImageFile.ImageFile) -> None:
im = roundtrip(card, num_resolutions=8, progression="RLCP")
assert_image_equal(im, card)
@pytest.mark.parametrize("num_resolutions", range(2, 6))
def test_default_num_resolutions(num_resolutions: int) -> None:
def test_default_num_resolutions(
card: ImageFile.ImageFile, num_resolutions: int
) -> None:
d = 1 << (num_resolutions - 1)
im = test_card.resize((d - 1, d - 1))
im = card.resize((d - 1, d - 1))
with pytest.raises(OSError):
roundtrip(im, num_resolutions=num_resolutions)
reloaded = roundtrip(im)
@ -172,14 +182,11 @@ def test_load_dpi() -> None:
assert "dpi" not in im.info
def test_restricted_icc_profile() -> None:
ImageFile.LOAD_TRUNCATED_IMAGES = True
try:
# JPEG2000 image with a restricted ICC profile and a known colorspace
with Image.open("Tests/images/balloon_eciRGBv2_aware.jp2") as im:
assert im.mode == "RGB"
finally:
ImageFile.LOAD_TRUNCATED_IMAGES = False
def test_restricted_icc_profile(monkeypatch: pytest.MonkeyPatch) -> None:
monkeypatch.setattr(ImageFile, "LOAD_TRUNCATED_IMAGES", True)
# JPEG2000 image with a restricted ICC profile and a known colorspace
with Image.open("Tests/images/balloon_eciRGBv2_aware.jp2") as im:
assert im.mode == "RGB"
@pytest.mark.skipif(
@ -205,31 +212,31 @@ def test_header_errors() -> None:
pass
def test_layers_type(tmp_path: Path) -> None:
def test_layers_type(card: ImageFile.ImageFile, tmp_path: Path) -> None:
outfile = str(tmp_path / "temp_layers.jp2")
for quality_layers in [[100, 50, 10], (100, 50, 10), None]:
test_card.save(outfile, quality_layers=quality_layers)
card.save(outfile, quality_layers=quality_layers)
for quality_layers_str in ["quality_layers", ("100", "50", "10")]:
with pytest.raises(ValueError):
test_card.save(outfile, quality_layers=quality_layers_str)
card.save(outfile, quality_layers=quality_layers_str)
def test_layers() -> None:
def test_layers(card: ImageFile.ImageFile) -> None:
out = BytesIO()
test_card.save(out, "JPEG2000", quality_layers=[100, 50, 10], progression="LRCP")
card.save(out, "JPEG2000", quality_layers=[100, 50, 10], progression="LRCP")
out.seek(0)
with Image.open(out) as im:
im.layers = 1
im.load()
assert_image_similar(im, test_card, 13)
assert_image_similar(im, card, 13)
out.seek(0)
with Image.open(out) as im:
im.layers = 3
im.load()
assert_image_similar(im, test_card, 0.4)
assert_image_similar(im, card, 0.4)
@pytest.mark.parametrize(
@ -245,24 +252,30 @@ def test_layers() -> None:
(None, {"no_jp2": False}, 4, b"jP"),
),
)
def test_no_jp2(name: str, args: dict[str, bool], offset: int, data: bytes) -> None:
def test_no_jp2(
card: ImageFile.ImageFile,
name: str,
args: dict[str, bool],
offset: int,
data: bytes,
) -> None:
out = BytesIO()
if name:
out.name = name
test_card.save(out, "JPEG2000", **args)
card.save(out, "JPEG2000", **args)
out.seek(offset)
assert out.read(2) == data
def test_mct() -> None:
def test_mct(card: ImageFile.ImageFile) -> None:
# Three component
for val in (0, 1):
out = BytesIO()
test_card.save(out, "JPEG2000", mct=val, no_jp2=True)
card.save(out, "JPEG2000", mct=val, no_jp2=True)
assert out.getvalue()[59] == val
with Image.open(out) as im:
assert_image_similar(im, test_card, 1.0e-3)
assert_image_similar(im, card, 1.0e-3)
# Single component should have MCT disabled
for val in (0, 1):
@ -310,6 +323,18 @@ def test_cmyk() -> None:
assert im.getpixel((0, 0)) == (185, 134, 0, 0)
@pytest.mark.skipif(
not os.path.exists(EXTRA_DIR), reason="Extra image files not installed"
)
@skip_unless_feature_version("jpg_2000", "2.5.3")
def test_cmyk_save() -> None:
with Image.open(f"{EXTRA_DIR}/issue205.jp2") as jp2:
assert jp2.mode == "CMYK"
im = roundtrip(jp2)
assert_image_equal(im, jp2)
@pytest.mark.parametrize("ext", (".j2k", ".jp2"))
def test_16bit_monochrome_has_correct_mode(ext: str) -> None:
with Image.open("Tests/images/16bit.cropped" + ext) as im:
@ -397,6 +422,7 @@ def test_subsampling_decode(name: str) -> None:
def test_pclr() -> None:
with Image.open(f"{EXTRA_DIR}/issue104_jpxstream.jp2") as im:
assert im.mode == "P"
assert im.palette is not None
assert len(im.palette.colors) == 256
assert im.palette.colors[(255, 255, 255)] == 0
@ -404,13 +430,15 @@ def test_pclr() -> None:
f"{EXTRA_DIR}/147af3f1083de4393666b7d99b01b58b_signal_sigsegv_130c531_6155_5136.jp2"
) as im:
assert im.mode == "P"
assert im.palette is not None
assert len(im.palette.colors) == 139
assert im.palette.colors[(0, 0, 0, 0)] == 0
def test_comment() -> None:
with Image.open("Tests/images/comment.jp2") as im:
assert im.info["comment"] == b"Created by OpenJPEG version 2.5.0"
for path in ("Tests/images/9bit.j2k", "Tests/images/comment.jp2"):
with Image.open(path) as im:
assert im.info["comment"] == b"Created by OpenJPEG version 2.5.0"
# Test an image that is truncated partway through a codestream
with open("Tests/images/comment.jp2", "rb") as fp:
@ -419,22 +447,22 @@ def test_comment() -> None:
pass
def test_save_comment() -> None:
def test_save_comment(card: ImageFile.ImageFile) -> None:
for comment in ("Created by Pillow", b"Created by Pillow"):
out = BytesIO()
test_card.save(out, "JPEG2000", comment=comment)
card.save(out, "JPEG2000", comment=comment)
with Image.open(out) as im:
assert im.info["comment"] == b"Created by Pillow"
out = BytesIO()
long_comment = b" " * 65531
test_card.save(out, "JPEG2000", comment=long_comment)
card.save(out, "JPEG2000", comment=long_comment)
with Image.open(out) as im:
assert im.info["comment"] == long_comment
with pytest.raises(ValueError):
test_card.save(out, "JPEG2000", comment=long_comment + b" ")
card.save(out, "JPEG2000", comment=long_comment + b" ")
@pytest.mark.parametrize(
@ -457,15 +485,14 @@ def test_crashes(test_file: str) -> None:
@skip_unless_feature_version("jpg_2000", "2.4.0")
def test_plt_marker() -> None:
def test_plt_marker(card: ImageFile.ImageFile) -> None:
# Search the start of the codestream for PLT
out = BytesIO()
test_card.save(out, "JPEG2000", no_jp2=True, plt=True)
card.save(out, "JPEG2000", no_jp2=True, plt=True)
out.seek(0)
while True:
marker = out.read(2)
if not marker:
pytest.fail("End of stream without PLT")
assert marker, "End of stream without PLT"
jp2_boxid = _binary.i16be(marker)
if jp2_boxid == 0xFF4F:


@ -36,11 +36,7 @@ class LibTiffTestCase:
im.load()
im.getdata()
try:
assert im._compression == "group4"
except AttributeError:
print("No _compression")
print(dir(im))
assert im._compression == "group4"
# can we write it back out, in a different form.
out = str(tmp_path / "temp.png")
@ -313,7 +309,7 @@ class TestFileLibTiff(LibTiffTestCase):
}
def check_tags(
tiffinfo: TiffImagePlugin.ImageFileDirectory_v2 | dict[int, str]
tiffinfo: TiffImagePlugin.ImageFileDirectory_v2 | dict[int, str],
) -> None:
im = hopper()
@ -1098,6 +1094,27 @@ class TestFileLibTiff(LibTiffTestCase):
assert_image_similar(base_im, im, 0.7)
@pytest.mark.parametrize(
"test_file",
[
"Tests/images/old-style-jpeg-compression-no-samplesperpixel.tif",
"Tests/images/old-style-jpeg-compression.tif",
],
)
def test_buffering(self, test_file: str) -> None:
# load exif first
with open(test_file, "rb", buffering=1048576) as f:
with Image.open(f) as im:
exif = dict(im.getexif())
# load image before exif
with open(test_file, "rb", buffering=1048576) as f:
with Image.open(f) as im2:
im2.load()
exif_after_load = dict(im2.getexif())
assert exif == exif_after_load
@pytest.mark.valgrind_known_error(reason="Backtrace in Python Core")
def test_sampleformat_not_corrupted(self) -> None:
# Assert that a TIFF image with SampleFormat=UINT tag is not corrupted
@ -1127,7 +1144,7 @@ class TestFileLibTiff(LibTiffTestCase):
im.load()
# Assert that the error code is IMAGING_CODEC_MEMORY
assert str(e.value) == "-9"
assert str(e.value) == "decoder error -9"
@pytest.mark.parametrize("compression", ("tiff_adobe_deflate", "jpeg"))
def test_save_multistrip(self, compression: str, tmp_path: Path) -> None:
@ -1141,23 +1158,22 @@ class TestFileLibTiff(LibTiffTestCase):
assert len(im.tag_v2[STRIPOFFSETS]) > 1
@pytest.mark.parametrize("argument", (True, False))
def test_save_single_strip(self, argument: bool, tmp_path: Path) -> None:
def test_save_single_strip(
self, argument: bool, tmp_path: Path, monkeypatch: pytest.MonkeyPatch
) -> None:
im = hopper("RGB").resize((256, 256))
out = str(tmp_path / "temp.tif")
if not argument:
TiffImagePlugin.STRIP_SIZE = 2**18
try:
arguments: dict[str, str | int] = {"compression": "tiff_adobe_deflate"}
if argument:
arguments["strip_size"] = 2**18
im.save(out, "TIFF", **arguments)
monkeypatch.setattr(TiffImagePlugin, "STRIP_SIZE", 2**18)
arguments: dict[str, str | int] = {"compression": "tiff_adobe_deflate"}
if argument:
arguments["strip_size"] = 2**18
im.save(out, "TIFF", **arguments)
with Image.open(out) as im:
assert isinstance(im, TiffImagePlugin.TiffImageFile)
assert len(im.tag_v2[STRIPOFFSETS]) == 1
finally:
TiffImagePlugin.STRIP_SIZE = 65536
with Image.open(out) as im:
assert isinstance(im, TiffImagePlugin.TiffImageFile)
assert len(im.tag_v2[STRIPOFFSETS]) == 1
@pytest.mark.parametrize("compression", ("tiff_adobe_deflate", None))
def test_save_zero(self, compression: str | None, tmp_path: Path) -> None:


@ -29,25 +29,32 @@ def roundtrip(im: Image.Image, **options: Any) -> ImageFile.ImageFile:
@pytest.mark.parametrize("test_file", test_files)
def test_sanity(test_file: str) -> None:
with Image.open(test_file) as im:
def check(im: ImageFile.ImageFile) -> None:
im.load()
assert im.mode == "RGB"
assert im.size == (640, 480)
assert im.format == "MPO"
with Image.open(test_file) as im:
check(im)
with MpoImagePlugin.MpoImageFile(test_file) as im:
check(im)
@pytest.mark.skipif(is_pypy(), reason="Requires CPython")
def test_unclosed_file() -> None:
def open() -> None:
def open_test_image() -> None:
im = Image.open(test_files[0])
im.load()
with pytest.warns(ResourceWarning):
open()
open_test_image()
def test_closed_file() -> None:
with warnings.catch_warnings():
warnings.simplefilter("error")
im = Image.open(test_files[0])
im.load()
im.close()
@ -63,6 +70,8 @@ def test_seek_after_close() -> None:
def test_context_manager() -> None:
with warnings.catch_warnings():
warnings.simplefilter("error")
with Image.open(test_files[0]) as im:
im.load()
@ -73,8 +82,8 @@ def test_app(test_file: str) -> None:
with Image.open(test_file) as im:
assert im.applist[0][0] == "APP1"
assert im.applist[1][0] == "APP2"
assert (
im.applist[1][1][:16] == b"MPF\x00MM\x00*\x00\x00\x00\x08\x00\x03\xb0\x00"
assert im.applist[1][1].startswith(
b"MPF\x00MM\x00*\x00\x00\x00\x08\x00\x03\xb0\x00"
)
assert len(im.applist) == 2
@ -293,3 +302,15 @@ def test_save_all() -> None:
# Test that a single frame image will not be saved as an MPO
jpg = roundtrip(im, save_all=True)
assert "mp" not in jpg.info
def test_save_xmp() -> None:
im = Image.new("RGB", (1, 1))
im2 = Image.new("RGB", (1, 1), "#f00")
im2.encoderinfo = {"xmp": b"Second frame"}
im_reloaded = roundtrip(im, xmp=b"First frame", save_all=True, append_images=[im2])
assert im_reloaded.info["xmp"] == b"First frame"
im_reloaded.seek(1)
assert im_reloaded.info["xmp"] == b"Second frame"


@ -264,7 +264,7 @@ def test_pdf_append(tmp_path: Path) -> None:
# append some info
pdf.info.Title = "abc"
pdf.info.Author = "def"
pdf.info.Subject = "ghi\uABCD"
pdf.info.Subject = "ghi\uabcd"
pdf.info.Keywords = "qw)e\\r(ty"
pdf.info.Creator = "hopper()"
pdf.start_writing()
@ -292,7 +292,7 @@ def test_pdf_append(tmp_path: Path) -> None:
assert pdf.info.Title == "abc"
assert pdf.info.Producer == "PdfParser"
assert pdf.info.Keywords == "qw)e\\r(ty"
assert pdf.info.Subject == "ghi\uABCD"
assert pdf.info.Subject == "ghi\uabcd"
assert b"CreationDate" in pdf.info
assert b"ModDate" in pdf.info
check_pdf_pages_consistency(pdf)


@ -338,6 +338,8 @@ class TestFilePng:
with Image.open(TEST_PNG_FILE) as im:
# Assert that there is no unclosed file warning
with warnings.catch_warnings():
warnings.simplefilter("error")
im.verify()
with Image.open(TEST_PNG_FILE) as im:
@ -361,7 +363,7 @@ class TestFilePng:
with pytest.raises((OSError, SyntaxError)):
im.verify()
def test_verify_ignores_crc_error(self) -> None:
def test_verify_ignores_crc_error(self, monkeypatch: pytest.MonkeyPatch) -> None:
# check ignores crc errors in ancillary chunks
chunk_data = chunk(b"tEXt", b"spam")
@ -371,24 +373,20 @@ class TestFilePng:
with pytest.raises(SyntaxError):
PngImagePlugin.PngImageFile(BytesIO(image_data))
ImageFile.LOAD_TRUNCATED_IMAGES = True
try:
im = load(image_data)
assert im is not None
finally:
ImageFile.LOAD_TRUNCATED_IMAGES = False
monkeypatch.setattr(ImageFile, "LOAD_TRUNCATED_IMAGES", True)
im = load(image_data)
assert im is not None
def test_verify_not_ignores_crc_error_in_required_chunk(self) -> None:
def test_verify_not_ignores_crc_error_in_required_chunk(
self, monkeypatch: pytest.MonkeyPatch
) -> None:
# check does not ignore crc errors in required chunks
image_data = MAGIC + IHDR[:-1] + b"q" + TAIL
ImageFile.LOAD_TRUNCATED_IMAGES = True
try:
with pytest.raises(SyntaxError):
PngImagePlugin.PngImageFile(BytesIO(image_data))
finally:
ImageFile.LOAD_TRUNCATED_IMAGES = False
monkeypatch.setattr(ImageFile, "LOAD_TRUNCATED_IMAGES", True)
with pytest.raises(SyntaxError):
PngImagePlugin.PngImageFile(BytesIO(image_data))
def test_roundtrip_dpi(self) -> None:
# Check dpi roundtripping
@ -598,7 +596,7 @@ class TestFilePng:
(b"prIV", b"VALUE3", True),
]
def test_textual_chunks_after_idat(self) -> None:
def test_textual_chunks_after_idat(self, monkeypatch: pytest.MonkeyPatch) -> None:
with Image.open("Tests/images/hopper.png") as im:
assert "comment" in im.text
for k, v in {
@ -612,18 +610,17 @@ class TestFilePng:
with pytest.raises(OSError):
assert isinstance(im.text, dict)
# Raises an EOFError in load_end
with Image.open("Tests/images/hopper_idat_after_image_end.png") as im:
assert im.text == {"TXT": "VALUE", "ZIP": "VALUE"}
# Raises a UnicodeDecodeError in load_end
with Image.open("Tests/images/truncated_image.png") as im:
# The file is truncated
with pytest.raises(OSError):
im.text()
ImageFile.LOAD_TRUNCATED_IMAGES = True
im.text
monkeypatch.setattr(ImageFile, "LOAD_TRUNCATED_IMAGES", True)
assert isinstance(im.text, dict)
ImageFile.LOAD_TRUNCATED_IMAGES = False
# Raises an EOFError in load_end
with Image.open("Tests/images/hopper_idat_after_image_end.png") as im:
assert im.text == {"TXT": "VALUE", "ZIP": "VALUE"}
def test_unknown_compression_method(self) -> None:
with pytest.raises(SyntaxError, match="Unknown compression method"):
@ -649,15 +646,16 @@ class TestFilePng:
@pytest.mark.parametrize(
"cid", (b"IHDR", b"sRGB", b"pHYs", b"acTL", b"fcTL", b"fdAT")
)
def test_truncated_chunks(self, cid: bytes) -> None:
def test_truncated_chunks(
self, cid: bytes, monkeypatch: pytest.MonkeyPatch
) -> None:
fp = BytesIO()
with PngImagePlugin.PngStream(fp) as png:
with pytest.raises(ValueError):
png.call(cid, 0, 0)
ImageFile.LOAD_TRUNCATED_IMAGES = True
monkeypatch.setattr(ImageFile, "LOAD_TRUNCATED_IMAGES", True)
png.call(cid, 0, 0)
ImageFile.LOAD_TRUNCATED_IMAGES = False
@pytest.mark.parametrize("save_all", (True, False))
def test_specify_bits(self, save_all: bool, tmp_path: Path) -> None:
@ -770,38 +768,31 @@ class TestFilePng:
im.seek(1)
@pytest.mark.parametrize("buffer", (True, False))
def test_save_stdout(self, buffer: bool) -> None:
old_stdout = sys.stdout
def test_save_stdout(self, buffer: bool, monkeypatch: pytest.MonkeyPatch) -> None:
class MyStdOut:
buffer = BytesIO()
mystdout: MyStdOut | BytesIO = MyStdOut() if buffer else BytesIO()
sys.stdout = mystdout
monkeypatch.setattr(sys, "stdout", mystdout)
with Image.open(TEST_PNG_FILE) as im:
im.save(sys.stdout, "PNG")
# Reset stdout
sys.stdout = old_stdout
if isinstance(mystdout, MyStdOut):
mystdout = mystdout.buffer
with Image.open(mystdout) as reloaded:
assert_image_equal_tofile(reloaded, TEST_PNG_FILE)
def test_truncated_end_chunk(self) -> None:
def test_truncated_end_chunk(self, monkeypatch: pytest.MonkeyPatch) -> None:
with Image.open("Tests/images/truncated_end_chunk.png") as im:
with pytest.raises(OSError):
im.load()
ImageFile.LOAD_TRUNCATED_IMAGES = True
try:
with Image.open("Tests/images/truncated_end_chunk.png") as im:
assert_image_equal_tofile(im, "Tests/images/hopper.png")
finally:
ImageFile.LOAD_TRUNCATED_IMAGES = False
monkeypatch.setattr(ImageFile, "LOAD_TRUNCATED_IMAGES", True)
with Image.open("Tests/images/truncated_end_chunk.png") as im:
assert_image_equal_tofile(im, "Tests/images/hopper.png")
@pytest.mark.skipif(is_win32(), reason="Requires Unix or macOS")
@ -810,11 +801,11 @@ class TestTruncatedPngPLeaks(PillowLeakTestCase):
mem_limit = 2 * 1024 # max increase in K
iterations = 100 # Leak is 56k/iteration, this will leak 5.6megs
def test_leak_load(self) -> None:
def test_leak_load(self, monkeypatch: pytest.MonkeyPatch) -> None:
with open("Tests/images/hopper.png", "rb") as f:
DATA = BytesIO(f.read(16 * 1024))
ImageFile.LOAD_TRUNCATED_IMAGES = True
monkeypatch.setattr(ImageFile, "LOAD_TRUNCATED_IMAGES", True)
with Image.open(DATA) as im:
im.load()
@ -822,7 +813,4 @@ class TestTruncatedPngPLeaks(PillowLeakTestCase):
with Image.open(DATA) as im:
im.load()
try:
self._test_leak(core)
finally:
ImageFile.LOAD_TRUNCATED_IMAGES = False
self._test_leak(core)
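
The save-to-stdout hunks above use monkeypatch to swap sys.stdout for an in-memory object, so the real stream never has to be restored by hand. A sketch of that idea with a plain BytesIO standing in for stdout; names are illustrative:

from __future__ import annotations

import sys
from io import BytesIO

import pytest

from PIL import Image


def test_save_to_stdout(monkeypatch: pytest.MonkeyPatch) -> None:
    fake_stdout = BytesIO()
    # monkeypatch puts the original sys.stdout back after the test
    monkeypatch.setattr(sys, "stdout", fake_stdout)

    with Image.open("Tests/images/hopper.png") as im:
        im.save(sys.stdout, "PNG")

    fake_stdout.seek(0)
    with Image.open(fake_stdout) as reloaded:
        assert reloaded.format == "PNG"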


@ -49,7 +49,7 @@ def test_sanity() -> None:
(b"P5 3 1 257 \x00\x00\x00\x80\x01\x01", "I", (0, 32640, 65535)),
# P6 with maxval < 255
(
b"P6 3 1 17 \x00\x01\x02\x08\x09\x0A\x0F\x10\x11",
b"P6 3 1 17 \x00\x01\x02\x08\x09\x0a\x0f\x10\x11",
"RGB",
(
(0, 15, 30),
@ -60,7 +60,7 @@ def test_sanity() -> None:
# P6 with maxval > 255
(
b"P6 3 1 257 \x00\x00\x00\x01\x00\x02"
b"\x00\x80\x00\x81\x00\x82\x01\x00\x01\x01\xFF\xFF",
b"\x00\x80\x00\x81\x00\x82\x01\x00\x01\x01\xff\xff",
"RGB",
(
(0, 1, 2),
@ -79,6 +79,7 @@ def test_arbitrary_maxval(
assert im.mode == mode
px = im.load()
assert px is not None
assert tuple(px[x, 0] for x in range(3)) == pixels
@ -367,22 +368,18 @@ def test_mimetypes(tmp_path: Path) -> None:
@pytest.mark.parametrize("buffer", (True, False))
def test_save_stdout(buffer: bool) -> None:
old_stdout = sys.stdout
def test_save_stdout(buffer: bool, monkeypatch: pytest.MonkeyPatch) -> None:
class MyStdOut:
buffer = BytesIO()
mystdout: MyStdOut | BytesIO = MyStdOut() if buffer else BytesIO()
sys.stdout = mystdout
monkeypatch.setattr(sys, "stdout", mystdout)
with Image.open(TEST_FILE) as im:
im.save(sys.stdout, "PPM")
# Reset stdout
sys.stdout = old_stdout
if isinstance(mystdout, MyStdOut):
mystdout = mystdout.buffer
with Image.open(mystdout) as reloaded:


@ -25,16 +25,18 @@ def test_sanity() -> None:
@pytest.mark.skipif(is_pypy(), reason="Requires CPython")
def test_unclosed_file() -> None:
def open() -> None:
def open_test_image() -> None:
im = Image.open(test_file)
im.load()
with pytest.warns(ResourceWarning):
open()
open_test_image()
def test_closed_file() -> None:
with warnings.catch_warnings():
warnings.simplefilter("error")
im = Image.open(test_file)
im.load()
im.close()
@ -42,6 +44,8 @@ def test_closed_file() -> None:
def test_context_manager() -> None:
with warnings.catch_warnings():
warnings.simplefilter("error")
with Image.open(test_file) as im:
im.load()


@ -7,7 +7,7 @@ from pathlib import Path
import pytest
from PIL import Image, ImageSequence, SpiderImagePlugin
from PIL import Image, SpiderImagePlugin
from .helper import assert_image_equal, hopper, is_pypy
@ -24,16 +24,18 @@ def test_sanity() -> None:
@pytest.mark.skipif(is_pypy(), reason="Requires CPython")
def test_unclosed_file() -> None:
def open() -> None:
def open_test_image() -> None:
im = Image.open(TEST_FILE)
im.load()
with pytest.warns(ResourceWarning):
open()
open_test_image()
def test_closed_file() -> None:
with warnings.catch_warnings():
warnings.simplefilter("error")
im = Image.open(TEST_FILE)
im.load()
im.close()
@ -41,6 +43,8 @@ def test_closed_file() -> None:
def test_context_manager() -> None:
with warnings.catch_warnings():
warnings.simplefilter("error")
with Image.open(TEST_FILE) as im:
im.load()
@ -149,8 +153,8 @@ def test_nonstack_file() -> None:
def test_nonstack_dos() -> None:
with Image.open(TEST_FILE) as im:
for i, frame in enumerate(ImageSequence.Iterator(im)):
assert i <= 1, "Non-stack DOS file test failed"
with pytest.raises(EOFError):
im.seek(0)
# for issue #4093
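
The close and context-manager hunks above turn warnings into errors with warnings.simplefilter("error"), so an unexpected ResourceWarning fails the test instead of passing silently. A minimal sketch of that check; the test name and path are placeholders:

from __future__ import annotations

import warnings

from PIL import Image


def test_close_emits_no_warning() -> None:
    with warnings.catch_warnings():
        # escalate every warning, including ResourceWarning, to an error
        warnings.simplefilter("error")

        im = Image.open("Tests/images/hopper.png")
        im.load()
        im.close()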


@ -1,10 +1,11 @@
from __future__ import annotations
import io
import os
import pytest
from PIL import Image, SunImagePlugin
from PIL import Image, SunImagePlugin, _binary
from .helper import assert_image_equal_tofile, assert_image_similar, hopper
@ -33,6 +34,60 @@ def test_im1() -> None:
assert_image_equal_tofile(im, "Tests/images/sunraster.im1.png")
def _sun_header(
depth: int = 0, file_type: int = 0, palette_length: int = 0
) -> io.BytesIO:
return io.BytesIO(
_binary.o32be(0x59A66A95)
+ b"\x00" * 8
+ _binary.o32be(depth)
+ b"\x00" * 4
+ _binary.o32be(file_type)
+ b"\x00" * 4
+ _binary.o32be(palette_length)
)
def test_unsupported_mode_bit_depth() -> None:
with pytest.raises(SyntaxError, match="Unsupported Mode/Bit Depth"):
with SunImagePlugin.SunImageFile(_sun_header()):
pass
def test_unsupported_color_palette_length() -> None:
with pytest.raises(SyntaxError, match="Unsupported Color Palette Length"):
with SunImagePlugin.SunImageFile(_sun_header(depth=1, palette_length=1025)):
pass
def test_unsupported_palette_type() -> None:
with pytest.raises(SyntaxError, match="Unsupported Palette Type"):
with SunImagePlugin.SunImageFile(_sun_header(depth=1, palette_length=1)):
pass
def test_unsupported_file_type() -> None:
with pytest.raises(SyntaxError, match="Unsupported Sun Raster file type"):
with SunImagePlugin.SunImageFile(_sun_header(depth=1, file_type=6)):
pass
@pytest.mark.skipif(
not os.path.exists(EXTRA_DIR), reason="Extra image files not installed"
)
def test_rgbx() -> None:
with open(os.path.join(EXTRA_DIR, "32bpp.ras"), "rb") as fp:
data = fp.read()
# Set file type to 3
data = data[:20] + _binary.o32be(3) + data[24:]
with Image.open(io.BytesIO(data)) as im:
r, g, b = im.split()
im = Image.merge("RGB", (b, g, r))
assert_image_equal_tofile(im, os.path.join(EXTRA_DIR, "32bpp.png"))
@pytest.mark.skipif(
not os.path.exists(EXTRA_DIR), reason="Extra image files not installed"
)


@ -1,6 +1,7 @@
from __future__ import annotations
import warnings
from pathlib import Path
import pytest
@ -29,6 +30,22 @@ def test_sanity(codec: str, test_path: str, format: str) -> None:
assert im.format == format
def test_unexpected_end(tmp_path: Path) -> None:
tmpfile = str(tmp_path / "temp.tar")
with open(tmpfile, "w"):
pass
with pytest.raises(OSError, match="unexpected end of tar file"):
with TarIO.TarIO(tmpfile, "test"):
pass
def test_cannot_find_subfile() -> None:
with pytest.raises(OSError, match="cannot find subfile"):
with TarIO.TarIO(TEST_TAR_FILE, "test"):
pass
@pytest.mark.skipif(is_pypy(), reason="Requires CPython")
def test_unclosed_file() -> None:
with pytest.warns(ResourceWarning):
@ -37,11 +54,15 @@ def test_unclosed_file() -> None:
def test_close() -> None:
with warnings.catch_warnings():
warnings.simplefilter("error")
tar = TarIO.TarIO(TEST_TAR_FILE, "hopper.jpg")
tar.close()
def test_contextmanager() -> None:
with warnings.catch_warnings():
warnings.simplefilter("error")
with TarIO.TarIO(TEST_TAR_FILE, "hopper.jpg"):
pass


@ -72,6 +72,7 @@ def test_palette_depth_8(tmp_path: Path) -> None:
def test_palette_depth_16(tmp_path: Path) -> None:
with Image.open("Tests/images/p_16.tga") as im:
assert im.palette is not None
assert im.palette.mode == "RGBA"
assert_image_equal_tofile(im.convert("RGBA"), "Tests/images/p_16.png")
@ -213,10 +214,14 @@ def test_save_orientation(tmp_path: Path) -> None:
def test_horizontal_orientations() -> None:
# These images have been manually hexedited to have the relevant orientations
with Image.open("Tests/images/rgb32rle_top_right.tga") as im:
assert im.load()[90, 90][:3] == (0, 0, 0)
px = im.load()
assert px is not None
assert px[90, 90][:3] == (0, 0, 0)
with Image.open("Tests/images/rgb32rle_bottom_right.tga") as im:
assert im.load()[90, 90][:3] == (0, 255, 0)
px = im.load()
assert px is not None
assert px[90, 90][:3] == (0, 255, 0)
def test_save_rle(tmp_path: Path) -> None:


@ -63,15 +63,17 @@ class TestFileTiff:
@pytest.mark.skipif(is_pypy(), reason="Requires CPython")
def test_unclosed_file(self) -> None:
def open() -> None:
def open_test_image() -> None:
im = Image.open("Tests/images/multipage.tiff")
im.load()
with pytest.warns(ResourceWarning):
open()
open_test_image()
def test_closed_file(self) -> None:
with warnings.catch_warnings():
warnings.simplefilter("error")
im = Image.open("Tests/images/multipage.tiff")
im.load()
im.close()
@ -88,6 +90,8 @@ class TestFileTiff:
def test_context_manager(self) -> None:
with warnings.catch_warnings():
warnings.simplefilter("error")
with Image.open("Tests/images/multipage.tiff") as im:
im.load()
@ -108,13 +112,22 @@ class TestFileTiff:
assert_image_equal_tofile(im, "Tests/images/hopper.tif")
with Image.open("Tests/images/hopper_bigtiff.tif") as im:
# The data type of this file's StripOffsets tag is LONG8,
# which is not yet supported for offset data when saving multiple frames.
del im.tag_v2[273]
outfile = str(tmp_path / "temp.tif")
im.save(outfile, save_all=True, append_images=[im], tiffinfo=im.tag_v2)
def test_bigtiff_save(self, tmp_path: Path) -> None:
outfile = str(tmp_path / "temp.tif")
im = hopper()
im.save(outfile, big_tiff=True)
with Image.open(outfile) as reloaded:
assert reloaded.tag_v2._bigtiff is True
im.save(outfile, save_all=True, append_images=[im], big_tiff=True)
with Image.open(outfile) as reloaded:
assert reloaded.tag_v2._bigtiff is True
def test_seek_too_large(self) -> None:
with pytest.raises(ValueError, match="Unable to seek to frame"):
Image.open("Tests/images/seek_too_large.tif")
@ -732,6 +745,53 @@ class TestFileTiff:
with Image.open(mp) as reread:
assert reread.n_frames == 3
def test_fixoffsets(self) -> None:
b = BytesIO(b"II\x2a\x00\x00\x00\x00\x00")
with TiffImagePlugin.AppendingTiffWriter(b) as a:
b.seek(0)
a.fixOffsets(1, isShort=True)
b.seek(0)
a.fixOffsets(1, isLong=True)
# Neither short nor long
b.seek(0)
with pytest.raises(RuntimeError):
a.fixOffsets(1)
b = BytesIO(b"II\x2a\x00\x00\x00\x00\x00")
with TiffImagePlugin.AppendingTiffWriter(b) as a:
a.offsetOfNewPage = 2**16
b.seek(0)
a.fixOffsets(1, isShort=True)
b = BytesIO(b"II\x2b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00")
with TiffImagePlugin.AppendingTiffWriter(b) as a:
a.offsetOfNewPage = 2**32
b.seek(0)
a.fixOffsets(1, isShort=True)
b.seek(0)
a.fixOffsets(1, isLong=True)
def test_appending_tiff_writer_writelong(self) -> None:
data = b"II\x2a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b = BytesIO(data)
with TiffImagePlugin.AppendingTiffWriter(b) as a:
a.seek(-4, os.SEEK_CUR)
a.writeLong(2**32 - 1)
assert b.getvalue() == data[:-4] + b"\xff\xff\xff\xff"
def test_appending_tiff_writer_rewritelastshorttolong(self) -> None:
data = b"II\x2a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b = BytesIO(data)
with TiffImagePlugin.AppendingTiffWriter(b) as a:
a.seek(-2, os.SEEK_CUR)
a.rewriteLastShortToLong(2**32 - 1)
assert b.getvalue() == data[:-4] + b"\xff\xff\xff\xff"
def test_saving_icc_profile(self, tmp_path: Path) -> None:
# Tests saving TIFF with icc_profile set.
# At the time of writing this will only work for non-compressed tiffs
@ -881,11 +941,10 @@ class TestFileTiff:
@pytest.mark.timeout(6)
@pytest.mark.filterwarnings("ignore:Truncated File Read")
def test_timeout(self) -> None:
def test_timeout(self, monkeypatch: pytest.MonkeyPatch) -> None:
with Image.open("Tests/images/timeout-6646305047838720") as im:
ImageFile.LOAD_TRUNCATED_IMAGES = True
monkeypatch.setattr(ImageFile, "LOAD_TRUNCATED_IMAGES", True)
im.load()
ImageFile.LOAD_TRUNCATED_IMAGES = False
@pytest.mark.parametrize(
"test_file",


@ -21,7 +21,11 @@ def test_open() -> None:
def test_load() -> None:
with WalImageFile.open(TEST_FILE) as im:
assert im.load()[0, 0] == 122
px = im.load()
assert px is not None
assert px[0, 0] == 122
# Test again now that it has already been loaded once
assert im.load()[0, 0] == 122
px = im.load()
assert px is not None
assert px[0, 0] == 122

Some files were not shown because too many files have changed in this diff Show More