Mirror of https://github.com/python-pillow/Pillow.git (synced 2025-02-23 07:10:33 +03:00)

Commit 56a72a3da4: Merge branch 'main' into zlib-ng-with-sip
@@ -1,99 +0,0 @@
skip_commits:
  files:
  - ".github/**/*"
  - ".gitmodules"
  - "docs/**/*"
  - "wheels/**/*"

version: '{build}'
clone_folder: c:\pillow
init:
- ECHO %PYTHON%
#- ps: iex ((new-object net.webclient).DownloadString('https://raw.githubusercontent.com/appveyor/ci/master/scripts/enable-rdp.ps1'))
# Uncomment previous line to get RDP access during the build.

environment:
  COVERAGE_CORE: sysmon
  EXECUTABLE: python.exe
  TEST_OPTIONS:
  DEPLOY: YES
  matrix:
  - PYTHON: C:/Python313
    ARCHITECTURE: x86
    APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2022
  - PYTHON: C:/Python39-x64
    ARCHITECTURE: AMD64
    APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2019

install:
- '%PYTHON%\%EXECUTABLE% --version'
- '%PYTHON%\%EXECUTABLE% -m pip install --upgrade pip'
- curl -fsSL -o pillow-test-images.zip https://github.com/python-pillow/test-images/archive/main.zip
- 7z x pillow-test-images.zip -oc:\
- xcopy /S /Y c:\test-images-main\* c:\pillow\tests\images
- curl -fsSL -o nasm-win64.zip https://raw.githubusercontent.com/python-pillow/pillow-depends/main/nasm-2.16.03-win64.zip
- 7z x nasm-win64.zip -oc:\
- choco install ghostscript --version=10.4.0
- path c:\nasm-2.16.03;C:\Program Files\gs\gs10.04.0\bin;%PATH%
- cd c:\pillow\winbuild\
- ps: |
    c:\python39\python.exe c:\pillow\winbuild\build_prepare.py -v --depends=C:\pillow-depends\
    c:\pillow\winbuild\build\build_dep_all.cmd
    $host.SetShouldExit(0)
- path C:\pillow\winbuild\build\bin;%PATH%

build_script:
- cd c:\pillow
- winbuild\build\build_env.cmd
- '%PYTHON%\%EXECUTABLE% -m pip install -v -C raqm=vendor -C fribidi=vendor .'
- '%PYTHON%\%EXECUTABLE% selftest.py --installed'

test_script:
- cd c:\pillow
- '%PYTHON%\%EXECUTABLE% -m pip install pytest pytest-cov pytest-timeout defusedxml ipython numpy olefile pyroma'
- c:\"Program Files (x86)"\"Windows Kits"\10\Debuggers\x86\gflags.exe /p /enable %PYTHON%\%EXECUTABLE%
- path %PYTHON%;%PATH%
- .ci\test.cmd

after_test:
- curl -Os https://uploader.codecov.io/latest/windows/codecov.exe
- .\codecov.exe --file coverage.xml --name %PYTHON% --flags AppVeyor

matrix:
  fast_finish: true

cache:
- '%LOCALAPPDATA%\pip\Cache'

artifacts:
- path: pillow\*.egg
  name: egg
- path: pillow\*.whl
  name: wheel

before_deploy:
- cd c:\pillow
- '%PYTHON%\%EXECUTABLE% -m pip wheel -v -C raqm=vendor -C fribidi=vendor .'
- ps: Get-ChildItem .\*.whl | % { Push-AppveyorArtifact $_.FullName -FileName $_.Name }

deploy:
  provider: S3
  region: us-west-2
  access_key_id: AKIAIRAXC62ZNTVQJMOQ
  secret_access_key:
    secure: Hwb6klTqtBeMgxAjRoDltiiqpuH8xbwD4UooDzBSiCWXjuFj1lyl4kHgHwTCCGqi
  bucket: pillow-nightly
  folder: win/$(APPVEYOR_BUILD_NUMBER)/
  artifact: /.*egg|wheel/
  on:
    APPVEYOR_REPO_NAME: python-pillow/Pillow
    branch: main
    deploy: YES

# Uncomment the following lines to get RDP access after the build/test and block for
# up to the timeout limit (~1hr)
#
#on_finish:
#- ps: $blockRdp = $true; iex ((new-object net.webclient).DownloadString('https://raw.githubusercontent.com/appveyor/ci/master/scripts/enable-rdp.ps1'))
@@ -2,8 +2,4 @@
# gather the coverage data
python3 -m pip install coverage
if [[ $MATRIX_DOCKER ]]; then
  python3 -m coverage xml --ignore-errors
else
  python3 -m coverage xml
fi
python3 -m coverage xml
@@ -3,8 +3,5 @@
set -e

python3 -m coverage erase
if [ $(uname) == "Darwin" ]; then
  export CPPFLAGS="-I/usr/local/miniconda/include";
fi
make clean
make install-coverage
.github/CONTRIBUTING.md
@@ -9,7 +9,7 @@ Please send a pull request to the `main` branch. Please include [documentation](
- Fork the Pillow repository.
- Create a branch from `main`.
- Develop bug fixes, features, tests, etc.
- Run the test suite. You can enable GitHub Actions (https://github.com/MY-USERNAME/Pillow/actions) and [AppVeyor](https://ci.appveyor.com/projects/new) on your repo to catch test failures prior to the pull request, and [Codecov](https://codecov.io/gh) to see if the changed code is covered by tests.
- Run the test suite. You can enable GitHub Actions (https://github.com/MY-USERNAME/Pillow/actions) on your repo to catch test failures prior to the pull request, and [Codecov](https://codecov.io/gh) to see if the changed code is covered by tests.
- Create a pull request to pull the changes from your branch to the Pillow `main`.

### Guidelines

@@ -17,7 +17,7 @@ Please send a pull request to the `main` branch. Please include [documentation](
- Separate code commits from reformatting commits.
- Provide tests for any newly added code.
- Follow PEP 8.
- When committing only documentation changes please include `[ci skip]` in the commit message to avoid running tests on AppVeyor.
- When committing only documentation changes please include `[ci skip]` in the commit message to avoid running extra tests.
- Include [release notes](https://github.com/python-pillow/Pillow/tree/main/docs/releasenotes) as needed or appropriate with your bug fixes, feature additions and tests.

## Reporting Issues
.github/mergify.yml
@@ -9,7 +9,6 @@ pull_request_rules:
      - status-success=Windows Test Successful
      - status-success=MinGW
      - status-success=Cygwin Test Successful
      - status-success=continuous-integration/appveyor/pr
    actions:
      merge:
        method: merge
.github/workflows/macos-install.sh
@@ -10,15 +10,11 @@ brew install \
    ghostscript \
    jpeg-turbo \
    libimagequant \
    libraqm \
    libtiff \
    little-cms2 \
    openjpeg \
    webp
if [[ "$ImageOS" == "macos13" ]]; then
    brew install --ignore-dependencies libraqm
else
    brew install libraqm
fi
export PKG_CONFIG_PATH="/usr/local/opt/openblas/lib/pkgconfig"

python3 -m pip install coverage
.github/workflows/test-cygwin.yml
@@ -52,7 +52,7 @@ jobs:
          persist-credentials: false

      - name: Install Cygwin
        uses: cygwin/cygwin-install-action@v4
        uses: cygwin/cygwin-install-action@v5
        with:
          packages: >
            gcc-g++
.github/workflows/test-docker.yml
@@ -29,16 +29,12 @@ concurrency:
jobs:
  build:
    runs-on: ubuntu-latest
    runs-on: ${{ matrix.os }}
    strategy:
      fail-fast: false
      matrix:
        os: ["ubuntu-latest"]
        docker: [
          # Run slower jobs first to give them a headstart and reduce waiting time
          ubuntu-22.04-jammy-arm64v8,
          ubuntu-24.04-noble-ppc64le,
          ubuntu-24.04-noble-s390x,
          # Then run the remainder
          alpine,
          amazon-2-amd64,
          amazon-2023-amd64,
@@ -55,12 +51,17 @@ jobs:
        ]
        dockerTag: [main]
        include:
          - docker: "ubuntu-22.04-jammy-arm64v8"
            qemu-arch: "aarch64"
          - docker: "ubuntu-24.04-noble-ppc64le"
            os: "ubuntu-22.04"
            qemu-arch: "ppc64le"
            dockerTag: main
          - docker: "ubuntu-24.04-noble-s390x"
            os: "ubuntu-22.04"
            qemu-arch: "s390x"
            dockerTag: main
          - docker: "ubuntu-24.04-noble-arm64v8"
            os: "ubuntu-24.04-arm"
            dockerTag: main

    name: ${{ matrix.docker }}

@@ -90,15 +91,15 @@ jobs:
      - name: After success
        run: |
          PATH="$PATH:~/.local/bin"
          docker start pillow_container
          sudo docker cp pillow_container:/Pillow /Pillow
          sudo chown -R runner /Pillow
          pil_path=`docker exec pillow_container /vpy3/bin/python -c 'import os, PIL;print(os.path.realpath(os.path.dirname(PIL.__file__)))'`
          docker stop pillow_container
          sudo mkdir -p $pil_path
          sudo cp src/PIL/*.py $pil_path
          cd /Pillow
          .ci/after_success.sh
        env:
          MATRIX_DOCKER: ${{ matrix.docker }}

      - name: Upload coverage
        uses: codecov/codecov-action@v5
.github/workflows/test-mingw.yml
@@ -60,15 +60,14 @@ jobs:
            mingw-w64-x86_64-gcc \
            mingw-w64-x86_64-ghostscript \
            mingw-w64-x86_64-lcms2 \
            mingw-w64-x86_64-libimagequant \
            mingw-w64-x86_64-libjpeg-turbo \
            mingw-w64-x86_64-libraqm \
            mingw-w64-x86_64-libtiff \
            mingw-w64-x86_64-libwebp \
            mingw-w64-x86_64-openjpeg2 \
            mingw-w64-x86_64-python3-numpy \
            mingw-w64-x86_64-python3-olefile \
            mingw-w64-x86_64-python3-pip \
            mingw-w64-x86_64-python-numpy \
            mingw-w64-x86_64-python-olefile \
            mingw-w64-x86_64-python-pip \
            mingw-w64-x86_64-python-pytest \
            mingw-w64-x86_64-python-pytest-cov \
            mingw-w64-x86_64-python-pytest-timeout \
.github/workflows/test-windows.yml
@@ -31,15 +31,20 @@ env:
jobs:
  build:
    runs-on: windows-latest
    runs-on: ${{ matrix.os }}
    strategy:
      fail-fast: false
      matrix:
        python-version: ["pypy3.10", "3.9", "3.10", "3.11", "3.12", "3.13"]
        python-version: ["pypy3.10", "3.10", "3.11", "3.12", "3.13", "3.14"]
        architecture: ["x64"]
        os: ["windows-latest"]
        include:
          # Test the oldest Python on 32-bit
          - { python-version: "3.9", architecture: "x86", os: "windows-2019" }

    timeout-minutes: 30

    name: Python ${{ matrix.python-version }}
    name: Python ${{ matrix.python-version }} (${{ matrix.architecture }})

    steps:
      - name: Checkout Pillow
@@ -67,6 +72,7 @@ jobs:
        with:
          python-version: ${{ matrix.python-version }}
          allow-prereleases: true
          architecture: ${{ matrix.architecture }}
          cache: pip
          cache-dependency-path: ".github/workflows/test-windows.yml"

@@ -78,7 +84,7 @@ jobs:
          python3 -m pip install --upgrade pip

      - name: Install CPython dependencies
        if: "!contains(matrix.python-version, 'pypy')"
        if: "!contains(matrix.python-version, 'pypy') && matrix.architecture != 'x86'"
        run: |
          python3 -m pip install PyQt6
.github/workflows/test.yml
@@ -42,6 +42,7 @@ jobs:
        ]
        python-version: [
          "pypy3.10",
          "3.14",
          "3.13t",
          "3.13",
          "3.12",
.github/workflows/wheels-dependencies.sh
@@ -38,11 +38,11 @@ ARCHIVE_SDIR=pillow-depends-main
# Package versions for fresh source builds
FREETYPE_VERSION=2.13.3
HARFBUZZ_VERSION=10.1.0
LIBPNG_VERSION=1.6.44
HARFBUZZ_VERSION=10.2.0
LIBPNG_VERSION=1.6.46
JPEGTURBO_VERSION=3.1.0
OPENJPEG_VERSION=2.5.3
XZ_VERSION=5.6.3
XZ_VERSION=5.6.4
TIFF_VERSION=4.6.0
LCMS2_VERSION=2.16
ZLIB_NG_VERSION=2.2.3
.github/workflows/wheels-test.ps1
@@ -11,6 +11,9 @@ if ("$venv" -like "*\cibw-run-*\pp*-win_amd64\*") {
$env:path += ";$pillow\winbuild\build\bin\"
& "$venv\Scripts\activate.ps1"
& reg add "HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Image File Execution Options\python.exe" /v "GlobalFlag" /t REG_SZ /d "0x02000000" /f
if ("$venv" -like "*\cibw-run-*-win_amd64\*") {
    & python -m pip install numpy
}
cd $pillow
& python -VV
if (!$?) { exit $LASTEXITCODE }
.github/workflows/wheels.yml
@@ -13,6 +13,7 @@ on:
    paths:
      - ".ci/requirements-cibw.txt"
      - ".github/workflows/wheel*"
      - "pyproject.toml"
      - "setup.py"
      - "wheels/*"
      - "winbuild/build_prepare.py"
@@ -23,6 +24,7 @@ on:
    paths:
      - ".ci/requirements-cibw.txt"
      - ".github/workflows/wheel*"
      - "pyproject.toml"
      - "setup.py"
      - "wheels/*"
      - "winbuild/build_prepare.py"
@@ -40,62 +42,7 @@ env:
  FORCE_COLOR: 1

jobs:
  build-1-QEMU-emulated-wheels:
    if: github.event_name != 'schedule'
    name: aarch64 ${{ matrix.python-version }} ${{ matrix.spec }}
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        python-version:
          - pp310
          - cp3{9,10,11}
          - cp3{12,13}
        spec:
          - manylinux2014
          - manylinux_2_28
          - musllinux
        exclude:
          - { python-version: pp310, spec: musllinux }

    steps:
      - uses: actions/checkout@v4
        with:
          persist-credentials: false
          submodules: true

      - uses: actions/setup-python@v5
        with:
          python-version: "3.x"

      # https://github.com/docker/setup-qemu-action
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3

      - name: Install cibuildwheel
        run: |
          python3 -m pip install -r .ci/requirements-cibw.txt

      - name: Build wheels
        run: |
          python3 -m cibuildwheel --output-dir wheelhouse
        env:
          # Build only the currently selected Linux architecture (so we can
          # parallelise for speed).
          CIBW_ARCHS: "aarch64"
          # Likewise, select only one Python version per job to speed this up.
          CIBW_BUILD: "${{ matrix.python-version }}-${{ matrix.spec == 'musllinux' && 'musllinux' || 'manylinux' }}*"
          CIBW_ENABLE: cpython-prerelease
          # Extra options for manylinux.
          CIBW_MANYLINUX_AARCH64_IMAGE: ${{ matrix.spec }}
          CIBW_MANYLINUX_PYPY_AARCH64_IMAGE: ${{ matrix.spec }}

      - uses: actions/upload-artifact@v4
        with:
          name: dist-qemu-${{ matrix.python-version }}-${{ matrix.spec }}
          path: ./wheelhouse/*.whl

  build-2-native-wheels:
  build-native-wheels:
    if: github.event_name != 'schedule' || github.repository_owner == 'python-pillow'
    name: ${{ matrix.name }}
    runs-on: ${{ matrix.os }}
@@ -130,6 +77,14 @@ jobs:
            cibw_arch: x86_64
            build: "*manylinux*"
            manylinux: "manylinux_2_28"
          - name: "manylinux2014 and musllinux aarch64"
            os: ubuntu-24.04-arm
            cibw_arch: aarch64
          - name: "manylinux_2_28 aarch64"
            os: ubuntu-24.04-arm
            cibw_arch: aarch64
            build: "*manylinux*"
            manylinux: "manylinux_2_28"
    steps:
      - uses: actions/checkout@v4
        with:
@@ -150,7 +105,9 @@ jobs:
        env:
          CIBW_ARCHS: ${{ matrix.cibw_arch }}
          CIBW_BUILD: ${{ matrix.build }}
          CIBW_ENABLE: cpython-prerelease cpython-freethreading
          CIBW_ENABLE: cpython-prerelease cpython-freethreading pypy
          CIBW_MANYLINUX_AARCH64_IMAGE: ${{ matrix.manylinux }}
          CIBW_MANYLINUX_PYPY_AARCH64_IMAGE: ${{ matrix.manylinux }}
          CIBW_MANYLINUX_PYPY_X86_64_IMAGE: ${{ matrix.manylinux }}
          CIBW_MANYLINUX_X86_64_IMAGE: ${{ matrix.manylinux }}
          CIBW_SKIP: pp39-*
@@ -227,7 +184,7 @@ jobs:
          CIBW_ARCHS: ${{ matrix.cibw_arch }}
          CIBW_BEFORE_ALL: "{package}\\winbuild\\build\\build_dep_all.cmd"
          CIBW_CACHE_PATH: "C:\\cibw"
          CIBW_ENABLE: cpython-prerelease cpython-freethreading
          CIBW_ENABLE: cpython-prerelease cpython-freethreading pypy
          CIBW_SKIP: pp39-*
          CIBW_TEST_SKIP: "*-win_arm64"
          CIBW_TEST_COMMAND: 'docker run --rm
@@ -273,7 +230,7 @@ jobs:

  scientific-python-nightly-wheels-publish:
    if: github.repository_owner == 'python-pillow' && (github.event_name == 'schedule' || github.event_name == 'workflow_dispatch')
    needs: [build-2-native-wheels, windows]
    needs: [build-native-wheels, windows]
    runs-on: ubuntu-latest
    name: Upload wheels to scientific-python-nightly-wheels
    steps:
@@ -290,7 +247,7 @@ jobs:

  pypi-publish:
    if: github.repository_owner == 'python-pillow' && github.event_name == 'push' && startsWith(github.ref, 'refs/tags')
    needs: [build-1-QEMU-emulated-wheels, build-2-native-wheels, windows, sdist]
    needs: [build-native-wheels, windows, sdist]
    runs-on: ubuntu-latest
    name: Upload release to PyPI
    environment:
@@ -1,17 +1,17 @@
repos:
  - repo: https://github.com/astral-sh/ruff-pre-commit
    rev: v0.8.4
    rev: v0.9.4
    hooks:
      - id: ruff
        args: [--exit-non-zero-on-fix]

  - repo: https://github.com/psf/black-pre-commit-mirror
    rev: 24.10.0
    rev: 25.1.0
    hooks:
      - id: black

  - repo: https://github.com/PyCQA/bandit
    rev: 1.8.0
    rev: 1.8.2
    hooks:
      - id: bandit
        args: [--severity-level=high]
@@ -24,7 +24,7 @@ repos:
        exclude: (Makefile$|\.bat$|\.cmake$|\.eps$|\.fits$|\.gd$|\.opt$)

  - repo: https://github.com/pre-commit/mirrors-clang-format
    rev: v19.1.5
    rev: v19.1.7
    hooks:
      - id: clang-format
        types: [c]
@@ -50,14 +50,14 @@ repos:
        exclude: ^.github/.*TEMPLATE|^Tests/(fonts|images)/

  - repo: https://github.com/python-jsonschema/check-jsonschema
    rev: 0.30.0
    rev: 0.31.1
    hooks:
      - id: check-github-workflows
      - id: check-readthedocs
      - id: check-renovate

  - repo: https://github.com/woodruffw/zizmor-pre-commit
    rev: v0.10.0
    rev: v1.3.0
    hooks:
      - id: zizmor

@@ -78,7 +78,7 @@ repos:
        additional_dependencies: [trove-classifiers>=2024.10.12]

  - repo: https://github.com/tox-dev/tox-ini-fmt
    rev: 1.4.1
    rev: 1.5.0
    hooks:
      - id: tox-ini-fmt
@@ -1,5 +1,8 @@
version: 2

sphinx:
  configuration: docs/conf.py

formats: [pdf]

build:
@@ -20,7 +20,6 @@ graft docs
graft _custom_build

# build/src control detritus
exclude .appveyor.yml
exclude .clang-format
exclude .coveragerc
exclude .editorconfig
@@ -42,9 +42,6 @@ As of 2019, Pillow development is
        <a href="https://github.com/python-pillow/Pillow/actions/workflows/test-docker.yml"><img
            alt="GitHub Actions build status (Test Docker)"
            src="https://github.com/python-pillow/Pillow/workflows/Test%20Docker/badge.svg"></a>
        <a href="https://ci.appveyor.com/project/python-pillow/Pillow"><img
            alt="AppVeyor CI build status (Windows)"
            src="https://img.shields.io/appveyor/build/python-pillow/Pillow/main.svg?label=Windows%20build"></a>
        <a href="https://github.com/python-pillow/Pillow/actions/workflows/wheels.yml"><img
            alt="GitHub Actions build status (Wheels)"
            src="https://github.com/python-pillow/Pillow/workflows/Wheels/badge.svg"></a>
@@ -9,7 +9,7 @@ Released quarterly on January 2nd, April 1st, July 1st and October 15th.

* [ ] Open a release ticket e.g. https://github.com/python-pillow/Pillow/issues/3154
* [ ] Develop and prepare release in `main` branch.
* [ ] Check [GitHub Actions](https://github.com/python-pillow/Pillow/actions) and [AppVeyor](https://ci.appveyor.com/project/python-pillow/Pillow) to confirm passing tests in `main` branch.
* [ ] Check [GitHub Actions](https://github.com/python-pillow/Pillow/actions) to confirm passing tests in `main` branch.
* [ ] Check that all the wheel builds pass the tests in the [GitHub Actions "Wheels" workflow](https://github.com/python-pillow/Pillow/actions/workflows/wheels.yml) jobs by manually triggering them.
* [ ] In compliance with [PEP 440](https://peps.python.org/pep-0440/), update version identifier in `src/PIL/_version.py`
* [ ] Run pre-release check via `make release-test` in a freshly cloned repo.

@@ -38,7 +38,7 @@ Released as needed for security, installation or critical bug fixes.
  git checkout -t remotes/origin/5.2.x
  ```
* [ ] Cherry pick individual commits from `main` branch to release branch e.g. `5.2.x`, then `git push`.
* [ ] Check [GitHub Actions](https://github.com/python-pillow/Pillow/actions) and [AppVeyor](https://ci.appveyor.com/project/python-pillow/Pillow) to confirm passing tests in release branch e.g. `5.2.x`.
* [ ] Check [GitHub Actions](https://github.com/python-pillow/Pillow/actions) to confirm passing tests in release branch e.g. `5.2.x`.
* [ ] In compliance with [PEP 440](https://peps.python.org/pep-0440/), update version identifier in `src/PIL/_version.py`
* [ ] Run pre-release check via `make release-test`.
* [ ] Create tag for release e.g.:
@@ -3,19 +3,18 @@ from __future__ import annotations
import zlib
from io import BytesIO

import pytest

from PIL import Image, ImageFile, PngImagePlugin

TEST_FILE = "Tests/images/png_decompression_dos.png"


def test_ignore_dos_text() -> None:
    ImageFile.LOAD_TRUNCATED_IMAGES = True
def test_ignore_dos_text(monkeypatch: pytest.MonkeyPatch) -> None:
    monkeypatch.setattr(ImageFile, "LOAD_TRUNCATED_IMAGES", True)

    try:
        im = Image.open(TEST_FILE)
    with Image.open(TEST_FILE) as im:
        im.load()
    finally:
        ImageFile.LOAD_TRUNCATED_IMAGES = False

    assert isinstance(im, PngImagePlugin.PngImageFile)
    for s in im.text.values():
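The hunk above swaps a manual set-and-restore of the module-level `ImageFile.LOAD_TRUNCATED_IMAGES` flag for pytest's `monkeypatch` fixture, which undoes the change automatically when the test finishes. A minimal sketch of that pattern follows; `"Tests/images/example.png"` is a placeholder path, not a file named in this commit.

```
import pytest
from PIL import Image, ImageFile


def test_load_truncated(monkeypatch: pytest.MonkeyPatch) -> None:
    # monkeypatch restores the original flag even if the body raises,
    # so no try/finally is needed.
    monkeypatch.setattr(ImageFile, "LOAD_TRUNCATED_IMAGES", True)

    with Image.open("Tests/images/example.png") as im:
        im.load()
```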
@@ -140,18 +140,11 @@ def assert_image_similar_tofile(
    filename: str,
    epsilon: float,
    msg: str | None = None,
    mode: str | None = None,
) -> None:
    with Image.open(filename) as img:
        if mode:
            img = img.convert(mode)
        assert_image_similar(a, img, epsilon, msg)


def assert_all_same(items: Sequence[Any], msg: str | None = None) -> None:
    assert items.count(items[0]) == len(items), msg


def assert_not_all_same(items: Sequence[Any], msg: str | None = None) -> None:
    assert items.count(items[0]) != len(items), msg

@@ -327,16 +320,7 @@ def magick_command() -> list[str] | None:
    return None


def on_appveyor() -> bool:
    return "APPVEYOR" in os.environ


def on_github_actions() -> bool:
    return "GITHUB_ACTIONS" in os.environ


def on_ci() -> bool:
    # GitHub Actions and AppVeyor have "CI"
    return "CI" in os.environ
Tests/images/multiline_text_justify.png — new binary file (3.2 KiB), not shown.
@@ -7,7 +7,7 @@ import fuzzers
import packaging
import pytest

from PIL import Image, UnidentifiedImageError, features
from PIL import Image, features
from Tests.helper import skip_unless_feature

if sys.platform.startswith("win32"):
@@ -32,21 +32,17 @@ def test_fuzz_images(path: str) -> None:
            fuzzers.fuzz_image(f.read())
        assert True
    except (
        # Known exceptions from Pillow
        OSError,
        SyntaxError,
        MemoryError,
        ValueError,
        NotImplementedError,
        OverflowError,
    ):
        # Known exceptions that are through from Pillow
        assert True
    except (
        # Known Image.* exceptions
        Image.DecompressionBombError,
        Image.DecompressionBombWarning,
        UnidentifiedImageError,
    ):
        # Known Image.* exceptions
        assert True
    finally:
        fuzzers.disable_decompressionbomb_error()
@@ -19,7 +19,7 @@ except ImportError:
class TestColorLut3DCoreAPI:
    def generate_identity_table(
        self, channels: int, size: int | tuple[int, int, int]
    ) -> tuple[int, int, int, int, list[float]]:
    ) -> tuple[int, tuple[int, int, int], list[float]]:
        if isinstance(size, tuple):
            size_1d, size_2d, size_3d = size
        else:
@@ -39,9 +39,7 @@ class TestColorLut3DCoreAPI:
        ]
        return (
            channels,
            size_1d,
            size_2d,
            size_3d,
            (size_1d, size_2d, size_3d),
            [item for sublist in table for item in sublist],
        )

@@ -89,21 +87,21 @@ class TestColorLut3DCoreAPI:
        with pytest.raises(ValueError, match=r"size1D \* size2D \* size3D"):
            im.im.color_lut_3d(
                "RGB", Image.Resampling.BILINEAR, 3, 2, 2, 2, [0, 0, 0] * 7
                "RGB", Image.Resampling.BILINEAR, 3, (2, 2, 2), [0, 0, 0] * 7
            )

        with pytest.raises(ValueError, match=r"size1D \* size2D \* size3D"):
            im.im.color_lut_3d(
                "RGB", Image.Resampling.BILINEAR, 3, 2, 2, 2, [0, 0, 0] * 9
                "RGB", Image.Resampling.BILINEAR, 3, (2, 2, 2), [0, 0, 0] * 9
            )

        with pytest.raises(TypeError):
            im.im.color_lut_3d(
                "RGB", Image.Resampling.BILINEAR, 3, 2, 2, 2, [0, 0, "0"] * 8
                "RGB", Image.Resampling.BILINEAR, 3, (2, 2, 2), [0, 0, "0"] * 8
            )

        with pytest.raises(TypeError):
            im.im.color_lut_3d("RGB", Image.Resampling.BILINEAR, 3, 2, 2, 2, 16)
            im.im.color_lut_3d("RGB", Image.Resampling.BILINEAR, 3, (2, 2, 2), 16)

    @pytest.mark.parametrize(
        "lut_mode, table_channels, table_size",
@@ -264,7 +262,7 @@ class TestColorLut3DCoreAPI:
        assert_image_equal(
            Image.merge('RGB', im.split()[::-1]),
            im._new(im.im.color_lut_3d('RGB', Image.Resampling.BILINEAR,
                    3, 2, 2, 2, [
                    3, (2, 2, 2), [
                        0, 0, 0, 0, 0, 1,
                        0, 1, 0, 0, 1, 1,
@@ -286,7 +284,7 @@ class TestColorLut3DCoreAPI:
        # fmt: off
        transformed = im._new(im.im.color_lut_3d('RGB', Image.Resampling.BILINEAR,
                              3, 2, 2, 2,
                              3, (2, 2, 2),
                              [
                                  -1, -1, -1, 2, -1, -1,
                                  -1, 2, -1, 2, 2, -1,
@@ -307,7 +305,7 @@ class TestColorLut3DCoreAPI:
        # fmt: off
        transformed = im._new(im.im.color_lut_3d('RGB', Image.Resampling.BILINEAR,
                              3, 2, 2, 2,
                              3, (2, 2, 2),
                              [
                                  -3, -3, -3, 5, -3, -3,
                                  -3, 5, -3, 5, 5, -3,
@@ -12,19 +12,16 @@ ORIGINAL_LIMIT = Image.MAX_IMAGE_PIXELS


class TestDecompressionBomb:
    def teardown_method(self) -> None:
        Image.MAX_IMAGE_PIXELS = ORIGINAL_LIMIT

    def test_no_warning_small_file(self) -> None:
        # Implicit assert: no warning.
        # A warning would cause a failure.
        with Image.open(TEST_FILE):
            pass

    def test_no_warning_no_limit(self) -> None:
    def test_no_warning_no_limit(self, monkeypatch: pytest.MonkeyPatch) -> None:
        # Arrange
        # Turn limit off
        Image.MAX_IMAGE_PIXELS = None
        monkeypatch.setattr(Image, "MAX_IMAGE_PIXELS", None)
        assert Image.MAX_IMAGE_PIXELS is None

        # Act / Assert
@@ -33,18 +30,18 @@ class TestDecompressionBomb:
        with Image.open(TEST_FILE):
            pass

    def test_warning(self) -> None:
    def test_warning(self, monkeypatch: pytest.MonkeyPatch) -> None:
        # Set limit to trigger warning on the test file
        Image.MAX_IMAGE_PIXELS = 128 * 128 - 1
        monkeypatch.setattr(Image, "MAX_IMAGE_PIXELS", 128 * 128 - 1)
        assert Image.MAX_IMAGE_PIXELS == 128 * 128 - 1

        with pytest.warns(Image.DecompressionBombWarning):
            with Image.open(TEST_FILE):
                pass

    def test_exception(self) -> None:
    def test_exception(self, monkeypatch: pytest.MonkeyPatch) -> None:
        # Set limit to trigger exception on the test file
        Image.MAX_IMAGE_PIXELS = 64 * 128 - 1
        monkeypatch.setattr(Image, "MAX_IMAGE_PIXELS", 64 * 128 - 1)
        assert Image.MAX_IMAGE_PIXELS == 64 * 128 - 1

        with pytest.raises(Image.DecompressionBombError):
@@ -66,9 +63,9 @@ class TestDecompressionBomb:
        with pytest.raises(Image.DecompressionBombError):
            im.seek(1)

    def test_exception_gif_zero_width(self) -> None:
    def test_exception_gif_zero_width(self, monkeypatch: pytest.MonkeyPatch) -> None:
        # Set limit to trigger exception on the test file
        Image.MAX_IMAGE_PIXELS = 4 * 64 * 128
        monkeypatch.setattr(Image, "MAX_IMAGE_PIXELS", 4 * 64 * 128)
        assert Image.MAX_IMAGE_PIXELS == 4 * 64 * 128

        with pytest.raises(Image.DecompressionBombError):
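The limits patched above drive Pillow's decompression-bomb guard: opening an image whose pixel count exceeds `Image.MAX_IMAGE_PIXELS` emits a `DecompressionBombWarning`, and exceeding roughly twice the limit raises `DecompressionBombError`. A sketch of the warning case, using the 128x128 `hopper.png` test image only as an illustrative stand-in for the test file referenced above:

```
import pytest
from PIL import Image


def test_bomb_warning(monkeypatch: pytest.MonkeyPatch) -> None:
    # Just below the 128x128 pixel count, so opening the file warns.
    monkeypatch.setattr(Image, "MAX_IMAGE_PIXELS", 128 * 128 - 1)
    with pytest.warns(Image.DecompressionBombWarning):
        with Image.open("Tests/images/hopper.png"):
            pass
```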
@@ -307,13 +307,8 @@ def test_apng_syntax_errors() -> None:
            im.load()

    # we can handle this case gracefully
    exception = None
    with Image.open("Tests/images/apng/syntax_num_frames_low.png") as im:
        try:
            im.seek(im.n_frames - 1)
        except Exception as e:
            exception = e
        assert exception is None

    with pytest.raises(OSError):
        with Image.open("Tests/images/apng/syntax_num_frames_high.png") as im:
@@ -405,13 +400,8 @@ def test_apng_save_split_fdat(tmp_path: Path) -> None:
        append_images=frames,
    )
    with Image.open(test_file) as im:
        exception = None
        try:
            im.seek(im.n_frames - 1)
            im.load()
        except Exception as e:
            exception = e
        assert exception is None


def test_apng_save_duration_loop(tmp_path: Path) -> None:
@@ -35,9 +35,8 @@ def test_sanity() -> None:
    assert im.is_animated


def test_prefix_chunk() -> None:
    ImageFile.LOAD_TRUNCATED_IMAGES = True
    try:
def test_prefix_chunk(monkeypatch: pytest.MonkeyPatch) -> None:
    monkeypatch.setattr(ImageFile, "LOAD_TRUNCATED_IMAGES", True)
    with Image.open(animated_test_file_with_prefix_chunk) as im:
        assert im.mode == "P"
        assert im.size == (320, 200)
@@ -49,8 +48,6 @@ def test_prefix_chunk() -> None:
        assert palette[3:6] == [255, 255, 255]
        assert palette[381:384] == [204, 204, 12]
        assert palette[765:] == [252, 0, 0]
    finally:
        ImageFile.LOAD_TRUNCATED_IMAGES = False


@pytest.mark.skipif(is_pypy(), reason="Requires CPython")
@@ -86,12 +86,12 @@ def test_invalid_file() -> None:
def test_l_mode_transparency() -> None:
    with Image.open("Tests/images/no_palette_with_transparency.gif") as im:
        assert im.mode == "L"
        assert im.load()[0, 0] == 128
        assert im.getpixel((0, 0)) == 128
        assert im.info["transparency"] == 255

        im.seek(1)
        assert im.mode == "L"
        assert im.load()[0, 0] == 128
        assert im.getpixel((0, 0)) == 128


def test_l_mode_after_rgb() -> None:
@@ -109,7 +109,7 @@ def test_palette_not_needed_for_second_frame() -> None:
        assert_image_similar(im, hopper("L").convert("RGB"), 8)


def test_strategy() -> None:
def test_strategy(monkeypatch: pytest.MonkeyPatch) -> None:
    with Image.open("Tests/images/iss634.gif") as im:
        expected_rgb_always = im.convert("RGB")
@@ -119,8 +119,9 @@ def test_strategy() -> None:
        im.seek(1)
        expected_different = im.convert("RGB")

    try:
        GifImagePlugin.LOADING_STRATEGY = GifImagePlugin.LoadingStrategy.RGB_ALWAYS
    monkeypatch.setattr(
        GifImagePlugin, "LOADING_STRATEGY", GifImagePlugin.LoadingStrategy.RGB_ALWAYS
    )
    with Image.open("Tests/images/iss634.gif") as im:
        assert im.mode == "RGB"
        assert_image_equal(im, expected_rgb_always)
@@ -129,8 +130,10 @@ def test_strategy() -> None:
        assert im.mode == "RGBA"
        assert_image_equal(im, expected_rgb_always_rgba)

    GifImagePlugin.LOADING_STRATEGY = (
        GifImagePlugin.LoadingStrategy.RGB_AFTER_DIFFERENT_PALETTE_ONLY
    monkeypatch.setattr(
        GifImagePlugin,
        "LOADING_STRATEGY",
        GifImagePlugin.LoadingStrategy.RGB_AFTER_DIFFERENT_PALETTE_ONLY,
    )
    # Stay in P mode with only a global palette
    with Image.open("Tests/images/chi.gif") as im:
@@ -146,8 +149,6 @@ def test_strategy() -> None:

        im.seek(1)
        assert im.mode == "RGB"
    finally:
        GifImagePlugin.LOADING_STRATEGY = GifImagePlugin.LoadingStrategy.RGB_AFTER_FIRST


def test_optimize() -> None:
@@ -310,7 +311,7 @@ def test_loading_multiple_palettes(path: str, mode: str) -> None:
    with Image.open(path) as im:
        assert im.mode == "P"
        first_frame_colors = im.palette.colors.keys()
        original_color = im.convert("RGB").load()[0, 0]
        original_color = im.convert("RGB").getpixel((0, 0))

        im.seek(1)
        assert im.mode == mode
@@ -318,10 +319,10 @@ def test_loading_multiple_palettes(path: str, mode: str) -> None:
        im = im.convert("RGB")

        # Check a color only from the old palette
        assert im.load()[0, 0] == original_color
        assert im.getpixel((0, 0)) == original_color

        # Check a color from the new palette
        assert im.load()[24, 24] not in first_frame_colors
        assert im.getpixel((24, 24)) not in first_frame_colors


def test_headers_saving_for_animated_gifs(tmp_path: Path) -> None:
@@ -487,8 +488,7 @@ def test_eoferror() -> None:

def test_first_frame_transparency() -> None:
    with Image.open("Tests/images/first_frame_transparency.gif") as im:
        px = im.load()
        assert px[0, 0] == im.info["transparency"]
        assert im.getpixel((0, 0)) == im.info["transparency"]


def test_dispose_none() -> None:
@@ -555,17 +555,15 @@ def test_dispose_background_transparency() -> None:
def test_transparent_dispose(
    loading_strategy: GifImagePlugin.LoadingStrategy,
    expected_colors: tuple[tuple[int | tuple[int, int, int, int], ...]],
    monkeypatch: pytest.MonkeyPatch,
) -> None:
    GifImagePlugin.LOADING_STRATEGY = loading_strategy
    try:
    monkeypatch.setattr(GifImagePlugin, "LOADING_STRATEGY", loading_strategy)
    with Image.open("Tests/images/transparent_dispose.gif") as img:
        for frame in range(3):
            img.seek(frame)
            for x in range(3):
                color = img.getpixel((x, 0))
                assert color == expected_colors[frame][x]
    finally:
        GifImagePlugin.LOADING_STRATEGY = GifImagePlugin.LoadingStrategy.RGB_AFTER_FIRST


def test_dispose_previous() -> None:
@@ -1398,10 +1396,11 @@ def test_lzw_bits() -> None:
    ),
)
def test_extents(
    test_file: str, loading_strategy: GifImagePlugin.LoadingStrategy
    test_file: str,
    loading_strategy: GifImagePlugin.LoadingStrategy,
    monkeypatch: pytest.MonkeyPatch,
) -> None:
    GifImagePlugin.LOADING_STRATEGY = loading_strategy
    try:
    monkeypatch.setattr(GifImagePlugin, "LOADING_STRATEGY", loading_strategy)
    with Image.open("Tests/images/" + test_file) as im:
        assert im.size == (100, 100)
@@ -1414,8 +1413,6 @@ def test_extents(

        im.load()
        assert im.im.size == (150, 150)
    finally:
        GifImagePlugin.LOADING_STRATEGY = GifImagePlugin.LoadingStrategy.RGB_AFTER_FIRST


def test_missing_background() -> None:
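Several hunks above replace `im.load()[x, y]` pixel reads with `im.getpixel((x, y))`. Both return the same value for a given coordinate; `getpixel()` simply avoids keeping the full pixel-access object around. A short sketch, using the repository's 128x128 `hopper.png` purely as an illustrative image:

```
from PIL import Image

with Image.open("Tests/images/hopper.png") as im:
    # The two reads below are equivalent for the same coordinate.
    assert im.getpixel((0, 0)) == im.load()[0, 0]
```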
@@ -243,17 +243,15 @@ def test_draw_reloaded(tmp_path: Path) -> None:
    assert_image_equal_tofile(im, "Tests/images/hopper_draw.ico")


def test_truncated_mask() -> None:
def test_truncated_mask(monkeypatch: pytest.MonkeyPatch) -> None:
    # 1 bpp
    with open("Tests/images/hopper_mask.ico", "rb") as fp:
        data = fp.read()

    ImageFile.LOAD_TRUNCATED_IMAGES = True
    monkeypatch.setattr(ImageFile, "LOAD_TRUNCATED_IMAGES", True)
    data = data[:-3]

    try:
        with Image.open(io.BytesIO(data)) as im:
            with Image.open("Tests/images/hopper_mask.png") as expected:
                assert im.mode == "1"

    # 32 bpp
@@ -265,5 +263,3 @@ def test_truncated_mask() -> None:

        with Image.open(io.BytesIO(data)) as im:
            assert im.mode == "RGB"
    finally:
        ImageFile.LOAD_TRUNCATED_IMAGES = False
@@ -58,10 +58,7 @@ def test_getiptcinfo_fotostation() -> None:

    # Assert
    assert iptc is not None
    for tag in iptc.keys():
        if tag[0] == 240:
            return
    pytest.fail("FotoStation tag not found")
    assert 240 in (tag[0] for tag in iptc.keys()), "FotoStation tag not found"


def test_getiptcinfo_zero_padding() -> None:
@@ -181,7 +181,7 @@ class TestFileJpeg:
        assert test(100, 200) == (100, 200)
        assert test(0) is None  # square pixels

    def test_dpi_jfif_cm(self):
    def test_dpi_jfif_cm(self) -> None:
        with Image.open("Tests/images/jfif_unit_cm.jpg") as im:
            assert im.info["dpi"] == (2.54, 5.08)
@@ -281,6 +281,9 @@ class TestFileJpeg:
        assert not im2.info.get("progressive")
        assert im3.info.get("progressive")

        if features.check_feature("mozjpeg"):
            assert_image_similar(im1, im3, 9.39)
        else:
            assert_image_equal(im1, im3)
        assert im1_bytes >= im3_bytes
@@ -423,6 +426,10 @@ class TestFileJpeg:

        im2 = self.roundtrip(hopper(), progressive=1)
        im3 = self.roundtrip(hopper(), progression=1)  # compatibility
        if features.check_feature("mozjpeg"):
            assert_image_similar(im1, im2, 9.39)
            assert_image_similar(im1, im3, 9.39)
        else:
            assert_image_equal(im1, im2)
            assert_image_equal(im1, im3)
        assert im2.info.get("progressive")
@@ -523,12 +530,13 @@ class TestFileJpeg:
    @mark_if_feature_version(
        pytest.mark.valgrind_known_error, "libjpeg_turbo", "2.0", reason="Known Failing"
    )
    def test_truncated_jpeg_should_read_all_the_data(self) -> None:
    def test_truncated_jpeg_should_read_all_the_data(
        self, monkeypatch: pytest.MonkeyPatch
    ) -> None:
        filename = "Tests/images/truncated_jpeg.jpg"
        ImageFile.LOAD_TRUNCATED_IMAGES = True
        monkeypatch.setattr(ImageFile, "LOAD_TRUNCATED_IMAGES", True)
        with Image.open(filename) as im:
            im.load()
        ImageFile.LOAD_TRUNCATED_IMAGES = False
        assert im.getbbox() is not None

    def test_truncated_jpeg_throws_oserror(self) -> None:
@@ -926,7 +934,7 @@ class TestFileJpeg:

    def test_jpeg_magic_number(self, monkeypatch: pytest.MonkeyPatch) -> None:
        size = 4097
        buffer = BytesIO(b"\xFF" * size)  # Many xFF bytes
        buffer = BytesIO(b"\xff" * size)  # Many xff bytes
        max_pos = 0
        orig_read = buffer.read
@@ -1017,7 +1025,7 @@ class TestFileJpeg:
            im.save(f, xmp=b"1" * 65505)

    @pytest.mark.timeout(timeout=1)
    def test_eof(self) -> None:
    def test_eof(self, monkeypatch: pytest.MonkeyPatch) -> None:
        # Even though this decoder never says that it is finished
        # the image should still end when there is no new data
        class InfiniteMockPyDecoder(ImageFile.PyDecoder):
@@ -1030,11 +1038,10 @@ class TestFileJpeg:

        with Image.open(TEST_FILE) as im:
            im.tile = [
                ("INFINITE", (0, 0, 128, 128), 0, ("RGB", 0, 1)),
                ImageFile._Tile("INFINITE", (0, 0, 128, 128), 0, ("RGB", 0, 1)),
            ]
            ImageFile.LOAD_TRUNCATED_IMAGES = True
            monkeypatch.setattr(ImageFile, "LOAD_TRUNCATED_IMAGES", True)
            im.load()
            ImageFile.LOAD_TRUNCATED_IMAGES = False

    def test_separate_tables(self) -> None:
        im = hopper()
|
|||
assert "dpi" not in im.info
|
||||
|
||||
|
||||
def test_restricted_icc_profile() -> None:
|
||||
ImageFile.LOAD_TRUNCATED_IMAGES = True
|
||||
try:
|
||||
def test_restricted_icc_profile(monkeypatch: pytest.MonkeyPatch) -> None:
|
||||
monkeypatch.setattr(ImageFile, "LOAD_TRUNCATED_IMAGES", True)
|
||||
# JPEG2000 image with a restricted ICC profile and a known colorspace
|
||||
with Image.open("Tests/images/balloon_eciRGBv2_aware.jp2") as im:
|
||||
assert im.mode == "RGB"
|
||||
finally:
|
||||
ImageFile.LOAD_TRUNCATED_IMAGES = False
|
||||
|
||||
|
||||
@pytest.mark.skipif(
|
||||
|
@ -492,8 +489,7 @@ def test_plt_marker(card: ImageFile.ImageFile) -> None:
|
|||
out.seek(0)
|
||||
while True:
|
||||
marker = out.read(2)
|
||||
if not marker:
|
||||
pytest.fail("End of stream without PLT")
|
||||
assert marker, "End of stream without PLT"
|
||||
|
||||
jp2_boxid = _binary.i16be(marker)
|
||||
if jp2_boxid == 0xFF4F:
|
||||
|
|
|
@ -36,11 +36,7 @@ class LibTiffTestCase:
|
|||
im.load()
|
||||
im.getdata()
|
||||
|
||||
try:
|
||||
assert im._compression == "group4"
|
||||
except AttributeError:
|
||||
print("No _compression")
|
||||
print(dir(im))
|
||||
|
||||
# can we write it back out, in a different form.
|
||||
out = str(tmp_path / "temp.png")
|
||||
|
@ -313,7 +309,7 @@ class TestFileLibTiff(LibTiffTestCase):
|
|||
}
|
||||
|
||||
def check_tags(
|
||||
tiffinfo: TiffImagePlugin.ImageFileDirectory_v2 | dict[int, str]
|
||||
tiffinfo: TiffImagePlugin.ImageFileDirectory_v2 | dict[int, str],
|
||||
) -> None:
|
||||
im = hopper()
|
||||
|
||||
|
@ -1107,11 +1103,13 @@ class TestFileLibTiff(LibTiffTestCase):
|
|||
)
|
||||
def test_buffering(self, test_file: str) -> None:
|
||||
# load exif first
|
||||
with Image.open(open(test_file, "rb", buffering=1048576)) as im:
|
||||
with open(test_file, "rb", buffering=1048576) as f:
|
||||
with Image.open(f) as im:
|
||||
exif = dict(im.getexif())
|
||||
|
||||
# load image before exif
|
||||
with Image.open(open(test_file, "rb", buffering=1048576)) as im2:
|
||||
with open(test_file, "rb", buffering=1048576) as f:
|
||||
with Image.open(f) as im2:
|
||||
im2.load()
|
||||
exif_after_load = dict(im2.getexif())
|
||||
|
||||
|
@ -1146,7 +1144,7 @@ class TestFileLibTiff(LibTiffTestCase):
|
|||
im.load()
|
||||
|
||||
# Assert that the error code is IMAGING_CODEC_MEMORY
|
||||
assert str(e.value) == "-9"
|
||||
assert str(e.value) == "decoder error -9"
|
||||
|
||||
@pytest.mark.parametrize("compression", ("tiff_adobe_deflate", "jpeg"))
|
||||
def test_save_multistrip(self, compression: str, tmp_path: Path) -> None:
|
||||
|
@ -1160,13 +1158,14 @@ class TestFileLibTiff(LibTiffTestCase):
|
|||
assert len(im.tag_v2[STRIPOFFSETS]) > 1
|
||||
|
||||
@pytest.mark.parametrize("argument", (True, False))
|
||||
def test_save_single_strip(self, argument: bool, tmp_path: Path) -> None:
|
||||
def test_save_single_strip(
|
||||
self, argument: bool, tmp_path: Path, monkeypatch: pytest.MonkeyPatch
|
||||
) -> None:
|
||||
im = hopper("RGB").resize((256, 256))
|
||||
out = str(tmp_path / "temp.tif")
|
||||
|
||||
if not argument:
|
||||
TiffImagePlugin.STRIP_SIZE = 2**18
|
||||
try:
|
||||
monkeypatch.setattr(TiffImagePlugin, "STRIP_SIZE", 2**18)
|
||||
arguments: dict[str, str | int] = {"compression": "tiff_adobe_deflate"}
|
||||
if argument:
|
||||
arguments["strip_size"] = 2**18
|
||||
|
@ -1175,8 +1174,6 @@ class TestFileLibTiff(LibTiffTestCase):
|
|||
with Image.open(out) as im:
|
||||
assert isinstance(im, TiffImagePlugin.TiffImageFile)
|
||||
assert len(im.tag_v2[STRIPOFFSETS]) == 1
|
||||
finally:
|
||||
TiffImagePlugin.STRIP_SIZE = 65536
|
||||
|
||||
@pytest.mark.parametrize("compression", ("tiff_adobe_deflate", None))
|
||||
def test_save_zero(self, compression: str | None, tmp_path: Path) -> None:
|
||||
|
|
|
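The buffering hunk above moves the `open()` call into an explicit `with` block instead of passing an anonymous file object straight into `Image.open()`, so the handle is always closed deterministically. A sketch of the resulting pattern, assuming any readable image path (the test uses its own parametrised files):

```
from PIL import Image

# The outer `with` owns the file handle; the inner one owns the image.
with open("Tests/images/hopper.png", "rb", buffering=1048576) as f:
    with Image.open(f) as im:
        exif = dict(im.getexif())
```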
@@ -264,7 +264,7 @@ def test_pdf_append(tmp_path: Path) -> None:
    # append some info
    pdf.info.Title = "abc"
    pdf.info.Author = "def"
    pdf.info.Subject = "ghi\uABCD"
    pdf.info.Subject = "ghi\uabcd"
    pdf.info.Keywords = "qw)e\\r(ty"
    pdf.info.Creator = "hopper()"
    pdf.start_writing()
@@ -292,7 +292,7 @@ def test_pdf_append(tmp_path: Path) -> None:
    assert pdf.info.Title == "abc"
    assert pdf.info.Producer == "PdfParser"
    assert pdf.info.Keywords == "qw)e\\r(ty"
    assert pdf.info.Subject == "ghi\uABCD"
    assert pdf.info.Subject == "ghi\uabcd"
    assert b"CreationDate" in pdf.info
    assert b"ModDate" in pdf.info
    check_pdf_pages_consistency(pdf)
|
|||
with pytest.raises((OSError, SyntaxError)):
|
||||
im.verify()
|
||||
|
||||
def test_verify_ignores_crc_error(self) -> None:
|
||||
def test_verify_ignores_crc_error(self, monkeypatch: pytest.MonkeyPatch) -> None:
|
||||
# check ignores crc errors in ancillary chunks
|
||||
|
||||
chunk_data = chunk(b"tEXt", b"spam")
|
||||
|
@ -373,24 +373,20 @@ class TestFilePng:
|
|||
with pytest.raises(SyntaxError):
|
||||
PngImagePlugin.PngImageFile(BytesIO(image_data))
|
||||
|
||||
ImageFile.LOAD_TRUNCATED_IMAGES = True
|
||||
try:
|
||||
monkeypatch.setattr(ImageFile, "LOAD_TRUNCATED_IMAGES", True)
|
||||
im = load(image_data)
|
||||
assert im is not None
|
||||
finally:
|
||||
ImageFile.LOAD_TRUNCATED_IMAGES = False
|
||||
|
||||
def test_verify_not_ignores_crc_error_in_required_chunk(self) -> None:
|
||||
def test_verify_not_ignores_crc_error_in_required_chunk(
|
||||
self, monkeypatch: pytest.MonkeyPatch
|
||||
) -> None:
|
||||
# check does not ignore crc errors in required chunks
|
||||
|
||||
image_data = MAGIC + IHDR[:-1] + b"q" + TAIL
|
||||
|
||||
ImageFile.LOAD_TRUNCATED_IMAGES = True
|
||||
try:
|
||||
monkeypatch.setattr(ImageFile, "LOAD_TRUNCATED_IMAGES", True)
|
||||
with pytest.raises(SyntaxError):
|
||||
PngImagePlugin.PngImageFile(BytesIO(image_data))
|
||||
finally:
|
||||
ImageFile.LOAD_TRUNCATED_IMAGES = False
|
||||
|
||||
def test_roundtrip_dpi(self) -> None:
|
||||
# Check dpi roundtripping
|
||||
|
@ -600,7 +596,7 @@ class TestFilePng:
|
|||
(b"prIV", b"VALUE3", True),
|
||||
]
|
||||
|
||||
def test_textual_chunks_after_idat(self) -> None:
|
||||
def test_textual_chunks_after_idat(self, monkeypatch: pytest.MonkeyPatch) -> None:
|
||||
with Image.open("Tests/images/hopper.png") as im:
|
||||
assert "comment" in im.text
|
||||
for k, v in {
|
||||
|
@ -614,18 +610,17 @@ class TestFilePng:
|
|||
with pytest.raises(OSError):
|
||||
assert isinstance(im.text, dict)
|
||||
|
||||
# Raises an EOFError in load_end
|
||||
with Image.open("Tests/images/hopper_idat_after_image_end.png") as im:
|
||||
assert im.text == {"TXT": "VALUE", "ZIP": "VALUE"}
|
||||
|
||||
# Raises a UnicodeDecodeError in load_end
|
||||
with Image.open("Tests/images/truncated_image.png") as im:
|
||||
# The file is truncated
|
||||
with pytest.raises(OSError):
|
||||
im.text()
|
||||
ImageFile.LOAD_TRUNCATED_IMAGES = True
|
||||
im.text
|
||||
monkeypatch.setattr(ImageFile, "LOAD_TRUNCATED_IMAGES", True)
|
||||
assert isinstance(im.text, dict)
|
||||
ImageFile.LOAD_TRUNCATED_IMAGES = False
|
||||
|
||||
# Raises an EOFError in load_end
|
||||
with Image.open("Tests/images/hopper_idat_after_image_end.png") as im:
|
||||
assert im.text == {"TXT": "VALUE", "ZIP": "VALUE"}
|
||||
|
||||
def test_unknown_compression_method(self) -> None:
|
||||
with pytest.raises(SyntaxError, match="Unknown compression method"):
|
||||
|
@ -651,15 +646,16 @@ class TestFilePng:
|
|||
@pytest.mark.parametrize(
|
||||
"cid", (b"IHDR", b"sRGB", b"pHYs", b"acTL", b"fcTL", b"fdAT")
|
||||
)
|
||||
def test_truncated_chunks(self, cid: bytes) -> None:
|
||||
def test_truncated_chunks(
|
||||
self, cid: bytes, monkeypatch: pytest.MonkeyPatch
|
||||
) -> None:
|
||||
fp = BytesIO()
|
||||
with PngImagePlugin.PngStream(fp) as png:
|
||||
with pytest.raises(ValueError):
|
||||
png.call(cid, 0, 0)
|
||||
|
||||
ImageFile.LOAD_TRUNCATED_IMAGES = True
|
||||
monkeypatch.setattr(ImageFile, "LOAD_TRUNCATED_IMAGES", True)
|
||||
png.call(cid, 0, 0)
|
||||
ImageFile.LOAD_TRUNCATED_IMAGES = False
|
||||
|
||||
@pytest.mark.parametrize("save_all", (True, False))
|
||||
def test_specify_bits(self, save_all: bool, tmp_path: Path) -> None:
|
||||
|
@ -789,17 +785,14 @@ class TestFilePng:
|
|||
with Image.open(mystdout) as reloaded:
|
||||
assert_image_equal_tofile(reloaded, TEST_PNG_FILE)
|
||||
|
||||
def test_truncated_end_chunk(self) -> None:
|
||||
def test_truncated_end_chunk(self, monkeypatch: pytest.MonkeyPatch) -> None:
|
||||
with Image.open("Tests/images/truncated_end_chunk.png") as im:
|
||||
with pytest.raises(OSError):
|
||||
im.load()
|
||||
|
||||
ImageFile.LOAD_TRUNCATED_IMAGES = True
|
||||
try:
|
||||
monkeypatch.setattr(ImageFile, "LOAD_TRUNCATED_IMAGES", True)
|
||||
with Image.open("Tests/images/truncated_end_chunk.png") as im:
|
||||
assert_image_equal_tofile(im, "Tests/images/hopper.png")
|
||||
finally:
|
||||
ImageFile.LOAD_TRUNCATED_IMAGES = False
|
||||
|
||||
|
||||
@pytest.mark.skipif(is_win32(), reason="Requires Unix or macOS")
|
||||
|
@ -808,11 +801,11 @@ class TestTruncatedPngPLeaks(PillowLeakTestCase):
|
|||
mem_limit = 2 * 1024 # max increase in K
|
||||
iterations = 100 # Leak is 56k/iteration, this will leak 5.6megs
|
||||
|
||||
def test_leak_load(self) -> None:
|
||||
def test_leak_load(self, monkeypatch: pytest.MonkeyPatch) -> None:
|
||||
with open("Tests/images/hopper.png", "rb") as f:
|
||||
DATA = BytesIO(f.read(16 * 1024))
|
||||
|
||||
ImageFile.LOAD_TRUNCATED_IMAGES = True
|
||||
monkeypatch.setattr(ImageFile, "LOAD_TRUNCATED_IMAGES", True)
|
||||
with Image.open(DATA) as im:
|
||||
im.load()
|
||||
|
||||
|
@ -820,7 +813,4 @@ class TestTruncatedPngPLeaks(PillowLeakTestCase):
|
|||
with Image.open(DATA) as im:
|
||||
im.load()
|
||||
|
||||
try:
|
||||
self._test_leak(core)
|
||||
finally:
|
||||
ImageFile.LOAD_TRUNCATED_IMAGES = False
|
||||
|
|
|
@ -49,7 +49,7 @@ def test_sanity() -> None:
|
|||
(b"P5 3 1 257 \x00\x00\x00\x80\x01\x01", "I", (0, 32640, 65535)),
|
||||
# P6 with maxval < 255
|
||||
(
|
||||
b"P6 3 1 17 \x00\x01\x02\x08\x09\x0A\x0F\x10\x11",
|
||||
b"P6 3 1 17 \x00\x01\x02\x08\x09\x0a\x0f\x10\x11",
|
||||
"RGB",
|
||||
(
|
||||
(0, 15, 30),
|
||||
|
@ -60,7 +60,7 @@ def test_sanity() -> None:
|
|||
# P6 with maxval > 255
|
||||
(
|
||||
b"P6 3 1 257 \x00\x00\x00\x01\x00\x02"
|
||||
b"\x00\x80\x00\x81\x00\x82\x01\x00\x01\x01\xFF\xFF",
|
||||
b"\x00\x80\x00\x81\x00\x82\x01\x00\x01\x01\xff\xff",
|
||||
"RGB",
|
||||
(
|
||||
(0, 1, 2),
|
||||
|
|
|
@ -7,7 +7,7 @@ from pathlib import Path
|
|||
|
||||
import pytest
|
||||
|
||||
from PIL import Image, ImageSequence, SpiderImagePlugin
|
||||
from PIL import Image, SpiderImagePlugin
|
||||
|
||||
from .helper import assert_image_equal, hopper, is_pypy
|
||||
|
||||
|
@ -153,8 +153,8 @@ def test_nonstack_file() -> None:
|
|||
|
||||
def test_nonstack_dos() -> None:
|
||||
with Image.open(TEST_FILE) as im:
|
||||
for i, frame in enumerate(ImageSequence.Iterator(im)):
|
||||
assert i <= 1, "Non-stack DOS file test failed"
|
||||
with pytest.raises(EOFError):
|
||||
im.seek(0)
|
||||
|
||||
|
||||
# for issue #4093
|
||||
|
|
|
@@ -117,10 +117,16 @@ class TestFileTiff:

    def test_bigtiff_save(self, tmp_path: Path) -> None:
        outfile = str(tmp_path / "temp.tif")
        hopper().save(outfile, big_tiff=True)
        im = hopper()
        im.save(outfile, big_tiff=True)

        with Image.open(outfile) as im:
            assert im.tag_v2._bigtiff is True
        with Image.open(outfile) as reloaded:
            assert reloaded.tag_v2._bigtiff is True

        im.save(outfile, save_all=True, append_images=[im], big_tiff=True)

        with Image.open(outfile) as reloaded:
            assert reloaded.tag_v2._bigtiff is True

    def test_seek_too_large(self) -> None:
        with pytest.raises(ValueError, match="Unable to seek to frame"):
@@ -753,6 +759,39 @@ class TestFileTiff:
        with pytest.raises(RuntimeError):
            a.fixOffsets(1)

        b = BytesIO(b"II\x2a\x00\x00\x00\x00\x00")
        with TiffImagePlugin.AppendingTiffWriter(b) as a:
            a.offsetOfNewPage = 2**16

            b.seek(0)
            a.fixOffsets(1, isShort=True)

        b = BytesIO(b"II\x2b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00")
        with TiffImagePlugin.AppendingTiffWriter(b) as a:
            a.offsetOfNewPage = 2**32

            b.seek(0)
            a.fixOffsets(1, isShort=True)

            b.seek(0)
            a.fixOffsets(1, isLong=True)

    def test_appending_tiff_writer_writelong(self) -> None:
        data = b"II\x2a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
        b = BytesIO(data)
        with TiffImagePlugin.AppendingTiffWriter(b) as a:
            a.seek(-4, os.SEEK_CUR)
            a.writeLong(2**32 - 1)
        assert b.getvalue() == data[:-4] + b"\xff\xff\xff\xff"

    def test_appending_tiff_writer_rewritelastshorttolong(self) -> None:
        data = b"II\x2a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
        b = BytesIO(data)
        with TiffImagePlugin.AppendingTiffWriter(b) as a:
            a.seek(-2, os.SEEK_CUR)
            a.rewriteLastShortToLong(2**32 - 1)
        assert b.getvalue() == data[:-4] + b"\xff\xff\xff\xff"

    def test_saving_icc_profile(self, tmp_path: Path) -> None:
        # Tests saving TIFF with icc_profile set.
        # At the time of writing this will only work for non-compressed tiffs
@@ -902,11 +941,10 @@ class TestFileTiff:

    @pytest.mark.timeout(6)
    @pytest.mark.filterwarnings("ignore:Truncated File Read")
    def test_timeout(self) -> None:
    def test_timeout(self, monkeypatch: pytest.MonkeyPatch) -> None:
        with Image.open("Tests/images/timeout-6646305047838720") as im:
            ImageFile.LOAD_TRUNCATED_IMAGES = True
            monkeypatch.setattr(ImageFile, "LOAD_TRUNCATED_IMAGES", True)
            im.load()
            ImageFile.LOAD_TRUNCATED_IMAGES = False

    @pytest.mark.parametrize(
        "test_file",
|
|||
|
||||
|
||||
class TestUnsupportedWebp:
|
||||
def test_unsupported(self) -> None:
|
||||
def test_unsupported(self, monkeypatch: pytest.MonkeyPatch) -> None:
|
||||
if HAVE_WEBP:
|
||||
WebPImagePlugin.SUPPORTED = False
|
||||
monkeypatch.setattr(WebPImagePlugin, "SUPPORTED", False)
|
||||
|
||||
file_path = "Tests/images/hopper.webp"
|
||||
with pytest.warns(UserWarning):
|
||||
|
@ -38,9 +38,6 @@ class TestUnsupportedWebp:
|
|||
with Image.open(file_path):
|
||||
pass
|
||||
|
||||
if HAVE_WEBP:
|
||||
WebPImagePlugin.SUPPORTED = True
|
||||
|
||||
|
||||
@skip_unless_feature("webp")
|
||||
class TestFileWebp:
|
||||
|
|
|
@ -71,7 +71,7 @@ def test_load_float_dpi() -> None:
|
|||
|
||||
with open("Tests/images/drawing.emf", "rb") as fp:
|
||||
data = fp.read()
|
||||
b = BytesIO(data[:8] + b"\x06\xFA" + data[10:])
|
||||
b = BytesIO(data[:8] + b"\x06\xfa" + data[10:])
|
||||
with Image.open(b) as im:
|
||||
assert im.info["dpi"][0] == 2540
|
||||
|
||||
|
|
|
@@ -189,8 +189,6 @@ class TestImage:
        if ext == ".jp2" and not features.check_codec("jpg_2000"):
            pytest.skip("jpg_2000 not available")
        temp_file = str(tmp_path / ("temp." + ext))
        if os.path.exists(temp_file):
            os.remove(temp_file)
        im.save(Path(temp_file))

    def test_fp_name(self, tmp_path: Path) -> None:
@@ -580,9 +578,7 @@ class TestImage:
    def test_one_item_tuple(self) -> None:
        for mode in ("I", "F", "L"):
            im = Image.new(mode, (100, 100), (5,))
            px = im.load()
            assert px is not None
            assert px[0, 0] == 5
            assert im.getpixel((0, 0)) == 5

    def test_linear_gradient_wrong_mode(self) -> None:
        # Arrange
@@ -667,7 +663,7 @@ class TestImage:
        # Test illegal image mode
        with hopper() as im:
            with pytest.raises(ValueError):
                im.remap_palette(None)
                im.remap_palette([])

    def test_remap_palette_transparency(self) -> None:
        im = Image.new("P", (1, 2), (0, 0, 0))
@@ -770,7 +766,7 @@ class TestImage:
        assert dict(exif)

        # Test that exif data is cleared after another load
        exif.load(None)
        exif.load(b"")
        assert not dict(exif)

        # Test loading just the EXIF header
@@ -991,6 +987,11 @@ class TestImage:
        else:
            assert im.getxmp() == {"xmpmeta": None}

    def test_get_child_images(self) -> None:
        im = Image.new("RGB", (1, 1))
        with pytest.warns(DeprecationWarning):
            assert im.get_child_images() == []

    @pytest.mark.parametrize("size", ((1, 0), (0, 1), (0, 0)))
    def test_zero_tobytes(self, size: tuple[int, int]) -> None:
        im = Image.new("RGB", size)
@@ -271,13 +271,25 @@ class TestImagePutPixelError:


class TestEmbeddable:
    @pytest.mark.xfail(reason="failing test")
    @pytest.mark.xfail(not (sys.version_info >= (3, 13)), reason="failing test")
    @pytest.mark.skipif(not is_win32(), reason="requires Windows")
    def test_embeddable(self) -> None:
        import ctypes

        from setuptools.command import build_ext

        compiler = getattr(build_ext, "new_compiler")()
        compiler.add_include_dir(sysconfig.get_config_var("INCLUDEPY"))

        libdir = sysconfig.get_config_var("LIBDIR") or sysconfig.get_config_var(
            "INCLUDEPY"
        ).replace("include", "libs")
        compiler.add_library_dir(libdir)
        try:
            compiler.initialize()
        except Exception:
            pytest.skip("Compiler could not be initialized")

        with open("embed_pil.c", "w", encoding="utf-8") as fh:
            home = sys.prefix.replace("\\", "\\\\")
            fh.write(
@@ -305,13 +317,6 @@ int main(int argc, char* argv[])
"""
            )

        compiler = getattr(build_ext, "new_compiler")()
        compiler.add_include_dir(sysconfig.get_config_var("INCLUDEPY"))

        libdir = sysconfig.get_config_var("LIBDIR") or sysconfig.get_config_var(
            "INCLUDEPY"
        ).replace("include", "libs")
        compiler.add_library_dir(libdir)
        objects = compiler.compile(["embed_pil.c"])
        compiler.link_executable(objects, "embed_pil")
@ -222,9 +222,7 @@ def test_l_macro_rounding(convert_mode: str) -> None:
|
|||
im.palette.getcolor((0, 1, 2))
|
||||
|
||||
converted_im = im.convert(convert_mode)
|
||||
px = converted_im.load()
|
||||
assert px is not None
|
||||
converted_color = px[0, 0]
|
||||
converted_color = converted_im.getpixel((0, 0))
|
||||
if convert_mode == "LA":
|
||||
assert isinstance(converted_color, tuple)
|
||||
converted_color = converted_color[0]
|
||||
|
|
|
@ -148,10 +148,8 @@ def test_palette(method: Image.Quantize, color: tuple[int, ...]) -> None:
|
|||
im = Image.new("RGBA" if len(color) == 4 else "RGB", (1, 1), color)
|
||||
|
||||
converted = im.quantize(method=method)
|
||||
converted_px = converted.load()
|
||||
assert converted_px is not None
|
||||
assert converted.palette is not None
|
||||
assert converted_px[0, 0] == converted.palette.colors[color]
|
||||
assert converted.getpixel((0, 0)) == converted.palette.colors[color]
|
||||
|
||||
|
||||
def test_small_palette() -> None:
|
||||
|
|
|
@ -309,7 +309,7 @@ class TestImageResize:
|
|||
# Test unknown resampling filter
|
||||
with hopper() as im:
|
||||
with pytest.raises(ValueError):
|
||||
im.resize((10, 10), "unknown")
|
||||
im.resize((10, 10), -1)
|
||||
|
||||
@skip_unless_feature("libtiff")
|
||||
def test_transposed(self) -> None:
|
||||
|
|
|
@ -812,7 +812,7 @@ def test_rounded_rectangle(
|
|||
tuple[int, int, int, int]
|
||||
| tuple[list[int]]
|
||||
| tuple[tuple[int, int], tuple[int, int]]
|
||||
)
|
||||
),
|
||||
) -> None:
|
||||
# Arrange
|
||||
im = Image.new("RGB", (200, 200))
|
||||
|
@ -1396,6 +1396,28 @@ def test_stroke_descender() -> None:
|
|||
assert_image_similar_tofile(im, "Tests/images/imagedraw_stroke_descender.png", 6.76)
|
||||
|
||||
|
||||
@skip_unless_feature("freetype2")
|
||||
def test_stroke_inside_gap() -> None:
|
||||
# Arrange
|
||||
im = Image.new("RGB", (120, 130))
|
||||
draw = ImageDraw.Draw(im)
|
||||
font = ImageFont.truetype("Tests/fonts/FreeMono.ttf", 120)
|
||||
|
||||
# Act
|
||||
draw.text((12, 12), "i", "#f00", font, stroke_width=20)
|
||||
|
||||
# Assert
|
||||
for y in range(im.height):
|
||||
glyph = ""
|
||||
for x in range(im.width):
|
||||
if im.getpixel((x, y)) == (0, 0, 0):
|
||||
if glyph == "started":
|
||||
glyph = "ended"
|
||||
else:
|
||||
assert glyph != "ended", "Gap inside stroked glyph"
|
||||
glyph = "started"
|
||||
|
||||
|
||||
@skip_unless_feature("freetype2")
|
||||
def test_split_word() -> None:
|
||||
# Arrange
|
||||
|
|
|
@ -191,13 +191,10 @@ class TestImageFile:
|
|||
im.load()
|
||||
|
||||
@skip_unless_feature("zlib")
|
||||
def test_truncated_without_errors(self) -> None:
|
||||
def test_truncated_without_errors(self, monkeypatch: pytest.MonkeyPatch) -> None:
|
||||
with Image.open("Tests/images/truncated_image.png") as im:
|
||||
ImageFile.LOAD_TRUNCATED_IMAGES = True
|
||||
try:
|
||||
monkeypatch.setattr(ImageFile, "LOAD_TRUNCATED_IMAGES", True)
|
||||
im.load()
|
||||
finally:
|
||||
ImageFile.LOAD_TRUNCATED_IMAGES = False
|
||||
|
||||
@skip_unless_feature("zlib")
|
||||
def test_broken_datastream_with_errors(self) -> None:
|
||||
|
@ -206,13 +203,12 @@ class TestImageFile:
|
|||
im.load()
|
||||
|
||||
@skip_unless_feature("zlib")
|
||||
def test_broken_datastream_without_errors(self) -> None:
|
||||
def test_broken_datastream_without_errors(
|
||||
self, monkeypatch: pytest.MonkeyPatch
|
||||
) -> None:
|
||||
with Image.open("Tests/images/broken_data_stream.png") as im:
|
||||
ImageFile.LOAD_TRUNCATED_IMAGES = True
|
||||
try:
|
||||
monkeypatch.setattr(ImageFile, "LOAD_TRUNCATED_IMAGES", True)
|
||||
im.load()
|
||||
finally:
|
||||
ImageFile.LOAD_TRUNCATED_IMAGES = False
|
||||
|
||||
|
||||
class MockPyDecoder(ImageFile.PyDecoder):
|
||||
|
|
|
@ -254,7 +254,8 @@ def test_render_multiline_text(font: ImageFont.FreeTypeFont) -> None:
|
|||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"align, ext", (("left", ""), ("center", "_center"), ("right", "_right"))
|
||||
"align, ext",
|
||||
(("left", ""), ("center", "_center"), ("right", "_right"), ("justify", "_justify")),
|
||||
)
|
||||
def test_render_multiline_text_align(
|
||||
font: ImageFont.FreeTypeFont, align: str, ext: str
|
||||
|
@ -461,6 +462,20 @@ def test_free_type_font_get_mask(font: ImageFont.FreeTypeFont) -> None:
|
|||
assert mask.size == (108, 13)
|
||||
|
||||
|
||||
def test_stroke_mask() -> None:
|
||||
# Arrange
|
||||
text = "i"
|
||||
|
||||
# Act
|
||||
font = ImageFont.truetype(FONT_PATH, 128)
|
||||
mask = font.getmask(text, stroke_width=2)
|
||||
|
||||
# Assert
|
||||
assert mask.getpixel((34, 5)) == 255
|
||||
assert mask.getpixel((38, 5)) == 0
|
||||
assert mask.getpixel((42, 5)) == 255
|
||||
|
||||
|
||||
def test_load_when_image_not_found() -> None:
|
||||
with tempfile.NamedTemporaryFile(delete=False) as tmp:
|
||||
pass
|
||||
|
@ -543,7 +558,7 @@ def test_render_empty(font: ImageFont.FreeTypeFont) -> None:
|
|||
|
||||
def test_unicode_extended(layout_engine: ImageFont.Layout) -> None:
|
||||
# issue #3777
|
||||
text = "A\u278A\U0001F12B"
|
||||
text = "A\u278a\U0001f12b"
|
||||
target = "Tests/images/unicode_extended.png"
|
||||
|
||||
ttf = ImageFont.truetype(
|
||||
|
@ -1012,7 +1027,7 @@ def test_sbix(layout_engine: ImageFont.Layout) -> None:
|
|||
im = Image.new("RGB", (400, 400), "white")
|
||||
d = ImageDraw.Draw(im)
|
||||
|
||||
d.text((50, 50), "\uE901", font=font, embedded_color=True)
|
||||
d.text((50, 50), "\ue901", font=font, embedded_color=True)
|
||||
|
||||
assert_image_similar_tofile(im, "Tests/images/chromacheck-sbix.png", 1)
|
||||
except OSError as e: # pragma: no cover
|
||||
|
@ -1029,7 +1044,7 @@ def test_sbix_mask(layout_engine: ImageFont.Layout) -> None:
|
|||
im = Image.new("RGB", (400, 400), "white")
|
||||
d = ImageDraw.Draw(im)
|
||||
|
||||
d.text((50, 50), "\uE901", (100, 0, 0), font=font)
|
||||
d.text((50, 50), "\ue901", (100, 0, 0), font=font)
|
||||
|
||||
assert_image_similar_tofile(im, "Tests/images/chromacheck-sbix_mask.png", 1)
|
||||
except OSError as e: # pragma: no cover
|
||||
|
|
|
@ -229,7 +229,7 @@ def test_getlength(
|
|||
@pytest.mark.parametrize("direction", ("ltr", "ttb"))
|
||||
@pytest.mark.parametrize(
|
||||
"text",
|
||||
("i" + ("\u030C" * 15) + "i", "i" + "\u032C" * 15 + "i", "\u035Cii", "i\u0305i"),
|
||||
("i" + ("\u030c" * 15) + "i", "i" + "\u032c" * 15 + "i", "\u035cii", "i\u0305i"),
|
||||
ids=("caron-above", "caron-below", "double-breve", "overline"),
|
||||
)
|
||||
def test_getlength_combine(mode: str, direction: str, text: str) -> None:
|
||||
|
@ -272,27 +272,27 @@ def test_anchor_ttb(anchor: str) -> None:
|
|||
|
||||
combine_tests = (
|
||||
# extends above (e.g. issue #4553)
|
||||
("caron", "a\u030C\u030C\u030C\u030C\u030Cb", None, None, 0.08),
|
||||
("caron_la", "a\u030C\u030C\u030C\u030C\u030Cb", "la", None, 0.08),
|
||||
("caron_lt", "a\u030C\u030C\u030C\u030C\u030Cb", "lt", None, 0.08),
|
||||
("caron_ls", "a\u030C\u030C\u030C\u030C\u030Cb", "ls", None, 0.08),
|
||||
("caron_ttb", "ca" + ("\u030C" * 15) + "b", None, "ttb", 0.3),
|
||||
("caron_ttb_lt", "ca" + ("\u030C" * 15) + "b", "lt", "ttb", 0.3),
|
||||
("caron", "a\u030c\u030c\u030c\u030c\u030cb", None, None, 0.08),
|
||||
("caron_la", "a\u030c\u030c\u030c\u030c\u030cb", "la", None, 0.08),
|
||||
("caron_lt", "a\u030c\u030c\u030c\u030c\u030cb", "lt", None, 0.08),
|
||||
("caron_ls", "a\u030c\u030c\u030c\u030c\u030cb", "ls", None, 0.08),
|
||||
("caron_ttb", "ca" + ("\u030c" * 15) + "b", None, "ttb", 0.3),
|
||||
("caron_ttb_lt", "ca" + ("\u030c" * 15) + "b", "lt", "ttb", 0.3),
|
||||
# extends below
|
||||
("caron_below", "a\u032C\u032C\u032C\u032C\u032Cb", None, None, 0.02),
|
||||
("caron_below_ld", "a\u032C\u032C\u032C\u032C\u032Cb", "ld", None, 0.02),
|
||||
("caron_below_lb", "a\u032C\u032C\u032C\u032C\u032Cb", "lb", None, 0.02),
|
||||
("caron_below_ls", "a\u032C\u032C\u032C\u032C\u032Cb", "ls", None, 0.02),
|
||||
("caron_below_ttb", "a" + ("\u032C" * 15) + "b", None, "ttb", 0.03),
|
||||
("caron_below_ttb_lb", "a" + ("\u032C" * 15) + "b", "lb", "ttb", 0.03),
|
||||
("caron_below", "a\u032c\u032c\u032c\u032c\u032cb", None, None, 0.02),
|
||||
("caron_below_ld", "a\u032c\u032c\u032c\u032c\u032cb", "ld", None, 0.02),
|
||||
("caron_below_lb", "a\u032c\u032c\u032c\u032c\u032cb", "lb", None, 0.02),
|
||||
("caron_below_ls", "a\u032c\u032c\u032c\u032c\u032cb", "ls", None, 0.02),
|
||||
("caron_below_ttb", "a" + ("\u032c" * 15) + "b", None, "ttb", 0.03),
|
||||
("caron_below_ttb_lb", "a" + ("\u032c" * 15) + "b", "lb", "ttb", 0.03),
|
||||
# extends to the right (e.g. issue #3745)
|
||||
("double_breve_below", "a\u035Ci", None, None, 0.02),
|
||||
("double_breve_below_ma", "a\u035Ci", "ma", None, 0.02),
|
||||
("double_breve_below_ra", "a\u035Ci", "ra", None, 0.02),
|
||||
("double_breve_below_ttb", "a\u035Cb", None, "ttb", 0.02),
|
||||
("double_breve_below_ttb_rt", "a\u035Cb", "rt", "ttb", 0.02),
|
||||
("double_breve_below_ttb_mt", "a\u035Cb", "mt", "ttb", 0.02),
|
||||
("double_breve_below_ttb_st", "a\u035Cb", "st", "ttb", 0.02),
|
||||
("double_breve_below", "a\u035ci", None, None, 0.02),
|
||||
("double_breve_below_ma", "a\u035ci", "ma", None, 0.02),
|
||||
("double_breve_below_ra", "a\u035ci", "ra", None, 0.02),
|
||||
("double_breve_below_ttb", "a\u035cb", None, "ttb", 0.02),
|
||||
("double_breve_below_ttb_rt", "a\u035cb", "rt", "ttb", 0.02),
|
||||
("double_breve_below_ttb_mt", "a\u035cb", "mt", "ttb", 0.02),
|
||||
("double_breve_below_ttb_st", "a\u035cb", "st", "ttb", 0.02),
|
||||
# extends to the left (fail=0.064)
|
||||
("overline", "i\u0305", None, None, 0.02),
|
||||
("overline_la", "i\u0305", "la", None, 0.02),
|
||||
|
@ -346,7 +346,7 @@ def test_combine_multiline(anchor: str, align: str) -> None:
|
|||
|
||||
path = f"Tests/images/test_combine_multiline_{anchor}_{align}.png"
|
||||
f = ImageFont.truetype("Tests/fonts/NotoSans-Regular.ttf", 48)
|
||||
text = "i\u0305\u035C\ntext" # i with overline and double breve, and a word
|
||||
text = "i\u0305\u035c\ntext" # i with overline and double breve, and a word
|
||||
|
||||
im = Image.new("RGB", (400, 400), "white")
|
||||
d = ImageDraw.Draw(im)
|
||||
|
|
|
@ -165,14 +165,10 @@ def test_pad() -> None:
|
|||
def test_pad_round() -> None:
|
||||
im = Image.new("1", (1, 1), 1)
|
||||
new_im = ImageOps.pad(im, (4, 1))
|
||||
px = new_im.load()
|
||||
assert px is not None
|
||||
assert px[2, 0] == 1
|
||||
assert new_im.getpixel((2, 0)) == 1
|
||||
|
||||
new_im = ImageOps.pad(im, (1, 4))
|
||||
px = new_im.load()
|
||||
assert px is not None
|
||||
assert px[0, 2] == 1
|
||||
assert new_im.getpixel((0, 2)) == 1
|
||||
|
||||
|
||||
@pytest.mark.parametrize("mode", ("P", "PA"))
|
||||
|
|
|
@ -189,7 +189,7 @@ def test_2bit_palette(tmp_path: Path) -> None:
|
|||
|
||||
rgb = b"\x00" * 2 + b"\x01" * 2 + b"\x02" * 2
|
||||
img = Image.frombytes("P", (6, 1), rgb)
|
||||
img.putpalette(b"\xFF\x00\x00\x00\xFF\x00\x00\x00\xFF") # RGB
|
||||
img.putpalette(b"\xff\x00\x00\x00\xff\x00\x00\x00\xff") # RGB
|
||||
img.save(outfile, format="PNG")
|
||||
|
||||
assert_image_equal_tofile(img, outfile)
|
||||
|
|
|
@ -79,7 +79,7 @@ def test_path_constructors(
|
|||
),
|
||||
)
|
||||
def test_invalid_path_constructors(
|
||||
coords: tuple[str, str] | Sequence[Sequence[int]]
|
||||
coords: tuple[str, str] | Sequence[Sequence[int]],
|
||||
) -> None:
|
||||
# Act
|
||||
with pytest.raises(ValueError) as e:
|
||||
|
|
|
@ -7,36 +7,30 @@ import pytest
from PIL import Image


def test_overflow() -> None:
def test_overflow(monkeypatch: pytest.MonkeyPatch) -> None:
    # There is the potential to overflow comparisons in map.c
    # if there are > SIZE_MAX bytes in the image or if
    # the file encodes an offset that makes
    # (offset + size(bytes)) > SIZE_MAX

    # Note that this image triggers the decompression bomb warning:
    max_pixels = Image.MAX_IMAGE_PIXELS
    Image.MAX_IMAGE_PIXELS = None
    monkeypatch.setattr(Image, "MAX_IMAGE_PIXELS", None)

    # This image hits the offset test.
    with Image.open("Tests/images/l2rgb_read.bmp") as im:
        with pytest.raises((ValueError, MemoryError, OSError)):
            im.load()

    Image.MAX_IMAGE_PIXELS = max_pixels


def test_tobytes() -> None:
def test_tobytes(monkeypatch: pytest.MonkeyPatch) -> None:
    # Note that this image triggers the decompression bomb warning:
    max_pixels = Image.MAX_IMAGE_PIXELS
    Image.MAX_IMAGE_PIXELS = None
    monkeypatch.setattr(Image, "MAX_IMAGE_PIXELS", None)

    # Previously raised an access violation on Windows
    with Image.open("Tests/images/l2rgb_read.bmp") as im:
        with pytest.raises((ValueError, MemoryError, OSError)):
            im.tobytes()

    Image.MAX_IMAGE_PIXELS = max_pixels


@pytest.mark.skipif(sys.maxsize <= 2**32, reason="Requires 64-bit system")
def test_ysize() -> None:
@ -141,9 +141,7 @@ def test_save_tiff_uint16() -> None:
|
|||
a.shape = TEST_IMAGE_SIZE
|
||||
img = Image.fromarray(a)
|
||||
|
||||
img_px = img.load()
|
||||
assert img_px is not None
|
||||
assert img_px[0, 0] == pixel_value
|
||||
assert img.getpixel((0, 0)) == pixel_value
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
|
|
|
@ -20,10 +20,10 @@ from PIL.PdfParser import (
|
|||
|
||||
|
||||
def test_text_encode_decode() -> None:
|
||||
assert encode_text("abc") == b"\xFE\xFF\x00a\x00b\x00c"
|
||||
assert decode_text(b"\xFE\xFF\x00a\x00b\x00c") == "abc"
|
||||
assert encode_text("abc") == b"\xfe\xff\x00a\x00b\x00c"
|
||||
assert decode_text(b"\xfe\xff\x00a\x00b\x00c") == "abc"
|
||||
assert decode_text(b"abc") == "abc"
|
||||
assert decode_text(b"\x1B a \x1C") == "\u02D9 a \u02DD"
|
||||
assert decode_text(b"\x1b a \x1c") == "\u02d9 a \u02dd"
|
||||
|
||||
|
||||
def test_indirect_refs() -> None:
|
||||
|
@ -45,8 +45,8 @@ def test_parsing() -> None:
|
|||
assert PdfParser.get_value(b"false%", 0) == (False, 5)
|
||||
assert PdfParser.get_value(b"null<", 0) == (None, 4)
|
||||
assert PdfParser.get_value(b"%cmt\n %cmt\n 123\n", 0) == (123, 15)
|
||||
assert PdfParser.get_value(b"<901FA3>", 0) == (b"\x90\x1F\xA3", 8)
|
||||
assert PdfParser.get_value(b"asd < 9 0 1 f A > qwe", 3) == (b"\x90\x1F\xA0", 17)
|
||||
assert PdfParser.get_value(b"<901FA3>", 0) == (b"\x90\x1f\xa3", 8)
|
||||
assert PdfParser.get_value(b"asd < 9 0 1 f A > qwe", 3) == (b"\x90\x1f\xa0", 17)
|
||||
assert PdfParser.get_value(b"(asd)", 0) == (b"asd", 5)
|
||||
assert PdfParser.get_value(b"(asd(qwe)zxc)zzz(aaa)", 0) == (b"asd(qwe)zxc", 13)
|
||||
assert PdfParser.get_value(b"(Two \\\nwords.)", 0) == (b"Two words.", 14)
|
||||
|
@ -56,9 +56,9 @@ def test_parsing() -> None:
|
|||
assert PdfParser.get_value(b"(One\\(paren).", 0) == (b"One(paren", 12)
|
||||
assert PdfParser.get_value(b"(One\\)paren).", 0) == (b"One)paren", 12)
|
||||
assert PdfParser.get_value(b"(\\0053)", 0) == (b"\x053", 7)
|
||||
assert PdfParser.get_value(b"(\\053)", 0) == (b"\x2B", 6)
|
||||
assert PdfParser.get_value(b"(\\53)", 0) == (b"\x2B", 5)
|
||||
assert PdfParser.get_value(b"(\\53a)", 0) == (b"\x2Ba", 6)
|
||||
assert PdfParser.get_value(b"(\\053)", 0) == (b"\x2b", 6)
|
||||
assert PdfParser.get_value(b"(\\53)", 0) == (b"\x2b", 5)
|
||||
assert PdfParser.get_value(b"(\\53a)", 0) == (b"\x2ba", 6)
|
||||
assert PdfParser.get_value(b"(\\1111)", 0) == (b"\x491", 7)
|
||||
assert PdfParser.get_value(b" 123 (", 0) == (123, 4)
|
||||
assert round(abs(PdfParser.get_value(b" 123.4 %", 0)[0] - 123.4), 7) == 0
|
||||
|
@ -118,7 +118,7 @@ def test_pdf_repr() -> None:
|
|||
assert pdf_repr(None) == b"null"
|
||||
assert pdf_repr(b"a)/b\\(c") == rb"(a\)/b\\\(c)"
|
||||
assert pdf_repr([123, True, {"a": PdfName(b"b")}]) == b"[ 123 true <<\n/a /b\n>> ]"
|
||||
assert pdf_repr(PdfBinary(b"\x90\x1F\xA0")) == b"<901FA0>"
|
||||
assert pdf_repr(PdfBinary(b"\x90\x1f\xa0")) == b"<901FA0>"
|
||||
|
||||
|
||||
def test_duplicate_xref_entry() -> None:
|
||||
|
|
|
@ -2,7 +2,7 @@
# install libimagequant

archive_name=libimagequant
archive_version=4.3.3
archive_version=4.3.4

archive=$archive_name-$archive_version
@ -6,12 +6,11 @@ Goals

The fork author's goal is to foster and support active development of PIL through:

- Continuous integration testing via `GitHub Actions`_ and `AppVeyor`_
- Continuous integration testing via `GitHub Actions`_
- Publicized development activity on `GitHub`_
- Regular releases to the `Python Package Index`_

.. _GitHub Actions: https://github.com/python-pillow/Pillow/actions
.. _AppVeyor: https://ci.appveyor.com/project/Python-pillow/pillow
.. _GitHub: https://github.com/python-pillow/Pillow
.. _Python Package Index: https://pypi.org/project/pillow/
@ -183,6 +183,16 @@ ExifTags.IFD.Makernote
``ExifTags.IFD.Makernote`` has been deprecated. Instead, use
``ExifTags.IFD.MakerNote``.

Image.Image.get_child_images()
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

.. deprecated:: 11.2.0

``Image.Image.get_child_images()`` has been deprecated and will be removed in Pillow
13 (2026-10-15). It will be moved to ``ImageFile.ImageFile.get_child_images()``. The
method uses an image's file pointer, and so child images can only be retrieved from
an :py:class:`PIL.ImageFile.ImageFile` instance.
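A minimal sketch of the call path that stays supported (the file name is only
illustrative; any image opened from a file is an
:py:class:`~PIL.ImageFile.ImageFile`)::

    from PIL import Image, ImageFile

    with Image.open("Tests/images/flower.jpg") as im:
        # The instance method resolves to ImageFile.ImageFile.get_child_images(),
        # so it does not go through the deprecated Image.Image wrapper.
        assert isinstance(im, ImageFile.ImageFile)
        child_images = im.get_child_images()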

Removed features
----------------
@ -33,10 +33,6 @@ Pillow for enterprise is available via the Tidelift Subscription. `Learn more <h
   :target: https://github.com/python-pillow/Pillow/actions/workflows/test-cygwin.yml
   :alt: GitHub Actions build status (Test Cygwin)

.. image:: https://img.shields.io/appveyor/build/python-pillow/Pillow/main.svg?label=Windows%20build
   :target: https://ci.appveyor.com/project/python-pillow/Pillow
   :alt: AppVeyor CI build status (Windows)

.. image:: https://github.com/python-pillow/Pillow/workflows/Wheels/badge.svg
   :target: https://github.com/python-pillow/Pillow/actions/workflows/wheels.yml
   :alt: GitHub Actions build status (Wheels)
@ -64,7 +64,7 @@ Many of Pillow's features require external libraries:

* **libimagequant** provides improved color quantization

  * Pillow has been tested with libimagequant **2.6-4.3.3**
  * Pillow has been tested with libimagequant **2.6-4.3.4**
  * Libimagequant is licensed GPLv3, which is more restrictive than
    the Pillow license, therefore we will not be distributing binaries
    with libimagequant support enabled.
@ -44,18 +44,14 @@ These platforms are built and tested for every change.
+----------------------------------+----------------------------+---------------------+
| Ubuntu Linux 22.04 LTS (Jammy)   | 3.9, 3.10, 3.11,           | x86-64              |
|                                  | 3.12, 3.13, PyPy3          |                     |
|                                  +----------------------------+---------------------+
|                                  | 3.10                       | arm64v8             |
+----------------------------------+----------------------------+---------------------+
| Ubuntu Linux 24.04 LTS (Noble)   | 3.12                       | x86-64, ppc64le,    |
|                                  |                            | s390x               |
| Ubuntu Linux 24.04 LTS (Noble)   | 3.12                       | x86-64, arm64v8,    |
|                                  |                            | ppc64le, s390x      |
+----------------------------------+----------------------------+---------------------+
| Windows Server 2019              | 3.9                        | x86-64              |
| Windows Server 2019              | 3.9                        | x86                 |
+----------------------------------+----------------------------+---------------------+
| Windows Server 2022              | 3.9, 3.10, 3.11,           | x86-64              |
|                                  | 3.12, 3.13, PyPy3          |                     |
|                                  +----------------------------+---------------------+
|                                  | 3.13                       | x86                 |
| Windows Server 2022              | 3.10, 3.11, 3.12, 3.13,    | x86-64              |
|                                  | PyPy3                      |                     |
|                                  +----------------------------+---------------------+
|                                  | 3.12 (MinGW)               | x86-64              |
|                                  +----------------------------+---------------------+
@ -387,8 +387,11 @@ Methods
the number of pixels between lines.
:param align: If the text is passed on to
:py:meth:`~PIL.ImageDraw.ImageDraw.multiline_text`,
``"left"``, ``"center"`` or ``"right"``. Determines the relative alignment of lines.
Use the ``anchor`` parameter to specify the alignment to ``xy``.
``"left"``, ``"center"``, ``"right"`` or ``"justify"``. Determines
the relative alignment of lines. Use the ``anchor`` parameter to
specify the alignment to ``xy``.

.. versionadded:: 11.2.0 ``"justify"``
:param direction: Direction of the text. It can be ``"rtl"`` (right to
left), ``"ltr"`` (left to right) or ``"ttb"`` (top to bottom).
Requires libraqm.

@ -455,8 +458,11 @@ Methods
of Pillow, but implemented only in version 8.0.0.

:param spacing: The number of pixels between lines.
:param align: ``"left"``, ``"center"`` or ``"right"``. Determines the relative alignment of lines.
Use the ``anchor`` parameter to specify the alignment to ``xy``.
:param align: ``"left"``, ``"center"``, ``"right"`` or ``"justify"``. Determines
the relative alignment of lines. Use the ``anchor`` parameter to
specify the alignment to ``xy``.

.. versionadded:: 11.2.0 ``"justify"``
:param direction: Direction of the text. It can be ``"rtl"`` (right to
left), ``"ltr"`` (left to right) or ``"ttb"`` (top to bottom).
Requires libraqm.

@ -599,8 +605,11 @@ Methods
the number of pixels between lines.
:param align: If the text is passed on to
:py:meth:`~PIL.ImageDraw.ImageDraw.multiline_textbbox`,
``"left"``, ``"center"`` or ``"right"``. Determines the relative alignment of lines.
Use the ``anchor`` parameter to specify the alignment to ``xy``.
``"left"``, ``"center"``, ``"right"`` or ``"justify"``. Determines
the relative alignment of lines. Use the ``anchor`` parameter to
specify the alignment to ``xy``.

.. versionadded:: 11.2.0 ``"justify"``
:param direction: Direction of the text. It can be ``"rtl"`` (right to
left), ``"ltr"`` (left to right) or ``"ttb"`` (top to bottom).
Requires libraqm.

@ -650,8 +659,11 @@ Methods
vertical text. See :ref:`text-anchors` for details.
This parameter is ignored for non-TrueType fonts.
:param spacing: The number of pixels between lines.
:param align: ``"left"``, ``"center"`` or ``"right"``. Determines the relative alignment of lines.
Use the ``anchor`` parameter to specify the alignment to ``xy``.
:param align: ``"left"``, ``"center"``, ``"right"`` or ``"justify"``. Determines
the relative alignment of lines. Use the ``anchor`` parameter to
specify the alignment to ``xy``.

.. versionadded:: 11.2.0 ``"justify"``
:param direction: Direction of the text. It can be ``"rtl"`` (right to
left), ``"ltr"`` (left to right) or ``"ttb"`` (top to bottom).
Requires libraqm.
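A minimal sketch of how ``anchor`` and ``align`` interact for multiline text (the
coordinates and strings are arbitrary)::

    from PIL import Image, ImageDraw

    im = Image.new("RGB", (200, 100))
    draw = ImageDraw.Draw(im)
    # anchor="ma" places the text block so that xy is roughly its top-middle
    # point; align="right" then right-aligns the shorter lines inside the block.
    draw.multiline_text((100, 10), "first line\nsecond", anchor="ma", align="right")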
@ -54,6 +54,7 @@ Feature version numbers are available only where stated.
Support for the following features can be checked:

* ``libjpeg_turbo``: (compile time) Whether Pillow was compiled against the libjpeg-turbo version of libjpeg. Compile-time version number is available.
* ``mozjpeg``: (compile time) Whether Pillow was compiled against the MozJPEG version of libjpeg. Compile-time version number is available.
* ``zlib_ng``: (compile time) Whether Pillow was compiled against the zlib-ng version of zlib. Compile-time version number is available.
* ``raqm``: Raqm library, required for ``ImageFont.Layout.RAQM`` in :py:func:`PIL.ImageFont.truetype`. Run-time version number is available for Raqm 0.7.0 or newer.
* ``libimagequant``: (compile time) ImageQuant quantization support in :py:func:`PIL.Image.Image.quantize`. Run-time version number is available.
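A minimal sketch of querying one of these flags at run time, assuming a Pillow
build recent enough to know the ``zlib_ng`` key (unknown feature names raise
``ValueError``)::

    from PIL import features

    # check_feature() returns True or False; version_feature() returns a
    # version string, or None when no version information is available.
    if features.check_feature("zlib_ng"):
        print("zlib-ng version:", features.version_feature("zlib_ng"))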
docs/releasenotes/11.2.0.rst (new file, 75 lines)

@ -0,0 +1,75 @@
11.2.0
------

Security
========

TODO
^^^^

TODO

:cve:`YYYY-XXXXX`: TODO
^^^^^^^^^^^^^^^^^^^^^^^

TODO

Backwards Incompatible Changes
==============================

TODO
^^^^

Deprecations
============

Image.Image.get_child_images()
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

.. deprecated:: 11.2.0

``Image.Image.get_child_images()`` has been deprecated and will be removed in Pillow
13 (2026-10-15). It will be moved to ``ImageFile.ImageFile.get_child_images()``. The
method uses an image's file pointer, and so child images can only be retrieved from
an :py:class:`PIL.ImageFile.ImageFile` instance.

API Changes
===========

TODO
^^^^

TODO

API Additions
=============

``"justify"`` multiline text alignment
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

In addition to ``"left"``, ``"center"`` and ``"right"``, multiline text can also be
aligned using ``"justify"`` in :py:mod:`~PIL.ImageDraw`::

    from PIL import Image, ImageDraw

    im = Image.new("RGB", (50, 25))
    draw = ImageDraw.Draw(im)
    draw.multiline_text((0, 0), "Multiline\ntext 1", align="justify")
    draw.multiline_textbbox((0, 0), "Multiline\ntext 2", align="justify")

Check for MozJPEG
^^^^^^^^^^^^^^^^^

You can check if Pillow has been built against the MozJPEG version of the
libjpeg library, and what version of MozJPEG is being used::

    from PIL import features

    features.check_feature("mozjpeg")  # True or False
    features.version_feature("mozjpeg")  # "4.1.1" for example, or None

Other Changes
=============

TODO
^^^^

TODO
@ -14,6 +14,7 @@ expected to be backported to earlier versions.
.. toctree::
  :maxdepth: 2

  11.2.0
  11.1.0
  11.0.0
  10.4.0
@ -374,14 +374,10 @@ class BLP1Decoder(_BLPBaseDecoder):
|
|||
image = JpegImageFile(BytesIO(data))
|
||||
Image._decompression_bomb_check(image.size)
|
||||
if image.mode == "CMYK":
|
||||
decoder_name, extents, offset, args = image.tile[0]
|
||||
args = image.tile[0].args
|
||||
assert isinstance(args, tuple)
|
||||
image.tile = [
|
||||
ImageFile._Tile(decoder_name, extents, offset, (args[0], "CMYK"))
|
||||
]
|
||||
r, g, b = image.convert("RGB").split()
|
||||
reversed_image = Image.merge("RGB", (b, g, r))
|
||||
self.set_as_raw(reversed_image.tobytes())
|
||||
image.tile = [image.tile[0]._replace(args=(args[0], "CMYK"))]
|
||||
self.set_as_raw(image.convert("RGB").tobytes(), "BGR")
|
||||
|
||||
|
||||
class BLP2Decoder(_BLPBaseDecoder):
|
||||
|
|
|
@ -10,6 +10,7 @@
|
|||
#
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
from typing import IO
|
||||
|
||||
from . import Image, ImageFile
|
||||
|
@ -40,13 +41,11 @@ class BufrStubImageFile(ImageFile.StubImageFile):
|
|||
format_description = "BUFR"
|
||||
|
||||
def _open(self) -> None:
|
||||
offset = self.fp.tell()
|
||||
|
||||
if not _accept(self.fp.read(4)):
|
||||
msg = "Not a BUFR file"
|
||||
raise SyntaxError(msg)
|
||||
|
||||
self.fp.seek(offset)
|
||||
self.fp.seek(-4, os.SEEK_CUR)
|
||||
|
||||
# make something up
|
||||
self._mode = "F"
|
||||
|
|
|
@ -10,6 +10,7 @@
|
|||
#
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
from typing import IO
|
||||
|
||||
from . import Image, ImageFile
|
||||
|
@ -40,13 +41,11 @@ class GribStubImageFile(ImageFile.StubImageFile):
|
|||
format_description = "GRIB"
|
||||
|
||||
def _open(self) -> None:
|
||||
offset = self.fp.tell()
|
||||
|
||||
if not _accept(self.fp.read(8)):
|
||||
msg = "Not a GRIB file"
|
||||
raise SyntaxError(msg)
|
||||
|
||||
self.fp.seek(offset)
|
||||
self.fp.seek(-8, os.SEEK_CUR)
|
||||
|
||||
# make something up
|
||||
self._mode = "F"
|
||||
|
|
|
@ -10,6 +10,7 @@
|
|||
#
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
from typing import IO
|
||||
|
||||
from . import Image, ImageFile
|
||||
|
@ -40,13 +41,11 @@ class HDF5StubImageFile(ImageFile.StubImageFile):
|
|||
format_description = "HDF5"
|
||||
|
||||
def _open(self) -> None:
|
||||
offset = self.fp.tell()
|
||||
|
||||
if not _accept(self.fp.read(8)):
|
||||
msg = "Not an HDF file"
|
||||
raise SyntaxError(msg)
|
||||
|
||||
self.fp.seek(offset)
|
||||
self.fp.seek(-8, os.SEEK_CUR)
|
||||
|
||||
# make something up
|
||||
self._mode = "F"
|
||||
|
|
|
@ -145,7 +145,7 @@ class ImImageFile(ImageFile.ImageFile):
|
|||
if s == b"\r":
|
||||
continue
|
||||
|
||||
if not s or s == b"\0" or s == b"\x1A":
|
||||
if not s or s == b"\0" or s == b"\x1a":
|
||||
break
|
||||
|
||||
# FIXME: this may read whole file if not a text file
|
||||
|
@ -209,7 +209,7 @@ class ImImageFile(ImageFile.ImageFile):
|
|||
self._mode = self.info[MODE]
|
||||
|
||||
# Skip forward to start of image data
|
||||
while s and s[:1] != b"\x1A":
|
||||
while s and s[:1] != b"\x1a":
|
||||
s = self.fp.read(1)
|
||||
if not s:
|
||||
msg = "File truncated"
|
||||
|
|
|
@ -514,7 +514,7 @@ class ImagePointTransform:
|
|||
|
||||
|
||||
def _getscaleoffset(
|
||||
expr: Callable[[ImagePointTransform], ImagePointTransform | float]
|
||||
expr: Callable[[ImagePointTransform], ImagePointTransform | float],
|
||||
) -> tuple[float, float]:
|
||||
a = expr(ImagePointTransform(1, 0))
|
||||
return (a.scale, a.offset) if isinstance(a, ImagePointTransform) else (0, a)
|
||||
|
@ -603,24 +603,16 @@ class Image:
|
|||
def __enter__(self):
|
||||
return self
|
||||
|
||||
def _close_fp(self):
|
||||
if getattr(self, "_fp", False):
|
||||
if self._fp != self.fp:
|
||||
self._fp.close()
|
||||
self._fp = DeferredError(ValueError("Operation on closed image"))
|
||||
if self.fp:
|
||||
self.fp.close()
|
||||
|
||||
def __exit__(self, *args):
|
||||
if hasattr(self, "fp"):
|
||||
from . import ImageFile
|
||||
|
||||
if isinstance(self, ImageFile.ImageFile):
|
||||
if getattr(self, "_exclusive_fp", False):
|
||||
self._close_fp()
|
||||
self.fp = None
|
||||
|
||||
def close(self) -> None:
|
||||
"""
|
||||
Closes the file pointer, if possible.
|
||||
|
||||
This operation will destroy the image core and release its memory.
|
||||
The image data will be unusable afterward.
|
||||
|
||||
|
@ -629,13 +621,6 @@ class Image:
|
|||
:py:meth:`~PIL.Image.Image.load` method. See :ref:`file-handling` for
|
||||
more information.
|
||||
"""
|
||||
if hasattr(self, "fp"):
|
||||
try:
|
||||
self._close_fp()
|
||||
self.fp = None
|
||||
except Exception as msg:
|
||||
logger.debug("Error closing: %s", msg)
|
||||
|
||||
if getattr(self, "map", None):
|
||||
self.map: mmap.mmap | None = None
|
||||
|
||||
|
@ -1554,50 +1539,10 @@ class Image:
|
|||
self.getexif()
|
||||
|
||||
def get_child_images(self) -> list[ImageFile.ImageFile]:
|
||||
child_images = []
|
||||
exif = self.getexif()
|
||||
ifds = []
|
||||
if ExifTags.Base.SubIFDs in exif:
|
||||
subifd_offsets = exif[ExifTags.Base.SubIFDs]
|
||||
if subifd_offsets:
|
||||
if not isinstance(subifd_offsets, tuple):
|
||||
subifd_offsets = (subifd_offsets,)
|
||||
for subifd_offset in subifd_offsets:
|
||||
ifds.append((exif._get_ifd_dict(subifd_offset), subifd_offset))
|
||||
ifd1 = exif.get_ifd(ExifTags.IFD.IFD1)
|
||||
if ifd1 and ifd1.get(ExifTags.Base.JpegIFOffset):
|
||||
assert exif._info is not None
|
||||
ifds.append((ifd1, exif._info.next))
|
||||
from . import ImageFile
|
||||
|
||||
offset = None
|
||||
for ifd, ifd_offset in ifds:
|
||||
current_offset = self.fp.tell()
|
||||
if offset is None:
|
||||
offset = current_offset
|
||||
|
||||
fp = self.fp
|
||||
if ifd is not None:
|
||||
thumbnail_offset = ifd.get(ExifTags.Base.JpegIFOffset)
|
||||
if thumbnail_offset is not None:
|
||||
thumbnail_offset += getattr(self, "_exif_offset", 0)
|
||||
self.fp.seek(thumbnail_offset)
|
||||
data = self.fp.read(ifd.get(ExifTags.Base.JpegIFByteCount))
|
||||
fp = io.BytesIO(data)
|
||||
|
||||
with open(fp) as im:
|
||||
from . import TiffImagePlugin
|
||||
|
||||
if thumbnail_offset is None and isinstance(
|
||||
im, TiffImagePlugin.TiffImageFile
|
||||
):
|
||||
im._frame_pos = [ifd_offset]
|
||||
im._seek(0)
|
||||
im.load()
|
||||
child_images.append(im)
|
||||
|
||||
if offset is not None:
|
||||
self.fp.seek(offset)
|
||||
return child_images
|
||||
deprecate("Image.Image.get_child_images", 13)
|
||||
return ImageFile.ImageFile.get_child_images(self) # type: ignore[arg-type]
|
||||
|
||||
def getim(self) -> CapsuleType:
|
||||
"""
|
||||
|
@ -3939,7 +3884,7 @@ class Exif(_ExifBase):
|
|||
return self._fixup_dict(dict(info))
|
||||
|
||||
def _get_head(self) -> bytes:
|
||||
version = b"\x2B" if self.bigtiff else b"\x2A"
|
||||
version = b"\x2b" if self.bigtiff else b"\x2a"
|
||||
if self.endian == "<":
|
||||
head = b"II" + version + b"\x00" + o32le(8)
|
||||
else:
|
||||
|
|
|
@ -557,21 +557,6 @@ class ImageDraw:
|
|||
|
||||
return split_character in text
|
||||
|
||||
def _multiline_split(self, text: AnyStr) -> list[AnyStr]:
|
||||
return text.split("\n" if isinstance(text, str) else b"\n")
|
||||
|
||||
def _multiline_spacing(
|
||||
self,
|
||||
font: ImageFont.ImageFont | ImageFont.FreeTypeFont | ImageFont.TransposedFont,
|
||||
spacing: float,
|
||||
stroke_width: float,
|
||||
) -> float:
|
||||
return (
|
||||
self.textbbox((0, 0), "A", font, stroke_width=stroke_width)[3]
|
||||
+ stroke_width
|
||||
+ spacing
|
||||
)
|
||||
|
||||
def text(
|
||||
self,
|
||||
xy: tuple[float, float],
|
||||
|
@ -643,6 +628,7 @@ class ImageDraw:
|
|||
features=features,
|
||||
language=language,
|
||||
stroke_width=stroke_width,
|
||||
stroke_filled=True,
|
||||
anchor=anchor,
|
||||
ink=ink,
|
||||
start=start,
|
||||
|
@ -692,11 +678,125 @@ class ImageDraw:
|
|||
draw_text(stroke_ink, stroke_width)
|
||||
|
||||
# Draw normal text
|
||||
draw_text(ink, 0)
|
||||
if ink != stroke_ink:
|
||||
draw_text(ink)
|
||||
else:
|
||||
# Only draw normal text
|
||||
draw_text(ink)
|
||||
|
||||
def _prepare_multiline_text(
|
||||
self,
|
||||
xy: tuple[float, float],
|
||||
text: AnyStr,
|
||||
font: (
|
||||
ImageFont.ImageFont
|
||||
| ImageFont.FreeTypeFont
|
||||
| ImageFont.TransposedFont
|
||||
| None
|
||||
),
|
||||
anchor: str | None,
|
||||
spacing: float,
|
||||
align: str,
|
||||
direction: str | None,
|
||||
features: list[str] | None,
|
||||
language: str | None,
|
||||
stroke_width: float,
|
||||
embedded_color: bool,
|
||||
font_size: float | None,
|
||||
) -> tuple[
|
||||
ImageFont.ImageFont | ImageFont.FreeTypeFont | ImageFont.TransposedFont,
|
||||
str,
|
||||
list[tuple[tuple[float, float], AnyStr]],
|
||||
]:
|
||||
if direction == "ttb":
|
||||
msg = "ttb direction is unsupported for multiline text"
|
||||
raise ValueError(msg)
|
||||
|
||||
if anchor is None:
|
||||
anchor = "la"
|
||||
elif len(anchor) != 2:
|
||||
msg = "anchor must be a 2 character string"
|
||||
raise ValueError(msg)
|
||||
elif anchor[1] in "tb":
|
||||
msg = "anchor not supported for multiline text"
|
||||
raise ValueError(msg)
|
||||
|
||||
if font is None:
|
||||
font = self._getfont(font_size)
|
||||
|
||||
widths = []
|
||||
max_width: float = 0
|
||||
lines = text.split("\n" if isinstance(text, str) else b"\n")
|
||||
line_spacing = (
|
||||
self.textbbox((0, 0), "A", font, stroke_width=stroke_width)[3]
|
||||
+ stroke_width
|
||||
+ spacing
|
||||
)
|
||||
|
||||
for line in lines:
|
||||
line_width = self.textlength(
|
||||
line,
|
||||
font,
|
||||
direction=direction,
|
||||
features=features,
|
||||
language=language,
|
||||
embedded_color=embedded_color,
|
||||
)
|
||||
widths.append(line_width)
|
||||
max_width = max(max_width, line_width)
|
||||
|
||||
top = xy[1]
|
||||
if anchor[1] == "m":
|
||||
top -= (len(lines) - 1) * line_spacing / 2.0
|
||||
elif anchor[1] == "d":
|
||||
top -= (len(lines) - 1) * line_spacing
|
||||
|
||||
parts = []
|
||||
for idx, line in enumerate(lines):
|
||||
left = xy[0]
|
||||
width_difference = max_width - widths[idx]
|
||||
|
||||
# first align left by anchor
|
||||
if anchor[0] == "m":
|
||||
left -= width_difference / 2.0
|
||||
elif anchor[0] == "r":
|
||||
left -= width_difference
|
||||
|
||||
# then align by align parameter
|
||||
if align in ("left", "justify"):
|
||||
pass
|
||||
elif align == "center":
|
||||
left += width_difference / 2.0
|
||||
elif align == "right":
|
||||
left += width_difference
|
||||
else:
|
||||
msg = 'align must be "left", "center", "right" or "justify"'
|
||||
raise ValueError(msg)
|
||||
|
||||
if align == "justify" and width_difference != 0:
|
||||
words = line.split(" " if isinstance(text, str) else b" ")
|
||||
word_widths = [
|
||||
self.textlength(
|
||||
word,
|
||||
font,
|
||||
direction=direction,
|
||||
features=features,
|
||||
language=language,
|
||||
embedded_color=embedded_color,
|
||||
)
|
||||
for word in words
|
||||
]
|
||||
width_difference = max_width - sum(word_widths)
|
||||
for i, word in enumerate(words):
|
||||
parts.append(((left, top), word))
|
||||
left += word_widths[i] + width_difference / (len(words) - 1)
|
||||
else:
|
||||
parts.append(((left, top), line))
|
||||
|
||||
top += line_spacing
|
||||
|
||||
return font, anchor, parts
|
||||
|
||||
def multiline_text(
|
||||
self,
|
||||
xy: tuple[float, float],
|
||||
|
@ -720,62 +820,24 @@ class ImageDraw:
|
|||
*,
|
||||
font_size: float | None = None,
|
||||
) -> None:
|
||||
if direction == "ttb":
|
||||
msg = "ttb direction is unsupported for multiline text"
|
||||
raise ValueError(msg)
|
||||
|
||||
if anchor is None:
|
||||
anchor = "la"
|
||||
elif len(anchor) != 2:
|
||||
msg = "anchor must be a 2 character string"
|
||||
raise ValueError(msg)
|
||||
elif anchor[1] in "tb":
|
||||
msg = "anchor not supported for multiline text"
|
||||
raise ValueError(msg)
|
||||
|
||||
if font is None:
|
||||
font = self._getfont(font_size)
|
||||
|
||||
widths = []
|
||||
max_width: float = 0
|
||||
lines = self._multiline_split(text)
|
||||
line_spacing = self._multiline_spacing(font, spacing, stroke_width)
|
||||
for line in lines:
|
||||
line_width = self.textlength(
|
||||
line, font, direction=direction, features=features, language=language
|
||||
font, anchor, lines = self._prepare_multiline_text(
|
||||
xy,
|
||||
text,
|
||||
font,
|
||||
anchor,
|
||||
spacing,
|
||||
align,
|
||||
direction,
|
||||
features,
|
||||
language,
|
||||
stroke_width,
|
||||
embedded_color,
|
||||
font_size,
|
||||
)
|
||||
widths.append(line_width)
|
||||
max_width = max(max_width, line_width)
|
||||
|
||||
top = xy[1]
|
||||
if anchor[1] == "m":
|
||||
top -= (len(lines) - 1) * line_spacing / 2.0
|
||||
elif anchor[1] == "d":
|
||||
top -= (len(lines) - 1) * line_spacing
|
||||
|
||||
for idx, line in enumerate(lines):
|
||||
left = xy[0]
|
||||
width_difference = max_width - widths[idx]
|
||||
|
||||
# first align left by anchor
|
||||
if anchor[0] == "m":
|
||||
left -= width_difference / 2.0
|
||||
elif anchor[0] == "r":
|
||||
left -= width_difference
|
||||
|
||||
# then align by align parameter
|
||||
if align == "left":
|
||||
pass
|
||||
elif align == "center":
|
||||
left += width_difference / 2.0
|
||||
elif align == "right":
|
||||
left += width_difference
|
||||
else:
|
||||
msg = 'align must be "left", "center" or "right"'
|
||||
raise ValueError(msg)
|
||||
|
||||
for xy, line in lines:
|
||||
self.text(
|
||||
(left, top),
|
||||
xy,
|
||||
line,
|
||||
fill,
|
||||
font,
|
||||
|
@ -787,7 +849,6 @@ class ImageDraw:
|
|||
stroke_fill=stroke_fill,
|
||||
embedded_color=embedded_color,
|
||||
)
|
||||
top += line_spacing
|
||||
|
||||
def textlength(
|
||||
self,
|
||||
|
@ -889,69 +950,26 @@ class ImageDraw:
|
|||
*,
|
||||
font_size: float | None = None,
|
||||
) -> tuple[float, float, float, float]:
|
||||
if direction == "ttb":
|
||||
msg = "ttb direction is unsupported for multiline text"
|
||||
raise ValueError(msg)
|
||||
|
||||
if anchor is None:
|
||||
anchor = "la"
|
||||
elif len(anchor) != 2:
|
||||
msg = "anchor must be a 2 character string"
|
||||
raise ValueError(msg)
|
||||
elif anchor[1] in "tb":
|
||||
msg = "anchor not supported for multiline text"
|
||||
raise ValueError(msg)
|
||||
|
||||
if font is None:
|
||||
font = self._getfont(font_size)
|
||||
|
||||
widths = []
|
||||
max_width: float = 0
|
||||
lines = self._multiline_split(text)
|
||||
line_spacing = self._multiline_spacing(font, spacing, stroke_width)
|
||||
for line in lines:
|
||||
line_width = self.textlength(
|
||||
line,
|
||||
font, anchor, lines = self._prepare_multiline_text(
|
||||
xy,
|
||||
text,
|
||||
font,
|
||||
direction=direction,
|
||||
features=features,
|
||||
language=language,
|
||||
embedded_color=embedded_color,
|
||||
anchor,
|
||||
spacing,
|
||||
align,
|
||||
direction,
|
||||
features,
|
||||
language,
|
||||
stroke_width,
|
||||
embedded_color,
|
||||
font_size,
|
||||
)
|
||||
widths.append(line_width)
|
||||
max_width = max(max_width, line_width)
|
||||
|
||||
top = xy[1]
|
||||
if anchor[1] == "m":
|
||||
top -= (len(lines) - 1) * line_spacing / 2.0
|
||||
elif anchor[1] == "d":
|
||||
top -= (len(lines) - 1) * line_spacing
|
||||
|
||||
bbox: tuple[float, float, float, float] | None = None
|
||||
|
||||
for idx, line in enumerate(lines):
|
||||
left = xy[0]
|
||||
width_difference = max_width - widths[idx]
|
||||
|
||||
# first align left by anchor
|
||||
if anchor[0] == "m":
|
||||
left -= width_difference / 2.0
|
||||
elif anchor[0] == "r":
|
||||
left -= width_difference
|
||||
|
||||
# then align by align parameter
|
||||
if align == "left":
|
||||
pass
|
||||
elif align == "center":
|
||||
left += width_difference / 2.0
|
||||
elif align == "right":
|
||||
left += width_difference
|
||||
else:
|
||||
msg = 'align must be "left", "center" or "right"'
|
||||
raise ValueError(msg)
|
||||
|
||||
for xy, line in lines:
|
||||
bbox_line = self.textbbox(
|
||||
(left, top),
|
||||
xy,
|
||||
line,
|
||||
font,
|
||||
anchor,
|
||||
|
@ -971,8 +989,6 @@ class ImageDraw:
|
|||
max(bbox[3], bbox_line[3]),
|
||||
)
|
||||
|
||||
top += line_spacing
|
||||
|
||||
if bbox is None:
|
||||
return xy[0], xy[1], xy[0], xy[1]
|
||||
return bbox
|
||||
|
|
|
@ -31,18 +31,21 @@ from __future__ import annotations
|
|||
import abc
|
||||
import io
|
||||
import itertools
|
||||
import logging
|
||||
import os
|
||||
import struct
|
||||
import sys
|
||||
from typing import IO, TYPE_CHECKING, Any, NamedTuple, cast
|
||||
|
||||
from . import Image
|
||||
from . import ExifTags, Image
|
||||
from ._deprecate import deprecate
|
||||
from ._util import is_path
|
||||
from ._util import DeferredError, is_path
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from ._typing import StrOrBytesPath
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
MAXBLOCK = 65536
|
||||
|
||||
SAFEBLOCK = 1024 * 1024
|
||||
|
@ -163,6 +166,85 @@ class ImageFile(Image.Image):
|
|||
def _open(self) -> None:
|
||||
pass
|
||||
|
||||
def _close_fp(self):
|
||||
if getattr(self, "_fp", False):
|
||||
if self._fp != self.fp:
|
||||
self._fp.close()
|
||||
self._fp = DeferredError(ValueError("Operation on closed image"))
|
||||
if self.fp:
|
||||
self.fp.close()
|
||||
|
||||
def close(self) -> None:
|
||||
"""
|
||||
Closes the file pointer, if possible.
|
||||
|
||||
This operation will destroy the image core and release its memory.
|
||||
The image data will be unusable afterward.
|
||||
|
||||
This function is required to close images that have multiple frames or
|
||||
have not had their file read and closed by the
|
||||
:py:meth:`~PIL.Image.Image.load` method. See :ref:`file-handling` for
|
||||
more information.
|
||||
"""
|
||||
try:
|
||||
self._close_fp()
|
||||
self.fp = None
|
||||
except Exception as msg:
|
||||
logger.debug("Error closing: %s", msg)
|
||||
|
||||
super().close()
|
||||
|
||||
def get_child_images(self) -> list[ImageFile]:
|
||||
child_images = []
|
||||
exif = self.getexif()
|
||||
ifds = []
|
||||
if ExifTags.Base.SubIFDs in exif:
|
||||
subifd_offsets = exif[ExifTags.Base.SubIFDs]
|
||||
if subifd_offsets:
|
||||
if not isinstance(subifd_offsets, tuple):
|
||||
subifd_offsets = (subifd_offsets,)
|
||||
for subifd_offset in subifd_offsets:
|
||||
ifds.append((exif._get_ifd_dict(subifd_offset), subifd_offset))
|
||||
ifd1 = exif.get_ifd(ExifTags.IFD.IFD1)
|
||||
if ifd1 and ifd1.get(ExifTags.Base.JpegIFOffset):
|
||||
assert exif._info is not None
|
||||
ifds.append((ifd1, exif._info.next))
|
||||
|
||||
offset = None
|
||||
for ifd, ifd_offset in ifds:
|
||||
assert self.fp is not None
|
||||
current_offset = self.fp.tell()
|
||||
if offset is None:
|
||||
offset = current_offset
|
||||
|
||||
fp = self.fp
|
||||
if ifd is not None:
|
||||
thumbnail_offset = ifd.get(ExifTags.Base.JpegIFOffset)
|
||||
if thumbnail_offset is not None:
|
||||
thumbnail_offset += getattr(self, "_exif_offset", 0)
|
||||
self.fp.seek(thumbnail_offset)
|
||||
|
||||
length = ifd.get(ExifTags.Base.JpegIFByteCount)
|
||||
assert isinstance(length, int)
|
||||
data = self.fp.read(length)
|
||||
fp = io.BytesIO(data)
|
||||
|
||||
with Image.open(fp) as im:
|
||||
from . import TiffImagePlugin
|
||||
|
||||
if thumbnail_offset is None and isinstance(
|
||||
im, TiffImagePlugin.TiffImageFile
|
||||
):
|
||||
im._frame_pos = [ifd_offset]
|
||||
im._seek(0)
|
||||
im.load()
|
||||
child_images.append(im)
|
||||
|
||||
if offset is not None:
|
||||
assert self.fp is not None
|
||||
self.fp.seek(offset)
|
||||
return child_images
|
||||
|
||||
def get_format_mimetype(self) -> str | None:
|
||||
if self.custom_mimetype:
|
||||
return self.custom_mimetype
|
||||
|
|
|
@ -598,8 +598,6 @@ class Color3DLUT(MultibandFilter):
|
|||
self.mode or image.mode,
|
||||
Image.Resampling.BILINEAR,
|
||||
self.channels,
|
||||
self.size[0],
|
||||
self.size[1],
|
||||
self.size[2],
|
||||
self.size,
|
||||
self.table,
|
||||
)
|
||||
|
|
|
@ -644,10 +644,10 @@ class FreeTypeFont:
|
|||
features,
|
||||
language,
|
||||
stroke_width,
|
||||
kwargs.get("stroke_filled", False),
|
||||
anchor,
|
||||
ink,
|
||||
start[0],
|
||||
start[1],
|
||||
start,
|
||||
)
|
||||
|
||||
def font_variant(
|
||||
|
|
|
@ -48,9 +48,9 @@ class AffineTransform(Transform):
    Define an affine image transform.

    This function takes a 6-tuple (a, b, c, d, e, f) which contain the first
    two rows from an affine transform matrix. For each pixel (x, y) in the
    output image, the new value is taken from a position (a x + b y + c,
    d x + e y + f) in the input image, rounded to nearest pixel.
    two rows from the inverse of an affine transform matrix. For each pixel
    (x, y) in the output image, the new value is taken from a position (a x +
    b y + c, d x + e y + f) in the input image, rounded to nearest pixel.

    This function can be used to scale, translate, rotate, and shear the
    original image.

@ -58,7 +58,7 @@ class AffineTransform(Transform):
    See :py:meth:`.Image.transform`

    :param matrix: A 6-tuple (a, b, c, d, e, f) containing the first two rows
        from an affine transform matrix.
        from the inverse of an affine transform matrix.
    """

    method = Image.Transform.AFFINE
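A minimal sketch of what the inverse convention means in practice (the file name
is only illustrative)::

    from PIL import Image

    with Image.open("hopper.png") as im:
        # The matrix maps each output pixel (x, y) to input (x - 10, y), so the
        # visible content ends up shifted 10 pixels to the right in the result.
        shifted = im.transform(im.size, Image.Transform.AFFINE, (1, 0, -10, 0, 1, 0))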
@ -55,7 +55,7 @@ class ImtImageFile(ImageFile.ImageFile):
|
|||
if not s:
|
||||
break
|
||||
|
||||
if s == b"\x0C":
|
||||
if s == b"\x0c":
|
||||
# image data begins
|
||||
self.tile = [
|
||||
ImageFile._Tile(
|
||||
|
|
|
@ -325,7 +325,7 @@ MARKER = {
|
|||
|
||||
def _accept(prefix: bytes) -> bool:
|
||||
# Magic number was taken from https://en.wikipedia.org/wiki/JPEG
|
||||
return prefix[:3] == b"\xFF\xD8\xFF"
|
||||
return prefix[:3] == b"\xff\xd8\xff"
|
||||
|
||||
|
||||
##
|
||||
|
@ -342,7 +342,7 @@ class JpegImageFile(ImageFile.ImageFile):
|
|||
if not _accept(s):
|
||||
msg = "not a JPEG file"
|
||||
raise SyntaxError(msg)
|
||||
s = b"\xFF"
|
||||
s = b"\xff"
|
||||
|
||||
# Create attributes
|
||||
self.bits = self.layers = 0
|
||||
|
@ -417,7 +417,7 @@ class JpegImageFile(ImageFile.ImageFile):
|
|||
# Premature EOF.
|
||||
# Pretend file is finished adding EOI marker
|
||||
self._ended = True
|
||||
return b"\xFF\xD9"
|
||||
return b"\xff\xd9"
|
||||
|
||||
return s
|
||||
|
||||
|
@ -712,7 +712,7 @@ def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
|
|||
def validate_qtables(
|
||||
qtables: (
|
||||
str | tuple[list[int], ...] | list[list[int]] | dict[int, list[int]] | None
|
||||
)
|
||||
),
|
||||
) -> list[list[int]] | None:
|
||||
if qtables is None:
|
||||
return qtables
|
||||
|
@ -769,7 +769,7 @@ def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
|
|||
msg = "XMP data is too long"
|
||||
raise ValueError(msg)
|
||||
size = o16(2 + overhead_len + len(xmp))
|
||||
extra += b"\xFF\xE1" + size + b"http://ns.adobe.com/xap/1.0/\x00" + xmp
|
||||
extra += b"\xff\xe1" + size + b"http://ns.adobe.com/xap/1.0/\x00" + xmp
|
||||
|
||||
icc_profile = info.get("icc_profile")
|
||||
if icc_profile:
|
||||
|
@ -783,7 +783,7 @@ def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
|
|||
for marker in markers:
|
||||
size = o16(2 + overhead_len + len(marker))
|
||||
extra += (
|
||||
b"\xFF\xE2"
|
||||
b"\xff\xe2"
|
||||
+ size
|
||||
+ b"ICC_PROFILE\0"
|
||||
+ o8(i)
|
||||
|
@ -816,8 +816,7 @@ def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
|
|||
optimize,
|
||||
info.get("keep_rgb", False),
|
||||
info.get("streamtype", 0),
|
||||
dpi[0],
|
||||
dpi[1],
|
||||
dpi,
|
||||
subsampling,
|
||||
info.get("restart_marker_blocks", 0),
|
||||
info.get("restart_marker_rows", 0),
|
||||
|
|
|
@ -51,7 +51,7 @@ def _save_all(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
|
|||
if not offsets:
|
||||
# APP2 marker
|
||||
im_frame.encoderinfo["extra"] = (
|
||||
b"\xFF\xE2" + struct.pack(">H", 6 + 82) + b"MPF\0" + b" " * 82
|
||||
b"\xff\xe2" + struct.pack(">H", 6 + 82) + b"MPF\0" + b" " * 82
|
||||
)
|
||||
exif = im_frame.encoderinfo.get("exif")
|
||||
if isinstance(exif, Image.Exif):
|
||||
|
@ -84,7 +84,7 @@ def _save_all(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
|
|||
ifd[0xB002] = mpentries
|
||||
|
||||
fp.seek(mpf_offset)
|
||||
fp.write(b"II\x2A\x00" + o32le(8) + ifd.tobytes(8))
|
||||
fp.write(b"II\x2a\x00" + o32le(8) + ifd.tobytes(8))
|
||||
fp.seek(0, os.SEEK_END)
|
||||
|
||||
|
||||
|
|
|
@ -188,7 +188,7 @@ def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
|
|||
+ o16(dpi[0])
|
||||
+ o16(dpi[1])
|
||||
+ b"\0" * 24
|
||||
+ b"\xFF" * 24
|
||||
+ b"\xff" * 24
|
||||
+ b"\0"
|
||||
+ o8(planes)
|
||||
+ o16(stride)
|
||||
|
|
|
@ -19,14 +19,14 @@ def encode_text(s: str) -> bytes:
|
|||
|
||||
PDFDocEncoding = {
|
||||
0x16: "\u0017",
|
||||
0x18: "\u02D8",
|
||||
0x19: "\u02C7",
|
||||
0x1A: "\u02C6",
|
||||
0x1B: "\u02D9",
|
||||
0x1C: "\u02DD",
|
||||
0x1D: "\u02DB",
|
||||
0x1E: "\u02DA",
|
||||
0x1F: "\u02DC",
|
||||
0x18: "\u02d8",
|
||||
0x19: "\u02c7",
|
||||
0x1A: "\u02c6",
|
||||
0x1B: "\u02d9",
|
||||
0x1C: "\u02dd",
|
||||
0x1D: "\u02db",
|
||||
0x1E: "\u02da",
|
||||
0x1F: "\u02dc",
|
||||
0x80: "\u2022",
|
||||
0x81: "\u2020",
|
||||
0x82: "\u2021",
|
||||
|
@ -36,29 +36,29 @@ PDFDocEncoding = {
|
|||
0x86: "\u0192",
|
||||
0x87: "\u2044",
|
||||
0x88: "\u2039",
|
||||
0x89: "\u203A",
|
||||
0x89: "\u203a",
|
||||
0x8A: "\u2212",
|
||||
0x8B: "\u2030",
|
||||
0x8C: "\u201E",
|
||||
0x8D: "\u201C",
|
||||
0x8E: "\u201D",
|
||||
0x8C: "\u201e",
|
||||
0x8D: "\u201c",
|
||||
0x8E: "\u201d",
|
||||
0x8F: "\u2018",
|
||||
0x90: "\u2019",
|
||||
0x91: "\u201A",
|
||||
0x91: "\u201a",
|
||||
0x92: "\u2122",
|
||||
0x93: "\uFB01",
|
||||
0x94: "\uFB02",
|
||||
0x93: "\ufb01",
|
||||
0x94: "\ufb02",
|
||||
0x95: "\u0141",
|
||||
0x96: "\u0152",
|
||||
0x97: "\u0160",
|
||||
0x98: "\u0178",
|
||||
0x99: "\u017D",
|
||||
0x99: "\u017d",
|
||||
0x9A: "\u0131",
|
||||
0x9B: "\u0142",
|
||||
0x9C: "\u0153",
|
||||
0x9D: "\u0161",
|
||||
0x9E: "\u017E",
|
||||
0xA0: "\u20AC",
|
||||
0x9E: "\u017e",
|
||||
0xA0: "\u20ac",
|
||||
}
|
||||
|
||||
|
||||
|
|
|
@ -1382,7 +1382,7 @@ def _save(
|
|||
b"\0", # 12: interlace flag
|
||||
)
|
||||
|
||||
chunks = [b"cHRM", b"gAMA", b"sBIT", b"sRGB", b"tIME"]
|
||||
chunks = [b"cHRM", b"cICP", b"gAMA", b"sBIT", b"sRGB", b"tIME"]
|
||||
|
||||
icc = im.encoderinfo.get("icc_profile", im.info.get("icc_profile"))
|
||||
if icc:
|
||||
|
@ -1433,7 +1433,7 @@ def _save(
|
|||
chunk(fp, b"tRNS", transparency[:alpha_bytes])
|
||||
else:
|
||||
transparency = max(0, min(255, transparency))
|
||||
alpha = b"\xFF" * transparency + b"\0"
|
||||
alpha = b"\xff" * transparency + b"\0"
|
||||
chunk(fp, b"tRNS", alpha[:alpha_bytes])
|
||||
elif im.mode in ("1", "L", "I", "I;16"):
|
||||
transparency = max(0, min(65535, transparency))
|
||||
|
|
|
@ -230,7 +230,7 @@ class PpmPlainDecoder(ImageFile.PyDecoder):
|
|||
msg = b"Invalid token for this mode: %s" % bytes([token])
|
||||
raise ValueError(msg)
|
||||
data = (data + tokens)[:total_bytes]
|
||||
invert = bytes.maketrans(b"01", b"\xFF\x00")
|
||||
invert = bytes.maketrans(b"01", b"\xff\x00")
|
||||
return data.translate(invert)
|
||||
|
||||
def _decode_blocks(self, maxval: int) -> bytearray:
|
||||
|
|
|
@ -267,7 +267,7 @@ def makeSpiderHeader(im: Image.Image) -> list[bytes]:
|
|||
|
||||
|
||||
def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
|
||||
if im.mode[0] != "F":
|
||||
if im.mode != "F":
|
||||
im = im.convert("F")
|
||||
|
||||
hdr = makeSpiderHeader(im)
|
||||
|
|
|
@@ -275,12 +275,12 @@ OPEN_INFO = {
 MAX_SAMPLESPERPIXEL = max(len(key_tp[4]) for key_tp in OPEN_INFO)

 PREFIXES = [
-b"MM\x00\x2A", # Valid TIFF header with big-endian byte order
-b"II\x2A\x00", # Valid TIFF header with little-endian byte order
-b"MM\x2A\x00", # Invalid TIFF header, assume big-endian
-b"II\x00\x2A", # Invalid TIFF header, assume little-endian
-b"MM\x00\x2B", # BigTIFF with big-endian byte order
-b"II\x2B\x00", # BigTIFF with little-endian byte order
+b"MM\x00\x2a", # Valid TIFF header with big-endian byte order
+b"II\x2a\x00", # Valid TIFF header with little-endian byte order
+b"MM\x2a\x00", # Invalid TIFF header, assume big-endian
+b"II\x00\x2a", # Invalid TIFF header, assume little-endian
+b"MM\x00\x2b", # BigTIFF with big-endian byte order
+b"II\x2b\x00", # BigTIFF with little-endian byte order
 ]

 if not getattr(Image.core, "libtiff_support_custom_tags", True):

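The PREFIXES entries change only in escape-sequence case; the accepted four-byte magic numbers are the same. A small sketch of classifying a header against such a list (helper name and return shape are illustrative):

PREFIXES = [
    b"MM\x00\x2a", b"II\x2a\x00",  # valid classic TIFF
    b"MM\x2a\x00", b"II\x00\x2a",  # invalid but tolerated classic TIFF
    b"MM\x00\x2b", b"II\x2b\x00",  # BigTIFF
]

def classify_tiff_header(header: bytes) -> tuple[str, bool]:
    # returns (struct byte-order prefix, is_bigtiff)
    magic = header[:4]
    if magic not in PREFIXES:
        raise ValueError("not a TIFF header")
    bigtiff = magic in (b"MM\x00\x2b", b"II\x2b\x00")
    return ("<" if magic.startswith(b"II") else ">"), bigtiff

classify_tiff_header(b"II\x2a\x00\x08\x00\x00\x00")  # ('<', False)
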
@ -582,7 +582,7 @@ class ImageFileDirectory_v2(_IFDv2Base):
|
|||
|
||||
def __init__(
|
||||
self,
|
||||
ifh: bytes = b"II\x2A\x00\x00\x00\x00\x00",
|
||||
ifh: bytes = b"II\x2a\x00\x00\x00\x00\x00",
|
||||
prefix: bytes | None = None,
|
||||
group: int | None = None,
|
||||
) -> None:
|
||||
|
@ -949,7 +949,7 @@ class ImageFileDirectory_v2(_IFDv2Base):
|
|||
warnings.warn(str(msg))
|
||||
return
|
||||
|
||||
def _get_ifh(self):
|
||||
def _get_ifh(self) -> bytes:
|
||||
ifh = self._prefix + self._pack("H", 43 if self._bigtiff else 42)
|
||||
if self._bigtiff:
|
||||
ifh += self._pack("HH", 8, 0)
|
||||
|
@ -962,13 +962,16 @@ class ImageFileDirectory_v2(_IFDv2Base):
|
|||
result = self._pack("Q" if self._bigtiff else "H", len(self._tags_v2))
|
||||
|
||||
entries: list[tuple[int, int, int, bytes, bytes]] = []
|
||||
offset += len(result) + len(self._tags_v2) * (20 if self._bigtiff else 12) + 4
|
||||
|
||||
fmt = "Q" if self._bigtiff else "L"
|
||||
fmt_size = 8 if self._bigtiff else 4
|
||||
offset += (
|
||||
len(result) + len(self._tags_v2) * (20 if self._bigtiff else 12) + fmt_size
|
||||
)
|
||||
stripoffsets = None
|
||||
|
||||
# pass 1: convert tags to binary format
|
||||
# always write tags in ascending order
|
||||
fmt = "Q" if self._bigtiff else "L"
|
||||
fmt_size = 8 if self._bigtiff else 4
|
||||
for tag, value in sorted(self._tags_v2.items()):
|
||||
if tag == STRIPOFFSETS:
|
||||
stripoffsets = len(entries)
|
||||
|
@ -1024,7 +1027,7 @@ class ImageFileDirectory_v2(_IFDv2Base):
|
|||
)
|
||||
|
||||
# -- overwrite here for multi-page --
|
||||
result += b"\0\0\0\0" # end of entries
|
||||
result += self._pack(fmt, 0) # end of entries
|
||||
|
||||
# pass 3: write auxiliary data to file
|
||||
for tag, typ, count, value, data in entries:
|
||||
|
@ -1406,7 +1409,8 @@ class TiffImageFile(ImageFile.ImageFile):
|
|||
self.fp = None # might be shared
|
||||
|
||||
if err < 0:
|
||||
raise OSError(err)
|
||||
msg = f"decoder error {err}"
|
||||
raise OSError(msg)
|
||||
|
||||
return Image.Image.load(self)
|
||||
|
||||
|
@ -2043,20 +2047,21 @@ class AppendingTiffWriter(io.BytesIO):
|
|||
self.offsetOfNewPage = 0
|
||||
|
||||
self.IIMM = iimm = self.f.read(4)
|
||||
self._bigtiff = b"\x2b" in iimm
|
||||
if not iimm:
|
||||
# empty file - first page
|
||||
self.isFirst = True
|
||||
return
|
||||
|
||||
self.isFirst = False
|
||||
if iimm == b"II\x2a\x00":
|
||||
self.setEndian("<")
|
||||
elif iimm == b"MM\x00\x2a":
|
||||
self.setEndian(">")
|
||||
else:
|
||||
if iimm not in PREFIXES:
|
||||
msg = "Invalid TIFF file header"
|
||||
raise RuntimeError(msg)
|
||||
|
||||
self.setEndian("<" if iimm.startswith(II) else ">")
|
||||
|
||||
if self._bigtiff:
|
||||
self.f.seek(4, os.SEEK_CUR)
|
||||
self.skipIFDs()
|
||||
self.goToEnd()
|
||||
|
||||
|
@ -2076,11 +2081,13 @@ class AppendingTiffWriter(io.BytesIO):
|
|||
msg = "IIMM of new page doesn't match IIMM of first page"
|
||||
raise RuntimeError(msg)
|
||||
|
||||
ifd_offset = self.readLong()
|
||||
if self._bigtiff:
|
||||
self.f.seek(4, os.SEEK_CUR)
|
||||
ifd_offset = self._read(8 if self._bigtiff else 4)
|
||||
ifd_offset += self.offsetOfNewPage
|
||||
assert self.whereToWriteNewIFDOffset is not None
|
||||
self.f.seek(self.whereToWriteNewIFDOffset)
|
||||
self.writeLong(ifd_offset)
|
||||
self._write(ifd_offset, 8 if self._bigtiff else 4)
|
||||
self.f.seek(ifd_offset)
|
||||
self.fixIFD()
|
||||
|
||||
|
@ -2126,18 +2133,20 @@ class AppendingTiffWriter(io.BytesIO):
|
|||
self.endian = endian
|
||||
self.longFmt = f"{self.endian}L"
|
||||
self.shortFmt = f"{self.endian}H"
|
||||
self.tagFormat = f"{self.endian}HHL"
|
||||
self.tagFormat = f"{self.endian}HH" + ("Q" if self._bigtiff else "L")
|
||||
|
||||
def skipIFDs(self) -> None:
|
||||
while True:
|
||||
ifd_offset = self.readLong()
|
||||
ifd_offset = self._read(8 if self._bigtiff else 4)
|
||||
if ifd_offset == 0:
|
||||
self.whereToWriteNewIFDOffset = self.f.tell() - 4
|
||||
self.whereToWriteNewIFDOffset = self.f.tell() - (
|
||||
8 if self._bigtiff else 4
|
||||
)
|
||||
break
|
||||
|
||||
self.f.seek(ifd_offset)
|
||||
num_tags = self.readShort()
|
||||
self.f.seek(num_tags * 12, os.SEEK_CUR)
|
||||
num_tags = self._read(8 if self._bigtiff else 2)
|
||||
self.f.seek(num_tags * (20 if self._bigtiff else 12), os.SEEK_CUR)
|
||||
|
||||
def write(self, data: Buffer, /) -> int:
|
||||
return self.f.write(data)
|
||||
|
@ -2167,17 +2176,19 @@ class AppendingTiffWriter(io.BytesIO):
|
|||
msg = f"wrote only {bytes_written} bytes but wanted {expected}"
|
||||
raise RuntimeError(msg)
|
||||
|
||||
def rewriteLastShortToLong(self, value: int) -> None:
|
||||
self.f.seek(-2, os.SEEK_CUR)
|
||||
bytes_written = self.f.write(struct.pack(self.longFmt, value))
|
||||
self._verify_bytes_written(bytes_written, 4)
|
||||
|
||||
def _rewriteLast(self, value: int, field_size: int) -> None:
|
||||
def _rewriteLast(
|
||||
self, value: int, field_size: int, new_field_size: int = 0
|
||||
) -> None:
|
||||
self.f.seek(-field_size, os.SEEK_CUR)
|
||||
if not new_field_size:
|
||||
new_field_size = field_size
|
||||
bytes_written = self.f.write(
|
||||
struct.pack(self.endian + self._fmt(field_size), value)
|
||||
struct.pack(self.endian + self._fmt(new_field_size), value)
|
||||
)
|
||||
self._verify_bytes_written(bytes_written, field_size)
|
||||
self._verify_bytes_written(bytes_written, new_field_size)
|
||||
|
||||
def rewriteLastShortToLong(self, value: int) -> None:
|
||||
self._rewriteLast(value, 2, 4)
|
||||
|
||||
def rewriteLastShort(self, value: int) -> None:
|
||||
return self._rewriteLast(value, 2)
|
||||
|
@ -2185,13 +2196,17 @@ class AppendingTiffWriter(io.BytesIO):
|
|||
def rewriteLastLong(self, value: int) -> None:
|
||||
return self._rewriteLast(value, 4)
|
||||
|
||||
def _write(self, value: int, field_size: int) -> None:
|
||||
bytes_written = self.f.write(
|
||||
struct.pack(self.endian + self._fmt(field_size), value)
|
||||
)
|
||||
self._verify_bytes_written(bytes_written, field_size)
|
||||
|
||||
def writeShort(self, value: int) -> None:
|
||||
bytes_written = self.f.write(struct.pack(self.shortFmt, value))
|
||||
self._verify_bytes_written(bytes_written, 2)
|
||||
self._write(value, 2)
|
||||
|
||||
def writeLong(self, value: int) -> None:
|
||||
bytes_written = self.f.write(struct.pack(self.longFmt, value))
|
||||
self._verify_bytes_written(bytes_written, 4)
|
||||
self._write(value, 4)
|
||||
|
||||
def close(self) -> None:
|
||||
self.finalize()
|
||||
|
@ -2199,24 +2214,37 @@ class AppendingTiffWriter(io.BytesIO):
|
|||
self.f.close()
|
||||
|
||||
def fixIFD(self) -> None:
|
||||
num_tags = self.readShort()
|
||||
num_tags = self._read(8 if self._bigtiff else 2)
|
||||
|
||||
for i in range(num_tags):
|
||||
tag, field_type, count = struct.unpack(self.tagFormat, self.f.read(8))
|
||||
tag, field_type, count = struct.unpack(
|
||||
self.tagFormat, self.f.read(12 if self._bigtiff else 8)
|
||||
)
|
||||
|
||||
field_size = self.fieldSizes[field_type]
|
||||
total_size = field_size * count
|
||||
is_local = total_size <= 4
|
||||
fmt_size = 8 if self._bigtiff else 4
|
||||
is_local = total_size <= fmt_size
|
||||
if not is_local:
|
||||
offset = self.readLong() + self.offsetOfNewPage
|
||||
self.rewriteLastLong(offset)
|
||||
offset = self._read(fmt_size) + self.offsetOfNewPage
|
||||
self._rewriteLast(offset, fmt_size)
|
||||
|
||||
if tag in self.Tags:
|
||||
cur_pos = self.f.tell()
|
||||
|
||||
logger.debug(
|
||||
"fixIFD: %s (%d) - type: %s (%d) - type size: %d - count: %d",
|
||||
TiffTags.lookup(tag).name,
|
||||
tag,
|
||||
TYPES.get(field_type, "unknown"),
|
||||
field_type,
|
||||
field_size,
|
||||
count,
|
||||
)
|
||||
|
||||
if is_local:
|
||||
self._fixOffsets(count, field_size)
|
||||
self.f.seek(cur_pos + 4)
|
||||
self.f.seek(cur_pos + fmt_size)
|
||||
else:
|
||||
self.f.seek(offset)
|
||||
self._fixOffsets(count, field_size)
|
||||
|
@ -2224,24 +2252,33 @@ class AppendingTiffWriter(io.BytesIO):
|
|||
|
||||
elif is_local:
|
||||
# skip the locally stored value that is not an offset
|
||||
self.f.seek(4, os.SEEK_CUR)
|
||||
self.f.seek(fmt_size, os.SEEK_CUR)
|
||||
|
||||
def _fixOffsets(self, count: int, field_size: int) -> None:
|
||||
for i in range(count):
|
||||
offset = self._read(field_size)
|
||||
offset += self.offsetOfNewPage
|
||||
if field_size == 2 and offset >= 65536:
|
||||
# offset is now too large - we must convert shorts to longs
|
||||
|
||||
new_field_size = 0
|
||||
if self._bigtiff and field_size in (2, 4) and offset >= 2**32:
|
||||
# offset is now too large - we must convert long to long8
|
||||
new_field_size = 8
|
||||
elif field_size == 2 and offset >= 2**16:
|
||||
# offset is now too large - we must convert short to long
|
||||
new_field_size = 4
|
||||
if new_field_size:
|
||||
if count != 1:
|
||||
msg = "not implemented"
|
||||
raise RuntimeError(msg) # XXX TODO
|
||||
|
||||
# simple case - the offset is just one and therefore it is
|
||||
# local (not referenced with another offset)
|
||||
self.rewriteLastShortToLong(offset)
|
||||
self.f.seek(-10, os.SEEK_CUR)
|
||||
self.writeShort(TiffTags.LONG) # rewrite the type to LONG
|
||||
self.f.seek(8, os.SEEK_CUR)
|
||||
self._rewriteLast(offset, field_size, new_field_size)
|
||||
# Move back past the new offset, past 'count', and before 'field_type'
|
||||
rewind = -new_field_size - 4 - 2
|
||||
self.f.seek(rewind, os.SEEK_CUR)
|
||||
self.writeShort(new_field_size) # rewrite the type
|
||||
self.f.seek(2 - rewind, os.SEEK_CUR)
|
||||
else:
|
||||
self._rewriteLast(offset, field_size)
|
||||
|
||||
|
|
|
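The _fixOffsets rework above rewrites each stored offset after the page data has moved and, when the adjusted value no longer fits its field, widens the field: a SHORT becomes a LONG, and under BigTIFF a LONG can become a LONG8. A rough standalone illustration of just that size decision (a sketch, not the class's code):

import struct

def widened_field_size(field_size: int, offset: int, bigtiff: bool) -> int:
    if bigtiff and field_size in (2, 4) and offset >= 2**32:
        return 8  # promote to LONG8
    if field_size == 2 and offset >= 2**16:
        return 4  # promote SHORT to LONG
    return field_size  # still fits

fmt = {2: "H", 4: "L", 8: "Q"}
new_size = widened_field_size(2, 70000, bigtiff=False)  # -> 4
struct.pack("<" + fmt[new_size], 70000)                 # 4-byte little-endian value
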
@ -223,8 +223,7 @@ def _save_all(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
|
|||
|
||||
# Setup the WebP animation encoder
|
||||
enc = _webp.WebPAnimEncoder(
|
||||
im.size[0],
|
||||
im.size[1],
|
||||
im.size,
|
||||
background,
|
||||
loop,
|
||||
minimize_size,
|
||||
|
|
|
@@ -47,6 +47,8 @@ def deprecate(
 raise RuntimeError(msg)
 elif when == 12:
 removed = "Pillow 12 (2025-10-15)"
+elif when == 13:
+removed = "Pillow 13 (2026-10-15)"
 else:
 msg = f"Unknown removal version: {when}. Update {__name__}?"
 raise ValueError(msg)

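The added branch simply extends the removal-version lookup. Reduced to a table, the logic is roughly this (dates taken from the hunk; the helper shape is an assumption):

def removal_target(when: int) -> str:
    versions = {
        12: "Pillow 12 (2025-10-15)",
        13: "Pillow 13 (2026-10-15)",
    }
    if when not in versions:
        msg = f"Unknown removal version: {when}."
        raise ValueError(msg)
    return versions[when]
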
@ -28,10 +28,10 @@ class Font:
|
|||
features: list[str] | None,
|
||||
lang: str | None,
|
||||
stroke_width: float,
|
||||
stroke_filled: bool,
|
||||
anchor: str | None,
|
||||
foreground_ink_long: int,
|
||||
x_start: float,
|
||||
y_start: float,
|
||||
start: tuple[float, float],
|
||||
/,
|
||||
) -> tuple[_imaging.ImagingCore, tuple[int, int]]: ...
|
||||
def getsize(
|
||||
|
|
|
@ -1,5 +1,4 @@
|
|||
""" Find compiled module linking to Tcl / Tk libraries
|
||||
"""
|
||||
"""Find compiled module linking to Tcl / Tk libraries"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
|
|
|
@ -127,6 +127,7 @@ features: dict[str, tuple[str, str | bool, str | None]] = {
|
|||
"fribidi": ("PIL._imagingft", "HAVE_FRIBIDI", "fribidi_version"),
|
||||
"harfbuzz": ("PIL._imagingft", "HAVE_HARFBUZZ", "harfbuzz_version"),
|
||||
"libjpeg_turbo": ("PIL._imaging", "HAVE_LIBJPEGTURBO", "libjpeg_turbo_version"),
|
||||
"mozjpeg": ("PIL._imaging", "HAVE_MOZJPEG", "libjpeg_turbo_version"),
|
||||
"zlib_ng": ("PIL._imaging", "HAVE_ZLIBNG", "zlib_ng_version"),
|
||||
"libimagequant": ("PIL._imaging", "HAVE_LIBIMAGEQUANT", "imagequant_version"),
|
||||
"xcb": ("PIL._imaging", "HAVE_XCB", None),
|
||||
|
@ -300,7 +301,8 @@ def pilinfo(out: IO[str] | None = None, supported_formats: bool = True) -> None:
|
|||
if name == "jpg":
|
||||
libjpeg_turbo_version = version_feature("libjpeg_turbo")
|
||||
if libjpeg_turbo_version is not None:
|
||||
v = "libjpeg-turbo " + libjpeg_turbo_version
|
||||
v = "mozjpeg" if check_feature("mozjpeg") else "libjpeg-turbo"
|
||||
v += " " + libjpeg_turbo_version
|
||||
if v is None:
|
||||
v = version(name)
|
||||
if v is not None:
|
||||
|
|
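With the new zlib_ng and mozjpeg entries, the usual feature-query helpers can report them; whether these keys exist at all depends on the Pillow version and on how the binaries were built:

from PIL import features

for name in ("zlib_ng", "mozjpeg", "libjpeg_turbo"):
    # check() returns False (and version() None) when the library was not compiled in
    print(name, features.check(name), features.version(name))
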
src/_imaging.c
|
@ -76,6 +76,13 @@
|
|||
|
||||
#ifdef HAVE_LIBJPEG
|
||||
#include "jconfig.h"
|
||||
#ifdef LIBJPEG_TURBO_VERSION
|
||||
#define JCONFIG_INCLUDED
|
||||
#ifdef __CYGWIN__
|
||||
#define _BASETSD_H
|
||||
#endif
|
||||
#include "jpeglib.h"
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#ifdef HAVE_LIBZ
|
||||
|
@ -466,8 +473,7 @@ getpixel(Imaging im, ImagingAccess access, int x, int y) {
|
|||
}
|
||||
|
||||
/* unknown type */
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
static char *
|
||||
|
@ -860,7 +866,7 @@ _color_lut_3d(ImagingObject *self, PyObject *args) {
|
|||
|
||||
if (!PyArg_ParseTuple(
|
||||
args,
|
||||
"siiiiiO:color_lut_3d",
|
||||
"sii(iii)O:color_lut_3d",
|
||||
&mode,
|
||||
&filter,
|
||||
&table_channels,
|
||||
|
@ -958,8 +964,7 @@ _convert2(ImagingObject *self, PyObject *args) {
|
|||
return NULL;
|
||||
}
|
||||
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
static PyObject *
|
||||
|
@ -1008,10 +1013,6 @@ _convert_transparent(ImagingObject *self, PyObject *args) {
|
|||
|
||||
static PyObject *
|
||||
_copy(ImagingObject *self, PyObject *args) {
|
||||
if (!PyArg_ParseTuple(args, "")) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return PyImagingNew(ImagingCopy(self->image));
|
||||
}
|
||||
|
||||
|
@ -1207,8 +1208,7 @@ _getpixel(ImagingObject *self, PyObject *args) {
|
|||
}
|
||||
|
||||
if (self->access == NULL) {
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
return getpixel(self->image, self->access, x, y);
|
||||
|
@ -1410,8 +1410,7 @@ _paste(ImagingObject *self, PyObject *args) {
|
|||
return NULL;
|
||||
}
|
||||
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
static PyObject *
|
||||
|
@ -1684,8 +1683,7 @@ _putdata(ImagingObject *self, PyObject *args) {
|
|||
|
||||
Py_XDECREF(seq);
|
||||
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
static PyObject *
|
||||
|
@ -1745,8 +1743,7 @@ _putpalette(ImagingObject *self, PyObject *args) {
|
|||
self->image->palette->size = palettesize * 8 / bits;
|
||||
unpack(self->image->palette->palette, palette, self->image->palette->size);
|
||||
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
static PyObject *
|
||||
|
@ -1770,8 +1767,7 @@ _putpalettealpha(ImagingObject *self, PyObject *args) {
|
|||
strcpy(self->image->palette->mode, "RGBA");
|
||||
self->image->palette->palette[index * 4 + 3] = (UINT8)alpha;
|
||||
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
static PyObject *
|
||||
|
@ -1798,8 +1794,7 @@ _putpalettealphas(ImagingObject *self, PyObject *args) {
|
|||
self->image->palette->palette[i * 4 + 3] = (UINT8)values[i];
|
||||
}
|
||||
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
static PyObject *
|
||||
|
@ -1835,8 +1830,7 @@ _putpixel(ImagingObject *self, PyObject *args) {
|
|||
self->access->put_pixel(im, x, y, ink);
|
||||
}
|
||||
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
static PyObject *
|
||||
|
@ -2003,8 +1997,7 @@ im_setmode(ImagingObject *self, PyObject *args) {
|
|||
}
|
||||
self->access = ImagingAccessNew(im);
|
||||
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
static PyObject *
|
||||
|
@ -2067,8 +2060,7 @@ _transform(ImagingObject *self, PyObject *args) {
|
|||
return NULL;
|
||||
}
|
||||
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
static PyObject *
|
||||
|
@ -2195,8 +2187,7 @@ _getbbox(ImagingObject *self, PyObject *args) {
|
|||
}
|
||||
|
||||
if (!ImagingGetBBox(self->image, bbox, alpha_only)) {
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
return Py_BuildValue("iiii", bbox[0], bbox[1], bbox[2], bbox[3]);
|
||||
|
@ -2276,8 +2267,7 @@ _getextrema(ImagingObject *self) {
|
|||
}
|
||||
}
|
||||
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
static PyObject *
|
||||
|
@ -2340,8 +2330,7 @@ _fillband(ImagingObject *self, PyObject *args) {
|
|||
return NULL;
|
||||
}
|
||||
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
static PyObject *
|
||||
|
@ -2356,8 +2345,7 @@ _putband(ImagingObject *self, PyObject *args) {
|
|||
return NULL;
|
||||
}
|
||||
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
static PyObject *
|
||||
|
@ -2943,8 +2931,7 @@ _draw_arc(ImagingDrawObject *self, PyObject *args) {
|
|||
return NULL;
|
||||
}
|
||||
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
static PyObject *
|
||||
|
@ -2981,8 +2968,7 @@ _draw_bitmap(ImagingDrawObject *self, PyObject *args) {
|
|||
return NULL;
|
||||
}
|
||||
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
static PyObject *
|
||||
|
@ -3038,8 +3024,7 @@ _draw_chord(ImagingDrawObject *self, PyObject *args) {
|
|||
return NULL;
|
||||
}
|
||||
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
static PyObject *
|
||||
|
@ -3093,8 +3078,7 @@ _draw_ellipse(ImagingDrawObject *self, PyObject *args) {
|
|||
return NULL;
|
||||
}
|
||||
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
static PyObject *
|
||||
|
@ -3157,8 +3141,7 @@ _draw_lines(ImagingDrawObject *self, PyObject *args) {
|
|||
|
||||
free(xy);
|
||||
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
static PyObject *
|
||||
|
@ -3189,8 +3172,7 @@ _draw_points(ImagingDrawObject *self, PyObject *args) {
|
|||
|
||||
free(xy);
|
||||
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
/* from outline.c */
|
||||
|
@ -3218,8 +3200,7 @@ _draw_outline(ImagingDrawObject *self, PyObject *args) {
|
|||
return NULL;
|
||||
}
|
||||
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
static PyObject *
|
||||
|
@ -3275,8 +3256,7 @@ _draw_pieslice(ImagingDrawObject *self, PyObject *args) {
|
|||
return NULL;
|
||||
}
|
||||
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
static PyObject *
|
||||
|
@ -3327,8 +3307,7 @@ _draw_polygon(ImagingDrawObject *self, PyObject *args) {
|
|||
|
||||
free(ixy);
|
||||
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
static PyObject *
|
||||
|
@ -3382,8 +3361,7 @@ _draw_rectangle(ImagingDrawObject *self, PyObject *args) {
|
|||
return NULL;
|
||||
}
|
||||
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
static struct PyMethodDef _draw_methods[] = {
|
||||
|
@ -3588,8 +3566,7 @@ _save_ppm(ImagingObject *self, PyObject *args) {
|
|||
return NULL;
|
||||
}
|
||||
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
/* -------------------------------------------------------------------- */
|
||||
|
@ -3977,8 +3954,7 @@ _reset_stats(PyObject *self, PyObject *args) {
|
|||
arena->stats_freed_blocks = 0;
|
||||
MUTEX_UNLOCK(&ImagingDefaultArena.mutex);
|
||||
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
static PyObject *
|
||||
|
@ -4038,8 +4014,7 @@ _set_alignment(PyObject *self, PyObject *args) {
|
|||
ImagingDefaultArena.alignment = alignment;
|
||||
MUTEX_UNLOCK(&ImagingDefaultArena.mutex);
|
||||
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
static PyObject *
|
||||
|
@ -4063,8 +4038,7 @@ _set_block_size(PyObject *self, PyObject *args) {
|
|||
ImagingDefaultArena.block_size = block_size;
|
||||
MUTEX_UNLOCK(&ImagingDefaultArena.mutex);
|
||||
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
static PyObject *
|
||||
|
@ -4092,8 +4066,7 @@ _set_blocks_max(PyObject *self, PyObject *args) {
|
|||
return ImagingError_MemoryError();
|
||||
}
|
||||
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
static PyObject *
|
||||
|
@ -4108,8 +4081,7 @@ _clear_cache(PyObject *self, PyObject *args) {
|
|||
ImagingMemoryClearCache(&ImagingDefaultArena, i);
|
||||
MUTEX_UNLOCK(&ImagingDefaultArena.mutex);
|
||||
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
/* -------------------------------------------------------------------- */
|
||||
|
@ -4367,6 +4339,15 @@ setup_module(PyObject *m) {
|
|||
Py_INCREF(have_libjpegturbo);
|
||||
PyModule_AddObject(m, "HAVE_LIBJPEGTURBO", have_libjpegturbo);
|
||||
|
||||
PyObject *have_mozjpeg;
|
||||
#ifdef JPEG_C_PARAM_SUPPORTED
|
||||
have_mozjpeg = Py_True;
|
||||
#else
|
||||
have_mozjpeg = Py_False;
|
||||
#endif
|
||||
Py_INCREF(have_mozjpeg);
|
||||
PyModule_AddObject(m, "HAVE_MOZJPEG", have_mozjpeg);
|
||||
|
||||
PyObject *have_libimagequant;
|
||||
#ifdef HAVE_LIBIMAGEQUANT
|
||||
have_libimagequant = Py_True;
|
||||
|
@ -4454,10 +4435,9 @@ PyInit__imaging(void) {
|
|||
|
||||
static PyModuleDef module_def = {
|
||||
PyModuleDef_HEAD_INIT,
|
||||
"_imaging", /* m_name */
|
||||
NULL, /* m_doc */
|
||||
-1, /* m_size */
|
||||
functions, /* m_methods */
|
||||
.m_name = "_imaging",
|
||||
.m_size = -1,
|
||||
.m_methods = functions,
|
||||
};
|
||||
|
||||
m = PyModule_Create(&module_def);
|
||||
|
|
|
@ -654,8 +654,7 @@ cms_get_display_profile_win32(PyObject *self, PyObject *args) {
|
|||
return PyUnicode_FromStringAndSize(filename, filename_size - 1);
|
||||
}
|
||||
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
#endif
|
||||
|
||||
|
@ -672,20 +671,17 @@ _profile_read_mlu(CmsProfileObject *self, cmsTagSignature info) {
|
|||
wchar_t *buf;
|
||||
|
||||
if (!cmsIsTag(self->profile, info)) {
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
mlu = cmsReadTag(self->profile, info);
|
||||
if (!mlu) {
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
len = cmsMLUgetWide(mlu, lc, cc, NULL, 0);
|
||||
if (len == 0) {
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
buf = malloc(len);
|
||||
|
@ -723,14 +719,12 @@ _profile_read_signature(CmsProfileObject *self, cmsTagSignature info) {
|
|||
unsigned int *sig;
|
||||
|
||||
if (!cmsIsTag(self->profile, info)) {
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
sig = (unsigned int *)cmsReadTag(self->profile, info);
|
||||
if (!sig) {
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
return _profile_read_int_as_string(*sig);
|
||||
|
@ -780,14 +774,12 @@ _profile_read_ciexyz(CmsProfileObject *self, cmsTagSignature info, int multi) {
|
|||
cmsCIEXYZ *XYZ;
|
||||
|
||||
if (!cmsIsTag(self->profile, info)) {
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
XYZ = (cmsCIEXYZ *)cmsReadTag(self->profile, info);
|
||||
if (!XYZ) {
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
if (multi) {
|
||||
return _xyz3_py(XYZ);
|
||||
|
@ -801,14 +793,12 @@ _profile_read_ciexyy_triple(CmsProfileObject *self, cmsTagSignature info) {
|
|||
cmsCIExyYTRIPLE *triple;
|
||||
|
||||
if (!cmsIsTag(self->profile, info)) {
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
triple = (cmsCIExyYTRIPLE *)cmsReadTag(self->profile, info);
|
||||
if (!triple) {
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
/* Note: lcms does all the heavy lifting and error checking (nr of
|
||||
|
@ -835,21 +825,18 @@ _profile_read_named_color_list(CmsProfileObject *self, cmsTagSignature info) {
|
|||
PyObject *result;
|
||||
|
||||
if (!cmsIsTag(self->profile, info)) {
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
ncl = (cmsNAMEDCOLORLIST *)cmsReadTag(self->profile, info);
|
||||
if (ncl == NULL) {
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
n = cmsNamedColorCount(ncl);
|
||||
result = PyList_New(n);
|
||||
if (!result) {
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
for (i = 0; i < n; i++) {
|
||||
|
@ -858,8 +845,7 @@ _profile_read_named_color_list(CmsProfileObject *self, cmsTagSignature info) {
|
|||
str = PyUnicode_FromString(name);
|
||||
if (str == NULL) {
|
||||
Py_DECREF(result);
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
PyList_SET_ITEM(result, i, str);
|
||||
}
|
||||
|
@ -926,8 +912,7 @@ _is_intent_supported(CmsProfileObject *self, int clut) {
|
|||
|
||||
result = PyDict_New();
|
||||
if (result == NULL) {
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
n = cmsGetSupportedIntents(INTENTS, intent_ids, intent_descs);
|
||||
|
@ -957,8 +942,7 @@ _is_intent_supported(CmsProfileObject *self, int clut) {
|
|||
Py_XDECREF(id);
|
||||
Py_XDECREF(entry);
|
||||
Py_XDECREF(result);
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
PyDict_SetItem(result, id, entry);
|
||||
Py_DECREF(id);
|
||||
|
@ -1042,8 +1026,7 @@ cms_profile_getattr_creation_date(CmsProfileObject *self, void *closure) {
|
|||
|
||||
result = cmsGetHeaderCreationDateTime(self->profile, &ct);
|
||||
if (!result) {
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
return PyDateTime_FromDateAndTime(
|
||||
|
@ -1141,8 +1124,7 @@ cms_profile_getattr_saturation_rendering_intent_gamut(
|
|||
static PyObject *
|
||||
cms_profile_getattr_red_colorant(CmsProfileObject *self, void *closure) {
|
||||
if (!cmsIsMatrixShaper(self->profile)) {
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
return _profile_read_ciexyz(self, cmsSigRedColorantTag, 0);
|
||||
}
|
||||
|
@ -1150,8 +1132,7 @@ cms_profile_getattr_red_colorant(CmsProfileObject *self, void *closure) {
|
|||
static PyObject *
|
||||
cms_profile_getattr_green_colorant(CmsProfileObject *self, void *closure) {
|
||||
if (!cmsIsMatrixShaper(self->profile)) {
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
return _profile_read_ciexyz(self, cmsSigGreenColorantTag, 0);
|
||||
}
|
||||
|
@ -1159,8 +1140,7 @@ cms_profile_getattr_green_colorant(CmsProfileObject *self, void *closure) {
|
|||
static PyObject *
|
||||
cms_profile_getattr_blue_colorant(CmsProfileObject *self, void *closure) {
|
||||
if (!cmsIsMatrixShaper(self->profile)) {
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
return _profile_read_ciexyz(self, cmsSigBlueColorantTag, 0);
|
||||
}
|
||||
|
@ -1176,21 +1156,18 @@ cms_profile_getattr_media_white_point_temperature(
|
|||
cmsBool result;
|
||||
|
||||
if (!cmsIsTag(self->profile, info)) {
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
XYZ = (cmsCIEXYZ *)cmsReadTag(self->profile, info);
|
||||
if (XYZ == NULL || XYZ->X == 0) {
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
cmsXYZ2xyY(&xyY, XYZ);
|
||||
result = cmsTempFromWhitePoint(&tempK, &xyY);
|
||||
if (!result) {
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
return PyFloat_FromDouble(tempK);
|
||||
}
|
||||
|
@ -1229,8 +1206,7 @@ cms_profile_getattr_red_primary(CmsProfileObject *self, void *closure) {
|
|||
result = _calculate_rgb_primaries(self, &primaries);
|
||||
}
|
||||
if (!result) {
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
return _xyz_py(&primaries.Red);
|
||||
|
@ -1245,8 +1221,7 @@ cms_profile_getattr_green_primary(CmsProfileObject *self, void *closure) {
|
|||
result = _calculate_rgb_primaries(self, &primaries);
|
||||
}
|
||||
if (!result) {
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
return _xyz_py(&primaries.Green);
|
||||
|
@ -1261,8 +1236,7 @@ cms_profile_getattr_blue_primary(CmsProfileObject *self, void *closure) {
|
|||
result = _calculate_rgb_primaries(self, &primaries);
|
||||
}
|
||||
if (!result) {
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
return _xyz_py(&primaries.Blue);
|
||||
|
@ -1321,14 +1295,12 @@ cms_profile_getattr_icc_measurement_condition(CmsProfileObject *self, void *clos
|
|||
const char *geo;
|
||||
|
||||
if (!cmsIsTag(self->profile, info)) {
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
mc = (cmsICCMeasurementConditions *)cmsReadTag(self->profile, info);
|
||||
if (!mc) {
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
if (mc->Geometry == 1) {
|
||||
|
@ -1362,14 +1334,12 @@ cms_profile_getattr_icc_viewing_condition(CmsProfileObject *self, void *closure)
|
|||
cmsTagSignature info = cmsSigViewingConditionsTag;
|
||||
|
||||
if (!cmsIsTag(self->profile, info)) {
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
vc = (cmsICCViewingConditions *)cmsReadTag(self->profile, info);
|
||||
if (!vc) {
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
return Py_BuildValue(
|
||||
|
@ -1550,10 +1520,9 @@ PyInit__imagingcms(void) {
|
|||
|
||||
static PyModuleDef module_def = {
|
||||
PyModuleDef_HEAD_INIT,
|
||||
"_imagingcms", /* m_name */
|
||||
NULL, /* m_doc */
|
||||
-1, /* m_size */
|
||||
pyCMSdll_methods, /* m_methods */
|
||||
.m_name = "_imagingcms",
|
||||
.m_size = -1,
|
||||
.m_methods = pyCMSdll_methods,
|
||||
};
|
||||
|
||||
m = PyModule_Create(&module_def);
|
||||
|
|
|
@ -339,29 +339,23 @@ text_layout_raqm(
|
|||
len = PySequence_Fast_GET_SIZE(seq);
|
||||
for (j = 0; j < len; j++) {
|
||||
PyObject *item = PySequence_Fast_GET_ITEM(seq, j);
|
||||
char *feature = NULL;
|
||||
Py_ssize_t size = 0;
|
||||
PyObject *bytes;
|
||||
|
||||
if (!PyUnicode_Check(item)) {
|
||||
Py_DECREF(seq);
|
||||
PyErr_SetString(PyExc_TypeError, "expected a string");
|
||||
goto failed;
|
||||
}
|
||||
bytes = PyUnicode_AsUTF8String(item);
|
||||
if (bytes == NULL) {
|
||||
|
||||
Py_ssize_t size;
|
||||
const char *feature = PyUnicode_AsUTF8AndSize(item, &size);
|
||||
if (feature == NULL) {
|
||||
Py_DECREF(seq);
|
||||
goto failed;
|
||||
}
|
||||
feature = PyBytes_AS_STRING(bytes);
|
||||
size = PyBytes_GET_SIZE(bytes);
|
||||
if (!raqm_add_font_feature(rq, feature, size)) {
|
||||
Py_DECREF(seq);
|
||||
Py_DECREF(bytes);
|
||||
PyErr_SetString(PyExc_ValueError, "raqm_add_font_feature() failed");
|
||||
goto failed;
|
||||
}
|
||||
Py_DECREF(bytes);
|
||||
}
|
||||
Py_DECREF(seq);
|
||||
}
|
||||
|
@ -840,6 +834,7 @@ font_render(FontObject *self, PyObject *args) {
|
|||
int mask = 0; /* is FT_LOAD_TARGET_MONO enabled? */
|
||||
int color = 0; /* is FT_LOAD_COLOR enabled? */
|
||||
float stroke_width = 0;
|
||||
int stroke_filled = 0;
|
||||
PY_LONG_LONG foreground_ink_long = 0;
|
||||
unsigned int foreground_ink;
|
||||
const char *mode = NULL;
|
||||
|
@ -859,7 +854,7 @@ font_render(FontObject *self, PyObject *args) {
|
|||
|
||||
if (!PyArg_ParseTuple(
|
||||
args,
|
||||
"OO|zzOzfzLffO:render",
|
||||
"OO|zzOzfpzL(ff):render",
|
||||
&string,
|
||||
&fill,
|
||||
&mode,
|
||||
|
@ -867,6 +862,7 @@ font_render(FontObject *self, PyObject *args) {
|
|||
&features,
|
||||
&lang,
|
||||
&stroke_width,
|
||||
&stroke_filled,
|
||||
&anchor,
|
||||
&foreground_ink_long,
|
||||
&x_start,
|
||||
|
@ -1011,7 +1007,8 @@ font_render(FontObject *self, PyObject *args) {
|
|||
if (stroker != NULL) {
|
||||
error = FT_Get_Glyph(glyph_slot, &glyph);
|
||||
if (!error) {
|
||||
error = FT_Glyph_Stroke(&glyph, stroker, 1);
|
||||
error = stroke_filled ? FT_Glyph_StrokeBorder(&glyph, stroker, 0, 1)
|
||||
: FT_Glyph_Stroke(&glyph, stroker, 1);
|
||||
}
|
||||
if (!error) {
|
||||
FT_Vector origin = {0, 0};
|
||||
|
@ -1377,8 +1374,7 @@ font_setvarname(FontObject *self, PyObject *args) {
|
|||
return geterror(error);
|
||||
}
|
||||
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
static PyObject *
|
||||
|
@ -1432,8 +1428,7 @@ font_setvaraxes(FontObject *self, PyObject *args) {
|
|||
return geterror(error);
|
||||
}
|
||||
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
#endif
|
||||
|
||||
|
@ -1635,10 +1630,9 @@ PyInit__imagingft(void) {
|
|||
|
||||
static PyModuleDef module_def = {
|
||||
PyModuleDef_HEAD_INIT,
|
||||
"_imagingft", /* m_name */
|
||||
NULL, /* m_doc */
|
||||
-1, /* m_size */
|
||||
_functions, /* m_methods */
|
||||
.m_name = "_imagingft",
|
||||
.m_size = -1,
|
||||
.m_methods = _functions,
|
||||
};
|
||||
|
||||
m = PyModule_Create(&module_def);
|
||||
|
|
|
@ -192,8 +192,7 @@ _unop(PyObject *self, PyObject *args) {
|
|||
|
||||
unop(out, im1);
|
||||
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
static PyObject *
|
||||
|
@ -226,8 +225,7 @@ _binop(PyObject *self, PyObject *args) {
|
|||
|
||||
binop(out, im1, im2);
|
||||
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
static PyMethodDef _functions[] = {
|
||||
|
@ -310,10 +308,9 @@ PyInit__imagingmath(void) {
|
|||
|
||||
static PyModuleDef module_def = {
|
||||
PyModuleDef_HEAD_INIT,
|
||||
"_imagingmath", /* m_name */
|
||||
NULL, /* m_doc */
|
||||
-1, /* m_size */
|
||||
_functions, /* m_methods */
|
||||
.m_name = "_imagingmath",
|
||||
.m_size = -1,
|
||||
.m_methods = _functions,
|
||||
};
|
||||
|
||||
m = PyModule_Create(&module_def);
|
||||
|
|
|
@ -252,10 +252,10 @@ PyInit__imagingmorph(void) {
|
|||
|
||||
static PyModuleDef module_def = {
|
||||
PyModuleDef_HEAD_INIT,
|
||||
"_imagingmorph", /* m_name */
|
||||
"A module for doing image morphology", /* m_doc */
|
||||
-1, /* m_size */
|
||||
functions, /* m_methods */
|
||||
.m_name = "_imagingmorph",
|
||||
.m_doc = "A module for doing image morphology",
|
||||
.m_size = -1,
|
||||
.m_methods = functions,
|
||||
};
|
||||
|
||||
m = PyModule_Create(&module_def);
|
||||
|
|
|
@ -37,8 +37,7 @@ _tkinit(PyObject *self, PyObject *args) {
|
|||
/* This will bomb if interp is invalid... */
|
||||
TkImaging_Init(interp);
|
||||
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
static PyMethodDef functions[] = {
|
||||
|
@ -51,10 +50,9 @@ PyMODINIT_FUNC
|
|||
PyInit__imagingtk(void) {
|
||||
static PyModuleDef module_def = {
|
||||
PyModuleDef_HEAD_INIT,
|
||||
"_imagingtk", /* m_name */
|
||||
NULL, /* m_doc */
|
||||
-1, /* m_size */
|
||||
functions, /* m_methods */
|
||||
.m_name = "_imagingtk",
|
||||
.m_size = -1,
|
||||
.m_methods = functions,
|
||||
};
|
||||
PyObject *m;
|
||||
m = PyModule_Create(&module_def);
|
||||
|
|
|
@ -164,7 +164,7 @@ _anim_encoder_new(PyObject *self, PyObject *args) {
|
|||
|
||||
if (!PyArg_ParseTuple(
|
||||
args,
|
||||
"iiIiiiiii",
|
||||
"(ii)Iiiiiii",
|
||||
&width,
|
||||
&height,
|
||||
&bgcolor,
|
||||
|
@ -835,10 +835,9 @@ PyInit__webp(void) {
|
|||
|
||||
static PyModuleDef module_def = {
|
||||
PyModuleDef_HEAD_INIT,
|
||||
"_webp", /* m_name */
|
||||
NULL, /* m_doc */
|
||||
-1, /* m_size */
|
||||
webpMethods, /* m_methods */
|
||||
.m_name = "_webp",
|
||||
.m_size = -1,
|
||||
.m_methods = webpMethods,
|
||||
};
|
||||
|
||||
m = PyModule_Create(&module_def);
|
||||
|
|
|
@ -213,8 +213,7 @@ _setimage(ImagingDecoderObject *decoder, PyObject *args) {
|
|||
Py_XDECREF(decoder->lock);
|
||||
decoder->lock = op;
|
||||
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
static PyObject *
|
||||
|
@ -231,8 +230,7 @@ _setfd(ImagingDecoderObject *decoder, PyObject *args) {
|
|||
Py_XINCREF(fd);
|
||||
state->fd = fd;
|
||||
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
static PyObject *
|
||||
|
|
|
@ -85,8 +85,7 @@ _expose(ImagingDisplayObject *display, PyObject *args) {
|
|||
|
||||
ImagingExposeDIB(display->dib, hdc);
|
||||
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
static PyObject *
|
||||
|
@ -112,8 +111,7 @@ _draw(ImagingDisplayObject *display, PyObject *args) {
|
|||
|
||||
ImagingDrawDIB(display->dib, hdc, dst, src);
|
||||
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
extern Imaging
|
||||
|
@ -143,8 +141,7 @@ _paste(ImagingDisplayObject *display, PyObject *args) {
|
|||
|
||||
ImagingPasteDIB(display->dib, im, xy);
|
||||
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
static PyObject *
|
||||
|
@ -190,8 +187,7 @@ _releasedc(ImagingDisplayObject *display, PyObject *args) {
|
|||
|
||||
ReleaseDC(window, dc);
|
||||
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
static PyObject *
|
||||
|
@ -211,8 +207,7 @@ _frombytes(ImagingDisplayObject *display, PyObject *args) {
|
|||
memcpy(display->dib->bits, buffer.buf, buffer.len);
|
||||
|
||||
PyBuffer_Release(&buffer);
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
static PyObject *
|
||||
|
@ -709,8 +704,7 @@ PyImaging_EventLoopWin32(PyObject *self, PyObject *args) {
|
|||
}
|
||||
Py_END_ALLOW_THREADS;
|
||||
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
/* -------------------------------------------------------------------- */
|
||||
|
|
|
@ -278,8 +278,7 @@ _setimage(ImagingEncoderObject *encoder, PyObject *args) {
|
|||
Py_XDECREF(encoder->lock);
|
||||
encoder->lock = op;
|
||||
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
static PyObject *
|
||||
|
@ -296,8 +295,7 @@ _setfd(ImagingEncoderObject *encoder, PyObject *args) {
|
|||
Py_XINCREF(fd);
|
||||
state->fd = fd;
|
||||
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
static PyObject *
|
||||
|
@ -1099,7 +1097,7 @@ PyImaging_JpegEncoderNew(PyObject *self, PyObject *args) {
|
|||
|
||||
if (!PyArg_ParseTuple(
|
||||
args,
|
||||
"ss|nnnnpnnnnnnOz#y#y#",
|
||||
"ss|nnnnpn(nn)nnnOz#y#y#",
|
||||
&mode,
|
||||
&rawmode,
|
||||
&quality,
|
||||
|
|
Some files were not shown because too many files have changed in this diff.