Merge branch 'main' into set_variation_by_name

This commit is contained in:
Andrew Murray 2022-09-17 00:01:05 +10:00
commit 17b56b9463
83 changed files with 2830 additions and 2393 deletions

View File

@ -35,11 +35,9 @@ python3 -m pip install -U pytest
python3 -m pip install -U pytest-cov
python3 -m pip install -U pytest-timeout
python3 -m pip install pyroma
python3 -m pip install test-image-results
if [[ $(uname) != CYGWIN* ]]; then
# TODO Remove condition when NumPy supports 3.11
if ! [ "$GHA_PYTHON_VERSION" == "3.11-dev" ]; then python3 -m pip install numpy ; fi
python3 -m pip install numpy
# PyQt6 doesn't support PyPy3
if [[ $GHA_PYTHON_VERSION == 3.* ]]; then

View File

@ -11,6 +11,9 @@ on:
- "**.h"
workflow_dispatch:
permissions:
contents: read
jobs:
Fuzzing:
runs-on: ubuntu-latest

View File

@ -12,11 +12,9 @@ python3 -m pip install -U pytest
python3 -m pip install -U pytest-cov
python3 -m pip install -U pytest-timeout
python3 -m pip install pyroma
python3 -m pip install test-image-results
echo -e "[openblas]\nlibraries = openblas\nlibrary_dirs = /usr/local/opt/openblas/lib" >> ~/.numpy-site.cfg
# TODO Remove condition when NumPy supports 3.11
if ! [ "$GHA_PYTHON_VERSION" == "3.11-dev" ]; then python3 -m pip install numpy ; fi
python3 -m pip install numpy
# extra test images
pushd depends && ./install_extra_test_images.sh && popd

View File

@ -2,6 +2,9 @@ name: Test Cygwin
on: [push, pull_request, workflow_dispatch]
permissions:
contents: read
jobs:
build:
runs-on: windows-latest

View File

@ -24,7 +24,7 @@ jobs:
fail-fast: false
matrix:
docker: [
ubuntu-20.04-focal-amd64-valgrind,
ubuntu-22.04-jammy-amd64-valgrind,
]
dockerTag: [main]

View File

@ -1,6 +1,6 @@
repos:
- repo: https://github.com/psf/black
rev: 22.6.0
rev: 22.8.0
hooks:
- id: black
args: ["--target-version", "py37"]
@ -14,18 +14,18 @@ repos:
- id: isort
- repo: https://github.com/asottile/yesqa
rev: v1.3.0
rev: v1.4.0
hooks:
- id: yesqa
- repo: https://github.com/Lucas-C/pre-commit-hooks
rev: v1.2.0
rev: v1.3.1
hooks:
- id: remove-tabs
exclude: (Makefile$|\.bat$|\.cmake$|\.eps$|\.fits$|\.opt$)
- repo: https://github.com/PyCQA/flake8
rev: 4.0.1
rev: 5.0.4
hooks:
- id: flake8
additional_dependencies: [flake8-2020, flake8-implicit-str-concat]

View File

@ -5,6 +5,60 @@ Changelog (Pillow)
9.3.0 (unreleased)
------------------
- Corrected BMP and TGA palette size when saving #6500
[radarhere]
- Do not call load() before draft() in Image.thumbnail #6539
[radarhere]
- Copy palette when converting from P to PA #6497
[radarhere]
- Allow RGB and RGBA values for PA image putpixel #6504
[radarhere]
- Removed support for tkinter in PyPy before Python 3.6 #6551
[nulano]
- Do not use CCITTFaxDecode filter if libtiff is not available #6518
[radarhere]
- Fallback to not using mmap if buffer is not large enough #6510
[radarhere]
- Fixed writing bytes as ASCII tag #6493
[radarhere]
- Open 1 bit EPS in mode 1 #6499
[radarhere]
- Removed support for tkinter before Python 1.5.2 #6549
[radarhere]
- Allow default ImageDraw font to be set #6484
[radarhere, hugovk]
- Save 1 mode PDF using CCITTFaxDecode filter #6470
[radarhere]
- Added support for RGBA PSD images #6481
[radarhere]
- Parse orientation from XMP tag contents #6463
[bigcat88, radarhere]
- Added support for reading ATI1/ATI2 (BC4/BC5) DDS images #6457
[REDxEYE, radarhere]
- Do not clear GIF tile when checking number of frames #6455
[radarhere]
- Support saving multiple MPO frames #6444
[radarhere]
- Do not double quote Pillow version for setuptools >= 60 #6450
[radarhere]
- Added ABGR BMP mask mode #6436
[radarhere]

View File

@ -96,8 +96,8 @@ Released as needed privately to individual vendors for critical security-related
## Binary Distributions
### Windows
* [ ] Contact `@cgohlke` for Windows binaries via release ticket e.g. https://github.com/python-pillow/Pillow/issues/1174.
* [ ] Download and extract tarball from `@cgohlke` and copy into `dist/`
* [ ] Download the artifacts from the [GitHub Actions "Test Windows" workflow](https://github.com/python-pillow/Pillow/actions/workflows/test-windows.yml)
and copy into `dist/`
### Mac and Linux
* [ ] Use the [Pillow Wheel Builder](https://github.com/python-pillow/pillow-wheels):

BIN
Tests/images/1.eps Normal file

Binary file not shown.

BIN
Tests/images/ati1.dds Normal file

Binary file not shown.

BIN
Tests/images/ati1.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 969 B

BIN
Tests/images/ati2.dds Normal file

Binary file not shown.

Binary file not shown.

After

Width:  |  Height:  |  Size: 54 B

BIN
Tests/images/mmap_error.bmp Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 9.0 KiB

BIN
Tests/images/rgba.psd Normal file

Binary file not shown.

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.2 KiB

View File

@ -325,8 +325,9 @@ def test_apng_syntax_errors():
pytest.warns(UserWarning, open)
def test_apng_sequence_errors():
test_files = [
@pytest.mark.parametrize(
"test_file",
(
"sequence_start.png",
"sequence_gap.png",
"sequence_repeat.png",
@ -334,12 +335,13 @@ def test_apng_sequence_errors():
"sequence_reorder.png",
"sequence_reorder_chunk.png",
"sequence_fdat_fctl.png",
]
for f in test_files:
with pytest.raises(SyntaxError):
with Image.open(f"Tests/images/apng/{f}") as im:
im.seek(im.n_frames - 1)
im.load()
),
)
def test_apng_sequence_errors(test_file):
with pytest.raises(SyntaxError):
with Image.open(f"Tests/images/apng/{test_file}") as im:
im.seek(im.n_frames - 1)
im.load()
def test_apng_save(tmp_path):

View File

@ -39,6 +39,13 @@ def test_invalid_file():
BmpImagePlugin.BmpImageFile(fp)
def test_fallback_if_mmap_errors():
# This image has been truncated,
# so that the buffer is not large enough when using mmap
with Image.open("Tests/images/mmap_error.bmp") as im:
assert_image_equal_tofile(im, "Tests/images/pal8_offset.bmp")
def test_save_to_bytes():
output = io.BytesIO()
im = hopper()
@ -51,6 +58,18 @@ def test_save_to_bytes():
assert reloaded.format == "BMP"
def test_small_palette(tmp_path):
im = Image.new("P", (1, 1))
colors = [0, 0, 0, 125, 125, 125, 255, 255, 255]
im.putpalette(colors)
out = str(tmp_path / "temp.bmp")
im.save(out)
with Image.open(out) as reloaded:
assert reloaded.getpalette() == colors
def test_save_too_large(tmp_path):
outfile = str(tmp_path / "temp.bmp")
with Image.new("RGB", (1, 1)) as im:

View File

@ -1,3 +1,5 @@
import pytest
from PIL import ContainerIO, Image
from .helper import hopper
@ -59,89 +61,89 @@ def test_seek_mode_2():
assert container.tell() == 100
def test_read_n0():
@pytest.mark.parametrize("bytesmode", (True, False))
def test_read_n0(bytesmode):
# Arrange
for bytesmode in (True, False):
with open(TEST_FILE, "rb" if bytesmode else "r") as fh:
container = ContainerIO.ContainerIO(fh, 22, 100)
with open(TEST_FILE, "rb" if bytesmode else "r") as fh:
container = ContainerIO.ContainerIO(fh, 22, 100)
# Act
container.seek(81)
data = container.read()
# Act
container.seek(81)
data = container.read()
# Assert
if bytesmode:
data = data.decode()
assert data == "7\nThis is line 8\n"
# Assert
if bytesmode:
data = data.decode()
assert data == "7\nThis is line 8\n"
def test_read_n():
@pytest.mark.parametrize("bytesmode", (True, False))
def test_read_n(bytesmode):
# Arrange
for bytesmode in (True, False):
with open(TEST_FILE, "rb" if bytesmode else "r") as fh:
container = ContainerIO.ContainerIO(fh, 22, 100)
with open(TEST_FILE, "rb" if bytesmode else "r") as fh:
container = ContainerIO.ContainerIO(fh, 22, 100)
# Act
container.seek(81)
data = container.read(3)
# Act
container.seek(81)
data = container.read(3)
# Assert
if bytesmode:
data = data.decode()
assert data == "7\nT"
# Assert
if bytesmode:
data = data.decode()
assert data == "7\nT"
def test_read_eof():
@pytest.mark.parametrize("bytesmode", (True, False))
def test_read_eof(bytesmode):
# Arrange
for bytesmode in (True, False):
with open(TEST_FILE, "rb" if bytesmode else "r") as fh:
container = ContainerIO.ContainerIO(fh, 22, 100)
with open(TEST_FILE, "rb" if bytesmode else "r") as fh:
container = ContainerIO.ContainerIO(fh, 22, 100)
# Act
container.seek(100)
data = container.read()
# Act
container.seek(100)
data = container.read()
# Assert
if bytesmode:
data = data.decode()
assert data == ""
# Assert
if bytesmode:
data = data.decode()
assert data == ""
def test_readline():
@pytest.mark.parametrize("bytesmode", (True, False))
def test_readline(bytesmode):
# Arrange
for bytesmode in (True, False):
with open(TEST_FILE, "rb" if bytesmode else "r") as fh:
container = ContainerIO.ContainerIO(fh, 0, 120)
with open(TEST_FILE, "rb" if bytesmode else "r") as fh:
container = ContainerIO.ContainerIO(fh, 0, 120)
# Act
data = container.readline()
# Act
data = container.readline()
# Assert
if bytesmode:
data = data.decode()
assert data == "This is line 1\n"
# Assert
if bytesmode:
data = data.decode()
assert data == "This is line 1\n"
def test_readlines():
@pytest.mark.parametrize("bytesmode", (True, False))
def test_readlines(bytesmode):
# Arrange
for bytesmode in (True, False):
expected = [
"This is line 1\n",
"This is line 2\n",
"This is line 3\n",
"This is line 4\n",
"This is line 5\n",
"This is line 6\n",
"This is line 7\n",
"This is line 8\n",
]
with open(TEST_FILE, "rb" if bytesmode else "r") as fh:
container = ContainerIO.ContainerIO(fh, 0, 120)
expected = [
"This is line 1\n",
"This is line 2\n",
"This is line 3\n",
"This is line 4\n",
"This is line 5\n",
"This is line 6\n",
"This is line 7\n",
"This is line 8\n",
]
with open(TEST_FILE, "rb" if bytesmode else "r") as fh:
container = ContainerIO.ContainerIO(fh, 0, 120)
# Act
data = container.readlines()
# Act
data = container.readlines()
# Assert
if bytesmode:
data = [line.decode() for line in data]
assert data == expected
# Assert
if bytesmode:
data = [line.decode() for line in data]
assert data == expected

View File

@ -10,6 +10,8 @@ from .helper import assert_image_equal, assert_image_equal_tofile, hopper
TEST_FILE_DXT1 = "Tests/images/dxt1-rgb-4bbp-noalpha_MipMaps-1.dds"
TEST_FILE_DXT3 = "Tests/images/dxt3-argb-8bbp-explicitalpha_MipMaps-1.dds"
TEST_FILE_DXT5 = "Tests/images/dxt5-argb-8bbp-interpolatedalpha_MipMaps-1.dds"
TEST_FILE_ATI1 = "Tests/images/ati1.dds"
TEST_FILE_ATI2 = "Tests/images/ati2.dds"
TEST_FILE_DX10_BC5_TYPELESS = "Tests/images/bc5_typeless.dds"
TEST_FILE_DX10_BC5_UNORM = "Tests/images/bc5_unorm.dds"
TEST_FILE_DX10_BC5_SNORM = "Tests/images/bc5_snorm.dds"
@ -62,6 +64,32 @@ def test_sanity_dxt5():
assert_image_equal_tofile(im, TEST_FILE_DXT5.replace(".dds", ".png"))
def test_sanity_ati1():
"""Check ATI1 images can be opened"""
with Image.open(TEST_FILE_ATI1) as im:
im.load()
assert im.format == "DDS"
assert im.mode == "L"
assert im.size == (64, 64)
assert_image_equal_tofile(im, TEST_FILE_ATI1.replace(".dds", ".png"))
def test_sanity_ati2():
"""Check ATI2 images can be opened"""
with Image.open(TEST_FILE_ATI2) as im:
im.load()
assert im.format == "DDS"
assert im.mode == "RGB"
assert im.size == (256, 256)
assert_image_equal_tofile(im, TEST_FILE_DX10_BC5_UNORM.replace(".dds", ".png"))
@pytest.mark.parametrize(
("image_path", "expected_path"),
(

View File

@ -146,6 +146,11 @@ def test_bytesio_object():
assert_image_similar(img, image1_scale1_compare, 5)
def test_1_mode():
with Image.open("Tests/images/1.eps") as im:
assert im.mode == "1"
def test_image_mode_not_supported(tmp_path):
im = hopper("RGBA")
tmpfile = str(tmp_path / "temp.eps")

View File

@ -399,6 +399,11 @@ def test_no_change():
assert im.is_animated
assert_image_equal(im, expected)
with Image.open("Tests/images/comment_after_only_frame.gif") as im:
expected = Image.new("P", (1, 1))
assert not im.is_animated
assert_image_equal(im, expected)
def test_eoferror():
with Image.open(TEST_GIF) as im:

View File

@ -78,15 +78,12 @@ def test_eoferror():
im.seek(n_frames - 1)
def test_roundtrip(tmp_path):
def roundtrip(mode):
out = str(tmp_path / "temp.im")
im = hopper(mode)
im.save(out)
assert_image_equal_tofile(im, out)
for mode in ["RGB", "P", "PA"]:
roundtrip(mode)
@pytest.mark.parametrize("mode", ("RGB", "P", "PA"))
def test_roundtrip(mode, tmp_path):
out = str(tmp_path / "temp.im")
im = hopper(mode)
im.save(out)
assert_image_equal_tofile(im, out)
def test_save_unsupported_mode(tmp_path):

View File

@ -135,50 +135,50 @@ class TestFileLibTiff(LibTiffTestCase):
assert_image_equal_tofile(im, "Tests/images/tiff_adobe_deflate.png")
def test_write_metadata(self, tmp_path):
@pytest.mark.parametrize("legacy_api", (False, True))
def test_write_metadata(self, legacy_api, tmp_path):
"""Test metadata writing through libtiff"""
for legacy_api in [False, True]:
f = str(tmp_path / "temp.tiff")
with Image.open("Tests/images/hopper_g4.tif") as img:
img.save(f, tiffinfo=img.tag)
f = str(tmp_path / "temp.tiff")
with Image.open("Tests/images/hopper_g4.tif") as img:
img.save(f, tiffinfo=img.tag)
if legacy_api:
original = img.tag.named()
else:
original = img.tag_v2.named()
if legacy_api:
original = img.tag.named()
else:
original = img.tag_v2.named()
# PhotometricInterpretation is set from SAVE_INFO,
# not the original image.
ignored = [
"StripByteCounts",
"RowsPerStrip",
"PageNumber",
"PhotometricInterpretation",
]
# PhotometricInterpretation is set from SAVE_INFO,
# not the original image.
ignored = [
"StripByteCounts",
"RowsPerStrip",
"PageNumber",
"PhotometricInterpretation",
]
with Image.open(f) as loaded:
if legacy_api:
reloaded = loaded.tag.named()
else:
reloaded = loaded.tag_v2.named()
with Image.open(f) as loaded:
if legacy_api:
reloaded = loaded.tag.named()
else:
reloaded = loaded.tag_v2.named()
for tag, value in itertools.chain(reloaded.items(), original.items()):
if tag not in ignored:
val = original[tag]
if tag.endswith("Resolution"):
if legacy_api:
assert val[0][0] / val[0][1] == (
4294967295 / 113653537
), f"{tag} didn't roundtrip"
else:
assert val == 37.79000115940079, f"{tag} didn't roundtrip"
for tag, value in itertools.chain(reloaded.items(), original.items()):
if tag not in ignored:
val = original[tag]
if tag.endswith("Resolution"):
if legacy_api:
assert val[0][0] / val[0][1] == (
4294967295 / 113653537
), f"{tag} didn't roundtrip"
else:
assert val == value, f"{tag} didn't roundtrip"
assert val == 37.79000115940079, f"{tag} didn't roundtrip"
else:
assert val == value, f"{tag} didn't roundtrip"
# https://github.com/python-pillow/Pillow/issues/1561
requested_fields = ["StripByteCounts", "RowsPerStrip", "StripOffsets"]
for field in requested_fields:
assert field in reloaded, f"{field} not in metadata"
# https://github.com/python-pillow/Pillow/issues/1561
requested_fields = ["StripByteCounts", "RowsPerStrip", "StripOffsets"]
for field in requested_fields:
assert field in reloaded, f"{field} not in metadata"
@pytest.mark.valgrind_known_error(reason="Known invalid metadata")
def test_additional_metadata(self, tmp_path):
@ -856,7 +856,7 @@ class TestFileLibTiff(LibTiffTestCase):
def test_strip_ycbcr_jpeg_2x2_sampling(self):
infile = "Tests/images/tiff_strip_ycbcr_jpeg_2x2_sampling.tif"
with Image.open(infile) as im:
assert_image_similar_tofile(im, "Tests/images/flower.jpg", 0.5)
assert_image_similar_tofile(im, "Tests/images/flower.jpg", 1.2)
@mark_if_feature_version(
pytest.mark.valgrind_known_error, "libjpeg_turbo", "2.0", reason="Known Failing"
@ -864,7 +864,7 @@ class TestFileLibTiff(LibTiffTestCase):
def test_strip_ycbcr_jpeg_1x1_sampling(self):
infile = "Tests/images/tiff_strip_ycbcr_jpeg_1x1_sampling.tif"
with Image.open(infile) as im:
assert_image_equal_tofile(im, "Tests/images/flower2.jpg")
assert_image_similar_tofile(im, "Tests/images/flower2.jpg", 0.01)
def test_tiled_cmyk_jpeg(self):
infile = "Tests/images/tiff_tiled_cmyk_jpeg.tif"
@ -877,7 +877,7 @@ class TestFileLibTiff(LibTiffTestCase):
def test_tiled_ycbcr_jpeg_1x1_sampling(self):
infile = "Tests/images/tiff_tiled_ycbcr_jpeg_1x1_sampling.tif"
with Image.open(infile) as im:
assert_image_equal_tofile(im, "Tests/images/flower2.jpg")
assert_image_similar_tofile(im, "Tests/images/flower2.jpg", 0.01)
@mark_if_feature_version(
pytest.mark.valgrind_known_error, "libjpeg_turbo", "2.0", reason="Known Failing"
@ -885,7 +885,7 @@ class TestFileLibTiff(LibTiffTestCase):
def test_tiled_ycbcr_jpeg_2x2_sampling(self):
infile = "Tests/images/tiff_tiled_ycbcr_jpeg_2x2_sampling.tif"
with Image.open(infile) as im:
assert_image_similar_tofile(im, "Tests/images/flower.jpg", 0.5)
assert_image_similar_tofile(im, "Tests/images/flower.jpg", 1.5)
def test_strip_planar_rgb(self):
# gdal_translate -co TILED=no -co INTERLEAVE=BAND -co COMPRESS=LZW \
@ -1011,14 +1011,18 @@ class TestFileLibTiff(LibTiffTestCase):
# Assert that there are multiple strips
assert len(im.tag_v2[STRIPOFFSETS]) > 1
def test_save_single_strip(self, tmp_path):
@pytest.mark.parametrize("argument", (True, False))
def test_save_single_strip(self, argument, tmp_path):
im = hopper("RGB").resize((256, 256))
out = str(tmp_path / "temp.tif")
TiffImagePlugin.STRIP_SIZE = 2**18
if not argument:
TiffImagePlugin.STRIP_SIZE = 2**18
try:
im.save(out, compression="tiff_adobe_deflate")
arguments = {"compression": "tiff_adobe_deflate"}
if argument:
arguments["strip_size"] = 2**18
im.save(out, **arguments)
with Image.open(out) as im:
assert len(im.tag_v2[STRIPOFFSETS]) == 1

View File

@ -5,15 +5,19 @@ import pytest
from PIL import Image
from .helper import assert_image_similar, is_pypy, skip_unless_feature
from .helper import (
assert_image_equal,
assert_image_similar,
is_pypy,
skip_unless_feature,
)
test_files = ["Tests/images/sugarshack.mpo", "Tests/images/frozenpond.mpo"]
pytestmark = skip_unless_feature("jpg")
def frame_roundtrip(im, **options):
# Note that for now, there is no MPO saving functionality
def roundtrip(im, **options):
out = BytesIO()
im.save(out, "MPO", **options)
test_bytes = out.tell()
@ -23,13 +27,13 @@ def frame_roundtrip(im, **options):
return im
def test_sanity():
for test_file in test_files:
with Image.open(test_file) as im:
im.load()
assert im.mode == "RGB"
assert im.size == (640, 480)
assert im.format == "MPO"
@pytest.mark.parametrize("test_file", test_files)
def test_sanity(test_file):
with Image.open(test_file) as im:
im.load()
assert im.mode == "RGB"
assert im.size == (640, 480)
assert im.format == "MPO"
@pytest.mark.skipif(is_pypy(), reason="Requires CPython")
@ -62,26 +66,25 @@ def test_context_manager():
im.load()
def test_app():
for test_file in test_files:
# Test APP/COM reader (@PIL135)
with Image.open(test_file) as im:
assert im.applist[0][0] == "APP1"
assert im.applist[1][0] == "APP2"
assert (
im.applist[1][1][:16]
== b"MPF\x00MM\x00*\x00\x00\x00\x08\x00\x03\xb0\x00"
)
assert len(im.applist) == 2
@pytest.mark.parametrize("test_file", test_files)
def test_app(test_file):
# Test APP/COM reader (@PIL135)
with Image.open(test_file) as im:
assert im.applist[0][0] == "APP1"
assert im.applist[1][0] == "APP2"
assert (
im.applist[1][1][:16] == b"MPF\x00MM\x00*\x00\x00\x00\x08\x00\x03\xb0\x00"
)
assert len(im.applist) == 2
def test_exif():
for test_file in test_files:
with Image.open(test_file) as im:
info = im._getexif()
assert info[272] == "Nintendo 3DS"
assert info[296] == 2
assert info[34665] == 188
@pytest.mark.parametrize("test_file", test_files)
def test_exif(test_file):
with Image.open(test_file) as im:
info = im._getexif()
assert info[272] == "Nintendo 3DS"
assert info[296] == 2
assert info[34665] == 188
def test_frame_size():
@ -133,12 +136,12 @@ def test_reload_exif_after_seek():
assert 296 in exif
def test_mp():
for test_file in test_files:
with Image.open(test_file) as im:
mpinfo = im._getmp()
assert mpinfo[45056] == b"0100"
assert mpinfo[45057] == 2
@pytest.mark.parametrize("test_file", test_files)
def test_mp(test_file):
with Image.open(test_file) as im:
mpinfo = im._getmp()
assert mpinfo[45056] == b"0100"
assert mpinfo[45057] == 2
def test_mp_offset():
@ -158,48 +161,48 @@ def test_mp_no_data():
im.seek(1)
def test_mp_attribute():
for test_file in test_files:
with Image.open(test_file) as im:
mpinfo = im._getmp()
frame_number = 0
for mpentry in mpinfo[0xB002]:
mpattr = mpentry["Attribute"]
if frame_number:
assert not mpattr["RepresentativeImageFlag"]
else:
assert mpattr["RepresentativeImageFlag"]
assert not mpattr["DependentParentImageFlag"]
assert not mpattr["DependentChildImageFlag"]
assert mpattr["ImageDataFormat"] == "JPEG"
assert mpattr["MPType"] == "Multi-Frame Image: (Disparity)"
assert mpattr["Reserved"] == 0
frame_number += 1
@pytest.mark.parametrize("test_file", test_files)
def test_mp_attribute(test_file):
with Image.open(test_file) as im:
mpinfo = im._getmp()
frame_number = 0
for mpentry in mpinfo[0xB002]:
mpattr = mpentry["Attribute"]
if frame_number:
assert not mpattr["RepresentativeImageFlag"]
else:
assert mpattr["RepresentativeImageFlag"]
assert not mpattr["DependentParentImageFlag"]
assert not mpattr["DependentChildImageFlag"]
assert mpattr["ImageDataFormat"] == "JPEG"
assert mpattr["MPType"] == "Multi-Frame Image: (Disparity)"
assert mpattr["Reserved"] == 0
frame_number += 1
def test_seek():
for test_file in test_files:
with Image.open(test_file) as im:
assert im.tell() == 0
# prior to first image raises an error, both blatant and borderline
with pytest.raises(EOFError):
im.seek(-1)
with pytest.raises(EOFError):
im.seek(-523)
# after the final image raises an error,
# both blatant and borderline
with pytest.raises(EOFError):
im.seek(2)
with pytest.raises(EOFError):
im.seek(523)
# bad calls shouldn't change the frame
assert im.tell() == 0
# this one will work
im.seek(1)
assert im.tell() == 1
# and this one, too
im.seek(0)
assert im.tell() == 0
@pytest.mark.parametrize("test_file", test_files)
def test_seek(test_file):
with Image.open(test_file) as im:
assert im.tell() == 0
# prior to first image raises an error, both blatant and borderline
with pytest.raises(EOFError):
im.seek(-1)
with pytest.raises(EOFError):
im.seek(-523)
# after the final image raises an error,
# both blatant and borderline
with pytest.raises(EOFError):
im.seek(2)
with pytest.raises(EOFError):
im.seek(523)
# bad calls shouldn't change the frame
assert im.tell() == 0
# this one will work
im.seek(1)
assert im.tell() == 1
# and this one, too
im.seek(0)
assert im.tell() == 0
def test_n_frames():
@ -221,29 +224,54 @@ def test_eoferror():
im.seek(n_frames - 1)
def test_image_grab():
@pytest.mark.parametrize("test_file", test_files)
def test_image_grab(test_file):
with Image.open(test_file) as im:
assert im.tell() == 0
im0 = im.tobytes()
im.seek(1)
assert im.tell() == 1
im1 = im.tobytes()
im.seek(0)
assert im.tell() == 0
im02 = im.tobytes()
assert im0 == im02
assert im0 != im1
@pytest.mark.parametrize("test_file", test_files)
def test_save(test_file):
with Image.open(test_file) as im:
assert im.tell() == 0
jpg0 = roundtrip(im)
assert_image_similar(im, jpg0, 30)
im.seek(1)
assert im.tell() == 1
jpg1 = roundtrip(im)
assert_image_similar(im, jpg1, 30)
def test_save_all():
for test_file in test_files:
with Image.open(test_file) as im:
assert im.tell() == 0
im0 = im.tobytes()
im.seek(1)
assert im.tell() == 1
im1 = im.tobytes()
im_reloaded = roundtrip(im, save_all=True)
im.seek(0)
assert im.tell() == 0
im02 = im.tobytes()
assert im0 == im02
assert im0 != im1
assert_image_similar(im, im_reloaded, 30)
def test_save():
# Note that only individual frames can be saved at present
for test_file in test_files:
with Image.open(test_file) as im:
assert im.tell() == 0
jpg0 = frame_roundtrip(im)
assert_image_similar(im, jpg0, 30)
im.seek(1)
assert im.tell() == 1
jpg1 = frame_roundtrip(im)
assert_image_similar(im, jpg1, 30)
im_reloaded.seek(1)
assert_image_similar(im, im_reloaded, 30)
im = Image.new("RGB", (1, 1))
im2 = Image.new("RGB", (1, 1), "#f00")
im_reloaded = roundtrip(im, save_all=True, append_images=[im2])
assert_image_equal(im, im_reloaded)
im_reloaded.seek(1)
assert_image_similar(im2, im_reloaded, 1)
# Test that a single frame image will not be saved as an MPO
jpg = roundtrip(im, save_all=True)
assert "mp" not in jpg.info

View File

@ -6,7 +6,7 @@ import time
import pytest
from PIL import Image, PdfParser
from PIL import Image, PdfParser, features
from .helper import hopper, mark_if_feature_version
@ -37,13 +37,14 @@ def helper_save_as_pdf(tmp_path, mode, **kwargs):
return outfile
@pytest.mark.valgrind_known_error(reason="Temporary skip")
def test_monochrome(tmp_path):
# Arrange
mode = "1"
# Act / Assert
outfile = helper_save_as_pdf(tmp_path, mode)
assert os.path.getsize(outfile) < 15000
assert os.path.getsize(outfile) < (5000 if features.check("libtiff") else 15000)
def test_greyscale(tmp_path):

View File

@ -4,7 +4,7 @@ import pytest
from PIL import Image, PsdImagePlugin
from .helper import assert_image_similar, hopper, is_pypy
from .helper import assert_image_equal_tofile, assert_image_similar, hopper, is_pypy
test_file = "Tests/images/hopper.psd"
@ -107,6 +107,11 @@ def test_open_after_exclusive_load():
im.load()
def test_rgba():
with Image.open("Tests/images/rgba.psd") as im:
assert_image_equal_tofile(im, "Tests/images/imagedraw_square.png")
def test_icc_profile():
with Image.open(test_file) as im:
assert "icc_profile" in im.info

View File

@ -18,51 +18,48 @@ _ORIGINS = ("tl", "bl")
_ORIGIN_TO_ORIENTATION = {"tl": 1, "bl": -1}
def test_sanity(tmp_path):
for mode in _MODES:
@pytest.mark.parametrize("mode", _MODES)
def test_sanity(mode, tmp_path):
def roundtrip(original_im):
out = str(tmp_path / "temp.tga")
def roundtrip(original_im):
out = str(tmp_path / "temp.tga")
original_im.save(out, rle=rle)
with Image.open(out) as saved_im:
if rle:
assert saved_im.info["compression"] == original_im.info["compression"]
assert saved_im.info["orientation"] == original_im.info["orientation"]
if mode == "P":
assert saved_im.getpalette() == original_im.getpalette()
original_im.save(out, rle=rle)
with Image.open(out) as saved_im:
if rle:
assert_image_equal(saved_im, original_im)
png_paths = glob(os.path.join(_TGA_DIR_COMMON, f"*x*_{mode.lower()}.png"))
for png_path in png_paths:
with Image.open(png_path) as reference_im:
assert reference_im.mode == mode
path_no_ext = os.path.splitext(png_path)[0]
for origin, rle in product(_ORIGINS, (True, False)):
tga_path = "{}_{}_{}.tga".format(
path_no_ext, origin, "rle" if rle else "raw"
)
with Image.open(tga_path) as original_im:
assert original_im.format == "TGA"
assert original_im.get_format_mimetype() == "image/x-tga"
if rle:
assert original_im.info["compression"] == "tga_rle"
assert (
saved_im.info["compression"] == original_im.info["compression"]
original_im.info["orientation"]
== _ORIGIN_TO_ORIENTATION[origin]
)
assert saved_im.info["orientation"] == original_im.info["orientation"]
if mode == "P":
assert saved_im.getpalette() == original_im.getpalette()
if mode == "P":
assert original_im.getpalette() == reference_im.getpalette()
assert_image_equal(saved_im, original_im)
assert_image_equal(original_im, reference_im)
png_paths = glob(os.path.join(_TGA_DIR_COMMON, f"*x*_{mode.lower()}.png"))
for png_path in png_paths:
with Image.open(png_path) as reference_im:
assert reference_im.mode == mode
path_no_ext = os.path.splitext(png_path)[0]
for origin, rle in product(_ORIGINS, (True, False)):
tga_path = "{}_{}_{}.tga".format(
path_no_ext, origin, "rle" if rle else "raw"
)
with Image.open(tga_path) as original_im:
assert original_im.format == "TGA"
assert original_im.get_format_mimetype() == "image/x-tga"
if rle:
assert original_im.info["compression"] == "tga_rle"
assert (
original_im.info["orientation"]
== _ORIGIN_TO_ORIENTATION[origin]
)
if mode == "P":
assert original_im.getpalette() == reference_im.getpalette()
assert_image_equal(original_im, reference_im)
roundtrip(original_im)
roundtrip(original_im)
def test_palette_depth_16(tmp_path):
@ -123,6 +120,18 @@ def test_save(tmp_path):
assert test_im.size == (100, 100)
def test_small_palette(tmp_path):
im = Image.new("P", (1, 1))
colors = [0, 0, 0]
im.putpalette(colors)
out = str(tmp_path / "temp.tga")
im.save(out)
with Image.open(out) as reloaded:
assert reloaded.getpalette() == colors
def test_save_wrong_mode(tmp_path):
im = hopper("PA")
out = str(tmp_path / "temp.tga")

View File

@ -185,6 +185,22 @@ def test_iptc(tmp_path):
im.save(out)
def test_writing_bytes_to_ascii(tmp_path):
im = hopper()
info = TiffImagePlugin.ImageFileDirectory_v2()
tag = TiffTags.TAGS_V2[271]
assert tag.type == TiffTags.ASCII
info[271] = b"test"
out = str(tmp_path / "temp.tiff")
im.save(out, tiffinfo=info)
with Image.open(out) as reloaded:
assert reloaded.tag_v2[271] == "test"
def test_undefined_zero(tmp_path):
# Check that the tag has not been changed since this test was created
tag = TiffTags.TAGS_V2[45059]

View File

@ -66,10 +66,10 @@ def test_load_set_dpi():
assert_image_similar_tofile(im, "Tests/images/drawing_wmf_ref_144.png", 2.1)
def test_save(tmp_path):
@pytest.mark.parametrize("ext", (".wmf", ".emf"))
def test_save(ext, tmp_path):
im = hopper()
for ext in [".wmf", ".emf"]:
tmpfile = str(tmp_path / ("temp" + ext))
with pytest.raises(OSError):
im.save(tmpfile)
tmpfile = str(tmp_path / ("temp" + ext))
with pytest.raises(OSError):
im.save(tmpfile)

View File

@ -22,8 +22,9 @@ from .helper import (
class TestImage:
def test_image_modes_success(self):
for mode in [
@pytest.mark.parametrize(
"mode",
(
"1",
"P",
"PA",
@ -44,22 +45,18 @@ class TestImage:
"YCbCr",
"LAB",
"HSV",
]:
Image.new(mode, (1, 1))
),
)
def test_image_modes_success(self, mode):
Image.new(mode, (1, 1))
def test_image_modes_fail(self):
for mode in [
"",
"bad",
"very very long",
"BGR;15",
"BGR;16",
"BGR;24",
"BGR;32",
]:
with pytest.raises(ValueError) as e:
Image.new(mode, (1, 1))
assert str(e.value) == "unrecognized image mode"
@pytest.mark.parametrize(
"mode", ("", "bad", "very very long", "BGR;15", "BGR;16", "BGR;24", "BGR;32")
)
def test_image_modes_fail(self, mode):
with pytest.raises(ValueError) as e:
Image.new(mode, (1, 1))
assert str(e.value) == "unrecognized image mode"
def test_exception_inheritance(self):
assert issubclass(UnidentifiedImageError, OSError)
@ -539,23 +536,22 @@ class TestImage:
with pytest.raises(ValueError):
Image.linear_gradient(wrong_mode)
def test_linear_gradient(self):
@pytest.mark.parametrize("mode", ("L", "P", "I", "F"))
def test_linear_gradient(self, mode):
# Arrange
target_file = "Tests/images/linear_gradient.png"
for mode in ["L", "P", "I", "F"]:
# Act
im = Image.linear_gradient(mode)
# Act
im = Image.linear_gradient(mode)
# Assert
assert im.size == (256, 256)
assert im.mode == mode
assert im.getpixel((0, 0)) == 0
assert im.getpixel((255, 255)) == 255
with Image.open(target_file) as target:
target = target.convert(mode)
assert_image_equal(im, target)
# Assert
assert im.size == (256, 256)
assert im.mode == mode
assert im.getpixel((0, 0)) == 0
assert im.getpixel((255, 255)) == 255
with Image.open(target_file) as target:
target = target.convert(mode)
assert_image_equal(im, target)
def test_radial_gradient_wrong_mode(self):
# Arrange
@ -565,23 +561,22 @@ class TestImage:
with pytest.raises(ValueError):
Image.radial_gradient(wrong_mode)
def test_radial_gradient(self):
@pytest.mark.parametrize("mode", ("L", "P", "I", "F"))
def test_radial_gradient(self, mode):
# Arrange
target_file = "Tests/images/radial_gradient.png"
for mode in ["L", "P", "I", "F"]:
# Act
im = Image.radial_gradient(mode)
# Act
im = Image.radial_gradient(mode)
# Assert
assert im.size == (256, 256)
assert im.mode == mode
assert im.getpixel((0, 0)) == 255
assert im.getpixel((128, 128)) == 0
with Image.open(target_file) as target:
target = target.convert(mode)
assert_image_equal(im, target)
# Assert
assert im.size == (256, 256)
assert im.mode == mode
assert im.getpixel((0, 0)) == 255
assert im.getpixel((128, 128)) == 0
with Image.open(target_file) as target:
target = target.convert(mode)
assert_image_equal(im, target)
def test_register_extensions(self):
test_format = "a"

View File

@ -184,8 +184,9 @@ class TestImageGetPixel(AccessTest):
with pytest.raises(error):
im.getpixel((-1, -1))
def test_basic(self):
for mode in (
@pytest.mark.parametrize(
"mode",
(
"1",
"L",
"LA",
@ -200,23 +201,28 @@ class TestImageGetPixel(AccessTest):
"RGBX",
"CMYK",
"YCbCr",
):
self.check(mode)
),
)
def test_basic(self, mode):
self.check(mode)
def test_signedness(self):
@pytest.mark.parametrize("mode", ("I;16", "I;16B"))
def test_signedness(self, mode):
# see https://github.com/python-pillow/Pillow/issues/452
# pixelaccess is using signed int* instead of uint*
for mode in ("I;16", "I;16B"):
self.check(mode, 2**15 - 1)
self.check(mode, 2**15)
self.check(mode, 2**15 + 1)
self.check(mode, 2**16 - 1)
self.check(mode, 2**15 - 1)
self.check(mode, 2**15)
self.check(mode, 2**15 + 1)
self.check(mode, 2**16 - 1)
def test_p_putpixel_rgb_rgba(self):
for color in [(255, 0, 0), (255, 0, 0, 255)]:
im = Image.new("P", (1, 1), 0)
im.putpixel((0, 0), color)
assert im.convert("RGB").getpixel((0, 0)) == (255, 0, 0)
@pytest.mark.parametrize("mode", ("P", "PA"))
@pytest.mark.parametrize("color", ((255, 0, 0), (255, 0, 0, 255)))
def test_p_putpixel_rgb_rgba(self, mode, color):
im = Image.new(mode, (1, 1))
im.putpixel((0, 0), color)
alpha = color[3] if len(color) == 4 and mode == "PA" else 255
assert im.convert("RGBA").getpixel((0, 0)) == (255, 0, 0, alpha)
@pytest.mark.skipif(cffi is None, reason="No CFFI")
@ -337,12 +343,15 @@ class TestCffi(AccessTest):
# pixels can contain garbage if image is released
assert px[i, 0] == 0
def test_p_putpixel_rgb_rgba(self):
for color in [(255, 0, 0), (255, 0, 0, 255)]:
im = Image.new("P", (1, 1), 0)
@pytest.mark.parametrize("mode", ("P", "PA"))
def test_p_putpixel_rgb_rgba(self, mode):
for color in [(255, 0, 0), (255, 0, 0, 127)]:
im = Image.new(mode, (1, 1))
access = PyAccess.new(im, False)
access.putpixel((0, 0), color)
assert im.convert("RGB").getpixel((0, 0)) == (255, 0, 0)
alpha = color[3] if len(color) == 4 and mode == "PA" else 255
assert im.convert("RGBA").getpixel((0, 0)) == (255, 0, 0, alpha)
class TestImagePutPixelError(AccessTest):

View File

@ -236,6 +236,12 @@ def test_p2pa_alpha():
assert im_a.getpixel((x, y)) == alpha
def test_p2pa_palette():
with Image.open("Tests/images/tiny.png") as im:
im_pa = im.convert("PA")
assert im_pa.getpalette() == im.getpalette()
def test_matrix_illegal_conversion():
# Arrange
im = hopper("CMYK")
@ -268,36 +274,33 @@ def test_matrix_wrong_mode():
im.convert(mode="L", matrix=matrix)
def test_matrix_xyz():
def matrix_convert(mode):
# Arrange
im = hopper("RGB")
im.info["transparency"] = (255, 0, 0)
# fmt: off
matrix = (
0.412453, 0.357580, 0.180423, 0,
0.212671, 0.715160, 0.072169, 0,
0.019334, 0.119193, 0.950227, 0)
# fmt: on
assert im.mode == "RGB"
@pytest.mark.parametrize("mode", ("RGB", "L"))
def test_matrix_xyz(mode):
# Arrange
im = hopper("RGB")
im.info["transparency"] = (255, 0, 0)
# fmt: off
matrix = (
0.412453, 0.357580, 0.180423, 0,
0.212671, 0.715160, 0.072169, 0,
0.019334, 0.119193, 0.950227, 0)
# fmt: on
assert im.mode == "RGB"
# Act
# Convert an RGB image to the CIE XYZ colour space
converted_im = im.convert(mode=mode, matrix=matrix)
# Act
# Convert an RGB image to the CIE XYZ colour space
converted_im = im.convert(mode=mode, matrix=matrix)
# Assert
assert converted_im.mode == mode
assert converted_im.size == im.size
with Image.open("Tests/images/hopper-XYZ.png") as target:
if converted_im.mode == "RGB":
assert_image_similar(converted_im, target, 3)
assert converted_im.info["transparency"] == (105, 54, 4)
else:
assert_image_similar(converted_im, target.getchannel(0), 1)
assert converted_im.info["transparency"] == 105
matrix_convert("RGB")
matrix_convert("L")
# Assert
assert converted_im.mode == mode
assert converted_im.size == im.size
with Image.open("Tests/images/hopper-XYZ.png") as target:
if converted_im.mode == "RGB":
assert_image_similar(converted_im, target, 3)
assert converted_im.info["transparency"] == (105, 54, 4)
else:
assert_image_similar(converted_im, target.getchannel(0), 1)
assert converted_im.info["transparency"] == 105
def test_matrix_identity():

View File

@ -1,37 +1,40 @@
import copy
import pytest
from PIL import Image
from .helper import hopper
def test_copy():
@pytest.mark.parametrize("mode", ("1", "P", "L", "RGB", "I", "F"))
def test_copy(mode):
cropped_coordinates = (10, 10, 20, 20)
cropped_size = (10, 10)
for mode in "1", "P", "L", "RGB", "I", "F":
# Internal copy method
im = hopper(mode)
out = im.copy()
assert out.mode == im.mode
assert out.size == im.size
# Python's copy method
im = hopper(mode)
out = copy.copy(im)
assert out.mode == im.mode
assert out.size == im.size
# Internal copy method
im = hopper(mode)
out = im.copy()
assert out.mode == im.mode
assert out.size == im.size
# Internal copy method on a cropped image
im = hopper(mode)
out = im.crop(cropped_coordinates).copy()
assert out.mode == im.mode
assert out.size == cropped_size
# Python's copy method
im = hopper(mode)
out = copy.copy(im)
assert out.mode == im.mode
assert out.size == im.size
# Python's copy method on a cropped image
im = hopper(mode)
out = copy.copy(im.crop(cropped_coordinates))
assert out.mode == im.mode
assert out.size == cropped_size
# Internal copy method on a cropped image
im = hopper(mode)
out = im.crop(cropped_coordinates).copy()
assert out.mode == im.mode
assert out.size == cropped_size
# Python's copy method on a cropped image
im = hopper(mode)
out = copy.copy(im.crop(cropped_coordinates))
assert out.mode == im.mode
assert out.size == cropped_size
def test_copy_zero():

View File

@ -5,17 +5,14 @@ from PIL import Image
from .helper import assert_image_equal, hopper
def test_crop():
def crop(mode):
im = hopper(mode)
assert_image_equal(im.crop(), im)
@pytest.mark.parametrize("mode", ("1", "P", "L", "RGB", "I", "F"))
def test_crop(mode):
im = hopper(mode)
assert_image_equal(im.crop(), im)
cropped = im.crop((50, 50, 100, 100))
assert cropped.mode == mode
assert cropped.size == (50, 50)
for mode in "1", "P", "L", "RGB", "I", "F":
crop(mode)
cropped = im.crop((50, 50, 100, 100))
assert cropped.mode == mode
assert cropped.size == (50, 50)
def test_wide_crop():

View File

@ -5,90 +5,109 @@ from PIL import Image, ImageFilter
from .helper import assert_image_equal, hopper
def test_sanity():
def apply_filter(filter_to_apply):
for mode in ["L", "RGB", "CMYK"]:
im = hopper(mode)
out = im.filter(filter_to_apply)
assert out.mode == im.mode
assert out.size == im.size
@pytest.mark.parametrize(
"filter_to_apply",
(
ImageFilter.BLUR,
ImageFilter.CONTOUR,
ImageFilter.DETAIL,
ImageFilter.EDGE_ENHANCE,
ImageFilter.EDGE_ENHANCE_MORE,
ImageFilter.EMBOSS,
ImageFilter.FIND_EDGES,
ImageFilter.SMOOTH,
ImageFilter.SMOOTH_MORE,
ImageFilter.SHARPEN,
ImageFilter.MaxFilter,
ImageFilter.MedianFilter,
ImageFilter.MinFilter,
ImageFilter.ModeFilter,
ImageFilter.GaussianBlur,
ImageFilter.GaussianBlur(5),
ImageFilter.BoxBlur(5),
ImageFilter.UnsharpMask,
ImageFilter.UnsharpMask(10),
),
)
@pytest.mark.parametrize("mode", ("L", "RGB", "CMYK"))
def test_sanity(filter_to_apply, mode):
im = hopper(mode)
out = im.filter(filter_to_apply)
assert out.mode == im.mode
assert out.size == im.size
apply_filter(ImageFilter.BLUR)
apply_filter(ImageFilter.CONTOUR)
apply_filter(ImageFilter.DETAIL)
apply_filter(ImageFilter.EDGE_ENHANCE)
apply_filter(ImageFilter.EDGE_ENHANCE_MORE)
apply_filter(ImageFilter.EMBOSS)
apply_filter(ImageFilter.FIND_EDGES)
apply_filter(ImageFilter.SMOOTH)
apply_filter(ImageFilter.SMOOTH_MORE)
apply_filter(ImageFilter.SHARPEN)
apply_filter(ImageFilter.MaxFilter)
apply_filter(ImageFilter.MedianFilter)
apply_filter(ImageFilter.MinFilter)
apply_filter(ImageFilter.ModeFilter)
apply_filter(ImageFilter.GaussianBlur)
apply_filter(ImageFilter.GaussianBlur(5))
apply_filter(ImageFilter.BoxBlur(5))
apply_filter(ImageFilter.UnsharpMask)
apply_filter(ImageFilter.UnsharpMask(10))
@pytest.mark.parametrize("mode", ("L", "RGB", "CMYK"))
def test_sanity_error(mode):
with pytest.raises(TypeError):
apply_filter("hello")
im = hopper(mode)
im.filter("hello")
def test_crash():
# crashes on small images
im = Image.new("RGB", (1, 1))
im.filter(ImageFilter.SMOOTH)
im = Image.new("RGB", (2, 2))
im.filter(ImageFilter.SMOOTH)
im = Image.new("RGB", (3, 3))
# crashes on small images
@pytest.mark.parametrize("size", ((1, 1), (2, 2), (3, 3)))
def test_crash(size):
im = Image.new("RGB", size)
im.filter(ImageFilter.SMOOTH)
def test_modefilter():
def modefilter(mode):
im = Image.new(mode, (3, 3), None)
im.putdata(list(range(9)))
# image is:
# 0 1 2
# 3 4 5
# 6 7 8
mod = im.filter(ImageFilter.ModeFilter).getpixel((1, 1))
im.putdata([0, 0, 1, 2, 5, 1, 5, 2, 0]) # mode=0
mod2 = im.filter(ImageFilter.ModeFilter).getpixel((1, 1))
return mod, mod2
assert modefilter("1") == (4, 0)
assert modefilter("L") == (4, 0)
assert modefilter("P") == (4, 0)
assert modefilter("RGB") == ((4, 0, 0), (0, 0, 0))
@pytest.mark.parametrize(
"mode, expected",
(
("1", (4, 0)),
("L", (4, 0)),
("P", (4, 0)),
("RGB", ((4, 0, 0), (0, 0, 0))),
),
)
def test_modefilter(mode, expected):
im = Image.new(mode, (3, 3), None)
im.putdata(list(range(9)))
# image is:
# 0 1 2
# 3 4 5
# 6 7 8
mod = im.filter(ImageFilter.ModeFilter).getpixel((1, 1))
im.putdata([0, 0, 1, 2, 5, 1, 5, 2, 0]) # mode=0
mod2 = im.filter(ImageFilter.ModeFilter).getpixel((1, 1))
assert (mod, mod2) == expected
def test_rankfilter():
def rankfilter(mode):
im = Image.new(mode, (3, 3), None)
im.putdata(list(range(9)))
# image is:
# 0 1 2
# 3 4 5
# 6 7 8
minimum = im.filter(ImageFilter.MinFilter).getpixel((1, 1))
med = im.filter(ImageFilter.MedianFilter).getpixel((1, 1))
maximum = im.filter(ImageFilter.MaxFilter).getpixel((1, 1))
return minimum, med, maximum
@pytest.mark.parametrize(
"mode, expected",
(
("1", (0, 4, 8)),
("L", (0, 4, 8)),
("RGB", ((0, 0, 0), (4, 0, 0), (8, 0, 0))),
("I", (0, 4, 8)),
("F", (0.0, 4.0, 8.0)),
),
)
def test_rankfilter(mode, expected):
im = Image.new(mode, (3, 3), None)
im.putdata(list(range(9)))
# image is:
# 0 1 2
# 3 4 5
# 6 7 8
minimum = im.filter(ImageFilter.MinFilter).getpixel((1, 1))
med = im.filter(ImageFilter.MedianFilter).getpixel((1, 1))
maximum = im.filter(ImageFilter.MaxFilter).getpixel((1, 1))
assert (minimum, med, maximum) == expected
assert rankfilter("1") == (0, 4, 8)
assert rankfilter("L") == (0, 4, 8)
@pytest.mark.parametrize(
"filter", (ImageFilter.MinFilter, ImageFilter.MedianFilter, ImageFilter.MaxFilter)
)
def test_rankfilter_error(filter):
with pytest.raises(ValueError):
rankfilter("P")
assert rankfilter("RGB") == ((0, 0, 0), (4, 0, 0), (8, 0, 0))
assert rankfilter("I") == (0, 4, 8)
assert rankfilter("F") == (0.0, 4.0, 8.0)
im = Image.new("P", (3, 3), None)
im.putdata(list(range(9)))
# image is:
# 0 1 2
# 3 4 5
# 6 7 8
im.filter(filter).getpixel((1, 1))
def test_rankfilter_properties():
@ -110,7 +129,8 @@ def test_kernel_not_enough_coefficients():
ImageFilter.Kernel((3, 3), (0, 0))
def test_consistency_3x3():
@pytest.mark.parametrize("mode", ("L", "LA", "RGB", "CMYK"))
def test_consistency_3x3(mode):
with Image.open("Tests/images/hopper.bmp") as source:
with Image.open("Tests/images/hopper_emboss.bmp") as reference:
kernel = ImageFilter.Kernel(
@ -125,14 +145,14 @@ def test_consistency_3x3():
source = source.split() * 2
reference = reference.split() * 2
for mode in ["L", "LA", "RGB", "CMYK"]:
assert_image_equal(
Image.merge(mode, source[: len(mode)]).filter(kernel),
Image.merge(mode, reference[: len(mode)]),
)
assert_image_equal(
Image.merge(mode, source[: len(mode)]).filter(kernel),
Image.merge(mode, reference[: len(mode)]),
)
def test_consistency_5x5():
@pytest.mark.parametrize("mode", ("L", "LA", "RGB", "CMYK"))
def test_consistency_5x5(mode):
with Image.open("Tests/images/hopper.bmp") as source:
with Image.open("Tests/images/hopper_emboss_more.bmp") as reference:
kernel = ImageFilter.Kernel(
@ -149,8 +169,7 @@ def test_consistency_5x5():
source = source.split() * 2
reference = reference.split() * 2
for mode in ["L", "LA", "RGB", "CMYK"]:
assert_image_equal(
Image.merge(mode, source[: len(mode)]).filter(kernel),
Image.merge(mode, reference[: len(mode)]),
)
assert_image_equal(
Image.merge(mode, source[: len(mode)]).filter(kernel),
Image.merge(mode, reference[: len(mode)]),
)

View File

@ -1,3 +1,5 @@
import pytest
from PIL import Image
from .helper import CachedProperty, assert_image_equal
@ -101,226 +103,226 @@ class TestImagingPaste:
],
)
def test_image_solid(self):
for mode in ("RGBA", "RGB", "L"):
im = Image.new(mode, (200, 200), "red")
im2 = getattr(self, "gradient_" + mode)
@pytest.mark.parametrize("mode", ["RGBA", "RGB", "L"])
def test_image_solid(self, mode):
im = Image.new(mode, (200, 200), "red")
im2 = getattr(self, "gradient_" + mode)
im.paste(im2, (12, 23))
im.paste(im2, (12, 23))
im = im.crop((12, 23, im2.width + 12, im2.height + 23))
assert_image_equal(im, im2)
im = im.crop((12, 23, im2.width + 12, im2.height + 23))
assert_image_equal(im, im2)
def test_image_mask_1(self):
for mode in ("RGBA", "RGB", "L"):
im = Image.new(mode, (200, 200), "white")
im2 = getattr(self, "gradient_" + mode)
@pytest.mark.parametrize("mode", ["RGBA", "RGB", "L"])
def test_image_mask_1(self, mode):
im = Image.new(mode, (200, 200), "white")
im2 = getattr(self, "gradient_" + mode)
self.assert_9points_paste(
im,
im2,
self.mask_1,
[
(255, 255, 255, 255),
(255, 255, 255, 255),
(127, 254, 127, 0),
(255, 255, 255, 255),
(255, 255, 255, 255),
(191, 190, 63, 64),
(127, 0, 127, 254),
(191, 64, 63, 190),
(255, 255, 255, 255),
],
)
self.assert_9points_paste(
im,
im2,
self.mask_1,
[
(255, 255, 255, 255),
(255, 255, 255, 255),
(127, 254, 127, 0),
(255, 255, 255, 255),
(255, 255, 255, 255),
(191, 190, 63, 64),
(127, 0, 127, 254),
(191, 64, 63, 190),
(255, 255, 255, 255),
],
)
def test_image_mask_L(self):
for mode in ("RGBA", "RGB", "L"):
im = Image.new(mode, (200, 200), "white")
im2 = getattr(self, "gradient_" + mode)
@pytest.mark.parametrize("mode", ["RGBA", "RGB", "L"])
def test_image_mask_L(self, mode):
im = Image.new(mode, (200, 200), "white")
im2 = getattr(self, "gradient_" + mode)
self.assert_9points_paste(
im,
im2,
self.mask_L,
[
(128, 191, 255, 191),
(208, 239, 239, 208),
(255, 255, 255, 255),
(112, 111, 206, 207),
(192, 191, 191, 191),
(239, 239, 207, 207),
(128, 1, 128, 254),
(207, 113, 112, 207),
(255, 191, 128, 191),
],
)
self.assert_9points_paste(
im,
im2,
self.mask_L,
[
(128, 191, 255, 191),
(208, 239, 239, 208),
(255, 255, 255, 255),
(112, 111, 206, 207),
(192, 191, 191, 191),
(239, 239, 207, 207),
(128, 1, 128, 254),
(207, 113, 112, 207),
(255, 191, 128, 191),
],
)
def test_image_mask_LA(self):
for mode in ("RGBA", "RGB", "L"):
im = Image.new(mode, (200, 200), "white")
im2 = getattr(self, "gradient_" + mode)
@pytest.mark.parametrize("mode", ["RGBA", "RGB", "L"])
def test_image_mask_LA(self, mode):
im = Image.new(mode, (200, 200), "white")
im2 = getattr(self, "gradient_" + mode)
self.assert_9points_paste(
im,
im2,
self.gradient_LA,
[
(128, 191, 255, 191),
(112, 207, 206, 111),
(128, 254, 128, 1),
(208, 208, 239, 239),
(192, 191, 191, 191),
(207, 207, 112, 113),
(255, 255, 255, 255),
(239, 207, 207, 239),
(255, 191, 128, 191),
],
)
self.assert_9points_paste(
im,
im2,
self.gradient_LA,
[
(128, 191, 255, 191),
(112, 207, 206, 111),
(128, 254, 128, 1),
(208, 208, 239, 239),
(192, 191, 191, 191),
(207, 207, 112, 113),
(255, 255, 255, 255),
(239, 207, 207, 239),
(255, 191, 128, 191),
],
)
def test_image_mask_RGBA(self):
for mode in ("RGBA", "RGB", "L"):
im = Image.new(mode, (200, 200), "white")
im2 = getattr(self, "gradient_" + mode)
@pytest.mark.parametrize("mode", ["RGBA", "RGB", "L"])
def test_image_mask_RGBA(self, mode):
im = Image.new(mode, (200, 200), "white")
im2 = getattr(self, "gradient_" + mode)
self.assert_9points_paste(
im,
im2,
self.gradient_RGBA,
[
(128, 191, 255, 191),
(208, 239, 239, 208),
(255, 255, 255, 255),
(112, 111, 206, 207),
(192, 191, 191, 191),
(239, 239, 207, 207),
(128, 1, 128, 254),
(207, 113, 112, 207),
(255, 191, 128, 191),
],
)
self.assert_9points_paste(
im,
im2,
self.gradient_RGBA,
[
(128, 191, 255, 191),
(208, 239, 239, 208),
(255, 255, 255, 255),
(112, 111, 206, 207),
(192, 191, 191, 191),
(239, 239, 207, 207),
(128, 1, 128, 254),
(207, 113, 112, 207),
(255, 191, 128, 191),
],
)
def test_image_mask_RGBa(self):
for mode in ("RGBA", "RGB", "L"):
im = Image.new(mode, (200, 200), "white")
im2 = getattr(self, "gradient_" + mode)
@pytest.mark.parametrize("mode", ["RGBA", "RGB", "L"])
def test_image_mask_RGBa(self, mode):
im = Image.new(mode, (200, 200), "white")
im2 = getattr(self, "gradient_" + mode)
self.assert_9points_paste(
im,
im2,
self.gradient_RGBa,
[
(128, 255, 126, 255),
(0, 127, 126, 255),
(126, 253, 126, 255),
(128, 127, 254, 255),
(0, 255, 254, 255),
(126, 125, 254, 255),
(128, 1, 128, 255),
(0, 129, 128, 255),
(126, 255, 128, 255),
],
)
self.assert_9points_paste(
im,
im2,
self.gradient_RGBa,
[
(128, 255, 126, 255),
(0, 127, 126, 255),
(126, 253, 126, 255),
(128, 127, 254, 255),
(0, 255, 254, 255),
(126, 125, 254, 255),
(128, 1, 128, 255),
(0, 129, 128, 255),
(126, 255, 128, 255),
],
)
def test_color_solid(self):
for mode in ("RGBA", "RGB", "L"):
im = Image.new(mode, (200, 200), "black")
@pytest.mark.parametrize("mode", ["RGBA", "RGB", "L"])
def test_color_solid(self, mode):
im = Image.new(mode, (200, 200), "black")
rect = (12, 23, 128 + 12, 128 + 23)
im.paste("white", rect)
rect = (12, 23, 128 + 12, 128 + 23)
im.paste("white", rect)
hist = im.crop(rect).histogram()
while hist:
head, hist = hist[:256], hist[256:]
assert head[255] == 128 * 128
assert sum(head[:255]) == 0
hist = im.crop(rect).histogram()
while hist:
head, hist = hist[:256], hist[256:]
assert head[255] == 128 * 128
assert sum(head[:255]) == 0
def test_color_mask_1(self):
for mode in ("RGBA", "RGB", "L"):
im = Image.new(mode, (200, 200), (50, 60, 70, 80)[: len(mode)])
color = (10, 20, 30, 40)[: len(mode)]
@pytest.mark.parametrize("mode", ["RGBA", "RGB", "L"])
def test_color_mask_1(self, mode):
im = Image.new(mode, (200, 200), (50, 60, 70, 80)[: len(mode)])
color = (10, 20, 30, 40)[: len(mode)]
self.assert_9points_paste(
im,
color,
self.mask_1,
[
(50, 60, 70, 80),
(50, 60, 70, 80),
(10, 20, 30, 40),
(50, 60, 70, 80),
(50, 60, 70, 80),
(10, 20, 30, 40),
(10, 20, 30, 40),
(10, 20, 30, 40),
(50, 60, 70, 80),
],
)
self.assert_9points_paste(
im,
color,
self.mask_1,
[
(50, 60, 70, 80),
(50, 60, 70, 80),
(10, 20, 30, 40),
(50, 60, 70, 80),
(50, 60, 70, 80),
(10, 20, 30, 40),
(10, 20, 30, 40),
(10, 20, 30, 40),
(50, 60, 70, 80),
],
)
def test_color_mask_L(self):
for mode in ("RGBA", "RGB", "L"):
im = getattr(self, "gradient_" + mode).copy()
color = "white"
@pytest.mark.parametrize("mode", ["RGBA", "RGB", "L"])
def test_color_mask_L(self, mode):
im = getattr(self, "gradient_" + mode).copy()
color = "white"
self.assert_9points_paste(
im,
color,
self.mask_L,
[
(127, 191, 254, 191),
(111, 207, 206, 110),
(127, 254, 127, 0),
(207, 207, 239, 239),
(191, 191, 190, 191),
(207, 206, 111, 112),
(254, 254, 254, 255),
(239, 206, 206, 238),
(254, 191, 127, 191),
],
)
self.assert_9points_paste(
im,
color,
self.mask_L,
[
(127, 191, 254, 191),
(111, 207, 206, 110),
(127, 254, 127, 0),
(207, 207, 239, 239),
(191, 191, 190, 191),
(207, 206, 111, 112),
(254, 254, 254, 255),
(239, 206, 206, 238),
(254, 191, 127, 191),
],
)
def test_color_mask_RGBA(self):
for mode in ("RGBA", "RGB", "L"):
im = getattr(self, "gradient_" + mode).copy()
color = "white"
@pytest.mark.parametrize("mode", ["RGBA", "RGB", "L"])
def test_color_mask_RGBA(self, mode):
im = getattr(self, "gradient_" + mode).copy()
color = "white"
self.assert_9points_paste(
im,
color,
self.gradient_RGBA,
[
(127, 191, 254, 191),
(111, 207, 206, 110),
(127, 254, 127, 0),
(207, 207, 239, 239),
(191, 191, 190, 191),
(207, 206, 111, 112),
(254, 254, 254, 255),
(239, 206, 206, 238),
(254, 191, 127, 191),
],
)
self.assert_9points_paste(
im,
color,
self.gradient_RGBA,
[
(127, 191, 254, 191),
(111, 207, 206, 110),
(127, 254, 127, 0),
(207, 207, 239, 239),
(191, 191, 190, 191),
(207, 206, 111, 112),
(254, 254, 254, 255),
(239, 206, 206, 238),
(254, 191, 127, 191),
],
)
def test_color_mask_RGBa(self):
for mode in ("RGBA", "RGB", "L"):
im = getattr(self, "gradient_" + mode).copy()
color = "white"
@pytest.mark.parametrize("mode", ["RGBA", "RGB", "L"])
def test_color_mask_RGBa(self, mode):
im = getattr(self, "gradient_" + mode).copy()
color = "white"
self.assert_9points_paste(
im,
color,
self.gradient_RGBa,
[
(255, 63, 126, 63),
(47, 143, 142, 46),
(126, 253, 126, 255),
(15, 15, 47, 47),
(63, 63, 62, 63),
(142, 141, 46, 47),
(255, 255, 255, 0),
(48, 15, 15, 47),
(126, 63, 255, 63),
],
)
self.assert_9points_paste(
im,
color,
self.gradient_RGBa,
[
(255, 63, 126, 63),
(47, 143, 142, 46),
(126, 253, 126, 255),
(15, 15, 47, 47),
(63, 63, 62, 63),
(142, 141, 46, 47),
(255, 255, 255, 0),
(48, 15, 15, 47),
(126, 63, 255, 63),
],
)
def test_different_sizes(self):
im = Image.new("RGB", (100, 100))

View File

@ -38,58 +38,64 @@ gradients_image = Image.open("Tests/images/radial_gradients.png")
gradients_image.load()
def test_args_factor():
@pytest.mark.parametrize(
"size, expected",
(
(3, (4, 4)),
((3, 1), (4, 10)),
((1, 3), (10, 4)),
),
)
def test_args_factor(size, expected):
im = Image.new("L", (10, 10))
assert (4, 4) == im.reduce(3).size
assert (4, 10) == im.reduce((3, 1)).size
assert (10, 4) == im.reduce((1, 3)).size
with pytest.raises(ValueError):
im.reduce(0)
with pytest.raises(TypeError):
im.reduce(2.0)
with pytest.raises(ValueError):
im.reduce((0, 10))
assert expected == im.reduce(size).size
def test_args_box():
@pytest.mark.parametrize(
"size, expected_error", ((0, ValueError), (2.0, TypeError), ((0, 10), ValueError))
)
def test_args_factor_error(size, expected_error):
im = Image.new("L", (10, 10))
assert (5, 5) == im.reduce(2, (0, 0, 10, 10)).size
assert (1, 1) == im.reduce(2, (5, 5, 6, 6)).size
with pytest.raises(TypeError):
im.reduce(2, "stri")
with pytest.raises(TypeError):
im.reduce(2, 2)
with pytest.raises(ValueError):
im.reduce(2, (0, 0, 11, 10))
with pytest.raises(ValueError):
im.reduce(2, (0, 0, 10, 11))
with pytest.raises(ValueError):
im.reduce(2, (-1, 0, 10, 10))
with pytest.raises(ValueError):
im.reduce(2, (0, -1, 10, 10))
with pytest.raises(ValueError):
im.reduce(2, (0, 5, 10, 5))
with pytest.raises(ValueError):
im.reduce(2, (5, 0, 5, 10))
with pytest.raises(expected_error):
im.reduce(size)
def test_unsupported_modes():
@pytest.mark.parametrize(
"size, expected",
(
((0, 0, 10, 10), (5, 5)),
((5, 5, 6, 6), (1, 1)),
),
)
def test_args_box(size, expected):
im = Image.new("L", (10, 10))
assert expected == im.reduce(2, size).size
@pytest.mark.parametrize(
"size, expected_error",
(
("stri", TypeError),
((0, 0, 11, 10), ValueError),
((0, 0, 10, 11), ValueError),
((-1, 0, 10, 10), ValueError),
((0, -1, 10, 10), ValueError),
((0, 5, 10, 5), ValueError),
((5, 0, 5, 10), ValueError),
),
)
def test_args_box_error(size, expected_error):
im = Image.new("L", (10, 10))
with pytest.raises(expected_error):
im.reduce(2, size).size
@pytest.mark.parametrize("mode", ("P", "1", "I;16"))
def test_unsupported_modes(mode):
im = Image.new("P", (10, 10))
with pytest.raises(ValueError):
im.reduce(3)
im = Image.new("1", (10, 10))
with pytest.raises(ValueError):
im.reduce(3)
im = Image.new("I;16", (10, 10))
with pytest.raises(ValueError):
im.reduce(3)
def get_image(mode):
mode_info = ImageMode.getmode(mode)
@ -197,63 +203,69 @@ def test_mode_L():
compare_reduce_with_box(im, factor)
def test_mode_LA():
@pytest.mark.parametrize("factor", remarkable_factors)
def test_mode_LA(factor):
im = get_image("LA")
for factor in remarkable_factors:
compare_reduce_with_reference(im, factor, 0.8, 5)
compare_reduce_with_reference(im, factor, 0.8, 5)
@pytest.mark.parametrize("factor", remarkable_factors)
def test_mode_LA_opaque(factor):
im = get_image("LA")
# With opaque alpha, an error should be way smaller.
im.putalpha(Image.new("L", im.size, 255))
for factor in remarkable_factors:
compare_reduce_with_reference(im, factor)
compare_reduce_with_box(im, factor)
compare_reduce_with_reference(im, factor)
compare_reduce_with_box(im, factor)
def test_mode_La():
@pytest.mark.parametrize("factor", remarkable_factors)
def test_mode_La(factor):
im = get_image("La")
for factor in remarkable_factors:
compare_reduce_with_reference(im, factor)
compare_reduce_with_box(im, factor)
compare_reduce_with_reference(im, factor)
compare_reduce_with_box(im, factor)
def test_mode_RGB():
@pytest.mark.parametrize("factor", remarkable_factors)
def test_mode_RGB(factor):
im = get_image("RGB")
for factor in remarkable_factors:
compare_reduce_with_reference(im, factor)
compare_reduce_with_box(im, factor)
compare_reduce_with_reference(im, factor)
compare_reduce_with_box(im, factor)
def test_mode_RGBA():
@pytest.mark.parametrize("factor", remarkable_factors)
def test_mode_RGBA(factor):
im = get_image("RGBA")
for factor in remarkable_factors:
compare_reduce_with_reference(im, factor, 0.8, 5)
compare_reduce_with_reference(im, factor, 0.8, 5)
@pytest.mark.parametrize("factor", remarkable_factors)
def test_mode_RGBA_opaque(factor):
im = get_image("RGBA")
# With opaque alpha, an error should be way smaller.
im.putalpha(Image.new("L", im.size, 255))
for factor in remarkable_factors:
compare_reduce_with_reference(im, factor)
compare_reduce_with_box(im, factor)
compare_reduce_with_reference(im, factor)
compare_reduce_with_box(im, factor)
def test_mode_RGBa():
@pytest.mark.parametrize("factor", remarkable_factors)
def test_mode_RGBa(factor):
im = get_image("RGBa")
for factor in remarkable_factors:
compare_reduce_with_reference(im, factor)
compare_reduce_with_box(im, factor)
compare_reduce_with_reference(im, factor)
compare_reduce_with_box(im, factor)
def test_mode_I():
@pytest.mark.parametrize("factor", remarkable_factors)
def test_mode_I(factor):
im = get_image("I")
for factor in remarkable_factors:
compare_reduce_with_reference(im, factor)
compare_reduce_with_box(im, factor)
compare_reduce_with_reference(im, factor)
compare_reduce_with_box(im, factor)
def test_mode_F():
@pytest.mark.parametrize("factor", remarkable_factors)
def test_mode_F(factor):
im = get_image("F")
for factor in remarkable_factors:
compare_reduce_with_reference(im, factor, 0, 0)
compare_reduce_with_box(im, factor)
compare_reduce_with_reference(im, factor, 0, 0)
compare_reduce_with_box(im, factor)
@skip_unless_feature("jpg_2000")

View File

@ -100,40 +100,41 @@ class TestImagingCoreResampleAccuracy:
for y in range(image.size[1])
)
def test_reduce_box(self):
for mode in ["RGBX", "RGB", "La", "L"]:
case = self.make_case(mode, (8, 8), 0xE1)
case = case.resize((4, 4), Image.Resampling.BOX)
# fmt: off
data = ("e1 e1"
"e1 e1")
# fmt: on
for channel in case.split():
self.check_case(channel, self.make_sample(data, (4, 4)))
@pytest.mark.parametrize("mode", ("RGBX", "RGB", "La", "L"))
def test_reduce_box(self, mode):
case = self.make_case(mode, (8, 8), 0xE1)
case = case.resize((4, 4), Image.Resampling.BOX)
# fmt: off
data = ("e1 e1"
"e1 e1")
# fmt: on
for channel in case.split():
self.check_case(channel, self.make_sample(data, (4, 4)))
def test_reduce_bilinear(self):
for mode in ["RGBX", "RGB", "La", "L"]:
case = self.make_case(mode, (8, 8), 0xE1)
case = case.resize((4, 4), Image.Resampling.BILINEAR)
# fmt: off
data = ("e1 c9"
"c9 b7")
# fmt: on
for channel in case.split():
self.check_case(channel, self.make_sample(data, (4, 4)))
@pytest.mark.parametrize("mode", ("RGBX", "RGB", "La", "L"))
def test_reduce_bilinear(self, mode):
case = self.make_case(mode, (8, 8), 0xE1)
case = case.resize((4, 4), Image.Resampling.BILINEAR)
# fmt: off
data = ("e1 c9"
"c9 b7")
# fmt: on
for channel in case.split():
self.check_case(channel, self.make_sample(data, (4, 4)))
def test_reduce_hamming(self):
for mode in ["RGBX", "RGB", "La", "L"]:
case = self.make_case(mode, (8, 8), 0xE1)
case = case.resize((4, 4), Image.Resampling.HAMMING)
# fmt: off
data = ("e1 da"
"da d3")
# fmt: on
for channel in case.split():
self.check_case(channel, self.make_sample(data, (4, 4)))
@pytest.mark.parametrize("mode", ("RGBX", "RGB", "La", "L"))
def test_reduce_hamming(self, mode):
case = self.make_case(mode, (8, 8), 0xE1)
case = case.resize((4, 4), Image.Resampling.HAMMING)
# fmt: off
data = ("e1 da"
"da d3")
# fmt: on
for channel in case.split():
self.check_case(channel, self.make_sample(data, (4, 4)))
def test_reduce_bicubic(self):
@pytest.mark.parametrize("mode", ("RGBX", "RGB", "La", "L"))
def test_reduce_bicubic(self, mode):
for mode in ["RGBX", "RGB", "La", "L"]:
case = self.make_case(mode, (12, 12), 0xE1)
case = case.resize((6, 6), Image.Resampling.BICUBIC)
@ -145,79 +146,79 @@ class TestImagingCoreResampleAccuracy:
for channel in case.split():
self.check_case(channel, self.make_sample(data, (6, 6)))
def test_reduce_lanczos(self):
for mode in ["RGBX", "RGB", "La", "L"]:
case = self.make_case(mode, (16, 16), 0xE1)
case = case.resize((8, 8), Image.Resampling.LANCZOS)
# fmt: off
data = ("e1 e0 e4 d7"
"e0 df e3 d6"
"e4 e3 e7 da"
"d7 d6 d9 ce")
# fmt: on
for channel in case.split():
self.check_case(channel, self.make_sample(data, (8, 8)))
@pytest.mark.parametrize("mode", ("RGBX", "RGB", "La", "L"))
def test_reduce_lanczos(self, mode):
case = self.make_case(mode, (16, 16), 0xE1)
case = case.resize((8, 8), Image.Resampling.LANCZOS)
# fmt: off
data = ("e1 e0 e4 d7"
"e0 df e3 d6"
"e4 e3 e7 da"
"d7 d6 d9 ce")
# fmt: on
for channel in case.split():
self.check_case(channel, self.make_sample(data, (8, 8)))
def test_enlarge_box(self):
for mode in ["RGBX", "RGB", "La", "L"]:
case = self.make_case(mode, (2, 2), 0xE1)
case = case.resize((4, 4), Image.Resampling.BOX)
# fmt: off
data = ("e1 e1"
"e1 e1")
# fmt: on
for channel in case.split():
self.check_case(channel, self.make_sample(data, (4, 4)))
@pytest.mark.parametrize("mode", ("RGBX", "RGB", "La", "L"))
def test_enlarge_box(self, mode):
case = self.make_case(mode, (2, 2), 0xE1)
case = case.resize((4, 4), Image.Resampling.BOX)
# fmt: off
data = ("e1 e1"
"e1 e1")
# fmt: on
for channel in case.split():
self.check_case(channel, self.make_sample(data, (4, 4)))
def test_enlarge_bilinear(self):
for mode in ["RGBX", "RGB", "La", "L"]:
case = self.make_case(mode, (2, 2), 0xE1)
case = case.resize((4, 4), Image.Resampling.BILINEAR)
# fmt: off
data = ("e1 b0"
"b0 98")
# fmt: on
for channel in case.split():
self.check_case(channel, self.make_sample(data, (4, 4)))
@pytest.mark.parametrize("mode", ("RGBX", "RGB", "La", "L"))
def test_enlarge_bilinear(self, mode):
case = self.make_case(mode, (2, 2), 0xE1)
case = case.resize((4, 4), Image.Resampling.BILINEAR)
# fmt: off
data = ("e1 b0"
"b0 98")
# fmt: on
for channel in case.split():
self.check_case(channel, self.make_sample(data, (4, 4)))
def test_enlarge_hamming(self):
for mode in ["RGBX", "RGB", "La", "L"]:
case = self.make_case(mode, (2, 2), 0xE1)
case = case.resize((4, 4), Image.Resampling.HAMMING)
# fmt: off
data = ("e1 d2"
"d2 c5")
# fmt: on
for channel in case.split():
self.check_case(channel, self.make_sample(data, (4, 4)))
@pytest.mark.parametrize("mode", ("RGBX", "RGB", "La", "L"))
def test_enlarge_hamming(self, mode):
case = self.make_case(mode, (2, 2), 0xE1)
case = case.resize((4, 4), Image.Resampling.HAMMING)
# fmt: off
data = ("e1 d2"
"d2 c5")
# fmt: on
for channel in case.split():
self.check_case(channel, self.make_sample(data, (4, 4)))
def test_enlarge_bicubic(self):
for mode in ["RGBX", "RGB", "La", "L"]:
case = self.make_case(mode, (4, 4), 0xE1)
case = case.resize((8, 8), Image.Resampling.BICUBIC)
# fmt: off
data = ("e1 e5 ee b9"
"e5 e9 f3 bc"
"ee f3 fd c1"
"b9 bc c1 a2")
# fmt: on
for channel in case.split():
self.check_case(channel, self.make_sample(data, (8, 8)))
@pytest.mark.parametrize("mode", ("RGBX", "RGB", "La", "L"))
def test_enlarge_bicubic(self, mode):
case = self.make_case(mode, (4, 4), 0xE1)
case = case.resize((8, 8), Image.Resampling.BICUBIC)
# fmt: off
data = ("e1 e5 ee b9"
"e5 e9 f3 bc"
"ee f3 fd c1"
"b9 bc c1 a2")
# fmt: on
for channel in case.split():
self.check_case(channel, self.make_sample(data, (8, 8)))
def test_enlarge_lanczos(self):
for mode in ["RGBX", "RGB", "La", "L"]:
case = self.make_case(mode, (6, 6), 0xE1)
case = case.resize((12, 12), Image.Resampling.LANCZOS)
data = (
"e1 e0 db ed f5 b8"
"e0 df da ec f3 b7"
"db db d6 e7 ee b5"
"ed ec e6 fb ff bf"
"f5 f4 ee ff ff c4"
"b8 b7 b4 bf c4 a0"
)
for channel in case.split():
self.check_case(channel, self.make_sample(data, (12, 12)))
@pytest.mark.parametrize("mode", ("RGBX", "RGB", "La", "L"))
def test_enlarge_lanczos(self, mode):
case = self.make_case(mode, (6, 6), 0xE1)
case = case.resize((12, 12), Image.Resampling.LANCZOS)
data = (
"e1 e0 db ed f5 b8"
"e0 df da ec f3 b7"
"db db d6 e7 ee b5"
"ed ec e6 fb ff bf"
"f5 f4 ee ff ff c4"
"b8 b7 b4 bf c4 a0"
)
for channel in case.split():
self.check_case(channel, self.make_sample(data, (12, 12)))
def test_box_filter_correct_range(self):
im = Image.new("RGB", (8, 8), "#1688ff").resize(
@ -419,40 +420,43 @@ class TestCoreResampleCoefficients:
class TestCoreResampleBox:
def test_wrong_arguments(self):
im = hopper()
for resample in (
@pytest.mark.parametrize(
"resample",
(
Image.Resampling.NEAREST,
Image.Resampling.BOX,
Image.Resampling.BILINEAR,
Image.Resampling.HAMMING,
Image.Resampling.BICUBIC,
Image.Resampling.LANCZOS,
):
im.resize((32, 32), resample, (0, 0, im.width, im.height))
im.resize((32, 32), resample, (20, 20, im.width, im.height))
im.resize((32, 32), resample, (20, 20, 20, 100))
im.resize((32, 32), resample, (20, 20, 100, 20))
),
)
def test_wrong_arguments(self, resample):
im = hopper()
im.resize((32, 32), resample, (0, 0, im.width, im.height))
im.resize((32, 32), resample, (20, 20, im.width, im.height))
im.resize((32, 32), resample, (20, 20, 20, 100))
im.resize((32, 32), resample, (20, 20, 100, 20))
with pytest.raises(TypeError, match="must be sequence of length 4"):
im.resize((32, 32), resample, (im.width, im.height))
with pytest.raises(TypeError, match="must be sequence of length 4"):
im.resize((32, 32), resample, (im.width, im.height))
with pytest.raises(ValueError, match="can't be negative"):
im.resize((32, 32), resample, (-20, 20, 100, 100))
with pytest.raises(ValueError, match="can't be negative"):
im.resize((32, 32), resample, (20, -20, 100, 100))
with pytest.raises(ValueError, match="can't be negative"):
im.resize((32, 32), resample, (-20, 20, 100, 100))
with pytest.raises(ValueError, match="can't be negative"):
im.resize((32, 32), resample, (20, -20, 100, 100))
with pytest.raises(ValueError, match="can't be empty"):
im.resize((32, 32), resample, (20.1, 20, 20, 100))
with pytest.raises(ValueError, match="can't be empty"):
im.resize((32, 32), resample, (20, 20.1, 100, 20))
with pytest.raises(ValueError, match="can't be empty"):
im.resize((32, 32), resample, (20.1, 20.1, 20, 20))
with pytest.raises(ValueError, match="can't be empty"):
im.resize((32, 32), resample, (20.1, 20, 20, 100))
with pytest.raises(ValueError, match="can't be empty"):
im.resize((32, 32), resample, (20, 20.1, 100, 20))
with pytest.raises(ValueError, match="can't be empty"):
im.resize((32, 32), resample, (20.1, 20.1, 20, 20))
with pytest.raises(ValueError, match="can't exceed"):
im.resize((32, 32), resample, (0, 0, im.width + 1, im.height))
with pytest.raises(ValueError, match="can't exceed"):
im.resize((32, 32), resample, (0, 0, im.width, im.height + 1))
with pytest.raises(ValueError, match="can't exceed"):
im.resize((32, 32), resample, (0, 0, im.width + 1, im.height))
with pytest.raises(ValueError, match="can't exceed"):
im.resize((32, 32), resample, (0, 0, im.width, im.height + 1))
def resize_tiled(self, im, dst_size, xtiles, ytiles):
def split_range(size, tiles):
@ -509,14 +513,16 @@ class TestCoreResampleBox:
with pytest.raises(AssertionError, match=r"difference 29\."):
assert_image_similar(reference, without_box, 5)
def test_formats(self):
for resample in [Image.Resampling.NEAREST, Image.Resampling.BILINEAR]:
for mode in ["RGB", "L", "RGBA", "LA", "I", ""]:
im = hopper(mode)
box = (20, 20, im.size[0] - 20, im.size[1] - 20)
with_box = im.resize((32, 32), resample, box)
cropped = im.crop(box).resize((32, 32), resample)
assert_image_similar(cropped, with_box, 0.4)
@pytest.mark.parametrize("mode", ("RGB", "L", "RGBA", "LA", "I", ""))
@pytest.mark.parametrize(
"resample", (Image.Resampling.NEAREST, Image.Resampling.BILINEAR)
)
def test_formats(self, mode, resample):
im = hopper(mode)
box = (20, 20, im.size[0] - 20, im.size[1] - 20)
with_box = im.resize((32, 32), resample, box)
cropped = im.crop(box).resize((32, 32), resample)
assert_image_similar(cropped, with_box, 0.4)
def test_passthrough(self):
# When no resize is required

View File

@ -22,24 +22,15 @@ class TestImagingCoreResize:
im.load()
return im._new(im.im.resize(size, f))
def test_nearest_mode(self):
for mode in [
"1",
"P",
"L",
"I",
"F",
"RGB",
"RGBA",
"CMYK",
"YCbCr",
"I;16",
]: # exotic mode
im = hopper(mode)
r = self.resize(im, (15, 12), Image.Resampling.NEAREST)
assert r.mode == mode
assert r.size == (15, 12)
assert r.im.bands == im.im.bands
@pytest.mark.parametrize(
"mode", ("1", "P", "L", "I", "F", "RGB", "RGBA", "CMYK", "YCbCr", "I;16")
)
def test_nearest_mode(self, mode):
im = hopper(mode)
r = self.resize(im, (15, 12), Image.Resampling.NEAREST)
assert r.mode == mode
assert r.size == (15, 12)
assert r.im.bands == im.im.bands
def test_convolution_modes(self):
with pytest.raises(ValueError):
@ -55,33 +46,58 @@ class TestImagingCoreResize:
assert r.size == (15, 12)
assert r.im.bands == im.im.bands
def test_reduce_filters(self):
for f in [
@pytest.mark.parametrize(
"resample",
(
Image.Resampling.NEAREST,
Image.Resampling.BOX,
Image.Resampling.BILINEAR,
Image.Resampling.HAMMING,
Image.Resampling.BICUBIC,
Image.Resampling.LANCZOS,
]:
r = self.resize(hopper("RGB"), (15, 12), f)
assert r.mode == "RGB"
assert r.size == (15, 12)
),
)
def test_reduce_filters(self, resample):
r = self.resize(hopper("RGB"), (15, 12), resample)
assert r.mode == "RGB"
assert r.size == (15, 12)
def test_enlarge_filters(self):
for f in [
@pytest.mark.parametrize(
"resample",
(
Image.Resampling.NEAREST,
Image.Resampling.BOX,
Image.Resampling.BILINEAR,
Image.Resampling.HAMMING,
Image.Resampling.BICUBIC,
Image.Resampling.LANCZOS,
]:
r = self.resize(hopper("RGB"), (212, 195), f)
assert r.mode == "RGB"
assert r.size == (212, 195)
),
)
def test_enlarge_filters(self, resample):
r = self.resize(hopper("RGB"), (212, 195), resample)
assert r.mode == "RGB"
assert r.size == (212, 195)
def test_endianness(self):
@pytest.mark.parametrize(
"resample",
(
Image.Resampling.NEAREST,
Image.Resampling.BOX,
Image.Resampling.BILINEAR,
Image.Resampling.HAMMING,
Image.Resampling.BICUBIC,
Image.Resampling.LANCZOS,
),
)
@pytest.mark.parametrize(
"mode, channels_set",
(
("RGB", ("blank", "filled", "dirty")),
("RGBA", ("blank", "blank", "filled", "dirty")),
("LA", ("filled", "dirty")),
),
)
def test_endianness(self, resample, mode, channels_set):
# Make an image with one colored pixel, in one channel.
# When resized, that channel should be the same as a GS image.
# Other channels should be unaffected.
@ -95,47 +111,37 @@ class TestImagingCoreResize:
}
samples["dirty"].putpixel((1, 1), 128)
for f in [
# samples resized with current filter
references = {
name: self.resize(ch, (4, 4), resample) for name, ch in samples.items()
}
for channels in set(permutations(channels_set)):
# compile image from different channels permutations
im = Image.merge(mode, [samples[ch] for ch in channels])
resized = self.resize(im, (4, 4), resample)
for i, ch in enumerate(resized.split()):
# check what resized channel in image is the same
# as separately resized channel
assert_image_equal(ch, references[channels[i]])
@pytest.mark.parametrize(
"resample",
(
Image.Resampling.NEAREST,
Image.Resampling.BOX,
Image.Resampling.BILINEAR,
Image.Resampling.HAMMING,
Image.Resampling.BICUBIC,
Image.Resampling.LANCZOS,
]:
# samples resized with current filter
references = {
name: self.resize(ch, (4, 4), f) for name, ch in samples.items()
}
for mode, channels_set in [
("RGB", ("blank", "filled", "dirty")),
("RGBA", ("blank", "blank", "filled", "dirty")),
("LA", ("filled", "dirty")),
]:
for channels in set(permutations(channels_set)):
# compile image from different channels permutations
im = Image.merge(mode, [samples[ch] for ch in channels])
resized = self.resize(im, (4, 4), f)
for i, ch in enumerate(resized.split()):
# check what resized channel in image is the same
# as separately resized channel
assert_image_equal(ch, references[channels[i]])
def test_enlarge_zero(self):
for f in [
Image.Resampling.NEAREST,
Image.Resampling.BOX,
Image.Resampling.BILINEAR,
Image.Resampling.HAMMING,
Image.Resampling.BICUBIC,
Image.Resampling.LANCZOS,
]:
r = self.resize(Image.new("RGB", (0, 0), "white"), (212, 195), f)
assert r.mode == "RGB"
assert r.size == (212, 195)
assert r.getdata()[0] == (0, 0, 0)
),
)
def test_enlarge_zero(self, resample):
r = self.resize(Image.new("RGB", (0, 0), "white"), (212, 195), resample)
assert r.mode == "RGB"
assert r.size == (212, 195)
assert r.getdata()[0] == (0, 0, 0)
def test_unknown_filter(self):
with pytest.raises(ValueError):
@ -179,74 +185,71 @@ class TestReducingGapResize:
(52, 34), Image.Resampling.BICUBIC, reducing_gap=0.99
)
def test_reducing_gap_1(self, gradients_image):
for box, epsilon in [
(None, 4),
((1.1, 2.2, 510.8, 510.9), 4),
((3, 10, 410, 256), 10),
]:
ref = gradients_image.resize((52, 34), Image.Resampling.BICUBIC, box=box)
im = gradients_image.resize(
(52, 34), Image.Resampling.BICUBIC, box=box, reducing_gap=1.0
)
with pytest.raises(AssertionError):
assert_image_equal(ref, im)
assert_image_similar(ref, im, epsilon)
def test_reducing_gap_2(self, gradients_image):
for box, epsilon in [
(None, 1.5),
((1.1, 2.2, 510.8, 510.9), 1.5),
((3, 10, 410, 256), 1),
]:
ref = gradients_image.resize((52, 34), Image.Resampling.BICUBIC, box=box)
im = gradients_image.resize(
(52, 34), Image.Resampling.BICUBIC, box=box, reducing_gap=2.0
)
with pytest.raises(AssertionError):
assert_image_equal(ref, im)
assert_image_similar(ref, im, epsilon)
def test_reducing_gap_3(self, gradients_image):
for box, epsilon in [
(None, 1),
((1.1, 2.2, 510.8, 510.9), 1),
((3, 10, 410, 256), 0.5),
]:
ref = gradients_image.resize((52, 34), Image.Resampling.BICUBIC, box=box)
im = gradients_image.resize(
(52, 34), Image.Resampling.BICUBIC, box=box, reducing_gap=3.0
)
with pytest.raises(AssertionError):
assert_image_equal(ref, im)
assert_image_similar(ref, im, epsilon)
def test_reducing_gap_8(self, gradients_image):
for box in [None, (1.1, 2.2, 510.8, 510.9), (3, 10, 410, 256)]:
ref = gradients_image.resize((52, 34), Image.Resampling.BICUBIC, box=box)
im = gradients_image.resize(
(52, 34), Image.Resampling.BICUBIC, box=box, reducing_gap=8.0
)
@pytest.mark.parametrize(
"box, epsilon",
((None, 4), ((1.1, 2.2, 510.8, 510.9), 4), ((3, 10, 410, 256), 10)),
)
def test_reducing_gap_1(self, gradients_image, box, epsilon):
ref = gradients_image.resize((52, 34), Image.Resampling.BICUBIC, box=box)
im = gradients_image.resize(
(52, 34), Image.Resampling.BICUBIC, box=box, reducing_gap=1.0
)
with pytest.raises(AssertionError):
assert_image_equal(ref, im)
def test_box_filter(self, gradients_image):
for box, epsilon in [
((0, 0, 512, 512), 5.5),
((0.9, 1.7, 128, 128), 9.5),
]:
ref = gradients_image.resize((52, 34), Image.Resampling.BOX, box=box)
im = gradients_image.resize(
(52, 34), Image.Resampling.BOX, box=box, reducing_gap=1.0
)
assert_image_similar(ref, im, epsilon)
assert_image_similar(ref, im, epsilon)
@pytest.mark.parametrize(
"box, epsilon",
((None, 1.5), ((1.1, 2.2, 510.8, 510.9), 1.5), ((3, 10, 410, 256), 1)),
)
def test_reducing_gap_2(self, gradients_image, box, epsilon):
ref = gradients_image.resize((52, 34), Image.Resampling.BICUBIC, box=box)
im = gradients_image.resize(
(52, 34), Image.Resampling.BICUBIC, box=box, reducing_gap=2.0
)
with pytest.raises(AssertionError):
assert_image_equal(ref, im)
assert_image_similar(ref, im, epsilon)
@pytest.mark.parametrize(
"box, epsilon",
((None, 1), ((1.1, 2.2, 510.8, 510.9), 1), ((3, 10, 410, 256), 0.5)),
)
def test_reducing_gap_3(self, gradients_image, box, epsilon):
ref = gradients_image.resize((52, 34), Image.Resampling.BICUBIC, box=box)
im = gradients_image.resize(
(52, 34), Image.Resampling.BICUBIC, box=box, reducing_gap=3.0
)
with pytest.raises(AssertionError):
assert_image_equal(ref, im)
assert_image_similar(ref, im, epsilon)
@pytest.mark.parametrize("box", (None, (1.1, 2.2, 510.8, 510.9), (3, 10, 410, 256)))
def test_reducing_gap_8(self, gradients_image, box):
ref = gradients_image.resize((52, 34), Image.Resampling.BICUBIC, box=box)
im = gradients_image.resize(
(52, 34), Image.Resampling.BICUBIC, box=box, reducing_gap=8.0
)
assert_image_equal(ref, im)
@pytest.mark.parametrize(
"box, epsilon",
(((0, 0, 512, 512), 5.5), ((0.9, 1.7, 128, 128), 9.5)),
)
def test_box_filter(self, gradients_image, box, epsilon):
ref = gradients_image.resize((52, 34), Image.Resampling.BOX, box=box)
im = gradients_image.resize(
(52, 34), Image.Resampling.BOX, box=box, reducing_gap=1.0
)
assert_image_similar(ref, im, epsilon)
class TestImageResize:
@ -273,15 +276,14 @@ class TestImageResize:
im = im.resize((64, 64))
assert im.size == (64, 64)
def test_default_filter(self):
for mode in "L", "RGB", "I", "F":
im = hopper(mode)
assert im.resize((20, 20), Image.Resampling.BICUBIC) == im.resize((20, 20))
@pytest.mark.parametrize("mode", ("L", "RGB", "I", "F"))
def test_default_filter_bicubic(self, mode):
im = hopper(mode)
assert im.resize((20, 20), Image.Resampling.BICUBIC) == im.resize((20, 20))
for mode in "1", "P":
im = hopper(mode)
assert im.resize((20, 20), Image.Resampling.NEAREST) == im.resize((20, 20))
for mode in "I;16", "I;16L", "I;16B", "BGR;15", "BGR;16":
im = hopper(mode)
assert im.resize((20, 20), Image.Resampling.NEAREST) == im.resize((20, 20))
@pytest.mark.parametrize(
"mode", ("1", "P", "I;16", "I;16L", "I;16B", "BGR;15", "BGR;16")
)
def test_default_filter_nearest(self, mode):
im = hopper(mode)
assert im.resize((20, 20), Image.Resampling.NEAREST) == im.resize((20, 20))

View File

@ -1,3 +1,5 @@
import pytest
from PIL import Image
from .helper import (
@ -22,26 +24,26 @@ def rotate(im, mode, angle, center=None, translate=None):
assert out.size != im.size
def test_mode():
for mode in ("1", "P", "L", "RGB", "I", "F"):
im = hopper(mode)
rotate(im, mode, 45)
@pytest.mark.parametrize("mode", ("1", "P", "L", "RGB", "I", "F"))
def test_mode(mode):
im = hopper(mode)
rotate(im, mode, 45)
def test_angle():
for angle in (0, 90, 180, 270):
with Image.open("Tests/images/test-card.png") as im:
rotate(im, im.mode, angle)
im = hopper()
assert_image_equal(im.rotate(angle), im.rotate(angle, expand=1))
def test_zero():
for angle in (0, 45, 90, 180, 270):
im = Image.new("RGB", (0, 0))
@pytest.mark.parametrize("angle", (0, 90, 180, 270))
def test_angle(angle):
with Image.open("Tests/images/test-card.png") as im:
rotate(im, im.mode, angle)
im = hopper()
assert_image_equal(im.rotate(angle), im.rotate(angle, expand=1))
@pytest.mark.parametrize("angle", (0, 45, 90, 180, 270))
def test_zero(angle):
im = Image.new("RGB", (0, 0))
rotate(im, im.mode, angle)
def test_resample():
# Target image creation, inspected by eye.

View File

@ -97,6 +97,28 @@ def test_load_first():
im.thumbnail((64, 64))
assert im.size == (64, 10)
# Test thumbnail(), without draft(),
# on an image that is large enough once load() has changed the size
with Image.open("Tests/images/g4_orientation_5.tif") as im:
im.thumbnail((590, 88), reducing_gap=None)
assert im.size == (590, 88)
def test_load_first_unless_jpeg():
# Test that thumbnail() still uses draft() for JPEG
with Image.open("Tests/images/hopper.jpg") as im:
draft = im.draft
def im_draft(mode, size):
result = draft(mode, size)
assert result is not None
return result
im.draft = im_draft
im.thumbnail((64, 64))
# valgrind test is failing with memory allocated in libjpeg
@pytest.mark.valgrind_known_error(reason="Known Failing")

View File

@ -75,23 +75,25 @@ class TestImageTransform:
assert_image_equal(transformed, scaled)
def test_fill(self):
for mode, pixel in [
["RGB", (255, 0, 0)],
["RGBA", (255, 0, 0, 255)],
["LA", (76, 0)],
]:
im = hopper(mode)
(w, h) = im.size
transformed = im.transform(
im.size,
Image.Transform.EXTENT,
(0, 0, w * 2, h * 2),
Image.Resampling.BILINEAR,
fillcolor="red",
)
assert transformed.getpixel((w - 1, h - 1)) == pixel
@pytest.mark.parametrize(
"mode, expected_pixel",
(
("RGB", (255, 0, 0)),
("RGBA", (255, 0, 0, 255)),
("LA", (76, 0)),
),
)
def test_fill(self, mode, expected_pixel):
im = hopper(mode)
(w, h) = im.size
transformed = im.transform(
im.size,
Image.Transform.EXTENT,
(0, 0, w * 2, h * 2),
Image.Resampling.BILINEAR,
fillcolor="red",
)
assert transformed.getpixel((w - 1, h - 1)) == expected_pixel
def test_mesh(self):
# this should be a checkerboard of halfsized hoppers in ul, lr
@ -222,14 +224,12 @@ class TestImageTransform:
with pytest.raises(ValueError):
im.transform((100, 100), None)
def test_unknown_resampling_filter(self):
@pytest.mark.parametrize("resample", (Image.Resampling.BOX, "unknown"))
def test_unknown_resampling_filter(self, resample):
with hopper() as im:
(w, h) = im.size
for resample in (Image.Resampling.BOX, "unknown"):
with pytest.raises(ValueError):
im.transform(
(100, 100), Image.Transform.EXTENT, (0, 0, w, h), resample
)
with pytest.raises(ValueError):
im.transform((100, 100), Image.Transform.EXTENT, (0, 0, w, h), resample)
class TestImageTransformAffine:
@ -239,7 +239,16 @@ class TestImageTransformAffine:
im = hopper("RGB")
return im.crop((10, 20, im.width - 10, im.height - 20))
def _test_rotate(self, deg, transpose):
@pytest.mark.parametrize(
"deg, transpose",
(
(0, None),
(90, Image.Transpose.ROTATE_90),
(180, Image.Transpose.ROTATE_180),
(270, Image.Transpose.ROTATE_270),
),
)
def test_rotate(self, deg, transpose):
im = self._test_image()
angle = -math.radians(deg)
@ -271,77 +280,65 @@ class TestImageTransformAffine:
)
assert_image_equal(transposed, transformed)
def test_rotate_0_deg(self):
self._test_rotate(0, None)
def test_rotate_90_deg(self):
self._test_rotate(90, Image.Transpose.ROTATE_90)
def test_rotate_180_deg(self):
self._test_rotate(180, Image.Transpose.ROTATE_180)
def test_rotate_270_deg(self):
self._test_rotate(270, Image.Transpose.ROTATE_270)
def _test_resize(self, scale, epsilonscale):
@pytest.mark.parametrize(
"scale, epsilon_scale",
(
(1.1, 6.9),
(1.5, 5.5),
(2.0, 5.5),
(2.3, 3.7),
(2.5, 3.7),
),
)
@pytest.mark.parametrize(
"resample,epsilon",
(
(Image.Resampling.NEAREST, 0),
(Image.Resampling.BILINEAR, 2),
(Image.Resampling.BICUBIC, 1),
),
)
def test_resize(self, scale, epsilon_scale, resample, epsilon):
im = self._test_image()
size_up = int(round(im.width * scale)), int(round(im.height * scale))
matrix_up = [1 / scale, 0, 0, 0, 1 / scale, 0, 0, 0]
matrix_down = [scale, 0, 0, 0, scale, 0, 0, 0]
for resample, epsilon in [
transformed = im.transform(size_up, self.transform, matrix_up, resample)
transformed = transformed.transform(
im.size, self.transform, matrix_down, resample
)
assert_image_similar(transformed, im, epsilon * epsilon_scale)
@pytest.mark.parametrize(
"x, y, epsilon_scale",
(
(0.1, 0, 3.7),
(0.6, 0, 9.1),
(50, 50, 0),
),
)
@pytest.mark.parametrize(
"resample, epsilon",
(
(Image.Resampling.NEAREST, 0),
(Image.Resampling.BILINEAR, 2),
(Image.Resampling.BILINEAR, 1.5),
(Image.Resampling.BICUBIC, 1),
]:
transformed = im.transform(size_up, self.transform, matrix_up, resample)
transformed = transformed.transform(
im.size, self.transform, matrix_down, resample
)
assert_image_similar(transformed, im, epsilon * epsilonscale)
def test_resize_1_1x(self):
self._test_resize(1.1, 6.9)
def test_resize_1_5x(self):
self._test_resize(1.5, 5.5)
def test_resize_2_0x(self):
self._test_resize(2.0, 5.5)
def test_resize_2_3x(self):
self._test_resize(2.3, 3.7)
def test_resize_2_5x(self):
self._test_resize(2.5, 3.7)
def _test_translate(self, x, y, epsilonscale):
),
)
def test_translate(self, x, y, epsilon_scale, resample, epsilon):
im = self._test_image()
size_up = int(round(im.width + x)), int(round(im.height + y))
matrix_up = [1, 0, -x, 0, 1, -y, 0, 0]
matrix_down = [1, 0, x, 0, 1, y, 0, 0]
for resample, epsilon in [
(Image.Resampling.NEAREST, 0),
(Image.Resampling.BILINEAR, 1.5),
(Image.Resampling.BICUBIC, 1),
]:
transformed = im.transform(size_up, self.transform, matrix_up, resample)
transformed = transformed.transform(
im.size, self.transform, matrix_down, resample
)
assert_image_similar(transformed, im, epsilon * epsilonscale)
def test_translate_0_1(self):
self._test_translate(0.1, 0, 3.7)
def test_translate_0_6(self):
self._test_translate(0.6, 0, 9.1)
def test_translate_50(self):
self._test_translate(50, 50, 0)
transformed = im.transform(size_up, self.transform, matrix_up, resample)
transformed = transformed.transform(
im.size, self.transform, matrix_down, resample
)
assert_image_similar(transformed, im, epsilon * epsilon_scale)
class TestImageTransformPerspective(TestImageTransformAffine):

View File

@ -1,3 +1,5 @@
import pytest
from PIL.Image import Transpose
from . import helper
@ -9,157 +11,136 @@ HOPPER = {
}
def test_flip_left_right():
def transpose(mode):
im = HOPPER[mode]
out = im.transpose(Transpose.FLIP_LEFT_RIGHT)
assert out.mode == mode
assert out.size == im.size
@pytest.mark.parametrize("mode", HOPPER)
def test_flip_left_right(mode):
im = HOPPER[mode]
out = im.transpose(Transpose.FLIP_LEFT_RIGHT)
assert out.mode == mode
assert out.size == im.size
x, y = im.size
assert im.getpixel((1, 1)) == out.getpixel((x - 2, 1))
assert im.getpixel((x - 2, 1)) == out.getpixel((1, 1))
assert im.getpixel((1, y - 2)) == out.getpixel((x - 2, y - 2))
assert im.getpixel((x - 2, y - 2)) == out.getpixel((1, y - 2))
for mode in HOPPER:
transpose(mode)
x, y = im.size
assert im.getpixel((1, 1)) == out.getpixel((x - 2, 1))
assert im.getpixel((x - 2, 1)) == out.getpixel((1, 1))
assert im.getpixel((1, y - 2)) == out.getpixel((x - 2, y - 2))
assert im.getpixel((x - 2, y - 2)) == out.getpixel((1, y - 2))
def test_flip_top_bottom():
def transpose(mode):
im = HOPPER[mode]
out = im.transpose(Transpose.FLIP_TOP_BOTTOM)
assert out.mode == mode
assert out.size == im.size
@pytest.mark.parametrize("mode", HOPPER)
def test_flip_top_bottom(mode):
im = HOPPER[mode]
out = im.transpose(Transpose.FLIP_TOP_BOTTOM)
assert out.mode == mode
assert out.size == im.size
x, y = im.size
assert im.getpixel((1, 1)) == out.getpixel((1, y - 2))
assert im.getpixel((x - 2, 1)) == out.getpixel((x - 2, y - 2))
assert im.getpixel((1, y - 2)) == out.getpixel((1, 1))
assert im.getpixel((x - 2, y - 2)) == out.getpixel((x - 2, 1))
for mode in HOPPER:
transpose(mode)
x, y = im.size
assert im.getpixel((1, 1)) == out.getpixel((1, y - 2))
assert im.getpixel((x - 2, 1)) == out.getpixel((x - 2, y - 2))
assert im.getpixel((1, y - 2)) == out.getpixel((1, 1))
assert im.getpixel((x - 2, y - 2)) == out.getpixel((x - 2, 1))
def test_rotate_90():
def transpose(mode):
im = HOPPER[mode]
out = im.transpose(Transpose.ROTATE_90)
assert out.mode == mode
assert out.size == im.size[::-1]
@pytest.mark.parametrize("mode", HOPPER)
def test_rotate_90(mode):
im = HOPPER[mode]
out = im.transpose(Transpose.ROTATE_90)
assert out.mode == mode
assert out.size == im.size[::-1]
x, y = im.size
assert im.getpixel((1, 1)) == out.getpixel((1, x - 2))
assert im.getpixel((x - 2, 1)) == out.getpixel((1, 1))
assert im.getpixel((1, y - 2)) == out.getpixel((y - 2, x - 2))
assert im.getpixel((x - 2, y - 2)) == out.getpixel((y - 2, 1))
for mode in HOPPER:
transpose(mode)
x, y = im.size
assert im.getpixel((1, 1)) == out.getpixel((1, x - 2))
assert im.getpixel((x - 2, 1)) == out.getpixel((1, 1))
assert im.getpixel((1, y - 2)) == out.getpixel((y - 2, x - 2))
assert im.getpixel((x - 2, y - 2)) == out.getpixel((y - 2, 1))
def test_rotate_180():
def transpose(mode):
im = HOPPER[mode]
out = im.transpose(Transpose.ROTATE_180)
assert out.mode == mode
assert out.size == im.size
@pytest.mark.parametrize("mode", HOPPER)
def test_rotate_180(mode):
im = HOPPER[mode]
out = im.transpose(Transpose.ROTATE_180)
assert out.mode == mode
assert out.size == im.size
x, y = im.size
assert im.getpixel((1, 1)) == out.getpixel((x - 2, y - 2))
assert im.getpixel((x - 2, 1)) == out.getpixel((1, y - 2))
assert im.getpixel((1, y - 2)) == out.getpixel((x - 2, 1))
assert im.getpixel((x - 2, y - 2)) == out.getpixel((1, 1))
for mode in HOPPER:
transpose(mode)
x, y = im.size
assert im.getpixel((1, 1)) == out.getpixel((x - 2, y - 2))
assert im.getpixel((x - 2, 1)) == out.getpixel((1, y - 2))
assert im.getpixel((1, y - 2)) == out.getpixel((x - 2, 1))
assert im.getpixel((x - 2, y - 2)) == out.getpixel((1, 1))
def test_rotate_270():
def transpose(mode):
im = HOPPER[mode]
out = im.transpose(Transpose.ROTATE_270)
assert out.mode == mode
assert out.size == im.size[::-1]
@pytest.mark.parametrize("mode", HOPPER)
def test_rotate_270(mode):
im = HOPPER[mode]
out = im.transpose(Transpose.ROTATE_270)
assert out.mode == mode
assert out.size == im.size[::-1]
x, y = im.size
assert im.getpixel((1, 1)) == out.getpixel((y - 2, 1))
assert im.getpixel((x - 2, 1)) == out.getpixel((y - 2, x - 2))
assert im.getpixel((1, y - 2)) == out.getpixel((1, 1))
assert im.getpixel((x - 2, y - 2)) == out.getpixel((1, x - 2))
for mode in HOPPER:
transpose(mode)
x, y = im.size
assert im.getpixel((1, 1)) == out.getpixel((y - 2, 1))
assert im.getpixel((x - 2, 1)) == out.getpixel((y - 2, x - 2))
assert im.getpixel((1, y - 2)) == out.getpixel((1, 1))
assert im.getpixel((x - 2, y - 2)) == out.getpixel((1, x - 2))
def test_transpose():
def transpose(mode):
im = HOPPER[mode]
out = im.transpose(Transpose.TRANSPOSE)
assert out.mode == mode
assert out.size == im.size[::-1]
@pytest.mark.parametrize("mode", HOPPER)
def test_transpose(mode):
im = HOPPER[mode]
out = im.transpose(Transpose.TRANSPOSE)
assert out.mode == mode
assert out.size == im.size[::-1]
x, y = im.size
assert im.getpixel((1, 1)) == out.getpixel((1, 1))
assert im.getpixel((x - 2, 1)) == out.getpixel((1, x - 2))
assert im.getpixel((1, y - 2)) == out.getpixel((y - 2, 1))
assert im.getpixel((x - 2, y - 2)) == out.getpixel((y - 2, x - 2))
for mode in HOPPER:
transpose(mode)
x, y = im.size
assert im.getpixel((1, 1)) == out.getpixel((1, 1))
assert im.getpixel((x - 2, 1)) == out.getpixel((1, x - 2))
assert im.getpixel((1, y - 2)) == out.getpixel((y - 2, 1))
assert im.getpixel((x - 2, y - 2)) == out.getpixel((y - 2, x - 2))
def test_tranverse():
def transpose(mode):
im = HOPPER[mode]
out = im.transpose(Transpose.TRANSVERSE)
assert out.mode == mode
assert out.size == im.size[::-1]
@pytest.mark.parametrize("mode", HOPPER)
def test_tranverse(mode):
im = HOPPER[mode]
out = im.transpose(Transpose.TRANSVERSE)
assert out.mode == mode
assert out.size == im.size[::-1]
x, y = im.size
assert im.getpixel((1, 1)) == out.getpixel((y - 2, x - 2))
assert im.getpixel((x - 2, 1)) == out.getpixel((y - 2, 1))
assert im.getpixel((1, y - 2)) == out.getpixel((1, x - 2))
assert im.getpixel((x - 2, y - 2)) == out.getpixel((1, 1))
for mode in HOPPER:
transpose(mode)
x, y = im.size
assert im.getpixel((1, 1)) == out.getpixel((y - 2, x - 2))
assert im.getpixel((x - 2, 1)) == out.getpixel((y - 2, 1))
assert im.getpixel((1, y - 2)) == out.getpixel((1, x - 2))
assert im.getpixel((x - 2, y - 2)) == out.getpixel((1, 1))
def test_roundtrip():
for mode in HOPPER:
im = HOPPER[mode]
@pytest.mark.parametrize("mode", HOPPER)
def test_roundtrip(mode):
im = HOPPER[mode]
def transpose(first, second):
return im.transpose(first).transpose(second)
def transpose(first, second):
return im.transpose(first).transpose(second)
assert_image_equal(
im, transpose(Transpose.FLIP_LEFT_RIGHT, Transpose.FLIP_LEFT_RIGHT)
)
assert_image_equal(
im, transpose(Transpose.FLIP_TOP_BOTTOM, Transpose.FLIP_TOP_BOTTOM)
)
assert_image_equal(im, transpose(Transpose.ROTATE_90, Transpose.ROTATE_270))
assert_image_equal(im, transpose(Transpose.ROTATE_180, Transpose.ROTATE_180))
assert_image_equal(
im.transpose(Transpose.TRANSPOSE),
transpose(Transpose.ROTATE_90, Transpose.FLIP_TOP_BOTTOM),
)
assert_image_equal(
im.transpose(Transpose.TRANSPOSE),
transpose(Transpose.ROTATE_270, Transpose.FLIP_LEFT_RIGHT),
)
assert_image_equal(
im.transpose(Transpose.TRANSVERSE),
transpose(Transpose.ROTATE_90, Transpose.FLIP_LEFT_RIGHT),
)
assert_image_equal(
im.transpose(Transpose.TRANSVERSE),
transpose(Transpose.ROTATE_270, Transpose.FLIP_TOP_BOTTOM),
)
assert_image_equal(
im.transpose(Transpose.TRANSVERSE),
transpose(Transpose.ROTATE_180, Transpose.TRANSPOSE),
)
assert_image_equal(
im, transpose(Transpose.FLIP_LEFT_RIGHT, Transpose.FLIP_LEFT_RIGHT)
)
assert_image_equal(
im, transpose(Transpose.FLIP_TOP_BOTTOM, Transpose.FLIP_TOP_BOTTOM)
)
assert_image_equal(im, transpose(Transpose.ROTATE_90, Transpose.ROTATE_270))
assert_image_equal(im, transpose(Transpose.ROTATE_180, Transpose.ROTATE_180))
assert_image_equal(
im.transpose(Transpose.TRANSPOSE),
transpose(Transpose.ROTATE_90, Transpose.FLIP_TOP_BOTTOM),
)
assert_image_equal(
im.transpose(Transpose.TRANSPOSE),
transpose(Transpose.ROTATE_270, Transpose.FLIP_LEFT_RIGHT),
)
assert_image_equal(
im.transpose(Transpose.TRANSVERSE),
transpose(Transpose.ROTATE_90, Transpose.FLIP_LEFT_RIGHT),
)
assert_image_equal(
im.transpose(Transpose.TRANSVERSE),
transpose(Transpose.ROTATE_270, Transpose.FLIP_TOP_BOTTOM),
)
assert_image_equal(
im.transpose(Transpose.TRANSVERSE),
transpose(Transpose.ROTATE_180, Transpose.TRANSPOSE),
)

View File

@ -625,20 +625,20 @@ def test_polygon2():
helper_polygon(POINTS2)
def test_polygon_kite():
@pytest.mark.parametrize("mode", ("RGB", "L"))
def test_polygon_kite(mode):
# Test drawing lines of different gradients (dx>dy, dy>dx) and
# vertical (dx==0) and horizontal (dy==0) lines
for mode in ["RGB", "L"]:
# Arrange
im = Image.new(mode, (W, H))
draw = ImageDraw.Draw(im)
expected = f"Tests/images/imagedraw_polygon_kite_{mode}.png"
# Arrange
im = Image.new(mode, (W, H))
draw = ImageDraw.Draw(im)
expected = f"Tests/images/imagedraw_polygon_kite_{mode}.png"
# Act
draw.polygon(KITE_POINTS, fill="blue", outline="yellow")
# Act
draw.polygon(KITE_POINTS, fill="blue", outline="yellow")
# Assert
assert_image_equal_tofile(im, expected)
# Assert
assert_image_equal_tofile(im, expected)
def test_polygon_1px_high():
@ -1314,6 +1314,23 @@ def test_stroke_multiline():
assert_image_similar_tofile(im, "Tests/images/imagedraw_stroke_multiline.png", 3.3)
def test_setting_default_font():
# Arrange
im = Image.new("RGB", (100, 250))
draw = ImageDraw.Draw(im)
font = ImageFont.truetype("Tests/fonts/FreeMono.ttf", 120)
# Act
ImageDraw.ImageDraw.font = font
# Assert
try:
assert draw.getfont() == font
finally:
ImageDraw.ImageDraw.font = None
assert isinstance(draw.getfont(), ImageFont.ImageFont)
def test_same_color_outline():
# Prepare shape
x0, y0 = 5, 5

File diff suppressed because it is too large

View File

@ -345,11 +345,15 @@ def test_exif_transpose():
check(orientation_im)
# Orientation from "XML:com.adobe.xmp" info key
with Image.open("Tests/images/xmp_tags_orientation.png") as im:
assert im.getexif()[0x0112] == 3
for suffix in ("", "_exiftool"):
with Image.open("Tests/images/xmp_tags_orientation" + suffix + ".png") as im:
assert im.getexif()[0x0112] == 3
transposed_im = ImageOps.exif_transpose(im)
assert 0x0112 not in transposed_im.getexif()
transposed_im = ImageOps.exif_transpose(im)
assert 0x0112 not in transposed_im.getexif()
transposed_im._reload_exif()
assert 0x0112 not in transposed_im.getexif()
# Orientation from "Raw profile type exif" info key
# This test image has been manually hexedited from exif_imagemagick.png

View File

@ -16,32 +16,32 @@ if ImageQt.qt_is_installed:
from PIL.ImageQt import QImage
def test_sanity(tmp_path):
for mode in ("RGB", "RGBA", "L", "P", "1"):
src = hopper(mode)
data = ImageQt.toqimage(src)
@pytest.mark.parametrize("mode", ("RGB", "RGBA", "L", "P", "1"))
def test_sanity(mode, tmp_path):
src = hopper(mode)
data = ImageQt.toqimage(src)
assert isinstance(data, QImage)
assert not data.isNull()
assert isinstance(data, QImage)
assert not data.isNull()
# reload directly from the qimage
rt = ImageQt.fromqimage(data)
if mode in ("L", "P", "1"):
assert_image_equal(rt, src.convert("RGB"))
else:
assert_image_equal(rt, src)
# reload directly from the qimage
rt = ImageQt.fromqimage(data)
if mode in ("L", "P", "1"):
assert_image_equal(rt, src.convert("RGB"))
else:
assert_image_equal(rt, src)
if mode == "1":
# BW appears to not save correctly on QT4 and QT5
# kicks out errors on console:
# libpng warning: Invalid color type/bit depth combination
# in IHDR
# libpng error: Invalid IHDR data
continue
if mode == "1":
# BW appears to not save correctly on QT5
# kicks out errors on console:
# libpng warning: Invalid color type/bit depth combination
# in IHDR
# libpng error: Invalid IHDR data
return
# Test saving the file
tempfile = str(tmp_path / f"temp_{mode}.png")
data.save(tempfile)
# Test saving the file
tempfile = str(tmp_path / f"temp_{mode}.png")
data.save(tempfile)
# Check that it actually worked.
assert_image_equal_tofile(src, tempfile)
# Check that it actually worked.
assert_image_equal_tofile(src, tempfile)

View File

@ -1,7 +1,7 @@
#!/bin/bash
# install libimagequant
archive=libimagequant-4.0.0
archive=libimagequant-4.0.4
./download-and-extract.sh $archive https://raw.githubusercontent.com/python-pillow/pillow-depends/main/$archive.tar.gz

View File

@ -1,7 +1,7 @@
#!/bin/bash
# install webp
archive=libwebp-1.2.3
archive=libwebp-1.2.4
./download-and-extract.sh $archive https://raw.githubusercontent.com/python-pillow/pillow-depends/main/$archive.tar.gz

View File

@ -837,6 +837,24 @@ Pillow reads and writes TGA images containing ``L``, ``LA``, ``P``,
``RGB``, and ``RGBA`` data. Pillow can read and write both uncompressed and
run-length encoded TGAs.
The :py:meth:`~PIL.Image.Image.save` method can take the following keyword arguments:
**compression**
If set to "tga_rle", the file will be run-length encoded.
.. versionadded:: 5.3.0
**id_section**
The identification field.
.. versionadded:: 5.3.0
**orientation**
If present and a positive number, the first pixel is for the top left corner,
rather than the bottom left corner.
.. versionadded:: 5.3.0
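As a brief sketch of combining these keywords (the output path and ID text are arbitrary example values; ``id_section`` is written as bytes)::

    from PIL import Image

    im = Image.new("RGB", (16, 16), "red")
    im.save(
        "out.tga",
        compression="tga_rle",           # run-length encode the pixel data
        id_section=b"made with Pillow",  # identification field
        orientation=1,                   # positive value: origin in the top left corner
    )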
TIFF
^^^^
@ -968,7 +986,7 @@ The :py:meth:`~PIL.Image.Image.save` method can take the following keyword argum
methods are: :data:`None`, ``"group3"``, ``"group4"``, ``"jpeg"``, ``"lzma"``,
``"packbits"``, ``"tiff_adobe_deflate"``, ``"tiff_ccitt"``, ``"tiff_lzw"``,
``"tiff_raw_16"``, ``"tiff_sgilog"``, ``"tiff_sgilog24"``, ``"tiff_thunderscan"``,
``"webp"`, ``"zstd"``
``"webp"``, ``"zstd"``
**quality**
The image quality for JPEG compression, on a scale from 0 (worst) to 100
@ -1209,6 +1227,17 @@ image when first opened. The :py:meth:`~PIL.Image.Image.seek` and :py:meth:`~PIL
methods may be used to read other pictures from the file. The pictures are
zero-indexed and random access is supported.
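For instance, a later picture can be selected before reading pixel data - a minimal sketch, assuming a multi-picture file such as ``frozenpond.mpo``::

    from PIL import Image

    with Image.open("frozenpond.mpo") as im:
        im.seek(1)        # jump straight to the second picture
        print(im.tell())  # 1
        im.load()         # decode only the selected picture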
When calling :py:meth:`~PIL.Image.Image.save` to write an MPO file, by default
only the first frame of a multiframe image will be saved. If the ``save_all``
argument is present and true, then all frames will be saved, and the following
option will also be available.
**append_images**
A list of images to append as additional pictures. Each of the
images in the list can be single or multiframe images.
.. versionadded:: 9.3.0
PCD
^^^

View File

@ -15,35 +15,13 @@ Python Support
Pillow supports these Python versions.
+----------------------+-----+-----+-----+-----+-----+-----+-----+-----+
| Python |3.10 | 3.9 | 3.8 | 3.7 | 3.6 | 3.5 | 3.4 | 2.7 |
+======================+=====+=====+=====+=====+=====+=====+=====+=====+
| Pillow >= 9.0 | Yes | Yes | Yes | Yes | | | | |
+----------------------+-----+-----+-----+-----+-----+-----+-----+-----+
| Pillow 8.3.2 - 8.4 | Yes | Yes | Yes | Yes | Yes | | | |
+----------------------+-----+-----+-----+-----+-----+-----+-----+-----+
| Pillow 8.0 - 8.3.1 | | Yes | Yes | Yes | Yes | | | |
+----------------------+-----+-----+-----+-----+-----+-----+-----+-----+
| Pillow 7.0 - 7.2 | | | Yes | Yes | Yes | Yes | | |
+----------------------+-----+-----+-----+-----+-----+-----+-----+-----+
| Pillow 6.2.1 - 6.2.2 | | | Yes | Yes | Yes | Yes | | Yes |
+----------------------+-----+-----+-----+-----+-----+-----+-----+-----+
| Pillow 6.0 - 6.2.0 | | | | Yes | Yes | Yes | | Yes |
+----------------------+-----+-----+-----+-----+-----+-----+-----+-----+
| Pillow 5.2 - 5.4 | | | | Yes | Yes | Yes | Yes | Yes |
+----------------------+-----+-----+-----+-----+-----+-----+-----+-----+
.. csv-table:: Newer versions
:file: newer-versions.csv
:header-rows: 1
+------------------+-----+-----+-----+-----+-----+-----+-----+-----+-----+
| Python | 3.6 | 3.5 | 3.4 | 3.3 | 3.2 | 2.7 | 2.6 | 2.5 | 2.4 |
+==================+=====+=====+=====+=====+=====+=====+=====+=====+=====+
| Pillow 5.0 - 5.1 | Yes | Yes | Yes | | | Yes | | | |
+------------------+-----+-----+-----+-----+-----+-----+-----+-----+-----+
| Pillow 4 | Yes | Yes | Yes | Yes | | Yes | | | |
+------------------+-----+-----+-----+-----+-----+-----+-----+-----+-----+
| Pillow 2 - 3 | | Yes | Yes | Yes | Yes | Yes | Yes | | |
+------------------+-----+-----+-----+-----+-----+-----+-----+-----+-----+
| Pillow < 2 | | | | | | Yes | Yes | Yes | Yes |
+------------------+-----+-----+-----+-----+-----+-----+-----+-----+-----+
.. csv-table:: Older versions
:file: older-versions.csv
:header-rows: 1
Basic Installation
------------------
@ -188,7 +166,7 @@ Many of Pillow's features require external libraries:
* **libimagequant** provides improved color quantization
* Pillow has been tested with libimagequant **2.6-4.0**
* Pillow has been tested with libimagequant **2.6-4.0.4**
* Libimagequant is licensed GPLv3, which is more restrictive than
the Pillow license, therefore we will not be distributing binaries
with libimagequant support enabled.
@ -389,7 +367,7 @@ In Alpine, the command is::
.. Note:: ``redhat-rpm-config`` is required on Fedora 23, but not earlier versions.
Prerequisites for **Ubuntu 16.04 LTS - 20.04 LTS** are installed with::
Prerequisites for **Ubuntu 16.04 LTS - 22.04 LTS** are installed with::
sudo apt-get install libtiff5-dev libjpeg8-dev libopenjp2-7-dev zlib1g-dev \
libfreetype6-dev liblcms2-dev libwebp-dev tcl8.6-dev tk8.6-dev python3-tk \

docs/newer-versions.csv Normal file
View File

@ -0,0 +1,6 @@
Python,3.11,3.10,3.9,3.8,3.7,3.6,3.5
Pillow >= 9.3,Yes,Yes,Yes,Yes,Yes,,
Pillow 9.0 - 9.2,,Yes,Yes,Yes,Yes,,
Pillow 8.3.2 - 8.4,,Yes,Yes,Yes,Yes,Yes,
Pillow 8.0 - 8.3.1,,,Yes,Yes,Yes,Yes,
Pillow 7.0 - 7.2,,,,Yes,Yes,Yes,Yes

docs/older-versions.csv Normal file
View File

@ -0,0 +1,8 @@
Python,3.8,3.7,3.6,3.5,3.4,3.3,3.2,2.7,2.6,2.5,2.4
Pillow 6.2.1 - 6.2.2,Yes,Yes,Yes,Yes,,,,Yes,,,
Pillow 6.0 - 6.2.0,,Yes,Yes,Yes,,,,Yes,,,
Pillow 5.2 - 5.4,,Yes,Yes,Yes,Yes,,,Yes,,,
Pillow 5.0 - 5.1,,,Yes,Yes,Yes,,,Yes,,,
Pillow 4,,,Yes,Yes,Yes,Yes,,Yes,,,
Pillow 2 - 3,,,,Yes,Yes,Yes,Yes,Yes,Yes,,
Pillow < 2,,,,,,,,Yes,Yes,Yes,Yes

View File

@ -53,9 +53,9 @@ Functions
To protect against potential DOS attacks caused by "`decompression bombs`_" (i.e. malicious files
which decompress into a huge amount of data and are designed to crash or cause disruption by using up
a lot of memory), Pillow will issue a ``DecompressionBombWarning`` if the number of pixels in an
image is over a certain limit, :py:data:`PIL.Image.MAX_IMAGE_PIXELS`.
image is over a certain limit, :py:data:`MAX_IMAGE_PIXELS`.
This threshold can be changed by setting :py:data:`PIL.Image.MAX_IMAGE_PIXELS`. It can be disabled
This threshold can be changed by setting :py:data:`MAX_IMAGE_PIXELS`. It can be disabled
by setting ``Image.MAX_IMAGE_PIXELS = None``.
If desired, the warning can be turned into an error with
@ -63,7 +63,7 @@ Functions
``warnings.simplefilter('ignore', Image.DecompressionBombWarning)``. See also
`the logging documentation`_ to have warnings output to the logging facility instead of stderr.
If the number of pixels is greater than twice :py:data:`PIL.Image.MAX_IMAGE_PIXELS`, then a
If the number of pixels is greater than twice :py:data:`MAX_IMAGE_PIXELS`, then a
``DecompressionBombError`` will be raised instead.
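A rough sketch of these settings (the pixel limit shown is an arbitrary example value)::

    import warnings

    from PIL import Image

    # Raise the limit for trusted, very large inputs,
    # or disable the check entirely with None.
    Image.MAX_IMAGE_PIXELS = 300_000_000
    # Image.MAX_IMAGE_PIXELS = None

    # Escalate the warning to an error instead.
    warnings.simplefilter("error", Image.DecompressionBombWarning)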
.. _decompression bombs: https://en.wikipedia.org/wiki/Zip_bomb
@ -255,7 +255,7 @@ This rotates the input image by ``theta`` degrees counter clockwise:
.. automethod:: PIL.Image.Image.transform
.. automethod:: PIL.Image.Image.transpose
This flips the input image by using the :data:`PIL.Image.Transpose.FLIP_LEFT_RIGHT`
This flips the input image by using the :data:`Transpose.FLIP_LEFT_RIGHT`
method.
.. code-block:: python

View File

@ -64,7 +64,7 @@ Fonts
PIL can use bitmap fonts or OpenType/TrueType fonts.
Bitmap fonts are stored in PILs own format, where each font typically consists
Bitmap fonts are stored in PIL's own format, where each font typically consists
of two files, one named .pil and the other usually named .pbm. The former
contains font metrics, the latter raster data.
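A short sketch of loading each kind of font (the bitmap font path is illustrative only; ``FreeMono.ttf`` ships with the test suite)::

    from PIL import ImageFont

    # Bitmap font: point at the .pil metrics file;
    # the matching raster file is located automatically.
    bitmap_font = ImageFont.load("path/to/font.pil")

    # Scalable OpenType/TrueType font at a given size.
    truetype_font = ImageFont.truetype("Tests/fonts/FreeMono.ttf", 24)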
@ -146,6 +146,11 @@ Methods
Get the current default font.
To set the default font for all future ImageDraw instances::
from PIL import ImageDraw, ImageFont
ImageDraw.ImageDraw.font = ImageFont.truetype("Tests/fonts/FreeMono.ttf")
:returns: An image font.
.. py:method:: ImageDraw.arc(xy, start, end, fill=None, width=0)

View File

@ -73,7 +73,7 @@ Access using negative indexes is also possible.
Modifies the pixel at x,y. The color is given as a single
numerical value for single band images, and a tuple for
multi-band images. In addition to this, RGB and RGBA tuples
are accepted for P images.
are accepted for P and PA images.
:param xy: The pixel coordinate, given as (x, y).
:param color: The pixel value according to its mode, e.g. tuple (r, g, b) for RGB mode.
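A minimal sketch of the tuple forms described above, using :py:meth:`~PIL.Image.Image.putpixel`, which accepts the same values::

    from PIL import Image

    im = Image.new("P", (8, 8))
    # An RGB tuple is converted to a palette index before being stored.
    im.putpixel((0, 0), (255, 0, 0))

    im_pa = im.convert("PA")
    # For PA images, an optional fourth value sets the alpha band (255 if omitted).
    im_pa.putpixel((0, 0), (255, 0, 0, 128))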

View File

@ -0,0 +1,69 @@
9.3.0
-----
Backwards Incompatible Changes
==============================
TODO
^^^^
Deprecations
============
TODO
^^^^
TODO
API Changes
===========
TODO
^^^^
TODO
API Additions
=============
Allow default ImageDraw font to be set
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Rather than specifying a font when calling text-related ImageDraw methods, or
setting a font on each ImageDraw instance, the default font can now be set for
all future ImageDraw operations::
from PIL import ImageDraw, ImageFont
ImageDraw.ImageDraw.font = ImageFont.truetype("Tests/fonts/FreeMono.ttf")
Saving multiple MPO frames
^^^^^^^^^^^^^^^^^^^^^^^^^^
Multiple MPO frames can now be saved. Using the ``save_all`` argument, all of
an image's frames will be saved to file::
from PIL import Image
im = Image.open("frozenpond.mpo")
im.save(out, save_all=True)
Additional images can also be appended when saving, by combining the
``save_all`` argument with the ``append_images`` argument::
im.save(out, save_all=True, append_images=[im1, im2, ...])
Security
========
TODO
^^^^
TODO
Other Changes
=============
Added DDS ATI1 and ATI2 reading
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Support has been added to read the ATI1 and ATI2 formats of DDS images.
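A quick sketch of reading them (the file names are hypothetical); ATI1 data decodes to a single-band ``L`` image and ATI2 to ``RGB``::

    from PIL import Image

    with Image.open("bc4_ati1.dds") as im:
        print(im.pixel_format, im.mode)  # BC4 L

    with Image.open("bc5_ati2.dds") as im:
        print(im.pixel_format, im.mode)  # BC5 RGB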

View File

@ -14,6 +14,7 @@ expected to be backported to earlier versions.
.. toctree::
:maxdepth: 2
9.3.0
9.2.0
9.1.1
9.1.0

View File

@ -34,7 +34,11 @@ project_urls =
Twitter=https://twitter.com/PythonPillow
[options]
packages = PIL
python_requires = >=3.7
include_package_data = True
package_dir =
= src
[options.extras_require]
docs =

View File

@ -15,7 +15,9 @@ import subprocess
import sys
import warnings
from setuptools import Extension, setup
from setuptools import Extension
from setuptools import __version__ as setuptools_version
from setuptools import setup
from setuptools.command.build_ext import build_ext
@ -850,6 +852,7 @@ class pil_build_ext(build_ext):
sys.platform == "win32"
and sys.version_info < (3, 9)
and not (PLATFORM_PYPY or PLATFORM_MINGW)
and int(setuptools_version.split(".")[0]) < 60
):
defs.append(("PILLOW_VERSION", f'"\\"{PILLOW_VERSION}\\""'))
else:
@ -996,9 +999,6 @@ try:
version=PILLOW_VERSION,
cmdclass={"build_ext": pil_build_ext},
ext_modules=ext_modules,
include_package_data=True,
packages=["PIL"],
package_dir={"": "src"},
zip_safe=not (debug_build() or PLATFORM_MINGW),
)
except RequiredDependencyException as err:

View File

@ -375,6 +375,16 @@ def _save(im, fp, filename, bitmap_header=True):
header = 40 # or 64 for OS/2 version 2
image = stride * im.size[1]
if im.mode == "1":
palette = b"".join(o8(i) * 4 for i in (0, 255))
elif im.mode == "L":
palette = b"".join(o8(i) * 4 for i in range(256))
elif im.mode == "P":
palette = im.im.getpalette("RGB", "BGRX")
colors = len(palette) // 4
else:
palette = None
# bitmap header
if bitmap_header:
offset = 14 + header + colors * 4
@ -405,14 +415,8 @@ def _save(im, fp, filename, bitmap_header=True):
fp.write(b"\0" * (header - 40)) # padding (for OS/2 format)
if im.mode == "1":
for i in (0, 255):
fp.write(o8(i) * 4)
elif im.mode == "L":
for i in range(256):
fp.write(o8(i) * 4)
elif im.mode == "P":
fp.write(im.im.getpalette("RGB", "BGRX"))
if palette:
fp.write(palette)
ImageFile._save(im, fp, [("raw", (0, 0) + im.size, 0, (rawmode, stride, -1))])

View File

@ -156,6 +156,14 @@ class DdsImageFile(ImageFile.ImageFile):
elif fourcc == b"DXT5":
self.pixel_format = "DXT5"
n = 3
elif fourcc == b"ATI1":
self.pixel_format = "BC4"
n = 4
self.mode = "L"
elif fourcc == b"ATI2":
self.pixel_format = "BC5"
n = 5
self.mode = "RGB"
elif fourcc == b"BC5S":
self.pixel_format = "BC5S"
n = 5

View File

@ -288,11 +288,14 @@ class EpsImageFile(ImageFile.ImageFile):
# Encoded bitmapped image.
x, y, bi, mo = s[11:].split(None, 7)[:4]
if int(bi) != 8:
break
try:
self.mode = self.mode_map[int(mo)]
except ValueError:
if int(bi) == 1:
self.mode = "1"
elif int(bi) == 8:
try:
self.mode = self.mode_map[int(mo)]
except ValueError:
break
else:
break
self._size = int(x), int(y)

View File

@ -185,8 +185,6 @@ class GifImageFile(ImageFile.ImageFile):
if not s or s == b";":
raise EOFError
self.tile = []
palette = None
info = {}
@ -295,6 +293,8 @@ class GifImageFile(ImageFile.ImageFile):
if not update_image:
return
self.tile = []
if self.dispose:
self.im.paste(self.dispose, self.dispose_extent)

View File

@ -1404,9 +1404,9 @@ class Image:
if 0x0112 not in self._exif:
xmp_tags = self.info.get("XML:com.adobe.xmp")
if xmp_tags:
match = re.search(r'tiff:Orientation="([0-9])"', xmp_tags)
match = re.search(r'tiff:Orientation(="|>)([0-9])', xmp_tags)
if match:
self._exif[0x0112] = int(match[1])
self._exif[0x0112] = int(match[2])
return self._exif
@ -1839,7 +1839,7 @@ class Image:
Modifies the pixel at the given position. The color is given as
a single numerical value for single-band images, and a tuple for
multi-band images. In addition to this, RGB and RGBA tuples are
accepted for P images.
accepted for P and PA images.
Note that this method is relatively slow. For more extensive changes,
use :py:meth:`~PIL.Image.Image.paste` or the :py:mod:`~PIL.ImageDraw`
@ -1864,12 +1864,17 @@ class Image:
return self.pyaccess.putpixel(xy, value)
if (
self.mode == "P"
self.mode in ("P", "PA")
and isinstance(value, (list, tuple))
and len(value) in [3, 4]
):
# RGB or RGBA value for a P image
# RGB or RGBA value for a P or PA image
if self.mode == "PA":
alpha = value[3] if len(value) == 4 else 255
value = value[:3]
value = self.palette.getcolor(value, self)
if self.mode == "PA":
value = (value, alpha)
return self.im.putpixel(xy, value)
def remap_palette(self, dest_map, source_palette=None):
@ -1984,18 +1989,14 @@ class Image:
:param size: The requested size in pixels, as a 2-tuple:
(width, height).
:param resample: An optional resampling filter. This can be
one of :py:data:`PIL.Image.Resampling.NEAREST`,
:py:data:`PIL.Image.Resampling.BOX`,
:py:data:`PIL.Image.Resampling.BILINEAR`,
:py:data:`PIL.Image.Resampling.HAMMING`,
:py:data:`PIL.Image.Resampling.BICUBIC` or
:py:data:`PIL.Image.Resampling.LANCZOS`.
one of :py:data:`Resampling.NEAREST`, :py:data:`Resampling.BOX`,
:py:data:`Resampling.BILINEAR`, :py:data:`Resampling.HAMMING`,
:py:data:`Resampling.BICUBIC` or :py:data:`Resampling.LANCZOS`.
If the image has mode "1" or "P", it is always set to
:py:data:`PIL.Image.Resampling.NEAREST`.
If the image mode specifies a number of bits, such as "I;16", then the
default filter is :py:data:`PIL.Image.Resampling.NEAREST`.
Otherwise, the default filter is
:py:data:`PIL.Image.Resampling.BICUBIC`. See: :ref:`concept-filters`.
:py:data:`Resampling.NEAREST`. If the image mode specifies a number
of bits, such as "I;16", then the default filter is
:py:data:`Resampling.NEAREST`. Otherwise, the default filter is
:py:data:`Resampling.BICUBIC`. See: :ref:`concept-filters`.
:param box: An optional 4-tuple of floats providing
the source image region to be scaled.
The values must be within (0, 0, width, height) rectangle.
@ -2135,12 +2136,12 @@ class Image:
:param angle: In degrees counter clockwise.
:param resample: An optional resampling filter. This can be
one of :py:data:`PIL.Image.Resampling.NEAREST` (use nearest neighbour),
:py:data:`PIL.Image.BILINEAR` (linear interpolation in a 2x2
environment), or :py:data:`PIL.Image.Resampling.BICUBIC`
(cubic spline interpolation in a 4x4 environment).
If omitted, or if the image has mode "1" or "P", it is
set to :py:data:`PIL.Image.Resampling.NEAREST`. See :ref:`concept-filters`.
one of :py:data:`Resampling.NEAREST` (use nearest neighbour),
:py:data:`Resampling.BILINEAR` (linear interpolation in a 2x2
environment), or :py:data:`Resampling.BICUBIC` (cubic spline
interpolation in a 4x4 environment). If omitted, or if the image has
mode "1" or "P", it is set to :py:data:`Resampling.NEAREST`.
See :ref:`concept-filters`.
:param expand: Optional expansion flag. If true, expands the output
image to make it large enough to hold the entire rotated image.
If false or omitted, make the output image the same size as the
@ -2447,14 +2448,11 @@ class Image:
:param size: Requested size.
:param resample: Optional resampling filter. This can be one
of :py:data:`PIL.Image.Resampling.NEAREST`,
:py:data:`PIL.Image.Resampling.BOX`,
:py:data:`PIL.Image.Resampling.BILINEAR`,
:py:data:`PIL.Image.Resampling.HAMMING`,
:py:data:`PIL.Image.Resampling.BICUBIC` or
:py:data:`PIL.Image.Resampling.LANCZOS`.
If omitted, it defaults to :py:data:`PIL.Image.Resampling.BICUBIC`.
(was :py:data:`PIL.Image.Resampling.NEAREST` prior to version 2.5.0).
of :py:data:`Resampling.NEAREST`, :py:data:`Resampling.BOX`,
:py:data:`Resampling.BILINEAR`, :py:data:`Resampling.HAMMING`,
:py:data:`Resampling.BICUBIC` or :py:data:`Resampling.LANCZOS`.
If omitted, it defaults to :py:data:`Resampling.BICUBIC`.
(was :py:data:`Resampling.NEAREST` prior to version 2.5.0).
See: :ref:`concept-filters`.
:param reducing_gap: Apply optimization by resizing the image
in two steps. First, reducing the image by integer times
@ -2473,29 +2471,41 @@ class Image:
:returns: None
"""
self.load()
x, y = map(math.floor, size)
if x >= self.width and y >= self.height:
return
provided_size = tuple(map(math.floor, size))
def round_aspect(number, key):
return max(min(math.floor(number), math.ceil(number), key=key), 1)
def preserve_aspect_ratio():
def round_aspect(number, key):
return max(min(math.floor(number), math.ceil(number), key=key), 1)
# preserve aspect ratio
aspect = self.width / self.height
if x / y >= aspect:
x = round_aspect(y * aspect, key=lambda n: abs(aspect - n / y))
else:
y = round_aspect(
x / aspect, key=lambda n: 0 if n == 0 else abs(aspect - x / n)
)
size = (x, y)
x, y = provided_size
if x >= self.width and y >= self.height:
return
aspect = self.width / self.height
if x / y >= aspect:
x = round_aspect(y * aspect, key=lambda n: abs(aspect - n / y))
else:
y = round_aspect(
x / aspect, key=lambda n: 0 if n == 0 else abs(aspect - x / n)
)
return x, y
box = None
if reducing_gap is not None:
size = preserve_aspect_ratio()
if size is None:
return
res = self.draft(None, (size[0] * reducing_gap, size[1] * reducing_gap))
if res is not None:
box = res[1]
if box is None:
self.load()
# load() may have changed the size of the image
size = preserve_aspect_ratio()
if size is None:
return
if self.size != size:
im = self.resize(size, resample, box=box, reducing_gap=reducing_gap)
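A brief usage sketch of the reworked method: draft() may pre-shrink a JPEG before the final resize, and the aspect ratio is recomputed after loading (file names are assumptions):

```python
from PIL import Image

# thumbnail() works in place, preserves the aspect ratio, and for JPEGs may
# use draft() for a cheap initial reduction before the final resize.
with Image.open("photo.jpg") as im:
    im.thumbnail((128, 128))
    im.save("photo_thumb.jpg")
```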
@ -2525,11 +2535,11 @@ class Image:
:param size: The output size.
:param method: The transformation method. This is one of
:py:data:`PIL.Image.Transform.EXTENT` (cut out a rectangular subregion),
:py:data:`PIL.Image.Transform.AFFINE` (affine transform),
:py:data:`PIL.Image.Transform.PERSPECTIVE` (perspective transform),
:py:data:`PIL.Image.Transform.QUAD` (map a quadrilateral to a rectangle), or
:py:data:`PIL.Image.Transform.MESH` (map a number of source quadrilaterals
:py:data:`Transform.EXTENT` (cut out a rectangular subregion),
:py:data:`Transform.AFFINE` (affine transform),
:py:data:`Transform.PERSPECTIVE` (perspective transform),
:py:data:`Transform.QUAD` (map a quadrilateral to a rectangle), or
:py:data:`Transform.MESH` (map a number of source quadrilaterals
in one operation).
It may also be an :py:class:`~PIL.Image.ImageTransformHandler`
@ -2549,11 +2559,11 @@ class Image:
return method, data
:param data: Extra data to the transformation method.
:param resample: Optional resampling filter. It can be one of
:py:data:`PIL.Image.Resampling.NEAREST` (use nearest neighbour),
:py:data:`PIL.Image.Resampling.BILINEAR` (linear interpolation in a 2x2
environment), or :py:data:`PIL.Image.BICUBIC` (cubic spline
:py:data:`Resampling.NEAREST` (use nearest neighbour),
:py:data:`Resampling.BILINEAR` (linear interpolation in a 2x2
environment), or :py:data:`Resampling.BICUBIC` (cubic spline
interpolation in a 4x4 environment). If omitted, or if the image
has mode "1" or "P", it is set to :py:data:`PIL.Image.Resampling.NEAREST`.
has mode "1" or "P", it is set to :py:data:`Resampling.NEAREST`.
See: :ref:`concept-filters`.
:param fill: If ``method`` is an
:py:class:`~PIL.Image.ImageTransformHandler` object, this is one of
@ -2680,13 +2690,10 @@ class Image:
"""
Transpose image (flip or rotate in 90 degree steps)
:param method: One of :py:data:`PIL.Image.Transpose.FLIP_LEFT_RIGHT`,
:py:data:`PIL.Image.Transpose.FLIP_TOP_BOTTOM`,
:py:data:`PIL.Image.Transpose.ROTATE_90`,
:py:data:`PIL.Image.Transpose.ROTATE_180`,
:py:data:`PIL.Image.Transpose.ROTATE_270`,
:py:data:`PIL.Image.Transpose.TRANSPOSE` or
:py:data:`PIL.Image.Transpose.TRANSVERSE`.
:param method: One of :py:data:`Transpose.FLIP_LEFT_RIGHT`,
:py:data:`Transpose.FLIP_TOP_BOTTOM`, :py:data:`Transpose.ROTATE_90`,
:py:data:`Transpose.ROTATE_180`, :py:data:`Transpose.ROTATE_270`,
:py:data:`Transpose.TRANSPOSE` or :py:data:`Transpose.TRANSVERSE`.
:returns: Returns a flipped or rotated copy of this image.
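A short sketch using the enum spelling referenced above (the file name is an assumption):

```python
from PIL import Image

# Rotate 90 degrees without resampling.
with Image.open("hopper.png") as im:
    rotated = im.transpose(Image.Transpose.ROTATE_90)
```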
"""
View File
@ -46,6 +46,8 @@ directly.
class ImageDraw:
font = None
def __init__(self, im, mode=None):
"""
Create a drawing instance.
@ -86,12 +88,16 @@ class ImageDraw:
else:
self.fontmode = "L" # aliasing is okay for other modes
self.fill = 0
self.font = None
def getfont(self):
"""
Get the current default font.
To set the default font for all future ImageDraw instances::
from PIL import ImageDraw, ImageFont
ImageDraw.ImageDraw.font = ImageFont.truetype("Tests/fonts/FreeMono.ttf")
:returns: An image font."""
if not self.font:
# FIXME: should add a font repository
View File
@ -192,6 +192,9 @@ class ImageFile(Image.Image):
with open(self.filename) as fp:
self.map = mmap.mmap(fp.fileno(), 0, access=mmap.ACCESS_READ)
if offset + self.size[1] * args[1] > self.map.size():
# buffer is not large enough
raise OSError
self.im = Image.core.map_buffer(
self.map, self.size, decoder_name, offset, args
)
@ -499,9 +502,14 @@ def _save(im, fp, tile, bufsize=0):
try:
fh = fp.fileno()
fp.flush()
exc = None
except (AttributeError, io.UnsupportedOperation) as e:
exc = e
_encode_tile(im, fp, tile, bufsize, fh)
except (AttributeError, io.UnsupportedOperation) as exc:
_encode_tile(im, fp, tile, bufsize, None, exc)
if hasattr(fp, "flush"):
fp.flush()
def _encode_tile(im, fp, tile, bufsize, fh, exc=None):
for e, b, o, a in tile:
if o > 0:
fp.seek(o)
@ -526,8 +534,6 @@ def _save(im, fp, tile, bufsize=0):
raise OSError(f"encoder error {s} when writing image file") from exc
finally:
encoder.cleanup()
if hasattr(fp, "flush"):
fp.flush()
def _safe_read(fp, size):
View File
@ -906,10 +906,12 @@ def truetype(font=None, size=10, index=0, encoding="", layout_engine=None):
This function loads a font object from the given file or file-like
object, and creates a font object for a font of the given size.
Pillow uses FreeType to open font files. If you are opening many fonts
simultaneously on Windows, be aware that Windows limits the number of files
that can be open in C at once to 512. If you approach that limit, an
Pillow uses FreeType to open font files. On Windows, be aware that FreeType
will keep the file open as long as the FreeTypeFont object exists. Windows
limits the number of files that can be open in C at once to 512, so if many
fonts are opened simultaneously and that limit is approached, an
``OSError`` may be thrown, reporting that FreeType "cannot open resource".
A workaround would be to copy the file(s) into memory, and open that instead.
This function requires the _imagingft service.
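A sketch of the workaround mentioned above: read the font into memory so no OS-level file handle stays open (the font path is an assumption):

```python
import io

from PIL import ImageFont

# Copy the font file into memory; FreeType then reads from the BytesIO
# object and the file handle can be closed immediately.
with open("FreeMono.ttf", "rb") as f:
    font = ImageFont.truetype(io.BytesIO(f.read()), size=24)
```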
View File
@ -572,8 +572,11 @@ def solarize(image, threshold=128):
def exif_transpose(image):
"""
If an image has an EXIF Orientation tag, return a new image that is
transposed accordingly. Otherwise, return a copy of the image.
If an image has an EXIF Orientation tag, other than 1, return a new image
that is transposed accordingly. The new image will have the orientation
data removed.
Otherwise, return a copy of the image.
:param image: The image to transpose.
:return: An image.
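Typical usage, assuming a camera JPEG that carries an Orientation tag:

```python
from PIL import Image, ImageOps

# Returns an upright copy; the Orientation tag (and, per the change below,
# any XMP orientation value) is removed from the copy's metadata.
with Image.open("camera_photo.jpg") as im:
    upright = ImageOps.exif_transpose(im)
```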
@ -601,10 +604,12 @@ def exif_transpose(image):
"Raw profile type exif"
] = transposed_exif.tobytes().hex()
elif "XML:com.adobe.xmp" in transposed_image.info:
transposed_image.info["XML:com.adobe.xmp"] = re.sub(
for pattern in (
r'tiff:Orientation="([0-9])"',
"",
transposed_image.info["XML:com.adobe.xmp"],
)
r"<tiff:Orientation>([0-9])</tiff:Orientation>",
):
transposed_image.info["XML:com.adobe.xmp"] = re.sub(
pattern, "", transposed_image.info["XML:com.adobe.xmp"]
)
return transposed_image
return image.copy()
View File
@ -68,21 +68,7 @@ def _pyimagingtkcall(command, photo, id):
# may raise an error if it cannot attach to Tkinter
from . import _imagingtk
try:
if hasattr(tk, "interp"):
# Required for PyPy, which always has CFFI installed
from cffi import FFI
ffi = FFI()
# PyPy is using an FFI CDATA element
# (Pdb) self.tk.interp
# <cdata 'Tcl_Interp *' 0x3061b50>
_imagingtk.tkinit(int(ffi.cast("uintptr_t", tk.interp)), 1)
else:
_imagingtk.tkinit(tk.interpaddr(), 1)
except AttributeError:
_imagingtk.tkinit(id(tk), 0)
_imagingtk.tkinit(tk.interpaddr())
tk.call(command, photo, id)
View File
@ -711,7 +711,7 @@ def _save(im, fp, filename):
qtables = getattr(im, "quantization", None)
qtables = validate_qtables(qtables)
extra = b""
extra = info.get("extra", b"")
icc_profile = info.get("icc_profile")
if icc_profile:
View File
@ -18,16 +18,66 @@
# See the README file for information on usage and redistribution.
#
from . import Image, ImageFile, JpegImagePlugin
import itertools
import os
import struct
from . import Image, ImageFile, ImageSequence, JpegImagePlugin, TiffImagePlugin
from ._binary import i16be as i16
from ._binary import o32le
# def _accept(prefix):
# return JpegImagePlugin._accept(prefix)
def _save(im, fp, filename):
# Note that we can only save the current frame at present
return JpegImagePlugin._save(im, fp, filename)
JpegImagePlugin._save(im, fp, filename)
def _save_all(im, fp, filename):
append_images = im.encoderinfo.get("append_images", [])
if not append_images:
try:
animated = im.is_animated
except AttributeError:
animated = False
if not animated:
_save(im, fp, filename)
return
offsets = []
for imSequence in itertools.chain([im], append_images):
for im_frame in ImageSequence.Iterator(imSequence):
if not offsets:
# APP2 marker
im.encoderinfo["extra"] = (
b"\xFF\xE2" + struct.pack(">H", 6 + 70) + b"MPF\0" + b" " * 70
)
JpegImagePlugin._save(im_frame, fp, filename)
offsets.append(fp.tell())
else:
im_frame.save(fp, "JPEG")
offsets.append(fp.tell() - offsets[-1])
ifd = TiffImagePlugin.ImageFileDirectory_v2()
ifd[0xB001] = len(offsets)
mpentries = b""
data_offset = 0
for i, size in enumerate(offsets):
if i == 0:
mptype = 0x030000 # Baseline MP Primary Image
else:
mptype = 0x000000 # Undefined
mpentries += struct.pack("<LLLHH", mptype, size, data_offset, 0, 0)
if i == 0:
data_offset -= 28
data_offset += size
ifd[0xB002] = mpentries
fp.seek(28)
fp.write(b"II\x2A\x00" + o32le(8) + ifd.tobytes(8))
fp.seek(0, os.SEEK_END)
##
@ -124,6 +174,7 @@ class MpoImageFile(JpegImagePlugin.JpegImageFile):
# Image.register_open(MpoImageFile.format,
# JpegImagePlugin.jpeg_factory, _accept)
Image.register_save(MpoImageFile.format, _save)
Image.register_save_all(MpoImageFile.format, _save_all)
Image.register_extension(MpoImageFile.format, ".mpo")
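With the save_all handler registered, several frames can be written into one .mpo container; a hedged sketch with assumed file names:

```python
from PIL import Image

# Two JPEG frames written into a single MPO file via the new save_all path.
left = Image.open("left.jpg")
right = Image.open("right.jpg")
left.save("stereo.mpo", save_all=True, append_images=[right])
```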
View File
@ -21,10 +21,11 @@
##
import io
import math
import os
import time
from . import Image, ImageFile, ImageSequence, PdfParser, __version__
from . import Image, ImageFile, ImageSequence, PdfParser, __version__, features
#
# --------------------------------------------------------------------
@ -123,8 +124,29 @@ def _save(im, fp, filename, save_all=False):
params = None
decode = None
#
# Get image characteristics
width, height = im.size
if im.mode == "1":
filter = "DCTDecode"
if features.check("libtiff"):
filter = "CCITTFaxDecode"
bits = 1
params = PdfParser.PdfArray(
[
PdfParser.PdfDict(
{
"K": -1,
"BlackIs1": True,
"Columns": width,
"Rows": height,
}
)
]
)
else:
filter = "DCTDecode"
colorspace = PdfParser.PdfName("DeviceGray")
procset = "ImageB" # grayscale
elif im.mode == "L":
@ -161,6 +183,14 @@ def _save(im, fp, filename, save_all=False):
if filter == "ASCIIHexDecode":
ImageFile._save(im, op, [("hex", (0, 0) + im.size, 0, im.mode)])
elif filter == "CCITTFaxDecode":
im.save(
op,
"TIFF",
compression="group4",
# use a single strip
strip_size=math.ceil(im.width / 8) * im.height,
)
elif filter == "DCTDecode":
Image.SAVE["JPEG"](im, op, filename)
elif filter == "FlateDecode":
@ -170,22 +200,24 @@ def _save(im, fp, filename, save_all=False):
else:
raise ValueError(f"unsupported PDF filter ({filter})")
#
# Get image characteristics
width, height = im.size
stream = op.getvalue()
if filter == "CCITTFaxDecode":
stream = stream[8:]
filter = PdfParser.PdfArray([PdfParser.PdfName(filter)])
else:
filter = PdfParser.PdfName(filter)
existing_pdf.write_obj(
image_refs[page_number],
stream=op.getvalue(),
stream=stream,
Type=PdfParser.PdfName("XObject"),
Subtype=PdfParser.PdfName("Image"),
Width=width, # * 72.0 / resolution,
Height=height, # * 72.0 / resolution,
Filter=PdfParser.PdfName(filter),
Filter=filter,
BitsPerComponent=bits,
Decode=decode,
DecodeParams=params,
DecodeParms=params,
ColorSpace=colorspace,
)
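A sketch of the new path: a mode "1" image is embedded with CCITTFaxDecode when libtiff is available, and otherwise falls back to DCTDecode (file names are assumptions):

```python
from PIL import Image, features

# Bilevel page; with libtiff present it is written using Group 4
# (CCITTFaxDecode) compression, otherwise as JPEG (DCTDecode).
im = Image.open("scan.png").convert("1")
im.save("scan.pdf")
print("CCITTFaxDecode available:", features.check("libtiff"))
```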
View File
@ -75,6 +75,9 @@ class PsdImageFile(ImageFile.ImageFile):
if channels > psd_channels:
raise OSError("not enough channels")
if mode == "RGB" and psd_channels == 4:
mode = "RGBA"
channels = 4
self.mode = mode
self._size = i32(s, 18), i32(s, 14)
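With this change, a four-channel RGB PSD now opens as RGBA; a small sketch with an assumed file name:

```python
from PIL import Image

# A PSD with an alpha channel now reports mode "RGBA" instead of "RGB".
with Image.open("with_alpha.psd") as im:
    print(im.mode)
```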
View File
@ -58,7 +58,7 @@ class PyAccess:
# Keep pointer to im object to prevent dereferencing.
self._im = img.im
if self._im.mode == "P":
if self._im.mode in ("P", "PA"):
self._palette = img.palette
# Debugging is polluting test traces, only useful here
@ -89,12 +89,17 @@ class PyAccess:
(x, y) = self.check_xy((x, y))
if (
self._im.mode == "P"
self._im.mode in ("P", "PA")
and isinstance(color, (list, tuple))
and len(color) in [3, 4]
):
# RGB or RGBA value for a P image
# RGB or RGBA value for a P or PA image
if self._im.mode == "PA":
alpha = color[3] if len(color) == 4 else 255
color = color[:3]
color = self._palette.getcolor(color, self._img)
if self._im.mode == "PA":
color = (color, alpha)
return self.set_pixel(x, y, color)
View File
@ -193,9 +193,10 @@ def _save(im, fp, filename):
warnings.warn("id_section has been trimmed to 255 characters")
if colormaptype:
colormapfirst, colormaplength, colormapentry = 0, 256, 24
palette = im.im.getpalette("RGB", "BGR")
colormaplength, colormapentry = len(palette) // 3, 24
else:
colormapfirst, colormaplength, colormapentry = 0, 0, 0
colormaplength, colormapentry = 0, 0
if im.mode in ("LA", "RGBA"):
flags = 8
@ -210,7 +211,7 @@ def _save(im, fp, filename):
o8(id_len)
+ o8(colormaptype)
+ o8(imagetype)
+ o16(colormapfirst)
+ o16(0) # colormapfirst
+ o16(colormaplength)
+ o8(colormapentry)
+ o16(0)
@ -225,7 +226,7 @@ def _save(im, fp, filename):
fp.write(id_section)
if colormaptype:
fp.write(im.im.getpalette("RGB", "BGR"))
fp.write(palette)
if rle:
ImageFile._save(
View File
@ -727,7 +727,9 @@ class ImageFileDirectory_v2(MutableMapping):
@_register_writer(2)
def write_string(self, value):
# remerge of https://github.com/python-pillow/Pillow/pull/1416
return b"" + value.encode("ascii", "replace") + b"\0"
if not isinstance(value, bytes):
value = value.encode("ascii", "replace")
return value + b"\0"
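A sketch of what the fix permits: an ASCII tag may now be supplied as bytes when saving with tiffinfo (tag 271 is the standard Make tag):

```python
from PIL import Image, TiffImagePlugin

# Bytes are accepted for ASCII tags and written with a trailing NUL byte.
ifd = TiffImagePlugin.ImageFileDirectory_v2()
ifd[271] = b"Test Make"  # 271 = Make, an ASCII tag
Image.new("L", (8, 8)).save("tagged.tif", tiffinfo=ifd)
```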
@_register_loader(5, 8)
def load_rational(self, data, legacy_api=True):
@ -1153,7 +1155,7 @@ class TiffImageFile(ImageFile.ImageFile):
:returns: XMP tags in a dictionary.
"""
return self._getxmp(self.tag_v2[700]) if 700 in self.tag_v2 else {}
return self._getxmp(self.tag_v2[XMP]) if XMP in self.tag_v2 else {}
def get_photoshop_blocks(self):
"""
@ -1328,7 +1330,7 @@ class TiffImageFile(ImageFile.ImageFile):
logger.debug(f"- photometric_interpretation: {photo}")
logger.debug(f"- planar_configuration: {self._planar_configuration}")
logger.debug(f"- fill_order: {fillorder}")
logger.debug(f"- YCbCr subsampling: {self.tag.get(530)}")
logger.debug(f"- YCbCr subsampling: {self.tag.get(YCBCRSUBSAMPLING)}")
# size
xsize = int(self.tag_v2.get(IMAGEWIDTH))
@ -1469,8 +1471,8 @@ class TiffImageFile(ImageFile.ImageFile):
else:
# tiled image
offsets = self.tag_v2[TILEOFFSETS]
w = self.tag_v2.get(322)
h = self.tag_v2.get(323)
w = self.tag_v2.get(TILEWIDTH)
h = self.tag_v2.get(TILELENGTH)
for offset in offsets:
if x + w > xsize:
@ -1684,7 +1686,8 @@ def _save(im, fp, filename):
stride = len(bits) * ((im.size[0] * bits[0] + 7) // 8)
# aim for given strip size (64 KB by default) when using libtiff writer
if libtiff:
rows_per_strip = 1 if stride == 0 else min(STRIP_SIZE // stride, im.size[1])
im_strip_size = encoderinfo.get("strip_size", STRIP_SIZE)
rows_per_strip = 1 if stride == 0 else min(im_strip_size // stride, im.size[1])
# JPEG encoder expects multiple of 8 rows
if compression == "jpeg":
rows_per_strip = min(((rows_per_strip + 7) // 8) * 8, im.size[1])
View File
@ -23,33 +23,16 @@ TkImaging_Init(Tcl_Interp *interp);
extern int
load_tkinter_funcs(void);
/* copied from _tkinter.c (this isn't as bad as it may seem: for new
versions, we use _tkinter's interpaddr hook instead, and all older
versions use this structure layout) */
typedef struct {
PyObject_HEAD Tcl_Interp *interp;
} TkappObject;
static PyObject *
_tkinit(PyObject *self, PyObject *args) {
Tcl_Interp *interp;
PyObject *arg;
int is_interp;
if (!PyArg_ParseTuple(args, "Oi", &arg, &is_interp)) {
if (!PyArg_ParseTuple(args, "O", &arg)) {
return NULL;
}
if (is_interp) {
interp = (Tcl_Interp *)PyLong_AsVoidPtr(arg);
} else {
TkappObject *app;
/* Do it the hard way. This will break if the TkappObject
layout changes */
app = (TkappObject *)PyLong_AsVoidPtr(arg);
interp = app->interp;
}
interp = (Tcl_Interp *)PyLong_AsVoidPtr(arg);
/* This will bomb if interp is invalid... */
TkImaging_Init(interp);
View File
@ -1026,6 +1026,14 @@ pa2l(UINT8 *out, const UINT8 *in, int xsize, ImagingPalette palette) {
}
}
static void
pa2p(UINT8 *out, const UINT8 *in, int xsize, ImagingPalette palette) {
int x;
for (x = 0; x < xsize; x++, in += 4) {
*out++ = in[0];
}
}
static void
p2pa(UINT8 *out, const UINT8 *in, int xsize, ImagingPalette palette) {
int x;
@ -1209,6 +1217,8 @@ frompalette(Imaging imOut, Imaging imIn, const char *mode) {
convert = alpha ? pa2l : p2l;
} else if (strcmp(mode, "LA") == 0) {
convert = alpha ? pa2la : p2la;
} else if (strcmp(mode, "P") == 0) {
convert = pa2p;
} else if (strcmp(mode, "PA") == 0) {
convert = p2pa;
} else if (strcmp(mode, "I") == 0) {
@ -1233,6 +1243,10 @@ frompalette(Imaging imOut, Imaging imIn, const char *mode) {
if (!imOut) {
return NULL;
}
if (strcmp(mode, "P") == 0 || strcmp(mode, "PA") == 0) {
ImagingPaletteDelete(imOut->palette);
imOut->palette = ImagingPaletteDuplicate(imIn->palette);
}
ImagingSectionEnter(&cookie);
for (y = 0; y < imIn->ysize; y++) {
View File
@ -916,7 +916,7 @@ ImagingLibTiffEncode(Imaging im, ImagingCodecState state, UINT8 *buffer, int byt
dump_state(clientstate);
if (state->state == 0) {
TRACE(("Encoding line bt line"));
TRACE(("Encoding line by line"));
while (state->y < state->ysize) {
state->shuffle(
state->buffer,
View File
@ -11,8 +11,8 @@ For more extensive info, see the [Windows build instructions](build.rst).
* Requires Microsoft Visual Studio 2017 or newer with C++ component.
* Requires NASM for libjpeg-turbo, a required dependency when using this script.
* Requires CMake 3.12 or newer (available as Visual Studio component).
* Tested on Windows Server 2016 with Visual Studio 2017 Community (AppVeyor).
* Tested on Windows Server 2019 with Visual Studio 2019 Enterprise (GitHub Actions).
* Tested on Windows Server 2016 with Visual Studio 2017 Community, and Windows Server 2019 with Visual Studio 2022 Community (AppVeyor).
* Tested on Windows Server 2022 with Visual Studio 2022 Enterprise (GitHub Actions).
The following is a simplified version of the script used on AppVeyor:
```
View File
@ -108,9 +108,9 @@ header = [
deps = {
"libjpeg": {
"url": SF_PROJECTS
+ "/libjpeg-turbo/files/2.1.3/libjpeg-turbo-2.1.3.tar.gz/download",
"filename": "libjpeg-turbo-2.1.3.tar.gz",
"dir": "libjpeg-turbo-2.1.3",
+ "/libjpeg-turbo/files/2.1.4/libjpeg-turbo-2.1.4.tar.gz/download",
"filename": "libjpeg-turbo-2.1.4.tar.gz",
"dir": "libjpeg-turbo-2.1.4",
"build": [
cmd_cmake(
[
@ -157,9 +157,9 @@ deps = {
# "bins": [r"libtiff\*.dll"],
},
"libwebp": {
"url": "http://downloads.webmproject.org/releases/webp/libwebp-1.2.3.tar.gz",
"filename": "libwebp-1.2.3.tar.gz",
"dir": "libwebp-1.2.3",
"url": "http://downloads.webmproject.org/releases/webp/libwebp-1.2.4.tar.gz",
"filename": "libwebp-1.2.4.tar.gz",
"dir": "libwebp-1.2.4",
"build": [
cmd_rmdir(r"output\release-static"), # clean
cmd_nmake(
@ -226,21 +226,21 @@ deps = {
"filename": "lcms2-2.13.1.tar.gz",
"dir": "lcms2-2.13.1",
"patch": {
r"Projects\VC2019\lcms2_static\lcms2_static.vcxproj": {
r"Projects\VC2022\lcms2_static\lcms2_static.vcxproj": {
# default is /MD for x86 and /MT for x64, we need /MD always
"<RuntimeLibrary>MultiThreaded</RuntimeLibrary>": "<RuntimeLibrary>MultiThreadedDLL</RuntimeLibrary>", # noqa: E501
# retarget to default toolset (selected by vcvarsall.bat)
"<PlatformToolset>v142</PlatformToolset>": "<PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset>", # noqa: E501
"<PlatformToolset>v143</PlatformToolset>": "<PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset>", # noqa: E501
# retarget to latest (selected by vcvarsall.bat)
"<WindowsTargetPlatformVersion>10.0</WindowsTargetPlatformVersion>": "<WindowsTargetPlatformVersion>$(WindowsSDKVersion)</WindowsTargetPlatformVersion>", # noqa: E501
}
},
"build": [
cmd_rmdir("Lib"),
cmd_rmdir(r"Projects\VC2019\Release"),
cmd_msbuild(r"Projects\VC2019\lcms2.sln", "Release", "Clean"),
cmd_rmdir(r"Projects\VC2022\Release"),
cmd_msbuild(r"Projects\VC2022\lcms2.sln", "Release", "Clean"),
cmd_msbuild(
r"Projects\VC2019\lcms2.sln", "Release", "lcms2_static:Rebuild"
r"Projects\VC2022\lcms2.sln", "Release", "lcms2_static:Rebuild"
),
cmd_xcopy("include", "{inc_dir}"),
],
@ -281,9 +281,9 @@ deps = {
"libs": [r"imagequant.lib"],
},
"harfbuzz": {
"url": "https://github.com/harfbuzz/harfbuzz/archive/4.4.1.zip",
"filename": "harfbuzz-4.4.1.zip",
"dir": "harfbuzz-4.4.1",
"url": "https://github.com/harfbuzz/harfbuzz/archive/5.1.0.zip",
"filename": "harfbuzz-5.1.0.zip",
"dir": "harfbuzz-5.1.0",
"build": [
cmd_cmake("-DHB_HAVE_FREETYPE:BOOL=TRUE"),
cmd_nmake(target="clean"),