Merge remote-tracking branch 'upstream/main' into winbuild-update

# Conflicts:
#	Tests/test_imagefont.py
commit 7485b1a8a0
nulano 2022-09-13 20:22:25 +02:00
No known key found for this signature in database
GPG Key ID: B650CDF63B705766
12 changed files with 1259 additions and 1208 deletions

View File

@ -1,6 +1,6 @@
repos:
- repo: https://github.com/psf/black
rev: 22.6.0
rev: 22.8.0
hooks:
- id: black
args: ["--target-version", "py37"]
@ -14,18 +14,18 @@ repos:
- id: isort
- repo: https://github.com/asottile/yesqa
rev: v1.3.0
rev: v1.4.0
hooks:
- id: yesqa
- repo: https://github.com/Lucas-C/pre-commit-hooks
rev: v1.3.0
rev: v1.3.1
hooks:
- id: remove-tabs
exclude: (Makefile$|\.bat$|\.cmake$|\.eps$|\.fits$|\.opt$)
- repo: https://github.com/PyCQA/flake8
rev: 5.0.2
rev: 5.0.4
hooks:
- id: flake8
additional_dependencies: [flake8-2020, flake8-implicit-str-concat]

View File

@ -5,6 +5,15 @@ Changelog (Pillow)
9.3.0 (unreleased)
------------------
- Do not call load() before draft() in Image.thumbnail #6539
[radarhere]
- Copy palette when converting from P to PA #6497
[radarhere]
- Allow RGB and RGBA values for PA image putpixel #6504
[radarhere]
- Removed support for tkinter in PyPy before Python 3.6 #6551
[nulano]
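
As a hedged illustration of the PA ``putpixel`` entry above (#6504) and the palette-copying change (#6497), the sketch below builds a tiny palette image; the palette colours, coordinates, and values are arbitrary placeholders, not part of the diff:

.. code-block:: python

    from PIL import Image

    # Two-colour palette image: index 0 = black, index 1 = red.
    im = Image.new("P", (1, 1))
    im.putpalette([0, 0, 0, 255, 0, 0])

    # #6497: converting P -> PA now copies the palette across.
    im_pa = im.convert("PA")

    # #6504: putpixel() on a PA image accepts an RGB (or RGBA) tuple and
    # maps it to a palette index; alpha defaults to 255 for a 3-tuple.
    im_pa.putpixel((0, 0), (255, 0, 0))
    print(im_pa.getpixel((0, 0)))  # (palette index, alpha), e.g. (1, 255)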

View File

@ -5,90 +5,109 @@ from PIL import Image, ImageFilter
from .helper import assert_image_equal, hopper
def test_sanity():
def apply_filter(filter_to_apply):
for mode in ["L", "RGB", "CMYK"]:
im = hopper(mode)
out = im.filter(filter_to_apply)
assert out.mode == im.mode
assert out.size == im.size
@pytest.mark.parametrize(
"filter_to_apply",
(
ImageFilter.BLUR,
ImageFilter.CONTOUR,
ImageFilter.DETAIL,
ImageFilter.EDGE_ENHANCE,
ImageFilter.EDGE_ENHANCE_MORE,
ImageFilter.EMBOSS,
ImageFilter.FIND_EDGES,
ImageFilter.SMOOTH,
ImageFilter.SMOOTH_MORE,
ImageFilter.SHARPEN,
ImageFilter.MaxFilter,
ImageFilter.MedianFilter,
ImageFilter.MinFilter,
ImageFilter.ModeFilter,
ImageFilter.GaussianBlur,
ImageFilter.GaussianBlur(5),
ImageFilter.BoxBlur(5),
ImageFilter.UnsharpMask,
ImageFilter.UnsharpMask(10),
),
)
@pytest.mark.parametrize("mode", ("L", "RGB", "CMYK"))
def test_sanity(filter_to_apply, mode):
im = hopper(mode)
out = im.filter(filter_to_apply)
assert out.mode == im.mode
assert out.size == im.size
apply_filter(ImageFilter.BLUR)
apply_filter(ImageFilter.CONTOUR)
apply_filter(ImageFilter.DETAIL)
apply_filter(ImageFilter.EDGE_ENHANCE)
apply_filter(ImageFilter.EDGE_ENHANCE_MORE)
apply_filter(ImageFilter.EMBOSS)
apply_filter(ImageFilter.FIND_EDGES)
apply_filter(ImageFilter.SMOOTH)
apply_filter(ImageFilter.SMOOTH_MORE)
apply_filter(ImageFilter.SHARPEN)
apply_filter(ImageFilter.MaxFilter)
apply_filter(ImageFilter.MedianFilter)
apply_filter(ImageFilter.MinFilter)
apply_filter(ImageFilter.ModeFilter)
apply_filter(ImageFilter.GaussianBlur)
apply_filter(ImageFilter.GaussianBlur(5))
apply_filter(ImageFilter.BoxBlur(5))
apply_filter(ImageFilter.UnsharpMask)
apply_filter(ImageFilter.UnsharpMask(10))
@pytest.mark.parametrize("mode", ("L", "RGB", "CMYK"))
def test_sanity_error(mode):
with pytest.raises(TypeError):
apply_filter("hello")
im = hopper(mode)
im.filter("hello")
def test_crash():
# crashes on small images
im = Image.new("RGB", (1, 1))
im.filter(ImageFilter.SMOOTH)
im = Image.new("RGB", (2, 2))
im.filter(ImageFilter.SMOOTH)
im = Image.new("RGB", (3, 3))
# crashes on small images
@pytest.mark.parametrize("size", ((1, 1), (2, 2), (3, 3)))
def test_crash(size):
im = Image.new("RGB", size)
im.filter(ImageFilter.SMOOTH)
def test_modefilter():
def modefilter(mode):
im = Image.new(mode, (3, 3), None)
im.putdata(list(range(9)))
# image is:
# 0 1 2
# 3 4 5
# 6 7 8
mod = im.filter(ImageFilter.ModeFilter).getpixel((1, 1))
im.putdata([0, 0, 1, 2, 5, 1, 5, 2, 0]) # mode=0
mod2 = im.filter(ImageFilter.ModeFilter).getpixel((1, 1))
return mod, mod2
assert modefilter("1") == (4, 0)
assert modefilter("L") == (4, 0)
assert modefilter("P") == (4, 0)
assert modefilter("RGB") == ((4, 0, 0), (0, 0, 0))
@pytest.mark.parametrize(
"mode, expected",
(
("1", (4, 0)),
("L", (4, 0)),
("P", (4, 0)),
("RGB", ((4, 0, 0), (0, 0, 0))),
),
)
def test_modefilter(mode, expected):
im = Image.new(mode, (3, 3), None)
im.putdata(list(range(9)))
# image is:
# 0 1 2
# 3 4 5
# 6 7 8
mod = im.filter(ImageFilter.ModeFilter).getpixel((1, 1))
im.putdata([0, 0, 1, 2, 5, 1, 5, 2, 0]) # mode=0
mod2 = im.filter(ImageFilter.ModeFilter).getpixel((1, 1))
assert (mod, mod2) == expected
def test_rankfilter():
def rankfilter(mode):
im = Image.new(mode, (3, 3), None)
im.putdata(list(range(9)))
# image is:
# 0 1 2
# 3 4 5
# 6 7 8
minimum = im.filter(ImageFilter.MinFilter).getpixel((1, 1))
med = im.filter(ImageFilter.MedianFilter).getpixel((1, 1))
maximum = im.filter(ImageFilter.MaxFilter).getpixel((1, 1))
return minimum, med, maximum
@pytest.mark.parametrize(
"mode, expected",
(
("1", (0, 4, 8)),
("L", (0, 4, 8)),
("RGB", ((0, 0, 0), (4, 0, 0), (8, 0, 0))),
("I", (0, 4, 8)),
("F", (0.0, 4.0, 8.0)),
),
)
def test_rankfilter(mode, expected):
im = Image.new(mode, (3, 3), None)
im.putdata(list(range(9)))
# image is:
# 0 1 2
# 3 4 5
# 6 7 8
minimum = im.filter(ImageFilter.MinFilter).getpixel((1, 1))
med = im.filter(ImageFilter.MedianFilter).getpixel((1, 1))
maximum = im.filter(ImageFilter.MaxFilter).getpixel((1, 1))
assert (minimum, med, maximum) == expected
assert rankfilter("1") == (0, 4, 8)
assert rankfilter("L") == (0, 4, 8)
@pytest.mark.parametrize(
"filter", (ImageFilter.MinFilter, ImageFilter.MedianFilter, ImageFilter.MaxFilter)
)
def test_rankfilter_error(filter):
with pytest.raises(ValueError):
rankfilter("P")
assert rankfilter("RGB") == ((0, 0, 0), (4, 0, 0), (8, 0, 0))
assert rankfilter("I") == (0, 4, 8)
assert rankfilter("F") == (0.0, 4.0, 8.0)
im = Image.new("P", (3, 3), None)
im.putdata(list(range(9)))
# image is:
# 0 1 2
# 3 4 5
# 6 7 8
im.filter(filter).getpixel((1, 1))
def test_rankfilter_properties():
@ -110,7 +129,8 @@ def test_kernel_not_enough_coefficients():
ImageFilter.Kernel((3, 3), (0, 0))
def test_consistency_3x3():
@pytest.mark.parametrize("mode", ("L", "LA", "RGB", "CMYK"))
def test_consistency_3x3(mode):
with Image.open("Tests/images/hopper.bmp") as source:
with Image.open("Tests/images/hopper_emboss.bmp") as reference:
kernel = ImageFilter.Kernel(
@ -125,14 +145,14 @@ def test_consistency_3x3():
source = source.split() * 2
reference = reference.split() * 2
for mode in ["L", "LA", "RGB", "CMYK"]:
assert_image_equal(
Image.merge(mode, source[: len(mode)]).filter(kernel),
Image.merge(mode, reference[: len(mode)]),
)
assert_image_equal(
Image.merge(mode, source[: len(mode)]).filter(kernel),
Image.merge(mode, reference[: len(mode)]),
)
def test_consistency_5x5():
@pytest.mark.parametrize("mode", ("L", "LA", "RGB", "CMYK"))
def test_consistency_5x5(mode):
with Image.open("Tests/images/hopper.bmp") as source:
with Image.open("Tests/images/hopper_emboss_more.bmp") as reference:
kernel = ImageFilter.Kernel(
@ -149,8 +169,7 @@ def test_consistency_5x5():
source = source.split() * 2
reference = reference.split() * 2
for mode in ["L", "LA", "RGB", "CMYK"]:
assert_image_equal(
Image.merge(mode, source[: len(mode)]).filter(kernel),
Image.merge(mode, reference[: len(mode)]),
)
assert_image_equal(
Image.merge(mode, source[: len(mode)]).filter(kernel),
Image.merge(mode, reference[: len(mode)]),
)

View File

@ -38,58 +38,64 @@ gradients_image = Image.open("Tests/images/radial_gradients.png")
gradients_image.load()
def test_args_factor():
@pytest.mark.parametrize(
"size, expected",
(
(3, (4, 4)),
((3, 1), (4, 10)),
((1, 3), (10, 4)),
),
)
def test_args_factor(size, expected):
im = Image.new("L", (10, 10))
assert (4, 4) == im.reduce(3).size
assert (4, 10) == im.reduce((3, 1)).size
assert (10, 4) == im.reduce((1, 3)).size
with pytest.raises(ValueError):
im.reduce(0)
with pytest.raises(TypeError):
im.reduce(2.0)
with pytest.raises(ValueError):
im.reduce((0, 10))
assert expected == im.reduce(size).size
def test_args_box():
@pytest.mark.parametrize(
"size, expected_error", ((0, ValueError), (2.0, TypeError), ((0, 10), ValueError))
)
def test_args_factor_error(size, expected_error):
im = Image.new("L", (10, 10))
assert (5, 5) == im.reduce(2, (0, 0, 10, 10)).size
assert (1, 1) == im.reduce(2, (5, 5, 6, 6)).size
with pytest.raises(TypeError):
im.reduce(2, "stri")
with pytest.raises(TypeError):
im.reduce(2, 2)
with pytest.raises(ValueError):
im.reduce(2, (0, 0, 11, 10))
with pytest.raises(ValueError):
im.reduce(2, (0, 0, 10, 11))
with pytest.raises(ValueError):
im.reduce(2, (-1, 0, 10, 10))
with pytest.raises(ValueError):
im.reduce(2, (0, -1, 10, 10))
with pytest.raises(ValueError):
im.reduce(2, (0, 5, 10, 5))
with pytest.raises(ValueError):
im.reduce(2, (5, 0, 5, 10))
with pytest.raises(expected_error):
im.reduce(size)
def test_unsupported_modes():
@pytest.mark.parametrize(
"size, expected",
(
((0, 0, 10, 10), (5, 5)),
((5, 5, 6, 6), (1, 1)),
),
)
def test_args_box(size, expected):
im = Image.new("L", (10, 10))
assert expected == im.reduce(2, size).size
@pytest.mark.parametrize(
"size, expected_error",
(
("stri", TypeError),
((0, 0, 11, 10), ValueError),
((0, 0, 10, 11), ValueError),
((-1, 0, 10, 10), ValueError),
((0, -1, 10, 10), ValueError),
((0, 5, 10, 5), ValueError),
((5, 0, 5, 10), ValueError),
),
)
def test_args_box_error(size, expected_error):
im = Image.new("L", (10, 10))
with pytest.raises(expected_error):
im.reduce(2, size).size
@pytest.mark.parametrize("mode", ("P", "1", "I;16"))
def test_unsupported_modes(mode):
im = Image.new("P", (10, 10))
with pytest.raises(ValueError):
im.reduce(3)
im = Image.new("1", (10, 10))
with pytest.raises(ValueError):
im.reduce(3)
im = Image.new("I;16", (10, 10))
with pytest.raises(ValueError):
im.reduce(3)
def get_image(mode):
mode_info = ImageMode.getmode(mode)
@ -197,63 +203,69 @@ def test_mode_L():
compare_reduce_with_box(im, factor)
def test_mode_LA():
@pytest.mark.parametrize("factor", remarkable_factors)
def test_mode_LA(factor):
im = get_image("LA")
for factor in remarkable_factors:
compare_reduce_with_reference(im, factor, 0.8, 5)
compare_reduce_with_reference(im, factor, 0.8, 5)
@pytest.mark.parametrize("factor", remarkable_factors)
def test_mode_LA_opaque(factor):
im = get_image("LA")
# With opaque alpha, an error should be way smaller.
im.putalpha(Image.new("L", im.size, 255))
for factor in remarkable_factors:
compare_reduce_with_reference(im, factor)
compare_reduce_with_box(im, factor)
compare_reduce_with_reference(im, factor)
compare_reduce_with_box(im, factor)
def test_mode_La():
@pytest.mark.parametrize("factor", remarkable_factors)
def test_mode_La(factor):
im = get_image("La")
for factor in remarkable_factors:
compare_reduce_with_reference(im, factor)
compare_reduce_with_box(im, factor)
compare_reduce_with_reference(im, factor)
compare_reduce_with_box(im, factor)
def test_mode_RGB():
@pytest.mark.parametrize("factor", remarkable_factors)
def test_mode_RGB(factor):
im = get_image("RGB")
for factor in remarkable_factors:
compare_reduce_with_reference(im, factor)
compare_reduce_with_box(im, factor)
compare_reduce_with_reference(im, factor)
compare_reduce_with_box(im, factor)
def test_mode_RGBA():
@pytest.mark.parametrize("factor", remarkable_factors)
def test_mode_RGBA(factor):
im = get_image("RGBA")
for factor in remarkable_factors:
compare_reduce_with_reference(im, factor, 0.8, 5)
compare_reduce_with_reference(im, factor, 0.8, 5)
@pytest.mark.parametrize("factor", remarkable_factors)
def test_mode_RGBA_opaque(factor):
im = get_image("RGBA")
# With opaque alpha, an error should be way smaller.
im.putalpha(Image.new("L", im.size, 255))
for factor in remarkable_factors:
compare_reduce_with_reference(im, factor)
compare_reduce_with_box(im, factor)
compare_reduce_with_reference(im, factor)
compare_reduce_with_box(im, factor)
def test_mode_RGBa():
@pytest.mark.parametrize("factor", remarkable_factors)
def test_mode_RGBa(factor):
im = get_image("RGBa")
for factor in remarkable_factors:
compare_reduce_with_reference(im, factor)
compare_reduce_with_box(im, factor)
compare_reduce_with_reference(im, factor)
compare_reduce_with_box(im, factor)
def test_mode_I():
@pytest.mark.parametrize("factor", remarkable_factors)
def test_mode_I(factor):
im = get_image("I")
for factor in remarkable_factors:
compare_reduce_with_reference(im, factor)
compare_reduce_with_box(im, factor)
compare_reduce_with_reference(im, factor)
compare_reduce_with_box(im, factor)
def test_mode_F():
@pytest.mark.parametrize("factor", remarkable_factors)
def test_mode_F(factor):
im = get_image("F")
for factor in remarkable_factors:
compare_reduce_with_reference(im, factor, 0, 0)
compare_reduce_with_box(im, factor)
compare_reduce_with_reference(im, factor, 0, 0)
compare_reduce_with_box(im, factor)
@skip_unless_feature("jpg_2000")

View File

@ -97,6 +97,28 @@ def test_load_first():
im.thumbnail((64, 64))
assert im.size == (64, 10)
# Test thumbnail(), without draft(),
# on an image that is large enough once load() has changed the size
with Image.open("Tests/images/g4_orientation_5.tif") as im:
im.thumbnail((590, 88), reducing_gap=None)
assert im.size == (590, 88)
def test_load_first_unless_jpeg():
# Test that thumbnail() still uses draft() for JPEG
with Image.open("Tests/images/hopper.jpg") as im:
draft = im.draft
def im_draft(mode, size):
result = draft(mode, size)
assert result is not None
return result
im.draft = im_draft
im.thumbnail((64, 64))
# valgrind test is failing with memory allocated in libjpeg
@pytest.mark.valgrind_known_error(reason="Known Failing")

View File

@ -75,23 +75,25 @@ class TestImageTransform:
assert_image_equal(transformed, scaled)
def test_fill(self):
for mode, pixel in [
["RGB", (255, 0, 0)],
["RGBA", (255, 0, 0, 255)],
["LA", (76, 0)],
]:
im = hopper(mode)
(w, h) = im.size
transformed = im.transform(
im.size,
Image.Transform.EXTENT,
(0, 0, w * 2, h * 2),
Image.Resampling.BILINEAR,
fillcolor="red",
)
assert transformed.getpixel((w - 1, h - 1)) == pixel
@pytest.mark.parametrize(
"mode, expected_pixel",
(
("RGB", (255, 0, 0)),
("RGBA", (255, 0, 0, 255)),
("LA", (76, 0)),
),
)
def test_fill(self, mode, expected_pixel):
im = hopper(mode)
(w, h) = im.size
transformed = im.transform(
im.size,
Image.Transform.EXTENT,
(0, 0, w * 2, h * 2),
Image.Resampling.BILINEAR,
fillcolor="red",
)
assert transformed.getpixel((w - 1, h - 1)) == expected_pixel
def test_mesh(self):
# this should be a checkerboard of halfsized hoppers in ul, lr
@ -222,14 +224,12 @@ class TestImageTransform:
with pytest.raises(ValueError):
im.transform((100, 100), None)
def test_unknown_resampling_filter(self):
@pytest.mark.parametrize("resample", (Image.Resampling.BOX, "unknown"))
def test_unknown_resampling_filter(self, resample):
with hopper() as im:
(w, h) = im.size
for resample in (Image.Resampling.BOX, "unknown"):
with pytest.raises(ValueError):
im.transform(
(100, 100), Image.Transform.EXTENT, (0, 0, w, h), resample
)
with pytest.raises(ValueError):
im.transform((100, 100), Image.Transform.EXTENT, (0, 0, w, h), resample)
class TestImageTransformAffine:
@ -239,7 +239,16 @@ class TestImageTransformAffine:
im = hopper("RGB")
return im.crop((10, 20, im.width - 10, im.height - 20))
def _test_rotate(self, deg, transpose):
@pytest.mark.parametrize(
"deg, transpose",
(
(0, None),
(90, Image.Transpose.ROTATE_90),
(180, Image.Transpose.ROTATE_180),
(270, Image.Transpose.ROTATE_270),
),
)
def test_rotate(self, deg, transpose):
im = self._test_image()
angle = -math.radians(deg)
@ -271,77 +280,65 @@ class TestImageTransformAffine:
)
assert_image_equal(transposed, transformed)
def test_rotate_0_deg(self):
self._test_rotate(0, None)
def test_rotate_90_deg(self):
self._test_rotate(90, Image.Transpose.ROTATE_90)
def test_rotate_180_deg(self):
self._test_rotate(180, Image.Transpose.ROTATE_180)
def test_rotate_270_deg(self):
self._test_rotate(270, Image.Transpose.ROTATE_270)
def _test_resize(self, scale, epsilonscale):
@pytest.mark.parametrize(
"scale, epsilon_scale",
(
(1.1, 6.9),
(1.5, 5.5),
(2.0, 5.5),
(2.3, 3.7),
(2.5, 3.7),
),
)
@pytest.mark.parametrize(
"resample,epsilon",
(
(Image.Resampling.NEAREST, 0),
(Image.Resampling.BILINEAR, 2),
(Image.Resampling.BICUBIC, 1),
),
)
def test_resize(self, scale, epsilon_scale, resample, epsilon):
im = self._test_image()
size_up = int(round(im.width * scale)), int(round(im.height * scale))
matrix_up = [1 / scale, 0, 0, 0, 1 / scale, 0, 0, 0]
matrix_down = [scale, 0, 0, 0, scale, 0, 0, 0]
for resample, epsilon in [
transformed = im.transform(size_up, self.transform, matrix_up, resample)
transformed = transformed.transform(
im.size, self.transform, matrix_down, resample
)
assert_image_similar(transformed, im, epsilon * epsilon_scale)
@pytest.mark.parametrize(
"x, y, epsilon_scale",
(
(0.1, 0, 3.7),
(0.6, 0, 9.1),
(50, 50, 0),
),
)
@pytest.mark.parametrize(
"resample, epsilon",
(
(Image.Resampling.NEAREST, 0),
(Image.Resampling.BILINEAR, 2),
(Image.Resampling.BILINEAR, 1.5),
(Image.Resampling.BICUBIC, 1),
]:
transformed = im.transform(size_up, self.transform, matrix_up, resample)
transformed = transformed.transform(
im.size, self.transform, matrix_down, resample
)
assert_image_similar(transformed, im, epsilon * epsilonscale)
def test_resize_1_1x(self):
self._test_resize(1.1, 6.9)
def test_resize_1_5x(self):
self._test_resize(1.5, 5.5)
def test_resize_2_0x(self):
self._test_resize(2.0, 5.5)
def test_resize_2_3x(self):
self._test_resize(2.3, 3.7)
def test_resize_2_5x(self):
self._test_resize(2.5, 3.7)
def _test_translate(self, x, y, epsilonscale):
),
)
def test_translate(self, x, y, epsilon_scale, resample, epsilon):
im = self._test_image()
size_up = int(round(im.width + x)), int(round(im.height + y))
matrix_up = [1, 0, -x, 0, 1, -y, 0, 0]
matrix_down = [1, 0, x, 0, 1, y, 0, 0]
for resample, epsilon in [
(Image.Resampling.NEAREST, 0),
(Image.Resampling.BILINEAR, 1.5),
(Image.Resampling.BICUBIC, 1),
]:
transformed = im.transform(size_up, self.transform, matrix_up, resample)
transformed = transformed.transform(
im.size, self.transform, matrix_down, resample
)
assert_image_similar(transformed, im, epsilon * epsilonscale)
def test_translate_0_1(self):
self._test_translate(0.1, 0, 3.7)
def test_translate_0_6(self):
self._test_translate(0.6, 0, 9.1)
def test_translate_50(self):
self._test_translate(50, 50, 0)
transformed = im.transform(size_up, self.transform, matrix_up, resample)
transformed = transformed.transform(
im.size, self.transform, matrix_down, resample
)
assert_image_similar(transformed, im, epsilon * epsilon_scale)
class TestImageTransformPerspective(TestImageTransformAffine):

File diff suppressed because it is too large

View File

@ -837,6 +837,24 @@ Pillow reads and writes TGA images containing ``L``, ``LA``, ``P``,
``RGB``, and ``RGBA`` data. Pillow can read and write both uncompressed and
run-length encoded TGAs.
The :py:meth:`~PIL.Image.Image.save` method can take the following keyword arguments:
**compression**
If set to "tga_rle", the file will be run-length encoded.
.. versionadded:: 5.3.0
**id_section**
The identification field.
.. versionadded:: 5.3.0
**orientation**
If present and a positive number, the first pixel is for the top left corner,
rather than the bottom left corner.
.. versionadded:: 5.3.0
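
For reference, a minimal sketch of passing these TGA keywords to ``save()``; the output path and image contents are placeholders, not part of the diff:

.. code-block:: python

    from PIL import Image

    im = Image.new("RGB", (16, 16), "red")
    # "tga_rle" switches on run-length encoding; a positive orientation
    # stores the image with the first pixel in the top left corner.
    im.save("out.tga", compression="tga_rle", orientation=1)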
TIFF
^^^^

View File

@ -53,9 +53,9 @@ Functions
To protect against potential DOS attacks caused by "`decompression bombs`_" (i.e. malicious files
which decompress into a huge amount of data and are designed to crash or cause disruption by using up
a lot of memory), Pillow will issue a ``DecompressionBombWarning`` if the number of pixels in an
image is over a certain limit, :py:data:`PIL.Image.MAX_IMAGE_PIXELS`.
image is over a certain limit, :py:data:`MAX_IMAGE_PIXELS`.
This threshold can be changed by setting :py:data:`PIL.Image.MAX_IMAGE_PIXELS`. It can be disabled
This threshold can be changed by setting :py:data:`MAX_IMAGE_PIXELS`. It can be disabled
by setting ``Image.MAX_IMAGE_PIXELS = None``.
If desired, the warning can be turned into an error with
@ -63,7 +63,7 @@ Functions
``warnings.simplefilter('ignore', Image.DecompressionBombWarning)``. See also
`the logging documentation`_ to have warnings output to the logging facility instead of stderr.
If the number of pixels is greater than twice :py:data:`PIL.Image.MAX_IMAGE_PIXELS`, then a
If the number of pixels is greater than twice :py:data:`MAX_IMAGE_PIXELS`, then a
``DecompressionBombError`` will be raised instead.
.. _decompression bombs: https://en.wikipedia.org/wiki/Zip_bomb
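
A short sketch of the settings described above; the threshold value is arbitrary and only for illustration:

.. code-block:: python

    import warnings

    from PIL import Image

    # Lower the pixel-count threshold (or disable it entirely with None).
    Image.MAX_IMAGE_PIXELS = 10_000_000

    # Turn the warning into an error, as described above ...
    warnings.simplefilter("error", Image.DecompressionBombWarning)

    # ... or silence it instead.
    # warnings.simplefilter("ignore", Image.DecompressionBombWarning)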
@ -255,7 +255,7 @@ This rotates the input image by ``theta`` degrees counter clockwise:
.. automethod:: PIL.Image.Image.transform
.. automethod:: PIL.Image.Image.transpose
This flips the input image by using the :data:`PIL.Image.Transpose.FLIP_LEFT_RIGHT`
This flips the input image by using the :data:`Transpose.FLIP_LEFT_RIGHT`
method.
.. code-block:: python

View File

@ -34,7 +34,11 @@ project_urls =
Twitter=https://twitter.com/PythonPillow
[options]
packages = PIL
python_requires = >=3.7
include_package_data = True
package_dir =
= src
[options.extras_require]
docs =

View File

@ -999,9 +999,6 @@ try:
version=PILLOW_VERSION,
cmdclass={"build_ext": pil_build_ext},
ext_modules=ext_modules,
include_package_data=True,
packages=["PIL"],
package_dir={"": "src"},
zip_safe=not (debug_build() or PLATFORM_MINGW),
)
except RequiredDependencyException as err:

View File

@ -1989,18 +1989,14 @@ class Image:
:param size: The requested size in pixels, as a 2-tuple:
(width, height).
:param resample: An optional resampling filter. This can be
one of :py:data:`PIL.Image.Resampling.NEAREST`,
:py:data:`PIL.Image.Resampling.BOX`,
:py:data:`PIL.Image.Resampling.BILINEAR`,
:py:data:`PIL.Image.Resampling.HAMMING`,
:py:data:`PIL.Image.Resampling.BICUBIC` or
:py:data:`PIL.Image.Resampling.LANCZOS`.
one of :py:data:`Resampling.NEAREST`, :py:data:`Resampling.BOX`,
:py:data:`Resampling.BILINEAR`, :py:data:`Resampling.HAMMING`,
:py:data:`Resampling.BICUBIC` or :py:data:`Resampling.LANCZOS`.
If the image has mode "1" or "P", it is always set to
:py:data:`PIL.Image.Resampling.NEAREST`.
If the image mode specifies a number of bits, such as "I;16", then the
default filter is :py:data:`PIL.Image.Resampling.NEAREST`.
Otherwise, the default filter is
:py:data:`PIL.Image.Resampling.BICUBIC`. See: :ref:`concept-filters`.
:py:data:`Resampling.NEAREST`. If the image mode specifies a number
of bits, such as "I;16", then the default filter is
:py:data:`Resampling.NEAREST`. Otherwise, the default filter is
:py:data:`Resampling.BICUBIC`. See: :ref:`concept-filters`.
:param box: An optional 4-tuple of floats providing
the source image region to be scaled.
The values must be within (0, 0, width, height) rectangle.
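
To show the shortened ``Resampling`` references in use, a minimal ``resize()`` sketch; the file name and target sizes are placeholders:

.. code-block:: python

    from PIL import Image

    with Image.open("hopper.jpg") as im:
        # Explicit resampling filter; BICUBIC would be the default here.
        small = im.resize((64, 64), resample=Image.Resampling.LANCZOS)
        # The optional box selects a source region before scaling.
        detail = im.resize((64, 64), box=(0, 0, im.width / 2, im.height / 2))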
@ -2140,12 +2136,12 @@ class Image:
:param angle: In degrees counter clockwise.
:param resample: An optional resampling filter. This can be
one of :py:data:`PIL.Image.Resampling.NEAREST` (use nearest neighbour),
:py:data:`PIL.Image.BILINEAR` (linear interpolation in a 2x2
environment), or :py:data:`PIL.Image.Resampling.BICUBIC`
(cubic spline interpolation in a 4x4 environment).
If omitted, or if the image has mode "1" or "P", it is
set to :py:data:`PIL.Image.Resampling.NEAREST`. See :ref:`concept-filters`.
one of :py:data:`Resampling.NEAREST` (use nearest neighbour),
:py:data:`Resampling.BILINEAR` (linear interpolation in a 2x2
environment), or :py:data:`Resampling.BICUBIC` (cubic spline
interpolation in a 4x4 environment). If omitted, or if the image has
mode "1" or "P", it is set to :py:data:`Resampling.NEAREST`.
See :ref:`concept-filters`.
:param expand: Optional expansion flag. If true, expands the output
image to make it large enough to hold the entire rotated image.
If false or omitted, make the output image the same size as the
@ -2452,14 +2448,11 @@ class Image:
:param size: Requested size.
:param resample: Optional resampling filter. This can be one
of :py:data:`PIL.Image.Resampling.NEAREST`,
:py:data:`PIL.Image.Resampling.BOX`,
:py:data:`PIL.Image.Resampling.BILINEAR`,
:py:data:`PIL.Image.Resampling.HAMMING`,
:py:data:`PIL.Image.Resampling.BICUBIC` or
:py:data:`PIL.Image.Resampling.LANCZOS`.
If omitted, it defaults to :py:data:`PIL.Image.Resampling.BICUBIC`.
(was :py:data:`PIL.Image.Resampling.NEAREST` prior to version 2.5.0).
of :py:data:`Resampling.NEAREST`, :py:data:`Resampling.BOX`,
:py:data:`Resampling.BILINEAR`, :py:data:`Resampling.HAMMING`,
:py:data:`Resampling.BICUBIC` or :py:data:`Resampling.LANCZOS`.
If omitted, it defaults to :py:data:`Resampling.BICUBIC`.
(was :py:data:`Resampling.NEAREST` prior to version 2.5.0).
See: :ref:`concept-filters`.
:param reducing_gap: Apply optimization by resizing the image
in two steps. First, reducing the image by integer times
@ -2478,29 +2471,41 @@ class Image:
:returns: None
"""
self.load()
x, y = map(math.floor, size)
if x >= self.width and y >= self.height:
return
provided_size = tuple(map(math.floor, size))
def round_aspect(number, key):
return max(min(math.floor(number), math.ceil(number), key=key), 1)
def preserve_aspect_ratio():
def round_aspect(number, key):
return max(min(math.floor(number), math.ceil(number), key=key), 1)
# preserve aspect ratio
aspect = self.width / self.height
if x / y >= aspect:
x = round_aspect(y * aspect, key=lambda n: abs(aspect - n / y))
else:
y = round_aspect(
x / aspect, key=lambda n: 0 if n == 0 else abs(aspect - x / n)
)
size = (x, y)
x, y = provided_size
if x >= self.width and y >= self.height:
return
aspect = self.width / self.height
if x / y >= aspect:
x = round_aspect(y * aspect, key=lambda n: abs(aspect - n / y))
else:
y = round_aspect(
x / aspect, key=lambda n: 0 if n == 0 else abs(aspect - x / n)
)
return x, y
box = None
if reducing_gap is not None:
size = preserve_aspect_ratio()
if size is None:
return
res = self.draft(None, (size[0] * reducing_gap, size[1] * reducing_gap))
if res is not None:
box = res[1]
if box is None:
self.load()
# load() may have changed the size of the image
size = preserve_aspect_ratio()
if size is None:
return
if self.size != size:
im = self.resize(size, resample, box=box, reducing_gap=reducing_gap)
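
To illustrate the reworked call order above (``draft()`` is now tried before ``load()``, and the aspect-ratio computation is repeated if ``load()`` changes the size), a minimal ``thumbnail()`` usage sketch; the file name is a placeholder:

.. code-block:: python

    from PIL import Image

    with Image.open("large_photo.jpg") as im:
        # thumbnail() modifies the image in place and never enlarges it.
        im.thumbnail((128, 128), Image.Resampling.LANCZOS, reducing_gap=2.0)
        print(im.size)  # at most 128 px per side, aspect ratio preserved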
@ -2530,11 +2535,11 @@ class Image:
:param size: The output size.
:param method: The transformation method. This is one of
:py:data:`PIL.Image.Transform.EXTENT` (cut out a rectangular subregion),
:py:data:`PIL.Image.Transform.AFFINE` (affine transform),
:py:data:`PIL.Image.Transform.PERSPECTIVE` (perspective transform),
:py:data:`PIL.Image.Transform.QUAD` (map a quadrilateral to a rectangle), or
:py:data:`PIL.Image.Transform.MESH` (map a number of source quadrilaterals
:py:data:`Transform.EXTENT` (cut out a rectangular subregion),
:py:data:`Transform.AFFINE` (affine transform),
:py:data:`Transform.PERSPECTIVE` (perspective transform),
:py:data:`Transform.QUAD` (map a quadrilateral to a rectangle), or
:py:data:`Transform.MESH` (map a number of source quadrilaterals
in one operation).
It may also be an :py:class:`~PIL.Image.ImageTransformHandler`
@ -2554,11 +2559,11 @@ class Image:
return method, data
:param data: Extra data to the transformation method.
:param resample: Optional resampling filter. It can be one of
:py:data:`PIL.Image.Resampling.NEAREST` (use nearest neighbour),
:py:data:`PIL.Image.Resampling.BILINEAR` (linear interpolation in a 2x2
environment), or :py:data:`PIL.Image.BICUBIC` (cubic spline
:py:data:`Resampling.NEAREST` (use nearest neighbour),
:py:data:`Resampling.BILINEAR` (linear interpolation in a 2x2
environment), or :py:data:`Resampling.BICUBIC` (cubic spline
interpolation in a 4x4 environment). If omitted, or if the image
has mode "1" or "P", it is set to :py:data:`PIL.Image.Resampling.NEAREST`.
has mode "1" or "P", it is set to :py:data:`Resampling.NEAREST`.
See: :ref:`concept-filters`.
:param fill: If ``method`` is an
:py:class:`~PIL.Image.ImageTransformHandler` object, this is one of
@ -2685,13 +2690,10 @@ class Image:
"""
Transpose image (flip or rotate in 90 degree steps)
:param method: One of :py:data:`PIL.Image.Transpose.FLIP_LEFT_RIGHT`,
:py:data:`PIL.Image.Transpose.FLIP_TOP_BOTTOM`,
:py:data:`PIL.Image.Transpose.ROTATE_90`,
:py:data:`PIL.Image.Transpose.ROTATE_180`,
:py:data:`PIL.Image.Transpose.ROTATE_270`,
:py:data:`PIL.Image.Transpose.TRANSPOSE` or
:py:data:`PIL.Image.Transpose.TRANSVERSE`.
:param method: One of :py:data:`Transpose.FLIP_LEFT_RIGHT`,
:py:data:`Transpose.FLIP_TOP_BOTTOM`, :py:data:`Transpose.ROTATE_90`,
:py:data:`Transpose.ROTATE_180`, :py:data:`Transpose.ROTATE_270`,
:py:data:`Transpose.TRANSPOSE` or :py:data:`Transpose.TRANSVERSE`.
:returns: Returns a flipped or rotated copy of this image.
"""