Revert replacing and deprecating constants with enums

Hugo van Kemenade, 2022-10-24 11:44:02 +03:00
parent b6e0b668b9, commit ff4ad1b31c
52 changed files with 693 additions and 1111 deletions
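The revert swaps the enum spellings introduced in Pillow 9.1 (Image.Resampling, Image.Transpose, Image.Dither, Image.Palette, Image.Quantize, and the plugin-level enums) back to the original module-level constants throughout the test suite. A minimal sketch of the two spellings, assuming a Pillow version that exposes both names (where both exist they compare equal):

    from PIL import Image

    im = Image.new("RGB", (64, 64))

    # Enum spelling, the form being removed from the tests in this commit:
    small_enum = im.resize((32, 32), Image.Resampling.BILINEAR)

    # Constant spelling, the form the tests are reverted back to:
    small_const = im.resize((32, 32), Image.BILINEAR)

    assert small_enum.size == small_const.size == (32, 32)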

@@ -44,93 +44,83 @@ class TestColorLut3DCoreAPI:
         with pytest.raises(ValueError, match="filter"):
             im.im.color_lut_3d(
-                "RGB", Image.Resampling.BICUBIC, *self.generate_identity_table(3, 3)
+                "RGB", Image.BICUBIC, *self.generate_identity_table(3, 3)
             )
         with pytest.raises(ValueError, match="image mode"):
             im.im.color_lut_3d(
-                "wrong", Image.Resampling.BILINEAR, *self.generate_identity_table(3, 3)
+                "wrong", Image.BILINEAR, *self.generate_identity_table(3, 3)
             )
         with pytest.raises(ValueError, match="table_channels"):
             im.im.color_lut_3d(
-                "RGB", Image.Resampling.BILINEAR, *self.generate_identity_table(5, 3)
+                "RGB", Image.BILINEAR, *self.generate_identity_table(5, 3)
             )
         with pytest.raises(ValueError, match="table_channels"):
             im.im.color_lut_3d(
-                "RGB", Image.Resampling.BILINEAR, *self.generate_identity_table(1, 3)
+                "RGB", Image.BILINEAR, *self.generate_identity_table(1, 3)
             )
         with pytest.raises(ValueError, match="table_channels"):
             im.im.color_lut_3d(
-                "RGB", Image.Resampling.BILINEAR, *self.generate_identity_table(2, 3)
+                "RGB", Image.BILINEAR, *self.generate_identity_table(2, 3)
             )
         with pytest.raises(ValueError, match="Table size"):
             im.im.color_lut_3d(
                 "RGB",
-                Image.Resampling.BILINEAR,
+                Image.BILINEAR,
                 *self.generate_identity_table(3, (1, 3, 3)),
             )
         with pytest.raises(ValueError, match="Table size"):
             im.im.color_lut_3d(
                 "RGB",
-                Image.Resampling.BILINEAR,
+                Image.BILINEAR,
                 *self.generate_identity_table(3, (66, 3, 3)),
             )
         with pytest.raises(ValueError, match=r"size1D \* size2D \* size3D"):
-            im.im.color_lut_3d(
-                "RGB", Image.Resampling.BILINEAR, 3, 2, 2, 2, [0, 0, 0] * 7
-            )
+            im.im.color_lut_3d("RGB", Image.BILINEAR, 3, 2, 2, 2, [0, 0, 0] * 7)
         with pytest.raises(ValueError, match=r"size1D \* size2D \* size3D"):
-            im.im.color_lut_3d(
-                "RGB", Image.Resampling.BILINEAR, 3, 2, 2, 2, [0, 0, 0] * 9
-            )
+            im.im.color_lut_3d("RGB", Image.BILINEAR, 3, 2, 2, 2, [0, 0, 0] * 9)
         with pytest.raises(TypeError):
-            im.im.color_lut_3d(
-                "RGB", Image.Resampling.BILINEAR, 3, 2, 2, 2, [0, 0, "0"] * 8
-            )
+            im.im.color_lut_3d("RGB", Image.BILINEAR, 3, 2, 2, 2, [0, 0, "0"] * 8)
         with pytest.raises(TypeError):
-            im.im.color_lut_3d("RGB", Image.Resampling.BILINEAR, 3, 2, 2, 2, 16)
+            im.im.color_lut_3d("RGB", Image.BILINEAR, 3, 2, 2, 2, 16)

     def test_correct_args(self):
         im = Image.new("RGB", (10, 10), 0)
-        im.im.color_lut_3d(
-            "RGB", Image.Resampling.BILINEAR, *self.generate_identity_table(3, 3)
-        )
+        im.im.color_lut_3d("RGB", Image.BILINEAR, *self.generate_identity_table(3, 3))
-        im.im.color_lut_3d(
-            "CMYK", Image.Resampling.BILINEAR, *self.generate_identity_table(4, 3)
-        )
+        im.im.color_lut_3d("CMYK", Image.BILINEAR, *self.generate_identity_table(4, 3))
         im.im.color_lut_3d(
             "RGB",
-            Image.Resampling.BILINEAR,
+            Image.BILINEAR,
             *self.generate_identity_table(3, (2, 3, 3)),
         )
         im.im.color_lut_3d(
             "RGB",
-            Image.Resampling.BILINEAR,
+            Image.BILINEAR,
             *self.generate_identity_table(3, (65, 3, 3)),
         )
         im.im.color_lut_3d(
             "RGB",
-            Image.Resampling.BILINEAR,
+            Image.BILINEAR,
             *self.generate_identity_table(3, (3, 65, 3)),
         )
         im.im.color_lut_3d(
             "RGB",
-            Image.Resampling.BILINEAR,
+            Image.BILINEAR,
             *self.generate_identity_table(3, (3, 3, 65)),
         )
@@ -138,53 +128,41 @@ class TestColorLut3DCoreAPI:
         with pytest.raises(ValueError, match="wrong mode"):
             im = Image.new("L", (10, 10), 0)
             im.im.color_lut_3d(
-                "RGB", Image.Resampling.BILINEAR, *self.generate_identity_table(3, 3)
+                "RGB", Image.BILINEAR, *self.generate_identity_table(3, 3)
             )
         with pytest.raises(ValueError, match="wrong mode"):
             im = Image.new("RGB", (10, 10), 0)
-            im.im.color_lut_3d(
-                "L", Image.Resampling.BILINEAR, *self.generate_identity_table(3, 3)
-            )
+            im.im.color_lut_3d("L", Image.BILINEAR, *self.generate_identity_table(3, 3))
         with pytest.raises(ValueError, match="wrong mode"):
             im = Image.new("L", (10, 10), 0)
-            im.im.color_lut_3d(
-                "L", Image.Resampling.BILINEAR, *self.generate_identity_table(3, 3)
-            )
+            im.im.color_lut_3d("L", Image.BILINEAR, *self.generate_identity_table(3, 3))
         with pytest.raises(ValueError, match="wrong mode"):
             im = Image.new("RGB", (10, 10), 0)
             im.im.color_lut_3d(
-                "RGBA", Image.Resampling.BILINEAR, *self.generate_identity_table(3, 3)
+                "RGBA", Image.BILINEAR, *self.generate_identity_table(3, 3)
             )
         with pytest.raises(ValueError, match="wrong mode"):
             im = Image.new("RGB", (10, 10), 0)
             im.im.color_lut_3d(
-                "RGB", Image.Resampling.BILINEAR, *self.generate_identity_table(4, 3)
+                "RGB", Image.BILINEAR, *self.generate_identity_table(4, 3)
             )

     def test_correct_mode(self):
         im = Image.new("RGBA", (10, 10), 0)
-        im.im.color_lut_3d(
-            "RGBA", Image.Resampling.BILINEAR, *self.generate_identity_table(3, 3)
-        )
+        im.im.color_lut_3d("RGBA", Image.BILINEAR, *self.generate_identity_table(3, 3))
         im = Image.new("RGBA", (10, 10), 0)
-        im.im.color_lut_3d(
-            "RGBA", Image.Resampling.BILINEAR, *self.generate_identity_table(4, 3)
-        )
+        im.im.color_lut_3d("RGBA", Image.BILINEAR, *self.generate_identity_table(4, 3))
         im = Image.new("RGB", (10, 10), 0)
-        im.im.color_lut_3d(
-            "HSV", Image.Resampling.BILINEAR, *self.generate_identity_table(3, 3)
-        )
+        im.im.color_lut_3d("HSV", Image.BILINEAR, *self.generate_identity_table(3, 3))
         im = Image.new("RGB", (10, 10), 0)
-        im.im.color_lut_3d(
-            "RGBA", Image.Resampling.BILINEAR, *self.generate_identity_table(4, 3)
-        )
+        im.im.color_lut_3d("RGBA", Image.BILINEAR, *self.generate_identity_table(4, 3))

     def test_identities(self):
         g = Image.linear_gradient("L")
@@ -192,8 +170,8 @@ class TestColorLut3DCoreAPI:
             "RGB",
             [
                 g,
-                g.transpose(Image.Transpose.ROTATE_90),
-                g.transpose(Image.Transpose.ROTATE_180),
+                g.transpose(Image.ROTATE_90),
+                g.transpose(Image.ROTATE_180),
             ],
         )
@@ -204,7 +182,7 @@ class TestColorLut3DCoreAPI:
             im._new(
                 im.im.color_lut_3d(
                     "RGB",
-                    Image.Resampling.BILINEAR,
+                    Image.BILINEAR,
                     *self.generate_identity_table(3, size),
                 )
             ),
@@ -216,7 +194,7 @@ class TestColorLut3DCoreAPI:
             im._new(
                 im.im.color_lut_3d(
                     "RGB",
-                    Image.Resampling.BILINEAR,
+                    Image.BILINEAR,
                     *self.generate_identity_table(3, (2, 2, 65)),
                 )
             ),
@@ -228,8 +206,8 @@ class TestColorLut3DCoreAPI:
             "RGB",
             [
                 g,
-                g.transpose(Image.Transpose.ROTATE_90),
-                g.transpose(Image.Transpose.ROTATE_180),
+                g.transpose(Image.ROTATE_90),
+                g.transpose(Image.ROTATE_180),
             ],
         )
@@ -239,7 +217,7 @@ class TestColorLut3DCoreAPI:
             im._new(
                 im.im.color_lut_3d(
                     "RGBA",
-                    Image.Resampling.BILINEAR,
+                    Image.BILINEAR,
                     *self.generate_identity_table(4, 17),
                 )
             ),
@@ -251,9 +229,9 @@ class TestColorLut3DCoreAPI:
             "RGBA",
             [
                 g,
-                g.transpose(Image.Transpose.ROTATE_90),
-                g.transpose(Image.Transpose.ROTATE_180),
-                g.transpose(Image.Transpose.ROTATE_270),
+                g.transpose(Image.ROTATE_90),
+                g.transpose(Image.ROTATE_180),
+                g.transpose(Image.ROTATE_270),
             ],
         )
@@ -262,7 +240,7 @@ class TestColorLut3DCoreAPI:
             im._new(
                 im.im.color_lut_3d(
                     "RGBA",
-                    Image.Resampling.BILINEAR,
+                    Image.BILINEAR,
                     *self.generate_identity_table(3, 17),
                 )
             ),
@@ -274,8 +252,8 @@ class TestColorLut3DCoreAPI:
             "RGB",
             [
                 g,
-                g.transpose(Image.Transpose.ROTATE_90),
-                g.transpose(Image.Transpose.ROTATE_180),
+                g.transpose(Image.ROTATE_90),
+                g.transpose(Image.ROTATE_180),
             ],
         )
@@ -283,7 +261,7 @@ class TestColorLut3DCoreAPI:
         # fmt: off
         assert_image_equal(
             Image.merge('RGB', im.split()[::-1]),
-            im._new(im.im.color_lut_3d('RGB', Image.Resampling.BILINEAR,
+            im._new(im.im.color_lut_3d('RGB', Image.BILINEAR,
                     3, 2, 2, 2, [
                         0, 0, 0,  0, 0, 1,
                         0, 1, 0,  0, 1, 1,
@@ -299,13 +277,13 @@ class TestColorLut3DCoreAPI:
             "RGB",
             [
                 g,
-                g.transpose(Image.Transpose.ROTATE_90),
-                g.transpose(Image.Transpose.ROTATE_180),
+                g.transpose(Image.ROTATE_90),
+                g.transpose(Image.ROTATE_180),
             ],
         )

         # fmt: off
-        transformed = im._new(im.im.color_lut_3d('RGB', Image.Resampling.BILINEAR,
+        transformed = im._new(im.im.color_lut_3d('RGB', Image.BILINEAR,
                               3, 2, 2, 2,
                               [
                                   -1, -1, -1,  2, -1, -1,
@@ -325,7 +303,7 @@ class TestColorLut3DCoreAPI:
         assert transformed[205, 205] == (255, 255, 0)

         # fmt: off
-        transformed = im._new(im.im.color_lut_3d('RGB', Image.Resampling.BILINEAR,
+        transformed = im._new(im.im.color_lut_3d('RGB', Image.BILINEAR,
                               3, 2, 2, 2,
                               [
                                   -3, -3, -3,  5, -3, -3,
@@ -431,8 +409,8 @@ class TestColorLut3DFilter:
             "RGB",
             [
                 g,
-                g.transpose(Image.Transpose.ROTATE_90),
-                g.transpose(Image.Transpose.ROTATE_180),
+                g.transpose(Image.ROTATE_90),
+                g.transpose(Image.ROTATE_180),
             ],
         )
@@ -527,8 +505,8 @@ class TestGenerateColorLut3D:
             "RGB",
             [
                 g,
-                g.transpose(Image.Transpose.ROTATE_90),
-                g.transpose(Image.Transpose.ROTATE_180),
+                g.transpose(Image.ROTATE_90),
+                g.transpose(Image.ROTATE_180),
             ],
         )
         assert im == im.filter(lut)

@@ -119,9 +119,9 @@ def test_apng_dispose_op_previous_frame():
     #     save_all=True,
     #     append_images=[green, blue],
     #     disposal=[
-    #         PngImagePlugin.Disposal.OP_NONE,
-    #         PngImagePlugin.Disposal.OP_PREVIOUS,
-    #         PngImagePlugin.Disposal.OP_PREVIOUS
+    #         PngImagePlugin.APNG_BLEND_OP_NONE,
+    #         PngImagePlugin.APNG_BLEND_OP_PREVIOUS,
+    #         PngImagePlugin.APNG_BLEND_OP_PREVIOUS
     #     ],
     # )
     with Image.open("Tests/images/apng/dispose_op_previous_frame.png") as im:
@@ -461,8 +461,8 @@ def test_apng_save_disposal(tmp_path):
         test_file,
         save_all=True,
         append_images=[green, transparent],
-        disposal=PngImagePlugin.Disposal.OP_NONE,
-        blend=PngImagePlugin.Blend.OP_OVER,
+        disposal=PngImagePlugin.APNG_DISPOSE_OP_NONE,
+        blend=PngImagePlugin.APNG_BLEND_OP_OVER,
     )
     with Image.open(test_file) as im:
         im.seek(2)
@@ -471,16 +471,16 @@ def test_apng_save_disposal(tmp_path):
     # test OP_BACKGROUND
     disposal = [
-        PngImagePlugin.Disposal.OP_NONE,
-        PngImagePlugin.Disposal.OP_BACKGROUND,
-        PngImagePlugin.Disposal.OP_NONE,
+        PngImagePlugin.APNG_DISPOSE_OP_NONE,
+        PngImagePlugin.APNG_DISPOSE_OP_BACKGROUND,
+        PngImagePlugin.APNG_DISPOSE_OP_NONE,
     ]
     red.save(
         test_file,
         save_all=True,
         append_images=[red, transparent],
         disposal=disposal,
-        blend=PngImagePlugin.Blend.OP_OVER,
+        blend=PngImagePlugin.APNG_BLEND_OP_OVER,
     )
     with Image.open(test_file) as im:
         im.seek(2)
@@ -488,15 +488,15 @@ def test_apng_save_disposal(tmp_path):
     assert im.getpixel((64, 32)) == (0, 0, 0, 0)
     disposal = [
-        PngImagePlugin.Disposal.OP_NONE,
-        PngImagePlugin.Disposal.OP_BACKGROUND,
+        PngImagePlugin.APNG_DISPOSE_OP_NONE,
+        PngImagePlugin.APNG_DISPOSE_OP_BACKGROUND,
     ]
     red.save(
         test_file,
         save_all=True,
         append_images=[green],
         disposal=disposal,
-        blend=PngImagePlugin.Blend.OP_OVER,
+        blend=PngImagePlugin.APNG_BLEND_OP_OVER,
     )
     with Image.open(test_file) as im:
         im.seek(1)
@@ -505,9 +505,9 @@ def test_apng_save_disposal(tmp_path):
     # test OP_PREVIOUS
     disposal = [
-        PngImagePlugin.Disposal.OP_NONE,
-        PngImagePlugin.Disposal.OP_PREVIOUS,
-        PngImagePlugin.Disposal.OP_NONE,
+        PngImagePlugin.APNG_DISPOSE_OP_NONE,
+        PngImagePlugin.APNG_DISPOSE_OP_PREVIOUS,
+        PngImagePlugin.APNG_DISPOSE_OP_NONE,
    ]
     red.save(
         test_file,
@@ -515,7 +515,7 @@ def test_apng_save_disposal(tmp_path):
         append_images=[green, red, transparent],
         default_image=True,
         disposal=disposal,
-        blend=PngImagePlugin.Blend.OP_OVER,
+        blend=PngImagePlugin.APNG_BLEND_OP_OVER,
     )
     with Image.open(test_file) as im:
         im.seek(3)
@@ -523,15 +523,15 @@ def test_apng_save_disposal(tmp_path):
     assert im.getpixel((64, 32)) == (0, 255, 0, 255)
     disposal = [
-        PngImagePlugin.Disposal.OP_NONE,
-        PngImagePlugin.Disposal.OP_PREVIOUS,
+        PngImagePlugin.APNG_DISPOSE_OP_NONE,
+        PngImagePlugin.APNG_DISPOSE_OP_PREVIOUS,
     ]
     red.save(
         test_file,
         save_all=True,
         append_images=[green],
         disposal=disposal,
-        blend=PngImagePlugin.Blend.OP_OVER,
+        blend=PngImagePlugin.APNG_BLEND_OP_OVER,
     )
     with Image.open(test_file) as im:
         im.seek(1)
@@ -539,7 +539,7 @@ def test_apng_save_disposal(tmp_path):
     assert im.getpixel((64, 32)) == (0, 255, 0, 255)

     # test info disposal
-    red.info["disposal"] = PngImagePlugin.Disposal.OP_BACKGROUND
+    red.info["disposal"] = PngImagePlugin.APNG_DISPOSE_OP_BACKGROUND
     red.save(
         test_file,
         save_all=True,
@@ -562,7 +562,7 @@ def test_apng_save_disposal_previous(tmp_path):
         test_file,
         save_all=True,
         append_images=[red, green],
-        disposal=PngImagePlugin.Disposal.OP_PREVIOUS,
+        disposal=PngImagePlugin.APNG_DISPOSE_OP_PREVIOUS,
     )
     with Image.open(test_file) as im:
         assert im.getpixel((0, 0)) == (0, 0, 255, 255)
@@ -581,15 +581,15 @@ def test_apng_save_blend(tmp_path):
     # test OP_SOURCE on solid color
     blend = [
-        PngImagePlugin.Blend.OP_OVER,
-        PngImagePlugin.Blend.OP_SOURCE,
+        PngImagePlugin.APNG_BLEND_OP_OVER,
+        PngImagePlugin.APNG_BLEND_OP_SOURCE,
     ]
     red.save(
         test_file,
         save_all=True,
         append_images=[red, green],
         default_image=True,
-        disposal=PngImagePlugin.Disposal.OP_NONE,
+        disposal=PngImagePlugin.APNG_DISPOSE_OP_NONE,
         blend=blend,
     )
     with Image.open(test_file) as im:
@@ -599,15 +599,15 @@ def test_apng_save_blend(tmp_path):
     # test OP_SOURCE on transparent color
     blend = [
-        PngImagePlugin.Blend.OP_OVER,
-        PngImagePlugin.Blend.OP_SOURCE,
+        PngImagePlugin.APNG_BLEND_OP_OVER,
+        PngImagePlugin.APNG_BLEND_OP_SOURCE,
     ]
     red.save(
         test_file,
         save_all=True,
         append_images=[red, transparent],
         default_image=True,
-        disposal=PngImagePlugin.Disposal.OP_NONE,
+        disposal=PngImagePlugin.APNG_DISPOSE_OP_NONE,
         blend=blend,
     )
     with Image.open(test_file) as im:
@@ -621,8 +621,8 @@ def test_apng_save_blend(tmp_path):
         save_all=True,
         append_images=[green, transparent],
         default_image=True,
-        disposal=PngImagePlugin.Disposal.OP_NONE,
-        blend=PngImagePlugin.Blend.OP_OVER,
+        disposal=PngImagePlugin.APNG_DISPOSE_OP_NONE,
+        blend=PngImagePlugin.APNG_BLEND_OP_OVER,
     )
     with Image.open(test_file) as im:
         im.seek(1)
@@ -633,7 +633,7 @@ def test_apng_save_blend(tmp_path):
     assert im.getpixel((64, 32)) == (0, 255, 0, 255)

     # test info blend
-    red.info["blend"] = PngImagePlugin.Blend.OP_OVER
+    red.info["blend"] = PngImagePlugin.APNG_BLEND_OP_OVER
     red.save(test_file, save_all=True, append_images=[green, transparent])
     with Image.open(test_file) as im:
         im.seek(2)
@@ -657,13 +657,3 @@ def test_different_modes_in_later_frames(mode, tmp_path):
     im.save(test_file, save_all=True, append_images=[Image.new(mode, (1, 1))])
     with Image.open(test_file) as reloaded:
         assert reloaded.mode == mode
-
-
-def test_constants_deprecation():
-    for enum, prefix in {
-        PngImagePlugin.Disposal: "APNG_DISPOSE_",
-        PngImagePlugin.Blend: "APNG_BLEND_",
-    }.items():
-        for name in enum.__members__:
-            with pytest.warns(DeprecationWarning):
-                assert getattr(PngImagePlugin, prefix + name) == enum[name]
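For the APNG tests above, the revert restores the module-level APNG_DISPOSE_OP_* and APNG_BLEND_OP_* names in place of the PngImagePlugin.Disposal and PngImagePlugin.Blend enums. A minimal sketch of the save call those tests exercise, assuming a Pillow version that exposes the constant spelling (output path and frame colours are placeholders):

    from PIL import Image, PngImagePlugin

    red = Image.new("RGBA", (128, 64), (255, 0, 0, 255))
    green = Image.new("RGBA", (128, 64), (0, 255, 0, 255))

    # Save a two-frame APNG using the restored module-level constants.
    red.save(
        "temp.png",
        save_all=True,
        append_images=[green],
        disposal=PngImagePlugin.APNG_DISPOSE_OP_NONE,
        blend=PngImagePlugin.APNG_BLEND_OP_OVER,
    )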

@@ -1,6 +1,6 @@
 import pytest

-from PIL import BlpImagePlugin, Image
+from PIL import Image

 from .helper import (
     assert_image_equal,
@@ -72,14 +72,3 @@ def test_crashes(test_file):
     with Image.open(f) as im:
         with pytest.raises(OSError):
             im.load()
-
-
-def test_constants_deprecation():
-    for enum, prefix in {
-        BlpImagePlugin.Format: "BLP_FORMAT_",
-        BlpImagePlugin.Encoding: "BLP_ENCODING_",
-        BlpImagePlugin.AlphaEncoding: "BLP_ALPHA_ENCODING_",
-    }.items():
-        for name in enum.__members__:
-            with pytest.warns(DeprecationWarning):
-                assert getattr(BlpImagePlugin, prefix + name) == enum[name]

@@ -21,12 +21,3 @@ def test_invalid_file():
     with pytest.raises(SyntaxError):
         FtexImagePlugin.FtexImageFile(invalid_file)
-
-
-def test_constants_deprecation():
-    for enum, prefix in {
-        FtexImagePlugin.Format: "FORMAT_",
-    }.items():
-        for name in enum.__members__:
-            with pytest.warns(DeprecationWarning):
-                assert getattr(FtexImagePlugin, prefix + name) == enum[name]

@@ -298,8 +298,8 @@ def test_palette_handling(tmp_path):
     with Image.open(TEST_GIF) as im:
         im = im.convert("RGB")

-        im = im.resize((100, 100), Image.Resampling.LANCZOS)
-        im2 = im.convert("P", palette=Image.Palette.ADAPTIVE, colors=256)
+        im = im.resize((100, 100), Image.LANCZOS)
+        im2 = im.convert("P", palette=Image.ADAPTIVE, colors=256)

         f = str(tmp_path / "temp.gif")
         im2.save(f, optimize=True)
@@ -1175,7 +1175,7 @@ def test_save_I(tmp_path):
 def test_getdata():
     # Test getheader/getdata against legacy values.
     # Create a 'P' image with holes in the palette.
-    im = Image._wedge().resize((16, 16), Image.Resampling.NEAREST)
+    im = Image._wedge().resize((16, 16), Image.NEAREST)
     im.putpalette(ImagePalette.ImagePalette("RGB"))
     im.info = {"background": 0}

@@ -54,9 +54,7 @@ def test_save_to_bytes():
         assert im.mode == reloaded.mode
         assert (64, 64) == reloaded.size
         assert reloaded.format == "ICO"
-        assert_image_equal(
-            reloaded, hopper().resize((64, 64), Image.Resampling.LANCZOS)
-        )
+        assert_image_equal(reloaded, hopper().resize((64, 64), Image.LANCZOS))

     # The other one
     output.seek(0)
@@ -66,9 +64,7 @@
         assert im.mode == reloaded.mode
         assert (32, 32) == reloaded.size
         assert reloaded.format == "ICO"
-        assert_image_equal(
-            reloaded, hopper().resize((32, 32), Image.Resampling.LANCZOS)
-        )
+        assert_image_equal(reloaded, hopper().resize((32, 32), Image.LANCZOS))


 def test_no_duplicates(tmp_path):
@@ -132,7 +128,7 @@ def test_save_to_bytes_bmp(mode):
         assert "RGBA" == reloaded.mode
         assert (64, 64) == reloaded.size
         assert reloaded.format == "ICO"
-        im = hopper(mode).resize((64, 64), Image.Resampling.LANCZOS).convert("RGBA")
+        im = hopper(mode).resize((64, 64), Image.LANCZOS).convert("RGBA")
         assert_image_equal(reloaded, im)

     # The other one
@@ -143,7 +139,7 @@
         assert "RGBA" == reloaded.mode
         assert (32, 32) == reloaded.size
         assert reloaded.format == "ICO"
-        im = hopper(mode).resize((32, 32), Image.Resampling.LANCZOS).convert("RGBA")
+        im = hopper(mode).resize((32, 32), Image.LANCZOS).convert("RGBA")
         assert_image_equal(reloaded, im)

@@ -282,7 +282,7 @@ class TestFileJpeg:
         del exif[0x8769]

         # Assert that it needs to be transposed
-        assert exif[0x0112] == Image.Transpose.TRANSVERSE
+        assert exif[0x0112] == Image.TRANSVERSE
         # Assert that the GPS IFD is present and empty
         assert exif.get_ifd(0x8825) == {}

@@ -336,7 +336,7 @@ def test_subsampling_decode(name):
         # RGB reference images are downscaled
         epsilon = 3e-3
         width, height = width * 2, height * 2
-    expected = im2.resize((width, height), Image.Resampling.NEAREST)
+    expected = im2.resize((width, height), Image.NEAREST)
     assert_image_similar(im, expected, epsilon)

@@ -111,7 +111,7 @@ class TestFileLibTiff(LibTiffTestCase):
         test_file = "Tests/images/hopper_g4_500.tif"
         with Image.open(test_file) as orig:
             out = str(tmp_path / "temp.tif")
-            rot = orig.transpose(Image.Transpose.ROTATE_90)
+            rot = orig.transpose(Image.ROTATE_90)
             assert rot.size == (500, 500)
             rot.save(out)

@@ -77,7 +77,7 @@ def to_rgb_colorsys(im):
 def test_wedge():
-    src = wedge().resize((3 * 32, 32), Image.Resampling.BILINEAR)
+    src = wedge().resize((3 * 32, 32), Image.BILINEAR)
     im = src.convert("HSV")
     comparable = to_hsv_colorsys(src)

@@ -883,31 +883,6 @@ class TestImage:
         with pytest.warns(DeprecationWarning):
             assert Image.CONTAINER == 2

-    def test_constants_deprecation(self):
-        with pytest.warns(DeprecationWarning):
-            assert Image.NEAREST == 0
-        with pytest.warns(DeprecationWarning):
-            assert Image.NONE == 0
-
-        with pytest.warns(DeprecationWarning):
-            assert Image.LINEAR == Image.Resampling.BILINEAR
-        with pytest.warns(DeprecationWarning):
-            assert Image.CUBIC == Image.Resampling.BICUBIC
-        with pytest.warns(DeprecationWarning):
-            assert Image.ANTIALIAS == Image.Resampling.LANCZOS
-
-        for enum in (
-            Image.Transpose,
-            Image.Transform,
-            Image.Resampling,
-            Image.Dither,
-            Image.Palette,
-            Image.Quantize,
-        ):
-            for name in enum.__members__:
-                with pytest.warns(DeprecationWarning):
-                    assert getattr(Image, name) == enum[name]
-
     @pytest.mark.parametrize(
         "path",
         [

@@ -158,7 +158,7 @@ def test_trns_l(tmp_path):
     assert "transparency" in im_p.info
     im_p.save(f)

-    im_p = im.convert("P", palette=Image.Palette.ADAPTIVE)
+    im_p = im.convert("P", palette=Image.ADAPTIVE)
     assert "transparency" in im_p.info
     im_p.save(f)
@@ -181,13 +181,13 @@ def test_trns_RGB(tmp_path):
     assert "transparency" not in im_rgba.info
     im_rgba.save(f)

-    im_p = pytest.warns(UserWarning, im.convert, "P", palette=Image.Palette.ADAPTIVE)
+    im_p = pytest.warns(UserWarning, im.convert, "P", palette=Image.ADAPTIVE)
     assert "transparency" not in im_p.info
     im_p.save(f)

     im = Image.new("RGB", (1, 1))
     im.info["transparency"] = im.getpixel((0, 0))
-    im_p = im.convert("P", palette=Image.Palette.ADAPTIVE)
+    im_p = im.convert("P", palette=Image.ADAPTIVE)
     assert im_p.info["transparency"] == im_p.getpixel((0, 0))
     im_p.save(f)

@@ -14,7 +14,7 @@ def test_sanity():
 def test_roundtrip():
     def getdata(mode):
-        im = hopper(mode).resize((32, 30), Image.Resampling.NEAREST)
+        im = hopper(mode).resize((32, 30), Image.NEAREST)
         data = im.getdata()
         return data[0], len(data), len(list(data))

@@ -47,7 +47,7 @@ class TestImagingPaste:
     @CachedProperty
     def mask_L(self):
-        return self.gradient_L.transpose(Image.Transpose.ROTATE_270)
+        return self.gradient_L.transpose(Image.ROTATE_270)

     @CachedProperty
     def gradient_L(self):
@@ -64,8 +64,8 @@ class TestImagingPaste:
             "RGB",
             [
                 self.gradient_L,
-                self.gradient_L.transpose(Image.Transpose.ROTATE_90),
-                self.gradient_L.transpose(Image.Transpose.ROTATE_180),
+                self.gradient_L.transpose(Image.ROTATE_90),
+                self.gradient_L.transpose(Image.ROTATE_180),
             ],
         )
@@ -75,7 +75,7 @@ class TestImagingPaste:
             "LA",
             [
                 self.gradient_L,
-                self.gradient_L.transpose(Image.Transpose.ROTATE_90),
+                self.gradient_L.transpose(Image.ROTATE_90),
             ],
         )
@@ -85,9 +85,9 @@
             "RGBA",
             [
                 self.gradient_L,
-                self.gradient_L.transpose(Image.Transpose.ROTATE_90),
-                self.gradient_L.transpose(Image.Transpose.ROTATE_180),
-                self.gradient_L.transpose(Image.Transpose.ROTATE_270),
+                self.gradient_L.transpose(Image.ROTATE_90),
+                self.gradient_L.transpose(Image.ROTATE_180),
+                self.gradient_L.transpose(Image.ROTATE_270),
             ],
         )
@@ -97,9 +97,9 @@
             "RGBa",
             [
                 self.gradient_L,
-                self.gradient_L.transpose(Image.Transpose.ROTATE_90),
-                self.gradient_L.transpose(Image.Transpose.ROTATE_180),
-                self.gradient_L.transpose(Image.Transpose.ROTATE_270),
+                self.gradient_L.transpose(Image.ROTATE_90),
+                self.gradient_L.transpose(Image.ROTATE_180),
+                self.gradient_L.transpose(Image.ROTATE_270),
             ],
         )

@@ -25,7 +25,7 @@ def test_libimagequant_quantize():
         libimagequant = parse_version(features.version_feature("libimagequant"))
         if libimagequant < parse_version("4"):
             pytest.skip("Fails with libimagequant earlier than 4.0.0 on ppc64le")
-    converted = image.quantize(100, Image.Quantize.LIBIMAGEQUANT)
+    converted = image.quantize(100, Image.LIBIMAGEQUANT)
     assert converted.mode == "P"
     assert_image_similar(converted.convert("RGB"), image, 15)
     assert len(converted.getcolors()) == 100
@@ -33,7 +33,7 @@
 def test_octree_quantize():
     image = hopper()
-    converted = image.quantize(100, Image.Quantize.FASTOCTREE)
+    converted = image.quantize(100, Image.FASTOCTREE)
     assert converted.mode == "P"
     assert_image_similar(converted.convert("RGB"), image, 20)
     assert len(converted.getcolors()) == 100
@@ -60,7 +60,7 @@ def test_quantize_no_dither():
     with Image.open("Tests/images/caption_6_33_22.png") as palette:
         palette = palette.convert("P")

-    converted = image.quantize(dither=Image.Dither.NONE, palette=palette)
+    converted = image.quantize(dither=Image.NONE, palette=palette)
     assert converted.mode == "P"
     assert converted.palette.palette == palette.palette.palette
@@ -72,7 +72,7 @@ def test_quantize_no_dither2():
     palette = Image.new("P", (1, 1))
     data = (0, 0, 0, 32, 32, 32)
     palette.putpalette(data)
-    quantized = im.quantize(dither=Image.Dither.NONE, palette=palette)
+    quantized = im.quantize(dither=Image.NONE, palette=palette)
     assert tuple(quantized.palette.palette) == data
@@ -86,8 +86,8 @@ def test_quantize_dither_diff():
     with Image.open("Tests/images/caption_6_33_22.png") as palette:
         palette = palette.convert("P")

-    dither = image.quantize(dither=Image.Dither.FLOYDSTEINBERG, palette=palette)
-    nodither = image.quantize(dither=Image.Dither.NONE, palette=palette)
+    dither = image.quantize(dither=Image.FLOYDSTEINBERG, palette=palette)
+    nodither = image.quantize(dither=Image.NONE, palette=palette)

     assert dither.tobytes() != nodither.tobytes()
@@ -112,10 +112,10 @@ def test_transparent_colors_equal():
 @pytest.mark.parametrize(
     "method, color",
     (
-        (Image.Quantize.MEDIANCUT, (0, 0, 0)),
-        (Image.Quantize.MAXCOVERAGE, (0, 0, 0)),
-        (Image.Quantize.FASTOCTREE, (0, 0, 0)),
-        (Image.Quantize.FASTOCTREE, (0, 0, 0, 0)),
+        (Image.MEDIANCUT, (0, 0, 0)),
+        (Image.MAXCOVERAGE, (0, 0, 0)),
+        (Image.FASTOCTREE, (0, 0, 0)),
+        (Image.FASTOCTREE, (0, 0, 0, 0)),
     ),
 )
 def test_palette(method, color):

@@ -103,7 +103,7 @@ def get_image(mode):
     bands = [gradients_image]
     for _ in mode_info.bands[1:]:
         # rotate previous image
-        band = bands[-1].transpose(Image.Transpose.ROTATE_90)
+        band = bands[-1].transpose(Image.ROTATE_90)
         bands.append(band)
     # Correct alpha channel by transforming completely transparent pixels.
     # Low alpha values also emphasize error after alpha multiplication.
@@ -144,26 +144,24 @@ def compare_reduce_with_reference(im, factor, average_diff=0.4, max_diff=1):
     reference = Image.new(im.mode, reduced.size)
     area_size = (im.size[0] // factor[0], im.size[1] // factor[1])
     area_box = (0, 0, area_size[0] * factor[0], area_size[1] * factor[1])
-    area = im.resize(area_size, Image.Resampling.BOX, area_box)
+    area = im.resize(area_size, Image.BOX, area_box)
     reference.paste(area, (0, 0))

     if area_size[0] < reduced.size[0]:
         assert reduced.size[0] - area_size[0] == 1
         last_column_box = (area_box[2], 0, im.size[0], area_box[3])
-        last_column = im.resize(
-            (1, area_size[1]), Image.Resampling.BOX, last_column_box
-        )
+        last_column = im.resize((1, area_size[1]), Image.BOX, last_column_box)
         reference.paste(last_column, (area_size[0], 0))

     if area_size[1] < reduced.size[1]:
         assert reduced.size[1] - area_size[1] == 1
         last_row_box = (0, area_box[3], area_box[2], im.size[1])
-        last_row = im.resize((area_size[0], 1), Image.Resampling.BOX, last_row_box)
+        last_row = im.resize((area_size[0], 1), Image.BOX, last_row_box)
         reference.paste(last_row, (0, area_size[1]))

     if area_size[0] < reduced.size[0] and area_size[1] < reduced.size[1]:
         last_pixel_box = (area_box[2], area_box[3], im.size[0], im.size[1])
-        last_pixel = im.resize((1, 1), Image.Resampling.BOX, last_pixel_box)
+        last_pixel = im.resize((1, 1), Image.BOX, last_pixel_box)
         reference.paste(last_pixel, area_size)

     assert_compare_images(reduced, reference, average_diff, max_diff)

@@ -24,7 +24,7 @@ class TestImagingResampleVulnerability:
         ):
             with pytest.raises(MemoryError):
                 # any resampling filter will do here
-                im.im.resize((xsize, ysize), Image.Resampling.BILINEAR)
+                im.im.resize((xsize, ysize), Image.BILINEAR)

     def test_invalid_size(self):
         im = hopper()
@@ -103,7 +103,7 @@ class TestImagingCoreResampleAccuracy:
     @pytest.mark.parametrize("mode", ("RGBX", "RGB", "La", "L"))
     def test_reduce_box(self, mode):
         case = self.make_case(mode, (8, 8), 0xE1)
-        case = case.resize((4, 4), Image.Resampling.BOX)
+        case = case.resize((4, 4), Image.BOX)
         # fmt: off
         data = ("e1 e1"
                 "e1 e1")
@@ -114,7 +114,7 @@
     @pytest.mark.parametrize("mode", ("RGBX", "RGB", "La", "L"))
     def test_reduce_bilinear(self, mode):
         case = self.make_case(mode, (8, 8), 0xE1)
-        case = case.resize((4, 4), Image.Resampling.BILINEAR)
+        case = case.resize((4, 4), Image.BILINEAR)
         # fmt: off
         data = ("e1 c9"
                 "c9 b7")
@@ -125,7 +125,7 @@
     @pytest.mark.parametrize("mode", ("RGBX", "RGB", "La", "L"))
     def test_reduce_hamming(self, mode):
         case = self.make_case(mode, (8, 8), 0xE1)
-        case = case.resize((4, 4), Image.Resampling.HAMMING)
+        case = case.resize((4, 4), Image.HAMMING)
         # fmt: off
         data = ("e1 da"
                 "da d3")
@@ -137,7 +137,7 @@
     def test_reduce_bicubic(self, mode):
         for mode in ["RGBX", "RGB", "La", "L"]:
             case = self.make_case(mode, (12, 12), 0xE1)
-            case = case.resize((6, 6), Image.Resampling.BICUBIC)
+            case = case.resize((6, 6), Image.BICUBIC)
             # fmt: off
             data = ("e1 e3 d4"
                     "e3 e5 d6"
@@ -149,7 +149,7 @@
     @pytest.mark.parametrize("mode", ("RGBX", "RGB", "La", "L"))
     def test_reduce_lanczos(self, mode):
         case = self.make_case(mode, (16, 16), 0xE1)
-        case = case.resize((8, 8), Image.Resampling.LANCZOS)
+        case = case.resize((8, 8), Image.LANCZOS)
         # fmt: off
         data = ("e1 e0 e4 d7"
                 "e0 df e3 d6"
@@ -162,7 +162,7 @@
     @pytest.mark.parametrize("mode", ("RGBX", "RGB", "La", "L"))
     def test_enlarge_box(self, mode):
         case = self.make_case(mode, (2, 2), 0xE1)
-        case = case.resize((4, 4), Image.Resampling.BOX)
+        case = case.resize((4, 4), Image.BOX)
         # fmt: off
         data = ("e1 e1"
                 "e1 e1")
@@ -173,7 +173,7 @@
     @pytest.mark.parametrize("mode", ("RGBX", "RGB", "La", "L"))
     def test_enlarge_bilinear(self, mode):
         case = self.make_case(mode, (2, 2), 0xE1)
-        case = case.resize((4, 4), Image.Resampling.BILINEAR)
+        case = case.resize((4, 4), Image.BILINEAR)
         # fmt: off
         data = ("e1 b0"
                 "b0 98")
@@ -184,7 +184,7 @@
     @pytest.mark.parametrize("mode", ("RGBX", "RGB", "La", "L"))
     def test_enlarge_hamming(self, mode):
         case = self.make_case(mode, (2, 2), 0xE1)
-        case = case.resize((4, 4), Image.Resampling.HAMMING)
+        case = case.resize((4, 4), Image.HAMMING)
         # fmt: off
         data = ("e1 d2"
                 "d2 c5")
@@ -195,7 +195,7 @@
     @pytest.mark.parametrize("mode", ("RGBX", "RGB", "La", "L"))
     def test_enlarge_bicubic(self, mode):
         case = self.make_case(mode, (4, 4), 0xE1)
-        case = case.resize((8, 8), Image.Resampling.BICUBIC)
+        case = case.resize((8, 8), Image.BICUBIC)
         # fmt: off
         data = ("e1 e5 ee b9"
                 "e5 e9 f3 bc"
@@ -208,7 +208,7 @@
     @pytest.mark.parametrize("mode", ("RGBX", "RGB", "La", "L"))
     def test_enlarge_lanczos(self, mode):
         case = self.make_case(mode, (6, 6), 0xE1)
-        case = case.resize((12, 12), Image.Resampling.LANCZOS)
+        case = case.resize((12, 12), Image.LANCZOS)
         data = (
             "e1 e0 db ed f5 b8"
             "e0 df da ec f3 b7"
@@ -221,9 +221,7 @@
             self.check_case(channel, self.make_sample(data, (12, 12)))

     def test_box_filter_correct_range(self):
-        im = Image.new("RGB", (8, 8), "#1688ff").resize(
-            (100, 100), Image.Resampling.BOX
-        )
+        im = Image.new("RGB", (8, 8), "#1688ff").resize((100, 100), Image.BOX)
         ref = Image.new("RGB", (100, 100), "#1688ff")
         assert_image_equal(im, ref)
@@ -231,7 +229,7 @@
 class TestCoreResampleConsistency:
     def make_case(self, mode, fill):
         im = Image.new(mode, (512, 9), fill)
-        return im.resize((9, 512), Image.Resampling.LANCZOS), im.load()[0, 0]
+        return im.resize((9, 512), Image.LANCZOS), im.load()[0, 0]

     def run_case(self, case):
         channel, color = case
@@ -286,20 +284,20 @@ class TestCoreResampleAlphaCorrect:
     @pytest.mark.xfail(reason="Current implementation isn't precise enough")
     def test_levels_rgba(self):
         case = self.make_levels_case("RGBA")
-        self.run_levels_case(case.resize((512, 32), Image.Resampling.BOX))
-        self.run_levels_case(case.resize((512, 32), Image.Resampling.BILINEAR))
-        self.run_levels_case(case.resize((512, 32), Image.Resampling.HAMMING))
-        self.run_levels_case(case.resize((512, 32), Image.Resampling.BICUBIC))
-        self.run_levels_case(case.resize((512, 32), Image.Resampling.LANCZOS))
+        self.run_levels_case(case.resize((512, 32), Image.BOX))
+        self.run_levels_case(case.resize((512, 32), Image.BILINEAR))
+        self.run_levels_case(case.resize((512, 32), Image.HAMMING))
+        self.run_levels_case(case.resize((512, 32), Image.BICUBIC))
+        self.run_levels_case(case.resize((512, 32), Image.LANCZOS))

     @pytest.mark.xfail(reason="Current implementation isn't precise enough")
     def test_levels_la(self):
         case = self.make_levels_case("LA")
-        self.run_levels_case(case.resize((512, 32), Image.Resampling.BOX))
-        self.run_levels_case(case.resize((512, 32), Image.Resampling.BILINEAR))
-        self.run_levels_case(case.resize((512, 32), Image.Resampling.HAMMING))
-        self.run_levels_case(case.resize((512, 32), Image.Resampling.BICUBIC))
-        self.run_levels_case(case.resize((512, 32), Image.Resampling.LANCZOS))
+        self.run_levels_case(case.resize((512, 32), Image.BOX))
+        self.run_levels_case(case.resize((512, 32), Image.BILINEAR))
+        self.run_levels_case(case.resize((512, 32), Image.HAMMING))
+        self.run_levels_case(case.resize((512, 32), Image.BICUBIC))
+        self.run_levels_case(case.resize((512, 32), Image.LANCZOS))

     def make_dirty_case(self, mode, clean_pixel, dirty_pixel):
         i = Image.new(mode, (64, 64), dirty_pixel)
@@ -324,27 +322,19 @@
     def test_dirty_pixels_rgba(self):
         case = self.make_dirty_case("RGBA", (255, 255, 0, 128), (0, 0, 255, 0))
-        self.run_dirty_case(case.resize((20, 20), Image.Resampling.BOX), (255, 255, 0))
-        self.run_dirty_case(
-            case.resize((20, 20), Image.Resampling.BILINEAR), (255, 255, 0)
-        )
-        self.run_dirty_case(
-            case.resize((20, 20), Image.Resampling.HAMMING), (255, 255, 0)
-        )
-        self.run_dirty_case(
-            case.resize((20, 20), Image.Resampling.BICUBIC), (255, 255, 0)
-        )
-        self.run_dirty_case(
-            case.resize((20, 20), Image.Resampling.LANCZOS), (255, 255, 0)
-        )
+        self.run_dirty_case(case.resize((20, 20), Image.BOX), (255, 255, 0))
+        self.run_dirty_case(case.resize((20, 20), Image.BILINEAR), (255, 255, 0))
+        self.run_dirty_case(case.resize((20, 20), Image.HAMMING), (255, 255, 0))
+        self.run_dirty_case(case.resize((20, 20), Image.BICUBIC), (255, 255, 0))
+        self.run_dirty_case(case.resize((20, 20), Image.LANCZOS), (255, 255, 0))

     def test_dirty_pixels_la(self):
         case = self.make_dirty_case("LA", (255, 128), (0, 0))
-        self.run_dirty_case(case.resize((20, 20), Image.Resampling.BOX), (255,))
-        self.run_dirty_case(case.resize((20, 20), Image.Resampling.BILINEAR), (255,))
-        self.run_dirty_case(case.resize((20, 20), Image.Resampling.HAMMING), (255,))
-        self.run_dirty_case(case.resize((20, 20), Image.Resampling.BICUBIC), (255,))
-        self.run_dirty_case(case.resize((20, 20), Image.Resampling.LANCZOS), (255,))
+        self.run_dirty_case(case.resize((20, 20), Image.BOX), (255,))
+        self.run_dirty_case(case.resize((20, 20), Image.BILINEAR), (255,))
+        self.run_dirty_case(case.resize((20, 20), Image.HAMMING), (255,))
+        self.run_dirty_case(case.resize((20, 20), Image.BICUBIC), (255,))
+        self.run_dirty_case(case.resize((20, 20), Image.LANCZOS), (255,))


 class TestCoreResamplePasses:
@@ -357,26 +347,26 @@ class TestCoreResamplePasses:
     def test_horizontal(self):
         im = hopper("L")
         with self.count(1):
-            im.resize((im.size[0] - 10, im.size[1]), Image.Resampling.BILINEAR)
+            im.resize((im.size[0] - 10, im.size[1]), Image.BILINEAR)

     def test_vertical(self):
         im = hopper("L")
         with self.count(1):
-            im.resize((im.size[0], im.size[1] - 10), Image.Resampling.BILINEAR)
+            im.resize((im.size[0], im.size[1] - 10), Image.BILINEAR)

     def test_both(self):
         im = hopper("L")
         with self.count(2):
-            im.resize((im.size[0] - 10, im.size[1] - 10), Image.Resampling.BILINEAR)
+            im.resize((im.size[0] - 10, im.size[1] - 10), Image.BILINEAR)

     def test_box_horizontal(self):
         im = hopper("L")
         box = (20, 0, im.size[0] - 20, im.size[1])
         with self.count(1):
             # the same size, but different box
-            with_box = im.resize(im.size, Image.Resampling.BILINEAR, box)
+            with_box = im.resize(im.size, Image.BILINEAR, box)
         with self.count(2):
-            cropped = im.crop(box).resize(im.size, Image.Resampling.BILINEAR)
+            cropped = im.crop(box).resize(im.size, Image.BILINEAR)
         assert_image_similar(with_box, cropped, 0.1)

     def test_box_vertical(self):
@@ -384,9 +374,9 @@
         box = (0, 20, im.size[0], im.size[1] - 20)
         with self.count(1):
             # the same size, but different box
-            with_box = im.resize(im.size, Image.Resampling.BILINEAR, box)
+            with_box = im.resize(im.size, Image.BILINEAR, box)
         with self.count(2):
-            cropped = im.crop(box).resize(im.size, Image.Resampling.BILINEAR)
+            cropped = im.crop(box).resize(im.size, Image.BILINEAR)
         assert_image_similar(with_box, cropped, 0.1)
@@ -399,7 +389,7 @@ class TestCoreResampleCoefficients:
             draw = ImageDraw.Draw(i)
             draw.rectangle((0, 0, i.size[0] // 2 - 1, 0), test_color)

-            px = i.resize((5, i.size[1]), Image.Resampling.BICUBIC).load()
+            px = i.resize((5, i.size[1]), Image.BICUBIC).load()
             if px[2, 0] != test_color // 2:
                 assert test_color // 2 == px[2, 0]
@@ -407,7 +397,7 @@
         # regression test for the wrong coefficients calculation
         # due to bug https://github.com/python-pillow/Pillow/issues/2161
         im = Image.new("RGBA", (1280, 1280), (0x20, 0x40, 0x60, 0xFF))
-        histogram = im.resize((256, 256), Image.Resampling.BICUBIC).histogram()
+        histogram = im.resize((256, 256), Image.BICUBIC).histogram()

         # first channel
         assert histogram[0x100 * 0 + 0x20] == 0x10000
@@ -423,12 +413,12 @@ class TestCoreResampleBox:
     @pytest.mark.parametrize(
         "resample",
         (
-            Image.Resampling.NEAREST,
-            Image.Resampling.BOX,
-            Image.Resampling.BILINEAR,
-            Image.Resampling.HAMMING,
-            Image.Resampling.BICUBIC,
-            Image.Resampling.LANCZOS,
+            Image.NEAREST,
+            Image.BOX,
+            Image.BILINEAR,
+            Image.HAMMING,
+            Image.BICUBIC,
+            Image.LANCZOS,
         ),
     )
     def test_wrong_arguments(self, resample):
@@ -470,7 +460,7 @@
         for y0, y1 in split_range(dst_size[1], ytiles):
             for x0, x1 in split_range(dst_size[0], xtiles):
                 box = (x0 * scale[0], y0 * scale[1], x1 * scale[0], y1 * scale[1])
-                tile = im.resize((x1 - x0, y1 - y0), Image.Resampling.BICUBIC, box)
+                tile = im.resize((x1 - x0, y1 - y0), Image.BICUBIC, box)
                 tiled.paste(tile, (x0, y0))
         return tiled
@@ -481,7 +471,7 @@
         with Image.open("Tests/images/flower.jpg") as im:
             assert im.size == (480, 360)
             dst_size = (251, 188)
-            reference = im.resize(dst_size, Image.Resampling.BICUBIC)
+            reference = im.resize(dst_size, Image.BICUBIC)

             for tiles in [(1, 1), (3, 3), (9, 7), (100, 100)]:
                 tiled = self.resize_tiled(im, dst_size, *tiles)
@@ -497,16 +487,12 @@
             assert im.size == (480, 360)
             dst_size = (48, 36)
             # Reference is cropped image resized to destination
-            reference = im.crop((0, 0, 473, 353)).resize(
-                dst_size, Image.Resampling.BICUBIC
-            )
-            # Image.Resampling.BOX emulates supersampling (480 / 8 = 60, 360 / 8 = 45)
-            supersampled = im.resize((60, 45), Image.Resampling.BOX)
+            reference = im.crop((0, 0, 473, 353)).resize(dst_size, Image.BICUBIC)
+            # Image.BOX emulates supersampling (480 / 8 = 60, 360 / 8 = 45)
+            supersampled = im.resize((60, 45), Image.BOX)

-            with_box = supersampled.resize(
-                dst_size, Image.Resampling.BICUBIC, (0, 0, 59.125, 44.125)
-            )
-            without_box = supersampled.resize(dst_size, Image.Resampling.BICUBIC)
+            with_box = supersampled.resize(dst_size, Image.BICUBIC, (0, 0, 59.125, 44.125))
+            without_box = supersampled.resize(dst_size, Image.BICUBIC)

             # error with box should be much smaller than without
             assert_image_similar(reference, with_box, 6)
@@ -514,9 +500,7 @@
             assert_image_similar(reference, without_box, 5)

     @pytest.mark.parametrize("mode", ("RGB", "L", "RGBA", "LA", "I", ""))
-    @pytest.mark.parametrize(
-        "resample", (Image.Resampling.NEAREST, Image.Resampling.BILINEAR)
-    )
+    @pytest.mark.parametrize("resample", (Image.NEAREST, Image.BILINEAR))
     def test_formats(self, mode, resample):
         im = hopper(mode)
         box = (20, 20, im.size[0] - 20, im.size[1] - 20)
@@ -534,7 +518,7 @@
             ((40, 50), (10, 0, 50, 50)),
             ((40, 50), (10, 20, 50, 70)),
         ]:
-            res = im.resize(size, Image.Resampling.LANCZOS, box)
+            res = im.resize(size, Image.LANCZOS, box)
             assert res.size == size
             assert_image_equal(res, im.crop(box), f">>> {size} {box}")
@@ -548,15 +532,13 @@
             ((40, 50), (10.4, 0.4, 50.4, 50.4)),
             ((40, 50), (10.4, 20.4, 50.4, 70.4)),
         ]:
-            res = im.resize(size, Image.Resampling.LANCZOS, box)
+            res = im.resize(size, Image.LANCZOS, box)
             assert res.size == size
             with pytest.raises(AssertionError, match=r"difference \d"):
                 # check that the difference at least that much
                 assert_image_similar(res, im.crop(box), 20, f">>> {size} {box}")

-    @pytest.mark.parametrize(
-        "flt", (Image.Resampling.NEAREST, Image.Resampling.BICUBIC)
-    )
+    @pytest.mark.parametrize("flt", (Image.NEAREST, Image.BICUBIC))
     def test_skip_horizontal(self, flt):
         # Can skip resize for one dimension
         im = hopper()
@@ -577,9 +559,7 @@
                 f">>> {size} {box} {flt}",
             )

-    @pytest.mark.parametrize(
-        "flt", (Image.Resampling.NEAREST, Image.Resampling.BICUBIC)
-    )
+    @pytest.mark.parametrize("flt", (Image.NEAREST, Image.BICUBIC))
     def test_skip_vertical(self, flt):
         # Can skip resize for one dimension
         im = hopper()

@@ -27,21 +27,21 @@ class TestImagingCoreResize:
     )
     def test_nearest_mode(self, mode):
         im = hopper(mode)
-        r = self.resize(im, (15, 12), Image.Resampling.NEAREST)
+        r = self.resize(im, (15, 12), Image.NEAREST)
         assert r.mode == mode
         assert r.size == (15, 12)
         assert r.im.bands == im.im.bands

     def test_convolution_modes(self):
         with pytest.raises(ValueError):
-            self.resize(hopper("1"), (15, 12), Image.Resampling.BILINEAR)
+            self.resize(hopper("1"), (15, 12), Image.BILINEAR)
         with pytest.raises(ValueError):
-            self.resize(hopper("P"), (15, 12), Image.Resampling.BILINEAR)
+            self.resize(hopper("P"), (15, 12), Image.BILINEAR)
         with pytest.raises(ValueError):
-            self.resize(hopper("I;16"), (15, 12), Image.Resampling.BILINEAR)
+            self.resize(hopper("I;16"), (15, 12), Image.BILINEAR)
         for mode in ["L", "I", "F", "RGB", "RGBA", "CMYK", "YCbCr"]:
             im = hopper(mode)
-            r = self.resize(im, (15, 12), Image.Resampling.BILINEAR)
+            r = self.resize(im, (15, 12), Image.BILINEAR)
             assert r.mode == mode
             assert r.size == (15, 12)
             assert r.im.bands == im.im.bands
@@ -49,12 +49,12 @@
     @pytest.mark.parametrize(
         "resample",
         (
-            Image.Resampling.NEAREST,
-            Image.Resampling.BOX,
-            Image.Resampling.BILINEAR,
-            Image.Resampling.HAMMING,
-            Image.Resampling.BICUBIC,
-            Image.Resampling.LANCZOS,
+            Image.NEAREST,
+            Image.BOX,
+            Image.BILINEAR,
+            Image.HAMMING,
+            Image.BICUBIC,
+            Image.LANCZOS,
         ),
     )
     def test_reduce_filters(self, resample):
@@ -65,12 +65,12 @@
     @pytest.mark.parametrize(
         "resample",
        (
-            Image.Resampling.NEAREST,
-            Image.Resampling.BOX,
-            Image.Resampling.BILINEAR,
-            Image.Resampling.HAMMING,
-            Image.Resampling.BICUBIC,
-            Image.Resampling.LANCZOS,
+            Image.NEAREST,
+            Image.BOX,
+            Image.BILINEAR,
+            Image.HAMMING,
+            Image.BICUBIC,
+            Image.LANCZOS,
         ),
     )
     def test_enlarge_filters(self, resample):
@@ -81,12 +81,12 @@
     @pytest.mark.parametrize(
         "resample",
         (
-            Image.Resampling.NEAREST,
-            Image.Resampling.BOX,
-            Image.Resampling.BILINEAR,
-            Image.Resampling.HAMMING,
-            Image.Resampling.BICUBIC,
-            Image.Resampling.LANCZOS,
+            Image.NEAREST,
+            Image.BOX,
+            Image.BILINEAR,
+            Image.HAMMING,
+            Image.BICUBIC,
+            Image.LANCZOS,
         ),
     )
     @pytest.mark.parametrize(
@@ -129,12 +129,11 @@
     @pytest.mark.parametrize(
         "resample",
         (
-            Image.Resampling.NEAREST,
-            Image.Resampling.BOX,
-            Image.Resampling.BILINEAR,
-            Image.Resampling.HAMMING,
-            Image.Resampling.BICUBIC,
-            Image.Resampling.LANCZOS,
+            Image.NEAREST,
+            Image.BOX,
+            Image.BILINEAR,
+            Image.HAMMING,
+            Image.LANCZOS,
         ),
     )
     def test_enlarge_zero(self, resample):
@@ -171,29 +170,23 @@ def gradients_image():
 class TestReducingGapResize:
     def test_reducing_gap_values(self, gradients_image):
-        ref = gradients_image.resize(
-            (52, 34), Image.Resampling.BICUBIC, reducing_gap=None
-        )
-        im = gradients_image.resize((52, 34), Image.Resampling.BICUBIC)
+        ref = gradients_image.resize((52, 34), Image.BICUBIC, reducing_gap=None)
+        im = gradients_image.resize((52, 34), Image.BICUBIC)
         assert_image_equal(ref, im)

         with pytest.raises(ValueError):
-            gradients_image.resize((52, 34), Image.Resampling.BICUBIC, reducing_gap=0)
+            gradients_image.resize((52, 34), Image.BICUBIC, reducing_gap=0)
         with pytest.raises(ValueError):
-            gradients_image.resize(
-                (52, 34), Image.Resampling.BICUBIC, reducing_gap=0.99
-            )
+            gradients_image.resize((52, 34), Image.BICUBIC, reducing_gap=0.99)

     @pytest.mark.parametrize(
         "box, epsilon",
         ((None, 4), ((1.1, 2.2, 510.8, 510.9), 4), ((3, 10, 410, 256), 10)),
     )
     def test_reducing_gap_1(self, gradients_image, box, epsilon):
-        ref = gradients_image.resize((52, 34), Image.Resampling.BICUBIC, box=box)
-        im = gradients_image.resize(
-            (52, 34), Image.Resampling.BICUBIC, box=box, reducing_gap=1.0
-        )
+        ref = gradients_image.resize((52, 34), Image.BICUBIC, box=box)
+        im = gradients_image.resize((52, 34), Image.BICUBIC, box=box, reducing_gap=1.0)

         with pytest.raises(AssertionError):
             assert_image_equal(ref, im)
@@ -205,10 +198,8 @@ class TestReducingGapResize:
         ((None, 1.5), ((1.1, 2.2, 510.8, 510.9), 1.5), ((3, 10, 410, 256), 1)),
     )
     def test_reducing_gap_2(self, gradients_image, box, epsilon):
-        ref = gradients_image.resize((52, 34), Image.Resampling.BICUBIC, box=box)
-        im = gradients_image.resize(
-            (52, 34), Image.Resampling.BICUBIC, box=box, reducing_gap=2.0
-        )
+        ref = gradients_image.resize((52, 34), Image.BICUBIC, box=box)
+        im = gradients_image.resize((52, 34), Image.BICUBIC, box=box, reducing_gap=2.0)

         with pytest.raises(AssertionError):
             assert_image_equal(ref, im)
@@ -220,10 +211,8 @@ class TestReducingGapResize:
         ((None, 1), ((1.1, 2.2, 510.8, 510.9), 1), ((3, 10, 410, 256), 0.5)),
     )
     def test_reducing_gap_3(self, gradients_image, box, epsilon):
-        ref = gradients_image.resize((52, 34), Image.Resampling.BICUBIC, box=box)
-        im = gradients_image.resize(
-            (52, 34), Image.Resampling.BICUBIC, box=box, reducing_gap=3.0
-        )
+        ref = gradients_image.resize((52, 34), Image.BICUBIC, box=box)
+        im = gradients_image.resize((52, 34), Image.BICUBIC, box=box, reducing_gap=3.0)

         with pytest.raises(AssertionError):
assert_image_equal(ref, im) assert_image_equal(ref, im)
@ -232,10 +221,8 @@ class TestReducingGapResize:
@pytest.mark.parametrize("box", (None, (1.1, 2.2, 510.8, 510.9), (3, 10, 410, 256))) @pytest.mark.parametrize("box", (None, (1.1, 2.2, 510.8, 510.9), (3, 10, 410, 256)))
def test_reducing_gap_8(self, gradients_image, box): def test_reducing_gap_8(self, gradients_image, box):
ref = gradients_image.resize((52, 34), Image.Resampling.BICUBIC, box=box) ref = gradients_image.resize((52, 34), Image.BICUBIC, box=box)
im = gradients_image.resize( im = gradients_image.resize((52, 34), Image.BICUBIC, box=box, reducing_gap=8.0)
(52, 34), Image.Resampling.BICUBIC, box=box, reducing_gap=8.0
)
assert_image_equal(ref, im) assert_image_equal(ref, im)
@ -244,10 +231,8 @@ class TestReducingGapResize:
(((0, 0, 512, 512), 5.5), ((0.9, 1.7, 128, 128), 9.5)), (((0, 0, 512, 512), 5.5), ((0.9, 1.7, 128, 128), 9.5)),
) )
def test_box_filter(self, gradients_image, box, epsilon): def test_box_filter(self, gradients_image, box, epsilon):
ref = gradients_image.resize((52, 34), Image.Resampling.BOX, box=box) ref = gradients_image.resize((52, 34), Image.BOX, box=box)
im = gradients_image.resize( im = gradients_image.resize((52, 34), Image.BOX, box=box, reducing_gap=1.0)
(52, 34), Image.Resampling.BOX, box=box, reducing_gap=1.0
)
assert_image_similar(ref, im, epsilon) assert_image_similar(ref, im, epsilon)
@ -279,11 +264,11 @@ class TestImageResize:
@pytest.mark.parametrize("mode", ("L", "RGB", "I", "F")) @pytest.mark.parametrize("mode", ("L", "RGB", "I", "F"))
def test_default_filter_bicubic(self, mode): def test_default_filter_bicubic(self, mode):
im = hopper(mode) im = hopper(mode)
assert im.resize((20, 20), Image.Resampling.BICUBIC) == im.resize((20, 20)) assert im.resize((20, 20), Image.BICUBIC) == im.resize((20, 20))
@pytest.mark.parametrize( @pytest.mark.parametrize(
"mode", ("1", "P", "I;16", "I;16L", "I;16B", "BGR;15", "BGR;16") "mode", ("1", "P", "I;16", "I;16L", "I;16B", "BGR;15", "BGR;16")
) )
def test_default_filter_nearest(self, mode): def test_default_filter_nearest(self, mode):
im = hopper(mode) im = hopper(mode)
assert im.resize((20, 20), Image.Resampling.NEAREST) == im.resize((20, 20)) assert im.resize((20, 20), Image.NEAREST) == im.resize((20, 20))

View File

@ -48,14 +48,14 @@ def test_zero(angle):
def test_resample(): def test_resample():
# Target image creation, inspected by eye. # Target image creation, inspected by eye.
# >>> im = Image.open('Tests/images/hopper.ppm') # >>> im = Image.open('Tests/images/hopper.ppm')
# >>> im = im.rotate(45, resample=Image.Resampling.BICUBIC, expand=True) # >>> im = im.rotate(45, resample=Image.BICUBIC, expand=True)
# >>> im.save('Tests/images/hopper_45.png') # >>> im.save('Tests/images/hopper_45.png')
with Image.open("Tests/images/hopper_45.png") as target: with Image.open("Tests/images/hopper_45.png") as target:
for (resample, epsilon) in ( for (resample, epsilon) in (
(Image.Resampling.NEAREST, 10), (Image.NEAREST, 10),
(Image.Resampling.BILINEAR, 5), (Image.BILINEAR, 5),
(Image.Resampling.BICUBIC, 0), (Image.BICUBIC, 0),
): ):
im = hopper() im = hopper()
im = im.rotate(45, resample=resample, expand=True) im = im.rotate(45, resample=resample, expand=True)
@ -64,7 +64,7 @@ def test_resample():
def test_center_0(): def test_center_0():
im = hopper() im = hopper()
im = im.rotate(45, center=(0, 0), resample=Image.Resampling.BICUBIC) im = im.rotate(45, center=(0, 0), resample=Image.BICUBIC)
with Image.open("Tests/images/hopper_45.png") as target: with Image.open("Tests/images/hopper_45.png") as target:
target_origin = target.size[1] / 2 target_origin = target.size[1] / 2
@ -75,7 +75,7 @@ def test_center_0():
def test_center_14(): def test_center_14():
im = hopper() im = hopper()
im = im.rotate(45, center=(14, 14), resample=Image.Resampling.BICUBIC) im = im.rotate(45, center=(14, 14), resample=Image.BICUBIC)
with Image.open("Tests/images/hopper_45.png") as target: with Image.open("Tests/images/hopper_45.png") as target:
target_origin = target.size[1] / 2 - 14 target_origin = target.size[1] / 2 - 14
@ -92,7 +92,7 @@ def test_translate():
(target_origin, target_origin, target_origin + 128, target_origin + 128) (target_origin, target_origin, target_origin + 128, target_origin + 128)
) )
im = im.rotate(45, translate=(5, 5), resample=Image.Resampling.BICUBIC) im = im.rotate(45, translate=(5, 5), resample=Image.BICUBIC)
assert_image_similar(im, target, 1) assert_image_similar(im, target, 1)

View File

@ -129,24 +129,24 @@ def test_DCT_scaling_edges():
thumb = fromstring(tostring(im, "JPEG", quality=99, subsampling=0)) thumb = fromstring(tostring(im, "JPEG", quality=99, subsampling=0))
# small reducing_gap to amplify the effect # small reducing_gap to amplify the effect
thumb.thumbnail((32, 32), Image.Resampling.BICUBIC, reducing_gap=1.0) thumb.thumbnail((32, 32), Image.BICUBIC, reducing_gap=1.0)
ref = im.resize((32, 32), Image.Resampling.BICUBIC) ref = im.resize((32, 32), Image.BICUBIC)
# This is still JPEG, some error is present. Without the fix it is 11.5 # This is still JPEG, some error is present. Without the fix it is 11.5
assert_image_similar(thumb, ref, 1.5) assert_image_similar(thumb, ref, 1.5)
def test_reducing_gap_values(): def test_reducing_gap_values():
im = hopper() im = hopper()
im.thumbnail((18, 18), Image.Resampling.BICUBIC) im.thumbnail((18, 18), Image.BICUBIC)
ref = hopper() ref = hopper()
ref.thumbnail((18, 18), Image.Resampling.BICUBIC, reducing_gap=2.0) ref.thumbnail((18, 18), Image.BICUBIC, reducing_gap=2.0)
# reducing_gap=2.0 should be the default # reducing_gap=2.0 should be the default
assert_image_equal(ref, im) assert_image_equal(ref, im)
ref = hopper() ref = hopper()
ref.thumbnail((18, 18), Image.Resampling.BICUBIC, reducing_gap=None) ref.thumbnail((18, 18), Image.BICUBIC, reducing_gap=None)
with pytest.raises(AssertionError): with pytest.raises(AssertionError):
assert_image_equal(ref, im) assert_image_equal(ref, im)
@ -157,9 +157,9 @@ def test_reducing_gap_for_DCT_scaling():
with Image.open("Tests/images/hopper.jpg") as ref: with Image.open("Tests/images/hopper.jpg") as ref:
# thumbnail should call draft with reducing_gap scale # thumbnail should call draft with reducing_gap scale
ref.draft(None, (18 * 3, 18 * 3)) ref.draft(None, (18 * 3, 18 * 3))
ref = ref.resize((18, 18), Image.Resampling.BICUBIC) ref = ref.resize((18, 18), Image.BICUBIC)
with Image.open("Tests/images/hopper.jpg") as im: with Image.open("Tests/images/hopper.jpg") as im:
im.thumbnail((18, 18), Image.Resampling.BICUBIC, reducing_gap=3.0) im.thumbnail((18, 18), Image.BICUBIC, reducing_gap=3.0)
assert_image_similar(ref, im, 1.4) assert_image_similar(ref, im, 1.4)

View File

@ -34,22 +34,20 @@ class TestImageTransform:
def test_palette(self): def test_palette(self):
with Image.open("Tests/images/hopper.gif") as im: with Image.open("Tests/images/hopper.gif") as im:
transformed = im.transform( transformed = im.transform(im.size, Image.AFFINE, [1, 0, 0, 0, 1, 0])
im.size, Image.Transform.AFFINE, [1, 0, 0, 0, 1, 0]
)
assert im.palette.palette == transformed.palette.palette assert im.palette.palette == transformed.palette.palette
def test_extent(self): def test_extent(self):
im = hopper("RGB") im = hopper("RGB")
(w, h) = im.size (w, h) = im.size
# fmt: off # fmt: off
transformed = im.transform(im.size, Image.Transform.EXTENT, transformed = im.transform(im.size, Image.EXTENT,
(0, 0, (0, 0,
w//2, h//2), # ul -> lr w//2, h//2), # ul -> lr
Image.Resampling.BILINEAR) Image.BILINEAR)
# fmt: on # fmt: on
scaled = im.resize((w * 2, h * 2), Image.Resampling.BILINEAR).crop((0, 0, w, h)) scaled = im.resize((w * 2, h * 2), Image.BILINEAR).crop((0, 0, w, h))
# undone -- precision? # undone -- precision?
assert_image_similar(transformed, scaled, 23) assert_image_similar(transformed, scaled, 23)
@ -59,18 +57,18 @@ class TestImageTransform:
im = hopper("RGB") im = hopper("RGB")
(w, h) = im.size (w, h) = im.size
# fmt: off # fmt: off
transformed = im.transform(im.size, Image.Transform.QUAD, transformed = im.transform(im.size, Image.QUAD,
(0, 0, 0, h//2, (0, 0, 0, h//2,
# ul -> ccw around quad: # ul -> ccw around quad:
w//2, h//2, w//2, 0), w//2, h//2, w//2, 0),
Image.Resampling.BILINEAR) Image.BILINEAR)
# fmt: on # fmt: on
scaled = im.transform( scaled = im.transform(
(w, h), (w, h),
Image.Transform.AFFINE, Image.AFFINE,
(0.5, 0, 0, 0, 0.5, 0), (0.5, 0, 0, 0, 0.5, 0),
Image.Resampling.BILINEAR, Image.BILINEAR,
) )
assert_image_equal(transformed, scaled) assert_image_equal(transformed, scaled)
@ -88,9 +86,9 @@ class TestImageTransform:
(w, h) = im.size (w, h) = im.size
transformed = im.transform( transformed = im.transform(
im.size, im.size,
Image.Transform.EXTENT, Image.EXTENT,
(0, 0, w * 2, h * 2), (0, 0, w * 2, h * 2),
Image.Resampling.BILINEAR, Image.BILINEAR,
fillcolor="red", fillcolor="red",
) )
assert transformed.getpixel((w - 1, h - 1)) == expected_pixel assert transformed.getpixel((w - 1, h - 1)) == expected_pixel
@ -100,21 +98,21 @@ class TestImageTransform:
im = hopper("RGBA") im = hopper("RGBA")
(w, h) = im.size (w, h) = im.size
# fmt: off # fmt: off
transformed = im.transform(im.size, Image.Transform.MESH, transformed = im.transform(im.size, Image.MESH,
[((0, 0, w//2, h//2), # box [((0, 0, w//2, h//2), # box
(0, 0, 0, h, (0, 0, 0, h,
w, h, w, 0)), # ul -> ccw around quad w, h, w, 0)), # ul -> ccw around quad
((w//2, h//2, w, h), # box ((w//2, h//2, w, h), # box
(0, 0, 0, h, (0, 0, 0, h,
w, h, w, 0))], # ul -> ccw around quad w, h, w, 0))], # ul -> ccw around quad
Image.Resampling.BILINEAR) Image.BILINEAR)
# fmt: on # fmt: on
scaled = im.transform( scaled = im.transform(
(w // 2, h // 2), (w // 2, h // 2),
Image.Transform.AFFINE, Image.AFFINE,
(2, 0, 0, 0, 2, 0), (2, 0, 0, 0, 2, 0),
Image.Resampling.BILINEAR, Image.BILINEAR,
) )
checker = Image.new("RGBA", im.size) checker = Image.new("RGBA", im.size)
@ -147,16 +145,14 @@ class TestImageTransform:
def test_alpha_premult_resize(self): def test_alpha_premult_resize(self):
def op(im, sz): def op(im, sz):
return im.resize(sz, Image.Resampling.BILINEAR) return im.resize(sz, Image.BILINEAR)
self._test_alpha_premult(op) self._test_alpha_premult(op)
def test_alpha_premult_transform(self): def test_alpha_premult_transform(self):
def op(im, sz): def op(im, sz):
(w, h) = im.size (w, h) = im.size
return im.transform( return im.transform(sz, Image.EXTENT, (0, 0, w, h), Image.BILINEAR)
sz, Image.Transform.EXTENT, (0, 0, w, h), Image.Resampling.BILINEAR
)
self._test_alpha_premult(op) self._test_alpha_premult(op)
@ -183,7 +179,7 @@ class TestImageTransform:
@pytest.mark.parametrize("mode", ("RGBA", "LA")) @pytest.mark.parametrize("mode", ("RGBA", "LA"))
def test_nearest_resize(self, mode): def test_nearest_resize(self, mode):
def op(im, sz): def op(im, sz):
return im.resize(sz, Image.Resampling.NEAREST) return im.resize(sz, Image.NEAREST)
self._test_nearest(op, mode) self._test_nearest(op, mode)
@ -191,9 +187,7 @@ class TestImageTransform:
def test_nearest_transform(self, mode): def test_nearest_transform(self, mode):
def op(im, sz): def op(im, sz):
(w, h) = im.size (w, h) = im.size
return im.transform( return im.transform(sz, Image.EXTENT, (0, 0, w, h), Image.NEAREST)
sz, Image.Transform.EXTENT, (0, 0, w, h), Image.Resampling.NEAREST
)
self._test_nearest(op, mode) self._test_nearest(op, mode)
@ -224,16 +218,16 @@ class TestImageTransform:
with pytest.raises(ValueError): with pytest.raises(ValueError):
im.transform((100, 100), None) im.transform((100, 100), None)
@pytest.mark.parametrize("resample", (Image.Resampling.BOX, "unknown")) @pytest.mark.parametrize("resample", (Image.BOX, "unknown"))
def test_unknown_resampling_filter(self, resample): def test_unknown_resampling_filter(self, resample):
with hopper() as im: with hopper() as im:
(w, h) = im.size (w, h) = im.size
with pytest.raises(ValueError): with pytest.raises(ValueError):
im.transform((100, 100), Image.Transform.EXTENT, (0, 0, w, h), resample) im.transform((100, 100), Image.EXTENT, (0, 0, w, h), resample)
class TestImageTransformAffine: class TestImageTransformAffine:
transform = Image.Transform.AFFINE transform = Image.AFFINE
def _test_image(self): def _test_image(self):
im = hopper("RGB") im = hopper("RGB")
@ -243,9 +237,9 @@ class TestImageTransformAffine:
"deg, transpose", "deg, transpose",
( (
(0, None), (0, None),
(90, Image.Transpose.ROTATE_90), (90, Image.ROTATE_90),
(180, Image.Transpose.ROTATE_180), (180, Image.ROTATE_180),
(270, Image.Transpose.ROTATE_270), (270, Image.ROTATE_270),
), ),
) )
def test_rotate(self, deg, transpose): def test_rotate(self, deg, transpose):
@ -271,9 +265,9 @@ class TestImageTransformAffine:
transposed = im transposed = im
for resample in [ for resample in [
Image.Resampling.NEAREST, Image.NEAREST,
Image.Resampling.BILINEAR, Image.BILINEAR,
Image.Resampling.BICUBIC, Image.BICUBIC,
]: ]:
transformed = im.transform( transformed = im.transform(
transposed.size, self.transform, matrix, resample transposed.size, self.transform, matrix, resample
@ -293,9 +287,9 @@ class TestImageTransformAffine:
@pytest.mark.parametrize( @pytest.mark.parametrize(
"resample,epsilon", "resample,epsilon",
( (
(Image.Resampling.NEAREST, 0), (Image.NEAREST, 0),
(Image.Resampling.BILINEAR, 2), (Image.BILINEAR, 2),
(Image.Resampling.BICUBIC, 1), (Image.BICUBIC, 1),
), ),
) )
def test_resize(self, scale, epsilon_scale, resample, epsilon): def test_resize(self, scale, epsilon_scale, resample, epsilon):
@ -322,9 +316,9 @@ class TestImageTransformAffine:
@pytest.mark.parametrize( @pytest.mark.parametrize(
"resample, epsilon", "resample, epsilon",
( (
(Image.Resampling.NEAREST, 0), (Image.NEAREST, 0),
(Image.Resampling.BILINEAR, 1.5), (Image.BILINEAR, 1.5),
(Image.Resampling.BICUBIC, 1), (Image.BICUBIC, 1),
), ),
) )
def test_translate(self, x, y, epsilon_scale, resample, epsilon): def test_translate(self, x, y, epsilon_scale, resample, epsilon):
@ -343,4 +337,4 @@ class TestImageTransformAffine:
class TestImageTransformPerspective(TestImageTransformAffine): class TestImageTransformPerspective(TestImageTransformAffine):
# Repeat all tests for AFFINE transformations with PERSPECTIVE # Repeat all tests for AFFINE transformations with PERSPECTIVE
transform = Image.Transform.PERSPECTIVE transform = Image.PERSPECTIVE

View File

@ -1,6 +1,14 @@
import pytest import pytest
from PIL.Image import Transpose from PIL.Image import (
FLIP_LEFT_RIGHT,
FLIP_TOP_BOTTOM,
ROTATE_90,
ROTATE_180,
ROTATE_270,
TRANSPOSE,
TRANSVERSE,
)
from . import helper from . import helper
from .helper import assert_image_equal from .helper import assert_image_equal
@ -14,7 +22,7 @@ HOPPER = {
@pytest.mark.parametrize("mode", HOPPER) @pytest.mark.parametrize("mode", HOPPER)
def test_flip_left_right(mode): def test_flip_left_right(mode):
im = HOPPER[mode] im = HOPPER[mode]
out = im.transpose(Transpose.FLIP_LEFT_RIGHT) out = im.transpose(FLIP_LEFT_RIGHT)
assert out.mode == mode assert out.mode == mode
assert out.size == im.size assert out.size == im.size
@ -28,7 +36,7 @@ def test_flip_left_right(mode):
@pytest.mark.parametrize("mode", HOPPER) @pytest.mark.parametrize("mode", HOPPER)
def test_flip_top_bottom(mode): def test_flip_top_bottom(mode):
im = HOPPER[mode] im = HOPPER[mode]
out = im.transpose(Transpose.FLIP_TOP_BOTTOM) out = im.transpose(FLIP_TOP_BOTTOM)
assert out.mode == mode assert out.mode == mode
assert out.size == im.size assert out.size == im.size
@ -42,7 +50,7 @@ def test_flip_top_bottom(mode):
@pytest.mark.parametrize("mode", HOPPER) @pytest.mark.parametrize("mode", HOPPER)
def test_rotate_90(mode): def test_rotate_90(mode):
im = HOPPER[mode] im = HOPPER[mode]
out = im.transpose(Transpose.ROTATE_90) out = im.transpose(ROTATE_90)
assert out.mode == mode assert out.mode == mode
assert out.size == im.size[::-1] assert out.size == im.size[::-1]
@ -56,7 +64,7 @@ def test_rotate_90(mode):
@pytest.mark.parametrize("mode", HOPPER) @pytest.mark.parametrize("mode", HOPPER)
def test_rotate_180(mode): def test_rotate_180(mode):
im = HOPPER[mode] im = HOPPER[mode]
out = im.transpose(Transpose.ROTATE_180) out = im.transpose(ROTATE_180)
assert out.mode == mode assert out.mode == mode
assert out.size == im.size assert out.size == im.size
@ -70,7 +78,7 @@ def test_rotate_180(mode):
@pytest.mark.parametrize("mode", HOPPER) @pytest.mark.parametrize("mode", HOPPER)
def test_rotate_270(mode): def test_rotate_270(mode):
im = HOPPER[mode] im = HOPPER[mode]
out = im.transpose(Transpose.ROTATE_270) out = im.transpose(ROTATE_270)
assert out.mode == mode assert out.mode == mode
assert out.size == im.size[::-1] assert out.size == im.size[::-1]
@ -84,7 +92,7 @@ def test_rotate_270(mode):
@pytest.mark.parametrize("mode", HOPPER) @pytest.mark.parametrize("mode", HOPPER)
def test_transpose(mode): def test_transpose(mode):
im = HOPPER[mode] im = HOPPER[mode]
out = im.transpose(Transpose.TRANSPOSE) out = im.transpose(TRANSPOSE)
assert out.mode == mode assert out.mode == mode
assert out.size == im.size[::-1] assert out.size == im.size[::-1]
@ -98,7 +106,7 @@ def test_transpose(mode):
@pytest.mark.parametrize("mode", HOPPER) @pytest.mark.parametrize("mode", HOPPER)
def test_tranverse(mode): def test_tranverse(mode):
im = HOPPER[mode] im = HOPPER[mode]
out = im.transpose(Transpose.TRANSVERSE) out = im.transpose(TRANSVERSE)
assert out.mode == mode assert out.mode == mode
assert out.size == im.size[::-1] assert out.size == im.size[::-1]
@ -116,31 +124,27 @@ def test_roundtrip(mode):
def transpose(first, second): def transpose(first, second):
return im.transpose(first).transpose(second) return im.transpose(first).transpose(second)
assert_image_equal(im, transpose(FLIP_LEFT_RIGHT, FLIP_LEFT_RIGHT))
assert_image_equal(im, transpose(FLIP_TOP_BOTTOM, FLIP_TOP_BOTTOM))
assert_image_equal(im, transpose(ROTATE_90, ROTATE_270))
assert_image_equal(im, transpose(ROTATE_180, ROTATE_180))
assert_image_equal( assert_image_equal(
im, transpose(Transpose.FLIP_LEFT_RIGHT, Transpose.FLIP_LEFT_RIGHT) im.transpose(TRANSPOSE),
transpose(ROTATE_90, FLIP_TOP_BOTTOM),
) )
assert_image_equal( assert_image_equal(
im, transpose(Transpose.FLIP_TOP_BOTTOM, Transpose.FLIP_TOP_BOTTOM) im.transpose(TRANSPOSE),
) transpose(ROTATE_270, FLIP_LEFT_RIGHT),
assert_image_equal(im, transpose(Transpose.ROTATE_90, Transpose.ROTATE_270))
assert_image_equal(im, transpose(Transpose.ROTATE_180, Transpose.ROTATE_180))
assert_image_equal(
im.transpose(Transpose.TRANSPOSE),
transpose(Transpose.ROTATE_90, Transpose.FLIP_TOP_BOTTOM),
) )
assert_image_equal( assert_image_equal(
im.transpose(Transpose.TRANSPOSE), im.transpose(TRANSVERSE),
transpose(Transpose.ROTATE_270, Transpose.FLIP_LEFT_RIGHT), transpose(ROTATE_90, FLIP_LEFT_RIGHT),
) )
assert_image_equal( assert_image_equal(
im.transpose(Transpose.TRANSVERSE), im.transpose(TRANSVERSE),
transpose(Transpose.ROTATE_90, Transpose.FLIP_LEFT_RIGHT), transpose(ROTATE_270, FLIP_TOP_BOTTOM),
) )
assert_image_equal( assert_image_equal(
im.transpose(Transpose.TRANSVERSE), im.transpose(TRANSVERSE),
transpose(Transpose.ROTATE_270, Transpose.FLIP_TOP_BOTTOM), transpose(ROTATE_180, TRANSPOSE),
)
assert_image_equal(
im.transpose(Transpose.TRANSVERSE),
transpose(Transpose.ROTATE_180, Transpose.TRANSPOSE),
) )

View File

@ -140,7 +140,7 @@ def test_intent():
skip_missing() skip_missing()
assert ImageCms.getDefaultIntent(SRGB) == 0 assert ImageCms.getDefaultIntent(SRGB) == 0
support = ImageCms.isIntentSupported( support = ImageCms.isIntentSupported(
SRGB, ImageCms.Intent.ABSOLUTE_COLORIMETRIC, ImageCms.Direction.INPUT SRGB, ImageCms.INTENT_ABSOLUTE_COLORIMETRIC, ImageCms.DIRECTION_INPUT
) )
assert support == 1 assert support == 1
@ -153,7 +153,7 @@ def test_profile_object():
# ["sRGB built-in", "", "WhitePoint : D65 (daylight)", "", ""] # ["sRGB built-in", "", "WhitePoint : D65 (daylight)", "", ""]
assert ImageCms.getDefaultIntent(p) == 0 assert ImageCms.getDefaultIntent(p) == 0
support = ImageCms.isIntentSupported( support = ImageCms.isIntentSupported(
p, ImageCms.Intent.ABSOLUTE_COLORIMETRIC, ImageCms.Direction.INPUT p, ImageCms.INTENT_ABSOLUTE_COLORIMETRIC, ImageCms.DIRECTION_INPUT
) )
assert support == 1 assert support == 1
@ -615,13 +615,3 @@ def test_auxiliary_channels_isolated():
) )
assert_image_equal(test_image.convert(dst_format[2]), reference_image) assert_image_equal(test_image.convert(dst_format[2]), reference_image)
def test_constants_deprecation():
for enum, prefix in {
ImageCms.Intent: "INTENT_",
ImageCms.Direction: "DIRECTION_",
}.items():
for name in enum.__members__:
with pytest.warns(DeprecationWarning):
assert getattr(ImageCms, prefix + name) == enum[name]

View File

@ -175,7 +175,7 @@ def test_bitmap():
im = Image.new("RGB", (W, H)) im = Image.new("RGB", (W, H))
draw = ImageDraw.Draw(im) draw = ImageDraw.Draw(im)
with Image.open("Tests/images/pil123rgba.png") as small: with Image.open("Tests/images/pil123rgba.png") as small:
small = small.resize((50, 50), Image.Resampling.NEAREST) small = small.resize((50, 50), Image.NEAREST)
# Act # Act
draw.bitmap((10, 10), small) draw.bitmap((10, 10), small)
@ -295,7 +295,7 @@ def test_ellipse_symmetric():
im = Image.new("RGB", (width, 100)) im = Image.new("RGB", (width, 100))
draw = ImageDraw.Draw(im) draw = ImageDraw.Draw(im)
draw.ellipse(bbox, fill="green", outline="blue") draw.ellipse(bbox, fill="green", outline="blue")
assert_image_equal(im, im.transpose(Image.Transpose.FLIP_LEFT_RIGHT)) assert_image_equal(im, im.transpose(Image.FLIP_LEFT_RIGHT))
def test_ellipse_width(): def test_ellipse_width():

View File

@ -31,7 +31,7 @@ class TestImageFile:
def test_parser(self): def test_parser(self):
def roundtrip(format): def roundtrip(format):
im = hopper("L").resize((1000, 1000), Image.Resampling.NEAREST) im = hopper("L").resize((1000, 1000), Image.NEAREST)
if format in ("MSP", "XBM"): if format in ("MSP", "XBM"):
im = im.convert("1") im = im.convert("1")

View File

@ -35,8 +35,8 @@ def test_sanity():
@pytest.fixture( @pytest.fixture(
scope="module", scope="module",
params=[ params=[
pytest.param(ImageFont.Layout.BASIC), pytest.param(ImageFont.LAYOUT_BASIC),
pytest.param(ImageFont.Layout.RAQM, marks=skip_unless_feature("raqm")), pytest.param(ImageFont.LAYOUT_RAQM, marks=skip_unless_feature("raqm")),
], ],
) )
def layout_engine(request): def layout_engine(request):
@ -182,7 +182,7 @@ def test_getlength(
im = Image.new(mode, (1, 1), 0) im = Image.new(mode, (1, 1), 0)
d = ImageDraw.Draw(im) d = ImageDraw.Draw(im)
if layout_engine == ImageFont.Layout.BASIC: if layout_engine == ImageFont.LAYOUT_BASIC:
length = d.textlength(text, f) length = d.textlength(text, f)
assert length == length_basic assert length == length_basic
else: else:
@ -314,9 +314,7 @@ def test_multiline_spacing(font):
assert_image_similar_tofile(im, "Tests/images/multiline_text_spacing.png", 2.5) assert_image_similar_tofile(im, "Tests/images/multiline_text_spacing.png", 2.5)
@pytest.mark.parametrize( @pytest.mark.parametrize("orientation", (Image.ROTATE_90, Image.ROTATE_270))
"orientation", (Image.Transpose.ROTATE_90, Image.Transpose.ROTATE_270)
)
def test_rotated_transposed_font(font, orientation): def test_rotated_transposed_font(font, orientation):
img_grey = Image.new("L", (100, 100)) img_grey = Image.new("L", (100, 100))
draw = ImageDraw.Draw(img_grey) draw = ImageDraw.Draw(img_grey)
@ -358,9 +356,9 @@ def test_rotated_transposed_font(font, orientation):
"orientation", "orientation",
( (
None, None,
Image.Transpose.ROTATE_180, Image.ROTATE_180,
Image.Transpose.FLIP_LEFT_RIGHT, Image.FLIP_LEFT_RIGHT,
Image.Transpose.FLIP_TOP_BOTTOM, Image.FLIP_TOP_BOTTOM,
), ),
) )
def test_unrotated_transposed_font(font, orientation): def test_unrotated_transposed_font(font, orientation):
@ -398,9 +396,7 @@ def test_unrotated_transposed_font(font, orientation):
assert length_a == length_b assert length_a == length_b
@pytest.mark.parametrize( @pytest.mark.parametrize("orientation", (Image.ROTATE_90, Image.ROTATE_270))
"orientation", (Image.Transpose.ROTATE_90, Image.Transpose.ROTATE_270)
)
def test_rotated_transposed_font_get_mask(font, orientation): def test_rotated_transposed_font_get_mask(font, orientation):
# Arrange # Arrange
text = "mask this" text = "mask this"
@ -417,9 +413,9 @@ def test_rotated_transposed_font_get_mask(font, orientation):
"orientation", "orientation",
( (
None, None,
Image.Transpose.ROTATE_180, Image.ROTATE_180,
Image.Transpose.FLIP_LEFT_RIGHT, Image.FLIP_LEFT_RIGHT,
Image.Transpose.FLIP_TOP_BOTTOM, Image.FLIP_TOP_BOTTOM,
), ),
) )
def test_unrotated_transposed_font_get_mask(font, orientation): def test_unrotated_transposed_font_get_mask(font, orientation):
@ -653,7 +649,7 @@ def test_getsize_stroke(font, stroke_width):
def test_complex_font_settings(): def test_complex_font_settings():
t = ImageFont.truetype(FONT_PATH, FONT_SIZE, layout_engine=ImageFont.Layout.BASIC) t = ImageFont.truetype(FONT_PATH, FONT_SIZE, layout_engine=ImageFont.LAYOUT_BASIC)
with pytest.raises(KeyError): with pytest.raises(KeyError):
t.getmask("абвг", direction="rtl") t.getmask("абвг", direction="rtl")
with pytest.raises(KeyError): with pytest.raises(KeyError):
@ -805,7 +801,7 @@ def test_anchor(layout_engine, anchor, left, top):
name, text = "quick", "Quick" name, text = "quick", "Quick"
path = f"Tests/images/test_anchor_{name}_{anchor}.png" path = f"Tests/images/test_anchor_{name}_{anchor}.png"
if layout_engine == ImageFont.Layout.RAQM: if layout_engine == ImageFont.LAYOUT_RAQM:
width, height = (129, 44) width, height = (129, 44)
else: else:
width, height = (128, 44) width, height = (128, 44)
@ -953,7 +949,7 @@ def test_float_coord(layout_engine, fontmode):
try: try:
assert_image_similar_tofile(im, "Tests/images/text_float_coord.png", 3.9) assert_image_similar_tofile(im, "Tests/images/text_float_coord.png", 3.9)
except AssertionError: except AssertionError:
if fontmode == "1" and layout_engine == ImageFont.Layout.BASIC: if fontmode == "1" and layout_engine == ImageFont.LAYOUT_BASIC:
assert_image_similar_tofile( assert_image_similar_tofile(
im, "Tests/images/text_float_coord_1_alt.png", 1 im, "Tests/images/text_float_coord_1_alt.png", 1
) )
@ -1079,7 +1075,7 @@ def test_render_mono_size():
ttf = ImageFont.truetype( ttf = ImageFont.truetype(
"Tests/fonts/DejaVuSans/DejaVuSans.ttf", "Tests/fonts/DejaVuSans/DejaVuSans.ttf",
18, 18,
layout_engine=ImageFont.Layout.BASIC, layout_engine=ImageFont.LAYOUT_BASIC,
) )
draw.text((10, 10), "r" * 10, "black", ttf) draw.text((10, 10), "r" * 10, "black", ttf)
@ -1103,19 +1099,10 @@ def test_raqm_missing_warning(monkeypatch):
monkeypatch.setattr(ImageFont.core, "HAVE_RAQM", False) monkeypatch.setattr(ImageFont.core, "HAVE_RAQM", False)
with pytest.warns(UserWarning) as record: with pytest.warns(UserWarning) as record:
font = ImageFont.truetype( font = ImageFont.truetype(
FONT_PATH, FONT_SIZE, layout_engine=ImageFont.Layout.RAQM FONT_PATH, FONT_SIZE, layout_engine=ImageFont.LAYOUT_RAQM
) )
assert font.layout_engine == ImageFont.Layout.BASIC assert font.layout_engine == ImageFont.LAYOUT_BASIC
assert str(record[-1].message) == ( assert str(record[-1].message) == (
"Raqm layout was requested, but Raqm is not available. " "Raqm layout was requested, but Raqm is not available. "
"Falling back to basic layout." "Falling back to basic layout."
) )
def test_constants_deprecation():
for enum, prefix in {
ImageFont.Layout: "LAYOUT_",
}.items():
for name in enum.__members__:
with pytest.warns(DeprecationWarning):
assert getattr(ImageFont, prefix + name) == enum[name]

View File

@ -35,7 +35,7 @@ def test_basic(tmp_path, mode):
im_out = im_in.copy() im_out = im_in.copy()
verify(im_out) # copy verify(im_out) # copy
im_out = im_in.transform((w, h), Image.Transform.EXTENT, (0, 0, w, h)) im_out = im_in.transform((w, h), Image.EXTENT, (0, 0, w, h))
verify(im_out) # transform verify(im_out) # transform
filename = str(tmp_path / "temp.im") filename = str(tmp_path / "temp.im")

View File

@ -66,73 +66,6 @@ In effect, ``viewer.show_file("test.jpg")`` will continue to work unchanged.
``viewer.show_file(file="test.jpg")`` will raise a deprecation warning, and suggest ``viewer.show_file(file="test.jpg")`` will raise a deprecation warning, and suggest
``viewer.show_file(path="test.jpg")`` instead. ``viewer.show_file(path="test.jpg")`` instead.
Constants
~~~~~~~~~
.. deprecated:: 9.1.0
A number of constants have been deprecated and will be removed in Pillow 10.0.0
(2023-07-01). Instead, ``enum.IntEnum`` classes have been added.
===================================================== ============================================================
Deprecated Use instead
===================================================== ============================================================
``Image.NONE`` Either ``Image.Dither.NONE`` or ``Image.Resampling.NEAREST``
``Image.NEAREST`` Either ``Image.Dither.NONE`` or ``Image.Resampling.NEAREST``
``Image.ORDERED`` ``Image.Dither.ORDERED``
``Image.RASTERIZE`` ``Image.Dither.RASTERIZE``
``Image.FLOYDSTEINBERG`` ``Image.Dither.FLOYDSTEINBERG``
``Image.WEB`` ``Image.Palette.WEB``
``Image.ADAPTIVE`` ``Image.Palette.ADAPTIVE``
``Image.AFFINE`` ``Image.Transform.AFFINE``
``Image.EXTENT`` ``Image.Transform.EXTENT``
``Image.PERSPECTIVE`` ``Image.Transform.PERSPECTIVE``
``Image.QUAD`` ``Image.Transform.QUAD``
``Image.MESH`` ``Image.Transform.MESH``
``Image.FLIP_LEFT_RIGHT`` ``Image.Transpose.FLIP_LEFT_RIGHT``
``Image.FLIP_TOP_BOTTOM`` ``Image.Transpose.FLIP_TOP_BOTTOM``
``Image.ROTATE_90`` ``Image.Transpose.ROTATE_90``
``Image.ROTATE_180`` ``Image.Transpose.ROTATE_180``
``Image.ROTATE_270`` ``Image.Transpose.ROTATE_270``
``Image.TRANSPOSE`` ``Image.Transpose.TRANSPOSE``
``Image.TRANSVERSE`` ``Image.Transpose.TRANSVERSE``
``Image.BOX`` ``Image.Resampling.BOX``
``Image.BILINEAR`` ``Image.Resampling.BILINEAR``
``Image.LINEAR`` ``Image.Resampling.BILINEAR``
``Image.HAMMING`` ``Image.Resampling.HAMMING``
``Image.BICUBIC`` ``Image.Resampling.BICUBIC``
``Image.CUBIC`` ``Image.Resampling.BICUBIC``
``Image.LANCZOS`` ``Image.Resampling.LANCZOS``
``Image.ANTIALIAS`` ``Image.Resampling.LANCZOS``
``Image.MEDIANCUT`` ``Image.Quantize.MEDIANCUT``
``Image.MAXCOVERAGE`` ``Image.Quantize.MAXCOVERAGE``
``Image.FASTOCTREE`` ``Image.Quantize.FASTOCTREE``
``Image.LIBIMAGEQUANT`` ``Image.Quantize.LIBIMAGEQUANT``
``ImageCms.INTENT_PERCEPTUAL`` ``ImageCms.Intent.PERCEPTUAL``
``ImageCms.INTENT_RELATIVE_COLORIMETRIC`` ``ImageCms.Intent.RELATIVE_COLORIMETRIC``
``ImageCms.INTENT_SATURATION`` ``ImageCms.Intent.SATURATION``
``ImageCms.INTENT_ABSOLUTE_COLORIMETRIC`` ``ImageCms.Intent.ABSOLUTE_COLORIMETRIC``
``ImageCms.DIRECTION_INPUT`` ``ImageCms.Direction.INPUT``
``ImageCms.DIRECTION_OUTPUT`` ``ImageCms.Direction.OUTPUT``
``ImageCms.DIRECTION_PROOF`` ``ImageCms.Direction.PROOF``
``ImageFont.LAYOUT_BASIC`` ``ImageFont.Layout.BASIC``
``ImageFont.LAYOUT_RAQM`` ``ImageFont.Layout.RAQM``
``BlpImagePlugin.BLP_FORMAT_JPEG`` ``BlpImagePlugin.Format.JPEG``
``BlpImagePlugin.BLP_ENCODING_UNCOMPRESSED`` ``BlpImagePlugin.Encoding.UNCOMPRESSED``
``BlpImagePlugin.BLP_ENCODING_DXT`` ``BlpImagePlugin.Encoding.DXT``
``BlpImagePlugin.BLP_ENCODING_UNCOMPRESSED_RAW_RGBA`` ``BlpImagePlugin.Encoding.UNCOMPRESSED_RAW_RGBA``
``BlpImagePlugin.BLP_ALPHA_ENCODING_DXT1`` ``BlpImagePlugin.AlphaEncoding.DXT1``
``BlpImagePlugin.BLP_ALPHA_ENCODING_DXT3`` ``BlpImagePlugin.AlphaEncoding.DXT3``
``BlpImagePlugin.BLP_ALPHA_ENCODING_DXT5`` ``BlpImagePlugin.AlphaEncoding.DXT5``
``FtexImagePlugin.FORMAT_DXT1`` ``FtexImagePlugin.Format.DXT1``
``FtexImagePlugin.FORMAT_UNCOMPRESSED`` ``FtexImagePlugin.Format.UNCOMPRESSED``
``PngImagePlugin.APNG_DISPOSE_OP_NONE`` ``PngImagePlugin.Disposal.OP_NONE``
``PngImagePlugin.APNG_DISPOSE_OP_BACKGROUND`` ``PngImagePlugin.Disposal.OP_BACKGROUND``
``PngImagePlugin.APNG_DISPOSE_OP_PREVIOUS`` ``PngImagePlugin.Disposal.OP_PREVIOUS``
``PngImagePlugin.APNG_BLEND_OP_SOURCE`` ``PngImagePlugin.Blend.OP_SOURCE``
``PngImagePlugin.APNG_BLEND_OP_OVER`` ``PngImagePlugin.Blend.OP_OVER``
===================================================== ============================================================
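For reference, a minimal sketch of how such flat-name aliases are typically kept importable during a deprecation window, via a module-level ``__getattr__`` (the enum members and values here are illustrative only; the same pattern appears in the BlpImagePlugin diff further down)::

import enum
import warnings


class Resampling(enum.IntEnum):
    # Illustrative members only; the real values live in PIL.Image.
    NEAREST = 0
    LANCZOS = 1


def __getattr__(name):
    # Old flat names such as NEAREST keep working but warn on access.
    if name in Resampling.__members__:
        warnings.warn(
            f"{name} is deprecated; use Resampling.{name} instead",
            DeprecationWarning,
        )
        return Resampling[name]
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")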
FitsStubImagePlugin FitsStubImagePlugin
~~~~~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~~~~~

View File

@ -803,12 +803,12 @@ parameter must be set to ``True``. The following parameters can also be set:
operation to be used for this frame before rendering the next frame. operation to be used for this frame before rendering the next frame.
Defaults to 0. Defaults to 0.
* 0 (:py:data:`~PIL.PngImagePlugin.Disposal.OP_NONE`, default) - * 0 (:py:data:`~PIL.PngImagePlugin.APNG_DISPOSE_OP_NONE`, default) -
No disposal is done on this frame before rendering the next frame. No disposal is done on this frame before rendering the next frame.
* 1 (:py:data:`PIL.PngImagePlugin.Disposal.OP_BACKGROUND`) - * 1 (:py:data:`PIL.PngImagePlugin.APNG_DISPOSE_OP_BACKGROUND`) -
This frame's modified region is cleared to fully transparent black before This frame's modified region is cleared to fully transparent black before
rendering the next frame. rendering the next frame.
* 2 (:py:data:`~PIL.PngImagePlugin.Disposal.OP_PREVIOUS`) - * 2 (:py:data:`~PIL.PngImagePlugin.APNG_DISPOSE_OP_PREVIOUS`) -
This frame's modified region is reverted to the previous frame's contents before This frame's modified region is reverted to the previous frame's contents before
rendering the next frame. rendering the next frame.
@ -817,10 +817,10 @@ parameter must be set to ``True``. The following parameters can also be set:
operation to be used for this frame before rendering the next frame. operation to be used for this frame before rendering the next frame.
Defaults to 0. Defaults to 0.
* 0 (:py:data:`~PIL.PngImagePlugin.Blend.OP_SOURCE`) - * 0 (:py:data:`~PIL.PngImagePlugin.APNG_BLEND_OP_SOURCE`) -
All color components of this frame, including alpha, overwrite the previous output All color components of this frame, including alpha, overwrite the previous output
image contents. image contents.
* 1 (:py:data:`~PIL.PngImagePlugin.Blend.OP_OVER`) - * 1 (:py:data:`~PIL.PngImagePlugin.APNG_BLEND_OP_OVER`) -
This frame should be alpha composited with the previous output image contents. This frame should be alpha composited with the previous output image contents.
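As a rough usage sketch (frame contents and timings are placeholders), both parameters are passed straight to :py:meth:`~PIL.Image.Image.save` when writing an animated PNG::

from PIL import Image

red = Image.new("RGBA", (64, 64), (255, 0, 0, 255))
blue = Image.new("RGBA", (64, 64), (0, 0, 255, 128))

# disposal=1 clears each frame before the next one is rendered;
# blend=1 alpha composites each frame over the previous output.
red.save(
    "out.png",
    save_all=True,
    append_images=[blue],
    duration=500,
    disposal=1,
    blend=1,
)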
.. note:: .. note::

View File

@ -155,7 +155,7 @@ Processing a subrectangle, and pasting it back
:: ::
region = region.transpose(Image.Transpose.ROTATE_180) region = region.transpose(Image.ROTATE_180)
im.paste(region, box) im.paste(region, box)
When pasting regions back, the size of the region must match the given region When pasting regions back, the size of the region must match the given region
@ -255,11 +255,11 @@ Transposing an image
:: ::
out = im.transpose(Image.Transpose.FLIP_LEFT_RIGHT) out = im.transpose(Image.FLIP_LEFT_RIGHT)
out = im.transpose(Image.Transpose.FLIP_TOP_BOTTOM) out = im.transpose(Image.FLIP_TOP_BOTTOM)
out = im.transpose(Image.Transpose.ROTATE_90) out = im.transpose(Image.ROTATE_90)
out = im.transpose(Image.Transpose.ROTATE_180) out = im.transpose(Image.ROTATE_180)
out = im.transpose(Image.Transpose.ROTATE_270) out = im.transpose(Image.ROTATE_270)
``transpose(ROTATE)`` operations can also be performed identically with ``transpose(ROTATE)`` operations can also be performed identically with
:py:meth:`~PIL.Image.Image.rotate` operations, provided the ``expand`` flag is :py:meth:`~PIL.Image.Image.rotate` operations, provided the ``expand`` flag is
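For example, assuming the hopper sample image, both spellings give the rotated result with swapped dimensions::

from PIL import Image

with Image.open("hopper.jpg") as im:
    by_transpose = im.transpose(Image.ROTATE_90)
    by_rotate = im.rotate(90, expand=True)
    # Both rotate 90 degrees counter clockwise and swap width and height.
    assert by_transpose.size == by_rotate.size == im.size[::-1]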

View File

@ -255,7 +255,7 @@ This rotates the input image by ``theta`` degrees counter clockwise:
.. automethod:: PIL.Image.Image.transform .. automethod:: PIL.Image.Image.transform
.. automethod:: PIL.Image.Image.transpose .. automethod:: PIL.Image.Image.transpose
This flips the input image by using the :data:`Transpose.FLIP_LEFT_RIGHT` This flips the input image by using the :data:`FLIP_LEFT_RIGHT`
method. method.
.. code-block:: python .. code-block:: python
@ -265,9 +265,9 @@ method.
with Image.open("hopper.jpg") as im: with Image.open("hopper.jpg") as im:
# Flip the image from left to right # Flip the image from left to right
im_flipped = im.transpose(method=Image.Transpose.FLIP_LEFT_RIGHT) im_flipped = im.transpose(method=Image.FLIP_LEFT_RIGHT)
# To flip the image from top to bottom, # To flip the image from top to bottom,
# use the method "Image.Transpose.FLIP_TOP_BOTTOM" # use the method "Image.FLIP_TOP_BOTTOM"
.. automethod:: PIL.Image.Image.verify .. automethod:: PIL.Image.Image.verify
@ -391,57 +391,63 @@ Transpose methods
Used to specify the :meth:`Image.transpose` method to use. Used to specify the :meth:`Image.transpose` method to use.
.. autoclass:: Transpose .. data:: FLIP_LEFT_RIGHT
:members: .. data:: FLIP_TOP_BOTTOM
:undoc-members: .. data:: ROTATE_90
.. data:: ROTATE_180
.. data:: ROTATE_270
.. data:: TRANSPOSE
.. data:: TRANSVERSE
Transform methods Transform methods
^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^
Used to specify the :meth:`Image.transform` method to use. Used to specify the :meth:`Image.transform` method to use.
.. py:class:: Transform .. data:: AFFINE
Affine transform
.. py:attribute:: AFFINE .. data:: EXTENT
Cut out a rectangular subregion
Affine transform .. data:: PERSPECTIVE
Perspective transform
.. py:attribute:: EXTENT .. data:: QUAD
Map a quadrilateral to a rectangle
Cut out a rectangular subregion .. data:: MESH
Map a number of source quadrilaterals in one operation
.. py:attribute:: PERSPECTIVE
Perspective transform
.. py:attribute:: QUAD
Map a quadrilateral to a rectangle
.. py:attribute:: MESH
Map a number of source quadrilaterals in one operation
Resampling filters Resampling filters
^^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^
See :ref:`concept-filters` for details. See :ref:`concept-filters` for details.
.. autoclass:: Resampling .. data:: NEAREST
:members: :noindex:
:undoc-members: .. data:: BOX
:noindex:
.. data:: BILINEAR
:noindex:
.. data:: HAMMING
:noindex:
.. data:: BICUBIC
:noindex:
.. data:: LANCZOS
:noindex:
Some deprecated filters are also available under the following names: Some filters are also available under the following names for backwards compatibility:
.. data:: NONE .. data:: NONE
:noindex: :noindex:
:value: Resampling.NEAREST :value: NEAREST
.. data:: LINEAR .. data:: LINEAR
:value: Resampling.BILINEAR :value: BILINEAR
.. data:: CUBIC .. data:: CUBIC
:value: Resampling.BICUBIC :value: BICUBIC
.. data:: ANTIALIAS .. data:: ANTIALIAS
:value: Resampling.LANCZOS :value: LANCZOS
Dither modes Dither modes
^^^^^^^^^^^^ ^^^^^^^^^^^^
@ -449,56 +455,42 @@ Dither modes
Used to specify the dithering method to use for the Used to specify the dithering method to use for the
:meth:`~Image.convert` and :meth:`~Image.quantize` methods. :meth:`~Image.convert` and :meth:`~Image.quantize` methods.
.. py:class:: Dither .. data:: NONE
:noindex:
.. py:attribute:: NONE No dither
.. comment: (not implemented)
.. data:: ORDERED
.. data:: RASTERIZE
No dither .. data:: FLOYDSTEINBERG
Floyd-Steinberg dither
.. py:attribute:: ORDERED
Not implemented
.. py:attribute:: RASTERIZE
Not implemented
.. py:attribute:: FLOYDSTEINBERG
Floyd-Steinberg dither
Palettes Palettes
^^^^^^^^ ^^^^^^^^
Used to specify the palette to use for the :meth:`~Image.convert` method. Used to specify the palette to use for the :meth:`~Image.convert` method.
.. autoclass:: Palette .. data:: WEB
:members: .. data:: ADAPTIVE
:undoc-members:
Quantization methods Quantization methods
^^^^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^
Used to specify the quantization method to use for the :meth:`~Image.quantize` method. Used to specify the quantization method to use for the :meth:`~Image.quantize` method.
.. py:class:: Quantize .. data:: MEDIANCUT
Median cut. Default method, except for RGBA images. This method does not support
RGBA images.
.. py:attribute:: MEDIANCUT .. data:: MAXCOVERAGE
Maximum coverage. This method does not support RGBA images.
Median cut. Default method, except for RGBA images. This method does not support .. data:: FASTOCTREE
RGBA images. Fast octree. Default method for RGBA images.
.. py:attribute:: MAXCOVERAGE .. data:: LIBIMAGEQUANT
libimagequant
Maximum coverage. This method does not support RGBA images. Check support using :py:func:`PIL.features.check_feature`
with ``feature="libimagequant"``.
.. py:attribute:: FASTOCTREE
Fast octree. Default method for RGBA images.
.. py:attribute:: LIBIMAGEQUANT
libimagequant
Check support using :py:func:`PIL.features.check_feature` with
``feature="libimagequant"``.
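A small usage sketch (file name is a placeholder); the chosen method is passed directly to :py:meth:`~Image.quantize`::

from PIL import Image

with Image.open("hopper.jpg") as im:
    # Median cut with a 128 colour palette; RGBA images would use FASTOCTREE.
    quantized = im.quantize(colors=128, method=Image.MEDIANCUT)
    print(quantized.mode)  # "P"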

View File

@ -118,8 +118,8 @@ can be easily displayed in a chromaticity diagram, for example).
another profile (usually overridden at run-time, but provided here another profile (usually overridden at run-time, but provided here
for DeviceLink and embedded source profiles, see 7.2.15 of ICC.1:2010). for DeviceLink and embedded source profiles, see 7.2.15 of ICC.1:2010).
One of ``ImageCms.Intent.ABSOLUTE_COLORIMETRIC``, ``ImageCms.Intent.PERCEPTUAL``, One of ``ImageCms.INTENT_ABSOLUTE_COLORIMETRIC``, ``ImageCms.INTENT_PERCEPTUAL``,
``ImageCms.Intent.RELATIVE_COLORIMETRIC`` and ``ImageCms.Intent.SATURATION``. ``ImageCms.INTENT_RELATIVE_COLORIMETRIC`` and ``ImageCms.INTENT_SATURATION``.
.. py:attribute:: profile_id .. py:attribute:: profile_id
:type: bytes :type: bytes
@ -313,14 +313,14 @@ can be easily displayed in a chromaticity diagram, for example).
the CLUT model. the CLUT model.
The dictionary is indexed by intents The dictionary is indexed by intents
(``ImageCms.Intent.ABSOLUTE_COLORIMETRIC``, (``ImageCms.INTENT_ABSOLUTE_COLORIMETRIC``,
``ImageCms.Intent.PERCEPTUAL``, ``ImageCms.INTENT_PERCEPTUAL``,
``ImageCms.Intent.RELATIVE_COLORIMETRIC`` and ``ImageCms.INTENT_RELATIVE_COLORIMETRIC`` and
``ImageCms.Intent.SATURATION``). ``ImageCms.INTENT_SATURATION``).
The values are 3-tuples indexed by directions The values are 3-tuples indexed by directions
(``ImageCms.Direction.INPUT``, ``ImageCms.Direction.OUTPUT``, (``ImageCms.DIRECTION_INPUT``, ``ImageCms.DIRECTION_OUTPUT``,
``ImageCms.Direction.PROOF``). ``ImageCms.DIRECTION_PROOF``).
The elements of the tuple are booleans. If the value is ``True``, The elements of the tuple are booleans. If the value is ``True``,
that intent is supported for that direction. that intent is supported for that direction.
@ -331,14 +331,14 @@ can be easily displayed in a chromaticity diagram, for example).
Returns a dictionary of all supported intents and directions. Returns a dictionary of all supported intents and directions.
The dictionary is indexed by intents The dictionary is indexed by intents
(``ImageCms.Intent.ABSOLUTE_COLORIMETRIC``, (``ImageCms.INTENT_ABSOLUTE_COLORIMETRIC``,
``ImageCms.Intent.PERCEPTUAL``, ``ImageCms.INTENT_PERCEPTUAL``,
``ImageCms.Intent.RELATIVE_COLORIMETRIC`` and ``ImageCms.INTENT_RELATIVE_COLORIMETRIC`` and
``ImageCms.Intent.SATURATION``). ``ImageCms.INTENT_SATURATION``).
The values are 3-tuples indexed by directions The values are 3-tuples indexed by directions
(``ImageCms.Direction.INPUT``, ``ImageCms.Direction.OUTPUT``, (``ImageCms.DIRECTION_INPUT``, ``ImageCms.DIRECTION_OUTPUT``,
``ImageCms.Direction.PROOF``). ``ImageCms.DIRECTION_PROOF``).
The elements of the tuple are booleans. If the value is ``True``, The elements of the tuple are booleans. If the value is ``True``,
that intent is supported for that direction. that intent is supported for that direction.
@ -352,11 +352,11 @@ can be easily displayed in a chromaticity diagram, for example).
Note that you can also get this information for all intents and directions Note that you can also get this information for all intents and directions
with :py:attr:`.intent_supported`. with :py:attr:`.intent_supported`.
:param intent: One of ``ImageCms.Intent.ABSOLUTE_COLORIMETRIC``, :param intent: One of ``ImageCms.INTENT_ABSOLUTE_COLORIMETRIC``,
``ImageCms.Intent.PERCEPTUAL``, ``ImageCms.INTENT_PERCEPTUAL``,
``ImageCms.Intent.RELATIVE_COLORIMETRIC`` ``ImageCms.INTENT_RELATIVE_COLORIMETRIC``
and ``ImageCms.Intent.SATURATION``. and ``ImageCms.INTENT_SATURATION``.
:param direction: One of ``ImageCms.Direction.INPUT``, :param direction: One of ``ImageCms.DIRECTION_INPUT``,
``ImageCms.Direction.OUTPUT`` ``ImageCms.DIRECTION_OUTPUT``
and ``ImageCms.Direction.PROOF`` and ``ImageCms.DIRECTION_PROOF``
:return: Boolean if the intent and direction is supported. :return: Boolean if the intent and direction is supported.
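A short sketch of such a query against the built-in sRGB profile (created here with :py:func:`~PIL.ImageCms.createProfile`)::

from PIL import ImageCms

srgb = ImageCms.createProfile("sRGB")
support = ImageCms.isIntentSupported(
    srgb, ImageCms.INTENT_ABSOLUTE_COLORIMETRIC, ImageCms.DIRECTION_INPUT
)
print(support)  # 1 when the intent is supported for that direction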

View File

@ -61,12 +61,12 @@ Methods
Constants Constants
--------- ---------
.. data:: PIL.ImageFont.Layout.BASIC .. data:: PIL.ImageFont.LAYOUT_BASIC
Use basic text layout for TrueType font. Use basic text layout for TrueType font.
Advanced features such as text direction are not supported. Advanced features such as text direction are not supported.
.. data:: PIL.ImageFont.Layout.RAQM .. data:: PIL.ImageFont.LAYOUT_RAQM
Use Raqm text layout for TrueType font. Use Raqm text layout for TrueType font.
Advanced features are supported. Advanced features are supported.
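For example (font path and size are placeholders)::

from PIL import ImageFont

# Request Raqm layout explicitly; if Raqm is unavailable Pillow warns and
# falls back to basic layout.
font = ImageFont.truetype(
    "DejaVuSans.ttf", 24, layout_engine=ImageFont.LAYOUT_RAQM
)
print(font.layout_engine == ImageFont.LAYOUT_RAQM)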

View File

@ -57,7 +57,7 @@ Support for the following features can be checked:
* ``transp_webp``: Support for transparency in WebP images. * ``transp_webp``: Support for transparency in WebP images.
* ``webp_mux``: (compile time) Support for EXIF data in WebP images. * ``webp_mux``: (compile time) Support for EXIF data in WebP images.
* ``webp_anim``: (compile time) Support for animated WebP images. * ``webp_anim``: (compile time) Support for animated WebP images.
* ``raqm``: Raqm library, required for ``ImageFont.Layout.RAQM`` in :py:func:`PIL.ImageFont.truetype`. Run-time version number is available for Raqm 0.7.0 or newer. * ``raqm``: Raqm library, required for ``ImageFont.LAYOUT_RAQM`` in :py:func:`PIL.ImageFont.truetype`. Run-time version number is available for Raqm 0.7.0 or newer.
* ``libimagequant``: (compile time) ImageQuant quantization support in :py:func:`PIL.Image.Image.quantize`. Run-time version number is available. * ``libimagequant``: (compile time) ImageQuant quantization support in :py:func:`PIL.Image.Image.quantize`. Run-time version number is available.
* ``xcb``: (compile time) Support for X11 in :py:func:`PIL.ImageGrab.grab` via the XCB library. * ``xcb``: (compile time) Support for X11 in :py:func:`PIL.ImageGrab.grab` via the XCB library.
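Any of these can be checked at run time, for instance for Raqm::

from PIL import features

if features.check("raqm"):
    print("Raqm available, version:", features.version("raqm"))
else:
    print("Raqm not available; basic text layout will be used")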

View File

@ -230,7 +230,8 @@ Plugin reference
.. automodule:: PIL.PngImagePlugin .. automodule:: PIL.PngImagePlugin
:members: ChunkStream, PngImageFile, PngStream, getchunks, is_cid, putchunk, :members: ChunkStream, PngImageFile, PngStream, getchunks, is_cid, putchunk,
Blend, Disposal, MAX_TEXT_CHUNK, MAX_TEXT_MEMORY MAX_TEXT_CHUNK, MAX_TEXT_MEMORY, APNG_BLEND_OP_SOURCE, APNG_BLEND_OP_OVER,
APNG_DISPOSE_OP_NONE, APNG_DISPOSE_OP_BACKGROUND, APNG_DISPOSE_OP_PREVIOUS
:undoc-members: :undoc-members:
:show-inheritance: :show-inheritance:
:member-order: groupwise :member-order: groupwise

View File

@ -111,14 +111,16 @@ downscaling with libjpeg, which uses supersampling internally, not convolutions.
Image transposition Image transposition
------------------- -------------------
A new method ``TRANSPOSE`` has been added for the A new method :py:data:`PIL.Image.TRANSPOSE` has been added for the
:py:meth:`~PIL.Image.Image.transpose` operation in addition to :py:meth:`~PIL.Image.Image.transpose` operation in addition to
``FLIP_LEFT_RIGHT``, ``FLIP_TOP_BOTTOM``, ``ROTATE_90``, ``ROTATE_180``, :py:data:`~PIL.Image.FLIP_LEFT_RIGHT`, :py:data:`~PIL.Image.FLIP_TOP_BOTTOM`,
``ROTATE_270``. ``TRANSPOSE`` is an algebra transpose, with an image reflected :py:data:`~PIL.Image.ROTATE_90`, :py:data:`~PIL.Image.ROTATE_180`,
across its main diagonal. :py:data:`~PIL.Image.ROTATE_270`. :py:data:`~PIL.Image.TRANSPOSE` is an algebra
transpose, with an image reflected across its main diagonal.
The speed of ``ROTATE_90``, ``ROTATE_270`` and ``TRANSPOSE`` has been significantly The speed of :py:data:`~PIL.Image.ROTATE_90`, :py:data:`~PIL.Image.ROTATE_270`
improved for large images which don't fit in the processor cache. and :py:data:`~PIL.Image.TRANSPOSE` has been significantly improved for large
images which don't fit in the processor cache.
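In terms of the existing constants, the new operation can be reproduced by a 90 degree rotation followed by a vertical flip (sample image name is a placeholder)::

from PIL import Image

with Image.open("hopper.jpg") as im:
    transposed = im.transpose(Image.TRANSPOSE)
    # Reflection across the main diagonal, built from the older operations.
    equivalent = im.transpose(Image.ROTATE_90).transpose(Image.FLIP_TOP_BOTTOM)
    assert list(transposed.getdata()) == list(equivalent.getdata())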
Gaussian blur and unsharp mask Gaussian blur and unsharp mask
------------------------------ ------------------------------

View File

@ -47,71 +47,6 @@ command for installing its contents.
Deprecations Deprecations
============ ============
Constants
^^^^^^^^^
A number of constants have been deprecated and will be removed in Pillow 10.0.0
(2023-07-01). Instead, ``enum.IntEnum`` classes have been added.
===================================================== ============================================================
Deprecated Use instead
===================================================== ============================================================
``Image.NONE`` Either ``Image.Dither.NONE`` or ``Image.Resampling.NEAREST``
``Image.NEAREST`` Either ``Image.Dither.NONE`` or ``Image.Resampling.NEAREST``
``Image.ORDERED`` ``Image.Dither.ORDERED``
``Image.RASTERIZE`` ``Image.Dither.RASTERIZE``
``Image.FLOYDSTEINBERG`` ``Image.Dither.FLOYDSTEINBERG``
``Image.WEB`` ``Image.Palette.WEB``
``Image.ADAPTIVE`` ``Image.Palette.ADAPTIVE``
``Image.AFFINE`` ``Image.Transform.AFFINE``
``Image.EXTENT`` ``Image.Transform.EXTENT``
``Image.PERSPECTIVE`` ``Image.Transform.PERSPECTIVE``
``Image.QUAD`` ``Image.Transform.QUAD``
``Image.MESH`` ``Image.Transform.MESH``
``Image.FLIP_LEFT_RIGHT`` ``Image.Transpose.FLIP_LEFT_RIGHT``
``Image.FLIP_TOP_BOTTOM`` ``Image.Transpose.FLIP_TOP_BOTTOM``
``Image.ROTATE_90`` ``Image.Transpose.ROTATE_90``
``Image.ROTATE_180`` ``Image.Transpose.ROTATE_180``
``Image.ROTATE_270`` ``Image.Transpose.ROTATE_270``
``Image.TRANSPOSE`` ``Image.Transpose.TRANSPOSE``
``Image.TRANSVERSE`` ``Image.Transpose.TRANSVERSE``
``Image.BOX`` ``Image.Resampling.BOX``
``Image.BILINEAR`` ``Image.Resampling.BILINEAR``
``Image.LINEAR`` ``Image.Resampling.BILINEAR``
``Image.HAMMING`` ``Image.Resampling.HAMMING``
``Image.BICUBIC`` ``Image.Resampling.BICUBIC``
``Image.CUBIC`` ``Image.Resampling.BICUBIC``
``Image.LANCZOS`` ``Image.Resampling.LANCZOS``
``Image.ANTIALIAS`` ``Image.Resampling.LANCZOS``
``Image.MEDIANCUT`` ``Image.Quantize.MEDIANCUT``
``Image.MAXCOVERAGE`` ``Image.Quantize.MAXCOVERAGE``
``Image.FASTOCTREE`` ``Image.Quantize.FASTOCTREE``
``Image.LIBIMAGEQUANT`` ``Image.Quantize.LIBIMAGEQUANT``
``ImageCms.INTENT_PERCEPTUAL`` ``ImageCms.Intent.PERCEPTUAL``
``ImageCms.INTENT_RELATIVE_COLORIMETRIC`` ``ImageCms.Intent.RELATIVE_COLORIMETRIC``
``ImageCms.INTENT_SATURATION`` ``ImageCms.Intent.SATURATION``
``ImageCms.INTENT_ABSOLUTE_COLORIMETRIC`` ``ImageCms.Intent.ABSOLUTE_COLORIMETRIC``
``ImageCms.DIRECTION_INPUT`` ``ImageCms.Direction.INPUT``
``ImageCms.DIRECTION_OUTPUT`` ``ImageCms.Direction.OUTPUT``
``ImageCms.DIRECTION_PROOF`` ``ImageCms.Direction.PROOF``
``ImageFont.LAYOUT_BASIC`` ``ImageFont.Layout.BASIC``
``ImageFont.LAYOUT_RAQM`` ``ImageFont.Layout.RAQM``
``BlpImagePlugin.BLP_FORMAT_JPEG`` ``BlpImagePlugin.Format.JPEG``
``BlpImagePlugin.BLP_ENCODING_UNCOMPRESSED`` ``BlpImagePlugin.Encoding.UNCOMPRESSED``
``BlpImagePlugin.BLP_ENCODING_DXT`` ``BlpImagePlugin.Encoding.DXT``
``BlpImagePlugin.BLP_ENCODING_UNCOMPRESSED_RAW_RGBA`` ``BlpImagePlugin.Encoding.UNCOMPRESSED_RAW_RGBA``
``BlpImagePlugin.BLP_ALPHA_ENCODING_DXT1`` ``BlpImagePlugin.AlphaEncoding.DXT1``
``BlpImagePlugin.BLP_ALPHA_ENCODING_DXT3`` ``BlpImagePlugin.AlphaEncoding.DXT3``
``BlpImagePlugin.BLP_ALPHA_ENCODING_DXT5`` ``BlpImagePlugin.AlphaEncoding.DXT5``
``FtexImagePlugin.FORMAT_DXT1`` ``FtexImagePlugin.Format.DXT1``
``FtexImagePlugin.FORMAT_UNCOMPRESSED`` ``FtexImagePlugin.Format.UNCOMPRESSED``
``PngImagePlugin.APNG_DISPOSE_OP_NONE`` ``PngImagePlugin.Disposal.OP_NONE``
``PngImagePlugin.APNG_DISPOSE_OP_BACKGROUND`` ``PngImagePlugin.Disposal.OP_BACKGROUND``
``PngImagePlugin.APNG_DISPOSE_OP_PREVIOUS`` ``PngImagePlugin.Disposal.OP_PREVIOUS``
``PngImagePlugin.APNG_BLEND_OP_SOURCE`` ``PngImagePlugin.Blend.OP_SOURCE``
``PngImagePlugin.APNG_BLEND_OP_OVER`` ``PngImagePlugin.Blend.OP_OVER``
===================================================== ============================================================
ImageShow.Viewer.show_file file argument ImageShow.Viewer.show_file file argument
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

View File

@ -97,9 +97,9 @@ def testimage():
10456 10456
>>> len(im.tobytes()) >>> len(im.tobytes())
49152 49152
>>> _info(im.transform((512, 512), Image.Transform.AFFINE, (1,0,0,0,1,0))) >>> _info(im.transform((512, 512), Image.AFFINE, (1,0,0,0,1,0)))
(None, 'RGB', (512, 512)) (None, 'RGB', (512, 512))
>>> _info(im.transform((512, 512), Image.Transform.EXTENT, (32,32,96,96))) >>> _info(im.transform((512, 512), Image.EXTENT, (32,32,96,96)))
(None, 'RGB', (512, 512)) (None, 'RGB', (512, 512))
The ImageDraw module lets you draw stuff in raster images: The ImageDraw module lets you draw stuff in raster images:

View File

@ -31,41 +31,19 @@ BLP files come in many different flavours:
import os import os
import struct import struct
from enum import IntEnum
from io import BytesIO from io import BytesIO
from . import Image, ImageFile from . import Image, ImageFile
from ._deprecate import deprecate
BLP_FORMAT_JPEG = 0
class Format(IntEnum): BLP_ENCODING_UNCOMPRESSED = 1
JPEG = 0 BLP_ENCODING_DXT = 2
BLP_ENCODING_UNCOMPRESSED_RAW_BGRA = 3
BLP_ALPHA_ENCODING_DXT1 = 0
class Encoding(IntEnum): BLP_ALPHA_ENCODING_DXT3 = 1
UNCOMPRESSED = 1 BLP_ALPHA_ENCODING_DXT5 = 7
DXT = 2
UNCOMPRESSED_RAW_BGRA = 3
class AlphaEncoding(IntEnum):
DXT1 = 0
DXT3 = 1
DXT5 = 7
def __getattr__(name):
for enum, prefix in {
Format: "BLP_FORMAT_",
Encoding: "BLP_ENCODING_",
AlphaEncoding: "BLP_ALPHA_ENCODING_",
}.items():
if name.startswith(prefix):
name = name[len(prefix) :]
if name in enum.__members__:
deprecate(f"{prefix}{name}", 10, f"{enum.__name__}.{name}")
return enum[name]
raise AttributeError(f"module '{__name__}' has no attribute '{name}'")
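The shim deleted above follows the module-level ``__getattr__`` pattern from PEP 562. A self-contained sketch of the same idea, reusing the BLP encoding values shown here and substituting a plain ``DeprecationWarning`` for Pillow's ``deprecate()`` helper:

    import warnings
    from enum import IntEnum


    class Encoding(IntEnum):
        UNCOMPRESSED = 1
        DXT = 2
        UNCOMPRESSED_RAW_BGRA = 3


    _DEPRECATED_PREFIXES = {Encoding: "BLP_ENCODING_"}


    def __getattr__(name):
        # PEP 562: called only when a module attribute is not found normally.
        for enum, prefix in _DEPRECATED_PREFIXES.items():
            if name.startswith(prefix):
                member = name[len(prefix):]
                if member in enum.__members__:
                    warnings.warn(
                        f"{name} is deprecated; use {enum.__name__}.{member}",
                        DeprecationWarning,
                        stacklevel=2,
                    )
                    return enum[member]
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")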
def unpack_565(i): def unpack_565(i):
@ -345,7 +323,7 @@ class _BLPBaseDecoder(ImageFile.PyDecoder):
class BLP1Decoder(_BLPBaseDecoder): class BLP1Decoder(_BLPBaseDecoder):
def _load(self): def _load(self):
if self._blp_compression == Format.JPEG: if self._blp_compression == BLP_FORMAT_JPEG:
self._decode_jpeg_stream() self._decode_jpeg_stream()
elif self._blp_compression == 1: elif self._blp_compression == 1:
@ -387,12 +365,12 @@ class BLP2Decoder(_BLPBaseDecoder):
if self._blp_compression == 1: if self._blp_compression == 1:
# Uncompressed or DirectX compression # Uncompressed or DirectX compression
if self._blp_encoding == Encoding.UNCOMPRESSED: if self._blp_encoding == BLP_ENCODING_UNCOMPRESSED:
data = self._read_bgra(palette) data = self._read_bgra(palette)
elif self._blp_encoding == Encoding.DXT: elif self._blp_encoding == BLP_ENCODING_DXT:
data = bytearray() data = bytearray()
if self._blp_alpha_encoding == AlphaEncoding.DXT1: if self._blp_alpha_encoding == BLP_ALPHA_ENCODING_DXT1:
linesize = (self.size[0] + 3) // 4 * 8 linesize = (self.size[0] + 3) // 4 * 8
for yb in range((self.size[1] + 3) // 4): for yb in range((self.size[1] + 3) // 4):
for d in decode_dxt1( for d in decode_dxt1(
@ -400,13 +378,13 @@ class BLP2Decoder(_BLPBaseDecoder):
): ):
data += d data += d
elif self._blp_alpha_encoding == AlphaEncoding.DXT3: elif self._blp_alpha_encoding == BLP_ALPHA_ENCODING_DXT3:
linesize = (self.size[0] + 3) // 4 * 16 linesize = (self.size[0] + 3) // 4 * 16
for yb in range((self.size[1] + 3) // 4): for yb in range((self.size[1] + 3) // 4):
for d in decode_dxt3(self._safe_read(linesize)): for d in decode_dxt3(self._safe_read(linesize)):
data += d data += d
elif self._blp_alpha_encoding == AlphaEncoding.DXT5: elif self._blp_alpha_encoding == BLP_ALPHA_ENCODING_DXT5:
linesize = (self.size[0] + 3) // 4 * 16 linesize = (self.size[0] + 3) // 4 * 16
for yb in range((self.size[1] + 3) // 4): for yb in range((self.size[1] + 3) // 4):
for d in decode_dxt5(self._safe_read(linesize)): for d in decode_dxt5(self._safe_read(linesize)):
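The ``linesize`` arithmetic above reflects the fixed BCn block sizes (8 bytes per 4x4 block for DXT1, 16 bytes for DXT3/DXT5); a tiny sketch of the same calculation:

    def dxt_row_bytes(width, bytes_per_block):
        # One block row covers four pixel rows; width is rounded up to whole
        # 4x4 blocks, each occupying a fixed number of bytes.
        return (width + 3) // 4 * bytes_per_block

    print(dxt_row_bytes(10, 8))    # 24 -> DXT1: three 8-byte blocks per block row
    print(dxt_row_bytes(10, 16))   # 48 -> DXT3/DXT5: three 16-byte blocks per block row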
@ -463,7 +441,7 @@ def _save(im, fp, filename, save_all=False):
fp.write(magic) fp.write(magic)
fp.write(struct.pack("<i", 1)) # Uncompressed or DirectX compression fp.write(struct.pack("<i", 1)) # Uncompressed or DirectX compression
fp.write(struct.pack("<b", Encoding.UNCOMPRESSED)) fp.write(struct.pack("<b", BLP_ENCODING_UNCOMPRESSED))
fp.write(struct.pack("<b", 1 if im.palette.mode == "RGBA" else 0)) fp.write(struct.pack("<b", 1 if im.palette.mode == "RGBA" else 0))
fp.write(struct.pack("<b", 0)) # alpha encoding fp.write(struct.pack("<b", 0)) # alpha encoding
fp.write(struct.pack("<b", 0)) # mips fp.write(struct.pack("<b", 0)) # mips
@ -52,28 +52,15 @@ Note: All data is stored in little-Endian (Intel) byte order.
""" """
import struct import struct
from enum import IntEnum
from io import BytesIO from io import BytesIO
from . import Image, ImageFile from . import Image, ImageFile
from ._deprecate import deprecate
MAGIC = b"FTEX" MAGIC = b"FTEX"
class Format(IntEnum): FORMAT_DXT1 = 0
DXT1 = 0 FORMAT_UNCOMPRESSED = 1
UNCOMPRESSED = 1
def __getattr__(name):
for enum, prefix in {Format: "FORMAT_"}.items():
if name.startswith(prefix):
name = name[len(prefix) :]
if name in enum.__members__:
deprecate(f"{prefix}{name}", 10, f"{enum.__name__}.{name}")
return enum[name]
raise AttributeError(f"module '{__name__}' has no attribute '{name}'")
class FtexImageFile(ImageFile.ImageFile): class FtexImageFile(ImageFile.ImageFile):
@ -99,10 +86,10 @@ class FtexImageFile(ImageFile.ImageFile):
data = self.fp.read(mipmap_size) data = self.fp.read(mipmap_size)
if format == Format.DXT1: if format == FORMAT_DXT1:
self.mode = "RGBA" self.mode = "RGBA"
self.tile = [("bcn", (0, 0) + self.size, 0, 1)] self.tile = [("bcn", (0, 0) + self.size, 0, 1)]
elif format == Format.UNCOMPRESSED: elif format == FORMAT_UNCOMPRESSED:
self.tile = [("raw", (0, 0) + self.size, 0, ("RGB", 0, 1))] self.tile = [("raw", (0, 0) + self.size, 0, ("RGB", 0, 1))]
else: else:
raise ValueError(f"Invalid texture compression format: {repr(format)}") raise ValueError(f"Invalid texture compression format: {repr(format)}")
@ -325,12 +325,12 @@ class GifImageFile(ImageFile.ImageFile):
self.pyaccess = None self.pyaccess = None
if "transparency" in self.info: if "transparency" in self.info:
self.im.putpalettealpha(self.info["transparency"], 0) self.im.putpalettealpha(self.info["transparency"], 0)
self.im = self.im.convert("RGBA", Image.Dither.FLOYDSTEINBERG) self.im = self.im.convert("RGBA", Image.FLOYDSTEINBERG)
self.mode = "RGBA" self.mode = "RGBA"
del self.info["transparency"] del self.info["transparency"]
else: else:
self.mode = "RGB" self.mode = "RGB"
self.im = self.im.convert("RGB", Image.Dither.FLOYDSTEINBERG) self.im = self.im.convert("RGB", Image.FLOYDSTEINBERG)
def _rgb(color): def _rgb(color):
if self._frame_palette: if self._frame_palette:
@ -438,7 +438,7 @@ class GifImageFile(ImageFile.ImageFile):
self.mode = "RGBA" self.mode = "RGBA"
else: else:
self.mode = "RGB" self.mode = "RGB"
self.im = self.im.convert(self.mode, Image.Dither.FLOYDSTEINBERG) self.im = self.im.convert(self.mode, Image.FLOYDSTEINBERG)
return return
if not self._prev_im: if not self._prev_im:
return return
@ -482,7 +482,7 @@ def _normalize_mode(im):
im.load() im.load()
return im return im
if Image.getmodebase(im.mode) == "RGB": if Image.getmodebase(im.mode) == "RGB":
im = im.convert("P", palette=Image.Palette.ADAPTIVE) im = im.convert("P", palette=Image.ADAPTIVE)
if im.palette.mode == "RGBA": if im.palette.mode == "RGBA":
for rgba in im.palette.colors.keys(): for rgba in im.palette.colors.keys():
if rgba[3] == 0: if rgba[3] == 0:
@ -73,7 +73,7 @@ def _save(im, fp, filename):
else: else:
# TODO: invent a more convenient method for proportional scalings # TODO: invent a more convenient method for proportional scalings
frame = provided_im.copy() frame = provided_im.copy()
frame.thumbnail(size, Image.Resampling.LANCZOS, reducing_gap=None) frame.thumbnail(size, Image.LANCZOS, reducing_gap=None)
frames.append(frame) frames.append(frame)
fp.write(o16(len(frames))) # idCount(2) fp.write(o16(len(frames))) # idCount(2)
offset = fp.tell() + len(frames) * 16 offset = fp.tell() + len(frames) * 16
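For context, ``thumbnail()`` differs from ``resize()`` in that it modifies the image in place and never exceeds the requested box; a small sketch with a synthetic image:

    from PIL import Image

    frame = Image.new("RGB", (256, 192))
    frame.thumbnail((64, 64), Image.LANCZOS)   # resizes in place, keeps aspect ratio
    print(frame.size)                          # (64, 48): no side exceeds 64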
@ -36,7 +36,6 @@ import sys
import tempfile import tempfile
import warnings import warnings
from collections.abc import Callable, MutableMapping from collections.abc import Callable, MutableMapping
from enum import IntEnum
from pathlib import Path from pathlib import Path
try: try:
@ -58,21 +57,6 @@ def __getattr__(name):
if name in categories: if name in categories:
deprecate("Image categories", 10, "is_animated", plural=True) deprecate("Image categories", 10, "is_animated", plural=True)
return categories[name] return categories[name]
elif name in ("NEAREST", "NONE"):
deprecate(name, 10, "Resampling.NEAREST or Dither.NONE")
return 0
old_resampling = {
"LINEAR": "BILINEAR",
"CUBIC": "BICUBIC",
"ANTIALIAS": "LANCZOS",
}
if name in old_resampling:
deprecate(name, 10, f"Resampling.{old_resampling[name]}")
return Resampling[old_resampling[name]]
for enum in (Transpose, Transform, Resampling, Dither, Palette, Quantize):
if name in enum.__members__:
deprecate(name, 10, f"{enum.__name__}.{name}")
return enum[name]
raise AttributeError(f"module '{__name__}' has no attribute '{name}'") raise AttributeError(f"module '{__name__}' has no attribute '{name}'")
@ -150,64 +134,45 @@ def isImageType(t):
# Constants # Constants
# transpose # transpose
class Transpose(IntEnum): FLIP_LEFT_RIGHT = 0
FLIP_LEFT_RIGHT = 0 FLIP_TOP_BOTTOM = 1
FLIP_TOP_BOTTOM = 1 ROTATE_90 = 2
ROTATE_90 = 2 ROTATE_180 = 3
ROTATE_180 = 3 ROTATE_270 = 4
ROTATE_270 = 4 TRANSPOSE = 5
TRANSPOSE = 5 TRANSVERSE = 6
TRANSVERSE = 6
# transforms (also defined in Imaging.h) # transforms (also defined in Imaging.h)
class Transform(IntEnum): AFFINE = 0
AFFINE = 0 EXTENT = 1
EXTENT = 1 PERSPECTIVE = 2
PERSPECTIVE = 2 QUAD = 3
QUAD = 3 MESH = 4
MESH = 4
# resampling filters (also defined in Imaging.h) # resampling filters (also defined in Imaging.h)
class Resampling(IntEnum): NEAREST = NONE = 0
NEAREST = 0 BOX = 4
BOX = 4 BILINEAR = LINEAR = 2
BILINEAR = 2 HAMMING = 5
HAMMING = 5 BICUBIC = CUBIC = 3
BICUBIC = 3 LANCZOS = ANTIALIAS = 1
LANCZOS = 1
_filters_support = {
Resampling.BOX: 0.5,
Resampling.BILINEAR: 1.0,
Resampling.HAMMING: 1.0,
Resampling.BICUBIC: 2.0,
Resampling.LANCZOS: 3.0,
}
_filters_support = {BOX: 0.5, BILINEAR: 1.0, HAMMING: 1.0, BICUBIC: 2.0, LANCZOS: 3.0}
# dithers # dithers
class Dither(IntEnum): NEAREST = NONE = 0
NONE = 0 ORDERED = 1 # Not yet implemented
ORDERED = 1 # Not yet implemented RASTERIZE = 2 # Not yet implemented
RASTERIZE = 2 # Not yet implemented FLOYDSTEINBERG = 3 # default
FLOYDSTEINBERG = 3 # default
# palettes/quantizers # palettes/quantizers
class Palette(IntEnum): WEB = 0
WEB = 0 ADAPTIVE = 1
ADAPTIVE = 1
class Quantize(IntEnum):
MEDIANCUT = 0
MAXCOVERAGE = 1
FASTOCTREE = 2
LIBIMAGEQUANT = 3
MEDIANCUT = 0
MAXCOVERAGE = 1
FASTOCTREE = 2
LIBIMAGEQUANT = 3
if hasattr(core, "DEFAULT_STRATEGY"): if hasattr(core, "DEFAULT_STRATEGY"):
DEFAULT_STRATEGY = core.DEFAULT_STRATEGY DEFAULT_STRATEGY = core.DEFAULT_STRATEGY
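A quick sketch of the aliasing restored on the right-hand side, with the values as defined in the constants above:

    from PIL import Image

    # Historical aliases kept equal by the plain constants:
    assert Image.NEAREST == Image.NONE == 0
    assert Image.BILINEAR == Image.LINEAR == 2
    assert Image.BICUBIC == Image.CUBIC == 3
    assert Image.LANCZOS == Image.ANTIALIAS == 1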
@ -870,9 +835,7 @@ class Image:
""" """
pass pass
def convert( def convert(self, mode=None, matrix=None, dither=None, palette=WEB, colors=256):
self, mode=None, matrix=None, dither=None, palette=Palette.WEB, colors=256
):
""" """
Returns a converted copy of this image. For the "P" mode, this Returns a converted copy of this image. For the "P" mode, this
method translates pixels through the palette. If mode is method translates pixels through the palette. If mode is
@ -907,12 +870,11 @@ class Image:
should be 4- or 12-tuple containing floating point values. should be 4- or 12-tuple containing floating point values.
:param dither: Dithering method, used when converting from :param dither: Dithering method, used when converting from
mode "RGB" to "P" or from "RGB" or "L" to "1". mode "RGB" to "P" or from "RGB" or "L" to "1".
Available methods are :data:`Dither.NONE` or :data:`Dither.FLOYDSTEINBERG` Available methods are :data:`NONE` or :data:`FLOYDSTEINBERG`
(default). Note that this is not used when ``matrix`` is supplied. (default). Note that this is not used when ``matrix`` is supplied.
:param palette: Palette to use when converting from mode "RGB" :param palette: Palette to use when converting from mode "RGB"
to "P". Available palettes are :data:`Palette.WEB` or to "P". Available palettes are :data:`WEB` or :data:`ADAPTIVE`.
:data:`Palette.ADAPTIVE`. :param colors: Number of colors to use for the :data:`ADAPTIVE`
:param colors: Number of colors to use for the :data:`Palette.ADAPTIVE`
palette. Defaults to 256. palette. Defaults to 256.
:rtype: :py:class:`~PIL.Image.Image` :rtype: :py:class:`~PIL.Image.Image`
:returns: An :py:class:`~PIL.Image.Image` object. :returns: An :py:class:`~PIL.Image.Image` object.
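A short usage sketch of the ``palette``/``dither`` parameters documented above, using a synthetic image:

    from PIL import Image

    im = Image.new("RGB", (64, 64), "salmon")

    # RGB -> P with the built-in web palette and the default error-diffusion dither.
    web = im.convert("P", palette=Image.WEB, dither=Image.FLOYDSTEINBERG)

    # RGB -> P with a palette derived from the image itself.
    adaptive = im.convert("P", palette=Image.ADAPTIVE, colors=64)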
@ -1022,7 +984,7 @@ class Image:
else: else:
raise ValueError("Transparency for P mode should be bytes or int") raise ValueError("Transparency for P mode should be bytes or int")
if mode == "P" and palette == Palette.ADAPTIVE: if mode == "P" and palette == ADAPTIVE:
im = self.im.quantize(colors) im = self.im.quantize(colors)
new = self._new(im) new = self._new(im)
from . import ImagePalette from . import ImagePalette
@ -1057,7 +1019,7 @@ class Image:
# colorspace conversion # colorspace conversion
if dither is None: if dither is None:
dither = Dither.FLOYDSTEINBERG dither = FLOYDSTEINBERG
try: try:
im = self.im.convert(mode, dither) im = self.im.convert(mode, dither)
@ -1073,7 +1035,7 @@ class Image:
raise ValueError("illegal conversion") from e raise ValueError("illegal conversion") from e
new_im = self._new(im) new_im = self._new(im)
if mode == "P" and palette != Palette.ADAPTIVE: if mode == "P" and palette != ADAPTIVE:
from . import ImagePalette from . import ImagePalette
new_im.palette = ImagePalette.ImagePalette("RGB", list(range(256)) * 3) new_im.palette = ImagePalette.ImagePalette("RGB", list(range(256)) * 3)
@ -1102,31 +1064,31 @@ class Image:
method=None, method=None,
kmeans=0, kmeans=0,
palette=None, palette=None,
dither=Dither.FLOYDSTEINBERG, dither=FLOYDSTEINBERG,
): ):
""" """
Convert the image to 'P' mode with the specified number Convert the image to 'P' mode with the specified number
of colors. of colors.
:param colors: The desired number of colors, <= 256 :param colors: The desired number of colors, <= 256
:param method: :data:`Quantize.MEDIANCUT` (median cut), :param method: :data:`MEDIANCUT` (median cut),
:data:`Quantize.MAXCOVERAGE` (maximum coverage), :data:`MAXCOVERAGE` (maximum coverage),
:data:`Quantize.FASTOCTREE` (fast octree), :data:`FASTOCTREE` (fast octree),
:data:`Quantize.LIBIMAGEQUANT` (libimagequant; check support :data:`LIBIMAGEQUANT` (libimagequant; check support
using :py:func:`PIL.features.check_feature` with using :py:func:`PIL.features.check_feature` with
``feature="libimagequant"``). ``feature="libimagequant"``).
By default, :data:`Quantize.MEDIANCUT` will be used. By default, :data:`MEDIANCUT` will be used.
The exception to this is RGBA images. :data:`Quantize.MEDIANCUT` The exception to this is RGBA images. :data:`MEDIANCUT`
and :data:`Quantize.MAXCOVERAGE` do not support RGBA images, so and :data:`MAXCOVERAGE` do not support RGBA images, so
:data:`Quantize.FASTOCTREE` is used by default instead. :data:`FASTOCTREE` is used by default instead.
:param kmeans: Integer :param kmeans: Integer
:param palette: Quantize to the palette of given :param palette: Quantize to the palette of given
:py:class:`PIL.Image.Image`. :py:class:`PIL.Image.Image`.
:param dither: Dithering method, used when converting from :param dither: Dithering method, used when converting from
mode "RGB" to "P" or from "RGB" or "L" to "1". mode "RGB" to "P" or from "RGB" or "L" to "1".
Available methods are :data:`Dither.NONE` or :data:`Dither.FLOYDSTEINBERG` Available methods are :data:`NONE` or :data:`FLOYDSTEINBERG`
(default). (default).
:returns: A new image :returns: A new image
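A brief sketch of the method selection described above, again with synthetic images:

    from PIL import Image

    rgb = Image.new("RGB", (64, 64), "teal")
    p1 = rgb.quantize(colors=64)                          # MEDIANCUT by default for RGB
    p2 = rgb.quantize(colors=64, method=Image.FASTOCTREE)

    rgba = rgb.convert("RGBA")
    p3 = rgba.quantize(colors=64)                         # RGBA falls back to FASTOCTREE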
@ -1136,14 +1098,11 @@ class Image:
if method is None: if method is None:
# defaults: # defaults:
method = Quantize.MEDIANCUT method = MEDIANCUT
if self.mode == "RGBA": if self.mode == "RGBA":
method = Quantize.FASTOCTREE method = FASTOCTREE
if self.mode == "RGBA" and method not in ( if self.mode == "RGBA" and method not in (FASTOCTREE, LIBIMAGEQUANT):
Quantize.FASTOCTREE,
Quantize.LIBIMAGEQUANT,
):
# Caller specified an invalid mode. # Caller specified an invalid mode.
raise ValueError( raise ValueError(
"Fast Octree (method == 2) and libimagequant (method == 3) " "Fast Octree (method == 2) and libimagequant (method == 3) "
@ -2016,14 +1975,14 @@ class Image:
:param size: The requested size in pixels, as a 2-tuple: :param size: The requested size in pixels, as a 2-tuple:
(width, height). (width, height).
:param resample: An optional resampling filter. This can be :param resample: An optional resampling filter. This can be
one of :py:data:`Resampling.NEAREST`, :py:data:`Resampling.BOX`, one of :py:data:`NEAREST`, :py:data:`BOX`,
:py:data:`Resampling.BILINEAR`, :py:data:`Resampling.HAMMING`, :py:data:`BILINEAR`, :py:data:`HAMMING`,
:py:data:`Resampling.BICUBIC` or :py:data:`Resampling.LANCZOS`. :py:data:`BICUBIC` or :py:data:`LANCZOS`.
If the image has mode "1" or "P", it is always set to If the image has mode "1" or "P", it is always set to
:py:data:`Resampling.NEAREST`. If the image mode specifies a number :py:data:`NEAREST`. If the image mode specifies a number
of bits, such as "I;16", then the default filter is of bits, such as "I;16", then the default filter is
:py:data:`Resampling.NEAREST`. Otherwise, the default filter is :py:data:`NEAREST`. Otherwise, the default filter is
:py:data:`Resampling.BICUBIC`. See: :ref:`concept-filters`. :py:data:`BICUBIC`. See: :ref:`concept-filters`.
:param box: An optional 4-tuple of floats providing :param box: An optional 4-tuple of floats providing
the source image region to be scaled. the source image region to be scaled.
The values must be within (0, 0, width, height) rectangle. The values must be within (0, 0, width, height) rectangle.
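A minimal sketch of ``resize()`` with an explicit filter and source ``box``, per the parameters above (synthetic image):

    from PIL import Image

    im = Image.new("RGB", (128, 128))
    # Upscale only the top-left quarter of the source, with an explicit filter.
    out = im.resize((256, 256), resample=Image.BILINEAR, box=(0, 0, 64, 64))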
@ -2045,26 +2004,19 @@ class Image:
if resample is None: if resample is None:
type_special = ";" in self.mode type_special = ";" in self.mode
resample = Resampling.NEAREST if type_special else Resampling.BICUBIC resample = NEAREST if type_special else BICUBIC
elif resample not in ( elif resample not in (NEAREST, BILINEAR, BICUBIC, LANCZOS, BOX, HAMMING):
Resampling.NEAREST,
Resampling.BILINEAR,
Resampling.BICUBIC,
Resampling.LANCZOS,
Resampling.BOX,
Resampling.HAMMING,
):
message = f"Unknown resampling filter ({resample})." message = f"Unknown resampling filter ({resample})."
filters = [ filters = [
f"{filter[1]} ({filter[0]})" f"{filter[1]} ({filter[0]})"
for filter in ( for filter in (
(Resampling.NEAREST, "Image.Resampling.NEAREST"), (NEAREST, "Image.NEAREST"),
(Resampling.LANCZOS, "Image.Resampling.LANCZOS"), (LANCZOS, "Image.LANCZOS"),
(Resampling.BILINEAR, "Image.Resampling.BILINEAR"), (BILINEAR, "Image.BILINEAR"),
(Resampling.BICUBIC, "Image.Resampling.BICUBIC"), (BICUBIC, "Image.BICUBIC"),
(Resampling.BOX, "Image.Resampling.BOX"), (BOX, "Image.BOX"),
(Resampling.HAMMING, "Image.Resampling.HAMMING"), (HAMMING, "Image.HAMMING"),
) )
] ]
raise ValueError( raise ValueError(
@ -2086,16 +2038,16 @@ class Image:
return self.copy() return self.copy()
if self.mode in ("1", "P"): if self.mode in ("1", "P"):
resample = Resampling.NEAREST resample = NEAREST
if self.mode in ["LA", "RGBA"] and resample != Resampling.NEAREST: if self.mode in ["LA", "RGBA"] and resample != NEAREST:
im = self.convert({"LA": "La", "RGBA": "RGBa"}[self.mode]) im = self.convert({"LA": "La", "RGBA": "RGBa"}[self.mode])
im = im.resize(size, resample, box) im = im.resize(size, resample, box)
return im.convert(self.mode) return im.convert(self.mode)
self.load() self.load()
if reducing_gap is not None and resample != Resampling.NEAREST: if reducing_gap is not None and resample != NEAREST:
factor_x = int((box[2] - box[0]) / size[0] / reducing_gap) or 1 factor_x = int((box[2] - box[0]) / size[0] / reducing_gap) or 1
factor_y = int((box[3] - box[1]) / size[1] / reducing_gap) or 1 factor_y = int((box[3] - box[1]) / size[1] / reducing_gap) or 1
if factor_x > 1 or factor_y > 1: if factor_x > 1 or factor_y > 1:
@ -2150,7 +2102,7 @@ class Image:
def rotate( def rotate(
self, self,
angle, angle,
resample=Resampling.NEAREST, resample=NEAREST,
expand=0, expand=0,
center=None, center=None,
translate=None, translate=None,
@ -2163,11 +2115,11 @@ class Image:
:param angle: In degrees counter clockwise. :param angle: In degrees counter clockwise.
:param resample: An optional resampling filter. This can be :param resample: An optional resampling filter. This can be
one of :py:data:`Resampling.NEAREST` (use nearest neighbour), one of :py:data:`NEAREST` (use nearest neighbour),
:py:data:`Resampling.BILINEAR` (linear interpolation in a 2x2 :py:data:`BILINEAR` (linear interpolation in a 2x2
environment), or :py:data:`Resampling.BICUBIC` (cubic spline environment), or :py:data:`BICUBIC` (cubic spline
interpolation in a 4x4 environment). If omitted, or if the image has interpolation in a 4x4 environment). If omitted, or if the image has
mode "1" or "P", it is set to :py:data:`Resampling.NEAREST`. mode "1" or "P", it is set to :py:data:`NEAREST`.
See :ref:`concept-filters`. See :ref:`concept-filters`.
:param expand: Optional expansion flag. If true, expands the output :param expand: Optional expansion flag. If true, expands the output
image to make it large enough to hold the entire rotated image. image to make it large enough to hold the entire rotated image.
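A short usage sketch of ``rotate()`` with the options documented above (synthetic image):

    from PIL import Image

    im = Image.new("RGB", (120, 80), "white")
    r1 = im.rotate(45, resample=Image.BICUBIC, expand=True, fillcolor="black")
    r2 = im.rotate(180)   # exact 180-degree rotation is handled as a lossless transpose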
@ -2189,11 +2141,9 @@ class Image:
if angle == 0: if angle == 0:
return self.copy() return self.copy()
if angle == 180: if angle == 180:
return self.transpose(Transpose.ROTATE_180) return self.transpose(ROTATE_180)
if angle in (90, 270) and (expand or self.width == self.height): if angle in (90, 270) and (expand or self.width == self.height):
return self.transpose( return self.transpose(ROTATE_90 if angle == 90 else ROTATE_270)
Transpose.ROTATE_90 if angle == 90 else Transpose.ROTATE_270
)
# Calculate the affine matrix. Note that this is the reverse # Calculate the affine matrix. Note that this is the reverse
# transformation (from destination image to source) because we # transformation (from destination image to source) because we
@ -2262,9 +2212,7 @@ class Image:
matrix[2], matrix[5] = transform(-(nw - w) / 2.0, -(nh - h) / 2.0, matrix) matrix[2], matrix[5] = transform(-(nw - w) / 2.0, -(nh - h) / 2.0, matrix)
w, h = nw, nh w, h = nw, nh
return self.transform( return self.transform((w, h), AFFINE, matrix, resample, fillcolor=fillcolor)
(w, h), Transform.AFFINE, matrix, resample, fillcolor=fillcolor
)
def save(self, fp, format=None, **params): def save(self, fp, format=None, **params):
""" """
@ -2459,7 +2407,7 @@ class Image:
""" """
return 0 return 0
def thumbnail(self, size, resample=Resampling.BICUBIC, reducing_gap=2.0): def thumbnail(self, size, resample=BICUBIC, reducing_gap=2.0):
""" """
Make this image into a thumbnail. This method modifies the Make this image into a thumbnail. This method modifies the
image to contain a thumbnail version of itself, no larger than image to contain a thumbnail version of itself, no larger than
@ -2475,11 +2423,11 @@ class Image:
:param size: Requested size. :param size: Requested size.
:param resample: Optional resampling filter. This can be one :param resample: Optional resampling filter. This can be one
of :py:data:`Resampling.NEAREST`, :py:data:`Resampling.BOX`, of :py:data:`NEAREST`, :py:data:`BOX`,
:py:data:`Resampling.BILINEAR`, :py:data:`Resampling.HAMMING`, :py:data:`BILINEAR`, :py:data:`HAMMING`,
:py:data:`Resampling.BICUBIC` or :py:data:`Resampling.LANCZOS`. :py:data:`BICUBIC` or :py:data:`LANCZOS`.
If omitted, it defaults to :py:data:`Resampling.BICUBIC`. If omitted, it defaults to :py:data:`BICUBIC`.
(was :py:data:`Resampling.NEAREST` prior to version 2.5.0). (was :py:data:`NEAREST` prior to version 2.5.0).
See: :ref:`concept-filters`. See: :ref:`concept-filters`.
:param reducing_gap: Apply optimization by resizing the image :param reducing_gap: Apply optimization by resizing the image
in two steps. First, reducing the image by integer times in two steps. First, reducing the image by integer times
@ -2551,7 +2499,7 @@ class Image:
size, size,
method, method,
data=None, data=None,
resample=Resampling.NEAREST, resample=NEAREST,
fill=1, fill=1,
fillcolor=None, fillcolor=None,
): ):
@ -2562,11 +2510,11 @@ class Image:
:param size: The output size. :param size: The output size.
:param method: The transformation method. This is one of :param method: The transformation method. This is one of
:py:data:`Transform.EXTENT` (cut out a rectangular subregion), :py:data:`EXTENT` (cut out a rectangular subregion),
:py:data:`Transform.AFFINE` (affine transform), :py:data:`AFFINE` (affine transform),
:py:data:`Transform.PERSPECTIVE` (perspective transform), :py:data:`PERSPECTIVE` (perspective transform),
:py:data:`Transform.QUAD` (map a quadrilateral to a rectangle), or :py:data:`QUAD` (map a quadrilateral to a rectangle), or
:py:data:`Transform.MESH` (map a number of source quadrilaterals :py:data:`MESH` (map a number of source quadrilaterals
in one operation). in one operation).
It may also be an :py:class:`~PIL.Image.ImageTransformHandler` It may also be an :py:class:`~PIL.Image.ImageTransformHandler`
@ -2581,16 +2529,16 @@ class Image:
class Example: class Example:
def getdata(self): def getdata(self):
method = Image.Transform.EXTENT method = Image.EXTENT
data = (0, 0, 100, 100) data = (0, 0, 100, 100)
return method, data return method, data
:param data: Extra data to the transformation method. :param data: Extra data to the transformation method.
:param resample: Optional resampling filter. It can be one of :param resample: Optional resampling filter. It can be one of
:py:data:`Resampling.NEAREST` (use nearest neighbour), :py:data:`NEAREST` (use nearest neighbour),
:py:data:`Resampling.BILINEAR` (linear interpolation in a 2x2 :py:data:`BILINEAR` (linear interpolation in a 2x2
environment), or :py:data:`Resampling.BICUBIC` (cubic spline environment), or :py:data:`BICUBIC` (cubic spline
interpolation in a 4x4 environment). If omitted, or if the image interpolation in a 4x4 environment). If omitted, or if the image
has mode "1" or "P", it is set to :py:data:`Resampling.NEAREST`. has mode "1" or "P", it is set to :py:data:`NEAREST`.
See: :ref:`concept-filters`. See: :ref:`concept-filters`.
:param fill: If ``method`` is an :param fill: If ``method`` is an
:py:class:`~PIL.Image.ImageTransformHandler` object, this is one of :py:class:`~PIL.Image.ImageTransformHandler` object, this is one of
@ -2600,7 +2548,7 @@ class Image:
:returns: An :py:class:`~PIL.Image.Image` object. :returns: An :py:class:`~PIL.Image.Image` object.
""" """
if self.mode in ("LA", "RGBA") and resample != Resampling.NEAREST: if self.mode in ("LA", "RGBA") and resample != NEAREST:
return ( return (
self.convert({"LA": "La", "RGBA": "RGBa"}[self.mode]) self.convert({"LA": "La", "RGBA": "RGBa"}[self.mode])
.transform(size, method, data, resample, fill, fillcolor) .transform(size, method, data, resample, fill, fillcolor)
@ -2621,12 +2569,10 @@ class Image:
if self.mode == "P" and self.palette: if self.mode == "P" and self.palette:
im.palette = self.palette.copy() im.palette = self.palette.copy()
im.info = self.info.copy() im.info = self.info.copy()
if method == Transform.MESH: if method == MESH:
# list of quads # list of quads
for box, quad in data: for box, quad in data:
im.__transformer( im.__transformer(box, self, QUAD, quad, resample, fillcolor is None)
box, self, Transform.QUAD, quad, resample, fillcolor is None
)
else: else:
im.__transformer( im.__transformer(
(0, 0) + size, self, method, data, resample, fillcolor is None (0, 0) + size, self, method, data, resample, fillcolor is None
@ -2634,27 +2580,25 @@ class Image:
return im return im
def __transformer( def __transformer(self, box, image, method, data, resample=NEAREST, fill=1):
self, box, image, method, data, resample=Resampling.NEAREST, fill=1
):
w = box[2] - box[0] w = box[2] - box[0]
h = box[3] - box[1] h = box[3] - box[1]
if method == Transform.AFFINE: if method == AFFINE:
data = data[:6] data = data[:6]
elif method == Transform.EXTENT: elif method == EXTENT:
# convert extent to an affine transform # convert extent to an affine transform
x0, y0, x1, y1 = data x0, y0, x1, y1 = data
xs = (x1 - x0) / w xs = (x1 - x0) / w
ys = (y1 - y0) / h ys = (y1 - y0) / h
method = Transform.AFFINE method = AFFINE
data = (xs, 0, x0, 0, ys, y0) data = (xs, 0, x0, 0, ys, y0)
elif method == Transform.PERSPECTIVE: elif method == PERSPECTIVE:
data = data[:8] data = data[:8]
elif method == Transform.QUAD: elif method == QUAD:
# quadrilateral warp. data specifies the four corners # quadrilateral warp. data specifies the four corners
# given as NW, SW, SE, and NE. # given as NW, SW, SE, and NE.
nw = data[:2] nw = data[:2]
@ -2679,15 +2623,15 @@ class Image:
raise ValueError("unknown transformation method") raise ValueError("unknown transformation method")
if resample not in ( if resample not in (
Resampling.NEAREST, NEAREST,
Resampling.BILINEAR, BILINEAR,
Resampling.BICUBIC, BICUBIC,
): ):
if resample in (Resampling.BOX, Resampling.HAMMING, Resampling.LANCZOS): if resample in (BOX, HAMMING, LANCZOS):
message = { message = {
Resampling.BOX: "Image.Resampling.BOX", BOX: "Image.BOX",
Resampling.HAMMING: "Image.Resampling.HAMMING", HAMMING: "Image.HAMMING",
Resampling.LANCZOS: "Image.Resampling.LANCZOS", LANCZOS: "Image.LANCZOS",
}[resample] + f" ({resample}) cannot be used." }[resample] + f" ({resample}) cannot be used."
else: else:
message = f"Unknown resampling filter ({resample})." message = f"Unknown resampling filter ({resample})."
@ -2695,9 +2639,9 @@ class Image:
filters = [ filters = [
f"{filter[1]} ({filter[0]})" f"{filter[1]} ({filter[0]})"
for filter in ( for filter in (
(Resampling.NEAREST, "Image.Resampling.NEAREST"), (NEAREST, "Image.NEAREST"),
(Resampling.BILINEAR, "Image.Resampling.BILINEAR"), (BILINEAR, "Image.BILINEAR"),
(Resampling.BICUBIC, "Image.Resampling.BICUBIC"), (BICUBIC, "Image.BICUBIC"),
) )
] ]
raise ValueError( raise ValueError(
@ -2709,7 +2653,7 @@ class Image:
self.load() self.load()
if image.mode in ("1", "P"): if image.mode in ("1", "P"):
resample = Resampling.NEAREST resample = NEAREST
self.im.transform2(box, image.im, method, data, resample, fill) self.im.transform2(box, image.im, method, data, resample, fill)
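To illustrate the QUAD branch above, a minimal identity warp on a synthetic image:

    from PIL import Image

    im = Image.new("RGB", (128, 128))
    w, h = im.size
    # Corners are given in source coordinates as NW, SW, SE, NE.
    quad = (0, 0, 0, h, w, h, w, 0)       # identity mapping of the full image
    out = im.transform((w, h), Image.QUAD, quad, resample=Image.BICUBIC)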
@ -2717,10 +2661,10 @@ class Image:
""" """
Transpose image (flip or rotate in 90 degree steps) Transpose image (flip or rotate in 90 degree steps)
:param method: One of :py:data:`Transpose.FLIP_LEFT_RIGHT`, :param method: One of :py:data:`FLIP_LEFT_RIGHT`,
:py:data:`Transpose.FLIP_TOP_BOTTOM`, :py:data:`Transpose.ROTATE_90`, :py:data:`FLIP_TOP_BOTTOM`, :py:data:`ROTATE_90`,
:py:data:`Transpose.ROTATE_180`, :py:data:`Transpose.ROTATE_270`, :py:data:`ROTATE_180`, :py:data:`ROTATE_270`,
:py:data:`Transpose.TRANSPOSE` or :py:data:`Transpose.TRANSVERSE`. :py:data:`TRANSPOSE` or :py:data:`TRANSVERSE`.
:returns: Returns a flipped or rotated copy of this image. :returns: Returns a flipped or rotated copy of this image.
""" """
@ -16,12 +16,9 @@
# below for the original description. # below for the original description.
import sys import sys
from enum import IntEnum
from PIL import Image from PIL import Image
from ._deprecate import deprecate
try: try:
from PIL import _imagingcms from PIL import _imagingcms
except ImportError as ex: except ImportError as ex:
@ -103,29 +100,14 @@ core = _imagingcms
# #
# intent/direction values # intent/direction values
INTENT_PERCEPTUAL = 0
INTENT_RELATIVE_COLORIMETRIC = 1
INTENT_SATURATION = 2
INTENT_ABSOLUTE_COLORIMETRIC = 3
class Intent(IntEnum): DIRECTION_INPUT = 0
PERCEPTUAL = 0 DIRECTION_OUTPUT = 1
RELATIVE_COLORIMETRIC = 1 DIRECTION_PROOF = 2
SATURATION = 2
ABSOLUTE_COLORIMETRIC = 3
class Direction(IntEnum):
INPUT = 0
OUTPUT = 1
PROOF = 2
def __getattr__(name):
for enum, prefix in {Intent: "INTENT_", Direction: "DIRECTION_"}.items():
if name.startswith(prefix):
name = name[len(prefix) :]
if name in enum.__members__:
deprecate(f"{prefix}{name}", 10, f"{enum.__name__}.{name}")
return enum[name]
raise AttributeError(f"module '{__name__}' has no attribute '{name}'")
# #
# flags # flags
@ -229,9 +211,9 @@ class ImageCmsTransform(Image.ImagePointHandler):
output, output,
input_mode, input_mode,
output_mode, output_mode,
intent=Intent.PERCEPTUAL, intent=INTENT_PERCEPTUAL,
proof=None, proof=None,
proof_intent=Intent.ABSOLUTE_COLORIMETRIC, proof_intent=INTENT_ABSOLUTE_COLORIMETRIC,
flags=0, flags=0,
): ):
if proof is None: if proof is None:
@ -313,7 +295,7 @@ def profileToProfile(
im, im,
inputProfile, inputProfile,
outputProfile, outputProfile,
renderingIntent=Intent.PERCEPTUAL, renderingIntent=INTENT_PERCEPTUAL,
outputMode=None, outputMode=None,
inPlace=False, inPlace=False,
flags=0, flags=0,
@ -349,10 +331,10 @@ def profileToProfile(
:param renderingIntent: Integer (0-3) specifying the rendering intent you :param renderingIntent: Integer (0-3) specifying the rendering intent you
wish to use for the transform wish to use for the transform
ImageCms.Intent.PERCEPTUAL = 0 (DEFAULT) ImageCms.INTENT_PERCEPTUAL = 0 (DEFAULT)
ImageCms.Intent.RELATIVE_COLORIMETRIC = 1 ImageCms.INTENT_RELATIVE_COLORIMETRIC = 1
ImageCms.Intent.SATURATION = 2 ImageCms.INTENT_SATURATION = 2
ImageCms.Intent.ABSOLUTE_COLORIMETRIC = 3 ImageCms.INTENT_ABSOLUTE_COLORIMETRIC = 3
see the pyCMS documentation for details on rendering intents and what see the pyCMS documentation for details on rendering intents and what
they do. they do.
@ -430,7 +412,7 @@ def buildTransform(
outputProfile, outputProfile,
inMode, inMode,
outMode, outMode,
renderingIntent=Intent.PERCEPTUAL, renderingIntent=INTENT_PERCEPTUAL,
flags=0, flags=0,
): ):
""" """
@ -476,10 +458,10 @@ def buildTransform(
:param renderingIntent: Integer (0-3) specifying the rendering intent you :param renderingIntent: Integer (0-3) specifying the rendering intent you
wish to use for the transform wish to use for the transform
ImageCms.Intent.PERCEPTUAL = 0 (DEFAULT) ImageCms.INTENT_PERCEPTUAL = 0 (DEFAULT)
ImageCms.Intent.RELATIVE_COLORIMETRIC = 1 ImageCms.INTENT_RELATIVE_COLORIMETRIC = 1
ImageCms.Intent.SATURATION = 2 ImageCms.INTENT_SATURATION = 2
ImageCms.Intent.ABSOLUTE_COLORIMETRIC = 3 ImageCms.INTENT_ABSOLUTE_COLORIMETRIC = 3
see the pyCMS documentation for details on rendering intents and what see the pyCMS documentation for details on rendering intents and what
they do. they do.
@ -512,8 +494,8 @@ def buildProofTransform(
proofProfile, proofProfile,
inMode, inMode,
outMode, outMode,
renderingIntent=Intent.PERCEPTUAL, renderingIntent=INTENT_PERCEPTUAL,
proofRenderingIntent=Intent.ABSOLUTE_COLORIMETRIC, proofRenderingIntent=INTENT_ABSOLUTE_COLORIMETRIC,
flags=FLAGS["SOFTPROOFING"], flags=FLAGS["SOFTPROOFING"],
): ):
""" """
@ -568,20 +550,20 @@ def buildProofTransform(
:param renderingIntent: Integer (0-3) specifying the rendering intent you :param renderingIntent: Integer (0-3) specifying the rendering intent you
wish to use for the input->proof (simulated) transform wish to use for the input->proof (simulated) transform
ImageCms.Intent.PERCEPTUAL = 0 (DEFAULT) ImageCms.INTENT_PERCEPTUAL = 0 (DEFAULT)
ImageCms.Intent.RELATIVE_COLORIMETRIC = 1 ImageCms.INTENT_RELATIVE_COLORIMETRIC = 1
ImageCms.Intent.SATURATION = 2 ImageCms.INTENT_SATURATION = 2
ImageCms.Intent.ABSOLUTE_COLORIMETRIC = 3 ImageCms.INTENT_ABSOLUTE_COLORIMETRIC = 3
see the pyCMS documentation for details on rendering intents and what see the pyCMS documentation for details on rendering intents and what
they do. they do.
:param proofRenderingIntent: Integer (0-3) specifying the rendering intent :param proofRenderingIntent: Integer (0-3) specifying the rendering intent
you wish to use for proof->output transform you wish to use for proof->output transform
ImageCms.Intent.PERCEPTUAL = 0 (DEFAULT) ImageCms.INTENT_PERCEPTUAL = 0 (DEFAULT)
ImageCms.Intent.RELATIVE_COLORIMETRIC = 1 ImageCms.INTENT_RELATIVE_COLORIMETRIC = 1
ImageCms.Intent.SATURATION = 2 ImageCms.INTENT_SATURATION = 2
ImageCms.Intent.ABSOLUTE_COLORIMETRIC = 3 ImageCms.INTENT_ABSOLUTE_COLORIMETRIC = 3
see the pyCMS documentation for details on rendering intents and what see the pyCMS documentation for details on rendering intents and what
they do. they do.
@ -940,10 +922,10 @@ def getDefaultIntent(profile):
:returns: Integer 0-3 specifying the default rendering intent for this :returns: Integer 0-3 specifying the default rendering intent for this
profile. profile.
ImageCms.Intent.PERCEPTUAL = 0 (DEFAULT) ImageCms.INTENT_PERCEPTUAL = 0 (DEFAULT)
ImageCms.Intent.RELATIVE_COLORIMETRIC = 1 ImageCms.INTENT_RELATIVE_COLORIMETRIC = 1
ImageCms.Intent.SATURATION = 2 ImageCms.INTENT_SATURATION = 2
ImageCms.Intent.ABSOLUTE_COLORIMETRIC = 3 ImageCms.INTENT_ABSOLUTE_COLORIMETRIC = 3
see the pyCMS documentation for details on rendering intents and what see the pyCMS documentation for details on rendering intents and what
they do. they do.
@ -978,19 +960,19 @@ def isIntentSupported(profile, intent, direction):
:param intent: Integer (0-3) specifying the rendering intent you wish to :param intent: Integer (0-3) specifying the rendering intent you wish to
use with this profile use with this profile
ImageCms.Intent.PERCEPTUAL = 0 (DEFAULT) ImageCms.INTENT_PERCEPTUAL = 0 (DEFAULT)
ImageCms.Intent.RELATIVE_COLORIMETRIC = 1 ImageCms.INTENT_RELATIVE_COLORIMETRIC = 1
ImageCms.Intent.SATURATION = 2 ImageCms.INTENT_SATURATION = 2
ImageCms.Intent.ABSOLUTE_COLORIMETRIC = 3 ImageCms.INTENT_ABSOLUTE_COLORIMETRIC = 3
see the pyCMS documentation for details on rendering intents and what see the pyCMS documentation for details on rendering intents and what
they do. they do.
:param direction: Integer specifying if the profile is to be used for :param direction: Integer specifying if the profile is to be used for
input, output, or proof input, output, or proof
INPUT = 0 (or use ImageCms.Direction.INPUT) INPUT = 0 (or use ImageCms.DIRECTION_INPUT)
OUTPUT = 1 (or use ImageCms.Direction.OUTPUT) OUTPUT = 1 (or use ImageCms.DIRECTION_OUTPUT)
PROOF = 2 (or use ImageCms.Direction.PROOF) PROOF = 2 (or use ImageCms.DIRECTION_PROOF)
:returns: 1 if the intent/direction are supported, -1 if they are not. :returns: 1 if the intent/direction are supported, -1 if they are not.
:exception PyCMSError: :exception PyCMSError:
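For context, a hedged sketch of building and applying a transform with the rendering-intent constants documented above; it relies only on the built-in sRGB and LAB profiles and the standard createProfile/applyTransform helpers, which are not part of this diff:

    from PIL import Image, ImageCms

    im = Image.new("RGB", (64, 64), "orange")
    srgb = ImageCms.createProfile("sRGB")
    lab = ImageCms.createProfile("LAB")

    transform = ImageCms.buildTransform(
        srgb, lab, "RGB", "LAB",
        renderingIntent=ImageCms.INTENT_PERCEPTUAL,
    )
    converted = ImageCms.applyTransform(im, transform)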
@ -529,7 +529,7 @@ class Color3DLUT(MultibandFilter):
return image.color_lut_3d( return image.color_lut_3d(
self.mode or image.mode, self.mode or image.mode,
Image.Resampling.BILINEAR, Image.BILINEAR,
self.channels, self.channels,
self.size[0], self.size[0],
self.size[1], self.size[1],
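Color3DLUT itself is applied through ``Image.filter()``; a minimal identity-table sketch (the ``generate`` helper is standard Pillow API, not part of this hunk):

    from PIL import Image, ImageFilter

    im = Image.new("RGB", (64, 64), "purple")
    identity = ImageFilter.Color3DLUT.generate(5, lambda r, g, b: (r, g, b))
    out = im.filter(identity)   # an identity LUT leaves the colors unchanged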
@ -29,27 +29,14 @@ import base64
import os import os
import sys import sys
import warnings import warnings
from enum import IntEnum
from io import BytesIO from io import BytesIO
from . import Image from . import Image
from ._deprecate import deprecate from ._deprecate import deprecate
from ._util import is_directory, is_path from ._util import is_directory, is_path
LAYOUT_BASIC = 0
class Layout(IntEnum): LAYOUT_RAQM = 1
BASIC = 0
RAQM = 1
def __getattr__(name):
for enum, prefix in {Layout: "LAYOUT_"}.items():
if name.startswith(prefix):
name = name[len(prefix) :]
if name in enum.__members__:
deprecate(f"{prefix}{name}", 10, f"{enum.__name__}.{name}")
return enum[name]
raise AttributeError(f"module '{__name__}' has no attribute '{name}'")
class _ImagingFtNotInstalled: class _ImagingFtNotInstalled:
@ -216,16 +203,16 @@ class FreeTypeFont:
self.index = index self.index = index
self.encoding = encoding self.encoding = encoding
if layout_engine not in (Layout.BASIC, Layout.RAQM): if layout_engine not in (LAYOUT_BASIC, LAYOUT_RAQM):
layout_engine = Layout.BASIC layout_engine = LAYOUT_BASIC
if core.HAVE_RAQM: if core.HAVE_RAQM:
layout_engine = Layout.RAQM layout_engine = LAYOUT_RAQM
elif layout_engine == Layout.RAQM and not core.HAVE_RAQM: elif layout_engine == LAYOUT_RAQM and not core.HAVE_RAQM:
warnings.warn( warnings.warn(
"Raqm layout was requested, but Raqm is not available. " "Raqm layout was requested, but Raqm is not available. "
"Falling back to basic layout." "Falling back to basic layout."
) )
layout_engine = Layout.BASIC layout_engine = LAYOUT_BASIC
self.layout_engine = layout_engine self.layout_engine = layout_engine
@ -848,9 +835,8 @@ class TransposedFont:
:param font: A font object. :param font: A font object.
:param orientation: An optional orientation. If given, this should :param orientation: An optional orientation. If given, this should
be one of Image.Transpose.FLIP_LEFT_RIGHT, Image.Transpose.FLIP_TOP_BOTTOM, be one of Image.FLIP_LEFT_RIGHT, Image.FLIP_TOP_BOTTOM,
Image.Transpose.ROTATE_90, Image.Transpose.ROTATE_180, or Image.ROTATE_90, Image.ROTATE_180, or Image.ROTATE_270.
Image.Transpose.ROTATE_270.
""" """
self.font = font self.font = font
self.orientation = orientation # any 'transpose' argument, or None self.orientation = orientation # any 'transpose' argument, or None
@ -867,7 +853,7 @@ class TransposedFont:
with warnings.catch_warnings(): with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning) warnings.filterwarnings("ignore", category=DeprecationWarning)
w, h = self.font.getsize(text) w, h = self.font.getsize(text)
if self.orientation in (Image.Transpose.ROTATE_90, Image.Transpose.ROTATE_270): if self.orientation in (Image.ROTATE_90, Image.ROTATE_270):
return h, w return h, w
return w, h return w, h
@ -883,12 +869,12 @@ class TransposedFont:
left, top, right, bottom = self.font.getbbox(text, *args, **kwargs) left, top, right, bottom = self.font.getbbox(text, *args, **kwargs)
width = right - left width = right - left
height = bottom - top height = bottom - top
if self.orientation in (Image.Transpose.ROTATE_90, Image.Transpose.ROTATE_270): if self.orientation in (Image.ROTATE_90, Image.ROTATE_270):
return 0, 0, height, width return 0, 0, height, width
return 0, 0, width, height return 0, 0, width, height
def getlength(self, text, *args, **kwargs): def getlength(self, text, *args, **kwargs):
if self.orientation in (Image.Transpose.ROTATE_90, Image.Transpose.ROTATE_270): if self.orientation in (Image.ROTATE_90, Image.ROTATE_270):
raise ValueError( raise ValueError(
"text length is undefined for text rotated by 90 or 270 degrees" "text length is undefined for text rotated by 90 or 270 degrees"
) )
@ -954,7 +940,7 @@ def truetype(font=None, size=10, index=0, encoding="", layout_engine=None):
This specifies the character set to use. It does not alter the This specifies the character set to use. It does not alter the
encoding of any text provided in subsequent operations. encoding of any text provided in subsequent operations.
:param layout_engine: Which layout engine to use, if available: :param layout_engine: Which layout engine to use, if available:
:data:`.ImageFont.Layout.BASIC` or :data:`.ImageFont.Layout.RAQM`. :data:`.ImageFont.LAYOUT_BASIC` or :data:`.ImageFont.LAYOUT_RAQM`.
If it is available, Raqm layout will be used by default. If it is available, Raqm layout will be used by default.
Otherwise, basic layout will be used. Otherwise, basic layout will be used.
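A brief sketch of the ``layout_engine`` parameter described above; the font path is illustrative:

    from PIL import ImageFont

    font = ImageFont.truetype(
        "DejaVuSans.ttf",                       # any TrueType file will do
        size=24,
        layout_engine=ImageFont.LAYOUT_BASIC,   # skip Raqm even when it is available
    )
    print(font.getlength("Hello"))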
@ -237,7 +237,7 @@ def colorize(image, black, white, mid=None, blackpoint=0, whitepoint=255, midpoi
return _lut(image, red + green + blue) return _lut(image, red + green + blue)
def contain(image, size, method=Image.Resampling.BICUBIC): def contain(image, size, method=Image.BICUBIC):
""" """
Returns a resized version of the image, set to the maximum width and height Returns a resized version of the image, set to the maximum width and height
within the requested size, while maintaining the original aspect ratio. within the requested size, while maintaining the original aspect ratio.
@ -265,7 +265,7 @@ def contain(image, size, method=Image.Resampling.BICUBIC):
return image.resize(size, resample=method) return image.resize(size, resample=method)
def pad(image, size, method=Image.Resampling.BICUBIC, color=None, centering=(0.5, 0.5)): def pad(image, size, method=Image.BICUBIC, color=None, centering=(0.5, 0.5)):
""" """
Returns a resized and padded version of the image, expanded to fill the Returns a resized and padded version of the image, expanded to fill the
requested aspect ratio and size. requested aspect ratio and size.
@ -317,7 +317,7 @@ def crop(image, border=0):
return image.crop((left, top, image.size[0] - right, image.size[1] - bottom)) return image.crop((left, top, image.size[0] - right, image.size[1] - bottom))
def scale(image, factor, resample=Image.Resampling.BICUBIC): def scale(image, factor, resample=Image.BICUBIC):
""" """
Returns a rescaled image by a specific factor given in parameter. Returns a rescaled image by a specific factor given in parameter.
A factor greater than 1 expands the image, between 0 and 1 contracts the A factor greater than 1 expands the image, between 0 and 1 contracts the
@ -338,7 +338,7 @@ def scale(image, factor, resample=Image.Resampling.BICUBIC):
return image.resize(size, resample) return image.resize(size, resample)
def deform(image, deformer, resample=Image.Resampling.BILINEAR): def deform(image, deformer, resample=Image.BILINEAR):
""" """
Deform the image. Deform the image.
@ -349,9 +349,7 @@ def deform(image, deformer, resample=Image.Resampling.BILINEAR):
in the PIL.Image.transform function. in the PIL.Image.transform function.
:return: An image. :return: An image.
""" """
return image.transform( return image.transform(image.size, Image.MESH, deformer.getmesh(image), resample)
image.size, Image.Transform.MESH, deformer.getmesh(image), resample
)
def equalize(image, mask=None): def equalize(image, mask=None):
@ -411,7 +409,7 @@ def expand(image, border=0, fill=0):
return out return out
def fit(image, size, method=Image.Resampling.BICUBIC, bleed=0.0, centering=(0.5, 0.5)): def fit(image, size, method=Image.BICUBIC, bleed=0.0, centering=(0.5, 0.5)):
""" """
Returns a resized and cropped version of the image, cropped to the Returns a resized and cropped version of the image, cropped to the
requested aspect ratio and size. requested aspect ratio and size.
@ -503,7 +501,7 @@ def flip(image):
:param image: The image to flip. :param image: The image to flip.
:return: An image. :return: An image.
""" """
return image.transpose(Image.Transpose.FLIP_TOP_BOTTOM) return image.transpose(Image.FLIP_TOP_BOTTOM)
def grayscale(image): def grayscale(image):
@ -536,7 +534,7 @@ def mirror(image):
:param image: The image to mirror. :param image: The image to mirror.
:return: An image. :return: An image.
""" """
return image.transpose(Image.Transpose.FLIP_LEFT_RIGHT) return image.transpose(Image.FLIP_LEFT_RIGHT)
def posterize(image, bits): def posterize(image, bits):
@ -585,13 +583,13 @@ def exif_transpose(image):
exif = image.getexif() exif = image.getexif()
orientation = exif.get(0x0112) orientation = exif.get(0x0112)
method = { method = {
2: Image.Transpose.FLIP_LEFT_RIGHT, 2: Image.FLIP_LEFT_RIGHT,
3: Image.Transpose.ROTATE_180, 3: Image.ROTATE_180,
4: Image.Transpose.FLIP_TOP_BOTTOM, 4: Image.FLIP_TOP_BOTTOM,
5: Image.Transpose.TRANSPOSE, 5: Image.TRANSPOSE,
6: Image.Transpose.ROTATE_270, 6: Image.ROTATE_270,
7: Image.Transpose.TRANSVERSE, 7: Image.TRANSVERSE,
8: Image.Transpose.ROTATE_90, 8: Image.ROTATE_90,
}.get(orientation) }.get(orientation)
if method is not None: if method is not None:
transposed_image = image.transpose(method) transposed_image = image.transpose(method)
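A small sketch of a few of the ImageOps helpers touched above; the image is synthetic, so the EXIF branch is a no-op:

    from PIL import Image, ImageOps

    im = Image.new("RGB", (300, 200), "gray")
    upright = ImageOps.exif_transpose(im)    # applies the EXIF orientation tag, if present
    boxed = ImageOps.pad(upright, (400, 400), method=Image.BICUBIC, color="black")
    thumb = ImageOps.fit(upright, (128, 128), method=Image.BICUBIC, centering=(0.5, 0.5))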
@ -47,7 +47,7 @@ class AffineTransform(Transform):
from an affine transform matrix. from an affine transform matrix.
""" """
method = Image.Transform.AFFINE method = Image.AFFINE
class ExtentTransform(Transform): class ExtentTransform(Transform):
@ -69,7 +69,7 @@ class ExtentTransform(Transform):
input image's coordinate system. See :ref:`coordinate-system`. input image's coordinate system. See :ref:`coordinate-system`.
""" """
method = Image.Transform.EXTENT method = Image.EXTENT
class QuadTransform(Transform): class QuadTransform(Transform):
@ -86,7 +86,7 @@ class QuadTransform(Transform):
source quadrilateral. source quadrilateral.
""" """
method = Image.Transform.QUAD method = Image.QUAD
class MeshTransform(Transform): class MeshTransform(Transform):
@ -99,4 +99,4 @@ class MeshTransform(Transform):
:param data: A list of (bbox, quad) tuples. :param data: A list of (bbox, quad) tuples.
""" """
method = Image.Transform.MESH method = Image.MESH
@ -37,7 +37,6 @@ import re
import struct import struct
import warnings import warnings
import zlib import zlib
from enum import IntEnum
from . import Image, ImageChops, ImageFile, ImagePalette, ImageSequence from . import Image, ImageChops, ImageFile, ImagePalette, ImageSequence
from ._binary import i16be as i16 from ._binary import i16be as i16
@ -45,7 +44,6 @@ from ._binary import i32be as i32
from ._binary import o8 from ._binary import o8
from ._binary import o16be as o16 from ._binary import o16be as o16
from ._binary import o32be as o32 from ._binary import o32be as o32
from ._deprecate import deprecate
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -96,49 +94,36 @@ See :ref:`Text in PNG File Format<png-text>`.
# APNG frame disposal modes # APNG frame disposal modes
class Disposal(IntEnum): APNG_DISPOSE_OP_NONE = 0
OP_NONE = 0 """
""" No disposal is done on this frame before rendering the next frame.
No disposal is done on this frame before rendering the next frame. See :ref:`Saving APNG sequences<apng-saving>`.
See :ref:`Saving APNG sequences<apng-saving>`. """
""" APNG_DISPOSE_OP_BACKGROUND = 1
OP_BACKGROUND = 1 """
""" This frames modified region is cleared to fully transparent black before rendering
This frames modified region is cleared to fully transparent black before rendering the next frame.
the next frame. See :ref:`Saving APNG sequences<apng-saving>`.
See :ref:`Saving APNG sequences<apng-saving>`. """
""" APNG_DISPOSE_OP_PREVIOUS = 2
OP_PREVIOUS = 2 """
""" This frames modified region is reverted to the previous frames contents before
This frames modified region is reverted to the previous frames contents before rendering the next frame.
rendering the next frame. See :ref:`Saving APNG sequences<apng-saving>`.
See :ref:`Saving APNG sequences<apng-saving>`. """
"""
# APNG frame blend modes # APNG frame blend modes
class Blend(IntEnum): APNG_BLEND_OP_SOURCE = 0
OP_SOURCE = 0 """
""" All color components of this frame, including alpha, overwrite the previous output
All color components of this frame, including alpha, overwrite the previous output image contents.
image contents. See :ref:`Saving APNG sequences<apng-saving>`.
See :ref:`Saving APNG sequences<apng-saving>`. """
""" APNG_BLEND_OP_OVER = 1
OP_OVER = 1 """
""" This frame should be alpha composited with the previous output image contents.
This frame should be alpha composited with the previous output image contents. See :ref:`Saving APNG sequences<apng-saving>`.
See :ref:`Saving APNG sequences<apng-saving>`. """
"""
def __getattr__(name):
for enum, prefix in {Disposal: "APNG_DISPOSE_", Blend: "APNG_BLEND_"}.items():
if name.startswith(prefix):
name = name[len(prefix) :]
if name in enum.__members__:
deprecate(f"{prefix}{name}", 10, f"{enum.__name__}.{name}")
return enum[name]
raise AttributeError(f"module '{__name__}' has no attribute '{name}'")
def _safe_zlib_decompress(s): def _safe_zlib_decompress(s):
@ -901,13 +886,13 @@ class PngImageFile(ImageFile.ImageFile):
raise EOFError raise EOFError
# setup frame disposal (actual disposal done when needed in the next _seek()) # setup frame disposal (actual disposal done when needed in the next _seek())
if self._prev_im is None and self.dispose_op == Disposal.OP_PREVIOUS: if self._prev_im is None and self.dispose_op == APNG_DISPOSE_OP_PREVIOUS:
self.dispose_op = Disposal.OP_BACKGROUND self.dispose_op = APNG_DISPOSE_OP_BACKGROUND
if self.dispose_op == Disposal.OP_PREVIOUS: if self.dispose_op == APNG_DISPOSE_OP_PREVIOUS:
self.dispose = self._prev_im.copy() self.dispose = self._prev_im.copy()
self.dispose = self._crop(self.dispose, self.dispose_extent) self.dispose = self._crop(self.dispose, self.dispose_extent)
elif self.dispose_op == Disposal.OP_BACKGROUND: elif self.dispose_op == APNG_DISPOSE_OP_BACKGROUND:
self.dispose = Image.core.fill(self.mode, self.size) self.dispose = Image.core.fill(self.mode, self.size)
self.dispose = self._crop(self.dispose, self.dispose_extent) self.dispose = self._crop(self.dispose, self.dispose_extent)
else: else:
@ -996,7 +981,7 @@ class PngImageFile(ImageFile.ImageFile):
self.png.close() self.png.close()
self.png = None self.png = None
else: else:
if self._prev_im and self.blend_op == Blend.OP_OVER: if self._prev_im and self.blend_op == APNG_BLEND_OP_OVER:
updated = self._crop(self.im, self.dispose_extent) updated = self._crop(self.im, self.dispose_extent)
self._prev_im.paste( self._prev_im.paste(
updated, self.dispose_extent, updated.convert("RGBA") updated, self.dispose_extent, updated.convert("RGBA")
@ -1092,8 +1077,10 @@ class _fdat:
def _write_multiple_frames(im, fp, chunk, rawmode, default_image, append_images): def _write_multiple_frames(im, fp, chunk, rawmode, default_image, append_images):
duration = im.encoderinfo.get("duration", im.info.get("duration", 0)) duration = im.encoderinfo.get("duration", im.info.get("duration", 0))
loop = im.encoderinfo.get("loop", im.info.get("loop", 0)) loop = im.encoderinfo.get("loop", im.info.get("loop", 0))
disposal = im.encoderinfo.get("disposal", im.info.get("disposal", Disposal.OP_NONE)) disposal = im.encoderinfo.get(
blend = im.encoderinfo.get("blend", im.info.get("blend", Blend.OP_SOURCE)) "disposal", im.info.get("disposal", APNG_DISPOSE_OP_NONE)
)
blend = im.encoderinfo.get("blend", im.info.get("blend", APNG_BLEND_OP_SOURCE))
if default_image: if default_image:
chain = itertools.chain(append_images) chain = itertools.chain(append_images)
@ -1124,10 +1111,10 @@ def _write_multiple_frames(im, fp, chunk, rawmode, default_image, append_images)
previous = im_frames[-1] previous = im_frames[-1]
prev_disposal = previous["encoderinfo"].get("disposal") prev_disposal = previous["encoderinfo"].get("disposal")
prev_blend = previous["encoderinfo"].get("blend") prev_blend = previous["encoderinfo"].get("blend")
if prev_disposal == Disposal.OP_PREVIOUS and len(im_frames) < 2: if prev_disposal == APNG_DISPOSE_OP_PREVIOUS and len(im_frames) < 2:
prev_disposal = Disposal.OP_BACKGROUND prev_disposal = APNG_DISPOSE_OP_BACKGROUND
if prev_disposal == Disposal.OP_BACKGROUND: if prev_disposal == APNG_DISPOSE_OP_BACKGROUND:
base_im = previous["im"].copy() base_im = previous["im"].copy()
dispose = Image.core.fill("RGBA", im.size, (0, 0, 0, 0)) dispose = Image.core.fill("RGBA", im.size, (0, 0, 0, 0))
bbox = previous["bbox"] bbox = previous["bbox"]
@ -1136,7 +1123,7 @@ def _write_multiple_frames(im, fp, chunk, rawmode, default_image, append_images)
else: else:
bbox = (0, 0) + im.size bbox = (0, 0) + im.size
base_im.paste(dispose, bbox) base_im.paste(dispose, bbox)
elif prev_disposal == Disposal.OP_PREVIOUS: elif prev_disposal == APNG_DISPOSE_OP_PREVIOUS:
base_im = im_frames[-2]["im"] base_im = im_frames[-2]["im"]
else: else:
base_im = previous["im"] base_im = previous["im"]
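A hedged sketch of saving an APNG with the per-frame ``disposal``/``blend`` options handled above; the output path and frame colors are illustrative:

    from PIL import Image, PngImagePlugin

    frames = [Image.new("RGBA", (64, 64), color) for color in ("red", "green", "blue")]
    frames[0].save(
        "anim.png",
        save_all=True,
        append_images=frames[1:],
        duration=200,
        loop=0,
        disposal=PngImagePlugin.APNG_DISPOSE_OP_BACKGROUND,
        blend=PngImagePlugin.APNG_BLEND_OP_SOURCE,
    )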
@ -305,7 +305,7 @@ if __name__ == "__main__":
outfile = sys.argv[2] outfile = sys.argv[2]
# perform some image operation # perform some image operation
im = im.transpose(Image.Transpose.FLIP_LEFT_RIGHT) im = im.transpose(Image.FLIP_LEFT_RIGHT)
print( print(
f"saving a flipped version of {os.path.basename(filename)} " f"saving a flipped version of {os.path.basename(filename)} "
f"as {outfile} " f"as {outfile} "
@ -152,7 +152,7 @@ class TgaImageFile(ImageFile.ImageFile):
def load_end(self): def load_end(self):
if self._flip_horizontally: if self._flip_horizontally:
self.im = self.im.transpose(Image.Transpose.FLIP_LEFT_RIGHT) self.im = self.im.transpose(Image.FLIP_LEFT_RIGHT)
# #
@ -1220,13 +1220,13 @@ class TiffImageFile(ImageFile.ImageFile):
def load_end(self): def load_end(self):
if self._tile_orientation: if self._tile_orientation:
method = { method = {
2: Image.Transpose.FLIP_LEFT_RIGHT, 2: Image.FLIP_LEFT_RIGHT,
3: Image.Transpose.ROTATE_180, 3: Image.ROTATE_180,
4: Image.Transpose.FLIP_TOP_BOTTOM, 4: Image.FLIP_TOP_BOTTOM,
5: Image.Transpose.TRANSPOSE, 5: Image.TRANSPOSE,
6: Image.Transpose.ROTATE_270, 6: Image.ROTATE_270,
7: Image.Transpose.TRANSVERSE, 7: Image.TRANSVERSE,
8: Image.Transpose.ROTATE_90, 8: Image.ROTATE_90,
}.get(self._tile_orientation) }.get(self._tile_orientation)
if method is not None: if method is not None:
self.im = self.im.transpose(method) self.im = self.im.transpose(method)