Mirror of https://github.com/python-pillow/Pillow.git (synced 2024-12-25 17:36:18 +03:00)

Commit 7dca0135dd: Merge branch 'main' into fits

CHANGES.rst (12 changed lines)
@@ -5,6 +5,18 @@ Changelog (Pillow)
 
 9.1.0 (unreleased)
 ------------------
 
+- Consider palette size when converting and in getpalette() #6060
+  [radarhere]
+
+- Added enums #5954
+  [radarhere]
+
+- Ensure image is opaque after converting P to PA with RGB palette #6052
+  [radarhere]
+
+- Attach RGBA palettes from putpalette() when suitable #6054
+  [radarhere]
+
 - Added get_photoshop_blocks() to parse Photoshop TIFF tag #6030
   [radarhere]
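The entry "Added enums #5954" drives most of the test churn in this diff: flat module-level constants are replaced by members of dedicated enums, and the old names keep resolving but emit a DeprecationWarning. A minimal sketch of the new spellings, assembled only from calls that appear in the hunks below (the hopper.ppm path is taken from a comment in this diff):

from PIL import Image, PngImagePlugin

with Image.open("Tests/images/hopper.ppm") as im:
    # Resampling filters now live on Image.Resampling
    small = im.resize((32, 32), Image.Resampling.LANCZOS)

    # Rotation/flip constants moved to Image.Transpose
    rotated = im.transpose(Image.Transpose.ROTATE_90)

    # Palette and quantization constants moved to Image.Palette / Image.Quantize
    paletted = im.convert("P", palette=Image.Palette.ADAPTIVE)
    quantized = im.quantize(100, Image.Quantize.FASTOCTREE)

# APNG frame disposal and blending use PngImagePlugin.Disposal / PngImagePlugin.Blend
disposal = PngImagePlugin.Disposal.OP_BACKGROUND
blend = PngImagePlugin.Blend.OP_OVER

# The old flat names (Image.LANCZOS, PngImagePlugin.APNG_BLEND_OP_OVER, ...)
# still resolve to the same values but raise a DeprecationWarning, which is
# exactly what the test_constants_deprecation tests in this diff assert.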
Binary file not shown.
@ -43,107 +43,158 @@ class TestColorLut3DCoreAPI:
|
|||
im = Image.new("RGB", (10, 10), 0)
|
||||
|
||||
with pytest.raises(ValueError, match="filter"):
|
||||
im.im.color_lut_3d("RGB", Image.CUBIC, *self.generate_identity_table(3, 3))
|
||||
im.im.color_lut_3d(
|
||||
"RGB", Image.Resampling.BICUBIC, *self.generate_identity_table(3, 3)
|
||||
)
|
||||
|
||||
with pytest.raises(ValueError, match="image mode"):
|
||||
im.im.color_lut_3d(
|
||||
"wrong", Image.LINEAR, *self.generate_identity_table(3, 3)
|
||||
"wrong", Image.Resampling.BILINEAR, *self.generate_identity_table(3, 3)
|
||||
)
|
||||
|
||||
with pytest.raises(ValueError, match="table_channels"):
|
||||
im.im.color_lut_3d("RGB", Image.LINEAR, *self.generate_identity_table(5, 3))
|
||||
|
||||
with pytest.raises(ValueError, match="table_channels"):
|
||||
im.im.color_lut_3d("RGB", Image.LINEAR, *self.generate_identity_table(1, 3))
|
||||
|
||||
with pytest.raises(ValueError, match="table_channels"):
|
||||
im.im.color_lut_3d("RGB", Image.LINEAR, *self.generate_identity_table(2, 3))
|
||||
|
||||
with pytest.raises(ValueError, match="Table size"):
|
||||
im.im.color_lut_3d(
|
||||
"RGB", Image.LINEAR, *self.generate_identity_table(3, (1, 3, 3))
|
||||
"RGB", Image.Resampling.BILINEAR, *self.generate_identity_table(5, 3)
|
||||
)
|
||||
|
||||
with pytest.raises(ValueError, match="table_channels"):
|
||||
im.im.color_lut_3d(
|
||||
"RGB", Image.Resampling.BILINEAR, *self.generate_identity_table(1, 3)
|
||||
)
|
||||
|
||||
with pytest.raises(ValueError, match="table_channels"):
|
||||
im.im.color_lut_3d(
|
||||
"RGB", Image.Resampling.BILINEAR, *self.generate_identity_table(2, 3)
|
||||
)
|
||||
|
||||
with pytest.raises(ValueError, match="Table size"):
|
||||
im.im.color_lut_3d(
|
||||
"RGB", Image.LINEAR, *self.generate_identity_table(3, (66, 3, 3))
|
||||
"RGB",
|
||||
Image.Resampling.BILINEAR,
|
||||
*self.generate_identity_table(3, (1, 3, 3)),
|
||||
)
|
||||
|
||||
with pytest.raises(ValueError, match="Table size"):
|
||||
im.im.color_lut_3d(
|
||||
"RGB",
|
||||
Image.Resampling.BILINEAR,
|
||||
*self.generate_identity_table(3, (66, 3, 3)),
|
||||
)
|
||||
|
||||
with pytest.raises(ValueError, match=r"size1D \* size2D \* size3D"):
|
||||
im.im.color_lut_3d("RGB", Image.LINEAR, 3, 2, 2, 2, [0, 0, 0] * 7)
|
||||
im.im.color_lut_3d(
|
||||
"RGB", Image.Resampling.BILINEAR, 3, 2, 2, 2, [0, 0, 0] * 7
|
||||
)
|
||||
|
||||
with pytest.raises(ValueError, match=r"size1D \* size2D \* size3D"):
|
||||
im.im.color_lut_3d("RGB", Image.LINEAR, 3, 2, 2, 2, [0, 0, 0] * 9)
|
||||
im.im.color_lut_3d(
|
||||
"RGB", Image.Resampling.BILINEAR, 3, 2, 2, 2, [0, 0, 0] * 9
|
||||
)
|
||||
|
||||
with pytest.raises(TypeError):
|
||||
im.im.color_lut_3d("RGB", Image.LINEAR, 3, 2, 2, 2, [0, 0, "0"] * 8)
|
||||
im.im.color_lut_3d(
|
||||
"RGB", Image.Resampling.BILINEAR, 3, 2, 2, 2, [0, 0, "0"] * 8
|
||||
)
|
||||
|
||||
with pytest.raises(TypeError):
|
||||
im.im.color_lut_3d("RGB", Image.LINEAR, 3, 2, 2, 2, 16)
|
||||
im.im.color_lut_3d("RGB", Image.Resampling.BILINEAR, 3, 2, 2, 2, 16)
|
||||
|
||||
def test_correct_args(self):
|
||||
im = Image.new("RGB", (10, 10), 0)
|
||||
|
||||
im.im.color_lut_3d("RGB", Image.LINEAR, *self.generate_identity_table(3, 3))
|
||||
|
||||
im.im.color_lut_3d("CMYK", Image.LINEAR, *self.generate_identity_table(4, 3))
|
||||
|
||||
im.im.color_lut_3d(
|
||||
"RGB", Image.LINEAR, *self.generate_identity_table(3, (2, 3, 3))
|
||||
"RGB", Image.Resampling.BILINEAR, *self.generate_identity_table(3, 3)
|
||||
)
|
||||
|
||||
im.im.color_lut_3d(
|
||||
"RGB", Image.LINEAR, *self.generate_identity_table(3, (65, 3, 3))
|
||||
"CMYK", Image.Resampling.BILINEAR, *self.generate_identity_table(4, 3)
|
||||
)
|
||||
|
||||
im.im.color_lut_3d(
|
||||
"RGB", Image.LINEAR, *self.generate_identity_table(3, (3, 65, 3))
|
||||
"RGB",
|
||||
Image.Resampling.BILINEAR,
|
||||
*self.generate_identity_table(3, (2, 3, 3)),
|
||||
)
|
||||
|
||||
im.im.color_lut_3d(
|
||||
"RGB", Image.LINEAR, *self.generate_identity_table(3, (3, 3, 65))
|
||||
"RGB",
|
||||
Image.Resampling.BILINEAR,
|
||||
*self.generate_identity_table(3, (65, 3, 3)),
|
||||
)
|
||||
|
||||
im.im.color_lut_3d(
|
||||
"RGB",
|
||||
Image.Resampling.BILINEAR,
|
||||
*self.generate_identity_table(3, (3, 65, 3)),
|
||||
)
|
||||
|
||||
im.im.color_lut_3d(
|
||||
"RGB",
|
||||
Image.Resampling.BILINEAR,
|
||||
*self.generate_identity_table(3, (3, 3, 65)),
|
||||
)
|
||||
|
||||
def test_wrong_mode(self):
|
||||
with pytest.raises(ValueError, match="wrong mode"):
|
||||
im = Image.new("L", (10, 10), 0)
|
||||
im.im.color_lut_3d("RGB", Image.LINEAR, *self.generate_identity_table(3, 3))
|
||||
|
||||
with pytest.raises(ValueError, match="wrong mode"):
|
||||
im = Image.new("RGB", (10, 10), 0)
|
||||
im.im.color_lut_3d("L", Image.LINEAR, *self.generate_identity_table(3, 3))
|
||||
|
||||
with pytest.raises(ValueError, match="wrong mode"):
|
||||
im = Image.new("L", (10, 10), 0)
|
||||
im.im.color_lut_3d("L", Image.LINEAR, *self.generate_identity_table(3, 3))
|
||||
|
||||
with pytest.raises(ValueError, match="wrong mode"):
|
||||
im = Image.new("RGB", (10, 10), 0)
|
||||
im.im.color_lut_3d(
|
||||
"RGBA", Image.LINEAR, *self.generate_identity_table(3, 3)
|
||||
"RGB", Image.Resampling.BILINEAR, *self.generate_identity_table(3, 3)
|
||||
)
|
||||
|
||||
with pytest.raises(ValueError, match="wrong mode"):
|
||||
im = Image.new("RGB", (10, 10), 0)
|
||||
im.im.color_lut_3d("RGB", Image.LINEAR, *self.generate_identity_table(4, 3))
|
||||
im.im.color_lut_3d(
|
||||
"L", Image.Resampling.BILINEAR, *self.generate_identity_table(3, 3)
|
||||
)
|
||||
|
||||
with pytest.raises(ValueError, match="wrong mode"):
|
||||
im = Image.new("L", (10, 10), 0)
|
||||
im.im.color_lut_3d(
|
||||
"L", Image.Resampling.BILINEAR, *self.generate_identity_table(3, 3)
|
||||
)
|
||||
|
||||
with pytest.raises(ValueError, match="wrong mode"):
|
||||
im = Image.new("RGB", (10, 10), 0)
|
||||
im.im.color_lut_3d(
|
||||
"RGBA", Image.Resampling.BILINEAR, *self.generate_identity_table(3, 3)
|
||||
)
|
||||
|
||||
with pytest.raises(ValueError, match="wrong mode"):
|
||||
im = Image.new("RGB", (10, 10), 0)
|
||||
im.im.color_lut_3d(
|
||||
"RGB", Image.Resampling.BILINEAR, *self.generate_identity_table(4, 3)
|
||||
)
|
||||
|
||||
def test_correct_mode(self):
|
||||
im = Image.new("RGBA", (10, 10), 0)
|
||||
im.im.color_lut_3d("RGBA", Image.LINEAR, *self.generate_identity_table(3, 3))
|
||||
im.im.color_lut_3d(
|
||||
"RGBA", Image.Resampling.BILINEAR, *self.generate_identity_table(3, 3)
|
||||
)
|
||||
|
||||
im = Image.new("RGBA", (10, 10), 0)
|
||||
im.im.color_lut_3d("RGBA", Image.LINEAR, *self.generate_identity_table(4, 3))
|
||||
im.im.color_lut_3d(
|
||||
"RGBA", Image.Resampling.BILINEAR, *self.generate_identity_table(4, 3)
|
||||
)
|
||||
|
||||
im = Image.new("RGB", (10, 10), 0)
|
||||
im.im.color_lut_3d("HSV", Image.LINEAR, *self.generate_identity_table(3, 3))
|
||||
im.im.color_lut_3d(
|
||||
"HSV", Image.Resampling.BILINEAR, *self.generate_identity_table(3, 3)
|
||||
)
|
||||
|
||||
im = Image.new("RGB", (10, 10), 0)
|
||||
im.im.color_lut_3d("RGBA", Image.LINEAR, *self.generate_identity_table(4, 3))
|
||||
im.im.color_lut_3d(
|
||||
"RGBA", Image.Resampling.BILINEAR, *self.generate_identity_table(4, 3)
|
||||
)
|
||||
|
||||
def test_identities(self):
|
||||
g = Image.linear_gradient("L")
|
||||
im = Image.merge(
|
||||
"RGB", [g, g.transpose(Image.ROTATE_90), g.transpose(Image.ROTATE_180)]
|
||||
"RGB",
|
||||
[
|
||||
g,
|
||||
g.transpose(Image.Transpose.ROTATE_90),
|
||||
g.transpose(Image.Transpose.ROTATE_180),
|
||||
],
|
||||
)
|
||||
|
||||
# Fast test with small cubes
|
||||
|
@ -152,7 +203,9 @@ class TestColorLut3DCoreAPI:
|
|||
im,
|
||||
im._new(
|
||||
im.im.color_lut_3d(
|
||||
"RGB", Image.LINEAR, *self.generate_identity_table(3, size)
|
||||
"RGB",
|
||||
Image.Resampling.BILINEAR,
|
||||
*self.generate_identity_table(3, size),
|
||||
)
|
||||
),
|
||||
)
|
||||
|
@ -162,7 +215,9 @@ class TestColorLut3DCoreAPI:
|
|||
im,
|
||||
im._new(
|
||||
im.im.color_lut_3d(
|
||||
"RGB", Image.LINEAR, *self.generate_identity_table(3, (2, 2, 65))
|
||||
"RGB",
|
||||
Image.Resampling.BILINEAR,
|
||||
*self.generate_identity_table(3, (2, 2, 65)),
|
||||
)
|
||||
),
|
||||
)
|
||||
|
@ -170,7 +225,12 @@ class TestColorLut3DCoreAPI:
|
|||
def test_identities_4_channels(self):
|
||||
g = Image.linear_gradient("L")
|
||||
im = Image.merge(
|
||||
"RGB", [g, g.transpose(Image.ROTATE_90), g.transpose(Image.ROTATE_180)]
|
||||
"RGB",
|
||||
[
|
||||
g,
|
||||
g.transpose(Image.Transpose.ROTATE_90),
|
||||
g.transpose(Image.Transpose.ROTATE_180),
|
||||
],
|
||||
)
|
||||
|
||||
# Red channel copied to alpha
|
||||
|
@ -178,7 +238,9 @@ class TestColorLut3DCoreAPI:
|
|||
Image.merge("RGBA", (im.split() * 2)[:4]),
|
||||
im._new(
|
||||
im.im.color_lut_3d(
|
||||
"RGBA", Image.LINEAR, *self.generate_identity_table(4, 17)
|
||||
"RGBA",
|
||||
Image.Resampling.BILINEAR,
|
||||
*self.generate_identity_table(4, 17),
|
||||
)
|
||||
),
|
||||
)
|
||||
|
@ -189,9 +251,9 @@ class TestColorLut3DCoreAPI:
|
|||
"RGBA",
|
||||
[
|
||||
g,
|
||||
g.transpose(Image.ROTATE_90),
|
||||
g.transpose(Image.ROTATE_180),
|
||||
g.transpose(Image.ROTATE_270),
|
||||
g.transpose(Image.Transpose.ROTATE_90),
|
||||
g.transpose(Image.Transpose.ROTATE_180),
|
||||
g.transpose(Image.Transpose.ROTATE_270),
|
||||
],
|
||||
)
|
||||
|
||||
|
@ -199,7 +261,9 @@ class TestColorLut3DCoreAPI:
|
|||
im,
|
||||
im._new(
|
||||
im.im.color_lut_3d(
|
||||
"RGBA", Image.LINEAR, *self.generate_identity_table(3, 17)
|
||||
"RGBA",
|
||||
Image.Resampling.BILINEAR,
|
||||
*self.generate_identity_table(3, 17),
|
||||
)
|
||||
),
|
||||
)
|
||||
|
@ -207,14 +271,19 @@ class TestColorLut3DCoreAPI:
|
|||
def test_channels_order(self):
|
||||
g = Image.linear_gradient("L")
|
||||
im = Image.merge(
|
||||
"RGB", [g, g.transpose(Image.ROTATE_90), g.transpose(Image.ROTATE_180)]
|
||||
"RGB",
|
||||
[
|
||||
g,
|
||||
g.transpose(Image.Transpose.ROTATE_90),
|
||||
g.transpose(Image.Transpose.ROTATE_180),
|
||||
],
|
||||
)
|
||||
|
||||
# Reverse channels by splitting and using table
|
||||
# fmt: off
|
||||
assert_image_equal(
|
||||
Image.merge('RGB', im.split()[::-1]),
|
||||
im._new(im.im.color_lut_3d('RGB', Image.LINEAR,
|
||||
im._new(im.im.color_lut_3d('RGB', Image.Resampling.BILINEAR,
|
||||
3, 2, 2, 2, [
|
||||
0, 0, 0, 0, 0, 1,
|
||||
0, 1, 0, 0, 1, 1,
|
||||
|
@ -227,11 +296,16 @@ class TestColorLut3DCoreAPI:
|
|||
def test_overflow(self):
|
||||
g = Image.linear_gradient("L")
|
||||
im = Image.merge(
|
||||
"RGB", [g, g.transpose(Image.ROTATE_90), g.transpose(Image.ROTATE_180)]
|
||||
"RGB",
|
||||
[
|
||||
g,
|
||||
g.transpose(Image.Transpose.ROTATE_90),
|
||||
g.transpose(Image.Transpose.ROTATE_180),
|
||||
],
|
||||
)
|
||||
|
||||
# fmt: off
|
||||
transformed = im._new(im.im.color_lut_3d('RGB', Image.LINEAR,
|
||||
transformed = im._new(im.im.color_lut_3d('RGB', Image.Resampling.BILINEAR,
|
||||
3, 2, 2, 2,
|
||||
[
|
||||
-1, -1, -1, 2, -1, -1,
|
||||
|
@ -251,7 +325,7 @@ class TestColorLut3DCoreAPI:
|
|||
assert transformed[205, 205] == (255, 255, 0)
|
||||
|
||||
# fmt: off
|
||||
transformed = im._new(im.im.color_lut_3d('RGB', Image.LINEAR,
|
||||
transformed = im._new(im.im.color_lut_3d('RGB', Image.Resampling.BILINEAR,
|
||||
3, 2, 2, 2,
|
||||
[
|
||||
-3, -3, -3, 5, -3, -3,
|
||||
|
@ -354,7 +428,12 @@ class TestColorLut3DFilter:
|
|||
def test_numpy_formats(self):
|
||||
g = Image.linear_gradient("L")
|
||||
im = Image.merge(
|
||||
"RGB", [g, g.transpose(Image.ROTATE_90), g.transpose(Image.ROTATE_180)]
|
||||
"RGB",
|
||||
[
|
||||
g,
|
||||
g.transpose(Image.Transpose.ROTATE_90),
|
||||
g.transpose(Image.Transpose.ROTATE_180),
|
||||
],
|
||||
)
|
||||
|
||||
lut = ImageFilter.Color3DLUT.generate((7, 9, 11), lambda r, g, b: (r, g, b))
|
||||
|
@ -445,7 +524,12 @@ class TestGenerateColorLut3D:
|
|||
|
||||
g = Image.linear_gradient("L")
|
||||
im = Image.merge(
|
||||
"RGB", [g, g.transpose(Image.ROTATE_90), g.transpose(Image.ROTATE_180)]
|
||||
"RGB",
|
||||
[
|
||||
g,
|
||||
g.transpose(Image.Transpose.ROTATE_90),
|
||||
g.transpose(Image.Transpose.ROTATE_180),
|
||||
],
|
||||
)
|
||||
assert im == im.filter(lut)
|
||||
|
||||
|
|
|
@ -120,9 +120,9 @@ def test_apng_dispose_op_previous_frame():
|
|||
# save_all=True,
|
||||
# append_images=[green, blue],
|
||||
# disposal=[
|
||||
# PngImagePlugin.APNG_DISPOSE_OP_NONE,
|
||||
# PngImagePlugin.APNG_DISPOSE_OP_PREVIOUS,
|
||||
# PngImagePlugin.APNG_DISPOSE_OP_PREVIOUS
|
||||
# PngImagePlugin.Disposal.OP_NONE,
|
||||
# PngImagePlugin.Disposal.OP_PREVIOUS,
|
||||
# PngImagePlugin.Disposal.OP_PREVIOUS
|
||||
# ],
|
||||
# )
|
||||
with Image.open("Tests/images/apng/dispose_op_previous_frame.png") as im:
|
||||
|
@ -455,31 +455,31 @@ def test_apng_save_disposal(tmp_path):
|
|||
green = Image.new("RGBA", size, (0, 255, 0, 255))
|
||||
transparent = Image.new("RGBA", size, (0, 0, 0, 0))
|
||||
|
||||
# test APNG_DISPOSE_OP_NONE
|
||||
# test OP_NONE
|
||||
red.save(
|
||||
test_file,
|
||||
save_all=True,
|
||||
append_images=[green, transparent],
|
||||
disposal=PngImagePlugin.APNG_DISPOSE_OP_NONE,
|
||||
blend=PngImagePlugin.APNG_BLEND_OP_OVER,
|
||||
disposal=PngImagePlugin.Disposal.OP_NONE,
|
||||
blend=PngImagePlugin.Blend.OP_OVER,
|
||||
)
|
||||
with Image.open(test_file) as im:
|
||||
im.seek(2)
|
||||
assert im.getpixel((0, 0)) == (0, 255, 0, 255)
|
||||
assert im.getpixel((64, 32)) == (0, 255, 0, 255)
|
||||
|
||||
# test APNG_DISPOSE_OP_BACKGROUND
|
||||
# test OP_BACKGROUND
|
||||
disposal = [
|
||||
PngImagePlugin.APNG_DISPOSE_OP_NONE,
|
||||
PngImagePlugin.APNG_DISPOSE_OP_BACKGROUND,
|
||||
PngImagePlugin.APNG_DISPOSE_OP_NONE,
|
||||
PngImagePlugin.Disposal.OP_NONE,
|
||||
PngImagePlugin.Disposal.OP_BACKGROUND,
|
||||
PngImagePlugin.Disposal.OP_NONE,
|
||||
]
|
||||
red.save(
|
||||
test_file,
|
||||
save_all=True,
|
||||
append_images=[red, transparent],
|
||||
disposal=disposal,
|
||||
blend=PngImagePlugin.APNG_BLEND_OP_OVER,
|
||||
blend=PngImagePlugin.Blend.OP_OVER,
|
||||
)
|
||||
with Image.open(test_file) as im:
|
||||
im.seek(2)
|
||||
|
@ -487,26 +487,26 @@ def test_apng_save_disposal(tmp_path):
|
|||
assert im.getpixel((64, 32)) == (0, 0, 0, 0)
|
||||
|
||||
disposal = [
|
||||
PngImagePlugin.APNG_DISPOSE_OP_NONE,
|
||||
PngImagePlugin.APNG_DISPOSE_OP_BACKGROUND,
|
||||
PngImagePlugin.Disposal.OP_NONE,
|
||||
PngImagePlugin.Disposal.OP_BACKGROUND,
|
||||
]
|
||||
red.save(
|
||||
test_file,
|
||||
save_all=True,
|
||||
append_images=[green],
|
||||
disposal=disposal,
|
||||
blend=PngImagePlugin.APNG_BLEND_OP_OVER,
|
||||
blend=PngImagePlugin.Blend.OP_OVER,
|
||||
)
|
||||
with Image.open(test_file) as im:
|
||||
im.seek(1)
|
||||
assert im.getpixel((0, 0)) == (0, 255, 0, 255)
|
||||
assert im.getpixel((64, 32)) == (0, 255, 0, 255)
|
||||
|
||||
# test APNG_DISPOSE_OP_PREVIOUS
|
||||
# test OP_PREVIOUS
|
||||
disposal = [
|
||||
PngImagePlugin.APNG_DISPOSE_OP_NONE,
|
||||
PngImagePlugin.APNG_DISPOSE_OP_PREVIOUS,
|
||||
PngImagePlugin.APNG_DISPOSE_OP_NONE,
|
||||
PngImagePlugin.Disposal.OP_NONE,
|
||||
PngImagePlugin.Disposal.OP_PREVIOUS,
|
||||
PngImagePlugin.Disposal.OP_NONE,
|
||||
]
|
||||
red.save(
|
||||
test_file,
|
||||
|
@ -514,7 +514,7 @@ def test_apng_save_disposal(tmp_path):
|
|||
append_images=[green, red, transparent],
|
||||
default_image=True,
|
||||
disposal=disposal,
|
||||
blend=PngImagePlugin.APNG_BLEND_OP_OVER,
|
||||
blend=PngImagePlugin.Blend.OP_OVER,
|
||||
)
|
||||
with Image.open(test_file) as im:
|
||||
im.seek(3)
|
||||
|
@ -522,15 +522,15 @@ def test_apng_save_disposal(tmp_path):
|
|||
assert im.getpixel((64, 32)) == (0, 255, 0, 255)
|
||||
|
||||
disposal = [
|
||||
PngImagePlugin.APNG_DISPOSE_OP_NONE,
|
||||
PngImagePlugin.APNG_DISPOSE_OP_PREVIOUS,
|
||||
PngImagePlugin.Disposal.OP_NONE,
|
||||
PngImagePlugin.Disposal.OP_PREVIOUS,
|
||||
]
|
||||
red.save(
|
||||
test_file,
|
||||
save_all=True,
|
||||
append_images=[green],
|
||||
disposal=disposal,
|
||||
blend=PngImagePlugin.APNG_BLEND_OP_OVER,
|
||||
blend=PngImagePlugin.Blend.OP_OVER,
|
||||
)
|
||||
with Image.open(test_file) as im:
|
||||
im.seek(1)
|
||||
|
@ -538,7 +538,7 @@ def test_apng_save_disposal(tmp_path):
|
|||
assert im.getpixel((64, 32)) == (0, 255, 0, 255)
|
||||
|
||||
# test info disposal
|
||||
red.info["disposal"] = PngImagePlugin.APNG_DISPOSE_OP_BACKGROUND
|
||||
red.info["disposal"] = PngImagePlugin.Disposal.OP_BACKGROUND
|
||||
red.save(
|
||||
test_file,
|
||||
save_all=True,
|
||||
|
@ -556,12 +556,12 @@ def test_apng_save_disposal_previous(tmp_path):
|
|||
red = Image.new("RGBA", size, (255, 0, 0, 255))
|
||||
green = Image.new("RGBA", size, (0, 255, 0, 255))
|
||||
|
||||
# test APNG_DISPOSE_OP_NONE
|
||||
# test OP_NONE
|
||||
transparent.save(
|
||||
test_file,
|
||||
save_all=True,
|
||||
append_images=[red, green],
|
||||
disposal=PngImagePlugin.APNG_DISPOSE_OP_PREVIOUS,
|
||||
disposal=PngImagePlugin.Disposal.OP_PREVIOUS,
|
||||
)
|
||||
with Image.open(test_file) as im:
|
||||
im.seek(2)
|
||||
|
@ -576,17 +576,17 @@ def test_apng_save_blend(tmp_path):
|
|||
green = Image.new("RGBA", size, (0, 255, 0, 255))
|
||||
transparent = Image.new("RGBA", size, (0, 0, 0, 0))
|
||||
|
||||
# test APNG_BLEND_OP_SOURCE on solid color
|
||||
# test OP_SOURCE on solid color
|
||||
blend = [
|
||||
PngImagePlugin.APNG_BLEND_OP_OVER,
|
||||
PngImagePlugin.APNG_BLEND_OP_SOURCE,
|
||||
PngImagePlugin.Blend.OP_OVER,
|
||||
PngImagePlugin.Blend.OP_SOURCE,
|
||||
]
|
||||
red.save(
|
||||
test_file,
|
||||
save_all=True,
|
||||
append_images=[red, green],
|
||||
default_image=True,
|
||||
disposal=PngImagePlugin.APNG_DISPOSE_OP_NONE,
|
||||
disposal=PngImagePlugin.Disposal.OP_NONE,
|
||||
blend=blend,
|
||||
)
|
||||
with Image.open(test_file) as im:
|
||||
|
@ -594,17 +594,17 @@ def test_apng_save_blend(tmp_path):
|
|||
assert im.getpixel((0, 0)) == (0, 255, 0, 255)
|
||||
assert im.getpixel((64, 32)) == (0, 255, 0, 255)
|
||||
|
||||
# test APNG_BLEND_OP_SOURCE on transparent color
|
||||
# test OP_SOURCE on transparent color
|
||||
blend = [
|
||||
PngImagePlugin.APNG_BLEND_OP_OVER,
|
||||
PngImagePlugin.APNG_BLEND_OP_SOURCE,
|
||||
PngImagePlugin.Blend.OP_OVER,
|
||||
PngImagePlugin.Blend.OP_SOURCE,
|
||||
]
|
||||
red.save(
|
||||
test_file,
|
||||
save_all=True,
|
||||
append_images=[red, transparent],
|
||||
default_image=True,
|
||||
disposal=PngImagePlugin.APNG_DISPOSE_OP_NONE,
|
||||
disposal=PngImagePlugin.Disposal.OP_NONE,
|
||||
blend=blend,
|
||||
)
|
||||
with Image.open(test_file) as im:
|
||||
|
@ -612,14 +612,14 @@ def test_apng_save_blend(tmp_path):
|
|||
assert im.getpixel((0, 0)) == (0, 0, 0, 0)
|
||||
assert im.getpixel((64, 32)) == (0, 0, 0, 0)
|
||||
|
||||
# test APNG_BLEND_OP_OVER
|
||||
# test OP_OVER
|
||||
red.save(
|
||||
test_file,
|
||||
save_all=True,
|
||||
append_images=[green, transparent],
|
||||
default_image=True,
|
||||
disposal=PngImagePlugin.APNG_DISPOSE_OP_NONE,
|
||||
blend=PngImagePlugin.APNG_BLEND_OP_OVER,
|
||||
disposal=PngImagePlugin.Disposal.OP_NONE,
|
||||
blend=PngImagePlugin.Blend.OP_OVER,
|
||||
)
|
||||
with Image.open(test_file) as im:
|
||||
im.seek(1)
|
||||
|
@ -630,8 +630,18 @@ def test_apng_save_blend(tmp_path):
|
|||
assert im.getpixel((64, 32)) == (0, 255, 0, 255)
|
||||
|
||||
# test info blend
|
||||
red.info["blend"] = PngImagePlugin.APNG_BLEND_OP_OVER
|
||||
red.info["blend"] = PngImagePlugin.Blend.OP_OVER
|
||||
red.save(test_file, save_all=True, append_images=[green, transparent])
|
||||
with Image.open(test_file) as im:
|
||||
im.seek(2)
|
||||
assert im.getpixel((0, 0)) == (0, 255, 0, 255)
|
||||
|
||||
|
||||
def test_constants_deprecation():
|
||||
for enum, prefix in {
|
||||
PngImagePlugin.Disposal: "APNG_DISPOSE_",
|
||||
PngImagePlugin.Blend: "APNG_BLEND_",
|
||||
}.items():
|
||||
for name in enum.__members__:
|
||||
with pytest.warns(DeprecationWarning):
|
||||
assert getattr(PngImagePlugin, prefix + name) == enum[name]
|
||||
|
|
@@ -1,6 +1,6 @@
 import pytest
 
-from PIL import Image
+from PIL import BlpImagePlugin, Image
 
 from .helper import assert_image_equal_tofile
 
@@ -37,3 +37,14 @@ def test_crashes(test_file):
         with Image.open(f) as im:
             with pytest.raises(OSError):
                 im.load()
+
+
+def test_constants_deprecation():
+    for enum, prefix in {
+        BlpImagePlugin.Format: "BLP_FORMAT_",
+        BlpImagePlugin.Encoding: "BLP_ENCODING_",
+        BlpImagePlugin.AlphaEncoding: "BLP_ALPHA_ENCODING_",
+    }.items():
+        for name in enum.__members__:
+            with pytest.warns(DeprecationWarning):
+                assert getattr(BlpImagePlugin, prefix + name) == enum[name]
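The test_constants_deprecation functions in this diff (for BlpImagePlugin above, and for FtexImagePlugin, PngImagePlugin and Image elsewhere) all assert that a deprecated flat name such as BLP_FORMAT_JPEG still resolves to its enum member while warning. The diff itself does not show how that fallback is wired up; a hypothetical sketch of one common pattern, a module-level __getattr__ (PEP 562) with an illustrative Format enum, not Pillow's actual implementation:

import warnings
from enum import IntEnum


class Format(IntEnum):
    JPEG = 0  # illustrative member and value, not taken from Pillow


# Hypothetical mapping from deprecated name prefixes to the new enums.
_DEPRECATED = {"BLP_FORMAT_": Format}


def __getattr__(name):
    # Only called when normal module attribute lookup fails (PEP 562).
    for prefix, enum in _DEPRECATED.items():
        member = name[len(prefix):]
        if name.startswith(prefix) and member in enum.__members__:
            warnings.warn(
                f"{name} is deprecated; use {enum.__name__}.{member} instead",
                DeprecationWarning,
            )
            return enum[member]
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")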
@@ -1,4 +1,6 @@
-from PIL import Image
+import pytest
+
+from PIL import FtexImagePlugin, Image
 
 from .helper import assert_image_equal_tofile, assert_image_similar
 
@@ -12,3 +14,12 @@ def test_load_dxt1():
     with Image.open("Tests/images/ftex_dxt1.ftc") as im:
         with Image.open("Tests/images/ftex_dxt1.png") as target:
             assert_image_similar(im, target.convert("RGBA"), 15)
+
+
+def test_constants_deprecation():
+    for enum, prefix in {
+        FtexImagePlugin.Format: "FORMAT_",
+    }.items():
+        for name in enum.__members__:
+            with pytest.warns(DeprecationWarning):
+                assert getattr(FtexImagePlugin, prefix + name) == enum[name]
@@ -210,8 +210,8 @@ def test_palette_handling(tmp_path):
     with Image.open(TEST_GIF) as im:
         im = im.convert("RGB")
 
-        im = im.resize((100, 100), Image.LANCZOS)
-        im2 = im.convert("P", palette=Image.ADAPTIVE, colors=256)
+        im = im.resize((100, 100), Image.Resampling.LANCZOS)
+        im2 = im.convert("P", palette=Image.Palette.ADAPTIVE, colors=256)
 
         f = str(tmp_path / "temp.gif")
         im2.save(f, optimize=True)
@@ -911,7 +911,7 @@ def test_save_I(tmp_path):
 def test_getdata():
     # Test getheader/getdata against legacy values.
     # Create a 'P' image with holes in the palette.
-    im = Image._wedge().resize((16, 16), Image.NEAREST)
+    im = Image._wedge().resize((16, 16), Image.Resampling.NEAREST)
     im.putpalette(ImagePalette.ImagePalette("RGB"))
     im.info = {"background": 0}
 
@@ -112,12 +112,9 @@ def test_older_icon():
 
 
 def test_jp2_icon():
-    # This icon was made by using Uli Kusterer's oldiconutil to replace
-    # the PNG images with JPEG 2000 ones. The advantage of doing this is
-    # that OS X 10.5 supports JPEG 2000 but not PNG; some commercial
-    # software therefore does just this.
-
-    # (oldiconutil is here: https://github.com/uliwitness/oldiconutil)
+    # This icon uses JPEG 2000 images instead of the PNG images.
+    # The advantage of doing this is that OS X 10.5 supports JPEG 2000
+    # but not PNG; some commercial software therefore does just this.
 
     if not ENABLE_JPEG2K:
         return
@@ -53,7 +53,9 @@ def test_save_to_bytes():
         assert im.mode == reloaded.mode
         assert (64, 64) == reloaded.size
         assert reloaded.format == "ICO"
-        assert_image_equal(reloaded, hopper().resize((64, 64), Image.LANCZOS))
+        assert_image_equal(
+            reloaded, hopper().resize((64, 64), Image.Resampling.LANCZOS)
+        )
 
     # The other one
     output.seek(0)
@@ -63,7 +65,9 @@ def test_save_to_bytes():
         assert im.mode == reloaded.mode
         assert (32, 32) == reloaded.size
         assert reloaded.format == "ICO"
-        assert_image_equal(reloaded, hopper().resize((32, 32), Image.LANCZOS))
+        assert_image_equal(
+            reloaded, hopper().resize((32, 32), Image.Resampling.LANCZOS)
+        )
 
 
 @pytest.mark.parametrize("mode", ("1", "L", "P", "RGB", "RGBA"))
@@ -80,7 +84,7 @@ def test_save_to_bytes_bmp(mode):
         assert "RGBA" == reloaded.mode
         assert (64, 64) == reloaded.size
         assert reloaded.format == "ICO"
-        im = hopper(mode).resize((64, 64), Image.LANCZOS).convert("RGBA")
+        im = hopper(mode).resize((64, 64), Image.Resampling.LANCZOS).convert("RGBA")
         assert_image_equal(reloaded, im)
 
     # The other one
@@ -91,7 +95,7 @@ def test_save_to_bytes_bmp(mode):
         assert "RGBA" == reloaded.mode
         assert (32, 32) == reloaded.size
         assert reloaded.format == "ICO"
-        im = hopper(mode).resize((32, 32), Image.LANCZOS).convert("RGBA")
+        im = hopper(mode).resize((32, 32), Image.Resampling.LANCZOS).convert("RGBA")
         assert_image_equal(reloaded, im)
 
 
@@ -271,7 +271,7 @@ class TestFileJpeg:
         del exif[0x8769]
 
         # Assert that it needs to be transposed
-        assert exif[0x0112] == Image.TRANSVERSE
+        assert exif[0x0112] == Image.Transpose.TRANSVERSE
 
         # Assert that the GPS IFD is present and empty
         assert exif.get_ifd(0x8825) == {}
@@ -291,7 +291,7 @@ def test_subsampling_decode(name):
                 # RGB reference images are downscaled
                 epsilon = 3e-3
                 width, height = width * 2, height * 2
-            expected = im2.resize((width, height), Image.NEAREST)
+            expected = im2.resize((width, height), Image.Resampling.NEAREST)
             assert_image_similar(im, expected, epsilon)
 
 
@@ -112,7 +112,7 @@ class TestFileLibTiff(LibTiffTestCase):
         test_file = "Tests/images/hopper_g4_500.tif"
         with Image.open(test_file) as orig:
             out = str(tmp_path / "temp.tif")
-            rot = orig.transpose(Image.ROTATE_90)
+            rot = orig.transpose(Image.Transpose.ROTATE_90)
             assert rot.size == (500, 500)
             rot.save(out)
 
@@ -77,7 +77,7 @@ def to_rgb_colorsys(im):
 
 
 def test_wedge():
-    src = wedge().resize((3 * 32, 32), Image.BILINEAR)
+    src = wedge().resize((3 * 32, 32), Image.Resampling.BILINEAR)
     im = src.convert("HSV")
     comparable = to_hsv_colorsys(src)
 
@@ -813,6 +813,31 @@ class TestImage:
         with pytest.warns(DeprecationWarning):
             assert Image.CONTAINER == 2
 
+    def test_constants_deprecation(self):
+        with pytest.warns(DeprecationWarning):
+            assert Image.NEAREST == 0
+        with pytest.warns(DeprecationWarning):
+            assert Image.NONE == 0
+
+        with pytest.warns(DeprecationWarning):
+            assert Image.LINEAR == Image.Resampling.BILINEAR
+        with pytest.warns(DeprecationWarning):
+            assert Image.CUBIC == Image.Resampling.BICUBIC
+        with pytest.warns(DeprecationWarning):
+            assert Image.ANTIALIAS == Image.Resampling.LANCZOS
+
+        for enum in (
+            Image.Transpose,
+            Image.Transform,
+            Image.Resampling,
+            Image.Dither,
+            Image.Palette,
+            Image.Quantize,
+        ):
+            for name in enum.__members__:
+                with pytest.warns(DeprecationWarning):
+                    assert getattr(Image, name) == enum[name]
+
     @pytest.mark.parametrize(
         "path",
         [
@@ -76,6 +76,13 @@ def test_16bit_workaround():
         _test_float_conversion(im.convert("I"))
 
 
+def test_opaque():
+    alpha = hopper("P").convert("PA").getchannel("A")
+
+    solid = Image.new("L", (128, 128), 255)
+    assert_image_equal(alpha, solid)
+
+
 def test_rgba_p():
     im = hopper("RGBA")
     im.putalpha(hopper("L"))
@@ -136,7 +143,7 @@ def test_trns_l(tmp_path):
     assert "transparency" in im_p.info
     im_p.save(f)
 
-    im_p = im.convert("P", palette=Image.ADAPTIVE)
+    im_p = im.convert("P", palette=Image.Palette.ADAPTIVE)
     assert "transparency" in im_p.info
     im_p.save(f)
 
@@ -159,13 +166,13 @@ def test_trns_RGB(tmp_path):
     assert "transparency" not in im_rgba.info
     im_rgba.save(f)
 
-    im_p = pytest.warns(UserWarning, im.convert, "P", palette=Image.ADAPTIVE)
+    im_p = pytest.warns(UserWarning, im.convert, "P", palette=Image.Palette.ADAPTIVE)
     assert "transparency" not in im_p.info
     im_p.save(f)
 
     im = Image.new("RGB", (1, 1))
     im.info["transparency"] = im.getpixel((0, 0))
-    im_p = im.convert("P", palette=Image.ADAPTIVE)
+    im_p = im.convert("P", palette=Image.Palette.ADAPTIVE)
     assert im_p.info["transparency"] == im_p.getpixel((0, 0))
     im_p.save(f)
 
@@ -14,7 +14,7 @@ def test_sanity():
 
 def test_roundtrip():
     def getdata(mode):
-        im = hopper(mode).resize((32, 30), Image.NEAREST)
+        im = hopper(mode).resize((32, 30), Image.Resampling.NEAREST)
         data = im.getdata()
         return data[0], len(data), len(list(data))
 
|
@ -45,7 +45,7 @@ class TestImagingPaste:
|
|||
|
||||
@cached_property
|
||||
def mask_L(self):
|
||||
return self.gradient_L.transpose(Image.ROTATE_270)
|
||||
return self.gradient_L.transpose(Image.Transpose.ROTATE_270)
|
||||
|
||||
@cached_property
|
||||
def gradient_L(self):
|
||||
|
@ -62,8 +62,8 @@ class TestImagingPaste:
|
|||
"RGB",
|
||||
[
|
||||
self.gradient_L,
|
||||
self.gradient_L.transpose(Image.ROTATE_90),
|
||||
self.gradient_L.transpose(Image.ROTATE_180),
|
||||
self.gradient_L.transpose(Image.Transpose.ROTATE_90),
|
||||
self.gradient_L.transpose(Image.Transpose.ROTATE_180),
|
||||
],
|
||||
)
|
||||
|
||||
|
@ -73,9 +73,9 @@ class TestImagingPaste:
|
|||
"RGBA",
|
||||
[
|
||||
self.gradient_L,
|
||||
self.gradient_L.transpose(Image.ROTATE_90),
|
||||
self.gradient_L.transpose(Image.ROTATE_180),
|
||||
self.gradient_L.transpose(Image.ROTATE_270),
|
||||
self.gradient_L.transpose(Image.Transpose.ROTATE_90),
|
||||
self.gradient_L.transpose(Image.Transpose.ROTATE_180),
|
||||
self.gradient_L.transpose(Image.Transpose.ROTATE_270),
|
||||
],
|
||||
)
|
||||
|
||||
|
@ -85,9 +85,9 @@ class TestImagingPaste:
|
|||
"RGBa",
|
||||
[
|
||||
self.gradient_L,
|
||||
self.gradient_L.transpose(Image.ROTATE_90),
|
||||
self.gradient_L.transpose(Image.ROTATE_180),
|
||||
self.gradient_L.transpose(Image.ROTATE_270),
|
||||
self.gradient_L.transpose(Image.Transpose.ROTATE_90),
|
||||
self.gradient_L.transpose(Image.Transpose.ROTATE_180),
|
||||
self.gradient_L.transpose(Image.Transpose.ROTATE_270),
|
||||
],
|
||||
)
|
||||
|
||||
|
|
@@ -62,3 +62,17 @@ def test_putpalette_with_alpha_values():
     im.putpalette(palette_with_alpha_values, "RGBA")
 
     assert_image_equal(im.convert("RGBA"), expected)
+
+
+@pytest.mark.parametrize(
+    "mode, palette",
+    (
+        ("RGBA", (1, 2, 3, 4)),
+        ("RGBAX", (1, 2, 3, 4, 0)),
+    ),
+)
+def test_rgba_palette(mode, palette):
+    im = Image.new("P", (1, 1))
+    im.putpalette(palette, mode)
+    assert im.getpalette() == [1, 2, 3]
+    assert im.palette.colors == {(1, 2, 3, 4): 0}
|
@ -25,7 +25,7 @@ def test_libimagequant_quantize():
|
|||
libimagequant = parse_version(features.version_feature("libimagequant"))
|
||||
if libimagequant < parse_version("4"):
|
||||
pytest.skip("Fails with libimagequant earlier than 4.0.0 on ppc64le")
|
||||
converted = image.quantize(100, Image.LIBIMAGEQUANT)
|
||||
converted = image.quantize(100, Image.Quantize.LIBIMAGEQUANT)
|
||||
assert converted.mode == "P"
|
||||
assert_image_similar(converted.convert("RGB"), image, 15)
|
||||
assert len(converted.getcolors()) == 100
|
||||
|
@ -33,7 +33,7 @@ def test_libimagequant_quantize():
|
|||
|
||||
def test_octree_quantize():
|
||||
image = hopper()
|
||||
converted = image.quantize(100, Image.FASTOCTREE)
|
||||
converted = image.quantize(100, Image.Quantize.FASTOCTREE)
|
||||
assert converted.mode == "P"
|
||||
assert_image_similar(converted.convert("RGB"), image, 20)
|
||||
assert len(converted.getcolors()) == 100
|
||||
|
@ -96,10 +96,10 @@ def test_transparent_colors_equal():
|
|||
@pytest.mark.parametrize(
|
||||
"method, color",
|
||||
(
|
||||
(Image.MEDIANCUT, (0, 0, 0)),
|
||||
(Image.MAXCOVERAGE, (0, 0, 0)),
|
||||
(Image.FASTOCTREE, (0, 0, 0)),
|
||||
(Image.FASTOCTREE, (0, 0, 0, 0)),
|
||||
(Image.Quantize.MEDIANCUT, (0, 0, 0)),
|
||||
(Image.Quantize.MAXCOVERAGE, (0, 0, 0)),
|
||||
(Image.Quantize.FASTOCTREE, (0, 0, 0)),
|
||||
(Image.Quantize.FASTOCTREE, (0, 0, 0, 0)),
|
||||
),
|
||||
)
|
||||
def test_palette(method, color):
|
||||
|
@ -108,3 +108,18 @@ def test_palette(method, color):
|
|||
converted = im.quantize(method=method)
|
||||
converted_px = converted.load()
|
||||
assert converted_px[0, 0] == converted.palette.colors[color]
|
||||
|
||||
|
||||
def test_small_palette():
|
||||
# Arrange
|
||||
im = hopper()
|
||||
|
||||
colors = (255, 0, 0, 0, 0, 255)
|
||||
p = Image.new("P", (1, 1))
|
||||
p.putpalette(colors)
|
||||
|
||||
# Act
|
||||
im = im.quantize(palette=p)
|
||||
|
||||
# Assert
|
||||
assert len(im.getcolors()) == 2
|
||||
|
|
|
@ -97,7 +97,7 @@ def get_image(mode):
|
|||
bands = [gradients_image]
|
||||
for _ in mode_info.bands[1:]:
|
||||
# rotate previous image
|
||||
band = bands[-1].transpose(Image.ROTATE_90)
|
||||
band = bands[-1].transpose(Image.Transpose.ROTATE_90)
|
||||
bands.append(band)
|
||||
# Correct alpha channel by transforming completely transparent pixels.
|
||||
# Low alpha values also emphasize error after alpha multiplication.
|
||||
|
@ -138,24 +138,26 @@ def compare_reduce_with_reference(im, factor, average_diff=0.4, max_diff=1):
|
|||
reference = Image.new(im.mode, reduced.size)
|
||||
area_size = (im.size[0] // factor[0], im.size[1] // factor[1])
|
||||
area_box = (0, 0, area_size[0] * factor[0], area_size[1] * factor[1])
|
||||
area = im.resize(area_size, Image.BOX, area_box)
|
||||
area = im.resize(area_size, Image.Resampling.BOX, area_box)
|
||||
reference.paste(area, (0, 0))
|
||||
|
||||
if area_size[0] < reduced.size[0]:
|
||||
assert reduced.size[0] - area_size[0] == 1
|
||||
last_column_box = (area_box[2], 0, im.size[0], area_box[3])
|
||||
last_column = im.resize((1, area_size[1]), Image.BOX, last_column_box)
|
||||
last_column = im.resize(
|
||||
(1, area_size[1]), Image.Resampling.BOX, last_column_box
|
||||
)
|
||||
reference.paste(last_column, (area_size[0], 0))
|
||||
|
||||
if area_size[1] < reduced.size[1]:
|
||||
assert reduced.size[1] - area_size[1] == 1
|
||||
last_row_box = (0, area_box[3], area_box[2], im.size[1])
|
||||
last_row = im.resize((area_size[0], 1), Image.BOX, last_row_box)
|
||||
last_row = im.resize((area_size[0], 1), Image.Resampling.BOX, last_row_box)
|
||||
reference.paste(last_row, (0, area_size[1]))
|
||||
|
||||
if area_size[0] < reduced.size[0] and area_size[1] < reduced.size[1]:
|
||||
last_pixel_box = (area_box[2], area_box[3], im.size[0], im.size[1])
|
||||
last_pixel = im.resize((1, 1), Image.BOX, last_pixel_box)
|
||||
last_pixel = im.resize((1, 1), Image.Resampling.BOX, last_pixel_box)
|
||||
reference.paste(last_pixel, area_size)
|
||||
|
||||
assert_compare_images(reduced, reference, average_diff, max_diff)
|
||||
|
|
|
@ -24,7 +24,7 @@ class TestImagingResampleVulnerability:
|
|||
):
|
||||
with pytest.raises(MemoryError):
|
||||
# any resampling filter will do here
|
||||
im.im.resize((xsize, ysize), Image.BILINEAR)
|
||||
im.im.resize((xsize, ysize), Image.Resampling.BILINEAR)
|
||||
|
||||
def test_invalid_size(self):
|
||||
im = hopper()
|
||||
|
@ -103,7 +103,7 @@ class TestImagingCoreResampleAccuracy:
|
|||
def test_reduce_box(self):
|
||||
for mode in ["RGBX", "RGB", "La", "L"]:
|
||||
case = self.make_case(mode, (8, 8), 0xE1)
|
||||
case = case.resize((4, 4), Image.BOX)
|
||||
case = case.resize((4, 4), Image.Resampling.BOX)
|
||||
# fmt: off
|
||||
data = ("e1 e1"
|
||||
"e1 e1")
|
||||
|
@ -114,7 +114,7 @@ class TestImagingCoreResampleAccuracy:
|
|||
def test_reduce_bilinear(self):
|
||||
for mode in ["RGBX", "RGB", "La", "L"]:
|
||||
case = self.make_case(mode, (8, 8), 0xE1)
|
||||
case = case.resize((4, 4), Image.BILINEAR)
|
||||
case = case.resize((4, 4), Image.Resampling.BILINEAR)
|
||||
# fmt: off
|
||||
data = ("e1 c9"
|
||||
"c9 b7")
|
||||
|
@ -125,7 +125,7 @@ class TestImagingCoreResampleAccuracy:
|
|||
def test_reduce_hamming(self):
|
||||
for mode in ["RGBX", "RGB", "La", "L"]:
|
||||
case = self.make_case(mode, (8, 8), 0xE1)
|
||||
case = case.resize((4, 4), Image.HAMMING)
|
||||
case = case.resize((4, 4), Image.Resampling.HAMMING)
|
||||
# fmt: off
|
||||
data = ("e1 da"
|
||||
"da d3")
|
||||
|
@ -136,7 +136,7 @@ class TestImagingCoreResampleAccuracy:
|
|||
def test_reduce_bicubic(self):
|
||||
for mode in ["RGBX", "RGB", "La", "L"]:
|
||||
case = self.make_case(mode, (12, 12), 0xE1)
|
||||
case = case.resize((6, 6), Image.BICUBIC)
|
||||
case = case.resize((6, 6), Image.Resampling.BICUBIC)
|
||||
# fmt: off
|
||||
data = ("e1 e3 d4"
|
||||
"e3 e5 d6"
|
||||
|
@ -148,7 +148,7 @@ class TestImagingCoreResampleAccuracy:
|
|||
def test_reduce_lanczos(self):
|
||||
for mode in ["RGBX", "RGB", "La", "L"]:
|
||||
case = self.make_case(mode, (16, 16), 0xE1)
|
||||
case = case.resize((8, 8), Image.LANCZOS)
|
||||
case = case.resize((8, 8), Image.Resampling.LANCZOS)
|
||||
# fmt: off
|
||||
data = ("e1 e0 e4 d7"
|
||||
"e0 df e3 d6"
|
||||
|
@ -161,7 +161,7 @@ class TestImagingCoreResampleAccuracy:
|
|||
def test_enlarge_box(self):
|
||||
for mode in ["RGBX", "RGB", "La", "L"]:
|
||||
case = self.make_case(mode, (2, 2), 0xE1)
|
||||
case = case.resize((4, 4), Image.BOX)
|
||||
case = case.resize((4, 4), Image.Resampling.BOX)
|
||||
# fmt: off
|
||||
data = ("e1 e1"
|
||||
"e1 e1")
|
||||
|
@ -172,7 +172,7 @@ class TestImagingCoreResampleAccuracy:
|
|||
def test_enlarge_bilinear(self):
|
||||
for mode in ["RGBX", "RGB", "La", "L"]:
|
||||
case = self.make_case(mode, (2, 2), 0xE1)
|
||||
case = case.resize((4, 4), Image.BILINEAR)
|
||||
case = case.resize((4, 4), Image.Resampling.BILINEAR)
|
||||
# fmt: off
|
||||
data = ("e1 b0"
|
||||
"b0 98")
|
||||
|
@ -183,7 +183,7 @@ class TestImagingCoreResampleAccuracy:
|
|||
def test_enlarge_hamming(self):
|
||||
for mode in ["RGBX", "RGB", "La", "L"]:
|
||||
case = self.make_case(mode, (2, 2), 0xE1)
|
||||
case = case.resize((4, 4), Image.HAMMING)
|
||||
case = case.resize((4, 4), Image.Resampling.HAMMING)
|
||||
# fmt: off
|
||||
data = ("e1 d2"
|
||||
"d2 c5")
|
||||
|
@ -194,7 +194,7 @@ class TestImagingCoreResampleAccuracy:
|
|||
def test_enlarge_bicubic(self):
|
||||
for mode in ["RGBX", "RGB", "La", "L"]:
|
||||
case = self.make_case(mode, (4, 4), 0xE1)
|
||||
case = case.resize((8, 8), Image.BICUBIC)
|
||||
case = case.resize((8, 8), Image.Resampling.BICUBIC)
|
||||
# fmt: off
|
||||
data = ("e1 e5 ee b9"
|
||||
"e5 e9 f3 bc"
|
||||
|
@ -207,7 +207,7 @@ class TestImagingCoreResampleAccuracy:
|
|||
def test_enlarge_lanczos(self):
|
||||
for mode in ["RGBX", "RGB", "La", "L"]:
|
||||
case = self.make_case(mode, (6, 6), 0xE1)
|
||||
case = case.resize((12, 12), Image.LANCZOS)
|
||||
case = case.resize((12, 12), Image.Resampling.LANCZOS)
|
||||
data = (
|
||||
"e1 e0 db ed f5 b8"
|
||||
"e0 df da ec f3 b7"
|
||||
|
@ -220,7 +220,9 @@ class TestImagingCoreResampleAccuracy:
|
|||
self.check_case(channel, self.make_sample(data, (12, 12)))
|
||||
|
||||
def test_box_filter_correct_range(self):
|
||||
im = Image.new("RGB", (8, 8), "#1688ff").resize((100, 100), Image.BOX)
|
||||
im = Image.new("RGB", (8, 8), "#1688ff").resize(
|
||||
(100, 100), Image.Resampling.BOX
|
||||
)
|
||||
ref = Image.new("RGB", (100, 100), "#1688ff")
|
||||
assert_image_equal(im, ref)
|
||||
|
||||
|
@ -228,7 +230,7 @@ class TestImagingCoreResampleAccuracy:
|
|||
class TestCoreResampleConsistency:
|
||||
def make_case(self, mode, fill):
|
||||
im = Image.new(mode, (512, 9), fill)
|
||||
return im.resize((9, 512), Image.LANCZOS), im.load()[0, 0]
|
||||
return im.resize((9, 512), Image.Resampling.LANCZOS), im.load()[0, 0]
|
||||
|
||||
def run_case(self, case):
|
||||
channel, color = case
|
||||
|
@ -283,20 +285,20 @@ class TestCoreResampleAlphaCorrect:
|
|||
@pytest.mark.xfail(reason="Current implementation isn't precise enough")
|
||||
def test_levels_rgba(self):
|
||||
case = self.make_levels_case("RGBA")
|
||||
self.run_levels_case(case.resize((512, 32), Image.BOX))
|
||||
self.run_levels_case(case.resize((512, 32), Image.BILINEAR))
|
||||
self.run_levels_case(case.resize((512, 32), Image.HAMMING))
|
||||
self.run_levels_case(case.resize((512, 32), Image.BICUBIC))
|
||||
self.run_levels_case(case.resize((512, 32), Image.LANCZOS))
|
||||
self.run_levels_case(case.resize((512, 32), Image.Resampling.BOX))
|
||||
self.run_levels_case(case.resize((512, 32), Image.Resampling.BILINEAR))
|
||||
self.run_levels_case(case.resize((512, 32), Image.Resampling.HAMMING))
|
||||
self.run_levels_case(case.resize((512, 32), Image.Resampling.BICUBIC))
|
||||
self.run_levels_case(case.resize((512, 32), Image.Resampling.LANCZOS))
|
||||
|
||||
@pytest.mark.xfail(reason="Current implementation isn't precise enough")
|
||||
def test_levels_la(self):
|
||||
case = self.make_levels_case("LA")
|
||||
self.run_levels_case(case.resize((512, 32), Image.BOX))
|
||||
self.run_levels_case(case.resize((512, 32), Image.BILINEAR))
|
||||
self.run_levels_case(case.resize((512, 32), Image.HAMMING))
|
||||
self.run_levels_case(case.resize((512, 32), Image.BICUBIC))
|
||||
self.run_levels_case(case.resize((512, 32), Image.LANCZOS))
|
||||
self.run_levels_case(case.resize((512, 32), Image.Resampling.BOX))
|
||||
self.run_levels_case(case.resize((512, 32), Image.Resampling.BILINEAR))
|
||||
self.run_levels_case(case.resize((512, 32), Image.Resampling.HAMMING))
|
||||
self.run_levels_case(case.resize((512, 32), Image.Resampling.BICUBIC))
|
||||
self.run_levels_case(case.resize((512, 32), Image.Resampling.LANCZOS))
|
||||
|
||||
def make_dirty_case(self, mode, clean_pixel, dirty_pixel):
|
||||
i = Image.new(mode, (64, 64), dirty_pixel)
|
||||
|
@ -321,19 +323,27 @@ class TestCoreResampleAlphaCorrect:
|
|||
|
||||
def test_dirty_pixels_rgba(self):
|
||||
case = self.make_dirty_case("RGBA", (255, 255, 0, 128), (0, 0, 255, 0))
|
||||
self.run_dirty_case(case.resize((20, 20), Image.BOX), (255, 255, 0))
|
||||
self.run_dirty_case(case.resize((20, 20), Image.BILINEAR), (255, 255, 0))
|
||||
self.run_dirty_case(case.resize((20, 20), Image.HAMMING), (255, 255, 0))
|
||||
self.run_dirty_case(case.resize((20, 20), Image.BICUBIC), (255, 255, 0))
|
||||
self.run_dirty_case(case.resize((20, 20), Image.LANCZOS), (255, 255, 0))
|
||||
self.run_dirty_case(case.resize((20, 20), Image.Resampling.BOX), (255, 255, 0))
|
||||
self.run_dirty_case(
|
||||
case.resize((20, 20), Image.Resampling.BILINEAR), (255, 255, 0)
|
||||
)
|
||||
self.run_dirty_case(
|
||||
case.resize((20, 20), Image.Resampling.HAMMING), (255, 255, 0)
|
||||
)
|
||||
self.run_dirty_case(
|
||||
case.resize((20, 20), Image.Resampling.BICUBIC), (255, 255, 0)
|
||||
)
|
||||
self.run_dirty_case(
|
||||
case.resize((20, 20), Image.Resampling.LANCZOS), (255, 255, 0)
|
||||
)
|
||||
|
||||
def test_dirty_pixels_la(self):
|
||||
case = self.make_dirty_case("LA", (255, 128), (0, 0))
|
||||
self.run_dirty_case(case.resize((20, 20), Image.BOX), (255,))
|
||||
self.run_dirty_case(case.resize((20, 20), Image.BILINEAR), (255,))
|
||||
self.run_dirty_case(case.resize((20, 20), Image.HAMMING), (255,))
|
||||
self.run_dirty_case(case.resize((20, 20), Image.BICUBIC), (255,))
|
||||
self.run_dirty_case(case.resize((20, 20), Image.LANCZOS), (255,))
|
||||
self.run_dirty_case(case.resize((20, 20), Image.Resampling.BOX), (255,))
|
||||
self.run_dirty_case(case.resize((20, 20), Image.Resampling.BILINEAR), (255,))
|
||||
self.run_dirty_case(case.resize((20, 20), Image.Resampling.HAMMING), (255,))
|
||||
self.run_dirty_case(case.resize((20, 20), Image.Resampling.BICUBIC), (255,))
|
||||
self.run_dirty_case(case.resize((20, 20), Image.Resampling.LANCZOS), (255,))
|
||||
|
||||
|
||||
class TestCoreResamplePasses:
|
||||
|
@ -346,26 +356,26 @@ class TestCoreResamplePasses:
|
|||
def test_horizontal(self):
|
||||
im = hopper("L")
|
||||
with self.count(1):
|
||||
im.resize((im.size[0] - 10, im.size[1]), Image.BILINEAR)
|
||||
im.resize((im.size[0] - 10, im.size[1]), Image.Resampling.BILINEAR)
|
||||
|
||||
def test_vertical(self):
|
||||
im = hopper("L")
|
||||
with self.count(1):
|
||||
im.resize((im.size[0], im.size[1] - 10), Image.BILINEAR)
|
||||
im.resize((im.size[0], im.size[1] - 10), Image.Resampling.BILINEAR)
|
||||
|
||||
def test_both(self):
|
||||
im = hopper("L")
|
||||
with self.count(2):
|
||||
im.resize((im.size[0] - 10, im.size[1] - 10), Image.BILINEAR)
|
||||
im.resize((im.size[0] - 10, im.size[1] - 10), Image.Resampling.BILINEAR)
|
||||
|
||||
def test_box_horizontal(self):
|
||||
im = hopper("L")
|
||||
box = (20, 0, im.size[0] - 20, im.size[1])
|
||||
with self.count(1):
|
||||
# the same size, but different box
|
||||
with_box = im.resize(im.size, Image.BILINEAR, box)
|
||||
with_box = im.resize(im.size, Image.Resampling.BILINEAR, box)
|
||||
with self.count(2):
|
||||
cropped = im.crop(box).resize(im.size, Image.BILINEAR)
|
||||
cropped = im.crop(box).resize(im.size, Image.Resampling.BILINEAR)
|
||||
assert_image_similar(with_box, cropped, 0.1)
|
||||
|
||||
def test_box_vertical(self):
|
||||
|
@ -373,9 +383,9 @@ class TestCoreResamplePasses:
|
|||
box = (0, 20, im.size[0], im.size[1] - 20)
|
||||
with self.count(1):
|
||||
# the same size, but different box
|
||||
with_box = im.resize(im.size, Image.BILINEAR, box)
|
||||
with_box = im.resize(im.size, Image.Resampling.BILINEAR, box)
|
||||
with self.count(2):
|
||||
cropped = im.crop(box).resize(im.size, Image.BILINEAR)
|
||||
cropped = im.crop(box).resize(im.size, Image.Resampling.BILINEAR)
|
||||
assert_image_similar(with_box, cropped, 0.1)
|
||||
|
||||
|
||||
|
@ -388,7 +398,7 @@ class TestCoreResampleCoefficients:
|
|||
draw = ImageDraw.Draw(i)
|
||||
draw.rectangle((0, 0, i.size[0] // 2 - 1, 0), test_color)
|
||||
|
||||
px = i.resize((5, i.size[1]), Image.BICUBIC).load()
|
||||
px = i.resize((5, i.size[1]), Image.Resampling.BICUBIC).load()
|
||||
if px[2, 0] != test_color // 2:
|
||||
assert test_color // 2 == px[2, 0]
|
||||
|
||||
|
@ -396,7 +406,7 @@ class TestCoreResampleCoefficients:
|
|||
# regression test for the wrong coefficients calculation
|
||||
# due to bug https://github.com/python-pillow/Pillow/issues/2161
|
||||
im = Image.new("RGBA", (1280, 1280), (0x20, 0x40, 0x60, 0xFF))
|
||||
histogram = im.resize((256, 256), Image.BICUBIC).histogram()
|
||||
histogram = im.resize((256, 256), Image.Resampling.BICUBIC).histogram()
|
||||
|
||||
# first channel
|
||||
assert histogram[0x100 * 0 + 0x20] == 0x10000
|
||||
|
@ -412,12 +422,12 @@ class TestCoreResampleBox:
|
|||
def test_wrong_arguments(self):
|
||||
im = hopper()
|
||||
for resample in (
|
||||
Image.NEAREST,
|
||||
Image.BOX,
|
||||
Image.BILINEAR,
|
||||
Image.HAMMING,
|
||||
Image.BICUBIC,
|
||||
Image.LANCZOS,
|
||||
Image.Resampling.NEAREST,
|
||||
Image.Resampling.BOX,
|
||||
Image.Resampling.BILINEAR,
|
||||
Image.Resampling.HAMMING,
|
||||
Image.Resampling.BICUBIC,
|
||||
Image.Resampling.LANCZOS,
|
||||
):
|
||||
im.resize((32, 32), resample, (0, 0, im.width, im.height))
|
||||
im.resize((32, 32), resample, (20, 20, im.width, im.height))
|
||||
|
@ -456,7 +466,7 @@ class TestCoreResampleBox:
|
|||
for y0, y1 in split_range(dst_size[1], ytiles):
|
||||
for x0, x1 in split_range(dst_size[0], xtiles):
|
||||
box = (x0 * scale[0], y0 * scale[1], x1 * scale[0], y1 * scale[1])
|
||||
tile = im.resize((x1 - x0, y1 - y0), Image.BICUBIC, box)
|
||||
tile = im.resize((x1 - x0, y1 - y0), Image.Resampling.BICUBIC, box)
|
||||
tiled.paste(tile, (x0, y0))
|
||||
return tiled
|
||||
|
||||
|
@ -467,7 +477,7 @@ class TestCoreResampleBox:
|
|||
with Image.open("Tests/images/flower.jpg") as im:
|
||||
assert im.size == (480, 360)
|
||||
dst_size = (251, 188)
|
||||
reference = im.resize(dst_size, Image.BICUBIC)
|
||||
reference = im.resize(dst_size, Image.Resampling.BICUBIC)
|
||||
|
||||
for tiles in [(1, 1), (3, 3), (9, 7), (100, 100)]:
|
||||
tiled = self.resize_tiled(im, dst_size, *tiles)
|
||||
|
@ -483,12 +493,16 @@ class TestCoreResampleBox:
|
|||
assert im.size == (480, 360)
|
||||
dst_size = (48, 36)
|
||||
# Reference is cropped image resized to destination
|
||||
reference = im.crop((0, 0, 473, 353)).resize(dst_size, Image.BICUBIC)
|
||||
# Image.BOX emulates supersampling (480 / 8 = 60, 360 / 8 = 45)
|
||||
supersampled = im.resize((60, 45), Image.BOX)
|
||||
reference = im.crop((0, 0, 473, 353)).resize(
|
||||
dst_size, Image.Resampling.BICUBIC
|
||||
)
|
||||
# Image.Resampling.BOX emulates supersampling (480 / 8 = 60, 360 / 8 = 45)
|
||||
supersampled = im.resize((60, 45), Image.Resampling.BOX)
|
||||
|
||||
with_box = supersampled.resize(dst_size, Image.BICUBIC, (0, 0, 59.125, 44.125))
|
||||
without_box = supersampled.resize(dst_size, Image.BICUBIC)
|
||||
with_box = supersampled.resize(
|
||||
dst_size, Image.Resampling.BICUBIC, (0, 0, 59.125, 44.125)
|
||||
)
|
||||
without_box = supersampled.resize(dst_size, Image.Resampling.BICUBIC)
|
||||
|
||||
# error with box should be much smaller than without
|
||||
assert_image_similar(reference, with_box, 6)
|
||||
|
@ -496,7 +510,7 @@ class TestCoreResampleBox:
|
|||
assert_image_similar(reference, without_box, 5)
|
||||
|
||||
def test_formats(self):
|
||||
for resample in [Image.NEAREST, Image.BILINEAR]:
|
||||
for resample in [Image.Resampling.NEAREST, Image.Resampling.BILINEAR]:
|
||||
for mode in ["RGB", "L", "RGBA", "LA", "I", ""]:
|
||||
im = hopper(mode)
|
||||
box = (20, 20, im.size[0] - 20, im.size[1] - 20)
|
||||
|
@ -514,7 +528,7 @@ class TestCoreResampleBox:
|
|||
((40, 50), (10, 0, 50, 50)),
|
||||
((40, 50), (10, 20, 50, 70)),
|
||||
]:
|
||||
res = im.resize(size, Image.LANCZOS, box)
|
||||
res = im.resize(size, Image.Resampling.LANCZOS, box)
|
||||
assert res.size == size
|
||||
assert_image_equal(res, im.crop(box), f">>> {size} {box}")
|
||||
|
||||
|
@ -528,7 +542,7 @@ class TestCoreResampleBox:
|
|||
((40, 50), (10.4, 0.4, 50.4, 50.4)),
|
||||
((40, 50), (10.4, 20.4, 50.4, 70.4)),
|
||||
]:
|
||||
res = im.resize(size, Image.LANCZOS, box)
|
||||
res = im.resize(size, Image.Resampling.LANCZOS, box)
|
||||
assert res.size == size
|
||||
with pytest.raises(AssertionError, match=r"difference \d"):
|
||||
# check that the difference at least that much
|
||||
|
@ -538,7 +552,7 @@ class TestCoreResampleBox:
|
|||
# Can skip resize for one dimension
|
||||
im = hopper()
|
||||
|
||||
for flt in [Image.NEAREST, Image.BICUBIC]:
|
||||
for flt in [Image.Resampling.NEAREST, Image.Resampling.BICUBIC]:
|
||||
for size, box in [
|
||||
((40, 50), (0, 0, 40, 90)),
|
||||
((40, 50), (0, 20, 40, 90)),
|
||||
|
@ -559,7 +573,7 @@ class TestCoreResampleBox:
|
|||
# Can skip resize for one dimension
|
||||
im = hopper()
|
||||
|
||||
for flt in [Image.NEAREST, Image.BICUBIC]:
|
||||
for flt in [Image.Resampling.NEAREST, Image.Resampling.BICUBIC]:
|
||||
for size, box in [
|
||||
((40, 50), (0, 0, 90, 50)),
|
||||
((40, 50), (20, 0, 90, 50)),
|
||||
|
|
|
@ -35,33 +35,33 @@ class TestImagingCoreResize:
|
|||
"I;16",
|
||||
]: # exotic mode
|
||||
im = hopper(mode)
|
||||
r = self.resize(im, (15, 12), Image.NEAREST)
|
||||
r = self.resize(im, (15, 12), Image.Resampling.NEAREST)
|
||||
assert r.mode == mode
|
||||
assert r.size == (15, 12)
|
||||
assert r.im.bands == im.im.bands
|
||||
|
||||
def test_convolution_modes(self):
|
||||
with pytest.raises(ValueError):
|
||||
self.resize(hopper("1"), (15, 12), Image.BILINEAR)
|
||||
self.resize(hopper("1"), (15, 12), Image.Resampling.BILINEAR)
|
||||
with pytest.raises(ValueError):
|
||||
self.resize(hopper("P"), (15, 12), Image.BILINEAR)
|
||||
self.resize(hopper("P"), (15, 12), Image.Resampling.BILINEAR)
|
||||
with pytest.raises(ValueError):
|
||||
self.resize(hopper("I;16"), (15, 12), Image.BILINEAR)
|
||||
self.resize(hopper("I;16"), (15, 12), Image.Resampling.BILINEAR)
|
||||
for mode in ["L", "I", "F", "RGB", "RGBA", "CMYK", "YCbCr"]:
|
||||
im = hopper(mode)
|
||||
r = self.resize(im, (15, 12), Image.BILINEAR)
|
||||
r = self.resize(im, (15, 12), Image.Resampling.BILINEAR)
|
||||
assert r.mode == mode
|
||||
assert r.size == (15, 12)
|
||||
assert r.im.bands == im.im.bands
|
||||
|
||||
def test_reduce_filters(self):
|
||||
for f in [
|
||||
Image.NEAREST,
|
||||
Image.BOX,
|
||||
Image.BILINEAR,
|
||||
Image.HAMMING,
|
||||
Image.BICUBIC,
|
||||
Image.LANCZOS,
|
||||
Image.Resampling.NEAREST,
|
||||
Image.Resampling.BOX,
|
||||
Image.Resampling.BILINEAR,
|
||||
Image.Resampling.HAMMING,
|
||||
Image.Resampling.BICUBIC,
|
||||
Image.Resampling.LANCZOS,
|
||||
]:
|
||||
r = self.resize(hopper("RGB"), (15, 12), f)
|
||||
assert r.mode == "RGB"
|
||||
|
@ -69,12 +69,12 @@ class TestImagingCoreResize:
|
|||
|
||||
def test_enlarge_filters(self):
|
||||
for f in [
|
||||
Image.NEAREST,
|
||||
Image.BOX,
|
||||
Image.BILINEAR,
|
||||
Image.HAMMING,
|
||||
Image.BICUBIC,
|
||||
Image.LANCZOS,
|
||||
Image.Resampling.NEAREST,
|
||||
Image.Resampling.BOX,
|
||||
Image.Resampling.BILINEAR,
|
||||
Image.Resampling.HAMMING,
|
||||
Image.Resampling.BICUBIC,
|
||||
Image.Resampling.LANCZOS,
|
||||
]:
|
||||
r = self.resize(hopper("RGB"), (212, 195), f)
|
||||
assert r.mode == "RGB"
|
||||
|
@ -95,12 +95,12 @@ class TestImagingCoreResize:
|
|||
samples["dirty"].putpixel((1, 1), 128)
|
||||
|
||||
for f in [
|
||||
Image.NEAREST,
|
||||
Image.BOX,
|
||||
Image.BILINEAR,
|
||||
Image.HAMMING,
|
||||
Image.BICUBIC,
|
||||
Image.LANCZOS,
|
||||
Image.Resampling.NEAREST,
|
||||
Image.Resampling.BOX,
|
||||
Image.Resampling.BILINEAR,
|
||||
Image.Resampling.HAMMING,
|
||||
Image.Resampling.BICUBIC,
|
||||
Image.Resampling.LANCZOS,
|
||||
]:
|
||||
# samples resized with current filter
|
||||
references = {
|
||||
|
@ -124,12 +124,12 @@ class TestImagingCoreResize:
|
|||
|
||||
def test_enlarge_zero(self):
|
||||
for f in [
|
||||
Image.NEAREST,
|
||||
Image.BOX,
|
||||
Image.BILINEAR,
|
||||
Image.HAMMING,
|
||||
Image.BICUBIC,
|
||||
Image.LANCZOS,
|
||||
Image.Resampling.NEAREST,
|
||||
Image.Resampling.BOX,
|
||||
Image.Resampling.BILINEAR,
|
||||
Image.Resampling.HAMMING,
|
||||
Image.Resampling.BICUBIC,
|
||||
Image.Resampling.LANCZOS,
|
||||
]:
|
||||
r = self.resize(Image.new("RGB", (0, 0), "white"), (212, 195), f)
|
||||
assert r.mode == "RGB"
|
||||
|
@ -164,15 +164,19 @@ def gradients_image():
|
|||
|
||||
class TestReducingGapResize:
|
||||
def test_reducing_gap_values(self, gradients_image):
|
||||
ref = gradients_image.resize((52, 34), Image.BICUBIC, reducing_gap=None)
|
||||
im = gradients_image.resize((52, 34), Image.BICUBIC)
|
||||
ref = gradients_image.resize(
|
||||
(52, 34), Image.Resampling.BICUBIC, reducing_gap=None
|
||||
)
|
||||
im = gradients_image.resize((52, 34), Image.Resampling.BICUBIC)
|
||||
assert_image_equal(ref, im)
|
||||
|
||||
with pytest.raises(ValueError):
|
||||
gradients_image.resize((52, 34), Image.BICUBIC, reducing_gap=0)
|
||||
gradients_image.resize((52, 34), Image.Resampling.BICUBIC, reducing_gap=0)
|
||||
|
||||
with pytest.raises(ValueError):
|
||||
gradients_image.resize((52, 34), Image.BICUBIC, reducing_gap=0.99)
|
||||
gradients_image.resize(
|
||||
(52, 34), Image.Resampling.BICUBIC, reducing_gap=0.99
|
||||
)
|
||||
|
||||
def test_reducing_gap_1(self, gradients_image):
|
||||
for box, epsilon in [
|
||||
|
@ -180,9 +184,9 @@ class TestReducingGapResize:
|
|||
((1.1, 2.2, 510.8, 510.9), 4),
|
||||
((3, 10, 410, 256), 10),
|
||||
]:
|
||||
ref = gradients_image.resize((52, 34), Image.BICUBIC, box=box)
|
||||
ref = gradients_image.resize((52, 34), Image.Resampling.BICUBIC, box=box)
|
||||
im = gradients_image.resize(
|
||||
(52, 34), Image.BICUBIC, box=box, reducing_gap=1.0
|
||||
(52, 34), Image.Resampling.BICUBIC, box=box, reducing_gap=1.0
|
||||
)
|
||||
|
||||
with pytest.raises(AssertionError):
|
||||
|
@ -196,9 +200,9 @@ class TestReducingGapResize:
|
|||
((1.1, 2.2, 510.8, 510.9), 1.5),
|
||||
((3, 10, 410, 256), 1),
|
||||
]:
|
||||
ref = gradients_image.resize((52, 34), Image.BICUBIC, box=box)
|
||||
ref = gradients_image.resize((52, 34), Image.Resampling.BICUBIC, box=box)
|
||||
im = gradients_image.resize(
|
||||
(52, 34), Image.BICUBIC, box=box, reducing_gap=2.0
|
||||
(52, 34), Image.Resampling.BICUBIC, box=box, reducing_gap=2.0
|
||||
)
|
||||
|
||||
with pytest.raises(AssertionError):
|
||||
|
@ -212,9 +216,9 @@ class TestReducingGapResize:
|
|||
((1.1, 2.2, 510.8, 510.9), 1),
|
||||
((3, 10, 410, 256), 0.5),
|
||||
]:
|
||||
ref = gradients_image.resize((52, 34), Image.BICUBIC, box=box)
|
||||
ref = gradients_image.resize((52, 34), Image.Resampling.BICUBIC, box=box)
|
||||
im = gradients_image.resize(
|
||||
(52, 34), Image.BICUBIC, box=box, reducing_gap=3.0
|
||||
(52, 34), Image.Resampling.BICUBIC, box=box, reducing_gap=3.0
|
||||
)
|
||||
|
||||
with pytest.raises(AssertionError):
|
||||
|
@ -224,9 +228,9 @@ class TestReducingGapResize:
|
|||
|
||||
def test_reducing_gap_8(self, gradients_image):
|
||||
for box in [None, (1.1, 2.2, 510.8, 510.9), (3, 10, 410, 256)]:
|
||||
ref = gradients_image.resize((52, 34), Image.BICUBIC, box=box)
|
||||
ref = gradients_image.resize((52, 34), Image.Resampling.BICUBIC, box=box)
|
||||
im = gradients_image.resize(
|
||||
(52, 34), Image.BICUBIC, box=box, reducing_gap=8.0
|
||||
(52, 34), Image.Resampling.BICUBIC, box=box, reducing_gap=8.0
|
||||
)
|
||||
|
||||
assert_image_equal(ref, im)
|
||||
|
@ -236,8 +240,10 @@ class TestReducingGapResize:
|
|||
((0, 0, 512, 512), 5.5),
|
||||
((0.9, 1.7, 128, 128), 9.5),
|
||||
]:
|
||||
ref = gradients_image.resize((52, 34), Image.BOX, box=box)
|
||||
im = gradients_image.resize((52, 34), Image.BOX, box=box, reducing_gap=1.0)
|
||||
ref = gradients_image.resize((52, 34), Image.Resampling.BOX, box=box)
|
||||
im = gradients_image.resize(
|
||||
(52, 34), Image.Resampling.BOX, box=box, reducing_gap=1.0
|
||||
)
|
||||
|
||||
assert_image_similar(ref, im, epsilon)
|
||||
|
||||
|
@ -261,12 +267,12 @@ class TestImageResize:
|
|||
def test_default_filter(self):
|
||||
for mode in "L", "RGB", "I", "F":
|
||||
im = hopper(mode)
|
||||
assert im.resize((20, 20), Image.BICUBIC) == im.resize((20, 20))
|
||||
assert im.resize((20, 20), Image.Resampling.BICUBIC) == im.resize((20, 20))
|
||||
|
||||
for mode in "1", "P":
|
||||
im = hopper(mode)
|
||||
assert im.resize((20, 20), Image.NEAREST) == im.resize((20, 20))
|
||||
assert im.resize((20, 20), Image.Resampling.NEAREST) == im.resize((20, 20))
|
||||
|
||||
for mode in "I;16", "I;16L", "I;16B", "BGR;15", "BGR;16":
|
||||
im = hopper(mode)
|
||||
assert im.resize((20, 20), Image.NEAREST) == im.resize((20, 20))
|
||||
assert im.resize((20, 20), Image.Resampling.NEAREST) == im.resize((20, 20))
|
||||
|
|
|
@ -46,14 +46,14 @@ def test_zero():
|
|||
def test_resample():
|
||||
# Target image creation, inspected by eye.
|
||||
# >>> im = Image.open('Tests/images/hopper.ppm')
|
||||
# >>> im = im.rotate(45, resample=Image.BICUBIC, expand=True)
|
||||
# >>> im = im.rotate(45, resample=Image.Resampling.BICUBIC, expand=True)
|
||||
# >>> im.save('Tests/images/hopper_45.png')
|
||||
|
||||
with Image.open("Tests/images/hopper_45.png") as target:
|
||||
for (resample, epsilon) in (
|
||||
(Image.NEAREST, 10),
|
||||
(Image.BILINEAR, 5),
|
||||
(Image.BICUBIC, 0),
|
||||
(Image.Resampling.NEAREST, 10),
|
||||
(Image.Resampling.BILINEAR, 5),
|
||||
(Image.Resampling.BICUBIC, 0),
|
||||
):
|
||||
im = hopper()
|
||||
im = im.rotate(45, resample=resample, expand=True)
|
||||
|
@ -62,7 +62,7 @@ def test_resample():
|
|||
|
||||
def test_center_0():
|
||||
im = hopper()
|
||||
im = im.rotate(45, center=(0, 0), resample=Image.BICUBIC)
|
||||
im = im.rotate(45, center=(0, 0), resample=Image.Resampling.BICUBIC)
|
||||
|
||||
with Image.open("Tests/images/hopper_45.png") as target:
|
||||
target_origin = target.size[1] / 2
|
||||
|
@ -73,7 +73,7 @@ def test_center_0():
|
|||
|
||||
def test_center_14():
|
||||
im = hopper()
|
||||
im = im.rotate(45, center=(14, 14), resample=Image.BICUBIC)
|
||||
im = im.rotate(45, center=(14, 14), resample=Image.Resampling.BICUBIC)
|
||||
|
||||
with Image.open("Tests/images/hopper_45.png") as target:
|
||||
target_origin = target.size[1] / 2 - 14
|
||||
|
@ -90,7 +90,7 @@ def test_translate():
|
|||
(target_origin, target_origin, target_origin + 128, target_origin + 128)
|
||||
)
|
||||
|
||||
im = im.rotate(45, translate=(5, 5), resample=Image.BICUBIC)
|
||||
im = im.rotate(45, translate=(5, 5), resample=Image.Resampling.BICUBIC)
|
||||
|
||||
assert_image_similar(im, target, 1)
|
||||
|
||||
|
|
|
@ -97,24 +97,24 @@ def test_DCT_scaling_edges():
|
|||
|
||||
thumb = fromstring(tostring(im, "JPEG", quality=99, subsampling=0))
|
||||
# small reducing_gap to amplify the effect
|
||||
thumb.thumbnail((32, 32), Image.BICUBIC, reducing_gap=1.0)
|
||||
thumb.thumbnail((32, 32), Image.Resampling.BICUBIC, reducing_gap=1.0)
|
||||
|
||||
ref = im.resize((32, 32), Image.BICUBIC)
|
||||
ref = im.resize((32, 32), Image.Resampling.BICUBIC)
|
||||
# This is still JPEG, some error is present. Without the fix it is 11.5
|
||||
assert_image_similar(thumb, ref, 1.5)
|
||||
|
||||
|
||||
def test_reducing_gap_values():
|
||||
im = hopper()
|
||||
im.thumbnail((18, 18), Image.BICUBIC)
|
||||
im.thumbnail((18, 18), Image.Resampling.BICUBIC)
|
||||
|
||||
ref = hopper()
|
||||
ref.thumbnail((18, 18), Image.BICUBIC, reducing_gap=2.0)
|
||||
ref.thumbnail((18, 18), Image.Resampling.BICUBIC, reducing_gap=2.0)
|
||||
# reducing_gap=2.0 should be the default
|
||||
assert_image_equal(ref, im)
|
||||
|
||||
ref = hopper()
|
||||
ref.thumbnail((18, 18), Image.BICUBIC, reducing_gap=None)
|
||||
ref.thumbnail((18, 18), Image.Resampling.BICUBIC, reducing_gap=None)
|
||||
with pytest.raises(AssertionError):
|
||||
assert_image_equal(ref, im)
|
||||
|
||||
|
@ -125,9 +125,9 @@ def test_reducing_gap_for_DCT_scaling():
|
|||
with Image.open("Tests/images/hopper.jpg") as ref:
|
||||
# thumbnail should call draft with reducing_gap scale
|
||||
ref.draft(None, (18 * 3, 18 * 3))
|
||||
ref = ref.resize((18, 18), Image.BICUBIC)
|
||||
ref = ref.resize((18, 18), Image.Resampling.BICUBIC)
|
||||
|
||||
with Image.open("Tests/images/hopper.jpg") as im:
|
||||
im.thumbnail((18, 18), Image.BICUBIC, reducing_gap=3.0)
|
||||
im.thumbnail((18, 18), Image.Resampling.BICUBIC, reducing_gap=3.0)
|
||||
|
||||
assert_image_equal(ref, im)
|
||||
|
|
|
@ -34,20 +34,22 @@ class TestImageTransform:
|
|||
|
||||
def test_palette(self):
|
||||
with Image.open("Tests/images/hopper.gif") as im:
|
||||
transformed = im.transform(im.size, Image.AFFINE, [1, 0, 0, 0, 1, 0])
|
||||
transformed = im.transform(
|
||||
im.size, Image.Transform.AFFINE, [1, 0, 0, 0, 1, 0]
|
||||
)
|
||||
assert im.palette.palette == transformed.palette.palette
|
||||
|
||||
def test_extent(self):
|
||||
im = hopper("RGB")
|
||||
(w, h) = im.size
|
||||
# fmt: off
|
||||
transformed = im.transform(im.size, Image.EXTENT,
|
||||
transformed = im.transform(im.size, Image.Transform.EXTENT,
|
||||
(0, 0,
|
||||
w//2, h//2), # ul -> lr
|
||||
Image.BILINEAR)
|
||||
Image.Resampling.BILINEAR)
|
||||
# fmt: on
|
||||
|
||||
scaled = im.resize((w * 2, h * 2), Image.BILINEAR).crop((0, 0, w, h))
|
||||
scaled = im.resize((w * 2, h * 2), Image.Resampling.BILINEAR).crop((0, 0, w, h))
|
||||
|
||||
# undone -- precision?
|
||||
assert_image_similar(transformed, scaled, 23)
|
||||
|
@ -57,15 +59,18 @@ class TestImageTransform:
|
|||
im = hopper("RGB")
|
||||
(w, h) = im.size
|
||||
# fmt: off
|
||||
transformed = im.transform(im.size, Image.QUAD,
|
||||
transformed = im.transform(im.size, Image.Transform.QUAD,
|
||||
(0, 0, 0, h//2,
|
||||
# ul -> ccw around quad:
|
||||
w//2, h//2, w//2, 0),
|
||||
Image.BILINEAR)
|
||||
Image.Resampling.BILINEAR)
|
||||
# fmt: on
|
||||
|
||||
scaled = im.transform(
|
||||
(w, h), Image.AFFINE, (0.5, 0, 0, 0, 0.5, 0), Image.BILINEAR
|
||||
(w, h),
|
||||
Image.Transform.AFFINE,
|
||||
(0.5, 0, 0, 0, 0.5, 0),
|
||||
Image.Resampling.BILINEAR,
|
||||
)
|
||||
|
||||
assert_image_equal(transformed, scaled)
|
||||
|
@ -80,9 +85,9 @@ class TestImageTransform:
|
|||
(w, h) = im.size
|
||||
transformed = im.transform(
|
||||
im.size,
|
||||
Image.EXTENT,
|
||||
Image.Transform.EXTENT,
|
||||
(0, 0, w * 2, h * 2),
|
||||
Image.BILINEAR,
|
||||
Image.Resampling.BILINEAR,
|
||||
fillcolor="red",
|
||||
)
|
||||
|
||||
|
@ -93,18 +98,21 @@ class TestImageTransform:
|
|||
im = hopper("RGBA")
|
||||
(w, h) = im.size
|
||||
# fmt: off
|
||||
transformed = im.transform(im.size, Image.MESH,
|
||||
transformed = im.transform(im.size, Image.Transform.MESH,
|
||||
[((0, 0, w//2, h//2), # box
|
||||
(0, 0, 0, h,
|
||||
w, h, w, 0)), # ul -> ccw around quad
|
||||
((w//2, h//2, w, h), # box
|
||||
(0, 0, 0, h,
|
||||
w, h, w, 0))], # ul -> ccw around quad
|
||||
Image.BILINEAR)
|
||||
Image.Resampling.BILINEAR)
|
||||
# fmt: on
|
||||
|
||||
scaled = im.transform(
|
||||
(w // 2, h // 2), Image.AFFINE, (2, 0, 0, 0, 2, 0), Image.BILINEAR
|
||||
(w // 2, h // 2),
|
||||
Image.Transform.AFFINE,
|
||||
(2, 0, 0, 0, 2, 0),
|
||||
Image.Resampling.BILINEAR,
|
||||
)
|
||||
|
||||
checker = Image.new("RGBA", im.size)
|
||||
|
@ -137,14 +145,16 @@ class TestImageTransform:
|
|||
|
||||
def test_alpha_premult_resize(self):
|
||||
def op(im, sz):
|
||||
return im.resize(sz, Image.BILINEAR)
|
||||
return im.resize(sz, Image.Resampling.BILINEAR)
|
||||
|
||||
self._test_alpha_premult(op)
|
||||
|
||||
def test_alpha_premult_transform(self):
|
||||
def op(im, sz):
|
||||
(w, h) = im.size
|
||||
return im.transform(sz, Image.EXTENT, (0, 0, w, h), Image.BILINEAR)
|
||||
return im.transform(
|
||||
sz, Image.Transform.EXTENT, (0, 0, w, h), Image.Resampling.BILINEAR
|
||||
)
|
||||
|
||||
self._test_alpha_premult(op)
|
||||
|
||||
|
@ -171,7 +181,7 @@ class TestImageTransform:
|
|||
@pytest.mark.parametrize("mode", ("RGBA", "LA"))
|
||||
def test_nearest_resize(self, mode):
|
||||
def op(im, sz):
|
||||
return im.resize(sz, Image.NEAREST)
|
||||
return im.resize(sz, Image.Resampling.NEAREST)
|
||||
|
||||
self._test_nearest(op, mode)
|
||||
|
||||
|
@ -179,7 +189,9 @@ class TestImageTransform:
|
|||
def test_nearest_transform(self, mode):
|
||||
def op(im, sz):
|
||||
(w, h) = im.size
|
||||
return im.transform(sz, Image.EXTENT, (0, 0, w, h), Image.NEAREST)
|
||||
return im.transform(
|
||||
sz, Image.Transform.EXTENT, (0, 0, w, h), Image.Resampling.NEAREST
|
||||
)
|
||||
|
||||
self._test_nearest(op, mode)
|
||||
|
||||
|
@ -213,13 +225,15 @@ class TestImageTransform:
|
|||
def test_unknown_resampling_filter(self):
|
||||
with hopper() as im:
|
||||
(w, h) = im.size
|
||||
for resample in (Image.BOX, "unknown"):
|
||||
for resample in (Image.Resampling.BOX, "unknown"):
|
||||
with pytest.raises(ValueError):
|
||||
im.transform((100, 100), Image.EXTENT, (0, 0, w, h), resample)
|
||||
im.transform(
|
||||
(100, 100), Image.Transform.EXTENT, (0, 0, w, h), resample
|
||||
)
|
||||
|
||||
|
||||
class TestImageTransformAffine:
|
||||
transform = Image.AFFINE
|
||||
transform = Image.Transform.AFFINE
|
||||
|
||||
def _test_image(self):
|
||||
im = hopper("RGB")
|
||||
|
@ -247,7 +261,11 @@ class TestImageTransformAffine:
|
|||
else:
|
||||
transposed = im
|
||||
|
||||
for resample in [Image.NEAREST, Image.BILINEAR, Image.BICUBIC]:
|
||||
for resample in [
|
||||
Image.Resampling.NEAREST,
|
||||
Image.Resampling.BILINEAR,
|
||||
Image.Resampling.BICUBIC,
|
||||
]:
|
||||
transformed = im.transform(
|
||||
transposed.size, self.transform, matrix, resample
|
||||
)
|
||||
|
@ -257,13 +275,13 @@ class TestImageTransformAffine:
|
|||
self._test_rotate(0, None)
|
||||
|
||||
def test_rotate_90_deg(self):
|
||||
self._test_rotate(90, Image.ROTATE_90)
|
||||
self._test_rotate(90, Image.Transpose.ROTATE_90)
|
||||
|
||||
def test_rotate_180_deg(self):
|
||||
self._test_rotate(180, Image.ROTATE_180)
|
||||
self._test_rotate(180, Image.Transpose.ROTATE_180)
|
||||
|
||||
def test_rotate_270_deg(self):
|
||||
self._test_rotate(270, Image.ROTATE_270)
|
||||
self._test_rotate(270, Image.Transpose.ROTATE_270)
|
||||
|
||||
def _test_resize(self, scale, epsilonscale):
|
||||
im = self._test_image()
|
||||
|
@ -273,9 +291,9 @@ class TestImageTransformAffine:
|
|||
matrix_down = [scale, 0, 0, 0, scale, 0, 0, 0]
|
||||
|
||||
for resample, epsilon in [
|
||||
(Image.NEAREST, 0),
|
||||
(Image.BILINEAR, 2),
|
||||
(Image.BICUBIC, 1),
|
||||
(Image.Resampling.NEAREST, 0),
|
||||
(Image.Resampling.BILINEAR, 2),
|
||||
(Image.Resampling.BICUBIC, 1),
|
||||
]:
|
||||
transformed = im.transform(size_up, self.transform, matrix_up, resample)
|
||||
transformed = transformed.transform(
|
||||
|
@ -306,9 +324,9 @@ class TestImageTransformAffine:
|
|||
matrix_down = [1, 0, x, 0, 1, y, 0, 0]
|
||||
|
||||
for resample, epsilon in [
|
||||
(Image.NEAREST, 0),
|
||||
(Image.BILINEAR, 1.5),
|
||||
(Image.BICUBIC, 1),
|
||||
(Image.Resampling.NEAREST, 0),
|
||||
(Image.Resampling.BILINEAR, 1.5),
|
||||
(Image.Resampling.BICUBIC, 1),
|
||||
]:
|
||||
transformed = im.transform(size_up, self.transform, matrix_up, resample)
|
||||
transformed = transformed.transform(
|
||||
|
@ -328,4 +346,4 @@ class TestImageTransformAffine:
|
|||
|
||||
class TestImageTransformPerspective(TestImageTransformAffine):
|
||||
# Repeat all tests for AFFINE transformations with PERSPECTIVE
|
||||
transform = Image.PERSPECTIVE
|
||||
transform = Image.Transform.PERSPECTIVE
|
||||
|
|
|
@ -1,12 +1,4 @@
|
|||
from PIL.Image import (
|
||||
FLIP_LEFT_RIGHT,
|
||||
FLIP_TOP_BOTTOM,
|
||||
ROTATE_90,
|
||||
ROTATE_180,
|
||||
ROTATE_270,
|
||||
TRANSPOSE,
|
||||
TRANSVERSE,
|
||||
)
|
||||
from PIL.Image import Transpose
|
||||
|
||||
from . import helper
|
||||
from .helper import assert_image_equal
|
||||
|
@ -20,7 +12,7 @@ HOPPER = {
|
|||
def test_flip_left_right():
|
||||
def transpose(mode):
|
||||
im = HOPPER[mode]
|
||||
out = im.transpose(FLIP_LEFT_RIGHT)
|
||||
out = im.transpose(Transpose.FLIP_LEFT_RIGHT)
|
||||
assert out.mode == mode
|
||||
assert out.size == im.size
|
||||
|
||||
|
@ -37,7 +29,7 @@ def test_flip_left_right():
|
|||
def test_flip_top_bottom():
|
||||
def transpose(mode):
|
||||
im = HOPPER[mode]
|
||||
out = im.transpose(FLIP_TOP_BOTTOM)
|
||||
out = im.transpose(Transpose.FLIP_TOP_BOTTOM)
|
||||
assert out.mode == mode
|
||||
assert out.size == im.size
|
||||
|
||||
|
@ -54,7 +46,7 @@ def test_flip_top_bottom():
|
|||
def test_rotate_90():
|
||||
def transpose(mode):
|
||||
im = HOPPER[mode]
|
||||
out = im.transpose(ROTATE_90)
|
||||
out = im.transpose(Transpose.ROTATE_90)
|
||||
assert out.mode == mode
|
||||
assert out.size == im.size[::-1]
|
||||
|
||||
|
@ -71,7 +63,7 @@ def test_rotate_90():
|
|||
def test_rotate_180():
|
||||
def transpose(mode):
|
||||
im = HOPPER[mode]
|
||||
out = im.transpose(ROTATE_180)
|
||||
out = im.transpose(Transpose.ROTATE_180)
|
||||
assert out.mode == mode
|
||||
assert out.size == im.size
|
||||
|
||||
|
@ -88,7 +80,7 @@ def test_rotate_180():
|
|||
def test_rotate_270():
|
||||
def transpose(mode):
|
||||
im = HOPPER[mode]
|
||||
out = im.transpose(ROTATE_270)
|
||||
out = im.transpose(Transpose.ROTATE_270)
|
||||
assert out.mode == mode
|
||||
assert out.size == im.size[::-1]
|
||||
|
||||
|
@ -105,7 +97,7 @@ def test_rotate_270():
|
|||
def test_transpose():
|
||||
def transpose(mode):
|
||||
im = HOPPER[mode]
|
||||
out = im.transpose(TRANSPOSE)
|
||||
out = im.transpose(Transpose.TRANSPOSE)
|
||||
assert out.mode == mode
|
||||
assert out.size == im.size[::-1]
|
||||
|
||||
|
@ -122,7 +114,7 @@ def test_transpose():
|
|||
def test_tranverse():
|
||||
def transpose(mode):
|
||||
im = HOPPER[mode]
|
||||
out = im.transpose(TRANSVERSE)
|
||||
out = im.transpose(Transpose.TRANSVERSE)
|
||||
assert out.mode == mode
|
||||
assert out.size == im.size[::-1]
|
||||
|
||||
|
@ -143,20 +135,31 @@ def test_roundtrip():
|
|||
def transpose(first, second):
|
||||
return im.transpose(first).transpose(second)
|
||||
|
||||
assert_image_equal(im, transpose(FLIP_LEFT_RIGHT, FLIP_LEFT_RIGHT))
|
||||
assert_image_equal(im, transpose(FLIP_TOP_BOTTOM, FLIP_TOP_BOTTOM))
|
||||
assert_image_equal(im, transpose(ROTATE_90, ROTATE_270))
|
||||
assert_image_equal(im, transpose(ROTATE_180, ROTATE_180))
|
||||
assert_image_equal(
|
||||
im.transpose(TRANSPOSE), transpose(ROTATE_90, FLIP_TOP_BOTTOM)
|
||||
im, transpose(Transpose.FLIP_LEFT_RIGHT, Transpose.FLIP_LEFT_RIGHT)
|
||||
)
|
||||
assert_image_equal(
|
||||
im.transpose(TRANSPOSE), transpose(ROTATE_270, FLIP_LEFT_RIGHT)
|
||||
im, transpose(Transpose.FLIP_TOP_BOTTOM, Transpose.FLIP_TOP_BOTTOM)
|
||||
)
|
||||
assert_image_equal(im, transpose(Transpose.ROTATE_90, Transpose.ROTATE_270))
|
||||
assert_image_equal(im, transpose(Transpose.ROTATE_180, Transpose.ROTATE_180))
|
||||
assert_image_equal(
|
||||
im.transpose(Transpose.TRANSPOSE),
|
||||
transpose(Transpose.ROTATE_90, Transpose.FLIP_TOP_BOTTOM),
|
||||
)
|
||||
assert_image_equal(
|
||||
im.transpose(TRANSVERSE), transpose(ROTATE_90, FLIP_LEFT_RIGHT)
|
||||
im.transpose(Transpose.TRANSPOSE),
|
||||
transpose(Transpose.ROTATE_270, Transpose.FLIP_LEFT_RIGHT),
|
||||
)
|
||||
assert_image_equal(
|
||||
im.transpose(TRANSVERSE), transpose(ROTATE_270, FLIP_TOP_BOTTOM)
|
||||
im.transpose(Transpose.TRANSVERSE),
|
||||
transpose(Transpose.ROTATE_90, Transpose.FLIP_LEFT_RIGHT),
|
||||
)
|
||||
assert_image_equal(
|
||||
im.transpose(Transpose.TRANSVERSE),
|
||||
transpose(Transpose.ROTATE_270, Transpose.FLIP_TOP_BOTTOM),
|
||||
)
|
||||
assert_image_equal(
|
||||
im.transpose(Transpose.TRANSVERSE),
|
||||
transpose(Transpose.ROTATE_180, Transpose.TRANSPOSE),
|
||||
)
|
||||
assert_image_equal(im.transpose(TRANSVERSE), transpose(ROTATE_180, TRANSPOSE))
|
||||
|
|
|
@ -140,7 +140,7 @@ def test_intent():
|
|||
skip_missing()
|
||||
assert ImageCms.getDefaultIntent(SRGB) == 0
|
||||
support = ImageCms.isIntentSupported(
|
||||
SRGB, ImageCms.INTENT_ABSOLUTE_COLORIMETRIC, ImageCms.DIRECTION_INPUT
|
||||
SRGB, ImageCms.Intent.ABSOLUTE_COLORIMETRIC, ImageCms.Direction.INPUT
|
||||
)
|
||||
assert support == 1
|
||||
|
||||
|
@ -153,7 +153,7 @@ def test_profile_object():
|
|||
# ["sRGB built-in", "", "WhitePoint : D65 (daylight)", "", ""]
|
||||
assert ImageCms.getDefaultIntent(p) == 0
|
||||
support = ImageCms.isIntentSupported(
|
||||
p, ImageCms.INTENT_ABSOLUTE_COLORIMETRIC, ImageCms.DIRECTION_INPUT
|
||||
p, ImageCms.Intent.ABSOLUTE_COLORIMETRIC, ImageCms.Direction.INPUT
|
||||
)
|
||||
assert support == 1
|
||||
|
||||
|
@ -593,3 +593,13 @@ def test_auxiliary_channels_isolated():
|
|||
)
|
||||
|
||||
assert_image_equal(test_image.convert(dst_format[2]), reference_image)
|
||||
|
||||
|
||||
def test_constants_deprecation():
|
||||
for enum, prefix in {
|
||||
ImageCms.Intent: "INTENT_",
|
||||
ImageCms.Direction: "DIRECTION_",
|
||||
}.items():
|
||||
for name in enum.__members__:
|
||||
with pytest.warns(DeprecationWarning):
|
||||
assert getattr(ImageCms, prefix + name) == enum[name]
|
||||
|
|
|
@ -183,7 +183,7 @@ def test_bitmap():
|
|||
im = Image.new("RGB", (W, H))
|
||||
draw = ImageDraw.Draw(im)
|
||||
with Image.open("Tests/images/pil123rgba.png") as small:
|
||||
small = small.resize((50, 50), Image.NEAREST)
|
||||
small = small.resize((50, 50), Image.Resampling.NEAREST)
|
||||
|
||||
# Act
|
||||
draw.bitmap((10, 10), small)
|
||||
|
@ -319,7 +319,7 @@ def test_ellipse_symmetric():
|
|||
im = Image.new("RGB", (width, 100))
|
||||
draw = ImageDraw.Draw(im)
|
||||
draw.ellipse(bbox, fill="green", outline="blue")
|
||||
assert_image_equal(im, im.transpose(Image.FLIP_LEFT_RIGHT))
|
||||
assert_image_equal(im, im.transpose(Image.Transpose.FLIP_LEFT_RIGHT))
|
||||
|
||||
|
||||
def test_ellipse_width():
|
||||
|
|
|
@ -23,7 +23,7 @@ class TestImageFile:
|
|||
def test_parser(self):
|
||||
def roundtrip(format):
|
||||
|
||||
im = hopper("L").resize((1000, 1000), Image.NEAREST)
|
||||
im = hopper("L").resize((1000, 1000), Image.Resampling.NEAREST)
|
||||
if format in ("MSP", "XBM"):
|
||||
im = im.convert("1")
|
||||
|
||||
|
|
|
@ -29,7 +29,7 @@ pytestmark = skip_unless_feature("freetype2")
|
|||
|
||||
|
||||
class TestImageFont:
|
||||
LAYOUT_ENGINE = ImageFont.LAYOUT_BASIC
|
||||
LAYOUT_ENGINE = ImageFont.Layout.BASIC
|
||||
|
||||
def get_font(self):
|
||||
return ImageFont.truetype(
|
||||
|
@ -94,12 +94,12 @@ class TestImageFont:
|
|||
|
||||
try:
|
||||
ttf = ImageFont.truetype(
|
||||
FONT_PATH, FONT_SIZE, layout_engine=ImageFont.LAYOUT_RAQM
|
||||
FONT_PATH, FONT_SIZE, layout_engine=ImageFont.Layout.RAQM
|
||||
)
|
||||
finally:
|
||||
ImageFont.core.HAVE_RAQM = have_raqm
|
||||
|
||||
assert ttf.layout_engine == ImageFont.LAYOUT_BASIC
|
||||
assert ttf.layout_engine == ImageFont.Layout.BASIC
|
||||
|
||||
def _render(self, font):
|
||||
txt = "Hello World!"
|
||||
|
@ -182,7 +182,7 @@ class TestImageFont:
|
|||
im = Image.new(mode, (1, 1), 0)
|
||||
d = ImageDraw.Draw(im)
|
||||
|
||||
if self.LAYOUT_ENGINE == ImageFont.LAYOUT_BASIC:
|
||||
if self.LAYOUT_ENGINE == ImageFont.Layout.BASIC:
|
||||
length = d.textlength(text, f)
|
||||
assert length == length_basic
|
||||
else:
|
||||
|
@ -294,7 +294,7 @@ class TestImageFont:
|
|||
word = "testing"
|
||||
font = self.get_font()
|
||||
|
||||
orientation = Image.ROTATE_90
|
||||
orientation = Image.Transpose.ROTATE_90
|
||||
transposed_font = ImageFont.TransposedFont(font, orientation=orientation)
|
||||
|
||||
# Original font
|
||||
|
@ -333,7 +333,7 @@ class TestImageFont:
|
|||
# Arrange
|
||||
text = "mask this"
|
||||
font = self.get_font()
|
||||
orientation = Image.ROTATE_90
|
||||
orientation = Image.Transpose.ROTATE_90
|
||||
transposed_font = ImageFont.TransposedFont(font, orientation=orientation)
|
||||
|
||||
# Act
|
||||
|
@ -604,7 +604,7 @@ class TestImageFont:
|
|||
# Arrange
|
||||
t = self.get_font()
|
||||
# Act / Assert
|
||||
if t.layout_engine == ImageFont.LAYOUT_BASIC:
|
||||
if t.layout_engine == ImageFont.Layout.BASIC:
|
||||
with pytest.raises(KeyError):
|
||||
t.getmask("абвг", direction="rtl")
|
||||
with pytest.raises(KeyError):
|
||||
|
@ -753,7 +753,7 @@ class TestImageFont:
|
|||
name, text = "quick", "Quick"
|
||||
path = f"Tests/images/test_anchor_{name}_{anchor}.png"
|
||||
|
||||
if self.LAYOUT_ENGINE == ImageFont.LAYOUT_RAQM:
|
||||
if self.LAYOUT_ENGINE == ImageFont.Layout.RAQM:
|
||||
width, height = (129, 44)
|
||||
else:
|
||||
width, height = (128, 44)
|
||||
|
@ -993,7 +993,7 @@ class TestImageFont:
|
|||
|
||||
@skip_unless_feature("raqm")
|
||||
class TestImageFont_RaqmLayout(TestImageFont):
|
||||
LAYOUT_ENGINE = ImageFont.LAYOUT_RAQM
|
||||
LAYOUT_ENGINE = ImageFont.Layout.RAQM
|
||||
|
||||
|
||||
def test_render_mono_size():
|
||||
|
@ -1004,7 +1004,7 @@ def test_render_mono_size():
|
|||
ttf = ImageFont.truetype(
|
||||
"Tests/fonts/DejaVuSans/DejaVuSans.ttf",
|
||||
18,
|
||||
layout_engine=ImageFont.LAYOUT_BASIC,
|
||||
layout_engine=ImageFont.Layout.BASIC,
|
||||
)
|
||||
|
||||
draw.text((10, 10), "r" * 10, "black", ttf)
|
||||
|
@ -1028,10 +1028,19 @@ def test_raqm_missing_warning(monkeypatch):
|
|||
monkeypatch.setattr(ImageFont.core, "HAVE_RAQM", False)
|
||||
with pytest.warns(UserWarning) as record:
|
||||
font = ImageFont.truetype(
|
||||
FONT_PATH, FONT_SIZE, layout_engine=ImageFont.LAYOUT_RAQM
|
||||
FONT_PATH, FONT_SIZE, layout_engine=ImageFont.Layout.RAQM
|
||||
)
|
||||
assert font.layout_engine == ImageFont.LAYOUT_BASIC
|
||||
assert font.layout_engine == ImageFont.Layout.BASIC
|
||||
assert str(record[-1].message) == (
|
||||
"Raqm layout was requested, but Raqm is not available. "
|
||||
"Falling back to basic layout."
|
||||
)
|
||||
|
||||
|
||||
def test_constants_deprecation():
|
||||
for enum, prefix in {
|
||||
ImageFont.Layout: "LAYOUT_",
|
||||
}.items():
|
||||
for name in enum.__members__:
|
||||
with pytest.warns(DeprecationWarning):
|
||||
assert getattr(ImageFont, prefix + name) == enum[name]
|
||||
|
|
|
@ -34,7 +34,7 @@ def test_basic(tmp_path):
|
|||
imOut = imIn.copy()
|
||||
verify(imOut) # copy
|
||||
|
||||
imOut = imIn.transform((w, h), Image.EXTENT, (0, 0, w, h))
|
||||
imOut = imIn.transform((w, h), Image.Transform.EXTENT, (0, 0, w, h))
|
||||
verify(imOut) # transform
|
||||
|
||||
filename = str(tmp_path / "temp.im")
|
||||
|
|
|
@ -66,6 +66,73 @@ In effect, ``viewer.show_file("test.jpg")`` will continue to work unchanged.
|
|||
``viewer.show_file(file="test.jpg")`` will raise a deprecation warning, and suggest
|
||||
``viewer.show_file(path="test.jpg")`` instead.
|
||||
|
||||
Constants
|
||||
~~~~~~~~~
|
||||
|
||||
.. deprecated:: 9.1.0
|
||||
|
||||
A number of constants have been deprecated and will be removed in Pillow 10.0.0
|
||||
(2023-07-01). Instead, ``enum.IntEnum`` classes have been added.
|
||||
|
||||
===================================================== ============================================================
|
||||
Deprecated Use instead
|
||||
===================================================== ============================================================
|
||||
``Image.NONE`` Either ``Image.Dither.NONE`` or ``Image.Resampling.NEAREST``
|
||||
``Image.NEAREST`` Either ``Image.Dither.NONE`` or ``Image.Resampling.NEAREST``
|
||||
``Image.ORDERED`` ``Image.Dither.ORDERED``
|
||||
``Image.RASTERIZE`` ``Image.Dither.RASTERIZE``
|
||||
``Image.FLOYDSTEINBERG`` ``Image.Dither.FLOYDSTEINBERG``
|
||||
``Image.WEB`` ``Image.Palette.WEB``
|
||||
``Image.ADAPTIVE`` ``Image.Palette.ADAPTIVE``
|
||||
``Image.AFFINE`` ``Image.Transform.AFFINE``
|
||||
``Image.EXTENT`` ``Image.Transform.EXTENT``
|
||||
``Image.PERSPECTIVE`` ``Image.Transform.PERSPECTIVE``
|
||||
``Image.QUAD`` ``Image.Transform.QUAD``
|
||||
``Image.MESH`` ``Image.Transform.MESH``
|
||||
``Image.FLIP_LEFT_RIGHT`` ``Image.Transpose.FLIP_LEFT_RIGHT``
|
||||
``Image.FLIP_TOP_BOTTOM`` ``Image.Transpose.FLIP_TOP_BOTTOM``
|
||||
``Image.ROTATE_90`` ``Image.Transpose.ROTATE_90``
|
||||
``Image.ROTATE_180`` ``Image.Transpose.ROTATE_180``
|
||||
``Image.ROTATE_270`` ``Image.Transpose.ROTATE_270``
|
||||
``Image.TRANSPOSE`` ``Image.Transpose.TRANSPOSE``
|
||||
``Image.TRANSVERSE`` ``Image.Transpose.TRANSVERSE``
|
||||
``Image.BOX`` ``Image.Resampling.BOX``
|
||||
``Image.BILINEAR``                                    ``Image.Resampling.BILINEAR``
|
||||
``Image.LINEAR``                                      ``Image.Resampling.BILINEAR``
|
||||
``Image.HAMMING`` ``Image.Resampling.HAMMING``
|
||||
``Image.BICUBIC`` ``Image.Resampling.BICUBIC``
|
||||
``Image.CUBIC`` ``Image.Resampling.BICUBIC``
|
||||
``Image.LANCZOS`` ``Image.Resampling.LANCZOS``
|
||||
``Image.ANTIALIAS`` ``Image.Resampling.LANCZOS``
|
||||
``Image.MEDIANCUT`` ``Image.Quantize.MEDIANCUT``
|
||||
``Image.MAXCOVERAGE`` ``Image.Quantize.MAXCOVERAGE``
|
||||
``Image.FASTOCTREE`` ``Image.Quantize.FASTOCTREE``
|
||||
``Image.LIBIMAGEQUANT`` ``Image.Quantize.LIBIMAGEQUANT``
|
||||
``ImageCms.INTENT_PERCEPTUAL`` ``ImageCms.Intent.PERCEPTUAL``
|
||||
``ImageCms.INTENT_RELATIVE_COLORIMETRIC``             ``ImageCms.Intent.RELATIVE_COLORIMETRIC``
|
||||
``ImageCms.INTENT_SATURATION`` ``ImageCms.Intent.SATURATION``
|
||||
``ImageCms.INTENT_ABSOLUTE_COLORIMETRIC`` ``ImageCms.Intent.ABSOLUTE_COLORIMETRIC``
|
||||
``ImageCms.DIRECTION_INPUT`` ``ImageCms.Direction.INPUT``
|
||||
``ImageCms.DIRECTION_OUTPUT`` ``ImageCms.Direction.OUTPUT``
|
||||
``ImageCms.DIRECTION_PROOF`` ``ImageCms.Direction.PROOF``
|
||||
``ImageFont.LAYOUT_BASIC`` ``ImageFont.Layout.BASIC``
|
||||
``ImageFont.LAYOUT_RAQM`` ``ImageFont.Layout.RAQM``
|
||||
``BlpImagePlugin.BLP_FORMAT_JPEG`` ``BlpImagePlugin.Format.JPEG``
|
||||
``BlpImagePlugin.BLP_ENCODING_UNCOMPRESSED`` ``BlpImagePlugin.Encoding.UNCOMPRESSED``
|
||||
``BlpImagePlugin.BLP_ENCODING_DXT`` ``BlpImagePlugin.Encoding.DXT``
|
||||
``BlpImagePlugin.BLP_ENCODING_UNCOMPRESSED_RAW_BGRA`` ``BlpImagePlugin.Encoding.UNCOMPRESSED_RAW_BGRA``
|
||||
``BlpImagePlugin.BLP_ALPHA_ENCODING_DXT1`` ``BlpImagePlugin.AlphaEncoding.DXT1``
|
||||
``BlpImagePlugin.BLP_ALPHA_ENCODING_DXT3`` ``BlpImagePlugin.AlphaEncoding.DXT3``
|
||||
``BlpImagePlugin.BLP_ALPHA_ENCODING_DXT5`` ``BlpImagePlugin.AlphaEncoding.DXT5``
|
||||
``FtexImagePlugin.FORMAT_DXT1`` ``FtexImagePlugin.Format.DXT1``
|
||||
``FtexImagePlugin.FORMAT_UNCOMPRESSED`` ``FtexImagePlugin.Format.UNCOMPRESSED``
|
||||
``PngImagePlugin.APNG_DISPOSE_OP_NONE`` ``PngImagePlugin.Disposal.OP_NONE``
|
||||
``PngImagePlugin.APNG_DISPOSE_OP_BACKGROUND`` ``PngImagePlugin.Disposal.OP_BACKGROUND``
|
||||
``PngImagePlugin.APNG_DISPOSE_OP_PREVIOUS`` ``PngImagePlugin.Disposal.OP_PREVIOUS``
|
||||
``PngImagePlugin.APNG_BLEND_OP_SOURCE`` ``PngImagePlugin.Blend.OP_SOURCE``
|
||||
``PngImagePlugin.APNG_BLEND_OP_OVER`` ``PngImagePlugin.Blend.OP_OVER``
|
||||
===================================================== ============================================================
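As an informal sketch of the migration (not part of the table above; ``hopper.jpg`` is only a placeholder filename)::

    from PIL import Image

    with Image.open("hopper.jpg") as im:
        # Old, deprecated spellings:
        #   im.resize((64, 64), Image.ANTIALIAS)
        #   im.transpose(Image.ROTATE_90)
        # New enum spellings:
        small = im.resize((64, 64), Image.Resampling.LANCZOS)
        rotated = im.transpose(Image.Transpose.ROTATE_90)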
|
||||
|
||||
FitsStubImagePlugin
|
||||
~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
|
|
|
@ -696,12 +696,12 @@ parameter must be set to ``True``. The following parameters can also be set:
|
|||
operation to be used for this frame before rendering the next frame.
|
||||
Defaults to 0.
|
||||
|
||||
* 0 (:py:data:`~PIL.PngImagePlugin.APNG_DISPOSE_OP_NONE`, default) -
|
||||
* 0 (:py:data:`~PIL.PngImagePlugin.Disposal.OP_NONE`, default) -
|
||||
No disposal is done on this frame before rendering the next frame.
|
||||
* 1 (:py:data:`PIL.PngImagePlugin.APNG_DISPOSE_OP_BACKGROUND`) -
|
||||
* 1 (:py:data:`PIL.PngImagePlugin.Disposal.OP_BACKGROUND`) -
|
||||
This frame's modified region is cleared to fully transparent black before
|
||||
rendering the next frame.
|
||||
* 2 (:py:data:`~PIL.PngImagePlugin.APNG_DISPOSE_OP_PREVIOUS`) -
|
||||
* 2 (:py:data:`~PIL.PngImagePlugin.Disposal.OP_PREVIOUS`) -
|
||||
This frame's modified region is reverted to the previous frame's contents before
|
||||
rendering the next frame.
|
||||
|
||||
|
@ -710,10 +710,10 @@ parameter must be set to ``True``. The following parameters can also be set:
|
|||
operation to be used for this frame before rendering the next frame.
|
||||
Defaults to 0.
|
||||
|
||||
* 0 (:py:data:`~PIL.PngImagePlugin.APNG_BLEND_OP_SOURCE`) -
|
||||
* 0 (:py:data:`~PIL.PngImagePlugin.Blend.OP_SOURCE`) -
|
||||
All color components of this frame, including alpha, overwrite the previous output
|
||||
image contents.
|
||||
* 1 (:py:data:`~PIL.PngImagePlugin.APNG_BLEND_OP_OVER`) -
|
||||
* 1 (:py:data:`~PIL.PngImagePlugin.Blend.OP_OVER`) -
|
||||
This frame should be alpha composited with the previous output image contents.
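A minimal sketch of saving an APNG with these options; the two frames and the output name are made up for illustration::

    from PIL import Image, PngImagePlugin

    red = Image.new("RGBA", (64, 64), (255, 0, 0, 255))
    blue = Image.new("RGBA", (64, 64), (0, 0, 255, 128))
    red.save(
        "anim.png",
        save_all=True,
        append_images=[blue],
        duration=500,
        disposal=PngImagePlugin.Disposal.OP_BACKGROUND,
        blend=PngImagePlugin.Blend.OP_OVER,
    )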
|
||||
|
||||
.. note::
|
||||
|
|
|
@ -155,7 +155,7 @@ Processing a subrectangle, and pasting it back
|
|||
|
||||
::
|
||||
|
||||
region = region.transpose(Image.ROTATE_180)
|
||||
region = region.transpose(Image.Transpose.ROTATE_180)
|
||||
im.paste(region, box)
|
||||
|
||||
When pasting regions back, the size of the region must match the given region
|
||||
|
@ -238,11 +238,11 @@ Transposing an image
|
|||
|
||||
::
|
||||
|
||||
out = im.transpose(Image.FLIP_LEFT_RIGHT)
|
||||
out = im.transpose(Image.FLIP_TOP_BOTTOM)
|
||||
out = im.transpose(Image.ROTATE_90)
|
||||
out = im.transpose(Image.ROTATE_180)
|
||||
out = im.transpose(Image.ROTATE_270)
|
||||
out = im.transpose(Image.Transpose.FLIP_LEFT_RIGHT)
|
||||
out = im.transpose(Image.Transpose.FLIP_TOP_BOTTOM)
|
||||
out = im.transpose(Image.Transpose.ROTATE_90)
|
||||
out = im.transpose(Image.Transpose.ROTATE_180)
|
||||
out = im.transpose(Image.Transpose.ROTATE_270)
|
||||
|
||||
``transpose(ROTATE)`` operations can also be performed identically with
|
||||
:py:meth:`~PIL.Image.Image.rotate` operations, provided the ``expand`` flag is
|
||||
|
|
|
@ -215,7 +215,7 @@ Many of Pillow's features require external libraries:
|
|||
Once you have installed the prerequisites, run::
|
||||
|
||||
python3 -m pip install --upgrade pip
|
||||
python3 -m pip install --upgrade Pillow
|
||||
python3 -m pip install --upgrade Pillow --no-binary :all:
|
||||
|
||||
If the prerequisites are installed in the standard library locations
|
||||
for your machine (e.g. :file:`/usr` or :file:`/usr/local`), no
|
||||
|
@ -225,7 +225,7 @@ those locations by editing :file:`setup.py` or
|
|||
:file:`setup.cfg`, or by adding environment variables on the command
|
||||
line::
|
||||
|
||||
CFLAGS="-I/usr/pkg/include" python3 -m pip install --upgrade Pillow
|
||||
CFLAGS="-I/usr/pkg/include" python3 -m pip install --upgrade Pillow --no-binary :all:
|
||||
|
||||
If Pillow has been previously built without the required
|
||||
prerequisites, it may be necessary to manually clear the pip cache or
|
||||
|
@ -291,7 +291,7 @@ tools.
|
|||
The easiest way to install external libraries is via `Homebrew
|
||||
<https://brew.sh/>`_. After you install Homebrew, run::
|
||||
|
||||
brew install libtiff libjpeg webp little-cms2
|
||||
brew install libjpeg libtiff little-cms2 openjpeg webp
|
||||
|
||||
To install libraqm on macOS use Homebrew to install its dependencies::
|
||||
|
||||
|
@ -302,7 +302,7 @@ Then see ``depends/install_raqm_cmake.sh`` to install libraqm.
|
|||
Now install Pillow with::
|
||||
|
||||
python3 -m pip install --upgrade pip
|
||||
python3 -m pip install --upgrade Pillow
|
||||
python3 -m pip install --upgrade Pillow --no-binary :all:
|
||||
|
||||
or from within the uncompressed source directory::
|
||||
|
||||
|
@ -349,7 +349,7 @@ Prerequisites are installed on **MSYS2 MinGW 64-bit** with::
|
|||
Now install Pillow with::
|
||||
|
||||
python3 -m pip install --upgrade pip
|
||||
python3 -m pip install --upgrade Pillow
|
||||
python3 -m pip install --upgrade Pillow --no-binary :all:
|
||||
|
||||
|
||||
Building on FreeBSD
|
||||
|
|
|
@ -254,7 +254,8 @@ This rotates the input image by ``theta`` degrees counter clockwise:
|
|||
.. automethod:: PIL.Image.Image.transform
|
||||
.. automethod:: PIL.Image.Image.transpose
|
||||
|
||||
This flips the input image by using the :data:`FLIP_LEFT_RIGHT` method.
|
||||
This flips the input image by using the :data:`PIL.Image.Transpose.FLIP_LEFT_RIGHT`
|
||||
method.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
|
@ -263,9 +264,9 @@ This flips the input image by using the :data:`FLIP_LEFT_RIGHT` method.
|
|||
with Image.open("hopper.jpg") as im:
|
||||
|
||||
# Flip the image from left to right
|
||||
im_flipped = im.transpose(method=Image.FLIP_LEFT_RIGHT)
|
||||
im_flipped = im.transpose(method=Image.Transpose.FLIP_LEFT_RIGHT)
|
||||
# To flip the image from top to bottom,
|
||||
# use the method "Image.FLIP_TOP_BOTTOM"
|
||||
# use the method "Image.Transpose.FLIP_TOP_BOTTOM"
|
||||
|
||||
|
||||
.. automethod:: PIL.Image.Image.verify
|
||||
|
@ -389,68 +390,57 @@ Transpose methods
|
|||
|
||||
Used to specify the :meth:`Image.transpose` method to use.
|
||||
|
||||
.. data:: FLIP_LEFT_RIGHT
|
||||
.. data:: FLIP_TOP_BOTTOM
|
||||
.. data:: ROTATE_90
|
||||
.. data:: ROTATE_180
|
||||
.. data:: ROTATE_270
|
||||
.. data:: TRANSPOSE
|
||||
.. data:: TRANSVERSE
|
||||
.. autoclass:: Transpose
|
||||
:members:
|
||||
:undoc-members:
|
||||
|
||||
Transform methods
|
||||
^^^^^^^^^^^^^^^^^
|
||||
|
||||
Used to specify the :meth:`Image.transform` method to use.
|
||||
|
||||
.. data:: AFFINE
|
||||
.. py:class:: Transform
|
||||
|
||||
Affine transform
|
||||
.. py:attribute:: AFFINE
|
||||
|
||||
.. data:: EXTENT
|
||||
Affine transform
|
||||
|
||||
Cut out a rectangular subregion
|
||||
.. py:attribute:: EXTENT
|
||||
|
||||
.. data:: PERSPECTIVE
|
||||
Cut out a rectangular subregion
|
||||
|
||||
Perspective transform
|
||||
.. py:attribute:: PERSPECTIVE
|
||||
|
||||
.. data:: QUAD
|
||||
Perspective transform
|
||||
|
||||
Map a quadrilateral to a rectangle
|
||||
.. py:attribute:: QUAD
|
||||
|
||||
.. data:: MESH
|
||||
Map a quadrilateral to a rectangle
|
||||
|
||||
Map a number of source quadrilaterals in one operation
|
||||
.. py:attribute:: MESH
|
||||
|
||||
Map a number of source quadrilaterals in one operation
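As a rough illustration of one of these methods, ``EXTENT`` can scale the upper-left quarter of an image up to the full size (the filename is a placeholder)::

    from PIL import Image

    with Image.open("hopper.jpg") as im:
        w, h = im.size
        out = im.transform(
            (w, h),
            Image.Transform.EXTENT,
            (0, 0, w // 2, h // 2),
            Image.Resampling.BILINEAR,
        )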
|
||||
|
||||
Resampling filters
|
||||
^^^^^^^^^^^^^^^^^^
|
||||
|
||||
See :ref:`concept-filters` for details.
|
||||
|
||||
.. data:: NEAREST
|
||||
:noindex:
|
||||
.. data:: BOX
|
||||
:noindex:
|
||||
.. data:: BILINEAR
|
||||
:noindex:
|
||||
.. data:: HAMMING
|
||||
:noindex:
|
||||
.. data:: BICUBIC
|
||||
:noindex:
|
||||
.. data:: LANCZOS
|
||||
:noindex:
|
||||
.. autoclass:: Resampling
|
||||
:members:
|
||||
:undoc-members:
|
||||
|
||||
Some filters are also available under the following names for backwards compatibility:
|
||||
Some deprecated filters are also available under the following names:
|
||||
|
||||
.. data:: NONE
|
||||
:noindex:
|
||||
:value: NEAREST
|
||||
:value: Resampling.NEAREST
|
||||
.. data:: LINEAR
|
||||
:value: BILINEAR
|
||||
:value: Resampling.BILINEAR
|
||||
.. data:: CUBIC
|
||||
:value: BICUBIC
|
||||
:value: Resampling.BICUBIC
|
||||
.. data:: ANTIALIAS
|
||||
:value: LANCZOS
|
||||
:value: Resampling.LANCZOS
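For example, a resize spelled with the enum rather than a deprecated alias (placeholder filename)::

    from PIL import Image

    with Image.open("hopper.jpg") as im:
        # Image.ANTIALIAS resolves to the same filter, but is deprecated
        small = im.resize((64, 64), Image.Resampling.LANCZOS)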
|
||||
|
||||
Dither modes
|
||||
^^^^^^^^^^^^
|
||||
|
@ -458,48 +448,56 @@ Dither modes
|
|||
Used to specify the dithering method to use for the
|
||||
:meth:`~Image.convert` and :meth:`~Image.quantize` methods.
|
||||
|
||||
.. data:: NONE
|
||||
:noindex:
|
||||
.. py:class:: Dither
|
||||
|
||||
No dither
|
||||
.. py:attribute:: NONE
|
||||
|
||||
.. comment: (not implemented)
|
||||
.. data:: ORDERED
|
||||
.. data:: RASTERIZE
|
||||
No dither
|
||||
|
||||
.. data:: FLOYDSTEINBERG
|
||||
.. py:attribute:: ORDERED
|
||||
|
||||
Floyd-Steinberg dither
|
||||
Not implemented
|
||||
|
||||
.. py:attribute:: RASTERIZE
|
||||
|
||||
Not implemented
|
||||
|
||||
.. py:attribute:: FLOYDSTEINBERG
|
||||
|
||||
Floyd-Steinberg dither
|
||||
|
||||
Palettes
|
||||
^^^^^^^^
|
||||
|
||||
Used to specify the palette to use for the :meth:`~Image.convert` method.
|
||||
|
||||
.. data:: WEB
|
||||
.. data:: ADAPTIVE
|
||||
.. autoclass:: Palette
|
||||
:members:
|
||||
:undoc-members:
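A hedged sketch combining a palette choice with one of the dither modes above (placeholder filename)::

    from PIL import Image

    with Image.open("hopper.jpg") as im:
        pal = im.convert(
            "P",
            palette=Image.Palette.ADAPTIVE,
            dither=Image.Dither.FLOYDSTEINBERG,
        )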
|
||||
|
||||
Quantization methods
|
||||
^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Used to specify the quantization method to use for the :meth:`~Image.quantize` method.
|
||||
|
||||
.. data:: MEDIANCUT
|
||||
.. py:class:: Quantize
|
||||
|
||||
Median cut. Default method, except for RGBA images. This method does not support
|
||||
RGBA images.
|
||||
.. py:attribute:: MEDIANCUT
|
||||
|
||||
.. data:: MAXCOVERAGE
|
||||
Median cut. Default method, except for RGBA images. This method does not support
|
||||
RGBA images.
|
||||
|
||||
Maximum coverage. This method does not support RGBA images.
|
||||
.. py:attribute:: MAXCOVERAGE
|
||||
|
||||
.. data:: FASTOCTREE
|
||||
Maximum coverage. This method does not support RGBA images.
|
||||
|
||||
Fast octree. Default method for RGBA images.
|
||||
.. py:attribute:: FASTOCTREE
|
||||
|
||||
.. data:: LIBIMAGEQUANT
|
||||
Fast octree. Default method for RGBA images.
|
||||
|
||||
libimagequant
|
||||
.. py:attribute:: LIBIMAGEQUANT
|
||||
|
||||
Check support using :py:func:`PIL.features.check_feature`
|
||||
with ``feature="libimagequant"``.
|
||||
libimagequant
|
||||
|
||||
Check support using :py:func:`PIL.features.check_feature` with
|
||||
``feature="libimagequant"``.
|
||||
|
|
|
@ -118,8 +118,8 @@ can be easily displayed in a chromaticity diagram, for example).
|
|||
another profile (usually overridden at run-time, but provided here
|
||||
for DeviceLink and embedded source profiles, see 7.2.15 of ICC.1:2010).
|
||||
|
||||
One of ``ImageCms.INTENT_ABSOLUTE_COLORIMETRIC``, ``ImageCms.INTENT_PERCEPTUAL``,
|
||||
``ImageCms.INTENT_RELATIVE_COLORIMETRIC`` and ``ImageCms.INTENT_SATURATION``.
|
||||
One of ``ImageCms.Intent.ABSOLUTE_COLORIMETRIC``, ``ImageCms.Intent.PERCEPTUAL``,
|
||||
``ImageCms.Intent.RELATIVE_COLORIMETRIC`` and ``ImageCms.Intent.SATURATION``.
|
||||
|
||||
.. py:attribute:: profile_id
|
||||
:type: bytes
|
||||
|
@ -313,14 +313,14 @@ can be easily displayed in a chromaticity diagram, for example).
|
|||
the CLUT model.
|
||||
|
||||
The dictionary is indexed by intents
|
||||
(``ImageCms.INTENT_ABSOLUTE_COLORIMETRIC``,
|
||||
``ImageCms.INTENT_PERCEPTUAL``,
|
||||
``ImageCms.INTENT_RELATIVE_COLORIMETRIC`` and
|
||||
``ImageCms.INTENT_SATURATION``).
|
||||
(``ImageCms.Intent.ABSOLUTE_COLORIMETRIC``,
|
||||
``ImageCms.Intent.PERCEPTUAL``,
|
||||
``ImageCms.Intent.RELATIVE_COLORIMETRIC`` and
|
||||
``ImageCms.Intent.SATURATION``).
|
||||
|
||||
The values are 3-tuples indexed by directions
|
||||
(``ImageCms.DIRECTION_INPUT``, ``ImageCms.DIRECTION_OUTPUT``,
|
||||
``ImageCms.DIRECTION_PROOF``).
|
||||
(``ImageCms.Direction.INPUT``, ``ImageCms.Direction.OUTPUT``,
|
||||
``ImageCms.Direction.PROOF``).
|
||||
|
||||
The elements of the tuple are booleans. If the value is ``True``,
|
||||
that intent is supported for that direction.
|
||||
|
@ -331,14 +331,14 @@ can be easily displayed in a chromaticity diagram, for example).
|
|||
Returns a dictionary of all supported intents and directions.
|
||||
|
||||
The dictionary is indexed by intents
|
||||
(``ImageCms.INTENT_ABSOLUTE_COLORIMETRIC``,
|
||||
``ImageCms.INTENT_PERCEPTUAL``,
|
||||
``ImageCms.INTENT_RELATIVE_COLORIMETRIC`` and
|
||||
``ImageCms.INTENT_SATURATION``).
|
||||
(``ImageCms.Intent.ABSOLUTE_COLORIMETRIC``,
|
||||
``ImageCms.Intent.PERCEPTUAL``,
|
||||
``ImageCms.Intent.RELATIVE_COLORIMETRIC`` and
|
||||
``ImageCms.Intent.SATURATION``).
|
||||
|
||||
The values are 3-tuples indexed by directions
|
||||
(``ImageCms.DIRECTION_INPUT``, ``ImageCms.DIRECTION_OUTPUT``,
|
||||
``ImageCms.DIRECTION_PROOF``).
|
||||
(``ImageCms.Direction.INPUT``, ``ImageCms.Direction.OUTPUT``,
|
||||
``ImageCms.Direction.PROOF``).
|
||||
|
||||
The elements of the tuple are booleans. If the value is ``True``,
|
||||
that intent is supported for that direction.
|
||||
|
@ -352,11 +352,11 @@ can be easily displayed in a chromaticity diagram, for example).
|
|||
Note that you can also get this information for all intents and directions
|
||||
with :py:attr:`.intent_supported`.
|
||||
|
||||
:param intent: One of ``ImageCms.INTENT_ABSOLUTE_COLORIMETRIC``,
|
||||
``ImageCms.INTENT_PERCEPTUAL``,
|
||||
``ImageCms.INTENT_RELATIVE_COLORIMETRIC``
|
||||
and ``ImageCms.INTENT_SATURATION``.
|
||||
:param direction: One of ``ImageCms.DIRECTION_INPUT``,
|
||||
``ImageCms.DIRECTION_OUTPUT``
|
||||
and ``ImageCms.DIRECTION_PROOF``
|
||||
:param intent: One of ``ImageCms.Intent.ABSOLUTE_COLORIMETRIC``,
|
||||
``ImageCms.Intent.PERCEPTUAL``,
|
||||
``ImageCms.Intent.RELATIVE_COLORIMETRIC``
|
||||
and ``ImageCms.Intent.SATURATION``.
|
||||
:param direction: One of ``ImageCms.Direction.INPUT``,
|
||||
``ImageCms.Direction.OUTPUT``
|
||||
and ``ImageCms.Direction.PROOF``
|
||||
:return: Boolean if the intent and direction is supported.
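A short sketch of such a call using the enums; the built-in sRGB profile avoids needing a profile file::

    from PIL import ImageCms

    profile = ImageCms.createProfile("sRGB")
    support = ImageCms.isIntentSupported(
        profile,
        ImageCms.Intent.PERCEPTUAL,
        ImageCms.Direction.OUTPUT,
    )
    # support is 1 when the combination is supported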
|
||||
|
|
|
@ -60,12 +60,12 @@ Methods
|
|||
Constants
|
||||
---------
|
||||
|
||||
.. data:: PIL.ImageFont.LAYOUT_BASIC
|
||||
.. data:: PIL.ImageFont.Layout.BASIC
|
||||
|
||||
Use basic text layout for TrueType font.
|
||||
Advanced features such as text direction are not supported.
|
||||
|
||||
.. data:: PIL.ImageFont.LAYOUT_RAQM
|
||||
.. data:: PIL.ImageFont.Layout.RAQM
|
||||
|
||||
Use Raqm text layout for TrueType font.
|
||||
Advanced features are supported.
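For example, a layout engine can be requested explicitly when loading a font; the font path here is only an assumption::

    from PIL import ImageFont

    font = ImageFont.truetype(
        "DejaVuSans.ttf", 24, layout_engine=ImageFont.Layout.RAQM
    )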
|
||||
|
|
|
@ -57,7 +57,7 @@ Support for the following features can be checked:
|
|||
* ``transp_webp``: Support for transparency in WebP images.
|
||||
* ``webp_mux``: (compile time) Support for EXIF data in WebP images.
|
||||
* ``webp_anim``: (compile time) Support for animated WebP images.
|
||||
* ``raqm``: Raqm library, required for ``ImageFont.LAYOUT_RAQM`` in :py:func:`PIL.ImageFont.truetype`. Run-time version number is available for Raqm 0.7.0 or newer.
|
||||
* ``raqm``: Raqm library, required for ``ImageFont.Layout.RAQM`` in :py:func:`PIL.ImageFont.truetype`. Run-time version number is available for Raqm 0.7.0 or newer.
|
||||
* ``libimagequant``: (compile time) ImageQuant quantization support in :py:func:`PIL.Image.Image.quantize`. Run-time version number is available.
|
||||
* ``xcb``: (compile time) Support for X11 in :py:func:`PIL.ImageGrab.grab` via the XCB library.
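These flags can be queried at run time, for example::

    from PIL import features

    if features.check("raqm"):
        print("Raqm available, version:", features.version("raqm"))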
|
||||
|
||||
|
|
|
@ -230,8 +230,7 @@ Plugin reference
|
|||
|
||||
.. automodule:: PIL.PngImagePlugin
|
||||
:members: ChunkStream, PngImageFile, PngStream, getchunks, is_cid, putchunk,
|
||||
MAX_TEXT_CHUNK, MAX_TEXT_MEMORY, APNG_BLEND_OP_SOURCE, APNG_BLEND_OP_OVER,
|
||||
APNG_DISPOSE_OP_NONE, APNG_DISPOSE_OP_BACKGROUND, APNG_DISPOSE_OP_PREVIOUS
|
||||
Blend, Disposal, MAX_TEXT_CHUNK, MAX_TEXT_MEMORY
|
||||
:undoc-members:
|
||||
:show-inheritance:
|
||||
:member-order: groupwise
|
||||
|
|
|
@ -111,16 +111,14 @@ downscaling with libjpeg, which uses supersampling internally, not convolutions.
|
|||
Image transposition
|
||||
-------------------
|
||||
|
||||
A new method :py:data:`PIL.Image.TRANSPOSE` has been added for the
|
||||
A new method ``TRANSPOSE`` has been added for the
|
||||
:py:meth:`~PIL.Image.Image.transpose` operation in addition to
|
||||
:py:data:`~PIL.Image.FLIP_LEFT_RIGHT`, :py:data:`~PIL.Image.FLIP_TOP_BOTTOM`,
|
||||
:py:data:`~PIL.Image.ROTATE_90`, :py:data:`~PIL.Image.ROTATE_180`,
|
||||
:py:data:`~PIL.Image.ROTATE_270`. :py:data:`~PIL.Image.TRANSPOSE` is an algebra
|
||||
transpose, with an image reflected across its main diagonal.
|
||||
``FLIP_LEFT_RIGHT``, ``FLIP_TOP_BOTTOM``, ``ROTATE_90``, ``ROTATE_180``,
|
||||
``ROTATE_270``. ``TRANSPOSE`` is an algebra transpose, with an image reflected
|
||||
across its main diagonal.
|
||||
|
||||
The speed of :py:data:`~PIL.Image.ROTATE_90`, :py:data:`~PIL.Image.ROTATE_270`
|
||||
and :py:data:`~PIL.Image.TRANSPOSE` has been significantly improved for large
|
||||
images which don't fit in the processor cache.
|
||||
The speed of ``ROTATE_90``, ``ROTATE_270`` and ``TRANSPOSE`` has been significantly
|
||||
improved for large images which don't fit in the processor cache.
|
||||
|
||||
Gaussian blur and unsharp mask
|
||||
------------------------------
|
||||
|
|
|
@ -21,11 +21,77 @@ coordinate type".
|
|||
Deprecations
|
||||
^^^^^^^^^^^^
|
||||
|
||||
Constants
|
||||
~~~~~~~~~
|
||||
|
||||
A number of constants have been deprecated and will be removed in Pillow 10.0.0
|
||||
(2023-07-01). Instead, ``enum.IntEnum`` classes have been added.
|
||||
|
||||
===================================================== ============================================================
|
||||
Deprecated Use instead
|
||||
===================================================== ============================================================
|
||||
``Image.NONE`` Either ``Image.Dither.NONE`` or ``Image.Resampling.NEAREST``
|
||||
``Image.NEAREST`` Either ``Image.Dither.NONE`` or ``Image.Resampling.NEAREST``
|
||||
``Image.ORDERED`` ``Image.Dither.ORDERED``
|
||||
``Image.RASTERIZE`` ``Image.Dither.RASTERIZE``
|
||||
``Image.FLOYDSTEINBERG`` ``Image.Dither.FLOYDSTEINBERG``
|
||||
``Image.WEB`` ``Image.Palette.WEB``
|
||||
``Image.ADAPTIVE`` ``Image.Palette.ADAPTIVE``
|
||||
``Image.AFFINE`` ``Image.Transform.AFFINE``
|
||||
``Image.EXTENT`` ``Image.Transform.EXTENT``
|
||||
``Image.PERSPECTIVE`` ``Image.Transform.PERSPECTIVE``
|
||||
``Image.QUAD`` ``Image.Transform.QUAD``
|
||||
``Image.MESH`` ``Image.Transform.MESH``
|
||||
``Image.FLIP_LEFT_RIGHT`` ``Image.Transpose.FLIP_LEFT_RIGHT``
|
||||
``Image.FLIP_TOP_BOTTOM`` ``Image.Transpose.FLIP_TOP_BOTTOM``
|
||||
``Image.ROTATE_90`` ``Image.Transpose.ROTATE_90``
|
||||
``Image.ROTATE_180`` ``Image.Transpose.ROTATE_180``
|
||||
``Image.ROTATE_270`` ``Image.Transpose.ROTATE_270``
|
||||
``Image.TRANSPOSE`` ``Image.Transpose.TRANSPOSE``
|
||||
``Image.TRANSVERSE`` ``Image.Transpose.TRANSVERSE``
|
||||
``Image.BOX`` ``Image.Resampling.BOX``
|
||||
``Image.BILINEAR``                                    ``Image.Resampling.BILINEAR``
|
||||
``Image.LINEAR``                                      ``Image.Resampling.BILINEAR``
|
||||
``Image.HAMMING`` ``Image.Resampling.HAMMING``
|
||||
``Image.BICUBIC`` ``Image.Resampling.BICUBIC``
|
||||
``Image.CUBIC`` ``Image.Resampling.BICUBIC``
|
||||
``Image.LANCZOS`` ``Image.Resampling.LANCZOS``
|
||||
``Image.ANTIALIAS`` ``Image.Resampling.LANCZOS``
|
||||
``Image.MEDIANCUT`` ``Image.Quantize.MEDIANCUT``
|
||||
``Image.MAXCOVERAGE`` ``Image.Quantize.MAXCOVERAGE``
|
||||
``Image.FASTOCTREE`` ``Image.Quantize.FASTOCTREE``
|
||||
``Image.LIBIMAGEQUANT`` ``Image.Quantize.LIBIMAGEQUANT``
|
||||
``ImageCms.INTENT_PERCEPTUAL`` ``ImageCms.Intent.PERCEPTUAL``
|
||||
``ImageCms.INTENT_RELATIVE_COLORIMETRIC``             ``ImageCms.Intent.RELATIVE_COLORIMETRIC``
|
||||
``ImageCms.INTENT_SATURATION`` ``ImageCms.Intent.SATURATION``
|
||||
``ImageCms.INTENT_ABSOLUTE_COLORIMETRIC`` ``ImageCms.Intent.ABSOLUTE_COLORIMETRIC``
|
||||
``ImageCms.DIRECTION_INPUT`` ``ImageCms.Direction.INPUT``
|
||||
``ImageCms.DIRECTION_OUTPUT`` ``ImageCms.Direction.OUTPUT``
|
||||
``ImageCms.DIRECTION_PROOF`` ``ImageCms.Direction.PROOF``
|
||||
``ImageFont.LAYOUT_BASIC`` ``ImageFont.Layout.BASIC``
|
||||
``ImageFont.LAYOUT_RAQM`` ``ImageFont.Layout.RAQM``
|
||||
``BlpImagePlugin.BLP_FORMAT_JPEG`` ``BlpImagePlugin.Format.JPEG``
|
||||
``BlpImagePlugin.BLP_ENCODING_UNCOMPRESSED`` ``BlpImagePlugin.Encoding.UNCOMPRESSED``
|
||||
``BlpImagePlugin.BLP_ENCODING_DXT`` ``BlpImagePlugin.Encoding.DXT``
|
||||
``BlpImagePlugin.BLP_ENCODING_UNCOMPRESSED_RAW_BGRA`` ``BlpImagePlugin.Encoding.UNCOMPRESSED_RAW_BGRA``
|
||||
``BlpImagePlugin.BLP_ALPHA_ENCODING_DXT1`` ``BlpImagePlugin.AlphaEncoding.DXT1``
|
||||
``BlpImagePlugin.BLP_ALPHA_ENCODING_DXT3`` ``BlpImagePlugin.AlphaEncoding.DXT3``
|
||||
``BlpImagePlugin.BLP_ALPHA_ENCODING_DXT5`` ``BlpImagePlugin.AlphaEncoding.DXT5``
|
||||
``FtexImagePlugin.FORMAT_DXT1`` ``FtexImagePlugin.Format.DXT1``
|
||||
``FtexImagePlugin.FORMAT_UNCOMPRESSED`` ``FtexImagePlugin.Format.UNCOMPRESSED``
|
||||
``PngImagePlugin.APNG_DISPOSE_OP_NONE`` ``PngImagePlugin.Disposal.OP_NONE``
|
||||
``PngImagePlugin.APNG_DISPOSE_OP_BACKGROUND`` ``PngImagePlugin.Disposal.OP_BACKGROUND``
|
||||
``PngImagePlugin.APNG_DISPOSE_OP_PREVIOUS`` ``PngImagePlugin.Disposal.OP_PREVIOUS``
|
||||
``PngImagePlugin.APNG_BLEND_OP_SOURCE`` ``PngImagePlugin.Blend.OP_SOURCE``
|
||||
``PngImagePlugin.APNG_BLEND_OP_OVER`` ``PngImagePlugin.Blend.OP_OVER``
|
||||
===================================================== ============================================================
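Code that still uses the old names keeps working for now but emits a ``DeprecationWarning``; a rough way to observe this::

    import warnings

    from PIL import Image

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        assert Image.ANTIALIAS == Image.Resampling.LANCZOS
        assert issubclass(caught[-1].category, DeprecationWarning)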
|
||||
|
||||
ImageShow.Viewer.show_file file argument
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
The ``file`` argument in :py:meth:`~PIL.ImageShow.Viewer.show_file()` has been
|
||||
deprecated, replaced by ``path``.
|
||||
deprecated and will be removed in Pillow 10.0.0 (2023-07-01). It has been replaced by
|
||||
``path``.
|
||||
|
||||
In effect, ``viewer.show_file("test.jpg")`` will continue to work unchanged.
|
||||
``viewer.show_file(file="test.jpg")`` will raise a deprecation warning, and suggest
|
||||
|
|
|
@ -97,9 +97,9 @@ def testimage():
|
|||
10456
|
||||
>>> len(im.tobytes())
|
||||
49152
|
||||
>>> _info(im.transform((512, 512), Image.AFFINE, (1,0,0,0,1,0)))
|
||||
>>> _info(im.transform((512, 512), Image.Transform.AFFINE, (1,0,0,0,1,0)))
|
||||
(None, 'RGB', (512, 512))
|
||||
>>> _info(im.transform((512, 512), Image.EXTENT, (32,32,96,96)))
|
||||
>>> _info(im.transform((512, 512), Image.Transform.EXTENT, (32,32,96,96)))
|
||||
(None, 'RGB', (512, 512))
|
||||
|
||||
The ImageDraw module lets you draw stuff in raster images:
|
||||
|
|
|
@ -30,19 +30,54 @@ BLP files come in many different flavours:
|
|||
"""
|
||||
|
||||
import struct
|
||||
import warnings
|
||||
from enum import IntEnum
|
||||
from io import BytesIO
|
||||
|
||||
from . import Image, ImageFile
|
||||
|
||||
BLP_FORMAT_JPEG = 0
|
||||
|
||||
BLP_ENCODING_UNCOMPRESSED = 1
|
||||
BLP_ENCODING_DXT = 2
|
||||
BLP_ENCODING_UNCOMPRESSED_RAW_BGRA = 3
|
||||
class Format(IntEnum):
|
||||
JPEG = 0
|
||||
|
||||
BLP_ALPHA_ENCODING_DXT1 = 0
|
||||
BLP_ALPHA_ENCODING_DXT3 = 1
|
||||
BLP_ALPHA_ENCODING_DXT5 = 7
|
||||
|
||||
class Encoding(IntEnum):
|
||||
UNCOMPRESSED = 1
|
||||
DXT = 2
|
||||
UNCOMPRESSED_RAW_BGRA = 3
|
||||
|
||||
|
||||
class AlphaEncoding(IntEnum):
|
||||
DXT1 = 0
|
||||
DXT3 = 1
|
||||
DXT5 = 7
|
||||
|
||||
|
||||
def __getattr__(name):
|
||||
deprecated = "deprecated and will be removed in Pillow 10 (2023-07-01). "
|
||||
for enum, prefix in {
|
||||
Format: "BLP_FORMAT_",
|
||||
Encoding: "BLP_ENCODING_",
|
||||
AlphaEncoding: "BLP_ALPHA_ENCODING_",
|
||||
}.items():
|
||||
if name.startswith(prefix):
|
||||
name = name[len(prefix) :]
|
||||
if name in enum.__members__:
|
||||
warnings.warn(
|
||||
prefix
|
||||
+ name
|
||||
+ " is "
|
||||
+ deprecated
|
||||
+ "Use "
|
||||
+ enum.__name__
|
||||
+ "."
|
||||
+ name
|
||||
+ " instead.",
|
||||
DeprecationWarning,
|
||||
stacklevel=2,
|
||||
)
|
||||
return enum[name]
|
||||
raise AttributeError(f"module '{__name__}' has no attribute '{name}'")
|
||||
|
||||
|
||||
def unpack_565(i):
|
||||
|
@ -320,7 +355,7 @@ class _BLPBaseDecoder(ImageFile.PyDecoder):
|
|||
|
||||
class BLP1Decoder(_BLPBaseDecoder):
|
||||
def _load(self):
|
||||
if self._blp_compression == BLP_FORMAT_JPEG:
|
||||
if self._blp_compression == Format.JPEG:
|
||||
self._decode_jpeg_stream()
|
||||
|
||||
elif self._blp_compression == 1:
|
||||
|
@ -347,7 +382,7 @@ class BLP1Decoder(_BLPBaseDecoder):
|
|||
)
|
||||
|
||||
def _decode_jpeg_stream(self):
|
||||
from PIL.JpegImagePlugin import JpegImageFile
|
||||
from .JpegImagePlugin import JpegImageFile
|
||||
|
||||
(jpeg_header_size,) = struct.unpack("<I", self._safe_read(4))
|
||||
jpeg_header = self._safe_read(jpeg_header_size)
|
||||
|
@ -372,7 +407,7 @@ class BLP2Decoder(_BLPBaseDecoder):
|
|||
if self._blp_compression == 1:
|
||||
# Uncompressed or DirectX compression
|
||||
|
||||
if self._blp_encoding == BLP_ENCODING_UNCOMPRESSED:
|
||||
if self._blp_encoding == Encoding.UNCOMPRESSED:
|
||||
_data = BytesIO(self._safe_read(self._blp_lengths[0]))
|
||||
while True:
|
||||
try:
|
||||
|
@ -382,8 +417,8 @@ class BLP2Decoder(_BLPBaseDecoder):
|
|||
b, g, r, a = palette[offset]
|
||||
data.extend((r, g, b))
|
||||
|
||||
elif self._blp_encoding == BLP_ENCODING_DXT:
|
||||
if self._blp_alpha_encoding == BLP_ALPHA_ENCODING_DXT1:
|
||||
elif self._blp_encoding == Encoding.DXT:
|
||||
if self._blp_alpha_encoding == AlphaEncoding.DXT1:
|
||||
linesize = (self.size[0] + 3) // 4 * 8
|
||||
for yb in range((self.size[1] + 3) // 4):
|
||||
for d in decode_dxt1(
|
||||
|
@ -391,13 +426,13 @@ class BLP2Decoder(_BLPBaseDecoder):
|
|||
):
|
||||
data += d
|
||||
|
||||
elif self._blp_alpha_encoding == BLP_ALPHA_ENCODING_DXT3:
|
||||
elif self._blp_alpha_encoding == AlphaEncoding.DXT3:
|
||||
linesize = (self.size[0] + 3) // 4 * 16
|
||||
for yb in range((self.size[1] + 3) // 4):
|
||||
for d in decode_dxt3(self._safe_read(linesize)):
|
||||
data += d
|
||||
|
||||
elif self._blp_alpha_encoding == BLP_ALPHA_ENCODING_DXT5:
|
||||
elif self._blp_alpha_encoding == AlphaEncoding.DXT5:
|
||||
linesize = (self.size[0] + 3) // 4 * 16
|
||||
for yb in range((self.size[1] + 3) // 4):
|
||||
for d in decode_dxt5(self._safe_read(linesize)):
|
||||
|
|
|
@ -52,13 +52,41 @@ Note: All data is stored in little-Endian (Intel) byte order.
|
|||
"""
|
||||
|
||||
import struct
|
||||
import warnings
|
||||
from enum import IntEnum
|
||||
from io import BytesIO
|
||||
|
||||
from . import Image, ImageFile
|
||||
|
||||
MAGIC = b"FTEX"
|
||||
FORMAT_DXT1 = 0
|
||||
FORMAT_UNCOMPRESSED = 1
|
||||
|
||||
|
||||
class Format(IntEnum):
|
||||
DXT1 = 0
|
||||
UNCOMPRESSED = 1
|
||||
|
||||
|
||||
def __getattr__(name):
|
||||
deprecated = "deprecated and will be removed in Pillow 10 (2023-07-01). "
|
||||
for enum, prefix in {Format: "FORMAT_"}.items():
|
||||
if name.startswith(prefix):
|
||||
name = name[len(prefix) :]
|
||||
if name in enum.__members__:
|
||||
warnings.warn(
|
||||
prefix
|
||||
+ name
|
||||
+ " is "
|
||||
+ deprecated
|
||||
+ "Use "
|
||||
+ enum.__name__
|
||||
+ "."
|
||||
+ name
|
||||
+ " instead.",
|
||||
DeprecationWarning,
|
||||
stacklevel=2,
|
||||
)
|
||||
return enum[name]
|
||||
raise AttributeError(f"module '{__name__}' has no attribute '{name}'")
|
||||
|
||||
|
||||
class FtexImageFile(ImageFile.ImageFile):
|
||||
|
@ -83,10 +111,10 @@ class FtexImageFile(ImageFile.ImageFile):
|
|||
|
||||
data = self.fp.read(mipmap_size)
|
||||
|
||||
if format == FORMAT_DXT1:
|
||||
if format == Format.DXT1:
|
||||
self.mode = "RGBA"
|
||||
self.tile = [("bcn", (0, 0) + self.size, 0, (1))]
|
||||
elif format == FORMAT_UNCOMPRESSED:
|
||||
elif format == Format.UNCOMPRESSED:
|
||||
self.tile = [("raw", (0, 0) + self.size, 0, ("RGB", 0, 1))]
|
||||
else:
|
||||
raise ValueError(f"Invalid texture compression format: {repr(format)}")
|
||||
|
|
|
@ -169,12 +169,12 @@ class GifImageFile(ImageFile.ImageFile):
|
|||
if "transparency" in self.info:
|
||||
self.mode = "RGBA"
|
||||
self.im.putpalettealpha(self.info["transparency"], 0)
|
||||
self.im = self.im.convert("RGBA", Image.FLOYDSTEINBERG)
|
||||
self.im = self.im.convert("RGBA", Image.Dither.FLOYDSTEINBERG)
|
||||
|
||||
del self.info["transparency"]
|
||||
else:
|
||||
self.mode = "RGB"
|
||||
self.im = self.im.convert("RGB", Image.FLOYDSTEINBERG)
|
||||
self.im = self.im.convert("RGB", Image.Dither.FLOYDSTEINBERG)
|
||||
if self.dispose:
|
||||
self.im.paste(self.dispose, self.dispose_extent)
|
||||
|
||||
|
@ -425,7 +425,7 @@ def _normalize_mode(im, initial_call=False):
|
|||
palette_size = 256
|
||||
if im.palette:
|
||||
palette_size = len(im.palette.getdata()[1]) // 3
|
||||
im = im.convert("P", palette=Image.ADAPTIVE, colors=palette_size)
|
||||
im = im.convert("P", palette=Image.Palette.ADAPTIVE, colors=palette_size)
|
||||
if im.palette.mode == "RGBA":
|
||||
for rgba in im.palette.colors.keys():
|
||||
if rgba[3] == 0:
|
||||
|
|
|
@ -69,7 +69,7 @@ def _save(im, fp, filename):
|
|||
if not tmp:
|
||||
# TODO: invent a more convenient method for proportional scalings
|
||||
tmp = im.copy()
|
||||
tmp.thumbnail(size, Image.LANCZOS, reducing_gap=None)
|
||||
tmp.thumbnail(size, Image.Resampling.LANCZOS, reducing_gap=None)
|
||||
bits = BmpImagePlugin.SAVE[tmp.mode][1] if bmp else 32
|
||||
fp.write(struct.pack("<H", bits)) # wBitCount(2)
|
||||
|
||||
|
|
355
src/PIL/Image.py
|
@ -37,6 +37,7 @@ import sys
|
|||
import tempfile
|
||||
import warnings
|
||||
from collections.abc import Callable, MutableMapping
|
||||
from enum import IntEnum
|
||||
from pathlib import Path
|
||||
|
||||
try:
|
||||
|
@ -53,15 +54,57 @@ from ._util import deferred_error, isPath
|
|||
|
||||
|
||||
def __getattr__(name):
|
||||
deprecated = "deprecated and will be removed in Pillow 10 (2023-07-01). "
|
||||
categories = {"NORMAL": 0, "SEQUENCE": 1, "CONTAINER": 2}
|
||||
if name in categories:
|
||||
warnings.warn(
|
||||
"Image categories are deprecated and will be removed in Pillow 10 "
|
||||
"(2023-07-01). Use is_animated instead.",
|
||||
"Image categories are " + deprecated + "Use is_animated instead.",
|
||||
DeprecationWarning,
|
||||
stacklevel=2,
|
||||
)
|
||||
return categories[name]
|
||||
elif name in ("NEAREST", "NONE"):
|
||||
warnings.warn(
|
||||
name
|
||||
+ " is "
|
||||
+ deprecated
|
||||
+ "Use Resampling.NEAREST or Dither.NONE instead.",
|
||||
DeprecationWarning,
|
||||
stacklevel=2,
|
||||
)
|
||||
return 0
|
||||
old_resampling = {
|
||||
"LINEAR": "BILINEAR",
|
||||
"CUBIC": "BICUBIC",
|
||||
"ANTIALIAS": "LANCZOS",
|
||||
}
|
||||
if name in old_resampling:
|
||||
warnings.warn(
|
||||
name
|
||||
+ " is "
|
||||
+ deprecated
|
||||
+ "Use Resampling."
|
||||
+ old_resampling[name]
|
||||
+ " instead.",
|
||||
DeprecationWarning,
|
||||
stacklevel=2,
|
||||
)
|
||||
return Resampling[old_resampling[name]]
|
||||
for enum in (Transpose, Transform, Resampling, Dither, Palette, Quantize):
|
||||
if name in enum.__members__:
|
||||
warnings.warn(
|
||||
name
|
||||
+ " is "
|
||||
+ deprecated
|
||||
+ "Use "
|
||||
+ enum.__name__
|
||||
+ "."
|
||||
+ name
|
||||
+ " instead.",
|
||||
DeprecationWarning,
|
||||
stacklevel=2,
|
||||
)
|
||||
return enum[name]
|
||||
raise AttributeError(f"module '{__name__}' has no attribute '{name}'")
|
||||
|
||||
|
||||
|
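As a quick illustration of the shim above (a sketch, assuming Pillow at this commit), the legacy resampling names still resolve but now warn and forward to the Resampling enum:

import warnings

from PIL import Image

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    legacy = Image.ANTIALIAS  # old alias, mapped to Resampling.LANCZOS
assert legacy is Image.Resampling.LANCZOS
assert caught and issubclass(caught[-1].category, DeprecationWarning)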
@ -139,46 +182,64 @@ def isImageType(t):
|
|||
# Constants
|
||||
|
||||
# transpose
|
||||
FLIP_LEFT_RIGHT = 0
|
||||
FLIP_TOP_BOTTOM = 1
|
||||
ROTATE_90 = 2
|
||||
ROTATE_180 = 3
|
||||
ROTATE_270 = 4
|
||||
TRANSPOSE = 5
|
||||
TRANSVERSE = 6
|
||||
class Transpose(IntEnum):
|
||||
FLIP_LEFT_RIGHT = 0
|
||||
FLIP_TOP_BOTTOM = 1
|
||||
ROTATE_90 = 2
|
||||
ROTATE_180 = 3
|
||||
ROTATE_270 = 4
|
||||
TRANSPOSE = 5
|
||||
TRANSVERSE = 6
|
||||
|
||||
|
||||
# transforms (also defined in Imaging.h)
|
||||
AFFINE = 0
|
||||
EXTENT = 1
|
||||
PERSPECTIVE = 2
|
||||
QUAD = 3
|
||||
MESH = 4
|
||||
class Transform(IntEnum):
|
||||
AFFINE = 0
|
||||
EXTENT = 1
|
||||
PERSPECTIVE = 2
|
||||
QUAD = 3
|
||||
MESH = 4
|
||||
|
||||
|
||||
# resampling filters (also defined in Imaging.h)
|
||||
NEAREST = NONE = 0
|
||||
BOX = 4
|
||||
BILINEAR = LINEAR = 2
|
||||
HAMMING = 5
|
||||
BICUBIC = CUBIC = 3
|
||||
LANCZOS = ANTIALIAS = 1
|
||||
class Resampling(IntEnum):
|
||||
NEAREST = 0
|
||||
BOX = 4
|
||||
BILINEAR = 2
|
||||
HAMMING = 5
|
||||
BICUBIC = 3
|
||||
LANCZOS = 1
|
||||
|
||||
_filters_support = {BOX: 0.5, BILINEAR: 1.0, HAMMING: 1.0, BICUBIC: 2.0, LANCZOS: 3.0}
|
||||
|
||||
_filters_support = {
|
||||
Resampling.BOX: 0.5,
|
||||
Resampling.BILINEAR: 1.0,
|
||||
Resampling.HAMMING: 1.0,
|
||||
Resampling.BICUBIC: 2.0,
|
||||
Resampling.LANCZOS: 3.0,
|
||||
}
|
||||
|
||||
|
||||
# dithers
|
||||
NEAREST = NONE = 0
|
||||
ORDERED = 1 # Not yet implemented
|
||||
RASTERIZE = 2 # Not yet implemented
|
||||
FLOYDSTEINBERG = 3 # default
|
||||
class Dither(IntEnum):
|
||||
NONE = 0
|
||||
ORDERED = 1 # Not yet implemented
|
||||
RASTERIZE = 2 # Not yet implemented
|
||||
FLOYDSTEINBERG = 3 # default
|
||||
|
||||
|
||||
# palettes/quantizers
|
||||
WEB = 0
|
||||
ADAPTIVE = 1
|
||||
class Palette(IntEnum):
|
||||
WEB = 0
|
||||
ADAPTIVE = 1
|
||||
|
||||
|
||||
class Quantize(IntEnum):
|
||||
MEDIANCUT = 0
|
||||
MAXCOVERAGE = 1
|
||||
FASTOCTREE = 2
|
||||
LIBIMAGEQUANT = 3
|
||||
|
||||
MEDIANCUT = 0
|
||||
MAXCOVERAGE = 1
|
||||
FASTOCTREE = 2
|
||||
LIBIMAGEQUANT = 3
|
||||
|
||||
if hasattr(core, "DEFAULT_STRATEGY"):
|
||||
DEFAULT_STRATEGY = core.DEFAULT_STRATEGY
|
||||
|
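The constants grouped into enums above are drop-in replacements for the old module-level integers; a short usage sketch (assuming this version of Pillow):

from PIL import Image

im = Image.new("RGB", (128, 96), "white")

# IntEnum members compare equal to the old integer values, so code that
# stored raw ints keeps working.
small = im.resize((64, 48), Image.Resampling.LANCZOS)
rotated = im.transpose(Image.Transpose.ROTATE_90)
region = im.transform((32, 32), Image.Transform.EXTENT, (0, 0, 32, 32))
assert Image.Resampling.LANCZOS == 1 and Image.Transform.EXTENT == 1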
@ -821,13 +882,7 @@ class Image:
|
|||
if self.im and self.palette and self.palette.dirty:
|
||||
# realize palette
|
||||
mode, arr = self.palette.getdata()
|
||||
if mode == "RGBA":
|
||||
mode = "RGB"
|
||||
self.info["transparency"] = arr[3::4]
|
||||
arr = bytes(
|
||||
value for (index, value) in enumerate(arr) if index % 4 != 3
|
||||
)
|
||||
palette_length = self.im.putpalette(mode, arr)
|
||||
self.im.putpalette(mode, arr)
|
||||
self.palette.dirty = 0
|
||||
self.palette.rawmode = None
|
||||
if "transparency" in self.info and mode in ("LA", "PA"):
|
||||
|
@ -837,8 +892,9 @@ class Image:
|
|||
self.im.putpalettealphas(self.info["transparency"])
|
||||
self.palette.mode = "RGBA"
|
||||
else:
|
||||
self.palette.mode = "RGB"
|
||||
self.palette.palette = self.im.getpalette()[: palette_length * 3]
|
||||
palette_mode = "RGBA" if mode.startswith("RGBA") else "RGB"
|
||||
self.palette.mode = palette_mode
|
||||
self.palette.palette = self.im.getpalette(palette_mode, palette_mode)
|
||||
|
||||
if self.im:
|
||||
if cffi and USE_CFFI_ACCESS:
|
||||
|
@ -862,7 +918,9 @@ class Image:
|
|||
"""
|
||||
pass
|
||||
|
||||
def convert(self, mode=None, matrix=None, dither=None, palette=WEB, colors=256):
|
||||
def convert(
|
||||
self, mode=None, matrix=None, dither=None, palette=Palette.WEB, colors=256
|
||||
):
|
||||
"""
|
||||
Returns a converted copy of this image. For the "P" mode, this
|
||||
method translates pixels through the palette. If mode is
|
||||
|
@ -881,7 +939,7 @@ class Image:
|
|||
The default method of converting a greyscale ("L") or "RGB"
|
||||
image into a bilevel (mode "1") image uses Floyd-Steinberg
|
||||
dither to approximate the original image luminosity levels. If
|
||||
dither is :data:`NONE`, all values larger than 127 are set to 255 (white),
|
||||
dither is ``None``, all values larger than 127 are set to 255 (white),
|
||||
all other values to 0 (black). To use other thresholds, use the
|
||||
:py:meth:`~PIL.Image.Image.point` method.
|
||||
|
||||
|
@ -894,12 +952,13 @@ class Image:
|
|||
should be 4- or 12-tuple containing floating point values.
|
||||
:param dither: Dithering method, used when converting from
|
||||
mode "RGB" to "P" or from "RGB" or "L" to "1".
|
||||
Available methods are :data:`NONE` or :data:`FLOYDSTEINBERG` (default).
|
||||
Note that this is not used when ``matrix`` is supplied.
|
||||
Available methods are :data:`Dither.NONE` or :data:`Dither.FLOYDSTEINBERG`
|
||||
(default). Note that this is not used when ``matrix`` is supplied.
|
||||
:param palette: Palette to use when converting from mode "RGB"
|
||||
to "P". Available palettes are :data:`WEB` or :data:`ADAPTIVE`.
|
||||
:param colors: Number of colors to use for the :data:`ADAPTIVE` palette.
|
||||
Defaults to 256.
|
||||
to "P". Available palettes are :data:`Palette.WEB` or
|
||||
:data:`Palette.ADAPTIVE`.
|
||||
:param colors: Number of colors to use for the :data:`Palette.ADAPTIVE`
|
||||
palette. Defaults to 256.
|
||||
:rtype: :py:class:`~PIL.Image.Image`
|
||||
:returns: An :py:class:`~PIL.Image.Image` object.
|
||||
"""
|
||||
|
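A short sketch of the updated convert() signature in use, assuming this Pillow version:

from PIL import Image

im = Image.new("RGB", (64, 64), (255, 128, 0))

# Palette and dithering choices are passed as enum members now.
paletted = im.convert("P", palette=Image.Palette.ADAPTIVE, colors=16)
bilevel = im.convert("1", dither=Image.Dither.NONE)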
@ -1006,7 +1065,7 @@ class Image:
|
|||
else:
|
||||
raise ValueError("Transparency for P mode should be bytes or int")
|
||||
|
||||
if mode == "P" and palette == ADAPTIVE:
|
||||
if mode == "P" and palette == Palette.ADAPTIVE:
|
||||
im = self.im.quantize(colors)
|
||||
new = self._new(im)
|
||||
from . import ImagePalette
|
||||
|
@ -1028,7 +1087,7 @@ class Image:
|
|||
|
||||
# colorspace conversion
|
||||
if dither is None:
|
||||
dither = FLOYDSTEINBERG
|
||||
dither = Dither.FLOYDSTEINBERG
|
||||
|
||||
try:
|
||||
im = self.im.convert(mode, dither)
|
||||
|
@ -1041,7 +1100,7 @@ class Image:
|
|||
raise ValueError("illegal conversion") from e
|
||||
|
||||
new_im = self._new(im)
|
||||
if mode == "P" and palette != ADAPTIVE:
|
||||
if mode == "P" and palette != Palette.ADAPTIVE:
|
||||
from . import ImagePalette
|
||||
|
||||
new_im.palette = ImagePalette.ImagePalette("RGB", list(range(256)) * 3)
|
||||
|
@ -1070,24 +1129,25 @@ class Image:
|
|||
of colors.
|
||||
|
||||
:param colors: The desired number of colors, <= 256
|
||||
:param method: :data:`MEDIANCUT` (median cut),
|
||||
:data:`MAXCOVERAGE` (maximum coverage),
|
||||
:data:`FASTOCTREE` (fast octree),
|
||||
:data:`LIBIMAGEQUANT` (libimagequant; check support using
|
||||
:py:func:`PIL.features.check_feature`
|
||||
with ``feature="libimagequant"``).
|
||||
:param method: :data:`Quantize.MEDIANCUT` (median cut),
|
||||
:data:`Quantize.MAXCOVERAGE` (maximum coverage),
|
||||
:data:`Quantize.FASTOCTREE` (fast octree),
|
||||
:data:`Quantize.LIBIMAGEQUANT` (libimagequant; check support
|
||||
using :py:func:`PIL.features.check_feature` with
|
||||
``feature="libimagequant"``).
|
||||
|
||||
By default, :data:`MEDIANCUT` will be used.
|
||||
By default, :data:`Quantize.MEDIANCUT` will be used.
|
||||
|
||||
The exception to this is RGBA images. :data:`MEDIANCUT` and
|
||||
:data:`MAXCOVERAGE` do not support RGBA images, so
|
||||
:data:`FASTOCTREE` is used by default instead.
|
||||
The exception to this is RGBA images. :data:`Quantize.MEDIANCUT`
|
||||
and :data:`Quantize.MAXCOVERAGE` do not support RGBA images, so
|
||||
:data:`Quantize.FASTOCTREE` is used by default instead.
|
||||
:param kmeans: Integer
|
||||
:param palette: Quantize to the palette of given
|
||||
:py:class:`PIL.Image.Image`.
|
||||
:param dither: Dithering method, used when converting from
|
||||
mode "RGB" to "P" or from "RGB" or "L" to "1".
|
||||
Available methods are :data:`NONE` or :data:`FLOYDSTEINBERG` (default).
|
||||
Available methods are :data:`Dither.NONE` or :data:`Dither.FLOYDSTEINBERG`
|
||||
(default).
|
||||
Default: 1 (legacy setting)
|
||||
:returns: A new image
|
||||
|
||||
|
@ -1097,11 +1157,14 @@ class Image:
|
|||
|
||||
if method is None:
|
||||
# defaults:
|
||||
method = MEDIANCUT
|
||||
method = Quantize.MEDIANCUT
|
||||
if self.mode == "RGBA":
|
||||
method = FASTOCTREE
|
||||
method = Quantize.FASTOCTREE
|
||||
|
||||
if self.mode == "RGBA" and method not in (FASTOCTREE, LIBIMAGEQUANT):
|
||||
if self.mode == "RGBA" and method not in (
|
||||
Quantize.FASTOCTREE,
|
||||
Quantize.LIBIMAGEQUANT,
|
||||
):
|
||||
# Caller specified an invalid mode.
|
||||
raise ValueError(
|
||||
"Fast Octree (method == 2) and libimagequant (method == 3) "
|
||||
|
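A sketch of the quantize() defaults described above, assuming this Pillow version: RGBA input falls back to FASTOCTREE, and explicitly requesting MEDIANCUT for RGBA raises:

from PIL import Image

rgba = Image.new("RGBA", (32, 32), (0, 128, 255, 64))

quantized = rgba.quantize(colors=64)  # implicitly Quantize.FASTOCTREE for RGBA
try:
    rgba.quantize(colors=64, method=Image.Quantize.MEDIANCUT)
except ValueError as err:
    print(err)  # median cut does not support RGBA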
@ -1761,8 +1824,8 @@ class Image:
|
|||
Alternatively, an 8-bit string may be used instead of an integer sequence.
|
||||
|
||||
:param data: A palette sequence (either a list or a string).
|
||||
:param rawmode: The raw mode of the palette. Either "RGB", "RGBA", or a
|
||||
mode that can be transformed to "RGB" (e.g. "R", "BGR;15", "RGBA;L").
|
||||
:param rawmode: The raw mode of the palette. Either "RGB", "RGBA", or a mode
|
||||
that can be transformed to "RGB" or "RGBA" (e.g. "R", "BGR;15", "RGBA;L").
|
||||
"""
|
||||
from . import ImagePalette
|
||||
|
||||
|
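The widened rawmode documented above means RGBA palettes can now be attached directly. A hedged sketch; the palette values are arbitrary:

from PIL import Image

im = Image.new("P", (16, 16))

# Two colour entries followed by fully transparent padding.
entries = [0, 0, 0, 255, 255, 0, 0, 128] + [0, 0, 0, 0] * 254
im.putpalette(entries, rawmode="RGBA")
im.load()
print(im.palette.mode)  # expected to report "RGBA" with this change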
@ -1911,15 +1974,18 @@ class Image:
|
|||
:param size: The requested size in pixels, as a 2-tuple:
|
||||
(width, height).
|
||||
:param resample: An optional resampling filter. This can be
|
||||
one of :py:data:`PIL.Image.NEAREST`, :py:data:`PIL.Image.BOX`,
|
||||
:py:data:`PIL.Image.BILINEAR`, :py:data:`PIL.Image.HAMMING`,
|
||||
:py:data:`PIL.Image.BICUBIC` or :py:data:`PIL.Image.LANCZOS`.
|
||||
one of :py:data:`PIL.Image.Resampling.NEAREST`,
|
||||
:py:data:`PIL.Image.Resampling.BOX`,
|
||||
:py:data:`PIL.Image.Resampling.BILINEAR`,
|
||||
:py:data:`PIL.Image.Resampling.HAMMING`,
|
||||
:py:data:`PIL.Image.Resampling.BICUBIC` or
|
||||
:py:data:`PIL.Image.Resampling.LANCZOS`.
|
||||
If the image has mode "1" or "P", it is always set to
|
||||
:py:data:`PIL.Image.NEAREST`.
|
||||
:py:data:`PIL.Image.Resampling.NEAREST`.
|
||||
If the image mode specifies a number of bits, such as "I;16", then the
|
||||
default filter is :py:data:`PIL.Image.NEAREST`.
|
||||
Otherwise, the default filter is :py:data:`PIL.Image.BICUBIC`.
|
||||
See: :ref:`concept-filters`.
|
||||
default filter is :py:data:`PIL.Image.Resampling.NEAREST`.
|
||||
Otherwise, the default filter is
|
||||
:py:data:`PIL.Image.Resampling.BICUBIC`. See: :ref:`concept-filters`.
|
||||
:param box: An optional 4-tuple of floats providing
|
||||
the source image region to be scaled.
|
||||
The values must be within (0, 0, width, height) rectangle.
|
||||
|
@ -1941,19 +2007,26 @@ class Image:
|
|||
|
||||
if resample is None:
|
||||
type_special = ";" in self.mode
|
||||
resample = NEAREST if type_special else BICUBIC
|
||||
elif resample not in (NEAREST, BILINEAR, BICUBIC, LANCZOS, BOX, HAMMING):
|
||||
resample = Resampling.NEAREST if type_special else Resampling.BICUBIC
|
||||
elif resample not in (
|
||||
Resampling.NEAREST,
|
||||
Resampling.BILINEAR,
|
||||
Resampling.BICUBIC,
|
||||
Resampling.LANCZOS,
|
||||
Resampling.BOX,
|
||||
Resampling.HAMMING,
|
||||
):
|
||||
message = f"Unknown resampling filter ({resample})."
|
||||
|
||||
filters = [
|
||||
f"{filter[1]} ({filter[0]})"
|
||||
for filter in (
|
||||
(NEAREST, "Image.NEAREST"),
|
||||
(LANCZOS, "Image.LANCZOS"),
|
||||
(BILINEAR, "Image.BILINEAR"),
|
||||
(BICUBIC, "Image.BICUBIC"),
|
||||
(BOX, "Image.BOX"),
|
||||
(HAMMING, "Image.HAMMING"),
|
||||
(Resampling.NEAREST, "Image.Resampling.NEAREST"),
|
||||
(Resampling.LANCZOS, "Image.Resampling.LANCZOS"),
|
||||
(Resampling.BILINEAR, "Image.Resampling.BILINEAR"),
|
||||
(Resampling.BICUBIC, "Image.Resampling.BICUBIC"),
|
||||
(Resampling.BOX, "Image.Resampling.BOX"),
|
||||
(Resampling.HAMMING, "Image.Resampling.HAMMING"),
|
||||
)
|
||||
]
|
||||
raise ValueError(
|
||||
|
@ -1974,16 +2047,16 @@ class Image:
|
|||
return self.copy()
|
||||
|
||||
if self.mode in ("1", "P"):
|
||||
resample = NEAREST
|
||||
resample = Resampling.NEAREST
|
||||
|
||||
if self.mode in ["LA", "RGBA"] and resample != NEAREST:
|
||||
if self.mode in ["LA", "RGBA"] and resample != Resampling.NEAREST:
|
||||
im = self.convert({"LA": "La", "RGBA": "RGBa"}[self.mode])
|
||||
im = im.resize(size, resample, box)
|
||||
return im.convert(self.mode)
|
||||
|
||||
self.load()
|
||||
|
||||
if reducing_gap is not None and resample != NEAREST:
|
||||
if reducing_gap is not None and resample != Resampling.NEAREST:
|
||||
factor_x = int((box[2] - box[0]) / size[0] / reducing_gap) or 1
|
||||
factor_y = int((box[3] - box[1]) / size[1] / reducing_gap) or 1
|
||||
if factor_x > 1 or factor_y > 1:
|
||||
|
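The filter validation shown above now reports the enum spellings; a quick sketch:

from PIL import Image

im = Image.new("L", (20, 20))

try:
    im.resize((10, 10), resample=99)  # not a Resampling member
except ValueError as err:
    print(err)  # lists Image.Resampling.NEAREST ... Image.Resampling.HAMMING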
@ -2038,7 +2111,7 @@ class Image:
|
|||
def rotate(
|
||||
self,
|
||||
angle,
|
||||
resample=NEAREST,
|
||||
resample=Resampling.NEAREST,
|
||||
expand=0,
|
||||
center=None,
|
||||
translate=None,
|
||||
|
@ -2051,12 +2124,12 @@ class Image:
|
|||
|
||||
:param angle: In degrees counter clockwise.
|
||||
:param resample: An optional resampling filter. This can be
|
||||
one of :py:data:`PIL.Image.NEAREST` (use nearest neighbour),
|
||||
one of :py:data:`PIL.Image.Resampling.NEAREST` (use nearest neighbour),
|
||||
:py:data:`PIL.Image.BILINEAR` (linear interpolation in a 2x2
|
||||
environment), or :py:data:`PIL.Image.BICUBIC`
|
||||
environment), or :py:data:`PIL.Image.Resampling.BICUBIC`
|
||||
(cubic spline interpolation in a 4x4 environment).
|
||||
If omitted, or if the image has mode "1" or "P", it is
|
||||
set to :py:data:`PIL.Image.NEAREST`. See :ref:`concept-filters`.
|
||||
set to :py:data:`PIL.Image.Resampling.NEAREST`. See :ref:`concept-filters`.
|
||||
:param expand: Optional expansion flag. If true, expands the output
|
||||
image to make it large enough to hold the entire rotated image.
|
||||
If false or omitted, make the output image the same size as the
|
||||
|
@ -2077,9 +2150,11 @@ class Image:
|
|||
if angle == 0:
|
||||
return self.copy()
|
||||
if angle == 180:
|
||||
return self.transpose(ROTATE_180)
|
||||
return self.transpose(Transpose.ROTATE_180)
|
||||
if angle in (90, 270) and (expand or self.width == self.height):
|
||||
return self.transpose(ROTATE_90 if angle == 90 else ROTATE_270)
|
||||
return self.transpose(
|
||||
Transpose.ROTATE_90 if angle == 90 else Transpose.ROTATE_270
|
||||
)
|
||||
|
||||
# Calculate the affine matrix. Note that this is the reverse
|
||||
# transformation (from destination image to source) because we
|
||||
|
@ -2148,7 +2223,9 @@ class Image:
|
|||
matrix[2], matrix[5] = transform(-(nw - w) / 2.0, -(nh - h) / 2.0, matrix)
|
||||
w, h = nw, nh
|
||||
|
||||
return self.transform((w, h), AFFINE, matrix, resample, fillcolor=fillcolor)
|
||||
return self.transform(
|
||||
(w, h), Transform.AFFINE, matrix, resample, fillcolor=fillcolor
|
||||
)
|
||||
|
||||
def save(self, fp, format=None, **params):
|
||||
"""
|
||||
|
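Right-angle rotations keep taking the Transpose fast path, as in this sketch:

from PIL import Image

im = Image.new("RGB", (60, 40), "red")

assert im.rotate(180).size == (60, 40)               # Transpose.ROTATE_180
assert im.rotate(90, expand=True).size == (40, 60)   # Transpose.ROTATE_90
rotated = im.rotate(45, resample=Image.Resampling.BICUBIC, expand=True)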
@ -2334,7 +2411,7 @@ class Image:
|
|||
"""
|
||||
return 0
|
||||
|
||||
def thumbnail(self, size, resample=BICUBIC, reducing_gap=2.0):
|
||||
def thumbnail(self, size, resample=Resampling.BICUBIC, reducing_gap=2.0):
|
||||
"""
|
||||
Make this image into a thumbnail. This method modifies the
|
||||
image to contain a thumbnail version of itself, no larger than
|
||||
|
@ -2350,11 +2427,14 @@ class Image:
|
|||
|
||||
:param size: Requested size.
|
||||
:param resample: Optional resampling filter. This can be one
|
||||
of :py:data:`PIL.Image.NEAREST`, :py:data:`PIL.Image.BOX`,
|
||||
:py:data:`PIL.Image.BILINEAR`, :py:data:`PIL.Image.HAMMING`,
|
||||
:py:data:`PIL.Image.BICUBIC` or :py:data:`PIL.Image.LANCZOS`.
|
||||
If omitted, it defaults to :py:data:`PIL.Image.BICUBIC`.
|
||||
(was :py:data:`PIL.Image.NEAREST` prior to version 2.5.0).
|
||||
of :py:data:`PIL.Image.Resampling.NEAREST`,
|
||||
:py:data:`PIL.Image.Resampling.BOX`,
|
||||
:py:data:`PIL.Image.Resampling.BILINEAR`,
|
||||
:py:data:`PIL.Image.Resampling.HAMMING`,
|
||||
:py:data:`PIL.Image.Resampling.BICUBIC` or
|
||||
:py:data:`PIL.Image.Resampling.LANCZOS`.
|
||||
If omitted, it defaults to :py:data:`PIL.Image.Resampling.BICUBIC`.
|
||||
(was :py:data:`PIL.Image.Resampling.NEAREST` prior to version 2.5.0).
|
||||
See: :ref:`concept-filters`.
|
||||
:param reducing_gap: Apply optimization by resizing the image
|
||||
in two steps. First, reducing the image by integer times
|
||||
|
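thumbnail() keeps its aspect-ratio behaviour, with the default now spelled Resampling.BICUBIC; a sketch:

from PIL import Image

im = Image.new("RGB", (1024, 768))
im.thumbnail((128, 128), Image.Resampling.LANCZOS)
assert im.size == (128, 96)  # aspect ratio preserved, modified in place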
@ -2409,7 +2489,13 @@ class Image:
|
|||
# FIXME: the different transform methods need further explanation
|
||||
# instead of bloating the method docs, add a separate chapter.
|
||||
def transform(
|
||||
self, size, method, data=None, resample=NEAREST, fill=1, fillcolor=None
|
||||
self,
|
||||
size,
|
||||
method,
|
||||
data=None,
|
||||
resample=Resampling.NEAREST,
|
||||
fill=1,
|
||||
fillcolor=None,
|
||||
):
|
||||
"""
|
||||
Transforms this image. This method creates a new image with the
|
||||
|
@ -2418,11 +2504,11 @@ class Image:
|
|||
|
||||
:param size: The output size.
|
||||
:param method: The transformation method. This is one of
|
||||
:py:data:`PIL.Image.EXTENT` (cut out a rectangular subregion),
|
||||
:py:data:`PIL.Image.AFFINE` (affine transform),
|
||||
:py:data:`PIL.Image.PERSPECTIVE` (perspective transform),
|
||||
:py:data:`PIL.Image.QUAD` (map a quadrilateral to a rectangle), or
|
||||
:py:data:`PIL.Image.MESH` (map a number of source quadrilaterals
|
||||
:py:data:`PIL.Image.Transform.EXTENT` (cut out a rectangular subregion),
|
||||
:py:data:`PIL.Image.Transform.AFFINE` (affine transform),
|
||||
:py:data:`PIL.Image.Transform.PERSPECTIVE` (perspective transform),
|
||||
:py:data:`PIL.Image.Transform.QUAD` (map a quadrilateral to a rectangle), or
|
||||
:py:data:`PIL.Image.Transform.MESH` (map a number of source quadrilaterals
|
||||
in one operation).
|
||||
|
||||
It may also be an :py:class:`~PIL.Image.ImageTransformHandler`
|
||||
|
@ -2437,16 +2523,16 @@ class Image:
|
|||
|
||||
class Example:
|
||||
def getdata(self):
|
||||
method = Image.EXTENT
|
||||
method = Image.Transform.EXTENT
|
||||
data = (0, 0, 100, 100)
|
||||
return method, data
|
||||
:param data: Extra data to the transformation method.
|
||||
:param resample: Optional resampling filter. It can be one of
|
||||
:py:data:`PIL.Image.NEAREST` (use nearest neighbour),
|
||||
:py:data:`PIL.Image.BILINEAR` (linear interpolation in a 2x2
|
||||
:py:data:`PIL.Image.Resampling.NEAREST` (use nearest neighbour),
|
||||
:py:data:`PIL.Image.Resampling.BILINEAR` (linear interpolation in a 2x2
|
||||
environment), or :py:data:`PIL.Image.BICUBIC` (cubic spline
|
||||
interpolation in a 4x4 environment). If omitted, or if the image
|
||||
has mode "1" or "P", it is set to :py:data:`PIL.Image.NEAREST`.
|
||||
has mode "1" or "P", it is set to :py:data:`PIL.Image.Resampling.NEAREST`.
|
||||
See: :ref:`concept-filters`.
|
||||
:param fill: If ``method`` is an
|
||||
:py:class:`~PIL.Image.ImageTransformHandler` object, this is one of
|
||||
|
@ -2456,7 +2542,7 @@ class Image:
|
|||
:returns: An :py:class:`~PIL.Image.Image` object.
|
||||
"""
|
||||
|
||||
if self.mode in ("LA", "RGBA") and resample != NEAREST:
|
||||
if self.mode in ("LA", "RGBA") and resample != Resampling.NEAREST:
|
||||
return (
|
||||
self.convert({"LA": "La", "RGBA": "RGBa"}[self.mode])
|
||||
.transform(size, method, data, resample, fill, fillcolor)
|
||||
|
@ -2477,10 +2563,12 @@ class Image:
|
|||
if self.mode == "P" and self.palette:
|
||||
im.palette = self.palette.copy()
|
||||
im.info = self.info.copy()
|
||||
if method == MESH:
|
||||
if method == Transform.MESH:
|
||||
# list of quads
|
||||
for box, quad in data:
|
||||
im.__transformer(box, self, QUAD, quad, resample, fillcolor is None)
|
||||
im.__transformer(
|
||||
box, self, Transform.QUAD, quad, resample, fillcolor is None
|
||||
)
|
||||
else:
|
||||
im.__transformer(
|
||||
(0, 0) + size, self, method, data, resample, fillcolor is None
|
||||
|
@ -2488,25 +2576,27 @@ class Image:
|
|||
|
||||
return im
|
||||
|
||||
def __transformer(self, box, image, method, data, resample=NEAREST, fill=1):
|
||||
def __transformer(
|
||||
self, box, image, method, data, resample=Resampling.NEAREST, fill=1
|
||||
):
|
||||
w = box[2] - box[0]
|
||||
h = box[3] - box[1]
|
||||
|
||||
if method == AFFINE:
|
||||
if method == Transform.AFFINE:
|
||||
data = data[0:6]
|
||||
|
||||
elif method == EXTENT:
|
||||
elif method == Transform.EXTENT:
|
||||
# convert extent to an affine transform
|
||||
x0, y0, x1, y1 = data
|
||||
xs = (x1 - x0) / w
|
||||
ys = (y1 - y0) / h
|
||||
method = AFFINE
|
||||
method = Transform.AFFINE
|
||||
data = (xs, 0, x0, 0, ys, y0)
|
||||
|
||||
elif method == PERSPECTIVE:
|
||||
elif method == Transform.PERSPECTIVE:
|
||||
data = data[0:8]
|
||||
|
||||
elif method == QUAD:
|
||||
elif method == Transform.QUAD:
|
||||
# quadrilateral warp. data specifies the four corners
|
||||
# given as NW, SW, SE, and NE.
|
||||
nw = data[0:2]
|
||||
|
@ -2530,12 +2620,16 @@ class Image:
|
|||
else:
|
||||
raise ValueError("unknown transformation method")
|
||||
|
||||
if resample not in (NEAREST, BILINEAR, BICUBIC):
|
||||
if resample in (BOX, HAMMING, LANCZOS):
|
||||
if resample not in (
|
||||
Resampling.NEAREST,
|
||||
Resampling.BILINEAR,
|
||||
Resampling.BICUBIC,
|
||||
):
|
||||
if resample in (Resampling.BOX, Resampling.HAMMING, Resampling.LANCZOS):
|
||||
message = {
|
||||
BOX: "Image.BOX",
|
||||
HAMMING: "Image.HAMMING",
|
||||
LANCZOS: "Image.LANCZOS/Image.ANTIALIAS",
|
||||
Resampling.BOX: "Image.Resampling.BOX",
|
||||
Resampling.HAMMING: "Image.Resampling.HAMMING",
|
||||
Resampling.LANCZOS: "Image.Resampling.LANCZOS",
|
||||
}[resample] + f" ({resample}) cannot be used."
|
||||
else:
|
||||
message = f"Unknown resampling filter ({resample})."
|
||||
|
@ -2543,9 +2637,9 @@ class Image:
|
|||
filters = [
|
||||
f"{filter[1]} ({filter[0]})"
|
||||
for filter in (
|
||||
(NEAREST, "Image.NEAREST"),
|
||||
(BILINEAR, "Image.BILINEAR"),
|
||||
(BICUBIC, "Image.BICUBIC"),
|
||||
(Resampling.NEAREST, "Image.Resampling.NEAREST"),
|
||||
(Resampling.BILINEAR, "Image.Resampling.BILINEAR"),
|
||||
(Resampling.BICUBIC, "Image.Resampling.BICUBIC"),
|
||||
)
|
||||
]
|
||||
raise ValueError(
|
||||
|
@ -2557,7 +2651,7 @@ class Image:
|
|||
self.load()
|
||||
|
||||
if image.mode in ("1", "P"):
|
||||
resample = NEAREST
|
||||
resample = Resampling.NEAREST
|
||||
|
||||
self.im.transform2(box, image.im, method, data, resample, fill)
|
||||
|
||||
|
@ -2565,10 +2659,13 @@ class Image:
|
|||
"""
|
||||
Transpose image (flip or rotate in 90 degree steps)
|
||||
|
||||
:param method: One of :py:data:`PIL.Image.FLIP_LEFT_RIGHT`,
|
||||
:py:data:`PIL.Image.FLIP_TOP_BOTTOM`, :py:data:`PIL.Image.ROTATE_90`,
|
||||
:py:data:`PIL.Image.ROTATE_180`, :py:data:`PIL.Image.ROTATE_270`,
|
||||
:py:data:`PIL.Image.TRANSPOSE` or :py:data:`PIL.Image.TRANSVERSE`.
|
||||
:param method: One of :py:data:`PIL.Image.Transpose.FLIP_LEFT_RIGHT`,
|
||||
:py:data:`PIL.Image.Transpose.FLIP_TOP_BOTTOM`,
|
||||
:py:data:`PIL.Image.Transpose.ROTATE_90`,
|
||||
:py:data:`PIL.Image.Transpose.ROTATE_180`,
|
||||
:py:data:`PIL.Image.Transpose.ROTATE_270`,
|
||||
:py:data:`PIL.Image.Transpose.TRANSPOSE` or
|
||||
:py:data:`PIL.Image.Transpose.TRANSVERSE`.
|
||||
:returns: Returns a flipped or rotated copy of this image.
|
||||
"""
|
||||
|
||||
|
|
|
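A short transpose() sketch with the enum members documented above:

from PIL import Image

im = Image.new("RGB", (30, 20))

assert im.transpose(Image.Transpose.ROTATE_90).size == (20, 30)
assert im.transpose(Image.Transpose.TRANSVERSE).size == (20, 30)
assert im.transpose(Image.Transpose.FLIP_LEFT_RIGHT).size == (30, 20)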
@ -16,6 +16,8 @@
|
|||
# below for the original description.
|
||||
|
||||
import sys
|
||||
import warnings
|
||||
from enum import IntEnum
|
||||
|
||||
from PIL import Image
|
||||
|
||||
|
@ -100,14 +102,42 @@ core = _imagingcms
|
|||
#
|
||||
# intent/direction values
|
||||
|
||||
INTENT_PERCEPTUAL = 0
|
||||
INTENT_RELATIVE_COLORIMETRIC = 1
|
||||
INTENT_SATURATION = 2
|
||||
INTENT_ABSOLUTE_COLORIMETRIC = 3
|
||||
|
||||
DIRECTION_INPUT = 0
|
||||
DIRECTION_OUTPUT = 1
|
||||
DIRECTION_PROOF = 2
|
||||
class Intent(IntEnum):
|
||||
PERCEPTUAL = 0
|
||||
RELATIVE_COLORIMETRIC = 1
|
||||
SATURATION = 2
|
||||
ABSOLUTE_COLORIMETRIC = 3
|
||||
|
||||
|
||||
class Direction(IntEnum):
|
||||
INPUT = 0
|
||||
OUTPUT = 1
|
||||
PROOF = 2
|
||||
|
||||
|
||||
def __getattr__(name):
|
||||
deprecated = "deprecated and will be removed in Pillow 10 (2023-07-01). "
|
||||
for enum, prefix in {Intent: "INTENT_", Direction: "DIRECTION_"}.items():
|
||||
if name.startswith(prefix):
|
||||
name = name[len(prefix) :]
|
||||
if name in enum.__members__:
|
||||
warnings.warn(
|
||||
prefix
|
||||
+ name
|
||||
+ " is "
|
||||
+ deprecated
|
||||
+ "Use "
|
||||
+ enum.__name__
|
||||
+ "."
|
||||
+ name
|
||||
+ " instead.",
|
||||
DeprecationWarning,
|
||||
stacklevel=2,
|
||||
)
|
||||
return enum[name]
|
||||
raise AttributeError(f"module '{__name__}' has no attribute '{name}'")
|
||||
|
||||
|
||||
#
|
||||
# flags
|
||||
|
@ -211,9 +241,9 @@ class ImageCmsTransform(Image.ImagePointHandler):
|
|||
output,
|
||||
input_mode,
|
||||
output_mode,
|
||||
intent=INTENT_PERCEPTUAL,
|
||||
intent=Intent.PERCEPTUAL,
|
||||
proof=None,
|
||||
proof_intent=INTENT_ABSOLUTE_COLORIMETRIC,
|
||||
proof_intent=Intent.ABSOLUTE_COLORIMETRIC,
|
||||
flags=0,
|
||||
):
|
||||
if proof is None:
|
||||
|
@ -295,7 +325,7 @@ def profileToProfile(
|
|||
im,
|
||||
inputProfile,
|
||||
outputProfile,
|
||||
renderingIntent=INTENT_PERCEPTUAL,
|
||||
renderingIntent=Intent.PERCEPTUAL,
|
||||
outputMode=None,
|
||||
inPlace=False,
|
||||
flags=0,
|
||||
|
@ -331,10 +361,10 @@ def profileToProfile(
|
|||
:param renderingIntent: Integer (0-3) specifying the rendering intent you
|
||||
wish to use for the transform
|
||||
|
||||
ImageCms.INTENT_PERCEPTUAL = 0 (DEFAULT)
|
||||
ImageCms.INTENT_RELATIVE_COLORIMETRIC = 1
|
||||
ImageCms.INTENT_SATURATION = 2
|
||||
ImageCms.INTENT_ABSOLUTE_COLORIMETRIC = 3
|
||||
ImageCms.Intent.PERCEPTUAL = 0 (DEFAULT)
|
||||
ImageCms.Intent.RELATIVE_COLORIMETRIC = 1
|
||||
ImageCms.Intent.SATURATION = 2
|
||||
ImageCms.Intent.ABSOLUTE_COLORIMETRIC = 3
|
||||
|
||||
see the pyCMS documentation for details on rendering intents and what
|
||||
they do.
|
||||
|
@ -412,7 +442,7 @@ def buildTransform(
|
|||
outputProfile,
|
||||
inMode,
|
||||
outMode,
|
||||
renderingIntent=INTENT_PERCEPTUAL,
|
||||
renderingIntent=Intent.PERCEPTUAL,
|
||||
flags=0,
|
||||
):
|
||||
"""
|
||||
|
@ -458,10 +488,10 @@ def buildTransform(
|
|||
:param renderingIntent: Integer (0-3) specifying the rendering intent you
|
||||
wish to use for the transform
|
||||
|
||||
ImageCms.INTENT_PERCEPTUAL = 0 (DEFAULT)
|
||||
ImageCms.INTENT_RELATIVE_COLORIMETRIC = 1
|
||||
ImageCms.INTENT_SATURATION = 2
|
||||
ImageCms.INTENT_ABSOLUTE_COLORIMETRIC = 3
|
||||
ImageCms.Intent.PERCEPTUAL = 0 (DEFAULT)
|
||||
ImageCms.Intent.RELATIVE_COLORIMETRIC = 1
|
||||
ImageCms.Intent.SATURATION = 2
|
||||
ImageCms.Intent.ABSOLUTE_COLORIMETRIC = 3
|
||||
|
||||
see the pyCMS documentation for details on rendering intents and what
|
||||
they do.
|
||||
|
@ -494,8 +524,8 @@ def buildProofTransform(
|
|||
proofProfile,
|
||||
inMode,
|
||||
outMode,
|
||||
renderingIntent=INTENT_PERCEPTUAL,
|
||||
proofRenderingIntent=INTENT_ABSOLUTE_COLORIMETRIC,
|
||||
renderingIntent=Intent.PERCEPTUAL,
|
||||
proofRenderingIntent=Intent.ABSOLUTE_COLORIMETRIC,
|
||||
flags=FLAGS["SOFTPROOFING"],
|
||||
):
|
||||
"""
|
||||
|
@ -550,20 +580,20 @@ def buildProofTransform(
|
|||
:param renderingIntent: Integer (0-3) specifying the rendering intent you
|
||||
wish to use for the input->proof (simulated) transform
|
||||
|
||||
ImageCms.INTENT_PERCEPTUAL = 0 (DEFAULT)
|
||||
ImageCms.INTENT_RELATIVE_COLORIMETRIC = 1
|
||||
ImageCms.INTENT_SATURATION = 2
|
||||
ImageCms.INTENT_ABSOLUTE_COLORIMETRIC = 3
|
||||
ImageCms.Intent.PERCEPTUAL = 0 (DEFAULT)
|
||||
ImageCms.Intent.RELATIVE_COLORIMETRIC = 1
|
||||
ImageCms.Intent.SATURATION = 2
|
||||
ImageCms.Intent.ABSOLUTE_COLORIMETRIC = 3
|
||||
|
||||
see the pyCMS documentation for details on rendering intents and what
|
||||
they do.
|
||||
:param proofRenderingIntent: Integer (0-3) specifying the rendering intent
|
||||
you wish to use for proof->output transform
|
||||
|
||||
ImageCms.INTENT_PERCEPTUAL = 0 (DEFAULT)
|
||||
ImageCms.INTENT_RELATIVE_COLORIMETRIC = 1
|
||||
ImageCms.INTENT_SATURATION = 2
|
||||
ImageCms.INTENT_ABSOLUTE_COLORIMETRIC = 3
|
||||
ImageCms.Intent.PERCEPTUAL = 0 (DEFAULT)
|
||||
ImageCms.Intent.RELATIVE_COLORIMETRIC = 1
|
||||
ImageCms.Intent.SATURATION = 2
|
||||
ImageCms.Intent.ABSOLUTE_COLORIMETRIC = 3
|
||||
|
||||
see the pyCMS documentation for details on rendering intents and what
|
||||
they do.
|
||||
|
@ -922,10 +952,10 @@ def getDefaultIntent(profile):
|
|||
:returns: Integer 0-3 specifying the default rendering intent for this
|
||||
profile.
|
||||
|
||||
ImageCms.INTENT_PERCEPTUAL = 0 (DEFAULT)
|
||||
ImageCms.INTENT_RELATIVE_COLORIMETRIC = 1
|
||||
ImageCms.INTENT_SATURATION = 2
|
||||
ImageCms.INTENT_ABSOLUTE_COLORIMETRIC = 3
|
||||
ImageCms.Intent.PERCEPTUAL = 0 (DEFAULT)
|
||||
ImageCms.Intent.RELATIVE_COLORIMETRIC = 1
|
||||
ImageCms.Intent.SATURATION = 2
|
||||
ImageCms.Intent.ABSOLUTE_COLORIMETRIC = 3
|
||||
|
||||
see the pyCMS documentation for details on rendering intents and what
|
||||
they do.
|
||||
|
@ -960,19 +990,19 @@ def isIntentSupported(profile, intent, direction):
|
|||
:param intent: Integer (0-3) specifying the rendering intent you wish to
|
||||
use with this profile
|
||||
|
||||
ImageCms.INTENT_PERCEPTUAL = 0 (DEFAULT)
|
||||
ImageCms.INTENT_RELATIVE_COLORIMETRIC = 1
|
||||
ImageCms.INTENT_SATURATION = 2
|
||||
ImageCms.INTENT_ABSOLUTE_COLORIMETRIC = 3
|
||||
ImageCms.Intent.PERCEPTUAL = 0 (DEFAULT)
|
||||
ImageCms.Intent.RELATIVE_COLORIMETRIC = 1
|
||||
ImageCms.Intent.SATURATION = 2
|
||||
ImageCms.Intent.ABSOLUTE_COLORIMETRIC = 3
|
||||
|
||||
see the pyCMS documentation for details on rendering intents and what
|
||||
they do.
|
||||
:param direction: Integer specifying if the profile is to be used for
|
||||
input, output, or proof
|
||||
|
||||
INPUT = 0 (or use ImageCms.DIRECTION_INPUT)
|
||||
OUTPUT = 1 (or use ImageCms.DIRECTION_OUTPUT)
|
||||
PROOF = 2 (or use ImageCms.DIRECTION_PROOF)
|
||||
INPUT = 0 (or use ImageCms.Direction.INPUT)
|
||||
OUTPUT = 1 (or use ImageCms.Direction.OUTPUT)
|
||||
PROOF = 2 (or use ImageCms.Direction.PROOF)
|
||||
|
||||
:returns: 1 if the intent/direction are supported, -1 if they are not.
|
||||
:exception PyCMSError:
|
||||
|
|
|
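A hedged sketch of the new Intent and Direction enums in use, assuming Pillow is built with littlecms support:

from PIL import ImageCms

srgb = ImageCms.createProfile("sRGB")

default_intent = ImageCms.getDefaultIntent(srgb)
print(ImageCms.Intent(default_intent))
print(
    ImageCms.isIntentSupported(
        srgb, ImageCms.Intent.PERCEPTUAL, ImageCms.Direction.OUTPUT
    )
)  # 1 if supported, -1 otherwise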
@ -529,7 +529,7 @@ class Color3DLUT(MultibandFilter):
|
|||
|
||||
return image.color_lut_3d(
|
||||
self.mode or image.mode,
|
||||
Image.LINEAR,
|
||||
Image.Resampling.BILINEAR,
|
||||
self.channels,
|
||||
self.size[0],
|
||||
self.size[1],
|
||||
|
|
|
@ -28,13 +28,40 @@
|
|||
import base64
|
||||
import os
|
||||
import sys
|
||||
import warnings
|
||||
from enum import IntEnum
|
||||
from io import BytesIO
|
||||
|
||||
from . import Image
|
||||
from ._util import isDirectory, isPath
|
||||
|
||||
LAYOUT_BASIC = 0
|
||||
LAYOUT_RAQM = 1
|
||||
|
||||
class Layout(IntEnum):
|
||||
BASIC = 0
|
||||
RAQM = 1
|
||||
|
||||
|
||||
def __getattr__(name):
|
||||
deprecated = "deprecated and will be removed in Pillow 10 (2023-07-01). "
|
||||
for enum, prefix in {Layout: "LAYOUT_"}.items():
|
||||
if name.startswith(prefix):
|
||||
name = name[len(prefix) :]
|
||||
if name in enum.__members__:
|
||||
warnings.warn(
|
||||
prefix
|
||||
+ name
|
||||
+ " is "
|
||||
+ deprecated
|
||||
+ "Use "
|
||||
+ enum.__name__
|
||||
+ "."
|
||||
+ name
|
||||
+ " instead.",
|
||||
DeprecationWarning,
|
||||
stacklevel=2,
|
||||
)
|
||||
return enum[name]
|
||||
raise AttributeError(f"module '{__name__}' has no attribute '{name}'")
|
||||
|
||||
|
||||
class _imagingft_not_installed:
|
||||
|
@ -164,18 +191,18 @@ class FreeTypeFont:
|
|||
self.index = index
|
||||
self.encoding = encoding
|
||||
|
||||
if layout_engine not in (LAYOUT_BASIC, LAYOUT_RAQM):
|
||||
layout_engine = LAYOUT_BASIC
|
||||
if layout_engine not in (Layout.BASIC, Layout.RAQM):
|
||||
layout_engine = Layout.BASIC
|
||||
if core.HAVE_RAQM:
|
||||
layout_engine = LAYOUT_RAQM
|
||||
elif layout_engine == LAYOUT_RAQM and not core.HAVE_RAQM:
|
||||
layout_engine = Layout.RAQM
|
||||
elif layout_engine == Layout.RAQM and not core.HAVE_RAQM:
|
||||
import warnings
|
||||
|
||||
warnings.warn(
|
||||
"Raqm layout was requested, but Raqm is not available. "
|
||||
"Falling back to basic layout."
|
||||
)
|
||||
layout_engine = LAYOUT_BASIC
|
||||
layout_engine = Layout.BASIC
|
||||
|
||||
self.layout_engine = layout_engine
|
||||
|
||||
|
@ -757,15 +784,16 @@ class TransposedFont:
|
|||
|
||||
:param font: A font object.
|
||||
:param orientation: An optional orientation. If given, this should
|
||||
be one of Image.FLIP_LEFT_RIGHT, Image.FLIP_TOP_BOTTOM,
|
||||
Image.ROTATE_90, Image.ROTATE_180, or Image.ROTATE_270.
|
||||
be one of Image.Transpose.FLIP_LEFT_RIGHT, Image.Transpose.FLIP_TOP_BOTTOM,
|
||||
Image.Transpose.ROTATE_90, Image.Transpose.ROTATE_180, or
|
||||
Image.Transpose.ROTATE_270.
|
||||
"""
|
||||
self.font = font
|
||||
self.orientation = orientation # any 'transpose' argument, or None
|
||||
|
||||
def getsize(self, text, *args, **kwargs):
|
||||
w, h = self.font.getsize(text)
|
||||
if self.orientation in (Image.ROTATE_90, Image.ROTATE_270):
|
||||
if self.orientation in (Image.Transpose.ROTATE_90, Image.Transpose.ROTATE_270):
|
||||
return h, w
|
||||
return w, h
|
||||
|
||||
|
@ -833,7 +861,7 @@ def truetype(font=None, size=10, index=0, encoding="", layout_engine=None):
|
|||
This specifies the character set to use. It does not alter the
|
||||
encoding of any text provided in subsequent operations.
|
||||
:param layout_engine: Which layout engine to use, if available:
|
||||
:data:`.ImageFont.LAYOUT_BASIC` or :data:`.ImageFont.LAYOUT_RAQM`.
|
||||
:data:`.ImageFont.Layout.BASIC` or :data:`.ImageFont.Layout.RAQM`.
|
||||
|
||||
You can check support for Raqm layout using
|
||||
:py:func:`PIL.features.check_feature` with ``feature="raqm"``.
|
||||
|
|
|
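Layout engine selection with the new enum, as a sketch; the font path below is purely illustrative and not part of this change:

from PIL import ImageFont

font = ImageFont.truetype(
    "/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf",  # hypothetical path
    size=24,
    layout_engine=ImageFont.Layout.BASIC,
)
print(font.getsize("Hello"))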
@ -237,7 +237,7 @@ def colorize(image, black, white, mid=None, blackpoint=0, whitepoint=255, midpoi
|
|||
return _lut(image, red + green + blue)
|
||||
|
||||
|
||||
def contain(image, size, method=Image.BICUBIC):
|
||||
def contain(image, size, method=Image.Resampling.BICUBIC):
|
||||
"""
|
||||
Returns a resized version of the image, set to the maximum width and height
|
||||
within the requested size, while maintaining the original aspect ratio.
|
||||
|
@ -265,7 +265,7 @@ def contain(image, size, method=Image.BICUBIC):
|
|||
return image.resize(size, resample=method)
|
||||
|
||||
|
||||
def pad(image, size, method=Image.BICUBIC, color=None, centering=(0.5, 0.5)):
|
||||
def pad(image, size, method=Image.Resampling.BICUBIC, color=None, centering=(0.5, 0.5)):
|
||||
"""
|
||||
Returns a resized and padded version of the image, expanded to fill the
|
||||
requested aspect ratio and size.
|
||||
|
@ -315,7 +315,7 @@ def crop(image, border=0):
|
|||
return image.crop((left, top, image.size[0] - right, image.size[1] - bottom))
|
||||
|
||||
|
||||
def scale(image, factor, resample=Image.BICUBIC):
|
||||
def scale(image, factor, resample=Image.Resampling.BICUBIC):
|
||||
"""
|
||||
Returns a rescaled image by a specific factor given in parameter.
|
||||
A factor greater than 1 expands the image, between 0 and 1 contracts the
|
||||
|
@ -336,7 +336,7 @@ def scale(image, factor, resample=Image.BICUBIC):
|
|||
return image.resize(size, resample)
|
||||
|
||||
|
||||
def deform(image, deformer, resample=Image.BILINEAR):
|
||||
def deform(image, deformer, resample=Image.Resampling.BILINEAR):
|
||||
"""
|
||||
Deform the image.
|
||||
|
||||
|
@ -347,7 +347,9 @@ def deform(image, deformer, resample=Image.BILINEAR):
|
|||
in the PIL.Image.transform function.
|
||||
:return: An image.
|
||||
"""
|
||||
return image.transform(image.size, Image.MESH, deformer.getmesh(image), resample)
|
||||
return image.transform(
|
||||
image.size, Image.Transform.MESH, deformer.getmesh(image), resample
|
||||
)
|
||||
|
||||
|
||||
def equalize(image, mask=None):
|
||||
|
@ -408,7 +410,7 @@ def expand(image, border=0, fill=0):
|
|||
return out
|
||||
|
||||
|
||||
def fit(image, size, method=Image.BICUBIC, bleed=0.0, centering=(0.5, 0.5)):
|
||||
def fit(image, size, method=Image.Resampling.BICUBIC, bleed=0.0, centering=(0.5, 0.5)):
|
||||
"""
|
||||
Returns a resized and cropped version of the image, cropped to the
|
||||
requested aspect ratio and size.
|
||||
|
@ -500,7 +502,7 @@ def flip(image):
|
|||
:param image: The image to flip.
|
||||
:return: An image.
|
||||
"""
|
||||
return image.transpose(Image.FLIP_TOP_BOTTOM)
|
||||
return image.transpose(Image.Transpose.FLIP_TOP_BOTTOM)
|
||||
|
||||
|
||||
def grayscale(image):
|
||||
|
@ -533,7 +535,7 @@ def mirror(image):
|
|||
:param image: The image to mirror.
|
||||
:return: An image.
|
||||
"""
|
||||
return image.transpose(Image.FLIP_LEFT_RIGHT)
|
||||
return image.transpose(Image.Transpose.FLIP_LEFT_RIGHT)
|
||||
|
||||
|
||||
def posterize(image, bits):
|
||||
|
@ -579,13 +581,13 @@ def exif_transpose(image):
|
|||
exif = image.getexif()
|
||||
orientation = exif.get(0x0112)
|
||||
method = {
|
||||
2: Image.FLIP_LEFT_RIGHT,
|
||||
3: Image.ROTATE_180,
|
||||
4: Image.FLIP_TOP_BOTTOM,
|
||||
5: Image.TRANSPOSE,
|
||||
6: Image.ROTATE_270,
|
||||
7: Image.TRANSVERSE,
|
||||
8: Image.ROTATE_90,
|
||||
2: Image.Transpose.FLIP_LEFT_RIGHT,
|
||||
3: Image.Transpose.ROTATE_180,
|
||||
4: Image.Transpose.FLIP_TOP_BOTTOM,
|
||||
5: Image.Transpose.TRANSPOSE,
|
||||
6: Image.Transpose.ROTATE_270,
|
||||
7: Image.Transpose.TRANSVERSE,
|
||||
8: Image.Transpose.ROTATE_90,
|
||||
}.get(orientation)
|
||||
if method is not None:
|
||||
transposed_image = image.transpose(method)
|
||||
|
|
|
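The ImageOps helpers pick up the same enums internally; explicit members can still be passed, as in this sketch:

from PIL import Image, ImageOps

im = Image.new("RGB", (400, 300), "navy")

fitted = ImageOps.fit(im, (100, 100), method=Image.Resampling.BICUBIC)
padded = ImageOps.pad(im, (500, 500), color="black")
mirrored = ImageOps.mirror(im)  # Transpose.FLIP_LEFT_RIGHT under the hood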
@ -47,7 +47,7 @@ class AffineTransform(Transform):
|
|||
from an affine transform matrix.
|
||||
"""
|
||||
|
||||
method = Image.AFFINE
|
||||
method = Image.Transform.AFFINE
|
||||
|
||||
|
||||
class ExtentTransform(Transform):
|
||||
|
@ -69,7 +69,7 @@ class ExtentTransform(Transform):
|
|||
input image's coordinate system. See :ref:`coordinate-system`.
|
||||
"""
|
||||
|
||||
method = Image.EXTENT
|
||||
method = Image.Transform.EXTENT
|
||||
|
||||
|
||||
class QuadTransform(Transform):
|
||||
|
@ -86,7 +86,7 @@ class QuadTransform(Transform):
|
|||
source quadrilateral.
|
||||
"""
|
||||
|
||||
method = Image.QUAD
|
||||
method = Image.Transform.QUAD
|
||||
|
||||
|
||||
class MeshTransform(Transform):
|
||||
|
@ -99,4 +99,4 @@ class MeshTransform(Transform):
|
|||
:param data: A list of (bbox, quad) tuples.
|
||||
"""
|
||||
|
||||
method = Image.MESH
|
||||
method = Image.Transform.MESH
|
||||
|
|
|
@ -37,6 +37,7 @@ import re
|
|||
import struct
|
||||
import warnings
|
||||
import zlib
|
||||
from enum import IntEnum
|
||||
|
||||
from . import Image, ImageChops, ImageFile, ImagePalette, ImageSequence
|
||||
from ._binary import i16be as i16
|
||||
|
@ -94,36 +95,62 @@ See :ref:`Text in PNG File Format<png-text>`.
|
|||
|
||||
|
||||
# APNG frame disposal modes
|
||||
APNG_DISPOSE_OP_NONE = 0
|
||||
"""
|
||||
No disposal is done on this frame before rendering the next frame.
|
||||
See :ref:`Saving APNG sequences<apng-saving>`.
|
||||
"""
|
||||
APNG_DISPOSE_OP_BACKGROUND = 1
|
||||
"""
|
||||
This frame’s modified region is cleared to fully transparent black before rendering
|
||||
the next frame.
|
||||
See :ref:`Saving APNG sequences<apng-saving>`.
|
||||
"""
|
||||
APNG_DISPOSE_OP_PREVIOUS = 2
|
||||
"""
|
||||
This frame’s modified region is reverted to the previous frame’s contents before
|
||||
rendering the next frame.
|
||||
See :ref:`Saving APNG sequences<apng-saving>`.
|
||||
"""
|
||||
class Disposal(IntEnum):
|
||||
OP_NONE = 0
|
||||
"""
|
||||
No disposal is done on this frame before rendering the next frame.
|
||||
See :ref:`Saving APNG sequences<apng-saving>`.
|
||||
"""
|
||||
OP_BACKGROUND = 1
|
||||
"""
|
||||
This frame’s modified region is cleared to fully transparent black before rendering
|
||||
the next frame.
|
||||
See :ref:`Saving APNG sequences<apng-saving>`.
|
||||
"""
|
||||
OP_PREVIOUS = 2
|
||||
"""
|
||||
This frame’s modified region is reverted to the previous frame’s contents before
|
||||
rendering the next frame.
|
||||
See :ref:`Saving APNG sequences<apng-saving>`.
|
||||
"""
|
||||
|
||||
|
||||
# APNG frame blend modes
|
||||
APNG_BLEND_OP_SOURCE = 0
|
||||
"""
|
||||
All color components of this frame, including alpha, overwrite the previous output
|
||||
image contents.
|
||||
See :ref:`Saving APNG sequences<apng-saving>`.
|
||||
"""
|
||||
APNG_BLEND_OP_OVER = 1
|
||||
"""
|
||||
This frame should be alpha composited with the previous output image contents.
|
||||
See :ref:`Saving APNG sequences<apng-saving>`.
|
||||
"""
|
||||
class Blend(IntEnum):
|
||||
OP_SOURCE = 0
|
||||
"""
|
||||
All color components of this frame, including alpha, overwrite the previous output
|
||||
image contents.
|
||||
See :ref:`Saving APNG sequences<apng-saving>`.
|
||||
"""
|
||||
OP_OVER = 1
|
||||
"""
|
||||
This frame should be alpha composited with the previous output image contents.
|
||||
See :ref:`Saving APNG sequences<apng-saving>`.
|
||||
"""
|
||||
|
||||
|
||||
def __getattr__(name):
|
||||
deprecated = "deprecated and will be removed in Pillow 10 (2023-07-01). "
|
||||
for enum, prefix in {Disposal: "APNG_DISPOSE_", Blend: "APNG_BLEND_"}.items():
|
||||
if name.startswith(prefix):
|
||||
name = name[len(prefix) :]
|
||||
if name in enum.__members__:
|
||||
warnings.warn(
|
||||
prefix
|
||||
+ name
|
||||
+ " is "
|
||||
+ deprecated
|
||||
+ "Use "
|
||||
+ enum.__name__
|
||||
+ "."
|
||||
+ name
|
||||
+ " instead.",
|
||||
DeprecationWarning,
|
||||
stacklevel=2,
|
||||
)
|
||||
return enum[name]
|
||||
raise AttributeError(f"module '{__name__}' has no attribute '{name}'")
|
||||
|
||||
|
||||
def _safe_zlib_decompress(s):
|
||||
|
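Saving an APNG with the new Disposal and Blend enums, as a sketch; the output filename is illustrative:

from PIL import Image, PngImagePlugin

frames = [Image.new("RGBA", (64, 64), color) for color in ("red", "green", "blue")]

frames[0].save(
    "animated.png",  # illustrative output path
    save_all=True,
    append_images=frames[1:],
    duration=200,
    disposal=PngImagePlugin.Disposal.OP_BACKGROUND,
    blend=PngImagePlugin.Blend.OP_OVER,
)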
@ -861,13 +888,13 @@ class PngImageFile(ImageFile.ImageFile):
|
|||
raise EOFError
|
||||
|
||||
# setup frame disposal (actual disposal done when needed in the next _seek())
|
||||
if self._prev_im is None and self.dispose_op == APNG_DISPOSE_OP_PREVIOUS:
|
||||
self.dispose_op = APNG_DISPOSE_OP_BACKGROUND
|
||||
if self._prev_im is None and self.dispose_op == Disposal.OP_PREVIOUS:
|
||||
self.dispose_op = Disposal.OP_BACKGROUND
|
||||
|
||||
if self.dispose_op == APNG_DISPOSE_OP_PREVIOUS:
|
||||
if self.dispose_op == Disposal.OP_PREVIOUS:
|
||||
self.dispose = self._prev_im.copy()
|
||||
self.dispose = self._crop(self.dispose, self.dispose_extent)
|
||||
elif self.dispose_op == APNG_DISPOSE_OP_BACKGROUND:
|
||||
elif self.dispose_op == Disposal.OP_BACKGROUND:
|
||||
self.dispose = Image.core.fill(self.mode, self.size)
|
||||
self.dispose = self._crop(self.dispose, self.dispose_extent)
|
||||
else:
|
||||
|
@ -956,7 +983,7 @@ class PngImageFile(ImageFile.ImageFile):
|
|||
self.png.close()
|
||||
self.png = None
|
||||
else:
|
||||
if self._prev_im and self.blend_op == APNG_BLEND_OP_OVER:
|
||||
if self._prev_im and self.blend_op == Blend.OP_OVER:
|
||||
updated = self._crop(self.im, self.dispose_extent)
|
||||
self._prev_im.paste(
|
||||
updated, self.dispose_extent, updated.convert("RGBA")
|
||||
|
@ -1062,10 +1089,8 @@ def _write_multiple_frames(im, fp, chunk, rawmode):
|
|||
default_image = im.encoderinfo.get("default_image", im.info.get("default_image"))
|
||||
duration = im.encoderinfo.get("duration", im.info.get("duration", 0))
|
||||
loop = im.encoderinfo.get("loop", im.info.get("loop", 0))
|
||||
disposal = im.encoderinfo.get(
|
||||
"disposal", im.info.get("disposal", APNG_DISPOSE_OP_NONE)
|
||||
)
|
||||
blend = im.encoderinfo.get("blend", im.info.get("blend", APNG_BLEND_OP_SOURCE))
|
||||
disposal = im.encoderinfo.get("disposal", im.info.get("disposal", Disposal.OP_NONE))
|
||||
blend = im.encoderinfo.get("blend", im.info.get("blend", Blend.OP_SOURCE))
|
||||
|
||||
if default_image:
|
||||
chain = itertools.chain(im.encoderinfo.get("append_images", []))
|
||||
|
@ -1095,10 +1120,10 @@ def _write_multiple_frames(im, fp, chunk, rawmode):
|
|||
previous = im_frames[-1]
|
||||
prev_disposal = previous["encoderinfo"].get("disposal")
|
||||
prev_blend = previous["encoderinfo"].get("blend")
|
||||
if prev_disposal == APNG_DISPOSE_OP_PREVIOUS and len(im_frames) < 2:
|
||||
prev_disposal = APNG_DISPOSE_OP_BACKGROUND
|
||||
if prev_disposal == Disposal.OP_PREVIOUS and len(im_frames) < 2:
|
||||
prev_disposal = Disposal.OP_BACKGROUND
|
||||
|
||||
if prev_disposal == APNG_DISPOSE_OP_BACKGROUND:
|
||||
if prev_disposal == Disposal.OP_BACKGROUND:
|
||||
base_im = previous["im"]
|
||||
dispose = Image.core.fill("RGBA", im.size, (0, 0, 0, 0))
|
||||
bbox = previous["bbox"]
|
||||
|
@ -1107,7 +1132,7 @@ def _write_multiple_frames(im, fp, chunk, rawmode):
|
|||
else:
|
||||
bbox = (0, 0) + im.size
|
||||
base_im.paste(dispose, bbox)
|
||||
elif prev_disposal == APNG_DISPOSE_OP_PREVIOUS:
|
||||
elif prev_disposal == Disposal.OP_PREVIOUS:
|
||||
base_im = im_frames[-2]["im"]
|
||||
else:
|
||||
base_im = previous["im"]
|
||||
|
|
|
@ -314,7 +314,7 @@ if __name__ == "__main__":
|
|||
outfile = sys.argv[2]
|
||||
|
||||
# perform some image operation
|
||||
im = im.transpose(Image.FLIP_LEFT_RIGHT)
|
||||
im = im.transpose(Image.Transpose.FLIP_LEFT_RIGHT)
|
||||
print(
|
||||
f"saving a flipped version of {os.path.basename(filename)} "
|
||||
f"as {outfile} "
|
||||
|
|
|
@ -152,7 +152,7 @@ class TgaImageFile(ImageFile.ImageFile):
|
|||
|
||||
def load_end(self):
|
||||
if self._flip_horizontally:
|
||||
self.im = self.im.transpose(Image.FLIP_LEFT_RIGHT)
|
||||
self.im = self.im.transpose(Image.Transpose.FLIP_LEFT_RIGHT)
|
||||
|
||||
|
||||
#
|
||||
|
|
|
@ -1161,13 +1161,13 @@ class TiffImageFile(ImageFile.ImageFile):
|
|||
def load_end(self):
|
||||
if self._tile_orientation:
|
||||
method = {
|
||||
2: Image.FLIP_LEFT_RIGHT,
|
||||
3: Image.ROTATE_180,
|
||||
4: Image.FLIP_TOP_BOTTOM,
|
||||
5: Image.TRANSPOSE,
|
||||
6: Image.ROTATE_270,
|
||||
7: Image.TRANSVERSE,
|
||||
8: Image.ROTATE_90,
|
||||
2: Image.Transpose.FLIP_LEFT_RIGHT,
|
||||
3: Image.Transpose.ROTATE_180,
|
||||
4: Image.Transpose.FLIP_TOP_BOTTOM,
|
||||
5: Image.Transpose.TRANSPOSE,
|
||||
6: Image.Transpose.ROTATE_270,
|
||||
7: Image.Transpose.TRANSVERSE,
|
||||
8: Image.Transpose.ROTATE_90,
|
||||
}.get(self._tile_orientation)
|
||||
if method is not None:
|
||||
self.im = self.im.transpose(method)
|
||||
|
|
|
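The orientation-to-Transpose mapping above is the same lookup ImageOps.exif_transpose uses; restated as a small helper sketch (the names here are illustrative, not part of the change):

from PIL import Image

ORIENTATION_TO_TRANSPOSE = {
    2: Image.Transpose.FLIP_LEFT_RIGHT,
    3: Image.Transpose.ROTATE_180,
    4: Image.Transpose.FLIP_TOP_BOTTOM,
    5: Image.Transpose.TRANSPOSE,
    6: Image.Transpose.ROTATE_270,
    7: Image.Transpose.TRANSVERSE,
    8: Image.Transpose.ROTATE_90,
}


def apply_orientation(im, orientation):
    # Orientation 1 (or an unrecognised value) leaves the image untouched.
    method = ORIENTATION_TO_TRANSPOSE.get(orientation)
    return im.transpose(method) if method is not None else im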
@ -1063,7 +1063,7 @@ _gaussian_blur(ImagingObject *self, PyObject *args) {
|
|||
static PyObject *
|
||||
_getpalette(ImagingObject *self, PyObject *args) {
|
||||
PyObject *palette;
|
||||
int palettesize = 256;
|
||||
int palettesize;
|
||||
int bits;
|
||||
ImagingShuffler pack;
|
||||
|
||||
|
@ -1084,6 +1084,7 @@ _getpalette(ImagingObject *self, PyObject *args) {
|
|||
return NULL;
|
||||
}
|
||||
|
||||
palettesize = self->image->palette->size;
|
||||
palette = PyBytes_FromStringAndSize(NULL, palettesize * bits / 8);
|
||||
if (!palette) {
|
||||
return NULL;
|
||||
|
@ -1641,7 +1642,7 @@ _putpalette(ImagingObject *self, PyObject *args) {
|
|||
ImagingShuffler unpack;
|
||||
int bits;
|
||||
|
||||
char *rawmode;
|
||||
char *rawmode, *palette_mode;
|
||||
UINT8 *palette;
|
||||
Py_ssize_t palettesize;
|
||||
if (!PyArg_ParseTuple(args, "sy#", &rawmode, &palette, &palettesize)) {
|
||||
|
@ -1654,7 +1655,8 @@ _putpalette(ImagingObject *self, PyObject *args) {
|
|||
return NULL;
|
||||
}
|
||||
|
||||
unpack = ImagingFindUnpacker("RGB", rawmode, &bits);
|
||||
palette_mode = strncmp("RGBA", rawmode, 4) == 0 ? "RGBA" : "RGB";
|
||||
unpack = ImagingFindUnpacker(palette_mode, rawmode, &bits);
|
||||
if (!unpack) {
|
||||
PyErr_SetString(PyExc_ValueError, wrong_raw_mode);
|
||||
return NULL;
|
||||
|
@ -1669,11 +1671,13 @@ _putpalette(ImagingObject *self, PyObject *args) {
|
|||
|
||||
strcpy(self->image->mode, strlen(self->image->mode) == 2 ? "PA" : "P");
|
||||
|
||||
self->image->palette = ImagingPaletteNew("RGB");
|
||||
self->image->palette = ImagingPaletteNew(palette_mode);
|
||||
|
||||
unpack(self->image->palette->palette, palette, palettesize * 8 / bits);
|
||||
self->image->palette->size = palettesize * 8 / bits;
|
||||
unpack(self->image->palette->palette, palette, self->image->palette->size);
|
||||
|
||||
return PyLong_FromLong(palettesize * 8 / bits);
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
}
|
||||
|
||||
static PyObject *
|
||||
|
|
|
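The Python-visible effect of the _getpalette/_putpalette changes, as a hedged sketch: getpalette() now reflects the stored palette size rather than always padding to 256 entries.

from PIL import Image

im = Image.new("P", (8, 8))
im.putpalette([255, 0, 0, 0, 255, 0])  # two RGB entries

print(len(im.getpalette()) // 3)  # expected: 2, not 256, with this change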
@ -991,115 +991,116 @@ static struct {
|
|||
/* ------------------- */
|
||||
|
||||
static void
|
||||
p2bit(UINT8 *out, const UINT8 *in, int xsize, const UINT8 *palette) {
|
||||
p2bit(UINT8 *out, const UINT8 *in, int xsize, ImagingPalette palette) {
|
||||
int x;
|
||||
/* FIXME: precalculate greyscale palette? */
|
||||
for (x = 0; x < xsize; x++) {
|
||||
*out++ = (L(&palette[in[x] * 4]) >= 128000) ? 255 : 0;
|
||||
*out++ = (L(&palette->palette[in[x] * 4]) >= 128000) ? 255 : 0;
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
pa2bit(UINT8 *out, const UINT8 *in, int xsize, const UINT8 *palette) {
|
||||
pa2bit(UINT8 *out, const UINT8 *in, int xsize, ImagingPalette palette) {
|
||||
int x;
|
||||
/* FIXME: precalculate greyscale palette? */
|
||||
for (x = 0; x < xsize; x++, in += 4) {
|
||||
*out++ = (L(&palette[in[0] * 4]) >= 128000) ? 255 : 0;
|
||||
*out++ = (L(&palette->palette[in[0] * 4]) >= 128000) ? 255 : 0;
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
p2l(UINT8 *out, const UINT8 *in, int xsize, const UINT8 *palette) {
|
||||
p2l(UINT8 *out, const UINT8 *in, int xsize, ImagingPalette palette) {
|
||||
int x;
|
||||
/* FIXME: precalculate greyscale palette? */
|
||||
for (x = 0; x < xsize; x++) {
|
||||
*out++ = L24(&palette[in[x] * 4]) >> 16;
|
||||
*out++ = L24(&palette->palette[in[x] * 4]) >> 16;
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
pa2l(UINT8 *out, const UINT8 *in, int xsize, const UINT8 *palette) {
|
||||
pa2l(UINT8 *out, const UINT8 *in, int xsize, ImagingPalette palette) {
|
||||
int x;
|
||||
/* FIXME: precalculate greyscale palette? */
|
||||
for (x = 0; x < xsize; x++, in += 4) {
|
||||
*out++ = L24(&palette[in[0] * 4]) >> 16;
|
||||
*out++ = L24(&palette->palette[in[0] * 4]) >> 16;
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
p2pa(UINT8 *out, const UINT8 *in, int xsize, const UINT8 *palette) {
|
||||
p2pa(UINT8 *out, const UINT8 *in, int xsize, ImagingPalette palette) {
|
||||
int x;
|
||||
int rgb = strcmp(palette->mode, "RGB");
|
||||
for (x = 0; x < xsize; x++, in++) {
|
||||
const UINT8 *rgba = &palette[in[0]];
|
||||
const UINT8 *rgba = &palette->palette[in[0]];
|
||||
*out++ = in[0];
|
||||
*out++ = in[0];
|
||||
*out++ = in[0];
|
||||
*out++ = rgba[3];
|
||||
*out++ = rgb == 0 ? 255 : rgba[3];
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
p2la(UINT8 *out, const UINT8 *in, int xsize, const UINT8 *palette) {
|
||||
p2la(UINT8 *out, const UINT8 *in, int xsize, ImagingPalette palette) {
|
||||
int x;
|
||||
/* FIXME: precalculate greyscale palette? */
|
||||
for (x = 0; x < xsize; x++, out += 4) {
|
||||
const UINT8 *rgba = &palette[*in++ * 4];
|
||||
const UINT8 *rgba = &palette->palette[*in++ * 4];
|
||||
out[0] = out[1] = out[2] = L24(rgba) >> 16;
|
||||
out[3] = rgba[3];
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
pa2la(UINT8 *out, const UINT8 *in, int xsize, const UINT8 *palette) {
|
||||
pa2la(UINT8 *out, const UINT8 *in, int xsize, ImagingPalette palette) {
|
||||
int x;
|
||||
/* FIXME: precalculate greyscale palette? */
|
||||
for (x = 0; x < xsize; x++, in += 4, out += 4) {
|
||||
out[0] = out[1] = out[2] = L24(&palette[in[0] * 4]) >> 16;
|
||||
out[0] = out[1] = out[2] = L24(&palette->palette[in[0] * 4]) >> 16;
|
||||
out[3] = in[3];
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
p2i(UINT8 *out_, const UINT8 *in, int xsize, const UINT8 *palette) {
|
||||
p2i(UINT8 *out_, const UINT8 *in, int xsize, ImagingPalette palette) {
|
||||
int x;
|
||||
for (x = 0; x < xsize; x++, out_ += 4) {
|
||||
INT32 v = L24(&palette[in[x] * 4]) >> 16;
|
||||
INT32 v = L24(&palette->palette[in[x] * 4]) >> 16;
|
||||
memcpy(out_, &v, sizeof(v));
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
pa2i(UINT8 *out_, const UINT8 *in, int xsize, const UINT8 *palette) {
|
||||
pa2i(UINT8 *out_, const UINT8 *in, int xsize, ImagingPalette palette) {
|
||||
int x;
|
||||
INT32 *out = (INT32 *)out_;
|
||||
for (x = 0; x < xsize; x++, in += 4) {
|
||||
*out++ = L24(&palette[in[0] * 4]) >> 16;
|
||||
*out++ = L24(&palette->palette[in[0] * 4]) >> 16;
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
p2f(UINT8 *out_, const UINT8 *in, int xsize, const UINT8 *palette) {
|
||||
p2f(UINT8 *out_, const UINT8 *in, int xsize, ImagingPalette palette) {
|
||||
int x;
|
||||
for (x = 0; x < xsize; x++, out_ += 4) {
|
||||
FLOAT32 v = L(&palette[in[x] * 4]) / 1000.0F;
|
||||
FLOAT32 v = L(&palette->palette[in[x] * 4]) / 1000.0F;
|
||||
memcpy(out_, &v, sizeof(v));
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
pa2f(UINT8 *out_, const UINT8 *in, int xsize, const UINT8 *palette) {
|
||||
pa2f(UINT8 *out_, const UINT8 *in, int xsize, ImagingPalette palette) {
|
||||
int x;
|
||||
FLOAT32 *out = (FLOAT32 *)out_;
|
||||
for (x = 0; x < xsize; x++, in += 4) {
|
||||
*out++ = (float)L(&palette[in[0] * 4]) / 1000.0F;
|
||||
*out++ = (float)L(&palette->palette[in[0] * 4]) / 1000.0F;
|
||||
}
|
||||
}

 static void
-p2rgb(UINT8 *out, const UINT8 *in, int xsize, const UINT8 *palette) {
+p2rgb(UINT8 *out, const UINT8 *in, int xsize, ImagingPalette palette) {
     int x;
     for (x = 0; x < xsize; x++) {
-        const UINT8 *rgb = &palette[*in++ * 4];
+        const UINT8 *rgb = &palette->palette[*in++ * 4];
         *out++ = rgb[0];
         *out++ = rgb[1];
         *out++ = rgb[2];

@@ -1108,10 +1109,10 @@ p2rgb(UINT8 *out, const UINT8 *in, int xsize, const UINT8 *palette) {
 }

 static void
-pa2rgb(UINT8 *out, const UINT8 *in, int xsize, const UINT8 *palette) {
+pa2rgb(UINT8 *out, const UINT8 *in, int xsize, ImagingPalette palette) {
     int x;
     for (x = 0; x < xsize; x++, in += 4) {
-        const UINT8 *rgb = &palette[in[0] * 4];
+        const UINT8 *rgb = &palette->palette[in[0] * 4];
         *out++ = rgb[0];
         *out++ = rgb[1];
         *out++ = rgb[2];

@@ -1120,30 +1121,30 @@ pa2rgb(UINT8 *out, const UINT8 *in, int xsize, const UINT8 *palette) {
 }
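
p2rgb and pa2rgb read palette entries with a stride of 4 bytes per index, so even an RGB palette is stored as packed 4-byte entries. A standalone sketch of that row expansion; the trailing pad byte is assumed, since the unchanged tails of these functions are collapsed between the hunks above.

/* Each input byte is a palette index; entries sit 4 bytes apart. */
#include <stdint.h>

static void
expand_row_rgb(uint8_t *out, const uint8_t *in, int xsize,
               const uint8_t rgba[256 * 4]) {
    for (int x = 0; x < xsize; x++) {
        const uint8_t *entry = &rgba[in[x] * 4]; /* 4-byte stride per entry */
        *out++ = entry[0];
        *out++ = entry[1];
        *out++ = entry[2];
        *out++ = 255; /* pad so output pixels are 4 bytes wide (assumed) */
    }
}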

 static void
-p2hsv(UINT8 *out, const UINT8 *in, int xsize, const UINT8 *palette) {
+p2hsv(UINT8 *out, const UINT8 *in, int xsize, ImagingPalette palette) {
     int x;
     for (x = 0; x < xsize; x++, out += 4) {
-        const UINT8 *rgb = &palette[*in++ * 4];
+        const UINT8 *rgb = &palette->palette[*in++ * 4];
         rgb2hsv_row(out, rgb);
         out[3] = 255;
     }
 }

 static void
-pa2hsv(UINT8 *out, const UINT8 *in, int xsize, const UINT8 *palette) {
+pa2hsv(UINT8 *out, const UINT8 *in, int xsize, ImagingPalette palette) {
     int x;
     for (x = 0; x < xsize; x++, in += 4, out += 4) {
-        const UINT8 *rgb = &palette[in[0] * 4];
+        const UINT8 *rgb = &palette->palette[in[0] * 4];
         rgb2hsv_row(out, rgb);
         out[3] = 255;
     }
 }

 static void
-p2rgba(UINT8 *out, const UINT8 *in, int xsize, const UINT8 *palette) {
+p2rgba(UINT8 *out, const UINT8 *in, int xsize, ImagingPalette palette) {
     int x;
     for (x = 0; x < xsize; x++) {
-        const UINT8 *rgba = &palette[*in++ * 4];
+        const UINT8 *rgba = &palette->palette[*in++ * 4];
         *out++ = rgba[0];
         *out++ = rgba[1];
         *out++ = rgba[2];

@@ -1152,10 +1153,10 @@ p2rgba(UINT8 *out, const UINT8 *in, int xsize, const UINT8 *palette) {
 }

 static void
-pa2rgba(UINT8 *out, const UINT8 *in, int xsize, const UINT8 *palette) {
+pa2rgba(UINT8 *out, const UINT8 *in, int xsize, ImagingPalette palette) {
     int x;
     for (x = 0; x < xsize; x++, in += 4) {
-        const UINT8 *rgb = &palette[in[0] * 4];
+        const UINT8 *rgb = &palette->palette[in[0] * 4];
         *out++ = rgb[0];
         *out++ = rgb[1];
         *out++ = rgb[2];

@@ -1164,25 +1165,25 @@ pa2rgba(UINT8 *out, const UINT8 *in, int xsize, const UINT8 *palette) {
 }

 static void
-p2cmyk(UINT8 *out, const UINT8 *in, int xsize, const UINT8 *palette) {
+p2cmyk(UINT8 *out, const UINT8 *in, int xsize, ImagingPalette palette) {
     p2rgb(out, in, xsize, palette);
     rgb2cmyk(out, out, xsize);
 }

 static void
-pa2cmyk(UINT8 *out, const UINT8 *in, int xsize, const UINT8 *palette) {
+pa2cmyk(UINT8 *out, const UINT8 *in, int xsize, ImagingPalette palette) {
     pa2rgb(out, in, xsize, palette);
     rgb2cmyk(out, out, xsize);
 }

 static void
-p2ycbcr(UINT8 *out, const UINT8 *in, int xsize, const UINT8 *palette) {
+p2ycbcr(UINT8 *out, const UINT8 *in, int xsize, ImagingPalette palette) {
     p2rgb(out, in, xsize, palette);
     ImagingConvertRGB2YCbCr(out, out, xsize);
 }

 static void
-pa2ycbcr(UINT8 *out, const UINT8 *in, int xsize, const UINT8 *palette) {
+pa2ycbcr(UINT8 *out, const UINT8 *in, int xsize, ImagingPalette palette) {
     pa2rgb(out, in, xsize, palette);
     ImagingConvertRGB2YCbCr(out, out, xsize);
 }
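
The CMYK and YCbCr converters are plain compositions: expand the palette indexes to RGB into the output row, then rewrite that same row in place with the RGB-to-target helper. A sketch of the pattern with stand-in function pointer types.

/* Two-stage row conversion: expand to RGB, then convert the row in place. */
#include <stdint.h>

typedef void (*row_filter)(uint8_t *out, const uint8_t *in, int xsize);

static void
convert_in_two_stages(uint8_t *out, const uint8_t *in, int xsize,
                      row_filter expand_to_rgb, row_filter rgb_to_target) {
    expand_to_rgb(out, in, xsize);  /* stage 1: indexes -> RGB pixels */
    rgb_to_target(out, out, xsize); /* stage 2: same buffer, in place */
}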

@@ -1192,7 +1193,7 @@ frompalette(Imaging imOut, Imaging imIn, const char *mode) {
     ImagingSectionCookie cookie;
     int alpha;
     int y;
-    void (*convert)(UINT8 *, const UINT8 *, int, const UINT8 *);
+    void (*convert)(UINT8 *, const UINT8 *, int, ImagingPalette);

     /* Map palette image to L, RGB, RGBA, or CMYK */

@@ -1239,7 +1240,7 @@ frompalette(Imaging imOut, Imaging imIn, const char *mode) {
             (UINT8 *)imOut->image[y],
             (UINT8 *)imIn->image[y],
             imIn->xsize,
-            imIn->palette->palette);
+            imIn->palette);
     }
     ImagingSectionLeave(&cookie);
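
frompalette dispatches through a function pointer whose fourth parameter is now ImagingPalette, so the per-row call passes imIn->palette itself instead of the raw imIn->palette->palette bytes. A minimal sketch of that dispatch shape, with toy types standing in for the Imaging structures.

/* Toy per-row dispatch: every converter takes the palette handle itself. */
#include <stdint.h>

typedef struct ToyPaletteRec {
    uint8_t palette[1024];
} *ToyPaletteHandle;

typedef void (*row_convert)(uint8_t *out, const uint8_t *in, int xsize,
                            ToyPaletteHandle pal);

static void
convert_rows(uint8_t **out_rows, uint8_t **in_rows, int ysize, int xsize,
             row_convert convert, ToyPaletteHandle pal) {
    for (int y = 0; y < ysize; y++) {
        convert(out_rows[y], in_rows[y], xsize, pal);
    }
}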

@@ -143,6 +143,7 @@ struct ImagingPaletteInstance {
     char mode[IMAGING_MODE_LENGTH]; /* Band names */

     /* Data */
+    int size;
     UINT8 palette[1024]; /* Palette data (same format as image data) */

     INT16 *cache; /* Palette cache (used for predefined palettes) */
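
The palette record gains an explicit size member, so a palette can advertise fewer than 256 valid entries while the 1024-byte backing store stays fixed. A toy version with a bounds-checked lookup; the field names are stand-ins for illustration.

/* Sized palette: only the first `size` of the 256 slots are meaningful. */
#include <stddef.h>
#include <stdint.h>

typedef struct {
    int size;                 /* number of valid entries, 1..256 */
    uint8_t palette[256 * 4]; /* RGBA bytes for every possible index */
} SizedPalette;

static const uint8_t *
valid_entry(const SizedPalette *p, int index) {
    return (index >= 0 && index < p->size) ? &p->palette[index * 4] : NULL;
}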

@@ -40,6 +40,7 @@ ImagingPaletteNew(const char *mode) {
     palette->mode[IMAGING_MODE_LENGTH - 1] = 0;

     /* Initialize to ramp */
+    palette->size = 256;
     for (i = 0; i < 256; i++) {
         palette->palette[i * 4 + 0] = palette->palette[i * 4 + 1] =
             palette->palette[i * 4 + 2] = (UINT8)i;
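
ImagingPaletteNew still initializes the full greyscale ramp and now also records size = 256 for it. A standalone sketch of that default; the opaque alpha byte is assumed, since that assignment falls outside the hunk shown above.

/* Default ramp: entry i becomes (i, i, i, 255) and all 256 entries are valid. */
#include <stdint.h>

static void
fill_grey_ramp(uint8_t rgba[256 * 4], int *size) {
    *size = 256;
    for (int i = 0; i < 256; i++) {
        rgba[i * 4 + 0] = (uint8_t)i;
        rgba[i * 4 + 1] = (uint8_t)i;
        rgba[i * 4 + 2] = (uint8_t)i;
        rgba[i * 4 + 3] = 255; /* assumed: fully opaque by default */
    }
}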
|
||||
|
@ -193,7 +194,7 @@ ImagingPaletteCacheUpdate(ImagingPalette palette, int r, int g, int b) {
|
|||
|
||||
dmax = (unsigned int)~0;
|
||||
|
||||
for (i = 0; i < 256; i++) {
|
||||
for (i = 0; i < palette->size; i++) {
|
||||
int r, g, b;
|
||||
unsigned int tmin, tmax;
|
||||
|
||||
|
@ -226,7 +227,7 @@ ImagingPaletteCacheUpdate(ImagingPalette palette, int r, int g, int b) {
|
|||
d[i] = (unsigned int)~0;
|
||||
}
|
||||
|
||||
for (i = 0; i < 256; i++) {
|
||||
for (i = 0; i < palette->size; i++) {
|
||||
if (dmin[i] <= dmax) {
|
||||
int rd, gd, bd;
|
||||
int ri, gi, bi;
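
Both scans in ImagingPaletteCacheUpdate are now bounded by palette->size instead of a hard-coded 256, so entries beyond the palette's real length can no longer win the nearest-colour search. A hedged sketch of a size-bounded nearest-colour scan by squared RGB distance; the real cache update is considerably more elaborate.

/* Nearest palette index by squared RGB distance, limited to valid entries. */
#include <stdint.h>

static int
nearest_index(const uint8_t rgba[256 * 4], int size, int r, int g, int b) {
    int best = 0;
    unsigned int best_d = ~0u;
    for (int i = 0; i < size; i++) { /* only the first `size` entries compete */
        int dr = rgba[i * 4 + 0] - r;
        int dg = rgba[i * 4 + 1] - g;
        int db = rgba[i * 4 + 2] - b;
        unsigned int d = (unsigned int)(dr * dr + dg * dg + db * db);
        if (d < best_d) {
            best_d = d;
            best = i;
        }
    }
    return best;
}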