mirror of https://github.com/python-pillow/Pillow.git
synced 2024-12-26 18:06:18 +03:00

Merge branch 'main' into fits

This commit is contained in:
commit 7dca0135dd

CHANGES.rst | 12
@@ -5,6 +5,18 @@ Changelog (Pillow)
 9.1.0 (unreleased)
 ------------------
 
+- Consider palette size when converting and in getpalette() #6060
+  [radarhere]
+
+- Added enums #5954
+  [radarhere]
+
+- Ensure image is opaque after converting P to PA with RGB palette #6052
+  [radarhere]
+
+- Attach RGBA palettes from putpalette() when suitable #6054
+  [radarhere]
+
 - Added get_photoshop_blocks() to parse Photoshop TIFF tag #6030
   [radarhere]
 
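The "Added enums #5954" entry corresponds to the constant-to-enum migration that the test changes below exercise: module-level constants such as Image.LINEAR, Image.ROTATE_90 and Image.ADAPTIVE are replaced by members of Image.Resampling, Image.Transpose, Image.Palette and related enums. A minimal sketch of the new spelling, assuming Pillow 9.1 with these enums available (the input filename is hypothetical):

    from PIL import Image

    with Image.open("hopper.png") as im:  # hypothetical input file
        # Resampling filters: Image.LANCZOS / Image.ANTIALIAS -> Image.Resampling.LANCZOS
        small = im.resize((64, 64), Image.Resampling.LANCZOS)
        # Transpose operations: Image.ROTATE_90 -> Image.Transpose.ROTATE_90
        rotated = im.transpose(Image.Transpose.ROTATE_90)
        # Palette generation: Image.ADAPTIVE -> Image.Palette.ADAPTIVE
        paletted = im.convert("P", palette=Image.Palette.ADAPTIVE)
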
Binary file not shown.
@@ -43,107 +43,158 @@ class TestColorLut3DCoreAPI:
         im = Image.new("RGB", (10, 10), 0)

         with pytest.raises(ValueError, match="filter"):
-            im.im.color_lut_3d("RGB", Image.CUBIC, *self.generate_identity_table(3, 3))
+            im.im.color_lut_3d(
+                "RGB", Image.Resampling.BICUBIC, *self.generate_identity_table(3, 3)
+            )

         with pytest.raises(ValueError, match="image mode"):
             im.im.color_lut_3d(
-                "wrong", Image.LINEAR, *self.generate_identity_table(3, 3)
+                "wrong", Image.Resampling.BILINEAR, *self.generate_identity_table(3, 3)
             )

         with pytest.raises(ValueError, match="table_channels"):
-            im.im.color_lut_3d("RGB", Image.LINEAR, *self.generate_identity_table(5, 3))
-
-        with pytest.raises(ValueError, match="table_channels"):
-            im.im.color_lut_3d("RGB", Image.LINEAR, *self.generate_identity_table(1, 3))
-
-        with pytest.raises(ValueError, match="table_channels"):
-            im.im.color_lut_3d("RGB", Image.LINEAR, *self.generate_identity_table(2, 3))
-
-        with pytest.raises(ValueError, match="Table size"):
             im.im.color_lut_3d(
-                "RGB", Image.LINEAR, *self.generate_identity_table(3, (1, 3, 3))
+                "RGB", Image.Resampling.BILINEAR, *self.generate_identity_table(5, 3)
+            )
+
+        with pytest.raises(ValueError, match="table_channels"):
+            im.im.color_lut_3d(
+                "RGB", Image.Resampling.BILINEAR, *self.generate_identity_table(1, 3)
+            )
+
+        with pytest.raises(ValueError, match="table_channels"):
+            im.im.color_lut_3d(
+                "RGB", Image.Resampling.BILINEAR, *self.generate_identity_table(2, 3)
             )

         with pytest.raises(ValueError, match="Table size"):
             im.im.color_lut_3d(
-                "RGB", Image.LINEAR, *self.generate_identity_table(3, (66, 3, 3))
+                "RGB",
+                Image.Resampling.BILINEAR,
+                *self.generate_identity_table(3, (1, 3, 3)),
+            )
+
+        with pytest.raises(ValueError, match="Table size"):
+            im.im.color_lut_3d(
+                "RGB",
+                Image.Resampling.BILINEAR,
+                *self.generate_identity_table(3, (66, 3, 3)),
             )

         with pytest.raises(ValueError, match=r"size1D \* size2D \* size3D"):
-            im.im.color_lut_3d("RGB", Image.LINEAR, 3, 2, 2, 2, [0, 0, 0] * 7)
+            im.im.color_lut_3d(
+                "RGB", Image.Resampling.BILINEAR, 3, 2, 2, 2, [0, 0, 0] * 7
+            )

         with pytest.raises(ValueError, match=r"size1D \* size2D \* size3D"):
-            im.im.color_lut_3d("RGB", Image.LINEAR, 3, 2, 2, 2, [0, 0, 0] * 9)
+            im.im.color_lut_3d(
+                "RGB", Image.Resampling.BILINEAR, 3, 2, 2, 2, [0, 0, 0] * 9
+            )

         with pytest.raises(TypeError):
-            im.im.color_lut_3d("RGB", Image.LINEAR, 3, 2, 2, 2, [0, 0, "0"] * 8)
+            im.im.color_lut_3d(
+                "RGB", Image.Resampling.BILINEAR, 3, 2, 2, 2, [0, 0, "0"] * 8
+            )

         with pytest.raises(TypeError):
-            im.im.color_lut_3d("RGB", Image.LINEAR, 3, 2, 2, 2, 16)
+            im.im.color_lut_3d("RGB", Image.Resampling.BILINEAR, 3, 2, 2, 2, 16)

     def test_correct_args(self):
         im = Image.new("RGB", (10, 10), 0)

-        im.im.color_lut_3d("RGB", Image.LINEAR, *self.generate_identity_table(3, 3))
-
-        im.im.color_lut_3d("CMYK", Image.LINEAR, *self.generate_identity_table(4, 3))
-
         im.im.color_lut_3d(
-            "RGB", Image.LINEAR, *self.generate_identity_table(3, (2, 3, 3))
+            "RGB", Image.Resampling.BILINEAR, *self.generate_identity_table(3, 3)
         )

         im.im.color_lut_3d(
-            "RGB", Image.LINEAR, *self.generate_identity_table(3, (65, 3, 3))
+            "CMYK", Image.Resampling.BILINEAR, *self.generate_identity_table(4, 3)
         )

         im.im.color_lut_3d(
-            "RGB", Image.LINEAR, *self.generate_identity_table(3, (3, 65, 3))
+            "RGB",
+            Image.Resampling.BILINEAR,
+            *self.generate_identity_table(3, (2, 3, 3)),
         )

         im.im.color_lut_3d(
-            "RGB", Image.LINEAR, *self.generate_identity_table(3, (3, 3, 65))
+            "RGB",
+            Image.Resampling.BILINEAR,
+            *self.generate_identity_table(3, (65, 3, 3)),
+        )
+
+        im.im.color_lut_3d(
+            "RGB",
+            Image.Resampling.BILINEAR,
+            *self.generate_identity_table(3, (3, 65, 3)),
+        )
+
+        im.im.color_lut_3d(
+            "RGB",
+            Image.Resampling.BILINEAR,
+            *self.generate_identity_table(3, (3, 3, 65)),
         )

     def test_wrong_mode(self):
         with pytest.raises(ValueError, match="wrong mode"):
             im = Image.new("L", (10, 10), 0)
-            im.im.color_lut_3d("RGB", Image.LINEAR, *self.generate_identity_table(3, 3))
-
-        with pytest.raises(ValueError, match="wrong mode"):
-            im = Image.new("RGB", (10, 10), 0)
-            im.im.color_lut_3d("L", Image.LINEAR, *self.generate_identity_table(3, 3))
-
-        with pytest.raises(ValueError, match="wrong mode"):
-            im = Image.new("L", (10, 10), 0)
-            im.im.color_lut_3d("L", Image.LINEAR, *self.generate_identity_table(3, 3))
-
-        with pytest.raises(ValueError, match="wrong mode"):
-            im = Image.new("RGB", (10, 10), 0)
             im.im.color_lut_3d(
-                "RGBA", Image.LINEAR, *self.generate_identity_table(3, 3)
+                "RGB", Image.Resampling.BILINEAR, *self.generate_identity_table(3, 3)
             )

         with pytest.raises(ValueError, match="wrong mode"):
             im = Image.new("RGB", (10, 10), 0)
-            im.im.color_lut_3d("RGB", Image.LINEAR, *self.generate_identity_table(4, 3))
+            im.im.color_lut_3d(
+                "L", Image.Resampling.BILINEAR, *self.generate_identity_table(3, 3)
+            )
+
+        with pytest.raises(ValueError, match="wrong mode"):
+            im = Image.new("L", (10, 10), 0)
+            im.im.color_lut_3d(
+                "L", Image.Resampling.BILINEAR, *self.generate_identity_table(3, 3)
+            )
+
+        with pytest.raises(ValueError, match="wrong mode"):
+            im = Image.new("RGB", (10, 10), 0)
+            im.im.color_lut_3d(
+                "RGBA", Image.Resampling.BILINEAR, *self.generate_identity_table(3, 3)
+            )
+
+        with pytest.raises(ValueError, match="wrong mode"):
+            im = Image.new("RGB", (10, 10), 0)
+            im.im.color_lut_3d(
+                "RGB", Image.Resampling.BILINEAR, *self.generate_identity_table(4, 3)
+            )

     def test_correct_mode(self):
         im = Image.new("RGBA", (10, 10), 0)
-        im.im.color_lut_3d("RGBA", Image.LINEAR, *self.generate_identity_table(3, 3))
+        im.im.color_lut_3d(
+            "RGBA", Image.Resampling.BILINEAR, *self.generate_identity_table(3, 3)
+        )

         im = Image.new("RGBA", (10, 10), 0)
-        im.im.color_lut_3d("RGBA", Image.LINEAR, *self.generate_identity_table(4, 3))
+        im.im.color_lut_3d(
+            "RGBA", Image.Resampling.BILINEAR, *self.generate_identity_table(4, 3)
+        )

         im = Image.new("RGB", (10, 10), 0)
-        im.im.color_lut_3d("HSV", Image.LINEAR, *self.generate_identity_table(3, 3))
+        im.im.color_lut_3d(
+            "HSV", Image.Resampling.BILINEAR, *self.generate_identity_table(3, 3)
+        )

         im = Image.new("RGB", (10, 10), 0)
-        im.im.color_lut_3d("RGBA", Image.LINEAR, *self.generate_identity_table(4, 3))
+        im.im.color_lut_3d(
+            "RGBA", Image.Resampling.BILINEAR, *self.generate_identity_table(4, 3)
+        )

     def test_identities(self):
         g = Image.linear_gradient("L")
         im = Image.merge(
-            "RGB", [g, g.transpose(Image.ROTATE_90), g.transpose(Image.ROTATE_180)]
+            "RGB",
+            [
+                g,
+                g.transpose(Image.Transpose.ROTATE_90),
+                g.transpose(Image.Transpose.ROTATE_180),
+            ],
         )

         # Fast test with small cubes
@@ -152,7 +203,9 @@ class TestColorLut3DCoreAPI:
                 im,
                 im._new(
                     im.im.color_lut_3d(
-                        "RGB", Image.LINEAR, *self.generate_identity_table(3, size)
+                        "RGB",
+                        Image.Resampling.BILINEAR,
+                        *self.generate_identity_table(3, size),
                     )
                 ),
             )
@@ -162,7 +215,9 @@ class TestColorLut3DCoreAPI:
             im,
             im._new(
                 im.im.color_lut_3d(
-                    "RGB", Image.LINEAR, *self.generate_identity_table(3, (2, 2, 65))
+                    "RGB",
+                    Image.Resampling.BILINEAR,
+                    *self.generate_identity_table(3, (2, 2, 65)),
                 )
             ),
         )
@@ -170,7 +225,12 @@ class TestColorLut3DCoreAPI:
     def test_identities_4_channels(self):
         g = Image.linear_gradient("L")
         im = Image.merge(
-            "RGB", [g, g.transpose(Image.ROTATE_90), g.transpose(Image.ROTATE_180)]
+            "RGB",
+            [
+                g,
+                g.transpose(Image.Transpose.ROTATE_90),
+                g.transpose(Image.Transpose.ROTATE_180),
+            ],
         )

         # Red channel copied to alpha
@@ -178,7 +238,9 @@ class TestColorLut3DCoreAPI:
             Image.merge("RGBA", (im.split() * 2)[:4]),
             im._new(
                 im.im.color_lut_3d(
-                    "RGBA", Image.LINEAR, *self.generate_identity_table(4, 17)
+                    "RGBA",
+                    Image.Resampling.BILINEAR,
+                    *self.generate_identity_table(4, 17),
                 )
             ),
         )
@@ -189,9 +251,9 @@ class TestColorLut3DCoreAPI:
             "RGBA",
             [
                 g,
-                g.transpose(Image.ROTATE_90),
-                g.transpose(Image.ROTATE_180),
-                g.transpose(Image.ROTATE_270),
+                g.transpose(Image.Transpose.ROTATE_90),
+                g.transpose(Image.Transpose.ROTATE_180),
+                g.transpose(Image.Transpose.ROTATE_270),
             ],
         )

@@ -199,7 +261,9 @@ class TestColorLut3DCoreAPI:
             im,
             im._new(
                 im.im.color_lut_3d(
-                    "RGBA", Image.LINEAR, *self.generate_identity_table(3, 17)
+                    "RGBA",
+                    Image.Resampling.BILINEAR,
+                    *self.generate_identity_table(3, 17),
                 )
             ),
         )
@@ -207,14 +271,19 @@ class TestColorLut3DCoreAPI:
     def test_channels_order(self):
         g = Image.linear_gradient("L")
         im = Image.merge(
-            "RGB", [g, g.transpose(Image.ROTATE_90), g.transpose(Image.ROTATE_180)]
+            "RGB",
+            [
+                g,
+                g.transpose(Image.Transpose.ROTATE_90),
+                g.transpose(Image.Transpose.ROTATE_180),
+            ],
         )

         # Reverse channels by splitting and using table
         # fmt: off
         assert_image_equal(
             Image.merge('RGB', im.split()[::-1]),
-            im._new(im.im.color_lut_3d('RGB', Image.LINEAR,
+            im._new(im.im.color_lut_3d('RGB', Image.Resampling.BILINEAR,
                     3, 2, 2, 2, [
                         0, 0, 0, 0, 0, 1,
                         0, 1, 0, 0, 1, 1,
@@ -227,11 +296,16 @@ class TestColorLut3DCoreAPI:
     def test_overflow(self):
         g = Image.linear_gradient("L")
         im = Image.merge(
-            "RGB", [g, g.transpose(Image.ROTATE_90), g.transpose(Image.ROTATE_180)]
+            "RGB",
+            [
+                g,
+                g.transpose(Image.Transpose.ROTATE_90),
+                g.transpose(Image.Transpose.ROTATE_180),
+            ],
         )

         # fmt: off
-        transformed = im._new(im.im.color_lut_3d('RGB', Image.LINEAR,
+        transformed = im._new(im.im.color_lut_3d('RGB', Image.Resampling.BILINEAR,
                               3, 2, 2, 2,
                               [
                                   -1, -1, -1, 2, -1, -1,
@@ -251,7 +325,7 @@ class TestColorLut3DCoreAPI:
         assert transformed[205, 205] == (255, 255, 0)

         # fmt: off
-        transformed = im._new(im.im.color_lut_3d('RGB', Image.LINEAR,
+        transformed = im._new(im.im.color_lut_3d('RGB', Image.Resampling.BILINEAR,
                               3, 2, 2, 2,
                               [
                                   -3, -3, -3, 5, -3, -3,
@@ -354,7 +428,12 @@ class TestColorLut3DFilter:
     def test_numpy_formats(self):
         g = Image.linear_gradient("L")
         im = Image.merge(
-            "RGB", [g, g.transpose(Image.ROTATE_90), g.transpose(Image.ROTATE_180)]
+            "RGB",
+            [
+                g,
+                g.transpose(Image.Transpose.ROTATE_90),
+                g.transpose(Image.Transpose.ROTATE_180),
+            ],
         )

         lut = ImageFilter.Color3DLUT.generate((7, 9, 11), lambda r, g, b: (r, g, b))
@@ -445,7 +524,12 @@ class TestGenerateColorLut3D:

         g = Image.linear_gradient("L")
         im = Image.merge(
-            "RGB", [g, g.transpose(Image.ROTATE_90), g.transpose(Image.ROTATE_180)]
+            "RGB",
+            [
+                g,
+                g.transpose(Image.Transpose.ROTATE_90),
+                g.transpose(Image.Transpose.ROTATE_180),
+            ],
         )
         assert im == im.filter(lut)

@@ -120,9 +120,9 @@ def test_apng_dispose_op_previous_frame():
     #     save_all=True,
     #     append_images=[green, blue],
     #     disposal=[
-    #         PngImagePlugin.APNG_DISPOSE_OP_NONE,
-    #         PngImagePlugin.APNG_DISPOSE_OP_PREVIOUS,
-    #         PngImagePlugin.APNG_DISPOSE_OP_PREVIOUS
+    #         PngImagePlugin.Disposal.OP_NONE,
+    #         PngImagePlugin.Disposal.OP_PREVIOUS,
+    #         PngImagePlugin.Disposal.OP_PREVIOUS
     #     ],
     # )
     with Image.open("Tests/images/apng/dispose_op_previous_frame.png") as im:
@@ -455,31 +455,31 @@ def test_apng_save_disposal(tmp_path):
     green = Image.new("RGBA", size, (0, 255, 0, 255))
     transparent = Image.new("RGBA", size, (0, 0, 0, 0))

-    # test APNG_DISPOSE_OP_NONE
+    # test OP_NONE
     red.save(
         test_file,
         save_all=True,
         append_images=[green, transparent],
-        disposal=PngImagePlugin.APNG_DISPOSE_OP_NONE,
-        blend=PngImagePlugin.APNG_BLEND_OP_OVER,
+        disposal=PngImagePlugin.Disposal.OP_NONE,
+        blend=PngImagePlugin.Blend.OP_OVER,
     )
     with Image.open(test_file) as im:
         im.seek(2)
         assert im.getpixel((0, 0)) == (0, 255, 0, 255)
         assert im.getpixel((64, 32)) == (0, 255, 0, 255)

-    # test APNG_DISPOSE_OP_BACKGROUND
+    # test OP_BACKGROUND
     disposal = [
-        PngImagePlugin.APNG_DISPOSE_OP_NONE,
-        PngImagePlugin.APNG_DISPOSE_OP_BACKGROUND,
-        PngImagePlugin.APNG_DISPOSE_OP_NONE,
+        PngImagePlugin.Disposal.OP_NONE,
+        PngImagePlugin.Disposal.OP_BACKGROUND,
+        PngImagePlugin.Disposal.OP_NONE,
     ]
     red.save(
         test_file,
         save_all=True,
         append_images=[red, transparent],
         disposal=disposal,
-        blend=PngImagePlugin.APNG_BLEND_OP_OVER,
+        blend=PngImagePlugin.Blend.OP_OVER,
     )
     with Image.open(test_file) as im:
         im.seek(2)
@@ -487,26 +487,26 @@ def test_apng_save_disposal(tmp_path):
         assert im.getpixel((64, 32)) == (0, 0, 0, 0)

     disposal = [
-        PngImagePlugin.APNG_DISPOSE_OP_NONE,
-        PngImagePlugin.APNG_DISPOSE_OP_BACKGROUND,
+        PngImagePlugin.Disposal.OP_NONE,
+        PngImagePlugin.Disposal.OP_BACKGROUND,
     ]
     red.save(
         test_file,
         save_all=True,
         append_images=[green],
         disposal=disposal,
-        blend=PngImagePlugin.APNG_BLEND_OP_OVER,
+        blend=PngImagePlugin.Blend.OP_OVER,
     )
     with Image.open(test_file) as im:
         im.seek(1)
         assert im.getpixel((0, 0)) == (0, 255, 0, 255)
         assert im.getpixel((64, 32)) == (0, 255, 0, 255)

-    # test APNG_DISPOSE_OP_PREVIOUS
+    # test OP_PREVIOUS
     disposal = [
-        PngImagePlugin.APNG_DISPOSE_OP_NONE,
-        PngImagePlugin.APNG_DISPOSE_OP_PREVIOUS,
-        PngImagePlugin.APNG_DISPOSE_OP_NONE,
+        PngImagePlugin.Disposal.OP_NONE,
+        PngImagePlugin.Disposal.OP_PREVIOUS,
+        PngImagePlugin.Disposal.OP_NONE,
     ]
     red.save(
         test_file,
@@ -514,7 +514,7 @@ def test_apng_save_disposal(tmp_path):
         append_images=[green, red, transparent],
         default_image=True,
         disposal=disposal,
-        blend=PngImagePlugin.APNG_BLEND_OP_OVER,
+        blend=PngImagePlugin.Blend.OP_OVER,
     )
     with Image.open(test_file) as im:
         im.seek(3)
@@ -522,15 +522,15 @@ def test_apng_save_disposal(tmp_path):
         assert im.getpixel((64, 32)) == (0, 255, 0, 255)

     disposal = [
-        PngImagePlugin.APNG_DISPOSE_OP_NONE,
-        PngImagePlugin.APNG_DISPOSE_OP_PREVIOUS,
+        PngImagePlugin.Disposal.OP_NONE,
+        PngImagePlugin.Disposal.OP_PREVIOUS,
     ]
     red.save(
         test_file,
         save_all=True,
         append_images=[green],
         disposal=disposal,
-        blend=PngImagePlugin.APNG_BLEND_OP_OVER,
+        blend=PngImagePlugin.Blend.OP_OVER,
     )
     with Image.open(test_file) as im:
         im.seek(1)
|
||||||
assert im.getpixel((64, 32)) == (0, 255, 0, 255)
|
assert im.getpixel((64, 32)) == (0, 255, 0, 255)
|
||||||
|
|
||||||
# test info disposal
|
# test info disposal
|
||||||
red.info["disposal"] = PngImagePlugin.APNG_DISPOSE_OP_BACKGROUND
|
red.info["disposal"] = PngImagePlugin.Disposal.OP_BACKGROUND
|
||||||
red.save(
|
red.save(
|
||||||
test_file,
|
test_file,
|
||||||
save_all=True,
|
save_all=True,
|
||||||
|
@@ -556,12 +556,12 @@ def test_apng_save_disposal_previous(tmp_path):
     red = Image.new("RGBA", size, (255, 0, 0, 255))
     green = Image.new("RGBA", size, (0, 255, 0, 255))

-    # test APNG_DISPOSE_OP_NONE
+    # test OP_NONE
     transparent.save(
         test_file,
         save_all=True,
         append_images=[red, green],
-        disposal=PngImagePlugin.APNG_DISPOSE_OP_PREVIOUS,
+        disposal=PngImagePlugin.Disposal.OP_PREVIOUS,
     )
     with Image.open(test_file) as im:
         im.seek(2)
@@ -576,17 +576,17 @@ def test_apng_save_blend(tmp_path):
     green = Image.new("RGBA", size, (0, 255, 0, 255))
     transparent = Image.new("RGBA", size, (0, 0, 0, 0))

-    # test APNG_BLEND_OP_SOURCE on solid color
+    # test OP_SOURCE on solid color
     blend = [
-        PngImagePlugin.APNG_BLEND_OP_OVER,
-        PngImagePlugin.APNG_BLEND_OP_SOURCE,
+        PngImagePlugin.Blend.OP_OVER,
+        PngImagePlugin.Blend.OP_SOURCE,
     ]
     red.save(
         test_file,
         save_all=True,
         append_images=[red, green],
         default_image=True,
-        disposal=PngImagePlugin.APNG_DISPOSE_OP_NONE,
+        disposal=PngImagePlugin.Disposal.OP_NONE,
         blend=blend,
     )
     with Image.open(test_file) as im:
@@ -594,17 +594,17 @@ def test_apng_save_blend(tmp_path):
         assert im.getpixel((0, 0)) == (0, 255, 0, 255)
         assert im.getpixel((64, 32)) == (0, 255, 0, 255)

-    # test APNG_BLEND_OP_SOURCE on transparent color
+    # test OP_SOURCE on transparent color
     blend = [
-        PngImagePlugin.APNG_BLEND_OP_OVER,
-        PngImagePlugin.APNG_BLEND_OP_SOURCE,
+        PngImagePlugin.Blend.OP_OVER,
+        PngImagePlugin.Blend.OP_SOURCE,
     ]
     red.save(
         test_file,
         save_all=True,
         append_images=[red, transparent],
         default_image=True,
-        disposal=PngImagePlugin.APNG_DISPOSE_OP_NONE,
+        disposal=PngImagePlugin.Disposal.OP_NONE,
         blend=blend,
     )
     with Image.open(test_file) as im:
@@ -612,14 +612,14 @@ def test_apng_save_blend(tmp_path):
         assert im.getpixel((0, 0)) == (0, 0, 0, 0)
         assert im.getpixel((64, 32)) == (0, 0, 0, 0)

-    # test APNG_BLEND_OP_OVER
+    # test OP_OVER
     red.save(
         test_file,
         save_all=True,
         append_images=[green, transparent],
         default_image=True,
-        disposal=PngImagePlugin.APNG_DISPOSE_OP_NONE,
-        blend=PngImagePlugin.APNG_BLEND_OP_OVER,
+        disposal=PngImagePlugin.Disposal.OP_NONE,
+        blend=PngImagePlugin.Blend.OP_OVER,
     )
     with Image.open(test_file) as im:
         im.seek(1)
@@ -630,8 +630,18 @@ def test_apng_save_blend(tmp_path):
         assert im.getpixel((64, 32)) == (0, 255, 0, 255)

     # test info blend
-    red.info["blend"] = PngImagePlugin.APNG_BLEND_OP_OVER
+    red.info["blend"] = PngImagePlugin.Blend.OP_OVER
     red.save(test_file, save_all=True, append_images=[green, transparent])
     with Image.open(test_file) as im:
         im.seek(2)
         assert im.getpixel((0, 0)) == (0, 255, 0, 255)
+
+
+def test_constants_deprecation():
+    for enum, prefix in {
+        PngImagePlugin.Disposal: "APNG_DISPOSE_",
+        PngImagePlugin.Blend: "APNG_BLEND_",
+    }.items():
+        for name in enum.__members__:
+            with pytest.warns(DeprecationWarning):
+                assert getattr(PngImagePlugin, prefix + name) == enum[name]
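The new test_constants_deprecation above only asserts the observable behaviour: the legacy APNG_* names still resolve to the corresponding enum members and emit a DeprecationWarning when accessed. The diff does not show how PngImagePlugin implements this; one plausible shim, sketched here purely as an assumption (a module-level __getattr__ per PEP 562, with hypothetical helper names), would be:

    import warnings
    from enum import IntEnum

    class Disposal(IntEnum):
        # APNG dispose_op values from the APNG specification
        OP_NONE = 0
        OP_BACKGROUND = 1
        OP_PREVIOUS = 2

    # Map legacy constant names to enum members (hypothetical helper table).
    _DEPRECATED = {f"APNG_DISPOSE_{m.name}": m for m in Disposal}

    def __getattr__(name):
        # Resolve old module-level constants lazily, warning on each access.
        if name in _DEPRECATED:
            warnings.warn(f"{name} is deprecated", DeprecationWarning, stacklevel=2)
            return _DEPRECATED[name]
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
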
@@ -1,6 +1,6 @@
 import pytest

-from PIL import Image
+from PIL import BlpImagePlugin, Image

 from .helper import assert_image_equal_tofile

@@ -37,3 +37,14 @@ def test_crashes(test_file):
         with Image.open(f) as im:
             with pytest.raises(OSError):
                 im.load()
+
+
+def test_constants_deprecation():
+    for enum, prefix in {
+        BlpImagePlugin.Format: "BLP_FORMAT_",
+        BlpImagePlugin.Encoding: "BLP_ENCODING_",
+        BlpImagePlugin.AlphaEncoding: "BLP_ALPHA_ENCODING_",
+    }.items():
+        for name in enum.__members__:
+            with pytest.warns(DeprecationWarning):
+                assert getattr(BlpImagePlugin, prefix + name) == enum[name]
@@ -1,4 +1,6 @@
-from PIL import Image
+import pytest
+
+from PIL import FtexImagePlugin, Image

 from .helper import assert_image_equal_tofile, assert_image_similar

@@ -12,3 +14,12 @@ def test_load_dxt1():
     with Image.open("Tests/images/ftex_dxt1.ftc") as im:
         with Image.open("Tests/images/ftex_dxt1.png") as target:
             assert_image_similar(im, target.convert("RGBA"), 15)
+
+
+def test_constants_deprecation():
+    for enum, prefix in {
+        FtexImagePlugin.Format: "FORMAT_",
+    }.items():
+        for name in enum.__members__:
+            with pytest.warns(DeprecationWarning):
+                assert getattr(FtexImagePlugin, prefix + name) == enum[name]
@@ -210,8 +210,8 @@ def test_palette_handling(tmp_path):
     with Image.open(TEST_GIF) as im:
         im = im.convert("RGB")

-        im = im.resize((100, 100), Image.LANCZOS)
-        im2 = im.convert("P", palette=Image.ADAPTIVE, colors=256)
+        im = im.resize((100, 100), Image.Resampling.LANCZOS)
+        im2 = im.convert("P", palette=Image.Palette.ADAPTIVE, colors=256)

         f = str(tmp_path / "temp.gif")
         im2.save(f, optimize=True)

@@ -911,7 +911,7 @@ def test_save_I(tmp_path):
 def test_getdata():
     # Test getheader/getdata against legacy values.
     # Create a 'P' image with holes in the palette.
-    im = Image._wedge().resize((16, 16), Image.NEAREST)
+    im = Image._wedge().resize((16, 16), Image.Resampling.NEAREST)
     im.putpalette(ImagePalette.ImagePalette("RGB"))
     im.info = {"background": 0}

@@ -112,12 +112,9 @@ def test_older_icon():


 def test_jp2_icon():
-    # This icon was made by using Uli Kusterer's oldiconutil to replace
-    # the PNG images with JPEG 2000 ones. The advantage of doing this is
-    # that OS X 10.5 supports JPEG 2000 but not PNG; some commercial
-    # software therefore does just this.
-
-    # (oldiconutil is here: https://github.com/uliwitness/oldiconutil)
+    # This icon uses JPEG 2000 images instead of the PNG images.
+    # The advantage of doing this is that OS X 10.5 supports JPEG 2000
+    # but not PNG; some commercial software therefore does just this.

     if not ENABLE_JPEG2K:
         return
@@ -53,7 +53,9 @@ def test_save_to_bytes():
         assert im.mode == reloaded.mode
         assert (64, 64) == reloaded.size
         assert reloaded.format == "ICO"
-        assert_image_equal(reloaded, hopper().resize((64, 64), Image.LANCZOS))
+        assert_image_equal(
+            reloaded, hopper().resize((64, 64), Image.Resampling.LANCZOS)
+        )

     # The other one
     output.seek(0)

@@ -63,7 +65,9 @@ def test_save_to_bytes():
         assert im.mode == reloaded.mode
         assert (32, 32) == reloaded.size
         assert reloaded.format == "ICO"
-        assert_image_equal(reloaded, hopper().resize((32, 32), Image.LANCZOS))
+        assert_image_equal(
+            reloaded, hopper().resize((32, 32), Image.Resampling.LANCZOS)
+        )


 @pytest.mark.parametrize("mode", ("1", "L", "P", "RGB", "RGBA"))

@@ -80,7 +84,7 @@ def test_save_to_bytes_bmp(mode):
         assert "RGBA" == reloaded.mode
         assert (64, 64) == reloaded.size
         assert reloaded.format == "ICO"
-        im = hopper(mode).resize((64, 64), Image.LANCZOS).convert("RGBA")
+        im = hopper(mode).resize((64, 64), Image.Resampling.LANCZOS).convert("RGBA")
         assert_image_equal(reloaded, im)

     # The other one

@@ -91,7 +95,7 @@ def test_save_to_bytes_bmp(mode):
         assert "RGBA" == reloaded.mode
         assert (32, 32) == reloaded.size
         assert reloaded.format == "ICO"
-        im = hopper(mode).resize((32, 32), Image.LANCZOS).convert("RGBA")
+        im = hopper(mode).resize((32, 32), Image.Resampling.LANCZOS).convert("RGBA")
         assert_image_equal(reloaded, im)

@@ -271,7 +271,7 @@ class TestFileJpeg:
         del exif[0x8769]

         # Assert that it needs to be transposed
-        assert exif[0x0112] == Image.TRANSVERSE
+        assert exif[0x0112] == Image.Transpose.TRANSVERSE

         # Assert that the GPS IFD is present and empty
         assert exif.get_ifd(0x8825) == {}
@@ -291,7 +291,7 @@ def test_subsampling_decode(name):
         # RGB reference images are downscaled
         epsilon = 3e-3
         width, height = width * 2, height * 2
-    expected = im2.resize((width, height), Image.NEAREST)
+    expected = im2.resize((width, height), Image.Resampling.NEAREST)
     assert_image_similar(im, expected, epsilon)

@@ -112,7 +112,7 @@ class TestFileLibTiff(LibTiffTestCase):
         test_file = "Tests/images/hopper_g4_500.tif"
         with Image.open(test_file) as orig:
             out = str(tmp_path / "temp.tif")
-            rot = orig.transpose(Image.ROTATE_90)
+            rot = orig.transpose(Image.Transpose.ROTATE_90)
             assert rot.size == (500, 500)
             rot.save(out)

@@ -77,7 +77,7 @@ def to_rgb_colorsys(im):


 def test_wedge():
-    src = wedge().resize((3 * 32, 32), Image.BILINEAR)
+    src = wedge().resize((3 * 32, 32), Image.Resampling.BILINEAR)
     im = src.convert("HSV")
     comparable = to_hsv_colorsys(src)

@@ -813,6 +813,31 @@ class TestImage:
         with pytest.warns(DeprecationWarning):
             assert Image.CONTAINER == 2

+    def test_constants_deprecation(self):
+        with pytest.warns(DeprecationWarning):
+            assert Image.NEAREST == 0
+        with pytest.warns(DeprecationWarning):
+            assert Image.NONE == 0
+
+        with pytest.warns(DeprecationWarning):
+            assert Image.LINEAR == Image.Resampling.BILINEAR
+        with pytest.warns(DeprecationWarning):
+            assert Image.CUBIC == Image.Resampling.BICUBIC
+        with pytest.warns(DeprecationWarning):
+            assert Image.ANTIALIAS == Image.Resampling.LANCZOS
+
+        for enum in (
+            Image.Transpose,
+            Image.Transform,
+            Image.Resampling,
+            Image.Dither,
+            Image.Palette,
+            Image.Quantize,
+        ):
+            for name in enum.__members__:
+                with pytest.warns(DeprecationWarning):
+                    assert getattr(Image, name) == enum[name]
+
     @pytest.mark.parametrize(
         "path",
         [
@@ -76,6 +76,13 @@ def test_16bit_workaround():
     _test_float_conversion(im.convert("I"))


+def test_opaque():
+    alpha = hopper("P").convert("PA").getchannel("A")
+
+    solid = Image.new("L", (128, 128), 255)
+    assert_image_equal(alpha, solid)
+
+
 def test_rgba_p():
     im = hopper("RGBA")
     im.putalpha(hopper("L"))

@@ -136,7 +143,7 @@ def test_trns_l(tmp_path):
     assert "transparency" in im_p.info
     im_p.save(f)

-    im_p = im.convert("P", palette=Image.ADAPTIVE)
+    im_p = im.convert("P", palette=Image.Palette.ADAPTIVE)
     assert "transparency" in im_p.info
     im_p.save(f)

@@ -159,13 +166,13 @@ def test_trns_RGB(tmp_path):
     assert "transparency" not in im_rgba.info
     im_rgba.save(f)

-    im_p = pytest.warns(UserWarning, im.convert, "P", palette=Image.ADAPTIVE)
+    im_p = pytest.warns(UserWarning, im.convert, "P", palette=Image.Palette.ADAPTIVE)
     assert "transparency" not in im_p.info
     im_p.save(f)

     im = Image.new("RGB", (1, 1))
     im.info["transparency"] = im.getpixel((0, 0))
-    im_p = im.convert("P", palette=Image.ADAPTIVE)
+    im_p = im.convert("P", palette=Image.Palette.ADAPTIVE)
     assert im_p.info["transparency"] == im_p.getpixel((0, 0))
     im_p.save(f)

@@ -14,7 +14,7 @@ def test_sanity():

 def test_roundtrip():
     def getdata(mode):
-        im = hopper(mode).resize((32, 30), Image.NEAREST)
+        im = hopper(mode).resize((32, 30), Image.Resampling.NEAREST)
         data = im.getdata()
         return data[0], len(data), len(list(data))

@@ -45,7 +45,7 @@ class TestImagingPaste:

     @cached_property
     def mask_L(self):
-        return self.gradient_L.transpose(Image.ROTATE_270)
+        return self.gradient_L.transpose(Image.Transpose.ROTATE_270)

     @cached_property
     def gradient_L(self):

@@ -62,8 +62,8 @@ class TestImagingPaste:
             "RGB",
             [
                 self.gradient_L,
-                self.gradient_L.transpose(Image.ROTATE_90),
-                self.gradient_L.transpose(Image.ROTATE_180),
+                self.gradient_L.transpose(Image.Transpose.ROTATE_90),
+                self.gradient_L.transpose(Image.Transpose.ROTATE_180),
             ],
         )

@@ -73,9 +73,9 @@ class TestImagingPaste:
             "RGBA",
             [
                 self.gradient_L,
-                self.gradient_L.transpose(Image.ROTATE_90),
-                self.gradient_L.transpose(Image.ROTATE_180),
-                self.gradient_L.transpose(Image.ROTATE_270),
+                self.gradient_L.transpose(Image.Transpose.ROTATE_90),
+                self.gradient_L.transpose(Image.Transpose.ROTATE_180),
+                self.gradient_L.transpose(Image.Transpose.ROTATE_270),
             ],
         )

@@ -85,9 +85,9 @@ class TestImagingPaste:
             "RGBa",
             [
                 self.gradient_L,
-                self.gradient_L.transpose(Image.ROTATE_90),
-                self.gradient_L.transpose(Image.ROTATE_180),
-                self.gradient_L.transpose(Image.ROTATE_270),
+                self.gradient_L.transpose(Image.Transpose.ROTATE_90),
+                self.gradient_L.transpose(Image.Transpose.ROTATE_180),
+                self.gradient_L.transpose(Image.Transpose.ROTATE_270),
             ],
         )

@@ -62,3 +62,17 @@ def test_putpalette_with_alpha_values():
     im.putpalette(palette_with_alpha_values, "RGBA")

     assert_image_equal(im.convert("RGBA"), expected)
+
+
+@pytest.mark.parametrize(
+    "mode, palette",
+    (
+        ("RGBA", (1, 2, 3, 4)),
+        ("RGBAX", (1, 2, 3, 4, 0)),
+    ),
+)
+def test_rgba_palette(mode, palette):
+    im = Image.new("P", (1, 1))
+    im.putpalette(palette, mode)
+    assert im.getpalette() == [1, 2, 3]
+    assert im.palette.colors == {(1, 2, 3, 4): 0}
@@ -25,7 +25,7 @@ def test_libimagequant_quantize():
     libimagequant = parse_version(features.version_feature("libimagequant"))
     if libimagequant < parse_version("4"):
         pytest.skip("Fails with libimagequant earlier than 4.0.0 on ppc64le")
-    converted = image.quantize(100, Image.LIBIMAGEQUANT)
+    converted = image.quantize(100, Image.Quantize.LIBIMAGEQUANT)
     assert converted.mode == "P"
     assert_image_similar(converted.convert("RGB"), image, 15)
     assert len(converted.getcolors()) == 100

@@ -33,7 +33,7 @@ def test_libimagequant_quantize():

 def test_octree_quantize():
     image = hopper()
-    converted = image.quantize(100, Image.FASTOCTREE)
+    converted = image.quantize(100, Image.Quantize.FASTOCTREE)
     assert converted.mode == "P"
     assert_image_similar(converted.convert("RGB"), image, 20)
     assert len(converted.getcolors()) == 100

@@ -96,10 +96,10 @@ def test_transparent_colors_equal():
 @pytest.mark.parametrize(
     "method, color",
     (
-        (Image.MEDIANCUT, (0, 0, 0)),
-        (Image.MAXCOVERAGE, (0, 0, 0)),
-        (Image.FASTOCTREE, (0, 0, 0)),
-        (Image.FASTOCTREE, (0, 0, 0, 0)),
+        (Image.Quantize.MEDIANCUT, (0, 0, 0)),
+        (Image.Quantize.MAXCOVERAGE, (0, 0, 0)),
+        (Image.Quantize.FASTOCTREE, (0, 0, 0)),
+        (Image.Quantize.FASTOCTREE, (0, 0, 0, 0)),
     ),
 )
 def test_palette(method, color):

@@ -108,3 +108,18 @@ def test_palette(method, color):
     converted = im.quantize(method=method)
     converted_px = converted.load()
     assert converted_px[0, 0] == converted.palette.colors[color]
+
+
+def test_small_palette():
+    # Arrange
+    im = hopper()
+
+    colors = (255, 0, 0, 0, 0, 255)
+    p = Image.new("P", (1, 1))
+    p.putpalette(colors)
+
+    # Act
+    im = im.quantize(palette=p)
+
+    # Assert
+    assert len(im.getcolors()) == 2
@@ -97,7 +97,7 @@ def get_image(mode):
     bands = [gradients_image]
     for _ in mode_info.bands[1:]:
         # rotate previous image
-        band = bands[-1].transpose(Image.ROTATE_90)
+        band = bands[-1].transpose(Image.Transpose.ROTATE_90)
         bands.append(band)
     # Correct alpha channel by transforming completely transparent pixels.
     # Low alpha values also emphasize error after alpha multiplication.

@@ -138,24 +138,26 @@ def compare_reduce_with_reference(im, factor, average_diff=0.4, max_diff=1):
     reference = Image.new(im.mode, reduced.size)
     area_size = (im.size[0] // factor[0], im.size[1] // factor[1])
     area_box = (0, 0, area_size[0] * factor[0], area_size[1] * factor[1])
-    area = im.resize(area_size, Image.BOX, area_box)
+    area = im.resize(area_size, Image.Resampling.BOX, area_box)
     reference.paste(area, (0, 0))

     if area_size[0] < reduced.size[0]:
         assert reduced.size[0] - area_size[0] == 1
         last_column_box = (area_box[2], 0, im.size[0], area_box[3])
-        last_column = im.resize((1, area_size[1]), Image.BOX, last_column_box)
+        last_column = im.resize(
+            (1, area_size[1]), Image.Resampling.BOX, last_column_box
+        )
         reference.paste(last_column, (area_size[0], 0))

     if area_size[1] < reduced.size[1]:
         assert reduced.size[1] - area_size[1] == 1
         last_row_box = (0, area_box[3], area_box[2], im.size[1])
-        last_row = im.resize((area_size[0], 1), Image.BOX, last_row_box)
+        last_row = im.resize((area_size[0], 1), Image.Resampling.BOX, last_row_box)
         reference.paste(last_row, (0, area_size[1]))

     if area_size[0] < reduced.size[0] and area_size[1] < reduced.size[1]:
         last_pixel_box = (area_box[2], area_box[3], im.size[0], im.size[1])
-        last_pixel = im.resize((1, 1), Image.BOX, last_pixel_box)
+        last_pixel = im.resize((1, 1), Image.Resampling.BOX, last_pixel_box)
         reference.paste(last_pixel, area_size)

     assert_compare_images(reduced, reference, average_diff, max_diff)
@@ -24,7 +24,7 @@ class TestImagingResampleVulnerability:
         ):
             with pytest.raises(MemoryError):
                 # any resampling filter will do here
-                im.im.resize((xsize, ysize), Image.BILINEAR)
+                im.im.resize((xsize, ysize), Image.Resampling.BILINEAR)

     def test_invalid_size(self):
         im = hopper()

@@ -103,7 +103,7 @@ class TestImagingCoreResampleAccuracy:
     def test_reduce_box(self):
         for mode in ["RGBX", "RGB", "La", "L"]:
             case = self.make_case(mode, (8, 8), 0xE1)
-            case = case.resize((4, 4), Image.BOX)
+            case = case.resize((4, 4), Image.Resampling.BOX)
             # fmt: off
             data = ("e1 e1"
                     "e1 e1")

@@ -114,7 +114,7 @@ class TestImagingCoreResampleAccuracy:
     def test_reduce_bilinear(self):
         for mode in ["RGBX", "RGB", "La", "L"]:
             case = self.make_case(mode, (8, 8), 0xE1)
-            case = case.resize((4, 4), Image.BILINEAR)
+            case = case.resize((4, 4), Image.Resampling.BILINEAR)
             # fmt: off
             data = ("e1 c9"
                     "c9 b7")

@@ -125,7 +125,7 @@ class TestImagingCoreResampleAccuracy:
     def test_reduce_hamming(self):
         for mode in ["RGBX", "RGB", "La", "L"]:
            case = self.make_case(mode, (8, 8), 0xE1)
-            case = case.resize((4, 4), Image.HAMMING)
+            case = case.resize((4, 4), Image.Resampling.HAMMING)
             # fmt: off
             data = ("e1 da"
                     "da d3")

@@ -136,7 +136,7 @@ class TestImagingCoreResampleAccuracy:
     def test_reduce_bicubic(self):
         for mode in ["RGBX", "RGB", "La", "L"]:
             case = self.make_case(mode, (12, 12), 0xE1)
-            case = case.resize((6, 6), Image.BICUBIC)
+            case = case.resize((6, 6), Image.Resampling.BICUBIC)
             # fmt: off
             data = ("e1 e3 d4"
                     "e3 e5 d6"

@@ -148,7 +148,7 @@ class TestImagingCoreResampleAccuracy:
     def test_reduce_lanczos(self):
         for mode in ["RGBX", "RGB", "La", "L"]:
             case = self.make_case(mode, (16, 16), 0xE1)
-            case = case.resize((8, 8), Image.LANCZOS)
+            case = case.resize((8, 8), Image.Resampling.LANCZOS)
             # fmt: off
             data = ("e1 e0 e4 d7"
                     "e0 df e3 d6"

@@ -161,7 +161,7 @@ class TestImagingCoreResampleAccuracy:
     def test_enlarge_box(self):
         for mode in ["RGBX", "RGB", "La", "L"]:
             case = self.make_case(mode, (2, 2), 0xE1)
-            case = case.resize((4, 4), Image.BOX)
+            case = case.resize((4, 4), Image.Resampling.BOX)
             # fmt: off
             data = ("e1 e1"
                     "e1 e1")

@@ -172,7 +172,7 @@ class TestImagingCoreResampleAccuracy:
     def test_enlarge_bilinear(self):
         for mode in ["RGBX", "RGB", "La", "L"]:
             case = self.make_case(mode, (2, 2), 0xE1)
-            case = case.resize((4, 4), Image.BILINEAR)
+            case = case.resize((4, 4), Image.Resampling.BILINEAR)
             # fmt: off
             data = ("e1 b0"
                     "b0 98")

@@ -183,7 +183,7 @@ class TestImagingCoreResampleAccuracy:
     def test_enlarge_hamming(self):
         for mode in ["RGBX", "RGB", "La", "L"]:
             case = self.make_case(mode, (2, 2), 0xE1)
-            case = case.resize((4, 4), Image.HAMMING)
+            case = case.resize((4, 4), Image.Resampling.HAMMING)
             # fmt: off
             data = ("e1 d2"
                     "d2 c5")

@@ -194,7 +194,7 @@ class TestImagingCoreResampleAccuracy:
     def test_enlarge_bicubic(self):
         for mode in ["RGBX", "RGB", "La", "L"]:
             case = self.make_case(mode, (4, 4), 0xE1)
-            case = case.resize((8, 8), Image.BICUBIC)
+            case = case.resize((8, 8), Image.Resampling.BICUBIC)
             # fmt: off
             data = ("e1 e5 ee b9"
                     "e5 e9 f3 bc"

@@ -207,7 +207,7 @@ class TestImagingCoreResampleAccuracy:
     def test_enlarge_lanczos(self):
         for mode in ["RGBX", "RGB", "La", "L"]:
             case = self.make_case(mode, (6, 6), 0xE1)
-            case = case.resize((12, 12), Image.LANCZOS)
+            case = case.resize((12, 12), Image.Resampling.LANCZOS)
             data = (
                 "e1 e0 db ed f5 b8"
                 "e0 df da ec f3 b7"

@@ -220,7 +220,9 @@ class TestImagingCoreResampleAccuracy:
             self.check_case(channel, self.make_sample(data, (12, 12)))

     def test_box_filter_correct_range(self):
-        im = Image.new("RGB", (8, 8), "#1688ff").resize((100, 100), Image.BOX)
+        im = Image.new("RGB", (8, 8), "#1688ff").resize(
+            (100, 100), Image.Resampling.BOX
+        )
         ref = Image.new("RGB", (100, 100), "#1688ff")
         assert_image_equal(im, ref)

@@ -228,7 +230,7 @@ class TestImagingCoreResampleAccuracy:
 class TestCoreResampleConsistency:
     def make_case(self, mode, fill):
         im = Image.new(mode, (512, 9), fill)
-        return im.resize((9, 512), Image.LANCZOS), im.load()[0, 0]
+        return im.resize((9, 512), Image.Resampling.LANCZOS), im.load()[0, 0]

     def run_case(self, case):
         channel, color = case

@@ -283,20 +285,20 @@ class TestCoreResampleAlphaCorrect:
     @pytest.mark.xfail(reason="Current implementation isn't precise enough")
     def test_levels_rgba(self):
         case = self.make_levels_case("RGBA")
-        self.run_levels_case(case.resize((512, 32), Image.BOX))
-        self.run_levels_case(case.resize((512, 32), Image.BILINEAR))
-        self.run_levels_case(case.resize((512, 32), Image.HAMMING))
-        self.run_levels_case(case.resize((512, 32), Image.BICUBIC))
-        self.run_levels_case(case.resize((512, 32), Image.LANCZOS))
+        self.run_levels_case(case.resize((512, 32), Image.Resampling.BOX))
+        self.run_levels_case(case.resize((512, 32), Image.Resampling.BILINEAR))
+        self.run_levels_case(case.resize((512, 32), Image.Resampling.HAMMING))
+        self.run_levels_case(case.resize((512, 32), Image.Resampling.BICUBIC))
+        self.run_levels_case(case.resize((512, 32), Image.Resampling.LANCZOS))

     @pytest.mark.xfail(reason="Current implementation isn't precise enough")
     def test_levels_la(self):
         case = self.make_levels_case("LA")
-        self.run_levels_case(case.resize((512, 32), Image.BOX))
-        self.run_levels_case(case.resize((512, 32), Image.BILINEAR))
-        self.run_levels_case(case.resize((512, 32), Image.HAMMING))
-        self.run_levels_case(case.resize((512, 32), Image.BICUBIC))
-        self.run_levels_case(case.resize((512, 32), Image.LANCZOS))
+        self.run_levels_case(case.resize((512, 32), Image.Resampling.BOX))
+        self.run_levels_case(case.resize((512, 32), Image.Resampling.BILINEAR))
+        self.run_levels_case(case.resize((512, 32), Image.Resampling.HAMMING))
+        self.run_levels_case(case.resize((512, 32), Image.Resampling.BICUBIC))
+        self.run_levels_case(case.resize((512, 32), Image.Resampling.LANCZOS))

     def make_dirty_case(self, mode, clean_pixel, dirty_pixel):
         i = Image.new(mode, (64, 64), dirty_pixel)

@@ -321,19 +323,27 @@ class TestCoreResampleAlphaCorrect:

     def test_dirty_pixels_rgba(self):
         case = self.make_dirty_case("RGBA", (255, 255, 0, 128), (0, 0, 255, 0))
-        self.run_dirty_case(case.resize((20, 20), Image.BOX), (255, 255, 0))
-        self.run_dirty_case(case.resize((20, 20), Image.BILINEAR), (255, 255, 0))
-        self.run_dirty_case(case.resize((20, 20), Image.HAMMING), (255, 255, 0))
-        self.run_dirty_case(case.resize((20, 20), Image.BICUBIC), (255, 255, 0))
-        self.run_dirty_case(case.resize((20, 20), Image.LANCZOS), (255, 255, 0))
+        self.run_dirty_case(case.resize((20, 20), Image.Resampling.BOX), (255, 255, 0))
+        self.run_dirty_case(
+            case.resize((20, 20), Image.Resampling.BILINEAR), (255, 255, 0)
+        )
+        self.run_dirty_case(
+            case.resize((20, 20), Image.Resampling.HAMMING), (255, 255, 0)
+        )
+        self.run_dirty_case(
+            case.resize((20, 20), Image.Resampling.BICUBIC), (255, 255, 0)
+        )
+        self.run_dirty_case(
+            case.resize((20, 20), Image.Resampling.LANCZOS), (255, 255, 0)
+        )

     def test_dirty_pixels_la(self):
         case = self.make_dirty_case("LA", (255, 128), (0, 0))
-        self.run_dirty_case(case.resize((20, 20), Image.BOX), (255,))
-        self.run_dirty_case(case.resize((20, 20), Image.BILINEAR), (255,))
-        self.run_dirty_case(case.resize((20, 20), Image.HAMMING), (255,))
-        self.run_dirty_case(case.resize((20, 20), Image.BICUBIC), (255,))
-        self.run_dirty_case(case.resize((20, 20), Image.LANCZOS), (255,))
+        self.run_dirty_case(case.resize((20, 20), Image.Resampling.BOX), (255,))
+        self.run_dirty_case(case.resize((20, 20), Image.Resampling.BILINEAR), (255,))
+        self.run_dirty_case(case.resize((20, 20), Image.Resampling.HAMMING), (255,))
+        self.run_dirty_case(case.resize((20, 20), Image.Resampling.BICUBIC), (255,))
+        self.run_dirty_case(case.resize((20, 20), Image.Resampling.LANCZOS), (255,))


 class TestCoreResamplePasses:

@@ -346,26 +356,26 @@ class TestCoreResamplePasses:
     def test_horizontal(self):
         im = hopper("L")
         with self.count(1):
-            im.resize((im.size[0] - 10, im.size[1]), Image.BILINEAR)
+            im.resize((im.size[0] - 10, im.size[1]), Image.Resampling.BILINEAR)

     def test_vertical(self):
         im = hopper("L")
         with self.count(1):
-            im.resize((im.size[0], im.size[1] - 10), Image.BILINEAR)
+            im.resize((im.size[0], im.size[1] - 10), Image.Resampling.BILINEAR)

     def test_both(self):
||||||
im = hopper("L")
|
im = hopper("L")
|
||||||
with self.count(2):
|
with self.count(2):
|
||||||
im.resize((im.size[0] - 10, im.size[1] - 10), Image.BILINEAR)
|
im.resize((im.size[0] - 10, im.size[1] - 10), Image.Resampling.BILINEAR)
|
||||||
|
|
||||||
def test_box_horizontal(self):
|
def test_box_horizontal(self):
|
||||||
im = hopper("L")
|
im = hopper("L")
|
||||||
box = (20, 0, im.size[0] - 20, im.size[1])
|
box = (20, 0, im.size[0] - 20, im.size[1])
|
||||||
with self.count(1):
|
with self.count(1):
|
||||||
# the same size, but different box
|
# the same size, but different box
|
||||||
with_box = im.resize(im.size, Image.BILINEAR, box)
|
with_box = im.resize(im.size, Image.Resampling.BILINEAR, box)
|
||||||
with self.count(2):
|
with self.count(2):
|
||||||
cropped = im.crop(box).resize(im.size, Image.BILINEAR)
|
cropped = im.crop(box).resize(im.size, Image.Resampling.BILINEAR)
|
||||||
assert_image_similar(with_box, cropped, 0.1)
|
assert_image_similar(with_box, cropped, 0.1)
|
||||||
|
|
||||||
def test_box_vertical(self):
|
def test_box_vertical(self):
|
||||||
|
@ -373,9 +383,9 @@ class TestCoreResamplePasses:
|
||||||
box = (0, 20, im.size[0], im.size[1] - 20)
|
box = (0, 20, im.size[0], im.size[1] - 20)
|
||||||
with self.count(1):
|
with self.count(1):
|
||||||
# the same size, but different box
|
# the same size, but different box
|
||||||
with_box = im.resize(im.size, Image.BILINEAR, box)
|
with_box = im.resize(im.size, Image.Resampling.BILINEAR, box)
|
||||||
with self.count(2):
|
with self.count(2):
|
||||||
cropped = im.crop(box).resize(im.size, Image.BILINEAR)
|
cropped = im.crop(box).resize(im.size, Image.Resampling.BILINEAR)
|
||||||
assert_image_similar(with_box, cropped, 0.1)
|
assert_image_similar(with_box, cropped, 0.1)
|
||||||
|
|
||||||
|
|
||||||
|
@ -388,7 +398,7 @@ class TestCoreResampleCoefficients:
|
||||||
draw = ImageDraw.Draw(i)
|
draw = ImageDraw.Draw(i)
|
||||||
draw.rectangle((0, 0, i.size[0] // 2 - 1, 0), test_color)
|
draw.rectangle((0, 0, i.size[0] // 2 - 1, 0), test_color)
|
||||||
|
|
||||||
px = i.resize((5, i.size[1]), Image.BICUBIC).load()
|
px = i.resize((5, i.size[1]), Image.Resampling.BICUBIC).load()
|
||||||
if px[2, 0] != test_color // 2:
|
if px[2, 0] != test_color // 2:
|
||||||
assert test_color // 2 == px[2, 0]
|
assert test_color // 2 == px[2, 0]
|
||||||
|
|
||||||
|
@ -396,7 +406,7 @@ class TestCoreResampleCoefficients:
|
||||||
# regression test for the wrong coefficients calculation
|
# regression test for the wrong coefficients calculation
|
||||||
# due to bug https://github.com/python-pillow/Pillow/issues/2161
|
# due to bug https://github.com/python-pillow/Pillow/issues/2161
|
||||||
im = Image.new("RGBA", (1280, 1280), (0x20, 0x40, 0x60, 0xFF))
|
im = Image.new("RGBA", (1280, 1280), (0x20, 0x40, 0x60, 0xFF))
|
||||||
histogram = im.resize((256, 256), Image.BICUBIC).histogram()
|
histogram = im.resize((256, 256), Image.Resampling.BICUBIC).histogram()
|
||||||
|
|
||||||
# first channel
|
# first channel
|
||||||
assert histogram[0x100 * 0 + 0x20] == 0x10000
|
assert histogram[0x100 * 0 + 0x20] == 0x10000
|
||||||
|
@ -412,12 +422,12 @@ class TestCoreResampleBox:
|
||||||
def test_wrong_arguments(self):
|
def test_wrong_arguments(self):
|
||||||
im = hopper()
|
im = hopper()
|
||||||
for resample in (
|
for resample in (
|
||||||
Image.NEAREST,
|
Image.Resampling.NEAREST,
|
||||||
Image.BOX,
|
Image.Resampling.BOX,
|
||||||
Image.BILINEAR,
|
Image.Resampling.BILINEAR,
|
||||||
Image.HAMMING,
|
Image.Resampling.HAMMING,
|
||||||
Image.BICUBIC,
|
Image.Resampling.BICUBIC,
|
||||||
Image.LANCZOS,
|
Image.Resampling.LANCZOS,
|
||||||
):
|
):
|
||||||
im.resize((32, 32), resample, (0, 0, im.width, im.height))
|
im.resize((32, 32), resample, (0, 0, im.width, im.height))
|
||||||
im.resize((32, 32), resample, (20, 20, im.width, im.height))
|
im.resize((32, 32), resample, (20, 20, im.width, im.height))
|
||||||
|
@ -456,7 +466,7 @@ class TestCoreResampleBox:
|
||||||
for y0, y1 in split_range(dst_size[1], ytiles):
|
for y0, y1 in split_range(dst_size[1], ytiles):
|
||||||
for x0, x1 in split_range(dst_size[0], xtiles):
|
for x0, x1 in split_range(dst_size[0], xtiles):
|
||||||
box = (x0 * scale[0], y0 * scale[1], x1 * scale[0], y1 * scale[1])
|
box = (x0 * scale[0], y0 * scale[1], x1 * scale[0], y1 * scale[1])
|
||||||
tile = im.resize((x1 - x0, y1 - y0), Image.BICUBIC, box)
|
tile = im.resize((x1 - x0, y1 - y0), Image.Resampling.BICUBIC, box)
|
||||||
tiled.paste(tile, (x0, y0))
|
tiled.paste(tile, (x0, y0))
|
||||||
return tiled
|
return tiled
|
||||||
|
|
||||||
|
@ -467,7 +477,7 @@ class TestCoreResampleBox:
|
||||||
with Image.open("Tests/images/flower.jpg") as im:
|
with Image.open("Tests/images/flower.jpg") as im:
|
||||||
assert im.size == (480, 360)
|
assert im.size == (480, 360)
|
||||||
dst_size = (251, 188)
|
dst_size = (251, 188)
|
||||||
reference = im.resize(dst_size, Image.BICUBIC)
|
reference = im.resize(dst_size, Image.Resampling.BICUBIC)
|
||||||
|
|
||||||
for tiles in [(1, 1), (3, 3), (9, 7), (100, 100)]:
|
for tiles in [(1, 1), (3, 3), (9, 7), (100, 100)]:
|
||||||
tiled = self.resize_tiled(im, dst_size, *tiles)
|
tiled = self.resize_tiled(im, dst_size, *tiles)
|
||||||
|
@ -483,12 +493,16 @@ class TestCoreResampleBox:
|
||||||
assert im.size == (480, 360)
|
assert im.size == (480, 360)
|
||||||
dst_size = (48, 36)
|
dst_size = (48, 36)
|
||||||
# Reference is cropped image resized to destination
|
# Reference is cropped image resized to destination
|
||||||
reference = im.crop((0, 0, 473, 353)).resize(dst_size, Image.BICUBIC)
|
reference = im.crop((0, 0, 473, 353)).resize(
|
||||||
# Image.BOX emulates supersampling (480 / 8 = 60, 360 / 8 = 45)
|
dst_size, Image.Resampling.BICUBIC
|
||||||
supersampled = im.resize((60, 45), Image.BOX)
|
)
|
||||||
|
# Image.Resampling.BOX emulates supersampling (480 / 8 = 60, 360 / 8 = 45)
|
||||||
|
supersampled = im.resize((60, 45), Image.Resampling.BOX)
|
||||||
|
|
||||||
with_box = supersampled.resize(dst_size, Image.BICUBIC, (0, 0, 59.125, 44.125))
|
with_box = supersampled.resize(
|
||||||
without_box = supersampled.resize(dst_size, Image.BICUBIC)
|
dst_size, Image.Resampling.BICUBIC, (0, 0, 59.125, 44.125)
|
||||||
|
)
|
||||||
|
without_box = supersampled.resize(dst_size, Image.Resampling.BICUBIC)
|
||||||
|
|
||||||
# error with box should be much smaller than without
|
# error with box should be much smaller than without
|
||||||
assert_image_similar(reference, with_box, 6)
|
assert_image_similar(reference, with_box, 6)
|
||||||
|
@ -496,7 +510,7 @@ class TestCoreResampleBox:
|
||||||
assert_image_similar(reference, without_box, 5)
|
assert_image_similar(reference, without_box, 5)
|
||||||
|
|
||||||
def test_formats(self):
|
def test_formats(self):
|
||||||
for resample in [Image.NEAREST, Image.BILINEAR]:
|
for resample in [Image.Resampling.NEAREST, Image.Resampling.BILINEAR]:
|
||||||
for mode in ["RGB", "L", "RGBA", "LA", "I", ""]:
|
for mode in ["RGB", "L", "RGBA", "LA", "I", ""]:
|
||||||
im = hopper(mode)
|
im = hopper(mode)
|
||||||
box = (20, 20, im.size[0] - 20, im.size[1] - 20)
|
box = (20, 20, im.size[0] - 20, im.size[1] - 20)
|
||||||
|
@ -514,7 +528,7 @@ class TestCoreResampleBox:
|
||||||
((40, 50), (10, 0, 50, 50)),
|
((40, 50), (10, 0, 50, 50)),
|
||||||
((40, 50), (10, 20, 50, 70)),
|
((40, 50), (10, 20, 50, 70)),
|
||||||
]:
|
]:
|
||||||
res = im.resize(size, Image.LANCZOS, box)
|
res = im.resize(size, Image.Resampling.LANCZOS, box)
|
||||||
assert res.size == size
|
assert res.size == size
|
||||||
assert_image_equal(res, im.crop(box), f">>> {size} {box}")
|
assert_image_equal(res, im.crop(box), f">>> {size} {box}")
|
||||||
|
|
||||||
|
@ -528,7 +542,7 @@ class TestCoreResampleBox:
|
||||||
((40, 50), (10.4, 0.4, 50.4, 50.4)),
|
((40, 50), (10.4, 0.4, 50.4, 50.4)),
|
||||||
((40, 50), (10.4, 20.4, 50.4, 70.4)),
|
((40, 50), (10.4, 20.4, 50.4, 70.4)),
|
||||||
]:
|
]:
|
||||||
res = im.resize(size, Image.LANCZOS, box)
|
res = im.resize(size, Image.Resampling.LANCZOS, box)
|
||||||
assert res.size == size
|
assert res.size == size
|
||||||
with pytest.raises(AssertionError, match=r"difference \d"):
|
with pytest.raises(AssertionError, match=r"difference \d"):
|
||||||
# check that the difference at least that much
|
# check that the difference at least that much
|
||||||
|
@ -538,7 +552,7 @@ class TestCoreResampleBox:
|
||||||
# Can skip resize for one dimension
|
# Can skip resize for one dimension
|
||||||
im = hopper()
|
im = hopper()
|
||||||
|
|
||||||
for flt in [Image.NEAREST, Image.BICUBIC]:
|
for flt in [Image.Resampling.NEAREST, Image.Resampling.BICUBIC]:
|
||||||
for size, box in [
|
for size, box in [
|
||||||
((40, 50), (0, 0, 40, 90)),
|
((40, 50), (0, 0, 40, 90)),
|
||||||
((40, 50), (0, 20, 40, 90)),
|
((40, 50), (0, 20, 40, 90)),
|
||||||
|
@ -559,7 +573,7 @@ class TestCoreResampleBox:
|
||||||
# Can skip resize for one dimension
|
# Can skip resize for one dimension
|
||||||
im = hopper()
|
im = hopper()
|
||||||
|
|
||||||
for flt in [Image.NEAREST, Image.BICUBIC]:
|
for flt in [Image.Resampling.NEAREST, Image.Resampling.BICUBIC]:
|
||||||
for size, box in [
|
for size, box in [
|
||||||
((40, 50), (0, 0, 90, 50)),
|
((40, 50), (0, 0, 90, 50)),
|
||||||
((40, 50), (20, 0, 90, 50)),
|
((40, 50), (20, 0, 90, 50)),
|
||||||
|
|
|
@ -35,33 +35,33 @@ class TestImagingCoreResize:
            "I;16",
        ]:  # exotic mode
            im = hopper(mode)
            r = self.resize(im, (15, 12), Image.NEAREST)
            r = self.resize(im, (15, 12), Image.Resampling.NEAREST)
            assert r.mode == mode
            assert r.size == (15, 12)
            assert r.im.bands == im.im.bands

    def test_convolution_modes(self):
        with pytest.raises(ValueError):
            self.resize(hopper("1"), (15, 12), Image.BILINEAR)
            self.resize(hopper("1"), (15, 12), Image.Resampling.BILINEAR)
        with pytest.raises(ValueError):
            self.resize(hopper("P"), (15, 12), Image.BILINEAR)
            self.resize(hopper("P"), (15, 12), Image.Resampling.BILINEAR)
        with pytest.raises(ValueError):
            self.resize(hopper("I;16"), (15, 12), Image.BILINEAR)
            self.resize(hopper("I;16"), (15, 12), Image.Resampling.BILINEAR)
        for mode in ["L", "I", "F", "RGB", "RGBA", "CMYK", "YCbCr"]:
            im = hopper(mode)
            r = self.resize(im, (15, 12), Image.BILINEAR)
            r = self.resize(im, (15, 12), Image.Resampling.BILINEAR)
            assert r.mode == mode
            assert r.size == (15, 12)
            assert r.im.bands == im.im.bands

    def test_reduce_filters(self):
        for f in [
            Image.NEAREST,
            Image.Resampling.NEAREST,
            Image.BOX,
            Image.Resampling.BOX,
            Image.BILINEAR,
            Image.Resampling.BILINEAR,
            Image.HAMMING,
            Image.Resampling.HAMMING,
            Image.BICUBIC,
            Image.Resampling.BICUBIC,
            Image.LANCZOS,
            Image.Resampling.LANCZOS,
        ]:
            r = self.resize(hopper("RGB"), (15, 12), f)
            assert r.mode == "RGB"
@ -69,12 +69,12 @@ class TestImagingCoreResize:

    def test_enlarge_filters(self):
        for f in [
            Image.NEAREST,
            Image.Resampling.NEAREST,
            Image.BOX,
            Image.Resampling.BOX,
            Image.BILINEAR,
            Image.Resampling.BILINEAR,
            Image.HAMMING,
            Image.Resampling.HAMMING,
            Image.BICUBIC,
            Image.Resampling.BICUBIC,
            Image.LANCZOS,
            Image.Resampling.LANCZOS,
        ]:
            r = self.resize(hopper("RGB"), (212, 195), f)
            assert r.mode == "RGB"
@ -95,12 +95,12 @@ class TestImagingCoreResize:
        samples["dirty"].putpixel((1, 1), 128)

        for f in [
            Image.NEAREST,
            Image.Resampling.NEAREST,
            Image.BOX,
            Image.Resampling.BOX,
            Image.BILINEAR,
            Image.Resampling.BILINEAR,
            Image.HAMMING,
            Image.Resampling.HAMMING,
            Image.BICUBIC,
            Image.Resampling.BICUBIC,
            Image.LANCZOS,
            Image.Resampling.LANCZOS,
        ]:
            # samples resized with current filter
            references = {
@ -124,12 +124,12 @@ class TestImagingCoreResize:

    def test_enlarge_zero(self):
        for f in [
            Image.NEAREST,
            Image.Resampling.NEAREST,
            Image.BOX,
            Image.Resampling.BOX,
            Image.BILINEAR,
            Image.Resampling.BILINEAR,
            Image.HAMMING,
            Image.Resampling.HAMMING,
            Image.BICUBIC,
            Image.Resampling.BICUBIC,
            Image.LANCZOS,
            Image.Resampling.LANCZOS,
        ]:
            r = self.resize(Image.new("RGB", (0, 0), "white"), (212, 195), f)
            assert r.mode == "RGB"
@ -164,15 +164,19 @@ def gradients_image():

class TestReducingGapResize:
    def test_reducing_gap_values(self, gradients_image):
        ref = gradients_image.resize((52, 34), Image.BICUBIC, reducing_gap=None)
        im = gradients_image.resize((52, 34), Image.BICUBIC)
        ref = gradients_image.resize(
            (52, 34), Image.Resampling.BICUBIC, reducing_gap=None
        )
        im = gradients_image.resize((52, 34), Image.Resampling.BICUBIC)
        assert_image_equal(ref, im)

        with pytest.raises(ValueError):
            gradients_image.resize((52, 34), Image.BICUBIC, reducing_gap=0)
            gradients_image.resize((52, 34), Image.Resampling.BICUBIC, reducing_gap=0)

        with pytest.raises(ValueError):
            gradients_image.resize((52, 34), Image.BICUBIC, reducing_gap=0.99)
            gradients_image.resize(
                (52, 34), Image.Resampling.BICUBIC, reducing_gap=0.99
            )

    def test_reducing_gap_1(self, gradients_image):
        for box, epsilon in [
@ -180,9 +184,9 @@ class TestReducingGapResize:
            ((1.1, 2.2, 510.8, 510.9), 4),
            ((3, 10, 410, 256), 10),
        ]:
            ref = gradients_image.resize((52, 34), Image.BICUBIC, box=box)
            ref = gradients_image.resize((52, 34), Image.Resampling.BICUBIC, box=box)
            im = gradients_image.resize(
                (52, 34), Image.BICUBIC, box=box, reducing_gap=1.0
                (52, 34), Image.Resampling.BICUBIC, box=box, reducing_gap=1.0
            )

            with pytest.raises(AssertionError):
@ -196,9 +200,9 @@ class TestReducingGapResize:
            ((1.1, 2.2, 510.8, 510.9), 1.5),
            ((3, 10, 410, 256), 1),
        ]:
            ref = gradients_image.resize((52, 34), Image.BICUBIC, box=box)
            ref = gradients_image.resize((52, 34), Image.Resampling.BICUBIC, box=box)
            im = gradients_image.resize(
                (52, 34), Image.BICUBIC, box=box, reducing_gap=2.0
                (52, 34), Image.Resampling.BICUBIC, box=box, reducing_gap=2.0
            )

            with pytest.raises(AssertionError):
@ -212,9 +216,9 @@ class TestReducingGapResize:
            ((1.1, 2.2, 510.8, 510.9), 1),
            ((3, 10, 410, 256), 0.5),
        ]:
            ref = gradients_image.resize((52, 34), Image.BICUBIC, box=box)
            ref = gradients_image.resize((52, 34), Image.Resampling.BICUBIC, box=box)
            im = gradients_image.resize(
                (52, 34), Image.BICUBIC, box=box, reducing_gap=3.0
                (52, 34), Image.Resampling.BICUBIC, box=box, reducing_gap=3.0
            )

            with pytest.raises(AssertionError):
@ -224,9 +228,9 @@ class TestReducingGapResize:

    def test_reducing_gap_8(self, gradients_image):
        for box in [None, (1.1, 2.2, 510.8, 510.9), (3, 10, 410, 256)]:
            ref = gradients_image.resize((52, 34), Image.BICUBIC, box=box)
            ref = gradients_image.resize((52, 34), Image.Resampling.BICUBIC, box=box)
            im = gradients_image.resize(
                (52, 34), Image.BICUBIC, box=box, reducing_gap=8.0
                (52, 34), Image.Resampling.BICUBIC, box=box, reducing_gap=8.0
            )

            assert_image_equal(ref, im)
@ -236,8 +240,10 @@ class TestReducingGapResize:
            ((0, 0, 512, 512), 5.5),
            ((0.9, 1.7, 128, 128), 9.5),
        ]:
            ref = gradients_image.resize((52, 34), Image.BOX, box=box)
            ref = gradients_image.resize((52, 34), Image.Resampling.BOX, box=box)
            im = gradients_image.resize((52, 34), Image.BOX, box=box, reducing_gap=1.0)
            im = gradients_image.resize(
                (52, 34), Image.Resampling.BOX, box=box, reducing_gap=1.0
            )

            assert_image_similar(ref, im, epsilon)

@ -261,12 +267,12 @@ class TestImageResize:
    def test_default_filter(self):
        for mode in "L", "RGB", "I", "F":
            im = hopper(mode)
            assert im.resize((20, 20), Image.BICUBIC) == im.resize((20, 20))
            assert im.resize((20, 20), Image.Resampling.BICUBIC) == im.resize((20, 20))

        for mode in "1", "P":
            im = hopper(mode)
            assert im.resize((20, 20), Image.NEAREST) == im.resize((20, 20))
            assert im.resize((20, 20), Image.Resampling.NEAREST) == im.resize((20, 20))

        for mode in "I;16", "I;16L", "I;16B", "BGR;15", "BGR;16":
            im = hopper(mode)
            assert im.resize((20, 20), Image.NEAREST) == im.resize((20, 20))
            assert im.resize((20, 20), Image.Resampling.NEAREST) == im.resize((20, 20))
@ -46,14 +46,14 @@ def test_zero():
def test_resample():
    # Target image creation, inspected by eye.
    # >>> im = Image.open('Tests/images/hopper.ppm')
    # >>> im = im.rotate(45, resample=Image.BICUBIC, expand=True)
    # >>> im = im.rotate(45, resample=Image.Resampling.BICUBIC, expand=True)
    # >>> im.save('Tests/images/hopper_45.png')

    with Image.open("Tests/images/hopper_45.png") as target:
        for (resample, epsilon) in (
            (Image.NEAREST, 10),
            (Image.Resampling.NEAREST, 10),
            (Image.BILINEAR, 5),
            (Image.Resampling.BILINEAR, 5),
            (Image.BICUBIC, 0),
            (Image.Resampling.BICUBIC, 0),
        ):
            im = hopper()
            im = im.rotate(45, resample=resample, expand=True)
@ -62,7 +62,7 @@ def test_resample():

def test_center_0():
    im = hopper()
    im = im.rotate(45, center=(0, 0), resample=Image.BICUBIC)
    im = im.rotate(45, center=(0, 0), resample=Image.Resampling.BICUBIC)

    with Image.open("Tests/images/hopper_45.png") as target:
        target_origin = target.size[1] / 2
@ -73,7 +73,7 @@ def test_center_0():

def test_center_14():
    im = hopper()
    im = im.rotate(45, center=(14, 14), resample=Image.BICUBIC)
    im = im.rotate(45, center=(14, 14), resample=Image.Resampling.BICUBIC)

    with Image.open("Tests/images/hopper_45.png") as target:
        target_origin = target.size[1] / 2 - 14
@ -90,7 +90,7 @@ def test_translate():
        (target_origin, target_origin, target_origin + 128, target_origin + 128)
    )

    im = im.rotate(45, translate=(5, 5), resample=Image.BICUBIC)
    im = im.rotate(45, translate=(5, 5), resample=Image.Resampling.BICUBIC)

    assert_image_similar(im, target, 1)

@ -97,24 +97,24 @@ def test_DCT_scaling_edges():

    thumb = fromstring(tostring(im, "JPEG", quality=99, subsampling=0))
    # small reducing_gap to amplify the effect
    thumb.thumbnail((32, 32), Image.BICUBIC, reducing_gap=1.0)
    thumb.thumbnail((32, 32), Image.Resampling.BICUBIC, reducing_gap=1.0)

    ref = im.resize((32, 32), Image.BICUBIC)
    ref = im.resize((32, 32), Image.Resampling.BICUBIC)
    # This is still JPEG, some error is present. Without the fix it is 11.5
    assert_image_similar(thumb, ref, 1.5)


def test_reducing_gap_values():
    im = hopper()
    im.thumbnail((18, 18), Image.BICUBIC)
    im.thumbnail((18, 18), Image.Resampling.BICUBIC)

    ref = hopper()
    ref.thumbnail((18, 18), Image.BICUBIC, reducing_gap=2.0)
    ref.thumbnail((18, 18), Image.Resampling.BICUBIC, reducing_gap=2.0)
    # reducing_gap=2.0 should be the default
    assert_image_equal(ref, im)

    ref = hopper()
    ref.thumbnail((18, 18), Image.BICUBIC, reducing_gap=None)
    ref.thumbnail((18, 18), Image.Resampling.BICUBIC, reducing_gap=None)
    with pytest.raises(AssertionError):
        assert_image_equal(ref, im)

@ -125,9 +125,9 @@ def test_reducing_gap_for_DCT_scaling():
    with Image.open("Tests/images/hopper.jpg") as ref:
        # thumbnail should call draft with reducing_gap scale
        ref.draft(None, (18 * 3, 18 * 3))
        ref = ref.resize((18, 18), Image.BICUBIC)
        ref = ref.resize((18, 18), Image.Resampling.BICUBIC)

        with Image.open("Tests/images/hopper.jpg") as im:
            im.thumbnail((18, 18), Image.BICUBIC, reducing_gap=3.0)
            im.thumbnail((18, 18), Image.Resampling.BICUBIC, reducing_gap=3.0)

            assert_image_equal(ref, im)

@ -34,20 +34,22 @@ class TestImageTransform:

    def test_palette(self):
        with Image.open("Tests/images/hopper.gif") as im:
            transformed = im.transform(im.size, Image.AFFINE, [1, 0, 0, 0, 1, 0])
            transformed = im.transform(
                im.size, Image.Transform.AFFINE, [1, 0, 0, 0, 1, 0]
            )
            assert im.palette.palette == transformed.palette.palette

    def test_extent(self):
        im = hopper("RGB")
        (w, h) = im.size
        # fmt: off
        transformed = im.transform(im.size, Image.EXTENT,
        transformed = im.transform(im.size, Image.Transform.EXTENT,
                                   (0, 0,
                                    w//2, h//2),  # ul -> lr
                                   Image.BILINEAR)
                                   Image.Resampling.BILINEAR)
        # fmt: on

        scaled = im.resize((w * 2, h * 2), Image.BILINEAR).crop((0, 0, w, h))
        scaled = im.resize((w * 2, h * 2), Image.Resampling.BILINEAR).crop((0, 0, w, h))

        # undone -- precision?
        assert_image_similar(transformed, scaled, 23)
@ -57,15 +59,18 @@ class TestImageTransform:
        im = hopper("RGB")
        (w, h) = im.size
        # fmt: off
        transformed = im.transform(im.size, Image.QUAD,
        transformed = im.transform(im.size, Image.Transform.QUAD,
                                   (0, 0, 0, h//2,
                                   # ul -> ccw around quad:
                                   w//2, h//2, w//2, 0),
                                   Image.BILINEAR)
                                   Image.Resampling.BILINEAR)
        # fmt: on

        scaled = im.transform(
            (w, h), Image.AFFINE, (0.5, 0, 0, 0, 0.5, 0), Image.BILINEAR
            (w, h),
            Image.Transform.AFFINE,
            (0.5, 0, 0, 0, 0.5, 0),
            Image.Resampling.BILINEAR,
        )

        assert_image_equal(transformed, scaled)
@ -80,9 +85,9 @@ class TestImageTransform:
        (w, h) = im.size
        transformed = im.transform(
            im.size,
            Image.EXTENT,
            Image.Transform.EXTENT,
            (0, 0, w * 2, h * 2),
            Image.BILINEAR,
            Image.Resampling.BILINEAR,
            fillcolor="red",
        )

@ -93,18 +98,21 @@ class TestImageTransform:
        im = hopper("RGBA")
        (w, h) = im.size
        # fmt: off
        transformed = im.transform(im.size, Image.MESH,
        transformed = im.transform(im.size, Image.Transform.MESH,
                                   [((0, 0, w//2, h//2),  # box
                                     (0, 0, 0, h,
                                      w, h, w, 0)),  # ul -> ccw around quad
                                    ((w//2, h//2, w, h),  # box
                                     (0, 0, 0, h,
                                      w, h, w, 0))],  # ul -> ccw around quad
                                   Image.BILINEAR)
                                   Image.Resampling.BILINEAR)
        # fmt: on

        scaled = im.transform(
            (w // 2, h // 2), Image.AFFINE, (2, 0, 0, 0, 2, 0), Image.BILINEAR
            (w // 2, h // 2),
            Image.Transform.AFFINE,
            (2, 0, 0, 0, 2, 0),
            Image.Resampling.BILINEAR,
        )

        checker = Image.new("RGBA", im.size)
@ -137,14 +145,16 @@ class TestImageTransform:

    def test_alpha_premult_resize(self):
        def op(im, sz):
            return im.resize(sz, Image.BILINEAR)
            return im.resize(sz, Image.Resampling.BILINEAR)

        self._test_alpha_premult(op)

    def test_alpha_premult_transform(self):
        def op(im, sz):
            (w, h) = im.size
            return im.transform(sz, Image.EXTENT, (0, 0, w, h), Image.BILINEAR)
            return im.transform(
                sz, Image.Transform.EXTENT, (0, 0, w, h), Image.Resampling.BILINEAR
            )

        self._test_alpha_premult(op)

@ -171,7 +181,7 @@ class TestImageTransform:
    @pytest.mark.parametrize("mode", ("RGBA", "LA"))
    def test_nearest_resize(self, mode):
        def op(im, sz):
            return im.resize(sz, Image.NEAREST)
            return im.resize(sz, Image.Resampling.NEAREST)

        self._test_nearest(op, mode)

@ -179,7 +189,9 @@ class TestImageTransform:
    def test_nearest_transform(self, mode):
        def op(im, sz):
            (w, h) = im.size
            return im.transform(sz, Image.EXTENT, (0, 0, w, h), Image.NEAREST)
            return im.transform(
                sz, Image.Transform.EXTENT, (0, 0, w, h), Image.Resampling.NEAREST
            )

        self._test_nearest(op, mode)

@ -213,13 +225,15 @@ class TestImageTransform:
    def test_unknown_resampling_filter(self):
        with hopper() as im:
            (w, h) = im.size
            for resample in (Image.BOX, "unknown"):
            for resample in (Image.Resampling.BOX, "unknown"):
                with pytest.raises(ValueError):
                    im.transform((100, 100), Image.EXTENT, (0, 0, w, h), resample)
                    im.transform(
                        (100, 100), Image.Transform.EXTENT, (0, 0, w, h), resample
                    )


class TestImageTransformAffine:
    transform = Image.AFFINE
    transform = Image.Transform.AFFINE

    def _test_image(self):
        im = hopper("RGB")
@ -247,7 +261,11 @@ class TestImageTransformAffine:
        else:
            transposed = im

        for resample in [Image.NEAREST, Image.BILINEAR, Image.BICUBIC]:
        for resample in [
            Image.Resampling.NEAREST,
            Image.Resampling.BILINEAR,
            Image.Resampling.BICUBIC,
        ]:
            transformed = im.transform(
                transposed.size, self.transform, matrix, resample
            )
@ -257,13 +275,13 @@ class TestImageTransformAffine:
        self._test_rotate(0, None)

    def test_rotate_90_deg(self):
        self._test_rotate(90, Image.ROTATE_90)
        self._test_rotate(90, Image.Transpose.ROTATE_90)

    def test_rotate_180_deg(self):
        self._test_rotate(180, Image.ROTATE_180)
        self._test_rotate(180, Image.Transpose.ROTATE_180)

    def test_rotate_270_deg(self):
        self._test_rotate(270, Image.ROTATE_270)
        self._test_rotate(270, Image.Transpose.ROTATE_270)

    def _test_resize(self, scale, epsilonscale):
        im = self._test_image()
@ -273,9 +291,9 @@ class TestImageTransformAffine:
        matrix_down = [scale, 0, 0, 0, scale, 0, 0, 0]

        for resample, epsilon in [
            (Image.NEAREST, 0),
            (Image.Resampling.NEAREST, 0),
            (Image.BILINEAR, 2),
            (Image.Resampling.BILINEAR, 2),
            (Image.BICUBIC, 1),
            (Image.Resampling.BICUBIC, 1),
        ]:
            transformed = im.transform(size_up, self.transform, matrix_up, resample)
            transformed = transformed.transform(
@ -306,9 +324,9 @@ class TestImageTransformAffine:
        matrix_down = [1, 0, x, 0, 1, y, 0, 0]

        for resample, epsilon in [
            (Image.NEAREST, 0),
            (Image.Resampling.NEAREST, 0),
            (Image.BILINEAR, 1.5),
            (Image.Resampling.BILINEAR, 1.5),
            (Image.BICUBIC, 1),
            (Image.Resampling.BICUBIC, 1),
        ]:
            transformed = im.transform(size_up, self.transform, matrix_up, resample)
            transformed = transformed.transform(
@ -328,4 +346,4 @@ class TestImageTransformAffine:

class TestImageTransformPerspective(TestImageTransformAffine):
    # Repeat all tests for AFFINE transformations with PERSPECTIVE
    transform = Image.PERSPECTIVE
    transform = Image.Transform.PERSPECTIVE

@ -1,12 +1,4 @@
from PIL.Image import (
    FLIP_LEFT_RIGHT,
    FLIP_TOP_BOTTOM,
    ROTATE_90,
    ROTATE_180,
    ROTATE_270,
    TRANSPOSE,
    TRANSVERSE,
)
from PIL.Image import Transpose

from . import helper
from .helper import assert_image_equal
@ -20,7 +12,7 @@ HOPPER = {
def test_flip_left_right():
    def transpose(mode):
        im = HOPPER[mode]
        out = im.transpose(FLIP_LEFT_RIGHT)
        out = im.transpose(Transpose.FLIP_LEFT_RIGHT)
        assert out.mode == mode
        assert out.size == im.size

@ -37,7 +29,7 @@ def test_flip_left_right():
def test_flip_top_bottom():
    def transpose(mode):
        im = HOPPER[mode]
        out = im.transpose(FLIP_TOP_BOTTOM)
        out = im.transpose(Transpose.FLIP_TOP_BOTTOM)
        assert out.mode == mode
        assert out.size == im.size

@ -54,7 +46,7 @@ def test_flip_top_bottom():
def test_rotate_90():
    def transpose(mode):
        im = HOPPER[mode]
        out = im.transpose(ROTATE_90)
        out = im.transpose(Transpose.ROTATE_90)
        assert out.mode == mode
        assert out.size == im.size[::-1]

@ -71,7 +63,7 @@ def test_rotate_90():
def test_rotate_180():
    def transpose(mode):
        im = HOPPER[mode]
        out = im.transpose(ROTATE_180)
        out = im.transpose(Transpose.ROTATE_180)
        assert out.mode == mode
        assert out.size == im.size

@ -88,7 +80,7 @@ def test_rotate_180():
def test_rotate_270():
    def transpose(mode):
        im = HOPPER[mode]
        out = im.transpose(ROTATE_270)
        out = im.transpose(Transpose.ROTATE_270)
        assert out.mode == mode
        assert out.size == im.size[::-1]

@ -105,7 +97,7 @@ def test_rotate_270():
def test_transpose():
    def transpose(mode):
        im = HOPPER[mode]
        out = im.transpose(TRANSPOSE)
        out = im.transpose(Transpose.TRANSPOSE)
        assert out.mode == mode
        assert out.size == im.size[::-1]

@ -122,7 +114,7 @@ def test_transpose():
def test_tranverse():
    def transpose(mode):
        im = HOPPER[mode]
        out = im.transpose(TRANSVERSE)
        out = im.transpose(Transpose.TRANSVERSE)
        assert out.mode == mode
        assert out.size == im.size[::-1]

@ -143,20 +135,31 @@ def test_roundtrip():
    def transpose(first, second):
        return im.transpose(first).transpose(second)

    assert_image_equal(im, transpose(FLIP_LEFT_RIGHT, FLIP_LEFT_RIGHT))
    assert_image_equal(im, transpose(FLIP_TOP_BOTTOM, FLIP_TOP_BOTTOM))
    assert_image_equal(im, transpose(ROTATE_90, ROTATE_270))
    assert_image_equal(im, transpose(ROTATE_180, ROTATE_180))
    assert_image_equal(
        im.transpose(TRANSPOSE), transpose(ROTATE_90, FLIP_TOP_BOTTOM)
    )
    assert_image_equal(
        im.transpose(TRANSPOSE), transpose(ROTATE_270, FLIP_LEFT_RIGHT)
    )
    assert_image_equal(
        im.transpose(TRANSVERSE), transpose(ROTATE_90, FLIP_LEFT_RIGHT)
    )
    assert_image_equal(
        im.transpose(TRANSVERSE), transpose(ROTATE_270, FLIP_TOP_BOTTOM)
    )
    assert_image_equal(im.transpose(TRANSVERSE), transpose(ROTATE_180, TRANSPOSE))
    assert_image_equal(
        im, transpose(Transpose.FLIP_LEFT_RIGHT, Transpose.FLIP_LEFT_RIGHT)
    )
    assert_image_equal(
        im, transpose(Transpose.FLIP_TOP_BOTTOM, Transpose.FLIP_TOP_BOTTOM)
    )
    assert_image_equal(im, transpose(Transpose.ROTATE_90, Transpose.ROTATE_270))
    assert_image_equal(im, transpose(Transpose.ROTATE_180, Transpose.ROTATE_180))
    assert_image_equal(
        im.transpose(Transpose.TRANSPOSE),
        transpose(Transpose.ROTATE_90, Transpose.FLIP_TOP_BOTTOM),
    )
    assert_image_equal(
        im.transpose(Transpose.TRANSPOSE),
        transpose(Transpose.ROTATE_270, Transpose.FLIP_LEFT_RIGHT),
    )
    assert_image_equal(
        im.transpose(Transpose.TRANSVERSE),
        transpose(Transpose.ROTATE_90, Transpose.FLIP_LEFT_RIGHT),
    )
    assert_image_equal(
        im.transpose(Transpose.TRANSVERSE),
        transpose(Transpose.ROTATE_270, Transpose.FLIP_TOP_BOTTOM),
    )
    assert_image_equal(
        im.transpose(Transpose.TRANSVERSE),
        transpose(Transpose.ROTATE_180, Transpose.TRANSPOSE),
    )
@ -140,7 +140,7 @@ def test_intent():
    skip_missing()
    assert ImageCms.getDefaultIntent(SRGB) == 0
    support = ImageCms.isIntentSupported(
        SRGB, ImageCms.INTENT_ABSOLUTE_COLORIMETRIC, ImageCms.DIRECTION_INPUT
        SRGB, ImageCms.Intent.ABSOLUTE_COLORIMETRIC, ImageCms.Direction.INPUT
    )
    assert support == 1

@ -153,7 +153,7 @@ def test_profile_object():
    # ["sRGB built-in", "", "WhitePoint : D65 (daylight)", "", ""]
    assert ImageCms.getDefaultIntent(p) == 0
    support = ImageCms.isIntentSupported(
        p, ImageCms.INTENT_ABSOLUTE_COLORIMETRIC, ImageCms.DIRECTION_INPUT
        p, ImageCms.Intent.ABSOLUTE_COLORIMETRIC, ImageCms.Direction.INPUT
    )
    assert support == 1

@ -593,3 +593,13 @@ def test_auxiliary_channels_isolated():
    )

    assert_image_equal(test_image.convert(dst_format[2]), reference_image)


def test_constants_deprecation():
    for enum, prefix in {
        ImageCms.Intent: "INTENT_",
        ImageCms.Direction: "DIRECTION_",
    }.items():
        for name in enum.__members__:
            with pytest.warns(DeprecationWarning):
                assert getattr(ImageCms, prefix + name) == enum[name]
@ -183,7 +183,7 @@ def test_bitmap():
    im = Image.new("RGB", (W, H))
    draw = ImageDraw.Draw(im)
    with Image.open("Tests/images/pil123rgba.png") as small:
        small = small.resize((50, 50), Image.NEAREST)
        small = small.resize((50, 50), Image.Resampling.NEAREST)

    # Act
    draw.bitmap((10, 10), small)

@ -319,7 +319,7 @@ def test_ellipse_symmetric():
        im = Image.new("RGB", (width, 100))
        draw = ImageDraw.Draw(im)
        draw.ellipse(bbox, fill="green", outline="blue")
        assert_image_equal(im, im.transpose(Image.FLIP_LEFT_RIGHT))
        assert_image_equal(im, im.transpose(Image.Transpose.FLIP_LEFT_RIGHT))


def test_ellipse_width():
@ -23,7 +23,7 @@ class TestImageFile:
    def test_parser(self):
        def roundtrip(format):

            im = hopper("L").resize((1000, 1000), Image.NEAREST)
            im = hopper("L").resize((1000, 1000), Image.Resampling.NEAREST)
            if format in ("MSP", "XBM"):
                im = im.convert("1")

@ -29,7 +29,7 @@ pytestmark = skip_unless_feature("freetype2")


class TestImageFont:
    LAYOUT_ENGINE = ImageFont.LAYOUT_BASIC
    LAYOUT_ENGINE = ImageFont.Layout.BASIC

    def get_font(self):
        return ImageFont.truetype(
@ -94,12 +94,12 @@ class TestImageFont:

        try:
            ttf = ImageFont.truetype(
                FONT_PATH, FONT_SIZE, layout_engine=ImageFont.LAYOUT_RAQM
                FONT_PATH, FONT_SIZE, layout_engine=ImageFont.Layout.RAQM
            )
        finally:
            ImageFont.core.HAVE_RAQM = have_raqm

        assert ttf.layout_engine == ImageFont.LAYOUT_BASIC
        assert ttf.layout_engine == ImageFont.Layout.BASIC

    def _render(self, font):
        txt = "Hello World!"
@ -182,7 +182,7 @@ class TestImageFont:
        im = Image.new(mode, (1, 1), 0)
        d = ImageDraw.Draw(im)

        if self.LAYOUT_ENGINE == ImageFont.LAYOUT_BASIC:
        if self.LAYOUT_ENGINE == ImageFont.Layout.BASIC:
            length = d.textlength(text, f)
            assert length == length_basic
        else:
@ -294,7 +294,7 @@ class TestImageFont:
        word = "testing"
        font = self.get_font()

        orientation = Image.ROTATE_90
        orientation = Image.Transpose.ROTATE_90
        transposed_font = ImageFont.TransposedFont(font, orientation=orientation)

        # Original font
@ -333,7 +333,7 @@ class TestImageFont:
        # Arrange
        text = "mask this"
        font = self.get_font()
        orientation = Image.ROTATE_90
        orientation = Image.Transpose.ROTATE_90
        transposed_font = ImageFont.TransposedFont(font, orientation=orientation)

        # Act
@ -604,7 +604,7 @@ class TestImageFont:
        # Arrange
        t = self.get_font()
        # Act / Assert
        if t.layout_engine == ImageFont.LAYOUT_BASIC:
        if t.layout_engine == ImageFont.Layout.BASIC:
            with pytest.raises(KeyError):
                t.getmask("абвг", direction="rtl")
            with pytest.raises(KeyError):
@ -753,7 +753,7 @@ class TestImageFont:
        name, text = "quick", "Quick"
        path = f"Tests/images/test_anchor_{name}_{anchor}.png"

        if self.LAYOUT_ENGINE == ImageFont.LAYOUT_RAQM:
        if self.LAYOUT_ENGINE == ImageFont.Layout.RAQM:
            width, height = (129, 44)
        else:
            width, height = (128, 44)
@ -993,7 +993,7 @@ class TestImageFont:

@skip_unless_feature("raqm")
class TestImageFont_RaqmLayout(TestImageFont):
    LAYOUT_ENGINE = ImageFont.LAYOUT_RAQM
    LAYOUT_ENGINE = ImageFont.Layout.RAQM


def test_render_mono_size():
@ -1004,7 +1004,7 @@ def test_render_mono_size():
    ttf = ImageFont.truetype(
        "Tests/fonts/DejaVuSans/DejaVuSans.ttf",
        18,
        layout_engine=ImageFont.LAYOUT_BASIC,
        layout_engine=ImageFont.Layout.BASIC,
    )

    draw.text((10, 10), "r" * 10, "black", ttf)
@ -1028,10 +1028,19 @@ def test_raqm_missing_warning(monkeypatch):
    monkeypatch.setattr(ImageFont.core, "HAVE_RAQM", False)
    with pytest.warns(UserWarning) as record:
        font = ImageFont.truetype(
            FONT_PATH, FONT_SIZE, layout_engine=ImageFont.LAYOUT_RAQM
            FONT_PATH, FONT_SIZE, layout_engine=ImageFont.Layout.RAQM
        )
    assert font.layout_engine == ImageFont.LAYOUT_BASIC
    assert font.layout_engine == ImageFont.Layout.BASIC
    assert str(record[-1].message) == (
        "Raqm layout was requested, but Raqm is not available. "
        "Falling back to basic layout."
    )


def test_constants_deprecation():
    for enum, prefix in {
        ImageFont.Layout: "LAYOUT_",
    }.items():
        for name in enum.__members__:
            with pytest.warns(DeprecationWarning):
                assert getattr(ImageFont, prefix + name) == enum[name]
@ -34,7 +34,7 @@ def test_basic(tmp_path):
    imOut = imIn.copy()
    verify(imOut)  # copy

    imOut = imIn.transform((w, h), Image.EXTENT, (0, 0, w, h))
    imOut = imIn.transform((w, h), Image.Transform.EXTENT, (0, 0, w, h))
    verify(imOut)  # transform

    filename = str(tmp_path / "temp.im")
@ -66,6 +66,73 @@ In effect, ``viewer.show_file("test.jpg")`` will continue to work unchanged.
``viewer.show_file(file="test.jpg")`` will raise a deprecation warning, and suggest
``viewer.show_file(path="test.jpg")`` instead.

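As a purely illustrative sketch (``viewer`` is assumed to be a
:py:class:`~PIL.ImageShow.Viewer` instance and the path is a placeholder)::

    viewer.show_file("test.jpg")       # positional argument, unchanged
    viewer.show_file(path="test.jpg")  # new keyword
    viewer.show_file(file="test.jpg")  # DeprecationWarning, suggests path=
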
||||||
|
Constants
|
||||||
|
~~~~~~~~~
|
||||||
|
|
||||||
|
.. deprecated:: 9.2.0
|
||||||
|
|
||||||
|
A number of constants have been deprecated and will be removed in Pillow 10.0.0
(2023-07-01). Instead, ``enum.IntEnum`` classes have been added.

===================================================== ============================================================
Deprecated                                            Use instead
===================================================== ============================================================
``Image.NONE``                                        Either ``Image.Dither.NONE`` or ``Image.Resampling.NEAREST``
``Image.NEAREST``                                     Either ``Image.Dither.NONE`` or ``Image.Resampling.NEAREST``
``Image.ORDERED``                                     ``Image.Dither.ORDERED``
``Image.RASTERIZE``                                   ``Image.Dither.RASTERIZE``
``Image.FLOYDSTEINBERG``                              ``Image.Dither.FLOYDSTEINBERG``
``Image.WEB``                                         ``Image.Palette.WEB``
``Image.ADAPTIVE``                                    ``Image.Palette.ADAPTIVE``
``Image.AFFINE``                                      ``Image.Transform.AFFINE``
``Image.EXTENT``                                      ``Image.Transform.EXTENT``
``Image.PERSPECTIVE``                                 ``Image.Transform.PERSPECTIVE``
``Image.QUAD``                                        ``Image.Transform.QUAD``
``Image.MESH``                                        ``Image.Transform.MESH``
``Image.FLIP_LEFT_RIGHT``                             ``Image.Transpose.FLIP_LEFT_RIGHT``
``Image.FLIP_TOP_BOTTOM``                             ``Image.Transpose.FLIP_TOP_BOTTOM``
``Image.ROTATE_90``                                   ``Image.Transpose.ROTATE_90``
``Image.ROTATE_180``                                  ``Image.Transpose.ROTATE_180``
``Image.ROTATE_270``                                  ``Image.Transpose.ROTATE_270``
``Image.TRANSPOSE``                                   ``Image.Transpose.TRANSPOSE``
``Image.TRANSVERSE``                                  ``Image.Transpose.TRANSVERSE``
``Image.BOX``                                         ``Image.Resampling.BOX``
``Image.BILINEAR``                                    ``Image.Resampling.BILINEAR``
``Image.LINEAR``                                      ``Image.Resampling.BILINEAR``
``Image.HAMMING``                                     ``Image.Resampling.HAMMING``
``Image.BICUBIC``                                     ``Image.Resampling.BICUBIC``
``Image.CUBIC``                                       ``Image.Resampling.BICUBIC``
``Image.LANCZOS``                                     ``Image.Resampling.LANCZOS``
``Image.ANTIALIAS``                                   ``Image.Resampling.LANCZOS``
``Image.MEDIANCUT``                                   ``Image.Quantize.MEDIANCUT``
``Image.MAXCOVERAGE``                                 ``Image.Quantize.MAXCOVERAGE``
``Image.FASTOCTREE``                                  ``Image.Quantize.FASTOCTREE``
``Image.LIBIMAGEQUANT``                               ``Image.Quantize.LIBIMAGEQUANT``
``ImageCms.INTENT_PERCEPTUAL``                        ``ImageCms.Intent.PERCEPTUAL``
``ImageCms.INTENT_RELATIVE_COLORIMETRIC``             ``ImageCms.Intent.RELATIVE_COLORIMETRIC``
``ImageCms.INTENT_SATURATION``                        ``ImageCms.Intent.SATURATION``
``ImageCms.INTENT_ABSOLUTE_COLORIMETRIC``             ``ImageCms.Intent.ABSOLUTE_COLORIMETRIC``
``ImageCms.DIRECTION_INPUT``                          ``ImageCms.Direction.INPUT``
``ImageCms.DIRECTION_OUTPUT``                         ``ImageCms.Direction.OUTPUT``
``ImageCms.DIRECTION_PROOF``                          ``ImageCms.Direction.PROOF``
``ImageFont.LAYOUT_BASIC``                            ``ImageFont.Layout.BASIC``
``ImageFont.LAYOUT_RAQM``                             ``ImageFont.Layout.RAQM``
``BlpImagePlugin.BLP_FORMAT_JPEG``                    ``BlpImagePlugin.Format.JPEG``
``BlpImagePlugin.BLP_ENCODING_UNCOMPRESSED``          ``BlpImagePlugin.Encoding.UNCOMPRESSED``
``BlpImagePlugin.BLP_ENCODING_DXT``                   ``BlpImagePlugin.Encoding.DXT``
``BlpImagePlugin.BLP_ENCODING_UNCOMPRESSED_RAW_RGBA`` ``BlpImagePlugin.Encoding.UNCOMPRESSED_RAW_RGBA``
``BlpImagePlugin.BLP_ALPHA_ENCODING_DXT1``            ``BlpImagePlugin.AlphaEncoding.DXT1``
``BlpImagePlugin.BLP_ALPHA_ENCODING_DXT3``            ``BlpImagePlugin.AlphaEncoding.DXT3``
``BlpImagePlugin.BLP_ALPHA_ENCODING_DXT5``            ``BlpImagePlugin.AlphaEncoding.DXT5``
``FtexImagePlugin.FORMAT_DXT1``                       ``FtexImagePlugin.Format.DXT1``
``FtexImagePlugin.FORMAT_UNCOMPRESSED``               ``FtexImagePlugin.Format.UNCOMPRESSED``
``PngImagePlugin.APNG_DISPOSE_OP_NONE``               ``PngImagePlugin.Disposal.OP_NONE``
``PngImagePlugin.APNG_DISPOSE_OP_BACKGROUND``         ``PngImagePlugin.Disposal.OP_BACKGROUND``
``PngImagePlugin.APNG_DISPOSE_OP_PREVIOUS``           ``PngImagePlugin.Disposal.OP_PREVIOUS``
``PngImagePlugin.APNG_BLEND_OP_SOURCE``               ``PngImagePlugin.Blend.OP_SOURCE``
``PngImagePlugin.APNG_BLEND_OP_OVER``                 ``PngImagePlugin.Blend.OP_OVER``
===================================================== ============================================================
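Because the new classes are ``enum.IntEnum`` subclasses, their members still compare
equal to the old integer values, so updating call sites is mechanical. A minimal
sketch (``hopper.jpg`` is only a placeholder file name)::

    from PIL import Image

    with Image.open("hopper.jpg") as im:
        # Pillow < 9.1 spelling (now deprecated):
        #   small = im.resize((128, 128), Image.ANTIALIAS)
        # Pillow >= 9.1 spelling:
        small = im.resize((128, 128), Image.Resampling.LANCZOS)
        flipped = im.transpose(Image.Transpose.FLIP_LEFT_RIGHT)

        # IntEnum members keep the old integer values
        assert Image.Resampling.LANCZOS == 1
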
FitsStubImagePlugin
~~~~~~~~~~~~~~~~~~~

@ -696,12 +696,12 @@ parameter must be set to ``True``. The following parameters can also be set:

operation to be used for this frame before rendering the next frame.
Defaults to 0.

* 0 (:py:data:`~PIL.PngImagePlugin.APNG_DISPOSE_OP_NONE`, default) -
* 0 (:py:data:`~PIL.PngImagePlugin.Disposal.OP_NONE`, default) -
No disposal is done on this frame before rendering the next frame.
* 1 (:py:data:`PIL.PngImagePlugin.APNG_DISPOSE_OP_BACKGROUND`) -
* 1 (:py:data:`PIL.PngImagePlugin.Disposal.OP_BACKGROUND`) -
This frame's modified region is cleared to fully transparent black before
rendering the next frame.
* 2 (:py:data:`~PIL.PngImagePlugin.APNG_DISPOSE_OP_PREVIOUS`) -
* 2 (:py:data:`~PIL.PngImagePlugin.Disposal.OP_PREVIOUS`) -
This frame's modified region is reverted to the previous frame's contents before
rendering the next frame.

@ -710,10 +710,10 @@ parameter must be set to ``True``. The following parameters can also be set:

operation to be used for this frame before rendering the next frame.
Defaults to 0.

* 0 (:py:data:`~PIL.PngImagePlugin.APNG_BLEND_OP_SOURCE`) -
* 0 (:py:data:`~PIL.PngImagePlugin.Blend.OP_SOURCE`) -
All color components of this frame, including alpha, overwrite the previous output
image contents.
* 1 (:py:data:`~PIL.PngImagePlugin.APNG_BLEND_OP_OVER`) -
* 1 (:py:data:`~PIL.PngImagePlugin.Blend.OP_OVER`) -
This frame should be alpha composited with the previous output image contents.

.. note::
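These values map directly onto the ``disposal`` and ``blend`` arguments accepted when
saving an APNG; a small sketch (frame contents and the output name are illustrative)::

    from PIL import Image, PngImagePlugin

    red = Image.new("RGBA", (64, 64), (255, 0, 0, 255))
    blue = Image.new("RGBA", (64, 64), (0, 0, 255, 128))

    # Two-frame APNG: keep each frame in place, composite the next one over it.
    red.save(
        "anim.png",
        save_all=True,
        append_images=[blue],
        duration=500,
        loop=0,
        disposal=PngImagePlugin.Disposal.OP_NONE,
        blend=PngImagePlugin.Blend.OP_OVER,
    )
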
@ -155,7 +155,7 @@ Processing a subrectangle, and pasting it back

::

region = region.transpose(Image.ROTATE_180)
region = region.transpose(Image.Transpose.ROTATE_180)
im.paste(region, box)

When pasting regions back, the size of the region must match the given region
@ -238,11 +238,11 @@ Transposing an image

::

out = im.transpose(Image.FLIP_LEFT_RIGHT)
out = im.transpose(Image.Transpose.FLIP_LEFT_RIGHT)
out = im.transpose(Image.FLIP_TOP_BOTTOM)
out = im.transpose(Image.Transpose.FLIP_TOP_BOTTOM)
out = im.transpose(Image.ROTATE_90)
out = im.transpose(Image.Transpose.ROTATE_90)
out = im.transpose(Image.ROTATE_180)
out = im.transpose(Image.Transpose.ROTATE_180)
out = im.transpose(Image.ROTATE_270)
out = im.transpose(Image.Transpose.ROTATE_270)

``transpose(ROTATE)`` operations can also be performed identically with
:py:meth:`~PIL.Image.Image.rotate` operations, provided the ``expand`` flag is
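As a concrete illustration of that equivalence, a 90-degree ``rotate`` with ``expand``
set returns the same result as the corresponding transpose (a sketch; ``hopper.jpg``
is a placeholder)::

    from PIL import Image

    with Image.open("hopper.jpg") as im:
        by_transpose = im.transpose(Image.Transpose.ROTATE_90)
        by_rotate = im.rotate(90, expand=True)
        # Lossless 90-degree steps take the same internal path,
        # so the pixel data matches exactly.
        assert by_transpose.tobytes() == by_rotate.tobytes()
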
@ -215,7 +215,7 @@ Many of Pillow's features require external libraries:

Once you have installed the prerequisites, run::

python3 -m pip install --upgrade pip
python3 -m pip install --upgrade Pillow
python3 -m pip install --upgrade Pillow --no-binary :all:

If the prerequisites are installed in the standard library locations
for your machine (e.g. :file:`/usr` or :file:`/usr/local`), no

@ -225,7 +225,7 @@ those locations by editing :file:`setup.py` or
:file:`setup.cfg`, or by adding environment variables on the command
line::

CFLAGS="-I/usr/pkg/include" python3 -m pip install --upgrade Pillow
CFLAGS="-I/usr/pkg/include" python3 -m pip install --upgrade Pillow --no-binary :all:

If Pillow has been previously built without the required
prerequisites, it may be necessary to manually clear the pip cache or

@ -291,7 +291,7 @@ tools.

The easiest way to install external libraries is via `Homebrew
<https://brew.sh/>`_. After you install Homebrew, run::

brew install libtiff libjpeg webp little-cms2
brew install libjpeg libtiff little-cms2 openjpeg webp

To install libraqm on macOS use Homebrew to install its dependencies::

@ -302,7 +302,7 @@ Then see ``depends/install_raqm_cmake.sh`` to install libraqm.

Now install Pillow with::

python3 -m pip install --upgrade pip
python3 -m pip install --upgrade Pillow
python3 -m pip install --upgrade Pillow --no-binary :all:

or from within the uncompressed source directory::

@ -349,7 +349,7 @@ Prerequisites are installed on **MSYS2 MinGW 64-bit** with::

Now install Pillow with::

python3 -m pip install --upgrade pip
python3 -m pip install --upgrade Pillow
python3 -m pip install --upgrade Pillow --no-binary :all:

Building on FreeBSD
@ -254,7 +254,8 @@ This rotates the input image by ``theta`` degrees counter clockwise:

.. automethod:: PIL.Image.Image.transform
.. automethod:: PIL.Image.Image.transpose

This flips the input image by using the :data:`FLIP_LEFT_RIGHT` method.
This flips the input image by using the :data:`PIL.Image.Transpose.FLIP_LEFT_RIGHT`
method.

.. code-block:: python

@ -263,9 +264,9 @@ This flips the input image by using the :data:`FLIP_LEFT_RIGHT` method.

with Image.open("hopper.jpg") as im:

# Flip the image from left to right
im_flipped = im.transpose(method=Image.FLIP_LEFT_RIGHT)
im_flipped = im.transpose(method=Image.Transpose.FLIP_LEFT_RIGHT)
# To flip the image from top to bottom,
# use the method "Image.FLIP_TOP_BOTTOM"
# use the method "Image.Transpose.FLIP_TOP_BOTTOM"

.. automethod:: PIL.Image.Image.verify
@ -389,36 +390,34 @@ Transpose methods

Used to specify the :meth:`Image.transpose` method to use.

.. data:: FLIP_LEFT_RIGHT
.. data:: FLIP_TOP_BOTTOM
.. data:: ROTATE_90
.. data:: ROTATE_180
.. data:: ROTATE_270
.. data:: TRANSPOSE
.. data:: TRANSVERSE
.. autoclass:: Transpose
:members:
:undoc-members:

Transform methods
^^^^^^^^^^^^^^^^^

Used to specify the :meth:`Image.transform` method to use.

.. data:: AFFINE
.. py:class:: Transform

.. py:attribute:: AFFINE

Affine transform

.. data:: EXTENT
.. py:attribute:: EXTENT

Cut out a rectangular subregion

.. data:: PERSPECTIVE
.. py:attribute:: PERSPECTIVE

Perspective transform

.. data:: QUAD
.. py:attribute:: QUAD

Map a quadrilateral to a rectangle

.. data:: MESH
.. py:attribute:: MESH

Map a number of source quadrilaterals in one operation
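For orientation, :py:meth:`Image.transform` takes one of these methods plus
method-specific data; an affine transform, for instance, takes the six coefficients
of the inverse mapping. A small sketch (``hopper.jpg`` is a placeholder)::

    from PIL import Image

    with Image.open("hopper.jpg") as im:
        # Identity affine: x_src = 1*x + 0*y + 0, y_src = 0*x + 1*y + 0
        same = im.transform(im.size, Image.Transform.AFFINE, (1, 0, 0, 0, 1, 0))

        # EXTENT: stretch the top-left 64x64 region over a 512x512 output
        detail = im.transform((512, 512), Image.Transform.EXTENT, (0, 0, 64, 64))
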
@ -427,30 +426,21 @@ Resampling filters

See :ref:`concept-filters` for details.

.. data:: NEAREST
:noindex:
.. data:: BOX
:noindex:
.. data:: BILINEAR
:noindex:
.. data:: HAMMING
:noindex:
.. data:: BICUBIC
:noindex:
.. data:: LANCZOS
:noindex:
.. autoclass:: Resampling
:members:
:undoc-members:

Some filters are also available under the following names for backwards compatibility:
Some deprecated filters are also available under the following names:

.. data:: NONE
:noindex:
:value: NEAREST
:value: Resampling.NEAREST
.. data:: LINEAR
:value: BILINEAR
:value: Resampling.BILINEAR
.. data:: CUBIC
:value: BICUBIC
:value: Resampling.BICUBIC
.. data:: ANTIALIAS
:value: LANCZOS
:value: Resampling.LANCZOS

Dither modes
^^^^^^^^^^^^
@ -458,16 +448,21 @@ Dither modes

Used to specify the dithering method to use for the
:meth:`~Image.convert` and :meth:`~Image.quantize` methods.

.. data:: NONE
:noindex:
.. py:class:: Dither

.. py:attribute:: NONE

No dither

.. comment: (not implemented)
.. data:: ORDERED
.. data:: RASTERIZE
.. py:attribute:: ORDERED

.. data:: FLOYDSTEINBERG
Not implemented

.. py:attribute:: RASTERIZE

Not implemented

.. py:attribute:: FLOYDSTEINBERG

Floyd-Steinberg dither
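For a quick sense of the effect, ``dither`` is passed to
:py:meth:`~PIL.Image.Image.convert`; a small sketch comparing thresholding with and
without dithering (``hopper.jpg`` is a placeholder)::

    from PIL import Image

    with Image.open("hopper.jpg") as im:
        gray = im.convert("L")

        # Floyd-Steinberg error diffusion (the default)
        bw_dithered = gray.convert("1", dither=Image.Dither.FLOYDSTEINBERG)

        # Plain threshold at 128, no dithering
        bw_threshold = gray.convert("1", dither=Image.Dither.NONE)
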
@ -476,30 +471,33 @@ Palettes

Used to specify the palette to use for the :meth:`~Image.convert` method.

.. data:: WEB
.. autoclass:: Palette
.. data:: ADAPTIVE
:members:
:undoc-members:

Quantization methods
^^^^^^^^^^^^^^^^^^^^

Used to specify the quantization method to use for the :meth:`~Image.quantize` method.

.. data:: MEDIANCUT
.. py:class:: Quantize

.. py:attribute:: MEDIANCUT

Median cut. Default method, except for RGBA images. This method does not support
RGBA images.

.. data:: MAXCOVERAGE
.. py:attribute:: MAXCOVERAGE

Maximum coverage. This method does not support RGBA images.

.. data:: FASTOCTREE
.. py:attribute:: FASTOCTREE

Fast octree. Default method for RGBA images.

.. data:: LIBIMAGEQUANT
.. py:attribute:: LIBIMAGEQUANT

libimagequant

Check support using :py:func:`PIL.features.check_feature`
Check support using :py:func:`PIL.features.check_feature` with
with ``feature="libimagequant"``.
``feature="libimagequant"``.
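A small sketch of selecting a quantization method explicitly (file name is
illustrative)::

    from PIL import Image

    with Image.open("hopper.jpg") as im:
        # Median cut with a 64-colour palette
        q64 = im.quantize(colors=64, method=Image.Quantize.MEDIANCUT)

        # RGBA input needs fast octree (or libimagequant, if available)
        rgba = im.convert("RGBA")
        q_rgba = rgba.quantize(colors=64, method=Image.Quantize.FASTOCTREE)
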
@ -118,8 +118,8 @@ can be easily displayed in a chromaticity diagram, for example).
|
||||||
another profile (usually overridden at run-time, but provided here
|
another profile (usually overridden at run-time, but provided here
|
||||||
for DeviceLink and embedded source profiles, see 7.2.15 of ICC.1:2010).
|
for DeviceLink and embedded source profiles, see 7.2.15 of ICC.1:2010).
|
||||||
|
|
||||||
One of ``ImageCms.INTENT_ABSOLUTE_COLORIMETRIC``, ``ImageCms.INTENT_PERCEPTUAL``,
|
One of ``ImageCms.Intent.ABSOLUTE_COLORIMETRIC``, ``ImageCms.Intent.PERCEPTUAL``,
|
||||||
``ImageCms.INTENT_RELATIVE_COLORIMETRIC`` and ``ImageCms.INTENT_SATURATION``.
|
``ImageCms.Intent.RELATIVE_COLORIMETRIC`` and ``ImageCms.Intent.SATURATION``.
|
||||||
|
|
||||||
.. py:attribute:: profile_id
|
.. py:attribute:: profile_id
|
||||||
:type: bytes
|
:type: bytes
|
||||||
|
@ -313,14 +313,14 @@ can be easily displayed in a chromaticity diagram, for example).
|
||||||
the CLUT model.
|
the CLUT model.
|
||||||
|
|
||||||
The dictionary is indexed by intents
|
The dictionary is indexed by intents
|
||||||
(``ImageCms.INTENT_ABSOLUTE_COLORIMETRIC``,
|
(``ImageCms.Intent.ABSOLUTE_COLORIMETRIC``,
|
||||||
``ImageCms.INTENT_PERCEPTUAL``,
|
``ImageCms.Intent.PERCEPTUAL``,
|
||||||
``ImageCms.INTENT_RELATIVE_COLORIMETRIC`` and
|
``ImageCms.Intent.RELATIVE_COLORIMETRIC`` and
|
||||||
``ImageCms.INTENT_SATURATION``).
|
``ImageCms.Intent.SATURATION``).
|
||||||
|
|
||||||
The values are 3-tuples indexed by directions
|
The values are 3-tuples indexed by directions
|
||||||
(``ImageCms.DIRECTION_INPUT``, ``ImageCms.DIRECTION_OUTPUT``,
|
(``ImageCms.Direction.INPUT``, ``ImageCms.Direction.OUTPUT``,
|
||||||
``ImageCms.DIRECTION_PROOF``).
|
``ImageCms.Direction.PROOF``).
|
||||||
|
|
||||||
The elements of the tuple are booleans. If the value is ``True``,
|
The elements of the tuple are booleans. If the value is ``True``,
|
||||||
that intent is supported for that direction.
|
that intent is supported for that direction.
|
||||||
|
@ -331,14 +331,14 @@ can be easily displayed in a chromaticity diagram, for example).
|
||||||
Returns a dictionary of all supported intents and directions.
|
Returns a dictionary of all supported intents and directions.
|
||||||
|
|
||||||
The dictionary is indexed by intents
|
The dictionary is indexed by intents
|
||||||
(``ImageCms.INTENT_ABSOLUTE_COLORIMETRIC``,
|
(``ImageCms.Intent.ABSOLUTE_COLORIMETRIC``,
|
||||||
``ImageCms.INTENT_PERCEPTUAL``,
|
``ImageCms.Intent.PERCEPTUAL``,
|
||||||
``ImageCms.INTENT_RELATIVE_COLORIMETRIC`` and
|
``ImageCms.Intent.RELATIVE_COLORIMETRIC`` and
|
||||||
``ImageCms.INTENT_SATURATION``).
|
``ImageCms.Intent.SATURATION``).
|
||||||
|
|
||||||
The values are 3-tuples indexed by directions
|
The values are 3-tuples indexed by directions
|
||||||
(``ImageCms.DIRECTION_INPUT``, ``ImageCms.DIRECTION_OUTPUT``,
|
(``ImageCms.Direction.INPUT``, ``ImageCms.Direction.OUTPUT``,
|
||||||
``ImageCms.DIRECTION_PROOF``).
|
``ImageCms.Direction.PROOF``).
|
||||||
|
|
||||||
The elements of the tuple are booleans. If the value is ``True``,
|
The elements of the tuple are booleans. If the value is ``True``,
|
||||||
that intent is supported for that direction.
|
that intent is supported for that direction.
|
||||||
|
@ -352,11 +352,11 @@ can be easily displayed in a chromaticity diagram, for example).

Note that you can also get this information for all intents and directions
with :py:attr:`.intent_supported`.

:param intent: One of ``ImageCms.INTENT_ABSOLUTE_COLORIMETRIC``,
:param intent: One of ``ImageCms.Intent.ABSOLUTE_COLORIMETRIC``,
``ImageCms.INTENT_PERCEPTUAL``,
``ImageCms.Intent.PERCEPTUAL``,
``ImageCms.INTENT_RELATIVE_COLORIMETRIC``
``ImageCms.Intent.RELATIVE_COLORIMETRIC``
and ``ImageCms.INTENT_SATURATION``.
and ``ImageCms.Intent.SATURATION``.
:param direction: One of ``ImageCms.DIRECTION_INPUT``,
:param direction: One of ``ImageCms.Direction.INPUT``,
``ImageCms.DIRECTION_OUTPUT``
``ImageCms.Direction.OUTPUT``
and ``ImageCms.DIRECTION_PROOF``
and ``ImageCms.Direction.PROOF``
:return: Boolean if the intent and direction is supported.
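A sketch of such a query, assuming a profile object like the one returned by
:py:func:`ImageCms.createProfile` (the built-in sRGB profile is used here only as an
example)::

    from PIL import ImageCms

    srgb = ImageCms.createProfile("sRGB")
    ok = srgb.is_intent_supported(
        ImageCms.Intent.PERCEPTUAL, ImageCms.Direction.OUTPUT
    )
    print("Perceptual intent supported for output:", bool(ok))
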
@ -60,12 +60,12 @@ Methods

Constants
---------

.. data:: PIL.ImageFont.LAYOUT_BASIC
.. data:: PIL.ImageFont.Layout.BASIC

Use basic text layout for TrueType font.
Advanced features such as text direction are not supported.

.. data:: PIL.ImageFont.LAYOUT_RAQM
.. data:: PIL.ImageFont.Layout.RAQM

Use Raqm text layout for TrueType font.
Advanced features are supported.
@ -57,7 +57,7 @@ Support for the following features can be checked:

* ``transp_webp``: Support for transparency in WebP images.
* ``webp_mux``: (compile time) Support for EXIF data in WebP images.
* ``webp_anim``: (compile time) Support for animated WebP images.
* ``raqm``: Raqm library, required for ``ImageFont.LAYOUT_RAQM`` in :py:func:`PIL.ImageFont.truetype`. Run-time version number is available for Raqm 0.7.0 or newer.
* ``raqm``: Raqm library, required for ``ImageFont.Layout.RAQM`` in :py:func:`PIL.ImageFont.truetype`. Run-time version number is available for Raqm 0.7.0 or newer.
* ``libimagequant``: (compile time) ImageQuant quantization support in :py:func:`PIL.Image.Image.quantize`. Run-time version number is available.
* ``xcb``: (compile time) Support for X11 in :py:func:`PIL.ImageGrab.grab` via the XCB library.
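Checking these at run time is a one-liner per feature; a small sketch::

    from PIL import features

    # True if Pillow was built with the named feature
    print(features.check_feature("libimagequant"))
    print(features.check_feature("raqm"))

    # Run-time version string, or None if not available
    print(features.version_feature("raqm"))
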
@ -230,8 +230,7 @@ Plugin reference
|
||||||
|
|
||||||
.. automodule:: PIL.PngImagePlugin
|
.. automodule:: PIL.PngImagePlugin
|
||||||
:members: ChunkStream, PngImageFile, PngStream, getchunks, is_cid, putchunk,
|
:members: ChunkStream, PngImageFile, PngStream, getchunks, is_cid, putchunk,
|
||||||
MAX_TEXT_CHUNK, MAX_TEXT_MEMORY, APNG_BLEND_OP_SOURCE, APNG_BLEND_OP_OVER,
|
Blend, Disposal, MAX_TEXT_CHUNK, MAX_TEXT_MEMORY
|
||||||
APNG_DISPOSE_OP_NONE, APNG_DISPOSE_OP_BACKGROUND, APNG_DISPOSE_OP_PREVIOUS
|
|
||||||
:undoc-members:
|
:undoc-members:
|
||||||
:show-inheritance:
|
:show-inheritance:
|
||||||
:member-order: groupwise
|
:member-order: groupwise
|
||||||
|
|
|
@ -111,16 +111,14 @@ downscaling with libjpeg, which uses supersampling internally, not convolutions.
|
||||||
Image transposition
|
Image transposition
|
||||||
-------------------
|
-------------------
|
||||||
|
|
||||||
A new method :py:data:`PIL.Image.TRANSPOSE` has been added for the
|
A new method ``TRANSPOSE`` has been added for the
|
||||||
:py:meth:`~PIL.Image.Image.transpose` operation in addition to
|
:py:meth:`~PIL.Image.Image.transpose` operation in addition to
|
||||||
:py:data:`~PIL.Image.FLIP_LEFT_RIGHT`, :py:data:`~PIL.Image.FLIP_TOP_BOTTOM`,
|
``FLIP_LEFT_RIGHT``, ``FLIP_TOP_BOTTOM``, ``ROTATE_90``, ``ROTATE_180``,
|
||||||
:py:data:`~PIL.Image.ROTATE_90`, :py:data:`~PIL.Image.ROTATE_180`,
|
``ROTATE_270``. ``TRANSPOSE`` is an algebra transpose, with an image reflected
|
||||||
:py:data:`~PIL.Image.ROTATE_270`. :py:data:`~PIL.Image.TRANSPOSE` is an algebra
|
across its main diagonal.
|
||||||
transpose, with an image reflected across its main diagonal.
|
|
||||||
|
|
||||||
The speed of :py:data:`~PIL.Image.ROTATE_90`, :py:data:`~PIL.Image.ROTATE_270`
|
The speed of ``ROTATE_90``, ``ROTATE_270`` and ``TRANSPOSE`` has been significantly
|
||||||
and :py:data:`~PIL.Image.TRANSPOSE` has been significantly improved for large
|
improved for large images which don't fit in the processor cache.
|
||||||
images which don't fit in the processor cache.
|
|
||||||
|
|
||||||
Gaussian blur and unsharp mask
|
Gaussian blur and unsharp mask
|
||||||
------------------------------
|
------------------------------
|
||||||
|
|
|
@ -21,11 +21,77 @@ coordinate type".
|
||||||
Deprecations
|
Deprecations
|
||||||
^^^^^^^^^^^^
|
^^^^^^^^^^^^
|
||||||
|
|
||||||
|
Constants
|
||||||
|
~~~~~~~~~
|
||||||
|
|
||||||
|
A number of constants have been deprecated and will be removed in Pillow 10.0.0
|
||||||
|
(2023-07-01). Instead, ``enum.IntEnum`` classes have been added.
|
||||||
|
|
||||||
|
===================================================== ============================================================
|
||||||
|
Deprecated Use instead
|
||||||
|
===================================================== ============================================================
|
||||||
|
``Image.NONE`` Either ``Image.Dither.NONE`` or ``Image.Resampling.NEAREST``
|
||||||
|
``Image.NEAREST`` Either ``Image.Dither.NONE`` or ``Image.Resampling.NEAREST``
|
||||||
|
``Image.ORDERED`` ``Image.Dither.ORDERED``
|
||||||
|
``Image.RASTERIZE`` ``Image.Dither.RASTERIZE``
|
||||||
|
``Image.FLOYDSTEINBERG`` ``Image.Dither.FLOYDSTEINBERG``
|
||||||
|
``Image.WEB`` ``Image.Palette.WEB``
|
||||||
|
``Image.ADAPTIVE`` ``Image.Palette.ADAPTIVE``
|
||||||
|
``Image.AFFINE`` ``Image.Transform.AFFINE``
|
||||||
|
``Image.EXTENT`` ``Image.Transform.EXTENT``
|
||||||
|
``Image.PERSPECTIVE`` ``Image.Transform.PERSPECTIVE``
|
||||||
|
``Image.QUAD`` ``Image.Transform.QUAD``
|
||||||
|
``Image.MESH`` ``Image.Transform.MESH``
|
||||||
|
``Image.FLIP_LEFT_RIGHT`` ``Image.Transpose.FLIP_LEFT_RIGHT``
|
||||||
|
``Image.FLIP_TOP_BOTTOM`` ``Image.Transpose.FLIP_TOP_BOTTOM``
|
||||||
|
``Image.ROTATE_90`` ``Image.Transpose.ROTATE_90``
|
||||||
|
``Image.ROTATE_180`` ``Image.Transpose.ROTATE_180``
|
||||||
|
``Image.ROTATE_270`` ``Image.Transpose.ROTATE_270``
|
||||||
|
``Image.TRANSPOSE`` ``Image.Transpose.TRANSPOSE``
|
||||||
|
``Image.TRANSVERSE`` ``Image.Transpose.TRANSVERSE``
|
||||||
|
``Image.BOX`` ``Image.Resampling.BOX``
|
||||||
|
| ``Image.BILINEAR``                                    ``Image.Resampling.BILINEAR``
|
||||||
|
| ``Image.LINEAR``                                      ``Image.Resampling.BILINEAR``
|
||||||
|
``Image.HAMMING`` ``Image.Resampling.HAMMING``
|
||||||
|
``Image.BICUBIC`` ``Image.Resampling.BICUBIC``
|
||||||
|
``Image.CUBIC`` ``Image.Resampling.BICUBIC``
|
||||||
|
``Image.LANCZOS`` ``Image.Resampling.LANCZOS``
|
||||||
|
``Image.ANTIALIAS`` ``Image.Resampling.LANCZOS``
|
||||||
|
``Image.MEDIANCUT`` ``Image.Quantize.MEDIANCUT``
|
||||||
|
``Image.MAXCOVERAGE`` ``Image.Quantize.MAXCOVERAGE``
|
||||||
|
``Image.FASTOCTREE`` ``Image.Quantize.FASTOCTREE``
|
||||||
|
``Image.LIBIMAGEQUANT`` ``Image.Quantize.LIBIMAGEQUANT``
|
||||||
|
``ImageCms.INTENT_PERCEPTUAL`` ``ImageCms.Intent.PERCEPTUAL``
|
||||||
|
| ``ImageCms.INTENT_RELATIVE_COLORIMETRIC``             ``ImageCms.Intent.RELATIVE_COLORIMETRIC``
|
||||||
|
``ImageCms.INTENT_SATURATION`` ``ImageCms.Intent.SATURATION``
|
||||||
|
``ImageCms.INTENT_ABSOLUTE_COLORIMETRIC`` ``ImageCms.Intent.ABSOLUTE_COLORIMETRIC``
|
||||||
|
``ImageCms.DIRECTION_INPUT`` ``ImageCms.Direction.INPUT``
|
||||||
|
``ImageCms.DIRECTION_OUTPUT`` ``ImageCms.Direction.OUTPUT``
|
||||||
|
``ImageCms.DIRECTION_PROOF`` ``ImageCms.Direction.PROOF``
|
||||||
|
``ImageFont.LAYOUT_BASIC`` ``ImageFont.Layout.BASIC``
|
||||||
|
``ImageFont.LAYOUT_RAQM`` ``ImageFont.Layout.RAQM``
|
||||||
|
``BlpImagePlugin.BLP_FORMAT_JPEG`` ``BlpImagePlugin.Format.JPEG``
|
||||||
|
``BlpImagePlugin.BLP_ENCODING_UNCOMPRESSED`` ``BlpImagePlugin.Encoding.UNCOMPRESSED``
|
||||||
|
``BlpImagePlugin.BLP_ENCODING_DXT`` ``BlpImagePlugin.Encoding.DXT``
|
||||||
|
``BlpImagePlugin.BLP_ENCODING_UNCOMPRESSED_RAW_RGBA`` ``BlpImagePlugin.Encoding.UNCOMPRESSED_RAW_RGBA``
|
||||||
|
``BlpImagePlugin.BLP_ALPHA_ENCODING_DXT1`` ``BlpImagePlugin.AlphaEncoding.DXT1``
|
||||||
|
``BlpImagePlugin.BLP_ALPHA_ENCODING_DXT3`` ``BlpImagePlugin.AlphaEncoding.DXT3``
|
||||||
|
``BlpImagePlugin.BLP_ALPHA_ENCODING_DXT5`` ``BlpImagePlugin.AlphaEncoding.DXT5``
|
||||||
|
``FtexImagePlugin.FORMAT_DXT1`` ``FtexImagePlugin.Format.DXT1``
|
||||||
|
``FtexImagePlugin.FORMAT_UNCOMPRESSED`` ``FtexImagePlugin.Format.UNCOMPRESSED``
|
||||||
|
``PngImagePlugin.APNG_DISPOSE_OP_NONE`` ``PngImagePlugin.Disposal.OP_NONE``
|
||||||
|
``PngImagePlugin.APNG_DISPOSE_OP_BACKGROUND`` ``PngImagePlugin.Disposal.OP_BACKGROUND``
|
||||||
|
``PngImagePlugin.APNG_DISPOSE_OP_PREVIOUS`` ``PngImagePlugin.Disposal.OP_PREVIOUS``
|
||||||
|
``PngImagePlugin.APNG_BLEND_OP_SOURCE`` ``PngImagePlugin.Blend.OP_SOURCE``
|
||||||
|
``PngImagePlugin.APNG_BLEND_OP_OVER`` ``PngImagePlugin.Blend.OP_OVER``
|
||||||
|
===================================================== ============================================================
|
||||||
|
|
||||||
ImageShow.Viewer.show_file file argument
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The ``file`` argument in :py:meth:`~PIL.ImageShow.Viewer.show_file()` has been
deprecated, replaced by ``path``.
deprecated and will be removed in Pillow 10.0.0 (2023-07-01). It has been replaced by
``path``.

In effect, ``viewer.show_file("test.jpg")`` will continue to work unchanged.
``viewer.show_file(file="test.jpg")`` will raise a deprecation warning, and suggest
@ -97,9 +97,9 @@ def testimage():
|
||||||
10456
|
10456
|
||||||
>>> len(im.tobytes())
|
>>> len(im.tobytes())
|
||||||
49152
|
49152
|
||||||
>>> _info(im.transform((512, 512), Image.AFFINE, (1,0,0,0,1,0)))
|
>>> _info(im.transform((512, 512), Image.Transform.AFFINE, (1,0,0,0,1,0)))
|
||||||
(None, 'RGB', (512, 512))
|
(None, 'RGB', (512, 512))
|
||||||
>>> _info(im.transform((512, 512), Image.EXTENT, (32,32,96,96)))
|
>>> _info(im.transform((512, 512), Image.Transform.EXTENT, (32,32,96,96)))
|
||||||
(None, 'RGB', (512, 512))
|
(None, 'RGB', (512, 512))
|
||||||
|
|
||||||
The ImageDraw module lets you draw stuff in raster images:
|
The ImageDraw module lets you draw stuff in raster images:
|
||||||
|
|
|
@ -30,19 +30,54 @@ BLP files come in many different flavours:
"""

import struct
import warnings
from enum import IntEnum
from io import BytesIO

from . import Image, ImageFile

BLP_FORMAT_JPEG = 0

BLP_ENCODING_UNCOMPRESSED = 1
BLP_ENCODING_DXT = 2
BLP_ENCODING_UNCOMPRESSED_RAW_BGRA = 3

BLP_ALPHA_ENCODING_DXT1 = 0
BLP_ALPHA_ENCODING_DXT3 = 1
BLP_ALPHA_ENCODING_DXT5 = 7


class Format(IntEnum):
    JPEG = 0


class Encoding(IntEnum):
    UNCOMPRESSED = 1
    DXT = 2
    UNCOMPRESSED_RAW_BGRA = 3


class AlphaEncoding(IntEnum):
    DXT1 = 0
    DXT3 = 1
    DXT5 = 7


def __getattr__(name):
    deprecated = "deprecated and will be removed in Pillow 10 (2023-07-01). "
    for enum, prefix in {
        Format: "BLP_FORMAT_",
        Encoding: "BLP_ENCODING_",
        AlphaEncoding: "BLP_ALPHA_ENCODING_",
    }.items():
        if name.startswith(prefix):
            name = name[len(prefix) :]
            if name in enum.__members__:
                warnings.warn(
                    prefix
                    + name
                    + " is "
                    + deprecated
                    + "Use "
                    + enum.__name__
                    + "."
                    + name
                    + " instead.",
                    DeprecationWarning,
                    stacklevel=2,
                )
                return enum[name]
    raise AttributeError(f"module '{__name__}' has no attribute '{name}'")
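# Note: because the old names are now resolved through this module-level
# __getattr__, existing code keeps working but emits a DeprecationWarning, and
# the returned enum members compare equal to the old integer values. A quick
# sketch of what callers see (illustrative only, not part of the plugin):
import warnings

from PIL import BlpImagePlugin

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    value = BlpImagePlugin.BLP_ALPHA_ENCODING_DXT5  # deprecated spelling
    assert value == BlpImagePlugin.AlphaEncoding.DXT5 == 7
    assert issubclass(caught[-1].category, DeprecationWarning)
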
def unpack_565(i):
|
def unpack_565(i):
|
||||||
|
@ -320,7 +355,7 @@ class _BLPBaseDecoder(ImageFile.PyDecoder):
|
||||||
|
|
||||||
class BLP1Decoder(_BLPBaseDecoder):
|
class BLP1Decoder(_BLPBaseDecoder):
|
||||||
def _load(self):
|
def _load(self):
|
||||||
if self._blp_compression == BLP_FORMAT_JPEG:
|
if self._blp_compression == Format.JPEG:
|
||||||
self._decode_jpeg_stream()
|
self._decode_jpeg_stream()
|
||||||
|
|
||||||
elif self._blp_compression == 1:
|
elif self._blp_compression == 1:
|
||||||
|
@ -347,7 +382,7 @@ class BLP1Decoder(_BLPBaseDecoder):
|
||||||
)
|
)
|
||||||
|
|
||||||
def _decode_jpeg_stream(self):
|
def _decode_jpeg_stream(self):
|
||||||
from PIL.JpegImagePlugin import JpegImageFile
|
from .JpegImagePlugin import JpegImageFile
|
||||||
|
|
||||||
(jpeg_header_size,) = struct.unpack("<I", self._safe_read(4))
|
(jpeg_header_size,) = struct.unpack("<I", self._safe_read(4))
|
||||||
jpeg_header = self._safe_read(jpeg_header_size)
|
jpeg_header = self._safe_read(jpeg_header_size)
|
||||||
|
@ -372,7 +407,7 @@ class BLP2Decoder(_BLPBaseDecoder):
|
||||||
if self._blp_compression == 1:
|
if self._blp_compression == 1:
|
||||||
# Uncompressed or DirectX compression
|
# Uncompressed or DirectX compression
|
||||||
|
|
||||||
if self._blp_encoding == BLP_ENCODING_UNCOMPRESSED:
|
if self._blp_encoding == Encoding.UNCOMPRESSED:
|
||||||
_data = BytesIO(self._safe_read(self._blp_lengths[0]))
|
_data = BytesIO(self._safe_read(self._blp_lengths[0]))
|
||||||
while True:
|
while True:
|
||||||
try:
|
try:
|
||||||
|
@ -382,8 +417,8 @@ class BLP2Decoder(_BLPBaseDecoder):
|
||||||
b, g, r, a = palette[offset]
|
b, g, r, a = palette[offset]
|
||||||
data.extend((r, g, b))
|
data.extend((r, g, b))
|
||||||
|
|
||||||
elif self._blp_encoding == BLP_ENCODING_DXT:
|
elif self._blp_encoding == Encoding.DXT:
|
||||||
if self._blp_alpha_encoding == BLP_ALPHA_ENCODING_DXT1:
|
if self._blp_alpha_encoding == AlphaEncoding.DXT1:
|
||||||
linesize = (self.size[0] + 3) // 4 * 8
|
linesize = (self.size[0] + 3) // 4 * 8
|
||||||
for yb in range((self.size[1] + 3) // 4):
|
for yb in range((self.size[1] + 3) // 4):
|
||||||
for d in decode_dxt1(
|
for d in decode_dxt1(
|
||||||
|
@ -391,13 +426,13 @@ class BLP2Decoder(_BLPBaseDecoder):
|
||||||
):
|
):
|
||||||
data += d
|
data += d
|
||||||
|
|
||||||
elif self._blp_alpha_encoding == BLP_ALPHA_ENCODING_DXT3:
|
elif self._blp_alpha_encoding == AlphaEncoding.DXT3:
|
||||||
linesize = (self.size[0] + 3) // 4 * 16
|
linesize = (self.size[0] + 3) // 4 * 16
|
||||||
for yb in range((self.size[1] + 3) // 4):
|
for yb in range((self.size[1] + 3) // 4):
|
||||||
for d in decode_dxt3(self._safe_read(linesize)):
|
for d in decode_dxt3(self._safe_read(linesize)):
|
||||||
data += d
|
data += d
|
||||||
|
|
||||||
elif self._blp_alpha_encoding == BLP_ALPHA_ENCODING_DXT5:
|
elif self._blp_alpha_encoding == AlphaEncoding.DXT5:
|
||||||
linesize = (self.size[0] + 3) // 4 * 16
|
linesize = (self.size[0] + 3) // 4 * 16
|
||||||
for yb in range((self.size[1] + 3) // 4):
|
for yb in range((self.size[1] + 3) // 4):
|
||||||
for d in decode_dxt5(self._safe_read(linesize)):
|
for d in decode_dxt5(self._safe_read(linesize)):
|
||||||
|
|
|
@ -52,13 +52,41 @@ Note: All data is stored in little-Endian (Intel) byte order.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
import struct
|
import struct
|
||||||
|
import warnings
|
||||||
|
from enum import IntEnum
|
||||||
from io import BytesIO
|
from io import BytesIO
|
||||||
|
|
||||||
from . import Image, ImageFile
|
from . import Image, ImageFile
|
||||||
|
|
||||||
MAGIC = b"FTEX"
|
MAGIC = b"FTEX"
|
||||||
FORMAT_DXT1 = 0
|
|
||||||
FORMAT_UNCOMPRESSED = 1
|
|
||||||
|
class Format(IntEnum):
|
||||||
|
DXT1 = 0
|
||||||
|
UNCOMPRESSED = 1
|
||||||
|
|
||||||
|
|
||||||
|
def __getattr__(name):
|
||||||
|
deprecated = "deprecated and will be removed in Pillow 10 (2023-07-01). "
|
||||||
|
for enum, prefix in {Format: "FORMAT_"}.items():
|
||||||
|
if name.startswith(prefix):
|
||||||
|
name = name[len(prefix) :]
|
||||||
|
if name in enum.__members__:
|
||||||
|
warnings.warn(
|
||||||
|
prefix
|
||||||
|
+ name
|
||||||
|
+ " is "
|
||||||
|
+ deprecated
|
||||||
|
+ "Use "
|
||||||
|
+ enum.__name__
|
||||||
|
+ "."
|
||||||
|
+ name
|
||||||
|
+ " instead.",
|
||||||
|
DeprecationWarning,
|
||||||
|
stacklevel=2,
|
||||||
|
)
|
||||||
|
return enum[name]
|
||||||
|
raise AttributeError(f"module '{__name__}' has no attribute '{name}'")
|
||||||
|
|
||||||
|
|
||||||
class FtexImageFile(ImageFile.ImageFile):
|
class FtexImageFile(ImageFile.ImageFile):
|
||||||
|
@ -83,10 +111,10 @@ class FtexImageFile(ImageFile.ImageFile):
|
||||||
|
|
||||||
data = self.fp.read(mipmap_size)
|
data = self.fp.read(mipmap_size)
|
||||||
|
|
||||||
if format == FORMAT_DXT1:
|
if format == Format.DXT1:
|
||||||
self.mode = "RGBA"
|
self.mode = "RGBA"
|
||||||
self.tile = [("bcn", (0, 0) + self.size, 0, (1))]
|
self.tile = [("bcn", (0, 0) + self.size, 0, (1))]
|
||||||
elif format == FORMAT_UNCOMPRESSED:
|
elif format == Format.UNCOMPRESSED:
|
||||||
self.tile = [("raw", (0, 0) + self.size, 0, ("RGB", 0, 1))]
|
self.tile = [("raw", (0, 0) + self.size, 0, ("RGB", 0, 1))]
|
||||||
else:
|
else:
|
||||||
raise ValueError(f"Invalid texture compression format: {repr(format)}")
|
raise ValueError(f"Invalid texture compression format: {repr(format)}")
|
||||||
|
|
|
@ -169,12 +169,12 @@ class GifImageFile(ImageFile.ImageFile):
|
||||||
if "transparency" in self.info:
|
if "transparency" in self.info:
|
||||||
self.mode = "RGBA"
|
self.mode = "RGBA"
|
||||||
self.im.putpalettealpha(self.info["transparency"], 0)
|
self.im.putpalettealpha(self.info["transparency"], 0)
|
||||||
self.im = self.im.convert("RGBA", Image.FLOYDSTEINBERG)
|
self.im = self.im.convert("RGBA", Image.Dither.FLOYDSTEINBERG)
|
||||||
|
|
||||||
del self.info["transparency"]
|
del self.info["transparency"]
|
||||||
else:
|
else:
|
||||||
self.mode = "RGB"
|
self.mode = "RGB"
|
||||||
self.im = self.im.convert("RGB", Image.FLOYDSTEINBERG)
|
self.im = self.im.convert("RGB", Image.Dither.FLOYDSTEINBERG)
|
||||||
if self.dispose:
|
if self.dispose:
|
||||||
self.im.paste(self.dispose, self.dispose_extent)
|
self.im.paste(self.dispose, self.dispose_extent)
|
||||||
|
|
||||||
|
@ -425,7 +425,7 @@ def _normalize_mode(im, initial_call=False):
|
||||||
palette_size = 256
|
palette_size = 256
|
||||||
if im.palette:
|
if im.palette:
|
||||||
palette_size = len(im.palette.getdata()[1]) // 3
|
palette_size = len(im.palette.getdata()[1]) // 3
|
||||||
im = im.convert("P", palette=Image.ADAPTIVE, colors=palette_size)
|
im = im.convert("P", palette=Image.Palette.ADAPTIVE, colors=palette_size)
|
||||||
if im.palette.mode == "RGBA":
|
if im.palette.mode == "RGBA":
|
||||||
for rgba in im.palette.colors.keys():
|
for rgba in im.palette.colors.keys():
|
||||||
if rgba[3] == 0:
|
if rgba[3] == 0:
|
||||||
|
|
|
@ -69,7 +69,7 @@ def _save(im, fp, filename):
|
||||||
if not tmp:
|
if not tmp:
|
||||||
# TODO: invent a more convenient method for proportional scalings
|
# TODO: invent a more convenient method for proportional scalings
|
||||||
tmp = im.copy()
|
tmp = im.copy()
|
||||||
tmp.thumbnail(size, Image.LANCZOS, reducing_gap=None)
|
tmp.thumbnail(size, Image.Resampling.LANCZOS, reducing_gap=None)
|
||||||
bits = BmpImagePlugin.SAVE[tmp.mode][1] if bmp else 32
|
bits = BmpImagePlugin.SAVE[tmp.mode][1] if bmp else 32
|
||||||
fp.write(struct.pack("<H", bits)) # wBitCount(2)
|
fp.write(struct.pack("<H", bits)) # wBitCount(2)
|
||||||
|
|
||||||
|
|
355
src/PIL/Image.py
355
src/PIL/Image.py
|
@ -37,6 +37,7 @@ import sys
|
||||||
import tempfile
|
import tempfile
|
||||||
import warnings
|
import warnings
|
||||||
from collections.abc import Callable, MutableMapping
|
from collections.abc import Callable, MutableMapping
|
||||||
|
from enum import IntEnum
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
|
|
||||||
try:
|
try:
|
||||||
|
@ -53,15 +54,57 @@ from ._util import deferred_error, isPath
|
||||||
|
|
||||||
|
|
||||||
def __getattr__(name):
|
def __getattr__(name):
|
||||||
|
deprecated = "deprecated and will be removed in Pillow 10 (2023-07-01). "
|
||||||
categories = {"NORMAL": 0, "SEQUENCE": 1, "CONTAINER": 2}
|
categories = {"NORMAL": 0, "SEQUENCE": 1, "CONTAINER": 2}
|
||||||
if name in categories:
|
if name in categories:
|
||||||
warnings.warn(
|
warnings.warn(
|
||||||
"Image categories are deprecated and will be removed in Pillow 10 "
|
"Image categories are " + deprecated + "Use is_animated instead.",
|
||||||
"(2023-07-01). Use is_animated instead.",
|
|
||||||
DeprecationWarning,
|
DeprecationWarning,
|
||||||
stacklevel=2,
|
stacklevel=2,
|
||||||
)
|
)
|
||||||
return categories[name]
|
return categories[name]
|
||||||
|
elif name in ("NEAREST", "NONE"):
|
||||||
|
warnings.warn(
|
||||||
|
name
|
||||||
|
+ " is "
|
||||||
|
+ deprecated
|
||||||
|
+ "Use Resampling.NEAREST or Dither.NONE instead.",
|
||||||
|
DeprecationWarning,
|
||||||
|
stacklevel=2,
|
||||||
|
)
|
||||||
|
return 0
|
||||||
|
old_resampling = {
|
||||||
|
"LINEAR": "BILINEAR",
|
||||||
|
"CUBIC": "BICUBIC",
|
||||||
|
"ANTIALIAS": "LANCZOS",
|
||||||
|
}
|
||||||
|
if name in old_resampling:
|
||||||
|
warnings.warn(
|
||||||
|
name
|
||||||
|
+ " is "
|
||||||
|
+ deprecated
|
||||||
|
+ "Use Resampling."
|
||||||
|
+ old_resampling[name]
|
||||||
|
+ " instead.",
|
||||||
|
DeprecationWarning,
|
||||||
|
stacklevel=2,
|
||||||
|
)
|
||||||
|
return Resampling[old_resampling[name]]
|
||||||
|
for enum in (Transpose, Transform, Resampling, Dither, Palette, Quantize):
|
||||||
|
if name in enum.__members__:
|
||||||
|
warnings.warn(
|
||||||
|
name
|
||||||
|
+ " is "
|
||||||
|
+ deprecated
|
||||||
|
+ "Use "
|
||||||
|
+ enum.__name__
|
||||||
|
+ "."
|
||||||
|
+ name
|
||||||
|
+ " instead.",
|
||||||
|
DeprecationWarning,
|
||||||
|
stacklevel=2,
|
||||||
|
)
|
||||||
|
return enum[name]
|
||||||
raise AttributeError(f"module '{__name__}' has no attribute '{name}'")
|
raise AttributeError(f"module '{__name__}' has no attribute '{name}'")
|
||||||
|
|
||||||
|
|
||||||
|
@ -139,46 +182,64 @@ def isImageType(t):
|
||||||
# Constants
|
# Constants
|
||||||
|
|
||||||
# transpose
|
# transpose
|
||||||
FLIP_LEFT_RIGHT = 0
|
class Transpose(IntEnum):
|
||||||
FLIP_TOP_BOTTOM = 1
|
FLIP_LEFT_RIGHT = 0
|
||||||
ROTATE_90 = 2
|
FLIP_TOP_BOTTOM = 1
|
||||||
ROTATE_180 = 3
|
ROTATE_90 = 2
|
||||||
ROTATE_270 = 4
|
ROTATE_180 = 3
|
||||||
TRANSPOSE = 5
|
ROTATE_270 = 4
|
||||||
TRANSVERSE = 6
|
TRANSPOSE = 5
|
||||||
|
TRANSVERSE = 6
|
||||||
|
|
||||||
|
|
||||||
# transforms (also defined in Imaging.h)
|
# transforms (also defined in Imaging.h)
|
||||||
AFFINE = 0
|
class Transform(IntEnum):
|
||||||
EXTENT = 1
|
AFFINE = 0
|
||||||
PERSPECTIVE = 2
|
EXTENT = 1
|
||||||
QUAD = 3
|
PERSPECTIVE = 2
|
||||||
MESH = 4
|
QUAD = 3
|
||||||
|
MESH = 4
|
||||||
|
|
||||||
|
|
||||||
# resampling filters (also defined in Imaging.h)
|
# resampling filters (also defined in Imaging.h)
|
||||||
NEAREST = NONE = 0
|
class Resampling(IntEnum):
|
||||||
BOX = 4
|
NEAREST = 0
|
||||||
BILINEAR = LINEAR = 2
|
BOX = 4
|
||||||
HAMMING = 5
|
BILINEAR = 2
|
||||||
BICUBIC = CUBIC = 3
|
HAMMING = 5
|
||||||
LANCZOS = ANTIALIAS = 1
|
BICUBIC = 3
|
||||||
|
LANCZOS = 1
|
||||||
|
|
||||||
_filters_support = {BOX: 0.5, BILINEAR: 1.0, HAMMING: 1.0, BICUBIC: 2.0, LANCZOS: 3.0}
|
|
||||||
|
_filters_support = {
|
||||||
|
Resampling.BOX: 0.5,
|
||||||
|
Resampling.BILINEAR: 1.0,
|
||||||
|
Resampling.HAMMING: 1.0,
|
||||||
|
Resampling.BICUBIC: 2.0,
|
||||||
|
Resampling.LANCZOS: 3.0,
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
# dithers
|
# dithers
|
||||||
NEAREST = NONE = 0
|
class Dither(IntEnum):
|
||||||
ORDERED = 1 # Not yet implemented
|
NONE = 0
|
||||||
RASTERIZE = 2 # Not yet implemented
|
ORDERED = 1 # Not yet implemented
|
||||||
FLOYDSTEINBERG = 3 # default
|
RASTERIZE = 2 # Not yet implemented
|
||||||
|
FLOYDSTEINBERG = 3 # default
|
||||||
|
|
||||||
|
|
||||||
# palettes/quantizers
|
# palettes/quantizers
|
||||||
WEB = 0
|
class Palette(IntEnum):
|
||||||
ADAPTIVE = 1
|
WEB = 0
|
||||||
|
ADAPTIVE = 1
|
||||||
|
|
||||||
|
|
||||||
|
class Quantize(IntEnum):
|
||||||
|
MEDIANCUT = 0
|
||||||
|
MAXCOVERAGE = 1
|
||||||
|
FASTOCTREE = 2
|
||||||
|
LIBIMAGEQUANT = 3
|
||||||
|
|
||||||
MEDIANCUT = 0
|
|
||||||
MAXCOVERAGE = 1
|
|
||||||
FASTOCTREE = 2
|
|
||||||
LIBIMAGEQUANT = 3
|
|
||||||
|
|
||||||
if hasattr(core, "DEFAULT_STRATEGY"):
|
if hasattr(core, "DEFAULT_STRATEGY"):
|
||||||
DEFAULT_STRATEGY = core.DEFAULT_STRATEGY
|
DEFAULT_STRATEGY = core.DEFAULT_STRATEGY
|
||||||
|
@ -821,13 +882,7 @@ class Image:
|
||||||
if self.im and self.palette and self.palette.dirty:
|
if self.im and self.palette and self.palette.dirty:
|
||||||
# realize palette
|
# realize palette
|
||||||
mode, arr = self.palette.getdata()
|
mode, arr = self.palette.getdata()
|
||||||
if mode == "RGBA":
|
self.im.putpalette(mode, arr)
|
||||||
mode = "RGB"
|
|
||||||
self.info["transparency"] = arr[3::4]
|
|
||||||
arr = bytes(
|
|
||||||
value for (index, value) in enumerate(arr) if index % 4 != 3
|
|
||||||
)
|
|
||||||
palette_length = self.im.putpalette(mode, arr)
|
|
||||||
self.palette.dirty = 0
|
self.palette.dirty = 0
|
||||||
self.palette.rawmode = None
|
self.palette.rawmode = None
|
||||||
if "transparency" in self.info and mode in ("LA", "PA"):
|
if "transparency" in self.info and mode in ("LA", "PA"):
|
||||||
|
@ -837,8 +892,9 @@ class Image:
|
||||||
self.im.putpalettealphas(self.info["transparency"])
|
self.im.putpalettealphas(self.info["transparency"])
|
||||||
self.palette.mode = "RGBA"
|
self.palette.mode = "RGBA"
|
||||||
else:
|
else:
|
||||||
self.palette.mode = "RGB"
|
palette_mode = "RGBA" if mode.startswith("RGBA") else "RGB"
|
||||||
self.palette.palette = self.im.getpalette()[: palette_length * 3]
|
self.palette.mode = palette_mode
|
||||||
|
self.palette.palette = self.im.getpalette(palette_mode, palette_mode)
|
||||||
|
|
||||||
if self.im:
|
if self.im:
|
||||||
if cffi and USE_CFFI_ACCESS:
|
if cffi and USE_CFFI_ACCESS:
|
||||||
|
@ -862,7 +918,9 @@ class Image:
|
||||||
"""
|
"""
|
||||||
pass
|
pass
|
||||||
|
|
||||||
def convert(self, mode=None, matrix=None, dither=None, palette=WEB, colors=256):
|
def convert(
|
||||||
|
self, mode=None, matrix=None, dither=None, palette=Palette.WEB, colors=256
|
||||||
|
):
|
||||||
"""
|
"""
|
||||||
Returns a converted copy of this image. For the "P" mode, this
|
Returns a converted copy of this image. For the "P" mode, this
|
||||||
method translates pixels through the palette. If mode is
|
method translates pixels through the palette. If mode is
|
||||||
|
@ -881,7 +939,7 @@ class Image:
|
||||||
The default method of converting a greyscale ("L") or "RGB"
|
The default method of converting a greyscale ("L") or "RGB"
|
||||||
image into a bilevel (mode "1") image uses Floyd-Steinberg
|
image into a bilevel (mode "1") image uses Floyd-Steinberg
|
||||||
dither to approximate the original image luminosity levels. If
|
dither to approximate the original image luminosity levels. If
|
||||||
dither is :data:`NONE`, all values larger than 127 are set to 255 (white),
|
dither is ``None``, all values larger than 127 are set to 255 (white),
|
||||||
all other values to 0 (black). To use other thresholds, use the
|
all other values to 0 (black). To use other thresholds, use the
|
||||||
:py:meth:`~PIL.Image.Image.point` method.
|
:py:meth:`~PIL.Image.Image.point` method.
|
||||||
|
|
||||||
|
@ -894,12 +952,13 @@ class Image:
|
||||||
should be 4- or 12-tuple containing floating point values.
|
should be 4- or 12-tuple containing floating point values.
|
||||||
:param dither: Dithering method, used when converting from
|
:param dither: Dithering method, used when converting from
|
||||||
mode "RGB" to "P" or from "RGB" or "L" to "1".
|
mode "RGB" to "P" or from "RGB" or "L" to "1".
|
||||||
Available methods are :data:`NONE` or :data:`FLOYDSTEINBERG` (default).
|
Available methods are :data:`Dither.NONE` or :data:`Dither.FLOYDSTEINBERG`
|
||||||
Note that this is not used when ``matrix`` is supplied.
|
(default). Note that this is not used when ``matrix`` is supplied.
|
||||||
:param palette: Palette to use when converting from mode "RGB"
|
:param palette: Palette to use when converting from mode "RGB"
|
||||||
to "P". Available palettes are :data:`WEB` or :data:`ADAPTIVE`.
|
to "P". Available palettes are :data:`Palette.WEB` or
|
||||||
:param colors: Number of colors to use for the :data:`ADAPTIVE` palette.
|
:data:`Palette.ADAPTIVE`.
|
||||||
Defaults to 256.
|
:param colors: Number of colors to use for the :data:`Palette.ADAPTIVE`
|
||||||
|
palette. Defaults to 256.
|
||||||
:rtype: :py:class:`~PIL.Image.Image`
|
:rtype: :py:class:`~PIL.Image.Image`
|
||||||
:returns: An :py:class:`~PIL.Image.Image` object.
|
:returns: An :py:class:`~PIL.Image.Image` object.
|
||||||
"""
|
"""
|
||||||
|
@ -1006,7 +1065,7 @@ class Image:
|
||||||
else:
|
else:
|
||||||
raise ValueError("Transparency for P mode should be bytes or int")
|
raise ValueError("Transparency for P mode should be bytes or int")
|
||||||
|
|
||||||
if mode == "P" and palette == ADAPTIVE:
|
if mode == "P" and palette == Palette.ADAPTIVE:
|
||||||
im = self.im.quantize(colors)
|
im = self.im.quantize(colors)
|
||||||
new = self._new(im)
|
new = self._new(im)
|
||||||
from . import ImagePalette
|
from . import ImagePalette
|
||||||
|
@ -1028,7 +1087,7 @@ class Image:
|
||||||
|
|
||||||
# colorspace conversion
|
# colorspace conversion
|
||||||
if dither is None:
|
if dither is None:
|
||||||
dither = FLOYDSTEINBERG
|
dither = Dither.FLOYDSTEINBERG
|
||||||
|
|
||||||
try:
|
try:
|
||||||
im = self.im.convert(mode, dither)
|
im = self.im.convert(mode, dither)
|
||||||
|
@ -1041,7 +1100,7 @@ class Image:
|
||||||
raise ValueError("illegal conversion") from e
|
raise ValueError("illegal conversion") from e
|
||||||
|
|
||||||
new_im = self._new(im)
|
new_im = self._new(im)
|
||||||
if mode == "P" and palette != ADAPTIVE:
|
if mode == "P" and palette != Palette.ADAPTIVE:
|
||||||
from . import ImagePalette
|
from . import ImagePalette
|
||||||
|
|
||||||
new_im.palette = ImagePalette.ImagePalette("RGB", list(range(256)) * 3)
|
new_im.palette = ImagePalette.ImagePalette("RGB", list(range(256)) * 3)
|
||||||
|
@ -1070,24 +1129,25 @@ class Image:
|
||||||
of colors.
|
of colors.
|
||||||
|
|
||||||
:param colors: The desired number of colors, <= 256
|
:param colors: The desired number of colors, <= 256
|
||||||
:param method: :data:`MEDIANCUT` (median cut),
|
:param method: :data:`Quantize.MEDIANCUT` (median cut),
|
||||||
:data:`MAXCOVERAGE` (maximum coverage),
|
:data:`Quantize.MAXCOVERAGE` (maximum coverage),
|
||||||
:data:`FASTOCTREE` (fast octree),
|
:data:`Quantize.FASTOCTREE` (fast octree),
|
||||||
:data:`LIBIMAGEQUANT` (libimagequant; check support using
|
:data:`Quantize.LIBIMAGEQUANT` (libimagequant; check support
|
||||||
:py:func:`PIL.features.check_feature`
|
using :py:func:`PIL.features.check_feature` with
|
||||||
with ``feature="libimagequant"``).
|
``feature="libimagequant"``).
|
||||||
|
|
||||||
By default, :data:`MEDIANCUT` will be used.
|
By default, :data:`Quantize.MEDIANCUT` will be used.
|
||||||
|
|
||||||
The exception to this is RGBA images. :data:`MEDIANCUT` and
|
The exception to this is RGBA images. :data:`Quantize.MEDIANCUT`
|
||||||
:data:`MAXCOVERAGE` do not support RGBA images, so
|
and :data:`Quantize.MAXCOVERAGE` do not support RGBA images, so
|
||||||
:data:`FASTOCTREE` is used by default instead.
|
:data:`Quantize.FASTOCTREE` is used by default instead.
|
||||||
:param kmeans: Integer
|
:param kmeans: Integer
|
||||||
:param palette: Quantize to the palette of given
|
:param palette: Quantize to the palette of given
|
||||||
:py:class:`PIL.Image.Image`.
|
:py:class:`PIL.Image.Image`.
|
||||||
:param dither: Dithering method, used when converting from
|
:param dither: Dithering method, used when converting from
|
||||||
mode "RGB" to "P" or from "RGB" or "L" to "1".
|
mode "RGB" to "P" or from "RGB" or "L" to "1".
|
||||||
Available methods are :data:`NONE` or :data:`FLOYDSTEINBERG` (default).
|
Available methods are :data:`Dither.NONE` or :data:`Dither.FLOYDSTEINBERG`
|
||||||
|
(default).
|
||||||
Default: 1 (legacy setting)
|
Default: 1 (legacy setting)
|
||||||
:returns: A new image
|
:returns: A new image
|
||||||
|
|
||||||
|
@ -1097,11 +1157,14 @@ class Image:
|
||||||
|
|
||||||
if method is None:
|
if method is None:
|
||||||
# defaults:
|
# defaults:
|
||||||
method = MEDIANCUT
|
method = Quantize.MEDIANCUT
|
||||||
if self.mode == "RGBA":
|
if self.mode == "RGBA":
|
||||||
method = FASTOCTREE
|
method = Quantize.FASTOCTREE
|
||||||
|
|
||||||
if self.mode == "RGBA" and method not in (FASTOCTREE, LIBIMAGEQUANT):
|
if self.mode == "RGBA" and method not in (
|
||||||
|
Quantize.FASTOCTREE,
|
||||||
|
Quantize.LIBIMAGEQUANT,
|
||||||
|
):
|
||||||
# Caller specified an invalid mode.
|
# Caller specified an invalid mode.
|
||||||
raise ValueError(
|
raise ValueError(
|
||||||
"Fast Octree (method == 2) and libimagequant (method == 3) "
|
"Fast Octree (method == 2) and libimagequant (method == 3) "
|
||||||
|
@ -1761,8 +1824,8 @@ class Image:
|
||||||
Alternatively, an 8-bit string may be used instead of an integer sequence.
|
Alternatively, an 8-bit string may be used instead of an integer sequence.
|
||||||
|
|
||||||
:param data: A palette sequence (either a list or a string).
|
:param data: A palette sequence (either a list or a string).
|
||||||
:param rawmode: The raw mode of the palette. Either "RGB", "RGBA", or a
|
:param rawmode: The raw mode of the palette. Either "RGB", "RGBA", or a mode
|
||||||
mode that can be transformed to "RGB" (e.g. "R", "BGR;15", "RGBA;L").
|
that can be transformed to "RGB" or "RGBA" (e.g. "R", "BGR;15", "RGBA;L").
|
||||||
"""
|
"""
|
||||||
from . import ImagePalette
|
from . import ImagePalette
|
||||||
|
|
||||||
|
@ -1911,15 +1974,18 @@ class Image:
|
||||||
:param size: The requested size in pixels, as a 2-tuple:
|
:param size: The requested size in pixels, as a 2-tuple:
|
||||||
(width, height).
|
(width, height).
|
||||||
:param resample: An optional resampling filter. This can be
|
:param resample: An optional resampling filter. This can be
|
||||||
one of :py:data:`PIL.Image.NEAREST`, :py:data:`PIL.Image.BOX`,
|
one of :py:data:`PIL.Image.Resampling.NEAREST`,
|
||||||
:py:data:`PIL.Image.BILINEAR`, :py:data:`PIL.Image.HAMMING`,
|
:py:data:`PIL.Image.Resampling.BOX`,
|
||||||
:py:data:`PIL.Image.BICUBIC` or :py:data:`PIL.Image.LANCZOS`.
|
:py:data:`PIL.Image.Resampling.BILINEAR`,
|
||||||
|
:py:data:`PIL.Image.Resampling.HAMMING`,
|
||||||
|
:py:data:`PIL.Image.Resampling.BICUBIC` or
|
||||||
|
:py:data:`PIL.Image.Resampling.LANCZOS`.
|
||||||
If the image has mode "1" or "P", it is always set to
|
If the image has mode "1" or "P", it is always set to
|
||||||
:py:data:`PIL.Image.NEAREST`.
|
:py:data:`PIL.Image.Resampling.NEAREST`.
|
||||||
If the image mode specifies a number of bits, such as "I;16", then the
|
If the image mode specifies a number of bits, such as "I;16", then the
|
||||||
default filter is :py:data:`PIL.Image.NEAREST`.
|
default filter is :py:data:`PIL.Image.Resampling.NEAREST`.
|
||||||
Otherwise, the default filter is :py:data:`PIL.Image.BICUBIC`.
|
Otherwise, the default filter is
|
||||||
See: :ref:`concept-filters`.
|
:py:data:`PIL.Image.Resampling.BICUBIC`. See: :ref:`concept-filters`.
|
||||||
:param box: An optional 4-tuple of floats providing
|
:param box: An optional 4-tuple of floats providing
|
||||||
the source image region to be scaled.
|
the source image region to be scaled.
|
||||||
The values must be within (0, 0, width, height) rectangle.
|
The values must be within (0, 0, width, height) rectangle.
|
||||||
|
@ -1941,19 +2007,26 @@ class Image:
|
||||||
|
|
||||||
if resample is None:
|
if resample is None:
|
||||||
type_special = ";" in self.mode
|
type_special = ";" in self.mode
|
||||||
resample = NEAREST if type_special else BICUBIC
|
resample = Resampling.NEAREST if type_special else Resampling.BICUBIC
|
||||||
elif resample not in (NEAREST, BILINEAR, BICUBIC, LANCZOS, BOX, HAMMING):
|
elif resample not in (
|
||||||
|
Resampling.NEAREST,
|
||||||
|
Resampling.BILINEAR,
|
||||||
|
Resampling.BICUBIC,
|
||||||
|
Resampling.LANCZOS,
|
||||||
|
Resampling.BOX,
|
||||||
|
Resampling.HAMMING,
|
||||||
|
):
|
||||||
message = f"Unknown resampling filter ({resample})."
|
message = f"Unknown resampling filter ({resample})."
|
||||||
|
|
||||||
filters = [
|
filters = [
|
||||||
f"{filter[1]} ({filter[0]})"
|
f"{filter[1]} ({filter[0]})"
|
||||||
for filter in (
|
for filter in (
|
||||||
(NEAREST, "Image.NEAREST"),
|
(Resampling.NEAREST, "Image.Resampling.NEAREST"),
|
||||||
(LANCZOS, "Image.LANCZOS"),
|
(Resampling.LANCZOS, "Image.Resampling.LANCZOS"),
|
||||||
(BILINEAR, "Image.BILINEAR"),
|
(Resampling.BILINEAR, "Image.Resampling.BILINEAR"),
|
||||||
(BICUBIC, "Image.BICUBIC"),
|
(Resampling.BICUBIC, "Image.Resampling.BICUBIC"),
|
||||||
(BOX, "Image.BOX"),
|
(Resampling.BOX, "Image.Resampling.BOX"),
|
||||||
(HAMMING, "Image.HAMMING"),
|
(Resampling.HAMMING, "Image.Resampling.HAMMING"),
|
||||||
)
|
)
|
||||||
]
|
]
|
||||||
raise ValueError(
|
raise ValueError(
|
||||||
|
@ -1974,16 +2047,16 @@ class Image:
|
||||||
return self.copy()
|
return self.copy()
|
||||||
|
|
||||||
if self.mode in ("1", "P"):
|
if self.mode in ("1", "P"):
|
||||||
resample = NEAREST
|
resample = Resampling.NEAREST
|
||||||
|
|
||||||
if self.mode in ["LA", "RGBA"] and resample != NEAREST:
|
if self.mode in ["LA", "RGBA"] and resample != Resampling.NEAREST:
|
||||||
im = self.convert({"LA": "La", "RGBA": "RGBa"}[self.mode])
|
im = self.convert({"LA": "La", "RGBA": "RGBa"}[self.mode])
|
||||||
im = im.resize(size, resample, box)
|
im = im.resize(size, resample, box)
|
||||||
return im.convert(self.mode)
|
return im.convert(self.mode)
|
||||||
|
|
||||||
self.load()
|
self.load()
|
||||||
|
|
||||||
if reducing_gap is not None and resample != NEAREST:
|
if reducing_gap is not None and resample != Resampling.NEAREST:
|
||||||
factor_x = int((box[2] - box[0]) / size[0] / reducing_gap) or 1
|
factor_x = int((box[2] - box[0]) / size[0] / reducing_gap) or 1
|
||||||
factor_y = int((box[3] - box[1]) / size[1] / reducing_gap) or 1
|
factor_y = int((box[3] - box[1]) / size[1] / reducing_gap) or 1
|
||||||
if factor_x > 1 or factor_y > 1:
|
if factor_x > 1 or factor_y > 1:
|
||||||
|
@ -2038,7 +2111,7 @@ class Image:
|
||||||
def rotate(
|
def rotate(
|
||||||
self,
|
self,
|
||||||
angle,
|
angle,
|
||||||
resample=NEAREST,
|
resample=Resampling.NEAREST,
|
||||||
expand=0,
|
expand=0,
|
||||||
center=None,
|
center=None,
|
||||||
translate=None,
|
translate=None,
|
||||||
|
@ -2051,12 +2124,12 @@ class Image:
|
||||||
|
|
||||||
:param angle: In degrees counter clockwise.
|
:param angle: In degrees counter clockwise.
|
||||||
:param resample: An optional resampling filter. This can be
|
:param resample: An optional resampling filter. This can be
|
||||||
one of :py:data:`PIL.Image.NEAREST` (use nearest neighbour),
|
one of :py:data:`PIL.Image.Resampling.NEAREST` (use nearest neighbour),
|
||||||
:py:data:`PIL.Image.BILINEAR` (linear interpolation in a 2x2
|
:py:data:`PIL.Image.BILINEAR` (linear interpolation in a 2x2
|
||||||
environment), or :py:data:`PIL.Image.BICUBIC`
|
environment), or :py:data:`PIL.Image.Resampling.BICUBIC`
|
||||||
(cubic spline interpolation in a 4x4 environment).
|
(cubic spline interpolation in a 4x4 environment).
|
||||||
If omitted, or if the image has mode "1" or "P", it is
|
If omitted, or if the image has mode "1" or "P", it is
|
||||||
set to :py:data:`PIL.Image.NEAREST`. See :ref:`concept-filters`.
|
set to :py:data:`PIL.Image.Resampling.NEAREST`. See :ref:`concept-filters`.
|
||||||
:param expand: Optional expansion flag. If true, expands the output
|
:param expand: Optional expansion flag. If true, expands the output
|
||||||
image to make it large enough to hold the entire rotated image.
|
image to make it large enough to hold the entire rotated image.
|
||||||
If false or omitted, make the output image the same size as the
|
If false or omitted, make the output image the same size as the
|
||||||
|
@ -2077,9 +2150,11 @@ class Image:
|
||||||
if angle == 0:
|
if angle == 0:
|
||||||
return self.copy()
|
return self.copy()
|
||||||
if angle == 180:
|
if angle == 180:
|
||||||
return self.transpose(ROTATE_180)
|
return self.transpose(Transpose.ROTATE_180)
|
||||||
if angle in (90, 270) and (expand or self.width == self.height):
|
if angle in (90, 270) and (expand or self.width == self.height):
|
||||||
return self.transpose(ROTATE_90 if angle == 90 else ROTATE_270)
|
return self.transpose(
|
||||||
|
Transpose.ROTATE_90 if angle == 90 else Transpose.ROTATE_270
|
||||||
|
)
|
||||||
|
|
||||||
# Calculate the affine matrix. Note that this is the reverse
|
# Calculate the affine matrix. Note that this is the reverse
|
||||||
# transformation (from destination image to source) because we
|
# transformation (from destination image to source) because we
|
||||||
|
@ -2148,7 +2223,9 @@ class Image:
|
||||||
matrix[2], matrix[5] = transform(-(nw - w) / 2.0, -(nh - h) / 2.0, matrix)
|
matrix[2], matrix[5] = transform(-(nw - w) / 2.0, -(nh - h) / 2.0, matrix)
|
||||||
w, h = nw, nh
|
w, h = nw, nh
|
||||||
|
|
||||||
return self.transform((w, h), AFFINE, matrix, resample, fillcolor=fillcolor)
|
return self.transform(
|
||||||
|
(w, h), Transform.AFFINE, matrix, resample, fillcolor=fillcolor
|
||||||
|
)
|
||||||
|
|
||||||
def save(self, fp, format=None, **params):
|
def save(self, fp, format=None, **params):
|
||||||
"""
|
"""
|
||||||
|
@ -2334,7 +2411,7 @@ class Image:
|
||||||
"""
|
"""
|
||||||
return 0
|
return 0
|
||||||
|
|
||||||
def thumbnail(self, size, resample=BICUBIC, reducing_gap=2.0):
|
def thumbnail(self, size, resample=Resampling.BICUBIC, reducing_gap=2.0):
|
||||||
"""
|
"""
|
||||||
Make this image into a thumbnail. This method modifies the
|
Make this image into a thumbnail. This method modifies the
|
||||||
image to contain a thumbnail version of itself, no larger than
|
image to contain a thumbnail version of itself, no larger than
|
||||||
|
@ -2350,11 +2427,14 @@ class Image:
|
||||||
|
|
||||||
:param size: Requested size.
|
:param size: Requested size.
|
||||||
:param resample: Optional resampling filter. This can be one
|
:param resample: Optional resampling filter. This can be one
|
||||||
of :py:data:`PIL.Image.NEAREST`, :py:data:`PIL.Image.BOX`,
|
of :py:data:`PIL.Image.Resampling.NEAREST`,
|
||||||
:py:data:`PIL.Image.BILINEAR`, :py:data:`PIL.Image.HAMMING`,
|
:py:data:`PIL.Image.Resampling.BOX`,
|
||||||
:py:data:`PIL.Image.BICUBIC` or :py:data:`PIL.Image.LANCZOS`.
|
:py:data:`PIL.Image.Resampling.BILINEAR`,
|
||||||
If omitted, it defaults to :py:data:`PIL.Image.BICUBIC`.
|
:py:data:`PIL.Image.Resampling.HAMMING`,
|
||||||
(was :py:data:`PIL.Image.NEAREST` prior to version 2.5.0).
|
:py:data:`PIL.Image.Resampling.BICUBIC` or
|
||||||
|
:py:data:`PIL.Image.Resampling.LANCZOS`.
|
||||||
|
If omitted, it defaults to :py:data:`PIL.Image.Resampling.BICUBIC`.
|
||||||
|
(was :py:data:`PIL.Image.Resampling.NEAREST` prior to version 2.5.0).
|
||||||
See: :ref:`concept-filters`.
|
See: :ref:`concept-filters`.
|
||||||
:param reducing_gap: Apply optimization by resizing the image
|
:param reducing_gap: Apply optimization by resizing the image
|
||||||
in two steps. First, reducing the image by integer times
|
in two steps. First, reducing the image by integer times
|
||||||
|
@ -2409,7 +2489,13 @@ class Image:
|
||||||
# FIXME: the different transform methods need further explanation
|
# FIXME: the different transform methods need further explanation
|
||||||
# instead of bloating the method docs, add a separate chapter.
|
# instead of bloating the method docs, add a separate chapter.
|
||||||
def transform(
|
def transform(
|
||||||
self, size, method, data=None, resample=NEAREST, fill=1, fillcolor=None
|
self,
|
||||||
|
size,
|
||||||
|
method,
|
||||||
|
data=None,
|
||||||
|
resample=Resampling.NEAREST,
|
||||||
|
fill=1,
|
||||||
|
fillcolor=None,
|
||||||
):
|
):
|
||||||
"""
|
"""
|
||||||
Transforms this image. This method creates a new image with the
|
Transforms this image. This method creates a new image with the
|
||||||
|
@ -2418,11 +2504,11 @@ class Image:
|
||||||
|
|
||||||
:param size: The output size.
|
:param size: The output size.
|
||||||
:param method: The transformation method. This is one of
|
:param method: The transformation method. This is one of
|
||||||
:py:data:`PIL.Image.EXTENT` (cut out a rectangular subregion),
|
:py:data:`PIL.Image.Transform.EXTENT` (cut out a rectangular subregion),
|
||||||
:py:data:`PIL.Image.AFFINE` (affine transform),
|
:py:data:`PIL.Image.Transform.AFFINE` (affine transform),
|
||||||
:py:data:`PIL.Image.PERSPECTIVE` (perspective transform),
|
:py:data:`PIL.Image.Transform.PERSPECTIVE` (perspective transform),
|
||||||
:py:data:`PIL.Image.QUAD` (map a quadrilateral to a rectangle), or
|
:py:data:`PIL.Image.Transform.QUAD` (map a quadrilateral to a rectangle), or
|
||||||
:py:data:`PIL.Image.MESH` (map a number of source quadrilaterals
|
:py:data:`PIL.Image.Transform.MESH` (map a number of source quadrilaterals
|
||||||
in one operation).
|
in one operation).
|
||||||
|
|
||||||
It may also be an :py:class:`~PIL.Image.ImageTransformHandler`
|
It may also be an :py:class:`~PIL.Image.ImageTransformHandler`
|
||||||
|
@ -2437,16 +2523,16 @@ class Image:
|
||||||
|
|
||||||
class Example:
|
class Example:
|
||||||
def getdata(self):
|
def getdata(self):
|
||||||
method = Image.EXTENT
|
method = Image.Transform.EXTENT
|
||||||
data = (0, 0, 100, 100)
|
data = (0, 0, 100, 100)
|
||||||
return method, data
|
return method, data
|
||||||
:param data: Extra data to the transformation method.
|
:param data: Extra data to the transformation method.
|
||||||
:param resample: Optional resampling filter. It can be one of
|
:param resample: Optional resampling filter. It can be one of
|
||||||
:py:data:`PIL.Image.NEAREST` (use nearest neighbour),
|
:py:data:`PIL.Image.Resampling.NEAREST` (use nearest neighbour),
|
||||||
:py:data:`PIL.Image.BILINEAR` (linear interpolation in a 2x2
|
:py:data:`PIL.Image.Resampling.BILINEAR` (linear interpolation in a 2x2
|
||||||
environment), or :py:data:`PIL.Image.BICUBIC` (cubic spline
|
environment), or :py:data:`PIL.Image.BICUBIC` (cubic spline
|
||||||
interpolation in a 4x4 environment). If omitted, or if the image
|
interpolation in a 4x4 environment). If omitted, or if the image
|
||||||
has mode "1" or "P", it is set to :py:data:`PIL.Image.NEAREST`.
|
has mode "1" or "P", it is set to :py:data:`PIL.Image.Resampling.NEAREST`.
|
||||||
See: :ref:`concept-filters`.
|
See: :ref:`concept-filters`.
|
||||||
:param fill: If ``method`` is an
|
:param fill: If ``method`` is an
|
||||||
:py:class:`~PIL.Image.ImageTransformHandler` object, this is one of
|
:py:class:`~PIL.Image.ImageTransformHandler` object, this is one of
|
||||||
|
@ -2456,7 +2542,7 @@ class Image:
|
||||||
:returns: An :py:class:`~PIL.Image.Image` object.
|
:returns: An :py:class:`~PIL.Image.Image` object.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
if self.mode in ("LA", "RGBA") and resample != NEAREST:
|
if self.mode in ("LA", "RGBA") and resample != Resampling.NEAREST:
|
||||||
return (
|
return (
|
||||||
self.convert({"LA": "La", "RGBA": "RGBa"}[self.mode])
|
self.convert({"LA": "La", "RGBA": "RGBa"}[self.mode])
|
||||||
.transform(size, method, data, resample, fill, fillcolor)
|
.transform(size, method, data, resample, fill, fillcolor)
|
||||||
|
@ -2477,10 +2563,12 @@ class Image:
|
||||||
if self.mode == "P" and self.palette:
|
if self.mode == "P" and self.palette:
|
||||||
im.palette = self.palette.copy()
|
im.palette = self.palette.copy()
|
||||||
im.info = self.info.copy()
|
im.info = self.info.copy()
|
||||||
if method == MESH:
|
if method == Transform.MESH:
|
||||||
# list of quads
|
# list of quads
|
||||||
for box, quad in data:
|
for box, quad in data:
|
||||||
im.__transformer(box, self, QUAD, quad, resample, fillcolor is None)
|
im.__transformer(
|
||||||
|
box, self, Transform.QUAD, quad, resample, fillcolor is None
|
||||||
|
)
|
||||||
else:
|
else:
|
||||||
im.__transformer(
|
im.__transformer(
|
||||||
(0, 0) + size, self, method, data, resample, fillcolor is None
|
(0, 0) + size, self, method, data, resample, fillcolor is None
|
||||||
|
@ -2488,25 +2576,27 @@ class Image:
|
||||||
|
|
||||||
return im
|
return im
|
||||||
|
|
||||||
def __transformer(self, box, image, method, data, resample=NEAREST, fill=1):
|
def __transformer(
|
||||||
|
self, box, image, method, data, resample=Resampling.NEAREST, fill=1
|
||||||
|
):
|
||||||
w = box[2] - box[0]
|
w = box[2] - box[0]
|
||||||
h = box[3] - box[1]
|
h = box[3] - box[1]
|
||||||
|
|
||||||
if method == AFFINE:
|
if method == Transform.AFFINE:
|
||||||
data = data[0:6]
|
data = data[0:6]
|
||||||
|
|
||||||
elif method == EXTENT:
|
elif method == Transform.EXTENT:
|
||||||
# convert extent to an affine transform
|
# convert extent to an affine transform
|
||||||
x0, y0, x1, y1 = data
|
x0, y0, x1, y1 = data
|
||||||
xs = (x1 - x0) / w
|
xs = (x1 - x0) / w
|
||||||
ys = (y1 - y0) / h
|
ys = (y1 - y0) / h
|
||||||
method = AFFINE
|
method = Transform.AFFINE
|
||||||
data = (xs, 0, x0, 0, ys, y0)
|
data = (xs, 0, x0, 0, ys, y0)
|
||||||
|
|
||||||
elif method == PERSPECTIVE:
|
elif method == Transform.PERSPECTIVE:
|
||||||
data = data[0:8]
|
data = data[0:8]
|
||||||
|
|
||||||
elif method == QUAD:
|
elif method == Transform.QUAD:
|
||||||
# quadrilateral warp. data specifies the four corners
|
# quadrilateral warp. data specifies the four corners
|
||||||
# given as NW, SW, SE, and NE.
|
# given as NW, SW, SE, and NE.
|
||||||
nw = data[0:2]
|
nw = data[0:2]
|
||||||
|
@ -2530,12 +2620,16 @@ class Image:
|
||||||
else:
|
else:
|
||||||
raise ValueError("unknown transformation method")
|
raise ValueError("unknown transformation method")
|
||||||
|
|
||||||
if resample not in (NEAREST, BILINEAR, BICUBIC):
|
if resample not in (
|
||||||
if resample in (BOX, HAMMING, LANCZOS):
|
Resampling.NEAREST,
|
||||||
|
Resampling.BILINEAR,
|
||||||
|
Resampling.BICUBIC,
|
||||||
|
):
|
||||||
|
if resample in (Resampling.BOX, Resampling.HAMMING, Resampling.LANCZOS):
|
||||||
message = {
|
message = {
|
||||||
BOX: "Image.BOX",
|
Resampling.BOX: "Image.Resampling.BOX",
|
||||||
HAMMING: "Image.HAMMING",
|
Resampling.HAMMING: "Image.Resampling.HAMMING",
|
||||||
LANCZOS: "Image.LANCZOS/Image.ANTIALIAS",
|
Resampling.LANCZOS: "Image.Resampling.LANCZOS",
|
||||||
}[resample] + f" ({resample}) cannot be used."
|
}[resample] + f" ({resample}) cannot be used."
|
||||||
else:
|
else:
|
||||||
message = f"Unknown resampling filter ({resample})."
|
message = f"Unknown resampling filter ({resample})."
|
||||||
|
@ -2543,9 +2637,9 @@ class Image:
|
||||||
filters = [
|
filters = [
|
||||||
f"{filter[1]} ({filter[0]})"
|
f"{filter[1]} ({filter[0]})"
|
||||||
for filter in (
|
for filter in (
|
||||||
(NEAREST, "Image.NEAREST"),
|
(Resampling.NEAREST, "Image.Resampling.NEAREST"),
|
||||||
(BILINEAR, "Image.BILINEAR"),
|
(Resampling.BILINEAR, "Image.Resampling.BILINEAR"),
|
||||||
(BICUBIC, "Image.BICUBIC"),
|
(Resampling.BICUBIC, "Image.Resampling.BICUBIC"),
|
||||||
)
|
)
|
||||||
]
|
]
|
||||||
raise ValueError(
|
raise ValueError(
|
||||||
|
@ -2557,7 +2651,7 @@ class Image:
|
||||||
self.load()
|
self.load()
|
||||||
|
|
||||||
if image.mode in ("1", "P"):
|
if image.mode in ("1", "P"):
|
||||||
resample = NEAREST
|
resample = Resampling.NEAREST
|
||||||
|
|
||||||
self.im.transform2(box, image.im, method, data, resample, fill)
|
self.im.transform2(box, image.im, method, data, resample, fill)
|
||||||
|
|
||||||
|
@ -2565,10 +2659,13 @@ class Image:
|
||||||
"""
|
"""
|
||||||
Transpose image (flip or rotate in 90 degree steps)
|
Transpose image (flip or rotate in 90 degree steps)
|
||||||
|
|
||||||
:param method: One of :py:data:`PIL.Image.FLIP_LEFT_RIGHT`,
|
:param method: One of :py:data:`PIL.Image.Transpose.FLIP_LEFT_RIGHT`,
|
||||||
:py:data:`PIL.Image.FLIP_TOP_BOTTOM`, :py:data:`PIL.Image.ROTATE_90`,
|
:py:data:`PIL.Image.Transpose.FLIP_TOP_BOTTOM`,
|
||||||
:py:data:`PIL.Image.ROTATE_180`, :py:data:`PIL.Image.ROTATE_270`,
|
:py:data:`PIL.Image.Transpose.ROTATE_90`,
|
||||||
:py:data:`PIL.Image.TRANSPOSE` or :py:data:`PIL.Image.TRANSVERSE`.
|
:py:data:`PIL.Image.Transpose.ROTATE_180`,
|
||||||
|
:py:data:`PIL.Image.Transpose.ROTATE_270`,
|
||||||
|
:py:data:`PIL.Image.Transpose.TRANSPOSE` or
|
||||||
|
:py:data:`PIL.Image.Transpose.TRANSVERSE`.
|
||||||
:returns: Returns a flipped or rotated copy of this image.
|
:returns: Returns a flipped or rotated copy of this image.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
|
|
@ -16,6 +16,8 @@
|
||||||
# below for the original description.
|
# below for the original description.
|
||||||
|
|
||||||
import sys
|
import sys
|
||||||
|
import warnings
|
||||||
|
from enum import IntEnum
|
||||||
|
|
||||||
from PIL import Image
|
from PIL import Image
|
||||||
|
|
||||||
|
@ -100,14 +102,42 @@ core = _imagingcms
|
||||||
#
|
#
|
||||||
# intent/direction values
|
# intent/direction values
|
||||||
|
|
||||||
INTENT_PERCEPTUAL = 0
|
|
||||||
INTENT_RELATIVE_COLORIMETRIC = 1
|
|
||||||
INTENT_SATURATION = 2
|
|
||||||
INTENT_ABSOLUTE_COLORIMETRIC = 3
|
|
||||||
|
|
||||||
DIRECTION_INPUT = 0
|
class Intent(IntEnum):
|
||||||
DIRECTION_OUTPUT = 1
|
PERCEPTUAL = 0
|
||||||
DIRECTION_PROOF = 2
|
RELATIVE_COLORIMETRIC = 1
|
||||||
|
SATURATION = 2
|
||||||
|
ABSOLUTE_COLORIMETRIC = 3
|
||||||
|
|
||||||
|
|
||||||
|
class Direction(IntEnum):
|
||||||
|
INPUT = 0
|
||||||
|
OUTPUT = 1
|
||||||
|
PROOF = 2
|
||||||
|
|
||||||
|
|
||||||
|
def __getattr__(name):
|
||||||
|
deprecated = "deprecated and will be removed in Pillow 10 (2023-07-01). "
|
||||||
|
for enum, prefix in {Intent: "INTENT_", Direction: "DIRECTION_"}.items():
|
||||||
|
if name.startswith(prefix):
|
||||||
|
name = name[len(prefix) :]
|
||||||
|
if name in enum.__members__:
|
||||||
|
warnings.warn(
|
||||||
|
prefix
|
||||||
|
+ name
|
||||||
|
+ " is "
|
||||||
|
+ deprecated
|
||||||
|
+ "Use "
|
||||||
|
+ enum.__name__
|
||||||
|
+ "."
|
||||||
|
+ name
|
||||||
|
+ " instead.",
|
||||||
|
DeprecationWarning,
|
||||||
|
stacklevel=2,
|
||||||
|
)
|
||||||
|
return enum[name]
|
||||||
|
raise AttributeError(f"module '{__name__}' has no attribute '{name}'")
|
||||||
|
|
||||||
|
|
||||||
#
|
#
|
||||||
# flags
|
# flags
|
||||||
|
@ -211,9 +241,9 @@ class ImageCmsTransform(Image.ImagePointHandler):
|
||||||
output,
|
output,
|
||||||
input_mode,
|
input_mode,
|
||||||
output_mode,
|
output_mode,
|
||||||
intent=INTENT_PERCEPTUAL,
|
intent=Intent.PERCEPTUAL,
|
||||||
proof=None,
|
proof=None,
|
||||||
proof_intent=INTENT_ABSOLUTE_COLORIMETRIC,
|
proof_intent=Intent.ABSOLUTE_COLORIMETRIC,
|
||||||
flags=0,
|
flags=0,
|
||||||
):
|
):
|
||||||
if proof is None:
|
if proof is None:
|
||||||
|
@ -295,7 +325,7 @@ def profileToProfile(
|
||||||
im,
|
im,
|
||||||
inputProfile,
|
inputProfile,
|
||||||
outputProfile,
|
outputProfile,
|
||||||
renderingIntent=INTENT_PERCEPTUAL,
|
renderingIntent=Intent.PERCEPTUAL,
|
||||||
outputMode=None,
|
outputMode=None,
|
||||||
inPlace=False,
|
inPlace=False,
|
||||||
flags=0,
|
flags=0,
|
||||||
|
@ -331,10 +361,10 @@ def profileToProfile(
|
||||||
:param renderingIntent: Integer (0-3) specifying the rendering intent you
|
:param renderingIntent: Integer (0-3) specifying the rendering intent you
|
||||||
wish to use for the transform
|
wish to use for the transform
|
||||||
|
|
||||||
ImageCms.INTENT_PERCEPTUAL = 0 (DEFAULT)
|
ImageCms.Intent.PERCEPTUAL = 0 (DEFAULT)
|
||||||
ImageCms.INTENT_RELATIVE_COLORIMETRIC = 1
|
ImageCms.Intent.RELATIVE_COLORIMETRIC = 1
|
||||||
ImageCms.INTENT_SATURATION = 2
|
ImageCms.Intent.SATURATION = 2
|
||||||
ImageCms.INTENT_ABSOLUTE_COLORIMETRIC = 3
|
ImageCms.Intent.ABSOLUTE_COLORIMETRIC = 3
|
||||||
|
|
||||||
see the pyCMS documentation for details on rendering intents and what
|
see the pyCMS documentation for details on rendering intents and what
|
||||||
they do.
|
they do.
|
||||||
|
@ -412,7 +442,7 @@ def buildTransform(
|
||||||
outputProfile,
|
outputProfile,
|
||||||
inMode,
|
inMode,
|
||||||
outMode,
|
outMode,
|
||||||
renderingIntent=INTENT_PERCEPTUAL,
|
renderingIntent=Intent.PERCEPTUAL,
|
||||||
flags=0,
|
flags=0,
|
||||||
):
|
):
|
||||||
"""
|
"""
|
||||||
|
@ -458,10 +488,10 @@ def buildTransform(
|
||||||
:param renderingIntent: Integer (0-3) specifying the rendering intent you
|
:param renderingIntent: Integer (0-3) specifying the rendering intent you
|
||||||
wish to use for the transform
|
wish to use for the transform
|
||||||
|
|
||||||
ImageCms.INTENT_PERCEPTUAL = 0 (DEFAULT)
|
ImageCms.Intent.PERCEPTUAL = 0 (DEFAULT)
|
||||||
ImageCms.INTENT_RELATIVE_COLORIMETRIC = 1
|
ImageCms.Intent.RELATIVE_COLORIMETRIC = 1
|
||||||
ImageCms.INTENT_SATURATION = 2
|
ImageCms.Intent.SATURATION = 2
|
||||||
ImageCms.INTENT_ABSOLUTE_COLORIMETRIC = 3
|
ImageCms.Intent.ABSOLUTE_COLORIMETRIC = 3
|
||||||
|
|
||||||
see the pyCMS documentation for details on rendering intents and what
|
see the pyCMS documentation for details on rendering intents and what
|
||||||
they do.
|
they do.
|
||||||
|
@ -494,8 +524,8 @@ def buildProofTransform(
|
||||||
proofProfile,
|
proofProfile,
|
||||||
inMode,
|
inMode,
|
||||||
outMode,
|
outMode,
|
||||||
renderingIntent=INTENT_PERCEPTUAL,
|
renderingIntent=Intent.PERCEPTUAL,
|
||||||
proofRenderingIntent=INTENT_ABSOLUTE_COLORIMETRIC,
|
proofRenderingIntent=Intent.ABSOLUTE_COLORIMETRIC,
|
||||||
flags=FLAGS["SOFTPROOFING"],
|
flags=FLAGS["SOFTPROOFING"],
|
||||||
):
|
):
|
||||||
"""
|
"""
|
||||||
|
@ -550,20 +580,20 @@ def buildProofTransform(
|
||||||
:param renderingIntent: Integer (0-3) specifying the rendering intent you
|
:param renderingIntent: Integer (0-3) specifying the rendering intent you
|
||||||
wish to use for the input->proof (simulated) transform
|
wish to use for the input->proof (simulated) transform
|
||||||
|
|
||||||
ImageCms.INTENT_PERCEPTUAL = 0 (DEFAULT)
|
ImageCms.Intent.PERCEPTUAL = 0 (DEFAULT)
|
||||||
ImageCms.INTENT_RELATIVE_COLORIMETRIC = 1
|
ImageCms.Intent.RELATIVE_COLORIMETRIC = 1
|
||||||
ImageCms.INTENT_SATURATION = 2
|
ImageCms.Intent.SATURATION = 2
|
||||||
ImageCms.INTENT_ABSOLUTE_COLORIMETRIC = 3
|
ImageCms.Intent.ABSOLUTE_COLORIMETRIC = 3
|
||||||
|
|
||||||
see the pyCMS documentation for details on rendering intents and what
|
see the pyCMS documentation for details on rendering intents and what
|
||||||
they do.
|
they do.
|
||||||
:param proofRenderingIntent: Integer (0-3) specifying the rendering intent
|
:param proofRenderingIntent: Integer (0-3) specifying the rendering intent
|
||||||
you wish to use for proof->output transform
|
you wish to use for proof->output transform
|
||||||
|
|
||||||
ImageCms.INTENT_PERCEPTUAL = 0 (DEFAULT)
|
ImageCms.Intent.PERCEPTUAL = 0 (DEFAULT)
|
||||||
ImageCms.INTENT_RELATIVE_COLORIMETRIC = 1
|
ImageCms.Intent.RELATIVE_COLORIMETRIC = 1
|
||||||
ImageCms.INTENT_SATURATION = 2
|
ImageCms.Intent.SATURATION = 2
|
||||||
ImageCms.INTENT_ABSOLUTE_COLORIMETRIC = 3
|
ImageCms.Intent.ABSOLUTE_COLORIMETRIC = 3
|
||||||
|
|
||||||
see the pyCMS documentation for details on rendering intents and what
|
see the pyCMS documentation for details on rendering intents and what
|
||||||
they do.
|
they do.
|
||||||
|
@ -922,10 +952,10 @@ def getDefaultIntent(profile):
|
||||||
:returns: Integer 0-3 specifying the default rendering intent for this
|
:returns: Integer 0-3 specifying the default rendering intent for this
|
||||||
profile.
|
profile.
|
||||||
|
|
||||||
ImageCms.INTENT_PERCEPTUAL = 0 (DEFAULT)
|
ImageCms.Intent.PERCEPTUAL = 0 (DEFAULT)
|
||||||
ImageCms.INTENT_RELATIVE_COLORIMETRIC = 1
|
ImageCms.Intent.RELATIVE_COLORIMETRIC = 1
|
||||||
ImageCms.INTENT_SATURATION = 2
|
ImageCms.Intent.SATURATION = 2
|
||||||
ImageCms.INTENT_ABSOLUTE_COLORIMETRIC = 3
|
ImageCms.Intent.ABSOLUTE_COLORIMETRIC = 3
|
||||||
|
|
||||||
see the pyCMS documentation for details on rendering intents and what
|
see the pyCMS documentation for details on rendering intents and what
|
||||||
they do.
|
they do.
|
||||||
|
@ -960,19 +990,19 @@ def isIntentSupported(profile, intent, direction):
|
||||||
:param intent: Integer (0-3) specifying the rendering intent you wish to
|
:param intent: Integer (0-3) specifying the rendering intent you wish to
|
||||||
use with this profile
|
use with this profile
|
||||||
|
|
||||||
ImageCms.INTENT_PERCEPTUAL = 0 (DEFAULT)
|
ImageCms.Intent.PERCEPTUAL = 0 (DEFAULT)
|
||||||
ImageCms.INTENT_RELATIVE_COLORIMETRIC = 1
|
ImageCms.Intent.RELATIVE_COLORIMETRIC = 1
|
||||||
ImageCms.INTENT_SATURATION = 2
|
ImageCms.Intent.SATURATION = 2
|
||||||
ImageCms.INTENT_ABSOLUTE_COLORIMETRIC = 3
|
ImageCms.Intent.ABSOLUTE_COLORIMETRIC = 3
|
||||||
|
|
||||||
see the pyCMS documentation for details on rendering intents and what
|
see the pyCMS documentation for details on rendering intents and what
|
||||||
they do.
|
they do.
|
||||||
:param direction: Integer specifying if the profile is to be used for
|
:param direction: Integer specifying if the profile is to be used for
|
||||||
input, output, or proof
|
input, output, or proof
|
||||||
|
|
||||||
INPUT = 0 (or use ImageCms.DIRECTION_INPUT)
|
INPUT = 0 (or use ImageCms.Direction.INPUT)
|
||||||
OUTPUT = 1 (or use ImageCms.DIRECTION_OUTPUT)
|
OUTPUT = 1 (or use ImageCms.Direction.OUTPUT)
|
||||||
PROOF = 2 (or use ImageCms.DIRECTION_PROOF)
|
PROOF = 2 (or use ImageCms.Direction.PROOF)
|
||||||
|
|
||||||
:returns: 1 if the intent/direction are supported, -1 if they are not.
|
:returns: 1 if the intent/direction are supported, -1 if they are not.
|
||||||
:exception PyCMSError:
|
:exception PyCMSError:
|
||||||
|
|
|
@ -529,7 +529,7 @@ class Color3DLUT(MultibandFilter):
|
||||||
|
|
||||||
return image.color_lut_3d(
|
return image.color_lut_3d(
|
||||||
self.mode or image.mode,
|
self.mode or image.mode,
|
||||||
Image.LINEAR,
|
Image.Resampling.BILINEAR,
|
||||||
self.channels,
|
self.channels,
|
||||||
self.size[0],
|
self.size[0],
|
||||||
self.size[1],
|
self.size[1],
|
||||||
|
|
|
@ -28,13 +28,40 @@
|
||||||
import base64
|
import base64
|
||||||
import os
|
import os
|
||||||
import sys
|
import sys
|
||||||
|
import warnings
|
||||||
|
from enum import IntEnum
|
||||||
from io import BytesIO
|
from io import BytesIO
|
||||||
|
|
||||||
from . import Image
|
from . import Image
|
||||||
from ._util import isDirectory, isPath
|
from ._util import isDirectory, isPath
|
||||||
|
|
||||||
LAYOUT_BASIC = 0
|
|
||||||
LAYOUT_RAQM = 1
|
class Layout(IntEnum):
|
||||||
|
BASIC = 0
|
||||||
|
RAQM = 1
|
||||||
|
|
||||||
|
|
||||||
|
def __getattr__(name):
|
||||||
|
deprecated = "deprecated and will be removed in Pillow 10 (2023-07-01). "
|
||||||
|
for enum, prefix in {Layout: "LAYOUT_"}.items():
|
||||||
|
if name.startswith(prefix):
|
||||||
|
name = name[len(prefix) :]
|
||||||
|
if name in enum.__members__:
|
||||||
|
warnings.warn(
|
||||||
|
prefix
|
||||||
|
+ name
|
||||||
|
+ " is "
|
||||||
|
+ deprecated
|
||||||
|
+ "Use "
|
||||||
|
+ enum.__name__
|
||||||
|
+ "."
|
||||||
|
+ name
|
||||||
|
+ " instead.",
|
||||||
|
DeprecationWarning,
|
||||||
|
stacklevel=2,
|
||||||
|
)
|
||||||
|
return enum[name]
|
||||||
|
raise AttributeError(f"module '{__name__}' has no attribute '{name}'")
|
||||||
|
|
||||||
|
|
||||||
class _imagingft_not_installed:
|
class _imagingft_not_installed:
|
||||||
|
@ -164,18 +191,18 @@ class FreeTypeFont:
|
||||||
self.index = index
|
self.index = index
|
||||||
self.encoding = encoding
|
self.encoding = encoding
|
||||||
|
|
||||||
if layout_engine not in (LAYOUT_BASIC, LAYOUT_RAQM):
|
if layout_engine not in (Layout.BASIC, Layout.RAQM):
|
||||||
layout_engine = LAYOUT_BASIC
|
layout_engine = Layout.BASIC
|
||||||
if core.HAVE_RAQM:
|
if core.HAVE_RAQM:
|
||||||
layout_engine = LAYOUT_RAQM
|
layout_engine = Layout.RAQM
|
||||||
elif layout_engine == LAYOUT_RAQM and not core.HAVE_RAQM:
|
elif layout_engine == Layout.RAQM and not core.HAVE_RAQM:
|
||||||
import warnings
|
import warnings
|
||||||
|
|
||||||
warnings.warn(
|
warnings.warn(
|
||||||
"Raqm layout was requested, but Raqm is not available. "
|
"Raqm layout was requested, but Raqm is not available. "
|
||||||
"Falling back to basic layout."
|
"Falling back to basic layout."
|
||||||
)
|
)
|
||||||
layout_engine = LAYOUT_BASIC
|
layout_engine = Layout.BASIC
|
||||||
|
|
||||||
self.layout_engine = layout_engine
|
self.layout_engine = layout_engine
|
||||||
|
|
||||||
|
@ -757,15 +784,16 @@ class TransposedFont:
|
||||||
|
|
||||||
:param font: A font object.
|
:param font: A font object.
|
||||||
:param orientation: An optional orientation. If given, this should
|
:param orientation: An optional orientation. If given, this should
|
||||||
be one of Image.FLIP_LEFT_RIGHT, Image.FLIP_TOP_BOTTOM,
|
be one of Image.Transpose.FLIP_LEFT_RIGHT, Image.Transpose.FLIP_TOP_BOTTOM,
|
||||||
Image.ROTATE_90, Image.ROTATE_180, or Image.ROTATE_270.
|
Image.Transpose.ROTATE_90, Image.Transpose.ROTATE_180, or
|
||||||
|
Image.Transpose.ROTATE_270.
|
||||||
"""
|
"""
|
||||||
self.font = font
|
self.font = font
|
||||||
self.orientation = orientation # any 'transpose' argument, or None
|
self.orientation = orientation # any 'transpose' argument, or None
|
||||||
|
|
||||||
def getsize(self, text, *args, **kwargs):
|
def getsize(self, text, *args, **kwargs):
|
||||||
w, h = self.font.getsize(text)
|
w, h = self.font.getsize(text)
|
||||||
if self.orientation in (Image.ROTATE_90, Image.ROTATE_270):
|
if self.orientation in (Image.Transpose.ROTATE_90, Image.Transpose.ROTATE_270):
|
||||||
return h, w
|
return h, w
|
||||||
return w, h
|
return w, h
|
||||||
|
|
||||||
|
@ -833,7 +861,7 @@ def truetype(font=None, size=10, index=0, encoding="", layout_engine=None):
|
||||||
This specifies the character set to use. It does not alter the
|
This specifies the character set to use. It does not alter the
|
||||||
encoding of any text provided in subsequent operations.
|
encoding of any text provided in subsequent operations.
|
||||||
:param layout_engine: Which layout engine to use, if available:
|
:param layout_engine: Which layout engine to use, if available:
|
||||||
:data:`.ImageFont.LAYOUT_BASIC` or :data:`.ImageFont.LAYOUT_RAQM`.
|
:data:`.ImageFont.Layout.BASIC` or :data:`.ImageFont.Layout.RAQM`.
|
||||||
|
|
||||||
You can check support for Raqm layout using
|
You can check support for Raqm layout using
|
||||||
:py:func:`PIL.features.check_feature` with ``feature="raqm"``.
|
:py:func:`PIL.features.check_feature` with ``feature="raqm"``.
|
||||||
|
|
|
@ -237,7 +237,7 @@ def colorize(image, black, white, mid=None, blackpoint=0, whitepoint=255, midpoi
|
||||||
return _lut(image, red + green + blue)
|
return _lut(image, red + green + blue)
|
||||||
|
|
||||||
|
|
||||||
def contain(image, size, method=Image.BICUBIC):
|
def contain(image, size, method=Image.Resampling.BICUBIC):
|
||||||
"""
|
"""
|
||||||
Returns a resized version of the image, set to the maximum width and height
|
Returns a resized version of the image, set to the maximum width and height
|
||||||
within the requested size, while maintaining the original aspect ratio.
|
within the requested size, while maintaining the original aspect ratio.
|
||||||
|
@ -265,7 +265,7 @@ def contain(image, size, method=Image.BICUBIC):
|
||||||
return image.resize(size, resample=method)
|
return image.resize(size, resample=method)
|
||||||
|
|
||||||
|
|
||||||
def pad(image, size, method=Image.BICUBIC, color=None, centering=(0.5, 0.5)):
|
def pad(image, size, method=Image.Resampling.BICUBIC, color=None, centering=(0.5, 0.5)):
|
||||||
"""
|
"""
|
||||||
Returns a resized and padded version of the image, expanded to fill the
|
Returns a resized and padded version of the image, expanded to fill the
|
||||||
requested aspect ratio and size.
|
requested aspect ratio and size.
|
||||||
|
@ -315,7 +315,7 @@ def crop(image, border=0):
|
||||||
return image.crop((left, top, image.size[0] - right, image.size[1] - bottom))
|
return image.crop((left, top, image.size[0] - right, image.size[1] - bottom))
|
||||||
|
|
||||||
|
|
||||||
def scale(image, factor, resample=Image.BICUBIC):
|
def scale(image, factor, resample=Image.Resampling.BICUBIC):
|
||||||
"""
|
"""
|
||||||
Returns a rescaled image by a specific factor given in parameter.
|
Returns a rescaled image by a specific factor given in parameter.
|
||||||
A factor greater than 1 expands the image, between 0 and 1 contracts the
|
A factor greater than 1 expands the image, between 0 and 1 contracts the
|
||||||
|
@ -336,7 +336,7 @@ def scale(image, factor, resample=Image.BICUBIC):
|
||||||
return image.resize(size, resample)
|
return image.resize(size, resample)
|
||||||
|
|
||||||
|
|
||||||
def deform(image, deformer, resample=Image.BILINEAR):
|
def deform(image, deformer, resample=Image.Resampling.BILINEAR):
|
||||||
"""
|
"""
|
||||||
Deform the image.
|
Deform the image.
|
||||||
|
|
||||||
|
@ -347,7 +347,9 @@ def deform(image, deformer, resample=Image.BILINEAR):
|
||||||
in the PIL.Image.transform function.
|
in the PIL.Image.transform function.
|
||||||
:return: An image.
|
:return: An image.
|
||||||
"""
|
"""
|
||||||
return image.transform(image.size, Image.MESH, deformer.getmesh(image), resample)
|
return image.transform(
|
||||||
|
image.size, Image.Transform.MESH, deformer.getmesh(image), resample
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
def equalize(image, mask=None):
|
def equalize(image, mask=None):
|
||||||
|
@ -408,7 +410,7 @@ def expand(image, border=0, fill=0):
|
||||||
return out
|
return out
|
||||||
|
|
||||||
|
|
||||||
def fit(image, size, method=Image.BICUBIC, bleed=0.0, centering=(0.5, 0.5)):
|
def fit(image, size, method=Image.Resampling.BICUBIC, bleed=0.0, centering=(0.5, 0.5)):
|
||||||
"""
|
"""
|
||||||
Returns a resized and cropped version of the image, cropped to the
|
Returns a resized and cropped version of the image, cropped to the
|
||||||
requested aspect ratio and size.
|
requested aspect ratio and size.
|
||||||
|
@ -500,7 +502,7 @@ def flip(image):
|
||||||
:param image: The image to flip.
|
:param image: The image to flip.
|
||||||
:return: An image.
|
:return: An image.
|
||||||
"""
|
"""
|
||||||
return image.transpose(Image.FLIP_TOP_BOTTOM)
|
return image.transpose(Image.Transpose.FLIP_TOP_BOTTOM)
|
||||||
|
|
||||||
|
|
||||||
def grayscale(image):
|
def grayscale(image):
|
||||||
|
@ -533,7 +535,7 @@ def mirror(image):
|
||||||
:param image: The image to mirror.
|
:param image: The image to mirror.
|
||||||
:return: An image.
|
:return: An image.
|
||||||
"""
|
"""
|
||||||
return image.transpose(Image.FLIP_LEFT_RIGHT)
|
return image.transpose(Image.Transpose.FLIP_LEFT_RIGHT)
|
||||||
|
|
||||||
|
|
||||||
def posterize(image, bits):
|
def posterize(image, bits):
|
||||||
|
@ -579,13 +581,13 @@ def exif_transpose(image):
|
||||||
exif = image.getexif()
|
exif = image.getexif()
|
||||||
orientation = exif.get(0x0112)
|
orientation = exif.get(0x0112)
|
||||||
method = {
|
method = {
|
||||||
2: Image.FLIP_LEFT_RIGHT,
|
2: Image.Transpose.FLIP_LEFT_RIGHT,
|
||||||
3: Image.ROTATE_180,
|
3: Image.Transpose.ROTATE_180,
|
||||||
4: Image.FLIP_TOP_BOTTOM,
|
4: Image.Transpose.FLIP_TOP_BOTTOM,
|
||||||
5: Image.TRANSPOSE,
|
5: Image.Transpose.TRANSPOSE,
|
||||||
6: Image.ROTATE_270,
|
6: Image.Transpose.ROTATE_270,
|
||||||
7: Image.TRANSVERSE,
|
7: Image.Transpose.TRANSVERSE,
|
||||||
8: Image.ROTATE_90,
|
8: Image.Transpose.ROTATE_90,
|
||||||
}.get(orientation)
|
}.get(orientation)
|
||||||
if method is not None:
|
if method is not None:
|
||||||
transposed_image = image.transpose(method)
|
transposed_image = image.transpose(method)
|
||||||
|
|
|
@ -47,7 +47,7 @@ class AffineTransform(Transform):
|
||||||
from an affine transform matrix.
|
from an affine transform matrix.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
method = Image.AFFINE
|
method = Image.Transform.AFFINE
|
||||||
|
|
||||||
|
|
||||||
class ExtentTransform(Transform):
|
class ExtentTransform(Transform):
|
||||||
|
@ -69,7 +69,7 @@ class ExtentTransform(Transform):
|
||||||
input image's coordinate system. See :ref:`coordinate-system`.
|
input image's coordinate system. See :ref:`coordinate-system`.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
method = Image.EXTENT
|
method = Image.Transform.EXTENT
|
||||||
|
|
||||||
|
|
||||||
class QuadTransform(Transform):
|
class QuadTransform(Transform):
|
||||||
|
@ -86,7 +86,7 @@ class QuadTransform(Transform):
|
||||||
source quadrilateral.
|
source quadrilateral.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
method = Image.QUAD
|
method = Image.Transform.QUAD
|
||||||
|
|
||||||
|
|
||||||
class MeshTransform(Transform):
|
class MeshTransform(Transform):
|
||||||
|
@ -99,4 +99,4 @@ class MeshTransform(Transform):
|
||||||
:param data: A list of (bbox, quad) tuples.
|
:param data: A list of (bbox, quad) tuples.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
method = Image.MESH
|
method = Image.Transform.MESH
|
||||||
|
|
|
@ -37,6 +37,7 @@ import re
|
||||||
import struct
|
import struct
|
||||||
import warnings
|
import warnings
|
||||||
import zlib
|
import zlib
|
||||||
|
from enum import IntEnum
|
||||||
|
|
||||||
from . import Image, ImageChops, ImageFile, ImagePalette, ImageSequence
|
from . import Image, ImageChops, ImageFile, ImagePalette, ImageSequence
|
||||||
from ._binary import i16be as i16
|
from ._binary import i16be as i16
|
||||||
|
@ -94,36 +95,62 @@ See :ref:`Text in PNG File Format<png-text>`.
|
||||||
|
|
||||||
|
|
||||||
# APNG frame disposal modes
|
# APNG frame disposal modes
|
||||||
APNG_DISPOSE_OP_NONE = 0
|
class Disposal(IntEnum):
|
||||||
"""
|
OP_NONE = 0
|
||||||
No disposal is done on this frame before rendering the next frame.
|
"""
|
||||||
See :ref:`Saving APNG sequences<apng-saving>`.
|
No disposal is done on this frame before rendering the next frame.
|
||||||
"""
|
See :ref:`Saving APNG sequences<apng-saving>`.
|
||||||
APNG_DISPOSE_OP_BACKGROUND = 1
|
"""
|
||||||
"""
|
OP_BACKGROUND = 1
|
||||||
This frame’s modified region is cleared to fully transparent black before rendering
|
"""
|
||||||
the next frame.
|
This frame’s modified region is cleared to fully transparent black before rendering
|
||||||
See :ref:`Saving APNG sequences<apng-saving>`.
|
the next frame.
|
||||||
"""
|
See :ref:`Saving APNG sequences<apng-saving>`.
|
||||||
APNG_DISPOSE_OP_PREVIOUS = 2
|
"""
|
||||||
"""
|
OP_PREVIOUS = 2
|
||||||
This frame’s modified region is reverted to the previous frame’s contents before
|
"""
|
||||||
rendering the next frame.
|
This frame’s modified region is reverted to the previous frame’s contents before
|
||||||
See :ref:`Saving APNG sequences<apng-saving>`.
|
rendering the next frame.
|
||||||
"""
|
See :ref:`Saving APNG sequences<apng-saving>`.
|
||||||
|
"""
|
||||||
|
|
||||||
|
|
||||||
# APNG frame blend modes
|
# APNG frame blend modes
|
||||||
APNG_BLEND_OP_SOURCE = 0
|
class Blend(IntEnum):
|
||||||
"""
|
OP_SOURCE = 0
|
||||||
All color components of this frame, including alpha, overwrite the previous output
|
"""
|
||||||
image contents.
|
All color components of this frame, including alpha, overwrite the previous output
|
||||||
See :ref:`Saving APNG sequences<apng-saving>`.
|
image contents.
|
||||||
"""
|
See :ref:`Saving APNG sequences<apng-saving>`.
|
||||||
APNG_BLEND_OP_OVER = 1
|
"""
|
||||||
"""
|
OP_OVER = 1
|
||||||
This frame should be alpha composited with the previous output image contents.
|
"""
|
||||||
See :ref:`Saving APNG sequences<apng-saving>`.
|
This frame should be alpha composited with the previous output image contents.
|
||||||
"""
|
See :ref:`Saving APNG sequences<apng-saving>`.
|
||||||
|
"""
|
||||||
|
|
||||||
|
|
||||||
|
def __getattr__(name):
|
||||||
|
deprecated = "deprecated and will be removed in Pillow 10 (2023-07-01). "
|
||||||
|
for enum, prefix in {Disposal: "APNG_DISPOSE_", Blend: "APNG_BLEND_"}.items():
|
||||||
|
if name.startswith(prefix):
|
||||||
|
name = name[len(prefix) :]
|
||||||
|
if name in enum.__members__:
|
||||||
|
warnings.warn(
|
||||||
|
prefix
|
||||||
|
+ name
|
||||||
|
+ " is "
|
||||||
|
+ deprecated
|
||||||
|
+ "Use "
|
||||||
|
+ enum.__name__
|
||||||
|
+ "."
|
||||||
|
+ name
|
||||||
|
+ " instead.",
|
||||||
|
DeprecationWarning,
|
||||||
|
stacklevel=2,
|
||||||
|
)
|
||||||
|
return enum[name]
|
||||||
|
raise AttributeError(f"module '{__name__}' has no attribute '{name}'")
|
||||||
|
|
||||||
|
|
||||||
def _safe_zlib_decompress(s):
|
def _safe_zlib_decompress(s):
|
||||||
|
@ -861,13 +888,13 @@ class PngImageFile(ImageFile.ImageFile):
|
||||||
raise EOFError
|
raise EOFError
|
||||||
|
|
||||||
# setup frame disposal (actual disposal done when needed in the next _seek())
|
# setup frame disposal (actual disposal done when needed in the next _seek())
|
||||||
if self._prev_im is None and self.dispose_op == APNG_DISPOSE_OP_PREVIOUS:
|
if self._prev_im is None and self.dispose_op == Disposal.OP_PREVIOUS:
|
||||||
self.dispose_op = APNG_DISPOSE_OP_BACKGROUND
|
self.dispose_op = Disposal.OP_BACKGROUND
|
||||||
|
|
||||||
if self.dispose_op == APNG_DISPOSE_OP_PREVIOUS:
|
if self.dispose_op == Disposal.OP_PREVIOUS:
|
||||||
self.dispose = self._prev_im.copy()
|
self.dispose = self._prev_im.copy()
|
||||||
self.dispose = self._crop(self.dispose, self.dispose_extent)
|
self.dispose = self._crop(self.dispose, self.dispose_extent)
|
||||||
elif self.dispose_op == APNG_DISPOSE_OP_BACKGROUND:
|
elif self.dispose_op == Disposal.OP_BACKGROUND:
|
||||||
self.dispose = Image.core.fill(self.mode, self.size)
|
self.dispose = Image.core.fill(self.mode, self.size)
|
||||||
self.dispose = self._crop(self.dispose, self.dispose_extent)
|
self.dispose = self._crop(self.dispose, self.dispose_extent)
|
||||||
else:
|
else:
|
||||||
|
@ -956,7 +983,7 @@ class PngImageFile(ImageFile.ImageFile):
|
||||||
self.png.close()
|
self.png.close()
|
||||||
self.png = None
|
self.png = None
|
||||||
else:
|
else:
|
||||||
if self._prev_im and self.blend_op == APNG_BLEND_OP_OVER:
|
if self._prev_im and self.blend_op == Blend.OP_OVER:
|
||||||
updated = self._crop(self.im, self.dispose_extent)
|
updated = self._crop(self.im, self.dispose_extent)
|
||||||
self._prev_im.paste(
|
self._prev_im.paste(
|
||||||
updated, self.dispose_extent, updated.convert("RGBA")
|
updated, self.dispose_extent, updated.convert("RGBA")
|
||||||
|
@ -1062,10 +1089,8 @@ def _write_multiple_frames(im, fp, chunk, rawmode):
|
||||||
default_image = im.encoderinfo.get("default_image", im.info.get("default_image"))
|
default_image = im.encoderinfo.get("default_image", im.info.get("default_image"))
|
||||||
duration = im.encoderinfo.get("duration", im.info.get("duration", 0))
|
duration = im.encoderinfo.get("duration", im.info.get("duration", 0))
|
||||||
loop = im.encoderinfo.get("loop", im.info.get("loop", 0))
|
loop = im.encoderinfo.get("loop", im.info.get("loop", 0))
|
||||||
disposal = im.encoderinfo.get(
|
disposal = im.encoderinfo.get("disposal", im.info.get("disposal", Disposal.OP_NONE))
|
||||||
"disposal", im.info.get("disposal", APNG_DISPOSE_OP_NONE)
|
blend = im.encoderinfo.get("blend", im.info.get("blend", Blend.OP_SOURCE))
|
||||||
)
|
|
||||||
blend = im.encoderinfo.get("blend", im.info.get("blend", APNG_BLEND_OP_SOURCE))
|
|
||||||
|
|
||||||
if default_image:
|
if default_image:
|
||||||
chain = itertools.chain(im.encoderinfo.get("append_images", []))
|
chain = itertools.chain(im.encoderinfo.get("append_images", []))
|
||||||
|
@ -1095,10 +1120,10 @@ def _write_multiple_frames(im, fp, chunk, rawmode):
|
||||||
previous = im_frames[-1]
|
previous = im_frames[-1]
|
||||||
prev_disposal = previous["encoderinfo"].get("disposal")
|
prev_disposal = previous["encoderinfo"].get("disposal")
|
||||||
prev_blend = previous["encoderinfo"].get("blend")
|
prev_blend = previous["encoderinfo"].get("blend")
|
||||||
if prev_disposal == APNG_DISPOSE_OP_PREVIOUS and len(im_frames) < 2:
|
if prev_disposal == Disposal.OP_PREVIOUS and len(im_frames) < 2:
|
||||||
prev_disposal = APNG_DISPOSE_OP_BACKGROUND
|
prev_disposal = Disposal.OP_BACKGROUND
|
||||||
|
|
||||||
if prev_disposal == APNG_DISPOSE_OP_BACKGROUND:
|
if prev_disposal == Disposal.OP_BACKGROUND:
|
||||||
base_im = previous["im"]
|
base_im = previous["im"]
|
||||||
dispose = Image.core.fill("RGBA", im.size, (0, 0, 0, 0))
|
dispose = Image.core.fill("RGBA", im.size, (0, 0, 0, 0))
|
||||||
bbox = previous["bbox"]
|
bbox = previous["bbox"]
|
||||||
|
@ -1107,7 +1132,7 @@ def _write_multiple_frames(im, fp, chunk, rawmode):
|
||||||
else:
|
else:
|
||||||
bbox = (0, 0) + im.size
|
bbox = (0, 0) + im.size
|
||||||
base_im.paste(dispose, bbox)
|
base_im.paste(dispose, bbox)
|
||||||
elif prev_disposal == APNG_DISPOSE_OP_PREVIOUS:
|
elif prev_disposal == Disposal.OP_PREVIOUS:
|
||||||
base_im = im_frames[-2]["im"]
|
base_im = im_frames[-2]["im"]
|
||||||
else:
|
else:
|
||||||
base_im = previous["im"]
|
base_im = previous["im"]
|
||||||
|
|
|
@ -314,7 +314,7 @@ if __name__ == "__main__":
|
||||||
outfile = sys.argv[2]
|
outfile = sys.argv[2]
|
||||||
|
|
||||||
# perform some image operation
|
# perform some image operation
|
||||||
im = im.transpose(Image.FLIP_LEFT_RIGHT)
|
im = im.transpose(Image.Transpose.FLIP_LEFT_RIGHT)
|
||||||
print(
|
print(
|
||||||
f"saving a flipped version of {os.path.basename(filename)} "
|
f"saving a flipped version of {os.path.basename(filename)} "
|
||||||
f"as {outfile} "
|
f"as {outfile} "
|
||||||
|
|
|
@ -152,7 +152,7 @@ class TgaImageFile(ImageFile.ImageFile):
|
||||||
|
|
||||||
def load_end(self):
|
def load_end(self):
|
||||||
if self._flip_horizontally:
|
if self._flip_horizontally:
|
||||||
self.im = self.im.transpose(Image.FLIP_LEFT_RIGHT)
|
self.im = self.im.transpose(Image.Transpose.FLIP_LEFT_RIGHT)
|
||||||
|
|
||||||
|
|
||||||
#
|
#
|
||||||
|
|
|
@ -1161,13 +1161,13 @@ class TiffImageFile(ImageFile.ImageFile):
|
||||||
def load_end(self):
|
def load_end(self):
|
||||||
if self._tile_orientation:
|
if self._tile_orientation:
|
||||||
method = {
|
method = {
|
||||||
2: Image.FLIP_LEFT_RIGHT,
|
2: Image.Transpose.FLIP_LEFT_RIGHT,
|
||||||
3: Image.ROTATE_180,
|
3: Image.Transpose.ROTATE_180,
|
||||||
4: Image.FLIP_TOP_BOTTOM,
|
4: Image.Transpose.FLIP_TOP_BOTTOM,
|
||||||
5: Image.TRANSPOSE,
|
5: Image.Transpose.TRANSPOSE,
|
||||||
6: Image.ROTATE_270,
|
6: Image.Transpose.ROTATE_270,
|
||||||
7: Image.TRANSVERSE,
|
7: Image.Transpose.TRANSVERSE,
|
||||||
8: Image.ROTATE_90,
|
8: Image.Transpose.ROTATE_90,
|
||||||
}.get(self._tile_orientation)
|
}.get(self._tile_orientation)
|
||||||
if method is not None:
|
if method is not None:
|
||||||
self.im = self.im.transpose(method)
|
self.im = self.im.transpose(method)
|
||||||
|
|
|
@ -1063,7 +1063,7 @@ _gaussian_blur(ImagingObject *self, PyObject *args) {
|
||||||
static PyObject *
|
static PyObject *
|
||||||
_getpalette(ImagingObject *self, PyObject *args) {
|
_getpalette(ImagingObject *self, PyObject *args) {
|
||||||
PyObject *palette;
|
PyObject *palette;
|
||||||
int palettesize = 256;
|
int palettesize;
|
||||||
int bits;
|
int bits;
|
||||||
ImagingShuffler pack;
|
ImagingShuffler pack;
|
||||||
|
|
||||||
|
@ -1084,6 +1084,7 @@ _getpalette(ImagingObject *self, PyObject *args) {
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
palettesize = self->image->palette->size;
|
||||||
palette = PyBytes_FromStringAndSize(NULL, palettesize * bits / 8);
|
palette = PyBytes_FromStringAndSize(NULL, palettesize * bits / 8);
|
||||||
if (!palette) {
|
if (!palette) {
|
||||||
return NULL;
|
return NULL;
|
||||||
|
@ -1641,7 +1642,7 @@ _putpalette(ImagingObject *self, PyObject *args) {
|
||||||
ImagingShuffler unpack;
|
ImagingShuffler unpack;
|
||||||
int bits;
|
int bits;
|
||||||
|
|
||||||
char *rawmode;
|
char *rawmode, *palette_mode;
|
||||||
UINT8 *palette;
|
UINT8 *palette;
|
||||||
Py_ssize_t palettesize;
|
Py_ssize_t palettesize;
|
||||||
if (!PyArg_ParseTuple(args, "sy#", &rawmode, &palette, &palettesize)) {
|
if (!PyArg_ParseTuple(args, "sy#", &rawmode, &palette, &palettesize)) {
|
||||||
|
@ -1654,7 +1655,8 @@ _putpalette(ImagingObject *self, PyObject *args) {
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
unpack = ImagingFindUnpacker("RGB", rawmode, &bits);
|
palette_mode = strncmp("RGBA", rawmode, 4) == 0 ? "RGBA" : "RGB";
|
||||||
|
unpack = ImagingFindUnpacker(palette_mode, rawmode, &bits);
|
||||||
if (!unpack) {
|
if (!unpack) {
|
||||||
PyErr_SetString(PyExc_ValueError, wrong_raw_mode);
|
PyErr_SetString(PyExc_ValueError, wrong_raw_mode);
|
||||||
return NULL;
|
return NULL;
|
||||||
|
@ -1669,11 +1671,13 @@ _putpalette(ImagingObject *self, PyObject *args) {
|
||||||
|
|
||||||
strcpy(self->image->mode, strlen(self->image->mode) == 2 ? "PA" : "P");
|
strcpy(self->image->mode, strlen(self->image->mode) == 2 ? "PA" : "P");
|
||||||
|
|
||||||
self->image->palette = ImagingPaletteNew("RGB");
|
self->image->palette = ImagingPaletteNew(palette_mode);
|
||||||
|
|
||||||
unpack(self->image->palette->palette, palette, palettesize * 8 / bits);
|
self->image->palette->size = palettesize * 8 / bits;
|
||||||
|
unpack(self->image->palette->palette, palette, self->image->palette->size);
|
||||||
|
|
||||||
return PyLong_FromLong(palettesize * 8 / bits);
|
Py_INCREF(Py_None);
|
||||||
|
return Py_None;
|
||||||
}
|
}
|
||||||
|
|
||||||
static PyObject *
|
static PyObject *
|
||||||
|
|
|
@ -991,115 +991,116 @@ static struct {
|
||||||
/* ------------------- */
|
/* ------------------- */
|
||||||
|
|
||||||
static void
|
static void
|
||||||
p2bit(UINT8 *out, const UINT8 *in, int xsize, const UINT8 *palette) {
|
p2bit(UINT8 *out, const UINT8 *in, int xsize, ImagingPalette palette) {
|
||||||
int x;
|
int x;
|
||||||
/* FIXME: precalculate greyscale palette? */
|
/* FIXME: precalculate greyscale palette? */
|
||||||
for (x = 0; x < xsize; x++) {
|
for (x = 0; x < xsize; x++) {
|
||||||
*out++ = (L(&palette[in[x] * 4]) >= 128000) ? 255 : 0;
|
*out++ = (L(&palette->palette[in[x] * 4]) >= 128000) ? 255 : 0;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static void
|
static void
|
||||||
pa2bit(UINT8 *out, const UINT8 *in, int xsize, const UINT8 *palette) {
|
pa2bit(UINT8 *out, const UINT8 *in, int xsize, ImagingPalette palette) {
|
||||||
int x;
|
int x;
|
||||||
/* FIXME: precalculate greyscale palette? */
|
/* FIXME: precalculate greyscale palette? */
|
||||||
for (x = 0; x < xsize; x++, in += 4) {
|
for (x = 0; x < xsize; x++, in += 4) {
|
||||||
*out++ = (L(&palette[in[0] * 4]) >= 128000) ? 255 : 0;
|
*out++ = (L(&palette->palette[in[0] * 4]) >= 128000) ? 255 : 0;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static void
|
static void
|
||||||
p2l(UINT8 *out, const UINT8 *in, int xsize, const UINT8 *palette) {
|
p2l(UINT8 *out, const UINT8 *in, int xsize, ImagingPalette palette) {
|
||||||
int x;
|
int x;
|
||||||
/* FIXME: precalculate greyscale palette? */
|
/* FIXME: precalculate greyscale palette? */
|
||||||
for (x = 0; x < xsize; x++) {
|
for (x = 0; x < xsize; x++) {
|
||||||
*out++ = L24(&palette[in[x] * 4]) >> 16;
|
*out++ = L24(&palette->palette[in[x] * 4]) >> 16;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static void
|
static void
|
||||||
pa2l(UINT8 *out, const UINT8 *in, int xsize, const UINT8 *palette) {
|
pa2l(UINT8 *out, const UINT8 *in, int xsize, ImagingPalette palette) {
|
||||||
int x;
|
int x;
|
||||||
/* FIXME: precalculate greyscale palette? */
|
/* FIXME: precalculate greyscale palette? */
|
||||||
for (x = 0; x < xsize; x++, in += 4) {
|
for (x = 0; x < xsize; x++, in += 4) {
|
||||||
*out++ = L24(&palette[in[0] * 4]) >> 16;
|
*out++ = L24(&palette->palette[in[0] * 4]) >> 16;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static void
|
static void
|
||||||
p2pa(UINT8 *out, const UINT8 *in, int xsize, const UINT8 *palette) {
|
p2pa(UINT8 *out, const UINT8 *in, int xsize, ImagingPalette palette) {
|
||||||
int x;
|
int x;
|
||||||
|
int rgb = strcmp(palette->mode, "RGB");
|
||||||
for (x = 0; x < xsize; x++, in++) {
|
for (x = 0; x < xsize; x++, in++) {
|
||||||
const UINT8 *rgba = &palette[in[0]];
|
const UINT8 *rgba = &palette->palette[in[0]];
|
||||||
*out++ = in[0];
|
*out++ = in[0];
|
||||||
*out++ = in[0];
|
*out++ = in[0];
|
||||||
*out++ = in[0];
|
*out++ = in[0];
|
||||||
*out++ = rgba[3];
|
*out++ = rgb == 0 ? 255 : rgba[3];
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static void
|
static void
|
||||||
p2la(UINT8 *out, const UINT8 *in, int xsize, const UINT8 *palette) {
|
p2la(UINT8 *out, const UINT8 *in, int xsize, ImagingPalette palette) {
|
||||||
int x;
|
int x;
|
||||||
/* FIXME: precalculate greyscale palette? */
|
/* FIXME: precalculate greyscale palette? */
|
||||||
for (x = 0; x < xsize; x++, out += 4) {
|
for (x = 0; x < xsize; x++, out += 4) {
|
||||||
const UINT8 *rgba = &palette[*in++ * 4];
|
const UINT8 *rgba = &palette->palette[*in++ * 4];
|
||||||
out[0] = out[1] = out[2] = L24(rgba) >> 16;
|
out[0] = out[1] = out[2] = L24(rgba) >> 16;
|
||||||
out[3] = rgba[3];
|
out[3] = rgba[3];
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static void
|
static void
|
||||||
pa2la(UINT8 *out, const UINT8 *in, int xsize, const UINT8 *palette) {
|
pa2la(UINT8 *out, const UINT8 *in, int xsize, ImagingPalette palette) {
|
||||||
int x;
|
int x;
|
||||||
/* FIXME: precalculate greyscale palette? */
|
/* FIXME: precalculate greyscale palette? */
|
||||||
for (x = 0; x < xsize; x++, in += 4, out += 4) {
|
for (x = 0; x < xsize; x++, in += 4, out += 4) {
|
||||||
out[0] = out[1] = out[2] = L24(&palette[in[0] * 4]) >> 16;
|
out[0] = out[1] = out[2] = L24(&palette->palette[in[0] * 4]) >> 16;
|
||||||
out[3] = in[3];
|
out[3] = in[3];
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static void
|
static void
|
||||||
p2i(UINT8 *out_, const UINT8 *in, int xsize, const UINT8 *palette) {
|
p2i(UINT8 *out_, const UINT8 *in, int xsize, ImagingPalette palette) {
|
||||||
int x;
|
int x;
|
||||||
for (x = 0; x < xsize; x++, out_ += 4) {
|
for (x = 0; x < xsize; x++, out_ += 4) {
|
||||||
INT32 v = L24(&palette[in[x] * 4]) >> 16;
|
INT32 v = L24(&palette->palette[in[x] * 4]) >> 16;
|
||||||
memcpy(out_, &v, sizeof(v));
|
memcpy(out_, &v, sizeof(v));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static void
|
static void
|
||||||
pa2i(UINT8 *out_, const UINT8 *in, int xsize, const UINT8 *palette) {
|
pa2i(UINT8 *out_, const UINT8 *in, int xsize, ImagingPalette palette) {
|
||||||
int x;
|
int x;
|
||||||
INT32 *out = (INT32 *)out_;
|
INT32 *out = (INT32 *)out_;
|
||||||
for (x = 0; x < xsize; x++, in += 4) {
|
for (x = 0; x < xsize; x++, in += 4) {
|
||||||
*out++ = L24(&palette[in[0] * 4]) >> 16;
|
*out++ = L24(&palette->palette[in[0] * 4]) >> 16;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static void
|
static void
|
||||||
p2f(UINT8 *out_, const UINT8 *in, int xsize, const UINT8 *palette) {
|
p2f(UINT8 *out_, const UINT8 *in, int xsize, ImagingPalette palette) {
|
||||||
int x;
|
int x;
|
||||||
for (x = 0; x < xsize; x++, out_ += 4) {
|
for (x = 0; x < xsize; x++, out_ += 4) {
|
||||||
FLOAT32 v = L(&palette[in[x] * 4]) / 1000.0F;
|
FLOAT32 v = L(&palette->palette[in[x] * 4]) / 1000.0F;
|
||||||
memcpy(out_, &v, sizeof(v));
|
memcpy(out_, &v, sizeof(v));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static void
|
static void
|
||||||
pa2f(UINT8 *out_, const UINT8 *in, int xsize, const UINT8 *palette) {
|
pa2f(UINT8 *out_, const UINT8 *in, int xsize, ImagingPalette palette) {
|
||||||
int x;
|
int x;
|
||||||
FLOAT32 *out = (FLOAT32 *)out_;
|
FLOAT32 *out = (FLOAT32 *)out_;
|
||||||
for (x = 0; x < xsize; x++, in += 4) {
|
for (x = 0; x < xsize; x++, in += 4) {
|
||||||
*out++ = (float)L(&palette[in[0] * 4]) / 1000.0F;
|
*out++ = (float)L(&palette->palette[in[0] * 4]) / 1000.0F;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static void
|
static void
|
||||||
p2rgb(UINT8 *out, const UINT8 *in, int xsize, const UINT8 *palette) {
|
p2rgb(UINT8 *out, const UINT8 *in, int xsize, ImagingPalette palette) {
|
||||||
int x;
|
int x;
|
||||||
for (x = 0; x < xsize; x++) {
|
for (x = 0; x < xsize; x++) {
|
||||||
const UINT8 *rgb = &palette[*in++ * 4];
|
const UINT8 *rgb = &palette->palette[*in++ * 4];
|
||||||
*out++ = rgb[0];
|
*out++ = rgb[0];
|
||||||
*out++ = rgb[1];
|
*out++ = rgb[1];
|
||||||
*out++ = rgb[2];
|
*out++ = rgb[2];
|
||||||
|
@ -1108,10 +1109,10 @@ p2rgb(UINT8 *out, const UINT8 *in, int xsize, const UINT8 *palette) {
|
||||||
}
|
}
|
||||||
|
|
||||||
static void
|
static void
|
||||||
pa2rgb(UINT8 *out, const UINT8 *in, int xsize, const UINT8 *palette) {
|
pa2rgb(UINT8 *out, const UINT8 *in, int xsize, ImagingPalette palette) {
|
||||||
int x;
|
int x;
|
||||||
for (x = 0; x < xsize; x++, in += 4) {
|
for (x = 0; x < xsize; x++, in += 4) {
|
||||||
const UINT8 *rgb = &palette[in[0] * 4];
|
const UINT8 *rgb = &palette->palette[in[0] * 4];
|
||||||
*out++ = rgb[0];
|
*out++ = rgb[0];
|
||||||
*out++ = rgb[1];
|
*out++ = rgb[1];
|
||||||
*out++ = rgb[2];
|
*out++ = rgb[2];
|
||||||
|
@@ -1120,30 +1121,30 @@ pa2rgb(UINT8 *out, const UINT8 *in, int xsize, const UINT8 *palette) {
 }

 static void
-p2hsv(UINT8 *out, const UINT8 *in, int xsize, const UINT8 *palette) {
+p2hsv(UINT8 *out, const UINT8 *in, int xsize, ImagingPalette palette) {
     int x;
     for (x = 0; x < xsize; x++, out += 4) {
-        const UINT8 *rgb = &palette[*in++ * 4];
+        const UINT8 *rgb = &palette->palette[*in++ * 4];
         rgb2hsv_row(out, rgb);
         out[3] = 255;
     }
 }

 static void
-pa2hsv(UINT8 *out, const UINT8 *in, int xsize, const UINT8 *palette) {
+pa2hsv(UINT8 *out, const UINT8 *in, int xsize, ImagingPalette palette) {
     int x;
     for (x = 0; x < xsize; x++, in += 4, out += 4) {
-        const UINT8 *rgb = &palette[in[0] * 4];
+        const UINT8 *rgb = &palette->palette[in[0] * 4];
         rgb2hsv_row(out, rgb);
         out[3] = 255;
     }
 }

 static void
-p2rgba(UINT8 *out, const UINT8 *in, int xsize, const UINT8 *palette) {
+p2rgba(UINT8 *out, const UINT8 *in, int xsize, ImagingPalette palette) {
     int x;
     for (x = 0; x < xsize; x++) {
-        const UINT8 *rgba = &palette[*in++ * 4];
+        const UINT8 *rgba = &palette->palette[*in++ * 4];
         *out++ = rgba[0];
         *out++ = rgba[1];
         *out++ = rgba[2];
@@ -1152,10 +1153,10 @@ p2rgba(UINT8 *out, const UINT8 *in, int xsize, const UINT8 *palette) {
 }

 static void
-pa2rgba(UINT8 *out, const UINT8 *in, int xsize, const UINT8 *palette) {
+pa2rgba(UINT8 *out, const UINT8 *in, int xsize, ImagingPalette palette) {
     int x;
     for (x = 0; x < xsize; x++, in += 4) {
-        const UINT8 *rgb = &palette[in[0] * 4];
+        const UINT8 *rgb = &palette->palette[in[0] * 4];
         *out++ = rgb[0];
         *out++ = rgb[1];
         *out++ = rgb[2];
@@ -1164,25 +1165,25 @@ pa2rgba(UINT8 *out, const UINT8 *in, int xsize, const UINT8 *palette) {
 }

 static void
-p2cmyk(UINT8 *out, const UINT8 *in, int xsize, const UINT8 *palette) {
+p2cmyk(UINT8 *out, const UINT8 *in, int xsize, ImagingPalette palette) {
     p2rgb(out, in, xsize, palette);
     rgb2cmyk(out, out, xsize);
 }

 static void
-pa2cmyk(UINT8 *out, const UINT8 *in, int xsize, const UINT8 *palette) {
+pa2cmyk(UINT8 *out, const UINT8 *in, int xsize, ImagingPalette palette) {
     pa2rgb(out, in, xsize, palette);
     rgb2cmyk(out, out, xsize);
 }

 static void
-p2ycbcr(UINT8 *out, const UINT8 *in, int xsize, const UINT8 *palette) {
+p2ycbcr(UINT8 *out, const UINT8 *in, int xsize, ImagingPalette palette) {
     p2rgb(out, in, xsize, palette);
     ImagingConvertRGB2YCbCr(out, out, xsize);
 }

 static void
-pa2ycbcr(UINT8 *out, const UINT8 *in, int xsize, const UINT8 *palette) {
+pa2ycbcr(UINT8 *out, const UINT8 *in, int xsize, ImagingPalette palette) {
     pa2rgb(out, in, xsize, palette);
     ImagingConvertRGB2YCbCr(out, out, xsize);
 }
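Taken together, the hunks above switch every palette lookup helper in Convert.c from a raw const UINT8 * palette buffer to the full ImagingPalette handle, with each lookup going through palette->palette[index * 4] (four bytes per entry). A minimal sketch of the resulting converter shape, with the luma written out inline instead of the file's L() macro so it stands on its own; the name p2l_sketch is illustrative and not part of the patch:

    /* Sketch only: the post-patch converter signature, taking the palette
       handle and indexing its backing byte array (4 bytes per entry). */
    static void
    p2l_sketch(UINT8 *out, const UINT8 *in, int xsize, ImagingPalette palette) {
        int x;
        for (x = 0; x < xsize; x++) {
            const UINT8 *rgb = &palette->palette[in[x] * 4];
            /* ITU-R 601-2 luma transform, as the L() macro above does. */
            *out++ = (UINT8)((rgb[0] * 299 + rgb[1] * 587 + rgb[2] * 114) / 1000);
        }
    }

Passing the handle instead of the bare byte array is what lets the converters see the palette's entry count as well as its bytes.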
@@ -1192,7 +1193,7 @@ frompalette(Imaging imOut, Imaging imIn, const char *mode) {
     ImagingSectionCookie cookie;
     int alpha;
     int y;
-    void (*convert)(UINT8 *, const UINT8 *, int, const UINT8 *);
+    void (*convert)(UINT8 *, const UINT8 *, int, ImagingPalette);

     /* Map palette image to L, RGB, RGBA, or CMYK */

@@ -1239,7 +1240,7 @@ frompalette(Imaging imOut, Imaging imIn, const char *mode) {
             (UINT8 *)imOut->image[y],
             (UINT8 *)imIn->image[y],
             imIn->xsize,
-            imIn->palette->palette);
+            imIn->palette);
     }
     ImagingSectionLeave(&cookie);

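With the per-row converters now taking the palette handle, the convert function-pointer type and the row loop in frompalette() change in lock step: the call site passes imIn->palette rather than imIn->palette->palette. A rough sketch of the dispatch after the patch (the setup that picks convert from the target mode is elided here, so this fragment is not compilable on its own):

    /* Sketch: per-row dispatch once `convert` has been chosen
       (p2rgb, pa2rgba, ... depending on the target mode). */
    ImagingSectionEnter(&cookie);
    for (y = 0; y < imIn->ysize; y++) {
        (*convert)(
            (UINT8 *)imOut->image[y],
            (UINT8 *)imIn->image[y],
            imIn->xsize,
            imIn->palette);
    }
    ImagingSectionLeave(&cookie);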
@@ -143,6 +143,7 @@ struct ImagingPaletteInstance {
     char mode[IMAGING_MODE_LENGTH]; /* Band names */

     /* Data */
+    int size;
     UINT8 palette[1024]; /* Palette data (same format as image data) */

     INT16 *cache; /* Palette cache (used for predefined palettes) */
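The struct (ImagingPaletteInstance, in src/libImaging/Imaging.h) gains a size field alongside the fixed 1024-byte backing store, so code that receives an ImagingPalette can tell how many of the at-most-256 four-byte entries are meaningful. A small sketch of a walk bounded by the new field; the helper name is hypothetical and printf needs <stdio.h>:

    /* Sketch: iterate only the valid entries instead of assuming 256. */
    static void
    dump_palette(ImagingPalette palette) {
        int i;
        for (i = 0; i < palette->size; i++) {
            printf("%3d: #%02x%02x%02x\n",
                   i,
                   palette->palette[i * 4 + 0],
                   palette->palette[i * 4 + 1],
                   palette->palette[i * 4 + 2]);
        }
    }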
@@ -40,6 +40,7 @@ ImagingPaletteNew(const char *mode) {
     palette->mode[IMAGING_MODE_LENGTH - 1] = 0;

     /* Initialize to ramp */
+    palette->size = 256;
     for (i = 0; i < 256; i++) {
         palette->palette[i * 4 + 0] = palette->palette[i * 4 + 1] =
             palette->palette[i * 4 + 2] = (UINT8)i;
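ImagingPaletteNew() still fills the full greyscale ramp, so a freshly created palette reports all 256 entries as valid; size only becomes smaller when a shorter palette is attached later. A hedged usage sketch (setting size directly as shown here is illustrative, not something this hunk itself does):

    /* Sketch: create a palette, then mark only the first 16 entries as
       valid, e.g. after loading a 16-colour palette into palette->palette. */
    ImagingPalette palette = ImagingPaletteNew("RGB");
    if (palette) {
        palette->size = 16;
        /* ... fill palette->palette[0 .. 16 * 4 - 1] ... */
    }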
@@ -193,7 +194,7 @@ ImagingPaletteCacheUpdate(ImagingPalette palette, int r, int g, int b) {

     dmax = (unsigned int)~0;

-    for (i = 0; i < 256; i++) {
+    for (i = 0; i < palette->size; i++) {
         int r, g, b;
         unsigned int tmin, tmax;

@@ -226,7 +227,7 @@ ImagingPaletteCacheUpdate(ImagingPalette palette, int r, int g, int b) {
         d[i] = (unsigned int)~0;
     }

-    for (i = 0; i < 256; i++) {
+    for (i = 0; i < palette->size; i++) {
         if (dmin[i] <= dmax) {
             int rd, gd, bd;
             int ri, gi, bi;
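Both search loops in ImagingPaletteCacheUpdate() are now bounded by palette->size, so nearest-colour matching never considers entries beyond the attached palette; for a full 256-entry palette the behaviour is unchanged. The idea, reduced to a stand-alone sketch (this is not the actual cache code, just the bounded scan it relies on):

    /* Sketch: a simplified nearest-colour scan bounded by palette->size
       rather than a hard-coded 256. */
    static int
    find_nearest_sketch(ImagingPalette palette, int r, int g, int b) {
        int i, best = 0;
        unsigned int bestd = (unsigned int)~0;
        for (i = 0; i < palette->size; i++) {
            int rd = r - palette->palette[i * 4 + 0];
            int gd = g - palette->palette[i * 4 + 1];
            int bd = b - palette->palette[i * 4 + 2];
            unsigned int dist = (unsigned int)(rd * rd + gd * gd + bd * bd);
            if (dist < bestd) {
                bestd = dist;
                best = i;
            }
        }
        return best;
    }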