Mirror of https://github.com/python-pillow/Pillow.git (synced 2024-12-26 18:06:18 +03:00)

Merge pull request #6196 from hugovk/cleanup-names

Cleanup: adjust names

Commit 43b423bc25
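The whole pull request is a mechanical rename of camelCase/mixedCase identifiers to the PEP 8 conventions: snake_case for functions and variables, CapWords for classes. As a rough, hypothetical illustration of the pattern applied throughout the hunks below (these names are invented, not taken from the diff):

# Before: mixedCase names and a lowercase class name (hypothetical example)
class image_loader:
    def loadFrame(self, frameNumber):
        return self.frames[frameNumber]

# After: PEP 8 naming - CapWords for the class, snake_case everywhere else
class ImageLoader:
    def load_frame(self, frame_number):
        return self.frames[frame_number]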
@@ -324,7 +324,7 @@ def is_mingw():
     return sysconfig.get_platform() == "mingw"


-class cached_property:
+class CachedProperty:
     def __init__(self, func):
         self.func = func

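For context, a helper like the renamed CachedProperty above is typically a small descriptor that computes a value once per instance and then caches it on the instance. A minimal sketch of that idea, assuming the conventional shape rather than quoting this repository's exact implementation:

class CachedProperty:
    def __init__(self, func):
        self.func = func

    def __get__(self, instance, owner=None):
        if instance is None:
            return self
        # Compute once, then store the result under the function's name so
        # later lookups hit the instance attribute instead of this descriptor.
        value = self.func(instance)
        instance.__dict__[self.func.__name__] = value
        return value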
@@ -25,7 +25,7 @@ def box_blur(image, radius=1, n=1):
     return image._new(image.im.box_blur(radius, n))


-def assertImage(im, data, delta=0):
+def assert_image(im, data, delta=0):
     it = iter(im.getdata())
     for data_row in data:
         im_row = [next(it) for _ in range(im.size[0])]
@@ -35,12 +35,12 @@ def assertImage(im, data, delta=0):
             next(it)


-def assertBlur(im, radius, data, passes=1, delta=0):
+def assert_blur(im, radius, data, passes=1, delta=0):
     # check grayscale image
-    assertImage(box_blur(im, radius, passes), data, delta)
+    assert_image(box_blur(im, radius, passes), data, delta)
     rgba = Image.merge("RGBA", (im, im, im, im))
     for band in box_blur(rgba, radius, passes).split():
-        assertImage(band, data, delta)
+        assert_image(band, data, delta)


 def test_color_modes():
@@ -64,7 +64,7 @@ def test_color_modes():


 def test_radius_0():
-    assertBlur(
+    assert_blur(
         sample,
         0,
         [
@@ -80,7 +80,7 @@ def test_radius_0():


 def test_radius_0_02():
-    assertBlur(
+    assert_blur(
         sample,
         0.02,
         [
@@ -97,7 +97,7 @@ def test_radius_0_02():


 def test_radius_0_05():
-    assertBlur(
+    assert_blur(
         sample,
         0.05,
         [
@@ -114,7 +114,7 @@ def test_radius_0_05():


 def test_radius_0_1():
-    assertBlur(
+    assert_blur(
         sample,
         0.1,
         [
@@ -131,7 +131,7 @@ def test_radius_0_1():


 def test_radius_0_5():
-    assertBlur(
+    assert_blur(
         sample,
         0.5,
         [
@@ -148,7 +148,7 @@ def test_radius_0_5():


 def test_radius_1():
-    assertBlur(
+    assert_blur(
         sample,
         1,
         [
@@ -165,7 +165,7 @@ def test_radius_1():


 def test_radius_1_5():
-    assertBlur(
+    assert_blur(
         sample,
         1.5,
         [
@@ -182,7 +182,7 @@ def test_radius_1_5():


 def test_radius_bigger_then_half():
-    assertBlur(
+    assert_blur(
         sample,
         3,
         [
@@ -199,7 +199,7 @@ def test_radius_bigger_then_half():


 def test_radius_bigger_then_width():
-    assertBlur(
+    assert_blur(
         sample,
         10,
         [
@@ -214,7 +214,7 @@ def test_radius_bigger_then_width():


 def test_extreme_large_radius():
-    assertBlur(
+    assert_blur(
         sample,
         600,
         [
@@ -229,7 +229,7 @@ def test_extreme_large_radius():


 def test_two_passes():
-    assertBlur(
+    assert_blur(
         sample,
         1,
         [
@@ -247,7 +247,7 @@ def test_two_passes():


 def test_three_passes():
-    assertBlur(
+    assert_blur(
         sample,
         1,
         [
@@ -15,27 +15,27 @@ except ImportError:
 class TestColorLut3DCoreAPI:
     def generate_identity_table(self, channels, size):
         if isinstance(size, tuple):
-            size1D, size2D, size3D = size
+            size_1d, size_2d, size_3d = size
         else:
-            size1D, size2D, size3D = (size, size, size)
+            size_1d, size_2d, size_3d = (size, size, size)

         table = [
             [
-                r / (size1D - 1) if size1D != 1 else 0,
-                g / (size2D - 1) if size2D != 1 else 0,
-                b / (size3D - 1) if size3D != 1 else 0,
-                r / (size1D - 1) if size1D != 1 else 0,
-                g / (size2D - 1) if size2D != 1 else 0,
+                r / (size_1d - 1) if size_1d != 1 else 0,
+                g / (size_2d - 1) if size_2d != 1 else 0,
+                b / (size_3d - 1) if size_3d != 1 else 0,
+                r / (size_1d - 1) if size_1d != 1 else 0,
+                g / (size_2d - 1) if size_2d != 1 else 0,
             ][:channels]
-            for b in range(size3D)
-            for g in range(size2D)
-            for r in range(size1D)
+            for b in range(size_3d)
+            for g in range(size_2d)
+            for r in range(size_1d)
         ]
         return (
             channels,
-            size1D,
-            size2D,
-            size3D,
+            size_1d,
+            size_2d,
+            size_3d,
             [item for sublist in table for item in sublist],
         )

@@ -78,7 +78,7 @@ class TestDecompressionCrop:
     def teardown_class(self):
         Image.MAX_IMAGE_PIXELS = ORIGINAL_LIMIT

-    def testEnlargeCrop(self):
+    def test_enlarge_crop(self):
         # Crops can extend the extents, therefore we should have the
         # same decompression bomb warnings on them.
         with hopper() as src:
@@ -799,31 +799,31 @@ def test_zero_comment_subblocks():
 def test_version(tmp_path):
     out = str(tmp_path / "temp.gif")

-    def assertVersionAfterSave(im, version):
+    def assert_version_after_save(im, version):
         im.save(out)
         with Image.open(out) as reread:
             assert reread.info["version"] == version

     # Test that GIF87a is used by default
     im = Image.new("L", (100, 100), "#000")
-    assertVersionAfterSave(im, b"GIF87a")
+    assert_version_after_save(im, b"GIF87a")

     # Test setting the version to 89a
     im = Image.new("L", (100, 100), "#000")
     im.info["version"] = b"89a"
-    assertVersionAfterSave(im, b"GIF89a")
+    assert_version_after_save(im, b"GIF89a")

     # Test that adding a GIF89a feature changes the version
     im.info["transparency"] = 1
-    assertVersionAfterSave(im, b"GIF89a")
+    assert_version_after_save(im, b"GIF89a")

     # Test that a GIF87a image is also saved in that format
     with Image.open("Tests/images/test.colors.gif") as im:
-        assertVersionAfterSave(im, b"GIF87a")
+        assert_version_after_save(im, b"GIF87a")

         # Test that a GIF89a image is also saved in that format
         im.info["version"] = b"GIF89a"
-        assertVersionAfterSave(im, b"GIF87a")
+        assert_version_after_save(im, b"GIF87a")


 def test_append_images(tmp_path):
@@ -838,10 +838,10 @@ def test_append_images(tmp_path):
         assert reread.n_frames == 3

     # Tests appending using a generator
-    def imGenerator(ims):
+    def im_generator(ims):
         yield from ims

-    im.save(out, save_all=True, append_images=imGenerator(ims))
+    im.save(out, save_all=True, append_images=im_generator(ims))

     with Image.open(out) as reread:
         assert reread.n_frames == 3
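The im_generator helpers renamed in these test hunks exist to show that append_images accepts any iterable, including a generator. A hedged usage sketch (the output filename is a placeholder):

from PIL import Image

def im_generator(ims):
    # Any iterable works for append_images; a generator avoids building a
    # list of frame objects up front.
    yield from ims

frames = [Image.new("L", (64, 64), value) for value in (0, 128, 255)]
frames[0].save(
    "animated.gif",  # placeholder filename
    save_all=True,
    append_images=im_generator(frames[1:]),
)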
@@ -145,10 +145,10 @@ def test_mp_attribute():
     for test_file in test_files:
         with Image.open(test_file) as im:
             mpinfo = im._getmp()
-            frameNumber = 0
+            frame_number = 0
             for mpentry in mpinfo[0xB002]:
                 mpattr = mpentry["Attribute"]
-                if frameNumber:
+                if frame_number:
                     assert not mpattr["RepresentativeImageFlag"]
                 else:
                     assert mpattr["RepresentativeImageFlag"]
@@ -157,7 +157,7 @@ def test_mp_attribute():
                 assert mpattr["ImageDataFormat"] == "JPEG"
                 assert mpattr["MPType"] == "Multi-Frame Image: (Disparity)"
                 assert mpattr["Reserved"] == 0
-                frameNumber += 1
+                frame_number += 1


 def test_seek():
@@ -131,10 +131,10 @@ def test_save_all(tmp_path):
     assert os.path.getsize(outfile) > 0

     # Test appending using a generator
-    def imGenerator(ims):
+    def im_generator(ims):
         yield from ims

-    im.save(outfile, save_all=True, append_images=imGenerator(ims))
+    im.save(outfile, save_all=True, append_images=im_generator(ims))

     assert os.path.isfile(outfile)
     assert os.path.getsize(outfile) > 0
@@ -253,9 +253,9 @@ def test_pdf_append(tmp_path):
         check_pdf_pages_consistency(pdf)

     # append two images
-    mode_CMYK = hopper("CMYK")
-    mode_P = hopper("P")
-    mode_CMYK.save(pdf_filename, append=True, save_all=True, append_images=[mode_P])
+    mode_cmyk = hopper("CMYK")
+    mode_p = hopper("P")
+    mode_cmyk.save(pdf_filename, append=True, save_all=True, append_images=[mode_p])

     # open the PDF again, check pages and info again
     with PdfParser.PdfParser(pdf_filename) as pdf:
@@ -151,14 +151,14 @@ class TestFileTiff:
             assert im.info["dpi"] == (71.0, 71.0)

     @pytest.mark.parametrize(
-        "resolutionUnit, dpi",
+        "resolution_unit, dpi",
         [(None, 72.8), (2, 72.8), (3, 184.912)],
     )
-    def test_load_float_dpi(self, resolutionUnit, dpi):
+    def test_load_float_dpi(self, resolution_unit, dpi):
         with Image.open(
-            "Tests/images/hopper_float_dpi_" + str(resolutionUnit) + ".tif"
+            "Tests/images/hopper_float_dpi_" + str(resolution_unit) + ".tif"
         ) as im:
-            assert im.tag_v2.get(RESOLUTION_UNIT) == resolutionUnit
+            assert im.tag_v2.get(RESOLUTION_UNIT) == resolution_unit
             assert im.info["dpi"] == (dpi, dpi)

     def test_save_float_dpi(self, tmp_path):
@@ -655,11 +655,11 @@ class TestFileTiff:
             assert reread.n_frames == 3

         # Test appending using a generator
-        def imGenerator(ims):
+        def im_generator(ims):
             yield from ims

         mp = BytesIO()
-        im.save(mp, format="TIFF", save_all=True, append_images=imGenerator(ims))
+        im.save(mp, format="TIFF", save_all=True, append_images=im_generator(ims))

         mp.seek(0, os.SEEK_SET)
         with Image.open(mp) as reread:
@@ -28,26 +28,26 @@ def test_rt_metadata(tmp_path):
     # For text items, we still have to decode('ascii','replace') because
     # the tiff file format can't take 8 bit bytes in that field.

-    basetextdata = "This is some arbitrary metadata for a text field"
-    bindata = basetextdata.encode("ascii") + b" \xff"
-    textdata = basetextdata + " " + chr(255)
-    reloaded_textdata = basetextdata + " ?"
-    floatdata = 12.345
-    doubledata = 67.89
+    base_text_data = "This is some arbitrary metadata for a text field"
+    bin_data = base_text_data.encode("ascii") + b" \xff"
+    text_data = base_text_data + " " + chr(255)
+    reloaded_text_data = base_text_data + " ?"
+    float_data = 12.345
+    double_data = 67.89
     info = TiffImagePlugin.ImageFileDirectory()

     ImageJMetaData = TAG_IDS["ImageJMetaData"]
     ImageJMetaDataByteCounts = TAG_IDS["ImageJMetaDataByteCounts"]
     ImageDescription = TAG_IDS["ImageDescription"]

-    info[ImageJMetaDataByteCounts] = len(bindata)
-    info[ImageJMetaData] = bindata
-    info[TAG_IDS["RollAngle"]] = floatdata
+    info[ImageJMetaDataByteCounts] = len(bin_data)
+    info[ImageJMetaData] = bin_data
+    info[TAG_IDS["RollAngle"]] = float_data
     info.tagtype[TAG_IDS["RollAngle"]] = 11
-    info[TAG_IDS["YawAngle"]] = doubledata
+    info[TAG_IDS["YawAngle"]] = double_data
     info.tagtype[TAG_IDS["YawAngle"]] = 12

-    info[ImageDescription] = textdata
+    info[ImageDescription] = text_data

     f = str(tmp_path / "temp.tif")

@@ -55,28 +55,28 @@ def test_rt_metadata(tmp_path):

     with Image.open(f) as loaded:

-        assert loaded.tag[ImageJMetaDataByteCounts] == (len(bindata),)
-        assert loaded.tag_v2[ImageJMetaDataByteCounts] == (len(bindata),)
+        assert loaded.tag[ImageJMetaDataByteCounts] == (len(bin_data),)
+        assert loaded.tag_v2[ImageJMetaDataByteCounts] == (len(bin_data),)

-        assert loaded.tag[ImageJMetaData] == bindata
-        assert loaded.tag_v2[ImageJMetaData] == bindata
+        assert loaded.tag[ImageJMetaData] == bin_data
+        assert loaded.tag_v2[ImageJMetaData] == bin_data

-        assert loaded.tag[ImageDescription] == (reloaded_textdata,)
-        assert loaded.tag_v2[ImageDescription] == reloaded_textdata
+        assert loaded.tag[ImageDescription] == (reloaded_text_data,)
+        assert loaded.tag_v2[ImageDescription] == reloaded_text_data

         loaded_float = loaded.tag[TAG_IDS["RollAngle"]][0]
-        assert round(abs(loaded_float - floatdata), 5) == 0
+        assert round(abs(loaded_float - float_data), 5) == 0
         loaded_double = loaded.tag[TAG_IDS["YawAngle"]][0]
-        assert round(abs(loaded_double - doubledata), 7) == 0
+        assert round(abs(loaded_double - double_data), 7) == 0

     # check with 2 element ImageJMetaDataByteCounts, issue #2006

-    info[ImageJMetaDataByteCounts] = (8, len(bindata) - 8)
+    info[ImageJMetaDataByteCounts] = (8, len(bin_data) - 8)
     img.save(f, tiffinfo=info)
     with Image.open(f) as loaded:

-        assert loaded.tag[ImageJMetaDataByteCounts] == (8, len(bindata) - 8)
-        assert loaded.tag_v2[ImageJMetaDataByteCounts] == (8, len(bindata) - 8)
+        assert loaded.tag[ImageJMetaDataByteCounts] == (8, len(bin_data) - 8)
+        assert loaded.tag_v2[ImageJMetaDataByteCounts] == (8, len(bin_data) - 8)


 def test_read_metadata():
@@ -356,7 +356,7 @@ def test_empty_values():
     assert 33432 in info


-def test_PhotoshopInfo(tmp_path):
+def test_photoshop_info(tmp_path):
     with Image.open("Tests/images/issue_2278.tif") as im:
         assert len(im.tag_v2[34377]) == 70
         assert isinstance(im.tag_v2[34377], bytes)
@@ -90,14 +90,14 @@ def test_write_animation_RGB(tmp_path):
             check(temp_file1)

             # Tests appending using a generator
-            def imGenerator(ims):
+            def im_generator(ims):
                 yield from ims

             temp_file2 = str(tmp_path / "temp_generator.webp")
             frame1.copy().save(
                 temp_file2,
                 save_all=True,
-                append_images=imGenerator([frame2]),
+                append_images=im_generator([frame2]),
                 lossless=True,
             )
             check(temp_file2)
@@ -6,8 +6,8 @@ from .helper import hopper


 def test_copy():
-    croppedCoordinates = (10, 10, 20, 20)
-    croppedSize = (10, 10)
+    cropped_coordinates = (10, 10, 20, 20)
+    cropped_size = (10, 10)
     for mode in "1", "P", "L", "RGB", "I", "F":
         # Internal copy method
         im = hopper(mode)
@@ -23,15 +23,15 @@ def test_copy():

         # Internal copy method on a cropped image
         im = hopper(mode)
-        out = im.crop(croppedCoordinates).copy()
+        out = im.crop(cropped_coordinates).copy()
         assert out.mode == im.mode
-        assert out.size == croppedSize
+        assert out.size == cropped_size

         # Python's copy method on a cropped image
         im = hopper(mode)
-        out = copy.copy(im.crop(croppedCoordinates))
+        out = copy.copy(im.crop(cropped_coordinates))
         assert out.mode == im.mode
-        assert out.size == croppedSize
+        assert out.size == cropped_size


 def test_copy_zero():
@@ -99,10 +99,10 @@ def test_rankfilter_properties():


 def test_builtinfilter_p():
-    builtinFilter = ImageFilter.BuiltinFilter()
+    builtin_filter = ImageFilter.BuiltinFilter()

     with pytest.raises(ValueError):
-        builtinFilter.filter(hopper("P"))
+        builtin_filter.filter(hopper("P"))


 def test_kernel_not_enough_coefficients():
@@ -1,6 +1,6 @@
 from PIL import Image

-from .helper import assert_image_equal, cached_property
+from .helper import CachedProperty, assert_image_equal


 class TestImagingPaste:
@@ -34,7 +34,7 @@ class TestImagingPaste:
         im.paste(im2, mask)
         self.assert_9points_image(im, expected)

-    @cached_property
+    @CachedProperty
     def mask_1(self):
         mask = Image.new("1", (self.size, self.size))
         px = mask.load()
@@ -43,11 +43,11 @@ class TestImagingPaste:
                 px[y, x] = (x + y) % 2
         return mask

-    @cached_property
+    @CachedProperty
     def mask_L(self):
         return self.gradient_L.transpose(Image.Transpose.ROTATE_270)

-    @cached_property
+    @CachedProperty
     def gradient_L(self):
         gradient = Image.new("L", (self.size, self.size))
         px = gradient.load()
@@ -56,7 +56,7 @@ class TestImagingPaste:
                 px[y, x] = (x + y) % 255
         return gradient

-    @cached_property
+    @CachedProperty
     def gradient_RGB(self):
         return Image.merge(
             "RGB",
@@ -67,7 +67,7 @@ class TestImagingPaste:
             ],
         )

-    @cached_property
+    @CachedProperty
     def gradient_LA(self):
         return Image.merge(
             "LA",
@@ -77,7 +77,7 @@ class TestImagingPaste:
             ],
         )

-    @cached_property
+    @CachedProperty
     def gradient_RGBA(self):
         return Image.merge(
             "RGBA",
@@ -89,7 +89,7 @@ class TestImagingPaste:
             ],
         )

-    @cached_property
+    @CachedProperty
     def gradient_RGBa(self):
         return Image.merge(
             "RGBa",
@@ -35,9 +35,9 @@ class TestImageFile:

             parser = ImageFile.Parser()
             parser.feed(data)
-            imOut = parser.close()
+            im_out = parser.close()

-            return im, imOut
+            return im, im_out

         assert_image_equal(*roundtrip("BMP"))
         im1, im2 = roundtrip("GIF")
@@ -48,8 +48,8 @@ def img_string_normalize(im):
     return img_to_string(string_to_img(im))


-def assert_img_equal_img_string(A, Bstring):
-    assert img_to_string(A) == img_string_normalize(Bstring)
+def assert_img_equal_img_string(a, b_string):
+    assert img_to_string(a) == img_string_normalize(b_string)


 def test_str_to_img():
@@ -174,7 +174,7 @@ def test_overflow_segfault():
     # through to the sequence. Seeing this on 32-bit Windows.
     with pytest.raises((TypeError, MemoryError)):
         # post patch, this fails with a memory error
-        x = evil()
+        x = Evil()

         # This fails due to the invalid malloc above,
        # and segfaults
@@ -182,7 +182,7 @@ def test_overflow_segfault():
             x[i] = b"0" * 16


-class evil:
+class Evil:
     def __init__(self):
         self.corrupt = Image.core.path(0x4000000000000000)

@@ -65,12 +65,12 @@ def test_libtiff():

 def test_consecutive():
     with Image.open("Tests/images/multipage.tiff") as im:
-        firstFrame = None
+        first_frame = None
         for frame in ImageSequence.Iterator(im):
-            if firstFrame is None:
-                firstFrame = frame.copy()
+            if first_frame is None:
+                first_frame = frame.copy()
         for frame in ImageSequence.Iterator(im):
-            assert_image_equal(frame, firstFrame)
+            assert_image_equal(frame, first_frame)
             break


@@ -26,51 +26,51 @@ def test_basic(tmp_path):

     def basic(mode):

-        imIn = original.convert(mode)
-        verify(imIn)
+        im_in = original.convert(mode)
+        verify(im_in)

-        w, h = imIn.size
+        w, h = im_in.size

-        imOut = imIn.copy()
-        verify(imOut)  # copy
+        im_out = im_in.copy()
+        verify(im_out)  # copy

-        imOut = imIn.transform((w, h), Image.Transform.EXTENT, (0, 0, w, h))
-        verify(imOut)  # transform
+        im_out = im_in.transform((w, h), Image.Transform.EXTENT, (0, 0, w, h))
+        verify(im_out)  # transform

         filename = str(tmp_path / "temp.im")
-        imIn.save(filename)
+        im_in.save(filename)

-        with Image.open(filename) as imOut:
+        with Image.open(filename) as im_out:

-            verify(imIn)
-            verify(imOut)
+            verify(im_in)
+            verify(im_out)

-        imOut = imIn.crop((0, 0, w, h))
-        verify(imOut)
+        im_out = im_in.crop((0, 0, w, h))
+        verify(im_out)

-        imOut = Image.new(mode, (w, h), None)
-        imOut.paste(imIn.crop((0, 0, w // 2, h)), (0, 0))
-        imOut.paste(imIn.crop((w // 2, 0, w, h)), (w // 2, 0))
+        im_out = Image.new(mode, (w, h), None)
+        im_out.paste(im_in.crop((0, 0, w // 2, h)), (0, 0))
+        im_out.paste(im_in.crop((w // 2, 0, w, h)), (w // 2, 0))

-        verify(imIn)
-        verify(imOut)
+        verify(im_in)
+        verify(im_out)

-        imIn = Image.new(mode, (1, 1), 1)
-        assert imIn.getpixel((0, 0)) == 1
+        im_in = Image.new(mode, (1, 1), 1)
+        assert im_in.getpixel((0, 0)) == 1

-        imIn.putpixel((0, 0), 2)
-        assert imIn.getpixel((0, 0)) == 2
+        im_in.putpixel((0, 0), 2)
+        assert im_in.getpixel((0, 0)) == 2

         if mode == "L":
             maximum = 255
         else:
             maximum = 32767

-        imIn = Image.new(mode, (1, 1), 256)
-        assert imIn.getpixel((0, 0)) == min(256, maximum)
+        im_in = Image.new(mode, (1, 1), 256)
+        assert im_in.getpixel((0, 0)) == min(256, maximum)

-        imIn.putpixel((0, 0), 512)
-        assert imIn.getpixel((0, 0)) == min(512, maximum)
+        im_in.putpixel((0, 0), 512)
+        assert im_in.getpixel((0, 0)) == min(512, maximum)

     basic("L")

@@ -8,7 +8,7 @@ def test_is_path():
     fp = "filename.ext"

     # Act
-    it_is = _util.isPath(fp)
+    it_is = _util.is_path(fp)

     # Assert
     assert it_is
@@ -21,7 +21,7 @@ def test_path_obj_is_path():
     test_path = Path("filename.ext")

     # Act
-    it_is = _util.isPath(test_path)
+    it_is = _util.is_path(test_path)

     # Assert
     assert it_is
@@ -33,7 +33,7 @@ def test_is_not_path(tmp_path):
         pass

     # Act
-    it_is_not = _util.isPath(fp)
+    it_is_not = _util.is_path(fp)

     # Assert
     assert not it_is_not
@@ -44,7 +44,7 @@ def test_is_directory():
     directory = "Tests"

     # Act
-    it_is = _util.isDirectory(directory)
+    it_is = _util.is_directory(directory)

     # Assert
     assert it_is
@@ -55,7 +55,7 @@ def test_is_not_directory():
     text = "abc"

     # Act
-    it_is_not = _util.isDirectory(text)
+    it_is_not = _util.is_directory(text)

     # Assert
     assert not it_is_not
@@ -65,7 +65,7 @@ def test_deferred_error():
     # Arrange

     # Act
-    thing = _util.deferred_error(ValueError("Some error text"))
+    thing = _util.DeferredError(ValueError("Some error text"))

     # Assert
     with pytest.raises(ValueError):
@@ -54,20 +54,25 @@ class GdImageFile(ImageFile.ImageFile):
         self.mode = "L"  # FIXME: "P"
         self._size = i16(s, 2), i16(s, 4)

-        trueColor = s[6]
-        trueColorOffset = 2 if trueColor else 0
+        true_color = s[6]
+        true_color_offset = 2 if true_color else 0

         # transparency index
-        tindex = i32(s, 7 + trueColorOffset)
+        tindex = i32(s, 7 + true_color_offset)
         if tindex < 256:
             self.info["transparency"] = tindex

         self.palette = ImagePalette.raw(
-            "XBGR", s[7 + trueColorOffset + 4 : 7 + trueColorOffset + 4 + 256 * 4]
+            "XBGR", s[7 + true_color_offset + 4 : 7 + true_color_offset + 4 + 256 * 4]
         )

         self.tile = [
-            ("raw", (0, 0) + self.size, 7 + trueColorOffset + 4 + 256 * 4, ("L", 0, 1))
+            (
+                "raw",
+                (0, 0) + self.size,
+                7 + true_color_offset + 4 + 256 * 4,
+                ("L", 0, 1),
+            )
         ]


@@ -883,10 +883,10 @@ def _get_palette_bytes(im):
     return im.palette.palette


-def _get_background(im, infoBackground):
+def _get_background(im, info_background):
     background = 0
-    if infoBackground:
-        background = infoBackground
+    if info_background:
+        background = info_background
     if isinstance(background, tuple):
         # WebPImagePlugin stores an RGBA value in info["background"]
         # So it must be converted to the same format as GifImagePlugin's
@@ -51,7 +51,7 @@ except ImportError:
 from . import ImageMode, TiffTags, UnidentifiedImageError, __version__, _plugins
 from ._binary import i32le, o32be, o32le
 from ._deprecate import deprecate
-from ._util import deferred_error, isPath
+from ._util import DeferredError, is_path


 def __getattr__(name):
@@ -108,7 +108,7 @@ try:
     )

 except ImportError as v:
-    core = deferred_error(ImportError("The _imaging C module is not installed."))
+    core = DeferredError(ImportError("The _imaging C module is not installed."))
     # Explanations for ways that we know we might have an import error
     if str(v).startswith("Module use of python"):
         # The _imaging C module is present, but not compiled for
@@ -577,7 +577,7 @@ class Image:
         # Instead of simply setting to None, we're setting up a
         # deferred error that will better explain that the core image
         # object is gone.
-        self.im = deferred_error(ValueError("Operation on closed image"))
+        self.im = DeferredError(ValueError("Operation on closed image"))

     def _copy(self):
         self.load()
@@ -2215,7 +2215,7 @@ class Image:
         if isinstance(fp, Path):
             filename = str(fp)
             open_fp = True
-        elif isPath(fp):
+        elif is_path(fp):
             filename = fp
             open_fp = True
         elif fp == sys.stdout:
@@ -2223,7 +2223,7 @@ class Image:
                 fp = sys.stdout.buffer
             except AttributeError:
                 pass
-        if not filename and hasattr(fp, "name") and isPath(fp.name):
+        if not filename and hasattr(fp, "name") and is_path(fp.name):
             # only set the name for metadata purposes
             filename = fp.name

@@ -3029,7 +3029,7 @@ def open(fp, mode="r", formats=None):
     filename = ""
     if isinstance(fp, Path):
         filename = str(fp.resolve())
-    elif isPath(fp):
+    elif is_path(fp):
         filename = fp

     if filename:
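In Image.open() and Image.save(), the renamed is_path() check is what lets the same argument be either a filename (str, bytes, or pathlib.Path) or an already-open file object. A small illustration of the accepted forms, using a placeholder filename:

import io
from pathlib import Path

from PIL import Image

im1 = Image.open("hopper.png")        # str path (placeholder name)
im2 = Image.open(Path("hopper.png"))  # pathlib.Path
with open("hopper.png", "rb") as f:
    im3 = Image.open(io.BytesIO(f.read()))  # file-like object, no path involved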
@@ -27,9 +27,9 @@ try:
 except ImportError as ex:
     # Allow error import for doc purposes, but error out when accessing
     # anything in core.
-    from ._util import deferred_error
+    from ._util import DeferredError

-    _imagingcms = deferred_error(ex)
+    _imagingcms = DeferredError(ex)

 DESCRIPTION = """
 pyCMS
@@ -197,18 +197,18 @@ class ImageDraw:
                     if width > 8:
                         # Cover potential gaps between the line and the joint
                         if flipped:
-                            gapCoords = [
+                            gap_coords = [
                                 coord_at_angle(point, angles[0] + 90),
                                 point,
                                 coord_at_angle(point, angles[1] + 90),
                             ]
                         else:
-                            gapCoords = [
+                            gap_coords = [
                                 coord_at_angle(point, angles[0] - 90),
                                 point,
                                 coord_at_angle(point, angles[1] - 90),
                             ]
-                        self.line(gapCoords, fill, width=3)
+                        self.line(gap_coords, fill, width=3)

     def shape(self, shape, fill=None, outline=None):
         """(Experimental) Draw a shape."""
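The gap_coords rename sits inside the joint="curve" handling of ImageDraw.line(), which patches the corners of wide multi-segment lines. A short usage sketch with arbitrary coordinates:

from PIL import Image, ImageDraw

im = Image.new("RGB", (200, 200), "white")
draw = ImageDraw.Draw(im)
# joint="curve" triggers the corner-filling code path for wide polylines
draw.line([(20, 180), (100, 40), (180, 180)], fill="black", width=12, joint="curve")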
@@ -33,7 +33,7 @@ import struct
 import sys

 from . import Image
-from ._util import isPath
+from ._util import is_path

 MAXBLOCK = 65536

@@ -99,7 +99,7 @@ class ImageFile(Image.Image):
         self.decoderconfig = ()
         self.decodermaxblock = MAXBLOCK

-        if isPath(fp):
+        if is_path(fp):
             # filename
             self.fp = open(fp, "rb")
             self.filename = fp
@@ -421,8 +421,8 @@ class Color3DLUT(MultibandFilter):
         except TypeError:
             size = (size, size, size)
         size = [int(x) for x in size]
-        for size1D in size:
-            if not 2 <= size1D <= 65:
+        for size_1d in size:
+            if not 2 <= size_1d <= 65:
                 raise ValueError("Size should be in [2, 65] range.")
         return size

@@ -439,22 +439,22 @@ class Color3DLUT(MultibandFilter):
         :param target_mode: Passed to the constructor of the resulting
                             lookup table.
         """
-        size1D, size2D, size3D = cls._check_size(size)
+        size_1d, size_2d, size_3d = cls._check_size(size)
         if channels not in (3, 4):
             raise ValueError("Only 3 or 4 output channels are supported")

-        table = [0] * (size1D * size2D * size3D * channels)
+        table = [0] * (size_1d * size_2d * size_3d * channels)
         idx_out = 0
-        for b in range(size3D):
-            for g in range(size2D):
-                for r in range(size1D):
+        for b in range(size_3d):
+            for g in range(size_2d):
+                for r in range(size_1d):
                     table[idx_out : idx_out + channels] = callback(
-                        r / (size1D - 1), g / (size2D - 1), b / (size3D - 1)
+                        r / (size_1d - 1), g / (size_2d - 1), b / (size_3d - 1)
                     )
                     idx_out += channels

         return cls(
-            (size1D, size2D, size3D),
+            (size_1d, size_2d, size_3d),
             table,
             channels=channels,
             target_mode=target_mode,
@@ -484,20 +484,20 @@ class Color3DLUT(MultibandFilter):
             raise ValueError("Only 3 or 4 output channels are supported")
         ch_in = self.channels
         ch_out = channels or ch_in
-        size1D, size2D, size3D = self.size
+        size_1d, size_2d, size_3d = self.size

-        table = [0] * (size1D * size2D * size3D * ch_out)
+        table = [0] * (size_1d * size_2d * size_3d * ch_out)
         idx_in = 0
         idx_out = 0
-        for b in range(size3D):
-            for g in range(size2D):
-                for r in range(size1D):
+        for b in range(size_3d):
+            for g in range(size_2d):
+                for r in range(size_1d):
                     values = self.table[idx_in : idx_in + ch_in]
                     if with_normals:
                         values = callback(
-                            r / (size1D - 1),
-                            g / (size2D - 1),
-                            b / (size3D - 1),
+                            r / (size_1d - 1),
+                            g / (size_2d - 1),
+                            b / (size_3d - 1),
                             *values,
                         )
                     else:
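The size_1d/size_2d/size_3d renames run through Color3DLUT.generate() and transform(), which walk the whole r, g, b grid and call a callback once per grid point. A hedged example that builds and applies an identity table:

from PIL import Image, ImageFilter

# A 17x17x17 table; the callback receives normalized r, g, b values in [0, 1]
# and returns the output channels for that grid point.
identity_lut = ImageFilter.Color3DLUT.generate(17, lambda r, g, b: (r, g, b))

im = Image.new("RGB", (32, 32), (120, 80, 200))
out = im.filter(identity_lut)  # identity table, so pixels stay (approximately) unchanged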
@@ -34,7 +34,7 @@ from io import BytesIO

 from . import Image
 from ._deprecate import deprecate
-from ._util import isDirectory, isPath
+from ._util import is_directory, is_path


 class Layout(IntEnum):
@@ -52,7 +52,7 @@ def __getattr__(name):
     raise AttributeError(f"module '{__name__}' has no attribute '{name}'")


-class _imagingft_not_installed:
+class _ImagingFtNotInstalled:
     # module placeholder
     def __getattr__(self, id):
         raise ImportError("The _imagingft C module is not installed")
@@ -61,7 +61,7 @@ class _imagingft_not_installed:
 try:
     from . import _imagingft as core
 except ImportError:
-    core = _imagingft_not_installed()
+    core = _ImagingFtNotInstalled()


 # FIXME: add support for pilfont2 format (see FontFile.py)
@@ -198,7 +198,7 @@ class FreeTypeFont:
                 "", size, index, encoding, self.font_bytes, layout_engine
             )

-        if isPath(font):
+        if is_path(font):
             if sys.platform == "win32":
                 font_bytes_path = font if isinstance(font, bytes) else font.encode()
                 try:
@@ -863,7 +863,7 @@ def truetype(font=None, size=10, index=0, encoding="", layout_engine=None):
     try:
         return freetype(font)
     except OSError:
-        if not isPath(font):
+        if not is_path(font):
             raise
         ttf_filename = os.path.basename(font)

@@ -917,7 +917,7 @@ def load_path(filename):
     :exception OSError: If the file could not be read.
     """
     for directory in sys.path:
-        if isDirectory(directory):
+        if is_directory(directory):
             if not isinstance(filename, str):
                 filename = filename.decode("utf-8")
             try:
@@ -20,7 +20,7 @@ import sys
 from io import BytesIO

 from . import Image
-from ._util import isPath
+from ._util import is_path

 qt_versions = [
     ["6", "PyQt6"],
@@ -140,7 +140,7 @@ def _toqclass_helper(im):
     if hasattr(im, "toUtf8"):
         # FIXME - is this really the best way to do this?
         im = str(im.toUtf8(), "utf-8")
-    if isPath(im):
+    if is_path(im):
         im = Image.open(im)
         exclusive_fp = True

@@ -78,10 +78,10 @@ class Stat:

         v = []
         for i in range(0, len(self.h), 256):
-            layerSum = 0.0
+            layer_sum = 0.0
             for j in range(256):
-                layerSum += j * self.h[i + j]
-            v.append(layerSum)
+                layer_sum += j * self.h[i + j]
+            v.append(layer_sum)
         return v

     def _getsum2(self):
@@ -193,15 +193,15 @@ class PcfFontFile(FontFile.FontFile):
         for i in range(nbitmaps):
             offsets.append(i32(fp.read(4)))

-        bitmapSizes = []
+        bitmap_sizes = []
         for i in range(4):
-            bitmapSizes.append(i32(fp.read(4)))
+            bitmap_sizes.append(i32(fp.read(4)))

         # byteorder = format & 4 # non-zero => MSB
         bitorder = format & 8  # non-zero => MSB
         padindex = format & 3

-        bitmapsize = bitmapSizes[padindex]
+        bitmapsize = bitmap_sizes[padindex]
         offsets.append(bitmapsize)

         data = fp.read(bitmapsize)
@@ -225,22 +225,22 @@ class PcfFontFile(FontFile.FontFile):

         fp, format, i16, i32 = self._getformat(PCF_BDF_ENCODINGS)

-        firstCol, lastCol = i16(fp.read(2)), i16(fp.read(2))
-        firstRow, lastRow = i16(fp.read(2)), i16(fp.read(2))
+        first_col, last_col = i16(fp.read(2)), i16(fp.read(2))
+        first_row, last_row = i16(fp.read(2)), i16(fp.read(2))

         i16(fp.read(2))  # default

-        nencoding = (lastCol - firstCol + 1) * (lastRow - firstRow + 1)
+        nencoding = (last_col - first_col + 1) * (last_row - first_row + 1)

-        encodingOffsets = [i16(fp.read(2)) for _ in range(nencoding)]
+        encoding_offsets = [i16(fp.read(2)) for _ in range(nencoding)]

-        for i in range(firstCol, len(encoding)):
+        for i in range(first_col, len(encoding)):
             try:
-                encodingOffset = encodingOffsets[
+                encoding_offset = encoding_offsets[
                     ord(bytearray([i]).decode(self.charset_encoding))
                 ]
-                if encodingOffset != 0xFFFF:
-                    encoding[i] = encodingOffset
+                if encoding_offset != 0xFFFF:
+                    encoding[i] = encoding_offset
             except UnicodeDecodeError:
                 # character is not supported in selected encoding
                 pass
@@ -87,21 +87,21 @@ def _save(im, fp, filename, save_all=False):
         for append_im in append_images:
             append_im.encoderinfo = im.encoderinfo.copy()
             ims.append(append_im)
-    numberOfPages = 0
+    number_of_pages = 0
     image_refs = []
     page_refs = []
     contents_refs = []
     for im in ims:
-        im_numberOfPages = 1
+        im_number_of_pages = 1
         if save_all:
             try:
-                im_numberOfPages = im.n_frames
+                im_number_of_pages = im.n_frames
             except AttributeError:
                 # Image format does not have n_frames.
                 # It is a single frame image
                 pass
-        numberOfPages += im_numberOfPages
-        for i in range(im_numberOfPages):
+        number_of_pages += im_number_of_pages
+        for i in range(im_number_of_pages):
             image_refs.append(existing_pdf.next_object_id(0))
             page_refs.append(existing_pdf.next_object_id(0))
             contents_refs.append(existing_pdf.next_object_id(0))
@@ -111,9 +111,9 @@ def _save(im, fp, filename, save_all=False):
     # catalog and list of pages
     existing_pdf.write_catalog()

-    pageNumber = 0
-    for imSequence in ims:
-        im_pages = ImageSequence.Iterator(imSequence) if save_all else [imSequence]
+    page_number = 0
+    for im_sequence in ims:
+        im_pages = ImageSequence.Iterator(im_sequence) if save_all else [im_sequence]
         for im in im_pages:
             # FIXME: Should replace ASCIIHexDecode with RunLengthDecode
             # (packbits) or LZWDecode (tiff/lzw compression). Note that
@@ -176,7 +176,7 @@ def _save(im, fp, filename, save_all=False):
             width, height = im.size

             existing_pdf.write_obj(
-                image_refs[pageNumber],
+                image_refs[page_number],
                 stream=op.getvalue(),
                 Type=PdfParser.PdfName("XObject"),
                 Subtype=PdfParser.PdfName("Image"),
@@ -193,10 +193,10 @@ def _save(im, fp, filename, save_all=False):
             # page

             existing_pdf.write_page(
-                page_refs[pageNumber],
+                page_refs[page_number],
                 Resources=PdfParser.PdfDict(
                     ProcSet=[PdfParser.PdfName("PDF"), PdfParser.PdfName(procset)],
-                    XObject=PdfParser.PdfDict(image=image_refs[pageNumber]),
+                    XObject=PdfParser.PdfDict(image=image_refs[page_number]),
                 ),
                 MediaBox=[
                     0,
@@ -204,7 +204,7 @@ def _save(im, fp, filename, save_all=False):
                     width * 72.0 / resolution,
                     height * 72.0 / resolution,
                 ],
-                Contents=contents_refs[pageNumber],
+                Contents=contents_refs[page_number],
             )

             #
@@ -215,9 +215,9 @@ def _save(im, fp, filename, save_all=False):
                 height * 72.0 / resolution,
             )

-            existing_pdf.write_obj(contents_refs[pageNumber], stream=page_contents)
+            existing_pdf.write_obj(contents_refs[page_number], stream=page_contents)

-            pageNumber += 1
+            page_number += 1

     #
     # trailer
@@ -39,9 +39,9 @@ try:
 except ImportError as ex:
     # Allow error import for doc purposes, but error out when accessing
     # anything in core.
-    from ._util import deferred_error
+    from ._util import DeferredError

-    FFI = ffi = deferred_error(ex)
+    FFI = ffi = DeferredError(ex)

 logger = logging.getLogger(__name__)

@@ -138,7 +138,7 @@ def _save(im, fp, filename):
     # Flip the image, since the origin of SGI file is the bottom-left corner
     orientation = -1
     # Define the file as SGI File Format
-    magicNumber = 474
+    magic_number = 474
     # Run-Length Encoding Compression - Unsupported at this time
     rle = 0

@@ -167,11 +167,11 @@ def _save(im, fp, filename):
     # Maximum Byte value (255 = 8bits per pixel)
     pinmax = 255
     # Image name (79 characters max, truncated below in write)
-    imgName = os.path.splitext(os.path.basename(filename))[0]
-    imgName = imgName.encode("ascii", "ignore")
+    img_name = os.path.splitext(os.path.basename(filename))[0]
+    img_name = img_name.encode("ascii", "ignore")
     # Standard representation of pixel in the file
     colormap = 0
-    fp.write(struct.pack(">h", magicNumber))
+    fp.write(struct.pack(">h", magic_number))
     fp.write(o8(rle))
     fp.write(o8(bpc))
     fp.write(struct.pack(">H", dim))
@@ -181,8 +181,8 @@ def _save(im, fp, filename):
     fp.write(struct.pack(">l", pinmin))
     fp.write(struct.pack(">l", pinmax))
     fp.write(struct.pack("4s", b""))  # dummy
-    fp.write(struct.pack("79s", imgName))  # truncates to 79 chars
-    fp.write(struct.pack("s", b""))  # force null byte after imgname
+    fp.write(struct.pack("79s", img_name))  # truncates to 79 chars
+    fp.write(struct.pack("s", b""))  # force null byte after img_name
     fp.write(struct.pack(">l", colormap))
     fp.write(struct.pack("404s", b""))  # dummy

@@ -1336,14 +1336,14 @@ class TiffImageFile(ImageFile.ImageFile):

         logger.debug(f"- size: {self.size}")

-        sampleFormat = self.tag_v2.get(SAMPLEFORMAT, (1,))
-        if len(sampleFormat) > 1 and max(sampleFormat) == min(sampleFormat) == 1:
+        sample_format = self.tag_v2.get(SAMPLEFORMAT, (1,))
+        if len(sample_format) > 1 and max(sample_format) == min(sample_format) == 1:
             # SAMPLEFORMAT is properly per band, so an RGB image will
             # be (1,1,1). But, we don't support per band pixel types,
             # and anything more than one band is a uint8. So, just
             # take the first element. Revisit this if adding support
             # for more exotic images.
-            sampleFormat = (1,)
+            sample_format = (1,)

         bps_tuple = self.tag_v2.get(BITSPERSAMPLE, (1,))
         extra_tuple = self.tag_v2.get(EXTRASAMPLES, ())
@@ -1364,18 +1364,18 @@ class TiffImageFile(ImageFile.ImageFile):
             # presume it is the same number of bits for all of the samples.
             bps_tuple = bps_tuple * bps_count

-        samplesPerPixel = self.tag_v2.get(
+        samples_per_pixel = self.tag_v2.get(
             SAMPLESPERPIXEL,
             3 if self._compression == "tiff_jpeg" and photo in (2, 6) else 1,
         )
-        if len(bps_tuple) != samplesPerPixel:
+        if len(bps_tuple) != samples_per_pixel:
             raise SyntaxError("unknown data organization")

         # mode: check photometric interpretation and bits per pixel
         key = (
             self.tag_v2.prefix,
             photo,
-            sampleFormat,
+            sample_format,
             fillorder,
             bps_tuple,
             extra_tuple,
@@ -1880,16 +1880,16 @@ class AppendingTiffWriter:
         self.whereToWriteNewIFDOffset = None
         self.offsetOfNewPage = 0

-        self.IIMM = IIMM = self.f.read(4)
-        if not IIMM:
+        self.IIMM = iimm = self.f.read(4)
+        if not iimm:
             # empty file - first page
             self.isFirst = True
             return

         self.isFirst = False
-        if IIMM == b"II\x2a\x00":
+        if iimm == b"II\x2a\x00":
             self.setEndian("<")
-        elif IIMM == b"MM\x00\x2a":
+        elif iimm == b"MM\x00\x2a":
             self.setEndian(">")
         else:
             raise RuntimeError("Invalid TIFF file header")
@@ -1904,20 +1904,20 @@ class AppendingTiffWriter:
         # fix offsets
         self.f.seek(self.offsetOfNewPage)

-        IIMM = self.f.read(4)
-        if not IIMM:
+        iimm = self.f.read(4)
+        if not iimm:
             # raise RuntimeError("nothing written into new page")
             # Make it easy to finish a frame without committing to a new one.
             return

-        if IIMM != self.IIMM:
+        if iimm != self.IIMM:
             raise RuntimeError("IIMM of new page doesn't match IIMM of first page")

-        IFDoffset = self.readLong()
-        IFDoffset += self.offsetOfNewPage
+        ifd_offset = self.readLong()
+        ifd_offset += self.offsetOfNewPage
         self.f.seek(self.whereToWriteNewIFDOffset)
-        self.writeLong(IFDoffset)
-        self.f.seek(IFDoffset)
+        self.writeLong(ifd_offset)
+        self.f.seek(ifd_offset)
         self.fixIFD()

     def newFrame(self):
@@ -1948,9 +1948,9 @@ class AppendingTiffWriter:
         pos = self.f.tell()

         # pad to 16 byte boundary
-        padBytes = 16 - pos % 16
-        if 0 < padBytes < 16:
-            self.f.write(bytes(padBytes))
+        pad_bytes = 16 - pos % 16
+        if 0 < pad_bytes < 16:
+            self.f.write(bytes(pad_bytes))
         self.offsetOfNewPage = self.f.tell()

     def setEndian(self, endian):
@@ -1961,14 +1961,14 @@ class AppendingTiffWriter:

     def skipIFDs(self):
         while True:
-            IFDoffset = self.readLong()
-            if IFDoffset == 0:
+            ifd_offset = self.readLong()
+            if ifd_offset == 0:
                 self.whereToWriteNewIFDOffset = self.f.tell() - 4
                 break

-            self.f.seek(IFDoffset)
-            numTags = self.readShort()
-            self.f.seek(numTags * 12, os.SEEK_CUR)
+            self.f.seek(ifd_offset)
+            num_tags = self.readShort()
+            self.f.seek(num_tags * 12, os.SEEK_CUR)

     def write(self, data):
         return self.f.write(data)
@@ -1983,68 +1983,68 @@ class AppendingTiffWriter:

     def rewriteLastShortToLong(self, value):
         self.f.seek(-2, os.SEEK_CUR)
-        bytesWritten = self.f.write(struct.pack(self.longFmt, value))
-        if bytesWritten is not None and bytesWritten != 4:
-            raise RuntimeError(f"wrote only {bytesWritten} bytes but wanted 4")
+        bytes_written = self.f.write(struct.pack(self.longFmt, value))
+        if bytes_written is not None and bytes_written != 4:
+            raise RuntimeError(f"wrote only {bytes_written} bytes but wanted 4")

     def rewriteLastShort(self, value):
         self.f.seek(-2, os.SEEK_CUR)
-        bytesWritten = self.f.write(struct.pack(self.shortFmt, value))
-        if bytesWritten is not None and bytesWritten != 2:
-            raise RuntimeError(f"wrote only {bytesWritten} bytes but wanted 2")
+        bytes_written = self.f.write(struct.pack(self.shortFmt, value))
+        if bytes_written is not None and bytes_written != 2:
+            raise RuntimeError(f"wrote only {bytes_written} bytes but wanted 2")

     def rewriteLastLong(self, value):
         self.f.seek(-4, os.SEEK_CUR)
-        bytesWritten = self.f.write(struct.pack(self.longFmt, value))
-        if bytesWritten is not None and bytesWritten != 4:
-            raise RuntimeError(f"wrote only {bytesWritten} bytes but wanted 4")
+        bytes_written = self.f.write(struct.pack(self.longFmt, value))
+        if bytes_written is not None and bytes_written != 4:
+            raise RuntimeError(f"wrote only {bytes_written} bytes but wanted 4")

     def writeShort(self, value):
-        bytesWritten = self.f.write(struct.pack(self.shortFmt, value))
-        if bytesWritten is not None and bytesWritten != 2:
-            raise RuntimeError(f"wrote only {bytesWritten} bytes but wanted 2")
+        bytes_written = self.f.write(struct.pack(self.shortFmt, value))
+        if bytes_written is not None and bytes_written != 2:
+            raise RuntimeError(f"wrote only {bytes_written} bytes but wanted 2")

     def writeLong(self, value):
-        bytesWritten = self.f.write(struct.pack(self.longFmt, value))
-        if bytesWritten is not None and bytesWritten != 4:
-            raise RuntimeError(f"wrote only {bytesWritten} bytes but wanted 4")
+        bytes_written = self.f.write(struct.pack(self.longFmt, value))
+        if bytes_written is not None and bytes_written != 4:
+            raise RuntimeError(f"wrote only {bytes_written} bytes but wanted 4")

     def close(self):
         self.finalize()
         self.f.close()

     def fixIFD(self):
-        numTags = self.readShort()
+        num_tags = self.readShort()

-        for i in range(numTags):
-            tag, fieldType, count = struct.unpack(self.tagFormat, self.f.read(8))
+        for i in range(num_tags):
+            tag, field_type, count = struct.unpack(self.tagFormat, self.f.read(8))

-            fieldSize = self.fieldSizes[fieldType]
-            totalSize = fieldSize * count
-            isLocal = totalSize <= 4
-            if not isLocal:
+            field_size = self.fieldSizes[field_type]
+            total_size = field_size * count
+            is_local = total_size <= 4
+            if not is_local:
                 offset = self.readLong()
                 offset += self.offsetOfNewPage
                 self.rewriteLastLong(offset)

             if tag in self.Tags:
-                curPos = self.f.tell()
+                cur_pos = self.f.tell()

-                if isLocal:
+                if is_local:
                     self.fixOffsets(
-                        count, isShort=(fieldSize == 2), isLong=(fieldSize == 4)
+                        count, isShort=(field_size == 2), isLong=(field_size == 4)
                     )
-                    self.f.seek(curPos + 4)
+                    self.f.seek(cur_pos + 4)
                 else:
                     self.f.seek(offset)
                     self.fixOffsets(
-                        count, isShort=(fieldSize == 2), isLong=(fieldSize == 4)
+                        count, isShort=(field_size == 2), isLong=(field_size == 4)
                     )
-                    self.f.seek(curPos)
+                    self.f.seek(cur_pos)

-                offset = curPos = None
+                offset = cur_pos = None

-            elif isLocal:
+            elif is_local:
                 # skip the locally stored value that is not an offset
                 self.f.seek(4, os.SEEK_CUR)

@@ -2,16 +2,16 @@ import os
 from pathlib import Path


-def isPath(f):
+def is_path(f):
     return isinstance(f, (bytes, str, Path))


-# Checks if an object is a string, and that it points to a directory.
-def isDirectory(f):
-    return isPath(f) and os.path.isdir(f)
+def is_directory(f):
+    """Checks if an object is a string, and that it points to a directory."""
+    return is_path(f) and os.path.isdir(f)


-class deferred_error:
+class DeferredError:
     def __init__(self, ex):
         self.ex = ex

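Taken together, the renamed _util helpers are small: is_path() decides whether an object can be handed to open(), is_directory() additionally checks the filesystem, and DeferredError stores an exception and re-raises it on first attribute access. A usage sketch (the error text is illustrative):

from PIL import _util

print(_util.is_path("font.ttf"))    # True: str, bytes and pathlib.Path all count as paths
print(_util.is_directory("Tests"))  # True only if the path exists and is a directory

broken = _util.DeferredError(ImportError("optional C module missing"))
try:
    broken.anything  # the stored exception is raised only when the object is used
except ImportError as err:
    print("deferred:", err)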