Mirror of https://github.com/python-pillow/Pillow.git (synced 2025-01-26 17:24:31 +03:00)
Variable in function should be snake_case
parent 98e8e6df33
commit d3c9a6504e
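This commit renames function-local variables across the test suite and several plugins from camelCase (frameNumber, size1D, bytesWritten, ...) to PEP 8 snake_case (frame_number, size_1d, bytes_written, ...); behaviour is unchanged. A minimal sketch of the convention being applied -- the function and data below are hypothetical illustrations, not code from Pillow:

    # Hypothetical example of the PEP 8 rule this commit enforces:
    # local variables inside a function use snake_case, not camelCase.
    def count_frames(frames):
        frame_count = 0  # previously a name like frameCount might have been used
        for _ in frames:
            frame_count += 1
        return frame_count

    assert count_frames(["a", "b", "c"]) == 3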
@@ -15,27 +15,27 @@ except ImportError:
 class TestColorLut3DCoreAPI:
     def generate_identity_table(self, channels, size):
         if isinstance(size, tuple):
-            size1D, size2D, size3D = size
+            size_1d, size_2d, size_3d = size
         else:
-            size1D, size2D, size3D = (size, size, size)
+            size_1d, size_2d, size_3d = (size, size, size)

         table = [
             [
-                r / (size1D - 1) if size1D != 1 else 0,
-                g / (size2D - 1) if size2D != 1 else 0,
-                b / (size3D - 1) if size3D != 1 else 0,
-                r / (size1D - 1) if size1D != 1 else 0,
-                g / (size2D - 1) if size2D != 1 else 0,
+                r / (size_1d - 1) if size_1d != 1 else 0,
+                g / (size_2d - 1) if size_2d != 1 else 0,
+                b / (size_3d - 1) if size_3d != 1 else 0,
+                r / (size_1d - 1) if size_1d != 1 else 0,
+                g / (size_2d - 1) if size_2d != 1 else 0,
             ][:channels]
-            for b in range(size3D)
-            for g in range(size2D)
-            for r in range(size1D)
+            for b in range(size_3d)
+            for g in range(size_2d)
+            for r in range(size_1d)
         ]
         return (
             channels,
-            size1D,
-            size2D,
-            size3D,
+            size_1d,
+            size_2d,
+            size_3d,
             [item for sublist in table for item in sublist],
         )

@@ -145,10 +145,10 @@ def test_mp_attribute():
     for test_file in test_files:
         with Image.open(test_file) as im:
             mpinfo = im._getmp()
-            frameNumber = 0
+            frame_number = 0
             for mpentry in mpinfo[0xB002]:
                 mpattr = mpentry["Attribute"]
-                if frameNumber:
+                if frame_number:
                     assert not mpattr["RepresentativeImageFlag"]
                 else:
                     assert mpattr["RepresentativeImageFlag"]
@@ -157,7 +157,7 @@ def test_mp_attribute():
                 assert mpattr["ImageDataFormat"] == "JPEG"
                 assert mpattr["MPType"] == "Multi-Frame Image: (Disparity)"
                 assert mpattr["Reserved"] == 0
-                frameNumber += 1
+                frame_number += 1


 def test_seek():
@@ -253,9 +253,9 @@ def test_pdf_append(tmp_path):
         check_pdf_pages_consistency(pdf)

     # append two images
-    mode_CMYK = hopper("CMYK")
-    mode_P = hopper("P")
-    mode_CMYK.save(pdf_filename, append=True, save_all=True, append_images=[mode_P])
+    mode_cmyk = hopper("CMYK")
+    mode_p = hopper("P")
+    mode_cmyk.save(pdf_filename, append=True, save_all=True, append_images=[mode_p])

     # open the PDF again, check pages and info again
     with PdfParser.PdfParser(pdf_filename) as pdf:
@@ -28,26 +28,26 @@ def test_rt_metadata(tmp_path):
     # For text items, we still have to decode('ascii','replace') because
     # the tiff file format can't take 8 bit bytes in that field.

-    basetextdata = "This is some arbitrary metadata for a text field"
-    bindata = basetextdata.encode("ascii") + b" \xff"
-    textdata = basetextdata + " " + chr(255)
-    reloaded_textdata = basetextdata + " ?"
-    floatdata = 12.345
-    doubledata = 67.89
+    base_text_data = "This is some arbitrary metadata for a text field"
+    bin_data = base_text_data.encode("ascii") + b" \xff"
+    text_data = base_text_data + " " + chr(255)
+    reloaded_text_data = base_text_data + " ?"
+    float_data = 12.345
+    double_data = 67.89
     info = TiffImagePlugin.ImageFileDirectory()

     ImageJMetaData = TAG_IDS["ImageJMetaData"]
     ImageJMetaDataByteCounts = TAG_IDS["ImageJMetaDataByteCounts"]
     ImageDescription = TAG_IDS["ImageDescription"]

-    info[ImageJMetaDataByteCounts] = len(bindata)
-    info[ImageJMetaData] = bindata
-    info[TAG_IDS["RollAngle"]] = floatdata
+    info[ImageJMetaDataByteCounts] = len(bin_data)
+    info[ImageJMetaData] = bin_data
+    info[TAG_IDS["RollAngle"]] = float_data
     info.tagtype[TAG_IDS["RollAngle"]] = 11
-    info[TAG_IDS["YawAngle"]] = doubledata
+    info[TAG_IDS["YawAngle"]] = double_data
     info.tagtype[TAG_IDS["YawAngle"]] = 12

-    info[ImageDescription] = textdata
+    info[ImageDescription] = text_data

     f = str(tmp_path / "temp.tif")

@@ -55,28 +55,28 @@ def test_rt_metadata(tmp_path):

     with Image.open(f) as loaded:

-        assert loaded.tag[ImageJMetaDataByteCounts] == (len(bindata),)
-        assert loaded.tag_v2[ImageJMetaDataByteCounts] == (len(bindata),)
+        assert loaded.tag[ImageJMetaDataByteCounts] == (len(bin_data),)
+        assert loaded.tag_v2[ImageJMetaDataByteCounts] == (len(bin_data),)

-        assert loaded.tag[ImageJMetaData] == bindata
-        assert loaded.tag_v2[ImageJMetaData] == bindata
+        assert loaded.tag[ImageJMetaData] == bin_data
+        assert loaded.tag_v2[ImageJMetaData] == bin_data

-        assert loaded.tag[ImageDescription] == (reloaded_textdata,)
-        assert loaded.tag_v2[ImageDescription] == reloaded_textdata
+        assert loaded.tag[ImageDescription] == (reloaded_text_data,)
+        assert loaded.tag_v2[ImageDescription] == reloaded_text_data

         loaded_float = loaded.tag[TAG_IDS["RollAngle"]][0]
-        assert round(abs(loaded_float - floatdata), 5) == 0
+        assert round(abs(loaded_float - float_data), 5) == 0
         loaded_double = loaded.tag[TAG_IDS["YawAngle"]][0]
-        assert round(abs(loaded_double - doubledata), 7) == 0
+        assert round(abs(loaded_double - double_data), 7) == 0

     # check with 2 element ImageJMetaDataByteCounts, issue #2006

-    info[ImageJMetaDataByteCounts] = (8, len(bindata) - 8)
+    info[ImageJMetaDataByteCounts] = (8, len(bin_data) - 8)
     img.save(f, tiffinfo=info)
     with Image.open(f) as loaded:

-        assert loaded.tag[ImageJMetaDataByteCounts] == (8, len(bindata) - 8)
-        assert loaded.tag_v2[ImageJMetaDataByteCounts] == (8, len(bindata) - 8)
+        assert loaded.tag[ImageJMetaDataByteCounts] == (8, len(bin_data) - 8)
+        assert loaded.tag_v2[ImageJMetaDataByteCounts] == (8, len(bin_data) - 8)


 def test_read_metadata():
@@ -6,8 +6,8 @@ from .helper import hopper


 def test_copy():
-    croppedCoordinates = (10, 10, 20, 20)
-    croppedSize = (10, 10)
+    cropped_coordinates = (10, 10, 20, 20)
+    cropped_size = (10, 10)
     for mode in "1", "P", "L", "RGB", "I", "F":
         # Internal copy method
         im = hopper(mode)
@@ -23,15 +23,15 @@ def test_copy():

         # Internal copy method on a cropped image
         im = hopper(mode)
-        out = im.crop(croppedCoordinates).copy()
+        out = im.crop(cropped_coordinates).copy()
         assert out.mode == im.mode
-        assert out.size == croppedSize
+        assert out.size == cropped_size

         # Python's copy method on a cropped image
         im = hopper(mode)
-        out = copy.copy(im.crop(croppedCoordinates))
+        out = copy.copy(im.crop(cropped_coordinates))
         assert out.mode == im.mode
-        assert out.size == croppedSize
+        assert out.size == cropped_size


 def test_copy_zero():
@@ -99,10 +99,10 @@ def test_rankfilter_properties():


 def test_builtinfilter_p():
-    builtinFilter = ImageFilter.BuiltinFilter()
+    builtin_filter = ImageFilter.BuiltinFilter()

     with pytest.raises(ValueError):
-        builtinFilter.filter(hopper("P"))
+        builtin_filter.filter(hopper("P"))


 def test_kernel_not_enough_coefficients():
@@ -35,9 +35,9 @@ class TestImageFile:

             parser = ImageFile.Parser()
             parser.feed(data)
-            imOut = parser.close()
+            im_out = parser.close()

-            return im, imOut
+            return im, im_out

         assert_image_equal(*roundtrip("BMP"))
         im1, im2 = roundtrip("GIF")
@@ -65,12 +65,12 @@ def test_libtiff():

 def test_consecutive():
     with Image.open("Tests/images/multipage.tiff") as im:
-        firstFrame = None
+        first_frame = None
         for frame in ImageSequence.Iterator(im):
-            if firstFrame is None:
-                firstFrame = frame.copy()
+            if first_frame is None:
+                first_frame = frame.copy()
         for frame in ImageSequence.Iterator(im):
-            assert_image_equal(frame, firstFrame)
+            assert_image_equal(frame, first_frame)
             break

@@ -26,51 +26,51 @@ def test_basic(tmp_path):

     def basic(mode):

-        imIn = original.convert(mode)
-        verify(imIn)
+        im_in = original.convert(mode)
+        verify(im_in)

-        w, h = imIn.size
+        w, h = im_in.size

-        imOut = imIn.copy()
-        verify(imOut)  # copy
+        im_out = im_in.copy()
+        verify(im_out)  # copy

-        imOut = imIn.transform((w, h), Image.Transform.EXTENT, (0, 0, w, h))
-        verify(imOut)  # transform
+        im_out = im_in.transform((w, h), Image.Transform.EXTENT, (0, 0, w, h))
+        verify(im_out)  # transform

         filename = str(tmp_path / "temp.im")
-        imIn.save(filename)
+        im_in.save(filename)

-        with Image.open(filename) as imOut:
+        with Image.open(filename) as im_out:

-            verify(imIn)
-            verify(imOut)
+            verify(im_in)
+            verify(im_out)

-        imOut = imIn.crop((0, 0, w, h))
-        verify(imOut)
+        im_out = im_in.crop((0, 0, w, h))
+        verify(im_out)

-        imOut = Image.new(mode, (w, h), None)
-        imOut.paste(imIn.crop((0, 0, w // 2, h)), (0, 0))
-        imOut.paste(imIn.crop((w // 2, 0, w, h)), (w // 2, 0))
+        im_out = Image.new(mode, (w, h), None)
+        im_out.paste(im_in.crop((0, 0, w // 2, h)), (0, 0))
+        im_out.paste(im_in.crop((w // 2, 0, w, h)), (w // 2, 0))

-        verify(imIn)
-        verify(imOut)
+        verify(im_in)
+        verify(im_out)

-        imIn = Image.new(mode, (1, 1), 1)
-        assert imIn.getpixel((0, 0)) == 1
+        im_in = Image.new(mode, (1, 1), 1)
+        assert im_in.getpixel((0, 0)) == 1

-        imIn.putpixel((0, 0), 2)
-        assert imIn.getpixel((0, 0)) == 2
+        im_in.putpixel((0, 0), 2)
+        assert im_in.getpixel((0, 0)) == 2

         if mode == "L":
             maximum = 255
         else:
             maximum = 32767

-        imIn = Image.new(mode, (1, 1), 256)
-        assert imIn.getpixel((0, 0)) == min(256, maximum)
+        im_in = Image.new(mode, (1, 1), 256)
+        assert im_in.getpixel((0, 0)) == min(256, maximum)

-        imIn.putpixel((0, 0), 512)
-        assert imIn.getpixel((0, 0)) == min(512, maximum)
+        im_in.putpixel((0, 0), 512)
+        assert im_in.getpixel((0, 0)) == min(512, maximum)

     basic("L")

@@ -54,20 +54,25 @@ class GdImageFile(ImageFile.ImageFile):
         self.mode = "L"  # FIXME: "P"
         self._size = i16(s, 2), i16(s, 4)

-        trueColor = s[6]
-        trueColorOffset = 2 if trueColor else 0
+        true_color = s[6]
+        true_color_offset = 2 if true_color else 0

         # transparency index
-        tindex = i32(s, 7 + trueColorOffset)
+        tindex = i32(s, 7 + true_color_offset)
         if tindex < 256:
             self.info["transparency"] = tindex

         self.palette = ImagePalette.raw(
-            "XBGR", s[7 + trueColorOffset + 4 : 7 + trueColorOffset + 4 + 256 * 4]
+            "XBGR", s[7 + true_color_offset + 4 : 7 + true_color_offset + 4 + 256 * 4]
         )

         self.tile = [
-            ("raw", (0, 0) + self.size, 7 + trueColorOffset + 4 + 256 * 4, ("L", 0, 1))
+            (
+                "raw",
+                (0, 0) + self.size,
+                7 + true_color_offset + 4 + 256 * 4,
+                ("L", 0, 1),
+            )
         ]

@@ -197,18 +197,18 @@ class ImageDraw:
                     if width > 8:
                         # Cover potential gaps between the line and the joint
                         if flipped:
-                            gapCoords = [
+                            gap_coords = [
                                 coord_at_angle(point, angles[0] + 90),
                                 point,
                                 coord_at_angle(point, angles[1] + 90),
                             ]
                         else:
-                            gapCoords = [
+                            gap_coords = [
                                 coord_at_angle(point, angles[0] - 90),
                                 point,
                                 coord_at_angle(point, angles[1] - 90),
                             ]
-                        self.line(gapCoords, fill, width=3)
+                        self.line(gap_coords, fill, width=3)

     def shape(self, shape, fill=None, outline=None):
         """(Experimental) Draw a shape."""
@@ -439,22 +439,22 @@ class Color3DLUT(MultibandFilter):
         :param target_mode: Passed to the constructor of the resulting
             lookup table.
         """
-        size1D, size2D, size3D = cls._check_size(size)
+        size_1d, size_2d, size_3d = cls._check_size(size)
         if channels not in (3, 4):
             raise ValueError("Only 3 or 4 output channels are supported")

-        table = [0] * (size1D * size2D * size3D * channels)
+        table = [0] * (size_1d * size_2d * size_3d * channels)
         idx_out = 0
-        for b in range(size3D):
-            for g in range(size2D):
-                for r in range(size1D):
+        for b in range(size_3d):
+            for g in range(size_2d):
+                for r in range(size_1d):
                     table[idx_out : idx_out + channels] = callback(
-                        r / (size1D - 1), g / (size2D - 1), b / (size3D - 1)
+                        r / (size_1d - 1), g / (size_2d - 1), b / (size_3d - 1)
                     )
                     idx_out += channels

         return cls(
-            (size1D, size2D, size3D),
+            (size_1d, size_2d, size_3d),
             table,
             channels=channels,
             target_mode=target_mode,
@@ -484,20 +484,20 @@ class Color3DLUT(MultibandFilter):
             raise ValueError("Only 3 or 4 output channels are supported")
         ch_in = self.channels
         ch_out = channels or ch_in
-        size1D, size2D, size3D = self.size
+        size_1d, size_2d, size_3d = self.size

-        table = [0] * (size1D * size2D * size3D * ch_out)
+        table = [0] * (size_1d * size_2d * size_3d * ch_out)
         idx_in = 0
         idx_out = 0
-        for b in range(size3D):
-            for g in range(size2D):
-                for r in range(size1D):
+        for b in range(size_3d):
+            for g in range(size_2d):
+                for r in range(size_1d):
                     values = self.table[idx_in : idx_in + ch_in]
                     if with_normals:
                         values = callback(
-                            r / (size1D - 1),
-                            g / (size2D - 1),
-                            b / (size3D - 1),
+                            r / (size_1d - 1),
+                            g / (size_2d - 1),
+                            b / (size_3d - 1),
                             *values,
                         )
                     else:
@@ -78,10 +78,10 @@ class Stat:

         v = []
         for i in range(0, len(self.h), 256):
-            layerSum = 0.0
+            layer_sum = 0.0
             for j in range(256):
-                layerSum += j * self.h[i + j]
-            v.append(layerSum)
+                layer_sum += j * self.h[i + j]
+            v.append(layer_sum)
         return v

     def _getsum2(self):
@@ -193,15 +193,15 @@ class PcfFontFile(FontFile.FontFile):
         for i in range(nbitmaps):
             offsets.append(i32(fp.read(4)))

-        bitmapSizes = []
+        bitmap_sizes = []
         for i in range(4):
-            bitmapSizes.append(i32(fp.read(4)))
+            bitmap_sizes.append(i32(fp.read(4)))

         # byteorder = format & 4 # non-zero => MSB
         bitorder = format & 8  # non-zero => MSB
         padindex = format & 3

-        bitmapsize = bitmapSizes[padindex]
+        bitmapsize = bitmap_sizes[padindex]
         offsets.append(bitmapsize)

         data = fp.read(bitmapsize)
@@ -225,22 +225,22 @@ class PcfFontFile(FontFile.FontFile):

         fp, format, i16, i32 = self._getformat(PCF_BDF_ENCODINGS)

-        firstCol, lastCol = i16(fp.read(2)), i16(fp.read(2))
-        firstRow, lastRow = i16(fp.read(2)), i16(fp.read(2))
+        first_col, last_col = i16(fp.read(2)), i16(fp.read(2))
+        first_row, last_row = i16(fp.read(2)), i16(fp.read(2))

         i16(fp.read(2))  # default

-        nencoding = (lastCol - firstCol + 1) * (lastRow - firstRow + 1)
+        nencoding = (last_col - first_col + 1) * (last_row - first_row + 1)

-        encodingOffsets = [i16(fp.read(2)) for _ in range(nencoding)]
+        encoding_offsets = [i16(fp.read(2)) for _ in range(nencoding)]

-        for i in range(firstCol, len(encoding)):
+        for i in range(first_col, len(encoding)):
             try:
-                encodingOffset = encodingOffsets[
+                encoding_offset = encoding_offsets[
                     ord(bytearray([i]).decode(self.charset_encoding))
                 ]
-                if encodingOffset != 0xFFFF:
-                    encoding[i] = encodingOffset
+                if encoding_offset != 0xFFFF:
+                    encoding[i] = encoding_offset
             except UnicodeDecodeError:
                 # character is not supported in selected encoding
                 pass
@@ -87,21 +87,21 @@ def _save(im, fp, filename, save_all=False):
     for append_im in append_images:
         append_im.encoderinfo = im.encoderinfo.copy()
         ims.append(append_im)
-    numberOfPages = 0
+    number_of_pages = 0
     image_refs = []
     page_refs = []
     contents_refs = []
     for im in ims:
-        im_numberOfPages = 1
+        im_number_of_pages = 1
         if save_all:
             try:
-                im_numberOfPages = im.n_frames
+                im_number_of_pages = im.n_frames
             except AttributeError:
                 # Image format does not have n_frames.
                 # It is a single frame image
                 pass
-        numberOfPages += im_numberOfPages
-        for i in range(im_numberOfPages):
+        number_of_pages += im_number_of_pages
+        for i in range(im_number_of_pages):
             image_refs.append(existing_pdf.next_object_id(0))
             page_refs.append(existing_pdf.next_object_id(0))
             contents_refs.append(existing_pdf.next_object_id(0))
@@ -111,9 +111,9 @@ def _save(im, fp, filename, save_all=False):
     # catalog and list of pages
     existing_pdf.write_catalog()

-    pageNumber = 0
-    for imSequence in ims:
-        im_pages = ImageSequence.Iterator(imSequence) if save_all else [imSequence]
+    page_number = 0
+    for im_sequence in ims:
+        im_pages = ImageSequence.Iterator(im_sequence) if save_all else [im_sequence]
         for im in im_pages:
             # FIXME: Should replace ASCIIHexDecode with RunLengthDecode
             # (packbits) or LZWDecode (tiff/lzw compression). Note that
@@ -176,7 +176,7 @@ def _save(im, fp, filename, save_all=False):
             width, height = im.size

             existing_pdf.write_obj(
-                image_refs[pageNumber],
+                image_refs[page_number],
                 stream=op.getvalue(),
                 Type=PdfParser.PdfName("XObject"),
                 Subtype=PdfParser.PdfName("Image"),
@@ -193,10 +193,10 @@ def _save(im, fp, filename, save_all=False):
             # page

             existing_pdf.write_page(
-                page_refs[pageNumber],
+                page_refs[page_number],
                 Resources=PdfParser.PdfDict(
                     ProcSet=[PdfParser.PdfName("PDF"), PdfParser.PdfName(procset)],
-                    XObject=PdfParser.PdfDict(image=image_refs[pageNumber]),
+                    XObject=PdfParser.PdfDict(image=image_refs[page_number]),
                 ),
                 MediaBox=[
                     0,
@@ -204,7 +204,7 @@ def _save(im, fp, filename, save_all=False):
                     width * 72.0 / resolution,
                     height * 72.0 / resolution,
                 ],
-                Contents=contents_refs[pageNumber],
+                Contents=contents_refs[page_number],
             )

             #
@@ -215,9 +215,9 @@ def _save(im, fp, filename, save_all=False):
                 height * 72.0 / resolution,
             )

-            existing_pdf.write_obj(contents_refs[pageNumber], stream=page_contents)
+            existing_pdf.write_obj(contents_refs[page_number], stream=page_contents)

-            pageNumber += 1
+            page_number += 1

     #
     # trailer
@@ -138,7 +138,7 @@ def _save(im, fp, filename):
     # Flip the image, since the origin of SGI file is the bottom-left corner
     orientation = -1
     # Define the file as SGI File Format
-    magicNumber = 474
+    magic_number = 474
     # Run-Length Encoding Compression - Unsupported at this time
     rle = 0

@@ -167,11 +167,11 @@ def _save(im, fp, filename):
     # Maximum Byte value (255 = 8bits per pixel)
     pinmax = 255
     # Image name (79 characters max, truncated below in write)
-    imgName = os.path.splitext(os.path.basename(filename))[0]
-    imgName = imgName.encode("ascii", "ignore")
+    img_name = os.path.splitext(os.path.basename(filename))[0]
+    img_name = img_name.encode("ascii", "ignore")
     # Standard representation of pixel in the file
     colormap = 0
-    fp.write(struct.pack(">h", magicNumber))
+    fp.write(struct.pack(">h", magic_number))
     fp.write(o8(rle))
     fp.write(o8(bpc))
     fp.write(struct.pack(">H", dim))
@@ -181,7 +181,7 @@ def _save(im, fp, filename):
     fp.write(struct.pack(">l", pinmin))
     fp.write(struct.pack(">l", pinmax))
     fp.write(struct.pack("4s", b""))  # dummy
-    fp.write(struct.pack("79s", imgName))  # truncates to 79 chars
+    fp.write(struct.pack("79s", img_name))  # truncates to 79 chars
     fp.write(struct.pack("s", b""))  # force null byte after imgname
     fp.write(struct.pack(">l", colormap))
     fp.write(struct.pack("404s", b""))  # dummy
@@ -1336,14 +1336,14 @@ class TiffImageFile(ImageFile.ImageFile):

         logger.debug(f"- size: {self.size}")

-        sampleFormat = self.tag_v2.get(SAMPLEFORMAT, (1,))
-        if len(sampleFormat) > 1 and max(sampleFormat) == min(sampleFormat) == 1:
+        sample_format = self.tag_v2.get(SAMPLEFORMAT, (1,))
+        if len(sample_format) > 1 and max(sample_format) == min(sample_format) == 1:
             # SAMPLEFORMAT is properly per band, so an RGB image will
             # be (1,1,1). But, we don't support per band pixel types,
             # and anything more than one band is a uint8. So, just
             # take the first element. Revisit this if adding support
             # for more exotic images.
-            sampleFormat = (1,)
+            sample_format = (1,)

         bps_tuple = self.tag_v2.get(BITSPERSAMPLE, (1,))
         extra_tuple = self.tag_v2.get(EXTRASAMPLES, ())
@@ -1364,18 +1364,18 @@ class TiffImageFile(ImageFile.ImageFile):
             # presume it is the same number of bits for all of the samples.
             bps_tuple = bps_tuple * bps_count

-        samplesPerPixel = self.tag_v2.get(
+        samples_per_pixel = self.tag_v2.get(
             SAMPLESPERPIXEL,
             3 if self._compression == "tiff_jpeg" and photo in (2, 6) else 1,
         )
-        if len(bps_tuple) != samplesPerPixel:
+        if len(bps_tuple) != samples_per_pixel:
             raise SyntaxError("unknown data organization")

         # mode: check photometric interpretation and bits per pixel
         key = (
             self.tag_v2.prefix,
             photo,
-            sampleFormat,
+            sample_format,
             fillorder,
             bps_tuple,
             extra_tuple,
@@ -1904,20 +1904,20 @@ class AppendingTiffWriter:
         # fix offsets
         self.f.seek(self.offsetOfNewPage)

-        IIMM = self.f.read(4)
-        if not IIMM:
+        iimm = self.f.read(4)
+        if not iimm:
             # raise RuntimeError("nothing written into new page")
             # Make it easy to finish a frame without committing to a new one.
             return

-        if IIMM != self.IIMM:
+        if iimm != self.IIMM:
             raise RuntimeError("IIMM of new page doesn't match IIMM of first page")

-        IFDoffset = self.readLong()
-        IFDoffset += self.offsetOfNewPage
+        ifd_offset = self.readLong()
+        ifd_offset += self.offsetOfNewPage
         self.f.seek(self.whereToWriteNewIFDOffset)
-        self.writeLong(IFDoffset)
-        self.f.seek(IFDoffset)
+        self.writeLong(ifd_offset)
+        self.f.seek(ifd_offset)
         self.fixIFD()

     def newFrame(self):
@@ -1948,9 +1948,9 @@ class AppendingTiffWriter:
         pos = self.f.tell()

         # pad to 16 byte boundary
-        padBytes = 16 - pos % 16
-        if 0 < padBytes < 16:
-            self.f.write(bytes(padBytes))
+        pad_bytes = 16 - pos % 16
+        if 0 < pad_bytes < 16:
+            self.f.write(bytes(pad_bytes))
         self.offsetOfNewPage = self.f.tell()

     def setEndian(self, endian):
@@ -1961,14 +1961,14 @@ class AppendingTiffWriter:

     def skipIFDs(self):
         while True:
-            IFDoffset = self.readLong()
-            if IFDoffset == 0:
+            ifd_offset = self.readLong()
+            if ifd_offset == 0:
                 self.whereToWriteNewIFDOffset = self.f.tell() - 4
                 break

-            self.f.seek(IFDoffset)
-            numTags = self.readShort()
-            self.f.seek(numTags * 12, os.SEEK_CUR)
+            self.f.seek(ifd_offset)
+            num_tags = self.readShort()
+            self.f.seek(num_tags * 12, os.SEEK_CUR)

     def write(self, data):
         return self.f.write(data)
@@ -1983,68 +1983,68 @@ class AppendingTiffWriter:

     def rewriteLastShortToLong(self, value):
         self.f.seek(-2, os.SEEK_CUR)
-        bytesWritten = self.f.write(struct.pack(self.longFmt, value))
-        if bytesWritten is not None and bytesWritten != 4:
-            raise RuntimeError(f"wrote only {bytesWritten} bytes but wanted 4")
+        bytes_written = self.f.write(struct.pack(self.longFmt, value))
+        if bytes_written is not None and bytes_written != 4:
+            raise RuntimeError(f"wrote only {bytes_written} bytes but wanted 4")

     def rewriteLastShort(self, value):
         self.f.seek(-2, os.SEEK_CUR)
-        bytesWritten = self.f.write(struct.pack(self.shortFmt, value))
-        if bytesWritten is not None and bytesWritten != 2:
-            raise RuntimeError(f"wrote only {bytesWritten} bytes but wanted 2")
+        bytes_written = self.f.write(struct.pack(self.shortFmt, value))
+        if bytes_written is not None and bytes_written != 2:
+            raise RuntimeError(f"wrote only {bytes_written} bytes but wanted 2")

     def rewriteLastLong(self, value):
         self.f.seek(-4, os.SEEK_CUR)
-        bytesWritten = self.f.write(struct.pack(self.longFmt, value))
-        if bytesWritten is not None and bytesWritten != 4:
-            raise RuntimeError(f"wrote only {bytesWritten} bytes but wanted 4")
+        bytes_written = self.f.write(struct.pack(self.longFmt, value))
+        if bytes_written is not None and bytes_written != 4:
+            raise RuntimeError(f"wrote only {bytes_written} bytes but wanted 4")

     def writeShort(self, value):
-        bytesWritten = self.f.write(struct.pack(self.shortFmt, value))
-        if bytesWritten is not None and bytesWritten != 2:
-            raise RuntimeError(f"wrote only {bytesWritten} bytes but wanted 2")
+        bytes_written = self.f.write(struct.pack(self.shortFmt, value))
+        if bytes_written is not None and bytes_written != 2:
+            raise RuntimeError(f"wrote only {bytes_written} bytes but wanted 2")

     def writeLong(self, value):
-        bytesWritten = self.f.write(struct.pack(self.longFmt, value))
-        if bytesWritten is not None and bytesWritten != 4:
-            raise RuntimeError(f"wrote only {bytesWritten} bytes but wanted 4")
+        bytes_written = self.f.write(struct.pack(self.longFmt, value))
+        if bytes_written is not None and bytes_written != 4:
+            raise RuntimeError(f"wrote only {bytes_written} bytes but wanted 4")

     def close(self):
         self.finalize()
         self.f.close()

     def fixIFD(self):
-        numTags = self.readShort()
+        num_tags = self.readShort()

-        for i in range(numTags):
-            tag, fieldType, count = struct.unpack(self.tagFormat, self.f.read(8))
+        for i in range(num_tags):
+            tag, field_type, count = struct.unpack(self.tagFormat, self.f.read(8))

-            fieldSize = self.fieldSizes[fieldType]
-            totalSize = fieldSize * count
-            isLocal = totalSize <= 4
-            if not isLocal:
+            field_size = self.fieldSizes[field_type]
+            total_size = field_size * count
+            is_local = total_size <= 4
+            if not is_local:
                 offset = self.readLong()
                 offset += self.offsetOfNewPage
                 self.rewriteLastLong(offset)

             if tag in self.Tags:
-                curPos = self.f.tell()
+                cur_pos = self.f.tell()

-                if isLocal:
+                if is_local:
                     self.fixOffsets(
-                        count, isShort=(fieldSize == 2), isLong=(fieldSize == 4)
+                        count, isShort=(field_size == 2), isLong=(field_size == 4)
                     )
-                    self.f.seek(curPos + 4)
+                    self.f.seek(cur_pos + 4)
                 else:
                     self.f.seek(offset)
                     self.fixOffsets(
-                        count, isShort=(fieldSize == 2), isLong=(fieldSize == 4)
+                        count, isShort=(field_size == 2), isLong=(field_size == 4)
                     )
-                    self.f.seek(curPos)
+                    self.f.seek(cur_pos)

-                offset = curPos = None
+                offset = cur_pos = None

-            elif isLocal:
+            elif is_local:
                 # skip the locally stored value that is not an offset
                 self.f.seek(4, os.SEEK_CUR)
