Various Flake8 fixes

Andrew Murray 2015-04-24 09:26:52 +10:00
parent 40165d6860
commit d1c182cadc
40 changed files with 201 additions and 230 deletions
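
The diff below consists of mechanical style cleanups of the kind flake8 reports, for example E211 (whitespace before a call parenthesis, e.g. print (...) instead of print(...)), E265 (block comments should start with "# "), E231 (missing whitespace after a comma) and E711 (comparison to None with == instead of is). As a rough illustrative sketch of the target style only, not code taken from Pillow (the names below are hypothetical):

    # E211: no whitespace before the call parenthesis; print() used as a function
    size = (640, 480)
    print("size: %sx%s" % size)

    # E265: commented-out code keeps a space after the "#"
    # tilecount = 0

    # E231: a space after every comma in tuples and argument lists
    coords = [(0, 0), (15, 12)]
    print(coords)

    # E711: compare to None with "is", not "=="
    no_conversion = None
    if no_conversion is None:
        no_conversion = []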


@ -18,6 +18,7 @@ _handler = None
# #
# @param handler Handler object. # @param handler Handler object.
def register_handler(handler): def register_handler(handler):
global _handler global _handler
_handler = handler _handler = handler
@ -25,9 +26,11 @@ def register_handler(handler):
# -------------------------------------------------------------------- # --------------------------------------------------------------------
# Image adapter # Image adapter
def _accept(prefix): def _accept(prefix):
return prefix[:6] == b"SIMPLE" return prefix[:6] == b"SIMPLE"
class FITSStubImageFile(ImageFile.StubImageFile): class FITSStubImageFile(ImageFile.StubImageFile):
format = "FITS" format = "FITS"


@ -136,9 +136,9 @@ class FpxImageFile(ImageFile.ImageFile):
s = fp.read(36) s = fp.read(36)
size = i32(s, 4), i32(s, 8) size = i32(s, 4), i32(s, 8)
#tilecount = i32(s, 12) # tilecount = i32(s, 12)
tilesize = i32(s, 16), i32(s, 20) tilesize = i32(s, 16), i32(s, 20)
#channels = i32(s, 24) # channels = i32(s, 24)
offset = i32(s, 28) offset = i32(s, 28)
length = i32(s, 32) length = i32(s, 32)


@ -387,7 +387,7 @@ def init():
for plugin in _plugins: for plugin in _plugins:
try: try:
if DEBUG: if DEBUG:
print ("Importing %s" % plugin) print("Importing %s" % plugin)
__import__("PIL.%s" % plugin, globals(), locals(), []) __import__("PIL.%s" % plugin, globals(), locals(), [])
except ImportError: except ImportError:
if DEBUG: if DEBUG:
@ -546,7 +546,7 @@ class Image:
self.fp.close() self.fp.close()
except Exception as msg: except Exception as msg:
if DEBUG: if DEBUG:
print ("Error closing: %s" % msg) print("Error closing: %s" % msg)
# Instead of simply setting to None, we're setting up a # Instead of simply setting to None, we're setting up a
# deferred error that will better explain that the core image # deferred error that will better explain that the core image


@ -53,6 +53,7 @@ class Color(_Enhance):
self.degenerate = image.convert(self.intermediate_mode).convert(image.mode) self.degenerate = image.convert(self.intermediate_mode).convert(image.mode)
class Contrast(_Enhance): class Contrast(_Enhance):
"""Adjust image contrast. """Adjust image contrast.


@ -29,12 +29,13 @@ if 'PyQt4.QtGui' not in sys.modules:
except: except:
from PySide.QtGui import QImage, qRgba from PySide.QtGui import QImage, qRgba
else: #PyQt4 is used else: #PyQt4 is used
from PyQt4.QtGui import QImage, qRgba from PyQt4.QtGui import QImage, qRgba
## ##
# (Internal) Turns an RGB color into a Qt compatible color integer. # (Internal) Turns an RGB color into a Qt compatible color integer.
def rgb(r, g, b, a=255): def rgb(r, g, b, a=255):
# use qRgb to pack the colors, and then turn the resulting long # use qRgb to pack the colors, and then turn the resulting long
# into a negative integer with the same bitpattern. # into a negative integer with the same bitpattern.


@ -67,7 +67,7 @@ Libjpeg ref.: http://www.jpegcameras.com/libjpeg/libjpeg-3.html
""" """
presets = { presets = {
'web_low': {'subsampling': 2, # "4:1:1" 'web_low': {'subsampling': 2, # "4:1:1"
'quantization': [ 'quantization': [
[20, 16, 25, 39, 50, 46, 62, 68, [20, 16, 25, 39, 50, 46, 62, 68,
16, 18, 23, 38, 38, 53, 65, 68, 16, 18, 23, 38, 38, 53, 65, 68,
@ -86,7 +86,7 @@ presets = {
68, 68, 68, 68, 68, 68, 68, 68, 68, 68, 68, 68, 68, 68, 68, 68,
68, 68, 68, 68, 68, 68, 68, 68] 68, 68, 68, 68, 68, 68, 68, 68]
]}, ]},
'web_medium': {'subsampling': 2, # "4:1:1" 'web_medium': {'subsampling': 2, # "4:1:1"
'quantization': [ 'quantization': [
[16, 11, 11, 16, 23, 27, 31, 30, [16, 11, 11, 16, 23, 27, 31, 30,
11, 12, 12, 15, 20, 23, 23, 30, 11, 12, 12, 15, 20, 23, 23, 30,
@ -105,7 +105,7 @@ presets = {
38, 35, 46, 53, 64, 64, 64, 64, 38, 35, 46, 53, 64, 64, 64, 64,
48, 43, 53, 64, 64, 64, 64, 64] 48, 43, 53, 64, 64, 64, 64, 64]
]}, ]},
'web_high': {'subsampling': 0, # "4:4:4" 'web_high': {'subsampling': 0, # "4:4:4"
'quantization': [ 'quantization': [
[ 6, 4, 4, 6, 9, 11, 12, 16, [ 6, 4, 4, 6, 9, 11, 12, 16,
4, 5, 5, 6, 8, 10, 12, 12, 4, 5, 5, 6, 8, 10, 12, 12,
@ -124,7 +124,7 @@ presets = {
31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31,
31, 31, 31, 31, 31, 31, 31, 31] 31, 31, 31, 31, 31, 31, 31, 31]
]}, ]},
'web_very_high': {'subsampling': 0, # "4:4:4" 'web_very_high': {'subsampling': 0, # "4:4:4"
'quantization': [ 'quantization': [
[ 2, 2, 2, 2, 3, 4, 5, 6, [ 2, 2, 2, 2, 3, 4, 5, 6,
2, 2, 2, 2, 3, 4, 5, 6, 2, 2, 2, 2, 3, 4, 5, 6,
@ -143,7 +143,7 @@ presets = {
15, 12, 12, 12, 12, 12, 12, 12, 15, 12, 12, 12, 12, 12, 12, 12,
15, 12, 12, 12, 12, 12, 12, 12] 15, 12, 12, 12, 12, 12, 12, 12]
]}, ]},
'web_maximum': {'subsampling': 0, # "4:4:4" 'web_maximum': {'subsampling': 0, # "4:4:4"
'quantization': [ 'quantization': [
[ 1, 1, 1, 1, 1, 1, 1, 1, [ 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
@ -162,7 +162,7 @@ presets = {
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3] 3, 3, 3, 3, 3, 3, 3, 3]
]}, ]},
'low': {'subsampling': 2, # "4:1:1" 'low': {'subsampling': 2, # "4:1:1"
'quantization': [ 'quantization': [
[18, 14, 14, 21, 30, 35, 34, 17, [18, 14, 14, 21, 30, 35, 34, 17,
14, 16, 16, 19, 26, 23, 12, 12, 14, 16, 16, 19, 26, 23, 12, 12,
@ -181,7 +181,7 @@ presets = {
17, 12, 12, 12, 12, 12, 12, 12, 17, 12, 12, 12, 12, 12, 12, 12,
17, 12, 12, 12, 12, 12, 12, 12] 17, 12, 12, 12, 12, 12, 12, 12]
]}, ]},
'medium': {'subsampling': 2, # "4:1:1" 'medium': {'subsampling': 2, # "4:1:1"
'quantization': [ 'quantization': [
[12, 8, 8, 12, 17, 21, 24, 17, [12, 8, 8, 12, 17, 21, 24, 17,
8, 9, 9, 11, 15, 19, 12, 12, 8, 9, 9, 11, 15, 19, 12, 12,
@ -200,7 +200,7 @@ presets = {
17, 12, 12, 12, 12, 12, 12, 12, 17, 12, 12, 12, 12, 12, 12, 12,
17, 12, 12, 12, 12, 12, 12, 12] 17, 12, 12, 12, 12, 12, 12, 12]
]}, ]},
'high': {'subsampling': 0, # "4:4:4" 'high': {'subsampling': 0, # "4:4:4"
'quantization': [ 'quantization': [
[ 6, 4, 4, 6, 9, 11, 12, 16, [ 6, 4, 4, 6, 9, 11, 12, 16,
4, 5, 5, 6, 8, 10, 12, 12, 4, 5, 5, 6, 8, 10, 12, 12,
@ -219,7 +219,7 @@ presets = {
17, 12, 12, 12, 12, 12, 12, 12, 17, 12, 12, 12, 12, 12, 12, 12,
17, 12, 12, 12, 12, 12, 12, 12] 17, 12, 12, 12, 12, 12, 12, 12]
]}, ]},
'maximum': {'subsampling': 0, # "4:4:4" 'maximum': {'subsampling': 0, # "4:4:4"
'quantization': [ 'quantization': [
[ 2, 2, 2, 2, 3, 4, 5, 6, [ 2, 2, 2, 2, 3, 4, 5, 6,
2, 2, 2, 2, 3, 4, 5, 6, 2, 2, 2, 2, 3, 4, 5, 6,


@ -28,7 +28,7 @@
# This import enables print() as a function rather than a keyword # This import enables print() as a function rather than a keyword
# (main requirement to be compatible with Python 3.x) # (main requirement to be compatible with Python 3.x)
# The comment on the line below should be printed on Python 2.5 or older: # The comment on the line below should be printed on Python 2.5 or older:
from __future__ import print_function # This version of OleFileIO_PL requires Python 2.6+ or 3.x. from __future__ import print_function # This version of OleFileIO_PL requires Python 2.6+ or 3.x.
__author__ = "Philippe Lagadec, Fredrik Lundh (Secret Labs AB)" __author__ = "Philippe Lagadec, Fredrik Lundh (Secret Labs AB)"
@ -234,7 +234,10 @@ __version__ = '0.30'
import io import io
import sys import sys
import struct, array, os.path, datetime import struct
import array
import os.path
import datetime
#[PL] Define explicitly the public API to avoid private objects in pydoc: #[PL] Define explicitly the public API to avoid private objects in pydoc:
__all__ = ['OleFileIO', 'isOleFile', 'MAGIC'] __all__ = ['OleFileIO', 'isOleFile', 'MAGIC']
@ -283,12 +286,17 @@ KEEP_UNICODE_NAMES = False
#[PL] DEBUG display mode: False by default, use set_debug_mode() or "-d" on #[PL] DEBUG display mode: False by default, use set_debug_mode() or "-d" on
# command line to change it. # command line to change it.
DEBUG_MODE = False DEBUG_MODE = False
def debug_print(msg): def debug_print(msg):
print(msg) print(msg)
def debug_pass(msg): def debug_pass(msg):
pass pass
debug = debug_pass debug = debug_pass
def set_debug_mode(debug_mode): def set_debug_mode(debug_mode):
""" """
Set debug mode on or off, to control display of debugging messages. Set debug mode on or off, to control display of debugging messages.
@ -303,24 +311,24 @@ def set_debug_mode(debug_mode):
MAGIC = b'\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1' MAGIC = b'\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1'
#[PL]: added constants for Sector IDs (from AAF specifications) # [PL]: added constants for Sector IDs (from AAF specifications)
MAXREGSECT = 0xFFFFFFFA; # maximum SECT MAXREGSECT = 0xFFFFFFFA; # maximum SECT
DIFSECT = 0xFFFFFFFC; # (-4) denotes a DIFAT sector in a FAT DIFSECT = 0xFFFFFFFC; # (-4) denotes a DIFAT sector in a FAT
FATSECT = 0xFFFFFFFD; # (-3) denotes a FAT sector in a FAT FATSECT = 0xFFFFFFFD; # (-3) denotes a FAT sector in a FAT
ENDOFCHAIN = 0xFFFFFFFE; # (-2) end of a virtual stream chain ENDOFCHAIN = 0xFFFFFFFE; # (-2) end of a virtual stream chain
FREESECT = 0xFFFFFFFF; # (-1) unallocated sector FREESECT = 0xFFFFFFFF; # (-1) unallocated sector
#[PL]: added constants for Directory Entry IDs (from AAF specifications) # [PL]: added constants for Directory Entry IDs (from AAF specifications)
MAXREGSID = 0xFFFFFFFA; # maximum directory entry ID MAXREGSID = 0xFFFFFFFA; # maximum directory entry ID
NOSTREAM = 0xFFFFFFFF; # (-1) unallocated directory entry NOSTREAM = 0xFFFFFFFF; # (-1) unallocated directory entry
#[PL] object types in storage (from AAF specifications) # [PL] object types in storage (from AAF specifications)
STGTY_EMPTY = 0 # empty directory entry (according to OpenOffice.org doc) STGTY_EMPTY = 0 # empty directory entry (according to OpenOffice.org doc)
STGTY_STORAGE = 1 # element is a storage object STGTY_STORAGE = 1 # element is a storage object
STGTY_STREAM = 2 # element is a stream object STGTY_STREAM = 2 # element is a stream object
STGTY_LOCKBYTES = 3 # element is an ILockBytes object STGTY_LOCKBYTES = 3 # element is an ILockBytes object
STGTY_PROPERTY = 4 # element is an IPropertyStorage object STGTY_PROPERTY = 4 # element is an IPropertyStorage object
STGTY_ROOT = 5 # element is a root storage STGTY_ROOT = 5 # element is a root storage
# #
@ -433,7 +441,6 @@ def _clsid(clsid):
tuple(map(i8, clsid[8:16])))) tuple(map(i8, clsid[8:16]))))
# UNICODE support: # UNICODE support:
# (necessary to handle storages/streams names which use Unicode) # (necessary to handle storages/streams names which use Unicode)
@ -471,7 +478,6 @@ def filetime2datetime(filetime):
return _FILETIME_null_date + datetime.timedelta(microseconds=filetime//10) return _FILETIME_null_date + datetime.timedelta(microseconds=filetime//10)
#=== CLASSES ================================================================== #=== CLASSES ==================================================================
class OleMetadata: class OleMetadata:
@ -579,7 +585,6 @@ class OleMetadata:
self.language = None self.language = None
self.doc_version = None self.doc_version = None
def parse_properties(self, olefile): def parse_properties(self, olefile):
""" """
Parse standard properties of an OLE file, from the streams Parse standard properties of an OLE file, from the streams
@ -663,7 +668,7 @@ class _OleStream(io.BytesIO):
""" """
debug('_OleStream.__init__:') debug('_OleStream.__init__:')
debug(' sect=%d (%X), size=%d, offset=%d, sectorsize=%d, len(fat)=%d, fp=%s' debug(' sect=%d (%X), size=%d, offset=%d, sectorsize=%d, len(fat)=%d, fp=%s'
%(sect,sect,size,offset,sectorsize,len(fat), repr(fp))) %(sect, sect, size, offset, sectorsize, len(fat), repr(fp)))
#[PL] To detect malformed documents with FAT loops, we compute the #[PL] To detect malformed documents with FAT loops, we compute the
# expected number of sectors in the stream: # expected number of sectors in the stream:
unknown_size = False unknown_size = False
@ -789,7 +794,6 @@ class _OleDirectoryEntry:
DIRENTRY_SIZE = 128 DIRENTRY_SIZE = 128
assert struct.calcsize(STRUCT_DIRENTRY) == DIRENTRY_SIZE assert struct.calcsize(STRUCT_DIRENTRY) == DIRENTRY_SIZE
def __init__(self, entry, sid, olefile): def __init__(self, entry, sid, olefile):
""" """
Constructor for an _OleDirectoryEntry object. Constructor for an _OleDirectoryEntry object.
@ -882,8 +886,6 @@ class _OleDirectoryEntry:
minifat = False minifat = False
olefile._check_duplicate_stream(self.isectStart, minifat) olefile._check_duplicate_stream(self.isectStart, minifat)
def build_storage_tree(self): def build_storage_tree(self):
""" """
Read and build the red-black tree attached to this _OleDirectoryEntry Read and build the red-black tree attached to this _OleDirectoryEntry
@ -907,7 +909,6 @@ class _OleDirectoryEntry:
# (see rich comparison methods in this class) # (see rich comparison methods in this class)
self.kids.sort() self.kids.sort()
def append_kids(self, child_sid): def append_kids(self, child_sid):
""" """
Walk through red-black tree of children of this directory entry to add Walk through red-black tree of children of this directory entry to add
@ -916,7 +917,7 @@ class _OleDirectoryEntry:
child_sid : index of child directory entry to use, or None when called child_sid : index of child directory entry to use, or None when called
first time for the root. (only used during recursion) first time for the root. (only used during recursion)
""" """
#[PL] this method was added to use simple recursion instead of a complex # [PL] this method was added to use simple recursion instead of a complex
# algorithm. # algorithm.
# if this is not a storage or a leaf of the tree, nothing to do: # if this is not a storage or a leaf of the tree, nothing to do:
if child_sid == NOSTREAM: if child_sid == NOSTREAM:
@ -951,7 +952,6 @@ class _OleDirectoryEntry:
# Afterwards build kid's own tree if it's also a storage: # Afterwards build kid's own tree if it's also a storage:
child.build_storage_tree() child.build_storage_tree()
def __eq__(self, other): def __eq__(self, other):
"Compare entries by name" "Compare entries by name"
return self.name == other.name return self.name == other.name
@ -971,7 +971,6 @@ class _OleDirectoryEntry:
#TODO: replace by the same function as MS implementation ? #TODO: replace by the same function as MS implementation ?
# (order by name length first, then case-insensitive order) # (order by name length first, then case-insensitive order)
def dump(self, tab = 0): def dump(self, tab = 0):
"Dump this entry, and all its subentries (for debug purposes only)" "Dump this entry, and all its subentries (for debug purposes only)"
TYPES = ["(invalid)", "(storage)", "(stream)", "(lockbytes)", TYPES = ["(invalid)", "(storage)", "(stream)", "(lockbytes)",
@ -986,7 +985,6 @@ class _OleDirectoryEntry:
for kid in self.kids: for kid in self.kids:
kid.dump(tab + 2) kid.dump(tab + 2)
def getmtime(self): def getmtime(self):
""" """
Return modification time of a directory entry. Return modification time of a directory entry.
@ -1000,7 +998,6 @@ class _OleDirectoryEntry:
return None return None
return filetime2datetime(self.modifyTime) return filetime2datetime(self.modifyTime)
def getctime(self): def getctime(self):
""" """
Return creation time of a directory entry. Return creation time of a directory entry.
@ -1064,7 +1061,6 @@ class OleFileIO:
if filename: if filename:
self.open(filename) self.open(filename)
def _raise_defect(self, defect_level, message, exception_type=IOError): def _raise_defect(self, defect_level, message, exception_type=IOError):
""" """
This method should be called for any defect found during file parsing. This method should be called for any defect found during file parsing.
@ -1086,7 +1082,6 @@ class OleFileIO:
# just record the issue, no exception raised: # just record the issue, no exception raised:
self.parsing_issues.append((exception_type, message)) self.parsing_issues.append((exception_type, message))
def open(self, filename): def open(self, filename):
""" """
Open an OLE2 file. Open an OLE2 file.
@ -1189,7 +1184,7 @@ class OleFileIO:
self.sectDifStart, self.sectDifStart,
self.csectDif self.csectDif
) = struct.unpack(fmt_header, header1) ) = struct.unpack(fmt_header, header1)
debug( struct.unpack(fmt_header, header1)) debug(struct.unpack(fmt_header, header1))
if self.Sig != MAGIC: if self.Sig != MAGIC:
# OLE signature should always be present # OLE signature should always be present
@ -1231,11 +1226,11 @@ class OleFileIO:
# rule => only a potential defect: # rule => only a potential defect:
if self.signature != 0: if self.signature != 0:
self._raise_defect(DEFECT_POTENTIAL, "incorrect OLE header (signature>0)") self._raise_defect(DEFECT_POTENTIAL, "incorrect OLE header (signature>0)")
debug( "MiniSectorCutoff = %d" % self.MiniSectorCutoff ) debug("MiniSectorCutoff = %d" % self.MiniSectorCutoff)
debug( "MiniFatStart = %X" % self.MiniFatStart ) debug("MiniFatStart = %X" % self.MiniFatStart)
debug( "csectMiniFat = %d" % self.csectMiniFat ) debug("csectMiniFat = %d" % self.csectMiniFat)
debug( "sectDifStart = %X" % self.sectDifStart ) debug("sectDifStart = %X" % self.sectDifStart)
debug( "csectDif = %d" % self.csectDif ) debug("csectDif = %d" % self.csectDif)
# calculate the number of sectors in the file # calculate the number of sectors in the file
# (-1 because header doesn't count) # (-1 because header doesn't count)
@ -1266,14 +1261,12 @@ class OleFileIO:
self.ministream = None self.ministream = None
self.minifatsect = self.MiniFatStart #i32(header, 60) self.minifatsect = self.MiniFatStart #i32(header, 60)
def close(self): def close(self):
""" """
close the OLE file, to release the file object close the OLE file, to release the file object
""" """
self.fp.close() self.fp.close()
def _check_duplicate_stream(self, first_sect, minifat=False): def _check_duplicate_stream(self, first_sect, minifat=False):
""" """
Checks if a stream has not been already referenced elsewhere. Checks if a stream has not been already referenced elsewhere.
@ -1298,7 +1291,6 @@ class OleFileIO:
else: else:
used_streams.append(first_sect) used_streams.append(first_sect)
def dumpfat(self, fat, firstindex=0): def dumpfat(self, fat, firstindex=0):
"Displays a part of FAT in human-readable form for debugging purpose" "Displays a part of FAT in human-readable form for debugging purpose"
# [PL] added only for debug # [PL] added only for debug
@ -1335,7 +1327,6 @@ class OleFileIO:
print(nom, end=" ") print(nom, end=" ")
print() print()
def dumpsect(self, sector, firstindex=0): def dumpsect(self, sector, firstindex=0):
"Displays a sector in a human-readable form, for debugging purpose." "Displays a sector in a human-readable form, for debugging purpose."
if not DEBUG_MODE: if not DEBUG_MODE:
@ -1370,7 +1361,6 @@ class OleFileIO:
a.byteswap() a.byteswap()
return a return a
def loadfat_sect(self, sect): def loadfat_sect(self, sect):
""" """
Adds the indexes of the given sector to the FAT Adds the indexes of the given sector to the FAT
@ -1400,7 +1390,6 @@ class OleFileIO:
self.fat = self.fat + nextfat self.fat = self.fat + nextfat
return isect return isect
def loadfat(self, header): def loadfat(self, header):
""" """
Load the FAT table. Load the FAT table.
@ -1436,16 +1425,16 @@ class OleFileIO:
if self.sectDifStart >= self.nb_sect: if self.sectDifStart >= self.nb_sect:
# initial DIFAT block index must be valid # initial DIFAT block index must be valid
self._raise_defect(DEFECT_FATAL, 'incorrect DIFAT, first index out of range') self._raise_defect(DEFECT_FATAL, 'incorrect DIFAT, first index out of range')
debug( "DIFAT analysis..." ) debug("DIFAT analysis...")
# We compute the necessary number of DIFAT sectors : # We compute the necessary number of DIFAT sectors :
# (each DIFAT sector = 127 pointers + 1 towards next DIFAT sector) # (each DIFAT sector = 127 pointers + 1 towards next DIFAT sector)
nb_difat = (self.csectFat-109 + 126)//127 nb_difat = (self.csectFat-109 + 126)//127
debug( "nb_difat = %d" % nb_difat ) debug("nb_difat = %d" % nb_difat)
if self.csectDif != nb_difat: if self.csectDif != nb_difat:
raise IOError('incorrect DIFAT') raise IOError('incorrect DIFAT')
isect_difat = self.sectDifStart isect_difat = self.sectDifStart
for i in iterrange(nb_difat): for i in iterrange(nb_difat):
debug( "DIFAT block %d, sector %X" % (i, isect_difat) ) debug("DIFAT block %d, sector %X" % (i, isect_difat))
#TODO: check if corresponding FAT SID = DIFSECT #TODO: check if corresponding FAT SID = DIFSECT
sector_difat = self.getsect(isect_difat) sector_difat = self.getsect(isect_difat)
difat = self.sect2array(sector_difat) difat = self.sect2array(sector_difat)
@ -1453,7 +1442,7 @@ class OleFileIO:
self.loadfat_sect(difat[:127]) self.loadfat_sect(difat[:127])
# last DIFAT pointer is next DIFAT sector: # last DIFAT pointer is next DIFAT sector:
isect_difat = difat[127] isect_difat = difat[127]
debug( "next DIFAT sector: %X" % isect_difat ) debug("next DIFAT sector: %X" % isect_difat)
# checks: # checks:
if isect_difat not in [ENDOFCHAIN, FREESECT]: if isect_difat not in [ENDOFCHAIN, FREESECT]:
# last DIFAT pointer value must be ENDOFCHAIN or FREESECT # last DIFAT pointer value must be ENDOFCHAIN or FREESECT
@ -1471,7 +1460,6 @@ class OleFileIO:
debug('\nFAT:') debug('\nFAT:')
self.dumpfat(self.fat) self.dumpfat(self.fat)
def loadminifat(self): def loadminifat(self):
""" """
Load the MiniFAT table. Load the MiniFAT table.
@ -1531,7 +1519,6 @@ class OleFileIO:
self._raise_defect(DEFECT_FATAL, 'incomplete OLE sector') self._raise_defect(DEFECT_FATAL, 'incomplete OLE sector')
return sector return sector
def loaddirectory(self, sect): def loaddirectory(self, sect):
""" """
Load the directory. Load the directory.
@ -1567,7 +1554,6 @@ class OleFileIO:
# read and build all storage trees, starting from the root: # read and build all storage trees, starting from the root:
self.root.build_storage_tree() self.root.build_storage_tree()
def _load_direntry (self, sid): def _load_direntry (self, sid):
""" """
Load a directory entry from the directory. Load a directory entry from the directory.
@ -1592,14 +1578,12 @@ class OleFileIO:
self.direntries[sid] = _OleDirectoryEntry(entry, sid, self) self.direntries[sid] = _OleDirectoryEntry(entry, sid, self)
return self.direntries[sid] return self.direntries[sid]
def dumpdirectory(self): def dumpdirectory(self):
""" """
Dump directory (for debugging only) Dump directory (for debugging only)
""" """
self.root.dump() self.root.dump()
def _open(self, start, size = 0x7FFFFFFF, force_FAT=False): def _open(self, start, size = 0x7FFFFFFF, force_FAT=False):
""" """
Open a stream, either in FAT or MiniFAT according to its size. Open a stream, either in FAT or MiniFAT according to its size.
@ -1633,7 +1617,6 @@ class OleFileIO:
return _OleStream(self.fp, start, size, 512, return _OleStream(self.fp, start, size, 512,
self.sectorsize, self.fat, self._filesize) self.sectorsize, self.fat, self._filesize)
def _list(self, files, prefix, node, streams=True, storages=False): def _list(self, files, prefix, node, streams=True, storages=False):
""" """
(listdir helper) (listdir helper)
@ -1659,7 +1642,6 @@ class OleFileIO:
# add it to the list # add it to the list
files.append(prefix[1:] + [entry.name]) files.append(prefix[1:] + [entry.name])
def listdir(self, streams=True, storages=False): def listdir(self, streams=True, storages=False):
""" """
Return a list of streams stored in this file Return a list of streams stored in this file
@ -1672,7 +1654,6 @@ class OleFileIO:
self._list(files, [], self.root, streams, storages) self._list(files, [], self.root, streams, storages)
return files return files
def _find(self, filename): def _find(self, filename):
""" """
Returns directory entry of given filename. (openstream helper) Returns directory entry of given filename. (openstream helper)
@ -1703,7 +1684,6 @@ class OleFileIO:
node = kid node = kid
return node.sid return node.sid
def openstream(self, filename): def openstream(self, filename):
""" """
Open a stream as a read-only file object (BytesIO). Open a stream as a read-only file object (BytesIO).
@ -1724,7 +1704,6 @@ class OleFileIO:
raise IOError("this file is not a stream") raise IOError("this file is not a stream")
return self._open(entry.isectStart, entry.size) return self._open(entry.isectStart, entry.size)
def get_type(self, filename): def get_type(self, filename):
""" """
Test if given filename exists as a stream or a storage in the OLE Test if given filename exists as a stream or a storage in the OLE
@ -1744,7 +1723,6 @@ class OleFileIO:
except: except:
return False return False
def getmtime(self, filename): def getmtime(self, filename):
""" """
Return modification time of a stream/storage. Return modification time of a stream/storage.
@ -1760,7 +1738,6 @@ class OleFileIO:
entry = self.direntries[sid] entry = self.direntries[sid]
return entry.getmtime() return entry.getmtime()
def getctime(self, filename): def getctime(self, filename):
""" """
Return creation time of a stream/storage. Return creation time of a stream/storage.
@ -1776,7 +1753,6 @@ class OleFileIO:
entry = self.direntries[sid] entry = self.direntries[sid]
return entry.getctime() return entry.getctime()
def exists(self, filename): def exists(self, filename):
""" """
Test if given filename exists as a stream or a storage in the OLE Test if given filename exists as a stream or a storage in the OLE
@ -1791,7 +1767,6 @@ class OleFileIO:
except: except:
return False return False
def get_size(self, filename): def get_size(self, filename):
""" """
Return size of a stream in the OLE container, in bytes. Return size of a stream in the OLE container, in bytes.
@ -1808,7 +1783,6 @@ class OleFileIO:
raise TypeError('object is not an OLE stream') raise TypeError('object is not an OLE stream')
return entry.size return entry.size
def get_rootentry_name(self): def get_rootentry_name(self):
""" """
Return root entry name. Should usually be 'Root Entry' or 'R' in most Return root entry name. Should usually be 'Root Entry' or 'R' in most
@ -1816,7 +1790,6 @@ class OleFileIO:
""" """
return self.root.name return self.root.name
def getproperties(self, filename, convert_time=False, no_conversion=None): def getproperties(self, filename, convert_time=False, no_conversion=None):
""" """
Return properties described in substream. Return properties described in substream.
@ -1828,7 +1801,7 @@ class OleFileIO:
:returns: a dictionary of values indexed by id (integer) :returns: a dictionary of values indexed by id (integer)
""" """
# make sure no_conversion is a list, just to simplify code below: # make sure no_conversion is a list, just to simplify code below:
if no_conversion == None: if no_conversion is None:
no_conversion = [] no_conversion = []
# stream path as a string to report exceptions: # stream path as a string to report exceptions:
streampath = filename streampath = filename
@ -1842,11 +1815,11 @@ class OleFileIO:
try: try:
# header # header
s = fp.read(28) s = fp.read(28)
#clsid = _clsid(s[8:24]) # clsid = _clsid(s[8:24])
# format id # format id
s = fp.read(20) s = fp.read(20)
#fmtid = _clsid(s[:16]) # fmtid = _clsid(s[:16])
fp.seek(i32(s, 16)) fp.seek(i32(s, 16))
# get section # get section
@ -1864,34 +1837,34 @@ class OleFileIO:
for i in range(num_props): for i in range(num_props):
try: try:
id = 0 # just in case of an exception id = 0 # just in case of an exception
id = i32(s, 8+i*8) id = i32(s, 8+i*8)
offset = i32(s, 12+i*8) offset = i32(s, 12+i*8)
type = i32(s, offset) type = i32(s, offset)
debug ('property id=%d: type=%d offset=%X' % (id, type, offset)) debug('property id=%d: type=%d offset=%X' % (id, type, offset))
# test for common types first (should perhaps use # test for common types first (should perhaps use
# a dictionary instead?) # a dictionary instead?)
if type == VT_I2: # 16-bit signed integer if type == VT_I2: # 16-bit signed integer
value = i16(s, offset+4) value = i16(s, offset+4)
if value >= 32768: if value >= 32768:
value = value - 65536 value = value - 65536
elif type == VT_UI2: # 2-byte unsigned integer elif type == VT_UI2: # 2-byte unsigned integer
value = i16(s, offset+4) value = i16(s, offset+4)
elif type in (VT_I4, VT_INT, VT_ERROR): elif type in (VT_I4, VT_INT, VT_ERROR):
# VT_I4: 32-bit signed integer # VT_I4: 32-bit signed integer
# VT_ERROR: HRESULT, similar to 32-bit signed integer, # VT_ERROR: HRESULT, similar to 32-bit signed integer,
# see http://msdn.microsoft.com/en-us/library/cc230330.aspx # see http://msdn.microsoft.com/en-us/library/cc230330.aspx
value = i32(s, offset+4) value = i32(s, offset+4)
elif type in (VT_UI4, VT_UINT): # 4-byte unsigned integer elif type in (VT_UI4, VT_UINT): # 4-byte unsigned integer
value = i32(s, offset+4) # FIXME value = i32(s, offset+4) # FIXME
elif type in (VT_BSTR, VT_LPSTR): elif type in (VT_BSTR, VT_LPSTR):
# CodePageString, see http://msdn.microsoft.com/en-us/library/dd942354.aspx # CodePageString, see http://msdn.microsoft.com/en-us/library/dd942354.aspx
# size is a 32 bits integer, including the null terminator, and # size is a 32 bits integer, including the null terminator, and
# possibly trailing or embedded null chars # possibly trailing or embedded null chars
#TODO: if codepage is unicode, the string should be converted as such # TODO: if codepage is unicode, the string should be converted as such
count = i32(s, offset+4) count = i32(s, offset+4)
value = s[offset+8:offset+8+count-1] value = s[offset+8:offset+8+count-1]
# remove all null chars: # remove all null chars:
@ -1909,7 +1882,7 @@ class OleFileIO:
count = i32(s, offset+4) count = i32(s, offset+4)
value = _unicode(s[offset+8:offset+8+count*2]) value = _unicode(s[offset+8:offset+8+count*2])
elif type == VT_FILETIME: elif type == VT_FILETIME:
value = long(i32(s, offset+4)) + (long(i32(s, offset+8))<<32) value = long(i32(s, offset+4)) + (long(i32(s, offset+8)) << 32)
# FILETIME is a 64-bit int: "number of 100ns periods # FILETIME is a 64-bit int: "number of 100ns periods
# since Jan 1,1601". # since Jan 1,1601".
if convert_time and id not in no_conversion: if convert_time and id not in no_conversion:
@ -1923,8 +1896,8 @@ class OleFileIO:
else: else:
# legacy code kept for backward compatibility: returns a # legacy code kept for backward compatibility: returns a
# number of seconds since Jan 1,1601 # number of seconds since Jan 1,1601
value = value // 10000000 # seconds value = value // 10000000 # seconds
elif type == VT_UI1: # 1-byte unsigned integer elif type == VT_UI1: # 1-byte unsigned integer
value = i8(s[offset+4]) value = i8(s[offset+4])
elif type == VT_CLSID: elif type == VT_CLSID:
value = _clsid(s[offset+4:offset+20]) value = _clsid(s[offset+4:offset+20])
@ -1938,8 +1911,8 @@ class OleFileIO:
# see http://msdn.microsoft.com/en-us/library/cc237864.aspx # see http://msdn.microsoft.com/en-us/library/cc237864.aspx
value = bool(i16(s, offset+4)) value = bool(i16(s, offset+4))
else: else:
value = None # everything else yields "None" value = None # everything else yields "None"
debug ('property id=%d: type=%d not implemented in parser yet' % (id, type)) debug('property id=%d: type=%d not implemented in parser yet' % (id, type))
# missing: VT_EMPTY, VT_NULL, VT_R4, VT_R8, VT_CY, VT_DATE, # missing: VT_EMPTY, VT_NULL, VT_R4, VT_R8, VT_CY, VT_DATE,
# VT_DECIMAL, VT_I1, VT_I8, VT_UI8, # VT_DECIMAL, VT_I1, VT_I8, VT_UI8,
@ -1951,8 +1924,8 @@ class OleFileIO:
# type of items, e.g. VT_VECTOR|VT_BSTR # type of items, e.g. VT_VECTOR|VT_BSTR
# see http://msdn.microsoft.com/en-us/library/dd942011.aspx # see http://msdn.microsoft.com/en-us/library/dd942011.aspx
#print("%08x" % id, repr(value), end=" ") # print("%08x" % id, repr(value), end=" ")
#print("(%s)" % VT[i32(s, offset) & 0xFFF]) # print("(%s)" % VT[i32(s, offset) & 0xFFF])
data[id] = value data[id] = value
except BaseException as exc: except BaseException as exc:
@ -1999,7 +1972,7 @@ Options:
check_streams = False check_streams = False
for filename in sys.argv[1:]: for filename in sys.argv[1:]:
#try: # try:
# OPTIONS: # OPTIONS:
if filename == '-d': if filename == '-d':
# option to switch debug mode on: # option to switch debug mode on:
@ -2010,7 +1983,7 @@ Options:
check_streams = True check_streams = True
continue continue
ole = OleFileIO(filename)#, raise_defects=DEFECT_INCORRECT) ole = OleFileIO(filename) #, raise_defects=DEFECT_INCORRECT)
print("-" * 68) print("-" * 68)
print(filename) print(filename)
print("-" * 68) print("-" * 68)
@ -2027,8 +2000,8 @@ Options:
v = v[:50] v = v[:50]
if isinstance(v, bytes): if isinstance(v, bytes):
# quick and dirty binary check: # quick and dirty binary check:
for c in (1,2,3,4,5,6,7,11,12,14,15,16,17,18,19,20, for c in (1, 2, 3, 4, 5, 6, 7, 11, 12, 14, 15, 16, 17, 18, 19, 20,
21,22,23,24,25,26,27,28,29,30,31): 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31):
if c in bytearray(v): if c in bytearray(v):
v = '(binary data)' v = '(binary data)'
break break
@ -2039,7 +2012,7 @@ Options:
print('\nChecking streams...') print('\nChecking streams...')
for streamname in ole.listdir(): for streamname in ole.listdir():
# print name using repr() to convert binary chars to \xNN: # print name using repr() to convert binary chars to \xNN:
print('-', repr('/'.join(streamname)),'-', end=' ') print('-', repr('/'.join(streamname)), '-', end=' ')
st_type = ole.get_type(streamname) st_type = ole.get_type(streamname)
if st_type == STGTY_STREAM: if st_type == STGTY_STREAM:
print('size %d' % ole.get_size(streamname)) print('size %d' % ole.get_size(streamname))
@ -2066,7 +2039,7 @@ Options:
meta = ole.get_metadata() meta = ole.get_metadata()
meta.dump() meta.dump()
print() print()
#[PL] Test a few new methods: # [PL] Test a few new methods:
root = ole.get_rootentry_name() root = ole.get_rootentry_name()
print('Root entry name: "%s"' % root) print('Root entry name: "%s"' % root)
if ole.exists('worddocument'): if ole.exists('worddocument'):


@ -58,7 +58,7 @@ class PcxImageFile(ImageFile.ImageFile):
if bbox[2] <= bbox[0] or bbox[3] <= bbox[1]: if bbox[2] <= bbox[0] or bbox[3] <= bbox[1]:
raise SyntaxError("bad PCX image size") raise SyntaxError("bad PCX image size")
if Image.DEBUG: if Image.DEBUG:
print ("BBox: %s %s %s %s" % bbox) print("BBox: %s %s %s %s" % bbox)
# format # format
version = i8(s[1]) version = i8(s[1])
@ -66,8 +66,8 @@ class PcxImageFile(ImageFile.ImageFile):
planes = i8(s[65]) planes = i8(s[65])
stride = i16(s, 66) stride = i16(s, 66)
if Image.DEBUG: if Image.DEBUG:
print ("PCX version %s, bits %s, planes %s, stride %s" % print("PCX version %s, bits %s, planes %s, stride %s" %
(version, bits, planes, stride)) (version, bits, planes, stride))
self.info["dpi"] = i16(s, 12), i16(s, 14) self.info["dpi"] = i16(s, 12), i16(s, 14)
@ -106,7 +106,7 @@ class PcxImageFile(ImageFile.ImageFile):
bbox = (0, 0) + self.size bbox = (0, 0) + self.size
if Image.DEBUG: if Image.DEBUG:
print ("size: %sx%s" % self.size) print("size: %sx%s" % self.size)
self.tile = [("pcx", bbox, self.fp.tell(), (rawmode, planes * stride))] self.tile = [("pcx", bbox, self.fp.tell(), (rawmode, planes * stride))]
@ -143,7 +143,7 @@ def _save(im, fp, filename, check=0):
# gets overwritten. # gets overwritten.
if Image.DEBUG: if Image.DEBUG:
print ("PcxImagePlugin._save: xwidth: %d, bits: %d, stride: %d" % ( print("PcxImagePlugin._save: xwidth: %d, bits: %d, stride: %d" % (
im.size[0], bits, stride)) im.size[0], bits, stride))
# under windows, we could determine the current screen size with # under windows, we could determine the current screen size with


@ -78,6 +78,7 @@ MAX_TEXT_CHUNK = ImageFile.SAFEBLOCK
# Set the maximum total text chunk size. # Set the maximum total text chunk size.
MAX_TEXT_MEMORY = 64 * MAX_TEXT_CHUNK MAX_TEXT_MEMORY = 64 * MAX_TEXT_CHUNK
def _safe_zlib_decompress(s): def _safe_zlib_decompress(s):
dobj = zlib.decompressobj() dobj = zlib.decompressobj()
plaintext = dobj.decompress(s, MAX_TEXT_CHUNK) plaintext = dobj.decompress(s, MAX_TEXT_CHUNK)


@ -51,7 +51,7 @@ class PyAccess(object):
self.ysize = vals['ysize'] self.ysize = vals['ysize']
if DEBUG: if DEBUG:
print (vals) print(vals)
self._post_init() self._post_init()
def _post_init(self): def _post_init(self):


@ -506,7 +506,7 @@ class ImageFileDirectory(collections.MutableMapping):
typ = self.tagtype[tag] typ = self.tagtype[tag]
if Image.DEBUG: if Image.DEBUG:
print ("Tag %s, Type: %s, Value: %s" % (tag, typ, value)) print("Tag %s, Type: %s, Value: %s" % (tag, typ, value))
if typ == 1: if typ == 1:
# byte data # byte data
@ -638,9 +638,9 @@ class TiffImageFile(ImageFile.ImageFile):
self.__fp = self.fp self.__fp = self.fp
if Image.DEBUG: if Image.DEBUG:
print ("*** TiffImageFile._open ***") print("*** TiffImageFile._open ***")
print ("- __first:", self.__first) print("- __first:", self.__first)
print ("- ifh: ", ifh) print("- ifh: ", ifh)
# and load the first frame # and load the first frame
self._seek(0) self._seek(0)
@ -751,19 +751,19 @@ class TiffImageFile(ImageFile.ImageFile):
# that returns an IOError if there's no underlying fp. Easier to # that returns an IOError if there's no underlying fp. Easier to
# dea. with here by reordering. # dea. with here by reordering.
if Image.DEBUG: if Image.DEBUG:
print ("have getvalue. just sending in a string from getvalue") print("have getvalue. just sending in a string from getvalue")
n, err = decoder.decode(self.fp.getvalue()) n, err = decoder.decode(self.fp.getvalue())
elif hasattr(self.fp, "fileno"): elif hasattr(self.fp, "fileno"):
# we've got a actual file on disk, pass in the fp. # we've got a actual file on disk, pass in the fp.
if Image.DEBUG: if Image.DEBUG:
print ("have fileno, calling fileno version of the decoder.") print("have fileno, calling fileno version of the decoder.")
self.fp.seek(0) self.fp.seek(0)
# 4 bytes, otherwise the trace might error out # 4 bytes, otherwise the trace might error out
n, err = decoder.decode(b"fpfp") n, err = decoder.decode(b"fpfp")
else: else:
# we have something else. # we have something else.
if Image.DEBUG: if Image.DEBUG:
print ("don't have fileno or getvalue. just reading") print("don't have fileno or getvalue. just reading")
# UNDONE -- so much for that buffer size thing. # UNDONE -- so much for that buffer size thing.
n, err = decoder.decode(self.fp.read()) n, err = decoder.decode(self.fp.read())
@ -943,7 +943,7 @@ class TiffImageFile(ImageFile.ImageFile):
(0, min(y, ysize), w, min(y+h, ysize)), (0, min(y, ysize), w, min(y+h, ysize)),
offsets[i], a)) offsets[i], a))
if Image.DEBUG: if Image.DEBUG:
print ("tiles: ", self.tile) print("tiles: ", self.tile)
y = y + h y = y + h
if y >= self.size[1]: if y >= self.size[1]:
x = y = 0 x = y = 0
@ -1128,8 +1128,8 @@ def _save(im, fp, filename):
if libtiff: if libtiff:
if Image.DEBUG: if Image.DEBUG:
print ("Saving using libtiff encoder") print("Saving using libtiff encoder")
print (ifd.items()) print(ifd.items())
_fp = 0 _fp = 0
if hasattr(fp, "fileno"): if hasattr(fp, "fileno"):
try: try:
@ -1186,7 +1186,7 @@ def _save(im, fp, filename):
atts[k] = v atts[k] = v
if Image.DEBUG: if Image.DEBUG:
print (atts) print(atts)
# libtiff always expects the bytes in native order. # libtiff always expects the bytes in native order.
# we're storing image byte order. So, if the rawmode # we're storing image byte order. So, if the rawmode


@ -47,7 +47,7 @@ class BenchCffiAccess(PillowTestCase):
self.assertEqual(caccess[(0, 0)], access[(0, 0)]) self.assertEqual(caccess[(0, 0)], access[(0, 0)])
print ("Size: %sx%s" % im.size) print("Size: %sx%s" % im.size)
timer(iterate_get, 'PyAccess - get', im.size, access) timer(iterate_get, 'PyAccess - get', im.size, access)
timer(iterate_set, 'PyAccess - set', im.size, access) timer(iterate_set, 'PyAccess - set', im.size, access)
timer(iterate_get, 'C-api - get', im.size, caccess) timer(iterate_get, 'C-api - get', im.size, caccess)


@ -8,6 +8,7 @@ from PIL import Image, ImageFilter
min_iterations = 100 min_iterations = 100
max_iterations = 10000 max_iterations = 10000
@unittest.skipIf(sys.platform.startswith('win32'), "requires Unix or MacOS") @unittest.skipIf(sys.platform.startswith('win32'), "requires Unix or MacOS")
class TestImagingLeaks(PillowTestCase): class TestImagingLeaks(PillowTestCase):
@ -40,4 +41,3 @@ class TestImagingLeaks(PillowTestCase):
if __name__ == '__main__': if __name__ == '__main__':
unittest.main() unittest.main()


@ -74,7 +74,6 @@ post-patch:
""" """
def test_qtables_leak(self): def test_qtables_leak(self):
im = hopper('RGB') im = hopper('RGB')
@ -103,7 +102,6 @@ post-patch:
qtables = [standard_l_qtable, qtables = [standard_l_qtable,
standard_chrominance_qtable] standard_chrominance_qtable]
for count in range(iterations): for count in range(iterations):
test_output = BytesIO() test_output = BytesIO()
im.save(test_output, "JPEG", qtables=qtables) im.save(test_output, "JPEG", qtables=qtables)
@ -172,7 +170,6 @@ post patch:
test_output = BytesIO() test_output = BytesIO()
im.save(test_output, "JPEG", exif=exif) im.save(test_output, "JPEG", exif=exif)
""" """
base case: base case:
MB MB


@ -5,6 +5,7 @@ import zlib
TEST_FILE = "Tests/images/png_decompression_dos.png" TEST_FILE = "Tests/images/png_decompression_dos.png"
class TestPngDos(PillowTestCase): class TestPngDos(PillowTestCase):
def test_dos_text(self): def test_dos_text(self):
@ -19,14 +20,14 @@ class TestPngDos(PillowTestCase):
self.assertLess(len(s), 1024*1024, "Text chunk larger than 1M") self.assertLess(len(s), 1024*1024, "Text chunk larger than 1M")
def test_dos_total_memory(self): def test_dos_total_memory(self):
im = Image.new('L',(1,1)) im = Image.new('L', (1, 1))
compressed_data = zlib.compress('a'*1024*1023) compressed_data = zlib.compress('a'*1024*1023)
info = PngImagePlugin.PngInfo() info = PngImagePlugin.PngInfo()
for x in range(64): for x in range(64):
info.add_text('t%s'%x, compressed_data, 1) info.add_text('t%s' % x, compressed_data, 1)
info.add_itxt('i%s'%x, compressed_data, zip=True) info.add_itxt('i%s' % x, compressed_data, zip=True)
b = BytesIO() b = BytesIO()
im.save(b, 'PNG', pnginfo=info) im.save(b, 'PNG', pnginfo=info)


@ -5,7 +5,7 @@ from PIL import Image
from io import BytesIO from io import BytesIO
# Limits for testing the leak # Limits for testing the leak
mem_limit = 16 # max increase in MB mem_limit = 16 # max increase in MB
iterations = 5000 iterations = 5000
test_file = "Tests/images/hopper.webp" test_file = "Tests/images/hopper.webp"


@ -130,7 +130,7 @@ class PillowTestCase(unittest.TestCase):
# Skip if platform/travis matches, and # Skip if platform/travis matches, and
# PILLOW_RUN_KNOWN_BAD is not true in the environment. # PILLOW_RUN_KNOWN_BAD is not true in the environment.
if bool(os.environ.get('PILLOW_RUN_KNOWN_BAD', False)): if bool(os.environ.get('PILLOW_RUN_KNOWN_BAD', False)):
print (os.environ.get('PILLOW_RUN_KNOWN_BAD', False)) print(os.environ.get('PILLOW_RUN_KNOWN_BAD', False))
return return
skip = True skip = True


@ -2,6 +2,7 @@ from helper import unittest, PillowTestCase
from PIL import _binary from PIL import _binary
class TestBinary(PillowTestCase): class TestBinary(PillowTestCase):
def test_standard(self): def test_standard(self):


@ -148,8 +148,8 @@ class TestFileEps(PillowTestCase):
# open image with binary preview # open image with binary preview
Image.open(file3) Image.open(file3)
def _test_readline(self,t, ending): def _test_readline(self, t, ending):
ending = "Failure with line ending: %s" %("".join("%s" %ord(s) for s in ending)) ending = "Failure with line ending: %s" % ("".join("%s" % ord(s) for s in ending))
self.assertEqual(t.readline().strip('\r\n'), 'something', ending) self.assertEqual(t.readline().strip('\r\n'), 'something', ending)
self.assertEqual(t.readline().strip('\r\n'), 'else', ending) self.assertEqual(t.readline().strip('\r\n'), 'else', ending)
self.assertEqual(t.readline().strip('\r\n'), 'baz', ending) self.assertEqual(t.readline().strip('\r\n'), 'baz', ending)
@ -174,30 +174,30 @@ class TestFileEps(PillowTestCase):
def _test_readline_file_universal(self, test_string, ending): def _test_readline_file_universal(self, test_string, ending):
f = self.tempfile('temp.txt') f = self.tempfile('temp.txt')
with open(f,'wb') as w: with open(f, 'wb') as w:
if str is bytes: if str is bytes:
w.write(test_string) w.write(test_string)
else: else:
w.write(test_string.encode('UTF-8')) w.write(test_string.encode('UTF-8'))
with open(f,'rU') as t: with open(f, 'rU') as t:
self._test_readline(t, ending) self._test_readline(t, ending)
def _test_readline_file_psfile(self, test_string, ending): def _test_readline_file_psfile(self, test_string, ending):
f = self.tempfile('temp.txt') f = self.tempfile('temp.txt')
with open(f,'wb') as w: with open(f, 'wb') as w:
if str is bytes: if str is bytes:
w.write(test_string) w.write(test_string)
else: else:
w.write(test_string.encode('UTF-8')) w.write(test_string.encode('UTF-8'))
with open(f,'rb') as r: with open(f, 'rb') as r:
t = EpsImagePlugin.PSFile(r) t = EpsImagePlugin.PSFile(r)
self._test_readline(t, ending) self._test_readline(t, ending)
def test_readline(self): def test_readline(self):
# check all the freaking line endings possible from the spec # check all the freaking line endings possible from the spec
#test_string = u'something\r\nelse\n\rbaz\rbif\n' # test_string = u'something\r\nelse\n\rbaz\rbif\n'
line_endings = ['\r\n', '\n'] line_endings = ['\r\n', '\n']
not_working_endings = ['\n\r', '\r'] not_working_endings = ['\n\r', '\r']
strings = ['something', 'else', 'baz', 'bif'] strings = ['something', 'else', 'baz', 'bif']
@ -205,9 +205,9 @@ class TestFileEps(PillowTestCase):
for ending in line_endings: for ending in line_endings:
s = ending.join(strings) s = ending.join(strings)
# Native Python versions will pass these endings. # Native Python versions will pass these endings.
#self._test_readline_stringio(s, ending) # self._test_readline_stringio(s, ending)
#self._test_readline_io(s, ending) # self._test_readline_io(s, ending)
#self._test_readline_file_universal(s, ending) # self._test_readline_file_universal(s, ending)
self._test_readline_file_psfile(s, ending) self._test_readline_file_psfile(s, ending)
@ -217,9 +217,9 @@ class TestFileEps(PillowTestCase):
s = ending.join(strings) s = ending.join(strings)
# Native Python versions may fail on these endings. # Native Python versions may fail on these endings.
#self._test_readline_stringio(s, ending) # self._test_readline_stringio(s, ending)
#self._test_readline_io(s, ending) # self._test_readline_io(s, ending)
#self._test_readline_file_universal(s, ending) # self._test_readline_file_universal(s, ending)
self._test_readline_file_psfile(s, ending) self._test_readline_file_psfile(s, ending)


@ -25,23 +25,22 @@ class TestFileIco(PillowTestCase):
# the default image # the default image
output.seek(0) output.seek(0)
reloaded = Image.open(output) reloaded = Image.open(output)
self.assertEqual(reloaded.info['sizes'],set([(32, 32), (64, 64)])) self.assertEqual(reloaded.info['sizes'], set([(32, 32), (64, 64)]))
self.assertEqual(im.mode, reloaded.mode) self.assertEqual(im.mode, reloaded.mode)
self.assertEqual((64, 64), reloaded.size) self.assertEqual((64, 64), reloaded.size)
self.assertEqual(reloaded.format, "ICO") self.assertEqual(reloaded.format, "ICO")
self.assert_image_equal(reloaded, hopper().resize((64,64), Image.LANCZOS)) self.assert_image_equal(reloaded, hopper().resize((64, 64), Image.LANCZOS))
# the other one # the other one
output.seek(0) output.seek(0)
reloaded = Image.open(output) reloaded = Image.open(output)
reloaded.size = (32,32) reloaded.size = (32, 32)
self.assertEqual(im.mode, reloaded.mode) self.assertEqual(im.mode, reloaded.mode)
self.assertEqual((32, 32), reloaded.size) self.assertEqual((32, 32), reloaded.size)
self.assertEqual(reloaded.format, "ICO") self.assertEqual(reloaded.format, "ICO")
self.assert_image_equal(reloaded, hopper().resize((32,32), Image.LANCZOS)) self.assert_image_equal(reloaded, hopper().resize((32, 32), Image.LANCZOS))
if __name__ == '__main__': if __name__ == '__main__':


@ -301,15 +301,12 @@ class TestFileJpeg(PillowTestCase):
# sequence wrong length # sequence wrong length
self.assertRaises(Exception, lambda: self.roundtrip(im, qtables=[])) self.assertRaises(Exception, lambda: self.roundtrip(im, qtables=[]))
# sequence wrong length # sequence wrong length
self.assertRaises(Exception, lambda: self.roundtrip(im, qtables=[1,2,3,4,5])) self.assertRaises(Exception, lambda: self.roundtrip(im, qtables=[1, 2, 3, 4, 5]))
# qtable entry not a sequence # qtable entry not a sequence
self.assertRaises(Exception, lambda: self.roundtrip(im, qtables=[1])) self.assertRaises(Exception, lambda: self.roundtrip(im, qtables=[1]))
# qtable entry has wrong number of items # qtable entry has wrong number of items
self.assertRaises(Exception, lambda: self.roundtrip(im, qtables=[[1,2,3,4]])) self.assertRaises(Exception, lambda: self.roundtrip(im, qtables=[[1, 2, 3, 4]]))
@unittest.skipUnless(djpeg_available(), "djpeg not available") @unittest.skipUnless(djpeg_available(), "djpeg not available")
def test_load_djpeg(self): def test_load_djpeg(self):
@ -340,9 +337,9 @@ class TestFileJpeg(PillowTestCase):
""" Generates a very hard to compress file """ Generates a very hard to compress file
:param size: tuple :param size: tuple
""" """
return Image.frombytes('RGB',size, os.urandom(size[0]*size[1] *3)) return Image.frombytes('RGB', size, os.urandom(size[0]*size[1] * 3))
im = gen_random_image((512,512)) im = gen_random_image((512, 512))
f = self.tempfile("temp.jpeg") f = self.tempfile("temp.jpeg")
im.save(f, quality=100, optimize=True) im.save(f, quality=100, optimize=True)


@ -27,7 +27,7 @@ class LibTiffTestCase(PillowTestCase):
self.assertEqual(im._compression, 'group4') self.assertEqual(im._compression, 'group4')
except: except:
print("No _compression") print("No _compression")
print (dir(im)) print(dir(im))
# can we write it back out, in a different form. # can we write it back out, in a different form.
out = self.tempfile("temp.png") out = self.tempfile("temp.png")
@ -243,13 +243,13 @@ class TestFileLibTiff(LibTiffTestCase):
im2 = Image.open('Tests/images/12in16bit.tif') im2 = Image.open('Tests/images/12in16bit.tif')
if Image.DEBUG: if Image.DEBUG:
print (im.getpixel((0, 0))) print(im.getpixel((0, 0)))
print (im.getpixel((0, 1))) print(im.getpixel((0, 1)))
print (im.getpixel((0, 2))) print(im.getpixel((0, 2)))
print (im2.getpixel((0, 0))) print(im2.getpixel((0, 0)))
print (im2.getpixel((0, 1))) print(im2.getpixel((0, 1)))
print (im2.getpixel((0, 2))) print(im2.getpixel((0, 2)))
self.assert_image_equal(im, im2) self.assert_image_equal(im, im2)


@ -153,7 +153,7 @@ class TestFilePng(PillowTestCase):
im = load(HEAD + chunk(b'iTXt', b'spam\0\1\0en\0Spam\0' + im = load(HEAD + chunk(b'iTXt', b'spam\0\1\0en\0Spam\0' +
zlib.compress(b"egg")[:1]) + TAIL) zlib.compress(b"egg")[:1]) + TAIL)
self.assertEqual(im.info, {'spam':''}) self.assertEqual(im.info, {'spam': ''})
im = load(HEAD + chunk(b'iTXt', b'spam\0\1\1en\0Spam\0' + im = load(HEAD + chunk(b'iTXt', b'spam\0\1\1en\0Spam\0' +
zlib.compress(b"egg")) + TAIL) zlib.compress(b"egg")) + TAIL)
@ -382,7 +382,6 @@ class TestFilePng(PillowTestCase):
self.assert_image_equal(im, repr_png) self.assert_image_equal(im, repr_png)
if __name__ == '__main__': if __name__ == '__main__':
unittest.main() unittest.main()


@ -121,13 +121,13 @@ class TestFileTiff(PillowTestCase):
im2 = Image.open('Tests/images/12in16bit.tif') im2 = Image.open('Tests/images/12in16bit.tif')
if Image.DEBUG: if Image.DEBUG:
print (im.getpixel((0, 0))) print(im.getpixel((0, 0)))
print (im.getpixel((0, 1))) print(im.getpixel((0, 1)))
print (im.getpixel((0, 2))) print(im.getpixel((0, 2)))
print (im2.getpixel((0, 0))) print(im2.getpixel((0, 0)))
print (im2.getpixel((0, 1))) print(im2.getpixel((0, 1)))
print (im2.getpixel((0, 2))) print(im2.getpixel((0, 2)))
self.assert_image_equal(im, im2) self.assert_image_equal(im, im2)


@ -15,7 +15,6 @@ class TestFileWebpMetadata(PillowTestCase):
if not _webp.HAVE_WEBPMUX: if not _webp.HAVE_WEBPMUX:
self.skipTest('WebPMux support not installed') self.skipTest('WebPMux support not installed')
def test_read_exif_metadata(self): def test_read_exif_metadata(self):
file_path = "Tests/images/flower.webp" file_path = "Tests/images/flower.webp"


@ -15,7 +15,7 @@ class TestFileXpm(PillowTestCase):
self.assertEqual(im.size, (128, 128)) self.assertEqual(im.size, (128, 128))
self.assertEqual(im.format, "XPM") self.assertEqual(im.format, "XPM")
#large error due to quantization->44 colors. # large error due to quantization->44 colors.
self.assert_image_similar(im.convert('RGB'), hopper('RGB'), 60) self.assert_image_similar(im.convert('RGB'), hopper('RGB'), 60)
def test_load_read(self): def test_load_read(self):


@ -72,7 +72,7 @@ class TestImagePutData(PillowTestCase):
im = Image.new('L', (150, 100)) im = Image.new('L', (150, 100))
im.putdata(arr) im.putdata(arr)
self.assertEqual(len(im.getdata()),len(arr)) self.assertEqual(len(im.getdata()), len(arr))
def test_array_F(self): def test_array_F(self):
# shouldn't segfault # shouldn't segfault
@ -82,7 +82,7 @@ class TestImagePutData(PillowTestCase):
arr = array('f', [0.0])*15000 arr = array('f', [0.0])*15000
im.putdata(arr) im.putdata(arr)
self.assertEqual(len(im.getdata()),len(arr)) self.assertEqual(len(im.getdata()), len(arr))
if __name__ == '__main__': if __name__ == '__main__':
unittest.main() unittest.main()


@ -17,11 +17,11 @@ class TestImagingCoreResize(PillowTestCase):
def test_nearest_mode(self): def test_nearest_mode(self):
for mode in ["1", "P", "L", "I", "F", "RGB", "RGBA", "CMYK", "YCbCr", for mode in ["1", "P", "L", "I", "F", "RGB", "RGBA", "CMYK", "YCbCr",
"I;16"]: # exotic mode "I;16"]: # exotic mode
im = hopper(mode) im = hopper(mode)
r = self.resize(im, (15, 12), Image.NEAREST) r = self.resize(im, (15, 12), Image.NEAREST)
self.assertEqual(r.mode, mode) self.assertEqual(r.mode, mode)
self.assertEqual(r.size, (15, 12) ) self.assertEqual(r.size, (15, 12))
self.assertEqual(r.im.bands, im.im.bands) self.assertEqual(r.im.bands, im.im.bands)
def test_convolution_modes(self): def test_convolution_modes(self):
@ -35,7 +35,7 @@ class TestImagingCoreResize(PillowTestCase):
im = hopper(mode) im = hopper(mode)
r = self.resize(im, (15, 12), Image.BILINEAR) r = self.resize(im, (15, 12), Image.BILINEAR)
self.assertEqual(r.mode, mode) self.assertEqual(r.mode, mode)
self.assertEqual(r.size, (15, 12) ) self.assertEqual(r.size, (15, 12))
self.assertEqual(r.im.bands, im.im.bands) self.assertEqual(r.im.bands, im.im.bands)
def test_reduce_filters(self): def test_reduce_filters(self):


@ -1,7 +1,7 @@
from helper import unittest, PillowTestCase, hopper from helper import unittest, PillowTestCase, hopper
from PIL.Image import (FLIP_LEFT_RIGHT, FLIP_TOP_BOTTOM, ROTATE_90, ROTATE_180, from PIL.Image import (FLIP_LEFT_RIGHT, FLIP_TOP_BOTTOM, ROTATE_90, ROTATE_180,
ROTATE_270, TRANSPOSE) ROTATE_270, TRANSPOSE)
class TestImageTranspose(PillowTestCase): class TestImageTranspose(PillowTestCase):


@ -17,6 +17,7 @@ except ImportError as v:
SRGB = "Tests/icc/sRGB_IEC61966-2-1_black_scaled.icc" SRGB = "Tests/icc/sRGB_IEC61966-2-1_black_scaled.icc"
HAVE_PROFILE = os.path.exists(SRGB) HAVE_PROFILE = os.path.exists(SRGB)
class TestImageCms(PillowTestCase): class TestImageCms(PillowTestCase):
def setUp(self): def setUp(self):


@ -64,7 +64,7 @@ class TestImageDraw(PillowTestCase):
# Assert # Assert
self.assert_image_similar( self.assert_image_similar(
im, Image.open("Tests/images/imagedraw_arc.png"),1) im, Image.open("Tests/images/imagedraw_arc.png"), 1)
def test_arc1(self): def test_arc1(self):
self.helper_arc(BBOX1) self.helper_arc(BBOX1)
@ -97,7 +97,7 @@ class TestImageDraw(PillowTestCase):
# Assert # Assert
self.assert_image_similar( self.assert_image_similar(
im, Image.open("Tests/images/imagedraw_chord.png"),1) im, Image.open("Tests/images/imagedraw_chord.png"), 1)
def test_chord1(self): def test_chord1(self):
self.helper_chord(BBOX1) self.helper_chord(BBOX1)
@ -116,7 +116,7 @@ class TestImageDraw(PillowTestCase):
# Assert # Assert
self.assert_image_similar( self.assert_image_similar(
im, Image.open("Tests/images/imagedraw_ellipse.png"),1) im, Image.open("Tests/images/imagedraw_ellipse.png"), 1)
def test_ellipse1(self): def test_ellipse1(self):
self.helper_ellipse(BBOX1) self.helper_ellipse(BBOX1)
@ -154,7 +154,7 @@ class TestImageDraw(PillowTestCase):
# Assert # Assert
self.assert_image_similar( self.assert_image_similar(
im, Image.open("Tests/images/imagedraw_pieslice.png"),1) im, Image.open("Tests/images/imagedraw_pieslice.png"), 1)
def test_pieslice1(self): def test_pieslice1(self):
self.helper_pieslice(BBOX1) self.helper_pieslice(BBOX1)


@ -21,19 +21,18 @@ class TestImageEnhance(PillowTestCase):
im = Image.new("RGB", (1, 1)) im = Image.new("RGB", (1, 1))
ImageEnhance.Sharpness(im).enhance(0.5) ImageEnhance.Sharpness(im).enhance(0.5)
def _half_transparent_image(self): def _half_transparent_image(self):
# returns an image, half transparent, half solid # returns an image, half transparent, half solid
im = hopper('RGB') im = hopper('RGB')
transparent = Image.new('L', im.size, 0) transparent = Image.new('L', im.size, 0)
solid = Image.new('L', (im.size[0]//2, im.size[1]), 255) solid = Image.new('L', (im.size[0]//2, im.size[1]), 255)
transparent.paste(solid, (0,0)) transparent.paste(solid, (0, 0))
im.putalpha(transparent) im.putalpha(transparent)
return im return im
def _check_alpha(self,im, original, op, amount): def _check_alpha(self, im, original, op, amount):
self.assertEqual(im.getbands(), original.getbands()) self.assertEqual(im.getbands(), original.getbands())
self.assert_image_equal(im.split()[-1], original.split()[-1], self.assert_image_equal(im.split()[-1], original.split()[-1],
"Diff on %s: %s" % (op, amount)) "Diff on %s: %s" % (op, amount))
@ -45,8 +44,8 @@ class TestImageEnhance(PillowTestCase):
original = self._half_transparent_image() original = self._half_transparent_image()
for op in ['Color', 'Brightness', 'Contrast', 'Sharpness']: for op in ['Color', 'Brightness', 'Contrast', 'Sharpness']:
for amount in [0,0.5,1.0]: for amount in [0, 0.5, 1.0]:
self._check_alpha(getattr(ImageEnhance,op)(original).enhance(amount), self._check_alpha(getattr(ImageEnhance, op)(original).enhance(amount),
original, op, amount) original, op, amount)


@ -1,6 +1,7 @@
from helper import unittest, PillowTestCase from helper import unittest, PillowTestCase
from PIL import Image, ImageFont, ImageDraw from PIL import Image, ImageFont, ImageDraw
class TestImageFontBitmap(PillowTestCase): class TestImageFontBitmap(PillowTestCase):
def test_similar(self): def test_similar(self):
text = 'EmbeddedBitmap' text = 'EmbeddedBitmap'


@ -24,16 +24,16 @@ class TestImageSequence(PillowTestCase):
def _test_multipage_tiff(self, dbg=False): def _test_multipage_tiff(self, dbg=False):
# debug had side effect of calling fp.tell. # debug had side effect of calling fp.tell.
Image.DEBUG=dbg Image.DEBUG = dbg
im = Image.open('Tests/images/multipage.tiff') im = Image.open('Tests/images/multipage.tiff')
for index, frame in enumerate(ImageSequence.Iterator(im)): for index, frame in enumerate(ImageSequence.Iterator(im)):
frame.load() frame.load()
self.assertEqual(index, im.tell()) self.assertEqual(index, im.tell())
frame.convert('RGB') frame.convert('RGB')
Image.DEBUG=False Image.DEBUG = False
def test_tiff(self): def test_tiff(self):
#self._test_multipage_tiff(True) # self._test_multipage_tiff(True)
self._test_multipage_tiff(False) self._test_multipage_tiff(False)
def test_libtiff(self): def test_libtiff(self):
@ -43,7 +43,7 @@ class TestImageSequence(PillowTestCase):
self.skipTest("tiff support not available") self.skipTest("tiff support not available")
TiffImagePlugin.READ_LIBTIFF = True TiffImagePlugin.READ_LIBTIFF = True
#self._test_multipage_tiff(True) # self._test_multipage_tiff(True)
self._test_multipage_tiff(False) self._test_multipage_tiff(False)
TiffImagePlugin.READ_LIBTIFF = False TiffImagePlugin.READ_LIBTIFF = False


@ -129,7 +129,7 @@ class TestNumpy(PillowTestCase):
arr = numpy.zeros((15000,), numpy.float32) arr = numpy.zeros((15000,), numpy.float32)
im.putdata(arr) im.putdata(arr)
self.assertEqual(len(im.getdata()),len(arr)) self.assertEqual(len(im.getdata()), len(arr))
if __name__ == '__main__': if __name__ == '__main__':


@ -21,25 +21,23 @@ class Test_scipy_resize(PillowTestCase):
self.skipTest("Scipy Required") self.skipTest("Scipy Required")
def test_imresize(self): def test_imresize(self):
im = np.random.random((10,20)) im = np.random.random((10, 20))
for T in np.sctypes['float'] + [float]: for T in np.sctypes['float'] + [float]:
# 1.1 rounds to below 1.1 for float16, 1.101 works # 1.1 rounds to below 1.1 for float16, 1.101 works
im1 = misc.imresize(im,T(1.101)) im1 = misc.imresize(im, T(1.101))
self.assertEqual(im1.shape,(11,22)) self.assertEqual(im1.shape, (11, 22))
def test_imresize4(self): def test_imresize4(self):
im = np.array([[1,2], im = np.array([[1, 2],
[3,4]]) [3, 4]])
res = np.array([[ 1. , 1.25, 1.75, 2. ], res = np.array([[1. , 1.25, 1.75, 2. ],
[ 1.5 , 1.75, 2.25, 2.5 ], [1.5, 1.75, 2.25, 2.5],
[ 2.5 , 2.75, 3.25, 3.5 ], [2.5, 2.75, 3.25, 3.5],
[ 3. , 3.25, 3.75, 4. ]], dtype=np.float32) [3. , 3.25, 3.75, 4. ]], dtype=np.float32)
# Check that resizing by target size, float and int are the same # Check that resizing by target size, float and int are the same
im2 = misc.imresize(im, (4,4), mode='F') # output size im2 = misc.imresize(im, (4, 4), mode='F') # output size
im3 = misc.imresize(im, 2., mode='F') # fraction im3 = misc.imresize(im, 2., mode='F') # fraction
im4 = misc.imresize(im, 200, mode='F') # percentage im4 = misc.imresize(im, 200, mode='F') # percentage
assert_equal(im2, res) assert_equal(im2, res)
assert_equal(im3, res) assert_equal(im3, res)
assert_equal(im4, res) assert_equal(im4, res)