mirror of https://github.com/python-pillow/Pillow.git
synced 2024-12-24 17:06:16 +03:00
commit d79d28152b

PIL/tests.py (17 lines removed)

@@ -1,17 +0,0 @@
-import unittest
-
-
-class PillowTests(unittest.TestCase):
-    """
-    Can we start moving the test suite here?
-    """
-
-    def test_suite_should_move_here(self):
-        """
-        Great idea!
-        """
-        assert True is True
-
-
-if __name__ == '__main__':
-    unittest.main()
Tests/README.md (new file, 20 lines)

@@ -0,0 +1,20 @@
+Pillow test files.
+
+Test scripts are named `test_xxx.py` and use the `unittest` module. A base class and helper functions can be found in `helper.py`.
+
+Run the tests from the root of the Pillow source distribution:
+
+    python selftest.py
+    nosetests Tests/test_*.py
+
+Or with coverage:
+
+    coverage run --append --include=PIL/* selftest.py
+    coverage run --append --include=PIL/* -m nose Tests/test_*.py
+    coverage report
+    coverage html
+    open htmlcov/index.html
+
+To run an individual test:
+
+    python Tests/test_image.py

Tests/README (old file, 14 lines removed)

@@ -1,14 +0,0 @@
-Minimalistic PIL test framework.
-
-Test scripts are named "test_xxx" and are supposed to output "ok". That's it. To run the tests::
-
-    python setup.py develop
-
-Run the tests from the root of the Pillow source distribution:
-
-    python selftest.py
-    python Tests/run.py --installed
-
-To run an individual test:
-
-    python Tests/test_image.py
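The conventions above are enough to sketch a complete test module in the new layout. A minimal example, under assumptions not in this commit: the file name Tests/test_example.py is hypothetical, and the bundled lena image is taken to be the 128x128 RGB one that helper.py's lena() opens.

    # Hypothetical Tests/test_example.py -- illustrative, not part of this commit.
    import unittest

    # Re-export tearDownModule so leftover temp files are reported (see helper.py).
    from helper import PillowTestCase, lena, tearDownModule  # noqa


    class TestExample(PillowTestCase):

        def test_lena(self):
            im = lena()  # assumed: the 128x128 RGB helper image
            self.assertEqual(im.mode, "RGB")
            self.assertEqual(im.size, (128, 128))


    if __name__ == '__main__':
        unittest.main()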
Tests/bench_cffi_access.py

@@ -1,23 +1,25 @@
-from tester import *
+from helper import *

-# not running this test by default. No DOS against travis.
+# Not running this test by default. No DOS against Travis CI.

 from PIL import PyAccess
 from PIL import Image

 import time


 def iterate_get(size, access):
-    (w,h) = size
+    (w, h) = size
     for x in range(w):
         for y in range(h):
-            access[(x,y)]
+            access[(x, y)]


 def iterate_set(size, access):
-    (w,h) = size
+    (w, h) = size
     for x in range(w):
         for y in range(h):
-            access[(x,y)] = (x %256,y%256,0)
+            access[(x, y)] = (x % 256, y % 256, 0)


 def timer(func, label, *args):
     iterations = 5000
@@ -25,27 +27,34 @@ def timer(func, label, *args):
     for x in range(iterations):
         func(*args)
         if time.time()-starttime > 10:
-            print ("%s: breaking at %s iterations, %.6f per iteration"%(label, x+1, (time.time()-starttime)/(x+1.0)))
+            print("%s: breaking at %s iterations, %.6f per iteration" % (
+                label, x+1, (time.time()-starttime)/(x+1.0)))
             break
         if x == iterations-1:
             endtime = time.time()
-            print ("%s: %.4f s %.6f per iteration" %(label, endtime-starttime, (endtime-starttime)/(x+1.0)))
+            print("%s: %.4f s %.6f per iteration" % (
+                label, endtime-starttime, (endtime-starttime)/(x+1.0)))


-def test_direct():
-    im = lena()
-    im.load()
-    #im = Image.new( "RGB", (2000,2000), (1,3,2))
-    caccess = im.im.pixel_access(False)
-    access = PyAccess.new(im, False)
-
-    assert_equal(caccess[(0,0)], access[(0,0)])
-
-    print ("Size: %sx%s" % im.size)
-    timer(iterate_get, 'PyAccess - get', im.size, access)
-    timer(iterate_set, 'PyAccess - set', im.size, access)
-    timer(iterate_get, 'C-api - get', im.size, caccess)
-    timer(iterate_set, 'C-api - set', im.size, caccess)
+class BenchCffiAccess(PillowTestCase):
+
+    def test_direct(self):
+        im = lena()
+        im.load()
+        # im = Image.new( "RGB", (2000, 2000), (1, 3, 2))
+        caccess = im.im.pixel_access(False)
+        access = PyAccess.new(im, False)
+
+        self.assertEqual(caccess[(0, 0)], access[(0, 0)])
+
+        print ("Size: %sx%s" % im.size)
+        timer(iterate_get, 'PyAccess - get', im.size, access)
+        timer(iterate_set, 'PyAccess - set', im.size, access)
+        timer(iterate_get, 'C-api - get', im.size, caccess)
+        timer(iterate_set, 'C-api - set', im.size, caccess)
+
+
+if __name__ == '__main__':
+    unittest.main()
+
+# End of file
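The benchmark above times one pixel interface against two backends. A standalone sketch of those two access paths, restricted to calls that appear in the file itself; a CFFI-enabled Pillow build is assumed for PyAccess:

    from PIL import Image, PyAccess

    im = Image.new("RGB", (16, 16))
    im.load()
    caccess = im.im.pixel_access(False)  # classic C pixel access
    access = PyAccess.new(im, False)     # CFFI-backed access, same interface

    access[(0, 0)] = (255, 0, 0)           # write via PyAccess
    assert caccess[(0, 0)] == (255, 0, 0)  # read back via the C API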
Tests/bench_get.py

@@ -1,13 +1,14 @@
 import sys
 sys.path.insert(0, ".")

-import tester
+import helper
 import timeit


 def bench(mode):
-    im = tester.lena(mode)
+    im = helper.lena(mode)
     get = im.im.getpixel
-    xy = 50, 50 # position shouldn't really matter
+    xy = 50, 50  # position shouldn't really matter
     t0 = timeit.default_timer()
     for i in range(1000000):
         get(xy)
Tests/helper.py (152 lines changed)

@@ -10,6 +10,8 @@ else:
 import unittest


+# This should be imported into every test_XXX.py file to report
+# any remaining temp files at the end of the run.
 def tearDownModule():
     import glob
     import os
@@ -160,59 +162,11 @@ class PillowTestCase(unittest.TestCase):
         return files[0]


-# # require that deprecation warnings are triggered
-# import warnings
-# warnings.simplefilter('default')
-# # temporarily turn off resource warnings that warn about unclosed
-# # files in the test scripts.
-# try:
-#     warnings.filterwarnings("ignore", category=ResourceWarning)
-# except NameError:
-#     # we expect a NameError on py2.x, since it doesn't have ResourceWarnings.
-#     pass
+# helpers

 import sys
 py3 = (sys.version_info >= (3, 0))

-# # some test helpers
-#
-# _target = None
-# _tempfiles = []
-# _logfile = None
-#
-#
-# def success():
-#     import sys
-#     success.count += 1
-#     if _logfile:
-#         print(sys.argv[0], success.count, failure.count, file=_logfile)
-#     return True
-#
-#
-# def failure(msg=None, frame=None):
-#     import sys
-#     import linecache
-#     failure.count += 1
-#     if _target:
-#         if frame is None:
-#             frame = sys._getframe()
-#         while frame.f_globals.get("__name__") != _target.__name__:
-#             frame = frame.f_back
-#         location = (frame.f_code.co_filename, frame.f_lineno)
-#         prefix = "%s:%d: " % location
-#         line = linecache.getline(*location)
-#         print(prefix + line.strip() + " failed:")
-#     if msg:
-#         print("- " + msg)
-#     if _logfile:
-#         print(sys.argv[0], success.count, failure.count, file=_logfile)
-#     return False
-#
-# success.count = failure.count = 0
-#
-
-# helpers
-
 def fromstring(data):
     from io import BytesIO
@@ -230,6 +184,9 @@ def tostring(im, format, **options):
 def lena(mode="RGB", cache={}):
     from PIL import Image
-    im = cache.get(mode)
+    im = None
+    # FIXME: Implement caching to reduce reading from disk but so an original
+    # copy is returned each time and the cached image isn't modified by tests
+    # (for fast, isolated, repeatable tests).
+    # im = cache.get(mode)
     if im is None:
         if mode == "RGB":
@@ -243,99 +200,4 @@ def lena(mode="RGB", cache={}):
         # cache[mode] = im
     return im
-
-
-# def assert_image_completely_equal(a, b, msg=None):
-#     if a != b:
-#         failure(msg or "images different")
-#     else:
-#         success()
-#
-#
-# # test runner
-#
-# def run():
-#     global _target, _tests, run
-#     import sys
-#     import traceback
-#     _target = sys.modules["__main__"]
-#     run = None  # no need to run twice
-#     tests = []
-#     for name, value in list(vars(_target).items()):
-#         if name[:5] == "test_" and type(value) is type(success):
-#             tests.append((value.__code__.co_firstlineno, name, value))
-#     tests.sort()  # sort by line
-#     for lineno, name, func in tests:
-#         try:
-#             _tests = []
-#             func()
-#             for func, args in _tests:
-#                 func(*args)
-#         except:
-#             t, v, tb = sys.exc_info()
-#             tb = tb.tb_next
-#             if tb:
-#                 failure(frame=tb.tb_frame)
-#                 traceback.print_exception(t, v, tb)
-#             else:
-#                 print("%s:%d: cannot call test function: %s" % (
-#                     sys.argv[0], lineno, v))
-#                 failure.count += 1
-#
-#
-# def yield_test(function, *args):
-#     # collect delayed/generated tests
-#     _tests.append((function, args))
-#
-#
-# def skip(msg=None):
-#     import os
-#     print("skip")
-#     os._exit(0)  # don't run exit handlers
-#
-#
-# def ignore(pattern):
-#     """Tells the driver to ignore messages matching the pattern, for the
-#     duration of the current test."""
-#     print('ignore: %s' % pattern)
-#
-#
-# def _setup():
-#     global _logfile
-#
-#     import sys
-#     if "--coverage" in sys.argv:
-#         # Temporary: ignore PendingDeprecationWarning from Coverage (Py3.4)
-#         with warnings.catch_warnings():
-#             warnings.simplefilter("ignore")
-#             import coverage
-#         cov = coverage.coverage(auto_data=True, include="PIL/*")
-#         cov.start()
-#
-#     def report():
-#         if run:
-#             run()
-#         if success.count and not failure.count:
-#             print("ok")
-#             # only clean out tempfiles if test passed
-#             import os
-#             import os.path
-#             import tempfile
-#             for file in _tempfiles:
-#                 try:
-#                     os.remove(file)
-#                 except OSError:
-#                     pass  # report?
-#             temp_root = os.path.join(tempfile.gettempdir(), 'pillow-tests')
-#             try:
-#                 os.rmdir(temp_root)
-#             except OSError:
-#                 pass
-#
-#     import atexit
-#     atexit.register(report)
-#
-#     if "--log" in sys.argv:
-#         _logfile = open("test.log", "a")
-#
-#
-# _setup()
-# End of file
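The FIXME added to lena() spells out what a safe cache needs: serve from memory, but never hand tests the cached object itself. A sketch of that idea, adapted from the old tester.py version of lena() (shown further down) with a copy() added; illustrative only, not what helper.py does in this commit:

    def lena(mode="RGB", cache={}):
        from PIL import Image
        im = cache.get(mode)
        if im is None:
            if mode == "RGB":
                im = Image.open("Tests/images/lena.ppm")
            elif mode == "F":
                im = lena("L").convert(mode)
            elif mode[:4] == "I;16":
                im = lena("I").convert(mode)
            else:
                im = lena("RGB").convert(mode)
            cache[mode] = im
        return im.copy()  # callers get a copy; the cached original stays pristine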
Tests/large_memory_numpy_test.py

@@ -1,4 +1,4 @@
-from tester import *
+from helper import *

 # This test is not run automatically.
 #
@@ -6,32 +6,37 @@ from tester import *
 # second test. Running this automatically would amount to a denial of
 # service on our testing infrastructure. I expect this test to fail
 # on any 32 bit machine, as well as any smallish things (like
-# raspberrypis).
+# Raspberry Pis).

 from PIL import Image
 try:
     import numpy as np
 except:
-    skip()
+    sys.exit("Skipping: Numpy not installed")

-ydim = 32769
-xdim = 48000
-f = tempfile('temp.png')
-
-def _write_png(xdim,ydim):
-    dtype = np.uint8
-    a = np.zeros((xdim, ydim), dtype=dtype)
-    im = Image.fromarray(a, 'L')
-    im.save(f)
-    success()
-
-def test_large():
-    """ succeeded prepatch"""
-    _write_png(xdim,ydim)
-def test_2gpx():
-    """failed prepatch"""
-    _write_png(xdim,xdim)
+YDIM = 32769
+XDIM = 48000
+
+
+class LargeMemoryNumpyTest(PillowTestCase):
+
+    def _write_png(self, xdim, ydim):
+        dtype = np.uint8
+        a = np.zeros((xdim, ydim), dtype=dtype)
+        f = self.tempfile('temp.png')
+        im = Image.fromarray(a, 'L')
+        im.save(f)
+
+    def test_large(self):
+        """ succeeded prepatch"""
+        self._write_png(XDIM, YDIM)
+
+    def test_2gpx(self):
+        """failed prepatch"""
+        self._write_png(XDIM, XDIM)
+
+
+if __name__ == '__main__':
+    unittest.main()
+
+# End of file
Tests/large_memory_test.py

@@ -1,4 +1,4 @@
-from tester import *
+from helper import *

 # This test is not run automatically.
 #
@@ -6,22 +6,31 @@ from tester import *
 # second test. Running this automatically would amount to a denial of
 # service on our testing infrastructure. I expect this test to fail
 # on any 32 bit machine, as well as any smallish things (like
-# raspberrypis). It does succeed on a 3gb Ubuntu 12.04x64 VM on python
-# 2.7 an 3.2
+# Raspberry Pis). It does succeed on a 3gb Ubuntu 12.04x64 VM on Python
+# 2.7 an 3.2.

 from PIL import Image
-ydim = 32769
-xdim = 48000
-f = tempfile('temp.png')
-
-def _write_png(xdim,ydim):
-    im = Image.new('L',(xdim,ydim),(0))
-    im.save(f)
-    success()
-
-def test_large():
-    """ succeeded prepatch"""
-    _write_png(xdim,ydim)
-def test_2gpx():
-    """failed prepatch"""
-    _write_png(xdim,xdim)
+
+YDIM = 32769
+XDIM = 48000
+
+
+class LargeMemoryTest(PillowTestCase):
+
+    def _write_png(self, xdim, ydim):
+        f = self.tempfile('temp.png')
+        im = Image.new('L', (xdim, ydim), (0))
+        im.save(f)
+
+    def test_large(self):
+        """ succeeded prepatch"""
+        self._write_png(XDIM, YDIM)
+
+    def test_2gpx(self):
+        """failed prepatch"""
+        self._write_png(XDIM, XDIM)
+
+
+if __name__ == '__main__':
+    unittest.main()
+
+# End of file
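The dimensions in both files sit just past integer boundaries: YDIM = 32769 exceeds 2**15 by one, and the test_2gpx case (presumably "2 gigapixels") pushes the total pixel count past 2**31, which is where the pre-patch code failed. A quick check of the arithmetic:

    XDIM = 48000
    YDIM = 32769  # 2**15 + 1

    print(XDIM * YDIM)  # 1572912000 pixels, under 2**31: "succeeded prepatch"
    print(XDIM * XDIM)  # 2304000000 pixels, over 2**31: "failed prepatch"
    print(2 ** 31)      # 2147483648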
Tests/make_hash.py

@@ -1,7 +1,5 @@
 # brute-force search for access descriptor hash table
-
-
 import random

 modes = [
     "1",
     "L", "LA",
@@ -13,12 +11,14 @@ modes = [
     "YCbCr",
 ]

+
 def hash(s, i):
     # djb2 hash: multiply by 33 and xor character
     for c in s:
-        i = (((i<<5) + i) ^ ord(c)) & 0xffffffff
+        i = (((i << 5) + i) ^ ord(c)) & 0xffffffff
     return i

+
 def check(size, i0):
     h = [None] * size
     for m in modes:
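The hash above is djb2 with a searchable starting value: each character multiplies the running value by 33 (the shift-and-add) and is XORed in, masked to 32 bits. A standalone sketch; the seed 5381 is the classic djb2 constant and the table size 17 is illustrative, whereas make_hash.py searches for a (size, seed) pair that maps all the modes to distinct slots:

    def djb2(s, i=5381):
        for c in s:
            i = (((i << 5) + i) ^ ord(c)) & 0xffffffff  # i*33 ^ ord(c), 32-bit
        return i

    # A perfect choice gives every mode its own hash-table slot:
    for mode in ("1", "L", "LA", "RGB", "YCbCr"):
        print(mode, djb2(mode) % 17)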
Tests/run.py (135 lines removed)

@@ -1,135 +0,0 @@
-from __future__ import print_function
-
-# minimal test runner
-
-import glob
-import os
-import os.path
-import re
-import sys
-import tempfile
-
-try:
-    root = os.path.dirname(__file__)
-except NameError:
-    root = os.path.dirname(sys.argv[0])
-
-if not os.path.isfile("PIL/Image.py"):
-    print("***", "please run this script from the PIL development directory as")
-    print("***", "$ python Tests/run.py")
-    sys.exit(1)
-
-print("-"*68)
-
-python_options = []
-tester_options = []
-
-if "--installed" not in sys.argv:
-    os.environ["PYTHONPATH"] = "."
-
-if "--coverage" in sys.argv:
-    tester_options.append("--coverage")
-
-if "--log" in sys.argv:
-    tester_options.append("--log")
-
-files = glob.glob(os.path.join(root, "test_*.py"))
-files.sort()
-
-success = failure = 0
-include = [x for x in sys.argv[1:] if x[:2] != "--"]
-skipped = []
-failed = []
-
-python_options = " ".join(python_options)
-tester_options = " ".join(tester_options)
-
-ignore_re = re.compile('^ignore: (.*)$', re.MULTILINE)
-
-for file in files:
-    test, ext = os.path.splitext(os.path.basename(file))
-    if include and test not in include:
-        continue
-    print("running", test, "...")
-    # 2>&1 works on unix and on modern windowses. we might care about
-    # very old Python versions, but not ancient microsoft products :-)
-    out = os.popen("%s %s -u %s %s 2>&1" % (
-        sys.executable, python_options, file, tester_options
-    ))
-    result = out.read()
-
-    result_lines = result.splitlines()
-    if len(result_lines):
-        if result_lines[0] == "ignore_all_except_last_line":
-            result = result_lines[-1]
-
-    # Extract any ignore patterns
-    ignore_pats = ignore_re.findall(result)
-    result = ignore_re.sub('', result)
-
-    try:
-        def fix_re(p):
-            if not p.startswith('^'):
-                p = '^' + p
-            if not p.endswith('$'):
-                p += '$'
-            return p
-
-        ignore_res = [re.compile(fix_re(p), re.MULTILINE) for p in ignore_pats]
-    except:
-        print('(bad ignore patterns %r)' % ignore_pats)
-        ignore_res = []
-
-    for r in ignore_res:
-        result = r.sub('', result)
-
-    result = result.strip()
-
-    if result == "ok":
-        result = None
-    elif result == "skip":
-        print("---", "skipped")  # FIXME: driver should include a reason
-        skipped.append(test)
-        continue
-    elif not result:
-        result = "(no output)"
-    status = out.close()
-    if status or result:
-        if status:
-            print("=== error", status)
-        if result:
-            if result[-3:] == "\nok":
-                # if there's an ok at the end, it's not really ok
-                result = result[:-3]
-            print(result)
-        failed.append(test)
-    else:
-        success += 1
-
-print("-"*68)
-
-temp_root = os.path.join(tempfile.gettempdir(), 'pillow-tests')
-tempfiles = glob.glob(os.path.join(temp_root, "temp_*"))
-if tempfiles:
-    print("===", "remaining temporary files")
-    for file in tempfiles:
-        print(file)
-    print("-"*68)
-
-
-def tests(n):
-    if n == 1:
-        return "1 test"
-    else:
-        return "%d tests" % n
-
-if skipped:
-    print("---", tests(len(skipped)), "skipped:")
-    print(", ".join(skipped))
-if failed:
-    failure = len(failed)
-    print("***", tests(failure), "of", (success + failure), "failed:")
-    print(", ".join(failed))
-    sys.exit(1)
-else:
-    print(tests(success), "passed.")
Tests/tester.py (388 lines removed)

@@ -1,388 +0,0 @@
-from __future__ import print_function
-
-# require that deprecation warnings are triggered
-import warnings
-warnings.simplefilter('default')
-# temporarily turn off resource warnings that warn about unclosed
-# files in the test scripts.
-try:
-    warnings.filterwarnings("ignore", category=ResourceWarning)
-except NameError:
-    # we expect a NameError on py2.x, since it doesn't have ResourceWarnings.
-    pass
-
-import sys
-py3 = (sys.version_info >= (3, 0))
-
-# some test helpers
-
-_target = None
-_tempfiles = []
-_logfile = None
-
-
-def success():
-    import sys
-    success.count += 1
-    if _logfile:
-        print(sys.argv[0], success.count, failure.count, file=_logfile)
-    return True
-
-
-def failure(msg=None, frame=None):
-    import sys
-    import linecache
-    failure.count += 1
-    if _target:
-        if frame is None:
-            frame = sys._getframe()
-            while frame.f_globals.get("__name__") != _target.__name__:
-                frame = frame.f_back
-        location = (frame.f_code.co_filename, frame.f_lineno)
-        prefix = "%s:%d: " % location
-        line = linecache.getline(*location)
-        print(prefix + line.strip() + " failed:")
-    if msg:
-        print("- " + msg)
-    if _logfile:
-        print(sys.argv[0], success.count, failure.count, file=_logfile)
-    return False
-
-success.count = failure.count = 0
-
-
-# predicates
-
-def assert_true(v, msg=None):
-    if v:
-        success()
-    else:
-        failure(msg or "got %r, expected true value" % v)
-
-
-def assert_false(v, msg=None):
-    if v:
-        failure(msg or "got %r, expected false value" % v)
-    else:
-        success()
-
-
-def assert_equal(a, b, msg=None):
-    if a == b:
-        success()
-    else:
-        failure(msg or "got %r, expected %r" % (a, b))
-
-
-def assert_almost_equal(a, b, msg=None, eps=1e-6):
-    if abs(a-b) < eps:
-        success()
-    else:
-        failure(msg or "got %r, expected %r" % (a, b))
-
-
-def assert_deep_equal(a, b, msg=None):
-    try:
-        if len(a) == len(b):
-            if all([x == y for x, y in zip(a, b)]):
-                success()
-            else:
-                failure(msg or "got %s, expected %s" % (a, b))
-        else:
-            failure(msg or "got length %s, expected %s" % (len(a), len(b)))
-    except:
-        assert_equal(a, b, msg)
-
-
-def assert_greater(a, b, msg=None):
-    if a > b:
-        success()
-    else:
-        failure(msg or "%r unexpectedly not greater than %r" % (a, b))
-
-
-def assert_greater_equal(a, b, msg=None):
-    if a >= b:
-        success()
-    else:
-        failure(
-            msg or "%r unexpectedly not greater than or equal to %r" % (a, b))
-
-
-def assert_less(a, b, msg=None):
-    if a < b:
-        success()
-    else:
-        failure(msg or "%r unexpectedly not less than %r" % (a, b))
-
-
-def assert_less_equal(a, b, msg=None):
-    if a <= b:
-        success()
-    else:
-        failure(
-            msg or "%r unexpectedly not less than or equal to %r" % (a, b))
-
-
-def assert_is_instance(a, b, msg=None):
-    if isinstance(a, b):
-        success()
-    else:
-        failure(msg or "got %r, expected %r" % (type(a), b))
-
-
-def assert_in(a, b, msg=None):
-    if a in b:
-        success()
-    else:
-        failure(msg or "%r unexpectedly not in %r" % (a, b))
-
-
-def assert_match(v, pattern, msg=None):
-    import re
-    if re.match(pattern, v):
-        success()
-    else:
-        failure(msg or "got %r, doesn't match pattern %r" % (v, pattern))
-
-
-def assert_exception(exc_class, func):
-    import sys
-    import traceback
-    try:
-        func()
-    except exc_class:
-        success()
-    except:
-        failure("expected %r exception, got %r" % (
-            exc_class.__name__, sys.exc_info()[0].__name__))
-        traceback.print_exc()
-    else:
-        failure("expected %r exception, got no exception" % exc_class.__name__)
-
-
-def assert_no_exception(func):
-    import sys
-    import traceback
-    try:
-        func()
-    except:
-        failure("expected no exception, got %r" % sys.exc_info()[0].__name__)
-        traceback.print_exc()
-    else:
-        success()
-
-
-def assert_warning(warn_class, func):
-    # note: this assert calls func three times!
-    import warnings
-
-    def warn_error(message, category=UserWarning, **options):
-        raise category(message)
-
-    def warn_ignore(message, category=UserWarning, **options):
-        pass
-
-    warn = warnings.warn
-    result = None
-    try:
-        warnings.warn = warn_ignore
-        assert_no_exception(func)
-        result = func()
-        warnings.warn = warn_error
-        assert_exception(warn_class, func)
-    finally:
-        warnings.warn = warn  # restore
-    return result
-
-
-# helpers
-
-from io import BytesIO
-
-
-def fromstring(data):
-    from PIL import Image
-    return Image.open(BytesIO(data))
-
-
-def tostring(im, format, **options):
-    out = BytesIO()
-    im.save(out, format, **options)
-    return out.getvalue()
-
-
-def lena(mode="RGB", cache={}):
-    from PIL import Image
-    im = cache.get(mode)
-    if im is None:
-        if mode == "RGB":
-            im = Image.open("Tests/images/lena.ppm")
-        elif mode == "F":
-            im = lena("L").convert(mode)
-        elif mode[:4] == "I;16":
-            im = lena("I").convert(mode)
-        else:
-            im = lena("RGB").convert(mode)
-        cache[mode] = im
-    return im
-
-
-def assert_image(im, mode, size, msg=None):
-    if mode is not None and im.mode != mode:
-        failure(msg or "got mode %r, expected %r" % (im.mode, mode))
-    elif size is not None and im.size != size:
-        failure(msg or "got size %r, expected %r" % (im.size, size))
-    else:
-        success()
-
-
-def assert_image_equal(a, b, msg=None):
-    if a.mode != b.mode:
-        failure(msg or "got mode %r, expected %r" % (a.mode, b.mode))
-    elif a.size != b.size:
-        failure(msg or "got size %r, expected %r" % (a.size, b.size))
-    elif a.tobytes() != b.tobytes():
-        failure(msg or "got different content")
-    else:
-        success()
-
-
-def assert_image_completely_equal(a, b, msg=None):
-    if a != b:
-        failure(msg or "images different")
-    else:
-        success()
-
-
-def assert_image_similar(a, b, epsilon, msg=None):
-    epsilon = float(epsilon)
-    if a.mode != b.mode:
-        return failure(msg or "got mode %r, expected %r" % (a.mode, b.mode))
-    elif a.size != b.size:
-        return failure(msg or "got size %r, expected %r" % (a.size, b.size))
-    diff = 0
-    try:
-        ord(b'0')
-        for abyte, bbyte in zip(a.tobytes(), b.tobytes()):
-            diff += abs(ord(abyte)-ord(bbyte))
-    except:
-        for abyte, bbyte in zip(a.tobytes(), b.tobytes()):
-            diff += abs(abyte-bbyte)
-    ave_diff = float(diff)/(a.size[0]*a.size[1])
-    if epsilon < ave_diff:
-        return failure(
-            msg or "average pixel value difference %.4f > epsilon %.4f" % (
-                ave_diff, epsilon))
-    else:
-        return success()
-
-
-def tempfile(template, *extra):
-    import os
-    import os.path
-    import sys
-    import tempfile
-    files = []
-    root = os.path.join(tempfile.gettempdir(), 'pillow-tests')
-    try:
-        os.mkdir(root)
-    except OSError:
-        pass
-    for temp in (template,) + extra:
-        assert temp[:5] in ("temp.", "temp_")
-        name = os.path.basename(sys.argv[0])
-        name = temp[:4] + os.path.splitext(name)[0][4:]
-        name = name + "_%d" % len(_tempfiles) + temp[4:]
-        name = os.path.join(root, name)
-        files.append(name)
-    _tempfiles.extend(files)
-    return files[0]
-
-
-# test runner
-
-def run():
-    global _target, _tests, run
-    import sys
-    import traceback
-    _target = sys.modules["__main__"]
-    run = None  # no need to run twice
-    tests = []
-    for name, value in list(vars(_target).items()):
-        if name[:5] == "test_" and type(value) is type(success):
-            tests.append((value.__code__.co_firstlineno, name, value))
-    tests.sort()  # sort by line
-    for lineno, name, func in tests:
-        try:
-            _tests = []
-            func()
-            for func, args in _tests:
-                func(*args)
-        except:
-            t, v, tb = sys.exc_info()
-            tb = tb.tb_next
-            if tb:
-                failure(frame=tb.tb_frame)
-                traceback.print_exception(t, v, tb)
-            else:
-                print("%s:%d: cannot call test function: %s" % (
-                    sys.argv[0], lineno, v))
-                failure.count += 1
-
-
-def yield_test(function, *args):
-    # collect delayed/generated tests
-    _tests.append((function, args))
-
-
-def skip(msg=None):
-    import os
-    print("skip")
-    os._exit(0)  # don't run exit handlers
-
-
-def ignore(pattern):
-    """Tells the driver to ignore messages matching the pattern, for the
-    duration of the current test."""
-    print('ignore: %s' % pattern)
-
-
-def _setup():
-    global _logfile
-
-    import sys
-    if "--coverage" in sys.argv:
-        # Temporary: ignore PendingDeprecationWarning from Coverage (Py3.4)
-        with warnings.catch_warnings():
-            warnings.simplefilter("ignore")
-            import coverage
-        cov = coverage.coverage(auto_data=True, include="PIL/*")
-        cov.start()
-
-    def report():
-        if run:
-            run()
-        if success.count and not failure.count:
-            print("ok")
-            # only clean out tempfiles if test passed
-            import os
-            import os.path
-            import tempfile
-            for file in _tempfiles:
-                try:
-                    os.remove(file)
-                except OSError:
-                    pass  # report?
-            temp_root = os.path.join(tempfile.gettempdir(), 'pillow-tests')
-            try:
-                os.rmdir(temp_root)
-            except OSError:
-                pass
-
-    import atexit
-    atexit.register(report)
-
-    if "--log" in sys.argv:
-        _logfile = open("test.log", "a")
-
-
-_setup()