Merge pull request #2022 from uploadcare/rotation-pixel-center

Respect pixel centers during transform
commit 5d2667efda by wiredfool, 2016-08-25 11:40:09 +01:00, committed by GitHub
5 changed files with 151 additions and 25 deletions
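
The change samples every geometric transform at pixel centers: a destination pixel with integer index (x, y) is mapped through the matrix at (x + 0.5, y + 0.5) rather than at its top-left corner. A minimal sketch of that convention, using PIL's documented 6-tuple AFFINE layout (the helper below is illustrative, not library code):

```python
# Sketch: PIL's AFFINE data is (a, b, c, d, e, f); an output pixel (x, y)
# is filled from source position (a*x + b*y + c, d*x + e*y + f).
# After this PR the lookup happens at the pixel center, i.e. at x + 0.5, y + 0.5.

def source_position(data, x, y):
    a, b, c, d, e, f = data
    xc, yc = x + 0.5, y + 0.5          # center of the destination pixel
    return a * xc + b * yc + c, d * xc + e * yc + f

# With the identity matrix, destination pixel (0, 0) now reads from (0.5, 0.5),
# the center of source pixel (0, 0), rather than from its corner at (0, 0).
print(source_position((1, 0, 0, 0, 1, 0), 0, 0))   # (0.5, 0.5)
```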

PIL/Image.py

@@ -1881,7 +1881,7 @@ class Image(object):
xs = float(x1 - x0) / w
ys = float(y1 - y0) / h
method = AFFINE
data = (xs, 0, x0 + xs/2, 0, ys, y0 + ys/2)
data = (xs, 0, x0, 0, ys, y0)
elif method == PERSPECTIVE:
data = data[0:8]
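
With the half pixel now added on the C side, the EXTENT tuple drops its xs/2, ys/2 bias and the sampled positions stay the same: for destination pixel x the source coordinate is x0 + xs*(x + 0.5), exactly what the old corner-based tuple produced at integer x. A quick numeric check of that equivalence (plain Python, arbitrary values):

```python
# Old: corner-based map with a pre-added half step in the constant term.
# New: center-based map with the plain extent origin.
x0, xs = 3.0, 0.5                       # arbitrary extent origin and step
for x in range(8):
    old = xs * x + (x0 + xs / 2)        # data = (xs, 0, x0 + xs/2, ...)
    new = xs * (x + 0.5) + x0           # data = (xs, 0, x0, ...) at x + 0.5
    assert abs(old - new) < 1e-12
```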

(Binary file changed, not shown; 2.1 KiB before, 2.1 KiB after.)

Tests/test_image_getdata.py

@@ -20,13 +20,13 @@ class TestImageGetData(PillowTestCase):
return data[0], len(data), len(list(data))
self.assertEqual(getdata("1"), (0, 960, 960))
self.assertEqual(getdata("L"), (25, 960, 960))
self.assertEqual(getdata("I"), (25, 960, 960))
self.assertEqual(getdata("F"), (25.0, 960, 960))
self.assertEqual(getdata("RGB"), (((20, 20, 70), 960, 960)))
self.assertEqual(getdata("RGBA"), ((20, 20, 70, 255), 960, 960))
self.assertEqual(getdata("CMYK"), ((235, 235, 185, 0), 960, 960))
self.assertEqual(getdata("YCbCr"), ((25, 153, 123), 960, 960))
self.assertEqual(getdata("L"), (16, 960, 960))
self.assertEqual(getdata("I"), (16, 960, 960))
self.assertEqual(getdata("F"), (16.0, 960, 960))
self.assertEqual(getdata("RGB"), (((11, 13, 52), 960, 960)))
self.assertEqual(getdata("RGBA"), ((11, 13, 52, 255), 960, 960))
self.assertEqual(getdata("CMYK"), ((244, 242, 203, 0), 960, 960))
self.assertEqual(getdata("YCbCr"), ((16, 147, 123), 960, 960))
if __name__ == '__main__':

Tests/test_image_transform.py

@@ -1,3 +1,5 @@
import math
from helper import unittest, PillowTestCase, hopper
from PIL import Image
@@ -98,7 +100,7 @@ class TestImageTransform(PillowTestCase):
def test_alpha_premult_resize(self):
def op(im, sz):
return im.resize(sz, Image.LINEAR)
return im.resize(sz, Image.BILINEAR)
self._test_alpha_premult(op)
@@ -139,5 +141,117 @@ class TestImageTransform(PillowTestCase):
self.test_mesh()
class TestImageTransformAffine(PillowTestCase):
transform = Image.AFFINE
def _test_image(self):
im = hopper('RGB')
return im.crop((10, 20, im.width - 10, im.height - 20))
def _test_rotate(self, deg, transpose):
im = self._test_image()
angle = - math.radians(deg)
matrix = [
round(math.cos(angle), 15), round(math.sin(angle), 15), 0.0,
round(-math.sin(angle), 15), round(math.cos(angle), 15), 0.0,
0, 0]
matrix[2] = (1 - matrix[0] - matrix[1]) * im.width / 2
matrix[5] = (1 - matrix[3] - matrix[4]) * im.height / 2
if transpose is not None:
transposed = im.transpose(transpose)
else:
transposed = im
for resample in [Image.NEAREST, Image.BILINEAR, Image.BICUBIC]:
transformed = im.transform(transposed.size, self.transform,
matrix, resample)
self.assert_image_equal(transposed, transformed)
def test_rotate_0_deg(self):
self._test_rotate(0, None)
def test_rotate_90_deg(self):
self._test_rotate(90, Image.ROTATE_90)
def test_rotate_180_deg(self):
self._test_rotate(180, Image.ROTATE_180)
def test_rotate_270_deg(self):
self._test_rotate(270, Image.ROTATE_270)
def _test_resize(self, scale, epsilonscale):
im = self._test_image()
size_up = int(round(im.width * scale)), int(round(im.height * scale))
matrix_up = [
1 / scale, 0, 0,
0, 1 / scale, 0,
0, 0]
matrix_down = [
scale, 0, 0,
0, scale, 0,
0, 0]
for resample, epsilon in [(Image.NEAREST, 0),
(Image.BILINEAR, 2), (Image.BICUBIC, 1)]:
transformed = im.transform(
size_up, self.transform, matrix_up, resample)
transformed = transformed.transform(
im.size, self.transform, matrix_down, resample)
self.assert_image_similar(transformed, im, epsilon * epsilonscale)
def test_resize_1_1x(self):
self._test_resize(1.1, 6.9)
def test_resize_1_5x(self):
self._test_resize(1.5, 5.5)
def test_resize_2_0x(self):
self._test_resize(2.0, 5.5)
def test_resize_2_3x(self):
self._test_resize(2.3, 3.7)
def test_resize_2_5x(self):
self._test_resize(2.5, 3.7)
def _test_translate(self, x, y, epsilonscale):
im = self._test_image()
size_up = int(round(im.width + x)), int(round(im.height + y))
matrix_up = [
1, 0, -x,
0, 1, -y,
0, 0]
matrix_down = [
1, 0, x,
0, 1, y,
0, 0]
for resample, epsilon in [(Image.NEAREST, 0),
(Image.BILINEAR, 1.5), (Image.BICUBIC, 1)]:
transformed = im.transform(
size_up, self.transform, matrix_up, resample)
transformed = transformed.transform(
im.size, self.transform, matrix_down, resample)
self.assert_image_similar(transformed, im, epsilon * epsilonscale)
def test_translate_0_1(self):
self._test_translate(.1, 0, 3.7)
def test_translate_0_6(self):
self._test_translate(.6, 0, 9.1)
def test_translate_50(self):
self._test_translate(50, 50, 0)
class TestImageTransformPerspective(TestImageTransformAffine):
# Repeat all tests for AFFINE transformations with PERSPECTIVE
transform = Image.PERSPECTIVE
if __name__ == '__main__':
unittest.main()
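
As a usage note alongside the new translate tests (a minimal sketch, not part of the PR; the image path is illustrative): Image.transform takes the destination-to-source matrix, so shifting content right/down by (dx, dy) puts -dx and -dy in the constant terms, just as _test_translate does above.

```python
from PIL import Image

im = Image.open("hopper.png")              # any RGB image; path is illustrative
dx, dy = 50, 30
shifted = im.transform(
    (im.width + dx, im.height + dy),       # leave room for the shifted content
    Image.AFFINE,
    (1, 0, -dx, 0, 1, -dy),                # output (x, y) reads source (x - dx, y - dy)
    resample=Image.NEAREST,
)
```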

libImaging/Geometry.c

@@ -240,7 +240,7 @@ ImagingRotate270(Imaging imOut, Imaging imIn)
/* transform primitives (ImagingTransformMap) */
static int
affine_transform(double* xin, double* yin, int x, int y, void* data)
affine_transform(double* xout, double* yout, int x, int y, void* data)
{
/* full moon tonight. your compiler will generate bogus code
for simple expressions, unless you reorganize the code, or
@@ -250,28 +250,34 @@ affine_transform(double* xin, double* yin, int x, int y, void* data)
double a0 = a[0]; double a1 = a[1]; double a2 = a[2];
double a3 = a[3]; double a4 = a[4]; double a5 = a[5];
xin[0] = a0*x + a1*y + a2;
yin[0] = a3*x + a4*y + a5;
double xin = x + 0.5;
double yin = y + 0.5;
xout[0] = a0*xin + a1*yin + a2;
yout[0] = a3*xin + a4*yin + a5;
return 1;
}
static int
perspective_transform(double* xin, double* yin, int x, int y, void* data)
perspective_transform(double* xout, double* yout, int x, int y, void* data)
{
double* a = (double*) data;
double a0 = a[0]; double a1 = a[1]; double a2 = a[2];
double a3 = a[3]; double a4 = a[4]; double a5 = a[5];
double a6 = a[6]; double a7 = a[7];
xin[0] = (a0*x + a1*y + a2) / (a6*x + a7*y + 1);
yin[0] = (a3*x + a4*y + a5) / (a6*x + a7*y + 1);
double xin = x + 0.5;
double yin = y + 0.5;
xout[0] = (a0*xin + a1*yin + a2) / (a6*xin + a7*yin + 1);
yout[0] = (a3*xin + a4*yin + a5) / (a6*xin + a7*yin + 1);
return 1;
}
static int
quad_transform(double* xin, double* yin, int x, int y, void* data)
quad_transform(double* xout, double* yout, int x, int y, void* data)
{
/* quad warp: map quadrilateral to rectangle */
@@ -279,8 +285,11 @@ quad_transform(double* xin, double* yin, int x, int y, void* data)
double a0 = a[0]; double a1 = a[1]; double a2 = a[2]; double a3 = a[3];
double a4 = a[4]; double a5 = a[5]; double a6 = a[6]; double a7 = a[7];
xin[0] = a0 + a1*x + a2*y + a3*x*y;
yin[0] = a4 + a5*x + a6*y + a7*x*y;
double xin = x + 0.5;
double yin = y + 0.5;
xout[0] = a0 + a1*xin + a2*yin + a3*xin*yin;
yout[0] = a4 + a5*xin + a6*yin + a7*xin*yin;
return 1;
}
@@ -691,8 +700,8 @@ ImagingScaleAffine(Imaging imOut, Imaging imIn,
return (Imaging) ImagingError_MemoryError();
}
xo = a[2];
yo = a[5];
xo = a[2] + a[0] * 0.5;
yo = a[5] + a[4] * 0.5;
xmin = x1;
xmax = x0;
@@ -771,8 +780,10 @@ affine_fixed(Imaging imOut, Imaging imIn,
/* use 16.16 fixed point arithmetics */
#define FIX(v) FLOOR((v)*65536.0 + 0.5)
a0 = FIX(a[0]); a1 = FIX(a[1]); a2 = FIX(a[2]);
a3 = FIX(a[3]); a4 = FIX(a[4]); a5 = FIX(a[5]);
a0 = FIX(a[0]); a1 = FIX(a[1]);
a3 = FIX(a[3]); a4 = FIX(a[4]);
a2 = FIX(a[2] + a[0] * 0.5 + a[1] * 0.5);
a5 = FIX(a[5] + a[3] * 0.5 + a[4] * 0.5);
#undef FIX
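
The xo/yo and FIX changes above rely on a simple identity: evaluating the affine map at the pixel center equals pre-folding the half-pixel step into the constant term, a0*(x + 0.5) + a1*(y + 0.5) + a2 = a0*x + a1*y + (a2 + 0.5*a0 + 0.5*a1), so the scanline and fixed-point paths pay the offset once instead of per pixel. A quick check of the identity (plain Python, arbitrary coefficients):

```python
a0, a1, a2 = 0.75, -0.25, 10.0          # arbitrary affine coefficients
for x in range(4):
    for y in range(4):
        centered = a0 * (x + 0.5) + a1 * (y + 0.5) + a2
        folded = a0 * x + a1 * y + (a2 + 0.5 * a0 + 0.5 * a1)
        assert abs(centered - folded) < 1e-12
```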
@@ -835,9 +846,10 @@ ImagingTransformAffine(Imaging imOut, Imaging imIn,
filterid, fill);
}
if (a[1] == 0 && a[3] == 0)
if (a[1] == 0 && a[3] == 0) {
/* Scaling */
return ImagingScaleAffine(imOut, imIn, x0, y0, x1, y1, a, fill);
}
if (!imOut || !imIn || strcmp(imIn->mode, imOut->mode) != 0)
return (Imaging) ImagingError_ModeError();
@@ -867,8 +879,8 @@ ImagingTransformAffine(Imaging imOut, Imaging imIn,
xsize = (int) imIn->xsize;
ysize = (int) imIn->ysize;
xo = a[2];
yo = a[5];
xo = a[2] + a[1] * 0.5 + a[0] * 0.5;
yo = a[5] + a[4] * 0.5 + a[3] * 0.5;
#define AFFINE_TRANSFORM(pixel, image)\
for (y = y0; y < y1; y++) {\