Give correct extrema for I;16 format images

Currently gives None for a 16-bit greyscale image rather than the
true min and max values in the 0-65535 range. The internal
ImagingGetProjection function already supports I;16, but the
_getextrema function needs to know how to unpack the result.
This commit is contained in:
Martin Packman 2018-09-17 16:30:42 +01:00
parent 4c47628900
commit 0b3036454c
3 changed files with 13 additions and 0 deletions

Binary file not shown.

View File

@@ -1,3 +1,4 @@
from PIL import Image
from helper import unittest, PillowTestCase, hopper
@@ -19,6 +20,13 @@ class TestImageGetExtrema(PillowTestCase):
extrema("RGBA"), ((0, 255), (0, 255), (0, 255), (255, 255)))
self.assertEqual(
extrema("CMYK"), (((0, 255), (0, 255), (0, 255), (0, 0))))
self.assertEqual(extrema("I;16"), (0, 255))
def test_true_16(self):
im = Image.open("Tests/images/16_bit_noise.tif")
self.assertEqual(im.mode, 'I;16')
extrema = im.getextrema()
self.assertEqual(extrema, (106, 285))
if __name__ == '__main__':

View File

@@ -1998,6 +1998,7 @@ _getextrema(ImagingObject* self, PyObject* args)
UINT8 u[2];
INT32 i[2];
FLOAT32 f[2];
UINT16 s[2];
} extrema;
int status;
@@ -2013,6 +2014,10 @@ _getextrema(ImagingObject* self, PyObject* args)
return Py_BuildValue("ii", extrema.i[0], extrema.i[1]);
case IMAGING_TYPE_FLOAT32:
return Py_BuildValue("dd", extrema.f[0], extrema.f[1]);
case IMAGING_TYPE_SPECIAL:
if (strcmp(self->image->mode, "I;16") == 0) {
return Py_BuildValue("HH", extrema.s[0], extrema.s[1]);
}
}
Py_INCREF(Py_None);