#
# The Python Imaging Library.
# $Id$
#
# PDF (Acrobat) file handling
#
# History:
# 1996-07-16 fl   Created
# 1997-01-18 fl   Fixed header
# 2004-02-21 fl   Fixes for 1/L/CMYK images, etc.
# 2004-02-24 fl   Fixes for 1 and P images.
#
# Copyright (c) 1997-2004 by Secret Labs AB.  All rights reserved.
# Copyright (c) 1996-1997 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#

##
# Image plugin for PDF images (output only).
##
2018-01-31 02:35:55 +03:00
|
|
|
from . import Image, ImageFile, ImageSequence, PdfParser
|
2012-10-17 07:01:19 +04:00
|
|
|
import io
|
2018-07-29 07:49:58 +03:00
|
|
|
import os
|
2018-07-29 15:33:59 +03:00
|
|
|
import time
|
2010-07-31 06:52:47 +04:00
|
|
|
|
2018-01-18 16:33:11 +03:00
|
|
|
__version__ = "0.5"
|
2015-08-25 15:27:18 +03:00
|
|
|
|
2010-07-31 06:52:47 +04:00
|
#
# --------------------------------------------------------------------

# object ids:
# 1. catalogue
# 2. pages
# 3. image
# 4. page
# 5. page contents

def _save_all(im, fp, filename):
    """Save *im* to *fp* as a multi-page PDF (all frames and any
    images supplied via ``encoderinfo["append_images"]``)."""
    _save(im, fp, filename, save_all=True)

##
# (Internal) Image save plugin for the PDF format.

def _save(im, fp, filename, save_all=False):
    """Save an image (or image sequence) to *fp* as a PDF document.

    :param im: source image; its ``encoderinfo`` dict supplies the save
        options (see below).
    :param fp: file pointer to write to.
    :param filename: used for the default document title and passed to
        the JPEG sub-encoder.
    :param save_all: if true, save every frame of ``im`` plus any images
        in ``encoderinfo["append_images"]`` as additional pages.

    Recognized ``encoderinfo`` keys: ``append`` (add pages to an existing
    PDF instead of rewriting it), ``resolution`` (dpi used to compute the
    page media box), ``append_images``, and the document information keys
    ``title``, ``author``, ``subject``, ``keywords``, ``creator``,
    ``producer``, ``creationDate`` and ``modDate``.

    :raises ValueError: if an image mode is not supported.
    """
    is_appending = im.encoderinfo.get("append", False)
    # "r+b" keeps the existing objects of the file so new pages can be
    # appended; "w+b" starts a fresh document.
    if is_appending:
        existing_pdf = PdfParser.PdfParser(f=fp, filename=filename, mode="r+b")
    else:
        existing_pdf = PdfParser.PdfParser(f=fp, filename=filename, mode="w+b")

    resolution = im.encoderinfo.get("resolution", 72.0)

    # Document information dictionary defaults.  When appending, leave
    # the existing title/dates alone unless explicitly overridden.
    info = {
        "title": None if is_appending else os.path.splitext(
            os.path.basename(filename)
        )[0],
        "author": None,
        "subject": None,
        "keywords": None,
        "creator": None,
        "producer": None,
        "creationDate": None if is_appending else time.gmtime(),
        "modDate": None if is_appending else time.gmtime()
    }
    for k, default in info.items():
        v = im.encoderinfo.get(k, default)
        if v:
            # PDF info keys are capitalized ("title" -> "Title").
            existing_pdf.info[k[0].upper() + k[1:]] = v

    #
    # make sure image data is available
    im.load()

    existing_pdf.start_writing()
    existing_pdf.write_header()
    existing_pdf.write_comment("created by PIL PDF driver " + __version__)

    #
    # pages
    ims = [im]
    if save_all:
        append_images = im.encoderinfo.get("append_images", [])
        for append_im in append_images:
            # Appended images inherit the save options of the first one.
            append_im.encoderinfo = im.encoderinfo.copy()
            ims.append(append_im)
    image_refs = []
    page_refs = []
    contents_refs = []
    # Reserve object ids up front: one image, one page and one contents
    # object per frame of every image.
    for im in ims:
        im_numberOfPages = 1
        if save_all:
            try:
                im_numberOfPages = im.n_frames
            except AttributeError:
                # Image format does not have n_frames.
                # It is a single frame image
                pass
        for i in range(im_numberOfPages):
            image_refs.append(existing_pdf.next_object_id(0))
            page_refs.append(existing_pdf.next_object_id(0))
            contents_refs.append(existing_pdf.next_object_id(0))
            existing_pdf.pages.append(page_refs[-1])

    #
    # catalog and list of pages
    existing_pdf.write_catalog()

    pageNumber = 0
    for imSequence in ims:
        im_pages = ImageSequence.Iterator(imSequence) if save_all else [imSequence]
        for im in im_pages:
            # FIXME: Should replace ASCIIHexDecode with RunLengthDecode
            # (packbits) or LZWDecode (tiff/lzw compression). Note that
            # PDF 1.2 also supports Flatedecode (zip compression).

            bits = 8
            params = None

            # Pick the PDF stream filter, color space and procedure set
            # for the image mode.  (Named "decode_filter" so the builtin
            # filter() is not shadowed.)
            if im.mode == "1":
                decode_filter = "ASCIIHexDecode"
                colorspace = PdfParser.PdfName("DeviceGray")
                procset = "ImageB"  # grayscale
                bits = 1
            elif im.mode == "L":
                decode_filter = "DCTDecode"
                # params = "<< /Predictor 15 /Columns %d >>" % (width-2)
                colorspace = PdfParser.PdfName("DeviceGray")
                procset = "ImageB"  # grayscale
            elif im.mode == "P":
                decode_filter = "ASCIIHexDecode"
                palette = im.im.getpalette("RGB")
                colorspace = [
                    PdfParser.PdfName("Indexed"),
                    PdfParser.PdfName("DeviceRGB"),
                    255,
                    PdfParser.PdfBinary(palette)
                ]
                procset = "ImageI"  # indexed color
            elif im.mode == "RGB":
                decode_filter = "DCTDecode"
                colorspace = PdfParser.PdfName("DeviceRGB")
                procset = "ImageC"  # color images
            elif im.mode == "CMYK":
                decode_filter = "DCTDecode"
                colorspace = PdfParser.PdfName("DeviceCMYK")
                procset = "ImageC"  # color images
            else:
                raise ValueError("cannot save mode %s" % im.mode)

            #
            # image

            op = io.BytesIO()

            if decode_filter == "ASCIIHexDecode":
                if bits == 1:
                    # FIXME: the hex encoder doesn't support packed 1-bit
                    # images; do things the hard way...
                    data = im.tobytes("raw", "1")
                    im = Image.new("L", (len(data), 1), None)
                    im.putdata(data)
                ImageFile._save(im, op, [("hex", (0, 0)+im.size, 0, im.mode)])
            elif decode_filter == "DCTDecode":
                Image.SAVE["JPEG"](im, op, filename)
            elif decode_filter == "FlateDecode":
                ImageFile._save(im, op, [("zip", (0, 0)+im.size, 0, im.mode)])
            elif decode_filter == "RunLengthDecode":
                ImageFile._save(im, op,
                                [("packbits", (0, 0)+im.size, 0, im.mode)])
            else:
                raise ValueError("unsupported PDF filter (%s)" % decode_filter)

            #
            # Get image characteristics

            width, height = im.size

            # NOTE: the spec-defined key is /DecodeParms (sic), not
            # /DecodeParams -- see ISO 32000-1, stream dictionaries.
            existing_pdf.write_obj(image_refs[pageNumber],
                                   stream=op.getvalue(),
                                   Type=PdfParser.PdfName("XObject"),
                                   Subtype=PdfParser.PdfName("Image"),
                                   Width=width,  # * 72.0 / resolution,
                                   Height=height,  # * 72.0 / resolution,
                                   Filter=PdfParser.PdfName(decode_filter),
                                   BitsPerComponent=bits,
                                   DecodeParms=params,
                                   ColorSpace=colorspace)

            #
            # page

            existing_pdf.write_page(page_refs[pageNumber],
                                    Resources=PdfParser.PdfDict(
                                        ProcSet=[
                                            PdfParser.PdfName("PDF"),
                                            PdfParser.PdfName(procset)
                                        ],
                                        XObject=PdfParser.PdfDict(
                                            image=image_refs[pageNumber]
                                        )
                                    ),
                                    MediaBox=[
                                        0,
                                        0,
                                        int(width * 72.0 / resolution),
                                        int(height * 72.0 / resolution)
                                    ],
                                    Contents=contents_refs[pageNumber])

            #
            # page contents

            # Scale the unit-square image to the full media box and
            # paint it ("q ... cm /image Do Q").
            page_contents = PdfParser.make_bytes(
                "q %d 0 0 %d 0 0 cm /image Do Q\n" % (
                    int(width * 72.0 / resolution),
                    int(height * 72.0 / resolution)))

            existing_pdf.write_obj(contents_refs[pageNumber],
                                   stream=page_contents)

            pageNumber += 1

    #
    # trailer
    existing_pdf.write_xref_and_trailer()
    if hasattr(fp, "flush"):
        fp.flush()
    existing_pdf.close()

#
# --------------------------------------------------------------------

# Register the PDF save handlers, file extension and MIME type with the
# Image plugin registry.  PDF is output-only: no open/load plugin exists.
Image.register_save("PDF", _save)
Image.register_save_all("PDF", _save_all)

Image.register_extension("PDF", ".pdf")

Image.register_mime("PDF", "application/pdf")