Removed tokens_read variable

Andrew Murray 2022-03-13 13:36:26 +11:00
parent 4fcef0d163
commit e32a94e835


@@ -174,11 +174,10 @@ class PpmPlainDecoder(ImageFile.PyDecoder):
         are exactly one byte, and so the inter-token whitespace is optional.
         """
         decoded_data = bytearray()
-        total_tokens = self.size
+        total_bytes = self.size
         comment_spans = False
-        tokens_read = 0
-        while True:
+        while len(decoded_data) != total_bytes:
             block = self._read_block()  # read next block
             if not block:
                 raise ValueError("Reached EOF while reading data")
@@ -194,15 +193,12 @@ class PpmPlainDecoder(ImageFile.PyDecoder):
             block, comment_spans = self._ignore_comments(block)
             tokens = b"".join(block.split())
             for token in tokens:
                 if token not in (48, 49):
                     raise ValueError(f"Invalid token for this mode: {bytes([token])}")
-                tokens_read += 1
-                decoded_data.append(token)
-                if tokens_read == total_tokens:  # finished!
-                    invert = bytes.maketrans(b"01", b"\xFF\x00")
-                    return decoded_data.translate(invert)
+            decoded_data = (decoded_data + tokens)[:total_bytes]
+        invert = bytes.maketrans(b"01", b"\xFF\x00")
+        return decoded_data.translate(invert)

     def _decode_blocks(self, channels=1, depth=8):
         decoded_data = bytearray()
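Once all tokens are collected, the bitonal path converts the ASCII "0"/"1" bytes directly into pixel values with a translation table; a quick standard-library sketch of that mapping (nothing Pillow-specific):

# In plain PBM, "1" means black and "0" means white, so the table maps
# b"0" -> 0xFF and b"1" -> 0x00 for an 8-bit buffer.
invert = bytes.maketrans(b"01", b"\xFF\x00")

tokens = b"0110"                    # as produced by b"".join(block.split())
print(tokens.translate(invert))     # b'\xff\x00\x00\xff'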
@@ -210,12 +206,11 @@ class PpmPlainDecoder(ImageFile.PyDecoder):
         maxval = 2 ** (31 if depth == 32 else depth) - 1
         max_len = 10
         bytes_per_sample = depth // 8
-        total_tokens = self.size * channels
+        total_bytes = self.size * channels * bytes_per_sample
         comment_spans = False
         half_token = False
-        tokens_read = 0
-        while True:
+        while len(decoded_data) != total_bytes:
             block = self._read_block()  # read next block
             if not block:
                 if half_token:
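Since the loop now measures progress in output bytes rather than parsed tokens, the target gains the `* bytes_per_sample` factor; a small arithmetic sketch with assumed example dimensions (not values from the commit):

# Assumed example: a 4x3 RGB image with 16-bit samples.
xsize, ysize, channels, depth = 4, 3, 3, 16

size = xsize * ysize                               # pixels, as set in decode()
bytes_per_sample = depth // 8                      # 2 bytes per 16-bit sample
total_tokens = size * channels                     # old target: 36 tokens
total_bytes = size * channels * bytes_per_sample   # new target: 72 bytes

# Each token appends bytes_per_sample bytes to decoded_data, so both
# targets describe the same stopping point.
assert total_bytes == total_tokens * bytes_per_sample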
@@ -251,12 +246,12 @@ class PpmPlainDecoder(ImageFile.PyDecoder):
                         f"Token too long found in data: {token[:max_len + 1]}"
                     )
                 token = int(token)
-                tokens_read += 1
                 if token > maxval:
                     raise ValueError(f"Channel value too large for this mode: {token}")
                 decoded_data += token.to_bytes(bytes_per_sample, "big")
-                if tokens_read == total_tokens:  # finished!
-                    return decoded_data
+                if len(decoded_data) == total_bytes:  # finished!
+                    break
+        return decoded_data

     def decode(self, buffer):
         self.size = self.state.xsize * self.state.ysize
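A minimal sketch (illustrative names, not Pillow code) of why the inner `break` is enough here: once the target length is reached, the outer `while` condition also becomes false, so control falls through to the single `return` at the end.

# Hypothetical stand-ins for the decoder's block reader and token parser.
def drain(blocks, total_bytes):
    decoded_data = bytearray()
    blocks = iter(blocks)
    while len(decoded_data) != total_bytes:
        block = next(blocks)                       # read next block
        for value in block:
            decoded_data += value.to_bytes(1, "big")
            if len(decoded_data) == total_bytes:   # finished!
                break                              # leaves the for loop ...
        # ... and the while condition now fails, so the loop ends too.
    return decoded_data

print(drain([[1, 2, 3], [4, 5]], 4))               # bytearray(b'\x01\x02\x03\x04')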