Stop working with bytes on the markdown parser
commit 59a1a6aef2
parent 34fe150096
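In short: rather than encoding the whole message to utf-16le bytes and halving byte indices, the parser now stays on str and only widens characters outside the Basic Multilingual Plane into their surrogate pairs, so plain string indices already equal Telegram's UTF-16 offsets. A minimal standalone sketch of the two helpers this commit introduces (the sample string and printed values are illustrative, not part of the commit):

    import struct

    def _add_surrogate(text):
        return ''.join(
            # Characters outside the BMP are widened to their UTF-16 surrogate pair.
            ''.join(chr(y) for y in struct.unpack('<HH', x.encode('utf-16le')))
            if (0x10000 <= ord(x) <= 0x10FFFF) else x for x in text
        )

    def _del_surrogate(text):
        return text.encode('utf-16', 'surrogatepass').decode('utf-16')

    s = '🎉 hi'
    print(len(s))                                  # 4 code points
    print(len(_add_surrogate(s)))                  # 5 UTF-16 units, as Telegram counts them
    print(_del_surrogate(_add_surrogate(s)) == s)  # True: the round trip is lossless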
@@ -4,6 +4,7 @@ for use within the library, which attempts to handle emojies correctly,
 since they seem to count as two characters and it's a bit strange.
 """
 import re
+import struct
 
 from ..tl import TLObject
 
@@ -20,15 +21,24 @@ DEFAULT_DELIMITERS = {
     '```': MessageEntityPre
 }
 
-# Regex used to match utf-16le encoded r'\[(.+?)\]\((.+?)\)',
-# reason why there's '\0' after every match-literal character.
-DEFAULT_URL_RE = re.compile(b'\\[\0(.+?)\\]\0\\(\0(.+?)\\)\0')
+# Regex used to match r'\[(.+?)\]\((.+?)\)' (for URLs).
+DEFAULT_URL_RE = re.compile(r'\[(.+?)\]\((.+?)\)')
 
 # Reverse operation for DEFAULT_URL_RE. {0} for text, {1} for URL.
 DEFAULT_URL_FORMAT = '[{0}]({1})'
 
-# Encoding to be used
-ENC = 'utf-16le'
+
+def _add_surrogate(text):
+    return ''.join(
+        # SMP -> Surrogate Pairs (Telegram offsets are calculated with these).
+        # See https://en.wikipedia.org/wiki/Plane_(Unicode)#Overview for more.
+        ''.join(chr(y) for y in struct.unpack('<HH', x.encode('utf-16le')))
+        if (0x10000 <= ord(x) <= 0x10FFFF) else x for x in text
+    )
+
+
+def _del_surrogate(text):
+    return text.encode('utf-16', 'surrogatepass').decode('utf-16')
 
 
 def parse(message, delimiters=None, url_re=None):
@@ -43,8 +53,7 @@ def parse(message, delimiters=None, url_re=None):
     """
     if url_re is None:
         url_re = DEFAULT_URL_RE
-    elif url_re:
-        if isinstance(url_re, bytes):
-            url_re = re.compile(url_re)
+    elif isinstance(url_re, str):
+        url_re = re.compile(url_re)
 
     if not delimiters:
@@ -52,8 +61,6 @@ def parse(message, delimiters=None, url_re=None):
             return message, []
         delimiters = DEFAULT_DELIMITERS
 
-    delimiters = {k.encode(ENC): v for k, v in delimiters.items()}
-
     # Cannot use a for loop because we need to skip some indices
     i = 0
     result = []
@@ -62,7 +69,7 @@ def parse(message, delimiters=None, url_re=None):
 
     # Work on byte level with the utf-16le encoding to get the offsets right.
     # The offset will just be half the index we're at.
-    message = message.encode(ENC)
+    message = _add_surrogate(message)
     while i < len(message):
         if url_re and current is None:
             # If we're not inside a previous match since Telegram doesn't allow
@@ -70,15 +77,15 @@ def parse(message, delimiters=None, url_re=None):
             url_match = url_re.match(message, pos=i)
             if url_match:
                 # Replace the whole match with only the inline URL text.
-                message = b''.join((
+                message = ''.join((
                     message[:url_match.start()],
                     url_match.group(1),
                     message[url_match.end():]
                 ))
 
                 result.append(MessageEntityTextUrl(
-                    offset=i // 2, length=len(url_match.group(1)) // 2,
-                    url=url_match.group(2).decode(ENC)
+                    offset=i, length=len(url_match.group(1)),
+                    url=_del_surrogate(url_match.group(2))
                 ))
                 i += len(url_match.group(1))
                 # Next loop iteration, don't check delimiters, since
@@ -103,16 +110,16 @@ def parse(message, delimiters=None, url_re=None):
                     message = message[:i] + message[i + len(d):]
                     if m == MessageEntityPre:
                         # Special case, also has 'lang'
-                        current = m(i // 2, None, '')
+                        current = m(i, None, '')
                     else:
-                        current = m(i // 2, None)
+                        current = m(i, None)
 
                     end_delimiter = d  # We expect the same delimiter.
                     break
 
         elif message[i:i + len(end_delimiter)] == end_delimiter:
             message = message[:i] + message[i + len(end_delimiter):]
-            current.length = (i // 2) - current.offset
+            current.length = i - current.offset
             result.append(current)
             current, end_delimiter = None, None
             # Don't increment i here as we matched a delimiter,
@@ -121,19 +128,19 @@ def parse(message, delimiters=None, url_re=None):
             # as we already know there won't be the same right after.
             continue
 
-        # Next iteration, utf-16 encoded characters need 2 bytes.
-        i += 2
+        # Next iteration
+        i += 1
 
     # We may have found some a delimiter but not its ending pair.
     # If this is the case, we want to insert the delimiter character back.
     if current is not None:
         message = (
-            message[:2 * current.offset]
+            message[:current.offset]
             + end_delimiter
-            + message[2 * current.offset:]
+            + message[current.offset:]
         )
 
-    return message.decode(ENC), result
+    return _del_surrogate(message), result
 
 
 def unparse(text, entities, delimiters=None, url_fmt=None):
@@ -158,29 +165,21 @@ def unparse(text, entities, delimiters=None, url_fmt=None):
     else:
         entities = tuple(sorted(entities, key=lambda e: e.offset, reverse=True))
 
-    # Reverse the delimiters, and encode them as utf16
-    delimiters = {v: k.encode(ENC) for k, v in delimiters.items()}
-    text = text.encode(ENC)
+    text = _add_surrogate(text)
     for entity in entities:
-        s = entity.offset * 2
-        e = (entity.offset + entity.length) * 2
+        s = entity.offset
+        e = entity.offset + entity.length
         delimiter = delimiters.get(type(entity), None)
         if delimiter:
             text = text[:s] + delimiter + text[s:e] + delimiter + text[e:]
         elif isinstance(entity, MessageEntityTextUrl) and url_fmt:
-            # If byte-strings supported .format(), we could have converted
-            # the str url_fmt to a byte-string with the following regex:
-            # re.sub(b'{\0\s*(?:([01])\0)?\s*}\0',rb'{\1}',url_fmt.encode(ENC))
-            #
-            # This would preserve {}, {0} and {1}.
-            # Alternatively (as it's done), we can decode/encode it every time.
             text = (
                 text[:s] +
-                url_fmt.format(text[s:e].decode(ENC), entity.url).encode(ENC) +
+                _add_surrogate(url_fmt.format(text[s:e], entity.url)) +
                 text[e:]
             )
 
-    return text.decode(ENC)
+    return _del_surrogate(text)
 
 
 def get_inner_text(text, entity):
@@ -198,11 +197,11 @@ def get_inner_text(text, entity):
     else:
         multiple = False
 
-    text = text.encode(ENC)
+    text = _add_surrogate(text)
     result = []
     for e in entity:
-        start = e.offset * 2
-        end = (e.offset + e.length) * 2
-        result.append(text[start:end].decode(ENC))
+        start = e.offset
+        end = e.offset + e.length
+        result.append(_del_surrogate(text[start:end]))
 
     return result if multiple else result[0]
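For reference, a rough sketch of what the change means for callers of parse() (the import path telethon.extensions.markdown and the printed values are assumptions for illustration, inferred from the relative ..tl import above):

    from telethon.extensions import markdown  # assumed module path

    text, entities = markdown.parse('🎉 **hello**')
    ent = entities[0]
    print(text)                    # 🎉 hello
    print(ent.offset, ent.length)  # 3 5 -> the emoji counts as two UTF-16 units
    # Before this commit the same offsets came from encoding to utf-16le bytes
    # and halving byte indices; now plain str indices over the surrogated text
    # line up with Telegram's expectations directly.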