Telethon/telethon/extensions/markdown.py

"""
Simple markdown parser which does not support nesting. Intended primarily
for use within the library, which attempts to handle emoji correctly,
since they count as two UTF-16 code units and offsets are easy to get wrong.
"""
import re
from ..helpers import add_surrogate, del_surrogate, strip_text
from ..tl import TLObject
from ..tl.types import (
    MessageEntityBold, MessageEntityItalic, MessageEntityCode,
    MessageEntityPre, MessageEntityTextUrl, MessageEntityMentionName
)

DEFAULT_DELIMITERS = {
    '**': MessageEntityBold,
    '__': MessageEntityItalic,
    '`': MessageEntityCode,
    '```': MessageEntityPre
}

DEFAULT_URL_RE = re.compile(r'\[([\S\s]+?)\]\((.+?)\)')
DEFAULT_URL_FORMAT = '[{0}]({1})'
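# For quick reference (a sketch, not exhaustive): with the defaults above,
# '**bold**', '__italic__', '`code`' and '```pre```' map to the entity types
# in DEFAULT_DELIMITERS, while '[text](https://example.com)' is matched by
# DEFAULT_URL_RE and becomes a MessageEntityTextUrl.

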
def parse(message, delimiters=None, url_re=None):
"""
2017-11-26 19:14:28 +03:00
Parses the given markdown message and returns its stripped representation
plus a list of the MessageEntity's that were found.
2017-10-29 18:33:10 +03:00
2017-11-26 19:14:28 +03:00
:param message: the message with markdown-like syntax to be parsed.
:param delimiters: the delimiters to be used, {delimiter: type}.
:param url_re: the URL bytes regex to be used. Must have two groups.
:return: a tuple consisting of (clean message, [message entities]).
"""
    if not message:
        return message, []

    if url_re is None:
        url_re = DEFAULT_URL_RE
    elif isinstance(url_re, str):
        url_re = re.compile(url_re)

    if not delimiters:
        if delimiters is not None:
            return message, []
        delimiters = DEFAULT_DELIMITERS

    # Cannot use a for loop because we need to skip some indices
    i = 0
    result = []
    current = None
    end_delimiter = None

    # Work at the UTF-16 code-unit level (via surrogate pairs) so that the
    # entity offsets and lengths match what Telegram expects.
    message = add_surrogate(message)
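    # For example (illustrative): len(add_surrogate('😀 hi')) == 5, since the
    # emoji becomes a surrogate pair, matching Telegram's UTF-16 based offsets.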
    while i < len(message):
        if url_re and current is None:
            # If we're not inside a previous match since Telegram doesn't allow
            # nested message entities, try matching the URL from the i'th pos.
            url_match = url_re.match(message, pos=i)
            if url_match:
                # Replace the whole match with only the inline URL text.
                message = ''.join((
                    message[:url_match.start()],
                    url_match.group(1),
                    message[url_match.end():]
                ))

                result.append(MessageEntityTextUrl(
                    offset=url_match.start(), length=len(url_match.group(1)),
                    url=del_surrogate(url_match.group(2))
                ))

                i += len(url_match.group(1))
                # Next loop iteration, don't check delimiters, since
                # a new inline URL might be right after this one.
                continue

        if end_delimiter is None:
            # We're not expecting any delimiter, so check them all
            for d, m in delimiters.items():
                # Slice the string at the current i'th position to see if
                # it matches the current delimiter d, otherwise skip it.
                if message[i:i + len(d)] != d:
                    continue

                if message[i + len(d):i + 2 * len(d)] == d:
                    # The same delimiter can't be right afterwards, if
                    # this were the case we would match empty strings
                    # like `` which we don't want to.
                    continue

                # Get rid of the delimiter by slicing it away
                message = message[:i] + message[i + len(d):]
                if m == MessageEntityPre:
                    # Special case, also has 'lang'
                    current = m(i, None, '')
                else:
                    current = m(i, None)

                end_delimiter = d  # We expect the same delimiter.
                break

        elif message[i:i + len(end_delimiter)] == end_delimiter:
            message = message[:i] + message[i + len(end_delimiter):]
            current.length = i - current.offset
            result.append(current)
            current, end_delimiter = None, None
            # Don't increment i here as we matched a delimiter,
            # and there may be a new one right after. This is
            # different than when encountering the first delimiter,
            # as we already know there won't be the same right after.
            continue

        # Next iteration
        i += 1
    # We may have found a delimiter but not its ending pair.
    # If this is the case, we want to insert the delimiter character back.
    if current is not None:
        message = (
            message[:current.offset]
            + end_delimiter
            + message[current.offset:]
        )

    message = strip_text(message, result)
    return del_surrogate(message), result


def unparse(text, entities, delimiters=None, url_fmt=None):
"""
Performs the reverse operation to .parse(), effectively returning
markdown-like syntax given a normal text and its MessageEntity's.
:param text: the text to be reconverted into markdown.
:param entities: the MessageEntity's applied to the text.
:return: a markdown-like text representing the combination of both inputs.
"""
    if not text or not entities:
        return text

    if not delimiters:
        if delimiters is not None:
            return text
        delimiters = DEFAULT_DELIMITERS

    if url_fmt is None:
        url_fmt = DEFAULT_URL_FORMAT

    if isinstance(entities, TLObject):
        entities = (entities,)
    else:
        entities = tuple(sorted(entities, key=lambda e: e.offset, reverse=True))
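    # Working from the highest offset to the lowest means that inserting
    # delimiters for one entity does not shift the offsets of the entities
    # that have not been processed yet.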
    text = add_surrogate(text)
    delimiters = {v: k for k, v in delimiters.items()}
    for entity in entities:
        s = entity.offset
        e = entity.offset + entity.length
        delimiter = delimiters.get(type(entity), None)
        if delimiter:
            text = text[:s] + delimiter + text[s:e] + delimiter + text[e:]
        elif url_fmt:
            url = None
            if isinstance(entity, MessageEntityTextUrl):
                url = entity.url
            elif isinstance(entity, MessageEntityMentionName):
                url = 'tg://user?id={}'.format(entity.user_id)
            if url:
                # It's possible that entities are malformed and end up in the
                # middle of some character, like emoji, by using malformed
                # clients or bots. Try decoding the current one to check if
                # this is the case, and if it is, advance the entity.
                while e <= len(text):
                    try:
                        del_surrogate(text[s:e])
                        break
                    except UnicodeDecodeError:
                        e += 1
                else:
                    # Out of bounds, no luck going forward
                    while e > s:
                        try:
                            del_surrogate(text[s:e])
                            break
                        except UnicodeDecodeError:
                            e -= 1
                    else:
                        # No luck going backwards either, ignore entity
                        continue

                text = (
                    text[:s] +
                    add_surrogate(url_fmt.format(text[s:e], url)) +
                    text[e:]
                )

    return del_surrogate(text)