2017-10-28 20:06:41 +03:00
|
|
|
"""
|
|
|
|
Simple markdown parser which does not support nesting. Intended primarily
|
|
|
|
for use within the library, which attempts to handle emojies correctly,
|
|
|
|
since they seem to count as two characters and it's a bit strange.
|
|
|
|
"""
|
|
|
|
import re
|
2018-01-07 18:18:54 +03:00
|
|
|
import struct
|
2017-11-16 21:13:13 +03:00
|
|
|
|
2017-11-17 17:57:48 +03:00
|
|
|
from ..tl import TLObject
|
2017-11-16 21:13:13 +03:00
|
|
|
|
2017-10-28 20:06:41 +03:00
|
|
|
from ..tl.types import (
|
2017-10-29 18:33:10 +03:00
|
|
|
MessageEntityBold, MessageEntityItalic, MessageEntityCode,
|
|
|
|
MessageEntityPre, MessageEntityTextUrl
|
2017-10-28 20:06:41 +03:00
|
|
|
)
|
|
|
|
|
|
|
|
|
2017-10-29 20:21:21 +03:00
|
|
|
# Markdown-like delimiters mapped to the message entity type they produce.
# Checked in `parse()`; `unparse()` uses the inverse mapping.
DEFAULT_DELIMITERS = {
    '**': MessageEntityBold,
    '__': MessageEntityItalic,
    '`': MessageEntityCode,
    '```': MessageEntityPre
}

# Inline URL syntax `[visible text](url)`; must expose exactly two groups
# (text, url), as `parse()` reads `.group(1)` and `.group(2)`.
DEFAULT_URL_RE = re.compile(r'\[([\S\s]+?)\]\((.+?)\)')

# Format string used by `unparse()` to rebuild the inline URL syntax:
# {0} is the visible text, {1} the URL.
DEFAULT_URL_FORMAT = '[{0}]({1})'
|
|
|
|
|
2018-01-07 18:18:54 +03:00
|
|
|
|
|
|
|
def _add_surrogate(text):
|
|
|
|
return ''.join(
|
|
|
|
# SMP -> Surrogate Pairs (Telegram offsets are calculated with these).
|
|
|
|
# See https://en.wikipedia.org/wiki/Plane_(Unicode)#Overview for more.
|
|
|
|
''.join(chr(y) for y in struct.unpack('<HH', x.encode('utf-16le')))
|
|
|
|
if (0x10000 <= ord(x) <= 0x10FFFF) else x for x in text
|
|
|
|
)
|
|
|
|
|
|
|
|
|
|
|
|
def _del_surrogate(text):
|
|
|
|
return text.encode('utf-16', 'surrogatepass').decode('utf-16')
|
2017-11-26 19:16:59 +03:00
|
|
|
|
2017-10-29 20:21:21 +03:00
|
|
|
|
|
|
|
def parse(message, delimiters=None, url_re=None):
    """
    Parses the given markdown message and returns its stripped representation
    plus a list of the MessageEntity's that were found.

    :param message: the message with markdown-like syntax to be parsed.
    :param delimiters: the delimiters to be used, {delimiter: type}.
    :param url_re: the URL bytes regex to be used. Must have two groups.
    :return: a tuple consisting of (clean message, [message entities]).
    """
    if url_re is None:
        url_re = DEFAULT_URL_RE
    elif isinstance(url_re, str):
        # Allow callers to pass a pattern string instead of a compiled regex.
        url_re = re.compile(url_re)

    if not delimiters:
        # An explicitly empty {} disables parsing entirely; only a None
        # value (the default) falls back to DEFAULT_DELIMITERS.
        if delimiters is not None:
            return message, []
        delimiters = DEFAULT_DELIMITERS

    # Cannot use a for loop because we need to skip some indices
    i = 0
    result = []           # collected MessageEntity instances
    current = None        # entity currently being built (opened, not closed)
    end_delimiter = None  # closing delimiter expected for `current`

    # Expand astral characters into surrogate pairs so that the indices
    # used below match Telegram's UTF-16 based entity offsets (non-BMP
    # characters count as two).
    message = _add_surrogate(message)
    while i < len(message):
        if url_re and current is None:
            # If we're not inside a previous match since Telegram doesn't allow
            # nested message entities, try matching the URL from the i'th pos.
            url_match = url_re.match(message, pos=i)
            if url_match:
                # Replace the whole match with only the inline URL text.
                message = ''.join((
                    message[:url_match.start()],
                    url_match.group(1),
                    message[url_match.end():]
                ))

                result.append(MessageEntityTextUrl(
                    offset=i, length=len(url_match.group(1)),
                    url=_del_surrogate(url_match.group(2))
                ))
                i += len(url_match.group(1))
                # Next loop iteration, don't check delimiters, since
                # a new inline URL might be right after this one.
                continue

        if end_delimiter is None:
            # We're not expecting any delimiter, so check them all
            for d, m in delimiters.items():
                # Slice the string at the current i'th position to see if
                # it matches the current delimiter d, otherwise skip it.
                if message[i:i + len(d)] != d:
                    continue

                if message[i + len(d):i + 2 * len(d)] == d:
                    # The same delimiter can't be right afterwards, if
                    # this were the case we would match empty strings
                    # like `` which we don't want to.
                    continue

                # Get rid of the delimiter by slicing it away
                message = message[:i] + message[i + len(d):]
                if m == MessageEntityPre:
                    # Special case, also has 'lang'
                    current = m(i, None, '')
                else:
                    # Length is unknown until the closing delimiter appears.
                    current = m(i, None)

                end_delimiter = d  # We expect the same delimiter.
                break

        elif message[i:i + len(end_delimiter)] == end_delimiter:
            # Found the closing pair: strip it and finish the entity.
            message = message[:i] + message[i + len(end_delimiter):]
            current.length = i - current.offset
            result.append(current)
            current, end_delimiter = None, None
            # Don't increment i here as we matched a delimiter,
            # and there may be a new one right after. This is
            # different than when encountering the first delimiter,
            # as we already know there won't be the same right after.
            continue

        # Next iteration
        i += 1

    # We may have found an opening delimiter but not its ending pair.
    # If this is the case, we want to insert the delimiter character back.
    if current is not None:
        message = (
            message[:current.offset]
            + end_delimiter
            + message[current.offset:]
        )

    return _del_surrogate(message), result
|
2017-11-26 19:16:59 +03:00
|
|
|
|
|
|
|
|
|
|
|
def unparse(text, entities, delimiters=None, url_fmt=None):
    """
    Performs the reverse operation to .parse(), effectively returning
    markdown-like syntax given a normal text and its MessageEntity's.

    :param text: the text to be reconverted into markdown.
    :param entities: the MessageEntity's applied to the text.
    :return: a markdown-like text representing the combination of both inputs.
    """
    if not entities:
        return text

    if not delimiters:
        # An explicitly empty {} disables unparsing; only None (the
        # default) falls back to DEFAULT_DELIMITERS.
        if delimiters is not None:
            return text
        delimiters = DEFAULT_DELIMITERS

    if url_fmt is None:
        url_fmt = DEFAULT_URL_FORMAT

    if isinstance(entities, TLObject):
        # A single entity was given; make it iterable.
        entities = (entities,)
    else:
        # Process from the last entity to the first so that inserting
        # delimiters never shifts the offsets still left to handle.
        entities = tuple(sorted(entities, key=lambda x: x.offset, reverse=True))

    # Invert {delimiter: type} into {type: delimiter} for direct lookup.
    inverse = {entity_cls: delim for delim, entity_cls in delimiters.items()}
    text = _add_surrogate(text)
    for entity in entities:
        start = entity.offset
        end = entity.offset + entity.length
        delim = inverse.get(type(entity), None)
        if delim:
            pieces = (text[:start], delim, text[start:end], delim, text[end:])
            text = ''.join(pieces)
        elif url_fmt and isinstance(entity, MessageEntityTextUrl):
            # Rebuild the [text](url) inline syntax for URL entities.
            inline = _add_surrogate(url_fmt.format(text[start:end], entity.url))
            text = text[:start] + inline + text[end:]

    return _del_surrogate(text)
|
2017-11-16 21:13:13 +03:00
|
|
|
|
|
|
|
|
|
|
|
def get_inner_text(text, entity):
    """
    Gets the inner text that's surrounded by the given entity or entities.
    For instance: text = 'hey!', entity = MessageEntityBold(2, 2) -> 'y!'.

    :param text: the original text.
    :param entity: the entity or entities that must be matched.
    :return: a single result or a list of the text surrounded by the entities.
    """
    if isinstance(entity, TLObject):
        # Bug fix: the flags were inverted. A single entity must yield a
        # single string, and a sequence of entities must yield a list
        # (previously a single entity returned a one-element list while a
        # sequence returned only its first inner text).
        entity = (entity,)
        multiple = False
    else:
        multiple = True

    # Slice over the surrogate-pair representation so that the entities'
    # UTF-16 based offsets/lengths select the right characters.
    text = _add_surrogate(text)
    result = []
    for e in entity:
        start = e.offset
        end = e.offset + e.length
        result.append(_del_surrogate(text[start:end]))

    return result if multiple else result[0]
|