"""
Simple markdown parser which does not support nesting. Intended primarily
for use within the library, which attempts to handle emoji correctly,
since Telegram counts them as two UTF-16 code units rather than one
character.
"""
import re
import warnings

from ..helpers import add_surrogate, del_surrogate, strip_text
from ..tl import TLObject
from ..tl.types import (
    MessageEntityBold, MessageEntityItalic, MessageEntityCode,
    MessageEntityPre, MessageEntityTextUrl, MessageEntityMentionName,
    MessageEntityStrike
)

DEFAULT_DELIMITERS = {
    '**': MessageEntityBold,
    '__': MessageEntityItalic,
    '~~': MessageEntityStrike,
    '`': MessageEntityCode,
    '```': MessageEntityPre
}

DEFAULT_URL_RE = re.compile(r'\[([\S\s]+?)\]\((.+?)\)')
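# For example, DEFAULT_URL_RE.match('[text](https://example.com)') captures
# 'text' as group 1 and 'https://example.com' as group 2.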
DEFAULT_URL_FORMAT = '[{0}]({1})'


def overlap(a, b, x, y):
    return max(a, x) < min(b, y)
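# ``overlap`` treats its ranges as half-open, so ranges that merely touch do
# not count: overlap(0, 5, 3, 8) is True, but overlap(0, 3, 3, 8) is False.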


def parse(message, delimiters=None, url_re=None):
    """
    Parses the given markdown message and returns its stripped representation
    plus a list of the MessageEntity's that were found.

    :param message: the message with markdown-like syntax to be parsed.
    :param delimiters: the delimiters to be used, {delimiter: type}.
    :param url_re: the URL regex to be used. Must have two groups.
    :return: a tuple consisting of (clean message, [message entities]).
    """
    if not message:
        return message, []

    if url_re is None:
        url_re = DEFAULT_URL_RE
    elif isinstance(url_re, str):
        url_re = re.compile(url_re)

    if not delimiters:
        if delimiters is not None:
            return message, []
        delimiters = DEFAULT_DELIMITERS

    # Build a regex to efficiently test all delimiters at once.
    # Note that the largest delimiter should go first; we don't want
    # ``` to be interpreted as a single back-tick in a code block.
    delim_re = re.compile('|'.join('({})'.format(re.escape(k))
                                   for k in sorted(delimiters, key=len, reverse=True)))
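    # With the default delimiters this compiles to a pattern roughly like
    # (```)|(\*\*)|(__)|(~~)|(`), longest alternative first.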

    # Cannot use a for loop because we need to skip some indices.
    i = 0
    result = []

    # Work at the UTF-16 code-unit level (surrogate pairs) so that entity
    # offsets and lengths match what Telegram expects.
    message = add_surrogate(message)
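    # For example, a single emoji like '👍' becomes two code units here,
    # which is how Telegram measures entity offsets.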
    while i < len(message):
        m = delim_re.match(message, pos=i)

        # Did we find some delimiter here at `i`?
        if m:
            delim = next(filter(None, m.groups()))

            # +1 to avoid matching right after (e.g. "****")
            end = message.find(delim, i + len(delim) + 1)

            # Did we find the earliest closing tag?
            if end != -1:
                # Remove the delimiter from the string
                message = ''.join((
                    message[:i],
                    message[i + len(delim):end],
                    message[end + len(delim):]
                ))

                # Check other affected entities
                for ent in result:
                    # If the end is after our start, it is affected
                    if ent.offset + ent.length > i:
                        # If the old start is also before ours, it is fully enclosed
                        if ent.offset <= i:
                            ent.length -= len(delim) * 2
                        else:
                            ent.length -= len(delim)
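                # e.g. in '**__x__**' the bold entity is found first with
                # length 5; removing the inner '__' pair later shrinks it to
                # length 1 through the fully-enclosed branch above.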

                # Append the found entity
                ent = delimiters[delim]
                if ent == MessageEntityPre:
                    result.append(ent(i, end - i - len(delim), ''))  # has 'lang'
                else:
                    result.append(ent(i, end - i - len(delim)))

                # No nested entities inside code blocks
                if ent in (MessageEntityCode, MessageEntityPre):
                    i = end - len(delim)

                continue

        elif url_re:
            m = url_re.match(message, pos=i)
            if m:
                # Replace the whole match with only the inline URL text.
                message = ''.join((
                    message[:m.start()],
                    m.group(1),
                    message[m.end():]
                ))

                # The removed markup is the whole match minus the kept text.
                # Note that len(m.group(1)) must be used here; len(m.group())
                # is the whole match and would always yield a size of zero.
                delim_size = m.end() - m.start() - len(m.group(1))
                for ent in result:
                    # If the end is after our start, it is affected
                    if ent.offset + ent.length > m.start():
                        ent.length -= delim_size

                result.append(MessageEntityTextUrl(
                    offset=m.start(), length=len(m.group(1)),
                    url=del_surrogate(m.group(2))
                ))
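                # e.g. '[link](https://example.com)' collapses to 'link' and
                # yields MessageEntityTextUrl(offset=0, length=4,
                # url='https://example.com').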
                i += len(m.group(1))
                continue

        i += 1

    message = strip_text(message, result)
    return del_surrogate(message), result


def unparse(text, entities, delimiters=None, url_fmt=None):
    """
    Performs the reverse operation to .parse(), effectively returning
    markdown-like syntax given a normal text and its MessageEntity's.

    :param text: the text to be reconverted into markdown.
    :param entities: the MessageEntity's applied to the text.
    :param delimiters: the delimiters to be used, {delimiter: type}.
    :param url_fmt: deprecated; only a warning is raised if it is passed.
    :return: a markdown-like text representing the combination of both inputs.
    """
    if not text or not entities:
        return text

    if not delimiters:
        if delimiters is not None:
            return text
        delimiters = DEFAULT_DELIMITERS

    if url_fmt is not None:
        warnings.warn('url_fmt is deprecated')  # since it complicates everything *a lot*

    if isinstance(entities, TLObject):
        entities = (entities,)

    text = add_surrogate(text)
    delimiters = {v: k for k, v in delimiters.items()}
    insert_at = []
    for entity in entities:
        s = entity.offset
        e = entity.offset + entity.length
        delimiter = delimiters.get(type(entity), None)
        if delimiter:
            insert_at.append((s, delimiter))
            insert_at.append((e, delimiter))
        else:
            url = None
            if isinstance(entity, MessageEntityTextUrl):
                url = entity.url
            elif isinstance(entity, MessageEntityMentionName):
                url = 'tg://user?id={}'.format(entity.user_id)
            if url:
                insert_at.append((s, '['))
                insert_at.append((e, ']({})'.format(url)))

    insert_at.sort(key=lambda t: t[0])
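    # Work from the back so each insertion cannot shift the offsets of the
    # smaller insertion points still pending in the list.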
    while insert_at:
        at, what = insert_at.pop()
        text = text[:at] + what + text[at:]

    return del_surrogate(text)
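# Note that parse() and unparse() are only approximate inverses; any
# whitespace trimmed by strip_text() during parse() is not restored here.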