"""
|
|
|
|
Utilities for working with the Telegram API itself (such as handy methods
|
2018-09-22 20:18:42 +03:00
|
|
|
to convert between an entity like a User, Chat, etc. into its Input version)
|
2017-06-09 17:13:39 +03:00
|
|
|
"""
|
import base64
import binascii
import imghdr
import inspect
import io
import itertools
import logging
import math
import mimetypes
import os
import pathlib
import re
import struct
from collections import namedtuple
from mimetypes import guess_extension
from types import GeneratorType

from .extensions import markdown, html
from .helpers import add_surrogate, del_surrogate, strip_text
from .tl import types

try:
    import hachoir
    import hachoir.metadata
    import hachoir.parser
except ImportError:
    hachoir = None

# Register some of the most common mime-types to avoid any issues.
# See https://github.com/LonamiWebs/Telethon/issues/1096.
mimetypes.add_type('image/png', '.png')
mimetypes.add_type('image/jpeg', '.jpeg')
mimetypes.add_type('image/webp', '.webp')
mimetypes.add_type('image/gif', '.gif')
mimetypes.add_type('image/bmp', '.bmp')
mimetypes.add_type('image/x-tga', '.tga')
mimetypes.add_type('image/tiff', '.tiff')
mimetypes.add_type('image/vnd.adobe.photoshop', '.psd')

mimetypes.add_type('video/mp4', '.mp4')
mimetypes.add_type('video/quicktime', '.mov')
mimetypes.add_type('video/avi', '.avi')

mimetypes.add_type('audio/mpeg', '.mp3')
mimetypes.add_type('audio/m4a', '.m4a')
mimetypes.add_type('audio/aac', '.aac')
mimetypes.add_type('audio/ogg', '.ogg')
mimetypes.add_type('audio/flac', '.flac')

mimetypes.add_type('application/x-tgsticker', '.tgs')

USERNAME_RE = re.compile(
    r'@|(?:https?://)?(?:www\.)?(?:telegram\.(?:me|dog)|t\.me)/(@|joinchat/)?'
)
TG_JOIN_RE = re.compile(
    r'tg://(join)\?invite='
)

# The only shorter-than-five-characters usernames are those used for some
# special, very well known bots. This list may be incomplete though:
#     "[...] @gif, @vid, @pic, @bing, @wiki, @imdb and @bold [...]"
#
# See https://telegram.org/blog/inline-bots#how-does-it-work
VALID_USERNAME_RE = re.compile(
    r'^([a-z](?:(?!__)\w){3,30}[a-z\d]'
    r'|gif|vid|pic|bing|wiki|imdb|bold|vote|like|coub)$',
    re.IGNORECASE
)

_FileInfo = namedtuple('FileInfo', 'dc_id location size')

_log = logging.getLogger(__name__)


def chunks(iterable, size=100):
    """
    Turns the given iterable into chunks of the specified size,
    which is 100 by default since that's what Telegram uses the most.
    """
    it = iter(iterable)
    size -= 1
    for head in it:
        yield itertools.chain([head], itertools.islice(it, size))
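
# A quick illustration of the chunking behaviour above: each yielded chunk is
# a lazy iterator over the shared source, so it must be consumed before
# advancing to the next one, e.g.
#
#     >>> [len(list(c)) for c in chunks(range(250))]
#     [100, 100, 50]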


def get_display_name(entity):
    """
    Gets the display name for the given :tl:`User`,
    :tl:`Chat` or :tl:`Channel`. Returns an empty string otherwise.
    """
    if isinstance(entity, types.User):
        if entity.last_name and entity.first_name:
            return '{} {}'.format(entity.first_name, entity.last_name)
        elif entity.first_name:
            return entity.first_name
        elif entity.last_name:
            return entity.last_name
        else:
            return ''

    elif isinstance(entity, (types.Chat, types.ChatForbidden, types.Channel)):
        return entity.title

    return ''


def get_extension(media):
    """Gets the corresponding extension for any Telegram media."""

    # Photos are always compressed as .jpg by Telegram
    try:
        get_input_photo(media)
        return '.jpg'
    except TypeError:
        # These cases are not handled by get_input_photo, so check them here.
        if isinstance(media, (types.UserProfilePhoto, types.ChatPhoto)):
            return '.jpg'

    # Documents will come with a mime type
    if isinstance(media, types.MessageMediaDocument):
        media = media.document
    if isinstance(media, (
            types.Document, types.WebDocument, types.WebDocumentNoProxy)):
        if media.mime_type == 'application/octet-stream':
            # Octet stream are just bytes, which have no default extension
            return ''
        else:
            return guess_extension(media.mime_type) or ''

    return ''


def _raise_cast_fail(entity, target):
    raise TypeError('Cannot cast {} to any kind of {}.'.format(
        type(entity).__name__, target))


def get_input_peer(entity, allow_self=True, check_hash=True):
    """
    Gets the input peer for the given "entity" (user, chat or channel).

    A ``TypeError`` is raised if the given entity isn't a supported type
    or if ``check_hash is True`` but the entity's ``access_hash is None``
    *or* the entity contains ``min`` information. In this case, the hash
    cannot be used for general purposes, and thus is not returned to avoid
    any issues which can derive from invalid access hashes.

    Note that ``check_hash`` **is ignored** if an input peer is already
    passed since in that case we assume the user knows what they're doing.
    This is key to getting entities by explicitly passing ``hash = 0``.
    """
    # NOTE: It is important that this method validates the access hashes,
    #       because it is used when we *require* a valid general-purpose
    #       access hash. This includes caching, which relies on this method.
    #       Further, when resolving raw methods, they do e.g.,
    #           utils.get_input_channel(client.get_input_peer(...))
    #
    #       ...which means that the client's method verifies the hashes.
    #
    # Excerpt from a conversation with official developers (slightly edited):
    #     > We send new access_hash for Channel with min flag since layer 102.
    #     > Previously, we omitted it.
    #     > That one works just to download the profile picture.
    #
    #     < So, min hashes only work for getting files,
    #     < but the non-min hash is required for any other operation?
    #
    #     > Yes.
    #
    # More information: https://core.telegram.org/api/min
    try:
        if entity.SUBCLASS_OF_ID == 0xc91c90b6:  # crc32(b'InputPeer')
            return entity
    except AttributeError:
        # e.g. custom.Dialog (can't cyclic import).
        if allow_self and hasattr(entity, 'input_entity'):
            return entity.input_entity
        elif hasattr(entity, 'entity'):
            return get_input_peer(entity.entity)
        else:
            _raise_cast_fail(entity, 'InputPeer')

    if isinstance(entity, types.User):
        if entity.is_self and allow_self:
            return types.InputPeerSelf()
        elif (entity.access_hash is not None and not entity.min) or not check_hash:
            return types.InputPeerUser(entity.id, entity.access_hash)
        else:
            raise TypeError('User without access_hash or min info cannot be input')

    if isinstance(entity, (types.Chat, types.ChatEmpty, types.ChatForbidden)):
        return types.InputPeerChat(entity.id)

    if isinstance(entity, types.Channel):
        if (entity.access_hash is not None and not entity.min) or not check_hash:
            return types.InputPeerChannel(entity.id, entity.access_hash)
        else:
            raise TypeError('Channel without access_hash or min info cannot be input')
    if isinstance(entity, types.ChannelForbidden):
        # "channelForbidden are never min", and since their hash is
        # also not optional, we assume that this truly is the case.
        return types.InputPeerChannel(entity.id, entity.access_hash)

    if isinstance(entity, types.InputUser):
        return types.InputPeerUser(entity.user_id, entity.access_hash)

    if isinstance(entity, types.InputChannel):
        return types.InputPeerChannel(entity.channel_id, entity.access_hash)

    if isinstance(entity, types.InputUserSelf):
        return types.InputPeerSelf()

    if isinstance(entity, types.InputUserFromMessage):
        return types.InputPeerUserFromMessage(entity.peer, entity.msg_id, entity.user_id)

    if isinstance(entity, types.InputChannelFromMessage):
        return types.InputPeerChannelFromMessage(entity.peer, entity.msg_id, entity.channel_id)

    if isinstance(entity, types.UserEmpty):
        return types.InputPeerEmpty()

    if isinstance(entity, types.UserFull):
        return get_input_peer(entity.user)

    if isinstance(entity, types.ChatFull):
        return types.InputPeerChat(entity.id)

    if isinstance(entity, types.PeerChat):
        return types.InputPeerChat(entity.chat_id)

    _raise_cast_fail(entity, 'InputPeer')
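
# In practice this means that, for example, a full ``types.User`` fetched from
# the API is turned into ``types.InputPeerUser(user.id, user.access_hash)``,
# while anything that already is an ``InputPeer`` is returned unchanged.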


def get_input_channel(entity):
    """
    Similar to :meth:`get_input_peer`, but for :tl:`InputChannel`'s alone.

    .. important::

        This method does not validate for invalid general-purpose access
        hashes, unlike `get_input_peer`. Consider using instead:
        ``get_input_channel(get_input_peer(channel))``.
    """
    try:
        if entity.SUBCLASS_OF_ID == 0x40f202fd:  # crc32(b'InputChannel')
            return entity
    except AttributeError:
        _raise_cast_fail(entity, 'InputChannel')

    if isinstance(entity, (types.Channel, types.ChannelForbidden)):
        return types.InputChannel(entity.id, entity.access_hash or 0)

    if isinstance(entity, types.InputPeerChannel):
        return types.InputChannel(entity.channel_id, entity.access_hash)

    if isinstance(entity, types.InputPeerChannelFromMessage):
        return types.InputChannelFromMessage(entity.peer, entity.msg_id, entity.channel_id)

    _raise_cast_fail(entity, 'InputChannel')


def get_input_user(entity):
    """
    Similar to :meth:`get_input_peer`, but for :tl:`InputUser`'s alone.

    .. important::

        This method does not validate for invalid general-purpose access
        hashes, unlike `get_input_peer`. Consider using instead:
        ``get_input_user(get_input_peer(user))``.
    """
    try:
        if entity.SUBCLASS_OF_ID == 0xe669bf46:  # crc32(b'InputUser'):
            return entity
    except AttributeError:
        _raise_cast_fail(entity, 'InputUser')

    if isinstance(entity, types.User):
        if entity.is_self:
            return types.InputUserSelf()
        else:
            return types.InputUser(entity.id, entity.access_hash or 0)

    if isinstance(entity, types.InputPeerSelf):
        return types.InputUserSelf()

    if isinstance(entity, (types.UserEmpty, types.InputPeerEmpty)):
        return types.InputUserEmpty()

    if isinstance(entity, types.UserFull):
        return get_input_user(entity.user)

    if isinstance(entity, types.InputPeerUser):
        return types.InputUser(entity.user_id, entity.access_hash)

    if isinstance(entity, types.InputPeerUserFromMessage):
        return types.InputUserFromMessage(entity.peer, entity.msg_id, entity.user_id)

    _raise_cast_fail(entity, 'InputUser')


def get_input_dialog(dialog):
    """Similar to :meth:`get_input_peer`, but for dialogs"""
    try:
        if dialog.SUBCLASS_OF_ID == 0xa21c9795:  # crc32(b'InputDialogPeer')
            return dialog
        if dialog.SUBCLASS_OF_ID == 0xc91c90b6:  # crc32(b'InputPeer')
            return types.InputDialogPeer(dialog)
    except AttributeError:
        _raise_cast_fail(dialog, 'InputDialogPeer')

    try:
        return types.InputDialogPeer(get_input_peer(dialog))
    except TypeError:
        pass

    _raise_cast_fail(dialog, 'InputDialogPeer')


def get_input_document(document):
    """Similar to :meth:`get_input_peer`, but for documents"""
    try:
        if document.SUBCLASS_OF_ID == 0xf33fdb68:  # crc32(b'InputDocument'):
            return document
    except AttributeError:
        _raise_cast_fail(document, 'InputDocument')

    if isinstance(document, types.Document):
        return types.InputDocument(
            id=document.id, access_hash=document.access_hash,
            file_reference=document.file_reference)

    if isinstance(document, types.DocumentEmpty):
        return types.InputDocumentEmpty()

    if isinstance(document, types.MessageMediaDocument):
        return get_input_document(document.document)

    if isinstance(document, types.Message):
        return get_input_document(document.media)

    _raise_cast_fail(document, 'InputDocument')


def get_input_photo(photo):
    """Similar to :meth:`get_input_peer`, but for photos"""
    try:
        if photo.SUBCLASS_OF_ID == 0x846363e0:  # crc32(b'InputPhoto'):
            return photo
    except AttributeError:
        _raise_cast_fail(photo, 'InputPhoto')

    if isinstance(photo, types.Message):
        photo = photo.media

    if isinstance(photo, (types.photos.Photo, types.MessageMediaPhoto)):
        photo = photo.photo

    if isinstance(photo, types.Photo):
        return types.InputPhoto(id=photo.id, access_hash=photo.access_hash,
                                file_reference=photo.file_reference)

    if isinstance(photo, types.PhotoEmpty):
        return types.InputPhotoEmpty()

    if isinstance(photo, types.messages.ChatFull):
        photo = photo.full_chat

    if isinstance(photo, types.ChannelFull):
        return get_input_photo(photo.chat_photo)
    elif isinstance(photo, types.UserFull):
        return get_input_photo(photo.profile_photo)
    elif isinstance(photo, (types.Channel, types.Chat, types.User)):
        return get_input_photo(photo.photo)

    if isinstance(photo, (types.UserEmpty, types.ChatEmpty,
                          types.ChatForbidden, types.ChannelForbidden)):
        return types.InputPhotoEmpty()

    _raise_cast_fail(photo, 'InputPhoto')


def get_input_chat_photo(photo):
    """Similar to :meth:`get_input_peer`, but for chat photos"""
    try:
        if photo.SUBCLASS_OF_ID == 0xd4eb2d74:  # crc32(b'InputChatPhoto')
            return photo
        elif photo.SUBCLASS_OF_ID == 0xe7655f1f:  # crc32(b'InputFile'):
            return types.InputChatUploadedPhoto(photo)
    except AttributeError:
        _raise_cast_fail(photo, 'InputChatPhoto')

    photo = get_input_photo(photo)
    if isinstance(photo, types.InputPhoto):
        return types.InputChatPhoto(photo)
    elif isinstance(photo, types.InputPhotoEmpty):
        return types.InputChatPhotoEmpty()

    _raise_cast_fail(photo, 'InputChatPhoto')


def get_input_geo(geo):
    """Similar to :meth:`get_input_peer`, but for geo points"""
    try:
        if geo.SUBCLASS_OF_ID == 0x430d225:  # crc32(b'InputGeoPoint'):
            return geo
    except AttributeError:
        _raise_cast_fail(geo, 'InputGeoPoint')

    if isinstance(geo, types.GeoPoint):
        return types.InputGeoPoint(lat=geo.lat, long=geo.long)

    if isinstance(geo, types.GeoPointEmpty):
        return types.InputGeoPointEmpty()

    if isinstance(geo, types.MessageMediaGeo):
        return get_input_geo(geo.geo)

    if isinstance(geo, types.Message):
        return get_input_geo(geo.media)

    _raise_cast_fail(geo, 'InputGeoPoint')


def get_input_media(
        media, *,
        is_photo=False, attributes=None, force_document=False,
        voice_note=False, video_note=False, supports_streaming=False,
        ttl=None
):
    """
    Similar to :meth:`get_input_peer`, but for media.

    If the media is :tl:`InputFile` and ``is_photo`` is known to be `True`,
    it will be treated as an :tl:`InputMediaUploadedPhoto`. Else, the rest
    of parameters will indicate how to treat it.
    """
    try:
        if media.SUBCLASS_OF_ID == 0xfaf846f4:  # crc32(b'InputMedia')
            return media
        elif media.SUBCLASS_OF_ID == 0x846363e0:  # crc32(b'InputPhoto')
            return types.InputMediaPhoto(media, ttl_seconds=ttl)
        elif media.SUBCLASS_OF_ID == 0xf33fdb68:  # crc32(b'InputDocument')
            return types.InputMediaDocument(media, ttl_seconds=ttl)
    except AttributeError:
        _raise_cast_fail(media, 'InputMedia')

    if isinstance(media, types.MessageMediaPhoto):
        return types.InputMediaPhoto(
            id=get_input_photo(media.photo),
            ttl_seconds=ttl or media.ttl_seconds
        )

    if isinstance(media, (types.Photo, types.photos.Photo, types.PhotoEmpty)):
        return types.InputMediaPhoto(
            id=get_input_photo(media),
            ttl_seconds=ttl
        )

    if isinstance(media, types.MessageMediaDocument):
        return types.InputMediaDocument(
            id=get_input_document(media.document),
            ttl_seconds=ttl or media.ttl_seconds
        )

    if isinstance(media, (types.Document, types.DocumentEmpty)):
        return types.InputMediaDocument(
            id=get_input_document(media),
            ttl_seconds=ttl
        )

    if isinstance(media, (types.InputFile, types.InputFileBig)):
        if is_photo:
            return types.InputMediaUploadedPhoto(file=media, ttl_seconds=ttl)
        else:
            attrs, mime = get_attributes(
                media,
                attributes=attributes,
                force_document=force_document,
                voice_note=voice_note,
                video_note=video_note,
                supports_streaming=supports_streaming
            )
            return types.InputMediaUploadedDocument(
                file=media, mime_type=mime, attributes=attrs, force_file=force_document,
                ttl_seconds=ttl)

    if isinstance(media, types.MessageMediaGame):
        return types.InputMediaGame(id=types.InputGameID(
            id=media.game.id,
            access_hash=media.game.access_hash
        ))

    if isinstance(media, types.MessageMediaContact):
        return types.InputMediaContact(
            phone_number=media.phone_number,
            first_name=media.first_name,
            last_name=media.last_name,
            vcard=''
        )

    if isinstance(media, types.MessageMediaGeo):
        return types.InputMediaGeoPoint(geo_point=get_input_geo(media.geo))

    if isinstance(media, types.MessageMediaVenue):
        return types.InputMediaVenue(
            geo_point=get_input_geo(media.geo),
            title=media.title,
            address=media.address,
            provider=media.provider,
            venue_id=media.venue_id,
            venue_type=''
        )

    if isinstance(media, types.MessageMediaDice):
        return types.InputMediaDice(media.emoticon)

    if isinstance(media, (
            types.MessageMediaEmpty, types.MessageMediaUnsupported,
            types.ChatPhotoEmpty, types.UserProfilePhotoEmpty,
            types.ChatPhoto, types.UserProfilePhoto)):
        return types.InputMediaEmpty()

    if isinstance(media, types.Message):
        return get_input_media(media.media, is_photo=is_photo, ttl=ttl)

    if isinstance(media, types.MessageMediaPoll):
        if media.poll.quiz:
            if not media.results.results:
                # A quiz has correct answers, which we don't know until answered.
                # If the quiz hasn't been answered we can't reconstruct it properly.
                raise TypeError('Cannot cast unanswered quiz to any kind of InputMedia.')

            correct_answers = [r.option for r in media.results.results if r.correct]
        else:
            correct_answers = None

        return types.InputMediaPoll(
            poll=media.poll,
            correct_answers=correct_answers,
            solution=media.results.solution,
            solution_entities=media.results.solution_entities,
        )

    if isinstance(media, types.Poll):
        return types.InputMediaPoll(media)

    _raise_cast_fail(media, 'InputMedia')


def get_input_message(message):
    """Similar to :meth:`get_input_peer`, but for input messages."""
    try:
        if isinstance(message, int):  # This case is really common too
            return types.InputMessageID(message)
        elif message.SUBCLASS_OF_ID == 0x54b6bcc5:  # crc32(b'InputMessage'):
            return message
        elif message.SUBCLASS_OF_ID == 0x790009e3:  # crc32(b'Message'):
            return types.InputMessageID(message.id)
    except AttributeError:
        pass

    _raise_cast_fail(message, 'InputMessage')


def get_input_group_call(call):
    """Similar to :meth:`get_input_peer`, but for input calls."""
    try:
        if call.SUBCLASS_OF_ID == 0x58611ab1:  # crc32(b'InputGroupCall')
            return call
        elif call.SUBCLASS_OF_ID == 0x20b4f320:  # crc32(b'GroupCall')
            return types.InputGroupCall(id=call.id, access_hash=call.access_hash)
    except AttributeError:
        _raise_cast_fail(call, 'InputGroupCall')


def _get_entity_pair(entity_id, entities, cache,
                     get_input_peer=get_input_peer):
    """
    Returns ``(entity, input_entity)`` for the given entity ID.
    """
    entity = entities.get(entity_id)
    try:
        input_entity = cache[entity_id]
    except KeyError:
        # KeyError is unlikely, so another TypeError won't hurt
        try:
            input_entity = get_input_peer(entity)
        except TypeError:
            input_entity = None

    return entity, input_entity


def get_message_id(message):
    """Similar to :meth:`get_input_peer`, but for message IDs."""
    if message is None:
        return None

    if isinstance(message, int):
        return message

    try:
        if message.SUBCLASS_OF_ID == 0x790009e3:
            # hex(crc32(b'Message')) = 0x790009e3
            return message.id
    except AttributeError:
        pass

    raise TypeError('Invalid message type: {}'.format(type(message)))


def _get_metadata(file):
    if not hachoir:
        return

    stream = None
    close_stream = True
    seekable = True

    # The parser may fail and we don't want to crash if
    # the extraction process fails.
    try:
        # Note: aiofiles are intentionally left out for simplicity.
        # `helpers._FileStream` is async only for simplicity too, so can't
        # reuse it here.
        if isinstance(file, str):
            stream = open(file, 'rb')
        elif isinstance(file, bytes):
            stream = io.BytesIO(file)
        else:
            stream = file
            close_stream = False
            if getattr(file, 'seekable', None):
                seekable = file.seekable()
            else:
                seekable = False

        if not seekable:
            return None

        pos = stream.tell()
        filename = getattr(file, 'name', '')

        parser = hachoir.parser.guess.guessParser(hachoir.stream.InputIOStream(
            stream,
            source='file:' + filename,
            tags=[],
            filename=filename
        ))

        return hachoir.metadata.extractMetadata(parser)

    except Exception as e:
        _log.warning('Failed to analyze %s: %s %s', file, e.__class__, e)

    finally:
        if stream and close_stream:
            stream.close()
        elif stream and seekable:
            stream.seek(pos)


def get_attributes(file, *, attributes=None, mime_type=None,
                   force_document=False, voice_note=False, video_note=False,
                   supports_streaming=False, thumb=None):
    """
    Get a list of attributes for the given file and
    the mime type as a tuple ([attribute], mime_type).
    """
    # Note: ``file.name`` works for :tl:`InputFile` and some `IOBase` streams
    name = file if isinstance(file, str) else getattr(file, 'name', 'unnamed')
    if mime_type is None:
        mime_type = mimetypes.guess_type(name)[0]

    attr_dict = {types.DocumentAttributeFilename:
        types.DocumentAttributeFilename(os.path.basename(name))}

    if is_audio(file):
        m = _get_metadata(file)
        if m:
            if m.has('author'):
                performer = m.get('author')
            elif m.has('artist'):
                performer = m.get('artist')
            else:
                performer = None

            attr_dict[types.DocumentAttributeAudio] = \
                types.DocumentAttributeAudio(
                    voice=voice_note,
                    title=m.get('title') if m.has('title') else None,
                    performer=performer,
                    duration=int(m.get('duration').seconds
                                 if m.has('duration') else 0)
                )

    if not force_document and is_video(file):
        m = _get_metadata(file)
        if m:
            doc = types.DocumentAttributeVideo(
                round_message=video_note,
                w=m.get('width') if m.has('width') else 1,
                h=m.get('height') if m.has('height') else 1,
                duration=int(m.get('duration').seconds
                             if m.has('duration') else 1),
                supports_streaming=supports_streaming
            )
        elif thumb:
            t_m = _get_metadata(thumb)
            width = 1
            height = 1
            if t_m and t_m.has("width"):
                width = t_m.get("width")
            if t_m and t_m.has("height"):
                height = t_m.get("height")

            doc = types.DocumentAttributeVideo(
                0, width, height, round_message=video_note,
                supports_streaming=supports_streaming)
        else:
            doc = types.DocumentAttributeVideo(
                0, 1, 1, round_message=video_note,
                supports_streaming=supports_streaming)

        attr_dict[types.DocumentAttributeVideo] = doc

    if voice_note:
        if types.DocumentAttributeAudio in attr_dict:
            attr_dict[types.DocumentAttributeAudio].voice = True
        else:
            attr_dict[types.DocumentAttributeAudio] = \
                types.DocumentAttributeAudio(0, voice=True)

    # Now override the attributes if any. As we have a dict of
    # {cls: instance}, we can override any class with the list
    # of attributes provided by the user easily.
    if attributes:
        for a in attributes:
            attr_dict[type(a)] = a

    # Ensure we have a mime type, any; but it cannot be None
    # 'The "octet-stream" subtype is used to indicate that a body
    # contains arbitrary binary data.'
    if not mime_type:
        mime_type = 'application/octet-stream'

    return list(attr_dict.values()), mime_type


def sanitize_parse_mode(mode):
    """
    Converts the given parse mode into an object with
    ``parse`` and ``unparse`` callable properties.
    """
    if not mode:
        return None

    if callable(mode):
        class CustomMode:
            @staticmethod
            def unparse(text, entities):
                raise NotImplementedError

        CustomMode.parse = mode
        return CustomMode
    elif (all(hasattr(mode, x) for x in ('parse', 'unparse'))
          and all(callable(x) for x in (mode.parse, mode.unparse))):
        return mode
    elif isinstance(mode, str):
        try:
            return {
                'md': markdown,
                'markdown': markdown,
                'htm': html,
                'html': html
            }[mode.lower()]
        except KeyError:
            raise ValueError('Unknown parse mode {}'.format(mode))
    else:
        raise TypeError('Invalid parse mode type {}'.format(mode))
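
# For reference, ``sanitize_parse_mode('md')`` and ``sanitize_parse_mode('HTML')``
# resolve to the ``markdown`` and ``html`` extension modules imported above, and
# a bare callable is wrapped into a one-off class whose ``parse`` is that callable.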


def get_input_location(location):
    """
    Similar to :meth:`get_input_peer`, but for input file locations.

    Note that this returns a tuple ``(dc_id, location)``, the
    ``dc_id`` being present if known.
    """
    info = _get_file_info(location)
    return info.dc_id, info.location


def _get_file_info(location):
    try:
        if location.SUBCLASS_OF_ID == 0x1523d462:  # crc32(b'InputFileLocation')
            return _FileInfo(None, location, None)
    except AttributeError:
        _raise_cast_fail(location, 'InputFileLocation')

    if isinstance(location, types.Message):
        location = location.media

    if isinstance(location, types.MessageMediaDocument):
        location = location.document
    elif isinstance(location, types.MessageMediaPhoto):
        location = location.photo

    if isinstance(location, types.Document):
        return _FileInfo(location.dc_id, types.InputDocumentFileLocation(
            id=location.id,
            access_hash=location.access_hash,
            file_reference=location.file_reference,
            thumb_size=''  # Presumably to download one of its thumbnails
        ), location.size)
    elif isinstance(location, types.Photo):
        return _FileInfo(location.dc_id, types.InputPhotoFileLocation(
            id=location.id,
            access_hash=location.access_hash,
            file_reference=location.file_reference,
            thumb_size=location.sizes[-1].type
        ), _photo_size_byte_count(location.sizes[-1]))

    _raise_cast_fail(location, 'InputFileLocation')


def _get_extension(file):
    """
    Gets the extension for the given file, which can be either a
    str or an ``open()``'ed file (which has a ``.name`` attribute).
    """
    if isinstance(file, str):
        return os.path.splitext(file)[-1]
    elif isinstance(file, pathlib.Path):
        return file.suffix
    elif isinstance(file, bytes):
        kind = imghdr.what(io.BytesIO(file))
        return ('.' + kind) if kind else ''
    elif isinstance(file, io.IOBase) and not isinstance(file, io.TextIOBase) and file.seekable():
        kind = imghdr.what(file)
        return ('.' + kind) if kind is not None else ''
    elif getattr(file, 'name', None):
        # Note: ``file.name`` works for :tl:`InputFile` and some `IOBase`
        return _get_extension(file.name)
    else:
        # Maybe it's a Telegram media
        return get_extension(file)


def is_image(file):
    """
    Returns `True` if the file extension looks like an image file to Telegram.
    """
    match = re.match(r'\.(png|jpe?g)', _get_extension(file), re.IGNORECASE)
    if match:
        return True
    else:
        return isinstance(resolve_bot_file_id(file), types.Photo)


def is_gif(file):
    """
    Returns `True` if the file extension looks like a gif file to Telegram.
    """
    return re.match(r'\.gif', _get_extension(file), re.IGNORECASE)


def is_audio(file):
    """Returns `True` if the file has an audio mime type."""
    ext = _get_extension(file)
    if not ext:
        metadata = _get_metadata(file)
        if metadata and metadata.has('mime_type'):
            return metadata.get('mime_type').startswith('audio/')
        else:
            return False
    else:
        file = 'a' + ext
        return (mimetypes.guess_type(file)[0] or '').startswith('audio/')


def is_video(file):
    """Returns `True` if the file has a video mime type."""
    ext = _get_extension(file)
    if not ext:
        metadata = _get_metadata(file)
        if metadata and metadata.has('mime_type'):
            return metadata.get('mime_type').startswith('video/')
        else:
            return False
    else:
        file = 'a' + ext
        return (mimetypes.guess_type(file)[0] or '').startswith('video/')


def is_list_like(obj):
    """
    Returns `True` if the given object looks like a list.

    Checking ``if hasattr(obj, '__iter__')`` and ignoring ``str/bytes`` is not
    enough. Things like ``open()`` are also iterable (and probably many
    other things), so just support the commonly known list-like objects.
    """
    return isinstance(obj, (list, tuple, set, dict, GeneratorType))


def parse_phone(phone):
    """Parses the given phone, or returns `None` if it's invalid."""
    if isinstance(phone, int):
        return str(phone)
    else:
        phone = re.sub(r'[+()\s-]', '', str(phone))
        if phone.isdigit():
            return phone


def parse_username(username):
    """
    Parses the given username or channel access hash, given
    a string, username or URL. Returns a tuple consisting of
    both the stripped, lowercase username and whether it is
    a joinchat/ hash (in which case it is not lowercase'd).

    Returns ``(None, False)`` if the ``username`` or link is not valid.
    """
    username = username.strip()
    m = USERNAME_RE.match(username) or TG_JOIN_RE.match(username)
    if m:
        username = username[m.end():]
        is_invite = bool(m.group(1))
        if is_invite:
            return username, True
        else:
            username = username.rstrip('/')

    if VALID_USERNAME_RE.match(username):
        return username.lower(), False
    else:
        return None, False
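
# For example (the invite hash below is just a made-up placeholder):
#
#     >>> parse_username('https://t.me/TelegramBot')
#     ('telegrambot', False)
#     >>> parse_username('t.me/joinchat/AAAAAExampleHash')
#     ('AAAAAExampleHash', True)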


def get_inner_text(text, entities):
    """
    Gets the inner text that's surrounded by the given entities.
    For instance: text = 'hey!', entity = MessageEntityBold(2, 2) -> 'y!'.

    :param text: the original text.
    :param entities: the entity or entities that must be matched.
    :return: a single result or a list of the text surrounded by the entities.
    """
    text = add_surrogate(text)
    result = []
    for e in entities:
        start = e.offset
        end = e.offset + e.length
        result.append(del_surrogate(text[start:end]))

    return result


def get_peer(peer):
    try:
        if isinstance(peer, int):
            pid, cls = resolve_id(peer)
            return cls(pid)
        elif peer.SUBCLASS_OF_ID == 0x2d45687:
            return peer
        elif isinstance(peer, (
                types.contacts.ResolvedPeer, types.InputNotifyPeer,
                types.TopPeer, types.Dialog, types.DialogPeer)):
            return peer.peer
        elif isinstance(peer, types.ChannelFull):
            return types.PeerChannel(peer.id)
        elif isinstance(peer, types.UserEmpty):
            return types.PeerUser(peer.id)
        elif isinstance(peer, types.ChatEmpty):
            return types.PeerChat(peer.id)

        if peer.SUBCLASS_OF_ID in (0x7d7c6f86, 0xd9c7fc18):
            # ChatParticipant, ChannelParticipant
            return types.PeerUser(peer.user_id)

        peer = get_input_peer(peer, allow_self=False, check_hash=False)
        if isinstance(peer, (types.InputPeerUser, types.InputPeerUserFromMessage)):
            return types.PeerUser(peer.user_id)
        elif isinstance(peer, types.InputPeerChat):
            return types.PeerChat(peer.chat_id)
        elif isinstance(peer, (types.InputPeerChannel, types.InputPeerChannelFromMessage)):
            return types.PeerChannel(peer.channel_id)
    except (AttributeError, TypeError):
        pass
    _raise_cast_fail(peer, 'Peer')


def get_peer_id(peer, add_mark=True):
    """
    Convert the given peer into its marked ID by default.

    This "mark" comes from the "bot api" format, and with it the peer type
    can be identified back. User ID is left unmodified, chat ID is negated,
    and channel ID is "prefixed" with -100:

    * ``user_id``
    * ``-chat_id``
    * ``-100channel_id``

    The original ID and the peer type class can be returned with
    a call to :meth:`resolve_id(marked_id)`.
    """
    # First we assert it's a Peer TLObject, or early return for integers
    if isinstance(peer, int):
        return peer if add_mark else resolve_id(peer)[0]

    # Tell the user to use their client to resolve InputPeerSelf if we got one
    if isinstance(peer, types.InputPeerSelf):
        _raise_cast_fail(peer, 'int (you might want to use client.get_peer_id)')

    try:
        peer = get_peer(peer)
    except TypeError:
        _raise_cast_fail(peer, 'int')

    if isinstance(peer, types.PeerUser):
        return peer.user_id
    elif isinstance(peer, types.PeerChat):
        # Check in case the user mixed things up to avoid blowing up
        if not (0 < peer.chat_id <= 0x7fffffff):
            peer.chat_id = resolve_id(peer.chat_id)[0]

        return -peer.chat_id if add_mark else peer.chat_id
    else:  # if isinstance(peer, types.PeerChannel):
        # Check in case the user mixed things up to avoid blowing up
        if not (0 < peer.channel_id <= 0x7fffffff):
            peer.channel_id = resolve_id(peer.channel_id)[0]

        if not add_mark:
            return peer.channel_id

        # Growing backwards from -100_0000_000_000 indicates it's a channel
        return -(1000000000000 + peer.channel_id)
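
# The marking scheme documented above in concrete terms:
#
#     >>> get_peer_id(types.PeerUser(123))
#     123
#     >>> get_peer_id(types.PeerChat(456))
#     -456
#     >>> get_peer_id(types.PeerChannel(789))
#     -1000000000789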


def resolve_id(marked_id):
    """Given a marked ID, returns the original ID and its :tl:`Peer` type."""
    if marked_id >= 0:
        return marked_id, types.PeerUser

    marked_id = -marked_id
    if marked_id > 1000000000000:
        marked_id -= 1000000000000
        return marked_id, types.PeerChannel
    else:
        return marked_id, types.PeerChat
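
# And the inverse mapping, undoing the mark added by `get_peer_id`: for
# instance ``resolve_id(-456)`` gives ``(456, types.PeerChat)`` and
# ``resolve_id(-1000000000789)`` gives ``(789, types.PeerChannel)``.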


def _rle_decode(data):
    """
    Decodes run-length-encoded `data`.
    """
    if not data:
        return data

    new = b''
    last = b''
    for cur in data:
        if last == b'\0':
            new += last * cur
            last = b''
        else:
            new += last
            last = bytes([cur])

    return new + last


def _rle_encode(string):
    new = b''
    count = 0
    for cur in string:
        if not cur:
            count += 1
        else:
            if count:
                new += b'\0' + bytes([count])
                count = 0

            new += bytes([cur])
    return new
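
# The encoding above only compresses runs of zero bytes (a zero byte followed
# by a count byte), which is what Telegram's packed file IDs use, e.g.:
#
#     >>> _rle_decode(b'\x01\x00\x03\x02')
#     b'\x01\x00\x00\x00\x02'
#     >>> _rle_encode(b'\x01\x00\x00\x00\x02')
#     b'\x01\x00\x03\x02'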


def _decode_telegram_base64(string):
    """
    Decodes a url-safe base64-encoded string into its bytes
    by first adding the stripped necessary padding characters.

    This is the way Telegram shares binary data as strings,
    such as Bot API-style file IDs or invite links.

    Returns `None` if the input string was not valid.
    """
    try:
        return base64.urlsafe_b64decode(string + '=' * (len(string) % 4))
    except (binascii.Error, ValueError, TypeError):
        return None  # not valid base64, not valid ascii, not a string


def _encode_telegram_base64(string):
    """
    Inverse for `_decode_telegram_base64`.
    """
    try:
        return base64.urlsafe_b64encode(string).rstrip(b'=').decode('ascii')
    except (binascii.Error, ValueError, TypeError):
        return None  # not valid base64, not valid ascii, not a string
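
# These two helpers round-trip, e.g. ``_encode_telegram_base64(b'\x01\x02\x03')``
# is ``'AQID'`` and ``_decode_telegram_base64('AQID')`` gives back those bytes.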
2018-08-02 14:47:35 +03:00
|
|
|
def resolve_bot_file_id(file_id):
|
|
|
|
"""
|
2019-05-09 13:24:37 +03:00
|
|
|
Given a Bot API-style `file_id <telethon.tl.custom.file.File.id>`,
|
|
|
|
returns the media it represents. If the `file_id <telethon.tl.custom.file.File.id>`
|
2019-07-06 13:10:25 +03:00
|
|
|
is not valid, `None` is returned instead.
|
2018-08-02 14:47:35 +03:00
|
|
|
|
2019-05-09 13:24:37 +03:00
|
|
|
Note that the `file_id <telethon.tl.custom.file.File.id>` does not have information
|
|
|
|
such as image dimensions or file size, so these will be zero if present.
|
2018-08-02 14:47:35 +03:00
|
|
|
|
|
|
|
For thumbnails, the photo ID and hash will always be zero.
|
|
|
|
"""
|
|
|
|
data = _rle_decode(_decode_telegram_base64(file_id))
|
2019-08-07 00:25:58 +03:00
|
|
|
if not data:
|
2018-08-01 00:23:52 +03:00
|
|
|
return None
|
|
|
|
|
2019-08-07 00:25:58 +03:00
|
|
|
# This isn't officially documented anywhere, but
|
|
|
|
# we assume the last byte is some kind of "version".
|
|
|
|
data, version = data[:-1], data[-1]
|
|
|
|
if version not in (2, 4):
|
|
|
|
return None
|
|
|
|
|
|
|
|
if (version == 2 and len(data) == 24) or (version == 4 and len(data) == 25):
|
|
|
|
if version == 2:
|
|
|
|
file_type, dc_id, media_id, access_hash = struct.unpack('<iiqq', data)
|
|
|
|
# elif version == 4:
|
|
|
|
else:
|
|
|
|
# TODO Figure out what the extra byte means
|
|
|
|
file_type, dc_id, media_id, access_hash, _ = struct.unpack('<iiqqb', data)
|
2019-02-13 11:16:34 +03:00
|
|
|
|
|
|
|
if not (1 <= dc_id <= 5):
|
|
|
|
# Valid `file_id`'s must have valid DC IDs. Since this method is
|
|
|
|
# called when sending a file and the user may have entered a path
|
|
|
|
# they believe is correct but the file doesn't exist, this method
|
|
|
|
# may detect a path as "valid" bot `file_id` even when it's not.
|
|
|
|
# By checking the `dc_id`, we greatly reduce the chances of this
|
|
|
|
# happening.
|
|
|
|
return None
|
|
|
|
|
2018-08-01 00:23:52 +03:00
|
|
|
attributes = []
|
|
|
|
if file_type == 3 or file_type == 9:
|
|
|
|
attributes.append(types.DocumentAttributeAudio(
|
|
|
|
duration=0,
|
|
|
|
voice=file_type == 3
|
|
|
|
))
|
|
|
|
elif file_type == 4 or file_type == 13:
|
|
|
|
attributes.append(types.DocumentAttributeVideo(
|
|
|
|
duration=0,
|
|
|
|
w=0,
|
|
|
|
h=0,
|
|
|
|
round_message=file_type == 13
|
|
|
|
))
|
|
|
|
# elif file_type == 5: # other, cannot know which
|
|
|
|
elif file_type == 8:
|
|
|
|
attributes.append(types.DocumentAttributeSticker(
|
|
|
|
alt='',
|
|
|
|
stickerset=types.InputStickerSetEmpty()
|
|
|
|
))
|
|
|
|
elif file_type == 10:
|
|
|
|
attributes.append(types.DocumentAttributeAnimated())
|
|
|
|
|
|
|
|
return types.Document(
|
|
|
|
id=media_id,
|
|
|
|
access_hash=access_hash,
|
|
|
|
date=None,
|
|
|
|
mime_type='',
|
|
|
|
size=0,
|
2019-02-03 02:14:39 +03:00
|
|
|
thumbs=None,
|
2018-08-01 00:23:52 +03:00
|
|
|
dc_id=dc_id,
|
2018-12-18 18:37:07 +03:00
|
|
|
attributes=attributes,
|
|
|
|
file_reference=b''
|
2018-08-01 00:23:52 +03:00
|
|
|
)
    elif (version == 2 and len(data) == 44) or (version == 4 and len(data) in (49, 77)):
        if version == 2:
            (file_type, dc_id, media_id, access_hash,
             volume_id, secret, local_id) = struct.unpack('<iiqqqqi', data)
        # else version == 4:
        elif len(data) == 49:
            # TODO Figure out what the extra five bytes mean
            (file_type, dc_id, media_id, access_hash,
             volume_id, secret, local_id, _) = struct.unpack('<iiqqqqi5s', data)
        elif len(data) == 77:
            # See #1613.
            (file_type, dc_id, _, media_id, access_hash,
             volume_id, _, local_id, _) = struct.unpack('<ii28sqqq12sib', data)
        else:
            return None

        if not (1 <= dc_id <= 5):
            return None

        # Thumbnails (small) always have ID 0; otherwise size 'x'
        photo_size = 's' if media_id or access_hash else 'x'
        return types.Photo(
            id=media_id,
            access_hash=access_hash,
            file_reference=b'',
            date=None,
            sizes=[types.PhotoSize(
                type=photo_size,
                w=0,
                h=0,
                size=0
            )],
            dc_id=dc_id,
            has_stickers=None
        )


def pack_bot_file_id(file):
    """
    Inverse operation for `resolve_bot_file_id`.

    The only parameters this method will accept are :tl:`Document` and
    :tl:`Photo`, and it will return a variable-length ``file_id`` string.

    If an invalid parameter is given, it will ``return None``.
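
    Example (a minimal sketch; it assumes ``message`` is a previously
    received message that carries media):
        .. code-block:: python

            file_id = utils.pack_bot_file_id(message.media)
            if file_id:
                # The string can be stored and later turned back into
                # a Document/Photo with `resolve_bot_file_id`.
                print('Portable file ID:', file_id)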
    """
    if isinstance(file, types.MessageMediaDocument):
        file = file.document
    elif isinstance(file, types.MessageMediaPhoto):
        file = file.photo

    if isinstance(file, types.Document):
        file_type = 5
        for attribute in file.attributes:
            if isinstance(attribute, types.DocumentAttributeAudio):
                file_type = 3 if attribute.voice else 9
            elif isinstance(attribute, types.DocumentAttributeVideo):
                file_type = 13 if attribute.round_message else 4
            elif isinstance(attribute, types.DocumentAttributeSticker):
                file_type = 8
            elif isinstance(attribute, types.DocumentAttributeAnimated):
                file_type = 10
            else:
                continue
            break

        return _encode_telegram_base64(_rle_encode(struct.pack(
            '<iiqqb', file_type, file.dc_id, file.id, file.access_hash, 2)))

    elif isinstance(file, types.Photo):
        size = next((x for x in reversed(file.sizes) if isinstance(
            x, (types.PhotoSize, types.PhotoCachedSize))), None)

        if not size:
            return None

        size = size.location
        return _encode_telegram_base64(_rle_encode(struct.pack(
            '<iiqqqqib', 2, file.dc_id, file.id, file.access_hash,
            size.volume_id, 0, size.local_id, 2  # 0 = old `secret`
        )))
    else:
        return None


def resolve_invite_link(link):
    """
    Resolves the given invite link. Returns a tuple of
    ``(link creator user id, global chat id, random int)``.

    Note that for broadcast channels or with the newest link format, the link
    creator user ID will be zero to protect their identity. Normal chats and
    megagroup channels will have a non-zero creator ID.

    Note that the chat ID may not be accurate for chats with a link that were
    upgraded to megagroup, since the link can remain the same, but the chat
    ID will be correct once a new link is generated.
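
    Example (a minimal sketch; the link below is a made-up placeholder,
    not a real invite):
        .. code-block:: python

            creator_id, chat_id, rand = utils.resolve_invite_link(
                'https://t.me/joinchat/AAAAAEhbeE2vqO6xmNzLVw')  # placeholder
            # All three values are None if the link cannot be parsed.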
    """
    link_hash, is_link = parse_username(link)
    if not is_link:
        # Perhaps the user passed the link hash directly
        link_hash = link

    # Little known fact, but invite links with a
    # hex-string of bytes instead of base64 also work.
    if re.match(r'[a-fA-F\d]+', link_hash) and len(link_hash) in (24, 32):
        payload = bytes.fromhex(link_hash)
    else:
        payload = _decode_telegram_base64(link_hash)

    try:
        if len(payload) == 12:
            return (0, *struct.unpack('>LQ', payload))
        elif len(payload) == 16:
            return struct.unpack('>LLQ', payload)
        else:
            pass
    except (struct.error, TypeError):
        pass

    return None, None, None


def resolve_inline_message_id(inline_msg_id):
    """
    Resolves an inline message ID. Returns a tuple of
    ``(message id, peer, dc id, access hash)``.

    The ``peer`` may either be a :tl:`PeerUser` referencing
    the user who sent the message via the bot in a private
    conversation or small group chat, or a :tl:`PeerChannel`
    if the message was sent in a channel.

    The ``access_hash`` does not have any use yet.
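
    Example (a minimal sketch; ``inline_msg_id`` stands for a Bot API-style
    inline message ID string and is not defined here):
        .. code-block:: python

            msg_id, peer, dc_id, access_hash = \
                utils.resolve_inline_message_id(inline_msg_id)
            if msg_id is not None:
                print('Message', msg_id, 'was sent in', peer)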
    """
    try:
        dc_id, message_id, pid, access_hash = \
            struct.unpack('<iiiq', _decode_telegram_base64(inline_msg_id))
        peer = types.PeerChannel(-pid) if pid < 0 else types.PeerUser(pid)
        return message_id, peer, dc_id, access_hash
    except (struct.error, TypeError):
        return None, None, None, None


def get_appropriated_part_size(file_size):
    """
    Gets the appropriate part size (in KB) when uploading or downloading
    files, given an initial file size in bytes.
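
    Example (worked out by hand from the thresholds below):
        .. code-block:: python

            # 20 MB is under the 100 MB threshold, so 128 KB parts are used
            assert utils.get_appropriated_part_size(20 * 1024 * 1024) == 128

            # 1 GB falls in the 750 MB..2000 MB bucket, so 512 KB parts
            assert utils.get_appropriated_part_size(1024 ** 3) == 512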
    """
    if file_size <= 104857600:  # 100MB
        return 128
    if file_size <= 786432000:  # 750MB
        return 256
    if file_size <= 2097152000:  # 2000MB
        return 512

    raise ValueError('File size too large')


def encode_waveform(waveform):
    """
    Encodes the input `bytes` into a 5-bit byte-string
    to be used as a voice note's waveform. See `decode_waveform`
    for the reverse operation.

    Example
        .. code-block:: python

            chat = ...
            file = 'my.ogg'

            # Send 'my.ogg' with an ascending-triangle waveform
            await client.send_file(chat, file, attributes=[types.DocumentAttributeAudio(
                duration=7,
                voice=True,
                waveform=utils.encode_waveform(bytes(range(2 ** 5)))  # 2**5 because 5-bit
            )])

            # Send 'my.ogg' with a square waveform
            await client.send_file(chat, file, attributes=[types.DocumentAttributeAudio(
                duration=7,
                voice=True,
                waveform=utils.encode_waveform(bytes((31, 31, 15, 15, 15, 15, 31, 31)) * 4)
            )])
    """
    bits_count = len(waveform) * 5
    bytes_count = (bits_count + 7) // 8
    result = bytearray(bytes_count + 1)

    for i in range(len(waveform)):
        byte_index, bit_shift = divmod(i * 5, 8)
        value = (waveform[i] & 0b00011111) << bit_shift

        or_what = struct.unpack('<H', (result[byte_index:byte_index + 2]))[0]
        or_what |= value
        result[byte_index:byte_index + 2] = struct.pack('<H', or_what)

    return bytes(result[:bytes_count])


def decode_waveform(waveform):
    """
    Inverse operation of `encode_waveform`.
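
    Example (a round-trip sketch; 32 values are used so the 5-bit packing
    fills whole bytes and decodes back to the exact same sequence):
        .. code-block:: python

            original = bytes(range(32))  # values must fit in 5 bits (0..31)
            encoded = utils.encode_waveform(original)
            assert utils.decode_waveform(encoded) == original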
    """
    bit_count = len(waveform) * 8
    value_count = bit_count // 5
    if value_count == 0:
        return b''

    result = bytearray(value_count)
    for i in range(value_count - 1):
        byte_index, bit_shift = divmod(i * 5, 8)
        value = struct.unpack('<H', waveform[byte_index:byte_index + 2])[0]
        result[i] = (value >> bit_shift) & 0b00011111

    # The last value may not have a full 16 bits available after it, so it
    # is read separately. Its bits start at (value_count - 1) * 5, just like
    # the values handled in the loop above.
    byte_index, bit_shift = divmod((value_count - 1) * 5, 8)
    if byte_index == len(waveform) - 1:
        value = waveform[byte_index]
    else:
        value = struct.unpack('<H', waveform[byte_index:byte_index + 2])[0]

    result[value_count - 1] = (value >> bit_shift) & 0b00011111
    return bytes(result)


def split_text(text, entities, *, limit=4096, max_entities=100, split_at=(r'\n', r'\s', '.')):
    """
    Split a message text and entities into multiple messages, each with their
    own set of entities. This allows sending a very large message as multiple
    messages while respecting the formatting.

    Arguments
        text (`str`):
            The message text.

        entities (List[:tl:`MessageEntity`]):
            The formatting entities.

        limit (`int`):
            The maximum message length of each individual message.

        max_entities (`int`):
            The maximum amount of entities that will be present in each
            individual message.

        split_at (Tuple[`str`]):
            The list of regular expressions that will determine where to split
            the text. By default, a newline is searched for. If no newline is
            present, a space is searched for. If no space is found, the split
            will be made at any character.

            The last expression should always match a character, or else the
            text will stop being split and the resulting text may be larger
            than the limit.

    Yields
        Pairs of ``(str, entities)`` with the split message.

    Example
        .. code-block:: python

            from telethon import utils
            from telethon.extensions import markdown

            very_long_markdown_text = "..."
            text, entities = markdown.parse(very_long_markdown_text)

            for text, entities in utils.split_text(text, entities):
                await client.send_message(chat, text, formatting_entities=entities)
    """
    # TODO add test cases (multiple entities beyond cutoff, at cutoff, splitting at emoji)
    # TODO try to optimize this a bit more? (avoid new_ent, smarter update method)
    def update(ent, **updates):
        kwargs = ent.to_dict()
        del kwargs['_']
        kwargs.update(updates)
        return ent.__class__(**kwargs)

    text = add_surrogate(text)
    split_at = tuple(map(re.compile, split_at))

    while True:
        if len(entities) > max_entities:
            last_ent = entities[max_entities - 1]
            cur_limit = min(limit, last_ent.offset + last_ent.length)
        else:
            cur_limit = limit

        if len(text) <= cur_limit:
            break

        for split in split_at:
            for i in reversed(range(cur_limit)):
                m = split.match(text, pos=i)
                if m:
                    cur_text, new_text = text[:m.end()], text[m.end():]
                    cur_ent, new_ent = [], []
                    for ent in entities:
                        if ent.offset < m.end():
                            if ent.offset + ent.length > m.end():
                                cur_ent.append(update(ent, length=m.end() - ent.offset))
                                new_ent.append(update(ent, offset=0, length=ent.offset + ent.length - m.end()))
                            else:
                                cur_ent.append(ent)
                        else:
                            new_ent.append(update(ent, offset=ent.offset - m.end()))

                    yield del_surrogate(cur_text), cur_ent
                    text, entities = new_text, new_ent
                    break
            else:
                continue
            break
        else:
            # Can't find where to split, just return the remaining text and entities
            break

    yield del_surrogate(text), entities


class AsyncClassWrapper:
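    """
    Wraps an object so that all of its callable attributes can be awaited:
    ``__getattr__`` returns either the original attribute or, for callables,
    an ``async`` wrapper that awaits the result only when it is awaitable.
    """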
    def __init__(self, wrapped):
        self.wrapped = wrapped

    def __getattr__(self, item):
        w = getattr(self.wrapped, item)

        async def wrapper(*args, **kwargs):
            val = w(*args, **kwargs)
            return await val if inspect.isawaitable(val) else val

        if callable(w):
            return wrapper
        else:
            return w


def stripped_photo_to_jpg(stripped):
    """
    Adds the JPG header and footer to a stripped image.

    Ported from https://github.com/telegramdesktop/tdesktop/blob/bec39d89e19670eb436dc794a8f20b657cb87c71/Telegram/SourceFiles/ui/image/image.cpp#L225
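
    Example (a minimal sketch; ``photo`` is assumed to be a :tl:`Photo` whose
    ``sizes`` include a :tl:`PhotoStrippedSize`):
        .. code-block:: python

            stripped = next(s for s in photo.sizes
                            if isinstance(s, types.PhotoStrippedSize))
            with open('thumb.jpg', 'wb') as f:
                f.write(utils.stripped_photo_to_jpg(stripped.bytes))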
    """
    # NOTE: Changes here should update _photo_size_byte_count
    if len(stripped) < 3 or stripped[0] != 1:
        return stripped

    header = bytearray(b'\xff\xd8\xff\xe0\x00\x10JFIF\x00\x01\x01\x00\x00\x01\x00\x01\x00\x00\xff\xdb\x00C\x00(\x1c\x1e#\x1e\x19(#!#-+(0<dA<77<{X]Id\x91\x80\x99\x96\x8f\x80\x8c\x8a\xa0\xb4\xe6\xc3\xa0\xaa\xda\xad\x8a\x8c\xc8\xff\xcb\xda\xee\xf5\xff\xff\xff\x9b\xc1\xff\xff\xff\xfa\xff\xe6\xfd\xff\xf8\xff\xdb\x00C\x01+--<5<vAAv\xf8\xa5\x8c\xa5\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xff\xc0\x00\x11\x08\x00\x00\x00\x00\x03\x01"\x00\x02\x11\x01\x03\x11\x01\xff\xc4\x00\x1f\x00\x00\x01\x05\x01\x01\x01\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\xff\xc4\x00\xb5\x10\x00\x02\x01\x03\x03\x02\x04\x03\x05\x05\x04\x04\x00\x00\x01}\x01\x02\x03\x00\x04\x11\x05\x12!1A\x06\x13Qa\x07"q\x142\x81\x91\xa1\x08#B\xb1\xc1\x15R\xd1\xf0$3br\x82\t\n\x16\x17\x18\x19\x1a%&\'()*456789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz\x83\x84\x85\x86\x87\x88\x89\x8a\x92\x93\x94\x95\x96\x97\x98\x99\x9a\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xff\xc4\x00\x1f\x01\x00\x03\x01\x01\x01\x01\x01\x01\x01\x01\x01\x00\x00\x00\x00\x00\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\xff\xc4\x00\xb5\x11\x00\x02\x01\x02\x04\x04\x03\x04\x07\x05\x04\x04\x00\x01\x02w\x00\x01\x02\x03\x11\x04\x05!1\x06\x12AQ\x07aq\x13"2\x81\x08\x14B\x91\xa1\xb1\xc1\t#3R\xf0\x15br\xd1\n\x16$4\xe1%\xf1\x17\x18\x19\x1a&\'()*56789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x92\x93\x94\x95\x96\x97\x98\x99\x9a\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xff\xda\x00\x0c\x03\x01\x00\x02\x11\x03\x11\x00?\x00')
    footer = b"\xff\xd9"
    header[164] = stripped[1]
    header[166] = stripped[2]
    return bytes(header) + stripped[3:] + footer


def _photo_size_byte_count(size):
    if isinstance(size, types.PhotoSize):
        return size.size
    elif isinstance(size, types.PhotoStrippedSize):
        if len(size.bytes) < 3 or size.bytes[0] != 1:
            return len(size.bytes)

        return len(size.bytes) + 622
    elif isinstance(size, types.PhotoCachedSize):
        return len(size.bytes)
    elif isinstance(size, types.PhotoSizeEmpty):
        return 0
    elif isinstance(size, types.PhotoSizeProgressive):
        return max(size.sizes)
    else:
        return None