import datetime
import io
import os
import pathlib
import typing
import inspect
import asyncio

from ..crypto import AES

from .. import utils, helpers, errors, hints
from ..requestiter import RequestIter
from ..tl import TLObject, types, functions

try:
    import aiohttp
except ImportError:
    aiohttp = None

if typing.TYPE_CHECKING:
    from .telegramclient import TelegramClient

# Chunk sizes for upload.getFile must be multiples of the smallest size
MIN_CHUNK_SIZE = 4096
MAX_CHUNK_SIZE = 512 * 1024

# 2021-01-15, users reported that `errors.TimeoutError` can occur while downloading files.
TIMED_OUT_SLEEP = 1


class _DirectDownloadIter(RequestIter):
    async def _init(
            self, file, dc_id, offset, stride, chunk_size, request_size, file_size, msg_data
    ):
        self.request = functions.upload.GetFileRequest(
            file, offset=offset, limit=request_size)

        self.total = file_size
        self._stride = stride
        self._chunk_size = chunk_size
        self._last_part = None
        self._msg_data = msg_data
        self._timed_out = False

        self._exported = dc_id and self.client.session.dc_id != dc_id
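        # e.g. the session may live in DC 2 while the requested file is stored
        # in DC 4; in that case a sender connected to DC 4 is borrowed below.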
        if not self._exported:
            # The used sender will also change if ``FileMigrateError`` occurs
            self._sender = self.client._sender
        else:
            try:
                self._sender = await self.client._borrow_exported_sender(dc_id)
            except errors.DcIdInvalidError:
                # Can't export a sender for the ID we are currently in
                config = await self.client(functions.help.GetConfigRequest())
                for option in config.dc_options:
                    if option.ip_address == self.client.session.server_address:
                        self.client.session.set_dc(
                            option.id, option.ip_address, option.port)
                        self.client.session.save()
                        break

                # TODO Figure out why the session may have the wrong DC ID
                self._sender = self.client._sender
                self._exported = False

    async def _load_next_chunk(self):
        cur = await self._request()
        self.buffer.append(cur)
        if len(cur) < self.request.limit:
            self.left = len(self.buffer)
            await self.close()
        else:
            self.request.offset += self._stride

    async def _request(self):
        try:
            result = await self.client._call(self._sender, self.request)
            self._timed_out = False
            if isinstance(result, types.upload.FileCdnRedirect):
                raise NotImplementedError  # TODO Implement
            else:
                return result.bytes

        except errors.TimedOutError as e:
            if self._timed_out:
                self.client._log[__name__].warning('Got two timeouts in a row while downloading file')
                raise

            self._timed_out = True
            self.client._log[__name__].info('Got timeout while downloading file, retrying once')
            await asyncio.sleep(TIMED_OUT_SLEEP)
            return await self._request()

        except errors.FileMigrateError as e:
            self.client._log[__name__].info('File lives in another DC')
            self._sender = await self.client._borrow_exported_sender(e.new_dc)
            self._exported = True
            return await self._request()

        except (errors.FilerefUpgradeNeededError, errors.FileReferenceExpiredError) as e:
            # Only implemented for documents which are the ones that may take that long to download
            if not self._msg_data \
                    or not isinstance(self.request.location, types.InputDocumentFileLocation) \
                    or self.request.location.thumb_size != '':
                raise

            self.client._log[__name__].info('File ref expired during download; refetching message')
            chat, msg_id = self._msg_data
            msg = await self.client.get_messages(chat, ids=msg_id)

            if not isinstance(msg.media, types.MessageMediaDocument):
                raise

            document = msg.media.document

            # Message media may have been edited for something else
            if document.id != self.request.location.id:
                raise

            self.request.location.file_reference = document.file_reference
            return await self._request()

    async def close(self):
        if not self._sender:
            return

        try:
            if self._exported:
                await self.client._return_exported_sender(self._sender)
            elif self._sender != self.client._sender:
                await self._sender.disconnect()
        finally:
            self._sender = None

    async def __aenter__(self):
        return self

    async def __aexit__(self, *args):
        await self.close()

    __enter__ = helpers._sync_enter
    __exit__ = helpers._sync_exit


class _GenericDownloadIter(_DirectDownloadIter):
    async def _load_next_chunk(self):
        # 1. Fetch enough for one chunk
        data = b''

        # 1.1. ``bad`` is how much into the data we have we need to offset
        bad = self.request.offset % self.request.limit
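        # For instance, with request.limit = 4096 and offset = 6144, ``bad`` is
        # 2048: we must ask Telegram for data starting at 4096 and discard the
        # first 2048 bytes of what it returns.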
        before = self.request.offset

        # 1.2. We have to fetch from a valid offset, so remove that bad part
        self.request.offset -= bad

        done = False
        while not done and len(data) - bad < self._chunk_size:
            cur = await self._request()
            self.request.offset += self.request.limit

            data += cur
            done = len(cur) < self.request.limit

        # 1.3 Restore our last desired offset
        self.request.offset = before

        # 2. Fill the buffer with the data we have
        # 2.1. Slicing `bytes` is expensive, yield `memoryview` instead
        mem = memoryview(data)

        # 2.2. The current chunk starts at ``bad`` offset into the data,
        # and each new chunk is ``stride`` bytes apart of the other
        for i in range(bad, len(data), self._stride):
            self.buffer.append(mem[i:i + self._chunk_size])

            # 2.3. We will yield this offset, so move to the next one
            self.request.offset += self._stride

        # 2.4. If we are in the last chunk, we will return the last partial data
        if done:
            self.left = len(self.buffer)
            await self.close()
            return

        # 2.5. If we are not done, we can't return incomplete chunks.
        if len(self.buffer[-1]) != self._chunk_size:
            self._last_part = self.buffer.pop().tobytes()

            # 3. Be careful with the offsets. Re-fetching a bit of data
            # is fine, since it greatly simplifies things.
            # TODO Try to not re-fetch data
            self.request.offset -= self._stride


class DownloadMethods:

    # region Public methods

    async def download_profile_photo(
            self: 'TelegramClient',
            entity: 'hints.EntityLike',
            file: 'hints.FileLike' = None,
            *,
            download_big: bool = True) -> typing.Optional[str]:
        """
        Downloads the profile photo from the given user, chat or channel.

        Arguments
            entity (`entity`):
                From whom the photo will be downloaded.

                .. note::

                    This method expects the full entity (which has the data
                    to download the photo), not an input variant.

                    It's possible that sometimes you can't fetch the entity
                    from its input (since you can get errors like
                    ``ChannelPrivateError``) but you already have it through
                    another call, like getting a forwarded message from it.

            file (`str` | `file`, optional):
                The output file path, directory, or stream-like object.
                If the path exists and is a file, it will be overwritten.
                If file is the type `bytes`, it will be downloaded in-memory
                and returned as a bytestring (i.e. ``file=bytes``, without
                parentheses or quotes).

            download_big (`bool`, optional):
                Whether to use the big version of the available photos.

        Returns
            `None` if no photo was provided, or if it was Empty. On success
            the file path is returned since it may differ from the one given.

        Example
            .. code-block:: python

                # Download your own profile photo
                path = await client.download_profile_photo('me')
                print(path)
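
                # Download some chat's photo into a chosen file; ``chat`` here
                # stands for any entity you already fetched, and the filename
                # is only an example
                await client.download_profile_photo(chat, 'chat_photo.jpg')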
        """
        # hex(crc32(x.encode('ascii'))) for x in
        # ('User', 'Chat', 'UserFull', 'ChatFull')
        ENTITIES = (0x2da17977, 0xc5af5d94, 0x1f4661b9, 0xd49a2697)
        # ('InputPeer', 'InputUser', 'InputChannel')
        INPUTS = (0xc91c90b6, 0xe669bf46, 0x40f202fd)
        if not isinstance(entity, TLObject) or entity.SUBCLASS_OF_ID in INPUTS:
            entity = await self.get_entity(entity)

        thumb = -1 if download_big else 0

        possible_names = []
        if entity.SUBCLASS_OF_ID not in ENTITIES:
            photo = entity
        else:
            if not hasattr(entity, 'photo'):
                # Special case: may be a ChatFull with photo:Photo
                # This is different from a normal UserProfilePhoto and Chat
                if not hasattr(entity, 'chat_photo'):
                    return None

                return await self._download_photo(
                    entity.chat_photo, file, date=None,
                    thumb=thumb, progress_callback=None
                )

            for attr in ('username', 'first_name', 'title'):
                possible_names.append(getattr(entity, attr, None))

            photo = entity.photo

        if isinstance(photo, (types.UserProfilePhoto, types.ChatPhoto)):
            dc_id = photo.dc_id
            loc = types.InputPeerPhotoFileLocation(
                # min users can be used to download profile photos
                # self.get_input_entity would otherwise not accept those
                peer=utils.get_input_peer(entity, check_hash=False),
                photo_id=photo.photo_id,
                big=download_big
            )
        else:
            # It doesn't make any sense to check if `photo` can be used
            # as input location, because then this method would be able
            # to "download the profile photo of a message", i.e. its
            # media which should be done with `download_media` instead.
            return None

        file = self._get_proper_filename(
            file, 'profile_photo', '.jpg',
            possible_names=possible_names
        )

        try:
            result = await self.download_file(loc, file, dc_id=dc_id)
            return result if file is bytes else file
        except errors.LocationInvalidError:
            # See issue #500, Android app fails as of v4.6.0 (1155).
            # The fix seems to be using the full channel chat photo.
            ie = await self.get_input_entity(entity)
            ty = helpers._entity_type(ie)
            if ty == helpers._EntityType.CHANNEL:
                full = await self(functions.channels.GetFullChannelRequest(ie))
                return await self._download_photo(
                    full.full_chat.chat_photo, file,
                    date=None, progress_callback=None,
                    thumb=thumb
                )
            else:
                # Until there's a report for chats, no need to.
                return None

    async def download_media(
            self: 'TelegramClient',
            message: 'hints.MessageLike',
            file: 'hints.FileLike' = None,
            *,
            thumb: 'typing.Union[int, types.TypePhotoSize]' = None,
            progress_callback: 'hints.ProgressCallback' = None) -> typing.Optional[typing.Union[str, bytes]]:
        """
        Downloads the given media from a message object.

        Note that if the download is too slow, you should consider installing
        ``cryptg`` (through ``pip install cryptg``) so that decrypting the
        received data is done in C instead of Python (much faster).

        See also `Message.download_media() <telethon.tl.custom.message.Message.download_media>`.

        Arguments
            message (`Message <telethon.tl.custom.message.Message>` | :tl:`Media`):
                The media or message containing the media that will be downloaded.

            file (`str` | `file`, optional):
                The output file path, directory, or stream-like object.
                If the path exists and is a file, it will be overwritten.
                If file is the type `bytes`, it will be downloaded in-memory
                and returned as a bytestring (i.e. ``file=bytes``, without
                parentheses or quotes).

            progress_callback (`callable`, optional):
                A callback function accepting two parameters:
                ``(received bytes, total)``.

            thumb (`int` | :tl:`PhotoSize`, optional):
                Which thumbnail size from the document or photo to download,
                instead of downloading the document or photo itself.

                If it's specified but the file does not have a thumbnail,
                this method will return `None`.

                The parameter should be an integer index between ``0`` and
                ``len(sizes)``. ``0`` will download the smallest thumbnail,
                and ``len(sizes) - 1`` will download the largest thumbnail.
                You can also use negative indices, which work the same as
                they do in Python's `list`.

                You can also pass the :tl:`PhotoSize` instance to use.
                Alternatively, the thumb size type `str` may be used.

                In short, use ``thumb=0`` if you want the smallest thumbnail
                and ``thumb=-1`` if you want the largest thumbnail.

                .. note::
                    The largest thumbnail may be a video instead of a photo,
                    as they are available since layer 116 and are bigger than
                    any of the photos.

        Returns
            `None` if no media was provided, or if it was Empty. On success
            the file path is returned since it may differ from the one given.

        Example
            .. code-block:: python

                path = await client.download_media(message)
                await client.download_media(message, filename)
                # or
                path = await message.download_media()
                await message.download_media(filename)

                # Downloading to memory
                blob = await client.download_media(message, bytes)

                # Printing download progress
                def callback(current, total):
                    print('Downloaded', current, 'out of', total,
                          'bytes: {:.2%}'.format(current / total))

                await client.download_media(message, progress_callback=callback)
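
                # Download only the largest thumbnail of a document instead of
                # the document itself (assumes the media has thumbnails)
                thumb_path = await client.download_media(message, thumb=-1)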
        """
        # Downloading large documents may be slow enough to require a new file reference
        # to be obtained mid-download. Store (input chat, message id) so that the message
        # can be re-fetched.
        msg_data = None

        # TODO This won't work for messageService
        if isinstance(message, types.Message):
            date = message.date
            media = message.media
            msg_data = (message.input_chat, message.id) if message.input_chat else None
        else:
            date = datetime.datetime.now()
            media = message

        if isinstance(media, str):
            media = utils.resolve_bot_file_id(media)

        if isinstance(media, types.MessageService):
            if isinstance(message.action,
                          types.MessageActionChatEditPhoto):
                media = media.photo

        if isinstance(media, types.MessageMediaWebPage):
            if isinstance(media.webpage, types.WebPage):
                media = media.webpage.document or media.webpage.photo

        if isinstance(media, (types.MessageMediaPhoto, types.Photo)):
            return await self._download_photo(
                media, file, date, thumb, progress_callback
            )
        elif isinstance(media, (types.MessageMediaDocument, types.Document)):
            return await self._download_document(
                media, file, date, thumb, progress_callback, msg_data
            )
        elif isinstance(media, types.MessageMediaContact) and thumb is None:
            return self._download_contact(
                media, file
            )
        elif isinstance(media, (types.WebDocument, types.WebDocumentNoProxy)) and thumb is None:
            return await self._download_web_document(
                media, file, progress_callback
            )

    async def download_file(
            self: 'TelegramClient',
            input_location: 'hints.FileLike',
            file: 'hints.OutFileLike' = None,
            *,
            part_size_kb: float = None,
            file_size: int = None,
            progress_callback: 'hints.ProgressCallback' = None,
            dc_id: int = None,
            key: bytes = None,
            iv: bytes = None) -> typing.Optional[bytes]:
        """
        Low-level method to download files from their input location.

        .. note::

            Generally, you should instead use `download_media`.
            This method is intended to be a bit more low-level.

        Arguments
            input_location (:tl:`InputFileLocation`):
                The file location from which the file will be downloaded.
                See `telethon.utils.get_input_location` source for a complete
                list of supported types.

            file (`str` | `file`, optional):
                The output file path, directory, or stream-like object.
                If the path exists and is a file, it will be overwritten.

                If the file path is `None` or `bytes`, then the result
                will be saved in memory and returned as `bytes`.

            part_size_kb (`int`, optional):
                Chunk size when downloading files. The larger, the fewer
                requests will be made (up to 512KB maximum).

            file_size (`int`, optional):
                The file size that is about to be downloaded, if known.
                Only used if ``progress_callback`` is specified.

            progress_callback (`callable`, optional):
                A callback function accepting two parameters:
                ``(downloaded bytes, total)``. Note that the
                ``total`` is the provided ``file_size``.

            dc_id (`int`, optional):
                The data center the library should connect to in order
                to download the file. You shouldn't worry about this.

            key (`bytes`, optional):
                In case of an encrypted file (secret chats), the key used
                to decrypt it.

            iv (`bytes`, optional):
                In case of an encrypted file (secret chats), the IV used
                to decrypt it.

        Example
            .. code-block:: python

                # Download a file and print its header
                data = await client.download_file(input_file, bytes)
                print(data[:16])
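
                # The same call can also write straight to disk; the path and
                # part size below are only illustrative
                await client.download_file(input_file, 'file.bin', part_size_kb=128)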
        """
        return await self._download_file(
            input_location,
            file,
            part_size_kb=part_size_kb,
            file_size=file_size,
            progress_callback=progress_callback,
            dc_id=dc_id,
            key=key,
            iv=iv,
        )

    async def _download_file(
            self: 'TelegramClient',
            input_location: 'hints.FileLike',
            file: 'hints.OutFileLike' = None,
            *,
            part_size_kb: float = None,
            file_size: int = None,
            progress_callback: 'hints.ProgressCallback' = None,
            dc_id: int = None,
            key: bytes = None,
            iv: bytes = None,
            msg_data: tuple = None) -> typing.Optional[bytes]:
        if not part_size_kb:
            if not file_size:
                part_size_kb = 64  # Reasonable default
            else:
                part_size_kb = utils.get_appropriated_part_size(file_size)

        part_size = int(part_size_kb * 1024)
        if part_size % MIN_CHUNK_SIZE != 0:
            raise ValueError(
                'The part size must be evenly divisible by 4096.')

        if isinstance(file, pathlib.Path):
            file = str(file.absolute())

        in_memory = file is None or file is bytes
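        # ``file is bytes`` means the caller passed the ``bytes`` type itself,
        # which is this API's way of asking for an in-memory download.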
        if in_memory:
            f = io.BytesIO()
        elif isinstance(file, str):
            # Ensure that we'll be able to download the media
            helpers.ensure_parent_dir_exists(file)
            f = open(file, 'wb')
        else:
            f = file

        try:
            async for chunk in self._iter_download(
                    input_location, request_size=part_size, dc_id=dc_id, msg_data=msg_data):
                if iv and key:
                    chunk = AES.decrypt_ige(chunk, key, iv)
                r = f.write(chunk)
                if inspect.isawaitable(r):
                    await r

                if progress_callback:
                    r = progress_callback(f.tell(), file_size)
                    if inspect.isawaitable(r):
                        await r

            # Not all IO objects have flush (see #1227)
            if callable(getattr(f, 'flush', None)):
                f.flush()

            if in_memory:
                return f.getvalue()
        finally:
            if isinstance(file, str) or in_memory:
                f.close()

    def iter_download(
            self: 'TelegramClient',
            file: 'hints.FileLike',
            *,
            offset: int = 0,
            stride: int = None,
            limit: int = None,
            chunk_size: int = None,
            request_size: int = MAX_CHUNK_SIZE,
            file_size: int = None,
            dc_id: int = None
    ):
        """
        Iterates over a file download, yielding chunks of the file.

        This method can be used to stream files in a more convenient
        way, since it offers more control (pausing, resuming, etc.)

        .. note::

            Using a value for `offset` or `stride` which is not a multiple
            of the minimum allowed `request_size`, or if `chunk_size` is
            different from `request_size`, the library will need to do a
            bit more work to fetch the data in the way you intend it to.

            You normally shouldn't worry about this.

        Arguments
            file (`hints.FileLike`):
                The file of which contents you want to iterate over.

            offset (`int`, optional):
                The offset in bytes into the file from where the
                download should start. For example, if a file is
                1024KB long and you just want the last 512KB, you
                would use ``offset=512 * 1024``.

            stride (`int`, optional):
                The stride of each chunk (how much the offset should
                advance between reading each chunk). This parameter
                should only be used for more advanced use cases.

                It must be bigger than or equal to the `chunk_size`.

            limit (`int`, optional):
                The limit for how many *chunks* will be yielded at most.

            chunk_size (`int`, optional):
                The maximum size of the chunks that will be yielded.
                Note that the last chunk may be less than this value.
                By default, it equals `request_size`.

            request_size (`int`, optional):
                How many bytes will be requested from Telegram when more
                data is required. By default, as many bytes as possible
                are requested. If you would like to request data in
                smaller sizes, adjust this parameter.

                Note that values outside the valid range will be clamped,
                and the final value will also be a multiple of the minimum
                allowed size.

            file_size (`int`, optional):
                If the file size is known beforehand, you should set
                this parameter to said value. Depending on the type of
                the input file passed, this may be set automatically.

            dc_id (`int`, optional):
                The data center the library should connect to in order
                to download the file. You shouldn't worry about this.

        Yields

            `bytes` objects representing the chunks of the file if the
            right conditions are met, or `memoryview` objects instead.

        Example
            .. code-block:: python

                # Streaming `media` to an output file
                # After the iteration ends, the sender is cleaned up
                with open('photo.jpg', 'wb') as fd:
                    async for chunk in client.iter_download(media):
                        fd.write(chunk)

                # Fetching only the header of a file (32 bytes)
                # You should manually close the iterator in this case.
                #
                # "stream" is a common name for asynchronous generators,
                # and iter_download will yield `bytes` (chunks of the file).
                stream = client.iter_download(media, request_size=32)
                header = await stream.__anext__()  # "manual" version of `async for`
                await stream.close()
                assert len(header) == 32
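
                # Start partway into the file, e.g. to skip data you already
                # have; the 512 KB offset and output name are only illustrative
                with open('rest.bin', 'wb') as fd:
                    async for chunk in client.iter_download(media, offset=512 * 1024):
                        fd.write(chunk)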
        """
        return self._iter_download(
            file,
            offset=offset,
            stride=stride,
            limit=limit,
            chunk_size=chunk_size,
            request_size=request_size,
            file_size=file_size,
            dc_id=dc_id,
        )

    def _iter_download(
            self: 'TelegramClient',
            file: 'hints.FileLike',
            *,
            offset: int = 0,
            stride: int = None,
            limit: int = None,
            chunk_size: int = None,
            request_size: int = MAX_CHUNK_SIZE,
            file_size: int = None,
            dc_id: int = None,
            msg_data: tuple = None
    ):
        info = utils._get_file_info(file)
        if info.dc_id is not None:
            dc_id = info.dc_id

        if file_size is None:
            file_size = info.size

        file = info.location

        if chunk_size is None:
            chunk_size = request_size

        if limit is None and file_size is not None:
            limit = (file_size + chunk_size - 1) // chunk_size
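            # Ceiling division: e.g. a 1 000 000-byte file with the default
            # 512 KiB request/chunk size yields a limit of 2 chunks, not 1.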

        if stride is None:
            stride = chunk_size
        elif stride < chunk_size:
            raise ValueError('stride must be >= chunk_size')

        request_size -= request_size % MIN_CHUNK_SIZE
        if request_size < MIN_CHUNK_SIZE:
            request_size = MIN_CHUNK_SIZE
        elif request_size > MAX_CHUNK_SIZE:
            request_size = MAX_CHUNK_SIZE
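        # e.g. an arbitrary request_size of 100_000 first drops to 98_304
        # (24 * 4096); the checks above then keep the final value between
        # MIN_CHUNK_SIZE and MAX_CHUNK_SIZE.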

        if chunk_size == request_size \
                and offset % MIN_CHUNK_SIZE == 0 \
                and stride % MIN_CHUNK_SIZE == 0 \
                and (limit is None or offset % limit == 0):
            cls = _DirectDownloadIter
            self._log[__name__].info('Starting direct file download in chunks of '
                                     '%d at %d, stride %d', request_size, offset, stride)
        else:
            cls = _GenericDownloadIter
            self._log[__name__].info('Starting indirect file download in chunks of '
                                     '%d at %d, stride %d', request_size, offset, stride)

        return cls(
            self,
            limit,
            file=file,
            dc_id=dc_id,
            offset=offset,
            stride=stride,
            chunk_size=chunk_size,
            request_size=request_size,
            file_size=file_size,
            msg_data=msg_data,
        )

    # endregion

    # region Private methods

    @staticmethod
    def _get_thumb(thumbs, thumb):
        if not thumbs:
            return None

        # Seems Telegram has changed the order and put `PhotoStrippedSize`
        # last while this is the smallest (layer 116). Ensure we have the
        # sizes sorted correctly with a custom function.
        def sort_thumbs(thumb):
            if isinstance(thumb, types.PhotoStrippedSize):
                return 1, len(thumb.bytes)
            if isinstance(thumb, types.PhotoCachedSize):
                return 1, len(thumb.bytes)
            if isinstance(thumb, types.PhotoSize):
                return 1, thumb.size
            if isinstance(thumb, types.PhotoSizeProgressive):
                return 1, max(thumb.sizes)
            if isinstance(thumb, types.VideoSize):
                return 2, thumb.size

            # Empty size or invalid should go last
            return 0, 0

        thumbs = list(sorted(thumbs, key=sort_thumbs))
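        # After sorting, invalid/empty sizes come first, photo-like sizes are
        # ordered by size, and video sizes sort last, so ``thumbs[-1]`` below
        # is the largest thumbnail available (possibly a video one).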

        for i in reversed(range(len(thumbs))):
            # :tl:`PhotoPathSize` is used for animated stickers preview, and the thumb is actually
            # a SVG path of the outline. Users expect thumbnails to be JPEG files, so pretend this
            # thumb size doesn't actually exist (#1655).
            if isinstance(thumbs[i], types.PhotoPathSize):
                thumbs.pop(i)

        if thumb is None:
            return thumbs[-1]
        elif isinstance(thumb, int):
            return thumbs[thumb]
        elif isinstance(thumb, str):
            return next((t for t in thumbs if t.type == thumb), None)
        elif isinstance(thumb, (types.PhotoSize, types.PhotoCachedSize,
                                types.PhotoStrippedSize, types.VideoSize)):
            return thumb
        else:
            return None

    def _download_cached_photo_size(self: 'TelegramClient', size, file):
        # No need to download anything, simply write the bytes
        if isinstance(size, types.PhotoStrippedSize):
            data = utils.stripped_photo_to_jpg(size.bytes)
        else:
            data = size.bytes

        if file is bytes:
            return data
        elif isinstance(file, str):
            helpers.ensure_parent_dir_exists(file)
            f = open(file, 'wb')
        else:
            f = file

        try:
            f.write(data)
        finally:
            if isinstance(file, str):
                f.close()
        return file

    async def _download_photo(self: 'TelegramClient', photo, file, date, thumb, progress_callback):
        """Specialized version of .download_media() for photos"""
        # Determine the photo and its largest size
        if isinstance(photo, types.MessageMediaPhoto):
            photo = photo.photo
        if not isinstance(photo, types.Photo):
            return

        # Include video sizes here (but they may be None so provide an empty list)
        size = self._get_thumb(photo.sizes + (photo.video_sizes or []), thumb)
        if not size or isinstance(size, types.PhotoSizeEmpty):
            return

        if isinstance(size, types.VideoSize):
            file = self._get_proper_filename(file, 'video', '.mp4', date=date)
        else:
            file = self._get_proper_filename(file, 'photo', '.jpg', date=date)

        if isinstance(size, (types.PhotoCachedSize, types.PhotoStrippedSize)):
            return self._download_cached_photo_size(size, file)

        if isinstance(size, types.PhotoSizeProgressive):
            file_size = max(size.sizes)
        else:
            file_size = size.size

        result = await self.download_file(
            types.InputPhotoFileLocation(
                id=photo.id,
                access_hash=photo.access_hash,
                file_reference=photo.file_reference,
                thumb_size=size.type
            ),
            file,
            file_size=file_size,
            progress_callback=progress_callback
        )
        return result if file is bytes else file

    @staticmethod
    def _get_kind_and_names(attributes):
        """Gets kind and possible names for :tl:`DocumentAttribute`."""
        kind = 'document'
        possible_names = []
        for attr in attributes:
            if isinstance(attr, types.DocumentAttributeFilename):
                possible_names.insert(0, attr.file_name)

            elif isinstance(attr, types.DocumentAttributeAudio):
                kind = 'audio'
                if attr.performer and attr.title:
                    possible_names.append('{} - {}'.format(
                        attr.performer, attr.title
                    ))
                elif attr.performer:
                    possible_names.append(attr.performer)
                elif attr.title:
                    possible_names.append(attr.title)
                elif attr.voice:
                    kind = 'voice'

        return kind, possible_names

    async def _download_document(
            self, document, file, date, thumb, progress_callback, msg_data):
        """Specialized version of .download_media() for documents."""
        if isinstance(document, types.MessageMediaDocument):
            document = document.document
        if not isinstance(document, types.Document):
            return

        if thumb is None:
            kind, possible_names = self._get_kind_and_names(document.attributes)
            file = self._get_proper_filename(
                file, kind, utils.get_extension(document),
                date=date, possible_names=possible_names
            )
            size = None
        else:
            file = self._get_proper_filename(file, 'photo', '.jpg', date=date)
            size = self._get_thumb(document.thumbs, thumb)
            if not size or isinstance(size, types.PhotoSizeEmpty):
                return

        if isinstance(size, (types.PhotoCachedSize, types.PhotoStrippedSize)):
            return self._download_cached_photo_size(size, file)

        result = await self._download_file(
            types.InputDocumentFileLocation(
                id=document.id,
                access_hash=document.access_hash,
                file_reference=document.file_reference,
                thumb_size=size.type if size else ''
            ),
            file,
            file_size=size.size if size else document.size,
            progress_callback=progress_callback,
            msg_data=msg_data,
        )

        return result if file is bytes else file

    @classmethod
    def _download_contact(cls, mm_contact, file):
        """
        Specialized version of .download_media() for contacts.
        Will make use of the vCard 4.0 format.
        """
        first_name = mm_contact.first_name
        last_name = mm_contact.last_name
        phone_number = mm_contact.phone_number

        # Remove these pesky characters
        first_name = first_name.replace(';', '')
        last_name = (last_name or '').replace(';', '')
        result = (
            'BEGIN:VCARD\n'
            'VERSION:4.0\n'
            'N:{f};{l};;;\n'
            'FN:{f} {l}\n'
            'TEL;TYPE=cell;VALUE=uri:tel:+{p}\n'
            'END:VCARD\n'
        ).format(f=first_name, l=last_name, p=phone_number).encode('utf-8')
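
        # For a contact "Jane Doe" with phone 123456789 (made-up values),
        # ``result`` would read:
        #   BEGIN:VCARD
        #   VERSION:4.0
        #   N:Jane;Doe;;;
        #   FN:Jane Doe
        #   TEL;TYPE=cell;VALUE=uri:tel:+123456789
        #   END:VCARD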

        file = cls._get_proper_filename(
            file, 'contact', '.vcard',
            possible_names=[first_name, phone_number, last_name]
        )
        if file is bytes:
            return result
        f = file if hasattr(file, 'write') else open(file, 'wb')

        try:
            f.write(result)
        finally:
            # Only close the stream if we opened it
            if f != file:
                f.close()

        return file

    @classmethod
    async def _download_web_document(cls, web, file, progress_callback):
        """
        Specialized version of .download_media() for web documents.
        """
        if not aiohttp:
            raise ValueError(
                'Cannot download web documents without the aiohttp '
                'dependency. Install it with `pip install aiohttp`'
            )

        # TODO Better way to get opened handles of files and auto-close
        kind, possible_names = cls._get_kind_and_names(web.attributes)
        file = cls._get_proper_filename(
            file, kind, utils.get_extension(web),
            possible_names=possible_names
        )
        if file is bytes:
            f = io.BytesIO()
        elif hasattr(file, 'write'):
            f = file
        else:
            f = open(file, 'wb')

        try:
            async with aiohttp.ClientSession() as session:
                # TODO Use progress_callback; get content length from response
                # https://github.com/telegramdesktop/tdesktop/blob/c7e773dd9aeba94e2be48c032edc9a78bb50234e/Telegram/SourceFiles/ui/images.cpp#L1318-L1319
                async with session.get(web.url) as response:
                    while True:
                        chunk = await response.content.read(128 * 1024)
                        if not chunk:
                            break
                        f.write(chunk)
        finally:
            if f != file:
                f.close()

        return f.getvalue() if file is bytes else file

    @staticmethod
    def _get_proper_filename(file, kind, extension,
                             date=None, possible_names=None):
        """Gets a proper filename for 'file', if this is a path.

        'kind' should be the kind of the output file (photo, document...)
        'extension' should be the extension to be added to the file if
        the filename doesn't have any yet
        'date' should be when this file was originally sent, if known
        'possible_names' should be an ordered list of possible names

        If no modification is made to the path, any existing file
        will be overwritten.
        If any modification is made to the path, this method will
        ensure that no existing file will be overwritten.
        """
        if isinstance(file, pathlib.Path):
            file = str(file.absolute())

        if file is not None and not isinstance(file, str):
            # Probably a stream-like object, we cannot set a filename here
            return file

        if file is None:
            file = ''
        elif os.path.isfile(file):
            # Make no modifications to valid existing paths
            return file

        if os.path.isdir(file) or not file:
            try:
                name = None if possible_names is None else next(
                    x for x in possible_names if x
                )
            except StopIteration:
                name = None

            if not name:
                if not date:
                    date = datetime.datetime.now()
                name = '{}_{}-{:02}-{:02}_{:02}-{:02}-{:02}'.format(
                    kind,
                    date.year, date.month, date.day,
                    date.hour, date.minute, date.second,
                )
            file = os.path.join(file, name)

        directory, name = os.path.split(file)
        name, ext = os.path.splitext(name)
        if not ext:
            ext = extension

        result = os.path.join(directory, name + ext)
        if not os.path.isfile(result):
            return result

        i = 1
        while True:
            result = os.path.join(directory, '{} ({}){}'.format(name, i, ext))
            if not os.path.isfile(result):
                return result
            i += 1

    # endregion