2018-06-09 22:22:54 +03:00
|
|
|
import abc
|
2020-09-10 15:52:25 +03:00
|
|
|
import re
|
2018-06-14 20:35:12 +03:00
|
|
|
import asyncio
|
2019-06-03 20:41:22 +03:00
|
|
|
import collections
|
2017-06-08 14:12:57 +03:00
|
|
|
import logging
|
2018-03-02 22:05:09 +03:00
|
|
|
import platform
|
2018-06-18 14:22:25 +03:00
|
|
|
import time
|
2019-05-03 22:37:27 +03:00
|
|
|
import typing
|
2022-05-16 10:36:57 +03:00
|
|
|
import datetime
|
2024-02-13 20:18:52 +03:00
|
|
|
import pathlib
|
2018-06-08 22:52:59 +03:00
|
|
|
|
2019-03-21 14:21:00 +03:00
|
|
|
from .. import version, helpers, __name__ as __base_name__
|
2018-06-09 22:22:54 +03:00
|
|
|
from ..crypto import rsa
|
|
|
|
from ..extensions import markdown
|
2019-05-03 22:37:27 +03:00
|
|
|
from ..network import MTProtoSender, Connection, ConnectionTcpFull, TcpMTProxy
|
2018-08-02 16:17:44 +03:00
|
|
|
from ..sessions import Session, SQLiteSession, MemorySession
|
2020-07-04 13:18:39 +03:00
|
|
|
from ..tl import functions, types
|
2018-06-18 22:02:42 +03:00
|
|
|
from ..tl.alltlobjects import LAYER
|
2022-05-13 18:40:03 +03:00
|
|
|
from .._updates import MessageBox, EntityCache as MbEntityCache, SessionState, ChannelState, Entity, EntityType
|
2017-06-08 14:12:57 +03:00
|
|
|
|
2020-02-02 12:01:15 +03:00
|
|
|
# Default production data center used when a session has no server
# address stored yet (``__init__`` below calls ``session.set_dc`` with these).
DEFAULT_DC_ID = 2
DEFAULT_IPV4_IP = '149.154.167.51'
DEFAULT_IPV6_IP = '2001:67c:4e8:f002::a'
DEFAULT_PORT = 443


if typing.TYPE_CHECKING:
    # Only imported for type checkers; avoids a circular import at runtime.
    from .telegramclient import TelegramClient


# Root logger of the library; per-client loggers are children of this one
# (see the ``_Loggers`` helper built in ``TelegramBaseClient.__init__``).
_base_log = logging.getLogger(__base_name__)


# In seconds, how long to wait before disconnecting an exported sender.
_DISCONNECT_EXPORTED_AFTER = 60
|
|
|
|
|
|
|
|
|
|
|
|
class _ExportState:
    """Borrow-counting state for an exported sender.

    Tracks how many borrowers currently hold the sender and when the
    count last dropped to zero, so the sender can be disconnected after
    it has sat unused for ``_DISCONNECT_EXPORTED_AFTER`` seconds.
    """

    def __init__(self):
        # Amount of active borrows; once it drops back to ``0`` the
        # timestamp below is refreshed and the idle countdown starts.
        self._n = 0
        # ``time.time()`` at the moment the borrow count last hit zero.
        self._zero_ts = 0
        # Whether the underlying sender is currently connected.
        self._connected = False

    def add_borrow(self):
        """Register one more borrower; the sender counts as connected."""
        self._connected = True
        self._n += 1

    def add_return(self):
        """Release one borrower; start the idle timer if none remain."""
        self._n -= 1
        assert self._n >= 0, 'returned sender more than it was borrowed'
        if not self._n:
            self._zero_ts = time.time()

    def should_disconnect(self):
        """``True`` once connected, unborrowed, and idle long enough."""
        if self._n or not self._connected:
            return False
        return (time.time() - self._zero_ts) > _DISCONNECT_EXPORTED_AFTER

    def need_connect(self):
        """``True`` if the sender is not currently marked as connected."""
        return not self._connected

    def mark_disconnected(self):
        """Record the disconnection; only valid when idle (asserted)."""
        assert self.should_disconnect(), 'marked as disconnected when it was borrowed'
        self._connected = False
|
|
|
|
|
|
|
|
|
2019-02-27 21:30:12 +03:00
|
|
|
# TODO How hard would it be to support both `trio` and `asyncio`?
|
2018-06-09 22:22:54 +03:00
|
|
|
class TelegramBaseClient(abc.ABC):
|
2018-06-08 22:52:59 +03:00
|
|
|
"""
|
2018-06-09 22:22:54 +03:00
|
|
|
This is the abstract base class for the client. It defines some
|
|
|
|
basic stuff like connecting, switching data center, etc, and
|
|
|
|
leaves the `__call__` unimplemented.
|
2018-06-08 22:52:59 +03:00
|
|
|
|
2019-05-20 12:38:26 +03:00
|
|
|
Arguments
|
2018-06-08 22:52:59 +03:00
|
|
|
session (`str` | `telethon.sessions.abstract.Session`, `None`):
|
|
|
|
The file name of the session file to be used if a string is
|
|
|
|
given (it may be a full path), or the Session instance to be
|
2019-07-06 13:10:25 +03:00
|
|
|
used otherwise. If it's `None`, the session will not be saved,
|
2018-06-08 22:52:59 +03:00
|
|
|
and you should call :meth:`.log_out()` when you're done.
|
|
|
|
|
|
|
|
Note that if you pass a string it will be a file in the current
|
|
|
|
working directory, although you can also pass absolute paths.
|
|
|
|
|
|
|
|
The session file contains enough information for you to login
|
|
|
|
without re-sending the code, so if you have to enter the code
|
|
|
|
more than once, maybe you're changing the working directory,
|
|
|
|
renaming or removing the file, or using random names.
|
|
|
|
|
|
|
|
api_id (`int` | `str`):
|
|
|
|
The API ID you obtained from https://my.telegram.org.
|
|
|
|
|
|
|
|
api_hash (`str`):
|
2021-05-30 19:00:27 +03:00
|
|
|
The API hash you obtained from https://my.telegram.org.
|
2018-06-08 22:52:59 +03:00
|
|
|
|
|
|
|
connection (`telethon.network.connection.common.Connection`, optional):
|
|
|
|
The connection instance to be used when creating a new connection
|
2018-09-28 18:51:28 +03:00
|
|
|
to the servers. It **must** be a type.
|
2018-06-08 22:52:59 +03:00
|
|
|
|
|
|
|
Defaults to `telethon.network.connection.tcpfull.ConnectionTcpFull`.
|
|
|
|
|
|
|
|
use_ipv6 (`bool`, optional):
|
|
|
|
Whether to connect to the servers through IPv6 or not.
|
2019-07-06 13:10:25 +03:00
|
|
|
By default this is `False` as IPv6 support is not
|
2018-06-08 22:52:59 +03:00
|
|
|
too widespread yet.
|
|
|
|
|
2019-02-11 11:54:35 +03:00
|
|
|
proxy (`tuple` | `list` | `dict`, optional):
|
|
|
|
An iterable consisting of the proxy info. If `connection` is
|
2019-03-10 03:00:11 +03:00
|
|
|
one of `MTProxy`, then it should contain MTProxy credentials:
|
2019-02-11 11:54:35 +03:00
|
|
|
``('hostname', port, 'secret')``. Otherwise, it's meant to store
|
|
|
|
function parameters for PySocks, like ``(type, 'hostname', port)``.
|
2018-06-08 22:52:59 +03:00
|
|
|
See https://github.com/Anorov/PySocks#usage-1 for more.
|
|
|
|
|
2020-11-09 21:59:54 +03:00
|
|
|
local_addr (`str` | `tuple`, optional):
|
|
|
|
Local host address (and port, optionally) used to bind the socket to locally.
|
2020-10-07 11:03:19 +03:00
|
|
|
You only need to use this if you have multiple network cards and
|
|
|
|
want to use a specific one.
|
|
|
|
|
2018-10-05 15:06:15 +03:00
|
|
|
timeout (`int` | `float`, optional):
|
|
|
|
The timeout in seconds to be used when connecting.
|
|
|
|
This is **not** the timeout to be used when ``await``'ing for
|
|
|
|
invoked requests, and you should use ``asyncio.wait`` or
|
|
|
|
``asyncio.wait_for`` for that.
|
2018-06-18 19:11:16 +03:00
|
|
|
|
2019-02-06 21:41:45 +03:00
|
|
|
request_retries (`int` | `None`, optional):
|
2018-06-18 19:11:16 +03:00
|
|
|
How many times a request should be retried. Request are retried
|
|
|
|
when Telegram is having internal issues (due to either
|
|
|
|
``errors.ServerError`` or ``errors.RpcCallFailError``),
|
|
|
|
when there is a ``errors.FloodWaitError`` less than
|
2018-06-26 17:09:16 +03:00
|
|
|
`flood_sleep_threshold`, or when there's a migrate error.
|
2018-06-18 19:11:16 +03:00
|
|
|
|
2019-07-06 13:10:25 +03:00
|
|
|
May take a negative or `None` value for infinite retries, but
|
2019-02-06 21:41:45 +03:00
|
|
|
this is not recommended, since some requests can always trigger
|
|
|
|
a call fail (such as searching for messages).
|
2018-06-18 19:11:16 +03:00
|
|
|
|
2019-02-06 21:41:45 +03:00
|
|
|
connection_retries (`int` | `None`, optional):
|
2018-06-18 19:11:16 +03:00
|
|
|
How many times the reconnection should retry, either on the
|
|
|
|
initial connection or when Telegram disconnects us. May be
|
2019-07-06 13:10:25 +03:00
|
|
|
set to a negative or `None` value for infinite retries, but
|
2019-02-06 21:41:45 +03:00
|
|
|
this is not recommended, since the program can get stuck in an
|
|
|
|
infinite loop.
|
2018-06-18 19:11:16 +03:00
|
|
|
|
2018-10-28 12:55:58 +03:00
|
|
|
retry_delay (`int` | `float`, optional):
|
|
|
|
The delay in seconds to sleep between automatic reconnections.
|
|
|
|
|
2018-06-18 19:11:16 +03:00
|
|
|
auto_reconnect (`bool`, optional):
|
|
|
|
Whether reconnection should be retried `connection_retries`
|
|
|
|
times automatically if Telegram disconnects us or not.
|
2018-06-08 22:52:59 +03:00
|
|
|
|
2018-06-29 11:45:04 +03:00
|
|
|
sequential_updates (`bool`, optional):
|
|
|
|
By default every incoming update will create a new task, so
|
|
|
|
you can handle several updates in parallel. Some scripts need
|
|
|
|
the order in which updates are processed to be sequential, and
|
|
|
|
this setting allows them to do so.
|
|
|
|
|
2019-07-06 13:10:25 +03:00
|
|
|
If set to `True`, incoming updates will be put in a queue
|
2018-06-29 11:45:04 +03:00
|
|
|
and processed sequentially. This means your event handlers
|
|
|
|
should *not* perform long-running operations since new
|
|
|
|
updates are put inside of an unbounded queue.
|
|
|
|
|
2018-06-26 17:09:16 +03:00
|
|
|
flood_sleep_threshold (`int` | `float`, optional):
|
|
|
|
The threshold below which the library should automatically
|
2019-09-24 12:37:41 +03:00
|
|
|
sleep on flood wait and slow mode wait errors (inclusive). For instance, if a
|
2018-06-26 17:09:16 +03:00
|
|
|
``FloodWaitError`` for 17s occurs and `flood_sleep_threshold`
|
|
|
|
is 20s, the library will ``sleep`` automatically. If the error
|
|
|
|
was for 21s, it would ``raise FloodWaitError`` instead. Values
|
|
|
|
larger than a day (like ``float('inf')``) will be changed to a day.
|
2018-06-08 22:52:59 +03:00
|
|
|
|
2020-10-01 12:53:17 +03:00
|
|
|
raise_last_call_error (`bool`, optional):
|
|
|
|
When API calls fail in a way that causes Telethon to retry
|
|
|
|
automatically, should the RPC error of the last attempt be raised
|
|
|
|
instead of a generic ValueError. This is mostly useful for
|
|
|
|
detecting when Telegram has internal issues.
|
|
|
|
|
2018-06-08 22:52:59 +03:00
|
|
|
device_model (`str`, optional):
|
|
|
|
"Device model" to be sent when creating the initial connection.
|
2020-09-10 15:52:25 +03:00
|
|
|
            Defaults to ``'PC 64bit'`` or ``'PC 32bit'`` derived from ``platform.uname().machine``, or to the raw machine value if it is not recognised.
|
2018-06-08 22:52:59 +03:00
|
|
|
|
|
|
|
system_version (`str`, optional):
|
|
|
|
"System version" to be sent when creating the initial connection.
|
2020-09-10 15:52:25 +03:00
|
|
|
            Defaults to ``platform.uname().release`` with everything from the first ``-`` onwards removed (e.g. ``5.15.0-56-generic`` becomes ``5.15.0``).
|
2018-06-08 22:52:59 +03:00
|
|
|
|
|
|
|
app_version (`str`, optional):
|
|
|
|
"App version" to be sent when creating the initial connection.
|
|
|
|
Defaults to `telethon.version.__version__`.
|
|
|
|
|
|
|
|
lang_code (`str`, optional):
|
|
|
|
"Language code" to be sent when creating the initial connection.
|
|
|
|
Defaults to ``'en'``.
|
|
|
|
|
|
|
|
system_lang_code (`str`, optional):
|
|
|
|
"System lang code" to be sent when creating the initial connection.
|
|
|
|
Defaults to `lang_code`.
|
2019-01-11 17:52:30 +03:00
|
|
|
|
|
|
|
loop (`asyncio.AbstractEventLoop`, optional):
|
2023-01-11 23:02:29 +03:00
|
|
|
Asyncio event loop to use. Defaults to `asyncio.get_running_loop()`.
|
2020-07-25 19:39:35 +03:00
|
|
|
This argument is ignored.
|
2019-01-11 17:52:30 +03:00
|
|
|
|
|
|
|
base_logger (`str` | `logging.Logger`, optional):
|
|
|
|
Base logger name or instance to use.
|
|
|
|
If a `str` is given, it'll be passed to `logging.getLogger()`. If a
|
|
|
|
`logging.Logger` is given, it'll be used directly. If something
|
|
|
|
else or nothing is given, the default logger will be used.
|
2021-08-29 12:36:08 +03:00
|
|
|
|
|
|
|
receive_updates (`bool`, optional):
|
|
|
|
Whether the client will receive updates or not. By default, updates
|
|
|
|
will be received from Telegram as they occur.
|
|
|
|
|
|
|
|
Turning this off means that Telegram will not send updates at all
|
|
|
|
so event handlers, conversations, and QR login will not work.
|
|
|
|
However, certain scripts don't need updates, so this will reduce
|
|
|
|
the amount of bandwidth used.
|
2023-04-06 14:45:12 +03:00
|
|
|
|
|
|
|
entity_cache_limit (`int`, optional):
|
|
|
|
How many users, chats and channels to keep in the in-memory cache
|
|
|
|
at most. This limit is checked against when processing updates.
|
|
|
|
|
|
|
|
When this limit is reached or exceeded, all entities that are not
|
|
|
|
required for update handling will be flushed to the session file.
|
|
|
|
|
|
|
|
Note that this implies that there is a lower bound to the amount
|
|
|
|
of entities that must be kept in memory.
|
|
|
|
|
|
|
|
Setting this limit too low will cause the library to attempt to
|
|
|
|
flush entities to the session file even if no entities can be
|
|
|
|
removed from the in-memory cache, which will degrade performance.
|
2017-06-08 14:12:57 +03:00
|
|
|
"""
|
|
|
|
|
|
|
|
    # Current TelegramClient version
    __version__ = version.__version__

    # Cached server configuration (with .dc_options), can be "global"
    # (class-level attribute, so the cache is shared by all client instances).
    _config = None
    # Cached CDN configuration; class-level and shared like ``_config``.
    _cdn_config = None
|
2017-09-17 15:30:23 +03:00
|
|
|
|
2017-06-08 14:12:57 +03:00
|
|
|
# region Initialization
|
|
|
|
|
2019-05-03 22:37:27 +03:00
|
|
|
    def __init__(
            self: 'TelegramClient',
            session: 'typing.Union[str, pathlib.Path, Session]',
            api_id: int,
            api_hash: str,
            *,
            connection: 'typing.Type[Connection]' = ConnectionTcpFull,
            use_ipv6: bool = False,
            proxy: typing.Union[tuple, dict] = None,
            local_addr: typing.Union[str, tuple] = None,
            timeout: int = 10,
            request_retries: int = 5,
            connection_retries: int = 5,
            retry_delay: int = 1,
            auto_reconnect: bool = True,
            sequential_updates: bool = False,
            flood_sleep_threshold: int = 60,
            raise_last_call_error: bool = False,
            device_model: str = None,
            system_version: str = None,
            app_version: str = None,
            lang_code: str = 'en',
            system_lang_code: str = 'en',
            loop: asyncio.AbstractEventLoop = None,
            base_logger: typing.Union[str, logging.Logger] = None,
            receive_updates: bool = True,
            catch_up: bool = False,
            entity_cache_limit: int = 5000
    ):
        """
        Validate the arguments and initialise all client state.

        No network requests are made here; connecting happens in
        `connect`. See the class docstring for what each argument means.
        """
        if not api_id or not api_hash:
            raise ValueError(
                "Your API ID or Hash cannot be empty or None. "
                "Refer to telethon.rtfd.io for more information.")

        self._use_ipv6 = use_ipv6

        # Resolve the base logger: name -> getLogger(name), Logger -> as-is,
        # anything else -> the library-wide default logger.
        if isinstance(base_logger, str):
            base_logger = logging.getLogger(base_logger)
        elif not isinstance(base_logger, logging.Logger):
            base_logger = _base_log

        # Lazily creates child loggers keyed by module name, stripping the
        # leading "telethon." so children hang directly off ``base_logger``.
        class _Loggers(dict):
            def __missing__(self, key):
                if key.startswith("telethon."):
                    key = key.split('.', maxsplit=1)[1]

                return base_logger.getChild(key)

        self._log = _Loggers()

        # Determine what session object we have
        if isinstance(session, (str, pathlib.Path)):
            try:
                session = SQLiteSession(str(session))
            except ImportError:
                # sqlite3 can be missing from some Python builds; fall back
                # to an in-memory session (nothing persists between runs).
                import warnings
                warnings.warn(
                    'The sqlite3 module is not available under this '
                    'Python installation and no custom session '
                    'instance was given; using MemorySession.\n'
                    'You will need to re-login every time unless '
                    'you use another session storage'
                )
                session = MemorySession()
        elif session is None:
            session = MemorySession()
        elif not isinstance(session, Session):
            raise TypeError(
                'The given session must be a str or a Session instance.'
            )

        # ':' in session.server_address is True if it's an IPv6 address
        # Reset to the default DC if there is no stored address, or the
        # stored address' IP version does not match ``use_ipv6``.
        if (not session.server_address or
                (':' in session.server_address) != use_ipv6):
            session.set_dc(
                DEFAULT_DC_ID,
                DEFAULT_IPV6_IP if self._use_ipv6 else DEFAULT_IPV4_IP,
                DEFAULT_PORT
            )
            session.save()

        self.flood_sleep_threshold = flood_sleep_threshold

        # TODO Use AsyncClassWrapper(session)
        # ChatGetter and SenderGetter can use the in-memory _mb_entity_cache
        # to avoid network access and the need for await in session files.
        #
        # The session file only wants the entities to persist
        # them to disk, and to save additional useful information.
        # TODO Session should probably return all cached
        #      info of entities, not just the input versions
        self.session = session
        self.api_id = int(api_id)
        self.api_hash = api_hash

        # Current proxy implementation requires `sock_connect`, and some
        # event loops lack this method. If the current loop is missing it,
        # bail out early and suggest an alternative.
        #
        # TODO A better fix is obviously avoiding the use of `sock_connect`
        #
        # See https://github.com/LonamiWebs/Telethon/issues/1337 for details.
        if not callable(getattr(self.loop, 'sock_connect', None)):
            raise TypeError(
                'Event loop of type {} lacks `sock_connect`, which is needed to use proxies.\n\n'
                'Change the event loop in use to use proxies:\n'
                '# https://github.com/LonamiWebs/Telethon/issues/1337\n'
                'import asyncio\n'
                'asyncio.set_event_loop(asyncio.SelectorEventLoop())'.format(
                    self.loop.__class__.__name__
                )
            )

        # A local bind address must agree with the requested IP version.
        if local_addr is not None:
            if use_ipv6 is False and ':' in local_addr:
                raise TypeError(
                    'A local IPv6 address must only be used with `use_ipv6=True`.'
                )
            elif use_ipv6 is True and ':' not in local_addr:
                raise TypeError(
                    '`use_ipv6=True` must only be used with a local IPv6 address.'
                )

        self._raise_last_call_error = raise_last_call_error

        self._request_retries = request_retries
        self._connection_retries = connection_retries
        self._retry_delay = retry_delay or 0
        self._proxy = proxy
        self._local_addr = local_addr
        self._timeout = timeout
        self._auto_reconnect = auto_reconnect

        assert isinstance(connection, type)
        self._connection = connection
        # MTProxy credentials are forwarded to Telegram inside the initial
        # connection request; plain socket proxies are handled locally.
        init_proxy = None if not issubclass(connection, TcpMTProxy) else \
            types.InputClientProxy(*connection.address_info(proxy))

        # Used on connection. Capture the variables in a lambda since
        # exporting clients need to create this InvokeWithLayerRequest.
        system = platform.uname()

        if system.machine in ('x86_64', 'AMD64'):
            default_device_model = 'PC 64bit'
        elif system.machine in ('i386','i686','x86'):
            default_device_model = 'PC 32bit'
        else:
            default_device_model = system.machine
        # Keep only the part of the release before the first dash.
        default_system_version = re.sub(r'-.+','',system.release)

        self._init_request = functions.InitConnectionRequest(
            api_id=self.api_id,
            device_model=device_model or default_device_model or 'Unknown',
            system_version=system_version or default_system_version or '1.0',
            app_version=app_version or self.__version__,
            lang_code=lang_code,
            system_lang_code=system_lang_code,
            lang_pack='',  # "langPacks are for official apps only"
            query=None,
            proxy=init_proxy
        )

        # Remember flood-waited requests to avoid making them again
        self._flood_waited_requests = {}

        # Cache ``{dc_id: (_ExportState, MTProtoSender)}`` for all borrowed senders
        self._borrowed_senders = {}
        self._borrow_sender_lock = asyncio.Lock()
        self._exported_sessions = {}

        self._loop = None  # only used as a sanity check
        self._updates_error = None
        self._updates_handle = None
        self._keepalive_handle = None
        self._last_request = time.time()
        self._no_updates = not receive_updates

        # Used for non-sequential updates, in order to terminate all pending tasks on disconnect.
        self._sequential_updates = sequential_updates
        self._event_handler_tasks = set()

        self._authorized = None  # None = unknown, False = no, True = yes

        # Some further state for subclasses
        self._event_builders = []

        # {chat_id: {Conversation}}
        self._conversations = collections.defaultdict(set)

        # Hack to workaround the fact Telegram may send album updates as
        # different Updates when being sent from a different data center.
        # {grouped_id: AlbumHack}
        #
        # FIXME: We don't bother cleaning this up because it's not really
        #        worth it, albums are pretty rare and this only holds them
        #        for a second at most.
        self._albums = {}

        # Default parse mode
        self._parse_mode = markdown

        # Some fields to ease signing in. Let {phone: hash} be
        # a dictionary because the user may change their mind.
        self._phone_code_hash = {}
        self._phone = None
        self._tos = None

        # A place to store if channels are a megagroup or not (see `edit_admin`)
        self._megagroup_cache = {}

        # This is backported from v2 in a very ad-hoc way just to get proper update handling
        self._catch_up = catch_up
        self._updates_queue = asyncio.Queue()
        self._message_box = MessageBox(self._log['messagebox'])
        self._mb_entity_cache = MbEntityCache()  # required for proper update handling (to know when to getDifference)
        self._entity_cache_limit = entity_cache_limit

        self._sender = MTProtoSender(
            self.session.auth_key,
            loggers=self._log,
            retries=self._connection_retries,
            delay=self._retry_delay,
            auto_reconnect=self._auto_reconnect,
            connect_timeout=self._timeout,
            auth_key_callback=self._auth_key_callback,
            updates_queue=self._updates_queue,
            auto_reconnect_callback=self._handle_auto_reconnect
        )
|
|
|
|
|
|
|
|
|
2018-06-08 22:52:59 +03:00
|
|
|
# endregion
|
2017-12-20 14:47:10 +03:00
|
|
|
|
2018-06-14 20:35:12 +03:00
|
|
|
# region Properties
|
|
|
|
|
|
|
|
@property
|
2019-05-03 22:37:27 +03:00
|
|
|
def loop(self: 'TelegramClient') -> asyncio.AbstractEventLoop:
|
2019-05-06 12:38:26 +03:00
|
|
|
"""
|
|
|
|
Property with the ``asyncio`` event loop used by this client.
|
2019-05-20 12:38:26 +03:00
|
|
|
|
|
|
|
Example
|
|
|
|
.. code-block:: python
|
|
|
|
|
|
|
|
# Download media in the background
|
2020-06-24 15:30:41 +03:00
|
|
|
task = client.loop.create_task(message.download_media())
|
2019-05-20 12:38:26 +03:00
|
|
|
|
|
|
|
# Do some work
|
|
|
|
...
|
|
|
|
|
|
|
|
# Join the task (wait for it to complete)
|
|
|
|
await task
|
2019-05-06 12:38:26 +03:00
|
|
|
"""
|
2023-01-11 23:02:29 +03:00
|
|
|
return helpers.get_running_loop()
|
2018-06-14 20:35:12 +03:00
|
|
|
|
2018-06-17 20:29:41 +03:00
|
|
|
@property
|
2019-05-03 22:37:27 +03:00
|
|
|
def disconnected(self: 'TelegramClient') -> asyncio.Future:
|
2018-06-17 20:29:41 +03:00
|
|
|
"""
|
2019-05-06 12:38:26 +03:00
|
|
|
Property with a ``Future`` that resolves upon disconnection.
|
2019-05-20 12:38:26 +03:00
|
|
|
|
|
|
|
Example
|
|
|
|
.. code-block:: python
|
|
|
|
|
|
|
|
# Wait for a disconnection to occur
|
|
|
|
try:
|
|
|
|
await client.disconnected
|
|
|
|
except OSError:
|
|
|
|
print('Error on disconnect')
|
2018-06-17 20:29:41 +03:00
|
|
|
"""
|
|
|
|
return self._sender.disconnected
|
|
|
|
|
2019-11-18 14:51:18 +03:00
|
|
|
@property
|
|
|
|
def flood_sleep_threshold(self):
|
|
|
|
return self._flood_sleep_threshold
|
|
|
|
|
|
|
|
@flood_sleep_threshold.setter
|
|
|
|
def flood_sleep_threshold(self, value):
|
|
|
|
# None -> 0, negative values don't really matter
|
|
|
|
self._flood_sleep_threshold = min(value or 0, 24 * 60 * 60)
|
|
|
|
|
2018-06-14 20:35:12 +03:00
|
|
|
# endregion
|
|
|
|
|
2018-06-08 22:52:59 +03:00
|
|
|
# region Connecting
|
2017-09-29 21:50:27 +03:00
|
|
|
|
2019-05-03 22:37:27 +03:00
|
|
|
    async def connect(self: 'TelegramClient') -> None:
        """
        Connects to Telegram.

        .. note::

            Connect means connect and nothing else, and only one low-level
            request is made to notify Telegram about which layer we will be
            using.

            Before Telegram sends you updates, you need to make a high-level
            request, like `client.get_me() <telethon.client.users.UserMethods.get_me>`,
            as described in https://core.telegram.org/api/updates.

        Example
            .. code-block:: python

                try:
                    await client.connect()
                except OSError:
                    print('Failed to connect')
        """
        # Logging out clears the session; such a client cannot be reused.
        if self.session is None:
            raise ValueError('TelegramClient instance cannot be reused after logging out')

        # Pin the event loop on the first connect and refuse to run under a
        # different loop afterwards (tasks would be split across loops).
        if self._loop is None:
            self._loop = helpers.get_running_loop()
        elif self._loop != helpers.get_running_loop():
            raise RuntimeError('The asyncio event loop must not change after connection (see the FAQ for details)')

        if not await self._sender.connect(self._connection(
            self.session.server_address,
            self.session.port,
            self.session.dc_id,
            loggers=self._log,
            proxy=self._proxy,
            local_addr=self._local_addr
        )):
            # We don't want to init or modify anything if we were already connected
            return

        # Persist the (possibly newly-generated) auth key right away.
        self.session.auth_key = self._sender.auth_key
        self.session.save()

        try:
            # See comment when saving entities to understand this hack
            self_id = self.session.get_input_entity(0).access_hash
            self_user = self.session.get_input_entity(self_id)
            self._mb_entity_cache.set_self_user(self_id, None, self_user.access_hash)
        except ValueError:
            # No cached self-user; nothing to seed the entity cache with.
            pass

        if self._catch_up:
            # Seed the message box from the update state stored in the
            # session, so missed updates can be fetched after reconnecting.
            ss = SessionState(0, 0, False, 0, 0, 0, 0, None)
            cs = []

            for entity_id, state in self.session.get_update_states():
                if entity_id == 0:
                    # TODO current session doesn't store self-user info but adding that is breaking on downstream session impls
                    ss = SessionState(0, 0, False, state.pts, state.qts, int(state.date.timestamp()), state.seq, None)
                else:
                    cs.append(ChannelState(entity_id, state.pts))

            self._message_box.load(ss, cs)
            for state in cs:
                try:
                    entity = self.session.get_input_entity(state.channel_id)
                except ValueError:
                    self._log[__name__].warning(
                        'No access_hash in cache for channel %s, will not catch up', state.channel_id)
                else:
                    self._mb_entity_cache.put(Entity(EntityType.CHANNEL, entity.channel_id, entity.access_hash))

        # Wrap GetConfig inside the InitConnection request; this is the one
        # low-level request sent on every fresh connection (see docstring).
        self._init_request.query = functions.help.GetConfigRequest()

        req = self._init_request
        if self._no_updates:
            # User opted out of updates entirely.
            req = functions.InvokeWithoutUpdatesRequest(req)

        await self._sender.send(functions.InvokeWithLayerRequest(LAYER, req))

        if self._message_box.is_empty():
            me = await self.get_me()
            if me:
                await self._on_login(me)  # also calls GetState to initialize the MessageBox

        # Background tasks: one handles updates, one keeps the connection alive.
        self._updates_handle = self.loop.create_task(self._update_loop())
        self._keepalive_handle = self.loop.create_task(self._keepalive_loop())
|
2018-06-18 14:22:25 +03:00
|
|
|
|
2019-05-03 22:37:27 +03:00
|
|
|
def is_connected(self: 'TelegramClient') -> bool:
|
2018-06-08 22:52:59 +03:00
|
|
|
"""
|
2019-07-06 13:10:25 +03:00
|
|
|
Returns `True` if the user has connected.
|
2019-05-06 12:38:26 +03:00
|
|
|
|
|
|
|
This method is **not** asynchronous (don't use ``await`` on it).
|
2019-05-20 12:38:26 +03:00
|
|
|
|
|
|
|
Example
|
|
|
|
.. code-block:: python
|
|
|
|
|
|
|
|
while client.is_connected():
|
|
|
|
await asyncio.sleep(1)
|
2018-06-08 22:52:59 +03:00
|
|
|
"""
|
2018-06-28 17:04:12 +03:00
|
|
|
sender = getattr(self, '_sender', None)
|
|
|
|
return sender and sender.is_connected()
|
2017-09-17 17:39:29 +03:00
|
|
|
|
2019-05-03 22:37:27 +03:00
|
|
|
def disconnect(self: 'TelegramClient'):
    """
    Disconnects from Telegram.

    If the event loop is already running, this method returns a
    coroutine that you should await on your own code; otherwise
    the loop is ran until said coroutine completes.

    Event handlers which are currently running will be cancelled before
    this function returns (in order to properly clean-up their tasks).
    In particular, this means that using ``disconnect`` in a handler
    will cause code after the ``disconnect`` to never run. If this is
    needed, consider spawning a separate task to do the remaining work.

    Example
        .. code-block:: python

            # You don't need to use this if you used "with client"
            await client.disconnect()
    """
    if self.loop.is_running():
        # Disconnect may be called from an event handler, which would
        # cancel itself mid-disconnection and never actually complete
        # it. Shield the task so disconnect itself cannot be cancelled.
        # See issue #3942 for more details.
        task = self.loop.create_task(self._disconnect_coro())
        return asyncio.shield(task)

    try:
        self.loop.run_until_complete(self._disconnect_coro())
    except RuntimeError:
        # Python 3.5.x complains when called from `__aexit__` while
        # there were pending updates, with "Event loop stopped before
        # Future completed." — which doesn't really make a lot of
        # sense, so it is deliberately swallowed.
        pass
|
2019-03-21 14:21:00 +03:00
|
|
|
|
2020-11-14 16:01:59 +03:00
|
|
|
def set_proxy(self: 'TelegramClient', proxy: typing.Union[tuple, dict]):
    """
    Changes the proxy which will be used on next (re)connection.

    Method has no immediate effects if the client is currently connected.

    The new proxy will take it's effect on the next reconnection attempt:
        - on a call `await client.connect()` (after complete disconnect)
        - on auto-reconnect attempt (e.g, after previous connection was lost)
    """
    if issubclass(self._connection, TcpMTProxy):
        init_proxy = types.InputClientProxy(*self._connection.address_info(proxy))
    else:
        init_proxy = None

    self._init_request.proxy = init_proxy
    self._proxy = proxy

    # `await client.connect()` builds a fresh connection with the new
    # proxy on every call, but auto-reconnect attempts reuse the
    # `_connection` object already held by `_sender`; injecting the
    # parameters directly is the only way to reach those attempts.
    conn = getattr(self._sender, "_connection", None)
    if conn:
        if isinstance(conn, TcpMTProxy):
            conn._ip = proxy[0]
            conn._port = proxy[1]
        else:
            conn._proxy = proxy
|
|
|
|
|
2023-03-28 20:00:36 +03:00
|
|
|
def _save_states_and_entities(self: 'TelegramClient'):
    """
    Flushes the in-memory entity cache and `MessageBox` update state
    into the session so they survive a restart.
    """
    cached = self._mb_entity_cache.get_all_entities()
    input_peers = [entity._as_input_peer() for entity in cached]

    # The session only knows how to read entities out of TL objects that
    # carry users/chats lists, so piggy-back on an arbitrary such type.
    # It doesn't matter if we put users in the list of chats.
    self.session.process_entities(types.contacts.ResolvedPeer(None, input_peers, []))

    # Hack to avoid changing the session file format: save ourselves
    # with ``id=0`` and our real ``id`` in ``access_hash``, so our own
    # ID can later be determined by querying for 0. Whether we're a bot
    # is not saved.
    self_id = self._mb_entity_cache.self_id
    if self_id:
        marker = types.InputPeerUser(0, self_id)
        self.session.process_entities(types.contacts.ResolvedPeer(None, [marker], []))

    session_state, channel_states = self._message_box.session_state()
    self.session.set_update_state(0, types.updates.State(**session_state, unread_count=0))
    arbitrary_date = datetime.datetime.now()  # any datetime works; channels don't need it
    for channel_id, pts in channel_states.items():
        self.session.set_update_state(channel_id, types.updates.State(pts, 0, arbitrary_date, 0, unread_count=0))
|
|
|
|
|
2019-05-03 22:37:27 +03:00
|
|
|
async def _disconnect_coro(self: 'TelegramClient'):
    """
    Coroutine performing the full client teardown.

    Order matters: the main sender first, then every borrowed
    (exported) sender, then the background event-handler tasks,
    and finally the session (states/entities are flushed to it
    right before it is closed).
    """
    if self.session is None:
        return  # already logged out and disconnected

    await self._disconnect()

    # Also clean-up all exported senders because we're done with them
    async with self._borrow_sender_lock:
        for state, sender in self._borrowed_senders.values():
            # Note that we're not checking for `state.should_disconnect()`.
            # If the user wants to disconnect the client, ALL connections
            # to Telegram (including exported senders) should be closed.
            #
            # Disconnect should never raise, so there's no try/except.
            await sender.disconnect()
            # Can't use `mark_disconnected` because it may be borrowed.
            state._connected = False

        # If any was borrowed
        self._borrowed_senders.clear()

    # trio's nurseries would handle this for us, but this is asyncio.
    # All tasks spawned in the background should properly be terminated.
    if self._event_handler_tasks:
        for task in self._event_handler_tasks:
            task.cancel()

        # Wait for the cancellations to be processed before clearing,
        # so no handler task is left running unattended.
        await asyncio.wait(self._event_handler_tasks)
        self._event_handler_tasks.clear()

    # Persist update state and cached entities before the session closes.
    self._save_states_and_entities()

    self.session.close()
|
2018-06-26 14:37:34 +03:00
|
|
|
|
2019-05-03 22:37:27 +03:00
|
|
|
async def _disconnect(self: 'TelegramClient'):
    """
    Disconnect only, without closing the session. Used in reconnections
    to different data centers, where we don't want to close the session
    file; user disconnects however should close it since it means that
    their job with the client is complete and we should clean it up all.
    """
    await self._sender.disconnect()
    # Also stop the background tasks spawned on connect (update handling
    # and keep-alive pings); they are tied to the connection's lifetime.
    await helpers._cancel(self._log[__name__],
                          updates_handle=self._updates_handle,
                          keepalive_handle=self._keepalive_handle)
|
2017-09-29 21:50:27 +03:00
|
|
|
|
2019-05-03 22:37:27 +03:00
|
|
|
async def _switch_dc(self: 'TelegramClient', new_dc):
    """
    Permanently switches the current connection to the new data center.

    The session is updated and saved *before* reconnecting so that the
    new DC address survives even if the reconnection itself fails.
    """
    self._log[__name__].info('Reconnecting to new data center %s', new_dc)
    dc = await self._get_dc(new_dc)

    self.session.set_dc(dc.id, dc.ip_address, dc.port)
    # auth_key's are associated with a server, which has now changed
    # so it's not valid anymore. Set to None to force recreating it.
    self._sender.auth_key.key = None
    self.session.auth_key = None
    self.session.save()
    await self._disconnect()
    return await self.connect()
|
2017-06-08 14:12:57 +03:00
|
|
|
|
2022-08-30 13:32:21 +03:00
|
|
|
def _auth_key_callback(self: 'TelegramClient', auth_key):
    """
    Callback from the sender whenever it needed to generate a
    new authorization key. This means we are not authorized.
    """
    self.session.auth_key = auth_key
    # Persist immediately so the freshly generated key survives restarts.
    self.session.save()
|
2018-06-27 11:15:59 +03:00
|
|
|
|
2017-06-08 14:12:57 +03:00
|
|
|
# endregion
|
|
|
|
|
2017-09-29 21:50:27 +03:00
|
|
|
# region Working with different connections/Data Centers
|
|
|
|
|
2019-05-03 22:37:27 +03:00
|
|
|
async def _get_dc(self: 'TelegramClient', dc_id, cdn=False):
    """
    Gets the Data Center (DC) associated to 'dc_id'.

    The server configuration is fetched once and cached at class level,
    so every client instance shares it. For CDN DCs the CDN config is
    fetched too, and the matching RSA public key is registered.

    If no DC matches the current IPv6 preference, the search is retried
    ignoring IPv6; raises ``ValueError`` if the DC cannot be found at all.
    """
    cls = self.__class__
    if not cls._config:
        cls._config = await self(functions.help.GetConfigRequest())

    # Read the cache through `cls`, consistent with the write below
    # (the attribute is class-level; reading it via `self` only worked
    # because instances never shadow it).
    if cdn and not cls._cdn_config:
        cls._cdn_config = await self(functions.help.GetCdnConfigRequest())
        for pk in cls._cdn_config.public_keys:
            if pk.dc_id == dc_id:
                rsa.add_key(pk.public_key, old=False)

    try:
        return next(
            dc for dc in cls._config.dc_options
            if dc.id == dc_id
            and bool(dc.ipv6) == self._use_ipv6 and bool(dc.cdn) == cdn
        )
    except StopIteration:
        self._log[__name__].warning(
            'Failed to get DC %s (cdn = %s) with use_ipv6 = %s; retrying ignoring IPv6 check',
            dc_id, cdn, self._use_ipv6
        )
        try:
            return next(
                dc for dc in cls._config.dc_options
                if dc.id == dc_id and bool(dc.cdn) == cdn
            )
        except StopIteration:
            raise ValueError(f'Failed to get DC {dc_id} (cdn = {cdn})')
|
2017-06-08 14:12:57 +03:00
|
|
|
|
2019-05-03 22:37:27 +03:00
|
|
|
async def _create_exported_sender(self: 'TelegramClient', dc_id):
    """
    Creates a new exported `MTProtoSender` for the given `dc_id` and
    returns it. This method should be used by `_borrow_exported_sender`.
    """
    # Thanks badoualy/kotlogram on /telegram/api/DefaultTelegramClient.kt
    # for clearly showing how to export the authorization
    dc = await self._get_dc(dc_id)
    # Can't reuse self._sender._connection as it has its own seqno.
    #
    # If one were to do that, Telegram would reset the connection
    # with no further clues.
    sender = MTProtoSender(None, loggers=self._log)
    await sender.connect(self._connection(
        dc.ip_address,
        dc.port,
        dc.id,
        loggers=self._log,
        proxy=self._proxy,
        local_addr=self._local_addr
    ))
    self._log[__name__].info('Exporting auth for new borrowed sender in %s', dc)
    # Export our authorization on the home DC, then import it on the new
    # sender as the query of its init request so it becomes usable.
    auth = await self(functions.auth.ExportAuthorizationRequest(dc_id))
    self._init_request.query = functions.auth.ImportAuthorizationRequest(id=auth.id, bytes=auth.bytes)
    req = functions.InvokeWithLayerRequest(LAYER, self._init_request)
    await sender.send(req)
    return sender
|
|
|
|
|
2019-05-03 22:37:27 +03:00
|
|
|
async def _borrow_exported_sender(self: 'TelegramClient', dc_id):
    """
    Borrows a connected `MTProtoSender` for the given `dc_id`.

    If it's not cached, creates a new one if it doesn't exist yet,
    and imports a freshly exported authorization key for it to be usable.

    Once its job is over it should be `_return_exported_sender`.
    """
    async with self._borrow_sender_lock:
        self._log[__name__].debug('Borrowing sender for dc_id %d', dc_id)
        state, sender = self._borrowed_senders.get(dc_id, (None, None))

        if state is None:
            # First borrow for this DC: create, authorize and cache it.
            state = _ExportState()
            sender = await self._create_exported_sender(dc_id)
            sender.dc_id = dc_id
            self._borrowed_senders[dc_id] = (state, sender)

        elif state.need_connect():
            # Cached but currently disconnected (e.g. cleaned up after
            # being idle); reconnect it with a fresh transport.
            dc = await self._get_dc(dc_id)
            await sender.connect(self._connection(
                dc.ip_address,
                dc.port,
                dc.id,
                loggers=self._log,
                proxy=self._proxy,
                local_addr=self._local_addr
            ))

        state.add_borrow()
        return sender
|
2017-07-04 11:21:15 +03:00
|
|
|
|
2019-05-03 22:37:27 +03:00
|
|
|
async def _return_exported_sender(self: 'TelegramClient', sender):
    """
    Returns a borrowed exported sender. If all borrows have
    been returned, the sender is cleanly disconnected.
    """
    async with self._borrow_sender_lock:
        self._log[__name__].debug('Returning borrowed sender for dc_id %d', sender.dc_id)
        state, _ = self._borrowed_senders[sender.dc_id]
        # Only the borrow count is decremented here; the actual
        # disconnection is deferred to `_clean_exported_senders`.
        state.add_return()
|
|
|
|
|
|
|
|
async def _clean_exported_senders(self: 'TelegramClient'):
    """
    Cleans-up all unused exported senders by disconnecting them.

    Entries stay in `_borrowed_senders` after disconnection, so a later
    borrow for the same DC can reconnect them instead of re-exporting
    the authorization.
    """
    async with self._borrow_sender_lock:
        for dc_id, (state, sender) in self._borrowed_senders.items():
            if state.should_disconnect():
                self._log[__name__].info(
                    'Disconnecting borrowed sender for DC %d', dc_id)

                # Disconnect should never raise
                await sender.disconnect()
                state.mark_disconnected()
|
2018-06-28 15:10:36 +03:00
|
|
|
|
2019-05-03 22:37:27 +03:00
|
|
|
async def _get_cdn_client(self: 'TelegramClient', cdn_redirect):
    """Similar to ._borrow_exported_client, but for CDNs"""
    # NOTE(review): the docstring mentions `_borrow_exported_client`;
    # presumably `_borrow_exported_sender` is meant — confirm.
    session = self._exported_sessions.get(cdn_redirect.dc_id)
    if not session:
        # First time talking to this CDN DC: clone the current session
        # pointed at the CDN's address, and cache it for later reuse.
        dc = await self._get_dc(cdn_redirect.dc_id, cdn=True)
        session = self.session.clone()
        session.set_dc(dc.id, dc.ip_address, dc.port)
        self._exported_sessions[cdn_redirect.dc_id] = session

    self._log[__name__].info('Creating new CDN client')
    client = self.__class__(
        session, self.api_id, self.api_hash,
        proxy=self._proxy,
        timeout=self._timeout,
        loop=self.loop
    )

    # Reuse the current sender's auth key (no export/import is done
    # here, unlike for borrowed senders) and connect directly.
    session.auth_key = self._sender.auth_key
    await client._sender.connect(self._connection(
        session.server_address,
        session.port,
        session.dc_id,
        loggers=self._log,
        proxy=self._proxy,
        local_addr=self._local_addr
    ))
    return client
|
|
|
|
|
2017-06-08 14:12:57 +03:00
|
|
|
# endregion
|
|
|
|
|
|
|
|
# region Invoking Telegram requests
|
|
|
|
|
2018-06-09 22:22:54 +03:00
|
|
|
@abc.abstractmethod
def __call__(self: 'TelegramClient', request, ordered=False):
    """
    Invokes (sends) one or more MTProtoRequests and returns (receives)
    their result.

    Args:
        request (`TLObject` | `list`):
            The request or requests to be invoked.

        ordered (`bool`, optional):
            Whether the requests (if more than one was given) should be
            executed sequentially on the server. They run in arbitrary
            order by default.

        flood_sleep_threshold (`int` | `None`, optional):
            The flood sleep threshold to use for this request. This overrides
            the default value stored in
            `client.flood_sleep_threshold <telethon.client.telegrambaseclient.TelegramBaseClient.flood_sleep_threshold>`

    Returns:
        The result of the request (often a `TLObject`) or a list of
        results if more than one request was given.
    """
    # NOTE(review): ``flood_sleep_threshold`` is documented above but is
    # not part of this abstract signature — presumably the concrete
    # implementation accepts it; confirm against the subclass.
    raise NotImplementedError
|
2017-09-30 12:45:35 +03:00
|
|
|
|
2018-06-18 14:22:25 +03:00
|
|
|
@abc.abstractmethod
def _update_loop(self: 'TelegramClient'):
    """
    Background update-processing task, implemented by a subclass.
    Scheduled on connect and stored as ``self._updates_handle``.
    """
    raise NotImplementedError
|
|
|
|
|
2018-06-27 20:40:32 +03:00
|
|
|
@abc.abstractmethod
async def _handle_auto_reconnect(self: 'TelegramClient'):
    """
    Hook implemented by a subclass; presumably invoked after an
    automatic reconnection of the sender — confirm against the
    concrete implementation.
    """
    raise NotImplementedError
|
|
|
|
|
2017-09-29 21:50:27 +03:00
|
|
|
# endregion
|