2018-06-06 21:41:01 +03:00
|
|
|
import asyncio
|
2018-09-29 13:20:26 +03:00
|
|
|
import collections
|
2020-02-20 15:40:08 +03:00
|
|
|
import struct
|
2022-01-18 21:46:19 +03:00
|
|
|
import logging
|
2018-06-06 21:41:01 +03:00
|
|
|
|
2018-10-01 10:58:53 +03:00
|
|
|
from . import authenticator
|
2021-09-12 14:27:13 +03:00
|
|
|
from .._misc.messagepacker import MessagePacker
|
2021-09-24 21:07:34 +03:00
|
|
|
from ..errors._rpcbase import _mk_error_type
|
2018-10-01 10:58:53 +03:00
|
|
|
from .mtprotoplainsender import MTProtoPlainSender
|
2018-09-29 13:48:50 +03:00
|
|
|
from .requeststate import RequestState
|
2018-10-19 14:24:52 +03:00
|
|
|
from .mtprotostate import MTProtoState
|
2018-06-07 17:32:12 +03:00
|
|
|
from ..errors import (
|
2018-10-25 16:50:49 +03:00
|
|
|
BadMessageError, InvalidBufferError, SecurityError,
|
|
|
|
TypeNotFoundError, rpc_message_to_error
|
2018-06-07 17:32:12 +03:00
|
|
|
)
|
2021-09-12 17:58:06 +03:00
|
|
|
from .._misc.binaryreader import BinaryReader
|
|
|
|
from .._misc.tlobject import TLRequest
|
|
|
|
from ..types._core import RpcResult, MessageContainer, GzipPacked
|
2021-09-12 14:27:13 +03:00
|
|
|
from .._crypto import AuthKey
|
2021-09-26 20:58:42 +03:00
|
|
|
from .._misc import helpers, utils
|
|
|
|
from .. import _tl
|
2018-06-06 21:41:01 +03:00
|
|
|
|
|
|
|
|
2022-01-18 21:46:19 +03:00
|
|
|
# Minimum interval, in seconds, between consecutive "update buffer is full"
# warnings emitted by `_handle_update` when an update must be dropped (15 min).
UPDATE_BUFFER_FULL_WARN_DELAY = 15 * 60
|
|
|
|
|
|
|
|
|
2018-06-06 21:41:01 +03:00
|
|
|
class MTProtoSender:
|
2018-06-07 12:51:09 +03:00
|
|
|
"""
|
|
|
|
MTProto Mobile Protocol sender
|
|
|
|
(https://core.telegram.org/mtproto/description).
|
|
|
|
|
|
|
|
This class is responsible for wrapping requests into `TLMessage`'s,
|
|
|
|
sending them over the network and receiving them in a safe manner.
|
|
|
|
|
|
|
|
Automatic reconnection due to temporary network issues is a concern
|
|
|
|
for this class as well, including retry of messages that could not
|
|
|
|
be sent successfully.
|
|
|
|
|
|
|
|
A new authorization key will be generated on connection if no other
|
|
|
|
key exists yet.
|
|
|
|
"""
|
2022-01-18 21:46:19 +03:00
|
|
|
def __init__(self, *, loggers, updates_queue,
|
|
|
|
retries=5, delay=1, auto_reconnect=True, connect_timeout=None,):
|
2018-10-19 14:24:52 +03:00
|
|
|
self._connection = None
|
2019-01-11 17:52:30 +03:00
|
|
|
self._loggers = loggers
|
|
|
|
self._log = loggers[__name__]
|
2018-06-08 21:50:53 +03:00
|
|
|
self._retries = retries
|
2018-10-28 12:55:58 +03:00
|
|
|
self._delay = delay
|
2018-06-18 19:11:16 +03:00
|
|
|
self._auto_reconnect = auto_reconnect
|
2018-10-04 17:39:57 +03:00
|
|
|
self._connect_timeout = connect_timeout
|
2022-01-18 21:46:19 +03:00
|
|
|
self._updates_queue = updates_queue
|
2020-07-25 19:39:35 +03:00
|
|
|
self._connect_lock = asyncio.Lock()
|
2020-12-11 19:18:25 +03:00
|
|
|
self._ping = None
|
2018-06-07 12:51:09 +03:00
|
|
|
|
|
|
|
# Whether the user has explicitly connected or disconnected.
|
|
|
|
#
|
|
|
|
# If a disconnection happens for any other reason and it
|
|
|
|
# was *not* user action then the pending messages won't
|
|
|
|
# be cleared but on explicit user disconnection all the
|
|
|
|
# pending futures should be cancelled.
|
2018-06-06 21:41:01 +03:00
|
|
|
self._user_connected = False
|
2018-06-08 21:41:48 +03:00
|
|
|
self._reconnecting = False
|
2022-01-16 15:59:43 +03:00
|
|
|
self._disconnected = asyncio.Queue(1)
|
|
|
|
self._disconnected.put_nowait(None)
|
2018-06-06 21:41:01 +03:00
|
|
|
|
2018-06-06 22:42:48 +03:00
|
|
|
# We need to join the loops upon disconnection
|
|
|
|
self._send_loop_handle = None
|
|
|
|
self._recv_loop_handle = None
|
|
|
|
|
2018-10-19 14:24:52 +03:00
|
|
|
# Preserving the references of the AuthKey and state is important
|
2021-09-19 17:38:11 +03:00
|
|
|
self.auth_key = AuthKey(None)
|
2019-01-11 17:52:30 +03:00
|
|
|
self._state = MTProtoState(self.auth_key, loggers=self._loggers)
|
2018-10-19 14:24:52 +03:00
|
|
|
|
2018-09-29 13:20:26 +03:00
|
|
|
# Outgoing messages are put in a queue and sent in a batch.
|
|
|
|
# Note that here we're also storing their ``_RequestState``.
|
2020-07-25 19:39:35 +03:00
|
|
|
self._send_queue = MessagePacker(self._state, loggers=self._loggers)
|
2018-06-06 21:41:01 +03:00
|
|
|
|
2018-09-29 13:20:26 +03:00
|
|
|
# Sent states are remembered until a response is received.
|
|
|
|
self._pending_state = {}
|
2018-06-06 21:41:01 +03:00
|
|
|
|
2018-09-29 13:20:26 +03:00
|
|
|
# Responses must be acknowledged, and we can also batch these.
|
2018-06-06 21:41:01 +03:00
|
|
|
self._pending_ack = set()
|
|
|
|
|
2018-09-29 13:20:26 +03:00
|
|
|
# Similar to pending_messages but only for the last acknowledges.
|
|
|
|
# These can't go in pending_messages because no acknowledge for them
|
|
|
|
# is received, but we may still need to resend their state on bad salts.
|
|
|
|
self._last_acks = collections.deque(maxlen=10)
|
2018-06-20 12:12:04 +03:00
|
|
|
|
2022-01-18 21:46:19 +03:00
|
|
|
# Last time we warned about the update buffer being full
|
|
|
|
self._last_update_warn = -UPDATE_BUFFER_FULL_WARN_DELAY
|
|
|
|
|
2018-06-06 21:41:01 +03:00
|
|
|
# Jump table from response ID to method that handles it
|
|
|
|
self._handlers = {
|
2018-06-09 14:11:49 +03:00
|
|
|
RpcResult.CONSTRUCTOR_ID: self._handle_rpc_result,
|
2018-06-06 21:41:01 +03:00
|
|
|
MessageContainer.CONSTRUCTOR_ID: self._handle_container,
|
|
|
|
GzipPacked.CONSTRUCTOR_ID: self._handle_gzip_packed,
|
2021-09-12 14:27:13 +03:00
|
|
|
_tl.Pong.CONSTRUCTOR_ID: self._handle_pong,
|
|
|
|
_tl.BadServerSalt.CONSTRUCTOR_ID: self._handle_bad_server_salt,
|
|
|
|
_tl.BadMsgNotification.CONSTRUCTOR_ID: self._handle_bad_notification,
|
|
|
|
_tl.MsgDetailedInfo.CONSTRUCTOR_ID: self._handle_detailed_info,
|
|
|
|
_tl.MsgNewDetailedInfo.CONSTRUCTOR_ID: self._handle_new_detailed_info,
|
|
|
|
_tl.NewSessionCreated.CONSTRUCTOR_ID: self._handle_new_session_created,
|
|
|
|
_tl.MsgsAck.CONSTRUCTOR_ID: self._handle_ack,
|
|
|
|
_tl.FutureSalts.CONSTRUCTOR_ID: self._handle_future_salts,
|
|
|
|
_tl.MsgsStateReq.CONSTRUCTOR_ID: self._handle_state_forgotten,
|
|
|
|
_tl.MsgResendReq.CONSTRUCTOR_ID: self._handle_state_forgotten,
|
|
|
|
_tl.MsgsAllInfo.CONSTRUCTOR_ID: self._handle_msg_all,
|
|
|
|
_tl.DestroySessionOk: self._handle_destroy_session,
|
|
|
|
_tl.DestroySessionNone: self._handle_destroy_session,
|
2018-06-06 21:41:01 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
# Public API
|
|
|
|
|
2018-10-19 14:50:11 +03:00
|
|
|
async def connect(self, connection):
|
2018-06-07 12:51:09 +03:00
|
|
|
"""
|
2018-10-01 15:20:50 +03:00
|
|
|
Connects to the specified given connection using the given auth key.
|
2018-06-07 12:51:09 +03:00
|
|
|
"""
|
2019-08-08 00:43:31 +03:00
|
|
|
async with self._connect_lock:
|
|
|
|
if self._user_connected:
|
|
|
|
self._log.info('User is already connected!')
|
|
|
|
return False
|
|
|
|
|
|
|
|
self._connection = connection
|
|
|
|
await self._connect()
|
|
|
|
self._user_connected = True
|
|
|
|
return True
|
2018-06-08 21:41:48 +03:00
|
|
|
|
2018-06-09 22:03:48 +03:00
|
|
|
def is_connected(self):
|
|
|
|
return self._user_connected
|
|
|
|
|
2019-12-02 20:32:31 +03:00
|
|
|
def _transport_connected(self):
|
|
|
|
return (
|
|
|
|
not self._reconnecting
|
|
|
|
and self._connection is not None
|
|
|
|
and self._connection._connected
|
|
|
|
)
|
|
|
|
|
2019-03-21 14:21:00 +03:00
|
|
|
async def disconnect(self):
|
2018-06-07 12:51:09 +03:00
|
|
|
"""
|
|
|
|
Cleanly disconnects the instance from the network, cancels
|
|
|
|
all pending requests, and closes the send and receive loops.
|
|
|
|
"""
|
2019-03-21 14:21:00 +03:00
|
|
|
await self._disconnect()
|
2018-06-17 12:41:35 +03:00
|
|
|
|
2018-06-09 16:26:13 +03:00
|
|
|
def send(self, request, ordered=False):
|
2018-06-07 11:30:20 +03:00
|
|
|
"""
|
2018-09-29 13:20:26 +03:00
|
|
|
This method enqueues the given request to be sent. Its send
|
|
|
|
state will be saved until a response arrives, and a ``Future``
|
|
|
|
that will be resolved when the response arrives will be returned:
|
2018-06-07 11:30:20 +03:00
|
|
|
|
|
|
|
.. code-block:: python
|
|
|
|
|
|
|
|
async def method():
|
|
|
|
# Sending (enqueued for the send loop)
|
2018-06-09 16:26:13 +03:00
|
|
|
future = sender.send(request)
|
2018-06-07 11:30:20 +03:00
|
|
|
# Receiving (waits for the receive loop to read the result)
|
|
|
|
result = await future
|
|
|
|
|
|
|
|
Designed like this because Telegram may send the response at
|
|
|
|
any point, and it can send other items while one waits for it.
|
|
|
|
Once the response for this future arrives, it is set with the
|
|
|
|
received result, quite similar to how a ``receive()`` call
|
|
|
|
would otherwise work.
|
|
|
|
|
|
|
|
Since the receiving part is "built in" the future, it's
|
|
|
|
impossible to await receive a result that was never sent.
|
|
|
|
"""
|
2018-06-17 21:25:22 +03:00
|
|
|
if not self._user_connected:
|
|
|
|
raise ConnectionError('Cannot send requests while disconnected')
|
|
|
|
|
2018-09-29 13:20:26 +03:00
|
|
|
if not utils.is_list_like(request):
|
2020-02-20 15:40:08 +03:00
|
|
|
try:
|
2020-07-25 19:39:35 +03:00
|
|
|
state = RequestState(request)
|
2020-02-20 15:40:08 +03:00
|
|
|
except struct.error as e:
|
|
|
|
# "struct.error: required argument is not an integer" is not
|
|
|
|
# very helpful; log the request to find out what wasn't int.
|
|
|
|
self._log.error('Request caused struct.error: %s: %s', e, request)
|
|
|
|
raise
|
|
|
|
|
2018-09-29 13:48:50 +03:00
|
|
|
self._send_queue.append(state)
|
2018-09-29 13:20:26 +03:00
|
|
|
return state.future
|
2018-06-07 15:02:55 +03:00
|
|
|
else:
|
2018-09-29 13:20:26 +03:00
|
|
|
states = []
|
|
|
|
futures = []
|
2018-10-19 17:53:50 +03:00
|
|
|
state = None
|
2018-09-29 13:20:26 +03:00
|
|
|
for req in request:
|
2020-02-20 15:40:08 +03:00
|
|
|
try:
|
2020-07-25 19:39:35 +03:00
|
|
|
state = RequestState(req, after=ordered and state)
|
2020-02-20 15:40:08 +03:00
|
|
|
except struct.error as e:
|
|
|
|
self._log.error('Request caused struct.error: %s: %s', e, request)
|
|
|
|
raise
|
|
|
|
|
2018-09-29 13:20:26 +03:00
|
|
|
states.append(state)
|
|
|
|
futures.append(state.future)
|
2018-09-29 13:48:50 +03:00
|
|
|
|
2018-10-19 17:53:50 +03:00
|
|
|
self._send_queue.extend(states)
|
2018-09-29 13:20:26 +03:00
|
|
|
return futures
|
2018-06-06 21:41:01 +03:00
|
|
|
|
2022-01-16 15:59:43 +03:00
|
|
|
async def wait_disconnected(self):
|
2018-06-17 20:29:41 +03:00
|
|
|
"""
|
2022-01-16 15:59:43 +03:00
|
|
|
Wait until the client is disconnected.
|
|
|
|
Raise if the disconnection finished with error.
|
2018-06-17 20:29:41 +03:00
|
|
|
"""
|
2022-01-16 15:59:43 +03:00
|
|
|
res = await self._disconnected.get()
|
|
|
|
if isinstance(res, BaseException):
|
|
|
|
raise res
|
2018-06-17 20:29:41 +03:00
|
|
|
|
2018-06-08 22:18:15 +03:00
|
|
|
# Private methods
|
|
|
|
|
|
|
|
    async def _connect(self):
        """
        Performs the actual connection, retrying, generating the
        authorization key if necessary, and starting the send and
        receive loops.

        Raises ConnectionError when every attempt at connecting (or at
        generating the auth key) is exhausted.
        """
        self._log.info('Connecting to %s...', self._connection)

        connected = False

        # Two-phase retry loop: first establish the transport, then (if we
        # have no auth key yet) run key generation over that transport.
        for attempt in helpers.retry_range(self._retries):
            if not connected:
                connected = await self._try_connect(attempt)
                if not connected:
                    continue  # skip auth key generation until we're connected

            if not self.auth_key:
                try:
                    if not await self._try_gen_auth_key(attempt):
                        continue  # keep retrying until we have the auth key
                except (IOError, asyncio.TimeoutError) as e:
                    # Sometimes, specially during user-DC migrations,
                    # Telegram may close the connection during auth_key
                    # generation. If that's the case, we will need to
                    # connect again.
                    self._log.warning('Connection error %d during auth_key gen: %s: %s',
                                      attempt, type(e).__name__, e)

                    # Whatever the IOError was, make sure to disconnect so we can
                    # reconnect cleanly after.
                    await self._connection.disconnect()
                    connected = False
                    await asyncio.sleep(self._delay)
                    continue  # next iteration we will try to reconnect

            break  # all steps done, break retry loop
        else:
            # for/else: the loop ran out of attempts without `break`ing.
            if not connected:
                raise ConnectionError('Connection to Telegram failed {} time(s)'.format(self._retries))

            e = ConnectionError('auth_key generation failed {} time(s)'.format(self._retries))
            await self._disconnect(error=e)
            raise e

        self._log.debug('Starting send loop')
        self._send_loop_handle = asyncio.create_task(self._send_loop())

        self._log.debug('Starting receive loop')
        self._recv_loop_handle = asyncio.create_task(self._recv_loop())

        # _disconnected only completes after manual disconnection
        # or errors after which the sender cannot continue such
        # as failing to reconnect or any unexpected error.
        # Drain any stale item so `wait_disconnected` blocks again.
        while not self._disconnected.empty():
            self._disconnected.get_nowait()

        self._log.info('Connection to %s complete!', self._connection)
|
2018-06-08 22:18:15 +03:00
|
|
|
|
2019-10-24 14:36:32 +03:00
|
|
|
async def _try_connect(self, attempt):
|
|
|
|
try:
|
|
|
|
self._log.debug('Connection attempt %d...', attempt)
|
|
|
|
await self._connection.connect(timeout=self._connect_timeout)
|
|
|
|
self._log.debug('Connection success!')
|
|
|
|
return True
|
|
|
|
except (IOError, asyncio.TimeoutError) as e:
|
|
|
|
self._log.warning('Attempt %d at connecting failed: %s: %s',
|
|
|
|
attempt, type(e).__name__, e)
|
|
|
|
await asyncio.sleep(self._delay)
|
|
|
|
return False
|
|
|
|
|
|
|
|
async def _try_gen_auth_key(self, attempt):
|
|
|
|
plain = MTProtoPlainSender(self._connection, loggers=self._loggers)
|
|
|
|
try:
|
|
|
|
self._log.debug('New auth_key attempt %d...', attempt)
|
|
|
|
self.auth_key.key, self._state.time_offset = \
|
|
|
|
await authenticator.do_authentication(plain)
|
|
|
|
|
|
|
|
self._log.debug('auth_key generation success!')
|
|
|
|
return True
|
|
|
|
except (SecurityError, AssertionError) as e:
|
|
|
|
self._log.warning('Attempt %d at new auth_key failed: %s', attempt, e)
|
|
|
|
await asyncio.sleep(self._delay)
|
|
|
|
return False
|
|
|
|
|
2019-03-21 14:21:00 +03:00
|
|
|
    async def _disconnect(self, error=None):
        """
        Tear down the current connection and fail or cancel every pending
        request. `error`, when given, is set on pending futures and pushed
        to the `_disconnected` queue for `wait_disconnected` to re-raise.
        """
        if self._connection is None:
            self._log.info('Not disconnecting (already have no connection)')
            return

        self._log.info('Disconnecting from %s...', self._connection)
        self._user_connected = False
        try:
            self._log.debug('Closing current connection...')
            await self._connection.disconnect()
        finally:
            # The cleanup below must run even if closing the transport fails.
            self._log.debug('Cancelling %d pending message(s)...', len(self._pending_state))
            for state in self._pending_state.values():
                if error and not state.future.done():
                    state.future.set_exception(error)
                else:
                    state.future.cancel()

            self._pending_state.clear()
            await helpers._cancel(
                self._log,
                send_loop_handle=self._send_loop_handle,
                recv_loop_handle=self._recv_loop_handle
            )

            self._log.info('Disconnection from %s complete!', self._connection)
            self._connection = None

        # Capacity is 1: only record the first/most relevant outcome.
        if not self._disconnected.full():
            self._disconnected.put_nowait(error)
|
2018-09-30 12:58:46 +03:00
|
|
|
|
2019-03-28 12:11:33 +03:00
|
|
|
    async def _reconnect(self, last_error):
        """
        Cleanly disconnects and then reconnects.

        `last_error` is the error that triggered the reconnect; it is
        replaced by newer failures and ultimately handed to `_disconnect`
        if every attempt fails.
        """
        self._log.info('Closing current connection to begin reconnect...')
        await self._connection.disconnect()

        await helpers._cancel(
            self._log,
            send_loop_handle=self._send_loop_handle,
            recv_loop_handle=self._recv_loop_handle
        )

        # TODO See comment in `_start_reconnect`
        # Perhaps this should be the last thing to do?
        # But _connect() creates tasks which may run and,
        # if they see that reconnecting is True, they will end.
        # Perhaps that task creation should not belong in connect?
        self._reconnecting = False

        # Start with a clean state (and thus session ID) to avoid old msgs
        self._state.reset()

        retries = self._retries if self._auto_reconnect else 0

        attempt = 0
        ok = True
        # We're already "retrying" to connect, so we don't want to force retries
        for attempt in helpers.retry_range(retries, force_retry=False):
            try:
                await self._connect()
            except (IOError, asyncio.TimeoutError) as e:
                last_error = e
                self._log.info('Failed reconnection attempt %d with %s',
                               attempt, e.__class__.__name__)
                await asyncio.sleep(self._delay)
            except BufferError as e:
                # TODO there should probably only be one place to except all these errors
                if isinstance(e, InvalidBufferError) and e.code == 404:
                    # 404 here means the server no longer knows our key.
                    self._log.info('Broken authorization key; resetting')
                    self.auth_key.key = None

                    ok = False
                    break
                else:
                    self._log.warning('Invalid buffer %s', e)

            except Exception as e:
                last_error = e
                self._log.exception('Unexpected exception reconnecting on '
                                    'attempt %d', attempt)

                await asyncio.sleep(self._delay)
            else:
                # Reconnected: re-enqueue everything that was in flight so
                # no request's future is left unresolved.
                self._send_queue.extend(self._pending_state.values())
                self._pending_state.clear()
                break
        else:
            # for/else: ran out of attempts without a successful `break`.
            ok = False

        if not ok:
            self._log.error('Automatic reconnection failed %d time(s)', attempt)
            # There may be no error (e.g. automatic reconnection was turned off).
            error = last_error.with_traceback(None) if last_error else None
            await self._disconnect(error=error)
|
2018-06-08 22:18:15 +03:00
|
|
|
|
2019-03-28 12:11:33 +03:00
|
|
|
def _start_reconnect(self, error):
|
2018-06-24 11:44:31 +03:00
|
|
|
"""Starts a reconnection in the background."""
|
2019-03-22 18:21:18 +03:00
|
|
|
if self._user_connected and not self._reconnecting:
|
2019-03-22 21:01:40 +03:00
|
|
|
# We set reconnecting to True here and not inside the new task
|
|
|
|
# because it may happen that send/recv loop calls this again
|
|
|
|
# while the new task hasn't had a chance to run yet. This race
|
|
|
|
# condition puts `self.connection` in a bad state with two calls
|
|
|
|
# to its `connect` without disconnecting, so it creates a second
|
|
|
|
# receive loop. There can't be two tasks receiving data from
|
|
|
|
# the reader, since that causes an error, and the library just
|
|
|
|
# gets stuck.
|
|
|
|
# TODO It still gets stuck? Investigate where and why.
|
|
|
|
self._reconnecting = True
|
2022-01-16 15:51:23 +03:00
|
|
|
asyncio.create_task(self._reconnect(error))
|
2018-06-24 11:44:31 +03:00
|
|
|
|
2020-12-11 19:18:25 +03:00
|
|
|
def _keepalive_ping(self, rnd_id):
|
|
|
|
"""
|
|
|
|
Send a keep-alive ping. If a pong for the last ping was not received
|
|
|
|
yet, this means we're probably not connected.
|
|
|
|
"""
|
|
|
|
# TODO this is ugly, update loop shouldn't worry about this, sender should
|
|
|
|
if self._ping is None:
|
|
|
|
self._ping = rnd_id
|
2021-09-12 14:27:13 +03:00
|
|
|
self.send(_tl.fn.Ping(rnd_id))
|
2020-12-11 19:18:25 +03:00
|
|
|
else:
|
|
|
|
self._start_reconnect(None)
|
|
|
|
|
2018-06-06 21:41:01 +03:00
|
|
|
# Loops
|
|
|
|
|
|
|
|
    async def _send_loop(self):
        """
        This loop is responsible for popping items off the send
        queue, encrypting them, and sending them over the network.

        Besides `connect`, only this method ever sends data.
        """
        while self._user_connected and not self._reconnecting:
            # Piggy-back any pending acknowledgements as their own message.
            if self._pending_ack:
                ack = RequestState(_tl.MsgsAck(list(self._pending_ack)))
                self._send_queue.append(ack)
                self._last_acks.append(ack)
                self._pending_ack.clear()

            self._log.debug('Waiting for messages to send...')
            # TODO Wait for the connection send queue to be empty?
            # This means that while it's not empty we can wait for
            # more messages to be added to the send queue.
            batch, data = await self._send_queue.get()

            if not data:
                continue

            self._log.debug('Encrypting %d message(s) in %d bytes for sending',
                            len(batch), len(data))

            data = self._state.encrypt_message_data(data)

            # Whether sending succeeds or not, the popped requests are now
            # pending because they're removed from the queue. If a reconnect
            # occurs, they will be removed from pending state and re-enqueued
            # so even if the network fails they won't be lost. If they were
            # never re-enqueued, the future waiting for a response "locks".
            for state in batch:
                if not isinstance(state, list):
                    if isinstance(state.request, TLRequest):
                        self._pending_state[state.msg_id] = state
                else:
                    # A nested list means the states were packed in a container.
                    for s in state:
                        if isinstance(s.request, TLRequest):
                            self._pending_state[s.msg_id] = s

            try:
                await self._connection.send(data)
            except IOError as e:
                self._log.info('Connection closed while sending data')
                self._start_reconnect(e)
                return

            self._log.debug('Encrypted messages put in a queue to be sent')
|
2018-10-19 15:41:50 +03:00
|
|
|
|
2018-06-06 21:41:01 +03:00
|
|
|
    async def _recv_loop(self):
        """
        This loop is responsible for reading all incoming responses
        from the network, decrypting and handling or dispatching them.

        Besides `connect`, only this method ever receives data.
        """
        while self._user_connected and not self._reconnecting:
            self._log.debug('Receiving items from the network...')
            try:
                body = await self._connection.recv()
            except IOError as e:
                self._log.info('Connection closed while receiving data')
                self._start_reconnect(e)
                return

            try:
                message = self._state.decrypt_message_data(body)
            except TypeNotFoundError as e:
                # Received object which we don't know how to deserialize
                self._log.info('Type %08x not found, remaining data %r',
                               e.invalid_constructor_id, e.remaining)
                continue
            except SecurityError as e:
                # A step while decoding had the incorrect data. This message
                # should not be considered safe and it should be ignored.
                self._log.warning('Security error while unpacking a '
                                  'received message: %s', e)
                continue
            except BufferError as e:
                # 404 here means the server no longer knows our auth key.
                if isinstance(e, InvalidBufferError) and e.code == 404:
                    self._log.info('Broken authorization key; resetting')
                    self.auth_key.key = None

                    await self._disconnect(error=e)
                else:
                    self._log.warning('Invalid buffer %s', e)
                    self._start_reconnect(e)
                return
            except Exception as e:
                self._log.exception('Unhandled error while receiving data')
                self._start_reconnect(e)
                return

            # Handler failures must not kill the receive loop; log and go on.
            try:
                await self._process_message(message)
            except Exception:
                self._log.exception('Unhandled error while processing msgs')
|
2018-06-06 21:41:01 +03:00
|
|
|
|
|
|
|
# Response Handlers
|
|
|
|
|
2018-06-09 14:48:27 +03:00
|
|
|
async def _process_message(self, message):
|
2018-06-07 12:51:09 +03:00
|
|
|
"""
|
|
|
|
Adds the given message to the list of messages that must be
|
|
|
|
acknowledged and dispatches control to different ``_handle_*``
|
|
|
|
method based on its type.
|
|
|
|
"""
|
2018-06-09 12:34:01 +03:00
|
|
|
self._pending_ack.add(message.msg_id)
|
2018-06-09 14:48:27 +03:00
|
|
|
handler = self._handlers.get(message.obj.CONSTRUCTOR_ID,
|
|
|
|
self._handle_update)
|
|
|
|
await handler(message)
|
2018-06-06 22:42:48 +03:00
|
|
|
|
2018-10-01 14:49:30 +03:00
|
|
|
def _pop_states(self, msg_id):
|
|
|
|
"""
|
|
|
|
Pops the states known to match the given ID from pending messages.
|
|
|
|
|
|
|
|
This method should be used when the response isn't specific.
|
|
|
|
"""
|
|
|
|
state = self._pending_state.pop(msg_id, None)
|
|
|
|
if state:
|
|
|
|
return [state]
|
|
|
|
|
|
|
|
to_pop = []
|
|
|
|
for state in self._pending_state.values():
|
|
|
|
if state.container_id == msg_id:
|
|
|
|
to_pop.append(state.msg_id)
|
|
|
|
|
|
|
|
if to_pop:
|
|
|
|
return [self._pending_state.pop(x) for x in to_pop]
|
|
|
|
|
|
|
|
for ack in self._last_acks:
|
|
|
|
if ack.msg_id == msg_id:
|
|
|
|
return [ack]
|
|
|
|
|
|
|
|
return []
|
|
|
|
|
2018-06-09 14:48:27 +03:00
|
|
|
    async def _handle_rpc_result(self, message):
        """
        Handles the result for Remote Procedure Calls:

            rpc_result#f35c6d01 req_msg_id:long result:bytes = RpcResult;

        This is where the future results for sent requests are set.
        An RPC error resolves the future with an exception; a normal
        body is deserialized via the request's ``read_result``.
        """
        rpc_result = message.obj
        state = self._pending_state.pop(rpc_result.req_msg_id, None)
        self._log.debug('Handling RPC result for message %d',
                        rpc_result.req_msg_id)

        if not state:
            # TODO We should not get responses to things we never sent
            # However receiving a File() with empty bytes is "common".
            # See #658, #759 and #958. They seem to happen in a container
            # which contain the real response right after.
            try:
                with BinaryReader(rpc_result.body) as reader:
                    if not isinstance(reader.tgread_object(), _tl.upload.File):
                        raise ValueError('Not an upload.File')
            except (TypeNotFoundError, ValueError):
                self._log.info('Received response without parent request: %s', rpc_result.body)
            return

        if rpc_result.error:
            # Errors must still be acknowledged explicitly.
            self._send_queue.append(
                RequestState(_tl.MsgsAck([state.msg_id])))

            if not state.future.cancelled():
                err_ty = _mk_error_type(
                    name=rpc_result.error.error_message,
                    code=rpc_result.error.error_code,
                )
                state.future.set_exception(err_ty(
                    rpc_result.error.error_code,
                    rpc_result.error.error_message,
                    state.request
                ))
        else:
            try:
                with BinaryReader(rpc_result.body) as reader:
                    result = state.request.read_result(reader)
            except Exception as e:
                # e.g. TypeNotFoundError, should be propagated to caller
                if not state.future.cancelled():
                    state.future.set_exception(e)
            else:
                if not state.future.cancelled():
                    state.future.set_result(result)
|
2018-06-06 22:42:48 +03:00
|
|
|
|
2018-06-09 14:48:27 +03:00
|
|
|
async def _handle_container(self, message):
|
2018-06-07 12:51:09 +03:00
|
|
|
"""
|
|
|
|
Processes the inner messages of a container with many of them:
|
|
|
|
|
|
|
|
msg_container#73f1f8dc messages:vector<%Message> = MessageContainer;
|
|
|
|
"""
|
2019-01-11 17:52:30 +03:00
|
|
|
self._log.debug('Handling container')
|
2018-06-09 14:48:27 +03:00
|
|
|
for inner_message in message.obj.messages:
|
|
|
|
await self._process_message(inner_message)
|
2018-06-06 22:42:48 +03:00
|
|
|
|
2018-06-09 14:48:27 +03:00
|
|
|
async def _handle_gzip_packed(self, message):
|
2018-06-07 12:51:09 +03:00
|
|
|
"""
|
|
|
|
Unpacks the data from a gzipped object and processes it:
|
|
|
|
|
|
|
|
gzip_packed#3072cfa1 packed_data:bytes = Object;
|
|
|
|
"""
|
2019-01-11 17:52:30 +03:00
|
|
|
self._log.debug('Handling gzipped data')
|
2018-06-09 14:48:27 +03:00
|
|
|
with BinaryReader(message.obj.data) as reader:
|
|
|
|
message.obj = reader.tgread_object()
|
|
|
|
await self._process_message(message)
|
2018-06-06 21:41:01 +03:00
|
|
|
|
2018-06-09 14:48:27 +03:00
|
|
|
async def _handle_update(self, message):
|
2019-06-15 22:15:57 +03:00
|
|
|
try:
|
|
|
|
assert message.obj.SUBCLASS_OF_ID == 0x8af52aac # crc32(b'Updates')
|
|
|
|
except AssertionError:
|
|
|
|
self._log.warning('Note: %s is not an update, not dispatching it %s', message.obj)
|
|
|
|
return
|
|
|
|
|
2019-05-17 13:30:00 +03:00
|
|
|
self._log.debug('Handling update %s', message.obj.__class__.__name__)
|
2022-01-18 21:46:19 +03:00
|
|
|
try:
|
|
|
|
self._updates_queue.put_nowait(message.obj)
|
|
|
|
except asyncio.QueueFull:
|
|
|
|
now = asyncio.get_running_loop().time()
|
|
|
|
if now - self._last_update_warn >= UPDATE_BUFFER_FULL_WARN_DELAY:
|
|
|
|
self._log.warning(
|
|
|
|
'Cannot dispatch update because the buffer capacity of %d was reached',
|
|
|
|
self._updates_queue.maxsize
|
|
|
|
)
|
|
|
|
self._last_update_warn = now
|
2018-06-07 15:32:22 +03:00
|
|
|
|
2018-06-09 14:48:27 +03:00
|
|
|
async def _handle_pong(self, message):
|
2018-06-07 12:51:09 +03:00
|
|
|
"""
|
|
|
|
Handles pong results, which don't come inside a ``rpc_result``
|
|
|
|
but are still sent through a request:
|
|
|
|
|
|
|
|
pong#347773c5 msg_id:long ping_id:long = Pong;
|
|
|
|
"""
|
2018-06-09 14:48:27 +03:00
|
|
|
pong = message.obj
|
2019-01-11 17:52:30 +03:00
|
|
|
self._log.debug('Handling pong for message %d', pong.msg_id)
|
2020-12-11 19:18:25 +03:00
|
|
|
if self._ping == pong.ping_id:
|
|
|
|
self._ping = None
|
|
|
|
|
2018-09-29 13:20:26 +03:00
|
|
|
state = self._pending_state.pop(pong.msg_id, None)
|
|
|
|
if state:
|
|
|
|
state.future.set_result(pong)
|
2018-06-06 21:41:01 +03:00
|
|
|
|
2018-06-09 14:48:27 +03:00
|
|
|
async def _handle_bad_server_salt(self, message):
|
2018-06-07 12:51:09 +03:00
|
|
|
"""
|
|
|
|
Corrects the currently used server salt to use the right value
|
|
|
|
before enqueuing the rejected message to be re-sent:
|
|
|
|
|
|
|
|
bad_server_salt#edab447b bad_msg_id:long bad_msg_seqno:int
|
|
|
|
error_code:int new_server_salt:long = BadMsgNotification;
|
|
|
|
"""
|
2018-06-09 14:48:27 +03:00
|
|
|
bad_salt = message.obj
|
2019-01-11 17:52:30 +03:00
|
|
|
self._log.debug('Handling bad salt for message %d', bad_salt.bad_msg_id)
|
2018-10-19 14:24:52 +03:00
|
|
|
self._state.salt = bad_salt.new_server_salt
|
2018-10-01 14:49:30 +03:00
|
|
|
states = self._pop_states(bad_salt.bad_msg_id)
|
|
|
|
self._send_queue.extend(states)
|
2018-09-29 13:20:26 +03:00
|
|
|
|
2019-01-11 17:52:30 +03:00
|
|
|
self._log.debug('%d message(s) will be resent', len(states))
|
2018-06-06 21:41:01 +03:00
|
|
|
|
2018-06-09 14:48:27 +03:00
|
|
|
async def _handle_bad_notification(self, message):
|
2018-06-07 12:51:09 +03:00
|
|
|
"""
|
|
|
|
Adjusts the current state to be correct based on the
|
|
|
|
received bad message notification whenever possible:
|
|
|
|
|
|
|
|
bad_msg_notification#a7eff811 bad_msg_id:long bad_msg_seqno:int
|
|
|
|
error_code:int = BadMsgNotification;
|
|
|
|
"""
|
2018-06-09 14:48:27 +03:00
|
|
|
bad_msg = message.obj
|
2018-10-01 14:49:30 +03:00
|
|
|
states = self._pop_states(bad_msg.bad_msg_id)
|
2018-06-27 20:04:33 +03:00
|
|
|
|
2019-01-11 17:52:30 +03:00
|
|
|
self._log.debug('Handling bad msg %s', bad_msg)
|
2018-06-07 12:51:09 +03:00
|
|
|
if bad_msg.error_code in (16, 17):
|
|
|
|
# Sent msg_id too low or too high (respectively).
|
|
|
|
# Use the current msg_id to determine the right time offset.
|
2018-10-19 14:24:52 +03:00
|
|
|
to = self._state.update_time_offset(
|
2018-09-29 13:20:26 +03:00
|
|
|
correct_msg_id=message.msg_id)
|
2019-01-11 17:52:30 +03:00
|
|
|
self._log.info('System clock is wrong, set time offset to %ds', to)
|
2018-06-07 12:51:09 +03:00
|
|
|
elif bad_msg.error_code == 32:
|
|
|
|
# msg_seqno too low, so just pump it up by some "large" amount
|
|
|
|
# TODO A better fix would be to start with a new fresh session ID
|
2018-10-19 14:24:52 +03:00
|
|
|
self._state._sequence += 64
|
2018-06-07 12:51:09 +03:00
|
|
|
elif bad_msg.error_code == 33:
|
|
|
|
# msg_seqno too high never seems to happen but just in case
|
2018-10-19 14:24:52 +03:00
|
|
|
self._state._sequence -= 16
|
2018-06-07 12:51:09 +03:00
|
|
|
else:
|
2018-10-01 14:49:30 +03:00
|
|
|
for state in states:
|
2018-10-19 15:02:20 +03:00
|
|
|
state.future.set_exception(
|
|
|
|
BadMessageError(state.request, bad_msg.error_code))
|
2018-06-07 12:51:09 +03:00
|
|
|
return
|
|
|
|
|
|
|
|
# Messages are to be re-sent once we've corrected the issue
|
2018-10-01 14:49:30 +03:00
|
|
|
self._send_queue.extend(states)
|
2019-01-11 17:52:30 +03:00
|
|
|
self._log.debug('%d messages will be resent due to bad msg',
|
|
|
|
len(states))
|
2018-06-06 21:41:01 +03:00
|
|
|
|
2018-06-09 14:48:27 +03:00
|
|
|
async def _handle_detailed_info(self, message):
|
2018-06-07 12:51:09 +03:00
|
|
|
"""
|
|
|
|
Updates the current status with the received detailed information:
|
|
|
|
|
|
|
|
msg_detailed_info#276d3ec6 msg_id:long answer_msg_id:long
|
|
|
|
bytes:int status:int = MsgDetailedInfo;
|
|
|
|
"""
|
|
|
|
# TODO https://goo.gl/VvpCC6
|
2018-06-18 18:14:04 +03:00
|
|
|
msg_id = message.obj.answer_msg_id
|
2019-01-11 17:52:30 +03:00
|
|
|
self._log.debug('Handling detailed info for message %d', msg_id)
|
2018-06-18 18:14:04 +03:00
|
|
|
self._pending_ack.add(msg_id)
|
2018-06-06 21:41:01 +03:00
|
|
|
|
2018-06-09 14:48:27 +03:00
|
|
|
async def _handle_new_detailed_info(self, message):
|
2018-06-07 12:51:09 +03:00
|
|
|
"""
|
|
|
|
Updates the current status with the received detailed information:
|
|
|
|
|
|
|
|
msg_new_detailed_info#809db6df answer_msg_id:long
|
|
|
|
bytes:int status:int = MsgDetailedInfo;
|
|
|
|
"""
|
|
|
|
# TODO https://goo.gl/G7DPsR
|
2018-06-18 18:14:04 +03:00
|
|
|
msg_id = message.obj.answer_msg_id
|
2019-01-11 17:52:30 +03:00
|
|
|
self._log.debug('Handling new detailed info for message %d', msg_id)
|
2018-06-18 18:14:04 +03:00
|
|
|
self._pending_ack.add(msg_id)
|
2018-06-06 21:41:01 +03:00
|
|
|
|
2018-06-09 14:48:27 +03:00
|
|
|
async def _handle_new_session_created(self, message):
|
2018-06-07 12:51:09 +03:00
|
|
|
"""
|
|
|
|
Updates the current status with the received session information:
|
|
|
|
|
|
|
|
new_session_created#9ec20908 first_msg_id:long unique_id:long
|
|
|
|
server_salt:long = NewSession;
|
|
|
|
"""
|
2018-06-06 22:42:48 +03:00
|
|
|
# TODO https://goo.gl/LMyN7A
|
2019-01-11 17:52:30 +03:00
|
|
|
self._log.debug('Handling new session created')
|
2018-10-19 14:24:52 +03:00
|
|
|
self._state.salt = message.obj.server_salt
|
2018-06-06 21:41:01 +03:00
|
|
|
|
2018-06-09 14:48:27 +03:00
|
|
|
async def _handle_ack(self, message):
|
2018-06-07 12:51:09 +03:00
|
|
|
"""
|
|
|
|
Handles a server acknowledge about our messages. Normally
|
|
|
|
these can be ignored except in the case of ``auth.logOut``:
|
|
|
|
|
|
|
|
auth.logOut#5717da40 = Bool;
|
2018-06-06 21:41:01 +03:00
|
|
|
|
2018-06-07 12:51:09 +03:00
|
|
|
Telegram doesn't seem to send its result so we need to confirm
|
|
|
|
it manually. No other request is known to have this behaviour.
|
2018-06-07 14:33:32 +03:00
|
|
|
|
|
|
|
Since the ID of sent messages consisting of a container is
|
|
|
|
never returned (unless on a bad notification), this method
|
|
|
|
also removes containers messages when any of their inner
|
|
|
|
messages are acknowledged.
|
2018-06-07 12:51:09 +03:00
|
|
|
"""
|
2018-06-09 14:48:27 +03:00
|
|
|
ack = message.obj
|
2019-01-11 17:52:30 +03:00
|
|
|
self._log.debug('Handling acknowledge for %s', str(ack.msg_ids))
|
2018-06-07 14:33:32 +03:00
|
|
|
for msg_id in ack.msg_ids:
|
2018-09-29 13:20:26 +03:00
|
|
|
state = self._pending_state.get(msg_id)
|
2021-09-12 14:27:13 +03:00
|
|
|
if state and isinstance(state.request, _tl.fn.auth.LogOut):
|
2018-09-29 13:20:26 +03:00
|
|
|
del self._pending_state[msg_id]
|
2021-02-27 17:14:44 +03:00
|
|
|
if not state.future.cancelled():
|
|
|
|
state.future.set_result(True)
|
2018-06-06 21:41:01 +03:00
|
|
|
|
2018-06-09 14:48:27 +03:00
|
|
|
async def _handle_future_salts(self, message):
|
2018-06-07 12:51:09 +03:00
|
|
|
"""
|
|
|
|
Handles future salt results, which don't come inside a
|
|
|
|
``rpc_result`` but are still sent through a request:
|
|
|
|
|
|
|
|
future_salts#ae500895 req_msg_id:long now:int
|
|
|
|
salts:vector<future_salt> = FutureSalts;
|
|
|
|
"""
|
|
|
|
# TODO save these salts and automatically adjust to the
|
|
|
|
# correct one whenever the salt in use expires.
|
2019-01-11 17:52:30 +03:00
|
|
|
self._log.debug('Handling future salts for message %d', message.msg_id)
|
2018-09-29 13:20:26 +03:00
|
|
|
state = self._pending_state.pop(message.msg_id, None)
|
|
|
|
if state:
|
|
|
|
state.future.set_result(message.obj)
|
2018-06-07 14:51:19 +03:00
|
|
|
|
2018-06-14 17:23:16 +03:00
|
|
|
async def _handle_state_forgotten(self, message):
    """
    Handles both :tl:`MsgsStateReq` and :tl:`MsgResendReq` by
    enqueuing a :tl:`MsgsStateInfo` to be sent at a later point.
    """
    # One status byte per requested message id.
    ack_info = chr(1) * len(message.obj.msg_ids)
    reply = _tl.MsgsStateInfo(req_msg_id=message.msg_id, info=ack_info)
    self._send_queue.append(RequestState(reply))
|
2018-06-14 17:23:16 +03:00
|
|
|
|
|
|
|
async def _handle_msg_all(self, message):
|
|
|
|
"""
|
|
|
|
Handles :tl:`MsgsAllInfo` by doing nothing (yet).
|
|
|
|
"""
|
2021-02-27 17:13:53 +03:00
|
|
|
|
|
|
|
async def _handle_destroy_session(self, message):
|
|
|
|
"""
|
|
|
|
Handles both :tl:`DestroySessionOk` and :tl:`DestroySessionNone`.
|
|
|
|
It behaves pretty much like handling an RPC result.
|
|
|
|
"""
|
|
|
|
for msg_id, state in self._pending_state.items():
|
2021-09-12 14:27:13 +03:00
|
|
|
if isinstance(state.request, _tl.fn.DestroySession)\
|
2021-02-27 17:13:53 +03:00
|
|
|
and state.request.session_id == message.obj.session_id:
|
|
|
|
break
|
|
|
|
else:
|
|
|
|
return
|
|
|
|
|
|
|
|
del self._pending_state[msg_id]
|
|
|
|
if not state.future.cancelled():
|
|
|
|
state.future.set_result(message.obj)
|