import gzip
import logging
import struct

from .. import helpers as utils
from ..crypto import AES
from ..errors import (
    BadMessageError, InvalidChecksumError, BrokenAuthKeyError,
    rpc_message_to_error
)
from ..extensions import BinaryReader
from ..tl import TLMessage, MessageContainer, GzipPacked
from ..tl.all_tlobjects import tlobjects
from ..tl.types import (
    MsgsAck, Pong, BadServerSalt, BadMsgNotification,
    MsgNewDetailedInfo, NewSessionCreated, MsgDetailedInfo
)
from ..tl.functions.auth import LogOutRequest

logging.getLogger(__name__).addHandler(logging.NullHandler())


class MtProtoSender:
    """MTProto Mobile Protocol sender
       (https://core.telegram.org/mtproto/description).

       Note that this class is not thread-safe, and calling send/receive
       from two or more threads at the same time is undefined behaviour.
       Rationale: a new connection should be spawned to send/receive requests
                  in parallel, so thread-safety (hence locking) isn't needed.
    """

    def __init__(self, session, connection):
        """Creates a new MtProtoSender configured to send messages through
           'connection' and using the parameters from 'session'.
        """
        self.session = session
        self.connection = connection
        self._logger = logging.getLogger(__name__)

        # Message IDs that need confirmation
        self._need_confirmation = set()

        # Requests (as msg_id: Message) sent waiting to be received
        self._pending_receive = {}

    def connect(self):
        """Connects to the server"""
        self.connection.connect(self.session.server_address, self.session.port)

    def is_connected(self):
        """Returns whether the underlying connection is currently open"""
        return self.connection.is_connected()

    def disconnect(self):
        """Disconnects from the server"""
        self.connection.close()
        self._need_confirmation.clear()
        self._clear_all_pending()

    def clone(self):
        """Creates a copy of this MtProtoSender as a new connection"""
        return MtProtoSender(self.session, self.connection.clone())
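
    # Rough usage sketch (illustrative only; the client code elsewhere in
    # this library is what actually drives the sender, and the 'session',
    # 'connection', 'request' and 'update_state' names below are assumed):
    #
    #     sender = MtProtoSender(session, connection)
    #     sender.connect()
    #     sender.send(request)                # also flushes pending acks
    #     while not request.confirm_received.is_set():
    #         sender.receive(update_state)    # unhandled objects are passed
    #                                         # to update_state.process(...)
    #     sender.disconnect()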

    # region Send and receive

    def send(self, *requests):
        """Sends the given TLObject request(s), attaching any pending
           acknowledgements that still need to be sent.
        """

        # Wrap each request in its own TLMessage (this assigns it a msg_id)
        messages = [TLMessage(self.session, r) for r in requests]
        self._pending_receive.update({m.msg_id: m for m in messages})

        # Pack everything in the same container if we also need to send acks
        if self._need_confirmation:
            messages.append(
                TLMessage(self.session, MsgsAck(list(self._need_confirmation)))
            )
            self._need_confirmation.clear()

        if len(messages) == 1:
            message = messages[0]
        else:
            message = TLMessage(self.session, MessageContainer(messages))
            # On bad_server_salt errors, Telegram will reply with the ID of
            # the container and not the requests it contains, so in case
            # this happens we need to know to which container they belong.
            for m in messages:
                m.container_msg_id = message.msg_id

        self._send_message(message)
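
    # Descriptive sketch of what a single send() call may put on the wire
    # when acknowledgements are pending (illustrative, not an exact dump):
    #
    #     msg_container
    #     ├── TLMessage(msg_id=A, body=<user request>)
    #     └── TLMessage(msg_id=B, body=MsgsAck([...]))
    #
    # Each inner message remembers container_msg_id so a bad_server_salt
    # referring to the container can be mapped back to the real requests.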

    def _send_acknowledge(self, msg_id):
        """Sends a message acknowledge for the given msg_id"""
        self._send_message(TLMessage(self.session, MsgsAck([msg_id])))

    def receive(self, update_state):
        """Receives a single message from the connected endpoint.

           This method returns nothing, and will only affect other parts
           of the MtProtoSender such as the updates callback being fired
           or a pending request being confirmed.

           Any unhandled object (likely updates) will be passed to
           update_state.process(TLObject).
        """
        try:
            body = self.connection.recv()
        except (BufferError, InvalidChecksumError):
            # TODO BufferError, we should spot the cause...
            # "No more bytes left"; something wrong happened, clear
            # everything to be on the safe side, or:
            #
            # "This packet should be skipped"; since this may have
            # been a result for a request, invalidate every request
            # and just re-invoke them to avoid problems
            self._clear_all_pending()
            return

        message, remote_msg_id, remote_seq = self._decode_msg(body)
        with BinaryReader(message) as reader:
            self._process_msg(remote_msg_id, remote_seq, reader, update_state)

    # endregion

    # region Low level processing

    def _send_message(self, message):
        """Sends the given Message(TLObject) encrypted through the network"""

        plain_text = \
            struct.pack('<QQ', self.session.salt, self.session.id) \
            + bytes(message)

        msg_key = utils.calc_msg_key(plain_text)
        key_id = struct.pack('<Q', self.session.auth_key.key_id)
        key, iv = utils.calc_key(self.session.auth_key.key, msg_key, True)
        cipher_text = AES.encrypt_ige(plain_text, key, iv)

        result = key_id + msg_key + cipher_text
        self.connection.send(result)
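
    # For reference, the packet built above is laid out roughly as follows
    # (sizes in bytes; the inner message layout is produced by TLMessage
    # elsewhere in the library and is assumed here rather than re-derived):
    #
    #     auth_key_id (8) | msg_key (16) | AES-IGE(
    #         salt (8) | session_id (8) | msg_id (8) |
    #         seq_no (4) | length (4) | body
    #     )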

    def _decode_msg(self, body):
        """Decodes the received encrypted message body bytes"""
        message = None
        remote_msg_id = None
        remote_sequence = None

        with BinaryReader(body) as reader:
            if len(body) < 8:
                if body == b'l\xfe\xff\xff':
                    raise BrokenAuthKeyError()
                else:
                    raise BufferError("Can't decode packet ({})".format(body))

            # TODO Check for both auth key ID and msg_key correctness
            reader.read_long()  # remote_auth_key_id
            msg_key = reader.read(16)

            key, iv = utils.calc_key(self.session.auth_key.key, msg_key, False)
            plain_text = AES.decrypt_ige(
                reader.read(len(body) - reader.tell_position()), key, iv)

            with BinaryReader(plain_text) as plain_text_reader:
                plain_text_reader.read_long()  # remote_salt
                plain_text_reader.read_long()  # remote_session_id
                remote_msg_id = plain_text_reader.read_long()
                remote_sequence = plain_text_reader.read_int()
                msg_len = plain_text_reader.read_int()
                message = plain_text_reader.read(msg_len)

        return message, remote_msg_id, remote_sequence
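
    # Worked example for the sentinel checked above: struct.pack('<i', -404)
    # yields b'l\xfe\xff\xff' (0x6C 0xFE 0xFF 0xFF), i.e. the little-endian
    # signed 32-bit encoding of -404, which this code treats as the server
    # rejecting our authorization key (hence BrokenAuthKeyError).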

    def _process_msg(self, msg_id, sequence, reader, state):
        """Processes and handles a Telegram message.

           Returns True if the message was handled correctly and doesn't
           need to be skipped. Returns False otherwise.
        """

        # TODO Check salt, session_id and sequence_number
        self._need_confirmation.add(msg_id)

        code = reader.read_int(signed=False)
        reader.seek(-4)

        # The following codes are "parsed manually"
        if code == 0xf35c6d01:  # rpc_result (response of an RPC call)
            return self._handle_rpc_result(msg_id, sequence, reader)

        if code == Pong.CONSTRUCTOR_ID:
            return self._handle_pong(msg_id, sequence, reader)

        if code == MessageContainer.CONSTRUCTOR_ID:
            return self._handle_container(msg_id, sequence, reader, state)

        if code == GzipPacked.CONSTRUCTOR_ID:
            return self._handle_gzip_packed(msg_id, sequence, reader, state)

        if code == BadServerSalt.CONSTRUCTOR_ID:
            return self._handle_bad_server_salt(msg_id, sequence, reader)

        if code == BadMsgNotification.CONSTRUCTOR_ID:
            return self._handle_bad_msg_notification(msg_id, sequence, reader)

        if code == MsgDetailedInfo.CONSTRUCTOR_ID:
            return self._handle_msg_detailed_info(msg_id, sequence, reader)

        if code == MsgNewDetailedInfo.CONSTRUCTOR_ID:
            return self._handle_msg_new_detailed_info(msg_id, sequence, reader)

        if code == NewSessionCreated.CONSTRUCTOR_ID:
            return self._handle_new_session_created(msg_id, sequence, reader)

        if code == MsgsAck.CONSTRUCTOR_ID:  # may handle the request we wanted
            ack = reader.tgread_object()
            assert isinstance(ack, MsgsAck)
            # Ignore every acknowledgement *unless* we are logging out, which
            # seems to be the only case where it makes sense. We also need to
            # set a non-None result, since Telegram doesn't send the response
            # for these.
            for msg_id in ack.msg_ids:
                r = self._pop_request_of_type(msg_id, LogOutRequest)
                if r:
                    r.result = True  # Telegram won't send this value
                    r.confirm_received.set()
                    self._logger.debug('Message ack confirmed: %s', r)

            return True

        # If the code is not parsed manually then it should be a TLObject.
        if code in tlobjects:
            result = reader.tgread_object()
            self.session.process_entities(result)
            if state:
                state.process(result)

            return True

        self._logger.debug(
            '[WARN] Unknown message: {}, data left in the buffer: {}'
            .format(
                hex(code), repr(reader.get_bytes()[reader.tell_position():])
            )
        )
        return False

    # endregion

    # region Message handling

    def _pop_request(self, msg_id):
        """Pops a pending REQUEST from self._pending_receive, or
           returns None if it's not found.
        """
        message = self._pending_receive.pop(msg_id, None)
        if message:
            return message.request

    def _pop_request_of_type(self, msg_id, t):
        """Pops a pending REQUEST from self._pending_receive if it matches
           the given type, or returns None if it's not found/doesn't match.
        """
        message = self._pending_receive.get(msg_id, None)
        if message and isinstance(message.request, t):
            return self._pending_receive.pop(msg_id).request

    def _pop_requests_of_container(self, container_msg_id):
        """Pops the pending requests (plural) from self._pending_receive if
           they were sent on a container that matches container_msg_id.
        """
        msgs = [msg for msg in self._pending_receive.values()
                if msg.container_msg_id == container_msg_id]

        requests = [msg.request for msg in msgs]
        for msg in msgs:
            self._pending_receive.pop(msg.msg_id, None)
        return requests

    def _clear_all_pending(self):
        """Marks every pending request as received and forgets about it"""
        for r in self._pending_receive.values():
            r.request.confirm_received.set()
        self._pending_receive.clear()

    def _resend_request(self, msg_id):
        """Re-sends the request that belongs to a certain msg_id. This may
           also be the msg_id of a container if they were sent in one.
        """
        request = self._pop_request(msg_id)
        if request:
            return self.send(request)
        requests = self._pop_requests_of_container(msg_id)
        if requests:
            return self.send(*requests)

    def _handle_pong(self, msg_id, sequence, reader):
        self._logger.debug('Handling pong')
        pong = reader.tgread_object()
        assert isinstance(pong, Pong)

        request = self._pop_request(pong.msg_id)
        if request:
            self._logger.debug('Pong confirmed a request')
            request.result = pong
            request.confirm_received.set()

        return True

    def _handle_container(self, msg_id, sequence, reader, state):
        self._logger.debug('Handling container')
        for inner_msg_id, _, inner_len in MessageContainer.iter_read(reader):
            begin_position = reader.tell_position()

            # Note that this code is IMPORTANT for skipping RPC results of
            # lost requests (i.e., ones from the previous connection session)
            try:
                if not self._process_msg(inner_msg_id, sequence, reader, state):
                    reader.set_position(begin_position + inner_len)
            except:
                # If any error is raised, something went wrong; skip the packet
                reader.set_position(begin_position + inner_len)
                raise

        return True
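
    # For reference, each item yielded by MessageContainer.iter_read above
    # corresponds to one inner message, laid out roughly as
    #
    #     msg_id (8) | seq_no (4) | length (4) | body (length bytes)
    #
    # which is why a failed or unhandled item can be jumped over by seeking
    # to begin_position + inner_len.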

    def _handle_bad_server_salt(self, msg_id, sequence, reader):
        self._logger.debug('Handling bad server salt')
        bad_salt = reader.tgread_object()
        assert isinstance(bad_salt, BadServerSalt)

        # Our salt is unsigned, but the objects work with signed salts
        self.session.salt = struct.unpack(
            '<Q', struct.pack('<q', bad_salt.new_server_salt)
        )[0]
        self.session.save()

        # "the bad_server_salt response is received with the
        # correct salt, and the message is to be re-sent with it"
        self._resend_request(bad_salt.bad_msg_id)
        return True
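
    # Worked example for the sign conversion above (assuming a negative
    # signed salt): struct.unpack('<Q', struct.pack('<q', -1))[0] gives
    # 18446744073709551615 (2**64 - 1), i.e. the same 8 bytes reinterpreted
    # as an unsigned 64-bit integer, which is how the session stores salts.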

    def _handle_bad_msg_notification(self, msg_id, sequence, reader):
        self._logger.debug('Handling bad message notification')
        bad_msg = reader.tgread_object()
        assert isinstance(bad_msg, BadMsgNotification)

        error = BadMessageError(bad_msg.error_code)
        if bad_msg.error_code in (16, 17):
            # sent msg_id too low or too high (respectively).
            # Use the current msg_id to determine the right time offset.
            self.session.update_time_offset(correct_msg_id=msg_id)
            self._logger.debug('Read Bad Message error: ' + str(error))
            self._logger.debug('Attempting to use the correct time offset.')
            self._resend_request(bad_msg.bad_msg_id)
            return True
        elif bad_msg.error_code == 32:
            # msg_seqno too low, so just pump it up by some "large" amount
            # TODO A better fix would be to start with a new fresh session ID
            self.session._sequence += 64
            self._resend_request(bad_msg.bad_msg_id)
            return True
        elif bad_msg.error_code == 33:
            # msg_seqno too high never seems to happen but just in case
            self.session._sequence -= 16
            self._resend_request(bad_msg.bad_msg_id)
            return True
        else:
            raise error

    def _handle_msg_detailed_info(self, msg_id, sequence, reader):
        msg_new = reader.tgread_object()
        assert isinstance(msg_new, MsgDetailedInfo)

        # TODO For now, simply ack msg_new.answer_msg_id
        # Relevant tdesktop source code: https://goo.gl/VvpCC6
        self._send_acknowledge(msg_new.answer_msg_id)
        return True

    def _handle_msg_new_detailed_info(self, msg_id, sequence, reader):
        msg_new = reader.tgread_object()
        assert isinstance(msg_new, MsgNewDetailedInfo)

        # TODO For now, simply ack msg_new.answer_msg_id
        # Relevant tdesktop source code: https://goo.gl/G7DPsR
        self._send_acknowledge(msg_new.answer_msg_id)
        return True

    def _handle_new_session_created(self, msg_id, sequence, reader):
        new_session = reader.tgread_object()
        assert isinstance(new_session, NewSessionCreated)
        # TODO https://goo.gl/LMyN7A
        return True

    def _handle_rpc_result(self, msg_id, sequence, reader):
        self._logger.debug('Handling RPC result')
        reader.read_int(signed=False)  # code
        request_id = reader.read_long()
        inner_code = reader.read_int(signed=False)

        request = self._pop_request(request_id)

        if inner_code == 0x2144ca19:  # RPC Error
            if self.session.report_errors and request:
                error = rpc_message_to_error(
                    reader.read_int(), reader.tgread_string(),
                    report_method=type(request).CONSTRUCTOR_ID
                )
            else:
                error = rpc_message_to_error(
                    reader.read_int(), reader.tgread_string()
                )

            # Acknowledge that we received the error
            self._send_acknowledge(request_id)

            if request:
                request.rpc_error = error
                request.confirm_received.set()
            # else TODO Where should this error be reported?
            # Read may be async. Can an error not belong to a request?
            self._logger.debug('Read RPC error: %s', str(error))
            return True  # All contents were read okay

        elif request:
            self._logger.debug('Reading request response')
            if inner_code == 0x3072cfa1:  # GZip packed
                unpacked_data = gzip.decompress(reader.tgread_bytes())
                with BinaryReader(unpacked_data) as compressed_reader:
                    request.on_response(compressed_reader)
            else:
                reader.seek(-4)
                request.on_response(reader)

            self.session.process_entities(request.result)
            request.confirm_received.set()
            return True

        # If this is really a result for an RPC sent during a previous
        # connection session, it will be skipped by _handle_container()
        self._logger.debug('Lost request will be skipped.')
        return False
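
    # For reference, the rpc_result body parsed above looks roughly like:
    #
    #     0xf35c6d01 | req_msg_id (8) | result
    #
    # where 'result' is either an rpc_error (0x2144ca19: error_code int,
    # error_message string), a gzip_packed wrapper (0x3072cfa1), or the
    # plain response object that the request's on_response() knows how
    # to read.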
|
2016-08-26 13:58:53 +03:00
|
|
|
|
2017-09-07 19:49:08 +03:00
|
|
|
def _handle_gzip_packed(self, msg_id, sequence, reader, state):
|
2017-05-29 18:06:48 +03:00
|
|
|
self._logger.debug('Handling gzip packed data')
|
2017-09-27 14:46:53 +03:00
|
|
|
with BinaryReader(GzipPacked.read(reader)) as compressed_reader:
|
2017-10-20 23:44:00 +03:00
|
|
|
# We are reentering process_msg, which seemingly the same msg_id
|
|
|
|
# to the self._need_confirmation set. Remove it from there first
|
|
|
|
# to avoid any future conflicts (i.e. if we "ignore" messages
|
|
|
|
# that we are already aware of, see 1a91c02 and old 63dfb1e)
|
|
|
|
self._need_confirmation -= {msg_id}
|
2017-09-07 19:49:08 +03:00
|
|
|
return self._process_msg(msg_id, sequence, compressed_reader, state)
|
2016-08-28 14:43:00 +03:00
|
|
|
|
|
|
|
# endregion
|