2018-06-13 17:20:15 +03:00
|
|
|
import asyncio
|
2020-05-16 10:58:37 +03:00
|
|
|
import inspect
|
2018-06-14 23:51:57 +03:00
|
|
|
import itertools
|
2018-06-18 14:22:25 +03:00
|
|
|
import random
|
2021-01-30 15:47:28 +03:00
|
|
|
import sys
|
2018-06-18 14:22:25 +03:00
|
|
|
import time
|
2021-01-30 15:47:28 +03:00
|
|
|
import traceback
|
2019-05-03 22:37:27 +03:00
|
|
|
import typing
|
2021-01-30 15:47:28 +03:00
|
|
|
import logging
|
2018-06-10 14:58:21 +03:00
|
|
|
|
2018-06-27 20:40:32 +03:00
|
|
|
from .. import events, utils, errors
|
2019-05-03 22:37:27 +03:00
|
|
|
from ..events.common import EventBuilder, EventCommon
|
2018-06-10 14:58:21 +03:00
|
|
|
from ..tl import types, functions
|
2019-05-03 22:37:27 +03:00
|
|
|
|
|
|
|
if typing.TYPE_CHECKING:
|
|
|
|
from .telegramclient import TelegramClient
|
2018-06-10 14:58:21 +03:00
|
|
|
|
|
|
|
|
2019-06-24 18:48:46 +03:00
|
|
|
class UpdateMethods:
|
2018-06-10 14:58:21 +03:00
|
|
|
|
|
|
|
# region Public methods
|
|
|
|
|
2019-05-03 22:37:27 +03:00
|
|
|
    async def _run_until_disconnected(self: 'TelegramClient'):
        """
        Coroutine form of `run_until_disconnected`: asks Telegram for the
        current update state (so updates keep being delivered) and then
        waits on `self.disconnected`, always disconnecting on the way out.
        """
        try:
            # Make a high-level request to notify that we want updates
            await self(functions.updates.GetStateRequest())
            return await self.disconnected
        except KeyboardInterrupt:
            # Swallow Ctrl+C so the user gets a clean shutdown, not a traceback.
            pass
        finally:
            # Always disconnect, whether we returned or were interrupted.
            await self.disconnect()
|
2018-06-25 14:32:31 +03:00
|
|
|
|
2019-05-03 22:37:27 +03:00
|
|
|
    def run_until_disconnected(self: 'TelegramClient'):
        """
        Runs the event loop until the library is disconnected.

        It also notifies Telegram that we want to receive updates
        as described in https://core.telegram.org/api/updates.

        Manual disconnections can be made by calling `disconnect()
        <telethon.client.telegrambaseclient.TelegramBaseClient.disconnect>`
        or sending a ``KeyboardInterrupt`` (e.g. by pressing ``Ctrl+C`` on
        the console window running the script).

        If a disconnection error occurs (i.e. the library fails to reconnect
        automatically), said error will be raised through here, so you have a
        chance to ``except`` it on your own code.

        If the loop is already running, this method returns a coroutine
        that you should await on your own code.

        .. note::

            If you want to handle ``KeyboardInterrupt`` in your code,
            simply run the event loop in your code too in any way, such as
            ``loop.run_forever()`` or ``await client.disconnected`` (e.g.
            ``loop.run_until_complete(client.disconnected)``).

        Example
            .. code-block:: python

                # Blocks the current task here until a disconnection occurs.
                #
                # You will still receive updates, since this prevents the
                # script from exiting.
                await client.run_until_disconnected()
        """
        # Async context: we cannot block, so hand back the coroutine
        # for the caller to await themselves.
        if self.loop.is_running():
            return self._run_until_disconnected()
        try:
            return self.loop.run_until_complete(self._run_until_disconnected())
        except KeyboardInterrupt:
            pass
        finally:
            # No loop.run_until_complete; it's already syncified
            self.disconnect()
|
2018-06-17 20:29:41 +03:00
|
|
|
|
2019-05-03 22:37:27 +03:00
|
|
|
def on(self: 'TelegramClient', event: EventBuilder):
|
2018-06-10 14:58:21 +03:00
|
|
|
"""
|
2019-05-06 12:38:26 +03:00
|
|
|
Decorator used to `add_event_handler` more conveniently.
|
2018-06-20 12:05:33 +03:00
|
|
|
|
2018-06-10 14:58:21 +03:00
|
|
|
|
2019-05-20 12:38:26 +03:00
|
|
|
Arguments
|
2018-06-10 14:58:21 +03:00
|
|
|
event (`_EventBuilder` | `type`):
|
|
|
|
The event builder class or instance to be used,
|
|
|
|
for instance ``events.NewMessage``.
|
2019-05-20 12:38:26 +03:00
|
|
|
|
|
|
|
Example
|
|
|
|
.. code-block:: python
|
|
|
|
|
|
|
|
from telethon import TelegramClient, events
|
|
|
|
client = TelegramClient(...)
|
|
|
|
|
|
|
|
# Here we use client.on
|
|
|
|
@client.on(events.NewMessage)
|
|
|
|
async def handler(event):
|
|
|
|
...
|
2018-06-10 14:58:21 +03:00
|
|
|
"""
|
|
|
|
def decorator(f):
|
|
|
|
self.add_event_handler(f, event)
|
|
|
|
return f
|
|
|
|
|
|
|
|
return decorator
|
|
|
|
|
2019-05-03 22:37:27 +03:00
|
|
|
def add_event_handler(
|
|
|
|
self: 'TelegramClient',
|
|
|
|
callback: callable,
|
|
|
|
event: EventBuilder = None):
|
2018-06-10 14:58:21 +03:00
|
|
|
"""
|
2019-05-06 12:38:26 +03:00
|
|
|
Registers a new event handler callback.
|
|
|
|
|
|
|
|
The callback will be called when the specified event occurs.
|
2018-06-10 14:58:21 +03:00
|
|
|
|
2019-05-20 12:38:26 +03:00
|
|
|
Arguments
|
2018-06-10 14:58:21 +03:00
|
|
|
callback (`callable`):
|
|
|
|
The callable function accepting one parameter to be used.
|
|
|
|
|
2018-09-22 13:51:58 +03:00
|
|
|
Note that if you have used `telethon.events.register` in
|
|
|
|
the callback, ``event`` will be ignored, and instead the
|
|
|
|
events you previously registered will be used.
|
|
|
|
|
2018-06-10 14:58:21 +03:00
|
|
|
event (`_EventBuilder` | `type`, optional):
|
|
|
|
The event builder class or instance to be used,
|
|
|
|
for instance ``events.NewMessage``.
|
|
|
|
|
|
|
|
If left unspecified, `telethon.events.raw.Raw` (the
|
|
|
|
:tl:`Update` objects with no further processing) will
|
|
|
|
be passed instead.
|
2019-05-20 12:38:26 +03:00
|
|
|
|
|
|
|
Example
|
|
|
|
.. code-block:: python
|
|
|
|
|
|
|
|
from telethon import TelegramClient, events
|
|
|
|
client = TelegramClient(...)
|
|
|
|
|
|
|
|
async def handler(event):
|
|
|
|
...
|
|
|
|
|
|
|
|
client.add_event_handler(handler, events.NewMessage)
|
2018-06-10 14:58:21 +03:00
|
|
|
"""
|
2018-09-22 13:51:58 +03:00
|
|
|
builders = events._get_handlers(callback)
|
|
|
|
if builders is not None:
|
|
|
|
for event in builders:
|
|
|
|
self._event_builders.append((event, callback))
|
|
|
|
return
|
|
|
|
|
2018-06-10 14:58:21 +03:00
|
|
|
if isinstance(event, type):
|
|
|
|
event = event()
|
|
|
|
elif not event:
|
|
|
|
event = events.Raw()
|
|
|
|
|
|
|
|
self._event_builders.append((event, callback))
|
|
|
|
|
2019-05-03 22:37:27 +03:00
|
|
|
def remove_event_handler(
|
|
|
|
self: 'TelegramClient',
|
|
|
|
callback: callable,
|
|
|
|
event: EventBuilder = None) -> int:
|
2018-06-10 14:58:21 +03:00
|
|
|
"""
|
2019-05-09 13:24:37 +03:00
|
|
|
Inverse operation of `add_event_handler()`.
|
2018-06-10 14:58:21 +03:00
|
|
|
|
|
|
|
If no event is given, all events for this callback are removed.
|
|
|
|
Returns how many callbacks were removed.
|
2019-05-20 12:38:26 +03:00
|
|
|
|
|
|
|
Example
|
|
|
|
.. code-block:: python
|
|
|
|
|
|
|
|
@client.on(events.Raw)
|
|
|
|
@client.on(events.NewMessage)
|
|
|
|
async def handler(event):
|
|
|
|
...
|
|
|
|
|
|
|
|
# Removes only the "Raw" handling
|
|
|
|
# "handler" will still receive "events.NewMessage"
|
|
|
|
client.remove_event_handler(handler, events.Raw)
|
|
|
|
|
|
|
|
# "handler" will stop receiving anything
|
|
|
|
client.remove_event_handler(handler)
|
2018-06-10 14:58:21 +03:00
|
|
|
"""
|
|
|
|
found = 0
|
|
|
|
if event and not isinstance(event, type):
|
|
|
|
event = type(event)
|
|
|
|
|
|
|
|
i = len(self._event_builders)
|
|
|
|
while i:
|
|
|
|
i -= 1
|
|
|
|
ev, cb = self._event_builders[i]
|
|
|
|
if cb == callback and (not event or isinstance(ev, event)):
|
|
|
|
del self._event_builders[i]
|
|
|
|
found += 1
|
|
|
|
|
|
|
|
return found
|
|
|
|
|
2019-05-03 22:37:27 +03:00
|
|
|
def list_event_handlers(self: 'TelegramClient')\
|
2019-05-08 18:16:09 +03:00
|
|
|
-> 'typing.Sequence[typing.Tuple[callable, EventBuilder]]':
|
2018-06-10 14:58:21 +03:00
|
|
|
"""
|
2019-05-06 12:38:26 +03:00
|
|
|
Lists all registered event handlers.
|
|
|
|
|
2019-05-20 12:38:26 +03:00
|
|
|
Returns
|
|
|
|
A list of pairs consisting of ``(callback, event)``.
|
|
|
|
|
|
|
|
Example
|
|
|
|
.. code-block:: python
|
|
|
|
|
|
|
|
@client.on(events.NewMessage(pattern='hello'))
|
|
|
|
async def on_greeting(event):
|
|
|
|
'''Greets someone'''
|
|
|
|
await event.reply('Hi')
|
|
|
|
|
|
|
|
for callback, event in client.list_event_handlers():
|
|
|
|
print(id(callback), type(event))
|
2018-06-10 14:58:21 +03:00
|
|
|
"""
|
|
|
|
return [(callback, event) for event, callback in self._event_builders]
|
|
|
|
|
2019-05-03 22:37:27 +03:00
|
|
|
    async def catch_up(self: 'TelegramClient'):
        """
        "Catches up" on the missed updates while the client was offline.
        You should call this method after registering the event handlers
        so that the updates it loads can by processed by your script.

        This can also be used to forcibly fetch new updates if there are any.

        Example
            .. code-block:: python

                await client.catch_up()
        """
        # The last globally-known (pts, date); without a pts we have no
        # reference point to fetch the difference from, so bail out.
        pts, date = self._state_cache[None]
        if not pts:
            return

        self.session.catching_up = True
        try:
            # Keep fetching slices until Telegram reports we're caught up.
            while True:
                d = await self(functions.updates.GetDifferenceRequest(
                    pts, date, 0
                ))
                if isinstance(d, (types.updates.DifferenceSlice,
                                  types.updates.Difference)):
                    if isinstance(d, types.updates.Difference):
                        state = d.state
                    else:
                        state = d.intermediate_state

                    # Advance our reference point before dispatching, so
                    # the next request continues from this slice's end.
                    pts, date = state.pts, state.date
                    # Re-wrap the difference as a regular Updates container
                    # so it flows through the normal dispatch pipeline.
                    self._handle_update(types.Updates(
                        users=d.users,
                        chats=d.chats,
                        date=state.date,
                        seq=state.seq,
                        updates=d.other_updates + [
                            types.UpdateNewMessage(m, 0, 0)
                            for m in d.new_messages
                        ]
                    ))

                    # TODO Implement upper limit (max_pts)
                    # We don't want to fetch updates we already know about.
                    #
                    # We may still get duplicates because the Difference
                    # contains a lot of updates and presumably only has
                    # the state for the last one, but at least we don't
                    # unnecessarily fetch too many.
                    #
                    # updates.getDifference's pts_total_limit seems to mean
                    # "how many pts is the request allowed to return", and
                    # if there is more than that, it returns "too long" (so
                    # there would be duplicate updates since we know about
                    # some). This can be used to detect collisions (i.e.
                    # it would return an update we have already seen).
                else:
                    # Empty/TooLong both end the loop; record the fresher
                    # marker Telegram gave us before breaking.
                    if isinstance(d, types.updates.DifferenceEmpty):
                        date = d.date
                    elif isinstance(d, types.updates.DifferenceTooLong):
                        pts = d.pts
                    break
        except (ConnectionError, asyncio.CancelledError):
            # Best-effort: a dropped connection just stops the catch-up.
            pass
        finally:
            # TODO Save new pts to session
            self._state_cache._pts_date = (pts, date)
            self.session.catching_up = False
|
|
|
|
|
|
|
|
# endregion
|
|
|
|
|
|
|
|
# region Private methods
|
|
|
|
|
2019-04-10 20:09:15 +03:00
|
|
|
    # It is important to not make _handle_update async because we rely on
    # the order that the updates arrive in to update the pts and date to
    # be always-increasing. There is also no need to make this async.
    def _handle_update(self: 'TelegramClient', update):
        """
        Entry point for every raw update container received from Telegram:
        stores entities, unwraps containers and feeds each inner update to
        `_process_update`, then records the new state.
        """
        self.session.process_entities(update)
        self._entity_cache.add(update)

        if isinstance(update, (types.Updates, types.UpdatesCombined)):
            # Containers carry their own users/chats; index them by peer ID
            # so the individual events can resolve entities offline.
            entities = {utils.get_peer_id(x): x for x in
                        itertools.chain(update.users, update.chats)}
            for u in update.updates:
                self._process_update(u, update.updates, entities=entities)
        elif isinstance(update, types.UpdateShort):
            self._process_update(update.update, None)
        else:
            self._process_update(update, None)

        self._state_cache.update(update)
|
2019-04-10 20:09:15 +03:00
|
|
|
|
2019-06-30 17:32:18 +03:00
|
|
|
    def _process_update(self: 'TelegramClient', update, others, entities=None):
        """
        Schedules the dispatch of a single (unwrapped) update, either as its
        own task or through the sequential dispatch queue, and updates the
        state cache afterwards. Must stay synchronous (see `_handle_update`).
        """
        update._entities = entities or {}

        # This part is somewhat hot so we don't bother patching
        # update with channel ID/its state. Instead we just pass
        # arguments which is faster.
        channel_id = self._state_cache.get_channel_id(update)
        args = (update, others, channel_id, self._state_cache[channel_id])
        if self._dispatching_updates_queue is None:
            # No sequential mode: fire a task per update, tracked in
            # `_updates_queue` (a set here) until it completes.
            task = self.loop.create_task(self._dispatch_update(*args))
            self._updates_queue.add(task)
            task.add_done_callback(lambda _: self._updates_queue.discard(task))
        else:
            # Sequential mode: enqueue and make sure exactly one drainer
            # task is running (the event acts as the "running" flag).
            self._updates_queue.put_nowait(args)
            if not self._dispatching_updates_queue.is_set():
                self._dispatching_updates_queue.set()
                self.loop.create_task(self._dispatch_queue_updates())

        self._state_cache.update(update)
|
2018-06-13 17:20:15 +03:00
|
|
|
|
2019-05-03 22:37:27 +03:00
|
|
|
    async def _update_loop(self: 'TelegramClient'):
        """
        Background maintenance loop: roughly once a minute (or until
        disconnected) it cleans exported senders, sends a keep-alive ping,
        saves the session, and every 30 minutes makes a content-related
        request so Telegram keeps delivering updates.
        """
        # Pings' ID don't really need to be secure, just "random"
        rnd = lambda: random.randrange(-2**63, 2**63)
        while self.is_connected():
            try:
                # Wake up every 60s; an actual disconnection ends the wait
                # early, in which case we loop back to re-check the state.
                await asyncio.wait_for(
                    self.disconnected, timeout=60
                )
                continue  # We actually just want to act upon timeout
            except asyncio.TimeoutError:
                pass
            except asyncio.CancelledError:
                return
            except Exception:
                continue  # Any disconnected exception should be ignored

            # Check if we have any exported senders to clean-up periodically
            await self._clean_exported_senders()

            # Don't bother sending pings until the low-level connection is
            # ready, otherwise a lot of pings will be batched to be sent upon
            # reconnect, when we really don't care about that.
            if not self._sender._transport_connected():
                continue

            # We also don't really care about their result.
            # Just send them periodically.
            try:
                self._sender._keepalive_ping(rnd())
            except (ConnectionError, asyncio.CancelledError):
                return

            # Entities and cached files are not saved when they are
            # inserted because this is a rather expensive operation
            # (default's sqlite3 takes ~0.1s to commit changes). Do
            # it every minute instead. No-op if there's nothing new.
            self.session.save()

            # We need to send some content-related request at least hourly
            # for Telegram to keep delivering updates, otherwise they will
            # just stop even if we're connected. Do so every 30 minutes.
            #
            # TODO Call getDifference instead since it's more relevant
            if time.time() - self._last_request > 30 * 60:
                if not await self.is_user_authorized():
                    # What can be the user doing for so
                    # long without being logged in...?
                    continue

                try:
                    await self(functions.updates.GetStateRequest())
                except (ConnectionError, asyncio.CancelledError):
                    return
|
2018-06-18 14:22:25 +03:00
|
|
|
|
2019-05-03 22:37:27 +03:00
|
|
|
async def _dispatch_queue_updates(self: 'TelegramClient'):
|
2018-06-29 11:45:04 +03:00
|
|
|
while not self._updates_queue.empty():
|
2019-04-23 21:15:27 +03:00
|
|
|
await self._dispatch_update(*self._updates_queue.get_nowait())
|
2018-06-29 11:45:04 +03:00
|
|
|
|
|
|
|
self._dispatching_updates_queue.clear()
|
|
|
|
|
2019-06-30 17:32:18 +03:00
|
|
|
    async def _dispatch_update(self: 'TelegramClient', update, others, channel_id, pts_date):
        """
        Dispatches a single update: fetches missing entities via
        getDifference if needed, makes sure our own input peer is cached,
        notifies any active conversations, and finally runs every matching
        registered event handler (honoring filters and StopPropagation).
        """
        if not self._entity_cache.ensure_cached(update):
            # We could add a lock to not fetch the same pts twice if we are
            # already fetching it. However this does not happen in practice,
            # which makes sense, because different updates have different pts.
            if self._state_cache.update(update, check_only=True):
                # If the update doesn't have pts, fetching won't do anything.
                # For example, UpdateUserStatus or UpdateChatUserTyping.
                try:
                    await self._get_difference(update, channel_id, pts_date)
                except OSError:
                    pass  # We were disconnected, that's okay
                except errors.RPCError:
                    # There's a high chance the request fails because we lack
                    # the channel. Because these "happen sporadically" (#1428)
                    # we should be okay (no flood waits) even if more occur.
                    pass
                except ValueError:
                    # There is a chance that GetFullChannelRequest and GetDifferenceRequest
                    # inside the _get_difference() function will end up with
                    # ValueError("Request was unsuccessful N time(s)") for whatever reasons.
                    pass

        if not self._self_input_peer:
            # Some updates require our own ID, so we must make sure
            # that the event builder has offline access to it. Calling
            # `get_me()` will cache it under `self._self_input_peer`.
            #
            # It will return `None` if we haven't logged in yet which is
            # fine, we will just retry next time anyway.
            try:
                await self.get_me(input_peer=True)
            except OSError:
                pass  # might not have connection

        # Lazily builds (and caches) one event instance per builder type.
        built = EventBuilderDict(self, update, others)
        # Conversations are fed first so replies/edits/reads are tracked
        # before user handlers can react.
        for conv_set in self._conversations.values():
            for conv in conv_set:
                ev = built[events.NewMessage]
                if ev:
                    conv._on_new_message(ev)

                ev = built[events.MessageEdited]
                if ev:
                    conv._on_edit(ev)

                ev = built[events.MessageRead]
                if ev:
                    conv._on_read(ev)

                if conv._custom:
                    await conv._check_custom(built)

        for builder, callback in self._event_builders:
            event = built[type(builder)]
            if not event:
                continue

            if not builder.resolved:
                await builder.resolve(self)

            # Filters may be sync or async; awaiting only when needed.
            filter = builder.filter(event)
            if inspect.isawaitable(filter):
                filter = await filter
            if not filter:
                continue

            try:
                await callback(event)
            except errors.AlreadyInConversationError:
                name = getattr(callback, '__name__', repr(callback))
                self._log[__name__].debug(
                    'Event handler "%s" already has an open conversation, '
                    'ignoring new one', name)
            except events.StopPropagation:
                # A handler explicitly halted the chain for this event.
                name = getattr(callback, '__name__', repr(callback))
                self._log[__name__].debug(
                    'Event handler "%s" stopped chain of propagation '
                    'for event %s.', name, type(event).__name__
                )
                break
            except Exception as e:
                # Log unexpected handler errors; CancelledError is only
                # reported if we're still connected (i.e. not shutting down).
                if not isinstance(e, asyncio.CancelledError) or self.is_connected():
                    name = getattr(callback, '__name__', repr(callback))
                    self._log[__name__].exception('Unhandled exception on %s', name)
|
2020-06-06 22:01:02 +03:00
|
|
|
|
|
|
|
    async def _dispatch_event(self: 'TelegramClient', event):
        """
        Dispatches a single, out-of-order event. Used by `AlbumHack`.
        """
        # We're duplicating most of the logic from `_dispatch_update`, but
        # all in the name of speed; we don't want to make it worse for all
        # updates just because albums may need it.
        for builder, callback in self._event_builders:
            # Raw builders are skipped: this path delivers an already-built
            # event, not a raw update.
            if isinstance(builder, events.Raw):
                continue
            if not isinstance(event, builder.Event):
                continue

            if not builder.resolved:
                await builder.resolve(self)

            # Filters may be sync or async; awaiting only when needed.
            filter = builder.filter(event)
            if inspect.isawaitable(filter):
                filter = await filter
            if not filter:
                continue

            try:
                await callback(event)
            except errors.AlreadyInConversationError:
                name = getattr(callback, '__name__', repr(callback))
                self._log[__name__].debug(
                    'Event handler "%s" already has an open conversation, '
                    'ignoring new one', name)
            except events.StopPropagation:
                # A handler explicitly halted the chain for this event.
                name = getattr(callback, '__name__', repr(callback))
                self._log[__name__].debug(
                    'Event handler "%s" stopped chain of propagation '
                    'for event %s.', name, type(event).__name__
                )
                break
            except Exception as e:
                # Log unexpected handler errors; CancelledError is only
                # reported if we're still connected (i.e. not shutting down).
                if not isinstance(e, asyncio.CancelledError) or self.is_connected():
                    name = getattr(callback, '__name__', repr(callback))
                    self._log[__name__].exception('Unhandled exception on %s', name)
|
2018-06-10 14:58:21 +03:00
|
|
|
|
2019-05-03 22:37:27 +03:00
|
|
|
    async def _get_difference(self: 'TelegramClient', update, channel_id, pts_date):
        """
        Get the difference for this `channel_id` if any, then load entities.

        Calls :tl:`updates.getDifference`, which fills the entities cache
        (always done by `__call__`) and lets us know about the full entities.
        """
        # Fetch since the last known pts/date before this update arrived,
        # in order to fetch this update at full, including its entities.
        self._log[__name__].debug('Getting difference for entities '
                                  'for %r', update.__class__)
        if channel_id:
            # There are reports where we somehow call get channel difference
            # with `InputPeerEmpty`. Check our assumptions to better debug
            # this when it happens.
            assert isinstance(channel_id, int), 'channel_id was {}, not int in {}'.format(type(channel_id), update)
            try:
                # Wrap the ID inside a peer to ensure we get a channel back.
                where = await self.get_input_entity(types.PeerChannel(channel_id))
            except ValueError:
                # There's a high chance that this fails, since
                # we are getting the difference to fetch entities.
                return

            if not pts_date:
                # First-time, can't get difference. Get pts instead.
                result = await self(functions.channels.GetFullChannelRequest(
                    utils.get_input_channel(where)
                ))
                self._state_cache[channel_id] = result.full_chat.pts
                return

            result = await self(functions.updates.GetChannelDifferenceRequest(
                channel=where,
                filter=types.ChannelMessagesFilterEmpty(),
                pts=pts_date,  # just pts
                limit=100,
                force=True
            ))
        else:
            # Global (non-channel) state is the (pts, date) pair.
            if not pts_date[0]:
                # First-time, can't get difference. Get pts instead.
                result = await self(functions.updates.GetStateRequest())
                self._state_cache[None] = result.pts, result.date
                return

            result = await self(functions.updates.GetDifferenceRequest(
                pts=pts_date[0],
                date=pts_date[1],
                qts=0
            ))

        # Merge any users/chats the difference brought into the update's
        # own entity map so event builders can resolve them offline.
        if isinstance(result, (types.updates.Difference,
                               types.updates.DifferenceSlice,
                               types.updates.ChannelDifference,
                               types.updates.ChannelDifferenceTooLong)):
            update._entities.update({
                utils.get_peer_id(x): x for x in
                itertools.chain(result.users, result.chats)
            })
|
|
|
|
|
2019-05-03 22:37:27 +03:00
|
|
|
    async def _handle_auto_reconnect(self: 'TelegramClient'):
        """
        Called after an automatic reconnection; makes a high-level request
        so Telegram knows we still want updates. The full catch-up logic
        below the early ``return`` is intentionally disabled for now.
        """
        # TODO Catch-up
        # For now we make a high-level request to let Telegram
        # know we are still interested in receiving more updates.
        try:
            await self.get_me()
        except Exception as e:
            # Best-effort only: failing here must not break the reconnect.
            self._log[__name__].warning('Error executing high-level request '
                                        'after reconnect: %s: %s', type(e), e)

        return
        # NOTE: everything below is unreachable (dead code kept on purpose
        # until the catch-up-on-reconnect behavior is re-enabled).
        try:
            self._log[__name__].info(
                'Asking for the current state after reconnect...')

            # TODO consider:
            # If there aren't many updates while the client is disconnected
            # (I tried with up to 20), Telegram seems to send them without
            # asking for them (via updates.getDifference).
            #
            # On disconnection, the library should probably set a "need
            # difference" or "catching up" flag so that any new updates are
            # ignored, and then the library should call updates.getDifference
            # itself to fetch them.
            #
            # In any case (either there are too many updates and Telegram
            # didn't send them, or there isn't a lot and Telegram sent them
            # but we dropped them), we fetch the new difference to get all
            # missed updates. I feel like this would be the best solution.

            # If a disconnection occurs, the old known state will be
            # the latest one we were aware of, so we can catch up since
            # the most recent state we were aware of.
            await self.catch_up()

            self._log[__name__].info('Successfully fetched missed updates')
        except errors.RPCError as e:
            self._log[__name__].warning('Failed to get missed updates after '
                                        'reconnect: %r', e)
        except Exception:
            self._log[__name__].exception(
                'Unhandled exception while getting update difference after reconnect')
|
2018-06-27 20:40:32 +03:00
|
|
|
|
2018-06-10 14:58:21 +03:00
|
|
|
# endregion
|
2018-08-21 11:27:12 +03:00
|
|
|
|
|
|
|
|
|
|
|
class EventBuilderDict:
    """
    Helper "dictionary" to return events from types and cache them.
    """
    def __init__(self, client: 'TelegramClient', update, others):
        self.client = client
        self.update = update
        self.others = others

    def __getitem__(self, builder):
        # EAFP cache: built events are stored directly in the instance
        # `__dict__`, keyed by the builder class, so a repeated lookup for
        # the same builder type is a plain dict hit.
        try:
            return self.__dict__[builder]
        except KeyError:
            event = self.__dict__[builder] = builder.build(
                self.update, self.others, self.client._self_id)

            if isinstance(event, EventCommon):
                # Full-fledged events get the original update, the entity
                # map and the client attached for offline resolution.
                event.original_update = self.update
                event._entities = self.update._entities
                event._set_client(self.client)
            elif event:
                # Lighter event objects only need the client reference.
                event._client = self.client

            return event