diff --git a/.coveragerc b/.coveragerc
new file mode 100644
index 0000000..3f06bb6
--- /dev/null
+++ b/.coveragerc
@@ -0,0 +1,23 @@
+[run]
+branch = True
+source = channels
+omit = tests/*
+
+[report]
+show_missing = True
+skip_covered = True
+omit = tests/*
+
+[html]
+directory = coverage_html
+
+[paths]
+django_18 =
+ .tox/py27-django-18/lib/python2.7
+ .tox/py34-django-18/lib/python3.4
+ .tox/py35-django-18/lib/python3.5
+
+django_19 =
+ .tox/py27-django-19/lib/python2.7
+ .tox/py34-django-19/lib/python3.4
+ .tox/py35-django-19/lib/python3.5
diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md
new file mode 100644
index 0000000..9820316
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE.md
@@ -0,0 +1,15 @@
+Please submit **questions** about usage to django-users@googlegroups.com (https://groups.google.com/forum/#!forum/django-users) and about development to django-developers@googlegroups.com (https://groups.google.com/forum/#!forum/django-developers).
+
+If you're submitting a feature request, please try to include:
+
+- Detailed description of the overall behaviour
+- Reasoning why it should be in Channels rather than a third-party app
+- Examples of usage, if possible (it's easier to discuss concrete code examples)
+
+If you're submitting a bug report, please include:
+
+- Your OS and runtime environment, and browser if applicable
+- The versions of Channels, Daphne, Django, Twisted, and your ASGI backend (asgi_ipc or asgi_redis normally)
+- What you expected to happen vs. what actually happened
+- How you're running Channels (runserver? daphne/runworker? Nginx/Apache in front?)
+- Console logs and full tracebacks of any errors
diff --git a/.gitignore b/.gitignore
index e3535d3..8736ba0 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,9 +1,16 @@
*.egg-info
-*.pyc
-__pycache__
dist/
build/
-/.tox
-.hypothesis
-.cache
-.eggs
+docs/_build
+__pycache__/
+*.sqlite3
+.tox/
+*.swp
+*.pyc
+.coverage.*
+TODO
+node_modules
+
+# IDE and Tooling files
+.idea/*
+*~
diff --git a/.travis.yml b/.travis.yml
index 9672140..2c4da6b 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,4 +1,5 @@
sudo: false
+dist: trusty
language: python
@@ -8,6 +9,24 @@ python:
- "3.5"
- "3.6"
-install: pip install tox tox-travis
+env:
+ - DJANGO="Django>=1.8,<1.9"
+ - DJANGO="Django>=1.9,<1.10"
+ - DJANGO="Django>=1.10,<1.11"
+ - DJANGO="Django>=1.11,<2.0"
-script: tox
+cache:
+ directories:
+ - $HOME/.cache/pip/wheels
+
+install:
+ - nvm install 7
+ - pip install -U pip wheel setuptools
+ - pip install $DJANGO -e .[tests]
+ - pip freeze
+
+script:
+ - python runtests.py
+# - cd js_client && npm install --progress=false && npm test && cd ..
+ - flake8
+ - isort --check-only --recursive channels
diff --git a/CHANGELOG.txt b/CHANGELOG.txt
index 6238f69..0f50233 100644
--- a/CHANGELOG.txt
+++ b/CHANGELOG.txt
@@ -1,272 +1,421 @@
-1.2.0 (2017-04-01)
+Full release notes, with more details and upgrade information, are available at:
+https://channels.readthedocs.io/en/latest/releases
+
+
+1.1.8 (2017-09-15)
------------------
-* The new process-specific channel support is now implemented, resulting in
- significantly less traffic to your channel backend.
+* Reverted recent JS fixes for subprotocols on some phones as they do not work
+ in Chrome.
-* Native twisted blocking support for channel layers that support it is now
- used. While it is a lot more efficient, it is also sometimes slightly more
- latent; you can disable it using --force-sync.
-* Native SSL termination is now correctly reflected in the ASGI-HTTP `scheme`
- key.
+1.1.7 (2017-09-14)
+------------------
-* accept: False is now a valid way to deny a connection, as well as close: True.
+* Fixed compatibility with Django 1.10 and below
-* HTTP version is now correctly sent as one of "1.0", "1.1" or "2".
+* JS library: Fixed error with 1006 error code
-* More command line options for websocket timeouts
+
+1.1.6 (2017-06-28)
+------------------
+
+* The ``runserver`` ``server_cls`` override no longer fails with more modern
+ Django versions that pass an ``ipv6`` parameter.
+
+
+1.1.5 (2017-06-16)
+------------------
+
+* The Daphne dependency requirement was bumped to 1.3.0.
+
+
+1.1.4 (2017-06-15)
+------------------
+
+* Pending messages correctly handle retries in backlog situations
+
+* Workers in threading mode now respond to ctrl-C and gracefully exit.
+
+* ``request.meta['QUERY_STRING']`` is now correctly encoded at all times.
+
+* Test client improvements
+
+* ``ChannelServerLiveTestCase`` added, allows an equivalent of the Django
+ ``LiveTestCase``.
+
+* Decorator added to check ``Origin`` headers (``allowed_hosts_only``)
+
+* New ``TEST_CONFIG`` setting in ``CHANNEL_LAYERS`` that allows varying of
+ the channel layer for tests (e.g. using a different Redis install)
+
+
+1.1.3 (2017-04-05)
+------------------
+
+* ``enforce_ordering`` now works correctly with the new-style process-specific
+ channels
+
+* ASGI channel layer versions are now explicitly checked for version compatibility
+
+
+1.1.2 (2017-04-01)
+------------------
+
+* Session name hash changed to SHA-1 to satisfy FIPS-140-2. Due to this,
+ please force all WebSockets to reconnect after the upgrade.
+
+* `scheme` key in ASGI-HTTP messages now translates into `request.is_secure()`
+ correctly.
+
+* WebsocketBridge now exposes the underlying WebSocket as `.socket`
+
+
+1.1.1 (2017-03-19)
+------------------
+
+* Fixed JS packaging issue
1.1.0 (2017-03-18)
------------------
-* HTTP/2 termination is now supported natively. The Twisted dependency has been
- increased to at least 17.1 as a result; for more information about setting up
- HTTP/2, see the README.
+* Channels now includes a JavaScript wrapper that wraps reconnection and
+ multiplexing for you on the client side.
-* X-Forwarded-For decoding support understands IPv6 addresses, and picks the
- most remote (leftmost) entry if there are multiple relay hosts.
+* Test classes have been moved from ``channels.tests`` to ``channels.test``.
-* Fixed an error where `disconnect` messages would still try and get sent even
- if the client never finished a request.
+* Bindings now support non-integer fields for primary keys on models.
+
+* The ``enforce_ordering`` decorator no longer suffers a race condition where
+ it would drop messages under high load.
+
+* ``runserver`` no longer errors if the ``staticfiles`` app is not enabled in Django.
-1.0.3 (2017-02-12)
+1.0.3 (2017-02-01)
------------------
-* IPv6 addresses are correctly accepted as bind targets on the command line
+* Database connections are no longer force-closed after each test is run.
-* Twisted 17.1 compatability fixes for WebSocket receiving/keepalive and
- proxy header detection.
+* Channel sessions are not re-saved if they're empty even if they're marked as
+ modified, allowing logout to work correctly.
+
+* WebsocketDemultiplexer now correctly does sessions for the second/third/etc.
+ connect and disconnect handlers.
+
+* Request reading timeouts now correctly return 408 rather than erroring out.
+
+* The ``rundelay`` delay server now only polls the database once per second,
+ and this interval is configurable with the ``--sleep`` option.
-1.0.2 (2017-02-01)
+1.0.2 (2017-01-12)
------------------
-* The "null" WebSocket origin (including file:// and no value) is now accepted
- by Daphne and passed onto the application to accept/deny.
+* Websockets can now be closed from anywhere using the new ``WebsocketCloseException``.
+ There is also a generic ``ChannelSocketException`` so you can do custom behaviours.
-* Listening on file descriptors works properly again.
+* Calling ``Channel.send`` or ``Group.send`` from outside a consumer context
+ (i.e. in tests or management commands) will once again send the message immediately.
-* The DeprecationError caused by not passing endpoints into a Server class
- directly is now a warning instead.
+* The base implementation of databinding now correctly only calls ``group_names(instance)``,
+ as documented.
1.0.1 (2017-01-09)
------------------
-* Endpoint unicode strings now work correctly on Python 2 and Python 3
+* WebSocket generic views now accept connections by default in their connect
+ handler for better backwards compatibility.
1.0.0 (2017-01-08)
------------------
-* BREAKING CHANGE: Daphne now requires acceptance of WebSocket connections
- before it finishes the socket handshake and relays incoming packets.
- You must upgrade to at least Channels 1.0.0 as well; see
- http://channels.readthedocs.io/en/latest/releases/1.0.0.html for more.
+* BREAKING CHANGE: WebSockets must now be explicitly accepted or denied.
+ See https://channels.readthedocs.io/en/latest/releases/1.0.0.html for more.
-* http.disconnect now has a `path` key
+* BREAKING CHANGE: Demultiplexers have been overhauled to directly dispatch
+ messages rather than using channels to new consumers. Consult the docs on
+ generic consumers for more: https://channels.readthedocs.io/en/latest/generics.html
-* WebSockets can now be closed with a specific code
+* BREAKING CHANGE: Databinding now operates from implicit group membership,
+ where your code just has to say what groups would be used and Channels will
+ work out if it's a creation, modification or removal from a client's
+ perspective, including with permissions.
-* X-Forwarded-For header support; defaults to X-Forwarded-For, override with
- --proxy-headers on the commandline.
+* Delay protocol server ships with Channels providing a specification on how
+ to delay jobs until later and a reference implementation.
-* Twisted endpoint description string support with `-e` on the command line
- (allowing for SNI/ACME support, among other things)
+* Serializers can now specify fields as `__all__` to auto-include all fields.
-* Logging/error verbosity fixes and access log flushes properly
+* Various other small fixes.
-
-0.15.0 (2016-08-28)
+0.17.3 (2016-10-12)
-------------------
-* Connections now force-close themselves after pings fail for a certain
- timeframe, controllable via the new --ping-timeout option.
+* channel_session now also rehydrates the http session with an option
-* Badly-formatted websocket response messages now log to console in
- all situations
+* request.META['PATH_INFO'] is now present
-* Compatability with Twisted 16.3 and up
+* runserver shows Daphne log messages
+
+* runserver --nothreading only starts a single worker thread
+
+* Databinding changed to call group_names dynamically and imply changed/created from that;
+ other small changes to databinding, and more changes likely.
-0.14.3 (2016-07-21)
+0.17.2 (2016-08-04)
-------------------
-* File descriptors can now be passed on the commandline for process managers
- that pass sockets along like this.
+* New CHANNELS_WS_PROTOCOLS setting if you want Daphne to accept certain
+ subprotocols
-* websocket.disconnect messages now come with a "code" attribute matching the
- WebSocket spec.
+* WebsocketBindingWithMembers allows serialization of non-fields on instances
-* A memory leak in request logging has been fixed.
+* Class-based consumers have an .as_route() method that lets you skip using
+ route_class
+
+* Bindings now work if loaded after app ready state
-0.14.2 (2016-07-07)
+0.17.1 (2016-07-22)
-------------------
-* Marked as incompatible with twisted 16.3 and above until we work out why
- it stops incoming websocket messages working
+* Bindings now require that `fields` is defined on the class body so all fields
+ are not sent by default. To restore old behaviour, set it to ['__all__']
+
+* Bindings can now be declared after app.ready() has been called and still work.
+
+* Binding payloads now include the model name as `appname.modelname`.
+
+* A worker_ready signal now gets triggered when `runworker` starts consuming
+ messages. It does not fire from within `runserver`.
-0.14.1 (2016-07-06)
+0.17.0 (2016-07-19)
-------------------
-* Consumption of websocket.receive is also now required.
+* Data Binding framework is added, which allows easy tying of model changes
+ to WebSockets (and other protocols) and vice-versa.
+
+* Standardised WebSocket/JSON multiplexing introduced
+
+* WebSocket generic consumers now have a 'close' argument on send/group_send
-0.14.0 (2016-07-06)
+0.16.1 (2016-07-12)
-------------------
-* Consumption of websocket.connect is now required (channels 0.16 enforces
- this); getting backpressure on it now results in the socket being
- force closed.
+* WebsocketConsumer now has a http_user option for auto user sessions.
+
+* consumer_started and consumer_finished signals are now available under
+ channels.signals.
+
+* Database connections are closed whenever a consumer finishes.
-0.13.1 (2016-06-28)
+0.16.0 (2016-07-06)
-------------------
-* Bad WebSocket handshakes now return 400 and an error messages
- rather than 500 with no content.
+* websocket.connect and websocket.receive are now consumed by a no-op consumer
+ by default if you don't specify anything to consume it, to bring Channels in
+ line with the ASGI rules on WebSocket backpressure.
+
+* You no longer need to call super's setUp in ChannelTestCase.
-0.13.0 (2016-06-22)
+0.15.1 (2016-06-29)
-------------------
-* Query strings are now sent as bytestrings and the application is responsible
- for decoding. Ensure you're running Channels 0.15 or higher.
+* Class based consumers now have a self.kwargs
+
+* Fixed bug where empty streaming responses did not send headers or status code
-0.12.2 (2016-06-21)
+0.15.0 (2016-06-22)
-------------------
-* Plus signs in query string are now handled by Daphne, not Django-by-mistake.
- Ensure you're running Channels 0.14.3 or higher.
-
-* New --root-path and DAPHNE_ROOT_PATH options for setting root path.
+* Query strings are now decoded entirely by Django. Must be used with Daphne
+ 0.13 or higher.
-0.12.1 (2016-05-18)
+0.14.3 (2016-06-21)
-------------------
-* Fixed bug where a non-ASCII byte in URL paths would crash the HTTP parser
- without a response; now returns 400, and hardening in place to catch most
- other errors and return a 500.
+* + signs in query strings are no longer double-decoded
-* WebSocket header format now matches HTTP header format and the ASGI spec.
- No update needed to channels library, but user code may need updating.
+* Message now has .values(), .keys() and .items() to match dict
-0.12.0 (2016-05-07)
+0.14.2 (2016-06-16)
-------------------
-* Backpressure on http.request now causes incoming requests to drop with 503.
- Websockets will drop connection/disconnection messages/received frames if
- backpressure is encountered; options are coming soon to instead drop the
- connection if this happens.
+* Class based consumers now have built-in channel_session and
+ channel_session_user support
-0.11.4 (2016-05-04)
+0.14.1 (2016-06-09)
-------------------
-* Don't try to send TCP host info in message for unix sockets
+* Fix unicode issues with test client under Python 2.7
-0.11.3 (2016-04-27)
+0.14.0 (2016-05-25)
-------------------
-* Don't decode + as a space in URLs
+* Class-based consumer pattern and WebSocket consumer now come with Channels
+ (see docs for more details)
+
+* Better testing utilities including a higher-level Client abstraction with
+ optional HTTP/WebSocket HttpClient variant.
-0.11.2 (2016-04-27)
+0.13.1 (2016-05-13)
-------------------
-* Correctly encode all path params for WebSockets
+* enforce_ordering now queues future messages in a channel rather than
+ spinlocking worker processes to achieve delays.
+
+* ConsumeLater no longer duplicates messages when they're requeued below the
+ limit.
-0.11.1 (2016-04-26)
+0.13.0 (2016-05-07)
-------------------
-* Fix bugs with WebSocket path parsing under Python 2
+* Backpressure is now implemented, meaning responses will pause sending if
+ the client does not read them fast enough.
+
+* DatabaseChannelLayer has been removed; it was not sensible.
-0.11.0 (2016-04-26)
+0.12.0 (2016-04-26)
-------------------
-* HTTP paths and query strings are now pre-decoded before going to ASGI
+* HTTP paths and query strings are now expected to be sent over ASGI as
+ unescaped unicode. Daphne 0.11.0 is updated to send things in this format.
+
+* request.FILES reading bug fixed
-0.10.3 (2016-04-05)
+0.11.0 (2016-04-05)
-------------------
-* Error on badly formatted websocket reply messages
+* ChannelTestCase base testing class for easier testing of consumers
+
+* Routing rewrite to improve speed with nested includes and remove need for ^ operator
+
+* Timeouts reading very slow request bodies
-0.10.2 (2016-04-03)
+0.10.3 (2016-03-29)
-------------------
-* Access logging in NCSAish format now printed to stdout, configurable to
- another file using --access-log=filename
+* Better error messages for wrongly-constructed routing lists
+
+* Error when trying to use signed cookie backend with channel_session
+
+* ASGI group_expiry implemented on database channel backend
-0.10.1 (2016-03-29)
+0.10.2 (2016-03-23)
-------------------
-* WebSockets now close after they've been open for longer than the channel
- layer group expiry (86400 seconds by default for most layers).
+* Regular expressions for routing include() can now be Unicode under Python 3
-* Binding to UNIX sockets is now possible (use the -u argument)
+* Last-resort error handling for HTTP request exceptions inside Django's core
+ code. If DEBUG is on, shows plain text tracebacks; if it is off, shows
+ "Internal Server Error".
-* WebSockets now send keepalive pings if they've had no data for a certain
- amount of time (20 seconds by default, set with --ping-interval)
+
+0.10.1 (2016-03-22)
+-------------------
+
+* Regular expressions for HTTP paths can now be Unicode under Python 3
+
+* route() and include() now importable directly from `channels`
+
+* FileResponse send speed improved for all code (previously just for staticfiles)
0.10.0 (2016-03-21)
-------------------
-* Multiple cookies are now set correctly
+* New routing system
-* Follows new ASGI single-response-channel spec for !
+* Updated to match new ASGI single-reader-channel name spec
-* Follows new ASGI header encoding spec for HTTP
+* Updated to match new ASGI HTTP header encoding spec
-0.9.3 (2016-03-08)
+0.9.5 (2016-03-10)
------------------
-* WebSocket query strings are correctly encoded
+* `runworker` now has an --alias option to specify a different channel layer
+
+* `runserver` correctly falls back to WSGI mode if no channel layers configured
-0.9.2 (2016-03-02)
+0.9.4 (2016-03-08)
------------------
-* HTTP requests now time out after a configurable amount of time and return 503
- (default is 2 minutes)
+* Worker processes now exit gracefully (finish their current processing) when
+ sent SIGTERM or SIGINT.
+
+* `runserver` now has a shorter than standard HTTP timeout configured
+ of 60 seconds.
-0.9.1 (2016-03-01)
+0.9.3 (2016-02-28)
------------------
-* Main thread actually idles rather than sitting at 100%
+* Static file serving is significantly faster thanks to larger chunk size
-* WebSocket packets have an "order" attribute attached
+* `runworker` now refuses to start if an in-memory layer is configured
-* WebSocket upgrade header detection is now case insensitive
+
+0.9.2 (2016-02-28)
+------------------
+
+* ASGI spec updated to include `order` field for WebSocket messages
+
+* `enforce_ordering` decorator introduced
+
+* DatabaseChannelLayer now uses transactions to stop duplicated messages
+
+
+0.9.1 (2016-02-21)
+------------------
+
+* Fix packaging issues with previous release
0.9 (2016-02-21)
----------------
-* Signal handlers can now be disabled if you want to run inside a thread
- (e.g. inside Django autoreloader)
+* Staticfiles support in runserver
-* Logging hooks that can be used to allow calling code to show requests
- and other events.
+* Runserver logs requests and WebSocket connections to console
-* Headers are now transmitted for websocket.connect
+* Runserver autoreloads correctly
-* http.disconnect messages are now sent
+* --noasgi option on runserver to use the old WSGI-based server
-* Request handling speed significantly improved
+* --noworker option on runserver to make it not launch worker threads
+
+* Streaming responses work correctly
+
+* Authentication decorators work again with new ASGI spec
+
+* channel_session_user_from_http decorator introduced
+
+* HTTP Long Poll support (raise ResponseLater)
+
+* Handle non-latin1 request body encoding
+
+* ASGI conformance tests for built-in database backend
+
+* Moved some imports around for more sensible layout
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
new file mode 100644
index 0000000..c5b62ac
--- /dev/null
+++ b/CONTRIBUTING.rst
@@ -0,0 +1,30 @@
+Contributing to Channels
+========================
+
+As an open source project, Channels welcomes contributions of many forms. By participating in this project, you
+agree to abide by the Django `code of conduct <https://www.djangoproject.com/conduct/>`_.
+
+Examples of contributions include:
+
+* Code patches
+* Documentation improvements
+* Bug reports and patch reviews
+
+For more information, please see our `contribution guide <https://channels.readthedocs.io/en/latest/contributing.html>`_.
+
+Quick Setup
+-----------
+
+Fork, then clone the repo::
+
+ git clone git@github.com:your-username/channels.git
+
+Make sure the tests pass::
+
+ tox
+
+Make your change. Add tests for your change. Make the tests pass::
+
+ tox
+
+Push to your fork and `submit a pull request <https://github.com/django/channels/compare/>`_.
diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644
index 0000000..100f6b7
--- /dev/null
+++ b/MANIFEST.in
@@ -0,0 +1,2 @@
+recursive-exclude tests *
+include channels/static/channels/js/*
diff --git a/Makefile b/Makefile
index 1a1f55e..612ab19 100644
--- a/Makefile
+++ b/Makefile
@@ -2,6 +2,9 @@
all:
+build_assets:
+ cd js_client && npm run browserify && cd ..
+
release:
ifndef version
$(error Please supply a version)
@@ -14,3 +17,4 @@ endif
git push
git push --tags
python setup.py sdist bdist_wheel upload
+ cd js_client && npm publish && cd ..
diff --git a/README.rst b/README.rst
index f8c0f04..f398c12 100644
--- a/README.rst
+++ b/README.rst
@@ -1,125 +1,50 @@
-daphne
-======
+Django Channels
+===============
-.. image:: https://api.travis-ci.org/django/daphne.svg
- :target: https://travis-ci.org/django/daphne
+.. image:: https://api.travis-ci.org/django/channels.svg
+ :target: https://travis-ci.org/django/channels
-.. image:: https://img.shields.io/pypi/v/daphne.svg
- :target: https://pypi.python.org/pypi/daphne
+.. image:: https://readthedocs.org/projects/channels/badge/?version=latest
+ :target: https://channels.readthedocs.io/en/latest/?badge=latest
-Daphne is a HTTP, HTTP2 and WebSocket protocol server for
-`ASGI `_, and developed
-to power Django Channels.
+.. image:: https://img.shields.io/pypi/v/channels.svg
+ :target: https://pypi.python.org/pypi/channels
-It supports automatic negotiation of protocols; there's no need for URL
-prefixing to determine WebSocket endpoints versus HTTP endpoints.
+.. image:: https://img.shields.io/pypi/l/channels.svg
+ :target: https://pypi.python.org/pypi/channels
+Channels loads into Django as a pluggable app to bring WebSocket, long-poll HTTP,
+task offloading and other asynchrony support to your code, using familiar Django
+design patterns and a flexible underlying framework that lets you not only
+customize behaviours but also write support for your own protocols and needs.
-Running
--------
+Documentation, installation and getting started instructions are at
+https://channels.readthedocs.io
-Simply point Daphne to your ASGI channel layer instance, and optionally
-set a bind address and port (defaults to localhost, port 8000)::
+Channels is an official Django Project and as such has a deprecation policy.
+Details about what's deprecated or pending deprecation for each release is in
+the `release notes <https://channels.readthedocs.io/en/latest/releases>`_.
- daphne -b 0.0.0.0 -p 8001 django_project.asgi:channel_layer
+Support can be obtained either here via issues, or in the ``#django-channels``
+channel on Freenode.
-If you intend to run daphne behind a proxy server you can use UNIX
-sockets to communicate between the two::
-
- daphne -u /tmp/daphne.sock django_project.asgi:channel_layer
-
-If daphne is being run inside a process manager such as
-`Circus `_ you might
-want it to bind to a file descriptor passed down from a parent process.
-To achieve this you can use the --fd flag::
-
- daphne --fd 5 django_project.asgi:channel_layer
-
-If you want more control over the port/socket bindings you can fall back to
-using `twisted's endpoint description strings
-`_
-by using the `--endpoint (-e)` flag, which can be used multiple times.
-This line would start a SSL server on port 443, assuming that `key.pem` and `crt.pem`
-exist in the current directory (requires pyopenssl to be installed)::
-
- daphne -e ssl:443:privateKey=key.pem:certKey=crt.pem django_project.asgi:channel_layer
-
-Endpoints even let you use the ``txacme`` endpoint syntax to get automatic certificates
-from Let's Encrypt, which you can read more about at http://txacme.readthedocs.io/en/stable/.
-
-To see all available command line options run daphne with the *-h* flag.
-
-
-HTTP/2 Support
---------------
-
-Daphne 1.1 and above supports terminating HTTP/2 connections natively. You'll
-need to do a couple of things to get it working, though. First, you need to
-make sure you install the Twisted ``http2`` and ``tls`` extras::
-
- pip install -U Twisted[tls,http2]
-
-Next, because all current browsers only support HTTP/2 when using TLS, you will
-need to start Daphne with TLS turned on, which can be done using the Twisted endpoint sytax::
-
- daphne -e ssl:443:privateKey=key.pem:certKey=crt.pem django_project.asgi:channel_layer
-
-Alternatively, you can use the ``txacme`` endpoint syntax or anything else that
-enables TLS under the hood.
-
-You will also need to be on a system that has **OpenSSL 1.0.2 or greater**; if you are
-using Ubuntu, this means you need at least 16.04.
-
-Now, when you start up Daphne, it should tell you this in the log::
-
- 2017-03-18 19:14:02,741 INFO Starting server at ssl:port=8000:privateKey=privkey.pem:certKey=cert.pem, channel layer django_project.asgi:channel_layer.
- 2017-03-18 19:14:02,742 INFO HTTP/2 support enabled
-
-Then, connect with a browser that supports HTTP/2, and everything should be
-working. It's often hard to tell that HTTP/2 is working, as the log Daphne gives you
-will be identical (it's HTTP, after all), and most browsers don't make it obvious
-in their network inspector windows. There are browser extensions that will let
-you know clearly if it's working or not.
-
-Daphne only supports "normal" requests over HTTP/2 at this time; there is not
-yet support for extended features like Server Push. It will, however, result in
-much faster connections and lower overheads.
-
-If you have a reverse proxy in front of your site to serve static files or
-similar, HTTP/2 will only work if that proxy understands and passes through the
-connection correctly.
-
-
-Root Path (SCRIPT_NAME)
------------------------
-
-In order to set the root path for Daphne, which is the equivalent of the
-WSGI ``SCRIPT_NAME`` setting, you have two options:
-
-* Pass a header value ``Daphne-Root-Path``, with the desired root path as a
- URLencoded ASCII value. This header will not be passed down to applications.
-
-* Set the ``--root-path`` commandline option with the desired root path as a
- URLencoded ASCII value.
-
-The header takes precedence if both are set. As with ``SCRIPT_ALIAS``, the value
-should start with a slash, but not end with one; for example::
-
- daphne --root-path=/forum django_project.asgi:channel_layer
+You can install channels from PyPI as the ``channels`` package.
+You'll likely also want to install ``asgi_redis`` or ``asgi_rabbitmq``
+to provide the Redis/RabbitMQ channel layer correspondingly.
+See our `installation <https://channels.readthedocs.io/en/latest/installation.html>`_
+and `getting started <https://channels.readthedocs.io/en/latest/getting-started.html>`_ docs for more.
Dependencies
------------
-All Channels projects currently support Python 2.7, 3.4 and 3.5. `daphne` requires Twisted 17.1 or
-greater.
+All Channels projects currently support Python 2.7, 3.4, 3.5 and 3.6. `channels` supports all
+released Django versions, namely 1.8-1.11.
Contributing
------------
-Please refer to the
-`main Channels contributing docs `_.
-That also contains advice on how to set up the development environment and run the tests.
+To learn more about contributing, please `read our contributing docs <https://channels.readthedocs.io/en/latest/contributing.html>`_.
Maintenance and Security
@@ -129,7 +54,29 @@ To report security issues, please contact security@djangoproject.com. For GPG
signatures and more security process information, see
https://docs.djangoproject.com/en/dev/internals/security/.
-To report bugs or request new features, please open a new GitHub issue.
+To report bugs or request new features, please open a new GitHub issue. For
+larger discussions, please post to the
+`django-developers mailing list <https://groups.google.com/forum/#!forum/django-developers>`_.
-This repository is part of the Channels project. For the shepherd and maintenance team, please see the
-`main Channels readme `_.
+Django Core Shepherd: Andrew Godwin
+
+Maintenance team:
+
+* Andrew Godwin
+* Artem Malyshev
+
+If you are interested in joining the maintenance team, please
+`read more about contributing <https://channels.readthedocs.io/en/latest/contributing.html>`_
+and get in touch!
+
+
+Other Projects
+--------------
+
+The Channels project is made up of several packages; the others are:
+
+* `Daphne <https://github.com/django/daphne>`_, the HTTP and WebSocket termination server
+* `asgiref <https://github.com/django/asgiref>`_, the base ASGI library/memory backend
+* `asgi_redis <https://github.com/django/asgi_redis>`_, the Redis channel backend
+* `asgi_rabbitmq <https://github.com/proofit404/asgi_rabbitmq>`_, the RabbitMQ channel backend
+* `asgi_ipc <https://github.com/django/asgi_ipc>`_, the POSIX IPC channel backend
diff --git a/channels/__init__.py b/channels/__init__.py
new file mode 100644
index 0000000..5430057
--- /dev/null
+++ b/channels/__init__.py
@@ -0,0 +1,11 @@
+__version__ = "1.1.8"
+
+default_app_config = 'channels.apps.ChannelsConfig'
+DEFAULT_CHANNEL_LAYER = 'default'
+
+try:
+ from .asgi import channel_layers # NOQA isort:skip
+ from .channel import Channel, Group # NOQA isort:skip
+ from .routing import route, route_class, include # NOQA isort:skip
+except ImportError: # No django installed, allow vars to be read
+ pass
diff --git a/channels/apps.py b/channels/apps.py
new file mode 100644
index 0000000..36fc9a0
--- /dev/null
+++ b/channels/apps.py
@@ -0,0 +1,19 @@
+from django.apps import AppConfig
+
+from .binding.base import BindingMetaclass
+from .package_checks import check_all
+
+
+class ChannelsConfig(AppConfig):
+
+ name = "channels"
+ verbose_name = "Channels"
+
+ def ready(self):
+ # Check versions
+ check_all()
+ # Do django monkeypatches
+ from .hacks import monkeypatch_django
+ monkeypatch_django()
+ # Instantiate bindings
+ BindingMetaclass.register_all()
diff --git a/channels/asgi.py b/channels/asgi.py
new file mode 100644
index 0000000..132553d
--- /dev/null
+++ b/channels/asgi.py
@@ -0,0 +1,122 @@
+from __future__ import unicode_literals
+
+import django
+from django.conf import settings
+from django.utils.module_loading import import_string
+
+from .routing import Router
+from .utils import name_that_thing
+
+
+class InvalidChannelLayerError(ValueError):
+ pass
+
+
+class ChannelLayerManager(object):
+ """
+ Takes a settings dictionary of backends and initialises them on request.
+ """
+
+ def __init__(self):
+ self.backends = {}
+
+ @property
+ def configs(self):
+ # Lazy load settings so we can be imported
+ return getattr(settings, "CHANNEL_LAYERS", {})
+
+ def make_backend(self, name):
+ """
+ Instantiate channel layer.
+ """
+ config = self.configs[name].get("CONFIG", {})
+ return self._make_backend(name, config)
+
+ def make_test_backend(self, name):
+ """
+ Instantiate channel layer using its test config.
+ """
+ try:
+ config = self.configs[name]["TEST_CONFIG"]
+ except KeyError:
+ raise InvalidChannelLayerError("No TEST_CONFIG specified for %s" % name)
+ return self._make_backend(name, config)
+
+ def _make_backend(self, name, config):
+ # Load the backend class
+ try:
+ backend_class = import_string(self.configs[name]['BACKEND'])
+ except KeyError:
+ raise InvalidChannelLayerError("No BACKEND specified for %s" % name)
+ except ImportError:
+ raise InvalidChannelLayerError(
+ "Cannot import BACKEND %r specified for %s" % (self.configs[name]['BACKEND'], name)
+ )
+ # Get routing
+ try:
+ routing = self.configs[name]['ROUTING']
+ except KeyError:
+ raise InvalidChannelLayerError("No ROUTING specified for %s" % name)
+ # Initialise and pass config
+ asgi_layer = backend_class(**config)
+ return ChannelLayerWrapper(
+ channel_layer=asgi_layer,
+ alias=name,
+ routing=routing,
+ )
+
+ def __getitem__(self, key):
+ if key not in self.backends:
+ self.backends[key] = self.make_backend(key)
+ return self.backends[key]
+
+ def __contains__(self, key):
+ return key in self.configs
+
+ def set(self, key, layer):
+ """
+ Sets an alias to point to a new ChannelLayerWrapper instance, and
+ returns the old one that it replaced. Useful for swapping out the
+ backend during tests.
+ """
+ old = self.backends.get(key, None)
+ self.backends[key] = layer
+ return old
+
+
+class ChannelLayerWrapper(object):
+ """
+ Top level channel layer wrapper, which contains both the ASGI channel
+ layer object as well as alias and routing information specific to Django.
+ """
+
+ def __init__(self, channel_layer, alias, routing):
+ self.channel_layer = channel_layer
+ self.alias = alias
+ self.routing = routing
+ self.router = Router(self.routing)
+
+ def __getattr__(self, name):
+ return getattr(self.channel_layer, name)
+
+ def __str__(self):
+ return "%s (%s)" % (self.alias, name_that_thing(self.channel_layer))
+
+ def local_only(self):
+ # TODO: Can probably come up with a nicer check?
+ return "inmemory" in self.channel_layer.__class__.__module__
+
+
+def get_channel_layer(alias="default"):
+ """
+ Returns the raw ASGI channel layer for this project.
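+
+    Usage sketch (channel name and payload are hypothetical):
+
+        layer = get_channel_layer()
+        layer.send("example.channel", {"value": 42})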
+ """
+    if django.VERSION >= (1, 10):  # django.setup() gained set_prefix in Django 1.10
+ django.setup(set_prefix=False)
+ else:
+ django.setup()
+ return channel_layers[alias].channel_layer
+
+
+# Default global instance of the channel layer manager
+channel_layers = ChannelLayerManager()
diff --git a/channels/auth.py b/channels/auth.py
new file mode 100644
index 0000000..606e2b8
--- /dev/null
+++ b/channels/auth.py
@@ -0,0 +1,106 @@
+import functools
+
+from django.contrib import auth
+
+from .sessions import channel_and_http_session, channel_session, http_session
+
+
+def transfer_user(from_session, to_session):
+ """
+ Transfers user from HTTP session to channel session.
+ """
+ if auth.BACKEND_SESSION_KEY in from_session and \
+ auth.SESSION_KEY in from_session and \
+ auth.HASH_SESSION_KEY in from_session:
+ to_session[auth.BACKEND_SESSION_KEY] = from_session[auth.BACKEND_SESSION_KEY]
+ to_session[auth.SESSION_KEY] = from_session[auth.SESSION_KEY]
+ to_session[auth.HASH_SESSION_KEY] = from_session[auth.HASH_SESSION_KEY]
+
+
+def channel_session_user(func):
+ """
+ Presents a message.user attribute obtained from a user ID in the channel
+ session, rather than in the http_session. Turns on channel session implicitly.
+ """
+ @channel_session
+ @functools.wraps(func)
+ def inner(message, *args, **kwargs):
+ # If we didn't get a session, then we don't get a user
+ if not hasattr(message, "channel_session"):
+ raise ValueError("Did not see a channel session to get auth from")
+ if message.channel_session is None:
+ # Inner import to avoid reaching into models before load complete
+ from django.contrib.auth.models import AnonymousUser
+ message.user = AnonymousUser()
+ # Otherwise, be a bit naughty and make a fake Request with just
+ # a "session" attribute (later on, perhaps refactor contrib.auth to
+ # pass around session rather than request)
+ else:
+ fake_request = type("FakeRequest", (object, ), {"session": message.channel_session})
+ message.user = auth.get_user(fake_request)
+ # Run the consumer
+ return func(message, *args, **kwargs)
+ return inner
+
+
+def http_session_user(func):
+ """
+    Wraps an HTTP or WebSocket consumer (or any consumer of messages
+    that provides a "COOKIES" attribute) to provide both a "session"
+    attribute and a "user" attribute, like AuthMiddleware does.
+
+ This runs http_session() to get a session to hook auth off of.
+ If the user does not have a session cookie set, both "session"
+ and "user" will be None.
+ """
+ @http_session
+ @functools.wraps(func)
+ def inner(message, *args, **kwargs):
+ # If we didn't get a session, then we don't get a user
+ if not hasattr(message, "http_session"):
+ raise ValueError("Did not see a http session to get auth from")
+ if message.http_session is None:
+ # Inner import to avoid reaching into models before load complete
+ from django.contrib.auth.models import AnonymousUser
+ message.user = AnonymousUser()
+ # Otherwise, be a bit naughty and make a fake Request with just
+ # a "session" attribute (later on, perhaps refactor contrib.auth to
+ # pass around session rather than request)
+ else:
+ fake_request = type("FakeRequest", (object, ), {"session": message.http_session})
+ message.user = auth.get_user(fake_request)
+ # Run the consumer
+ return func(message, *args, **kwargs)
+ return inner
+
+
+def channel_session_user_from_http(func):
+ """
+ Decorator that automatically transfers the user from HTTP sessions to
+ channel-based sessions, and returns the user as message.user as well.
+ Useful for things that consume e.g. websocket.connect
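+
+    Usage sketch (consumer and group name are hypothetical):
+
+        @channel_session_user_from_http
+        def ws_connect(message):
+            Group("user-%s" % message.user.pk).add(message.reply_channel)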
+ """
+ @http_session_user
+ @channel_session
+ @functools.wraps(func)
+ def inner(message, *args, **kwargs):
+ if message.http_session is not None:
+ transfer_user(message.http_session, message.channel_session)
+ return func(message, *args, **kwargs)
+ return inner
+
+
+def channel_and_http_session_user_from_http(func):
+ """
+ Decorator that automatically transfers the user from HTTP sessions to
+ channel-based sessions, rehydrates the HTTP session, and returns the
+ user as message.user as well.
+ """
+ @http_session_user
+ @channel_and_http_session
+ @functools.wraps(func)
+ def inner(message, *args, **kwargs):
+ if message.http_session is not None:
+ transfer_user(message.http_session, message.channel_session)
+ return func(message, *args, **kwargs)
+ return inner
diff --git a/channels/binding/__init__.py b/channels/binding/__init__.py
new file mode 100644
index 0000000..37b12a0
--- /dev/null
+++ b/channels/binding/__init__.py
@@ -0,0 +1 @@
+from .base import Binding # NOQA isort:skip
diff --git a/channels/binding/base.py b/channels/binding/base.py
new file mode 100644
index 0000000..59cf43a
--- /dev/null
+++ b/channels/binding/base.py
@@ -0,0 +1,289 @@
+from __future__ import unicode_literals
+
+import six
+from django.apps import apps
+from django.db.models.signals import post_delete, post_save, pre_delete, pre_save
+
+from ..auth import channel_session, channel_session_user
+from ..channel import Group
+
+CREATE = 'create'
+UPDATE = 'update'
+DELETE = 'delete'
+
+
+class BindingMetaclass(type):
+ """
+ Metaclass that tracks instantiations of its type.
+ """
+
+ register_immediately = False
+ binding_classes = []
+
+ def __new__(cls, name, bases, body):
+ klass = type.__new__(cls, name, bases, body)
+ if bases != (object, ):
+ cls.binding_classes.append(klass)
+ if cls.register_immediately:
+ klass.register()
+ return klass
+
+ @classmethod
+ def register_all(cls):
+ for binding_class in cls.binding_classes:
+ binding_class.register()
+ cls.register_immediately = True
+
+
+@six.add_metaclass(BindingMetaclass)
+class Binding(object):
+ """
+ Represents a two-way data binding from channels/groups to a Django model.
+ Outgoing binding sends model events to zero or more groups.
+ Incoming binding takes messages and maybe applies the action based on perms.
+
+ To implement outbound, implement:
+ - group_names, which returns a list of group names to send to
+ - serialize, which returns message contents from an instance + action
+
+ To implement inbound, implement:
+ - deserialize, which returns pk, data and action from message contents
+ - has_permission, which says if the user can do the action on an instance
+ - create, which takes the data and makes a model instance
+ - update, which takes data and a model instance and applies one to the other
+
+ Outbound will work once you implement the functions; inbound requires you
+ to place one or more bindings inside a protocol-specific Demultiplexer
+ and tie that in as a consumer.
+ """
+
+ # Model to serialize
+
+ model = None
+
+    # Only model fields that are listed in fields are sent by default;
+    # if you really want to send all fields, use fields = ['__all__']
+
+ fields = None
+ exclude = None
+
+ # Decorators
+ channel_session_user = True
+ channel_session = False
+
+ # the kwargs the triggering signal (e.g. post_save) was emitted with
+ signal_kwargs = None
+
+ @classmethod
+ def register(cls):
+ """
+ Resolves models.
+ """
+ # Connect signals
+ for model in cls.get_registered_models():
+ pre_save.connect(cls.pre_save_receiver, sender=model)
+ post_save.connect(cls.post_save_receiver, sender=model)
+ pre_delete.connect(cls.pre_delete_receiver, sender=model)
+ post_delete.connect(cls.post_delete_receiver, sender=model)
+
+ @classmethod
+ def get_registered_models(cls):
+ """
+ Resolves the class model attribute if it's a string and returns it.
+ """
+ # If model is None directly on the class, assume it's abstract.
+ if cls.model is None:
+ if "model" in cls.__dict__:
+ return []
+ else:
+ raise ValueError("You must set the model attribute on Binding %r!" % cls)
+        # If neither fields nor exclude is defined, raise an error
+ if cls.fields is None and cls.exclude is None:
+ raise ValueError("You must set the fields or exclude attribute on Binding %r!" % cls)
+ # Optionally resolve model strings
+ if isinstance(cls.model, six.string_types):
+ cls.model = apps.get_model(cls.model)
+ cls.model_label = "%s.%s" % (
+ cls.model._meta.app_label.lower(),
+ cls.model._meta.object_name.lower(),
+ )
+ return [cls.model]
+
+ # Outbound binding
+
+ @classmethod
+ def encode(cls, stream, payload):
+ """
+ Encodes stream + payload for outbound sending.
+ """
+ raise NotImplementedError()
+
+ @classmethod
+ def pre_save_receiver(cls, instance, **kwargs):
+ creating = instance._state.adding
+ cls.pre_change_receiver(instance, CREATE if creating else UPDATE)
+
+ @classmethod
+ def post_save_receiver(cls, instance, created, **kwargs):
+ cls.post_change_receiver(instance, CREATE if created else UPDATE, **kwargs)
+
+ @classmethod
+ def pre_delete_receiver(cls, instance, **kwargs):
+ cls.pre_change_receiver(instance, DELETE)
+
+ @classmethod
+ def post_delete_receiver(cls, instance, **kwargs):
+ cls.post_change_receiver(instance, DELETE, **kwargs)
+
+ @classmethod
+ def pre_change_receiver(cls, instance, action):
+ """
+ Entry point for triggering the binding from save signals.
+ """
+ if action == CREATE:
+ group_names = set()
+ else:
+ group_names = set(cls.group_names(instance))
+
+ if not hasattr(instance, '_binding_group_names'):
+ instance._binding_group_names = {}
+ instance._binding_group_names[cls] = group_names
+
+ @classmethod
+ def post_change_receiver(cls, instance, action, **kwargs):
+ """
+ Triggers the binding to possibly send to its group.
+ """
+ old_group_names = instance._binding_group_names[cls]
+ if action == DELETE:
+ new_group_names = set()
+ else:
+ new_group_names = set(cls.group_names(instance))
+
+ # if post delete, new_group_names should be []
+ self = cls()
+ self.instance = instance
+
+ # Django DDP had used the ordering of DELETE, UPDATE then CREATE for good reasons.
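+        # Worked example: old_group_names={"a", "b"} and new_group_names={"b", "c"}
+        # sends DELETE to {"a"}, UPDATE to {"b"} and CREATE to {"c"}, matching what
+        # clients subscribed to each group should perceive.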
+ self.send_messages(instance, old_group_names - new_group_names, DELETE, **kwargs)
+ self.send_messages(instance, old_group_names & new_group_names, UPDATE, **kwargs)
+ self.send_messages(instance, new_group_names - old_group_names, CREATE, **kwargs)
+
+ def send_messages(self, instance, group_names, action, **kwargs):
+ """
+ Serializes the instance and sends it to all provided group names.
+ """
+ if not group_names:
+ return # no need to serialize, bail.
+ self.signal_kwargs = kwargs
+ payload = self.serialize(instance, action)
+ if payload == {}:
+ return # nothing to send, bail.
+
+ assert self.stream is not None
+ message = self.encode(self.stream, payload)
+ for group_name in group_names:
+ group = Group(group_name)
+ group.send(message)
+
+ @classmethod
+ def group_names(cls, instance):
+ """
+ Returns the iterable of group names to send the object to based on the
+ instance and action performed on it.
+ """
+ raise NotImplementedError()
+
+ def serialize(self, instance, action):
+ """
+ Should return a serialized version of the instance to send over the
+ wire (e.g. {"pk": 12, "value": 42, "string": "some string"})
+        Kwargs are passed from the model's save and delete methods.
+ """
+ raise NotImplementedError()
+
+ # Inbound binding
+
+ @classmethod
+ def trigger_inbound(cls, message, **kwargs):
+ """
+ Triggers the binding to see if it will do something.
+ Also acts as a consumer.
+ """
+ # Late import as it touches models
+ from django.contrib.auth.models import AnonymousUser
+ self = cls()
+ self.message = message
+ self.kwargs = kwargs
+ # Deserialize message
+ self.action, self.pk, self.data = self.deserialize(self.message)
+ self.user = getattr(self.message, "user", AnonymousUser())
+ # Run incoming action
+ self.run_action(self.action, self.pk, self.data)
+
+ @classmethod
+ def get_handler(cls):
+ """
+ Adds decorators to trigger_inbound.
+ """
+ handler = cls.trigger_inbound
+ if cls.channel_session_user:
+ return channel_session_user(handler)
+ elif cls.channel_session:
+ return channel_session(handler)
+ else:
+ return handler
+
+ @classmethod
+ def consumer(cls, message, **kwargs):
+ handler = cls.get_handler()
+ handler(message, **kwargs)
+
+ def deserialize(self, message):
+ """
+ Returns action, pk, data decoded from the message. pk should be None
+ if action is create; data should be None if action is delete.
+ """
+ raise NotImplementedError()
+
+ def has_permission(self, user, action, pk):
+ """
+ Return True if the user can do action to the pk, False if not.
+ User may be AnonymousUser if no auth hooked up/they're not logged in.
+ Action is one of "create", "delete", "update".
+ """
+ raise NotImplementedError()
+
+ def run_action(self, action, pk, data):
+ """
+ Performs the requested action. This version dispatches to named
+ functions by default for update/create, and handles delete itself.
+ """
+ # Check to see if we're allowed
+ if self.has_permission(self.user, action, pk):
+ if action == "create":
+ self.create(data)
+ elif action == "update":
+ self.update(pk, data)
+ elif action == "delete":
+ self.delete(pk)
+ else:
+ raise ValueError("Bad action %r" % action)
+
+ def create(self, data):
+ """
+ Creates a new instance of the model with the data.
+ """
+ raise NotImplementedError()
+
+ def update(self, pk, data):
+ """
+ Updates the model with the data.
+ """
+ raise NotImplementedError()
+
+ def delete(self, pk):
+ """
+ Deletes the model instance.
+ """
+ self.model.objects.filter(pk=pk).delete()
diff --git a/channels/binding/websockets.py b/channels/binding/websockets.py
new file mode 100644
index 0000000..bf57b9f
--- /dev/null
+++ b/channels/binding/websockets.py
@@ -0,0 +1,181 @@
+import json
+
+from django.core import serializers
+from django.core.serializers.json import DjangoJSONEncoder
+
+from ..generic.websockets import WebsocketMultiplexer
+from ..sessions import enforce_ordering
+from .base import Binding
+
+
+class WebsocketBinding(Binding):
+ """
+ Websocket-specific outgoing binding subclass that uses JSON encoding
+ and the built-in JSON/WebSocket multiplexer.
+
+ To implement outbound, implement:
+ - group_names, which returns a list of group names to send to
+
+ To implement inbound, implement:
+ - has_permission, which says if the user can do the action on an instance
+
+ Optionally also implement:
+ - serialize_data, which returns JSON-safe data from a model instance
+ - create, which takes incoming data and makes a model instance
+ - update, which takes incoming data and a model instance and applies one to the other
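+
+    A minimal sketch (model, stream and group name are hypothetical):
+
+        class IntegerValueBinding(WebsocketBinding):
+            model = IntegerValue
+            stream = "intval"
+            fields = ["__all__"]
+
+            @classmethod
+            def group_names(cls, instance):
+                return ["intval-updates"]
+
+            def has_permission(self, user, action, pk):
+                return True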
+ """
+
+ # Mark as abstract
+ model = None
+
+ # Stream multiplexing name
+ stream = None
+
+ # Decorators
+ strict_ordering = False
+ slight_ordering = False
+
+ # Outbound
+ @classmethod
+ def encode(cls, stream, payload):
+ return WebsocketMultiplexer.encode(stream, payload)
+
+ def serialize(self, instance, action):
+ payload = {
+ "action": action,
+ "pk": instance.pk,
+ "data": self.serialize_data(instance),
+ "model": self.model_label,
+ }
+ return payload
+
+ def serialize_data(self, instance):
+ """
+ Serializes model data into JSON-compatible types.
+ """
+ if self.fields is not None:
+ if self.fields == '__all__' or list(self.fields) == ['__all__']:
+ fields = None
+ else:
+ fields = self.fields
+ else:
+ fields = [f.name for f in instance._meta.get_fields() if f.name not in self.exclude]
+ data = serializers.serialize('json', [instance], fields=fields)
+ return json.loads(data)[0]['fields']
+
+ # Inbound
+ @classmethod
+ def get_handler(cls):
+ """
+ Adds decorators to trigger_inbound.
+ """
+ # Get super-handler
+ handler = super(WebsocketBinding, cls).get_handler()
+ # Ordering decorators
+ if cls.strict_ordering:
+ return enforce_ordering(handler, slight=False)
+ elif cls.slight_ordering:
+ return enforce_ordering(handler, slight=True)
+ else:
+ return handler
+
+ @classmethod
+ def trigger_inbound(cls, message, **kwargs):
+ """
+ Overrides base trigger_inbound to ignore connect/disconnect.
+ """
+ # Only allow received packets through further.
+ if message.channel.name != "websocket.receive":
+ return
+ super(WebsocketBinding, cls).trigger_inbound(message, **kwargs)
+
+ def deserialize(self, message):
+ """
+ You must hook this up behind a Deserializer, so we expect the JSON
+ already dealt with.
+ """
+ body = json.loads(message['text'])
+ action = body['action']
+ pk = body.get('pk', None)
+ data = body.get('data', None)
+ return action, pk, data
+
+ def _hydrate(self, pk, data):
+ """
+ Given a raw "data" section of an incoming message, returns a
+ DeserializedObject.
+ """
+ s_data = [
+ {
+ "pk": pk,
+ "model": self.model_label,
+ "fields": data,
+ }
+ ]
+ return list(serializers.deserialize("python", s_data))[0]
+
+ def create(self, data):
+ self._hydrate(None, data).save()
+
+ def update(self, pk, data):
+ instance = self.model.objects.get(pk=pk)
+ hydrated = self._hydrate(pk, data)
+
+ if self.fields is not None:
+ for name in data.keys():
+ if name in self.fields or self.fields == ['__all__']:
+ setattr(instance, name, getattr(hydrated.object, name))
+ else:
+ for name in data.keys():
+ if name not in self.exclude:
+ setattr(instance, name, getattr(hydrated.object, name))
+ instance.save()
+
+
+class WebsocketBindingWithMembers(WebsocketBinding):
+ """
+    Outgoing binding subclass based on WebsocketBinding.
+    Additionally enables sending of member variables, properties and methods.
+    Member methods can only have self as a required argument.
+    Just add the name of the member to the send_members list.
+ Example:
+
+ class MyModel(models.Model):
+ my_field = models.IntegerField(default=0)
+ my_var = 3
+
+ @property
+ def my_property(self):
+ return self.my_var + self.my_field
+
+ def my_function(self):
+            return self.my_var - self.my_field
+
+    class MyBinding(WebsocketBindingWithMembers):
+ model = MyModel
+ stream = 'mystream'
+
+ send_members = ['my_var', 'my_property', 'my_function']
+ """
+
+ model = None
+ send_members = []
+
+ encoder = DjangoJSONEncoder()
+
+ def serialize_data(self, instance, **kwargs):
+ data = super(WebsocketBindingWithMembers, self).serialize_data(instance, **kwargs)
+ member_data = {}
+ for m in self.send_members:
+ member = instance
+ for s in m.split('.'):
+ member = getattr(member, s)
+ if callable(member):
+ member_data[m.replace('.', '__')] = member()
+ else:
+ member_data[m.replace('.', '__')] = member
+ member_data = json.loads(self.encoder.encode(member_data))
+ # the update never overwrites any value from data,
+ # because an object can't have two attributes with the same name
+ data.update(member_data)
+ return data
diff --git a/channels/channel.py b/channels/channel.py
new file mode 100644
index 0000000..e4bef7b
--- /dev/null
+++ b/channels/channel.py
@@ -0,0 +1,90 @@
+from __future__ import unicode_literals
+
+from django.utils import six
+
+from channels import DEFAULT_CHANNEL_LAYER, channel_layers
+
+
+class Channel(object):
+ """
+ Public interaction class for the channel layer.
+
+ This is separate to the backends so we can:
+ a) Hide receive_many from end-users, as it is only for interface servers
+ b) Keep a stable-ish backend interface for third parties
+
+ You can pass an alternate Channel Layer alias in, but it will use the
+ "default" one by default.
+ """
+
+ def __init__(self, name, alias=DEFAULT_CHANNEL_LAYER, channel_layer=None):
+ """
+ Create an instance for the channel named "name"
+ """
+ if isinstance(name, six.binary_type):
+ name = name.decode("ascii")
+ self.name = name
+ if channel_layer:
+ self.channel_layer = channel_layer
+ else:
+ self.channel_layer = channel_layers[alias]
+
+ def send(self, content, immediately=False):
+ """
+ Send a message over the channel - messages are always dicts.
+
+ Sends are delayed until consumer completion. To override this, you
+ may pass immediately=True. If you are outside a consumer, things are
+ always sent immediately.
+ """
+ from .message import pending_message_store
+ if not isinstance(content, dict):
+ raise TypeError("You can only send dicts as content on channels.")
+ if immediately or not pending_message_store.active:
+ self.channel_layer.send(self.name, content)
+ else:
+ pending_message_store.append(self, content)
+
+ def __str__(self):
+ return self.name
+
+
+class Group(object):
+ """
+ A group of channels that can be messaged at once, and that expire out
+ of the group after an expiry time (keep re-adding to keep them in).
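+
+    Usage sketch (group name is hypothetical):
+
+        Group("chat").add(message.reply_channel)
+        Group("chat").send({"text": "hello"})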
+ """
+
+ def __init__(self, name, alias=DEFAULT_CHANNEL_LAYER, channel_layer=None):
+ if isinstance(name, six.binary_type):
+ name = name.decode("ascii")
+ self.name = name
+ if channel_layer:
+ self.channel_layer = channel_layer
+ else:
+ self.channel_layer = channel_layers[alias]
+
+ def add(self, channel):
+ if isinstance(channel, Channel):
+ channel = channel.name
+ self.channel_layer.group_add(self.name, channel)
+
+ def discard(self, channel):
+ if isinstance(channel, Channel):
+ channel = channel.name
+ self.channel_layer.group_discard(self.name, channel)
+
+ def send(self, content, immediately=False):
+ """
+ Send a message to all channels in the group.
+
+ Sends are delayed until consumer completion. To override this, you
+ may pass immediately=True.
+ """
+ from .message import pending_message_store
+ if not isinstance(content, dict):
+ raise ValueError("You can only send dicts as content on channels.")
+ if immediately or not pending_message_store.active:
+ self.channel_layer.send_group(self.name, content)
+ else:
+ pending_message_store.append(self, content)
diff --git a/channels/delay/__init__.py b/channels/delay/__init__.py
new file mode 100644
index 0000000..389cd5b
--- /dev/null
+++ b/channels/delay/__init__.py
@@ -0,0 +1 @@
+default_app_config = 'channels.delay.apps.DelayConfig'
diff --git a/channels/delay/apps.py b/channels/delay/apps.py
new file mode 100644
index 0000000..f68802b
--- /dev/null
+++ b/channels/delay/apps.py
@@ -0,0 +1,8 @@
+from django.apps import AppConfig
+
+
+class DelayConfig(AppConfig):
+
+ name = "channels.delay"
+ label = "channels.delay"
+ verbose_name = "Channels Delay"
diff --git a/channels/delay/management/__init__.py b/channels/delay/management/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/channels/delay/management/commands/__init__.py b/channels/delay/management/commands/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/channels/delay/management/commands/rundelay.py b/channels/delay/management/commands/rundelay.py
new file mode 100644
index 0000000..b47b1e2
--- /dev/null
+++ b/channels/delay/management/commands/rundelay.py
@@ -0,0 +1,44 @@
+from __future__ import unicode_literals
+
+from django.core.management import BaseCommand, CommandError
+
+from channels import DEFAULT_CHANNEL_LAYER, channel_layers
+from channels.delay.worker import Worker
+from channels.log import setup_logger
+
+
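+# Invocation sketch (options correspond to add_arguments below; values are examples):
+#
+#   python manage.py rundelay --layer default --sleep 1.0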
+class Command(BaseCommand):
+
+ leave_locale_alone = True
+
+ def add_arguments(self, parser):
+ super(Command, self).add_arguments(parser)
+ parser.add_argument(
+ '--layer', action='store', dest='layer', default=DEFAULT_CHANNEL_LAYER,
+ help='Channel layer alias to use, if not the default.',
+ )
+ parser.add_argument(
+ '--sleep', action='store', dest='sleep', default=1, type=float,
+ help='Amount of time to sleep between checks, in seconds.',
+ )
+
+ def handle(self, *args, **options):
+ self.verbosity = options.get("verbosity", 1)
+ self.logger = setup_logger('django.channels', self.verbosity)
+ self.channel_layer = channel_layers[options.get("layer", DEFAULT_CHANNEL_LAYER)]
+        # Check that the channel layer isn't the in-memory one
+ if self.channel_layer.local_only():
+ raise CommandError(
+ "You cannot span multiple processes with the in-memory layer. " +
+ "Change your settings to use a cross-process channel layer."
+ )
+ self.options = options
+ self.logger.info("Running delay against channel layer %s", self.channel_layer)
+ try:
+ worker = Worker(
+ channel_layer=self.channel_layer,
+ database_sleep_duration=options['sleep'],
+ )
+ worker.run()
+ except KeyboardInterrupt:
+ pass
diff --git a/channels/delay/migrations/0001_initial.py b/channels/delay/migrations/0001_initial.py
new file mode 100644
index 0000000..82e85f9
--- /dev/null
+++ b/channels/delay/migrations/0001_initial.py
@@ -0,0 +1,25 @@
+# -*- coding: utf-8 -*-
+# Generated by Django 1.9.7 on 2016-10-21 01:14
+from __future__ import unicode_literals
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+ initial = True
+
+ dependencies = [
+ ]
+
+ operations = [
+ migrations.CreateModel(
+ name='DelayedMessage',
+ fields=[
+ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('due_date', models.DateTimeField(db_index=True)),
+ ('channel_name', models.CharField(max_length=512)),
+ ('content', models.TextField()),
+ ],
+ ),
+ ]
diff --git a/channels/delay/migrations/__init__.py b/channels/delay/migrations/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/channels/delay/models.py b/channels/delay/models.py
new file mode 100644
index 0000000..932d6d3
--- /dev/null
+++ b/channels/delay/models.py
@@ -0,0 +1,49 @@
+import json
+from datetime import timedelta
+
+from django.db import models
+from django.utils import timezone
+
+from channels import DEFAULT_CHANNEL_LAYER, Channel, channel_layers
+
+
+class DelayedMessageQuerySet(models.QuerySet):
+
+ def is_due(self):
+ return self.filter(due_date__lte=timezone.now())
+
+
+class DelayedMessage(models.Model):
+
+ due_date = models.DateTimeField(db_index=True)
+ channel_name = models.CharField(max_length=512)
+ content = models.TextField()
+
+ objects = DelayedMessageQuerySet.as_manager()
+
+ @property
+ def delay(self):
+ return self._delay
+
+ @delay.setter
+ def delay(self, milliseconds):
+ self._delay = milliseconds
+ self.due_date = timezone.now() + timedelta(milliseconds=milliseconds)
+
+ def send(self, channel_layer=None, requeue_delay=1000):
+ """
+ Sends the message on the configured channel with the stored content.
+
+ Deletes the DelayedMessage record if successfully sent.
+
+ Args:
+ channel_layer: optional channel_layer to use
+ requeue_delay: if the channel is full, milliseconds to wait before requeue
+ """
+ channel_layer = channel_layer or channel_layers[DEFAULT_CHANNEL_LAYER]
+ try:
+ Channel(self.channel_name, channel_layer=channel_layer).send(json.loads(self.content), immediately=True)
+ self.delete()
+ except channel_layer.ChannelFull:
+ self.delay = requeue_delay
+ self.save()
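
A hedged sketch of scheduling a delivery through this model by hand (normally the worker below creates these records from asgi.delay messages; the channel name is illustrative):

    import json
    from channels.delay.models import DelayedMessage

    msg = DelayedMessage(
        channel_name="example.channel",
        content=json.dumps({"key": "value"}),
    )
    msg.delay = 5000  # the setter computes due_date = now + 5 seconds
    msg.save()
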
diff --git a/channels/delay/worker.py b/channels/delay/worker.py
new file mode 100644
index 0000000..689a588
--- /dev/null
+++ b/channels/delay/worker.py
@@ -0,0 +1,91 @@
+from __future__ import unicode_literals
+
+import json
+import logging
+import signal
+import sys
+import time
+
+from django.core.exceptions import ValidationError
+
+from .models import DelayedMessage
+
+logger = logging.getLogger('django.channels')
+
+
+class Worker(object):
+ """Worker class that listens to channels.delay messages and dispatches messages"""
+
+ def __init__(
+ self,
+ channel_layer,
+ signal_handlers=True,
+ database_sleep_duration=1,
+ ):
+ self.channel_layer = channel_layer
+ self.signal_handlers = signal_handlers
+ self.termed = False
+ self.in_job = False
+ self.database_sleep_duration = database_sleep_duration
+
+ def install_signal_handler(self):
+ signal.signal(signal.SIGTERM, self.sigterm_handler)
+ signal.signal(signal.SIGINT, self.sigterm_handler)
+
+ def sigterm_handler(self, signo, stack_frame):
+ self.termed = True
+ if self.in_job:
+ logger.info("Shutdown signal received while busy, waiting for loop termination")
+ else:
+ logger.info("Shutdown signal received while idle, terminating immediately")
+ sys.exit(0)
+
+ def run(self):
+ if self.signal_handlers:
+ self.install_signal_handler()
+
+ logger.info("Listening on asgi.delay")
+
+ last_delay_check = 0
+
+ while not self.termed:
+ self.in_job = False
+ channel, content = self.channel_layer.receive(['asgi.delay'], block=False)
+ self.in_job = True
+
+ if channel is not None:
+ logger.debug("Got message on asgi.delay")
+
+ if 'channel' not in content or \
+ 'content' not in content or \
+ 'delay' not in content:
+ logger.error("Invalid message received, it must contain keys 'channel', 'content', "
+ "and 'delay'.")
+ break
+
+ message = DelayedMessage(
+ content=json.dumps(content['content']),
+ channel_name=content['channel'],
+ delay=content['delay']
+ )
+
+ try:
+ message.full_clean()
+            except ValidationError as err:
+                logger.error("Invalid message received: %s:%s", err.error_dict.keys(), err.messages)
+                # Skip the malformed message rather than stopping the whole worker
+                continue
+ message.save()
+
+ else:
+                # Sleep briefly so an idle worker doesn't busy-spin.
+ time.sleep(0.1)
+
+ # check for messages to send
+ if time.time() - last_delay_check > self.database_sleep_duration:
+ if DelayedMessage.objects.is_due().exists():
+ for message in DelayedMessage.objects.is_due().all():
+ logger.info("Sending delayed message to channel %s", message.channel_name)
+ message.send(channel_layer=self.channel_layer)
+ else:
+ logger.debug("No delayed messages waiting.")
+ last_delay_check = time.time()
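
The wire format the worker expects on asgi.delay, sketched with illustrative values; all three keys are required, per the validation above:

    from channels import Channel

    Channel("asgi.delay").send({
        "channel": "example.channel",   # where to deliver the message
        "content": {"key": "value"},    # message body, stored as JSON
        "delay": 5000,                  # milliseconds before delivery
    }, immediately=True)
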
diff --git a/channels/exceptions.py b/channels/exceptions.py
new file mode 100644
index 0000000..2c8a359
--- /dev/null
+++ b/channels/exceptions.py
@@ -0,0 +1,78 @@
+from __future__ import unicode_literals
+
+import six
+
+
+class ConsumeLater(Exception):
+ """
+ Exception that says that the current message should be re-queued back
+ onto its channel as it's not ready to be consumed yet (e.g. global order
+ is being enforced)
+ """
+ pass
+
+
+class ResponseLater(Exception):
+ """
+ Exception raised inside a Django view when the view has passed
+ responsibility for the response to another consumer, and so is not
+ returning a response.
+ """
+ pass
+
+
+class RequestTimeout(Exception):
+ """
+ Raised when it takes too long to read a request body.
+ """
+ pass
+
+
+class RequestAborted(Exception):
+ """
+ Raised when the incoming request tells us it's aborted partway through
+ reading the body.
+ """
+ pass
+
+
+class DenyConnection(Exception):
+ """
+ Raised during a websocket.connect (or other supported connection) handler
+ to deny the connection.
+ """
+ pass
+
+
+class ChannelSocketException(Exception):
+ """
+    Base exception that runs an action (its 'run' method)
+    when it is raised inside a consumer body
+ """
+
+ def run(self, message):
+ raise NotImplementedError
+
+
+class WebsocketCloseException(ChannelSocketException):
+ """
+    ChannelSocketException subclass that closes the WebSocket connection, optionally with a close code
+ """
+
+ def __init__(self, code=None):
+        # Valid close codes are 1000 or anything in the range [3000, 4999]
+        if code is not None and not (isinstance(code, six.integer_types) and
+                                     (code == 1000 or 3000 <= code <= 4999)):
+            raise ValueError("invalid close code {} (must be 1000 or from [3000, 4999])".format(code))
+ self._code = code
+
+ def run(self, message):
+ if message.reply_channel.name.split('.')[0] != "websocket":
+ raise ValueError("You cannot raise CloseWebsocketError from a non-websocket handler.")
+ message.reply_channel.send({"close": self._code or True})
+
+
+class SendNotAvailableOnDemultiplexer(Exception):
+ """
+ Raised when trying to send with a WebsocketDemultiplexer. Use the multiplexer instead.
+ """
+ pass
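
A sketch of how a ChannelSocketException subclass is used in practice; the consumer is hypothetical, and this assumes the worker catches the exception and invokes run():

    from channels.exceptions import WebsocketCloseException

    def ws_receive(message):
        if message.content.get("text") == "quit":
            # run() sends {"close": 4001} down the reply channel
            raise WebsocketCloseException(code=4001)
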
diff --git a/channels/generic/__init__.py b/channels/generic/__init__.py
new file mode 100644
index 0000000..8374ebb
--- /dev/null
+++ b/channels/generic/__init__.py
@@ -0,0 +1 @@
+from .base import BaseConsumer # NOQA isort:skip
diff --git a/channels/generic/base.py b/channels/generic/base.py
new file mode 100644
index 0000000..cc407a0
--- /dev/null
+++ b/channels/generic/base.py
@@ -0,0 +1,70 @@
+from __future__ import unicode_literals
+
+from ..auth import channel_session_user
+from ..routing import route_class
+from ..sessions import channel_session
+
+
+class BaseConsumer(object):
+ """
+ Base class-based consumer class. Provides the mechanisms to be a direct
+ routing object and a few other things.
+
+ Class-based consumers should be used with route_class in routing, like so::
+
+ from channels import route_class
+ routing = [
+            route_class(JsonWebsocketConsumer, path=r"^/liveblog/(?P<slug>[^/]+)/"),
+ ]
+ """
+
+ method_mapping = {}
+ channel_session = False
+ channel_session_user = False
+
+ def __init__(self, message, **kwargs):
+ """
+ Constructor, called when a new message comes in (the consumer is
+ the uninstantiated class, so calling it creates it)
+ """
+ self.message = message
+ self.kwargs = kwargs
+ self.dispatch(message, **kwargs)
+
+ @classmethod
+ def channel_names(cls):
+ """
+        Returns the set of channel names this consumer will respond to, in our case
+ derived from the method_mapping class attribute.
+ """
+ return set(cls.method_mapping.keys())
+
+ @classmethod
+ def as_route(cls, attrs=None, **kwargs):
+ """
+ Shortcut function to create route with filters (kwargs)
+ to direct to a class-based consumer with given class attributes (attrs)
+ """
+ _cls = cls
+ if attrs:
+ assert isinstance(attrs, dict), 'attrs must be a dict'
+ _cls = type(cls.__name__, (cls,), attrs)
+ return route_class(_cls, **kwargs)
+
+ def get_handler(self, message, **kwargs):
+ """
+        Returns the handler, using method_mapping to pick the right method to call.
+ """
+ handler = getattr(self, self.method_mapping[message.channel.name])
+ if self.channel_session_user:
+ return channel_session_user(handler)
+ elif self.channel_session:
+ return channel_session(handler)
+ else:
+ return handler
+
+ def dispatch(self, message, **kwargs):
+ """
+ Call handler with the message and all keyword arguments.
+ """
+ return self.get_handler(message, **kwargs)(message, **kwargs)
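
A minimal BaseConsumer subclass as a sketch; the channel name and handler are illustrative:

    class EchoConsumer(BaseConsumer):
        # Incoming channel name -> method name on this class
        method_mapping = {
            "example.echo": "handle_echo",
        }

        def handle_echo(self, message, **kwargs):
            message.reply_channel.send({"text": message["text"]})

    # Route it either via as_route() or route_class:
    routing = [EchoConsumer.as_route()]
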
diff --git a/channels/generic/websockets.py b/channels/generic/websockets.py
new file mode 100644
index 0000000..ab35d31
--- /dev/null
+++ b/channels/generic/websockets.py
@@ -0,0 +1,291 @@
+from django.core.serializers.json import DjangoJSONEncoder, json
+
+from ..auth import channel_and_http_session_user_from_http, channel_session_user_from_http
+from ..channel import Group
+from ..exceptions import SendNotAvailableOnDemultiplexer
+from ..sessions import enforce_ordering
+from .base import BaseConsumer
+
+
+class WebsocketConsumer(BaseConsumer):
+ """
+ Base WebSocket consumer. Provides a general encapsulation for the
+ WebSocket handling model that other applications can build on.
+ """
+
+ # You shouldn't need to override this
+ method_mapping = {
+ "websocket.connect": "raw_connect",
+ "websocket.receive": "raw_receive",
+ "websocket.disconnect": "raw_disconnect",
+ }
+
+ # Turning this on passes the user over from the HTTP session on connect,
+ # implies channel_session_user
+ http_user = False
+ http_user_and_session = False
+
+ # Set to True if you want the class to enforce ordering for you
+ strict_ordering = False
+
+ groups = None
+
+ def get_handler(self, message, **kwargs):
+ """
+ Pulls out the path onto an instance variable, and optionally
+ adds the ordering decorator.
+ """
+ # HTTP user implies channel session user
+ if self.http_user or self.http_user_and_session:
+ self.channel_session_user = True
+ # Get super-handler
+ self.path = message['path']
+ handler = super(WebsocketConsumer, self).get_handler(message, **kwargs)
+ # Optionally apply HTTP transfer
+ if self.http_user_and_session:
+ handler = channel_and_http_session_user_from_http(handler)
+ elif self.http_user:
+ handler = channel_session_user_from_http(handler)
+ # Ordering decorators
+ if self.strict_ordering:
+ return enforce_ordering(handler, slight=False)
+ elif getattr(self, "slight_ordering", False):
+ raise ValueError("Slight ordering is now always on. Please remove `slight_ordering=True`.")
+ else:
+ return handler
+
+ def connection_groups(self, **kwargs):
+ """
+ Group(s) to make people join when they connect and leave when they
+ disconnect. Make sure to return a list/tuple, not a string!
+ """
+ return self.groups or []
+
+ def raw_connect(self, message, **kwargs):
+ """
+ Called when a WebSocket connection is opened. Base level so you don't
+ need to call super() all the time.
+ """
+ for group in self.connection_groups(**kwargs):
+ Group(group, channel_layer=message.channel_layer).add(message.reply_channel)
+ self.connect(message, **kwargs)
+
+ def connect(self, message, **kwargs):
+ """
+ Called when a WebSocket connection is opened.
+ """
+ self.message.reply_channel.send({"accept": True})
+
+ def raw_receive(self, message, **kwargs):
+ """
+ Called when a WebSocket frame is received. Decodes it and passes it
+ to receive().
+ """
+ if "text" in message:
+ self.receive(text=message['text'], **kwargs)
+ else:
+ self.receive(bytes=message['bytes'], **kwargs)
+
+ def receive(self, text=None, bytes=None, **kwargs):
+ """
+ Called with a decoded WebSocket frame.
+ """
+ pass
+
+ def send(self, text=None, bytes=None, close=False):
+ """
+ Sends a reply back down the WebSocket
+ """
+ message = {}
+ if close:
+ message["close"] = close
+ if text is not None:
+ message["text"] = text
+ elif bytes is not None:
+ message["bytes"] = bytes
+ else:
+ raise ValueError("You must pass text or bytes")
+ self.message.reply_channel.send(message)
+
+ @classmethod
+ def group_send(cls, name, text=None, bytes=None, close=False):
+ message = {}
+ if close:
+ message["close"] = close
+ if text is not None:
+ message["text"] = text
+ elif bytes is not None:
+ message["bytes"] = bytes
+ else:
+ raise ValueError("You must pass text or bytes")
+ Group(name).send(message)
+
+ def close(self, status=True):
+ """
+ Closes the WebSocket from the server end
+ """
+ self.message.reply_channel.send({"close": status})
+
+ def raw_disconnect(self, message, **kwargs):
+ """
+ Called when a WebSocket connection is closed. Base level so you don't
+ need to call super() all the time.
+ """
+ for group in self.connection_groups(**kwargs):
+ Group(group, channel_layer=message.channel_layer).discard(message.reply_channel)
+ self.disconnect(message, **kwargs)
+
+ def disconnect(self, message, **kwargs):
+ """
+ Called when a WebSocket connection is closed.
+ """
+ pass
+
+
+class JsonWebsocketConsumer(WebsocketConsumer):
+ """
+ Variant of WebsocketConsumer that automatically JSON-encodes and decodes
+ messages as they come in and go out. Expects everything to be text; will
+ error on binary data.
+ """
+
+ def raw_receive(self, message, **kwargs):
+ if "text" in message:
+ self.receive(self.decode_json(message['text']), **kwargs)
+ else:
+ raise ValueError("No text section for incoming WebSocket frame!")
+
+ def receive(self, content, **kwargs):
+ """
+ Called with decoded JSON content.
+ """
+ pass
+
+ def send(self, content, close=False):
+ """
+ Encode the given content as JSON and send it to the client.
+ """
+ super(JsonWebsocketConsumer, self).send(text=self.encode_json(content), close=close)
+
+ @classmethod
+ def decode_json(cls, text):
+ return json.loads(text)
+
+ @classmethod
+ def encode_json(cls, content):
+ return json.dumps(content)
+
+ @classmethod
+ def group_send(cls, name, content, close=False):
+ WebsocketConsumer.group_send(name, cls.encode_json(content), close=close)
+
+
+class WebsocketMultiplexer(object):
+ """
+    The opposite of the demultiplexer: sends a message through a multiplexed channel.
+
+    The multiplexer object is passed as a kwarg to the consumer when the message is dispatched.
+ This pattern allows the consumer class to be independent of the stream name.
+ """
+
+ stream = None
+ reply_channel = None
+
+ def __init__(self, stream, reply_channel):
+ self.stream = stream
+ self.reply_channel = reply_channel
+
+ def send(self, payload):
+ """Multiplex the payload using the stream name and send it."""
+ self.reply_channel.send(self.encode(self.stream, payload))
+
+ @classmethod
+ def encode_json(cls, content):
+ return json.dumps(content, cls=DjangoJSONEncoder)
+
+ @classmethod
+ def encode(cls, stream, payload):
+ """
+ Encodes stream + payload for outbound sending.
+ """
+ content = {"stream": stream, "payload": payload}
+ return {"text": cls.encode_json(content)}
+
+ @classmethod
+ def group_send(cls, name, stream, payload, close=False):
+ message = cls.encode(stream, payload)
+ if close:
+ message["close"] = True
+ Group(name).send(message)
+
+
+class WebsocketDemultiplexer(JsonWebsocketConsumer):
+ """
+ JSON-understanding WebSocket consumer subclass that handles demultiplexing
+ streams using a "stream" key in a top-level dict and the actual payload
+ in a sub-dict called "payload". This lets you run multiple streams over
+ a single WebSocket connection in a standardised way.
+
+ Incoming messages on streams are dispatched to consumers so you can
+ just tie in consumers the normal way. The reply_channels are kept so
+ sessions/auth continue to work. Payloads must be a dict at the top level,
+ so they fulfill the Channels message spec.
+
+    To answer with a multiplexed message, a multiplexer object
+    with "send" and "group_send" methods is forwarded to the consumer as the
+    kwarg "multiplexer".
+
+ Set a mapping of streams to consumer classes in the "consumers" keyword.
+ """
+
+ # Put your JSON consumers here: {stream_name : consumer}
+ consumers = {}
+
+ # Optionally use a custom multiplexer class
+ multiplexer_class = WebsocketMultiplexer
+
+ def receive(self, content, **kwargs):
+ """Forward messages to all consumers."""
+ # Check the frame looks good
+ if isinstance(content, dict) and "stream" in content and "payload" in content:
+ # Match it to a channel
+ for stream, consumer in self.consumers.items():
+ if stream == content['stream']:
+ # Extract payload and add in reply_channel
+ payload = content['payload']
+ if not isinstance(payload, dict):
+ raise ValueError("Multiplexed frame payload is not a dict")
+ # The json consumer expects serialized JSON
+ self.message.content['text'] = self.encode_json(payload)
+ # Send demultiplexer to the consumer, to be able to answer
+ kwargs['multiplexer'] = self.multiplexer_class(stream, self.message.reply_channel)
+ # Patch send to avoid sending not formatted messages from the consumer
+ if hasattr(consumer, "send"):
+ consumer.send = self.send
+ # Dispatch message
+ consumer(self.message, **kwargs)
+ return
+
+ raise ValueError("Invalid multiplexed frame received (stream not mapped)")
+ else:
+ raise ValueError("Invalid multiplexed **frame received (no channel/payload key)")
+
+ def connect(self, message, **kwargs):
+ """Forward connection to all consumers."""
+ self.message.reply_channel.send({"accept": True})
+ for stream, consumer in self.consumers.items():
+ kwargs['multiplexer'] = self.multiplexer_class(stream, self.message.reply_channel)
+ consumer(message, **kwargs)
+
+ def disconnect(self, message, **kwargs):
+ """Forward disconnection to all consumers."""
+ for stream, consumer in self.consumers.items():
+ kwargs['multiplexer'] = self.multiplexer_class(stream, self.message.reply_channel)
+ consumer(message, **kwargs)
+
+ def send(self, *args):
+ raise SendNotAvailableOnDemultiplexer("Use multiplexer.send of the multiplexer kwarg.")
+
+ @classmethod
+ def group_send(cls, name, stream, payload, close=False):
+ raise SendNotAvailableOnDemultiplexer("Use WebsocketMultiplexer.group_send")
diff --git a/channels/hacks.py b/channels/hacks.py
new file mode 100644
index 0000000..d6303d5
--- /dev/null
+++ b/channels/hacks.py
@@ -0,0 +1,11 @@
+
+
+def monkeypatch_django():
+ """
+ Monkeypatches support for us into parts of Django.
+ """
+ # Ensure that the staticfiles version of runserver bows down to us
+ # This one is particularly horrible
+ from django.contrib.staticfiles.management.commands.runserver import Command as StaticRunserverCommand
+ from .management.commands.runserver import Command as RunserverCommand
+ StaticRunserverCommand.__bases__ = (RunserverCommand, )
diff --git a/channels/handler.py b/channels/handler.py
new file mode 100644
index 0000000..cd44f45
--- /dev/null
+++ b/channels/handler.py
@@ -0,0 +1,364 @@
+from __future__ import unicode_literals
+
+import cgi
+import codecs
+import logging
+import sys
+import time
+import traceback
+from io import BytesIO
+
+from django import http
+from django.conf import settings
+from django.core import signals
+from django.core.handlers import base
+from django.http import FileResponse, HttpResponse, HttpResponseServerError
+from django.utils import six
+from django.utils.functional import cached_property
+
+from channels.exceptions import RequestAborted, RequestTimeout, ResponseLater as ResponseLaterOuter
+
+try:
+ from django.urls import set_script_prefix
+except ImportError:
+ # Django < 1.10
+ from django.core.urlresolvers import set_script_prefix
+
+logger = logging.getLogger('django.request')
+
+
+class AsgiRequest(http.HttpRequest):
+ """
+ Custom request subclass that decodes from an ASGI-standard request
+ dict, and wraps request body handling.
+ """
+
+ ResponseLater = ResponseLaterOuter
+
+ # Number of seconds until a Request gives up on trying to read a request
+ # body and aborts.
+ body_receive_timeout = 60
+
+ def __init__(self, message):
+ self.message = message
+ self.reply_channel = self.message.reply_channel
+ self._content_length = 0
+ self._post_parse_error = False
+ self._read_started = False
+ self.resolver_match = None
+ # Path info
+ self.path = self.message['path']
+ self.script_name = self.message.get('root_path', '')
+ if self.script_name and self.path.startswith(self.script_name):
+ # TODO: Better is-prefix checking, slash handling?
+ self.path_info = self.path[len(self.script_name):]
+ else:
+ self.path_info = self.path
+ # HTTP basics
+ self.method = self.message['method'].upper()
+ # fix https://github.com/django/channels/issues/622
+ query_string = self.message.get('query_string', '')
+ if isinstance(query_string, bytes):
+ query_string = query_string.decode('utf-8')
+ self.META = {
+ "REQUEST_METHOD": self.method,
+ "QUERY_STRING": query_string,
+ "SCRIPT_NAME": self.script_name,
+ "PATH_INFO": self.path_info,
+ # Old code will need these for a while
+ "wsgi.multithread": True,
+ "wsgi.multiprocess": True,
+ }
+ if self.message.get('client', None):
+ self.META['REMOTE_ADDR'] = self.message['client'][0]
+ self.META['REMOTE_HOST'] = self.META['REMOTE_ADDR']
+ self.META['REMOTE_PORT'] = self.message['client'][1]
+ if self.message.get('server', None):
+ self.META['SERVER_NAME'] = self.message['server'][0]
+ self.META['SERVER_PORT'] = six.text_type(self.message['server'][1])
+ else:
+ self.META['SERVER_NAME'] = "unknown"
+ self.META['SERVER_PORT'] = "0"
+ # Handle old style-headers for a transition period
+ if "headers" in self.message and isinstance(self.message['headers'], dict):
+ self.message['headers'] = [
+ (x.encode("latin1"), y) for x, y in
+ self.message['headers'].items()
+ ]
+ # Headers go into META
+ for name, value in self.message.get('headers', []):
+ name = name.decode("latin1")
+ if name == "content-length":
+ corrected_name = "CONTENT_LENGTH"
+ elif name == "content-type":
+ corrected_name = "CONTENT_TYPE"
+ else:
+ corrected_name = 'HTTP_%s' % name.upper().replace("-", "_")
+            # HTTPbis says only ASCII chars are allowed in headers, but we decode latin1 just in case
+ value = value.decode("latin1")
+ if corrected_name in self.META:
+ value = self.META[corrected_name] + "," + value
+ self.META[corrected_name] = value
+ # Pull out request encoding if we find it
+ if "CONTENT_TYPE" in self.META:
+ self.content_type, self.content_params = cgi.parse_header(self.META["CONTENT_TYPE"])
+ if 'charset' in self.content_params:
+ try:
+ codecs.lookup(self.content_params['charset'])
+ except LookupError:
+ pass
+ else:
+ self.encoding = self.content_params['charset']
+ else:
+ self.content_type, self.content_params = "", {}
+ # Pull out content length info
+ if self.META.get('CONTENT_LENGTH', None):
+ try:
+ self._content_length = int(self.META['CONTENT_LENGTH'])
+ except (ValueError, TypeError):
+ pass
+ # Body handling
+ self._body = message.get("body", b"")
+ if message.get("body_channel", None):
+ body_handle_start = time.time()
+ while True:
+ # Get the next chunk from the request body channel
+ chunk = None
+ while chunk is None:
+ # If they take too long, raise request timeout and the handler
+ # will turn it into a response
+ if time.time() - body_handle_start > self.body_receive_timeout:
+ raise RequestTimeout()
+ _, chunk = message.channel_layer.receive_many(
+ [message['body_channel']],
+ block=True,
+ )
+ # If chunk contains close, abort.
+ if chunk.get("closed", False):
+ raise RequestAborted()
+ # Add content to body
+ self._body += chunk.get("content", "")
+ # Exit loop if this was the last
+ if not chunk.get("more_content", False):
+ break
+ assert isinstance(self._body, six.binary_type), "Body is not bytes"
+ # Add a stream-a-like for the body
+ self._stream = BytesIO(self._body)
+ # Other bits
+ self.resolver_match = None
+
+ @cached_property
+ def GET(self):
+        return http.QueryDict(self.META.get('QUERY_STRING', ''))
+
+ def _get_scheme(self):
+ return self.message.get("scheme", "http")
+
+ def _get_post(self):
+ if not hasattr(self, '_post'):
+ self._read_started = False
+ self._load_post_and_files()
+ return self._post
+
+ def _set_post(self, post):
+ self._post = post
+
+ def _get_files(self):
+ if not hasattr(self, '_files'):
+ self._read_started = False
+ self._load_post_and_files()
+ return self._files
+
+ POST = property(_get_post, _set_post)
+ FILES = property(_get_files)
+
+ @cached_property
+ def COOKIES(self):
+ return http.parse_cookie(self.META.get('HTTP_COOKIE', ''))
+
+
+class AsgiHandler(base.BaseHandler):
+ """
+ Handler for ASGI requests for the view system only (it will have got here
+ after traversing the dispatch-by-channel-name system, which decides it's
+    an HTTP request)
+ """
+
+ request_class = AsgiRequest
+
+ # Size to chunk response bodies into for multiple response messages
+ chunk_size = 512 * 1024
+
+ def __init__(self, *args, **kwargs):
+ super(AsgiHandler, self).__init__(*args, **kwargs)
+ self.load_middleware()
+
+ def __call__(self, message):
+ # Set script prefix from message root_path, turning None into empty string
+ set_script_prefix(message.get('root_path', '') or '')
+ signals.request_started.send(sender=self.__class__, message=message)
+ # Run request through view system
+ try:
+ request = self.request_class(message)
+ except UnicodeDecodeError:
+ logger.warning(
+ 'Bad Request (UnicodeDecodeError)',
+ exc_info=sys.exc_info(),
+ extra={
+ 'status_code': 400,
+ }
+ )
+ response = http.HttpResponseBadRequest()
+ except RequestTimeout:
+            # Parsing the request failed, so the response is a Request Timeout error
+ response = HttpResponse("408 Request Timeout (upload too slow)", status=408)
+ except RequestAborted:
+ # Client closed connection on us mid request. Abort!
+ return
+ else:
+ try:
+ response = self.get_response(request)
+ # Fix chunk size on file responses
+ if isinstance(response, FileResponse):
+ response.block_size = 1024 * 512
+ except AsgiRequest.ResponseLater:
+ # The view has promised something else
+ # will send a response at a later time
+ return
+ # Transform response into messages, which we yield back to caller
+ for message in self.encode_response(response):
+ # TODO: file_to_stream
+ yield message
+ # Close the response now we're done with it
+ response.close()
+
+ def process_exception_by_middleware(self, exception, request):
+ """
+ Catches ResponseLater and re-raises it, else tries to delegate
+ to middleware exception handling.
+ """
+ if isinstance(exception, AsgiRequest.ResponseLater):
+ raise
+ else:
+ return super(AsgiHandler, self).process_exception_by_middleware(exception, request)
+
+ def handle_uncaught_exception(self, request, resolver, exc_info):
+ """
+ Propagates ResponseLater up into the higher handler method,
+ processes everything else
+ """
+ # ResponseLater needs to be bubbled up the stack
+ if issubclass(exc_info[0], AsgiRequest.ResponseLater):
+ raise
+ # There's no WSGI server to catch the exception further up if this fails,
+ # so translate it into a plain text response.
+ try:
+ return super(AsgiHandler, self).handle_uncaught_exception(request, resolver, exc_info)
+ except:
+ return HttpResponseServerError(
+ traceback.format_exc() if settings.DEBUG else "Internal Server Error",
+ content_type="text/plain",
+ )
+
+ @classmethod
+ def encode_response(cls, response):
+ """
+ Encodes a Django HTTP response into ASGI http.response message(s).
+ """
+ # Collect cookies into headers.
+ # Note that we have to preserve header case as there are some non-RFC
+ # compliant clients that want things like Content-Type correct. Ugh.
+ response_headers = []
+ for header, value in response.items():
+ if isinstance(header, six.text_type):
+ header = header.encode("ascii")
+ if isinstance(value, six.text_type):
+ value = value.encode("latin1")
+ response_headers.append(
+ (
+ six.binary_type(header),
+ six.binary_type(value),
+ )
+ )
+ for c in response.cookies.values():
+ response_headers.append(
+ (
+ b'Set-Cookie',
+ c.output(header='').encode("ascii"),
+ )
+ )
+ # Make initial response message
+ message = {
+ "status": response.status_code,
+ "headers": response_headers,
+ }
+ # Streaming responses need to be pinned to their iterator
+ if response.streaming:
+ # Access `__iter__` and not `streaming_content` directly in case
+ # it has been overridden in a subclass.
+ for part in response:
+ for chunk, more in cls.chunk_bytes(part):
+ message['content'] = chunk
+ # We ignore "more" as there may be more parts; instead,
+ # we use an empty final closing message with False.
+ message['more_content'] = True
+ yield message
+ message = {}
+ # Final closing message
+ message["more_content"] = False
+ yield message
+ # Other responses just need chunking
+ else:
+ # Yield chunks of response
+ for chunk, last in cls.chunk_bytes(response.content):
+ message['content'] = chunk
+ message['more_content'] = not last
+ yield message
+ message = {}
+
+ @classmethod
+ def chunk_bytes(cls, data):
+ """
+ Chunks some data into chunks based on the current ASGI channel layer's
+ message size and reasonable defaults.
+
+ Yields (chunk, last_chunk) tuples.
+ """
+ position = 0
+ if not data:
+ yield data, True
+ return
+ while position < len(data):
+ yield (
+ data[position:position + cls.chunk_size],
+ (position + cls.chunk_size) >= len(data),
+ )
+ position += cls.chunk_size
+
+
+class ViewConsumer(object):
+ """
+    Dispatches channel HTTP requests into Django's URL/view system.
+ """
+
+ handler_class = AsgiHandler
+
+ def __init__(self):
+ self.handler = self.handler_class()
+
+ def __call__(self, message):
+ for reply_message in self.handler(message):
+ while True:
+ # If we get ChannelFull we just wait and keep trying until
+ # it goes through.
+ # TODO: Add optional death timeout? Don't want to lock up
+ # a whole worker if the client just vanishes and leaves the response
+ # channel full.
+ try:
+                    # Note: use immediately=True to prevent streaming responses
+                    # from trying to cache all their data.
+ message.reply_channel.send(reply_message, immediately=True)
+ except message.channel_layer.ChannelFull:
+ time.sleep(0.05)
+ else:
+ break
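
For orientation, the ASGI messages a response encodes to look roughly like this (values illustrative): a small response becomes a single message, while bodies over chunk_size (512 KB) are split into several, all but the last carrying more_content=True:

    {
        "status": 200,
        "headers": [(b"Content-Type", b"text/html")],
        "content": b"<h1>Hi</h1>",
        "more_content": False,
    }
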
diff --git a/channels/log.py b/channels/log.py
new file mode 100644
index 0000000..26c8bf6
--- /dev/null
+++ b/channels/log.py
@@ -0,0 +1,31 @@
+import logging
+
+handler = logging.StreamHandler()
+
+
+def setup_logger(name, verbosity=1):
+ """
+ Basic logger for runserver etc.
+ """
+
+ formatter = logging.Formatter(
+ fmt='%(asctime)s - %(levelname)s - %(module)s - %(message)s')
+
+ handler.setFormatter(formatter)
+
+ # Set up main logger
+ logger = logging.getLogger(name)
+ logger.setLevel(logging.INFO)
+ logger.addHandler(handler)
+ if verbosity > 1:
+ logger.setLevel(logging.DEBUG)
+
+ # Set up daphne protocol loggers
+ for module in ["daphne.ws_protocol", "daphne.http_protocol", "daphne.server"]:
+ daphne_logger = logging.getLogger(module)
+ daphne_logger.addHandler(handler)
+ daphne_logger.setLevel(
+ logging.DEBUG if verbosity > 1 else logging.INFO)
+
+ logger.propagate = False
+ return logger
diff --git a/channels/management/__init__.py b/channels/management/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/channels/management/commands/__init__.py b/channels/management/commands/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/channels/management/commands/runserver.py b/channels/management/commands/runserver.py
new file mode 100644
index 0000000..c4937ff
--- /dev/null
+++ b/channels/management/commands/runserver.py
@@ -0,0 +1,177 @@
+import datetime
+import sys
+import threading
+
+from daphne.server import Server, build_endpoint_description_strings
+from django.apps import apps
+from django.conf import settings
+from django.core.management.commands.runserver import Command as RunserverCommand
+from django.utils import six
+from django.utils.encoding import get_system_encoding
+
+from channels import DEFAULT_CHANNEL_LAYER, channel_layers
+from channels.handler import ViewConsumer
+from channels.log import setup_logger
+from channels.staticfiles import StaticFilesConsumer
+from channels.worker import Worker
+
+
+class Command(RunserverCommand):
+ protocol = 'http'
+ server_cls = Server
+
+ def add_arguments(self, parser):
+ super(Command, self).add_arguments(parser)
+ parser.add_argument('--noworker', action='store_false', dest='run_worker', default=True,
+ help='Tells Django not to run a worker thread; you\'ll need to run one separately.')
+ parser.add_argument('--noasgi', action='store_false', dest='use_asgi', default=True,
+ help='Run the old WSGI-based runserver rather than the ASGI-based one')
+ parser.add_argument('--http_timeout', action='store', dest='http_timeout', type=int, default=60,
+ help='Specify the daphne http_timeout interval in seconds (default: 60)')
+ parser.add_argument('--websocket_handshake_timeout', action='store', dest='websocket_handshake_timeout',
+ type=int, default=5,
+ help='Specify the daphne websocket_handshake_timeout interval in seconds (default: 5)')
+
+ def handle(self, *args, **options):
+ self.verbosity = options.get("verbosity", 1)
+ self.logger = setup_logger('django.channels', self.verbosity)
+ self.http_timeout = options.get("http_timeout", 60)
+ self.websocket_handshake_timeout = options.get("websocket_handshake_timeout", 5)
+ super(Command, self).handle(*args, **options)
+
+ def inner_run(self, *args, **options):
+ # Maybe they want the wsgi one?
+ if not options.get("use_asgi", True) or DEFAULT_CHANNEL_LAYER not in channel_layers:
+ if hasattr(RunserverCommand, "server_cls"):
+ self.server_cls = RunserverCommand.server_cls
+ return RunserverCommand.inner_run(self, *args, **options)
+        # Check a handler is registered for HTTP requests; if not, add the default one
+ self.channel_layer = channel_layers[DEFAULT_CHANNEL_LAYER]
+ self.channel_layer.router.check_default(
+ http_consumer=self.get_consumer(*args, **options),
+ )
+ # Run checks
+ self.stdout.write("Performing system checks...\n\n")
+ self.check(display_num_errors=True)
+ self.check_migrations()
+ # Print helpful text
+ quit_command = 'CTRL-BREAK' if sys.platform == 'win32' else 'CONTROL-C'
+ now = datetime.datetime.now().strftime('%B %d, %Y - %X')
+ if six.PY2:
+ now = now.decode(get_system_encoding())
+ self.stdout.write(now)
+ self.stdout.write((
+ "Django version %(version)s, using settings %(settings)r\n"
+ "Starting Channels development server at %(protocol)s://%(addr)s:%(port)s/\n"
+ "Channel layer %(layer)s\n"
+ "Quit the server with %(quit_command)s.\n"
+ ) % {
+ "version": self.get_version(),
+ "settings": settings.SETTINGS_MODULE,
+ "protocol": self.protocol,
+ "addr": '[%s]' % self.addr if self._raw_ipv6 else self.addr,
+ "port": self.port,
+ "quit_command": quit_command,
+ "layer": self.channel_layer,
+ })
+
+ # Launch workers as subthreads
+ if options.get("run_worker", True):
+ worker_count = 4 if options.get("use_threading", True) else 1
+ for _ in range(worker_count):
+ worker = WorkerThread(self.channel_layer, self.logger)
+ worker.daemon = True
+ worker.start()
+ # Launch server in 'main' thread. Signals are disabled as it's still
+ # actually a subthread under the autoreloader.
+ self.logger.debug("Daphne running, listening on %s:%s", self.addr, self.port)
+
+ # build the endpoint description string from host/port options
+ endpoints = build_endpoint_description_strings(host=self.addr, port=self.port)
+ try:
+ self.server_cls(
+ channel_layer=self.channel_layer,
+ endpoints=endpoints,
+ signal_handlers=not options['use_reloader'],
+ action_logger=self.log_action,
+ http_timeout=self.http_timeout,
+ ws_protocols=getattr(settings, 'CHANNELS_WS_PROTOCOLS', None),
+ root_path=getattr(settings, 'FORCE_SCRIPT_NAME', '') or '',
+ websocket_handshake_timeout=self.websocket_handshake_timeout,
+ ).run()
+ self.logger.debug("Daphne exited")
+ except KeyboardInterrupt:
+ shutdown_message = options.get('shutdown_message', '')
+ if shutdown_message:
+ self.stdout.write(shutdown_message)
+ return
+
+ def log_action(self, protocol, action, details):
+ """
+ Logs various different kinds of requests to the console.
+ """
+ # All start with timestamp
+ msg = "[%s] " % datetime.datetime.now().strftime("%Y/%m/%d %H:%M:%S")
+ # HTTP requests
+ if protocol == "http" and action == "complete":
+ msg += "HTTP %(method)s %(path)s %(status)s [%(time_taken).2f, %(client)s]\n" % details
+ # Utilize terminal colors, if available
+ if 200 <= details['status'] < 300:
+ # Put 2XX first, since it should be the common case
+ msg = self.style.HTTP_SUCCESS(msg)
+ elif 100 <= details['status'] < 200:
+ msg = self.style.HTTP_INFO(msg)
+ elif details['status'] == 304:
+ msg = self.style.HTTP_NOT_MODIFIED(msg)
+ elif 300 <= details['status'] < 400:
+ msg = self.style.HTTP_REDIRECT(msg)
+ elif details['status'] == 404:
+ msg = self.style.HTTP_NOT_FOUND(msg)
+ elif 400 <= details['status'] < 500:
+ msg = self.style.HTTP_BAD_REQUEST(msg)
+ else:
+ # Any 5XX, or any other response
+ msg = self.style.HTTP_SERVER_ERROR(msg)
+ # Websocket requests
+ elif protocol == "websocket" and action == "connected":
+ msg += "WebSocket CONNECT %(path)s [%(client)s]\n" % details
+ elif protocol == "websocket" and action == "disconnected":
+ msg += "WebSocket DISCONNECT %(path)s [%(client)s]\n" % details
+ elif protocol == "websocket" and action == "connecting":
+ msg += "WebSocket HANDSHAKING %(path)s [%(client)s]\n" % details
+ elif protocol == "websocket" and action == "rejected":
+ msg += "WebSocket REJECT %(path)s [%(client)s]\n" % details
+
+ sys.stderr.write(msg)
+
+ def get_consumer(self, *args, **options):
+ """
+ Returns the static files serving handler wrapping the default handler,
+ if static files should be served. Otherwise just returns the default
+ handler.
+ """
+ staticfiles_installed = apps.is_installed("django.contrib.staticfiles")
+ use_static_handler = options.get('use_static_handler', staticfiles_installed)
+ insecure_serving = options.get('insecure_serving', False)
+ if use_static_handler and (settings.DEBUG or insecure_serving):
+ return StaticFilesConsumer()
+ else:
+ return ViewConsumer()
+
+
+class WorkerThread(threading.Thread):
+ """
+ Class that runs a worker
+ """
+
+ def __init__(self, channel_layer, logger):
+ super(WorkerThread, self).__init__()
+ self.channel_layer = channel_layer
+ self.logger = logger
+
+ def run(self):
+ self.logger.debug("Worker thread running")
+ worker = Worker(channel_layer=self.channel_layer, signal_handlers=False)
+ worker.ready()
+ worker.run()
+ self.logger.debug("Worker thread exited")
diff --git a/channels/management/commands/runworker.py b/channels/management/commands/runworker.py
new file mode 100644
index 0000000..e8366e6
--- /dev/null
+++ b/channels/management/commands/runworker.py
@@ -0,0 +1,88 @@
+from __future__ import unicode_literals
+
+from django.apps import apps
+from django.conf import settings
+from django.core.management import BaseCommand, CommandError
+
+from channels import DEFAULT_CHANNEL_LAYER, channel_layers
+from channels.log import setup_logger
+from channels.signals import worker_process_ready
+from channels.staticfiles import StaticFilesConsumer
+from channels.worker import Worker, WorkerGroup
+
+
+class Command(BaseCommand):
+
+ leave_locale_alone = True
+
+ def add_arguments(self, parser):
+ super(Command, self).add_arguments(parser)
+ parser.add_argument(
+ '--layer', action='store', dest='layer', default=DEFAULT_CHANNEL_LAYER,
+ help='Channel layer alias to use, if not the default.',
+ )
+ parser.add_argument(
+ '--only-channels', action='append', dest='only_channels',
+ help='Limits this worker to only listening on the provided channels (supports globbing).',
+ )
+ parser.add_argument(
+ '--exclude-channels', action='append', dest='exclude_channels',
+ help='Prevents this worker from listening on the provided channels (supports globbing).',
+ )
+ parser.add_argument(
+ '--threads', action='store', dest='threads',
+ default=1, type=int,
+ help='Number of threads to execute.'
+ )
+
+ def handle(self, *args, **options):
+ # Get the backend to use
+ self.verbosity = options.get("verbosity", 1)
+ self.logger = setup_logger('django.channels', self.verbosity)
+ self.channel_layer = channel_layers[options.get("layer", DEFAULT_CHANNEL_LAYER)]
+ self.n_threads = options.get('threads', 1)
+        # Check that the channel layer isn't the in-memory one
+ if self.channel_layer.local_only():
+ raise CommandError(
+ "You cannot span multiple processes with the in-memory layer. " +
+ "Change your settings to use a cross-process channel layer."
+ )
+        # Check a handler is registered for HTTP requests
+ # Serve static files if Django in debug mode
+ if settings.DEBUG and apps.is_installed('django.contrib.staticfiles'):
+ self.channel_layer.router.check_default(http_consumer=StaticFilesConsumer())
+ else:
+ self.channel_layer.router.check_default()
+ # Optionally provide an output callback
+ callback = None
+ if self.verbosity > 1:
+ callback = self.consumer_called
+ self.callback = callback
+ self.options = options
+ # Choose an appropriate worker.
+ worker_kwargs = {}
+ if self.n_threads == 1:
+ self.logger.info("Using single-threaded worker.")
+ worker_cls = Worker
+ else:
+ self.logger.info("Using multi-threaded worker, {} thread(s).".format(self.n_threads))
+ worker_cls = WorkerGroup
+ worker_kwargs['n_threads'] = self.n_threads
+ # Run the worker
+ self.logger.info("Running worker against channel layer %s", self.channel_layer)
+ try:
+ worker = worker_cls(
+ channel_layer=self.channel_layer,
+ callback=self.callback,
+ only_channels=self.options.get("only_channels", None),
+ exclude_channels=self.options.get("exclude_channels", None),
+ **worker_kwargs
+ )
+ worker_process_ready.send(sender=worker)
+ worker.ready()
+ worker.run()
+ except KeyboardInterrupt:
+ pass
+
+ def consumer_called(self, channel, message):
+ self.logger.debug("%s", channel)
diff --git a/channels/message.py b/channels/message.py
new file mode 100644
index 0000000..48e4ebe
--- /dev/null
+++ b/channels/message.py
@@ -0,0 +1,124 @@
+from __future__ import unicode_literals
+
+import copy
+import threading
+import time
+
+from .channel import Channel
+from .signals import consumer_finished, consumer_started
+
+
+class Message(object):
+ """
+ Represents a message sent over a Channel.
+
+ The message content is a dict called .content, while
+ reply_channel is an optional extra attribute representing a channel
+ to use to reply to this message's end user, if that makes sense.
+ """
+
+ def __init__(self, content, channel_name, channel_layer):
+ self.content = content
+ self.channel = Channel(
+ channel_name,
+ channel_layer=channel_layer,
+ )
+ self.channel_layer = channel_layer
+ if content.get("reply_channel", None):
+ self.reply_channel = Channel(
+ content["reply_channel"],
+ channel_layer=self.channel_layer,
+ )
+ else:
+ self.reply_channel = None
+
+ def __getitem__(self, key):
+ return self.content[key]
+
+ def __setitem__(self, key, value):
+ self.content[key] = value
+
+ def __contains__(self, key):
+ return key in self.content
+
+ def keys(self):
+ return self.content.keys()
+
+ def values(self):
+ return self.content.values()
+
+ def items(self):
+ return self.content.items()
+
+ def get(self, key, default=None):
+ return self.content.get(key, default)
+
+ def copy(self):
+ """
+ Returns a safely content-mutable copy of this Message.
+ """
+ return self.__class__(
+ copy.deepcopy(self.content),
+ self.channel.name,
+ self.channel_layer,
+ )
+
+
+class PendingMessageStore(object):
+ """
+ Singleton object used for storing pending messages that should be sent
+ to a channel or group when a consumer finishes.
+
+    Will retry when it sees ChannelFull, up to a time limit; if you want more
+    control over this, pass `immediately=True` to your send() calls and handle
+    ChannelFull yourself.
+ """
+
+ threadlocal = threading.local()
+
+ retry_time = 2 # seconds
+ retry_interval = 0.2 # seconds
+
+ def prepare(self, **kwargs):
+ """
+ Sets the message store up to receive messages.
+ """
+ self.threadlocal.messages = []
+
+ @property
+ def active(self):
+ """
+ Returns if the pending message store can be used or not
+ (it can only be used inside consumers)
+ """
+ return hasattr(self.threadlocal, "messages")
+
+ def append(self, sender, message):
+ self.threadlocal.messages.append((sender, message))
+
+ def send_and_flush(self, **kwargs):
+ for sender, message in getattr(self.threadlocal, "messages", []):
+ # Loop until the retry time limit is hit
+ started = time.time()
+ while time.time() - started < self.retry_time:
+ try:
+ sender.send(message, immediately=True)
+ except sender.channel_layer.ChannelFull:
+ time.sleep(self.retry_interval)
+ continue
+ else:
+ break
+ # If we didn't break out, we failed to send, so do a nice exception
+ else:
+ raise RuntimeError(
+ "Failed to send queued message to %s after retrying for %.2fs.\n"
+ "You need to increase the consumption rate on this channel, its capacity,\n"
+ "or handle the ChannelFull exception yourself after adding\n"
+ "immediately=True to send()." % (sender, self.retry_time)
+ )
+ delattr(self.threadlocal, "messages")
+
+
+pending_message_store = PendingMessageStore()
+consumer_started.connect(pending_message_store.prepare)
+consumer_finished.connect(pending_message_store.send_and_flush)
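
Because of this dict-like interface, consumers can treat a Message much like its content dict; a sketch (the consumer is hypothetical):

    def consumer(message):
        path = message["path"]            # __getitem__ proxies to .content
        text = message.get("text", "")    # as does .get()
        if message.reply_channel:         # set when content has "reply_channel"
            message.reply_channel.send({"text": text})
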
diff --git a/channels/package_checks.py b/channels/package_checks.py
new file mode 100644
index 0000000..bf72ec0
--- /dev/null
+++ b/channels/package_checks.py
@@ -0,0 +1,30 @@
+import importlib
+from distutils.version import StrictVersion
+
+required_versions = {
+ "asgi_rabbitmq": "0.4.0",
+ "asgi_redis": "1.2.0",
+ "asgi_ipc": "1.3.0",
+}
+
+
+def check_all():
+ """
+ Checks versions of all the possible packages you have installed so that
+ we can easily warn people about incompatible versions.
+
+    This is needed as there are some packages (e.g. asgi_redis) that we cannot
+    declare dependencies on, as they are not _required_. People usually remember
+    to upgrade their Channels package but not these backends, so this is where we check.
+ """
+ for package, version in required_versions.items():
+ try:
+ module = importlib.import_module(package)
+ except ImportError:
+ continue
+ else:
+ if StrictVersion(version) > StrictVersion(module.__version__):
+ raise RuntimeError("Your version of %s is too old - it must be at least %s" % (
+ package,
+ version,
+ ))
diff --git a/channels/routing.py b/channels/routing.py
new file mode 100644
index 0000000..8330537
--- /dev/null
+++ b/channels/routing.py
@@ -0,0 +1,264 @@
+from __future__ import unicode_literals
+
+import importlib
+import re
+
+from django.core.exceptions import ImproperlyConfigured
+from django.utils import six
+
+from .utils import name_that_thing
+
+
+class Router(object):
+ """
+ Manages the available consumers in the project and which channels they
+ listen to.
+
+ Generally this is attached to a backend instance as ".router"
+
+ Anything can be a routable object as long as it provides a match()
+ method that either returns (callable, kwargs) or None.
+ """
+
+ def __init__(self, routing):
+ # Use a blank include as the root item
+ self.root = Include(routing)
+ # Cache channel names
+ self.channels = self.root.channel_names()
+
+ def add_route(self, route):
+ """
+ Adds a single raw Route to us at the end of the resolution list.
+ """
+ self.root.routing.append(route)
+ self.channels = self.root.channel_names()
+
+ def match(self, message):
+ """
+ Runs through our routing and tries to find a consumer that matches
+ the message/channel. Returns (consumer, extra_kwargs) if it does,
+ and None if it doesn't.
+ """
+ # TODO: Maybe we can add some kind of caching in here if we can hash
+ # the message with only matchable keys faster than the search?
+ return self.root.match(message)
+
+ def check_default(self, http_consumer=None):
+ """
+ Adds default handlers for Django's default handling of channels.
+ """
+ # We just add the default Django route to the bottom; if the user
+ # has defined another http.request handler, it'll get hit first and run.
+ # Inner import here to avoid circular import; this function only gets
+ # called once, thankfully.
+ from .handler import ViewConsumer
+ self.add_route(Route("http.request", http_consumer or ViewConsumer()))
+ # We also add a no-op websocket.connect consumer to the bottom, as the
+ # spec requires that this is consumed, but Channels does not. Any user
+ # consumer will override this one. Same for websocket.receive.
+ self.add_route(Route("websocket.connect", connect_consumer))
+ self.add_route(Route("websocket.receive", null_consumer))
+ self.add_route(Route("websocket.disconnect", null_consumer))
+
+ @classmethod
+ def resolve_routing(cls, routing):
+ """
+ Takes a routing - if it's a string, it imports it, and if it's a
+ dict, converts it to a list of route()s. Used by this class and Include.
+ """
+ # If the routing was a string, import it
+ if isinstance(routing, six.string_types):
+ module_name, variable_name = routing.rsplit(".", 1)
+ try:
+ routing = getattr(importlib.import_module(module_name), variable_name)
+ except (ImportError, AttributeError) as e:
+ raise ImproperlyConfigured("Cannot import channel routing %r: %s" % (routing, e))
+ # If the routing is a dict, convert it
+ if isinstance(routing, dict):
+ routing = [
+ Route(channel, consumer)
+ for channel, consumer in routing.items()
+ ]
+ return routing
+
+ @classmethod
+ def normalise_re_arg(cls, value):
+ """
+ Normalises regular expression patterns and string inputs to Unicode.
+ """
+ if isinstance(value, six.binary_type):
+ return value.decode("ascii")
+ else:
+ return value
+
+
+class Route(object):
+ """
+ Represents a route to a single consumer, with a channel name
+ and optional message parameter matching.
+ """
+
+ def __init__(self, channels, consumer, **kwargs):
+ # Get channels, make sure it's a list of unicode strings
+ if isinstance(channels, six.string_types):
+ channels = [channels]
+ self.channels = [
+ channel.decode("ascii") if isinstance(channel, six.binary_type) else channel
+ for channel in channels
+ ]
+ # Get consumer, optionally importing it
+ self.consumer = self._resolve_consumer(consumer)
+ # Compile filter regexes up front
+ self.filters = {
+ name: re.compile(Router.normalise_re_arg(value))
+ for name, value in kwargs.items()
+ }
+ # Check filters don't use positional groups
+ for name, regex in self.filters.items():
+ if regex.groups != len(regex.groupindex):
+ raise ValueError(
+ "Filter for %s on %s contains positional groups; "
+ "only named groups are allowed." % (
+ name,
+ self,
+ )
+ )
+
+ def _resolve_consumer(self, consumer):
+ """
+ Turns the consumer from a string into an object if it's a string,
+ passes it through otherwise.
+ """
+ if isinstance(consumer, six.string_types):
+ module_name, variable_name = consumer.rsplit(".", 1)
+ try:
+ consumer = getattr(importlib.import_module(module_name), variable_name)
+ except (ImportError, AttributeError):
+ raise ImproperlyConfigured("Cannot import consumer %r" % consumer)
+ return consumer
+
+ def match(self, message):
+ """
+ Checks to see if we match the Message object. Returns
+ (consumer, kwargs dict) if it matches, None otherwise
+ """
+ # Check for channel match first of all
+ if message.channel.name not in self.channels:
+ return None
+ # Check each message filter and build consumer kwargs as we go
+ call_args = {}
+ for name, value in self.filters.items():
+ if name not in message:
+ return None
+ match = value.match(Router.normalise_re_arg(message[name]))
+ # Any match failure means we pass
+ if match:
+ call_args.update(match.groupdict())
+ else:
+ return None
+ return self.consumer, call_args
+
+ def channel_names(self):
+ """
+ Returns the channel names this route listens on
+ """
+ return set(self.channels)
+
+ def __str__(self):
+ return "%s %s -> %s" % (
+ "/".join(self.channels),
+ "" if not self.filters else "(%s)" % (
+ ", ".join("%s=%s" % (n, v.pattern) for n, v in self.filters.items())
+ ),
+ name_that_thing(self.consumer),
+ )
+
+
+class RouteClass(Route):
+ """
+ Like Route, but targets a class-based consumer rather than a functional
+ one, meaning it looks for a (class) method called "channel_names()" on the
+ object rather than having a single channel passed in.
+ """
+
+ def __init__(self, consumer, **kwargs):
+        # Check the consumer provides a channel_names method
+ consumer = self._resolve_consumer(consumer)
+ if not hasattr(consumer, "channel_names") or not callable(consumer.channel_names):
+ raise ValueError("The consumer passed to RouteClass has no valid channel_names method")
+ # Call super with list of channels
+ super(RouteClass, self).__init__(consumer.channel_names(), consumer, **kwargs)
+
+
+class Include(object):
+ """
+ Represents an inclusion of another routing list in another file.
+ Will automatically modify message match filters to add prefixes,
+ if specified.
+ """
+
+ def __init__(self, routing, **kwargs):
+ self.routing = Router.resolve_routing(routing)
+ self.prefixes = {
+ name: re.compile(Router.normalise_re_arg(value))
+ for name, value in kwargs.items()
+ }
+
+ def match(self, message):
+ """
+ Tries to match the message against our own prefixes, possibly modifying
+ what we send to included things, then tries all included items.
+ """
+ # Check our prefixes match. Do this against a copy of the message so
+ # we can write back any changed values.
+ message = message.copy()
+ call_args = {}
+ for name, prefix in self.prefixes.items():
+ if name not in message:
+ return None
+ value = Router.normalise_re_arg(message[name])
+ match = prefix.match(value)
+ # Any match failure means we pass
+ if match:
+ call_args.update(match.groupdict())
+ # Modify the message value to remove the part we matched on
+ message[name] = value[match.end():]
+ else:
+ return None
+ # Alright, if we got this far our prefixes match. Try all of our
+ # included objects now.
+ for entry in self.routing:
+ match = entry.match(message)
+ if match is not None:
+ call_args.update(match[1])
+ return match[0], call_args
+ # Nothing matched :(
+ return None
+
+ def channel_names(self):
+ """
+ Returns the channel names this route listens on
+ """
+ result = set()
+ for entry in self.routing:
+ result.update(entry.channel_names())
+ return result
+
+
+def null_consumer(*args, **kwargs):
+ """
+ Standard no-op consumer.
+ """
+
+
+def connect_consumer(message, *args, **kwargs):
+ """
+ Accept-all-connections websocket.connect consumer
+ """
+ message.reply_channel.send({"accept": True})
+
+
+# Lowercase standard to match urls.py
+route = Route
+route_class = RouteClass
+include = Include
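
A sketch of a routing list exercising route and include; the paths and dotted consumer paths are illustrative. Note how the include strips its matched "path" prefix before the inner routes are tried:

    from channels import include, route

    blog_routing = [
        # Matches what remains after include strips "^/blog"
        route("websocket.receive", "blog.consumers.ws_receive",
              path=r"^/(?P<slug>[^/]+)/$"),
    ]

    channel_routing = [
        route("http.request", "myapp.consumers.http_consumer"),
        include(blog_routing, path=r"^/blog"),
    ]
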
diff --git a/channels/security/__init__.py b/channels/security/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/channels/security/websockets.py b/channels/security/websockets.py
new file mode 100644
index 0000000..d1dac88
--- /dev/null
+++ b/channels/security/websockets.py
@@ -0,0 +1,90 @@
+from functools import update_wrapper
+
+from django.conf import settings
+from django.http.request import validate_host
+
+from ..exceptions import DenyConnection
+
+try:
+ from urllib.parse import urlparse
+except ImportError:
+ from urlparse import urlparse
+
+
+class BaseOriginValidator(object):
+ """
+ Base class-based decorator for origin validation of WebSocket connect
+ messages.
+
+ This base class handles parsing of the origin header. When the origin header
+    is missing, empty, or contains non-ASCII characters, it raises a
+ DenyConnection exception to reject the connection.
+
+ Subclasses must override the method validate_origin(self, message, origin)
+ to return True when a message should be accepted and False otherwise.
+ """
+
+ def __init__(self, func):
+ update_wrapper(self, func)
+ self.func = func
+
+ def __call__(self, message, *args, **kwargs):
+ origin = self.get_origin(message)
+ if not self.validate_origin(message, origin):
+ raise DenyConnection
+ return self.func(message, *args, **kwargs)
+
+ def get_header(self, message, name):
+ headers = message.content['headers']
+ for header in headers:
+ try:
+ if header[0] == name:
+ return header[1:]
+ except IndexError:
+ continue
+ raise KeyError('No header named "{}"'.format(name))
+
+ def get_origin(self, message):
+ """
+ Returns the origin of a WebSocket connect message.
+
+ Raises DenyConnection for messages with missing or non-ascii Origin
+ header.
+ """
+ try:
+ header = self.get_header(message, b'origin')[0]
+ except (IndexError, KeyError):
+ raise DenyConnection
+ try:
+ origin = header.decode('ascii')
+ except UnicodeDecodeError:
+ raise DenyConnection
+ return origin
+
+ def validate_origin(self, message, origin):
+ """
+ Validates the origin of a WebSocket connect message.
+
+ Must be overridden by subclasses.
+ """
+ raise NotImplementedError('You must override this method.')
+
+
+class AllowedHostsOnlyOriginValidator(BaseOriginValidator):
+ """
+ Class-based decorator for websocket consumers that checks that
+ the origin is allowed according to the ALLOWED_HOSTS setting.
+ """
+
+ def validate_origin(self, message, origin):
+ allowed_hosts = settings.ALLOWED_HOSTS
+ if settings.DEBUG and not allowed_hosts:
+ allowed_hosts = ['localhost', '127.0.0.1', '[::1]']
+
+ origin_hostname = urlparse(origin).hostname
+ valid = (origin_hostname and
+ validate_host(origin_hostname, allowed_hosts))
+ return valid
+
+
+allowed_hosts_only = AllowedHostsOnlyOriginValidator
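+
+# A minimal usage sketch (the consumer is assumed):
+#
+#     from channels.security.websockets import allowed_hosts_only
+#
+#     @allowed_hosts_only
+#     def ws_connect(message):
+#         message.reply_channel.send({"accept": True})
+#
+# Connections whose Origin host is not in ALLOWED_HOSTS raise DenyConnection
+# before ws_connect runs.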
diff --git a/channels/sessions.py b/channels/sessions.py
new file mode 100644
index 0000000..8bf716b
--- /dev/null
+++ b/channels/sessions.py
@@ -0,0 +1,249 @@
+import functools
+import hashlib
+from importlib import import_module
+
+from django.conf import settings
+from django.contrib.sessions.backends import signed_cookies
+from django.contrib.sessions.backends.base import CreateError
+
+from .exceptions import ConsumeLater
+from .handler import AsgiRequest
+from .message import Message
+
+
+def session_for_reply_channel(reply_channel):
+ """
+ Returns a session object tied to the reply_channel unicode string
+ passed in as an argument.
+ """
+ # We hash the whole reply channel name and add a prefix, to fit inside 32 bytes
+ reply_name = reply_channel
+ hashed = hashlib.sha1(reply_name.encode("utf8")).hexdigest()
+ session_key = "chn" + hashed[:29]
+ # Make a session storage
+ session_engine = import_module(getattr(settings, "CHANNEL_SESSION_ENGINE", settings.SESSION_ENGINE))
+ if session_engine is signed_cookies:
+ raise ValueError("You cannot use channels session functionality with signed cookie sessions!")
+ # Force the instance to load in case it resets the session when it does
+ instance = session_engine.SessionStore(session_key=session_key)
+ instance._session.keys()
+ instance._session_key = session_key
+ return instance
+
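+# Illustrative key derivation matching the function above (the channel name
+# is an assumed example):
+#
+#     name = u"websocket.send!a1b2c3"
+#     key = "chn" + hashlib.sha1(name.encode("utf8")).hexdigest()[:29]
+#     # -> a deterministic 32-character session key for this reply channel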
+
+def channel_session(func):
+ """
+ Provides a session-like object called "channel_session" to consumers
+ as a message attribute that will auto-persist across consumers with
+ the same incoming "reply_channel" value.
+
+ Use this to persist data across the lifetime of a connection.
+ """
+ @functools.wraps(func)
+ def inner(*args, **kwargs):
+ message = None
+ for arg in args[:2]:
+ if isinstance(arg, Message):
+ message = arg
+ break
+ if message is None:
+ raise ValueError('channel_session called without Message instance')
+ # Make sure there's NOT a channel_session already
+ if hasattr(message, "channel_session"):
+ try:
+ return func(*args, **kwargs)
+ finally:
+ # Persist session if needed
+ if message.channel_session.modified:
+ message.channel_session.save()
+
+ # Make sure there's a reply_channel
+ if not message.reply_channel:
+ raise ValueError(
+ "No reply_channel sent to consumer; @channel_session " +
+ "can only be used on messages containing it."
+ )
+ # If the session does not already exist, save to force our
+ # session key to be valid.
+ session = session_for_reply_channel(message.reply_channel.name)
+ if not session.exists(session.session_key):
+ try:
+ session.save(must_create=True)
+ except CreateError:
+ # Session wasn't unique, so another consumer is doing the same thing
+ raise ConsumeLater()
+ message.channel_session = session
+ # Run the consumer
+ try:
+ return func(*args, **kwargs)
+ finally:
+ # Persist session if needed
+ if session.modified and not session.is_empty():
+ session.save()
+ return inner
+
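+# A minimal usage sketch (the consumers and the stored key are assumed):
+#
+#     @channel_session
+#     def ws_connect(message):
+#         message.reply_channel.send({"accept": True})
+#         message.channel_session["room"] = "lobby"
+#
+#     @channel_session
+#     def ws_receive(message):
+#         room = message.channel_session["room"]  # persisted since connect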
+
+def wait_channel_name(reply_channel):
+ """
+ Given a reply_channel, returns a wait channel for it.
+ Replaces any ! with ? so process-specific channels become single-reader
+ channels.
+ """
+ return "__wait__.%s" % (reply_channel.replace("!", "?"), )
+
+
+def requeue_messages(message):
+ """
+ Requeue any pending wait-channel messages for this socket connection back onto their original channels
+ """
+ while True:
+ wait_channel = wait_channel_name(message.reply_channel.name)
+ channel, content = message.channel_layer.receive_many([wait_channel], block=False)
+ if channel:
+ original_channel = content.pop("original_channel")
+ try:
+ message.channel_layer.send(original_channel, content)
+ except message.channel_layer.ChannelFull:
+ raise message.channel_layer.ChannelFull(
+ "Cannot requeue pending __wait__ channel message " +
+ "back on to already full channel %s" % original_channel
+ )
+ else:
+ break
+
+
+def enforce_ordering(func=None, slight=False):
+ """
+ Enforces strict (all messages exactly ordered) ordering against a reply_channel.
+
+ Uses sessions to track ordering and socket-specific wait channels for unordered messages.
+ """
+ # Slight is deprecated
+ if slight:
+ raise ValueError("Slight ordering is now always on due to Channels changes. Please remove the decorator.")
+
+ # Main decorator
+ def decorator(func):
+ @channel_session
+ @functools.wraps(func)
+ def inner(message, *args, **kwargs):
+ # Make sure there's an order
+ if "order" not in message.content:
+ raise ValueError(
+ "No `order` value in message; @enforce_ordering " +
+ "can only be used on messages containing it."
+ )
+ order = int(message.content['order'])
+ # See what the current next order should be
+ next_order = message.channel_session.get("__channels_next_order", 0)
+ if order == next_order:
+ # Run consumer
+ func(message, *args, **kwargs)
+ # Mark next message order as available for running
+ message.channel_session["__channels_next_order"] = order + 1
+ message.channel_session.save()
+ message.channel_session.modified = False
+ requeue_messages(message)
+ else:
+ # Since out of order, enqueue message temporarily to wait channel for this socket connection
+ wait_channel = wait_channel_name(message.reply_channel.name)
+ message.content["original_channel"] = message.channel.name
+ try:
+ message.channel_layer.send(wait_channel, message.content)
+ except message.channel_layer.ChannelFull:
+ raise message.channel_layer.ChannelFull(
+ "Cannot add unordered message to already " +
+ "full __wait__ channel for socket %s" % message.reply_channel.name
+ )
+ # Next order may have changed while this message was being processed
+ # Requeue messages if this has happened
+ if order == message.channel_session.load().get("__channels_next_order", 0):
+ requeue_messages(message)
+
+ return inner
+ if func is not None:
+ return decorator(func)
+ else:
+ return decorator
+
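+# A minimal usage sketch (the consumer body is assumed); typically applied to
+# websocket.receive so frames are processed in the order the client sent them:
+#
+#     @enforce_ordering
+#     def ws_receive(message):
+#         print(message.content.get("text"))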
+
+def http_session(func):
+ """
+ Wraps an HTTP or WebSocket connect consumer (or any consumer of messages
+ that provides a "cookies" or "get" attribute) to provide an "http_session"
+ attribute that behaves like request.session; that is, it's keyed off a
+ per-user session key that is saved in a cookie or passed as the
+ "session_key" GET parameter.
+
+ It won't automatically create and set a session cookie for users who
+ don't have one - that's what SessionMiddleware is for; this is a simpler,
+ read-only version for more low-level code.
+
+ If a message does not have a session we can inflate, the "http_session"
+ attribute will be None, rather than an empty session you can write to.
+
+ Does not allow a new session to be set; that must be done via a view. This
+ is only an accessor for any existing session.
+ """
+ @functools.wraps(func)
+ def inner(message, *args, **kwargs):
+ # Make sure there's NOT a http_session already
+ if hasattr(message, "http_session"):
+ try:
+ return func(message, *args, **kwargs)
+ finally:
+ # Persist session if needed (won't be saved if error happens)
+ if message.http_session is not None and message.http_session.modified:
+ message.http_session.save()
+
+ try:
+ # We want to parse the WebSocket (or similar HTTP-lite) message
+ # to get cookies and GET, but we need to add in a few things that
+ # might not have been there.
+ if "method" not in message.content:
+ message.content['method'] = "FAKE"
+ request = AsgiRequest(message)
+ except Exception as e:
+ raise ValueError("Cannot parse HTTP message - are you sure this is a HTTP consumer? %s" % e)
+ # Make sure there's a session key
+ session_key = request.GET.get("session_key", None)
+ if session_key is None:
+ session_key = request.COOKIES.get(settings.SESSION_COOKIE_NAME, None)
+ # Make a session storage
+ if session_key:
+ session_engine = import_module(settings.SESSION_ENGINE)
+ session = session_engine.SessionStore(session_key=session_key)
+ else:
+ session = None
+ message.http_session = session
+ # Run the consumer
+ result = func(message, *args, **kwargs)
+ # Persist session if needed (won't be saved if error happens)
+ if session is not None and session.modified:
+ session.save()
+ return result
+ return inner
+
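+# A minimal usage sketch (the consumer is assumed; DenyConnection comes from
+# channels.exceptions):
+#
+#     @http_session
+#     def ws_connect(message):
+#         if message.http_session is None:
+#             raise DenyConnection  # no existing Django session to read
+#         user_id = message.http_session.get("_auth_user_id")
+#         message.reply_channel.send({"accept": True})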
+
+def channel_and_http_session(func):
+ """
+ Enables both the channel_session and http_session.
+
+ Stores the http session key in the channel_session on websocket.connect messages.
+ It will then hydrate the http_session from that same key on subsequent messages.
+ """
+ @http_session
+ @channel_session
+ @functools.wraps(func)
+ def inner(message, *args, **kwargs):
+ # Store the session key in channel_session
+ if message.http_session is not None and settings.SESSION_COOKIE_NAME not in message.channel_session:
+ message.channel_session[settings.SESSION_COOKIE_NAME] = message.http_session.session_key
+ # Hydrate the http_session from session_key
+ elif message.http_session is None and settings.SESSION_COOKIE_NAME in message.channel_session:
+ session_engine = import_module(settings.SESSION_ENGINE)
+ session = session_engine.SessionStore(session_key=message.channel_session[settings.SESSION_COOKIE_NAME])
+ message.http_session = session
+ # Run the consumer
+ return func(message, *args, **kwargs)
+ return inner
diff --git a/channels/signals.py b/channels/signals.py
new file mode 100644
index 0000000..b663bb8
--- /dev/null
+++ b/channels/signals.py
@@ -0,0 +1,10 @@
+from django.db import close_old_connections
+from django.dispatch import Signal
+
+consumer_started = Signal(providing_args=["environ"])
+consumer_finished = Signal()
+worker_ready = Signal()
+worker_process_ready = Signal()
+
+# Connect connection closer to consumer finished as well
+consumer_finished.connect(close_old_connections)
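+
+# A minimal receiver sketch (the receiver function itself is assumed):
+#
+#     from channels.signals import consumer_finished
+#
+#     def on_consumer_finished(sender, **kwargs):
+#         pass  # e.g. flush per-consumer metrics here
+#
+#     consumer_finished.connect(on_consumer_finished)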
diff --git a/channels/static/channels/js/websocketbridge.js b/channels/static/channels/js/websocketbridge.js
new file mode 100644
index 0000000..d7b2845
--- /dev/null
+++ b/channels/static/channels/js/websocketbridge.js
@@ -0,0 +1,399 @@
+/*!
+ * Do not edit! This file is autogenerated by running `npm run browserify`.
+ */
+(function(f){if(typeof exports==="object"&&typeof module!=="undefined"){module.exports=f()}else if(typeof define==="function"&&define.amd){define([],f)}else{var g;if(typeof window!=="undefined"){g=window}else if(typeof global!=="undefined"){g=global}else if(typeof self!=="undefined"){g=self}else{g=this}g.channels = f()}})(function(){var define,module,exports;return (function e(t,n,r){function s(o,u){if(!n[o]){if(!t[o]){var a=typeof require=="function"&&require;if(!u&&a)return a(o,!0);if(i)return i(o,!0);var f=new Error("Cannot find module '"+o+"'");throw f.code="MODULE_NOT_FOUND",f}var l=n[o]={exports:{}};t[o][0].call(l.exports,function(e){var n=t[o][1][e];return s(n?n:e)},l,l.exports,e,t,n,r)}return n[o].exports}var i=typeof require=="function"&&require;for(var o=0;o config.maxReconnectionDelay)
+ ? config.maxReconnectionDelay
+ : newDelay;
+};
+var LEVEL_0_EVENTS = ['onopen', 'onclose', 'onmessage', 'onerror'];
+var reassignEventListeners = function (ws, oldWs, listeners) {
+ Object.keys(listeners).forEach(function (type) {
+ listeners[type].forEach(function (_a) {
+ var listener = _a[0], options = _a[1];
+ ws.addEventListener(type, listener, options);
+ });
+ });
+ if (oldWs) {
+ LEVEL_0_EVENTS.forEach(function (name) { ws[name] = oldWs[name]; });
+ }
+};
+var ReconnectingWebsocket = function (url, protocols, options) {
+ var _this = this;
+ if (options === void 0) { options = {}; }
+ var ws;
+ var connectingTimeout;
+ var reconnectDelay = 0;
+ var retriesCount = 0;
+ var shouldRetry = true;
+ var savedOnClose = null;
+ var listeners = {};
+ // require new to construct
+ if (!(this instanceof ReconnectingWebsocket)) {
+ throw new TypeError("Failed to construct 'ReconnectingWebSocket': Please use the 'new' operator");
+ }
+ // Set config. Not using `Object.assign` because of IE11
+ var config = getDefaultOptions();
+ Object.keys(config)
+ .filter(function (key) { return options.hasOwnProperty(key); })
+ .forEach(function (key) { return config[key] = options[key]; });
+ if (!isWebSocket(config.constructor)) {
+ throw new TypeError('Invalid WebSocket constructor. Set `options.constructor`');
+ }
+ var log = config.debug ? function () {
+ var params = [];
+ for (var _i = 0; _i < arguments.length; _i++) {
+ params[_i - 0] = arguments[_i];
+ }
+ return console.log.apply(console, ['RWS:'].concat(params));
+ } : function () { };
+ /**
+ * Not using dispatchEvent, otherwise we must use a DOM Event object
+ * Deferred because we want to handle the close event before this
+ */
+ var emitError = function (code, msg) { return setTimeout(function () {
+ var err = new Error(msg);
+ err.code = code;
+ if (Array.isArray(listeners.error)) {
+ listeners.error.forEach(function (_a) {
+ var fn = _a[0];
+ return fn(err);
+ });
+ }
+ if (ws.onerror) {
+ ws.onerror(err);
+ }
+ }, 0); };
+ var handleClose = function () {
+ log('close');
+ retriesCount++;
+ log('retries count:', retriesCount);
+ if (retriesCount > config.maxRetries) {
+ emitError('EHOSTDOWN', 'Too many failed connection attempts');
+ return;
+ }
+ if (!reconnectDelay) {
+ reconnectDelay = initReconnectionDelay(config);
+ }
+ else {
+ reconnectDelay = updateReconnectionDelay(config, reconnectDelay);
+ }
+ log('reconnectDelay:', reconnectDelay);
+ if (shouldRetry) {
+ setTimeout(connect, reconnectDelay);
+ }
+ };
+ var connect = function () {
+ log('connect');
+ var oldWs = ws;
+ ws = new config.constructor(url, protocols);
+ connectingTimeout = setTimeout(function () {
+ log('timeout');
+ ws.close();
+ emitError('ETIMEDOUT', 'Connection timeout');
+ }, config.connectionTimeout);
+ log('bypass properties');
+ for (var key in ws) {
+ // @todo move to constant
+ if (['addEventListener', 'removeEventListener', 'close', 'send'].indexOf(key) < 0) {
+ bypassProperty(ws, _this, key);
+ }
+ }
+ ws.addEventListener('open', function () {
+ clearTimeout(connectingTimeout);
+ log('open');
+ reconnectDelay = initReconnectionDelay(config);
+ log('reconnectDelay:', reconnectDelay);
+ retriesCount = 0;
+ });
+ ws.addEventListener('close', handleClose);
+ reassignEventListeners(ws, oldWs, listeners);
+ // because when closing with fastClose=true, it is saved and set to null to avoid double calls
+ ws.onclose = ws.onclose || savedOnClose;
+ savedOnClose = null;
+ };
+ log('init');
+ connect();
+ this.close = function (code, reason, _a) {
+ if (code === void 0) { code = 1000; }
+ if (reason === void 0) { reason = ''; }
+ var _b = _a === void 0 ? {} : _a, _c = _b.keepClosed, keepClosed = _c === void 0 ? false : _c, _d = _b.fastClose, fastClose = _d === void 0 ? true : _d, _e = _b.delay, delay = _e === void 0 ? 0 : _e;
+ if (delay) {
+ reconnectDelay = delay;
+ }
+ shouldRetry = !keepClosed;
+ ws.close(code, reason);
+ if (fastClose) {
+ var fakeCloseEvent_1 = {
+ code: code,
+ reason: reason,
+ wasClean: true,
+ };
+ // execute close listeners soon with a fake closeEvent
+ // and remove them from the WS instance so they
+ // don't get fired on the real close.
+ handleClose();
+ ws.removeEventListener('close', handleClose);
+ // run and remove level2
+ if (Array.isArray(listeners.close)) {
+ listeners.close.forEach(function (_a) {
+ var listener = _a[0], options = _a[1];
+ listener(fakeCloseEvent_1);
+ ws.removeEventListener('close', listener, options);
+ });
+ }
+ // run and remove level0
+ if (ws.onclose) {
+ savedOnClose = ws.onclose;
+ ws.onclose(fakeCloseEvent_1);
+ ws.onclose = null;
+ }
+ }
+ };
+ this.send = function (data) {
+ ws.send(data);
+ };
+ this.addEventListener = function (type, listener, options) {
+ if (Array.isArray(listeners[type])) {
+ if (!listeners[type].some(function (_a) {
+ var l = _a[0];
+ return l === listener;
+ })) {
+ listeners[type].push([listener, options]);
+ }
+ }
+ else {
+ listeners[type] = [[listener, options]];
+ }
+ ws.addEventListener(type, listener, options);
+ };
+ this.removeEventListener = function (type, listener, options) {
+ if (Array.isArray(listeners[type])) {
+ listeners[type] = listeners[type].filter(function (_a) {
+ var l = _a[0];
+ return l !== listener;
+ });
+ }
+ ws.removeEventListener(type, listener, options);
+ };
+};
+module.exports = ReconnectingWebsocket;
+
+},{}],2:[function(require,module,exports){
+'use strict';
+
+Object.defineProperty(exports, "__esModule", {
+ value: true
+});
+exports.WebSocketBridge = undefined;
+
+var _extends = Object.assign || function (target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i]; for (var key in source) { if (Object.prototype.hasOwnProperty.call(source, key)) { target[key] = source[key]; } } } return target; };
+
+var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();
+
+var _reconnectingWebsocket = require('reconnecting-websocket');
+
+var _reconnectingWebsocket2 = _interopRequireDefault(_reconnectingWebsocket);
+
+function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }
+
+function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } }
+
+/**
+ * Bridge between Channels and plain javascript.
+ *
+ * @example
+ * const webSocketBridge = new WebSocketBridge();
+ * webSocketBridge.connect();
+ * webSocketBridge.listen(function(action, stream) {
+ * console.log(action, stream);
+ * });
+ */
+var WebSocketBridge = function () {
+ function WebSocketBridge(options) {
+ _classCallCheck(this, WebSocketBridge);
+
+ /**
+ * The underlying `ReconnectingWebSocket` instance.
+ *
+ * @type {ReconnectingWebSocket}
+ */
+ this.socket = null;
+ this.streams = {};
+ this.default_cb = null;
+ this.options = _extends({}, options);
+ }
+
+ /**
+ * Connect to the websocket server
+ *
+ * @param {String} [url] The url of the websocket. Defaults to
+ * `window.location.host`
+ * @param {String[]|String} [protocols] Optional string or array of protocols.
+ * @param {Object} options Object of options for [`reconnecting-websocket`](https://github.com/joewalnes/reconnecting-websocket#options-1).
+ * @example
+ * const webSocketBridge = new WebSocketBridge();
+ * webSocketBridge.connect();
+ */
+
+
+ _createClass(WebSocketBridge, [{
+ key: 'connect',
+ value: function connect(url, protocols, options) {
+ var _url = void 0;
+ // Use wss:// if running on https://
+ var scheme = window.location.protocol === 'https:' ? 'wss' : 'ws';
+ var base_url = scheme + '://' + window.location.host;
+ if (url === undefined) {
+ _url = base_url;
+ } else {
+ // Support relative URLs
+ if (url[0] == '/') {
+ _url = '' + base_url + url;
+ } else {
+ _url = url;
+ }
+ }
+ this.socket = new _reconnectingWebsocket2.default(_url, protocols, options);
+ }
+
+ /**
+ * Starts listening for messages on the websocket, demultiplexing if necessary.
+ *
+ * @param {Function} [cb] Callback to be executed when a message
+ * arrives. The callback will receive `action` and `stream` parameters
+ *
+ * @example
+ * const webSocketBridge = new WebSocketBridge();
+ * webSocketBridge.connect();
+ * webSocketBridge.listen(function(action, stream) {
+ * console.log(action, stream);
+ * });
+ */
+
+ }, {
+ key: 'listen',
+ value: function listen(cb) {
+ var _this = this;
+
+ this.default_cb = cb;
+ this.socket.onmessage = function (event) {
+ var msg = JSON.parse(event.data);
+ var action = void 0;
+ var stream = void 0;
+
+ if (msg.stream !== undefined) {
+ action = msg.payload;
+ stream = msg.stream;
+ var stream_cb = _this.streams[stream];
+ stream_cb ? stream_cb(action, stream) : null;
+ } else {
+ action = msg;
+ stream = null;
+ _this.default_cb ? _this.default_cb(action, stream) : null;
+ }
+ };
+ }
+
+ /**
+ * Adds a 'stream handler' callback. Messages coming from the specified stream
+ * will call the specified callback.
+ *
+ * @param {String} stream The stream name
+ * @param {Function} cb Callback to be executed when a message
+ * arrives. The callback will receive `action` and `stream` parameters.
+ * @example
+ * const webSocketBridge = new WebSocketBridge();
+ * webSocketBridge.connect();
+ * webSocketBridge.listen();
+ * webSocketBridge.demultiplex('mystream', function(action, stream) {
+ * console.log(action, stream);
+ * });
+ * webSocketBridge.demultiplex('myotherstream', function(action, stream) {
+ * console.info(action, stream);
+ * });
+ */
+
+ }, {
+ key: 'demultiplex',
+ value: function demultiplex(stream, cb) {
+ this.streams[stream] = cb;
+ }
+
+ /**
+ * Sends a message to the reply channel.
+ *
+ * @param {Object} msg The message
+ *
+ * @example
+ * webSocketBridge.send({prop1: 'value1', prop2: 'value1'});
+ */
+
+ }, {
+ key: 'send',
+ value: function send(msg) {
+ this.socket.send(JSON.stringify(msg));
+ }
+
+ /**
+ * Returns an object to send messages to a specific stream
+ *
+ * @param {String} stream The stream name
+ * @return {Object} convenience object to send messages to `stream`.
+ * @example
+ * webSocketBridge.stream('mystream').send({prop1: 'value1', prop2: 'value1'})
+ */
+
+ }, {
+ key: 'stream',
+ value: function stream(_stream) {
+ var _this2 = this;
+
+ return {
+ send: function send(action) {
+ var msg = {
+ stream: _stream,
+ payload: action
+ };
+ _this2.socket.send(JSON.stringify(msg));
+ }
+ };
+ }
+ }]);
+
+ return WebSocketBridge;
+}();
+
+exports.WebSocketBridge = WebSocketBridge;
+
+},{"reconnecting-websocket":1}]},{},[2])(2)
+});
\ No newline at end of file
diff --git a/channels/staticfiles.py b/channels/staticfiles.py
new file mode 100644
index 0000000..8169be8
--- /dev/null
+++ b/channels/staticfiles.py
@@ -0,0 +1,65 @@
+from __future__ import unicode_literals
+
+from django.conf import settings
+from django.contrib.staticfiles import utils
+from django.contrib.staticfiles.views import serve
+from django.utils.six.moves.urllib.parse import urlparse
+from django.utils.six.moves.urllib.request import url2pathname
+
+from .handler import AsgiHandler, ViewConsumer
+
+
+class StaticFilesHandler(AsgiHandler):
+ """
+ Wrapper handler that serves the static files directory.
+ """
+
+ def __init__(self, *args, **kwargs):
+ super(StaticFilesHandler, self).__init__()
+ self.base_url = urlparse(self.get_base_url())
+
+ def get_base_url(self):
+ utils.check_settings()
+ return settings.STATIC_URL
+
+ def _should_handle(self, path):
+ """
+ Checks if the path should be handled. Ignores the path if:
+
+ * the host is provided as part of the base_url
+ * the request's path isn't under the media path (or equal)
+ """
+ return path.startswith(self.base_url[2]) and not self.base_url[1]
+
+ def file_path(self, url):
+ """
+ Returns the relative path to the media file on disk for the given URL.
+ """
+ relative_url = url[len(self.base_url[2]):]
+ return url2pathname(relative_url)
+
+ def serve(self, request):
+ """
+ Actually serves the request path.
+ """
+ return serve(request, self.file_path(request.path), insecure=True)
+
+ def get_response(self, request):
+ from django.http import Http404
+
+ if self._should_handle(request.path):
+ try:
+ return self.serve(request)
+ except Http404 as e:
+ if settings.DEBUG:
+ from django.views import debug
+ return debug.technical_404_response(request, e)
+ return super(StaticFilesHandler, self).get_response(request)
+
+
+class StaticFilesConsumer(ViewConsumer):
+ """
+ Overrides standard view consumer with our new handler
+ """
+
+ handler_class = StaticFilesHandler
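+
+# A minimal wiring sketch (an assumed setup; runserver installs this
+# consumer automatically when staticfiles is enabled):
+#
+#     from channels.routing import route
+#     from channels.staticfiles import StaticFilesConsumer
+#
+#     channel_routing = [route("http.request", StaticFilesConsumer())]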
diff --git a/channels/test/__init__.py b/channels/test/__init__.py
new file mode 100644
index 0000000..f0835ef
--- /dev/null
+++ b/channels/test/__init__.py
@@ -0,0 +1,4 @@
+from .base import TransactionChannelTestCase, ChannelTestCase, Client, apply_routes # NOQA isort:skip
+from .http import HttpClient # NOQA isort:skip
+from .websocket import WSClient # NOQA isort:skip
+from .liveserver import ChannelLiveServerTestCase # NOQA isort:skip
diff --git a/channels/test/base.py b/channels/test/base.py
new file mode 100644
index 0000000..e3ac34d
--- /dev/null
+++ b/channels/test/base.py
@@ -0,0 +1,232 @@
+from __future__ import unicode_literals
+
+import copy
+import random
+import string
+from functools import wraps
+
+from asgiref.inmemory import ChannelLayer as InMemoryChannelLayer
+from django.db import close_old_connections
+from django.test.testcases import TestCase, TransactionTestCase
+
+from .. import DEFAULT_CHANNEL_LAYER
+from ..asgi import ChannelLayerWrapper, channel_layers
+from ..channel import Group
+from ..exceptions import ChannelSocketException
+from ..message import Message
+from ..routing import Router, include
+from ..signals import consumer_finished, consumer_started
+
+
+class ChannelTestCaseMixin(object):
+ """
+ Mixin for TestCase classes that provides easy methods for testing channels
+ using an in-memory backend to capture messages, and assertion methods to
+ allow checking of what was sent.
+
+ Combine with TestCase (as ChannelTestCase below does) to also get per-test
+ transactions, as long as the database backend supports them.
+ """
+
+ # Customizable so users can test multi-layer setups
+ test_channel_aliases = [DEFAULT_CHANNEL_LAYER]
+
+ def _pre_setup(self):
+ """
+ Initialises in memory channel layer for the duration of the test
+ """
+ super(ChannelTestCaseMixin, self)._pre_setup()
+ self._old_layers = {}
+ for alias in self.test_channel_aliases:
+ # Swap in an in memory layer wrapper and keep the old one around
+ self._old_layers[alias] = channel_layers.set(
+ alias,
+ ChannelLayerWrapper(
+ InMemoryChannelLayer(),
+ alias,
+ channel_layers[alias].routing[:],
+ )
+ )
+
+ def _post_teardown(self):
+ """
+ Undoes the channel rerouting
+ """
+ for alias in self.test_channel_aliases:
+ # Swap in an in memory layer wrapper and keep the old one around
+ channel_layers.set(alias, self._old_layers[alias])
+ del self._old_layers
+ super(ChannelTestCaseMixin, self)._post_teardown()
+
+ def get_next_message(self, channel, alias=DEFAULT_CHANNEL_LAYER, require=False):
+ """
+ Gets the next message that was sent to the channel during the test,
+ or None if no message is available.
+
+ If require is true, will fail the test if no message is received.
+ """
+ recv_channel, content = channel_layers[alias].receive_many([channel])
+ if recv_channel is None:
+ if require:
+ self.fail("Expected a message on channel %s, got none" % channel)
+ else:
+ return None
+ return Message(content, recv_channel, channel_layers[alias])
+
+
+class ChannelTestCase(ChannelTestCaseMixin, TestCase):
+ pass
+
+
+class TransactionChannelTestCase(ChannelTestCaseMixin, TransactionTestCase):
+ pass
+
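+# A minimal test sketch (channel name and payload are assumed):
+#
+#     from channels import Channel
+#
+#     class PingTests(ChannelTestCase):
+#         def test_ping(self):
+#             Channel("ping").send({"value": 1})
+#             message = self.get_next_message("ping", require=True)
+#             self.assertEqual(message.content["value"], 1)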
+
+class Client(object):
+ """
+ Channel client abstraction that provides easy methods for testing the full
+ life cycle of a message in channels, with a predetermined reply channel.
+ """
+
+ def __init__(self, alias=DEFAULT_CHANNEL_LAYER):
+ self.reply_channel = alias + ''.join([random.choice(string.ascii_letters) for _ in range(5)])
+ self.alias = alias
+
+ @property
+ def channel_layer(self):
+ """Channel layer as lazy property"""
+ return channel_layers[self.alias]
+
+ def get_next_message(self, channel):
+ """
+ Gets the next message that was sent to the channel during the test,
+ or None if no message is available.
+ """
+ recv_channel, content = channel_layers[self.alias].receive_many([channel])
+ if recv_channel is None:
+ return
+ return Message(content, recv_channel, channel_layers[self.alias])
+
+ def get_consumer_by_channel(self, channel):
+ message = Message({'text': ''}, channel, self.channel_layer)
+ match = self.channel_layer.router.match(message)
+ if match:
+ consumer, kwargs = match
+ return consumer
+
+ def send(self, to, content={}):
+ """
+ Send a message to a channel.
+ Adds reply_channel name to the message.
+ """
+ content = copy.deepcopy(content)
+ content.setdefault('reply_channel', self.reply_channel)
+ self.channel_layer.send(to, content)
+
+ def consume(self, channel, fail_on_none=True):
+ """
+ Get the next message for the channel name and run the consumer it routes to
+ """
+ message = self.get_next_message(channel)
+ if message:
+ match = self.channel_layer.router.match(message)
+ if match:
+ consumer, kwargs = match
+ try:
+ consumer_started.send(sender=self.__class__)
+ return consumer(message, **kwargs)
+ except ChannelSocketException as e:
+ e.run(message)
+ finally:
+ # Copy Django's workaround so we don't actually close DB conns
+ consumer_finished.disconnect(close_old_connections)
+ consumer_finished.send(sender=self.__class__)
+ consumer_finished.connect(close_old_connections)
+ elif fail_on_none:
+ raise AssertionError("Can't find consumer for message %s" % message)
+ elif fail_on_none:
+ raise AssertionError("No message for channel %s" % channel)
+
+ def send_and_consume(self, channel, content={}, fail_on_none=True):
+ """
+ Reproduce the full life cycle of the message
+ """
+ self.send(channel, content)
+ return self.consume(channel, fail_on_none=fail_on_none)
+
+ def receive(self):
+ """
+ Get the content of the next message on the reply channel, if one exists
+ """
+ message = self.get_next_message(self.reply_channel)
+ if message:
+ return message.content
+
+ def join_group(self, group_name):
+ Group(group_name).add(self.reply_channel)
+
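+# A minimal usage sketch (channel name and payload are assumed):
+#
+#     client = Client()
+#     client.send_and_consume("websocket.receive", {"text": "hello"})
+#     reply = client.receive()  # content sent to the reply channel, or None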
+
+class apply_routes(object):
+ """
+ Decorator/context manager that rewrites a layer's routes within its scope.
+ Helpful for testing groups of routes/consumers as an isolated application.
+
+ The routes to apply can be a list of Route instances, or a list of such lists.
+ """
+
+ def __init__(self, routes, aliases=[DEFAULT_CHANNEL_LAYER]):
+ self._aliases = aliases
+ self.routes = routes
+ self._old_routing = {}
+
+ def enter(self):
+ """
+ Store old routes and apply new one
+ """
+ for alias in self._aliases:
+ channel_layer = channel_layers[alias]
+ self._old_routing[alias] = channel_layer.routing
+ if isinstance(self.routes, (list, tuple)):
+ if isinstance(self.routes[0], (list, tuple)):
+ routes = list(map(include, self.routes))
+ else:
+ routes = self.routes
+ else:
+ routes = [self.routes]
+ channel_layer.routing = routes
+ channel_layer.router = Router(routes)
+
+ def exit(self, exc_type=None, exc_val=None, exc_tb=None):
+ """
+ Undoes rerouting
+ """
+ for alias in self._aliases:
+ channel_layer = channel_layers[alias]
+ channel_layer.routing = self._old_routing[alias]
+ channel_layer.router = Router(self._old_routing[alias])
+
+ __enter__ = enter
+ __exit__ = exit
+
+ def __call__(self, test_func):
+ if isinstance(test_func, type):
+ old_setup = test_func.setUp
+ old_teardown = test_func.tearDown
+
+ def new_setup(this):
+ self.enter()
+ old_setup(this)
+
+ def new_teardown(this):
+ self.exit()
+ old_teardown(this)
+
+ test_func.setUp = new_setup
+ test_func.tearDown = new_teardown
+ return test_func
+ else:
+ @wraps(test_func)
+ def inner(*args, **kwargs):
+ with self:
+ return test_func(*args, **kwargs)
+ return inner
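+
+# A minimal usage sketch (echo_consumer is assumed; route comes from
+# channels.routing):
+#
+#     with apply_routes([route("websocket.receive", echo_consumer)]):
+#         client = Client()
+#         client.send_and_consume("websocket.receive", {"text": "hi"})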
diff --git a/channels/test/http.py b/channels/test/http.py
new file mode 100644
index 0000000..f263f9f
--- /dev/null
+++ b/channels/test/http.py
@@ -0,0 +1,8 @@
+import warnings
+
+warnings.warn(
+ "test.http.HttpClient is deprecated. Use test.websocket.WSClient",
+ DeprecationWarning,
+)
+
+from .websocket import WSClient as HttpClient # NOQA isort:skip
diff --git a/channels/test/liveserver.py b/channels/test/liveserver.py
new file mode 100644
index 0000000..78bf252
--- /dev/null
+++ b/channels/test/liveserver.py
@@ -0,0 +1,256 @@
+import multiprocessing
+
+import django
+from daphne.server import Server
+from django.apps import apps
+from django.core.exceptions import ImproperlyConfigured
+from django.db import connections
+from django.db.utils import load_backend
+from django.test.testcases import TransactionTestCase
+from django.test.utils import modify_settings, override_settings
+from twisted.internet import reactor
+
+from .. import DEFAULT_CHANNEL_LAYER
+from ..asgi import ChannelLayerManager
+from ..staticfiles import StaticFilesConsumer
+from ..worker import Worker, WorkerGroup
+
+# NOTE: We use ChannelLayerManager to prevent layer instance sharing
+# between forked processes. Some layer implementations create
+# connections inside the __init__ method. After forking, child
+# processes can lose the ability to use these connections and typically
+# get stuck on some network operation. To prevent this we use a new
+# ChannelLayerManager each time we want to instantiate the default layer.
+# This guarantees that a new layer instance will be created and a
+# new connection will be established.
+
+
+class ProcessSetup(multiprocessing.Process):
+ """Common initialization steps for test subprocess."""
+
+ def common_setup(self):
+
+ self.setup_django()
+ self.setup_databases()
+ self.override_settings()
+
+ def setup_django(self):
+
+ if django.VERSION >= (1, 10):
+ django.setup(set_prefix=False)
+ else:
+ django.setup()
+
+ def cleanup_connections(self):
+
+ # Channels runs `django.db.close_old_connections` as a signal
+ # receiver after each consumer-finished event. This function
+ # iterates over each created connection wrapper, checks whether
+ # the connection is still usable, and closes it otherwise. Under
+ # normal circumstances this is a very reasonable approach.
+ # When a process starts the usual way, `django.db.connections`
+ # contains an empty connection list. But the channels worker in
+ # the test case is created with the fork system call. This means
+ # file descriptors from the parent process are available in
+ # the connection list, but the connections themselves are not
+ # usable. So the test worker would close the parent process's
+ # connections and the test suite would fail when it tries to
+ # flush the database after the test run.
+ #
+ # See https://github.com/django/channels/issues/614
+ for alias in self.databases:
+ del connections[alias]
+
+ def setup_databases(self):
+
+ self.cleanup_connections()
+ for alias, db in self.databases.items():
+ backend = load_backend(db['ENGINE'])
+ conn = backend.DatabaseWrapper(db, alias)
+ if django.VERSION >= (1, 9):
+ connections[alias].creation.set_as_test_mirror(
+ conn.settings_dict,
+ )
+ else:
+ test_db_name = conn.settings_dict['NAME']
+ connections[alias].settings_dict['NAME'] = test_db_name
+
+ def override_settings(self):
+
+ if self.overridden_settings:
+ overridden = override_settings(**self.overridden_settings)
+ overridden.enable()
+
+ if self.modified_settings:
+ modified = modify_settings(self.modified_settings)
+ modified.enable()
+
+
+class WorkerProcess(ProcessSetup):
+
+ def __init__(self, is_ready, n_threads, overridden_settings,
+ modified_settings, databases, serve_static):
+
+ self.is_ready = is_ready
+ self.n_threads = n_threads
+ self.overridden_settings = overridden_settings
+ self.modified_settings = modified_settings
+ self.databases = databases
+ self.serve_static = serve_static
+ super(WorkerProcess, self).__init__()
+ self.daemon = True
+
+ def run(self):
+
+ try:
+ self.common_setup()
+ channel_layers = ChannelLayerManager()
+ channel_layer = channel_layers.make_test_backend(DEFAULT_CHANNEL_LAYER)
+ if self.serve_static and apps.is_installed('django.contrib.staticfiles'):
+ channel_layer.router.check_default(http_consumer=StaticFilesConsumer())
+ else:
+ channel_layer.router.check_default()
+ if self.n_threads == 1:
+ self.worker = Worker(
+ channel_layer=channel_layer,
+ signal_handlers=False,
+ )
+ else:
+ self.worker = WorkerGroup(
+ channel_layer=channel_layer,
+ signal_handlers=False,
+ n_threads=self.n_threads,
+ )
+ self.worker.ready()
+ self.is_ready.set()
+ self.worker.run()
+ except Exception:
+ self.is_ready.set()
+ raise
+
+
+class DaphneProcess(ProcessSetup):
+
+ def __init__(self, host, port_storage, is_ready, overridden_settings,
+ modified_settings, databases):
+
+ self.host = host
+ self.port_storage = port_storage
+ self.is_ready = is_ready
+ self.overridden_settings = overridden_settings
+ self.modified_settings = modified_settings
+ self.databases = databases
+ super(DaphneProcess, self).__init__()
+ self.daemon = True
+
+ def run(self):
+
+ try:
+ self.common_setup()
+ channel_layers = ChannelLayerManager()
+ channel_layer = channel_layers.make_test_backend(DEFAULT_CHANNEL_LAYER)
+ self.server = Server(
+ channel_layer=channel_layer,
+ endpoints=['tcp:interface=%s:port=0' % (self.host)],
+ signal_handlers=False,
+ )
+ reactor.callLater(0.5, self.resolve_port)
+ self.server.run()
+ except Exception:
+ self.is_ready.set()
+ raise
+
+ def resolve_port(self):
+
+ port = self.server.listeners[0].result.getHost().port
+ self.port_storage.value = port
+ self.is_ready.set()
+
+
+class ChannelLiveServerTestCase(TransactionTestCase):
+ """
+ Does basically the same as TransactionTestCase but also launches a
+ live Daphne server and Channels worker in a separate process, so
+ that the tests may use another test framework, such as Selenium,
+ instead of the built-in dummy client.
+ """
+
+ host = 'localhost'
+ ProtocolServerProcess = DaphneProcess
+ WorkerProcess = WorkerProcess
+ worker_threads = 1
+ serve_static = True
+
+ @property
+ def live_server_url(self):
+
+ return 'http://%s:%s' % (self.host, self._port_storage.value)
+
+ @property
+ def live_server_ws_url(self):
+
+ return 'ws://%s:%s' % (self.host, self._port_storage.value)
+
+ def _pre_setup(self):
+
+ for connection in connections.all():
+ if self._is_in_memory_db(connection):
+ raise ImproperlyConfigured(
+ 'ChannelLiveServerTestCase cannot be used with in-memory databases'
+ )
+
+ channel_layers = ChannelLayerManager()
+ if len(channel_layers.configs) > 1:
+ raise ImproperlyConfigured(
+ 'ChannelLiveServerTestCase does not support multiple CHANNEL_LAYERS at this time'
+ )
+
+ channel_layer = channel_layers.make_test_backend(DEFAULT_CHANNEL_LAYER)
+ if 'flush' in channel_layer.extensions:
+ channel_layer.flush()
+
+ super(ChannelLiveServerTestCase, self)._pre_setup()
+
+ server_ready = multiprocessing.Event()
+ self._port_storage = multiprocessing.Value('i')
+ self._server_process = self.ProtocolServerProcess(
+ self.host,
+ self._port_storage,
+ server_ready,
+ self._overridden_settings,
+ self._modified_settings,
+ connections.databases,
+ )
+ self._server_process.start()
+ server_ready.wait()
+
+ worker_ready = multiprocessing.Event()
+ self._worker_process = self.WorkerProcess(
+ worker_ready,
+ self.worker_threads,
+ self._overridden_settings,
+ self._modified_settings,
+ connections.databases,
+ self.serve_static,
+ )
+ self._worker_process.start()
+ worker_ready.wait()
+
+ def _post_teardown(self):
+
+ self._server_process.terminate()
+ self._server_process.join()
+ self._worker_process.terminate()
+ self._worker_process.join()
+ super(ChannelLiveServerTestCase, self)._post_teardown()
+
+ def _is_in_memory_db(self, connection):
+ """Check if DatabaseWrapper holds in memory database."""
+
+ if connection.vendor == 'sqlite':
+ if django.VERSION >= (1, 11):
+ return connection.is_in_memory_db()
+ else:
+ return connection.is_in_memory_db(
+ connection.settings_dict['NAME'],
+ )
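+
+# A minimal usage sketch; the live URLs come from the properties above
+# (driving a browser against them is left to the test framework):
+#
+#     class SmokeTests(ChannelLiveServerTestCase):
+#         def test_server_is_up(self):
+#             self.assertTrue(self.live_server_url.startswith('http://localhost:'))
+#             self.assertTrue(self.live_server_ws_url.startswith('ws://localhost:'))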
diff --git a/channels/test/websocket.py b/channels/test/websocket.py
new file mode 100644
index 0000000..c260bd8
--- /dev/null
+++ b/channels/test/websocket.py
@@ -0,0 +1,170 @@
+import copy
+import json
+
+import six
+from django.apps import apps
+from django.conf import settings
+from django.http.cookie import SimpleCookie
+
+from ..sessions import session_for_reply_channel
+from .base import Client
+
+json_module = json  # alias for use inside functions that take a "json" kwarg
+
+
+class WSClient(Client):
+ """
+ Channel HTTP/WebSocket client abstraction that provides easy methods for
+ testing the full life cycle of a message in channels, with a predetermined
+ reply channel, login support, cookies, headers and so on.
+ """
+
+ def __init__(self, **kwargs):
+ self._ordered = kwargs.pop('ordered', False)
+ super(WSClient, self).__init__(**kwargs)
+ self._session = None
+ self._headers = {}
+ self._cookies = {}
+ self._session_cookie = True
+ self.order = 0
+
+ def set_cookie(self, key, value):
+ """
+ Set cookie
+ """
+ self._cookies[key] = value
+
+ def set_header(self, key, value):
+ """
+ Set header
+ """
+ if key == 'cookie':
+ raise ValueError('Use set_cookie method for cookie header')
+ self._headers[key] = value
+
+ def get_cookies(self):
+ """Return cookies"""
+ cookies = copy.copy(self._cookies)
+ if self._session_cookie and apps.is_installed('django.contrib.sessions'):
+ cookies[settings.SESSION_COOKIE_NAME] = self.session.session_key
+ return cookies
+
+ @property
+ def headers(self):
+ headers = copy.deepcopy(self._headers)
+ headers.setdefault('cookie', _encoded_cookies(self.get_cookies()))
+ return headers
+
+ @property
+ def session(self):
+ """Session as Lazy property: check that django.contrib.sessions is installed"""
+ if not apps.is_installed('django.contrib.sessions'):
+ raise EnvironmentError('Add django.contrib.sessions to the INSTALLED_APPS to use session')
+ if not self._session:
+ self._session = session_for_reply_channel(self.reply_channel)
+ return self._session
+
+ def receive(self, json=True):
+ """
+ Return the text content of the next message on the client's reply channel, decoding it from JSON if the json kwarg is set
+ """
+ content = super(WSClient, self).receive()
+ if content and json and 'text' in content and isinstance(content['text'], six.string_types):
+ return json_module.loads(content['text'])
+ return content.get('text', content) if content else None
+
+ def send(self, to, content={}, text=None, path='/'):
+ """
+ Send a message to a channel.
+ Adds the reply_channel name, path and headers to the message.
+ """
+ if to != 'websocket.connect' and '?' in path:
+ path = path.split('?')[0]
+ self.channel_layer.send(to, self._get_content(content, text, path))
+ self._session_cookie = False
+
+ def _list_headers(self):
+ return [[key.encode(), self.headers[key]] for key in self.headers]
+
+ def _get_content(self, content={}, text=None, path='/'):
+ content = copy.deepcopy(content)
+ content.setdefault('reply_channel', self.reply_channel)
+
+ if '?' in path:
+ path, query_string = path.split('?')
+ content.setdefault('path', path)
+ content.setdefault('query_string', query_string)
+ else:
+ content.setdefault('path', path)
+
+ content.setdefault('headers', self._list_headers())
+
+ if self._ordered:
+ if 'order' in content:
+ raise ValueError('Do not use "order" manually with "ordered=True"')
+ content['order'] = self.order
+ self.order += 1
+
+ text = text or content.get('text', None)
+
+ if text is not None:
+ if not isinstance(text, six.string_types):
+ content['text'] = json.dumps(text)
+ else:
+ content['text'] = text
+ return content
+
+ def send_and_consume(self, channel, content={}, text=None, path='/', fail_on_none=True, check_accept=True):
+ """
+ Reproduce the full life cycle of the message
+ """
+ self.send(channel, content, text, path)
+ return self.consume(channel, fail_on_none=fail_on_none, check_accept=check_accept)
+
+ def consume(self, channel, fail_on_none=True, check_accept=True):
+ result = super(WSClient, self).consume(channel, fail_on_none=fail_on_none)
+ if channel == "websocket.connect" and check_accept:
+ received = self.receive(json=False)
+ if received != {"accept": True}:
+ raise AssertionError("Connection rejected: %s != '{accept: True}'" % received)
+ return result
+
+ def login(self, **credentials):
+ """
+ Returns True if login is possible; False if the provided credentials
+ are incorrect, or the user is inactive, or if the sessions framework is
+ not available.
+ """
+ from django.contrib.auth import authenticate
+ user = authenticate(**credentials)
+ if user and user.is_active and apps.is_installed('django.contrib.sessions'):
+ self._login(user)
+ return True
+ else:
+ return False
+
+ def force_login(self, user, backend=None):
+ if backend is None:
+ backend = settings.AUTHENTICATION_BACKENDS[0]
+ user.backend = backend
+ self._login(user)
+
+ def _login(self, user):
+ from django.contrib.auth import login
+
+ # Fake http request
+ request = type('FakeRequest', (object, ), {'session': self.session, 'META': {}})
+ login(request, user)
+
+ # Save the session values.
+ self.session.save()
+
+
+def _encoded_cookies(cookies):
+ """Encode dict of cookies to ascii string"""
+
+ cookie_encoder = SimpleCookie()
+
+ for k, v in cookies.items():
+ cookie_encoder[k] = v
+
+ return cookie_encoder.output(header='', sep=';').encode("ascii")
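+
+# A minimal end-to-end sketch (credentials, path, payload and the routed
+# consumers are all assumed):
+#
+#     client = WSClient()
+#     client.login(username="bob", password="secret")
+#     client.send_and_consume("websocket.connect", path="/chat/")
+#     client.send_and_consume("websocket.receive", text={"msg": "hi"}, path="/chat/")
+#     client.receive()  # decoded JSON reply, or None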
diff --git a/channels/tests/__init__.py b/channels/tests/__init__.py
new file mode 100644
index 0000000..af4b25e
--- /dev/null
+++ b/channels/tests/__init__.py
@@ -0,0 +1,8 @@
+import warnings
+
+warnings.warn(
+ "channels.tests package is deprecated. Use channels.test",
+ DeprecationWarning,
+)
+
+from channels.test import * # NOQA isort:skip
diff --git a/channels/utils.py b/channels/utils.py
new file mode 100644
index 0000000..a22ba20
--- /dev/null
+++ b/channels/utils.py
@@ -0,0 +1,26 @@
+import types
+
+
+def name_that_thing(thing):
+ """
+ Returns either the function/class path or just the object's repr
+ """
+ # Instance method
+ if hasattr(thing, "im_class"):
+ # Mocks will recurse im_class forever
+ if hasattr(thing, "mock_calls"):
+ return ""
+ return name_that_thing(thing.im_class) + "." + thing.im_func.func_name
+ # Other named thing
+ if hasattr(thing, "__name__"):
+ if hasattr(thing, "__class__") and not isinstance(thing, (types.FunctionType, types.MethodType)):
+ if thing.__class__ is not type and not issubclass(thing.__class__, type):
+ return name_that_thing(thing.__class__)
+ if hasattr(thing, "__self__"):
+ return "%s.%s" % (thing.__self__.__module__, thing.__self__.__name__)
+ if hasattr(thing, "__module__"):
+ return "%s.%s" % (thing.__module__, thing.__name__)
+ # Generic instance of a class
+ if hasattr(thing, "__class__"):
+ return name_that_thing(thing.__class__)
+ return repr(thing)
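+
+# Illustrative results (standard-library objects as assumed examples):
+#
+#     >>> import json
+#     >>> name_that_thing(json.dumps)
+#     'json.dumps'
+#     >>> name_that_thing(json.JSONEncoder)
+#     'json.encoder.JSONEncoder'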
diff --git a/channels/worker.py b/channels/worker.py
new file mode 100644
index 0000000..1eacd67
--- /dev/null
+++ b/channels/worker.py
@@ -0,0 +1,184 @@
+from __future__ import unicode_literals
+
+import fnmatch
+import logging
+import multiprocessing
+import signal
+import sys
+import threading
+import time
+
+from .exceptions import ChannelSocketException, ConsumeLater, DenyConnection
+from .message import Message
+from .signals import consumer_finished, consumer_started, worker_ready
+from .utils import name_that_thing
+
+logger = logging.getLogger('django.channels')
+
+
+class Worker(object):
+ """
+ A "worker" process that continually looks for available messages to run
+ and runs their consumers.
+ """
+
+ def __init__(
+ self,
+ channel_layer,
+ callback=None,
+ message_retries=10,
+ signal_handlers=True,
+ only_channels=None,
+ exclude_channels=None
+ ):
+ self.channel_layer = channel_layer
+ self.callback = callback
+ self.message_retries = message_retries
+ self.signal_handlers = signal_handlers
+ self.only_channels = only_channels
+ self.exclude_channels = exclude_channels
+ self.termed = False
+ self.in_job = False
+
+ def install_signal_handler(self):
+ signal.signal(signal.SIGTERM, self.sigterm_handler)
+ signal.signal(signal.SIGINT, self.sigterm_handler)
+
+ def sigterm_handler(self, signo, stack_frame):
+ self.termed = True
+ if self.in_job:
+ logger.info("Shutdown signal received while busy, waiting for loop termination")
+ else:
+ logger.info("Shutdown signal received while idle, terminating immediately")
+ sys.exit(0)
+
+ def apply_channel_filters(self, channels):
+ """
+ Applies our include and exclude filters to the channel list and returns it
+ """
+ if self.only_channels:
+ channels = [
+ channel for channel in channels
+ if any(fnmatch.fnmatchcase(channel, pattern) for pattern in self.only_channels)
+ ]
+ if self.exclude_channels:
+ channels = [
+ channel for channel in channels
+ if not any(fnmatch.fnmatchcase(channel, pattern) for pattern in self.exclude_channels)
+ ]
+ return channels
+
+ def ready(self):
+ """
+ Called once worker setup is complete.
+ """
+ worker_ready.send(sender=self)
+
+ def run(self):
+ """
+ Tries to continually dispatch messages to consumers.
+ """
+ if self.signal_handlers:
+ self.install_signal_handler()
+ channels = self.apply_channel_filters(self.channel_layer.router.channels)
+ logger.info("Listening on channels %s", ", ".join(sorted(channels)))
+ while not self.termed:
+ self.in_job = False
+ channel, content = self.channel_layer.receive_many(channels, block=True)
+ self.in_job = True
+ # If no message, stall a little to avoid busy-looping then continue
+ if channel is None:
+ time.sleep(0.01)
+ continue
+ # Create message wrapper
+ logger.debug("Got message on %s (reply %s)", channel, content.get("reply_channel", "none"))
+ message = Message(
+ content=content,
+ channel_name=channel,
+ channel_layer=self.channel_layer,
+ )
+ # Add attribute to message if it's been retried almost too many times,
+ # and would be thrown away this time if it's requeued. Used for helpful
+ # warnings in decorators and such - don't rely on this as public API.
+ if content.get("__retries__", 0) == self.message_retries:
+ message.__doomed__ = True
+ # Handle the message
+ match = self.channel_layer.router.match(message)
+ if match is None:
+ logger.error("Could not find match for message on %s! Check your routing.", channel)
+ continue
+ else:
+ consumer, kwargs = match
+ if self.callback:
+ self.callback(channel, message)
+ try:
+ logger.debug("Dispatching message on %s to %s", channel, name_that_thing(consumer))
+ # Send consumer started to manage lifecycle stuff
+ consumer_started.send(sender=self.__class__, environ={})
+ # Run consumer
+ consumer(message, **kwargs)
+ except DenyConnection:
+ # They want to deny a WebSocket connection.
+ if message.channel.name != "websocket.connect":
+ raise ValueError("You cannot DenyConnection from a non-websocket.connect handler.")
+ message.reply_channel.send({"close": True})
+ except ChannelSocketException as e:
+ e.run(message)
+ except ConsumeLater:
+ # They want to not handle it yet. Re-inject it with a number-of-tries marker.
+ content['__retries__'] = content.get("__retries__", 0) + 1
+ # If we retried too many times, quit and error rather than
+ # spinning forever
+ if content['__retries__'] > self.message_retries:
+ logger.warning(
+ "Exceeded number of retries for message on channel %s: %s",
+ channel,
+ repr(content)[:100],
+ )
+ continue
+ # Try to re-insert it a few times then drop it
+ for _ in range(10):
+ try:
+ self.channel_layer.send(channel, content)
+ except self.channel_layer.ChannelFull:
+ time.sleep(0.05)
+ else:
+ break
+ except:
+ logger.exception("Error processing message with consumer %s:", name_that_thing(consumer))
+ finally:
+ # Send consumer finished so DB conns close etc.
+ consumer_finished.send(sender=self.__class__)
+
+
+class WorkerGroup(Worker):
+ """
+ Group several workers together in threads. Manages the sub-workers,
+ terminating them if a signal is received.
+ """
+
+ def __init__(self, *args, **kwargs):
+ n_threads = kwargs.pop('n_threads', multiprocessing.cpu_count()) - 1
+ super(WorkerGroup, self).__init__(*args, **kwargs)
+ kwargs['signal_handlers'] = False
+ self.workers = [Worker(*args, **kwargs) for ii in range(n_threads)]
+
+ def sigterm_handler(self, signo, stack_frame):
+ logger.info("Shutdown signal received by WorkerGroup, terminating immediately.")
+ sys.exit(0)
+
+ def ready(self):
+ super(WorkerGroup, self).ready()
+ for wkr in self.workers:
+ wkr.ready()
+
+ def run(self):
+ """
+ Launch sub-workers before running.
+ """
+ self.threads = [threading.Thread(target=self.workers[ii].run)
+ for ii in range(len(self.workers))]
+ for t in self.threads:
+ t.daemon = True
+ t.start()
+ super(WorkerGroup, self).run()
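+
+# A minimal launch sketch (import paths assumed; `runworker` normally does
+# this wiring for you):
+#
+#     from channels import DEFAULT_CHANNEL_LAYER
+#     from channels.asgi import channel_layers
+#
+#     layer = channel_layers[DEFAULT_CHANNEL_LAYER]
+#     layer.router.check_default()
+#     Worker(channel_layer=layer).run()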
diff --git a/docs/Makefile b/docs/Makefile
new file mode 100644
index 0000000..21894a7
--- /dev/null
+++ b/docs/Makefile
@@ -0,0 +1,177 @@
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS =
+SPHINXBUILD = sphinx-build
+PAPER =
+BUILDDIR = _build
+
+# User-friendly check for sphinx-build
+ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
+$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
+endif
+
+# Internal variables.
+PAPEROPT_a4 = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+# the i18n builder cannot share the environment and doctrees with the others
+I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+
+.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
+
+help:
+ @echo "Please use \`make ' where is one of"
+ @echo " html to make standalone HTML files"
+ @echo " dirhtml to make HTML files named index.html in directories"
+ @echo " singlehtml to make a single large HTML file"
+ @echo " pickle to make pickle files"
+ @echo " json to make JSON files"
+ @echo " htmlhelp to make HTML files and a HTML help project"
+ @echo " qthelp to make HTML files and a qthelp project"
+ @echo " devhelp to make HTML files and a Devhelp project"
+ @echo " epub to make an epub"
+ @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+ @echo " latexpdf to make LaTeX files and run them through pdflatex"
+ @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
+ @echo " text to make text files"
+ @echo " man to make manual pages"
+ @echo " texinfo to make Texinfo files"
+ @echo " info to make Texinfo files and run them through makeinfo"
+ @echo " gettext to make PO message catalogs"
+ @echo " changes to make an overview of all changed/added/deprecated items"
+ @echo " xml to make Docutils-native XML files"
+ @echo " pseudoxml to make pseudoxml-XML files for display purposes"
+ @echo " linkcheck to check all external links for integrity"
+ @echo " doctest to run all doctests embedded in the documentation (if enabled)"
+
+clean:
+ rm -rf $(BUILDDIR)/*
+
+html:
+ $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
+ @echo
+ @echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
+
+dirhtml:
+ $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
+ @echo
+ @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
+
+singlehtml:
+ $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
+ @echo
+ @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
+
+pickle:
+ $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
+ @echo
+ @echo "Build finished; now you can process the pickle files."
+
+json:
+ $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
+ @echo
+ @echo "Build finished; now you can process the JSON files."
+
+htmlhelp:
+ $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
+ @echo
+ @echo "Build finished; now you can run HTML Help Workshop with the" \
+ ".hhp project file in $(BUILDDIR)/htmlhelp."
+
+qthelp:
+ $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
+ @echo
+ @echo "Build finished; now you can run "qcollectiongenerator" with the" \
+ ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
+ @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Channels.qhcp"
+ @echo "To view the help file:"
+ @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Channels.qhc"
+
+devhelp:
+ $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
+ @echo
+ @echo "Build finished."
+ @echo "To view the help file:"
+ @echo "# mkdir -p $$HOME/.local/share/devhelp/Channels"
+ @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/Channels"
+ @echo "# devhelp"
+
+epub:
+ $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
+ @echo
+ @echo "Build finished. The epub file is in $(BUILDDIR)/epub."
+
+latex:
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo
+ @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
+ @echo "Run \`make' in that directory to run these through (pdf)latex" \
+ "(use \`make latexpdf' here to do that automatically)."
+
+latexpdf:
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo "Running LaTeX files through pdflatex..."
+ $(MAKE) -C $(BUILDDIR)/latex all-pdf
+ @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+latexpdfja:
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo "Running LaTeX files through platex and dvipdfmx..."
+ $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
+ @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+text:
+ $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
+ @echo
+ @echo "Build finished. The text files are in $(BUILDDIR)/text."
+
+man:
+ $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
+ @echo
+ @echo "Build finished. The manual pages are in $(BUILDDIR)/man."
+
+texinfo:
+ $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+ @echo
+ @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
+ @echo "Run \`make' in that directory to run these through makeinfo" \
+ "(use \`make info' here to do that automatically)."
+
+info:
+ $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+ @echo "Running Texinfo files through makeinfo..."
+ make -C $(BUILDDIR)/texinfo info
+ @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
+
+gettext:
+ $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
+ @echo
+ @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
+
+changes:
+ $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
+ @echo
+ @echo "The overview file is in $(BUILDDIR)/changes."
+
+linkcheck:
+ $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
+ @echo
+ @echo "Link check complete; look for any errors in the above output " \
+ "or in $(BUILDDIR)/linkcheck/output.txt."
+
+doctest:
+ $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
+ @echo "Testing of doctests in the sources finished, look at the " \
+ "results in $(BUILDDIR)/doctest/output.txt."
+
+xml:
+ $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
+ @echo
+ @echo "Build finished. The XML files are in $(BUILDDIR)/xml."
+
+pseudoxml:
+ $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
+ @echo
+ @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."
diff --git a/docs/asgi.rst b/docs/asgi.rst
new file mode 100644
index 0000000..de0803c
--- /dev/null
+++ b/docs/asgi.rst
@@ -0,0 +1,631 @@
+=======================================================
+ASGI (Asynchronous Server Gateway Interface) Draft Spec
+=======================================================
+
+.. note::
+   This is still in progress, but is now mostly complete.
+
+Abstract
+========
+
+This document proposes a standard interface between network protocol
+servers (particularly web servers) and Python applications, intended
+to allow handling of multiple common protocol styles (including HTTP, HTTP2,
+and WebSocket).
+
+This base specification is intended to fix in place the set of APIs by which
+these servers interact and the guarantees and style of message delivery;
+each supported protocol (such as HTTP) has a sub-specification that outlines
+how to encode and decode that protocol into messages.
+
+The set of sub-specifications is available :ref:`in the Message Formats section <asgi_sub_specifications>`.
+
+
+Rationale
+=========
+
+The WSGI specification has worked well since it was introduced, and
+allowed for great flexibility in Python framework and web server choice.
+However, its design is irrevocably tied to the HTTP-style
+request/response cycle, and more and more protocols are becoming a
+standard part of web programming that do not follow this pattern
+(most notably, WebSocket).
+
+ASGI attempts to preserve a simple application interface, but provide
+an abstraction that allows for data to be sent and received at any time,
+and from different application threads or processes.
+
+It also takes the principle of turning protocols into Python-compatible,
+asynchronous-friendly sets of messages and generalises it into two parts:
+a standardised interface for communication and to build servers around (this
+document), and a set of standard :ref:`message formats for each protocol <asgi_sub_specifications>`.
+
+Its primary goal, however, is to provide a way to write HTTP/2 and WebSocket
+code alongside normal HTTP handling code. Part of this design is
+ensuring there is an easy path to use both existing WSGI servers and
+applications: a large majority of Python web usage relies on WSGI, and
+providing an easy path forwards is critical to adoption. Details on that
+interoperability are covered in :doc:`/asgi/www`.
+
+The end result of this process has been a specification for generalised
+inter-process communication between Python processes, with a certain set of
+guarantees and delivery styles that make it suited to low-latency protocol
+processing and response. It is not intended to replace things like traditional
+task queues, but it is intended that it could be used for things like
+distributed systems communication, or as the backbone of a service-oriented
+architecture for inter-service communication.
+
+
+Overview
+========
+
+ASGI consists of three different components - *protocol servers*,
+a *channel layer*, and *application code*. Channel layers are the core
+part of the implementation, and provide an interface to both protocol
+servers and applications.
+
+A channel layer provides a protocol server or an application server
+with a ``send`` callable, which takes a channel name and message
+``dict``, and a ``receive`` callable, which takes a list of
+channel names and returns the next message available on any named channel.
+
+Thus, where under WSGI you point the protocol server at the application,
+under ASGI you point both the protocol server and the application at a
+channel layer instance. It is intended that applications and protocol
+servers always run in separate processes or threads, and always communicate
+via the channel layer.
+
+ASGI tries to be as compatible as possible by default, and so the only
+implementation of ``receive`` that must be provided is a fully-synchronous,
+nonblocking one. Implementations can then choose to implement a blocking mode
+in this method, and if they wish to go further, versions compatible with
+the asyncio or Twisted frameworks (or other frameworks that may become
+popular, thanks to the extension declaration mechanism).
+
+The distinction between protocol servers and applications in this document
+is mostly to distinguish their roles and to make illustrating concepts easier.
+There is no code-level distinction between the two, and it's entirely possible
+to have a process that does both, or middleware-like code that transforms
+messages between two different channel layers or channel names. It is
+expected, however, that most deployments will fall into this pattern.
+
+There is even room for a WSGI-like application abstraction on the application
+server side, with a callable which takes ``(channel, message, send_func)``,
+but this would be slightly too restrictive for many use cases and does not
+cover how to specify channel names to listen on. It is expected that
+frameworks will cover this use case.
+
+
+Channels and Messages
+---------------------
+
+All communication in an ASGI stack uses *messages* sent over *channels*.
+All messages must be a ``dict`` at the top level of the object, and
+contain only the following types to ensure serializability:
+
+* Byte strings
+* Unicode strings
+* Integers (within the signed 64 bit range)
+* Floating point numbers (within the IEEE 754 double precision range)
+* Lists (tuples should be treated as lists)
+* Dicts (keys must be unicode strings)
+* Booleans
+* None
+
+Channels are identified by a unicode string name consisting only of ASCII
+letters, ASCII numerical digits, periods (``.``), dashes (``-``) and
+underscores (``_``), plus an optional type character (see below).
+
+Channels are first-in, first-out queues with at-most-once delivery
+semantics. They can have multiple writers and multiple readers; only a single
+reader should get each written message. Implementations must never deliver
+a message more than once or to more than one reader, and must drop messages if
+this is necessary to achieve this restriction.
+
+In order to aid with scaling and network architecture, a distinction
+is made between channels that have multiple readers (such as the
+``http.request`` channel that web applications would listen on from every
+application worker process), *single-reader channels* that are read from a
+single unknown location (such as ``http.request.body?ABCDEF``), and
+*process-specific channels* (such as a ``http.response.A1B2C3!D4E5F6`` channel
+tied to a client socket).
+
+*Normal channel* names contain no type characters, and can be routed however
+the backend wishes; in particular, they do not have to appear globally
+consistent, and backends may shard their contents out to different servers
+so that a querying client only sees some portion of the messages. Calling
+``receive`` on these channels does not guarantee that you will get the
+messages in order, or that you will get anything even if the channel is non-empty.
+
+*Single-reader channel* names contain a question mark
+(``?``) character in order to indicate to the channel layer that it must make
+these channels appear globally consistent. The ``?`` is always preceded by
+the main channel name (e.g. ``http.request.body``) and followed by a
+random portion. Channel layers may use the random portion to help pin the
+channel to a server, but reads from this channel by a single process must
+always be in-order and return messages if the channel is non-empty. These names
+must be generated by the ``new_channel`` call.
+
+*Process-specific channel* names contain an exclamation mark (``!``) that
+separates a remote and local part. These channels are received differently;
+only the name up to and including the ``!`` character is passed to the
+``receive()`` call, and it will receive any message on any channel with that
+prefix. This allows a process, such as a HTTP terminator, to listen on a single
+process-specific channel, and then distribute incoming requests to the
+appropriate client sockets using the local part (the part after the ``!``).
+The local parts must be generated and managed by the process that consumes them.
+These channels, like single-reader channels, are guaranteed to give any extant
+messages in order if received from a single process.
+
+Messages should expire after a set time sitting unread in a channel;
+the recommendation is one minute, though the best value depends on the
+channel layer and the way it is deployed.
+
+The maximum message size is 1MB when the message is encoded as JSON;
+if more data than this needs to be transmitted it must be chunked or placed
+onto its own single-reader or process-specific channel (see how HTTP request
+bodies are done, for example). All channel layers must support messages up
+to this size, but protocol specifications are encouraged to keep well below it.
+
+
+Handling Protocols
+------------------
+
+ASGI messages represent two main things - internal application events
+(for example, a channel might be used to queue thumbnails of previously
+uploaded videos), and protocol events to/from connected clients.
+
+As such, there are :ref:`sub-specifications ` that
+outline encodings to and from ASGI messages for common protocols like HTTP and
+WebSocket; in particular, the HTTP one covers the WSGI/ASGI interoperability.
+It is recommended that if a protocol becomes commonplace, it should gain
+standardized formats in a sub-specification of its own.
+
+The message formats are a key part of the specification; without them,
+the protocol server and web application might be able to talk to each other,
+but may not understand some of what the other is saying. It's equivalent to the
+standard keys in the ``environ`` dict for WSGI.
+
+The design pattern is that most protocols will share a few channels for
+incoming data (for example, ``http.request``, ``websocket.connect`` and
+``websocket.receive``), but will have individual channels for sending to
+each client (such as ``http.response!kj2daj23``). This allows incoming
+data to be dispatched into a cluster of application servers that can all
+handle it, while responses are routed to the individual protocol server
+that has the other end of the client's socket.
+
+Some protocols, however, do not have the concept of a unique socket
+connection; for example, an SMS gateway protocol server might just have
+``sms.receive`` and ``sms.send``, and the protocol server cluster would
+take messages from ``sms.send`` and route them into the normal phone
+network based on attributes in the message (in this case, a telephone
+number).
+
+
+.. _asgi_extensions:
+
+Extensions
+----------
+
+Extensions are functionality that is
+not required for basic application code and nearly all protocol server
+code, and so has been made optional in order to enable lightweight
+channel layers for applications that don't need the full feature set defined
+here.
+
+The extensions defined here are:
+
+* ``groups``: Allows grouping of channels to allow broadcast; see below for more.
+* ``flush``: Allows easier testing and development with channel layers.
+* ``statistics``: Allows channel layers to provide global and per-channel statistics.
+* ``twisted``: Async compatibility with the Twisted framework.
+* ``asyncio``: Async compatibility with Python 3's asyncio.
+
+There is potential to add further extensions; these may be defined by
+a separate specification, or a new version of this specification.
+
+If application code requires an extension, it should check for it as soon
+as possible, and hard error if it is not provided. Frameworks should
+encourage optional use of extensions, while attempting to move any
+extension-not-found errors to process startup rather than message handling.
+
+
+Groups
+------
+
+While the basic channel model is sufficient to handle basic application
+needs, many more advanced uses of asynchronous messaging require
+notifying many users at once when an event occurs - imagine a live blog,
+for example, where every viewer should get a long poll response or
+WebSocket packet when a new entry is posted.
+
+This concept could be kept external to the ASGI spec, and would be, if it
+were not for the significant performance gains a channel layer implementation
+could make on the send-group operation by having it included - the
+alternative being a ``send_many`` callable that might have to take
+tens of thousands of destination channel names in a single call. However,
+the group feature is still optional; its presence is indicated by the
+``groups`` entry in the channel layer's ``extensions`` list.
+
+Thus, there is a simple Group concept in ASGI, which acts as the
+broadcast/multicast mechanism across channels. Channels are added to a group,
+and then messages sent to that group are sent to all members of the group.
+Channels can be removed from a group manually (e.g. based on a disconnect
+event), and the channel layer will garbage collect "old" channels in groups
+on a periodic basis.
+
+How this garbage collection happens is not specified here, as it depends on
+the internal implementation of the channel layer. The recommended approach,
+however, is that when a message on a process-specific channel expires, the channel
+layer should remove that channel from all groups it's currently a member of;
+this is deemed an acceptable indication that the channel's listener is gone.
+
+*Implementation of the group functionality is optional*. If it is not provided
+and an application or protocol server requires it, they should hard error
+and exit with an appropriate error message. It is expected that protocol
+servers will not need to use groups.
+
+
+Linearization
+-------------
+
+The design of ASGI is meant to enable a shared-nothing architecture,
+where messages can be handled by any one of a set of threads, processes
+or machines running application code.
+
+This, of course, means that several different copies of the application
+could be handling messages simultaneously, and those messages could even
+be from the same client; in the worst case, two packets from a client
+could even be processed out-of-order if one server is slower than another.
+
+This is an existing issue with things like WSGI as well - a user could
+open two different tabs to the same site at once and launch simultaneous
+requests to different servers - but the nature of the new protocols
+specified here means that collisions are more likely to occur.
+
+Solving this issue is left to frameworks and application code; there are
+already solutions such as database transactions that help solve this,
+and the vast majority of application code will not need to deal with this
+problem. If ordering of incoming packets matters for a protocol, they should
+be annotated with a packet number (as WebSocket is in its specification).
+
+Single-reader and process-specific channels, such as those used for response
+channels back to clients, are not subject to this problem; a single reader
+on these must always receive messages in channel order.
+
+
+Capacity
+--------
+
+To provide backpressure, each channel in a channel layer may have a capacity,
+defined however the layer wishes (it is recommended that it is configurable
+by the user using keyword arguments to the channel layer constructor, and
+furthermore configurable per channel name or name prefix).
+
+When a channel is at or over capacity, trying to ``send()`` to that channel
+may raise ``ChannelFull``, which indicates to the sender that the channel is over
+capacity. How the sender wishes to deal with this will depend on context;
+for example, a web application trying to send a response body will likely
+wait until it empties out again, while a HTTP interface server trying to
+send in a request would drop the request and return a 503 error.
+
+Process-specific channels must apply their capacity on the non-local part (that is,
+up to and including the ``!`` character), and so capacity is shared among all
+of the "virtual" channels inside it.
+
+Sending to a group never raises ``ChannelFull``; instead, it must silently drop
+the message if it is over capacity, as per ASGI's at-most-once delivery
+policy.
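+
+For illustration, a sketch of the waiting behaviour a response sender might
+use (``channel_layer`` is assumed to be a channel layer instance; the helper
+name and timing values are invented for the example):
+
+.. code-block:: python
+
+    import time
+
+    def send_with_backoff(channel_layer, channel, message, retries=100):
+        # Wait for the channel to drain rather than dropping the message.
+        for _ in range(retries):
+            try:
+                channel_layer.send(channel, message)
+                return
+            except channel_layer.ChannelFull:
+                time.sleep(0.05)
+        raise RuntimeError("channel stayed over capacity")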
+
+
+Specification Details
+=====================
+
+A *channel layer* must provide an object with these attributes
+(all function arguments are positional):
+
+* ``send(channel, message)``, a callable that takes two arguments: the
+ channel to send on, as a unicode string, and the message
+ to send, as a serializable ``dict``.
+
+* ``receive(channels, block=False)``, a callable that takes a list of channel
+ names as unicode strings, and returns with either ``(None, None)``
+ or ``(channel, message)`` if a message is available. If ``block`` is True, then
+  it will not return until a message arrives (or, optionally, until a built-in
+  timeout expires, though it is valid to block forever if there are no messages); if
+  ``block`` is False, it will always return immediately. It is perfectly
+ valid to ignore ``block`` and always return immediately, or after a delay;
+ ``block`` means that the call can take as long as it likes before returning
+ a message or nothing, not that it must block until it gets one.
+
+* ``new_channel(pattern)``, a callable that takes a unicode string pattern,
+ and returns a new valid channel name that does not already exist, by
+ adding a unicode string after the ``!`` or ``?`` character in ``pattern``,
+ and checking for existence of that name in the channel layer. The ``pattern``
+ must end with ``!`` or ``?`` or this function must error. If the character
+ is ``!``, making it a process-specific channel, ``new_channel`` must be
+ called on the same channel layer that intends to read the channel with
+ ``receive``; any other channel layer instance may not receive
+ messages on this channel due to client-routing portions of the appended string.
+
+* ``MessageTooLarge``, the exception raised when a send operation fails
+ because the encoded message is over the layer's size limit.
+
+* ``ChannelFull``, the exception raised when a send operation fails
+ because the destination channel is over capacity.
+
+* ``extensions``, a list of unicode string names indicating which
+ extensions this layer provides, or an empty list if it supports none.
+ The possible extensions can be seen in :ref:`asgi_extensions`.
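+
+For illustration, a minimal consumer loop against this interface (a sketch
+only; ``channel_layer`` is a layer instance and ``handle`` is a hypothetical
+dispatch function, not part of this specification):
+
+.. code-block:: python
+
+    while True:
+        # Blocks until a message arrives on one of the channels, or an
+        # internal timeout passes, in which case (None, None) is returned.
+        channel, message = channel_layer.receive(
+            ["http.request", "websocket.connect"], block=True)
+        if channel is None:
+            continue
+        handle(channel, message)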
+
+A channel layer implementing the ``groups`` extension must also provide:
+
+* ``group_add(group, channel)``, a callable that takes a ``channel`` and adds
+ it to the group given by ``group``. Both are unicode strings. If the channel
+ is already in the group, the function should return normally.
+
+* ``group_discard(group, channel)``, a callable that removes the ``channel``
+ from the ``group`` if it is in it, and does nothing otherwise.
+
+* ``group_channels(group)``, a callable that returns an iterable which yields
+ all of the group's member channel names. The return value should be serializable
+ with regards to local adds and discards, but best-effort with regards to
+ adds and discards on other nodes.
+
+* ``send_group(group, message)``, a callable that takes two positional
+ arguments; the group to send to, as a unicode string, and the message
+  to send, as a serializable ``dict``. It may raise ``MessageTooLarge`` but cannot
+  raise ``ChannelFull``.
+
+* ``group_expiry``, an integer number of seconds that specifies how long group
+ membership is valid for after the most recent ``group_add`` call (see
+  *Persistence* below).
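+
+A sketch of typical group usage from application code (``channel_layer``
+and the group name are illustrative):
+
+.. code-block:: python
+
+    # Subscribe a client's reply channel to a broadcast group...
+    channel_layer.group_add("liveblog", message["reply_channel"])
+    # ...broadcast a message to every current member...
+    channel_layer.send_group("liveblog", {"text": "New entry posted!"})
+    # ...and unsubscribe the channel again on disconnect.
+    channel_layer.group_discard("liveblog", message["reply_channel"])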
+
+A channel layer implementing the ``statistics`` extension must also provide:
+
+* ``global_statistics()``, a callable that returns statistics across all channels
+
+* ``channel_statistics(channel)``, a callable that returns statistics for the
+  specified channel
+
+* In both cases, statistics are a dict with zero or more of the following
+  (unicode string) keys:
+
+  * ``messages_count``, the number of messages processed since server start
+  * ``messages_count_per_second``, the number of messages processed in the last second
+  * ``messages_pending``, the current number of messages waiting
+  * ``messages_max_age``, how long the oldest message has been waiting, in seconds
+  * ``channel_full_count``, the number of times the ``ChannelFull`` exception has been raised since server start
+  * ``channel_full_count_per_second``, the number of times the ``ChannelFull`` exception has been raised in the last second
+
+* Implementations may provide total counts, per-second counts, or both.
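+
+For illustration, a statistics call might return something like this (the
+values here are invented):
+
+.. code-block:: python
+
+    >>> channel_layer.channel_statistics("http.request")
+    {"messages_count": 12042, "messages_pending": 3, "messages_max_age": 2}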
+
+
+A channel layer implementing the ``flush`` extension must also provide:
+
+* ``flush()``, a callable that resets the channel layer to a blank state,
+ containing no messages and no groups (if the groups extension is
+ implemented). This call must block until the system is cleared and will
+ consistently look empty to any client, if the channel layer is distributed.
+
+A channel layer implementing the ``twisted`` extension must also provide:
+
+* ``receive_twisted(channels)``, a function that behaves
+ like ``receive`` but that returns a Twisted Deferred that eventually
+ returns either ``(channel, message)`` or ``(None, None)``. It is not possible
+ to run it in nonblocking mode; use the normal ``receive`` for that.
+
+A channel layer implementing the ``asyncio`` extension must also provide:
+
+* ``receive_async(channels)``, a function that behaves
+ like ``receive`` but that fulfills the asyncio coroutine contract to
+ block until either a result is available or an internal timeout is reached
+ and ``(None, None)`` is returned. It is not possible
+ to run it in nonblocking mode; use the normal ``receive`` for that.
+
+Channel Semantics
+-----------------
+
+Channels **must**:
+
+* Preserve ordering of messages perfectly with only a single reader
+ and writer if the channel is a *single-reader* or *process-specific* channel.
+
+* Never deliver a message more than once.
+
+* Never block on message send (though they may raise ``ChannelFull`` or
+  ``MessageTooLarge``).
+
+* Be able to handle messages of at least 1MB in size when encoded as
+ JSON (the implementation may use better encoding or compression, as long
+ as it meets the equivalent size)
+
+* Have a maximum name length of at least 100 bytes.
+
+They should attempt to preserve ordering in all cases as much as possible,
+but perfect global ordering is obviously not possible in the distributed case.
+
+They are not expected to deliver all messages, but a success rate of at least
+99.99% is expected under normal circumstances. Implementations may want to
+have a "resilience testing" mode where they deliberately drop more messages
+than usual so developers can test their code's handling of these scenarios.
+
+
+Persistence
+-----------
+
+Channel layers do not need to persist data long-term; group
+memberships only need to live as long as a connection does, and messages
+only as long as the message expiry time, which is usually a couple of minutes.
+
+That said, if a channel server goes down momentarily and loses all data,
+persistent socket connections will continue to transfer incoming data and
+send out new generated data, but will have lost all of their group memberships
+and in-flight messages.
+
+In order to avoid a nasty set of bugs caused by these half-deleted sockets,
+protocol servers should quit and hard restart if they detect that the channel
+layer has gone down or lost data; shedding all existing connections and letting
+clients reconnect will immediately resolve the problem.
+
+If a channel layer implements the ``groups`` extension, it must persist group
+membership until at least the time when the member channel has a message
+expire due to non-consumption, after which it may drop membership at any time.
+If a channel subsequently has a successful delivery, the channel layer must
+then not drop group membership until another message expires on that channel.
+
+Channel layers must also drop group membership after a configurable long timeout
+after the most recent ``group_add`` call for that membership, the default being
+86,400 seconds (one day). The value of this timeout is exposed as the
+``group_expiry`` property on the channel layer.
+
+Protocol servers must have a configurable timeout value for every connection-based
+protocol they serve that closes the connection after the timeout, and should
+default this value to the value of ``group_expiry``, if the channel
+layer provides it. This allows old group memberships to be cleaned up safely,
+knowing that after the group expiry the original connection must have closed,
+or is about to be in the next few seconds.
+
+It's recommended that end developers put the timeout setting much lower - on
+the order of hours or minutes - to enable better protocol design and testing.
+Even with ASGI's separation of protocol server restart from business logic
+restart, you will likely need to move and reprovision protocol servers, and
+making sure your code can cope with this is important.
+
+
+.. _asgi_sub_specifications:
+
+Message Formats
+---------------
+
+These describe the standardized message formats for the protocols this
+specification supports. All messages are ``dicts`` at the top level,
+and all keys are required unless explicitly marked as optional. If a key is
+marked optional, a default value is specified, which is to be assumed if
+the key is missing. Keys are unicode strings.
+
+The one common key across all protocols is ``reply_channel``, a way to indicate
+the client-specific channel to send responses to. Protocols are generally
+encouraged to have one message type and one reply channel type to ensure ordering.
+
+A ``reply_channel`` should be unique per connection. If the protocol in question
+can have any server service a response - e.g. a theoretical SMS protocol - it
+should not have ``reply_channel`` attributes on messages, but instead a separate
+top-level outgoing channel.
+
+Messages are specified here along with the channel names they are expected
+on; if a channel name can vary, such as with reply channels, the varying
+portion will be represented by ``!``, such as ``http.response!``, which matches
+the format the ``new_channel`` callable takes.
+
+There is no label on message types to say what they are; their type is implicit
+in the channel name they are received on. Two types that are sent on the same
+channel, such as HTTP responses and response chunks, are distinguished
+by their required fields.
+
+Message formats can be found in the sub-specifications:
+
+.. toctree::
+ :maxdepth: 1
+
+ /asgi/www
+ /asgi/delay
+ /asgi/udp
+
+
+Protocol Format Guidelines
+--------------------------
+
+Message formats for protocols should follow these rules, unless
+a very good performance or implementation reason is present:
+
+* ``reply_channel`` should be unique per logical connection, and not per
+ logical client.
+
+* If the protocol has server-side state, entirely encapsulate that state in
+ the protocol server; do not require the message consumers to use an external
+ state store.
+
+* If the protocol has low-level negotiation, keepalive or other features,
+ handle these within the protocol server and don't expose them in ASGI
+ messages.
+
+* If the protocol has guaranteed ordering and does not use a specific channel
+ for a given connection (as HTTP does for body data), ASGI messages should
+ include an ``order`` field (0-indexed) that preserves the ordering as
+ received by the protocol server (or as sent by the client, if available).
+ This ordering should span all message types emitted by the client - for
+ example, a connect message might have order ``0``, and the first two frames
+ order ``1`` and ``2``.
+
+* If the protocol is datagram-based, one datagram should equal one ASGI message
+ (unless size is an issue)
+
+
+Approximate Global Ordering
+---------------------------
+
+While maintaining true global (across-channels) ordering of messages is
+entirely unreasonable to expect of many implementations, they should strive
+to prevent busy channels from overpowering quiet channels.
+
+For example, imagine two channels, ``busy``, which spikes to 1000 messages a
+second, and ``quiet``, which gets one message a second. There's a single
+consumer running ``receive(['busy', 'quiet'])`` which can handle
+around 200 messages a second.
+
+In a simplistic for-loop implementation, the channel layer might always check
+``busy`` first; it always has messages available, and so the consumer never
+even gets to see a message from ``quiet``, even if it was sent with the
+first batch of ``busy`` messages.
+
+A simple way to solve this is to randomize the order of the channel list when
+looking for messages inside the channel layer; other, better methods are also
+available, but whatever is chosen, it should try to avoid a scenario where
+a message doesn't get received purely because another channel is busy.
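+
+One illustrative way to do this inside a layer's ``receive`` implementation
+(a sketch under the assumption of a simple polling loop; the helper name is
+invented):
+
+.. code-block:: python
+
+    import random
+
+    def _polling_order(channels):
+        # Shuffle a copy of the channel list on each pass so a busy
+        # channel cannot permanently starve a quiet one.
+        channels = list(channels)
+        random.shuffle(channels)
+        return channels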
+
+
+Strings and Unicode
+-------------------
+
+In this document, and all sub-specifications, *byte string* refers to
+``str`` on Python 2 and ``bytes`` on Python 3. If this type still supports
+Unicode codepoints due to the underlying implementation, then any values
+should be kept within the 0 - 255 range.
+
+*Unicode string* refers to ``unicode`` on Python 2 and ``str`` on Python 3.
+This document will never specify just *string* - all strings are one of the
+two exact types.
+
+Some serializers, such as ``json``, cannot differentiate between byte
+strings and unicode strings; these should include logic to box one type as
+the other (for example, encoding byte strings as base64 unicode strings with
+a preceding special character, e.g. U+FFFF).
+
+Channel and group names are always unicode strings, with the additional
+limitation that they only use the following characters:
+
+* ASCII letters
+* The digits ``0`` through ``9``
+* Hyphen ``-``
+* Underscore ``_``
+* Period ``.``
+* Question mark ``?`` (only to delineate single-reader channel names,
+ and only one per name)
+* Exclamation mark ``!`` (only to delineate process-specific channel names,
+ and only one per name)
+
+
+
+Common Questions
+================
+
+1. Why are messages ``dicts``, rather than a more advanced type?
+
+ We want messages to be very portable, especially across process and
+ machine boundaries, and so a simple encodable type seemed the best way.
+ We expect frameworks to wrap each protocol-specific set of messages in
+  custom classes (e.g. ``http.request`` messages become ``Request`` objects).
+
+
+Copyright
+=========
+
+This document has been placed in the public domain.
diff --git a/docs/asgi/delay.rst b/docs/asgi/delay.rst
new file mode 100644
index 0000000..6edc19a
--- /dev/null
+++ b/docs/asgi/delay.rst
@@ -0,0 +1,26 @@
+===============================================
+Delay Protocol ASGI Message Format (Draft Spec)
+===============================================
+
+Protocol that allows any ASGI message to be delayed for a given number of milliseconds.
+
+This simple protocol enables developers to schedule ASGI messages to be sent at a time in the future.
+It can be used in conjunction with any other channel. This allows you to do anything from
+simple tasks, like scheduling an email to be sent later, to more complex tasks, like testing latency in protocols.
+
+
+Delay
+'''''
+
+Send a message to this channel to delay a message.
+
+Channel: ``asgi.delay``
+
+Keys:
+
+ * ``channel``: Unicode string specifying the final destination channel for the message after the delay.
+
+ * ``delay``: Positive integer specifying the number of milliseconds to delay the message.
+
+ * ``content``: Dictionary of unicode string keys for the message content. This should meet the
+   content specifications for the specified destination channel.
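+
+For illustration, scheduling a delayed message might look like this
+(``channel_layer`` is a channel layer instance, and ``email.send`` is a
+hypothetical destination channel):
+
+.. code-block:: python
+
+    # Ask the delay server to deliver the message in five minutes.
+    channel_layer.send("asgi.delay", {
+        "channel": "email.send",
+        "delay": 5 * 60 * 1000,  # milliseconds
+        "content": {"to": "user@example.com", "subject": "Reminder"},
+    })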
diff --git a/docs/asgi/email.rst b/docs/asgi/email.rst
new file mode 100644
index 0000000..ae72dca
--- /dev/null
+++ b/docs/asgi/email.rst
@@ -0,0 +1,76 @@
+======================================
+Email ASGI Message Format (Draft Spec)
+======================================
+
+.. warning::
+ This is an incomplete draft.
+
+Represents emails sent or received, likely over the SMTP protocol though that
+is not directly specified here (a protocol server could in theory deliver
+or receive email over HTTP to some external service, for example). Generally
+adheres to RFC 5322 as much as possible.
+
+As emails have no concept of a session and there's no trustable socket or
+author model, the send and receive channels are both multi-listener, and
+there is no ``reply_channel`` on any message type. If you want to persist
+data across different email receive consumers, you should decide what part
+of the message to use for an identifier (from address? to address? subject?
+thread id?) and provide the persistence yourself.
+
+The protocol server should handle encoding of headers by itself, understanding
+RFC 1342 format headers and decoding them into unicode upon receive, and
+encoding outgoing emails similarly (preferably using UTF-8).
+
+
+Receive
+'''''''
+
+Sent when an email is received.
+
+Channel: ``email.receive``
+
+Keys:
+
+* ``from``: Unicode string specifying the return-path of the email as specified
+ in the SMTP envelope. Will be ``None`` if no return path was provided.
+
+* ``to``: List of unicode strings specifying the recipients requested in the
+ SMTP envelope using ``RCPT TO`` commands. Will always contain at least one
+ value.
+
+* ``headers``: Dictionary of unicode string keys and unicode string values,
+ containing all headers, including ``subject``. Header names are all forced
+ to lower case. Header values are decoded from RFC 1342 if needed.
+
+* ``content``: Contains a content object (see section below) representing the
+ body of the message.
+
+Note that ``from`` and ``to`` are extracted from the SMTP envelope, and not
+from the headers inside the message; if you wish to get the header values,
+you should use ``headers['from']`` and ``headers['to']``; they may be different.
+
+
+Send
+''''
+
+Sends an email out via whatever transport the protocol server implements (e.g. SMTP).
+
+
+Content objects
+'''''''''''''''
+
+Used in both send and receive to represent the tree structure of a MIME
+multipart message tree.
+
+A content object is always a dict, containing at least the key:
+
+* ``content-type``: The unicode string of the content type for this section.
+
+Multipart content objects also have:
+
+* ``parts``: A list of content objects contained inside this multipart
+
+Any other type of object has:
+
+* ``body``: Byte string content of this part, decoded from any
+ ``Content-Transfer-Encoding`` if one was specified as a MIME header.
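+
+An illustrative content object for a two-part message (the values are
+examples only):
+
+.. code-block:: python
+
+    {
+        "content-type": "multipart/alternative",
+        "parts": [
+            {"content-type": "text/plain", "body": b"Hello!"},
+            {"content-type": "text/html", "body": b"<p>Hello!</p>"},
+        ],
+    }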
diff --git a/docs/asgi/irc-client.rst b/docs/asgi/irc-client.rst
new file mode 100644
index 0000000..cfdecd7
--- /dev/null
+++ b/docs/asgi/irc-client.rst
@@ -0,0 +1,81 @@
+===========================================
+IRC Client ASGI Message Format (Draft Spec)
+===========================================
+
+.. warning::
+ This is an incomplete draft.
+
+Represents communication with an external IRC server as a client. It is possible
+to have multiple clients hooked into the same channel layer talking to different
+servers, which is why the reply channel is not a fixed name;
+a client will provide it with every incoming action and upon connection.
+
+The reply channel must stay consistent throughout the client's lifetime, so it
+can be used as a unique identifier for the client.
+
+
+Connected
+---------
+
+Sent when the client has established a connection to an IRC server.
+
+Channel: ``irc-client.connect``
+
+Keys:
+
+* ``reply_channel``: The channel to send messages or actions to the server over.
+
+* ``server``: A two-item list of ``[hostname, port]``, where hostname is a
+ unicode string of the server hostname or IP address, and port is the integer port.
+
+
+Joined
+------
+
+Sent when the client has joined an IRC channel.
+
+Channel: ``irc-client.join``
+
+Keys:
+
+* ``reply_channel``: The channel to send messages or actions to the server over.
+
+* ``channel``: Unicode string name of the IRC channel joined
+
+
+Receive
+-------
+
+Represents either a message, action or notice being received from the server.
+
+Channel: ``irc-client.receive``
+
+Keys:
+
+* ``reply_channel``: The channel to send messages or actions to the server over.
+
+* ``type``: Unicode string, one of ``message``, ``action`` or ``notice``.
+
+* ``user``: IRC user as a unicode string (including host portion)
+
+* ``channel``: IRC channel name as a unicode string
+
+* ``body``: Message, action or notice content as a unicode string
+
+
+Control
+-------
+
+Sent to control the IRC client.
+
+Channel: Specified by the server as ``reply_channel`` in other types
+
+Keys:
+
+* ``channel``: IRC channel name to act on as a unicode string
+
+* ``type``: Unicode string, one of ``join``, ``part``, ``message`` or
+ ``action``.
+
+* ``body``: If type is ``message`` or ``action``, the body of the message
+ or the action as a unicode string.
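+
+For illustration, joining a channel and sending a message through the client
+(``channel_layer`` is a channel layer instance, and ``reply_channel`` comes
+from a previous ``irc-client.connect`` message; the IRC channel name is an
+example):
+
+.. code-block:: python
+
+    channel_layer.send(reply_channel, {"type": "join", "channel": "#django"})
+    channel_layer.send(reply_channel, {
+        "type": "message",
+        "channel": "#django",
+        "body": "hello from ASGI",
+    })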
diff --git a/docs/asgi/udp.rst b/docs/asgi/udp.rst
new file mode 100644
index 0000000..3b46331
--- /dev/null
+++ b/docs/asgi/udp.rst
@@ -0,0 +1,48 @@
+=============================
+UDP ASGI Message Format (1.0)
+=============================
+
+Raw UDP is specified here as it is a datagram-based, unordered and unreliable
+protocol, which neatly maps to the underlying message abstraction. It is not
+expected that many applications would use the low-level protocol, but it may
+be useful for some.
+
+While it might seem odd to have reply channels for UDP as it is a stateless
+protocol, replies need to come from the same server as the messages were
+sent to, so the reply channel here ensures that reply packets from an ASGI
+stack do not come from a different protocol server to the one you sent the
+initial packet to.
+
+
+Receive
+'''''''
+
+Sent when a UDP datagram is received.
+
+Channel: ``udp.receive``
+
+Keys:
+
+* ``reply_channel``: Channel name for sending data, starts with ``udp.send!``
+
+* ``data``: Byte string of UDP datagram payload.
+
+* ``client``: List of ``[host, port]`` where ``host`` is a unicode string of the
+ remote host's IPv4 or IPv6 address, and ``port`` is the remote port as an
+ integer.
+
+* ``server``: List of ``[host, port]`` where ``host`` is the listening address
+ for this server as a unicode string, and ``port`` is the integer listening port.
+ Optional, defaults to ``None``.
+
+
+Send
+''''
+
+Sent to send out a UDP datagram to a client.
+
+Channel: ``udp.send!``
+
+Keys:
+
+* ``data``: Byte string of UDP datagram payload.
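+
+For illustration, a minimal echo application against this format
+(``channel_layer`` is assumed to be a channel layer instance):
+
+.. code-block:: python
+
+    # Send every received datagram straight back to its sender.
+    while True:
+        channel, message = channel_layer.receive(["udp.receive"], block=True)
+        if channel is None:
+            continue
+        channel_layer.send(message["reply_channel"], {"data": message["data"]})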
diff --git a/docs/asgi/www.rst b/docs/asgi/www.rst
new file mode 100644
index 0000000..c80027c
--- /dev/null
+++ b/docs/asgi/www.rst
@@ -0,0 +1,453 @@
+=================================================
+HTTP & WebSocket ASGI Message Format (Draft Spec)
+=================================================
+
+.. note::
+   This is still in progress, but is now mostly complete.
+
+The HTTP+WebSocket ASGI sub-specification outlines how to transport HTTP/1.1,
+HTTP/2 and WebSocket connections over an ASGI-compatible channel layer.
+
+It is deliberately intended and designed to be a superset of the WSGI format
+and specifies how to translate between the two for the set of requests that
+are able to be handled by WSGI.
+
+HTTP
+----
+
+The HTTP format covers HTTP/1.0, HTTP/1.1 and HTTP/2, as the changes in
+HTTP/2 are largely on the transport level. A protocol server should give
+different requests on the same connection different reply channels, and
+correctly multiplex the responses back into the same stream as they come in.
+The HTTP version is available as a string in the request message.
+
+HTTP/2 Server Push responses are included, but must be sent prior to the
+main response, and applications must check that ``http_version`` is ``"2"`` before
+sending them; if a protocol server or connection incapable of Server Push
+receives these, it must drop them.
+
+Multiple header fields with the same name are complex in HTTP. RFC 7230
+states that for any header field that can appear multiple times, it is exactly
+equivalent to sending that header field only once with all the values joined by
+commas.
+
+However, RFC 7230 and RFC 6265 make it clear that this rule does not apply to
+the various headers used by HTTP cookies (``Cookie`` and ``Set-Cookie``). The
+``Cookie`` header must only be sent once by a user-agent, but the
+``Set-Cookie`` header may appear repeatedly and cannot be joined by commas.
+The ASGI design decision is to transport both request and response headers as
+lists of 2-element ``[name, value]`` lists and preserve headers exactly as they
+were provided.
+
+
+Request
+'''''''
+
+Sent once for each request that comes into the protocol server. If sending
+this raises ``ChannelFull``, the interface server must respond with a
+500-range error, preferably ``503 Service Unavailable``, and close the connection.
+
+Channel: ``http.request``
+
+Keys:
+
+* ``reply_channel``: Channel name for responses and server pushes.
+
+* ``http_version``: Unicode string, one of ``1.0``, ``1.1`` or ``2``.
+
+* ``method``: Unicode string HTTP method name, uppercased.
+
+* ``scheme``: Unicode string URL scheme portion (likely ``http`` or ``https``).
+ Optional (but must not be empty), default is ``"http"``.
+
+* ``path``: Unicode string HTTP path from URL, with percent escapes decoded
+ and UTF8 byte sequences decoded into characters.
+
+* ``query_string``: Byte string URL portion after the ``?``, not url-decoded.
+
+* ``root_path``: Unicode string that indicates the root path this application
+ is mounted at; same as ``SCRIPT_NAME`` in WSGI. Optional, defaults
+ to ``""``.
+
+* ``headers``: A list of ``[name, value]`` lists, where ``name`` is the
+ byte string header name, and ``value`` is the byte string
+ header value. Order of header values must be preserved from the original HTTP
+ request; order of header names is not important. Duplicates are possible and
+ must be preserved in the message as received.
+ Header names must be lowercased.
+
+* ``body``: Body of the request, as a byte string. Optional, defaults to ``""``.
+ If ``body_channel`` is set, treat as start of body and concatenate
+ on further chunks.
+
+* ``body_channel``: Name of a single-reader channel (containing ``?``) that contains
+ Request Body Chunk messages representing a large request body.
+ Optional, defaults to ``None``. Chunks append to ``body`` if set. Presence of
+ a channel indicates at least one Request Body Chunk message needs to be read,
+ and then further consumption keyed off of the ``more_content`` key in those
+ messages.
+
+* ``client``: List of ``[host, port]`` where ``host`` is a unicode string of the
+ remote host's IPv4 or IPv6 address, and ``port`` is the remote port as an
+ integer. Optional, defaults to ``None``.
+
+* ``server``: List of ``[host, port]`` where ``host`` is the listening address
+ for this server as a unicode string, and ``port`` is the integer listening port.
+ Optional, defaults to ``None``.
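+
+An illustrative minimal Request message, with all optional keys omitted
+(the values are examples only):
+
+.. code-block:: python
+
+    {
+        "reply_channel": "http.response!c734x9ddy2",
+        "http_version": "1.1",
+        "method": "GET",
+        "path": "/hello",
+        "query_string": b"name=world",
+        "headers": [[b"host", b"example.com"]],
+    }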
+
+
+Request Body Chunk
+''''''''''''''''''
+
+Must be sent after an initial Request. If trying to send this raises
+``ChannelFull``, the interface server should wait and try again until it is
+accepted (the consumer at the other end of the channel may not be as fast
+consuming the data as the client is at sending it).
+
+Channel: ``http.request.body?``
+
+Keys:
+
+* ``content``: Byte string of HTTP body content, will be concatenated onto
+ previously received ``content`` values and ``body`` key in Request.
+ Not required if ``closed`` is True, required otherwise.
+
+* ``closed``: True if the client closed the connection prematurely and the
+  rest of the body will never arrive. If you receive this, abandon processing
+  of the HTTP request. Optional, defaults to ``False``.
+
+* ``more_content``: Boolean value signifying if there is additional content
+  to come (as part of a Request Body Chunk message). If ``False``, the request will
+ be taken as complete, and any further messages on the channel
+ will be ignored. Optional, defaults to ``False``.
+
+
+Response
+''''''''
+
+Send after any server pushes, and before any response chunks. If ``ChannelFull``
+is encountered, wait and try again later, optionally giving up after a
+predetermined timeout.
+
+Channel: Defined by server, suggested ``http.response.RANDOMPART!CLIENTID``
+
+Keys:
+
+* ``status``: Integer HTTP status code.
+
+* ``headers``: A list of ``[name, value]`` lists, where ``name`` is the
+ byte string header name, and ``value`` is the byte string
+ header value. Order must be preserved in the HTTP response. Header names
+ must be lowercased. Optional, defaults to an empty list.
+
+* ``content``: Byte string of HTTP body content.
+ Optional, defaults to empty string.
+
+* ``more_content``: Boolean value signifying if there is additional content
+  to come (as part of a Response Chunk message). If ``False``, the response will
+ be taken as complete and closed off, and any further messages on the channel
+ will be ignored. Optional, defaults to ``False``.
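+
+For illustration, sending a complete response (``channel_layer`` is a
+channel layer instance, and ``reply_channel`` is taken from the Request
+message):
+
+.. code-block:: python
+
+    channel_layer.send(reply_channel, {
+        "status": 200,
+        "headers": [[b"content-type", b"text/plain"]],
+        "content": b"Hello, world!",
+    })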
+
+
+Response Chunk
+''''''''''''''
+
+Must be sent after an initial Response. If ``ChannelFull``
+is encountered, wait and try again later.
+
+Channel: Defined by server, suggested ``http.response.RANDOMPART!CLIENTID``
+
+Keys:
+
+* ``content``: Byte string of HTTP body content, will be concatenated onto
+ previously received ``content`` values.
+
+* ``more_content``: Boolean value signifying if there is additional content
+  to come (as part of a Response Chunk message). If ``False``, the response will
+ be taken as complete and closed off, and any further messages on the channel
+ will be ignored. Optional, defaults to ``False``.
+
+
+Server Push
+'''''''''''
+
+Must be sent before any Response or Response Chunk messages. If ``ChannelFull``
+is encountered, wait and try again later, optionally giving up after a
+predetermined timeout, and give up on the entire response this push is
+connected to.
+
+When a server receives this message, it must treat the Request message in the
+``request`` field of the Server Push as though it were a new HTTP request being
+received from the network. A server may, if it chooses, apply all of its
+internal logic to handling this request (e.g. the server may want to try to
+satisfy the request from a cache). Regardless, if the server is unable to
+satisfy the request itself it must create a new ``http.response!`` channel for
+the application to send the Response message on, fill that channel in on the
+``reply_channel`` field of the message, and then send the Request back to the
+application on the ``http.request`` channel.
+
+This approach limits the amount of knowledge the application has to have about
+pushed responses: they essentially appear to the application like a normal HTTP
+request, with the difference being that the application itself triggered the
+request.
+
+If the remote peer does not support server push, either because it's not a
+HTTP/2 peer or because SETTINGS_ENABLE_PUSH is set to 0, the server must do
+nothing in response to this message.
+
+Channel: Defined by server, suggested ``http.response.RANDOMPART!CLIENTID``
+
+Keys:
+
+* ``request``: A Request message. The ``body``, ``body_channel``, and
+ ``reply_channel`` fields MUST be absent: bodies are not allowed on
+ server-pushed requests, and applications should not create reply channels.
+
+
+Disconnect
+''''''''''
+
+Sent when a HTTP connection is closed. This is mainly useful for long-polling,
+where you may have added the response channel to a Group or other set of
+channels you want to trigger a reply to when data arrives.
+
+If ``ChannelFull`` is raised, then give up attempting to send the message;
+consumption is not required.
+
+Channel: ``http.disconnect``
+
+Keys:
+
+* ``reply_channel``: Channel name responses would have been sent on. No longer
+ valid after this message is sent; all messages to it will be dropped.
+
+* ``path``: Unicode string HTTP path from URL, with percent escapes decoded
+ and UTF8 byte sequences decoded into characters.
+
+
+WebSocket
+---------
+
+WebSockets share some HTTP details - they have a path and headers - but also
+have more state. Path and header details are only sent in the connection
+message; applications that need to refer to these during later messages
+should store them in a cache or database.
+
+WebSocket protocol servers should handle PING/PONG requests themselves, and
+send PING frames as necessary to ensure the connection is alive.
+
+Note that you **must** ensure that ``websocket.connect`` is consumed; if an
+interface server gets ``ChannelFull`` on this channel it will drop the
+connection. Django Channels ships with a no-op consumer attached by default;
+we recommend other implementations do the same.
+
+
+Connection
+''''''''''
+
+Sent when the client initially opens a connection and completes the
+WebSocket handshake. If sending this raises ``ChannelFull``, the interface
+server must close the connection with either HTTP status code ``503`` or
+WebSocket close code ``1013``.
+
+This message must be responded to on the ``reply_channel`` with a
+*Send/Close/Accept* message before the socket will pass messages on the
+``receive`` channel. The protocol server should ideally send this message
+during the handshake phase of the WebSocket and not complete the handshake
+until it gets a reply, returning HTTP status code ``403`` if the connection is
+denied. If this is not possible, it must buffer WebSocket frames and not
+send them onto ``websocket.receive`` until a reply is received, and if the
+connection is rejected, return WebSocket close code ``4403``.
+
+Channel: ``websocket.connect``
+
+Keys:
+
+* ``reply_channel``: Channel name for sending data
+
+* ``scheme``: Unicode string URL scheme portion (likely ``ws`` or ``wss``).
+ Optional (but must not be empty), default is ``ws``.
+
+* ``path``: Unicode HTTP path from URL, already urldecoded.
+
+* ``query_string``: Byte string URL portion after the ``?``. Optional, default
+ is empty string.
+
+* ``root_path``: Unicode string that indicates the root path this application
+ is mounted at; same as ``SCRIPT_NAME`` in WSGI. Optional, defaults
+ to empty string.
+
+* ``headers``: List of ``[name, value]``, where ``name`` is the
+ header name as byte string and ``value`` is the header value as a byte
+ string. Order should be preserved from the original HTTP request;
+ duplicates are possible and must be preserved in the message as received.
+ Header names must be lowercased.
+
+* ``client``: List of ``[host, port]`` where ``host`` is a unicode string of the
+ remote host's IPv4 or IPv6 address, and ``port`` is the remote port as an
+ integer. Optional, defaults to ``None``.
+
+* ``server``: List of ``[host, port]`` where ``host`` is the listening address
+ for this server as a unicode string, and ``port`` is the integer listening port.
+ Optional, defaults to ``None``.
+
+* ``order``: The integer value ``0``.
+
+
+Receive
+'''''''
+
+Sent when a data frame is received from the client. If ``ChannelFull`` is
+raised, you may retry sending it, but if it still does not send, the socket must
+be closed with WebSocket close code ``1013``.
+
+Channel: ``websocket.receive``
+
+Keys:
+
+* ``reply_channel``: Channel name for sending data
+
+* ``path``: Path sent during ``connect``, sent to make routing easier for apps.
+
+* ``bytes``: Byte string of frame content, if it was bytes mode, or ``None``.
+
+* ``text``: Unicode string of frame content, if it was text mode, or ``None``.
+
+* ``order``: Order of this frame in the WebSocket stream, starting
+ at 1 (``connect`` is 0).
+
+One of ``bytes`` or ``text`` must be non-``None``.
+
+
+Disconnection
+'''''''''''''
+
+Sent when the connection to the client is lost, whether from the client
+closing the connection, the server closing the connection, or loss of the
+socket.
+
+If ``ChannelFull`` is raised, then give up attempting to send the message;
+consumption is not required.
+
+Channel: ``websocket.disconnect``
+
+Keys:
+
+* ``reply_channel``: Channel name that was used for sending data.
+ Cannot be used to send at this point; provided
+ as a way to identify the connection only.
+
+* ``code``: The WebSocket close code (integer), as per the WebSocket spec.
+
+* ``path``: Path sent during ``connect``, sent to make routing easier for apps.
+
+* ``order``: Order of the disconnection relative to the incoming frames'
+ ``order`` values in ``websocket.receive``.
+
+
+Send/Close/Accept
+'''''''''''''''''
+
+Sends a data frame to the client and/or closes the connection from the
+server end and/or accepts a connection. If ``ChannelFull`` is raised, wait
+and try again.
+
+If received while the connection is waiting for acceptance after a ``connect``
+message:
+
+* If ``accept`` is ``True``, accept the connection (and send any data provided).
+* If ``accept`` is ``False``, reject the connection and do nothing else.
+ If ``bytes`` or ``text`` were also present they must be ignored.
+* If ``bytes`` or ``text`` is present and contains a non-empty value,
+ accept the connection and send the data.
+* If ``close`` is ``True`` or a positive integer, reject the connection. If
+ ``bytes`` or ``text`` is also set and not empty, it should accept the
+ connection, send the frame, then immediately close the connection.
+ Note that any close code integer sent is ignored, as connections are
+ rejected with HTTP's ``403 Forbidden``, unless data is also sent, in which
+ case a full WebSocket close is done with the provided code.
+
+If received while the connection is established:
+
+* If ``bytes`` or ``text`` is present, send the data.
+* If ``close`` is ``True`` or a positive integer, close the connection after
+ any send.
+* ``accept`` is ignored.
+
+Channel: Defined by server, suggested ``websocket.send.RANDOMPART!CLIENTID``
+
+Keys:
+
+* ``bytes``: Byte string of frame content, if in bytes mode, or ``None``.
+
+* ``text``: Unicode string of frame content, if in text mode, or ``None``.
+
+* ``close``: Boolean indicating if the connection should be closed after
+ data is sent, if any. Alternatively, a positive integer specifying the
+ response code. The response code will be 1000 if you pass ``True``.
+ Optional, default ``False``.
+
+* ``accept``: Boolean saying if the connection should be accepted without
+ sending a frame if it is in the handshake phase.
+
+A maximum of one of ``bytes`` or ``text`` may be provided. If both are
+provided, the protocol server should ignore the message entirely.
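+
+For example, a server in the handshake phase that receives the following
+message (values illustrative) would accept the connection and send one text
+frame, leaving the socket open::
+
+    {
+        "text": "welcome",
+        "accept": True,
+    }
+
+whereas ``{"close": True}`` at the same stage would reject the connection
+with HTTP's ``403 Forbidden``.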
+
+
+WSGI Compatibility
+------------------
+
+Part of the design of the HTTP portion of this spec is to make sure it
+aligns well with the WSGI specification, to ensure easy adaptability
+between both specifications and the ability to keep using WSGI servers or
+applications with ASGI.
+
+The adaptability works in two ways:
+
+* WSGI Server to ASGI: A WSGI application can be written that transforms
+ ``environ`` into a Request message, sends it off on the ``http.request``
+ channel, and then waits on a generated response channel for a Response
+ message. This has the disadvantage of tying up an entire WSGI thread
+ to poll one channel, but should not be a massive performance drop if
+ there is no backlog on the request channel, and would work fine for an
+ in-process adapter to run a pure-ASGI web application.
+
+* ASGI to WSGI application: A small wrapper process is needed that listens
+ on the ``http.request`` channel, and decodes incoming Request messages
+ into an ``environ`` dict that matches the WSGI specs, while passing in
+ a ``start_response`` that stores the values for sending with the first
+ content chunk. Then, the application iterates over the WSGI app,
+ packaging each returned content chunk into a Response or Response Chunk
+ message (if more than one is yielded).
+
+There is an almost direct mapping for the various special keys in
+WSGI's ``environ`` variable to the Request message:
+
+* ``REQUEST_METHOD`` is the ``method`` key
+* ``SCRIPT_NAME`` is ``root_path``
+* ``PATH_INFO`` can be derived from ``path`` and ``root_path``
+* ``QUERY_STRING`` is ``query_string``
+* ``CONTENT_TYPE`` can be extracted from ``headers``
+* ``CONTENT_LENGTH`` can be extracted from ``headers``
+* ``SERVER_NAME`` and ``SERVER_PORT`` are in ``server``
+* ``REMOTE_HOST``/``REMOTE_ADDR`` and ``REMOTE_PORT`` are in ``client``
+* ``SERVER_PROTOCOL`` is encoded in ``http_version``
+* ``wsgi.url_scheme`` is ``scheme``
+* ``wsgi.input`` is a StringIO around ``body``
+* ``wsgi.errors`` is directed by the wrapper as needed
+
+The ``start_response`` callable maps similarly to Response:
+
+* The ``status`` argument becomes ``status``, with the reason phrase dropped.
+* ``response_headers`` maps to ``headers``
+
+It may even be possible to map Request Body Chunks in a way that allows
+streaming of body data, though it would likely be easier and sufficient for
+many applications to simply buffer the whole body into memory before calling
+the WSGI application.
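+
+As a rough sketch of the ASGI-to-WSGI direction described above, the key
+mapping might be implemented like so (a sketch only: it assumes a
+fully-buffered ``body``, elides Request Body Chunks, and uses a ``_to_str``
+helper to hedge over which keys arrive as byte strings)::
+
+    import io
+    import sys
+
+    def _to_str(value):
+        # Some keys are byte strings and some unicode depending on the key;
+        # normalise defensively for this sketch.
+        return value.decode("latin1") if isinstance(value, bytes) else value
+
+    def environ_from_request(content):
+        # Build a WSGI environ dict from an ASGI Request message's content.
+        environ = {
+            "REQUEST_METHOD": _to_str(content["method"]),
+            "SCRIPT_NAME": _to_str(content.get("root_path", "")),
+            "PATH_INFO": _to_str(content["path"]),
+            "QUERY_STRING": _to_str(content.get("query_string", "")),
+            "SERVER_PROTOCOL": "HTTP/%s" % _to_str(content["http_version"]),
+            "wsgi.version": (1, 0),
+            "wsgi.url_scheme": _to_str(content.get("scheme", "http")),
+            "wsgi.input": io.BytesIO(content.get("body", b"")),
+            "wsgi.errors": sys.stderr,
+        }
+        if content.get("server"):
+            environ["SERVER_NAME"] = content["server"][0]
+            environ["SERVER_PORT"] = str(content["server"][1])
+        if content.get("client"):
+            environ["REMOTE_ADDR"] = content["client"][0]
+            environ["REMOTE_PORT"] = str(content["client"][1])
+        for name, value in content.get("headers", []):
+            key = "HTTP_" + _to_str(name).upper().replace("-", "_")
+            if key in ("HTTP_CONTENT_TYPE", "HTTP_CONTENT_LENGTH"):
+                key = key[5:]  # CONTENT_TYPE/CONTENT_LENGTH take no HTTP_ prefix
+            # A full implementation would comma-join duplicate headers.
+            environ[key] = _to_str(value)
+        return environ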
+
+
+TODOs
+-----
+
+* Maybe remove ``http_version`` and replace with ``supports_server_push``?
diff --git a/docs/backends.rst b/docs/backends.rst
new file mode 100644
index 0000000..af2f655
--- /dev/null
+++ b/docs/backends.rst
@@ -0,0 +1,131 @@
+Channel Layer Types
+===================
+
+Multiple choices of backend are available, to fill different tradeoffs of
+complexity, throughput and scalability. You can also write your own backend if
+you wish; the spec they conform to is called :doc:`ASGI <asgi>`. Any
+ASGI-compliant channel layer can be used.
+
+Redis
+-----
+
+The Redis layer is the recommended backend to run Channels with, as it
+supports both high throughput on a single Redis server as well as the ability
+to run against a set of Redis servers in a sharded mode.
+
+To use the Redis layer, simply install it from PyPI (it lives in a separate
+package, as we didn't want to force a dependency on redis-py for the main
+install)::
+
+ pip install -U asgi_redis
+
+By default, it will attempt to connect to a Redis server on ``localhost:6379``,
+but you can override this with the ``hosts`` key in its config::
+
+ CHANNEL_LAYERS = {
+ "default": {
+ "BACKEND": "asgi_redis.RedisChannelLayer",
+ "ROUTING": "???",
+ "CONFIG": {
+ "hosts": [("redis-channel-1", 6379), ("redis-channel-2", 6379)],
+ },
+ },
+ }
+
+Consider installing the `hiredis`_ library to improve layer performance::
+
+ pip install hiredis
+
+It will be used automatically if it's installed.
+
+.. _hiredis: https://github.com/redis/hiredis-py
+
+Sharding
+~~~~~~~~
+
+The sharding model is based on consistent hashing - in particular,
+:ref:`response channels <channel-types>` are hashed and used to pick a single
+Redis server that both the interface server and the worker will use.
+
+For normal channels, since any worker can service any channel request, messages
+are simply distributed randomly among all possible servers, and workers will
+pick a single server to listen to. Note that if you run more Redis servers than
+workers, it's very likely that some servers will not have workers listening to
+them; we recommend you always have at least ten workers for each Redis server
+to ensure good distribution. Workers will, however, change server periodically
+(every five seconds or so) so queued messages should eventually get a response.
+
+Note that if you change the set of sharding servers you will need to restart
+all interface servers and workers with the new set before anything works,
+and any in-flight messages will be lost (even with persistence, some will);
+the consistent hashing model relies on all running clients having the same
+settings. Any misconfigured interface server or worker will drop some or all
+messages.
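+
+To illustrate the idea (this is not the exact algorithm ``asgi_redis``
+uses, which is a proper hash ring; just a sketch of the stable
+name-to-server mapping)::
+
+    import binascii
+
+    def server_for(reply_channel, hosts):
+        # The same channel name always maps to the same host, so the
+        # interface server and workers agree without any coordination.
+        index = binascii.crc32(reply_channel.encode("utf8")) % len(hosts)
+        return hosts[index]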
+
+RabbitMQ
+--------
+
+The RabbitMQ layer is comparable to Redis in terms of latency and
+throughput. It can work with a single RabbitMQ node or with an Erlang
+cluster.
+
+You need to install the layer package from PyPI::
+
+ pip install -U asgi_rabbitmq
+
+To use it, you also need to provide a link to a virtual host with granted
+permissions::
+
+ CHANNEL_LAYERS = {
+ "default": {
+ "BACKEND": "asgi_rabbitmq.RabbitmqChannelLayer",
+ "ROUTING": "???",
+ "CONFIG": {
+ "url": "amqp://guest:guest@rabbitmq:5672/%2F",
+ },
+ },
+ }
+
+This layer has complete `documentation `_ of its own.
+
+IPC
+---
+
+The IPC backend uses POSIX shared memory segments and semaphores in order to
+allow different processes on the same machine to communicate with each other.
+
+As it uses shared memory, it does not require any additional servers to be
+running, and is quicker than any network-based channel layer. However,
+it can only run between processes on the same machine.
+
+.. warning::
+ The IPC layer only communicates between processes on the same machine,
+ and while you might initially be tempted to run a cluster of machines all
+ with their own IPC-based set of processes, this will result in groups not
+ working properly; events sent to a group will only go to those channels
+ that joined the group on the same machine. This backend is for
+ single-machine deployments only.
+
+
+In-memory
+---------
+
+The in-memory layer is only useful when running the protocol server and the
+worker server in a single process; the most common case of this
+is ``runserver``, where a server thread, this channel layer, and a worker
+thread all co-exist inside the same Python process.
+
+Its path is ``asgiref.inmemory.ChannelLayer``. If you try and use this channel
+layer with ``runworker``, it will exit, as it does not support cross-process
+communication.
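+
+A minimal configuration sketch (the ``ROUTING`` value here is a placeholder
+for your own project's routing module)::
+
+    CHANNEL_LAYERS = {
+        "default": {
+            "BACKEND": "asgiref.inmemory.ChannelLayer",
+            "ROUTING": "my_project.routing.channel_routing",
+        },
+    }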
+
+
+Writing Custom Channel Layers
+-----------------------------
+
+The interface channel layers present to Django and other software that
+communicates over them is codified in a specification called :doc:`ASGI <asgi>`.
+
+Any channel layer that conforms to the :doc:`ASGI spec <asgi>` can be used
+by Django; just set ``BACKEND`` to the class to instantiate and ``CONFIG`` to
+a dict of keyword arguments to initialize the class with.
diff --git a/docs/binding.rst b/docs/binding.rst
new file mode 100644
index 0000000..64e8ba1
--- /dev/null
+++ b/docs/binding.rst
@@ -0,0 +1,168 @@
+Data Binding
+============
+
+The Channels data binding framework automates the process of tying Django
+models into frontend views, such as javascript-powered website UIs. It provides
+a quick and flexible way to generate messages on Groups for model changes
+and to accept messages that change models themselves.
+
+The main target for the moment is WebSockets, but the framework is flexible
+enough to be used over any protocol.
+
+What does data binding allow?
+-----------------------------
+
+Data binding in Channels works two ways:
+
+* Outbound, where model changes made through Django are sent out to listening
+ clients. This includes creation, updates and deletion of instances.
+
+* Inbound, where a standardised message format allows creation, update and
+ deletion of instances to be made by clients sending messages.
+
+Combined, these allow a UI to be designed that automatically updates to
+reflect new values and reflects across clients. A live blog is easily done
+using data binding against the post object, for example, or an edit interface
+can show data live as it's edited by other users.
+
+It has some limitations:
+
+* Signals are used to power outbound binding, so if you change the values of
+ a model outside of Django (or use the ``.update()`` method on a QuerySet),
+ the signals are not triggered and the change will not be sent out. You
+ can trigger changes yourself, but you'll need to source the events from the
+ right place for your system.
+
+* The built-in serializers are based on the built-in Django ones and can only
+ handle certain field types; for more flexibility, you can plug in something
+ like the Django REST Framework serializers.
+
+Getting Started
+---------------
+
+A single Binding subclass will handle outbound and inbound binding for a model,
+and you can have multiple bindings per model (if you want different formats
+or permission checks, for example).
+
+You can inherit from the base Binding and provide all the methods needed, but
+we'll focus on the WebSocket JSON variant here, as it's the easiest way to
+get started and likely close to what you want.
+
+Start off like this::
+
+ from django.db import models
+ from channels.binding.websockets import WebsocketBinding
+
+ class IntegerValue(models.Model):
+
+ name = models.CharField(max_length=100, unique=True)
+ value = models.IntegerField(default=0)
+
+ class IntegerValueBinding(WebsocketBinding):
+
+ model = IntegerValue
+ stream = "intval"
+ fields = ["name", "value"]
+
+ @classmethod
+ def group_names(cls, instance):
+ return ["intval-updates"]
+
+ def has_permission(self, user, action, pk):
+ return True
+
+This defines a WebSocket binding - so it knows to send outgoing messages
+formatted as JSON WebSocket frames - and provides the three things you must
+always provide:
+
+* ``fields`` is a whitelist of fields to return in the serialized request.
+ Channels does not default to all fields for security concerns; if you want
+ this, set it to the value ``["__all__"]``. As an alternative, ``exclude``
+ acts as a blacklist of fields.
+
+* ``group_names`` returns a list of groups to send outbound updates to based
+ on the instance. For example, you could dispatch posts on different
+ liveblogs to groups that included the parent blog ID in the name; here, we
+ just use a fixed group name. Based on how ``group_names`` changes as the
+ instance changes, Channels will work out if clients need ``create``,
+ ``update`` or ``delete`` messages (or if the change is hidden from them).
+
+* ``has_permission`` returns if an inbound binding update is allowed to actually
+ be carried out on the model. We've been very unsafe and made it always return
+ ``True``, but here is where you would check against either Django's or your
+ own permission system to see if the user is allowed that action.
+
+For reference, ``action`` is always one of the unicode strings ``"create"``,
+``"update"`` or ``"delete"``. You also supply the :ref:`multiplexing`
+stream name to provide to the client - you must use multiplexing if you
+use WebSocket data binding.
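+
+For orientation, an inbound update arriving over the multiplexed WebSocket
+is a JSON frame shaped roughly like this (values are illustrative)::
+
+    {
+        "stream": "intval",
+        "payload": {
+            "action": "update",
+            "pk": 1,
+            "data": {"value": 42}
+        }
+    }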
+
+Just adding the binding like this in a place where it will be imported will
+get outbound messages sending, but you still need a Consumer that will both
+accept incoming binding updates and add people to the right Groups when they
+connect. The WebSocket binding classes use the standard :ref:`multiplexing`,
+so you just need to use that::
+
+ from channels.generic.websockets import WebsocketDemultiplexer
+ from .binding import IntegerValueBinding
+
+ class Demultiplexer(WebsocketDemultiplexer):
+
+ consumers = {
+ "intval": IntegerValueBinding.consumer,
+ }
+
+ def connection_groups(self):
+ return ["intval-updates"]
+
+As well as the standard stream-to-consumer mapping, you also need to set
+``connection_groups``, a list of groups to put people in when they connect.
+This should match the logic of ``group_names`` on your binding - we've used
+our fixed group name again. Notice that the binding has a ``.consumer`` attribute;
+this is a standard WebSocket-JSON consumer, that the demultiplexer can pass
+demultiplexed ``websocket.receive`` messages to.
+
+Tie that into your routing, and you're ready to go::
+
+    from channels import route_class
+
+    from .consumers import Demultiplexer
+
+ channel_routing = [
+ route_class(Demultiplexer, path="^/binding/"),
+ ]
+
+
+Frontend Considerations
+-----------------------
+
+You can use the standard :doc:`Channels WebSocket wrapper <javascript>` to
+automatically run demultiplexing, and then tie the events you receive into your
+frontend framework of choice based on ``action``, ``pk`` and ``data``.
+
+.. note::
+
+ Common plugins for data binding against popular JavaScript frameworks are
+ wanted; if you're interested, please get in touch.
+
+
+Custom Serialization/Protocols
+------------------------------
+
+Rather than inheriting from the ``WebsocketBinding``, you can inherit directly
+from the base ``Binding`` class and implement serialization and deserialization
+yourself. Until proper reference documentation for this is written, we
+recommend looking at the source code in ``channels/bindings/base.py``; it's
+reasonably well-commented.
+
+
+Dealing with Disconnection
+--------------------------
+
+Because the data binding Channels ships with has no history of events,
+it means that when a disconnection happens you may miss events that happen
+during your offline time. For this reason, it's recommended you reload
+data directly using an API call once connection has been re-established,
+don't rely on the live updates for critical functionality, or have UI designs
+that cope well with missing data (e.g. ones where it's all updates and no
+creates, so the next update will correct everything).
diff --git a/docs/community.rst b/docs/community.rst
new file mode 100644
index 0000000..e844e31
--- /dev/null
+++ b/docs/community.rst
@@ -0,0 +1,18 @@
+Community Projects
+==================
+
+These projects from the community are developed on top of Channels:
+
+* Djangobot_, a bi-directional interface server for Slack.
+* knocker_, a generic desktop-notification system.
+* Beatserver_, a periodic task scheduler for Django Channels.
+* cq_, a simple distributed task system.
+* Debugpannel_, a Django Debug Toolbar panel for Channels.
+
+If you'd like to add your project, please submit a PR with a link and brief description.
+
+.. _Djangobot: https://github.com/djangobot/djangobot
+.. _knocker: https://github.com/nephila/django-knocker
+.. _Beatserver: https://github.com/rajasimon/beatserver
+.. _cq: https://github.com/furious-luke/django-cq
+.. _Debugpannel: https://github.com/Krukov/django-channels-panel
diff --git a/docs/concepts.rst b/docs/concepts.rst
new file mode 100644
index 0000000..26ba9e8
--- /dev/null
+++ b/docs/concepts.rst
@@ -0,0 +1,273 @@
+Channels Concepts
+=================
+
+Django's traditional view of the world revolves around requests and responses;
+a request comes in, Django is fired up to serve it, generates a response to
+send, and then Django goes away and waits for the next request.
+
+That was fine when the internet was driven by simple browser interactions,
+but the modern Web includes things like WebSockets and HTTP2 server push,
+which allow websites to communicate outside of this traditional cycle.
+
+And, beyond that, there are plenty of non-critical tasks that applications
+could easily offload until after a response has been sent - like saving things
+into a cache or thumbnailing newly-uploaded images.
+
+Channels changes the way Django runs to be "event oriented" - rather than
+just responding to requests, Django responds to a wide array of events
+sent on *channels*. There's still no persistent state - each event handler,
+or *consumer* as we call them, is called independently in a way much like a
+view is called.
+
+Let's look at what *channels* are first.
+
+.. _what-are-channels:
+
+What is a channel?
+------------------
+
+The core of the system is, unsurprisingly, a data structure called a *channel*.
+What is a channel? It is an *ordered*, *first-in first-out queue* with
+*message expiry* and *at-most-once delivery* to *only one listener at a time*.
+
+You can think of it as analogous to a task queue - messages are put onto
+the channel by *producers*, and then given to just one of the *consumers*
+listening to that channel.
+
+By *at-most-once* we mean that either one consumer gets the message or nobody
+does (if the channel implementation crashes, let's say). The
+alternative is *at-least-once*, where normally one consumer gets the message
+but when things crash it's sent to more than one, which is not the trade-off
+we want.
+
+There are a couple of other limitations - messages must be made of
+serializable types, and stay under a certain size limit - but these are
+implementation details you won't need to worry about until you get to more
+advanced usage.
+
+The channels have capacity, so a lot of producers can write lots of messages
+into a channel with no consumers and then a consumer can come along later and
+will start getting served those queued messages.
+
+If you've used `channels in Go `_: Go channels
+are reasonably similar to Django ones. The key difference is that
+Django channels are network-transparent; the implementations
+of channels we provide are all accessible across a network to consumers
+and producers running in different processes or on different machines.
+
+Inside a network, we identify channels uniquely by a name string - you can
+send to any named channel from any machine connected to the same channel
+backend. If two different machines both write to the ``http.request``
+channel, they're writing into the same channel.
+
+How do we use channels?
+-----------------------
+
+So how is Django using those channels? Inside Django
+you can write a function to consume a channel::
+
+ def my_consumer(message):
+ pass
+
+And then assign a channel to it in the channel routing::
+
+ channel_routing = {
+ "some-channel": "myapp.consumers.my_consumer",
+ }
+
+This means that for every message on the channel, Django will call that
+consumer function with a message object (message objects have a "content"
+attribute which is always a dict of data, and a "channel" attribute which
+is the channel it came from, as well as some others).
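+
+For instance, a minimal consumer that inspects those attributes and replies
+might look like this (a sketch; it assumes the message arrived with a
+``reply_channel``, as messages from interface servers do)::
+
+    def my_consumer(message):
+        # message.content is always a dict of serializable data
+        print("Got %r on %s" % (message.content, message.channel.name))
+        # Reply channels let you send data back towards the client
+        message.reply_channel.send({"text": "Got it!"})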
+
+Instead of having Django run in the traditional request-response mode,
+Channels changes Django so that it runs in a worker mode - it listens on
+all channels that have consumers assigned, and when a message arrives on
+one, it runs the relevant consumer. So rather than running in just a
+single process tied to a WSGI server, Django runs in three separate layers:
+
+* Interface servers, which communicate between Django and the outside world.
+  This includes a WSGI adapter as well as a separate WebSocket server -
+  this is explained and covered in :ref:`run-interface-servers`.
+
+* The channel backend, which is a combination of pluggable Python code and
+ a datastore (e.g. Redis, or a shared memory segment) responsible for
+ transporting messages.
+
+* The workers, that listen on all relevant channels and run consumer code
+ when a message is ready.
+
+This may seem relatively simplistic, but that's part of the design; rather than
+try and have a full asynchronous architecture, we're just introducing a
+slightly more complex abstraction than that presented by Django views.
+
+A view takes a request and returns a response; a consumer takes a channel
+message and can write out zero to many other channel messages.
+
+Now, let's make a channel for requests (called ``http.request``),
+and a channel per client for responses (e.g. ``http.response!o4F2h2Fd``),
+where the response channel is a property (``reply_channel``) of the request
+message. Suddenly, a view is merely another example of a consumer::
+
+ # Listens on http.request
+ def my_consumer(message):
+ # Decode the request from message format to a Request object
+ django_request = AsgiRequest(message)
+ # Run view
+ django_response = view(django_request)
+ # Encode the response into message format
+ for chunk in AsgiHandler.encode_response(django_response):
+ message.reply_channel.send(chunk)
+
+In fact, this is how Channels works. The interface servers transform connections
+from the outside world (HTTP, WebSockets, etc.) into messages on channels,
+and then you write workers to handle these messages. Usually you leave normal
+HTTP up to Django's built-in consumers that plug it into the view/template
+system, but you can override it to add functionality if you want.
+
+However, the crucial part is that you can run code (and so send on channels) in
+response to any event - and that includes ones you create. You can trigger
+on model saves, on other incoming messages, or from code paths inside views
+and forms. That approach comes in handy for push-style
+code - where you use WebSockets or HTTP long-polling to notify
+clients of changes in real time (messages in a chat, perhaps, or live updates
+in an admin as another user edits something).
+
+.. _channel-types:
+
+Channel Types
+-------------
+
+There are actually two major uses for channels in
+this model. The first, and more obvious one, is the dispatching of work to
+consumers - a message gets added to a channel, and then any one of the workers
+can pick it up and run the consumer.
+
+The second kind of channel, however, is used for replies. Notably, these only
+have one thing listening on them - the interface server. Each reply channel
+is individually named and has to be routed back to the interface server where
+its client is terminated.
+
+This is not a massive difference - they both still behave according to the core
+definition of a *channel* - but presents some problems when we're looking to
+scale things up. We can happily randomly load-balance normal channels across
+clusters of channel servers and workers - after all, any worker can process
+the message - but response channels would have to have their messages sent
+to the channel server they're listening on.
+
+For this reason, Channels treats these as two different *channel types*, and
+denotes a *reply channel* by having the channel name contain
+the character ``!`` - e.g. ``http.response!f5G3fE21f``. *Normal
+channels* do not contain it; like reply channel names, though, they
+must contain only the characters ``a-z A-Z 0-9 - _``, and be less
+than 200 characters long.
+
+It's optional for a backend implementation to understand this - after all,
+it's only important at scale, where you want to shard the two types differently
+— but it's present nonetheless. For more on scaling, and how to handle channel
+types if you're writing a backend or interface server, see :ref:`scaling-up`.
+
+Groups
+------
+
+Because channels only deliver to a single listener, they can't do broadcast;
+if you want to send a message to an arbitrary group of clients, you need to
+keep track of the reply channels of all the clients you wish to send to.
+
+If I had a liveblog where I wanted to push out updates whenever a new post is
+saved, I could register a handler for the ``post_save`` signal and keep a
+set of channels (here, using Redis) to send updates to::
+
+    import json
+
+    import redis
+
+    from django.db.models.signals import post_save
+    from django.dispatch import receiver
+
+    from channels import Channel
+
+    from .models import BlogUpdate  # your app's model
+
+    redis_conn = redis.Redis("localhost", 6379)
+
+ @receiver(post_save, sender=BlogUpdate)
+ def send_update(sender, instance, **kwargs):
+ # Loop through all reply channels and send the update
+ for reply_channel in redis_conn.smembers("readers"):
+ Channel(reply_channel).send({
+ "text": json.dumps({
+ "id": instance.id,
+ "content": instance.content
+ })
+ })
+
+ # Connected to websocket.connect
+ def ws_connect(message):
+ # Add to reader set
+ redis_conn.sadd("readers", message.reply_channel.name)
+
+While this will work, there's a small problem - we never remove people from
+the ``readers`` set when they disconnect. We could add a consumer that
+listens to ``websocket.disconnect`` to do that, but we'd also need to
+have some kind of expiry in case an interface server is forced to quit or
+loses power before it can send disconnect signals - your code will never
+see any disconnect notification but the reply channel is completely
+invalid and messages you send there will sit there until they expire.
+
+Because the basic design of channels is stateless, the channel server has no
+concept of "closing" a channel if an interface server goes away - after all,
+channels are meant to hold messages until a consumer comes along (and some
+types of interface server, e.g. an SMS gateway, could theoretically serve
+any client from any interface server).
+
+We don't particularly care if a disconnected client doesn't get the messages
+sent to the group - after all, it disconnected - but we do care about
+cluttering up the channel backend tracking all of these clients that are no
+longer around (and possibly, eventually getting a collision on the reply
+channel name and sending someone messages not meant for them, though that would
+likely take weeks).
+
+Now, we could go back into our example above and add an expiring set and keep
+track of expiry times and so forth, but what would be the point of a framework
+if it made you add boilerplate code? Instead, Channels implements this
+abstraction as a core concept called Groups::
+
+    import json
+
+    from django.db.models.signals import post_save
+    from django.dispatch import receiver
+
+    from channels import Group
+
+    from .models import BlogUpdate  # your app's model
+
+    @receiver(post_save, sender=BlogUpdate)
+ def send_update(sender, instance, **kwargs):
+ Group("liveblog").send({
+ "text": json.dumps({
+ "id": instance.id,
+ "content": instance.content
+ })
+ })
+
+ # Connected to websocket.connect
+ def ws_connect(message):
+ # Add to reader group
+ Group("liveblog").add(message.reply_channel)
+ # Accept the connection request
+ message.reply_channel.send({"accept": True})
+
+ # Connected to websocket.disconnect
+ def ws_disconnect(message):
+ # Remove from reader group on clean disconnect
+ Group("liveblog").discard(message.reply_channel)
+
+Not only do groups have their own ``send()`` method (which backends can provide
+an efficient implementation of), they also automatically manage expiry of
+the group members - when the channel starts having messages expire on it due
+to non-consumption, we go in and remove it from all the groups it's in as well.
+Of course, you should still remove things from the group on disconnect if you
+can; the expiry code is there to catch cases where the disconnect message
+doesn't make it for some reason.
+
+Groups are generally only useful for reply channels (ones containing
+the character ``!``), as these are unique-per-client, but can be used for
+normal channels as well if you wish.
+
+Next Steps
+----------
+
+That's the high-level overview of channels and groups, and how you should
+start thinking about them. Remember, Django provides some channels
+but you're free to make and consume your own, and all channels are
+network-transparent.
+
+One thing channels do not do, however, is guarantee delivery. If you need
+certainty that tasks will complete, use a system designed for this with
+retries and persistence (e.g. Celery), or alternatively make a management
+command that checks for completion and re-submits a message to the channel
+if nothing is completed (rolling your own retry logic, essentially).
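+
+A sketch of that last pattern, assuming a hypothetical ``Image`` model and a
+``thumbnail`` channel whose consumer fills in the ``thumbnail`` field::
+
+    from django.core.management.base import BaseCommand
+
+    from channels import Channel
+
+    from myapp.models import Image  # hypothetical model
+
+    class Command(BaseCommand):
+        # Re-submit work for any image whose thumbnail never got made.
+        def handle(self, *args, **options):
+            for image in Image.objects.filter(thumbnail=""):
+                Channel("thumbnail").send({"image_id": image.id})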
+
+We'll cover more about what kind of tasks fit well into Channels in the rest
+of the documentation, but for now, let's progress to :doc:`getting-started`
+and write some code.
diff --git a/docs/conf.py b/docs/conf.py
new file mode 100644
index 0000000..2ed3472
--- /dev/null
+++ b/docs/conf.py
@@ -0,0 +1,260 @@
+# -*- coding: utf-8 -*-
+#
+# Channels documentation build configuration file, created by
+# sphinx-quickstart on Fri Jun 19 11:37:58 2015.
+#
+# This file is execfile()d with the current directory set to its
+# containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import sys
+import os
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+sys.path.insert(0, os.path.abspath('..'))
+
+from channels import __version__ # noqa
+
+# -- General configuration ------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+#needs_sphinx = '1.0'
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = []
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix of source filenames.
+source_suffix = '.rst'
+
+# The encoding of source files.
+#source_encoding = 'utf-8-sig'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'Channels'
+copyright = u'2017, Andrew Godwin'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+version = __version__
+# The full version, including alpha/beta/rc tags.
+release = __version__
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#today = ''
+# Else, today_fmt is used as the format for a strftime call.
+#today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+exclude_patterns = ['_build']
+
+# The reST default role (used for this markup: `text`) to use for all
+# documents.
+#default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+#modindex_common_prefix = []
+
+# If true, keep warnings as "system message" paragraphs in the built documents.
+#keep_warnings = False
+
+
+# -- Options for HTML output ----------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. See the documentation for
+# a list of builtin themes.
+html_theme = 'default'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further. For a list of options available for each theme, see the
+# documentation.
+#html_theme_options = {}
+
+# Add any paths that contain custom themes here, relative to this directory.
+#html_theme_path = []
+
+# The name for this set of Sphinx documents. If None, it defaults to
+# " v documentation".
+#html_title = None
+
+# A shorter title for the navigation bar. Default is the same as html_title.
+#html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+#html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+#html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+# Add any extra paths that contain custom files (such as robots.txt or
+# .htaccess) here, relative to this directory. These files are copied
+# directly to the root of the documentation.
+#html_extra_path = []
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+#html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+#html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+#html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#html_additional_pages = {}
+
+# If false, no module index is generated.
+#html_domain_indices = True
+
+# If false, no index is generated.
+#html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+#html_show_sourcelink = True
+
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+#html_show_sphinx = True
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+#html_show_copyright = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a tag referring to it. The value of this option must be the
+# base URL from which the finished HTML is served.
+#html_use_opensearch = ''
+
+# This is the file name suffix for HTML files (e.g. ".xhtml").
+#html_file_suffix = None
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'Channelsdoc'
+
+
+# -- Options for LaTeX output ---------------------------------------------
+
+latex_elements = {
+# The paper size ('letterpaper' or 'a4paper').
+#'papersize': 'letterpaper',
+
+# The font size ('10pt', '11pt' or '12pt').
+#'pointsize': '10pt',
+
+# Additional stuff for the LaTeX preamble.
+#'preamble': '',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title,
+# author, documentclass [howto, manual, or own class]).
+latex_documents = [
+ ('index', 'Channels.tex', u'Channels Documentation',
+ u'Andrew Godwin', 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#latex_use_parts = False
+
+# If true, show page references after internal links.
+#latex_show_pagerefs = False
+
+# If true, show URL addresses after external links.
+#latex_show_urls = False
+
+# Documents to append as an appendix to all manuals.
+#latex_appendices = []
+
+# If false, no module index is generated.
+#latex_domain_indices = True
+
+
+# -- Options for manual page output ---------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+ ('index', 'channels', u'Channels Documentation',
+ [u'Andrew Godwin'], 1)
+]
+
+# If true, show URL addresses after external links.
+#man_show_urls = False
+
+
+# -- Options for Texinfo output -------------------------------------------
+
+# Grouping the document tree into Texinfo files. List of tuples
+# (source start file, target name, title, author,
+# dir menu entry, description, category)
+texinfo_documents = [
+ ('index', 'Channels', u'Channels Documentation',
+ u'Andrew Godwin', 'Channels', 'One line description of project.',
+ 'Miscellaneous'),
+]
+
+# Documents to append as an appendix to all manuals.
+#texinfo_appendices = []
+
+# If false, no module index is generated.
+#texinfo_domain_indices = True
+
+# How to display URL addresses: 'footnote', 'no', or 'inline'.
+#texinfo_show_urls = 'footnote'
+
+# If true, do not generate a @detailmenu in the "Top" node's menu.
+#texinfo_no_detailmenu = False
diff --git a/docs/contributing.rst b/docs/contributing.rst
new file mode 100644
index 0000000..9e03e40
--- /dev/null
+++ b/docs/contributing.rst
@@ -0,0 +1,81 @@
+Contributing
+============
+
+If you're looking to contribute to Channels, then please read on - we encourage
+contributions both large and small, from both novice and seasoned developers.
+
+
+What can I work on?
+-------------------
+
+We're looking for help with the following areas:
+
+ * Documentation and tutorial writing
+ * Bugfixing and testing
+ * Feature polish and occasional new feature design
+ * Case studies and writeups
+
+You can find what we're looking to work on in the GitHub issues list for each
+of the Channels sub-projects:
+
+ * `Channels issues `_, for the Django integration and overall project efforts
+ * `Daphne issues `_, for the HTTP and Websocket termination
+ * `asgiref issues `_, for the base ASGI library/memory backend
+ * `asgi_redis issues `_, for the Redis channel backend
+ * `asgi_rabbitmq `_, for the RabbitMQ channel backend
+ * `asgi_ipc issues `_, for the POSIX IPC channel backend
+
+Issues are categorized by difficulty level:
+
+ * ``exp/beginner``: Easy issues suitable for a first-time contributor.
+ * ``exp/intermediate``: Moderate issues that need skill and a day or two to solve.
+ * ``exp/advanced``: Difficult issues that require expertise and potentially weeks of work.
+
+They are also classified by type:
+
+ * ``documentation``: Documentation issues. Pick these if you want to help us by writing docs.
+ * ``bug``: A bug in existing code. Usually easier for beginners as there's a defined thing to fix.
+ * ``enhancement``: A new feature for the code; may be a bit more open-ended.
+
+You should filter the issues list by the experience level and type of work
+you'd like to do, and then if you want to take something on leave a comment
+and assign yourself to it. If you want advice about how to take on a bug,
+leave a comment asking about it, or pop into the IRC channel at
+``#django-channels`` on Freenode and we'll be happy to help.
+
+The issues are also just a suggested list - any offer to help is welcome as long
+as it fits the project goals, but you should make an issue for the thing you
+wish to do and discuss it first if it's relatively large (but if you just found
+a small bug and want to fix it, sending us a pull request straight away is fine).
+
+
+I'm a novice contributor/developer - can I help?
+------------------------------------------------
+
+Of course! The issues labelled with ``exp/beginner`` are a perfect place to
+get started, as they're usually small and well defined. If you want help with
+one of them, pop into the IRC channel at ``#django-channels`` on Freenode or
+get in touch with Andrew directly at andrew@aeracode.org.
+
+
+Can you pay me for my time?
+---------------------------
+
+Thanks to Mozilla, we have a reasonable budget to pay people for their time
+working on all of the above sorts of tasks and more. Generally, we'd prefer
+to fund larger projects (you can find these labelled as ``epic-project`` in the
+issues lists) to reduce the administrative overhead, but we're open to any
+proposal.
+
+If you're interested in working on something and being paid, you'll need to
+draw up a short proposal and get in touch with the committee, discuss the work
+and your history with open-source contribution (we strongly prefer that you have
+a proven track record on at least a few things) and the amount you'd like to be paid.
+
+If you're interested in working on one of these tasks, get in touch with
+Andrew Godwin (andrew@aeracode.org) as a first point of contact; he can help
+talk you through what's involved, and help judge/refine your proposal before
+it goes to the committee.
+
+Tasks not on any issues list can also be proposed; Andrew can help talk about them
+and if they would be sensible to do.
diff --git a/docs/delay.rst b/docs/delay.rst
new file mode 100644
index 0000000..ec9630a
--- /dev/null
+++ b/docs/delay.rst
@@ -0,0 +1,46 @@
+Delay Server
+============
+
+Channels has an optional app ``channels.delay`` that implements the :doc:`ASGI Delay Protocol `.
+
+The server is exposed through a custom management command ``rundelay`` which listens to
+the ``asgi.delay`` channel for messages to delay.
+
+
+Getting Started with Delay
+--------------------------
+
+To install the app, add ``channels.delay`` to ``INSTALLED_APPS``::
+
+ INSTALLED_APPS = (
+ ...
+ 'channels',
+ 'channels.delay'
+ )
+
+Run ``migrate`` to create the tables::
+
+    python manage.py migrate
+
+Run the delay process to start processing messages::
+
+    python manage.py rundelay
+
+Now you're ready to start delaying messages.
+
+Delaying Messages
+-----------------
+
+To delay a message by a fixed number of milliseconds, use the ``delay`` parameter.
+
+Here's an example::
+
+ from channels import Channel
+
+ delayed_message = {
+ 'channel': 'example_channel',
+ 'content': {'x': 1},
+ 'delay': 10 * 1000
+ }
+ # The message will be delayed 10 seconds by the server and then sent
+ Channel('asgi.delay').send(delayed_message, immediately=True)
diff --git a/docs/deploying.rst b/docs/deploying.rst
new file mode 100644
index 0000000..c041e41
--- /dev/null
+++ b/docs/deploying.rst
@@ -0,0 +1,326 @@
+Deploying
+=========
+
+Deploying applications using channels requires a few more steps than a normal
+Django WSGI application, but you have a couple of options as to how to deploy
+it and how much of your traffic you wish to route through the channel layers.
+
+Firstly, remember that it's an entirely optional part of Django.
+If you leave a project with the default settings (no ``CHANNEL_LAYERS``),
+it'll just run and work like a normal WSGI app.
+
+When you want to enable channels in production, you need to do three things:
+
+* Set up a channel backend
+* Run worker servers
+* Run interface servers
+
+You can set things up in one of two ways; either route all traffic through
+an :ref:`HTTP/WebSocket interface server <run-interface-servers>`, removing the need
+to run a WSGI server at all; or, just route WebSockets and long-poll
+HTTP connections to the interface server, and :ref:`leave other pages served
+by a standard WSGI server <wsgi-with-asgi>`.
+
+Routing all traffic through the interface server lets you have WebSockets and
+long-polling coexist in the same URL tree with no configuration; if you split
+the traffic up, you'll need to configure a webserver or layer 7 loadbalancer
+in front of the two servers to route requests to the correct place based on
+path or domain. Both methods are covered below.
+
+
+Setting up a channel backend
+----------------------------
+
+The first step is to set up a channel backend. If you followed the
+:doc:`getting-started` guide, you will have ended up using the in-memory
+backend, which is useful for ``runserver``, but as it only works inside the
+same process, useless for actually running separate worker and interface
+servers.
+
+Instead, take a look at the list of :doc:`backends`, and choose one that
+fits your requirements (additionally, you could use a third-party pluggable
+backend or write your own - that page also explains the interface and rules
+a backend has to follow).
+
+Typically a channel backend will connect to one or more central servers that
+serve as the communication layer - for example, the Redis backend connects
+to a Redis server. All this goes into the ``CHANNEL_LAYERS`` setting;
+here's an example for a remote Redis server::
+
+ CHANNEL_LAYERS = {
+ "default": {
+ "BACKEND": "asgi_redis.RedisChannelLayer",
+ "CONFIG": {
+ "hosts": [("redis-server-name", 6379)],
+ },
+ "ROUTING": "my_project.routing.channel_routing",
+ },
+ }
+
+To use the Redis backend you have to install it::
+
+ pip install -U asgi_redis
+
+Some backends, though, don't require an extra server, like the IPC backend,
+which works between processes on the same machine but not over the network
+(it's available in the ``asgi_ipc`` package)::
+
+ CHANNEL_LAYERS = {
+ "default": {
+ "BACKEND": "asgi_ipc.IPCChannelLayer",
+ "ROUTING": "my_project.routing.channel_routing",
+ "CONFIG": {
+ "prefix": "mysite",
+ },
+ },
+ }
+
+Make sure the same settings file is used across all your workers and interface
+servers; without it, they won't be able to talk to each other and things
+will just fail to work.
+
+If you prefer to use RabbitMQ layer, please refer to its
+`documentation `_.
+Usually your config will end up like this::
+
+ CHANNEL_LAYERS = {
+ "default": {
+ "BACKEND": "asgi_rabbitmq.RabbitmqChannelLayer",
+ "ROUTING": "my_project.routing.channel_routing",
+ "CONFIG": {
+ "url": "amqp://guest:guest@rabbitmq:5672/%2F",
+ },
+ },
+ }
+
+Run worker servers
+------------------
+
+Because the work of running consumers is decoupled from the work of talking
+to HTTP, WebSocket and other client connections, you need to run a cluster
+of "worker servers" to do all the processing.
+
+Each server is single-threaded, so it's recommended you run around one or two per
+core on each machine; it's safe to run as many concurrent workers on the same
+machine as you like, as they don't open any ports (all they do is talk to
+the channel backend).
+
+To run a worker server, just run::
+
+ python manage.py runworker
+
+Make sure you run this inside an init system or a program like supervisord that
+can take care of restarting the process when it exits; the worker server has
+no retry-on-exit logic, though it will absorb tracebacks from inside consumers
+and forward them to stderr.
+
+Make sure you keep an eye on how busy your workers are; if they get overloaded,
+requests will take longer and longer to return as the messages queue up
+(until the expiry or capacity limit is reached, at which point HTTP connections will
+start dropping).
+
+In a more complex project, you won't want all your channels being served by the
+same workers, especially if you have long-running tasks (if you serve them from
+the same workers as HTTP requests, there's a chance long-running tasks could
+block up all the workers and delay responding to HTTP requests).
+
+To manage this, it's possible to tell workers to either limit themselves to
+just certain channel names or ignore specific channels using the
+``--only-channels`` and ``--exclude-channels`` options. Here's an example
+of configuring a worker to only serve HTTP and WebSocket requests::
+
+ python manage.py runworker --only-channels=http.* --only-channels=websocket.*
+
+Or telling a worker to ignore all messages on the "thumbnail" channel::
+
+ python manage.py runworker --exclude-channels=thumbnail
+
+
+.. _run-interface-servers:
+
+Run interface servers
+---------------------
+
+The final piece of the puzzle is the "interface servers", the processes that
+do the work of taking incoming requests and loading them into the channels
+system.
+
+If you want to support WebSockets, long-poll HTTP requests and other Channels
+features, you'll need to run a native ASGI interface server, as the WSGI
+specification has no support for running these kinds of requests concurrently.
+We ship with an interface server that we recommend you use called
+`Daphne `_; it supports WebSockets,
+long-poll HTTP requests, HTTP/2 and performs quite well.
+
+You can just keep running your Django code as a WSGI app if you like, behind
+something like uwsgi or gunicorn; this won't let you support WebSockets, though,
+so you'll need to run a separate interface server to terminate those connections
+and configure routing in front of your interface and WSGI servers to route
+requests appropriately.
+
+If you use Daphne for all traffic, it auto-negotiates between HTTP and WebSocket,
+so there's no need to have your WebSockets on a separate domain or path (and
+they'll be able to share cookies with your normal view code, which isn't
+possible if you separate by domain rather than path).
+
+To run Daphne, it just needs to be supplied with a channel backend, in much
+the same way a WSGI server needs to be given an application.
+First, make sure your project has an ``asgi.py`` file that looks like this
+(it should live next to ``wsgi.py``)::
+
+ import os
+ from channels.asgi import get_channel_layer
+
+ os.environ.setdefault("DJANGO_SETTINGS_MODULE", "my_project.settings")
+
+ channel_layer = get_channel_layer()
+
+Then, you can run Daphne and supply the channel layer as the argument::
+
+ daphne my_project.asgi:channel_layer
+
+Like ``runworker``, you should place this inside an init system or something
+like supervisord to ensure it is re-run if it exits unexpectedly.
+
+If you only run Daphne and no workers, all of your page requests will seem to
+hang forever; that's because Daphne doesn't have any worker servers to handle
+the request and it's waiting for one to appear (while ``runserver`` also uses
+Daphne, it launches worker threads along with it in the same process). In this
+scenario, it will eventually time out and give you a 503 error after 2 minutes;
+you can configure how long it waits with the ``--http-timeout`` command line
+argument.
+
+With some browsers you may see errors regarding ``Sec-WebSocket-Protocol``
+headers. You can set the allowed ``ws_protocols`` to match your client
+protocol like this::
+
+ CHANNELS_WS_PROTOCOLS = ["graphql-ws", ]
+
+In production you may start a Daphne server without the ``runserver`` command,
+so you need to pass the ``--ws-protocol`` option directly::
+
+ daphne --ws-protocol "graphql-ws" --proxy-headers my_project.asgi:channel_layer
+
+Note: The Daphne server binds to ``127.0.0.1`` by default. If you are not
+deploying locally, bind to your server's IP or to all interfaces (``0.0.0.0``)::
+
+ daphne -b 0.0.0.0 -p 8000 --ws-protocol "graphql-ws" --proxy-headers my_project.asgi:channel_layer
+
+Deploying new versions of code
+------------------------------
+
+One of the benefits of decoupling the client connection handling from work
+processing is that it means you can run new code without dropping client
+connections; this is especially useful for WebSockets.
+
+Just restart your workers when you have new code (by default, if you send
+them SIGTERM they'll cleanly exit and finish running any in-process
+consumers), and any queued messages or new connections will go to the new
+workers. As long as the new code is session-compatible, you can even do staged
+rollouts to make sure workers on new code aren't experiencing high error rates.
+
+There's no need to restart the WSGI or WebSocket interface servers unless
+you've upgraded the interface server itself or changed the ``CHANNEL_LAYERS``
+setting; none of your code is used by them, and all middleware and code that can
+customize requests is run on the consumers.
+
+You can even use different Python versions for the interface servers and the
+workers; the ASGI protocol that channel layers communicate over
+is designed to be portable across all Python versions.
+
+
+.. _asgi-alone:
+
+Running just ASGI
+-----------------
+
+If you are just running Daphne to serve all traffic, then the configuration
+above is enough that you can just expose it to the Internet and it'll serve
+whatever kind of request comes in; for a small site, just the one Daphne
+instance and four or five workers is likely enough.
+
+However, larger sites will need to deploy things at a slightly larger scale,
+and how you scale things up is different from WSGI; see :ref:`scaling-up`.
+
+
+.. _wsgi-with-asgi:
+
+Running ASGI alongside WSGI
+---------------------------
+
+ASGI and its canonical interface server Daphne are both relatively new,
+and so you may not wish to run all your traffic through it yet (or you may
+be using specialized features of your existing WSGI server).
+
+If that's the case, that's fine; you can run Daphne and a WSGI server alongside
+each other, and only have Daphne serve the requests you need it to (usually
+WebSocket and long-poll HTTP requests, as these do not fit into the WSGI model).
+
+To do this, just set up your Daphne to serve as we discussed above, and then
+configure your load-balancer or front HTTP server process to dispatch requests
+to the correct server - based on either path, domain, or if
+you can, the Upgrade header.
+
+Dispatching based on path or domain means you'll need to design your WebSocket
+URLs carefully so you can always tell how to route them at the load-balancer
+level; the ideal thing is to be able to look for the ``Upgrade: WebSocket``
+header and distinguish connections by this, but not all software supports this
+and it doesn't help route long-poll HTTP connections at all.
+
+You could also invert this model, and have all connections go to Daphne by
+default and selectively route some back to the WSGI server, if you have
+particular URLs or domains you want to use that server on.
+
+
+Running on a PaaS
+-----------------
+
+To run Django with channels enabled on a Platform-as-a-Service (PaaS), you will
+need to ensure that your PaaS allows you to run multiple processes at different
+scaling levels; one group will be running Daphne, as a pure Python application
+(not a WSGI application), and the other should be running ``runworker``.
+
+The PaaS will also have to either provide its own Redis service or a
+third process type that lets you run Redis yourself to use the cross-network
+channel backend; both interface and worker processes need to be able to see
+Redis, but not each other.
+
+If you are only allowed one running process type, it's possible you could
+combine both interface server and worker into one process using threading
+and the in-memory backend; however, this is not recommended for production
+use as you cannot scale up past a single node without groups failing to work.
+
+
+.. _scaling-up:
+
+Scaling Up
+----------
+
+Scaling up a deployment containing channels (and thus running ASGI) is a little
+different to scaling a WSGI deployment.
+
+The fundamental difference is that the group mechanic requires all servers serving
+the same site to be able to see each other; if you separate the site up and run
+it in a few, large clusters, messages to groups will only deliver to WebSockets
+connected to the same cluster. For some site designs this will be fine, and if
+you think you can live with this and design around it (which means never
+designing anything around global notifications or events), this may be a good
+way to go.
+
+For most projects, you'll need to run a single channel layer at scale in order
+to achieve proper group delivery. Different backends will scale up differently,
+but the Redis backend can use multiple Redis servers and spread the load
+across them using sharding based on consistent hashing.
+
+The key to a channel layer knowing how to scale a channel's delivery is if it
+contains the ``!`` character or not, which signifies a single-reader channel.
+Single-reader channels are only ever connected to by a single process, and so
+in the Redis case are stored on a single, predictable shard. Other channels
+are assumed to have many workers trying to read them, and so messages for
+these can be evenly divided across all shards.
+
+Django channels are still relatively new, and so it's likely that we don't yet
+know the full story about how to scale things up; we run large load tests to
+try and refine and improve large-project scaling, but it's no substitute for
+actual traffic. If you're running channels at scale, you're encouraged to
+send feedback to the Django team and work with us to hone the design and
+performance of the channel layer backends, or you're free to make your own;
+the ASGI specification is comprehensive and comes with a conformance test
+suite, which should aid in any modification of existing backends or development
+of new ones.
diff --git a/docs/faqs.rst b/docs/faqs.rst
new file mode 100755
index 0000000..701f9b9
--- /dev/null
+++ b/docs/faqs.rst
@@ -0,0 +1,177 @@
+Frequently Asked Questions
+==========================
+
+Why are you doing this rather than just using Tornado/gevent/asyncio/etc.?
+--------------------------------------------------------------------------
+
+They're kind of solving different problems. Tornado, gevent and other
+in-process async solutions are a way of making a single Python process act
+asynchronously - doing other things while a HTTP request is going on, or
+juggling hundreds of incoming connections without blocking on a single one.
+
+Channels is different - all the code you write for consumers runs synchronously.
+You can do all the blocking filesystem calls and CPU-bound tasks you like
+and all you'll do is block the one worker you're running on; the other
+worker processes will just keep on going and handling other messages.
+
+This is partially because Django is all written in a synchronous manner, and
+rewriting it to all be asynchronous would be a near-impossible task, but also
+because we believe that normal developers should not have to write
+asynchronous-friendly code. It's really easy to shoot yourself in the foot:
+write a tight loop without yielding in the middle, or access a file that happens
+to be on a slow NFS share, and you've just blocked the entire process.
+
+Channels still uses asynchronous code, but it confines it to the interface
+layer - the processes that serve HTTP, WebSocket and other requests. These do
+indeed use asynchronous frameworks (currently, asyncio and Twisted) to handle
+managing all the concurrent connections, but they're also fixed pieces of code;
+as an end developer, you'll likely never have to touch them.
+
+All of your work can be with standard Python libraries and patterns and the
+only thing you need to look out for is worker contention - if you flood your
+workers with infinite loops, of course they'll all stop working, but that's
+better than a single thread of execution stopping the entire site.
+
+
+Why aren't you using node/go/etc. to proxy to Django?
+-----------------------------------------------------
+
+There are a couple of solutions where you can use a more "async-friendly"
+language (or Python framework) to bridge things like WebSockets to Django -
+terminate them in (say) a Node process, and then bridge it to Django using
+either a reverse proxy model, or Redis signalling, or some other mechanism.
+
+The thing is, Channels actually makes it easier to do this if you wish. The
+key part of Channels is introducing a standardised way to run event-triggered
+pieces of code, and a standardised way to route messages via named channels
+that hits the right balance between flexibility and simplicity.
+
+While our interface servers are written in Python, there's nothing stopping
+you from writing an interface server in another language, providing it follows
+the same serialisation standards for HTTP/WebSocket/etc. messages. In fact,
+we may ship an alternative server implementation ourselves at some point.
+
+
+Why isn't there guaranteed delivery/a retry mechanism?
+------------------------------------------------------
+
+Channels' design is such that anything is allowed to fail - a consumer can
+error and not send replies, the channel layer can restart and drop a few messages,
+a dogpile can happen and a few incoming clients get rejected.
+
+This is because designing a system that was fully guaranteed, end-to-end, would
+result in something with incredibly low throughput, and almost no problem needs
+that level of guarantee. If you want some level of guarantee, you can build on
+top of what Channels provides and add it in (for example, use a database to
+mark things that need to be cleaned up and resend messages if they aren't after
+a while, or make idempotent consumers and over-send messages rather than
+under-send).
+
+That said, it's good practice to design a system presuming that any part of
+it can fail, and to plan for detection and recovery of that state, rather than
+staking everything on it working perfectly as designed. Channels
+takes this idea and uses it to provide a high-throughput solution that is
+mostly reliable, rather than a low-throughput one that is *nearly* completely
+reliable.
+
+
+Can I run HTTP requests/service calls/etc. in parallel from Django without blocking?
+------------------------------------------------------------------------------------
+
+Not directly - Channels only allows a consumer function to listen to channels
+at the start, which is what kicks it off; you can't send tasks off on channels
+to other consumers and then *wait on the result*. You can send them off and keep
+going, but you cannot ever block waiting on a channel in a consumer, as otherwise
+you'd hit deadlocks, livelocks, and similar issues.
+
+This is partially a design feature - this falls into the class of "difficult
+async concepts that it's easy to shoot yourself in the foot with" - but also
+to keep the underlying channels implementation simple. By not allowing this sort
+of blocking, we can have specifications for channel layers that allow horizontal
+scaling and sharding.
+
+What you can do is:
+
+* Dispatch a whole load of tasks to run later in the background and then finish
+  your current task - for example, dispatching an avatar thumbnailing task in
+  the avatar upload view, then returning a "we got it!" HTTP response (see the
+  sketch after this list).
+
+* Pass details along to the other task about how to continue, in particular
+ a channel name linked to another consumer that will finish the job, or
+ IDs or other details of the data (remember, message contents are just a dict
+ you can put stuff into). For example, you might have a generic image fetching
+ task for a variety of models that should fetch an image, store it, and pass
+ the resultant ID and the ID of the object you're attaching it to onto a different
+ channel depending on the model - you'd pass the next channel name and the
+ ID of the target object in the message, and then the consumer could send
+ a new message onto that channel name when it's done.
+
+* Have interface servers that perform requests or slow tasks (remember, interface
+ servers are the specialist code which *is* written to be highly asynchronous)
+ and then send their results onto a channel when finished. Again, you can't wait
+ around inside a consumer and block on the results, but you can provide another
+ consumer on a new channel that will do the second half.
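+
+As a concrete illustration of the first pattern, here's a minimal sketch of
+dispatch-and-continue; the channel name and message keys are assumptions for
+the example, not a fixed API::
+
+    # In consumers.py (or a view) - names here are hypothetical examples
+    from channels import Channel
+
+    def handle_avatar_upload(message):
+        # ... store the uploaded image somewhere, obtaining an ID for it ...
+        image_id = 123
+        # Fire-and-forget: a consumer routed to "avatar-thumbnail" will
+        # pick this up and do the slow thumbnailing work later.
+        Channel("avatar-thumbnail").send({
+            "image_id": image_id,
+        })
+        # No waiting on a result - this consumer just finishes its own task.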
+
+
+How do I associate data with incoming connections?
+--------------------------------------------------
+
+Channels provides full integration with Django's session and auth system for its
+WebSockets support, as well as per-websocket sessions for persisting data, so
+you can easily persist data on a per-connection or per-user basis.
+
+You can also provide your own solution if you wish, keyed off of ``message.reply_channel``,
+which is the unique channel representing the connection, but remember that
+whatever you store data in must be **network-transparent** - storing things in a
+global variable won't work outside of development.
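+
+For instance, a brief sketch of such a custom store, keyed off the reply
+channel's name (the ``ConnectionData`` model is hypothetical)::
+
+    # A hypothetical model with channel_name and last_text fields acts as
+    # network-transparent, per-connection storage.
+    from .models import ConnectionData
+
+    def ws_message(message):
+        ConnectionData.objects.update_or_create(
+            channel_name=message.reply_channel.name,
+            defaults={"last_text": message.content["text"]},
+        )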
+
+
+How do I talk to Channels from my non-Django application?
+---------------------------------------------------------
+
+If you have an external server or script you want to talk to Channels, you have
+a few choices:
+
+* If it's a Python program, and you've made an ``asgi.py`` file for your project
+ (see :doc:`deploying`), you can import the channel layer directly as
+ ``yourproject.asgi.channel_layer`` and call ``send()`` and ``receive_many()``
+  on it directly. See the :doc:`ASGI spec <asgi>` for the API the channel layer
+ presents. Here's what that looks like:
+
+ >>> from yourproject.asgi import channel_layer
+ >>> from channels import Channel, Group
+ >>> Channel("channel_name").send({"text":"channel_text"})
+ >>> Group("group_name").send({"text":"group_text"})
+
+* If you just need to send messages in when events happen, you can make a
+ management command that calls ``Channel("namehere").send({...})``
+  so your external program can just call
+  ``manage.py send_custom_event`` (or similar) to send a message - a sketch of
+  such a command follows below. Remember, you can send onto channels from any
+  code in your project.
+
+* If neither of these work, you'll have to communicate with Django over
+ HTTP, WebSocket, or another protocol that your project talks, as normal.
+
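+A minimal sketch of that management command (the command and channel names
+are placeholders)::
+
+    # myapp/management/commands/send_custom_event.py
+    from django.core.management.base import BaseCommand
+
+    from channels import Channel
+
+    class Command(BaseCommand):
+        help = "Sends a custom event onto a channel from outside Django"
+
+        def handle(self, *args, **options):
+            # Any consumer routed to "custom-events" will pick this up.
+            Channel("custom-events").send({"text": "something happened"})
+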
+
+Are channels Python 2, 3 or 2+3?
+--------------------------------
+
+Django-channels and all of its dependencies are compatible with Python 2.7,
+3.4, and higher. This includes the parts of Twisted that some of the Channels
+packages (like daphne) use.
+
+
+Why isn't there support for socket.io/SockJS/long poll fallback?
+----------------------------------------------------------------
+
+Emulating WebSocket over HTTP long polling requires considerably more effort
+than terminating WebSockets; some server-side state of the connection must
+be kept in a place that's accessible from all nodes, so when the new long
+poll comes in, messages can be replayed onto it.
+
+For this reason, we think it's out of scope for Channels itself, though
+Channels and Daphne come with first-class support for long-running HTTP
+connections without taking up a worker thread (you can consume ``http.request``
+and not send a response until later, add the reply channel to groups,
+and even listen out for the ``http.disconnect`` channel that tells you when
+long polls terminate early).
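+
+A minimal sketch of that long-poll pattern (the group name is an assumption)::
+
+    # In consumers.py
+    from channels import Group
+
+    # Connected to http.request
+    def http_request(message):
+        # Deliberately send no response yet; just remember who is waiting.
+        # Something else can send a response onto this group later.
+        Group("long-poll-waiters").add(message.reply_channel)
+
+    # Connected to http.disconnect
+    def http_disconnect(message):
+        # The client gave up before we replied.
+        Group("long-poll-waiters").discard(message.reply_channel)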
diff --git a/docs/generics.rst b/docs/generics.rst
new file mode 100644
index 0000000..17768da
--- /dev/null
+++ b/docs/generics.rst
@@ -0,0 +1,322 @@
+Generic Consumers
+=================
+
+Much like Django's class-based views, Channels has class-based consumers.
+They provide a way for you to arrange code so it's highly modifiable and
+inheritable, at the slight cost of it being harder to figure out the execution
+path.
+
+We recommend you use them if you find them valuable; normal function-based
+consumers are also entirely valid, however, and may result in more readable
+code for simpler tasks.
+
+There is one base generic consumer class, ``BaseConsumer``, that provides
+the pattern for method dispatch and is the thing you can build entirely
+custom consumers on top of, and then protocol-specific subclasses that provide
+extra utility - for example, the ``WebsocketConsumer`` provides automatic
+group management for the connection.
+
+When you use class-based consumers in :doc:`routing`, you need
+to use ``route_class`` rather than ``route``; ``route_class`` knows how to
+talk to the class-based consumer and extract the list of channels it needs
+to listen on from it directly, rather than making you pass it in explicitly.
+
+Here's a routing example::
+
+ from channels import route, route_class
+
+ channel_routing = [
+ route_class(consumers.ChatServer, path=r"^/chat/"),
+ route("websocket.connect", consumers.ws_connect, path=r"^/$"),
+ ]
+
+Class-based consumers are instantiated once for each message they consume,
+so it's safe to store things on ``self`` (in fact, ``self.message`` is the
+current message by default, and ``self.kwargs`` are the keyword arguments
+passed in from the routing).
+
+Base
+----
+
+The ``BaseConsumer`` class is the foundation of class-based consumers, and what
+you can inherit from if you wish to build your own entirely from scratch.
+
+You use it like this::
+
+ from channels.generic import BaseConsumer
+
+ class MyConsumer(BaseConsumer):
+
+ method_mapping = {
+ "channel.name.here": "method_name",
+ }
+
+ def method_name(self, message, **kwargs):
+ pass
+
+All you need to define is the ``method_mapping`` dictionary, which maps
+channel names to method names. The base code will take care of the dispatching
+for you, and set ``self.message`` to the current message as well.
+
+If you want to perform more complicated routing, you'll need to override the
+``dispatch()`` and ``channel_names()`` methods in order to do the right thing;
+remember, though, your channel names cannot change during runtime and must
+always be the same for as long as your process runs.
+
+``BaseConsumer`` and all other generic consumers that inherit from it provide
+two instance variables on the class:
+
+* ``self.message``, the :ref:`Message object ` representing the
+ message the consumer was called for.
+* ``self.kwargs``, keyword arguments from the :doc:`routing`.
+
+
+WebSockets
+----------
+
+There are two WebSockets generic consumers; one that provides group management,
+simpler send/receive methods, and basic method routing, and a subclass which
+additionally serializes all messages sent and received using JSON automatically.
+
+The basic WebSocket generic consumer is used like this::
+
+ from channels.generic.websockets import WebsocketConsumer
+
+ class MyConsumer(WebsocketConsumer):
+
+ # Set to True to automatically port users from HTTP cookies
+ # (you don't need channel_session_user, this implies it)
+ http_user = True
+
+ # Set to True if you want it, else leave it out
+ strict_ordering = False
+
+ def connection_groups(self, **kwargs):
+ """
+ Called to return the list of groups to automatically add/remove
+ this connection to/from.
+ """
+ return ["test"]
+
+ def connect(self, message, **kwargs):
+ """
+ Perform things on connection start
+ """
+ # Accept the connection; this is done by default if you don't override
+ # the connect function.
+ self.message.reply_channel.send({"accept": True})
+
+ def receive(self, text=None, bytes=None, **kwargs):
+ """
+ Called when a message is received with either text or bytes
+ filled out.
+ """
+ # Simple echo
+ self.send(text=text, bytes=bytes)
+
+ def disconnect(self, message, **kwargs):
+ """
+ Perform things on connection close
+ """
+ pass
+
+You can call ``self.send`` inside the class to send things to the connection's
+``reply_channel`` automatically. The socket is added to any groups returned
+from ``connection_groups`` when it connects and removed from them when it
+disconnects; the method also receives keyword arguments, in case (say) your
+URL path affects which group to talk to.
+
+Additionally, the property ``self.path`` is always set to the current URL path.
+
+The JSON-enabled consumer looks slightly different::
+
+ from channels.generic.websockets import JsonWebsocketConsumer
+
+ class MyConsumer(JsonWebsocketConsumer):
+
+ # Set to True if you want it, else leave it out
+ strict_ordering = False
+
+ def connection_groups(self, **kwargs):
+ """
+ Called to return the list of groups to automatically add/remove
+ this connection to/from.
+ """
+ return ["test"]
+
+ def connect(self, message, **kwargs):
+ """
+ Perform things on connection start
+ """
+ pass
+
+ def receive(self, content, **kwargs):
+ """
+ Called when a message is received with decoded JSON content
+ """
+ # Simple echo
+ self.send(content)
+
+ def disconnect(self, message, **kwargs):
+ """
+ Perform things on connection close
+ """
+ pass
+
+ # Optionally provide your own custom json encoder and decoder
+ # @classmethod
+ # def decode_json(cls, text):
+ # return my_custom_json_decoder(text)
+ #
+ # @classmethod
+ # def encode_json(cls, content):
+ # return my_custom_json_encoder(content)
+
+For this subclass, ``receive`` only gets a ``content`` argument that is the
+already-decoded JSON as Python datastructures; similarly, ``send`` now only
+takes a single argument, which it JSON-encodes before sending down to the
+client.
+
+Note that this subclass still can't intercept ``Group.send()`` calls to make
+them into JSON automatically, but it does provide ``self.group_send(name, content)``
+that will do this for you if you call it explicitly.
+
+``self.close()`` is also provided to easily close the WebSocket from the
+server end with an optional status code once you are done with it.
+
+.. _multiplexing:
+
+WebSocket Multiplexing
+----------------------
+
+Channels provides a standard way to multiplex different data streams over
+a single WebSocket, called a ``Demultiplexer``.
+
+It expects JSON-formatted WebSocket frames with two keys, ``stream`` and
+``payload``, and will match the ``stream`` against the mapping to find a
+channel name. It will then forward the message onto that channel while
+preserving ``reply_channel``, so you can hook consumers up to them directly
+in the ``routing.py`` file, and use authentication decorators as you wish.
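+
+For instance, a text frame addressed to a stream named ``echo`` decodes to a
+structure like this (the stream name and payload contents are examples only)::
+
+    # The JSON frame '{"stream": "echo", "payload": {"msg": "hi"}}'
+    # is demultiplexed as:
+    frame = {
+        "stream": "echo",          # looked up in the Demultiplexer's consumers map
+        "payload": {"msg": "hi"},  # delivered to that consumer as its content
+    }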
+
+
+Example using class-based consumer::
+
+ from channels.generic.websockets import WebsocketDemultiplexer, JsonWebsocketConsumer
+
+ class EchoConsumer(JsonWebsocketConsumer):
+ def connect(self, message, multiplexer, **kwargs):
+ # Send data with the multiplexer
+ multiplexer.send({"status": "I just connected!"})
+
+ def disconnect(self, message, multiplexer, **kwargs):
+ print("Stream %s is closed" % multiplexer.stream)
+
+ def receive(self, content, multiplexer, **kwargs):
+ # Simple echo
+ multiplexer.send({"original_message": content})
+
+
+ class AnotherConsumer(JsonWebsocketConsumer):
+ def receive(self, content, multiplexer=None, **kwargs):
+ # Some other actions here
+ pass
+
+
+ class Demultiplexer(WebsocketDemultiplexer):
+
+ # Wire your JSON consumers here: {stream_name : consumer}
+ consumers = {
+ "echo": EchoConsumer,
+ "other": AnotherConsumer,
+ }
+
+ # Optionally provide a custom multiplexer class
+ # multiplexer_class = MyCustomJsonEncodingMultiplexer
+
+
+The ``multiplexer`` allows the consumer class to be independent of the stream name.
+It holds the stream name and the demultiplexer on the attributes ``stream`` and ``demultiplexer``.
+
+The :doc:`data binding <binding>` code will also send out messages to clients
+in the same format, and you can encode things in this format yourself by
+using the ``WebsocketDemultiplexer.encode`` class method.
+
+
+Sessions and Users
+------------------
+
+If you wish to use ``channel_session`` or ``channel_session_user`` with a
+class-based consumer, simply set one of the variables in the class body::
+
+ class MyConsumer(WebsocketConsumer):
+
+ channel_session_user = True
+
+This will run the appropriate decorator around your handler methods, and provide
+``message.channel_session`` and ``message.user`` on the message object - both
+the one passed in to your handler as an argument as well as ``self.message``,
+as they point to the same instance.
+
+And if you just want to use the user from the django session, add ``http_user``::
+
+ class MyConsumer(WebsocketConsumer):
+
+ http_user = True
+
+This will give you ``message.user``, which will be the same as ``request.user``
+would be on a regular View.
+
+
+Applying Decorators
+-------------------
+
+To apply decorators to a class-based consumer, you'll have to wrap a functional
+part of the consumer; in this case, ``get_handler`` is likely the place you
+want to override; like so::
+
+ class MyConsumer(WebsocketConsumer):
+
+ def get_handler(self, *args, **kwargs):
+ handler = super(MyConsumer, self).get_handler(*args, **kwargs)
+ return your_decorator(handler)
+
+You can also use the Django ``method_decorator`` utility to wrap methods that
+have ``message`` as their first positional argument - note that it won't work
+for more high-level methods, like ``WebsocketConsumer.receive``.
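+
+A short sketch of that approach, where ``your_decorator`` stands in for any
+decorator expecting the message as its first argument::
+
+    from django.utils.decorators import method_decorator
+
+    class MyConsumer(WebsocketConsumer):
+
+        @method_decorator(your_decorator)
+        def connect(self, message, **kwargs):
+            # your_decorator sees `message` as its first positional argument
+            ...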
+
+
+As route
+--------
+
+Instead of making routes using ``route_class`` you may use the ``as_route`` shortcut.
+This function takes route filters (:ref:`filters`) as kwargs and returns
+``route_class``. For example::
+
+ from . import consumers
+
+ channel_routing = [
+ consumers.ChatServer.as_route(path=r"^/chat/"),
+ ]
+
+Use the ``attrs`` dict keyword for dynamic class attributes. For example you have
+the generic consumer::
+
+ class MyGenericConsumer(WebsocketConsumer):
+ group = 'default'
+ group_prefix = ''
+
+ def connection_groups(self, **kwargs):
+            return ['_'.join([self.group_prefix, self.group])]
+
+You can create consumers with different ``group`` and ``group_prefix`` with ``attrs``,
+like so::
+
+ from . import consumers
+
+ channel_routing = [
+ consumers.MyGenericConsumer.as_route(path=r"^/path/1/",
+ attrs={'group': 'one', 'group_prefix': 'pre'}),
+ consumers.MyGenericConsumer.as_route(path=r"^/path/2/",
+ attrs={'group': 'two', 'group_prefix': 'public'}),
+ ]
+
diff --git a/docs/getting-started.rst b/docs/getting-started.rst
new file mode 100644
index 0000000..35cca58
--- /dev/null
+++ b/docs/getting-started.rst
@@ -0,0 +1,807 @@
+Getting Started with Channels
+=============================
+
+(If you haven't yet, make sure you :doc:`install Channels <installation>`)
+
+Now, let's get to writing some consumers. If you've not read it already,
+you should read :doc:`concepts`, as it covers the basic description of what
+channels and groups are, and lays out some of the important implementation
+patterns and caveats.
+
+First Consumers
+---------------
+
+When you first run Django with Channels installed, it will be set up in the
+default layout, where all HTTP requests (on the ``http.request`` channel) are
+routed to the Django view layer. Nothing will be different to how things worked
+in the past with a WSGI-based Django; your views and static file serving (from
+``runserver``) will work as normal.
+
+As a very basic introduction, let's write a consumer that overrides the built-in
+handling and handles every HTTP request directly. This isn't something you'd
+usually do in a project, but it's a good illustration of how channels
+underlie even core Django - it's less of an addition and more adding a whole
+new layer under the existing view layer.
+
+Make a new project, a new app, and put this in a ``consumers.py`` file in the app::
+
+ from django.http import HttpResponse
+ from channels.handler import AsgiHandler
+
+ def http_consumer(message):
+ # Make standard HTTP response - access ASGI path attribute directly
+ response = HttpResponse("Hello world! You asked for %s" % message.content['path'])
+ # Encode that response into message format (ASGI)
+ for chunk in AsgiHandler.encode_response(response):
+ message.reply_channel.send(chunk)
+
+The most important thing to note here is that, because things we send in
+messages must be JSON serializable, the request and response messages
+are in a key-value format. You can read more about that format in the
+:doc:`ASGI specification <asgi>`, but you don't need to worry about it too much;
+just know that there's an ``AsgiRequest`` class that translates from ASGI into
+Django request objects, and the ``AsgiHandler`` class handles translation of
+``HttpResponse`` into ASGI messages, which you see used above. Usually,
+Django's built-in code will do all this for you when you're using normal views.
+
+Now we need to do one more thing, and that's tell Django that this consumer
+should be tied to the ``http.request`` channel rather than the default Django
+view system. This is done in the settings file - in particular, we need to
+define our ``default`` channel layer and what its routing is set to.
+
+Channel routing is a bit like URL routing, and so it's structured similarly -
+you point the setting at a dict mapping channels to consumer callables.
+Here's what that looks like::
+
+ # In settings.py
+ CHANNEL_LAYERS = {
+ "default": {
+ "BACKEND": "asgiref.inmemory.ChannelLayer",
+ "ROUTING": "myproject.routing.channel_routing",
+ },
+ }
+
+::
+
+ # In routing.py
+ from channels.routing import route
+ channel_routing = [
+ route("http.request", "myapp.consumers.http_consumer"),
+ ]
+
+.. warning::
+ This example, and most of the examples here, use the "in memory" channel
+ layer. This is the easiest to get started with but provides absolutely no
+ cross-process channel transportation, and so can only be used with
+ ``runserver``. You'll want to choose another backend (discussed later)
+ to run things in production.
+
+As you can see, this is a little like Django's ``DATABASES`` setting; there are
+named channel layers, with a default one called ``default``. Each layer
+needs a channel layer class, some options (if the channel layer needs them),
+and a routing scheme, which points to a list containing the routing settings.
+It's recommended you call this ``routing.py`` and put it alongside ``urls.py``
+in your project, but you can put it wherever you like, as long as the path is
+correct.
+
+If you start up ``python manage.py runserver`` and go to
+``http://localhost:8000``, you'll see that, rather than a default Django page,
+you get the Hello World response, so things are working. If you don't see
+a response, check that you :doc:`installed Channels correctly <installation>`.
+
+Now, that's not very exciting - raw HTTP responses are something Django has
+been able to do for a long time. Let's try some WebSockets, and make a basic
+chat server!
+
+We'll start with a simple server that just echoes every message it gets sent
+back to the same client - no cross-client communication. It's not terribly
+useful, but it's a good way to start out writing Channels consumers.
+
+Delete that previous consumer and its routing - we'll want the normal Django view layer to
+serve HTTP requests from now on, which happens if you don't specify a consumer
+for ``http.request`` - and make this WebSocket consumer instead::
+
+ # In consumers.py
+
+ def ws_message(message):
+ # ASGI WebSocket packet-received and send-packet message types
+ # both have a "text" key for their textual data.
+ message.reply_channel.send({
+ "text": message.content['text'],
+ })
+
+Hook it up to the ``websocket.receive`` channel like this::
+
+ # In routing.py
+ from channels.routing import route
+ from myapp.consumers import ws_message
+
+ channel_routing = [
+ route("websocket.receive", ws_message),
+ ]
+
+Now, let's look at what this is doing. It's tied to the
+``websocket.receive`` channel, which means that it'll get a message
+whenever a WebSocket packet is sent to us by a client.
+
+When it gets that message, it takes the ``reply_channel`` attribute from it, which
+is the unique response channel for that client, and sends the same content
+back to the client using its ``send()`` method.
+
+Let's test it! Run ``runserver``, open a browser, navigate to a page on the server
+(you can't use any page's console because of origin restrictions), and put the
+following into the JavaScript console to open a WebSocket and send some data
+down it (you might need to change the socket address if you're using a
+development VM or similar):
+
+.. code-block:: javascript
+
+ // Note that the path doesn't matter for routing; any WebSocket
+ // connection gets bumped over to WebSocket consumers
+ socket = new WebSocket("ws://" + window.location.host + "/chat/");
+ socket.onmessage = function(e) {
+ alert(e.data);
+ }
+ socket.onopen = function() {
+ socket.send("hello world");
+ }
+ // Call onopen directly if socket is already open
+ if (socket.readyState == WebSocket.OPEN) socket.onopen();
+
+You should see an alert come back immediately saying "hello world" - your
+message has round-tripped through the server and come back to trigger the alert.
+
+Groups
+------
+
+Now, let's make our echo server into an actual chat server, so people can talk
+to each other. To do this, we'll use Groups, one of the :doc:`core concepts <concepts>`
+of Channels, and our fundamental way of doing multi-cast messaging.
+
+To do this, we'll hook up the ``websocket.connect`` and ``websocket.disconnect``
+channels to add and remove our clients from the Group as they connect and
+disconnect, like this::
+
+ # In consumers.py
+ from channels import Group
+
+ # Connected to websocket.connect
+ def ws_add(message):
+ # Accept the incoming connection
+ message.reply_channel.send({"accept": True})
+ # Add them to the chat group
+ Group("chat").add(message.reply_channel)
+
+ # Connected to websocket.disconnect
+ def ws_disconnect(message):
+ Group("chat").discard(message.reply_channel)
+
+.. note::
+    You need to explicitly accept WebSocket connections by sending
+    ``accept: True`` - you can also reject them at connection time, before
+    they open, by sending ``close: True``.
+
+Of course, if you've read through :doc:`concepts`, you'll know that channels
+added to groups expire out if their messages expire (every channel layer has
+a message expiry time, usually between 30 seconds and a few minutes, and it's
+often configurable) - but the ``disconnect`` handler will get called nearly all
+of the time anyway.
+
+.. note::
+ Channels' design is predicated on expecting and working around failure;
+ it assumes that some small percentage of messages will never get delivered,
+ and so all the core functionality is designed to *expect failure* so that
+ when a message doesn't get delivered, it doesn't ruin the whole system.
+
+ We suggest you design your applications the same way - rather than relying
+ on 100% guaranteed delivery, which Channels won't give you, look at each
+ failure case and program something to expect and handle it - be that retry
+ logic, partial content handling, or just having something not work that one
+ time. HTTP requests are just as fallible, and most people's response to that
+ is a generic error page!
+
+.. _websocket-example:
+
+Now, that's taken care of adding and removing WebSocket send channels for the
+``chat`` group; all we need to do now is take care of message sending. Instead
+of echoing the message back to the client like we did above, we'll instead send
+it to the whole ``Group``, which means any client who's been added to it will
+get the message. Here's all the code::
+
+ # In consumers.py
+ from channels import Group
+
+ # Connected to websocket.connect
+ def ws_add(message):
+ # Accept the connection
+ message.reply_channel.send({"accept": True})
+ # Add to the chat group
+ Group("chat").add(message.reply_channel)
+
+ # Connected to websocket.receive
+ def ws_message(message):
+ Group("chat").send({
+ "text": "[user] %s" % message.content['text'],
+ })
+
+ # Connected to websocket.disconnect
+ def ws_disconnect(message):
+ Group("chat").discard(message.reply_channel)
+
+And what our routing should look like in ``routing.py``::
+
+ from channels.routing import route
+ from myapp.consumers import ws_add, ws_message, ws_disconnect
+
+ channel_routing = [
+ route("websocket.connect", ws_add),
+ route("websocket.receive", ws_message),
+ route("websocket.disconnect", ws_disconnect),
+ ]
+
+Note that the ``http.request`` route is no longer present - if we leave it
+out, then Django will route HTTP requests to the normal view system by default,
+which is probably what you want. Even if you have a ``http.request`` route that
+matches just a subset of paths or methods, the ones that don't match will still
+fall through to the default handler, which passes it into URL routing and the
+views.
+
+With all that code, you now have a working set of logic for a chat server.
+Test time! Run ``runserver``, open a browser and use that same JavaScript
+code in the developer console as before:
+
+.. code-block:: javascript
+
+ // Note that the path doesn't matter right now; any WebSocket
+ // connection gets bumped over to WebSocket consumers
+ socket = new WebSocket("ws://" + window.location.host + "/chat/");
+ socket.onmessage = function(e) {
+ alert(e.data);
+ }
+ socket.onopen = function() {
+ socket.send("hello world");
+ }
+ // Call onopen directly if socket is already open
+ if (socket.readyState == WebSocket.OPEN) socket.onopen();
+
+You should see an alert come back immediately saying "hello world" - but this
+time, you can open another tab and do the same there, and both tabs will
+receive the message and show an alert. Any incoming message is sent to the
+``chat`` group by the ``ws_message`` consumer, and both your tabs will have
+been put into the ``chat`` group when they connected.
+
+Feel free to put some calls to ``print`` in your handler functions too, if you
+like, so you can understand when they're called. You can also use ``pdb`` and
+other similar methods you'd use to debug normal Django projects.
+
+
+Running with Channels
+---------------------
+
+Because Channels takes Django into a multi-process model, you no longer run
+everything in one process along with a WSGI server (of course, you're still
+free to do that if you don't want to use Channels). Instead, you run one or
+more *interface servers*, and one or more *worker servers*, connected by
+that *channel layer* you configured earlier.
+
+There are multiple kinds of "interface servers", and each one will service a
+different type of request - one might do both WebSocket and HTTP requests, while
+another might act as an SMS message gateway, for example.
+
+These are separate from the "worker servers" where Django will run actual logic,
+though, and so the *channel layer* transports the content of channels across
+the network. In a production scenario, you'd usually run *worker servers*
+as a separate cluster from the *interface servers*, though of course you
+can run both as separate processes on one machine too.
+
+By default, Django doesn't have a channel layer configured - it doesn't need one to run
+normal WSGI requests, after all. As soon as you try to add some consumers,
+though, you'll need to configure one.
+
+In the example above we used the in-memory channel layer implementation
+as our default channel layer. This just stores all the channel data in a dict
+in memory, and so isn't actually cross-process; it only works inside
+``runserver``, as that runs the interface and worker servers in different threads
+inside the same process. When you deploy to production, you'll need to
+use a channel layer like the Redis backend ``asgi_redis`` that works cross-process;
+see :doc:`backends` for more.
+
+The second thing, once we have a networked channel backend set up, is to make
+sure we're running an interface server that's capable of serving WebSockets.
+To solve this, Channels comes with ``daphne``, an interface server
+that can handle both HTTP and WebSockets at the same time, and then ties this
+in to run when you run ``runserver`` - you shouldn't notice any difference
+from the normal Django ``runserver``, though some of the options may be a little
+different.
+
+*(Under the hood, runserver is now running Daphne in one thread and a worker
+with autoreload in another - it's basically a miniature version of a deployment,
+but all in one process)*
+
+Let's try out the Redis backend - Redis runs on pretty much every machine, and
+has a very small overhead, which makes it perfect for this kind of thing. Install
+the ``asgi_redis`` package using ``pip``. ::
+
+ pip install asgi_redis
+
+and set up your channel layer like this::
+
+ # In settings.py
+ CHANNEL_LAYERS = {
+ "default": {
+ "BACKEND": "asgi_redis.RedisChannelLayer",
+ "CONFIG": {
+ "hosts": [("localhost", 6379)],
+ },
+ "ROUTING": "myproject.routing.channel_routing",
+ },
+ }
+
+You'll also need to install the Redis server - there are downloads available
+for Mac OS and Windows, and it's in pretty much every Linux distribution's
+package manager. For example, on Ubuntu, you can just::
+
+ sudo apt-get install redis-server
+
+Fire up ``runserver``, and it'll work as before - unexciting, like good
+infrastructure should be. You can also try out the cross-process nature; run
+these two commands in two terminals:
+
+* ``manage.py runserver --noworker``
+* ``manage.py runworker``
+
+As you can probably guess, this disables the worker threads in ``runserver``
+and runs them in a separate process instead. You can pass ``-v 2`` to ``runworker``
+if you want to see logging as it runs the consumers.
+
+If Django is in debug mode (``DEBUG=True``), then ``runworker`` will serve
+static files, as ``runserver`` does. Just like a normal Django setup, you'll
+have to set up your static file serving for when ``DEBUG`` is turned off.
+
+Persisting Data
+---------------
+
+Echoing messages is a nice simple example, but it's ignoring the real
+need for a system like this - persistent state for connections.
+Let's consider a basic chat site where a user requests a chat room upon initial
+connection, as part of the URL path (e.g. ``wss://host/rooms/room-name``).
+
+The ``reply_channel`` attribute you've seen before is our unique pointer to the
+open WebSocket - because it varies between different clients, it's how we can
+keep track of "who" a message is from. Remember, Channels is network-transparent
+and can run on multiple workers, so you can't just store things locally in
+global variables or similar.
+
+Instead, the solution is to persist information keyed by the ``reply_channel`` in
+some other data store - sound familiar? This is what Django's session framework
+does for HTTP requests, using a cookie as the key. Wouldn't it be useful if
+we could get a session using the ``reply_channel`` as a key?
+
+Channels provides a ``channel_session`` decorator for this purpose - it
+provides you with an attribute called ``message.channel_session`` that acts
+just like a normal Django session.
+
+Let's use it now to build a chat server that expects you to pass a chatroom
+name in the path of your WebSocket request, and a query string with your
+username (we'll ignore auth for now - that's next)::
+
+ # In consumers.py
+ import json
+ from channels import Group
+ from channels.sessions import channel_session
+ from urllib.parse import parse_qs
+
+ # Connected to websocket.connect
+ @channel_session
+ def ws_connect(message, room_name):
+ # Accept connection
+ message.reply_channel.send({"accept": True})
+ # Parse the query string
+ params = parse_qs(message.content["query_string"])
+ if b"username" in params:
+ # Set the username in the session
+ message.channel_session["username"] = params[b"username"][0].decode("utf8")
+ # Add the user to the room_name group
+ Group("chat-%s" % room_name).add(message.reply_channel)
+ else:
+ # Close the connection.
+ message.reply_channel.send({"close": True})
+
+ # Connected to websocket.receive
+ @channel_session
+ def ws_message(message, room_name):
+ Group("chat-%s" % room_name).send({
+ "text": json.dumps({
+ "text": message["text"],
+ "username": message.channel_session["username"],
+ }),
+ })
+
+ # Connected to websocket.disconnect
+ @channel_session
+ def ws_disconnect(message, room_name):
+ Group("chat-%s" % room_name).discard(message.reply_channel)
+
+Update ``routing.py`` as well::
+
+ # in routing.py
+ from channels.routing import route
+ from myapp.consumers import ws_connect, ws_message, ws_disconnect
+
+ channel_routing = [
+ route("websocket.connect", ws_connect, path=r"^/(?P[a-zA-Z0-9_]+)/$"),
+ route("websocket.receive", ws_message, path=r"^/(?P[a-zA-Z0-9_]+)/$"),
+ route("websocket.disconnect", ws_disconnect, path=r"^/(?P[a-zA-Z0-9_]+)/$"),
+ ]
+
+If you play around with it from the console (or start building a simple
+JavaScript chat client that appends received messages to a div), you'll see
+that you can set a chat room with the initial request.
+
+
+Authentication
+--------------
+
+Now, of course, a WebSocket solution is somewhat limited in scope without the
+ability to live with the rest of your website - in particular, we want to make
+sure we know what user we're talking to, in case we have things like private
+chat channels (we don't want a solution where clients just ask for the right
+channels, as anyone could change the code and just put in private channel names).
+
+It can also save you having to manually make clients ask for what they want to
+see; if I see you open a WebSocket to my "updates" endpoint, and I know which
+user you are, I can just auto-add that channel to all the relevant groups (mentions
+of that user, for example).
+
+Handily, as WebSockets start off using the HTTP protocol, they have a lot of
+familiar features, including a path, GET parameters, and cookies. We'd like to
+use these to hook into the familiar Django session and authentication systems;
+after all, WebSockets are no good unless we can identify who they belong to
+and do things securely.
+
+In addition, we don't want the interface servers storing data or trying to run
+authentication; they're meant to be simple, lean, fast processes without much
+state, and so we'll need to do our authentication inside our consumer functions.
+
+Fortunately, because Channels has an underlying spec for WebSockets and other
+messages (:doc:`ASGI <asgi>`), it ships with decorators that help you with
+both authentication and getting the underlying Django session (which is what
+Django authentication relies on).
+
+Channels can use Django sessions either from cookies (if you're running your
+websocket server on the same domain as your main site, using something like Daphne),
+or from a ``session_key`` GET parameter, which works if you want to keep
+running your HTTP requests through a WSGI server and offload WebSockets to a
+second server process on another domain.
+
+You get access to a user's normal Django session using the ``http_session``
+decorator - that gives you a ``message.http_session`` attribute that behaves
+just like ``request.session``. You can go one further and use ``http_session_user``
+which will provide a ``message.user`` attribute as well as the session attribute.
+
+Now, one thing to note is that you only get the detailed HTTP information
+during the ``connect`` message of a WebSocket connection (you can read more
+about that in the :doc:`ASGI spec <asgi>`) - this means we're not
+wasting bandwidth sending the same information over the wire needlessly.
+
+This also means we'll have to grab the user in the connection handler and then
+store it in the session; thankfully, Channels ships with both a ``channel_session_user``
+decorator that works like the ``http_session_user`` decorator we mentioned above but
+loads the user from the *channel* session rather than the *HTTP* session,
+and a function called ``transfer_user`` which replicates a user from one session
+to another. Even better, it combines all of these into a ``channel_session_user_from_http``
+decorator.
+
+Bringing that all together, let's make a chat server where users can only
+chat to people with the same first letter of their username::
+
+ # In consumers.py
+ from channels import Channel, Group
+ from channels.sessions import channel_session
+ from channels.auth import channel_session_user, channel_session_user_from_http
+
+ # Connected to websocket.connect
+ @channel_session_user_from_http
+ def ws_add(message):
+ # Accept connection
+ message.reply_channel.send({"accept": True})
+ # Add them to the right group
+ Group("chat-%s" % message.user.username[0]).add(message.reply_channel)
+
+ # Connected to websocket.receive
+ @channel_session_user
+ def ws_message(message):
+ Group("chat-%s" % message.user.username[0]).send({
+ "text": message['text'],
+ })
+
+ # Connected to websocket.disconnect
+ @channel_session_user
+ def ws_disconnect(message):
+ Group("chat-%s" % message.user.username[0]).discard(message.reply_channel)
+
+If you're just using ``runserver`` (and so Daphne), you can just connect
+and your cookies should transfer your auth over. If you were running WebSockets
+on a separate domain, you'd have to remember to provide the
+Django session ID as part of the URL, like this:
+
+.. code-block:: javascript
+
+ socket = new WebSocket("ws://127.0.0.1:9000/?session_key=abcdefg");
+
+You can get the current session key in a template with ``{{ request.session.session_key }}``.
+Note that this can't work with signed cookie sessions - since only HTTP
+responses can set cookies, Channels needs a session backend it can write to
+directly in order to store state.
+
+
+Security
+--------
+
+Unlike AJAX requests, WebSocket requests are not limited by the Same-Origin
+policy. This means you don't have to take any extra steps when you have an HTML
+page served by host A containing JavaScript code wanting to connect to a
+WebSocket on Host B.
+
+While this can be convenient, it also implies that by default any third-party
+site can connect to your WebSocket application. When you are using the
+``http_session_user`` or the ``channel_session_user_from_http`` decorator, this
+connection would be authenticated.
+
+The WebSocket specification requires browsers to send the origin of a WebSocket
+request in the HTTP header named ``Origin``, but validating that header is left
+to the server.
+
+You can use the decorator ``channels.security.websockets.allowed_hosts_only``
+on a ``websocket.connect`` consumer to only allow requests originating
+from hosts listed in the ``ALLOWED_HOSTS`` setting::
+
+ # In consumers.py
+ from channels import Channel, Group
+ from channels.sessions import channel_session
+ from channels.auth import channel_session_user, channel_session_user_from_http
+ from channels.security.websockets import allowed_hosts_only
+
+ # Connected to websocket.connect
+ @allowed_hosts_only
+ @channel_session_user_from_http
+ def ws_add(message):
+ # Accept connection
+ ...
+
+Requests from other hosts, or requests with a missing or invalid ``Origin``
+header, are now rejected.
+
+The name ``allowed_hosts_only`` is an alias for the class-based decorator
+``AllowedHostsOnlyOriginValidator``, which inherits from
+``BaseOriginValidator``. If you have custom requirements for origin validation,
+create a subclass and overwrite the method
+``validate_origin(self, message, origin)``. It must return True when a message
+should be accepted, False otherwise.
+
+
+Routing
+-------
+
+The ``routing.py`` file acts very much like Django's ``urls.py``, including the
+ability to route things to different consumers based on ``path``, or any other
+message attribute that's a string (for example, ``http.request`` messages have
+a ``method`` key you could route based on).
+
+Much like URLs, you route using regular expressions; the main difference is that
+because the ``path`` is not special-cased - Channels doesn't know that it's a URL -
+you have to start patterns with the root ``/``, and end includes without a ``/``
+so that when the patterns combine, they work correctly.
+
+Finally, because you're matching against message contents using keyword arguments,
+you can only use named groups in your regular expressions! Here's an example of
+routing our chat from above::
+
+ http_routing = [
+ route("http.request", poll_consumer, path=r"^/poll/$", method=r"^POST$"),
+ ]
+
+ chat_routing = [
+ route("websocket.connect", chat_connect, path=r"^/(?P[a-zA-Z0-9_]+)/$"),
+ route("websocket.disconnect", chat_disconnect),
+ ]
+
+ routing = [
+ # You can use a string import path as the first argument as well.
+ include(chat_routing, path=r"^/chat"),
+ include(http_routing),
+ ]
+
+The routing is resolved in order, short-circuiting around the
+includes if one or more of their matches fails. You don't have to start with
+the ``^`` symbol - we use Python's ``re.match`` function, which starts at the
+start of a line anyway - but it's considered good practice.
+
+When an include matches part of a message value, it chops off the bit of the
+value it matched before passing it down to its routes or sub-includes, so you
+can put the same routing under multiple includes with different prefixes if
+you like.
+
+Because these matches come through as keyword arguments, we could modify our
+consumer above to use a room based on URL rather than username::
+
+ # Connected to websocket.connect
+ @channel_session_user_from_http
+ def ws_add(message, room_name):
+ # Add them to the right group
+ Group("chat-%s" % room_name).add(message.reply_channel)
+ # Accept the connection request
+ message.reply_channel.send({"accept": True})
+
+In the next section, we'll change to sending the ``room_name`` as a part of the
+WebSocket message - which you might do if you had a multiplexing client -
+but you could use routing there as well.
+
+
+Models
+------
+
+So far, we've just been taking incoming messages and rebroadcasting them to
+other clients connected to the same group, but this isn't that great; really,
+we want to persist messages to a datastore, and we'd probably like to be
+able to inject messages into chatrooms from things other than WebSocket client
+connections (perhaps a built-in bot, or server status messages).
+
+Thankfully, we can just use Django's ORM to handle persistence of messages and
+easily integrate the send into the save flow of the model, rather than the
+message receive - that way, any new message saved will be broadcast to all
+the appropriate clients, no matter where it's saved from.
+
+We'll even take some performance considerations into account: We'll make our
+own custom channel for new chat messages and move the model save and the chat
+broadcast into that, meaning the sending process/consumer can move on
+immediately and not spend time waiting for the database save and the
+(slow on some backends) ``Group.send()`` call.
+
+Let's see what that looks like, assuming we
+have a ChatMessage model with ``message`` and ``room`` fields::
+
+ # In consumers.py
+ from channels import Channel, Group
+ from channels.sessions import channel_session
+ from .models import ChatMessage
+
+ # Connected to chat-messages
+ def msg_consumer(message):
+ # Save to model
+ room = message.content['room']
+ ChatMessage.objects.create(
+ room=room,
+ message=message.content['message'],
+ )
+ # Broadcast to listening sockets
+ Group("chat-%s" % room).send({
+ "text": message.content['message'],
+ })
+
+ # Connected to websocket.connect
+ @channel_session
+ def ws_connect(message):
+ # Work out room name from path (ignore slashes)
+ room = message.content['path'].strip("/")
+ # Save room in session and add us to the group
+ message.channel_session['room'] = room
+ Group("chat-%s" % room).add(message.reply_channel)
+ # Accept the connection request
+ message.reply_channel.send({"accept": True})
+
+ # Connected to websocket.receive
+ @channel_session
+ def ws_message(message):
+ # Stick the message onto the processing queue
+ Channel("chat-messages").send({
+ "room": message.channel_session['room'],
+ "message": message['text'],
+ })
+
+ # Connected to websocket.disconnect
+ @channel_session
+ def ws_disconnect(message):
+ Group("chat-%s" % message.channel_session['room']).discard(message.reply_channel)
+
+Update ``routing.py`` as well::
+
+ # in routing.py
+ from channels.routing import route
+ from myapp.consumers import ws_connect, ws_message, ws_disconnect, msg_consumer
+
+ channel_routing = [
+ route("websocket.connect", ws_connect),
+ route("websocket.receive", ws_message),
+ route("websocket.disconnect", ws_disconnect),
+ route("chat-messages", msg_consumer),
+ ]
+
+Note that we could add messages onto the ``chat-messages`` channel from anywhere;
+inside a View, inside another model's ``post_save`` signal, inside a management
+command run via ``cron``. If we wanted to write a bot, too, we could put its
+listening logic inside the ``chat-messages`` consumer, as every message would
+pass through it.
+
+
+.. _enforcing-ordering:
+
+Enforcing Ordering
+------------------
+
+There's one final concept we want to introduce you to before you go on to build
+sites with Channels - consumer ordering.
+
+Because Channels is a distributed system that can have many workers, by default
+it just processes messages in the order the workers get them off the queue.
+It's entirely feasible for a WebSocket interface server to send out two
+``receive`` messages close enough together that a second worker will pick
+up and start processing the second message before the first worker has
+finished processing the first.
+
+This is particularly annoying if you're storing things in the session in the
+one consumer and trying to get them in the other consumer - because
+the ``connect`` consumer hasn't exited, its session hasn't saved. You'd get the
+same effect if someone tried to request a view before the login view had finished
+processing, of course, but HTTP requests usually come in a bit slower from clients.
+
+Channels has a solution - the ``enforce_ordering`` decorator. All WebSocket
+messages contain an ``order`` key, and this decorator uses that to make sure that
+messages are consumed in the right order. In addition, the ``connect`` message
+blocks the socket opening until it's responded to, so you are always guaranteed
+that ``connect`` will run before any ``receives`` even without the decorator.
+
+The decorator uses ``channel_session`` to keep track of what numbered messages
+have been processed, and if a worker tries to run a consumer on an out-of-order
+message, it raises the ``ConsumeLater`` exception, which puts the message
+back on the channel it came from and tells the worker to work on another message.
+
+There's a high cost to using ``enforce_ordering``, which is why it's an optional
+decorator. Here's an example of it being used::
+
+ # In consumers.py
+ from channels import Channel, Group
+ from channels.sessions import channel_session, enforce_ordering
+ from channels.auth import channel_session_user, channel_session_user_from_http
+
+ # Connected to websocket.connect
+ @channel_session_user_from_http
+ def ws_add(message):
+ # This doesn't need a decorator - it always runs separately
+ message.channel_session['sent'] = 0
+ # Add them to the right group
+ Group("chat").add(message.reply_channel)
+ # Accept the socket
+ message.reply_channel.send({"accept": True})
+
+ # Connected to websocket.receive
+ @enforce_ordering
+ @channel_session_user
+ def ws_message(message):
+ # Without enforce_ordering this wouldn't work right
+ message.channel_session['sent'] = message.channel_session['sent'] + 1
+ Group("chat").send({
+ "text": "%s: %s" % (message.channel_session['sent'], message['text']),
+ })
+
+ # Connected to websocket.disconnect
+ @channel_session_user
+ def ws_disconnect(message):
+ Group("chat").discard(message.reply_channel)
+
+Generally, the performance (and safety) of your ordering is tied to your
+session backend's performance. Make sure you choose a session backend wisely
+if you're going to rely heavily on ``enforce_ordering``.
+
+
+Next Steps
+----------
+
+That covers the basics of using Channels; you've seen not only how to use basic
+channels, but also how they integrate with WebSockets, how to use groups
+to manage logical sets of channels, and how Django's session and authentication
+systems easily integrate with WebSockets.
+
+We recommend you read through the rest of the reference documentation to see
+more about what you can do with channels; in particular, you may want to look at
+our :doc:`deploying` documentation to get an idea of how to
+design and run apps in production environments.
diff --git a/docs/index.rst b/docs/index.rst
new file mode 100644
index 0000000..8c90e78
--- /dev/null
+++ b/docs/index.rst
@@ -0,0 +1,62 @@
+Django Channels
+===============
+
+Channels is a project to make Django able to handle more than just plain
+HTTP requests, including WebSockets and HTTP2, as well as the ability to
+run code after a response has been sent for things like thumbnailing or
+background calculation.
+
+It's an easy-to-understand extension of the Django view model, and easy
+to integrate and deploy.
+
+First, read our :doc:`concepts` documentation to get an idea of the
+data model underlying Channels and how they're used inside Django.
+
+Then, read :doc:`getting-started` to see how to get up and running with
+WebSockets with only 30 lines of code.
+
+If you want a quick overview, start with :doc:`inshort`.
+
+If you are interested in contributing, please read our :doc:`contributing` docs!
+
+
+Projects
+--------
+
+Channels comprises six packages:
+
+* `Channels `_, the Django integration layer
+* `Daphne `_, the HTTP and WebSocket termination server
+* `asgiref `_, the base ASGI library/memory backend
+* `asgi_redis `_, the Redis channel backend
+* `asgi_rabbitmq `_, the RabbitMQ channel backend
+* `asgi_ipc `_, the POSIX IPC channel backend
+
+This documentation covers the system as a whole; individual release notes and
+instructions can be found in the individual repositories.
+
+
+Topics
+------
+
+.. toctree::
+ :maxdepth: 2
+
+ inshort
+ concepts
+ installation
+ getting-started
+ deploying
+ generics
+ routing
+ binding
+ javascript
+ backends
+ delay
+ testing
+ reference
+ faqs
+ asgi
+ community
+ contributing
+ releases/index
diff --git a/docs/inshort.rst b/docs/inshort.rst
new file mode 100644
index 0000000..848b3a9
--- /dev/null
+++ b/docs/inshort.rst
@@ -0,0 +1,114 @@
+In Short
+========
+
+
+What is Channels?
+-----------------
+
+Channels extends Django to add :doc:`a new layer <concepts>`
+that allows two important features:
+
+* WebSocket handling, in a way very :ref:`similar to normal views <websocket-example>`
+* Background tasks, running in the same servers as the rest of Django
+
+It allows other things too, but these are the ones you'll use to start with.
+
+
+How?
+----
+
+It separates Django into two process types:
+
+* One that handles HTTP and WebSockets
+* One that runs views, websocket handlers and background tasks (*consumers*)
+
+They communicate via a protocol called :doc:`ASGI <asgi>`, which is similar
+to WSGI but runs over a network and allows for more protocol types.
+
+Channels does not introduce asyncio, gevent, or any other async code to
+your Django code; all of your business logic runs synchronously in a worker
+process or thread.
+
+
+Do I have to change how I run Django?
+-------------------------------------
+
+No, all the new stuff is entirely optional. If you want it, however, you'll
+change from running Django under a WSGI server to running:
+
+* An ASGI server, probably `Daphne <https://github.com/django/daphne>`_
+* Django worker servers, using ``manage.py runworker``
+* Something to route ASGI requests over, like Redis.
+
+Even when you're running on Channels, it routes all HTTP requests to the Django
+view system by default, so it works like before.
+
+
+What else does Channels give me?
+--------------------------------
+
+Other features include:
+
+* Easy HTTP long-poll support for thousands of clients at once
+* Full session and auth support for WebSockets
+* Automatic user login for WebSockets based on site cookies
+* Built-in primitives for mass triggering of events (chat, live blogs, etc.)
+* Zero-downtime deployment with browsers paused while new workers spin up
+* Optional low-level HTTP control on a per-URL basis
+* Extendability to other protocols or event sources (e.g. WebRTC, raw UDP, SMS)
+
+
+Does it scale?
+--------------
+
+Yes, you can run any number of *protocol servers* (ones that serve HTTP
+and WebSockets) and *worker servers* (ones that run your Django code) to
+fit your use case.
+
+The ASGI spec allows a number of different *channel layers* to be plugged in
+between these two components, with different performance characteristics, and
+it's designed to allow both easy sharding as well as the ability to run
+separate clusters with their own protocol and worker servers.
+
+
+Why doesn't it use my favourite message queue?
+----------------------------------------------
+
+Channels is deliberately designed to prefer low latency (goal is a few milliseconds)
+and high throughput over guaranteed delivery, which doesn't match some
+message queue designs.
+
+Some features, like :ref:`guaranteed ordering of messages `,
+are opt-in, as they incur a performance hit, but they make it more like a message queue.
+
+
+Do I need to worry about making all my code async-friendly?
+-----------------------------------------------------------
+
+No, all your code runs synchronously without any sockets or event loops to
+block. You can use async code within a Django view or channel consumer if you
+like - for example, to fetch lots of URLs in parallel - but it doesn't
+affect the overall deployed site.
+
+
+What version of Django does it work with?
+-----------------------------------------
+
+You can install Channels as a library for Django >= 1.8. It has a few
+extra dependencies, but these will all be installed if you use ``pip``.
+
+
+Official project
+----------------
+
+Channels is not in the Django core as initially planned, but it has been
+an official Django project since September 2016. More information about
+Channels being adopted as an official project is available on the
+`Django blog <https://www.djangoproject.com/weblog/>`_.
+
+
+What do I read next?
+--------------------
+
+Start off by reading about the :doc:`concepts underlying Channels <concepts>`,
+and then move on to read our example-laden :doc:`Getting Started guide <getting-started>`.
diff --git a/docs/installation.rst b/docs/installation.rst
new file mode 100644
index 0000000..2002643
--- /dev/null
+++ b/docs/installation.rst
@@ -0,0 +1,42 @@
+Installation
+============
+
+Channels is available on PyPI - to install it, just run::
+
+ pip install -U channels
+
+Once that's done, you should add ``channels`` to your
+``INSTALLED_APPS`` setting::
+
+ INSTALLED_APPS = (
+ 'django.contrib.auth',
+ 'django.contrib.contenttypes',
+ 'django.contrib.sessions',
+ 'django.contrib.sites',
+ ...
+ 'channels',
+ )
+
+That's it! Once enabled, ``channels`` will integrate itself into Django and
+take control of the ``runserver`` command. See :doc:`getting-started` for more.
+
+.. note::
+    Please be wary of any other third-party apps that require an overloaded or
+    replacement ``runserver`` command. Channels provides a separate
+    ``runserver`` command and may conflict with them. An example
+    of such a conflict is with `whitenoise.runserver_nostatic <http://whitenoise.evans.io/en/stable/django.html>`_
+    from `whitenoise <https://github.com/evansd/whitenoise>`_. In order to
+    solve such issues, try moving ``channels`` to the top of your ``INSTALLED_APPS``
+    or remove the offending app altogether.
+
+Installing the latest development version
+-----------------------------------------
+
+To install the latest version of Channels, clone the repo, change to the repo
+directory, and pip install it into your current virtual environment::
+
+ $ git clone git@github.com:django/channels.git
+ $ cd channels
+ $
+ (environment) $ pip install -e . # the dot specifies the current repo
diff --git a/docs/javascript.rst b/docs/javascript.rst
new file mode 100644
index 0000000..488a4c3
--- /dev/null
+++ b/docs/javascript.rst
@@ -0,0 +1,68 @@
+Channels WebSocket wrapper
+==========================
+
+Channels ships with a JavaScript WebSocket wrapper to help you connect to your
+websocket and send/receive messages.
+
+First, you must include the JavaScript library in your template; if you're using
+Django's staticfiles, this is as easy as::
+
+ {% load staticfiles %}
+
+    <script type="text/javascript" src="{% static 'channels/js/websocketbridge.js' %}"></script>
+
+If you are using an alternative method of serving static files, the compiled
+source code is located at ``channels/static/channels/js/websocketbridge.js`` in
+a Channels installation. We compile the file for you each release; it's ready
+to serve as-is.
+
+The library is deliberately quite low-level and generic; it's designed to
+be compatible with any JavaScript code or framework, so you can build more
+specific integration on top of it.
+
+To process messages:
+
+.. code-block:: javascript
+
+ const webSocketBridge = new channels.WebSocketBridge();
+ webSocketBridge.connect('/ws/');
+ webSocketBridge.listen(function(action, stream) {
+ console.log(action, stream);
+ });
+
+To send messages, use the ``send`` method:
+
+.. code-block:: javascript
+
+ webSocketBridge.send({prop1: 'value1', prop2: 'value1'});
+
+To demultiplex specific streams:
+
+.. code-block:: javascript
+
+    webSocketBridge.connect('/ws/');
+    webSocketBridge.listen();
+ webSocketBridge.demultiplex('mystream', function(action, stream) {
+ console.log(action, stream);
+ });
+ webSocketBridge.demultiplex('myotherstream', function(action, stream) {
+ console.info(action, stream);
+ });
+
+To send a message to a specific stream:
+
+.. code-block:: javascript
+
+ webSocketBridge.stream('mystream').send({prop1: 'value1', prop2: 'value1'})
+
+The ``WebSocketBridge`` instance exposes the underlying ``ReconnectingWebSocket``
+as the ``socket`` property. You can use this property to add any custom
+behavior. For example:
+
+.. code-block:: javascript
+
+ webSocketBridge.socket.addEventListener('open', function() {
+ console.log("Connected to WebSocket");
+ })
+
+
+The library is also available as an npm module, under the name
+`django-channels <https://www.npmjs.com/package/django-channels>`_
diff --git a/docs/make.bat b/docs/make.bat
new file mode 100644
index 0000000..33f68e4
--- /dev/null
+++ b/docs/make.bat
@@ -0,0 +1,242 @@
+@ECHO OFF
+
+REM Command file for Sphinx documentation
+
+if "%SPHINXBUILD%" == "" (
+ set SPHINXBUILD=sphinx-build
+)
+set BUILDDIR=_build
+set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% .
+set I18NSPHINXOPTS=%SPHINXOPTS% .
+if NOT "%PAPER%" == "" (
+ set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
+ set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS%
+)
+
+if "%1" == "" goto help
+
+if "%1" == "help" (
+ :help
+	echo.Please use `make ^<target^>` where ^<target^> is one of
+ echo. html to make standalone HTML files
+ echo. dirhtml to make HTML files named index.html in directories
+ echo. singlehtml to make a single large HTML file
+ echo. pickle to make pickle files
+ echo. json to make JSON files
+ echo. htmlhelp to make HTML files and a HTML help project
+ echo. qthelp to make HTML files and a qthelp project
+ echo. devhelp to make HTML files and a Devhelp project
+ echo. epub to make an epub
+ echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter
+ echo. text to make text files
+ echo. man to make manual pages
+ echo. texinfo to make Texinfo files
+ echo. gettext to make PO message catalogs
+ echo. changes to make an overview over all changed/added/deprecated items
+ echo. xml to make Docutils-native XML files
+ echo. pseudoxml to make pseudoxml-XML files for display purposes
+ echo. linkcheck to check all external links for integrity
+ echo. doctest to run all doctests embedded in the documentation if enabled
+ goto end
+)
+
+if "%1" == "clean" (
+ for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i
+ del /q /s %BUILDDIR%\*
+ goto end
+)
+
+
+%SPHINXBUILD% 2> nul
+if errorlevel 9009 (
+ echo.
+ echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
+ echo.installed, then set the SPHINXBUILD environment variable to point
+ echo.to the full path of the 'sphinx-build' executable. Alternatively you
+ echo.may add the Sphinx directory to PATH.
+ echo.
+ echo.If you don't have Sphinx installed, grab it from
+ echo.http://sphinx-doc.org/
+ exit /b 1
+)
+
+if "%1" == "html" (
+ %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The HTML pages are in %BUILDDIR%/html.
+ goto end
+)
+
+if "%1" == "dirhtml" (
+ %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.
+ goto end
+)
+
+if "%1" == "singlehtml" (
+ %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml.
+ goto end
+)
+
+if "%1" == "pickle" (
+ %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished; now you can process the pickle files.
+ goto end
+)
+
+if "%1" == "json" (
+ %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished; now you can process the JSON files.
+ goto end
+)
+
+if "%1" == "htmlhelp" (
+ %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished; now you can run HTML Help Workshop with the ^
+.hhp project file in %BUILDDIR%/htmlhelp.
+ goto end
+)
+
+if "%1" == "qthelp" (
+ %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished; now you can run "qcollectiongenerator" with the ^
+.qhcp project file in %BUILDDIR%/qthelp, like this:
+ echo.^> qcollectiongenerator %BUILDDIR%\qthelp\Channels.qhcp
+ echo.To view the help file:
+ echo.^> assistant -collectionFile %BUILDDIR%\qthelp\Channels.ghc
+ goto end
+)
+
+if "%1" == "devhelp" (
+ %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished.
+ goto end
+)
+
+if "%1" == "epub" (
+ %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The epub file is in %BUILDDIR%/epub.
+ goto end
+)
+
+if "%1" == "latex" (
+ %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished; the LaTeX files are in %BUILDDIR%/latex.
+ goto end
+)
+
+if "%1" == "latexpdf" (
+ %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
+ cd %BUILDDIR%/latex
+ make all-pdf
+ cd %BUILDDIR%/..
+ echo.
+ echo.Build finished; the PDF files are in %BUILDDIR%/latex.
+ goto end
+)
+
+if "%1" == "latexpdfja" (
+ %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
+ cd %BUILDDIR%/latex
+ make all-pdf-ja
+ cd %BUILDDIR%/..
+ echo.
+ echo.Build finished; the PDF files are in %BUILDDIR%/latex.
+ goto end
+)
+
+if "%1" == "text" (
+ %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The text files are in %BUILDDIR%/text.
+ goto end
+)
+
+if "%1" == "man" (
+ %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The manual pages are in %BUILDDIR%/man.
+ goto end
+)
+
+if "%1" == "texinfo" (
+ %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo.
+ goto end
+)
+
+if "%1" == "gettext" (
+ %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The message catalogs are in %BUILDDIR%/locale.
+ goto end
+)
+
+if "%1" == "changes" (
+ %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.The overview file is in %BUILDDIR%/changes.
+ goto end
+)
+
+if "%1" == "linkcheck" (
+ %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Link check complete; look for any errors in the above output ^
+or in %BUILDDIR%/linkcheck/output.txt.
+ goto end
+)
+
+if "%1" == "doctest" (
+ %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Testing of doctests in the sources finished, look at the ^
+results in %BUILDDIR%/doctest/output.txt.
+ goto end
+)
+
+if "%1" == "xml" (
+ %SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The XML files are in %BUILDDIR%/xml.
+ goto end
+)
+
+if "%1" == "pseudoxml" (
+ %SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml.
+ goto end
+)
+
+:end
diff --git a/docs/reference.rst b/docs/reference.rst
new file mode 100644
index 0000000..c90e8f4
--- /dev/null
+++ b/docs/reference.rst
@@ -0,0 +1,209 @@
+Reference
+=========
+
+.. _ref-consumers:
+
+Consumers
+---------
+
+When you configure channel routing, the object assigned to a channel
+should be a callable that takes exactly one positional argument, here
+called ``message``, which is a :ref:`message object <ref-message>`. A consumer
+is any callable that fits this definition.
+
+Consumers are not expected to return anything, and if they do, it will be
+ignored. They may raise ``channels.exceptions.ConsumeLater`` to re-insert
+their current message at the back of the channel it was on, but be aware you
+can only do this so many times (10 by default) before the message is dropped
+to avoid deadlocking.
+
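+For illustration, a sketch of a consumer that defers work until an external
+resource is ready (``resource_ready`` is a hypothetical helper, not part of
+Channels)::
+
+    from channels.exceptions import ConsumeLater
+
+    def my_consumer(message):
+        if not resource_ready():
+            # Put the message back on its channel to be retried later.
+            raise ConsumeLater()
+        message.reply_channel.send({"text": "processed"})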
+
+.. _ref-message:
+
+Message
+-------
+
+Message objects are what consumers get passed as their only argument. They
+encapsulate the basic :doc:`ASGI <asgi>` message, which is a ``dict``, with
+extra information. They have the following attributes:
+
+* ``content``: The actual message content, as a dict. See the
+  :doc:`ASGI spec <asgi>` or protocol message definition document for how this
+ is structured.
+
+* ``channel``: A :ref:`Channel <ref-channel>` object, representing the channel
+ this message was received on. Useful if one consumer handles multiple channels.
+
+* ``reply_channel``: A :ref:`Channel <ref-channel>` object, representing the
+ unique reply channel for this message, or ``None`` if there isn't one.
+
+* ``channel_layer``: A :ref:`ChannelLayer <ref-channellayer>` object,
+ representing the underlying channel layer this was received on. This can be
+ useful in projects that have more than one layer to identify where to send
+ messages the consumer generates (you can pass it to the constructor of
+  :ref:`Channel <ref-channel>` or :ref:`Group <ref-group>`)
+
+
+.. _ref-channel:
+
+Channel
+-------
+
+Channel objects are a simple abstraction around ASGI channels, which by default
+are unicode strings. The constructor looks like this::
+
+ channels.Channel(name, alias=DEFAULT_CHANNEL_LAYER, channel_layer=None)
+
+Normally, you'll just call ``Channel("my.channel.name")`` and it'll do the
+right thing, but if you're in a project with multiple channel layers set up,
+you can pass in either the layer alias or the layer object and it'll send
+onto that one instead. They have the following attributes:
+
+* ``name``: The unicode string representing the channel name.
+
+* ``channel_layer``: A :ref:`ChannelLayer <ref-channellayer>` object,
+ representing the underlying channel layer to send messages on.
+
+* ``send(content)``: Sends the ``dict`` provided as *content* over the channel.
+ The content should conform to the relevant ASGI spec or protocol definition.
+
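+For example, a minimal sketch sending a task message onto a custom channel
+(the channel name and content are illustrative)::
+
+    from channels import Channel
+
+    # Deliver a dict to whatever consumer is routed to this channel.
+    Channel("thumbnailing-tasks").send({"id": 34245})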
+
+.. _ref-group:
+
+Group
+-----
+
+Groups represent the underlying :doc:`ASGI <asgi>` group concept in an
+object-oriented way. The constructor looks like this::
+
+ channels.Group(name, alias=DEFAULT_CHANNEL_LAYER, channel_layer=None)
+
+Like :ref:`Channel <ref-channel>`, you would usually just pass a ``name``, but
+can pass a layer alias or object if you want to send on a non-default one.
+They have the following attributes:
+
+* ``name``: The unicode string representing the group name.
+
+* ``channel_layer``: A :ref:`ChannelLayer <ref-channellayer>` object,
+ representing the underlying channel layer to send messages on.
+
+* ``send(content)``: Sends the ``dict`` provided as *content* to all
+ members of the group.
+
+* ``add(channel)``: Adds the given channel (as either a :ref:`Channel <ref-channel>`
+ object or a unicode string name) to the group. If the channel is already in
+ the group, does nothing.
+
+* ``discard(channel)``: Removes the given channel (as either a
+  :ref:`Channel <ref-channel>` object or a unicode string name) from the group,
+ if it's in the group. Does nothing otherwise.
+
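+For example, a sketch of a ``websocket.connect`` consumer that adds the client
+to an illustrative group before accepting the socket::
+
+    from channels import Group
+
+    def ws_connect(message):
+        # Track this client so later Group("chat").send(...) reaches it.
+        Group("chat").add(message.reply_channel)
+        message.reply_channel.send({"accept": True})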
+
+.. _ref-channellayer:
+
+Channel Layer
+-------------
+
+These are a wrapper around the underlying :doc:`ASGI <asgi>` channel layers
+that supplies a routing system that maps channels to consumers, as well as
+aliases to help distinguish different layers in a project with multiple layers.
+
+You shouldn't make these directly; instead, get them by alias (``default`` is
+the default alias)::
+
+ from channels import channel_layers
+ layer = channel_layers["default"]
+
+They have the following attributes:
+
+* ``alias``: The alias of this layer.
+
+* ``router``: An object which represents the layer's mapping of channels
+ to consumers. Has the following attributes:
+
+ * ``channels``: The set of channels this router can handle, as unicode strings
+
+  * ``match(message)``: Takes a :ref:`Message <ref-message>` and returns either
+    a (consumer, kwargs) tuple specifying the consumer to run and the keyword
+    arguments to pass that were extracted via routing patterns, or ``None``,
+    meaning there's no route available.
+
+
+.. _ref-asgirequest:
+
+AsgiRequest
+-----------
+
+This is a subclass of ``django.http.HttpRequest`` that provides decoding from
+ASGI requests, and a few extra methods for ASGI-specific info. The constructor is::
+
+ channels.handler.AsgiRequest(message)
+
+``message`` must be an :doc:`ASGI <asgi>` ``http.request`` format message.
+
+Additional attributes are:
+
+* ``reply_channel``, a :ref:`Channel <ref-channel>` object that represents the
+ ``http.response.?`` reply channel for this request.
+
+* ``message``, the raw ASGI message passed in the constructor.
+
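+For example, a sketch of a consumer that decodes an ``http.request`` message
+into a familiar Django request object::
+
+    from channels.handler import AsgiRequest
+
+    def my_http_consumer(message):
+        request = AsgiRequest(message)
+        # From here on it behaves like a normal Django HttpRequest.
+        print(request.path, request.GET)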
+
+.. _ref-asgihandler:
+
+AsgiHandler
+-----------
+
+This is a class in ``channels.handler`` that's designed to handle the workflow
+of HTTP requests via ASGI messages. You likely don't need to interact with it
+directly, but there are two useful ways you can call it:
+
+* ``AsgiHandler(message)`` will process the message through the Django view
+ layer and yield one or more response messages to send back to the client,
+ encoded from the Django ``HttpResponse``.
+
+* ``encode_response(response)`` is a classmethod that can be called with a
+ Django ``HttpResponse`` and will yield one or more ASGI messages that are
+ the encoded response.
+
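+For example, a sketch of a low-level HTTP consumer that builds a response by
+hand and streams the encoded chunks back over the reply channel::
+
+    from django.http import HttpResponse
+    from channels.handler import AsgiHandler
+
+    def my_http_consumer(message):
+        response = HttpResponse("Hello world!")
+        # Encode the response into ASGI messages and send them back.
+        for chunk in AsgiHandler.encode_response(response):
+            message.reply_channel.send(chunk)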
+
+.. _ref-decorators:
+
+Decorators
+----------
+
+Channels provides decorators to assist with persisting data and security.
+
+* ``channel_session``: Provides a session-like object called "channel_session" to consumers
+ as a message attribute that will auto-persist across consumers with
+ the same incoming "reply_channel" value.
+
+ Use this to persist data across the lifetime of a connection.
+
+* ``http_session``: Wraps an HTTP or WebSocket connect consumer (or any consumer of messages
+ that provides a "cookies" or "get" attribute) to provide a "http_session"
+ attribute that behaves like request.session; that is, it's hung off of
+ a per-user session key that is saved in a cookie or passed as the
+ "session_key" GET parameter.
+
+  It won't automatically create and set a session cookie for users who
+  don't have one - that's what SessionMiddleware is for; this is a simpler
+  read-only version for more low-level code.
+
+ If a message does not have a session we can inflate, the "session" attribute
+ will be None, rather than an empty session you can write to.
+
+ Does not allow a new session to be set; that must be done via a view. This
+ is only an accessor for any existing session.
+
+* ``channel_and_http_session``: Enables both the channel_session and http_session.
+
+ Stores the http session key in the channel_session on websocket.connect messages.
+ It will then hydrate the http_session from that same key on subsequent messages.
+
+* ``allowed_hosts_only``: Wraps a WebSocket connect consumer and ensures the
+ request originates from an allowed host.
+
+  Reads the Origin header and only passes requests originating from a host
+  listed in ``ALLOWED_HOSTS`` to the consumer. Requests from other hosts or
+  with a missing or invalid Origin header are rejected.
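+
+For example, a sketch combining two of these decorators on a connect consumer
+(assuming they live in ``channels.sessions`` and ``channels.security.websockets``)::
+
+    from channels.security.websockets import allowed_hosts_only
+    from channels.sessions import channel_session
+
+    @allowed_hosts_only
+    @channel_session
+    def ws_connect(message):
+        # Only runs for requests from hosts listed in ALLOWED_HOSTS.
+        message.channel_session['connected'] = True
+        message.reply_channel.send({"accept": True})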
diff --git a/docs/releases/1.0.0.rst b/docs/releases/1.0.0.rst
new file mode 100644
index 0000000..1e6849f
--- /dev/null
+++ b/docs/releases/1.0.0.rst
@@ -0,0 +1,224 @@
+1.0.0 Release Notes
+===================
+
+Channels 1.0.0 brings together a number of design changes, including some
+breaking changes, into our first fully stable release, and also brings the
+databinding code out of alpha phase. It was released on 2017/01/08.
+
+The result is a faster, easier to use, and safer Channels, including one major
+change that will fix almost all problems with sessions and connect/receive
+ordering in a way that needs no persistent storage.
+
+It was unfortunately not possible to make all of the changes backwards
+compatible, though most code should not be too affected and the fixes are
+generally quite easy.
+
+You **must also update Daphne** to at least 1.0.0 to have this release of
+Channels work correctly.
+
+
+Major Features
+--------------
+
+Channels 1.0 introduces a couple of new major features.
+
+
+WebSocket accept/reject flow
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Rather than be immediately accepted, WebSockets now pause during the handshake
+while they send over a message on ``websocket.connect``, and your application
+must either accept or reject the connection before the handshake is completed
+and messages can be received.
+
+You **must** update Daphne to at least 1.0.0 to make this work correctly.
+
+This has several advantages:
+
+* You can now reject WebSockets before they even finish connecting, giving
+ appropriate error codes to browsers and not letting the browser-side socket
+ ever get into a connected state and send messages.
+
+* Combined with Consumer Atomicity (below), it means there is no longer any need
+ for the old "slight ordering" mode, as the connect consumer must run to
+ completion and accept the socket before any messages can be received and
+ forwarded onto ``websocket.receive``.
+
+* Any ``send`` message sent to the WebSocket will implicitly accept the connection,
+ meaning only a limited set of ``connect`` consumers need changes (see
+ Backwards Incompatible Changes below)
+
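+For example, a sketch of a connect consumer using the new flow (the path
+check is illustrative)::
+
+    def ws_connect(message):
+        if message['path'] == '/chat/':
+            # Complete the handshake and start receiving messages.
+            message.reply_channel.send({"accept": True})
+        else:
+            # Reject the connection mid-handshake.
+            message.reply_channel.send({"close": True})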
+
+Consumer Atomicity
+~~~~~~~~~~~~~~~~~~
+
+Consumers will now buffer messages you try to send until the consumer completes
+and then send them once it exits and the outbound part of any decorators have
+been run (even if an exception is raised).
+
+This makes the flow of messages much easier to reason about - consumers can now
+be reasoned about as atomic blocks that run and then send messages, meaning that
+if you send a message to start another consumer you're guaranteed that the
+sending consumer has finished running by the time it's acted upon.
+
+If you want to send messages immediately rather than at the end of the consumer,
+you can still do that by passing the ``immediately`` argument::
+
+ Channel("thumbnailing-tasks").send({"id": 34245}, immediately=True)
+
+This should be mostly backwards compatible, and may actually fix race
+conditions in some apps that were pre-existing.
+
+
+Databinding Group/Action Overhaul
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Previously, databinding subclasses had to implement
+``group_names(instance, action)`` to return what groups to send an instance's
+change to of the type ``action``. This had flaws, most notably when what was
+actually just a modification to the instance in question changed its
+permission status so more clients could see it; to those clients, it should
+instead have been "created".
+
+Now, Channels just calls ``group_names(instance)``, and you should return what
+groups can see the instance at the current point in time given the instance
+you were passed. Channels will actually call the method before and after changes,
+comparing the groups you gave, and sending out create, update or delete messages
+to clients appropriately.
+
+Existing databinding code will need to be adapted; see the
+"Backwards Incompatible Changes" section for more.
+
+
+Demultiplexer Overhaul
+~~~~~~~~~~~~~~~~~~~~~~
+
+Demultiplexers have changed to remove the behaviour where they re-sent messages
+onto new channels without special headers, and instead now correctly split out
+incoming messages into sub-messages that still look like ``websocket.receive``
+messages, and directly dispatch these to the relevant consumer.
+
+They also now forward all ``websocket.connect`` and ``websocket.disconnect``
+messages to all of their sub-consumers, so it's much easier to compose things
+together from code that also works outside the context of multiplexing.
+
+For more, read the updated :doc:`/generic` docs.
+
+
+Delay Server
+~~~~~~~~~~~~
+
+A built-in delay server, launched with ``manage.py rundelay``, now ships if you
+wish to use it. It needs some extra initial setup and uses a database for
+persistence; see :doc:`/delay` for more information.
+
+
+Minor Changes
+-------------
+
+* Serializers can now specify fields as ``__all__`` to auto-include all fields,
+ and ``exclude`` to remove certain unwanted fields.
+
+* ``runserver`` respects ``FORCE_SCRIPT_NAME``
+
+* Websockets can now be closed with a specific code by calling ``close(status=4000)``
+
+* ``enforce_ordering`` no longer has a ``slight`` mode (because of the accept
+ flow changes), and is more efficient with session saving.
+
+* ``runserver`` respects ``--nothreading`` and only launches one worker, and
+  takes a ``--http-timeout`` option if you want to override the default of ``60``.
+
+* A new ``@channel_and_http_session`` decorator rehydrates the HTTP session out
+ of the channel session if you want to access it inside receive consumers.
+
+* Streaming responses no longer have a chance of being cached.
+
+* ``request.META['SERVER_PORT']`` is now always a string.
+
+* ``http.disconnect`` now has a ``path`` key so you can route it.
+
+* Test client now has a ``send_and_consume`` method.
+
+
+Backwards Incompatible Changes
+------------------------------
+
+Connect Consumers
+~~~~~~~~~~~~~~~~~
+
+If you have a custom consumer for ``websocket.connect``, you must ensure that
+it either:
+
+* Sends at least one message onto the ``reply_channel`` that generates a
+ WebSocket frame (either ``bytes`` or ``text`` is set), either directly
+ or via a group.
+* Sends a message onto the ``reply_channel`` that is ``{"accept": True}``,
+ to accept a connection without sending data.
+* Sends a message onto the ``reply_channel`` that is ``{"close": True}``,
+ to reject a connection mid-handshake.
+
+Many consumers already do the former, but if your connect consumer does not
+send anything you MUST now send an accept message or the socket will remain
+in the handshaking phase forever and you'll never get any messages.
+
+All built-in Channels consumers (e.g. in the generic consumers) have been
+upgraded to do this.
+
+You **must** update Daphne to at least 1.0.0 to make this work correctly.
+
+
+Databinding group_names
+~~~~~~~~~~~~~~~~~~~~~~~
+
+If you have databinding subclasses, you will have implemented
+``group_names(instance, action)``, which returns the groups to use based on the
+instance and action provided.
+
+Now, instead, you must implement ``group_names(instance)``, which returns the
+groups that can see the instance as it is presented for you; the action
+results will be worked out for you. For example, if you want to only show
+objects marked as "admin_only" to admins, and objects without it to everyone,
+previously you would have done::
+
+ def group_names(self, instance, action):
+ if instance.admin_only:
+ return ["admins"]
+ else:
+ return ["admins", "non-admins"]
+
+Because you did nothing based on the ``action`` (and if you did, you would
+have got incomplete messages, hence this design change), you can just change
+the signature of the method like this::
+
+ def group_names(self, instance):
+ if instance.admin_only:
+ return ["admins"]
+ else:
+ return ["admins", "non-admins"]
+
+Now, when an object is updated to have ``admin_only = True``, the clients
+in the ``non-admins`` group will get a ``delete`` message, while those in
+the ``admins`` group will get an ``update`` message.
+
+
+Demultiplexers
+~~~~~~~~~~~~~~
+
+Demultiplexers have changed from using a ``mapping`` dict, which mapped stream
+names to channels, to using a ``consumers`` dict which maps stream names
+directly to consumer classes.
+
+You will have to convert over to using direct references to consumers, change
+the name of the dict, and then you can remove any channel routing for the old
+channels that were in ``mapping`` from your routes.
+
+Additionally, the Demultiplexer now forwards messages as they would look from
+a direct connection, meaning that where you previously got a decoded object
+through you will now get a correctly-formatted ``websocket.receive`` message
+through with the content as a ``text`` key, JSON-encoded. You will also
+now have to handle ``websocket.connect`` and ``websocket.disconnect`` messages.
+
+Both of these issues can be solved using the ``JsonWebsocketConsumer`` generic
+consumer, which will decode for you and correctly separate connection and
+disconnection handling into their own methods.
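+
+For example, a sketch of the new style (the stream names and consumer classes
+are illustrative)::
+
+    from channels.generic.websockets import WebsocketDemultiplexer
+
+    class Demultiplexer(WebsocketDemultiplexer):
+
+        # Maps stream names directly to consumer classes.
+        consumers = {
+            "intval": IntegerValueConsumer,
+            "chat": ChatConsumer,
+        }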
diff --git a/docs/releases/1.0.1.rst b/docs/releases/1.0.1.rst
new file mode 100644
index 0000000..ae07531
--- /dev/null
+++ b/docs/releases/1.0.1.rst
@@ -0,0 +1,16 @@
+1.0.1 Release Notes
+===================
+
+Channels 1.0.1 is a minor bugfix release, released on 2017/01/09.
+
+Changes
+-------
+
+* WebSocket generic views now accept connections by default in their connect
+ handler for better backwards compatibility.
+
+
+Backwards Incompatible Changes
+------------------------------
+
+None.
diff --git a/docs/releases/1.0.2.rst b/docs/releases/1.0.2.rst
new file mode 100644
index 0000000..fd0435d
--- /dev/null
+++ b/docs/releases/1.0.2.rst
@@ -0,0 +1,27 @@
+1.0.2 Release Notes
+===================
+
+Channels 1.0.2 is a minor bugfix release, released on 2017/01/12.
+
+Changes
+-------
+
+* Websockets can now be closed from anywhere using the new ``WebsocketCloseException``,
+  available as ``channels.exceptions.WebsocketCloseException(code=None)``. There is
+  also a generic ``ChannelSocketException`` you can base any exception on; if it
+  is caught, it gets handed the current ``message`` in a ``run`` method, so you
+  can do custom behaviours.
+
+* Calling ``Channel.send`` or ``Group.send`` from outside a consumer context
+ (i.e. in tests or management commands) will once again send the message immediately,
+ rather than putting it into the consumer message buffer to be flushed when the
+ consumer ends (which never happens)
+
+* The base implementation of databinding now correctly only calls ``group_names(instance)``,
+ as documented.
+
+
+Backwards Incompatible Changes
+------------------------------
+
+None.
diff --git a/docs/releases/1.0.3.rst b/docs/releases/1.0.3.rst
new file mode 100644
index 0000000..8179168
--- /dev/null
+++ b/docs/releases/1.0.3.rst
@@ -0,0 +1,26 @@
+1.0.3 Release Notes
+===================
+
+Channels 1.0.3 is a minor bugfix release, released on 2017/02/01.
+
+Changes
+-------
+
+* Database connections are no longer force-closed after each test is run.
+
+* Channel sessions are not re-saved if they're empty even if they're marked as
+ modified, allowing logout to work correctly.
+
+* WebsocketDemultiplexer now correctly does sessions for the second/third/etc.
+ connect and disconnect handlers.
+
+* Request reading timeouts now correctly return 408 rather than erroring out.
+
+* The ``rundelay`` delay server now only polls the database once per second,
+ and this interval is configurable with the ``--sleep`` option.
+
+
+Backwards Incompatible Changes
+------------------------------
+
+None.
diff --git a/docs/releases/1.1.0.rst b/docs/releases/1.1.0.rst
new file mode 100644
index 0000000..4b63a71
--- /dev/null
+++ b/docs/releases/1.1.0.rst
@@ -0,0 +1,35 @@
+1.1.0 Release Notes
+===================
+
+Channels 1.1.0 introduces a couple of major but backwards-compatible changes,
+including most notably the inclusion of a standard, framework-agnostic JavaScript
+library for easier integration with your site.
+
+
+Major Changes
+-------------
+
+* Channels now includes a JavaScript wrapper that wraps reconnection and
+ multiplexing for you on the client side. For more on how to use it, see the
+ :doc:`/javascript` documentation.
+
+* Test classes have been moved from ``channels.tests`` to ``channels.test``
+ to better match Django. Old imports from ``channels.tests`` will continue to
+ work but will trigger a deprecation warning, and ``channels.tests`` will be
+ removed completely in version 1.3.
+
+Minor Changes & Bugfixes
+------------------------
+
+* Bindings now support non-integer fields for primary keys on models.
+
+* The ``enforce_ordering`` decorator no longer suffers a race condition where
+ it would drop messages under high load.
+
+* ``runserver`` no longer errors if the ``staticfiles`` app is not enabled in Django.
+
+
+Backwards Incompatible Changes
+------------------------------
+
+None.
diff --git a/docs/releases/1.1.1.rst b/docs/releases/1.1.1.rst
new file mode 100644
index 0000000..6a2b1ab
--- /dev/null
+++ b/docs/releases/1.1.1.rst
@@ -0,0 +1,22 @@
+1.1.1 Release Notes
+===================
+
+Channels 1.1.1 is a bugfix release that fixes a packaging issue with the JavaScript files.
+
+
+Major Changes
+-------------
+
+None.
+
+Minor Changes & Bugfixes
+------------------------
+
+* The JavaScript binding introduced in 1.1.0 is now correctly packaged and
+ included in builds.
+
+
+Backwards Incompatible Changes
+------------------------------
+
+None.
diff --git a/docs/releases/1.1.2.rst b/docs/releases/1.1.2.rst
new file mode 100644
index 0000000..8e13b50
--- /dev/null
+++ b/docs/releases/1.1.2.rst
@@ -0,0 +1,29 @@
+1.1.2 Release Notes
+===================
+
+Channels 1.1.2 is a bugfix release for the 1.1 series, released on
+April 1st, 2017.
+
+
+Major Changes
+-------------
+
+None.
+
+
+Minor Changes & Bugfixes
+------------------------
+
+* Session name hash changed to SHA-1 to satisfy FIPS-140-2.
+
+* `scheme` key in ASGI-HTTP messages now translates into `request.is_secure()`
+ correctly.
+
+* WebsocketBridge now exposes the underlying WebSocket as `.socket`.
+
+
+Backwards Incompatible Changes
+------------------------------
+
+* When you upgrade, all current channel sessions will be invalidated; you
+  should make sure you disconnect all WebSockets during the upgrade.
diff --git a/docs/releases/1.1.3.rst b/docs/releases/1.1.3.rst
new file mode 100644
index 0000000..24d022d
--- /dev/null
+++ b/docs/releases/1.1.3.rst
@@ -0,0 +1,26 @@
+1.1.3 Release Notes
+===================
+
+Channels 1.1.3 is a bugfix release for the 1.1 series, released on
+April 5th, 2017.
+
+
+Major Changes
+-------------
+
+None.
+
+
+Minor Changes & Bugfixes
+------------------------
+
+* ``enforce_ordering`` now works correctly with the new-style process-specific
+ channels
+
+* ASGI channel layer versions are now explicitly checked for version compatibility
+
+
+Backwards Incompatible Changes
+------------------------------
+
+None.
diff --git a/docs/releases/1.1.4.rst b/docs/releases/1.1.4.rst
new file mode 100644
index 0000000..c57ff61
--- /dev/null
+++ b/docs/releases/1.1.4.rst
@@ -0,0 +1,37 @@
+1.1.4 Release Notes
+===================
+
+Channels 1.1.4 is a bugfix release for the 1.1 series, released on
+June 15th, 2017.
+
+
+Major Changes
+-------------
+
+None.
+
+
+Minor Changes & Bugfixes
+------------------------
+
+* Pending messages correctly handle retries in backlog situations
+
+* Workers in threading mode now respond to ctrl-C and gracefully exit.
+
+* ``request.META['QUERY_STRING']`` is now correctly encoded at all times.
+
+* Test client improvements
+
+* ``ChannelServerLiveTestCase`` added, allows an equivalent of the Django
+ ``LiveTestCase``.
+
+* Decorator added to check ``Origin`` headers (``allowed_hosts_only``)
+
+* New ``TEST_CONFIG`` setting in ``CHANNEL_LAYERS`` that allows varying of
+ the channel layer for tests (e.g. using a different Redis install)
+
+
+Backwards Incompatible Changes
+------------------------------
+
+None.
diff --git a/docs/releases/1.1.5.rst b/docs/releases/1.1.5.rst
new file mode 100644
index 0000000..3c48cdf
--- /dev/null
+++ b/docs/releases/1.1.5.rst
@@ -0,0 +1,22 @@
+1.1.5 Release Notes
+===================
+
+Channels 1.1.5 is a packaging release for the 1.1 series, released on
+June 16th, 2017.
+
+
+Major Changes
+-------------
+
+None.
+
+
+Minor Changes & Bugfixes
+------------------------
+
+* The Daphne dependency requirement was bumped to 1.3.0.
+
+Backwards Incompatible Changes
+------------------------------
+
+None.
diff --git a/docs/releases/1.1.6.rst b/docs/releases/1.1.6.rst
new file mode 100644
index 0000000..258c348
--- /dev/null
+++ b/docs/releases/1.1.6.rst
@@ -0,0 +1,23 @@
+1.1.6 Release Notes
+===================
+
+Channels 1.1.6 is a bugfix release for the 1.1 series, released on
+June 28th, 2017.
+
+
+Major Changes
+-------------
+
+None.
+
+
+Minor Changes & Bugfixes
+------------------------
+
+* The ``runserver`` ``server_cls`` override no longer fails with more modern
+ Django versions that pass an ``ipv6`` parameter.
+
+Backwards Incompatible Changes
+------------------------------
+
+None.
diff --git a/docs/releases/1.1.7.rst b/docs/releases/1.1.7.rst
new file mode 100644
index 0000000..9e0a4ec
--- /dev/null
+++ b/docs/releases/1.1.7.rst
@@ -0,0 +1,26 @@
+1.1.7 Release Notes
+===================
+
+Channels 1.1.7 is a bugfix release for the 1.1 series, released on
+September 14th, 2017.
+
+
+Major Changes
+-------------
+
+None.
+
+
+Minor Changes & Bugfixes
+------------------------
+
+* The ``runserver`` ``server_cls`` override fix from 1.1.6 no longer fails
+  when trying to use Django 1.10 or below.
+
+* The JS library has fixed an error with the 1006 error code on some WebSocket
+  implementations.
+
+Backwards Incompatible Changes
+------------------------------
+
+None.
diff --git a/docs/releases/1.1.8.rst b/docs/releases/1.1.8.rst
new file mode 100644
index 0000000..c7fcbee
--- /dev/null
+++ b/docs/releases/1.1.8.rst
@@ -0,0 +1,23 @@
+1.1.8 Release Notes
+===================
+
+Channels 1.1.8 is a packaging release for the 1.1 series, released on
+September 15th, 2017.
+
+
+Major Changes
+-------------
+
+None.
+
+
+Minor Changes & Bugfixes
+------------------------
+
+* Reverted recent JS fixes for subprotocols on some phones as they do not work
+ in Chrome.
+
+Backwards Incompatible Changes
+------------------------------
+
+None.
diff --git a/docs/releases/index.rst b/docs/releases/index.rst
new file mode 100644
index 0000000..05570f1
--- /dev/null
+++ b/docs/releases/index.rst
@@ -0,0 +1,19 @@
+Release Notes
+=============
+
+.. toctree::
+ :maxdepth: 1
+
+ 1.0.0
+ 1.0.1
+ 1.0.2
+ 1.0.3
+ 1.1.0
+ 1.1.1
+ 1.1.2
+ 1.1.3
+ 1.1.4
+ 1.1.5
+ 1.1.6
+ 1.1.7
+ 1.1.8
diff --git a/docs/routing.rst b/docs/routing.rst
new file mode 100644
index 0000000..065e13f
--- /dev/null
+++ b/docs/routing.rst
@@ -0,0 +1,82 @@
+Routing
+=======
+
+Routing in Channels is done using a system similar to that in core Django;
+a list of possible routes is provided, and Channels goes through all routes
+until a match is found, and then runs the resulting consumer.
+
+The difference comes, however, in the fact that Channels has to route based
+on more than just URL; channel name is the main thing routed on, and URL
+path is one of many other optional things you can route on, depending on
+the protocol (for example, imagine email consumers - they would route on
+domain or recipient address instead).
+
+The routing Channels takes is just a list of *routing objects* - the three
+built-in ones are ``route``, ``route_class`` and ``include``, but any object
+that implements the routing interface will work:
+
+* A method called ``match``, taking a single ``message`` as an argument and
+ returning ``None`` for no match or a tuple of ``(consumer, kwargs)`` if matched.
+
+* A method called ``channel_names``, which returns a set of channel names that
+ will match, which is fed to the channel layer to listen on them.
+
+The three default routing objects are:
+
+* ``route``: Takes a channel name, a consumer function, and optional filter
+ keyword arguments.
+
+* ``route_class``: Takes a class-based consumer, and optional filter
+ keyword arguments. Channel names are taken from the consumer's
+ ``channel_names()`` method.
+
+* ``include``: Takes either a list or string import path to a routing list,
+ and optional filter keyword arguments.
+
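+For illustration, a minimal sketch of a custom routing object implementing
+this interface (``echo_consumer`` is a hypothetical consumer)::
+
+    class EchoRoute(object):
+
+        def channel_names(self):
+            # Channels the layer should listen on for this route.
+            return {"echo"}
+
+        def match(self, message):
+            if message.channel.name == "echo":
+                return (echo_consumer, {})
+            return None
+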
+.. _filters:
+
+Filters
+-------
+
+Filtering is how you limit matches based on, for example, URLs; you use regular
+expressions, like so::
+
+ route("websocket.connect", consumers.ws_connect, path=r"^/chat/$")
+
+.. note::
+ Unlike Django's URL routing, which strips the first slash of a URL for
+ neatness, Channels includes the first slash, as the routing system is
+ generic and not designed just for URLs.
+
+You can have multiple filters::
+
+ route("email.receive", comment_response, to_address=r".*@example.com$", subject="^reply")
+
+Multiple filters are always combined with logical AND; that is, you need to
+match every filter to have the consumer called.
+
+Filters can capture keyword arguments to be passed to your function or your
+class-based consumer methods as a ``kwarg``::
+
+    route("websocket.connect", connect_blog, path=r'^/liveblog/(?P<slug>[^/]+)/stream/$')
+
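+The captured groups arrive as keyword arguments on the consumer, so a matching
+consumer for the route above might look like this sketch::
+
+    def connect_blog(message, slug):
+        # ``slug`` holds whatever the named group matched in the path.
+        pass
+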
+You can also specify filters on an ``include``::
+
+ include("blog_includes", path=r'^/liveblog')
+
+When you specify filters on ``include``, the matched portion of the attribute
+is removed for matches inside the include; for example, this arrangement
+matches URLs like ``/liveblog/stream/``, because the outside ``include``
+strips off the ``/liveblog`` part it matches before passing it inside::
+
+ inner_routes = [
+ route("websocket.connect", connect_blog, path=r'^/stream/'),
+ ]
+
+ routing = [
+ include(inner_routes, path=r'^/liveblog')
+ ]
+
+You can also include named capture groups in the filters on an include and
+they'll be passed to the consumer just like those on ``route``; note, though,
+that if the keyword argument names from the ``include`` and the ``route``
+clash, the values from ``route`` will take precedence.
diff --git a/docs/testing.rst b/docs/testing.rst
new file mode 100644
index 0000000..e845619
--- /dev/null
+++ b/docs/testing.rst
@@ -0,0 +1,396 @@
+Testing Consumers
+=================
+
+When you want to write unit tests for your new Channels consumers, you'll
+realize that you can't use the standard Django test client to submit fake HTTP
+requests - instead, you'll need to submit fake Messages to your consumers,
+and inspect what Messages they send themselves.
+
+We provide a ``TestCase`` subclass that sets all of this up for you,
+however, so you can easily write tests and check what your consumers are sending.
+
+
+ChannelTestCase
+---------------
+
+If your tests inherit from the ``channels.test.ChannelTestCase`` base class,
+whenever you run tests your channel layer will be swapped out for a captive
+in-memory layer, meaning you don't need an external server running to run tests.
+
+Moreover, you can inject messages onto this layer and inspect ones sent to it
+to help test your consumers.
+
+To inject a message onto the layer, simply call ``Channel.send()`` inside
+any test method on a ``ChannelTestCase`` subclass, like so::
+
+ from channels import Channel
+ from channels.test import ChannelTestCase
+
+ class MyTests(ChannelTestCase):
+ def test_a_thing(self):
+ # This goes onto an in-memory channel, not the real backend.
+ Channel("some-channel-name").send({"foo": "bar"})
+
+To receive a message from the layer, you can use ``self.get_next_message(channel)``,
+which handles receiving the message and converting it into a Message object for
+you (if you want, you can call ``receive_many`` on the underlying channel layer,
+but you'll get back a raw dict and channel name, which is not what consumers want).
+
+You can use this both to get Messages to send to consumers as their primary
+argument, as well as to get Messages from channels that consumers are supposed
+to send on to verify that they did.
+
+You can even pass ``require=True`` to ``get_next_message`` to make the test
+fail if there is no message on the channel (by default, it will return you
+``None`` instead).
+
+Here's an extended example testing a consumer that's supposed to take a value
+and post the square of it to the ``"result"`` channel::
+
+
+ from channels import Channel
+ from channels.test import ChannelTestCase
+
+ class MyTests(ChannelTestCase):
+ def test_a_thing(self):
+ # Inject a message onto the channel to use in a consumer
+ Channel("input").send({"value": 33})
+ # Run the consumer with the new Message object
+ my_consumer(self.get_next_message("input", require=True))
+ # Verify there's a result and that it's accurate
+ result = self.get_next_message("result", require=True)
+ self.assertEqual(result['value'], 1089)
+
+
+Generic Consumers
+-----------------
+
+You can use ``ChannelTestCase`` to test generic consumers as well. Just pass the message
+object from ``get_next_message`` to the constructor of the class. To test replies to a specific channel,
+use the ``reply_channel`` property on the ``Message`` object. For example::
+
+ from channels import Channel
+ from channels.test import ChannelTestCase
+
+ from myapp.consumers import MyConsumer
+
+ class MyTests(ChannelTestCase):
+
+ def test_a_thing(self):
+ # Inject a message onto the channel to use in a consumer
+ Channel("input").send({"value": 33})
+ # Run the consumer with the new Message object
+ message = self.get_next_message("input", require=True)
+ MyConsumer(message)
+ # Verify there's a reply and that it's accurate
+ result = self.get_next_message(message.reply_channel.name, require=True)
+ self.assertEqual(result['value'], 1089)
+
+
+Groups
+------
+
+You can test Groups in the same way as Channels inside a ``ChannelTestCase``;
+the entire channel layer is flushed each time a test is run, so it's safe to
+do group adds and sends during a test. For example::
+
+ from channels import Group
+ from channels.test import ChannelTestCase
+
+ class MyTests(ChannelTestCase):
+ def test_a_thing(self):
+ # Add a test channel to a test group
+ Group("test-group").add("test-channel")
+ # Send to the group
+ Group("test-group").send({"value": 42})
+ # Verify the message got into the destination channel
+ result = self.get_next_message("test-channel", require=True)
+ self.assertEqual(result['value'], 42)
+
+
+Clients
+-------
+
+For more complicated test suites you can use the ``Client`` abstraction, which
+provides an easy way to test the full life cycle of messages with a couple of
+methods: ``send`` to send a message with the given content to the given channel,
+``consume`` to run the appointed consumer for the next message, and ``receive``
+to get replies for the client. Very often you may need to ``send`` and then
+call a consumer one after another; for this purpose, use the
+``send_and_consume`` method::
+
+ from channels.test import ChannelTestCase, Client
+
+ class MyTests(ChannelTestCase):
+
+ def test_my_consumer(self):
+ client = Client()
+ client.send_and_consume('my_internal_channel', {'value': 'my_value'})
+ self.assertEqual(client.receive(), {'all is': 'done'})
+
+.. note::
+    If testing consumers that are expected to close the connection when
+    consuming, set the ``check_accept`` parameter to ``False`` on
+    ``send_and_consume``.
+
+You can use ``WSClient`` for websocket-related consumers. It automatically
+serializes JSON content, manages cookies and headers, gives easy access to the
+session and adds the ability to authorize your requests.
+For example::
+
+
+    # consumers.py
+    from channels import Channel
+    from channels.generic.websockets import JsonWebsocketConsumer
+
+    class RoomConsumer(JsonWebsocketConsumer):
+        http_user = True
+        groups = ['rooms_watchers']
+
+        def receive(self, content, **kwargs):
+            self.send({'rooms': self.message.http_session.get("rooms", [])})
+            Channel("rooms_receive").send({'user': self.message.user.id,
+                                           'message': content['message']})
+
+
+    # tests.py
+    from django.contrib.auth.models import User
+
+    from channels import Group
+    from channels.test import ChannelTestCase, WSClient
+
+
+ class RoomsTests(ChannelTestCase):
+
+ def test_rooms(self):
+ client = WSClient()
+ user = User.objects.create_user(
+ username='test', email='test@test.com', password='123456')
+ client.login(username='test', password='123456')
+
+ client.send_and_consume('websocket.connect', path='/rooms/')
+ # check that there is nothing to receive
+ self.assertIsNone(client.receive())
+
+            # test that the client is in the group
+ Group(RoomConsumer.groups[0]).send({'text': 'ok'}, immediately=True)
+ self.assertEqual(client.receive(json=False), 'ok')
+
+ client.session['rooms'] = ['test', '1']
+ client.session.save()
+
+ client.send_and_consume('websocket.receive',
+ text={'message': 'hey'},
+ path='/rooms/')
+ # test 'response'
+ self.assertEqual(client.receive(), {'rooms': ['test', '1']})
+
+ self.assertEqual(self.get_next_message('rooms_receive').content,
+ {'user': user.id, 'message': 'hey'})
+
+ # There is nothing to receive
+ self.assertIsNone(client.receive())
+
+
+Instead of the ``WSClient.login`` method with credentials as arguments, you
+may call ``WSClient.force_login`` (as with the Django test client) with the
+user object.
+
+The ``receive`` method by default tries to deserialize the JSON text content
+of a message; if you need to skip decoding, use ``receive(json=False)``, as in
+the example.
+
+For testing consumers with ``enforce_ordering``, initialize ``HttpClient``
+with the ``ordered`` flag; if you want to control the order yourself, omit the
+flag and set the order in the message content::
+
+ client = HttpClient(ordered=True)
+ client.send_and_consume('websocket.receive', text='1', path='/ws') # order = 0
+ client.send_and_consume('websocket.receive', text='2', path='/ws') # order = 1
+ client.send_and_consume('websocket.receive', text='3', path='/ws') # order = 2
+
+ # manually
+ client = HttpClient()
+ client.send('websocket.receive', content={'order': 0}, text='1')
+ client.send('websocket.receive', content={'order': 2}, text='2')
+ client.send('websocket.receive', content={'order': 1}, text='3')
+
+    # consume 4 times so the out-of-order messages are all processed
+ client.consume('websocket.receive')
+ client.consume('websocket.receive')
+ client.consume('websocket.receive')
+ client.consume('websocket.receive')
+
+
+Applying routes
+---------------
+
+When you need to test your consumers without routes in settings, or you want
+to test your consumers in a more isolated and atomic way, it is simpler to use
+the ``apply_routes`` context manager and decorator for your ``ChannelTestCase``.
+It takes a list of routes that you want to use and overwrites existing routes::
+
+ from channels.test import ChannelTestCase, WSClient, apply_routes
+
+ class MyTests(ChannelTestCase):
+
+ def test_myconsumer(self):
+ client = WSClient()
+
+ with apply_routes([MyConsumer.as_route(path='/new')]):
+ client.send_and_consume('websocket.connect', '/new')
+ self.assertEqual(client.receive(), {'key': 'value'})
+
+
+Test Data binding with ``WSClient``
+-------------------------------------
+
+Data binding in Channels works in both outbound and inbound directions, and
+the two directions are tested in different ways; ``WSClient`` and
+``apply_routes`` will help with both.
+When testing outbound consumers, you just need to import your ``Binding``
+subclass with its ``group_names`` specified. In the test you can join one of
+those groups, make some changes to the target model, and check the received
+message.
+Let's test ``IntegerValueBinding`` from :doc:`data binding <binding>` by
+creating an instance::
+
+ from channels.test import ChannelTestCase, WSClient
+ from channels.signals import consumer_finished
+
+ class TestIntegerValueBinding(ChannelTestCase):
+
+ def test_outbound_create(self):
+            # We use WSClient because it handles JSON-encoded messages
+ client = WSClient()
+ client.join_group("intval-updates") # join outbound binding
+
+ # create target entity
+ value = IntegerValue.objects.create(name='fifty', value=50)
+
+ received = client.receive() # receive outbound binding message
+ self.assertIsNotNone(received)
+
+ self.assertTrue('payload' in received)
+ self.assertTrue('action' in received['payload'])
+ self.assertTrue('data' in received['payload'])
+ self.assertTrue('name' in received['payload']['data'])
+ self.assertTrue('value' in received['payload']['data'])
+
+ self.assertEqual(received['payload']['action'], 'create')
+ self.assertEqual(received['payload']['model'], 'values.integervalue')
+ self.assertEqual(received['payload']['pk'], value.pk)
+
+ self.assertEqual(received['payload']['data']['name'], 'fifty')
+ self.assertEqual(received['payload']['data']['value'], 50)
+
+ # assert that is nothing to receive
+ self.assertIsNone(client.receive())
+
+
+Inbound binding is a different situation: it is used with :ref:`multiplexing`,
+so we apply two routes - a websocket route for the demultiplexer and a route
+with the internal consumer for the binding itself - then connect to the
+websocket entry point and test the different actions. For example::
+
+ class TestIntegerValueBinding(ChannelTestCase):
+
+ def test_inbound_create(self):
+ # check that initial state is empty
+ self.assertEqual(IntegerValue.objects.all().count(), 0)
+
+ with apply_routes([Demultiplexer.as_route(path='/'),
+ route("binding.intval", IntegerValueBinding.consumer)]):
+ client = WSClient()
+ client.send_and_consume('websocket.connect', path='/')
+ client.send_and_consume('websocket.receive', path='/', text={
+ 'stream': 'intval',
+ 'payload': {'action': CREATE, 'data': {'name': 'one', 'value': 1}}
+ })
+                # our Demultiplexer routes the message on to the inbound
+                # consumer, so we need to call that consumer ourselves
+                client.consume('binding.intval')
+
+ self.assertEqual(IntegerValue.objects.all().count(), 1)
+ value = IntegerValue.objects.all().first()
+ self.assertEqual(value.name, 'one')
+ self.assertEqual(value.value, 1)
+
+
+
+Multiple Channel Layers
+-----------------------
+
+If you want to test code that uses multiple channel layers, specify the alias
+of the layers you want to mock as the ``test_channel_aliases`` attribute on
+the ``ChannelTestCase`` subclass; by default, only the ``default`` layer is
+mocked.
+
+You can pass an ``alias`` argument to ``get_next_message``, ``Client`` and ``Channel``
+to use a different layer too.
+
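+For example, a sketch assuming a second layer configured under the alias
+``fast``::
+
+    from channels import Channel
+    from channels.test import ChannelTestCase
+
+    class MultiLayerTests(ChannelTestCase):
+        test_channel_aliases = ["default", "fast"]
+
+        def test_fast_layer(self):
+            # Sends and receives go to the mocked "fast" layer.
+            Channel("some-channel", alias="fast").send({"value": 42})
+            message = self.get_next_message("some-channel", alias="fast")
+            self.assertEqual(message['value'], 42)
+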
+Live Server Test Case
+---------------------
+
+You can use browser automation libraries like Selenium or Splinter to
+check your application against a real channel layer installation. First of
+all, provide the ``TEST_CONFIG`` setting to prevent overlap with your running
+dev environment.
+
+.. code:: python
+
+ CHANNEL_LAYERS = {
+ "default": {
+ "BACKEND": "asgi_redis.RedisChannelLayer",
+ "ROUTING": "my_project.routing.channel_routing",
+ "CONFIG": {
+ "hosts": [("redis-server-name", 6379)],
+ },
+ "TEST_CONFIG": {
+ "hosts": [("localhost", 6379)],
+ },
+ },
+ }
+
+Now use ``ChannelLiveServerTestCase`` for your acceptance tests.
+
+.. code:: python
+
+ from channels.test import ChannelLiveServerTestCase
+ from splinter import Browser
+
+ class IntegrationTest(ChannelLiveServerTestCase):
+
+ def test_browse_site_index(self):
+
+ with Browser() as browser:
+
+ browser.visit(self.live_server_url)
+ # the rest of your integration test...
+
+In the test above, Daphne and Channels worker processes are fired up for
+you. These processes run your project against the test database and the
+default channel layer you specify in the settings. If the channel layer
+supports the ``flush`` extension, an initial cleanup will be done, so do
+not run this code against your production environment.
+
+``ChannelLiveServerTestCase`` cannot be used with in-memory databases.
+When using the SQLite database engine, Django tests will use an
+in-memory database by default. To disable this, add the ``TEST`` setting
+to the database configuration.
+
+.. code:: python
+
+ DATABASES = {
+ 'default': {
+ 'ENGINE': 'django.db.backends.sqlite3',
+ 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
+ 'TEST': {
+ 'NAME': 'testdb.sqlite3'
+ }
+ }
+ }
+
+Once the Channels infrastructure is ready, you can open your website in
+a real browser which can execute JavaScript and operate on WebSockets. A
+``live_server_ws_url`` property is also provided if you decide to run
+messaging directly from Python.
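+
+As a sketch, driving the WebSocket directly from Python might look like
+this (assuming the third-party ``websocket-client`` package and a
+hypothetical echo consumer routed at ``/ws/``):
+
+.. code:: python
+
+    import websocket  # pip install websocket-client
+
+    from channels.test import ChannelLiveServerTestCase
+
+    class RawWebSocketTest(ChannelLiveServerTestCase):
+
+        def test_websocket_echo(self):
+            # connect to the live server without a browser
+            ws = websocket.create_connection(self.live_server_ws_url + '/ws/')
+            ws.send('ping')
+            # assumes the consumer echoes messages back
+            self.assertEqual(ws.recv(), 'ping')
+            ws.close()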
+
+By default the live server test case will serve static files. To disable
+this feature, override the ``serve_static`` class attribute.
+
+.. code:: python
+
+ class IntegrationTest(ChannelLiveServerTestCase):
+
+ serve_static = False
+
+ def test_websocket_message(self):
+ # JS and CSS are not available in this test.
+ ...
diff --git a/js_client/.babelrc b/js_client/.babelrc
new file mode 100644
index 0000000..6d1bf22
--- /dev/null
+++ b/js_client/.babelrc
@@ -0,0 +1,10 @@
+{
+ "presets": [
+ "es2015",
+ "stage-1",
+ "react"
+ ],
+ "plugins": [
+ "transform-object-assign",
+ ]
+}
diff --git a/js_client/.eslintrc.js b/js_client/.eslintrc.js
new file mode 100644
index 0000000..c06645c
--- /dev/null
+++ b/js_client/.eslintrc.js
@@ -0,0 +1,9 @@
+module.exports = {
+ "extends": "airbnb",
+ "plugins": [
+ "react"
+ ],
+ env: {
+ jest: true
+ }
+};
diff --git a/js_client/.npmignore b/js_client/.npmignore
new file mode 100644
index 0000000..b81236d
--- /dev/null
+++ b/js_client/.npmignore
@@ -0,0 +1,8 @@
+npm-debug.log
+node_modules
+.*.swp
+.lock-*
+build
+.babelrc
+webpack.*
+/src/
diff --git a/js_client/README.md b/js_client/README.md
new file mode 100644
index 0000000..5a97685
--- /dev/null
+++ b/js_client/README.md
@@ -0,0 +1,49 @@
+### Usage
+
+Channels WebSocket wrapper.
+
+To process messages:
+
+```
+import { WebSocketBridge } from 'django-channels'
+
+const webSocketBridge = new WebSocketBridge();
+webSocketBridge.connect('/ws/');
+webSocketBridge.listen(function(action, stream) {
+ console.log(action, stream);
+});
+```
+
+To send messages:
+
+```
+webSocketBridge.send({prop1: 'value1', prop2: 'value1'});
+```
+
+To demultiplex specific streams:
+
+```
+const webSocketBridge = new WebSocketBridge();
+webSocketBridge.connect('/ws/');
+webSocketBridge.listen();
+webSocketBridge.demultiplex('mystream', function(action, stream) {
+ console.log(action, stream);
+});
+webSocketBridge.demultiplex('myotherstream', function(action, stream) {
+ console.info(action, stream);
+});
+```
+
+To send a message to a specific stream:
+
+```
+webSocketBridge.stream('mystream').send({prop1: 'value1', prop2: 'value1'})
+```
+
+The `WebSocketBridge` instance exposes the underlying `ReconnectingWebSocket` as the `socket` property. You can use this property to add any custom behavior. For example:
+
+```
+webSocketBridge.socket.addEventListener('open', function() {
+ console.log("Connected to WebSocket");
+})
+```
diff --git a/js_client/banner.txt b/js_client/banner.txt
new file mode 100644
index 0000000..9eb7978
--- /dev/null
+++ b/js_client/banner.txt
@@ -0,0 +1 @@
+Do not edit! This file is autogenerated by running `npm run browserify`.
\ No newline at end of file
diff --git a/js_client/esdoc.json b/js_client/esdoc.json
new file mode 100644
index 0000000..157bc8e
--- /dev/null
+++ b/js_client/esdoc.json
@@ -0,0 +1,21 @@
+{
+ "source": "./src",
+ "destination": "./docs",
+ "undocumentIdentifier": false,
+ "title": "django-channels",
+ "experimentalProposal": {
+ "classProperties": true,
+ "objectRestSpread": true
+ },
+ "plugins": [
+ {
+ "name": "esdoc-importpath-plugin",
+ "option": {
+ "replaces": [
+ {"from": "^src/", "to": "lib/"},
+ {"from": ".js$", "to": ""}
+ ]
+ }
+ }
+ ]
+}
diff --git a/js_client/lib/index.js b/js_client/lib/index.js
new file mode 100644
index 0000000..53ca4f0
--- /dev/null
+++ b/js_client/lib/index.js
@@ -0,0 +1,185 @@
+'use strict';
+
+Object.defineProperty(exports, "__esModule", {
+ value: true
+});
+exports.WebSocketBridge = undefined;
+
+var _extends = Object.assign || function (target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i]; for (var key in source) { if (Object.prototype.hasOwnProperty.call(source, key)) { target[key] = source[key]; } } } return target; };
+
+var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();
+
+var _reconnectingWebsocket = require('reconnecting-websocket');
+
+var _reconnectingWebsocket2 = _interopRequireDefault(_reconnectingWebsocket);
+
+function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }
+
+function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } }
+
+/**
+ * Bridge between Channels and plain javascript.
+ *
+ * @example
+ * const webSocketBridge = new WebSocketBridge();
+ * webSocketBridge.connect();
+ * webSocketBridge.listen(function(action, stream) {
+ * console.log(action, stream);
+ * });
+ */
+var WebSocketBridge = function () {
+ function WebSocketBridge(options) {
+ _classCallCheck(this, WebSocketBridge);
+
+ /**
+     * The underlying `ReconnectingWebSocket` instance.
+ *
+ * @type {ReconnectingWebSocket}
+ */
+ this.socket = null;
+ this.streams = {};
+ this.default_cb = null;
+ this.options = _extends({}, options);
+ }
+
+ /**
+ * Connect to the websocket server
+ *
+ * @param {String} [url] The url of the websocket. Defaults to
+ * `window.location.host`
+ * @param {String[]|String} [protocols] Optional string or array of protocols.
+ * @param {Object} options Object of options for [`reconnecting-websocket`](https://github.com/joewalnes/reconnecting-websocket#options-1).
+ * @example
+ * const webSocketBridge = new WebSocketBridge();
+ * webSocketBridge.connect();
+ */
+
+
+ _createClass(WebSocketBridge, [{
+ key: 'connect',
+ value: function connect(url, protocols, options) {
+ var _url = void 0;
+ // Use wss:// if running on https://
+ var scheme = window.location.protocol === 'https:' ? 'wss' : 'ws';
+ var base_url = scheme + '://' + window.location.host;
+ if (url === undefined) {
+ _url = base_url;
+ } else {
+ // Support relative URLs
+ if (url[0] == '/') {
+ _url = '' + base_url + url;
+ } else {
+ _url = url;
+ }
+ }
+ this.socket = new _reconnectingWebsocket2.default(_url, protocols, options);
+ }
+
+ /**
+ * Starts listening for messages on the websocket, demultiplexing if necessary.
+ *
+     * @param {Function} [cb] Callback to be executed when a message
+ * arrives. The callback will receive `action` and `stream` parameters
+ *
+ * @example
+ * const webSocketBridge = new WebSocketBridge();
+ * webSocketBridge.connect();
+ * webSocketBridge.listen(function(action, stream) {
+ * console.log(action, stream);
+ * });
+ */
+
+ }, {
+ key: 'listen',
+ value: function listen(cb) {
+ var _this = this;
+
+ this.default_cb = cb;
+ this.socket.onmessage = function (event) {
+ var msg = JSON.parse(event.data);
+ var action = void 0;
+ var stream = void 0;
+
+ if (msg.stream !== undefined) {
+ action = msg.payload;
+ stream = msg.stream;
+ var stream_cb = _this.streams[stream];
+ stream_cb ? stream_cb(action, stream) : null;
+ } else {
+ action = msg;
+ stream = null;
+ _this.default_cb ? _this.default_cb(action, stream) : null;
+ }
+ };
+ }
+
+ /**
+ * Adds a 'stream handler' callback. Messages coming from the specified stream
+ * will call the specified callback.
+ *
+ * @param {String} stream The stream name
+     * @param {Function} cb Callback to be executed when a message
+ * arrives. The callback will receive `action` and `stream` parameters.
+ * @example
+ * const webSocketBridge = new WebSocketBridge();
+ * webSocketBridge.connect();
+ * webSocketBridge.listen();
+ * webSocketBridge.demultiplex('mystream', function(action, stream) {
+ * console.log(action, stream);
+ * });
+ * webSocketBridge.demultiplex('myotherstream', function(action, stream) {
+ * console.info(action, stream);
+ * });
+ */
+
+ }, {
+ key: 'demultiplex',
+ value: function demultiplex(stream, cb) {
+ this.streams[stream] = cb;
+ }
+
+ /**
+ * Sends a message to the reply channel.
+ *
+ * @param {Object} msg The message
+ *
+ * @example
+ * webSocketBridge.send({prop1: 'value1', prop2: 'value1'});
+ */
+
+ }, {
+ key: 'send',
+ value: function send(msg) {
+ this.socket.send(JSON.stringify(msg));
+ }
+
+ /**
+ * Returns an object to send messages to a specific stream
+ *
+ * @param {String} stream The stream name
+ * @return {Object} convenience object to send messages to `stream`.
+ * @example
+ * webSocketBridge.stream('mystream').send({prop1: 'value1', prop2: 'value1'})
+ */
+
+ }, {
+ key: 'stream',
+ value: function stream(_stream) {
+ var _this2 = this;
+
+ return {
+ send: function send(action) {
+ var msg = {
+ stream: _stream,
+ payload: action
+ };
+ _this2.socket.send(JSON.stringify(msg));
+ }
+ };
+ }
+ }]);
+
+ return WebSocketBridge;
+}();
+
+exports.WebSocketBridge = WebSocketBridge;
\ No newline at end of file
diff --git a/js_client/package.json b/js_client/package.json
new file mode 100644
index 0000000..1bc0791
--- /dev/null
+++ b/js_client/package.json
@@ -0,0 +1,74 @@
+{
+ "name": "django-channels",
+ "version": "1.1.8",
+ "description": "",
+ "repository": {
+ "type": "git",
+ "url": "https://github.com/django/channels.git"
+ },
+ "main": "lib/index.js",
+ "scripts": {
+ "transpile": "rm -rf lib && babel src --out-dir lib",
+ "docs": "rm -rf docs && esdoc -c esdoc.json",
+ "test": "jest",
+ "browserify": "browserify src/index.js -p browserify-banner -s channels -o ../channels/static/channels/js/websocketbridge.js",
+ "prepare": "npm run transpile",
+ "compile": "npm run transpile && npm run browserify"
+ },
+ "engines": {
+ "npm": "^5.0.0"
+ },
+ "files": [
+ "lib/index.js"
+ ],
+ "license": "BSD-3-Clause",
+ "dependencies": {
+ "reconnecting-websocket": "^3.0.3"
+ },
+ "jest": {
+ "roots": [
+ "tests"
+ ]
+ },
+ "browserify": {
+ "transform": [
+ [
+ "babelify"
+ ]
+ ]
+ },
+ "devDependencies": {
+ "babel-cli": "^6.24.0",
+ "babel-core": "^6.16.0",
+ "babel-plugin-transform-inline-environment-variables": "^6.8.0",
+ "babel-plugin-transform-object-assign": "^6.8.0",
+ "babel-plugin-transform-runtime": "^6.15.0",
+ "babel-polyfill": "^6.16.0",
+ "babel-preset-es2015": "^6.16.0",
+ "babel-preset-react": "^6.16.0",
+ "babel-preset-stage-0": "^6.16.0",
+ "babel-register": "^6.9.0",
+ "babel-runtime": "^6.11.6",
+ "babelify": "^7.3.0",
+ "browserify": "^14.1.0",
+ "browserify-banner": "^1.0.3",
+ "esdoc": "^0.5.2",
+ "esdoc-es7-plugin": "0.0.3",
+ "esdoc-importpath-plugin": "^0.1.1",
+ "eslint": "^2.13.1",
+ "eslint-config-airbnb": "^9.0.1",
+ "eslint-plugin-import": "^1.9.2",
+ "eslint-plugin-jsx-a11y": "^1.5.3",
+ "eslint-plugin-react": "^5.2.2",
+ "jest": "^19.0.1",
+ "mock-socket": "6.0.4",
+ "react": "^15.4.0",
+ "react-cookie": "^0.4.8",
+ "react-dom": "^15.4.0",
+ "react-redux": "^4.4.6",
+ "redux": "^3.6.0",
+ "redux-actions": "^1.0.0",
+ "redux-logger": "^2.7.4",
+ "redux-thunk": "^2.1.0"
+ }
+}
diff --git a/js_client/src/index.js b/js_client/src/index.js
new file mode 100644
index 0000000..b2ebd81
--- /dev/null
+++ b/js_client/src/index.js
@@ -0,0 +1,144 @@
+import ReconnectingWebSocket from 'reconnecting-websocket';
+
+
+/**
+ * Bridge between Channels and plain javascript.
+ *
+ * @example
+ * const webSocketBridge = new WebSocketBridge();
+ * webSocketBridge.connect();
+ * webSocketBridge.listen(function(action, stream) {
+ * console.log(action, stream);
+ * });
+ */
+export class WebSocketBridge {
+ constructor(options) {
+ /**
+     * The underlying `ReconnectingWebSocket` instance.
+ *
+ * @type {ReconnectingWebSocket}
+ */
+ this.socket = null;
+ this.streams = {};
+ this.default_cb = null;
+ this.options = {...options};
+ }
+
+ /**
+ * Connect to the websocket server
+ *
+ * @param {String} [url] The url of the websocket. Defaults to
+ * `window.location.host`
+ * @param {String[]|String} [protocols] Optional string or array of protocols.
+ * @param {Object} options Object of options for [`reconnecting-websocket`](https://github.com/joewalnes/reconnecting-websocket#options-1).
+ * @example
+ * const webSocketBridge = new WebSocketBridge();
+ * webSocketBridge.connect();
+ */
+ connect(url, protocols, options) {
+ let _url;
+ // Use wss:// if running on https://
+ const scheme = window.location.protocol === 'https:' ? 'wss' : 'ws';
+ const base_url = `${scheme}://${window.location.host}`;
+ if (url === undefined) {
+ _url = base_url;
+ } else {
+ // Support relative URLs
+ if (url[0] == '/') {
+ _url = `${base_url}${url}`;
+ } else {
+ _url = url;
+ }
+ }
+ this.socket = new ReconnectingWebSocket(_url, protocols, options);
+ }
+
+ /**
+ * Starts listening for messages on the websocket, demultiplexing if necessary.
+ *
+   * @param {Function} [cb] Callback to be executed when a message
+ * arrives. The callback will receive `action` and `stream` parameters
+ *
+ * @example
+ * const webSocketBridge = new WebSocketBridge();
+ * webSocketBridge.connect();
+ * webSocketBridge.listen(function(action, stream) {
+ * console.log(action, stream);
+ * });
+ */
+ listen(cb) {
+ this.default_cb = cb;
+ this.socket.onmessage = (event) => {
+ const msg = JSON.parse(event.data);
+ let action;
+ let stream;
+
+ if (msg.stream !== undefined) {
+ action = msg.payload;
+ stream = msg.stream;
+ const stream_cb = this.streams[stream];
+ stream_cb ? stream_cb(action, stream) : null;
+ } else {
+ action = msg;
+ stream = null;
+ this.default_cb ? this.default_cb(action, stream) : null;
+ }
+ };
+ }
+
+ /**
+ * Adds a 'stream handler' callback. Messages coming from the specified stream
+ * will call the specified callback.
+ *
+ * @param {String} stream The stream name
+   * @param {Function} cb Callback to be executed when a message
+ * arrives. The callback will receive `action` and `stream` parameters.
+
+ * @example
+ * const webSocketBridge = new WebSocketBridge();
+ * webSocketBridge.connect();
+ * webSocketBridge.listen();
+ * webSocketBridge.demultiplex('mystream', function(action, stream) {
+ * console.log(action, stream);
+ * });
+ * webSocketBridge.demultiplex('myotherstream', function(action, stream) {
+ * console.info(action, stream);
+ * });
+ */
+ demultiplex(stream, cb) {
+ this.streams[stream] = cb;
+ }
+
+ /**
+ * Sends a message to the reply channel.
+ *
+ * @param {Object} msg The message
+ *
+ * @example
+ * webSocketBridge.send({prop1: 'value1', prop2: 'value1'});
+ */
+ send(msg) {
+ this.socket.send(JSON.stringify(msg));
+ }
+
+ /**
+ * Returns an object to send messages to a specific stream
+ *
+ * @param {String} stream The stream name
+ * @return {Object} convenience object to send messages to `stream`.
+ * @example
+ * webSocketBridge.stream('mystream').send({prop1: 'value1', prop2: 'value1'})
+ */
+ stream(stream) {
+ return {
+ send: (action) => {
+ const msg = {
+ stream,
+ payload: action
+ }
+ this.socket.send(JSON.stringify(msg));
+ }
+ }
+ }
+
+}
diff --git a/js_client/tests/websocketbridge.test.js b/js_client/tests/websocketbridge.test.js
new file mode 100644
index 0000000..0b347be
--- /dev/null
+++ b/js_client/tests/websocketbridge.test.js
@@ -0,0 +1,152 @@
+import { WebSocket, Server } from 'mock-socket';
+import { WebSocketBridge } from '../src/';
+
+
+
+describe('WebSocketBridge', () => {
+ const mockServer = new Server('ws://localhost');
+ const serverReceivedMessage = jest.fn();
+ mockServer.on('message', serverReceivedMessage);
+
+ beforeEach(() => {
+ serverReceivedMessage.mockReset();
+ });
+
+ it('Connects', () => {
+ const webSocketBridge = new WebSocketBridge();
+ webSocketBridge.connect('ws://localhost');
+ });
+ it('Supports relative urls', () => {
+ const webSocketBridge = new WebSocketBridge();
+ webSocketBridge.connect('/somepath/');
+ });
+ it('Can add event listeners to socket', () => {
+ const webSocketBridge = new WebSocketBridge();
+ const myMock = jest.fn();
+
+ webSocketBridge.connect('ws://localhost', {});
+ webSocketBridge.socket.addEventListener('message', myMock);
+ mockServer.send('{"type": "test", "payload": "message 1"}');
+
+ expect(myMock.mock.calls.length).toBe(1);
+
+ });
+ it('Processes messages', () => {
+ const webSocketBridge = new WebSocketBridge();
+ const myMock = jest.fn();
+
+ webSocketBridge.connect('ws://localhost');
+ webSocketBridge.listen(myMock);
+
+ mockServer.send('{"type": "test", "payload": "message 1"}');
+ mockServer.send('{"type": "test", "payload": "message 2"}');
+
+ expect(myMock.mock.calls.length).toBe(2);
+ expect(myMock.mock.calls[0][0]).toEqual({"type": "test", "payload": "message 1"});
+ expect(myMock.mock.calls[0][1]).toBe(null);
+ });
+ it('Ignores multiplexed messages for unregistered streams', () => {
+ const webSocketBridge = new WebSocketBridge();
+ const myMock = jest.fn();
+
+ webSocketBridge.connect('ws://localhost');
+ webSocketBridge.listen(myMock);
+
+ mockServer.send('{"stream": "stream1", "payload": {"type": "test", "payload": "message 1"}}');
+ expect(myMock.mock.calls.length).toBe(0);
+
+ });
+ it('Demultiplexes messages only when they have a stream', () => {
+ const webSocketBridge = new WebSocketBridge();
+ const myMock = jest.fn();
+ const myMock2 = jest.fn();
+ const myMock3 = jest.fn();
+
+ webSocketBridge.connect('ws://localhost');
+ webSocketBridge.listen(myMock);
+ webSocketBridge.demultiplex('stream1', myMock2);
+ webSocketBridge.demultiplex('stream2', myMock3);
+
+ mockServer.send('{"type": "test", "payload": "message 1"}');
+ expect(myMock.mock.calls.length).toBe(1);
+ expect(myMock2.mock.calls.length).toBe(0);
+ expect(myMock3.mock.calls.length).toBe(0);
+
+ mockServer.send('{"stream": "stream1", "payload": {"type": "test", "payload": "message 1"}}');
+
+ expect(myMock.mock.calls.length).toBe(1);
+ expect(myMock2.mock.calls.length).toBe(1);
+ expect(myMock3.mock.calls.length).toBe(0);
+
+ expect(myMock2.mock.calls[0][0]).toEqual({"type": "test", "payload": "message 1"});
+ expect(myMock2.mock.calls[0][1]).toBe("stream1");
+
+ mockServer.send('{"stream": "stream2", "payload": {"type": "test", "payload": "message 2"}}');
+
+ expect(myMock.mock.calls.length).toBe(1);
+ expect(myMock2.mock.calls.length).toBe(1);
+ expect(myMock3.mock.calls.length).toBe(1);
+
+ expect(myMock3.mock.calls[0][0]).toEqual({"type": "test", "payload": "message 2"});
+ expect(myMock3.mock.calls[0][1]).toBe("stream2");
+ });
+ it('Demultiplexes messages', () => {
+ const webSocketBridge = new WebSocketBridge();
+ const myMock = jest.fn();
+ const myMock2 = jest.fn();
+
+ webSocketBridge.connect('ws://localhost');
+ webSocketBridge.listen();
+
+ webSocketBridge.demultiplex('stream1', myMock);
+ webSocketBridge.demultiplex('stream2', myMock2);
+
+ mockServer.send('{"type": "test", "payload": "message 1"}');
+ mockServer.send('{"type": "test", "payload": "message 2"}');
+
+ expect(myMock.mock.calls.length).toBe(0);
+ expect(myMock2.mock.calls.length).toBe(0);
+
+ mockServer.send('{"stream": "stream1", "payload": {"type": "test", "payload": "message 1"}}');
+
+ expect(myMock.mock.calls.length).toBe(1);
+
+ expect(myMock2.mock.calls.length).toBe(0);
+
+ expect(myMock.mock.calls[0][0]).toEqual({"type": "test", "payload": "message 1"});
+ expect(myMock.mock.calls[0][1]).toBe("stream1");
+
+ mockServer.send('{"stream": "stream2", "payload": {"type": "test", "payload": "message 2"}}');
+
+ expect(myMock.mock.calls.length).toBe(1);
+ expect(myMock2.mock.calls.length).toBe(1);
+
+
+ expect(myMock2.mock.calls[0][0]).toEqual({"type": "test", "payload": "message 2"});
+ expect(myMock2.mock.calls[0][1]).toBe("stream2");
+
+ });
+ it('Sends messages', () => {
+ const webSocketBridge = new WebSocketBridge();
+
+ webSocketBridge.connect('ws://localhost');
+ webSocketBridge.send({"type": "test", "payload": "message 1"});
+
+ expect(serverReceivedMessage.mock.calls.length).toBe(1);
+ expect(serverReceivedMessage.mock.calls[0][0]).toEqual(JSON.stringify({"type": "test", "payload": "message 1"}));
+ });
+ it('Multiplexes messages', () => {
+ const webSocketBridge = new WebSocketBridge();
+
+ webSocketBridge.connect('ws://localhost');
+ webSocketBridge.stream('stream1').send({"type": "test", "payload": "message 1"});
+
+ expect(serverReceivedMessage.mock.calls.length).toBe(1);
+ expect(serverReceivedMessage.mock.calls[0][0]).toEqual(JSON.stringify({
+ "stream": "stream1",
+ "payload": {
+ "type": "test", "payload": "message 1",
+ },
+ }));
+ });
+});
diff --git a/loadtesting/2016-09-06/README.rst b/loadtesting/2016-09-06/README.rst
new file mode 100644
index 0000000..4acc015
--- /dev/null
+++ b/loadtesting/2016-09-06/README.rst
@@ -0,0 +1,142 @@
+Django Channels Load Testing Results for (2016-09-06)
+=====================================================
+
+The goal of these load tests is to see how Channels performs with normal HTTP traffic under heavy load.
+
+In order to handle WebSockets, Channels introduced ASGI, a new interface spec for asynchronous request handling.
+Channels implemented this spec with Daphne, an HTTP, HTTP2, and WebSocket protocol server.
+
+The load testing compares how well Daphne using 1 worker performs with normal HTTP traffic in
+comparison to a WSGI HTTP server. Gunicorn was chosen as its configuration is simple and well understood.
+
+
+Summary of Results
+~~~~~~~~~~~~~~~~~~
+
+Daphne is not as efficient as its WSGI counterpart. When considering only latency, Daphne can have 10 times the latency
+of Gunicorn under the same traffic load. When considering only throughput, Daphne can have 40-50% of the total
+throughput of Gunicorn while still running at 2 times the latency.
+
+The results should not be surprising considering the overhead involved. However, these results represent the simplest
+case to test and should not be interpreted as saying that Daphne is always slower than a WSGI server. These results are
+a starting point, not a final conclusion.
+
+Some additional things that should be tested:
+
+- More than 1 worker
+- A separate server for redis
+- Comparison to other WebSocket servers, such as Node's socket.io or Rails' Action Cable
+
+
+Methodology
+~~~~~~~~~~~
+
+In order to control for variances, several measures were taken:
+
+- the same testing tool was used across all tests, `loadtest `_.
+- all target machines were identical
+- all target code variances were separated into appropriate files in the dir of /testproject in this repo
+- all target config variances necessary to the different setups were controlled by supervisord so that human error was limited
+- across different test types, the same target machines were used, using the same target code and the same target config
+- several tests were run for each setup and test type
+
+
+Setups
+~~~~~~
+
+3 setups were used for this set of tests:
+
+1) Normal Django with Gunicorn (19.6.0)
+2) Django Channels with local Redis (0.14.0) and Daphne (0.14.3)
+3) Django Channels with IPC (1.1.0) and Daphne (0.14.3)
+
+
+Latency
+~~~~~~~
+
+All target and source machines were identical ec2 m3.2xlarge instances running Ubuntu 16.04.
+
+In order to ensure that the same number of requests were sent, the rps flag was set to 300.
+
+
+.. image:: channels-latency.PNG
+
+
+Throughput
+~~~~~~~~~~
+
+The same source machine was used for all tests: ec2 instance m3.large running Ubuntu 16.04.
+All target machines were identical ec2 instances m3.2xlarge running Ubuntu 16.04.
+
+For the following tests, loadtest was permitted to autothrottle so as to limit errors; this led to varied latency times.
+
+Gunicorn had a latency of 6 ms; Daphne with Redis, 12 ms; Daphne with IPC, 35 ms.
+
+
+.. image:: channels-throughput.PNG
+
+
+Supervisor Configs
+~~~~~~~~~~~~~~~~~~
+
+**Gunicorn (19.6.0)**
+
+This is the non-channels config. It's a standard Django environment on one machine, using gunicorn to handle requests.
+
+.. code-block:: bash
+
+ [program:gunicorn]
+ command = gunicorn testproject.wsgi_no_channels -b 0.0.0.0:80
+ directory = /srv/channels/testproject/
+ user = root
+
+ [group:django_http]
+ programs=gunicorn
+ priority=999
+
+
+**Redis (0.14.0) and Daphne (0.14.3)**
+
+This is the channels config using Redis as the backend. It's on one machine, so it uses a local Redis config.
+
+Also, it's a single worker, not multiple, as that's the default config.
+
+.. code-block:: bash
+
+ [program:daphne]
+ command = daphne -b 0.0.0.0 -p 80 testproject.asgi:channel_layer
+ directory = /srv/channels/testproject/
+ user = root
+
+ [program:worker]
+ command = python manage.py runworker
+ directory = /srv/channels/testproject/
+ user = django-channels
+
+
+ [group:django_channels]
+ programs=daphne,worker
+ priority=999
+
+
+**IPC (1.1.0) and Daphne (0.14.3)**
+
+This is the channels config using IPC (Inter Process Communication). It's only possible to have this work on one machine.
+
+
+.. code-block:: bash
+
+ [program:daphne]
+ command = daphne -b 0.0.0.0 -p 80 testproject.asgi_for_ipc:channel_layer
+ directory = /srv/channels/testproject/
+ user = root
+
+ [program:worker]
+ command = python manage.py runworker --settings=testproject.settings.channels_ipc
+ directory = /srv/channels/testproject/
+ user = root
+
+
+ [group:django_channels]
+ programs=daphne,worker
+ priority=999
diff --git a/loadtesting/2016-09-06/channels-latency.PNG b/loadtesting/2016-09-06/channels-latency.PNG
new file mode 100644
index 0000000..31808f9
Binary files /dev/null and b/loadtesting/2016-09-06/channels-latency.PNG differ
diff --git a/loadtesting/2016-09-06/channels-throughput.PNG b/loadtesting/2016-09-06/channels-throughput.PNG
new file mode 100644
index 0000000..ec88cc2
Binary files /dev/null and b/loadtesting/2016-09-06/channels-throughput.PNG differ
diff --git a/loadtesting/README.md b/loadtesting/README.md
new file mode 100644
index 0000000..316e909
--- /dev/null
+++ b/loadtesting/README.md
@@ -0,0 +1,13 @@
+Django Channels Load Testing Results Index
+===============
+
+[2016-09-06 Results](2016-09-06/README.rst)
+---------------
+
+**Normal Django, WSGI**
+- Gunicorn (19.6.0)
+
+
+**Django Channels, ASGI**
+- Redis (0.14.0) and Daphne (0.14.3)
+- IPC (1.1.0) and Daphne (0.14.3)
diff --git a/patchinator.py b/patchinator.py
new file mode 100644
index 0000000..661a659
--- /dev/null
+++ b/patchinator.py
@@ -0,0 +1,259 @@
+#!/usr/bin/python
+"""
+Script that automatically generates a Django patch from the Channels codebase
+based on some simple rules and string replacements.
+
+Once Channels lands in Django, will be reversed to instead generate this
+third-party app from the now-canonical Django source.
+"""
+
+import re
+import os.path
+import sys
+
+from isort import SortImports
+
+
+# Transforms: Turn one content string into another
+
+class Replacement(object):
+ """
+ Represents a string replacement in a file; uses a regular expression to
+ substitute strings in the file.
+ """
+
+ def __init__(self, match, sub, regex=True):
+ self.match = match
+ self.sub = sub
+ self.regex = regex
+
+ def __call__(self, value):
+ if self.regex:
+ return re.sub(self.match, self.sub, value)
+ else:
+ return value.replace(self.match, self.sub)
+
+
+class Insert(object):
+ """
+ Inserts a string before/after another in a file, one time only, with multiline match.
+ """
+
+ def __init__(self, match, to_insert, after=False):
+ self.match = match
+ self.to_insert = to_insert
+ self.after = after
+
+ def __call__(self, value):
+ match = re.search(self.match, value, flags=re.MULTILINE)
+ if not match:
+ raise ValueError("Could not find match %s" % self.match)
+ if self.after:
+ return value[:match.end()] + self.to_insert + value[match.end():]
+ else:
+ return value[:match.start()] + self.to_insert + value[match.start():]
+
+
+class Isort(object):
+ """
+ Runs isort on the file
+ """
+
+ def __call__(self, value):
+ return SortImports(file_contents=value).output
+
+
+# Operations: Copy or patch files
+
+class FileMap(object):
+ """
+ Represents a file map from the source to the destination, with
+ optional extra regex transforms.
+ """
+
+ def __init__(self, source_path, dest_path, transforms, makedirs=True):
+ self.source_path = source_path
+ self.dest_path = dest_path
+ self.transforms = transforms
+ self.makedirs = makedirs
+
+ def run(self, source_dir, dest_dir):
+ print("COPY: %s -> %s" % (self.source_path, self.dest_path))
+ # Open and read in source file
+ source = os.path.join(source_dir, self.source_path)
+ with open(source, "r") as fh:
+ content = fh.read()
+ # Run transforms
+ for transform in self.transforms:
+ content = transform(content)
+ # Save new file
+ dest = os.path.join(dest_dir, self.dest_path)
+ if self.makedirs:
+ if not os.path.isdir(os.path.dirname(dest)):
+ os.makedirs(os.path.dirname(dest))
+ with open(dest, "w") as fh:
+ fh.write(content)
+
+
+class NewFile(object):
+ """
+ Writes a file to the destination, either blank or with some content from
+ a string.
+ """
+
+ def __init__(self, dest_path, content=""):
+ self.dest_path = dest_path
+ self.content = content
+
+ def run(self, source_dir, dest_dir):
+ print("NEW: %s" % (self.dest_path, ))
+ # Save new file
+ dest = os.path.join(dest_dir, self.dest_path)
+ with open(dest, "w") as fh:
+ fh.write(self.content)
+
+
+# Main class and config
+
+
+global_transforms = [
+ Replacement(r"import channels.([a-zA-Z0-9_\.]+)$", r"import django.channels.\1 as channels"),
+ Replacement(r"from channels import", r"from django.channels import"),
+ Replacement(r"from channels.([a-zA-Z0-9_\.]+) import", r"from django.channels.\1 import"),
+ Replacement(r"from .handler import", r"from django.core.handlers.asgi import"),
+ Replacement(r"from django.channels.test import", r"from django.test.channels import"),
+ Replacement(r"from django.channels.handler import", r"from django.core.handlers.asgi import"),
+ Replacement(r"tests.test_routing", r"channels_tests.test_routing"),
+ Replacement(r"django.core.urlresolvers", r"django.urls"),
+]
+
+python_transforms = global_transforms + [
+ Isort(),
+]
+
+docs_transforms = global_transforms + [
+ Replacement(r"`", r"`"),
+ Replacement(r":doc:`concepts`", r":doc:`/topics/channels/concepts`"),
+ Replacement(r":doc:`deploying`", r":doc:`/topics/channels/deploying`"),
+ Replacement(r":doc:`scaling`", r":doc:`/topics/channels/scaling`"),
+ Replacement(r":doc:`getting-started`", r":doc:`/intro/channels`"),
+ Replacement(r"`", r"`"),
+ Replacement(r":doc:`backends`", r":doc:`/ref/channels/backends`"),
+ Replacement(r":doc:`([\w\d\s]+) `", r"`\1 `_"),
+ Replacement(r"\n\(.*installation>`\)\n", r""),
+ Replacement(r":doc:`installed Channels correctly `", r"added the channel layer setting"),
+ Replacement(r"Channels", r"channels"),
+ Replacement(r"Started with channels", r"Started with Channels"),
+ Replacement(r"Running with channels", r"Running with Channels"),
+ Replacement(r"channels consumers", r"channel consumers"),
+ Replacement(r"channels' design", r"The channels design"),
+ Replacement(r"channels is being released", r"Channels is being released"),
+ Replacement(r"channels is", r"channels are"),
+ Replacement(r"channels provides a", r"Channels provides a"),
+ Replacement(r"channels can use", r"Channels can use"),
+ Replacement(r"channels Concepts", r"Channels Concepts"),
+ Replacement(r"channels works", r"channels work"),
+]
+
+
+class Patchinator(object):
+
+ operations = [
+ FileMap(
+ "channels/asgi.py", "django/channels/asgi.py", python_transforms + [
+ Replacement("if django.VERSION[1] > 9:\n django.setup(set_prefix=False)\n else:\n django.setup()", "django.setup(set_prefix=False)", regex=False), # NOQA
+ ],
+ ),
+ FileMap(
+ "channels/auth.py", "django/channels/auth.py", python_transforms,
+ ),
+ FileMap(
+ "channels/channel.py", "django/channels/channel.py", python_transforms,
+ ),
+ FileMap(
+ "channels/exceptions.py", "django/channels/exceptions.py", python_transforms,
+ ),
+ FileMap(
+ "channels/handler.py", "django/core/handlers/asgi.py", python_transforms,
+ ),
+ FileMap(
+ "channels/routing.py", "django/channels/routing.py", python_transforms,
+ ),
+ FileMap(
+ "channels/message.py", "django/channels/message.py", python_transforms,
+ ),
+ FileMap(
+ "channels/sessions.py", "django/channels/sessions.py", python_transforms,
+ ),
+ FileMap(
+ "channels/staticfiles.py", "django/contrib/staticfiles/consumers.py", python_transforms,
+ ),
+ FileMap(
+ "channels/utils.py", "django/channels/utils.py", python_transforms,
+ ),
+ FileMap(
+ "channels/worker.py", "django/channels/worker.py", python_transforms,
+ ),
+ FileMap(
+ "channels/management/commands/runworker.py",
+ "django/core/management/commands/runworker.py",
+ python_transforms,
+ ),
+ # Tests
+ FileMap(
+ "channels/test/base.py", "django/test/channels.py", python_transforms,
+ ),
+ NewFile(
+ "tests/channels_tests/__init__.py",
+ ),
+ FileMap(
+ "tests/test_handler.py", "tests/channels_tests/test_handler.py", python_transforms,
+ ),
+ FileMap(
+ "tests/test_routing.py", "tests/channels_tests/test_routing.py", python_transforms,
+ ),
+ FileMap(
+ "tests/test_request.py", "tests/channels_tests/test_request.py", python_transforms,
+ ),
+ FileMap(
+ "tests/test_sessions.py", "tests/channels_tests/test_sessions.py", python_transforms,
+ ),
+ # Docs
+ FileMap(
+ "docs/backends.rst", "docs/ref/channels/backends.txt", docs_transforms,
+ ),
+ FileMap(
+ "docs/concepts.rst", "docs/topics/channels/concepts.txt", docs_transforms,
+ ),
+ FileMap(
+ "docs/deploying.rst", "docs/topics/channels/deploying.txt", docs_transforms,
+ ),
+ FileMap(
+ "docs/getting-started.rst", "docs/intro/channels.txt", docs_transforms,
+ ),
+ FileMap(
+ "docs/reference.rst", "docs/ref/channels/api.txt", docs_transforms,
+ ),
+ FileMap(
+ "docs/testing.rst", "docs/topics/channels/testing.txt", docs_transforms,
+ ),
+ FileMap(
+ "docs/cross-compat.rst", "docs/topics/channels/cross-compat.txt", docs_transforms,
+ ),
+ ]
+
+ def __init__(self, source, destination):
+ self.source = os.path.abspath(source)
+ self.destination = os.path.abspath(destination)
+
+ def run(self):
+ print("Patchinator running.\n Source: %s\n Destination: %s" % (self.source, self.destination))
+ for operation in self.operations:
+ operation.run(self.source, self.destination)
+
+
+if __name__ == '__main__':
+ try:
+ Patchinator(os.path.dirname(__file__), sys.argv[1]).run()
+ except IndexError:
+ print("Supply the target Django directory on the command line")
diff --git a/runtests.py b/runtests.py
new file mode 100755
index 0000000..2f9b3f2
--- /dev/null
+++ b/runtests.py
@@ -0,0 +1,16 @@
+#!/usr/bin/env python
+import os
+import sys
+
+import django
+from django.conf import settings
+from django.test.utils import get_runner
+
+if __name__ == "__main__":
+ os.environ['DJANGO_SETTINGS_MODULE'] = "tests.settings"
+ django.setup()
+ TestRunner = get_runner(settings)
+ tests = sys.argv[1:] or ["tests"]
+ test_runner = TestRunner()
+ failures = test_runner.run_tests(tests)
+ sys.exit(bool(failures))
diff --git a/setup.cfg b/setup.cfg
index 3c6e79c..7efaff1 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,2 +1,16 @@
+[flake8]
+exclude = venv/*,tox/*,docs/*,testproject/*,js_client/*
+ignore = E123,E128,E402,W503,E731,W601
+max-line-length = 119
+
+[isort]
+combine_as_imports = true
+default_section = THIRDPARTY
+include_trailing_comma = true
+known_first_party = channels
+multi_line_output = 5
+not_skip = __init__.py
+line_length = 119
+
[bdist_wheel]
universal=1
diff --git a/setup.py b/setup.py
old mode 100755
new mode 100644
index fd45283..d5bbf18
--- a/setup.py
+++ b/setup.py
@@ -1,41 +1,33 @@
-import os
-
from setuptools import find_packages, setup
-
-from daphne import __version__
-
-# We use the README as the long_description
-readme_path = os.path.join(os.path.dirname(__file__), "README.rst")
-with open(readme_path) as fp:
- long_description = fp.read()
+from channels import __version__
setup(
- name='daphne',
+ name='channels',
version=__version__,
- url='https://github.com/django/daphne',
+ url='http://github.com/django/channels',
author='Django Software Foundation',
author_email='foundation@djangoproject.com',
- description='Django ASGI (HTTP/WebSocket) server',
- long_description=long_description,
+ description="Brings event-driven capabilities to Django with a channel system. Django 1.8 and up only.",
license='BSD',
- zip_safe=False,
- package_dir={'twisted': 'daphne/twisted'},
- packages=find_packages() + ['twisted.plugins'],
+ packages=find_packages(exclude=['tests']),
include_package_data=True,
install_requires=[
+ 'Django>=1.8',
'asgiref~=1.1',
- 'twisted>=17.1',
- 'autobahn>=0.18',
+ 'daphne~=1.3',
],
extras_require={
- 'tests': ['hypothesis', 'tox']
+ 'tests': [
+ 'coverage',
+ 'flake8>=2.0,<3.0',
+ 'isort',
+ ],
+ 'tests:python_version < "3.0"': ['mock'],
},
- entry_points={'console_scripts': [
- 'daphne = daphne.cli:CommandLineInterface.entrypoint',
- ]},
classifiers=[
- 'Development Status :: 4 - Beta',
+ 'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
+ 'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
@@ -45,7 +37,6 @@ setup(
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
- 'Programming Language :: Python :: 3.6',
'Topic :: Internet :: WWW/HTTP',
],
)
diff --git a/testproject/Dockerfile.rabbitmq b/testproject/Dockerfile.rabbitmq
new file mode 100644
index 0000000..5e5cfb3
--- /dev/null
+++ b/testproject/Dockerfile.rabbitmq
@@ -0,0 +1,27 @@
+FROM ubuntu:16.04
+
+MAINTAINER Artem Malyshev
+
+# python-dev \
+RUN apt-get update && \
+ apt-get install -y \
+ git \
+ python-setuptools \
+ python-pip && \
+ pip install -U pip
+
+# Install asgi_rabbitmq driver and most recent Daphne
+RUN pip install \
+ git+https://github.com/proofit404/asgi_rabbitmq.git#egg=asgi_rabbitmq \
+ git+https://github.com/django/daphne.git@#egg=daphne
+
+# Clone Channels and install it
+RUN git clone https://github.com/django/channels.git /srv/channels/ && \
+ cd /srv/channels && \
+ git reset --hard origin/master && \
+ python setup.py install
+
+WORKDIR /srv/channels/testproject/
+ENV RABBITMQ_URL=amqp://guest:guest@rabbitmq:5672/%2F
+
+EXPOSE 80
diff --git a/testproject/Dockerfile.redis b/testproject/Dockerfile.redis
new file mode 100644
index 0000000..15e43e3
--- /dev/null
+++ b/testproject/Dockerfile.redis
@@ -0,0 +1,27 @@
+FROM ubuntu:16.04
+
+MAINTAINER Andrew Godwin
+
+# python-dev \
+RUN apt-get update && \
+ apt-get install -y \
+ git \
+ python-setuptools \
+ python-pip && \
+ pip install -U pip
+
+# Install asgi_redis driver and most recent Daphne
+RUN pip install \
+ asgi_redis==1.0.0 \
+ git+https://github.com/django/daphne.git@#egg=daphne
+
+# Clone Channels and install it
+RUN git clone https://github.com/django/channels.git /srv/channels/ && \
+ cd /srv/channels && \
+ git reset --hard origin/master && \
+ python setup.py install
+
+WORKDIR /srv/channels/testproject/
+ENV REDIS_URL=redis://redis:6379
+
+EXPOSE 80
diff --git a/testproject/README.rst b/testproject/README.rst
new file mode 100644
index 0000000..3ccb737
--- /dev/null
+++ b/testproject/README.rst
@@ -0,0 +1,115 @@
+Channels Test Project
+=====================
+
+This subdirectory contains benchmarking code and a companion Django project
+that can be used to benchmark Channels for both HTTP and WebSocket performance.
+
+Preparation:
+~~~~~~~~~~~~
+
+Set up a Python 2.7 virtualenv however you do that and activate it.
+
+e.g. to create it right in the test directory (assuming python 2 is your system's default)::
+
+ virtualenv channels-test-py27
+ source channels-test-py27/bin/activate
+ pip install -U -r requirements.txt
+
+How to use with Docker:
+~~~~~~~~~~~~~~~~~~~~~~~
+
+Build the docker image from Dockerfile, tag it ``channels-redis-test``::
+
+ docker build -t channels-redis-test -f Dockerfile.redis .
+
+Run the server::
+
+ docker-compose -f docker-compose.redis.yml up
+
+The benchmark project will now be running on: http://{your-docker-ip}:80
+
+Test it by navigating to that address in a browser. It should just say "OK".
+
+It is also running a WebSocket server at: ws://{your-docker-ip}:80
+
+Run the benchmark's help to show the parameters::
+
+ python benchmark.py --help
+
+Let's just try a quick test with the default values from the parameter list::
+
+ python benchmark.py ws://localhost:80
+
+How to use with Docker and RabbitMQ:
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Build the docker image from Dockerfile, tag it ``channels-rabbitmq-test``::
+
+ docker build -t channels-rabbitmq-test -f Dockerfile.rabbitmq .
+
+Run the server::
+
+ docker-compose -f docker-compose.rabbitmq.yml up
+
+The rest is the same.
+
+How to use with runserver:
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+You must have a local Redis server running on localhost:6379 for this to work! If you happen
+to be running Docker, this can easily be done with::
+
+ docker run -d --name redis_local -p 6379:6379 redis:alpine
+
+Just to make sure you're up to date with migrations, run::
+
+ python manage.py migrate
+
+In one terminal window, run the server with::
+
+ python manage.py runserver
+
+In another terminal window, run the benchmark with::
+
+ python benchmark.py ws://localhost:8000
+
+
+Additional load testing options:
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+If you wish to set up a separate machine to load test your environment, you can follow these steps.
+
+Install fabric on your machine. This is highly dependent on what your environment looks like, but the recommended option is::
+
+ pip install fabric
+
+(Hint: if you're on Windows 10, just use the Linux subsystem and use ``apt-get install fabric``. It'll save you a lot of trouble.)
+
+Git clone this project down to your machine::
+
+ git clone https://github.com/django/channels/
+
+Relative to where you cloned the repository, move into the test project directory::
+
+ cd channels/testproject/
+
+Spin up a server on your favorite cloud host (AWS, Linode, Digital Ocean, etc.) and get its host and credentials. Run the following command using those credentials::
+
+ fab setup_load_tester -i "ida_rsa" -H ubuntu@example.com
+
+That machine will provision itself. It may (depending on your vendor) prompt you a few times with a ``Y/n`` question. This is just asking you about increasing storage space.
+
+
+Once it has finished, it will have installed a node package called ``loadtest`` (https://www.npmjs.com/package/loadtest). Note: my examples show HTTP-only requests, but loadtest also supports WebSockets.
+
+To run the default loadtest setup, you can do the following; the loadtest package will run for 90 seconds at a concurrency of 10::
+
+ fab run_loadtest:http://127.0.0.1 -i "id_rsa" -H ubuntu@example.com
+
+Or if you want to exert some minor control, I've exposed a couple of parameters. The following example will run for 10 minutes at 300 requests per second::
+
+    fab run_loadtest_rps:http://127.0.0.1,rps=300,t=600 -i "id_rsa" -H ubuntu@example.com
+
+If you want more control, you can always open a shell on the machine and run your own commands::
+
+ fab shell -i "id_rsa" -H ubuntu@example.com
diff --git a/testproject/benchmark.py b/testproject/benchmark.py
new file mode 100644
index 0000000..387f1c9
--- /dev/null
+++ b/testproject/benchmark.py
@@ -0,0 +1,224 @@
+from __future__ import unicode_literals
+
+import time
+import random
+import statistics
+from autobahn.twisted.websocket import (
+ WebSocketClientProtocol,
+ WebSocketClientFactory,
+)
+from twisted.internet import reactor
+
+stats = {}
+
+
+class MyClientProtocol(WebSocketClientProtocol):
+
+ def __init__(self, *args, **kwargs):
+ WebSocketClientProtocol.__init__(self, *args, **kwargs)
+ self.fingerprint = "".join(random.choice("abcdefghijklmnopqrstuvwxyz") for i in range(16))
+ stats[self.fingerprint] = {}
+
+ def onConnect(self, response):
+ self.opened = time.time()
+ self.sent = 0
+ self.last_send = None
+ self.received = 0
+ self.corrupted = 0
+ self.out_of_order = 0
+ self.latencies = []
+
+ def onOpen(self):
+ def hello():
+ if self.last_send is None:
+ if self.sent >= self.factory.num_messages:
+ self.sendClose()
+ return
+ self.last_send = time.time()
+ self.sendMessage(("%s:%s" % (self.sent, self.fingerprint)).encode("ascii"))
+ self.sent += 1
+ else:
+ # Wait for receipt of ping
+ pass
+ self.factory.reactor.callLater(1.0 / self.factory.message_rate, hello)
+ hello()
+
+ def onMessage(self, payload, isBinary):
+ # Detect receive-before-send
+ if self.last_send is None:
+ self.corrupted += 1
+ print("CRITICAL: Socket %s received before sending: %s" % (self.fingerprint, payload))
+ return
+ num, fingerprint = payload.decode("ascii").split(":")
+ if fingerprint != self.fingerprint:
+ self.corrupted += 1
+ try:
+ if int(num) != self.received:
+ self.out_of_order += 1
+ except ValueError:
+ self.corrupted += 1
+ self.latencies.append(time.time() - self.last_send)
+ self.received += 1
+ self.last_send = None
+
+ def onClose(self, wasClean, code, reason):
+ if hasattr(self, "sent"):
+ stats[self.fingerprint] = {
+ "sent": self.sent,
+ "received": self.received,
+ "corrupted": self.corrupted,
+ "out_of_order": self.out_of_order,
+ "latencies": self.latencies,
+ "connect": True,
+ }
+ else:
+ stats[self.fingerprint] = {
+ "sent": 0,
+ "received": 0,
+ "corrupted": 0,
+ "out_of_order": 0,
+ "connect": False,
+ }
+
+
+
+class Benchmarker(object):
+ """
+ Performs benchmarks against WebSockets.
+ """
+
+ def __init__(self, url, num, concurrency, rate, messages, spawn):
+ self.url = url
+ self.num = num
+ self.concurrency = concurrency
+ self.rate = rate
+ self.spawn = spawn
+ self.messages = messages
+ self.factory = WebSocketClientFactory(self.url)
+ self.factory.protocol = MyClientProtocol
+ self.factory.num_messages = self.messages
+ self.factory.message_rate = self.rate
+
+ def loop(self):
+ self.spawn_loop()
+ self.progress_loop()
+
+ def spawn_loop(self):
+ self.spawn_connections()
+ reactor.callLater(0.1, self.spawn_loop)
+
+ def progress_loop(self):
+ self.print_progress()
+ reactor.callLater(1, self.progress_loop)
+
+ def spawn_connections(self):
+ # Stop spawning if we did the right total number
+ max_to_spawn = self.num - len(stats)
+ if max_to_spawn <= 0:
+ return
+ # Don't spawn too many at once
+ max_to_spawn = min(max_to_spawn, int(self.spawn / 10.0))
+ # Decode connection args
+ host, port = self.url.split("://")[1].split(":")
+ port = int(port)
+ # Only spawn enough to get up to concurrency
+ open_protocols = len([x for x in stats.values() if not x])
+ to_spawn = min(max(self.concurrency - open_protocols, 0), max_to_spawn)
+ for _ in range(to_spawn):
+ reactor.connectTCP(host, port, self.factory)
+
+ def print_progress(self):
+ open_protocols = len([x for x in stats.values() if not x])
+ print("%s open, %s total" % (
+ open_protocols,
+ len(stats),
+ ))
+ if open_protocols == 0 and len(stats) >= self.num:
+ reactor.stop()
+ self.print_stats()
+
+ def percentile(self, values, fraction):
+ """
+ Returns a percentile value (e.g. fraction = 0.95 -> 95th percentile)
+ """
+ values = sorted(values)
+ stopat = int(len(values) * fraction)
+ if stopat == len(values):
+ stopat -= 1
+ return values[stopat]
+
+ def print_stats(self):
+ # Collect stats together
+ latencies = []
+ num_good = 0
+ num_incomplete = 0
+ num_failed = 0
+ num_corruption = 0
+ num_out_of_order = 0
+ for entry in stats.values():
+ latencies.extend(entry.get("latencies", []))
+ if not entry['connect']:
+ num_failed += 1
+ elif entry['sent'] != entry['received']:
+ num_incomplete += 1
+ elif entry['corrupted']:
+ num_corruption += 1
+ elif entry['out_of_order']:
+ num_out_of_order += 1
+ else:
+ num_good += 1
+
+ if latencies:
+ # Some analysis on latencies
+ latency_mean = statistics.mean(latencies)
+ latency_median = statistics.median(latencies)
+ latency_stdev = statistics.stdev(latencies)
+ latency_95 = self.percentile(latencies, 0.95)
+ latency_99 = self.percentile(latencies, 0.99)
+
+ # Print results
+ print("-------")
+ print("Sockets opened: %s" % len(stats))
+ if latencies:
+ print("Latency stats: Mean %.3fs Median %.3fs Stdev %.3f 95%% %.3fs 99%% %.3fs" % (
+ latency_mean,
+ latency_median,
+ latency_stdev,
+ latency_95,
+ latency_99,
+ ))
+ print("Good sockets: %s (%.2f%%)" % (num_good, (float(num_good) / len(stats))*100))
+ print("Incomplete sockets: %s (%.2f%%)" % (num_incomplete, (float(num_incomplete) / len(stats))*100))
+ print("Corrupt sockets: %s (%.2f%%)" % (num_corruption, (float(num_corruption) / len(stats))*100))
+ print("Out of order sockets: %s (%.2f%%)" % (num_out_of_order, (float(num_out_of_order) / len(stats))*100))
+ print("Failed to connect: %s (%.2f%%)" % (num_failed, (float(num_failed) / len(stats))*100))
+
+
+if __name__ == '__main__':
+
+ import sys
+ import argparse
+
+ from twisted.python import log
+
+# log.startLogging(sys.stdout)
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument("url")
+ parser.add_argument("-n", "--num", type=int, default=100, help="Total number of sockets to open")
+ parser.add_argument("-c", "--concurrency", type=int, default=10, help="Number of sockets to open at once")
+ parser.add_argument("-r", "--rate", type=float, default=1, help="Number of messages to send per socket per second")
+ parser.add_argument("-m", "--messages", type=int, default=5, help="Number of messages to send per socket before close")
+ parser.add_argument("-s", "--spawn", type=int, default=30, help="Number of sockets to spawn per second, max")
+ args = parser.parse_args()
+
+ benchmarker = Benchmarker(
+ url=args.url,
+ num=args.num,
+ concurrency=args.concurrency,
+ rate=args.rate,
+ messages=args.messages,
+ spawn=args.spawn,
+ )
+ benchmarker.loop()
+ reactor.run()
diff --git a/testproject/chtest/__init__.py b/testproject/chtest/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/testproject/chtest/consumers.py b/testproject/chtest/consumers.py
new file mode 100644
index 0000000..a761fd9
--- /dev/null
+++ b/testproject/chtest/consumers.py
@@ -0,0 +1,7 @@
+from channels.sessions import enforce_ordering
+
+
+def ws_message(message):
+ "Echoes messages back to the client."
+
+ message.reply_channel.send({'text': message['text']})
diff --git a/testproject/chtest/models.py b/testproject/chtest/models.py
new file mode 100644
index 0000000..71a8362
--- /dev/null
+++ b/testproject/chtest/models.py
@@ -0,0 +1,3 @@
+from django.db import models
+
+# Create your models here.
diff --git a/testproject/chtest/views.py b/testproject/chtest/views.py
new file mode 100644
index 0000000..d244010
--- /dev/null
+++ b/testproject/chtest/views.py
@@ -0,0 +1,6 @@
+from django.http import HttpResponse
+
+
+def index(request):
+
+ return HttpResponse("OK")
diff --git a/testproject/docker-compose.rabbitmq.yml b/testproject/docker-compose.rabbitmq.yml
new file mode 100644
index 0000000..1cc885d
--- /dev/null
+++ b/testproject/docker-compose.rabbitmq.yml
@@ -0,0 +1,28 @@
+version: '2'
+services:
+ rabbitmq:
+ image: rabbitmq:management
+ ports:
+ - "15672:15672"
+ rabbitmq_daphne:
+ image: channels-rabbitmq-test
+ build:
+ context: .
+ dockerfile: Dockerfile.rabbitmq
+ command: daphne -b 0.0.0.0 -p 80 testproject.asgi.rabbitmq:channel_layer
+ volumes:
+ - .:/srv/channels/testproject/
+ ports:
+ - "80:80"
+ depends_on:
+ - rabbitmq
+ rabbitmq_worker:
+ image: channels-rabbitmq-test
+ build:
+ context: .
+ dockerfile: Dockerfile.rabbitmq
+ command: python manage.py runworker --settings=testproject.settings.channels_rabbitmq
+ volumes:
+ - .:/srv/channels/testproject/
+ depends_on:
+ - rabbitmq
diff --git a/testproject/docker-compose.redis.yml b/testproject/docker-compose.redis.yml
new file mode 100644
index 0000000..8cb3d18
--- /dev/null
+++ b/testproject/docker-compose.redis.yml
@@ -0,0 +1,26 @@
+version: '2'
+services:
+ redis:
+ image: redis:alpine
+ redis_daphne:
+ image: channels-redis-test
+ build:
+ context: .
+ dockerfile: Dockerfile.redis
+ command: daphne -b 0.0.0.0 -p 80 testproject.asgi.redis:channel_layer
+ volumes:
+ - .:/srv/channels/testproject/
+ ports:
+ - "80:80"
+ depends_on:
+ - redis
+ redis_worker:
+ image: channels-redis-test
+ build:
+ context: .
+ dockerfile: Dockerfile.redis
+ command: python manage.py runworker --settings=testproject.settings.channels_redis
+ volumes:
+ - .:/srv/channels/testproject/
+ depends_on:
+ - redis
diff --git a/testproject/fabfile.py b/testproject/fabfile.py
new file mode 100644
index 0000000..772ff08
--- /dev/null
+++ b/testproject/fabfile.py
@@ -0,0 +1,67 @@
+from fabric.api import sudo, task, cd
+
+# CHANNEL TASKS
+@task
+def setup_redis():
+ sudo("apt-get update && apt-get install -y redis-server")
+ sudo("sed -i -e 's/127.0.0.1/0.0.0.0/g' /etc/redis/redis.conf")
+ sudo("/etc/init.d/redis-server stop")
+ sudo("/etc/init.d/redis-server start")
+
+
+@task
+def setup_channels():
+ sudo("apt-get update && apt-get install -y git python-dev python-setuptools python-pip")
+ sudo("pip install -U pip")
+ sudo("pip install -U asgi_redis asgi_ipc git+https://github.com/django/daphne.git@#egg=daphne")
+ sudo("rm -rf /srv/channels")
+ sudo("git clone https://github.com/django/channels.git /srv/channels/")
+ with cd("/srv/channels/"):
+ sudo("python setup.py install")
+
+
+@task
+def run_daphne(redis_ip):
+ with cd("/srv/channels/testproject/"):
+ sudo("REDIS_URL=redis://%s:6379 daphne -b 0.0.0.0 -p 80 testproject.asgi.redis:channel_layer" % redis_ip)
+
+
+@task
+def run_worker(redis_ip):
+ with cd("/srv/channels/testproject/"):
+ sudo("REDIS_URL=redis://%s:6379 python manage.py runworker" % redis_ip)
+
+
+# Current loadtesting setup
+@task
+def setup_load_tester(src="https://github.com/django/channels.git"):
+ sudo("apt-get update && apt-get install -y git nodejs && apt-get install npm")
+ sudo("npm install -g loadtest")
+ sudo("ln -s /usr/bin/nodejs /usr/bin/node")
+
+
+# Run current loadtesting setup
+# example usage: $ fab run_loadtest:http://127.0.0.1,t=90 -i "id_rsa" -H ubuntu@example.com
+@task
+def run_loadtest(host, t=90):
+ sudo("loadtest -c 10 -t {t} {h}".format(h=host, t=t))
+
+# Run current loadtesting setup at a fixed request rate
+# example usage: $ fab run_loadtest_rps:http://127.0.0.1,rps=200 -i "id_rsa" -H ubuntu@example.com
+@task
+def run_loadtest_rps(host, t=90, rps=200):
+ sudo("loadtest -c 10 --rps {rps} -t {t} {h}".format(h=host, t=t, rps=rps))
+
+
+# Task that Andrew used for loadtesting earlier on
+@task
+def setup_tester():
+ sudo("apt-get update && apt-get install -y apache2-utils python3-pip")
+ sudo("pip3 -U pip autobahn twisted")
+ sudo("rm -rf /srv/channels")
+ sudo("git clone https://github.com/django/channels.git /srv/channels/")
+
+
+@task
+def shell():
+ sudo("bash")
diff --git a/testproject/locustfile.py b/testproject/locustfile.py
new file mode 100644
index 0000000..1379f2a
--- /dev/null
+++ b/testproject/locustfile.py
@@ -0,0 +1,16 @@
+from locust import HttpLocust, TaskSet, task
+
+class UserBehavior(TaskSet):
+ def on_start(self):
+ """ on_start is called when a Locust start before any task is scheduled """
+ self.index()
+
+ @task
+ def index(self):
+ self.client.get("/")
+
+
+class WebsiteUser(HttpLocust):
+ task_set = UserBehavior
+    min_wait = 5000
+    max_wait = 9000
diff --git a/testproject/manage.py b/testproject/manage.py
new file mode 100644
index 0000000..9a0be8b
--- /dev/null
+++ b/testproject/manage.py
@@ -0,0 +1,13 @@
+#!/usr/bin/env python
+import os
+import sys
+
+if __name__ == "__main__":
+ os.environ.setdefault(
+ "DJANGO_SETTINGS_MODULE",
+ "testproject.settings.channels_redis",
+ )
+
+ from django.core.management import execute_from_command_line
+
+ execute_from_command_line(sys.argv)
diff --git a/testproject/requirements.benchmark.txt b/testproject/requirements.benchmark.txt
new file mode 100644
index 0000000..f04a638
--- /dev/null
+++ b/testproject/requirements.benchmark.txt
@@ -0,0 +1,7 @@
+autobahn==0.17.1
+constantly==15.1.0
+incremental==16.10.1
+six==1.10.0
+Twisted==16.6.0
+txaio==2.6.0
+zope.interface==4.3.3
diff --git a/testproject/requirements.txt b/testproject/requirements.txt
new file mode 100644
index 0000000..900d067
--- /dev/null
+++ b/testproject/requirements.txt
@@ -0,0 +1,15 @@
+asgi_redis==0.13.1
+asgi_ipc==1.1.0
+asgiref==0.13.3
+autobahn==0.14.1
+channels==0.14.2
+daphne==0.13.1
+Django==1.9.7
+docutils==0.12
+msgpack-python==0.4.7
+redis==2.10.5
+six==1.10.0
+statistics==1.0.3.5
+Twisted==16.2.0
+txaio==2.5.1
+zope.interface==4.2.0
diff --git a/testproject/setup.py b/testproject/setup.py
new file mode 100644
index 0000000..8d0865c
--- /dev/null
+++ b/testproject/setup.py
@@ -0,0 +1,12 @@
+from setuptools import find_packages, setup
+
+setup(
+ name='channels-benchmark',
+ packages=find_packages(),
+ py_modules=['benchmark'],
+ install_requires=[
+ 'autobahn',
+ 'Twisted',
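+        # statistics is stdlib on Python 3; pull in the backport on 2.x.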
+ 'statistics ; python_version < "3.0"',
+ ],
+)
diff --git a/testproject/testproject/__init__.py b/testproject/testproject/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/testproject/testproject/asgi/__init__.py b/testproject/testproject/asgi/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/testproject/testproject/asgi/ipc.py b/testproject/testproject/asgi/ipc.py
new file mode 100644
index 0000000..78421fb
--- /dev/null
+++ b/testproject/testproject/asgi/ipc.py
@@ -0,0 +1,6 @@
+import os
+from channels.asgi import get_channel_layer
+import asgi_ipc
+
+os.environ.setdefault("DJANGO_SETTINGS_MODULE", "testproject.settings.channels_ipc")
+channel_layer = get_channel_layer()
diff --git a/testproject/testproject/asgi/rabbitmq.py b/testproject/testproject/asgi/rabbitmq.py
new file mode 100644
index 0000000..b132a73
--- /dev/null
+++ b/testproject/testproject/asgi/rabbitmq.py
@@ -0,0 +1,8 @@
+import os
+from channels.asgi import get_channel_layer
+
+os.environ.setdefault(
+ "DJANGO_SETTINGS_MODULE",
+ "testproject.settings.channels_rabbitmq",
+)
+channel_layer = get_channel_layer()
diff --git a/testproject/testproject/asgi/redis.py b/testproject/testproject/asgi/redis.py
new file mode 100644
index 0000000..73591ef
--- /dev/null
+++ b/testproject/testproject/asgi/redis.py
@@ -0,0 +1,5 @@
+import os
+from channels.asgi import get_channel_layer
+
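+# Daphne is pointed at this module ("testproject.asgi.redis:channel_layer"),
+# so the settings module must be configured before the layer is created.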
+os.environ.setdefault("DJANGO_SETTINGS_MODULE", "testproject.settings.channels_redis")
+channel_layer = get_channel_layer()
diff --git a/testproject/testproject/settings/__init__.py b/testproject/testproject/settings/__init__.py
new file mode 100644
index 0000000..7b33d23
--- /dev/null
+++ b/testproject/testproject/settings/__init__.py
@@ -0,0 +1 @@
+# Blank on purpose
diff --git a/testproject/testproject/settings/base.py b/testproject/testproject/settings/base.py
new file mode 100644
index 0000000..e9b51b5
--- /dev/null
+++ b/testproject/testproject/settings/base.py
@@ -0,0 +1,33 @@
+# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
+
+import os
+
+BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+
+SECRET_KEY = '-3yt98bfvxe)7+^h#(@8k#1(1m_fpd9x3q2wolfbf^!r5ma62u'
+
+DEBUG = True
+
+INSTALLED_APPS = (
+ 'django.contrib.auth',
+ 'django.contrib.contenttypes',
+ 'django.contrib.sessions',
+)
+
+ROOT_URLCONF = 'testproject.urls'
+
+WSGI_APPLICATION = 'testproject.wsgi.application'
+
+STATIC_URL = "/static/"
+
+DATABASES = {
+ 'default': {
+ 'ENGINE': 'django.db.backends.sqlite3',
+ 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
+ 'TEST': {
+ 'NAME': os.path.join(BASE_DIR, 'test_db.sqlite3'),
+ },
+ },
+}
+
+ALLOWED_HOSTS = ['*']
diff --git a/testproject/testproject/settings/channels_ipc.py b/testproject/testproject/settings/channels_ipc.py
new file mode 100644
index 0000000..3a02249
--- /dev/null
+++ b/testproject/testproject/settings/channels_ipc.py
@@ -0,0 +1,13 @@
+# Settings for channels specifically
+from testproject.settings.base import *
+
+INSTALLED_APPS += (
+ 'channels',
+)
+
+CHANNEL_LAYERS = {
+ "default": {
+ "BACKEND": "asgi_ipc.IPCChannelLayer",
+ "ROUTING": "testproject.urls.channel_routing",
+ },
+}
diff --git a/testproject/testproject/settings/channels_rabbitmq.py b/testproject/testproject/settings/channels_rabbitmq.py
new file mode 100644
index 0000000..005eb07
--- /dev/null
+++ b/testproject/testproject/settings/channels_rabbitmq.py
@@ -0,0 +1,19 @@
+# Settings for channels specifically
+from testproject.settings.base import *
+
+INSTALLED_APPS += (
+ 'channels',
+)
+
+CHANNEL_LAYERS = {
+ 'default': {
+ 'BACKEND': 'asgi_rabbitmq.RabbitmqChannelLayer',
+ 'ROUTING': 'testproject.urls.channel_routing',
+ 'CONFIG': {
+ 'url': os.environ.get(
+ 'RABBITMQ_URL',
+ 'amqp://guest:guest@localhost:5672/%2F',
+ ),
+ },
+ },
+}
diff --git a/testproject/testproject/settings/channels_redis.py b/testproject/testproject/settings/channels_redis.py
new file mode 100644
index 0000000..2068548
--- /dev/null
+++ b/testproject/testproject/settings/channels_redis.py
@@ -0,0 +1,19 @@
+# Settings for channels specifically
+from testproject.settings.base import *
+
+INSTALLED_APPS += (
+ 'channels',
+)
+
+CHANNEL_LAYERS = {
+ "default": {
+ "BACKEND": "asgi_redis.RedisChannelLayer",
+ "ROUTING": "testproject.urls.channel_routing",
+ "CONFIG": {
+ "hosts": [os.environ.get('REDIS_URL', 'redis://127.0.0.1:6379')],
+ },
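+        # TEST_CONFIG is what make_test_backend uses to build an isolated
+        # layer instance for tests (see tests/test_asgi.py).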
+ "TEST_CONFIG": {
+ "hosts": [os.environ.get('REDIS_URL', 'redis://127.0.0.1:6379')],
+ },
+ },
+}
diff --git a/testproject/testproject/urls.py b/testproject/testproject/urls.py
new file mode 100644
index 0000000..7341fa0
--- /dev/null
+++ b/testproject/testproject/urls.py
@@ -0,0 +1,11 @@
+from django.conf.urls import url
+from chtest import views
+
+urlpatterns = [url(r'^$', views.index)]
+
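+# chtest.consumers needs Channels machinery; guard the import so the
+# plain-WSGI (no-channels) configuration can still load this URLconf.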
+try:
+ from chtest import consumers
+
+ channel_routing = {"websocket.receive": consumers.ws_message}
+except ImportError:
+ pass
diff --git a/testproject/testproject/wsgi.py b/testproject/testproject/wsgi.py
new file mode 100644
index 0000000..91daa15
--- /dev/null
+++ b/testproject/testproject/wsgi.py
@@ -0,0 +1,16 @@
+"""
+WSGI config for testproject project.
+
+It exposes the WSGI callable as a module-level variable named ``application``.
+
+For more information on this file, see
+https://docs.djangoproject.com/en/dev/howto/deployment/wsgi/
+"""
+
+import os
+
+from django.core.wsgi import get_wsgi_application
+
+os.environ.setdefault("DJANGO_SETTINGS_MODULE", "testproject.settings.channels_redis")
+
+application = get_wsgi_application()
diff --git a/testproject/testproject/wsgi_no_channels.py b/testproject/testproject/wsgi_no_channels.py
new file mode 100644
index 0000000..79863f9
--- /dev/null
+++ b/testproject/testproject/wsgi_no_channels.py
@@ -0,0 +1,16 @@
+"""
+WSGI config for testproject project.
+
+It exposes the WSGI callable as a module-level variable named ``application``.
+
+For more information on this file, see
+https://docs.djangoproject.com/en/dev/howto/deployment/wsgi/
+"""
+
+import os
+
+from django.core.wsgi import get_wsgi_application
+
+os.environ.setdefault("DJANGO_SETTINGS_MODULE", "testproject.settings.base")
+
+application = get_wsgi_application()
diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/tests/a_file b/tests/a_file
new file mode 100644
index 0000000..207ed20
--- /dev/null
+++ b/tests/a_file
@@ -0,0 +1,5 @@
+thi is
+a file
+sdaf
+sadf
+
diff --git a/tests/models.py b/tests/models.py
new file mode 100644
index 0000000..4b1da2d
--- /dev/null
+++ b/tests/models.py
@@ -0,0 +1,12 @@
+from uuid import uuid4
+
+from django.db import models
+
+
+class TestUUIDModel(models.Model):
+ """
+ Simple model with UUIDField as primary key for tests.
+ """
+
+ id = models.UUIDField(primary_key=True, default=uuid4)
+ name = models.CharField(max_length=255)
diff --git a/tests/settings.py b/tests/settings.py
new file mode 100644
index 0000000..74fbcc3
--- /dev/null
+++ b/tests/settings.py
@@ -0,0 +1,30 @@
+SECRET_KEY = 'cat'
+
+INSTALLED_APPS = (
+ 'django.contrib.auth',
+ 'django.contrib.contenttypes',
+ 'django.contrib.sessions',
+ 'django.contrib.admin',
+ 'channels',
+ 'channels.delay',
+ 'tests',
+)
+
+DATABASES = {
+ 'default': {
+ 'ENGINE': 'django.db.backends.sqlite3',
+ }
+}
+
+CHANNEL_LAYERS = {
+ 'default': {
+ 'BACKEND': 'asgiref.inmemory.ChannelLayer',
+ 'ROUTING': [],
+ },
+ 'fake_channel': {
+ 'BACKEND': 'tests.test_management.FakeChannelLayer',
+ 'ROUTING': [],
+ },
+}
+
+MIDDLEWARE_CLASSES = []
diff --git a/tests/test_asgi.py b/tests/test_asgi.py
new file mode 100644
index 0000000..b166e32
--- /dev/null
+++ b/tests/test_asgi.py
@@ -0,0 +1,34 @@
+from channels import DEFAULT_CHANNEL_LAYER
+from channels.asgi import InvalidChannelLayerError, channel_layers
+from channels.test import ChannelTestCase
+from django.test import override_settings
+
+
+class TestChannelLayerManager(ChannelTestCase):
+
+ def test_config_error(self):
+ """
+        If a channel layer doesn't specify TEST_CONFIG, `make_test_backend`
+        should raise an error.
+ """
+
+ with self.assertRaises(InvalidChannelLayerError):
+ channel_layers.make_test_backend(DEFAULT_CHANNEL_LAYER)
+
+ @override_settings(CHANNEL_LAYERS={
+ 'default': {
+ 'BACKEND': 'asgiref.inmemory.ChannelLayer',
+ 'ROUTING': [],
+ 'TEST_CONFIG': {
+ 'expiry': 100500,
+ },
+ },
+ })
+ def test_config_instance(self):
+ """
+        If a channel layer provides TEST_CONFIG, `make_test_backend` should
+        return a channel layer instance configured for testing.
+ """
+
+ layer = channel_layers.make_test_backend(DEFAULT_CHANNEL_LAYER)
+ self.assertEqual(layer.channel_layer.expiry, 100500)
diff --git a/tests/test_binding.py b/tests/test_binding.py
new file mode 100644
index 0000000..b0775d1
--- /dev/null
+++ b/tests/test_binding.py
@@ -0,0 +1,377 @@
+from __future__ import unicode_literals
+
+from django.contrib.auth import get_user_model
+
+from channels import route
+from channels.binding.base import CREATE, DELETE, UPDATE
+from channels.binding.websockets import WebsocketBinding
+from channels.generic.websockets import WebsocketDemultiplexer
+from channels.test import ChannelTestCase, WSClient, apply_routes
+from tests import models
+
+User = get_user_model()
+
+
+class TestsBinding(ChannelTestCase):
+
+ def test_trigger_outbound_create(self):
+
+ class TestBinding(WebsocketBinding):
+ model = User
+ stream = 'test'
+ fields = ['username', 'email', 'password', 'last_name']
+
+ @classmethod
+ def group_names(cls, instance):
+ return ["users"]
+
+ def has_permission(self, user, action, pk):
+ return True
+
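+        # Declaring the binding class is enough: BindingMetaclass hooks up
+        # the model signals, so User.objects.create() below fans out an
+        # outbound group message without any explicit routing here.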
+ client = WSClient()
+ client.join_group('users')
+
+ user = User.objects.create(username='test', email='test@test.com')
+
+ received = client.receive()
+ self.assertTrue('payload' in received)
+ self.assertTrue('action' in received['payload'])
+ self.assertTrue('data' in received['payload'])
+ self.assertTrue('username' in received['payload']['data'])
+ self.assertTrue('email' in received['payload']['data'])
+ self.assertTrue('password' in received['payload']['data'])
+ self.assertTrue('last_name' in received['payload']['data'])
+ self.assertTrue('model' in received['payload'])
+ self.assertTrue('pk' in received['payload'])
+
+ self.assertEqual(received['payload']['action'], 'create')
+ self.assertEqual(received['payload']['model'], 'auth.user')
+ self.assertEqual(received['payload']['pk'], user.pk)
+
+ self.assertEqual(received['payload']['data']['email'], 'test@test.com')
+ self.assertEqual(received['payload']['data']['username'], 'test')
+ self.assertEqual(received['payload']['data']['password'], '')
+ self.assertEqual(received['payload']['data']['last_name'], '')
+
+ received = client.receive()
+ self.assertIsNone(received)
+
+ def test_trigger_outbound_create_non_auto_pk(self):
+
+ class TestBinding(WebsocketBinding):
+ model = models.TestUUIDModel
+ stream = 'test'
+ fields = ['name']
+
+ @classmethod
+ def group_names(cls, instance):
+ return ["testuuidmodels"]
+
+ def has_permission(self, user, action, pk):
+ return True
+
+ client = WSClient()
+ client.join_group('testuuidmodels')
+
+ instance = models.TestUUIDModel.objects.create(name='testname')
+
+ received = client.receive()
+ self.assertTrue('payload' in received)
+ self.assertTrue('action' in received['payload'])
+ self.assertTrue('data' in received['payload'])
+ self.assertTrue('name' in received['payload']['data'])
+ self.assertTrue('model' in received['payload'])
+ self.assertTrue('pk' in received['payload'])
+
+ self.assertEqual(received['payload']['action'], 'create')
+ self.assertEqual(received['payload']['model'], 'tests.testuuidmodel')
+ self.assertEqual(received['payload']['pk'], str(instance.pk))
+
+ self.assertEqual(received['payload']['data']['name'], 'testname')
+
+ received = client.receive()
+ self.assertIsNone(received)
+
+ def test_trigger_outbound_create_exclude(self):
+ class TestBinding(WebsocketBinding):
+ model = User
+ stream = 'test'
+ exclude = ['first_name', 'last_name']
+
+ @classmethod
+ def group_names(cls, instance):
+ return ["users_exclude"]
+
+ def has_permission(self, user, action, pk):
+ return True
+
+ with apply_routes([route('test', TestBinding.consumer)]):
+ client = WSClient()
+ client.join_group('users_exclude')
+
+ user = User.objects.create(username='test', email='test@test.com')
+ received = client.receive()
+
+ self.assertTrue('payload' in received)
+ self.assertTrue('action' in received['payload'])
+ self.assertTrue('data' in received['payload'])
+ self.assertTrue('username' in received['payload']['data'])
+ self.assertTrue('email' in received['payload']['data'])
+ self.assertTrue('password' in received['payload']['data'])
+ self.assertTrue('model' in received['payload'])
+ self.assertTrue('pk' in received['payload'])
+
+ self.assertFalse('last_name' in received['payload']['data'])
+ self.assertFalse('first_name' in received['payload']['data'])
+
+ self.assertEqual(received['payload']['action'], 'create')
+ self.assertEqual(received['payload']['model'], 'auth.user')
+ self.assertEqual(received['payload']['pk'], user.pk)
+
+ self.assertEqual(received['payload']['data']['email'], 'test@test.com')
+ self.assertEqual(received['payload']['data']['username'], 'test')
+ self.assertEqual(received['payload']['data']['password'], '')
+
+ received = client.receive()
+ self.assertIsNone(received)
+
+ def test_omit_fields_and_exclude(self):
+ def _declare_class():
+ class TestBinding(WebsocketBinding):
+ model = User
+ stream = 'test'
+
+ @classmethod
+ def group_names(cls, instance):
+ return ["users_omit"]
+
+ def has_permission(self, user, action, pk):
+ return True
+ self.assertRaises(ValueError, _declare_class)
+
+ def test_trigger_outbound_update(self):
+ class TestBinding(WebsocketBinding):
+ model = User
+ stream = 'test'
+ fields = ['__all__']
+
+ @classmethod
+ def group_names(cls, instance):
+ return ["users2"]
+
+ def has_permission(self, user, action, pk):
+ return True
+
+ # Make model and clear out pending sends
+ user = User.objects.create(username='test', email='test@test.com')
+
+ client = WSClient()
+ client.join_group('users2')
+
+ user.username = 'test_new'
+ user.save()
+
+ received = client.receive()
+ self.assertTrue('payload' in received)
+ self.assertTrue('action' in received['payload'])
+ self.assertTrue('data' in received['payload'])
+ self.assertTrue('username' in received['payload']['data'])
+ self.assertTrue('email' in received['payload']['data'])
+ self.assertTrue('password' in received['payload']['data'])
+ self.assertTrue('last_name' in received['payload']['data'])
+ self.assertTrue('model' in received['payload'])
+ self.assertTrue('pk' in received['payload'])
+
+ self.assertEqual(received['payload']['action'], 'update')
+ self.assertEqual(received['payload']['model'], 'auth.user')
+ self.assertEqual(received['payload']['pk'], user.pk)
+
+ self.assertEqual(received['payload']['data']['email'], 'test@test.com')
+ self.assertEqual(received['payload']['data']['username'], 'test_new')
+ self.assertEqual(received['payload']['data']['password'], '')
+ self.assertEqual(received['payload']['data']['last_name'], '')
+
+ received = client.receive()
+ self.assertIsNone(received)
+
+ def test_trigger_outbound_delete(self):
+ class TestBinding(WebsocketBinding):
+ model = User
+ stream = 'test'
+ fields = ['username']
+
+ @classmethod
+ def group_names(cls, instance):
+ return ["users3"]
+
+ def has_permission(self, user, action, pk):
+ return True
+
+ # Make model and clear out pending sends
+ user = User.objects.create(username='test', email='test@test.com')
+
+ client = WSClient()
+ client.join_group('users3')
+
+ user.delete()
+
+ received = client.receive()
+ self.assertTrue('payload' in received)
+ self.assertTrue('action' in received['payload'])
+ self.assertTrue('data' in received['payload'])
+ self.assertTrue('username' in received['payload']['data'])
+ self.assertTrue('model' in received['payload'])
+ self.assertTrue('pk' in received['payload'])
+
+ self.assertEqual(received['payload']['action'], 'delete')
+ self.assertEqual(received['payload']['model'], 'auth.user')
+ self.assertEqual(received['payload']['pk'], 1)
+ self.assertEqual(received['payload']['data']['username'], 'test')
+
+ received = client.receive()
+ self.assertIsNone(received)
+
+ def test_inbound_create(self):
+ self.assertEqual(User.objects.all().count(), 0)
+
+ class UserBinding(WebsocketBinding):
+ model = User
+ stream = 'users'
+ fields = ['username', 'email', 'password', 'last_name']
+
+ @classmethod
+ def group_names(cls, instance):
+ return ['users_outbound']
+
+ def has_permission(self, user, action, pk):
+ return True
+
+ class Demultiplexer(WebsocketDemultiplexer):
+ consumers = {
+ 'users': UserBinding.consumer,
+ }
+
+ groups = ['inbound']
+
+ with apply_routes([Demultiplexer.as_route(path='/')]):
+ client = WSClient()
+ client.send_and_consume('websocket.connect', path='/')
+ client.send_and_consume('websocket.receive', path='/', text={
+ 'stream': 'users',
+ 'payload': {
+ 'action': CREATE,
+ 'data': {'username': 'test_inbound', 'email': 'test@user_steam.com'},
+ },
+ })
+
+ self.assertEqual(User.objects.all().count(), 1)
+ user = User.objects.all().first()
+ self.assertEqual(user.username, 'test_inbound')
+ self.assertEqual(user.email, 'test@user_steam.com')
+
+ self.assertIsNone(client.receive())
+
+ def test_inbound_update(self):
+ user = User.objects.create(username='test', email='test@channels.com')
+
+ class UserBinding(WebsocketBinding):
+ model = User
+ stream = 'users'
+ fields = ['username', ]
+
+ @classmethod
+ def group_names(cls, instance):
+ return ['users_outbound']
+
+ def has_permission(self, user, action, pk):
+ return True
+
+ class Demultiplexer(WebsocketDemultiplexer):
+ consumers = {
+ 'users': UserBinding.consumer,
+ }
+
+ groups = ['inbound']
+
+ with apply_routes([Demultiplexer.as_route(path='/')]):
+ client = WSClient()
+ client.send_and_consume('websocket.connect', path='/')
+ client.send_and_consume('websocket.receive', path='/', text={
+ 'stream': 'users',
+ 'payload': {'action': UPDATE, 'pk': user.pk, 'data': {'username': 'test_inbound'}}
+ })
+
+ user = User.objects.get(pk=user.pk)
+ self.assertEqual(user.username, 'test_inbound')
+ self.assertEqual(user.email, 'test@channels.com')
+
+            # try changing a field that is not in the binding's fields
+ client.send_and_consume('websocket.receive', path='/', text={
+ 'stream': 'users',
+ 'payload': {'action': UPDATE, 'pk': user.pk, 'data': {'email': 'new@test.com'}}
+ })
+
+ user = User.objects.get(pk=user.pk)
+ self.assertEqual(user.username, 'test_inbound')
+ self.assertEqual(user.email, 'test@channels.com')
+
+ self.assertIsNone(client.receive())
+
+ def test_inbound_delete(self):
+ user = User.objects.create(username='test', email='test@channels.com')
+
+ class UserBinding(WebsocketBinding):
+ model = User
+ stream = 'users'
+ fields = ['username', ]
+
+ @classmethod
+ def group_names(cls, instance):
+ return ['users_outbound']
+
+ def has_permission(self, user, action, pk):
+ return True
+
+ class Demultiplexer(WebsocketDemultiplexer):
+ consumers = {
+ 'users': UserBinding.consumer,
+ }
+
+ groups = ['inbound']
+
+ with apply_routes([Demultiplexer.as_route(path='/')]):
+ client = WSClient()
+ client.send_and_consume('websocket.connect', path='/')
+ client.send_and_consume('websocket.receive', path='/', text={
+ 'stream': 'users',
+ 'payload': {'action': DELETE, 'pk': user.pk}
+ })
+
+ self.assertIsNone(User.objects.filter(pk=user.pk).first())
+ self.assertIsNone(client.receive())
+
+ def test_route_params_saved_in_kwargs(self):
+
+ class UserBinding(WebsocketBinding):
+ model = User
+ stream = 'users'
+ fields = ['username', 'email', 'password', 'last_name']
+
+ @classmethod
+ def group_names(cls, instance):
+ return ['users_outbound']
+
+ def has_permission(self, user, action, pk):
+ return True
+
+ class Demultiplexer(WebsocketDemultiplexer):
+ consumers = {
+ 'users': UserBinding.consumer,
+ }
+
+ groups = ['inbound']
+
+        with apply_routes([Demultiplexer.as_route(path='/path/(?P<id>\d+)')]):
+ client = WSClient()
+ consumer = client.send_and_consume('websocket.connect', path='/path/789')
+ self.assertEqual(consumer.kwargs['id'], '789')
diff --git a/tests/test_delay.py b/tests/test_delay.py
new file mode 100644
index 0000000..a6de9dd
--- /dev/null
+++ b/tests/test_delay.py
@@ -0,0 +1,137 @@
+from __future__ import unicode_literals
+
+import json
+from datetime import timedelta
+
+from django.utils import timezone
+
+from channels import DEFAULT_CHANNEL_LAYER, Channel, channel_layers
+from channels.delay.models import DelayedMessage
+from channels.delay.worker import Worker
+from channels.test import ChannelTestCase
+
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+
+
+class PatchedWorker(Worker):
+ """Worker with specific numbers of loops"""
+ def get_termed(self):
+ if not self.__iters:
+ return True
+ self.__iters -= 1
+ return False
+
+ def set_termed(self, value):
+ self.__iters = value
+
+ termed = property(get_termed, set_termed)
+
+
+class WorkerTests(ChannelTestCase):
+
+ def test_invalid_message(self):
+ """
+ Tests the worker won't delay an invalid message
+ """
+ Channel('asgi.delay').send({'test': 'value'}, immediately=True)
+
+ worker = PatchedWorker(channel_layers[DEFAULT_CHANNEL_LAYER])
+ worker.termed = 1
+
+ worker.run()
+
+ self.assertEqual(DelayedMessage.objects.count(), 0)
+
+ def test_delay_message(self):
+ """
+ Tests the message is delayed and dispatched when due
+ """
+ Channel('asgi.delay').send({
+ 'channel': 'test',
+ 'delay': 1000,
+ 'content': {'test': 'value'}
+ }, immediately=True)
+
+ worker = PatchedWorker(channel_layers[DEFAULT_CHANNEL_LAYER])
+ worker.termed = 1
+
+ worker.run()
+
+ self.assertEqual(DelayedMessage.objects.count(), 1)
+
+ with mock.patch('django.utils.timezone.now', return_value=timezone.now() + timedelta(milliseconds=1001)):
+ worker.termed = 1
+ worker.run()
+
+ self.assertEqual(DelayedMessage.objects.count(), 0)
+
+ message = self.get_next_message('test', require=True)
+ self.assertEqual(message.content, {'test': 'value'})
+
+ def test_channel_full(self):
+ """
+ Tests that when channel capacity is hit when processing due messages,
+ message is requeued instead of dropped
+ """
+ for i in range(10):
+ Channel('asgi.delay').send({
+ 'channel': 'test',
+ 'delay': 1000,
+ 'content': {'test': 'value'}
+ }, immediately=True)
+
+ worker = PatchedWorker(channel_layers[DEFAULT_CHANNEL_LAYER])
+ worker.termed = 10
+ worker.run()
+
+        Channel('asgi.delay').send({
+            'channel': 'test',
+            'delay': 1000,
+            'content': {'test': 'value'}
+        }, immediately=True)
+
+ worker = PatchedWorker(channel_layers[DEFAULT_CHANNEL_LAYER])
+ worker.termed = 1
+ worker.run()
+
+ self.assertEqual(DelayedMessage.objects.count(), 11)
+
+ with mock.patch('django.utils.timezone.now', return_value=timezone.now() + timedelta(milliseconds=2000)):
+ worker.termed = 1
+ worker.run()
+
+ self.assertEqual(DelayedMessage.objects.count(), 1)
+
+
+class DelayedMessageTests(ChannelTestCase):
+
+ def _create_message(self):
+ kwargs = {
+ 'content': json.dumps({'test': 'data'}),
+ 'channel_name': 'test',
+ 'delay': 1000 * 5
+ }
+ delayed_message = DelayedMessage(**kwargs)
+ delayed_message.save()
+
+ return delayed_message
+
+ def test_is_due(self):
+ message = self._create_message()
+
+ self.assertEqual(DelayedMessage.objects.is_due().count(), 0)
+
+ with mock.patch('django.utils.timezone.now', return_value=message.due_date + timedelta(milliseconds=1)):
+ self.assertEqual(DelayedMessage.objects.is_due().count(), 1)
+
+ def test_send(self):
+ message = self._create_message()
+ message.send(channel_layer=channel_layers[DEFAULT_CHANNEL_LAYER])
+
+ self.get_next_message(message.channel_name, require=True)
+
+ self.assertEqual(DelayedMessage.objects.count(), 0)
diff --git a/tests/test_generic.py b/tests/test_generic.py
new file mode 100644
index 0000000..dc4841c
--- /dev/null
+++ b/tests/test_generic.py
@@ -0,0 +1,293 @@
+from __future__ import unicode_literals
+
+import json
+
+from django.test import override_settings
+from django.contrib.auth import get_user_model
+
+from channels import route_class
+from channels.exceptions import SendNotAvailableOnDemultiplexer
+from channels.generic import BaseConsumer, websockets
+from channels.test import ChannelTestCase, Client, WSClient, apply_routes
+
+
+@override_settings(SESSION_ENGINE="django.contrib.sessions.backends.cache")
+class GenericTests(ChannelTestCase):
+
+ def test_base_consumer(self):
+
+ class Consumers(BaseConsumer):
+
+ method_mapping = {
+ 'test.create': 'create',
+ 'test.test': 'test',
+ }
+
+ def create(self, message, **kwargs):
+ self.called = 'create'
+
+ def test(self, message, **kwargs):
+ self.called = 'test'
+
+ with apply_routes([route_class(Consumers)]):
+ client = Client()
+
+            # check that messages on these channels are routed to the right methods
+ self.assertEqual(client.send_and_consume('test.create').called, 'create')
+ self.assertEqual(client.send_and_consume('test.test').called, 'test')
+
+ # send to the channels without routes
+ client.send('test.wrong')
+ message = self.get_next_message('test.wrong')
+ self.assertEqual(client.channel_layer.router.match(message), None)
+
+ client.send('test')
+ message = self.get_next_message('test')
+ self.assertEqual(client.channel_layer.router.match(message), None)
+
+ def test_websockets_consumers_handlers(self):
+
+ class WebsocketConsumer(websockets.WebsocketConsumer):
+
+ def connect(self, message, **kwargs):
+ self.called = 'connect'
+ self.id = kwargs['id']
+
+ def disconnect(self, message, **kwargs):
+ self.called = 'disconnect'
+
+ def receive(self, text=None, bytes=None, **kwargs):
+ self.text = text
+
+        with apply_routes([route_class(WebsocketConsumer, path='/path/(?P<id>\d+)')]):
+ client = Client()
+
+ consumer = client.send_and_consume('websocket.connect', {'path': '/path/1'})
+ self.assertEqual(consumer.called, 'connect')
+ self.assertEqual(consumer.id, '1')
+
+ consumer = client.send_and_consume('websocket.receive', {'path': '/path/1', 'text': 'text'})
+ self.assertEqual(consumer.text, 'text')
+
+ consumer = client.send_and_consume('websocket.disconnect', {'path': '/path/1'})
+ self.assertEqual(consumer.called, 'disconnect')
+
+ def test_websockets_decorators(self):
+ class WebsocketConsumer(websockets.WebsocketConsumer):
+ strict_ordering = True
+
+ def connect(self, message, **kwargs):
+ self.order = message['order']
+
+ with apply_routes([route_class(WebsocketConsumer, path='/path')]):
+ client = Client()
+
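+            # The order-1 connect arrives first, but strict_ordering requeues
+            # it until the order-0 message has been consumed.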
+ client.send('websocket.connect', {'path': '/path', 'order': 1})
+ client.send('websocket.connect', {'path': '/path', 'order': 0})
+ client.consume('websocket.connect')
+ self.assertEqual(client.consume('websocket.connect').order, 0)
+ self.assertEqual(client.consume('websocket.connect').order, 1)
+
+ def test_websockets_http_session_and_channel_session(self):
+
+ class WebsocketConsumer(websockets.WebsocketConsumer):
+ http_user_and_session = True
+
+ user_model = get_user_model()
+ user = user_model.objects.create_user(username='test', email='test@test.com', password='123456')
+
+ client = WSClient()
+ client.force_login(user)
+ with apply_routes([route_class(WebsocketConsumer, path='/path')]):
+ connect = client.send_and_consume('websocket.connect', {'path': '/path'})
+ receive = client.send_and_consume('websocket.receive', {'path': '/path'}, text={'key': 'value'})
+ disconnect = client.send_and_consume('websocket.disconnect', {'path': '/path'})
+ self.assertEqual(
+ connect.message.http_session.session_key,
+ receive.message.http_session.session_key
+ )
+ self.assertEqual(
+ connect.message.http_session.session_key,
+ disconnect.message.http_session.session_key
+ )
+
+ def test_simple_as_route_method(self):
+
+ class WebsocketConsumer(websockets.WebsocketConsumer):
+
+ def connect(self, message, **kwargs):
+ self.message.reply_channel.send({'accept': True})
+ self.send(text=message.get('order'))
+
+ routes = [
+ WebsocketConsumer.as_route(attrs={"strict_ordering": True}, path='^/path$'),
+ WebsocketConsumer.as_route(path='^/path/2$'),
+ ]
+
+ self.assertIsNot(routes[0].consumer, WebsocketConsumer)
+ self.assertIs(routes[1].consumer, WebsocketConsumer)
+
+ with apply_routes(routes):
+ client = WSClient()
+
+ client.send('websocket.connect', {'path': '/path', 'order': 1})
+ client.send('websocket.connect', {'path': '/path', 'order': 0})
+ client.consume('websocket.connect', check_accept=False)
+ client.consume('websocket.connect')
+ self.assertEqual(client.receive(json=False), 0)
+ client.consume('websocket.connect')
+ self.assertEqual(client.receive(json=False), 1)
+
+ client.send_and_consume('websocket.connect', {'path': '/path/2', 'order': 'next'})
+ self.assertEqual(client.receive(json=False), 'next')
+
+ def test_as_route_method(self):
+ class WebsocketConsumer(BaseConsumer):
+ trigger = 'new'
+
+ def test(self, message, **kwargs):
+ self.message.reply_channel.send({'trigger': self.trigger})
+
+ method_mapping = {'mychannel': 'test'}
+
+ with apply_routes([
+ WebsocketConsumer.as_route(
+ {'method_mapping': method_mapping, 'trigger': 'from_as_route'},
+ name='filter',
+ ),
+ ]):
+ client = Client()
+
+ client.send_and_consume('mychannel', {'name': 'filter'})
+ self.assertEqual(client.receive(), {'trigger': 'from_as_route'})
+
+ def test_websockets_demultiplexer(self):
+
+ class MyWebsocketConsumer(websockets.JsonWebsocketConsumer):
+ def connect(self, message, multiplexer=None, **kwargs):
+ multiplexer.send(kwargs)
+
+ def disconnect(self, message, multiplexer=None, **kwargs):
+ multiplexer.send(kwargs)
+
+ def receive(self, content, multiplexer=None, **kwargs):
+ multiplexer.send(content)
+
+ class Demultiplexer(websockets.WebsocketDemultiplexer):
+
+ consumers = {
+ "mystream": MyWebsocketConsumer
+ }
+
+        with apply_routes([route_class(Demultiplexer, path='/path/(?P<id>\d+)')]):
+ client = WSClient()
+
+ client.send_and_consume('websocket.connect', path='/path/1')
+ self.assertEqual(client.receive(), {
+ "stream": "mystream",
+ "payload": {"id": "1"},
+ })
+
+ client.send_and_consume('websocket.receive', text={
+ "stream": "mystream",
+ "payload": {"text_field": "mytext"},
+ }, path='/path/1')
+ self.assertEqual(client.receive(), {
+ "stream": "mystream",
+ "payload": {"text_field": "mytext"},
+ })
+
+ client.send_and_consume('websocket.disconnect', path='/path/1')
+ self.assertEqual(client.receive(), {
+ "stream": "mystream",
+ "payload": {"id": "1"},
+ })
+
+ def test_websocket_demultiplexer_send(self):
+
+ class MyWebsocketConsumer(websockets.JsonWebsocketConsumer):
+ def receive(self, content, multiplexer=None, **kwargs):
+ self.send(content)
+
+ class Demultiplexer(websockets.WebsocketDemultiplexer):
+
+ consumers = {
+ "mystream": MyWebsocketConsumer
+ }
+
+        with apply_routes([route_class(Demultiplexer, path='/path/(?P<id>\d+)')]):
+ client = WSClient()
+
+ with self.assertRaises(SendNotAvailableOnDemultiplexer):
+ client.send_and_consume('websocket.receive', path='/path/1', text={
+ "stream": "mystream",
+ "payload": {"text_field": "mytext"},
+ })
+
+ client.receive()
+
+ def test_websocket_custom_json_serialization(self):
+
+ class WebsocketConsumer(websockets.JsonWebsocketConsumer):
+ @classmethod
+ def decode_json(cls, text):
+ obj = json.loads(text)
+ return dict((key.upper(), obj[key]) for key in obj)
+
+ @classmethod
+ def encode_json(cls, content):
+ lowered = dict((key.lower(), content[key]) for key in content)
+ return json.dumps(lowered)
+
+ def receive(self, content, multiplexer=None, **kwargs):
+ self.content_received = content
+ self.send({"RESPONSE": "HI"})
+
+ class MyMultiplexer(websockets.WebsocketMultiplexer):
+ @classmethod
+ def encode_json(cls, content):
+ lowered = dict((key.lower(), content[key]) for key in content)
+ return json.dumps(lowered)
+
+ with apply_routes([route_class(WebsocketConsumer, path='/path')]):
+ client = WSClient()
+
+ consumer = client.send_and_consume('websocket.receive', path='/path', text={"key": "value"})
+ self.assertEqual(consumer.content_received, {"KEY": "value"})
+
+ self.assertEqual(client.receive(), {"response": "HI"})
+
+ client.join_group('test_group')
+ WebsocketConsumer.group_send('test_group', {"KEY": "VALUE"})
+ self.assertEqual(client.receive(), {"key": "VALUE"})
+
+ def test_websockets_demultiplexer_custom_multiplexer(self):
+
+ class MyWebsocketConsumer(websockets.JsonWebsocketConsumer):
+ def connect(self, message, multiplexer=None, **kwargs):
+ multiplexer.send({"THIS_SHOULD_BE_LOWERCASED": "1"})
+
+ class MyMultiplexer(websockets.WebsocketMultiplexer):
+ @classmethod
+ def encode_json(cls, content):
+ lowered = {
+ "stream": content["stream"],
+ "payload": dict((key.lower(), content["payload"][key]) for key in content["payload"])
+ }
+ return json.dumps(lowered)
+
+ class Demultiplexer(websockets.WebsocketDemultiplexer):
+ multiplexer_class = MyMultiplexer
+
+ consumers = {
+ "mystream": MyWebsocketConsumer
+ }
+
+        with apply_routes([route_class(Demultiplexer, path='/path/(?P<id>\d+)')]):
+ client = WSClient()
+
+ client.send_and_consume('websocket.connect', path='/path/1')
+ self.assertEqual(client.receive(), {
+ "stream": "mystream",
+ "payload": {"this_should_be_lowercased": "1"},
+ })
diff --git a/tests/test_handler.py b/tests/test_handler.py
new file mode 100644
index 0000000..ffecf8b
--- /dev/null
+++ b/tests/test_handler.py
@@ -0,0 +1,355 @@
+from __future__ import unicode_literals
+
+import os
+from datetime import datetime
+from itertools import islice
+
+from django.http import FileResponse, HttpResponse, HttpResponseRedirect, JsonResponse, StreamingHttpResponse
+from six import BytesIO
+
+from channels import Channel
+from channels.handler import AsgiHandler
+from channels.test import ChannelTestCase
+
+
+class FakeAsgiHandler(AsgiHandler):
+ """
+ Handler subclass that just returns a premade response rather than
+ go into the view subsystem.
+ """
+
+ chunk_size = 30
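+    # Deliberately tiny (the real default is far larger) so short test
+    # bodies still exercise the chunking code paths.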
+
+ def __init__(self, response):
+ assert isinstance(response, (HttpResponse, StreamingHttpResponse))
+ self._response = response
+ super(FakeAsgiHandler, self).__init__()
+
+ def get_response(self, request):
+ return self._response
+
+
+class HandlerTests(ChannelTestCase):
+ """
+ Tests that the handler works correctly and round-trips things into a
+ correct response.
+ """
+
+ def test_basic(self):
+ """
+ Tests a simple request
+ """
+ # Make stub request and desired response
+ Channel("test").send({
+ "reply_channel": "test",
+ "http_version": "1.1",
+ "method": "GET",
+ "path": b"/test/",
+ })
+ response = HttpResponse(b"Hi there!", content_type="text/plain")
+ # Run the handler
+ handler = FakeAsgiHandler(response)
+ reply_messages = list(
+ handler(self.get_next_message("test", require=True)))
+ # Make sure we got the right number of messages
+ self.assertEqual(len(reply_messages), 1)
+ reply_message = reply_messages[0]
+ # Make sure the message looks correct
+ self.assertEqual(reply_message["content"], b"Hi there!")
+ self.assertEqual(reply_message["status"], 200)
+ self.assertEqual(reply_message.get("more_content", False), False)
+ self.assertEqual(
+ reply_message["headers"],
+ [
+ (b"Content-Type", b"text/plain"),
+ ],
+ )
+
+ def test_cookies(self):
+ Channel("test").send({
+ "reply_channel": "test",
+ "http_version": "1.1",
+ "method": "GET",
+ "path": b"/test/",
+ })
+ response = HttpResponse(b"Hi there!", content_type="text/plain")
+ response.set_signed_cookie('foo', '1', expires=datetime.now())
+ # Run the handler
+ handler = FakeAsgiHandler(response)
+ reply_messages = list(
+ handler(self.get_next_message("test", require=True)))
+ # Make sure we got the right number of messages
+ self.assertEqual(len(reply_messages), 1)
+ reply_message = reply_messages[0]
+ # Make sure the message looks correct
+ self.assertEqual(reply_message["content"], b"Hi there!")
+ self.assertEqual(reply_message["status"], 200)
+ self.assertEqual(reply_message.get("more_content", False), False)
+ self.assertEqual(reply_message["headers"][0], (b'Content-Type', b'text/plain'))
+ self.assertIn('foo=', reply_message["headers"][1][1].decode())
+
+ def test_headers(self):
+ Channel("test").send({
+ "reply_channel": "test",
+ "http_version": "1.1",
+ "method": "GET",
+ "path": b"/test/",
+ })
+ response = HttpResponse(b"Hi there!", content_type="text/plain")
+ response['foo'] = 1
+ response['bar'] = 1
+ del response['bar']
+        del response['nonexistent_key']
+ handler = FakeAsgiHandler(response)
+ reply_messages = list(
+ handler(self.get_next_message("test", require=True)))
+ # Make sure we got the right number of messages
+ self.assertEqual(len(reply_messages), 1)
+ reply_message = reply_messages[0]
+ # Make sure the message looks correct
+ self.assertEqual(reply_message["content"], b"Hi there!")
+ header_dict = dict(reply_messages[0]['headers'])
+ self.assertEqual(header_dict[b'foo'].decode(), '1')
+ self.assertNotIn('bar', header_dict)
+
+ def test_large(self):
+ """
+ Tests a large response (will need chunking)
+ """
+ # Make stub request and desired response
+ Channel("test").send({
+ "reply_channel": "test",
+ "http_version": "1.1",
+ "method": "GET",
+ "path": b"/test/",
+ })
+ response = HttpResponse(
+ b"Thefirstthirtybytesisrighthereandhereistherest")
+ # Run the handler
+ handler = FakeAsgiHandler(response)
+ reply_messages = list(
+ handler(self.get_next_message("test", require=True)))
+ # Make sure we got the right number of messages
+ self.assertEqual(len(reply_messages), 2)
+ # Make sure the messages look correct
+ self.assertEqual(reply_messages[0][
+ "content"], b"Thefirstthirtybytesisrighthere")
+ self.assertEqual(reply_messages[0]["status"], 200)
+ self.assertEqual(reply_messages[0]["more_content"], True)
+ self.assertEqual(reply_messages[1]["content"], b"andhereistherest")
+ self.assertEqual(reply_messages[1].get("more_content", False), False)
+
+ def test_empty(self):
+ """
+ Tests an empty response
+ """
+ # Make stub request and desired response
+ Channel("test").send({
+ "reply_channel": "test",
+ "http_version": "1.1",
+ "method": "GET",
+ "path": b"/test/",
+ })
+ response = HttpResponse(b"", status=304)
+ # Run the handler
+ handler = FakeAsgiHandler(response)
+ reply_messages = list(
+ handler(self.get_next_message("test", require=True))
+ )
+ # Make sure we got the right number of messages
+ self.assertEqual(len(reply_messages), 1)
+ # Make sure the messages look correct
+ self.assertEqual(reply_messages[0].get("content", b""), b"")
+ self.assertEqual(reply_messages[0]["status"], 304)
+ self.assertEqual(reply_messages[0]["more_content"], False)
+
+ def test_empty_streaming(self):
+ """
+ Tests an empty streaming response
+ """
+ # Make stub request and desired response
+ Channel("test").send({
+ "reply_channel": "test",
+ "http_version": "1.1",
+ "method": "GET",
+ "path": b"/test/",
+ })
+ response = StreamingHttpResponse([], status=304)
+ # Run the handler
+ handler = FakeAsgiHandler(response)
+ reply_messages = list(
+ handler(self.get_next_message("test", require=True))
+ )
+ # Make sure we got the right number of messages
+ self.assertEqual(len(reply_messages), 1)
+ # Make sure the messages look correct
+ self.assertEqual(reply_messages[0].get("content", b""), b"")
+ self.assertEqual(reply_messages[0]["status"], 304)
+ self.assertEqual(reply_messages[0]["more_content"], False)
+
+ def test_chunk_bytes(self):
+ """
+ Makes sure chunk_bytes works correctly
+ """
+ # Empty string should still return one chunk
+ result = list(FakeAsgiHandler.chunk_bytes(b""))
+ self.assertEqual(len(result), 1)
+ self.assertEqual(result[0][0], b"")
+ self.assertEqual(result[0][1], True)
+ # Below chunk size
+ result = list(FakeAsgiHandler.chunk_bytes(
+ b"12345678901234567890123456789"))
+ self.assertEqual(len(result), 1)
+ self.assertEqual(result[0][0], b"12345678901234567890123456789")
+ self.assertEqual(result[0][1], True)
+ # Exactly chunk size
+ result = list(FakeAsgiHandler.chunk_bytes(
+ b"123456789012345678901234567890"))
+ self.assertEqual(len(result), 1)
+ self.assertEqual(result[0][0], b"123456789012345678901234567890")
+ self.assertEqual(result[0][1], True)
+ # Just above chunk size
+ result = list(FakeAsgiHandler.chunk_bytes(
+ b"123456789012345678901234567890a"))
+ self.assertEqual(len(result), 2)
+ self.assertEqual(result[0][0], b"123456789012345678901234567890")
+ self.assertEqual(result[0][1], False)
+ self.assertEqual(result[1][0], b"a")
+ self.assertEqual(result[1][1], True)
+
+ def test_iterator(self):
+ Channel("test").send({
+ "reply_channel": "test",
+ "http_version": "1.1",
+ "method": "GET",
+ "path": b"/test/",
+ })
+ response = HttpResponse(range(10))
+ handler = FakeAsgiHandler(response)
+ reply_messages = list(
+ handler(self.get_next_message("test", require=True)))
+ self.assertEqual(len(reply_messages), 1)
+ self.assertEqual(reply_messages[0]["content"], b"0123456789")
+
+ def test_streaming_data(self):
+ Channel("test").send({
+ "reply_channel": "test",
+ "http_version": "1.1",
+ "method": "GET",
+ "path": b"/test/",
+ })
+ response = StreamingHttpResponse('Line: %s' % i for i in range(10))
+ handler = FakeAsgiHandler(response)
+ reply_messages = list(
+ handler(self.get_next_message("test", require=True)))
+ self.assertEqual(len(reply_messages), 11)
+ self.assertEqual(reply_messages[0]["content"], b"Line: 0")
+ self.assertEqual(reply_messages[9]["content"], b"Line: 9")
+
+ def test_real_file_response(self):
+ Channel("test").send({
+ "reply_channel": "test",
+ "http_version": "1.1",
+ "method": "GET",
+ "path": b"/test/",
+ })
+ current_dir = os.path.realpath(os.path.join(
+ os.getcwd(), os.path.dirname(__file__)))
+ response = FileResponse(
+ open(os.path.join(current_dir, 'a_file'), 'rb'))
+ handler = FakeAsgiHandler(response)
+ reply_messages = list(
+ handler(self.get_next_message("test", require=True)))
+ self.assertEqual(len(reply_messages), 2)
+ self.assertEqual(response.getvalue(), b'')
+
+ def test_bytes_file_response(self):
+ Channel("test").send({
+ "reply_channel": "test",
+ "http_version": "1.1",
+ "method": "GET",
+ "path": b"/test/",
+ })
+ response = FileResponse(BytesIO(b'sadfdasfsdfsadf'))
+ handler = FakeAsgiHandler(response)
+ reply_messages = list(
+ handler(self.get_next_message("test", require=True)))
+ self.assertEqual(len(reply_messages), 2)
+
+ def test_string_file_response(self):
+ Channel("test").send({
+ "reply_channel": "test",
+ "http_version": "1.1",
+ "method": "GET",
+ "path": b"/test/",
+ })
+ response = FileResponse('abcd')
+ handler = FakeAsgiHandler(response)
+ reply_messages = list(
+ handler(self.get_next_message("test", require=True)))
+ self.assertEqual(len(reply_messages), 5)
+
+ def test_non_streaming_file_response(self):
+ Channel("test").send({
+ "reply_channel": "test",
+ "http_version": "1.1",
+ "method": "GET",
+ "path": b"/test/",
+ })
+ response = FileResponse(BytesIO(b'sadfdasfsdfsadf'))
+        # This is to test the exception handling; it would only happen if
+        # StreamingHttpResponse was incorrectly subclassed.
+ response.streaming = False
+
+ handler = FakeAsgiHandler(response)
+ with self.assertRaises(AttributeError):
+ list(handler(self.get_next_message("test", require=True)))
+
+ def test_unclosable_filelike_object(self):
+ Channel("test").send({
+ "reply_channel": "test",
+ "http_version": "1.1",
+ "method": "GET",
+ "path": b"/test/",
+ })
+
+ # This is a readable object that cannot be closed.
+ class Unclosable:
+
+ def read(self, n=-1):
+ # Nothing to see here
+ return b""
+
+ response = FileResponse(Unclosable())
+ handler = FakeAsgiHandler(response)
+ reply_messages = list(islice(handler(self.get_next_message("test", require=True)), 5))
+ self.assertEqual(len(reply_messages), 1)
+ response.close()
+
+ def test_json_response(self):
+ Channel("test").send({
+ "reply_channel": "test",
+ "http_version": "1.1",
+ "method": "GET",
+ "path": b"/test/",
+ })
+ response = JsonResponse({'foo': (1, 2)})
+ handler = FakeAsgiHandler(response)
+ reply_messages = list(handler(self.get_next_message("test", require=True)))
+ self.assertEqual(len(reply_messages), 1)
+ self.assertEqual(reply_messages[0]['content'], b'{"foo": [1, 2]}')
+
+ def test_redirect(self):
+ for redirect_to in ['/', '..', 'https://example.com']:
+ Channel("test").send({
+ "reply_channel": "test",
+ "http_version": "1.1",
+ "method": "GET",
+ "path": b"/test/",
+ })
+ response = HttpResponseRedirect(redirect_to)
+ handler = FakeAsgiHandler(response)
+ reply_messages = list(handler(self.get_next_message("test", require=True)))
+ self.assertEqual(reply_messages[0]['status'], 302)
+ header_dict = dict(reply_messages[0]['headers'])
+ self.assertEqual(header_dict[b'Location'].decode(), redirect_to)
diff --git a/tests/test_management.py b/tests/test_management.py
new file mode 100644
index 0000000..61900d7
--- /dev/null
+++ b/tests/test_management.py
@@ -0,0 +1,232 @@
+from __future__ import unicode_literals
+
+import logging
+
+from asgiref.inmemory import ChannelLayer
+from django.core.management import CommandError, call_command
+from django.test import TestCase, mock
+from six import StringIO
+
+from channels.asgi import channel_layers, ChannelLayerWrapper
+from channels.binding.base import BindingMetaclass
+from channels.handler import ViewConsumer
+from channels.management.commands import runserver
+from channels.staticfiles import StaticFilesConsumer
+
+
+class FakeChannelLayer(ChannelLayer):
+ '''
+ Dummy class to bypass the 'inmemory' string check.
+ '''
+ pass
+
+
+@mock.patch('channels.management.commands.runworker.Worker')
+class RunWorkerTests(TestCase):
+
+ def setUp(self):
+ import channels.log
+ self.stream = StringIO()
+ channels.log.handler = logging.StreamHandler(self.stream)
+ BindingMetaclass.binding_classes = []
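+        # Swap in a layer whose class bypasses the 'inmemory' check so
+        # runworker will accept it; tearDown() restores the original.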
+ self._old_layer = channel_layers.set(
+ 'fake_channel',
+ ChannelLayerWrapper(
+ FakeChannelLayer(),
+ 'fake_channel',
+ channel_layers['fake_channel'].routing[:],
+ )
+ )
+
+ def tearDown(self):
+ channel_layers.set('fake_channel', self._old_layer)
+
+ def test_runworker_no_local_only(self, mock_worker):
+ """
+ Runworker should fail with the default "inmemory" worker.
+ """
+ with self.assertRaises(CommandError):
+ call_command('runworker')
+
+ def test_debug(self, mock_worker):
+ """
+ Test that the StaticFilesConsumer is used in debug mode.
+ """
+ with self.settings(
+ DEBUG=True,
+ STATIC_URL='/static/',
+ INSTALLED_APPS=['channels', 'django.contrib.staticfiles'],
+ ):
+ # Use 'fake_channel' that bypasses the 'inmemory' check
+ call_command('runworker', '--layer', 'fake_channel')
+ mock_worker.assert_called_with(
+ only_channels=None,
+ exclude_channels=None,
+ callback=None,
+ channel_layer=mock.ANY,
+ )
+
+ channel_layer = mock_worker.call_args[1]['channel_layer']
+ static_consumer = channel_layer.router.root.routing[0].consumer
+ self.assertIsInstance(static_consumer, StaticFilesConsumer)
+
+ def test_debug_without_staticfiles(self, mock_worker):
+ """
+ Test that the StaticFilesConsumer is not used in debug mode when staticfiles app is not configured.
+ """
+ with self.settings(DEBUG=True, STATIC_URL=None, INSTALLED_APPS=['channels']):
+ # Use 'fake_channel' that bypasses the 'inmemory' check
+ call_command('runworker', '--layer', 'fake_channel')
+ mock_worker.assert_called_with(
+ only_channels=None,
+ exclude_channels=None,
+ callback=None,
+ channel_layer=mock.ANY,
+ )
+
+ channel_layer = mock_worker.call_args[1]['channel_layer']
+ static_consumer = channel_layer.router.root.routing[0].consumer
+ self.assertNotIsInstance(static_consumer, StaticFilesConsumer)
+ self.assertIsInstance(static_consumer, ViewConsumer)
+
+ def test_runworker(self, mock_worker):
+ # Use 'fake_channel' that bypasses the 'inmemory' check
+ call_command('runworker', '--layer', 'fake_channel')
+ mock_worker.assert_called_with(
+ callback=None,
+ only_channels=None,
+ channel_layer=mock.ANY,
+ exclude_channels=None,
+ )
+
+ def test_runworker_verbose(self, mocked_worker):
+ # Use 'fake_channel' that bypasses the 'inmemory' check
+ call_command('runworker', '--layer', 'fake_channel', '--verbosity', '2')
+
+ # Verify the callback is set
+ mocked_worker.assert_called_with(
+ callback=mock.ANY,
+ only_channels=None,
+ channel_layer=mock.ANY,
+ exclude_channels=None,
+ )
+
+
+class RunServerTests(TestCase):
+
+ def setUp(self):
+ import channels.log
+ self.stream = StringIO()
+        # Capture the logging of the channels module to match against
+        # the output.
+ channels.log.handler = logging.StreamHandler(self.stream)
+
+ @mock.patch('channels.management.commands.runserver.sys.stdout', new_callable=StringIO)
+ @mock.patch('channels.management.commands.runserver.Command.server_cls')
+ @mock.patch('channels.management.commands.runworker.Worker')
+ def test_runserver_basic(self, mocked_worker, mocked_server, mock_stdout):
+ # Django's autoreload util uses threads and this is not needed
+ # in the test environment.
+ # See:
+ # https://github.com/django/django/blob/master/django/core/management/commands/runserver.py#L105
+ call_command('runserver', '--noreload')
+ mocked_server.assert_called_with(
+ endpoints=['tcp:port=8000:interface=127.0.0.1'],
+ signal_handlers=True,
+ http_timeout=60,
+ action_logger=mock.ANY,
+ channel_layer=mock.ANY,
+ ws_protocols=None,
+ root_path='',
+ websocket_handshake_timeout=5,
+ )
+
+ @mock.patch('channels.management.commands.runserver.sys.stdout', new_callable=StringIO)
+ @mock.patch('channels.management.commands.runserver.Command.server_cls')
+ @mock.patch('channels.management.commands.runworker.Worker')
+ def test_runserver_debug(self, mocked_worker, mocked_server, mock_stdout):
+ """
+ Test that the server runs with `DEBUG=True`.
+ """
+ # Debug requires the static url is set.
+ with self.settings(DEBUG=True, STATIC_URL='/static/'):
+ call_command('runserver', '--noreload')
+ mocked_server.assert_called_with(
+ endpoints=['tcp:port=8000:interface=127.0.0.1'],
+ signal_handlers=True,
+ http_timeout=60,
+ action_logger=mock.ANY,
+ channel_layer=mock.ANY,
+ ws_protocols=None,
+ root_path='',
+ websocket_handshake_timeout=5,
+ )
+
+ call_command('runserver', '--noreload', 'localhost:8001')
+ mocked_server.assert_called_with(
+ endpoints=['tcp:port=8001:interface=localhost'],
+ signal_handlers=True,
+ http_timeout=60,
+ action_logger=mock.ANY,
+ channel_layer=mock.ANY,
+ ws_protocols=None,
+ root_path='',
+ websocket_handshake_timeout=5,
+ )
+
+ self.assertFalse(
+ mocked_worker.called,
+ "The worker should not be called with '--noworker'",
+ )
+
+ @mock.patch('channels.management.commands.runserver.sys.stdout', new_callable=StringIO)
+ @mock.patch('channels.management.commands.runserver.Command.server_cls')
+ @mock.patch('channels.management.commands.runworker.Worker')
+ def test_runserver_noworker(self, mocked_worker, mocked_server, mock_stdout):
+ '''
+ Test that the Worker is not called when using the `--noworker` parameter.
+ '''
+ call_command('runserver', '--noreload', '--noworker')
+ mocked_server.assert_called_with(
+ endpoints=['tcp:port=8000:interface=127.0.0.1'],
+ signal_handlers=True,
+ http_timeout=60,
+ action_logger=mock.ANY,
+ channel_layer=mock.ANY,
+ ws_protocols=None,
+ root_path='',
+ websocket_handshake_timeout=5,
+ )
+ self.assertFalse(
+ mocked_worker.called,
+ "The worker should not be called with '--noworker'",
+ )
+
+ @mock.patch('channels.management.commands.runserver.sys.stderr', new_callable=StringIO)
+ def test_log_action(self, mocked_stderr):
+ cmd = runserver.Command()
+ test_actions = [
+ (100, 'http', 'complete', 'HTTP GET /a-path/ 100 [0.12, a-client]'),
+ (200, 'http', 'complete', 'HTTP GET /a-path/ 200 [0.12, a-client]'),
+ (300, 'http', 'complete', 'HTTP GET /a-path/ 300 [0.12, a-client]'),
+ (304, 'http', 'complete', 'HTTP GET /a-path/ 304 [0.12, a-client]'),
+ (400, 'http', 'complete', 'HTTP GET /a-path/ 400 [0.12, a-client]'),
+ (404, 'http', 'complete', 'HTTP GET /a-path/ 404 [0.12, a-client]'),
+ (500, 'http', 'complete', 'HTTP GET /a-path/ 500 [0.12, a-client]'),
+ (None, 'websocket', 'connected', 'WebSocket CONNECT /a-path/ [a-client]'),
+ (None, 'websocket', 'disconnected', 'WebSocket DISCONNECT /a-path/ [a-client]'),
+ (None, 'websocket', 'something', ''), # This shouldn't happen
+ ]
+
+ for status_code, protocol, action, output in test_actions:
+ details = {
+ 'status': status_code,
+ 'method': 'GET',
+ 'path': '/a-path/',
+ 'time_taken': 0.12345,
+ 'client': 'a-client',
+ }
+ cmd.log_action(protocol, action, details)
+ self.assertIn(output, mocked_stderr.getvalue())
+ # Clear previous output
+ mocked_stderr.truncate(0)
diff --git a/tests/test_request.py b/tests/test_request.py
new file mode 100644
index 0000000..39702ca
--- /dev/null
+++ b/tests/test_request.py
@@ -0,0 +1,244 @@
+from __future__ import unicode_literals
+
+from django.utils import six
+
+from channels import Channel
+from channels.exceptions import RequestAborted, RequestTimeout
+from channels.handler import AsgiRequest
+from channels.test import ChannelTestCase
+
+
+class RequestTests(ChannelTestCase):
+ """
+ Tests that ASGI request handling correctly decodes HTTP requests.
+ """
+
+ def test_basic(self):
+ """
+ Tests that the handler can decode the most basic request message,
+ with all optional fields omitted.
+ """
+ Channel("test").send({
+ "reply_channel": "test-reply",
+ "http_version": "1.1",
+ "method": "GET",
+ "path": "/test/",
+ }, immediately=True)
+ request = AsgiRequest(self.get_next_message("test"))
+ self.assertEqual(request.path, "/test/")
+ self.assertEqual(request.method, "GET")
+ self.assertFalse(request.body)
+ self.assertNotIn("HTTP_HOST", request.META)
+ self.assertNotIn("REMOTE_ADDR", request.META)
+ self.assertNotIn("REMOTE_HOST", request.META)
+ self.assertNotIn("REMOTE_PORT", request.META)
+ self.assertIn("SERVER_NAME", request.META)
+ self.assertIn("SERVER_PORT", request.META)
+ self.assertFalse(request.GET)
+ self.assertFalse(request.POST)
+ self.assertFalse(request.COOKIES)
+
+ def test_extended(self):
+ """
+ Tests a more fully-featured GET request
+ """
+ Channel("test").send({
+ "reply_channel": "test",
+ "http_version": "1.1",
+ "method": "GET",
+ "path": "/test2/",
+ "query_string": b"x=1&y=%26foo+bar%2Bbaz",
+ "headers": {
+ "host": b"example.com",
+ "cookie": b"test-time=1448995585123; test-value=yeah",
+ },
+ "client": ["10.0.0.1", 1234],
+ "server": ["10.0.0.2", 80],
+ }, immediately=True)
+ request = AsgiRequest(self.get_next_message("test"))
+ self.assertEqual(request.path, "/test2/")
+ self.assertEqual(request.method, "GET")
+ self.assertFalse(request.body)
+ self.assertEqual(request.META["HTTP_HOST"], "example.com")
+ self.assertEqual(request.META["REMOTE_ADDR"], "10.0.0.1")
+ self.assertEqual(request.META["REMOTE_HOST"], "10.0.0.1")
+ self.assertEqual(request.META["REMOTE_PORT"], 1234)
+ self.assertEqual(request.META["SERVER_NAME"], "10.0.0.2")
+ self.assertEqual(request.META["SERVER_PORT"], "80")
+ self.assertEqual(request.GET["x"], "1")
+ self.assertEqual(request.GET["y"], "&foo bar+baz")
+ self.assertEqual(request.COOKIES["test-time"], "1448995585123")
+ self.assertEqual(request.COOKIES["test-value"], "yeah")
+ self.assertFalse(request.POST)
+
+ def test_post_single(self):
+ """
+ Tests a POST body contained within a single message.
+ """
+ Channel("test").send({
+ "reply_channel": "test",
+ "http_version": "1.1",
+ "method": "POST",
+ "path": "/test2/",
+ "query_string": "django=great",
+ "body": b"ponies=are+awesome",
+ "headers": {
+ "host": b"example.com",
+ "content-type": b"application/x-www-form-urlencoded",
+ "content-length": b"18",
+ },
+ }, immediately=True)
+ request = AsgiRequest(self.get_next_message("test"))
+ self.assertEqual(request.path, "/test2/")
+ self.assertEqual(request.method, "POST")
+ self.assertEqual(request.body, b"ponies=are+awesome")
+ self.assertEqual(request.META["HTTP_HOST"], "example.com")
+ self.assertEqual(request.META["CONTENT_TYPE"], "application/x-www-form-urlencoded")
+ self.assertEqual(request.GET["django"], "great")
+ self.assertEqual(request.POST["ponies"], "are awesome")
+ with self.assertRaises(KeyError):
+ request.POST["django"]
+ with self.assertRaises(KeyError):
+ request.GET["ponies"]
+
+ def test_post_multiple(self):
+ """
+ Tests a POST body across multiple messages (first part in 'body').
+ """
+ Channel("test").send({
+ "reply_channel": "test",
+ "http_version": "1.1",
+ "method": "POST",
+ "path": "/test/",
+ "body": b"there_a",
+ "body_channel": "test-input",
+ "headers": {
+ "host": b"example.com",
+ "content-type": b"application/x-www-form-urlencoded",
+ "content-length": b"21",
+ },
+ }, immediately=True)
+ Channel("test-input").send({
+ "content": b"re=fou",
+ "more_content": True,
+ }, immediately=True)
+ Channel("test-input").send({
+ "content": b"r+lights",
+ }, immediately=True)
+ request = AsgiRequest(self.get_next_message("test"))
+ self.assertEqual(request.method, "POST")
+ self.assertEqual(request.body, b"there_are=four+lights")
+ self.assertEqual(request.META["CONTENT_TYPE"], "application/x-www-form-urlencoded")
+ self.assertEqual(request.POST["there_are"], "four lights")
+
+ def test_post_files(self):
+ """
+ Tests POSTing files using multipart form data and multiple messages,
+ with no body in the initial message.
+ """
+ body = (
+ b'--BOUNDARY\r\n' +
+ b'Content-Disposition: form-data; name="title"\r\n\r\n' +
+ b'My First Book\r\n' +
+ b'--BOUNDARY\r\n' +
+ b'Content-Disposition: form-data; name="pdf"; filename="book.pdf"\r\n\r\n' +
+ b'FAKEPDFBYTESGOHERE' +
+ b'--BOUNDARY--'
+ )
+ Channel("test").send({
+ "reply_channel": "test",
+ "http_version": "1.1",
+ "method": "POST",
+ "path": "/test/",
+ "body_channel": "test-input",
+ "headers": {
+ "content-type": b"multipart/form-data; boundary=BOUNDARY",
+ "content-length": six.text_type(len(body)).encode("ascii"),
+ },
+ }, immediately=True)
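+        # The initial message deliberately omits 'body'; both chunks arrive
+        # on the body_channel, split at an arbitrary byte offset.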
+ Channel("test-input").send({
+ "content": body[:20],
+ "more_content": True,
+ }, immediately=True)
+ Channel("test-input").send({
+ "content": body[20:],
+ }, immediately=True)
+ request = AsgiRequest(self.get_next_message("test"))
+ self.assertEqual(request.method, "POST")
+ self.assertEqual(len(request.body), len(body))
+ self.assertTrue(request.META["CONTENT_TYPE"].startswith("multipart/form-data"))
+ self.assertFalse(request._post_parse_error)
+ self.assertEqual(request.POST["title"], "My First Book")
+ self.assertEqual(request.FILES["pdf"].read(), b"FAKEPDFBYTESGOHERE")
+
+ def test_stream(self):
+ """
+ Tests the body stream is emulated correctly.
+ """
+ Channel("test").send({
+ "reply_channel": "test",
+ "http_version": "1.1",
+ "method": "PUT",
+ "path": "/",
+ "body": b"onetwothree",
+ "headers": {
+ "host": b"example.com",
+ "content-length": b"11",
+ },
+ }, immediately=True)
+ request = AsgiRequest(self.get_next_message("test", require=True))
+ self.assertEqual(request.method, "PUT")
+ self.assertEqual(request.read(3), b"one")
+ self.assertEqual(request.read(), b"twothree")
+
+ def test_request_timeout(self):
+ """
+ Tests that the code correctly gives up after the request body read timeout.
+ """
+ Channel("test").send({
+ "reply_channel": "test",
+ "http_version": "1.1",
+ "method": "POST",
+ "path": "/test/",
+ "body": b"there_a",
+ "body_channel": "test-input",
+ "headers": {
+ "host": b"example.com",
+ "content-type": b"application/x-www-form-urlencoded",
+ "content-length": b"21",
+ },
+ }, immediately=True)
+ # Say there's more content, but never provide it! Muahahaha!
+ Channel("test-input").send({
+ "content": b"re=fou",
+ "more_content": True,
+ }, immediately=True)
+
+ class VeryImpatientRequest(AsgiRequest):
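+            # A zero timeout should make the handler give up immediately
+            # when the promised extra body chunk never arrives.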
+ body_receive_timeout = 0
+
+ with self.assertRaises(RequestTimeout):
+ VeryImpatientRequest(self.get_next_message("test"))
+
+ def test_request_abort(self):
+ """
+ Tests that the code aborts when a request-body close is sent.
+ """
+ Channel("test").send({
+ "reply_channel": "test",
+ "http_version": "1.1",
+ "method": "POST",
+ "path": "/test/",
+ "body": b"there_a",
+ "body_channel": "test-input",
+ "headers": {
+ "host": b"example.com",
+ "content-type": b"application/x-www-form-urlencoded",
+ "content-length": b"21",
+ },
+ }, immediately=True)
+ Channel("test-input").send({
+ "closed": True,
+ }, immediately=True)
+ with self.assertRaises(RequestAborted):
+ AsgiRequest(self.get_next_message("test"))
diff --git a/tests/test_routing.py b/tests/test_routing.py
new file mode 100644
index 0000000..c246347
--- /dev/null
+++ b/tests/test_routing.py
@@ -0,0 +1,350 @@
+from __future__ import unicode_literals
+
+from django.test import SimpleTestCase
+
+from channels.generic import BaseConsumer
+from channels.message import Message
+from channels.routing import Router, include, route, route_class
+from channels.utils import name_that_thing
+
+
+# Fake consumers and routing sets that can be imported by string
+def consumer_1():
+ pass
+
+
+def consumer_2():
+ pass
+
+
+def consumer_3():
+ pass
+
+
+class TestClassConsumer(BaseConsumer):
+
+ method_mapping = {
+ "test.channel": "some_method",
+ }
+
+ def some_method(self, message, **kwargs):
+ pass
+
+
+chatroom_routing = [
+ route("websocket.connect", consumer_2, path=r"^/chat/(?P[^/]+)/$"),
+ route("websocket.connect", consumer_3, path=r"^/mentions/$"),
+]
+
+chatroom_routing_nolinestart = [
+ route("websocket.connect", consumer_2, path=r"/chat/(?P[^/]+)/$"),
+ route("websocket.connect", consumer_3, path=r"/mentions/$"),
+]
+
+class_routing = [
+ route_class(TestClassConsumer, path=r"^/foobar/$"),
+]
+
+
+class RoutingTests(SimpleTestCase):
+ """
+ Tests that the router's routing code works correctly.
+ """
+
+ def assertRoute(self, router, channel, content, consumer, kwargs=None):
+ """
+        Asserts that asking the `router` to route `content` as a message
+        from `channel` returns the given `consumer`, optionally also
+        checking the `kwargs` that would be passed to it.
+
+ Use `consumer` = None to assert that no route is found.
+ """
+ message = Message(content, channel, channel_layer="fake channel layer")
+ match = router.match(message)
+ if match is None:
+ if consumer is None:
+ return
+ else:
+ self.fail("No route found for %s on %s; expecting %s" % (
+ content,
+ channel,
+ name_that_thing(consumer),
+ ))
+ else:
+ mconsumer, mkwargs = match
+ if consumer is None:
+ self.fail("Route found for %s on %s; expecting no route." % (
+ content,
+ channel,
+ ))
+ self.assertEqual(consumer, mconsumer, "Route found for %s on %s; but wrong consumer (%s not %s)." % (
+ content,
+ channel,
+ name_that_thing(mconsumer),
+ name_that_thing(consumer),
+ ))
+ if kwargs is not None:
+ self.assertEqual(kwargs, mkwargs, "Route found for %s on %s; but wrong kwargs (%s not %s)." % (
+ content,
+ channel,
+ mkwargs,
+ kwargs,
+ ))
+
+ def test_assumption(self):
+ """
+        Ensures the test consumers don't compare equal; if they ever did,
+        this test file would pass while missing most bugs.
+ """
+ self.assertEqual(consumer_1, consumer_1)
+ self.assertNotEqual(consumer_1, consumer_2)
+ self.assertNotEqual(consumer_1, consumer_3)
+
+ def test_dict(self):
+ """
+ Tests dict expansion
+ """
+ router = Router({
+ "http.request": consumer_1,
+ "http.disconnect": consumer_2,
+ })
+ self.assertRoute(
+ router,
+ channel="http.request",
+ content={},
+ consumer=consumer_1,
+ kwargs={},
+ )
+ self.assertRoute(
+ router,
+ channel="http.request",
+ content={"path": "/chat/"},
+ consumer=consumer_1,
+ kwargs={},
+ )
+ self.assertRoute(
+ router,
+ channel="http.disconnect",
+ content={},
+ consumer=consumer_2,
+ kwargs={},
+ )
+
+ def test_filters(self):
+ """
+ Tests that filters catch things correctly.
+ """
+ router = Router([
+ route("http.request", consumer_1, path=r"^/chat/$"),
+ route("http.disconnect", consumer_2),
+ route("http.request", consumer_3),
+ ])
+ # Filter hit
+ self.assertRoute(
+ router,
+ channel="http.request",
+ content={"path": "/chat/"},
+ consumer=consumer_1,
+ kwargs={},
+ )
+ # Fall-through
+ self.assertRoute(
+ router,
+ channel="http.request",
+ content={},
+ consumer=consumer_3,
+ kwargs={},
+ )
+ self.assertRoute(
+ router,
+ channel="http.request",
+ content={"path": "/liveblog/"},
+ consumer=consumer_3,
+ kwargs={},
+ )
+
+ def test_include(self):
+ """
+ Tests inclusion without a prefix
+ """
+ router = Router([
+ include("tests.test_routing.chatroom_routing"),
+ ])
+ self.assertRoute(
+ router,
+ channel="websocket.connect",
+ content={"path": "/boom/"},
+ consumer=None,
+ )
+ self.assertRoute(
+ router,
+ channel="websocket.connect",
+ content={"path": "/chat/django/"},
+ consumer=consumer_2,
+ kwargs={"room": "django"},
+ )
+ self.assertRoute(
+ router,
+ channel="websocket.connect",
+ content={"path": "/mentions/"},
+ consumer=consumer_3,
+ kwargs={},
+ )
+
+ def test_route_class(self):
+ """
+        Tests route_class channel mapping and path filtering
+ """
+ router = Router([
+ include("tests.test_routing.class_routing"),
+ ])
+ self.assertRoute(
+ router,
+ channel="websocket.connect",
+ content={"path": "/foobar/"},
+ consumer=None,
+ )
+ self.assertRoute(
+ router,
+ channel="test.channel",
+ content={"path": "/foobar/"},
+ consumer=TestClassConsumer,
+ )
+ self.assertRoute(
+ router,
+ channel="test.channel",
+ content={"path": "/"},
+ consumer=None,
+ )
+
+ def test_include_prefix(self):
+ """
+ Tests inclusion with a prefix
+ """
+ router = Router([
+ include("tests.test_routing.chatroom_routing", path="^/ws/v(?P[0-9]+)"),
+ ])
+ self.assertRoute(
+ router,
+ channel="websocket.connect",
+ content={"path": "/boom/"},
+ consumer=None,
+ )
+ self.assertRoute(
+ router,
+ channel="websocket.connect",
+ content={"path": "/chat/django/"},
+ consumer=None,
+ )
+ self.assertRoute(
+ router,
+ channel="websocket.connect",
+ content={"path": "/ws/v2/chat/django/"},
+ consumer=consumer_2,
+ kwargs={"version": "2", "room": "django"},
+ )
+ self.assertRoute(
+ router,
+ channel="websocket.connect",
+ content={"path": "/ws/v1/mentions/"},
+ consumer=consumer_3,
+ kwargs={"version": "1"},
+ )
+ # Check it works without the ^s too.
+ router = Router([
+ include("tests.test_routing.chatroom_routing_nolinestart", path="/ws/v(?P[0-9]+)"),
+ ])
+ self.assertRoute(
+ router,
+ channel="websocket.connect",
+ content={"path": "/ws/v2/chat/django/"},
+ consumer=consumer_2,
+ kwargs={"version": "2", "room": "django"},
+ )
+
+ def test_positional_pattern(self):
+ """
+ Tests that regexes with positional groups are rejected.
+ """
+ with self.assertRaises(ValueError):
+ Router([
+ route("http.request", consumer_1, path=r"^/chat/([^/]+)/$"),
+ ])
+
+ def test_mixed_unicode_bytes(self):
+ """
+ Tests that having the message key be bytes and pattern unicode (or vice-versa)
+ still works.
+ """
+ # Unicode patterns, byte message
+ router = Router([
+ route("websocket.connect", consumer_1, path="^/foo/"),
+ include("tests.test_routing.chatroom_routing", path="^/ws/v(?P[0-9]+)"),
+ ])
+ self.assertRoute(
+ router,
+ channel="websocket.connect",
+ content={"path": b"/boom/"},
+ consumer=None,
+ )
+ self.assertRoute(
+ router,
+ channel="websocket.connect",
+ content={"path": b"/foo/"},
+ consumer=consumer_1,
+ )
+ self.assertRoute(
+ router,
+ channel="websocket.connect",
+ content={"path": b"/ws/v2/chat/django/"},
+ consumer=consumer_2,
+ kwargs={"version": "2", "room": "django"},
+ )
+ # Byte patterns, unicode message
+ router = Router([
+ route("websocket.connect", consumer_1, path=b"^/foo/"),
+ include("tests.test_routing.chatroom_routing", path=b"^/ws/v(?P[0-9]+)"),
+ ])
+ self.assertRoute(
+ router,
+ channel="websocket.connect",
+ content={"path": "/boom/"},
+ consumer=None,
+ )
+ self.assertRoute(
+ router,
+ channel="websocket.connect",
+ content={"path": "/foo/"},
+ consumer=consumer_1,
+ )
+ self.assertRoute(
+ router,
+ channel="websocket.connect",
+ content={"path": "/ws/v2/chat/django/"},
+ consumer=consumer_2,
+ kwargs={"version": "2", "room": "django"},
+ )
+
+ def test_channels(self):
+ """
+ Tests that the router reports channels to listen on correctly
+ """
+ router = Router([
+ route("http.request", consumer_1, path=r"^/chat/$"),
+ route("http.disconnect", consumer_2),
+ route("http.request", consumer_3),
+ route_class(TestClassConsumer),
+ ])
+ # Initial check
+ self.assertEqual(
+ router.channels,
+ {"http.request", "http.disconnect", "test.channel"},
+ )
+ # Dynamically add route, recheck
+ router.add_route(route("websocket.receive", consumer_1))
+ self.assertEqual(
+ router.channels,
+ {"http.request", "http.disconnect", "websocket.receive", "test.channel"},
+ )
diff --git a/tests/test_security.py b/tests/test_security.py
new file mode 100644
index 0000000..2805481
--- /dev/null
+++ b/tests/test_security.py
@@ -0,0 +1,47 @@
+from __future__ import unicode_literals
+
+from django.test import override_settings
+
+from channels.exceptions import DenyConnection
+from channels.message import Message
+from channels.security.websockets import allowed_hosts_only
+from channels.test import ChannelTestCase
+
+
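+# A minimal consumer guarded by the Origin check: allowed_hosts_only is
+# expected to raise DenyConnection when the origin is not in ALLOWED_HOSTS.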
+@allowed_hosts_only
+def connect(message):
+ return True
+
+
+class OriginValidationTestCase(ChannelTestCase):
+
+ @override_settings(ALLOWED_HOSTS=['example.com'])
+ def test_valid_origin(self):
+ content = {
+ 'headers': [[b'origin', b'http://example.com']]
+ }
+ message = Message(content, 'websocket.connect', None)
+ self.assertTrue(connect(message))
+
+ @override_settings(ALLOWED_HOSTS=['example.com'])
+ def test_invalid_origin(self):
+ content = {
+ 'headers': [[b'origin', b'http://example.org']]
+ }
+ message = Message(content, 'websocket.connect', None)
+ self.assertRaises(DenyConnection, connect, message)
+
+ def test_invalid_origin_header(self):
+ invalid_headers = [
+ [], # origin header missing
+ [b'origin', b''], # origin header empty
+ [b'origin', b'\xc3\xa4'] # non-ascii
+ ]
+ for headers in invalid_headers:
+ content = {
+ 'headers': [headers]
+ }
+ message = Message(content, 'websocket.connect', None)
+ self.assertRaises(DenyConnection, connect, message)
diff --git a/tests/test_sessions.py b/tests/test_sessions.py
new file mode 100644
index 0000000..007ae98
--- /dev/null
+++ b/tests/test_sessions.py
@@ -0,0 +1,388 @@
+from __future__ import unicode_literals
+
+from django.conf import settings
+from django.test import override_settings
+
+from channels import DEFAULT_CHANNEL_LAYER, channel_layers
+from channels.message import Message
+from channels.sessions import (
+ channel_and_http_session, channel_session, enforce_ordering, http_session, session_for_reply_channel,
+)
+from channels.test import ChannelTestCase
+
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+
+
+@override_settings(SESSION_ENGINE="django.contrib.sessions.backends.cache")
+class SessionTests(ChannelTestCase):
+ """
+ Tests the channels session module.
+ """
+
+ def test_session_for_reply_channel(self):
+ """
+ Tests storing and retrieving values by reply_channel.
+ """
+ session1 = session_for_reply_channel("test-reply-channel")
+ session1["testvalue"] = 42
+ session1.save(must_create=True)
+ session2 = session_for_reply_channel("test-reply-channel")
+ self.assertEqual(session2["testvalue"], 42)
+
+ def test_channel_session(self):
+ """
+ Tests the channel_session decorator
+ """
+ # Construct message to send
+ message = Message({"reply_channel": "test-reply"}, None, None)
+
+ # Run through a simple fake consumer that assigns to it
+ @channel_session
+ def inner(message):
+ message.channel_session["num_ponies"] = -1
+
+ inner(message)
+ # Test the session worked
+ session2 = session_for_reply_channel("test-reply")
+ self.assertEqual(session2["num_ponies"], -1)
+
+ def test_channel_session_method(self):
+ """
+ Tests the channel_session decorator works on methods
+ """
+ # Construct message to send
+ message = Message({"reply_channel": "test-reply"}, None, None)
+
+ # Run through a simple fake consumer that assigns to it
+ class Consumer(object):
+ @channel_session
+ def inner(self, message):
+ message.channel_session["num_ponies"] = -1
+
+ Consumer().inner(message)
+ # Test the session worked
+ session2 = session_for_reply_channel("test-reply")
+ self.assertEqual(session2["num_ponies"], -1)
+
+ def test_channel_session_third_arg(self):
+ """
+ Tests the channel_session decorator with message as 3rd argument
+ """
+ # Construct message to send
+ message = Message({"reply_channel": "test-reply"}, None, None)
+
+ # Run through a simple fake consumer that assigns to it
+ @channel_session
+ def inner(a, b, message):
+ message.channel_session["num_ponies"] = -1
+
+ with self.assertRaisesMessage(ValueError, 'channel_session called without Message instance'):
+ inner(None, None, message)
+
+ def test_channel_session_double(self):
+ """
+ Tests the channel_session decorator detects being wrapped in itself
+ and doesn't blow up.
+ """
+ # Construct message to send
+ message = Message({"reply_channel": "test-reply"}, None, None)
+
+ # Run through a simple fake consumer that should trigger the error
+ @channel_session
+ @channel_session
+ def inner(message):
+ message.channel_session["num_ponies"] = -1
+ inner(message)
+
+ # Test the session worked
+ session2 = session_for_reply_channel("test-reply")
+ self.assertEqual(session2["num_ponies"], -1)
+
+ def test_channel_session_double_method(self):
+ """
+ Tests the channel_session decorator detects being wrapped in itself
+ and doesn't blow up. Method version.
+ """
+ # Construct message to send
+ message = Message({"reply_channel": "test-reply"}, None, None)
+
+ # Run through a simple fake consumer that should trigger the error
+ class Consumer(object):
+ @channel_session
+ @channel_session
+ def inner(self, message):
+ message.channel_session["num_ponies"] = -1
+ Consumer().inner(message)
+
+ # Test the session worked
+ session2 = session_for_reply_channel("test-reply")
+ self.assertEqual(session2["num_ponies"], -1)
+
+ def test_channel_session_double_third_arg(self):
+ """
+ Tests the channel_session decorator detects being wrapped in itself
+ and doesn't blow up.
+ """
+ # Construct message to send
+ message = Message({"reply_channel": "test-reply"}, None, None)
+
+ # Run through a simple fake consumer that should trigger the error
+ @channel_session
+ @channel_session
+ def inner(a, b, message):
+ message.channel_session["num_ponies"] = -1
+ with self.assertRaisesMessage(ValueError, 'channel_session called without Message instance'):
+ inner(None, None, message)
+
+ def test_channel_session_no_reply(self):
+ """
+ Tests the channel_session decorator detects no reply channel
+ """
+ # Construct message to send
+ message = Message({}, None, None)
+
+ # Run through a simple fake consumer that should trigger the error
+ @channel_session
+ @channel_session
+ def inner(message):
+ message.channel_session["num_ponies"] = -1
+
+ with self.assertRaises(ValueError):
+ inner(message)
+
+ def test_channel_session_no_reply_method(self):
+ """
+ Tests the channel_session decorator detects no reply channel
+ """
+ # Construct message to send
+ message = Message({}, None, None)
+
+ # Run through a simple fake consumer that should trigger the error
+ class Consumer(object):
+ @channel_session
+ @channel_session
+ def inner(self, message):
+ message.channel_session["num_ponies"] = -1
+
+ with self.assertRaises(ValueError):
+ Consumer().inner(message)
+
+ def test_channel_session_no_reply_third_arg(self):
+ """
+ Tests the channel_session decorator detects no reply channel
+ """
+ # Construct message to send
+ message = Message({}, None, None)
+
+ # Run through a simple fake consumer that should trigger the error
+ @channel_session
+ @channel_session
+ def inner(a, b, message):
+ message.channel_session["num_ponies"] = -1
+
+ with self.assertRaisesMessage(ValueError, 'channel_session called without Message instance'):
+ inner(None, None, message)
+
+ def test_http_session(self):
+ """
+ Tests that http_session correctly extracts a session cookie.
+ """
+ # Make a session to try against
+ session1 = session_for_reply_channel("test-reply")
+ # Construct message to send
+ message = Message({
+ "reply_channel": "test-reply",
+ "http_version": "1.1",
+ "method": "GET",
+ "path": "/test2/",
+ "headers": {
+ "host": b"example.com",
+ "cookie": ("%s=%s" % (settings.SESSION_COOKIE_NAME, session1.session_key)).encode("ascii"),
+ },
+ }, None, None)
+
+ # Run it through http_session, make sure it works (test double here too)
+ @http_session
+ @http_session
+ def inner(message):
+ message.http_session["species"] = "horse"
+
+ inner(message)
+ # Check value assignment stuck
+ session2 = session_for_reply_channel("test-reply")
+ self.assertEqual(session2["species"], "horse")
+
+ def test_channel_and_http_session(self):
+ """
+        Tests that the channel_and_http_session decorator stores the HTTP session key and rehydrates it when expected
+ """
+ # Make a session to try against
+ session = session_for_reply_channel("test-reply-session")
+ # Construct message to send
+ message = Message({
+ "reply_channel": "test-reply-session",
+ "http_version": "1.1",
+ "method": "GET",
+ "path": "/test2/",
+ "headers": {
+ "host": b"example.com",
+ "cookie": ("%s=%s" % (settings.SESSION_COOKIE_NAME, session.session_key)).encode("ascii"),
+ },
+ }, None, None)
+
+ @channel_and_http_session
+ def inner(message):
+ pass
+
+ inner(message)
+
+ # It should store the session key
+ self.assertEqual(message.channel_session[settings.SESSION_COOKIE_NAME], session.session_key)
+
+ # Construct a new message
+ message2 = Message({"reply_channel": "test-reply-session", "path": "/"}, None, None)
+
+ inner(message2)
+
+ # It should hydrate the http_session
+ self.assertEqual(message2.http_session.session_key, session.session_key)
+
+ def test_enforce_ordering(self):
+ """
+ Tests that strict mode of enforce_ordering works
+ """
+ # Construct messages to send
+ message0 = Message(
+ {"reply_channel": "test-reply!b", "order": 0},
+ "websocket.connect",
+ channel_layers[DEFAULT_CHANNEL_LAYER]
+ )
+ message1 = Message(
+ {"reply_channel": "test-reply!b", "order": 1},
+ "websocket.receive",
+ channel_layers[DEFAULT_CHANNEL_LAYER]
+ )
+ message2 = Message(
+ {"reply_channel": "test-reply!b", "order": 2},
+ "websocket.receive",
+ channel_layers[DEFAULT_CHANNEL_LAYER]
+ )
+
+ # Run them in an acceptable strict order
+ @enforce_ordering
+ def inner(message):
+ pass
+
+ inner(message0)
+ inner(message1)
+ inner(message2)
+
+        # Ensure wait channel is empty. Note the "!" in the process-specific
+        # reply channel shows up as "?" in the wait channel name.
+ wait_channel = "__wait__.test-reply?b"
+ next_message = self.get_next_message(wait_channel)
+ self.assertEqual(next_message, None)
+
+ def test_enforce_ordering_fail(self):
+ """
+ Tests that strict mode of enforce_ordering fails on bad ordering
+ """
+ # Construct messages to send
+ message0 = Message(
+ {"reply_channel": "test-reply-c", "order": 0},
+ "websocket.connect",
+ channel_layers[DEFAULT_CHANNEL_LAYER]
+ )
+ message2 = Message(
+ {"reply_channel": "test-reply-c", "order": 2},
+ "websocket.receive",
+ channel_layers[DEFAULT_CHANNEL_LAYER]
+ )
+
+        # Run them out of order, skipping order 1
+ @enforce_ordering
+ def inner(message):
+ pass
+
+ inner(message0)
+ inner(message2)
+
+ # Ensure wait channel is not empty
+ wait_channel = "__wait__.%s" % "test-reply-c"
+ next_message = self.get_next_message(wait_channel)
+ self.assertNotEqual(next_message, None)
+
+ def test_enforce_ordering_fail_no_order(self):
+ """
+ Makes sure messages with no "order" key fail
+ """
+ message0 = Message(
+ {"reply_channel": "test-reply-d"},
+ None,
+ channel_layers[DEFAULT_CHANNEL_LAYER]
+ )
+
+ @enforce_ordering
+ def inner(message):
+ pass
+
+ with self.assertRaises(ValueError):
+ inner(message0)
+
+ def test_enforce_ordering_concurrent(self):
+ """
+        Tests that enforce_ordering requeues messages correctly when the
+        expected order number changes while a message is being processed
+ """
+ # Construct messages to send
+ message0 = Message(
+ {"reply_channel": "test-reply-e", "order": 0},
+ "websocket.connect",
+ channel_layers[DEFAULT_CHANNEL_LAYER]
+ )
+ message2 = Message(
+ {"reply_channel": "test-reply-e", "order": 2},
+ "websocket.receive",
+ channel_layers[DEFAULT_CHANNEL_LAYER]
+ )
+ message3 = Message(
+ {"reply_channel": "test-reply-e", "order": 3},
+ "websocket.receive",
+ channel_layers[DEFAULT_CHANNEL_LAYER]
+ )
+
+ @channel_session
+ def add_session(message):
+ pass
+
+        # Run them out of order; message 3 arrives before message 2
+ @enforce_ordering
+ def inner(message):
+ pass
+
+ inner(message0)
+ inner(message3)
+
+ # Add the session now so it can be mocked
+ add_session(message2)
+
+ with mock.patch.object(message2.channel_session, 'load', return_value={'__channels_next_order': 2}):
+ inner(message2)
+
+ # Ensure wait channel is empty
+ wait_channel = "__wait__.%s" % "test-reply-e"
+ next_message = self.get_next_message(wait_channel)
+ self.assertEqual(next_message, None)
+
+ # Ensure messages 3 and 2 both ended up back on the original channel
+ expected = {
+ 2: message2,
+ 3: message3
+ }
+ for m in range(2):
+ message = self.get_next_message("websocket.receive")
+ expected.pop(message.content['order'])
+ self.assertEqual(expected, {})
diff --git a/tests/test_worker.py b/tests/test_worker.py
new file mode 100644
index 0000000..a8975fd
--- /dev/null
+++ b/tests/test_worker.py
@@ -0,0 +1,101 @@
+from __future__ import unicode_literals
+
+from channels import DEFAULT_CHANNEL_LAYER, Channel, route
+from channels.asgi import channel_layers
+from channels.exceptions import ConsumeLater
+from channels.test import ChannelTestCase
+from channels.worker import Worker
+
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+
+
+class PatchedWorker(Worker):
+ """Worker with specific numbers of loops"""
+ def get_termed(self):
+ if not self.__iters:
+ return True
+ self.__iters -= 1
+ return False
+
+ def set_termed(self, value):
+ self.__iters = value
+
+ termed = property(get_termed, set_termed)
+
+
+class WorkerTests(ChannelTestCase):
+ """
+    Tests that the worker loop filters channels and dispatches messages correctly.
+ """
+
+ def test_channel_filters(self):
+ """
+ Tests that the include/exclude logic works
+ """
+ # Include
+ worker = Worker(None, only_channels=["yes.*", "maybe.*"])
+ self.assertEqual(
+ worker.apply_channel_filters(["yes.1", "no.1"]),
+ ["yes.1"],
+ )
+ self.assertEqual(
+ worker.apply_channel_filters(["yes.1", "no.1", "maybe.2", "yes"]),
+ ["yes.1", "maybe.2"],
+ )
+ # Exclude
+ worker = Worker(None, exclude_channels=["no.*", "maybe.*"])
+ self.assertEqual(
+ worker.apply_channel_filters(["yes.1", "no.1", "maybe.2", "yes"]),
+ ["yes.1", "yes"],
+ )
+ # Both
+ worker = Worker(None, exclude_channels=["no.*"], only_channels=["yes.*"])
+ self.assertEqual(
+ worker.apply_channel_filters(["yes.1", "no.1", "maybe.2", "yes"]),
+ ["yes.1"],
+ )
+
+ def test_run_with_consume_later_error(self):
+
+ # consumer with ConsumeLater error at first call
+ def _consumer(message, **kwargs):
+ _consumer._call_count = getattr(_consumer, '_call_count', 0) + 1
+ if _consumer._call_count == 1:
+ raise ConsumeLater()
+
+ Channel('test').send({'test': 'test'}, immediately=True)
+ channel_layer = channel_layers[DEFAULT_CHANNEL_LAYER]
+ channel_layer.router.add_route(route('test', _consumer))
+ old_send = channel_layer.send
+ channel_layer.send = mock.Mock(side_effect=old_send) # proxy 'send' for counting
+
+ worker = PatchedWorker(channel_layer)
+        worker.termed = 2  # first loop raises and requeues, second consumes
+
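+        # ConsumeLater should requeue the message on its original channel,
+        # so the consumer runs twice while the layer sees exactly one
+        # extra send.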
+ worker.run()
+ self.assertEqual(getattr(_consumer, '_call_count', None), 2)
+ self.assertEqual(channel_layer.send.call_count, 1)
+
+ def test_normal_run(self):
+ consumer = mock.Mock()
+ Channel('test').send({'test': 'test'}, immediately=True)
+ channel_layer = channel_layers[DEFAULT_CHANNEL_LAYER]
+ channel_layer.router.add_route(route('test', consumer))
+ old_send = channel_layer.send
+ channel_layer.send = mock.Mock(side_effect=old_send) # proxy 'send' for counting
+
+ worker = PatchedWorker(channel_layer)
+ worker.termed = 2
+
+ worker.run()
+ self.assertEqual(consumer.call_count, 1)
+ self.assertEqual(channel_layer.send.call_count, 0)
diff --git a/tests/test_wsclient.py b/tests/test_wsclient.py
new file mode 100644
index 0000000..0b94621
--- /dev/null
+++ b/tests/test_wsclient.py
@@ -0,0 +1,128 @@
+from __future__ import unicode_literals
+
+from django.http.cookie import parse_cookie
+
+from channels import route
+from channels.exceptions import ChannelSocketException
+from channels.handler import AsgiRequest
+from channels.sessions import enforce_ordering
+from channels.test import ChannelTestCase, WSClient, apply_routes
+
+
+class WSClientTests(ChannelTestCase):
+ def test_cookies(self):
+ client = WSClient()
+ client.set_cookie('foo', 'not-bar')
+ client.set_cookie('foo', 'bar')
+ client.set_cookie('qux', 'qu;x')
+
+ # Django's interpretation of the serialized cookie.
+ cookie_dict = parse_cookie(client.headers['cookie'].decode('ascii'))
+
+ self.assertEqual(client.get_cookies(),
+ cookie_dict)
+
+ self.assertEqual({'foo': 'bar',
+ 'qux': 'qu;x',
+ 'sessionid': client.get_cookies()['sessionid']},
+ cookie_dict)
+
+ def test_simple_content(self):
+ client = WSClient()
+ content = client._get_content(text={'key': 'value'}, path='/my/path')
+
+ self.assertEqual(content['text'], '{"key": "value"}')
+ self.assertEqual(content['path'], '/my/path')
+ self.assertTrue('reply_channel' in content)
+ self.assertTrue('headers' in content)
+
+ def test_path_in_content(self):
+ client = WSClient()
+ content = client._get_content(content={'path': '/my_path'}, text={'path': 'hi'}, path='/my/path')
+
+ self.assertEqual(content['text'], '{"path": "hi"}')
+ self.assertEqual(content['path'], '/my_path')
+ self.assertTrue('reply_channel' in content)
+ self.assertTrue('headers' in content)
+
+ def test_session_in_headers(self):
+ client = WSClient()
+ content = client._get_content()
+ self.assertTrue('path' in content)
+ self.assertEqual(content['path'], '/')
+
+ self.assertTrue('headers' in content)
+ self.assertIn(b'cookie', [x[0] for x in content['headers']])
+ self.assertIn(b'sessionid', [x[1] for x in content['headers'] if x[0] == b'cookie'][0])
+
+ def test_ordering_in_content(self):
+ client = WSClient(ordered=True)
+ content = client._get_content()
+ self.assertTrue('order' in content)
+ self.assertEqual(content['order'], 0)
+ client.order = 2
+ content = client._get_content()
+ self.assertTrue('order' in content)
+ self.assertEqual(content['order'], 2)
+
+ def test_ordering(self):
+
+ client = WSClient(ordered=True)
+
+ @enforce_ordering
+ def consumer(message):
+ message.reply_channel.send({'text': message['text']})
+
+ with apply_routes(route('websocket.receive', consumer)):
+ client.send_and_consume('websocket.receive', text='1') # order = 0
+ client.send_and_consume('websocket.receive', text='2') # order = 1
+ client.send_and_consume('websocket.receive', text='3') # order = 2
+
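+        # receive() JSON-decodes the reply text by default, so the '1'/'2'/'3'
+        # payloads come back as the ints 1, 2 and 3.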
+ self.assertEqual(client.receive(), 1)
+ self.assertEqual(client.receive(), 2)
+ self.assertEqual(client.receive(), 3)
+
+ def test_get_params(self):
+ client = WSClient()
+ content = client._get_content(path='/my/path?test=1&token=2')
+ self.assertTrue('path' in content)
+ self.assertTrue('query_string' in content)
+ self.assertEqual(content['path'], '/my/path')
+ self.assertEqual(content['query_string'], 'test=1&token=2')
+
+ def test_get_params_with_consumer(self):
+ client = WSClient(ordered=True)
+
+ def consumer(message):
+ message.content['method'] = 'FAKE'
+ message.reply_channel.send({'text': dict(AsgiRequest(message).GET)})
+
+ with apply_routes([route('websocket.receive', consumer, path=r'^/test'),
+ route('websocket.connect', consumer, path=r'^/test')]):
+ path = '/test?key1=val1&key2=val2&key1=val3'
+ client.send_and_consume('websocket.connect', path=path, check_accept=False)
+ self.assertDictEqual(client.receive(), {'key2': ['val2'], 'key1': ['val1', 'val3']})
+
+ client.send_and_consume('websocket.receive', path=path)
+ self.assertDictEqual(client.receive(), {})
+
+ def test_channel_socket_exception(self):
+
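+        # A ChannelSocketException subclass defines run(); the consumer
+        # machinery is expected to invoke it with the message rather than
+        # propagate the error.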
+ class MyChannelSocketException(ChannelSocketException):
+
+ def run(self, message):
+ message.reply_channel.send({'text': 'error'})
+
+ def consumer(message):
+ raise MyChannelSocketException
+
+ client = WSClient()
+ with apply_routes(route('websocket.receive', consumer)):
+ client.send_and_consume('websocket.receive')
+
+ self.assertEqual(client.receive(json=False), 'error')
diff --git a/tox.ini b/tox.ini
index c420d40..11900b1 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,10 +1,17 @@
-# We test against the oldest supported Twisted release, and the current release.
[tox]
-envlist = py{27,34,35,36}-twisted-{old,new}
+envlist =
+ py{27,34,35,36}-django-{18,19,110,111}
+ py{27,35,36}-flake8
+ isort
[testenv]
+extras = tests
deps =
- twisted-old: twisted==17.1.0
+ django-18: Django>=1.8,<1.9
+ django-19: Django>=1.9,<1.10
+ django-110: Django>=1.10,<1.11
+ django-111: Django>=1.11,<2.0
commands =
- pip install -e .[tests]
- python -m unittest discover
+ flake8: flake8
+ isort: isort --check-only --recursive channels
+ django: coverage run --parallel-mode {toxinidir}/runtests.py {posargs}