diff --git a/website/.gitignore b/website/.gitignore
new file mode 100644
index 000000000..a04df6008
--- /dev/null
+++ b/website/.gitignore
@@ -0,0 +1,2 @@
+www
+*.swp
diff --git a/website/404.jade b/website/404.jade
index 9ea96e237..e243a9ba6 100644
--- a/website/404.jade
+++ b/website/404.jade
@@ -1,10 +1,7 @@
+//- ----------------------------------
+//- 💫 404 ERROR
+//- ----------------------------------
+
include _includes/_mixins
-//- 404 Error
-//- ============================================================================
-
-+lead.text-center Ooops, this page does not exist. Click #[a(href='javascript:history.go(-1)') here] to go back or check out one of the latest posts below.
-
-+divider('bottom')
-
-!=partial('_includes/_latest-posts', { max: 3 } )
+p.u-text-large.u-text-center Oops, this page does not exist. Click #[a(href="javascript:history.go(-1)") here] to go back.
diff --git a/website/README.md b/website/README.md
index 12becb008..389721da0 100644
--- a/website/README.md
+++ b/website/README.md
@@ -1,22 +1,18 @@
# Source files for the spacy.io website and docs
-The [spacy.io](https://spacy.io) website is implemented in [Jade (aka Pug)](https://www.jade-lang.org), and is built or served by [Harp](https://harpjs.com).
+The [spacy.io](https://spacy.io) website is implemented in [Jade (aka Pug)](https://www.jade-lang.org), and is built or served by [Harp](https://harpjs.com). Jade is an extensible templating language with a readable syntax that compiles to HTML.
+The website source makes extensive use of Jade mixins, so that the design system is abstracted away from the content you're
+writing. You can read more about our approach in our blog post, ["Rebuilding a Website with Modular Markup"](https://explosion.ai/blog/modular-markup).
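+
+For example, a docs page calls mixins like `+h` and `+code` (defined in `_includes/_mixins`) instead of raw markup. A minimal sketch, with placeholder heading id and content:
+
+```jade
++h(2, "quickstart") Quickstart
+
++code("bash", "Install").
+    pip install spacy
+```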
+
## Building the site
-To build the site and start making changes:
+```bash
+sudo npm install --global harp
+git clone https://github.com/explosion/spacy.io
+cd spacy.io
+harp server
+```
- sudo npm install --global harp
- git clone https://github.com/spacy-io/website
- cd website
- harp server
-
-This will serve the site on [http://localhost:9000](http://localhost:9000). You can then edit the jade source and refresh the page to see your changes.
-
-## Reading the source
-
-Jade is an extensible templating language with a readable syntax, that compiles to HTML.
-The website source makes extensive use of Jade mixins, so that the design system is abstracted away from the content you're
-writing. You can read more about our approach in our blog post, ["Rebuilding a Website with Modular Markup Components"](https://spacy.io/blog/modular-markup).
-
-If you want to write or edit the pages, the site's [styleguide](http://spacy.io/styleguide) serves as a useful reference of the available mixins.
+This will serve the site on [http://localhost:9000](http://localhost:9000).
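+
+To compile a static build into the `www` directory (ignored via `.gitignore`) instead of serving the site, run:
+
+```bash
+harp compile
+```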
diff --git a/website/_data.json b/website/_data.json
index 5a7c17ff7..17b29dd20 100644
--- a/website/_data.json
+++ b/website/_data.json
@@ -1,10 +1,29 @@
{
"index": {
- "landing": true
- },
-
- "feed": {
- "layout": false
+ "landing": true,
+ "logos": [
+ [
+ ["chartbeat", "https://chartbeat.com"],
+ ["socrata", "https://www.socrata.com"],
+ ["chattermill", "https://chattermill.io"],
+ ["cytora", "http://www.cytora.com"],
+ ["signaln", "http://signaln.com"],
+ ["duedil", "https://www.duedil.com/"],
+ ["spyjack", "https://spyjack.io"]
+ ],
+ [
+ ["keyreply", "https://keyreply.com/"],
+ ["dato", "https://dato.com"],
+ ["kip", "http://kipthis.com"],
+ ["wonderflow", "http://www.wonderflow.co"],
+ ["foxtype", "https://foxtype.com"]
+ ],
+ [
+ ["synapsify", "http://www.gosynapsify.com"],
+ ["stitchfix", "https://www.stitchfix.com/"],
+ ["wayblazer", "http://wayblazer.com"]
+ ]
+ ]
},
"robots": {
@@ -16,19 +35,8 @@
"asides": false
},
- "team": {
- "title": "Team"
- },
-
- "legal": {
- "title": "Legal & Imprint",
- "sidebar": true,
- "asides": true
- },
-
"styleguide": {
"title" : "Styleguide",
- "standalone" : true,
"asides": true,
"sidebar": {
diff --git a/website/_fabfile.py b/website/_fabfile.py
deleted file mode 100644
index d31ea67eb..000000000
--- a/website/_fabfile.py
+++ /dev/null
@@ -1,94 +0,0 @@
-from __future__ import print_function
-
-from fabric.api import local
-import os
-import hashlib
-import mimetypes
-import shutil
-
-import boto.s3.connection
-
-
-mimetypes.init()
-
-buckets = {
- 'staging': 'staging.spacy.io',
- 'production': 'spacy.io',
-}
-
-
-def compile():
- shutil.rmtree('www')
- local('NODE_ENV=s3 harp compile')
-
-
-def publish(env='staging', site_path='www'):
- os.environ['S3_USE_SIGV4'] = 'True'
- conn = boto.s3.connection.S3Connection(host='s3.eu-central-1.amazonaws.com',
- calling_format=boto.s3.connection.OrdinaryCallingFormat())
- bucket = conn.get_bucket(buckets[env], validate=False)
-
- keys = {k.name: k for k in bucket.list()}
- keys_left = set(keys)
-
- for root, dirnames, filenames in os.walk(site_path):
- for dirname in dirnames:
- target = os.path.relpath(os.path.join(root, dirname), site_path)
- source = os.path.join(target, 'index.html')
-
- if os.path.exists(os.path.join(root, dirname, 'index.html')):
- redirect = '//%s/%s' % (bucket.name, target)
- key = bucket.lookup(source)
- if not key:
- key = bucket.new_key(source)
- key.set_redirect(redirect)
- print('setting redirect for %s' % target)
- elif key.get_redirect() != redirect:
- key.set_redirect(redirect)
- print('setting redirect for %s' % target)
-
- if source in keys_left:
- keys_left.remove(source)
-
- for filename in filenames:
- source = os.path.join(root, filename)
-
- if filename == 'index.html':
- target = os.path.normpath(os.path.relpath(root, site_path))
- if target == '.':
- target = filename
- else:
- target = os.path.normpath(os.path.join(os.path.relpath(root, site_path), filename))
- if target.endswith('.html'):
- target = target[:-len('.html')]
-
- content_type = mimetypes.guess_type(source)[0]
- cache_control = 'no-transform,public,max-age=300,s-maxage=300'
- checksum = hashlib.md5(open(source).read()).hexdigest()
-
- if (target not in keys
- or keys[target].etag.replace('"', '') != checksum):
-
- key = bucket.new_key(target)
- if content_type:
- key.content_type = content_type
- key.set_contents_from_filename(source,
- headers={'Cache-Control': cache_control})
- print('uploading %s' % target)
-
- elif content_type:
- key = bucket.lookup(target)
- if (key
- and (key.content_type != content_type
- or key.cache_control != cache_control)):
- key.copy(key.bucket, key.name, preserve_acl=True,
- metadata={'Content-Type': content_type,
- 'Cache-Control': cache_control})
- print('update headers %s' % target)
-
- if target in keys_left:
- keys_left.remove(target)
-
- for key_name in keys_left:
- print('deleting %s' % key_name)
- bucket.delete_key(key_name)
diff --git a/website/_harp.json b/website/_harp.json
index 6510f73cf..1ceb54023 100644
--- a/website/_harp.json
+++ b/website/_harp.json
@@ -1,85 +1,29 @@
{
"globals": {
"title": "spaCy.io",
- "sitename": "spaCy",
- "slogan": "Industrial-strength Natural Language Processing",
"description": "spaCy is a free open-source library featuring state-of-the-art speed and accuracy and a powerful Python API.",
- "url": "https://spacy.io",
- "email": "contact@spacy.io",
- "company": "spaCy GmbH",
- "team_members": [ "henning", "matt", "wolfgang", "elmar", "ines" ],
- "navigation": { "Docs": "docs", "Demos": "demos", "Team": "team", "Blog": "blog" },
- "profiles": { "twitter": "spacy_io", "github": "spacy-io", "reddit": "spacynlp", "medium": "spacy" },
- "google_analytics": "UA-58931649-1",
+ "SITENAME": "spaCy",
+ "SLOGAN": "Industrial-strength Natural Language Processing",
+ "SITE_URL": "https://spacy.io",
+ "EMAIL": "contact@explosion.ai",
- "stylesheets": { "default": "style", "blog": "style_blog" },
- "scripts" : [ "main", "prism" ],
- "feed": "feed.xml",
- "image_sizes" : { "small" : "640", "medium": "1440", "large": "2000" },
- "default_syntax" : "python",
+ "COMPANY": "Explosion AI",
+ "COMPANY_URL": "https://explosion.ai",
+ "DEMOS_URL": "https://demos.explosion.ai",
- "spacy_version": "0.100.6",
- "spacy_stars": "1500",
- "github_settings": { "user": "spacy-io", "repo": "spacy" },
-
- "apis": {
- "displacy": "https://displacy.spacy.io/",
- "sense2vec": "https://sense2vec.spacy.io/api/similarity/reddit/"
+ "SOCIAL": {
+ "twitter": "spacy_io",
+ "github": "explosion",
+ "reddit": "spacynlp"
},
- "authors" : {
- "matt" : {
- "name" : "Matthew Honnibal",
- "title": "CTO",
- "description" : "is co-founder and CTO of spaCy. He studied linguistics as an undergrad, and never thought he'd be a programmer. By 2009 he had a PhD in computer science, and in 2014 he left academia to write spaCy. He's from Sydney and lives in Berlin.",
- "links": {
- "twitter": [ "https://twitter.com/honnibal", "Twitter" ],
- "website": [ "https://www.semanticscholar.org/search?q=Matthew%20Honnibal", "Semantic Scholar" ]
- }
- },
+ "SCRIPTS" : [ "main", "prism" ],
+ "DEFAULT_SYNTAX" : "python",
+ "ANALYTICS": "UA-58931649-1",
- "henning": {
- "name": "Henning Peters",
- "title": "CEO",
- "description": "is co-founder and CEO of spaCy. He holds a MSc in computer science and has been co-founder and CTO of Skoobe and Absolventa. His passions are uncommon languages and backcountry skiing.",
- "links": {
- "twitter": [ "https://twitter.com/henningpeters", "Twitter"],
- "linkedin": [ "https://de.linkedin.com/in/hepeters", "LinkedIn"],
- "github": [ "https://github.com/henningpeters", "GitHub"]
- }
- },
-
- "ines": {
- "name": "Ines Montani",
- "title": "Front-End",
- "description": "As Head of Front-End, Ines is in charge of showing people what spaCy can do. She develops, designs and implements our interactive demos and the spacy.io website. Ines has a degree in media, linguistics and communications, and over ten years experience in web development.",
- "links": {
- "twitter": [ "https://twitter.com/_inesmontani", "Twitter" ],
- "codepen": [ "https://codepen.io/inesmontani", "Codepen"],
- "github": [ "https://github.com/inesmontani", "GitHub"],
- "website": [ "http://ines.io", "Blog" ]
- }
- },
-
- "wolfgang": {
- "name": "Wolfgang Seeker",
- "title": "NLP Engineer",
- "description": "is a computational linguist from Germany. He is fascinated with the complexity and variety of human language, and spent his PhD looking for ways to make NLP work well with any kind of language in the world. He joined spaCy to build effective and truly multilingual NLP software.",
- "links": {
- "website": [ "https://www.semanticscholar.org/search?q=Wolfgang%20Seeker", "Semantic Scholar" ]
- }
- },
-
- "elmar": {
- "name": "Elmar Haußmann",
- "title": "NLP Engineer",
- "description": "is an NLP engineer at spaCy, passionate about deep learning. He has a background in both, academic research, with a PhD in computer science, and industry, as a former consultant and software engineer at IBM. Originally from Stuttgart, the avid snowboarder and mountain biker doesn't only ride powder and trails but also covers distances via plane between the spaCy office in Berlin and his new home in Beijing.",
- "links": {
- "github": [ "https://github.com/elmar-haussmann", "GitHub"],
- "twitter": [ "https://twitter.com/elhaussmann", "Twitter" ]
- }
- }
- }
+ "SPACY_VERSION": "0.101.0",
+ "SPACY_STARS": "2300",
+ "GITHUB": { "user": "explosion", "repo": "spacy" }
}
}
diff --git a/website/_includes/_analytics.jade b/website/_includes/_analytics.jade
deleted file mode 100644
index ab322b800..000000000
--- a/website/_includes/_analytics.jade
+++ /dev/null
@@ -1,7 +0,0 @@
-if environment != 'development'
- script.
- (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
- (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
- m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
- })(window,document,'script','//www.google-analytics.com/analytics.js','ga');
- ga('create', '#{google_analytics}', 'auto'); ga('send', 'pageview');
diff --git a/website/_includes/_article.jade b/website/_includes/_article.jade
deleted file mode 100644
index e39bd3649..000000000
--- a/website/_includes/_article.jade
+++ /dev/null
@@ -1,34 +0,0 @@
-include ../_includes/_mixins
-
-//- Article
-//- ============================================================================
-
-article.article(id=current.source)
-
- header.article-header
- +h2.article-title=title
- .article-meta
- if author
- | by #[a.link(href=(authors[author].url || url) target='_blank')=authors[author].name] on
- | #[+date(date)]
-
- .article-body!=yield
-
- footer.article-footer
-
- +grid('padding', 'align-right', 'valign-center')
- +tweet(title)
-
- if links
- for link, index in links
- div: +button('primary', 'small', index.toLowerCase())(href=link target='_blank')
- +icon(index.toLowerCase(), 'medium', 'secondary')
- | Discussion on #{index}
-
- if author
- +divider
-
- !=partial('_profile', { label: 'About the Author', style: 'alt' })
-
-!=partial('_newsletter', { divider: 'both' })
-!=partial('_latest-posts', { max: 2, _section: _section } )
diff --git a/website/_includes/_footer.jade b/website/_includes/_footer.jade
index 017cdfa86..bd7688bfb 100644
--- a/website/_includes/_footer.jade
+++ b/website/_includes/_footer.jade
@@ -1,14 +1,17 @@
+//- ----------------------------------
+//- 💫 INCLUDES > FOOTER
+//- ----------------------------------
+
include _mixins
-//- Footer
-//- ============================================================================
+footer.o-footer.o-inline-list.u-pattern.u-text-center.u-text-label.u-text-strong
+ span © #{new Date().getFullYear()} #[+a(COMPANY_URL, true)=COMPANY]
-footer.footer
- span © #{new Date().getFullYear()} #{company}
- a(href='/legal') Legal / Imprint
+ +a(COMPANY_URL + "/legal", true) Legal / Imprint
+ a(href="mailto:#{EMAIL}") #[+icon("mail", 16)]
- a(href='https://twitter.com/' + profiles.twitter target='_blank' aria-label="Twitter")
- +icon('twitter', 'secondary')
+ +a("https://twitter.com/" + SOCIAL.twitter)(aria-label="Twitter")
+ +icon("twitter", 20)
- a(href='/feed.xml' target='_blank' aria-label="RSS Feed")
- +icon('feed', 'secondary')
+ +a("https://github.com/" + SOCIAL.github + "/spaCy")(aria-label="GitHub")
+ +icon("github", 20)
diff --git a/website/_includes/_functions.jade b/website/_includes/_functions.jade
index b58eea883..a191b330d 100644
--- a/website/_includes/_functions.jade
+++ b/website/_includes/_functions.jade
@@ -1,101 +1,11 @@
-//- Functions
-//- ============================================================================
+//- ----------------------------------
+//- 💫 INCLUDES > FUNCTIONS
+//- ----------------------------------
-//- Full page title
-
-- function getPageTitle() {
-- if(current.path[0] == 'blog' && current.source != 'index') title += ' | Blog';
-- return (current.path[0] == 'index') ? sitename + ' | ' + slogan : title + ' | ' + sitename;
-- }
-
-
-//- Get current URL
- current - [string] current path
-
-- function getCurrentUrl() {
-- var base = current.path;
-- if(current.source == 'index') base.pop();
-- return url + '/' + base.join('/');
-- }
-
-
-//- Assign flexbox order, elements are assigned negative values to always move
- them to the start of a flexbox in the correct order (i.e. -3, -2, -1)
- counter - [integer] index of current item
- max - [integer] amount of items in total
- start - [integer] index of start position, i.e. 0 -> oder: -1 (optional)
-
-- function assignOrder(counter, max, start) {
-- if(counter >= 0 && counter < max) return "order: -" + (max - counter + (start || 0));
-- }
-
-
-//- Create Twitter share URL
- current - [string] current path
- tweet - [string] text to be shared with link
-
-- function twitterShareUrl(current, tweet) {
-- return "https://twitter.com/share?text=" + tweet + "&url=" + getCurrentUrl(current) + ";via=" + profiles.twitter;
-- }
-
-
-//- Add prefix to each item in an array (used for modifier CSS classes)
- array - [array] array of strings, taken from mixin arguments
- prefix - [string] class prefix (i.e. 'button--')
+//- Add prefixes to items of an array (for modifier CSS classes)
- function prefixArgs(array, prefix) {
-- for(var i = 0; i < array.length; i++) {
-- array[i] = prefix + array[i];
-- }
-- return array.join(' ');
-- }
-
-
-//- Convert date to human readable and timestamp format
- input - [string] date in the format YYYY-MM-DD
-
-- function convertDate(input) {
-- var dates = [];
-- var months = [ 'January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December' ];
-- var date = new Date(input);
-- dates.full = months[date.getMonth()] + ' ' + date.getDate() + ', ' + date.getFullYear();
-- dates.timestamp = JSON.parse(JSON.stringify(date));
-- return dates;
-- }
-
-
-//- Convert date to valid RSS pubDate
- input - [string] date in the format YYYY-MM-DD
-
-- function convertPubDate(input) {
-- var date = new Date(input);
-- var pieces = date.toString().split(' ');
-- var offsetTime = pieces[5].match(/[-+]\d{4}/);
-- var offset = (offsetTime) ? offsetTime : pieces[5];
-- var parts = [ pieces[0] + ',', pieces[2], pieces[1], pieces[3], pieces[4], offset ];
-- return parts.join(' ');
-- }
-
-
-//- Compile scrset attribute for hero images
- image - [object] article image object from _data.json
- path - [string] relative path to image folder
-
-- function getScrset(image, path) {
-- var scrset = path + image.file + ' ' + image_sizes.medium + 'w';
-- if(image.file_small) scrset += ', ' + path + image.file_small + ' ' + image_sizes.small + 'w';
-- if(image.file_large) scrset += ', ' + path + image.file_large + ' ' + image_sizes.large + 'w';
-- return scrset;
-- }
-
-
-//- Get meta image
-
-- function getMetaImage() {
-- if(current.path[0] == 'blog' && image && image.file) {
-- return url + '/blog/img/' + image.file;
-- }
-- else {
-- return url + '/assets/img/social.png';
-- }
+- return array.map(function(arg) {
+- return prefix + '--' + arg;
+- }).join(' ');
- }
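+
+//- e.g. prefixArgs(["center", "padding"], "o-grid")
+//- returns "o-grid--center o-grid--padding"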
diff --git a/website/_includes/_head.jade b/website/_includes/_head.jade
deleted file mode 100644
index 96c4d0154..000000000
--- a/website/_includes/_head.jade
+++ /dev/null
@@ -1,31 +0,0 @@
-include _mixins
-
-- var is_blog = (_section == 'blog')
-
-
-//- Head
-//- ============================================================================
-
-head
- title=getPageTitle()
-
- meta(charset='utf-8')
- meta(name="viewport" content="width=device-width, initial-scale=1.0")
- meta(name='referrer' content='always')
-
- meta(property='og:type' content='website')
- meta(property='og:site_name' content=sitename)
- meta(property='og:url' content=getCurrentUrl())
- meta(property='og:title' content=title)
- meta(property='og:description' content=description)
- meta(property='og:image' content=getMetaImage())
-
- meta(name='twitter:card' content='summary_large_image')
- meta(name='twitter:site' content='@' + profiles.twitter)
- meta(name='twitter:title' content=title)
- meta(name='twitter:description' content=description)
- meta(name='twitter:image' content=getMetaImage())
-
- link(rel='icon' type='image/x-icon' href='/assets/img/favicon.ico')
- link(href='/assets/css/' + ((is_blog) ? stylesheets.blog : stylesheets.default) + '.css' rel='stylesheet')
- link(href='/' + feed rel='alternate' type='application/rss+xml' title='RSS')
diff --git a/website/_includes/_header.jade b/website/_includes/_header.jade
deleted file mode 100644
index 345073c71..000000000
--- a/website/_includes/_header.jade
+++ /dev/null
@@ -1,21 +0,0 @@
-include _mixins
-
-//- Header
-//- ============================================================================
-
-header.header(class=(image) ? 'hero' : '')
-
- if image
- img(srcset=getScrset(image, 'img/') alt=image.alt sizes='100vw')
-
- if image.credit
- .hero-credit
- if image.url
- a(href=image.url target='_blank')=image.credit
-
- else
- !=image.credit
-
- else
- if !is_article && headline != false
- h1.header-title=title
diff --git a/website/_includes/_latest-posts.jade b/website/_includes/_latest-posts.jade
deleted file mode 100644
index f4289866b..000000000
--- a/website/_includes/_latest-posts.jade
+++ /dev/null
@@ -1,17 +0,0 @@
-include _mixins
-
-- var post_counter = 0
-- var is_docs = (_section == 'docs')
-
-
-//- Latest Posts
-//- ============================================================================
-
-+grid('padding')
- each post, slug in ( (_section == 'docs' ) ? public.docs.tutorials._data : public.blog._data)
- if slug != 'index' && slug != current.source && post_counter < (max || 3)
-
- +grid-col('space-between', ((max > 2 && max % 3 == 0) ? 'third' : 'half'))
- !=partial('_teaser', { teaser: post, slug: slug, _root: (is_docs) ? '/docs/tutorials/' : '/blog/' })
-
- - post_counter++
diff --git a/website/_includes/_logo.jade b/website/_includes/_logo.jade
index b5d2698af..a315e681b 100644
--- a/website/_includes/_logo.jade
+++ b/website/_includes/_logo.jade
@@ -1,5 +1,6 @@
-//- Logo
-//- ============================================================================
+//- ----------------------------------
+//- 💫 INCLUDES > LOGO
+//- ----------------------------------
-svg.logo(class=(logo_size) ? 'logo--' + logo_size : '' viewBox='0 0 675 215' width='500')
- path(d='M83.6 83.3C68.3 81.5 67.2 61 47.5 62.8c-9.5 0-18.4 4-18.4 12.7 0 13.2 20.3 14.4 32.5 17.7 20.9 6.3 41 10.7 41 33.3 0 28.8-22.6 38.8-52.4 38.8-24.9 0-50.2-8.9-50.2-31.8 0-6.4 6.1-11.3 12-11.3 7.5 0 10.1 3.2 12.7 8.4 5.8 10.2 12.3 15.6 28.3 15.6 10.2 0 20.6-3.9 20.6-12.7 0-12.6-12.8-15.3-26.1-18.4-23.5-6.6-43.6-10-46-36.1C-1 34.5 91.7 32.9 97 71.9c.1 7.1-6.5 11.4-13.4 11.4zm110.2-39c32.5 0 51 27.2 51 60.8 0 33.7-17.9 60.8-51 60.8-18.4 0-29.8-7.8-38.1-19.8v44.5c0 13.4-4.3 19.8-14.1 19.8-11.9 0-14.1-7.6-14.1-19.8V61.3c0-10.6 4.4-17 14.1-17 9.1 0 14.1 7.2 14.1 17v3.6c9.2-11.6 19.7-20.6 38.1-20.6zm-7.7 98.4c19.1 0 27.6-17.6 27.6-38.1 0-20.1-8.6-38.1-27.6-38.1-19.8 0-29 16.3-29 38.1 0 21.2 9.2 38.1 29 38.1zM266.9 76c0-23.4 26.9-31.7 52.9-31.7 36.6 0 51.7 10.7 51.7 46v34c0 8.1 5 24.1 5 29 0 7.4-6.8 12-14.1 12-8.1 0-14.1-9.5-18.4-16.3-11.9 9.5-24.5 16.3-43.8 16.3-21.3 0-38.1-12.6-38.1-33.3 0-18.4 13.2-28.9 29-32.5 0 .1 51-12 51-12.1 0-15.7-5.5-22.6-22-22.6-14.5 0-21.9 4-27.5 12.7-4.5 6.6-4 10.6-12.7 10.6-6.9-.1-13-4.9-13-12.1zm43.6 70.2c22.3 0 31.8-11.8 31.8-35.3v-5c-6 2-30.3 8-36.8 9.1-7 1.4-14.1 6.6-14.1 14.9.1 9.1 9.4 16.3 19.1 16.3zM474.5 0c31.5 0 65.7 18.8 65.7 48.8 0 7.7-5.8 14.1-13.4 14.1-10.3 0-11.8-5.5-16.3-13.4-7.6-13.9-16.5-23.3-36.1-23.3-30.2-.2-43.7 25.6-43.7 57.8 0 32.4 11.2 55.8 42.4 55.8 20.7 0 32.2-12 38.1-27.6 2.4-7.1 6.7-14.1 15.6-14.1 7 0 14.1 7.2 14.1 14.8 0 31.8-32.4 53.8-65.8 53.8-36.5 0-57.2-15.4-68.5-41-5.5-12.2-9.1-24.9-9.1-42.4-.1-49.2 28.6-83.3 77-83.3zm180.3 44.3c8 0 12.7 5.2 12.7 13.4 0 3.3-2.6 9.9-3.6 13.4L625.1 173c-8.6 22.1-15.1 37.4-44.5 37.4-14 0-26.1-1.2-26.1-13.4 0-7 5.3-10.6 12.7-10.6 1.4 0 3.6.7 5 .7 2.1 0 3.6.7 5 .7 14.7 0 16.8-15.1 22-25.5l-37.4-92.6c-2.1-5-3.6-8.4-3.6-11.3 0-8.2 6.4-14.1 14.8-14.1 9.5 0 13.3 7.5 15.6 15.6l24.7 73.5L638 65.5c3.9-10.5 4.2-21.2 16.8-21.2z')
+svg.o-logo(class=(logo_size) ? "o-logo--" + logo_size : "" viewBox="0 0 675 215" width="500")
+ path(d="M83.6 83.3C68.3 81.5 67.2 61 47.5 62.8c-9.5 0-18.4 4-18.4 12.7 0 13.2 20.3 14.4 32.5 17.7 20.9 6.3 41 10.7 41 33.3 0 28.8-22.6 38.8-52.4 38.8-24.9 0-50.2-8.9-50.2-31.8 0-6.4 6.1-11.3 12-11.3 7.5 0 10.1 3.2 12.7 8.4 5.8 10.2 12.3 15.6 28.3 15.6 10.2 0 20.6-3.9 20.6-12.7 0-12.6-12.8-15.3-26.1-18.4-23.5-6.6-43.6-10-46-36.1C-1 34.5 91.7 32.9 97 71.9c.1 7.1-6.5 11.4-13.4 11.4zm110.2-39c32.5 0 51 27.2 51 60.8 0 33.7-17.9 60.8-51 60.8-18.4 0-29.8-7.8-38.1-19.8v44.5c0 13.4-4.3 19.8-14.1 19.8-11.9 0-14.1-7.6-14.1-19.8V61.3c0-10.6 4.4-17 14.1-17 9.1 0 14.1 7.2 14.1 17v3.6c9.2-11.6 19.7-20.6 38.1-20.6zm-7.7 98.4c19.1 0 27.6-17.6 27.6-38.1 0-20.1-8.6-38.1-27.6-38.1-19.8 0-29 16.3-29 38.1 0 21.2 9.2 38.1 29 38.1zM266.9 76c0-23.4 26.9-31.7 52.9-31.7 36.6 0 51.7 10.7 51.7 46v34c0 8.1 5 24.1 5 29 0 7.4-6.8 12-14.1 12-8.1 0-14.1-9.5-18.4-16.3-11.9 9.5-24.5 16.3-43.8 16.3-21.3 0-38.1-12.6-38.1-33.3 0-18.4 13.2-28.9 29-32.5 0 .1 51-12 51-12.1 0-15.7-5.5-22.6-22-22.6-14.5 0-21.9 4-27.5 12.7-4.5 6.6-4 10.6-12.7 10.6-6.9-.1-13-4.9-13-12.1zm43.6 70.2c22.3 0 31.8-11.8 31.8-35.3v-5c-6 2-30.3 8-36.8 9.1-7 1.4-14.1 6.6-14.1 14.9.1 9.1 9.4 16.3 19.1 16.3zM474.5 0c31.5 0 65.7 18.8 65.7 48.8 0 7.7-5.8 14.1-13.4 14.1-10.3 0-11.8-5.5-16.3-13.4-7.6-13.9-16.5-23.3-36.1-23.3-30.2-.2-43.7 25.6-43.7 57.8 0 32.4 11.2 55.8 42.4 55.8 20.7 0 32.2-12 38.1-27.6 2.4-7.1 6.7-14.1 15.6-14.1 7 0 14.1 7.2 14.1 14.8 0 31.8-32.4 53.8-65.8 53.8-36.5 0-57.2-15.4-68.5-41-5.5-12.2-9.1-24.9-9.1-42.4-.1-49.2 28.6-83.3 77-83.3zm180.3 44.3c8 0 12.7 5.2 12.7 13.4 0 3.3-2.6 9.9-3.6 13.4L625.1 173c-8.6 22.1-15.1 37.4-44.5 37.4-14 0-26.1-1.2-26.1-13.4 0-7 5.3-10.6 12.7-10.6 1.4 0 3.6.7 5 .7 2.1 0 3.6.7 5 .7 14.7 0 16.8-15.1 22-25.5l-37.4-92.6c-2.1-5-3.6-8.4-3.6-11.3 0-8.2 6.4-14.1 14.8-14.1 9.5 0 13.3 7.5 15.6 15.6l24.7 73.5L638 65.5c3.9-10.5 4.2-21.2 16.8-21.2z")
diff --git a/website/_includes/_mixins.jade b/website/_includes/_mixins.jade
index cb1207673..04faf8993 100644
--- a/website/_includes/_mixins.jade
+++ b/website/_includes/_mixins.jade
@@ -1,381 +1,9 @@
+//- ----------------------------------
+//- 💫 INCLUDES > MIXINS
+//- ----------------------------------
+
include _functions
-//- Mixins
-//- ============================================================================
-
-//- Sections for content pages
- id - [string] id, can be headline id as it's being prefixed (optional)
- block - section content (block and inline elements)
-
-mixin section(id)
- section.section(id=(id) ? 'section-' + id : '')&attributes(attributes)
- block
-
-
-//- Flexbox grid to align children elements
- ...style - [strings] flexbox CSS classes without prefix (optional)
- block - container content (block and inline elements)
-
-mixin grid(...style)
- .grid(class=prefixArgs(style, 'grid--'))&attributes(attributes)
- block
-
-mixin grid-col(...style)
- .grid-col(class=prefixArgs(style, 'grid-col--'))&attributes(attributes)
- block
-
-
-//- Aside
- headline - [string] Headline of aside (optional)
- block - aside content (inline elements)
-
-mixin aside(headline)
- span.aside(data-label=headline)&attributes(attributes)
- span.aside-body
- block
-
-
-//- Paragraphs
- block - paragraph content (inline elements)
-
-mixin lead
- p.text-lead&attributes(attributes)
- block
-
-
-//- Various text styles
- block - text (inline elements)
-
-mixin example
- p.text-example&attributes(attributes)
- block
-
-mixin source
- span.text-source&attributes(attributes)
- block
-
-mixin label(...style)
- span(class=(style != '') ? prefixArgs(style, 'label-') : 'label')&attributes(attributes)
- block
-
-
-//- Headings with optional permalinks
- id - [string] unique id (optional, no permalink without id)
- source - [string] link for source button (optional)
- block - headline text (inline elements)
-
-mixin headline(level, id, source)
- if level == 2
- +h2(id, source)
- block
-
- else if level == 3
- +h3(id, source)
- block
-
- else if level == 4
- +h4(id, source)
- block
-
- else if level == 5
- +h5(id, source)
- block
-
- else
- +h6(id, source)
- block
-
-mixin h1(id, source)
- h1(id=id)&attributes(attributes)
- +permalink(id, source)
- block
-
-mixin h2(id, source)
- h2(id=id)&attributes(attributes)
- +permalink(id, source)
- block
-
-mixin h3(id, source)
- h3(id=id)&attributes(attributes)
- +permalink(id, source)
- block
-
-mixin h4(id, source)
- h4(id=id)&attributes(attributes)
- +permalink(id, source)
- block
-
-mixin h5(id, source)
- h5(id=id)&attributes(attributes)
- +permalink(id, source)
- block
-
-mixin h6(id, source)
- h6(id=id)&attributes(attributes)
- +permalink(id, source)
- block
-
-mixin permalink(id, source)
- if id
- a.permalink(href='#' + id)
- block
-
- else
- block
-
- if source
- +button('secondary', 'small', 'source')(href=source target='_blank') Source
-
-
-//- Button
- element - [string] specifies HTML element, 'button' or 'link'
- ...style - [strings] button CSS classes without prefix (optional)
- block - button text (inline elements)
-
-mixin button(type, ...style)
- - var classname = 'button-' + type + ' ' + ((style) ? prefixArgs(style, 'button--') : '')
-
- a.button(class=classname)&attributes(attributes)
- block
-
-mixin form-button(type, ...style)
- - var classname = 'button-' + type + ' ' + ((style) ? prefixArgs(style, 'button--') : '')
- button(class=classname)&attributes(attributes)
- block
-
-
-//- Input
- placeholder - [string] placeholder for input field (optional)
- value - [string] value of input field (optional)
-
-mixin input(placeholder, value)
- input.input(placeholder=placeholder value=value)&attributes(attributes)
-
-
-//- Icon
- name - [string] icon name, refers to CSS classes
- size - [string] 'medium' or 'large' (optional)
- type - [string] 'button' (optional)
- block - description, if as a text node to the icon element it prevents line
- breaks between icon and text (inline elements)
-
-mixin icon(type, ...style)
- span(class='icon-' + type + ' ' + prefixArgs(style, 'icon--') aria-hidden="true")&attributes(attributes)
- block
-
-
-//- Image for illustration purposes
- file - [string] file name (in /img)
- alt - [string] descriptive alt text (optional)
- caption - [string] image caption (optional)
-
-mixin image(file, alt, caption)
- figure.image-container&attributes(attributes)
- img(src='img/' + file alt=alt)
-
- if caption
- figcaption.text-caption=caption
-
- block
-
-
-//- Illustrated code view
- title - [string] title of window
-
-mixin code-demo(title)
- .x-terminal&attributes(attributes)
- .x-terminal-icons: span
- .x-terminal-title=title
- +code.x-terminal-code
- block
-
-
-//- Data table
- head - [array] column headings (optional, without headings no table
- head is displayed)
- ...style - [strings] table CSS classes without prefix (optional)
- block - only +row (tr)
-
-mixin table(head, ...style)
- table.table(class=prefixArgs(style, 'table--'))&attributes(attributes)
-
- if head
- tr.table-row
- each column in head
- th.table-head-cell=column
-
- block
-
-
-//- Data table row
- block - only +cell (td)
-
-mixin row(...style)
- tr.table-row(class=prefixArgs(style, 'table-cell--'))&attributes(attributes)
- block
-
-
-//- Data table cell
- block - table cell content (inline elements)
-
-mixin cell(...style)
- td.table-cell(class=prefixArgs(style, 'table-cell--'))&attributes(attributes)
- block
-
-
-//- General list (ordered and unordered)
- type - [string] 'numbers', 'letters', 'roman' (optional)
- start - [integer] starting point of list (1 = list starts at 1 or A)
- block - only +item (li)
-
-mixin list(type, start)
- if type
- ol.list(class='list--' + type style=(start === 0 || start) ? 'counter-reset: li ' + (start - 1) : '')&attributes(attributes)
- block
-
- else
- ul.list.list--bullets&attributes(attributes)
- block
-
-
-//- List item
- block - item text (inline elements)
-
-mixin item
- li.list-item&attributes(attributes)
- block
-
-
-//- Blockquote
- source - [string] quote source / author (optional)
- link - [string] link to quote source (only with source, optional)
- block - quote text (inline elements)
-
-mixin quote(source, link)
- blockquote.quote&attributes(attributes)
- p.quote-text
- block
-
- if source && link
- | #[a.quote-source(href=link target='_blank')=source]
-
- else if source && !link
- .quote-source !{source}
-
-
-//- Pullquotes with optional 'tweet this' function
- tweet - [string] text to be tweeted (optional)
- block - pullquote text (inline elements, only shown if no tweet text)
-
-mixin pullquote(tweet)
- blockquote.quote&attributes(attributes)
-
- p.quote-text-strong
- if tweet
- | !{tweet} #[a.quote-source(href=twitterShareUrl(current.path, tweet) target='_blank') Tweet this]
-
- else
- block
-
-
-//- Code block
- use as +code(args). to preserve whitespace and prevent code interprettion
- language - [string] language for syntax highlighting (optional, default:
- 'python', see Prism for options: http://prismjs.com)
- label - [string] code block headline (optional)
- block - code text (inline elements)
-
-
-mixin code(language, label)
- pre.code-block(class='lang-' + (language || default_syntax) data-label=label)&attributes(attributes)
- code.code-inline
- block
-
-
-//- Infobox for notes and alerts
- label - [string] infobox headline (optional)
- block - infobox text (inline and block elements)
-
-mixin infobox(label)
- .box.box--info(data-label=label)&attributes(attributes)
- p.box-body
- block
-
-
-//- Alerts for notes and updates
-
-mixin alert(button)
- .alert&attributes(attributes)
- block
-
- if button
- +form-button('primary', 'small')(onclick='this.parentNode.parentNode.removeChild(this.parentNode);')=button
-
- else
- button.alert-close(onclick='this.parentNode.parentNode.removeChild(this.parentNode);')
-
-
-
-//- Embeds
- border - [boolean] add border to embed container
- caption - [string] embed caption
- block - embed content (inline and block elements)
-
-mixin embed(border, caption)
- figure.embed(class=(border) ? 'embed--border' : '')&attributes(attributes)
- block
-
- if caption
- figcaption.embed-caption=caption
-
-
-//- displaCy
- filename - [string] name of file in displacy folder (no .html)
- caption - [string] caption (optional)
- height - [integer] iframe height in px (optional)
-
-mixin displacy(filename, caption, height)
- +embed(true, caption).embed--displacy
- iframe(src='/blog/displacy/' + filename height=height)
-
-
-//- Logo, imports SVG
- size - [string] 'tiny', 'small', 'regular' or 'large'
-
-mixin logo(size)
- !=partial('/_includes/_logo', { logo_size: size })
-
-
-//- element with date
- input - [string] date in the format YYYY-MM-DD
- type - [string] 'timestamp' (optional)
-
-mixin date(input, type)
- - var dates = convertDate(input)
-
- if type == 'timestamp'
- time=dates.timestamp
-
- else
- time(datetime=dates.timestamp)=dates.full
-
-
-//- Divider
- type - [string] divider tpe
-
-mixin divider(type, ...style)
- div(class=((type) ? 'divider-' + type : 'divider') + ' ' + prefixArgs(style, 'divider--'))&attributes(attributes)
- if type == 'text'
- .divider-text-content
- block
-
- else
- block
-
-
-//- Twitter Share Button
- tweet - [string] text to be shared with the tweet
-
-mixin tweet(tweet)
- a(href=twitterShareUrl(current.path, tweet) target='_blank' aria-label="Shsre on Twitter")
- +icon('twitter', 'large')
+include _mixins/_base
+include _mixins/_components
+include _mixins/_headlines
diff --git a/website/_includes/_mixins/_base.jade b/website/_includes/_mixins/_base.jade
new file mode 100644
index 000000000..4ab081daa
--- /dev/null
+++ b/website/_includes/_mixins/_base.jade
@@ -0,0 +1,42 @@
+//- ----------------------------------
+//- 💫 MIXINS > BASE
+//- ----------------------------------
+
+//- External Link
+
+mixin a(url, trusted)
+ a(href=url target="_blank" rel=(!trusted) ? "noopener nofollow" : "")&attributes(attributes)
+ block
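+
+//- Usage: +a("https://twitter.com/" + SOCIAL.twitter) Twitter
+//- links without trusted=true get rel="noopener nofollow"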
+
+
+//- Sections for content pages
+    id     - [string] section id (optional, rendered with a "section-" prefix)
+ block - section content (block and inline elements)
+
+mixin section(id)
+ section.o-block(id=(id) ? 'section-' + id : '')&attributes(attributes)
+ block
+
+
+//- Date
+ input - [string] date in the format YYYY-MM-DD
+
+mixin date(input)
+ - var date = new Date(input)
+ - var months = [ 'January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December' ]
+
+ time(datetime=JSON.parse(JSON.stringify(date)))&attributes(attributes)=months[date.getMonth()] + ' ' + date.getDate() + ', ' + date.getFullYear()
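+
+//- e.g. +date("2016-03-31") renders a <time> element reading
+//- "March 31, 2016" with a machine-readable datetime attribute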
+
+
+//- Grid Container
+
+mixin grid(...style)
+ .o-grid.o-block(class=prefixArgs(style, "o-grid"))&attributes(attributes)
+ block
+
+
+//- Grid Column
+
+mixin grid-col(...style)
+ .o-grid__col(class=prefixArgs(style, "o-grid__col"))&attributes(attributes)
+ block
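+
+//- e.g. +grid("center") yields class="o-grid o-block o-grid--center",
+//- with +grid-col(...) columns nested inside its block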
diff --git a/website/_includes/_mixins/_components.jade b/website/_includes/_mixins/_components.jade
new file mode 100644
index 000000000..c7deffe67
--- /dev/null
+++ b/website/_includes/_mixins/_components.jade
@@ -0,0 +1,112 @@
+//- ----------------------------------
+//- 💫 MIXINS > COMPONENTS
+//- ----------------------------------
+
+//- Aside
+
+mixin aside(label)
+ span.c-aside.u-text-small(role="complementary")&attributes(attributes)
+ span.c-aside__label.u-text-label.u-text-strong.u-color-theme=label
+ block
+
+
+//- Button
+
+mixin button(url, trusted, ...style)
+ a.c-button.u-text-label(href=url class=prefixArgs(style, "c-button") role="button" target="_blank" rel=(!trusted) ? "noopener nofollow" : "")&attributes(attributes)
+ block
+
+
+//- Code
+
+mixin code(language, label, small)
+ pre.c-code-block(class="lang-#{(language || DEFAULT_SYNTAX)} #{small ? '' : 'o-block'}")&attributes(attributes)
+ if label
+ span.c-code-block__label.u-text-label.u-text-strong=label
+
+ code.c-code-block__content(class=small ? "u-code-small" : "u-code-regular")
+ block
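+
+//- Usage: +code("python", "Example"). (the trailing dot preserves
+//- whitespace and stops Jade from interpreting the block)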
+
+
+//- Icon
+
+mixin icon(name, size)
+ - var size = size || 20
+
+ svg.o-icon(aria-hidden="true" viewBox="0 0 #{size} #{size}" width=size height=size)&attributes(attributes)
+ use(xlink:href="/assets/img/icons.svg#icon-#{name}")
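+
+//- e.g. +icon("github", 18) renders an 18x18 SVG referencing
+//- the #icon-github symbol in /assets/img/icons.svg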
+
+
+//- Image for illustration purposes
+ file - [string] file name (in /assets/img)
+ alt - [string] descriptive alt text (optional)
+ caption - [string] image caption (optional)
+
+mixin image(file, alt, caption)
+ figure.o-block&attributes(attributes)
+ img(src="/assets/img/#{file}" alt=(alt || caption) width="800")
+
+ if caption
+ figcaption.u-text-small=caption
+
+ block
+
+
+//- Label
+
+mixin label()
+ .u-text-label.u-text-strong.u-color-theme&attributes(attributes)
+ block
+
+
+//- List
+
+mixin list(type, start)
+ if type
+ ol.c-list.o-block(class="c-list--#{type}" style=(start === 0 || start) ? "counter-reset: li #{(start - 1)}" : "")&attributes(attributes)
+ block
+
+ else
+ ul.c-list.c-list--bullets.o-block&attributes(attributes)
+ block
+
+
+//- List item
+
+mixin item()
+ li.c-list__item.u-text-regular&attributes(attributes)
+ block
+
+
+//- Table
+
+mixin table(head)
+ table.c-table.o-block.has-aside&attributes(attributes)
+
+ if head
+ +row
+ each column in head
+ th.c-table__head-cell.u-text-label.u-text-strong=column
+
+ block
+
+
+//- Table row
+
+mixin row(...style)
+ tr.c-table__row(class=prefixArgs(style, "c-table__cell"))&attributes(attributes)
+ block
+
+
+//- Table cell
+
+mixin cell(...style)
+ td.c-table__cell.u-text-regular.has-aside(class=prefixArgs(style, "c-table__cell"))&attributes(attributes)
+ block
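+
+//- e.g. +table(["Tag", "Description"])
+//-     +row
+//-         +cell DT
+//-         +cell determiner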
+
+
+//- Tag
+
+mixin tag()
+ span.u-text-tag.u-text-label.u-color-theme.u-text-strong.u-padding-small
+ block
diff --git a/website/_includes/_mixins/_headlines.jade b/website/_includes/_mixins/_headlines.jade
new file mode 100644
index 000000000..93396dca5
--- /dev/null
+++ b/website/_includes/_mixins/_headlines.jade
@@ -0,0 +1,49 @@
+//- ----------------------------------
+//- 💫 MIXINS > HEADLINES
+//- ----------------------------------
+
+//- Headlines Helper Mixin
+
+mixin headline(level)
+ if level == 1
+ h1.u-heading-1&attributes(attributes)
+ block
+
+ else if level == 2
+ h2.u-heading-2&attributes(attributes)
+ block
+
+ else if level == 3
+ h3.u-heading-3&attributes(attributes)
+ block
+
+ else if level == 4
+ h4.u-heading-4&attributes(attributes)
+ block
+
+ else if level == 5
+ h5.u-heading-5&attributes(attributes)
+ block
+
+
+//- Permalink rendering
+
+mixin permalink(id)
+ if id
+ a.u-permalink(id=id href="##{id}")
+ +icon("link").u-permalink__icon
+ block
+
+ else
+ block
+
+
+//- Headlines
+
+mixin h(level, id, source)
+ +headline(level)&attributes(attributes)
+ +permalink(id)
+ block
+
+ if source
+ +button(source, false, "secondary").u-text-small.u-float-right Source
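+
+//- e.g. +h(2, "installation") Installation renders an h2 with a
+//- #installation permalink; pass a source URL to add a "Source" button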
diff --git a/website/_includes/_nav.jade b/website/_includes/_nav.jade
deleted file mode 100644
index d94e1fcb8..000000000
--- a/website/_includes/_nav.jade
+++ /dev/null
@@ -1,32 +0,0 @@
-include _mixins
-
-- var nav_active_class = 'nav-item--active'
-
-
-//- Top Navigation Bar
-//- ============================================================================
-
-nav#topnav.nav
-
- a(href='/')
- !=partial('_logo', { logo_size: 'small' })
-
- input(type='checkbox' class='nav-checkbox' id='nav-checkbox' aria-hidden='true')
-
- ul.nav-menu
-
- if standalone
- li.nav-item(class=nav_active_class)=title
- li.nav-item: a(href='/') Back to website
-
- else
- li.nav-item(class=(_section == 'index') ? nav_active_class : '')
- a(href='/') Home
-
- each slug, item in navigation
- li.nav-item(class=(_section == slug) ? nav_active_class : '')
- a(href='/' + slug)=item
-
- li.nav-item: a(href='https://github.com/' + profiles.github + '/spaCy' target='_blank') GitHub
-
- label(for='nav-checkbox' class='nav-button' arial-label='Toggle Navigation')
diff --git a/website/_includes/_navigation.jade b/website/_includes/_navigation.jade
new file mode 100644
index 000000000..a4318d628
--- /dev/null
+++ b/website/_includes/_navigation.jade
@@ -0,0 +1,26 @@
+//- ----------------------------------
+//- 💫 INCLUDES > TOP NAVIGATION
+//- ----------------------------------
+
+include _mixins
+
+nav.c-nav.u-text-label.js-nav
+
+ a(href='/')
+ !=partial("_includes/_logo", { logo_size: 'small' })
+
+ ul.c-nav__menu
+ li.c-nav__menu__item(class=(current.path[0] == 'index') ? "is-active" : "")
+ a(href='/') Home
+
+ li.c-nav__menu__item(class=(current.path[0] == 'docs') ? "is-active" : "")
+ a(href="/docs") Docs
+
+ li.c-nav__menu__item
+ a(href="https://demos.explosion.ai" target="_blank") Demos
+
+ li.c-nav__menu__item
+ a(href="https://explosion.ai/blog" target="_blank") Blog
+
+ li.c-nav__menu__item
+ a(href="https://github.com/" + SOCIAL.github + "/spaCy" target="_blank") #[+icon("github", 18)] #[span.u-hidden-sm GitHub]
diff --git a/website/_includes/_newsletter.jade b/website/_includes/_newsletter.jade
index a4c7b38b0..80dc01c22 100644
--- a/website/_includes/_newsletter.jade
+++ b/website/_includes/_newsletter.jade
@@ -1,21 +1,20 @@
+//- ----------------------------------
+//- 💫 INCLUDES > NEWSLETTER SIGNUP
+//- ----------------------------------
+
include _mixins
-//- Newsletter Signup
-//- ============================================================================
+.o-block.u-text-center.u-padding.u-border-top
-.block.text-center(class=(divider) ? 'divider-' + divider : '')
+ +label Sign up for the spaCy newsletter
+ h3.u-heading-1 Stay in the loop!
+ p.u-text-large Receive updates about new releases, tutorials and more.
- +label('strong') Sign up for the spaCy newsletter
- +h3.h1 Stay in the loop!
- +lead Receive updates about new releases, tutorials and more.
+ form#mc-embedded-subscribe-form.o-inline-list(action="https://spacy.us12.list-manage.com/subscribe/post?u=83b0498b1e7fa3c91ce68c3f1&id=89ad33e698" method="post" name="mc-embedded-subscribe-form" target="_blank" novalidate)
+ input#mce-EMAIL.u-border.u-padding-small.u-text-regular(type="email" name="EMAIL" placeholder="Your email address")
- form(action='https://spacy.us12.list-manage.com/subscribe/post?u=83b0498b1e7fa3c91ce68c3f1&id=89ad33e698' method='post' id='mc-embedded-subscribe-form' name='mc-embedded-subscribe-form' target="_blank" novalidate)
-
- +grid('align-center', 'valign-center', 'margin-right')
- +input('Your email address', '')(type='email' name='EMAIL' id='mce-EMAIL')
+ //- Spam bot protection
+ div(style="position: absolute; left: -5000px;" aria-hidden="true")
+ input(type="text" name="b_83b0498b1e7fa3c91ce68c3f1_89ad33e698" tabindex="-1" value="")
- //- Spam bot protection
- div(style='position: absolute; left: -5000px;' aria-hidden='true')
- input(type='text' name='b_83b0498b1e7fa3c91ce68c3f1_89ad33e698' tabindex='-1' value='')
-
- +form-button('primary', 'small')(type='submit' name='subscribe' id='mc-embedded-subscribe') Sign up
+ button#mc-embedded-subscribe.c-button.c-button--primary.u-text-label(type="submit" name="subscribe") Sign up
diff --git a/website/_includes/_profile.jade b/website/_includes/_profile.jade
deleted file mode 100644
index 658df8306..000000000
--- a/website/_includes/_profile.jade
+++ /dev/null
@@ -1,21 +0,0 @@
-//- Author profile
-//- ============================================================================
-
-include _mixins
-
-if authors[author]
- .box.box--info(data-label=label)
- if image != false
- .box-image: img(src='/assets/img/profile_' + author + ( (style) ? '_' + style : '' ) + '.png')
-
- p.box-body.text-big
- if authors[author].name
- strong=authors[author].name + ' '
-
- if authors[author].description
- !=authors[author].description
-
- if authors[author].links
- span.box-links
- each link, index in authors[author].links
- a(href=link[0] target='_blank') #[+icon(index)=(link[1] || index)]
diff --git a/website/_includes/_scripts.jade b/website/_includes/_scripts.jade
new file mode 100644
index 000000000..839cd39f9
--- /dev/null
+++ b/website/_includes/_scripts.jade
@@ -0,0 +1,14 @@
+//- ----------------------------------
+//- 💫 INCLUDES > SCRIPTS
+//- ----------------------------------
+
+each script in SCRIPTS
+ script(src="/assets/js/" + script + ".js", type="text/javascript")
+
+if landing
+ script(async src="https://platform.twitter.com/widgets.js" charset="utf-8")
+
+if environment == "deploy"
+ script window.ga=window.ga||function(){(ga.q=ga.q||[]).push(arguments)};ga.l=+new Date; ga('create', '#{ANALYTICS}', 'auto'); ga('send', 'pageview');
+
+ script(async src="https://www.google-analytics.com/analytics.js")
diff --git a/website/_includes/_sidebar.jade b/website/_includes/_sidebar.jade
index 43542e58c..051ca28e1 100644
--- a/website/_includes/_sidebar.jade
+++ b/website/_includes/_sidebar.jade
@@ -1,12 +1,13 @@
+//- ----------------------------------
+//- 💫 INCLUDES > SIDEBAR
+//- ----------------------------------
+
include _mixins
-//- Sidebar
-//- ============================================================================
-
-nav#sidebar.sidebar: .sidebar-body
- each items, menu in sidebar
-
- ul.sidebar-menu(data-label=menu)
- each item in items
- li.sidebar-menu-item
- a(href=item[1] data-section=(item[2]) ? 'section-' + item[2] : null)=item[0]
+nav.c-sidebar.js-sidebar
+ .c-sidebar__body.u-text-regular
+ each items, menu in sidebar
+ ul.o-block-small
+ li.u-text-label.u-color-subtle=menu
+ each item in items
+ li: a(href=item[1] data-section=(item[2]) ? "section-" + item[2] : "")=item[0]
diff --git a/website/_includes/_teaser.jade b/website/_includes/_teaser.jade
deleted file mode 100644
index 5a26b405a..000000000
--- a/website/_includes/_teaser.jade
+++ /dev/null
@@ -1,22 +0,0 @@
-include _mixins
-
-//- Teaser
-//- ============================================================================
-
-.teaser
- if teaser.image
- a(href=(_root || '') + slug target=teaser.target)
- .image-ratio: img(src=(_root || '') + 'img/' + ((is_featured) ? teaser.image.file : teaser.image.file_small || teaser.image.file))
-
- +h2
- if is_featured
- div: .label-strong Featured
-
- a.block(href=(_root || '') + slug target=teaser.target class=(is_featured) ? 'h1' : 'h2')=teaser.title
-
- p(class=(is_featured) ? 'text-lead' : '')=teaser.description
-
-if showmeta != false
- .text-meta.text-small
- //- | by #{authors[teaser.author].name} on
- | #[+date(teaser.date)]
diff --git a/website/_layout.jade b/website/_layout.jade
index f823203d1..03bdb7eb8 100644
--- a/website/_layout.jade
+++ b/website/_layout.jade
@@ -1,43 +1,59 @@
+//- ----------------------------------
+//- 💫 GLOBAL LAYOUT
+//- ----------------------------------
+
include _includes/_mixins
-- var _section = current.path[0]
-- var _site = current.source
-
-- var is_article = ( (_section == 'blog' && _site != 'index') || template == 'article')
-- var has_asides = (is_article || (_section == 'docs' && asides != false) || asides)
-
-
-//- Layout
-//- ============================================================================
-
doctype html
html(lang="en")
- !=partial("_includes/_head", { _section: _section })
+ title=(current.path[0] == "index") ? SITENAME + " | " + SLOGAN : title + " | " + SITENAME
- body.body
- !=partial('_includes/_nav', { _section: _section, _site: _site })
+ meta(charset="utf-8")
+ meta(name="viewport" content="width=device-width, initial-scale=1.0")
+ meta(name="referrer" content="always")
+ meta(name="description" content=description)
- if landing
- != yield
-
- else
- !=partial('_includes/_header', { is_article: is_article })
+ meta(property="og:type" content="website")
+ meta(property="og:site_name" content=sitename)
+ meta(property="og:url" content="#{SITE_URL}/#{current.path.join('/')}")
+ meta(property="og:title" content=title)
+ meta(property="og:description" content=description)
+ meta(property="og:image" content="/assets/img/social.png")
- if sidebar
- !=partial('_includes/_sidebar')
+ meta(name="twitter:card" content="summary_large_image")
+ meta(name="twitter:site" content="@" + SOCIAL.twitter)
+ meta(name="twitter:title" content=title)
+ meta(name="twitter:description" content=description)
+ meta(name="twitter:image" content="/assets/img/social.jpg")
- main.main(class='#{(sidebar) ? "main--sidebar" : "" } #{(has_asides) ? "main--asides" : "" } #{(is_article) ? "main--article" : "" }')
+ link(rel="shortcut icon" href="/assets/img/favicon.ico")
+ link(rel="icon" type="image/x-icon" href="/assets/img/favicon.ico")
+ link(href="/assets/css/style.css" rel="stylesheet")
- if is_article
- !=partial('_includes/_article', { _section: _section })
+ body
+ include _includes/_navigation
+
+ if !landing
+ header.o-header.u-pattern.u-text-center
+ if current.path[1] == "tutorials"
+ h2.u-heading-1.u-text-shadow Tutorials
else
- !=yield
+ +h(1).u-text-shadow=title
- !=partial('_includes/_footer')
+ if sidebar
+ include _includes/_sidebar
- each script in scripts
- script(src='/assets/js/' + script + '.js', type='text/javascript')
+ main.o-content(class="#{(sidebar) ? 'o-content--sidebar' : '' } #{((current.path[0] == 'docs' && asides != false) || asides) ? 'o-content--asides' : '' }")
+ if current.path[1] == "tutorials"
+ +h(1)=title
- !=partial('_includes/_analytics')
+ !=yield
+
+ else
+ !=yield
+
+ include _includes/_footer
+
+ include _includes/_scripts
diff --git a/website/assets/css/_base/_animations.sass b/website/assets/css/_base/_animations.sass
index f30123ffb..9812f904b 100644
--- a/website/assets/css/_base/_animations.sass
+++ b/website/assets/css/_base/_animations.sass
@@ -1,23 +1,23 @@
-// Animations
-// ============================================================================
+//- ----------------------------------
+//- 💫 BASE > ANIMATIONS
+//- ----------------------------------
-// Element slides in from the top
+//- Fade in
+
+@keyframes fadeIn
+ from
+ opacity: 0
+
+ to
+ opacity: 1
+
+
+//- Element slides in from the top
@keyframes slideInDown
from
- transform: translate3d(0, -100%, 0);
- visibility: visible;
+ transform: translate3d(0, -100%, 0)
+ visibility: visible
to
- transform: translate3d(0, 0, 0);
-
-
-// Element blinks
-
-@keyframes blink
- 0%
- opacity: 1
- 50%
- opacity: 0
- 100%
- opacity: 1
+ transform: translate3d(0, 0, 0)
diff --git a/website/assets/css/_base/_fonts.sass b/website/assets/css/_base/_fonts.sass
index 456303eff..cef2cc4f6 100644
--- a/website/assets/css/_base/_fonts.sass
+++ b/website/assets/css/_base/_fonts.sass
@@ -1,112 +1,43 @@
-// Fonts
-// ============================================================================
+//- ----------------------------------
+//- 💫 BASE > FONTS
+//- ----------------------------------
-// Lato (regular, italic, bold, bold italic)
+// Source Sans Pro
@font-face
- font-family: 'Lato'
+ font-family: "Source Sans Pro"
font-style: normal
font-weight: 400
- src: url('../fonts/lato-regular.eot')
- src: url('../fonts/lato-regular.eot?#iefix') format('embedded-opentype'), url('../fonts/lato-regular.woff2') format('woff2'), url('../fonts/lato-regular.woff') format('woff'), url('../fonts/lato-regular.ttf') format('truetype'), url('../fonts/lato-regular.svg#latoregular') format('svg')
- unicode-range: U+0100-024F, U+1E00-1EFF, U+20A0-20AB, U+20AD-20CF, U+2C60-2C7F, U+A720-A7FF
+ src: url("../fonts/sourcesanspro-regular.eot")
+ src: url("../fonts/sourcesanspro-regular.eot?#iefix") format("embedded-opentype"), url("../fonts/sourcesanspro-regular.woff2") format("woff2"), url("../fonts/sourcesanspro-regular.woff") format("woff"), url("../fonts/sourcesanspro-regular.ttf") format("truetype"), url("../fonts/sourcesanspro-regular.svg#source_sans_proregular") format("svg")
@font-face
- font-family: 'Lato'
- font-style: normal
- font-weight: 400
- src: url('../fonts/lato-regular.eot')
- src: url('../fonts/lato-regular.eot?#iefix') format('embedded-opentype'), url('../fonts/lato-regular.woff2') format('woff2'), url('../fonts/lato-regular.woff') format('woff'), url('../fonts/lato-regular.ttf') format('truetype'), url('../fonts/lato-regular.svg#latoregular') format('svg')
- unicode-range: U+0000-00FF, U+0131, U+0152-0153, U+02C6, U+02DA, U+02DC, U+2000-206F, U+2074, U+20AC, U+2212, U+2215, U+E0FF, U+EFFD, U+F000
-
-@font-face
- font-family: 'Lato'
+ font-family: "Source Sans Pro"
font-style: italic
font-weight: 400
- src: url('../fonts/lato-italic.eot')
- src: url('../fonts/lato-italic.eot?#iefix') format('embedded-opentype'), url('../fonts/lato-italic.woff2') format('woff2'), url('../fonts/lato-italic.woff') format('woff'), url('../fonts/lato-italic.ttf') format('truetype'), url('../fonts/lato-italic.svg#latoitalic') format('svg')
- unicode-range: U+0100-024F, U+1E00-1EFF, U+20A0-20AB, U+20AD-20CF, U+2C60-2C7F, U+A720-A7FF
+ src: url("../fonts/sourcesanspro-italic.eot")
+ src: url("../fonts/sourcesanspro-italic.eot?#iefix") format("embedded-opentype"), url("../fonts/sourcesanspro-italic.woff2") format("woff2"), url("../fonts/sourcesanspro-italic.woff") format("woff"), url("../fonts/sourcesanspro-italic.ttf") format("truetype"), url("../fonts/sourcesanspro-italic.svg#source_sans_proitalic") format("svg")
@font-face
- font-family: 'Lato'
- font-style: italic
- font-weight: 400
- src: url('../fonts/lato-italic.eot')
- src: url('../fonts/lato-italic.eot?#iefix') format('embedded-opentype'), url('../fonts/lato-italic.woff2') format('woff2'), url('../fonts/lato-italic.woff') format('woff'), url('../fonts/lato-italic.ttf') format('truetype'), url('../fonts/lato-italic.svg#latoitalic') format('svg')
- unicode-range: U+0000-00FF, U+0131, U+0152-0153, U+02C6, U+02DA, U+02DC, U+2000-206F, U+2074, U+20AC, U+2212, U+2215, U+E0FF, U+EFFD, U+F000
-
-@font-face
- font-family: 'Lato'
+ font-family: "Source Sans Pro"
font-style: normal
font-weight: 700
- src: url('../fonts/lato-bold.eot')
- src: url('../fonts/lato-bold.eot?#iefix') format('embedded-opentype'), url('../fonts/lato-bold.woff2') format('woff2'), url('../fonts/lato-bold.woff') format('woff'), url('../fonts/lato-bold.ttf') format('truetype'), url('../fonts/lato-bold.svg#latobold') format('svg')
- unicode-range: U+0100-024F, U+1E00-1EFF, U+20A0-20AB, U+20AD-20CF, U+2C60-2C7F, U+A720-A7FF
+ src: url("../fonts/sourcesanspro-bold.eot")
+ src: url("../fonts/sourcesanspro-bold.eot?#iefix") format("embedded-opentype"), url("../fonts/sourcesanspro-bold.woff2") format("woff2"), url("../fonts/sourcesanspro-bold.woff") format("woff"), url("../fonts/sourcesanspro-bold.ttf") format("truetype"), url("../fonts/sourcesanspro-bold.svg#source_sans_probold") format("svg")
@font-face
- font-family: 'Lato'
- font-style: normal
- font-weight: 700
- src: url('../fonts/lato-bold.eot')
- src: url('../fonts/lato-bold.eot?#iefix') format('embedded-opentype'), url('../fonts/lato-bold.woff2') format('woff2'), url('../fonts/lato-bold.woff') format('woff'), url('../fonts/lato-bold.ttf') format('truetype'), url('../fonts/lato-bold.svg#latobold') format('svg')
- unicode-range: U+0000-00FF, U+0131, U+0152-0153, U+02C6, U+02DA, U+02DC, U+2000-206F, U+2074, U+20AC, U+2212, U+2215, U+E0FF, U+EFFD, U+F000
-
-@font-face
- font-family: 'Lato'
+ font-family: "Source Sans Pro"
font-style: italic
font-weight: 700
- src: url('../fonts/lato-bolditalic.eot')
- src: url('../fonts/lato-bolditalic.eot?#iefix') format('embedded-opentype'), url('../fonts/lato-bolditalic.woff2') format('woff2'), url('../fonts/lato-bolditalic.woff') format('woff'), url('../fonts/lato-bolditalic.ttf') format('truetype'), url('../fonts/lato-bolditalic.svg#latobolditalic') format('svg')
- unicode-range: U+0100-024F, U+1E00-1EFF, U+20A0-20AB, U+20AD-20CF, U+2C60-2C7F, U+A720-A7FF
+ src: url("../fonts/sourcesanspro-bolditalic.eot")
+ src: url("../fonts/sourcesanspro-bolditalic.eot?#iefix") format("embedded-opentype"), url("../fonts/sourcesanspro-bolditalic.woff2") format("woff2"), url("../fonts/sourcesanspro-bolditalic.woff") format("woff"), url("../fonts/sourcesanspro-bolditalic.ttf") format("truetype"), url("../fonts/sourcesanspro-bolditalic.svg#source_sans_probold_italic") format("svg")
+
+
+// Source Code Pro
@font-face
- font-family: 'Lato'
- font-style: italic
- font-weight: 700
- src: url('../fonts/lato-bolditalic.eot')
- src: url('../fonts/lato-bolditalic.eot?#iefix') format('embedded-opentype'), url('../fonts/lato-bolditalic.woff2') format('woff2'), url('../fonts/lato-bolditalic.woff') format('woff'), url('../fonts/lato-bolditalic.ttf') format('truetype'), url('../fonts/lato-bolditalic.svg#latobolditalic') format('svg')
- unicode-range: U+0000-00FF, U+0131, U+0152-0153, U+02C6, U+02DA, U+02DC, U+2000-206F, U+2074, U+20AC, U+2212, U+2215, U+E0FF, U+EFFD, U+F000
-
-
-// Work Sans (regular, semibold, bold)
-
-@font-face
- font-family: 'Work Sans'
- font-style: normal
- font-weight: 400
- src: url('../fonts/worksans-regular.eot')
- src: url('../fonts/worksans-regular.eot?#iefix') format('embedded-opentype'), url('../fonts/worksans-regular.woff2') format('woff2'), url('../fonts/worksans-regular.woff') format('woff'), url('../fonts/worksans-regular.ttf') format('truetype'), url('../fonts/worksans-regular.svg#worksansregular') format('svg')
-
-@font-face
- font-family: 'Work Sans'
- font-style: normal
- font-weight: 600
- src: url('../fonts/worksans-semibold.eot')
- src: url('../fonts/worksans-semibold.eot?#iefix') format('embedded-opentype'), url('../fonts/worksans-semibold.woff2') format('woff2'), url('../fonts/worksans-semibold.woff') format('woff'), url('../fonts/worksans-semibold.ttf') format('truetype'), url('../fonts/worksans-semibold.svg#worksanssemibold') format('svg')
-
-@font-face
- font-family: 'Work Sans'
- font-style: normal
- font-weight: 700
- src: url('../fonts/worksans-bold.eot')
- src: url('../fonts/worksans-bold.eot?#iefix') format('embedded-opentype'), url('../fonts/worksans-bold.woff2') format('woff2'), url('../fonts/worksans-bold.woff') format('woff'), url('../fonts/worksans-bold.ttf') format('truetype'), url('../fonts/worksans-bold.svg#worksansbold') format('svg')
-
-
-// Source Code Pro (semibold)
-
-@font-face
- font-family: 'Source Code Pro'
+ font-family: "Source Code Pro"
font-style: normal
font-weight: 600
- src: url('../fonts/sourcecodepro-semibold.eot')
- src: url('../fonts/sourcecodepro-semibold.eot?#iefix') format('embedded-opentype'), url('../fonts/sourcecodepro-semibold.woff') format('woff'), url('../fonts/sourcecodepro-semibold.ttf') format('truetype'), url('../fonts/sourcecodepro-semibold.svg#sourcecodepro_semibold') format('svg')
-
-
-// Icomoon (regular)
-
-@font-face
- font-family: 'Icomoon'
- font-style: normal
- font-weight: 400
- src: url('../fonts/icomoon.eot?nt9usq')
- src: url('../fonts/icomoon.eot?nt9usq#iefix') format('embedded-opentype'), url('../fonts/icomoon.ttf?nt9usq') format('truetype'), url('../fonts/icomoon.woff?nt9usq') format('woff'), url('../fonts/icomoon.svg?nt9usq#icomoon') format('svg')
+ src: url("../fonts/sourcecodepro-semibold.eot")
+ src: url("../fonts/sourcecodepro-semibold.eot?#iefix") format("embedded-opentype"), url("../fonts/sourcecodepro-semibold.woff") format("woff"), url("../fonts/sourcecodepro-semibold.ttf") format("truetype"), url("../fonts/sourcecodepro-semibold.svg#sourcecodepro_semibold") format("svg")
diff --git a/website/assets/css/_base/_grid.sass b/website/assets/css/_base/_grid.sass
index 93ad49b6d..06e72ef63 100644
--- a/website/assets/css/_base/_grid.sass
+++ b/website/assets/css/_base/_grid.sass
@@ -1,131 +1,45 @@
-// Grid - Variables
-// ============================================================================
+//- ----------------------------------
+//- 💫 BASE > GRID
+//- ----------------------------------
-$grid-cols : $columns
-$grid-padding : 2.25rem
-$grid-margin : 1rem
+//- Grid container
-
-// Grid - Style
-// ============================================================================
-
-// Blocks
-
-p,
-.block
- @extend .has-aside
- margin-bottom: 5rem
-
-.no-block.no-block
- margin-bottom: 0
-
-
-// Responsive containers
-
-.responsive-container
- max-width: 100%
- overflow: auto
- width: 100%
-
-
-// Flexbox grid container
-// .grid--wrap - wraps children if bigger than the container
-// .grid--space-between - aligns children horizontally, adds space between them
-// .grid--space-around - aligns children horizontally, adds space around them
-// .grid--align-center - aligns children horizontally and centers them
-// .grid--align-right - aligns children horizontally on the right
-// .grid--valign-bottom - aligns children vertically at the bottom
-// .grid-padding - adds padding to children
-// .grid--margin-right - adds right margin to children
-// .grid--margin-left - adds left margin to children
-// .grid--block - extends block style
-
-.grid,
-.grid-col
- align-items: flex-start
+.o-grid
display: flex
flex-wrap: wrap
- &.grid--nowrap,
- &.grid-col--nowrap
- flex-wrap: nowrap
-
- &.grid--space-between,
- &.grid-col--space-between
+ @include breakpoint(min, sm)
+ flex-direction: row
+ align-items: stretch
justify-content: space-between
- &.grid--space-around,
- &.grid-col--space-around
- justify-content: space-around
- &.grid--align-center,
- &.grid-col--align-center
- justify-content: center
+//- Grid column
- &.grid--align-right,
- &.grid-col--align-right
- justify-content: flex-end
+.o-grid__col
+ $grid-gutter: 2rem
- &.grid--valign-center,
- &.grid-col--valign-center
- align-items: center
-
- &.grid--valign-bottom,
- &.grid-col--valign-bottom
- align-items: flex-end
-
- &.grid--valign-space-between,
- &.grid-col--valign-space-between
- align-items: space-between
-
- &.grid--text-center,
- &.grid-col--text-center
- text-align: center
-
- &.grid--block,
- &.grid-col--block
- @extend .block
-
-
-.grid
- &--padding > *
- padding: $grid-padding
-
- &--margin-right > *
- margin-right: $grid-margin
-
- &--margin-left > *
- margin-left: $grid-margin
-
-
-.grid-col
+ margin-top: $grid-gutter
overflow: hidden
+ @include breakpoint(min, sm)
+ display: flex
+ flex: 0 0 100%
+ flex-direction: column
+ flex-wrap: wrap
+
+ @each $mode, $count in $grid
+ &.o-grid__col--#{$mode}
+ $percentage: calc(#{100% / $count} - #{$grid-gutter})
+ flex: 0 0 $percentage
+ max-width: $percentage
+
+ @include breakpoint(max, xs)
+ flex: 0 0 100%
+ flex-flow: column wrap
+
+ // Fix overflow issue in old browsers
+
& > *
flex-shrink: 1
max-width: 100%
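+
+//- Usage sketch (map values assumed): with $grid defined in the variables
+//- file as e.g. (half: 2, third: 3, quarter: 4), the loop above emits
+//- .o-grid__col--half { flex: 0 0 calc(50% - 2rem); max-width: calc(50% - 2rem) }
+//- and so on, used in Jade as: .o-grid > .o-grid__col.o-grid__col--half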
-
-
-// Responsive grid elements
-// adapted from Gridly, https://github.com/IonicaBizau/gridly
-
-@media (min-width: #{$screen-size-medium})
- .grid
- flex-direction: row
- align-items: stretch
-
- .grid-col
- display: flex
- flex: 0 0 100%
- flex-direction: column
-
- @each $grid-mode, $grid-percentage in $grid-cols
- &--#{$grid-mode}
- flex: 0 0 $grid-percentage
- max-width: $grid-percentage
-
-
-@media(max-width: #{$screen-size-medium})
- .grid-col.grid-col
- flex: 0 0 100%
- flex-flow: column wrap
diff --git a/website/assets/css/_base/_layout.sass b/website/assets/css/_base/_layout.sass
new file mode 100644
index 000000000..b08b8910f
--- /dev/null
+++ b/website/assets/css/_base/_layout.sass
@@ -0,0 +1,41 @@
+//- ----------------------------------
+//- 💫 BASE > LAYOUT
+//- ----------------------------------
+
+//- HTML
+
+html
+ @include breakpoint(min, lg)
+ font-size: $type-base
+
+ @include breakpoint(max, md)
+ font-size: $type-base * 0.8
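+
+//- breakpoint() is assumed to come from the mixins file with the signature
+//- breakpoint($mode, $size); it wraps its body in a media query, so
+//- breakpoint(min, lg) compiles to @media (min-width: ...) { html { ... } }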
+
+//- Body
+
+body
+ display: flex
+ flex-flow: row wrap
+ animation: fadeIn 0.25s ease
+ background: $color-back
+ color: $color-front
+
+
+//- Paragraphs
+
+p
+ @extend .u-text-regular, .o-block, .has-aside
+
+
+//- Links
+
+main p a, main table a, main li a, .c-aside a
+ @extend .u-link
+
+
+//- Selection
+
+::selection
+ background: $color-theme
+ color: $color-back
+ text-shadow: none
diff --git a/website/assets/css/_base/_objects.sass b/website/assets/css/_base/_objects.sass
new file mode 100644
index 000000000..914a372c7
--- /dev/null
+++ b/website/assets/css/_base/_objects.sass
@@ -0,0 +1,75 @@
+//- ----------------------------------
+//- 💫 BASE > OBJECTS
+//- ----------------------------------
+
+//- Containers
+
+.o-content
+ flex: 1 1 auto
+ padding: $nav-height 4rem 8rem
+ width: $content-width - $aside-width
+ max-width: 100%
+
+ @include breakpoint(min, md)
+ &.o-content--asides
+ padding-left: 5rem
+ padding-right: $aside-width + $aside-padding * 2
+
+//- Header
+
+.o-header
+ display: flex
+ justify-content: center
+ flex-flow: column nowrap
+ padding: 3rem 5rem
+ margin-top: $nav-height
+ width: 100%
+ min-height: 250px
+
+
+//- Footer
+
+.o-footer
+ position: relative
+ padding: 5rem 0
+ overflow: auto
+ width: 100%
+ z-index: 200
+
+
+//- Blocks
+
+.o-block
+ margin-bottom: 5rem
+
+.o-block-small
+ margin-bottom: 2rem
+
+.o-responsive
+ overflow: auto
+ width: 100%
+ max-width: 100%
+
+.o-icon
+ vertical-align: middle
+
+
+//- Inline List
+
+.o-inline-list > *
+ display: inline
+ margin-bottom: 3rem
+
+ &:not(:last-child)
+ margin-right: 3rem
+
+
+//- Logo
+
+.o-logo
+ @include size(100%, auto)
+ fill: currentColor
+
+ @each $name, $size in $logo-sizes
+ &.o-logo--#{$name}
+ width: $size
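+
+//- Sketch (sizes assumed): with $logo-sizes: (small: 8rem, large: 16rem)
+//- in the variables file, the loop emits .o-logo--small and .o-logo--large,
+//- used in Jade as: svg.o-logo.o-logo--small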
diff --git a/website/assets/css/_base/_reset.sass b/website/assets/css/_base/_reset.sass
index c6e034246..33895f460 100644
--- a/website/assets/css/_base/_reset.sass
+++ b/website/assets/css/_base/_reset.sass
@@ -1,59 +1,92 @@
-// Reset - Variables
-// ============================================================================
+//- ----------------------------------
+//- 💫 BASE > RESET
+//- ----------------------------------
-$reset-font-size : $base-font-size
-$reset-font-size-small : $base-font-size * 0.8
-
-
-// Reset - Style
-// ============================================================================
-
-// normalize.css
-
-@import ../_vendors/normalize
-
-
-// Clearfix
-
-%clearfix
- *zoom: 1
-
- &:before,
- &:after
- content: ''
- display: table
-
- &:after
- clear: both
-
-
-// Custom Resets
+//- adapted from "normalize.css" by Nicolas Gallagher & Jonathan Neal
+//- https://github.com/necolas/normalize.css
*
- border: 0
box-sizing: border-box
- margin: 0
- outline: 0
padding: 0
+ margin: 0
+ border: 0
+ outline: 0
-webkit-font-smoothing: antialiased
html
- font-size: $reset-font-size
+ font-family: sans-serif
+ -ms-text-size-adjust: 100%
+ -webkit-text-size-adjust: 100%
- @media (max-width: #{$screen-size-small})
- font-size: $reset-font-size-small
+body
+ margin: 0
-header,
-footer,
-figure
- width: 100%
+article, aside, details, figcaption, figure, footer, header, main, menu, nav,
+section, summary, progress
+ display: block
+
+a
+ background-color: transparent
+ color: inherit
+ text-decoration: none
+
+ &:active,
+ &:hover
+ outline: 0
+
+abbr[title]
+ border-bottom: none
+ text-decoration: underline
+ text-decoration: underline dotted
+
+b, strong
+ font-weight: inherit
+ font-weight: bolder
+
+small
+ font-size: 80%
+
+sub, sup
+ position: relative
+ font-size: 65%
+ line-height: 0
+ vertical-align: baseline
+
+sup
+ top: -0.5em
+
+sub
+ bottom: -0.15em
+
+img
+ border: 0
+ height: auto
max-width: 100%
+svg
+ color-interpolation-filters: sRGB
+ fill: currentColor
+
+ &:not(:root)
+ overflow: hidden
+
+hr
+ box-sizing: content-box
+ overflow: visible
+ height: 0
+
+pre
+ overflow: auto
+
+code, pre
+ font-family: monospace, monospace
+ font-size: 1em
+
table
- border-collapse: collapse
- max-width: 100%
text-align: left
width: 100%
+ max-width: 100%
+ border-collapse: collapse
td,
th
@@ -63,13 +96,9 @@ ul,
ol
list-style: none
-a
- color: inherit
- text-decoration: none
+input,
+button
+ appearance: none
-img
- height: auto
- max-width: 100%
-
-[data-label]:before
- content: attr(data-label)
+button
+ cursor: pointer
diff --git a/website/assets/css/_base/_typography.sass b/website/assets/css/_base/_typography.sass
deleted file mode 100644
index 96fe0e5af..000000000
--- a/website/assets/css/_base/_typography.sass
+++ /dev/null
@@ -1,174 +0,0 @@
-// Typography - Variables
-// ============================================================================
-
-$font-size : 1.6rem
-$headings : h1, h2, h3, h4, h5, h6
-$heading-font-family : $font-secondary
-$heading-font-sizes : 5rem, 2.8rem, 2.4rem, 2rem, 1.8rem, 1.6rem
-$heading-font-weight : bold
-$heading-padding : $height-navbar 0 1.5rem 0
-$heading-text-shadow : 2px 2px
-$line-height : 1.375
-$page-title-size : 6rem
-$color-highlight : color($theme)
-$color-highlight-dark : color($theme, dark)
-
-
-// Typography - Style
-// ============================================================================
-
-// Placeholders
-
-%font-base
- font-size: $font-size
- line-height: $line-height
-
-%font-big
- font-size: round-dec($font-size * 1.25)
- line-height: round-dec($line-height * 1.15)
-
-%font-lead
- font-size: round-dec($font-size * 1.75)
- line-height: round-dec($line-height * 1.2)
-
-%font-small
- font-size: round-dec($font-size * 0.75)
- line-height: round-dec($line-height * 1.1)
-
-%font-medium-small
- font-size: round-dec($font-size * 0.875)
- line-height: round-dec($line-height * 1.1)
-
-%font-primary
- font-family: $font-primary
-
-%font-secondary
- font-family: $font-secondary
-
-%font-code
- font-family: $font-code
-
-
-// Text styles
-// .text - regular text
-// .text-big - bigger style for blogs
-// .text-lead - large style for intro paragraphs
-// .text-small - smaller font size
-// .text-quote - style for quotation
-// .text-meta - slightly fainter but emphasized font for meta text
-// .text-meta-strong - emphasized meta text
-// .text-label - text for labels
-// .text-credit - meta text with copyright symbol for image credits
-// .text-caption - text for figure captions
-// .text-source - text for bibliography sources
-// .text-example - text for linguistic examples
-
-.text
- @extend %font-primary, %font-base
-
-.text-big
- @extend %font-primary, %font-big
-
-.text-lead
- @extend %font-primary, %font-lead
-
-.text-medium-small
- @extend %font-medium-small
-
-.text-small
- @extend %font-small
-
-.text-quote
- @extend .text-big
- font-style: italic
-
-.text-meta
- @extend %font-secondary, %font-base
- font-style: normal
- font-weight: 600
- text-transform: uppercase
-
-.text-meta-strong
- @extend .text-meta
- font-weight: bold
-
-.text-label
- @extend %font-secondary
- font-size: round-dec($font-size * 0.875)
- line-height: $line-height
- font-weight: normal
- text-transform: uppercase
-
-.text-credit
- @extend .text-meta, .text-small
- @include icon(copyright, currentColor, 0 0.25em 0 0)
- color: color(grey, dark)
-
-.text-caption
- @extend .text-small
- color: color(grey, dark)
- padding-top: 2rem
-
-.text-source
- @extend .text-quote
- display: block
-
-.text-example
- @extend .text-lead
- color: color(grey, dark)
- font-style: italic
-
-.text-code
- @extend %font-code
- font-size: round-dec($font-size * 0.875)
- font-weight: 600
- font-style: normal
- line-height: round-dec($line-height * 1.65)
-
-.text-center
- text-align: center
-
-
-// Headings - Style
-// ============================================================================
-
-// Global heading style
-
-%heading
- font-weight: $heading-font-weight
- font-family: $heading-font-family
- position: relative
-
-
-// Headings
-
-.h0
- font-size: $page-title-size
- line-height: round-dec($line-height * 0.9)
- margin: 0
- text-shadow: $heading-text-shadow $color-highlight-dark
-
-@for $i from 1 through length($headings)
- $heading: nth($headings, $i)
-
- #{$heading},
- .#{$heading}
- @extend %heading
- font-size: nth($heading-font-sizes, $i)
-
- @if $i == 1
- .#{$heading}
- padding: 0
-
- @else
- #{$heading}
- padding: $heading-padding
-
-
-// Selection - Style
-// ============================================================================
-
-*::selection
- text-shadow: none
- background: $color-highlight
- color: color(white)
diff --git a/website/assets/css/_base/_utilities.sass b/website/assets/css/_base/_utilities.sass
new file mode 100644
index 000000000..c51b0dfcc
--- /dev/null
+++ b/website/assets/css/_base/_utilities.sass
@@ -0,0 +1,134 @@
+//- ----------------------------------
+//- 💫 BASE > UTILITIES
+//- ----------------------------------
+
+//- Text
+
+%text
+ font-family: $font-primary
+ line-height: 1.5
+
+.u-text-regular
+ @extend %text
+ font-size: 1.6rem
+
+.u-text-medium
+ @extend %text
+ font-size: 2rem
+
+.u-text-small
+ @extend %text
+ font-size: 1.2rem
+
+.u-text-large
+ @extend %text
+ font-size: 2.8rem
+
+.u-text-label
+ @extend %text
+ font-size: 1.4rem
+ font-weight: normal
+ text-transform: uppercase
+
+.u-text-strong
+ font-weight: bold
+
+.u-code-regular
+ font: normal normal 1.3rem/#{2} $font-code
+
+.u-code-small
+ font: normal normal 0.85em $font-code
+ line-height: inherit
+
+.u-link
+ color: $color-theme
+ border-bottom: 1px solid
+
+
+//- Headings
+
+.u-heading-0
+ font: normal bold 7rem/#{1} $font-primary
+
+@each $level, $size in (1: 5.5, 2: 3, 3: 2.6, 4: 2, 5: 1.8)
+ .u-heading-#{$level}
+ font: normal bold #{$size}rem/#{1.25} $font-primary
+ margin-bottom: 2rem
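+
+//- For reference, the loop above emits e.g.
+//- .u-heading-2 { font: normal bold 3rem/1.25 $font-primary; margin-bottom: 2rem }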
+
+.u-heading-label
+ @extend .u-text-label
+ margin-bottom: 1rem
+
+
+//- Permalinks
+
+.u-permalink
+ position: relative
+
+ &:target
+ display: inline-block
+ padding-top: $nav-height * 1.5
+
+ & + *
+ margin-top: $nav-height * 1.5
+
+.u-permalink__icon
+ @include position(absolute, bottom, left, 0.25em, -3.25rem)
+ @include size(2rem)
+ color: $color-subtle
+
+ .u-permalink:hover &
+ color: $color-subtle-dark
+
+ .u-permalink:active &
+ color: $color-theme
+
+
+//- Layout
+
+.u-text-center
+ text-align: center
+
+.u-float-right
+ float: right
+
+.u-padding-small
+ padding: 0.5em 0.75em
+
+.u-padding-medium
+ padding: 2rem
+
+.u-padding
+ padding: 5rem
+
+.u-border
+ border: 1px solid $color-subtle
+ border-radius: 3px
+
+.u-border-top
+ border-top: 1px solid $color-subtle
+
+.u-border-bottom
+ border-bottom: 1px solid $color-subtle
+
+.u-color-theme
+ color: $color-theme
+
+.u-color-subtle
+ color: $color-subtle-dark
+
+.u-text-shadow
+ text-shadow: 2px 2px $color-theme-dark
+
+.u-pattern
+ background: $color-theme url("/assets/img/pattern.jpg")
+ color: $color-back
+
+
+.u-hidden
+ display: none
+
+@each $breakpoint in (sm, md)
+ .u-hidden-#{$breakpoint}
+ @include breakpoint(max, $breakpoint)
+ display: none
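+
+//- Generates .u-hidden-sm and .u-hidden-md, which hide an element at and
+//- below the given breakpoint, e.g. in Jade:
+//- p.u-hidden-sm Only visible on larger screens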
diff --git a/website/assets/css/_components/_alerts.sass b/website/assets/css/_components/_alerts.sass
deleted file mode 100644
index c12c8adbb..000000000
--- a/website/assets/css/_components/_alerts.sass
+++ /dev/null
@@ -1,33 +0,0 @@
-// Variables
-// ============================================================================
-
-$alert-background : color(white)
-$alert-border : 1px solid
-$alert-close-size : 2.25rem
-$alert-color : color($theme)
-$alert-padding : 1.5rem 2rem
-
-
-// Style
-// ============================================================================
-
-// Alert boxes
-// .alert - alert container
-// .alert-close - icon to close alert
-
-.alert
- @include position(fixed, bottom, left, 0, 0)
- align-items: center
- background: $alert-background
- border-top: $alert-border
- color: $alert-color
- display: flex
- justify-content: space-between
- padding: $alert-padding
- width: 100%
- z-index: 200
-
-.alert-close
- @include icon(close, currentColor, 0, $alert-close-size)
- background: transparent
- color: $alert-color
diff --git a/website/assets/css/_components/_asides.sass b/website/assets/css/_components/_asides.sass
index cd406d8e7..0f57eaacc 100644
--- a/website/assets/css/_components/_asides.sass
+++ b/website/assets/css/_components/_asides.sass
@@ -1,63 +1,31 @@
-// Variables
-// ============================================================================
+//- ----------------------------------
+//- 💫 COMPONENTS > ASIDES
+//- ----------------------------------
-$aside-block-margin : 4rem
-$aside-border : 1px solid color(grey)
-$aside-font-size : 1.4rem
-$aside-font-family : $font-primary
-$aside-line-height : 1.65
-$aside-margin-side : 1.5rem
-$aside-opacity : 0.4
-$aside-padding : 0 2rem
-$aside-transition : $transition
-$aside-width : $width-aside
+//- Aside
-
-// Style
-// ============================================================================
-
-// Aside content
-// :hover - show aside on hover
-// [data-label] - style of aside headlines
-
-.aside
- @extend .text-small
-
- @media (max-width: #{$screen-size-large})
- display: block
- margin-bottom: $aside-block-margin
- margin-top: $aside-block-margin
-
- @media (min-width: #{$screen-size-large})
- @include position(absolute, top, left, 0, calc(100% + #{$aside-margin-side}))
- border-left: $aside-border
- opacity: $aside-opacity
- padding: $aside-padding
- transition: $aside-transition
- white-space: normal
+.c-aside
+ @include breakpoint(min, md)
+ @include position(absolute, top, left, 0, calc(100% + #{$aside-padding}))
+ border-left: 1px solid $color-subtle
+ opacity: 0.5
+ transition: opacity 0.25s ease
+ padding: 0 $aside-padding
width: $aside-width
- &:hover
- opacity: 1
+ &:hover
+ opacity: 1
- &[data-label]:before
- @extend .label-strong
+ @include breakpoint(max, sm)
display: block
+ margin: type(5) 0
- .block
- margin-bottom: ($aside-block-margin / 2)
- .code-inline
- @extend .code-small
+//- Aside label
- .code-block
- @extend .code-block-small
-
- .table &
- top: initial
-
-.aside-body
+.c-aside__label
display: block
+ margin-bottom: 1rem
// Aside container
@@ -65,5 +33,5 @@ $aside-width : $width-aside
.has-aside
position: relative
- &:hover > .aside
- opacity: 1
+ &:hover > .c-aside
+ opacity: 1
diff --git a/website/assets/css/_components/_boxes.sass b/website/assets/css/_components/_boxes.sass
deleted file mode 100644
index f9e1a43a9..000000000
--- a/website/assets/css/_components/_boxes.sass
+++ /dev/null
@@ -1,39 +0,0 @@
-// Boxes - Variables
-// ============================================================================
-
-$box-border : 1px solid
-$box-padding : 2em
-
-
-// Boxes - Style
-// ============================================================================
-
-// Box for notes and alerts
-// [data-label] - style of box labels
-// .box--info - emphasized style for info boxes
-// .box-image - optional image, like profile image
-// .box-body - body text of box
-// .box-links - link list
-
-.box
- @extend .block, .text
- padding: ($box-padding / 2) 0
-
- &[data-label]:before
- @extend .label-box
-
- &--info
- background: color($theme, light)
- border: $box-border darken(color($theme, light), 4)
-
-.box-image
- @extend .image-profile
- float: right
-
-.box-body
- margin-bottom: 0
- padding: $box-padding
-
-.box-links
- @extend .link-list
- padding: $box-padding
diff --git a/website/assets/css/_components/_buttons.sass b/website/assets/css/_components/_buttons.sass
index d0c6276cd..a8c3cf11b 100644
--- a/website/assets/css/_components/_buttons.sass
+++ b/website/assets/css/_components/_buttons.sass
@@ -1,75 +1,24 @@
-// Buttons - Variables
-// ============================================================================
+//- ----------------------------------
+//- 💫 COMPONENTS > BUTTONS
+//- ----------------------------------
-$button-border-radius : $border-radius
-$button-border-style : solid
-$button-border-width : 2px
-$button-color : color($theme)
-$button-opacity-hover : 0.85
-$button-padding : 1em 1.5em 0.75em
-$button-margin : 0.5rem
+.c-button
+ display: inline-block
+ font-weight: bold
+ padding: 0.5em 0.75em
+ border: 2px solid
+ border-radius: 3px
+ transition: opacity 0.25s ease
+ &:hover
+ opacity: 0.8
-// Buttons - Style
-// ============================================================================
+ &.c-button--primary
+ background: $color-theme
+ color: $color-back
+ border-color: $color-theme
-// :active - effect on active
-// :hover - effect on hover
-// .button--small - small style
-// .button--#{$social-button} - social button styled according to site
-// .button-primary - primary style
-// .button-secondary - secondary style
-// .button-tertiary - tertiary style
-
-.button
- @extend .text-meta
- border: $button-border-width $button-border-style
- border-radius: $button-border-radius
- padding: $button-padding
-
- &--small
- @extend .text-small
- border-width: ($button-border-width / 2)
-
- &--source
- float: right
- margin-left: $button-margin
-
- @each $social-button in $social-buttons
- &.button--#{$social-button}
- background: color(social, $social-button)
- border-color: color(social, $social-button)
- color: color(white)
-
-.button-primary
- @extend .button
- background-color: $button-color
- border-color: $button-color
- color: color(white)
-
- &:hover,
- &:active
- opacity: $button-opacity-hover
-
-.button-secondary
- @extend .button
- background: color(white)
- color: $button-color
-
- &:hover,
- &:active
- background: $button-color
- color: color(white)
- opacity: 1
-
-.button-tertiary
- @extend .button
- background: color(white)
- color: color(grey, dark)
- border-color: currentColor
-
- &:hover,
- &:active
- border-color: $button-color
- color: $button-color
- opacity: 1
+ &.c-button--secondary
+ background: $color-back
+ color: $color-theme
+ border-color: $color-theme
diff --git a/website/assets/css/_components/_cards.sass b/website/assets/css/_components/_cards.sass
deleted file mode 100644
index 72f5a2222..000000000
--- a/website/assets/css/_components/_cards.sass
+++ /dev/null
@@ -1,44 +0,0 @@
-// Cards - Variables
-// ============================================================================
-
-$card-border-radius : $border-radius
-$card-border : 1px solid color(grey)
-$card-figure-background : color(grey, light)
-$card-figure-ratio : $image-ratio
-$card-padding : 8%
-$card-shadow-light : 0 0 5px color(grey, light)
-$card-shadow : 0 0 5px color(grey)
-$card-transition : $transition
-
-
-// Cards - Style
-// ============================================================================
-
-// .card - card element, ideally used within grids
-// .card-strong - highlighted style
-// .card-figure - graphic element within card
-
-.card
- border: $card-border
- border-radius: $border-radius
- flex: 1
- overflow: auto
- padding: $card-padding
- width: 100%
-
-.card-strong
- @extend .card
- box-shadow: $card-shadow
- transition: $card-transition
-
- &:hover
- box-shadow: $card-shadow-light
-
-.card-figure
- background: $card-figure-background
- border-radius: $card-border-radius
- display: block
- height: 0
- margin-bottom: $card-padding
- overflow: hidden
- padding-bottom: 50%
diff --git a/website/assets/css/_components/_code.sass b/website/assets/css/_components/_code.sass
index 4979697e6..73cd5e7c8 100644
--- a/website/assets/css/_components/_code.sass
+++ b/website/assets/css/_components/_code.sass
@@ -1,87 +1,81 @@
-// Variables
-// ============================================================================
+//- ----------------------------------
+//- 💫 COMPONENTS > CODE
+//- ----------------------------------
-$code-background : color(grey, light)
-$code-background-dark : color(black)
-$code-block-border : 4px solid color($theme)
-$code-block-padding : 2em 3em
-$code-block-padding-small : 1em 1.5em
-$code-inline-margin : 0.25em 0.5em 0 0.5em
-$code-inline-padding : 0 0.25rem
-$code-line-height : 2.25
-$code-text-shadow : 1px 1px 0 color(white)
+//- Code block
-
-// Style
-// ============================================================================
-
-// General code style
-// .code-inline - source code
-// .code-small - smaller style
-
-.code-inline
- @extend .text-code
- direction: ltr
- text-shadow: $code-text-shadow
- white-space: pre
- word-spacing: normal
- word-break: normal
-
-.code-small
- @extend .text-small
- padding-top: 0
- padding-bottom: 0
-
-
-// Inline Code
-// :not(.code-block) - style for items outside of code blocks
-
-code
- @extend .code-inline
-
-:not(.code-block) > .code-inline
- background: $code-background
- display: inline
- line-height: inherit
- margin: $code-inline-margin
- padding: $code-inline-padding
-
-
-// Code blocks in preformatted text
-// .code-block - block of source code
-// .code - text content of code blocks
-// [data-label] - style of code labels
-// .code-block-small - smaller style
-// .code-block-dark - alternative dark style
-
-.code-block
- @extend .block
- background: $code-background
- border-left: $code-block-border
- max-width: 100%
- overflow: auto
+.c-code-block
+ background: $color-subtle-light
padding: 1em 0
- white-space: pre
+ border-left: 5px solid $color-theme
+ overflow: auto
width: 100%
+ max-width: 100%
+ white-space: pre
+ direction: ltr
- .code-inline
- display: block
- padding: $code-block-padding
+ :not(.o-block)
+ margin-bottom: 2rem
- &[data-label]:before
- @extend .label-box
-.code-block-small
- @extend .code-small
+//- Code block content
- .code-inline
- padding: $code-block-padding-small
+.c-code-block__content
+ display: block
+ padding: 2em 2.5em
-.code-block-dark
- @extend .code-block
- background: $code-background-dark
- border: none
- color: color(white)
- .code-inline
- text-shadow: none
+//- Code block label
+
+.c-code-block__label
+ display: inline-block
+ background: $color-theme
+ color: $color-back
+ padding: 1rem
+ margin-bottom: 1.5rem
+
+
+//- Inline code
+
+:not(.c-code-block) > code
+ @extend .u-code-small
+
+ background: $color-subtle-light
+ box-shadow: 1px 1px 0 $color-subtle
+ color: $color-front
+ padding: 0.15em 0.5em
+ margin: 0 0.25em
+ border-radius: 2px
+ text-shadow: 1px 1px 0 $color-back
+
+
+//- Syntax Highlighting
+
+[class*="language-"] .token
+ &.comment, &.prolog, &.doctype, &.cdata, &.punctuation
+ color: map-get($syntax-highlighting, comment)
+
+ &.property, &.tag, &.constant, &.symbol, &.deleted
+ color: map-get($syntax-highlighting, tag)
+
+ &.boolean, &.number
+ color: map-get($syntax-highlighting, number)
+
+ &.selector, &.attr-name, &.string, &.char, &.builtin, &.inserted
+ color: map-get($syntax-highlighting, selector)
+
+ @at-root .language-css .token.string,
+ &.operator, &.entity, &.url, &.variable
+ color: map-get($syntax-highlighting, operator)
+
+ &.atrule, &.attr-value, &.function
+ color: map-get($syntax-highlighting, function)
+
+ &.regex, &.important
+ color: map-get($syntax-highlighting, regex)
+
+ &.keyword
+ color: map-get($syntax-highlighting, keyword)
+
+ &.italic
+ font-style: italic
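+
+//- $syntax-highlighting is assumed to be a map in the variables file from
+//- token type to color, e.g. (comment: #999999, keyword: #09a3d5, ...);
+//- map-get() looks up each token's color by key.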
diff --git a/website/assets/css/_components/_dividers.sass b/website/assets/css/_components/_dividers.sass
deleted file mode 100644
index be546281e..000000000
--- a/website/assets/css/_components/_dividers.sass
+++ /dev/null
@@ -1,72 +0,0 @@
-// Dividers - Variables
-// ============================================================================
-
-$divider-border : 1px solid color(grey)
-$divider-locations : top, bottom
-$divider-margin : 5rem
-$divider-padding : 3rem
-$divider-text-align : center
-
-
-// Dividers - Style
-// ============================================================================
-
-// General divider
-
-.divider
- @extend %clearfix
- width: 100%
- margin: $divider-margin 0
-
-@each $divider-location in $divider-locations
- %divider-#{$divider-location}
- @extend .divider
- border-#{$divider-location}: $divider-border
-
-
-// Divider above element
-// .divider-top - add divider to top of element
-// .divider-bottom - add divider to bottom of element
-// .divider-both - add divider to top and bottom of element
-
-.divider-top
- @extend %divider-top
- padding-top: $divider-padding
-
-.divider-bottom
- @extend %divider-bottom
- padding-bottom: $divider-padding
-
-.divider-both
- @extend .divider-top
- @extend .divider-bottom
-
-
-// Divider bar for text and links
-// .divider-bar - container element
-
-.divider-bar
- @extend %divider-top, %divider-bottom, .label
- margin: 0
- text-align: $divider-text-align
-
-
-// Divider with text
-// .divider-text - container element
-// .divider-text-content - text content
-
-.divider-text
- @extend %divider-top
- text-align: $divider-text-align
- margin: $divider-margin 0
-
- .divider-text-content
- background: color(white)
- display: inline-block
- line-height: 0.5
- margin: -0.5em 0 0 0
- padding: 0 0.5em
-
- & > *
- margin: 0
- padding: 0
diff --git a/website/assets/css/_components/_embeds.sass b/website/assets/css/_components/_embeds.sass
deleted file mode 100644
index 3c9c3466d..000000000
--- a/website/assets/css/_components/_embeds.sass
+++ /dev/null
@@ -1,35 +0,0 @@
-// Embeds - Variables
-// ============================================================================
-
-$embed-border : 1px solid color(grey)
-$embed-caption-align : center
-$embed-padding : 2rem
-$embed-displacy-min : 325px
-
-
-// Embeds - Style
-// ============================================================================
-
-// iframe - content of embed
-// .embed--border - embed with border
-// .embed--displacy - embed for displaCy visualization
-
-.embed
- @extend .block
- margin-left: 0
- padding: $embed-padding
-
- iframe
- max-width: 100%
- width: 100%
-
- &--border
- border: $embed-border
-
- &--displacy iframe
- min-height: $embed-displacy-min
-
-.embed-caption
- @extend .label
- display: block
- text-align: $embed-caption-align
diff --git a/website/assets/css/_components/_forms.sass b/website/assets/css/_components/_forms.sass
deleted file mode 100644
index 3e3d21ced..000000000
--- a/website/assets/css/_components/_forms.sass
+++ /dev/null
@@ -1,28 +0,0 @@
-// Form Elements - Variables
-// ============================================================================
-
-$input-border : 1px solid color(grey)
-$input-border-radius : $border-radius
-$input-color : color(grey, dark)
-$input-padding : 0.5em 0.75em
-$input-transition : $transition
-
-
-// Form Elements - Style
-// ============================================================================
-
-// Text input field
-// :hover - style on hover
-// :focus - style on focus
-
-.input
- @extend .text
- border: $input-border
- border-radius: $input-border-radius
- color: $input-color
- padding: $input-padding
- transition: $input-transition
-
- &:hover,
- &:focus
- border-color: currentColor
diff --git a/website/assets/css/_components/_icons.sass b/website/assets/css/_components/_icons.sass
deleted file mode 100644
index c40e92c01..000000000
--- a/website/assets/css/_components/_icons.sass
+++ /dev/null
@@ -1,39 +0,0 @@
-// Icons - Variables
-// ============================================================================
-
-$icon-padding : 0.5em
-$icon-large-size : 3rem
-$icon-medium-size : 1.75rem
-
-
-// Icons - Style
-// ============================================================================
-
-// .icon--secondary - non-colored version of icon
-// .icon--large - large version of graphic icon
-// .icon-#{$icon} - graphic icon
-
-%icon
- display: inline-block
- font-family: $font-icons
- font-style: normal
- font-weight: normal
- line-height: 1
-
-.icon
- display: inline-block
- vertical-align: middle
-
- &--secondary:before
- color: currentColor !important
-
- &--medium:before
- font-size: $icon-medium-size
-
- &--large:before
- font-size: $icon-large-size
-
-@each $icon, $unicode in $icons
- .icon-#{$icon}
- @extend .icon
- @include icon($icon, color(social, $icon), 0 $icon-padding)
diff --git a/website/assets/css/_components/_images.sass b/website/assets/css/_components/_images.sass
deleted file mode 100644
index d82eb41aa..000000000
--- a/website/assets/css/_components/_images.sass
+++ /dev/null
@@ -1,39 +0,0 @@
-// Images - Variables
-// ============================================================================
-
-$image-background : color(grey, light)
-$image-profile-margin : 1rem 3rem
-$image-profile-width : $width-profile
-$image-ratio : $image-ratio
-
-
-// Images - Style
-// ============================================================================
-
-// Image Containers
-// .image-container - container for figures and inline images
-// .image-hero - container for hero image for blog posts
-// .image-profile - container for profile photo
-
-.image-container
- @extend .block
- margin-left: 0
- margin-right: 0
-
-.image-profile
- @include size($image-profile-width)
- background: $image-background
- border-radius: 50%
- margin: $image-profile-margin
- overflow: hidden
- shape-outside: circle()
-
-
-// Global image ratio
-
-.image-ratio
- background: $image-background
- height: 0
- overflow: hidden
- padding-bottom: (100% / $image-ratio)
- width: 100%
diff --git a/website/assets/css/_components/_labels.sass b/website/assets/css/_components/_labels.sass
deleted file mode 100644
index 9dfdecc71..000000000
--- a/website/assets/css/_components/_labels.sass
+++ /dev/null
@@ -1,46 +0,0 @@
-// Labels - Variables
-// ============================================================================
-
-$label-border-radius : $border-radius
-$label-color : color(grey, dark)
-$label-color-light : color(grey, light)
-$label-color-dark : color($theme)
-$label-padding : 0.75em
-$label-padding-small : 0.25em 0.75em
-$label-margin-side : 1rem
-
-
-// Labels - Style
-// ============================================================================
-
-// .label - regular label
-// .label-strong - stronger version
-// .label-box - label in box with background
-// .label-tag - label in inline-tag style
-
-.label
- @extend .text-label
- color: $label-color
- display: inline-block
- padding: $label-padding 0
-
-.label-strong
- @extend .label
- color: $label-color-dark
- font-weight: bold
-
-.label-box
- @extend .label
- background: $label-color-dark
- color: color(white)
- font-weight: 600
- padding: $label-padding
-
-.label-tag
- @extend .label, .text-small
- background: $label-color-light
- border: 1px solid darken($label-color-light, 7.5)
- border-radius: $label-border-radius
- margin: 0 $label-margin-side 0 0
- padding: $label-padding-small
- vertical-align: text-top
diff --git a/website/assets/css/_components/_links.sass b/website/assets/css/_components/_links.sass
deleted file mode 100644
index f3ed9b208..000000000
--- a/website/assets/css/_components/_links.sass
+++ /dev/null
@@ -1,60 +0,0 @@
-// Links - Variables
-// ============================================================================
-
-$link-border : 1px solid currentColor
-$link-color : color($theme)
-$link-icon-color : color(grey, dark)
-$link-icon-color-hidden : color(grey)
-$link-icon-size : 2rem
-$link-list-padding : 0.5em
-$link-transition : $transition
-
-
-// Links - Style
-// ============================================================================
-
-.link
- color: $link-color
- transition: $link-transition
-
-.link-strong
- @extend .link
- border-bottom: $link-border
-
-p a,
-.table-cell a,
-.list-item a,
-.aside a
- @extend .link-strong
-
-
-// Permalinks
-// :before - indicate permalink with icon
-// :hover:before - hover effect on permalink indicator
-// :active:before - different styling of active permalink indicator
-
-.permalink
- @include icon(permalink, $link-icon-color-hidden, 0 $link-icon-size 0 0, $link-icon-size)
- position: relative
-
- &:before
- @include position(absolute, top, left, calc(50% - #{$link-icon-size / 2}), (-$link-icon-size * 1.5))
- transition: $link-transition
- width: $link-icon-size
-
- &:hover:before
- color: $link-icon-color
-
- &:active:before
- color: $link-color
-
-
-// List of links, like social profiles
-
-.link-list
- @extend .text-label
- font-weight: 600
-
- & > *
- display: inline-block
- padding: $link-list-padding
diff --git a/website/assets/css/_components/_lists.sass b/website/assets/css/_components/_lists.sass
index 71d07bfe7..479d3bf13 100644
--- a/website/assets/css/_components/_lists.sass
+++ b/website/assets/css/_components/_lists.sass
@@ -1,59 +1,32 @@
-// Lists - Variables
-// ============================================================================
+//- ----------------------------------
+//- 💫 COMPONENTS > LISTS
+//- ----------------------------------
-$list-alternatives : (letters, upper-latin), (roman, lower-roman)
-$list-icon-color : color($theme)
-$list-icon-size : 2.4rem
-$list-item-padding : 1em
-$list-margin-side : 5%
-$list-padding-side : 2rem
+//- List Container
+
+.c-list
+ @each $type, $counter in (numbers: decimal, letters: upper-latin, roman: lower-roman)
+ &.c-list--#{$type}
+ counter-reset: li
+
+ .c-list__item:before
+ content: counter(li, #{$counter}) '.'
-// Lists - Style
-// ============================================================================
+//- List Item
-// .list - list of items
-// .list--bullets - unordered list with bullets
-// .list--numbers - ordered list with numbers
-// .list--letters - ordered list with letters
-// .list--roman - ordered list with roman numerals
-// .list-item - list item
-
-.list
- @extend .block, .text
- padding-left: $list-margin-side
-
- &--bullets
- margin-left: $list-margin-side
-
- .list-item
- @include icon(bullet, none, 0 $list-padding-side 0 0, $list-icon-size)
-
- &--numbers
- counter-reset: li
- margin-left: $list-margin-side
-
- .list-item:before
- @extend .h3
- content: counter(li) '.'
- counter-increment: li
- padding-right: $list-padding-side
-
- @each $list-type, $list-counter in $list-alternatives
- &--#{$list-type}
- @extend .list--numbers
-
- .list-item:before
- content: counter(li, #{$list-counter}) '.'
-
-.list-item
- margin-bottom: $list-item-padding
- text-indent: -$list-margin-side
+.c-list__item
+ padding-left: 2rem
+ margin-bottom: 1em
+ margin-left: 1.25rem
&:before
- color: $list-icon-color
+ content: '\25CF'
display: inline-block
- line-height: 1
- text-align: center
- width: $list-icon-size
- vertical-align: middle
+ font-size: 1.25em
+ font-weight: bold
+ padding-right: 1.25rem
+ margin-left: -3.75rem
+ text-align: right
+ width: 2.5rem
+ counter-increment: li
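+
+//- Usage sketch: ul.c-list > li.c-list__item renders bulleted items;
+//- adding .c-list--numbers, .c-list--letters or .c-list--roman resets the
+//- counter and swaps the :before bullet for the respective counter style.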
diff --git a/website/assets/css/_components/_logo.sass b/website/assets/css/_components/_logo.sass
deleted file mode 100644
index a8c0fd2b9..000000000
--- a/website/assets/css/_components/_logo.sass
+++ /dev/null
@@ -1,36 +0,0 @@
-// Logo - Variables
-// ============================================================================
-
-$logo-size-large : nth($width-logo, 1)
-$logo-size-regular : nth($width-logo, 2)
-$logo-size-small : nth($width-logo, 3)
-$logo-size-tiny : nth($width-logo, 4)
-
-
-// Logo - Style
-// ============================================================================
-
-// path - SVG path of logo
-// .logo--small - smaller style
-// .logo--regular - regular style
-// .logo--large - bigger style
-
-.logo
- width: 100%
-
- path
- width: 100%
- fill: currentColor
-
- &--tiny
- vertical-align: text-bottom
- width: $logo-size-tiny
-
- &--small
- width: $logo-size-small
-
- &--regular
- width: $logo-size-regular
-
- &--large
- width: $logo-size-large
diff --git a/website/assets/css/_components/_misc.sass b/website/assets/css/_components/_misc.sass
index f1e131655..4abf29d01 100644
--- a/website/assets/css/_components/_misc.sass
+++ b/website/assets/css/_components/_misc.sass
@@ -1,88 +1,44 @@
-// Misc - Variables
-// ============================================================================
-
-$x-terminal-background : color(grey)
-$x-terminal-border-radius : $border-radius * 2
-$x-terminal-color : color(black)
-$x-terminal-cursor-animation : blink 0.9s infinite
-$x-terminal-cursor : '\258B'
-$x-terminal-icon-colors : color(red), color(green), color(yellow)
-$x-terminal-icon-size : 1em
-$x-terminal-padding : 0.75em
-
-$x-bubble-size : 35px
-$x-bubble-margin : 1rem
-
-
-// Misc - Style
-// ============================================================================
-
-// Terminal window illustration to display code
-// .x-terminal - terminal window
-// .x-terminal-title - title of terminal window
-// .x-terminal-icons - container for toolbar icons
-// .x-terminal-code - code box
-// .x-terminal-code:after - cursor in last line
-// %x-terminal-icon - general style of toolbar icons
+//- ----------------------------------
+//- 💫 COMPONENTS > MISC
+//- ----------------------------------
.x-terminal
- background: $x-terminal-background
- border-radius: $x-terminal-border-radius
- color: $x-terminal-color
+ background: $color-subtle
+ color: $color-front
+ border-radius: 10px
width: 100%
- &--cursor
- .code:after
- animation: $x-terminal-cursor-animation
- opacity: 1
- content: $x-terminal-cursor
-
-.x-terminal-title
- text-align: center
- padding: $x-terminal-padding
-
-.x-terminal-icons
- padding: $x-terminal-padding
+.x-terminal__icons
position: absolute
+ padding: 10px
+
+ &:before,
+ &:after,
+ span
+ @include size(15px)
+ display: inline-block
+ float: left
+ border-radius: 50%
+ margin-right: 10px
&:before
- @extend %x-terminal-icon
- content: ''
- background: nth($x-terminal-icon-colors, 1)
+ content: ""
+ background: #e4514f
span
- @extend %x-terminal-icon
- background: nth($x-terminal-icon-colors, 2)
+ background: #3ec930
&:after
- @extend %x-terminal-icon
- content: ''
- background: nth($x-terminal-icon-colors, 3)
+ content: ""
+ background: #f4c025
-.x-terminal-code
- @extend .code-block-dark
- border-bottom-left-radius: $x-terminal-border-radius
- border-bottom-right-radius: $x-terminal-border-radius
- padding: $x-terminal-padding * 2
- max-width: 100%
+.x-terminal__code
+ background: $color-front
+ color: $color-back
+ margin: 0
+ border: none
+ border-bottom-left-radius: 10px
+ border-bottom-right-radius: 10px
width: 100%
+ max-width: 100%
white-space: pre-wrap
-
- &.code-block
- margin-bottom: 0
-
-%x-terminal-icon
- @include size($x-terminal-icon-size)
- display: inline-block
- float: left
- border-radius: 50%
- margin-right: $x-terminal-padding
-
-
-// Bubble to display colors
-
-.x-bubble
- @include size($x-bubble-size)
- border-radius: 50%
- cursor: pointer
- margin-right: $x-bubble-margin
diff --git a/website/assets/css/_components/_navigation.sass b/website/assets/css/_components/_navigation.sass
new file mode 100644
index 000000000..7352a6c6c
--- /dev/null
+++ b/website/assets/css/_components/_navigation.sass
@@ -0,0 +1,52 @@
+//- ----------------------------------
+//- 💫 COMPONENTS > NAVIGATION
+//- ----------------------------------
+
+.c-nav
+ @include position(absolute, top, left, 0, 0)
+ @include size(100%, $nav-height)
+ align-items: center
+ background: $color-back
+ border-color: $color-back
+ color: $color-theme
+ display: flex
+ justify-content: space-between
+ padding: 0 2rem
+ z-index: 10
+ width: 100%
+
+ &.is-fixed
+ animation: slideInDown 0.5s ease-in-out
+ position: fixed
+ background: $color-theme
+ color: $color-back
+ border-color: $color-theme
+
+ @include breakpoint(min, sm)
+ height: $nav-height * 0.8
+
+.c-nav__menu
+ @include size(100%)
+ display: flex
+ justify-content: flex-end
+ flex-flow: row nowrap
+ border-color: inherit
+
+.c-nav__menu__item
+ display: flex
+ align-items: center
+ height: 100%
+
+ &:not(:last-child)
+ margin-right: 1em
+
+ &.is-active
+ position: relative
+ font-weight: bold
+ border-color: inherit
+
+ &:after
+ $triangle: 8px
+
+ @include triangle-down($triangle)
+ @include position(absolute, top, left, 100%, calc(50% - #{$triangle}))
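+
+//- triangle-down() is assumed to be a mixin that draws a downward CSS
+//- border triangle of the given size; with the absolute positioning above
+//- it renders a pointer under the active menu item.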
diff --git a/website/assets/css/_components/_quotes.sass b/website/assets/css/_components/_quotes.sass
deleted file mode 100644
index b0f50f0e4..000000000
--- a/website/assets/css/_components/_quotes.sass
+++ /dev/null
@@ -1,36 +0,0 @@
-// Quotes - Variables
-// ============================================================================
-
-$quote-icon-color : color($theme)
-$quote-icon-size : 6rem
-$quote-padding-side : 10rem
-
-
-// Quotes - Style
-// ============================================================================
-
-// :before - icon to emphasize blockquote
-// .quote-text - quote text content
-// .quote-text-strong - emphasized style of quote text content
-// .quote-source - source of quote (link optional)
-
-.quote
- @extend .block
- @include icon(quote, color(grey), 0, $quote-icon-size)
- padding-left: $quote-padding-side
-
- &:before
- @include position(relative, top, left, ($quote-icon-size / 1.75), -$quote-padding-side)
- color: $quote-icon-color
-
-.quote-text
- @extend .text-quote
-
-.quote-text-strong
- @extend .h2
- padding: 0
-
-.quote-source
- @extend .text-meta-strong
- @include icon(dash, color(grey), 0 0.75rem 0 0)
- border: none
diff --git a/website/assets/css/_components/_sidebar.sass b/website/assets/css/_components/_sidebar.sass
new file mode 100644
index 000000000..a32c85f2b
--- /dev/null
+++ b/website/assets/css/_components/_sidebar.sass
@@ -0,0 +1,40 @@
+//- ----------------------------------
+//- 💫 COMPONENTS > SIDEBAR
+//- ----------------------------------
+
+.c-sidebar
+ @include breakpoint(min, md)
+ flex: 0 0 $sidebar-width
+ margin-right: 6rem
+ margin-left: 4rem
+ padding-top: $nav-height
+ width: $sidebar-width
+
+ &.is-fixed .c-sidebar__body
+ @include position(fixed, top, left, $nav-height, 4rem)
+ @include size($sidebar-width, calc(100vh - #{$nav-height}))
+ overflow: auto
+ transition: none
+
+ @include breakpoint(max, sm)
+ flex: 100%
+ width: 100%
+
+ .c-sidebar__body
+ display: flex
+ flex-flow: row wrap
+ width: 100%
+
+ & > *
+ flex: 1 1 0
+ padding: 1rem
+ border-bottom: 1px solid $color-subtle
+
+ &:not(:last-child)
+ border-right: 1px solid $color-subtle
+
+.c-sidebar__body
+ .is-active
+ font-weight: bold
+ color: $color-theme
diff --git a/website/assets/css/_components/_tables.sass b/website/assets/css/_components/_tables.sass
index 224c72961..237e3d28d 100644
--- a/website/assets/css/_components/_tables.sass
+++ b/website/assets/css/_components/_tables.sass
@@ -1,79 +1,44 @@
-// Tables - Variables
-// ============================================================================
+//- ----------------------------------
+//- 💫 COMPONENTS > TABLES
+//- ----------------------------------
-$table-background-color : color(white)
-$table-border-color : color(grey)
-$table-border-style : solid
-$table-border-width : 1px
-$table-color : color($theme)
-$table-head-color : color(white)
-$table-highlight-border : 3px solid color($theme)
-$table-padding : 1em
-$table-shade-color : color(grey, light)
-$table-shadow-color : color(black, dark)
+// Shadows adapted from "CSS only Responsive Tables" by David Bushell
+// http://codepen.io/dbushell/pen/wGaamR
+//- Table Container
-// Tables - Style
-// ============================================================================
-
-// .table - data table
-// .table--code - table with code in first column
-// .table--params - table with parameters in first column
-// .table-row - table row
-// .table-cell - table cell
-// .table-cell--highlight - style for highlighted table cell(s)
-// .table-head-cell - table head cell
-// .table-container - block containing table
-
-.table
- @extend .table-responsive, .block, .text
+.c-table
vertical-align: top
- &--code .table-cell:first-child
- background: $table-shade-color
-
- &--params .table-cell:first-child
- font-weight: bold
- white-space: nowrap
-
-.table-cell
- border: $table-border-width $table-border-style $table-border-color
- padding: $table-padding
-
- &--highlight
- border: $table-highlight-border
-
-.table-head-cell
- @extend .text-meta
- background: $table-color
- border: $table-border-width $table-border-style $table-color
- color: $table-head-color
- display: table-cell
- padding: $table-padding
-
-.table-container
- @extend .responsive-container
-
-
-// Responsive Table Markup
-// adapted from David Bushell, http://codepen.io/dbushell/pen/wGaamR
-
-@media(max-width: #{$screen-size-large})
- .table-responsive
- @include scroll-shadow-base($table-shadow-color)
+ @include breakpoint(max, md)
+ @include scroll-shadow-base($color-front)
display: inline-block
overflow-x: auto
width: auto
-webkit-overflow-scrolling: touch
- .table-cell
- &:first-child
- @include scroll-shadow-cover(left, $table-background-color)
- &:last-child
- @include scroll-shadow-cover(right, $table-background-color)
+//- Table Cell
- .table-responsive.table--code
- .table-cell
- &:first-child
- @include scroll-shadow-cover(left, $table-shade-color)
+.c-table__cell
+ padding: 1rem
+ border: 1px solid $color-subtle
+
+ &.c-table__cell--highlight
+ border: 2px solid $color-theme
+
+ @include breakpoint(max, md)
+ &:first-child
+ @include scroll-shadow-cover(left, $color-back)
+
+ &:last-child
+ @include scroll-shadow-cover(right, $color-back)
+
+
+//- Table Head Cell
+
+.c-table__head-cell
+ background: $color-theme
+ color: $color-back
+ padding: 1rem
+ border: 1px solid $color-theme
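+
+//- Usage sketch: table.c-table > tr > td.c-table__cell (or
+//- th.c-table__head-cell for headers); on small screens the assumed
+//- scroll-shadow mixins add edge shadows while the table scrolls
+//- horizontally, per the technique credited above.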
diff --git a/website/assets/css/_components/_tooltips.sass b/website/assets/css/_components/_tooltips.sass
deleted file mode 100644
index 16d6b453b..000000000
--- a/website/assets/css/_components/_tooltips.sass
+++ /dev/null
@@ -1,44 +0,0 @@
-// Tooltips - Variables
-// ============================================================================
-
-$tooltip-background : color(black)
-$tooltip-border-radius : $border-radius
-$tooltip-color : color(white)
-$tooltip-padding : 0.15rem 1rem
-
-
-// Tooltips - Style
-// ============================================================================
-
-// [data-tooltip] - tooltip
-// :after - tooltip content
-// :hover - display tooltip on hover
-// .visible - makes tooltip visible
-
-[data-tooltip]
- display: inline
- position: relative
-
- &:after
- @extend .text, .text-small
- @include position(absolute, top, left, 125%, 50%)
- background: $tooltip-background
- border-radius: $tooltip-border-radius
- color: $tooltip-color
- content: attr(data-tooltip)
- opacity: 0
- padding: $tooltip-padding
- text-shadow: none
- text-transform: none
- transform: translateX(-50%) translateY(-2px)
- transition: opacity 0.2s cubic-bezier(0.64, 0.09, 0.08, 1), transform 0.2s cubic-bezier(0.64, 0.09, 0.08, 1)
- visibility: hidden
- white-space: nowrap
- z-index: 200
-
- &:hover:after,
- &.visible:after
- display: block
- opacity: 1
- transform: translateX(-50%) translateY(0)
- visibility: visible
diff --git a/website/assets/css/_layout/_article.sass b/website/assets/css/_layout/_article.sass
deleted file mode 100644
index 35505b4df..000000000
--- a/website/assets/css/_layout/_article.sass
+++ /dev/null
@@ -1,25 +0,0 @@
-// Article - Variables
-// ============================================================================
-
-$article-header-margin : 6rem
-$article-footer-margin : 1.5rem
-
-
-// Article - Style
-// ============================================================================
-
-article p,
-article .list
- @extend .text-big
-
-.article-header
- margin-bottom: $article-header-margin
-
-.article-title
- @extend .h1
-
-.article-meta
- @extend .text-meta
-
-.article-footer
- margin-top: $article-footer-margin
diff --git a/website/assets/css/_layout/_body.sass b/website/assets/css/_layout/_body.sass
deleted file mode 100644
index 066a8903b..000000000
--- a/website/assets/css/_layout/_body.sass
+++ /dev/null
@@ -1,53 +0,0 @@
-// Body - Variables
-// ============================================================================
-
-$body-background : color(white)
-$body-color : color(black)
-$main-padding-sides : 4rem
-$main-padding-bottom : 8rem
-$main-article-margin-left : 10rem
-
-
-// Body - Style
-// ============================================================================
-
-.body
- @extend .text
- background: $body-background
- color: $body-color
- display: flex
- flex-flow: row wrap
-
-
-// Main content
-// .main - main content container
-// .main--asides - main content with asides
-// .main--article - main content for articles
-
-.main
- flex: 1 1 auto
- max-width: 100%
- padding: $height-navbar $main-padding-sides $main-padding-bottom $main-padding-sides
- width: $width-content - $width-aside
-
-
- // Remove top padding from first element if it's a level 2 headline
-
- & > *:first-child > h2:first-child
- padding-top: 0
-
-
- // Large screens only
-
- @media (min-width: #{$screen-size-large})
- &.main--asides
- margin-right: $width-aside
-
- &.main--article
- margin-left: $main-article-margin-left
-
-
-// Sections
-
-.section
- @extend .block
diff --git a/website/assets/css/_layout/_footer.sass b/website/assets/css/_layout/_footer.sass
deleted file mode 100644
index 82dbe9e3b..000000000
--- a/website/assets/css/_layout/_footer.sass
+++ /dev/null
@@ -1,20 +0,0 @@
-// Footer - Variables
-// ============================================================================
-
-$footer-background : color($theme) pattern($theme)
-$footer-color : color(white)
-$footer-padding : 2.75rem
-$footer-text-align : center
-
-
-// Footer - Style
-// ============================================================================
-
-.footer
- @extend .link-list
- background: $footer-background
- color: $footer-color
- overflow: auto
- padding: $footer-padding
- text-align: $footer-text-align
- z-index: 100
diff --git a/website/assets/css/_layout/_header.sass b/website/assets/css/_layout/_header.sass
deleted file mode 100644
index b8042ef82..000000000
--- a/website/assets/css/_layout/_header.sass
+++ /dev/null
@@ -1,50 +0,0 @@
-// Header - Variables
-// ============================================================================
-
-$header-background : color($theme) pattern($theme)
-$header-color-highlight : color($theme, dark)
-$header-min-height : 250px
-$header-padding : 4rem 5rem 5rem
-$header-text-color : color(white)
-$header-text-shadow : 2px 2px
-$hero-credit-bottom : -2em
-$hero-credit-right : 1em
-
-
-// Header - Style
-// ============================================================================
-
-.header
- background: $header-background
- display: flex
- flex-flow: column nowrap
- justify-content: center
- margin-top: $height-navbar
- min-height: $header-min-height
- text-align: center
-
-.header-title
- @extend .h0
- color: $header-text-color
-
- &.header-title--center
- @extend .text-center
-
-.header-body
- color: $header-text-color
- padding: $header-padding
- text-align: left
-
-.header-text
- text-shadow: $header-text-shadow $header-color-highlight
-
-
-// Hero image
-
-.hero
- position: relative
- min-height: 0
-
-.hero-credit
- @extend .text-credit
- @include position(absolute, bottom, right, $hero-credit-bottom, $hero-credit-right)
diff --git a/website/assets/css/_layout/_nav.sass b/website/assets/css/_layout/_nav.sass
deleted file mode 100644
index 1fb95f6f9..000000000
--- a/website/assets/css/_layout/_nav.sass
+++ /dev/null
@@ -1,108 +0,0 @@
-// Top Navigation Bar - Variables
-// ============================================================================
-
-$nav-animation : slideInDown 0.5s ease-in-out
-$nav-background : color(white)
-$nav-color : color($theme)
-$nav-height : $height-navbar
-$nav-height-small : $nav-height * 0.8
-$nav-icon-size : 3.5rem
-$nav-item-spacing : 1em
-$nav-mobile-border : 1px solid
-$nav-mobile-font-size : 1.25em
-$nav-mobile-padding : 1rem
-$nav-mobile-width : 20vw
-$nav-padding : 0 0 0 2rem
-$nav-triangle-size : 8px
-
-
-// Top Navigation Bar - Style
-// ============================================================================
-
-// .nav - top and primary navigation bar
-// .fixed - sticky version
-// .logo - special styling for logo
-// .nav-menu - menu bar containing menu items
-// .nav-item - menu list item
-// .nav-button - button to toggle mobile navigation
-// .nav-checkbox - checkbox for checkbox hack
-// .active - active menu item
-
-.nav
- @extend .text-label
- @include position(absolute, top, left, 0, 0)
- @include size(100%, $nav-height)
- align-items: center
- background: $nav-background
- border-color: $nav-background
- color: $nav-color
- display: flex
- justify-content: space-between
- padding: $nav-padding
- z-index: 10
- width: 100%
-
- &.fixed
- animation: $nav-animation
- background: $nav-color
- border-color: $nav-color
- color: $nav-background
- position: fixed
-
- @media (min-width: #{$screen-size-small})
- height: $nav-height-small
-
-.nav-menu
- @include size(100%)
- justify-content: flex-end
- border-color: inherit
- display: flex
- margin: 0
-
- @media (max-width: #{$screen-size-small})
- @include position(absolute, top, left, $nav-height, 0)
- flex-flow: row wrap
-
- .nav-checkbox:checked + &
- background: inherit
-
- .nav-item
- @include visibility(visible)
-
- & + .nav-button:before
- color: color(grey)
-
-.nav-item
- align-items: center
- border-color: inherit
- display: flex
- height: 100%
- position: relative
-
- &--active
- font-weight: bold
-
- @media (min-width: #{$screen-size-small})
- margin-right: $nav-item-spacing
-
- &--active:after
- @include triangle-down($nav-triangle-size)
- @include position(absolute, bottom, left, -$nav-triangle-size, calc(50% - #{$nav-triangle-size}))
-
- @media (max-width: #{$screen-size-small})
- @include size(100%, auto)
- @include visibility(hidden)
- background: inherit
- border-top: $nav-mobile-border
- font-size: $nav-mobile-font-size
- justify-content: center
- padding: $nav-mobile-padding
-
-.nav-button
- @media (max-width: #{$screen-size-small})
- @include icon(menu, none, $nav-mobile-padding 0 0 0, $nav-icon-size)
- cursor: pointer
- padding: 0 1em 1em 0
-
-.nav-checkbox
- display: none
diff --git a/website/assets/css/_layout/_sidebar.sass b/website/assets/css/_layout/_sidebar.sass
deleted file mode 100644
index 2a8604fee..000000000
--- a/website/assets/css/_layout/_sidebar.sass
+++ /dev/null
@@ -1,65 +0,0 @@
-// Sidebar - Variables
-// ============================================================================
-
-$sidebar-breakpoint : 900px
-$sidebar-highlight-color : color($theme)
-$sidebar-margin-left : 4rem
-$sidebar-margin-right : 6rem
-$sidebar-margin-top : $height-navbar
-$sidebar-menu-spacing : 3rem
-$sidebar-small-border : 1px solid color(grey, light)
-$sidebar-small-padding : 2rem 3rem
-$sidebar-width : $width-sidebar
-
-
-// Sidebar - Style
-// ============================================================================
-
-// .sidebar - sidebar
-// .fixed - sticky version
-// .menu - menu list in sidebar
-// .active - active menu item
-// .sidebar-label - menu label
-
-.sidebar
- @media (min-width: #{$sidebar-breakpoint})
- flex: 0 0 $sidebar-width
- margin-right: $sidebar-margin-right
- margin-left: $sidebar-margin-left
- padding-top: $height-navbar
- width: $sidebar-width
-
- &.fixed .sidebar-body
- @include position(fixed, top, left, $sidebar-margin-top, $sidebar-margin-left)
- @include size($sidebar-width, calc(100vh - #{$sidebar-margin-top}))
- overflow: auto
- transition: none
-
- @media (max-width: #{$sidebar-breakpoint})
- border-bottom: $sidebar-small-border
- flex: 100%
- width: 100%
-
- .sidebar-body
- display: flex
- flex-flow: row wrap
-
-.sidebar-menu
- @media (min-width: #{$sidebar-breakpoint})
- margin-bottom: $sidebar-menu-spacing
-
- @media (max-width: #{$sidebar-breakpoint})
- flex: 1
- margin: 0
- padding: $sidebar-small-padding
- border-top: $sidebar-small-border
-
- &:not(:last-child)
- border-right: $sidebar-small-border
-
- .active
- color: $sidebar-highlight-color
- font-weight: bold
-
-.sidebar-menu[data-label]:before
- @extend .label
diff --git a/website/assets/css/_utils/_mixins.sass b/website/assets/css/_mixins.sass
similarity index 61%
rename from website/assets/css/_utils/_mixins.sass
rename to website/assets/css/_mixins.sass
index 6fc8d619a..d9fd210e1 100644
--- a/website/assets/css/_utils/_mixins.sass
+++ b/website/assets/css/_mixins.sass
@@ -1,5 +1,6 @@
-// Mixins
-// ============================================================================
+//- ----------------------------------
+//- 💫 MIXINS
+//- ----------------------------------
// Helper for position
// $position - valid position value (static, absolute, fixed, relative)
@@ -19,27 +20,22 @@
// $height - height of element (default: $width)
@mixin size($width, $height: $width)
- height: $height
width: $width
+ height: $height
-// Icon before or after an element
-// $icon-name - name of icon, refers to icon specified in settings
-// $icon-color - color of icon (default: no color specified)
-// $icon-padding - padding around icon (in shorthand style, default: 0)
-// $icon-size - font size of icon (default: no size specified)
+//- Responsive Breakpoint utility
-@mixin icon($icon-name, $icon-color: none, $icon-padding: 0, $icon-size: none)
- &:before
- @extend %icon
- content: map-get($icons, $icon-name)
- padding: $icon-padding
+@mixin breakpoint($limit, $size)
+ $breakpoints-max: ( xs: map-get($breakpoints, sm) - 1, sm: map-get($breakpoints, md) - 1, md: map-get($breakpoints, lg) - 1 )
- @if $icon-color != none
- color: $icon-color
+ @if $limit == "min"
+ @media(min-width: #{map-get($breakpoints, $size)})
+ @content
- @if $icon-size != none
- font-size: $icon-size
+ @else if $limit == "max"
+ @media(max-width: #{map-get($breakpoints-max, $size)})
+ @content
// Triangle pointing down
@@ -51,25 +47,7 @@
border-style: solid
border-top-color: inherit
border-width: $triangle-size $triangle-size 0 $triangle-size
- content: ''
-
-
-// Make element visible or invisible (included as mixin to work in media queries)
-// $visibility-state - visible or invisible
-// $visibility-transition - transition (default: $transition)
-
-@mixin visibility($visibility-state, $visibility-transition: $transition)
- transition: $visibility-transition
-
- @if $visibility-state == hidden
- opacity: 0
- pointer-events: none
- visibility: hidden
-
- @else
- opacity: 1
- pointer-events: initial
- visibility: visible
+ content: ""
// Scroll shadows for responsive tables
@@ -80,9 +58,9 @@
@mixin scroll-shadow-base($scroll-shadow-color)
background: radial-gradient(left, ellipse, rgba(0,0,0, .2) 0%, rgba(0,0,0, 0) 75%) 0 center, radial-gradient(right, ellipse, rgba(0,0,0, .2) 0%, rgba(0,0,0, 0) 75%) 100% center
- background-size: 10px 100%, 10px 100%
background-attachment: scroll, scroll
background-repeat: no-repeat
+ background-size: 10px 100%, 10px 100%
@mixin scroll-shadow-cover($scroll-shadow-side, $scroll-shadow-background)
$scroll-gradient-direction: right !default
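
For orientation, here is a minimal usage sketch of the new `breakpoint()` mixin introduced above. The `.sidebar` rule is hypothetical; only the mixin itself and the `$breakpoints`/`$sidebar-width` values come from this patch, written in the indented Sass syntax the site already uses:

```sass
// Hypothetical consumer of the new breakpoint() mixin, assuming
// $breakpoints: ( sm: 768px, md: 992px, lg: 1200px ) from _variables.sass
.sidebar
    width: 100%

    // compiles to @media (min-width: 992px) { .sidebar { width: 230px } }
    +breakpoint(min, md)
        width: $sidebar-width

    // compiles to @media (max-width: 991px) via the internal $breakpoints-max map
    +breakpoint(max, sm)
        display: none
```

Note that `max` sizes resolve one pixel below the next breakpoint up, so `min` and `max` queries for adjacent sizes never overlap.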
diff --git a/website/assets/css/_utils/_functions.sass b/website/assets/css/_utils/_functions.sass
deleted file mode 100644
index ae61df80d..000000000
--- a/website/assets/css/_utils/_functions.sass
+++ /dev/null
@@ -1,23 +0,0 @@
-// Functions
-// ============================================================================
-
-// Helper to round to decimals (used for rem units)
-// $calculation - value or performed calculation
-
-@function round-dec($calculation)
- @return round($calculation * 10) / 10
-
-
-// Helper for colors (refers to $colors in colors)
-// $color-name - name of predefined color (i.e. blue)
-// $color-variant - predefined hue (default: base)
-
-@function color($color-name, $color-variant: base)
- @return map-get(map-get($colors, $color-name), $color-variant)
-
-
-// Helper for patterns, returns pattern image of color
-// $color-name - name of predefined color (i.e. blue)
-
-@function pattern($color-name)
- @return url('../img/pattern_#{$color-name}.jpg') center center repeat scroll
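
The `color()` and `pattern()` helpers deleted here resolved the nested `$colors` map at compile time. The new `_variables.sass` that follows replaces those lookups with flat variables, so the old calls map roughly like this (a hedged sketch; the `.aside` selector is illustrative only):

```sass
// Old helper lookups and their flat replacements (values match the diff):
//   color(blue)       -> #09a3d5  ->  $color-theme
//   color(grey, dark) -> #999999  ->  $color-subtle-dark
.aside
    color: $color-theme               // was: color(blue)
    border-color: $color-subtle-dark  // was: color(grey, dark)
```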
diff --git a/website/assets/css/_variables.sass b/website/assets/css/_variables.sass
new file mode 100644
index 000000000..62c90555c
--- /dev/null
+++ b/website/assets/css/_variables.sass
@@ -0,0 +1,37 @@
+//- ----------------------------------
+//- 💫 VARIABLES
+//- ----------------------------------
+
+// Settings and Sizes
+
+$type-base: 11px
+
+$nav-height: 55px
+$content-width: 800px
+$sidebar-width: 230px
+$aside-width: 300px
+$aside-padding: 25px
+
+$logo-sizes: ( large: 500px, medium: 250px, small: 100px, tiny: 65px )
+$grid: ( third: 3, half: 2, two-thirds: 1.5 )
+$breakpoints: ( sm: 768px, md: 992px, lg: 1200px )
+
+
+// Fonts
+
+$font-primary: "Source Sans Pro", Tahoma, Geneva, sans-serif !default
+$font-code: 'Source Code Pro', Consolas, 'Andale Mono', Menlo, Monaco, Courier, monospace !default
+
+
+// Colors
+
+$color-theme: #09a3d5
+$color-theme-dark: #008ebc
+$color-back: #fff
+$color-front: #222
+
+$color-subtle: #ddd
+$color-subtle-light: #f6f6f6
+$color-subtle-dark: #999
+
+$syntax-highlighting: ( comment: #999, tag: #3ec930, number: #8130c9, selector: #09a3d5, operator: #e4514f, function: #09a3d5, keyword: #e4514f, regex: #f4c025 )
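
A short sketch of how these map-based settings might be consumed; the selectors below are hypothetical, only the `$logo-sizes`, `$syntax-highlighting` and `$grid` maps come from this file:

```sass
// Illustrative consumers of the new variable maps
.logo--medium
    width: map-get($logo-sizes, medium)             // 250px

.token.comment
    color: map-get($syntax-highlighting, comment)   // #999

// generate one width utility per $grid entry, e.g. .u-grid-half { width: 50% }
@each $name, $divisor in $grid
    .u-grid-#{$name}
        width: 100% / $divisor
```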
diff --git a/website/assets/css/_variables.scss b/website/assets/css/_variables.scss
deleted file mode 100644
index 218f6aa70..000000000
--- a/website/assets/css/_variables.scss
+++ /dev/null
@@ -1,93 +0,0 @@
-// Variables
-// ============================================================================
-
-// Settings and Sizes
-
-$theme : blue !default;
-
-$base-font-size : 10px !default;
-
-$width-logo : 500px, 250px, 100px, 65px !default;
-$width-content : 800px;
-$width-aside : 300px;
-$width-sidebar : 230px;
-$width-profile : 160px;
-$height-navbar : 55px;
-
-$border-radius : 4px;
-$image-ratio : 3 / 1;
-$transition : all 0.2s;
-$social-buttons : twitter, reddit, hackernews, producthunt;
-
-$screen-size-small : 480px;
-$screen-size-medium : 800px;
-$screen-size-large : 1200px;
-
-
-// Grid Columns
-
-$columns: ( fifth, 20% ),
- ( quarter, 25% ),
- ( third, (100% / 3) ),
- ( half, 50% ),
- ( two-thirds, (100% / 1.5) ),
- ( full, 100% );
-
-// Fonts
-
-$font-primary : Lato, Tahoma, Geneva, sans-serif !default;
-$font-secondary : 'Work Sans', 'Arial Black', Gadget, sans-serif !default;
-$font-code : 'Source Code Pro', Consolas, 'Andale Mono', Menlo, Monaco, Courier, monospace !default;
-$font-icons : Icomoon !default;
-
-
-// Colors
-// only to be used with the color() function, i.e. color(blue, light)
-
-$colors: (
- blue : ( base: #09a3d5, dark: #008ebc, light: #e2f7fe ),
- red : ( base: #e4514f, dark: #dd2422, light: #fceeed ),
- grey : ( base: #dddddd, dark: #999999, light: #f6f6f6 ),
- black : ( base: #222222, dark: #000000 ),
- white : ( base: #ffffff ),
- yellow : ( base: #f4c025 ),
- green : ( base: #3ec930 ),
- purple : ( base: #8130c9 ),
- orange : ( base: #f47725 ),
-
- social: (
- twitter : #5ea9dd,
- hackernews : #ff6600,
- reddit : #ff4500,
- producthunt : #da552f,
- github : #000000,
- linkedin : #0077b5,
- facebook : #3b5999,
- medium : #02b875,
- ),
-);
-
-
-// Icons
-
-$icons: (
- website : '\1f310',
- search : '\1f50d',
- feed : '\1f525',
- quote : '\275d',
- twitter : '\e900',
- github : '\e901',
- producthunt : '\e902',
- linkedin : '\e903',
- hackernews : '\e904',
- reddit : '\e905',
- facebook : '\e906',
- medium : '\e907',
- codepen : '\e908',
- dash : '\2014',
- bullet : '\2022',
- menu : '\2261',
- close : '\2715',
- copyright : '\00a9',
- permalink : '\0023',
-);
diff --git a/website/assets/css/_vendors/_displacy.sass b/website/assets/css/_vendors/_displacy.sass
deleted file mode 100644
index 381331a4e..000000000
--- a/website/assets/css/_vendors/_displacy.sass
+++ /dev/null
@@ -1,42 +0,0 @@
-// displaCy Custom Visualization - Variables
-// ============================================================================
-
-$displacy-classes: ( bad: color(red), good: color(green), highlight: color(yellow), lowlight: color(grey, dark) )
-$displacy-word-margin : 3rem
-
-
-// displaCy Custom Visualization - Style
-// ============================================================================
-
-// displaCy visualization (refers to exports from displaCy v1)
-// .word - token container
-// .word span - word
-// .arrow:before - arrow label
-// .word:after - part-of-speech tag
-// .arrow.#{$displacy-class}:before - arrow with special styling
-// .arrow.#{$displacy-class}:after - arrow head with special styling
-// .word.#{$displacy-class} span - word with special styling
-// $displacy-classes - refers to $displacy-classes in settings
-
-#displacy
- width: 100%
-
- .word
- margin-top: $displacy-word-margin
-
- .word span
- @extend .h4
-
- .arrow:before,
- .word:after
- @extend .text-label
-
- @each $displacy-class, $displacy-color in $displacy-classes
- .arrow.#{$displacy-class}:before
- border-color: $displacy-color
-
- .arrow.#{$displacy-class}:after
- border-top-color: $displacy-color
-
- .word.#{$displacy-class} span
- color: $displacy-color
diff --git a/website/assets/css/_vendors/_normalize.sass b/website/assets/css/_vendors/_normalize.sass
deleted file mode 100644
index 185439710..000000000
--- a/website/assets/css/_vendors/_normalize.sass
+++ /dev/null
@@ -1,181 +0,0 @@
-// Normalize
-// ============================================================================
-
-// adapted from normalize.css, https://github.com/necolas/normalize.css
-
-html
- font-family: sans-serif
- -ms-text-size-adjust: 100%
- -webkit-text-size-adjust: 100%
-
-body
- margin: 0
-
-article,
-aside,
-details,
-figcaption,
-figure,
-footer,
-header,
-main,
-menu,
-nav,
-section,
-summary
- display: block
-
-audio,
-canvas,
-progress,
-video
- display: inline-block
- vertical-align: baseline
-
-audio:not([controls])
- display: none
- height: 0
-
-[hidden],
-template
- display: none
-
-a
- background-color: transparent
-
-a:active,
-a:hover
- outline: 0
-
-abbr[title]
- border-bottom: none
- text-decoration: underline
- text-decoration: underline dotted
-
-b,
-strong
- font-weight: inherit
-
-b,
-strong
- font-weight: bolder
-
-dfn
- font-style: italic
-
-h1
- font-size: 2em
- margin: 0.67em 0
-
-mark
- background-color: #ff0
- color: #000
-
-small
- font-size: 80%
-
-sub,
-sup
- font-size: 75%
- line-height: 0
- position: relative
- vertical-align: baseline
-
-sup
- top: -0.5em
-
-sub
- bottom: -0.25em
-
-img
- border: 0
-
-svg:not(:root)
- overflow: hidden
-
-figure
- margin: 1em 40px
-
-hr
- box-sizing: content-box
- height: 0
- overflow: visible
-
-pre
- overflow: auto
-
-code,
-kbd,
-pre,
-samp
- font-family: monospace, monospace
- font-size: 1em
-
-button,
-input,
-optgroup,
-select,
-textarea
- font: inherit
- margin: 0
-
-button
- overflow: visible
-
-button,
-select
- text-transform: none
-
-button,
-html input[type="button"],
-input[type="reset"],
-input[type="submit"]
- -webkit-appearance: button
- cursor: pointer
-
-button[disabled],
-html input[disabled]
- cursor: default
-
-input,
-button
- &::-moz-focus-inner
- border: 0
- padding: 0
-
- &:-moz-focusring
- outline: 1px dotted ButtonText
-
-input
- line-height: normal
-
- &[type="checkbox"],
- &[type="radio"]
- box-sizing: border-box
- padding: 0
-
- &[type="number"]::-webkit-inner-spin-button,
- &[type="number"]::-webkit-outer-spin-button
- height: auto
-
- &[type="search"]
- -webkit-appearance: textfield
-
- &[type="search"]::-webkit-search-cancel-button,
- &[type="search"]::-webkit-search-decoration
- -webkit-appearance: none
-
-fieldset
- border: 1px solid #c0c0c0
- margin: 0 2px
- padding: 0.35em 0.625em 0.75em
-
-legend
- border: 0
- padding: 0
-
-textarea
- overflow: auto
-
-optgroup
- font-weight: bold
diff --git a/website/assets/css/_vendors/_prism.sass b/website/assets/css/_vendors/_prism.sass
deleted file mode 100644
index 22306cff4..000000000
--- a/website/assets/css/_vendors/_prism.sass
+++ /dev/null
@@ -1,77 +0,0 @@
-// prism.js Syntax Highlighting
-// ============================================================================
-
-// Comment, prolog, doctype, CDATA, punctuation
-
-.token.comment,
-.token.prolog,
-.token.doctype,
-.token.cdata,
-.token.punctuation
- color: color(grey, dark)
-
-
-// Property, tag, constant, symbol, deleted
-
-.token.property,
-.token.tag,
-.token.constant,
-.token.symbol,
-.token.deleted
- color: color(green)
-
-
-// Boolean, number
-
-.token.boolean,
-.token.number
- color: color(purple)
-
-
-// Selector, attribute name, string, char, built in, inserted
-
-.token.selector,
-.token.attr-name,
-.token.string,
-.token.char,
-.token.builtin,
-.token.inserted
- color: color(blue)
-
-
-// Operator, entity, URL, CSS string, variable
-
-.token.operator,
-.token.entity,
-.token.url,
-.language-css .token.string,
-.style .token.string,
-.token.variable
- color: color(red)
-
-
-// @-rule, attribute value, function
-
-.token.atrule,
-.token.attr-value,
-.token.function
- color: color(blue)
-
-
-// Keyword
-
-.token.keyword
- color: color(red)
-
-
-// Regex, important
-
-.token.regex,
-.token.important
- color: color(yellow)
-
-
-// Italic
-
-.token.italic
- font-style: italic
diff --git a/website/assets/css/style.sass b/website/assets/css/style.sass
index 691919cf8..f8f6cbc24 100644
--- a/website/assets/css/style.sass
+++ b/website/assets/css/style.sass
@@ -1,60 +1,32 @@
-// Style
-// ============================================================================
+//- ----------------------------------
+//- 💫 STYLE
+//- ----------------------------------
+
// Variables
@import variables
-
-
-// Utilities
-
-@import _utils/functions
-@import _utils/mixins
+@import mixins
// Base
-@import _base/animations
-@import _base/fonts
@import _base/reset
+@import _base/fonts
+@import _base/animations
@import _base/grid
-@import _base/typography
-
-
-// Layout
-
-@import _layout/article
-@import _layout/body
-@import _layout/footer
-@import _layout/header
-@import _layout/nav
-@import _layout/sidebar
+@import _base/layout
+@import _base/objects
+@import _base/utilities
// Components
-@import _components/alerts
@import _components/asides
-@import _components/boxes
@import _components/buttons
-@import _components/cards
@import _components/code
-@import _components/dividers
-@import _components/embeds
-@import _components/forms
-@import _components/icons
-@import _components/links
-@import _components/logo
-@import _components/images
-@import _components/labels
@import _components/lists
@import _components/misc
-@import _components/quotes
+@import _components/navigation
+@import _components/sidebar
@import _components/tables
-@import _components/tooltips
-
-
-// Vendors
-
-@import _vendors/prism
-@import _vendors/displacy
diff --git a/website/assets/css/style_blog.sass b/website/assets/css/style_blog.sass
deleted file mode 100644
index 08be63468..000000000
--- a/website/assets/css/style_blog.sass
+++ /dev/null
@@ -1,6 +0,0 @@
-// Style Blog
-// ============================================================================
-
-$theme: red
-
-@import style
diff --git a/website/assets/fonts/icomoon.eot b/website/assets/fonts/icomoon.eot
deleted file mode 100644
index fd6ab6882..000000000
Binary files a/website/assets/fonts/icomoon.eot and /dev/null differ
diff --git a/website/assets/fonts/icomoon.svg b/website/assets/fonts/icomoon.svg
deleted file mode 100644
index ccbfec442..000000000
--- a/website/assets/fonts/icomoon.svg
+++ /dev/null
@@ -1,27 +0,0 @@
-Generated by IcoMoon
-[icomoon.svg: remaining SVG glyph data of this 27-line deleted icon font omitted]
diff --git a/website/assets/fonts/icomoon.ttf b/website/assets/fonts/icomoon.ttf
deleted file mode 100644
index fbca51c2a..000000000
Binary files a/website/assets/fonts/icomoon.ttf and /dev/null differ
diff --git a/website/assets/fonts/icomoon.woff b/website/assets/fonts/icomoon.woff
deleted file mode 100644
index cf1fc0131..000000000
Binary files a/website/assets/fonts/icomoon.woff and /dev/null differ
diff --git a/website/assets/fonts/lato-bold.eot b/website/assets/fonts/lato-bold.eot
deleted file mode 100755
index e8d5643a1..000000000
Binary files a/website/assets/fonts/lato-bold.eot and /dev/null differ
diff --git a/website/assets/fonts/lato-bold.svg b/website/assets/fonts/lato-bold.svg
deleted file mode 100755
index 2439f3bb4..000000000
--- a/website/assets/fonts/lato-bold.svg
+++ /dev/null
@@ -1,2787 +0,0 @@
-[lato-bold.svg: 2,787 lines of SVG font glyph data omitted]
diff --git a/website/assets/fonts/lato-bold.ttf b/website/assets/fonts/lato-bold.ttf
deleted file mode 100755
index e4767745f..000000000
Binary files a/website/assets/fonts/lato-bold.ttf and /dev/null differ
diff --git a/website/assets/fonts/lato-bold.woff b/website/assets/fonts/lato-bold.woff
deleted file mode 100755
index 3766cec73..000000000
Binary files a/website/assets/fonts/lato-bold.woff and /dev/null differ
diff --git a/website/assets/fonts/lato-bold.woff2 b/website/assets/fonts/lato-bold.woff2
deleted file mode 100755
index ad8583d2e..000000000
Binary files a/website/assets/fonts/lato-bold.woff2 and /dev/null differ
diff --git a/website/assets/fonts/lato-bolditalic.eot b/website/assets/fonts/lato-bolditalic.eot
deleted file mode 100755
index af4fed2d2..000000000
Binary files a/website/assets/fonts/lato-bolditalic.eot and /dev/null differ
diff --git a/website/assets/fonts/lato-bolditalic.svg b/website/assets/fonts/lato-bolditalic.svg
deleted file mode 100755
index 9cbc71230..000000000
--- a/website/assets/fonts/lato-bolditalic.svg
+++ /dev/null
@@ -1,2800 +0,0 @@
-[lato-bolditalic.svg: 2,800 lines of SVG font glyph data omitted]
diff --git a/website/assets/fonts/lato-bolditalic.ttf b/website/assets/fonts/lato-bolditalic.ttf
deleted file mode 100755
index 0a23a2b34..000000000
Binary files a/website/assets/fonts/lato-bolditalic.ttf and /dev/null differ
diff --git a/website/assets/fonts/lato-bolditalic.woff b/website/assets/fonts/lato-bolditalic.woff
deleted file mode 100755
index 8221140d1..000000000
Binary files a/website/assets/fonts/lato-bolditalic.woff and /dev/null differ
diff --git a/website/assets/fonts/lato-bolditalic.woff2 b/website/assets/fonts/lato-bolditalic.woff2
deleted file mode 100755
index c7d52d734..000000000
Binary files a/website/assets/fonts/lato-bolditalic.woff2 and /dev/null differ
diff --git a/website/assets/fonts/lato-italic.eot b/website/assets/fonts/lato-italic.eot
deleted file mode 100755
index d720a79f1..000000000
Binary files a/website/assets/fonts/lato-italic.eot and /dev/null differ
diff --git a/website/assets/fonts/lato-italic.svg b/website/assets/fonts/lato-italic.svg
deleted file mode 100755
index 9c0dd3ce8..000000000
--- a/website/assets/fonts/lato-italic.svg
+++ /dev/null
@@ -1,2805 +0,0 @@
-[lato-italic.svg: 2,805 lines of SVG font glyph data omitted]
diff --git a/website/assets/fonts/lato-italic.ttf b/website/assets/fonts/lato-italic.ttf
deleted file mode 100755
index 7a80d7c99..000000000
Binary files a/website/assets/fonts/lato-italic.ttf and /dev/null differ
diff --git a/website/assets/fonts/lato-italic.woff b/website/assets/fonts/lato-italic.woff
deleted file mode 100755
index c5d2b2fa1..000000000
Binary files a/website/assets/fonts/lato-italic.woff and /dev/null differ
diff --git a/website/assets/fonts/lato-italic.woff2 b/website/assets/fonts/lato-italic.woff2
deleted file mode 100755
index ea725a891..000000000
Binary files a/website/assets/fonts/lato-italic.woff2 and /dev/null differ
diff --git a/website/assets/fonts/lato-regular.eot b/website/assets/fonts/lato-regular.eot
deleted file mode 100755
index 721447bee..000000000
Binary files a/website/assets/fonts/lato-regular.eot and /dev/null differ
diff --git a/website/assets/fonts/lato-regular.svg b/website/assets/fonts/lato-regular.svg
deleted file mode 100755
index eefbd41bc..000000000
--- a/website/assets/fonts/lato-regular.svg
+++ /dev/null
@@ -1,2788 +0,0 @@
-[lato-regular.svg: 2,788 lines of SVG font glyph data omitted]
diff --git a/website/assets/fonts/lato-regular.ttf b/website/assets/fonts/lato-regular.ttf
deleted file mode 100755
index ab5702fe8..000000000
Binary files a/website/assets/fonts/lato-regular.ttf and /dev/null differ
diff --git a/website/assets/fonts/lato-regular.woff b/website/assets/fonts/lato-regular.woff
deleted file mode 100755
index 8dae150a1..000000000
Binary files a/website/assets/fonts/lato-regular.woff and /dev/null differ
diff --git a/website/assets/fonts/lato-regular.woff2 b/website/assets/fonts/lato-regular.woff2
deleted file mode 100755
index 3aa7f63ec..000000000
Binary files a/website/assets/fonts/lato-regular.woff2 and /dev/null differ
diff --git a/website/assets/fonts/sourcesanspro-bold.eot b/website/assets/fonts/sourcesanspro-bold.eot
new file mode 100644
index 000000000..b3b60be2d
Binary files /dev/null and b/website/assets/fonts/sourcesanspro-bold.eot differ
diff --git a/website/assets/fonts/sourcesanspro-bold.svg b/website/assets/fonts/sourcesanspro-bold.svg
new file mode 100644
index 000000000..94efdcbe5
--- /dev/null
+++ b/website/assets/fonts/sourcesanspro-bold.svg
@@ -0,0 +1,1031 @@
+[sourcesanspro-bold.svg: 1,031 lines of SVG font glyph data omitted]
diff --git a/website/assets/fonts/sourcesanspro-bold.ttf b/website/assets/fonts/sourcesanspro-bold.ttf
new file mode 100644
index 000000000..4619eef6b
Binary files /dev/null and b/website/assets/fonts/sourcesanspro-bold.ttf differ
diff --git a/website/assets/fonts/sourcesanspro-bold.woff b/website/assets/fonts/sourcesanspro-bold.woff
new file mode 100644
index 000000000..3257aeddf
Binary files /dev/null and b/website/assets/fonts/sourcesanspro-bold.woff differ
diff --git a/website/assets/fonts/sourcesanspro-bold.woff2 b/website/assets/fonts/sourcesanspro-bold.woff2
new file mode 100644
index 000000000..42b02574f
Binary files /dev/null and b/website/assets/fonts/sourcesanspro-bold.woff2 differ
diff --git a/website/assets/fonts/sourcesanspro-bolditalic.eot b/website/assets/fonts/sourcesanspro-bolditalic.eot
new file mode 100644
index 000000000..da1580939
Binary files /dev/null and b/website/assets/fonts/sourcesanspro-bolditalic.eot differ
diff --git a/website/assets/fonts/sourcesanspro-bolditalic.svg b/website/assets/fonts/sourcesanspro-bolditalic.svg
new file mode 100644
index 000000000..aa37571dd
--- /dev/null
+++ b/website/assets/fonts/sourcesanspro-bolditalic.svg
@@ -0,0 +1,840 @@
+[sourcesanspro-bolditalic.svg: 840 lines of SVG font glyph data omitted]
diff --git a/website/assets/fonts/sourcesanspro-bolditalic.ttf b/website/assets/fonts/sourcesanspro-bolditalic.ttf
new file mode 100644
index 000000000..ae8b08166
Binary files /dev/null and b/website/assets/fonts/sourcesanspro-bolditalic.ttf differ
diff --git a/website/assets/fonts/sourcesanspro-bolditalic.woff b/website/assets/fonts/sourcesanspro-bolditalic.woff
new file mode 100644
index 000000000..3ac22abd8
Binary files /dev/null and b/website/assets/fonts/sourcesanspro-bolditalic.woff differ
diff --git a/website/assets/fonts/sourcesanspro-bolditalic.woff2 b/website/assets/fonts/sourcesanspro-bolditalic.woff2
new file mode 100644
index 000000000..629413ac6
Binary files /dev/null and b/website/assets/fonts/sourcesanspro-bolditalic.woff2 differ
diff --git a/website/assets/fonts/sourcesanspro-italic.eot b/website/assets/fonts/sourcesanspro-italic.eot
new file mode 100644
index 000000000..a5d050e75
Binary files /dev/null and b/website/assets/fonts/sourcesanspro-italic.eot differ
diff --git a/website/assets/fonts/sourcesanspro-italic.svg b/website/assets/fonts/sourcesanspro-italic.svg
new file mode 100644
index 000000000..bf0f85da9
--- /dev/null
+++ b/website/assets/fonts/sourcesanspro-italic.svg
@@ -0,0 +1,852 @@
+[sourcesanspro-italic.svg: 852 lines of SVG font glyph data omitted]
diff --git a/website/assets/fonts/sourcesanspro-italic.ttf b/website/assets/fonts/sourcesanspro-italic.ttf
new file mode 100644
index 000000000..f17a12856
Binary files /dev/null and b/website/assets/fonts/sourcesanspro-italic.ttf differ
diff --git a/website/assets/fonts/sourcesanspro-italic.woff b/website/assets/fonts/sourcesanspro-italic.woff
new file mode 100644
index 000000000..32c1e1962
Binary files /dev/null and b/website/assets/fonts/sourcesanspro-italic.woff differ
diff --git a/website/assets/fonts/sourcesanspro-italic.woff2 b/website/assets/fonts/sourcesanspro-italic.woff2
new file mode 100644
index 000000000..c3d399f8c
Binary files /dev/null and b/website/assets/fonts/sourcesanspro-italic.woff2 differ
diff --git a/website/assets/fonts/sourcesanspro-regular.eot b/website/assets/fonts/sourcesanspro-regular.eot
new file mode 100644
index 000000000..6e98cf79a
Binary files /dev/null and b/website/assets/fonts/sourcesanspro-regular.eot differ
diff --git a/website/assets/fonts/sourcesanspro-regular.svg b/website/assets/fonts/sourcesanspro-regular.svg
new file mode 100644
index 000000000..27d435ad9
--- /dev/null
+++ b/website/assets/fonts/sourcesanspro-regular.svg
@@ -0,0 +1,1039 @@
+[1,039 lines of SVG font glyph data omitted]
diff --git a/website/assets/fonts/sourcesanspro-regular.ttf b/website/assets/fonts/sourcesanspro-regular.ttf
new file mode 100644
index 000000000..0bb505790
Binary files /dev/null and b/website/assets/fonts/sourcesanspro-regular.ttf differ
diff --git a/website/assets/fonts/sourcesanspro-regular.woff b/website/assets/fonts/sourcesanspro-regular.woff
new file mode 100644
index 000000000..aa0503cac
Binary files /dev/null and b/website/assets/fonts/sourcesanspro-regular.woff differ
diff --git a/website/assets/fonts/sourcesanspro-regular.woff2 b/website/assets/fonts/sourcesanspro-regular.woff2
new file mode 100644
index 000000000..06206e483
Binary files /dev/null and b/website/assets/fonts/sourcesanspro-regular.woff2 differ
diff --git a/website/assets/fonts/worksans-bold.eot b/website/assets/fonts/worksans-bold.eot
deleted file mode 100755
index 0a27c109c..000000000
Binary files a/website/assets/fonts/worksans-bold.eot and /dev/null differ
diff --git a/website/assets/fonts/worksans-bold.svg b/website/assets/fonts/worksans-bold.svg
deleted file mode 100755
index 4e431dcbb..000000000
--- a/website/assets/fonts/worksans-bold.svg
+++ /dev/null
@@ -1,1909 +0,0 @@
-[1,909 lines of SVG font glyph data omitted]
diff --git a/website/assets/fonts/worksans-bold.ttf b/website/assets/fonts/worksans-bold.ttf
deleted file mode 100755
index 83f6dcbe6..000000000
Binary files a/website/assets/fonts/worksans-bold.ttf and /dev/null differ
diff --git a/website/assets/fonts/worksans-bold.woff b/website/assets/fonts/worksans-bold.woff
deleted file mode 100755
index 49d6650a7..000000000
Binary files a/website/assets/fonts/worksans-bold.woff and /dev/null differ
diff --git a/website/assets/fonts/worksans-bold.woff2 b/website/assets/fonts/worksans-bold.woff2
deleted file mode 100755
index 7ff03ef9b..000000000
Binary files a/website/assets/fonts/worksans-bold.woff2 and /dev/null differ
diff --git a/website/assets/fonts/worksans-regular.eot b/website/assets/fonts/worksans-regular.eot
deleted file mode 100755
index d452ec370..000000000
Binary files a/website/assets/fonts/worksans-regular.eot and /dev/null differ
diff --git a/website/assets/fonts/worksans-regular.svg b/website/assets/fonts/worksans-regular.svg
deleted file mode 100755
index dcbf3f153..000000000
--- a/website/assets/fonts/worksans-regular.svg
+++ /dev/null
@@ -1,1586 +0,0 @@
-[1,586 lines of SVG font glyph data omitted]
diff --git a/website/assets/fonts/worksans-regular.ttf b/website/assets/fonts/worksans-regular.ttf
deleted file mode 100755
index 8eab67801..000000000
Binary files a/website/assets/fonts/worksans-regular.ttf and /dev/null differ
diff --git a/website/assets/fonts/worksans-regular.woff b/website/assets/fonts/worksans-regular.woff
deleted file mode 100755
index 95d0b0737..000000000
Binary files a/website/assets/fonts/worksans-regular.woff and /dev/null differ
diff --git a/website/assets/fonts/worksans-regular.woff2 b/website/assets/fonts/worksans-regular.woff2
deleted file mode 100755
index f4cd3e3de..000000000
Binary files a/website/assets/fonts/worksans-regular.woff2 and /dev/null differ
diff --git a/website/assets/fonts/worksans-semibold.eot b/website/assets/fonts/worksans-semibold.eot
deleted file mode 100755
index 26ca5d1d0..000000000
Binary files a/website/assets/fonts/worksans-semibold.eot and /dev/null differ
diff --git a/website/assets/fonts/worksans-semibold.svg b/website/assets/fonts/worksans-semibold.svg
deleted file mode 100755
index 03f6d943d..000000000
--- a/website/assets/fonts/worksans-semibold.svg
+++ /dev/null
@@ -1,1909 +0,0 @@
-[1,909 lines of SVG font glyph data omitted]
diff --git a/website/assets/fonts/worksans-semibold.ttf b/website/assets/fonts/worksans-semibold.ttf
deleted file mode 100755
index b0f25efcb..000000000
Binary files a/website/assets/fonts/worksans-semibold.ttf and /dev/null differ
diff --git a/website/assets/fonts/worksans-semibold.woff b/website/assets/fonts/worksans-semibold.woff
deleted file mode 100755
index 4e0affcca..000000000
Binary files a/website/assets/fonts/worksans-semibold.woff and /dev/null differ
diff --git a/website/assets/fonts/worksans-semibold.woff2 b/website/assets/fonts/worksans-semibold.woff2
deleted file mode 100755
index 67e487e63..000000000
Binary files a/website/assets/fonts/worksans-semibold.woff2 and /dev/null differ
diff --git a/website/assets/img/icons.svg b/website/assets/img/icons.svg
new file mode 100644
index 000000000..aa29ecdca
--- /dev/null
+++ b/website/assets/img/icons.svg
@@ -0,0 +1,29 @@
+[SVG sprite markup omitted; the sprite defines the icons: mail, link, github, reddit]
diff --git a/website/assets/img/logos/chattermill.png b/website/assets/img/logos/chattermill.png
new file mode 100644
index 000000000..0fdd26328
Binary files /dev/null and b/website/assets/img/logos/chattermill.png differ
diff --git a/website/assets/img/logos/dato.png b/website/assets/img/logos/dato.png
new file mode 100644
index 000000000..c0352b914
Binary files /dev/null and b/website/assets/img/logos/dato.png differ
diff --git a/website/assets/img/logos/duedil.png b/website/assets/img/logos/duedil.png
new file mode 100644
index 000000000..8af86b058
Binary files /dev/null and b/website/assets/img/logos/duedil.png differ
diff --git a/website/assets/img/logos/foxtype.png b/website/assets/img/logos/foxtype.png
new file mode 100644
index 000000000..fe412c8c8
Binary files /dev/null and b/website/assets/img/logos/foxtype.png differ
diff --git a/website/assets/img/logos/spyjack.png b/website/assets/img/logos/spyjack.png
new file mode 100644
index 000000000..a4445ba91
Binary files /dev/null and b/website/assets/img/logos/spyjack.png differ
diff --git a/website/assets/img/logos/stitchfix.png b/website/assets/img/logos/stitchfix.png
new file mode 100644
index 000000000..da5ea5cba
Binary files /dev/null and b/website/assets/img/logos/stitchfix.png differ
diff --git a/website/assets/img/logos/synapsify.png b/website/assets/img/logos/synapsify.png
new file mode 100644
index 000000000..0d97bdcba
Binary files /dev/null and b/website/assets/img/logos/synapsify.png differ
diff --git a/website/assets/img/logos/wayblazer.png b/website/assets/img/logos/wayblazer.png
new file mode 100644
index 000000000..f20b28c47
Binary files /dev/null and b/website/assets/img/logos/wayblazer.png differ
diff --git a/website/assets/img/logos/wonderflow.png b/website/assets/img/logos/wonderflow.png
new file mode 100644
index 000000000..9b890e54d
Binary files /dev/null and b/website/assets/img/logos/wonderflow.png differ
diff --git a/website/assets/img/pattern_blue.jpg b/website/assets/img/pattern.jpg
similarity index 100%
rename from website/assets/img/pattern_blue.jpg
rename to website/assets/img/pattern.jpg
diff --git a/website/assets/img/pattern_red.jpg b/website/assets/img/pattern_red.jpg
deleted file mode 100644
index a28d1d8b1..000000000
Binary files a/website/assets/img/pattern_red.jpg and /dev/null differ
diff --git a/website/assets/img/profile_elmar.png b/website/assets/img/profile_elmar.png
deleted file mode 100644
index 4a7b8db16..000000000
Binary files a/website/assets/img/profile_elmar.png and /dev/null differ
diff --git a/website/assets/img/profile_henning.png b/website/assets/img/profile_henning.png
deleted file mode 100644
index 9d1ccb82a..000000000
Binary files a/website/assets/img/profile_henning.png and /dev/null differ
diff --git a/website/assets/img/profile_ines.png b/website/assets/img/profile_ines.png
deleted file mode 100644
index 15639a724..000000000
Binary files a/website/assets/img/profile_ines.png and /dev/null differ
diff --git a/website/assets/img/profile_ines_alt.png b/website/assets/img/profile_ines_alt.png
deleted file mode 100644
index 90390876d..000000000
Binary files a/website/assets/img/profile_ines_alt.png and /dev/null differ
diff --git a/website/assets/img/profile_matt.png b/website/assets/img/profile_matt.png
deleted file mode 100644
index 7b0eb2b26..000000000
Binary files a/website/assets/img/profile_matt.png and /dev/null differ
diff --git a/website/assets/img/profile_matt_alt.png b/website/assets/img/profile_matt_alt.png
deleted file mode 100644
index e75782405..000000000
Binary files a/website/assets/img/profile_matt_alt.png and /dev/null differ
diff --git a/website/assets/img/profile_placeholder.png b/website/assets/img/profile_placeholder.png
deleted file mode 100644
index 4a7b8db16..000000000
Binary files a/website/assets/img/profile_placeholder.png and /dev/null differ
diff --git a/website/assets/img/profile_wolfgang.png b/website/assets/img/profile_wolfgang.png
deleted file mode 100644
index 15b8ac7ef..000000000
Binary files a/website/assets/img/profile_wolfgang.png and /dev/null differ
diff --git a/website/assets/js/main.js b/website/assets/js/main.js
index 486b79eca..075d258e3 100644
--- a/website/assets/js/main.js
+++ b/website/assets/js/main.js
@@ -1,92 +1,55 @@
-(function() {
+//- ----------------------------------
+//- 💫 MAIN JAVASCRIPT
+//- ----------------------------------
- // Elements
- var topnav = document.getElementById('topnav');
- var sidebar = document.getElementById('sidebar');
-
- if(sidebar) {
- var navSelector = 'data-section';
- var sidebarOffset = sidebar.offsetTop;
- var navLinks = document.querySelectorAll('[' + navSelector + ']');
- var elements = getElements();
- }
+'use strict';
- var vh = getVh();
- var vhPadding = 525;
- var scrollY = 0;
- var ticking = false;
- var scrollUp = false;
+const $ = document.querySelector.bind(document);
+const $$ = document.querySelectorAll.bind(document);
- // Load
- document.addEventListener('DOMContentLoaded', function() {
- window.addEventListener('scroll', onScroll, false);
- window.addEventListener('resize', onResize, false);
- });
+{
+ const updateVh = () => Math.max(document.documentElement.clientHeight, window.innerHeight || 0);
+ const nav = $('.js-nav');
+ const sidebar = $('.js-sidebar');
+ const vhPadding = 525;
- function onScroll() {
- var newScrollY = (window.pageYOffset || document.scrollTop) - (document.clientTop || 0);
+ let vh = updateVh();
+ let scrollY = 0;
+ let scrollUp = false;
+
+ const updateNav = () => {
+ const vh = updateVh();
+ const newScrollY = (window.pageYOffset || document.documentElement.scrollTop) - (document.documentElement.clientTop || 0);
scrollUp = newScrollY <= scrollY;
scrollY = newScrollY;
- if(!ticking) {
- requestAnimationFrame(update);
- ticking = true;
- }
+ if(scrollUp && !(isNaN(scrollY) || scrollY <= vh)) nav.classList.add('is-fixed');
+ else if(!scrollUp || (isNaN(scrollY) || scrollY <= vh/2)) nav.classList.remove('is-fixed');
}
- function update() {
+ const updateSidebar = () => {
+ const sidebar = $('.js-sidebar');
+ if(sidebar.offsetTop - scrollY <= 0) sidebar.classList.add('is-fixed');
+ else sidebar.classList.remove('is-fixed');
- if(sidebar) {
- // Fix sidebar
- if(sidebarOffset - scrollY <= 0) sidebar.classList.add('fixed');
- else sidebar.classList.remove('fixed');
+ [...$$('[data-section]')].forEach(el => {
+ const trigger = el.getAttribute('data-section');
- // Toggle navlinks
- for(var i = 0; i < elements.length; i++) {
- if(inViewport(elements[i])) elements[i].target.classList.add('active');
- else elements[i].target.classList.remove('active');
+ if(trigger) {
+ const target = $(`#${trigger}`);
+ const offset = parseInt(target.offsetTop);
+ const height = parseInt(target.scrollHeight);
+
+ if((offset - scrollY) <= vh/2 && (offset - scrollY) > -height + vhPadding) {
+ [...$$('[data-section]')].forEach(item => item.classList.remove('is-active'));
+ $(`[data-section="${trigger}"]`).classList.add('is-active');
+ }
}
- }
-
- // Fix topnav
- if(scrollUp && !(isNaN(scrollY) || scrollY <= vh)) topnav.classList.add('fixed');
- else if(!scrollUp || (isNaN(scrollY) || scrollY <= vh/2)) topnav.classList.remove('fixed');
-
- ticking = false;
+ });
}
- function onResize() {
- vh = getVh();
-
- if(sidebar) {
- sidebarOffset = sidebar.offsetTop;
- elements = getElements();
- }
- }
-
- function getElements() {
- var elements = [];
-
- for(var i = 0; i < navLinks.length; i++) {
- var trigger = document.getElementById(navLinks[i].getAttribute(navSelector));
-
- elements.push({
- trigger: trigger,
- target: navLinks[i],
- height: parseInt(trigger.scrollHeight),
- offset: parseInt(trigger.offsetTop)
- });
- }
-
- return elements;
- }
-
- function getVh() {
- return Math.max(document.documentElement.clientHeight, window.innerHeight || 0);
- }
-
- function inViewport(element) {
- return (element.offset - scrollY) <= vh/2 && (element.offset - scrollY) > -element.height + vhPadding;
- }
-})();
+ window.addEventListener('resize', () => vh = updateVh());
+ window.addEventListener('scroll', updateNav);
+ if($('.js-sidebar')) window.addEventListener('scroll', updateSidebar);
+}
diff --git a/website/blog/_data.json b/website/blog/_data.json
index 84eb25376..0d207a2e2 100644
--- a/website/blog/_data.json
+++ b/website/blog/_data.json
@@ -4,184 +4,7 @@
"title" : "Blog"
},
- "modular-markup": {
- "title": "Rebuilding a Website with Modular Markup Components",
- "date": "2016-03-31",
- "author": "ines",
- "description": "In a small team, everyone should be able to contribute content to the website and make use of the full set of visual components, without having to worry about design or write complex HTML. To help us write docs, tutorials and blog posts about spaCy, we've developed a powerful set of modularized markup components, implemented using Jade.",
- "image": {
- "file": "markup.jpg",
- "file_small": "markup_small.jpg",
- "file_large": "markup_large.jpg",
- "credit": "Kemal Sanli",
- "url": "https://dribbble.com/kemal"
- }
- },
-
- "sense2vec-with-spacy": {
- "title": "Sense2vec with spaCy and Gensim",
- "date": "2016-02-15",
- "author": "matt",
- "description": "If you were doing text analytics in 2015, you were probably using word2vec. Sense2vec (Trask et. al, 2015) is a new twist on word2vec that lets you learn more interesting, detailed and context-sensitive word vectors. This post motivates the idea, explains our implementation, and comes with an interactive demo that we've found surprisingly addictive.",
- "image" : {
- "file": "sense2vec.jpg",
- "file_small": "sense2vec_small.jpg",
- "file_large": "sense2vec_large.jpg",
- "credit": "Kemal Sanli",
- "url": "https://dribbble.com/kemal"
- },
- "links": {
- "HackerNews": "https://news.ycombinator.com/item?id=11106386",
- "ProductHunt": "https://www.producthunt.com/tech/spacy-io"
- }
- },
-
- "spacy-now-mit": {
- "title": "AGPL Not Free Enough: spaCy now MIT",
- "date": "2015-09-28",
- "author": "matt",
- "description": "Three big announcements: we're changing license, to MIT from AGPL; a new co-founder is coming on board, Henning Peters; and we're launching a new service, to adapt spaCy's statistical models to your task.",
- "image" : {
- "file": "agpl-not-free.jpg",
- "file_small": "agpl-not-free_small.jpg",
- "file_large": "agpl-not-free_large.jpg",
- "credit": "Kemal Sanli",
- "url": "https://dribbble.com/kemal"
- },
- "links": {
- "HackerNews": "https://news.ycombinator.com/item?id=10288089"
- }
- },
-
- "dead-code-should-be-buried": {
- "title": "Dead Code Should Be Buried",
- "date": "2015-09-04",
- "author": "matt",
- "description": "Natural Language Processing moves fast, so maintaining a good library means constantly throwing things away. Most libraries are failing badly at this, as academics hate to editorialize. This post explains the problem, why it's so damaging, and why I wrote spaCy to do things differently.",
- "image" : {
- "file": "deadcode.jpg",
- "file_small": "deadcode_small.jpg",
- "file_large": "deadcode_large.jpg",
- "credit": "Kemal Sanli",
- "url": "https://dribbble.com/kemal"
- },
- "links": {
- "Reddit": "https://www.reddit.com/r/programming/comments/3jmgck/dead_code_should_be_buried_why_i_wrote_spacy/",
- "HackerNews": "https://news.ycombinator.com/item?id=10173669"
- }
- },
-
- "eli5-computers-learn-reading": {
- "title": "Statistical NLP in Basic English",
- "date": "2015-08-24",
- "author": "matt",
- "description": "When I was little, my favorite TV shows all had talking computers. Now I’m big and there are still no talking computers, so I’m trying to make some myself. Well, we can make computers say things. But when we say things back, they don’t really understand. Why not?",
- "image" : {
- "file": "basic-english.jpg",
- "file_small": "basic-english_small.jpg",
- "file_large": "basic-english_large.jpg",
- "credit": "Kemal Sanli",
- "url": "https://dribbble.com/kemal"
- },
- "links": false
- },
-
- "displacy": {
- "featured": true,
- "title": "Displaying Linguistic Structure With CSS",
- "date": "2015-08-19",
- "author": "matt",
- "description": "One of the features of the relaunch I'm most excited about is the displaCy visualizer and annotation tool. This solves two problems I've thought about a lot: first, how can I help people understand what information spaCy gives them access to? Without a good visualization, the ideas are very abstract. Second, how can we make dependency trees easy for humans to create?",
- "image": {
- "file": "displacy.jpg",
- "file_small": "displacy_small.jpg",
- "file_large": "displacy_large.jpg"
- },
- "links": {
- "Reddit": "https://www.reddit.com/r/programming/comments/3hoj0b/displaying_linguistic_structure_with_css/"
- }
- },
-
- "introducing-spacy": {
- "title": "Introducing spaCy",
- "date": "2015-02-19",
- "author": "matt",
- "description": "Computers don't understand text. This is unfortunate, because that's what the web almost entirely consists of. We want to recommend people text based on other text they liked. We want to shorten text to display it on a mobile screen. We want to aggregate it, link it, filter it, categorise it, generate it and correct it. spaCy provides a library of utility functions that help programmers build such products.",
- "image": {
- "file": "introducing-spacy.jpg",
- "file_small": "introducing-spacy_small.jpg",
- "file_large": "introducing-spacy_large.jpg"
- },
- "links": {
- "Reddit": "https://www.reddit.com/r/programming/comments/2tlyrr/spacy_industrialstrength_nlp_with_pythoncython",
- "HackerNews": "https://news.ycombinator.com/item?id=8942783"
- }
- },
-
- "how-spacy-works": {
- "title": "How spaCy Works",
- "date": "2015-02-19",
- "author": "matt",
- "description": "This post is a work in progress, explaining some of how spaCy is designed and implemented, and noting which algorithms were used. spaCy is built on science, not alchemy, and when new discoveries are made, we publish them. We want to stay on the same page as the academic community, to use their work. Still, explaining everything takes time — so this post isn't yet as complete as we'd like it to be. Stay tuned.",
- "image": {
- "file": "how-spacy-works.jpg",
- "file_small": "how-spacy-works_small.jpg",
- "file_large": "how-spacy-works_large.jpg",
- "credit": "Kemal Sanli",
- "url": "https://dribbble.com/kemal"
- }
- },
-
- "writing-c-in-cython": {
- "title": "Writing C in Cython",
- "date": "2014-10-21",
- "author": "matt",
- "description": "For the last two years, I’ve done almost all of my work in Cython. And I don’t mean, I write Python, and then “Cythonize” it, with various type-declarations et cetera. I just, write Cython. I use \"raw\" C structs and arrays, and occasionally C++ vectors, with a thin wrapper around malloc/free that I wrote myself. The code is almost always exactly as fast as C/C++, because that's really all it is, but with Python right there, if I want it.",
- "image" : {
- "file": "cython.jpg",
- "file_small": "cython_small.jpg",
- "file_large": "cython_large.jpg",
- "credit": "Kemal Sanli",
- "url": "https://dribbble.com/kemal"
- },
- "links": {
- "Reddit": "https://www.reddit.com/r/Python/comments/2jvdw9/writing_c_in_cython/",
- "HackerNews": "https://news.ycombinator.com/item?id=8483872"
- }
- },
-
- "parsing-english-in-python": {
- "title": "Parsing English in 500 Lines of Python",
- "date": "2013-12-18",
- "author": "matt",
- "description": "This post explains how transition-based dependency parsers work, and argues that this algorithm represents a break-through in natural language understanding. A concise sample implementation is provided, in 500 lines of Python, with no external dependencies. This post was written in 2013. In 2015 this type of parser is now increasingly dominant.",
- "image" : {
- "file": "pizza.jpg",
- "file_small": "pizza_small.jpg",
- "file_large": "pizza_large.jpg",
- "credit": "Kemal Sanli",
- "url": "https://dribbble.com/kemal"
- },
- "links": {
- "Reddit": "https://www.reddit.com/r/programming/comments/245jte/parsing_english_with_500_lines_of_python/",
- "HackerNews": "https://news.ycombinator.com/item?id=7658864"
- }
- },
-
- "part-of-speech-pos-tagger-in-python": {
- "title": "A Good Part-of-Speech Tagger in about 200 Lines of Python",
- "date": "2013-09-18",
- "author": "matt",
- "description": "Up-to-date knowledge about natural language processing is mostly locked away in academia. And academics are mostly pretty self-conscious when we write. We’re careful. We don’t want to stick our necks out too much. But under-confident recommendations suck, so here’s how to write a good part-of-speech tagger.",
- "image" : {
- "file": "pos-tagger.jpg",
- "file_small": "pos-tagger_small.jpg",
- "file_large": "pos-tagger_large.jpg",
- "credit": "Kemal Sanli",
- "url": "https://dribbble.com/kemal"
- },
- "links": {
- "Reddit": "https://www.reddit.com/r/programming/comments/1mdn75/a_good_partofspeech_tagger_in_200_lines_of_python/"
- }
+ "announcement" : {
+ "title": "Important Announcement"
}
}
diff --git a/website/blog/announcement.jade b/website/blog/announcement.jade
new file mode 100644
index 000000000..977ff9944
--- /dev/null
+++ b/website/blog/announcement.jade
@@ -0,0 +1,12 @@
+include ../_includes/_mixins
+
+.u-padding
+ +label #[+date("2016-08-09")]
+
+ p.u-text-large Dear spaCy users,
+
+ p.u-text-medium Unfortunately, we (Henning Peters and Matthew Honnibal) are parting ways. Breaking up is never easy, and it's taken us a while to get our stuff together. Hopefully, you didn't notice anything was up — if you did, we hope you haven't been inconvenienced.
+
+ p.u-text-medium Here's how this is going to work: Matt will continue to develop and maintain spaCy and all related projects under his name. Nothing will change for you. Henning will take over our legal structure and start a new business under a new name.
+
+ p.u-text-medium Sincerely,#[br] Henning Peters and Matthew Honnibal
diff --git a/website/blog/dead-code-should-be-buried.jade b/website/blog/dead-code-should-be-buried.jade
deleted file mode 100644
index 7e07ab204..000000000
--- a/website/blog/dead-code-should-be-buried.jade
+++ /dev/null
@@ -1,30 +0,0 @@
-include ../_includes/_mixins
-
-+lead Natural Language Processing moves fast, so maintaining a good library means constantly throwing things away. Most libraries are failing badly at this, as academics hate to editorialize. This post explains the problem, why it's so damaging, and why I wrote #[a(href=url target="_blank") spaCy] to do things differently.
-
-p Imagine: you try to use Google Translate, but it asks you to first select which model you want. The new, awesome deep-learning model is there, but so are lots of others. You pick one that sounds fancy, but it turns out it's a 20-year-old experimental model trained on a corpus of oven manuals. When it performs little better than chance, you can't even tell from its output. Of course, Google Translate would not do this to you. But most Natural Language Processing libraries do, and it's terrible.
-
-p Natural Language Processing (NLP) research moves very quickly. The new models supersede the old ones. And yet most NLP libraries are loath to ever throw anything away. The ones that have been around a long time then start to look very large and impressive. But big is not beautiful here. It is not a virtue to present users with a dozen bad options.
-
-p Have a look through the #[a(href="http://gate.ac.uk/sale/tao/split.html" target="_blank") GATE software]. There's a lot there, developed over 12 years and many person-hours. But there's approximately zero curation. The philosophy is just to provide things. It's up to you to decide what to use.
-
-p This is bad. It's bad to provide an implementation of #[a(href="https://gate.ac.uk/sale/tao/splitch18.html" target="_blank") MiniPar], and have it just...sit there, with no hint that it's 20 years old and should not be used. The RASP parser, too. Why are these provided? Worse, why is there no warning? The #[a(href="http://wayback.archive.org/web/20150510100903/http://webdocs.cs.ualberta.ca/~lindek/minipar.htm" target="_blank") Minipar homepage] puts the software in the right context:
-
-+quote MINIPAR is a broad-coverage parser for the English language. An evaluation with the SUSANNE corpus shows that MINIPAR achieves about 88% precision and 80% recall with respect to dependency relationships. MINIPAR is very efficient, #[strong on a Pentium II 300 with 128MB memory], it parses about 300 words per second.
-
-p Ideally there would be a date, but it's still obvious that this isn't software anyone should be executing in 2015, unless they're investigating the history of the field.
-
-p A less extreme example is #[a(href="http://nlp.stanford.edu/software/corenlp.shtml" target="_blank") CoreNLP]. They offer a range of models with complicated speed/accuracy/loading time trade-offs, many with subtly different output. Mostly no model is strictly dominated by another, so there's some case for offering all these options. But to my taste there's still far too much there, and the recommendation of what to use is far from clear.
-
-+h3("why-i-didnt-contribute-to-nltk") Why I didn't contribute to NLTK
-
-p Various people have asked me why I decided to make a new Python NLP library, #[a(href=url target="_blank") spaCy], instead of supporting the #[a(href="http://nltk.org" target="_blank") NLTK] project. This is the main reason. You can't contribute to a project if you believe that the first thing that they should do is throw almost all of it away. You should just make your own project, which is what I did.
-p Have a look through #[a(href="http://www.nltk.org/py-modindex.html" target="_blank") the module list of NLTK]. It looks like there's a lot there, but there's not. What NLTK has is a decent tokenizer, some passable stemmers, a good implementation of the Punkt sentence boundary detector (after #[a(href="http://joelnothman.com/" target="_blank") Joel Nothman] rewrote it), some visualization tools, and some wrappers for other libraries. Nothing else is of any use.
-
-p For instance, consider #[code nltk.parse]. You might think that amongst all this code there was something that could actually predict the syntactic structure of a sentence for you, but you would be wrong. There are wrappers for the BLLIP and Stanford parsers, and since March there's been an implementation of Nivre's 2003 transition-based dependency parser. Unfortunately no model is provided for it, as they rely on an external wrapper of an external learner, which is unsuitable for the structure of their problem. So the implementation is too slow to actually be usable.
-
-p This problem is totally avoidable, if you just sit down and write good code, instead of stitching together external dependencies. I pointed NLTK to my tutorial describing #[a(href="/blog/parsing-english-in-python" target="_blank") how to implement a modern dependency parser], which includes a BSD-licensed implementation in 500 lines of Python. I was told "thanks but no thanks", and #[a(href="https://github.com/nltk/nltk/issues/694") the issue was abruptly closed]. Another researcher's offer from 2012 to implement this type of model also went #[a(href="http://arxiv.org/pdf/1409.7386v1.pdf") unanswered].
-
-p The story in #[code nltk.tag] is similar. There are plenty of wrappers, for the external libraries that have actual taggers. The only actual tagger model they distribute is #[a(href="/blog/part-of-speech-POS-tagger-in-python/") terrible]. Now it seems that #[a(href="https://github.com/nltk/nltk/issues/1063" target="_blank") NLTK does not even know how its POS tagger was trained]. The model is just this .pickle file that's been passed around for 5 years, its origins lost to time. It's not okay to offer this to people, to recommend they use it.
-
-p I think open source software should be very careful to make its limitations clear. It's a disservice to provide something that's much less useful than you imply. It's like offering your friend a lift and then not showing up. It's totally fine to not do something – so long as you never suggested you were going to do it. There are ways to do worse than nothing.
diff --git a/website/blog/displacy.jade b/website/blog/displacy.jade
deleted file mode 100644
index da8ef3047..000000000
--- a/website/blog/displacy.jade
+++ /dev/null
@@ -1,28 +0,0 @@
-include ../_includes/_mixins
-
-+lead A syntactic dependency parse is a kind of shallow meaning representation. It's an important piece of many language understanding and text processing technologies. Now that these representations can be computed quickly, and with increasingly high accuracy, they're being used in lots of applications – translation, sentiment analysis, and summarization are major application areas.
-
-p I've been living and breathing similar representations for most of my career. But there's always been a problem: talking about these things is tough. Most people haven't thought much about grammatical structure, and the idea of them is inherently abstract. When I left academia to write #[a(href=url target="_blank") spaCy], I knew I wanted a good visualizer. Unfortunately, I also knew I'd never be the one to write it. I'm deeply graphically challenged. Fortunately, when working with #[a(href="http://ines.io" target="_blank") Ines] to build this site, she really nailed the problem, with a solution I'd never have thought of. I really love the result, which we're calling #[a(href="/demos/displacy" target="_blank") displaCy]:
-
-+displacy("robots-in-popular-culture", "Scroll to see the full parse")
-
- p The #[a(href="https://code.google.com/p/whatswrong/" target="_blank") best alternative] is a Java command-line tool that outputs static images, which look like this:
-
- +image("linguistic-structure.jpg", "Output of the Brat parse tree visualizer")
-
- p I find the output of the CMU visualizer basically unreadable. Pretty much all visualizers suffer from this problem: they don't add enough space. I always thought this was a hard problem, and a good Javascript visualizer would need to do something crazy with Canvas. Ines quickly proposed a much better solution, based on native, web-standard technologies.
-
-p The idea is to use CSS to draw shapes, mostly with border styling, and some arithmetic to figure out the spacing:
-
-+quote("Ines Montani, Developing Displacy", "http://ines.io/blog/developing-displacy") The arrow needs only one HTML element, #[code <div class="arrow">] and the CSS pseudo-elements #[code :before] and #[code :after]. The #[code :before] pseudo-element is used for the arc and is essentially a circle (#[code border-radius: 50%]) with a black outline. Since its parent #[code .arrow] is only half its height and set to #[code overflow: hidden], it’s "cut in half" and ends up looking like a half circle.
-
-p To me, this seemed like witchcraft, or a hack at best. But I was quickly won over: if all we do is declare the data and the relationships, in standards-compliant HTML and CSS, then we can simply step back and let the browser do its job. We know the code will be small, the layout will work on a variety of displays, and we'll have a ready separation of style and content. For long output, we simply let the graphic overflow, and let users scroll.
-
-p What I'm particularly excited about is the potential for displaCy as an #[a(href="http://spacy.io/displacy/?manual=Robots%20in%20popular%20culture%20are%20there%20to%20remind%20us%20of%20the%20awesomeness%20of%20unbounded%20human%20agency" target="_blank") annotation tool]. It may seem unintuitive at first, but I think it will be much better to annotate texts the way the parser operates, with a small set of actions and a stack, than by selecting arcs directly. Why? A few reasons:
-
-+list
- +item You're always asked a question. You don't have to decide-what-to-decide.
- +item The viewport can scroll with the user, making it easier to work with spacious, readable designs.
- +item With only 4-6 different actions, it's easy to have key-based input.
-
-p Efficient manual annotation is incredibly important. If we can get that right, then we can offer you cheap domain adaptation. You give us some text, we get it annotated, and ship you a custom model, that's much more accurate on your data. If you're interested in helping us beta test this idea, #[a(href="mailto:" + email) get in touch].
diff --git a/website/blog/displacy/pizza-with-anchovies-bad.html b/website/blog/displacy/pizza-with-anchovies-bad.html
deleted file mode 100644
index 09250c6ee..000000000
--- a/website/blog/displacy/pizza-with-anchovies-bad.html
+++ /dev/null
@@ -1,14 +0,0 @@
-[HTML omitted: a standalone "displaCy Demo" page rendering the sentence "They ate the pizza with anchovies"]
diff --git a/website/blog/displacy/pizza-with-anchovies-good.html b/website/blog/displacy/pizza-with-anchovies-good.html
deleted file mode 100644
index 1826c3db9..000000000
--- a/website/blog/displacy/pizza-with-anchovies-good.html
+++ /dev/null
@@ -1,14 +0,0 @@
-[HTML omitted: a standalone "displaCy Demo" page rendering the sentence "They ate the pizza with anchovies"]
diff --git a/website/blog/displacy/robots-in-popular-culture.html b/website/blog/displacy/robots-in-popular-culture.html
deleted file mode 100644
index 30c2e6335..000000000
--- a/website/blog/displacy/robots-in-popular-culture.html
+++ /dev/null
@@ -1,14 +0,0 @@
-[HTML omitted: a standalone "displaCy Demo" page rendering the sentence "Robots in popular culture are there to remind us of the awesomeness of unbounded human agency."]
diff --git a/website/blog/eli5-computers-learn-reading.jade b/website/blog/eli5-computers-learn-reading.jade
deleted file mode 100644
index dc6a49b7f..000000000
--- a/website/blog/eli5-computers-learn-reading.jade
+++ /dev/null
@@ -1,31 +0,0 @@
-include ../_includes/_mixins
-
-+lead As told with the #[a(href="http://splasho.com/upgoer5/" target="_blank") ten hundred most common words] that I speak.
-
-p When I was little, my favorite TV shows all had talking computers. Now I'm big and there are still no talking computers. At least, not really talking. We can make them, like, say things — but I want them to tell us things. And I want them to listen, and to read. Why is this so hard?
-
-p It turns out that almost anything we say could mean many many different things, but we don't notice because almost all of those meanings would be weird or stupid or just not possible. If I say:
-
-+example: a(href="http://spacy.io/displacy/?full=I%20saw%20a%20movie%20in%20a%20dress" target="_blank") I saw a movie in a dress
-
-p Would you ever ask me,
-
-+example “Were you in the dress, or was the movie in the dress?”
-
-p It's weird to even think of that. But a computer just might, because there are other cases like:
-
-+example: a(href="http://spacy.io/displacy/?full=The%20TV%20showed%20a%20girl%20in%20a%20dress" target="_blank") The TV showed a girl in a dress
-
-p Where the words hang together in the other way. People used to think that the answer was to tell the computer lots and lots of facts. But then you wake up one day and you're writing facts like #[em movies do not wear dresses], and you wonder where it all went wrong. Actually it's even worse than that. Not only are there too many facts, most of them are not even really facts! #[a(href="https://en.wikipedia.org/wiki/Cyc" target="_blank") People really tried this]. We've found that the world is made up of #[em if]s and #[em but]s.
-
-p These days we just show the computer lots and lots and lots of words. We gave up trying to get it to understand what a “dress” is. We let #[em dress] be just some letters. But if it is seen around #[em girl] enough times (which is just some other letters, which are seen around some #[strong other] other letters), it can make good guesses.
-
-p It doesn't always guess right, but we can tell how often it does, and we can think of ways to help it learn better. We have a number, and we can slowly make it bigger, a little bit by a little bit.
-
-p (One thing I've learned is, people are great at making a number bigger, if you pay a lot of them to try. The key is to pick numbers where, if they make the number bigger, they can't help but have done something actually good. This is harder than it sounds. Some say no numbers are like this. I ask them to show me much good being done another way, but they never can.)
-
-+pullquote("Instead of telling the computer facts, what we needed to do was tell it how to learn.")
-
-p The ideas we come up with for getting the computer to talk, listen or read a little better can be used to get it to see or plan a little better, and the other way around. Once we stopped telling it things like “#[em movies do not wear dresses]”, things really took off.
-
-p Each bit of work still only makes our numbers a little bit bigger, and the bigger the numbers go, the harder they are to raise. But that is a good problem to have. Now that computers can read quite well, I think we should be able to do pretty great things. What should we get them to read?
diff --git a/website/blog/how-spacy-works.jade b/website/blog/how-spacy-works.jade
deleted file mode 100644
index 8ac362fb9..000000000
--- a/website/blog/how-spacy-works.jade
+++ /dev/null
@@ -1,143 +0,0 @@
-include ../_includes/_mixins
-
-+lead The following are some hasty preliminary notes on how spaCy works. The short story is, there are no new killer algorithms. The way that the tokenizer works is novel and a bit neat, and the parser has a new feature set, but otherwise the key algorithms are well known in the recent literature.
-
-p Some might also wonder how I get Python code to run so fast. I don't – spaCy is written in #[a(href="http://cython.org" target="_blank") Cython], an optionally statically-typed language that compiles to C or C++, which is then loaded as a C extension module. This makes it #[a(href="/blog/writing-c-in-cython" target="_blank") easy] to achieve the performance of native C code, but allows the use of Python language features, via the Python C API. The Python unicode library was particularly useful to me. I think it would have been much more difficult to write spaCy in another language.
-
-+h3("tokenizer-and-lexicon") Tokenizer and Lexicon
-
-p Tokenization is the task of splitting a string into meaningful pieces, called tokens, which you can then compute with. In practice, the task is usually to match the tokenization performed in some treebank, or other corpus. If we want to apply a tagger, entity recogniser, parser etc, then we want our run-time text to match the training conventions. If we want to use a model that's been trained to expect "isn't" to be split into two tokens, ["is", "n't"], then that's how we need to prepare our data.
-
-p In order to train spaCy's models with the best data available, I therefore tokenize English according to the Penn Treebank scheme. It's not perfect, but it's what everybody is using, and it's good enough.
-
-+h3("what-we-dont-do") What we don't do
-
-p The Penn Treebank was distributed with a script called tokenizer.sed, which tokenizes ASCII newswire text roughly according to the Penn Treebank standard. Almost all tokenizers are based on these regular expressions, with various updates to account for unicode characters, and the fact that it's no longer 1986 – today's text has URLs, emails, emoji, etc.
-
-p Usually, the resulting regular expressions are applied in multiple passes, which is quite inefficient. Often no care is taken to preserve indices into the original string. If you lose these indices, it'll be difficult to calculate mark-up based on your annotations.
-
-+h3("tokenizer-algorithm") Tokenizer Algorithm
-
-p spaCy's tokenizer assumes that no tokens will cross whitespace – there will be no multi-word tokens. If we want these, we can post-process the token-stream later, merging as necessary. This assumption allows us to deal only with small chunks of text. We can cache the processing of these, and simplify our expressions somewhat.
-
-p Here is what the outer-loop would look like in Python. (You can see the production implementation, in Cython, #[a(href="https://github.com/" + profiles.github + "/spaCy/blob/master/spacy/tokenizer.pyx#L56" target="_blank") here].)
-
-+code.
-    cache = {}
-    def tokenize(text):
-        tokens = []
-        for substring in text.split(' '):
-            if substring in cache:
-                tokens.extend(cache[substring])
-            else:
-                subtokens = _tokenize_substring(substring)
-                tokens.extend(subtokens)
-                cache[substring] = subtokens
-        return tokens
-
-p The actual work is performed in #[code _tokenize_substring]. For this, I divide the tokenization rules into three pieces:
-
-+list
- +item A prefixes expression, which matches from the start of the string;
- +item A suffixes expression, which matches from the end of the string;
- +item A special-cases table, which matches the whole string.
-
-p The algorithm then proceeds roughly like this (consider this like pseudo-code; this was written quickly and has not been executed):
-
-+code.
-    # Tokens which can be attached at the beginning or end of another
-    prefix_re = _make_re([",", '"', '(', ...])
-    suffix_re = _make_re([",", "'", ":", "'s", ...])
-
-    # Contractions etc are simply enumerated, since they're a finite set. We
-    # can also specify anything we like here, which is nice --- different data
-    # has different quirks, so we want to be able to add ad hoc exceptions.
-    special_cases = {
-        "can't": ("ca", "n't"),
-        "won't": ("wo", "n't"),
-        "he'd've": ("he", "'d", "'ve"),
-        ...
-        ":)": (":)",)  # We can add any arbitrary thing to this list.
-    }
-
-    def _tokenize_substring(substring):
-        prefixes = []
-        suffixes = []
-        while substring and substring not in special_cases:
-            prefix, substring = _apply_re(substring, prefix_re)
-            if prefix:
-                prefixes.append(prefix)
-            else:
-                suffix, substring = _apply_re(substring, suffix_re)
-                if suffix:
-                    suffixes.append(suffix)
-                else:
-                    break
-        # Reassemble: the split-off prefixes, the (possibly special-cased)
-        # remainder, then the suffixes in string order.
-        core = special_cases.get(substring, (substring,) if substring else ())
-        return prefixes + list(core) + suffixes[::-1]
-
-p This procedure splits off tokens from the start and end of the string, at each point checking whether the remaining string is in our special-cases table. If it is, we stop splitting, and return the tokenization at that point.
-
-p The advantage of this design is that the prefixes, suffixes and special-cases can be declared separately, in easy-to-understand files. If a new entry is added to the special-cases, you can be sure that it won't have some unforeseen consequence to a complicated regular-expression grammar.
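
To make the shape of the algorithm concrete, here is a minimal runnable sketch of the same prefix/suffix/special-cases loop in plain Python. The rule set is a toy one, assumed purely for illustration; spaCy's real prefix, suffix and special-case tables are far larger.

```python
import re

# Toy rules, assumed purely for illustration.
prefix_re = re.compile(r'^[,"(]')
suffix_re = re.compile(r'''[,"'):]$|'s$''')
special_cases = {"can't": ["ca", "n't"], ":)": [":)"]}

def tokenize_substring(substring):
    prefixes, suffixes = [], []
    while substring and substring not in special_cases:
        pre = prefix_re.search(substring)
        if pre:
            prefixes.append(pre.group())
            substring = substring[pre.end():]
            continue
        suf = suffix_re.search(substring)
        if suf:
            suffixes.append(suf.group())
            substring = substring[:suf.start()]
            continue
        break
    # The remainder is either a special case, a plain token, or empty.
    core = special_cases.get(substring, [substring] if substring else [])
    return prefixes + core + suffixes[::-1]

print(tokenize_substring("(can't,"))  # ['(', 'ca', "n't", ',']
```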
-
-+h3("coupling-tokenizer-lexicon") Coupling the Tokenizer and Lexicon
-
-p As mentioned above, the tokenizer is designed to support easy caching. If all we were caching were the matched substrings, this would not be so advantageous. Instead, what we do is create a struct which houses all of our lexical features, and cache #[em that]. The tokens are then simply pointers to these rich lexical types.
-
-p In a sample of text, vocabulary size grows much more slowly than word count – sub-linearly, in the way described by Heaps' law. So any computation we can perform once per vocabulary item, and then reuse for every occurrence of that item, is cheap.
-
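-p To make that concrete, here's a rough sketch of the idea in plain Python – the names are invented for illustration, the real implementation uses Cython structs, and #[code get_brown_cluster] stands in for whatever lexical features you pre-compute:
-
-+code.
-    class Lexeme(object):
-        # One rich lexical entry per vocabulary item, computed once
-        def __init__(self, string):
-            self.string = string
-            self.lower = string.lower()
-            self.is_digit = string.isdigit()
-            self.cluster = get_brown_cluster(string)  # hypothetical helper
-
-    lexicon = {}
-    def get_lexeme(string):
-        if string not in lexicon:
-            lexicon[string] = Lexeme(string)
-        return lexicon[string]  # tokens hold references to these entries
-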
-+h3("part-of-speech-tagger") Part-of-speech Tagger
-
-p In 2013, I wrote a blog post describing #[a(href="/blog/part-of-speech-POS-tagger-in-python/" target="_blank") how to write a good part of speech tagger]. My recommendation then was to use greedy decoding with the averaged perceptron. I think this is still the best approach, so it's what I implemented in spaCy.
-
-p The tutorial also recommends the use of Brown cluster features, and case normalization features, as these make the model more robust and domain independent. spaCy's tagger makes heavy use of these features.
-
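-p To give a flavour of what these features look like, here's an invented sketch – the templates are illustrative, not spaCy's actual feature set:
-
-+code.
-    def tagger_features(word, prev_tag, prev2_tag, cluster):
-        # Case normalization: rare case variants share a feature
-        norm = word.lower()
-        return [
-            'norm=' + norm,
-            'prev_tag=' + prev_tag,
-            'prev2_tag=' + prev2_tag,
-            # Brown cluster bit-string prefixes give coarse and fine
-            # groupings of distributionally similar words
-            'cluster4=' + cluster[:4],
-            'cluster6=' + cluster[:6],
-        ]
-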
-+h3("dependency-parser") Dependency Parser
-
-p The parser uses the algorithm described in my #[a(href="/blog/parsing-english-in-python" target="_blank") 2014 blog post]. This algorithm, shift-reduce dependency parsing, is becoming widely adopted due to its compelling speed/accuracy trade-off.
-
-p Some quick details about spaCy's take on this, for those who happen to know these models well. I'll write up a better description shortly.
-
-+list("numbers")
- +item I use greedy decoding, not beam search;
- +item I use the arc-eager transition system;
- +item I use the Goldberg and Nivre (2012) dynamic oracle.
- +item I use the non-monotonic update from my CoNLL 2013 paper (Honnibal, Goldberg and Johnson 2013).
-
-p So far, this is exactly the configuration from the CoNLL 2013 paper, which scored 91.0. So how have I gotten it to 92.4? The following tweaks:
-
-+list("numbers")
- +item I use Brown cluster features – these help a lot;
- +item I redesigned the feature set. I've long known that the Zhang and Nivre (2011) feature set was suboptimal, but a few features don't make a very compelling publication. Still, they're important.
-    +item When I do the dynamic oracle training, I also make the update cost-sensitive: if the oracle determines that the move the parser took has a cost of N, then the weights for the gold class are incremented by +N, and the weights for the predicted class are incremented by -N (see the sketch below). This only made a small (0.1-0.2%) difference.
-
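-p In code, that cost-sensitive update looks roughly like this – a sketch of the idea, not spaCy's actual implementation:
-
-+code.
-    def update(weights, features, gold_class, guess_class, cost):
-        # cost == 0 means the guess was itself an optimal move: no update.
-        # Otherwise, scale the perceptron update by the oracle's cost.
-        for feat in features:
-            weights[feat][gold_class] += cost
-            weights[feat][guess_class] -= cost
-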
-+h3("implementation") Implementation
-
-p I don't do anything algorithmically novel to improve the efficiency of the parser. However, I was very careful in the implementation.
-
-p A greedy shift-reduce parser with a linear model boils down to the following loop:
-
-+code.
- def parse(words, model, feature_funcs, n_classes):
- state = init_state(words)
- for _ in range(len(words) * 2):
- features = [templ(state) for templ in feature_funcs]
- scores = [0 for _ in range(n_classes)]
- for feat in features:
- weights = model[feat]
- for i, weight in enumerate(weights):
- scores[i] += weight
- class_, score = max(enumerate(scores), key=lambda item: item[1])
- transition(state, class_)
-
-p The parser makes 2N transitions for a sentence of length N. In order to select each transition, it extracts a vector of K features from the state. Each feature is used as a key into a hash table managed by the model, and maps to a vector of weights, of length C. We then sum the weight vectors of the active features to produce the scores for that instance.
-
-p The inner-most loop here is not so bad: we only have a few dozen classes, so it's just a short dot product. Both of the vectors are in the cache, so this is a snack to a modern CPU.
-
-p The bottle-neck in this algorithm is the 2NK look-ups into the hash-table that we must make, as these almost always have to hit main memory. The feature-set is enormously large, because all of our features are one-hot boolean indicators. Some of the features will be common, so they'll lurk around in the CPU's cache hierarchy. But a lot of them won't be, and accessing main memory takes a lot of cycles.
-
-p I used to use the Google dense_hash_map implementation. This seemed a solid choice: it came from a big brand, it was in C++, and it seemed very complicated. Later, I read #[a(href="http://preshing.com/20130107/this-hash-table-is-faster-than-a-judy-array/" target="_blank") Jeff Preshing's excellent post] on open-addressing with linear probing. This really spoke to me. I had assumed that a fast hash table implementation would necessarily be very complicated, but no – this is another situation where the simple strategy wins.
-
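-p The core of the strategy fits in a few lines. Here's a toy open-addressed table with linear probing, purely to illustrate the idea – preshed's actual implementation differs in the details:
-
-+code.
-    def lookup(table, key):
-        # table is a fixed-size list of (key, value) pairs and Nones.
-        # This sketch assumes the table never fills up completely.
-        i = hash(key) % len(table)
-        while table[i] is not None:
-            if table[i][0] == key:
-                return table[i][1]
-            i = (i + 1) % len(table)  # linear probe: just try the next slot
-        return None
-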
-p I've packaged my Cython implementation separately from spaCy, in the package #[a(href="https://github.com/syllog1sm/preshed" target="_blank") preshed] – for "pre-hashed", but also as a nod to Preshing. I've also taken great care over the feature extraction and perceptron code, which I'm distributing in a package named #[a(href="https://github.com/honnibal/thinc" target="_blank") thinc] (since it's for learning very sparse models with Cython).
-
-p By the way: from comparing notes with a few people, it seems common to implement linear models in a way that's suboptimal for multi-class classification. The mistake is to store in the hash-table one weight per (feature, class) pair, rather than mapping the feature to a vector of weights, for all of the classes. This is bad because it means you need to hit the table C times, one per class, as you always need to evaluate a feature against all of the classes. In the case of the parser, this means the hash table is accessed 2NKC times, instead of the 2NK times if you have a weights vector. You should also be careful to store the weights contiguously in memory – you don't want a linked list here. I use a block-sparse format, because my problems tend to have a few dozen classes.
-
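-p The difference between the two layouts is easy to see in illustrative pseudo-Python:
-
-+code.
-    # Suboptimal: one hash lookup per (feature, class) pair, 2NKC in total
-    scores = [sum(table.get((f, c), 0.0) for f in features)
-              for c in range(n_classes)]
-
-    # Better: one lookup per feature returns a contiguous weight vector,
-    # so we make 2NK lookups and the inner loop stays in cache
-    scores = [0.0] * n_classes
-    for f in features:
-        weights = table.get(f)
-        if weights is not None:
-            for c, w in enumerate(weights):
-                scores[c] += w
-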
-p I guess if I had to summarize my experience, I'd say that the efficiency of these models is really all about the data structures. We want to stay small, and stay contiguous. Minimize redundancy and minimize pointer chasing. That's why Cython is so well suited to this: we get to lay out our data structures, and manage the memory ourselves, with full C-level control.
diff --git a/website/blog/img/agpl-not-free.jpg b/website/blog/img/agpl-not-free.jpg
deleted file mode 100644
index 6298724a8..000000000
Binary files a/website/blog/img/agpl-not-free.jpg and /dev/null differ
diff --git a/website/blog/img/agpl-not-free_large.jpg b/website/blog/img/agpl-not-free_large.jpg
deleted file mode 100644
index b76e169a3..000000000
Binary files a/website/blog/img/agpl-not-free_large.jpg and /dev/null differ
diff --git a/website/blog/img/agpl-not-free_small.jpg b/website/blog/img/agpl-not-free_small.jpg
deleted file mode 100644
index cb5ea5b78..000000000
Binary files a/website/blog/img/agpl-not-free_small.jpg and /dev/null differ
diff --git a/website/blog/img/anchovies.png b/website/blog/img/anchovies.png
deleted file mode 100644
index 298411a6c..000000000
Binary files a/website/blog/img/anchovies.png and /dev/null differ
diff --git a/website/blog/img/basic-english.jpg b/website/blog/img/basic-english.jpg
deleted file mode 100644
index 7914d2da9..000000000
Binary files a/website/blog/img/basic-english.jpg and /dev/null differ
diff --git a/website/blog/img/basic-english_large.jpg b/website/blog/img/basic-english_large.jpg
deleted file mode 100644
index 499728d57..000000000
Binary files a/website/blog/img/basic-english_large.jpg and /dev/null differ
diff --git a/website/blog/img/basic-english_small.jpg b/website/blog/img/basic-english_small.jpg
deleted file mode 100644
index 2b1ce4526..000000000
Binary files a/website/blog/img/basic-english_small.jpg and /dev/null differ
diff --git a/website/blog/img/cython.jpg b/website/blog/img/cython.jpg
deleted file mode 100644
index 7bb7e5289..000000000
Binary files a/website/blog/img/cython.jpg and /dev/null differ
diff --git a/website/blog/img/cython_large.jpg b/website/blog/img/cython_large.jpg
deleted file mode 100644
index ea885b094..000000000
Binary files a/website/blog/img/cython_large.jpg and /dev/null differ
diff --git a/website/blog/img/cython_small.jpg b/website/blog/img/cython_small.jpg
deleted file mode 100644
index d5c39a6ca..000000000
Binary files a/website/blog/img/cython_small.jpg and /dev/null differ
diff --git a/website/blog/img/deadcode.jpg b/website/blog/img/deadcode.jpg
deleted file mode 100644
index c879defbe..000000000
Binary files a/website/blog/img/deadcode.jpg and /dev/null differ
diff --git a/website/blog/img/deadcode_large.jpg b/website/blog/img/deadcode_large.jpg
deleted file mode 100644
index 682a14fb7..000000000
Binary files a/website/blog/img/deadcode_large.jpg and /dev/null differ
diff --git a/website/blog/img/deadcode_small.jpg b/website/blog/img/deadcode_small.jpg
deleted file mode 100644
index ac7324e5c..000000000
Binary files a/website/blog/img/deadcode_small.jpg and /dev/null differ
diff --git a/website/blog/img/displacy.jpg b/website/blog/img/displacy.jpg
deleted file mode 100644
index 459d1602a..000000000
Binary files a/website/blog/img/displacy.jpg and /dev/null differ
diff --git a/website/blog/img/displacy_large.jpg b/website/blog/img/displacy_large.jpg
deleted file mode 100644
index 93243b2f3..000000000
Binary files a/website/blog/img/displacy_large.jpg and /dev/null differ
diff --git a/website/blog/img/displacy_small.jpg b/website/blog/img/displacy_small.jpg
deleted file mode 100644
index d8384cb4c..000000000
Binary files a/website/blog/img/displacy_small.jpg and /dev/null differ
diff --git a/website/blog/img/how-spacy-works.jpg b/website/blog/img/how-spacy-works.jpg
deleted file mode 100644
index 05a3883a7..000000000
Binary files a/website/blog/img/how-spacy-works.jpg and /dev/null differ
diff --git a/website/blog/img/how-spacy-works_large.jpg b/website/blog/img/how-spacy-works_large.jpg
deleted file mode 100644
index 782535556..000000000
Binary files a/website/blog/img/how-spacy-works_large.jpg and /dev/null differ
diff --git a/website/blog/img/how-spacy-works_small.jpg b/website/blog/img/how-spacy-works_small.jpg
deleted file mode 100644
index 8794f4c7c..000000000
Binary files a/website/blog/img/how-spacy-works_small.jpg and /dev/null differ
diff --git a/website/blog/img/introducing-spacy.jpg b/website/blog/img/introducing-spacy.jpg
deleted file mode 100644
index 4c33de31d..000000000
Binary files a/website/blog/img/introducing-spacy.jpg and /dev/null differ
diff --git a/website/blog/img/introducing-spacy_large.jpg b/website/blog/img/introducing-spacy_large.jpg
deleted file mode 100644
index 91e642c5b..000000000
Binary files a/website/blog/img/introducing-spacy_large.jpg and /dev/null differ
diff --git a/website/blog/img/introducing-spacy_small.jpg b/website/blog/img/introducing-spacy_small.jpg
deleted file mode 100644
index 7cce704b7..000000000
Binary files a/website/blog/img/introducing-spacy_small.jpg and /dev/null differ
diff --git a/website/blog/img/linguistic-structure.jpg b/website/blog/img/linguistic-structure.jpg
deleted file mode 100644
index ebdd4e464..000000000
Binary files a/website/blog/img/linguistic-structure.jpg and /dev/null differ
diff --git a/website/blog/img/markup.jpg b/website/blog/img/markup.jpg
deleted file mode 100644
index 48191552e..000000000
Binary files a/website/blog/img/markup.jpg and /dev/null differ
diff --git a/website/blog/img/markup_basscss.jpg b/website/blog/img/markup_basscss.jpg
deleted file mode 100644
index d87bd5ed2..000000000
Binary files a/website/blog/img/markup_basscss.jpg and /dev/null differ
diff --git a/website/blog/img/markup_bootstrap.jpg b/website/blog/img/markup_bootstrap.jpg
deleted file mode 100644
index 0f98fa009..000000000
Binary files a/website/blog/img/markup_bootstrap.jpg and /dev/null differ
diff --git a/website/blog/img/markup_docs.jpg b/website/blog/img/markup_docs.jpg
deleted file mode 100644
index 81d2bad96..000000000
Binary files a/website/blog/img/markup_docs.jpg and /dev/null differ
diff --git a/website/blog/img/markup_large.jpg b/website/blog/img/markup_large.jpg
deleted file mode 100644
index 72b02ccca..000000000
Binary files a/website/blog/img/markup_large.jpg and /dev/null differ
diff --git a/website/blog/img/markup_mixins.jpg b/website/blog/img/markup_mixins.jpg
deleted file mode 100644
index a4bd519a1..000000000
Binary files a/website/blog/img/markup_mixins.jpg and /dev/null differ
diff --git a/website/blog/img/markup_sections.jpg b/website/blog/img/markup_sections.jpg
deleted file mode 100644
index 4dd25ce5b..000000000
Binary files a/website/blog/img/markup_sections.jpg and /dev/null differ
diff --git a/website/blog/img/markup_small.jpg b/website/blog/img/markup_small.jpg
deleted file mode 100644
index 8c78c962d..000000000
Binary files a/website/blog/img/markup_small.jpg and /dev/null differ
diff --git a/website/blog/img/markup_workflow.jpg b/website/blog/img/markup_workflow.jpg
deleted file mode 100644
index 2d5e0d643..000000000
Binary files a/website/blog/img/markup_workflow.jpg and /dev/null differ
diff --git a/website/blog/img/pizza.jpg b/website/blog/img/pizza.jpg
deleted file mode 100644
index 5f8e0ede1..000000000
Binary files a/website/blog/img/pizza.jpg and /dev/null differ
diff --git a/website/blog/img/pizza_large.jpg b/website/blog/img/pizza_large.jpg
deleted file mode 100644
index c4d10222c..000000000
Binary files a/website/blog/img/pizza_large.jpg and /dev/null differ
diff --git a/website/blog/img/pizza_small.jpg b/website/blog/img/pizza_small.jpg
deleted file mode 100644
index acbdb09fc..000000000
Binary files a/website/blog/img/pizza_small.jpg and /dev/null differ
diff --git a/website/blog/img/pos-tagger.jpg b/website/blog/img/pos-tagger.jpg
deleted file mode 100644
index 63058471c..000000000
Binary files a/website/blog/img/pos-tagger.jpg and /dev/null differ
diff --git a/website/blog/img/pos-tagger_large.jpg b/website/blog/img/pos-tagger_large.jpg
deleted file mode 100644
index 00b7f8a54..000000000
Binary files a/website/blog/img/pos-tagger_large.jpg and /dev/null differ
diff --git a/website/blog/img/pos-tagger_small.jpg b/website/blog/img/pos-tagger_small.jpg
deleted file mode 100644
index 20db318d0..000000000
Binary files a/website/blog/img/pos-tagger_small.jpg and /dev/null differ
diff --git a/website/blog/img/sense2vec.jpg b/website/blog/img/sense2vec.jpg
deleted file mode 100644
index 93b50d242..000000000
Binary files a/website/blog/img/sense2vec.jpg and /dev/null differ
diff --git a/website/blog/img/sense2vec_large.jpg b/website/blog/img/sense2vec_large.jpg
deleted file mode 100644
index 852e1a6c9..000000000
Binary files a/website/blog/img/sense2vec_large.jpg and /dev/null differ
diff --git a/website/blog/img/sense2vec_small.jpg b/website/blog/img/sense2vec_small.jpg
deleted file mode 100644
index 47ff93d51..000000000
Binary files a/website/blog/img/sense2vec_small.jpg and /dev/null differ
diff --git a/website/blog/index.jade b/website/blog/index.jade
index bc700af15..a598f51d2 100644
--- a/website/blog/index.jade
+++ b/website/blog/index.jade
@@ -1,31 +1,5 @@
-include ../_includes/_mixins
+//- ----------------------------------
+//- 💫 BLOG INDEX (REDIRECT)
+//- ----------------------------------
-- var featured_counter = 0
-- var post_counter = 0
-
-
-//- Blog
-//- ============================================================================
-
-//- Display all blog posts in order and highlight the first post set to
- `featured: true` as the fourth post
-
-+grid('padding')
-
- each post, slug in public.blog._data
- if slug != 'index'
-
- if post.featured && featured_counter === 0
- +grid-col(style='order: -1').divider-both
- !=partial('../_includes/_teaser', { teaser: post, slug: slug, is_featured: true, _root: '/blog/' })
-
- - featured_counter++
-
- else
- +grid-col('third', 'space-between')(style=assignOrder(post_counter, 3, 1))
- !=partial('../_includes/_teaser', { teaser: post, slug: slug, _root: '/blog/' })
-
- - post_counter++
-
- if post_counter === 6
- !=partial('../_includes/_newsletter', { divider: 'both' })
+script window.location = '!{SITE_URL}'
diff --git a/website/blog/introducing-spacy.jade b/website/blog/introducing-spacy.jade
deleted file mode 100644
index a51109df5..000000000
--- a/website/blog/introducing-spacy.jade
+++ /dev/null
@@ -1,18 +0,0 @@
-include ../_includes/_mixins
-
-+lead spaCy is a new library for text processing in Python and Cython. I wrote it because I think small companies are terrible at natural language processing (NLP). Or rather: small companies are using terrible NLP technology.
-
-p To do great NLP, you have to know a little about linguistics, a lot about machine learning, and almost everything about the latest research. The people who fit this description seldom join small companies. Most are broke – they've just finished grad school. If they don't want to stay in academia, they join Google, IBM, etc.
-
-p The net result is that outside of the tech giants, commercial NLP has changed little in the last ten years. In academia, it's changed entirely. Amazing improvements in quality. Orders of magnitude faster. But the academic code is always GPL, undocumented, unusable, or all three. You could implement the ideas yourself, but the papers are hard to read, and training data is exorbitantly expensive. So what are you left with? A common answer is NLTK, which was written primarily as an educational resource. Nothing past the tokenizer is suitable for production use.
-
-p I used to think that the NLP community just needed to do more to communicate its findings to software engineers. So I wrote two blog posts, explaining #[a(href="/blog/part-of-speech-pos-tagger-in-python" target="_blank") how to write a part-of-speech tagger] and #[a(href="/blog/parsing-english-in-python" target="_blank") parser]. Both were well received, and there's been a bit of interest in #[a(href="https://github.com/syllog1sm/redshift/tree/develop" target="_blank") my research software] – even though it's entirely undocumented, and mostly unusable to anyone but me.
-
-p So six months ago I quit my post-doc, and I've been working day and night on spaCy since. I'm now pleased to announce an alpha release.
-
-p If you're a small company doing NLP, I think spaCy will seem like a minor miracle. It's by far the fastest NLP software ever released. The full processing pipeline completes in 20ms per document, including accurate tagging and parsing. All strings are mapped to integer IDs, tokens are linked to embedded word representations, and a range of useful features are pre-calculated and cached.
-
-+pullquote("Computers don't understand text. This is unfortunate, because that's what the web is mostly made of.")
-
-p If none of that made any sense to you, here's the gist of it. Computers don't understand text. This is unfortunate, because that's what the web almost entirely consists of. We want to recommend people text based on other text they liked. We want to shorten text to display it on a mobile screen. We want to aggregate it, link it, filter it, categorise it, generate it and correct it.
-p spaCy provides a library of utility functions that help programmers build such products. It's commercial open source software: you can either use it under the AGPL, or you can buy a commercial license under generous terms.
diff --git a/website/blog/modular-markup.jade b/website/blog/modular-markup.jade
deleted file mode 100644
index 031744fc4..000000000
--- a/website/blog/modular-markup.jade
+++ /dev/null
@@ -1,147 +0,0 @@
-include ../_includes/_mixins
-
-+lead In a small team, everyone should be able to contribute content to the website and make use of the full set of visual components, without having to worry about design or write complex HTML. To help us write docs, tutorials and blog posts about #[a(href=url target="_blank") spaCy], we've developed a powerful set of modularized markup components, implemented using #[a(href="http://jade-lang.com" target="_blank") Jade].
-
- +aside("What's spaCy?") spaCy is a library for industrial-strength Natural Language Processing written in Python / Cython. We're helping advanced text understanding AI get out into real products.
-
-p This approach has worked well for us, so we decided to #[a(href="https://github.com/" + profiles.github + "/spaCy/blob/master/website" target="_blank") open source] our site, and explain the motivation. You can see the idea in action by taking a look at the #[a(href="https://github.com/" + profiles.github + "/spaCy/blob/master/website/blog/modular-markup.jade" target="_blank") source code for this post], or our #[a(href="/styleguide" target="_blank") style guide], which lets you see most of our current markup components. There are also some simpler examples to follow in this post.
-
-+h2("modularizing-markup") Modularizing the Markup
-
-p A product website will never stand still – it will be in constant development. Its architecture therefore needs to be set up from the start to be updated and modified frequently, and allow changes to its components. Instead of thinking in #[em sections] and #[em pages], we need to start thinking in #[em design systems] and #[em modules]. #[a(href="http://bradfrost.com/blog/post/atomic-web-design/" target="_blank") Atomic Design] is a popular methodology for modular design systems: small elements (atoms) are combined to larger structures (molecules), which can be connected to form complex components (organisms). For example, a permalink atom and a heading atom can be combined to form a headline molecule, which becomes part of a larger text block organism.
-
-p We need a lot of those flexible organisms, and they all come with custom markup that will likely change over time – think responsive embeds, data tables, or code blocks. Every time a component is used, its full markup needs to be implemented. This also means that every change to this markup requires changing every instance of the respective component. In order to make this work, we need a concise and dynamic markup language to define those components and make them accessible for content creators to use. In short: we need a markup language as compact as Markdown, as feature-rich as HTML and as programmable as JavaScript.
-
-+h2("markup-language") Introducing a Programmable Markup Language
-
-p #[a(href="http://jade-lang.com" target="_blank") Jade] (recently renamed to Pug) is a markup language that comes with a very powerful feature: #[a(href="http://jade-lang.com/reference/mixins/" target="_blank") mixins]. Mixins are reusable content elements that act as "content functions", by taking custom arguments. Mixins offer a powerful way to implement a modular design. Every component and its mark-up can be defined once, and reused in different contexts across the site.
-
-+image("markup_mixins.jpg", "How a mixin works", "Mixins can be reused with different configuration across the entire site.")
-
-p Jade maintains the full power of HTML, but adds vital syntactic sugar and the option to extend the markup using JavaScript. Its syntax is simplified and whitespace-sensitive, making the code compact and easy to read. Here's an example:
-
-+code("jade", "Jade").
- each text in [ "one", "two", "three" ]
- article.teaser(class=(text == "two") ? "active" : "")
- p=text
-
-+code("markup", "Compiled HTML").
- <article class="teaser">
- <p>one</p>
- </article>
-
- <article class="teaser active">
- <p>two</p>
- </article>
-
- <article class="teaser">
- <p>three</p>
- </article>
-
-p To put it simply, Jade lets you program. Writing both the templates and the content itself in Jade keeps the markup consistent, and eliminates the problems and inflexibilities usually associated with the use of a simplified subset of a markup language.
-
-p Here's an example for an image mixin that can be used to insert a figure containing an image, complete with alt text and caption using only one line of code. Note how all markup, including the path to the image folder, is defined within the mixin:
-
-+code("jade", "Jade Mixin").
- //- Definition
- mixin image(source, alt, caption)
- figure.figure-class
-            img.image-class(src="images/" + source, alt=alt)
-
- if caption
- figcaption.caption-class=caption
-
- //- Usage
-    +image("my-image.jpg", "My image", "This is a caption")
-
-+code("markup", "Compiled HTML").
- <figure class="figure-class">
- <img class="image-class" src="images/my-image.jpg" alt="My image" />
-        <figcaption class="caption-class">This is a caption</figcaption>
- </figure>
-
-+h2("framework") Working With Your Framework (Not Against It)
-
-p While we've implemented our own design system, the same principles apply if you're using an existing boilerplate. For instance, let's assume you're trying to get your project off the ground as quickly as possible and you're using #[a(href="http://getbootstrap.com/" target="_blank") Bootstrap] and its alert plugin to create #[a(href="http://getbootstrap.com/components/#alerts" target="_blank") dismissible and non-dismissible alerts]. Technically, alerts are very simple, self-contained elements, but (using Bootstrap) they still require a fair bit of markup to add JavaScript functionality and accessibility.
-
- +aside("Jade Bootstrap") While writing these examples, I came across #[a(href="http://rajasegar.github.io/JADE-Bootstrap/components.html" target="_blank") Jade Bootstrap], a library of Bootstrap components as reusable Jade mixins. In case this is something you're interested in, it might be worth checking out and adapting.
-
-+image("markup_bootstrap.jpg", "Example of Bootstrap Alerts created by mixins")
-
-p The mixin needs an option to specify the style ("success", "info", "warning" or "alert"), which will be translated to the corresponding class, as well as an option to make it dismissible and add a close button. The markup could look like this:
-
-+code("jade", "Jade Mixin").
- //- Definition
- mixin alert(style, dismissible)
- .alert(class="alert-" + style + ( (dismissible) ? " alert-dismissible" : "") role="alert")
- if dismissible
- button.close(type="button" data-dismiss="alert" aria-label="Close")
- span(aria-hidden="true") ×
-
- block
-
- //- Usage
- +alert("success") You successfully used an alert mixin.
- +alert("danger", true) This danger alert can be dismissed.
-
-p Alternatively, maybe you're using #[a(href="http://www.basscss.com/" target="_blank") Basscss]'s #[a(href="http://www.basscss.com/#basscss-flexbox" target="_blank") flexbox grid utility] to create responsive cards and column layouts like the one below.
-
-+image("markup_basscss.jpg", "Example of Basscss flexbox created by mixins")
-
-.has-aside
- +code("jade", "Jade Mixins").
- //- Definitions
- mixin grid(...style)
- .flex(class=style.join(" "))
- block
-
- mixin col(width, ...style)
- .col(class="col-" + width + " " + style.join(" "))
- block
-
- //- Usage
- +grid("border", "p2")
- each column in [ "one", "two", "three", "four"]
- +col(6, "border", "p1", "m1") Column #{column}
-
- +aside("Note") The "rest argument" syntax (#[code ...style]) lets your mixin take an unknown number of arguments that become available to your mixin as an array. This is especially useful for CSS classes.#[br]#[br]To handle modifiers, we're using a #[a(href="https://github.com/" + profiles.github + "/spaCy/blob/master/website/_includes/_functions.jade#L42" target="_blank") helper function] called #[code prefixArgs()], which adds a given prefix (like "grid-" or "table-") to the arguments and returns a list of class names.
-
-p The above markup still requires content creators to know and use the respective #[a(href="https://github.com/basscss/basscss/blob/master/modules/flexbox/index.css" target="_blank") classes] manually. To avoid this, you could instead define the styles within the mixins. This would give the content creators a #[code +grid()] utility that didn't expose any details of the design system. If the layout of the blog is ever going to change, the blog posts would remain untouched, and only the mixins would have to be adjusted to the new style.
-
-+h2("content-creators") Developing Tools For Content Creators
-
-p Content creators should never have to worry about stylistic properties like margins, paddings and borders. They shouldn't have to worry about an arbitrary order of nested elements, responsive breakpoints or sizing. Thus, the details of the underlying CSS framework should not leak into the markup. After all, this is why we're creating sophisticated design systems in the first place: to make it easy to author effective #[em content].
-
-+pullquote("Content creators should never have to worry about stylistic properties like margins, paddings and borders.")
-
-p Providing authors with a reasonable subset of markup utilities puts them back in control of the content they create and enables them to ship quickly and efficiently. In small teams, a short publishing pipeline like this is absolutely vital.
-
-+image("markup_workflow.jpg", "Workflow between development and content creation", "Front-end developers produce modular markup components that lead to better workflows for creating content.")
-
-+h2("cms") Refining the Workflow With a Static CMS
-
-p While building a site with Jade alone is certainly possible, it requires some extra scaffolding to make it work as a CMS. For example, the main way to store metadata with Jade is in JavaScript-like variables, which quickly becomes impractical and #[a(href="http://jade-lang.com/reference/code/" target="_blank") awkward]. Our setup improved significantly after we started using #[a(href="http://harpjs.com/" target="_blank") Harp]. Harp is a static web server and compiler with built-in pre-processing, including Jade, EJS, Sass, Less, Stylus, CoffeeScript and Markdown. Metadata and additional content can be provided in JSON. Code is not only processed on build, but also while serving and previewing locally, which is as easy as this:
-
-+code("bash").
- sudo npm install --global harp
- git clone https://github.com/spacy-io/spacy
- cd spacy/website
- harp server
-
-p Like most static site generators, Harp compiles files into the same directory if they don't have a leading underscore. For instance, #[code blog-post.jade] would become #[code blog-post.html]. Files with a leading underscore are not compiled directly and can be used to store metadata and layout partials. This allows you to build dynamic templates that can accommodate both static content and dynamic data supplied via #[code _data.json] files.
-
-p There are two ways to set up reusable components:
-
-+list("numbers")
- +item #[strong Generate mixins] that are combined into a global #[code _mixins.jade] file and #[a(href="http://jade-lang.com/reference/includes/" target="_blank") included] at the top of a page. This makes sense for smaller components that will be used a lot by content authors, especially since mixins can take long #[a(href="http://jade-lang.com/reference/mixins/" target="_blank") blocks of content] to be used within the mixin.
-    +item #[strong Use dynamic partials] via Harp's #[code !=partial()] syntax. This is useful for layout partials and widgets. Similar to mixin arguments, partials allow you to #[a(href="http://harpjs.com/docs/development/partial#jade" target="_blank") pass in] data and variables to modify the included file. We're using this feature in a "latest blog posts" partial to display a variable number of teasers.
-
-p During development, we use Harp to serve our entire site locally using one simple command only, #[code harp server]. To deploy the site, we pre-process and compile the code and upload the files.
-
-+h2("conclusion") Conclusion
-
-p Modular markup gives content authors more powerful, domain-specific components that allow them to use more complex markup while effectively writing less. Instead of optimizing the look and feel of individual pages that might change tomorrow, the front-end developer takes on a blacksmith role of forging reusable tools.
-
-+grid('margin-right')
- +button('secondary')(href="https://github.com/" + profiles.github + "/spaCy/blob/master/website" target="_blank") #[+icon('github', 'secondary')] View Website Source
- +button('secondary')(href="/styleguide" target="_blank") View Styleguide
diff --git a/website/blog/parsing-english-in-python.jade b/website/blog/parsing-english-in-python.jade
deleted file mode 100644
index ce77be778..000000000
--- a/website/blog/parsing-english-in-python.jade
+++ /dev/null
@@ -1,575 +0,0 @@
-include ../_includes/_mixins
-
-+infobox("Update (August 19, 2015)") I wrote this blog post in 2013, describing an exciting advance in natural language understanding technology. Today, almost all high-performance parsers are using a variant of the algorithm described below (including spaCy). The original post is preserved below, with added commentary in light of recent research.
-
-p A #[a(href="http://googleresearch.blogspot.de/2013/05/syntactic-ngrams-over-time.html" target="_blank") syntactic parser] describes a sentence’s grammatical structure, to help another application reason about it. Natural languages introduce many unexpected ambiguities, which our world-knowledge immediately filters out. A favourite example:
-
-+example They ate the pizza with anchovies
-
-+image("anchovies.png", "Eat-with pizza-with ambiguity")
-
-p A correct parse links “with” to “pizza”, while an incorrect parse links “with” to “eat”:
-
-+displacy("pizza-with-anchovies-bad")
-
-+displacy("pizza-with-anchovies-good")
-
-p The Natural Language Processing (NLP) community has made big progress in syntactic parsing over the last few years. It’s now possible for a tiny Python implementation to perform better than the widely-used Stanford PCFG parser.
-
-.has-aside
- +table([ "Parser", "Accuracy", "Speed (w/s)", "Language", "LOC"])
- +row
- +cell Stanford PCFG
- +cell 89.6%
- +cell 19
- +cell Java
- +cell > 4,000
-
- +row
- +cell: strong parser.py
- +cell 89.8%
- +cell 2,020
- +cell Python
- +cell: strong ~500
-
- +row
- +cell Redshift
- +cell: strong 93.6%
- +cell: strong 2,580
- +cell Cython
- +cell ~4,000
-
- +row("highlight")
- +cell spaCy v0.89
- +cell 92.7%
- +cell 22,106
- +cell Cython
- +cell ~10,000
-
- +row("highlight")
- +cell Stanford NN
- +cell 91.7%
- +cell 16,800
- +cell Java
- +cell > 4,000
-
- +aside("Note") I wasn’t really sure how to count the lines of code in the Stanford parser. Its jar file ships over 200k, but there are a lot of different models in it. It’s not important, but it's certainly over 4k.
-
-+infobox("Update") Stanford's CoreNLP now features high-performance transition-based models. It is much faster than the Redshift parser (my research system), but less accurate. spaCy is faster again still, more accurate than CoreNLP, but less accurate than Redshift, due to spaCy's use of greedy search. It would be relatively easy to provide a beam-search version of spaCy...But, I think the gap in accuracy will continue to close, especially given advances in neural network learning.
-
-p The rest of the post sets up the problem, and then takes you through #[a(href="https://gist.github.com/syllog1sm/10343947" target="_blank") a concise implementation], prepared for this post. The first 200 lines of parser.py, the part-of-speech tagger and learner, are described #[a(href="#") here]. You should probably at least skim that post before reading this one, unless you’re very familiar with NLP research.
-
-p The Cython system, Redshift, was written for my current research. I plan to improve it for general use in June, after my contract ends at Macquarie University. The current version is #[a(href="http://github.com/syllog1sm/redshift" target="_blank") hosted on GitHub].
-
-
-+h3("problem-description") Problem Description
-
-p It’d be nice to type an instruction like this into your phone:
-
-+example Set volume to zero when I’m in a meeting, unless John’s school calls.
-
-p And have it set the appropriate policy. On Android you can do this sort of thing with #[a(href="https://play.google.com/store/apps/details?id=net.dinglisch.android.taskerm" target="_blank") Tasker], but an NL interface would be much better. It’d be especially nice to receive a meaning representation you could edit, so you could see what it thinks you said, and correct it.
-
-p There are lots of problems to solve to make that work, but some sort of syntactic representation is definitely necessary. We need to know that:
-
-+example Unless John’s school calls, when I’m in a meeting, set volume to zero
-
-p is another way of phrasing the first instruction, while:
-
-+example Unless John’s school, call when I’m in a meeting
-
-p means something completely different.
-
-p A dependency parser returns a graph of word-word relationships, intended to make such reasoning easier. Our graphs will be trees – edges will be directed, and every node (word) will have exactly one incoming arc (one dependency, with its head), except one.
-
-
-+h4('example-usage') Example usage
-
-+code.
- parser = parser.Parser()
- tokens = "Set the volume to zero when I 'm in a meeting unless John 's school calls".split()
- >>> tags, heads = parser.parse(tokens)
- >>> heads
- [-1, 2, 0, 0, 3, 0, 7, 5, 7, 10, 8, 0, 13, 15, 15, 11]
- >>> for i, h in enumerate(heads):
-    ...     head = tokens[h] if h >= 0 else 'None'
-    ...     print(tokens[i] + ' <-- ' + head)
- Set <-- None
- the <-- volume
- volume <-- Set
- to <-- Set
- zero <-- to
- when <-- Set
- I <-- 'm
- 'm <-- when
- in <-- 'm
- a <-- meeting
- meeting <-- in
- unless <-- Set
- John <-- 's
- 's <-- calls
- school <-- calls
- calls <-- unless
-
-p The idea is that it should be slightly easier to reason from the parse, than it was from the string. The parse-to-meaning mapping is hopefully simpler than the string-to-meaning mapping.
-
-p The most confusing thing about this problem area is that “correctness” is defined by convention — by annotation guidelines. If you haven’t read the guidelines and you’re not a linguist, you can’t tell whether the parse is “wrong” or “right”, which makes the whole task feel weird and artificial.
-
-p For instance, there’s a mistake in the parse above: “John’s school calls” is structured wrongly, according to the Stanford annotation guidelines. The structure of that part of the sentence is how the annotators were instructed to parse an example like “John’s school clothes”.
-
-p It’s worth dwelling on this point a bit. We could, in theory, have written our guidelines so that the “correct” parses were reversed. There’s good reason to believe the parsing task will be harder if we reversed our convention, as it’d be less consistent with the rest of the grammar. But we could test that empirically, and we’d be pleased to gain an advantage by reversing the policy.
-
- +aside("Note") For instance, how would you parse, “John’s school of music calls”? You want to make sure the phrase “John’s school” has a consistent structure in both “John’s school calls” and “John’s school of music calls”. Reasoning about the different “slots” you can put a phrase into is a key way we reason about what syntactic analyses look like. You can think of each phrase as having a different shaped connector, which you need to plug into different slots — which each phrase also has a certain number of, each of a different shape. We’re trying to figure out what connectors are where, so we can figure out how the sentences are put together.
-
-p We definitely do want that distinction in the guidelines — we don’t want both to receive the same structure, or our output will be less useful. The annotation guidelines strike a balance between what distinctions downstream applications will find useful, and what parsers will be able to predict easily.
-
-+h4("projective-trees") Projective trees
-
-p There’s a particularly useful simplification that we can make, when deciding what we want the graph to look like: we can restrict the graph structures we’ll be dealing with. This doesn’t just give us a likely advantage in learnability; it can have deep algorithmic implications. We follow most work on English in constraining the dependency graphs to be #[em projective trees]:
-
-+list("numbers")
- +item Tree. Every word has exactly one head, except for the dummy ROOT symbol.
-    +item Projective. Write each dependency as a span with its endpoints ordered, so that a1 < a2 and b1 < b2. Then for every pair of dependencies (a1, a2) and (b1, b2): if a1 < b1 < a2, then b2 <= a2. In other words, dependencies cannot “cross”. You can’t have a pair of dependencies that goes a1 b1 a2 b2, or b1 a1 b2 a2 (see the checker sketch below).
-
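-p That definition is easy to check by brute force. Here's an illustrative, quadratic checker (not part of parser.py):
-
-+code.
-    def is_projective(heads):
-        # Represent each dependency as a span with ordered endpoints
-        arcs = [(min(i, h), max(i, h)) for i, h in enumerate(heads) if h >= 0]
-        for a1, a2 in arcs:
-            for b1, b2 in arcs:
-                if a1 < b1 < a2 < b2:  # the spans interleave, i.e. cross
-                    return False
-        return True
-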
-p There’s a rich literature on parsing non-projective trees, and a smaller literature on parsing DAGs. But the parsing algorithm I’ll be explaining deals with projective trees.
-
-+h3("greedy-transition-based-parsing") Greedy transition-based parsing
-
-p Our parser takes as input a list of string tokens, and outputs a list of head indices, representing edges in the graph. If the #[em i]th member of heads is #[em j], the dependency parse contains an edge (j, i). A transition-based parser is a finite-state transducer; it maps an array of N words onto an output array of N head indices:
-
-+table
- +row
- +cell: em start
- +cell MSNBC
- +cell reported
- +cell that
- +cell Facebook
- +cell bought
- +cell WhatsApp
- +cell for
- +cell $16bn
- +cell: em root
-
- +row
- +cell 0
- +cell 2
- +cell 9
- +cell 2
- +cell 4
- +cell 2
- +cell 4
- +cell 4
- +cell 7
- +cell 0
-
-p The heads array denotes that the head of #[em MSNBC] is #[em reported]: #[em MSNBC] is word 1, and #[em reported] is word 2, and #[code heads[1] == 2]. You can already see why parsing a tree is handy — this data structure wouldn’t work if we had to output a DAG, where words may have multiple heads.
-
-p Although #[code heads] can be represented as an array, we’d actually like to maintain some alternate ways to access the parse, to make it easy and efficient to extract features. Our #[code Parse] class looks like this:
-
-+code.
- class Parse(object):
- def __init__(self, n):
- self.n = n
- self.heads = [None] * (n-1)
- self.lefts = []
- self.rights = []
- for i in range(n+1):
- self.lefts.append(DefaultList(0))
- self.rights.append(DefaultList(0))
-
- def add_arc(self, head, child):
- self.heads[child] = head
- if child < head:
- self.lefts[head].append(child)
- else:
- self.rights[head].append(child)
-
-p As well as the parse, we also have to keep track of where we’re up to in the sentence. We’ll do this with an index into the #[code words] array, and a stack, to which we’ll push words, before popping them once their head is set. So our state data structure is fundamentally:
-
-+list
- +item An index, i, into the list of tokens;
- +item The dependencies added so far, in Parse
- +item A stack, containing words that occurred before i, for which we’re yet to assign a head.
-
-p Each step of the parsing process applies one of three actions to the state:
-
-+code.
- SHIFT = 0; RIGHT = 1; LEFT = 2
- MOVES = [SHIFT, RIGHT, LEFT]
-
- def transition(move, i, stack, parse):
- global SHIFT, RIGHT, LEFT
- if move == SHIFT:
- stack.append(i)
- return i + 1
- elif move == RIGHT:
- parse.add_arc(stack[-2], stack.pop())
- return i
- elif move == LEFT:
- parse.add_arc(i, stack.pop())
- return i
-        raise ValueError("Unknown move: %d" % move)
-
-p The #[code LEFT] and #[code RIGHT] actions add dependencies and pop the stack, while #[code SHIFT] pushes the stack and advances i into the buffer.
-
-p So, the parser starts with an empty stack, and a buffer index at 0, with no dependencies recorded. It chooses one of the (valid) actions, and applies it to the state. It continues choosing actions and applying them until the stack is empty and the buffer index is at the end of the input. (It’s hard to understand this sort of algorithm without stepping through it. Try coming up with a sentence, drawing a projective parse tree over it, and then try to reach the parse tree by choosing the right sequence of transitions.)
-
-p Here’s what the parsing loop looks like in code:
-
-+code.
- class Parser(object):
- ...
- def parse(self, words):
- tags = self.tagger(words)
- n = len(words)
- idx = 1
- stack = [0]
-            parse = Parse(n)
-            while stack or idx < n:
-                features = extract_features(words, tags, idx, n, stack, parse)
-                scores = self.model.score(features)
-                valid_moves = get_valid_moves(idx, n, len(stack))
-                next_move = max(valid_moves, key=lambda move: scores[move])
-                idx = transition(next_move, idx, stack, parse)
-            return tags, parse
-
- def get_valid_moves(i, n, stack_depth):
- moves = []
- if i < n:
- moves.append(SHIFT)
-        if stack_depth >= 2:
-            moves.append(RIGHT)
-        if stack_depth >= 1:
-            moves.append(LEFT)
- return moves
-
-p We start by tagging the sentence, and initializing the state. We then map the state to a set of features, which we score using a linear model. We then find the best-scoring valid move, and apply it to the state.
-
-p The model scoring works the same as it did in #[a(href="/blog/part-of-speech-POS-tagger-in-python/" target="_blank") the POS tagger]. If you’re confused about the idea of extracting features and scoring them with a linear model, you should review that post. Here’s a reminder of how the model scoring works:
-
-+code.
-    class Perceptron(object):
- ...
- def score(self, features):
- all_weights = self.weights
- scores = dict((clas, 0) for clas in self.classes)
- for feat, value in features.items():
- if value == 0:
- continue
- if feat not in all_weights:
- continue
- weights = all_weights[feat]
- for clas, weight in weights.items():
- scores[clas] += value * weight
- return scores
-
-p It’s just summing the class-weights for each feature. This is often expressed as a dot-product, but when you’re dealing with multiple classes, that gets awkward, I find.
-
-p The beam parser (Redshift) tracks multiple candidates, and only decides on the best one at the very end. We’re going to trade away accuracy in favour of efficiency and simplicity. We’ll only follow a single analysis. Our search strategy will be entirely greedy, as it was with the POS tagger. We’ll lock in our choices at every step.
-
-p If you read the POS tagger post carefully, you might see the underlying similarity. What we’ve done is mapped the parsing problem onto a sequence-labelling problem, which we address using a “flat”, or unstructured, learning algorithm (by doing greedy search).
-
-+h3("features") Features
-
-p Feature extraction code is always pretty ugly. The features for the parser refer to a few tokens from the context:
-
-+list
- +item The first three words of the buffer (n0, n1, n2)
- +item The top three words of the stack (s0, s1, s2)
- +item The two leftmost children of s0 (s0b1, s0b2);
- +item The two rightmost children of s0 (s0f1, s0f2);
- +item The two leftmost children of n0 (n0b1, n0b2)
-
-p For these 12 tokens, we refer to the word-form, the part-of-speech tag, and the number of left and right children attached to the token.
-
-p Because we’re using a linear model, we have our features refer to pairs and triples of these atomic properties.
-
-+code.
- def extract_features(words, tags, n0, n, stack, parse):
- def get_stack_context(depth, stack, data):
- if depth >= 3:
- return data[stack[-1]], data[stack[-2]], data[stack[-3]]
- elif depth >= 2:
- return data[stack[-1]], data[stack[-2]], ''
- elif depth == 1:
- return data[stack[-1]], '', ''
- else:
- return '', '', ''
-
- def get_buffer_context(i, n, data):
- if i + 1 >= n:
- return data[i], '', ''
- elif i + 2 >= n:
- return data[i], data[i + 1], ''
- else:
- return data[i], data[i + 1], data[i + 2]
-
- def get_parse_context(word, deps, data):
- if word == -1:
- return 0, '', ''
- deps = deps[word]
- valency = len(deps)
- if not valency:
- return 0, '', ''
- elif valency == 1:
- return 1, data[deps[-1]], ''
- else:
- return valency, data[deps[-1]], data[deps[-2]]
-
- features = {}
- # Set up the context pieces --- the word, W, and tag, T, of:
- # S0-2: Top three words on the stack
- # N0-2: First three words of the buffer
- # n0b1, n0b2: Two leftmost children of the first word of the buffer
- # s0b1, s0b2: Two leftmost children of the top word of the stack
- # s0f1, s0f2: Two rightmost children of the top word of the stack
-
- depth = len(stack)
- s0 = stack[-1] if depth else -1
-
- Ws0, Ws1, Ws2 = get_stack_context(depth, stack, words)
- Ts0, Ts1, Ts2 = get_stack_context(depth, stack, tags)
-
- Wn0, Wn1, Wn2 = get_buffer_context(n0, n, words)
- Tn0, Tn1, Tn2 = get_buffer_context(n0, n, tags)
-
- Vn0b, Wn0b1, Wn0b2 = get_parse_context(n0, parse.lefts, words)
-        _, Tn0b1, Tn0b2 = get_parse_context(n0, parse.lefts, tags)
-
- Vn0f, Wn0f1, Wn0f2 = get_parse_context(n0, parse.rights, words)
- _, Tn0f1, Tn0f2 = get_parse_context(n0, parse.rights, tags)
-
- Vs0b, Ws0b1, Ws0b2 = get_parse_context(s0, parse.lefts, words)
- _, Ts0b1, Ts0b2 = get_parse_context(s0, parse.lefts, tags)
-
- Vs0f, Ws0f1, Ws0f2 = get_parse_context(s0, parse.rights, words)
- _, Ts0f1, Ts0f2 = get_parse_context(s0, parse.rights, tags)
-
- # Cap numeric features at 5?
- # String-distance
- Ds0n0 = min((n0 - s0, 5)) if s0 != 0 else 0
-
- features['bias'] = 1
- # Add word and tag unigrams
- for w in (Wn0, Wn1, Wn2, Ws0, Ws1, Ws2, Wn0b1, Wn0b2, Ws0b1, Ws0b2, Ws0f1, Ws0f2):
- if w:
- features['w=%s' % w] = 1
- for t in (Tn0, Tn1, Tn2, Ts0, Ts1, Ts2, Tn0b1, Tn0b2, Ts0b1, Ts0b2, Ts0f1, Ts0f2):
- if t:
- features['t=%s' % t] = 1
-
- # Add word/tag pairs
- for i, (w, t) in enumerate(((Wn0, Tn0), (Wn1, Tn1), (Wn2, Tn2), (Ws0, Ts0))):
- if w or t:
- features['%d w=%s, t=%s' % (i, w, t)] = 1
-
- # Add some bigrams
- features['s0w=%s, n0w=%s' % (Ws0, Wn0)] = 1
- features['wn0tn0-ws0 %s/%s %s' % (Wn0, Tn0, Ws0)] = 1
- features['wn0tn0-ts0 %s/%s %s' % (Wn0, Tn0, Ts0)] = 1
- features['ws0ts0-wn0 %s/%s %s' % (Ws0, Ts0, Wn0)] = 1
- features['ws0-ts0 tn0 %s/%s %s' % (Ws0, Ts0, Tn0)] = 1
- features['wt-wt %s/%s %s/%s' % (Ws0, Ts0, Wn0, Tn0)] = 1
- features['tt s0=%s n0=%s' % (Ts0, Tn0)] = 1
- features['tt n0=%s n1=%s' % (Tn0, Tn1)] = 1
-
- # Add some tag trigrams
- trigrams = ((Tn0, Tn1, Tn2), (Ts0, Tn0, Tn1), (Ts0, Ts1, Tn0),
- (Ts0, Ts0f1, Tn0), (Ts0, Ts0f1, Tn0), (Ts0, Tn0, Tn0b1),
- (Ts0, Ts0b1, Ts0b2), (Ts0, Ts0f1, Ts0f2), (Tn0, Tn0b1, Tn0b2),
- (Ts0, Ts1, Ts1))
- for i, (t1, t2, t3) in enumerate(trigrams):
- if t1 or t2 or t3:
- features['ttt-%d %s %s %s' % (i, t1, t2, t3)] = 1
-
- # Add some valency and distance features
- vw = ((Ws0, Vs0f), (Ws0, Vs0b), (Wn0, Vn0b))
- vt = ((Ts0, Vs0f), (Ts0, Vs0b), (Tn0, Vn0b))
- d = ((Ws0, Ds0n0), (Wn0, Ds0n0), (Ts0, Ds0n0), (Tn0, Ds0n0),
- ('t' + Tn0+Ts0, Ds0n0), ('w' + Wn0+Ws0, Ds0n0))
- for i, (w_t, v_d) in enumerate(vw + vt + d):
- if w_t or v_d:
- features['val/d-%d %s %d' % (i, w_t, v_d)] = 1
- return features
-
-+h3("training") Training
-
-p Weights are learned using the same algorithm, averaged perceptron, that we used for part-of-speech tagging. Its key strength is that it’s an online learning algorithm: examples stream in one-by-one, we make our prediction, check the actual answer, and adjust our beliefs (weights) if we were wrong.
-
-p The training loop looks like this:
-
-+code.
- class Parser(object):
- ...
- def train_one(self, itn, words, gold_tags, gold_heads):
- n = len(words)
- i = 2; stack = [1]; parse = Parse(n)
- tags = self.tagger.tag(words)
- while stack or (i + 1) < n:
- features = extract_features(words, tags, i, n, stack, parse)
- scores = self.model.score(features)
- valid_moves = get_valid_moves(i, n, len(stack))
- guess = max(valid_moves, key=lambda move: scores[move])
- gold_moves = get_gold_moves(i, n, stack, parse.heads, gold_heads)
- best = max(gold_moves, key=lambda move: scores[move])
- self.model.update(best, guess, features)
- i = transition(guess, i, stack, parse)
- # Return number correct
- return len([i for i in range(n-1) if parse.heads[i] == gold_heads[i]])
-
-p The most interesting part of the training process is in #[code get_gold_moves]. The performance of our parser is made possible by an advance by Goldberg and Nivre (2012), who showed that we’d been doing this wrong for years.
-
-+infobox("Update") Interestingly, CoreNLP continues to "do it wrong" – their transition-based parser uses the static-oracle, rather than the dynamic oracle described here. I attribute spaCy's accuracy advantage to this difference in training algorithm. The ClearNLP parser uses an iterative algorithm that achieves the same sort of thing (and was published prior to the dynamic oracle). I find the dynamic oracle idea much more conceptually clear.
-
-p In the POS-tagging post, I cautioned that during training you need to make sure you pass in the last two #[em predicted] tags as features for the current tag, not the last two #[em gold] tags. At test time you’ll only have the predicted tags, so if you base your features on the gold sequence during training, your training contexts won’t resemble your test-time contexts, so you’ll learn the wrong weights.
-
-p In parsing, the problem was that we didn’t know #[em how] to pass in the predicted sequence! Training worked by taking the gold-standard tree, and finding a transition sequence that led to it. i.e., you got back a sequence of moves, with the guarantee that if you followed those moves, you’d get the gold-standard dependencies.
-
-p The problem is, we didn’t know how to define the “correct” move to teach a parser to make if it was in any state that #[em wasn’t] along that gold-standard sequence. Once the parser had made a mistake, we didn’t know how to train from that example.
-
-p That was a big problem, because it meant that once the parser started making mistakes, it would end up in states unlike any in its training data – leading to yet more mistakes. The problem was specific to greedy parsers: once you use a beam, there’s a natural way to do structured prediction.
-
-+infobox("Update") It's since been pointed out to me that what we're calling a "dynamic oracle" here is really a form of #[a(href="http://www.ausy.tu-darmstadt.de/Research/ICML2011" target="_blank") imitation learning].
-
-p The solution seems obvious once you know it, like all the best breakthroughs. What we do is define a function that asks “How many gold-standard dependencies can be recovered from this state?”. If you can define that function, then you can apply each move in turn, and ask, “How many gold-standard dependencies can be recovered from #[em this] state?”. If the action you applied allows #[em fewer] gold-standard dependencies to be reached, then it is sub-optimal.
-
-p That’s a lot to take in.
-
-p So we have this function #[code Oracle(state)]:
-
-+code.
-    Oracle(state) = | gold_arcs ∩ reachable_arcs(state) |
-
-p We also have a set of actions, each of which returns a new state. We want to know:
-
-+list
- +item #[code shift_cost = Oracle(state) – Oracle(shift(state))]
- +item #[code right_cost = Oracle(state) – Oracle(right(state))]
- +item #[code left_cost = Oracle(state) – Oracle(left(state))]
-
-p Now, at least one of those costs #[em has] to be zero. Oracle(state) is asking, “what’s the cost of the best path forward?”, and the first action of that best path has to be shift, right, or left.
-
-p It turns out that we can derive Oracle fairly simply for many transition systems. The derivation for the transition system we’re using, Arc Hybrid, is in Goldberg and Nivre (2013).
-
-p We’re going to implement the oracle as a function that returns the zero-cost moves, rather than implementing a function Oracle(state). This prevents us from doing a bunch of costly copy operations. Hopefully the reasoning in the code isn’t too hard to follow, but you can also consult Goldberg and Nivre’s papers if you’re confused and want to get to the bottom of this.
-
-+code.
- def get_gold_moves(n0, n, stack, heads, gold):
- def deps_between(target, others, gold):
- for word in others:
- if gold[word] == target or gold[target] == word:
- return True
- return False
-
- valid = get_valid_moves(n0, n, len(stack))
- if not stack or (SHIFT in valid and gold[n0] == stack[-1]):
- return [SHIFT]
- if gold[stack[-1]] == n0:
- return [LEFT]
- costly = set([m for m in MOVES if m not in valid])
- # If the word behind s0 is its gold head, Left is incorrect
- if len(stack) >= 2 and gold[stack[-1]] == stack[-2]:
- costly.add(LEFT)
- # If there are any dependencies between n0 and the stack,
- # pushing n0 will lose them.
- if SHIFT not in costly and deps_between(n0, stack, gold):
- costly.add(SHIFT)
- # If there are any dependencies between s0 and the buffer, popping
- # s0 will lose them.
- if deps_between(stack[-1], range(n0+1, n-1), gold):
- costly.add(LEFT)
- costly.add(RIGHT)
- return [m for m in MOVES if m not in costly]
-
-p Doing this “dynamic oracle” training procedure makes a big difference to accuracy — typically 1-2%, with no difference to the way the run-time works. The old “static oracle” greedy training procedure is fully obsolete; there’s no reason to do it that way any more.
-
-+h3("conclusion") Conclusion
-
-p I have the sense that language technologies, particularly those relating to grammar, are deeply mysterious. I can imagine having no idea what the program might even do.
-
-p I think it therefore seems natural to people that the best solutions would be overwhelmingly complicated. A 200,000 line Java package feels appropriate.
-
-p But, algorithmic code is usually short, when only a single algorithm is implemented. And when you only implement one algorithm, and you know exactly what you want to write before you write a line, you also don’t pay for any unnecessary abstractions, which can have a big performance impact.
-
-+h3("idle-speculation") Idle speculation
-
-p For a long time, incremental language processing algorithms were primarily of scientific interest. If you want to write a parser to test a theory about how the human sentence processor might work, well, that parser needs to build partial interpretations. There’s a wealth of evidence, including commonsense introspection, that establishes that we don’t buffer input and analyse it once the speaker has finished.
-
-p But now algorithms with that neat scientific feature are winning! As best as I can tell, the secret to that success is to be:
-
-+list
- +item Incremental. Earlier words constrain the search.
- +item Error-driven. Training involves a working hypothesis, which is updated as it makes mistakes.
-
-p The links to human sentence processing seem tantalising. I look forward to seeing whether these engineering breakthroughs lead to any psycholinguistic advances.
-
-
-+h3("bibliography") Bibliography
-
-p The NLP literature is almost entirely open access. All of the relevant papers can be found #[a(href="http://aclweb.org/anthology/" target="_blank") here].
-
-p The parser I’ve described is an implementation of the dynamic-oracle Arc-Hybrid system here: #[+source Goldberg, Yoav; Nivre, Joakim. #[em Training Deterministic Parsers with Non-Deterministic Oracles]. TACL 2013]
-
-p However, I wrote my own features for it. The arc-hybrid system was originally described here: #[+source Kuhlmann, Marco; Gomez-Rodriguez, Carlos; Satta, Giorgio. #[em Dynamic Programming Algorithms for Transition-Based Dependency Parsers]. ACL 2011]
-
-p The dynamic oracle training method was first described here: #[+source Goldberg, Yoav; Nivre, Joakim. #[em A Dynamic Oracle for Arc-Eager Dependency Parsing]. COLING 2012]
-
-p This work depended on a big breakthrough in accuracy for transition-based parsers, when beam-search was properly explored by Zhang and Clark. They have several papers, but the preferred citation is: #[+source Zhang, Yue; Clark, Stephen. #[em Syntactic Processing Using the Generalized Perceptron and Beam Search]. Computational Linguistics 2011 (1)]
-
-p Another important paper was this little feature engineering paper, which further improved the accuracy: #[+source Zhang, Yue; Nivre, Joakim. #[em Transition-based Dependency Parsing with Rich Non-local Features]. ACL 2011]
-
-p The generalised perceptron, which is the learning framework for these beam parsers, is from this paper: #[+source Collins, Michael. #[em Discriminative Training Methods for Hidden Markov Models: Theory and Experiments with Perceptron Algorithms]. EMNLP 2002]
-
-
-+h3("experimental-details") Experimental details
-
-p The results at the start of the post refer to Section 22 of the Wall Street Journal corpus. The Stanford parser was run as follows:
-
-+code("bash").
- java -mx10000m -cp "$scriptdir/*:" edu.stanford.nlp.parser.lexparser.LexicalizedParser \
- -outputFormat "penn" edu/stanford/nlp/models/lexparser/englishFactored.ser.gz $*
-
-p A small post-process was applied, to undo the fancy tokenisation Stanford adds for numbers, to make them match the PTB tokenisation:
-
-+code.
- """Stanford parser retokenises numbers. Split them."""
- import sys
- import re
-
- qp_re = re.compile('\xc2\xa0')
- for line in sys.stdin:
- line = line.rstrip()
- if qp_re.search(line):
- line = line.replace('(CD', '(QP (CD', 1) + ')'
- line = line.replace('\xc2\xa0', ') (CD ')
- print line
-
-p The resulting PTB-format files were then converted into dependencies using the Stanford converter:
-
-+code("bash").
- for f in $1/*.mrg; do
- echo $f
- grep -v CODE $f > "$f.2"
- out="$f.dep"
- java -mx800m -cp "$scriptdir/*:" edu.stanford.nlp.trees.EnglishGrammaticalStructure \
- -treeFile "$f.2" -basic -makeCopulaHead -conllx > $out
- done
-
-p I can’t easily read that anymore, but it should just convert every .mrg file in a folder to a CoNLL-format Stanford basic dependencies file, using the settings common in the dependency literature.
-
-p I then converted the gold-standard trees from WSJ 22, for the evaluation. Accuracy scores refer to unlabelled attachment score (i.e. the head index) of all non-punctuation tokens.
-
-p To train parser.py, I fed the gold-standard PTB trees for WSJ 02-21 into the same conversion script.
-
-p In a nutshell: The Stanford model and parser.py are trained on the same set of sentences, and they each make their predictions on a held-out test set, for which we know the answers. Accuracy refers to how many of the words’ heads we got correct.
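-
-p To make the metric concrete, here's a rough sketch of the evaluation, assuming two aligned CoNLL-X files, where column 7 holds the head index. The punctuation test is approximate:
-
-+code.
-    def uas(gold_loc, test_loc):
-        '''Unlabelled attachment score: the percentage of non-punctuation
-        tokens whose predicted head matches the gold head.'''
-        right = 0
-        total = 0
-        for gold_line, test_line in zip(open(gold_loc), open(test_loc)):
-            gold = gold_line.split()
-            test = test_line.split()
-            if not gold:
-                continue # Blank line between sentences
-            if all(not c.isalnum() for c in gold[1]):
-                continue # Rough punctuation test on the word form
-            right += gold[6] == test[6]
-            total += 1
-        return float(right) / total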
-
-p Speeds were measured on a 2.4GHz Xeon. I ran the experiments on a server, to give the Stanford parser more memory. The parser.py system runs fine on my MacBook Air. I used PyPy for the parser.py experiments; CPython was about half as fast on an early benchmark.
-
-p One of the reasons parser.py is so fast is that it does unlabelled parsing. Based on previous experiments, a labelled parser would likely be about 40x slower, and about 1% more accurate. Adapting the program to labelled parsing would be a good exercise for the reader, if you have access to the data.
-
-p The result from the Redshift parser was produced from commit #[code b6b624c9900f3bf], which was run as follows:
-
-+code("bash").
- ./scripts/train.py -x zhang+stack -k 8 -p ~/data/stanford/train.conll ~/data/parsers/tmp
- ./scripts/parse.py ~/data/parsers/tmp ~/data/stanford/devi.txt /tmp/parse/
- ./scripts/evaluate.py /tmp/parse/parses ~/data/stanford/dev.conll
diff --git a/website/blog/part-of-speech-pos-tagger-in-python.jade b/website/blog/part-of-speech-pos-tagger-in-python.jade
deleted file mode 100644
index f6f0a3aa2..000000000
--- a/website/blog/part-of-speech-pos-tagger-in-python.jade
+++ /dev/null
@@ -1,257 +0,0 @@
-include ../_includes/_mixins
-
-+lead Up-to-date knowledge about natural language processing is mostly locked away in academia. And academics are mostly pretty self-conscious when we write. We’re careful. We don’t want to stick our necks out too much. But under-confident recommendations suck, so here’s how to write a good part-of-speech tagger.
-
-p There are a tonne of “best known techniques” for POS tagging, and you should ignore the others and just use Averaged Perceptron.
-
-p You should use two tags of history, and features derived from the Brown word clusters distributed here.
-
-p If you only need the tagger to work on carefully edited text, you should use case-sensitive features, but if you want a more robust tagger you should avoid them because they’ll make you over-fit to the conventions of your training domain. Instead, features that ask “how frequently is this word title-cased, in a large sample from the web?” work well. Then you can lower-case your comparatively tiny training corpus.
-
-p For efficiency, you should figure out which frequent words in your training data have unambiguous tags, so you don’t have to do anything but output their tags when they come up. About 50% of the words can be tagged that way.
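-
-p For instance, you might build that lookup table roughly like this. The thresholds are illustrative guesses, not tuned values:
-
-+code.
-    from collections import defaultdict
-
-    def make_tagdict(sentences, freq_thresh=20, ambiguity_thresh=0.97):
-        '''Map frequent, (almost) unambiguous words straight to their tag.'''
-        counts = defaultdict(lambda: defaultdict(int))
-        for words, tags in sentences:
-            for word, tag in zip(words, tags):
-                counts[word][tag] += 1
-        tagdict = {}
-        for word, tag_freqs in counts.items():
-            tag, mode = max(tag_freqs.items(), key=lambda item: item[1])
-            n = sum(tag_freqs.values())
-            if n >= freq_thresh and (float(mode) / n) >= ambiguity_thresh:
-                tagdict[word] = tag
-        return tagdict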
-
-p And unless you really, really can’t do without an extra 0.1% of accuracy, you probably shouldn’t bother with any kind of search strategy; you should just use a greedy model.
-
-p If you do all that, you’ll find your tagger easy to write and understand, and an efficient Cython implementation will perform as follows on the standard evaluation, 130,000 words of text from the Wall Street Journal:
-
-+table(["Tagger", "Accurarcy", "Time (130k words)"], "parameters")
- +row
- +cell CyGreedyAP
- +cell 97.1%
- +cell 4s
-
-p The 4s includes initialisation time — the actual per-token speed is high enough to be irrelevant; it won’t be your bottleneck.
-
-p It’s tempting to look at 97% accuracy and conclude that part-of-speech tagging is basically solved, but that’s not true. My parser is about 1% more accurate if the input has hand-labelled POS tags, and the taggers all perform much worse on out-of-domain data. Unfortunately accuracies have been fairly flat for the last ten years. That’s why my recommendation is to just use a simple and fast tagger that’s roughly as good.
-
-p The thing is though, it’s very common to see people using taggers that aren’t anywhere near that good! For an example of what a non-expert is likely to use, these were the two taggers wrapped by TextBlob, a new Python API that I think is quite neat:
-
-+table(["Tagger", "Accurarcy", "Time (130k words)"], "parameters")
- +row
- +cell NLTK
- +cell 94.0%
- +cell 3m56s
-
- +row
- +cell Pattern
- +cell 93.5%
- +cell 26s
-
-p Both Pattern and NLTK are very robust and beautifully well documented, so the appeal of using them is obvious. But Pattern’s algorithms are pretty crappy, and NLTK carries tremendous baggage around in its implementation because of its massive framework, and double-duty as a teaching tool.
-
-p As a stand-alone tagger, my Cython implementation is needlessly complicated – it was written for my parser. So today I wrote a 200 line version of my recommended algorithm for TextBlob. It gets:
-
-+table(["Tagger", "Accurarcy", "Time (130k words)"], "parameters")
- +row
- +cell PyGreedyAP
- +cell 96.8%
- +cell 12s
-
-p I traded some accuracy and a lot of efficiency to keep the implementation simple. Here’s a far-too-brief description of how it works.
-
-+h3("averaged-perceptron") Averaged Perceptron
-
-p POS tagging is a “supervised learning problem”. You’re given a table of data, and you’re told that the values in the last column will be missing during run-time. You have to find correlations from the other columns to predict that value.
-
-p So for us, the missing column will be “part of speech at word i”. The predictor columns (features) will be things like “part of speech at word i-1”, “last three letters of word at i+1”, etc.
-
-p First, here’s what prediction looks like at run-time:
-
-+code.
- def predict(self, features):
- '''Dot-product the features and current weights and return the best class.'''
- scores = defaultdict(float)
- for feat in features:
- if feat not in self.weights:
- continue
- weights = self.weights[feat]
- for clas, weight in weights.items():
- scores[clas] += weight
- # Do a secondary alphabetic sort, for stability
- return max(self.classes, key=lambda clas: (scores[clas], clas))
-
-p Earlier I described the learning problem as a table, with one of the columns marked as missing-at-runtime. For NLP, our tables are always exceedingly sparse. You have columns like “word i-1=Parliament”, which is almost always 0. So our “weight vectors” can pretty much never be implemented as vectors. Map-types are good though — here we use dictionaries.
-
-p The input data, features, is a set with a member for every non-zero “column” in our “table” – every active feature. Usually this is actually a dictionary, to let you set values for the features. But here all my features are binary present-or-absent type deals.
-
-p The weights data-structure is a dictionary of dictionaries, that ultimately associates feature/class pairs with some weight. You want to structure it this way instead of the reverse because of the way word frequencies are distributed: most words are rare, frequent words are very frequent.
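-
-p In other words, the structure looks something like this (the feature names and values are invented for illustration):
-
-+code.
-    weights = {
-        'i suffix+ing': {'VBG': 4.5, 'NN': -2.0, 'JJ': 0.25},
-        'i-1 tag+DT': {'NN': 3.0, 'VB': -1.5},
-        'i word+the': {'DT': 9.0},
-    }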
-
-+h3("learning-the-weights") Learning the Weights
-
-p Okay, so how do we get the values for the weights? We start with an empty weights dictionary, and iteratively do the following:
-
-+list("numbers")
- +item Receive a new (features, POS-tag) pair
- +item Guess the value of the POS tag given the current “weights” for the features
- +item If guess is wrong, add +1 to the weights associated with the correct class for these features, and -1 to the weights for the predicted class.
-
-p It’s one of the simplest learning algorithms. Whenever you make a mistake, increment the weights for the correct class, and penalise the weights that led to your false prediction. In code:
-
-+code.
- def train(self, nr_iter, examples):
- for i in range(nr_iter):
- for features, true_tag in examples:
- guess = self.predict(features)
- if guess != true_tag:
- for f in features:
- self.weights[f][true_tag] += 1
- self.weights[f][guess] -= 1
- random.shuffle(examples)
-
-p If you iterate over the same example this way, the weights for the correct class would have to come out ahead, and you’d get the example right. If you think about what happens with two examples, you should be able to see that it will get them both right unless the features are identical. In general the algorithm will converge so long as the examples are linearly separable, although that doesn’t matter for our purpose.
-
-+h3("averaging-the-weights") Averaging the Weights
-
-p We need to do one more thing to make the perceptron algorithm competitive. The problem with the algorithm so far is that if you train it twice on slightly different sets of examples, you end up with really different models. It doesn’t generalise that smartly. And the problem is really in the later iterations — if you let it run to convergence, it’ll pay lots of attention to the few examples it’s getting wrong, and mutate its whole model around them.
-
-p So, what we’re going to do is make the weights more "sticky" – give the model less chance to ruin all its hard work in the later rounds. And we’re going to do that by returning the averaged weights, not the final weights.
-
-p I doubt there are many people who are convinced that’s the most obvious solution to the problem, but whatever. We’re not here to innovate, and this way is time tested on lots of problems. If you have another idea, run the experiments and tell us what you find. Actually I’d love to see more work on this, now that the averaged perceptron has become such a prominent learning algorithm in NLP.
-
-p Okay. So this averaging. How’s that going to work? Note that we don’t want to just average after each outer-loop iteration. We want the average of all the values — from the inner loop. So if we have 5,000 examples, and we train for 10 iterations, we’ll average across 50,000 values for each weight.
-
-p Obviously we’re not going to store all those intermediate values. Instead, we’ll track an accumulator for each weight, and divide it by the number of iterations at the end. Again: we want the average weight assigned to a feature/class pair during learning, so the key component we need is the total weight it was assigned. But we also want to be careful about how we compute that accumulator, too. On almost any instance, we’re going to see a tiny fraction of active feature/class pairs. All the other feature/class weights won’t change. So we shouldn’t have to go back and add the unchanged value to our accumulators anyway, like chumps.
-
-p Since we’re not chumps, we’ll make the obvious improvement. We’ll maintain another dictionary that tracks how long each weight has gone unchanged. Now when we do change a weight, we can do a fast-forwarded update to the accumulator, for all those iterations where it lay unchanged.
-
-p Here’s what a weight update looks like now that we have to maintain the totals and the time-stamps:
-
-+code.
- def update(self, truth, guess, features):
- def upd_feat(c, f, v):
- nr_iters_at_this_weight = self.i - self._timestamps[f][c]
- self._totals[f][c] += nr_iters_at_this_weight * self.weights[f][c]
- self.weights[f][c] += v
- self._timestamps[f][c] = self.i
-
- self.i += 1
- for f in features:
- upd_feat(truth, f, 1.0)
- upd_feat(guess, f, -1.0)
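-
-p And here's roughly what the final averaging step looks like. The training loop further down calls it as #[code self.model.average_weights()]; this is a sketch of that method:
-
-+code.
-    def average_weights(self):
-        '''Average each weight over all time-steps, fast-forwarding stale
-        totals first, then replace the weights with the averages.'''
-        for feat, weights in self.weights.items():
-            new_feat_weights = {}
-            for clas, weight in weights.items():
-                total = self._totals[feat][clas]
-                total += (self.i - self._timestamps[feat][clas]) * weight
-                averaged = round(total / float(self.i), 3)
-                if averaged:
-                    new_feat_weights[clas] = averaged
-            self.weights[feat] = new_feat_weights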
-
-+h3("features-and-pre-processing") Features and Pre-processing
-
-p The POS tagging literature has tonnes of intricate features sensitive to case, punctuation, etc. They help on the standard test-set, which is from Wall Street Journal articles from the 1980s, but I don’t see how they’ll help us learn models that are useful on other text.
-
-p To help us learn a more general model, we’ll pre-process the data prior to feature extraction, as follows:
-
-+list
-    +item All words are lower-cased;
-    +item Digits in the range 1800-2100 are represented as !YEAR;
-    +item Other digit strings are represented as !DIGITS;
-    +item It would be better to have a module recognising dates, phone numbers, emails, hash-tags, etc., but that will have to be pushed back into the tokenization.
-
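-p In code, that normalisation might look like this – a minimal sketch implementing just the three rules above. The training loop further down applies it as #[code self._normalize]:
-
-+code.
-    def _normalize(self, word):
-        if word.isdigit() and len(word) == 4 and 1800 <= int(word) <= 2100:
-            return '!YEAR'
-        elif word.isdigit():
-            return '!DIGITS'
-        else:
-            return word.lower()
-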
-p I played around with the features a little, and this seems to be a reasonable bang-for-buck configuration in terms of getting the development-data accuracy to 97% (where it typically converges anyway), and having a smaller memory footprint:
-
-+code.
- def _get_features(self, i, word, context, prev, prev2):
- '''Map tokens-in-contexts into a feature representation, implemented as a
- set. If the features change, a new model must be trained.'''
- def add(name, *args):
- features.add('+'.join((name,) + tuple(args)))
-
- features = set()
- add('bias') # This acts sort of like a prior
- add('i suffix', word[-3:])
- add('i pref1', word[0])
- add('i-1 tag', prev)
- add('i-2 tag', prev2)
- add('i tag+i-2 tag', prev, prev2)
- add('i word', context[i])
- add('i-1 tag+i word', prev, context[i])
- add('i-1 word', context[i-1])
- add('i-1 suffix', context[i-1][-3:])
- add('i-2 word', context[i-2])
- add('i+1 word', context[i+1])
- add('i+1 suffix', context[i+1][-3:])
- add('i+2 word', context[i+2])
- return features
-
-p I haven’t added any features from external data, such as case frequency statistics from the Google Web 1T corpus. I might add those later, but for now I figured I’d keep things simple.
-
-+h2("what-about-search") What About Search?
-
-p The model I’ve recommended commits to its predictions on each word, and moves on to the next one. Those predictions are then used as features for the next word. There’s a potential problem here, but it turns out it doesn’t matter much. It’s easy to fix with beam-search, but I say it’s not really worth bothering. And it definitely doesn’t matter enough to adopt a slow and complicated algorithm like Conditional Random Fields.
-
-p Here’s the problem. The best indicator for the tag at position, say, 3 in a sentence is the word at position 3. But the next-best indicators are the tags at positions 2 and 4. So there’s a chicken-and-egg problem: we want the predictions for the surrounding words in hand before we commit to a prediction for the current word. Here’s an example where search might matter:
-
-+example Their management plan reforms worked
-
-p Depending on just what you’ve learned from your training data, you can imagine making a different decision if you started at the left and moved right, conditioning on your previous decisions, than if you’d started at the right and moved left.
-
-p If that’s not obvious to you, think about it this way: “worked” is almost surely a verb, so if you tag “reforms” with that in hand, you’ll have a different idea of its tag than if you’d just come from “plan”, which you might have regarded as either a noun or a verb.
-
-p Search can only help you when you make a mistake. It can prevent that error from throwing off your subsequent decisions, or sometimes your future choices will correct the mistake. And that’s why for POS tagging, search hardly matters! Your model is so good straight-up that your past predictions are almost always true. So you really need the planets to align for search to matter at all.
-
-p And as we improve our taggers, search will matter less and less. Instead of search, what we should be caring about is multi-tagging. If we let the model be a bit uncertain, we can get over 99% accuracy assigning an average of 1.05 tags per word (Vadas et al, ACL 2006). The averaged perceptron is rubbish at multi-tagging though. That’s its big weakness. You really want a probability distribution for that.
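-
-p To make "multi-tagging" concrete, here's a toy variant of #[code predict] that returns every class scoring within a fixed margin of the best. The margin is arbitrary, because perceptron scores aren't probabilities – which is exactly the weakness I mean:
-
-+code.
-    def predict_multi(self, features, margin=0.5):
-        scores = defaultdict(float)
-        for feat in features:
-            for clas, weight in self.weights.get(feat, {}).items():
-                scores[clas] += weight
-        if not scores:
-            return []
-        best = max(scores.values())
-        # With no probabilistic interpretation of the scores,
-        # the margin is just a magic number.
-        return [c for c, s in scores.items() if best - s <= margin]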
-
-p One caveat when doing greedy search, though. It’s very important that your training data model the fact that the history will be imperfect at run-time. Otherwise, it will be way over-reliant on the tag-history features. Because the Perceptron is iterative, this is very easy.
-
-p Here’s the training loop for the tagger:
-
-+code.
- def train(self, sentences, save_loc=None, nr_iter=5, quiet=False):
- '''Train a model from sentences, and save it at save_loc. nr_iter
- controls the number of Perceptron training iterations.'''
- self._make_tagdict(sentences, quiet=quiet)
- self.model.classes = self.classes
- prev, prev2 = START
- for iter_ in range(nr_iter):
- c = 0; n = 0
- for words, tags in sentences:
- context = START + [self._normalize(w) for w in words] + END
- for i, word in enumerate(words):
- guess = self.tagdict.get(word)
- if not guess:
- feats = self._get_features(
- i, word, context, prev, prev2)
- guess = self.model.predict(feats)
- self.model.update(tags[i], guess, feats)
- # Set the history features from the guesses, not the
- # true tags
- prev2 = prev; prev = guess
- c += guess == tags[i]; n += 1
- random.shuffle(sentences)
- if not quiet:
- print("Iter %d: %d/%d=%.3f" % (iter_, c, n, _pc(c, n)))
- self.model.average_weights()
- # Pickle as a binary file
- if save_loc is not None:
- cPickle.dump((self.model.weights, self.tagdict, self.classes),
- open(save_loc, 'wb'), -1)
-
-p Unlike the previous snippets, this one’s literal – I tended to edit the previous ones to simplify. So if they have bugs, hopefully that’s why!
-
-p At the time of writing, I’m just finishing up the implementation before I submit a pull request to TextBlob. You can see the rest of the source here:
-
-+list
- +item #[a(href="https://github.com/sloria/textblob-aptagger/blob/master/textblob_aptagger/taggers.py" target="_blank") taggers.py]
- +item #[a(href="https://github.com/sloria/textblob-aptagger/blob/master/textblob_aptagger/_perceptron.py" target="_blank") perceptron.py]
-
-+h2("comparison") A final comparison…
-
-p Over the years I’ve seen a lot of cynicism about the WSJ evaluation methodology. The claim is that we’ve just been meticulously over-fitting our methods to this data. Actually the evidence doesn’t really bear this out. Mostly, if a technique is clearly better on one evaluation, it improves others as well. Still, it’s very reasonable to want to know how these tools perform on other text. So I ran the unchanged models over two other sections from the OntoNotes corpus:
-
-+table(["Tagger", "WSJ", "ABC", "Web"], "parameters")
- +row
- +cell Pattern
- +cell 93.5
- +cell 90.7
- +cell 88.1
-
- +row
- +cell NLTK
- +cell 94.0
- +cell 91.5
-        +cell 88.4
-
-    +row
- +cell PyGreedyAP
- +cell 96.8
- +cell 94.8
- +cell 91.8
-
-p The ABC section is broadcast news, Web is text from the web (blogs etc — I haven’t looked at the data much).
-
-p As you can see, the order of the systems is stable across the three comparisons, and the advantage of our Averaged Perceptron tagger over the other two is real enough. Actually the Pattern tagger does very poorly on out-of-domain text. It mostly just looks up the words, so it’s very domain dependent. I hadn’t realised it before, but it’s obvious enough now that I think about it.
-
-p We can improve our score greatly by training on some of the foreign data. The technique described in this paper (Daume III, 2007) is the first thing I try when I have to do that.
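-
-p The trick is simple enough to sketch. Every feature is duplicated: one copy goes in a shared "general" space, one in a domain-specific space, and the learner works out which features transfer between domains. The naming scheme here is mine, for illustration:
-
-+code.
-    def augment(features, domain):
-        '''Sketch of "frustratingly easy" domain adaptation (Daume III, 2007).'''
-        augmented = set()
-        for feat in features:
-            augmented.add('general+' + feat)
-            augmented.add(domain + '+' + feat)
-        return augmented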
diff --git a/website/blog/sense2vec-with-spacy.jade b/website/blog/sense2vec-with-spacy.jade
deleted file mode 100644
index 3e32970b8..000000000
--- a/website/blog/sense2vec-with-spacy.jade
+++ /dev/null
@@ -1,166 +0,0 @@
-include ../_includes/_mixins
-
-+lead If you were doing text analytics in 2015, you were probably using #[a(href="https://en.wikipedia.org/wiki/Word2vec" target="_blank") word2vec]. Sense2vec #[a(href="http://arxiv.org/abs/1511.06388" target="_blank") (Trask et al., 2015)] is a new twist on word2vec that lets you learn more interesting, detailed and context-sensitive word vectors. This post motivates the idea, explains our implementation, and comes with an #[a(href="https://sense2vec.spacy.io" target="_blank") interactive demo] that we've found surprisingly #[a(href="https://sense2vec.spacy.io/?crack|NOUN" target="_blank") addictive].
-
-+h2("word2vec") Polysemy: the problem with word2vec
-
-p When humans write dictionaries and thesauruses, we define concepts in relation to other concepts. For automatic natural language processing, it's often more effective to use dictionaries that define concepts in terms of their usage statistics. The word2vec family of models are the most popular way of creating these dictionaries. Given a large sample of text, word2vec gives you a dictionary where each definition is just a row of, say, 300 floating-point numbers. To find out whether two entries in the dictionary are similar, you ask how similar their definitions are – a well-defined mathematical operation.
-
-p The problem with word2vec is the #[em word] part. Consider a word like #[em duck]. No individual usage of the word #[em duck] refers to the concept "a waterfowl, or the action of crouching". But that's the concept that word2vec is trying to model – because it smooshes all senses of the words together. #[a(href="http://arxiv.org/abs/1511.05392" target="_blank") Nalisnick and Ravi (2015)] noticed this problem, and suggested that we should allow word vectors to grow arbitrarily, so that we can do a better job of modelling complicated concepts. This seems like a nice approach for subtle sense distinctions, but for cases like #[em duck] it's not so satisfying. What we want to do is have different vectors for the different senses. We also want a simple way of knowing which meaning a given usage refers to. For this, we need to analyse tokens in context. This is where #[a(href=url target="_blank") spaCy] comes in.
-
-+h2("sense2vec") Sense2vec: Using NLP annotations for more precise vectors
-
-p The idea behind sense2vec is super simple. If the problem is that #[em duck] as in #[em waterfowl] and #[em duck] as in #[em crouch] are different concepts, the straight-forward solution is to just have two entries, #[a(href="https://sense2vec.spacy.io/?duck|NOUN" target="_blank") duck#[sub N]] and #[a(href="https://sense2vec.spacy.io/?duck|VERB" target="_blank") duck#[sub V]]. We've wanted to try this #[a(href="https://github.com/" + profiles.github + "/spaCy/issues/58" target="_blank") for some time]. So when #[a(href="http://arxiv.org/pdf/1511.06388.pdf" target="_blank") Trask et al (2015)] published a nice set of experiments showing that the idea worked well, we were easy to convince.
-
-p We follow Trask et al in adding part-of-speech tags and named entity labels to the tokens. Additionally, we merge named entities and base noun phrases into single tokens, so that they receive a single vector. We're very pleased with the results from this, even though we consider the current version an early draft. There's a lot more that can be done with the idea. Multi-word verbs such as #[em get up] and #[em give back] and even #[em take a walk] or #[em make a decision] would be great extensions. We also don't do anything currently to trim back phrases that are compositional – phrases which really are two words.
-
-p Here's how the current pre-processing function looks, at the time of writing. The rest of the code can be found on #[a(href="https://github.com/" + profiles.github + "/sense2vec/" target="_blank") GitHub].
-
-+code("python", "merge_text.py").
-    from spacy.en import English
-
-    def transform_texts(texts):
- # Load the annotation models
- nlp = English()
- # Stream texts through the models. We accumulate a buffer and release
- # the GIL around the parser, for efficient multi-threading.
- for doc in nlp.pipe(texts, n_threads=4):
- # Iterate over base NPs, e.g. "all their good ideas"
- for np in doc.noun_chunks:
- # Only keep adjectives and nouns, e.g. "good ideas"
- while len(np) > 1 and np[0].dep_ not in ('amod', 'compound'):
- np = np[1:]
- if len(np) > 1:
- # Merge the tokens, e.g. good_ideas
- np.merge(np.root.tag_, np.text, np.root.ent_type_)
- # Iterate over named entities
- for ent in doc.ents:
- if len(ent) > 1:
- # Merge them into single tokens
- ent.merge(ent.root.tag_, ent.text, ent.label_)
- token_strings = []
- for token in doc:
- text = token.text.replace(' ', '_')
- tag = token.ent_type_ or token.pos_
- token_strings.append('%s|%s' % (text, tag))
- yield ' '.join(token_strings)
-
-p Even with all this additional processing, we can still train massive models without difficulty. Because spaCy is written in #[a(href="/blog/writing-c-in-cython" target="_blank") Cython], we can #[a(href="http://docs.cython.org/src/userguide/parallelism.html" target="_blank") release the GIL] around the syntactic parser, allowing efficient multi-threading. With 4 threads, throughput is over 100,000 words per second.
-
-p After pre-processing the text, the vectors can be trained as normal, using #[a(href="https://code.google.com/archive/p/word2vec/" target="_blank") the original C code], #[a(href="https://radimrehurek.com/gensim/" target="_blank") Gensim], or a related technique like #[a(href="http://nlp.stanford.edu/projects/glove/" target="_blank") GloVe]. So long as the tool expects tokens to be whitespace-delimited, and sentences to be separated by new lines, there should be no problem. The only caveat is that the tool should not try to employ its own tokenization – otherwise it might split off our tags.
-
-p We used Gensim, and trained the model using the Skip-Gram with Negative Sampling algorithm, using a frequency threshold of 10 and 5 iterations. After training, we applied a further frequency threshold of 50, to reduce the run-time memory requirements.
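-
-p With Gensim, the training call might look roughly like this. The file path is illustrative, and this assumes the pre-4.0 Gensim API – newer releases renamed #[code iter] to #[code epochs]:
-
-+code.
-    from gensim.models import Word2Vec
-
-    class Corpus(object):
-        '''Stream the pre-processed, whitespace-delimited text.'''
-        def __init__(self, loc):
-            self.loc = loc
-        def __iter__(self):
-            for line in open(self.loc):
-                yield line.split()
-
-    # sg=1 with negative sampling gives Skip-Gram with Negative Sampling;
-    # min_count=10 is the frequency threshold described above.
-    model = Word2Vec(Corpus('/tmp/merged_text.txt'), sg=1, negative=5,
-                     min_count=10, iter=5)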
-
-+h2("examples") Example queries
-
-p As soon as we started playing with these vectors, we found all sorts of interesting things. Here are some of our first impressions.
-
-h3 1. The vector space seems like it'll give a good way to show compositionality:
-
-p A phrase is #[em compositional] to the extent that its meaning is the sum of its parts. The word vectors give us good insight into this. The model knows that #[em fair game] is not a type of game, while #[em multiplayer game] is:
-
-+code.
- >>> model.similarity('fair_game|NOUN', 'game|NOUN')
- 0.034977455677555599
- >>> model.similarity('multiplayer_game|NOUN', 'game|NOUN')
- 0.54464530644393849
-
-p Similarly, it knows that #[em class action] is only very weakly a type of action, but a #[em class action lawsuit] is definitely a type of lawsuit:
-
-+code.
- >>> model.similarity('class_action|NOUN', 'action|NOUN')
- 0.14957825452335169
- >>> model.similarity('class_action_lawsuit|NOUN', 'lawsuit|NOUN')
- 0.69595765453644187
-
-p Personally, I like the queries where you can see a little of the Reddit shining through (which might not be safe for every workplace). For instance, Reddit understands that a #[a(href="https://sense2vec.spacy.io/?garter_snake|NOUN" target="_blank") garter snake] is a type of snake, while a #[a(href="https://sense2vec.spacy.io/?trouser_snake|NOUN" target="_blank") trouser snake] is something else entirely.
-
-h3 2. Similarity between entities can be kind of fun.
-
-p Here's what Reddit thinks of Donald Trump:
-
-+code.
- >>> model.most_similar(['Donald_Trump|PERSON'])
- (u'Sarah_Palin|PERSON', 0.854670465),
- (u'Mitt_Romney|PERSON', 0.8245523572),
- (u'Barrack_Obama|PERSON', 0.808201313),
- (u'Bill_Clinton|PERSON', 0.8045649529),
- (u'Oprah|GPE', 0.8042222261),
- (u'Paris_Hilton|ORG', 0.7962667942),
- (u'Oprah_Winfrey|PERSON', 0.7941152453),
- (u'Stephen_Colbert|PERSON', 0.7926792502),
- (u'Herman_Cain|PERSON', 0.7869615555),
- (u'Michael_Moore|PERSON', 0.7835546732)]
-
-p The model is returning entities discussed in similar contexts. It's interesting to see that the word vectors correctly pick out the idea of Trump as a political figure but also a reality star. The comparison with #[a(href="https://en.wikipedia.org/wiki/Michael_Moore" target="_blank") Michael Moore] really tickles me. I doubt there are many people who are fans of both. If I had to pick an odd-one-out, I'd definitely choose Oprah. That comparison resonates much less with me.
-
-p The entry #[code Oprah|GPE] is also quite interesting. Nobody is living in the United States of Oprah just yet, which is what the tag #[code GPE] (geopolitical entity) would imply. The distributional similarity model has correctly learned that #[code Oprah|GPE] is closely related to #[code Oprah_Winfrey|PERSON]. This seems like a promising way to mine for errors made by the named entity recogniser, which could lead to improvements.
-
-p Word2vec has always worked well on named entities. I find the #[a(href="https://sense2vec.spacy.io/?Autechre|PERSON" target="_blank") music region of the vector space] particularly satisfying. It reminds me of the way I used to get music recommendations: by paying attention to the bands frequently mentioned alongside ones I already like. Of course, now we have much more powerful recommendation models, that look at the listening behaviour of millions of people. But to me there's something oddly intuitive about many of the band similarities our sense2vec model is returning.
-
-p Of course, the model is far from perfect, and when it produces weird results, it doesn't always pay to think too hard about them. One of our early models "uncovered" a hidden link between Carrot Top and Kate Mara:
-
-+code.
- >>> model.most_similar(['Carrot_Top|PERSON'])
- [(u'Kate_Mara|PERSON', 0.5347248911857605),
- (u'Andy_Samberg|PERSON', 0.5336876511573792),
- (u'Ryan_Gosling|PERSON', 0.5287898182868958),
- (u'Emma_Stone|PERSON', 0.5243821740150452),
- (u'Charlie_Sheen|PERSON', 0.5209298133850098),
- (u'Joseph_Gordon_Levitt|PERSON', 0.5196050405502319),
- (u'Jonah_Hill|PERSON', 0.5151286125183105),
- (u'Zooey_Deschanel|PERSON', 0.514430582523346),
- (u'Gerard_Butler|PERSON', 0.5115377902984619),
- (u'Ellen_Page|PERSON', 0.5094753503799438)]
-
-p I really spent too long thinking about this. It just didn't make any sense. Even though it was trivial, it was so bizarre it was almost upsetting. And then it hit me: is this not the nature of all things Carrot Top? Perhaps there was a deeper logic to this. It required further study. But when we ran the model on more data, it was gone and soon forgotten. Just like Carrot Top.
-
-h3 3. Reddit talks about food a lot, and those regions of the vector space seem very well defined:
-
-+code.
- >>> model.most_similar(['onion_rings|NOUN'])
- [(u'hashbrowns|NOUN', 0.8040812611579895),
- (u'hot_dogs|NOUN', 0.7978234887123108),
- (u'chicken_wings|NOUN', 0.793393611907959),
- (u'sandwiches|NOUN', 0.7903584241867065),
- (u'fries|NOUN', 0.7885469198226929),
- (u'tater_tots|NOUN', 0.7821801900863647),
- (u'bagels|NOUN', 0.7788236141204834),
- (u'chicken_nuggets|NOUN', 0.7787706255912781),
- (u'coleslaw|NOUN', 0.7771176099777222),
- (u'nachos|NOUN', 0.7755396366119385)]
-
-p Some of Reddit's ideas about food are kind of... interesting. It seems to think bacon and broccoli are very similar:
-
-+code.
- >>> model.similarity('bacon|NOUN', 'broccoli|NOUN')
- 0.83276615202851845
-
-p Reddit also thinks hot dogs are practically salad:
-
-+code.
- >>> model.similarity('hot_dogs|NOUN', 'salad|NOUN')
- 0.76765100035460465
- >>> model.similarity('hot_dogs|NOUN', 'entrails|NOUN')
- 0.28360725445449464
-
-p Just keep telling yourself that, Reddit.
-
-+divider("Appendix")
-
-+h2("demo-usage") Using the demo
-
-p Search for a word or phrase to explore related concepts. If you want to get fancy, you can try adding a tag to your query, like so: #[code query phrase|NOUN]. If you leave the tag out, we search for the most common one associated with the word. The tags are predicted by a statistical model that looks at the surrounding context of each example of the word.
-
-+grid("wrap")
- +grid-col("half", "padding")
- +label Part-of-speech tags
-
- p #[code ADJ] #[code ADP] #[code ADV] #[code AUX] #[code CONJ] #[code DET] #[code INTJ] #[code NOUN] #[code NUM] #[code PART] #[code PRON] #[code PROPN] #[code PUNCT] #[code SCONJ] #[code SYM] #[code VERB] #[code X]
-
- +grid-col("half", "padding")
- +label Named entities
-
- p #[code NORP] #[code FACILITY] #[code ORG] #[code GPE] #[code LOC] #[code PRODUCT] #[code EVENT] #[code WORK_OF_ART] #[code LANGUAGE]
-
-p For instance, if you enter #[code serve], we check how many examples we have of #[code serve|VERB], #[code serve|NOUN], #[code serve|ADJ] etc. Since #[code serve|VERB] is the most common, we show results for that query. But if you type #[code serve|NOUN], you can see results for that instead. Because #[code serve|NOUN] is strongly associated with tennis, while usages of the verb are much more general, the results for the two queries are quite different.
-
-p We apply a similar frequency-based procedure to support case sensitivity. If your query is lower case and has no tag, we assume it is case insensitive, and look for the most frequent tagged and cased version. If your query includes at least one upper-case letter, or if you specify a tag, we assume you want the query to be case-sensitive.
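-
-p Under the hood, that resolution step amounts to something like the following sketch. The #[code freqs] table and the function are invented for illustration, not the demo's actual code:
-
-+code.
-    def resolve_query(query, freqs):
-        '''Map a raw query onto its most frequent tagged, cased entry.
-        freqs maps keys like 'serve|VERB' to corpus frequencies.'''
-        if '|' in query or not query.islower():
-            return query # Explicit tag or upper-case letter: use as-is
-        candidates = [key for key in freqs
-                      if key.split('|')[0].lower() == query]
-        return max(candidates, key=lambda key: freqs[key]) if candidates else None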
diff --git a/website/blog/spacy-now-mit.jade b/website/blog/spacy-now-mit.jade
deleted file mode 100644
index 09a64dc16..000000000
--- a/website/blog/spacy-now-mit.jade
+++ /dev/null
@@ -1,50 +0,0 @@
-include ../_includes/_mixins
-
-+lead Three big announcements for #[a(href=url) spaCy], a Python library for industrial-strength natural language processing (NLP).
-
-+list("numbers")
- +item I'd like to welcome my new co-founder, #[a(href="https://de.linkedin.com/in/hepeters" target="_blank") Henning Peters].
- +item spaCy is now available under #[a(href="https://en.wikipedia.org/wiki/MIT_License" target="_blank") the MIT license]. Formerly, spaCy was dual licensed: #[a(href="https://en.wikipedia.org/wiki/Affero_General_Public_License" target="_blank") AGPL], or pay a fee to use it unencumbered.
- +item A new service is entering closed beta: #[em Adaptation]. We want to work with you to deliver custom statistical models, optimized for your task, using your metrics, on your data.
-
-+h3("the-old-model") The old model: AGPL-or-$
-
-p In mid 2014, I quit my day job as an academic, and started writing spaCy. I did this because I saw that companies were trying to use the code I'd been publishing to support my experiments in natural language understanding – even though that code was never designed to be actually #[em used]. Its mission in life was to print some annotation and exit: to demonstrate some point about how we should design these systems going forward.
-
-p My idea for spaCy was simple. I'd write a better library, crafted lovingly for first-rate performance and usability, ensure it had great documentation and a simple install procedure, and offer long-term, business-friendly licenses.
-
-p I quickly ruled out an entirely closed source model. Users are valuable, whether or not they submit patches. They find problems and suggest solutions. And there's no better advertising than adoption.
-
-p But I did want spaCy to be the product, the thing that I was paid to make great. I wanted a business model that maximised the value of the library. To me, this excluded a SaaS model, since I think using the technology behind an API is an inferior technical approach to having the source code, and running the library locally.
-
-p So I settled on a dual license model. Anyone could download and use spaCy under the AGPL. However, most companies have a blanket ban on GPL libraries, since they're usually unwilling to release their own code under the GPL. These companies could instead sign up for a commercial license, which offered them near complete freedom, to use the library and its source however they wanted.
-
-p Commercial licenses were available as a free 90 day trial. On release, I offered lifetime licenses for a one-time fee of $5,000. As the library improved, this was repriced to $5,000 a year, or $20,000 for 5 years. I wanted to offer the library at prices that were very low relative to engineering salaries. I felt that spaCy could easily represent many weeks of development time savings per year, over a similar open source library.
-
-+h3("why-agpl-or-dollar-wasnt-right") Why AGPL-or-$ wasn't quite right
-
-p While copyleft licenses may be maximally "free" in some philosophical sense, engineers interested in spaCy were not free to simply download and try the library at work. And that's the sort of freedom we're most interested in. You shouldn't have to get management to sign a legal agreement to try out some code you read about on the internet.
-
-p Even though the trial was free, and the terms were pretty simple, a commercial license agreement was still a major barrier to adoption. When looking around for a new solution, there are always endless avenues to explore, almost all of which turn out to be dead ends. There's not a lot of room in this process for potential solutions that ask you to do additional leg-work.
-
-p Another huge problem is that neither of spaCy's licenses were suitable for most open-source developers. The ecosystem around copyleft licenses such as AGPL is tiny in comparison to the ecosystem around permissive licenses such as MIT. This cut spaCy off from a large community of potential users, making it much less useful than it should be.
-
-p I knew when I settled on the AGPL-or-$ idea that it was an unusual model. I expected to face the usual novelty problems: I'd have more explaining to do, perceptions might be unfavorable, etc. Instead I think the novelty made this model intrinsically worse. It doesn't integrate well into the rest of the ecosystem.
-
-+h3("spacy-now-mit") spaCy now MIT licensed
-
-p spaCy is now available under the MIT license. Essentially, everyone now gets a free version of what used to be the commercial license (but in a standard form, that you don't have to bug management and legal to okay).
-
-p Anyone can now use spaCy in closed-source applications, however you like, without paying any license fees.
-
-p Any open-source libraries that want to build on spaCy, can.
-
-+h3("adaptation-as-a-service") Adaptation as a service
-
-p spaCy provides a suite of general-purpose natural language understanding components. In development, we measure and optimize the accuracy of these components against manually labelled data. But these annotations are a means to an end. They're only useful when you make use of them – when you put them to work in your product. So that's how we want to define success. We want to optimize spaCy for the metrics you care about, and we only want to be paid if we can improve them.
-
-p There are lots of ways we can deliver an improvement. The simplest is traditional training and consulting, which is particularly effective for NLP since it's such a deep and narrow niche. There is also a set of reusable strategies for making spaCy work better on your data. Instead of the general purpose statistical model, you could get a model optimized specifically for your use case.
-
-p The details of all of this will vary, on a case-by-case basis. It will often be useful to gather a variety of statistics about how spaCy performs on your text, and we might spend time improving them. But these accuracy statistics are not the bottom-line. The numbers that really matter are the ones that get you paid. That's the needle we want to move.
-
-p To apply for the closed beta, #[a(href="mailto:" + email) send us an email] explaining what you're doing.
diff --git a/website/blog/writing-c-in-cython.jade b/website/blog/writing-c-in-cython.jade
deleted file mode 100644
index 1fe1b1ca7..000000000
--- a/website/blog/writing-c-in-cython.jade
+++ /dev/null
@@ -1,96 +0,0 @@
-include ../_includes/_mixins
-
-+lead For the last two years, I’ve done almost all of my work in #[a(href="https://en.wikipedia.org/wiki/Cython" target="_blank") Cython]. And I don’t mean, I write Python, and then “Cythonize” it, with various type-declarations etc. I just, write Cython. I use “raw” C structs and arrays, and occasionally C++ vectors, with a thin wrapper around malloc/free that I wrote myself. The code is almost always exactly as fast as C/C++, because it really is just C/C++ with some syntactic sugar — but with Python “right there”, should I need/want it.
-
-p This is basically the inverse of the old promise that languages like Python came with: that you would write your whole application in Python, optimise the “hot spots” with C, and voila! C speed, Python convenience, and money in the bank.
-
-p This was always much nicer in theory than practice. In practice, your data structures have a huge influence on both the efficiency of your code, and how annoying it is to write. Arrays are a pain and fast; lists are blissfully convenient, and very slow. Python loops and function calls are also quite slow, so the part you have to write in C tends to wriggle its way up the stack, until it’s almost your whole application.
-
-p Today a post came up on HN, on #[a(href="https://www.crumpington.com/blog/2014/10-19-high-performance-python-extensions-part-1.html" target="_blank") writing C extensions for Python]. The author wrote both a pure Python implementation, and a C implementation, using the Numpy C API. This seemed a good opportunity to demonstrate the difference, so I wrote a Cython implementation for comparison:
-
-+code.
- import random
- from cymem.cymem cimport Pool
-
- from libc.math cimport sqrt
-
- cimport cython
-
- cdef struct Point:
- double x
- double y
-
- cdef class World:
- cdef Pool mem
- cdef int N
- cdef double* m
- cdef Point* r
- cdef Point* v
- cdef Point* F
- cdef readonly double dt
- def __init__(self, N, threads=1, m_min=1, m_max=30.0, r_max=50.0, v_max=4.0, dt=1e-3):
- self.mem = Pool()
- self.N = N
- self.m = <double*>self.mem.alloc(N, sizeof(double))
- self.r = <Point*>self.mem.alloc(N, sizeof(Point))
- self.v = <Point*>self.mem.alloc(N, sizeof(Point))
- self.F = <Point*>self.mem.alloc(N, sizeof(Point))
- for i in range(N):
- self.m[i] = random.uniform(m_min, m_max)
- self.r[i].x = random.uniform(-r_max, r_max)
- self.r[i].y = random.uniform(-r_max, r_max)
- self.v[i].x = random.uniform(-v_max, v_max)
- self.v[i].y = random.uniform(-v_max, v_max)
- self.F[i].x = 0
- self.F[i].y = 0
- self.dt = dt
-
-
- @cython.cdivision(True)
- def compute_F(World w):
- """Compute the force on each body in the world, w."""
- cdef int i, j
- cdef double s3, tmp
- cdef Point s
- cdef Point F
- for i in range(w.N):
- # Set all forces to zero.
- w.F[i].x = 0
- w.F[i].y = 0
- for j in range(i+1, w.N):
- s.x = w.r[j].x - w.r[i].x
- s.y = w.r[j].y - w.r[i].y
-
- s3 = sqrt(s.x * s.x + s.y * s.y)
-                s3 *= s3 * s3
-
- tmp = w.m[i] * w.m[j] / s3
- F.x = tmp * s.x
- F.y = tmp * s.y
-
- w.F[i].x += F.x
- w.F[i].y += F.y
-
- w.F[j].x -= F.x
- w.F[j].y -= F.y
-
-
- @cython.cdivision(True)
- def evolve(World w, int steps):
- """Evolve the world, w, through the given number of steps."""
- cdef int _, i
- for _ in range(steps):
- compute_F(w)
- for i in range(w.N):
- w.v[i].x += w.F[i].x * w.dt / w.m[i]
- w.v[i].y += w.F[i].y * w.dt / w.m[i]
- w.r[i].x += w.v[i].x * w.dt
- w.r[i].y += w.v[i].y * w.dt
-
-p The Cython version took about 30 minutes to write, and it runs just as fast as the C code — because, why wouldn’t it? It #[em is] C code, really, with just some syntactic sugar. And you don’t even have to learn or think about a foreign, complicated C API… You just, write C. Or C++ — although that’s a little more awkward. Both the Cython version and the C version are about 70x faster than the pure Python version, which uses Numpy arrays.
-
-p One difference from C: I wrote a little wrapper around malloc/free, #[a(href="https://github.com/syllog1sm/cymem" target="_blank") cymem]. All it does is remember the addresses it served, and when the Pool is garbage collected, it frees the memory it allocated. I’ve had no trouble with memory leaks since I started using this.
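-
-p The idea fits in a dozen lines. Here's a toy pure-Python equivalent using #[code ctypes], assuming a POSIX libc – cymem itself is Cython, and does more bookkeeping:
-
-+code.
-    import ctypes
-    import ctypes.util
-
-    libc = ctypes.CDLL(ctypes.util.find_library('c'))
-    libc.malloc.restype = ctypes.c_void_p
-    libc.malloc.argtypes = [ctypes.c_size_t]
-    libc.free.argtypes = [ctypes.c_void_p]
-
-    class Pool(object):
-        '''Hand out malloc'd blocks and remember their addresses, then
-        free everything when the Pool itself is garbage collected.'''
-        def __init__(self):
-            self.addresses = []
-
-        def alloc(self, number, size):
-            ptr = libc.malloc(number * size)
-            self.addresses.append(ptr)
-            return ptr
-
-        def __del__(self):
-            for ptr in self.addresses:
-                libc.free(ptr)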
-
-p The “intermediate” way of writing Cython, using typed memory-views, allows you to use the Numpy multi-dimensional array features. However, to me it feels more complicated, and the applications I tend to write involve very sparse arrays — where, once again, I want to define my own data structures.
-
-+infobox("Note") I found a Russian translation of this post #[a(href="http://habrahabr.ru/company/mailru/blog/242533/" target="_blank") here]. I don’t know how accurate it is.
diff --git a/website/demos/_data.json b/website/demos/_data.json
deleted file mode 100644
index d40c286f8..000000000
--- a/website/demos/_data.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
- "index": {
- "title": "Demos",
- "demos": ["displacy", "sense2vec"]
- }
-}
diff --git a/website/demos/img/displacy.jpg b/website/demos/img/displacy.jpg
deleted file mode 100644
index 05f924fad..000000000
Binary files a/website/demos/img/displacy.jpg and /dev/null differ
diff --git a/website/demos/img/sense2vec.jpg b/website/demos/img/sense2vec.jpg
deleted file mode 100644
index 5996bf82d..000000000
Binary files a/website/demos/img/sense2vec.jpg and /dev/null differ
diff --git a/website/demos/index.jade b/website/demos/index.jade
deleted file mode 100644
index c27b38b3f..000000000
--- a/website/demos/index.jade
+++ /dev/null
@@ -1,15 +0,0 @@
-include ../_includes/_mixins
-
-//- Demos
-//- ============================================================================
-
-+grid('padding')
- each demo in demos
- - var teaser = public.demos[demo]._data.index
- - teaser.target = '_blank'
- - teaser.showmeta = false
-
- +grid-col('half', 'space-between')
- !=partial('../_includes/_teaser', { teaser: teaser, showmeta: false, slug: demo, _root: '/demos/' })
-
- !=partial('../_includes/_newsletter', { divider: 'top' })
diff --git a/website/docs/_annotation-specs.jade b/website/docs/_annotation-specs.jade
index 6d113916c..28e26e61e 100644
--- a/website/docs/_annotation-specs.jade
+++ b/website/docs/_annotation-specs.jade
@@ -1,98 +1,96 @@
-//- Docs > Annotation Specs
-//- ============================================================================
+//- ----------------------------------
+//- 💫 DOCS > ANNOTATION SPECS
+//- ----------------------------------
-+section('annotation')
- +h2('annotation').
++section("annotation")
+ +h(2, "annotation").
Annotation Specifications
p.
- This document describes the target annotations spaCy is trained to predict.
- This is currently a work in progress. Please ask questions on the
- #[a(href='https://github.com/' + profiles.github + '/spaCy/issues' target="_blank") issue tracker],
+ This document describes the target annotations spaCy is trained to predict.
+ This is currently a work in progress. Please ask questions on the
+ #[+a("https://github.com/" + SOCIAL.github + "/spaCy/issues") issue tracker],
so that the answers can be integrated here to improve the documentation.
- +section('annotation-tokenization')
- +h3('annotation-tokenization').
+ +section("annotation-tokenization")
+ +h(3, "annotation-tokenization").
Tokenization
p.
- Tokenization standards are based on the OntoNotes 5 corpus. The
- tokenizer differs from most by including tokens for significant
- whitespace. Any sequence of whitespace characters beyond a single
+ Tokenization standards are based on the OntoNotes 5 corpus. The
+ tokenizer differs from most by including tokens for significant
+ whitespace. Any sequence of whitespace characters beyond a single
space (' ') is included as a token. For instance:
+code.
from spacy.en import English
- nlp = English(parse=False)
+ nlp = English(parser=False)
tokens = nlp('Some\nspaces and\ttab characters')
print([t.orth_ for t in tokens])
- p.
- Which produces:
+ p Which produces:
+code.
['Some', '\n', 'spaces', ' ', 'and', '\t', 'tab', 'characters']
p.
- The whitespace tokens are useful for much the same reason punctuation
- is – it's often an important delimiter in the text. By preserving it
- in the token output, we are able to maintain a simple alignment between
- the tokens and the original string, and we ensure that no information
+ The whitespace tokens are useful for much the same reason punctuation
+ is – it's often an important delimiter in the text. By preserving it
+ in the token output, we are able to maintain a simple alignment between
+ the tokens and the original string, and we ensure that no information
is lost during processing.
- +section('annotation-sentence-boundary')
- +h3('annotation-sentence-boundary').
+ +section("annotation-sentence-boundary")
+ +h(3, "annotation-sentence-boundary").
Sentence boundary detection
p.
- Sentence boundaries are calculated from the syntactic parse tree, so
- features such as punctuation and capitalisation play an important but
- non-decisive role in determining the sentence boundaries. Usually
- this means that the sentence boundaries will at least coincide with
+ Sentence boundaries are calculated from the syntactic parse tree, so
+ features such as punctuation and capitalisation play an important but
+ non-decisive role in determining the sentence boundaries. Usually
+ this means that the sentence boundaries will at least coincide with
clause boundaries, even given poorly punctuated text.
- +section('annotation-pos-tagging')
- +h3('annotation-pos-tagging').
+ +section("annotation-pos-tagging")
+ +h(3, "annotation-pos-tagging").
Part-of-speech Tagging
p.
- The part-of-speech tagger uses the OntoNotes 5 version of the Penn
- Treebank tag set. We also map the tags to the simpler Google Universal
- POS Tag set. Details #[a(href='https://github.com/' + profiles.github + '/spaCy/blob/master/spacy/tagger.pyx' target='_blank') here].
+ The part-of-speech tagger uses the OntoNotes 5 version of the Penn
+ Treebank tag set. We also map the tags to the simpler Google Universal
+ POS Tag set. Details #[+a("https://github.com/" + SOCIAL.github + "/spaCy/blob/master/spacy/tagger.pyx") here].
-
- +section('annotation-lemmatization')
- +h3('annotation-lemmatization').
+ +section("annotation-lemmatization")
+ +h(3, "annotation-lemmatization").
Lemmatization
- p.
- A "lemma" is the uninflected form of a word. In English, this means:
+ p A "lemma" is the uninflected form of a word. In English, this means:
+list
+item #[strong Adjectives:] The form like "happy", not "happier" or "happiest"
+item #[strong Adverbs:] The form like "badly", not "worse" or "worst"
+item #[strong Nouns:] The form like "dog", not "dogs"; like "child", not "children"
+item #[strong Verbs:] The form like "write", not "writes", "writing", "wrote" or "written"
-
+
p.
- The lemmatization data is taken from WordNet. However, we also add a
- special case for pronouns: all pronouns are lemmatized to the special
+ The lemmatization data is taken from WordNet. However, we also add a
+ special case for pronouns: all pronouns are lemmatized to the special
token #[code -PRON-].
- +section('annotation-dependency')
- +h3('annotation-dependency').
+ +section("annotation-dependency")
+ +h(3, "annotation-dependency").
Syntactic Dependency Parsing
p.
- The parser is trained on data produced by the ClearNLP converter.
- Details of the annotation scheme can be found
- #[a(href='http://www.mathcs.emory.edu/~choi/doc/clear-dependency-2012.pdf' target='_blank') here].
+ The parser is trained on data produced by the ClearNLP converter.
+ Details of the annotation scheme can be found
+ #[+a("http://www.mathcs.emory.edu/~choi/doc/clear-dependency-2012.pdf") here].
- +section('annotation-ner')
- +h3('annotation-ner').
+ +section("annotation-ner")
+ +h(3, "annotation-ner").
Named Entity Recognition
- +table(['Entity Type', 'Description'], 'params')
+ +table(["Entity Type", "Description"])
+row
+cell PERSON
+cell People, including fictional.
@@ -102,8 +100,8 @@
+cell Nationalities or religious or political groups.
+row
- +cell FACILITY
- +cell Buildings, airports, highways, bridges, etc.
+ +cell FAC
+ +cell Facilities, such as buildings, airports, highways, bridges, etc.
+row
+cell ORG
@@ -137,10 +135,9 @@
+cell LANGUAGE
+cell Any named language
- p.
- The following values are also annotated in a style similar to names:
+ p The following values are also annotated in a style similar to names:
- +table(['Entity Type', 'Description'], 'params')
+ +table(["Entity Type", "Description"])
+row
+cell DATE
+cell Absolute or relative dates or periods
diff --git a/website/docs/_api-doc.jade b/website/docs/_api-doc.jade
index cb4f0b0f8..672c80574 100644
--- a/website/docs/_api-doc.jade
+++ b/website/docs/_api-doc.jade
@@ -1,22 +1,22 @@
-//- Docs > API > Doc
-//- ============================================================================
+//- ----------------------------------
+//- 💫 DOCS > API > DOC
+//- ----------------------------------
-+section('doc')
- +h2('doc', 'https://github.com/' + profiles.github + '/spaCy/blob/master/spacy/tokens/doc.pyx#L58')
- | #[+label('tag') class] Doc
++section("doc")
+ +h(2, "doc", "https://github.com/" + SOCIAL.github + "/spaCy/blob/master/spacy/tokens/doc.pyx")
+ | #[+tag class] Doc
p
- | A sequence of #[code Token] objects. Access sentences and named entities,
- | export annotations to numpy arrays, losslessly serialize to compressed
+ | A sequence of #[code Token] objects. Access sentences and named entities,
+ | export annotations to numpy arrays, losslessly serialize to compressed
| binary strings.
+aside.
- Internally, the #[code Doc] object holds an array of #[code TokenC] structs.
- The Python-level #[code Token] and #[code Span] objects are views of this
+ Internally, the #[code Doc] object holds an array of #[code TokenC] structs.
+ The Python-level #[code Token] and #[code Span] objects are views of this
array, i.e. they don't own the data themselves.
-
- +code('python', 'overview').
+ +code("python", "Overview").
class Doc:
def __init__(self, vocab, orths_and_spaces=None):
return self
@@ -29,7 +29,7 @@
yield Token()
def __len__(self):
return int
-
+
def __unicode__(self):
return unicode
def __bytes__(self):
@@ -65,7 +65,7 @@
def merge(self, start_char, end_char, tag, lemma, ent_type):
return None
-
+
def to_array(self, attr_ids):
return numpy.ndarray(shape=(len(self), len(attr_ids)), dtype='int64')
@@ -77,86 +77,85 @@
def from_array(self, attrs, array):
return None
-
+
def from_bytes(self, data):
return self
-
+
@staticmethod
def read_bytes(file_):
yield bytes
-
-
- +section('doc-init')
- +h3('doc-init')
- | #[+label('tag') method] Doc.__init__
+
+ +section("doc-init")
+ +h(3, "doc-init")
+ | #[+tag method] Doc.__init__
.has-aside
- +code('python', 'definition').
+ +code("python", "Definition").
def __init__(self, vocab, orths_and_spaces=None):
return Doc
- +aside('Implementation').
- This method of constructing a #[code Doc] object is usually only used
- for deserialization. Standard usage is to construct the document via
+ +aside("Implementation").
+ This method of constructing a #[code Doc] object is usually only used
+ for deserialization. Standard usage is to construct the document via
a call to the language object.
- +table(['Name', 'Type', 'Description'], 'params')
+ +table(["Name", "Type", "Description"])
+row
+cell vocab
+cell.
- A Vocabulary object, which must match any models you want to
+ A Vocabulary object, which must match any models you want to
use (e.g. tokenizer, parser, entity recognizer).
+row
- +cell orth_and_spaces
+ +cell orths_and_spaces
+cell.
- A list of tokens in the document as a sequence of
- #[code (orth_id, has_space)] tuples, where #[code orth_id]
+ A list of tokens in the document as a sequence of
+ #[code (orth_id, has_space)] tuples, where #[code orth_id]
is an integer and #[code has_space] is a boolean, indicating
whether the token has a trailing space.
- +section('doc-sequenceapi')
- +h3('doc-sequenceapi')
- | #[+label('tag') Section] Sequence API
+ +section("doc-sequenceapi")
+ +h(3, "doc-sequenceapi")
+ | #[+tag Section] Sequence API
- +table(['Example', 'Description'], 'code')
- +row
- +cell #[code.lang-python doc[i]]
+ +table(["Example", "Description"])
+ +row
+ +cell #[code doc[i]]
+cell.
- Get the Token object at position i, where i is an integer.
- Negative indexing is supported, and follows the usual Python
+ Get the Token object at position i, where i is an integer.
+ Negative indexing is supported, and follows the usual Python
semantics, i.e. doc[-2] is doc[len(doc) - 2].
+row
- +cell #[code.lang-python doc[start : end]]
+ +cell #[code doc[start : end]]
+cell.
Get a #[code Span] object, starting at position #[code start]
and ending at position #[code end], where #[code start] and
#[code end] are token indices. For instance,
- #[code doc[2:5]] produces a span consisting of
- tokens 2, 3 and 4. Stepped slices (e.g. #[code doc[start : end : step]])
- are not supported, as #[code Span] objects must be contiguous
+ #[code doc[2:5]] produces a span consisting of
+ tokens 2, 3 and 4. Stepped slices (e.g. #[code doc[start : end : step]])
+ are not supported, as #[code Span] objects must be contiguous
(cannot have gaps). You can use negative indices and open-ended
ranges, which have their normal Python semantics.
+row
- +cell #[code.lang-python for token in doc]
+ +cell #[code for token in doc]
+cell.
- Iterate over Token objects, from which the annotations can
- be easily accessed. This is the main way of accessing Token
- objects, which are the main way annotations are accessed from
- Python. If faster-than-Python speeds are required, you can
- instead access the annotations as a numpy array, or access the
+ Iterate over Token objects, from which the annotations can
+ be easily accessed. This is the main way of accessing Token
+ objects, which are the main way annotations are accessed from
+ Python. If faster-than-Python speeds are required, you can
+ instead access the annotations as a numpy array, or access the
underlying C data directly from Cython.
+row
- +cell #[code.lang-python len(doc)]
+ +cell #[code len(doc)]
+cell.
The number of tokens in the document.
- +section('doc-spans')
- +h3('doc-spans-sents')
- | #[+label('tag') property] Doc.sents
+ +section("doc-spans")
+ +h(3, "doc-spans-sents")
+ | #[+tag property] Doc.sents
p.
Yields sentence #[code Span] objects. Sentence spans have no label.
@@ -164,21 +163,21 @@
boundaries from the syntactic dependency parse. If the parser is disabled,
the #[code sents] iterator will be unavailable.
- +code('python', 'Example').
+ +code("python", "Example").
from spacy.en import English
nlp = English()
doc = nlp("This is a sentence. Here's another...")
assert [s.root.orth_ for s in doc.sents] == ["is", "'s"]
- +h3('doc-spans-ents')
- | #[+label('tag') property] Doc.ents
+ +h(3, "doc-spans-ents")
+ | #[+tag property] Doc.ents
p.
Yields named-entity #[code Span] objects, if the entity recognizer
- has been applied to the document. Iterate over the span to get
+ has been applied to the document. Iterate over the span to get
individual Token objects, or access the label:
- +code('python', 'Example').
+ +code("python", "Example").
from spacy.en import English
nlp = English()
tokens = nlp(u'Mr. Best flew to New York on Saturday morning.')
@@ -186,114 +185,109 @@
assert ents[0].label == 346
assert ents[0].label_ == 'PERSON'
assert ents[0].orth_ == 'Best'
- assert ents[0].string == ents[0].string
+        assert ents[0].text == 'Best'
- +h3('doc-spans-nounchunks')
- | #[+label('tag') property] Doc.noun_chunks
+ +h(3, "doc-spans-nounchunks")
+ | #[+tag property] Doc.noun_chunks
p.
Yields base noun-phrase #[code Span] objects, if the document
- has been syntactically parsed. A base noun phrase, or
- 'NP chunk', is a noun phrase that does not permit other NPs to
- be nested within it – so no NP-level coordination, no prepositional
+ has been syntactically parsed. A base noun phrase, or
+ 'NP chunk', is a noun phrase that does not permit other NPs to
+ be nested within it – so no NP-level coordination, no prepositional
phrases, and no relative clauses. For example:
- +code('python', 'Example').
+ +code("python", "Example").
from spacy.en import English
nlp = English()
doc = nlp(u'The sentence in this example has three noun chunks.')
for chunk in doc.noun_chunks:
print(chunk.label_, chunk.orth_, '<--', chunk.root.head.orth_)
- +section('doc-exportimport-toarray')
- +h3('doc-exportimport-toarray')
- | #[+label('tag') method] Doc.to_array
-
+ +section("doc-exportimport-toarray")
+ +h(3, "doc-exportimport-toarray")
+ | #[+tag method] Doc.to_array
+
p.
- Given a list of M attribute IDs, export the tokens to a numpy
- #[code ndarray] of shape #[code N*M], where #[code N] is the length
+ Given a list of M attribute IDs, export the tokens to a numpy
+ #[code ndarray] of shape #[code N*M], where #[code N] is the length
        of the document. The values will be 64-bit integers.
- +code('python', 'Example').
+ +code("python", "Example").
from spacy import attrs
doc = nlp(text)
# All strings mapped to integers, for easy export to numpy
np_array = doc.to_array([attrs.LOWER, attrs.POS, attrs.ENT_TYPE, attrs.IS_ALPHA])
-
- +code('python', 'definition').
+
+ +code("python", "Definition").
def to_array(self, attr_ids):
return numpy.ndarray(shape=(len(self), len(attr_ids)), dtype='int64')
-
- +table(['Name', 'Type', 'Description'], 'params')
+
+ +table(["Name", "Type", "Description"])
+row
+cell attr_ids
+cell list of ints
+cell.
- A list of attribute ID ints. Attribute IDs can be imported
+ A list of attribute ID ints. Attribute IDs can be imported
from #[code spacy.attrs] or #[code spacy.symbols].
- +section('doc-exportimport-countby')
- +h4('doc-exportimport-countby')
- | #[+label('tag') method] Doc.count_by
+ +section("doc-exportimport-countby")
+ +h(4, "doc-exportimport-countby")
+ | #[+tag method] Doc.count_by
p.
- Produce a dict of #[code {attribute (int): count (ints)}] frequencies,
+ Produce a dict of #[code {attribute (int): count (ints)}] frequencies,
keyed by the values of the given attribute ID.
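+
+        p.
+            For example, to count occurrences of each word form (assuming
+            #[code nlp] is loaded):
+
+        +code("python", "Example").
+            from spacy.attrs import ORTH
+            doc = nlp(u'The cat sat on the mat.')
+            counts = doc.count_by(ORTH)
+            assert counts[nlp.vocab.strings[u'the']] == 1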
- +code('python', 'Example').
+ +code("python", "Example").
def count_by(self, attr_id):
return dict
- +table(['Name', 'Type', 'Description'], 'params')
+ +table(["Name", "Type", "Description"])
+row
+cell attr_id
+cell int
+cell.
The attribute ID to key the counts.
+ +section("doc-exportimport-fromarray")
+ +h(4, "doc-exportimport-fromarray")
+ | #[+tag method] Doc.from_array
- +section('doc-exportimport-fromarray')
- +h4('doc-exportimport-fromarray')
- | #[+label('tag') method] Doc.from_array
-
- p.
- Write to a #[code Doc] object, from an M*N array of attributes.
+ p Write to a #[code Doc] object, from an M*N array of attributes.
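+
+    p.
+        For example, a round-trip through #[code to_array] and
+        #[code from_array] (assuming #[code doc] from the examples above):
+
+    +code("python", "Example").
+        from spacy.attrs import LOWER, POS
+        attr_ids = [LOWER, POS]
+        array = doc.to_array(attr_ids)
+        doc.from_array(attr_ids, array)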
- +code('python', 'definition').
+ +code("python", "Definition").
def from_array(self, attrs, array):
return None
-
- +section('doc-exportimport-frombytes')
- +h4('doc-exportimport-frombytes') Doc.from_bytes
-
- p.
- Deserialize, loading from bytes.
- +code('python', 'definition').
+ +section("doc-exportimport-frombytes")
+ +h(4, "doc-exportimport-frombytes") Doc.from_bytes
+
+ p Deserialize, loading from bytes.
+
+ +code("python", "Definition").
def from_bytes(self, byte_string):
return Doc
- +section('doc-exportimport-tobytes')
- +h4('doc-exportimport-tobytes')
- | #[+label('tag') method] Doc.to_bytes
-
- p.
- Serialize, producing a byte string.
+ +section("doc-exportimport-tobytes")
+ +h(4, "doc-exportimport-tobytes")
+ | #[+tag method] Doc.to_bytes
- +code('python', 'definition').
+ p Serialize, producing a byte string.
+
+ +code("python", "Definition").
def to_bytes(self):
return bytes
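+
+        p.
+            For example, a simple serialization round-trip (assuming
+            #[code nlp] is loaded):
+
+        +code("python", "Example").
+            from spacy.tokens.doc import Doc
+            doc = nlp(u'Serialize me.')
+            byte_string = doc.to_bytes()
+            doc2 = Doc(nlp.vocab).from_bytes(byte_string)
+            assert doc2[0].orth_ == doc[0].orth_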
-
- +section('doc-exportimport-readbytes')
- +h4('doc-exportimport-readbytes')
- | #[+label('tag') method] Doc.read_bytes
+ +section("doc-exportimport-readbytes")
+ +h(4, "doc-exportimport-readbytes")
+ | #[+tag method] Doc.read_bytes
p.
- A static method, used to read serialized #[code Doc] objects from
+ A static method, used to read serialized #[code Doc] objects from
a file. For example:
- +code('python', 'Example').
+ +code("python", "Example").
from spacy.tokens.doc import Doc
loc = 'test_serialize.bin'
with open(loc, 'wb') as file_:
@@ -305,9 +299,7 @@
docs.append(Doc(nlp.vocab).from_bytes(byte_string))
assert len(docs) == 2
- +code('python', 'definition').
+ +code("python", "Definition").
@staticmethod
def read_bytes(file_):
yield bytes
-
-
diff --git a/website/docs/_api-english.jade b/website/docs/_api-english.jade
index 5fa6d36ff..2a951a8a4 100644
--- a/website/docs/_api-english.jade
+++ b/website/docs/_api-english.jade
@@ -1,20 +1,21 @@
-//- Docs > API > English
-//- ============================================================================
+//- ----------------------------------
+//- 💫 DOCS > API > ENGLISH
+//- ----------------------------------
-+section('english')
- +h2('english', 'https://github.com/' + profiles.github + '/spaCy/blob/master/spacy/language.py#L40')
- | #[+label('tag') class] English(Language)
++section("english")
+ +h(2, "english", "https://github.com/" + SOCIAL.github + "/spaCy/blob/master/spacy/language.py")
+ | #[+tag class] English(Language)
p.
- The English analysis pipeline. Usually you'll load this once per process,
+ The English analysis pipeline. Usually you"ll load this once per process,
and pass the instance around your program.
-
- +code('python', 'overview').
+
+ +code("python", "Overview").
class Language:
lang = None
def __init__(self, data_dir=None, tokenizer=None, tagger=None, parser=None, entity=None, matcher=None):
return self
-
+
def __call__(self, text, tag=True, parse=True, entity=True):
return Doc()
@@ -25,28 +26,28 @@
return None
class English(Language):
- lang = 'en'
+ lang = "en"
- # class German(Language): <-- Coming soon
- # lang = 'de'
+ class German(Language):
+ lang = "de"
+
+ +section("english-init")
+ +h(3, "english-init")
+ | #[+tag method] English.__init__
- +section('english-init')
- +h3('english-init')
- | #[+label('tag') method] English.__init__
-
p
| Load the pipeline. Each component can be passed
| as an argument, or left as #[code None], in which case it will be loaded
| from a classmethod, named e.g. #[code default_vocab()].
-
+
+aside("Efficiency").
- Loading takes 10-20 seconds, and the instance consumes 2 to 3
+ Loading takes 10-20 seconds, and the instance consumes 2 to 3
gigabytes of memory. Intended use is for one instance to be
created for each language per process, but you can create more
- if you're doing something unusual. You may wish to make the
- instance a global variable or 'singleton'.
+ if you"re doing something unusual. You may wish to make the
+ instance a global variable or "singleton".
- +table(['Example', 'Description'], 'code')
+ +table(["Example", "Description"])
+row
+cell #[code.lang-python nlp = English()]
+cell Load everything, from default package
@@ -67,56 +68,56 @@
+cell #[code.lang-python nlp = English(parser=MyParser())]
+cell Supply your own parser
- +code('python', 'Definition').
+ +code("python", "Definition").
def __init__(self, data_dir=None, tokenizer=None, tagger=None, parser=None, entity=None, matcher=None):
return self
-
- +table(['Arg', 'Type', 'Description'], 'params')
+
+ +table(["Arg", "Type", "Description"])
+row
+cell data_dir
+cell str
+cell.
- The data directory. If None, value is obtained via the
+ The data directory. If None, value is obtained via the
#[code default_data_dir()] method.
+row
+cell vocab
+cell #[code Vocab]
+cell.
- The vocab object, which should be an instance of class
- #[code spacy.vocab.Vocab]. If #[code None], the object is
- obtained from the #[code default_vocab()] class method. The
- vocab object manages all of the language specific rules and
- definitions, maintains the cache of lexical types, and manages
- the word vectors. Because the vocab owns this important data,
+ The vocab object, which should be an instance of class
+ #[code spacy.vocab.Vocab]. If #[code None], the object is
+ obtained from the #[code default_vocab()] class method. The
+ vocab object manages all of the language specific rules and
+ definitions, maintains the cache of lexical types, and manages
+ the word vectors. Because the vocab owns this important data,
most objects hold a reference to the vocab.
+row
+cell tokenizer
+cell #[code Tokenizer]
+cell.
- The tokenizer, which should be a callable that accepts a
- unicode string, and returns a #[code Doc] object. If set to
- #[code None], the default tokenizer is constructed from the
+ The tokenizer, which should be a callable that accepts a
+ unicode string, and returns a #[code Doc] object. If set to
+ #[code None], the default tokenizer is constructed from the
#[code default_tokenizer()] method.
+row
+cell tagger
+cell #[code Tagger]
+cell.
- The part-of-speech tagger, which should be a callable that
- accepts a #[code Doc] object, and sets the part-of-speech
- tags in-place. If set to None, the default tagger is constructed
+ The part-of-speech tagger, which should be a callable that
+ accepts a #[code Doc] object, and sets the part-of-speech
+ tags in-place. If set to None, the default tagger is constructed
from the #[code default_tagger()] method.
+row
+cell parser
+cell #[code Parser]
+cell.
- The dependency parser, which should be a callable that accepts
+ The dependency parser, which should be a callable that accepts
a #[code Doc] object, and sets the sentence boundaries,
syntactic heads and dependency labels in-place.
- If set to #[code None], the default parser is
+ If set to #[code None], the default parser is
constructed from the #[code default_parser()] method. To disable
the parser and prevent it from being loaded, pass #[code parser=False].
@@ -124,9 +125,9 @@
+cell entity
+cell #[code Parser]
+cell.
- The named entity recognizer, which should be a callable that
- accepts a #[code Doc] object, and sets the named entity annotations
- in-place. If set to None, the default entity recognizer is
+ The named entity recognizer, which should be a callable that
+ accepts a #[code Doc] object, and sets the named entity annotations
+ in-place. If set to None, the default entity recognizer is
constructed from the #[code default_entity()] method. To disable
the entity recognizer and prevent it from being loaded, pass
#[code entity=False].
@@ -135,26 +136,26 @@
+cell matcher
+cell #[code Matcher]
+cell.
- The pattern matcher, which should be a callable that accepts
+ The pattern matcher, which should be a callable that accepts
a #[code Doc] object, and sets named entity annotations in-place
- using token-based rules. If set
- to None, the default matcher is constructed from the
+ using token-based rules. If set
+ to None, the default matcher is constructed from the
#[code default_matcher()] method.
- +section('english-call')
- +h3('english-call')
- | #[+label('tag') method] English.__call__
+ +section("english-call")
+ +h(3, "english-call")
+ | #[+tag method] English.__call__
p
- | The main entry point to spaCy. Takes raw unicode text, and returns
- | a #[code Doc] object, which can be iterated to access #[code Token]
+ | The main entry point to spaCy. Takes raw unicode text, and returns
+ | a #[code Doc] object, which can be iterated to access #[code Token]
| and #[code Span] objects.
-
+
+aside("Efficiency").
- spaCy's algorithms are all linear-time, so you can supply
+ spaCy"s algorithms are all linear-time, so you can supply
documents of arbitrary length, e.g. whole novels.
- +table(['Example', 'Description'], 'code')
+ +table(["Example", "Description"], "code")
+row
+cell #[code.lang-python doc = nlp(u'Some text.')]
+cell Apply the full pipeline.
@@ -177,97 +178,97 @@
+cell #[code.lang-python doc = nlp(b'Some text'.decode('utf8'))]
+cell Decode bytes into unicode first.
- +code('python', 'Definition').
+ +code("python", "Definition").
def __call__(self, text, tag=True, parse=True, entity=True, matcher=True):
return self
- +table(['Name', 'Type', 'Description'], 'params')
+ +table(["Name", "Type", "Description"])
+row
+cell text
- +cell #[a(href=link_unicode target='_blank') unicode]
+ +cell #[+a(link_unicode) unicode]
+cell.
- The text to be processed. spaCy expects raw unicode text
- – you don't necessarily need to, say, split it into paragraphs.
- However, depending on your documents, you might be better
- off applying custom pre-processing. Non-text formatting,
- e.g. from HTML mark-up, should be removed before sending
- the document to spaCy. If your documents have a consistent
- format, you may be able to improve accuracy by pre-processing.
- For instance, if the first word of your documents are always
- in upper-case, it may be helpful to normalize them before
+ The text to be processed. spaCy expects raw unicode text
+ – you don"t necessarily need to, say, split it into paragraphs.
+ However, depending on your documents, you might be better
+ off applying custom pre-processing. Non-text formatting,
+ e.g. from HTML mark-up, should be removed before sending
+ the document to spaCy. If your documents have a consistent
+ format, you may be able to improve accuracy by pre-processing.
+ For instance, if the first word of your documents are always
+ in upper-case, it may be helpful to normalize them before
supplying them to spaCy.
+row
+cell tag
- +cell #[a(href=link_bool target='_blank') bool]
+ +cell #[+a(link_bool) bool]
+cell.
- Whether to apply the part-of-speech tagger. Required for
+ Whether to apply the part-of-speech tagger. Required for
parsing and entity recognition.
+row
+cell parse
- +cell #[a(href=link_bool target='_blank') bool]
+ +cell #[+a(link_bool) bool]
+cell.
Whether to apply the syntactic dependency parser.
+row
+cell entity
- +cell #[a(href=link_bool target='_blank') bool]
+ +cell #[+a(link_bool) bool]
+cell.
Whether to apply the named entity recognizer.
- +section('english-pipe')
- +h3('english-pipe')
- | #[+label('tag') method] English.pipe
+ +section("english-pipe")
+ +h(3, "english-pipe")
+ | #[+tag method] English.pipe
p
- | Parse a sequence of texts into a sequence of #[code Doc] objects.
- | Accepts a generator as input, and produces a generator as output.
- | Internally, it accumulates a buffer of #[code batch_size]
- | texts, works on them with #[code n_threads] workers in parallel,
+ | Parse a sequence of texts into a sequence of #[code Doc] objects.
+ | Accepts a generator as input, and produces a generator as output.
+ | Internally, it accumulates a buffer of #[code batch_size]
+ | texts, works on them with #[code n_threads] workers in parallel,
| and then yields the #[code Doc] objects one by one.
-
- +aside('Efficiency').
- spaCy releases the global interpreter lock around the parser and
- named entity recognizer, allowing shared-memory parallelism via
- OpenMP. However, OpenMP is not supported on OSX — so multiple
+
+ +aside("Efficiency").
+ spaCy releases the global interpreter lock around the parser and
+ named entity recognizer, allowing shared-memory parallelism via
+ OpenMP. However, OpenMP is not supported on OSX — so multiple
threads will only be used on Linux and Windows.
- +table(["Example", "Description"], 'usage')
+ +table(["Example", "Description"], "usage")
+row
- +cell #[a(href='https://github.com/' + profiles.github + '/spaCy/blob/master/examples/parallel_parse.py' target='_blank') parallel_parse.py]
+ +cell #[+a("https://github.com/" + SOCIAL.github + "/spaCy/blob/master/examples/parallel_parse.py") parallel_parse.py]
+cell Parse comments from Reddit in parallel.
- +code('python', 'Definition').
+ +code("python", "Definition").
def pipe(self, texts, n_threads=2, batch_size=1000):
yield Doc()
- +table(['Arg', 'Type', 'Description'], 'params')
+ +table(["Arg", "Type", "Description"])
+row
+cell texts
+cell
+cell.
- A sequence of unicode objects. Usually you will want this
- to be a generator, so that you don't need to have all of
+ A sequence of unicode objects. Usually you will want this
+ to be a generator, so that you don"t need to have all of
your texts in memory.
+row
+cell n_threads
- +cell #[a(href=link_int target='_blank') int]
+ +cell #[+a(link_int) int]
+cell.
- The number of worker threads to use. If -1, OpenMP will
+ The number of worker threads to use. If -1, OpenMP will
decide how many to use at run time. Default is 2.
+row
+cell batch_size
- +cell #[a(href=link_int target='_blank') int]
+ +cell #[+a(link_int) int]
+cell.
- The number of texts to buffer. Let's say you have a
- #[code batch_size] of 1,000. The input, #[code texts], is
- a generator that yields the texts one-by-one. We want to
- operate on them in parallel. So, we accumulate a work queue.
- Instead of taking one document from #[code texts] and
- operating on it, we buffer #[code batch_size] documents,
- work on them in parallel, and then yield them one-by-one.
+ The number of texts to buffer. Let"s say you have a
+ #[code batch_size] of 1,000. The input, #[code texts], is
+ a generator that yields the texts one-by-one. We want to
+ operate on them in parallel. So, we accumulate a work queue.
+ Instead of taking one document from #[code texts] and
+ operating on it, we buffer #[code batch_size] documents,
+ work on them in parallel, and then yield them one-by-one.
Higher #[code batch_size] therefore often results in better
parallelism, up to a point.
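+
+        p.
+            A minimal sketch (assuming #[code nlp] is loaded; in practice
+            #[code texts] would usually be a generator, e.g. reading from a file):
+
+        +code("python", "Example").
+            texts = [u'One document.', u'Another document.', u'And a third.']
+            for doc in nlp.pipe(texts, n_threads=4, batch_size=100):
+                assert doc.is_parsed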
diff --git a/website/docs/_api-lexeme.jade b/website/docs/_api-lexeme.jade
index 2ae6b24b5..8e51a9687 100644
--- a/website/docs/_api-lexeme.jade
+++ b/website/docs/_api-lexeme.jade
@@ -1,40 +1,41 @@
-//- Docs > API > Lexeme
-//- ============================================================================
+//- ----------------------------------
+//- 💫 DOCS > API > LEXEME
+//- ----------------------------------
-+section('lexeme')
- +h2('lexeme', 'https://github.com/' + profiles.github + '/spaCy/blob/master/spacy/lexeme.pyx#L31')
- | #[+label('tag') class] Lexeme
++section("lexeme")
+ +h(2, "lexeme", "https://github.com/" + SOCIAL.github + "/spaCy/blob/master/spacy/lexeme.pyx")
+ | #[+tag class] Lexeme
p.
- The Lexeme object represents a lexical type, stored in the vocabulary –
+ The Lexeme object represents a lexical type, stored in the vocabulary –
as opposed to a token, occurring in a document.
p.
- Each Token object receives a reference to a lexeme object (specifically,
- it receives a pointer to a #[code LexemeC] struct). This allows features
- to be computed and saved once per type, rather than once per token. As
- job sizes grow, this amounts to substantial efficiency improvements, as
- the vocabulary size (number of types) will be much smaller than the total
+ Each Token object receives a reference to a lexeme object (specifically,
+ it receives a pointer to a #[code LexemeC] struct). This allows features
+ to be computed and saved once per type, rather than once per token. As
+ job sizes grow, this amounts to substantial efficiency improvements, as
+ the vocabulary size (number of types) will be much smaller than the total
number of words processed (number of tokens).
p.
- All Lexeme attributes are therefore context independent, as a single lexeme
- is reused for all usages of that word. Lexemes are keyed by the #[code orth]
+ All Lexeme attributes are therefore context independent, as a single lexeme
+ is reused for all usages of that word. Lexemes are keyed by the #[code orth]
attribute.
p.
- Most Lexeme attributes can be set, with the exception of the primary key,
- #[code orth]. Assigning to an attribute of the #[code Lexeme] object writes
- to the underlying struct, so all tokens that are backed by that
+ Most Lexeme attributes can be set, with the exception of the primary key,
+ #[code orth]. Assigning to an attribute of the #[code Lexeme] object writes
+ to the underlying struct, so all tokens that are backed by that
#[code Lexeme] will inherit the new value.
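+
+    p.
+        For example, assuming the default English vocabulary:
+
+    +code("python", "Example").
+        from spacy.en import English
+        nlp = English()
+        hello = nlp.vocab[u'hello']
+        assert hello.orth_ == u'hello'
+        assert hello.lower_ == u'hello'
+        assert hello.shape_ == u'xxxx'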
- +code('python', 'Overview').
+ +code("python", "Overview").
class Lexeme:
def __init__(self, vocab, key):
return self
int rank
-
+
int orth, lower, shape, prefix, suffix
unicode orth_, lower_, shape_, prefix_, suffix_
@@ -55,7 +56,7 @@
def similarity(self, other):
return float
- +table(['Example', 'Description'], 'code')
+ +table(["Example", "Description"])
+row
+cell #[code.lang-python lexeme = nlp.vocab[string]]
+cell Lookup by string
@@ -63,139 +64,128 @@
+cell #[code.lang-python lexeme = vocab[i]]
+cell Lookup by integer
- +section('lexeme-stringfeatures')
- +h3('lexeme-stringfeatures').
+ +section("lexeme-stringfeatures")
+ +h(3, "lexeme-stringfeatures").
String Features
- +table(['Name', 'Description'], 'params')
+ +table(["Name", "Description"])
+row
+cell orth / orth_
+cell.
- The form of the word with no string normalization or processing,
+ The form of the word with no string normalization or processing,
as it appears in the string, without trailing whitespace.
+row
+cell lower / lower_
+cell.
- The form of the word, but forced to lower-case, i.e.
+ The form of the word, but forced to lower-case, i.e.
#[code lower = word.orth_.lower()]
+row
+cell shape / shape_
+cell.
- A transform of the word's string, to show orthographic features.
- The characters a-z are mapped to x, A-Z is mapped to X, 0-9
- is mapped to d. After these mappings, sequences of 4 or more
- of the same character are truncated to length 4. Examples:
+ A transform of the word's string, to show orthographic features.
+ The characters a-z are mapped to x, A-Z is mapped to X, 0-9
+ is mapped to d. After these mappings, sequences of 4 or more
+ of the same character are truncated to length 4. Examples:
C3Po --> XdXx, favorite --> xxxx, :) --> :)
+row
+cell prefix / prefix_
+cell.
- A length-N substring from the start of the word. Length may
- vary by language; currently for English n=1, i.e.
+ A length-N substring from the start of the word. Length may
+ vary by language; currently for English n=1, i.e.
#[code prefix = word.orth_[:1]]
+row
+cell suffix / suffix_
+cell.
- A length-N substring from the end of the word. Length may vary
- by language; currently for English n=3, i.e.
+ A length-N substring from the end of the word. Length may vary
+ by language; currently for English n=3, i.e.
#[code suffix = word.orth_[-3:]]
- +section('lexeme-booleanflags')
- +h3('lexeme-booleanflags')
+ +section("lexeme-booleanflags")
+ +h(3, "lexeme-booleanflags")
| Boolean Flags
- +table(['Name', 'Description'], 'params')
+ +table(["Name", "Description"])
+row
+cell is_alpha
- +cell.
- Equivalent to #[code word.orth_.isalpha()]
+ +cell Equivalent to #[code word.orth_.isalpha()]
+row
+cell is_ascii
- +cell.
- Equivalent to any(ord(c) >= 128 for c in word.orth_)]
+            +cell Equivalent to #[code all(ord(c) < 128 for c in word.orth_)]
+row
+cell is_digit
- +cell.
- Equivalent to #[code word.orth_.isdigit()]
+ +cell Equivalent to #[code word.orth_.isdigit()]
+row
+cell is_lower
- +cell.
- Equivalent to #[code word.orth_.islower()]
+ +cell Equivalent to #[code word.orth_.islower()]
+row
+cell is_title
- +cell.
- Equivalent to #[code word.orth_.istitle()]
+ +cell Equivalent to #[code word.orth_.istitle()]
+row
+cell is_punct
- +cell.
- Equivalent to #[code word.orth_.ispunct()]
+ +cell Equivalent to #[code word.orth_.ispunct()]
+row
+cell is_space
- +cell.
- Equivalent to #[code word.orth_.isspace()]
+ +cell Equivalent to #[code word.orth_.isspace()]
+row
+cell like_url
- +cell.
- Does the word resemble a URL?
+ +cell Does the word resemble a URL?
+row
+cell like_num
- +cell.
- Does the word represent a number? e.g. “10.9”, “10”, “ten”, etc.
+ +cell Does the word represent a number? e.g. “10.9”, “10”, “ten”, etc.
+row
+cell like_email
- +cell.
- Does the word resemble an email?
+ +cell Does the word resemble an email?
+row
+cell is_oov
- +cell.
- Is the word out-of-vocabulary?
+ +cell Is the word out-of-vocabulary?
+row
+cell is_stop
+cell.
- Is the word part of a "stop list"? Stop lists are used to
- improve the quality of topic models, by filtering out common,
+ Is the word part of a "stop list"? Stop lists are used to
+ improve the quality of topic models, by filtering out common,
domain-general words.
- +section('lexeme-distributional')
- +h3('lexeme-distributional')
+ +section("lexeme-distributional")
+ +h(3, "lexeme-distributional")
| Distributional Features
- +table(['Name', 'Description'], 'params')
+ +table(["Name", "Description"])
+row
+cell prob
+cell.
- The unigram log-probability of the word, estimated from
- counts from a large corpus, smoothed using Simple Good Turing
+ The unigram log-probability of the word, estimated from
+ counts from a large corpus, smoothed using Simple Good Turing
estimation.
+row
+cell cluster
+cell.
- The Brown cluster ID of the word. These are often useful features
- for linear models. If you’re using a non-linear model, particularly
- a neural net or random forest, consider using the real-valued
+ The Brown cluster ID of the word. These are often useful features
+ for linear models. If you’re using a non-linear model, particularly
+ a neural net or random forest, consider using the real-valued
word representation vector, in #[code Token.repvec], instead.
+row
+cell vector
+cell.
- A "word embedding" representation: a dense real-valued vector
- that supports similarity queries between words. By default,
- spaCy currently loads vectors produced by the Levy and
+ A "word embedding" representation: a dense real-valued vector
+ that supports similarity queries between words. By default,
+ spaCy currently loads vectors produced by the Levy and
Goldberg (2014) dependency-based word2vec model.
+row
diff --git a/website/docs/_api-matcher.jade b/website/docs/_api-matcher.jade
index 69fa2d988..eb5f71029 100644
--- a/website/docs/_api-matcher.jade
+++ b/website/docs/_api-matcher.jade
@@ -1,31 +1,29 @@
-//- Docs > API > Matcher
-//- ============================================================================
+//- ----------------------------------
+//- 💫 DOCS > API > MATCHER
+//- ----------------------------------
-+section('matcher')
- +h2('matcher', 'https://github.com/' + profiles.github + '/spaCy/blob/master/spacy/matcher.pyx#L165')
- | #[+label('tag') class] Matcher
++section("matcher")
+ +h(2, "matcher", "https://github.com/" + SOCIAL.github + "/spaCy/blob/master/spacy/matcher.pyx")
+ | #[+tag class] Matcher
- p A full example can be found #[a(href="https://github.com/" + profiles.github + "blob/master/examples/matcher_example.py") here].
+    p A full example can be found #[+a("https://github.com/" + SOCIAL.github + "/spaCy/blob/master/examples/matcher_example.py") here].
- +table(['Usage', 'Description'], 'code')
+ +table(["Usage", "Description"])
+row
+cell #[code.lang-python nlp(doc)]
- +cell.
- As part of annotation pipeline.
+ +cell As part of annotation pipeline.
+row
+cell #[code.lang-python nlp.matcher(doc)]
- +cell.
- Explicit invocation.
+ +cell Explicit invocation.
+row
+cell #[code.lang-python nlp.matcher.add(u'FooCorp', u'ORG', {}, [[{u'ORTH': u'Foo'}]])]
- +cell.
- Add a pattern to match.
+ +cell Add a pattern to match.
- +section('matcher-init')
- +h3('matcher-init') __init__(self, vocab, patterns)
- +table(['Name', 'Type', 'Description'], 'params')
+ +section("matcher-init")
+ +h(3, "matcher-init") __init__(self, vocab, patterns)
+ +table(["Name", "Type", "Description"])
+row
+cell vocab
+cell #[code.lang-python spacy.vocab.Vocab]
@@ -36,10 +34,10 @@
+cell #[code {entity_key: (etype, attrs, specs)}]
+cell.
Initial patterns to match. See #[code Matcher.add]
-
- +section('matcher-add')
- +h3('matcher-add') add(self, entity_key, etype, attrs, specs)
- +table(['Name', 'Type', 'Description'], 'params')
+
+ +section("matcher-add")
+ +h(3, "matcher-add") add(self, entity_key, etype, attrs, specs)
+ +table(["Name", "Type", "Description"])
+row
+cell entity_key
+cell unicode or int
@@ -57,26 +55,25 @@
+cell #[code [[{int: unicode}]]]
+cell A list of surface forms, where each surface form is defined as a list of token definitions, and each token definition is a dictionary mapping attribute IDs to attribute values.
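+
+        p.
+            For example, assuming a loaded pipeline #[code nlp] (mirroring
+            the usage table above):
+
+        +code("python", "Example").
+            nlp.matcher.add(u'FooCorp', u'ORG', {}, [[{u'ORTH': u'Foo'}]])
+            doc = nlp(u'Foo makes NLP software.')
+            print([(ent.orth_, ent.label_) for ent in doc.ents])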
- +section('matcher-saveload')
- +h3('matcher-saveload')
+ +section("matcher-saveload")
+ +h(3, "matcher-saveload")
| Save and Load
- +section('matcher-saveload-dump')
- +h4('matcher-saveload-dump') dump(loc)
+ +section("matcher-saveload-dump")
+ +h(4, "matcher-saveload-dump") dump(loc)
- +table(['Name', 'Type', 'Description'], 'params')
+ +table(["Name", "Type", "Description"])
+row
+cell loc
- +cell #[a(href=link_unicode target='_blank') unicode]
- +cell.
- Path to save the gazetteer.json file.
+ +cell #[+a(link_unicode) unicode]
+ +cell Path to save the gazetteer.json file.
- +section('matcher-saveload-load')
- +h4('matcher-saveload-load') load(loc)
+ +section("matcher-saveload-load")
+ +h(4, "matcher-saveload-load") load(loc)
- +table(['Name', 'Type', 'Description'], 'params')
+ +table(["Name", "Type", "Description"])
+row
+cell loc
- +cell #[a(href=link_unicode target='_blank') unicode]
+ +cell #[+a(link_unicode) unicode]
+cell.
Path to load the gazetteer.json file from.
diff --git a/website/docs/_api-span.jade b/website/docs/_api-span.jade
index e0d603e3c..607869fdc 100644
--- a/website/docs/_api-span.jade
+++ b/website/docs/_api-span.jade
@@ -1,22 +1,23 @@
-//- Docs > API > Span
-//- ============================================================================
+//- ----------------------------------
+//- 💫 DOCS > API > SPAN
+//- ----------------------------------
-+section('span')
- +h2('span', 'https://github.com/' + profiles.github + '/spaCy/blob/master/spacy/tokens/span.pyx#L19')
- | #[+label('tag') class] Span
++section("span")
+ +h(2, "span", "https://github.com/" + SOCIAL.github + "/spaCy/blob/master/spacy/tokens/span.pyx")
+ | #[+tag class] Span
- p
- | A slice of a #[code Doc] object, consisting of zero or
- | more tokens. Spans are usually used to represent sentences, named entities,
- | phrases.
-
- +aside('Implementation')
- #[code Span] objects are views – that is, they do not copy the
- underlying C data. This makes them cheap to construct, as internally are
- simply a reference to the #[code Doc] object, a start position, an end
+ p.
+        A slice of a #[code Doc] object, consisting of zero or
+        more tokens. Spans are usually used to represent sentences, named
+        entities and phrases.
+
+ +aside("Implementation").
+        #[code Span] objects are views – that is, they do not copy the
+        underlying C data. This makes them cheap to construct, as internally
+        they are simply a reference to the #[code Doc] object, a start position, an end
position, and a label ID.
- +code('python', 'Overview').
+ +code("python", "Overview").
class Span:
doc = Doc
start = int
@@ -38,14 +39,14 @@
def merge(self, tag, lemma, ent_type):
return None
-
+
@property
def label_(self):
return unicode
@property
def vector(self):
- return numpy.ndarray(dtype='float64')
+ return numpy.ndarray(dtype="float64")
@property
def vector_norm(self):
return float
@@ -75,15 +76,15 @@
@property
def subtree(self):
yield Token()
-
- +section('span-create')
- +h3('span-init')
- | #[+label('tag') Section] Create a Span
- p
- | Span instances are usually created via the #[code Doc] object.
+ +section("span-create")
+ +h(3, "span-init")
+ | #[+tag Section] Create a Span
- +table(['Example', 'Description'], 'code')
+ p.
+ Span instances are usually created via the #[code Doc] object.
+
+ +table(["Example", "Description"])
+row
+cell #[code.lang-python span = doc[4 : 7]]
+cell Produce a span with tokens 4, 5 and 6.
@@ -99,12 +100,12 @@
+row
+cell #[code.lang-python for noun_phrase in doc.noun_chunks]
+cell See #[a(href="/docs#doc-spans-nounchunks") Doc.noun_chunks]
-
- +code('python', 'Definition').
+
+ +code("python", "Definition").
def __init__(self, doc, start, end, label=0, vector=None, vector_norm=None):
return Span()
- +table(['Name', 'Type', 'Description'], 'params')
+ +table(["Name", "Type", "Description"])
+row
+cell doc
+cell Doc
@@ -120,32 +121,32 @@
+row
+cell label
+cell int or unicode
- +cell A label for the span. Either a string, or an integer ID, that should refer to a string mapped by the #[code Doc] object's #[code StringStore].
+            +cell A label for the span. Either a string, or an integer ID, that should refer to a string mapped by the #[code Doc] object's #[code StringStore].
+row
+cell vector
- +cell
+ +cell
+cell
+row
+cell vector_norm
- +cell
+cell
-
- +section('span-merge')
- +h3('span-merge')
- | #[+label('tag') method] Span.merge
+ +cell
- p
- | Merge the span into a single token, modifying the underlying
- | #[code.lang-python Doc] object in place.
-
- +aside('Caveat').
+ +section("span-merge")
+ +h(3, "span-merge")
+ | #[+tag method] Span.merge
+
+ p.
+ Merge the span into a single token, modifying the underlying
+ #[code.lang-python Doc] object in place.
+
+ +aside("Caveat").
Magic is done to allow you to call #[code.lang-python merge()]
without invalidating other #[code.lang-python Span] objects.
- However, it's difficult to ensure all indices are recomputed
- correctly. Please report any errors encountered on the issue
+ However, it"s difficult to ensure all indices are recomputed
+ correctly. Please report any errors encountered on the issue
tracker.
- +code('python', 'Example').
+ +code("python", "Example").
for ent in doc.ents:
ent.merge(ent.root.tag_, ent.text, ent.label_)
for np in doc.noun_chunks:
@@ -153,11 +154,11 @@
np = np[1:]
np.merge(np.root.tag_, np.text, np.root.ent_type_)
- +code('python', 'Definition').
+ +code("python", "Definition").
def merge(self, tag, lemma, ent_type):
return None
- +table(['Name', 'Type', 'Description'], 'params')
+ +table(["Name", "Type", "Description"])
+row
+cell tag
+cell unicode
@@ -171,52 +172,50 @@
+cell unicode
+cell The named entity type to assign to the new token.
- +section('span-similarity')
- +h3('span-similarity')
- | #[+label('tag') method] Span.similarity
+ +section("span-similarity")
+ +h(3, "span-similarity")
+ | #[+tag method] Span.similarity
- p
- | Estimate the semantic similarity between the span and another #[code Span],
- #[code Doc], #[code Token] or #[code Lexeme].
-
- +aside('Algorithm').
+ p Estimate the semantic similarity between the span and another #[code Span], #[code Doc], #[code Token] or #[code Lexeme].
+
+ +aside("Algorithm").
Similarity is estimated
using the cosine metric, between #[code Span.vector] and #[code other.vector].
By default, #[code Span.vector] is computed by averaging the vectors
of its tokens.
- +code('python', 'Example').
+ +code("python", "Example").
doc = nlp("Apples and oranges are similar. Boots and hippos aren't.")
apples_sent, boots_sent = doc.sents
fruit = doc.vocab[u'fruit']
        assert apples_sent.similarity(fruit) > boots_sent.similarity(fruit)
- +code('python', 'Definition').
+ +code("python", "Definition").
def similarity(self, other):
return float
- +table(['Name', 'Type', 'Description'], 'params')
+ +table(["Name", "Type", "Description"])
+row
+cell other
+cell Token, Span, Doc or Lexeme
+cell The other object to judge similarity with.
- +section('span-sequence')
- +h3('span-sequence')
- | #[+label('tag') section] Span as a Sequence
+ +section("span-sequence")
+ +h(3, "span-sequence")
+ | #[+tag section] Span as a Sequence
p.
#[code Span] objects act as a sequence of #[code Token] objects. In
this way they mirror the API of the #[code Doc] object.
- +table(['Name', 'Description'], 'params')
+ +table(["Name", "Description"], "params")
+row
+cell #[code.lang-python token = span[i]]
- +cell
- | Get the #[code Token] object at position #[em i], where
- | #[code i] is an offset within the #[code Span], not the
- | document. That is, if you have #[code.lang-python span = doc[4:6]],
- | then #[code.lang-python span[0].i == 4]
+ +cell.
+ Get the #[code Token] object at position #[em i], where
+ #[code i] is an offset within the #[code Span], not the
+ document. That is, if you have #[code.lang-python span = doc[4:6]],
+ then #[code.lang-python span[0].i == 4]
+row
+cell #[code.lang-python for token in span]
@@ -225,13 +224,12 @@
+row
+cell __len__
- +cell.
- Number of tokens in the span.
+ +cell Number of tokens in the span.
+row
+cell text
+cell.
- The text content of the span, obtained from
+ The text content of the span, obtained from
#[code ''.join(token.text_with_ws for token in span)].
+row
@@ -244,21 +242,21 @@
+cell.
The end offset of the span, i.e. #[code span[-1].i + 1].
- +section('span-navigating-parse')
- +h3('span-navigativing-parse')
- | #[+label('tag') Section] Span and the Syntactic Parse
+ +section("span-navigating-parse")
+ +h(3, "span-navigativing-parse")
+ | #[+tag Section] Span and the Syntactic Parse
p.
Span objects allow similar access to the syntactic parse as individual
tokens.
- +table(['Name', 'Type', 'Description'], 'params')
+ +table(["Name", "Type", "Description"])
+row
+cell root
+cell #[code.lang-python Token]
- +cell
- | The word with the shortest path to the root of the sentence is
- | the root of the span.
+ +cell.
+ The word with the shortest path to the root of the sentence is
+ the root of the span.
+row
+cell lefts
+cell #[code.lang-python yield Token]
@@ -271,27 +269,27 @@
+row
+cell subtree
+cell #[code.lang-python yield Token]
- +cell
- | Tokens in the range #[code (start, end+1)], where #[code start]
- | is the index of the leftmost word descended from a token in the
- | span, and #[code end] is the index of the rightmost token descended
- | from a token in the span.
+ +cell.
+ Tokens in the range #[code (start, end+1)], where #[code start]
+ is the index of the leftmost word descended from a token in the
+ span, and #[code end] is the index of the rightmost token descended
+ from a token in the span.
- +section('span-strings')
- +h3('span-strings')
- | #[+label('tag') section] Span's Strings API
+ +section("span-strings")
+ +h(3, "span-strings")
+ | #[+tag section] Span"s Strings API
p.
You can access the textual content of the span, and different view of
it, with the following properties.
- +table(['Name', 'Type', 'Description'], 'params')
+ +table(["Name", "Type", "Description"])
+row
+cell text_with_ws
+cell unicode
+cell.
- The form of the span as it appears in the string, including
- trailing whitespace. This is useful when you need to use linguistic
+ The form of the span as it appears in the string, including
+ trailing whitespace. This is useful when you need to use linguistic
features to add inline mark-up to the string.
+row
diff --git a/website/docs/_api-stringstore.jade b/website/docs/_api-stringstore.jade
index aa5f0cb9b..ec524a0f5 100644
--- a/website/docs/_api-stringstore.jade
+++ b/website/docs/_api-stringstore.jade
@@ -1,58 +1,58 @@
-//- Docs > API > StringStore
-//- ============================================================================
+//- ----------------------------------
+//- 💫 DOCS > API > STRINGSTORE
+//- ----------------------------------
-+section('stringstore')
- +h2('stringstore', 'https://github.com/' + profiles.github + '/spaCy/blob/master/spacy/strings.pyx#L74')
- | #[+label('tag') class] StringStore
++section("stringstore")
+ +h(2, "stringstore", "https://github.com/" + SOCIAL.github + "/spaCy/blob/master/spacy/strings.pyx")
+ | #[+tag class] StringStore
- p
- | Intern strings, and map them to sequential integer IDs.
+ p Intern strings, and map them to sequential integer IDs.
- p
- | Only the integer IDs are held by spaCy's data
- | classes (#[code Doc], #[code Token], #[code Span] and #[code Lexeme])
- | – when you use a string-valued attribute like #[code token.orth_],
- | you access a property that computes #[code token.strings[token.orth]].
+ p.
+ Only the integer IDs are held by spaCy's data
+ classes (#[code Doc], #[code Token], #[code Span] and #[code Lexeme])
+ – when you use a string-valued attribute like #[code token.orth_],
+ you access a property that computes #[code token.strings[token.orth]].
- +aside('Efficiency').
+ +aside("Efficiency").
        The mapping table is very efficient, and a small-string optimization
is used to maintain a small memory footprint.
- +table(['Usage', 'Description'], 'code')
+ +table(["Usage", "Description"])
+row
- +cell #[code.lang-python string = string_store[int_id]]
+ +cell #[code string = string_store[int_id]]
+cell.
- Retrieve a string from a given integer ID. If the integer ID
+ Retrieve a string from a given integer ID. If the integer ID
is not found, raise #[code IndexError].
+row
- +cell #[code.lang-python int_id = string_store[unicode_string]]
+ +cell #[code int_id = string_store[unicode_string]]
+cell.
- Map a unicode string to an integer ID. If the string is
+ Map a unicode string to an integer ID. If the string is
previously unseen, it is interned, and a new ID is returned.
+row
- +cell #[code.lang-python int_id = string_store[utf8_byte_string]]
+ +cell #[code int_id = string_store[utf8_byte_string]]
+cell.
- Byte strings are assumed to be in UTF-8 encoding. Strings
- encoded with other codecs may fail silently. Given a utf8
- string, the behaviour is the same as for unicode strings.
- Internally, strings are stored in UTF-8 format. So if you start
- with a UTF-8 byte string, it's less efficient to first decode
- it as unicode, as StringStore will then have to encode it as
+ Byte strings are assumed to be in UTF-8 encoding. Strings
+ encoded with other codecs may fail silently. Given a utf8
+ string, the behaviour is the same as for unicode strings.
+ Internally, strings are stored in UTF-8 format. So if you start
+ with a UTF-8 byte string, it's less efficient to first decode
+ it as unicode, as StringStore will then have to encode it as
UTF-8 once again.
+row
- +cell #[code.lang-python n_strings = len(string_store)]
+ +cell #[code n_strings = len(string_store)]
+cell.
Number of strings in the string-store.
+row
- +cell #[code.lang-python for string in string_store]
- +cell
+ +cell #[code for string in string_store]
+ +cell
p.
- Iterate over strings in the string store, in order, such
+ Iterate over strings in the string store, in order, such
that the ith string in the sequence has the ID #[code i]:
+code.code-block-small.no-block.
@@ -60,43 +60,43 @@
for i, string in enumerate(string_store):
assert i == string_store[string]
- +section('stringstore-init')
- +h3('stringstore-init')
- | #[+label('tag') method] StringStore.__init__
+ +section("stringstore-init")
+ +h(3, "stringstore-init")
+ | #[+tag method] StringStore.__init__
- +code('python', 'Definition').
+ +code("python", "Definition").
def __init__(self):
return self
- +section('stringstore-dump')
- +h3('stringstore-dump')
- | #[+label('tag') method] StringStore.dump
-
+ +section("stringstore-dump")
+ +h(3, "stringstore-dump")
+ | #[+tag method] StringStore.dump
+
p Save the string-to-int mapping to the given file.
- +code('python', 'Definition').
+ +code("python", "Definition").
def dump(self, file):
return None
- +table(['Name', 'Type', 'Description'], 'params')
- +row
+ +table(["Name", "Type", "Description"])
+ +row
                +cell file
                +cell file
+cell.
The file to write the data to.
- +section('stringstore-load')
- +h3('stringstore-load')
- | #[+label('tag') method] StringStore.load
-
+ +section("stringstore-load")
+ +h(3, "stringstore-load")
+ | #[+tag method] StringStore.load
+
p Load the strings from the given file.
- +code('python', 'Definition').
+ +code("python", "Definition").
def load(self, file):
return None
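+
+        p.
+            A sketch of a dump/load round-trip, assuming both methods accept
+            an open file object, as in the definitions above:
+
+        +code("python", "Example").
+            with open('strings.txt', 'w') as file_:
+                nlp.vocab.strings.dump(file_)
+            with open('strings.txt') as file_:
+                nlp.vocab.strings.load(file_)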
- +table(['Name', 'Type', 'Description'], 'params')
- +row
+ +table(["Name", "Type", "Description"])
+ +row
+cell file
+cell file
+cell.
diff --git a/website/docs/_api-token.jade b/website/docs/_api-token.jade
index c943c394c..ef596f68a 100644
--- a/website/docs/_api-token.jade
+++ b/website/docs/_api-token.jade
@@ -1,99 +1,100 @@
-//- Docs > API > Token
-//- ============================================================================
+//- ----------------------------------
+//- 💫 DOCS > API > TOKEN
+//- ----------------------------------
-+section('token')
- +h2('token', 'https://github.com/' + profiles.github + '/spaCy/blob/master/spacy/tokens/token.pyx#L31')
- | #[+label('tag') class] Token
++section("token")
+ +h(2, "token", "https://github.com/" + SOCIAL.github + "/spaCy/blob/master/spacy/tokens/token.pyx")
+ | #[+tag class] Token
p.
- A Token represents a single word, punctuation or significant whitespace
- symbol. Integer IDs are provided for all string features. The (unicode)
- string is provided by an attribute of the same name followed by an underscore,
- e.g. #[code token.orth] is an integer ID, #[code token.orth_] is the unicode
- value. The only exception is the #[code token.text] attribute, which is (unicode)
+ A Token represents a single word, punctuation or significant whitespace
+ symbol. Integer IDs are provided for all string features. The (unicode)
+ string is provided by an attribute of the same name followed by an underscore,
+ e.g. #[code token.orth] is an integer ID, #[code token.orth_] is the unicode
+ value. The only exception is the #[code token.text] attribute, which is (unicode)
string-typed.
- +section('token-init')
- +h3('token-init')
+ +section("token-init")
+ +h(3, "token-init")
        | #[+tag method] Token.__init__
- +code('python', 'definition').
+ +code("python", "Definition").
        def __init__(self, vocab, doc, offset):
return Token()
- +table(['Name', 'Type', 'Description'], 'params')
+ +table(["Name", "Type", "Description"])
+row
+cell vocab
- +cell
+ +cell Vocab
+cell A Vocab object
+row
+cell doc
- +cell
+ +cell Doc
+cell The parent sequence
+row
+cell offset
- +cell #[a(href=link_int target='_blank') int]
+ +cell #[+a(link_int) int]
+cell The index of the token within the document
- +section('token-stringfeatures')
- +h3('token-stringfeatures')
+ +section("token-stringfeatures")
+ +h(3, "token-stringfeatures")
| String Features
- +table(['Name', 'Description'], 'params')
+ +table(["Name", "Description"])
+row
+cell lemma / lemma_
+cell.
- The "base" of the word, with no inflectional suffixes, e.g.
- the lemma of "developing" is "develop", the lemma of "geese"
- is "goose", etc. Note that #[em derivational] suffixes are
- not stripped, e.g. the lemma of "instutitions" is "institution",
- not "institute". Lemmatization is performed using the WordNet
- data, but extended to also cover closed-class words such as
- pronouns. By default, the WN lemmatizer returns "hi" as the
+ The "base" of the word, with no inflectional suffixes, e.g.
+ the lemma of "developing" is "develop", the lemma of "geese"
+ is "goose", etc. Note that #[em derivational] suffixes are
+ not stripped, e.g. the lemma of "instutitions" is "institution",
+ not "institute". Lemmatization is performed using the WordNet
+ data, but extended to also cover closed-class words such as
+ pronouns. By default, the WN lemmatizer returns "hi" as the
lemma of "his". We assign pronouns the lemma #[code -PRON-].
+row
+cell orth / orth_
+cell.
- The form of the word with no string normalization or processing,
+ The form of the word with no string normalization or processing,
as it appears in the string, without trailing whitespace.
+row
+cell lower / lower_
+cell.
- The form of the word, but forced to lower-case, i.e.
+ The form of the word, but forced to lower-case, i.e.
#[code lower = word.orth_.lower()]
+row
+cell shape / shape_
+cell.
A transform of the word's string, to show orthographic features.
- The characters a-z are mapped to x, A-Z is mapped to X, 0-9
- is mapped to d. After these mappings, sequences of 4 or more
- of the same character are truncated to length 4. Examples:
+ The characters a-z are mapped to x, A-Z is mapped to X, 0-9
+ is mapped to d. After these mappings, sequences of 4 or more
+ of the same character are truncated to length 4. Examples:
C3Po --> XdXx, favorite --> xxxx, :) --> :)
+row
+cell prefix / prefix_
+cell.
- A length-N substring from the start of the word. Length may
- vary by language; currently for English n=1, i.e.
+ A length-N substring from the start of the word. Length may
+ vary by language; currently for English n=1, i.e.
#[code prefix = word.orth_[:1]]
+row
+cell suffix / suffix_
+cell.
- A length-N substring from the end of the word. Length may
- vary by language; currently for English n=3, i.e.
+ A length-N substring from the end of the word. Length may
+ vary by language; currently for English n=3, i.e.
#[code suffix = word.orth_[-3:]]
- +section('token-booleanflags')
- +h3('token-booleanflags')
+ +section("token-booleanflags")
+ +h(3, "token-booleanflags")
| Boolean Flags
- +table(['Name', 'Description'], 'params')
+ +table(["Name", "Description"])
+row
+cell is_alpha
+cell.
@@ -152,36 +153,36 @@
+row
+cell is_stop
+cell.
- Is the word part of a "stop list"? Stop lists are used to
- improve the quality of topic models, by filtering out common,
+ Is the word part of a "stop list"? Stop lists are used to
+ improve the quality of topic models, by filtering out common,
domain-general words.
- +section('token-distributional')
- +h3('token-distributional')
+ +section("token-distributional")
+ +h(3, "token-distributional")
| Distributional Features
- +table(['Name', 'Description'], 'params')
+ +table(["Name", "Description"])
+row
+cell prob
+cell.
- The unigram log-probability of the word, estimated from
- counts from a large corpus, smoothed using Simple Good Turing
+ The unigram log-probability of the word, estimated from
+ counts from a large corpus, smoothed using Simple Good Turing
estimation.
+row
+cell cluster
+cell.
- The Brown cluster ID of the word. These are often useful features
- for linear models. If you’re using a non-linear model, particularly
- a neural net or random forest, consider using the real-valued
+ The Brown cluster ID of the word. These are often useful features
+ for linear models. If you’re using a non-linear model, particularly
+ a neural net or random forest, consider using the real-valued
word representation vector, in #[code Token.repvec], instead.
+row
+cell vector
+cell.
- A "word embedding" representation: a dense real-valued vector
- that supports similarity queries between words. By default,
- spaCy currently loads vectors produced by the Levy and
+ A "word embedding" representation: a dense real-valued vector
+ that supports similarity queries between words. By default,
+ spaCy currently loads vectors produced by the Levy and
Goldberg (2014) dependency-based word2vec model.
+row
@@ -189,11 +190,11 @@
+cell.
                A boolean value indicating whether the word has a vector.
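
A sketch of the distributional features in practice, assuming the default `en` model with word vectors loaded:

```python
import spacy

nlp = spacy.load('en')
apple = nlp(u'apple')[0]

print(apple.prob)             # unigram log-probability, a negative float
if apple.has_vector:          # only meaningful if vectors are loaded
    print(len(apple.vector))  # dense real-valued embedding
```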
- +section('token-alignment')
- +h3('token-alignment')
+ +section("token-alignment")
+ +h(3, "token-alignment")
| Alignment and Output
- +table(['Name', 'Description'], 'params')
+ +table(["Name", "Description"])
+row
+cell idx
+cell.
@@ -212,7 +213,7 @@
+row
+cell str(token)
+cell.
- In Python 3, returns #[code token.orth_]. In Python 2, returns
+ In Python 3, returns #[code token.orth_]. In Python 2, returns
#[code token.orth_.encode('utf8')].
+row
@@ -223,9 +224,9 @@
+row
+cell text_with_ws
+cell.
- #[code token.orth_ + token.whitespace_], i.e. the form of the
- word as it appears in the string, trailing whitespace. This is
- useful when you need to use linguistic features to add inline
+ #[code token.orth_ + token.whitespace_], i.e. the form of the
+ word as it appears in the string, trailing whitespace. This is
+ useful when you need to use linguistic features to add inline
mark-up to the string.
+row
@@ -234,44 +235,47 @@
The number of immediate syntactic children following the word
in the string.
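
Because `text_with_ws` preserves each token's trailing whitespace, iterating over the tokens is lossless with respect to the input string, which is what makes inline mark-up possible. A sketch, assuming the `en` model:

```python
import spacy

nlp = spacy.load('en')
original = u"Hello, world. Here's\ta tab."
doc = nlp(original)

# joining the tokens reconstructs the original string exactly
assert u''.join(t.text_with_ws for t in doc) == original
```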
- +section('token-postags')
- +h3('token-postags')
+ +section("token-postags")
+ +h(3, "token-postags")
| Part-of-Speech Tags
- +table(['Name', 'Description'], 'params')
+ +table(["Name", "Description"])
+row
+cell pos / pos_
+cell.
- A coarse-grained, less detailed tag that represents the
- word-class of the token. The set of #[code .pos] tags are
- consistent across languages. The available tags are #[code ADJ],
- #[code ADP], #[code ADV], #[code AUX], #[code CONJ], #[code DET],
- #[code INTJ], #[code NOUN], #[code NUM], #[code PART],
- #[code PRON], #[code PROPN], #[code PUNCT], #[code SCONJ],
+ A coarse-grained, less detailed tag that represents the
+ word-class of the token. The set of #[code .pos] tags are
+ consistent across languages. The available tags are #[code ADJ],
+ #[code ADP], #[code ADV], #[code AUX], #[code CONJ], #[code DET],
+ #[code INTJ], #[code NOUN], #[code NUM], #[code PART],
+ #[code PRON], #[code PROPN], #[code PUNCT], #[code SCONJ],
#[code SYM], #[code VERB], #[code X], #[code EOL], #[code SPACE].
+row
+cell tag / tag_
+cell.
- A fine-grained, more detailed tag that represents the
- word-class and some basic morphological information for the
- token. These tags are primarily designed to be good features
- for subsequent models, particularly the syntactic parser.
- They are language and treebank dependent. The tagger is
- trained to predict these fine-grained tags, and then a
- mapping table is used to reduce them to the coarse-grained
+ A fine-grained, more detailed tag that represents the
+ word-class and some basic morphological information for the
+ token. These tags are primarily designed to be good features
+ for subsequent models, particularly the syntactic parser.
+ They are language and treebank dependent. The tagger is
+ trained to predict these fine-grained tags, and then a
+ mapping table is used to reduce them to the coarse-grained
#[code .pos] tags.
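
A sketch contrasting the two tag sets, assuming the `en` model (the exact values depend on the model version):

```python
import spacy

nlp = spacy.load('en')
doc = nlp(u'She was reading the paper.')

for t in doc:
    # e.g. "reading" -> VERB (coarse .pos_) / VBG (fine-grained .tag_)
    print(t.orth_, t.pos_, t.tag_)
```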
- +section('token-navigating')
- +h3('token-navigating')
- | Navigating the Parse Tree
+ +section("token-navigating")
+ +h(3, "token-navigating") Navigating the Parse Tree
- +table(['Name', 'Description'], 'params')
+ +table(["Name", "Description"])
+ +row
+ +cell dep / dep_
+ +cell.
+        The syntactic relation type, aka the dependency label, connecting
+        the word to its head.
+row
+cell head
+cell.
- The immediate syntactic head of the token. If the token is the
- root of its sentence, it is the token itself, i.e.
+ The immediate syntactic head of the token. If the token is the
+ root of its sentence, it is the token itself, i.e.
#[code root_token.head is root_token].
+row
@@ -300,11 +304,11 @@
+cell.
Get the #[code i]#[sup th] next / previous neighboring token.
- +section('token-namedentities')
- +h3('token-namedentities')
+ +section("token-namedentities")
+ +h(3, "token-namedentities")
| Named Entity Recognition
- +table(['Name', 'Description'], 'params')
+ +table(["Name", "Description"])
+row
+cell ent_type
+cell.
@@ -313,5 +317,5 @@
+row
+cell ent_iob
+cell.
- The IOB (inside, outside, begin) entity recognition tag for
+ The IOB (inside, outside, begin) entity recognition tag for
the token.
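
A sketch of the per-token entity annotations; `ent_iob` is an integer code for the inside/outside/begin scheme, and the sentence is reused from the docs' own examples:

```python
import spacy

nlp = spacy.load('en')
doc = nlp(u'Mr. Best flew to New York on Saturday morning.')

for t in doc:
    # e.g. "New" and "York" carry an entity type such as 'GPE'
    print(t.orth_, t.ent_type_, t.ent_iob)
```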
diff --git a/website/docs/_api-vocab.jade b/website/docs/_api-vocab.jade
index cc9e400de..9176172e6 100644
--- a/website/docs/_api-vocab.jade
+++ b/website/docs/_api-vocab.jade
@@ -1,9 +1,10 @@
-//- Docs > API > Vocab
-//- ============================================================================
+//- ----------------------------------
+//- 💫 DOCS > API > VOCAB
+//- ----------------------------------
-+section('vocab')
- +h2('vocab', 'https://github.com/' + profiles.github + '/spaCy/blob/master/spacy/vocab.pyx#L47')
- | #[+label('tag') class] Vocab
++section("vocab")
+ +h(2, "vocab", "https://github.com/" + SOCIAL.github + "/spaCy/blob/master/spacy/vocab.pyx")
+ | #[+tag class] Vocab
p
| A look-up table that allows you to access #[code.lang-python Lexeme]
@@ -18,7 +19,7 @@
objects produced by the same #[code Language] instance will hold
a reference to the same #[code Vocab] instance.
- +code('python', 'Overview').
+ +code("python", "Overview").
class Vocab:
StringStore strings
Morphology morphology
@@ -62,8 +63,8 @@
def load_vectors_from_bin_loc(self, loc):
return int
-
- +table(['Example', 'Description'], 'code')
+
+ +table(["Example", "Description"])
+row
+cell #[code.lang-python lexeme = vocab[integer_id]]
+cell.
@@ -87,71 +88,67 @@
+cell.
                Access the vocab from a #[code.lang-python Doc] object.
- +section('vocab-dump')
- +h3('vocab-dump')
- | #[+label('tag') method] Vocab.dump
+ +section("vocab-dump")
+ +h(3, "vocab-dump")
+ | #[+tag method] Vocab.dump
- +code('python', 'definition').
+ +code("python", "Definition").
def dump(self, loc):
return None
- +table(['Name', 'Type', 'Description'], 'params')
+ +table(["Name", "Type", "Description"])
+row
+cell loc
- +cell #[a(href=link_unicode target='_blank') unicode]
- +cell.
- Path where the vocabulary should be saved.
+ +cell #[+a(link_unicode) unicode]
+ +cell Path where the vocabulary should be saved.
- +section('vocab-load_lexemes')
- +h3('vocab-load_lexemes')
- | #[+label('tag') method] Vocab.load_lexemes
+ +section("vocab-load_lexemes")
+ +h(3, "vocab-load_lexemes")
+ | #[+tag method] Vocab.load_lexemes
- +code('python', 'definition').
+ +code("python", "Definition").
def load_lexemes(self, loc):
return None
- +table(['Name', 'Type', 'Description'], 'params')
+ +table(["Name", "Type", "Description"])
+row
+cell loc
- +cell #[a(href=link_unicode target='_blank') unicode]
- +cell.
- Path to load the lexemes.bin file from.
+ +cell #[+a(link_unicode) unicode]
+ +cell Path to load the lexemes.bin file from.
- +section('vocab-dump_vectors')
- +h3('vocab-dump_vectors')
- | #[+label('tag') method] Vocab.dump_vectors
+ +section("vocab-dump_vectors")
+ +h(3, "vocab-dump_vectors")
+ | #[+tag method] Vocab.dump_vectors
- +code('python', 'definition').
+ +code("python", "Definition").
def dump_vectors(self, loc):
return None
- +section('vocab-loadvectors')
- +h3('vocab-loadvectors')
- | #[+label('tag') method] Vocab.load_vectors
+ +section("vocab-loadvectors")
+ +h(3, "vocab-loadvectors")
+ | #[+tag method] Vocab.load_vectors
- +code('python', 'definition').
+ +code("python", "Definition").
def load_vectors(self, file_):
return None
- +table(['Name', 'Type', 'Description'], 'params')
+ +table(["Name", "Type", "Description"])
+row
+cell file_
- +cell #[a(href=link_unicode target='_blank') unicode]
- +cell.
- A file-like object, to load word vectors from.
+ +cell #[+a(link_unicode) unicode]
+ +cell A file-like object, to load word vectors from.
+ +section("vocab-loadvectorsfrombinloc")
+ +h(3, "vocab-saveload-loadvectorsfrom")
+ | #[+tag method] Vocab.load_vectors_from_bin_loc
- +section('vocab-loadvectorsfrombinloc')
- +h3('vocab-saveload-loadvectorsfrom')
- | #[+label('tag') method] Vocab.load_vectors_from_bin_loc
-
- +code('python', 'definition').
+ +code("python", "Definition").
def load_vectors_from_bin_loc(self, loc):
return None
- +table(['Name', 'Type', 'Description'], 'params')
+ +table(["Name", "Type", "Description"])
+row
+cell loc
- +cell #[a(href=link_unicode target='_blank') unicode]
+ +cell #[+a(link_unicode) unicode]
+cell.
A path to a file, in spaCy's binary word-vectors file format.
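
A sketch of the save/load methods defined above; the path is illustrative, and the second pipeline instance stands in for e.g. another process with a compatible vocabulary:

```python
import spacy

nlp = spacy.load('en')

# save the vocabulary's lexemes to disk ...
nlp.vocab.dump('/tmp/lexemes.bin')

# ... and read them back into a compatible Vocab instance
nlp2 = spacy.load('en')
nlp2.vocab.load_lexemes('/tmp/lexemes.bin')
```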
diff --git a/website/docs/_data.json b/website/docs/_data.json
index d7a5b46f1..cde95e48b 100644
--- a/website/docs/_data.json
+++ b/website/docs/_data.json
@@ -4,7 +4,7 @@
"sidebar": {
"Quickstart": [
- ["Install", "#install", "install"],
+ ["Getting started", "#getting-started", "getting-started"],
["Usage Examples", "#examples", "examples"]
],
"API": [
diff --git a/website/docs/_quickstart-examples.jade b/website/docs/_quickstart-examples.jade
index 4b967a94a..33c78c077 100644
--- a/website/docs/_quickstart-examples.jade
+++ b/website/docs/_quickstart-examples.jade
@@ -1,18 +1,20 @@
-//- Docs > Quickstart > Usage Examples
-//- ============================================================================
+//- ----------------------------------
+//- 💫 DOCS > QUICKSTART > USAGE EXAMPLES
+//- ----------------------------------
-+section('examples')
- +h2('examples').
++section("examples")
+ +h(2, "examples").
Usage Examples
- +h3('examples-resources') Load resources and process text
+ +h(3, "examples-resources") Load resources and process text
+code.
- from spacy.en import English
- nlp = English()
- doc = nlp(u'Hello, world. Here are two sentences.')
+ import spacy
+ en_nlp = spacy.load('en')
+ en_doc = en_nlp(u'Hello, world. Here are two sentences.')
+    de_nlp = spacy.load('de')
+    de_doc = de_nlp(u'Ich bin ein Berliner.')
- +h3('multi-threaded') Multi-threaded generator (using OpenMP. No GIL!)
+    +h(3, "multi-threaded") Multi-threaded generator (using OpenMP; no GIL!)
+code.
texts = [u'One document.', u'...', u'Lots of documents']
@@ -23,7 +25,7 @@
if i == 100:
break
- +h3('examples-tokens-sentences') Get tokens and sentences
+ +h(3, "examples-tokens-sentences") Get tokens and sentences
+code.
token = doc[0]
@@ -31,7 +33,7 @@
assert token is sentence[0]
assert sentence.text == 'Hello, world.'
- +h3('examples-integer-ids') Use integer IDs for any string
+ +h(3, "examples-integer-ids") Use integer IDs for any string
+code.
hello_id = nlp.vocab.strings['Hello']
@@ -40,22 +42,22 @@
assert token.orth == hello_id == 3125
assert token.orth_ == hello_str == 'Hello'
- +h3('examples-string-views-flags') Get and set string views and flags
+ +h(3, "examples-string-views-flags") Get and set string views and flags
+code.
assert token.shape_ == 'Xxxxx'
for lexeme in nlp.vocab:
if lexeme.is_alpha:
- lexeme.shape_ = u'W'
+ lexeme.shape_ = 'W'
elif lexeme.is_digit:
- lexeme.shape_ = u'D'
+ lexeme.shape_ = 'D'
elif lexeme.is_punct:
- lexeme.shape_ = u'P'
+ lexeme.shape_ = 'P'
else:
- lexeme.shape_ = u'M'
+ lexeme.shape_ = 'M'
assert token.shape_ == 'W'
- +h3('examples-numpy-arrays') Export to numpy arrays
+ +h(3, "examples-numpy-arrays") Export to numpy arrays
+code.
from spacy.attrs import ORTH, LIKE_URL, IS_OOV
@@ -68,7 +70,7 @@
assert doc[0].like_url == doc_array[0, 1]
assert list(doc_array[:, 1]) == [t.like_url for t in doc]
- +h3('examples-word-vectors') Word vectors
+ +h(3, "examples-word-vectors") Word vectors
+code.
        doc = nlp(u"Apples and oranges are similar. Boots and hippos aren't.")
@@ -80,7 +82,7 @@
assert apples.similarity(oranges) > boots.similarity(hippos)
- +h3('examples-pos-tags') Part-of-speech tags
+ +h(3, "examples-pos-tags") Part-of-speech tags
+code.
from spacy.parts_of_speech import ADV
@@ -101,7 +103,7 @@
def print_fine_pos(token):
print(token.tag_)
- +h3('examples-dependencies') Syntactic dependencies
+ +h(3, "examples-dependencies") Syntactic dependencies
+code.
def dependency_labels_to_root(token):
@@ -112,7 +114,7 @@
token = token.head
return dep_labels
- +h3('examples-entities') Named entities
+ +h(3, "examples-entities") Named entities
+code.
def iter_products(docs):
@@ -132,7 +134,7 @@
counts[ent.orth_][ent.root.head.lemma_] += 1
return counts
- +h3('examples-inline') Calculate inline mark-up on original string
+ +h(3, "examples-inline") Calculate inline mark-up on original string
+code.
def put_spans_around_tokens(doc, get_classes):
@@ -159,15 +161,16 @@
string = string.replace('\t', ' ')
return string
- +h3('examples-binary') Efficient binary serialization
+ +h(3, "examples-binary") Efficient binary serialization
+code.
+ import spacy
from spacy.tokens.doc import Doc
byte_string = doc.to_bytes()
open('moby_dick.bin', 'wb').write(byte_string)
- nlp = spacy.en.English()
+ nlp = spacy.load('en')
for byte_string in Doc.read_bytes(open('moby_dick.bin', 'rb')):
doc = Doc(nlp.vocab)
doc.from_bytes(byte_string)
diff --git a/website/docs/_quickstart-install.jade b/website/docs/_quickstart-install.jade
index 9691ce783..2e746e6cd 100644
--- a/website/docs/_quickstart-install.jade
+++ b/website/docs/_quickstart-install.jade
@@ -1,119 +1,177 @@
-//- Docs > Quickstart > Install
-//- ============================================================================
+//- ----------------------------------
+//- 💫 QUICKSTART > GETTING STARTED
+//- ----------------------------------
-+section('install')
- +h2('install')
- | Install spaCy v#{spacy_version}
++section("getting-started")
+ +h(2, "getting-started")
+ | Getting started
- +section('install-pip-virtualenv')
- +h3('install-pip-virtualenv')
- | pip and virtualenv
+ +section("install-spacy")
+ +h(3, "install-spacy")
+ | Install spaCy
p.
- With Python 2.7 or Python 3, using Linux or OSX, ensure that you have
- the packages #[code build-essential] and #[code python-dev] installed.
- Then run:
+ spaCy is compatible with 64-bit CPython 2.6+/3.3+ and runs on Unix/Linux,
+ OS X and Windows. Source and binary packages are available via
+ #[+a("https://pypi.python.org/pypi/spacy") pip] and
+ #[+a("https://anaconda.org/spacy/spacy") conda]. If there are
+ no binary packages for your platform available please make sure that you have
+ a working build enviroment set up. See
+ notes on #[a(href="/docs#install-source-ubuntu") Ubuntu],
+ #[a(href="/docs#install-source-osx") OS X] and
+ #[a(href="/docs#install-source-windows") Windows] for details.
- +code('bash').
- pip install spacy
- python -m spacy.en.download
+ +code("bash", "conda").
+ conda config --add channels spacy # only needed once
+ conda install spacy
p.
- The download command fetches and installs about 500mb of data, for the
- parser model and word vectors, which it installs within the spacy
- package directory. Usually you'll want to install spaCy within a
- #[a(href='https://virtualenv.readthedocs.org/en/latest/' target='_blank') virtualenv],
+        When using pip, it is generally recommended to install packages in a
+ #[+a("https://virtualenv.readthedocs.org/en/latest/") virtualenv]
to avoid modifying system state:
- +code('bash').
- virtualenv my_env_dir
- source my_env_dir/bin/activate
+ +code("bash", "pip").
+ # make sure you are using a recent pip/virtualenv version
+ python -m pip install -U pip virtualenv
- +section('install-conda')
- +h3('install-conda')
- | conda
+ virtualenv .env
+ source .env/bin/activate
- +code('bash').
- conda config --add channels spacy
- conda install spacy
- python -m spacy.en.download
+ pip install spacy
p.
- Sometimes conda is not up to date with the latest release. If you
- can't get the latest version on conda, you can always fall back to the
- pip install.
+ Python packaging is awkward at the best of times, and it's particularly
+ tricky with C extensions, built via Cython, requiring large data files.
+ So, please report issues as you encounter them.
- +section('install-windows')
- +h3('install-windows')
- | Windows (64 bit)
+ +section("install-model")
+ +h(3, "install-model")
+ | Install model
p.
- We've been working on Windows support. Our tests now succeed on 64 bit
- builds of Windows. Installation from pip should work if you have a C++
- compiler installed. Please see the README-MSVC.txt file for instructions
- on compiling from source.
-
- +section('install-update')
- +h3('install-update')
- | Updating your installation
+        After installation you need to download a language model. Currently
+        only models for English and German, named #[code en] and #[code de],
+        are available. Please get in touch with us if you need support for
+        a particular language.
+
+ +code("bash").
+ sputnik --name spacy --repository-url http://index.spacy.io install en==1.1.0
p.
- To update your installation:
+ Then check whether the model was successfully installed:
- +code('bash').
- pip install --upgrade spacy
- python -m spacy.en.download
+ +code("bash").
+ python -c "import spacy; spacy.load('en'); print('OK')"
p.
- Most updates ship a new model, so you will usually have to redownload
- the data.
+        The download command fetches about 500 MB of data and installs it
+        within the #[code spacy] package directory.
-
- +section('install-obsolete-python')
- +h3('install-obsolete-python')
- | Workaround for obsolete system Python
+ +section("install-upgrade")
+ +h(3, "install-upgrade")
+ | Upgrading spaCy
p.
- If you're stuck using a server with an old version of Python, and you
- don't have root access, we've prepared a bootstrap script to help you
- compile a local Python install. Run:
+ To upgrade spaCy to the latest release:
- +code('bash')
- curl https://raw.githubusercontent.com/spacy-io/gist/master/bootstrap_python_env.sh | bash && source .env/bin/activate
+ +code("bash", "conda").
+ conda update spacy
- +section('install-compile')
- +h3('install-compile')
+ +code("bash", "pip").
+ pip install -U spacy
+
+ p.
+        Some new releases require a new language model. In that case you
+        will have to upgrade your model, too. You can also force
+        re-downloading and installing a language model:
+
+ +code("bash").
+ python -m spacy.en.download --force
+
+ +section("install-source")
+ +h(3, "install-source")
| Compile from source
p.
- The other way to install the package is to clone the github repository,
- and build it from source. This installs an additional dependency, Cython.
- If you're using Python 2, we also recommend installing fabric and
- fabtools – this is how we build the project. Ensure that you have the
- packages #[code build-essential], #[code python-dev], #[code git] and
- #[code python-virtualenv] installed.
+ The other way to install spaCy is to clone its
+ #[a(href="https://github.com/spacy-io/spaCy") GitHub repository] and
+ build it from source. That is the common way if you want to make changes
+ to the code base.
- +code('bash')
+ p.
+        You'll need to make sure that you have a development environment
+        set up, consisting of a Python distribution including header files,
+        a compiler, pip, virtualenv and git. The compiler is usually the
+        trickiest part, and how to set it up depends on your system. See
+ notes on #[a(href="/docs#install-source-ubuntu") Ubuntu],
+ #[a(href="/docs#install-source-osx") OS X] and
+ #[a(href="/docs#install-source-windows") Windows] for details.
+
+ +code("bash").
+ # make sure you are using recent pip/virtualenv versions
+ python -m pip install -U pip virtualenv
+
+ # find git install instructions at https://git-scm.com/downloads
git clone https://github.com/spacy-io/spaCy.git
+
cd spaCy
virtualenv .env && source .env/bin/activate
pip install -r requirements.txt
pip install -e .
- python -m spacy.en.download
- pip install pytest
- python -m pytest spacy
p.
- Python packaging is awkward at the best of times, and it's particularly
- tricky with C extensions, built via Cython, requiring large data files.
- So, please report issues as you encounter them.
+        Compared to a regular install via #[code pip] or #[code conda],
+        installing from source additionally pulls in the developer
+        dependencies listed in
+        #[+a("https://github.com/" + SOCIAL.github + "/spaCy/blob/master/requirements.txt") requirements.txt],
+        such as #[code cython].
- +section('install-pypy')
- +h3('install-pypy')
- | pypy (unsupported)
+ +h(4, "install-source-ubuntu")
+ | Ubuntu
+
+ p Install system-level dependencies via #[code apt-get]:
+
+ +code("bash").
+ sudo apt-get install build-essential python-dev git
+
+ +h(4, "install-source-osx")
+ | OS X
p.
- If PyPy support is a priority for you, please get in touch. We could
- likely fix the remaining issues, if necessary. However, the library
- is likely to be much slower on PyPy, as it's written in Cython, which
- produces code tuned for the performance of CPython.
+        Install a recent version of Xcode, including the so-called
+        "Command Line Tools". OS X ships with Python and git preinstalled.
+
+ +h(4, "install-source-windows")
+ | Windows
+
+ p.
+ Install a version of Visual Studio Express or higher that matches the version that was
+ used to compile your Python interpreter. For official distributions
+ these are VS 2008 (Python 2.7), VS 2010 (Python 3.4) and VS 2015 (Python 3.5).
+
+ +section("install-obsolete-python")
+ +h(3, "install-obsolete-python")
+ | Workaround for obsolete system Python
+
+ p.
+ If you're stuck using a system with an old version of Python, and you
+ don't have root access, we've prepared a bootstrap script to help you
+ compile a local Python install. Run:
+
+ +code("bash").
+ curl https://raw.githubusercontent.com/spacy-io/gist/master/bootstrap_python_env.sh | bash && source .env/bin/activate
+
+ +section("run-tests")
+ +h(3, "run-tests")
+ | Run tests
+
+ p.
+ spaCy comes with an extensive test suite. First, find out where spaCy is installed:
+
+ +code("bash").
+ python -c "import os; import spacy; print(os.path.dirname(spacy.__file__))"
+
+ p.
+ Then run #[code pytest] on that directory. The flags #[code --vectors],
+ #[code --slow] and #[code --model] are optional and enable additional tests:
+
+ +code("bash").
+        # make sure you are using a recent pytest version
+ python -m pip install -U pytest
+
+ python -m pytest <spacy-directory> --vectors --model --slow
diff --git a/website/docs/_tutorials.jade b/website/docs/_tutorials.jade
index efd6f7ada..0607bbd11 100644
--- a/website/docs/_tutorials.jade
+++ b/website/docs/_tutorials.jade
@@ -1,10 +1,12 @@
-//- Docs > Tutorials
-//- ============================================================================
+//- ----------------------------------
+//- 💫 DOCS > TUTORIALS
+//- ----------------------------------
-+section('tutorials')
- +h2('tutorials') Tutorials
++section("tutorials")
+ +h(2, "tutorials") Tutorials
each post, slug in public.docs.tutorials._data
if slug != 'index'
- +h3: a(href='/docs/tutorials/' + slug)=post.title
- a.block(href='/docs/tutorials/' + slug)=post.description
+ a.o-block(href='/docs/tutorials/' + slug)
+ +h(3)=post.title
+ p=post.description
diff --git a/website/docs/index.jade b/website/docs/index.jade
index f00b828b2..043021193 100644
--- a/website/docs/index.jade
+++ b/website/docs/index.jade
@@ -1,27 +1,17 @@
+//- ----------------------------------
+//- 💫 DOCS
+//- ----------------------------------
+
include ../_includes/_mixins
- var link_bool = 'http://docs.python.org/library/functions.html#bool'
- var link_int = 'http://docs.python.org/library/functions.html#int'
- var link_unicode = 'http://docs.python.org/library/functions.html#unicode'
-
-//- Docs
-//- ============================================================================
-
-+infobox('Update March, 2016').
- We know change can be jarring, especially when you've got a deadline. So to
- ease the transition to the new documentation style, we've kept
- #[a(href='/docs/legacy' target='_blank') the old docs] online too.
- If you have any problems, you can let us know on the
- #[a(href='https://github.com/' + profiles.github + '/spaCy' target='_blank') spaCy issue tracker]
- or the #[a(href='https://reddit.com/r/' + profiles.reddit target='_blank') Reddit user group].
-
include _quickstart-install
include _quickstart-examples
-+divider('bottom')
-
-+h2('api') API
++h(2, "api") API
include _api-english
include _api-doc
@@ -31,7 +21,5 @@ include _api-lexeme
include _api-vocab
include _api-stringstore
-+divider
-
include _annotation-specs
include _tutorials
diff --git a/website/docs/legacy/index.html b/website/docs/legacy/index.html
deleted file mode 100644
index 6aa32ebed..000000000
--- a/website/docs/legacy/index.html
+++ /dev/null
@@ -1,933 +0,0 @@
-    [933 lines removed: the legacy docs page (v0.100.6), rendered as static
-    HTML. It duplicated the API reference above (classes English, Doc,
-    Token, Span, Lexeme, Vocab and StringStore) plus the annotation specs:
-    tokenization, sentence boundary detection, part-of-speech tagging,
-    lemmatization, syntactic dependency parsing and the named entity types
-    (PERSON, NORP, FACILITY, ORG, GPE, LOC, PRODUCT, EVENT, WORK_OF_ART,
-    LAW, LANGUAGE, DATE, TIME, PERCENT, MONEY, QUANTITY, ORDINAL,
-    CARDINAL).]
diff --git a/website/docs/legacy/resources/css/style.css b/website/docs/legacy/resources/css/style.css
deleted file mode 100755
index 75d6312ec..000000000
--- a/website/docs/legacy/resources/css/style.css
+++ /dev/null
@@ -1 +0,0 @@
-    [single minified line removed: @font-face declarations for the Karla
-    and Inconsolata webfonts, a syntax-highlighting theme, page layout
-    rules and a truncated inline base64-encoded logo image.]
pPEqdJ9aeFwpfqDfaHju4fCNVjkeSwUa0xz1Y9rRF7OGNWOSzqV4MuYB19KEMxZQC/oryFfFJmjVObsFpK12yET+F9j1e+z72GshWVG/axZATgr6F59PgFcMcyEcZX2tVWHumjF4baH1FoknYSF/Rm8iRbrsiO5jWs9J7GAw85YfO/cN6eVCeLixMI4zad0Qop2+sJJSLJn7JFw3zNJC0auizQS2hKW9AbntOVKbPCUMQGUZNi5Rtt+QYFfwNi0gXbhYMOTq8xiphBgBcL4LhRT5nvuPveer7XewXWmygrcJ7ASVQivHswMB70Fu7V8a6B5ju5OpW2nWSLAvaMzSncKKT0/OyVV9Eq7wcM8FQR/vdkhMCScr2wFJNYI1mzLNpLet95MqoigBsk2JhqcPVI/9jOQpgHfetZQnb4FeGLsDf30hZ4zxch0Dx5CIz71RCJfC2y7swkLIi87vHg6Qf6lAv68dkm5Dthz4y9CkS/QcBfKxg70kGri1haw4YWVCiEqewaUwx9gX5tL1SDH2XUnsA1s57t+iPid33AyZbmR7I+6iZNyT0yNTsntX5mHjr81+toS4IdepbWLfWspJj6Ww9UA1WoZQmDgUYbR3u3X0cBQ5LcF+gbXy5GWoeBQaQ8bMIW1DOaqXKwjfzGmd7vWeDzkdUOLK4VkNUQmlcG+hFAnjHd1CC/vfJowCA3eimCP/n46FDkM2ai66BPvFCJTiCeXcOuc2gdyiiWT53jUhpgqaq3RTxr1dYNu2rWIoKff+XAI9Ar8PZoI6xO156kJuSvGEMn3jnr9axtPcGJ7a16T00NNkRehrkjK7NI+1zq489dewaZPOzWXnGzvUKGfVk6cVDm3eF4FzResmQTZNhFrhhDxhECXb+Ona1T3TQCHBMkyAbZRgzOBw2odTmhwph7btuYVxlGoRL3UWhVQDI5f3KnwRrmQKYz5g1UpGJ6zaY5s86TMRJA/IMO+e7yiWQjK6Kr9NZS/d+D8HwddZCVJtASWr3v9ffefDNRB+JfsnQlJcExLbJfjbDb2GtzPBKLnWHRwets1PuUWNQ8qakz+2Fu8kdgDl9UCCug4o3AeQ2w5TgfwC665cqApQepbhA/H+U0vCme0wr2dCNmrxRHI6gkw4bo9GF/zwUPkshEmn5fCV+bFa8LM+0BxsUTLub7sypUU9eApIthfwu7t8biHv2jfhDOlS8DdhrZX4C/g7KF0r4zchBVeWVm6B77gWktcDvJ8sXqDBGlNgF+VdoSw/gd9OYcaoKfDbBpCybUd2KmBgWmCoSrYQTIRLyySUUjQp9xRvmsDj3FeEqB3kVY0sA6sCngmQC+qCKbwVgVupc4onqeSzbRRnhgJfMatw84378b02hOsKeUEc68yTAqgN7azx1ebtTPSEoUTtHoPhS2mTztwa16DVOLZsc5hzCKhrexfSz+6rUnKqPtlIDiAZHlCqlA+EnDDryWtqCIe0p5jhKjPVCZ1Ct5tIRwo0LRxH12hqBVn8iC5XwoWGFOEiLhfUqQHqmYViRZOIiHPEDGjzcJWPh0fCRVwaCkbYGQkXEeEAnWpQViFZNwmKhIuIePNs1PPW574G8SF+h4gzgd6xbSrmqx0Ppf8ftTfoEjyefxEJF3Hq0JXEGuQW25c+B3tqIeUq6lfEnlBRimxevdspEo6SyNZRBy8KmTB5IRIuesEI/99bcovY2RBubHvMEk7n0MCIcFEPhWxliMGeYkg51s8/4rKgcy7b/XrdvYVBcGprKU3MrudJumsq10i2KurfxSLHS8Hw5LaZ1F6AQJ/JSyBcF1q4LRzHCvSI49QPg/oYBnTqhIuIOCnEpV0REQHxnwADAJGYzpqfxJdQAAAAAElFTkSuQmCC);background-image:url(data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0idXRmLTgiPz48IURPQ1RZUEUgc3ZnIFBVQkxJQyAiLS8vVzNDLy9EVEQgU1ZHIDEuMS8vRU4iICJodHRwOi8vd3d3LnczLm9yZy9HcmFwaGljcy9TVkcvMS4xL0RURC9zdmcxMS5kdGQiPjxzdmcgdmVyc2lvbj0iMS4xIiBpZD0ibG9nby5zdmdfMV8iIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgeG1sbnM6eGxpbms9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkveGxpbmsiIHg9IjBweCIgeT0iMHB4IiB2aWV3Qm94PSIwIDAgMjIwIDcwIiBlbmFibGUtYmFja2dyb3VuZD0ibmV3IDAgMCAyMjAgNzAiIHhtbDpzcGFjZT0icHJlc2VydmUiPjxnIGlkPSJsb2dvLnN2ZyI+PGc+PHBhdGggZmlsbC1ydWxlPSJldmVub2RkIiBjbGlwLXJ1bGU9ImV2ZW5vZGQiIGZpbGw9IiNGRkZGRkYiIGQ9Ik0zMC4yLDMzYy0xLjgtMS00LjItMS44LTcuMS0yLjVjLTIuNC0wLjYtNC4zLTEuMS01LjctMS41Yy0xLjQtMC40LTIuNS0wLjktMy4zLTEuNmMtMC44LTAuNi0xLjItMS40LTEuMi0yLjRjMC0xLjEsMC42LTIsMS43LTIuN2MxLjEtMC43LDIuNS0xLjEsNC4yLTEuMWMxLjYsMCwyLjksMC4zLDMuOSwwLjhzMS44LDEuMywyLjUsMi4yYzAuOSwxLjEsMS42LDEuOSwyLjMsMi41YzAuNywwLjUsMS41LDAuOCwyLjQsMC44YzEuMywwLDIuMy0wLjMsMy4xLTFjMC43LTAuNywxLjEtMS42LDEuMS0yLjZjMC0xLTAuMy0yLTEtM2MtMC43LTEtMS42LTItMi45LTIuOGMtMS4zLTAuOC0yLjktMS41LTQuOC0yYy0xLjktMC41LTQuMS0wLjgtNi41LTAuOGMtMy4xLDAtNS43LDAuNS03LjksMS41Yy0yLjIsMS0zLjgsMi4zLTQuOSw0Yy0xLjEsMS43LTEuNywzLjQtMS43LDUuM2MwLDIuMSwwLjYsMy45LDEuOCw1LjNjMS4yLDEuNCwyLjgsMi42LDQuOCwzLjVjMiwwLjksNC42LDEuNyw3LjgsMi41YzIuOCwwLjcsNC45LDEuNCw2LjEsMi4xYzEuMiwwLjgsMS45LDEuOSwxLjksMy42YzAsMS4xLTAuNiwyLTEuOCwyLjhjLTEuMiwwLjgtMi43LDEuMi00LjYsMS4yYy0yLjMsMC00LjEtMC40LTUuNC0xLjJjLTEuMy0wLjgtMi41LTIuMS0zLjQtMy43Yy0wLjUtMC45LTEtMS41LTEuNS0yYy0wLjUtMC41LTEuMy0wLjctMi4zLTAuN2MtMS4xLDAtMiwwLjQtMi44LDEuMWMtMC43LDAuNy0xLjEsMS42LTEuMSwyLjZjMCwxLjUsMC42LDMuMSwxLjcsNC42YzEuMSwxLjUsMi45LDIuOCw1LjMsMy44czUuMywxLjUsOC42LDEuNW
MzLjUsMCw2LjUtMC41LDguOS0xLjRjMi40LTAuOSw0LjMtMi4zLDUuNS00LjJjMS4zLTEuOCwxLjktNCwxLjktNi42YzAtMS43LTAuNS0zLjItMS40LTQuNkMzMy40LDM1LjIsMzIuMSwzNCwzMC4yLDMzeiBNNzIuMSwxNy42Yy0yLjQtMS41LTUuMS0yLjItNy45LTIuMmMtMi40LDAtNC41LDAuNS02LjQsMS41Yy0xLjksMS0zLjcsMi42LTUuNSw0Ljh2LTEuMWMwLTEuNy0wLjQtMy0xLjItMy45Yy0wLjgtMC45LTEuOS0xLjMtMy4yLTEuM2MtMS4zLDAtMi40LDAuNC0zLjIsMS4zYy0wLjgsMC45LTEuMiwyLjItMS4yLDR2NDAuMmMwLDIsMC4zLDMuNiwwLjksNC42YzAuNiwxLjEsMS43LDEuNiwzLjUsMS42YzMsMCw0LjUtMiw0LjUtNi4xdi0xNGMxLjYsMi4xLDMuNCwzLjYsNS4yLDQuN2MxLjgsMS4xLDQsMS42LDYuNywxLjZjMi4yLDAsNC4zLTAuNCw2LjItMS4zYzEuOS0wLjksMy42LTIuMSw1LTMuOGMxLjQtMS42LDIuNS0zLjYsMy4zLTZjMC44LTIuNCwxLjItNSwxLjItNy45YzAtMy45LTAuNy03LjMtMi4xLTEwLjFDNzYuNCwyMS4zLDc0LjUsMTkuMSw3Mi4xLDE3LjZ6IE02OS4zLDQwLjZjLTAuOCwxLjgtMS45LDMuMS0zLjIsNGMtMS4zLDAuOS0yLjgsMS4zLTQuMywxLjNjLTIuNiwwLTQuOC0xLTYuNS0zLjFjLTEuOC0yLjEtMi42LTQuOS0yLjYtOC42YzAtMy45LDAuOS02LjksMi42LTguOWMxLjgtMiwzLjktMyw2LjUtM2MxLjYsMCwzLjEsMC41LDQuNCwxLjRjMS4zLDAuOSwyLjQsMi4zLDMuMSw0LjFjMC44LDEuOCwxLjEsMy45LDEuMSw2LjNDNzAuNSwzNi43LDcwLjEsMzguOCw2OS4zLDQwLjZ6IE0xMTkuNCw0MC4zYzAtMS45LDAtMy42LDAuMS01LjFjMC0xLjUsMC0zLjQsMC01LjZjMC0zLjUtMC41LTYuMi0xLjUtOC4zYy0xLTIuMS0yLjYtMy42LTUtNC42Yy0yLjMtMS01LjYtMS40LTkuNy0xLjRjLTMuNywwLTYuNywwLjUtOS4yLDEuNWMtMi41LDEtNC4zLDIuMy01LjQsMy44Yy0xLjIsMS41LTEuNywzLjEtMS43LDQuOGMwLDEsMC40LDEuOSwxLjEsMi42YzAuOCwwLjcsMS43LDEsMi43LDFjMS4yLDAsMS45LTAuMiwyLjMtMC42YzAuNC0wLjQsMS0xLjMsMS44LTIuNmMwLjktMS4zLDEuOS0yLjMsMy4xLTNjMS4yLTAuNywzLTEsNS4zLTFjMi44LDAsNC42LDAuNiw1LjQsMS43YzAuOSwxLjEsMS40LDMsMS41LDUuNGMtMiwwLjYtMy44LDEuMS01LjQsMS40Yy0xLjcsMC40LTMuNywwLjgtNiwxLjNjLTIuMywwLjUtMy44LDAuOC00LjQsMC45Yy0yLjgsMC42LTUsMS44LTYuNywzLjZjLTEuNiwxLjgtMi40LDQtMi40LDYuNWMwLDEuOSwwLjUsMy42LDEuNSw1LjNjMSwxLjYsMi40LDIuOSw0LjIsMy44YzEuOCwwLjksMy45LDEuNCw2LjMsMS40YzIuNiwwLDUtMC40LDcuMS0xLjNjMi4xLTAuOSw0LjMtMi4yLDYuNS0zLjljMSwxLjcsMiwzLDMsMy45YzAuOSwwLjksMS45LDEuMywyLjgsMS4zYzEuMSwwLDIuMi0wLjQsMy4xLTEuMmMwLjktMC44LDEuMy0xLjcsMS4zLTIuNmMwLTAuNS0wLjMtMS44LTAuOS0zLjdDMTE5LjcsNDMuOSwxMTkuNCw0MiwxMTkuNCw0MC4zeiBNMTEwLjMsMzUuOGMwLDMuMi0wLjQsNS41LTEuMyw3Yy0wLjcsMS4zLTEuOSwyLjMtMy40LDMuMWMtMS42LDAuOC0zLjMsMS4yLTUuMSwxLjJjLTEuNywwLTMuMS0wLjUtNC4yLTEuNWMtMS4xLTEtMS42LTIuMi0xLjYtMy42YzAtMS40LDAuNS0yLjQsMS40LTMuMmMwLjktMC44LDEuOS0xLjMsMi45LTEuNWMxLTAuMywyLjgtMC43LDUuNC0xLjJjMi42LTAuNiw0LjYtMS4xLDYtMS42VjM1Ljh6IE0xNjcuOCwzMi4yYy0xLjQsMC0yLjQsMC40LTMuMSwxLjJjLTAuNiwwLjgtMS4yLDEuOS0xLjYsMy4yYy0xLDIuOC0yLjUsNC45LTQuNiw2LjRjLTIsMS40LTQuNSwyLjItNy40LDIuMmMtMi43LDAtNS0wLjYtNy0xLjljLTItMS4zLTMuNS0zLjItNC42LTUuOGMtMS4xLTIuNi0xLjYtNS44LTEuNi05LjdjMC01LjgsMS4yLTEwLjMsMy43LTEzLjVjMi41LTMuMiw1LjgtNC43LDkuOS00LjdjMi42LDAsNC44LDAuNiw2LjYsMS44YzEuOCwxLjIsMy4zLDMuMSw0LjcsNS42YzAuOCwxLjUsMS41LDIuNiwyLjIsMy4yYzAuNiwwLjYsMS42LDAuOSwyLjksMC45YzEuMiwwLDIuMS0wLjQsMy0xLjNzMS4yLTEuOSwxLjItMy4xYzAtMi4xLTAuOC00LjQtMi41LTYuN2MtMS43LTIuNC00LjEtNC40LTcuMy02Yy0zLjItMS42LTYuOC0yLjQtMTAuOC0yLjRjLTMuMywwLTYuNCwwLjYtOS40LDEuOGMtMi45LDEuMi01LjUsMi45LTcuNyw1LjJjLTIuMiwyLjMtMy45LDUtNSw4LjJjLTEuMiwzLjItMS44LDYuOC0xLjgsMTAuOGMwLDIuNSwwLjIsNC44LDAuNyw3YzAuNSwyLjIsMS4yLDQuMywyLjEsNi4yYzAuOSwxLjksMi4xLDMuNywzLjQsNS4zYzEuNSwxLjcsMy4xLDMuMSw0LjksNC4yYzEuOCwxLjEsMy43LDEuOCw1LjksMi4zYzIuMiwwLjUsNC42LDAuOCw3LjIsMC44YzMuNSwwLDYuNS0wLjYsOS4xLTEuN2MyLjYtMS4xLDQuOC0yLjYsNi40LTQuM2MxLjctMS44LDIuOS0zLjYsMy43LTUuNWMwLjgtMS45LDEuMi0zLjYsMS4yLTUuMmMwLTEuMy0wLjQtMi4zLTEuMy0zLjFDMTcwLDMyLjYsMTY5LDMyLjIsMTY3LjgsMzIuMnogTTIxMS4yLDE3LjRjLTAuNC0wLjYtMC45LTEuMS0xLjUtMS41Yy0wLjYtMC40LTEuMy0wLjYtMi0wLjZjLTEsMC0xLjcsMC4yLTIuMywwLjVjLTAuNSwwLjMtMSwxLTEuNSwyYy0wLjUsMS0xLDIuMy0xL
jYsNC4xbC03LjUsMjEuM2wtNy45LTIyLjljLTAuNS0xLjYtMS4xLTIuOC0xLjgtMy43cy0xLjctMS4zLTMtMS4zYy0wLjgsMC0xLjUsMC4yLTIuMiwwLjZzLTEuMywxLTEuNywxLjZjLTAuNCwwLjctMC42LDEuNC0wLjYsMi4yYzAsMC44LDAuNCwyLDEuMSwzLjZsMTEuNSwyOC45bC0wLjksMi4xYy0wLjYsMS41LTEuMiwyLjctMS43LDMuNWMtMC41LDAuOC0xLDEuMy0xLjcsMS43Yy0wLjYsMC4zLTEuNSwwLjUtMi41LDAuNWMtMC40LDAtMC45LTAuMS0xLjUtMC4yYy0wLjUtMC4xLTEtMC4yLTEuNS0wLjJjLTEuMywwLTIuMiwwLjMtMi45LDAuOXMtMSwxLjQtMSwyLjVjMCwxLjYsMC43LDIuNiwyLDMuMmMxLjQsMC42LDMuNCwwLjksNi4xLDAuOWMyLjgsMCw1LTAuNCw2LjctMS4zYzEuNi0wLjksMy0yLjEsNC0zLjdjMS0xLjYsMi0zLjgsMy4xLTYuNmwxMi4xLTMxLjhjMC4zLTAuOCwwLjUtMS42LDAuOC0yLjRjMC4yLTAuOCwwLjQtMS40LDAuNC0xLjhDMjExLjcsMTguNywyMTEuNiwxOC4xLDIxMS4yLDE3LjR6Ii8+PC9nPjwvZz48L3N2Zz4=),none;background-color:transparent;background-position:top left;background-repeat:no-repeat;background-size:100% 100%;z-index:2}.slogan{font:normal 1em/1em "Karla",Arial,sans-serif;text-align:center;text-transform:uppercase;margin:0;padding:0.5em 0 0 0;color:#fff;display:inline-block;position:relative;z-index:2}.copyright{font-size:0.75em}body>footer{margin:3em 0 0 0;padding:2em 5em;text-align:center}body>footer a .call-to-action{padding:0 0 0.1em 0;border-bottom:2px solid #fff}main{width:950px;margin:3.4em auto;overflow-x:hidden;overflow-y:auto}article>header{margin:0 0 1.8em 0}p{margin:0 0 1.25em 0;padding:0}summary p{clear:both;margin-top:1em}details[open]>summary:before,details[data-open="true"]>summary:before,details>summary:before,details[data-open="false"]>summary:before{font:bold 1.25em/1.15em "Karla",Arial,sans-serif;text-align:center;width:1.15em;height:1.15em;margin:0 0.5em 0 0;color:#fff;display:block;float:left}details{margin:2em 0 1em 0;padding:0 0 0 2em}details[data-open="false"]>:not(summary){position:fixed;visibility:hidden}details[data-open="true"]{position:static;visibility:visible}details[open]>summary:before,details[data-open="true"]>summary:before{content:"-"}details>summary:before,details[data-open="false"]>summary:before{content:"+"}details>summary{margin:0 0 0.5em -2em;cursor:pointer}details>summary::-webkit-details-marker{display:none}article>details>summary,section>details>summary{font-size:1.25rem}article>details,section>details{font-size:1.1rem;line-height:1.7rem;margin:0.75em 0 1em 2.5em;padding:0}article>details>pre[class*="language-"]{margin:1em 0 2em 0}h2{font:bold 2.09em/1.2em "Karla",Arial,sans-serif;margin:0 0 0.35em;letter-spacing:-1px}h3{font:bold 1.36em/1em "Karla",Arial,sans-serif;margin:0 0 1.1em 0;padding:0.8em 0 0 0;letter-spacing:-1px}.landing-page h3{padding:1.8em 0 0 0}h4{font:bold 1.18em/1em "Karla",Arial,sans-serif;margin:0 0 1.3em 0;padding:0.9em 0 0 0;letter-spacing:-1px}details h4{font-size:1.18rem;margin:0 1em 0.25em 0;padding:1.25em 0 0 0}details summary h4{display:inline}h5{font:bold 0.9em/1em "Karla",Arial,sans-serif;text-transform:uppercase;margin:0;padding:1em 0 0.5em 0}a{color:inherit;text-decoration:none}article p a:not(.button),article ul a,.box a:not(.button),.intro p a{border-bottom:2px solid}article p a:hover:not(.button),article ul a:hover,.box a:not(.button):hover,.intro p a:hover{border-bottom-color:#000 !important}.subhead a{color:#000;font-weight:bold;text-decoration:none}article h2 a,article h3 a,article h4 a,a.permalink{padding:3.25rem 0 0 0;display:inline-block;color:inherit}article h2 a:after,article h3 a:after,article h4 a:after,a.permalink:after{padding:0 0 0 0.5em;content:"☍";opacity:0}article h2 a:hover:after,article h3 a:hover:after,article h4 a:hover:after,a.permalink:hover:after{-webkit-transition:all 0.4s 
ease-in-out;-ms-transition:all 0.4s ease-in-out;-moz-transition:all 0.4s ease-in-out;-o-transition:all 0.4s ease-in-out;transition:all 0.4s ease-in-out;opacity:1}a.reference{border:none}body>nav{position:fixed;top:0;left:0;font:normal 0.7em/1em "Karla",Arial,sans-serif;text-align:right;padding:0.75em 2em 0.75em 0;width:100%;z-index:1}body>nav li{padding:0 0.4em;display:inline}body>nav li.active{font-weight:bold}body>nav a{color:#fff;text-transform:uppercase;text-decoration:none}.intro nav ul{list-style-type:none;text-align:center}.intro nav ul li{display:inline}.intro{margin:0 0 1.5em 0}.intro p{font:normal 1.3em/1.5em "Karla",Arial,sans-serif;margin:0 0 1.7em 0}article .subhead{font:normal 0.825em/1em "Karla",Arial,sans-serif;text-transform:uppercase}article img{width:100%;border:2px solid #f9f9f9}article img.title{border:none}article iframe{width:100%;border:none}article ol{padding:0.7em 0 1.5em 1em;counter-reset:counter;list-style-type:none}article ol li{margin:0 0 1.2em 0;padding:0}article ol li:before{font:bold 1.2em/1em "Karla",Arial,sans-serif;color:#000;padding:0 0.5em 0 0;content:counter(counter) ".";counter-increment:counter}article ul{margin:0 0 1.5em 0.7em;list-style-type:none}article ul li{margin:0 0 0.5em 1em;padding:0}article ul li:before{font-size:2.5em;line-height:0;margin:0 0 0 -0.6em;padding:0 0.25em 0 0;color:#000;content:"▪";vertical-align:middle}article ul li.pro:before{color:#00cc3a !important}article ul li.con:before{color:#e80037 !important}article ul li.neutral:before{color:#a7aaa2 !important}article details ul li,article details ol li{line-height:1.75em}article table{margin:0 0 1.7em 0;padding:1.2em;background-color:#f9f9f9;width:100%;table-layout:fixed;border-spacing:0}article table thead{font:normal 0.9em/1em "Karla",Arial,sans-serif;text-align:left;text-transform:uppercase}article table th{padding:0 0 1em 0;border-bottom:2px solid #a7aaa2}article table td{font-size:0.85em;line-height:0;margin:0;padding:1.35em 0}article table.center th,article table.center td{text-align:center}article .caption{font:0.75em "Karla",Arial,sans-serif;margin:0 0 1.5em 0;padding:0 0 1.5em 0;color:#74786e}article .example{font:italic normal 1.25em/1.5em "Karla",Arial,sans-serif;text-indent:2.5em}article blockquote:not(.pull-quote){margin:0;padding:1em 10% 2.5em 5%}article blockquote:not(.pull-quote) p{border-left:4px solid}article blockquote:not(.pull-quote) p:before,article blockquote:not(.pull-quote) p:after{font:bold 1.25em/1.2em "Karla",Arial,sans-serif}article blockquote:not(.pull-quote) p:before{padding:0 0.25em 0 0;content:"“"}article blockquote:not(.pull-quote) p:after{padding:0 0 0 0.25em;content:"”"}article blockquote p{font-size:0.8em;margin:0 0 1em 0;padding:0 0 0 5%}article blockquote cite{font:normal bold 1rem/1.5rem "Karla",Arial,sans-serif;text-align:right;text-transform:uppercase;display:block}article blockquote.pull-quote{font:bold 2em/1.2em "Karla",Arial,sans-serif;margin:1.5em 5% 1em 5%}article blockquote.pull-quote p{flex-flow:row nowrap;-webkit-flex-flow:row nowrap;-webkit-box-flow:row nowrap}article blockquote.pull-quote p:before{font-size:2.5em;margin:0;padding:0.1em 0.1em 0 0;content:"“"}article blockquote.pull-quote p .share{font:bold 0.5em "Karla",Arial,sans-serif;text-transform:uppercase;padding:0 0 0 1em;-webkit-transition:all 0.4s ease-in-out;-ms-transition:all 0.4s ease-in-out;-moz-transition:all 0.4s ease-in-out;-o-transition:all 0.4s ease-in-out;transition:all 0.4s ease-in-out;border:none;opacity:0.25}article blockquote.pull-quote:hover 
.share{-webkit-transition:all 0.4s ease-in-out;-ms-transition:all 0.4s ease-in-out;-moz-transition:all 0.4s ease-in-out;-o-transition:all 0.4s ease-in-out;transition:all 0.4s ease-in-out;opacity:1}article sup a{font-size:0.8em;color:#000;text-decoration:none;border:none !important}article .bib-item{font:normal 0.9em/1.25em "Karla",Arial,sans-serif;padding:0.5em 0 0.5em 1.75em;text-indent:-1.75em;display:block}article .bib-item.inline{display:inline}article .meta{padding:1.5em 0}article .discuss{float:right;text-align:right}article .twitter-tweet+p{margin-top:2em}.declaration{padding:0.1em 0.5em;display:inline}article>details>summary>.declaration{border-bottom:4px solid}.declaration .label,.declaration .parameters{font:italic bold 0.9em/1em "Karla",Arial,sans-serif;padding:0 0.75em 0 0}.declaration .parameters{color:#000}.columnar .col{width:50%;margin:0 0.5em;padding:0}.columnar .col:first-child{margin-left:0}.box{margin:0 0 1em 0;font-family:"Karla",Arial,sans-serif}.box.infobox{font-size:0.8em;line-height:1.75em;margin:2em 0;padding:1.25em;border:4px solid}.box.license{flex-flow:row wrap;-webkit-flex-flow:row wrap;-webkit-box-flow:row wrap}.box.license .item{width:33.33333%;font-size:0.9em;margin:0.25em 0.5em;padding:1em;text-align:center;flex:1 1 275px;-webkit-flex:1 1 275px;-webkit-box-flex:1 1 275px;border-width:2px;border-style:solid;border-radius:0.5em}.box.license .item:first-child{margin-left:0}.box.license .item h5{margin:1.75em 0 0 0;padding:0;text-transform:none}.box.license .item span{display:block}.box.license .item .focus{font-size:1.5em}.box.license .item .button,.box.license .item .displacy iframe+a.view-displacy,.displacy .box.license .item iframe+a.view-displacy{margin:2em 10% 0.1em 10%;display:block;clear:both}.box.license .item .button+span,.box.license .item .displacy iframe+a.view-displacy+span,.displacy .box.license .item iframe+a.view-displacy+span{font-size:0.75em}.blogs{margin:2em 0 0 0;padding:0;flex-flow:row wrap;-webkit-flex-flow:row wrap;-webkit-box-flow:row wrap}.blogs article{font-size:0.75em;line-height:1.75em;margin:0 3em 3em 0;padding:0;flex:1 0 calc(50% - 1.5em);-webkit-flex:1 0 calc(50% - 1.5em);-webkit-box-flex:1 0 calc(50% - 1.5em);min-width:300px;max-width:100%}.blogs article:nth-child(2n+2){margin-right:0}.blogs article .readmore{border:none}.blogs h2{font-size:1.75em;line-height:1.15em;max-width:90%}.blogs h2 a{margin:0;padding:0}.blogs .readmore{flex:0 0 100%;-webkit-flex:0 0 100%;-webkit-box-flex:0 0 100%;text-align:center}.profile img{width:6.5em;height:6.5em;margin:0 1em 1em 0;shape-outside:circle();border-radius:50%;float:left}.profile .social{font-size:0.75em;display:block}.profile .social a{margin:0 0.3em 0 0.2em}article .profile{margin:3.5em 0 0 0}article .profile img{border:none}article .profile p{font-size:0.9em}article .profile .social{display:inline}.button,.displacy iframe+a.view-displacy{font:normal 0.77em/1em "Karla",Arial,sans-serif;text-transform:uppercase;background-color:#000;color:#fff;margin:0 0 0.35em 0;padding:0.9em;display:inline-block;text-decoration:none}.button:hover,.displacy iframe+a.view-displacy:hover{-webkit-transition:all 0.4s ease-in-out;-ms-transition:all 0.4s ease-in-out;-moz-transition:all 0.4s ease-in-out;-o-transition:all 0.4s ease-in-out;transition:all 0.4s ease-in-out;opacity:0.8 !important}.button .button-caption,.displacy iframe+a.view-displacy .button-caption{font-size:0.75em;line-height:0.75em;text-transform:none}.button-twitter{background-color:#5ea9dd !important}.button-hn{background-color:#f60 
!important}.button-reddit{background-color:#ff4500 !important}#home>header,#home>footer,#home>nav,#home .button,#home .displacy iframe+a.view-displacy,.displacy #home iframe+a.view-displacy,#home details>summary:before{background:#009acc}#home .subhead a,#home blockquote a,#home a:after,#home .reference,#home article ol li:before,#home article ul li:before,#home .declaration .label,#home .license h4,#home .note,#home .readmore,#home blockquote p:before,#home blockquote p:after{color:#009acc}#home .declaration,#home article p a,#home article li a,#home .intro p a,#home .infobox,#home .license .item,#home .license .item a,#home blockquote p{border-color:#009acc}#home .declaration{background:rgba(0,154,204,0.1)}#docs>header,#docs>footer,#docs>nav,#docs .button,#docs .displacy iframe+a.view-displacy,.displacy #docs iframe+a.view-displacy,#docs details>summary:before{background:#009acc}#docs .subhead a,#docs blockquote a,#docs a:after,#docs .reference,#docs article ol li:before,#docs article ul li:before,#docs .declaration .label,#docs .license h4,#docs .note,#docs .readmore,#docs blockquote p:before,#docs blockquote p:after{color:#009acc}#docs .declaration,#docs article p a,#docs article li a,#docs .intro p a,#docs .infobox,#docs .license .item,#docs .license .item a,#docs blockquote p{border-color:#009acc}#docs .declaration{background:rgba(0,154,204,0.1)}#license>header,#license>footer,#license>nav,#license .button,#license .displacy iframe+a.view-displacy,.displacy #license iframe+a.view-displacy,#license details>summary:before{background:#009acc}#license .subhead a,#license blockquote a,#license a:after,#license .reference,#license article ol li:before,#license article ul li:before,#license .declaration .label,#license .license h4,#license .note,#license .readmore,#license blockquote p:before,#license blockquote p:after{color:#009acc}#license .declaration,#license article p a,#license article li a,#license .intro p a,#license .infobox,#license .license .item,#license .license .item a,#license blockquote p{border-color:#009acc}#license .declaration{background:rgba(0,154,204,0.1)}#blog>header,#blog>footer,#blog>nav,#blog .button,#blog .displacy iframe+a.view-displacy,.displacy #blog iframe+a.view-displacy,#blog details>summary:before{background:#f25f5c}#blog .subhead a,#blog blockquote a,#blog a:after,#blog .reference,#blog article ol li:before,#blog article ul li:before,#blog .declaration .label,#blog .license h4,#blog .note,#blog .readmore,#blog blockquote p:before,#blog blockquote p:after{color:#f25f5c}#blog .declaration,#blog article p a,#blog article li a,#blog .intro p a,#blog .infobox,#blog .license .item,#blog .license .item a,#blog blockquote p{border-color:#f25f5c}#blog .declaration{background:rgba(242,95,92,0.1)}.displacy{margin:0 0 1em 0;position:relative;overflow:scroll;-webkit-overflow-scrolling:touch}.displacy:after{content:"";width:3em;height:0.75em;position:absolute;top:0.5em;left:0.5em;-webkit-transition:all 0.4s ease-in-out;-ms-transition:all 0.4s ease-in-out;-moz-transition:all 0.4s ease-in-out;-o-transition:all 0.4s ease-in-out;transition:all 0.4s 
ease-in-out;display:inline-block;background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAMgAAAAyCAYAAAAZUZThAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAADmdJREFUeNrsXQ2YVlMeP5PpO/pQRKXSVqKsEKsPYX2UtMJGZbPLMiOfydplU4QH9Wy0sbvNFNoiEvrAomzS2KxqfWz0YVgkilL60Oc0s/+f9zeP63bOuffc976vGXP/z/N/Zua8595z7rnn9/8+7+SUlZWphBJKSE/VkiVIKKEEIAklFIly071BYWFhsoppUH5+frIIPzDZ3IzcLIPxKOHtwsXJa7FTQUFBC/nRRHilgGh7JXyEGsKHCdfn31uFPxP+JjGx9qXmwsuF3xZ+X/g54eqW/scJLxDeKPymcO8qBo4H5ccnwv/BppK/z64kU+8mPE54mUcQLiWvIkg+FJ4ofE5lMPGzNcHhwm09f/cRvtjQ9yDhl4V7CjcU7iw8h9qnKoADwuBq4Rw2NRCeLO21KvC0+wm/Jfya8PXCHYX30/TDMx0ufLnwsxSWAz3PWmUB0kvTdqKhb29uCr951quKKJCfa9pgah1TAed6oPAs4ZkR59dGeJrwP4WbVWWANNS0HWDoW2Jo31NFANLYshkrErWj6XRuDPc6lff6aVUFiAu9ILze17ZZ+JkqApDqlWCOcL4XCrcK0Re+yKYQAq6p8DzhlglA7LSR5tcUOqlP0x9ZoxKqCFSLPuHBlj4vCven5VBXuJHw/sIn872WGq5rQpOrwuzL3Ar6Ej4S/nWyFyskjbKYQtD0lxm0/S7hIvLDFHw6s7Gr8CDhRxMNklBlI0SghlpMqb4hTeFXVSoYY8rvXFfpNUhhYWEtSpIWBBokBOLeKy0q1IV+Jny0SsXUX3e5MD8/vx6vbUnVvlulYvAfC79XUFCw0/F+eD7kIhCCLpLriw39WnDcelwPxPyXS/+9P8C7rcu5tGJABJHBbdyU61QqL/E/x3sOU6kEoI6GUjuEpSXCNwn/RfPZ8Zzv1772TsJdOPd/RVgTpBqQq/lcpVIJpbEDRICBSd7C6EUdTZcvqUJHax4wLD0gfI3n70eouoM2MiTYtYyKmJ6tRPohXv8E1Lhs3m8C7ol4/vPCZ3muHyTXzfD0GcCXfaxuPeRzmAtj5JovMgwKbN7BwpdwI+wX0P8LSvyxBLONalvM3neEH4ow3wnClxIQXsqhYPPun5HCt6vvcia3Cd/hMNYJKpV8rs2/EZ4+X7gsFhNLgJEjfKtKZbYHGsChKGVvFl7BjepKR/nAobiI3SybuJHw83QezwgAPj47hS+nWK47PWA+p3vAUX79/Rz3AGGM+bgBHOXrAcn7vvQdmEFwnMo1n0RneL8Q18DRHgKtKpwX0PcMakaTQItiNZRSoPk17Oe+oEw7HzhA2IutHcYa7QEHCMnNHnH6IOOF73TQOgjbIWR7nuOidTG0n2QAB4A6nyaQKx2CiIvc40JLn06atmZyTStGa/qGHAtmzmNy3Q0ZAMcvhefSR4hCNSkwzg0AoGmTz0xj7v+mANnNvxHiH+CT7G3Vvtn26iFAXU4dKRT9VC8WgIjmuEIj1cMu/IwwE/Gpchdz8G5DVOUD4bu4iNfT1t2s6QdJ+7Bs3EMcx51jAq2F8JLHylhxVgU0oXljmidMp5f4HuYpc7g8h5rRtCeON7S/q1Kh+XRoPAVqZ2qFIo0JpzOFwpapDNa0IS+zKG0fRMCBGPafLF0gve8VXkxAHEMw9fVswIyQbLT6BikCm/9SsflLfP1H0/Y8VuPQYr4utfudfFGZ+/lid1JiDaHNnqPZiAUylw4yvziqdIcpfVXCq5zDCs340JhT1b5JSWzO7iqVBNRFsHS0LKbXuYmsozV8Hr8WaEkhtShACehM24VhfOQw5tKVylwWArtwlK9tLvnGAGDFQScZNM4oPzhA0vYppTc2cnuNtotC0FIj5d5eCQdhsVjGepUBCz9IkImGVv5zDGugs6PxfGd6zBYvYZ7TVSr5OkTz+ckGgJg07OdZispNMphJFwUABL5rC03742EGDWNiDTa0z87LyxtluW4so0+ZpEaG9iNNF8hGXk/H+xNP80eMarnSbLnfCB84vGNNxg+L4ImD/FE4PF9/Azi89LShvZ2mrb7FlNmYJYA8YxjrwoB9rNMeCHc/mTZAxLw6VH4cYfj41hD3H64yW2S4ztD+kEjvi5i/0G3cNQQRzCqU3ncicFwIoPh9iH4jaXb56QiZX+sY1gAh97UehxmaKUw4eYXFp6mItEPps+tNDZql3JHvr2l/SqXyYoEUZGKZQpfLRHu8G+L+eHGvqWjh3rARENiR/vL4xtQI98kmnK1SSaEFAoKNHpDA/n8ujbEXyz3eD+oE4MkcEO3qp/m4K7VXOvQmozwdVCoHtTqgfxu1b+g6KLJTUU4B4qDVdQYza76m/Uylr44ObdkEAaSVoX2Rw0PNzRRAsMll80GTPWjocijtbHCp9MWhnn8QGEvl+nQy/m84AlkHkLYxmllLNe01GBlCVcKJBGSUatkSSnCdv9c4iwB5l3uvq6/9fAaG9miA46di5ZDxD/JB6hvaP3V4qLWZXDHZ5Ajf3qkCMqJ8VhzlHcHNvUoAc51w3YhDr4mh70ExL0cDbooH+IxbCM5xtMXTKSX/wNDePIZ5w6xDFHAmzfLaAc66DqT+hG8dg1CaHGKvhAZI3Rgcsw2ZFisCEtj5p6nUsc+w9BNGkd4RkJwcYdhtDn2/dlxfV9qfG+wzmpaQpiitsEXmXH2ulYb2dE865tAnGMoNfZeyJx4RgdtsMLO81Jfr4iVYDFNcJhcEkC0OdmrUMeICyQJqiJ6UMmFBDJt8noDkF45DupwRN2niOPIgkOCLucHqWPptpGn5B5pb7R3HWWhob0dHOSrhiLFfQJ1lCQ5hzaZp2s/zCYQBmj4vOWr+wM27IQa12lxliRBuFV4ofAXNl640v5Yoe60QbPW/C0hcjrUe4tC3WQya2ORDzjRsJoR5Ecr8rUpF7BpTqo4hoFz9r3kWDZBOjdk1jkIFpEvoIld3tsfU7G0wr1ScACl2jG7pqHumgSEbO1cDlr3Cr8P8Ej6BUi7fYipgUX/lMGyXGPp+mOajQ0oeb7jvcTQ7kKhcobG7TcLAdBRgFYGlo+tVtERrB6WvZSsLWJu3KfRMZtYFmvl8JTw7boC8pfRfotC1sLDwsBD3r6miFRGGBUYNYdiU2+TnJuF+Fu2yXhiSB3Vbzxq6He0CfJ7/CJojJJup9mppmktwgaH9KkZ8bGRKptoc2AmGdjj/tznOHSVIEw17cHEI33Wipu0c+nWDNJ/BLNsVK0Dy8vLggywyXHdPiPsPU/vmKOIkqOfBBCLGmSobsmWAGbbbMneXr7qH1gpzHmGkxlksj2ylW8fU3iLYgqiPod1mZk61aGAccchzWDtscNMRhjDHbR/XBEoADpjXp2j6R6rqCONATzW0DxQtYjs3jjq
pERm2ro7UBA8m8ZBTlODDasfxfyNjXW3RHjhDYTo++qipRMUxAqSjoPwKciOXGj5rbdkXsCZuMAiSb4sw4csp+7edwDyfbxkfJUBhDl9tU/p6qrs1839LuUU4nQDymMHzzyEq/0onsZrHIR3BRaidYYDozAjEwx+RzVnL4q+MNNxvfoQ5PCj3fEK4s2eM5sIIV+IQV3VDJCaOQkVTuci9Fp+gDx1u0+cNLeYXCFUB4y2fX0L/AVGv+xg1u4X7BGXr+KYa00ElBA7wrYs7Qj6/zsyqHZf2KFd1KsDM2iGaAlJjhgEk5ZnqXXTw6muiKdVUZr5BBar4ds2YMLu6ySYtpD2LmH8d+hhXUYLqzBKXc87b1XdhVTiHqP3aSSkbFAZHtfG6GJ4fBYe6A2k9+NzjaBIBDB05zzBBk9MCfBjUoB2l9k3OeQVvDxXixJ6P/qhSZUFhaQkddlsuZrfSh4Vj0yAACRI5QaXrNTUbtYQvRVcYtseixk3Sxe9PwJG70qDyD6cknU9b/w1KnM6GzX65o8mDKlJ/wqpWCHDMCVjLLZYX7afpFtPhaEaw4EO+olLZ9e4+U+ZJC0BUwKZD3uipmARdKTXN6AjXBp3hmcUIVuYAQpDcRJUZljZTnc8yhOw+Nly33MWckE39BEEY9QsiUPnaS+7zpuN1yxg1cVl82MwXBdSArTAED1YZhEl/R9+pjCYHAAQf8j1Nn4ND3GcHxx6i3KoK/IQSFiQGx0S8fpqyJ1wnp4Ncpyy3gASHoHBu2VbFupeSCSp9LtvG+fpspTOnI0i8Io3584IlMjWDdjPKLcIm3zbQocPJvqIoiyfXoVIZ+YagqmAIiAHSf1CIrxyaqvH5pon5ttpy7y4EX2mAlJ5DJ/kyairMpZ/6fnl8maN/NIGO/R2WCJeO/ktwdXQ0q3SCeLrhszWePRgtChLln3iKTwJg9aQqPpz+xVe0B180SLS+lPTbaGJ8YBkCYVGUTnSipB6v9PU3Oie8JiNoSA4iV3OQx6T7kuPCUVwS5vuq5H4IX+rCwi3l+tWefrDJezNggdwHjo9+SrAXmbSG7j9MSV9UHyBEjmpkgBBHdMOcq2nNdcama0BQfMbN+DJ/1xGqAmAhIEw6RUX7zilvBO1YCqymfJe51DhrCaLXAt6/K43leukiWsMDVaoFAznp/pfbH/u/YAsLkDTurxJKi+rRXD9QY0oiT1ScDkC+/bAychbpZi62nw9L9maFoBsN7+eV0E6ZZZ8l382bUGUmmLK/M3z2tzgGyE3WOKFKQvB1m9O/hS/ZhsEfXak9fK2ZCUASqio0hkGEsHSPiunLQhITK5hMZQ8bkqXJGl3r0BcRyoK4Bk4AEky62D7yQNuTpckahU0CI3SML3AoiW3kJIoVSDUIiPLoCHILFyd7NquEL+f+SumjVWAkPPHlHfvHjYGcLG+22CgnJ6v/WrsBJRMqXRdQjSeUXYK/jORvW74P1L2hGgD/BAjVF1uj3jijicIqApCEfsRkw0Duj/GhEkoocdITSigL9H8BBgBQhA/bIb53pQAAAABJRU5ErkJggg==);background-position:top left;background-repeat:no-repeat;background-size:100%;opacity:0.25}.displacy iframe{margin:0;padding:1.5em 0 0 0;border:2px solid #f9f9f9}.displacy iframe.column{width:44%}.displacy iframe.column:first-child{margin-right:2%}.displacy iframe+a.view-displacy{position:absolute;top:0.5em;right:0.5em;font-size:0.6em;-webkit-transition:all 0.4s ease-in-out;-ms-transition:all 0.4s ease-in-out;-moz-transition:all 0.4s ease-in-out;-o-transition:all 0.4s ease-in-out;transition:all 0.4s ease-in-out;font-weight:bold;opacity:0.25}.displacy:hover iframe+a.view-displacy,.displacy:hover:after{-webkit-transition:all 0.4s ease-in-out;-ms-transition:all 0.4s ease-in-out;-moz-transition:all 0.4s ease-in-out;-o-transition:all 0.4s ease-in-out;transition:all 0.4s ease-in-out;opacity:1}#displacy{overflow:visible !important}#displacy .words .word:after,#displacy .arrows .arrow:before{font:bold 0.6em/1em "Karla",Arial,sans-serif;text-transform:uppercase;padding:0.25em 0 0 0}#displacy .words .word span{line-height:1.25em;margin:0;padding:1em 0 0 0;display:inline-block}#displacy .arrows .arrow.bad:before{border-color:#e80037}#displacy .arrows .arrow.bad:after{border-top-color:#e80037}#displacy .words .word.bad span{color:#e80037}#displacy .arrows .arrow.good:before{border-color:#00cc3a}#displacy .arrows .arrow.good:after{border-top-color:#00cc3a}#displacy .words .word.good span{color:#00cc3a}#displacy .arrows .arrow.highlight:before{border-color:#ffa400}#displacy .arrows .arrow.highlight:after{border-top-color:#ffa400}#displacy .words .word.highlight span{color:#ffa400}#displacy .arrows .arrow.lowlight:before{border-color:#a7aaa2}#displacy .arrows .arrow.lowlight:after{border-top-color:#a7aaa2}#displacy .words .word.lowlight span{color:#a7aaa2}#displacy .arrows .arrow.light:before{border-style:dashed !important}@media screen and (max-width: 950px){main{width:95%}}@media screen and (max-width: 
700px){body{font-size:1.2em}.discuss{display:inline;float:none;text-align:left !important}}@media screen and (max-width: 480px){.intro p{font-size:1em;line-height:1.5em}.content{font-size:1.25em;line-height:2em}article table{font-size:0.65em;line-height:1.25em;table-layout:auto}article table td{white-space:wrap}h1,.slogan{z-index:1}}
diff --git a/website/docs/legacy/resources/fonts/inconsolata-bold.eot b/website/docs/legacy/resources/fonts/inconsolata-bold.eot
deleted file mode 100644
index 051e62a55..000000000
Binary files a/website/docs/legacy/resources/fonts/inconsolata-bold.eot and /dev/null differ
diff --git a/website/docs/legacy/resources/fonts/inconsolata-bold.svg b/website/docs/legacy/resources/fonts/inconsolata-bold.svg
deleted file mode 100644
index a02a7215a..000000000
--- a/website/docs/legacy/resources/fonts/inconsolata-bold.svg
+++ /dev/null
@@ -1,230 +0,0 @@
[... 230 deleted lines of SVG font glyph data (inconsolata-bold.svg); the XML markup was stripped during extraction and is not shown ...]
\ No newline at end of file
diff --git a/website/docs/legacy/resources/fonts/inconsolata-bold.ttf b/website/docs/legacy/resources/fonts/inconsolata-bold.ttf
deleted file mode 100644
index 036193c1e..000000000
Binary files a/website/docs/legacy/resources/fonts/inconsolata-bold.ttf and /dev/null differ
diff --git a/website/docs/legacy/resources/fonts/inconsolata-bold.woff b/website/docs/legacy/resources/fonts/inconsolata-bold.woff
deleted file mode 100644
index bf577a1f8..000000000
Binary files a/website/docs/legacy/resources/fonts/inconsolata-bold.woff and /dev/null differ
diff --git a/website/docs/legacy/resources/fonts/inconsolata-bold.woff2 b/website/docs/legacy/resources/fonts/inconsolata-bold.woff2
deleted file mode 100644
index 1c09d2780..000000000
Binary files a/website/docs/legacy/resources/fonts/inconsolata-bold.woff2 and /dev/null differ
diff --git a/website/docs/legacy/resources/fonts/inconsolata-regular.eot b/website/docs/legacy/resources/fonts/inconsolata-regular.eot
deleted file mode 100644
index 6c3f16879..000000000
Binary files a/website/docs/legacy/resources/fonts/inconsolata-regular.eot and /dev/null differ
diff --git a/website/docs/legacy/resources/fonts/inconsolata-regular.svg b/website/docs/legacy/resources/fonts/inconsolata-regular.svg
deleted file mode 100644
index f4c2bffe8..000000000
--- a/website/docs/legacy/resources/fonts/inconsolata-regular.svg
+++ /dev/null
@@ -1,229 +0,0 @@
[... 229 deleted lines of SVG font glyph data (inconsolata-regular.svg); the XML markup was stripped during extraction and is not shown ...]
\ No newline at end of file
diff --git a/website/docs/legacy/resources/fonts/inconsolata-regular.ttf b/website/docs/legacy/resources/fonts/inconsolata-regular.ttf
deleted file mode 100644
index c24284e62..000000000
Binary files a/website/docs/legacy/resources/fonts/inconsolata-regular.ttf and /dev/null differ
diff --git a/website/docs/legacy/resources/fonts/inconsolata-regular.woff b/website/docs/legacy/resources/fonts/inconsolata-regular.woff
deleted file mode 100644
index 3ef05eea4..000000000
Binary files a/website/docs/legacy/resources/fonts/inconsolata-regular.woff and /dev/null differ
diff --git a/website/docs/legacy/resources/fonts/inconsolata-regular.woff2 b/website/docs/legacy/resources/fonts/inconsolata-regular.woff2
deleted file mode 100644
index 56edb28d0..000000000
Binary files a/website/docs/legacy/resources/fonts/inconsolata-regular.woff2 and /dev/null differ
diff --git a/website/docs/legacy/resources/fonts/karla-bold.eot b/website/docs/legacy/resources/fonts/karla-bold.eot
deleted file mode 100644
index b9738666a..000000000
Binary files a/website/docs/legacy/resources/fonts/karla-bold.eot and /dev/null differ
diff --git a/website/docs/legacy/resources/fonts/karla-bold.svg b/website/docs/legacy/resources/fonts/karla-bold.svg
deleted file mode 100644
index 358074020..000000000
--- a/website/docs/legacy/resources/fonts/karla-bold.svg
+++ /dev/null
@@ -1,346 +0,0 @@
[... 346 deleted lines of SVG font glyph data (karla-bold.svg); the XML markup was stripped during extraction and is not shown ...]
\ No newline at end of file
diff --git a/website/docs/legacy/resources/fonts/karla-bold.ttf b/website/docs/legacy/resources/fonts/karla-bold.ttf
deleted file mode 100644
index cd8c4c28a..000000000
Binary files a/website/docs/legacy/resources/fonts/karla-bold.ttf and /dev/null differ
diff --git a/website/docs/legacy/resources/fonts/karla-bold.woff b/website/docs/legacy/resources/fonts/karla-bold.woff
deleted file mode 100644
index 433b0429d..000000000
Binary files a/website/docs/legacy/resources/fonts/karla-bold.woff and /dev/null differ
diff --git a/website/docs/legacy/resources/fonts/karla-bold.woff2 b/website/docs/legacy/resources/fonts/karla-bold.woff2
deleted file mode 100644
index 291f5d6c3..000000000
Binary files a/website/docs/legacy/resources/fonts/karla-bold.woff2 and /dev/null differ
diff --git a/website/docs/legacy/resources/fonts/karla-bolditalic.eot b/website/docs/legacy/resources/fonts/karla-bolditalic.eot
deleted file mode 100644
index 4fcdaf898..000000000
Binary files a/website/docs/legacy/resources/fonts/karla-bolditalic.eot and /dev/null differ
diff --git a/website/docs/legacy/resources/fonts/karla-bolditalic.svg b/website/docs/legacy/resources/fonts/karla-bolditalic.svg
deleted file mode 100644
index 55f796285..000000000
--- a/website/docs/legacy/resources/fonts/karla-bolditalic.svg
+++ /dev/null
@@ -1,351 +0,0 @@
[... 351 deleted lines of SVG font glyph data (karla-bolditalic.svg); the XML markup was stripped during extraction and is not shown ...]
\ No newline at end of file
diff --git a/website/docs/legacy/resources/fonts/karla-bolditalic.ttf b/website/docs/legacy/resources/fonts/karla-bolditalic.ttf
deleted file mode 100644
index 0136b4705..000000000
Binary files a/website/docs/legacy/resources/fonts/karla-bolditalic.ttf and /dev/null differ
diff --git a/website/docs/legacy/resources/fonts/karla-bolditalic.woff b/website/docs/legacy/resources/fonts/karla-bolditalic.woff
deleted file mode 100644
index 7a879c8a3..000000000
Binary files a/website/docs/legacy/resources/fonts/karla-bolditalic.woff and /dev/null differ
diff --git a/website/docs/legacy/resources/fonts/karla-bolditalic.woff2 b/website/docs/legacy/resources/fonts/karla-bolditalic.woff2
deleted file mode 100644
index af436a759..000000000
Binary files a/website/docs/legacy/resources/fonts/karla-bolditalic.woff2 and /dev/null differ
diff --git a/website/docs/legacy/resources/fonts/karla-italic.eot b/website/docs/legacy/resources/fonts/karla-italic.eot
deleted file mode 100644
index 0d09a4913..000000000
Binary files a/website/docs/legacy/resources/fonts/karla-italic.eot and /dev/null differ
diff --git a/website/docs/legacy/resources/fonts/karla-italic.svg b/website/docs/legacy/resources/fonts/karla-italic.svg
deleted file mode 100644
index b4aa9c4b3..000000000
--- a/website/docs/legacy/resources/fonts/karla-italic.svg
+++ /dev/null
@@ -1,351 +0,0 @@
[... 351 deleted lines of SVG font glyph data (karla-italic.svg); the XML markup was stripped during extraction and is not shown ...]
\ No newline at end of file
diff --git a/website/docs/legacy/resources/fonts/karla-italic.ttf b/website/docs/legacy/resources/fonts/karla-italic.ttf
deleted file mode 100644
index 409634765..000000000
Binary files a/website/docs/legacy/resources/fonts/karla-italic.ttf and /dev/null differ
diff --git a/website/docs/legacy/resources/fonts/karla-italic.woff b/website/docs/legacy/resources/fonts/karla-italic.woff
deleted file mode 100644
index cb9ae0eed..000000000
Binary files a/website/docs/legacy/resources/fonts/karla-italic.woff and /dev/null differ
diff --git a/website/docs/legacy/resources/fonts/karla-italic.woff2 b/website/docs/legacy/resources/fonts/karla-italic.woff2
deleted file mode 100644
index 4a8a0ca59..000000000
Binary files a/website/docs/legacy/resources/fonts/karla-italic.woff2 and /dev/null differ
diff --git a/website/docs/legacy/resources/fonts/karla-regular.eot b/website/docs/legacy/resources/fonts/karla-regular.eot
deleted file mode 100644
index 0f46429b6..000000000
Binary files a/website/docs/legacy/resources/fonts/karla-regular.eot and /dev/null differ
diff --git a/website/docs/legacy/resources/fonts/karla-regular.svg b/website/docs/legacy/resources/fonts/karla-regular.svg
deleted file mode 100644
index f8cd77cce..000000000
--- a/website/docs/legacy/resources/fonts/karla-regular.svg
+++ /dev/null
@@ -1,351 +0,0 @@
[... 351 deleted lines of SVG font glyph data (karla-regular.svg); the XML markup was stripped during extraction and is not shown ...]
\ No newline at end of file
diff --git a/website/docs/legacy/resources/fonts/karla-regular.ttf b/website/docs/legacy/resources/fonts/karla-regular.ttf
deleted file mode 100644
index bf3efe9dc..000000000
Binary files a/website/docs/legacy/resources/fonts/karla-regular.ttf and /dev/null differ
diff --git a/website/docs/legacy/resources/fonts/karla-regular.woff b/website/docs/legacy/resources/fonts/karla-regular.woff
deleted file mode 100644
index 6860fde6a..000000000
Binary files a/website/docs/legacy/resources/fonts/karla-regular.woff and /dev/null differ
diff --git a/website/docs/legacy/resources/fonts/karla-regular.woff2 b/website/docs/legacy/resources/fonts/karla-regular.woff2
deleted file mode 100644
index d49ef1c09..000000000
Binary files a/website/docs/legacy/resources/fonts/karla-regular.woff2 and /dev/null differ
diff --git a/website/docs/legacy/resources/img/logo.png b/website/docs/legacy/resources/img/logo.png
deleted file mode 100644
index 362b31bf7..000000000
Binary files a/website/docs/legacy/resources/img/logo.png and /dev/null differ
diff --git a/website/docs/legacy/resources/img/logo.svg b/website/docs/legacy/resources/img/logo.svg
deleted file mode 100644
index 7f7f165dc..000000000
--- a/website/docs/legacy/resources/img/logo.svg
+++ /dev/null
@@ -1,46 +0,0 @@
[... 46 deleted lines of SVG logo markup (logo.svg); the XML content was stripped during extraction and is not shown ...]
diff --git a/website/docs/legacy/resources/js/prism.js b/website/docs/legacy/resources/js/prism.js
deleted file mode 100644
index b5c54306e..000000000
--- a/website/docs/legacy/resources/js/prism.js
+++ /dev/null
@@ -1,1692 +0,0 @@
-/* http://prismjs.com/download.html?themes=prism-okaidia&languages=markup+css+clike+javascript+bash+c+cpp+css-extras+git+haml+jade+latex+makefile+markdown+php+php-extras+powershell+python+ruby+sass+scss+sql+wiki */
-var _self = (typeof window !== 'undefined')
- ? window // if in browser
- : (
- (typeof WorkerGlobalScope !== 'undefined' && self instanceof WorkerGlobalScope)
- ? self // if in worker
- : {} // if in node js
- );
-
-/**
- * Prism: Lightweight, robust, elegant syntax highlighting
- * MIT license http://www.opensource.org/licenses/mit-license.php/
- * @author Lea Verou http://lea.verou.me
- */
-
-var Prism = (function(){
-
-// Private helper vars
-var lang = /\blang(?:uage)?-(?!\*)(\w+)\b/i;
-
-var _ = _self.Prism = {
- util: {
- encode: function (tokens) {
- if (tokens instanceof Token) {
- return new Token(tokens.type, _.util.encode(tokens.content), tokens.alias);
- } else if (_.util.type(tokens) === 'Array') {
- return tokens.map(_.util.encode);
- } else {
-				return tokens.replace(/&/g, '&amp;').replace(/</g, '&lt;').replace(/\u00a0/g, ' ');
[... a large block of prism.js was stripped during extraction here: the remaining _.util helpers, the _.languages extend/insertBefore/DFS utilities, highlightAll, highlightElement, highlight, and the head of tokenize ...]
-				if (strarr.length > text.length) {
- // Something went terribly wrong, ABORT, ABORT!
- break tokenloop;
- }
-
- if (str instanceof Token) {
- continue;
- }
-
- pattern.lastIndex = 0;
-
- var match = pattern.exec(str);
-
- if (match) {
- if(lookbehind) {
- lookbehindLength = match[1].length;
- }
-
- var from = match.index - 1 + lookbehindLength,
- match = match[0].slice(lookbehindLength),
- len = match.length,
- to = from + len,
- before = str.slice(0, from + 1),
- after = str.slice(to + 1);
-
- var args = [i, 1];
-
- if (before) {
- args.push(before);
- }
-
- var wrapped = new Token(token, inside? _.tokenize(match, inside) : match, alias);
-
- args.push(wrapped);
-
- if (after) {
- args.push(after);
- }
-
- Array.prototype.splice.apply(strarr, args);
- }
- }
- }
- }
-
- return strarr;
- },
-
- hooks: {
- all: {},
-
- add: function (name, callback) {
- var hooks = _.hooks.all;
-
- hooks[name] = hooks[name] || [];
-
- hooks[name].push(callback);
- },
-
- run: function (name, env) {
- var callbacks = _.hooks.all[name];
-
- if (!callbacks || !callbacks.length) {
- return;
- }
-
- for (var i=0, callback; callback = callbacks[i++];) {
- callback(env);
- }
- }
- }
-};
-
-var Token = _.Token = function(type, content, alias) {
- this.type = type;
- this.content = content;
- this.alias = alias;
-};
-
-Token.stringify = function(o, language, parent) {
- if (typeof o == 'string') {
- return o;
- }
-
- if (_.util.type(o) === 'Array') {
- return o.map(function(element) {
- return Token.stringify(element, language, o);
- }).join('');
- }
-
- var env = {
- type: o.type,
- content: Token.stringify(o.content, language, parent),
- tag: 'span',
- classes: ['token', o.type],
- attributes: {},
- language: language,
- parent: parent
- };
-
- if (env.type == 'comment') {
- env.attributes['spellcheck'] = 'true';
- }
-
- if (o.alias) {
- var aliases = _.util.type(o.alias) === 'Array' ? o.alias : [o.alias];
- Array.prototype.push.apply(env.classes, aliases);
- }
-
- _.hooks.run('wrap', env);
-
- var attributes = '';
-
- for (var name in env.attributes) {
- attributes += name + '="' + (env.attributes[name] || '') + '"';
- }
-
-	return '<' + env.tag + ' class="' + env.classes.join(' ') + '" ' + attributes + '>' + env.content + '</' + env.tag + '>';
-
-};
-
-if (!_self.document) {
- if (!_self.addEventListener) {
- // in Node.js
- return _self.Prism;
- }
- // In worker
- _self.addEventListener('message', function(evt) {
- var message = JSON.parse(evt.data),
- lang = message.language,
- code = message.code;
-
- _self.postMessage(JSON.stringify(_.util.encode(_.tokenize(code, _.languages[lang]))));
- _self.close();
- }, false);
-
- return _self.Prism;
-}
-
-// Get current script and highlight
-var script = document.getElementsByTagName('script');
-
-script = script[script.length - 1];
-
-if (script) {
- _.filename = script.src;
-
- if (document.addEventListener && !script.hasAttribute('data-manual')) {
- document.addEventListener('DOMContentLoaded', _.highlightAll);
- }
-}
-
-return _self.Prism;
-
-})();
-
-if (typeof module !== 'undefined' && module.exports) {
- module.exports = Prism;
-}
-;
-Prism.languages.markup = {
-	'comment': /<!--[\w\W]*?-->/,
-	'prolog': /<\?[\w\W]+?\?>/,
-	'doctype': /<!DOCTYPE[\w\W]+?>/,
-	'cdata': /<!\[CDATA\[[\w\W]*?]]>/i,
- 'tag': {
- pattern: /<\/?[^\s>\/]+(?:\s+[^\s>\/=]+(?:=(?:("|')(?:\\\1|\\?(?!\1)[\w\W])*\1|[^\s'">=]+))?)*\s*\/?>/i,
- inside: {
- 'tag': {
- pattern: /^<\/?[^\s>\/]+/i,
- inside: {
- 'punctuation': /^<\/?/,
- 'namespace': /^[^\s>\/:]+:/
- }
- },
- 'attr-value': {
- pattern: /=(?:('|")[\w\W]*?(\1)|[^\s>]+)/i,
- inside: {
- 'punctuation': /[=>"']/
- }
- },
- 'punctuation': /\/?>/,
- 'attr-name': {
- pattern: /[^\s>\/]+/,
- inside: {
- 'namespace': /^[^\s>\/:]+:/
- }
- }
-
- }
- },
-	'entity': /&#?[\da-z]{1,8};/i
-};
-
-// Plugin to make entity title show the real entity, idea by Roman Komarov
-Prism.hooks.add('wrap', function(env) {
-
- if (env.type === 'entity') {
-		env.attributes['title'] = env.content.replace(/&amp;/, '&');
- }
-});
-;
-Prism.languages.css = {
- 'comment': /\/\*[\w\W]*?\*\//,
- 'atrule': {
- pattern: /@[\w-]+?.*?(;|(?=\s*\{))/i,
- inside: {
- 'rule': /@[\w-]+/
- // See rest below
- }
- },
- 'url': /url\((?:(["'])(\\(?:\r\n|[\w\W])|(?!\1)[^\\\r\n])*\1|.*?)\)/i,
- 'selector': /[^\{\}\s][^\{\};]*?(?=\s*\{)/,
- 'string': /("|')(\\(?:\r\n|[\w\W])|(?!\1)[^\\\r\n])*\1/,
- 'property': /(\b|\B)[\w-]+(?=\s*:)/i,
- 'important': /\B!important\b/i,
- 'function': /[-a-z0-9]+(?=\()/i,
- 'punctuation': /[(){};:]/
-};
-
-Prism.languages.css['atrule'].inside.rest = Prism.util.clone(Prism.languages.css);
-
-if (Prism.languages.markup) {
- Prism.languages.insertBefore('markup', 'tag', {
- 'style': {
- pattern: /