// Landing page module for the spacy.io website (React).
//
// NOTE(review): this file appears to have been mangled by whitespace
// collapsing — all imports and both definitions are fused onto a handful of
// physical lines, and the JSX element markup inside `Landing`'s return value
// has been stripped, leaving only its text content. As-is the module does not
// parse; recover the original formatting/markup from version control before
// making code changes. The comments below were added without altering any
// original token.
import React from 'react' import PropTypes from 'prop-types' import { LandingHeader, LandingTitle, LandingSubtitle, LandingGrid, LandingCard, LandingCol, LandingDemo, LandingBannerGrid, LandingBanner, } from '../src/components/landing' import { H2 } from '../src/components/typography' import { InlineCode } from '../src/components/code' import { Ul, Li } from '../src/components/list' import Button from '../src/components/button' import Link from '../src/components/link' import QuickstartTraining from '../src/widgets/quickstart-training' import Project from '../src/widgets/project' import Features from '../src/widgets/features' import Layout from '../src/templates' import courseImage from '../public/images/course.jpg' import prodigyImage from '../public/images/prodigy_overview.jpg' import projectsImage from '../public/images/projects.png' import tailoredPipelinesImage from '../public/images/spacy-tailored-pipelines_wide.png' import { nightly, legacy } from '../meta/dynamicMeta.mjs' import Benchmarks from '../docs/usage/_benchmarks-models.mdx' import { ImageFill } from '../src/components/embed'
/**
 * Builds the Python quick-start snippet shown in the landing page's demo
 * pane: pip-install/model-download comment lines, loading `en_core_web_sm`,
 * running the pipeline over a sample text, and printing noun chunks, verbs
 * and named entities.
 *
 * @param {boolean} nightly - When truthy, the `pip install` line targets the
 *   pre-release `spacy-nightly --pre` package instead of `spacy`. (This
 *   parameter deliberately shadows the `nightly` flag imported from
 *   dynamicMeta.mjs; the caller passes that flag in.)
 * @returns {string} The complete code sample as a single template-literal
 *   string (the sample text's embedded newline is part of the snippet).
 */
function getCodeExample(nightly) { return `# pip install -U ${nightly ? 'spacy-nightly --pre' : 'spacy'} # python -m spacy download en_core_web_sm import spacy # Load English tokenizer, tagger, parser and NER nlp = spacy.load("en_core_web_sm") # Process whole documents text = ("When Sebastian Thrun started working on self-driving cars at " "Google in 2007, few people outside of the company took him " "seriously. 
“I can tell you very senior CEOs of major American " "car companies would shake my hand and turn away because I wasn’t " "worth talking to,” said Thrun, in an interview with Recode earlier " "this week.") doc = nlp(text) # Analyze syntax print("Noun phrases:", [chunk.text for chunk in doc.noun_chunks]) print("Verbs:", [token.lemma_ for token in doc if token.pos_ == "VERB"]) # Find named entities, phrases and concepts for entity in doc.ents: print(entity.text, entity.label_) ` }
/**
 * Landing page component. Builds the hero code sample via
 * `getCodeExample(nightly)` and returns the page tree: hero title/subtitle,
 * quick-start demo, features, banners (tailored pipelines, Prodigy, course),
 * project/training sections and benchmarks.
 *
 * NOTE(review): the JSX tags of the returned element tree are missing from
 * this copy — only the page's text content survives between `return (` and
 * the closing `)`. Do not treat the return body below as valid JSX.
 */
const Landing = () => { const codeExample = getCodeExample(nightly) return ( Industrial-Strength
Natural Language
Processing
in Python
spaCy is designed to help you do real work — to build real products, or gather real insights. The library respects your time, and tries to avoid wasting it. It's easy to install, and its API is simple and productive. spaCy excels at large-scale information extraction tasks. It's written from the ground up in carefully memory-managed Cython. If your application needs to process entire web dumps, spaCy is the library you want to be using. In the five years since its release, spaCy has become an industry standard with a huge ecosystem. Choose from a variety of plugins, integrate with your machine learning stack and build custom components and workflows. {codeExample}

Features

Get a custom spaCy pipeline, tailor-made for your NLP problem by spaCy's core developers.

  • Streamlined. Nobody knows spaCy better than we do. Send us your pipeline requirements and we'll be ready to start producing your solution in no time at all.
  • Production ready. spaCy pipelines are robust and easy to deploy. You'll get a complete spaCy project folder which is ready to spacy project run.
  • Predictable. You'll know exactly what you're going to get and what it's going to cost. We quote fees up-front, let you try before you buy, and don't charge for over-runs at our end — all the risk is on us.
  • Maintainable. spaCy is an industry standard, and we'll deliver your pipeline with full code, data, tests and documentation, so your team can retrain, update and extend the solution as your requirements change.

Prodigy is an annotation tool so efficient that data scientists can do the annotation themselves, enabling a new level of rapid iteration. Whether you're working on entity recognition, intent detection or image classification, Prodigy can help you{' '} train and evaluate your models faster.

Reproducible training for custom pipelines

spaCy v3.0 introduces a comprehensive and extensible system for{' '} configuring your training runs. Your configuration file will describe every detail of your training run, with no hidden defaults, making it easy to rerun your experiments and track changes. You can use the quickstart widget or the{' '} init config {' '} command to get started, or clone a project template for an end-to-end workflow.




The easiest way to get started is to clone a project template and run it – for example, this template for training a{' '} part-of-speech tagger and{' '} dependency parser on a Universal Dependencies treebank.

End-to-end workflows from prototype to production

spaCy's new project system gives you a smooth path from prototype to production. It lets you keep track of all those{' '} data transformation, preprocessing and{' '} training steps, so you can make sure your project is always ready to hand over for automation. It features source asset download, command execution, checksum verification, and caching with a variety of backends and integrations.

spaCy v3.0 features all new transformer-based pipelines{' '} that bring spaCy's accuracy right up to the current{' '} state-of-the-art. You can use any pretrained transformer to train your own pipelines, and even share one transformer between multiple components with multi-task learning. Training is now fully configurable and extensible, and you can define your own custom models using{' '} PyTorch, TensorFlow and other frameworks.

In this free and interactive online course you’ll learn how to use spaCy to build advanced natural language understanding systems, using both rule-based and machine learning approaches. It includes{' '} 55 exercises featuring videos, slide decks, multiple-choice questions and interactive coding practice in the browser.

Benchmarks

spaCy v3.0 introduces transformer-based pipelines that bring spaCy's accuracy right up to the current state-of-the-art. You can also use a CPU-optimized pipeline, which is less accurate but much cheaper to run.

) } export default Landing