Merge pull request #1 from explosion/master

Update
Shen Qin 2022-06-30 12:40:53 +08:00 committed by GitHub
commit e266e07d2f
41 changed files with 602 additions and 114 deletions

@@ -27,7 +27,6 @@ steps:
   - script: python -m mypy spacy
     displayName: 'Run mypy'
-    condition: ne(variables['python_version'], '3.10')

   - task: DeleteFiles@1
     inputs:
@@ -64,12 +63,12 @@ steps:
     displayName: "Run GPU tests"
     condition: eq(${{ parameters.gpu }}, true)

-  - script: |
-      python -m spacy download ca_core_news_sm
-      python -m spacy download ca_core_news_md
-      python -c "import spacy; nlp=spacy.load('ca_core_news_sm'); doc=nlp('test')"
-    displayName: 'Test download CLI'
-    condition: eq(variables['python_version'], '3.8')
+  # - script: |
+  #     python -m spacy download ca_core_news_sm
+  #     python -m spacy download ca_core_news_md
+  #     python -c "import spacy; nlp=spacy.load('ca_core_news_sm'); doc=nlp('test')"
+  #   displayName: 'Test download CLI'
+  #   condition: eq(variables['python_version'], '3.8')

   - script: |
       python -m spacy convert extra/example_data/ner_example_data/ner-token-per-line-conll2003.json .
@@ -93,17 +92,17 @@ steps:
     displayName: 'Test train CLI'
     condition: eq(variables['python_version'], '3.8')

-  - script: |
-      python -c "import spacy; config = spacy.util.load_config('ner.cfg'); config['components']['ner'] = {'source': 'ca_core_news_sm'}; config.to_disk('ner_source_sm.cfg')"
-      PYTHONWARNINGS="error,ignore::DeprecationWarning" python -m spacy assemble ner_source_sm.cfg output_dir
-    displayName: 'Test assemble CLI'
-    condition: eq(variables['python_version'], '3.8')
-
-  - script: |
-      python -c "import spacy; config = spacy.util.load_config('ner.cfg'); config['components']['ner'] = {'source': 'ca_core_news_md'}; config.to_disk('ner_source_md.cfg')"
-      python -m spacy assemble ner_source_md.cfg output_dir 2>&1 | grep -q W113
-    displayName: 'Test assemble CLI vectors warning'
-    condition: eq(variables['python_version'], '3.8')
+  # - script: |
+  #     python -c "import spacy; config = spacy.util.load_config('ner.cfg'); config['components']['ner'] = {'source': 'ca_core_news_sm'}; config.to_disk('ner_source_sm.cfg')"
+  #     PYTHONWARNINGS="error,ignore::DeprecationWarning" python -m spacy assemble ner_source_sm.cfg output_dir
+  #   displayName: 'Test assemble CLI'
+  #   condition: eq(variables['python_version'], '3.8')
+  #
+  # - script: |
+  #     python -c "import spacy; config = spacy.util.load_config('ner.cfg'); config['components']['ner'] = {'source': 'ca_core_news_md'}; config.to_disk('ner_source_md.cfg')"
+  #     python -m spacy assemble ner_source_md.cfg output_dir 2>&1 | grep -q W113
+  #   displayName: 'Test assemble CLI vectors warning'
+  #   condition: eq(variables['python_version'], '3.8')

   - script: |
       python .github/validate_universe_json.py website/meta/universe.json
@@ -111,7 +110,7 @@ steps:
     condition: eq(variables['python_version'], '3.8')

   - script: |
-      ${{ parameters.prefix }} python -m pip install thinc-apple-ops
+      ${{ parameters.prefix }} python -m pip install --pre thinc-apple-ops
       ${{ parameters.prefix }} python -m pytest --pyargs spacy
     displayName: "Run CPU tests with thinc-apple-ops"
-    condition: and(startsWith(variables['imageName'], 'macos'), eq(variables['python.version'], '3.9'))
+    condition: and(startsWith(variables['imageName'], 'macos'), eq(variables['python.version'], '3.10'))

.github/contributors/Lucaterre.md (new file)

@@ -0,0 +1,106 @@
# spaCy contributor agreement
This spaCy Contributor Agreement (**"SCA"**) is based on the
[Oracle Contributor Agreement](http://www.oracle.com/technetwork/oca-405177.pdf).
The SCA applies to any contribution that you make to any product or project
managed by us (the **"project"**), and sets out the intellectual property rights
you grant to us in the contributed materials. The term **"us"** shall mean
[ExplosionAI GmbH](https://explosion.ai/legal). The term
**"you"** shall mean the person or entity identified below.
If you agree to be bound by these terms, fill in the information requested
below and include the filled-in version with your first pull request, under the
folder [`.github/contributors/`](/.github/contributors/). The name of the file
should be your GitHub username, with the extension `.md`. For example, the user
example_user would create the file `.github/contributors/example_user.md`.
Read this agreement carefully before signing. These terms and conditions
constitute a binding legal agreement.
## Contributor Agreement
1. The term "contribution" or "contributed materials" means any source code,
object code, patch, tool, sample, graphic, specification, manual,
documentation, or any other material posted or submitted by you to the project.
2. With respect to any worldwide copyrights, or copyright applications and
registrations, in your contribution:
* you hereby assign to us joint ownership, and to the extent that such
assignment is or becomes invalid, ineffective or unenforceable, you hereby
grant to us a perpetual, irrevocable, non-exclusive, worldwide, no-charge,
royalty-free, unrestricted license to exercise all rights under those
copyrights. This includes, at our option, the right to sublicense these same
rights to third parties through multiple levels of sublicensees or other
licensing arrangements;
* you agree that each of us can do all things in relation to your
contribution as if each of us were the sole owners, and if one of us makes
a derivative work of your contribution, the one who makes the derivative
work (or has it made) will be the sole owner of that derivative work;
* you agree that you will not assert any moral rights in your contribution
against us, our licensees or transferees;
* you agree that we may register a copyright in your contribution and
exercise all ownership rights associated with it; and
* you agree that neither of us has any duty to consult with, obtain the
consent of, pay or render an accounting to the other for any use or
distribution of your contribution.
3. With respect to any patents you own, or that you can license without payment
to any third party, you hereby grant to us a perpetual, irrevocable,
non-exclusive, worldwide, no-charge, royalty-free license to:
* make, have made, use, sell, offer to sell, import, and otherwise transfer
your contribution in whole or in part, alone or in combination with or
included in any product, work or materials arising out of the project to
which your contribution was submitted, and
* at our option, to sublicense these same rights to third parties through
multiple levels of sublicensees or other licensing arrangements.
4. Except as set out above, you keep all right, title, and interest in your
contribution. The rights that you grant to us under these terms are effective
on the date you first submitted a contribution to us, even if your submission
took place before the date you sign these terms.
5. You covenant, represent, warrant and agree that:
* Each contribution that you submit is and shall be an original work of
authorship and you can legally grant the rights set out in this SCA;
* to the best of your knowledge, each contribution will not violate any
third party's copyrights, trademarks, patents, or other intellectual
property rights; and
* each contribution shall be in compliance with U.S. export control laws and
other applicable export and import laws. You agree to notify us if you
become aware of any circumstance which would make any of the foregoing
representations inaccurate in any respect. We may publicly disclose your
participation in the project, including the fact that you have signed the SCA.
6. This SCA is governed by the laws of the State of California and applicable
U.S. Federal law. Any choice of law rules will not apply.
7. Please place an “x” on one of the applicable statement below. Please do NOT
mark both statements:
* [x] I am signing on behalf of myself as an individual and no other person
or entity, including my employer, has or will have rights with respect to my
contributions.
* [ ] I am signing on behalf of my employer or a legal entity and I have the
actual authority to contractually bind that entity.
## Contributor Details
| Field | Entry |
|------------------------------- |---------------|
| Name | Lucas Terriel |
| Company name (if applicable) | |
| Title or role (if applicable) | |
| Date | 2022-06-20 |
| GitHub username | Lucaterre |
| Website (optional) | |


@@ -455,6 +455,10 @@ Regression tests are tests that refer to bugs reported in specific issues. They
 The test suite also provides [fixtures](https://github.com/explosion/spaCy/blob/master/spacy/tests/conftest.py) for different language tokenizers that can be used as function arguments of the same name and will be passed in automatically. Those should only be used for tests related to those specific languages. We also have [test utility functions](https://github.com/explosion/spaCy/blob/master/spacy/tests/util.py) for common operations, like creating a temporary file.

+### Testing Cython Code
+
+If you're developing Cython code (`.pyx` files), those extensions will need to be built before the test runner can test that code - otherwise it's going to run the tests with stale code from the last time the extension was built. You can build the extensions locally with `python setup.py build_ext -i`.
+
 ### Constructing objects and state

 Test functions usually follow the same simple structure: they set up some state, perform the operation you want to test and `assert` conditions that you expect to be true, usually before and after the operation.


@@ -5,7 +5,7 @@ requires = [
     "cymem>=2.0.2,<2.1.0",
     "preshed>=3.0.2,<3.1.0",
     "murmurhash>=0.28.0,<1.1.0",
-    "thinc>=8.1.0.dev2,<8.2.0",
+    "thinc>=8.1.0.dev3,<8.2.0",
     "pathy",
     "numpy>=1.15.0",
 ]


@@ -3,7 +3,7 @@ spacy-legacy>=3.0.9,<3.1.0
 spacy-loggers>=1.0.0,<2.0.0
 cymem>=2.0.2,<2.1.0
 preshed>=3.0.2,<3.1.0
-thinc>=8.1.0.dev2,<8.2.0
+thinc>=8.1.0.dev3,<8.2.0
 ml_datasets>=0.2.0,<0.3.0
 murmurhash>=0.28.0,<1.1.0
 wasabi>=0.9.1,<1.1.0


@@ -38,7 +38,7 @@ setup_requires =
     cymem>=2.0.2,<2.1.0
     preshed>=3.0.2,<3.1.0
     murmurhash>=0.28.0,<1.1.0
-    thinc>=8.1.0.dev2,<8.2.0
+    thinc>=8.1.0.dev3,<8.2.0
 install_requires =
     # Our libraries
     spacy-legacy>=3.0.9,<3.1.0
@@ -46,7 +46,7 @@ install_requires =
     murmurhash>=0.28.0,<1.1.0
     cymem>=2.0.2,<2.1.0
     preshed>=3.0.2,<3.1.0
-    thinc>=8.1.0.dev2,<8.2.0
+    thinc>=8.1.0.dev3,<8.2.0
     wasabi>=0.9.1,<1.1.0
     srsly>=2.4.3,<3.0.0
     catalogue>=2.0.6,<2.1.0
@@ -104,7 +104,7 @@ cuda114 =
 cuda115 =
     cupy-cuda115>=5.0.0b4,<11.0.0
 apple =
-    thinc-apple-ops>=0.0.4,<1.0.0
+    thinc-apple-ops>=0.1.0.dev0,<1.0.0
 # Language tokenizers with external dependencies
 ja =
     sudachipy>=0.5.2,!=0.6.1


@@ -1,6 +1,6 @@
 # fmt: off
 __title__ = "spacy"
-__version__ = "3.3.0"
+__version__ = "3.4.0"
 __download_url__ = "https://github.com/explosion/spacy-models/releases/download"
 __compatibility__ = "https://raw.githubusercontent.com/explosion/spacy-models/master/compatibility.json"
 __projects__ = "https://github.com/explosion/projects"


@@ -462,6 +462,23 @@ def git_sparse_checkout(repo, subpath, dest, branch):
         shutil.move(str(source_path), str(dest))


+def git_repo_branch_exists(repo: str, branch: str) -> bool:
+    """Uses 'git ls-remote' to check if a repository and branch exists
+
+    repo (str): URL to get repo.
+    branch (str): Branch on repo to check.
+    RETURNS (bool): True if repo:branch exists.
+    """
+    get_git_version()
+    cmd = f"git ls-remote {repo} {branch}"
+    # We might be tempted to use `--exit-code` with `git ls-remote`, but
+    # `run_command` handles the `returncode` for us, so we'll rely on
+    # the fact that stdout returns '' if the requested branch doesn't exist
+    ret = run_command(cmd, capture=True)
+    exists = ret.stdout != ""
+    return exists
+
+
 def get_git_version(
     error: str = "Could not run 'git'. Make sure it's installed and the executable is available.",
 ) -> Tuple[int, int]:
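For reference, the same check can be reproduced outside the spaCy codebase. This is only a sketch: it swaps spaCy's `run_command` wrapper for plain `subprocess`, and the repo URL, branch name, and function name are illustrative, not part of the diff.

```python
import subprocess

def repo_branch_exists(repo: str, branch: str) -> bool:
    # `git ls-remote` prints nothing on stdout when the requested ref doesn't exist.
    ret = subprocess.run(
        ["git", "ls-remote", repo, branch], capture_output=True, text=True
    )
    return ret.stdout != ""

print(repo_branch_exists("https://github.com/explosion/projects", "v3"))
```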


@@ -361,7 +361,7 @@ def debug_data(
             if label != "-"
         ]
         labels_with_counts = _format_labels(labels_with_counts, counts=True)
-        msg.text(f"Labels in train data: {_format_labels(labels)}", show=verbose)
+        msg.text(f"Labels in train data: {labels_with_counts}", show=verbose)
         missing_labels = model_labels - labels
         if missing_labels:
             msg.warn(


@@ -7,11 +7,11 @@ import re
 from ... import about
 from ...util import ensure_path
 from .._util import project_cli, Arg, Opt, COMMAND, PROJECT_FILE
-from .._util import git_checkout, get_git_version
+from .._util import git_checkout, get_git_version, git_repo_branch_exists

 DEFAULT_REPO = about.__projects__
 DEFAULT_PROJECTS_BRANCH = about.__projects_branch__
-DEFAULT_BRANCH = "master"
+DEFAULT_BRANCHES = ["main", "master"]


 @project_cli.command("clone")
@@ -20,7 +20,7 @@ def project_clone_cli(
     name: str = Arg(..., help="The name of the template to clone"),
     dest: Optional[Path] = Arg(None, help="Where to clone the project. Defaults to current working directory", exists=False),
     repo: str = Opt(DEFAULT_REPO, "--repo", "-r", help="The repository to clone from"),
-    branch: Optional[str] = Opt(None, "--branch", "-b", help="The branch to clone from"),
+    branch: Optional[str] = Opt(None, "--branch", "-b", help=f"The branch to clone from. If not provided, will attempt {', '.join(DEFAULT_BRANCHES)}"),
     sparse_checkout: bool = Opt(False, "--sparse", "-S", help="Use sparse Git checkout to only check out and clone the files needed. Requires Git v22.2+.")
     # fmt: on
 ):
@@ -33,9 +33,25 @@ def project_clone_cli(
     """
     if dest is None:
         dest = Path.cwd() / Path(name).parts[-1]
+
+    if repo == DEFAULT_REPO and branch is None:
+        branch = DEFAULT_PROJECTS_BRANCH
+
     if branch is None:
-        # If it's a user repo, we want to default to other branch
-        branch = DEFAULT_PROJECTS_BRANCH if repo == DEFAULT_REPO else DEFAULT_BRANCH
+        for default_branch in DEFAULT_BRANCHES:
+            if git_repo_branch_exists(repo, default_branch):
+                branch = default_branch
+                break
+        if branch is None:
+            default_branches_msg = ", ".join(f"'{b}'" for b in DEFAULT_BRANCHES)
+            msg.fail(
+                "No branch provided and attempted default "
+                f"branches {default_branches_msg} do not exist.",
+                exits=1,
+            )
+    else:
+        if not git_repo_branch_exists(repo, branch):
+            msg.fail(f"repo: {repo} (branch: {branch}) does not exist.", exits=1)
+    assert isinstance(branch, str)
     project_clone(name, dest, repo=repo, branch=branch, sparse_checkout=sparse_checkout)
@@ -61,9 +77,9 @@ def project_clone(
     try:
         git_checkout(repo, name, dest, branch=branch, sparse=sparse_checkout)
     except subprocess.CalledProcessError:
-        err = f"Could not clone '{name}' from repo '{repo_name}'"
+        err = f"Could not clone '{name}' from repo '{repo_name}' (branch '{branch}')"
         msg.fail(err, exits=1)
-    msg.good(f"Cloned '{name}' from {repo_name}", project_dir)
+    msg.good(f"Cloned '{name}' from '{repo_name}' (branch '{branch}')", project_dir)
     if not (project_dir / PROJECT_FILE).exists():
         msg.warn(f"No {PROJECT_FILE} found in directory")
     else:


@@ -64,8 +64,11 @@ class SpanRenderer:
         # Set up how the text and labels will be rendered
         self.direction = DEFAULT_DIR
         self.lang = DEFAULT_LANG
+        # These values are in px
         self.top_offset = options.get("top_offset", 40)
-        self.top_offset_step = options.get("top_offset_step", 17)
+        # This is how far under the top offset the span labels appear
+        self.span_label_offset = options.get("span_label_offset", 20)
+        self.offset_step = options.get("top_offset_step", 17)

         # Set up which templates will be used
         template = options.get("template")
@@ -161,8 +164,16 @@ class SpanRenderer:
             if entities:
                 slices = self._get_span_slices(token["entities"])
                 starts = self._get_span_starts(token["entities"])
+                total_height = (
+                    self.top_offset
+                    + self.span_label_offset
+                    + (self.offset_step * (len(entities) - 1))
+                )
                 markup += self.span_template.format(
-                    text=token["text"], span_slices=slices, span_starts=starts
+                    text=token["text"],
+                    span_slices=slices,
+                    span_starts=starts,
+                    total_height=total_height,
                 )
             else:
                 markup += escape_html(token["text"] + " ")
@@ -171,7 +182,7 @@ class SpanRenderer:
     def _get_span_slices(self, entities: List[Dict]) -> str:
         """Get the rendered markup of all Span slices"""
         span_slices = []
-        for entity, step in zip(entities, itertools.count(step=self.top_offset_step)):
+        for entity, step in zip(entities, itertools.count(step=self.offset_step)):
             color = self.colors.get(entity["label"].upper(), self.default_color)
             span_slice = self.span_slice_template.format(
                 bg=color, top_offset=self.top_offset + step
@@ -182,7 +193,7 @@ class SpanRenderer:
     def _get_span_starts(self, entities: List[Dict]) -> str:
         """Get the rendered markup of all Span start tokens"""
         span_starts = []
-        for entity, step in zip(entities, itertools.count(step=self.top_offset_step)):
+        for entity, step in zip(entities, itertools.count(step=self.offset_step)):
             color = self.colors.get(entity["label"].upper(), self.default_color)
             span_start = (
                 self.span_start_template.format(
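As a quick sanity check of the height calculation introduced above, here is the same arithmetic with the default option values shown in this diff (the example token count is made up):

```python
top_offset = 40         # options.get("top_offset", 40)
span_label_offset = 20  # options.get("span_label_offset", 20)
offset_step = 17        # options.get("top_offset_step", 17)

n_entities = 3  # a token covered by three overlapping spans
total_height = top_offset + span_label_offset + (offset_step * (n_entities - 1))
assert total_height == 94  # px reserved so the stacked labels don't overflow the line
```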


@@ -67,7 +67,7 @@ TPL_SPANS = """
 """

 TPL_SPAN = """
-<span style="font-weight: bold; display: inline-block; position: relative;">
+<span style="font-weight: bold; display: inline-block; position: relative; height: {total_height}px;">
     {text}
     {span_slices}
     {span_starts}


@@ -2,7 +2,8 @@ from .stop_words import STOP_WORDS
 from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
 from .lex_attrs import LEX_ATTRS
 from ..tokenizer_exceptions import BASE_EXCEPTIONS
+from ..punctuation import COMBINING_DIACRITICS_TOKENIZER_INFIXES
+from ..punctuation import COMBINING_DIACRITICS_TOKENIZER_SUFFIXES
 from ...language import Language, BaseDefaults
 from ...attrs import LANG
 from ...util import update_exc
@@ -16,6 +17,8 @@ class BulgarianDefaults(BaseDefaults):
     stop_words = STOP_WORDS
     tokenizer_exceptions = update_exc(BASE_EXCEPTIONS, TOKENIZER_EXCEPTIONS)
+    suffixes = COMBINING_DIACRITICS_TOKENIZER_SUFFIXES
+    infixes = COMBINING_DIACRITICS_TOKENIZER_INFIXES


 class Bulgarian(Language):


@@ -258,6 +258,10 @@ ALPHA = group_chars(
 ALPHA_LOWER = group_chars(_lower + _uncased)
 ALPHA_UPPER = group_chars(_upper + _uncased)

+_combining_diacritics = r"\u0300-\u036f"
+
+COMBINING_DIACRITICS = _combining_diacritics
+
 _units = (
     "km km² km³ m m² m³ dm dm² dm³ cm cm² cm³ mm mm² mm³ ha µm nm yd in ft "
     "kg g mg µg t lb oz m/s km/h kmh mph hPa Pa mbar mb MB kb KB gb GB tb "


@@ -1,5 +1,5 @@
 from .char_classes import LIST_PUNCT, LIST_ELLIPSES, LIST_QUOTES, LIST_CURRENCY
-from .char_classes import LIST_ICONS, HYPHENS, CURRENCY, UNITS
+from .char_classes import LIST_ICONS, HYPHENS, CURRENCY, UNITS, COMBINING_DIACRITICS
 from .char_classes import CONCAT_QUOTES, ALPHA_LOWER, ALPHA_UPPER, ALPHA, PUNCT
@@ -44,3 +44,23 @@ TOKENIZER_INFIXES = (
         r"(?<=[{a}0-9])[:<>=/](?=[{a}])".format(a=ALPHA),
     ]
 )
+
+
+# Some languages e.g. written with the Cyrillic alphabet permit the use of diacritics
+# to mark stressed syllables in words where stress is distinctive. Such languages
+# should use the COMBINING_DIACRITICS... suffix and infix regex lists in
+# place of the standard ones.
+COMBINING_DIACRITICS_TOKENIZER_SUFFIXES = list(TOKENIZER_SUFFIXES) + [
+    r"(?<=[{a}][{d}])\.".format(a=ALPHA, d=COMBINING_DIACRITICS),
+]
+
+COMBINING_DIACRITICS_TOKENIZER_INFIXES = list(TOKENIZER_INFIXES) + [
+    r"(?<=[{al}][{d}])\.(?=[{au}{q}])".format(
+        al=ALPHA_LOWER, au=ALPHA_UPPER, q=CONCAT_QUOTES, d=COMBINING_DIACRITICS
+    ),
+    r"(?<=[{a}][{d}]),(?=[{a}])".format(a=ALPHA, d=COMBINING_DIACRITICS),
+    r"(?<=[{a}][{d}])(?:{h})(?=[{a}])".format(
+        a=ALPHA, d=COMBINING_DIACRITICS, h=HYPHENS
+    ),
+    r"(?<=[{a}][{d}])[:<>=/](?=[{a}])".format(a=ALPHA, d=COMBINING_DIACRITICS),
+]
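To see what the new suffix pattern buys in isolation, here is a small hedged sketch: it builds just the period-after-stress-mark rule with plain `re` (the `COMBINING_DIACRITICS` value is copied from the diff; importing `ALPHA` from spaCy's char classes is an assumption about where that symbol lives):

```python
import re
from spacy.lang.char_classes import ALPHA

COMBINING_DIACRITICS = r"\u0300-\u036f"
suffix = r"(?<=[{a}][{d}])\.".format(a=ALPHA, d=COMBINING_DIACRITICS)

# "яйца̀." ends in a stress-marked vowel followed by a period;
# the lookbehind now lets the period be split off as a suffix.
assert re.search(suffix, "яйца̀.") is not None
```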


@@ -5,6 +5,8 @@ from .stop_words import STOP_WORDS
 from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
 from .lex_attrs import LEX_ATTRS
 from .lemmatizer import RussianLemmatizer
+from ..punctuation import COMBINING_DIACRITICS_TOKENIZER_INFIXES
+from ..punctuation import COMBINING_DIACRITICS_TOKENIZER_SUFFIXES
 from ...language import Language, BaseDefaults
@@ -12,6 +14,8 @@ class RussianDefaults(BaseDefaults):
     tokenizer_exceptions = TOKENIZER_EXCEPTIONS
     lex_attr_getters = LEX_ATTRS
     stop_words = STOP_WORDS
+    suffixes = COMBINING_DIACRITICS_TOKENIZER_SUFFIXES
+    infixes = COMBINING_DIACRITICS_TOKENIZER_INFIXES


 class Russian(Language):


@@ -6,6 +6,8 @@ from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
 from .stop_words import STOP_WORDS
 from .lex_attrs import LEX_ATTRS
 from .lemmatizer import UkrainianLemmatizer
+from ..punctuation import COMBINING_DIACRITICS_TOKENIZER_INFIXES
+from ..punctuation import COMBINING_DIACRITICS_TOKENIZER_SUFFIXES
 from ...language import Language, BaseDefaults
@@ -13,6 +15,8 @@ class UkrainianDefaults(BaseDefaults):
     tokenizer_exceptions = TOKENIZER_EXCEPTIONS
     lex_attr_getters = LEX_ATTRS
     stop_words = STOP_WORDS
+    suffixes = COMBINING_DIACRITICS_TOKENIZER_SUFFIXES
+    infixes = COMBINING_DIACRITICS_TOKENIZER_INFIXES


 class Ukrainian(Language):


@@ -0,0 +1,8 @@
import pytest


def test_bg_tokenizer_handles_final_diacritics(bg_tokenizer):
    text = "Ня̀маше яйца̀. Ня̀маше яйца̀."
    tokens = bg_tokenizer(text)
    assert tokens[1].text == "яйца̀"
    assert tokens[2].text == "."


@@ -1,3 +1,4 @@
+from string import punctuation
 import pytest
@@ -122,3 +123,36 @@ def test_ru_tokenizer_splits_bracket_period(ru_tokenizer):
     text = "(Раз, два, три, проверка)."
     tokens = ru_tokenizer(text)
     assert tokens[len(tokens) - 1].text == "."
+
+
+@pytest.mark.parametrize(
+    "text",
+    [
+        "рекоменду́я подда́ть жару́. Самого́ Баргамота",
+        "РЕКОМЕНДУ́Я ПОДДА́ТЬ ЖАРУ́. САМОГО́ БАРГАМОТА",
+        "рекоменду̍я подда̍ть жару̍.Самого̍ Баргамота",
+        "рекоменду̍я подда̍ть жару̍.'Самого̍ Баргамота",
+        "рекоменду̍я подда̍ть жару̍,самого̍ Баргамота",
+        "рекоменду̍я подда̍ть жару̍:самого̍ Баргамота",
+        "рекоменду̍я подда̍ть жару̍. самого̍ Баргамота",
+        "рекоменду̍я подда̍ть жару̍, самого̍ Баргамота",
+        "рекоменду̍я подда̍ть жару̍: самого̍ Баргамота",
+        "рекоменду̍я подда̍ть жару̍-самого̍ Баргамота",
+    ],
+)
+def test_ru_tokenizer_handles_final_diacritics(ru_tokenizer, text):
+    tokens = ru_tokenizer(text)
+    assert tokens[2].text in ("жару́", "ЖАРУ́", "жару̍")
+    assert tokens[3].text in punctuation
+
+
+@pytest.mark.parametrize(
+    "text",
+    [
+        "РЕКОМЕНДУ́Я ПОДДА́ТЬ ЖАРУ́.САМОГО́ БАРГАМОТА",
+        "рекоменду̍я подда̍ть жару́.самого́ Баргамота",
+    ],
+)
+def test_ru_tokenizer_handles_final_diacritic_and_period(ru_tokenizer, text):
+    tokens = ru_tokenizer(text)
+    assert tokens[2].text.lower() == "жару́.самого́"


@@ -140,3 +140,10 @@ def test_uk_tokenizer_splits_bracket_period(uk_tokenizer):
     text = "(Раз, два, три, проверка)."
     tokens = uk_tokenizer(text)
     assert tokens[len(tokens) - 1].text == "."
+
+
+def test_uk_tokenizer_handles_final_diacritics(uk_tokenizer):
+    text = "Хлібі́в не було́. Хлібі́в не було́."
+    tokens = uk_tokenizer(text)
+    assert tokens[2].text == "було́"
+    assert tokens[3].text == "."


@@ -158,13 +158,18 @@ def test_issue3209():
 def test_labels_from_BILUO():
-    """Test that labels are inferred correctly when there's a - in label.
-    """
+    """Test that labels are inferred correctly when there's a - in label."""
     nlp = English()
     ner = nlp.add_pipe("ner")
     ner.add_label("LARGE-ANIMAL")
     nlp.initialize()
-    move_names = ["O", "B-LARGE-ANIMAL", "I-LARGE-ANIMAL", "L-LARGE-ANIMAL", "U-LARGE-ANIMAL"]
+    move_names = [
+        "O",
+        "B-LARGE-ANIMAL",
+        "I-LARGE-ANIMAL",
+        "L-LARGE-ANIMAL",
+        "U-LARGE-ANIMAL",
+    ]
     labels = {"LARGE-ANIMAL"}
     assert ner.move_names == move_names
     assert set(ner.labels) == labels


@@ -589,6 +589,7 @@ def test_string_to_list_intify(value):
     assert string_to_list(value, intify=True) == [1, 2, 3]


+@pytest.mark.skip(reason="Temporarily skip for dev version")
 def test_download_compatibility():
     spec = SpecifierSet("==" + about.__version__)
     spec.prereleases = False
@@ -599,6 +600,7 @@ def test_download_compatibility():
     assert get_minor_version(about.__version__) == get_minor_version(version)


+@pytest.mark.skip(reason="Temporarily skip for dev version")
 def test_validate_compatibility_table():
     spec = SpecifierSet("==" + about.__version__)
     spec.prereleases = False


@@ -60,11 +60,12 @@ def test_readers():
     assert isinstance(extra_corpus, Callable)


+# TODO: enable IMDB test once Stanford servers are back up and running
 @pytest.mark.slow
 @pytest.mark.parametrize(
     "reader,additional_config",
     [
-        ("ml_datasets.imdb_sentiment.v1", {"train_limit": 10, "dev_limit": 10}),
+        # ("ml_datasets.imdb_sentiment.v1", {"train_limit": 10, "dev_limit": 10}),
         ("ml_datasets.dbpedia.v1", {"train_limit": 10, "dev_limit": 10}),
         ("ml_datasets.cmu_movies.v1", {"limit": 10, "freq_cutoff": 200, "split": 0.8}),
     ],


@@ -679,6 +679,31 @@ def test_projectivize(en_tokenizer):
     assert proj_heads == [3, 2, 3, 3, 3]
     assert nonproj_heads == [3, 2, 3, 3, 2]

+    # Test single token documents
+    doc = en_tokenizer("Conrail")
+    heads = [0]
+    deps = ["dep"]
+    example = Example.from_dict(doc, {"heads": heads, "deps": deps})
+    proj_heads, proj_labels = example.get_aligned_parse(projectivize=True)
+    assert proj_heads == heads
+    assert proj_labels == deps
+
+    # Test documents with no alignments
+    doc_a = Doc(
+        doc.vocab, words=["Double-Jointed"], spaces=[False], deps=["ROOT"], heads=[0]
+    )
+    doc_b = Doc(
+        doc.vocab,
+        words=["Double", "-", "Jointed"],
+        spaces=[True, True, True],
+        deps=["amod", "punct", "ROOT"],
+        heads=[2, 2, 2],
+    )
+    example = Example(doc_a, doc_b)
+    proj_heads, proj_deps = example.get_aligned_parse(projectivize=True)
+    assert proj_heads == [None]
+    assert proj_deps == [None]
+

 def test_iob_to_biluo():
     good_iob = ["O", "O", "B-LOC", "I-LOC", "O", "B-PERSON"]


@@ -1,6 +1,7 @@
 import pytest
 import numpy
 from spacy.tokens import Doc
+from spacy.vocab import Vocab

 from ..util import get_cosine, add_vecs_to_vocab
@@ -71,19 +72,17 @@ def test_vectors_similarity_DD(vocab, vectors):
 def test_vectors_similarity_TD(vocab, vectors):
     [(word1, vec1), (word2, vec2)] = vectors
     doc = Doc(vocab, words=[word1, word2])
-    with pytest.warns(UserWarning):
-        assert isinstance(doc.similarity(doc[0]), float)
-        assert isinstance(doc[0].similarity(doc), float)
-        assert doc.similarity(doc[0]) == doc[0].similarity(doc)
+    assert isinstance(doc.similarity(doc[0]), float)
+    assert isinstance(doc[0].similarity(doc), float)
+    assert doc.similarity(doc[0]) == doc[0].similarity(doc)


 def test_vectors_similarity_TS(vocab, vectors):
     [(word1, vec1), (word2, vec2)] = vectors
     doc = Doc(vocab, words=[word1, word2])
-    with pytest.warns(UserWarning):
-        assert isinstance(doc[:2].similarity(doc[0]), float)
-        assert isinstance(doc[0].similarity(doc[-2]), float)
-        assert doc[:2].similarity(doc[0]) == doc[0].similarity(doc[:2])
+    assert isinstance(doc[:2].similarity(doc[0]), float)
+    assert isinstance(doc[0].similarity(doc[:2]), float)
+    assert doc[:2].similarity(doc[0]) == doc[0].similarity(doc[:2])


 def test_vectors_similarity_DS(vocab, vectors):
@@ -91,3 +90,21 @@ def test_vectors_similarity_DS(vocab, vectors):
     doc = Doc(vocab, words=[word1, word2])
     assert isinstance(doc.similarity(doc[:2]), float)
     assert doc.similarity(doc[:2]) == doc[:2].similarity(doc)
+
+
+def test_vectors_similarity_no_vectors():
+    vocab = Vocab()
+    doc1 = Doc(vocab, words=["a", "b"])
+    doc2 = Doc(vocab, words=["c", "d", "e"])
+    with pytest.warns(UserWarning):
+        doc1.similarity(doc2)
+    with pytest.warns(UserWarning):
+        doc1.similarity(doc2[1])
+    with pytest.warns(UserWarning):
+        doc1.similarity(doc2[:2])
+    with pytest.warns(UserWarning):
+        doc2.similarity(doc1)
+    with pytest.warns(UserWarning):
+        doc2[1].similarity(doc1)
+    with pytest.warns(UserWarning):
+        doc2[:2].similarity(doc1)


@@ -318,17 +318,15 @@ def test_vectors_lexeme_doc_similarity(vocab, text):
 @pytest.mark.parametrize("text", [["apple", "orange", "juice"]])
 def test_vectors_span_span_similarity(vocab, text):
     doc = Doc(vocab, words=text)
-    with pytest.warns(UserWarning):
-        assert doc[0:2].similarity(doc[1:3]) == doc[1:3].similarity(doc[0:2])
-        assert -1.0 < doc[0:2].similarity(doc[1:3]) < 1.0
+    assert doc[0:2].similarity(doc[1:3]) == doc[1:3].similarity(doc[0:2])
+    assert -1.0 < doc[0:2].similarity(doc[1:3]) < 1.0


 @pytest.mark.parametrize("text", [["apple", "orange", "juice"]])
 def test_vectors_span_doc_similarity(vocab, text):
     doc = Doc(vocab, words=text)
-    with pytest.warns(UserWarning):
-        assert doc[0:2].similarity(doc) == doc.similarity(doc[0:2])
-        assert -1.0 < doc[0:2].similarity(doc) < 1.0
+    assert doc[0:2].similarity(doc) == doc.similarity(doc[0:2])
+    assert -1.0 < doc[0:2].similarity(doc) < 1.0


 @pytest.mark.parametrize(


@@ -607,7 +607,8 @@ cdef class Doc:
         if self.vocab.vectors.n_keys == 0:
             warnings.warn(Warnings.W007.format(obj="Doc"))
         if self.vector_norm == 0 or other.vector_norm == 0:
-            warnings.warn(Warnings.W008.format(obj="Doc"))
+            if not self.has_vector or not other.has_vector:
+                warnings.warn(Warnings.W008.format(obj="Doc"))
             return 0.0
         vector = self.vector
         xp = get_array_module(vector)
@@ -627,7 +628,7 @@ cdef class Doc:
         if "has_vector" in self.user_hooks:
             return self.user_hooks["has_vector"](self)
         elif self.vocab.vectors.size:
-            return True
+            return any(token.has_vector for token in self)
         elif self.tensor.size:
             return True
         else:
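A hedged sketch of what the `has_vector` change above means in practice: after this diff, a `Doc` only reports a vector if at least one of its tokens is actually covered by the vectors table (the word strings and vector values below are made up).

```python
import numpy
from spacy.vocab import Vocab
from spacy.tokens import Doc

vocab = Vocab()
vocab.set_vector("apple", numpy.ones((4,), dtype="f"))

doc_known = Doc(vocab, words=["apple"])
doc_unknown = Doc(vocab, words=["zzzzz"])

assert doc_known.has_vector        # at least one token has a vector
assert not doc_unknown.has_vector  # table is non-empty, but no token is covered (new behavior)
```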


@@ -354,7 +354,8 @@ cdef class Span:
         if self.vocab.vectors.n_keys == 0:
             warnings.warn(Warnings.W007.format(obj="Span"))
         if self.vector_norm == 0.0 or other.vector_norm == 0.0:
-            warnings.warn(Warnings.W008.format(obj="Span"))
+            if not self.has_vector or not other.has_vector:
+                warnings.warn(Warnings.W008.format(obj="Span"))
             return 0.0
         vector = self.vector
         xp = get_array_module(vector)


@@ -206,7 +206,8 @@ cdef class Token:
         if self.vocab.vectors.n_keys == 0:
             warnings.warn(Warnings.W007.format(obj="Token"))
         if self.vector_norm == 0 or other.vector_norm == 0:
-            warnings.warn(Warnings.W008.format(obj="Token"))
+            if not self.has_vector or not other.has_vector:
+                warnings.warn(Warnings.W008.format(obj="Token"))
             return 0.0
         vector = self.vector
         xp = get_array_module(vector)


@@ -1,33 +1,39 @@
 from typing import List
 from ..errors import Errors

 import numpy

+from libc.stdint cimport int32_t


 cdef class AlignmentArray:
     """AlignmentArray is similar to Thinc's Ragged with two simplfications:
     indexing returns numpy arrays and this type can only be used for CPU arrays.
-    However, these changes make AlginmentArray more efficient for indexing in a
+    However, these changes make AlignmentArray more efficient for indexing in a
     tight loop."""

     __slots__ = []

     def __init__(self, alignment: List[List[int]]):
-        self._lengths = None
-        self._starts_ends = numpy.zeros(len(alignment) + 1, dtype="i")
         cdef int data_len = 0
         cdef int outer_len
         cdef int idx
+
+        self._starts_ends = numpy.zeros(len(alignment) + 1, dtype='int32')
+        cdef int32_t* starts_ends_ptr = <int32_t*>self._starts_ends.data
+
         for idx, outer in enumerate(alignment):
             outer_len = len(outer)
-            self._starts_ends[idx + 1] = self._starts_ends[idx] + outer_len
+            starts_ends_ptr[idx + 1] = starts_ends_ptr[idx] + outer_len
             data_len += outer_len
-        self._data = numpy.empty(data_len, dtype="i")
+
+        self._lengths = None
+        self._data = numpy.empty(data_len, dtype="int32")
+
         idx = 0
+        cdef int32_t* data_ptr = <int32_t*>self._data.data
         for outer in alignment:
             for inner in outer:
-                self._data[idx] = inner
+                data_ptr[idx] = inner
                 idx += 1

     def __getitem__(self, idx):


@@ -13,7 +13,7 @@ from .iob_utils import biluo_tags_to_spans, remove_bilu_prefix
 from ..errors import Errors, Warnings
 from ..pipeline._parser_internals import nonproj
 from ..tokens.token cimport MISSING_DEP
-from ..util import logger, to_ternary_int
+from ..util import logger, to_ternary_int, all_equal


 cpdef Doc annotations_to_doc(vocab, tok_annot, doc_annot):
@@ -151,50 +151,127 @@ cdef class Example:
         self._y_sig = y_sig
         return self._cached_alignment

+    def _get_aligned_vectorized(self, align, gold_values):
+        # Fast path for Doc attributes/fields that are predominantly a single value,
+        # i.e., TAG, POS, MORPH.
+        x2y_single_toks = []
+        x2y_single_toks_i = []
+
+        x2y_multiple_toks = []
+        x2y_multiple_toks_i = []
+
+        # Gather indices of gold tokens aligned to the candidate tokens into two buckets.
+        # Bucket 1: All tokens that have a one-to-one alignment.
+        # Bucket 2: All tokens that have a one-to-many alignment.
+        for idx, token in enumerate(self.predicted):
+            aligned_gold_i = align[token.i]
+            aligned_gold_len = len(aligned_gold_i)
+
+            if aligned_gold_len == 1:
+                x2y_single_toks.append(aligned_gold_i.item())
+                x2y_single_toks_i.append(idx)
+            elif aligned_gold_len > 1:
+                x2y_multiple_toks.append(aligned_gold_i)
+                x2y_multiple_toks_i.append(idx)
+
+        # Map elements of the first bucket directly to the output array.
+        output = numpy.full(len(self.predicted), None)
+        output[x2y_single_toks_i] = gold_values[x2y_single_toks].squeeze()
+
+        # Collapse many-to-one alignments into one-to-one alignments if they
+        # share the same value. Map to None in all other cases.
+        for i in range(len(x2y_multiple_toks)):
+            aligned_gold_values = gold_values[x2y_multiple_toks[i]]
+
+            # If all aligned tokens have the same value, use it.
+            if all_equal(aligned_gold_values):
+                x2y_multiple_toks[i] = aligned_gold_values[0].item()
+            else:
+                x2y_multiple_toks[i] = None
+
+        output[x2y_multiple_toks_i] = x2y_multiple_toks
+
+        return output.tolist()
+
+    def _get_aligned_non_vectorized(self, align, gold_values):
+        # Slower path for fields that return multiple values (resulting
+        # in ragged arrays that cannot be vectorized trivially).
+        output = [None] * len(self.predicted)
+
+        for token in self.predicted:
+            aligned_gold_i = align[token.i]
+            values = gold_values[aligned_gold_i].ravel()
+            if len(values) == 1:
+                output[token.i] = values.item()
+            elif all_equal(values):
+                # If all aligned tokens have the same value, use it.
+                output[token.i] = values[0].item()
+
+        return output
+
     def get_aligned(self, field, as_string=False):
         """Return an aligned array for a token attribute."""
         align = self.alignment.x2y
+        gold_values = self.reference.to_array([field])
+
+        if len(gold_values.shape) == 1:
+            output = self._get_aligned_vectorized(align, gold_values)
+        else:
+            output = self._get_aligned_non_vectorized(align, gold_values)

         vocab = self.reference.vocab
-        gold_values = self.reference.to_array([field])
-        output = [None] * len(self.predicted)
-        for token in self.predicted:
-            values = gold_values[align[token.i]]
-            values = values.ravel()
-            if len(values) == 0:
-                output[token.i] = None
-            elif len(values) == 1:
-                output[token.i] = values[0]
-            elif len(set(list(values))) == 1:
-                # If all aligned tokens have the same value, use it.
-                output[token.i] = values[0]
-            else:
-                output[token.i] = None
         if as_string and field not in ["ENT_IOB", "SENT_START"]:
             output = [vocab.strings[o] if o is not None else o for o in output]
         return output

     def get_aligned_parse(self, projectivize=True):
         cand_to_gold = self.alignment.x2y
         gold_to_cand = self.alignment.y2x
-        aligned_heads = [None] * self.x.length
-        aligned_deps = [None] * self.x.length
-        has_deps = [token.has_dep() for token in self.y]
-        has_heads = [token.has_head() for token in self.y]
         heads = [token.head.i for token in self.y]
         deps = [token.dep_ for token in self.y]
+
         if projectivize:
             proj_heads, proj_deps = nonproj.projectivize(heads, deps)
+            has_deps = [token.has_dep() for token in self.y]
+            has_heads = [token.has_head() for token in self.y]
+
             # ensure that missing data remains missing
             heads = [h if has_heads[i] else heads[i] for i, h in enumerate(proj_heads)]
             deps = [d if has_deps[i] else deps[i] for i, d in enumerate(proj_deps)]
-        for cand_i in range(self.x.length):
-            if cand_to_gold.lengths[cand_i] == 1:
-                gold_i = cand_to_gold[cand_i][0]
-                if gold_to_cand.lengths[heads[gold_i]] == 1:
-                    aligned_heads[cand_i] = int(gold_to_cand[heads[gold_i]][0])
-                    aligned_deps[cand_i] = deps[gold_i]
-        return aligned_heads, aligned_deps
+
+        # Select all candidate tokens that are aligned to a single gold token.
+        c2g_single_toks = numpy.where(cand_to_gold.lengths == 1)[0]
+
+        # Fetch all aligned gold token incides.
+        if c2g_single_toks.shape == cand_to_gold.lengths.shape:
+            # This the most likely case.
+            gold_i = cand_to_gold[:]
+        else:
+            gold_i = numpy.vectorize(lambda x: cand_to_gold[int(x)][0], otypes='i')(c2g_single_toks)
+
+        # Fetch indices of all gold heads for the aligned gold tokens.
+        heads = numpy.asarray(heads, dtype='i')
+        gold_head_i = heads[gold_i]
+
+        # Select all gold tokens that are heads of the previously selected
+        # gold tokens (and are aligned to a single candidate token).
+        g2c_len_heads = gold_to_cand.lengths[gold_head_i]
+        g2c_len_heads = numpy.where(g2c_len_heads == 1)[0]
+        g2c_i = numpy.vectorize(lambda x: gold_to_cand[int(x)][0], otypes='i')(gold_head_i[g2c_len_heads]).squeeze()
+
+        # Update head/dep alignments with the above.
+        aligned_heads = numpy.full((self.x.length), None)
+        aligned_heads[c2g_single_toks[g2c_len_heads]] = g2c_i
+
+        deps = numpy.asarray(deps)
+        aligned_deps = numpy.full((self.x.length), None)
+        aligned_deps[c2g_single_toks] = deps[gold_i]
+
+        return aligned_heads.tolist(), aligned_deps.tolist()

     def get_aligned_sent_starts(self):
         """Get list of SENT_START attributes aligned to the predicted tokenization.


@@ -1716,3 +1716,10 @@ def packages_distributions() -> Dict[str, List[str]]:
         for pkg in (dist.read_text("top_level.txt") or "").split():
             pkg_to_dist[pkg].append(dist.metadata["Name"])
     return dict(pkg_to_dist)
+
+
+def all_equal(iterable):
+    """Return True if all the elements are equal to each other
+    (or if the input is an empty sequence), False otherwise."""
+    g = itertools.groupby(iterable)
+    return next(g, True) and not next(g, False)
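A tiny standalone check of the `itertools.groupby` trick used by `all_equal` (the values below are made up):

```python
import itertools

def all_equal(iterable):
    # One group -> all elements equal; zero groups -> empty input, also treated as equal.
    g = itertools.groupby(iterable)
    return next(g, True) and not next(g, False)

assert all_equal(["NOUN", "NOUN", "NOUN"])
assert not all_equal(["NOUN", "VERB"])
assert all_equal([])
```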


@@ -336,10 +336,10 @@ cdef class Vectors:
         xp = get_array_module(self.data)
         if key is not None:
             key = get_string_id(key)
-            return self.key2row.get(key, -1)
+            return self.key2row.get(int(key), -1)
         elif keys is not None:
             keys = [get_string_id(key) for key in keys]
-            rows = [self.key2row.get(key, -1) for key in keys]
+            rows = [self.key2row.get(int(key), -1) for key in keys]
             return xp.asarray(rows, dtype="i")
         else:
             row2key = {row: key for key, row in self.key2row.items()}


@@ -587,7 +587,7 @@ consists of either two or three subnetworks:
   run once for each batch.
 - **lower**: Construct a feature-specific vector for each `(token, feature)`
   pair. This is also run once for each batch. Constructing the state
-  representation is then simply a matter of summing the component features and
+  representation is then a matter of summing the component features and
   applying the non-linearity.
 - **upper** (optional): A feed-forward network that predicts scores from the
   state representation. If not present, the output from the lower model is used
@@ -628,7 +628,7 @@ same signature, but the `use_upper` argument was `True` by default.
 > ```

 Build a tagger model, using a provided token-to-vector component. The tagger
-model simply adds a linear layer with softmax activation to predict scores given
+model adds a linear layer with softmax activation to predict scores given
 the token vectors.

 | Name | Description |
@@ -920,5 +920,5 @@ A function that reads an existing `KnowledgeBase` from file.
 A function that takes as input a [`KnowledgeBase`](/api/kb) and a
 [`Span`](/api/span) object denoting a named entity, and returns a list of
 plausible [`Candidate`](/api/kb/#candidate) objects. The default
-`CandidateGenerator` simply uses the text of a mention to find its potential
+`CandidateGenerator` uses the text of a mention to find its potential
 aliases in the `KnowledgeBase`. Note that this function is case-dependent.


@@ -0,0 +1,78 @@
---
title: Attributes
teaser: Token attributes
source: spacy/attrs.pyx
---
[Token](/api/token) attributes are specified using internal IDs in many places
including:
- [`Matcher` patterns](/api/matcher#patterns),
- [`Doc.to_array`](/api/doc#to_array) and
[`Doc.from_array`](/api/doc#from_array)
- [`Doc.has_annotation`](/api/doc#has_annotation)
- [`MultiHashEmbed`](/api/architectures#MultiHashEmbed) Tok2Vec architecture `attrs`
> ```python
> import spacy
> from spacy.attrs import DEP
>
> nlp = spacy.blank("en")
> doc = nlp("There are many attributes.")
>
> # DEP always has the same internal value
> assert DEP == 76
>
> # "DEP" is automatically converted to DEP
> assert DEP == nlp.vocab.strings["DEP"]
> assert doc.has_annotation(DEP) == doc.has_annotation("DEP")
>
> # look up IDs in spacy.attrs.IDS
> from spacy.attrs import IDS
> assert IDS["DEP"] == DEP
> ```
All methods automatically convert between the string version of an ID (`"DEP"`)
and the internal integer symbols (`DEP`). The internal IDs can be imported from
`spacy.attrs` or retrieved from the [`StringStore`](/api/stringstore). A map
from string attribute names to internal attribute IDs is stored in
`spacy.attrs.IDS`.
The corresponding [`Token` object attributes](/api/token#attributes) can be
accessed using the same names in lowercase, e.g. `token.orth` or `token.length`.
For attributes that represent string values, the internal integer ID is
accessed as `Token.attr`, e.g. `token.dep`, while the string value can be
retrieved by appending `_` as in `token.dep_`.
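As a small sketch of the lowercase accessors described above (assuming a blank English pipeline, so no statistical annotations are needed):

```python
import spacy

nlp = spacy.blank("en")
token = nlp("There are many attributes.")[0]

# Integer ID vs. string value for the same attribute
assert token.lower == nlp.vocab.strings[token.lower_]
assert token.orth_ == "There"
```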
| Attribute | Description |
| ------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `DEP` | The token's dependency label. ~~str~~ |
| `ENT_ID` | The token's entity ID (`ent_id`). ~~str~~ |
| `ENT_IOB` | The IOB part of the token's entity tag. Uses custom integer values rather than the string store: unset is `0`, `I` is `1`, `O` is `2`, and `B` is `3`. ~~str~~ |
| `ENT_KB_ID` | The token's entity knowledge base ID. ~~str~~ |
| `ENT_TYPE` | The token's entity label. ~~str~~ |
| `IS_ALPHA` | Token text consists of alphabetic characters. ~~bool~~ |
| `IS_ASCII` | Token text consists of ASCII characters. ~~bool~~ |
| `IS_DIGIT` | Token text consists of digits. ~~bool~~ |
| `IS_LOWER` | Token text is in lowercase. ~~bool~~ |
| `IS_PUNCT` | Token is punctuation. ~~bool~~ |
| `IS_SPACE` | Token is whitespace. ~~bool~~ |
| `IS_STOP` | Token is a stop word. ~~bool~~ |
| `IS_TITLE` | Token text is in titlecase. ~~bool~~ |
| `IS_UPPER` | Token text is in uppercase. ~~bool~~ |
| `LEMMA` | The token's lemma. ~~str~~ |
| `LENGTH` | The length of the token text. ~~int~~ |
| `LIKE_EMAIL` | Token text resembles an email address. ~~bool~~ |
| `LIKE_NUM` | Token text resembles a number. ~~bool~~ |
| `LIKE_URL` | Token text resembles a URL. ~~bool~~ |
| `LOWER` | The lowercase form of the token text. ~~str~~ |
| `MORPH` | The token's morphological analysis. ~~MorphAnalysis~~ |
| `NORM` | The normalized form of the token text. ~~str~~ |
| `ORTH` | The exact verbatim text of a token. ~~str~~ |
| `POS` | The token's universal part of speech (UPOS). ~~str~~ |
| `SENT_START` | Token is start of sentence. ~~bool~~ |
| `SHAPE` | The token's shape. ~~str~~ |
| `SPACY` | Token has a trailing space. ~~bool~~ |
| `TAG` | The token's fine-grained part of speech. ~~str~~ |


@@ -2,7 +2,7 @@
 title: SpanRuler
 tag: class
 source: spacy/pipeline/span_ruler.py
-new: 3.3.1
+new: 3.3
 teaser: 'Pipeline component for rule-based span and named entity recognition'
 api_string_name: span_ruler
 api_trainable: false


@@ -1899,7 +1899,7 @@ access to some nice Latin vectors. You can then pass the directory path to
 > ```

 ```cli
-$ wget https://s3-us-west-1.amazonaws.com/fasttext-vectors/word-vectors-v2/cc.la.300.vec.gz
+$ wget https://dl.fbaipublicfiles.com/fasttext/vectors-crawl/cc.la.300.vec.gz
 $ python -m spacy init vectors en cc.la.300.vec.gz /tmp/la_vectors_wiki_lg
 ```


@@ -203,11 +203,14 @@ the data to and from a JSON file.

 ```python
 ### {highlight="16-23,25-30"}
+import json
+from spacy import Language
 from spacy.util import ensure_path

 @Language.factory("my_component")
 class CustomComponent:
-    def __init__(self):
+    def __init__(self, nlp: Language, name: str = "my_component"):
+        self.name = name
         self.data = []

     def __call__(self, doc):
@@ -231,7 +234,7 @@ class CustomComponent:
         # This will receive the directory path + /my_component
         data_path = path / "data.json"
         with data_path.open("r", encoding="utf8") as f:
-            self.data = json.load(f)
+            self.data = json.load(f)
         return self
 ```
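For illustration, a minimal way to exercise the custom serialization shown above (the directory path is hypothetical, and the module defining the factory must be imported before loading so the component can be re-created):

```python
import spacy

nlp = spacy.blank("en")
nlp.add_pipe("my_component")
nlp.to_disk("/tmp/my_pipeline")        # calls CustomComponent.to_disk
nlp2 = spacy.load("/tmp/my_pipeline")  # calls CustomComponent.from_disk
```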


@@ -124,6 +124,7 @@
         {
             "label": "Other",
             "items": [
+                { "text": "Attributes", "url": "/api/attributes" },
                 { "text": "Corpus", "url": "/api/corpus" },
                 { "text": "KnowledgeBase", "url": "/api/kb" },
                 { "text": "Lookups", "url": "/api/lookups" },


@@ -1,5 +1,34 @@
 {
     "resources": [
+        {
+            "id": "spacyfishing",
+            "title": "spaCy fishing",
+            "slogan": "Named entity disambiguation and linking on Wikidata in spaCy with Entity-Fishing.",
+            "description": "A spaCy wrapper of Entity-Fishing for named entity disambiguation and linking against a Wikidata knowledge base.",
+            "github": "Lucaterre/spacyfishing",
+            "pip": "spacyfishing",
+            "code_example": [
+                "import spacy",
+                "text = 'Victor Hugo and Honoré de Balzac are French writers who lived in Paris.'",
+                "nlp = spacy.load('en_core_web_sm')",
+                "nlp.add_pipe('entityfishing')",
+                "doc = nlp(text)",
+                "for ent in doc.ents:",
+                "    print((ent.text, ent.label_, ent._.kb_qid, ent._.url_wikidata, ent._.nerd_score))",
+                "# ('Victor Hugo', 'PERSON', 'Q535', 'https://www.wikidata.org/wiki/Q535', 0.972)",
+                "# ('Honoré de Balzac', 'PERSON', 'Q9711', 'https://www.wikidata.org/wiki/Q9711', 0.9724)",
+                "# ('French', 'NORP', 'Q121842', 'https://www.wikidata.org/wiki/Q121842', 0.3739)",
+                "# ('Paris', 'GPE', 'Q90', 'https://www.wikidata.org/wiki/Q90', 0.5652)",
+                "## Set parameter `extra_info` to `True` and check also span._.description, span._.src_description, span._.normal_term, span._.other_ids"
+            ],
+            "category": ["models", "pipeline"],
+            "tags": ["NER", "NEL"],
+            "author": "Lucas Terriel",
+            "author_links": {
+                "twitter": "TerreLuca",
+                "github": "Lucaterre"
+            }
+        },
         {
             "id": "aim-spacy",
             "title": "Aim-spaCy",
@@ -55,7 +84,7 @@
             "code_language": "python",
             "author": "Leap Beyond",
             "author_links": {
-                "github": "https://github.com/LeapBeyond",
+                "github": "LeapBeyond",
                 "website": "https://leapbeyond.ai"
             },
             "code_example": [
@@ -78,8 +107,8 @@
             "code_language": "python",
             "author": "Peter Baumgartner",
             "author_links": {
-                "twitter" : "https://twitter.com/pmbaumgartner",
-                "github": "https://github.com/pmbaumgartner",
+                "twitter" : "pmbaumgartner",
+                "github": "pmbaumgartner",
                 "website": "https://www.peterbaumgartner.com/"
             },
             "code_example": [
@@ -98,8 +127,8 @@
             "code_language": "python",
             "author": "Explosion",
             "author_links": {
-                "twitter" : "https://twitter.com/explosion_ai",
-                "github": "https://github.com/explosion",
+                "twitter" : "explosion_ai",
+                "github": "explosion",
                 "website": "https://explosion.ai/"
             },
             "code_example": [
@@ -571,8 +600,8 @@
             "code_language": "python",
             "author": "Keith Rozario",
             "author_links": {
-                "twitter" : "https://twitter.com/keithrozario",
-                "github": "https://github.com/keithrozario",
+                "twitter" : "keithrozario",
+                "github": "keithrozario",
                 "website": "https://www.keithrozario.com"
             },
             "code_example": [
@@ -2295,7 +2324,7 @@
             "author": "Daniel Whitenack & Chris Benson",
             "author_links": {
                 "website": "https://changelog.com/practicalai",
-                "twitter": "https://twitter.com/PracticalAIFM"
+                "twitter": "PracticalAIFM"
             },
             "category": ["podcasts"]
         },


@@ -24,7 +24,6 @@ const CUDA = {
   '11.3': 'cuda113',
   '11.4': 'cuda114',
   '11.5': 'cuda115',
-  '11.6': 'cuda116',
 }
 const LANG_EXTRAS = ['ja'] // only for languages with models
const LANG_EXTRAS = ['ja'] // only for languages with models const LANG_EXTRAS = ['ja'] // only for languages with models