Mirror of https://github.com/explosion/spaCy.git (synced 2025-01-30 19:24:07 +03:00)

Commit 508b96fdc7: Merge remote-tracking branch 'upstream/master' into store-activations
.github/azure-steps.yml (34 changed lines)

@@ -64,12 +64,12 @@ steps:
     displayName: "Run GPU tests"
     condition: eq(${{ parameters.gpu }}, true)

-  - script: |
-      python -m spacy download ca_core_news_sm
-      python -m spacy download ca_core_news_md
-      python -c "import spacy; nlp=spacy.load('ca_core_news_sm'); doc=nlp('test')"
-    displayName: 'Test download CLI'
-    condition: eq(variables['python_version'], '3.8')
+  # - script: |
+  #     python -m spacy download ca_core_news_sm
+  #     python -m spacy download ca_core_news_md
+  #     python -c "import spacy; nlp=spacy.load('ca_core_news_sm'); doc=nlp('test')"
+  #   displayName: 'Test download CLI'
+  #   condition: eq(variables['python_version'], '3.8')

   - script: |
       python -m spacy convert extra/example_data/ner_example_data/ner-token-per-line-conll2003.json .
@@ -93,17 +93,17 @@ steps:
     displayName: 'Test train CLI'
     condition: eq(variables['python_version'], '3.8')

-  - script: |
-      python -c "import spacy; config = spacy.util.load_config('ner.cfg'); config['components']['ner'] = {'source': 'ca_core_news_sm'}; config.to_disk('ner_source_sm.cfg')"
-      PYTHONWARNINGS="error,ignore::DeprecationWarning" python -m spacy assemble ner_source_sm.cfg output_dir
-    displayName: 'Test assemble CLI'
-    condition: eq(variables['python_version'], '3.8')
-
-  - script: |
-      python -c "import spacy; config = spacy.util.load_config('ner.cfg'); config['components']['ner'] = {'source': 'ca_core_news_md'}; config.to_disk('ner_source_md.cfg')"
-      python -m spacy assemble ner_source_md.cfg output_dir 2>&1 | grep -q W113
-    displayName: 'Test assemble CLI vectors warning'
-    condition: eq(variables['python_version'], '3.8')
+  # - script: |
+  #     python -c "import spacy; config = spacy.util.load_config('ner.cfg'); config['components']['ner'] = {'source': 'ca_core_news_sm'}; config.to_disk('ner_source_sm.cfg')"
+  #     PYTHONWARNINGS="error,ignore::DeprecationWarning" python -m spacy assemble ner_source_sm.cfg output_dir
+  #   displayName: 'Test assemble CLI'
+  #   condition: eq(variables['python_version'], '3.8')
+  #
+  # - script: |
+  #     python -c "import spacy; config = spacy.util.load_config('ner.cfg'); config['components']['ner'] = {'source': 'ca_core_news_md'}; config.to_disk('ner_source_md.cfg')"
+  #     python -m spacy assemble ner_source_md.cfg output_dir 2>&1 | grep -q W113
+  #   displayName: 'Test assemble CLI vectors warning'
+  #   condition: eq(variables['python_version'], '3.8')

   - script: |
       python .github/validate_universe_json.py website/meta/universe.json
.github/contributors/Lucaterre.md (new file, 106 lines)

@@ -0,0 +1,106 @@
# spaCy contributor agreement

This spaCy Contributor Agreement (**"SCA"**) is based on the
[Oracle Contributor Agreement](http://www.oracle.com/technetwork/oca-405177.pdf).
The SCA applies to any contribution that you make to any product or project
managed by us (the **"project"**), and sets out the intellectual property rights
you grant to us in the contributed materials. The term **"us"** shall mean
[ExplosionAI GmbH](https://explosion.ai/legal). The term
**"you"** shall mean the person or entity identified below.

If you agree to be bound by these terms, fill in the information requested
below and include the filled-in version with your first pull request, under the
folder [`.github/contributors/`](/.github/contributors/). The name of the file
should be your GitHub username, with the extension `.md`. For example, the user
example_user would create the file `.github/contributors/example_user.md`.

Read this agreement carefully before signing. These terms and conditions
constitute a binding legal agreement.

## Contributor Agreement

1. The term "contribution" or "contributed materials" means any source code,
object code, patch, tool, sample, graphic, specification, manual,
documentation, or any other material posted or submitted by you to the project.

2. With respect to any worldwide copyrights, or copyright applications and
registrations, in your contribution:

    * you hereby assign to us joint ownership, and to the extent that such
    assignment is or becomes invalid, ineffective or unenforceable, you hereby
    grant to us a perpetual, irrevocable, non-exclusive, worldwide, no-charge,
    royalty-free, unrestricted license to exercise all rights under those
    copyrights. This includes, at our option, the right to sublicense these same
    rights to third parties through multiple levels of sublicensees or other
    licensing arrangements;

    * you agree that each of us can do all things in relation to your
    contribution as if each of us were the sole owners, and if one of us makes
    a derivative work of your contribution, the one who makes the derivative
    work (or has it made) will be the sole owner of that derivative work;

    * you agree that you will not assert any moral rights in your contribution
    against us, our licensees or transferees;

    * you agree that we may register a copyright in your contribution and
    exercise all ownership rights associated with it; and

    * you agree that neither of us has any duty to consult with, obtain the
    consent of, pay or render an accounting to the other for any use or
    distribution of your contribution.

3. With respect to any patents you own, or that you can license without payment
to any third party, you hereby grant to us a perpetual, irrevocable,
non-exclusive, worldwide, no-charge, royalty-free license to:

    * make, have made, use, sell, offer to sell, import, and otherwise transfer
    your contribution in whole or in part, alone or in combination with or
    included in any product, work or materials arising out of the project to
    which your contribution was submitted, and

    * at our option, to sublicense these same rights to third parties through
    multiple levels of sublicensees or other licensing arrangements.

4. Except as set out above, you keep all right, title, and interest in your
contribution. The rights that you grant to us under these terms are effective
on the date you first submitted a contribution to us, even if your submission
took place before the date you sign these terms.

5. You covenant, represent, warrant and agree that:

    * Each contribution that you submit is and shall be an original work of
    authorship and you can legally grant the rights set out in this SCA;

    * to the best of your knowledge, each contribution will not violate any
    third party's copyrights, trademarks, patents, or other intellectual
    property rights; and

    * each contribution shall be in compliance with U.S. export control laws and
    other applicable export and import laws. You agree to notify us if you
    become aware of any circumstance which would make any of the foregoing
    representations inaccurate in any respect. We may publicly disclose your
    participation in the project, including the fact that you have signed the SCA.

6. This SCA is governed by the laws of the State of California and applicable
U.S. Federal law. Any choice of law rules will not apply.

7. Please place an “x” on one of the applicable statements below. Please do NOT
mark both statements:

    * [x] I am signing on behalf of myself as an individual and no other person
    or entity, including my employer, has or will have rights with respect to my
    contributions.

    * [ ] I am signing on behalf of my employer or a legal entity and I have the
    actual authority to contractually bind that entity.

## Contributor Details

| Field                          | Entry         |
|------------------------------- |---------------|
| Name                           | Lucas Terriel |
| Company name (if applicable)   |               |
| Title or role (if applicable)  |               |
| Date                           | 2022-06-20    |
| GitHub username                | Lucaterre     |
| Website (optional)             |               |
@@ -455,6 +455,10 @@ Regression tests are tests that refer to bugs reported in specific issues. They

 The test suite also provides [fixtures](https://github.com/explosion/spaCy/blob/master/spacy/tests/conftest.py) for different language tokenizers that can be used as function arguments of the same name and will be passed in automatically. Those should only be used for tests related to those specific languages. We also have [test utility functions](https://github.com/explosion/spaCy/blob/master/spacy/tests/util.py) for common operations, like creating a temporary file.

+### Testing Cython Code
+
+If you're developing Cython code (`.pyx` files), those extensions will need to be built before the test runner can test that code - otherwise it's going to run the tests with stale code from the last time the extension was built. You can build the extensions locally with `python setup.py build_ext -i`.
+
 ### Constructing objects and state

 Test functions usually follow the same simple structure: they set up some state, perform the operation you want to test and `assert` conditions that you expect to be true, usually before and after the operation.
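For illustration, here is a minimal sketch of a language-specific test written against such a fixture. The `en_tokenizer` fixture name follows the convention in `spacy/tests/conftest.py`; the exact token count in the assertion is simply what the English tokenizer produces for this string.

```python
# A sketch of a language-specific test: the `en_tokenizer` argument is
# resolved automatically from the pytest fixture of the same name.
def test_en_tokenizer_splits_simple_sentence(en_tokenizer):
    tokens = en_tokenizer("This is a sentence.")
    assert len(tokens) == 5
    assert tokens[0].text == "This"
```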
@@ -5,7 +5,7 @@ requires = [
     "cymem>=2.0.2,<2.1.0",
     "preshed>=3.0.2,<3.1.0",
     "murmurhash>=0.28.0,<1.1.0",
-    "thinc>=8.1.0.dev2,<8.2.0",
+    "thinc>=8.1.0.dev3,<8.2.0",
     "pathy",
     "numpy>=1.15.0",
 ]
@@ -3,7 +3,7 @@ spacy-legacy>=3.0.9,<3.1.0
 spacy-loggers>=1.0.0,<2.0.0
 cymem>=2.0.2,<2.1.0
 preshed>=3.0.2,<3.1.0
-thinc>=8.1.0.dev2,<8.2.0
+thinc>=8.1.0.dev3,<8.2.0
 ml_datasets>=0.2.0,<0.3.0
 murmurhash>=0.28.0,<1.1.0
 wasabi>=0.9.1,<1.1.0
@@ -38,7 +38,7 @@ setup_requires =
     cymem>=2.0.2,<2.1.0
     preshed>=3.0.2,<3.1.0
     murmurhash>=0.28.0,<1.1.0
-    thinc>=8.1.0.dev2,<8.2.0
+    thinc>=8.1.0.dev3,<8.2.0
 install_requires =
     # Our libraries
     spacy-legacy>=3.0.9,<3.1.0
@@ -46,7 +46,7 @@ install_requires =
     murmurhash>=0.28.0,<1.1.0
     cymem>=2.0.2,<2.1.0
     preshed>=3.0.2,<3.1.0
-    thinc>=8.1.0.dev2,<8.2.0
+    thinc>=8.1.0.dev3,<8.2.0
     wasabi>=0.9.1,<1.1.0
     srsly>=2.4.3,<3.0.0
     catalogue>=2.0.6,<2.1.0
@@ -1,6 +1,6 @@
 # fmt: off
 __title__ = "spacy"
-__version__ = "3.3.0"
+__version__ = "3.4.0"
 __download_url__ = "https://github.com/explosion/spacy-models/releases/download"
 __compatibility__ = "https://raw.githubusercontent.com/explosion/spacy-models/master/compatibility.json"
 __projects__ = "https://github.com/explosion/projects"
@@ -158,13 +158,18 @@ def test_issue3209():


 def test_labels_from_BILUO():
-    """Test that labels are inferred correctly when there's a - in label.
-    """
+    """Test that labels are inferred correctly when there's a - in label."""
     nlp = English()
     ner = nlp.add_pipe("ner")
     ner.add_label("LARGE-ANIMAL")
     nlp.initialize()
-    move_names = ["O", "B-LARGE-ANIMAL", "I-LARGE-ANIMAL", "L-LARGE-ANIMAL", "U-LARGE-ANIMAL"]
+    move_names = [
+        "O",
+        "B-LARGE-ANIMAL",
+        "I-LARGE-ANIMAL",
+        "L-LARGE-ANIMAL",
+        "U-LARGE-ANIMAL",
+    ]
     labels = {"LARGE-ANIMAL"}
     assert ner.move_names == move_names
     assert set(ner.labels) == labels
@@ -589,6 +589,7 @@ def test_string_to_list_intify(value):
     assert string_to_list(value, intify=True) == [1, 2, 3]


+@pytest.mark.skip(reason="Temporarily skip for dev version")
 def test_download_compatibility():
     spec = SpecifierSet("==" + about.__version__)
     spec.prereleases = False
@@ -599,6 +600,7 @@ def test_download_compatibility():
     assert get_minor_version(about.__version__) == get_minor_version(version)


+@pytest.mark.skip(reason="Temporarily skip for dev version")
 def test_validate_compatibility_table():
     spec = SpecifierSet("==" + about.__version__)
     spec.prereleases = False
@@ -60,11 +60,12 @@ def test_readers():
     assert isinstance(extra_corpus, Callable)


+# TODO: enable IMDB test once Stanford servers are back up and running
 @pytest.mark.slow
 @pytest.mark.parametrize(
     "reader,additional_config",
     [
-        ("ml_datasets.imdb_sentiment.v1", {"train_limit": 10, "dev_limit": 10}),
+        # ("ml_datasets.imdb_sentiment.v1", {"train_limit": 10, "dev_limit": 10}),
         ("ml_datasets.dbpedia.v1", {"train_limit": 10, "dev_limit": 10}),
         ("ml_datasets.cmu_movies.v1", {"limit": 10, "freq_cutoff": 200, "split": 0.8}),
     ],
@@ -1,33 +1,39 @@
 from typing import List
 from ..errors import Errors
 import numpy
+from libc.stdint cimport int32_t


 cdef class AlignmentArray:
     """AlignmentArray is similar to Thinc's Ragged with two simplfications:
     indexing returns numpy arrays and this type can only be used for CPU arrays.
-    However, these changes make AlginmentArray more efficient for indexing in a
+    However, these changes make AlignmentArray more efficient for indexing in a
     tight loop."""

     __slots__ = []

     def __init__(self, alignment: List[List[int]]):
-        self._lengths = None
-        self._starts_ends = numpy.zeros(len(alignment) + 1, dtype="i")
-
         cdef int data_len = 0
         cdef int outer_len
         cdef int idx

+        self._starts_ends = numpy.zeros(len(alignment) + 1, dtype='int32')
+        cdef int32_t* starts_ends_ptr = <int32_t*>self._starts_ends.data
+
         for idx, outer in enumerate(alignment):
             outer_len = len(outer)
-            self._starts_ends[idx + 1] = self._starts_ends[idx] + outer_len
+            starts_ends_ptr[idx + 1] = starts_ends_ptr[idx] + outer_len
             data_len += outer_len

-        self._data = numpy.empty(data_len, dtype="i")
+        self._lengths = None
+        self._data = numpy.empty(data_len, dtype="int32")
+
         idx = 0
+        cdef int32_t* data_ptr = <int32_t*>self._data.data
+
         for outer in alignment:
             for inner in outer:
-                self._data[idx] = inner
+                data_ptr[idx] = inner
                 idx += 1

     def __getitem__(self, idx):
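For orientation, a small sketch of how these alignment arrays are typically created and read, assuming `Alignment.from_strings` from `spacy.training`; `.lengths` and integer indexing are the operations used by the code in this diff, and the printed values are only illustrative.

```python
from spacy.training import Alignment

other_tokens = ["i", "listened", "to", "obama", "'", "s", "podcasts", "."]
spacy_tokens = ["i", "listened", "to", "obama", "'s", "podcasts", "."]
align = Alignment.from_strings(other_tokens, spacy_tokens)

# x2y maps original tokens to spaCy tokens: lengths says how many spaCy
# tokens each original token is aligned to, indexing returns their indices.
print(align.x2y.lengths)  # e.g. one aligned token per original token here
print(align.x2y[4])       # the spaCy token aligned to "'"
```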
@@ -13,7 +13,7 @@ from .iob_utils import biluo_tags_to_spans, remove_bilu_prefix
 from ..errors import Errors, Warnings
 from ..pipeline._parser_internals import nonproj
 from ..tokens.token cimport MISSING_DEP
-from ..util import logger, to_ternary_int
+from ..util import logger, to_ternary_int, all_equal


 cpdef Doc annotations_to_doc(vocab, tok_annot, doc_annot):
@@ -151,50 +151,127 @@ cdef class Example:
             self._y_sig = y_sig
             return self._cached_alignment

+    def _get_aligned_vectorized(self, align, gold_values):
+        # Fast path for Doc attributes/fields that are predominantly a single value,
+        # i.e., TAG, POS, MORPH.
+        x2y_single_toks = []
+        x2y_single_toks_i = []
+
+        x2y_multiple_toks = []
+        x2y_multiple_toks_i = []
+
+        # Gather indices of gold tokens aligned to the candidate tokens into two buckets.
+        # Bucket 1: All tokens that have a one-to-one alignment.
+        # Bucket 2: All tokens that have a one-to-many alignment.
+        for idx, token in enumerate(self.predicted):
+            aligned_gold_i = align[token.i]
+            aligned_gold_len = len(aligned_gold_i)
+
+            if aligned_gold_len == 1:
+                x2y_single_toks.append(aligned_gold_i.item())
+                x2y_single_toks_i.append(idx)
+            elif aligned_gold_len > 1:
+                x2y_multiple_toks.append(aligned_gold_i)
+                x2y_multiple_toks_i.append(idx)
+
+        # Map elements of the first bucket directly to the output array.
+        output = numpy.full(len(self.predicted), None)
+        output[x2y_single_toks_i] = gold_values[x2y_single_toks].squeeze()
+
+        # Collapse many-to-one alignments into one-to-one alignments if they
+        # share the same value. Map to None in all other cases.
+        for i in range(len(x2y_multiple_toks)):
+            aligned_gold_values = gold_values[x2y_multiple_toks[i]]
+
+            # If all aligned tokens have the same value, use it.
+            if all_equal(aligned_gold_values):
+                x2y_multiple_toks[i] = aligned_gold_values[0].item()
+            else:
+                x2y_multiple_toks[i] = None
+
+        output[x2y_multiple_toks_i] = x2y_multiple_toks
+
+        return output.tolist()
+
+    def _get_aligned_non_vectorized(self, align, gold_values):
+        # Slower path for fields that return multiple values (resulting
+        # in ragged arrays that cannot be vectorized trivially).
+        output = [None] * len(self.predicted)
+
+        for token in self.predicted:
+            aligned_gold_i = align[token.i]
+            values = gold_values[aligned_gold_i].ravel()
+            if len(values) == 1:
+                output[token.i] = values.item()
+            elif all_equal(values):
+                # If all aligned tokens have the same value, use it.
+                output[token.i] = values[0].item()
+
+        return output
+
     def get_aligned(self, field, as_string=False):
         """Return an aligned array for a token attribute."""
         align = self.alignment.x2y
+        gold_values = self.reference.to_array([field])
+
+        if len(gold_values.shape) == 1:
+            output = self._get_aligned_vectorized(align, gold_values)
+        else:
+            output = self._get_aligned_non_vectorized(align, gold_values)
+
         vocab = self.reference.vocab
-        gold_values = self.reference.to_array([field])
-        output = [None] * len(self.predicted)
-        for token in self.predicted:
-            values = gold_values[align[token.i]]
-            values = values.ravel()
-            if len(values) == 0:
-                output[token.i] = None
-            elif len(values) == 1:
-                output[token.i] = values[0]
-            elif len(set(list(values))) == 1:
-                # If all aligned tokens have the same value, use it.
-                output[token.i] = values[0]
-            else:
-                output[token.i] = None
         if as_string and field not in ["ENT_IOB", "SENT_START"]:
             output = [vocab.strings[o] if o is not None else o for o in output]

         return output

     def get_aligned_parse(self, projectivize=True):
         cand_to_gold = self.alignment.x2y
         gold_to_cand = self.alignment.y2x
-        aligned_heads = [None] * self.x.length
-        aligned_deps = [None] * self.x.length
-        has_deps = [token.has_dep() for token in self.y]
-        has_heads = [token.has_head() for token in self.y]
         heads = [token.head.i for token in self.y]
         deps = [token.dep_ for token in self.y]

         if projectivize:
             proj_heads, proj_deps = nonproj.projectivize(heads, deps)
+            has_deps = [token.has_dep() for token in self.y]
+            has_heads = [token.has_head() for token in self.y]
+
             # ensure that missing data remains missing
             heads = [h if has_heads[i] else heads[i] for i, h in enumerate(proj_heads)]
             deps = [d if has_deps[i] else deps[i] for i, d in enumerate(proj_deps)]
-        for cand_i in range(self.x.length):
-            if cand_to_gold.lengths[cand_i] == 1:
-                gold_i = cand_to_gold[cand_i][0]
-                if gold_to_cand.lengths[heads[gold_i]] == 1:
-                    aligned_heads[cand_i] = int(gold_to_cand[heads[gold_i]][0])
-                    aligned_deps[cand_i] = deps[gold_i]
-        return aligned_heads, aligned_deps
+
+        # Select all candidate tokens that are aligned to a single gold token.
+        c2g_single_toks = numpy.where(cand_to_gold.lengths == 1)[0]
+
+        # Fetch all aligned gold token indices.
+        if c2g_single_toks.shape == cand_to_gold.lengths.shape:
+            # This is the most likely case.
+            gold_i = cand_to_gold[:].squeeze()
+        else:
+            gold_i = numpy.vectorize(lambda x: cand_to_gold[int(x)][0])(c2g_single_toks).squeeze()
+
+        # Fetch indices of all gold heads for the aligned gold tokens.
+        heads = numpy.asarray(heads, dtype='i')
+        gold_head_i = heads[gold_i]
+
+        # Select all gold tokens that are heads of the previously selected
+        # gold tokens (and are aligned to a single candidate token).
+        g2c_len_heads = gold_to_cand.lengths[gold_head_i]
+        g2c_len_heads = numpy.where(g2c_len_heads == 1)[0]
+        g2c_i = numpy.vectorize(lambda x: gold_to_cand[int(x)][0])(gold_head_i[g2c_len_heads]).squeeze()
+
+        # Update head/dep alignments with the above.
+        aligned_heads = numpy.full((self.x.length), None)
+        aligned_heads[c2g_single_toks[g2c_len_heads]] = g2c_i
+
+        deps = numpy.asarray(deps)
+        aligned_deps = numpy.full((self.x.length), None)
+        aligned_deps[c2g_single_toks] = deps[gold_i]
+
+        return aligned_heads.tolist(), aligned_deps.tolist()

     def get_aligned_sent_starts(self):
         """Get list of SENT_START attributes aligned to the predicted tokenization.
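The idea behind the new vectorized path, stripped of the Cython and spaCy internals, is to keep one-to-one alignments as they are and to collapse one-to-many alignments only when every aligned gold value agrees. A standalone sketch with made-up values, not the spaCy API:

```python
import numpy
from itertools import groupby

def all_equal(iterable):
    g = groupby(iterable)
    return next(g, True) and not next(g, False)

# Gold attribute values and, per predicted token, the indices of aligned gold tokens.
gold_values = numpy.array([10, 10, 7, 3])
alignment = [[0], [1, 2], [3, 3], []]  # one-to-one, disagreeing, agreeing, unaligned

output = []
for aligned in alignment:
    values = gold_values[aligned]
    if len(values) == 1:
        output.append(values.item())
    elif len(values) > 1 and all_equal(values):
        # many-to-one alignments collapse when every aligned value agrees
        output.append(values[0].item())
    else:
        output.append(None)

print(output)  # [10, None, 3, None]
```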
@@ -1716,3 +1716,10 @@ def packages_distributions() -> Dict[str, List[str]]:
         for pkg in (dist.read_text("top_level.txt") or "").split():
             pkg_to_dist[pkg].append(dist.metadata["Name"])
     return dict(pkg_to_dist)
+
+
+def all_equal(iterable):
+    """Return True if all the elements are equal to each other
+    (or if the input is an empty sequence), False otherwise."""
+    g = itertools.groupby(iterable)
+    return next(g, True) and not next(g, False)
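A quick sanity check of the new helper, reproduced standalone; the diff above adds it next to `packages_distributions`, and `example.pyx` imports it as `all_equal` from `..util`.

```python
from itertools import groupby

def all_equal(iterable):
    """Return True if all the elements are equal to each other
    (or if the input is an empty sequence), False otherwise."""
    g = groupby(iterable)
    return next(g, True) and not next(g, False)

assert all_equal([3, 3, 3])      # identical values
assert all_equal([])             # an empty sequence counts as equal
assert not all_equal([3, 4, 3])  # groupby yields a second group here
```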
@@ -587,7 +587,7 @@ consists of either two or three subnetworks:
   run once for each batch.
 - **lower**: Construct a feature-specific vector for each `(token, feature)`
   pair. This is also run once for each batch. Constructing the state
-  representation is then simply a matter of summing the component features and
+  representation is then a matter of summing the component features and
   applying the non-linearity.
 - **upper** (optional): A feed-forward network that predicts scores from the
   state representation. If not present, the output from the lower model is used
@@ -628,7 +628,7 @@ same signature, but the `use_upper` argument was `True` by default.
 > ```

 Build a tagger model, using a provided token-to-vector component. The tagger
-model simply adds a linear layer with softmax activation to predict scores given
+model adds a linear layer with softmax activation to predict scores given
 the token vectors.

 | Name | Description |
@@ -920,5 +920,5 @@ A function that reads an existing `KnowledgeBase` from file.
 A function that takes as input a [`KnowledgeBase`](/api/kb) and a
 [`Span`](/api/span) object denoting a named entity, and returns a list of
 plausible [`Candidate`](/api/kb/#candidate) objects. The default
-`CandidateGenerator` simply uses the text of a mention to find its potential
+`CandidateGenerator` uses the text of a mention to find its potential
 aliases in the `KnowledgeBase`. Note that this function is case-dependent.
website/docs/api/attributes.md (new file, 78 lines)

@@ -0,0 +1,78 @@
---
title: Attributes
teaser: Token attributes
source: spacy/attrs.pyx
---

[Token](/api/token) attributes are specified using internal IDs in many places
including:

- [`Matcher` patterns](/api/matcher#patterns),
- [`Doc.to_array`](/api/doc#to_array) and
  [`Doc.from_array`](/api/doc#from_array)
- [`Doc.has_annotation`](/api/doc#has_annotation)
- [`MultiHashEmbed`](/api/architectures#MultiHashEmbed) Tok2Vec architecture
  `attrs`

> ```python
> import spacy
> from spacy.attrs import DEP
>
> nlp = spacy.blank("en")
> doc = nlp("There are many attributes.")
>
> # DEP always has the same internal value
> assert DEP == 76
>
> # "DEP" is automatically converted to DEP
> assert DEP == nlp.vocab.strings["DEP"]
> assert doc.has_annotation(DEP) == doc.has_annotation("DEP")
>
> # look up IDs in spacy.attrs.IDS
> from spacy.attrs import IDS
> assert IDS["DEP"] == DEP
> ```

All methods automatically convert between the string version of an ID (`"DEP"`)
and the internal integer symbols (`DEP`). The internal IDs can be imported from
`spacy.attrs` or retrieved from the [`StringStore`](/api/stringstore). A map
from string attribute names to internal attribute IDs is stored in
`spacy.attrs.IDS`.

The corresponding [`Token` object attributes](/api/token#attributes) can be
accessed using the same names in lowercase, e.g. `token.orth` or `token.length`.
For attributes that represent string values, the internal integer ID is
accessed as `Token.attr`, e.g. `token.dep`, while the string value can be
retrieved by appending `_` as in `token.dep_`.

| Attribute    | Description                                                                                                                                                     |
| ------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `DEP`        | The token's dependency label. ~~str~~                                                                                                                           |
| `ENT_ID`     | The token's entity ID (`ent_id`). ~~str~~                                                                                                                       |
| `ENT_IOB`    | The IOB part of the token's entity tag. Uses custom integer values rather than the string store: unset is `0`, `I` is `1`, `O` is `2`, and `B` is `3`. ~~str~~ |
| `ENT_KB_ID`  | The token's entity knowledge base ID. ~~str~~                                                                                                                   |
| `ENT_TYPE`   | The token's entity label. ~~str~~                                                                                                                               |
| `IS_ALPHA`   | Token text consists of alphabetic characters. ~~bool~~                                                                                                          |
| `IS_ASCII`   | Token text consists of ASCII characters. ~~bool~~                                                                                                               |
| `IS_DIGIT`   | Token text consists of digits. ~~bool~~                                                                                                                         |
| `IS_LOWER`   | Token text is in lowercase. ~~bool~~                                                                                                                            |
| `IS_PUNCT`   | Token is punctuation. ~~bool~~                                                                                                                                  |
| `IS_SPACE`   | Token is whitespace. ~~bool~~                                                                                                                                   |
| `IS_STOP`    | Token is a stop word. ~~bool~~                                                                                                                                  |
| `IS_TITLE`   | Token text is in titlecase. ~~bool~~                                                                                                                            |
| `IS_UPPER`   | Token text is in uppercase. ~~bool~~                                                                                                                            |
| `LEMMA`      | The token's lemma. ~~str~~                                                                                                                                      |
| `LENGTH`     | The length of the token text. ~~int~~                                                                                                                           |
| `LIKE_EMAIL` | Token text resembles an email address. ~~bool~~                                                                                                                 |
| `LIKE_NUM`   | Token text resembles a number. ~~bool~~                                                                                                                         |
| `LIKE_URL`   | Token text resembles a URL. ~~bool~~                                                                                                                            |
| `LOWER`      | The lowercase form of the token text. ~~str~~                                                                                                                   |
| `MORPH`      | The token's morphological analysis. ~~MorphAnalysis~~                                                                                                           |
| `NORM`       | The normalized form of the token text. ~~str~~                                                                                                                  |
| `ORTH`       | The exact verbatim text of a token. ~~str~~                                                                                                                     |
| `POS`        | The token's universal part of speech (UPOS). ~~str~~                                                                                                            |
| `SENT_START` | Token is start of sentence. ~~bool~~                                                                                                                            |
| `SHAPE`      | The token's shape. ~~str~~                                                                                                                                      |
| `SPACY`      | Token has a trailing space. ~~bool~~                                                                                                                            |
| `TAG`        | The token's fine-grained part of speech. ~~str~~                                                                                                                |
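As a quick illustration of the integer/string duality described in the new page, here is a small sketch that works with a blank pipeline and needs no trained components:

```python
import spacy
from spacy.attrs import LEMMA, POS

nlp = spacy.blank("en")
doc = nlp("Attributes come in integer and string flavours.")

token = doc[0]
# The integer attribute and its underscore variant refer to the same string
assert token.lower == nlp.vocab.strings[token.lower_]

# The same attribute IDs select columns for Doc.to_array
array = doc.to_array([LEMMA, POS])
assert array.shape == (len(doc), 2)
```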
@@ -2,7 +2,7 @@
 title: SpanRuler
 tag: class
 source: spacy/pipeline/span_ruler.py
-new: 3.3.1
+new: 3.3
 teaser: 'Pipeline component for rule-based span and named entity recognition'
 api_string_name: span_ruler
 api_trainable: false
@@ -203,11 +203,14 @@ the data to and from a JSON file.

 ```python
 ### {highlight="16-23,25-30"}
+import json
+from spacy import Language
 from spacy.util import ensure_path

 @Language.factory("my_component")
 class CustomComponent:
-    def __init__(self):
+    def __init__(self, nlp: Language, name: str = "my_component"):
+        self.name = name
         self.data = []

     def __call__(self, doc):
@@ -231,7 +234,7 @@ class CustomComponent:
         # This will receive the directory path + /my_component
         data_path = path / "data.json"
         with data_path.open("r", encoding="utf8") as f:
-            self.data = json.loads(f)
+            self.data = json.load(f)
         return self
 ```
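For context, a rough sketch of how this component's serialization hooks get exercised, assuming the `@Language.factory("my_component")` class above has been executed in the current process; the path is arbitrary:

```python
import spacy

nlp = spacy.blank("en")
nlp.add_pipe("my_component")  # the factory registered above

# Saving the pipeline calls CustomComponent.to_disk under the hood,
# writing data.json into the my_component subdirectory.
nlp.to_disk("/tmp/my_pipeline")

# Loading it back calls CustomComponent.from_disk, which now reads the
# open file handle with json.load(f); json.loads expects a string instead.
nlp2 = spacy.load("/tmp/my_pipeline")
```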
@@ -124,6 +124,7 @@
         {
           "label": "Other",
           "items": [
+            { "text": "Attributes", "url": "/api/attributes" },
             { "text": "Corpus", "url": "/api/corpus" },
             { "text": "KnowledgeBase", "url": "/api/kb" },
             { "text": "Lookups", "url": "/api/lookups" },
@@ -1,5 +1,34 @@
 {
     "resources": [
+        {
+            "id": "spacyfishing",
+            "title": "spaCy fishing",
+            "slogan": "Named entity disambiguation and linking on Wikidata in spaCy with Entity-Fishing.",
+            "description": "A spaCy wrapper of Entity-Fishing for named entity disambiguation and linking against a Wikidata knowledge base.",
+            "github": "Lucaterre/spacyfishing",
+            "pip": "spacyfishing",
+            "code_example": [
+                "import spacy",
+                "text = 'Victor Hugo and Honoré de Balzac are French writers who lived in Paris.'",
+                "nlp = spacy.load('en_core_web_sm')",
+                "nlp.add_pipe('entityfishing')",
+                "doc = nlp(text)",
+                "for span in doc.ents:",
+                "    print((span.text, span.label_, span._.kb_qid, span._.url_wikidata, span._.nerd_score))",
+                "# ('Victor Hugo', 'PERSON', 'Q535', 'https://www.wikidata.org/wiki/Q535', 0.972)",
+                "# ('Honoré de Balzac', 'PERSON', 'Q9711', 'https://www.wikidata.org/wiki/Q9711', 0.9724)",
+                "# ('French', 'NORP', 'Q121842', 'https://www.wikidata.org/wiki/Q121842', 0.3739)",
+                "# ('Paris', 'GPE', 'Q90', 'https://www.wikidata.org/wiki/Q90', 0.5652)",
+                "## Set parameter `extra_info` to `True` and check also span._.description, span._.src_description, span._.normal_term, span._.other_ids"
+            ],
+            "category": ["models", "pipeline"],
+            "tags": ["NER", "NEL"],
+            "author": "Lucas Terriel",
+            "author_links": {
+                "twitter": "TerreLuca",
+                "github": "Lucaterre"
+            }
+        },
         {
             "id": "aim-spacy",
             "title": "Aim-spaCy",
@@ -55,7 +84,7 @@
             "code_language": "python",
             "author": "Leap Beyond",
             "author_links": {
-                "github": "https://github.com/LeapBeyond",
+                "github": "LeapBeyond",
                 "website": "https://leapbeyond.ai"
             },
             "code_example": [
@@ -78,8 +107,8 @@
             "code_language": "python",
             "author": "Peter Baumgartner",
             "author_links": {
-                "twitter" : "https://twitter.com/pmbaumgartner",
-                "github": "https://github.com/pmbaumgartner",
+                "twitter" : "pmbaumgartner",
+                "github": "pmbaumgartner",
                 "website": "https://www.peterbaumgartner.com/"
             },
             "code_example": [
@@ -98,8 +127,8 @@
             "code_language": "python",
             "author": "Explosion",
             "author_links": {
-                "twitter" : "https://twitter.com/explosion_ai",
-                "github": "https://github.com/explosion",
+                "twitter" : "explosion_ai",
+                "github": "explosion",
                 "website": "https://explosion.ai/"
             },
             "code_example": [
@@ -571,8 +600,8 @@
             "code_language": "python",
             "author": "Keith Rozario",
             "author_links": {
-                "twitter" : "https://twitter.com/keithrozario",
-                "github": "https://github.com/keithrozario",
+                "twitter" : "keithrozario",
+                "github": "keithrozario",
                 "website": "https://www.keithrozario.com"
             },
             "code_example": [
@@ -2295,7 +2324,7 @@
             "author": "Daniel Whitenack & Chris Benson",
             "author_links": {
                 "website": "https://changelog.com/practicalai",
-                "twitter": "https://twitter.com/PracticalAIFM"
+                "twitter": "PracticalAIFM"
             },
             "category": ["podcasts"]
         },
@@ -24,7 +24,6 @@ const CUDA = {
   '11.3': 'cuda113',
   '11.4': 'cuda114',
   '11.5': 'cuda115',
-  '11.6': 'cuda116',
 }
 const LANG_EXTRAS = ['ja'] // only for languages with models