Mirror of https://github.com/explosion/spaCy.git (synced 2025-03-03 10:55:52 +03:00)

Tidy up and auto-format [ci skip]

This commit is contained in:
parent a5633b205f
commit febb99916d
@@ -5,7 +5,8 @@ from thinc.api import require_gpu, fix_random_seed, set_dropout_rate, Adam
 from thinc.api import Model, data_validation
 import typer

-from ._util import Arg, Opt, debug_cli, show_validation_error, parse_config_overrides, string_to_list
+from ._util import Arg, Opt, debug_cli, show_validation_error
+from ._util import parse_config_overrides, string_to_list
 from .. import util
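The over-long import is split into two statements to satisfy the formatter's line-length limit. An equivalent single-statement alternative would be a parenthesized import (shown for illustration; not the style this commit chose):

from ._util import (
    Arg,
    Opt,
    debug_cli,
    show_validation_error,
    parse_config_overrides,
    string_to_list,
)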
@@ -277,7 +277,7 @@ def read_vectors(msg: Printer, vectors_loc: Path, truncate_vectors: int):


 def ensure_shape(lines):
     """Ensure that the first line of the data is the vectors shape.

     If it's not, we read in the data and output the shape as the first result,
     so that the reader doesn't have to deal with the problem.
     """
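A minimal sketch of the behaviour that docstring describes, assuming word2vec-style text input where a valid header line is "<rows> <dims>" (illustrative only, not the function body from this file):

def ensure_shape_sketch(lines):
    lines = list(lines)
    first = lines[0].split()
    if len(first) == 2 and all(part.isdigit() for part in first):
        # The shape header is already there; pass the data through.
        yield from lines
    else:
        # Each data line is a word followed by its vector components.
        dims = len(lines[0].split()) - 1
        yield f"{len(lines)} {dims}"
        yield from lines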
@@ -244,7 +244,8 @@ class Language:
         self._config["nlp"]["disabled"] = list(self.disabled)
         self._config["components"] = pipeline
         if not self._config["training"].get("score_weights"):
-            self._config["training"]["score_weights"] = combine_score_weights(score_weights)
+            combined_score_weights = combine_score_weights(score_weights)
+            self._config["training"]["score_weights"] = combined_score_weights
         if not srsly.is_json_serializable(self._config):
             raise ValueError(Errors.E961.format(config=self._config))
         return self._config
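For context, combine_score_weights merges the per-component score weights into a single dict for the training config. A minimal sketch of that kind of merge, normalizing so the weights sum to 1.0 (an illustration, not spaCy's actual implementation):

from typing import Dict, List

def combine_weights_sketch(weights: List[Dict[str, float]]) -> Dict[str, float]:
    total = sum(value for w in weights for value in w.values()) or 1.0
    return {key: round(value / total, 2) for w in weights for key, value in w.items()}

# e.g. [{"dep_uas": 0.5, "dep_las": 0.5}, {"ents_f": 1.0}]
# ->   {"dep_uas": 0.25, "dep_las": 0.25, "ents_f": 0.5}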
@@ -9,7 +9,10 @@ from spacy.pipeline.ner import DEFAULT_NER_MODEL


 def _ner_example(ner):
-    doc = Doc(ner.vocab, words=["Joe", "loves", "visiting", "London", "during", "the", "weekend"])
+    doc = Doc(
+        ner.vocab,
+        words=["Joe", "loves", "visiting", "London", "during", "the", "weekend"],
+    )
     gold = {"entities": [(0, 3, "PERSON"), (19, 25, "LOC")]}
     return Example.from_dict(doc, gold)
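The character offsets in the gold dict can be sanity-checked against the joined words (a standalone check, not part of the commit):

text = "Joe loves visiting London during the weekend"
assert text[0:3] == "Joe"       # (0, 3, "PERSON")
assert text[19:25] == "London"  # (19, 25, "LOC")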
@@ -78,7 +78,7 @@ def patterns(en_vocab):
             "REL_OP": ">",
             "RIGHT_ID": "fox",
             "RIGHT_ATTRS": {"ORTH": "fox"},
-        }
+        },
     ]

     pattern5 = [
@@ -233,9 +233,7 @@ def test_dependency_matcher_callback(en_vocab, doc):
     assert matches == matches2


-@pytest.mark.parametrize(
-    "op,num_matches", [(".", 8), (".*", 20), (";", 8), (";*", 20),]
-)
+@pytest.mark.parametrize("op,num_matches", [(".", 8), (".*", 20), (";", 8), (";*", 20)])
 def test_dependency_matcher_precedence_ops(en_vocab, op, num_matches):
     # two sentences to test that all matches are within the same sentence
     doc = get_doc(
@@ -248,7 +246,7 @@ def test_dependency_matcher_precedence_ops(en_vocab, op, num_matches):
     for text in ["a", "b", "c", "d", "e"]:
         pattern = [
             {"RIGHT_ID": "1", "RIGHT_ATTRS": {"ORTH": text}},
-            {"LEFT_ID": "1", "REL_OP": op, "RIGHT_ID": "2", "RIGHT_ATTRS": {},},
+            {"LEFT_ID": "1", "REL_OP": op, "RIGHT_ID": "2", "RIGHT_ATTRS": {}},
         ]
         matcher = DependencyMatcher(en_vocab)
         matcher.add("A", [pattern])
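The pattern hunks above show black's "magic trailing comma" working in both directions: a trailing comma is added after the last element of a literal that has to stay multi-line, and a redundant one is removed so an expression can collapse onto a single line. A quick illustration:

# A trailing comma keeps the literal exploded, one element per line:
nums = [
    1,
    2,
]
# Without it, black collapses the literal when it fits the line limit:
nums = [1, 2]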
@@ -54,7 +54,10 @@ def _parser_example(parser):


 def _ner_example(ner):
-    doc = Doc(ner.vocab, words=["Joe", "loves", "visiting", "London", "during", "the", "weekend"])
+    doc = Doc(
+        ner.vocab,
+        words=["Joe", "loves", "visiting", "London", "during", "the", "weekend"],
+    )
     gold = {"entities": [(0, 3, "PERSON"), (19, 25, "LOC")]}
     return Example.from_dict(doc, gold)
@@ -30,9 +30,10 @@ TRAIN_DATA = [
     ),
 ]

+
 def test_begin_training_examples():
     nlp = Language()
-    senter = nlp.add_pipe("senter")
+    nlp.add_pipe("senter")
     train_examples = []
     for t in TRAIN_DATA:
         train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1]))
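Here and in the textcat test below, the assignment is dropped because the returned pipe handle was never used (the pattern flake8 flags as F841). For reference, a self-contained sketch of the Example construction the loop performs; the import path follows released spaCy v3, and the sentence and gold dict are made up:

from spacy.language import Language
from spacy.training import Example

nlp = Language()
t = ("I like green eggs.", {"sent_starts": [1, 0, 0, 0, 0]})
example = Example.from_dict(nlp.make_doc(t[0]), t[1])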
@@ -89,7 +89,7 @@ def test_no_label():

 def test_implicit_label():
     nlp = Language()
-    textcat = nlp.add_pipe("textcat")
+    nlp.add_pipe("textcat")
     train_examples = []
     for t in TRAIN_DATA:
         train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1]))