Tidy up and auto-format [ci skip]

Ines Montani 2020-09-13 10:55:36 +02:00
parent a5633b205f
commit febb99916d
8 changed files with 19 additions and 12 deletions

View File

@@ -5,7 +5,8 @@ from thinc.api import require_gpu, fix_random_seed, set_dropout_rate, Adam
 from thinc.api import Model, data_validation
 import typer
-from ._util import Arg, Opt, debug_cli, show_validation_error, parse_config_overrides, string_to_list
+from ._util import Arg, Opt, debug_cli, show_validation_error
+from ._util import parse_config_overrides, string_to_list
 from .. import util

View File

@@ -244,7 +244,8 @@ class Language:
         self._config["nlp"]["disabled"] = list(self.disabled)
         self._config["components"] = pipeline
         if not self._config["training"].get("score_weights"):
-            self._config["training"]["score_weights"] = combine_score_weights(score_weights)
+            combined_score_weights = combine_score_weights(score_weights)
+            self._config["training"]["score_weights"] = combined_score_weights
         if not srsly.is_json_serializable(self._config):
             raise ValueError(Errors.E961.format(config=self._config))
         return self._config
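
For context, the change above only introduces an intermediate variable so the assignment fits within the formatter's line length; the behaviour of combine_score_weights is unchanged. As a rough illustration of what a helper like this typically does (a hedged sketch, not spaCy's actual implementation), merging per-component score weights and normalizing them so they sum to 1 could look like:

from typing import Dict, List

def combine_score_weights_sketch(weights: List[Dict[str, float]]) -> Dict[str, float]:
    # Merge the per-component weight dicts; later components override earlier ones.
    combined: Dict[str, float] = {}
    for component_weights in weights:
        combined.update(component_weights)
    # Normalize so that the merged weights sum to 1.0 (skip if everything is zero).
    total = sum(combined.values())
    if total > 0:
        combined = {name: value / total for name, value in combined.items()}
    return combined

# Example: weights contributed by a tagger and a parser.
print(combine_score_weights_sketch([{"tag_acc": 1.0}, {"dep_uas": 0.5, "dep_las": 0.5}]))
# -> {'tag_acc': 0.5, 'dep_uas': 0.25, 'dep_las': 0.25}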

View File

@@ -9,7 +9,10 @@ from spacy.pipeline.ner import DEFAULT_NER_MODEL
 def _ner_example(ner):
-    doc = Doc(ner.vocab, words=["Joe", "loves", "visiting", "London", "during", "the", "weekend"])
+    doc = Doc(
+        ner.vocab,
+        words=["Joe", "loves", "visiting", "London", "during", "the", "weekend"],
+    )
     gold = {"entities": [(0, 3, "PERSON"), (19, 25, "LOC")]}
     return Example.from_dict(doc, gold)
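
The helper reformatted above builds a training Example from a pre-tokenized Doc plus a gold-standard dict, where each entity is a (start_char, end_char, label) offset into the doc's text. A minimal self-contained version, assuming spaCy v3's spacy.training.Example and a plain Vocab in place of the fixture's ner.vocab:

from spacy.vocab import Vocab
from spacy.tokens import Doc
from spacy.training import Example

doc = Doc(
    Vocab(),
    words=["Joe", "loves", "visiting", "London", "during", "the", "weekend"],
)
# Character offsets into "Joe loves visiting London during the weekend":
# "Joe" spans 0-3 and "London" spans 19-25.
gold = {"entities": [(0, 3, "PERSON"), (19, 25, "LOC")]}
example = Example.from_dict(doc, gold)
print([(ent.text, ent.label_) for ent in example.reference.ents])
# -> [('Joe', 'PERSON'), ('London', 'LOC')]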

View File

@@ -78,7 +78,7 @@ def patterns(en_vocab):
             "REL_OP": ">",
             "RIGHT_ID": "fox",
             "RIGHT_ATTRS": {"ORTH": "fox"},
-        }
+        },
     ]
     pattern5 = [
@@ -233,9 +233,7 @@ def test_dependency_matcher_callback(en_vocab, doc):
     assert matches == matches2
-@pytest.mark.parametrize(
-    "op,num_matches", [(".", 8), (".*", 20), (";", 8), (";*", 20),]
-)
+@pytest.mark.parametrize("op,num_matches", [(".", 8), (".*", 20), (";", 8), (";*", 20)])
 def test_dependency_matcher_precedence_ops(en_vocab, op, num_matches):
     # two sentences to test that all matches are within the same sentence
     doc = get_doc(
@@ -248,7 +246,7 @@ def test_dependency_matcher_precedence_ops(en_vocab, op, num_matches):
     for text in ["a", "b", "c", "d", "e"]:
         pattern = [
             {"RIGHT_ID": "1", "RIGHT_ATTRS": {"ORTH": text}},
-            {"LEFT_ID": "1", "REL_OP": op, "RIGHT_ID": "2", "RIGHT_ATTRS": {},},
+            {"LEFT_ID": "1", "REL_OP": op, "RIGHT_ID": "2", "RIGHT_ATTRS": {}},
         ]
         matcher = DependencyMatcher(en_vocab)
         matcher.add("A", [pattern])
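
As background on the API these tests exercise: a DependencyMatcher pattern is a list of dicts in which the first dict anchors a token via RIGHT_ID and RIGHT_ATTRS, and every following dict attaches a new token to an already matched one via LEFT_ID, REL_OP and RIGHT_ATTRS. A rough usage sketch, assuming the en_core_web_sm pipeline is installed to provide the dependency parse:

import spacy
from spacy.matcher import DependencyMatcher

nlp = spacy.load("en_core_web_sm")
doc = nlp("The quick brown fox jumped over the lazy dog.")

# Anchor on the verb "jumped", then require "fox" as one of its immediate children.
pattern = [
    {"RIGHT_ID": "jumped", "RIGHT_ATTRS": {"ORTH": "jumped"}},
    {
        "LEFT_ID": "jumped",
        "REL_OP": ">",  # ">" = the left token is the immediate head of the right token
        "RIGHT_ID": "fox",
        "RIGHT_ATTRS": {"ORTH": "fox"},
    },
]

matcher = DependencyMatcher(nlp.vocab)
matcher.add("JUMP_FOX", [pattern])
for match_id, token_ids in matcher(doc):
    print([doc[i].text for i in token_ids])  # e.g. ['jumped', 'fox']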

View File

@@ -54,7 +54,10 @@ def _parser_example(parser):
 def _ner_example(ner):
-    doc = Doc(ner.vocab, words=["Joe", "loves", "visiting", "London", "during", "the", "weekend"])
+    doc = Doc(
+        ner.vocab,
+        words=["Joe", "loves", "visiting", "London", "during", "the", "weekend"],
+    )
     gold = {"entities": [(0, 3, "PERSON"), (19, 25, "LOC")]}
     return Example.from_dict(doc, gold)

View File

@@ -30,9 +30,10 @@ TRAIN_DATA = [
     ),
 ]
 def test_begin_training_examples():
     nlp = Language()
-    senter = nlp.add_pipe("senter")
+    nlp.add_pipe("senter")
     train_examples = []
     for t in TRAIN_DATA:
         train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1]))
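
The change above drops an assignment whose result is never used: in spaCy v3, nlp.add_pipe takes the registered component name and returns the component instance, so binding the return value is only needed when the test interacts with the component directly. A minimal sketch:

from spacy.lang.en import English

nlp = English()
# add_pipe() returns the component instance; keep it only if it is used later.
senter = nlp.add_pipe("senter")
print(nlp.pipe_names)  # -> ['senter']
print(type(senter).__name__)  # e.g. 'SentenceRecognizer'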

View File

@@ -89,7 +89,7 @@ def test_no_label():
 def test_implicit_label():
     nlp = Language()
-    textcat = nlp.add_pipe("textcat")
+    nlp.add_pipe("textcat")
     train_examples = []
     for t in TRAIN_DATA:
         train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1]))