Auto-format code with black

This commit is contained in:
explosion-bot 2023-02-03 08:03:51 +00:00 committed by GitHub
parent 79ef6cf0f9
commit 66a7563deb
9 changed files with 19 additions and 19 deletions

View File

@ -128,7 +128,6 @@ _other_exc = {
_exc.update(_other_exc)
for h in range(1, 12 + 1):
for period in ["π.μ.", "πμ"]:
_exc[f"{h}{period}"] = [
{ORTH: f"{h}"},

View File

@ -101,9 +101,9 @@ conj_contraction_negations = [
("eivat", "eivät"),
("eivät", "eivät"),
]
for (base_lower, base_norm) in conj_contraction_bases:
for base_lower, base_norm in conj_contraction_bases:
for base in [base_lower, base_lower.title()]:
for (suffix, suffix_norm) in conj_contraction_negations:
for suffix, suffix_norm in conj_contraction_negations:
_exc[base + suffix] = [
{ORTH: base, NORM: base_norm},
{ORTH: suffix, NORM: suffix_norm},

View File

@ -105,7 +105,7 @@ def create_candidates() -> Callable[[KnowledgeBase, Span], Iterable[Candidate]]:
@registry.misc("spacy.CandidateBatchGenerator.v1")
def create_candidates_batch() -> Callable[
[KnowledgeBase, Iterable[Span]], Iterable[Iterable[Candidate]]
]:
def create_candidates_batch() -> (
Callable[[KnowledgeBase, Iterable[Span]], Iterable[Iterable[Candidate]]]
):
return get_candidates_batch

View File

@ -130,7 +130,7 @@ class EditTreeLemmatizer(TrainablePipe):
truths = []
for eg in examples:
eg_truths = []
for (predicted, gold_lemma) in zip(
for predicted, gold_lemma in zip(
eg.predicted, eg.get_aligned("LEMMA", as_string=True)
):
if gold_lemma is None or gold_lemma == "":

View File

@ -132,7 +132,8 @@ def test_issue3869(sentence):
@pytest.mark.issue(3962)
def test_issue3962(en_vocab):
"""Ensure that as_doc does not result in out-of-bound access of tokens.
This is achieved by setting the head to itself if it would lie out of the span otherwise."""
This is achieved by setting the head to itself if it would lie out of the span otherwise.
"""
# fmt: off
words = ["He", "jests", "at", "scars", ",", "that", "never", "felt", "a", "wound", "."]
heads = [1, 7, 1, 2, 7, 7, 7, 7, 9, 7, 7]
@ -171,7 +172,8 @@ def test_issue3962(en_vocab):
@pytest.mark.issue(3962)
def test_issue3962_long(en_vocab):
"""Ensure that as_doc does not result in out-of-bound access of tokens.
This is achieved by setting the head to itself if it would lie out of the span otherwise."""
This is achieved by setting the head to itself if it would lie out of the span otherwise.
"""
# fmt: off
words = ["He", "jests", "at", "scars", ".", "They", "never", "felt", "a", "wound", "."]
heads = [1, 1, 1, 2, 1, 7, 7, 7, 9, 7, 7]

View File

@ -238,7 +238,6 @@ def test_spans_span_sent(doc, doc_not_parsed):
],
)
def test_spans_span_sent_user_hooks(doc, start, end, expected_sentence):
# Doc-level sents hook
def user_hook(doc):
return [doc[ii : ii + 2] for ii in range(0, len(doc), 2)]
@ -668,7 +667,6 @@ def test_span_comparison(doc):
],
)
def test_span_sents(doc, start, end, expected_sentences, expected_sentences_with_hook):
assert len(list(doc[start:end].sents)) == expected_sentences
def user_hook(doc):

View File

@ -544,7 +544,7 @@ def test_greedy_matching_longest(doc, text, pattern, longest):
matcher = Matcher(doc.vocab)
matcher.add("RULE", [pattern], greedy="LONGEST")
matches = matcher(doc)
for (key, s, e) in matches:
for key, s, e in matches:
assert doc[s:e].text == longest

View File

@ -505,15 +505,15 @@ def test_el_pipe_configuration(nlp):
return [get_lowercased_candidates(kb, span) for span in spans]
@registry.misc("spacy.LowercaseCandidateGenerator.v1")
def create_candidates() -> Callable[
[InMemoryLookupKB, "Span"], Iterable[Candidate]
]:
def create_candidates() -> (
Callable[[InMemoryLookupKB, "Span"], Iterable[Candidate]]
):
return get_lowercased_candidates
@registry.misc("spacy.LowercaseCandidateBatchGenerator.v1")
def create_candidates_batch() -> Callable[
[InMemoryLookupKB, Iterable["Span"]], Iterable[Iterable[Candidate]]
]:
def create_candidates_batch() -> (
Callable[[InMemoryLookupKB, Iterable["Span"]], Iterable[Iterable[Candidate]]]
):
return get_lowercased_candidates_batch
    # replace the pipe with a new one with a different candidate generator

View File

@ -822,7 +822,8 @@ def test_textcat_eval_missing(multi_label: bool, spring_p: float):
def test_textcat_loss(multi_label: bool, expected_loss: float):
"""
multi-label: the missing 'spring' in gold_doc_2 doesn't incur an increase in loss
exclusive labels: the missing 'spring' in gold_doc_2 is interpreted as 0.0 and adds to the loss"""
exclusive labels: the missing 'spring' in gold_doc_2 is interpreted as 0.0 and adds to the loss
"""
train_examples = []
nlp = English()