mirror of
https://github.com/explosion/spaCy.git
synced 2024-12-24 17:06:29 +03:00
Update/remove old Matcher syntax (#11370)
* Clean up old Matcher call style related stuff In v2 Matcher.add was called with (key, on_match, *patterns). In v3 this was changed to (key, patterns, *, on_match=None), but there were various points where the old call syntax was documented or handled specially. This removes all those. The Matcher itself didn't need any code changes, as it just gives a generic type error. However the PhraseMatcher required some changes because it would automatically "fix" the old call style. Surprisingly, the tokenizer was still using the old call style in one place. After these changes tests failed in two places: 1. one test of the "new" call style that also exercised the "old" call style; I removed this test. 2. deserializing the PhraseMatcher fails because the input docs are a set. I am not sure why 2 is happening - I guess it's a quirk of the serialization format? - so for now I just convert the set to a list when deserializing. The check that the input Docs are a List in the PhraseMatcher is a new check, but makes it parallel with the other Matchers, which seemed like the right thing to do. * Add notes related to input docs / deserialization type * Remove Typing import * Remove old note about call style change * Apply suggestions from code review Co-authored-by: Adriane Boyd <adrianeboyd@gmail.com> * Use separate method for setting internal doc representations In addition to the title change, this changes the internal dict to be a defaultdict, instead of a dict with frequent use of setdefault. * Add _add_from_arrays for unpickling * Cleanup around adding from arrays This moves adding to internal structures into the private batch method, and removes the single-add method. This has one behavioral change for `add`, in that if something is wrong with the list of input Docs (such as one of the items not being a Doc), valid items before the invalid one will not be added. Also the callback will not be updated if anything is invalid. This change should not be significant. 
This also adds a test to check failure when given a non-Doc. * Update spacy/matcher/phrasematcher.pyx Co-authored-by: Adriane Boyd <adrianeboyd@gmail.com> Co-authored-by: Adriane Boyd <adrianeboyd@gmail.com>
This commit is contained in:
parent
98a916e01a
commit
698b8b495f
|
@ -487,7 +487,7 @@ class Errors(metaclass=ErrorsWithCodes):
|
|||
"Current DocBin: {current}\nOther DocBin: {other}")
|
||||
E169 = ("Can't find module: {module}")
|
||||
E170 = ("Cannot apply transition {name}: invalid for the current state.")
|
||||
E171 = ("Matcher.add received invalid 'on_match' callback argument: expected "
|
||||
E171 = ("{name}.add received invalid 'on_match' callback argument: expected "
|
||||
"callable or None, but got: {arg_type}")
|
||||
E175 = ("Can't remove rule for unknown match pattern ID: {key}")
|
||||
E176 = ("Alias '{alias}' is not defined in the Knowledge Base.")
|
||||
|
@ -738,7 +738,7 @@ class Errors(metaclass=ErrorsWithCodes):
|
|||
"loaded nlp object, but got: {source}")
|
||||
E947 = ("`Matcher.add` received invalid `greedy` argument: expected "
|
||||
"a string value from {expected} but got: '{arg}'")
|
||||
E948 = ("`Matcher.add` received invalid 'patterns' argument: expected "
|
||||
E948 = ("`{name}.add` received invalid 'patterns' argument: expected "
|
||||
"a list, but got: {arg_type}")
|
||||
E949 = ("Unable to align tokens for the predicted and reference docs. It "
|
||||
"is only possible to align the docs when both texts are the same "
|
||||
|
@ -941,6 +941,9 @@ class Errors(metaclass=ErrorsWithCodes):
|
|||
E1043 = ("Expected None or a value in range [{range_start}, {range_end}] for entity linker threshold, but got "
|
||||
"{value}.")
|
||||
|
||||
# v4 error strings
|
||||
E4000 = ("Expected a Doc as input, but got: '{type}'")
|
||||
|
||||
|
||||
# Deprecated model shortcuts, only used in errors and warnings
|
||||
OLD_MODEL_SHORTCUTS = {
|
||||
|
|
|
@ -165,9 +165,9 @@ cdef class DependencyMatcher:
|
|||
on_match (callable): Optional callback executed on match.
|
||||
"""
|
||||
if on_match is not None and not hasattr(on_match, "__call__"):
|
||||
raise ValueError(Errors.E171.format(arg_type=type(on_match)))
|
||||
if patterns is None or not isinstance(patterns, List): # old API
|
||||
raise ValueError(Errors.E948.format(arg_type=type(patterns)))
|
||||
raise ValueError(Errors.E171.format(name="DependencyMatcher", arg_type=type(on_match)))
|
||||
if patterns is None or not isinstance(patterns, List):
|
||||
raise ValueError(Errors.E948.format(name="DependencyMatcher", arg_type=type(patterns)))
|
||||
for pattern in patterns:
|
||||
if len(pattern) == 0:
|
||||
raise ValueError(Errors.E012.format(key=key))
|
||||
|
|
|
@ -110,9 +110,9 @@ cdef class Matcher:
|
|||
"""
|
||||
errors = {}
|
||||
if on_match is not None and not hasattr(on_match, "__call__"):
|
||||
raise ValueError(Errors.E171.format(arg_type=type(on_match)))
|
||||
if patterns is None or not isinstance(patterns, List): # old API
|
||||
raise ValueError(Errors.E948.format(arg_type=type(patterns)))
|
||||
raise ValueError(Errors.E171.format(name="Matcher", arg_type=type(on_match)))
|
||||
if patterns is None or not isinstance(patterns, List):
|
||||
raise ValueError(Errors.E948.format(name="Matcher", arg_type=type(patterns)))
|
||||
if greedy is not None and greedy not in ["FIRST", "LONGEST"]:
|
||||
raise ValueError(Errors.E947.format(expected=["FIRST", "LONGEST"], arg=greedy))
|
||||
for i, pattern in enumerate(patterns):
|
||||
|
|
|
@ -20,6 +20,15 @@ class PhraseMatcher:
|
|||
Callable[[Matcher, Doc, int, List[Tuple[Any, ...]]], Any]
|
||||
] = ...,
|
||||
) -> None: ...
|
||||
def _add_from_arrays(
|
||||
self,
|
||||
key: str,
|
||||
specs: List[List[int]],
|
||||
*,
|
||||
on_match: Optional[
|
||||
Callable[[Matcher, Doc, int, List[Tuple[Any, ...]]], Any]
|
||||
] = ...,
|
||||
) -> None: ...
|
||||
def remove(self, key: str) -> None: ...
|
||||
@overload
|
||||
def __call__(
|
||||
|
|
|
@ -1,4 +1,6 @@
|
|||
# cython: infer_types=True, profile=True
|
||||
from typing import List
|
||||
from collections import defaultdict
|
||||
from libc.stdint cimport uintptr_t
|
||||
from preshed.maps cimport map_init, map_set, map_get, map_clear, map_iter
|
||||
|
||||
|
@ -39,7 +41,7 @@ cdef class PhraseMatcher:
|
|||
"""
|
||||
self.vocab = vocab
|
||||
self._callbacks = {}
|
||||
self._docs = {}
|
||||
self._docs = defaultdict(set)
|
||||
self._validate = validate
|
||||
|
||||
self.mem = Pool()
|
||||
|
@ -155,66 +157,24 @@ cdef class PhraseMatcher:
|
|||
del self._callbacks[key]
|
||||
del self._docs[key]
|
||||
|
||||
def add(self, key, docs, *_docs, on_match=None):
|
||||
"""Add a match-rule to the phrase-matcher. A match-rule consists of: an ID
|
||||
key, an on_match callback, and one or more patterns.
|
||||
|
||||
Since spaCy v2.2.2, PhraseMatcher.add takes a list of patterns as the
|
||||
second argument, with the on_match callback as an optional keyword
|
||||
argument.
|
||||
def _add_from_arrays(self, key, specs, *, on_match=None):
|
||||
"""Add a preprocessed list of specs, with an optional callback.
|
||||
|
||||
key (str): The match ID.
|
||||
docs (list): List of `Doc` objects representing match patterns.
|
||||
specs (List[List[int]]): A list of lists of hashes to match.
|
||||
on_match (callable): Callback executed on match.
|
||||
*_docs (Doc): For backwards compatibility: list of patterns to add
|
||||
as variable arguments. Will be ignored if a list of patterns is
|
||||
provided as the second argument.
|
||||
|
||||
DOCS: https://spacy.io/api/phrasematcher#add
|
||||
"""
|
||||
if docs is None or hasattr(docs, "__call__"): # old API
|
||||
on_match = docs
|
||||
docs = _docs
|
||||
|
||||
_ = self.vocab[key]
|
||||
self._callbacks[key] = on_match
|
||||
self._docs.setdefault(key, set())
|
||||
|
||||
cdef MapStruct* current_node
|
||||
cdef MapStruct* internal_node
|
||||
cdef void* result
|
||||
|
||||
if isinstance(docs, Doc):
|
||||
raise ValueError(Errors.E179.format(key=key))
|
||||
for doc in docs:
|
||||
if len(doc) == 0:
|
||||
continue
|
||||
if isinstance(doc, Doc):
|
||||
attrs = (TAG, POS, MORPH, LEMMA, DEP)
|
||||
has_annotation = {attr: doc.has_annotation(attr) for attr in attrs}
|
||||
for attr in attrs:
|
||||
if self.attr == attr and not has_annotation[attr]:
|
||||
if attr == TAG:
|
||||
pipe = "tagger"
|
||||
elif attr in (POS, MORPH):
|
||||
pipe = "morphologizer or tagger+attribute_ruler"
|
||||
elif attr == LEMMA:
|
||||
pipe = "lemmatizer"
|
||||
elif attr == DEP:
|
||||
pipe = "parser"
|
||||
error_msg = Errors.E155.format(pipe=pipe, attr=self.vocab.strings.as_string(attr))
|
||||
raise ValueError(error_msg)
|
||||
if self._validate and any(has_annotation.values()) \
|
||||
and self.attr not in attrs:
|
||||
string_attr = self.vocab.strings[self.attr]
|
||||
warnings.warn(Warnings.W012.format(key=key, attr=string_attr))
|
||||
keyword = self._convert_to_array(doc)
|
||||
else:
|
||||
keyword = doc
|
||||
self._docs[key].add(tuple(keyword))
|
||||
self._callbacks[key] = on_match
|
||||
for spec in specs:
|
||||
self._docs[key].add(tuple(spec))
|
||||
|
||||
current_node = self.c_map
|
||||
for token in keyword:
|
||||
for token in spec:
|
||||
if token == self._terminal_hash:
|
||||
warnings.warn(Warnings.W021)
|
||||
break
|
||||
|
@ -233,6 +193,57 @@ cdef class PhraseMatcher:
|
|||
result = internal_node
|
||||
map_set(self.mem, <MapStruct*>result, self.vocab.strings[key], NULL)
|
||||
|
||||
|
||||
def add(self, key, docs, *, on_match=None):
|
||||
"""Add a match-rule to the phrase-matcher. A match-rule consists of: an ID
|
||||
key, a list of one or more patterns, and (optionally) an on_match callback.
|
||||
|
||||
key (str): The match ID.
|
||||
docs (list): List of `Doc` objects representing match patterns.
|
||||
on_match (callable): Callback executed on match.
|
||||
|
||||
If any of the input Docs are invalid, no internal state will be updated.
|
||||
|
||||
DOCS: https://spacy.io/api/phrasematcher#add
|
||||
"""
|
||||
if isinstance(docs, Doc):
|
||||
raise ValueError(Errors.E179.format(key=key))
|
||||
if docs is None or not isinstance(docs, List):
|
||||
raise ValueError(Errors.E948.format(name="PhraseMatcher", arg_type=type(docs)))
|
||||
if on_match is not None and not hasattr(on_match, "__call__"):
|
||||
raise ValueError(Errors.E171.format(name="PhraseMatcher", arg_type=type(on_match)))
|
||||
|
||||
_ = self.vocab[key]
|
||||
specs = []
|
||||
|
||||
for doc in docs:
|
||||
if len(doc) == 0:
|
||||
continue
|
||||
if not isinstance(doc, Doc):
|
||||
raise ValueError(Errors.E4000.format(type=type(doc)))
|
||||
|
||||
attrs = (TAG, POS, MORPH, LEMMA, DEP)
|
||||
has_annotation = {attr: doc.has_annotation(attr) for attr in attrs}
|
||||
for attr in attrs:
|
||||
if self.attr == attr and not has_annotation[attr]:
|
||||
if attr == TAG:
|
||||
pipe = "tagger"
|
||||
elif attr in (POS, MORPH):
|
||||
pipe = "morphologizer or tagger+attribute_ruler"
|
||||
elif attr == LEMMA:
|
||||
pipe = "lemmatizer"
|
||||
elif attr == DEP:
|
||||
pipe = "parser"
|
||||
error_msg = Errors.E155.format(pipe=pipe, attr=self.vocab.strings.as_string(attr))
|
||||
raise ValueError(error_msg)
|
||||
if self._validate and any(has_annotation.values()) \
|
||||
and self.attr not in attrs:
|
||||
string_attr = self.vocab.strings[self.attr]
|
||||
warnings.warn(Warnings.W012.format(key=key, attr=string_attr))
|
||||
specs.append(self._convert_to_array(doc))
|
||||
|
||||
self._add_from_arrays(key, specs, on_match=on_match)
|
||||
|
||||
def __call__(self, object doclike, *, as_spans=False):
|
||||
"""Find all sequences matching the supplied patterns on the `Doc`.
|
||||
|
||||
|
@ -345,7 +356,7 @@ def unpickle_matcher(vocab, docs, callbacks, attr):
|
|||
matcher = PhraseMatcher(vocab, attr=attr)
|
||||
for key, specs in docs.items():
|
||||
callback = callbacks.get(key, None)
|
||||
matcher.add(key, specs, on_match=callback)
|
||||
matcher._add_from_arrays(key, specs, on_match=callback)
|
||||
return matcher
|
||||
|
||||
|
||||
|
|
|
@ -198,28 +198,6 @@ def test_phrase_matcher_contains(en_vocab):
|
|||
assert "TEST2" not in matcher
|
||||
|
||||
|
||||
def test_phrase_matcher_add_new_api(en_vocab):
|
||||
doc = Doc(en_vocab, words=["a", "b"])
|
||||
patterns = [Doc(en_vocab, words=["a"]), Doc(en_vocab, words=["a", "b"])]
|
||||
matcher = PhraseMatcher(en_vocab)
|
||||
matcher.add("OLD_API", None, *patterns)
|
||||
assert len(matcher(doc)) == 2
|
||||
matcher = PhraseMatcher(en_vocab)
|
||||
on_match = Mock()
|
||||
matcher.add("OLD_API_CALLBACK", on_match, *patterns)
|
||||
assert len(matcher(doc)) == 2
|
||||
assert on_match.call_count == 2
|
||||
# New API: add(key: str, patterns: List[List[dict]], on_match: Callable)
|
||||
matcher = PhraseMatcher(en_vocab)
|
||||
matcher.add("NEW_API", patterns)
|
||||
assert len(matcher(doc)) == 2
|
||||
matcher = PhraseMatcher(en_vocab)
|
||||
on_match = Mock()
|
||||
matcher.add("NEW_API_CALLBACK", patterns, on_match=on_match)
|
||||
assert len(matcher(doc)) == 2
|
||||
assert on_match.call_count == 2
|
||||
|
||||
|
||||
def test_phrase_matcher_repeated_add(en_vocab):
|
||||
matcher = PhraseMatcher(en_vocab)
|
||||
# match ID only gets added once
|
||||
|
@ -468,6 +446,13 @@ def test_phrase_matcher_deprecated(en_vocab):
|
|||
assert "spaCy v3.0" in str(record.list[0].message)
|
||||
|
||||
|
||||
def test_phrase_matcher_non_doc(en_vocab):
|
||||
matcher = PhraseMatcher(en_vocab)
|
||||
doc = Doc(en_vocab, words=["hello", "world"])
|
||||
with pytest.raises(ValueError):
|
||||
matcher.add("TEST", [doc, "junk"])
|
||||
|
||||
|
||||
@pytest.mark.parametrize("attr", ["SENT_START", "IS_SENT_START"])
|
||||
def test_phrase_matcher_sent_start(en_vocab, attr):
|
||||
_ = PhraseMatcher(en_vocab, attr=attr) # noqa: F841
|
||||
|
|
|
@ -614,7 +614,7 @@ cdef class Tokenizer:
|
|||
self._rules[string] = substrings
|
||||
self._flush_cache()
|
||||
if not self.faster_heuristics or self.find_prefix(string) or self.find_infix(string) or self.find_suffix(string) or " " in string:
|
||||
self._special_matcher.add(string, None, self._tokenize_affixes(string, False))
|
||||
self._special_matcher.add(string, [self._tokenize_affixes(string, False)])
|
||||
|
||||
def _reload_special_cases(self):
|
||||
self._flush_cache()
|
||||
|
|
|
@ -64,7 +64,7 @@ matched:
|
|||
> ```
|
||||
|
||||
| OP | Description |
|
||||
|---------|------------------------------------------------------------------------|
|
||||
| ------- | ---------------------------------------------------------------------- |
|
||||
| `!` | Negate the pattern, by requiring it to match exactly 0 times. |
|
||||
| `?` | Make the pattern optional, by allowing it to match 0 or 1 times. |
|
||||
| `+` | Require the pattern to match 1 or more times. |
|
||||
|
@ -204,20 +204,6 @@ will be overwritten.
|
|||
> matches = matcher(doc)
|
||||
> ```
|
||||
|
||||
<Infobox title="Changed in v3.0" variant="warning">
|
||||
|
||||
As of spaCy v3.0, `Matcher.add` takes a list of patterns as the second argument
|
||||
(instead of a variable number of arguments). The `on_match` callback becomes an
|
||||
optional keyword argument.
|
||||
|
||||
```diff
|
||||
patterns = [[{"TEXT": "Google"}, {"TEXT": "Now"}], [{"TEXT": "GoogleNow"}]]
|
||||
- matcher.add("GoogleNow", on_match, *patterns)
|
||||
+ matcher.add("GoogleNow", patterns, on_match=on_match)
|
||||
```
|
||||
|
||||
</Infobox>
|
||||
|
||||
| Name | Description |
|
||||
| ----------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `match_id` | An ID for the thing you're matching. ~~str~~ |
|
||||
|
|
|
@ -116,10 +116,10 @@ Check whether the matcher contains rules for a match ID.
|
|||
## PhraseMatcher.add {#add tag="method"}
|
||||
|
||||
Add a rule to the matcher, consisting of an ID key, one or more patterns, and a
|
||||
callback function to act on the matches. The callback function will receive the
|
||||
arguments `matcher`, `doc`, `i` and `matches`. If a pattern already exists for
|
||||
the given ID, the patterns will be extended. An `on_match` callback will be
|
||||
overwritten.
|
||||
optional callback function to act on the matches. The callback function will
|
||||
receive the arguments `matcher`, `doc`, `i` and `matches`. If a pattern already
|
||||
exists for the given ID, the patterns will be extended. An `on_match` callback
|
||||
will be overwritten.
|
||||
|
||||
> #### Example
|
||||
>
|
||||
|
@ -134,20 +134,6 @@ overwritten.
|
|||
> matches = matcher(doc)
|
||||
> ```
|
||||
|
||||
<Infobox title="Changed in v3.0" variant="warning">
|
||||
|
||||
As of spaCy v3.0, `PhraseMatcher.add` takes a list of patterns as the second
|
||||
argument (instead of a variable number of arguments). The `on_match` callback
|
||||
becomes an optional keyword argument.
|
||||
|
||||
```diff
|
||||
patterns = [nlp("health care reform"), nlp("healthcare reform")]
|
||||
- matcher.add("HEALTH", on_match, *patterns)
|
||||
+ matcher.add("HEALTH", patterns, on_match=on_match)
|
||||
```
|
||||
|
||||
</Infobox>
|
||||
|
||||
| Name | Description |
|
||||
| -------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `key` | An ID for the thing you're matching. ~~str~~ |
|
||||
|
|
Loading…
Reference in New Issue
Block a user