#!/usr/bin/env python
# coding: utf8
"""Match a large set of multi-word expressions in O(1) time.
|
|
|
|
|
|
|
|
The idea is to associate each word in the vocabulary with a tag, noting whether
|
|
|
|
they begin, end, or are inside at least one pattern. An additional tag is used
|
|
|
|
for single-word patterns. Complete patterns are also stored in a hash set.
|
2017-10-26 18:32:59 +03:00
|
|
|
When we process a document, we look up the words in the vocabulary, to
|
|
|
|
associate the words with the tags. We then search for tag-sequences that
|
|
|
|
correspond to valid candidates. Finally, we look up the candidates in the hash
|
|
|
|
set.
|
2015-10-06 01:06:52 +03:00
|
|
|
|
2017-10-26 18:32:59 +03:00
|
|
|
For instance, to search for the phrases "Barack Hussein Obama" and "Hilary
|
|
|
|
Clinton", we would associate "Barack" and "Hilary" with the B tag, Hussein with
|
|
|
|
the I tag, and Obama and Clinton with the L tag.
|
2015-10-06 01:06:52 +03:00
|
|
|
|
|
|
|
The document "Barack Clinton and Hilary Clinton" would have the tag sequence
|
2017-10-26 18:32:59 +03:00
|
|
|
[{B}, {L}, {}, {B}, {L}], so we'd get two matches. However, only the second
|
|
|
|
candidate is in the phrase dictionary, so only one is returned as a match.
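
The candidate search is sketched in plain Python below, with hypothetical
names and purely to illustrate the idea; the real implementation lives in
spacy.matcher.PhraseMatcher and works over vocab flags rather than dicts:

    tags = {"Barack": {"B"}, "Hussein": {"I"}, "Obama": {"L"},
            "Hillary": {"B"}, "Clinton": {"L"}}
    phrase_set = {("Barack", "Hussein", "Obama"), ("Hillary", "Clinton")}

    def candidates(words, max_length=3):
        for i, word in enumerate(words):
            if "B" not in tags.get(word, set()):
                continue
            for j in range(i + 1, min(i + max_length, len(words))):
                inside_ok = all("I" in tags.get(words[k], set())
                                for k in range(i + 1, j))
                if inside_ok and "L" in tags.get(words[j], set()):
                    yield tuple(words[i:j + 1])

    words = "Barack Clinton and Hillary Clinton".split()
    matches = [c for c in candidates(words) if c in phrase_set]
    # Two candidates are found, but only ("Hillary", "Clinton") survives
    # the final hash-set check.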

The algorithm is O(n) at run-time for a document of length n, because we only
ever match over the tag patterns. So no matter how many phrases we're looking
for, our pattern set stays very small (the exact size depends on the maximum
phrase length we're looking for, as the query language currently has no
quantifiers).

The example expects a .bz2 file from the Reddit comments corpus and a
patterns file in JSONL format, i.e. a sequence of entries like this:

{"text":"Anchorage"}
{"text":"Angola"}
{"text":"Ann Arbor"}
{"text":"Annapolis"}
{"text":"Appalachia"}
{"text":"Argentina"}

Reddit comments corpus:
* https://files.pushshift.io/reddit/
* https://archive.org/details/2015_reddit_comments_corpus
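
Example invocation (the script name is illustrative; pushshift dumps are
typically named like RC_2015-01.bz2):

    python phrase_matcher.py patterns.jsonl RC_2015-01.bz2 -n 10000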

Compatible with: spaCy v2.0.0+
"""
from __future__ import print_function, unicode_literals, division

from bz2 import BZ2File
import time
import json

import plac
import spacy
from spacy.matcher import PhraseMatcher


@plac.annotations(
    patterns_loc=("Path to gazetteer", "positional", None, str),
    text_loc=("Path to Reddit corpus file", "positional", None, str),
    n=("Number of texts to read", "option", "n", int),
    lang=("Language class to initialise", "option", "l", str),
)
def main(patterns_loc, text_loc, n=10000, lang="en"):
    nlp = spacy.blank(lang)
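    # Clear the lexical attribute getters, so new vocab entries are created
    # without computing attributes the matcher doesn't need.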
    nlp.vocab.lex_attr_getters = {}
    phrases = read_gazetteer(nlp.tokenizer, patterns_loc)
    count = 0
    t1 = time.time()
    for ent_id, text in get_matches(nlp.tokenizer, phrases, read_text(text_loc, n=n)):
        count += 1
    t2 = time.time()
    print("%d docs in %.3f s. %d matches" % (n, (t2 - t1), count))


def read_gazetteer(tokenizer, loc):
    with open(loc) as file_:
        for line in file_:
            data = json.loads(line.strip())
            phrase = tokenizer(data["text"])
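            # Touch each token's lexeme so the word is interned in the
            # vocab; the matcher works over vocab entries, not raw strings.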
            for w in phrase:
                _ = tokenizer.vocab[w.text]
            if len(phrase) >= 2:
                yield phrase


def read_text(bz2_loc, n=10000):
    with BZ2File(bz2_loc) as file_:
        for i, line in enumerate(file_):
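            # Each line of the dump is one JSON object; the comment text
            # is stored under the "body" key.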
            data = json.loads(line)
            yield data["body"]
            if i + 1 >= n:
                break


def get_matches(tokenizer, phrases, texts, max_length=6):
    matcher = PhraseMatcher(tokenizer.vocab, max_length=max_length)
    matcher.add("Phrase", None, *phrases)
    for text in texts:
        doc = tokenizer(text)
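        # Intern the document's words in the vocab too, mirroring the
        # priming done for the gazetteer phrases in read_gazetteer().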
        for w in doc:
            _ = doc.vocab[w.text]
        matches = matcher(doc)
        for ent_id, start, end in matches:
            yield (ent_id, doc[start:end].text)


if __name__ == "__main__":
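    # Flip this to True to profile the run with cProfile.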
    if False:
        import cProfile
        import pstats

        cProfile.runctx("plac.call(main)", globals(), locals(), "Profile.prof")
        s = pstats.Stats("Profile.prof")
        s.strip_dirs().sort_stats("time").print_stats()
    else:
        plac.call(main)