mirror of https://github.com/explosion/spaCy.git

commit 936edea425 (parent d1850dcbf7)
doctests for website: 'home'-section
@@ -1,6 +1,6 @@
 all: src/code site
 
-src/code:
+src/code: tests/test_*.py
 	mkdir -p src/code/
 	./create_code_samples tests/ src/code/
 
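The hunk above patches the site's Makefile: giving the src/code target the
prerequisite tests/test_*.py is what keeps the generated samples fresh. A
target with no prerequisites counts as up to date as soon as it exists, so
src/code would never be rebuilt after a test file changed. A minimal Python
sketch of the timestamp rule make applies (paths illustrative, not from the
commit):

import glob
import os

def needs_rebuild(target, prerequisites):
    # make re-runs a recipe when the target is missing or any
    # prerequisite is newer than the target
    if not os.path.exists(target):
        return True
    target_mtime = os.path.getmtime(target)
    return any(os.path.getmtime(p) > target_mtime for p in prerequisites)

print(needs_rebuild("src/code", glob.glob("tests/test_*.py")))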
@@ -4,6 +4,12 @@ import re
 import os
 import ast
 
+# cgi.escape is deprecated since py32
+try:
+    from html import escape
+except ImportError:
+    from cgi import escape
+
 
 src_dirname = sys.argv[1]
 dst_dirname = sys.argv[2]
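This hunk and the three after it patch the create_code_samples script that
the Makefile invokes. The added import block is the usual forward-compatible
shim: cgi.escape was deprecated in Python 3.2 in favour of html.escape (and
was later removed in 3.8), so the script tries the modern module first and
falls back for Python 2. One behavioural difference worth knowing, in a small
Python 3 sketch:

from html import escape

# html.escape escapes quotes by default; cgi.escape only did so when
# called with quote=True, so attribute-like text can render differently
print(escape('<pre class="x">'))               # &lt;pre class=&quot;x&quot;&gt;
print(escape('<pre class="x">', quote=False))  # &lt;pre class="x"&gt;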
@@ -24,10 +30,18 @@ for filename in os.listdir(src_dirname):
 
         # only ast.expr and ast.stmt have line numbers, see:
         # https://docs.python.org/2/library/ast.html#ast.AST.lineno
-        line_numbers = [x.lineno for x in ast.iter_child_nodes(item)
-                        if isinstance(x, ast.expr) or
-                        isinstance(x, ast.stmt)]
+        line_numbers = []
+
+        def fill_line_numbers(node):
+            for child in ast.iter_child_nodes(node):
+                if ((isinstance(child, ast.expr) or
+                     isinstance(child, ast.stmt)) and
+                        child.lineno > item.lineno):
+                    line_numbers.append(child.lineno)
+                    fill_line_numbers(child)
+
+        fill_line_numbers(item)
         body = source[min(line_numbers)-1:max(line_numbers)]
 
         # make sure we are inside an indented function body
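The old comprehension looked at direct children of the test function only, so
when a sampled function ended in a compound statement (a for loop, a nested
def), max(line_numbers) pointed at that statement's header and the source
slice silently dropped its body. The recursive fill_line_numbers walks the
whole subtree; the child.lineno > item.lineno guard skips nodes that sit on
the def line itself. A standalone sketch of the difference, stdlib only:

import ast

source = '''\
def test_example():
    total = 0
    for i in range(3):
        total += i
'''

func = ast.parse(source).body[0]

# direct children only: the deepest lineno seen is the `for` header (line 3),
# so a slice ending there would cut off the loop body on line 4
direct = [n.lineno for n in ast.iter_child_nodes(func)
          if isinstance(n, (ast.expr, ast.stmt))]

# recursing (here via ast.walk) also reaches the nested statements
nested = [n.lineno for n in ast.walk(func)
          if isinstance(n, (ast.expr, ast.stmt)) and n.lineno > func.lineno]

print(max(direct), max(nested))  # -> 3 4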
@@ -38,6 +52,7 @@ for filename in os.listdir(src_dirname):
             match = re.search(r"[^\s]", line)
             if match:
                 offset = match.start(0)
+                break
 
         # remove indentation
         assert offset > 0
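Without the break, offset is overwritten on every non-empty line and ends up
as the indentation of the last line of the body; if that line sits inside a
nested block, line[offset:] would chop real code off every shallower line.
Stopping at the first non-empty line pins offset to the body's base
indentation. A sketch with illustrative input:

import re

body = ["    x = 1\n", "    if x:\n", "        x += 1\n"]

offset = 0
for line in body:
    match = re.search(r"[^\s]", line)
    if match:
        offset = match.start(0)
        break  # the first non-empty line sets the base indentation

assert [l[offset:] for l in body] == ["x = 1\n", "if x:\n", "    x += 1\n"]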
@@ -51,4 +66,4 @@ for filename in os.listdir(src_dirname):
         code_filename = "%s.%s" % (name, item.name[len(prefix):])
 
         with open(os.path.join(dst_dirname, code_filename), "w") as f:
-            f.write("".join(body))
+            f.write(escape("".join(body)))
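Escaping on write is what the new import block was for, and it pairs with the
Jade changes below: the extracted samples are included verbatim inside
pre/code blocks, so a literal <, > or & in a sample would otherwise be parsed
as markup. For example:

from html import escape

print(escape("calculate what you need, e.g. <br /> tags, <p> tags"))
# -> calculate what you need, e.g. &lt;br /&gt; tags, &lt;p&gt; tags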
@@ -19,89 +19,27 @@ mixin example(name)
 
 +example("Get and set string views and flags")
     pre.language-python: code
-        | assert token.shape_ == 'Xxxxx'
-        | for lexeme in nlp.vocab:
-        |     if lexeme.is_alpha:
-        |         lexeme.shape_ = 'W'
-        |     elif lexeme.is_digit:
-        |         lexeme.shape_ = 'D'
-        |     elif lexeme.is_punct:
-        |         lexeme.shape_ = 'P'
-        |     else:
-        |         lexeme.shape_ = 'M'
-        | assert token.shape_ == 'W'
+        include ../../code/home.get_and_set_string_views_and_flags
 
 +example("Export to numpy arrays")
     pre.language-python: code
-        | from spacy.en.attrs import ORTH, LIKE_URL, IS_OOV
-        |
-        | attr_ids = [ORTH, LIKE_URL, IS_OOV]
-        | doc_array = doc.to_array(attr_ids)
-        | assert doc_array.shape == (len(doc), len(attr_ids))
-        | assert doc[0].orth == doc_array[0, 0]
-        | assert doc[1].orth == doc_array[1, 0]
-        | assert doc[0].like_url == doc_array[0, 1]
-        | assert list(doc_array[:, 1]) == [t.like_url for t in doc]
+        include ../../code/home.export_to_numpy_arrays
 
 +example("Word vectors")
     pre.language-python: code
-        | doc = nlp("Apples and oranges are similar. Boots and hippos aren't.")
-        |
-        | apples = doc[0]
-        | oranges = doc[1]
-        | boots = doc[6]
-        | hippos = doc[8]
-        |
-        | assert apples.similarity(oranges) > boots.similarity(hippos)
+        include ../../code/home.word_vectors
 
 +example("Part-of-speech tags")
     pre.language-python: code
-        | from spacy.parts_of_speech import ADV
-        |
-        | def is_adverb(token):
-        |     return token.pos == spacy.parts_of_speech.ADV
-        |
-        | # These are data-specific, so no constants are provided. You have to look
-        | # up the IDs from the StringStore.
-        | NNS = nlp.vocab.strings['NNS']
-        | NNPS = nlp.vocab.strings['NNPS']
-        | def is_plural_noun(token):
-        |     return token.tag == NNS or token.tag == NNPS
-        |
-        | def print_coarse_pos(token):
-        |     print(token.pos_)
-        |
-        | def print_fine_pos(token):
-        |     print(token.tag_)
+        include ../../code/home.part_of_speech_tags
 
 +example("Syntactic dependencies")
     pre.language-python: code
-        | def dependency_labels_to_root(token):
-        |     '''Walk up the syntactic tree, collecting the arc labels.'''
-        |     dep_labels = []
-        |     while token.head is not token:
-        |         dep_labels.append(token.dep)
-        |         token = token.head
-        |     return dep_labels
+        include ../../code/home.syntactic_dependencies
 
 +example("Named entities")
     pre.language-python: code
-        | def iter_products(docs):
-        |     for doc in docs:
-        |         for ent in doc.ents:
-        |             if ent.label_ == 'PRODUCT':
-        |                 yield ent
-        |
-        | def word_is_in_entity(word):
-        |     return word.ent_type != 0
-        |
-        | def count_parent_verb_by_person(docs):
-        |     counts = defaultdict(defaultdict(int))
-        |     for doc in docs:
-        |         for ent in doc.ents:
-        |             if ent.label_ == 'PERSON' and ent.root.head.pos == VERB:
-        |                 counts[ent.orth_][ent.root.head.lemma_] += 1
-        |     return counts
+        include ../../code/home.named_entities
 
 //+example("Define custom NER rules")
 //    pre.language-python: code
@@ -110,40 +48,11 @@ mixin example(name)
 
 +example("Calculate inline mark-up on original string")
     pre.language-python: code
-        | def put_spans_around_tokens(doc, get_classes):
-        |     '''Given some function to compute class names, put each token in a
-        |     span element, with the appropriate classes computed.
-        |
-        |     All whitespace is preserved, outside of the spans. (Yes, I know HTML
-        |     won't display it. But the point is no information is lost, so you can
-        |     calculate what you need, e.g. <br /> tags, <p> tags, etc.)
-        |     '''
-        |     output = []
-        |     template = '<span classes="{classes}">{word}</span>{space}'
-        |     for token in doc:
-        |         if token.is_space:
-        |             output.append(token.orth_)
-        |         else:
-        |             output.append(
-        |                 template.format(
-        |                     classes=' '.join(get_classes(token)),
-        |                     word=token.orth_,
-        |                     space=token.whitespace_))
-        |     string = ''.join(output)
-        |     string = string.replace('\n', '<br />')
-        |     string = string.replace('\t', '    ')
-        |     return string
+        include ../../code/home.calculate_inline_mark_up_on_original_string
 
 +example("Efficient binary serialization")
     pre.language-python: code
-        | byte_string = doc.as_bytes()
-        | open('/tmp/moby_dick.bin', 'wb').write(byte_string)
-        |
-        | nlp = spacy.en.English()
-        | for byte_string in Doc.read(open('/tmp/moby_dick.bin', 'rb')):
-        |     doc = Doc(nlp.vocab)
-        |     doc.from_bytes(byte_string)
+        include ../../code/home.efficient_binary_serialization
 
 +example("Full documentation")
     ul
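These Jade hunks replace the hand-maintained inline samples with include
directives, so the page renders exactly the code that the test suite below
runs. The include names follow the convention in create_code_samples,
code_filename = "%s.%s" % (name, item.name[len(prefix):]); how name is
derived is not part of this diff, but assuming it is the test module's
filename minus the test_ prefix and extension, the mapping works out as:

prefix = "test_"

def sample_name(module_filename, function_name):
    # assumption: `name` comes from the module filename, e.g. test_home.py
    name = module_filename[len(prefix):-len(".py")]
    return "%s.%s" % (name, function_name[len(prefix):])

assert sample_name("test_home.py", "test_word_vectors") == "home.word_vectors"
# written into src/code/, matched by `include ../../code/home.word_vectors`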
@@ -27,7 +27,6 @@ def test_load_resources_and_process_text():
 def test_get_tokens_and_sentences(doc):
     token = doc[0]
     sentence = doc.sents.next()
-
 
     assert token is sentence[0]
     assert sentence.text == 'Hello, world.'
@@ -36,5 +35,127 @@ def test_use_integer_ids_for_any_strings(nlp, token):
     hello_id = nlp.vocab.strings['Hello']
     hello_str = nlp.vocab.strings[hello_id]
 
-    assert token.orth == hello_id == 3404
+    assert token.orth == hello_id == 469755
     assert token.orth_ == hello_str == 'Hello'
+
+
+def test_get_and_set_string_views_and_flags(nlp, token):
+    assert token.shape_ == 'Xxxxx'
+    for lexeme in nlp.vocab:
+        if lexeme.is_alpha:
+            lexeme.shape_ = 'W'
+        elif lexeme.is_digit:
+            lexeme.shape_ = 'D'
+        elif lexeme.is_punct:
+            lexeme.shape_ = 'P'
+        else:
+            lexeme.shape_ = 'M'
+    assert token.shape_ == 'W'
+
+
+def test_export_to_numpy_arrays(nlp, doc):
+    from spacy.en.attrs import ORTH, LIKE_URL, IS_OOV
+
+    attr_ids = [ORTH, LIKE_URL, IS_OOV]
+    doc_array = doc.to_array(attr_ids)
+    assert doc_array.shape == (len(doc), len(attr_ids))
+    assert doc[0].orth == doc_array[0, 0]
+    assert doc[1].orth == doc_array[1, 0]
+    assert doc[0].like_url == doc_array[0, 1]
+    assert list(doc_array[:, 1]) == [t.like_url for t in doc]
+
+
+def test_word_vectors(nlp):
+    doc = nlp("Apples and oranges are similar. Boots and hippos aren't.")
+
+    apples = doc[0]
+    oranges = doc[1]
+    boots = doc[6]
+    hippos = doc[8]
+
+    assert apples.similarity(oranges) > boots.similarity(hippos)
+
+
+def test_part_of_speech_tags(nlp):
+    from spacy.parts_of_speech import ADV
+
+    def is_adverb(token):
+        return token.pos == spacy.parts_of_speech.ADV
+
+    # These are data-specific, so no constants are provided. You have to look
+    # up the IDs from the StringStore.
+    NNS = nlp.vocab.strings['NNS']
+    NNPS = nlp.vocab.strings['NNPS']
+    def is_plural_noun(token):
+        return token.tag == NNS or token.tag == NNPS
+
+    def print_coarse_pos(token):
+        print(token.pos_)
+
+    def print_fine_pos(token):
+        print(token.tag_)
+
+
+def test_syntactic_dependencies():
+    def dependency_labels_to_root(token):
+        '''Walk up the syntactic tree, collecting the arc labels.'''
+        dep_labels = []
+        while token.head is not token:
+            dep_labels.append(token.dep)
+            token = token.head
+        return dep_labels
+
+
+def test_named_entities():
+    def iter_products(docs):
+        for doc in docs:
+            for ent in doc.ents:
+                if ent.label_ == 'PRODUCT':
+                    yield ent
+
+    def word_is_in_entity(word):
+        return word.ent_type != 0
+
+    def count_parent_verb_by_person(docs):
+        counts = defaultdict(defaultdict(int))
+        for doc in docs:
+            for ent in doc.ents:
+                if ent.label_ == 'PERSON' and ent.root.head.pos == VERB:
+                    counts[ent.orth_][ent.root.head.lemma_] += 1
+        return counts
+
+
+def test_calculate_inline_mark_up_on_original_string():
+    def put_spans_around_tokens(doc, get_classes):
+        '''Given some function to compute class names, put each token in a
+        span element, with the appropriate classes computed.
+
+        All whitespace is preserved, outside of the spans. (Yes, I know HTML
+        won't display it. But the point is no information is lost, so you can
+        calculate what you need, e.g. <br /> tags, <p> tags, etc.)
+        '''
+        output = []
+        template = '<span classes="{classes}">{word}</span>{space}'
+        for token in doc:
+            if token.is_space:
+                output.append(token.orth_)
+            else:
+                output.append(
+                    template.format(
+                        classes=' '.join(get_classes(token)),
+                        word=token.orth_,
+                        space=token.whitespace_))
+        string = ''.join(output)
+        string = string.replace('\n', '<br />')
+        string = string.replace('\t', '    ')
+        return string
+
+
+def test_efficient_binary_serialization(doc):
+    byte_string = doc.as_bytes()
+    open('/tmp/moby_dick.bin', 'wb').write(byte_string)
+
+    nlp = spacy.en.English()
+    for byte_string in Doc.read(open('/tmp/moby_dick.bin', 'rb')):
+        doc = Doc(nlp.vocab)
+        doc.from_bytes(byte_string)
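The one edit to an existing test is the pinned integer: 3404 becomes 469755.
StringStore IDs in this era of spaCy are data-dependent (they shift when the
vocabulary shipped with the model changes), so the stable property is the
string/ID round-trip, not the number itself. In the 0.x API the tests use:

import spacy.en

nlp = spacy.en.English()
hello_id = nlp.vocab.strings['Hello']
assert nlp.vocab.strings[hello_id] == 'Hello'  # holds across data versions
# the integer value of hello_id itself depends on the loaded vocabulary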