add doctests for website 'api'-section (merge)

This commit is contained in:
Henning Peters 2015-09-28 14:35:11 +02:00
commit 5dfd2df686
4 changed files with 14 additions and 10 deletions

View File

@@ -1,5 +1,6 @@
from __future__ import unicode_literals from __future__ import unicode_literals
import pytest import pytest
import spacy.en
@pytest.fixture() @pytest.fixture()
@@ -13,6 +14,7 @@ def test_load_resources_and_process_text():
doc = nlp('Hello, world. Here are two sentences.') doc = nlp('Hello, world. Here are two sentences.')
@pytest.mark.models
def test_get_tokens_and_sentences(doc): def test_get_tokens_and_sentences(doc):
token = doc[0] token = doc[0]
sentence = doc.sents.next() sentence = doc.sents.next()
@@ -20,12 +22,11 @@ def test_get_tokens_and_sentences(doc):
assert sentence.text == 'Hello, world.' assert sentence.text == 'Hello, world.'
@pytest.mark.xfail
def test_use_integer_ids_for_any_strings(nlp, token): def test_use_integer_ids_for_any_strings(nlp, token):
hello_id = nlp.vocab.strings['Hello'] hello_id = nlp.vocab.strings['Hello']
hello_str = nlp.vocab.strings[hello_id] hello_str = nlp.vocab.strings[hello_id]
assert token.orth == hello_id == 469755 assert token.orth == hello_id == 3404
assert token.orth_ == hello_str == 'Hello' assert token.orth_ == hello_str == 'Hello'
@@ -55,19 +56,18 @@ def test_export_to_numpy_arrays(nlp, doc):
assert list(doc_array[:, 1]) == [t.like_url for t in doc] assert list(doc_array[:, 1]) == [t.like_url for t in doc]
@pytest.mark.xfail @pytest.mark.models
def test_word_vectors(nlp): def test_word_vectors(nlp):
doc = nlp("Apples and oranges are similar. Boots and hippos aren't.") doc = nlp("Apples and oranges are similar. Boots and hippos aren't.")
apples = doc[0] apples = doc[0]
oranges = doc[1] oranges = doc[2]
boots = doc[6] boots = doc[6]
hippos = doc[8] hippos = doc[8]
assert apples.similarity(oranges) > boots.similarity(hippos) assert apples.similarity(oranges) > boots.similarity(hippos)
@pytest.mark.xfail
def test_part_of_speech_tags(nlp): def test_part_of_speech_tags(nlp):
from spacy.parts_of_speech import ADV from spacy.parts_of_speech import ADV
@@ -88,6 +88,7 @@ def test_part_of_speech_tags(nlp):
print(token.tag_) print(token.tag_)
@pytest.mark.models
def test_syntactic_dependencies(): def test_syntactic_dependencies():
def dependency_labels_to_root(token): def dependency_labels_to_root(token):
'''Walk up the syntactic tree, collecting the arc labels.''' '''Walk up the syntactic tree, collecting the arc labels.'''
@@ -98,6 +99,7 @@ def test_syntactic_dependencies():
return dep_labels return dep_labels
@pytest.mark.models
def test_named_entities(): def test_named_entities():
def iter_products(docs): def iter_products(docs):
for doc in docs: for doc in docs:
@@ -143,12 +145,14 @@ def test_calculate_inline_mark_up_on_original_string():
return string return string
@pytest.mark.xfail @pytest.mark.models
def test_efficient_binary_serialization(doc): def test_efficient_binary_serialization(doc):
byte_string = doc.as_bytes() from spacy.tokens.doc import Doc
byte_string = doc.to_bytes()
open('/tmp/moby_dick.bin', 'wb').write(byte_string) open('/tmp/moby_dick.bin', 'wb').write(byte_string)
nlp = spacy.en.English() nlp = spacy.en.English()
for byte_string in Doc.read(open('/tmp/moby_dick.bin', 'rb')): for byte_string in Doc.read_bytes(open('/tmp/moby_dick.bin', 'rb')):
doc = Doc(nlp.vocab) doc = Doc(nlp.vocab)
doc.from_bytes(byte_string) doc.from_bytes(byte_string)

View File

@@ -1,8 +1,8 @@
all: src/code site all: src/code site
src/code: tests/test_*.py src/code:
mkdir -p src/code/ mkdir -p src/code/
./create_code_samples tests/ src/code/ ./create_code_samples ../tests/website/ src/code/
site: site/index.html site/blog/ site/docs/ site/license/ site/blog/introducing-spacy/ site/blog/parsing-english-in-python/ site/blog/part-of-speech-POS-tagger-in-python/ site/tutorials/twitter-filter/ site/tutorials/syntax-search/ site/tutorials/mark-adverbs/ site/blog/writing-c-in-cython/ site/blog/how-spacy-works/ site: site/index.html site/blog/ site/docs/ site/license/ site/blog/introducing-spacy/ site/blog/parsing-english-in-python/ site/blog/part-of-speech-POS-tagger-in-python/ site/tutorials/twitter-filter/ site/tutorials/syntax-search/ site/tutorials/mark-adverbs/ site/blog/writing-c-in-cython/ site/blog/how-spacy-works/