Mirror of https://github.com/explosion/spaCy.git
Fix error messages if model is required (resolves #1051)
Rename about.__docs__ to about.__docs_models__.
Parent: 24e973b17f
Commit: 9003fd25e5
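The change replaces the old per-language hint ("python -m spacy download %s") with a pointer to the models documentation, read from the renamed about.__docs_models__ attribute. A minimal sketch of what that attribute amounts to, using an illustrative URL rather than the value actually shipped in spacy/about.py:

    # Sketch of the renamed attribute in spacy/about.py; the URL below is a
    # placeholder, not necessarily the value the real module defines.
    __docs_models__ = "https://spacy.io/docs/usage/models"

Every error message touched by the diff below formats this value in with "%s", so the same text can be reused across Lexeme, Token, Doc and Span without hard-coding a language-specific download command.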
--- a/spacy/lexeme.pyx
+++ b/spacy/lexeme.pyx
@@ -24,6 +24,7 @@ from .attrs cimport IS_QUOTE
 from .attrs cimport IS_LEFT_PUNCT
 from .attrs cimport IS_RIGHT_PUNCT
 from .attrs cimport IS_OOV
+from . import about


 memset(&EMPTY_LEXEME, 0, sizeof(LexemeC))
@@ -137,11 +138,10 @@ cdef class Lexeme:
             cdef int length = self.vocab.vectors_length
             if length == 0:
                 raise ValueError(
-                    "Word vectors set to length 0. This may be because the "
-                    "data is not installed. If you haven't already, run"
-                    "\npython -m spacy download %s\n"
-                    "to install the data." % self.vocab.lang
-                )
+                    "Word vectors set to length 0. This may be because you "
+                    "don't have a model installed or loaded, or because your "
+                    "model doesn't include word vectors. For more info, see "
+                    "the documentation: \n%s\n" % about.__docs_models__)

             vector_view = <float[:length,]>self.c.vector
             return numpy.asarray(vector_view)
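The same message is reused for Token.vector further down; only the vector being viewed differs. As a standalone sketch of the pattern in plain Python, with stand-in names rather than spaCy's Cython internals:

    # Standalone sketch of the Lexeme.vector / Token.vector guard above.
    # DOCS_MODELS stands in for about.__docs_models__; the function is illustrative.
    DOCS_MODELS = "https://example.com/docs/models"

    def get_vector(vectors_length, vector_data):
        if vectors_length == 0:
            raise ValueError(
                "Word vectors set to length 0. This may be because you "
                "don't have a model installed or loaded, or because your "
                "model doesn't include word vectors. For more info, see "
                "the documentation: \n%s\n" % DOCS_MODELS)
        return vector_data[:vectors_length]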
--- a/spacy/tokens/doc.pyx
+++ b/spacy/tokens/doc.pyx
@@ -29,6 +29,7 @@ from ..serialize.bits cimport BitArray
 from ..util import normalize_slice
 from ..syntax.iterators import CHUNKERS
 from ..compat import is_config
+from .. import about


 DEF PADDING = 5
@@ -403,9 +404,8 @@ cdef class Doc:
             if not self.is_parsed:
                 raise ValueError(
                     "noun_chunks requires the dependency parse, which "
-                    "requires data to be installed. If you haven't done so, run: "
-                    "\npython -m spacy download %s\n"
-                    "to install the data" % self.vocab.lang)
+                    "requires data to be installed. For more info, see the "
+                    "documentation: \n%s\n" % about.__docs_models__)
             # Accumulate the result before beginning to iterate over it. This prevents
             # the tokenisation from being changed out from under us during the iteration.
             # The tricky thing here is that Span accepts its tokenisation changing,
@@ -435,10 +435,9 @@ cdef class Doc:

             if not self.is_parsed:
                 raise ValueError(
-                    "sentence boundary detection requires the dependency parse, which "
-                    "requires data to be installed. If you haven't done so, run: "
-                    "\npython -m spacy download %s\n"
-                    "to install the data" % self.vocab.lang)
+                    "Sentence boundary detection requires the dependency parse, which "
+                    "requires data to be installed. For more info, see the "
+                    "documentation: \n%s\n" % about.__docs_models__)
             cdef int i
             start = 0
             for i in range(1, self.length):
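Doc.noun_chunks, Doc.sents and Span.noun_chunks (next file) all keep the same is_parsed guard; only the wording after the guard changes. A self-contained sketch of that guard around a toy sentence iterator, with illustrative names and data rather than spaCy's internals:

    # Sketch of the is_parsed guard shared by noun_chunks and sents.
    # DOCS_MODELS stands in for about.__docs_models__.
    DOCS_MODELS = "https://example.com/docs/models"

    def iter_sents(tokens, is_parsed, sent_starts):
        if not is_parsed:
            raise ValueError(
                "Sentence boundary detection requires the dependency parse, which "
                "requires data to be installed. For more info, see the "
                "documentation: \n%s\n" % DOCS_MODELS)
        start = 0
        for i in range(1, len(tokens)):
            if sent_starts[i]:
                yield tokens[start:i]
                start = i
        yield tokens[start:]

    # Example: two sentences once boundary flags are available.
    print(list(iter_sents(["This", "is", "one", ".", "Two", "."], True,
                          [1, 0, 0, 0, 1, 0])))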
--- a/spacy/tokens/span.pyx
+++ b/spacy/tokens/span.pyx
@@ -16,6 +16,7 @@ from ..util import normalize_slice
 from ..attrs cimport IS_PUNCT, IS_SPACE
 from ..lexeme cimport Lexeme
 from ..compat import is_config
+from .. import about


 cdef class Span:
@@ -221,9 +222,8 @@ cdef class Span:
             if not self.doc.is_parsed:
                 raise ValueError(
                     "noun_chunks requires the dependency parse, which "
-                    "requires data to be installed. If you haven't done so, run: "
-                    "\npython -m spacy download %s\n"
-                    "to install the data" % self.vocab.lang)
+                    "requires data to be installed. For more info, see the "
+                    "documentation: \n%s\n" % about.__docs_models__)
             # Accumulate the result before beginning to iterate over it. This prevents
             # the tokenisation from being changed out from under us during the iteration.
             # The tricky thing here is that Span accepts its tokenisation changing,
--- a/spacy/tokens/token.pyx
+++ b/spacy/tokens/token.pyx
@@ -26,6 +26,7 @@ from ..attrs cimport IS_TITLE, IS_UPPER, LIKE_URL, LIKE_NUM, LIKE_EMAIL, IS_STOP
 from ..attrs cimport IS_OOV
 from ..lexeme cimport Lexeme
 from ..compat import is_config
+from .. import about


 cdef class Token:
@@ -237,11 +238,10 @@ cdef class Token:
             cdef int length = self.vocab.vectors_length
             if length == 0:
                 raise ValueError(
-                    "Word vectors set to length 0. This may be because the "
-                    "data is not installed. If you haven't already, run"
-                    "\npython -m spacy download %s\n"
-                    "to install the data." % self.vocab.lang
-                )
+                    "Word vectors set to length 0. This may be because you "
+                    "don't have a model installed or loaded, or because your "
+                    "model doesn't include word vectors. For more info, see "
+                    "the documentation: \n%s\n" % about.__docs_models__)
             vector_view = <float[:length,]>self.c.lex.vector
             return numpy.asarray(vector_view)
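A quick way to sanity-check the new wording is to assert on the formatted message itself. A hedged pytest-style sketch that exercises the message text only, not spaCy's real classes:

    # Hedged test sketch: verifies the message points at the docs URL and no
    # longer embeds a "spacy download" command. Names are illustrative.
    DOCS_MODELS = "https://example.com/docs/models"

    def vector_error_message():
        return ("Word vectors set to length 0. This may be because you "
                "don't have a model installed or loaded, or because your "
                "model doesn't include word vectors. For more info, see "
                "the documentation: \n%s\n" % DOCS_MODELS)

    def test_error_points_to_docs():
        msg = vector_error_message()
        assert DOCS_MODELS in msg
        assert "spacy download" not in msg

    if __name__ == "__main__":
        test_error_points_to_docs()
        print("ok")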