Remove trailing whitespace

Jordan Suchow 2015-04-19 01:31:31 -07:00
parent 5f0f940a1f
commit 3a8d9b37a6
67 changed files with 124 additions and 169 deletions

View File

@@ -35,4 +35,3 @@ Difficult to support:
* PyPy 2.7
* PyPy 3.4

View File

@@ -30,5 +30,3 @@ def main(text_loc):
if __name__ == '__main__':
    plac.call(main)

View File

@@ -11,5 +11,3 @@ The CLA must be signed on your first pull request. To do this, simply fill in th
$ git add -A spaCy/contributors/<your GitHub username>.md
Now finish your pull request, and you're done.

View File

@@ -75,4 +75,3 @@ Boolean features
+-------------+--------------------------------------------------------------+
| IN_LIST | Facility for loading arbitrary run-time word lists? |
+-------------+--------------------------------------------------------------+

View File

@@ -68,4 +68,3 @@ Cons:
- Higher memory usage (up to 1gb)
- More conceptually complicated
- Tokenization rules expressed in code, not as data

View File

@@ -260,5 +260,3 @@ these models is really all about the data structures. We want to stay small,
and stay contiguous. Minimize redundancy and minimize pointer chasing.
That's why Cython is so well suited to this: we get to lay out our data
structures, and manage the memory ourselves, with full C-level control.
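As a rough illustration of the contiguous-layout idea described above (a hypothetical sketch, not spaCy's actual structs; the ExampleTokenC and ExampleDoc names are invented here), per-token data can be declared as a plain C struct and allocated as one block, so iterating over tokens is a pointer walk rather than a chain of Python object lookups:

    # Hypothetical Cython sketch of a contiguous token array.
    from libc.stdlib cimport calloc, free

    cdef struct ExampleTokenC:   # invented name, fixed-width fields only
        int orth                 # id of the token's string in the vocabulary
        int pos                  # part-of-speech tag id
        int head                 # offset to the syntactic head

    cdef class ExampleDoc:
        cdef ExampleTokenC* c    # one contiguous allocation, no per-token objects
        cdef int length

        def __cinit__(self, int length):
            self.c = <ExampleTokenC*>calloc(length, sizeof(ExampleTokenC))
            self.length = length

        def __dealloc__(self):
            if self.c is not NULL:
                free(self.c)

Because every field is a fixed-width integer, the array's size is known up front and iteration stays cache-friendly.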

View File

@@ -278,6 +278,3 @@ sentence represents the document as a whole.
Document Model
--------------

View File

@@ -234,4 +234,3 @@ Features
+---------+-----------------------------------------------------------+
| prob | Log probability of word, smoothed with Simple Good-Turing |
+---------+-----------------------------------------------------------+

View File

@@ -79,5 +79,3 @@ cpdef enum attr_id_t:
    POS
    TAG
    DEP

View File

@@ -22,4 +22,3 @@ cdef class EnPosTagger:
    cdef int set_morph(self, const int i, const PosTag* tag, TokenC* tokens) except -1
    cdef int lemmatize(self, const univ_pos_t pos, const LexemeC* lex) except -1

View File

@@ -381,4 +381,3 @@ cdef inline void _fill_from_token(atom_t* context, const TokenC* t) nogil:
        context[7] = 4
    else:
        context[7] = 0

View File

@@ -149,5 +149,3 @@ cpdef enum:
cdef int fill_context(atom_t* context, State* s, Tokens tokens) except -1

View File

@@ -15,4 +15,3 @@ cdef class Span:
    cdef public Span head
    cdef public list rights
    cdef public list lefts

View File

@@ -280,5 +280,3 @@ class OracleError(Exception):
class UnknownMove(Exception):
    pass

View File

@@ -13,5 +13,3 @@ class Config(object):
    @classmethod
    def read(cls, model_dir, name):
        return cls(**json.load(open(path.join(model_dir, '%s.json' % name))))

View File

@@ -608,4 +608,3 @@ _parse_unset_error = """Text has not been parsed, so cannot be accessed.
Check that the parser data is installed. Run "python -m spacy.en.download" if not.
Check whether parse=False in the call to English.__call__
"""

View File

@@ -94,5 +94,3 @@ ctypedef uint64_t flags_t
ctypedef uint32_t id_t
ctypedef uint16_t len_t
ctypedef uint16_t tag_t

View File

@@ -33,4 +33,3 @@ cdef class Vocab:
    cdef int _add_lex_to_vocab(self, hash_t key, const LexemeC* lex) except -1
    cdef PreshMap _map

View File

@@ -30,6 +30,3 @@ def test_align_continue():
    assert aligned[2] == ('re-align', [(5, 7), (7, 8), (8, 13)])
    assert aligned[3] == ('and', [(13, 16)])
    assert aligned[4] == ('continue', [(16, 24)])

View File

@@ -37,5 +37,3 @@ def test_dep():
    assert feats_array[1][1] == tokens[1].dep
    assert feats_array[2][1] == tokens[2].dep
    assert feats_array[3][1] == tokens[3].dep

View File

@@ -35,4 +35,3 @@ def test_merge_heads():
def test_issue_54():
    text = u'Talks given by women had a slightly higher number of questions asked (3.2$\pm$0.2) than talks given by men (2.6$\pm$0.1).'
    tokens = NLU(text, merge_mwes=True)

View File

@@ -33,4 +33,3 @@ def test_word():
def test_not_number():
    assert not like_number('dog')
    assert not like_number(',')

View File

@@ -58,4 +58,3 @@ def test_child_consistency(nlp, sun_text):
        assert not children
    for head_index, children in rights.items():
        assert not children

View File

@@ -49,4 +49,3 @@ def test_three_same_close(close_puncts, EN):
def test_double_end_quote(EN):
    assert len(EN("Hello''")) == 2
    assert len(EN("''")) == 1

View File

@@ -16,6 +16,3 @@ def test_one(EN):
    assert tokens[0].orth_ == 'Betty'
    tokens2 = EN('Betty also bought a pound of butter.')
    assert tokens2[0].orth_ == 'Betty'

View File

@@ -16,4 +16,3 @@ def test_subtrees():
    assert len(list(bus.children)) == 1
    assert len(list(wheels.subtree)) == 6

View File

@@ -35,5 +35,3 @@ def test_single_token_string():
    nlp = English()
    tokens = nlp(u'foobar')
    assert tokens[0].string == 'foobar'

View File

@@ -39,5 +39,3 @@ def test_newline_double_space(EN):
def test_newline_space_wrap(EN):
    tokens = EN('hello \n possums')
    assert len(tokens) == 3