Mirror of https://github.com/explosion/spaCy.git
Commit d10993f41a (parent d5403a6fe3): More docs work
@@ -22,6 +22,7 @@ cdef struct Lexeme:
     StringHash* string_views


+cpdef StringHash lex_of(LexID lex_id) except 0
 cpdef char first_of(LexID lex_id) except 0
 cpdef size_t length_of(LexID lex_id) except 0
 cpdef double prob_of(LexID lex_id) except 0
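These declarations follow the LexID accessor pattern that the .pyx hunks below implement: a LexID is the integer value of a Lexeme*, and each accessor casts it back to a pointer and reads one field. A minimal sketch of the convention, with illustrative ctypedefs standing in for the real headers (which this diff does not show in full):

ctypedef size_t LexID        # sketch: integer value of a Lexeme*
ctypedef size_t StringHash   # sketch: the real hash type may differ

cdef struct Lexeme:
    StringHash lex            # hash of the word's string
    StringHash* string_views  # alternate string views (norm, shape, ...)

cpdef StringHash lex_of(LexID lex_id) except 0:
    # Cast the opaque integer back to a Lexeme* and read a field;
    # `except 0` lets a zero return value signal a Python exception.
    return (<Lexeme*>lex_id).lex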
@@ -29,26 +29,21 @@ cpdef StringHash view_of(LexID lex_id, size_t view) except 0:
     return (<Lexeme*>lex_id).string_views[view]


-cpdef StringHash lex_of(size_t lex_id) except 0:
-    '''Access the `lex' field of the Lexeme pointed to by lex_id.
-
-    The lex field is the hash of the string you would expect to get back from
-    a standard tokenizer, i.e. the word with punctuation and other non-whitespace
-    delimited tokens split off. The other fields refer to properties of the
-    string that the lex field stores a hash of, except sic and tail.
-
-    >>> from spacy import en
-    >>> [en.unhash(lex_of(lex_id)) for lex_id in en.tokenize(u'Hi! world')]
-    [u'Hi', u'!', u'world']
+cpdef StringHash lex_of(LexID lex_id) except 0:
+    '''Access a hash of the word's string.
+
+    >>> lex_of(lookup(u'Hi')) == hash(u'Hi')
+    True
     '''
     return (<Lexeme*>lex_id).lex


 cpdef ClusterID cluster_of(LexID lex_id) except 0:
-    '''Access the `cluster' field of the Lexeme pointed to by lex_id, which
-    gives an integer representation of the cluster ID of the word,
-    which should be understood as a binary address:
+    '''Access an integer representation of the word's Brown cluster.
+
+    A Brown cluster is an address into a binary tree, which gives some (noisy)
+    information about the word's distributional context.

     >>> strings = (u'pineapple', u'apple', u'dapple', u'scalable')
     >>> token_ids = [lookup(s) for s in strings]
     >>> clusters = [cluster_of(t) for t in token_ids]
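The "address into a binary tree" reading above is what makes Brown clusters usable as features: words whose addresses share a long bit-prefix sit in the same subtree, so a coarse similarity test is a prefix comparison. A hedged sketch using only the functions documented in this commit (the bit width and exact encoding are assumptions for illustration):

def cluster_bits(word, width=16):
    # Render the integer cluster ID as a fixed-width bit string.
    return format(cluster_of(lookup(word)), 'b').zfill(width)

def same_subtree(word_a, word_b, prefix_len):
    # A shared prefix of length n means the same subtree at depth n.
    return cluster_bits(word_a)[:prefix_len] == cluster_bits(word_b)[:prefix_len]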
@@ -64,29 +59,28 @@ cpdef ClusterID cluster_of(LexID lex_id) except 0:


 cpdef char first_of(size_t lex_id) except 0:
-    '''Access the `first' field of the Lexeme pointed to by lex_id, which
-    stores the first character of the lex string of the word.
+    '''Access the first byte of a utf8 encoding of the word.

     >>> lex_id = lookup(u'Hello')
-    >>> unhash(first_of(lex_id))
-    u'H'
+    >>> chr(first_of(lex_id))
+    'H'
     '''
     return (<Lexeme*>lex_id).string[0]


 cpdef size_t length_of(size_t lex_id) except 0:
-    '''Access the `length' field of the Lexeme pointed to by lex_id, which stores
-    the length of the string hashed by lex_of.'''
+    '''Access the (unicode) length of the word.
+    '''
     cdef Lexeme* word = <Lexeme*>lex_id
     return word.length


 cpdef double prob_of(size_t lex_id) except 0:
-    '''Access the `prob' field of the Lexeme pointed to by lex_id, which stores
-    the smoothed unigram log probability of the word, as estimated from a large
-    text corpus. By default, probabilities are based on counts from Gigaword,
-    smoothed using Kneser-Ney; but any probabilities file can be supplied to
-    load_probs.
+    '''Access an estimate of the word's unigram log probability.
+
+    Probabilities are calculated from a large text corpus, and smoothed using
+    simple Good-Turing. Estimates are read from data/en/probabilities, and
+    can be replaced using spacy.en.load_probabilities.

     >>> prob_of(lookup(u'world'))
     -20.10340371976182
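Because prob_of returns a log probability, likelihood ratios fall out as simple differences. A small usage sketch, assuming only lookup and prob_of as documented above:

import math

def times_more_likely(word_a, word_b):
    # exp(log p_a - log p_b) == p_a / p_b under the unigram model.
    return math.exp(prob_of(lookup(word_a)) - prob_of(lookup(word_b)))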
@@ -97,31 +91,39 @@ DEF OFT_UPPER = 1
 DEF OFT_TITLE = 2

 cpdef bint is_oft_upper(size_t lex_id):
-    '''Access the `oft_upper' field of the Lexeme pointed to by lex_id, which
-    stores whether the lowered version of the string hashed by `lex' is found
-    in all-upper case frequently in a large sample of text. Users are free
-    to load different data, by default we use a sample from Wikipedia, with
-    a threshold of 0.95, picked to maximize mutual information for POS tagging.
-
-    >>> is_oft_upper(lookup(u'abc'))
-    True
-    >>> is_oft_upper(lookup(u'aBc')) # This must get the same answer
-    True
+    '''Check the OFT_UPPER distributional flag for the word.
+
+    The OFT_UPPER flag records whether a lower-cased version of the word
+    is found in all-upper case frequently in a large sample of text, where
+    "frequently" is defined as P >= 0.95 (chosen for high mutual information
+    for POS tagging).
+
+    Case statistics are estimated from a large text corpus. Estimates are read
+    from data/en/case_stats, and can be replaced using spacy.en.load_case_stats.
+
+    >>> is_oft_upper(lookup(u'nato'))
+    True
+    >>> is_oft_upper(lookup(u'the'))
+    False
     '''
     return (<Lexeme*>lex_id).dist_flags & (1 << OFT_UPPER)


 cpdef bint is_oft_title(size_t lex_id):
-    '''Access the `oft_title' field of the Lexeme pointed to by lex_id, which
-    stores whether the lowered version of the string hashed by `lex' is found
-    title-cased frequently in a large sample of text. Users are free
-    to load different data, by default we use a sample from Wikipedia, with
-    a threshold of 0.3, picked to maximize mutual information for POS tagging.
-
-    >>> is_oft_title(lookup(u'marcus'))
-    True
-    >>> is_oft_title(lookup(u'MARCUS')) # This must get the same value
-    True
+    '''Check the OFT_TITLE distributional flag for the word.
+
+    The OFT_TITLE flag records whether a lower-cased version of the word
+    is found title-cased (see string.istitle) frequently in a large sample of
+    text, where "frequently" is defined as P >= 0.3 (chosen for high mutual
+    information for POS tagging).
+
+    Case statistics are estimated from a large text corpus. Estimates are read
+    from data/en/case_stats, and can be replaced using spacy.en.load_case_stats.
+
+    >>> is_oft_title(lookup(u'john'))
+    True
+    >>> is_oft_title(lookup(u'Bill'))
+    False
     '''
     return (<Lexeme*>lex_id).dist_flags & (1 << OFT_TITLE)
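As the `dist_flags & (1 << ...)` checks above show, OFT_UPPER and OFT_TITLE are bit positions into a single packed integer. A sketch of the packing scheme, using the constants from this diff:

DEF OFT_UPPER = 1
DEF OFT_TITLE = 2

cdef int set_flag(int dist_flags, int flag_id):
    # Record a distributional property by setting its bit,
    # e.g. set_flag(0, OFT_UPPER) == 0b10.
    return dist_flags | (1 << flag_id)

cdef bint check_flag(int dist_flags, int flag_id):
    # Mirrors the is_oft_upper / is_oft_title checks above.
    return dist_flags & (1 << flag_id)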
@@ -141,7 +141,11 @@ cpdef bint is_ascii(LexID lex_id) except *:


 cpdef StringHash norm_of(LexID lex_id) except 0:
-    """Return the hash of a normalized version of the string.
+    """Return the hash of a "normalized" version of the string.
+
+    Normalized strings are intended to be less sparse, while still capturing
+    important lexical information. See spacy.latin.orthography.normalize_string
+    for details of the normalization function.

     >>> unhash(norm_of(lookup(u'Hi')))
     u'hi'
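The normalize_string function itself is not part of this commit. A purely hypothetical normalizer consistent with the u'Hi' -> u'hi' doctest, shown only to make "less sparse" concrete (not necessarily the real rules):

def normalize_string(string):
    # Sketch: lower-case, and shape digits to cut sparsity while keeping
    # most lexical information. spaCy's actual rules may differ.
    return u''.join(u'd' if c.isdigit() else c.lower() for c in string)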
@@ -154,7 +158,11 @@ cpdef StringHash norm_of(LexID lex_id) except 0:


 cpdef StringHash shape_of(LexID lex_id) except 0:
-    """Return the hash of the string shape.
+    """Return the hash of a string describing the word's "orthographic shape".
+
+    Orthographic shapes are calculated by the spacy.orthography.latin.string_shape
+    function. Word shape features have been found useful for NER and POS tagging,
+    e.g. Manning (2011).

     >>> unhash(shape_of(lookup(u'Hi')))
     u'Xx'
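string_shape is likewise not shown in this commit. A common construction consistent with the u'Hi' -> u'Xx' doctest maps each character to a class symbol (a sketch, not necessarily spaCy's exact function):

def string_shape(string):
    # X for upper-case, x for lower-case, d for digit; everything else
    # is kept as-is.
    shape = []
    for c in string:
        if c.isupper():
            shape.append(u'X')
        elif c.islower():
            shape.append(u'x')
        elif c.isdigit():
            shape.append(u'd')
        else:
            shape.append(c)
    return u''.join(shape)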
@@ -168,8 +176,8 @@ cpdef StringHash shape_of(LexID lex_id) except 0:


 cpdef StringHash last3_of(LexID lex_id) except 0:
-    '''Access the `last3' field of the Lexeme pointed to by lex_id, which stores
-    the hash of the last three characters of the word:
+    '''Return the hash of string[-3:], i.e. the last three characters of the word.
+
     >>> lex_ids = [lookup(w) for w in (u'Hello', u'!')]
     >>> [unhash(last3_of(lex_id)) for lex_id in lex_ids]
     [u'llo', u'!']
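As the u'!' doctest shows, slicing with string[-3:] on a string shorter than three characters just returns the whole string, so short tokens hash unchanged:

def last3(string):
    # Pure-Python restatement of the documented behaviour.
    return string[-3:]   # u'Hello' -> u'llo', u'!' -> u'!'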
@@ -1,6 +1,7 @@
 import os
 from os import path
 import codecs
+import json


 DATA_DIR = path.join(path.dirname(__file__), '..', 'data')
@@ -19,9 +20,13 @@ def load_case_stats(data_dir):
     return case_stats


-def load_dist_info(lang):
-    with path.join(DATA_DIR, lang, 'distribution_info.json') as file_:
-        dist_info = json.load(file_)
+def read_dist_info(lang):
+    dist_path = path.join(DATA_DIR, lang, 'distribution_info.json')
+    if path.exists(dist_path):
+        with open(dist_path) as file_:
+            dist_info = json.load(file_)
+    else:
+        dist_info = {}
     return dist_info
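The rewritten reader also fixes a bug in the old version, which passed a path string rather than an open file to the with block, and it now degrades gracefully: a missing distribution_info.json yields {} instead of an exception, so callers can treat distribution info as optional. A usage sketch (the 'en' data layout is an assumption):

dist_info = read_dist_info('en')  # reads data/en/distribution_info.json
if not dist_info:
    print('No distribution info installed for en; continuing without it.')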