# spaCy/spacy/en/__init__.py
# (web-view metadata: 175 lines, 5.6 KiB, Python)
# blame: 2014-12-21 23:25:43 +03:00
from __future__ import unicode_literals
from os import path
import re
import struct
import json
# blame: 2014-12-21 23:25:43 +03:00
from .. import orth
# blame: 2014-12-21 23:25:43 +03:00
from ..vocab import Vocab
from ..tokenizer import Tokenizer
from ..syntax.arc_eager import ArcEager
# blame: 2015-03-09 02:04:00 +03:00
from ..syntax.ner import BiluoPushDown
from ..syntax.parser import ParserFactory
from ..serialize.bits import BitArray
# blame: 2015-07-08 19:53:00 +03:00
from ..tokens import Doc
from ..multi_words import RegexMerger
# blame: 2014-12-21 23:25:43 +03:00
from .pos import EnPosTagger
from .pos import POS_TAGS
# blame: 2014-12-21 23:25:43 +03:00
from .attrs import get_flags
from . import regexes
# blame: 2014-12-21 23:25:43 +03:00
from ..util import read_lang_data
from ..attrs import TAG, HEAD, DEP, ENT_TYPE, ENT_IOB
def get_lex_props(string, oov_prob=-30):
    """Return the default lexeme attribute dict for a vocabulary entry.

    Args:
        string (unicode): The word's orthographic form.
        oov_prob (float): Log-probability to assign to out-of-vocabulary
            words. Defaults to -30.

    Returns:
        dict: Attribute name -> value, in the shape Vocab expects
            (flags, length, orth, lower, norm, shape, prefix, suffix,
            cluster, prob, sentiment).
    """
    return {
        'flags': get_flags(string),
        'length': len(string),
        'orth': string,
        'lower': string.lower(),
        'norm': string,
        'shape': orth.word_shape(string),
        # string[:1] instead of string[0]: identical for non-empty input,
        # but the empty string yields '' rather than raising IndexError.
        'prefix': string[:1],
        'suffix': string[-3:],
        'cluster': 0,
        'prob': oov_prob,
        'sentiment': 0
    }
# Sentinel value; appears unused within this file — presumably consumed by
# download/training scripts elsewhere in the package (TODO confirm).
if_model_present = -1

# blame: 2015-07-08 20:34:55 +03:00
# Default location of the packaged English model data, relative to this module.
LOCAL_DATA_DIR = path.join(path.dirname(__file__), 'data')
# blame: 2015-01-26 18:45:21 +03:00
# blame: 2014-12-21 23:25:43 +03:00
class English(object):
    """The English NLP pipeline.

    Example:
        Load data from default directory:
            >>> nlp = English()
            >>> nlp = English(data_dir=u'')
        Load data from specified directory:
            >>> nlp = English(data_dir=u'path/to/data_directory')
        Disable (and avoid loading) parts of the processing pipeline:
            >>> nlp = English(load_vectors=False, Parser=False, Tagger=False, Entity=False)
        Start with nothing loaded:
            >>> nlp = English(data_dir=None)
    """
    ParserTransitionSystem = ArcEager
    EntityTransitionSystem = BiluoPushDown

    def __init__(self,
                 data_dir=LOCAL_DATA_DIR,
                 Tokenizer=Tokenizer.from_dir,
                 Tagger=EnPosTagger,
                 Parser=ParserFactory(ParserTransitionSystem),
                 Entity=ParserFactory(EntityTransitionSystem),
                 Packer=None,
                 load_vectors=True
    ):
        """Load the pipeline.

        Args:
            data_dir (unicode or None): Directory holding the model data.
                None loads nothing.
            Tokenizer, Tagger, Parser, Entity, Packer: Component factories.
                Pass a falsy value to disable a component, or True to get
                the default implementation.
            load_vectors (bool): Whether to load word vectors into the vocab.
        """
        self.data_dir = data_dir

        # Out-of-vocabulary log-probability, if the model ships one. A
        # context manager closes the handle promptly (the original relied
        # on the GC). Guard data_dir: the class docstring advertises
        # data_dir=None, which previously crashed in path.join.
        oov_prob = None
        if data_dir:
            oov_prob_loc = path.join(data_dir, 'vocab', 'oov_prob')
            if path.exists(oov_prob_loc):
                with open(oov_prob_loc) as file_:
                    oov_prob = float(file_.read())

        self.vocab = Vocab(data_dir=path.join(data_dir, 'vocab') if data_dir else None,
                           get_lex_props=get_lex_props, load_vectors=load_vectors,
                           pos_tags=POS_TAGS,
                           oov_prob=oov_prob)

        # Allow True as shorthand for "use the default component". The
        # original built the lambdas from an unimported `parser` module,
        # which raised NameError; ParserFactory matches the parameter
        # defaults above and is already in scope.
        if Tagger is True:
            Tagger = EnPosTagger
        if Parser is True:
            Parser = ParserFactory(self.ParserTransitionSystem)
        if Entity is True:
            Entity = ParserFactory(self.EntityTransitionSystem)

        # NOTE(review): Tokenizer.from_dir with a None directory is
        # untested here — confirm it tolerates data_dir=None.
        self.tokenizer = Tokenizer(self.vocab,
                                   path.join(data_dir, 'tokenizer') if data_dir else None)
        # Each component is loaded only if enabled AND its data is on disk.
        if Tagger and data_dir and path.exists(path.join(data_dir, 'pos')):
            self.tagger = Tagger(self.vocab.strings, data_dir)
        else:
            self.tagger = None
        if Parser and data_dir and path.exists(path.join(data_dir, 'deps')):
            self.parser = Parser(self.vocab.strings, path.join(data_dir, 'deps'))
        else:
            self.parser = None
        if Entity and data_dir and path.exists(path.join(data_dir, 'ner')):
            self.entity = Entity(self.vocab.strings, path.join(data_dir, 'ner'))
        else:
            self.entity = None
        if Packer:
            self.packer = Packer(self.vocab, data_dir)
        else:
            self.packer = None
        # Merges common multi-word expressions (prepositions, times, dates,
        # money) into single tokens; applied in __call__ on request.
        self.mwe_merger = RegexMerger([
            ('IN', 'O', regexes.MW_PREPOSITIONS_RE),
            ('CD', 'TIME', regexes.TIME_RE),
            ('NNP', 'DATE', regexes.DAYS_RE),
            ('CD', 'MONEY', regexes.MONEY_RE)])

    def __call__(self, text, tag=True, parse=True, entity=True, merge_mwes=False):
        """Apply the pipeline to some text. The text can span multiple sentences,
        and can contain arbitrary whitespace. Alignment into the original string
        is preserved.

        Args:
            text (unicode): The text to be processed.
            tag (bool): Run the part-of-speech tagger (if loaded).
            parse (bool): Run the dependency parser (if loaded).
            entity (bool): Run the named-entity recognizer (if loaded).
            merge_mwes (bool): Merge multi-word expressions into single tokens.

        Returns:
            tokens (spacy.tokens.Doc):

        >>> from spacy.en import English
        >>> nlp = English()
        >>> tokens = nlp('An example sentence. Another example sentence.')
        >>> tokens[0].orth_, tokens[0].head.tag_
        ('An', 'NN')
        """
        tokens = self.tokenizer(text)
        # Each stage runs only if its component was loaded AND requested.
        if self.tagger and tag:
            self.tagger(tokens)
        if self.parser and parse:
            self.parser(tokens)
        if self.entity and entity:
            self.entity(tokens)
        if merge_mwes and self.mwe_merger is not None:
            self.mwe_merger(tokens)
        return tokens

    def end_training(self, data_dir=None):
        """Finish a training session: finalize the component models and
        write the string table and attribute-frequency tables to data_dir
        (defaults to self.data_dir).
        """
        if data_dir is None:
            data_dir = self.data_dir
        self.parser.model.end_training()
        self.entity.model.end_training()
        self.tagger.model.end_training()
        self.vocab.strings.dump(path.join(data_dir, 'vocab', 'strings.txt'))

        with open(path.join(data_dir, 'vocab', 'serializer.json'), 'w') as file_:
            # list(...) around items(): a no-op on Python 2, and required
            # on Python 3 where dict views are not JSON-serializable.
            file_.write(
                json.dumps([
                    (TAG, list(self.tagger.freqs[TAG].items())),
                    (DEP, list(self.parser.moves.freqs[DEP].items())),
                    (ENT_IOB, list(self.entity.moves.freqs[ENT_IOB].items())),
                    (ENT_TYPE, list(self.entity.moves.freqs[ENT_TYPE].items())),
                    (HEAD, list(self.parser.moves.freqs[HEAD].items()))]))

    @property
    def tags(self):
        """Deprecated. List of part-of-speech tag names."""
        return self.tagger.tag_names