Merge branch 'develop' of https://github.com/explosion/spaCy into develop

Matthew Honnibal 2017-10-17 18:22:06 +02:00
commit e35a83d142
6 changed files with 71 additions and 86 deletions

View File

@@ -600,7 +600,7 @@ class Language(object):
         deserializers = OrderedDict((
             ('vocab', lambda p: self.vocab.from_disk(p)),
             ('tokenizer', lambda p: self.tokenizer.from_disk(p, vocab=False)),
-            ('meta.json', lambda p: p.open('w').write(json_dumps(self.meta)))
+            ('meta.json', lambda p: self.meta.update(ujson.load(p.open('r'))))
         ))
         for name, proc in self.pipeline:
             if name in disable:
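
For context on the change above: Language.from_disk walks an ordered mapping of file names to loader callbacks, and the old 'meta.json' entry opened the file for writing (serializing the in-memory meta) instead of reading it. A minimal standalone sketch of the corrected read pattern, using a made-up ToyLanguage class and the standard json module rather than spaCy's actual internals:

from collections import OrderedDict
from pathlib import Path
import json


class ToyLanguage(object):
    """Illustrative stand-in, not spaCy's Language class."""

    def __init__(self):
        self.meta = {}

    def from_disk(self, path):
        path = Path(path)
        # Map file names under the model directory to loader callbacks.
        deserializers = OrderedDict((
            # The old, buggy entry wrote self.meta out: p.open('w').write(...)
            # The corrected entry reads meta.json and merges it into self.meta.
            ('meta.json', lambda p: self.meta.update(json.loads(p.read_text()))),
        ))
        for name, loader in deserializers.items():
            loader(path / name)
        return self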

View File

@@ -163,7 +163,7 @@ IDS = {
     "Degree_sup": Degree_sup,
     "Degree_abs": Degree_abs,
     "Degree_com": Degree_com,
-    "Degree_dim ": Degree_dim, # du
+    "Degree_dim": Degree_dim, # du
     "Degree_equ": Degree_equ, # U20
     "Evident_nfh": Evident_nfh, # U20
     "Gender_com": Gender_com,
@@ -189,8 +189,8 @@ IDS = {
     "Number_none": Number_none,
     "Number_plur": Number_plur,
     "Number_sing": Number_sing,
-    "Number_ptan ": Number_ptan, # bg
-    "Number_count ": Number_count, # bg, U20
+    "Number_ptan": Number_ptan, # bg
+    "Number_count": Number_count, # bg, U20
     "Number_tri": Number_tri, # U20
     "NumType_card": NumType_card,
     "NumType_dist": NumType_dist,
@@ -235,22 +235,22 @@ IDS = {
     "VerbForm_sup": VerbForm_sup,
     "VerbForm_trans": VerbForm_trans,
     "VerbForm_conv": VerbForm_conv, # U20
-    "VerbForm_gdv ": VerbForm_gdv, # la,
+    "VerbForm_gdv": VerbForm_gdv, # la,
     "VerbForm_vnoun": VerbForm_vnoun, # U20
     "Voice_act": Voice_act,
     "Voice_cau": Voice_cau,
     "Voice_pass": Voice_pass,
-    "Voice_mid ": Voice_mid, # gkc, U20
-    "Voice_int ": Voice_int, # hb,
+    "Voice_mid": Voice_mid, # gkc, U20
+    "Voice_int": Voice_int, # hb,
     "Voice_antip": Voice_antip, # U20
     "Voice_dir": Voice_dir, # U20
     "Voice_inv": Voice_inv, # U20
-    "Abbr_yes ": Abbr_yes, # cz, fi, sl, U,
-    "AdpType_prep ": AdpType_prep, # cz, U,
-    "AdpType_post ": AdpType_post, # U,
-    "AdpType_voc ": AdpType_voc, # cz,
-    "AdpType_comprep ": AdpType_comprep, # cz,
-    "AdpType_circ ": AdpType_circ, # U,
+    "Abbr_yes": Abbr_yes, # cz, fi, sl, U,
+    "AdpType_prep": AdpType_prep, # cz, U,
+    "AdpType_post": AdpType_post, # U,
+    "AdpType_voc": AdpType_voc, # cz,
+    "AdpType_comprep": AdpType_comprep, # cz,
+    "AdpType_circ": AdpType_circ, # U,
     "AdvType_man": AdvType_man,
     "AdvType_loc": AdvType_loc,
     "AdvType_tim": AdvType_tim,
@@ -260,56 +260,56 @@ IDS = {
     "AdvType_sta": AdvType_sta,
     "AdvType_ex": AdvType_ex,
     "AdvType_adadj": AdvType_adadj,
-    "ConjType_oper ": ConjType_oper, # cz, U,
-    "ConjType_comp ": ConjType_comp, # cz, U,
-    "Connegative_yes ": Connegative_yes, # fi,
-    "Derivation_minen ": Derivation_minen, # fi,
-    "Derivation_sti ": Derivation_sti, # fi,
-    "Derivation_inen ": Derivation_inen, # fi,
-    "Derivation_lainen ": Derivation_lainen, # fi,
-    "Derivation_ja ": Derivation_ja, # fi,
-    "Derivation_ton ": Derivation_ton, # fi,
-    "Derivation_vs ": Derivation_vs, # fi,
-    "Derivation_ttain ": Derivation_ttain, # fi,
-    "Derivation_ttaa ": Derivation_ttaa, # fi,
-    "Echo_rdp ": Echo_rdp, # U,
-    "Echo_ech ": Echo_ech, # U,
-    "Foreign_foreign ": Foreign_foreign, # cz, fi, U,
-    "Foreign_fscript ": Foreign_fscript, # cz, fi, U,
-    "Foreign_tscript ": Foreign_tscript, # cz, U,
-    "Foreign_yes ": Foreign_yes, # sl,
-    "Gender_dat_masc ": Gender_dat_masc, # bq, U,
-    "Gender_dat_fem ": Gender_dat_fem, # bq, U,
-    "Gender_erg_masc ": Gender_erg_masc, # bq,
-    "Gender_erg_fem ": Gender_erg_fem, # bq,
-    "Gender_psor_masc ": Gender_psor_masc, # cz, sl, U,
-    "Gender_psor_fem ": Gender_psor_fem, # cz, sl, U,
-    "Gender_psor_neut ": Gender_psor_neut, # sl,
-    "Hyph_yes ": Hyph_yes, # cz, U,
-    "InfForm_one ": InfForm_one, # fi,
-    "InfForm_two ": InfForm_two, # fi,
-    "InfForm_three ": InfForm_three, # fi,
-    "NameType_geo ": NameType_geo, # U, cz,
-    "NameType_prs ": NameType_prs, # U, cz,
-    "NameType_giv ": NameType_giv, # U, cz,
-    "NameType_sur ": NameType_sur, # U, cz,
-    "NameType_nat ": NameType_nat, # U, cz,
-    "NameType_com ": NameType_com, # U, cz,
-    "NameType_pro ": NameType_pro, # U, cz,
-    "NameType_oth ": NameType_oth, # U, cz,
-    "NounType_com ": NounType_com, # U,
-    "NounType_prop ": NounType_prop, # U,
-    "NounType_class ": NounType_class, # U,
-    "Number_abs_sing ": Number_abs_sing, # bq, U,
-    "Number_abs_plur ": Number_abs_plur, # bq, U,
-    "Number_dat_sing ": Number_dat_sing, # bq, U,
-    "Number_dat_plur ": Number_dat_plur, # bq, U,
-    "Number_erg_sing ": Number_erg_sing, # bq, U,
-    "Number_erg_plur ": Number_erg_plur, # bq, U,
-    "Number_psee_sing ": Number_psee_sing, # U,
-    "Number_psee_plur ": Number_psee_plur, # U,
-    "Number_psor_sing ": Number_psor_sing, # cz, fi, sl, U,
-    "Number_psor_plur ": Number_psor_plur, # cz, fi, sl, U,
+    "ConjType_oper": ConjType_oper, # cz, U,
+    "ConjType_comp": ConjType_comp, # cz, U,
+    "Connegative_yes": Connegative_yes, # fi,
+    "Derivation_minen": Derivation_minen, # fi,
+    "Derivation_sti": Derivation_sti, # fi,
+    "Derivation_inen": Derivation_inen, # fi,
+    "Derivation_lainen": Derivation_lainen, # fi,
+    "Derivation_ja": Derivation_ja, # fi,
+    "Derivation_ton": Derivation_ton, # fi,
+    "Derivation_vs": Derivation_vs, # fi,
+    "Derivation_ttain": Derivation_ttain, # fi,
+    "Derivation_ttaa": Derivation_ttaa, # fi,
+    "Echo_rdp": Echo_rdp, # U,
+    "Echo_ech": Echo_ech, # U,
+    "Foreign_foreign": Foreign_foreign, # cz, fi, U,
+    "Foreign_fscript": Foreign_fscript, # cz, fi, U,
+    "Foreign_tscript": Foreign_tscript, # cz, U,
+    "Foreign_yes": Foreign_yes, # sl,
+    "Gender_dat_masc": Gender_dat_masc, # bq, U,
+    "Gender_dat_fem": Gender_dat_fem, # bq, U,
+    "Gender_erg_masc": Gender_erg_masc, # bq,
+    "Gender_erg_fem": Gender_erg_fem, # bq,
+    "Gender_psor_masc": Gender_psor_masc, # cz, sl, U,
+    "Gender_psor_fem": Gender_psor_fem, # cz, sl, U,
+    "Gender_psor_neut": Gender_psor_neut, # sl,
+    "Hyph_yes": Hyph_yes, # cz, U,
+    "InfForm_one": InfForm_one, # fi,
+    "InfForm_two": InfForm_two, # fi,
+    "InfForm_three": InfForm_three, # fi,
+    "NameType_geo": NameType_geo, # U, cz,
+    "NameType_prs": NameType_prs, # U, cz,
+    "NameType_giv": NameType_giv, # U, cz,
+    "NameType_sur": NameType_sur, # U, cz,
+    "NameType_nat": NameType_nat, # U, cz,
+    "NameType_com": NameType_com, # U, cz,
+    "NameType_pro": NameType_pro, # U, cz,
+    "NameType_oth": NameType_oth, # U, cz,
+    "NounType_com": NounType_com, # U,
+    "NounType_prop": NounType_prop, # U,
+    "NounType_class": NounType_class, # U,
+    "Number_abs_sing": Number_abs_sing, # bq, U,
+    "Number_abs_plur": Number_abs_plur, # bq, U,
+    "Number_dat_sing": Number_dat_sing, # bq, U,
+    "Number_dat_plur": Number_dat_plur, # bq, U,
+    "Number_erg_sing": Number_erg_sing, # bq, U,
+    "Number_erg_plur": Number_erg_plur, # bq, U,
+    "Number_psee_sing": Number_psee_sing, # U,
+    "Number_psee_plur": Number_psee_plur, # U,
+    "Number_psor_sing": Number_psor_sing, # cz, fi, sl, U,
+    "Number_psor_plur": Number_psor_plur, # cz, fi, sl, U,
     "Number_pauc": Number_pauc, # U20
     "Number_grpa": Number_grpa, # U20
     "Number_grpl": Number_grpl, # U20
@@ -354,7 +354,7 @@ IDS = {
     "Polite_infm": Polite_infm, # U20
     "Polite_form": Polite_form, # U20
     "Polite_form_elev": Polite_form_elev, # U20
-    "Polite_form_humb ": Polite_form_humb, # U20
+    "Polite_form_humb": Polite_form_humb, # U20
     "Prefix_yes": Prefix_yes, # U,
     "PrepCase_npr": PrepCase_npr, # cz,
     "PrepCase_pre": PrepCase_pre, # U,

View File

@ -31,8 +31,7 @@
"StringStore": "stringstore", "StringStore": "stringstore",
"Vectors": "vectors", "Vectors": "vectors",
"GoldParse": "goldparse", "GoldParse": "goldparse",
"GoldCorpus": "goldcorpus", "GoldCorpus": "goldcorpus"
"Binder": "binder"
} }
}, },
@@ -193,13 +192,6 @@
         "source": "spacy/gold.pyx"
     },
 
-    "binder": {
-        "title": "Binder",
-        "tag": "class",
-        "tag_new": 2,
-        "source": "spacy/tokens/binder.pyx"
-    },
-
     "vectors": {
         "title": "Vectors",
         "teaser": "Store, save and load word vectors.",

View File

@ -1,7 +0,0 @@
//- 💫 DOCS > API > BINDER
include ../_includes/_mixins
p A container class for serializing collections of #[code Doc] objects.
+under-construction

View File

@@ -138,7 +138,3 @@ p
     +cell
         |  An annotated corpus, using the JSON file format. Manages
         |  annotations for tagging, dependency parsing and NER.
-
-    +row
-        +cell #[+api("binder") #[code Binder]]
-        +cell Container class for serializing collections of #[code Doc] objects.

View File

@ -206,7 +206,7 @@ p
| e.g. #[code from spacy.lang.en import English]. | e.g. #[code from spacy.lang.en import English].
+infobox +infobox
| #[+label-inline API:] #[+api("spacy#load") #[code spacy.load]], #[+api("binder") #[code Binder]] | #[+label-inline API:] #[+api("spacy#load") #[code spacy.load]]
| #[+label-inline Usage:] #[+a("/usage/saving-loading") Saving and loading] | #[+label-inline Usage:] #[+a("/usage/saving-loading") Saving and loading]
+h(3, "features-displacy") displaCy visualizer with Jupyter support +h(3, "features-displacy") displaCy visualizer with Jupyter support
@@ -387,7 +387,11 @@ p
     +row
         +cell #[code Doc.read_bytes]
-        +cell #[+api("binder") #[code Binder]]
+        +cell
+            |  #[+api("doc#to_bytes") #[code Doc.to_bytes]]
+            |  #[+api("doc#from_bytes") #[code Doc.from_bytes]]
+            |  #[+api("doc#to_disk") #[code Doc.to_disk]]
+            |  #[+api("doc#from_disk") #[code Doc.from_disk]]
 
     +row
         +cell #[code Token.is_ancestor_of]
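
The table row above now points to the Doc serialization methods that replace the removed Binder class. A minimal round-trip sketch with spaCy v2's documented API (the on-disk path is illustrative):

from spacy.lang.en import English
from spacy.tokens import Doc

nlp = English()
doc = nlp(u"Serialize me")

# In-memory round trip with Doc.to_bytes / Doc.from_bytes.
data = doc.to_bytes()
restored = Doc(nlp.vocab).from_bytes(data)
assert restored.text == doc.text

# On-disk round trip with Doc.to_disk / Doc.from_disk.
doc.to_disk('/tmp/example.doc')   # illustrative path
restored = Doc(nlp.vocab).from_disk('/tmp/example.doc')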