2017-03-10 20:22:04 +03:00
|
|
|
# cython: profile=True
|
|
|
|
# cython: cdivision=True
|
|
|
|
# cython: infer_types=True
|
2017-04-15 14:05:15 +03:00
|
|
|
# coding: utf-8
|
2015-01-24 09:29:04 +03:00
|
|
|
from __future__ import unicode_literals
|
|
|
|
|
2017-10-27 20:45:57 +03:00
|
|
|
from cpython.ref cimport Py_INCREF
|
2015-06-09 02:41:09 +03:00
|
|
|
from cymem.cymem cimport Pool
|
Improve label management in parser and NER (#2108)
This patch does a few smallish things that tighten up the training workflow a little, and allow memory use during training to be reduced by letting the GoldCorpus stream data properly.
Previously, the parser and entity recognizer read and saved labels as lists, with extra labels noted separately. Lists were used becaue ordering is very important, to ensure that the label-to-class mapping is stable.
We now manage labels as nested dictionaries, first keyed by the action, and then keyed by the label. Values are frequencies. The trick is, how do we save new labels? We need to make sure we iterate over these in the same order they're added. Otherwise, we'll get different class IDs, and the model's predictions won't make sense.
To allow stable sorting, we map the new labels to negative values. If we have two new labels, they'll be noted as having "frequency" -1 and -2. The next new label will then have "frequency" -3. When we sort by (frequency, label), we then get a stable sort.
Storing frequencies then allows us to make the next nice improvement. Previously we had to iterate over the whole training set, to pre-process it for the deprojectivisation. This led to storing the whole training set in memory. This was most of the required memory during training.
To prevent this, we now store the frequencies as we stream in the data, and deprojectivize as we go. Once we've built the frequencies, we can then apply a frequency cut-off when we decide how many classes to make.
Finally, to allow proper data streaming, we also have to have some way of shuffling the iterator. This is awkward if the training files have multiple documents in them. To solve this, the GoldCorpus class now writes the training data to disk in msgpack files, one per document. We can then shuffle the data by shuffling the paths.
This is a squash merge, as I made a lot of very small commits. Individual commit messages below.
* Simplify label management for TransitionSystem and its subclasses
* Fix serialization for new label handling format in parser
* Simplify and improve GoldCorpus class. Reduce memory use, write to temp dir
* Set actions in transition system
* Require thinc 6.11.1.dev4
* Fix error in parser init
* Add unicode declaration
* Fix unicode declaration
* Update textcat test
* Try to get model training on less memory
* Print json loc for now
* Try rapidjson to reduce memory use
* Remove rapidjson requirement
* Try rapidjson for reduced mem usage
* Handle None heads when projectivising
* Stream json docs
* Fix train script
* Handle projectivity in GoldParse
* Fix projectivity handling
* Add minibatch_by_words util from ud_train
* Minibatch by number of words in spacy.cli.train
* Move minibatch_by_words util to spacy.util
* Fix label handling
* More hacking at label management in parser
* Fix encoding in msgpack serialization in GoldParse
* Adjust batch sizes in parser training
* Fix minibatch_by_words
* Add merge_subtokens function to pipeline.pyx
* Register merge_subtokens factory
* Restore use of msgpack tmp directory
* Use minibatch-by-words in train
* Handle retokenization in scorer
* Change back-off approach for missing labels. Use 'dep' label
* Update NER for new label management
* Set NER tags for over-segmented words
* Fix label alignment in gold
* Fix label back-off for infrequent labels
* Fix int type in labels dict key
* Fix int type in labels dict key
* Update feature definition for 8 feature set
* Update ud-train script for new label stuff
* Fix json streamer
* Print the line number if conll eval fails
* Update children and sentence boundaries after deprojectivisation
* Export set_children_from_heads from doc.pxd
* Render parses during UD training
* Remove print statement
* Require thinc 6.11.1.dev6. Try adding wheel as install_requires
* Set different dev version, to flush pip cache
* Update thinc version
* Update GoldCorpus docs
* Remove print statements
* Fix formatting and links [ci skip]
2018-03-19 04:58:08 +03:00
|
|
|
from collections import OrderedDict, defaultdict, Counter
|
2017-07-20 16:02:55 +03:00
|
|
|
from thinc.extra.search cimport Beam
|
Improve label management in parser and NER (#2108)
This patch does a few smallish things that tighten up the training workflow a little, and allow memory use during training to be reduced by letting the GoldCorpus stream data properly.
Previously, the parser and entity recognizer read and saved labels as lists, with extra labels noted separately. Lists were used becaue ordering is very important, to ensure that the label-to-class mapping is stable.
We now manage labels as nested dictionaries, first keyed by the action, and then keyed by the label. Values are frequencies. The trick is, how do we save new labels? We need to make sure we iterate over these in the same order they're added. Otherwise, we'll get different class IDs, and the model's predictions won't make sense.
To allow stable sorting, we map the new labels to negative values. If we have two new labels, they'll be noted as having "frequency" -1 and -2. The next new label will then have "frequency" -3. When we sort by (frequency, label), we then get a stable sort.
Storing frequencies then allows us to make the next nice improvement. Previously we had to iterate over the whole training set, to pre-process it for the deprojectivisation. This led to storing the whole training set in memory. This was most of the required memory during training.
To prevent this, we now store the frequencies as we stream in the data, and deprojectivize as we go. Once we've built the frequencies, we can then apply a frequency cut-off when we decide how many classes to make.
Finally, to allow proper data streaming, we also have to have some way of shuffling the iterator. This is awkward if the training files have multiple documents in them. To solve this, the GoldCorpus class now writes the training data to disk in msgpack files, one per document. We can then shuffle the data by shuffling the paths.
This is a squash merge, as I made a lot of very small commits. Individual commit messages below.
* Simplify label management for TransitionSystem and its subclasses
* Fix serialization for new label handling format in parser
* Simplify and improve GoldCorpus class. Reduce memory use, write to temp dir
* Set actions in transition system
* Require thinc 6.11.1.dev4
* Fix error in parser init
* Add unicode declaration
* Fix unicode declaration
* Update textcat test
* Try to get model training on less memory
* Print json loc for now
* Try rapidjson to reduce memory use
* Remove rapidjson requirement
* Try rapidjson for reduced mem usage
* Handle None heads when projectivising
* Stream json docs
* Fix train script
* Handle projectivity in GoldParse
* Fix projectivity handling
* Add minibatch_by_words util from ud_train
* Minibatch by number of words in spacy.cli.train
* Move minibatch_by_words util to spacy.util
* Fix label handling
* More hacking at label management in parser
* Fix encoding in msgpack serialization in GoldParse
* Adjust batch sizes in parser training
* Fix minibatch_by_words
* Add merge_subtokens function to pipeline.pyx
* Register merge_subtokens factory
* Restore use of msgpack tmp directory
* Use minibatch-by-words in train
* Handle retokenization in scorer
* Change back-off approach for missing labels. Use 'dep' label
* Update NER for new label management
* Set NER tags for over-segmented words
* Fix label alignment in gold
* Fix label back-off for infrequent labels
* Fix int type in labels dict key
* Fix int type in labels dict key
* Update feature definition for 8 feature set
* Update ud-train script for new label stuff
* Fix json streamer
* Print the line number if conll eval fails
* Update children and sentence boundaries after deprojectivisation
* Export set_children_from_heads from doc.pxd
* Render parses during UD training
* Remove print statement
* Require thinc 6.11.1.dev6. Try adding wheel as install_requires
* Set different dev version, to flush pip cache
* Update thinc version
* Update GoldCorpus docs
* Remove print statements
* Fix formatting and links [ci skip]
2018-03-19 04:58:08 +03:00
|
|
|
import json
|
2017-04-15 14:05:15 +03:00
|
|
|
|
2015-06-09 22:20:14 +03:00
|
|
|
from .stateclass cimport StateClass
|
2017-10-27 20:45:57 +03:00
|
|
|
from ._state cimport StateC
|
Improve label management in parser and NER (#2108)
This patch does a few smallish things that tighten up the training workflow a little, and allow memory use during training to be reduced by letting the GoldCorpus stream data properly.
Previously, the parser and entity recognizer read and saved labels as lists, with extra labels noted separately. Lists were used becaue ordering is very important, to ensure that the label-to-class mapping is stable.
We now manage labels as nested dictionaries, first keyed by the action, and then keyed by the label. Values are frequencies. The trick is, how do we save new labels? We need to make sure we iterate over these in the same order they're added. Otherwise, we'll get different class IDs, and the model's predictions won't make sense.
To allow stable sorting, we map the new labels to negative values. If we have two new labels, they'll be noted as having "frequency" -1 and -2. The next new label will then have "frequency" -3. When we sort by (frequency, label), we then get a stable sort.
Storing frequencies then allows us to make the next nice improvement. Previously we had to iterate over the whole training set, to pre-process it for the deprojectivisation. This led to storing the whole training set in memory. This was most of the required memory during training.
To prevent this, we now store the frequencies as we stream in the data, and deprojectivize as we go. Once we've built the frequencies, we can then apply a frequency cut-off when we decide how many classes to make.
Finally, to allow proper data streaming, we also have to have some way of shuffling the iterator. This is awkward if the training files have multiple documents in them. To solve this, the GoldCorpus class now writes the training data to disk in msgpack files, one per document. We can then shuffle the data by shuffling the paths.
This is a squash merge, as I made a lot of very small commits. Individual commit messages below.
* Simplify label management for TransitionSystem and its subclasses
* Fix serialization for new label handling format in parser
* Simplify and improve GoldCorpus class. Reduce memory use, write to temp dir
* Set actions in transition system
* Require thinc 6.11.1.dev4
* Fix error in parser init
* Add unicode declaration
* Fix unicode declaration
* Update textcat test
* Try to get model training on less memory
* Print json loc for now
* Try rapidjson to reduce memory use
* Remove rapidjson requirement
* Try rapidjson for reduced mem usage
* Handle None heads when projectivising
* Stream json docs
* Fix train script
* Handle projectivity in GoldParse
* Fix projectivity handling
* Add minibatch_by_words util from ud_train
* Minibatch by number of words in spacy.cli.train
* Move minibatch_by_words util to spacy.util
* Fix label handling
* More hacking at label management in parser
* Fix encoding in msgpack serialization in GoldParse
* Adjust batch sizes in parser training
* Fix minibatch_by_words
* Add merge_subtokens function to pipeline.pyx
* Register merge_subtokens factory
* Restore use of msgpack tmp directory
* Use minibatch-by-words in train
* Handle retokenization in scorer
* Change back-off approach for missing labels. Use 'dep' label
* Update NER for new label management
* Set NER tags for over-segmented words
* Fix label alignment in gold
* Fix label back-off for infrequent labels
* Fix int type in labels dict key
* Fix int type in labels dict key
* Update feature definition for 8 feature set
* Update ud-train script for new label stuff
* Fix json streamer
* Print the line number if conll eval fails
* Update children and sentence boundaries after deprojectivisation
* Export set_children_from_heads from doc.pxd
* Render parses during UD training
* Remove print statement
* Require thinc 6.11.1.dev6. Try adding wheel as install_requires
* Set different dev version, to flush pip cache
* Update thinc version
* Update GoldCorpus docs
* Remove print statements
* Fix formatting and links [ci skip]
2018-03-19 04:58:08 +03:00
|
|
|
from . import nonproj
|
2017-04-15 14:05:15 +03:00
|
|
|
from .transition_system cimport move_cost_func_t, label_cost_func_t
|
2017-10-27 20:45:57 +03:00
|
|
|
from ..gold cimport GoldParse, GoldParseC
|
2017-04-15 14:05:15 +03:00
|
|
|
from ..structs cimport TokenC
|
2015-06-09 02:41:09 +03:00
|
|
|
|
Improve label management in parser and NER (#2108)
This patch does a few smallish things that tighten up the training workflow a little, and allow memory use during training to be reduced by letting the GoldCorpus stream data properly.
Previously, the parser and entity recognizer read and saved labels as lists, with extra labels noted separately. Lists were used becaue ordering is very important, to ensure that the label-to-class mapping is stable.
We now manage labels as nested dictionaries, first keyed by the action, and then keyed by the label. Values are frequencies. The trick is, how do we save new labels? We need to make sure we iterate over these in the same order they're added. Otherwise, we'll get different class IDs, and the model's predictions won't make sense.
To allow stable sorting, we map the new labels to negative values. If we have two new labels, they'll be noted as having "frequency" -1 and -2. The next new label will then have "frequency" -3. When we sort by (frequency, label), we then get a stable sort.
Storing frequencies then allows us to make the next nice improvement. Previously we had to iterate over the whole training set, to pre-process it for the deprojectivisation. This led to storing the whole training set in memory. This was most of the required memory during training.
To prevent this, we now store the frequencies as we stream in the data, and deprojectivize as we go. Once we've built the frequencies, we can then apply a frequency cut-off when we decide how many classes to make.
Finally, to allow proper data streaming, we also have to have some way of shuffling the iterator. This is awkward if the training files have multiple documents in them. To solve this, the GoldCorpus class now writes the training data to disk in msgpack files, one per document. We can then shuffle the data by shuffling the paths.
This is a squash merge, as I made a lot of very small commits. Individual commit messages below.
* Simplify label management for TransitionSystem and its subclasses
* Fix serialization for new label handling format in parser
* Simplify and improve GoldCorpus class. Reduce memory use, write to temp dir
* Set actions in transition system
* Require thinc 6.11.1.dev4
* Fix error in parser init
* Add unicode declaration
* Fix unicode declaration
* Update textcat test
* Try to get model training on less memory
* Print json loc for now
* Try rapidjson to reduce memory use
* Remove rapidjson requirement
* Try rapidjson for reduced mem usage
* Handle None heads when projectivising
* Stream json docs
* Fix train script
* Handle projectivity in GoldParse
* Fix projectivity handling
* Add minibatch_by_words util from ud_train
* Minibatch by number of words in spacy.cli.train
* Move minibatch_by_words util to spacy.util
* Fix label handling
* More hacking at label management in parser
* Fix encoding in msgpack serialization in GoldParse
* Adjust batch sizes in parser training
* Fix minibatch_by_words
* Add merge_subtokens function to pipeline.pyx
* Register merge_subtokens factory
* Restore use of msgpack tmp directory
* Use minibatch-by-words in train
* Handle retokenization in scorer
* Change back-off approach for missing labels. Use 'dep' label
* Update NER for new label management
* Set NER tags for over-segmented words
* Fix label alignment in gold
* Fix label back-off for infrequent labels
* Fix int type in labels dict key
* Fix int type in labels dict key
* Update feature definition for 8 feature set
* Update ud-train script for new label stuff
* Fix json streamer
* Print the line number if conll eval fails
* Update children and sentence boundaries after deprojectivisation
* Export set_children_from_heads from doc.pxd
* Render parses during UD training
* Remove print statement
* Require thinc 6.11.1.dev6. Try adding wheel as install_requires
* Set different dev version, to flush pip cache
* Update thinc version
* Update GoldCorpus docs
* Remove print statements
* Fix formatting and links [ci skip]
2018-03-19 04:58:08 +03:00
|
|
|
# Calculate cost as gold/not gold. We don't use scalar value anyway.
cdef int BINARY_COSTS = 1

# Compile-time flags (Cython DEF).
# NON_MONOTONIC is consulted in push_cost below (it relaxes the has-head
# check); USE_BREAK presumably gates the Break transition elsewhere in this
# file -- confirm against the full source.
DEF NON_MONOTONIC = True
DEF USE_BREAK = True

# Sentinel lower than any plausible transition score.
cdef weight_t MIN_SCORE = -90000

# Break transition from here
# http://www.aclweb.org/anthology/P13-1074
cdef enum:
    SHIFT
    REDUCE
    LEFT
    RIGHT

    BREAK

    N_MOVES

# Single-letter display names for each move, indexed by the enum above.
MOVE_NAMES = [None] * N_MOVES
MOVE_NAMES[SHIFT] = 'S'
MOVE_NAMES[REDUCE] = 'D'
MOVE_NAMES[LEFT] = 'L'
MOVE_NAMES[RIGHT] = 'R'
MOVE_NAMES[BREAK] = 'B'
|
|
|
|
|
2015-01-27 19:18:43 +03:00
|
|
|
|
2015-06-07 04:21:29 +03:00
|
|
|
# Helper functions for the arc-eager oracle

cdef weight_t push_cost(StateClass stcls, const GoldParseC* gold, int target) nogil:
    # Cost of pushing `target` (the first buffer word) onto the stack:
    # every gold arc between `target` and a word currently on the stack
    # becomes unreachable, except stack-word heads still allowed by the
    # NON_MONOTONIC setting.
    cdef weight_t cost = 0
    cdef int i, S_i
    for i in range(stcls.stack_depth()):
        S_i = stcls.S(i)
        if gold.heads[target] == S_i:
            # Gold head of target is on the stack -- pushing loses that arc.
            cost += 1
        if gold.heads[S_i] == target and (NON_MONOTONIC or not stcls.has_head(S_i)):
            # target is the gold head of a stack word we could still attach.
            cost += 1
        if BINARY_COSTS and cost >= 1:
            # Only gold/not-gold matters; stop counting as soon as cost > 0.
            return cost
    # Pushing also forfeits a currently-available zero-cost Break.
    cost += Break.is_valid(stcls.c, 0) and Break.move_cost(stcls, gold) == 0
    return cost
|
|
|
|
|
|
|
|
|
2016-01-30 16:31:12 +03:00
|
|
|
cdef weight_t pop_cost(StateClass stcls, const GoldParseC* gold, int target) nogil:
    # Cost of popping `target` (the stack top) off the stack: every gold arc
    # between `target` and a word still in the buffer becomes unreachable.
    cdef weight_t cost = 0
    cdef int i, B_i
    for i in range(stcls.buffer_length()):
        B_i = stcls.B(i)
        cost += gold.heads[B_i] == target
        cost += gold.heads[target] == B_i
        if gold.heads[B_i] == B_i or gold.heads[B_i] < target:
            # B_i is a root or its head lies before target: later buffer
            # words cannot attach across it, so stop scanning.
            break
        if BINARY_COSTS and cost >= 1:
            # Only gold/not-gold matters; stop counting as soon as cost > 0.
            return cost
    # Popping also forfeits a currently-available zero-cost Break.
    if Break.is_valid(stcls.c, 0) and Break.move_cost(stcls, gold) == 0:
        cost += 1
    return cost
|
|
|
|
|
2015-06-10 11:15:56 +03:00
|
|
|
|
2016-01-30 16:31:12 +03:00
|
|
|
cdef weight_t arc_cost(StateClass stcls, const GoldParseC* gold, int head, int child) nogil:
    # Cost of adding the arc head->child. Zero if the arc is gold; one if it
    # clobbers a reachable gold attachment for `child`.
    if arc_is_gold(gold, head, child):
        return 0
    elif stcls.H(child) == gold.heads[child]:
        # child already has its gold head; this arc would replace it.
        return 1
    # Head in buffer
    elif gold.heads[child] >= stcls.B(0) and stcls.B(1) != 0:
        # Gold head still reachable in the buffer -- this arc blocks it.
        return 1
    else:
        # Gold head is no longer reachable anyway; the arc costs nothing.
        return 0
|
2015-06-08 15:49:04 +03:00
|
|
|
|
|
|
|
|
2015-06-10 07:56:35 +03:00
|
|
|
cdef bint arc_is_gold(const GoldParseC* gold, int head, int child) nogil:
    # True if head->child matches the gold parse, or if the gold annotation
    # for `child` is missing (missing dependencies never incur cost).
    if not gold.has_dep[child]:
        return True
    elif gold.heads[child] == head:
        return True
    else:
        return False
|
|
|
|
|
|
|
|
|
2017-05-28 15:06:40 +03:00
|
|
|
cdef bint label_is_gold(const GoldParseC* gold, int head, int child, attr_t label) nogil:
    # True if `label` is acceptable for the arc onto `child`: the annotation
    # is missing, the label is unset (0), or it matches the gold label.
    if not gold.has_dep[child]:
        return True
    elif label == 0:
        return True
    elif gold.labels[child] == label:
        return True
    else:
        return False
|
|
|
|
|
|
|
|
|
2015-06-10 07:56:35 +03:00
|
|
|
cdef bint _is_gold_root(const GoldParseC* gold, int word) nogil:
    # A word is a (possible) gold root if it heads itself, or if its gold
    # annotation is missing (so root cannot be ruled out).
    return gold.heads[word] == word or not gold.has_dep[word]
|
2015-06-07 04:21:29 +03:00
|
|
|
|
2015-06-04 23:43:03 +03:00
|
|
|
cdef class Shift:
    """The Shift transition: push the first buffer word onto the stack."""
    @staticmethod
    cdef bint is_valid(const StateC* st, attr_t label) nogil:
        # Need at least two buffer words, B(0) must not have been shifted
        # before (unshift bookkeeping), and B(0) must not open a new sentence.
        sent_start = st._sent[st.B_(0).l_edge].sent_start
        return st.buffer_length() >= 2 and not st.shifted[st.B(0)] and sent_start != 1

    @staticmethod
    cdef int transition(StateC* st, attr_t label) nogil:
        st.push()
        st.fast_forward()

    @staticmethod
    cdef weight_t cost(StateClass st, const GoldParseC* gold, attr_t label) nogil:
        # Total oracle cost = structural move cost + label cost.
        return Shift.move_cost(st, gold) + Shift.label_cost(st, gold, label)

    @staticmethod
    cdef inline weight_t move_cost(StateClass s, const GoldParseC* gold) nogil:
        # Shifting B(0) forfeits gold arcs between B(0) and the stack.
        return push_cost(s, gold, s.B(0))

    @staticmethod
    cdef inline weight_t label_cost(StateClass s, const GoldParseC* gold, attr_t label) nogil:
        # Shift attaches no arc, so the label can never be wrong.
        return 0
|
|
|
|
|
2015-02-21 19:06:37 +03:00
|
|
|
|
2015-06-04 23:43:03 +03:00
|
|
|
cdef class Reduce:
    """The Reduce transition: pop the stack top (or unshift it if headless)."""
    @staticmethod
    cdef bint is_valid(const StateC* st, attr_t label) nogil:
        return st.stack_depth() >= 2

    @staticmethod
    cdef int transition(StateC* st, attr_t label) nogil:
        # Pop S(0) if it already has a head; otherwise unshift it back to
        # the buffer so it can still receive one (non-monotonic repair).
        if st.has_head(st.S(0)):
            st.pop()
        else:
            st.unshift()
        st.fast_forward()

    @staticmethod
    cdef weight_t cost(StateClass s, const GoldParseC* gold, attr_t label) nogil:
        # Total oracle cost = structural move cost + label cost.
        return Reduce.move_cost(s, gold) + Reduce.label_cost(s, gold, label)

    @staticmethod
    cdef inline weight_t move_cost(StateClass st, const GoldParseC* gold) nogil:
        cost = pop_cost(st, gold, st.S(0))
        if not st.has_head(st.S(0)):
            # S(0) will be unshifted, not lost, so arcs to/from the rest of
            # the stack stay reachable. Decrement cost for the arcs we save.
            for i in range(1, st.stack_depth()):
                S_i = st.S(i)
                if gold.heads[st.S(0)] == S_i:
                    cost -= 1
                if gold.heads[S_i] == st.S(0):
                    cost -= 1
            if Break.is_valid(st.c, 0) and Break.move_cost(st, gold) == 0:
                cost -= 1
        return cost

    @staticmethod
    cdef inline weight_t label_cost(StateClass s, const GoldParseC* gold, attr_t label) nogil:
        # Reduce attaches no arc, so the label can never be wrong.
        return 0
|
|
|
|
|
2015-01-27 19:09:45 +03:00
|
|
|
|
2015-06-04 23:43:03 +03:00
|
|
|
cdef class LeftArc:
    # LeftArc action: attach S(0) as a left dependent of B(0) (head is the
    # buffer token), then pop S(0).

    @staticmethod
    cdef bint is_valid(const StateC* st, attr_t label) nogil:
        # Disallow an arc across a hard sentence boundary: sent_start == 1
        # on the left edge of B(0) marks a forced sentence start.
        sent_start = st._sent[st.B_(0).l_edge].sent_start
        return sent_start != 1

    @staticmethod
    cdef int transition(StateC* st, attr_t label) nogil:
        st.add_arc(st.B(0), st.S(0), label)
        st.pop()
        st.fast_forward()

    @staticmethod
    cdef weight_t cost(StateClass s, const GoldParseC* gold, attr_t label) nogil:
        # Total oracle cost of a LeftArc = move cost + label cost.
        return LeftArc.move_cost(s, gold) + LeftArc.label_cost(s, gold, label)

    @staticmethod
    cdef inline weight_t move_cost(StateClass s, const GoldParseC* gold) nogil:
        cdef weight_t cost = 0
        if arc_is_gold(gold, s.B(0), s.S(0)):
            # Have a negative cost if we 'recover' from the wrong dependency
            return 0 if not s.has_head(s.S(0)) else -1
        else:
            # Account for deps we might lose between S0 and stack
            if not s.has_head(s.S(0)):
                for i in range(1, s.stack_depth()):
                    # Boolean comparisons coerce to 0/1 cost increments.
                    cost += gold.heads[s.S(i)] == s.S(0)
                    cost += gold.heads[s.S(0)] == s.S(i)
            return cost + pop_cost(s, gold, s.S(0)) + arc_cost(s, gold, s.B(0), s.S(0))

    @staticmethod
    cdef inline weight_t label_cost(StateClass s, const GoldParseC* gold, attr_t label) nogil:
        # 1 if the arc B(0)->S(0) is gold but the label is wrong, else 0.
        return arc_is_gold(gold, s.B(0), s.S(0)) and not label_is_gold(gold, s.B(0), s.S(0), label)
|
2015-06-07 04:21:29 +03:00
|
|
|
|
2015-01-27 19:09:45 +03:00
|
|
|
|
2015-06-04 23:43:03 +03:00
|
|
|
cdef class RightArc:
    # RightArc action: attach B(0) as a right dependent of S(0) (head is
    # the stack token), then push B(0).

    @staticmethod
    cdef bint is_valid(const StateC* st, attr_t label) nogil:
        # If there's (perhaps partial) parse pre-set, don't allow cycle:
        # B(0) must not already be the head of S(0). Also disallow an arc
        # across a forced sentence start (sent_start == 1).
        sent_start = st._sent[st.B_(0).l_edge].sent_start
        return sent_start != 1 and st.H(st.S(0)) != st.B(0)

    @staticmethod
    cdef int transition(StateC* st, attr_t label) nogil:
        st.add_arc(st.S(0), st.B(0), label)
        st.push()
        st.fast_forward()

    @staticmethod
    cdef inline weight_t cost(StateClass s, const GoldParseC* gold, attr_t label) nogil:
        # Total oracle cost of a RightArc = move cost + label cost.
        return RightArc.move_cost(s, gold) + RightArc.label_cost(s, gold, label)

    @staticmethod
    cdef inline weight_t move_cost(StateClass s, const GoldParseC* gold) nogil:
        if arc_is_gold(gold, s.S(0), s.B(0)):
            # The arc we would add is in the gold parse: free move.
            return 0
        elif s.c.shifted[s.B(0)]:
            # B(0) was previously unshifted: only the push itself costs.
            return push_cost(s, gold, s.B(0))
        else:
            return push_cost(s, gold, s.B(0)) + arc_cost(s, gold, s.S(0), s.B(0))

    @staticmethod
    cdef weight_t label_cost(StateClass s, const GoldParseC* gold, attr_t label) nogil:
        # 1 if the arc S(0)->B(0) is gold but the label is wrong, else 0.
        return arc_is_gold(gold, s.S(0), s.B(0)) and not label_is_gold(gold, s.S(0), s.B(0), label)
|
2015-02-21 19:06:37 +03:00
|
|
|
|
|
|
|
|
2015-06-04 23:43:03 +03:00
|
|
|
cdef class Break:
    # Break action: insert a sentence boundary at the left edge of B(0).

    @staticmethod
    cdef bint is_valid(const StateC* st, attr_t label) nogil:
        cdef int i
        if not USE_BREAK:
            # Break is disabled globally.
            return False
        elif st.at_break():
            # Already at a break point.
            return False
        elif st.stack_depth() < 1:
            # Need at least one token on the stack.
            return False
        elif st.B_(0).l_edge < 0:
            # No valid left edge for B(0).
            return False
        elif st._sent[st.B_(0).l_edge].sent_start < 0:
            # NOTE(review): sent_start < 0 appears to mark a position
            # where a boundary is explicitly disallowed -- confirm.
            return False
        else:
            return True

    @staticmethod
    cdef int transition(StateC* st, attr_t label) nogil:
        st.set_break(st.B_(0).l_edge)
        st.fast_forward()

    @staticmethod
    cdef weight_t cost(StateClass s, const GoldParseC* gold, attr_t label) nogil:
        # Total oracle cost of a Break = move cost + label cost.
        return Break.move_cost(s, gold) + Break.label_cost(s, gold, label)

    @staticmethod
    cdef inline weight_t move_cost(StateClass s, const GoldParseC* gold) nogil:
        cdef weight_t cost = 0
        cdef int i, j, S_i, B_i
        # Count every gold arc that crosses the proposed boundary, i.e.
        # links a stack token to a buffer token in either direction.
        for i in range(s.stack_depth()):
            S_i = s.S(i)
            for j in range(s.buffer_length()):
                B_i = s.B(j)
                cost += gold.heads[S_i] == B_i
                cost += gold.heads[B_i] == S_i
        if cost != 0:
            return cost
        # Check for sentence boundary --- if it's here, we can't have any deps
        # between stack and buffer, so rest of action is irrelevant.
        s0_root = _get_root(s.S(0), gold)
        b0_root = _get_root(s.B(0), gold)
        if s0_root != b0_root or s0_root == -1 or b0_root == -1:
            return cost
        else:
            # Same tree on both sides: breaking here is one mistake.
            return cost + 1

    @staticmethod
    cdef inline weight_t label_cost(StateClass s, const GoldParseC* gold, attr_t label) nogil:
        # Break is an unlabelled action: the label can never be wrong.
        return 0
|
2015-02-21 19:06:37 +03:00
|
|
|
|
2015-06-12 02:50:23 +03:00
|
|
|
cdef int _get_root(int word, const GoldParseC* gold) nogil:
    # Follow the gold head chain from `word` up to its root.
    #
    # Returns the root's index, or -1 when the chain reaches a token with
    # no gold dependency annotation (has_dep[word] == 0), meaning the root
    # is undetermined.
    #
    # FIX: the bounds guard `word >= 0` must run *before* the array reads.
    # The original evaluated `gold.heads[word]` and `gold.has_dep[word]`
    # first, so a negative head sentinel caused an out-of-bounds read in
    # this nogil (raw C array) context before the guard could stop it.
    while word >= 0 and gold.heads[word] != word and gold.has_dep[word]:
        word = gold.heads[word]
    # Guard the final index the same way before declaring a root.
    if word < 0 or not gold.has_dep[word]:
        return -1
    else:
        return word
|
2015-08-09 00:31:54 +03:00
|
|
|
|
2015-02-21 19:06:37 +03:00
|
|
|
|
2017-03-11 20:12:01 +03:00
|
|
|
cdef void* _init_state(Pool mem, int length, void* tokens) except NULL:
    # Allocate and prime a fresh StateC over the token array, for use as
    # the beam parser's initial state (installed via init_beam_state).
    # NOTE(review): `mem` is unused; the state is heap-allocated with
    # `new`, so ownership/freeing presumably lies with the beam machinery
    # -- confirm there is a matching delete.
    st = new StateC(<const TokenC*>tokens, length)
    for i in range(st.length):
        # Tokens with no pre-set dependency (dep == 0) are reset to a
        # clean, headless state before parsing begins.
        if st._sent[i].dep == 0:
            st._sent[i].l_edge = i
            st._sent[i].r_edge = i
            st._sent[i].head = 0
            st._sent[i].dep = 0  # already 0 by the guard; kept explicit
            st._sent[i].l_kids = 0
            st._sent[i].r_kids = 0
    st.fast_forward()
    return <void*>st
|
|
|
|
|
|
|
|
|
2015-06-05 03:27:17 +03:00
|
|
|
cdef class ArcEager(TransitionSystem):
|
2017-03-11 20:12:01 +03:00
|
|
|
    def __init__(self, *args, **kwargs):
        # Defer to the base TransitionSystem initialiser, then install the
        # module-level _init_state as the beam-search state constructor.
        TransitionSystem.__init__(self, *args, **kwargs)
        self.init_beam_state = _init_state
|
|
|
|
|
2015-06-05 03:27:17 +03:00
|
|
|
@classmethod
|
2016-10-16 22:34:57 +03:00
|
|
|
def get_actions(cls, **kwargs):
|
Improve label management in parser and NER (#2108)
This patch does a few smallish things that tighten up the training workflow a little, and allow memory use during training to be reduced by letting the GoldCorpus stream data properly.
Previously, the parser and entity recognizer read and saved labels as lists, with extra labels noted separately. Lists were used becaue ordering is very important, to ensure that the label-to-class mapping is stable.
We now manage labels as nested dictionaries, first keyed by the action, and then keyed by the label. Values are frequencies. The trick is, how do we save new labels? We need to make sure we iterate over these in the same order they're added. Otherwise, we'll get different class IDs, and the model's predictions won't make sense.
To allow stable sorting, we map the new labels to negative values. If we have two new labels, they'll be noted as having "frequency" -1 and -2. The next new label will then have "frequency" -3. When we sort by (frequency, label), we then get a stable sort.
Storing frequencies then allows us to make the next nice improvement. Previously we had to iterate over the whole training set, to pre-process it for the deprojectivisation. This led to storing the whole training set in memory. This was most of the required memory during training.
To prevent this, we now store the frequencies as we stream in the data, and deprojectivize as we go. Once we've built the frequencies, we can then apply a frequency cut-off when we decide how many classes to make.
Finally, to allow proper data streaming, we also have to have some way of shuffling the iterator. This is awkward if the training files have multiple documents in them. To solve this, the GoldCorpus class now writes the training data to disk in msgpack files, one per document. We can then shuffle the data by shuffling the paths.
This is a squash merge, as I made a lot of very small commits. Individual commit messages below.
* Simplify label management for TransitionSystem and its subclasses
* Fix serialization for new label handling format in parser
* Simplify and improve GoldCorpus class. Reduce memory use, write to temp dir
* Set actions in transition system
* Require thinc 6.11.1.dev4
* Fix error in parser init
* Add unicode declaration
* Fix unicode declaration
* Update textcat test
* Try to get model training on less memory
* Print json loc for now
* Try rapidjson to reduce memory use
* Remove rapidjson requirement
* Try rapidjson for reduced mem usage
* Handle None heads when projectivising
* Stream json docs
* Fix train script
* Handle projectivity in GoldParse
* Fix projectivity handling
* Add minibatch_by_words util from ud_train
* Minibatch by number of words in spacy.cli.train
* Move minibatch_by_words util to spacy.util
* Fix label handling
* More hacking at label management in parser
* Fix encoding in msgpack serialization in GoldParse
* Adjust batch sizes in parser training
* Fix minibatch_by_words
* Add merge_subtokens function to pipeline.pyx
* Register merge_subtokens factory
* Restore use of msgpack tmp directory
* Use minibatch-by-words in train
* Handle retokenization in scorer
* Change back-off approach for missing labels. Use 'dep' label
* Update NER for new label management
* Set NER tags for over-segmented words
* Fix label alignment in gold
* Fix label back-off for infrequent labels
* Fix int type in labels dict key
* Fix int type in labels dict key
* Update feature definition for 8 feature set
* Update ud-train script for new label stuff
* Fix json streamer
* Print the line number if conll eval fails
* Update children and sentence boundaries after deprojectivisation
* Export set_children_from_heads from doc.pxd
* Render parses during UD training
* Remove print statement
* Require thinc 6.11.1.dev6. Try adding wheel as install_requires
* Set different dev version, to flush pip cache
* Update thinc version
* Update GoldCorpus docs
* Remove print statements
* Fix formatting and links [ci skip]
2018-03-19 04:58:08 +03:00
|
|
|
min_freq = kwargs.get('min_freq', None)
|
|
|
|
actions = defaultdict(lambda: Counter())
|
|
|
|
actions[SHIFT][''] = 1
|
|
|
|
actions[REDUCE][''] = 1
|
2016-10-16 22:34:57 +03:00
|
|
|
for label in kwargs.get('left_labels', []):
|
Improve label management in parser and NER (#2108)
This patch does a few smallish things that tighten up the training workflow a little, and allow memory use during training to be reduced by letting the GoldCorpus stream data properly.
Previously, the parser and entity recognizer read and saved labels as lists, with extra labels noted separately. Lists were used becaue ordering is very important, to ensure that the label-to-class mapping is stable.
We now manage labels as nested dictionaries, first keyed by the action, and then keyed by the label. Values are frequencies. The trick is, how do we save new labels? We need to make sure we iterate over these in the same order they're added. Otherwise, we'll get different class IDs, and the model's predictions won't make sense.
To allow stable sorting, we map the new labels to negative values. If we have two new labels, they'll be noted as having "frequency" -1 and -2. The next new label will then have "frequency" -3. When we sort by (frequency, label), we then get a stable sort.
Storing frequencies then allows us to make the next nice improvement. Previously we had to iterate over the whole training set, to pre-process it for the deprojectivisation. This led to storing the whole training set in memory. This was most of the required memory during training.
To prevent this, we now store the frequencies as we stream in the data, and deprojectivize as we go. Once we've built the frequencies, we can then apply a frequency cut-off when we decide how many classes to make.
Finally, to allow proper data streaming, we also have to have some way of shuffling the iterator. This is awkward if the training files have multiple documents in them. To solve this, the GoldCorpus class now writes the training data to disk in msgpack files, one per document. We can then shuffle the data by shuffling the paths.
This is a squash merge, as I made a lot of very small commits. Individual commit messages below.
* Simplify label management for TransitionSystem and its subclasses
* Fix serialization for new label handling format in parser
* Simplify and improve GoldCorpus class. Reduce memory use, write to temp dir
* Set actions in transition system
* Require thinc 6.11.1.dev4
* Fix error in parser init
* Add unicode declaration
* Fix unicode declaration
* Update textcat test
* Try to get model training on less memory
* Print json loc for now
* Try rapidjson to reduce memory use
* Remove rapidjson requirement
* Try rapidjson for reduced mem usage
* Handle None heads when projectivising
* Stream json docs
* Fix train script
* Handle projectivity in GoldParse
* Fix projectivity handling
* Add minibatch_by_words util from ud_train
* Minibatch by number of words in spacy.cli.train
* Move minibatch_by_words util to spacy.util
* Fix label handling
* More hacking at label management in parser
* Fix encoding in msgpack serialization in GoldParse
* Adjust batch sizes in parser training
* Fix minibatch_by_words
* Add merge_subtokens function to pipeline.pyx
* Register merge_subtokens factory
* Restore use of msgpack tmp directory
* Use minibatch-by-words in train
* Handle retokenization in scorer
* Change back-off approach for missing labels. Use 'dep' label
* Update NER for new label management
* Set NER tags for over-segmented words
* Fix label alignment in gold
* Fix label back-off for infrequent labels
* Fix int type in labels dict key
* Fix int type in labels dict key
* Update feature definition for 8 feature set
* Update ud-train script for new label stuff
* Fix json streamer
* Print the line number if conll eval fails
* Update children and sentence boundaries after deprojectivisation
* Export set_children_from_heads from doc.pxd
* Render parses during UD training
* Remove print statement
* Require thinc 6.11.1.dev6. Try adding wheel as install_requires
* Set different dev version, to flush pip cache
* Update thinc version
* Update GoldCorpus docs
* Remove print statements
* Fix formatting and links [ci skip]
2018-03-19 04:58:08 +03:00
|
|
|
actions[LEFT][label] = 1
|
|
|
|
actions[SHIFT][label] = 1
|
2016-10-16 22:34:57 +03:00
|
|
|
for label in kwargs.get('right_labels', []):
|
Improve label management in parser and NER (#2108)
This patch does a few smallish things that tighten up the training workflow a little, and allow memory use during training to be reduced by letting the GoldCorpus stream data properly.
Previously, the parser and entity recognizer read and saved labels as lists, with extra labels noted separately. Lists were used becaue ordering is very important, to ensure that the label-to-class mapping is stable.
We now manage labels as nested dictionaries, first keyed by the action, and then keyed by the label. Values are frequencies. The trick is, how do we save new labels? We need to make sure we iterate over these in the same order they're added. Otherwise, we'll get different class IDs, and the model's predictions won't make sense.
To allow stable sorting, we map the new labels to negative values. If we have two new labels, they'll be noted as having "frequency" -1 and -2. The next new label will then have "frequency" -3. When we sort by (frequency, label), we then get a stable sort.
Storing frequencies then allows us to make the next nice improvement. Previously we had to iterate over the whole training set, to pre-process it for the deprojectivisation. This led to storing the whole training set in memory. This was most of the required memory during training.
To prevent this, we now store the frequencies as we stream in the data, and deprojectivize as we go. Once we've built the frequencies, we can then apply a frequency cut-off when we decide how many classes to make.
Finally, to allow proper data streaming, we also have to have some way of shuffling the iterator. This is awkward if the training files have multiple documents in them. To solve this, the GoldCorpus class now writes the training data to disk in msgpack files, one per document. We can then shuffle the data by shuffling the paths.
This is a squash merge, as I made a lot of very small commits. Individual commit messages below.
* Simplify label management for TransitionSystem and its subclasses
* Fix serialization for new label handling format in parser
* Simplify and improve GoldCorpus class. Reduce memory use, write to temp dir
* Set actions in transition system
* Require thinc 6.11.1.dev4
* Fix error in parser init
* Add unicode declaration
* Fix unicode declaration
* Update textcat test
* Try to get model training on less memory
* Print json loc for now
* Try rapidjson to reduce memory use
* Remove rapidjson requirement
* Try rapidjson for reduced mem usage
* Handle None heads when projectivising
* Stream json docs
* Fix train script
* Handle projectivity in GoldParse
* Fix projectivity handling
* Add minibatch_by_words util from ud_train
* Minibatch by number of words in spacy.cli.train
* Move minibatch_by_words util to spacy.util
* Fix label handling
* More hacking at label management in parser
* Fix encoding in msgpack serialization in GoldParse
* Adjust batch sizes in parser training
* Fix minibatch_by_words
* Add merge_subtokens function to pipeline.pyx
* Register merge_subtokens factory
* Restore use of msgpack tmp directory
* Use minibatch-by-words in train
* Handle retokenization in scorer
* Change back-off approach for missing labels. Use 'dep' label
* Update NER for new label management
* Set NER tags for over-segmented words
* Fix label alignment in gold
* Fix label back-off for infrequent labels
* Fix int type in labels dict key
* Fix int type in labels dict key
* Update feature definition for 8 feature set
* Update ud-train script for new label stuff
* Fix json streamer
* Print the line number if conll eval fails
* Update children and sentence boundaries after deprojectivisation
* Export set_children_from_heads from doc.pxd
* Render parses during UD training
* Remove print statement
* Require thinc 6.11.1.dev6. Try adding wheel as install_requires
* Set different dev version, to flush pip cache
* Update thinc version
* Update GoldCorpus docs
* Remove print statements
* Fix formatting and links [ci skip]
2018-03-19 04:58:08 +03:00
|
|
|
actions[RIGHT][label] = 1
|
|
|
|
actions[REDUCE][label] = 1
|
2016-10-16 22:34:57 +03:00
|
|
|
for raw_text, sents in kwargs.get('gold_parses', []):
|
2015-06-05 03:27:17 +03:00
|
|
|
for (ids, words, tags, heads, labels, iob), ctnts in sents:
|
Improve label management in parser and NER (#2108)
This patch does a few smallish things that tighten up the training workflow a little, and allow memory use during training to be reduced by letting the GoldCorpus stream data properly.
Previously, the parser and entity recognizer read and saved labels as lists, with extra labels noted separately. Lists were used becaue ordering is very important, to ensure that the label-to-class mapping is stable.
We now manage labels as nested dictionaries, first keyed by the action, and then keyed by the label. Values are frequencies. The trick is, how do we save new labels? We need to make sure we iterate over these in the same order they're added. Otherwise, we'll get different class IDs, and the model's predictions won't make sense.
To allow stable sorting, we map the new labels to negative values. If we have two new labels, they'll be noted as having "frequency" -1 and -2. The next new label will then have "frequency" -3. When we sort by (frequency, label), we then get a stable sort.
Storing frequencies then allows us to make the next nice improvement. Previously we had to iterate over the whole training set, to pre-process it for the deprojectivisation. This led to storing the whole training set in memory. This was most of the required memory during training.
To prevent this, we now store the frequencies as we stream in the data, and deprojectivize as we go. Once we've built the frequencies, we can then apply a frequency cut-off when we decide how many classes to make.
Finally, to allow proper data streaming, we also have to have some way of shuffling the iterator. This is awkward if the training files have multiple documents in them. To solve this, the GoldCorpus class now writes the training data to disk in msgpack files, one per document. We can then shuffle the data by shuffling the paths.
This is a squash merge, as I made a lot of very small commits. Individual commit messages below.
* Simplify label management for TransitionSystem and its subclasses
* Fix serialization for new label handling format in parser
* Simplify and improve GoldCorpus class. Reduce memory use, write to temp dir
* Set actions in transition system
* Require thinc 6.11.1.dev4
* Fix error in parser init
* Add unicode declaration
* Fix unicode declaration
* Update textcat test
* Try to get model training on less memory
* Print json loc for now
* Try rapidjson to reduce memory use
* Remove rapidjson requirement
* Try rapidjson for reduced mem usage
* Handle None heads when projectivising
* Stream json docs
* Fix train script
* Handle projectivity in GoldParse
* Fix projectivity handling
* Add minibatch_by_words util from ud_train
* Minibatch by number of words in spacy.cli.train
* Move minibatch_by_words util to spacy.util
* Fix label handling
* More hacking at label management in parser
* Fix encoding in msgpack serialization in GoldParse
* Adjust batch sizes in parser training
* Fix minibatch_by_words
* Add merge_subtokens function to pipeline.pyx
* Register merge_subtokens factory
* Restore use of msgpack tmp directory
* Use minibatch-by-words in train
* Handle retokenization in scorer
* Change back-off approach for missing labels. Use 'dep' label
* Update NER for new label management
* Set NER tags for over-segmented words
* Fix label alignment in gold
* Fix label back-off for infrequent labels
* Fix int type in labels dict key
* Fix int type in labels dict key
* Update feature definition for 8 feature set
* Update ud-train script for new label stuff
* Fix json streamer
* Print the line number if conll eval fails
* Update children and sentence boundaries after deprojectivisation
* Export set_children_from_heads from doc.pxd
* Render parses during UD training
* Remove print statement
* Require thinc 6.11.1.dev6. Try adding wheel as install_requires
* Set different dev version, to flush pip cache
* Update thinc version
* Update GoldCorpus docs
* Remove print statements
* Fix formatting and links [ci skip]
2018-03-19 04:58:08 +03:00
|
|
|
heads, labels = nonproj.projectivize(heads, labels)
|
2015-06-05 03:27:17 +03:00
|
|
|
for child, head, label in zip(ids, heads, labels):
|
Improve label management in parser and NER (#2108)
This patch does a few smallish things that tighten up the training workflow a little, and allow memory use during training to be reduced by letting the GoldCorpus stream data properly.
Previously, the parser and entity recognizer read and saved labels as lists, with extra labels noted separately. Lists were used becaue ordering is very important, to ensure that the label-to-class mapping is stable.
We now manage labels as nested dictionaries, first keyed by the action, and then keyed by the label. Values are frequencies. The trick is, how do we save new labels? We need to make sure we iterate over these in the same order they're added. Otherwise, we'll get different class IDs, and the model's predictions won't make sense.
To allow stable sorting, we map the new labels to negative values. If we have two new labels, they'll be noted as having "frequency" -1 and -2. The next new label will then have "frequency" -3. When we sort by (frequency, label), we then get a stable sort.
Storing frequencies then allows us to make the next nice improvement. Previously we had to iterate over the whole training set, to pre-process it for the deprojectivisation. This led to storing the whole training set in memory. This was most of the required memory during training.
To prevent this, we now store the frequencies as we stream in the data, and deprojectivize as we go. Once we've built the frequencies, we can then apply a frequency cut-off when we decide how many classes to make.
Finally, to allow proper data streaming, we also have to have some way of shuffling the iterator. This is awkward if the training files have multiple documents in them. To solve this, the GoldCorpus class now writes the training data to disk in msgpack files, one per document. We can then shuffle the data by shuffling the paths.
This is a squash merge, as I made a lot of very small commits. Individual commit messages below.
* Simplify label management for TransitionSystem and its subclasses
* Fix serialization for new label handling format in parser
* Simplify and improve GoldCorpus class. Reduce memory use, write to temp dir
* Set actions in transition system
* Require thinc 6.11.1.dev4
* Fix error in parser init
* Add unicode declaration
* Fix unicode declaration
* Update textcat test
* Try to get model training on less memory
* Print json loc for now
* Try rapidjson to reduce memory use
* Remove rapidjson requirement
* Try rapidjson for reduced mem usage
* Handle None heads when projectivising
* Stream json docs
* Fix train script
* Handle projectivity in GoldParse
* Fix projectivity handling
* Add minibatch_by_words util from ud_train
* Minibatch by number of words in spacy.cli.train
* Move minibatch_by_words util to spacy.util
* Fix label handling
* More hacking at label management in parser
* Fix encoding in msgpack serialization in GoldParse
* Adjust batch sizes in parser training
* Fix minibatch_by_words
* Add merge_subtokens function to pipeline.pyx
* Register merge_subtokens factory
* Restore use of msgpack tmp directory
* Use minibatch-by-words in train
* Handle retokenization in scorer
* Change back-off approach for missing labels. Use 'dep' label
* Update NER for new label management
* Set NER tags for over-segmented words
* Fix label alignment in gold
* Fix label back-off for infrequent labels
* Fix int type in labels dict key
* Fix int type in labels dict key
* Update feature definition for 8 feature set
* Update ud-train script for new label stuff
* Fix json streamer
* Print the line number if conll eval fails
* Update children and sentence boundaries after deprojectivisation
* Export set_children_from_heads from doc.pxd
* Render parses during UD training
* Remove print statement
* Require thinc 6.11.1.dev6. Try adding wheel as install_requires
* Set different dev version, to flush pip cache
* Update thinc version
* Update GoldCorpus docs
* Remove print statements
* Fix formatting and links [ci skip]
2018-03-19 04:58:08 +03:00
|
|
|
if label.upper() == 'ROOT' :
|
2015-06-23 05:14:03 +03:00
|
|
|
label = 'ROOT'
|
Improve label management in parser and NER (#2108)
This patch does a few smallish things that tighten up the training workflow a little, and allow memory use during training to be reduced by letting the GoldCorpus stream data properly.
Previously, the parser and entity recognizer read and saved labels as lists, with extra labels noted separately. Lists were used becaue ordering is very important, to ensure that the label-to-class mapping is stable.
We now manage labels as nested dictionaries, first keyed by the action, and then keyed by the label. Values are frequencies. The trick is, how do we save new labels? We need to make sure we iterate over these in the same order they're added. Otherwise, we'll get different class IDs, and the model's predictions won't make sense.
To allow stable sorting, we map the new labels to negative values. If we have two new labels, they'll be noted as having "frequency" -1 and -2. The next new label will then have "frequency" -3. When we sort by (frequency, label), we then get a stable sort.
Storing frequencies then allows us to make the next nice improvement. Previously we had to iterate over the whole training set, to pre-process it for the deprojectivisation. This led to storing the whole training set in memory. This was most of the required memory during training.
To prevent this, we now store the frequencies as we stream in the data, and deprojectivize as we go. Once we've built the frequencies, we can then apply a frequency cut-off when we decide how many classes to make.
Finally, to allow proper data streaming, we also have to have some way of shuffling the iterator. This is awkward if the training files have multiple documents in them. To solve this, the GoldCorpus class now writes the training data to disk in msgpack files, one per document. We can then shuffle the data by shuffling the paths.
This is a squash merge, as I made a lot of very small commits. Individual commit messages below.
* Simplify label management for TransitionSystem and its subclasses
* Fix serialization for new label handling format in parser
* Simplify and improve GoldCorpus class. Reduce memory use, write to temp dir
* Set actions in transition system
* Require thinc 6.11.1.dev4
* Fix error in parser init
* Add unicode declaration
* Fix unicode declaration
* Update textcat test
* Try to get model training on less memory
* Print json loc for now
* Try rapidjson to reduce memory use
* Remove rapidjson requirement
* Try rapidjson for reduced mem usage
* Handle None heads when projectivising
* Stream json docs
* Fix train script
* Handle projectivity in GoldParse
* Fix projectivity handling
* Add minibatch_by_words util from ud_train
* Minibatch by number of words in spacy.cli.train
* Move minibatch_by_words util to spacy.util
* Fix label handling
* More hacking at label management in parser
* Fix encoding in msgpack serialization in GoldParse
* Adjust batch sizes in parser training
* Fix minibatch_by_words
* Add merge_subtokens function to pipeline.pyx
* Register merge_subtokens factory
* Restore use of msgpack tmp directory
* Use minibatch-by-words in train
* Handle retokenization in scorer
* Change back-off approach for missing labels. Use 'dep' label
* Update NER for new label management
* Set NER tags for over-segmented words
* Fix label alignment in gold
* Fix label back-off for infrequent labels
* Fix int type in labels dict key
* Fix int type in labels dict key
* Update feature definition for 8 feature set
* Update ud-train script for new label stuff
* Fix json streamer
* Print the line number if conll eval fails
* Update children and sentence boundaries after deprojectivisation
* Export set_children_from_heads from doc.pxd
* Render parses during UD training
* Remove print statement
* Require thinc 6.11.1.dev6. Try adding wheel as install_requires
* Set different dev version, to flush pip cache
* Update thinc version
* Update GoldCorpus docs
* Remove print statements
* Fix formatting and links [ci skip]
2018-03-19 04:58:08 +03:00
|
|
|
if head == child:
|
|
|
|
actions[BREAK][label] += 1
|
|
|
|
elif head < child:
|
|
|
|
actions[RIGHT][label] += 1
|
|
|
|
actions[REDUCE][''] += 1
|
|
|
|
elif head > child:
|
|
|
|
actions[LEFT][label] += 1
|
|
|
|
actions[SHIFT][''] += 1
|
|
|
|
if min_freq is not None:
|
|
|
|
for action, label_freqs in actions.items():
|
|
|
|
for label, freq in list(label_freqs.items()):
|
|
|
|
if freq < min_freq:
|
|
|
|
label_freqs.pop(label)
|
|
|
|
# Ensure these actions are present
|
|
|
|
actions[BREAK].setdefault('ROOT', 0)
|
|
|
|
actions[RIGHT].setdefault('subtok', 0)
|
|
|
|
actions[LEFT].setdefault('subtok', 0)
|
|
|
|
# Used for backoff
|
|
|
|
actions[RIGHT].setdefault('dep', 0)
|
|
|
|
actions[LEFT].setdefault('dep', 0)
|
2016-10-16 22:34:57 +03:00
|
|
|
return actions
|
2015-06-05 03:27:17 +03:00
|
|
|
|
2016-01-19 21:07:43 +03:00
|
|
|
    # Legacy Cython property syntax: the ordered tuple of action type ids.
    property action_types:
        def __get__(self):
            return (SHIFT, REDUCE, LEFT, RIGHT, BREAK)
|
|
|
|
|
2017-08-18 23:38:59 +03:00
|
|
|
def is_gold_parse(self, StateClass state, GoldParse gold):
    """Check whether the arcs in `state` exactly reproduce the gold arcs.

    Tokens with no gold alignment (cand_to_gold is None) are skipped.
    Returns True only when the two arc sets are identical.
    """
    gold_arcs = set()
    parsed_arcs = set()
    for tok in range(gold.length):
        gold_i = gold.cand_to_gold[tok]
        if gold_i is None:
            continue
        dep_id = state.safe_get(tok).dep
        if dep_id:
            label = self.strings[dep_id]
        else:
            # Unlabelled tokens are treated as roots.
            label = 'ROOT'
        parsed_arcs.add((tok, state.H(tok), label))
        id_, word, tag, head, dep, ner = gold.orig_annot[gold_i]
        gold_arcs.add((id_, head, dep))
    return gold_arcs == parsed_arcs
|
|
|
|
|
2017-05-26 19:31:23 +03:00
|
|
|
def has_gold(self, GoldParse gold, start=0, end=None):
    """Return True if the slice [start:end) of the gold parse contains at
    least one annotated head, i.e. there is something to learn from.

    NOTE: `end or len(...)` means a falsy `end` (0) also falls back to the
    full length; kept for backward compatibility with existing callers.
    """
    end = end or len(gold.heads)
    # Replaces the old all([...])-then-if/else with a direct any() over a
    # generator: no intermediate list, short-circuits on the first head.
    return any(head is not None for head in gold.heads[start:end])
|
|
|
|
|
2017-05-22 18:30:12 +03:00
|
|
|
def preprocess_gold(self, GoldParse gold):
    """Prepare a GoldParse for training with this transition system.

    Writes the (possibly backed-off) head and label of each token into
    the C-level arrays `gold.c.heads` / `gold.c.labels`, flagging tokens
    with missing annotation. Returns the mutated GoldParse, or None when
    the example has no head annotation at all.
    """
    if not self.has_gold(gold):
        return None
    for i, (head, dep) in enumerate(zip(gold.heads, gold.labels)):
        # Missing values
        if head is None or dep is None:
            # Attach the token to itself and mark the dependency as
            # absent, so the oracle treats it as unannotated.
            gold.c.heads[i] = i
            gold.c.has_dep[i] = False
        else:
            # Which move type would produce this arc?
            if head > i:
                action = LEFT
            elif head < i:
                action = RIGHT
            else:
                action = BREAK
            if dep not in self.labels[action]:
                # Label is not in the model's label set: back off.
                if action == BREAK:
                    dep = 'ROOT'
                elif nonproj.is_decorated(dep):
                    # Label appears to carry projectivisation decoration;
                    # try its first decomposed component before giving up.
                    backoff = nonproj.decompose(dep)[0]
                    if backoff in self.labels[action]:
                        dep = backoff
                    else:
                        dep = 'dep'
                else:
                    # Generic back-off label.
                    dep = 'dep'
            gold.c.has_dep[i] = True
            # Normalise casing of the root label.
            if dep.upper() == 'ROOT':
                dep = 'ROOT'
            gold.c.heads[i] = head
            gold.c.labels[i] = self.strings.add(dep)
    return gold
|
2015-06-05 03:27:17 +03:00
|
|
|
|
2018-02-15 23:03:16 +03:00
|
|
|
def get_beam_parses(self, Beam beam):
    """Return a list of (probability, arcs) tuples, one per finished
    state in the beam, where arcs is a list of (head, child, label)."""
    scores = beam.probs
    results = []
    for k in range(beam.size):
        state = <StateC*>beam.at(k)
        if not state.is_final():
            continue
        self.finalize_state(state)
        arcs = []
        for child in range(state.length):
            arcs.append(
                (state.H(child), child, self.strings[state._sent[child].dep]))
        results.append((scores[k], arcs))
    return results
|
|
|
|
|
2015-06-05 03:27:17 +03:00
|
|
|
cdef Transition lookup_transition(self, object name) except *:
    """Resolve an action name such as 'LEFT-amod' to its Transition.

    Falls back to a MISSING transition when no action matches.
    """
    # partition() is equivalent to the old `'-' in name` + split('-', 1):
    # sep is truthy exactly when the name carries a label part.
    move_str, sep, label_str = name.partition('-')
    label = self.strings[label_str] if sep else 0
    move = MOVE_NAMES.index(move_str)
    for i in range(self.n_moves):
        if self.c[i].move == move and self.c[i].label == label:
            return self.c[i]
    return Transition(clas=0, move=MISSING, label=0)
|
2015-06-05 03:27:17 +03:00
|
|
|
|
2017-05-28 15:06:40 +03:00
|
|
|
def move_name(self, int move, attr_t label):
    """Return the display name of an action, e.g. 'LEFT-amod', or just
    the move name when the label is empty."""
    label_str = self.strings[label]
    if not label_str:
        return MOVE_NAMES[move]
    return MOVE_NAMES[move] + '-' + label_str
|
|
|
|
|
2017-05-28 15:06:40 +03:00
|
|
|
cdef Transition init_transition(self, int clas, int move, attr_t label) except *:
    """Build a Transition struct for (clas, move, label), wiring the
    validity/transition/cost function pointers for the move type.

    Raises a plain Exception for an unknown move code.
    """
    # TODO: Apparent Cython bug here when we try to use the Transition()
    # constructor with the function pointers
    cdef Transition t
    t.score = 0
    t.clas = clas
    t.move = move
    t.label = label
    if move == SHIFT:
        t.is_valid = Shift.is_valid
        t.do = Shift.transition
        t.get_cost = Shift.cost
    elif move == REDUCE:
        t.is_valid = Reduce.is_valid
        t.do = Reduce.transition
        t.get_cost = Reduce.cost
    elif move == LEFT:
        t.is_valid = LeftArc.is_valid
        t.do = LeftArc.transition
        t.get_cost = LeftArc.cost
    elif move == RIGHT:
        t.is_valid = RightArc.is_valid
        t.do = RightArc.transition
        t.get_cost = RightArc.cost
    elif move == BREAK:
        t.is_valid = Break.is_valid
        t.do = Break.transition
        t.get_cost = Break.cost
    else:
        raise Exception(move)
    return t
|
|
|
|
|
2016-02-01 10:34:55 +03:00
|
|
|
cdef int initialize_state(self, StateC* st) nogil:
    # Reset per-token parse state for every token that has no dependency
    # label yet, then call st.fast_forward() to advance the state.
    for i in range(st.length):
        if st._sent[i].dep == 0:
            # Unlabelled token: make it its own edge span with no head,
            # label, or children.
            st._sent[i].l_edge = i
            st._sent[i].r_edge = i
            st._sent[i].head = 0
            st._sent[i].dep = 0
            st._sent[i].l_kids = 0
            st._sent[i].r_kids = 0
    st.fast_forward()
|
2015-06-05 03:27:17 +03:00
|
|
|
|
2016-02-01 10:34:55 +03:00
|
|
|
cdef int finalize_state(self, StateC* st) nogil:
    # Give the root label to every token whose head field is still 0.
    # NOTE(review): head is presumably a relative offset, so 0 means
    # self-attached / headless — confirm against StateC.
    cdef int i
    for i in range(st.length):
        if st._sent[i].head == 0:
            st._sent[i].dep = self.root_label
|
2015-06-05 03:27:17 +03:00
|
|
|
|
2016-05-02 15:25:10 +03:00
|
|
|
def finalize_doc(self, doc):
    """Mark the Doc as parsed once the transition sequence is complete."""
    doc.is_parsed = True
|
|
|
|
|
2016-02-01 05:07:37 +03:00
|
|
|
cdef int set_valid(self, int* output, const StateC* st) nogil:
    # Fill output[i] with whether action i is applicable in state `st`.
    # Validity depends only on the move type (each is_valid is called
    # with label 0), so compute it once per move and broadcast over the
    # full action list.
    cdef bint[N_MOVES] is_valid
    is_valid[SHIFT] = Shift.is_valid(st, 0)
    is_valid[REDUCE] = Reduce.is_valid(st, 0)
    is_valid[LEFT] = LeftArc.is_valid(st, 0)
    is_valid[RIGHT] = RightArc.is_valid(st, 0)
    is_valid[BREAK] = Break.is_valid(st, 0)
    cdef int i
    for i in range(self.n_moves):
        output[i] = is_valid[self.c[i].move]
|
|
|
|
|
2017-03-10 20:22:04 +03:00
|
|
|
cdef int set_costs(self, int* is_valid, weight_t* costs,
                   StateClass stcls, GoldParse gold) except -1:
    """Write per-action validity into `is_valid` and oracle costs into
    `costs` for the state `stcls`, given the gold annotations.

    A cost <= 0 marks a gold-compatible action. If no valid action is
    gold-compatible, raise ValueError with a diagnosis: either an
    unknown label, or a non-projective gold tree.
    """
    cdef int i, move
    cdef attr_t label
    cdef label_cost_func_t[N_MOVES] label_cost_funcs
    cdef move_cost_func_t[N_MOVES] move_cost_funcs
    cdef weight_t[N_MOVES] move_costs
    # 9000 doubles as the "invalid action" cost and the "move cost not
    # yet computed" sentinel below.
    for i in range(N_MOVES):
        move_costs[i] = 9000
    move_cost_funcs[SHIFT] = Shift.move_cost
    move_cost_funcs[REDUCE] = Reduce.move_cost
    move_cost_funcs[LEFT] = LeftArc.move_cost
    move_cost_funcs[RIGHT] = RightArc.move_cost
    move_cost_funcs[BREAK] = Break.move_cost

    label_cost_funcs[SHIFT] = Shift.label_cost
    label_cost_funcs[REDUCE] = Reduce.label_cost
    label_cost_funcs[LEFT] = LeftArc.label_cost
    label_cost_funcs[RIGHT] = RightArc.label_cost
    label_cost_funcs[BREAK] = Break.label_cost

    n_gold = 0
    for i in range(self.n_moves):
        if self.c[i].is_valid(stcls.c, self.c[i].label):
            is_valid[i] = True
            move = self.c[i].move
            label = self.c[i].label
            # Compute each move's cost lazily, at most once per state.
            if move_costs[move] == 9000:
                move_costs[move] = move_cost_funcs[move](stcls, &gold.c)
            costs[i] = move_costs[move] + label_cost_funcs[move](stcls, &gold.c, label)
            n_gold += costs[i] <= 0
        else:
            is_valid[i] = False
            costs[i] = 9000
    if n_gold < 1:
        # Check label set --- leading cause
        label_set = set([self.strings[self.c[i].label] for i in range(self.n_moves)])
        for label_str in gold.labels:
            if label_str is not None and label_str not in label_set:
                raise ValueError("Cannot get gold parser action: unknown label: %s" % label_str)
        # Check projectivity --- other leading cause
        if nonproj.is_nonproj_tree(gold.heads):
            raise ValueError(
                "Could not find a gold-standard action to supervise the "
                "dependency parser. Likely cause: the tree is "
                "non-projective (i.e. it has crossing arcs -- see "
                "spacy/syntax/nonproj.pyx for definitions). The ArcEager "
                "transition system only supports projective trees. To "
                "learn non-projective representations, transform the data "
                "before training and after parsing. Either pass "
                "make_projective=True to the GoldParse class, or use "
                "spacy.syntax.nonproj.preprocess_training_data.")
        else:
            # Deliberate diagnostics: dump the gold annotations before
            # raising, to make the unexplained failure debuggable.
            print(gold.orig_annot)
            print(gold.words)
            print(gold.heads)
            print(gold.labels)
            print(gold.sent_starts)
            # Fixed: the first two fragments previously joined without a
            # space, producing "...supervise thedependency parser".
            raise ValueError(
                "Could not find a gold-standard action to supervise the "
                "dependency parser. The GoldParse was projective. The "
                "transition system has %d actions. State at failure: %s"
                % (self.n_moves, stcls.print_state(gold.words)))
    assert n_gold >= 1
|
2017-07-20 16:02:55 +03:00
|
|
|
|
|
|
|
def get_beam_annot(self, Beam beam):
    """Aggregate head and label probabilities across a beam.

    Returns (heads, deps): one dict per token, mapping candidate head
    index / label id to its summed probability over finished states.
    """
    n_tokens = (<StateC*>beam.at(0)).length
    head_probs = [{} for _ in range(n_tokens)]
    dep_probs = [{} for _ in range(n_tokens)]
    scores = beam.probs
    for k in range(beam.size):
        state = <StateC*>beam.at(k)
        self.finalize_state(state)
        if not state.is_final():
            continue
        p = scores[k]
        for tok in range(state.length):
            head_idx = tok + state._sent[tok].head
            label_id = state._sent[tok].dep
            head_probs[tok][head_idx] = head_probs[tok].get(head_idx, 0.0) + p
            dep_probs[tok][label_id] = dep_probs[tok].get(label_id, 0.0) + p
    return head_probs, dep_probs
|