mirror of
				https://github.com/explosion/spaCy.git
				synced 2025-11-04 01:48:04 +03:00 
			
		
		
		
	* Add doc.cats to spacy.gold at the paragraph level
Support `doc.cats` as `"cats": [{"label": string, "value": number}]` in
the spacy JSON training format at the paragraph level.
* `spacy.gold.docs_to_json()` writes `docs.cats`
* `GoldCorpus` reads in cats in each `GoldParse`
* Update instances of gold_tuples to handle cats
Update iteration over gold_tuples / gold_parses to handle addition of
cats at the paragraph level.
* Add textcat to train CLI
* Add textcat options to train CLI
* Add textcat labels in `TextCategorizer.begin_training()`
* Add textcat evaluation to `Scorer`:
  * For binary exclusive classes with provided label: F1 for label
  * For 2+ exclusive classes: F1 macro average
  * For multilabel (not exclusive): ROC AUC macro average (currently
relying on sklearn)
* Provide user info on textcat evaluation settings, potential
incompatibilities
* Provide pipeline to Scorer in `Language.evaluate` for textcat config
* Customize train CLI output to include only metrics relevant to current
pipeline
* Add textcat evaluation to evaluate CLI
* Fix handling of unset arguments and config params
Fix handling of unset arguments and model config parameters in Scorer
initialization.
* Temporarily add sklearn requirement
* Remove sklearn version number
* Improve Scorer handling of models without textcats
* Fixing Scorer handling of models without textcats
* Update Scorer output for python 2.7
* Modify inf in Scorer for python 2.7
* Auto-format
Also make small adjustments to make auto-formatting with black easier and produce nicer results
* Move error message to Errors
* Update documentation
* Add cats to annotation JSON format [ci skip]
* Fix tpl flag and docs [ci skip]
* Switch to internal roc_auc_score
Switch to internal `roc_auc_score()` adapted from scikit-learn.
* Add AUCROCScore tests and improve errors/warnings
* Add tests for AUCROCScore and roc_auc_score
* Add missing error for only positive/negative values
* Remove unnecessary warnings and errors
* Make reduced roc_auc_score functions private
Because most of the checks and warnings have been stripped for the
internal functions and access is only intended through `ROCAUCScore`,
make the functions for roc_auc_score adapted from scikit-learn private.
* Check that data corresponds with multilabel flag
Check that the training instances correspond with the multilabel flag,
adding the multilabel flag if required.
* Add textcat score to early stopping check
* Add more checks to debug-data for textcat
* Add example training data for textcat
* Add more checks to textcat train CLI
* Check configuration when extending base model
* Fix typos
* Update textcat example data
* Provide licensing details and licenses for data
* Remove two labels with no positive instances from jigsaw-toxic-comment
data.
Co-authored-by: Ines Montani <ines@ines.io>
		
	
			
		
			
				
	
	
		
			54 lines
		
	
	
		
			1.5 KiB
		
	
	
	
		
			Python
		
	
	
	
	
	
			
		
		
	
	
			54 lines
		
	
	
		
			1.5 KiB
		
	
	
	
		
			Python
		
	
	
	
	
	
from pathlib import Path
 | 
						|
import plac
 | 
						|
import spacy
 | 
						|
from spacy.gold import docs_to_json
 | 
						|
import srsly
 | 
						|
import sys
 | 
						|
 | 
						|
@plac.annotations(
    model=("Model name. Defaults to 'en'.", "option", "m", str),
    input_file=("Input file (jsonl)", "positional", None, Path),
    output_dir=("Output directory", "positional", None, Path),
    n_texts=("Number of texts to convert", "option", "t", int),
)
def convert(model='en', input_file=None, output_dir=None, n_texts=0):
    """Convert a JSONL file of {"text": ..., "cats": ...} records into the
    spaCy JSON training format, segmenting each text into sentences.

    model (str): spaCy model name used for tokenization.
    input_file (Path): JSONL input; one JSON object per line with "text"
        and "cats" keys.
    output_dir (Path): Directory for the output file. Created if missing;
        defaults to the current directory when None.
    n_texts (int): Maximum number of texts to convert (0 = all).
    """
    # Load model with tokenizer + sentencizer only: disable every existing
    # pipe, then add a sentencizer so docs get sentence boundaries without
    # full annotation.
    nlp = spacy.load(model)
    nlp.disable_pipes(*nlp.pipe_names)
    sentencizer = nlp.create_pipe("sentencizer")
    nlp.add_pipe(sentencizer, first=True)

    texts = []
    cats = []

    if not input_file.exists():
        print("Input file not found:", input_file)
        sys.exit(1)
    with open(input_file) as fileh:
        for line in fileh:
            data = srsly.json_loads(line)
            texts.append(data["text"])
            cats.append(data["cats"])

    # FIX: the original checked its counter *after* appending, so it
    # converted n_texts + 1 docs. Truncate up front instead, which also
    # avoids tokenizing texts that would be discarded.
    if n_texts > 0:
        texts = texts[:n_texts]
        cats = cats[:n_texts]

    if output_dir is not None:
        output_dir = Path(output_dir)
        if not output_dir.exists():
            output_dir.mkdir()
    else:
        output_dir = Path(".")

    docs = []
    for doc, doc_cats in zip(nlp.pipe(texts), cats):
        doc.cats = doc_cats
        docs.append(doc)

    # FIX: the original joined output_dir with the *full* input path
    # (suffix swapped), which kept the input's directory components and,
    # for an absolute input_file, made pathlib discard output_dir
    # entirely. Join only the basename.
    out_file = output_dir / Path(input_file.name).with_suffix(".json")
    srsly.write_json(out_file, [docs_to_json(docs)])
 | 
						|
 | 
						|
if __name__ == "__main__":
    # CLI entry point: plac builds the argument parser from the
    # annotations on convert() and dispatches sys.argv to it.
    plac.call(convert)
 |