mirror of
https://github.com/explosion/spaCy.git
synced 2024-11-11 04:08:09 +03:00
37c7c85a86
* Support nowrap setting in util.prints * Tidy up and fix whitespace * Simplify script and use read_jsonl helper * Add JSON schemas (see #2928) * Deprecate Doc.print_tree Will be replaced with Doc.to_json, which will produce a unified format * Add Doc.to_json() method (see #2928) Converts Doc objects to JSON using the same unified format as the training data. Method also supports serializing selected custom attributes in the doc._. space. * Remove outdated test * Add write_json and write_jsonl helpers * WIP: Update spacy train * Tidy up spacy train * WIP: Use wasabi for formatting * Add GoldParse helpers for JSON format * WIP: add debug-data command * Fix typo * Add missing import * Update wasabi pin * Add missing import * 💫 Refactor CLI (#2943) To be merged into #2932. ## Description - [x] refactor CLI To use [`wasabi`](https://github.com/ines/wasabi) - [x] use [`black`](https://github.com/ambv/black) for auto-formatting - [x] add `flake8` config - [x] move all messy UD-related scripts to `cli.ud` - [x] make converters function that take the opened file and return the converted data (instead of having them handle the IO) ### Types of change enhancement ## Checklist <!--- Before you submit the PR, go over this checklist and make sure you can tick off all the boxes. [] -> [x] --> - [x] I have submitted the spaCy Contributor Agreement. - [x] I ran the tests, and all new and existing tests passed. - [x] My changes don't require a change to the documentation, or if they do, I've added all required information. * Update wasabi pin * Delete old test * Update errors * Fix typo * Tidy up and format remaining code * Fix formatting * Improve formatting of messages * Auto-format remaining code * Add tok2vec stuff to spacy.train * Fix typo * Update wasabi pin * Fix path checks for when train() is called as function * Reformat and tidy up pretrain script * Update argument annotations * Raise error if model language doesn't match lang * Document new train command
70 lines
2.3 KiB
Python
70 lines
2.3 KiB
Python
# coding: utf8
|
|
from __future__ import unicode_literals, division, print_function
|
|
|
|
import plac
|
|
from pathlib import Path
|
|
import ujson
|
|
import cProfile
|
|
import pstats
|
|
import sys
|
|
import tqdm
|
|
import cytoolz
|
|
import thinc.extra.datasets
|
|
from wasabi import Printer
|
|
|
|
from ..util import load_model
|
|
|
|
|
|
@plac.annotations(
    model=("Model to load", "positional", None, str),
    inputs=("Location of input file. '-' for stdin.", "positional", None, str),
    n_texts=("Maximum number of texts to use if available", "option", "n", int),
)
def profile(model, inputs=None, n_texts=10000):
    """
    Profile a spaCy pipeline to find out which functions take the most time.

    Input should be formatted as one JSON object per line with a "text" key,
    either as a JSONL file or read from sys.stdin. If no input is given, the
    IMDB dataset is loaded via Thinc as a fallback corpus.
    """
    printer = Printer()
    if inputs is None:
        # No input file: fall back to the IMDB reviews shipped with Thinc.
        imdb_limit = 25000
        with printer.loading("Loading IMDB dataset via Thinc..."):
            train_data, _ = thinc.extra.datasets.imdb()
            inputs, _ = zip(*train_data)
        printer.info("Loaded IMDB dataset and using {} examples".format(imdb_limit))
        inputs = inputs[:imdb_limit]
    else:
        # Lazily yields the "text" field of each JSONL record.
        inputs = _read_inputs(inputs, printer)
    with printer.loading("Loading model '{}'...".format(model)):
        nlp = load_model(model)
    printer.good("Loaded model '{}'".format(model))
    # NB: runctx looks up nlp/texts by name in locals(), so these local
    # names must match the statement string below.
    texts = list(cytoolz.take(n_texts, inputs))
    cProfile.runctx("parse_texts(nlp, texts)", globals(), locals(), "Profile.prof")
    stats = pstats.Stats("Profile.prof")
    printer.divider("Profile stats")
    stats.strip_dirs().sort_stats("time").print_stats()
|
|
|
|
|
|
def parse_texts(nlp, texts):
    """Run the pipeline over the texts, discarding the resulting docs.

    Exists purely so the pipeline components show up under one frame in the
    cProfile output; a tqdm bar reports progress while it runs.
    """
    stream = nlp.pipe(tqdm.tqdm(texts), batch_size=16)
    for _ in stream:
        pass
|
|
|
|
|
|
def _read_inputs(loc, msg):
    """Yield the "text" value of each JSONL record in the input.

    loc (unicode): Path to a JSONL file, or "-" to read from sys.stdin.
    msg (Printer): Printer used to report status and to fail (exit code 1)
        on an invalid path.
    YIELDS (unicode): The "text" field of each input line.
    """
    if loc == "-":
        msg.info("Reading input from sys.stdin")
        file_ = sys.stdin
        # Re-encode to utf8 bytes — presumably for Python 2 parity with
        # file input (this file still imports unicode_literals); verify
        # before removing.
        file_ = (line.encode("utf8") for line in file_)
        should_close = False  # never close the process's stdin
    else:
        input_path = Path(loc)
        if not input_path.exists() or not input_path.is_file():
            msg.fail("Not a valid input data file", loc, exits=1)
        msg.info("Using data from {}".format(input_path.parts[-1]))
        file_ = input_path.open()
        should_close = True
    try:
        for line in file_:
            data = ujson.loads(line)
            yield data["text"]
    finally:
        # Close the handle we opened even when the consumer stops iterating
        # early (profile() truncates this generator with cytoolz.take); the
        # previous version leaked the open file in that case.
        if should_close:
            file_.close()