Merge pull request #1801 from sorenlind/avoid_dummy_args

Don't pass CLI command name as dummy argument
Commit f246fab0c1 by Matthew Honnibal, 2018-01-10 18:19:42 +01:00 (committed by GitHub)
14 changed files with 16 additions and 20 deletions
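
A note on why the dummy argument existed in the first place: as the `__main__.py` hunk
below shows, the dispatcher used to call `plac.call(commands[command])` without an
explicit argument list, and plac's `call()` (at least in the plac versions spaCy was
using at the time) takes `sys.argv[1:]` as the default for its `arglist` parameter.
Like any Python default, that slice is evaluated once, when plac is imported, which
happens before `__main__.py` pops the sub-command off `sys.argv`. The command name
therefore reached every CLI function as its first positional argument, and each of
them needed a throw-away `_cmd` parameter to absorb it. A minimal sketch of the quirk
(an illustrative stand-in, not spaCy's or plac's actual code):

    import sys

    sys.argv = ['spacy', 'info', 'en']       # pretend command line

    def call(func, arglist=sys.argv[1:]):    # default frozen here: ['info', 'en']
        # Stand-in for plac.call's default-arglist behaviour: the default is a
        # copy of sys.argv taken at definition time, so later changes to
        # sys.argv do not affect it.
        return func(*arglist)

    sys.argv.pop(1)                               # __main__.py drops the sub-command
    print(call(lambda *args: args))               # ('info', 'en')  needed a dummy arg
    print(call(lambda *args: args, sys.argv[1:])) # ('en',)         the fix in this PR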

@@ -25,4 +25,4 @@ def blank(name, **kwargs):
 
 
 def info(model=None, markdown=False):
-    return cli_info(None, model, markdown)
+    return cli_info(model, markdown)
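
With the dummy gone, the module-level `spacy.info()` wrapper forwards its arguments
to the CLI function unchanged. A hedged usage sketch (requires a spaCy installation;
output depends on the environment):

    import spacy

    # Prints details about the spaCy installation; pass a model shortcut as the
    # first argument to print that model's meta instead.
    spacy.info()
    spacy.info(markdown=True)   # same details, formatted as Markdown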

@@ -28,7 +28,7 @@ if __name__ == '__main__':
     command = sys.argv.pop(1)
     sys.argv[0] = 'spacy %s' % command
     if command in commands:
-        plac.call(commands[command])
+        plac.call(commands[command], sys.argv[1:])
     else:
         prints(
             "Available: %s" % ', '.join(commands),

@@ -24,8 +24,7 @@ CONVERTERS = {
     n_sents=("Number of sentences per doc", "option", "n", int),
     converter=("Name of converter (auto, iob, conllu or ner)", "option", "c", str),
     morphology=("Enable appending morphology to tags", "flag", "m", bool))
-def convert(_cmd, input_file, output_dir, n_sents=1, morphology=False,
-            converter='auto'):
+def convert(input_file, output_dir, n_sents=1, morphology=False, converter='auto'):
     """
     Convert files into JSON format for use with train command and other
     experiment management functions.

@@ -16,7 +16,7 @@ from .. import about
     model=("model to download, shortcut or name)", "positional", None, str),
     direct=("force direct download. Needs model name with version and won't "
             "perform compatibility check", "flag", "d", bool))
-def download(_cmd, model, direct=False):
+def download(model, direct=False):
     """
     Download compatible model from default download path using pip. Model
     can be shortcut, model name or, if --direct flag is set, full model name
@@ -38,8 +38,7 @@ def download(_cmd, model, direct=False):
                 # package, which fails if model was just installed via
                 # subprocess
                 package_path = get_package_path(model_name)
-                link(None, model_name, model, force=True,
-                     model_path=package_path)
+                link(model_name, model, force=True, model_path=package_path)
             except:
                 # Dirty, but since spacy.download and the auto-linking is
                 # mostly a convenience wrapper, it's best to show a success

@@ -25,8 +25,8 @@ numpy.random.seed(0)
     displacy_path=("directory to output rendered parses as HTML", "option",
                    "dp", str),
     displacy_limit=("limit of parses to render as HTML", "option", "dl", int))
-def evaluate(_cmd, model, data_path, gpu_id=-1, gold_preproc=False,
-             displacy_path=None, displacy_limit=25):
+def evaluate(model, data_path, gpu_id=-1, gold_preproc=False, displacy_path=None,
+             displacy_limit=25):
     """
     Evaluate a model. To render a sample of parses in a HTML file, set an
     output directory as the displacy_path argument.

@@ -13,7 +13,7 @@ from .. import util
 @plac.annotations(
     model=("optional: shortcut link of model", "positional", None, str),
     markdown=("generate Markdown for GitHub issues", "flag", "md", str))
-def info(_cmd, model=None, markdown=False):
+def info(model=None, markdown=False):
     """Print info about spaCy installation. If a model shortcut link is
     speficied as an argument, print model information. Flag --markdown
     prints details in Markdown for easy copy-pasting to GitHub issues.

@@ -25,7 +25,7 @@ from ..util import prints, ensure_path, get_lang_class
     prune_vectors=("optional: number of vectors to prune to",
                    "option", "V", int)
 )
-def init_model(_cmd, lang, output_dir, freqs_loc, clusters_loc=None, vectors_loc=None, prune_vectors=-1):
+def init_model(lang, output_dir, freqs_loc, clusters_loc=None, vectors_loc=None, prune_vectors=-1):
     """
     Create a new model from raw data, like word frequencies, Brown clusters
     and word vectors.

@@ -13,7 +13,7 @@ from .. import util
     origin=("package name or local path to model", "positional", None, str),
     link_name=("name of shortuct link to create", "positional", None, str),
     force=("force overwriting of existing link", "flag", "f", bool))
-def link(_cmd, origin, link_name, force=False, model_path=None):
+def link(origin, link_name, force=False, model_path=None):
     """
     Create a symlink for models within the spacy/data directory. Accepts
     either the name of a pip package, or the local path to the model data

@@ -20,7 +20,7 @@ from .. import about
                  "the command line prompt", "flag", "c", bool),
     force=("force overwriting of existing model directory in output directory",
            "flag", "f", bool))
-def package(_cmd, input_dir, output_dir, meta_path=None, create_meta=False,
+def package(input_dir, output_dir, meta_path=None, create_meta=False,
             force=False):
     """
     Generate Python package for model data, including meta and required

@@ -29,7 +29,7 @@ def read_inputs(loc):
 @plac.annotations(
     lang=("model/language", "positional", None, str),
     inputs=("Location of input file", "positional", None, read_inputs))
-def profile(_cmd, lang, inputs=None):
+def profile(lang, inputs=None):
     """
     Profile a spaCy pipeline, to find out which functions take the most time.
     """

@@ -38,7 +38,7 @@ numpy.random.seed(0)
     version=("Model version", "option", "V", str),
     meta_path=("Optional path to meta.json. All relevant properties will be "
                "overwritten.", "option", "m", Path))
-def train(_cmd, lang, output_dir, train_data, dev_data, n_iter=30, n_sents=0,
+def train(lang, output_dir, train_data, dev_data, n_iter=30, n_sents=0,
           use_gpu=-1, vectors=None, no_tagger=False,
           no_parser=False, no_entities=False, gold_preproc=False,
           version="0.0.0", meta_path=None):

@@ -10,7 +10,7 @@ from ..util import prints, get_data_path, read_json
 from .. import about
 
 
-def validate(_cmd):
+def validate():
     """Validate that the currently installed version of spaCy is compatible
     with the installed models. Should be run after `pip install -U spacy`.
     """

@@ -21,8 +21,7 @@ from ..util import prints, ensure_path
     prune_vectors=("optional: number of vectors to prune to.",
                    "option", "V", int)
 )
-def make_vocab(_cmd, lang, output_dir, lexemes_loc,
-               vectors_loc=None, prune_vectors=-1):
+def make_vocab(lang, output_dir, lexemes_loc, vectors_loc=None, prune_vectors=-1):
     """Compile a vocabulary from a lexicon jsonl file and word vectors."""
     if not lexemes_loc.exists():
         prints(lexemes_loc, title="Can't find lexical data", exits=1)

@@ -9,7 +9,6 @@ from ...cli.train import train
 
 @pytest.mark.xfail
 def test_cli_trained_model_can_be_saved(tmpdir):
-    cmd = None
     lang = 'nl'
     output_dir = str(tmpdir)
     train_file = NamedTemporaryFile('wb', dir=output_dir, delete=False)
@@ -86,6 +85,6 @@ def test_cli_trained_model_can_be_saved(tmpdir):
     # spacy train -n 1 -g -1 nl output_nl training_corpus.json training \
     # corpus.json
-    train(cmd, lang, output_dir, train_data, dev_data, n_iter=1)
+    train(lang, output_dir, train_data, dev_data, n_iter=1)
     assert True