Add fallback in requirements check, only check once (#11735)

* Add fallback in requirements check, only check once

* Rename to skip_requirements_check

* Update spacy/cli/project/run.py

Co-authored-by: Paul O'Leary McCann <polm@dampfkraft.com>

Adriane Boyd authored 2022-11-07 14:46:08 +01:00, committed by GitHub
parent 6105f20d8a
commit e116395f89
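Before the diff, a minimal usage sketch of the new keyword argument. This is an illustration only: the project directory and command name are placeholders, while the import path and the skip_requirements_check flag come from the file changed below.

    from pathlib import Path

    from spacy.cli.project.run import project_run

    # Default behaviour: the requirements.txt check runs (at most) once.
    project_run(Path("my_project"), "preprocess")

    # Callers that have already validated requirements can opt out explicitly.
    project_run(Path("my_project"), "preprocess", skip_requirements_check=True)

When a workflow is run, project_run now re-invokes itself for each step with skip_requirements_check=True, so the check happens once per run rather than once per step (see the @@ -90 hunk below).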

spacy/cli/project/run.py

@@ -53,6 +53,7 @@ def project_run(
     force: bool = False,
     dry: bool = False,
     capture: bool = False,
+    skip_requirements_check: bool = False,
 ) -> None:
     """Run a named script defined in the project.yml. If the script is part
     of the default pipeline (defined in the "run" section), DVC is used to
@@ -69,6 +70,7 @@ def project_run(
     sys.exit will be called with the return code. You should use capture=False
     when you want to turn over execution to the command, and capture=True
     when you want to run the command more like a function.
+    skip_requirements_check (bool): Whether to skip the requirements check.
     """
     config = load_project_config(project_dir, overrides=overrides)
     commands = {cmd["name"]: cmd for cmd in config.get("commands", [])}
@@ -76,9 +78,10 @@ def project_run(
     validate_subcommand(list(commands.keys()), list(workflows.keys()), subcommand)

     req_path = project_dir / "requirements.txt"
-    if config.get("check_requirements", True) and os.path.exists(req_path):
-        with req_path.open() as requirements_file:
-            _check_requirements([req.replace("\n", "") for req in requirements_file])
+    if not skip_requirements_check:
+        if config.get("check_requirements", True) and os.path.exists(req_path):
+            with req_path.open() as requirements_file:
+                _check_requirements([req.strip() for req in requirements_file])

     if subcommand in workflows:
         msg.info(f"Running workflow '{subcommand}'")
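Aside, not part of the diff: a tiny standalone illustration of why the lines read from requirements.txt are now cleaned with str.strip() instead of str.replace("\n", "") — strip() also removes carriage returns and surrounding spaces, e.g. when the file was saved with Windows line endings. The package name is a placeholder.

    line = "  spacy-transformers>=1.1.2\r\n"
    print(repr(line.replace("\n", "")))  # '  spacy-transformers>=1.1.2\r'
    print(repr(line.strip()))            # 'spacy-transformers>=1.1.2'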
@@ -90,6 +93,7 @@ def project_run(
             force=force,
             dry=dry,
             capture=capture,
+            skip_requirements_check=True,
         )
     else:
         cmd = commands[subcommand]
@@ -338,6 +342,11 @@ def _check_requirements(requirements: List[str]) -> Tuple[bool, bool]:
             failed_pkgs_msgs.append(dnf.report())
         except pkg_resources.VersionConflict as vc:
             conflicting_pkgs_msgs.append(vc.report())
+        except Exception:
+            msg.warn(f"Unable to check requirement: {req} "
+                     "Check that the requirement is formatted according to PEP "
+                     "440, in particular that URLs are formatted as "
+                     "'package_name @ URL'")

     if len(failed_pkgs_msgs) or len(conflicting_pkgs_msgs):
         msg.warn(
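For reference, a standalone sketch of the failure mode the new except Exception fallback covers. Assumptions: the requirement string below is a made-up pip-style VCS URL that pkg_resources cannot parse as a PEP 440/508 requirement; only the warn-instead-of-raise behaviour mirrors the hunk above.

    import pkg_resources

    # Hypothetical malformed entry from a requirements.txt (pip VCS syntax,
    # not 'package_name @ URL'); pkg_resources raises a parse error for it.
    req = "git+https://github.com/explosion/spacy-experimental.git"
    try:
        pkg_resources.require(req)
    except pkg_resources.DistributionNotFound:
        print("missing package")   # collected as a failed package
    except pkg_resources.VersionConflict:
        print("version conflict")  # collected as a conflict
    except Exception:
        # Previously this aborted the whole requirements check; with the
        # fallback added above, it only produces a warning.
        print(f"Unable to check requirement: {req}")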