def validate_across_args(args):
  """Apply validations that need combinations of arguments to work.

  Returns the original args so the call can be chained after parsing.
  """
  m = vars(args)

  command = m["command"]

  # On macOS, make sure the requested job mode works with this command.
  if u.is_mac():
    job_mode = resolve_job_mode(m)
    mac_gpu_check(job_mode, command)

  if command == "cloud" and not m.get("force"):
    use_gpu = m.get("use_gpu")
    region = conf.extract_region(m)
    gpu_spec = args.gpu_spec
    tpu_spec = args.tpu_spec

    _validate_no_gpu_type(use_gpu, gpu_spec)

    # A TPU is valid with or without an attached GPU.
    _validate_accelerator_region(tpu_spec, region)

    if use_gpu:
      _validate_machine_type(gpu_spec, args.machine_type)
      _validate_accelerator_region(gpu_spec, region)

  return args
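For context, here is a minimal sketch of the kind of cross-argument check a helper like _validate_no_gpu_type performs: reject an explicit GPU spec when GPU mode is disabled. This is an illustrative stand-in, not Caliban's actual implementation, and the exception message is invented.

from argparse import ArgumentTypeError


def _validate_no_gpu_type_sketch(use_gpu, gpu_spec):
  # Illustrative stand-in: a GPU spec only makes sense when GPU mode is on.
  if not use_gpu and gpu_spec is not None:
    raise ArgumentTypeError(
        "A GPU spec was supplied, but GPU mode is disabled; "
        "remove the spec or re-enable GPU mode.")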
def test_extract_region(monkeypatch):
  if os.environ.get('REGION'):
    monkeypatch.delenv('REGION')

  assert c.extract_region({}) == c.DEFAULT_REGION

  # You have to provide a valid region.
  with pytest.raises(ArgumentTypeError):
    c.extract_region({"region": "face"})

  # Same goes for the environment variable setting approach.
  monkeypatch.setenv('REGION', "face")
  with pytest.raises(ArgumentTypeError):
    c.extract_region({})

  # An empty string is fine, and ignored.
  monkeypatch.setenv('REGION', "")
  assert c.extract_region({}) == c.DEFAULT_REGION

  assert c.extract_region({"region": "us-west1"}) == ct.US.west1
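A minimal sketch of the precedence this test exercises, assuming an explicit "region" entry wins over a non-empty REGION environment variable, which in turn wins over the default. The valid_region helper, the region set, and the plain-string return type below are stand-ins rather than caliban.config's real API.

import os
from argparse import ArgumentTypeError

DEFAULT_REGION = "us-central1"
_VALID_REGIONS = {"us-central1", "us-east1", "us-west1"}


def valid_region(name):
  # Stand-in validator: the real code maps names onto typed region values.
  if name not in _VALID_REGIONS:
    raise ArgumentTypeError(f"'{name}' is not a valid Cloud region.")
  return name


def extract_region_sketch(m):
  # Explicit argument > non-empty REGION env var > default; then validate.
  region = m.get("region") or os.environ.get("REGION") or DEFAULT_REGION
  return valid_region(region)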
def run_app(arg_input):
  """Main function to run the Caliban app.

  Accepts a Namespace-type output of an argparse argument parser.
  """
  args = vars(arg_input)
  script_args = c.extract_script_args(args)

  command = args["command"]

  # Cluster commands are dispatched to the GKE CLI before any Docker setup.
  if command == "cluster":
    return gke.cli.run_cli_command(args)

  # Everything else runs inside a container, so resolve the Docker arguments.
  job_mode = cli.resolve_job_mode(args)
  docker_args = cli.generate_docker_args(job_mode, args)
  docker_run_args = args.get("docker_run_args", [])

  if command == "shell":
    mount_home = not args['bare']
    image_id = args.get("image_id")
    dlvm = args.get("dlvm")
    shell = args['shell']
    docker.run_interactive(job_mode,
                           dlvm=dlvm,
                           image_id=image_id,
                           run_args=docker_run_args,
                           mount_home=mount_home,
                           shell=shell,
                           **docker_args)

  elif command == "notebook":
    port = args.get("port")
    lab = args.get("lab")
    dlvm = args.get("dlvm")
    version = args.get("jupyter_version")
    mount_home = not args['bare']
    docker.run_notebook(job_mode,
                        dlvm=dlvm,
                        port=port,
                        lab=lab,
                        version=version,
                        run_args=docker_run_args,
                        mount_home=mount_home,
                        **docker_args)

  elif command == "build":
    package = args["module"]
    docker.build_image(job_mode, package=package, **docker_args)

  elif command == 'status':
    caliban.history.cli.get_status(args)

  elif command == 'stop':
    caliban.history.cli.stop(args)

  elif command == 'resubmit':
    caliban.history.cli.resubmit(args)

  elif command == "run":
    dry_run = args["dry_run"]
    package = args["module"]
    image_id = args.get("image_id")
    dlvm = args.get("dlvm")
    exp_config = args.get("experiment_config")
    xgroup = args.get('xgroup')

    docker.run_experiments(job_mode,
                           run_args=docker_run_args,
                           script_args=script_args,
                           image_id=image_id,
                           dlvm=dlvm,
                           experiment_config=exp_config,
                           dry_run=dry_run,
                           package=package,
                           xgroup=xgroup,
                           **docker_args)

  elif command == "cloud":
    project_id = c.extract_project_id(args)
    region = c.extract_region(args)
    cloud_key = c.extract_cloud_key(args)

    dry_run = args["dry_run"]
    package = args["module"]
    job_name = args.get("name")
    gpu_spec = args.get("gpu_spec")
    tpu_spec = args.get("tpu_spec")
    image_tag = args.get("image_tag")
    machine_type = args.get("machine_type")
    dlvm = args.get("dlvm")
    exp_config = args.get("experiment_config")
    labels = u.sanitize_labels(args.get("label") or [])
    xgroup = args.get('xgroup')

    # Arguments to internally build the image required to submit to Cloud.
    docker_m = {"job_mode": job_mode, "package": package, **docker_args}

    cloud.submit_ml_job(
        job_mode=job_mode,
        docker_args=docker_m,
        region=region,
        project_id=project_id,
        credentials_path=cloud_key,
        dry_run=dry_run,
        job_name=job_name,
        dlvm=dlvm,
        machine_type=machine_type,
        gpu_spec=gpu_spec,
        tpu_spec=tpu_spec,
        image_tag=image_tag,
        labels=labels,
        script_args=script_args,
        experiment_config=exp_config,
        xgroup=xgroup,
    )
  else:
    logging.info("Unknown command: {}".format(command))
    sys.exit(1)
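A hedged usage sketch of what run_app expects: an argparse Namespace whose attributes cover "command" plus whatever that subcommand reads. The attribute values below are made up, and a real 'build' invocation would also need the flags that resolve_job_mode and generate_docker_args consume, which Caliban's own parser supplies.

from argparse import Namespace

# Illustrative only: attribute names mirror the lookups in run_app above.
build_args = Namespace(
    command="build",
    module="trainer.train",  # hypothetical package to bake into the image
    dry_run=False,
)
# run_app(build_args)  # left commented out: a real call also needs the
# job-mode and Docker flags that Caliban's argument parser fills in.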