def test_no_scenario():
    """A config lacking the required 'scenario' section must fail validation."""
    config_file = pathlib.Path("tests/scenarios/broken/missing_scenario.json")
    expected = r"'scenario' is a required property"
    with pytest.raises(jsonschema.ValidationError, match=expected):
        load_config(str(config_file))
def test_all_examples():
    """Every tf1/tf2/pytorch example config should load without raising."""
    patterns = (
        "tests/scenarios/tf1/*.json",
        "tests/scenarios/tf2/*.json",
        "tests/scenarios/pytorch/*.json",
    )
    for pattern in patterns:
        for json_path in glob(pattern):
            load_config(str(json_path))
def test_invalid_module():
    """A malformed 'module' value must trip the schema's pattern check."""
    config_file = pathlib.Path("tests/scenarios/broken/invalid_module.json")
    expected = (
        r"Failed validating 'pattern' in schema\[0\]\['properties'\]\['module'\]"
    )
    with pytest.raises(jsonschema.ValidationError, match=expected):
        load_config(str(config_file))
def test_invalid_dataset_framework():
    """An unknown dataset framework must be rejected by the enum check."""
    config_file = pathlib.Path(
        "tests/scenarios/broken/invalid_dataset_framework.json")
    expected = r"is not one of \['tf', 'pytorch', 'numpy'\]"
    with pytest.raises(jsonschema.ValidationError, match=expected):
        load_config(str(config_file))
def __init__(self, config_path: str, container_config_name="eval-config.json"):
    """Load the evaluation config and prepare a Docker management instance.

    Args:
        config_path: path to the evaluation json config file.
        container_config_name: filename the config is staged under in the
            container's tmp directory.
    """
    self.extra_env_vars = None
    self.config = load_config(config_path)
    # Host-side staging location for the config file.
    self.tmp_config = os.path.join(paths.TMP, container_config_name)
    # Path as seen from inside the (Linux) container, hence as_posix().
    self.unix_config_path = Path(
        os.path.join(paths.DOCKER_TMP, container_config_name)).as_posix()
    kwargs = dict(runtime="runc")
    # Switch to the nvidia container runtime when the config requests GPUs.
    if self.config["sysconfig"].get("use_gpu", None):
        kwargs["runtime"] = "nvidia"
    # Optional source downloads requested by the sysconfig section.
    if self.config["sysconfig"].get("external_github_repo", None):
        self._download_external()
    if self.config["sysconfig"].get("use_armory_private", None):
        self._download_private()
    image_name = self.config["sysconfig"].get("docker_image")
    kwargs["image_name"] = image_name
    # Download docker image on host
    docker_client = docker.from_env()
    try:
        docker_client.images.get(kwargs["image_name"])
    except ImageNotFound:
        logger.info(f"Image {image_name} was not found. Downloading...")
        docker_client.images.pull(image_name)
    self.manager = ManagementInstance(**kwargs)
def test_scenario_configs():
    """Each official scenario config must reference the current docker image version."""
    for json_path in glob("scenario_configs/*.json"):
        config = load_config(str(json_path))
        docker_image = config["sysconfig"]["docker_image"]
        assert __version__ in docker_image, (
            "Docker image does not match version in repository")
def test_all_examples():
    """Every example config must reference the current docker image version."""
    patterns = (
        "tests/scenarios/tf1/*.json",
        "tests/scenarios/tf2/*.json",
        "tests/scenarios/pytorch/*.json",
    )
    for pattern in patterns:
        for json_path in glob(pattern):
            config = load_config(str(json_path))
            docker_image = config["sysconfig"]["docker_image"]
            assert __version__ in docker_image, (
                "Docker image does not match version in repository")
def _get_config(config_json, from_file=False): if from_file: config = load_config(config_json) else: config_base64_bytes = config_json.encode("utf-8") config_b64_bytes = base64.b64decode(config_base64_bytes) config_string = config_b64_bytes.decode("utf-8") config = json.loads(config_string) return config
def run(command_args, prog, description):
    """CLI entry point: parse arguments, load/validate the config, launch the Evaluator.

    Args:
        command_args: argument list to parse (e.g. sys.argv slice).
        prog: program name shown in help output.
        description: description shown in help output.

    Exits the process with status 1 on a config validation or decode error.
    """
    parser = argparse.ArgumentParser(prog=prog, description=description)
    parser.add_argument("filepath",
                        metavar="<json_config>",
                        type=str,
                        help="json config file")
    # Shared flag helpers registered by the surrounding module.
    _debug(parser)
    _interactive(parser)
    _jupyter(parser)
    _port(parser)
    _use_gpu(parser)
    _gpus(parser)
    _no_docker(parser)
    parser.add_argument(
        "--output-dir",
        type=str,
        help="Override of default output directory prefix",
    )
    parser.add_argument(
        "--output-filename",
        type=str,
        help="Override of default output filename prefix",
    )
    parser.add_argument(
        "--check",
        action="store_true",
        help="Whether to quickly check to see if scenario code runs",
    )
    args = parser.parse_args(command_args)
    coloredlogs.install(level=args.log_level)
    try:
        config = load_config(args.filepath)
    except ValidationError as e:
        logger.error(
            f"Could not validate config: {e.message} @ {'.'.join(e.absolute_path)}"
        )
        sys.exit(1)
    except json.decoder.JSONDecodeError:
        logger.exception(f"Could not decode {args.filepath} as a json file.")
        # Extra hint when the path does not even look like a json file.
        if not args.filepath.lower().endswith(".json"):
            logger.warning(f"{args.filepath} is not a '*.json' file")
        sys.exit(1)
    # Apply CLI overrides onto the loaded config before launching.
    _set_gpus(config, args.use_gpu, args.gpus)
    _set_outputs(config, args.output_dir, args.output_filename)
    rig = Evaluator(config, no_docker=args.no_docker)
    rig.run(
        interactive=args.interactive,
        jupyter=args.jupyter,
        host_port=args.port,
        check_run=args.check,
    )
def run_config(config_json, from_file=False, check=False, mongo_host=None):
    """Load an evaluation config (file path or base64 JSON) and run its scenario.

    Raises:
        KeyError: if the config has no "scenario" entry.
    """
    if from_file:
        config = load_config(config_json)
    else:
        raw_bytes = base64.b64decode(config_json.encode("utf-8"))
        config = json.loads(raw_bytes.decode("utf-8"))
    scenario_config = config.get("scenario")
    if scenario_config is None:
        raise KeyError('"scenario" missing from evaluation config')
    _scenario_setup(config)
    scenario = config_loading.load(scenario_config)
    scenario.set_check_run(check)
    scenario.evaluate(config, mongo_host)
def test_no_config():
    """A nonexistent config path must raise FileNotFoundError."""
    bogus_path = "not_a_file.json"
    with pytest.raises(FileNotFoundError):
        load_config(bogus_path)
def test_scenario_configs():
    """All official scenario configs must parse and validate."""
    for json_path in glob("scenario_configs/*.json"):
        load_config(str(json_path))
def test_no_config(self):
    """A nonexistent config path must raise FileNotFoundError."""
    bogus_path = "not_a_file.txt"
    with self.assertRaises(FileNotFoundError):
        load_config(bogus_path)
def test_all_examples():
    """Every json config under examples/ should load cleanly."""
    for json_path in pathlib.Path("examples/").glob("*.json"):
        load_config(str(json_path))
def __init__(self,
             config_path: Union[str, dict],
             container_config_name="eval-config.json"):
    """Load/accept the evaluation config and prepare a Docker management instance.

    Args:
        config_path: path to a json config file, or an already-loaded config dict.
        container_config_name: filename the config is staged under in the
            container's tmp directory.

    Raises:
        ValueError: if config_path is neither a str nor a dict.
    """
    self.host_paths = paths.host()
    self.docker_paths = paths.docker()
    # Windows ("nt") has no getuid/getgid; fall back to root ids.
    if os.name != "nt":
        self.user_id, self.group_id = os.getuid(), os.getgid()
    else:
        self.user_id, self.group_id = 0, 0
    self.extra_env_vars = dict()
    if isinstance(config_path, str):
        try:
            self.config = load_config(config_path)
        except json.decoder.JSONDecodeError:
            logger.error(f"Could not decode {config_path} as a json file.")
            if not config_path.lower().endswith(".json"):
                logger.warning(f"{config_path} is not a '*.json' file")
                logger.warning("If using `armory run`, use a json config file.")
            raise
    elif isinstance(config_path, dict):
        self.config = config_path
    else:
        raise ValueError(
            f"config_path {config_path} must be a str or dict")
    # Per-run working directories (shared subdir name for container mapping).
    (
        self.container_subdir,
        self.tmp_dir,
        self.output_dir,
    ) = volumes_util.tmp_output_subdir()
    self.tmp_config = os.path.join(self.tmp_dir, container_config_name)
    self.external_repo_dir = paths.get_external(self.tmp_dir)
    # Config path as seen from inside the (Linux) container, hence as_posix().
    self.docker_config_path = Path(
        os.path.join(self.docker_paths.tmp_dir, container_config_name)).as_posix()
    kwargs = dict(runtime="runc")
    # NOTE(review): nesting below reconstructed from a whitespace-mangled
    # source — confirm gpus handling belongs inside the use_gpu branch.
    if self.config["sysconfig"].get("use_gpu", None):
        kwargs["runtime"] = "nvidia"
        gpus = self.config["sysconfig"].get("gpus")
        if gpus is not None:
            self.extra_env_vars["NVIDIA_VISIBLE_DEVICES"] = gpus
    if self.config["sysconfig"].get("external_github_repo", None):
        self._download_external()
        # Make the downloaded repo importable inside the container.
        self.extra_env_vars.update(
            {"PYTHONPATH": self.docker_paths.external_repo_dir})
    if self.config["sysconfig"].get("use_armory_private", None):
        self._download_private()
    image_name = self.config["sysconfig"].get("docker_image")
    kwargs["image_name"] = image_name
    # Download docker image on host
    docker_client = docker.from_env()
    try:
        docker_client.images.get(kwargs["image_name"])
    except ImageNotFound:
        logger.info(f"Image {image_name} was not found. Downloading...")
        docker_api.pull_verbose(docker_client, image_name)
    except requests.exceptions.ConnectionError:
        # NOTE(review): f-string below has no placeholders — likely unintended.
        logger.error(
            f"Docker connection refused. Is Docker Daemon running?")
        raise
    self.manager = ManagementInstance(**kwargs)
def run(command_args, prog, description):
    """CLI entry point: parse args, load the config (file or stdin), run the Evaluator.

    Args:
        command_args: argument list to parse (e.g. sys.argv slice).
        prog: program name shown in help output.
        description: description shown in help output.

    Exits the process with the evaluator's exit code, or 1 on a config error.
    """
    parser = argparse.ArgumentParser(prog=prog, description=description)
    parser.add_argument(
        "filepath",
        metavar="<json_config>",
        type=str,
        help="json config file. Use '-' to accept standard input or pipe.",
    )
    # Shared flag helpers registered by the surrounding module.
    _debug(parser)
    _interactive(parser)
    _jupyter(parser)
    _port(parser)
    _use_gpu(parser)
    _no_gpu(parser)
    _gpus(parser)
    _no_docker(parser)
    _root(parser)
    parser.add_argument(
        "--output-dir",
        type=str,
        help="Override of default output directory prefix",
    )
    parser.add_argument(
        "--output-filename",
        type=str,
        help="Override of default output filename prefix",
    )
    parser.add_argument(
        "--check",
        action="store_true",
        help="Whether to quickly check to see if scenario code runs",
    )
    parser.add_argument(
        "--num-eval-batches",
        type=int,
        help=
        "Number of batches to use for evaluation of benign and adversarial examples",
    )
    parser.add_argument(
        "--skip-benign",
        action="store_true",
        help="Skip benign inference and metric calculations",
    )
    parser.add_argument(
        "--skip-attack",
        action="store_true",
        help="Skip attack generation and metric calculations",
    )
    parser.add_argument(
        "--validate-config",
        action="store_true",
        help="Validate model configuration against several checks",
    )
    args = parser.parse_args(command_args)
    coloredlogs.install(level=args.log_level)
    try:
        if args.filepath == "-":
            # "-" means read the config from a pipe/redirect, never a TTY.
            if sys.stdin.isatty():
                # NOTE(review): uses the root `logging` module here, not
                # `logger` as elsewhere — possibly unintended.
                logging.error(
                    "Cannot read config from raw 'stdin'; must pipe or redirect a file"
                )
                sys.exit(1)
            logger.info("Reading config from stdin...")
            config = load_config_stdin()
        else:
            config = load_config(args.filepath)
    except ValidationError as e:
        logger.error(
            f"Could not validate config: {e.message} @ {'.'.join(e.absolute_path)}"
        )
        sys.exit(1)
    except json.decoder.JSONDecodeError:
        if args.filepath == "-":
            logger.error("'stdin' did not provide a json-parsable input")
        else:
            logger.error(f"Could not decode '{args.filepath}' as a json file.")
            # Extra hint when the path does not even look like a json file.
            if not args.filepath.lower().endswith(".json"):
                logger.warning(f"{args.filepath} is not a '*.json' file")
        sys.exit(1)
    # Apply CLI overrides onto the loaded config before launching.
    _set_gpus(config, args.use_gpu, args.no_gpu, args.gpus)
    _set_outputs(config, args.output_dir, args.output_filename)
    rig = Evaluator(config, no_docker=args.no_docker, root=args.root)
    exit_code = rig.run(
        interactive=args.interactive,
        jupyter=args.jupyter,
        host_port=args.port,
        check_run=args.check,
        num_eval_batches=args.num_eval_batches,
        skip_benign=args.skip_benign,
        skip_attack=args.skip_attack,
        validate_config=args.validate_config,
    )
    sys.exit(exit_code)
def test_no_evaluation(self):
    """A config missing its evaluation field must raise a descriptive ValueError."""
    expected_msg = "Evaluation field must contain"
    with self.assertRaisesRegex(ValueError, expected_msg):
        load_config("tests/test_data/missing_eval.json")