def download_all_data(command_args, prog, description):
    """
    Script to download all datasets and model weights for offline usage.
    """
    parser = argparse.ArgumentParser(prog=prog, description=description)
    parser.add_argument(
        "-d",
        "--debug",
        dest="log_level",
        action="store_const",
        const=logging.DEBUG,
        default=logging.INFO,
        help="Debug output (logging=DEBUG)",
    )
    args = parser.parse_args(command_args)
    coloredlogs.install(level=args.log_level)

    print("Downloading all docker images....")
    docker_client = docker.from_env()
    docker_images = [
        "twosixarmory/tf1:0.3.3",
        "twosixarmory/tf2:0.3.3",
        "twosixarmory/pytorch:0.3.3",
    ]
    for image in docker_images:
        try:
            docker_client.images.get(image)
        except ImageNotFound:
            print(f"Image {image} was not found. Downloading...")
            docker_client.images.pull(image)

    print("Downloading all datasets and model weights...")
    manager = ManagementInstance(image_name="twosixarmory/tf1:0.3.3")
    runner = manager.start_armory_instance()
    cmd = "; ".join([
        "import logging",
        "import coloredlogs",
        "coloredlogs.install(logging.INFO)",
        "from armory.data import datasets",
        "from armory.data import model_weights",
        "datasets.download_all()",
        "model_weights.download_all()",
    ])
    runner.exec_cmd(f"python -c '{cmd}'")
    manager.stop_armory_instance(runner)
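A minimal usage sketch (not part of the original source), assuming this function is dispatched from a small CLI wrapper; the `prog` string and invocation below are illustrative only.

# Hypothetical driver showing how download_all_data() might be invoked;
# argparse receives everything after the sub-command via command_args.
import sys

if __name__ == "__main__":
    # e.g. `python -m armory download-all-data --debug`
    download_all_data(
        command_args=sys.argv[1:],
        prog="armory download-all-data",
        description="Download all datasets and model weights for offline usage.",
    )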
class Evaluator(object):
    def __init__(
        self,
        config: dict,
        no_docker: bool = False,
        root: bool = False,
    ):
        if not isinstance(config, dict):
            raise ValueError(f"config {config} must be a dict")
        self.config = config

        self.host_paths = paths.HostPaths()
        if os.path.exists(self.host_paths.armory_config):
            self.armory_global_config = load_global_config(
                self.host_paths.armory_config)
        else:
            self.armory_global_config = {"verify_ssl": True}

        date_time = datetime.datetime.utcnow().isoformat().replace(":", "")
        output_dir = self.config["sysconfig"].get("output_dir", None)
        eval_id = f"{output_dir}_{date_time}" if output_dir else date_time

        self.config["eval_id"] = eval_id
        self.output_dir = os.path.join(self.host_paths.output_dir, eval_id)
        self.tmp_dir = os.path.join(self.host_paths.tmp_dir, eval_id)

        if self.config["sysconfig"].get("use_gpu", None):
            kwargs = dict(runtime="nvidia")
        else:
            kwargs = dict(runtime="runc")
        image_name = self.config["sysconfig"].get("docker_image")
        kwargs["image_name"] = image_name
        self.no_docker = not image_name or no_docker
        self.root = root

        # Retrieve environment variables that should be used in evaluation
        self.extra_env_vars = dict()
        self._gather_env_variables()

        if self.no_docker:
            if self.root:
                raise ValueError(
                    "running with --root is incompatible with --no-docker")
            self.manager = HostManagementInstance()
            return

        # Download docker image on host
        docker_client = docker.from_env()
        try:
            docker_client.images.get(kwargs["image_name"])
        except ImageNotFound:
            logger.info(f"Image {image_name} was not found. Downloading...")
            if "twosixarmory" in image_name and "-dev" in image_name:
                raise ValueError((
                    "You are attempting to pull an armory developer "
                    "docker image; however, these are not published. This "
                    "is likely because you're running armory from its "
                    "master branch. If you want a stable release with "
                    "published docker images try pip installing 'armory-testbed' "
                    "or checking out one of the stable branches on the git repository. "
                    "If you'd like to continue working on the developer image please "
                    "build it from source on your machine as described here: "
                    "https://armory.readthedocs.io/en/latest/contributing/#development-docker-containers"
                ))
            docker_api.pull_verbose(docker_client, image_name)
        except requests.exceptions.ConnectionError:
            logger.error("Docker connection refused. Is Docker Daemon running?")
            raise

        self.manager = ManagementInstance(**kwargs)

    def _gather_env_variables(self):
        """
        Update the extra env variable dictionary to pass into container or run on host
        """
        self.extra_env_vars["ARMORY_GITHUB_TOKEN"] = os.getenv(
            "ARMORY_GITHUB_TOKEN", default="")
        self.extra_env_vars["ARMORY_PRIVATE_S3_ID"] = os.getenv(
            "ARMORY_PRIVATE_S3_ID", default="")
        self.extra_env_vars["ARMORY_PRIVATE_S3_KEY"] = os.getenv(
            "ARMORY_PRIVATE_S3_KEY", default="")
        self.extra_env_vars["ARMORY_INCLUDE_SUBMISSION_BUCKETS"] = os.getenv(
            "ARMORY_INCLUDE_SUBMISSION_BUCKETS", default="")

        if not self.armory_global_config["verify_ssl"]:
            self.extra_env_vars["VERIFY_SSL"] = "false"

        if self.config["sysconfig"].get("use_gpu", None):
            gpus = self.config["sysconfig"].get("gpus")
            if gpus is not None:
                self.extra_env_vars["NVIDIA_VISIBLE_DEVICES"] = gpus

        if self.config["sysconfig"].get("set_pythonhashseed"):
            self.extra_env_vars["PYTHONHASHSEED"] = "0"

        # Because we may want to allow specification of ARMORY_TORCH_HOME
        # this constant path is placed here among the other imports
        self.extra_env_vars["TORCH_HOME"] = paths.runtime_paths().pytorch_dir

        self.extra_env_vars[environment.ARMORY_VERSION] = armory.__version__

    def _cleanup(self):
        logger.info(f"Deleting tmp_dir {self.tmp_dir}")
        try:
            shutil.rmtree(self.tmp_dir)
        except OSError as e:
            if not isinstance(e, FileNotFoundError):
                logger.exception(f"Error removing tmp_dir {self.tmp_dir}")

        logger.info(f"Removing output_dir {self.output_dir} if empty")
        try:
            os.rmdir(self.output_dir)
        except OSError:
            pass

    def run(
        self,
        interactive=False,
        jupyter=False,
        host_port=None,
        command=None,
        check_run=False,
        num_eval_batches=None,
        skip_benign=None,
    ) -> int:
        exit_code = 0
        if self.no_docker:
            if jupyter or interactive or command:
                raise ValueError(
                    "jupyter, interactive, or bash commands only supported when running Docker containers."
                )
            runner = self.manager.start_armory_instance(
                envs=self.extra_env_vars,
            )
            try:
                exit_code = self._run_config(
                    runner,
                    check_run=check_run,
                    num_eval_batches=num_eval_batches,
                    skip_benign=skip_benign,
                )
            except KeyboardInterrupt:
                logger.warning("Keyboard interrupt caught")
            finally:
                logger.warning("Cleaning up...")
                self._cleanup()
            return exit_code

        if check_run and (jupyter or interactive or command):
            raise ValueError(
                "check_run incompatible with interactive, jupyter, or command")

        # Handle docker and jupyter ports
        if jupyter or host_port:
            if host_port:
                ports = {host_port: host_port}
            else:
                ports = {8888: 8888}
        else:
            ports = None

        try:
            runner = self.manager.start_armory_instance(
                envs=self.extra_env_vars,
                ports=ports,
                user=self.get_id(),
            )
            try:
                if jupyter:
                    self._run_jupyter(runner, ports)
                elif interactive:
                    self._run_interactive_bash(
                        runner,
                        check_run=check_run,
                        num_eval_batches=num_eval_batches,
                        skip_benign=skip_benign,
                    )
                elif command:
                    exit_code = self._run_command(runner, command)
                else:
                    exit_code = self._run_config(
                        runner,
                        check_run=check_run,
                        num_eval_batches=num_eval_batches,
                        skip_benign=skip_benign,
                    )
            except KeyboardInterrupt:
                logger.warning("Keyboard interrupt caught")
            finally:
                logger.warning("Shutting down container")
                self.manager.stop_armory_instance(runner)
        except requests.exceptions.RequestException as e:
            logger.exception("Starting instance failed.")
            if str(e).endswith(
                    f'Bind for 0.0.0.0:{host_port} failed: port is already allocated")'
            ):
                logger.error(
                    f"Port {host_port} already in use. Try a different one with '--port <port>'"
                )
            elif (str(e) ==
                  '400 Client Error: Bad Request ("Unknown runtime specified nvidia")'
                  ):
                logger.error(
                    'NVIDIA runtime failed. Either install nvidia-docker or set config "use_gpu" to false'
                )
            else:
                logger.error("Is Docker Daemon running?")

        self._cleanup()
        return exit_code

    def _b64_encode_config(self):
        bytes_config = json.dumps(self.config).encode("utf-8")
        base64_bytes = base64.b64encode(bytes_config)
        return base64_bytes.decode("utf-8")

    def _run_config(
        self,
        runner: ArmoryInstance,
        check_run=False,
        num_eval_batches=None,
        skip_benign=None,
    ) -> int:
        logger.info(bold(red("Running evaluation script")))

        b64_config = self._b64_encode_config()
        options = self._build_options(
            check_run=check_run,
            num_eval_batches=num_eval_batches,
            skip_benign=skip_benign,
        )
        if self.no_docker:
            kwargs = {}
            python = sys.executable
        else:
            kwargs = {"user": self.get_id()}
            python = "python"

        cmd = f"{python} -m armory.scenarios.base {b64_config}{options}"
        return runner.exec_cmd(cmd, **kwargs)

    def _run_command(self, runner: ArmoryInstance, command: str) -> int:
        logger.info(bold(red(f"Running bash command: {command}")))
        return runner.exec_cmd(command,
                               user=self.get_id(),
                               expect_sentinel=False)

    def get_id(self):
        """
        Return uid, gid
        """
        # Windows docker does not require synchronizing file and
        # directory permissions via uid and gid.
        if os.name == "nt" or self.root:
            user_id = 0
            group_id = 0
        else:
            user_id = os.getuid()
            group_id = os.getgid()
        return f"{user_id}:{group_id}"

    def _run_interactive_bash(
        self,
        runner: ArmoryInstance,
        check_run=False,
        num_eval_batches=None,
        skip_benign=None,
    ) -> None:
        user_group_id = self.get_id()
        lines = [
            "Container ready for interactive use.",
            bold(
                "*** In a new terminal, run the following to attach to the container:"
            ),
            bold(
                red(f" docker exec -it -u {user_group_id} {runner.docker_container.short_id} bash"
                    )),
        ]
        if self.config.get("scenario"):
            options = self._build_options(
                check_run=check_run,
                num_eval_batches=num_eval_batches,
                skip_benign=skip_benign,
            )
            tmp_dir = os.path.join(self.host_paths.tmp_dir,
                                   self.config["eval_id"])
            os.makedirs(tmp_dir)
            self.tmp_config = os.path.join(tmp_dir, "interactive-config.json")
            docker_config_path = os.path.join(
                paths.runtime_paths().tmp_dir,
                self.config["eval_id"],
                "interactive-config.json",
            )
            with open(self.tmp_config, "w") as f:
                f.write(
                    json.dumps(self.config, sort_keys=True, indent=4) + "\n")
            lines.extend([
                bold("*** To run your scenario in the container:"),
                bold(
                    red(f" python -m armory.scenarios.base {docker_config_path}{options} --load-config-from-file"
                        )),
                bold("*** To gracefully shut down container, press: Ctrl-C"),
                "",
            ])
        logger.info("\n".join(lines))
        while True:
            time.sleep(1)

    def _run_jupyter(self, runner: ArmoryInstance, ports: dict) -> None:
        if not self.root:
            logger.warning("Running Jupyter Lab as root inside the container.")
        user_group_id = self.get_id()
        port = list(ports.keys())[0]
        lines = [
            "About to launch jupyter.",
            bold(
                "*** To connect on the command line as well, in a new terminal, run:"
            ),
            bold(
                red(f" docker exec -it -u {user_group_id} {runner.docker_container.short_id} bash"
                    )),
            bold("*** To gracefully shut down container, press: Ctrl-C"),
            "",
            "Jupyter notebook log:",
        ]
        logger.info("\n".join(lines))
        runner.exec_cmd(
            f"jupyter lab --ip=0.0.0.0 --port {port} --no-browser --allow-root",
            user="******",
            expect_sentinel=False,
        )

    def _build_options(self, check_run, num_eval_batches, skip_benign):
        options = ""
        if self.no_docker:
            options += " --no-docker"
        if check_run:
            options += " --check"
        if logger.getEffectiveLevel() == logging.DEBUG:
            options += " --debug"
        if num_eval_batches:
            options += f" --num-eval-batches {num_eval_batches}"
        if skip_benign:
            options += " --skip-benign"
        return options
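A minimal usage sketch for the class above, assuming a scenario config has already been loaded into a dict; the import path and file name are assumptions, not taken from the original source.

# Hypothetical driver code for the Evaluator variant above (dict config).
import json

from armory.eval import Evaluator  # assumed import path

with open("scenario-config.json") as f:  # assumed file name
    config = json.load(f)  # must contain at least a "sysconfig" section

# no_docker=True runs on the host via HostManagementInstance and skips image pulls.
evaluator = Evaluator(config, no_docker=True)
exit_code = evaluator.run(check_run=True, num_eval_batches=2)
print("evaluation exit code:", exit_code)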
class Evaluator(object):
    def __init__(self,
                 config_path: Union[str, dict],
                 container_config_name="eval-config.json"):
        self.host_paths = paths.host()
        self.docker_paths = paths.docker()

        if os.name != "nt":
            self.user_id, self.group_id = os.getuid(), os.getgid()
        else:
            self.user_id, self.group_id = 0, 0

        self.extra_env_vars = dict()
        if isinstance(config_path, str):
            try:
                self.config = load_config(config_path)
            except json.decoder.JSONDecodeError:
                logger.error(f"Could not decode {config_path} as a json file.")
                if not config_path.lower().endswith(".json"):
                    logger.warning(f"{config_path} is not a '*.json' file")
                    logger.warning(
                        "If using `armory run`, use a json config file.")
                raise
        elif isinstance(config_path, dict):
            self.config = config_path
        else:
            raise ValueError(
                f"config_path {config_path} must be a str or dict")
        (
            self.container_subdir,
            self.tmp_dir,
            self.output_dir,
        ) = volumes_util.tmp_output_subdir()
        self.tmp_config = os.path.join(self.tmp_dir, container_config_name)
        self.external_repo_dir = paths.get_external(self.tmp_dir)
        self.docker_config_path = Path(
            os.path.join(self.docker_paths.tmp_dir,
                         container_config_name)).as_posix()

        kwargs = dict(runtime="runc")
        if self.config["sysconfig"].get("use_gpu", None):
            kwargs["runtime"] = "nvidia"
            gpus = self.config["sysconfig"].get("gpus")
            if gpus is not None:
                self.extra_env_vars["NVIDIA_VISIBLE_DEVICES"] = gpus

        if self.config["sysconfig"].get("external_github_repo", None):
            self._download_external()
            self.extra_env_vars.update(
                {"PYTHONPATH": self.docker_paths.external_repo_dir})

        if self.config["sysconfig"].get("use_armory_private", None):
            self._download_private()

        image_name = self.config["sysconfig"].get("docker_image")
        kwargs["image_name"] = image_name

        # Download docker image on host
        docker_client = docker.from_env()
        try:
            docker_client.images.get(kwargs["image_name"])
        except ImageNotFound:
            logger.info(f"Image {image_name} was not found. Downloading...")
            docker_api.pull_verbose(docker_client, image_name)
        except requests.exceptions.ConnectionError:
            logger.error("Docker connection refused. Is Docker Daemon running?")
            raise

        self.manager = ManagementInstance(**kwargs)

    def _download_external(self):
        external_repo.download_and_extract_repo(
            self.config["sysconfig"]["external_github_repo"],
            external_repo_dir=self.external_repo_dir,
        )

    def _download_private(self):
        external_repo.download_and_extract_repo(
            "twosixlabs/armory-private",
            external_repo_dir=self.external_repo_dir)
        self.extra_env_vars.update({
            "ARMORY_PRIVATE_S3_ID": os.getenv("ARMORY_PRIVATE_S3_ID"),
            "ARMORY_PRIVATE_S3_KEY": os.getenv("ARMORY_PRIVATE_S3_KEY"),
        })

    def _write_tmp(self):
        os.makedirs(self.tmp_dir, exist_ok=True)
        if os.path.exists(self.tmp_config):
            logger.warning(
                f"Overwriting previous temp config: {self.tmp_config}...")
        with open(self.tmp_config, "w") as f:
            f.write(json.dumps(self.config, sort_keys=True, indent=4) + "\n")

    def _delete_tmp(self):
        if os.path.exists(self.external_repo_dir):
            try:
                shutil.rmtree(self.external_repo_dir)
            except OSError as e:
                if not isinstance(e, FileNotFoundError):
                    logger.exception(
                        f"Error removing external repo {self.external_repo_dir}"
                    )

        logger.info(f"Deleting tmp_dir {self.tmp_dir}")
        try:
            shutil.rmtree(self.tmp_dir)
        except OSError as e:
            if not isinstance(e, FileNotFoundError):
                logger.exception(f"Error removing tmp_dir {self.tmp_dir}")

        logger.info(f"Removing output_dir {self.output_dir} if empty")
        try:
            os.rmdir(self.output_dir)
        except OSError:
            pass

    def run(self,
            interactive=False,
            jupyter=False,
            host_port=8888,
            command=None) -> None:
        container_port = 8888
        self._write_tmp()
        ports = {container_port: host_port} if jupyter else None
        try:
            runner = self.manager.start_armory_instance(
                envs=self.extra_env_vars,
                ports=ports,
                container_subdir=self.container_subdir,
            )
            logger.warning(f"Outputs will be written to {self.output_dir}")
            try:
                if jupyter:
                    self._run_jupyter(runner, host_port=host_port)
                elif interactive:
                    self._run_interactive_bash(runner)
                elif command:
                    self._run_command(runner, command)
                else:
                    self._run_config(runner)
            except KeyboardInterrupt:
                logger.warning("Keyboard interrupt caught")
            finally:
                logger.warning("Shutting down container")
                self.manager.stop_armory_instance(runner)
        except requests.exceptions.RequestException as e:
            logger.exception("Starting instance failed.")
            if str(e).endswith(
                    f'Bind for 0.0.0.0:{host_port} failed: port is already allocated")'
            ):
                logger.error(
                    f"Port {host_port} already in use. Try a different one with '--port <port>'"
                )
            elif (str(e) ==
                  '400 Client Error: Bad Request ("Unknown runtime specified nvidia")'
                  ):
                logger.error(
                    'NVIDIA runtime failed. Either install nvidia-docker or set config "use_gpu" to false'
                )
            else:
                logger.error("Is Docker Daemon running?")

        self._delete_tmp()

    def _run_config(self, runner) -> None:
        logger.info(bold(red("Running evaluation script")))
        runner.exec_cmd(
            f"python -m armory.scenarios.base {self.docker_config_path}")

    def _run_command(self, runner, command) -> None:
        logger.info(bold(red(f"Running bash command: {command}")))
        runner.exec_cmd(command)

    def _run_interactive_bash(self, runner) -> None:
        lines = [
            "Container ready for interactive use.",
            bold(
                "*** In a new terminal, run the following to attach to the container:"
            ),
            bold(
                red(f" docker exec -it -u {self.user_id}:{self.group_id} {runner.docker_container.short_id} bash"
                    )),
        ]
        if self.config.get("scenario"):
            lines.extend([
                bold("*** To run your scenario in the container:"),
                bold(
                    red(f" python -m armory.scenarios.base {self.docker_config_path}"
                        )),
                bold("*** To gracefully shut down container, press: Ctrl-C"),
                "",
            ])
        logger.info("\n".join(lines))
        while True:
            time.sleep(1)

    def _run_jupyter(self, runner, host_port=8888) -> None:
        lines = [
            "About to launch jupyter.",
            bold(
                "*** To connect to jupyter, please open the following in a browser:"
            ),
            bold(red(f" http://127.0.0.1:{host_port}")),
            bold(
                "*** To connect on the command line as well, in a new terminal, run:"
            ),
            bold(
                f" docker exec -it -u {self.user_id}:{self.group_id} {runner.docker_container.short_id} bash"
            ),
            bold("*** To gracefully shut down container, press: Ctrl-C"),
            "",
            "Jupyter notebook log:",
        ]
        logger.info("\n".join(lines))
        runner.exec_cmd(
            "jupyter lab --ip=0.0.0.0 --no-browser --allow-root --NotebookApp.token=''",
            user="******",
        )
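The `bold` and `red` console helpers called throughout these classes are not defined in this section; below is a minimal sketch of ANSI-escape implementations that would satisfy those calls (armory's actual helpers may differ).

# Hedged sketch: plain ANSI wrappers standing in for the bold()/red() helpers
# referenced above; the real implementations may live elsewhere in the package.
def bold(s: str) -> str:
    return "\033[1m" + s + "\033[0m"


def red(s: str) -> str:
    return "\033[31m" + s + "\033[0m"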
class Evaluator(object):
    def __init__(self,
                 config_path: str,
                 container_config_name="eval-config.json"):
        self.extra_env_vars = None
        self.config = load_config(config_path)
        self.tmp_config = os.path.join(paths.TMP, container_config_name)
        self.unix_config_path = Path(
            os.path.join(paths.DOCKER_TMP, container_config_name)).as_posix()

        kwargs = dict(runtime="runc")
        if self.config["sysconfig"].get("use_gpu", None):
            kwargs["runtime"] = "nvidia"

        if self.config["sysconfig"].get("external_github_repo", None):
            self._download_external()

        if self.config["sysconfig"].get("use_armory_private", None):
            self._download_private()

        image_name = self.config["sysconfig"].get("docker_image")
        kwargs["image_name"] = image_name

        # Download docker image on host
        docker_client = docker.from_env()
        try:
            docker_client.images.get(kwargs["image_name"])
        except ImageNotFound:
            logger.info(f"Image {image_name} was not found. Downloading...")
            docker_client.images.pull(image_name)

        self.manager = ManagementInstance(**kwargs)

    def _download_external(self):
        external_repo.download_and_extract_repo(
            self.config["external_github_repo"])

    def _download_private(self):
        external_repo.download_and_extract_repo("twosixlabs/armory-private")
        self.extra_env_vars = {
            "ARMORY_PRIVATE_S3_ID": os.getenv("ARMORY_PRIVATE_S3_ID"),
            "ARMORY_PRIVATE_S3_KEY": os.getenv("ARMORY_PRIVATE_S3_KEY"),
        }

    def _write_tmp(self):
        os.makedirs(paths.TMP, exist_ok=True)
        if os.path.exists(self.tmp_config):
            logger.warning(f"Overwriting {self.tmp_config}!")
        with open(self.tmp_config, "w") as f:
            json.dump(self.config, f)

    def _delete_tmp(self):
        if os.path.exists(paths.EXTERNAL_REPOS):
            try:
                shutil.rmtree(paths.EXTERNAL_REPOS)
            except OSError as e:
                if not isinstance(e, FileNotFoundError):
                    logger.exception(
                        f"Error removing external repo {paths.EXTERNAL_REPOS}")

        try:
            os.remove(self.tmp_config)
        except OSError as e:
            if not isinstance(e, FileNotFoundError):
                logger.exception(
                    f"Error removing tmp config {self.tmp_config}")

    def run(self, interactive=False, jupyter=False, host_port=8888) -> None:
        container_port = 8888
        self._write_tmp()
        ports = {container_port: host_port} if jupyter else None
        try:
            runner = self.manager.start_armory_instance(
                envs=self.extra_env_vars, ports=ports)
            try:
                if jupyter:
                    self._run_jupyter(runner, host_port=host_port)
                elif interactive:
                    self._run_interactive_bash(runner)
                else:
                    self._run_config(runner)
            except KeyboardInterrupt:
                logger.warning("Keyboard interrupt caught")
            finally:
                logger.warning("Shutting down container")
                self.manager.stop_armory_instance(runner)
        except requests.exceptions.RequestException as e:
            logger.exception("Starting instance failed.")
            if str(e).endswith(
                    f'Bind for 0.0.0.0:{host_port} failed: port is already allocated")'
            ):
                logger.error(
                    f"Port {host_port} already in use. Try a different one with '--port <port>'"
                )
            elif (isinstance(e, docker.errors.APIError) and str(e) ==
                  r'400 Client Error: Bad Request ("Unknown runtime specified nvidia")'
                  and self.config.get("use_gpu")):
                logger.error(
                    'nvidia runtime failed. Set config "use_gpu" to false')
            else:
                logger.error("Is Docker Daemon running?")

        self._delete_tmp()

    def _run_config(self, runner) -> None:
        logger.info(bold(red("Running evaluation script")))
        runner.exec_cmd(
            f"python -m {self.config['evaluation']['eval_file']} {self.unix_config_path}"
        )

    def _run_interactive_bash(self, runner) -> None:
        lines = [
            "Container ready for interactive use.",
            bold(
                "*** In a new terminal, run the following to attach to the container:"
            ),
            bold(
                red(f" docker exec -itu0 {runner.docker_container.short_id} bash"
                    )),
            bold("*** To run your script in the container:"),
            bold(
                red(f" python -m {self.config['evaluation']['eval_file']} {self.unix_config_path}"
                    )),
            bold("*** To gracefully shut down container, press: Ctrl-C"),
            "",
        ]
        logger.info("\n".join(lines))
        while True:
            time.sleep(1)

    @staticmethod
    def _run_jupyter(runner, host_port=8888) -> None:
        lines = [
            "About to launch jupyter.",
            bold(
                "*** To connect to jupyter, please open the following in a browser:"
            ),
            bold(red(f" http://127.0.0.1:{host_port}")),
            bold(
                "*** To connect on the command line as well, in a new terminal, run:"
            ),
            bold(
                f" docker exec -itu0 {runner.docker_container.short_id} bash"
            ),
            bold("*** To gracefully shut down container, press: Ctrl-C"),
            "",
            "Jupyter notebook log:",
        ]
        logger.info("\n".join(lines))
        runner.exec_cmd(
            "jupyter notebook --ip=0.0.0.0 --no-browser --allow-root --NotebookApp.token=''",
            user="******",
        )
def test_deletion(self):
    manager = ManagementInstance(image_name="twosixarmory/tf1:0.3.3")
    instance = manager.start_armory_instance()
    manager.stop_armory_instance(instance)
    self.assertEqual(manager.instances, {})
def test_creation(self):
    manager = ManagementInstance(image_name="twosixarmory/tf1:0.3.3")
    instance = manager.start_armory_instance()
    self.assertIsInstance(instance.docker_container, Container)
    self.assertIn(instance.docker_container.short_id, manager.instances)
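The two test methods above take `self` and use unittest assertions, so they presumably belong to a `unittest.TestCase`; a minimal sketch of that wrapper follows, with assumed import paths and class name.

# Hedged sketch of the assumed test-class context; mirrors test_creation above.
import unittest

from docker.models.containers import Container

from armory.docker.management import ManagementInstance  # assumed import path


class TestManagementInstance(unittest.TestCase):
    def test_creation(self):
        manager = ManagementInstance(image_name="twosixarmory/tf1:0.3.3")
        instance = manager.start_armory_instance()
        self.assertIsInstance(instance.docker_container, Container)
        self.assertIn(instance.docker_container.short_id, manager.instances)


if __name__ == "__main__":
    unittest.main()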
def download(command_args, prog, description):
    """
    Script to download all datasets and model weights for offline usage.
    """
    parser = argparse.ArgumentParser(prog=prog, description=description)
    parser.add_argument(
        "-d",
        "--debug",
        dest="log_level",
        action="store_const",
        const=logging.DEBUG,
        default=logging.INFO,
        help="Debug output (logging=DEBUG)",
    )
    parser.add_argument(
        metavar="<download data config file>",
        dest="download_config",
        type=str,
        action=DownloadConfig,
        help=f"Configuration for download of data. See {DEFAULT_SCENARIO}. Note: file must be under current working directory.",
    )
    parser.add_argument(
        metavar="<scenario>",
        dest="scenario",
        type=str,
        default="all",
        help="scenario for which to download data, 'list' for available scenarios, or blank to download all scenarios",
        nargs="?",
    )
    try:
        args = parser.parse_args(command_args)
    except SystemExit:
        parser.print_help()
        raise
    coloredlogs.install(level=args.log_level)
    paths.host()

    if not armory.is_dev():
        print("Downloading all docker images....")
        _pull_docker_images()

    print("Downloading requested datasets and model weights...")
    manager = ManagementInstance(image_name=images.TF1)
    runner = manager.start_armory_instance()
    cmd = "; ".join([
        "import logging",
        "import coloredlogs",
        "coloredlogs.install(logging.INFO)",
        "from armory.data import datasets",
        "from armory.data import model_weights",
        f'datasets.download_all("{args.download_config}", "{args.scenario}")',
        f'model_weights.download_all("{args.download_config}", "{args.scenario}")',
    ])
    runner.exec_cmd(f"python -c '{cmd}'")
    manager.stop_armory_instance(runner)
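`_pull_docker_images()` is referenced above but not defined in this section; the sketch below is an assumption that mirrors the per-image get/pull pattern used in `download_all_data()`, and the `images.ALL` attribute is likewise assumed.

# Hedged sketch of the _pull_docker_images() helper referenced in download().
def _pull_docker_images(docker_client=None):
    if docker_client is None:
        docker_client = docker.from_env()
    for image in images.ALL:  # assumed list of armory image tags
        try:
            docker_client.images.get(image)
        except ImageNotFound:
            logger.info(f"Image {image} was not found. Downloading...")
            docker_api.pull_verbose(docker_client, image)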