def setup_local_gpu():
    """Prepares the local host for GPU rendering when possible.

    The GPU setup script is run only for local (non-S3) projects on a Linux
    host that exposes an NVIDIA device; any other combination gets a warning
    instead, since Docker GPU access is only supported from Linux.
    """
    if FLAGS.project_root.startswith("s3://"):
        # Rendering happens in AWS; nothing to configure on this machine.
        return
    local_os = get_os_type(config.LOCALHOST)
    if local_os == OSType.LINUX and pyvidia.get_nvidia_device() is not None:
        setup_script = os.path.join(dir_scripts, "render", "setup_gpu.sh")
        print(glog.green("Setting up GPU environment..."))
        run_command(f"/bin/bash {setup_script}", run_silently=not FLAGS.verbose)
    else:
        print(
            glog.yellow(
                "We can only access an Nvidia GPU from a Linux host. Skipping Docker GPU setup"
            ))
def build(client, docker_img):
    """Builds the Docker image.

    Args:
        client (DockerClient): Docker client configured to the host environment.
        docker_img (str): Name of the Docker image.

    Raises:
        Exception: If Docker encounters an issue during the build.
    """
    print(glog.green("Preparing context"), end="")
    # Background timer that prints a "." every second while the context uploads.
    loading_context = RepeatedTimer(1, lambda: print(glog.green("."), end=""))
    building_image = False
    try:
        for line in client.api.build(
            path=os.path.dirname(os.path.abspath(FLAGS.dockerfile)),
            dockerfile=FLAGS.dockerfile,
            tag=docker_img,
            decode=True,
        ):
            if "stream" in line:
                if FLAGS.verbose:
                    loading_context.stop()
                    print(line["stream"].strip())
                elif not building_image:
                    print(glog.green("\nBuilding Docker image"), end="")
                    building_image = True
            if "error" in line:
                # Surface the Docker build error to the caller.
                raise Exception(line["error"])
    except requests.exceptions.ConnectionError:
        print(glog.red("\nError: Docker is not running!"))
        exit(1)
    finally:
        # BUGFIX: previously the timer was only stopped on the success and
        # ConnectionError paths, so a build error left the "." printer thread
        # running forever. Always stop it (stop() is safe to call repeatedly).
        loading_context.stop()
    if not FLAGS.verbose:
        print("")  # force newline
def run_ui(client, docker_img):
    """Starts the UI.

    Args:
        client (DockerClient): Docker client configured to the host environment.
        docker_img (str): Name of the Docker image.
    """
    if not FLAGS.verbose:
        print(glog.green("Initializing container"), end="")
        # Background timer that prints a "." every second during startup.
        loading_context = RepeatedTimer(1, lambda: print(glog.green("."), end=""))
    host_os = get_os_type(config.LOCALHOST)

    # Setup steps for X11 forwarding vary slightly per the host operating system
    volumes = {
        "/var/run/docker.sock": {
            "bind": "/var/run/docker.sock",
            "mode": "ro"
        }
    }
    if host_os == OSType.MAC or host_os == OSType.LINUX:
        volumes.update(
            {"/tmp/.X11-unix": {
                "bind": "/tmp/.X11-unix",
                "mode": "ro"
            }})
        run_command(f"xhost + {config.LOCALHOST}", run_silently=not FLAGS.verbose)
    if host_os == OSType.LINUX:
        run_command(f"xhost + {config.DOCKER_LOCAL_HOSTNAME}",
                    run_silently=not FLAGS.verbose)

    host_to_docker_path = {FLAGS.project_root: config.DOCKER_INPUT_ROOT}
    project = Project(
        FLAGS.project_root,
        FLAGS.cache,
        FLAGS.csv_path,
        FLAGS.s3_sample_frame,
        FLAGS.s3_ignore_fullsize_color,
        FLAGS.verbose,
    )
    project.verify()

    cmds = [
        "cd scripts/ui",
        f"""python3 -u dep.py \
            --host_os={get_os_type(config.LOCALHOST)} \
            --local_bin={FLAGS.local_bin} \
            --master={FLAGS.master} \
            --password={FLAGS.password} \
            --project_root={FLAGS.project_root} \
            --s3_ignore_fullsize_color={FLAGS.s3_ignore_fullsize_color} \
            --s3_sample_frame={FLAGS.s3_sample_frame} \
            --username={FLAGS.username} \
            --verbose={FLAGS.verbose}""",
    ]

    # Create the UI's bridge network if it does not already exist.
    docker_networks = client.networks.list()
    network_names = [docker_network.name for docker_network in docker_networks]
    if config.DOCKER_NETWORK not in network_names:
        client.networks.create(config.DOCKER_NETWORK, driver="bridge")

    project_address = Address(FLAGS.project_root)
    project_protocol = project_address.protocol
    if project_protocol == "smb":
        mounts = docker_mounts(FLAGS.project_root, host_to_docker_path,
                               FLAGS.username, FLAGS.password)
        cmds = [f"mkdir {config.DOCKER_INPUT_ROOT}"] + mounts + cmds
        # NOTE(review): local_project_root stays None here, yet it is passed to
        # os.path.join below — confirm the smb path is exercised/intended.
        local_project_root = None
    elif project_protocol == "s3":
        glog.check_ne(FLAGS.csv_path, "",
                      "csv_path cannot be empty if rendering on AWS")
        aws_util = AWSUtil(FLAGS.csv_path, s3_url=FLAGS.project_root)
        glog.check(
            aws_util.s3_bucket_is_valid(FLAGS.project_root),
            f"Invalid S3 project path: {FLAGS.project_root}",
        )
        volumes.update({
            FLAGS.csv_path: {
                "bind": config.DOCKER_AWS_CREDENTIALS,
                "mode": "rw"
            }
        })
        # S3 projects render against a local cache directory.
        project_name = project_address.path
        cache_path = os.path.join(os.path.expanduser(FLAGS.cache), project_name)
        os.makedirs(cache_path, exist_ok=True)
        volumes.update(
            {cache_path: {
                "bind": config.DOCKER_INPUT_ROOT,
                "mode": "rw"
            }})
        local_project_root = cache_path
    else:
        glog.check(
            os.path.isdir(FLAGS.project_root),
            f"Invalid project path: {FLAGS.project_root}",
        )
        volumes.update({
            host_path: {
                "bind": docker_path,
                "mode": "rw"
            }
            for host_path, docker_path in host_to_docker_path.items()
        })
        local_project_root = FLAGS.project_root

    # Shared IPC directory between host and container.
    ipc_dir = os.path.join(local_project_root, "ipc")
    os.makedirs(ipc_dir, exist_ok=True)
    volumes.update({ipc_dir: {"bind": config.DOCKER_IPC_ROOT, "mode": "rw"}})

    cmd = f'/bin/bash -c "{" && ".join(cmds)}"'
    global container_name
    # BUGFIX: removed the dead `if host_os != OSType.LINUX` re-assignment that
    # followed — the conditional expression below already yields
    # "host.docker.internal:0" for every non-Linux host.
    display = ":0" if host_os == OSType.LINUX else "host.docker.internal:0"
    runtime = "nvidia" if which("nvidia-docker") else ""
    if not FLAGS.verbose:
        loading_context.stop()
        print("")
    try:
        container = client.containers.run(
            docker_img,
            command=cmd,
            detach=True,
            environment={"DISPLAY": display},
            runtime=runtime,
            network=config.DOCKER_NETWORK,
            ports={
                config.RABBITMQ_PORT: config.RABBITMQ_PORT,
                config.RABBITMQ_MANAGE_PORT: config.RABBITMQ_MANAGE_PORT,
            },
            privileged=True,
            volumes=volumes,
            stderr=True,
        )
    except docker.errors.APIError as e:
        if "port is already allocated" in str(e):
            raise Exception(
                "Failed to launch UI! Ensure: \n"
                "(1) No other instance of UI is running (check: docker ps) and\n"
                "(2) RabbitMQ is not running on your machine (check: ps aux | grep 'rabbitmq')"
            ) from None
        raise e
    container_name = container.name
    create_viewer_watchdog(client, ipc_dir, local_project_root)