def main(argv):
    """Downloads binary tar files from S3 and unpacks them locally.

    Args:
        argv (list[str]): List of arguments (used internally by abseil).
    """
    os.makedirs(FLAGS.local_dir, exist_ok=True)

    # Watch the local directory so new tar files are handled as they arrive
    if FLAGS.watch:
        event_handler = ViewerHandler()
        observer = Observer()
        observer.schedule(event_handler, path=FLAGS.local_dir, recursive=False)
        observer.start()

    # Download tar files
    glog.check(FLAGS.s3_dir.startswith("s3://"), "S3 directory must start with s3://")
    aws_util = AWSUtil(FLAGS.csv_path)
    try:
        print("Syncing files from S3...")
        aws_util.s3_sync(
            FLAGS.s3_dir,
            FLAGS.local_dir,
            exclude="*",
            include=["*.tar", "*.json"],
            run_silently=not FLAGS.verbose,
        )
    except KeyboardInterrupt:
        if FLAGS.watch:
            observer.stop()

    if FLAGS.watch:
        observer.stop()
        observer.join()

    # One last pass for missed files
    tars = list(glob.iglob(f"{FLAGS.local_dir}/*.tar", recursive=False))
    for fn in tars:
        extract_and_delete_tar(fn)
def run_ui(client, docker_img):
    """Starts the UI.

    Args:
        client (DockerClient): Docker client configured to the host environment.
        docker_img (str): Name of the Docker image.
    """
    if not FLAGS.verbose:
        print(glog.green("Initializing container"), end="")
        loading_context = RepeatedTimer(1, lambda: print(glog.green("."), end=""))

    host_os = get_os_type(config.LOCALHOST)

    # Expose the host Docker socket (read-only) inside the container
    volumes = {"/var/run/docker.sock": {"bind": "/var/run/docker.sock", "mode": "ro"}}

    # Setup steps for X11 forwarding vary slightly per the host operating system
    if host_os == OSType.MAC or host_os == OSType.LINUX:
        volumes.update({"/tmp/.X11-unix": {"bind": "/tmp/.X11-unix", "mode": "ro"}})
        run_command(f"xhost + {config.LOCALHOST}", run_silently=not FLAGS.verbose)
    if host_os == OSType.LINUX:
        run_command(
            f"xhost + {config.DOCKER_LOCAL_HOSTNAME}", run_silently=not FLAGS.verbose
        )

    host_to_docker_path = {FLAGS.project_root: config.DOCKER_INPUT_ROOT}
    project = Project(
        FLAGS.project_root,
        FLAGS.cache,
        FLAGS.csv_path,
        FLAGS.s3_sample_frame,
        FLAGS.s3_ignore_fullsize_color,
        FLAGS.verbose,
    )
    project.verify()

    # Command executed inside the container to launch the UI
    cmds = [
        "cd scripts/ui",
        f"""python3 -u dep.py \
            --host_os={get_os_type(config.LOCALHOST)} \
            --local_bin={FLAGS.local_bin} \
            --master={FLAGS.master} \
            --password={FLAGS.password} \
            --project_root={FLAGS.project_root} \
            --s3_ignore_fullsize_color={FLAGS.s3_ignore_fullsize_color} \
            --s3_sample_frame={FLAGS.s3_sample_frame} \
            --username={FLAGS.username} \
            --verbose={FLAGS.verbose}""",
    ]

    # Ensure the shared Docker network exists
    docker_networks = client.networks.list()
    network_names = [docker_network.name for docker_network in docker_networks]
    if config.DOCKER_NETWORK not in network_names:
        client.networks.create(config.DOCKER_NETWORK, driver="bridge")

    # Configure mounts depending on where the project lives (SMB share, S3, or local disk)
    project_address = Address(FLAGS.project_root)
    project_protocol = project_address.protocol
    if project_protocol == "smb":
        mounts = docker_mounts(
            FLAGS.project_root, host_to_docker_path, FLAGS.username, FLAGS.password
        )
        cmds = [f"mkdir {config.DOCKER_INPUT_ROOT}"] + mounts + cmds
        local_project_root = None
    elif project_protocol == "s3":
        glog.check_ne(FLAGS.csv_path, "", "csv_path cannot be empty if rendering on AWS")
        aws_util = AWSUtil(FLAGS.csv_path, s3_url=FLAGS.project_root)
        glog.check(
            aws_util.s3_bucket_is_valid(FLAGS.project_root),
            f"Invalid S3 project path: {FLAGS.project_root}",
        )
        volumes.update(
            {FLAGS.csv_path: {"bind": config.DOCKER_AWS_CREDENTIALS, "mode": "rw"}}
        )
        project_name = project_address.path
        cache_path = os.path.join(os.path.expanduser(FLAGS.cache), project_name)
        os.makedirs(cache_path, exist_ok=True)
        volumes.update({cache_path: {"bind": config.DOCKER_INPUT_ROOT, "mode": "rw"}})
        local_project_root = cache_path
    else:
        glog.check(
            os.path.isdir(FLAGS.project_root),
            f"Invalid project path: {FLAGS.project_root}",
        )
        volumes.update(
            {
                host_path: {"bind": docker_path, "mode": "rw"}
                for host_path, docker_path in host_to_docker_path.items()
            }
        )
        local_project_root = FLAGS.project_root

    # Shared IPC directory between the host and the container
    ipc_dir = os.path.join(local_project_root, "ipc")
    os.makedirs(ipc_dir, exist_ok=True)
    volumes.update({ipc_dir: {"bind": config.DOCKER_IPC_ROOT, "mode": "rw"}})

    cmd = f'/bin/bash -c "{" && ".join(cmds)}"'

    global container_name
    display = ":0" if host_os == OSType.LINUX else "host.docker.internal:0"
    runtime = "nvidia" if which("nvidia-docker") else ""

    if not FLAGS.verbose:
        loading_context.stop()
        print("")

    try:
        container = client.containers.run(
            docker_img,
            command=cmd,
            detach=True,
            environment={"DISPLAY": display},
            runtime=runtime,
            network=config.DOCKER_NETWORK,
            ports={
                config.RABBITMQ_PORT: config.RABBITMQ_PORT,
                config.RABBITMQ_MANAGE_PORT: config.RABBITMQ_MANAGE_PORT,
            },
            privileged=True,
            volumes=volumes,
            stderr=True,
        )
    except docker.errors.APIError as e:
        if "port is already allocated" in str(e):
            raise Exception(
                "Failed to launch UI! Ensure: \n"
                "(1) No other instance of UI is running (check: docker ps) and\n"
                "(2) RabbitMQ is not running on your machine (check: ps aux | grep 'rabbitmq')"
            ) from None
        raise e

    container_name = container.name
    create_viewer_watchdog(client, ipc_dir, local_project_root)
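

# A minimal sketch of the absl entry-point wiring assumed by main(argv) above. This
# is an illustration rather than part of the original file; the repository may define
# its flags (e.g. --local_dir, --s3_dir, --watch) and entry point elsewhere.
if __name__ == "__main__":
    from absl import app  # local import to keep the sketch self-contained

    app.run(main)  # absl parses FLAGS from sys.argv, then calls main(argv)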