Code Example #1
File: setup.py  Project: tonyle9/facebook360_dep
def spawn_worker(ip, num_containers, run_async):
    """Creates worker container(s) on the desired IP.

    Args:
        ip (str): IP of the machine to run the worker container.
        num_containers (int): Number of containers to be run.
        run_async (bool): Whether the containers should be spawned asynchronously.
    """
    print(f"Spawning worker on: {ip}...")

    remote_image = f"{FLAGS.master}:{config.DOCKER_REGISTRY_PORT}/{config.DOCKER_IMAGE}"
    configure_worker_daemon(ip)
    cmds = ["docker stop $(docker ps -a -q)", f"docker pull {remote_image}"]
    cmds += [docker_run_cmd(ip, remote_image)] * num_containers

    nc = NetcatClient(ip, config.NETCAT_PORT)

    os_type = get_os_type(ip)
    if os_type == OSType.LINUX:
        nc.run_script("setup_gpu.sh")

    if run_async:
        nc.run_async(cmds)
    else:
        nc.run(cmds)
    print(f"Completed setup of {ip}!")
Code Example #2
File: setup.py  Project: tonyle9/facebook360_dep
def configure_worker_daemon(ip):
    """Configures the Docker daemon to accept HTTP connections for using the local registry.

    Args:
        ip (str): IP of the worker.
    """
    os_type = get_os_type(ip)

    os_paths = {
        OSType.MAC: "~/.docker/",
        OSType.WINDOWS: r"$env:userprofile\.docker",
        OSType.LINUX: "/etc/docker/",
    }

    os_restarts = {
        OSType.MAC: [
            """osascript -e 'quit app "Docker"'""",
            "open -a Docker",
            "until docker ps; sleep 2; done",
        ],
        OSType.WINDOWS: [
            "net stop docker",
            "net stop com.docker.service",
            'taskkill /IM "dockerd.exe" /F',
            'taskkill /IM "Docker for Windows.exe" /F',
            "net start docker",
            "net start com.docker.service",
            '& "c:\\Program Files\\Docker\\Docker\\Docker for Windows.exe"',
            "while (!(docker ps)) { sleep 2 };",
        ],
        OSType.LINUX: ["systemctl restart docker"],
    }

    registry = f"{FLAGS.master}:{config.DOCKER_REGISTRY_PORT}"
    daemon_json = os.path.join(os_paths[os_type], config.DOCKER_DAEMON_JSON)

    nc = NetcatClient(ip, config.NETCAT_PORT)
    results = nc.run([f"cat {daemon_json}"])
    try:
        relevant_part = r"\{[^\}]*\}"  # extracts section inside braces
        m = re.search(relevant_part, results)
        daemon_config = json.loads(m.group(0))
    except Exception:
        daemon_config = {}
    if "insecure-registries" in daemon_config:
        if registry in daemon_config["insecure-registries"]:
            return
    else:
        daemon_config["insecure-registries"] = []

    daemon_config["insecure-registries"].append(registry)
    new_daemon_config = json.dumps(daemon_config)
    configure_cmds = [f"echo '{new_daemon_config}' > {daemon_json}"]
    configure_cmds += os_restarts[os_type]
    nc.run(configure_cmds)
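For orientation, a hedged usage sketch of the function above; the IP addresses are examples, not project values. After the call, the worker's daemon.json should list the master's registry under insecure-registries.

# Hypothetical worker IP; afterwards its daemon.json looks roughly like
# {"insecure-registries": ["10.0.0.5:5000"]}, where 10.0.0.5 is FLAGS.master
# and 5000 is config.DOCKER_REGISTRY_PORT.
configure_worker_daemon("10.0.0.7")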
Code Example #3
def setup_local_gpu():
    # Only set up the GPU on a Linux host with an NVIDIA card, and only when not rendering in AWS
    if not FLAGS.project_root.startswith("s3://"):
        host_os = get_os_type(config.LOCALHOST)
        if host_os == OSType.LINUX and pyvidia.get_nvidia_device() is not None:
            gpu_script = os.path.join(dir_scripts, "render", "setup_gpu.sh")
            print(glog.green("Setting up GPU environment..."))
            run_command(f"/bin/bash {gpu_script}",
                        run_silently=not FLAGS.verbose)
        else:
            print(
                glog.yellow(
                    "We can only access an Nvidia GPU from a Linux host. Skipping Docker GPU setup"
                ))
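The same Linux-plus-NVIDIA gate can be checked on its own; a minimal sketch, assuming pyvidia is installed (its get_nvidia_device() returns None when no NVIDIA card is present).

import platform

import pyvidia

# Roughly the check setup_local_gpu() performs before running setup_gpu.sh.
if platform.system() == "Linux" and pyvidia.get_nvidia_device() is not None:
    print("Linux host with an NVIDIA GPU; GPU setup would run here.")
else:
    print("No Linux + NVIDIA combination; GPU setup would be skipped.")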
Code Example #4
def run_ui(client, docker_img):
    """Starts the UI.

    Args:
        client (DockerClient): Docker client configured to the host environment.
        docker_img (str): Name of the Docker image.
    """
    if not FLAGS.verbose:
        print(glog.green("Initializing container"), end="")
        loading_context = RepeatedTimer(1,
                                        lambda: print(glog.green("."), end=""))

    host_os = get_os_type(config.LOCALHOST)

    # Setup steps for X11 forwarding vary slightly per the host operating system
    volumes = {
        "/var/run/docker.sock": {
            "bind": "/var/run/docker.sock",
            "mode": "ro"
        }
    }
    if host_os in (OSType.MAC, OSType.LINUX):
        volumes.update(
            {"/tmp/.X11-unix": {
                "bind": "/tmp/.X11-unix",
                "mode": "ro"
            }})
        run_command(f"xhost + {config.LOCALHOST}",
                    run_silently=not FLAGS.verbose)
    if host_os == OSType.LINUX:
        run_command(f"xhost + {config.DOCKER_LOCAL_HOSTNAME}",
                    run_silently=not FLAGS.verbose)

    host_to_docker_path = {FLAGS.project_root: config.DOCKER_INPUT_ROOT}

    project = Project(
        FLAGS.project_root,
        FLAGS.cache,
        FLAGS.csv_path,
        FLAGS.s3_sample_frame,
        FLAGS.s3_ignore_fullsize_color,
        FLAGS.verbose,
    )
    project.verify()

    cmds = [
        "cd scripts/ui",
        f"""python3 -u dep.py \
        --host_os={get_os_type(config.LOCALHOST)} \
        --local_bin={FLAGS.local_bin} \
        --master={FLAGS.master} \
        --password={FLAGS.password} \
        --project_root={FLAGS.project_root} \
        --s3_ignore_fullsize_color={FLAGS.s3_ignore_fullsize_color} \
        --s3_sample_frame={FLAGS.s3_sample_frame} \
        --username={FLAGS.username} \
        --verbose={FLAGS.verbose}""",
    ]

    docker_networks = client.networks.list()
    network_names = [docker_network.name for docker_network in docker_networks]
    if config.DOCKER_NETWORK not in network_names:
        client.networks.create(config.DOCKER_NETWORK, driver="bridge")

    project_address = Address(FLAGS.project_root)
    project_protocol = project_address.protocol
    if project_protocol == "smb":
        mounts = docker_mounts(FLAGS.project_root, host_to_docker_path,
                               FLAGS.username, FLAGS.password)
        cmds = [f"mkdir {config.DOCKER_INPUT_ROOT}"] + mounts + cmds

        local_project_root = None
    elif project_protocol == "s3":
        glog.check_ne(FLAGS.csv_path, "",
                      "csv_path cannot be empty if rendering on AWS")
        aws_util = AWSUtil(FLAGS.csv_path, s3_url=FLAGS.project_root)
        glog.check(
            aws_util.s3_bucket_is_valid(FLAGS.project_root),
            f"Invalid S3 project path: {FLAGS.project_root}",
        )

        volumes.update({
            FLAGS.csv_path: {
                "bind": config.DOCKER_AWS_CREDENTIALS,
                "mode": "rw"
            }
        })

        project_name = project_address.path
        cache_path = os.path.join(os.path.expanduser(FLAGS.cache),
                                  project_name)
        os.makedirs(cache_path, exist_ok=True)
        volumes.update(
            {cache_path: {
                "bind": config.DOCKER_INPUT_ROOT,
                "mode": "rw"
            }})

        local_project_root = cache_path
    else:
        glog.check(
            os.path.isdir(FLAGS.project_root),
            f"Invalid project path: {FLAGS.project_root}",
        )
        volumes.update({
            host_path: {
                "bind": docker_path,
                "mode": "rw"
            }
            for host_path, docker_path in host_to_docker_path.items()
        })
        local_project_root = FLAGS.project_root

    ipc_dir = os.path.join(local_project_root, "ipc")
    os.makedirs(ipc_dir, exist_ok=True)
    volumes.update({ipc_dir: {"bind": config.DOCKER_IPC_ROOT, "mode": "rw"}})

    cmd = f'/bin/bash -c "{" && ".join(cmds)}"'
    global container_name
    display = ":0" if host_os == OSType.LINUX else "host.docker.internal:0"
    runtime = "nvidia" if which("nvidia-docker") else ""
    if host_os != OSType.LINUX:
        display = "host.docker.internal:0"

    if not FLAGS.verbose:
        loading_context.stop()
        print("")

    try:
        container = client.containers.run(
            docker_img,
            command=cmd,
            detach=True,
            environment={"DISPLAY": display},
            runtime=runtime,
            network=config.DOCKER_NETWORK,
            ports={
                config.RABBITMQ_PORT: config.RABBITMQ_PORT,
                config.RABBITMQ_MANAGE_PORT: config.RABBITMQ_MANAGE_PORT,
            },
            privileged=True,
            volumes=volumes,
            stderr=True,
        )
    except docker.errors.APIError as e:
        if "port is already allocated" in str(e):
            raise Exception(
                "Failed to launch UI! Ensure: \n"
                "(1) No other instance of UI is running (check: docker ps) and\n"
                "(2) RabbitMQ is not running on your machine (check: ps aux | grep 'rabbitmq')"
            ) from None
        raise e
    container_name = container.name
    create_viewer_watchdog(client, ipc_dir, local_project_root)
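A hedged sketch of how run_ui is typically driven: docker.from_env() builds a client from the host environment, and the image tag shown here is an example rather than the project's actual tag (which the surrounding setup.py derives from config.DOCKER_IMAGE).

import docker

client = docker.from_env()
run_ui(client, "facebook360_dep:latest")  # example tag; normally config.DOCKER_IMAGE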
Code Example #5
    def on_modified(self, event):
        """When a viewer file is modified from the UI, the appropriate viewer runs on the host.

        Args:
            event (watchdog.FileSystemEvent): Watchdog event for when viewer file has been modified.
        """
        if isinstance(event, DirModifiedEvent):
            return

        ipc_name = os.path.basename(event.src_path)
        host_os = get_os_type(config.LOCALHOST)
        if ipc_name == config.DOCKER_RIFT_VIEWER_IPC and host_os != OSType.WINDOWS:
            print(glog.yellow("RiftViewer is only supported on Windows!"))
            return

        app_name = config.get_app_name(ipc_name)
        if not app_name:
            print(glog.red(f"Invalid IPC name: {ipc_name}"))
            return

        try:
            output_dir = posixpath.join(self.local_project_root,
                                        config.OUTPUT_ROOT_NAME)
            if ipc_name == config.DOCKER_RIFT_VIEWER_IPC:
                fused_dir = posixpath.join(output_dir,
                                           image_type_paths["fused"])
                fused_json = self.get_fused_json(fused_dir)
                if not fused_json:
                    print(
                        glog.red(f"Cannot find fused rig json in {fused_dir}"))
                    return
                cmd_flags = f"""--rig={posixpath.join(fused_dir, fused_json)} \
                    --catalog={posixpath.join(fused_dir, "fused.json")} \
                    --strip_files={posixpath.join(fused_dir, "fused_0.bin")}"""
            elif ipc_name in [
                    config.DOCKER_SMR_IPC, config.DOCKER_SMR_ONSCREEN_IPC
            ]:
                flags_render = self.get_render_flags("export")

                if ipc_name == config.DOCKER_SMR_IPC:
                    flags_render["output"] = posixpath.join(
                        output_dir, image_type_paths["exports"])
                flags_smr = [flag["name"] for flag in bin_to_flags[app_name]]

                cmd_flags = ""
                ignore_onscreen = ["format", "output"]
                for flag in flags_render:
                    if flag in flags_smr:
                        if (flag in ignore_onscreen and ipc_name
                                == config.DOCKER_SMR_ONSCREEN_IPC):
                            continue
                        cmd_flags += f" --{flag}={flags_render[flag]}"
                cmd_flags = cmd_flags.replace(config.DOCKER_INPUT_ROOT,
                                              self.local_project_root)

            cmd = f"{posixpath.join(FLAGS.local_bin, app_name)} {cmd_flags}"
            if os.name != "nt":  # inline GLOG env vars don't work in Powershell/cmd
                cmd = f"GLOG_alsologtostderr=1 GLOG_stderrthreshold=0 {cmd}"
            run_command(cmd)
        except Exception as e:
            print(glog.red(e))
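For context, a handler method like on_modified above is attached to a watchdog observer elsewhere in the project (see create_viewer_watchdog in Code Example #4). A self-contained sketch of that pattern, with a placeholder handler class and watch directory rather than the project's own names:

import os

from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer


class ViewerIpcHandler(FileSystemEventHandler):
    def on_modified(self, event):
        # In the project, this is where the matching viewer binary is launched on the host.
        print(f"IPC file modified: {event.src_path}")


watch_dir = "/tmp/viewer_ipc_demo"  # placeholder for the project's ipc directory
os.makedirs(watch_dir, exist_ok=True)
observer = Observer()
observer.schedule(ViewerIpcHandler(), path=watch_dir, recursive=False)
observer.start()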