Example #1
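Constructor for a Conda-based runtime: it rejects combining env_nosandbox with any other Conda environment option, snapshots the channel and dependency lists into a CondaEnv, enables mamba by default when a mamba binary is on the PATH, and validates env_nosandbox when it is given.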
    def __init__(
        self,
        run_command,
        yaml=None,
        env=None,
        env_nosandbox=None,
        channels=[],
        dependencies=[],
        setup_commands=[],
        use_mamba=None,
    ):
        if env_nosandbox and any([yaml, env, channels, dependencies]):
            util.fail(
                f"'env_nosandbox' cannot be mixed with other Conda environment commands."
            )

        self.yaml = yaml
        self.env = env
        self.env_nosandbox = env_nosandbox
        self.run_command = run_command
        self.conda_env = CondaEnv(channels[:], dependencies[:])
        self.setup_commands = setup_commands
        self.use_mamba = (
            use_mamba if use_mamba is not None else bool(shutil.which("mamba"))
        )

        if env_nosandbox:
            self._validate_env_nosandbox()
Example #2
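down_existing checks whether any of the requested names are already running. Without force it fails with the list of conflicts; with force it asks each conflicting process to go down, then blocks on a condition variable until a system-state watcher reports them stopped or a 3-second timeout expires.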
def down_existing(names: typing.List[str], force: bool):
    def find_active_proc(system_state):
        return [
            name for name in names if name in system_state.procs
            and system_state.procs[name].state != life_cycle.State.STOPPED
        ]

    active_proc = find_active_proc(life_cycle.system_state())
    if not active_proc:
        return

    if not force:
        util.fail(
            f"Conflicting processes already running: {', '.join(active_proc)}")

    for name in active_proc:
        life_cycle.set_ask(name, life_cycle.Ask.DOWN)

    ns = types.SimpleNamespace()
    ns.cv = threading.Condition()
    ns.sat = False

    def callback(system_state):
        if not find_active_proc(system_state):
            with ns.cv:
                ns.sat = True
                ns.cv.notify()

    watcher = life_cycle.system_state_watcher(callback)

    with ns.cv:
        success = ns.cv.wait_for(lambda: ns.sat, timeout=3.0)

    if not success:
        util.fail(f"Existing processes did not down in a timely manner.")
Example #3
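Validates the env_nosandbox setting by activating the named Conda environment in a throwaway bash shell and failing if activation exits non-zero.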
    def _validate_env_nosandbox(self):
        result = subprocess.run(
            f"""
                eval "$(conda shell.bash hook)"
                conda activate {self.env_nosandbox}
            """,
            shell=True,
            executable="/bin/bash",
            stderr=subprocess.PIPE,
        )
        if result.returncode:
            util.fail(f"'env_nosandbox' not valid: {result.stderr}")
Example #4
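transitive_closure expands a set of process names to include every dependency reachable through the deps edges in defined_processes, failing on any unknown name.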
def transitive_closure(proc_names):
    all_proc_names = set()
    fringe = list(proc_names)
    while fringe:
        proc_name = fringe.pop()
        if proc_name in all_proc_names:
            continue
        try:
            fringe.extend(defined_processes[proc_name].deps)
        except KeyError:
            util.fail(f"Unknown process: {proc_name}")
        all_proc_names.add(proc_name)
    return all_proc_names
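
The loop above is a plain worklist closure over the module-level defined_processes table. A minimal self-contained sketch of the same technique, using a hypothetical process table (the names and deps below are made up):

import types

defined = {
    "viewer": types.SimpleNamespace(deps=["camera"]),
    "camera": types.SimpleNamespace(deps=["driver"]),
    "driver": types.SimpleNamespace(deps=[]),
}

def closure(names):
    seen, fringe = set(), list(names)
    while fringe:
        name = fringe.pop()
        if name in seen:
            continue
        fringe.extend(defined[name].deps)  # a KeyError here means an unknown process
        seen.add(name)
    return seen

print(closure(["viewer"]))  # -> {'viewer', 'camera', 'driver'}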
Example #5
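get_proc_names resolves the requested names: with none given it returns every defined process; otherwise it optionally expands dependencies via transitive_closure and fails on unknown names or an empty result.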
def get_proc_names(proc_names, include_deps):
    if not proc_names:
        return defined_processes.keys()

    if include_deps:
        proc_names = transitive_closure(proc_names)
    unknown_proc_names = [
        proc_name for proc_name in proc_names
        if proc_name not in defined_processes
    ]
    if unknown_proc_names:
        util.fail(f"Unknown proc_names: {', '.join(unknown_proc_names)}")
    if not proc_names:
        util.fail(f"No proc_names found")
    return proc_names
Example #6
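Constructor for a Docker-backed process definition: exactly one of image or dockerfile must be provided, and the mount list plus extra build/run keyword arguments are stored for later use.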
    def __init__(self,
                 image=None,
                 dockerfile=None,
                 mount=None,
                 build_kwargs=None,
                 run_kwargs=None):
        if bool(image) == bool(dockerfile):
            util.fail(
                "Docker process must define exactly one of image or dockerfile"
            )
        self.image = image
        self.dockerfile = dockerfile
        # Copy the mutable arguments: mount and the kwargs dicts are modified
        # later, so default [] / {} values must not be shared across instances.
        self.mount = list(mount or [])
        self.build_kwargs = dict(build_kwargs or {})
        self.run_kwargs = dict(run_kwargs or {})
Example #7
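This CLI entry point resolves the requested processes, brings down conflicting ones, optionally clears logs and rebuilds, then daemonizes one launcher per process with a double fork and runs it under asyncio.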
def cli(procs, deps, build, run, force, reset_logs):
    names = get_proc_names(procs, deps)
    names = [name for name in names if defined_processes[name].runtime]
    if not names:
        util.fail(f"No processes found")

    down_existing(names, force)

    if reset_logs:
        for name in names:
            a0.File.remove(f"{name}.log.a0")

    if build:
        for name in names:
            proc_def = defined_processes[name]
            print(f"building {name}...")
            proc_def.runtime._build(name, proc_def)
            print(f"built {name}\n")

    if run:
        for name in names:
            print(f"running {name}...")
            life_cycle.set_ask(name, life_cycle.Ask.UP)

            # Double fork to daemonize the launcher: the parent CLI keeps
            # looping over names while the child detaches into its own session.
            if os.fork() != 0:
                continue

            os.chdir("/")
            os.setsid()
            os.umask(0)

            # The intermediate child exits so the launcher can never reacquire
            # a controlling terminal.
            if os.fork() != 0:
                sys.exit(0)

            proc_def = defined_processes[name]

            # Set up configuration.
            with util.common_env_context(proc_def):
                a0.Cfg(a0.env.topic()).write(json.dumps(proc_def.cfg))
                life_cycle.set_launcher_running(name, True)
                try:
                    asyncio.run(
                        proc_def.runtime._launcher(name, proc_def).run())
                except:
                    # Launcher exceptions are swallowed here; the running flag
                    # is still cleared below and the forked child always exits.
                    pass
                life_cycle.set_launcher_running(name, False)
                sys.exit(0)
Example #8
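_build for the Conda runtime: it creates the environment unless env_nosandbox is set, then runs any setup_commands inside the activated environment from the process root, failing if the shell exits non-zero.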
    def _build(self, name: str, proc_def: ProcDef):
        if not self.env_nosandbox:
            self._create_env(name, proc_def)

        if self.setup_commands:
            print(f"setting up conda env for {name}")
            setup_command = "\n".join(
                [util.shell_join(cmd) for cmd in self.setup_commands]
            )
            result = subprocess.run(
                f"""
                    eval "$(conda shell.bash hook)"
                    conda activate {self._env_name(name)}
                    cd {proc_def.root}
                    {setup_command}
                """,
                shell=True,
                executable="/bin/bash",
                capture_output=not CommonFlags.verbose,
            )
            if result.returncode:
                util.fail(f"Failed to set up conda env: {result.stderr}")
Example #9
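_create_env writes the generated environment spec to a temporary file and rebuilds the Conda environment from scratch with conda or mamba; removing the old environment first works around conda issue #7279, where updating an existing environment does not drop old packages even with --prune.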
    def _create_env(self, name: str, proc_def: ProcDef):
        env_path = f"/tmp/fbrp_conda_{name}.yml"

        env_content = self._generate_env_content(proc_def.root)
        env_content["name"] = self._env_name(name)

        with open(env_path, "w") as env_fp:
            json.dump(env_content, env_fp, indent=2)

        print(f"creating conda env for {name}. This will take a minute...")

        update_bin = "mamba" if self.use_mamba else "conda"
        # https://github.com/conda/conda/issues/7279
        # Updating an existing environment does not remove old packages, even with --prune.
        subprocess.run(
            [update_bin, "env", "remove", "-n", self._env_name(name)],
            capture_output=not CommonFlags.verbose,
        )
        result = subprocess.run(
            [update_bin, "env", "update", "--prune", "-f", env_path],
            capture_output=not CommonFlags.verbose,
        )
        if result.returncode:
            util.fail(f"Failed to set up conda env: {result.stderr}")
Example #10
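The Docker launcher's run coroutine: it assembles environment variables, labels, and mounts (including read-only /etc identity files and /var/lib/sss), creates or replaces the container through aiodocker, then concurrently streams its stdout/stderr logs and watches for container death or a down request.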
    async def run(self):
        container = f"fbrp_{self.name}"

        # Environmental variables.
        env = util.common_env(self.proc_def)
        for kv in self.kwargs.pop("Env", []):
            if "=" in kv:
                k, v = kv.split("=", 1)
                env[k] = v
            else:
                del env[kv]
        env.update(self.proc_def.env)

        # Docker labels.
        labels = {
            "fbrp.name": self.name,
            "fbrp.container": container,
        }
        labels.update(self.kwargs.pop("labels", {}))

        # Mount volumes.
        for f in [
                "group",
                "gshadow",
                "inputrc",
                "localtime",
                "passwd",
                "shadow",
                "subgid",
                "subuid",
                "sudoers",
        ]:
            self.mount.append(f"/etc/{f}:/etc/{f}:ro")

        self.mount.append("/var/lib/sss:/var/lib/sss:ro")

        id_info = pwd.getpwuid(os.getuid())

        # Compose all the arguments.
        run_kwargs = {
            "Image": self.image,
            "Env": ["=".join(kv) for kv in env.items()],
            "Labels": labels,
            "User": f"{id_info.pw_uid}:{id_info.pw_gid}",
            "HostConfig": {
                "Privileged": True,
                "NetworkMode": "host",
                "PidMode": "host",
                "IpcMode": "host",
                "Binds": self.mount,
                "GroupAdd": [str(i) for i in os.getgroups()],
            },
        }
        util.nested_dict_update(run_kwargs, self.kwargs)

        life_cycle.set_state(self.name, life_cycle.State.STARTING)
        docker = aiodocker.Docker()

        try:
            self.proc = await docker.containers.create_or_replace(
                container, run_kwargs)
            await self.proc.start()
            proc_info = await self.proc.show()
        except Exception as e:
            life_cycle.set_state(self.name,
                                 life_cycle.State.STOPPED,
                                 return_code=-1,
                                 error_info=str(e))
            await docker.close()
            util.fail(
                f"Failed to start docker process: name={self.name} reason={e}")

        self.proc_pid = proc_info["State"]["Pid"]
        life_cycle.set_state(self.name, life_cycle.State.STARTED)

        async def log_pipe(logger, pipe):
            async for line in pipe:
                logger(line)

        self.down_task = asyncio.create_task(
            self.down_watcher(self.handle_down))
        try:
            await asyncio.gather(
                log_pipe(util.stdout_logger(),
                         self.proc.log(stdout=True, follow=True)),
                log_pipe(util.stderr_logger(),
                         self.proc.log(stderr=True, follow=True)),
                self.log_psutil(),
                self.death_handler(),
                self.down_task,
            )
        except asyncio.exceptions.CancelledError:
            # death_handler cancelled down listener.
            pass
        await docker.close()
Example #11
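_build for the Docker runtime: it builds the image from a Dockerfile or pulls it if missing, and when the user is an LDAP user or a mount crosses an NFS root, it layers a small netext image on top that installs sssd and symlinks the NFS paths, finally rewriting the mount list.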
    def _build(self, name: str, proc_def: ProcDef):
        docker_api = docker.from_env()
        docker_api.lowlevel = docker.APIClient()

        if self.dockerfile:
            self.image = f"fbrp/{name}"

            dockerfile_path = os.path.join(os.path.dirname(proc_def.rule_file),
                                           self.dockerfile)

            build_kwargs = {
                "tag": self.image,
                "path": proc_def.root,
                "dockerfile": dockerfile_path,
                "rm": True,
            }
            build_kwargs.update(self.build_kwargs)

            for line in docker_api.lowlevel.build(**build_kwargs):
                lineinfo = json.loads(line.decode())
                if CommonFlags.verbose and "stream" in lineinfo:
                    print(lineinfo["stream"].strip())
                elif "errorDetail" in lineinfo:
                    util.fail(json.dumps(lineinfo["errorDetail"], indent=2))
        else:
            try:
                docker_api.images.get(self.image)
            except docker.errors.ImageNotFound:
                try:
                    for line in docker_api.lowlevel.pull(self.image,
                                                         stream=True):
                        lineinfo = json.loads(line.decode())
                        if CommonFlags.verbose and "status" in lineinfo:
                            print(lineinfo["status"].strip())
                except docker.errors.NotFound as e:
                    util.fail(e)

        uses_ldap = util.is_ldap_user()

        mount_map = {}
        for mnt in self.mount:
            parts = mnt.split(":")
            if len(parts) == 3:
                mount_map[parts[0]] = (parts[1], parts[2])
            elif len(parts) == 2:
                mount_map[parts[0]] = (parts[1], "")
            else:
                util.fail(f"Invalid mount: {mnt}")

        nfs_mounts = []
        for host, (container, _) in mount_map.items():
            try:
                os.makedirs(host)
            except FileExistsError:
                pass
            except FileNotFoundError:
                pass

            nfs_root = util.nfs_root(host)
            if nfs_root and nfs_root != host:
                nfs_mounts.append({
                    "host_nfs_root": nfs_root,
                    "container_nfs_root": f"/fbrp_nfs/{util.random_string()}",
                    "host": host,
                    "container": container,
                })

        for nfs_mount in nfs_mounts:
            if nfs_mount["host"] in mount_map:
                del mount_map[nfs_mount["host"]]

        if uses_ldap or nfs_mounts:
            dockerfile = [f"FROM {self.image}"]
            if uses_ldap:
                dockerfile.append(" && ".join([
                    "RUN apt update",
                    "DEBIAN_FRONTEND=noninteractive apt install -y sssd",
                    "rm -rf /var/lib/apt/lists/*",
                ]))
            for nfs_mount in nfs_mounts:
                relpath = os.path.relpath(nfs_mount["host"],
                                          nfs_mount["host_nfs_root"])
                container_fullpath = os.path.join(
                    nfs_mount["container_nfs_root"], relpath)
                mkdir = f"mkdir -p {os.path.dirname(nfs_mount['container'])}"
                link = f"ln -s {container_fullpath} {nfs_mount['container']}"
                dockerfile.append(f"RUN {mkdir} && {link}")

                mount_map[nfs_mount["host_nfs_root"]] = (
                    nfs_mount["container_nfs_root"],
                    "",
                )

            self.image = f"fbrp/{name}:netext"

            for line in docker_api.lowlevel.build(
                    fileobj=io.BytesIO("\n".join(dockerfile).encode("utf-8")),
                    rm=True,
                    tag=self.image,
                    labels={
                        "fbrp.type": "netext",
                        "fbrp.tmp": "1",
                    },
            ):
                lineinfo = json.loads(line.decode())
                if CommonFlags.verbose and "stream" in lineinfo:
                    print(lineinfo["stream"].strip())
                elif "errorDetail" in lineinfo:
                    util.fail(json.dumps(lineinfo["errorDetail"], indent=2))

        self.mount = [
            f"{host_path}:{container_path}:{options}"
            for host_path, (container_path, options) in mount_map.items()
        ]
Example #12
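This command tails process logs: it filters the defined processes, gives each a color, attaches an a0.LogListener per process that prints every packet behind an aligned, colored name column, and blocks until SIGINT or SIGTERM.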
def cli(procs, old):
    # Find all defined processes.
    display_procs = defined_processes.items()
    # Filter out processes that have no runtime defined.
    # These processes were meant to chain or combine other processes, but haven't
    # gotten much use yet. Do we want to keep them?
    display_procs = {
        name: def_
        for name, def_ in display_procs if def_.runtime
    }

    # If processes have been specified, filter out the ones that aren't requested.
    if procs:
        display_procs = {
            name: def_
            for name, def_ in display_procs.items() if name in procs
        }

    # Fail if no processes are left.
    if not display_procs:
        util.fail(f"No processes found to log")

    # Give each process a random color.
    colors = [
        "\u001b[31m",  # "Red"
        "\u001b[32m",  # "Green"
        "\u001b[33m",  # "Yellow"
        "\u001b[34m",  # "Blue"
        "\u001b[35m",  # "Magenta"
        "\u001b[36m",  # "Cyan"
    ]
    random.shuffle(colors)
    reset_color = "\u001b[0m"

    # There will be a left hand column with the process name.
    # Find a common width for the name column.
    width = max(len(name) for name in display_procs)

    log_listeners = []

    def make_listener(i, name, def_):
        # Cache the left hand column.
        prefix = f"{colors[i % len(colors)]}{name}" + " " * (width - len(name))
        msg_tmpl = f"{prefix} | {{msg}}{reset_color}"

        # On message received, print it to stdout.
        def callback(pkt):
            print(msg_tmpl.format(msg=pkt.payload))

        # Create the listener.
        with util.common_env_context(def_):
            log_listeners.append(
                a0.LogListener(
                    name,
                    # TODO(lshamis): Make a flag for log level.
                    a0.LogLevel.DBG,
                    a0.INIT_OLDEST if old else a0.INIT_AWAIT_NEW,
                    a0.ITER_NEXT,
                    callback,
                ))

    # Make a log listener for each process.
    for i, (name, def_) in enumerate(display_procs.items()):
        make_listener(i, name, def_)

    # Block until ctrl-c is pressed.
    def onsignal(signum, frame):
        sys.exit(0)

    signal.signal(signal.SIGINT, onsignal)
    signal.signal(signal.SIGTERM, onsignal)
    signal.pause()