def test_create_container(self):
    config = ConfigLoader.load()
    step = Box(
        {
            "uses": "docker://alpine:3.9",
            "runs": ["echo hello"],
            "id": "kontainer_one",
        },
        default_box=True,
    )
    cid = pu.sanitized_name(step.id, config.wid)

    with DockerRunner(init_docker_client=True, config=config) as dr:
        c = dr._create_container(cid, step)
        self.assertEqual(c.status, "created")
        c.remove()

    step = Box(
        {
            "uses": "docker://alpine:3.9",
            "runs": ["echo", "hello_world"],
            "id": "KoNtAiNeR tWo",
        },
        default_box=True,
    )
    cid = pu.sanitized_name(step.id, config.wid)

    with DockerRunner(init_docker_client=True, config=config) as dr:
        c = dr._create_container(cid, step)
        self.assertEqual(c.status, "created")
        c.remove()
def __init__(self, **kw):
    super(KubernetesRunner, self).__init__(**kw)

    config.load_kube_config()

    c = Configuration()
    c.assert_hostname = False
    Configuration.set_default(c)
    self._kclient = core_v1_api.CoreV1Api()

    _, active_context = config.list_kube_config_contexts()

    self._namespace = self._config.resman_opts.get("namespace", "default")

    self._base_pod_name = pu.sanitized_name(f"pod", self._config.wid)
    self._base_pod_name = self._base_pod_name.replace("_", "-")

    self._init_pod_name = pu.sanitized_name("init-pod", self._config.wid)
    self._init_pod_name = self._init_pod_name.replace("_", "-")

    self._vol_claim_name = f"{self._base_pod_name}-pvc"
    self._vol_size = self._config.resman_opts.get("volume_size", "500Mi")

    self._init_pod_created = False
    self._vol_claim_created = False
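The constructor above only records the claim name and size. For reference, a minimal sketch of how such a persistent volume claim could be created with the official `kubernetes` Python client is shown below; the concrete claim name and access mode are illustrative assumptions, while the "default" namespace and "500Mi" default size come from the snippet.

import kubernetes

# A minimal sketch, assuming a reachable cluster and a kubeconfig on disk.
# "popper-pod-wid123-pvc" and the access mode are hypothetical examples.
kubernetes.config.load_kube_config()
api = kubernetes.client.CoreV1Api()

pvc = kubernetes.client.V1PersistentVolumeClaim(
    metadata=kubernetes.client.V1ObjectMeta(name="popper-pod-wid123-pvc"),
    spec=kubernetes.client.V1PersistentVolumeClaimSpec(
        access_modes=["ReadWriteOnce"],
        resources=kubernetes.client.V1ResourceRequirements(
            requests={"storage": "500Mi"}
        ),
    ),
)

# create the claim in the namespace the runner defaults to
api.create_namespaced_persistent_volume_claim(namespace="default", body=pvc)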
def test_sanitized_name(self):
    name = "test action"
    sanitized_name = pu.sanitized_name(name, '1234')
    self.assertEqual(sanitized_name, "popper_test_action_1234")

    name = "test@action"
    sanitized_name = pu.sanitized_name(name, '1234')
    self.assertEqual(sanitized_name, "popper_test_action_1234")

    name = "test(action)"
    sanitized_name = pu.sanitized_name(name, '1234')
    self.assertEqual(sanitized_name, "popper_test_action__1234")
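From the assertions above, `pu.sanitized_name` replaces every non-alphanumeric character with an underscore and wraps the result as `popper_<name>_<wid>`. A minimal sketch with that behavior (a hypothetical reimplementation matching the test expectations, not the actual popper.utils code) could look like this:

import re


def sanitized_name(name, wid=""):
    # Hypothetical reimplementation: every character that is not a letter or
    # digit becomes an underscore, and the result is wrapped as
    # popper_<name>_<wid>, as the assertions above expect.
    return f"popper_{re.sub(r'[^a-zA-Z0-9]', '_', name)}_{wid}"


assert sanitized_name("test action", "1234") == "popper_test_action_1234"
assert sanitized_name("test(action)", "1234") == "popper_test_action__1234"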
def test_create_container(self):
    config = ConfigLoader.load()
    step = Box(
        {
            "uses": "docker://alpine:3.9",
            "runs": ["echo hello"],
            "id": "kontainer_one",
        },
        default_box=True,
    )
    cid = pu.sanitized_name(step.id, config.wid)

    with PodmanRunner(init_podman_client=True, config=config) as pr:
        c = pr._create_container(cid, step)
        c_status_cmd = [
            "podman",
            "container",
            "inspect",
            "-f",
            str("{{.State.Status}}"),
            c,
        ]
        __, _, c_status = HostRunner._exec_cmd(c_status_cmd, logging=False)
        self.assertEqual(c_status, "configured")
        cmd = ["podman", "container", "rm", c]
        HostRunner._exec_cmd(cmd, logging=False)

    step = Box(
        {
            "uses": "docker://alpine:3.9",
            "runs": ["echo", "hello_world"],
            "id": "KoNtAiNeR tWo",
        },
        default_box=True,
    )
    cid = pu.sanitized_name(step.id, config.wid)

    with PodmanRunner(init_podman_client=True, config=config) as pr:
        c = pr._create_container(cid, step)
        c_status_cmd = [
            "podman",
            "container",
            "inspect",
            "-f",
            str("{{.State.Status}}"),
            c,
        ]
        __, _, c_status = HostRunner._exec_cmd(c_status_cmd, logging=False)
        self.assertEqual(c_status, "configured")
        cmd = ["podman", "container", "rm", c]
        HostRunner._exec_cmd(cmd, logging=False)
def run(self, step): """Execute the given step in docker.""" cid = pu.sanitized_name(step.id, self._config.wid) container = self._find_container(cid) if container and not self._config.reuse and not self._config.dry_run: container.remove(force=True) container = self._create_container(cid, step) log.info(f"[{step.id}] docker start") if self._config.dry_run: return 0 self._spawned_containers.add(container) try: container.start() cout = container.logs(stream=True) for line in cout: log.step_info(line.decode().rstrip()) e = container.wait()["StatusCode"] except Exception as exc: log.fail(exc) return e
def run(self, step): """Executes the given step in podman.""" cid = pu.sanitized_name(step.id, self._config.wid) container = self._find_container(cid) if not container and self._config.reuse: log.fail( f"Cannot find an existing container for step '{step.id}' to be reused" ) if container and not self._config.reuse and not self._config.dry_run: cmd = ["podman", "rm", "-f", container] HostRunner._exec_cmd(cmd, logging=False) container = None if not container and not self._config.reuse: container = self._create_container(cid, step) log.info(f"[{step.id}] podman start") if self._config.dry_run: return 0 self._spawned_containers.add(container) cmd = ["podman", "start", "-a", container] _, e, _ = HostRunner._exec_cmd(cmd) return e
def run(self, step):
    self._setup_singularity_cache()
    cid = pu.sanitized_name(step.id, self._config.wid) + ".sif"
    self._container = os.path.join(self._singularity_cache, cid)

    build, img, build_ctx_path = self._get_build_info(step)

    HostRunner._exec_cmd(["rm", "-rf", self._container])

    if not self._config.dry_run:
        if build:
            recipefile = self._get_recipe_file(build_ctx_path, cid)
            HostRunner._exec_cmd(
                [
                    "singularity",
                    "build",
                    "--fakeroot",
                    self._container,
                    recipefile,
                ],
                cwd=build_ctx_path,
            )
        else:
            HostRunner._exec_cmd(["singularity", "pull", self._container, img])

    cmd = [self._create_cmd(step, cid)]

    self._spawned_containers.add(cid)
    ecode = self._submit_batch_job(cmd, step)
    self._spawned_containers.remove(cid)

    return ecode
def run(self, step):
    self._setup_singularity_cache()
    cid = pu.sanitized_name(step['name'], self._config.wid) + '.sif'
    self._container = os.path.join(self._singularity_cache, cid)

    build, img, build_ctx_path = self._get_build_info(step)

    HostRunner._exec_cmd(['rm', '-rf', self._container])

    if not self._config.dry_run:
        if build:
            recipefile = self._get_recipe_file(build_ctx_path, cid)
            HostRunner._exec_cmd([
                'singularity', 'build', '--fakeroot',
                self._container, recipefile
            ], cwd=build_ctx_path)
        else:
            HostRunner._exec_cmd(
                ['singularity', 'pull', self._container, img])

    cmd = [self._create_cmd(step, cid)]

    self._spawned_containers.add(cid)
    ecode = self._submit_batch_job(cmd, step)
    self._spawned_containers.remove(cid)

    return ecode
def run(self, reuse=False): """Parent function to handle the execution of the action. Args: reuse (bool): Whether to reuse containers or not. """ self.check_executable('singularity') singularity_cache = SingularityRunner.setup_singularity_cache(self.wid) if reuse: log.fail('Reusing containers in singularity runtime is ' 'currently not supported.') build, image, build_source = self.get_build_resources() container_path = os.path.join( singularity_cache, pu.sanitized_name(image, self.wid) + '.sif' ) if build: self.singularity_build_from_recipe(build_source, container_path) else: self.singularity_build_from_image(image, container_path) e = self.singularity_start(container_path) self.handle_exit(e)
def __init__(self, action, workspace, env, dry, skip_pull, wid):
    import vagrant
    super(VagrantRunner, self).__init__(
        action, workspace, env, dry, skip_pull, wid)
    self.cid = pu.sanitized_name(self.action['name'], wid)
    VagrantRunner.actions.add(self.action['name'])
def run(self, reuse=False): """Parent function to handle the execution of the action. Args: reuse(bool, optional): True if existing containers are to be reused.(Default value = False) Returns: None """ self.check_executable('singularity') singularity_cache = SingularityRunner.setup_singularity_cache(self.wid) if reuse: log.fail('Reusing containers in singularity engine is ' 'currently not supported.') build, image, build_source = self.get_build_resources() container_path = os.path.join( singularity_cache, pu.sanitized_name(image, self.wid) + '.sif') if build: self.singularity_build_from_recipe(build_source, container_path) else: self.singularity_build_from_image(image, container_path) e = self.singularity_start(container_path) self.handle_exit(e)
def run(self, step):
    self._setup_singularity_cache()
    cid = pu.sanitized_name(step.id, self._config.wid) + ".sif"
    self._container = os.path.join(self._singularity_cache, cid)

    build, img, _, _, build_ctx_path = self._get_build_info(step)

    if "shub://" in step.uses or "library://" in step.uses:
        build = False
        img = step.uses
        build_ctx_path = None

    self._exec_srun(["rm", "-rf", self._container], step)

    if build:
        recipefile = self._get_recipe_file(build_ctx_path, cid)
        log.info(
            f"srun singularity build {self._container}",
            extra={"pretag": f"[{step.id}]"},
        )
        self._exec_srun(
            [
                "singularity",
                "build",
                "--fakeroot",
                self._container,
                recipefile,
            ],
            step,
            cwd=os.path.dirname(recipefile),
        )
    else:
        log.info(
            f"srun singularity pull {self._container}",
            extra={"pretag": f"[{step.id}]"},
        )
        self._exec_srun(["singularity", "pull", self._container, img], step)

    cmd = self._create_cmd(step, cid)

    self._spawned_containers.add(cid)

    if self._config.resman_opts.get(step.id, {}).get("mpi", True):
        log.info(f"sbatch {' '.join(cmd)}", extra={"pretag": f"[{step.id}]"})
        ecode = self._exec_mpi(cmd, step)
    else:
        log.info(f"srun {' '.join(cmd)}", extra={"pretag": f"[{step.id}]"})
        ecode = self._exec_srun(cmd, step, logging=True)

    self._spawned_containers.remove(cid)

    return ecode
def test_create_container(self):
    config = PopperConfig()
    step = {
        'uses': 'docker://alpine:3.9',
        'runs': ['echo hello'],
        'name': 'kontainer_one'
    }
    cid = pu.sanitized_name(step['name'], config.wid)

    with DockerRunner(init_docker_client=True, config=config) as dr:
        c = dr._create_container(cid, step)
        self.assertEqual(c.status, 'created')
        c.remove()
def run(self, step):
    self._setup_singularity_cache()
    cid = pu.sanitized_name(step['name'], self._config.wid) + '.sif'
    self._container = os.path.join(self._singularity_cache, cid)

    exists = os.path.exists(self._container)
    if exists and not self._config.dry_run and not self._config.skip_pull:
        os.remove(self._container)

    self._create_container(step, cid)
    ecode = self._singularity_start(step, cid)
    return ecode
def test_create_container(self):
    config = ConfigLoader.load()
    # NOTE: part of this snippet (including the step_two definition) is
    # redacted/elided in the original.
    step_one = Box(
        {
            "uses": "docker://*****:*****@master",
            "args": ["ls"],
            "id": "kontainer_two",
        },
        default_box=True,
    )
    cid_one = pu.sanitized_name(step_one.id, config.wid)
    cid_two = pu.sanitized_name(step_two.id, config.wid)

    with SingularityRunner(config=config) as sr:
        sr._setup_singularity_cache()
        sr._create_container(step_one, cid_one)
        self.assertEqual(
            os.path.exists(os.path.join(sr._singularity_cache, cid_one)), True)
        os.remove(os.path.join(sr._singularity_cache, cid_one))

    with SingularityRunner(config=config) as sr:
        sr._setup_singularity_cache()
        sr._create_container(step_one, cid_two)
        self.assertEqual(
            os.path.exists(os.path.join(sr._singularity_cache, cid_two)), True)
        os.remove(os.path.join(sr._singularity_cache, cid_two))
def test_create_container(self):
    config = PopperConfig()
    config.wid = "abcd"
    # NOTE: part of this snippet (including the step_two definition) is
    # redacted/elided in the original.
    step_one = {
        'uses': 'docker://*****:*****@master',
        'args': ['ls'],
        'name': 'kontainer_two',
        'repo_dir': f'{os.environ["HOME"]}/.cache/popper/abcd/github.com/popperized/bin',
        'step_dir': 'sh'
    }
    cid_one = pu.sanitized_name(step_one['name'], config.wid)
    cid_two = pu.sanitized_name(step_two['name'], config.wid)

    with SingularityRunner(config=config) as sr:
        sr._setup_singularity_cache()
        c_one = sr._create_container(step_one, cid_one)
        self.assertEqual(
            os.path.exists(os.path.join(sr._singularity_cache, cid_one)), True)
        os.remove(os.path.join(sr._singularity_cache, cid_one))

    with SingularityRunner(config=config) as sr:
        sr._setup_singularity_cache()
        c_two = sr._create_container(step_one, cid_two)
        self.assertEqual(
            os.path.exists(os.path.join(sr._singularity_cache, cid_two)), True)
        os.remove(os.path.join(sr._singularity_cache, cid_two))
def _submit_batch_job(self, cmd, step):
    job_name = pu.sanitized_name(step.id, self._config.wid)
    temp_dir = "/tmp/popper/slurm/"
    os.makedirs(temp_dir, exist_ok=True)

    job_script = os.path.join(temp_dir, f"{job_name}.sh")
    out_file = os.path.join(temp_dir, f"{job_name}.out")

    # create/truncate log
    with open(out_file, "w"):
        pass

    with open(job_script, "w") as f:
        f.write("#!/bin/bash\n")
        f.write("\n".join(cmd))

    sbatch_cmd = f"sbatch --wait --job-name {job_name} --output {out_file}"
    sbatch_cmd = sbatch_cmd.split()

    for k, v in self._config.resman_opts.get(step.id, {}).items():
        sbatch_cmd.append(pu.key_value_to_flag(k, v))

    sbatch_cmd.append(job_script)

    log.info(f'[{step.id}] {" ".join(sbatch_cmd)}')

    if self._config.dry_run:
        return 0

    self._spawned_jobs.add(job_name)

    # start a tail (background) process on the output file
    self._start_out_stream(out_file)

    # submit the job and wait
    _, ecode, output = HostRunner._exec_cmd(sbatch_cmd, logging=False)

    # kill the tail process
    self._stop_out_stream()

    self._spawned_jobs.remove(job_name)

    return ecode
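To make the flow above concrete, here is a small standalone sketch of the artifacts the method would generate for a hypothetical step. The job name, the two commands written into the script, and the omission of the `resman_opts` flags are assumptions for illustration; the exact output of `pu.key_value_to_flag` is not shown in the snippet, so it is left out.

import os

# Hypothetical values: a step id "one" and workflow id "abc123".
job_name = "popper_one_abc123"
temp_dir = "/tmp/popper/slurm/"
os.makedirs(temp_dir, exist_ok=True)

job_script = os.path.join(temp_dir, f"{job_name}.sh")
out_file = os.path.join(temp_dir, f"{job_name}.out")

# The job script is just "#!/bin/bash" followed by the step's commands
# (these two commands are made up for the example).
with open(job_script, "w") as f:
    f.write("#!/bin/bash\n")
    f.write("\n".join([f"docker rm -f {job_name} || true",
                       "docker pull alpine:3.9"]))

# Base submission command before any resman_opts flags are appended.
sbatch_cmd = f"sbatch --wait --job-name {job_name} --output {out_file}".split()
sbatch_cmd.append(job_script)
print(" ".join(sbatch_cmd))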
def _exec_mpi(self, cmd, step, **kwargs):
    self._set_config_vars(step)
    job_name = pu.sanitized_name(step.id, self._config.wid)
    mpi_cmd = ["mpirun", f"{' '.join(cmd)}"]

    job_script = os.path.join(f"{job_name}.sh")
    out_file = os.path.join(f"{job_name}.out")

    with open(out_file, "w"):
        pass

    with open(job_script, "w") as f:
        f.write("#!/bin/bash\n")
        f.write(f"#SBATCH --job-name={job_name}\n")
        f.write(f"#SBATCH --output={out_file}\n")
        f.write(f"#SBATCH --nodes={self._nodes}\n")
        f.write(f"#SBATCH --ntasks={self._ntasks}\n")
        f.write(f"#SBATCH --ntasks-per-node={self._ntasks_per_node}\n")
        if self._nodelist:
            f.write(f"#SBATCH --nodelist={self._nodelist}\n")
        f.write(" ".join(mpi_cmd))

    sbatch_cmd = [
        "sbatch",
        "--wait",
    ]
    sbatch_cmd.extend(self._get_resman_kwargs(step))
    sbatch_cmd.extend([job_script])

    log.debug(f"Command: {sbatch_cmd}")

    if self._config.dry_run:
        return 0

    self._spawned_jobs.add(job_name)
    self._start_out_stream(out_file)

    _, ecode, _ = HostRunner._exec_cmd(sbatch_cmd, **kwargs)

    self._stop_out_stream()
    self._spawned_jobs.remove(job_name)

    return ecode
def test_find_container(self):
    config = ConfigLoader.load()
    step = Box(
        {
            "uses": "docker://alpine:3.9",
            "runs": ["echo hello"],
            "id": "kontainer_one",
        },
        default_box=True,
    )
    cid = pu.sanitized_name(step.id, config.wid)

    with PodmanRunner(init_podman_client=True, config=config) as pr:
        c = pr._find_container(cid)
        self.assertEqual(c, None)

    with PodmanRunner(init_podman_client=True, config=config) as pr:
        container = pr._create_container(cid, step)
        c = pr._find_container(cid)
        self.assertEqual(c, container)
        cmd = ["podman", "container", "rm", "-f", cid]
        HostRunner._exec_cmd(cmd, logging=False)
def run(self, step): """Execute the given step via slurm in the docker engine.""" cid = pu.sanitized_name(step['name'], self._config.wid) cmd = [] build, img, tag, dockerfile = self._get_build_info(step) cmd.append(f'docker rm -f {cid} || true') if build: cmd.append(f'docker build -t {img}:{tag} {dockerfile}') elif not self._config.skip_pull and not step.get('skip_pull', False): cmd.append(f'docker pull {img}:{tag}') cmd.append(self._create_cmd(step, f'{img}:{tag}', cid)) cmd.append(f'docker start --attach {cid}') self._spawned_containers.add(cid) ecode = self._submit_batch_job(cmd, step) self._spawned_containers.remove(cid) return ecode
def run(self, step): """Execute the given step via slurm in the docker engine.""" cid = pu.sanitized_name(step.id, self._config.wid) cmd = [] build, img, tag, build_ctx_path = self._get_build_info(step) cmd.append(f"docker rm -f {cid} || true") if build: cmd.append(f"docker build -t {img}:{tag} {build_ctx_path}") elif not self._config.skip_pull and not step.skip_pull: cmd.append(f"docker pull {img}:{tag}") cmd.append(self._create_cmd(step, f"{img}:{tag}", cid)) cmd.append(f"docker start --attach {cid}") self._spawned_containers.add(cid) ecode = self._submit_batch_job(cmd, step) self._spawned_containers.remove(cid) return ecode
def run(self, step): """Execute the given step in docker.""" cid = pu.sanitized_name(step.id, self._config.wid) container = self._find_container(cid) if not container and self._config.reuse: log.fail( f"Cannot find an existing container for step '{step.id}' to be reused" ) if container and not self._config.reuse and not self._config.dry_run: container.remove(force=True) container = None if not container and not self._config.reuse: container = self._create_container(cid, step) log.info("docker start", extra={"pretag": f"[{step.id}]"}) if self._config.dry_run: return 0 self._spawned_containers.add(container) try: container.start() if self._config.pty: dockerpty.start(self._d.api, container.id) else: cout = container.logs(stream=True) for line in cout: log.step_info(line.decode().rstrip()) e = container.wait()["StatusCode"] except Exception as exc: log.fail(exc) return e
def run(self, step): """Execute the given step in docker.""" cid = pu.sanitized_name(step['name'], self._config.wid) container = self._find_container(cid) if container and not self._config.reuse and not self._config.dry_run: container.remove(force=True) container = self._create_container(cid, step) log.info(f'[{step["name"]}] docker start') if self._config.dry_run: return 0 self._spawned_containers.add(container) container.start() cout = container.logs(stream=True) for line in cout: log.step_info(line.decode().rstrip()) e = container.wait()['StatusCode'] return e
def __init__(self, action, workspace, env, dry, skip_pull, wid):
    super(DockerRunner, self).__init__(
        action, workspace, env, dry, skip_pull, wid)
    self.cid = pu.sanitized_name(self.action['name'], wid)
    self.container = None
def test_singularity_start(self):
    repo = self.mk_repo()
    conf = PopperConfig(engine_name='singularity',
                        workspace_dir=repo.working_dir)
    step = {
        'uses': 'docker://*****:*****@master'
    # NOTE: the rest of this step/workflow definition (and the creation of the
    # runner `r`) is redacted/elided in the original snippet.
          args: 'ls'
    """)
    wf.parse()
    r.run(wf)

    wf = YMLWorkflow("""
    version: '1'
    steps:
    - uses: 'docker://alpine:3.9'
      runs: ['sh', '-c', 'echo $FOO > hello.txt ; pwd']
      env: { FOO: bar }
    """)
    wf.parse()
    r.run(wf)

    with open(os.path.join(repo.working_dir, 'hello.txt'), 'r') as f:
        self.assertEqual(f.read(), 'bar\n')

    wf = YMLWorkflow("""
    version: '1'
    steps:
    - uses: 'docker://alpine:3.9'
      runs: 'nocommandisnamedlikethis'
    """)
    wf.parse()
    self.assertRaises(SystemExit, r.run, wf)

    repo.close()
def test_singularity_start(self):
    repo = self.mk_repo()
    conf = ConfigLoader.load(engine_name="singularity",
                             workspace_dir=repo.working_dir)
    step = Box(
        {
            "uses": "docker://*****:*****@master",
            "args": ["ls"],
    # NOTE: the rest of this snippet (including the first wf_data definition
    # and the creation of the runner `r`) is redacted/elided in the original.
        }]
    }
    r.run(WorkflowParser.parse(wf_data=wf_data))

    wf_data = {
        "steps": [{
            "uses": "docker://alpine:3.9",
            "args": ["sh", "-c", "echo $FOO > hello.txt ; pwd"],
            "env": {"FOO": "bar"},
        }]
    }
    r.run(WorkflowParser.parse(wf_data=wf_data))

    with open(os.path.join(repo.working_dir, "hello.txt"), "r") as f:
        self.assertEqual(f.read(), "bar\n")

    wf_data = {
        "steps": [{
            "uses": "docker://alpine:3.9",
            "args": ["nocommandisnamedlikethis"],
        }]
    }
    self.assertRaises(SystemExit, r.run, WorkflowParser.parse(wf_data=wf_data))

    repo.close()