def docker_build(self, tag, path):
    """Build a docker image from the given build context and tag it.

    Args:
        tag (str): The tag to apply to the built image.
        path (str): Path to the build context (Dockerfile directory).
    """
    log.info('{}[{}] docker build -t {} {}'.format(
        self.msg_prefix, self.action['name'], tag, path))
    if self.dry_run:
        return
    self.docker_client.images.build(path=path, tag=tag, rm=True, pull=True)
def _build_and_push_image(self, step):
    """Clones the action repository, builds the image and pushes the image
    to an image registry for a Pod to use.

    Args:
        step: The workflow step whose container image is needed.

    Returns:
        str: Fully-qualified image reference (``registry/user/img:tag``),
        or the image named by ``step.uses`` when no build is needed.

    Raises:
        Exception: If the 'registry_user' option is missing from the
        resource-manager configuration.
    """
    needs_build, _, img, tag, build_ctx_path = self._get_build_info(step)

    if not needs_build:
        # Nothing to build; strip the scheme and use the image directly.
        return step.uses.replace("docker://", "")

    registry = self._config.resman_opts.get("registry", "docker.io")

    if not self._config.resman_opts.registry_user:
        raise Exception(
            "Expecting 'registry_user' option in configuration.")

    # Flatten any namespace in the image name so it nests under the user.
    img = img.replace("/", "_")
    img = f"{registry}/{self._config.resman_opts.registry_user}/{img}"

    log.info(f"[{step.id}] docker build -t {img}:{tag} {build_ctx_path}")
    self._d.images.build(
        path=build_ctx_path, tag=f"{img}:{tag}", rm=True, pull=True)

    log.info(f"[{step.id}] docker push {img}:{tag}")
    self._d.images.push(img, tag=tag, decode=True)

    log.debug(f"image built {img}:{tag}")

    return f"{img}:{tag}"
def singularity_build_from_recipe(self, build_source, container_path):
    """Builds a container image from a recipefile.

    Args:
        build_source (str): The path to the build source, which contains
            all the resources required to build the Docker image.
        container_path (str): The path of the built container.
    """
    build_dest = os.path.dirname(container_path)
    container = os.path.basename(container_path)
    recipefile = os.path.join(
        build_source, 'Singularity.{}'.format(self.wid))

    log.info('{}[{}] singularity build {} {}'.format(
        self.msg_prefix, self.action['name'], container, recipefile))

    if self.dry_run:
        return

    # Only build when the container image is not already present.
    if not self.singularity_exists(container_path):
        SingularityRunner.build_from_recipe(
            build_source, build_dest, container, self.wid)
def import_from_repo(action_ref, project_root):
    """Clone the repository referenced by `action_ref` and copy the folder
    containing its main.workflow file into `project_root`."""
    url, service, user, repo, action_dir, version = scm.parse(action_ref)
    cloned_project_dir = os.path.join("/tmp", service, user, repo)
    scm.clone(url, user, repo, cloned_project_dir, version)

    if action_dir:
        path_to_workflow = os.path.join(cloned_project_dir, action_dir)
        if not os.path.basename(path_to_workflow).endswith('.workflow'):
            path_to_workflow = os.path.join(path_to_workflow,
                                            'main.workflow')
        if not os.path.isfile(path_to_workflow):
            log.fail("Unable to find a main.workflow file")
    else:
        # No directory given: look in the two conventional locations.
        candidates = [
            os.path.join(cloned_project_dir, "main.workflow"),
            os.path.join(cloned_project_dir, ".github/main.workflow"),
        ]
        for candidate in candidates:
            if os.path.isfile(candidate):
                path_to_workflow = candidate
                break
        else:
            log.fail("Unable to find main.workflow file")

    if '.github/' in path_to_workflow:
        path_to_copy = os.path.dirname(os.path.dirname(path_to_workflow))
    else:
        path_to_copy = os.path.dirname(path_to_workflow)

    copy_tree(path_to_copy, project_root)
    log.info("Successfully imported from {}".format(action_ref))
def run(self, wf):
    """Run the given workflow.

    Args:
      wf(Workflow): workflow to be executed

    Returns:
        None
    """
    self._process_secrets(wf)
    self._clone_repos(wf)

    for step in wf.steps:
        log.debug(f"Executing step:\n{pu.prettystr(step)}")

        # Shell steps run on the host; everything else uses the
        # configured container engine.
        runner = ("host" if step.uses == "sh"
                  else self._config.engine_name)
        e = self._step_runner(runner, step).run(step)

        # 78 is the conventional "neutral"/stop exit code.
        if e not in (0, 78):
            log.fail(f"Step '{step.id}' failed ('{e}') !")

        log.info(f"Step '{step.id}' ran successfully !")

        if e == 78:
            break

    log.info("Workflow finished successfully.")
def run_pipeline(action, wfile, skip_clone, skip_pull, skip, workspace,
                 reuse, dry_run, parallel, with_dependencies, on_failure):
    """Instantiate and run the workflow defined in `wfile`, falling back to
    the `on_failure` action when the run exits with a non-zero code.

    Args:
        action (str): Name of a single action to run (all if falsy).
        wfile (str): Path to the .workflow file.
        skip_clone/skip_pull/skip/workspace/reuse/dry_run/parallel/
        with_dependencies: Options forwarded to WorkflowRunner.run().
        on_failure (str): Action to run if the workflow fails.
    """
    # Initialize a Workflow. During initialization all the validation
    # takes place automatically.
    wf = Workflow(wfile)
    pipeline = WorkflowRunner(wf)

    # Saving workflow instance for signal handling
    popper.cli.interrupt_params['parallel'] = parallel

    if reuse:
        log.warn("Using --reuse ignores any changes made to an action's "
                 "logic or to an action block in the .workflow file.")

    if parallel:
        if sys.version_info[0] < 3:
            log.fail('--parallel is only supported on Python3')
        log.warn("Using --parallel may result in interleaved output. "
                 "You may use --quiet flag to avoid confusion.")

    try:
        pipeline.run(action, skip_clone, skip_pull, skip, workspace,
                     reuse, dry_run, parallel, with_dependencies)
    except SystemExit as e:
        # BUG FIX: the original used `e.code is not 0`, which compares
        # object identity rather than value and is unreliable for ints;
        # use `!=` so any non-zero exit triggers the failure handler.
        if (e.code != 0) and on_failure:
            pipeline.run(on_failure, skip_clone, skip_pull, list(),
                         workspace, reuse, dry_run, parallel,
                         with_dependencies)
        else:
            raise

    if action:
        log.info('Action "{}" finished successfully.'.format(action))
    else:
        log.info('Workflow finished successfully.')
def run(self, step):
    """Execute the given step in docker.

    Args:
        step: The workflow step to execute.

    Returns:
        int: The container's exit status code (0 on dry runs).
    """
    cid = pu.sanitized_name(step.id, self._config.wid)

    container = self._find_container(cid)

    # A stale container from a previous run is removed unless reuse is
    # requested (or this is a dry run).
    if container and not self._config.reuse and not self._config.dry_run:
        container.remove(force=True)

    container = self._create_container(cid, step)

    log.info(f"[{step.id}] docker start")

    if self._config.dry_run:
        return 0

    self._spawned_containers.add(container)

    # BUG FIX: default to a failure code so `e` is always bound; in the
    # original, an exception raised before container.wait() left `e`
    # undefined at `return e` (relevant if log.fail() does not exit).
    e = 1
    try:
        container.start()
        cout = container.logs(stream=True)
        for line in cout:
            log.step_info(line.decode().rstrip())

        e = container.wait()["StatusCode"]
    except Exception as exc:
        log.fail(exc)
    return e
def run(self, step):
    """Executes the given step in podman."""
    cid = pu.sanitized_name(step.id, self._config.wid)

    container = self._find_container(cid)

    # Reuse was requested but there is nothing to reuse.
    if self._config.reuse and not container:
        log.fail(
            f"Cannot find an existing container for step '{step.id}' to be reused"
        )

    # Remove any stale container before creating a fresh one.
    if container and not self._config.reuse and not self._config.dry_run:
        HostRunner._exec_cmd(["podman", "rm", "-f", container],
                             logging=False)
        container = None

    if not self._config.reuse and not container:
        container = self._create_container(cid, step)

    log.info(f"[{step.id}] podman start")

    if self._config.dry_run:
        return 0

    self._spawned_containers.add(container)

    _, ecode, _ = HostRunner._exec_cmd(["podman", "start", "-a", container])
    return ecode
def _singularity_start(self, step, cid):
    """Run or exec the container for the given step, streaming its output.

    Returns:
        int: 0 on success (or dry run), otherwise the process returncode.
    """
    env = self._prepare_environment(step)

    # Expose the step's environment to the singularity process.
    for name, value in env.items():
        os.environ[name] = str(value)

    runs = list(step.runs)
    args = list(step.args)

    if runs:
        commands, start_fn = runs, self._s.execute
        info = f"[{step.id}] singularity exec {cid} {runs}"
    else:
        commands, start_fn = args, self._s.run
        info = f"[{step.id}] singularity run {cid} {args}"

    log.info(info)

    if self._config.dry_run:
        return 0

    options = self._get_container_options()
    output = start_fn(self._container, commands, stream=True,
                      options=options)

    try:
        for line in output:
            log.step_info(line.strip("\n"))
        ecode = 0
    except CalledProcessError as ex:
        ecode = ex.returncode

    return ecode
def cli(ctx, wfile, recursive):
    """
    Creates a graph in the .dot format representing the workflow.
    """
    def add_to_graph(graph_str, wf, parent, children):
        """Recursively goes through "next" and adds corresponding actions
        """
        _parent = parent.replace(' ', '_').replace('-', '_')
        for child in children:
            _child = child.replace(' ', '_').replace('-', '_')
            graph_str += " {} -> {};\n".format(_parent, _child)
            for nxt in wf.get_action(child).get('next', []):
                graph_str = add_to_graph(graph_str, wf, child, [nxt])
        return graph_str

    if recursive:
        wfile_list = pu.find_recursive_wfile()
    else:
        wfile_list = [pu.find_default_wfile(wfile)]

    for wfile in wfile_list:
        pipeline = WorkflowRunner(wfile, False, False, False, False, True)
        wf = pipeline.wf
        workflow_name = wf.name.replace(' ', '_').replace('-', '_')
        graph_str = add_to_graph("", wf, workflow_name, wf.root)
        log.info("digraph G {\n" + graph_str + "}\n")
def cli(ctx, service):
    """Generates configuration files for distinct CI services. This command
    needs to be executed on the root of your Git repository folder.

    Args:
        ctx: Click context.
        service (str): Name of the CI service to generate config for.
    """
    if service not in ci_files:
        log.fail("Unrecognized service " + service)

    project_root = scm.get_git_root_folder()

    if project_root != os.getcwd():
        log.fail('This command needs to be executed on the root of your '
                 'Git project folder (where the .git/ folder is located).')

    for ci_file, ci_file_content in pu.get_items(ci_files[service]):
        # NOTE: removed the original's no-op `ci_file_content =
        # ci_file_content` self-assignment (dead code).
        ci_file = os.path.join(project_root, ci_file)

        # create parent folder
        if not os.path.isdir(os.path.dirname(ci_file)):
            os.makedirs(os.path.dirname(ci_file))

        # write content
        with open(ci_file, 'w') as f:
            f.write(ci_file_content)

    log.info('Wrote {} configuration successfully.'.format(service))
def stop_running_tasks(self):
    """Stop containers started by Popper."""
    for container in self._spawned_containers:
        log.info(f"Stopping container {container}")
        _, ecode, _ = HostRunner._exec_cmd(
            ["podman", "stop", container], logging=False)
        # Best-effort: a failed stop is reported but not fatal.
        if ecode != 0:
            log.warning(f"Failed to stop the {container} container")
def download_actions(self):
    """Clone actions that reference a repository."""
    cloned = set()
    infoed = False

    for _, a in self.wf.actions:
        uses = a['uses']
        # Docker/Singularity references and local paths need no clone.
        if 'docker://' in uses or 'shub://' in uses or './' in uses:
            continue

        url, service, usr, repo, action_dir, version = scm.parse(a['uses'])

        repo_parent_dir = os.path.join(
            self.actions_cache_path, service, usr)

        a['repo_dir'] = os.path.join(repo_parent_dir, repo)
        a['action_dir'] = action_dir

        if self.dry_run:
            continue

        if not infoed:
            log.info('[popper] cloning action repositories')
            infoed = True

        key = '{}/{}'.format(usr, repo)
        if key in cloned:
            continue

        if not os.path.exists(repo_parent_dir):
            os.makedirs(repo_parent_dir)

        log.info('[popper] - {}/{}/{}@{}'.format(url, usr, repo, version))
        scm.clone(url, usr, repo, repo_parent_dir, version)
        cloned.add(key)
def docker_create(self, img):
    """Create (but do not start) a docker container from an image.

    Args:
        img (str): The image to create the container from.

    Returns:
        None
    """
    log.info('{}[{}] docker create {} {}'.format(
        self.msg_prefix, self.action['name'], img,
        ' '.join(self.action.get('args', ''))))

    if self.dry_run:
        return

    # BUG FIX: copy the dict so that the updates below do not mutate the
    # 'env' mapping stored inside the action itself (the original aliased
    # it, leaking secrets/HOME into the workflow data structure).
    env_vars = dict(self.action.get('env', {}))

    # Secrets are resolved from the host environment.
    for s in self.action.get('secrets', []):
        env_vars.update({s: os.environ.get(s)})

    for e, v in self.env.items():
        env_vars.update({e: v})

    env_vars.update({'HOME': os.environ['HOME']})

    # Bind-mount the workspace, home, and the docker socket at identical
    # paths inside the container.
    volumes = [self.workspace, os.environ['HOME'],
               '/var/run/docker.sock']

    log.debug('Invoking docker_create() method')

    self.container = self.docker_client.containers.create(
        image=img,
        command=self.action.get('args', None),
        name=self.cid,
        volumes={v: {'bind': v} for v in volumes},
        working_dir=self.workspace,
        environment=env_vars,
        entrypoint=self.action.get('runs', None),
        detach=True)
def singularity_build_from_image(self, image, container_path):
    """Build a container from Docker image.

    Args:
        image(str): The docker image to build the container from.
        container_path(str): The path of the built container.

    Returns:
        None
    """
    container = os.path.basename(container_path)

    if self.skip_pull:
        # Pull skipped: the container must already exist locally.
        if not self.singularity_exists(container_path):
            log.fail(
                'The required singularity container \'{}\' was not found '
                'locally.'.format(container_path))
        return

    log.info('{}[{}] singularity pull {} {}'.format(
        self.msg_prefix, self.action['name'], container, image))

    if self.dry_run:
        return

    if not self.singularity_exists(container_path):
        s_client.pull(image=image, name=container,
                      pull_folder=os.path.dirname(container_path))
def docker_create(self, img):
    """Create a docker container from an image.

    Args:
        img(str): The image to use for building the container.

    Returns:
        None
    """
    log.info('{}[{}] docker create {} {}'.format(
        self.msg_prefix, self.action['name'], img,
        ' '.join(self.action.get('args', ''))))

    if self.dry_run:
        return

    env = self.prepare_environment()
    volumes = self.prepare_volumes(env, include_docker_socket=True)

    self.container = self.d_client.containers.create(
        image=img,
        command=self.action.get('args', None),
        name=self.cid,
        volumes=volumes,
        working_dir=env['GITHUB_WORKSPACE'],
        environment=env,
        entrypoint=self.action.get('runs', None),
        detach=True)
def cli(ctx, service, install):
    """Generates configuration files for distinct CI services. This command
    needs to be executed on the root of your Git repository folder.
    """
    project_root = scm.get_git_root_folder()

    if project_root != os.getcwd():
        log.fail(
            'This command needs to be executed on the root of your '
            'Git project folder (where the .git/ folder is located).')

    for ci_file, ci_file_content in pu.get_items(ci_files[service]):
        # Prepare and write the CI config file.
        ci_file = os.path.join(project_root, ci_file)
        parent_dir = os.path.dirname(ci_file)
        if not os.path.isdir(parent_dir):
            os.makedirs(parent_dir)

        install_script_cmd = ''
        if install:
            if service in ('jenkins', 'gitlab'):
                log.fail(
                    'Scaffolding of custom install scripts is not '
                    'supported for Jenkins and Gitlab CI. Include it '
                    'manually depending upon the CI\'s OS.')
            # Per-service hook that invokes the generated install script.
            hooks = {
                'travis': ('before_install: scripts/'
                           'install_scripts.sh'),
                'circle': 'bash scripts/install_scripts.sh',
                'brigade': '"bash scripts/install_scripts.sh",',
            }
            install_script_cmd = hooks.get(service, '')

        with open(ci_file, 'w') as f:
            f.write(reformat(ci_file_content.safe_substitute(
                {'install_scripts': install_script_cmd})))

    # Prepare and Write the install scripts.
    if install:
        install = set(install)
        install_script_file = os.path.join(
            project_root, 'scripts', 'install_scripts.sh')

        script_content = base_script_content
        for runtime in install:
            script_content += install_scripts_content[runtime]

        scripts_dir = os.path.dirname(install_script_file)
        if not os.path.isdir(scripts_dir):
            os.makedirs(scripts_dir)

        with open(install_script_file, 'w') as f:
            f.write(script_content)

        # Make the generated script executable for its owner.
        st = os.stat(install_script_file)
        os.chmod(install_script_file, st.st_mode | stat.S_IEXEC)

    log.info('Wrote {} configuration successfully.'.format(service))
def cli(ctx, subcommand):
    """Display help for a given command or popper default help
    """
    root_ctx = click.Context(popper_cli)
    if not subcommand:
        log.info(popper_cli.get_help(root_ctx))
    else:
        target_command = popper_cli.get_command(ctx, subcommand)
        log.info(target_command.get_help(root_ctx))
def singularity_pull(self, image):
    """Pulls a docker or singularity image from a hub.
    """
    log.info('{}[{}] singularity pull {}'.format(
        self.msg_prefix, self.action['name'], image))
    if self.dry_run:
        return
    sclient.pull(image, name=self.image_name)
def singularity_build(self, path):
    """Builds an image from a recipefile.
    """
    recipefile_path = os.path.join(path, 'Singularity')
    log.info('{}[{}] singularity build {} {}'.format(
        self.msg_prefix, self.action['name'],
        self.image_name, recipefile_path))
    if self.dry_run:
        return
    sclient.build(recipefile_path, self.image_name)
def cli(ctx, wfile, skip, colors):
    """Creates a graph in the .dot format representing the workflow.

    Args:
        ctx(Popper.cli.context): For process inter-command communication.
        wfile(str): Name of the file containing definition of workflow.
        skip(tuple): List of steps that are to be skipped.
        colors(bool): Flag for colors.

    Returns:
        None
    """
    def add_to_graph(dot_str, wf, parent, children, node_attrs,
                     stage_edges):
        """Recursively goes over the children ("next" attribute) of the
        given parent, adding an edge from parent to children.

        Args:
            dot_str(str): The intermediate string to which further nodes
                are to be added.
            wf(popper.parser.workflow): Instance of the workflow class.
            parent(str): Step Identifier.
            children(list/set): The nodes to attach as children.
            node_attrs(str): Attributes for graph nodes.
            stage_edges(set): Edges emitted so far (for de-duplication).

        Returns:
            str: The string containing nodes and their description.
        """
        for child in children:
            edge = f' "{parent}" -> "{child}";\n'
            if edge in stage_edges:
                continue
            stage_edges.add(edge)
            dot_str += edge + f' "{child}" [{node_attrs}];\n'
            for nxt in wf.steps[child].get('next', []):
                dot_str = add_to_graph(dot_str, wf, child, [nxt],
                                       node_attrs, stage_edges)
        return dot_str

    wf = Workflow.new(wfile)
    wf.parse()
    wf = Workflow.skip_steps(wf, skip)
    wf.check_for_unreachable_steps()

    node_attrs = 'shape=box, style="filled{}", fillcolor=transparent{}'
    wf_attr = node_attrs.format(',rounded', ',color=red' if colors else '')
    act_attr = node_attrs.format('', ',color=cyan' if colors else '')

    dot_str = add_to_graph("", wf, wf.name, wf.root, act_attr, set())
    dot_str += f' "{wf.name}" [{wf_attr}];\n'

    log.info("digraph G { graph [bgcolor=transparent];\n" + dot_str + "}\n")
def singularity_start(self, container_path):
    """Starts the container to execute commands or run the runscript with
    the supplied args inside the container.

    Args:
        container_path (str): The container image to run/execute.

    Returns:
        int: The container process returncode.
    """
    env = self.prepare_environment(set_env=True)

    home = env['HOME']
    workspace = env['GITHUB_WORKSPACE']
    # Bind host paths into the container at the GitHub-actions locations.
    volumes = [
        '{}:{}'.format(home, home),
        '{}:{}'.format(home, '/github/home'),
        '{}:{}'.format(workspace, workspace),
        '{}:{}'.format(workspace, '/github/workspace'),
        '{}:{}'.format(env['GITHUB_EVENT_PATH'],
                       '/github/workflow/event.json')
    ]

    runs = self.action.get('runs', None)
    args = self.action.get('args', None)

    # 'runs' overrides the entrypoint (exec); otherwise run the
    # container's runscript with the action args.
    if runs:
        commands, start = runs, s_client.execute
        info = '{}[{}] singularity exec {} {}'.format(
            self.msg_prefix, self.action['name'], container_path, runs)
    else:
        commands, start = args, s_client.run
        info = '{}[{}] singularity run {} {}'.format(
            self.msg_prefix, self.action['name'], container_path, args)

    log.info(info)

    if self.dry_run:
        ecode = 0
    else:
        output = start(container_path, commands, bind=volumes,
                       stream=True,
                       options=['--userns', '--pwd', workspace])
        try:
            for line in output:
                log.action_info(line)
            ecode = 0
        except subprocess.CalledProcessError as ex:
            ecode = ex.returncode

    self.remove_environment()
    return ecode
def cli(ctx, service, file, substitution):
    """Generates configuration files for distinct CI services. This command
    needs to be executed on the root of your Git repository folder.
    """
    # The presence of a .git folder marks the repository root.
    if not os.path.exists(".git"):
        log.fail("This command needs to be executed on the root of your "
                 "Git project folder (where the .git/ folder is located).")

    WorkflowExporter.get_exporter(service).export(file, substitution)

    log.info(f"Wrote {service} configuration successfully.")
def docker_pull(self, img):
    """Pull the given docker image; when pulling is skipped, verify that
    the image already exists locally."""
    if self.skip_pull:
        if not self.docker_image_exists(img):
            log.fail('The required docker image {} was not found locally.'.
                     format(img))
        return

    log.info('{}[{}] docker pull {}'.format(self.msg_prefix,
                                            self.action['name'], img))
    if self.dry_run:
        return
    self.docker_client.images.pull(repository=img)
def docker_start(self):
    """Start the previously-created container, stream its logs, and return
    its exit status code."""
    log.info('{}[{}] docker start '.format(self.msg_prefix,
                                           self.action['name']))
    if self.dry_run:
        return 0

    self.container.start()
    for line in self.container.logs(stream=True):
        log.action_info(pu.decode(line).strip('\n'))

    return self.container.wait()['StatusCode']
def vagrant_stop(self, vagrant_box_path):
    """Stop the Vagrant VM running at the specified path.

    Args:
        vagrant_box_path (str): The path to Vagrant VM's root.
    """
    # CONSISTENCY FIX: log before the dry-run check so the action is
    # reported even on dry runs, matching every other runner method in
    # this codebase (docker_*, singularity_*), which log first.
    log.info("[-] Stopping VM....")
    if self.dry_run:
        return
    vagrant.Vagrant(root=vagrant_box_path).halt()
    # Give the VM a moment to shut down completely.
    time.sleep(5)
def _create_container(self, step, cid):
    """Build the step's singularity image locally or pull it, honoring
    skip-pull and dry-run settings."""
    build, image, build_ctx_path = self._get_build_info(step)

    if build:
        log.info(f"[{step.id}] singularity build {cid} {build_ctx_path}")
        if self._config.dry_run:
            return
        self._build_from_recipe(
            build_ctx_path, self._singularity_cache, cid)
    elif not self._config.skip_pull and not step.skip_pull:
        log.info(f"[{step.id}] singularity pull {cid} {image}")
        if self._config.dry_run:
            return
        self._s.pull(image=image, name=cid,
                     pull_folder=self._singularity_cache)
def cli(ctx, action, update_cache):
    """Print the README of the repository hosting the given action."""
    metadata = pu.fetch_metadata(update_cache)
    action_metadata = metadata.get(action, None)

    if not action_metadata:
        log.fail('Unable to find metadata for given action.')
    if not action_metadata['repo_readme']:
        log.fail('This repository does not have a README.md file.')

    log.info(action_metadata['repo_readme'])
def singularity_pull(self, image):
    """Pulls a docker or singularity image from a hub; when pulling is
    skipped, verify the image already exists locally."""
    if self.skip_pull:
        if not self.singularity_exists():
            log.fail('The required singularity image {} was not found '
                     'locally.'.format(self.image_name))
        return

    log.info('{}[{}] singularity pull {}'.format(
        self.msg_prefix, self.action['name'], image))
    if not self.dry_run:
        sclient.pull(image, name=self.image_name)
def docker_build(self, img, path):
    """Build a docker image from a Dockerfile.

    Args:
        img (str): The name of the image to build.
        path (str): The path to the Dockerfile and other resources.
    """
    log.info('{}[{}] docker build -t {} {}'.format(
        self.msg_prefix, self.action['name'], img, path))
    if self.dry_run:
        return
    docker_client.images.build(path=path, tag=img, rm=True, pull=True)