def load_config(name: str, path: str) -> None:
    """Load the config file at `path` into the swarm under `name`."""
    # remove any existing config first so `docker config create` does not collide
    if name in get_configs():
        remove_config(name)

    shell.execute(f"docker config create {name} {path}", os.environ)
def test_execute(self, *mocks):
    """Verify that shell.execute passes the caller's environment through."""
    environment = {}

    shell.execute("docker ps", environment)

    self.sh_mock.docker.assert_called_with("ps", _env=environment)
def get_tag_version(default: str = None) -> str:
    """
    Return the version of code as reported by the `tag-version` cli command.

    Args:
        default: the default version if it cannot be found, `unknown` by default

    Raises:
        TagVersionError: when the `tag-version` command fails; carries the
            best-effort version (suffixed with `-dirty` when the working tree
            is not clean) and the underlying shell exception.
    """
    tag_version = default or "unknown"

    try:
        proc = shell.execute("tag-version version --format docker", os.environ)
    except Exception as exc:
        # the shell exception usually carries bytes in `stderr`, but it may be
        # absent, None, or already a string; normalize defensively instead of
        # relying on a bare except whose fallback could raise AttributeError
        stderr = getattr(exc, "stderr", b"") or b""
        if isinstance(stderr, bytes):
            error_message = stderr.decode("utf8", errors="replace")
        else:
            error_message = stderr

        if "not clean" in error_message:
            tag_version = f"{tag_version}-dirty"

        raise TagVersionError(
            "Warning: tag-version failed", shell_exception=exc, tag_version=tag_version
        ) from exc
    else:
        tag_version = proc.stdout.decode("utf8").strip()

    return tag_version
def execute(self, command: str, **kwargs):
    """Run `command` under the workflow environment, unless `_env` is given."""
    # only touch the workflow environment data when the caller did not
    # provide an explicit `_env`, so it is evaluated lazily
    explicit_env = kwargs.pop("_env", None)
    if explicit_env:
        env = explicit_env
    else:
        env = self.workflow.environment.data

    return shell.execute(command, env, **kwargs)
def run_pod(self):
    """Exec into the selected pod, optionally targeting a specific container."""
    args = self.workflow.args
    pod = self.select_pod()

    target_container = f"--container {args.container}" if args.container else ""

    remainder = " ".join(self.workflow.args_remainder)
    command = (
        f"{self.kubectl_command} -n {self.namespace} exec -it {pod} {target_container} -- "
        f"{remainder}"
    )
    logging.debug(f"command={command}")

    return shell.execute(command, os.environ, _fg=True)
def get_docker_output(command: str, env: dict) -> str:
    """
    Returns docker stdout as a string

    Args:
        command: the docker command to run
        env: the environment to run the command under

    Returns:
        str
    """
    try:
        proc = shell.execute(command, env)
    except shell.ErrorReturnCode_1 as exc:
        # distinguish a missing/unreachable daemon from any other docker failure
        if "cannot connect to the docker daemon" in f"{exc}".lower():
            raise NotConnected()
        raise DockerError(exc)

    return proc.stdout.decode("utf8")
def run_service(self):
    """SSH to the container's host and docker-exec into the selected container."""
    args = self.workflow.args

    fields = self.select_container().strip().split()
    container_hash = fields[0]
    container_prefix = fields[1]
    container_host = fields[3]

    # EC2-style hostnames look like ip-10-0-0-1; turn them into a dotted address
    if container_host.startswith('ip-'):
        container_host = container_host.replace('ip-', '').replace('-', '.')

    host_info = f'{CF_REMOTE_USER}@{container_host}'

    docker_user = f'--user {args.user} ' if args.user else ''

    command = f'ssh -t {host_info}'
    docker_command = (
        f'docker exec -t -i {docker_user}{container_prefix}.{container_hash}'
        f' {" ".join(self.workflow.args_remainder)}')
    if args.sudo:
        docker_command = f'sudo {docker_command}'

    if not args.ssh:
        command = f'{command} {docker_command}'
    else:
        # --ssh: only print what would be run inside the host session
        sys.stderr.write(f'docker_command: {docker_command}\n')

    logging.debug(f'command={command}')

    return shell.execute(command, os.environ, _fg=True)
def run_service(self):
    """Open a docker exec session on the remote host running the service container."""
    args = self.workflow.args

    parts = self.select_container().strip().split()
    container_hash, container_prefix = parts[0], parts[1]
    container_host = parts[3]

    # translate an EC2 "ip-a-b-c-d" hostname into a plain dotted IP
    if container_host.startswith("ip-"):
        container_host = container_host.replace("ip-", "").replace("-", ".")

    host_info = f"{self.workflow.remote.username}@{container_host}"

    docker_user = ""
    if args.user:
        docker_user = f"--user {args.user} "

    remainder = " ".join(self.workflow.args_remainder)
    docker_command = (
        f"docker exec -t -i {docker_user}{container_prefix}.{container_hash}"
        f" {remainder}"
    )
    if args.sudo:
        docker_command = f"sudo {docker_command}"

    command = f"ssh -t {host_info}"
    if args.ssh:
        # --ssh: only print what would be run inside the host session
        sys.stderr.write(f"docker_command: {docker_command}\n")
    else:
        command = f"{command} {docker_command}"

    logging.debug(f"command={command}")

    return shell.execute(command, os.environ, _fg=True)
def execute(self, command: str, **kwargs):
    """Run `command` against the current process environment."""
    return shell.execute(command, os.environ, **kwargs)
#!/usr/bin/env python
"""Dump every docker swarm config into individual files under a root directory."""
import argparse
import base64
import os

from compose_flow import shell


def main() -> None:
    """Write each docker config's decoded payload to `<root>/<config name>`."""
    parser = argparse.ArgumentParser()
    parser.add_argument("root")
    args = parser.parse_args()

    listing = shell.execute('docker config ls --format "{{ .Name }}"', os.environ)
    for config_name in listing.stdout.decode("utf8").splitlines():
        inspect = shell.execute(
            f'docker config inspect {config_name} --format "{{{{ json .Spec.Data }}}}"',
            os.environ,
        )
        # .Spec.Data renders as a JSON string; strip the quotes to get raw base64
        buf_b64 = inspect.stdout.decode("utf8").replace('"', "")

        # exist_ok avoids the exists()/makedirs() race of the original
        os.makedirs(args.root, exist_ok=True)

        path = os.path.join(args.root, config_name)
        with open(path, "wb") as fh:
            fh.write(base64.b64decode(buf_b64))


if __name__ == "__main__":
    main()
def remove_config(name: str) -> None:
    """Delete the named config from the swarm."""
    shell.execute(f"docker config rm {name}", os.environ)