def list_images(self, services, tag, **kwargs):
    """List images"""
    services = services or list(self.kard.env.get_container().keys())
    if tag is None:
        tag = self.kard.meta["tag"]
    for service in services:
        write(self.make_image_name(service, tag))
def _pull_image(self, image_name, registry_url, tag):
    """Pull one image, retrying a few times to be robust to registry or
    network related issues. Usually, if an attempt fails, the next one
    will succeed.

    Args:
      * image_name: the name of the image to pull
      * registry_url: the DockerRegistry instance url
      * tag: the tag of the version to pull
    """
    rep_tag = '{}/{}'.format(registry_url, image_name)
    try:
        self.docker.pull(repository=rep_tag, tag=tag)
        # Strip the repository tag
        self.docker.tag(image=':'.join((rep_tag, tag)),
                        repository=image_name,
                        tag=tag,
                        force=True)
    except docker.errors.APIError as error:
        error_msg = 'Error while pulling the image {}:{}: {}'.format(
            image_name, tag, error)
        write(error_msg)
        raise ImagePullError(error_msg)
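# A minimal retry-wrapper sketch for the robustness the docstring above alludes
# to: _pull_image itself raises ImagePullError (the pkr exception used above)
# on the first failure, so a caller wanting retries can wrap it. The names
# `backend`, `attempts` and `delay` are hypothetical, not part of pkr.
import time


def pull_with_retries(backend, image_name, registry_url, tag, attempts=3, delay=2):
    """Call backend._pull_image up to `attempts` times before giving up."""
    for attempt in range(1, attempts + 1):
        try:
            return backend._pull_image(image_name, registry_url, tag)
        except ImagePullError:
            if attempt == attempts:
                raise  # no attempts left: propagate the last error
            time.sleep(delay)  # short pause before the next attempt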
def purge(self, except_tag=None, tag=None, repository=None):
    """Delete all images of this project.

    Only one of tag and except_tag may be specified.

    Args:
      * except_tag: delete all images but this tag
      * tag: only delete this tag
      * repository: delete image references in a specified repository
    """
    services = list(self.kard.env.get_container().keys())

    if except_tag is None:
        tag = tag or self.kard.meta['tag']
    else:
        tag = '(?!{})$'.format(except_tag)

    images_to_del = [self.make_image_name(s, tag) for s in services]
    if repository:
        tmp = []
        for image in images_to_del:
            tmp.append(image)
            tmp.append('/'.join((repository, image)))
        images_to_del = tmp

    images_regex = '(' + ')|('.join(images_to_del) + ')'

    for img in self.docker.images():
        for repo_tag in img.get('RepoTags', []):
            if re.match(images_regex, repo_tag):
                write('Deleting image ' + repo_tag)
                try:
                    self.docker.remove_image(repo_tag)
                except BaseException as exc:
                    write(exc)
def get_meta(self, extra):
    """Ensure that all metadata are present"""
    default = self.env.get("default_meta")
    if "driver" in self.env:
        write(
            f"DEPRECATION: driver should be under default_meta in env {self.env_name}",
            error=True,
        )
        merge(self.env["driver"], default.setdefault("driver", {}), overwrite=False)

    if not default:
        # This prevents an empty value in the YAML
        default = {}

    ret = default.copy()
    merge(extra, ret)

    required_values = ensure_definition_matches(
        definition=self.env.get("required_meta", []),
        defaults=ret,
        data=extra)
    merge(required_values, extra)

    # Features
    ret["features"] = self.env.get("default_features", [])

    return ret
def __init__(self, env_name, features=None, path=None):
    self.env_name = env_name
    self.pkr_path = get_pkr_path()
    self.path = path or self.default_path

    # First load the main file to pull in any dependencies
    env_path = self.path / env_name
    env_file_path = env_path / "env.yml"
    self.env = self._load_env_file(env_file_path)

    self.features = features or []
    for feature in dedup_list(self.env.get("default_features", [])):
        write("WARNING: Feature {} is duplicated in env {}".format(
            feature, env_name),
            error=True)
    merge_lists(self.env.get("default_features", []), self.features)

    for feature in self.features:
        f_path = env_path / (feature + ".yml")
        if f_path.is_file():
            content = self._load_env_file(f_path)
            feature_features = content.pop("default_features", [])
            for dup in dedup_list(feature_features):
                write(
                    f"WARNING: Feature {dup} is duplicated in feature {feature} from env {env_name}",
                    error=True,
                )
            merge_lists(feature_features, self.features)
            content["default_features"] = feature_features
            merge(content, self.env)
def cmd_ps(self):
    """List containers with ips"""
    response = self.client.list_namespaced_pod(self.namespace)
    services = response.items
    for service in services:
        write(" - {}: {} - {}".format(
            service.metadata.name, service.status.phase, service.status.pod_ip))
def pull_images(self, services, registry, username, password,
                tag=None, parallel=None, **kwargs):
    """Pull images from a remote registry

    Args:
      * services: the name of the images to pull
      * registry: a DockerRegistry instance
      * tag: the tag of the version to pull (defaults to the kard tag)
      * parallel: pull parallelism
    """
    services = services or list(self.kard.env.get_container().keys())
    remote_tag = tag or self.kard.meta["tag"]
    tag = self.kard.meta["tag"]

    registry = self.get_registry(url=registry, username=username, password=password)
    if registry.username is not None:
        self._logon_remote_registry(registry)

    todos = []
    for service in services:
        image_name = self.make_image_name(service)
        image = self.make_image_name(service, tag)
        todos.append((image, image_name))

    if parallel:
        futures = []
        with ThreadPoolExecutor(max_workers=parallel) as executor:
            for image, image_name in todos:
                futures.append((
                    image,
                    image_name,
                    executor.submit(self._pull_image, image_name,
                                    registry.url, tag, remote_tag),
                ))
            for image, image_name, future in futures:
                future.result()
                write("Pulling {} from {}/{}:{}...".format(
                    image, registry.url, image_name, remote_tag))
                write(" Done !" + "\n")
                sys.stdout.flush()
    else:
        for image, image_name in todos:
            write("Pulling {} from {}/{}:{}...".format(
                image, registry.url, image_name, remote_tag))
            sys.stdout.flush()
            self._pull_image(image_name, registry.url, tag, remote_tag)
            write(" Done !" + "\n")

    write("All images have been pulled successfully !" + "\n")
def _logon_remote_registry(self, registry):
    """Log in to a remote registry

    Args:
      * registry: a DockerRegistry instance
    """
    write('Logging in to {}...'.format(registry.url))
    self.docker.login(username=registry.username,
                      password=registry.password,
                      registry=registry.url)
def cmd_ps(self):
    """List containers with ips"""
    services = self._load_compose_config().services
    for service in [s["name"] for s in services]:
        container = self.get_container(self.make_container_name(service))
        if container is None:
            container_ip = "stopped"
        else:
            container_ip = self.get_ip(container)
        write(" - {}: {}".format(service, container_ip))
def launch_container(self, command, image, volumes, v1=False, links=None):
    """Generic method to launch a container"""
    if v1:
        host_config = self.docker.create_host_config(
            binds=[":".join((v, k)) for k, v in volumes.items()],
            links={
                l: l for l in [self.make_container_name(s) for s in links]
            },
        )
        networking_config = None
    else:
        host_config = self.docker.create_host_config(
            binds=[":".join((v, k)) for k, v in volumes.items()])
        network_name = self.kard.meta["project_name"] + "_default"
        networking_config = self.docker.create_networking_config(
            {network_name: self.docker.create_endpoint_config()})

    container = self.docker.create_container(
        image=image,
        name=self.make_container_name("init"),
        command=command,
        host_config=host_config,
        networking_config=networking_config,
    )

    try:
        started = False
        ret = {"StatusCode": 1}
        attempt = 10
        container_id = container.get("Id")

        while started not in ("running", "exited"):
            self.docker.start(container=container_id)
            info = self.docker.inspect_container(container=container_id)
            started = info["State"]["Status"]

        while ret["StatusCode"] != 0 and attempt > 0:
            attempt -= 1
            time.sleep(3)
            ret = self.docker.wait(container=container_id)

        logs = self.docker.logs(container=container_id)
        write(logs)
    except BaseException:
        write(traceback.format_exc())
        raise
    finally:
        self.docker.remove_container(container=container_id)

    if ret["StatusCode"] != 0:
        raise PkrException(
            "Container exited with non-zero status code {}".format(
                ret["StatusCode"]))
def stop(self, services=None):
    """Stops services"""
    k8s_files_path = self.kard.path / "k8s"
    for k8s_file in sorted(k8s_files_path.glob("*.yml"), reverse=True):
        if services and k8s_file.name[:-4] not in services:
            continue
        write("Processing {}".format(k8s_file))
        out, _ = self.run_kubectl("delete -f {}".format(k8s_file))
        write(out)
        sleep(0.5)
    self.write_configmap({})
def download_images(self, services, registry, tag=None, nopull=False):
    """Download images from a remote registry and save them to the kard

    Args:
      * services: the name of the images to download
      * registry: a DockerRegistry instance
      * tag: the tag of the version to download
    """
    tag = tag or self.kard.meta['tag']
    save_path = Path(self.kard.path) / 'images'

    write('Cleaning images destination {}'.format(save_path))
    save_path.mkdir(exist_ok=True)
    for child in save_path.iterdir():
        child.unlink()

    if not nopull:
        self.pull_images(services, registry, tag=tag)

    for service in services:
        image_path = save_path / "{}.tar".format(service)
        image_name = self.make_image_name(service, tag)
        write('Saving {} to {}'.format(image_name, image_path))
        sys.stdout.flush()
        with open(image_path, 'wb') as f:
            for chunk in self.docker.get_image(image_name):
                f.write(chunk)
        write(' Done !' + '\n')

    write('All images have been saved successfully !' + '\n')
def push_images(self, services, registry, tag=None, other_tags=None):
    """Push images to a remote registry

    Args:
      * services: the name of the images to push
      * registry: a DockerRegistry instance
      * tag: the tag of the version to push
      * other_tags: additional tags to push the same images under
    """
    tag = tag or self.kard.meta['tag']
    if registry.username is not None:
        self._logon_remote_registry(registry)

    tags = [tag]
    tags.extend(other_tags or [])

    for service in services:
        image_name = self.make_image_name(service)
        image = self.make_image_name(service, tag)
        rep_tag = '{}/{}'.format(registry.url, image_name)
        for dest_tag in tags:
            write('Pushing {} to {}:{}'.format(image, rep_tag, dest_tag))
            sys.stdout.flush()
            try:
                self.docker.tag(image=image,
                                repository=rep_tag,
                                tag=dest_tag,
                                force=True)
                ret = self.docker.push(repository=rep_tag,
                                       tag=dest_tag,
                                       decode=True,
                                       stream=True)
                error = ''
                for stream in ret:
                    if 'error' in stream:
                        error += '\n' + stream['errorDetail']['message']
                write(' Done !' if error == '' else error + '\n')
            except docker.errors.APIError as exc:
                write('\nError while pushing the image {}:{}: {}\n'.format(
                    image_name, dest_tag, exc))
                raise
def _wrap_call(extension, method_name, *args, **kwargs):
    method = getattr(extension.plugin(), method_name, None)
    if method is None:
        # it is OK if an extension does not implement all methods
        return
    try:
        return method(*args, **kwargs)
    except TimeoutError:
        write('Extension "{}" raised a timeout error, step "{}"'.format(
            extension.name, method_name))
        raise
    except Exception as exc:
        write('Extension "{}" raised an exception, step "{}": {}'.format(
            extension.name, method_name, str(exc)))
        raise
def main():
    """Main function"""
    try:
        parser = get_parser()
        cli_args = parser.parse_args()
        log.set_debug(cli_args.debug)
        set_use_env_var(not cli_args.no_env_var)
        cli_args.func(cli_args)
    except Exception as exc:  # pylint: disable=W0703
        # We catch exceptions broadly here because the parser is built
        # dynamically from the current directory (mostly the kard), so we
        # cannot guarantee that it will not fail.
        if "--debug" in sys.argv or "-d" in sys.argv:
            log.set_debug(True)
        log.write("ERROR: ({}) {}".format(type(exc).__name__, exc), error=True)
        log.debug("".join(traceback.format_exception(*sys.exc_info())))
        return 1
    return 0
def main():
    """Main function"""
    try:
        cli_args = get_parser().parse_args()

        # Setting the log mode
        debug = cli_args.__dict__.pop('debug')
        log.set_debug(debug)

        func = cli_args.__dict__.pop('func')
        func(cli_args)
    except KardInitializationException as exc:
        log.write(str(exc))
        return 1
    except Exception as exc:  # pylint: disable=W0703
        log.write('ERROR: ({}) {}'.format(type(exc).__name__, exc))
        log.debug(''.join(traceback.format_exception(*sys.exc_info())))
        return 1
    return 0
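# Usage sketch: both main() variants above return an exit code instead of
# calling sys.exit() themselves, so a console entry point would typically be
# wired up like this (illustrative, not taken from pkr's packaging):
if __name__ == "__main__":
    import sys
    sys.exit(main())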
def print_log(log):
    for key in (log_keys & set(log)):
        try:
            if key == 'status' and log.get(key) in (
                    'Downloading', 'Extracting'):
                status_id = log.get('id')
                if last_log_id[0] is None:
                    last_log_id[0] = status_id
                if last_log_id[0] != status_id:
                    last_log_id[0] = status_id
                    logfh.writeln(log['progress'])
                else:
                    logfh.write_console(log['progress'] + '\r')
            else:
                logfh.write_console('\n')
                logfh.writeln(log.get(key))
        except:
            write(traceback.format_exc())
            raise
def print_log(log):
    for key in log_keys & set(log):
        try:
            if key == "status" and log.get(key) in ("Downloading", "Extracting"):
                status_id = log.get("id")
                if last_log_id[0] is None:
                    last_log_id[0] = status_id
                if last_log_id[0] != status_id:
                    last_log_id[0] = status_id
                    logfh.writeln(log["progress"])
                else:
                    logfh.write_console(log["progress"] + "\r")
            else:
                logfh.write_console("\n")
                logfh.writeln(log.get(key))
        except:
            write(traceback.format_exc())
            raise
def _wrap_call(extension, method_name, *args, **kwargs):
    method = getattr(extension.plugin(), method_name, None)
    if method is None:
        # it is OK if an extension does not implement all methods
        return
    try:
        return method(*args, **kwargs)
    except TimeoutError:
        write('Extension "{}" raised a timeout error, step "{}"'.format(
            extension.name, method_name))
        raise
    except PkrException:
        # If this is a PkrException, we simply propagate it and delegate
        # its handling to the caller
        raise
    except Exception as exc:
        write('Extension "{}" raised an unknown exception, step "{}": {}'.format(
            extension.name, method_name, str(exc)))
        raise
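# Hedged usage sketch for _wrap_call: the extension object is expected to
# expose a plugin() factory and a name attribute. DummyPlugin/DummyExtension
# below are made-up stand-ins showing that missing hook methods are skipped
# silently, while implemented ones are called and their exceptions are logged
# and re-raised.
class DummyPlugin:
    def setup(self, kard_name):
        print("setting up", kard_name)


class DummyExtension:
    name = "dummy"

    def plugin(self):
        return DummyPlugin()


_wrap_call(DummyExtension(), "setup", "my-kard")  # calls DummyPlugin.setup
_wrap_call(DummyExtension(), "post_build")        # no such hook: returns None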
def pull_images(self, services, registry, tag=None, parallel=None):
    """Pull images from a remote registry

    Args:
      * services: the name of the images to pull
      * registry: a DockerRegistry instance
      * tag: the tag of the version to pull
      * parallel: pull parallelism
    """
    tag = tag or self.kard.meta['tag']

    if registry.username is not None:
        self._logon_remote_registry(registry)

    todos = []
    for service in services:
        image_name = self.make_image_name(service)
        image = self.make_image_name(service, tag)
        todos.append((image, image_name))

    if parallel:
        futures = []
        with ThreadPoolExecutor(max_workers=parallel) as executor:
            for image, image_name in todos:
                futures.append((
                    image,
                    executor.submit(self._pull_image, image_name,
                                    registry.url, tag)))
            for image, future in futures:
                future.result()
                write('Pulling {} from {}...'.format(image, registry.url))
                write(' Done !' + '\n')
                sys.stdout.flush()
    else:
        for image, image_name in todos:
            write('Pulling {} from {}...'.format(image, registry.url))
            sys.stdout.flush()
            self._pull_image(image_name, registry.url, tag)
            write(' Done !' + '\n')

    write('All images have been pulled successfully !' + '\n')
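# Standalone sketch of the submit/collect pattern used by the parallel branch
# above, with a dummy pull function standing in for _pull_image. All names
# here are illustrative, not part of pkr.
from concurrent.futures import ThreadPoolExecutor


def fake_pull(image_name):
    # Placeholder for a network-bound pull; returns the name when "done".
    return image_name


def pull_all(images, parallelism=4):
    futures = []
    with ThreadPoolExecutor(max_workers=parallelism) as executor:
        for image in images:
            futures.append((image, executor.submit(fake_pull, image)))
        # future.result() re-raises any exception from the worker thread, so a
        # failed pull aborts the whole batch, just as in pull_images above.
        for image, future in futures:
            future.result()
            print('Pulled {}'.format(image))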
def pull_images(self, services, registry, tag=None):
    """Pull images from a remote registry

    Args:
      * services: the name of the images to pull
      * registry: a DockerRegistry instance
      * tag: the tag of the version to pull
    """
    if registry.username is not None:
        self._logon_remote_registry(registry)

    for service in services:
        image_name = self.make_image_name(service)
        image = image_name if tag == 'latest' \
            else ':'.join((image_name, tag))
        write('Pulling {} from {}...'.format(image, registry.url))
        sys.stdout.flush()
        self._pull_image(image_name, registry.url, tag)
        write(' Done !' + '\n')

    write('All images have been pulled successfully !' + '\n')
def setup(args, kard):
    """Populate build_data with extension-specific values

    Args:
      - args: the args passed in the env
      - kard: the kard object
    """
    git_repo = args.get('git_repo', kard.meta.get('git_repo'))
    if git_repo is not None:
        src_path = kard.meta['src_path']
        git_branch = args.get('git_branch', kard.meta.get('git_branch', 'master'))
        if not os.path.isdir(src_path):
            write('Fetching sources from {}:{} to {}'.format(
                git_repo, git_branch, src_path))
            try:
                repo = Repo.clone_from(git_repo,
                                       src_path,
                                       branch=git_branch,
                                       single_branch=True,
                                       depth=1)
                for sub_module in repo.submodules:
                    sub_module.update()
            except Exception as exc:
                write('Could not fetch repository: {}'.format(str(exc)))
                raise exc
        else:
            write('Using sources from {}'.format(src_path))
def _create_builder(self, purge=False):
    """Create the buildkit builder, never to be purged"""
    for builder in docker.buildx.list():
        if builder.name == self.builder_name:
            if purge:
                builder.remove()
            else:
                break
    else:
        docker.buildx.create(name=self.builder_name,
                             driver_options=self.buildkit_env)

    write(f"Start buildx builder {self.builder_name}")
    with open("/dev/null", "a") as devnull:
        os.dup2(sys.stdout.fileno(), 3)
        os.dup2(devnull.fileno(), sys.stdout.fileno())
        try:
            docker.buildx.build(self.kard.path,
                                progress=False,
                                builder=self.builder_name)
        except DockerException:
            # The build was never intended to succeed, only to force the
            # builder to start
            pass
        os.dup2(3, sys.stdout.fileno())
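# Standalone sketch of the file-descriptor juggling used in _create_builder:
# stdout is copied to fd 3, pointed at /dev/null for the duration of a noisy
# call, then restored. Assumes, like the method above, that fd 3 is free; the
# helper name is illustrative, not part of pkr.
import os
import sys


def run_silenced(func):
    sys.stdout.flush()  # push out anything still buffered before redirecting
    with open(os.devnull, "a") as devnull:
        os.dup2(sys.stdout.fileno(), 3)                 # keep a copy of stdout on fd 3
        os.dup2(devnull.fileno(), sys.stdout.fileno())  # point stdout at /dev/null
        try:
            return func()
        finally:
            os.dup2(3, sys.stdout.fileno())             # restore the original stdout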
def push_images(self, services, registry, tag=None):
    """Push images to a remote registry

    Args:
      * services: the name of the images to push
      * registry: a DockerRegistry instance
      * tag: the tag of the version to push
    """
    if registry.username is not None:
        self._logon_remote_registry(registry)

    for service in services:
        image = self.make_image_name(service)
        rep_tag = '{}/{}'.format(registry.url, image)
        if tag != 'latest':
            image = ':'.join([image, tag])
        write('Pushing {} to {}...'.format(image, rep_tag))
        sys.stdout.flush()

        self.docker.tag(image=image, repository=rep_tag, tag=tag, force=True)
        ret = self.docker.push(repository=rep_tag, tag=tag, decode=True, stream=True)

        error = ''
        for stream in ret:
            if 'error' in stream:
                error += '\n' + stream['errorDetail']['message']
        write(' Done !' if error == '' else error + '\n')
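# Hedged sketch of the stream that docker-py's push() yields with stream=True
# and decode=True, as consumed above: each chunk is a dict, and daemon-side
# failures show up as chunks carrying 'error'/'errorDetail' rather than as a
# raised exception. The sample chunks are invented for illustration.
sample_stream = [
    {"status": "Pushing", "id": "abc123"},
    {"error": "denied", "errorDetail": {"message": "requested access to the resource is denied"}},
]

error = ""
for chunk in sample_stream:
    if "error" in chunk:
        error += "\n" + chunk["errorDetail"]["message"]
print(" Done !" if error == "" else error)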
def purge_images(self, tag=None, except_tag=None, repository=None, **kwargs):
    """Delete all images of this project.

    Only one of tag and except_tag may be specified.

    Args:
      * except_tag: delete all images but this tag
      * tag: only delete this tag
      * repository: delete image references in a specified repository
    """
    services = list(self.kard.env.get_container().keys())

    if except_tag is None:
        tag = tag or self.kard.meta["tag"]
    else:
        tag = "(?!{})$".format(except_tag)

    images_to_del = [self.make_image_name(s, tag) for s in services]
    if repository:
        tmp = []
        for image in images_to_del:
            tmp.append(image)
            tmp.append("/".join((repository, image)))
        images_to_del = tmp

    images_regex = "(" + ")|(".join(images_to_del) + ")"

    for img in self.docker.images():
        for repo_tag in img.get("RepoTags", []):
            if re.match(images_regex, repo_tag):
                write("Deleting image " + repo_tag)
                try:
                    self.docker.remove_image(repo_tag)
                except BaseException as exc:
                    write(exc)
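# Quick illustration of the tag-matching regex built in purge_images above.
# The image names are made up; only the pattern-building logic mirrors the
# method, showing that re.match anchors at the start of the repo tag.
import re

images_to_del = ["myproject/web:1.2.3", "myproject/db:1.2.3"]
images_regex = "(" + ")|(".join(images_to_del) + ")"

assert re.match(images_regex, "myproject/web:1.2.3")
assert re.match(images_regex, "registry.local/myproject/web:1.2.3") is None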
def _load_env_file(self, path):
    """Load an environment with its dependencies recursively"""
    with path.open() as env_file:
        content = yaml.safe_load(env_file)

    if content is None:
        content = {}
    if "default_features" not in content:
        content["default_features"] = []

    for imp_name in content.get(self.IMPORT_KEY, ()):
        imp_path = self.path / (imp_name + ".yml")
        imp_data = self._load_env_file(imp_path)
        imp_data.pop(self.IMPORT_KEY, None)
        imp_features = imp_data.pop("default_features", [])
        for dup in dedup_list(imp_features):
            write(
                f"WARNING: Feature {dup} is duplicated in import {imp_name} from env {self.env_name}",
                error=True,
            )
        merge_lists(imp_features, content["default_features"])
        content = merge(content, imp_data)

    return content
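# Hedged sketch of the input _load_env_file works with: env files are plain
# YAML dicts loaded via yaml.safe_load. The key name "import" stands in for
# self.IMPORT_KEY (its concrete value is defined elsewhere in pkr), and the
# file content is invented for illustration.
import yaml

env_yml = """
import:
  - base
default_features:
  - elk
  - dev
"""

content = yaml.safe_load(env_yml)
# -> {'import': ['base'], 'default_features': ['elk', 'dev']}
# _load_env_file would then recurse into base.yml, pop its import and
# default_features keys, warn about any duplicated features, and merge the
# remaining data into this dict.
print(content)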
def get_templates(self):
    templates = super().get_templates()

    # Cleanup merged file
    if self.compose_file.exists():
        self.compose_file.unlink()

    if "compose_file" not in self.driver_meta:
        write("Warning: No docker-compose file is provided with this environment.")
        return templates

    for file in [self.driver_meta["compose_file"]] + self.driver_meta.get(
            "compose_extension_files", []):
        templates.append({
            "source": self.kard.env.pkr_path / file,
            "origin": (self.kard.env.pkr_path / file).parent,
            "destination": "",
            "subfolder": "compose",
        })

    return templates
def import_images(self, services, tag=None):
    """Import images from kard to local docker

    Args:
      * services: the name of the images to load
      * tag: the tag of the version to load
    """
    tag = tag or self.kard.meta['tag']
    save_path = Path(self.kard.path) / 'images'

    for child in save_path.iterdir():
        service = child.name[:-4]
        if service not in services:
            continue
        write('Importing {} ...'.format(child))
        with open(child, 'rb') as f:
            rsp = self.docker.load_image(f.read())
            for message in rsp:
                write(message.get('stream', ''))
        write('\n')

    write('All images have been loaded successfully !' + '\n')
def download_images(self, services, registry, username, password,
                    tag=None, nopull=False, **kwargs):
    """Download images from a remote registry and save them to the kard

    Args:
      * services: the name of the images to download
      * registry: a DockerRegistry instance
      * tag: the tag of the version to download
    """
    services = services or list(self.kard.env.get_container().keys())
    tag = tag or self.kard.meta["tag"]
    save_path = Path(self.kard.path) / "images"

    write("Cleaning images destination {}".format(save_path))
    save_path.mkdir(exist_ok=True)
    for child in save_path.iterdir():
        child.unlink()

    if not nopull:
        self.pull_images(services, registry, username, password, tag=tag)

    for service in services:
        image_path = save_path / "{}.tar".format(service)
        image_name = self.make_image_name(service, tag)
        write("Saving {} to {}".format(image_name, image_path))
        sys.stdout.flush()
        with open(image_path, "wb") as f:
            for chunk in self.docker.get_image(image_name):
                f.write(chunk)
        write(" Done !" + "\n")

    write("All images have been saved successfully !" + "\n")
def import_images(self, services, tag=None, **kwargs):
    """Import images from kard to local docker

    Args:
      * services: the name of the images to load
      * tag: the tag of the version to load
    """
    services = services or list(self.kard.env.get_container().keys())
    tag = tag or self.kard.meta["tag"]
    save_path = Path(self.kard.path) / "images"

    for child in save_path.iterdir():
        service = child.name[:-4]
        if service not in services:
            continue
        write("Importing {} ...".format(child))
        with open(child, "rb") as f:
            rsp = self.docker.load_image(f.read())
            for message in rsp:
                write(message.get("stream", ""))
        write("\n")

    write("All images have been loaded successfully !" + "\n")