def update_submodules(self, depth: int = 0, jobs: int = 0) -> None:
    """
    Update all Git submodules recursively.

    Args:
        depth: If non-zero, fetch submodules as shallow clones of this depth
        jobs: If non-zero, number of submodules fetched in parallel

    Returns:
        None
    """
    command = ["git", "submodule", "update", "--init", "--recursive"]
    if depth:
        command.extend(["--depth", str(depth)])
    if jobs:
        command.extend(["--jobs", str(jobs)])

    logger.info(icon=f"{self.ICON} 🌱", title="Updating submodules: ", end="")
    outcome = run_os_command(command)
    if outcome.return_code:
        logger.std(outcome, raise_exception=True)
    else:
        logger.success()
def create_kubeconfig(self, track: str) -> Tuple[str, str]:
    """
    Create temporary kubernetes configuration based on contents of
    KUBECONFIG_RAW or KUBECONFIG_RAW_<track>.

    The track-specific variable takes precedence over the generic one.

    Args:
        track: Current deployment track

    Returns:
        A tuple of (kubeconfig file path, environment variable name used).
        Both elements are empty strings when no raw kubeconfig is set.
    """
    name = ""
    used_key = ""
    possible_keys = ["KUBECONFIG_RAW"]
    if track:
        possible_keys.append(f"KUBECONFIG_RAW_{track.upper()}")

    # reversed(): check the track-specific variable before the generic one
    for key in reversed(possible_keys):
        kubeconfig = os.environ.get(key, "")
        if not kubeconfig:
            continue

        fp, name = tempfile.mkstemp()
        with os.fdopen(fp, "w") as f:
            f.write(kubeconfig)

        # Bug fix: previously the leaked loop variable was returned, so an
        # exhausted loop reported the last *tried* key even though no
        # kubeconfig was created. Record the key only on success.
        used_key = key
        logger.info(message=f"Created a kubeconfig file using {key}")
        break

    return name, used_key
def _handle_api_error(error: ApiException, raise_client_exception: bool = False) -> Any:
    """
    Handle an ApiException from the Kubernetes client.

    Client errors (4xx) are logged as informational; server errors are
    logged as fatal.

    Args:
        error: ApiException to handle
        raise_client_exception: Should the method raise an error on client errors

    Returns:
        The parsed JSON body of the error

    Raises:
        ApiException: If the ``raise_client_exception`` argument is set to ``True``
    """
    error_body = loads_json(error.body) or {"message": "An unknown error occurred"}

    if Kubernetes._is_client_error(error.status):
        reason = camel_case_split(str(error_body.get("reason", "Unknown")))
        logger.info(
            title=f"{cf.yellow}{cf.bold}{reason}{cf.reset}",
            message=f" ({error.status} {cf.italic}{error_body['message'].capitalize()}{cf.reset})",
        )
        if raise_client_exception:
            raise error
    else:
        logger.error(error=error, raise_exception=True)

    return error_body
def build_stages(self, push_images: bool = True) -> List[DockerImage]:
    """
    Build every buildable stage of a Dockerfile and tag the results.

    Args:
        push_images: Whether built images are pushed to the registry

    Returns:
        List of the Docker images that were built
    """
    images: List[DockerImage] = []
    stages = self.get_stages()
    with settings.plugin_manager.lifecycle.container_build():
        for stage in stages:
            if not stage.build:
                continue
            if stage.development:
                logger.info(
                    icon="ℹ️",
                    title=f"Found test/development stage '{stage.name}', building that as well",
                )
            images.append(
                self.build_stage(
                    stage.name, final_image=stage.final, push_images=push_images
                )
            )
    return images
def update_repos(self) -> None:
    """Refresh the locally cached Helm repository indexes."""
    logger.info(icon=f"{self.ICON} 🔄", title="Updating Helm repos: ", end="")
    outcome = run_os_command(["helm", "repo", "update"])
    if outcome.return_code:
        logger.std(outcome, raise_exception=True)
    else:
        logger.success()
def remove_repo(self, repo_name: str) -> None:
    """
    Remove a Helm repository from the local configuration.

    Args:
        repo_name: Local name of the repository to remove
    """
    # NOTE(review): the icon string below appears mojibake-encoded
    # (likely meant to be "➖") — preserved as-is to keep output identical.
    logger.info(
        icon=f"{self.ICON} âž–",
        title=f"Removing Helm repo {repo_name}: ",
        end="",
    )
    outcome = run_os_command(["helm", "repo", "remove", repo_name])
    if outcome.return_code:
        logger.std(outcome, raise_exception=True)
    else:
        logger.success()
def delete_image(self, image: DockerImage) -> None:
    """
    Remove every tag of a Docker image from the local Docker daemon.

    Failures for individual tags are logged but not fatal, so remaining
    tags are still attempted.

    Args:
        image: Image whose tags should be removed
    """
    logger.warning(icon=f"{self.ICON}", message="Removing Docker image")
    for tag in image.tags:
        tagged_name = f"{image.repository}:{tag}"
        logger.info(message=f"\t {image.repository}:{tag}: ", end="")
        outcome = run_os_command(["docker", "rmi", tagged_name], shell=False)
        if not outcome.return_code:
            logger.success()
        else:
            logger.std(outcome, raise_exception=False)
def pull_image(self, image: str) -> bool:
    """
    Pull a Docker image from its registry.

    Args:
        image: Full image reference to pull

    Returns:
        True if the pull succeeded, False otherwise
    """
    logger.info(icon=f"{self.ICON} ⏬", title=f"Pulling {image}:", end=" ")
    outcome = run_os_command(["docker", "pull", image], shell=False)
    if not outcome.return_code:
        logger.success()
        return True
    logger.std(outcome, raise_exception=False)
    return False
def setup_helm(self) -> None:
    """
    Makes sure that Helm is ready to use.

    Registers the default chart repositories and refreshes their indexes.

    Returns:
        None
    """
    logger.info(icon=f"{self.ICON} 🚀", title="Initializing Helm")
    # Repos are added without updating; a single refresh runs at the end
    self.add_repo("stable", "https://charts.helm.sh/stable", update=False)
    self.add_repo("bitnami", "https://charts.bitnami.com/bitnami", update=False)
    self.update_repos()
def load_plugins(self) -> None:
    """Load the core plugins, printing a header before the first success."""
    header_printed = False
    for plugin in KOLGA_CORE_PLUGINS:
        plugin_loaded, message = self._load_plugin(plugin)
        if not plugin_loaded:
            continue
        if not header_printed:
            logger.info(
                icon="🔌",
                title="Loading plugins:",
            )
            header_printed = True
        logger.info(f"{plugin.verbose_name}: {message}")
def add_repo(self, repo_name: str, repo_url: str, update: bool = True) -> None:
    """
    Register a Helm chart repository.

    Args:
        repo_name: Local name for the repository
        repo_url: URL of the repository
        update: When True, refresh the repo indexes after adding
    """
    # NOTE(review): the icon string below appears mojibake-encoded
    # (likely meant to be "➕") — preserved as-is to keep output identical.
    logger.info(
        icon=f"{self.ICON} âž•",
        title=f"Adding Helm repo {repo_url} with name {repo_name}: ",
        end="",
    )
    outcome = run_os_command(["helm", "repo", "add", repo_name, repo_url])
    if outcome.return_code:
        logger.std(outcome, raise_exception=True)
    else:
        logger.success()

    if update:
        self.update_repos()
def _create_file_secrets(self, key: str, value: str) -> None:
    """
    Materialize a K8S_FILE_SECRET-prefixed secret as a file on disk.

    The value is written to a temp file and the environment variable named
    after the upper-cased key is set to that file's path.

    Args:
        key: Secret name carrying the K8S_FILE_SECRET prefix
        value: Secret content to write out
    """
    logger.info(
        icon=f"{self.ICON} 🔑",
        message=f"Found secret with K8S_FILE_SECRET prefix {key}. Creating file type secret",
    )
    if settings.active_ci:
        secret_dir = settings.active_ci.VALID_FILE_SECRET_PATH_PREFIXES[0]
    else:
        secret_dir = "/tmp/"  # nosec
    fd, path = mkstemp(dir=secret_dir)
    with os.fdopen(fd, "w") as f:
        f.write(value)
    os.environ[key.upper()] = path
def delete(
    self,
    resource: str,
    name: Optional[str] = None,
    labels: Optional[Dict[str, str]] = None,
    namespace: str = settings.K8S_NAMESPACE,
) -> None:
    """
    Delete a Kubernetes resource by name and/or label selector.

    Args:
        resource: Resource type to delete (e.g. "pod", "secret")
        name: Optional name of the resource
        labels: Optional label selector
        namespace: Namespace to delete from
    """
    command = [
        "kubectl",
        "delete",
        resource,
        "--ignore-not-found",
        "--wait=true",
        f"--namespace={namespace}",
    ]
    logger.info(icon=f"{self.ICON} 🗑️ ", title=f"Removing {resource}", end="")
    if labels:
        selector = self.labels_to_string(labels)
        command += ["-l", selector]
        logger.info(title=f" with labels {selector}", end="")
    if name:
        command.append(name)
        logger.info(title=f" with name '{name}'", end="")
    logger.info(": ", end="")

    outcome = run_os_command(command, shell=True)
    if outcome.return_code:
        logger.std(outcome, raise_exception=True)
    else:
        logger.success()
def get_certification_issuer(self, track: str) -> Optional[str]:
    """
    Resolve which cert-manager cluster issuer to use for a track.

    A configured K8S_CLUSTER_ISSUER takes precedence and must exist in the
    cluster; otherwise a track-specific issuer is probed and cluster
    defaults are used when it is absent.

    Args:
        track: Current deployment track

    Returns:
        Name of the issuer, or None to fall back to cluster defaults
    """
    logger.info(icon=f"{self.ICON} 🏵️️", title="Checking certification issuer", end="")

    if settings.K8S_CLUSTER_ISSUER:
        issuer: str = settings.K8S_CLUSTER_ISSUER
        missing_is_fatal = True
        logger.info(message=" (settings): ", end="")
    else:
        issuer = f"certificate-letsencrypt-{track}"
        missing_is_fatal = False
        logger.info(message=" (track): ", end="")

    outcome = run_os_command(["kubectl", "get", "clusterissuer", issuer], shell=True)
    if not outcome.return_code:
        logger.success(message=issuer)
        return issuer

    not_found = f'No issuer "{issuer}" found, using cluster defaults'
    if missing_is_fatal:
        # An explicitly configured issuer that is missing is a hard error
        logger.error(message=not_found, raise_exception=True)
    else:
        logger.info(message=not_found)
    return None
def _resource_command(
    self,
    resource: str,
    name: Optional[str] = None,
    labels: Optional[Dict[str, str]] = None,
    namespace: str = settings.K8S_NAMESPACE,
) -> List[str]:
    """
    Build the common kubectl argument list for addressing a resource.

    Also logs the selector/name fragments without a newline so callers can
    continue the same log line.
    """
    args = [resource, f"--namespace={namespace}"]
    if labels:
        selector = self.labels_to_string(labels)
        args += ["-l", selector]
        logger.info(title=f" with labels {selector}", end="")
    if name:
        args.append(name)
        logger.info(title=f" with name '{name}'", end="")
    return args
def _create_basic_auth_data(
    self, basic_auth_users: List[BasicAuthUser] = settings.K8S_INGRESS_BASIC_AUTH
) -> Dict[str, str]:
    """
    Create secret data from list of `BasicAuthUser`

    The user credentials from the list of users will be encrypted and
    added to a temporary file using the `htpasswd` tool from Apache. The
    file is then read and base64 encoded (as required by Kubernetes secrets).

    Args:
        basic_auth_users: List of `BasicAuthUser`s

    Returns:
        A dict with the key `auth` and base64 content of a htpasswd file
        as value, or an empty dict when no users are given
    """
    # Bug fix: check before logging, so an empty user list no longer leaves
    # a dangling 'Generating basic auth data: ' line without a result.
    if not basic_auth_users:
        return {}

    logger.info(icon=f"{self.ICON} 🔨", title="Generating basic auth data: ", end="")
    with tempfile.NamedTemporaryFile() as f:
        passwd_path = Path(f.name)
        for i, user in enumerate(basic_auth_users):
            os_command = ["htpasswd", "-b"]
            if i == 0:
                # First entry creates the file; subsequent entries append
                os_command.append("-c")
            os_command += [str(passwd_path), user.username, user.password]
            result = run_os_command(os_command)
            if result.return_code:
                logger.error(
                    message="The 'htpasswd' command failed to create an entry",
                    raise_exception=True,
                )
        # Encode while the NamedTemporaryFile still exists
        encoded_file = self._b64_encode_file(passwd_path)

    logger.success()
    # Bug fix: report the number of users actually passed in, not the
    # global settings.K8S_INGRESS_BASIC_AUTH (they differ when a caller
    # supplies an explicit list).
    logger.info(
        message=f"\t {len(basic_auth_users)} users will be added to basic auth"
    )
    return {"auth": encoded_file}
def get_secrets(self) -> Dict[str, str]:
    """
    Fetch project secrets from Vault's KV store.

    Reads from VAULT_PROJECT_SECRET_NAME (or "<project>-<track>" when unset)
    using KV v1 or v2 depending on settings, optionally merges
    Terraform-created secrets, and converts K8S_FILE_SECRET-prefixed entries
    into file-based secrets.

    Returns:
        Mapping of secret names to values; empty when the client is not
        initialized or the secret path does not exist.
    """
    if self.initialized:
        secrets_list = {}
        # An explicit secret name wins over the default "<project>-<track>"
        secret_path = (
            settings.VAULT_PROJECT_SECRET_NAME
            if settings.VAULT_PROJECT_SECRET_NAME
            else f"{settings.PROJECT_NAME}-{self.track}"
        )
        try:
            logger.info(
                icon=f"{self.ICON} 🔑",
                message=f"Checking for secrets in {settings.VAULT_KV_SECRET_MOUNT_POINT}/{secret_path}",
            )
            secrets = {}
            if settings.VAULT_KV_VERSION == 2:
                # KV v2 nests the payload one level deeper than v1
                secrets = self.client.secrets.kv.read_secret_version(
                    path=secret_path,
                    mount_point=settings.VAULT_KV_SECRET_MOUNT_POINT,
                )
                secrets_list = secrets["data"]["data"]
            else:
                secrets = self.client.secrets.kv.v1.read_secret(
                    path=secret_path,
                    mount_point=settings.VAULT_KV_SECRET_MOUNT_POINT,
                )
                secrets_list = secrets["data"]

            # Check secrets defined by Terraform (KV v2 only)
            if settings.VAULT_TF_SECRETS and settings.VAULT_KV_VERSION == 2:
                secrets_list = self._read_tf_secrets(secret_path, secrets_list)

            # Check for file type secrets; list() lets us pop while iterating
            for key, value in list(secrets_list.items()):
                if key.startswith(settings.K8S_FILE_SECRET_PREFIX):
                    secrets_list.pop(key)
                    self._create_file_secrets(key, value)
        except hvac.exceptions.InvalidPath as e:
            # A missing path is not fatal: report it and fall through with
            # whatever was collected (an empty dict if the first read failed)
            logger.error(
                icon=f"{self.ICON} 🔑",
                message="Secrets not found ",
                error=e,
                raise_exception=False,
            )
        return secrets_list
    return {}
def _read_tf_secrets(
    self, secret_path: str, secrets_list: Dict[str, str]
) -> Dict[str, str]:
    """
    Merge Terraform-managed secrets from "<secret_path>-tf" into secrets_list.

    Keys already present in secrets_list keep their existing values; only
    new Terraform keys are added.
    """
    logger.info(
        icon=f"{self.ICON} 🔑",
        message=f"Checking for secrets in {settings.VAULT_KV_SECRET_MOUNT_POINT}/{secret_path}-tf",
    )
    tf_secrets = self.client.secrets.kv.read_secret_version(
        path=f"{secret_path}-tf",
        mount_point=settings.VAULT_KV_SECRET_MOUNT_POINT,
    )
    tf_data = tf_secrets["data"]["data"]
    # Drop colliding Terraform keys so existing secrets take precedence
    for key in list(tf_data):
        if key in secrets_list:
            tf_data.pop(key)
    secrets_list.update(tf_data)
    return secrets_list
def create_namespace(self, namespace: str = settings.K8S_NAMESPACE) -> str:
    """
    Create a Kubernetes namespace

    Args:
        namespace: Name of the namespace to create

    Returns:
        On success, returns the name of the namespace

    Raises:
        ApiException: If the namespace creation fails by other means than
            a namespace conflict, something that happens if the namespace
            already exists.
    """
    core_api = k8s_client.CoreV1Api(self.client)
    logger.info(icon=f"{self.ICON} 🔨", title=f"Checking namespace {namespace}: ", end="")

    try:
        core_api.read_namespace(name=namespace)
    except ApiException as e:
        self._handle_api_error(e)
        # Anything other than "not found" is unexpected when probing
        if e.status != 404:
            raise e
    else:
        # Namespace already exists; nothing to create
        logger.success()
        return namespace

    metadata = k8s_client.V1ObjectMeta(
        labels=self.get_namespace_labels(),
        name=namespace,
    )
    try:
        core_api.create_namespace(k8s_client.V1Namespace(metadata=metadata))
    except ApiException as e:
        self._handle_api_error(e)
        if e.status != 409:  # Namespace exists
            raise e
    logger.success()
    return namespace
def setup_helm(self) -> None:
    """
    Makes sure that Helm is ready to use

    Registers the default chart repositories and refreshes their indexes.

    Returns:
        None
    """
    logger.info(icon=f"{self.ICON} 🚀", title="Initializing Helm")
    # TODO: Remove once this is added by default and Helm 3 is stable
    # Bug fix: kubernetes-charts.storage.googleapis.com was decommissioned
    # in November 2020; charts.helm.sh/stable is its official replacement.
    self.add_repo("stable", "https://charts.helm.sh/stable", update=False)
    self.add_repo("bitnami", "https://charts.bitnami.com/bitnami", update=False)
    self.update_repos()
def build_stage(
    self,
    stage: str = "",
    final_image: bool = False,
    push_images: bool = True,
    disable_cache: bool = settings.BUILDKIT_CACHE_DISABLE,
) -> DockerImage:
    """
    Build a single Dockerfile stage with BuildKit and tag it.

    Args:
        stage: Name of the stage to build
        final_image: Whether this stage produces the final image tags
        push_images: Push the resulting tags to the registry
        disable_cache: Skip registry-backed BuildKit cache import/export

    Returns:
        The built Docker image with its tags
    """
    logger.info(icon=f"{self.ICON} 🔨", title=f"Building stage '{stage}': ")

    command = [
        "docker",
        "buildx",
        "build",
        f"--file={self.dockerfile.absolute()}",
        f"--target={stage}",
        "--progress=plain",
    ]
    command.extend(self.get_build_arguments())
    if push_images:
        command.append("--push")

    if not disable_cache:
        cache_to = self.create_cache_tag(postfix=stage)
        logger.info(title=f"\t ℹ️ Cache to: {cache_to}")
        command.append(f"--cache-to=type=registry,ref={cache_to},mode=max")
        for cache_tag in self.get_cache_tags():
            logger.info(title=f"\t ℹ️ Cache from: {cache_tag}")
            command.append(f"--cache-from=type=registry,ref={cache_tag}")

    tags = self.get_image_tags(stage, final_image=final_image)
    command.extend(f"--tag={self.image_repo}:{tag}" for tag in tags)
    command.append(f"{self.docker_context.absolute()}")

    image = DockerImage(repository=self.image_repo, tags=tags)
    with settings.plugin_manager.lifecycle.container_build_stage(
        image=image, stage=stage
    ):
        result = run_os_command(command, shell=False)
        if result.return_code:
            logger.std(result, raise_exception=True)
        else:
            for tag in tags:
                logger.info(title=f"\t 🏷 Tagged: {self.image_repo}:{tag}")
    return image
def __init__(
    self,
    track: str,
    vault_addr: str = settings.VAULT_ADDR,
    skip_tls: bool = settings.VAULT_TLS_ENABLED,
) -> None:
    """
    Initialize a Vault client wrapper for the given deployment track.

    Args:
        track: Current deployment track
        vault_addr: Vault server URL; when empty, Vault usage is skipped
        skip_tls: TLS flag stored on the instance (see NOTE below)
    """
    # NOTE(review): `skip_tls` defaults to VAULT_TLS_ENABLED and is never
    # passed to the client; `verify` reads VAULT_TLS_ENABLED directly. The
    # naming looks inverted ("skip" vs "enabled") — confirm the intended
    # semantics before relying on this parameter.
    self.client = hvac.Client(url=vault_addr, verify=settings.VAULT_TLS_ENABLED)
    self.vault_addr = vault_addr
    self.skip_tls = skip_tls
    self.track = track
    self.initialized = False
    # A non-empty address is the only initialization requirement here;
    # authentication happens elsewhere.
    if self.vault_addr:
        self.initialized = True
    else:
        logger.info(
            icon=f"{self.ICON} ℹ️",
            message="VAULT_ADDR not defined. Skipping Vault usage.",
        )
def get(
    self,
    resource: str,
    name: Optional[str] = None,
    labels: Optional[Dict[str, str]] = None,
    namespace: str = settings.K8S_NAMESPACE,
    raise_exception: bool = True,
) -> SubprocessResult:
    """
    Run `kubectl get` for a resource, optionally filtered by name/labels.

    Args:
        resource: Resource type to fetch
        name: Optional resource name
        labels: Optional label selector
        namespace: Namespace to query
        raise_exception: Raise when kubectl exits non-zero

    Returns:
        The result of the kubectl subprocess call
    """
    logger.info(icon=f"{self.ICON} ℹ️ ", title=f"Getting {resource}", end="")
    command = ["kubectl", "get"] + self._resource_command(
        resource=resource, name=name, labels=labels, namespace=namespace
    )
    logger.info(": ", end="")

    outcome = run_os_command(command, shell=True)  # nosec
    if outcome.return_code:
        logger.std(outcome, raise_exception=raise_exception)
    else:
        logger.success()
    return outcome
def create_secret(
    self,
    data: Dict[str, str],
    namespace: str,
    track: str,
    project: Project,
    secret_name: str,
    encode: bool = True,
) -> None:
    """
    Create or replace a Kubernetes secret in the given namespace.

    Args:
        data: Secret payload as key/value pairs
        namespace: Namespace to place the secret in
        track: Current deployment track
        project: Project the secret belongs to
        secret_name: Name of the secret object
        encode: Base64-encode values before submitting (set False when the
            data is already encoded)
    """
    deploy_name = get_deploy_name(track=track, postfix=project.name)
    core_api = k8s_client.CoreV1Api(self.client)
    metadata = k8s_client.V1ObjectMeta(
        name=secret_name,
        namespace=namespace,
        labels={"release": deploy_name},
    )
    payload = self._encode_secret(data) if encode else data
    body = k8s_client.V1Secret(data=payload, metadata=metadata, type="generic")

    logger.info(
        icon=f"{self.ICON} 🔨",
        title=f"Creating secret '{secret_name}' for namespace '{namespace}': ",
        end="",
    )
    try:
        core_api.create_namespaced_secret(namespace=namespace, body=body)
    except ApiException:
        # Creation failed (e.g. the secret already exists): replace instead
        try:
            core_api.replace_namespaced_secret(
                name=secret_name, namespace=namespace, body=body
            )
        except ApiException as e:
            self._handle_api_error(e, raise_client_exception=True)
    logger.success()
def build_stage(
    self, stage: str = "", final_image: bool = False, push_images: bool = True
) -> DockerImage:
    """
    Build a single Dockerfile stage with BuildKit and tag it.

    Args:
        stage: Name of the stage to build
        final_image: Whether this stage produces the final image tags
        push_images: Push the resulting tags to the registry

    Returns:
        The built Docker image with its tags
    """
    logger.info(icon=f"{self.ICON} 🔨", title=f"Building stage '{stage}': ")

    cache_sources = self.get_cache_tags()
    # No postfix for the final image's cache tag
    cache_postfix = "" if final_image else stage
    command = [
        "docker",
        "buildx",
        "build",
        f"--file={self.dockerfile.absolute()}",
        f"--target={stage}",
        "--progress=plain",
    ]
    if push_images:
        command.append("--push")

    cache_to = self.create_cache_tag(postfix=cache_postfix)
    logger.info(title=f"\t ℹ️ Cache to: {cache_to}")
    command.append(f"--cache-to=type=registry,ref={cache_to},mode=max")
    for cache_tag in cache_sources:
        logger.info(title=f"\t ℹ️ Cache from: {cache_tag}")
        command.append(f"--cache-from=type=registry,ref={cache_tag}")

    tags = self.get_image_tags(stage, final_image=final_image)
    command.extend(f"--tag={self.image_repo}:{tag}" for tag in tags)
    command.append(f"{self.docker_context.absolute()}")

    result = run_os_command(command, shell=False)
    if result.return_code:
        logger.std(result, raise_exception=True)
    else:
        for tag in tags:
            logger.info(title=f"\t 🏷 Tagged: {self.image_repo}:{tag}")
    return DockerImage(repository=self.image_repo, tags=tags)
def logs(
    self,
    labels: Optional[Dict[str, str]] = None,
    since_time: Optional[str] = None,
    namespace: str = settings.K8S_NAMESPACE,
    print_result: bool = True,
    raise_exception: bool = True,
) -> SubprocessResult:
    """
    Fetch the last 100 log lines of pods matching a label selector.

    Args:
        labels: Optional label selector for the pods
        since_time: Only return logs after this timestamp
        namespace: Namespace to read logs from
        print_result: Print the fetched logs on success
        raise_exception: Raise when kubectl exits non-zero

    Returns:
        The result of the kubectl subprocess call
    """
    command = [
        "kubectl",
        "logs",
        f"--namespace={namespace}",
        "--prefix=true",
        "--timestamps=true",
        "--tail=100",
    ]
    logger.info(icon=f"{self.ICON} 📋️️ ", title="Getting logs for resource: ", end="")
    if labels:
        selector = self.labels_to_string(labels)
        command += ["-l", selector]
        logger.info(title=f" with labels {selector}", end="")
    if since_time:
        command.append(f"--since-time={since_time}")
        logger.info(title=f" since {since_time}", end="")

    outcome = run_os_command(command, shell=True)
    if outcome.return_code:
        logger.std(outcome, raise_exception=raise_exception)
    else:
        logger.success()
        if print_result:
            logger.std(outcome)
    return outcome
def create_application_deployment(
    self,
    project: Project,
    namespace: str,
    track: str,
) -> None:
    """
    Deploy the application Helm chart for a project.

    Builds the chart values from project/settings configuration, runs
    `helm upgrade`, and on failure dumps the values (without environment
    variables), resource status and pod logs before raising
    DeploymentFailed.

    Args:
        project: Project being deployed
        namespace: Target Kubernetes namespace
        track: Current deployment track

    Raises:
        DeploymentFailed: If the Helm upgrade exits non-zero
    """
    helm_path = self.get_helm_path()
    values: ApplicationDeploymentValues = {
        "application": {
            "initializeCommand": project.initialize_command,
            "migrateCommand": project.migrate_command,
            "probeFailureThreshold": project.probe_failure_threshold,
            "probeInitialDelay": project.probe_initial_delay,
            "probePeriod": project.probe_period,
            "livenessPath": project.liveness_path,
            "readinessPath": project.readiness_path,
            "secretName": project.secret_name,
            "track": track,
        },
        "gitlab": {
            "app": settings.PROJECT_PATH_SLUG,
            "env": settings.ENVIRONMENT_SLUG,
        },
        "image": project.image,
        "ingress": {"maxBodySize": settings.K8S_INGRESS_MAX_BODY_SIZE},
        "namespace": namespace,
        # NOTE(review): `kuberenetes_safe_name` is spelled this way where it
        # is defined in the project — confirm before "fixing" the call name.
        "releaseOverride": f"{settings.ENVIRONMENT_SLUG}-{kuberenetes_safe_name(project.name)}",
        "replicaCount": project.replica_count,
        "service": {
            "targetPort": project.service_port,
            "url": project.url,
            "urls": [project.url, *project.additional_urls],
        },
        "jobsOnly": settings.KOLGA_JOBS_ONLY,
    }

    # Optional values: each is added only when the corresponding
    # project/settings attribute is set
    if project.basic_auth_secret_name:
        values["ingress"]["basicAuthSecret"] = project.basic_auth_secret_name
    if project.file_secret_name:
        values["application"]["fileSecretName"] = project.file_secret_name
        values["application"]["fileSecretPath"] = settings.K8S_FILE_SECRET_MOUNTPATH
    if project.request_cpu:
        values["application"]["requestCpu"] = project.request_cpu
    if project.request_ram:
        values["application"]["requestRam"] = project.request_ram
    if project.temp_storage_path:
        values["application"]["temporaryStoragePath"] = project.temp_storage_path

    cert_issuer = self.get_certification_issuer(track=track)
    if cert_issuer:
        values["ingress"]["clusterIssuer"] = cert_issuer
    if settings.K8S_CERTMANAGER_USE_OLD_API:
        # Older cert-manager releases use the certmanager.k8s.io prefix
        values["ingress"]["certManagerAnnotationPrefix"] = "certmanager.k8s.io"
    if settings.K8S_INGRESS_PREVENT_ROBOTS:
        values["ingress"]["preventRobots"] = True
    if settings.K8S_INGRESS_DISABLED:
        values["ingress"]["disabled"] = True
    if settings.K8S_LIVENESS_FILE:
        values["application"]["livenessFile"] = settings.K8S_LIVENESS_FILE
    if settings.K8S_READINESS_FILE:
        values["application"]["readinessFile"] = settings.K8S_READINESS_FILE

    # Timestamp lets the failure path fetch only logs from this rollout
    deployment_started_at = current_rfc3339_datetime()
    result = self.helm.upgrade_chart(
        chart_path=helm_path,
        name=project.deploy_name,
        namespace=namespace,
        values=values,
        raise_exception=False,
    )
    if result.return_code:
        # Helm failed: dump values, resource status and recent pod logs
        logger.info(
            icon=f"{self.ICON} 🏷️",
            title="Deployment values (without environment vars):",
        )
        for line in yaml.dump(values).split("\n"):
            logger.info(message=f"\t{line}")
        application_labels = {"release": project.deploy_name}
        status = self.status(namespace=namespace, labels=application_labels)
        logger.info(message=str(status))
        self.logs(
            labels=application_labels,
            since_time=deployment_started_at,
            namespace=namespace,
            raise_exception=False,
            print_result=True,
        )
        raise DeploymentFailed()
    logger.info(
        icon=f"{self.ICON} 📄",
        title=f"Deployment can be accessed via {project.url}",
    )
def create_application_deployment(
    self,
    namespace: str,
    project: Project,
    track: str,
) -> None:
    """
    Deploy the application Helm chart for a project.

    Collects pod logs during the rollout with a background KubeLoggerThread;
    on failure dumps the values (without environment variables), resource
    status and the collected logs before raising DeploymentFailed.

    Args:
        namespace: Target Kubernetes namespace
        project: Project being deployed
        track: Current deployment track

    Raises:
        DeploymentFailed: If the Helm upgrade exits non-zero
    """
    helm_path = self.get_helm_path()
    values = self.get_application_deployment_values(
        namespace=namespace,
        project=project,
        track=track,
    )
    # Selector matching exactly the pods created by this deployment
    application_labels = {
        "deploymentTime": values["deployment"]["timestamp"],
        "release": project.deploy_name,
        "track": track,
    }
    log_collector = KubeLoggerThread(
        labels=application_labels,
        namespace=namespace,
    )
    with settings.plugin_manager.lifecycle.project_deployment(
        namespace=namespace, project=project, track=track
    ):
        log_collector.start()
        result = self.helm.upgrade_chart(
            chart_path=helm_path,
            name=project.deploy_name,
            namespace=namespace,
            values=values,
            raise_exception=False,
        )
        log_collector.stop()
        if result.return_code:
            # Helm failed: dump values, resource status and collected logs
            logger.info(
                icon=f"{self.ICON} 🏷️",
                title="Deployment values (without environment vars):",
            )
            for line in yaml.dump(values).split("\n"):
                logger.info(message=f"\t{line}")
            status = self.status(namespace=namespace, labels=application_labels)
            logger.info(message=str(status))
            logger.info(
                icon=f"{self.ICON} 📋️️ ",
                title="Getting logs for resource: ",
            )
            with log_collector.log_path.open() as f:
                for line in f:
                    logger.info(line, end="")
            raise DeploymentFailed()
    if not settings.K8S_INGRESS_DISABLED:
        logger.info(
            icon=f"{self.ICON} 📄",
            title=f"Deployment can be accessed via {project.url}",
        )
def upgrade_chart(
    self,
    name: str,
    values: HelmValues,
    namespace: str,
    chart: str = "",
    chart_path: Optional[Path] = None,
    values_files: Optional[List[Path]] = None,
    install: bool = True,
    version: Optional[str] = None,
    raise_exception: bool = True,
) -> SubprocessResult:
    """
    Run `helm upgrade` for a release, optionally installing it first.

    Args:
        name: Release name (sanitized to a Kubernetes-safe name)
        values: Chart values, written to a temporary values file
        namespace: Namespace to deploy into
        chart: Chart reference; overridden by chart_path when given
        chart_path: Path to a local chart (relative paths resolve against
            the devops root)
        values_files: Additional values files passed to Helm
        install: Install the release if it does not exist (--install)
        version: Optional chart version
        raise_exception: Raise when Helm exits non-zero

    Returns:
        The result of the helm subprocess call
    """
    if chart_path:
        if not chart_path.is_absolute():
            chart_path = settings.devops_root_path / chart_path
        if not chart_path.exists():
            logger.error(
                message=f"Path '{str(chart_path)}' does not exist",
                error=OSError(),
                raise_exception=True,
            )
        chart = str(chart_path)

    logger.info(
        icon=f"{self.ICON} 📄",
        title=f"Upgrading chart from '{chart}': ",
        end="",
    )

    # Budget: initial probe delay (doubled for multi-replica rollouts) plus
    # the full probe failure window plus a fixed buffer
    replica_timeout_multiplier = 2 if settings.K8S_REPLICACOUNT > 1 else 1
    timeout = (
        (settings.K8S_PROBE_INITIAL_DELAY * replica_timeout_multiplier)
        + (settings.K8S_PROBE_FAILURE_THRESHOLD * settings.K8S_PROBE_PERIOD)
        + 120  # Buffer time
    )

    # Construct initial helm upgrade command
    helm_command = [
        "helm",
        "upgrade",
        "--atomic",
        "--timeout",
        f"{timeout}s",
        "--history-max",
        "30",
    ]
    # Bug fix: previously `"--install" if install else ""` placed an empty
    # string into the argv list when install=False, which Helm would
    # receive as a bogus positional argument.
    if install:
        helm_command.append("--install")
    helm_command += ["--namespace", f"{namespace}"]

    if version:
        helm_command += ["--version", version]

    # Add values files
    if values_files:
        helm_command += self.get_chart_params(flag="--values", values=values_files)

    safe_name = kubernetes_safe_name(name=name)
    values_yaml = yaml.dump(values)

    # buffering=0 ensures the values are on disk before helm reads the file
    with NamedTemporaryFile(buffering=0) as fobj:
        fobj.write(values_yaml.encode())
        result = run_os_command(
            [*helm_command, "--values", fobj.name, f"{safe_name}", f"{chart}"],
        )

    if result.return_code:
        logger.std(result, raise_exception=raise_exception)
        return result

    logger.success()
    logger.info(f"\tName: {safe_name} (orig: {name})")
    logger.info(f"\tNamespace: {namespace}")
    return result
def create_application_deployment(
    self,
    namespace: str,
    project: Project,
    track: str,
) -> None:
    """
    Deploy the application Helm chart for a project.

    Collects pod logs during the rollout with a background KubeLoggerThread
    and fetches namespace events after the Helm run; on failure dumps the
    values (without environment variables), resource status, events and the
    collected logs before raising DeploymentFailed.

    Args:
        namespace: Target Kubernetes namespace
        project: Project being deployed
        track: Current deployment track

    Raises:
        DeploymentFailed: If the Helm upgrade exits non-zero
    """
    pod_events: str = ""
    helm_path = self.get_helm_path()
    values = self.get_application_deployment_values(
        namespace=namespace,
        project=project,
        track=track,
    )
    # Selector matching exactly the pods created by this deployment
    application_labels = {
        "deploymentTime": values["deployment"]["timestamp"],
        "release": project.deploy_name,
        "track": track,
    }
    log_collector = KubeLoggerThread(
        labels=application_labels,
        namespace=namespace,
    )
    with settings.plugin_manager.lifecycle.project_deployment(
        namespace=namespace, project=project, track=track
    ):
        log_collector.start()
        result = self.helm.upgrade_chart(
            chart_path=helm_path,
            name=project.deploy_name,
            namespace=namespace,
            values=values,
            raise_exception=False,
        )
        # Event collection is best effort: any failure only logs at debug
        # level and the deployment outcome is unaffected
        try:
            v1 = k8s_client.EventsV1Api(self.client)
            response: EventsV1EventList = v1.list_namespaced_event(namespace)
            events: List[List[str]] = []
            event: EventsV1Event
            for event in response.items:
                events.append(
                    [
                        event.deprecated_first_timestamp.strftime("%H:%M:%S"),
                        f"{event.regarding.kind}/{event.regarding.name}",
                        event.reason,
                        event.note,
                    ]
                )
            pod_events = tabulate(
                events,
                headers=["Timestamp", "Object", "Reason", "Message"],
                tablefmt="orgtbl",
            )
        except ApiException as e:
            logger.debug(
                "Exception when calling EventsV1beta1Api->list_namespaced_event: %s\n"
                % e
            )
        except Exception as e:
            logger.debug("Getting events failed, ignoring: %s\n" % e)
        log_collector.stop()
        if result.return_code:
            # Helm failed: dump values, status, events and collected logs
            logger.info(
                icon=f"{self.ICON} 🏷️",
                title="Deployment values (without environment vars):",
            )
            for line in yaml.dump(values).split("\n"):
                logger.info(message=f"\t{line}")
            status = self.status(namespace=namespace, labels=application_labels)
            logger.info(message=str(status))
            logger.info(
                icon=f"{self.ICON} 📋️️ ",
                title="Getting events for resource: ",
            )
            logger.info(pod_events)
            logger.info(
                icon=f"{self.ICON} 📋️️ ",
                title="Getting logs for resource: ",
            )
            with log_collector.log_path.open() as f:
                for line in f:
                    logger.info(line, end="")
            raise DeploymentFailed()
    if not settings.K8S_INGRESS_DISABLED:
        logger.info(
            icon=f"{self.ICON} 📄",
            title=f"Deployment can be accessed via {project.url}",
        )