Пример #1
0
 def execute(self):
     """
     Import Occopus node definition, and build up the infrastructure
     through occopus container.

     Raises:
         AdaptorCritical: if not attached to the Occopus container, if the
             import fails (including after exhausting all retries), or if
             the build/attach step fails fatally.
     """
     logger.info("Starting Occopus execution {}".format(self.ID))
     self.status = "executing"
     if not self._config_files_exists():
         logger.info("No config generated during translation, nothing to execute")
         self.status = "Skipped"
         return
     if self.dryrun:
         logger.info("DRY-RUN: Occopus execution in dry-run mode...")
         self.status = "DRY-RUN Deployment"
         return
     else:
         if self.created:
             # Retry the import a few times: docker exec can fail
             # transiently right after the Occopus container starts.
             result = None
             run = False
             i = 0
             while not run and i < 5:
                 try:
                     logger.debug("Occopus import starting...")
                     result = self.occopus.exec_run("occopus-import {0}".format(self.occo_node_path))
                     logger.debug("Occopus import has been successful")
                     run = True
                 except Exception as e:
                     i += 1
                     logger.debug("{0}. Try {1} of 5.".format(str(e), i))
                     time.sleep(5)
             logger.debug(result)
             # BUGFIX: guard on `run` — previously, if all 5 attempts
             # raised, `result` was unbound and the line below crashed
             # with NameError instead of a clean AdaptorCritical.
             if run and "Successfully imported" in result[1].decode("utf-8"):
                 try:
                     logger.debug("Occopus build starting...")
                     exit_code, out = self.occopus.exec_run("occopus-build {} -i {} --auth_data_path {} --parallelize"
                                                       .format(self.occo_infra_path,
                                                               self.worker_infra_name,
                                                               self.auth_data_file))
                     if exit_code == 1:
                         raise AdaptorCritical(out)
                     # Attach the freshly built infrastructure to Occopus.
                     occo_api_call = requests.post("http://{0}/infrastructures/{1}/attach"
                                               .format(self.occopus_address, self.worker_infra_name))
                     if occo_api_call.status_code != 200:
                         raise AdaptorCritical("Cannot submit infra to Occopus API!")
                     logger.debug("Occopus build has been successful")

                 except docker.errors.APIError as e:
                     logger.error("{0}. Error caught in calling Docker container".format(str(e)))
                 except requests.exceptions.RequestException as e:
                     logger.error("{0}. Error caught in call to occopus API".format(str(e)))
             else:
                 logger.error("Occopus import was unsuccessful!")
                 raise AdaptorCritical("Occopus import was unsuccessful!")
         else:
             logger.error("Not connected to Occopus container!")
             raise AdaptorCritical("Occopus container connection was unsuccessful!")
     logger.info("Occopus executed")
     self.status = "executed"
    def __init__(self,
                 adaptor_id,
                 config,
                 dryrun,
                 validate=False,
                 template=None):
        """
        Constructor method of the Pk adaptor.

        Args:
            adaptor_id: unique ID of this adaptor; also names the output files.
            config: adaptor configuration dict, expected to hold a 'volume'
                key pointing at the output directory.
            dryrun: when True, downstream steps skip real deployment work.
            validate: validation-only mode flag.
            template: parsed ToscaTemplate, or None.

        Raises:
            AdaptorCritical: if template is given but not a ToscaTemplate.
        """
        super().__init__()
        if template and not isinstance(template, ToscaTemplate):
            raise AdaptorCritical("Template is not a valid TOSCAParser object")
        logger.info("Initialising the Pk adaptor with ID, config & TPL...")
        self.config = config
        self.dryrun = dryrun
        self.validate = validate
        self.pk_data = {}
        self.ID = adaptor_id
        self.status = "init"
        try:
            self.path = "{}{}.yaml".format(self.config['volume'], self.ID)
            self.tmp_path = "{}tmp_{}.yaml".format(self.config['volume'],
                                                   self.ID)
        except (KeyError, TypeError) as e:
            # Narrowed from a bare `except Exception`: only a missing
            # 'volume' key or a non-subscriptable config can fail here.
            # NOTE(review): on failure self.path/self.tmp_path stay unset,
            # so later use raises AttributeError — confirm callers tolerate
            # this log-and-continue behaviour.
            logger.error(e)
        self.tpl = template
        logger.info("Pk adaptor initialised")
Пример #3
0
 def _terraform_init(self):
     """Initialise the Terraform working directory inside the container.

     Raises:
         AdaptorCritical: if the init output lacks the success marker.
     """
     init_cmd = ["sh", "-c", "terraform init -no-color" + LOG_SUFFIX]
     output = self._terraform_exec(init_cmd)
     if "successfully initialized" not in output:
         raise AdaptorCritical(
             "Terraform init failed: {}".format(output))
     logger.debug("Terraform initialization has been successful")
Пример #4
0
    def _add_terraform_aws(self, properties):
        """ Add Terraform template for AWS to JSON

        Builds the aws provider, the aws_instance resource (one instance
        per entry in the count variable) and the IP outputs, storing them
        on self.tf_json.

        Args:
            properties: node properties; consumed keys (region, endpoint,
                deprecated Occopus inputs) are popped, everything else is
                passed through to the aws_instance resource.

        Raises:
            AdaptorCritical: if a different AWS region was already configured.
        """

        # Get the credentials info
        credential = self._get_credential_info("ec2")

        # Check regions match
        region = properties.pop("region")
        aws_region = self.tf_json.provider.get("aws", {}).get("region")
        if aws_region and aws_region != region:
            raise AdaptorCritical(
                "Multiple different AWS regions is unsupported")

        # Add the provider
        aws_provider = {
            "version": "~> 2.54",
            "region": region,
            "access_key": credential["accesskey"],
            "secret_key": credential["secretkey"],
        }
        # Handle deprecated Occopus inputs
        properties.pop("interface_cloud", None)
        occo_endpoint = properties.pop("endpoint_cloud", None)
        endpoint = properties.pop("endpoint", occo_endpoint)
        if endpoint:
            aws_provider.setdefault("endpoints", {})["ec2"] = endpoint
        self.tf_json.add_provider("aws", aws_provider)

        instance_name = self.node_name
        cloud_init_file_name = "{}-cloud-init.yaml".format(instance_name)

        # Add the count variable
        self.tf_json.add_instance_variable(instance_name, self.min_instances)

        # Add the resource
        aws_instance = {
            instance_name: {
                **properties,
                "user_data":
                '${file("${path.module}/%s")}' % cloud_init_file_name,
                "instance_initiated_shutdown_behavior": "terminate",
                "for_each": "${toset(var.%s)}" % instance_name,
            }
        }
        # Ensure a Name tag exists, then make it unique per instance.
        # BUGFIX: the original only handled a completely missing "tags"
        # dict; user-supplied tags *without* a "Name" key raised KeyError
        # on the += below.
        tags = aws_instance[instance_name].setdefault("tags", {})
        tags.setdefault("Name", instance_name)
        tags["Name"] += "${each.key}"
        self.tf_json.add_resource("aws_instance", aws_instance)

        # Add the IP output
        ip_output = {
            "private_ips":
            "${[for i in aws_instance.%s : i.private_ip]}" % instance_name,
            "public_ips":
            "${[for i in aws_instance.%s : i.public_ip]}" % instance_name,
        }
        self.tf_json.add_output(instance_name, ip_output)
Пример #5
0
 def update(self):
     """
     Re-translate the ADT and reconcile the running Occopus infrastructure.

     Translates into temporary files, then compares them with the currently
     deployed definitions and takes the cheapest matching action: remove
     everything, start fresh, rebuild after detaching, refresh the infra
     definition only, or do nothing.
     """
     self.status = "updating"
     # Reset scaling bounds before re-reading policies during translation.
     self.min_instances = 1
     self.max_instances = 1
     logger.info("Updating the component config {}".format(self.ID))
     # translate(True) writes into the *_tmp paths so they can be diffed
     # against the active files below.
     self.translate(True)
     if not self.node_def and os.path.exists(self.node_path):
         # ADT no longer defines Occopus nodes but an infrastructure is
         # running: tear everything down.
         logger.debug("No nodes in ADT, removing running nodes")
         self._remove_tmp_files()
         self.undeploy()
         self.cleanup()
         self.status = "Updated - removed all nodes"
     elif not self.node_def:
         # Nothing to orchestrate and nothing running: just drop temps.
         logger.debug("No nodes found to be orchestrated with Occopus")
         self._remove_tmp_files()
         self.status = "Updated - no Occopus nodes"
     elif not os.path.exists(self.node_path):
         # Nodes defined but no deployed infrastructure yet: promote the
         # tmp files and deploy from scratch.
         logger.debug("No running infrastructure, starting from new")
         os.rename(self.node_path_tmp, self.node_path)
         os.rename(self.infra_def_path_output_tmp,
                   self.infra_def_path_output)
         self.execute()
         self.status = "updated"
     elif not self._differentiate(self.node_path, self.node_path_tmp):
         # NOTE(review): _differentiate appears to return falsy when the
         # files differ (this branch logs "different") — confirm its
         # contract before relying on it.
         logger.debug(
             "Node def file different, replacing old config and executing")
         os.rename(self.node_path_tmp, self.node_path)
         os.rename(self.infra_def_path_output_tmp,
                   self.infra_def_path_output)
         # Detach from the infra and rebuild
         detach = requests.post(
             "http://{0}/infrastructures/{1}/detach".format(
                 self.occopus_address, self.worker_infra_name))
         if detach.status_code != 200:
             raise AdaptorCritical("Cannot detach infra from Occopus API!")
         self.execute()
         self.status = "updated"
     elif not self._differentiate(self.infra_def_path_output,
                                  self.infra_def_path_output_tmp):
         # Only the infrastructure definition changed: swap it in and
         # rerun the build without detaching.
         logger.debug(
             "Infra tmp file different, replacing old config and executing")
         os.rename(self.infra_def_path_output_tmp,
                   self.infra_def_path_output)
         self._remove_tmp_files()
         # Rerun Occopus build to refresh infra definition
         self.execute()
         self.status = "updated"
     else:
         # No differences detected anywhere.
         self.status = 'updated (nothing to update)'
         logger.info("there are no changes in the Occopus files")
         self._remove_tmp_files()
 def query(self, query, app_id, dry_run=False):
     """Run *query* on the first instantiated adaptor that supports it.

     Raises:
         AdaptorCritical: if no adaptor provides a query method.
     """
     adaptors = self._instantiate_adaptors(app_id, dry_run).values()
     for adaptor in adaptors:
         try:
             return adaptor.query(query)
         except AttributeError:
             # This adaptor has no query support; try the next one.
             continue
     raise AdaptorCritical("No query method available")
Пример #7
0
 def _terraform_apply(self, lock_timeout):
     """Apply the Terraform plan inside the container.

     Raises:
         AdaptorCritical: if the apply output lacks the success marker.
     """
     apply_cmd = [
         "sh", "-c", "terraform apply -auto-approve -no-color" + LOG_SUFFIX
     ]
     output = self._terraform_exec(apply_cmd, lock_timeout)
     if "Apply complete" not in output:
         raise AdaptorCritical(
             "Terraform apply failed: {}".format(output))
     logger.debug("Terraform apply has been successful")
Пример #8
0
 def _terraform_exec(self, command, lock_timeout=0):
     """ Execute the command in the terraform container

     Args:
         command: argv list passed to the container exec.
         lock_timeout: seconds to keep retrying while the Terraform state
             is locked by another operation (0 disables retrying).

     Returns:
         The raw command output as a string.

     Raises:
         AdaptorCritical: if not attached to the container, or the command
             fails for any reason other than a retried state lock.
     """
     if not self.created:
         logger.error("Could not attach to Terraform container!")
         raise AdaptorCritical("Could not attach to Terraform container!")
     while True:
         exit_code, out = self.terraform.exec_run(
             command,
             workdir="{}".format(self.terra_path),
         )
         # BUGFIX: check for a state-lock error *before* the generic
         # exit-code check. Terraform exits non-zero when the state is
         # locked, so with the original ordering the retry branch was
         # unreachable and lock contention failed immediately.
         if lock_timeout > 0 and "Error locking state" in str(out):
             time.sleep(5)
             lock_timeout -= 5
             logger.debug(
                 "Waiting for lock, {}s until timeout".format(lock_timeout))
         elif exit_code > 0:
             logger.error("Terraform exec failed {}".format(out))
             raise AdaptorCritical("Terraform exec failed {}".format(out))
         else:
             break
     return str(out)
Пример #9
0
 def _terraform_destroy(self):
     """Destroy the Terraform-managed infrastructure from the container.

     Sets self.status to "undeployed" on success.

     Raises:
         AdaptorCritical: if the destroy output lacks the success marker.
     """
     destroy_cmd = [
         "sh",
         "-c",
         "terraform destroy -auto-approve -no-color" + LOG_SUFFIX,
     ]
     output = self._terraform_exec(destroy_cmd, lock_timeout=600)
     if "Destroy complete" not in output:
         raise AdaptorCritical("Undeploy failed: {}".format(output))
     logger.debug("Terraform destroy successful...")
     self.status = "undeployed"
Пример #10
0
    def translate(self, tmp=False):
        """Map the TOSCA template onto Occopus node definitions.

        Walks every node template, extracts its Occopus interface and
        per-cloud host properties, and dumps the collected definitions to
        YAML (to the tmp path when *tmp* is set).

        Raises:
            AdaptorCritical: if a node name contains an underscore.
        """
        self.node_def = {}
        logger.info("Starting OccoTranslation")
        self.status = "translating"

        # Per-cloud property extractors, keyed by detected cloud type.
        extractors = {
            "cloudsigma": ("CloudSigma resource detected",
                           self._node_data_get_cloudsigma_host_properties),
            "ec2": ("EC2 resource detected",
                    self._node_data_get_ec2_host_properties),
            "cloudbroker": ("CloudBroker resource detected",
                            self._node_data_get_cloudbroker_host_properties),
            "nova": ("Nova resource detected",
                     self._node_data_get_nova_host_properties),
        }

        for node in self.template.nodetemplates:
            if '_' in node.name:
                raise AdaptorCritical("Underscores in node {} not allowed".format(node.name))
            self.node_name = node.name
            self.node_data = {}

            # Deep-copy so interface resolution cannot mutate the template.
            node = copy.deepcopy(node)
            occo_interface = self._node_data_get_interface(node)
            if not occo_interface:
                continue

            self._node_resolve_interface_data(node, occo_interface, "resource")
            cloud_type = utils.get_cloud_type(node, SUPPORTED_CLOUDS)

            if cloud_type in extractors:
                message, extract = extractors[cloud_type]
                logger.info(message)
                extract(node, "resource")

            self._get_policies(node)
            self._get_infra_def(tmp)

            node_type = self.node_prefix + self.node_name
            self.node_def.setdefault(node_type, []).append(self.node_data)

        if self.node_def:
            if tmp:
                utils.dump_order_yaml(self.node_def, self.node_path_tmp)
            elif self.validate is False:
                self.prepare_auth_file()
                utils.dump_order_yaml(self.node_def, self.node_path)

        self.status = "translated"
Пример #11
0
    def __init__(self,
                 adaptor_id,
                 config,
                 dryrun,
                 validate=False,
                 template=None):
        super().__init__()
        """
        Constructor method of the Adaptor
        """
        # NOTE(review): the string above sits *after* super().__init__(),
        # so it is a no-op statement rather than this method's docstring.
        if template and not isinstance(template, ToscaTemplate):
            raise AdaptorCritical("Template is not a valid TOSCAParser object")
        self.status = "init"
        self.dryrun = dryrun
        self.config = config
        self.validate = validate
        # Prefix for keys emitted into the Occopus node definition file.
        self.node_prefix = "node_def:"
        self.node_name = ""
        self.worker_infra_name = "micado_worker_infra"
        self.min_instances = 1
        self.max_instances = 1
        self.ID = adaptor_id
        self.template = template
        # Auth file path as mounted inside the submitter container.
        self.auth_data_submitter = "/var/lib/micado/submitter/auth/auth_data.yaml"
        # Generated node/infra definition files plus the tmp variants
        # that update() uses for diffing.
        self.node_path = "{}{}.yaml".format(self.config['volume'], self.ID)
        self.node_path_tmp = "{}tmp_{}.yaml".format(self.config['volume'],
                                                    self.ID)
        self.infra_def_path_output = "{}{}-infra.yaml".format(
            self.config['volume'], self.ID)
        self.infra_def_path_output_tmp = "{}{}-infra.tmp.yaml".format(
            self.config['volume'], self.ID)
        # Static input descriptors shipped with the submitter.
        self.infra_def_path_input = "./system/infrastructure_descriptor.yaml"
        self.cloudinit_path = "./system/cloud_init_worker.yaml"

        self.node_data = {}
        self.node_def = {}

        # Docker connection state; only established for real deployments.
        self.created = False
        self.client = None
        self.occopus = None
        if not self.dryrun:
            self._init_docker()

        # Addresses/paths as seen from the Occopus side.
        self.occopus_address = "occopus:5000"
        self.auth_data_file = "/var/lib/micado/occopus/auth/auth_data.yaml"
        self.occo_node_path = "/var/lib/micado/occopus/submitter/{}.yaml".format(
            self.ID)
        self.occo_infra_path = "/var/lib/micado/occopus/submitter/{}-infra.yaml".format(
            self.ID)
        logger.info("Occopus Adaptor initialised")
Пример #12
0
    def __init__(self,
                 adaptor_id,
                 config,
                 dryrun,
                 validate=False,
                 template=None):
        """
        Constructor method of the Adaptor
        """
        super().__init__()
        if template and not isinstance(template, ToscaTemplate):
            raise AdaptorCritical("Template is not a valid TOSCAParser object")
        self.status = "init"
        self.dryrun = dryrun
        self.volume = config["volume"]
        self.validate = validate
        self.node_name = ""
        self.min_instances = 1
        self.max_instances = 1
        self.app_name = adaptor_id
        self.template = template

        # Working directory of Terraform inside its container.
        self.terra_path = "/var/lib/micado/terraform/submitter/"

        # Generated Terraform JSON + variables files, with tmp variants
        # used when diffing during an update.
        self.tf_file = "{}{}.tf.json".format(self.volume, self.app_name)
        self.tf_file_tmp = "{}{}.tf.json.tmp".format(self.volume,
                                                     self.app_name)
        self.vars_file = "{}terraform.tfvars.json".format(self.volume)
        self.vars_file_tmp = "{}terraform.tfvars.json.tmp".format(self.volume)
        self.account_file = "{}accounts.json".format(self.volume)
        self.oci_auth_key = "{}oci_api_key.pem".format(self.volume)

        # Static templates and credential files shipped with / mounted
        # into the submitter.
        self.cloud_init_template = "./system/cloud_init_worker_tf.yaml"
        self.configure_template = "./system/configure_tf"
        self.configure_file = "/var/lib/micado/submitter/preprocess/egi/configure.py"
        self.auth_data_file = "/var/lib/micado/submitter/auth/auth_data.yaml"
        self.auth_gce = "/var/lib/micado/submitter/gce-auth/accounts.json"
        self.auth_oci = "/var/lib/micado/submitter/oci-auth/oci_api_key.pem"
        self.master_cert = "/var/lib/micado/submitter/system/master.pem"

        # In-memory Terraform document built up during translate().
        self.tf_json = TerraformDict()

        # Docker connection state; only established for real deployments.
        self.created = False
        self.terraform = None
        self.cloud_inits = set()
        if not self.dryrun:
            self._init_docker()

        logger.info("Terraform adaptor initialised")
def _name_check_node(node):
    errors = []
    if "_" in node.name:
        errors.append("TOSCA node names")
    if "_" in (node.get_property_value("name") or ""):
        errors.append("property: 'name'")
    if "_" in (node.get_property_value("container_name") or ""):
        errors.append("property: 'container_name'")

    if errors:
        errors = ", ".join(errors)
        logger.error(
            f"Failed name convention check (underscores) on node: {node.name}")
        raise AdaptorCritical(
            f"Underscores in node {node.name} not allowed for {errors}")
Пример #14
0
    def translate(self, update=False, write_files=True):
        """Build Kubernetes manifests from the MiCADO nodes of the ADT.

        Validates every generated manifest against the K8s 1.18.0 schema
        and optionally writes the result to disk.

        Raises:
            AdaptorCritical: if a manifest fails schema validation.
        """
        logger.info("Translating into Kubernetes Manifests")
        self.status = "Translating..."
        # Reset any state left over from a previous translation run.
        self.manifests = []
        self.tcp_ports = []
        self.ingress_conf = []
        self.ingress_secrets = {}

        micado_nodes = (
            node for node in self.tpl.nodetemplates
            if node.type.startswith("tosca.nodes.MiCADO")
        )
        for node in micado_nodes:
            self._translate_node_templates(node)

        # Attach default metric exporters / network rules driven by the
        # monitoring and network policies of the template.
        for policy in self.tpl.policies:
            if policy.type.startswith(Prefix.MONITOR_POLICY):
                self._translate_monitoring_policy(policy)
            if policy.type.startswith(Prefix.NETWORK_POLICY):
                self._translate_security_policy(policy)

        if self.ingress_conf:
            self._deploy_zorp()
            self._manifest_secrets()

        if not self.manifests:
            logger.info("No nodes to orchestrate with Kubernetes. Skipping")
            self.status = "Skipped Translation"
            return

        for manifest in self.manifests:
            try:
                kubernetes_validate.validate(manifest, "1.18.0", strict=True)
            except kubernetes_validate.ValidationError as err:
                logger.error(f"K8s Validation: {err.message}")
                raise AdaptorCritical(
                    f"K8s Validation: {err.message}") from None

        if write_files:
            if update:
                utils.dump_list_yaml(self.manifests, self.manifest_tmp_path)
            elif self.validate is False:
                utils.dump_list_yaml(self.manifests, self.manifest_path)

        logger.info("Translation complete")
        self.status = "Translated"
    def undeploy(self, kill_nodes=True):
        """Remove Kubernetes workloads (and, optionally, worker nodes).

        Raises:
            AdaptorCritical: if any kubectl delete step failed.
        """
        logger.info("Undeploying Kubernetes workloads")
        self.status = "Undeploying..."
        error = False

        if self._skip_check():
            return

        def run_kubectl(operation):
            # Returns True when the kubectl call failed.
            logger.debug(f"Undeploy {operation}")
            try:
                subprocess.run(operation, stderr=subprocess.PIPE, check=True)
            except subprocess.CalledProcessError:
                return True
            return False

        if kill_nodes:
            # Delete MiCADO-labelled worker nodes from the cluster.
            node_delete = [
                "kubectl",
                "delete",
                "no",
                "-l",
                "micado.eu/node_type",
            ]
            if run_kubectl(node_delete):
                logger.debug("Got error deleting nodes")
                error = True

        # Delete every resource described by the manifest file.
        manifest_delete = [
            "kubectl",
            "delete",
            "-f",
            self.manifest_path,
            "--timeout",
            "90s",
        ]
        if run_kubectl(manifest_delete):
            logger.debug("Had some trouble removing Kubernetes workloads...")
            error = True

        if error:
            raise AdaptorCritical("Had some trouble removing workloads!")
        logger.info("Undeployment complete")
        self.status = "Undeployed"
 def __init__(self,
              adaptor_id,
              config,
              dryrun,
              validate=False,
              template=None):
     """ Constructor method of the Adaptor as described above

     Args:
         adaptor_id: unique ID of this adaptor instance.
         config: adaptor configuration dict.
         dryrun: when True, skip real deployment work downstream.
         validate: validation-only mode flag.
         template: parsed ToscaTemplate, or None.

     Raises:
         AdaptorCritical: if template is given but not a ToscaTemplate.
     """
     super().__init__()
     if template and not isinstance(template, ToscaTemplate):
         raise AdaptorCritical("Template is not a valid TOSCAParser object")
     self.tpl = template
     self.ID = adaptor_id
     self.config = config
     self.dryrun = dryrun
     # BUGFIX: the path suffix used to sit on the next line as a bare
     # string literal (a no-op statement), so the endpoint was missing
     # 'v1.0/nodecerts'. Implicit concatenation now joins the parts.
     self.endpoint = ('http://10.97.170.199:5003/'
                      'v1.0/nodecerts')
     self.status = "init"
     if template is not None:
         self.policies = self.tpl.policies
     logger.debug("Initialising the SE adaptor with the ID and TPL")
    def __init__(self,
                 adaptor_id,
                 config,
                 dryrun,
                 validate=False,
                 template=None):
        """ init method of the Adaptor

        Stores the configuration, derives the manifest output paths from
        the configured volumes, and resets all translation buffers.

        Raises:
            AdaptorCritical: if template is given but not a ToscaTemplate.
        """
        super().__init__()

        logger.debug("Initialising Kubernetes Adaptor class...")
        self.status = "Initialising..."

        if template and not isinstance(template, ToscaTemplate):
            raise AdaptorCritical("Template is not a valid TOSCAParser object")

        self.ID = adaptor_id
        self.dryrun = dryrun
        # Adaptor IDs carry a trailing "_<suffix>" segment; strip it to get
        # the application-level ID (fall back to the full ID if absent).
        self.short_id = "_".join(adaptor_id.split("_")[:-1]) or adaptor_id
        self.config = config
        self.tpl = template

        # Output manifest paths (tmp variant is used during updates).
        out_volume = self.config.get("volume", "files/output_configs")
        self.manifest_path = f"{out_volume}{self.ID}.yaml"
        self.manifest_tmp_path = f"{out_volume}tmp_{self.ID}.yaml"

        # System manifests for the default metric exporters.
        sys_volume = self.config.get("system", "system/")
        self.cadvisor_manifest_path = f"{sys_volume}cadvisor.yaml"
        self.nodex_manifest_path = f"{sys_volume}nodex.yaml"

        # Translation buffers, (re)filled by translate().
        self.manifests = []
        self.services = []
        self.volumes = {}
        self.output = {}
        self.tcp_ports = []
        self.ingress_conf = []
        self.ingress_secrets = {}
        self.validate = validate
        logger.info("Kubernetes Adaptor is ready.")
        self.status = "Initialised"
    def execute(self, update=False):
        """Deploy (or re-apply) the generated manifests with kubectl.

        Raises:
            AdaptorCritical: if the kubectl invocation fails.
        """
        logger.info("Executing Kubernetes Manifests...")
        self.status = "executing..."

        if self._skip_check():
            return

        if update:
            # Apply with pruning so resources dropped from the manifest
            # (matched via the instance label) are removed as well.
            operation = [
                "kubectl",
                "apply",
                "--prune",
                "-l",
                f"app.kubernetes.io/instance={self.short_id}",
                "-f",
                self.manifest_path,
            ]
        else:
            operation = [
                "kubectl",
                "create",
                "-f",
                self.manifest_path,
                "--save-config",
            ]

        logger.debug(f"Executing {operation}")
        try:
            subprocess.run(operation, stderr=subprocess.PIPE, check=True)
        except subprocess.CalledProcessError as e:
            logger.error(f"kubectl: {e.stderr}")
            raise AdaptorCritical(f"kubectl: {e.stderr}")

        logger.info("Kube objects deployed, trying to get outputs...")
        self._get_outputs()
        logger.info("Execution complete")
        self.status = "Executed"
Пример #19
0
    def translate(self, update=False):
        """Map the TOSCA template onto Terraform JSON descriptors.

        Walks every node template, resolves its Terraform interface and
        properties, and dispatches to the matching per-cloud builder.
        Writes the result to disk (tmp files during an update).

        Raises:
            AdaptorCritical: if a node name contains an underscore.
        """
        logger.info("Starting Terraform Translation")
        self.status = "translating..."
        self.tf_json = TerraformDict()

        # Per-cloud builders keyed by detected cloud type; ec2 additionally
        # needs its Occopus-style property names rewritten first.
        builders = {
            "ec2": ("EC2 resource detected", self._add_terraform_aws),
            "nova": ("Nova resource detected", self._add_terraform_nova),
            "azure": ("Azure resource detected", self._add_terraform_azure),
            "gce": ("GCE resource detected", self._add_terraform_gce),
            "oci": ("OCI resource detected", self._add_terraform_oci),
        }

        for node in self.template.nodetemplates:
            if "_" in node.name:
                raise AdaptorCritical(
                    "Underscores in node {} not allowed".format(node.name))

            self.node_name = node.name
            # Deep-copy so property resolution cannot mutate the template.
            node = copy.deepcopy(node)
            tf_interface = self._get_terraform_interface(node)
            if not tf_interface:
                continue

            self._get_policies(node)
            tf_options = utils.resolve_get_property(node,
                                                    tf_interface.get("create"))
            properties = self._get_properties_values(node)
            properties.update(tf_options)

            context = properties.get("context", {})
            self.cloud_inits.add(self._node_data_get_context_section(context))

            cloud_type = utils.get_cloud_type(node, SUPPORTED_CLOUDS)
            if cloud_type in builders:
                message, build = builders[cloud_type]
                logger.debug(message)
                if cloud_type == "ec2":
                    self._rename_ec2_properties(properties)
                build(properties)

        if not self.tf_json.provider:
            logger.info("No nodes to orchestrate with Terraform. Skipping...")
            self.status = "Skipped"
            return

        if update:
            logger.debug("Creating temp files")
            old_instance_vars = utils.load_json(self.vars_file)
            self.tf_json.update_instance_vars(old_instance_vars)
            self.tf_json.dump_json(self.tf_file_tmp, self.vars_file_tmp)
        elif not self.validate:
            self.tf_json.dump_json(self.tf_file, self.vars_file)
            self._rename_tmp_cloudinits()

        self.status = "Translated"
    def translate(self, update=False, write_files=True):
        """Build Kubernetes manifests from the MiCADO nodes of the ADT.

        Validates every manifest against the configured Kubernetes schema
        version (skipping configured kinds) and optionally writes the
        result to disk.

        Raises:
            AdaptorCritical: if a manifest fails validation or its schema
                is missing for the configured Kubernetes version.
        """
        logger.info("Translating into Kubernetes Manifests")
        self.status = "Translating..."
        # Start from a clean slate for this translation pass.
        self.manifests = []
        self.tcp_ports = []
        self.ingress_conf = []
        self.ingress_secrets = {}

        for node in self.tpl.nodetemplates:
            if not node.type.startswith("tosca.nodes.MiCADO"):
                continue
            self._translate_node_templates(node)

        # Attach default metric exporters / network rules driven by the
        # monitoring and network policies of the template.
        for policy in self.tpl.policies:
            if policy.type.startswith(Prefix.MONITOR_POLICY):
                self._translate_monitoring_policy(policy)
            if policy.type.startswith(Prefix.NETWORK_POLICY):
                self._translate_security_policy(policy)

        if self.ingress_conf:
            self._deploy_zorp()
            self._manifest_secrets()

        if not self.manifests:
            logger.info("No nodes to orchestrate with Kubernetes. Skipping")
            self.status = "Skipped Translation"
            return

        unvalidated_kinds = self.config.get("unvalidated_kinds", [])
        k8s_version = self.config.get("k8s_version", "1.18.0")
        for manifest in self.manifests:
            if manifest["kind"] in unvalidated_kinds:
                continue
            try:
                kubernetes_validate.validate(manifest,
                                             k8s_version,
                                             strict=True)
            except ValidationError as err:
                message = f"Invalid K8s Manifest: {err.message}"
                logger.error(message)
                raise AdaptorCritical(message) from None
            except (InvalidSchemaError, SchemaNotFoundError):
                message = (
                    f"Schema for {manifest['apiVersion']}/{manifest['kind']} "
                    f"not found in Kubernetes v{k8s_version}")
                logger.error(message)
                raise AdaptorCritical(message) from None
            except VersionNotSupportedError:
                # Unsupported K8s versions pass through unvalidated
                # rather than being rejected.
                pass

        if write_files:
            if update:
                utils.dump_list_yaml(self.manifests, self.manifest_tmp_path)
            elif self.validate is False:
                utils.dump_list_yaml(self.manifests, self.manifest_path)

        logger.info("Translation complete")
        self.status = "Translated"