Example No. 1
    def _compose_artifacts(self, node, repositories):
        """ Get TOSCA artifacts, write compose entry"""
        try:
            artifacts = node.entity_tpl.get("artifacts").values()
        except AttributeError:
            raise AdaptorCritical("No artifacts found!")

        for artifact in artifacts:
            if DOCKER_IMAGE in artifact.get("type", ""):
                break
        else:
            raise AdaptorCritical(
                "No artifact of type <{}>".format(DOCKER_IMAGE))

        repository = artifact.get("repository")
        if repository and "docker_hub" not in repository:
            for repo in repositories:
                if repository == repo.name:
                    repository = repo.reposit
                    break
        else:
            repository = ""

        image = "{}{}".format(repository, artifact["file"])

        # Write the compose data
        node = self.compose_data.setdefault("services",
                                            {}).setdefault(node.name, {})
        node["image"] = image
Example No. 2
    def _get_artifacts(self, node, repositories):
        """ Get TOSCA artifacts """
        try:
            artifacts = node.entity_tpl.get("artifacts").values()
        except AttributeError:
            raise AdaptorCritical("No artifacts found!")

        artifact = None
        for artifact in artifacts:
            if DOCKER_IMAGE in artifact.get("type", ""):
                break
        else:
            raise AdaptorCritical(f'No artifact of type <{DOCKER_IMAGE}>')

        repository = artifact.get("repository")
        if repository and "docker_hub" not in repository:
            for repo in repositories:
                if repository == repo.name:
                    repository = repo.reposit
                    break
        else:
            repository = ""

        image = artifact["file"]
        image = f'{repository}{image}'

        # Write the compose data
        self._create_compose_image(node.name, image)
Example No. 3
    def translate(self, update=False):
        """ Translate the relevant sections of the ADT into a Kubernetes Manifest """
        logger.info("Translating into Kubernetes Manifests")
        self.status = "Translating..."
        tpl_translate = self.tpl
        nodes = copy.deepcopy(tpl_translate.nodetemplates)
        repositories = copy.deepcopy(tpl_translate.repositories)

        # Reverse type-sort appears intended to handle volume nodes before
        # the container nodes that mount them (assumption, not stated here)
        for node in sorted(nodes, key=lambda x: x.type, reverse=True):
            interface = {}

            kube_interface = \
                [x for x in node.interfaces if KUBERNETES_INTERFACE in x.type]
            for operation in kube_interface:
                interface[operation.name] = operation.inputs or {}

            if DOCKER_CONTAINER in node.type and interface:
                if '_' in node.name:
                    error = ("ERROR: Use of underscores in {} workload name "
                             "prohibited".format(node.name))
                    logger.error(error)
                    raise AdaptorCritical(error)
                self._create_manifests(node, interface, repositories)

            elif CONTAINER_VOLUME in node.type and interface:
                name = node.get_property_value('name') or node.name
                if '_' in name:
                    error = ("ERROR: Use of underscores in {} volume name "
                             "prohibited".format(name))
                    logger.error(error)
                    raise AdaptorCritical(error)
                size = node.get_property_value('size') or '1Gi'

                pv_inputs = interface.get('create', {})
                labels = self._create_persistent_volume(name, pv_inputs, size)
                pvc_inputs = interface.get('configure', {})
                pvc_name = self._create_persistent_volume_claim(
                    name, pvc_inputs, labels, size)

                self.volumes.setdefault(node.name, pvc_name)

        if not self.manifests:
            logger.info(
                "No nodes to orchestrate with Kubernetes. Do you need this adaptor?"
            )
            self.status = "Skipped Translation"
            return

        if update:
            utils.dump_list_yaml(self.manifests, self.manifest_tmp_path)
        elif self.validate is False:
            utils.dump_list_yaml(self.manifests, self.manifest_path)

        logger.info("Translation complete")
        self.status = "Translated"
Example No. 4
 def execute(self):
     """
     Import Occopus node definition, and build up the infrastructure
     through occopus container.
     """
     logger.info("Starting Occopus execution {}".format(self.ID))
     self.status = "executing"
     if not self._config_files_exists():
         logger.info("No config generated during translation, nothing to execute")
         self.status = "Skipped"
         return
     if self.dryrun:
         logger.info("DRY-RUN: Occopus execution in dry-run mode...")
         self.status = "DRY-RUN Deployment"
         return
     else:
         if self.created:
             run = False
             result = None
             i = 0
             while not run and i < 5:
                 try:
                     logger.debug("Occopus import starting...")
                     result = self.occopus.exec_run("occopus-import {0}".format(self.occo_node_path))
                     logger.debug("Occopus import has been successful")
                     run = True
                 except Exception as e:
                     i += 1
                     logger.debug("{0}. Try {1} of 5.".format(str(e), i))
                     time.sleep(5)
             if not run:
                 raise AdaptorCritical("Occopus import failed after 5 attempts")
             logger.debug(result)
             if "Successfully imported" in result[1].decode("utf-8"):
                 try:
                     logger.debug("Occopus build starting...")
                     exit_code, out = self.occopus.exec_run("occopus-build {} -i {} --auth_data_path {} --parallelize"
                                                       .format(self.occo_infra_path,
                                                               self.worker_infra_name,
                                                               self.auth_data_file))
                     if exit_code == 1:
                         raise AdaptorCritical(out)
                     occo_api_call = requests.post("http://{0}/infrastructures/{1}/attach"
                                               .format(self.occopus_address, self.worker_infra_name))
                     if occo_api_call.status_code != 200:
                         raise AdaptorCritical("Cannot submit infra to Occopus API!")
                     logger.debug("Occopus build has been successful")
                     
                 except docker.errors.APIError as e:
                     logger.error("{0}. Error caught in calling Docker container".format(str(e)))
                 except requests.exceptions.RequestException as e:
                     logger.error("{0}. Error caught in call to occopus API".format(str(e)))
             else:
                 logger.error("Occopus import was unsuccessful!")
                 raise AdaptorCritical("Occopus import was unsuccessful!")
         else:
             logger.error("Not connected to Occopus container!")
             raise AdaptorCritical("Occopus container connection was unsuccessful!")
     logger.info("Occopus executed")
     self.status = "executed"
Example No. 5
    def execute(self):
        """ Deploy the stack onto the Swarm

        Executes the ``docker stack deploy`` command on the Docker-Compose file
        which was created in ``translate()``
        :raises: AdaptorCritical
        """
        logger.info("Starting Docker execution...")

        try:
            if self.config['dry_run'] is False:
                subprocess.run([
                    "docker", "stack", "deploy", "--with-registry-auth",
                    "--compose-file", self.path,
                    self.ID.split("_")[0]
                ],
                               stderr=subprocess.PIPE,
                               check=True)
            else:
                logger.info(
                    f'subprocess.run([\"docker\", \"stack\", \"deploy\", '
                    f'\"--compose-file\", \"docker-compose.yaml\", '
                    f'{self.ID.split("_")[0]}], check=True)')

        except subprocess.CalledProcessError as e:
            # FIXME: not-so-nice hack to recover from "update out of sequence"
            logger.error("Got this error: {}".format(e.stderr))
            if "update out of sequence" in str(e.stderr, 'utf-8'):
                logger.error("Trying update again")
                try:
                    subprocess.run([
                        "docker", "stack", "deploy", "--with-registry-auth",
                        "--compose-file", self.path,
                        self.ID.split("_")[0]
                    ],
                                   check=True)
                except subprocess.CalledProcessError:
                    logger.error("Cannot execute Docker")
                    raise AdaptorCritical("Cannot execute Docker")
            else:
                logger.error("Cannot execute Docker")
                raise AdaptorCritical("Cannot execute Docker")
        except KeyError:
            subprocess.run([
                "docker", "stack", "deploy", "--with-registry-auth",
                "--compose-file", self.path,
                self.ID.split("_")[0]
            ],
                           check=True)

        logger.info("Docker running, trying to get outputs...")
        self._get_outputs()
Example No. 6
    def undeploy(self):
        """ Undeploy the stack from Docker

        Runs ``docker stack down`` using the given ID to bring down the stack.
        :raises: AdaptorCritical
        """
        logger.info("Undeploying the application")

        try:
            if self.config['dry_run'] is False:
                subprocess.run(
                    ["docker", "stack", "down",
                     self.ID.split("_")[0]],
                    check=True)
                logger.debug("Undeploy application with ID: {}".format(
                    self.ID))
            else:
                logger.debug(f'Undeploy application with ID: {self.ID}')

        except subprocess.CalledProcessError:
            logger.error("Cannot undeploy the stack")
            raise AdaptorCritical("Cannot undeploy the stack")
        except KeyError:
            subprocess.run(["docker", "stack", "down",
                            self.ID.split("_")[0]],
                           check=True)
            logger.debug("Undeploy application with ID: {}".format(self.ID))
        logger.info("Stack is down...")
Example No. 7
    def _node_data_get_context_section(self, properties):
        """
        Create the context section in the node definition
        """
        self.node_data.setdefault("contextualisation", {}) \
            .setdefault("type", "cloudinit")

        if properties.get("context") is not None:
            context = properties.get("context").value
            if context.get("cloud_config") is None:
                if context["append"]:
                    # Append requested but there is no cloud_config to append
                    logger.info("You set the append property but provided no cloud_config. Please check it again!")
                    raise AdaptorCritical("You set the append property but provided no cloud_config. Please check it again!")
                else:
                    # No append and no cloud-config - use the default cloud-init
                    logger.info("Getting the default cloud-config")
                    self.node_data.setdefault("contextualisation", {}) \
                        .setdefault("context_template", self._get_cloud_init(context.get("cloud_config"), False, False))
            else:
                if context["append"]:
                    # Append the TOSCA context to the default config
                    logger.info("Appending the TOSCA cloud-config to the default config")
                    self.node_data.setdefault("contextualisation", {}) \
                        .setdefault("context_template", self._get_cloud_init(context["cloud_config"], True, False))
                else:
                    # Use the TOSCA context as-is
                    logger.info("The adaptor will use the TOSCA cloud-config")
                    self.node_data.setdefault("contextualisation", {}) \
                        .setdefault("context_template", self._get_cloud_init(context["cloud_config"], False, True))
        else:
            self.node_data.setdefault("contextualisation", {}) \
                .setdefault("context_template", self._get_cloud_init(None, False, False))
Example No. 8
    def __init__(self,
                 adaptor_id,
                 config,
                 dryrun,
                 validate=False,
                 template=None):
        """ init method of the Adaptor """
        super().__init__()

        logger.debug("Initialising Kubernetes Adaptor class...")
        self.status = "Initialising..."

        if template and not isinstance(template, ToscaTemplate):
            raise AdaptorCritical("Template is not a valid TOSCAParser object")

        self.ID = adaptor_id
        self.dryrun = dryrun
        self.short_id = '_'.join(adaptor_id.split('_')[:-1])
        self.config = config
        self.tpl = template
        self.manifest_path = "{}{}.yaml".format(self.config['volume'], self.ID)
        self.manifest_tmp_path = "{}tmp_{}.yaml".format(
            self.config['volume'], self.ID)

        self.manifests = []
        self.services = []
        self.volumes = {}
        self.output = {}
        self.validate = validate
        logger.info("Kubernetes Adaptor is ready.")
        self.status = "Initialised"
Example No. 9
    def translate(self, tmp=False):
        """ Translate the self.tpl subset to the Compose format

        Does the work of mapping the Docker relevant sections of TOSCA into a
        dictionary following the Docker-Compose format, then dumping output to
        a .yaml file in output_configs/
        :param bool tmp: Set ``True`` for update() - outputfile gets prefix ``tmp_``
        :raises: AdaptorCritical
        """

        logger.info("Starting translation to compose...")
        self.compose_data = {"version": COMPOSE_VERSION}

        for node in self.tpl.nodetemplates:
            if DOCKER_CONTAINER in node.type:
                self._compose_properties(node, "services")
                self._compose_artifacts(node, self.tpl.repositories)
                self._compose_requirements(node)
            elif DOCKER_NETWORK in node.type:
                self._compose_properties(node, "networks")
            elif DOCKER_VOLUME in node.type:
                self._compose_properties(node, "volumes")

        if not self.compose_data.get("services"):
            logger.error("No TOSCA nodes of Docker type!")
            raise AdaptorCritical("No TOSCA nodes of Docker type!")

        if tmp is False:
            utils.dump_order_yaml(self.compose_data, self.path)
        else:
            utils.dump_order_yaml(self.compose_data, self.tmp_path)
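
For orientation, the rough shape of self.compose_data after translating a single container node (the service name and image are invented; COMPOSE_VERSION is whatever the module defines):

    # Illustrative only:
    # self.compose_data = {
    #     "version": COMPOSE_VERSION,
    #     "services": {"web": {"image": "nginx:1.17"}},
    #     "networks": {},   # populated only for DOCKER_NETWORK nodes
    #     "volumes": {},    # populated only for DOCKER_VOLUME nodes
    # }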
Example No. 10
    def _add_terraform_aws(self, properties):
        """ Add Terraform template for AWS to JSON"""

        # Get the credentials info
        credential = self._get_credential_info("ec2")

        # Check regions match
        region = properties.pop("region")
        aws_region = self.tf_json.provider.get("aws", {}).get("region")
        if aws_region and aws_region != region:
            raise AdaptorCritical(
                "Multiple different AWS regions are unsupported")

        # Add the provider
        aws_provider = {
            "version": "~> 2.54",
            "region": region,
            "access_key": credential["accesskey"],
            "secret_key": credential["secretkey"],
        }
        # Handle deprecated Occopus inputs
        properties.pop("interface_cloud", None)
        occo_endpoint = properties.pop("endpoint_cloud", None)
        endpoint = properties.pop("endpoint", occo_endpoint)
        if endpoint:
            aws_provider.setdefault("endpoints", {})["ec2"] = endpoint
        self.tf_json.add_provider("aws", aws_provider)

        instance_name = self.node_name
        cloud_init_file_name = "{}-cloud-init.yaml".format(instance_name)

        # Add the count variable
        self.tf_json.add_instance_variable(instance_name, self.min_instances)

        # Add the resource
        aws_instance = {
            instance_name: {
                **properties,
                "user_data":
                '${file("${path.module}/%s")}' % cloud_init_file_name,
                "instance_initiated_shutdown_behavior": "terminate",
                "for_each": "${toset(var.%s)}" % instance_name,
            }
        }
        # Ensure a Name tag exists, then make it unique per instance
        aws_instance[instance_name].setdefault("tags", {}).setdefault(
            "Name", instance_name)
        aws_instance[instance_name]["tags"]["Name"] += "${each.key}"
        self.tf_json.add_resource("aws_instance", aws_instance)

        # Add the IP output
        ip_output = {
            "private_ips":
            "${[for i in aws_instance.%s : i.private_ip]}" % instance_name,
            "public_ips":
            "${[for i in aws_instance.%s : i.public_ip]}" % instance_name,
        }
        self.tf_json.add_output(instance_name, ip_output)
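
For a node named worker, the generated Terraform JSON should look roughly like the sketch below, assuming TerraformDict maps add_provider, add_resource and add_output onto the standard provider, resource and output keys (that wrapping is not shown in these examples):

    # Rough shape of the generated .tf.json (illustrative):
    # {
    #   "provider": {"aws": {"version": "~> 2.54", "region": "...",
    #                        "access_key": "...", "secret_key": "..."}},
    #   "resource": {"aws_instance": {"worker": {
    #       "for_each": "${toset(var.worker)}",
    #       "user_data": "${file(\"${path.module}/worker-cloud-init.yaml\")}",
    #       "tags": {"Name": "worker${each.key}"}}}},
    #   "output": {"worker": {
    #       "private_ips": "${[for i in aws_instance.worker : i.private_ip]}",
    #       "public_ips": "${[for i in aws_instance.worker : i.public_ip]}"}}
    # }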
Example No. 11
 def _terraform_init(self):
     """ Run terraform init in the container """
     command = ["sh", "-c", "terraform init -no-color" + LOG_SUFFIX]
     exec_output = self._terraform_exec(command)
     if "successfully initialized" in exec_output:
         logger.debug("Terraform initialization has been successful")
     else:
         raise AdaptorCritical(
             "Terraform init failed: {}".format(exec_output))
Example No. 12
 def _terraform_apply(self, lock_timeout):
     """ Run terraform apply in the container """
     command = [
         "sh", "-c", "terraform apply -auto-approve -no-color" + LOG_SUFFIX
     ]
     exec_output = self._terraform_exec(command, lock_timeout)
     if "Apply complete" in exec_output:
         logger.debug("Terraform apply has been successful")
     else:
         raise AdaptorCritical(
             "Terraform apply failed: {}".format(exec_output))
Example No. 13
 def __init__(self, adaptor_id, config, template=None):
     """ Constructor method of the Adaptor as described above """
     super().__init__()
     if template and not isinstance(template, ToscaTemplate):
         raise AdaptorCritical("Template is not a valid TOSCAParser object")
     self.tpl = template
     self.ID = adaptor_id
     self.config = config
     if template is not None:
         self.policies = self.tpl.policies
     logger.debug("Initialising the SE adaptor with the ID and TPL")
Example No. 14
 def query(self, query, app_id, dry_run=False):
     """ Return the result from the first adaptor that implements query """
     for adaptor in self._instantiate_adaptors(app_id, dry_run).values():
         try:
             return adaptor.query(query)
         except AttributeError:
             continue
     raise AdaptorCritical("No query method available")
Example No. 15
    def __init__(self, adaptor_id, template=None):
        """ Constructor method of the Adaptor as described above """
        super().__init__()
        if template and not isinstance(template, ToscaTemplate):
            raise AdaptorCritical("Template is not a valid TOSCAParser object")

        logger.debug("Initialising the Docker adaptor with ID & TPL...")
        self.compose_data = {}
        self.ID = adaptor_id
        self.tpl = template
        self.output = dict()
        logger.info("DockerAdaptor ready to go!")
Example No. 16
 def _terraform_exec(self, command, lock_timeout=0):
     """ Execute the command in the terraform container """
     if not self.created:
         logger.error("Could not attach to Terraform container!")
         raise AdaptorCritical("Could not attach to Terraform container!")
     while True:
         exit_code, out = self.terraform.exec_run(
             command,
             workdir="{}".format(self.terra_path),
         )
         if exit_code > 0:
             logger.error("Terraform exec failed {}".format(out))
             raise AdaptorCritical("Terraform exec failed {}".format(out))
         elif lock_timeout > 0 and "Error locking state" in str(out):
             time.sleep(5)
             lock_timeout -= 5
             logger.debug(
                 "Waiting for lock, {}s until timeout".format(lock_timeout))
         else:
             break
     return str(out)
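
A hypothetical call site, to show how the lock timeout is meant to be used (the command and timeout are invented):

    # out = self._terraform_exec(
    #     ["sh", "-c", "terraform plan -no-color"], lock_timeout=60)
    # While the output contains "Error locking state" the call retries
    # every 5 seconds until lock_timeout is exhausted, then returns.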
Example No. 17
 def _terraform_destroy(self):
     """ Run terraform destroy in the container """
     command = [
         "sh",
         "-c",
         "terraform destroy -auto-approve -no-color" + LOG_SUFFIX,
     ]
     exec_output = self._terraform_exec(command, lock_timeout=600)
     if "Destroy complete" in exec_output:
         logger.debug("Terraform destroy successful...")
         self.status = "undeployed"
     else:
         raise AdaptorCritical("Undeploy failed: {}".format(exec_output))
Example No. 18
    def translate(self, tmp=False):
        """
        Translate the self.tpl subset to the Occopus node definition and infrastructure format.
        The adaptor creates a mapping between the TOSCA and Occopus template descriptors.
        """
        self.node_def = {}
        logger.info("Starting OccoTranslation")
        self.status = "translating"

        for node in self.template.nodetemplates:

            if '_' in node.name:                
                raise AdaptorCritical("Underscores in node {} not allowed".format(node.name))
            self.node_name = node.name
            self.node_data = {}
            
            node = copy.deepcopy(node)
            occo_interface = self._node_data_get_interface(node)
            if not occo_interface:
                continue

            self._node_resolve_interface_data(node, occo_interface, "resource")
            cloud_type = utils.get_cloud_type(node, SUPPORTED_CLOUDS)

            if cloud_type == "cloudsigma":
                logger.info("CloudSigma resource detected")
                self._node_data_get_cloudsigma_host_properties(node, "resource")
            elif cloud_type == "ec2":
                logger.info("EC2 resource detected")
                self._node_data_get_ec2_host_properties(node, "resource")
            elif cloud_type == "cloudbroker":
                logger.info("CloudBroker resource detected")
                self._node_data_get_cloudbroker_host_properties(node, "resource")
            elif cloud_type == "nova":
                logger.info("Nova resource detected")
                self._node_data_get_nova_host_properties(node, "resource")

            self._get_policies(node)
            self._get_infra_def(tmp)

            node_type = self.node_prefix + self.node_name
            self.node_def.setdefault(node_type, [])
            self.node_def[node_type].append(self.node_data)
        if self.node_def:
            if tmp:
                utils.dump_order_yaml(self.node_def, self.node_path_tmp)
            elif self.validate is False:
                self.prepare_auth_file()
                utils.dump_order_yaml(self.node_def, self.node_path)

        self.status = "translated"
Example No. 19
    def __init__(self, adaptor_id, config, template=None):

        super().__init__()
        self.config = config
        if template and not isinstance(template, ToscaTemplate):
            raise AdaptorCritical("Template is not a valid TOSCAParser object")

        self.ID = adaptor_id
        self.tpl = template
        try:
            self.sp_data = utils.get_yaml_data(PATH_TO_POLICY)
        except FileNotFoundError:
            self.sp_data = {"services": {"_sample": {"scaledown": 0, "scaleup": 100}}}
        logger.info("ScalingPolicyAdaptor ready!")
Example No. 20
 def __init__(self, adaptor_id, config, template=None):
     """ Constructor method of the Adaptor as described above """
     super().__init__()
     if template and not isinstance(template, ToscaTemplate):
         raise AdaptorCritical("Template is not a valid TOSCAParser object")
     self.tpl = template
     self.ID = adaptor_id
     self.config = config
      self.endpoint = ('http://10.97.170.199:5003/'
                       'v1.0/nodecerts')
     self.status = "init"
     if template is not None:
         self.policies = self.tpl.policies
     logger.debug("Initialising the SE adaptor with the ID and TPL")
Example No. 21
 def update(self):
     """
     Check that if it's any change in the node definition or in the cloud-init file.
     If the node definition changed then rerun the build process. If the node definition
     changed first undeploy the infrastructure and rebuild it with the modified parameter.
     """
     self.status = "updating"
     self.min_instances = 1
     self.max_instances = 1
     logger.info("Updating the component config {}".format(self.ID))
     self.translate(True)
     if not self.node_def and os.path.exists(self.node_path):
         logger.debug("No nodes in ADT, removing running nodes")
         self._remove_tmp_files()
         self.undeploy()
         self.cleanup()
         self.status = "Updated - removed all nodes"
     elif not self.node_def:
         logger.debug("No nodes found to be orchestrated with Occopus")
         self._remove_tmp_files()
         self.status = "Updated - no Occopus nodes"
     elif not os.path.exists(self.node_path):
         logger.debug("No running infrastructure, starting from new")
         os.rename(self.node_path_tmp, self.node_path)
         os.rename(self.infra_def_path_output_tmp, self.infra_def_path_output)
         self.execute()
         self.status = "updated"
      elif not self._differentiate(self.node_path, self.node_path_tmp):
         logger.debug("Node def file different, replacing old config and executing")
         os.rename(self.node_path_tmp, self.node_path)
         os.rename(self.infra_def_path_output_tmp, self.infra_def_path_output)
         # Detach from the infra and rebuild
         detach = requests.post("http://{0}/infrastructures/{1}/detach"
                                     .format(self.occopus_address, self.worker_infra_name))
         if detach.status_code != 200:
             raise AdaptorCritical("Cannot detach infra from Occopus API!")
         self.execute()
         self.status = "updated"
     elif not self._differentiate(self.infra_def_path_output, self.infra_def_path_output_tmp):
         logger.debug("Infra tmp file different, replacing old config and executing")
         os.rename(self.infra_def_path_output_tmp, self.infra_def_path_output)
         self._remove_tmp_files()
         # Rerun Occopus build to refresh infra definition
         self.execute()
         self.status = "updated"
     else:
         self.status = 'updated (nothing to update)'
         logger.info("there are no changes in the Occopus files")
         self._remove_tmp_files()
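
update() relies on _differentiate(), which is not among these examples; its call sites imply it returns True when the two files are identical. A plausible sketch, an assumption rather than the project's actual implementation (requires import hashlib):

 def _differentiate(self, path, tmp_path):
     """ Return True if the two files have identical contents (assumed) """
     with open(path, "rb") as old, open(tmp_path, "rb") as new:
         return (hashlib.md5(old.read()).digest()
                 == hashlib.md5(new.read()).digest())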
Example No. 22
    def undeploy(self):
        """ Undeploy the stack from Docker

        Runs ``docker stack down`` using the given ID to bring down the stack.

        :raises: AdaptorCritical
        """
        logger.info("Undeploying the application")

        # Commented code is so for dry-runs (no Docker in test-env)
        try:
            #subprocess.run(["docker", "stack", "down", self.ID], check=True)
            logger.debug(f'Undeploy application with ID: {self.ID}')
        except subprocess.CalledProcessError:
            logger.error("Cannot undeploy the stack")
            raise AdaptorCritical("Cannot undeploy the stack")
        logger.info("Stack is down...")
Example No. 23
def _get_container(node, properties, repositories, inputs):
    """ Return container spec """

    # Get image
    image = _get_image(node.entity_tpl, repositories)
    if not image:
        raise AdaptorCritical("No image specified for {}!".format(node.name))
    properties.setdefault('image', image)

    # Remove any known swarm-only keys
    for key in SWARM_PROPERTIES:
        if properties.pop(key, None):
            logger.warning('Removed Swarm-option {}'.format(key))

    # Translate common properties
    properties.setdefault('name', properties.pop('container_name', node.name))
    properties.setdefault('command', properties.pop('entrypoint', '').split())
    properties.setdefault('args', properties.pop('cmd', '').split())
    docker_env = properties.pop('environment', {})
    env = []
    for key, value in docker_env.items():
        env.append({'name': key, 'value': value})
    properties.setdefault('env', env)

    # Translate other properties
    docker_labels = properties.pop('labels', None)
    if docker_labels:
        inputs.setdefault('metadata', {}).setdefault('labels',
                                                     {}).update(docker_labels)
    docker_grace = properties.pop('stop_grace_period', None)
    if docker_grace:
        inputs.setdefault('terminationGracePeriodSeconds', docker_grace)
    docker_priv = properties.pop('privileged', None)
    if docker_priv:
        properties.setdefault('securityContext',
                              {}).setdefault('privileged', docker_priv)
    docker_pid = properties.pop('pid', None)
    if docker_pid == 'host':
        inputs.setdefault('hostPID', True)
    docker_netmode = properties.pop('network_mode', None)
    if docker_netmode == 'host':
        inputs.setdefault('hostNetwork', True)
    properties.setdefault('stdin', properties.pop('stdin_open', None))
    properties.setdefault('livenessProbe', properties.pop('healthcheck', None))

    return {key: val for key, val in properties.items() if val}
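
To make the mapping concrete, a hypothetical before and after for one container (the keys follow the code above; the values are invented):

    # Docker-style input properties:
    #   {'environment': {'DEBUG': '1'}, 'stop_grace_period': 30,
    #    'pid': 'host', 'privileged': True}
    # After translation:
    #   properties -> {'env': [{'name': 'DEBUG', 'value': '1'}],
    #                  'securityContext': {'privileged': True}, ...}
    #   inputs     -> {'terminationGracePeriodSeconds': 30, 'hostPID': True}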
Example No. 24
    def __init__(self, adaptor_id, config, template=None):

        super().__init__()
        if template and not isinstance(template, ToscaTemplate):
            raise AdaptorCritical("Template is not a valid TOSCAParser object")
        logger.info("Initialising the Pk adaptor with ID, config & TPL...")
        self.config = config
        self.pk_data = {}
        self.ID = adaptor_id
        try:
            self.path = "{}{}.yaml".format(self.config['volume'], self.ID)
            self.tmp_path = "{}tmp_{}.yaml".format(self.config['volume'],
                                                   self.ID)
        except Exception as e:
            logger.error(e)
            raise
        self.tpl = template
        logger.info("Pk adaptor initialised")
Example No. 25
    def __init__(self, adaptor_id, config, template=None):
        """ Constructor method of the Adaptor as described above """
        super().__init__()
        if template and not isinstance(template, ToscaTemplate):
            raise AdaptorCritical("Template is not a valid TOSCAParser object")

        logger.debug("Initialising the Docker adaptor with ID & TPL...")

        self.config = config
        self.compose_data = {}
        logger.debug("\t\t\t\t\t {}".format(config))
        self.ID = adaptor_id
        self.path = "{}{}.yaml".format(self.config['volume'], self.ID)
        self.tmp_path = "{}tmp_{}.yaml".format(self.config['volume'], self.ID)
        self.tpl = template
        self.output = dict()
        logger.info("DockerAdaptor ready to go!")
Example No. 26
    def __init__(self,
                 adaptor_id,
                 config,
                 dryrun,
                 validate=False,
                 template=None):
        """
        Constructor method of the Adaptor
        """
        super().__init__()
        if template and not isinstance(template, ToscaTemplate):
            raise AdaptorCritical("Template is not a valid TOSCAParser object")
        self.status = "init"
        self.dryrun = dryrun
        self.volume = config["volume"]
        self.validate = validate
        self.node_name = ""
        self.min_instances = 1
        self.max_instances = 1
        self.app_name = adaptor_id
        self.template = template

        self.terra_path = "/var/lib/micado/terraform/submitter/"

        self.tf_file = "{}{}.tf.json".format(self.volume, self.app_name)
        self.tf_file_tmp = "{}{}.tf.json.tmp".format(self.volume,
                                                     self.app_name)
        self.vars_file = "{}terraform.tfvars.json".format(self.volume)
        self.vars_file_tmp = "{}terraform.tfvars.json.tmp".format(self.volume)
        self.account_file = "{}accounts.json".format(self.volume)

        self.cloud_init_template = "./system/cloud_init_worker_tf.yaml"
        self.auth_data_file = "/var/lib/submitter/auth/auth_data.yaml"
        self.auth_gce = "/var/lib/submitter/gce-auth/accounts.json"
        self.master_cert = "/var/lib/submitter/system/master.pem"

        self.tf_json = TerraformDict()

        self.created = False
        self.terraform = None
        self.cloud_inits = set()
        if not self.dryrun:
            self._init_docker()

        logger.info("Terraform adaptor initialised")
Example No. 27
    def undeploy(self):
        """ Undeploy """
        logger.info("Undeploying Kubernetes workloads")
        self.status = "Undeploying..."
        error = False

        # Try to delete workloads relying on hosted mounts first (WORKAROUND)
        operation = [
            "kubectl", "delete", "-n", "default", "-f", self.manifest_path,
            "-l", "!volume"
        ]
        try:
            if self.dryrun:
                logger.info(
                    "DRY-RUN: kubectl removes all workloads but hosted volumes..."
                )
            else:
                logger.debug("Undeploy {}".format(operation))
                subprocess.run(operation, stderr=subprocess.PIPE, check=True)
        except subprocess.CalledProcessError:
            logger.debug("Got error deleting non-hosted-mount workloads")
            error = True
        time.sleep(15)

        # Delete workloads hosting volumes
        operation = [
            "kubectl", "delete", "-n", "default", "-f", self.manifest_path,
            "-l", "volume"
        ]
        try:
            if self.dryrun:
                logger.info("DRY-RUN: kubectl removes remaining workloads...")
            else:
                logger.debug("Undeploy {}".format(operation))
                subprocess.run(operation, stderr=subprocess.PIPE, check=True)
        except subprocess.CalledProcessError:
            logger.debug("Had some trouble removing hosted volume workload...")
            error = True

        if error:
            raise AdaptorCritical("Had some trouble removing workloads!")
        logger.info("Undeployment complete")
        self.status = "Undeployed"
Example No. 28
    def __init__(self, adaptor_id, config, template=None):
        """
        Constructor method of the Adaptor
        """
        super().__init__()
        if template and not isinstance(template, ToscaTemplate):
            raise AdaptorCritical("Template is not a valid TOSCAParser object")
        self.config = config
        self.node_name = "node_def:worker"
        self.worker_infra_name = "micado_worker_infra"
        self.min_instances = 1
        self.max_instances = 1
        self.ID = adaptor_id
        self.template = template
        self.node_path = "{}{}.yaml".format(self.config['volume'], self.ID)
        self.node_path_tmp = "{}tmp_{}.yaml".format(self.config['volume'],
                                                    self.ID)
        self.infra_def_path_output = "{}{}-infra.yaml".format(
            self.config['volume'], self.ID)
        self.infra_def_path_output_tmp = "{}-infra.tmp.yaml".format(
            self.config['volume'], self.ID)
        self.infra_def_path_input = "/var/lib/submitter/system/infrastructure_descriptor.yaml"
        self.cloudinit_path = "/var/lib/submitter/system/cloud_init_worker.yaml"

        self.node_data = {}
        self.cloudsigma = {}
        self.ec2 = {}
        self.nova = {}
        self.cloudbroker = {}

        self.created = False
        self.client = None
        self.occopus = None
        self._init_docker()

        self.occopus_address = "occopus:5000"
        self.auth_data_file = "/var/lib/micado/occopus/data/auth_data.yaml"
        self.occo_node_path = "/var/lib/micado/occopus/submitter/{}.yaml".format(
            self.ID)
        self.occo_infra_path = "/var/lib/micado/occopus/submitter/{}-infra.yaml".format(
            self.ID)
        logger.info("Occopus Adaptor initialised")
Example No. 29
    def execute(self):
        """ Deploy the stack onto the Swarm

        Executes the ``docker stack deploy`` command on the Docker-Compose file
        which was created in ``translate()``

        :raises: AdaptorCritical
        """
        logger.info("Starting Docker execution...")

        # Commented code makes dry-runs possible (no Docker in our test env)
        try:
            #subprocess.run(["docker", "stack", "deploy", "--compose-file",
            #f'output_configs/{self.ID}.yaml', self.ID], check=True)
            logger.info(f'subprocess.run([\"docker\", \"stack\", \"deploy\", '
                        f'\"--compose-file\", \"docker-compose.yaml\", '
                        f'{self.ID}], check=True)')
        except subprocess.CalledProcessError:
            logger.error("Cannot execute Docker")
            raise AdaptorCritical("Cannot execute Docker")
        logger.info("Docker running, trying to get outputs...")
        self._get_outputs()
Example No. 30
    def execute(self, update=False):
        """ Execute """
        logger.info("Executing Kubernetes Manifests...")
        self.status = "Executing..."

        if not self.manifests:
            logger.info(
                "No nodes to orchestrate with Kubernetes. Do you need this adaptor?"
            )
            self.status = "Skipped Execution"
            return

        if self.dryrun:
            logger.info("DRY-RUN: kubectl creates workloads...")
            self.status = "DRY-RUN Deployment"
            return

        if update:
            operation = [
                'kubectl', 'apply', '-n', 'default', '-f', self.manifest_path
            ]
        else:
            operation = [
                'kubectl', 'create', '-n', 'default', '-f', self.manifest_path,
                '--save-config'
            ]
        try:
            logger.debug("Executing {}".format(operation))
            subprocess.run(operation, stderr=subprocess.PIPE, check=True)

        except subprocess.CalledProcessError as e:
            logger.error("kubectl: {}".format(e.stderr))
            raise AdaptorCritical("kubectl: {}".format(e.stderr))

        logger.info("Kube objects deployed, trying to get outputs...")
        self._get_outputs()
        logger.info("Execution complete")
        self.status = "Executed"