Example #1
    def _node_data_get_context_section(self, context):
        """
        Create the cloud-init config file
        """
        # context may be None here, so guard before reading from it
        cloud_config = context.get("cloud_config") if context else None
        if not context:
            logger.debug("The adaptor will use a default cloud-config")
            node_init = self._get_cloud_init(None)
        elif not cloud_config:
            logger.debug(
                "No cloud-config provided... using default cloud-config")
            node_init = self._get_cloud_init(None)
        elif context.get("insert"):
            logger.debug("Insert the TOSCA cloud-config in the default config")
            node_init = self._get_cloud_init(cloud_config, "insert")
        elif context.get("append"):
            logger.debug("Append the TOSCA cloud-config to the default config")
            node_init = self._get_cloud_init(cloud_config, "append")
        else:
            logger.debug("Overwrite the default cloud-config")
            node_init = self._get_cloud_init(cloud_config, "overwrite")

        cloud_init_file_name = "{}-cloud-init.yaml".format(self.node_name)
        cloud_init_path = "{}{}".format(self.volume, cloud_init_file_name)
        cloud_init_path_tmp = "{}.tmp".format(cloud_init_path)

        utils.dump_order_yaml(node_init, cloud_init_path_tmp)
        return cloud_init_path
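A quick illustration of the file-naming scheme above (the values are hypothetical, only the pattern comes from the code):

    # Hypothetical values, just to show the naming used by the method above.
    node_name = "worker"
    volume = "./output_configs/"
    cloud_init_path = "{}{}-cloud-init.yaml".format(volume, node_name)
    # -> "./output_configs/worker-cloud-init.yaml"; the YAML is dumped to this
    #    path plus a ".tmp" suffix, while the un-suffixed path is returned.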
Example #2
    def translate(self, tmp=False):
        """ Translate the self.tpl subset to the Compose format

        Does the work of mapping the Docker relevant sections of TOSCA into a
        dictionary following the Docker-Compose format, then dumping output to
        a .yaml file in output_configs/
        :param bool tmp: Set ``True`` for update() - the output file gets the prefix ``tmp_``
        :raises: AdaptorCritical
        """

        logger.info("Starting translation to compose...")
        self.compose_data = {"version": COMPOSE_VERSION}

        for node in self.tpl.nodetemplates:
            if DOCKER_CONTAINER in node.type:
                self._compose_properties(node, "services")
                self._compose_artifacts(node, self.tpl.repositories)
                self._compose_requirements(node)
            elif DOCKER_NETWORK in node.type:
                self._compose_properties(node, "networks")
            elif DOCKER_VOLUME in node.type:
                self._compose_properties(node, "volumes")

        if not self.compose_data.get("services"):
            logger.error("No TOSCA nodes of Docker type!")
            raise AdaptorCritical("No TOSCA nodes of Docker type!")

        if tmp is False:
            utils.dump_order_yaml(self.compose_data, self.path)
        else:
            utils.dump_order_yaml(self.compose_data, self.tmp_path)
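Purely as an illustration (the service, network and volume names below are invented), the dictionary that this translate() builds and hands to utils.dump_order_yaml has roughly the following shape:

    # Hypothetical self.compose_data after translation; COMPOSE_VERSION is the
    # adaptor's constant, everything else is made up for this sketch.
    compose_data = {
        "version": COMPOSE_VERSION,
        "services": {"web": {"image": "nginx:latest", "networks": ["front"]}},
        "networks": {"front": {"driver": "overlay"}},
        "volumes": {"web-data": {}},
    }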
Example #3
    def translate(self, tmp=False):
        """
        Translate the self.template subset to the Occopus node definition and infrastructure format.
        The adaptor creates a mapping between the TOSCA and Occopus template descriptors.
        """
        self.node_def = {}
        logger.info("Starting OccoTranslation")
        self.status = "translating"

        for node in self.template.nodetemplates:

            if '_' in node.name:                
                raise AdaptorCritical("Underscores in node {} not allowed".format(node.name))
            self.node_name = node.name
            self.node_data = {}
            
            node = copy.deepcopy(node)
            occo_interface = self._node_data_get_interface(node)
            if not occo_interface:
                continue

            self._node_resolve_interface_data(node, occo_interface, "resource")
            cloud_type = utils.get_cloud_type(node, SUPPORTED_CLOUDS)

            if cloud_type == "cloudsigma":
                logger.info("CloudSigma resource detected")
                self._node_data_get_cloudsigma_host_properties(node, "resource")
            elif cloud_type == "ec2":
                logger.info("EC2 resource detected")
                self._node_data_get_ec2_host_properties(node, "resource")
            elif cloud_type == "cloudbroker":
                logger.info("CloudBroker resource detected")
                self._node_data_get_cloudbroker_host_properties(node, "resource")
            elif cloud_type == "nova":
                logger.info("Nova resource detected")
                self._node_data_get_nova_host_properties(node, "resource")

            self._get_policies(node)
            self._get_infra_def(tmp)

            node_type = self.node_prefix + self.node_name
            self.node_def.setdefault(node_type, [])
            self.node_def[node_type].append(self.node_data)
        if self.node_def:
            if tmp:
                utils.dump_order_yaml(self.node_def, self.node_path_tmp)
            elif self.validate is False:
                self.prepare_auth_file()
                utils.dump_order_yaml(self.node_def, self.node_path)

        self.status = "translated"
Example #4
    def translate(self):
        """ Translate from TOSCA to scaling_policy.yaml """
        logger.info("Starting ScalingPolicy translation")

        for policy in self.tpl.policies:
            if policy.type in SIMPLE_POL:
                min_cpu = policy.get_property_value("min_cpu_consumption")
                max_cpu = policy.get_property_value("max_cpu_consumption")
                for target in policy.targets:
                    target = f'{self.ID[:8]}_{target}'
                    self.sp_data["services"].update(
                        {target: {"scaledown": min_cpu, "scaleup": max_cpu}})

        utils.dump_order_yaml(self.sp_data, PATH_TO_POLICY)
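For example, with an adaptor ID starting "abcd1234" and a policy targeting a service called "app" (both values hypothetical), the data written to scaling_policy.yaml would look roughly like:

    # Hypothetical self.sp_data content; names and thresholds are invented.
    sp_data = {
        "services": {
            "abcd1234_app": {"scaledown": 20, "scaleup": 80},
        },
    }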
Example #5
    def undeploy(self, update=False):
        """ Remove the relevant policy from scaling_policy.yaml """
        logger.info(f'Remove policy in scaling_policy with id {self.ID}')
        # Iterate over a snapshot of the keys: popping entries from the dict
        # while iterating over it directly would raise a RuntimeError.
        for key in list(self.sp_data["services"].keys()):
            if self.ID[:8] in key:
                self.sp_data["services"].pop(key)
                if update:
                    self._force_removal(key)

        utils.dump_order_yaml(self.sp_data, PATH_TO_POLICY)
Example #6
    def translate(self, tmp=False):
        """
        Translate the self.template subset to the Occopus node definition and infrastructure format.
        The adaptor creates a mapping between the TOSCA and Occopus template descriptors.
        """
        self.node_def = {}
        logger.info("Starting OccoTranslation")
        self.status = "translating"

        for node in self.template.nodetemplates:

            self.node_name = node.name.replace('_','-')
            self.node_data = {}

            cloud_type = self._node_data_get_interface(node, "resource")
            if not cloud_type:
                continue
            elif cloud_type == "cloudsigma":
                logger.info("CloudSigma resource detected")
                self._node_data_get_cloudsigma_host_properties(node, "resource")
            elif cloud_type == "ec2":
                logger.info("EC2 resource detected")
                self._node_data_get_ec2_host_properties(node, "resource")
            elif cloud_type == "cloudbroker":
                logger.info("CloudBroker resource detected")
                self._node_data_get_cloudbroker_host_properties(node, "resource")
            elif cloud_type == "nova":
                logger.info("Nova resource detected")
                self._node_data_get_nova_host_properties(node, "resource")

            self._get_policies()
            self._get_infra_def(tmp)

            node_type = self.node_prefix + self.node_name
            self.node_def.setdefault(node_type, [])
            self.node_def[node_type].append(self.node_data)

            if tmp:
                utils.dump_order_yaml(self.node_def, self.node_path_tmp)
            elif self.validate is False:
                utils.dump_order_yaml(self.node_def, self.node_path)

        self.status = "translated"
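As with the other adaptors, here is a rough illustration of the node_def dictionary that both Occopus translate() variants dump (the prefix, node name and resource fields are invented for this sketch, not taken from the source):

    # Hypothetical self.node_def before dumping.
    node_def = {
        "node_def:worker-node": [
            {"resource": {"type": "cloudsigma", "endpoint": "<api endpoint>"}},
        ],
    }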
Example #7
    def translate(self, tmp=False):
        """ Translate the self.tpl subset to the Compose format

        Does the work of mapping the Docker relevant sections of TOSCA into a
        dictionary following the Docker-Compose format, then dumping output to
        a .yaml file in output_configs/
        :param bool tmp: Set ``True`` for update() - the output file gets the prefix ``tmp_``
        :raises: AdaptorCritical
        """

        logger.info("Starting translation to compose...")
        self.compose_data = {"version": COMPOSE_VERSION}

        # Get the MTU from the default bridge network
        try:
            inspect = json.loads(
                subprocess.check_output(
                    ['docker', 'network', 'inspect', 'bridge']))[0]
            # "Options" may be missing from the inspect output entirely
            self.mtu = inspect.get("Options", {}).get(
                "com.docker.network.driver.mtu")
        except (subprocess.CalledProcessError, FileNotFoundError):
            logger.error("Could not get MTU from default network, using 1500!")
        if not self.mtu:
            self.mtu = 1500

        for node in self.tpl.nodetemplates:
            if DOCKER_CONTAINER in node.type:
                self._compose_properties(node, "services")
                self._compose_artifacts(node, self.tpl.repositories)
                self._compose_requirements(node)
            elif DOCKER_NETWORK in node.type:
                self._compose_properties(node, "networks")
            elif DOCKER_VOLUME in node.type:
                self._compose_properties(node, "volumes")

        if not self.compose_data.get("services"):
            logger.info("No Docker nodes in TOSCA. Do you need this adaptor?")

        if tmp is False:
            utils.dump_order_yaml(self.compose_data, self.path)
        else:
            utils.dump_order_yaml(self.compose_data, self.tmp_path)
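The MTU probe at the top of this example also works on its own. A minimal standalone sketch of the same idea, using only the docker CLI and the standard library and falling back to 1500 (the function name and structure are this sketch's own, not the adaptor's):

    import json
    import subprocess

    def detect_bridge_mtu(default=1500):
        """Return the MTU set on Docker's default bridge network, or `default`
        if it cannot be determined."""
        try:
            networks = json.loads(subprocess.check_output(
                ["docker", "network", "inspect", "bridge"]))
            mtu = networks[0].get("Options", {}).get(
                "com.docker.network.driver.mtu")
            return int(mtu) if mtu else default
        except (subprocess.CalledProcessError, FileNotFoundError,
                IndexError, ValueError):
            return default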
Example #8
    def translate(self, tmp=False):
        """
        Translate the self.tpl subset to Occopus node definition and infrastructure format
        Does the work of mapping the Occopus relevant sections of TOSCA into a
        dictionary, then dumping the output to .yaml files (infra and node definition) in output_configs/
        :param tmp: Helper flag used by the update method; see update() for more information
        :return:
        """
        self.node_data = {}
        logger.info("Starting OccoTranslation")
        ec2 = False
        nova = False
        cloudbroker = False
        cloudsigma = False

        for node in self.template.nodetemplates:
            if "tosca.nodes.MiCADO.Occopus.CloudSigma.Compute" in node.type:
                logger.info("CloudSigma resource detected")
                self._node_data_get_interface(node, "resource")
                self._node_data_get_cloudsigma_host_properties(
                    node, "resource")
                self._get_policies()
                self._get_infra_def(tmp)
                cloudsigma = True
            if "tosca.nodes.MiCADO.Occopus.EC2.Compute" in node.type:
                logger.info("EC2 resource detected")
                self._node_data_get_interface(node, "resource")
                self._node_data_get_ec2_host_properties(node, "resource")
                self._get_policies()
                self._get_infra_def(tmp)
                ec2 = True
            if "tosca.nodes.MiCADO.Occopus.CloudBroker.Compute" in node.type:
                logger.info("CloudBroker resource detected")
                self._node_data_get_interface(node, "resource")
                self._node_data_get_cloudbroker_host_properties(
                    node, "resource")
                self._get_policies()
                self._get_infra_def(tmp)
                cloudbroker = True
            if "tosca.nodes.MiCADO.Occopus.Nova.Compute" in node.type:
                logger.info("Nova resource detected")
                self._node_data_get_interface(node, "resource")
                self._node_data_get_nova_host_properties(node, "resource")
                self._get_policies()
                self._get_infra_def(tmp)
                nova = True

        if cloudsigma:
            self.cloudsigma = {self.node_name: []}
            self.cloudsigma[self.node_name].append(self.node_data)
            if tmp:
                utils.dump_order_yaml(self.cloudsigma, self.node_path_tmp)
            else:
                utils.dump_order_yaml(self.cloudsigma, self.node_path)
        elif ec2:
            self.ec2 = {self.node_name: []}
            self.ec2[self.node_name].append(self.node_data)
            if tmp:
                utils.dump_order_yaml(self.ec2, self.node_path_tmp)
            else:
                utils.dump_order_yaml(self.ec2, self.node_path)
        elif cloudbroker:
            self.cloudbroker = {self.node_name: []}
            self.cloudbroker[self.node_name].append(self.node_data)
            if tmp:
                utils.dump_order_yaml(self.cloudbroker, self.node_path_tmp)
            else:
                utils.dump_order_yaml(self.cloudbroker, self.node_path)
        elif nova:
            self.nova = {self.node_name: []}
            self.nova[self.node_name].append(self.node_data)
            if tmp:
                utils.dump_order_yaml(self.nova, self.node_path_tmp)
            else:
                utils.dump_order_yaml(self.nova, self.node_path)
Example #9
    def _save_file(self, id_app, path):
        """ Method called by the engine to dump the template currently being
        processed to the files/templates directory, using the application ID
        as the file name.
        """
        data = utils.get_yaml_data(path)
        utils.dump_order_yaml(data, "files/templates/{}.yaml".format(id_app))
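Every example on this page funnels into utils.dump_order_yaml (and this last one also uses utils.get_yaml_data). Those helpers belong to the project's own utils module; as a rough, non-authoritative sketch, they can be thought of as thin PyYAML wrappers along these lines (the real implementations may differ):

    import yaml

    def get_yaml_data(path):
        # Load a YAML file into a Python structure (assumed behaviour).
        with open(path, "r") as stream:
            return yaml.safe_load(stream)

    def dump_order_yaml(data, path):
        # Write `data` to `path` as YAML, keeping the insertion order of keys
        # (assumed behaviour; sort_keys=False requires PyYAML >= 5.1).
        with open(path, "w") as stream:
            yaml.dump(data, stream, default_flow_style=False, sort_keys=False)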