def get_node_type_priority(self, node_type_definitions, node_type_name):
        """
        Compute a deployment priority for *node_type_name*.

        A type's priority is one more than the highest priority among the
        node types it requires; Root types have priority 0. Widens the
        class-level MAX_NUM_PRIORITIES bound when exceeded.

        :param node_type_definitions: dict mapping type names to definitions
        :param node_type_name: fully qualified node type name
        :return: int priority (0 for Root types)
        """
        _, _, short_name = utils.tosca_type_parse(node_type_name)
        if short_name == ROOT:
            return 0

        # Collect requirement definitions along the whole derivation chain.
        inherited_requirements = []
        current = node_type_name
        while current is not None:
            type_def = node_type_definitions.get(current, {})
            inherited_requirements.extend(type_def.get(REQUIREMENTS, []))
            current = type_def.get(DERIVED_FROM, None)

        # Merge so that definitions earlier in the chain override later ones.
        merged_requirements = dict()
        for requirement in reversed(inherited_requirements):
            merged_requirements.update(requirement)

        priority = 0
        for req_def in merged_requirements.values():
            required_type = req_def.get(NODE)
            _, _, required_short = utils.tosca_type_parse(required_type)
            if required_short != ROOT and required_type != node_type_name:
                dependency_priority = self.get_node_type_priority(node_type_definitions, required_type)
                if dependency_priority > priority or priority == 0:
                    priority = dependency_priority + 1
        # Grow the global priority bound if this type exceeds it.
        if priority >= self.MAX_NUM_PRIORITIES:
            ProviderResource.MAX_NUM_PRIORITIES = priority + 1
        return priority
    def resolve_in_template_dependencies(self):
        """
        TODO think through the logic to replace mentions by id
        Changes all mentions of node_templates by name in requirements, places
        a dictionary with node_filter instead.
        :return: None (mutates requirements in place)
        """
        for node in self.node_templates:
            for req in node.requirements:
                for req_name, req_value in req.items():
                    if isinstance(req_value, str):
                        # The requirement is the name of a node or relationship
                        # template: record the dependency and replace the
                        # mention with a node_filter built from the target
                        # template's properties and capabilities.
                        self.add_template_dependency(node.name, req_value)

                        target_tpl = node.templates.get(req_value)
                        node_filter = dict()

                        properties = target_tpl.get(PROPERTIES)
                        if properties:
                            node_filter[PROPERTIES] = [
                                {prop_name: prop} for prop_name, prop in properties.items()
                            ]

                        capabilities = target_tpl.get(CAPABILITIES)
                        if capabilities:
                            caps_list = []
                            for cap in capabilities.values():
                                # BUG FIX: this previously appended
                                # {prop_name, prop} — a set literal — instead
                                # of a dict, unlike the PROPERTIES handling
                                # above. Property filters must be dicts.
                                cap_props_list = [
                                    {prop_name: prop}
                                    for prop_name, prop in cap.get(PROPERTIES, {}).items()
                                ]
                                caps_list.append({PROPERTIES: cap_props_list})
                            node_filter[CAPABILITIES] = caps_list

                        req[req_name] = dict(node_filter=node_filter)
                    else:
                        # The case when requirement has parameters.
                        # Valid keys are ('node', 'node_filter', 'relationship', 'capability', 'occurrences')
                        # Only node and relationship might be a template name or a type
                        req_relationship = req[req_name].get(RELATIONSHIP)
                        req_node = req[req_name].get(NODE)

                        # A parse without a type name means the value is a
                        # template name, not a TOSCA type.
                        if req_relationship is not None:
                            _, _, type_name = utils.tosca_type_parse(req_relationship)
                            if type_name is None:
                                self.add_template_dependency(node.name, req_relationship)

                        if req_node is not None:
                            _, _, type_name = utils.tosca_type_parse(req_node)
                            if type_name is None:
                                self.add_template_dependency(node.name, req_node)
def translate(tosca_elements_map_to_provider, topology_template, provider):
    """
    Translate normative TOSCA element templates into provider-specific ones.

    :param tosca_elements_map_to_provider: dict from the provider-specific
        tosca_elements_map_to_<provider>.yaml file
    :param topology_template: parsed topology template holding node and
        relationship templates
    :param provider: name of the target provider namespace
    :return: tuple (new element templates, artifacts, conditions set, extra)
    """
    element_templates = topology_template.nodetemplates + topology_template.relationship_templates

    new_element_templates = {}
    conditions = []
    # 'self' is a plain dict used as a shared context by the helper functions.
    self = {ARTIFACTS: [], EXTRA: dict()}

    for element in element_templates:
        namespace, _, _ = utils.tosca_type_parse(element.type)
        self[NAME] = element.name
        self[KEYNAME] = element.name
        self[BUFFER] = {}

        if namespace == provider:
            # Already provider-specific: merge it in as-is.
            new_element_templates = utils.deep_update_dict(
                new_element_templates, translate_element_from_provider(element))
            continue

        restructured_mapping = restructure_mapping(
            tosca_elements_map_to_provider, element, self)
        restructured_mapping, extra_mappings, new_conditions = \
            restructure_mapping_facts(restructured_mapping)
        restructured_mapping.extend(extra_mappings)
        conditions.extend(new_conditions)

        tpl_structure = translate_node_from_tosca(
            restructured_mapping, element.name, self)
        for tpl_name, typed_tpls in tpl_structure.items():
            for node_type, tpl in typed_tpls.items():
                _, element_type, _ = utils.tosca_type_parse(node_type)
                tpl[TYPE] = node_type
                new_element_templates.setdefault(element_type, {})[tpl_name] = copy.deepcopy(tpl)

    return new_element_templates, self[ARTIFACTS], set(conditions), self[EXTRA]
def translate(service_tmpl):
    """
    Translate a service template's elements into provider-specific templates.

    :param service_tmpl: service template object carrying node/relationship
        templates, the provider name and translation settings
    :return: tuple (new element templates, extra configuration params,
        mapping from source template names to generated template names)
    """
    element_templates = copy.copy(service_tmpl.node_templates)
    element_templates.update(copy.copy(service_tmpl.relationship_templates))

    new_element_templates = {}
    template_mapping = {}
    # 'self' is a plain dict used as a shared context by the helper functions.
    self = {ARTIFACTS: [], EXTRA: dict(), PUBLIC_KEY: service_tmpl.public_key_path}

    for tmpl_name, element in element_templates.items():
        namespace, _, _ = utils.tosca_type_parse(element[TYPE])
        self[NAME] = tmpl_name
        self[KEYNAME] = tmpl_name
        self[BUFFER] = {}

        if namespace == service_tmpl.provider:
            # Already provider-specific: merge it in as-is.
            new_element_templates = utils.deep_update_dict(
                new_element_templates,
                translate_element_from_provider(tmpl_name, element))
            continue

        # Pipeline of restructuring passes over the mapping entries.
        mapping = restructure_mapping(service_tmpl, element, tmpl_name, self)
        mapping = sort_host_ip_parameter(mapping, service_tmpl.host_ip_parameter)
        mapping = restructure_mapping_buffer(mapping, self)
        mapping, extra_mappings = restructure_mapping_facts(
            mapping, self, service_tmpl.is_delete, service_tmpl.cluster_name)
        mapping.extend(extra_mappings)
        mapping = restructure_get_attribute(mapping, service_tmpl, self)

        tpl_structure = translate_node_from_tosca(mapping, tmpl_name, self)
        for tpl_name_new, typed_tpls in tpl_structure.items():
            for node_type, tpl in typed_tpls.items():
                _, element_type, _ = utils.tosca_type_parse(node_type)
                tpl[TYPE] = node_type
                # Remember which generated templates came from this source one.
                template_mapping.setdefault(tmpl_name, set()).add(tpl_name_new)
                new_element_templates.setdefault(element_type, {})[tpl_name_new] = copy.deepcopy(tpl)

    self_extra = utils.replace_brackets(self[EXTRA], False)
    self_artifacts = utils.replace_brackets(self[ARTIFACTS], False)
    execute(self_artifacts, service_tmpl.is_delete, service_tmpl.cluster_name)

    return new_element_templates, self_extra, template_mapping
Exemple #5
0
    def translate_to_provider(self):
        """
        Translate this template's elements to provider-specific types and
        validate the result with the OpenStack tosca-parser.

        Updates node/relationship/output templates, the template mapping and
        the extra configuration tool parameters in place. Exits the process
        if the intermediate template fails to parse.
        """
        new_element_templates, new_extra, template_mapping = translate_to_provider(self)

        dict_tpl = {}
        self.template_mapping = template_mapping
        if new_element_templates.get(NODES):
            dict_tpl[NODE_TEMPLATES] = new_element_templates[NODES]
        if new_element_templates.get(RELATIONSHIPS):
            dict_tpl[RELATIONSHIP_TEMPLATES] = new_element_templates[RELATIONSHIPS]
        if new_element_templates.get(OUTPUTS):
            dict_tpl[OUTPUTS] = new_element_templates[OUTPUTS]
        if self.inputs:
            dict_tpl[INPUTS] = self.inputs

        # Collect relationship type definitions needed by the parser.
        rel_types = {}
        for def_name, def_body in self.definitions.items():
            (_, element_type, _) = utils.tosca_type_parse(def_name)
            if element_type == RELATIONSHIPS:
                rel_types[def_name] = def_body

        logging.debug("TOSCA template with non normative types for provider %s was generated: \n%s"
                      % (self.provider, yaml.dump(dict_tpl)))

        try:
            # Parsing is done for validation only; the object is discarded.
            topology_tpl = TopologyTemplate(dict_tpl, self.definitions, rel_types=rel_types)
        # BUG FIX: a bare 'except:' also swallowed SystemExit and
        # KeyboardInterrupt; catch only real errors from the parser.
        except Exception:
            logging.exception("Failed to parse intermidiate non-normative TOSCA template with OpenStack tosca-parser")
            sys.exit(1)

        self.extra_configuration_tool_params = utils.deep_update_dict(self.extra_configuration_tool_params, new_extra)

        self.node_templates = new_element_templates.get(NODES, {})
        self.relationship_templates = new_element_templates.get(RELATIONSHIPS, {})
        self.outputs = new_element_templates.get(OUTPUTS, {})
Exemple #6
0
    def resolve_in_template_dependencies(self):
        """
        TODO think through the logic to replace mentions by id
        Changes all mentions of node_templates by name in requirements, places dictionary with node_filter instead
        :return:
        """
        for node_name, node in self.node_templates.items():
            for req in node.get(REQUIREMENTS, []):
                for req_name, req_body in req.items():

                    # Valid keys are ('node', 'node_filter', 'relationship', 'capability', 'occurrences')
                    # Only node and relationship might be a template name or a type
                    req_relationship = req_body.get(RELATIONSHIP)
                    req_node = req_body.get(NODE)

                    # A parse yielding no type name means the value refers to
                    # a template by name rather than to a TOSCA type.
                    if req_relationship is not None:
                        (_, _, type_name) = utils.tosca_type_parse(req_relationship)
                        if type_name is None:
                            self.add_template_dependency(node_name, req_relationship)
                            # Remember which source/target pair this
                            # relationship template connects.
                            # NOTE(review): 'target' comes from the sibling
                            # 'node' key and may be None here — confirm intended.
                            self._relation_target_source[req_relationship] = {
                                'source': node_name,
                                'target': req_node
                            }

                    if req_node is not None:
                        (_, _, type_name) = utils.tosca_type_parse(req_node)
                        if type_name is None:
                            self.add_template_dependency(node_name, req_node)

            # Also add dependencies implied by the node type's requirement
            # definitions: every template whose type matches a required node
            # type (other than this node's own type) becomes a dependency.
            node_types_from_requirements = set()
            req_definitions = self.definitions[node[TYPE]].get(REQUIREMENTS, [])
            for req in req_definitions:
                for req_name, req_def in req.items():
                    if req_def.get(NODE, None) is not None:
                        if req_def[NODE] != node[TYPE]:
                            node_types_from_requirements.add(req_def[NODE])
            for req_node_name, req_node_tmpl in self.node_templates.items():
                if req_node_tmpl[TYPE] in node_types_from_requirements:
                    self.add_template_dependency(node_name, req_node_name)

        # Search in all nodes for get function mentions and get its target name

        for node_name, node_tmpl in self.node_templates.items():
            self.search_get_function(node_name, node_tmpl)

        for rel_name, rel_tmpl in self.relationship_templates.items():
            self.search_get_function(rel_name, rel_tmpl)
def translate_element_from_provider(node):
    """Wrap an already provider-specific node into the standard structure.

    :param node: node template object with .type, .name and .entity_tpl
    :return: dict {element_type: {node name: deep copy of entity_tpl}}
    """
    _, element_type, _ = utils.tosca_type_parse(node.type)
    return {element_type: {node.name: copy.deepcopy(node.entity_tpl)}}
def translate_element_from_provider(tmpl_name, node_tmpl):
    """Wrap an already provider-specific template into the standard structure.

    :param tmpl_name: name of the template
    :param node_tmpl: template body as a dict (must carry TYPE)
    :return: dict {element_type: {tmpl_name: deep copy of node_tmpl}}
    """
    _, element_type, _ = utils.tosca_type_parse(node_tmpl[TYPE])
    return {element_type: {tmpl_name: copy.deepcopy(node_tmpl)}}
 def compute_node_priorities(self, node_type_definitions):
     """
     Compute the deployment priority of every provider node type.

     Resets the global MAX_NUM_PRIORITIES bound before recomputing.
     :param node_type_definitions: dict of node type definitions
     :return: dict mapping node type names to their priority
     """
     ProviderResource.MAX_NUM_PRIORITIES = 1
     priorities = {}
     for type_name in node_type_definitions:
         namespace, element_type, short_name = utils.tosca_type_parse(type_name)
         # Only provider-specific, non-Root node types receive a priority.
         if short_name != ROOT and element_type == NODES and namespace != TOSCA:
             priorities[type_name] = self.get_node_type_priority(node_type_definitions, type_name)
     return priorities
    def _provider_nodes(self):
        """
        Create a list of ProviderResource classes to represent a node in TOSCA
        :return: list of class objects inherited from ProviderResource
        """
        result = list()
        for node in self.node_templates:
            namespace, category, _ = utils.tosca_type_parse(node.type)
            is_software = node.name in self.software_component_names
            # A node must belong to the provider namespace (or be a known
            # software component) and be of the NODES category.
            if namespace != self.provider and not is_software or category != NODES:
                ExceptionCollector.appendException(Exception('Unexpected values'))
            # NOTE: the node is wrapped even when the check above failed,
            # mirroring the collector-based error handling.
            result.append(ProviderResource(self.provider, node, self.relationship_templates,
                                           is_software))
        return result
 def init_queue(self, nodes_relationships_queue):
     """
     Split the queue into plain elements and software components, resolving
     get-functions in global operations and node configuration arguments.

     :param nodes_relationships_queue: iterable of node/relationship templates
     :return: tuple (elements_queue, software_queue)
     """
     elements_queue = []
     software_queue = []
     for item in nodes_relationships_queue:
         self.gather_global_operations(item)
     for op_name, op_info in self.global_operations_info.items():
         self.global_operations_info[op_name] = self.replace_all_get_functions(op_info)
     for item in nodes_relationships_queue:
         _, element_type, _ = utils.tosca_type_parse(item.type)
         if element_type == NODES:
             item.configuration_args = self.replace_all_get_functions(item.configuration_args)
             # Software components are deployed separately from plain nodes.
             (software_queue if item.is_software_component else elements_queue).append(item)
     return elements_queue, software_queue
Exemple #12
0
    def __init__(self, requirement_definitions, provider):
        """
        Get the requirements of type list from requirement definitions
        :param requirement_definitions: list of requirement definitions with name added
        :param provider: name of the provider the requirements belong to
        """
        self.provider = provider
        # Index the definitions by requirement name for direct lookup.
        self.requirement_definitions = dict()
        for req in requirement_definitions:
            self.requirement_definitions[req[NAME]] = req
        self.requirement_names_of_type_list = set()
        self._node_name_by_requirement_name = dict()

        # NOTE generate the dictionary, where the keys are the name of requirement and
        # the values are the node_types of requirement
        for req_name, req_def in self.requirement_definitions.items():
            req_node = req_def.get(NODE)
            if req_node:
                (_, _, type_name) = utils.tosca_type_parse(req_node)
                node_name = utils.snake_case(type_name)
                if node_name == 'root':
                    continue
                temp_req_val = self._node_name_by_requirement_name.get(req_name)
                if temp_req_val is None:
                    temp_req_val = node_name
                elif isinstance(temp_req_val, str):
                    # Second node type for this requirement: promote to tuple.
                    temp_req_val = (temp_req_val, node_name)
                else:
                    # BUG FIX: was 'temp_req_val + (node_name)' — without the
                    # trailing comma '(node_name)' is a plain string, so
                    # tuple + str raised TypeError. Append a 1-element tuple.
                    temp_req_val = temp_req_val + (node_name,)
                self._node_name_by_requirement_name[req_name] = temp_req_val

        # NOTE set the list required requirements and the list of multiple requirements (of type list)
        self.required_requirement_keys = set()
        for req_name, req_def in self.requirement_definitions.items():
            occurrences = req_def.get(OCCURRENCES, [0, 'UNBOUNDED'])  # list
            min_ocs, max_ocs = occurrences[0], occurrences[1]
            if int(min_ocs) > 0:
                self.required_requirement_keys.add(req_name)
            # 'UNBOUNDED' or any bound above one makes this a list requirement.
            # (short-circuit: int() is only applied to numeric bounds)
            if str(max_ocs) == 'UNBOUNDED' or int(max_ocs) > 1:
                self.requirement_names_of_type_list.add(req_name)
Exemple #13
0
 def _provider_nodes(self):
     """
     Create a dict of ProviderResource objects to represent nodes in TOSCA.
     Nodes that are neither provider-specific nor software components, or
     that are not of the NODES category, are logged and skipped.
     :return: dict mapping node names to ProviderResource instances
     """
     provider_nodes = dict()
     for node_name, node in self.node_templates.items():
         (namespace, category, type_name) = utils.tosca_type_parse(node[TYPE])
         is_software_component = node[TYPE] in self.software_types
         if namespace != self.provider and not is_software_component or category != NODES:
             # BUG FIX: 'node' is a dict here, so 'node.name' raised
             # AttributeError inside the error log; use node_name instead.
             logging.error('Unexpected values: node \'%s\' not a software component and has a provider \'%s\'. '
                           'Node will be ignored' % (node_name, namespace))
         else:
             provider_node_instance = ProviderResource(self.provider, self.is_delete, self.cluster_name, self.configuration_tool, node,
                                                       node_name,
                                                       self.host_ip_parameter, self.definitions[node[TYPE]],
                                                       is_software_component=is_software_component)
             provider_nodes[node_name] = provider_node_instance
     return provider_nodes
def translate_node_from_tosca(restructured_mapping, tpl_name, self):
    """
    Translator from TOSCA definitions in provider definitions using rules from element_map_to_provider
    :param restructured_mapping: list of dicts(parameter, map, value)
    :param tpl_name: str
    :param self: shared translation context dict (KEYNAME/PARAMETER/VALUE keys)
    :return: entity_tpl as dict
    """
    resulted_structure = {}

    for item in restructured_mapping:
        # Exceptions are collected per mapping item and raised as one
        # combined validation report below.
        ExceptionCollector.start()
        self[PARAMETER] = item[PARAMETER]
        self[VALUE] = item[VALUE]
        mapped_param = restructure_value(mapping_value=item[MAP_KEY],
                                         self=self)
        ExceptionCollector.stop()
        if ExceptionCollector.exceptionsCaught():
            raise ValidationError(
                message='\nTranslating to provider failed: '.join(
                    ExceptionCollector.getExceptionsReport()))
        structures, keyname = get_structure_of_mapped_param(
            mapped_param, item[VALUE])

        for structure in structures:
            r = retrieve_node_templates(structure)
            for node_type, tpl in r.items():
                if not keyname:
                    # Derive a key name from the node type when none is given.
                    (_, _, type_name) = utils.tosca_type_parse(node_type)
                    if not type_name:
                        # NOTE(review): appendException is called with no
                        # argument here — confirm the collector accepts that.
                        ExceptionCollector.appendException()
                    keyname = self[KEYNAME] + "_" + utils.snake_case(type_name)
                node_tpl_with_name = {keyname: {node_type: tpl}}
                resulted_structure = utils.deep_update_dict(
                    resulted_structure, node_tpl_with_name)

    # Requirements are merged as a mapping internally; TOSCA expects a list
    # of single-entry dicts, so convert them before returning.
    for keyname, node in resulted_structure.items():
        for node_type, tpl in node.items():
            if tpl.get(REQUIREMENTS):
                reqs = []
                for req_name, req in tpl[REQUIREMENTS].items():
                    reqs.append({req_name: req})
                resulted_structure[keyname][node_type][REQUIREMENTS] = reqs
    return resulted_structure
Exemple #15
0
    def init_graph(self, operations_graph):
        """
        Resolve get-functions across the operations graph and drop vertices
        that are neither node nor relationship templates.

        :param operations_graph: dict mapping vertices to sets of dependencies
        :return: the pruned operations_graph (also mutated in place)
        """
        ordered = [*TopologicalSorter(operations_graph).static_order()]

        for vertex in ordered:
            self.gather_global_operations(vertex)
        for op_name, op_info in self.global_operations_info.items():
            self.global_operations_info[op_name] = self.replace_all_get_functions(op_info)
        for vertex in ordered:
            _, element_type, _ = utils.tosca_type_parse(vertex.type)
            if element_type in (NODES, RELATIONSHIPS):
                vertex.configuration_args = self.replace_all_get_functions(vertex.configuration_args)
            else:
                # Remove the vertex and every edge pointing at it.
                del operations_graph[vertex]
                for other in operations_graph:
                    if vertex in operations_graph[other]:
                        operations_graph[other].remove(vertex)
        return operations_graph
    def translate_to_provider(self):
        """
        Translate the normative topology template to provider-specific types
        and rebuild the topology template from the result.

        :return: the provider-specific TopologyTemplate
        """
        new_element_templates, new_artifacts, conditions_set, new_extra = translate_to_provider(
            self.tosca_elements_map_to_provider(), self.tosca_topology_template, self.provider)

        self.used_conditions_set = conditions_set
        dict_tpl = copy.deepcopy(self.tosca_topology_template.tpl)
        for category, tpl_key in ((NODES, NODE_TEMPLATES),
                                  (RELATIONSHIPS, RELATIONSHIP_TEMPLATES)):
            if new_element_templates.get(category):
                dict_tpl[tpl_key] = new_element_templates[category]

        # Gather relationship type definitions required by the parser.
        rel_types = [def_body for def_name, def_body in self.provider_defs.items()
                     if utils.tosca_type_parse(def_name)[1] == RELATIONSHIP_TYPES]

        topology_tpl = TopologyTemplate(dict_tpl, self.full_provider_defs, rel_types)
        self.artifacts.extend(new_artifacts)
        self.extra_configuration_tool_params = utils.deep_update_dict(self.extra_configuration_tool_params, new_extra)

        return topology_tpl
Exemple #17
0
    def _get_full_defintion(self, definition, def_type, ready_set):
        """
        Recursively merge *definition* with its ancestors (via DERIVED_FROM).

        Also tracks software-component types in self.software_types; exits
        the process on a self-derived (cyclic) type.
        :param definition: type definition dict to complete
        :param def_type: fully qualified name of the type
        :param ready_set: set of already-resolved type names (mutated here)
        :return: tuple (full definition, whether the type is a software type)
        """
        if def_type in ready_set:
            return definition, def_type in self.software_types

        (_, _, def_type_short) = utils.tosca_type_parse(def_type)
        is_software_type = def_type_short == 'SoftwareComponent'
        is_software_parent = False
        parent_def_name = definition.get(DERIVED_FROM, None)
        if parent_def_name is not None:
            # A type derived from itself would recurse forever.
            if def_type == parent_def_name:
                logging.critical("Invalid type \'%s\' is derived from itself" % def_type)
                sys.exit(1)
            if parent_def_name in ready_set:
                parent_definition = self.definitions[parent_def_name]
                is_software_parent = parent_def_name in self.software_types
            else:
                parent_definition, is_software_parent = \
                    self._get_full_defintion(self.definitions[parent_def_name], parent_def_name, ready_set)
            # Deep-copy so merging does not mutate the stored parent definition.
            parent_definition = copy.deepcopy(parent_definition)
            definition = utils.deep_update_dict(parent_definition, definition)
        # Software-ness is inherited from any ancestor.
        if is_software_type or is_software_parent:
            self.software_types.add(def_type)
        ready_set.add(def_type)
        return definition, def_type in self.software_types
Exemple #18
0
    def sort_nodes_and_operations_by_graph_dependency(self):
        """
            This method generates a dict of ProviderTemplates with operations,
            sorted by dependencies from normative and provider TOSCA templates.
            Keys of the intermediate graphs are strings 'template<SEPARATOR>operation';
            the final graphs map ProviderTemplate copies (one per operation) to
            the set of templates they depend on.
            :return: tuple (templ_dependencies, reversed_templ_dependencies)
        """
        nodes = set(self.provider_nodes.keys())
        nodes = nodes.union(set(self.provider_relations.keys()))
        dependencies = {}
        lifecycle = ['configure', 'start', 'stop', 'delete']
        reversed_full_lifecycle = lifecycle[::-1] + ['create']
        # generate only dependencies from nodes
        for templ_name in nodes:
            set_intersection = nodes.intersection(self.template_dependencies.get(templ_name, set()))
            templ = self.provider_nodes.get(templ_name, self.provider_relations.get(templ_name))
            (_, element_type, _) = utils.tosca_type_parse(templ.type)
            if element_type == NODES:
                if 'interfaces' in templ.tmpl and 'Standard' in templ.tmpl['interfaces']:
                    new_operations = ['create']
                    # operation create always exists
                    for elem in lifecycle:
                        if elem in templ.tmpl['interfaces']['Standard']:
                            new_operations.append(elem)
                    # if there are any other operations - add to new_operations and translate to dict
                    # in format {node.op: {node1, node2}}
                    # node requires node1 and node2
                    if len(new_operations) == 1:
                        utils.deep_update_dict(dependencies, {templ_name + SEPARATOR + 'create': set_intersection})
                    else:
                        # Chain the node's own lifecycle operations in order.
                        for i in range(1, len(new_operations)):
                            utils.deep_update_dict(dependencies, {
                                templ_name + SEPARATOR + new_operations[i]: {
                                    templ_name + SEPARATOR + new_operations[i - 1]}})
                        utils.deep_update_dict(dependencies,
                                               {templ_name + SEPARATOR + new_operations[0]: set_intersection})
                else:
                    utils.deep_update_dict(dependencies, {templ_name + SEPARATOR + 'create': set_intersection})
        new_normative_graph = {}

        # getting dependencies for create operations of nodes, translated from 1 normative node
        for key, value in self.normative_nodes_graph.items():
            for elem in value:
                # Find the latest lifecycle operation the element actually has.
                for op in reversed_full_lifecycle:
                    new_oper = elem + SEPARATOR + op
                    if new_oper in dependencies:
                        if key + SEPARATOR + 'create' in new_normative_graph:
                            new_normative_graph[key + SEPARATOR + 'create'].add(new_oper)
                        else:
                            new_normative_graph[key + SEPARATOR + 'create'] = {new_oper}
                        break
                else:
                    # for/else: no operation of the element appears in the graph
                    logging.error("Operation create not found")
                    sys.exit(1)
        # update dependencies
        dependencies = utils.deep_update_dict(dependencies, new_normative_graph)
        new_dependencies = {}
        # new_dependencies is needed for updating set operations
        # dict must be in format {node.op: {node1, node2}}
        for key, value in dependencies.items():
            new_set = set()
            for elem in value:
                for oper in reversed_full_lifecycle:
                    if elem + SEPARATOR + oper in dependencies:
                        new_set.add(elem + SEPARATOR + oper)
                        break
                    elif elem in dependencies:
                        new_set.add(elem)
                        break
            new_dependencies[key] = new_set

        # adding relationships operations pre_configure_source after create source node
        # pre_configure_target after create target node
        # add_source in parallel with pre_configure_source but in will be executed on target
        # post_configure_target after configure target node (if not configure then create - in parallel
        # with pre_configure_target)
        # post_configure_source after configure target node (if not configure then create - in parallel
        # with pre_configure_source)
        # other - not supported!
        for templ_name in nodes:
            templ = self.provider_nodes.get(templ_name, self.provider_relations.get(templ_name))
            (_, element_type, _) = utils.tosca_type_parse(templ.type)
            if element_type == RELATIONSHIPS:
                if 'interfaces' in templ.tmpl and 'Configure' in templ.tmpl['interfaces']:
                    if 'pre_configure_source' in templ.tmpl['interfaces']['Configure']:
                        new_dependencies = self.update_relationships(new_dependencies, templ.name, templ.source,
                                                                     'pre_configure_source', 'create', ['add_source'])
                    if 'pre_configure_target' in templ.tmpl['interfaces']['Configure']:
                        new_dependencies = self.update_relationships(new_dependencies, templ.name, templ.target,
                                                                     'pre_configure_target', 'create')
                    if 'post_configure_source' in templ.tmpl['interfaces']['Configure']:
                        if templ.source + SEPARATOR + 'configure' in new_dependencies:
                            new_dependencies = self.update_relationships(new_dependencies, templ.name, templ.source,
                                                                         'post_configure_source', 'configure')
                        else:
                            new_dependencies = self.update_relationships(new_dependencies, templ.name, templ.source,
                                                                         'post_configure_source', 'create')
                    if 'post_configure_target' in templ.tmpl['interfaces']['Configure']:
                        if templ.target + SEPARATOR + 'configure' in new_dependencies:
                            new_dependencies = self.update_relationships(new_dependencies, templ.name, templ.target,
                                                                         'post_configure_target', 'configure')
                        else:
                            new_dependencies = self.update_relationships(new_dependencies, templ.name, templ.target,
                                                                         'post_configure_target', 'create')
                    if 'add_source' in templ.tmpl['interfaces']['Configure']:
                        new_dependencies = self.update_relationships(new_dependencies, templ.name, templ.source,
                                                                     'add_source', 'create', ['pre_configure_source'])
                    if 'add_target' in templ.tmpl['interfaces']['Configure']:
                        logging.warning('Operation add_target not supported, it will be skipped')
                    if 'target_changed' in templ.tmpl['interfaces']['Configure']:
                        logging.warning('Operation target_changed not supported, it will be skipped')
                    if 'remove_target' in templ.tmpl['interfaces']['Configure']:
                        logging.warning('Operation remove_target not supported, it will be skipped')
        # mapping strings 'node.op' to provider template of this node with this operation
        templ_mappling = {}
        for elem in new_dependencies:
            templ_name = elem.split(SEPARATOR)[0]
            # Each (node, operation) pair gets its own template copy.
            templ = copy.deepcopy(self.provider_nodes.get(templ_name, self.provider_relations.get(templ_name)))
            templ.operation = elem.split(SEPARATOR)[1]
            templ_mappling[elem] = templ
        templ_dependencies = {}
        reversed_templ_dependencies = {}
        # create dict where all elements will be replaced with provider template from templ_mappling
        # reversed_templ_dependencies needed for delete - it is just a reversed version of the graph
        for key, value in new_dependencies.items():
            new_set = set()
            for elem in value:
                new_set.add(templ_mappling[elem])
                if templ_mappling[elem] not in reversed_templ_dependencies:
                    reversed_templ_dependencies[templ_mappling[elem]] = {templ_mappling[key]}
                elif templ_mappling[key] not in reversed_templ_dependencies[templ_mappling[elem]]:
                    reversed_templ_dependencies[templ_mappling[elem]].add(templ_mappling[key])
            templ_dependencies[templ_mappling[key]] = new_set
        return templ_dependencies, reversed_templ_dependencies
def get_keyname_from_type(keyname, node_type):
    """Build a provider key name by appending the snake_cased short type name.

    :param keyname: base key name used as the prefix
    :param node_type: full TOSCA type string to derive the suffix from
    :return: "<keyname>_<snake_cased short type name>"; exits on parse failure
    """
    _, _, short_name = utils.tosca_type_parse(node_type)
    if short_name:
        return "%s_%s" % (keyname, utils.snake_case(short_name))
    # the type string could not be split into a short name - fatal
    logging.critical("Unable to parse type name: %s" % json.dumps(node_type))
    sys.exit(1)
# Example #20
    def __init__(self,
                 provider,
                 is_delete,
                 cluster_name,
                 configuration_tool,
                 tmpl,
                 node_name,
                 host_ip_parameter,
                 node_type,
                 is_software_component=False,
                 is_relationship=False,
                 relation_target_source=None):
        """
        Wrap a provider node/relationship template and precompute its
        configuration arguments from the type definition.

        :param provider: target provider name
        :param is_delete: True when a delete (teardown) scenario is generated
        :param cluster_name: name of the cluster being managed
        :param configuration_tool: configuration tool name (e.g. 'ansible')
        :param tmpl: template dict (must contain TYPE)
        :param node_name: template name
        :param host_ip_parameter: host address attribute to use
            ('public_address' or 'private_address'; any other value falls
            back to 'private_address')
        :param node_type: type definition dict for this template
        :param is_software_component: True if the node is a software component
        :param is_relationship: True if the template is a relationship
        :param relation_target_source: mapping of relationship name to a dict
            with 'source' and 'target' keys (only used for relationships)
        """
        # NOTE: None sentinel instead of a shared mutable default dict
        if relation_target_source is None:
            relation_target_source = {}

        self.provider = provider
        self.tmpl = tmpl
        self.cluster_name = cluster_name
        self.name = node_name
        self.type = tmpl[TYPE]
        (_, _, type_name) = utils.tosca_type_parse(self.type)
        self.type_name = type_name
        self.type_definition = node_type
        self.is_software_component = is_software_component
        self.host = None
        self.self = node_name
        self.target = None
        self.source = None
        self.operation = None
        self.is_delete = is_delete

        self.set_defaults()
        # NOTE: Get the parameters from template using provider definition
        self.configuration_args = dict()
        # NOTE: node is NodeTemplate instance
        for key in self.type_definition.get(PROPERTIES, {}).keys():
            value = self.tmpl.get(PROPERTIES, {}).get(key, None)
            if value is not None:
                self.configuration_args[key] = value

        # NOTE: filling the parameters from openstack definition to parse from input template
        if is_relationship:
            self.source = relation_target_source.get(self.name,
                                                     {}).get('source')
            self.target = relation_target_source.get(self.name,
                                                     {}).get('target')
        else:
            capability_defs = self.type_definition.get(CAPABILITIES, {})
            for cap_key, cap_def in capability_defs.items():
                properties = self.tmpl.get(CAPABILITIES, {}).get(cap_key)
                definition_property_keys = cap_def.get(PROPERTIES, {}).keys()
                if properties:
                    for def_prop_key in definition_property_keys:
                        value = properties.get_property_value(def_prop_key)
                        if value:
                            self.configuration_args[def_prop_key] = value

            # NOTE(review): this unpacks 2-tuples, so ARTIFACTS is presumably
            # a list of (key, value) pairs here; iterating a plain dict this
            # way would fail - confirm the artifact representation upstream
            for key, value in self.tmpl.get(ARTIFACTS, []):
                self.configuration_args[key] = value

            provider_requirements = ProviderRequirements(
                self.requirement_definitions, self.provider)
            self.requirements = provider_requirements.get_requirements(tmpl)

            for req_name, reqs in self.requirements.items():
                if isinstance(reqs, list):
                    iter_reqs = reqs
                else:
                    iter_reqs = [reqs]
                for req in iter_reqs:
                    relationship = req.definition[RELATIONSHIP]
                    (_, _, type_name) = utils.tosca_type_parse(relationship)
                    if type_name == 'HostedOn':
                        if self.host is not None:
                            # BUGFIX: the message previously had no %s
                            # placeholder, so the %-formatting itself raised
                            # a TypeError instead of reporting the node name
                            logging.critical(
                                "Node \'%s\' can be hosted only on one node" %
                                self.name)
                            sys.exit(1)
                        if host_ip_parameter not in ('public_address',
                                                     'private_address'):
                            host_ip_parameter = 'private_address'
                        self.host = req.value + '_' + host_ip_parameter

            self.node_filter_artifacts = []
            for key, req in self.requirements.items():
                if type(req) is list:
                    self.configuration_args[key] = list(v.get_value()
                                                        for v in req)
                else:
                    self.configuration_args[key] = req.get_value()

            if configuration_tool == 'ansible':
                provider_config = ProviderConfiguration(provider)
                node_filter_config = provider_config.get_subsection(
                    ANSIBLE, NODE_FILTER)
                if not node_filter_config:
                    node_filter_config = {}
                node_filter_source_prefix = node_filter_config.get(
                    'node_filter_source_prefix', '')
                node_filter_source_postfix = node_filter_config.get(
                    'node_filter_source_postfix', '')
                # BUGFIX: default was '' (a str), but the value is used below
                # with .get(), which str does not have - default to a dict
                node_filter_exceptions = node_filter_config.get(
                    'node_filter_exceptions', {})
                node_filter_inner_variable = node_filter_config.get(
                    'node_filter_inner_variable')
                for arg_key, arg in self.configuration_args.items():
                    if isinstance(arg, dict):
                        node_filter_key = arg.get(SOURCE, {}).get(NODE_FILTER)
                        node_filter_value = arg.get(VALUE)
                        node_filter_params = arg.get(PARAMETERS)

                        if node_filter_key and node_filter_value and node_filter_params:
                            node_filter_source = node_filter_source_prefix + node_filter_key + node_filter_source_postfix
                            if node_filter_exceptions.get(node_filter_key):
                                node_filter_source = node_filter_exceptions[
                                    node_filter_key]

                            NODE_FILTER_FACTS = 'node_filter_facts'
                            NODE_FILTER_FACTS_REGISTER = NODE_FILTER_FACTS + '_raw'
                            NODE_FILTER_FACTS_VALUE = NODE_FILTER_FACTS_REGISTER
                            if node_filter_inner_variable:
                                if isinstance(node_filter_inner_variable,
                                              dict):
                                    node_filter_inner_variable = node_filter_inner_variable.get(
                                        node_filter_key, '')
                                if isinstance(node_filter_inner_variable,
                                              six.string_types):
                                    node_filter_inner_variable = [
                                        node_filter_inner_variable
                                    ]
                                if isinstance(node_filter_inner_variable,
                                              list):
                                    for v in node_filter_inner_variable:
                                        NODE_FILTER_FACTS_VALUE += '[\"' + v + '\"]'
                                else:
                                    logging.error(
                                        "Provider configuration parameter "
                                        "\'ansible.node_filter: node_filter_inner_variable\' is missing "
                                        "or has unsupported value \'%s\'" %
                                        node_filter_inner_variable)
                                    sys.exit(1)

                            # ad-hoc ansible tasks that resolve the node
                            # filter value by querying the provider facts
                            tmp_ansible_tasks = [{
                                SOURCE: node_filter_source,
                                VALUE: NODE_FILTER_FACTS_REGISTER,
                                EXECUTOR: configuration_tool,
                                PARAMETERS: {}
                            }, {
                                SOURCE: SET_FACT_SOURCE,
                                PARAMETERS: {
                                    "target_objects":
                                    "\\{\\{ " + NODE_FILTER_FACTS_VALUE +
                                    " \\}\\}"
                                },
                                VALUE: "tmp_value",
                                EXECUTOR: configuration_tool
                            }, {
                                SOURCE: 'debug',
                                PARAMETERS: {
                                    'var': 'target_objects'
                                },
                                VALUE: "tmp_value",
                                EXECUTOR: configuration_tool
                            }, {
                                SOURCE: SET_FACT_SOURCE,
                                PARAMETERS: {
                                    "input_facts": '{{ target_objects }}'
                                },
                                EXECUTOR: configuration_tool,
                                VALUE: "tmp_value"
                            }, {
                                SOURCE: SET_FACT_SOURCE,
                                PARAMETERS: {
                                    "input_args": node_filter_params
                                },
                                EXECUTOR: configuration_tool,
                                VALUE: "tmp_value"
                            }, {
                                SOURCE: IMPORT_TASKS_MODULE,
                                PARAMETERS: "artifacts/equals.yaml",
                                EXECUTOR: configuration_tool,
                                VALUE: "tmp_value"
                            }, {
                                SOURCE: SET_FACT_SOURCE,
                                PARAMETERS: {
                                    # NOTE: escapes doubled to '\\{' so the
                                    # produced string is unchanged while
                                    # avoiding invalid-escape SyntaxWarnings
                                    node_filter_value:
                                    "\\{\\{ matched_object[\"" +
                                    node_filter_value + "\"] \\}\\}"
                                },
                                VALUE: "tmp_value",
                                EXECUTOR: configuration_tool
                            }]
                            arg = str(
                                execute(tmp_ansible_tasks, self.is_delete,
                                        self.cluster_name, node_filter_value))
                    self.configuration_args[arg_key] = arg
# Example #21
    def get_ansible_tasks_from_interface(self,
                                         element_object,
                                         target_directory,
                                         is_delete,
                                         operation,
                                         cluster_name,
                                         additional_args=None):
        """
        Build the ansible tasks implementing one interface operation of a
        node or relationship template.

        Scripts referenced by the operation's implementation are copied into
        the tmp clouni directory and included via IMPORT_TASKS_MODULE tasks;
        for non-primary operations their inputs are exported beforehand as
        set_fact tasks.

        :param element_object: provider template (node or relationship)
        :param target_directory: directory, relative to the tmp clouni dir,
            where artifact scripts are copied
        :param is_delete: delete-scenario flag (unused in this body, kept
            for interface symmetry with sibling generators)
        :param operation: operation name to look up in each interface
        :param cluster_name: cluster name, part of the tmp directory path
        :param additional_args: dict with optional 'global' and per-element
            keys; their merged result is attached to each import task
        :return: list of ansible task dicts
        """
        if additional_args is None:
            additional_args = {}
        else:
            # merge global args with element-specific args (element wins)
            additional_args_global = copy.deepcopy(
                additional_args.get('global', {}))
            additional_args_element = copy.deepcopy(
                additional_args.get(element_object.name, {}))
            additional_args = utils.deep_update_dict(additional_args_global,
                                                     additional_args_element)
        ansible_tasks = []
        scripts = []

        # 'primary' marks implementations given as a bare string (single
        # artifact) instead of a full operation definition with inputs
        primary = False
        for interface_name, interface in self.get_interfaces_from_node(
                element_object).items():
            interface_operation = interface.get(operation, {})
            if isinstance(interface_operation, six.string_types):
                implementations = interface_operation
                primary = True
            else:
                implementations = interface_operation.get(IMPLEMENTATION)
            (_, element_type, _) = utils.tosca_type_parse(element_object.type)
            # only 'Standard' node interfaces and 'Configure' relationship
            # interfaces are turned into tasks
            if (interface_name == 'Standard' and element_type == NODES
                    or interface_name == 'Configure' and element_type
                    == RELATIONSHIPS) and implementations is not None:
                if isinstance(implementations, six.string_types):
                    implementations = [implementations]
                if isinstance(
                        implementations,
                        dict) and 'primary' in implementations and isinstance(
                            implementations['primary'], six.string_types):
                    implementations = [implementations['primary']]
                    primary = True
                scripts.extend(implementations)
                for script in implementations:
                    # copy the artifact script into the tmp clouni dir,
                    # checking the cwd first, then the provider's ansible
                    # artifacts directory
                    target_filename = os.path.join(utils.get_tmp_clouni_dir(),
                                                   cluster_name,
                                                   target_directory, script)
                    os.makedirs(os.path.dirname(target_filename),
                                exist_ok=True)
                    script_filename_1 = os.path.join(os.getcwd(), script)
                    script_filename_2 = os.path.join(
                        self.get_ansible_artifacts_directory(), script)
                    if os.path.isfile(script_filename_1):
                        copyfile(script_filename_1, target_filename)
                    elif os.path.isfile(script_filename_2):
                        copyfile(script_filename_2, target_filename)
                    else:
                        # NOTE(review): only logged, generation continues
                        # with a dangling import task - confirm intended
                        logging.error(
                            "Artifact filename %s was not found in %s or %s" %
                            (script, script_filename_1, script_filename_2))
                    # NOTE(review): 'primary' is never reset between loop
                    # iterations, so a primary implementation in an earlier
                    # interface suppresses inputs of later ones - verify
                    if not primary and interface_operation.get(
                            INPUTS) is not None:
                        for input_name, input_value in interface_operation[
                                INPUTS].items():
                            ansible_tasks.append(
                                {SET_FACT: {
                                    input_name: input_value
                                }})
                    new_ansible_task = {IMPORT_TASKS_MODULE: target_filename}
                    new_ansible_task.update(additional_args)
                    ansible_tasks.append(new_ansible_task)
        return ansible_tasks
    def __init__(self, provider, node, relationship_templates, is_software_component=False):
        """
        Wrap a toscaparser NodeTemplate and precompute its configuration
        arguments, requirements and the relationship templates it references.

        :param provider: target provider name
        :param node: class NodeTemplate from toscaparser
        :param relationship_templates: list of RelationshipTemplate from toscaparser
        :param is_software_component: True if the node is a software component
        """
        # NOTE: added as a parameter in toscatranslator.providers.common.tosca_template:ProviderToscaTemplate

        self.provider = provider
        self.nodetemplate = node
        self.name = node.name
        self.type = node.type
        (_, _, type_name) = utils.tosca_type_parse(self.type)
        self.type_name = type_name
        # NOTE: filling the parameters from openstack definition to parse from input template
        node_type = node.type_definition  # toscaparser.elements.nodetype.NodeType
        self.definitions_by_name = None
        self.requirement_definitions = None
        self.set_definitions_by_name(node_type)
        self.attribute_keys = list(self.attribute_definitions_by_name().keys())
        self.property_keys = list(self.property_definitions_by_name().keys())
        self.requirement_keys = list(self.requirement_definitions_by_name().keys())
        self.artifact_keys = list(self.artifact_definitions_by_name().keys())
        self.capability_keys = list(self.capability_definitions_by_name().keys())
        self.relationship_templates = list()
        self.dependency_order = 0
        self.is_software_component = is_software_component
        self.host = None

        # NOTE: Get the parameters from template using provider definition
        self.configuration_args = dict()
        # NOTE: node is NodeTemplate instance
        for key in self.property_keys:
            value = node.get_property_value(key)
            if value is not None:
                self.configuration_args[key] = value

        for key in self.attribute_keys:
            value = node.entity_tpl.get(ATTRIBUTES, {}).get(key)
            if value is not None:
                self.configuration_args[key] = value

        capability_defs = self.capability_definitions_by_name()
        for cap_key, cap_def in capability_defs.items():
            properties = node.get_capabilities().get(cap_key)
            definition_property_keys = cap_def.get(PROPERTIES, {}).keys()
            if properties:
                for def_prop_key in definition_property_keys:
                    value = properties.get_property_value(def_prop_key)
                    if value:
                        self.configuration_args[def_prop_key] = value

        if hasattr(node, ARTIFACTS):
            # TODO: oneliner
            # NOTE(review): this unpacks pairs, so node.artifacts is
            # presumably an iterable of (key, value) tuples; iterating a
            # plain dict here would yield keys only - confirm upstream type
            for key, value in node.artifacts:
                self.configuration_args[key] = value

        relationship_template_names = set()
        provider_requirements = ProviderRequirements(self.requirement_definitions, self.provider)
        self.requirements = provider_requirements.get_requirements(node)
        # FIX(idiom): compare to None with 'is not' instead of '!='
        if self.requirements.get('host', None) is not None:
            self.host = self.requirements['host'][0].value
        self.node_filter_artifacts = []
        for key, req in self.requirements.items():
            if type(req) is list:
                self.configuration_args[key] = list(v.get_value() for v in req)
                temp_req = req
            else:
                self.configuration_args[key] = req.get_value()
                temp_req = [req]
            for v in temp_req:
                relation = v.relationship
                if relation is not None:
                    _, _, type_name = utils.tosca_type_parse(relation)
                    # an unparsable type means 'relation' is actually the
                    # name of a relationship template, not a type
                    if type_name is None:
                        relationship_template_names.add(relation)

        for relation in relationship_templates:
            if relation.name in relationship_template_names:
                self.relationship_templates.append(relation)

        self.node_priority = self.compute_node_priorities(node.custom_def)
# Example #23
    def gather_global_operations(self, element_object):
        """
        Collect all interface operations of a node or relationship template,
        order them by their operation-output dependencies and register them
        in self.global_operations_queue / self.global_operations_info.

        :param element_object: provider template (node or relationship)
        :return: None; exits the process if dependencies cannot be resolved
        """

        interfaces = []
        element_template_name = None
        (_, element_type, _) = utils.tosca_type_parse(element_object.type)
        if element_type == NODES:
            interfaces = self.get_interfaces_from_node(element_object)
            element_template_name = element_object.name
            op_required = self.list_get_operation_outputs(element_object.tmpl)
            self.manage_operation_output(op_required, element_template_name)
        elif element_type == RELATIONSHIPS:
            # NOTE interfaces can't be used as it contains the error ()
            interfaces = self.get_interfaces_from_relationship(element_object)
            element_template_name = element_object.name

        if not element_template_name:
            # neither a node nor a relationship - nothing to gather
            return

        # flatten to '<interface>_<operation>' -> operation data
        operations = {}
        for interface_name, ops in interfaces.items():
            for operation_name, operation_data in ops.items():
                operations['_'.join([interface_name.lower(),
                                     operation_name])] = operation_data

        # Sort operations by dependency
        prev_len = len(operations) + 1
        required_operations = {}
        for op_name, op in operations.items():
            if isinstance(op, six.string_types):
                # a bare string is shorthand for the implementation artifact
                operations[op_name] = {IMPLEMENTATION: op}
            op_required = self.list_get_operation_outputs(op)
            required_operations[op_name] = op_required
            self.manage_operation_output(op_required, element_template_name)

        # repeatedly queue operations whose required outputs are produced by
        # already-queued operations; stop when a full pass makes no progress
        # (the prev_len check prevents an infinite loop on cyclic deps)
        while len(operations) > 0 and prev_len > len(operations):
            ops_for_iter = copy.deepcopy(operations)
            prev_len = len(operations)
            for op_name, op in ops_for_iter.items():
                op_required = required_operations[op_name]
                if_executable_now = True
                for i in op_required:
                    if i[0] == SELF:
                        # SELF refers to this template itself
                        i[0] = element_template_name
                    # presumably i is [template, interface, operation, ...] -
                    # the first three parts name the producing operation
                    temp_op_name = '_'.join(i[:3]).lower()
                    if temp_op_name not in self.global_operations_queue:
                        if_executable_now = False
                        break
                if if_executable_now:
                    temp_op_name = '_'.join([element_template_name,
                                             op_name]).lower()
                    self.global_operations_queue.append(temp_op_name)
                    updating_op_info = {temp_op_name: op}
                    utils.deep_update_dict(self.global_operations_info,
                                           updating_op_info)
                    operations.pop(op_name)

        if len(operations) > 0:
            # leftover operations form an unresolvable dependency cycle
            logging.critical(
                "Resolving dependencies in template failed on element \'%s\'" %
                element_template_name)
            sys.exit(1)
# Example #24
    def to_dsl(self,
               provider,
               operations_graph,
               reversed_operations_graph,
               cluster_name,
               is_delete,
               artifacts=None,
               target_directory=None,
               inputs=None,
               outputs=None,
               extra=None,
               debug=False):
        """
        Generate (and, unless debug, execute) an Ansible playbook that runs
        the operation graph for the cluster, returning the playbook as YAML.

        :param provider: target provider name
        :param operations_graph: dict ProviderTemplate -> set of templates
            it depends on (one entry per node/relationship operation)
        :param reversed_operations_graph: the same graph with reversed
            edges, used for delete scenarios
        :param cluster_name: name of the cluster
        :param is_delete: True to generate a teardown playbook
        :param artifacts: list of artifact dicts, each containing NAME
        :param target_directory: directory for artifacts; defaults to
            self.initial_artifacts_directory
        :param inputs: template inputs used to initialize global variables
        :param outputs: unused here; kept for interface compatibility
        :param extra: extra arguments propagated to every generated task
        :param debug: if True, only build the playbook without running it
        :return: the playbook serialized with yaml.dump
        """
        if artifacts is None:
            artifacts = []
        if target_directory is None:
            target_directory = self.initial_artifacts_directory

        self.artifacts = {}
        for art in artifacts:
            self.artifacts[art[NAME]] = art

        provider_config = ProviderConfiguration(provider)
        ansible_config = provider_config.get_section(ANSIBLE)
        node_filter_config = provider_config.get_subsection(
            ANSIBLE, NODE_FILTER)

        ids_file_path = os.getcwd(
        ) + '/id_vars_' + cluster_name + self.get_artifact_extension()

        self.init_global_variables(inputs)

        operations_graph = self.init_graph(operations_graph)
        # the graph of operations at the moment is a dictionary of copies of ProviderTemplate objects,
        # of the form Node/Relationship: {the set of operations of Nodes/Relationships on which it depends}
        elements = TopologicalSorter(operations_graph)
        # use TopologicalSorter for creating graph

        if is_delete:
            # teardown traverses the dependency graph in reverse order
            reversed_operations_graph = self.init_graph(
                reversed_operations_graph)
            elements = TopologicalSorter(reversed_operations_graph)

        elements.prepare()
        # first operations from on top of the graph in state 'ready'

        ansible_playbook = []
        if not debug:
            self.prepare_for_run()
        # function for initializing tmp clouni directory
        q = Queue()
        # queue for node names + operations
        active = []
        # list of parallel active operations
        first = True

        while elements.is_active():
            node_name = None
            # try to get new finished operation from queue and find it in list of active
            # if get - mark done this operation (but before it find in graph)
            # if ready operations exists - get it and execute, remove from active
            try:
                node_name = q.get_nowait()
            except Exception:
                # BUGFIX: was a bare 'except:', which also swallowed
                # SystemExit/KeyboardInterrupt; nothing finished yet,
                # so sleep before polling again
                time.sleep(1)
            if node_name is not None:
                for node in active:
                    if node.name == node_name.split(
                            SEPARATOR
                    )[0] and node.operation == node_name.split(SEPARATOR)[1]:
                        active.remove(node)
                        elements.done(node)
            for v in elements.get_ready():
                # in delete mode we skip all operations except delete, and the create operation transforms to delete
                if is_delete:
                    if v.operation == 'create':
                        v.operation = 'delete'
                    else:
                        elements.done(v)
                        continue
                logging.debug("Creating ansible play from operation: %s" %
                              v.name + ':' + v.operation)
                extra_tasks_for_delete = self.get_extra_tasks_for_delete(
                    v.type, v.name.replace('-', '_'), ids_file_path)
                description_prefix, module_prefix = self.get_module_prefixes(
                    is_delete, ansible_config)
                description_by_type = self.ansible_description_by_type(
                    v.type_name, description_prefix)
                module_by_type = self.ansible_module_by_type(
                    v.type_name, module_prefix)
                ansible_play_for_elem = dict(name=description_prefix + ' ' +
                                             provider + ' cluster: ' + v.name +
                                             ':' + v.operation,
                                             hosts=self.default_host,
                                             tasks=[])
                # reload id_vars file
                if not is_delete and first:
                    first = False
                    ansible_play_for_elem['tasks'].append(
                        copy.deepcopy(
                            {FILE: {
                                PATH: ids_file_path,
                                STATE: 'absent'
                            }}))
                    ansible_play_for_elem['tasks'].append(
                        copy.deepcopy(
                            {FILE: {
                                PATH: ids_file_path,
                                STATE: 'touch'
                            }}))
                # create playbook for every operation
                if v.operation == 'delete':
                    if not v.is_software_component:
                        ansible_play_for_elem['tasks'].append(
                            copy.deepcopy({'include_vars': ids_file_path}))
                        ansible_tasks = self.get_ansible_tasks_for_delete(
                            v,
                            description_by_type,
                            module_by_type,
                            additional_args=extra)
                        ansible_tasks.extend(
                            self.get_ansible_tasks_from_interface(
                                v,
                                target_directory,
                                is_delete,
                                v.operation,
                                cluster_name,
                                additional_args=extra))
                        # some modules must not be deleted (configured list)
                        if not any(item == module_by_type
                                   for item in ansible_config.get(
                                       'modules_skipping_delete', [])):
                            ansible_play_for_elem['tasks'].extend(
                                copy.deepcopy(ansible_tasks))
                elif v.operation == 'create':
                    if not v.is_software_component:
                        ansible_play_for_elem['tasks'].extend(
                            copy.deepcopy(
                                self.get_ansible_tasks_for_inputs(inputs)))
                        ansible_tasks = self.get_ansible_tasks_for_create(
                            v,
                            target_directory,
                            node_filter_config,
                            description_by_type,
                            module_by_type,
                            additional_args=extra)
                        ansible_tasks.extend(
                            self.get_ansible_tasks_from_interface(
                                v,
                                target_directory,
                                is_delete,
                                v.operation,
                                cluster_name,
                                additional_args=extra))

                        ansible_play_for_elem['tasks'].extend(
                            copy.deepcopy(ansible_tasks))
                        ansible_play_for_elem['tasks'].extend(
                            copy.deepcopy(extra_tasks_for_delete))

                    else:
                        # software components run on their host machine
                        ansible_play_for_elem['hosts'] = v.host
                        ansible_play_for_elem['tasks'].extend(
                            copy.deepcopy(
                                self.get_ansible_tasks_from_interface(
                                    v,
                                    target_directory,
                                    is_delete,
                                    v.operation,
                                    cluster_name,
                                    additional_args=extra)))
                else:
                    (_, element_type, _) = utils.tosca_type_parse(v.type)
                    if element_type == NODES:
                        if v.is_software_component:
                            ansible_play_for_elem['hosts'] = v.host
                    # operations for relationships execute on the target/source host depending on the operation
                    elif element_type == RELATIONSHIPS:
                        if v.operation == 'pre_configure_target' or v.operation == 'post_configure_target' or v.operation == 'add_source':
                            for elem in operations_graph:
                                if elem.name == v.target:
                                    if elem.is_software_component:
                                        ansible_play_for_elem[
                                            'hosts'] = elem.host
                                    break
                        elif v.operation == 'pre_configure_source' or v.operation == 'post_configure_source':
                            for elem in operations_graph:
                                if elem.name == v.source:
                                    if elem.is_software_component:
                                        ansible_play_for_elem[
                                            'hosts'] = elem.host
                                    break
                        else:
                            logging.error(
                                "Unsupported operation for relationship in operation graph"
                            )
                            sys.exit(1)
                    else:
                        logging.error(
                            "Unsupported element type in operation graph")
                        sys.exit(1)
                    ansible_play_for_elem['tasks'].extend(
                        copy.deepcopy(
                            self.get_ansible_tasks_from_interface(
                                v,
                                target_directory,
                                is_delete,
                                v.operation,
                                cluster_name,
                                additional_args=extra)))
                ansible_playbook.append(ansible_play_for_elem)
                # run playbooks
                if not debug:
                    self.parallel_run([ansible_play_for_elem], v.name,
                                      v.operation, q, cluster_name)
                    # add element to active list
                    active.append(v)
                else:
                    elements.done(v)
        if is_delete:
            # final play removes the id_vars file left from the deploy
            last_play = dict(name='Renew id_vars_example.yaml',
                             hosts=self.default_host,
                             tasks=[])
            last_play['tasks'].append(
                copy.deepcopy({FILE: {
                    PATH: ids_file_path,
                    STATE: 'absent'
                }}))
            if not debug:
                self.parallel_run([last_play], None, None, q, cluster_name)
                done = q.get()
                if done != 'Done':
                    logging.error("Something wrong with multiprocessing queue")
                    sys.exit(1)
            ansible_playbook.append(last_play)
        # delete dir with cluster_name in tmp clouni dir
        if not debug:
            rmtree(os.path.join(utils.get_tmp_clouni_dir(), cluster_name))
        return yaml.dump(ansible_playbook,
                         default_flow_style=False,
                         sort_keys=False)