def handle_expansion(self):
    """Expand the scaling policy into receiver and alarm HOT resources.

    For every (target node, trigger) pair an ``OS::Senlin::Receiver`` is
    generated from the trigger's action, and for every trigger an
    ``OS::Aodh::GnocchiAggregationByResourcesAlarm`` is generated whose
    ``alarm_actions`` fire all receivers collected for that trigger.

    :returns: list of HotResource objects to append to the template.
    """
    hot_resources = []
    hot_type = 'OS::Aodh::GnocchiAggregationByResourcesAlarm'
    trigger_receivers = defaultdict(list)
    triggers = self.policy.entity_tpl['triggers']
    for node in self.policy.properties['targets']:
        for trigger in triggers:
            # The action map is expected to hold one entry; iterating
            # keeps the original last-one-wins behavior if it has more.
            for action, action_sample in triggers[trigger]['action'].items():
                scale_name = action
                scale_type = action_sample['type']
                scale_implement = action_sample['implementation']
            # implementation is "<entity>.<method>"; only the method part
            # is used as the receiver type.
            (_entity, method) = scale_implement.split('.')
            receiver_prop = {
                'cluster': {"get_resource": "%s_cluster" % node},
                'action': SCALE_TYPE[scale_type],
                'type': method,
            }
            receiver_name = node + '_' + scale_name + '_receiver'
            trigger_receivers[trigger].append(receiver_name)
            hot_resources.append(
                HotResource(self.nodetemplate,
                            type='OS::Senlin::Receiver',
                            name=receiver_name,
                            properties=receiver_prop))

    for trigger in triggers:
        sample = triggers[trigger]['condition']
        # constraint is e.g. "cpu_util gt 80%"
        (meter_name, comparison_operator, threshold) = \
            sample["constraint"].split()
        threshold = threshold.strip("%")
        alarm_prop = {}
        alarm_prop["description"] = self.policy.entity_tpl['description']
        alarm_prop["metric"] = triggers[trigger]['event_type']['metric']
        alarm_prop["aggregation_method"] = \
            ALARM_STATISTIC[sample['aggregation_method']]
        alarm_prop["granularity"] = sample["granularity"]
        alarm_prop["evaluation_periods"] = sample["evaluations"]
        alarm_prop["threshold"] = threshold
        alarm_prop["resource_type"] = sample.get("resource_type",
                                                 "instance")
        alarm_prop["comparison_operator"] = \
            ALARM_COMPARISON_OPERATOR[comparison_operator]
        alarm_prop["repeat_actions"] = "True"
        # Fire every receiver registered for this trigger.
        alarm_prop["alarm_actions"] = [
            {'get_attr': [receiver, 'channel', 'alarm_url']}
            for receiver in trigger_receivers[trigger]]
        hot_resources.append(
            HotResource(self.nodetemplate,
                        type=hot_type,
                        name=trigger + '_alarm',
                        properties=alarm_prop))
    return hot_resources
 def _create_connect_config(self, source_node, target_name,
                            connect_interface):
     """Create the SoftwareConfig resources for a ConnectsTo interface.

     The configuration script can be attached either to the target
     (``pre_configure_target``) or to the source
     (``pre_configure_source``); exactly one must be present.

     :param source_node: TOSCA node the relationship originates from.
     :param target_name: name of the TOSCA target node.
     :param connect_interface: interface dict of the relationship.
     :returns: list with the SoftwareConfig resource and its deployment.
     :raises Exception: when neither configuration hook is defined.
     """
     connectsto_resources = []
     target_node = self._find_tosca_node(target_name)
     # The configuration can occur on the source or the target.
     connect_config = connect_interface.get('pre_configure_target')
     if connect_config is not None:
         config_location = 'target'
         config_node = target_node
     else:
         connect_config = connect_interface.get('pre_configure_source')
         if connect_config is not None:
             config_location = 'source'
             config_node = source_node
         else:
             # Fix: the format field was "{1}" with a single positional
             # argument, which raised IndexError instead of the intended
             # error message.
             msg = _("Template error:  "
                     "no configuration found for ConnectsTo "
                     "in {0}").format(self.nodetemplate.name)
             log.error(msg)
             raise Exception(msg)
     config_name = source_node.name + '_' + target_name + '_connect_config'
     implement = connect_config.get('implementation')
     cwd = os.getcwd()
     try:
         if self.csar_dir:
             # Resolve the script path relative to the CSAR directory.
             os.chdir(self.csar_dir)
             get_file = os.path.abspath(implement)
         else:
             get_file = implement
         hot_config = HotResource(config_node,
                                  config_name,
                                  'OS::Heat::SoftwareConfig',
                                  {'config': {
                                      'get_file': get_file
                                  }},
                                  csar_dir=self.csar_dir)
     finally:
         # Always restore the working directory, even on failure.
         os.chdir(cwd)
     connectsto_resources.append(hot_config)
     hot_target = self._find_hot_resource_for_tosca(target_name)
     hot_source = self._find_hot_resource_for_tosca(source_node.name)
     connectsto_resources.append(
         hot_config.handle_connectsto(source_node, target_node, hot_source,
                                      hot_target, config_location,
                                      connect_interface))
     return connectsto_resources
 def _translate_get_operation_output_function(self, args, tosca_template):
     """Translate a TOSCA get_operation_output call into a HOT get_attr.

     args layout: [node, interface, operation, output]. Registers the
     requested output name on the matching SoftwareConfig so Heat exposes
     it, then returns a get_attr on the deployment resource. Returns None
     (implicitly) when the target, operation, or deployment cannot be
     resolved.
     """
     tosca_target = self._find_tosca_node(args[0],
                                          tosca_template)
     if tosca_target and len(args) >= 4:
         operations = HotResource.get_all_operations(tosca_target)
         # ignore Standard interface name,
         # it is the only one supported in the translator anyway
         op_name = args[2]
         output_name = args[3]
         if op_name in operations:
             operation = operations[op_name]
             if operation in self.hot_lookup:
                 matching_deploy = self.hot_lookup[operation]
                 matching_config_name = matching_deploy.\
                     properties['config']['get_resource']
                 matching_config = self.find_hot_resource(
                     matching_config_name)
                 if matching_config:
                     # Ensure the output is declared on the config so the
                     # deployment actually captures it.
                     outputs = matching_config.properties.get('outputs')
                     if outputs is None:
                         outputs = []
                     outputs.append({'name': output_name})
                     matching_config.properties['outputs'] = outputs
                 return {'get_attr': [
                     matching_deploy.name,
                     output_name
                 ]}
Example #4
0
 def handle_properties(self, resources):
     """Set the scaling-policy properties and wrap each target resource
     in an OS::Heat::AutoScalingGroup.

     :param resources: list of HotResource objects for the template.
     :returns: the same list with targets replaced by scaling groups.
     """
     for node in self.policy.targets:
         self.properties = {}
         self.properties["auto_scaling_group_id"] = {'get_resource': node}
         # Heat only accepts the exact keyword; the original value had a
         # trailing space ("change_in_capacity ") which fails validation.
         self.properties["adjustment_type"] = "change_in_capacity"
         self.properties["scaling_adjustment"] = self.\
             policy.entity_tpl["properties"]["increment"]
     for index, resource in enumerate(resources):
         if resource.name in self.policy.targets and \
                 resource.type != 'OS::Heat::AutoScalingGroup':
             temp = self.policy.entity_tpl["properties"]
             res = {
                 "min_size": temp["min_instances"],
                 "max_size": temp["max_instances"],
                 "default_instances": temp["default_instances"],
                 "resources": {
                     'type': resource.type,
                     'properties': resource.properties,
                 },
             }
             # Replace the original resource with its scaling-group
             # wrapper in place (list length is unchanged, so the
             # enumerate iteration stays valid).
             resources[index] = HotResource(
                 resource,
                 type='OS::Heat::AutoScalingGroup',
                 name=resource.name,
                 properties=res)
     return resources
Example #5
0
 def _translate_get_operation_output_function(self, args, tosca_template):
     """Translate a TOSCA get_operation_output into a HOT get_attr.

     args layout: [node, interface, operation, output]. The requested
     output is registered on the matching SoftwareConfig so Heat exposes
     it; returns None when anything in the chain cannot be resolved.
     """
     tosca_target = self._find_tosca_node(args[0], tosca_template)
     if not tosca_target or len(args) < 4:
         return None
     # args[1] is the interface name; only Standard is supported by the
     # translator, so it is skipped.
     op_name = args[2]
     output_name = args[3]
     operations = HotResource.get_all_operations(tosca_target)
     if op_name not in operations:
         return None
     operation = operations[op_name]
     if operation not in self.hot_lookup:
         return None
     matching_deploy = self.hot_lookup[operation]
     matching_config_name = \
         matching_deploy.properties['config']['get_resource']
     matching_config = self.find_hot_resource(matching_config_name)
     if matching_config:
         # Declare the output on the config so the deployment captures it.
         outputs = matching_config.properties.get('outputs')
         if outputs is None:
             outputs = []
         outputs.append({'name': output_name})
         matching_config.properties['outputs'] = outputs
     return {'get_attr': [matching_deploy.name, output_name]}
 def handle_expansion(self):
     """Create one OS::Aodh::EventAlarm resource per filtered action."""
     return [
         HotResource(self.nodetemplate,
                     type='OS::Aodh::EventAlarm',
                     name=action_name,
                     properties=self._get_event_type(action_name))
         for action_name in self.filter
     ]
Example #7
0
    def handle_expansion(self):
        """Generate the OS::Neutron::Subnet resource for this network.

        Returns None when the network is hidden (no subnet is needed),
        otherwise a single-element list with the subnet resource.
        Raises InvalidPropertyValueError when only one end of the
        allocation-pool range is given.
        """
        # If the network resource should not be output (it is hidden),
        # there is no need to generate a subnet resource.
        if self.hide_resource:
            return

        tosca_props = self._get_tosca_props(
            self.nodetemplate.get_properties_objects())

        subnet_props = {}
        ip_pool_start = None
        ip_pool_end = None

        for key, value in tosca_props.items():
            if key not in self.SUBNET_PROPS:
                continue
            if key == 'start_ip':
                ip_pool_start = value
            elif key == 'end_ip':
                ip_pool_end = value
            elif key == 'dhcp_enabled':
                # TOSCA name differs from the Heat property name.
                subnet_props['enable_dhcp'] = value
            else:
                subnet_props[key] = value

        if 'network_id' in tosca_props:
            subnet_props['network'] = tosca_props['network_id']
        else:
            subnet_props['network'] = '{ get_resource: %s }' % (self.name)

        # Allocation pools require both ends of the range; providing only
        # one of start_ip/end_ip is a template error.
        if ip_pool_start and ip_pool_end:
            subnet_props['allocation_pools'] = [
                {'start': ip_pool_start, 'end': ip_pool_end}
            ]
        elif ip_pool_start:
            raise InvalidPropertyValueError(what='start_ip')
        elif ip_pool_end:
            raise InvalidPropertyValueError(what='end_ip')

        return [
            HotResource(self.nodetemplate,
                        type='OS::Neutron::Subnet',
                        name=self.name + self.SUBNET_SUFFIX,
                        properties=subnet_props)
        ]
 def handle_expansion(self):
     """Handle monitoring resources in case of multiple triggers.

     Builds one OS::Aodh::Alarm per trigger not yet present in
     ``self.filter``, recording each handled trigger name in the filter.
     """
     extra_resources = []
     for trigger_name, trigger_dict in \
             self.policy.entity_tpl["triggers"].items():
         if trigger_name in self.filter:
             # Already handled elsewhere; skip duplicates.
             continue
         self.filter.append(trigger_name)
         extra_resources.append(
             HotResource(self.nodetemplate,
                         type='OS::Aodh::Alarm',
                         name=trigger_name,
                         properties=self._get_monitoring_prop(trigger_dict)))
     return extra_resources
Example #9
0
 def handle_expansion(self):
     """Build the Ceilometer alarm for the resize_compute trigger."""
     condition = self.policy.\
         entity_tpl["triggers"]["resize_compute"]["condition"]
     # NOTE(review): threshold is filled from the "evaluations" field of
     # the condition -- looks odd but matches sibling handlers; confirm
     # against the policy schema.
     prop = {
         "description": self.policy.entity_tpl['description'],
         "meter_name": "cpu_util",
         "statistic": condition["method"],
         "period": condition["period"],
         "threshold": condition["evaluations"],
         "comparison_operator": "gt",
     }
     alarm = HotResource(self.nodetemplate,
                         type='OS::Ceilometer::Alarm',
                         name=self.name + '_alarm',
                         properties=prop)
     return [alarm]
    def handle_properties(self, resources):
        """Set scaling-policy properties and move target resources into a
        nested AutoScalingGroup template.

        Targets (and any resource type not in SCALING_RESOURCES) are moved
        into a nested template referenced by the group; the group resource
        replaces them in the returned list.

        :param resources: list of HotResource objects for the template.
        :returns: filtered resource list with the scaling group appended.
        """
        self.properties = {}
        self.properties["auto_scaling_group_id"] = {
            'get_resource': self.policy.name + '_group'
        }
        # Heat only accepts the exact keyword; the original value had a
        # trailing space ("change_in_capacity ") which fails validation.
        self.properties["adjustment_type"] = "change_in_capacity"
        self.properties["scaling_adjustment"] = self.\
            policy.entity_tpl["properties"]["increment"]
        self.properties["cooldown"] =\
            self.policy.entity_tpl["properties"]["cooldown"]
        delete_res_names = []
        scale_res = []
        # Guard: the original left this unbound (NameError at the final
        # append) when no resource matched a policy target.
        scaling_resources = None
        for resource in resources:
            if resource.name in self.policy.targets and \
                    resource.type != 'OS::Heat::AutoScalingGroup':
                temp = self.policy.entity_tpl["properties"]
                res = {
                    "min_size": temp["min_instances"],
                    "max_size": temp["max_instances"],
                    "desired_capacity": temp["default_instances"],
                    "cooldown": temp["cooldown"],
                    # The group instantiates the nested template built below.
                    'resource': {'type': self.policy.name + '_res.yaml'},
                }
                scaling_resources = HotResource(
                    resource,
                    type='OS::Heat::AutoScalingGroup',
                    name=self.policy.name + '_group',
                    properties=res)

            if resource.type not in SCALING_RESOURCES:
                # Moved into the nested template and removed from the top
                # level below.
                delete_res_names.append(resource.name)
                scale_res.append(resource)
        yaml_name = self.policy.name + '_res.yaml'
        self.nested_template = self._handle_nested_template(
            scale_res,
            yaml_name,
            self.hot_template_parameters)
        resources = [tmp_res
                     for tmp_res in resources
                     if tmp_res.name not in delete_res_names]
        if scaling_resources is not None:
            resources.append(scaling_resources)
        return resources
    def handle_expansion(self):
        """Build an OS::Nova::Flavor resource from the node's
        virtual_compute capability properties.
        """
        flavor_props = {}
        capability = self.nodetemplate.get_capability('virtual_compute')

        for cap_prop in capability.get_properties_objects():
            if cap_prop.name == 'virtual_cpu':
                flavor_props['vcpus'] = cap_prop.value['num_virtual_cpu']
            elif cap_prop.name == 'virtual_memory':
                # Nova flavor RAM is expressed in MiB.
                flavor_props['ram'] = MemoryUnit.convert_unit_size_to_num(
                    cap_prop.value['virtual_mem_size'], 'MiB')
            elif cap_prop.name == 'virtual_local_storage':
                # Nova flavor disk is expressed in GiB.
                flavor_props['disk'] = MemoryUnit.convert_unit_size_to_num(
                    cap_prop.value[0]['size_of_storage'], 'GiB')
            elif cap_prop.name == 'compute_requirements':
                if cap_prop.value.get('nova_extra_specs') is not None:
                    nova_es = cap_prop.value['nova_extra_specs']
                    try:
                        # Accept only YAML-parsable extra specs.
                        yaml.safe_load(nova_es)
                        flavor_props['extra_specs'] = nova_es
                    except Exception:
                        log.warning(('Unsupported format of '
                                     'compute_requirements, '
                                     'vdu_name:%s, nova_extra_specs:%s')
                                    % (self.name, nova_es))

        return [
            HotResource(
                self.nodetemplate,
                type='OS::Nova::Flavor',
                name=self.flavor_resource_name,
                properties=flavor_props,
            )
        ]
 def handle_expansion(self):
     """Build an OS::Aodh::Alarm for the resize_compute trigger.

     Returns None when the policy defines no triggers.
     """
     if not self.policy.entity_tpl.get('triggers'):
         return None
     condition = self.policy.\
         entity_tpl["triggers"]["resize_compute"]["condition"]
     prop = {}
     prop["description"] = self.policy.entity_tpl.get('description')
     prop["meter_name"] = "cpu_util"
     if condition:
         prop["statistic"] = ALARM_STATISTIC[condition["method"]]
         prop["period"] = condition["period"]
         # NOTE(review): threshold comes from "evaluations" -- matches
         # sibling handlers; confirm against the policy schema.
         prop["threshold"] = condition["evaluations"]
     prop["comparison_operator"] = "gt"
     # Scale-in/out pairs share one alarm, so strip the suffix.
     base_name = self.name.replace('_scale_in', '').\
         replace('_scale_out', '')
     return [HotResource(self.nodetemplate,
                         type='OS::Aodh::Alarm',
                         name=base_name + '_alarm',
                         properties=prop)]
 def handle_expansion(self):
     """Build a Gnocchi aggregation alarm for the resize_compute trigger.

     Returns None when the policy defines no triggers.
     """
     if not self.policy.entity_tpl.get('triggers'):
         return None
     condition = self.policy.\
         entity_tpl["triggers"]["resize_compute"]["condition"]
     prop = {}
     prop["description"] = self.policy.entity_tpl.get('description')
     prop["metric"] = "cpu_util"
     if condition:
         prop["aggregation_method"] = \
             ALARM_STATISTIC[condition["aggregation_method"]]
         prop["granularity"] = condition["granularity"]
         # NOTE(review): threshold comes from "evaluations" -- matches
         # sibling handlers; confirm against the policy schema.
         prop["threshold"] = condition["evaluations"]
         prop["resource_type"] = condition.get("resource_type", "instance")
     prop["comparison_operator"] = "gt"
     # Scale-in/out pairs share one alarm, so strip the suffix.
     base_name = self.name.replace('_scale_in', '').\
         replace('_scale_out', '')
     return [HotResource(self.nodetemplate,
                         type='OS::Aodh::GnocchiAggregationByResourcesAlarm',
                         name=base_name + '_alarm',
                         properties=prop)]
    def translate_param_value(self, param_value, resource):
        """Resolve a TOSCA parameter value into its HOT equivalent.

        Dispatches on the TOSCA intrinsic functions get_property,
        get_attribute, get_artifact, get_input, get_operation_output and
        concat -- accepted both as parser function objects and as raw
        dicts -- recursing into nested values and lists. Plain base-type
        values pass through unchanged; anything unrecognized yields None.
        """
        tosca_template = None
        if resource:
            tosca_template = resource.nodetemplate

        get_property_args = None
        if isinstance(param_value, GetProperty):
            get_property_args = param_value.args
        # to remove when the parser is fixed to return GetProperty
        elif isinstance(param_value, dict) and 'get_property' in param_value:
            get_property_args = param_value['get_property']
        if get_property_args is not None:
            tosca_target, prop_name, prop_arg = \
                self.decipher_get_operation(get_property_args,
                                            tosca_template)
            if tosca_target:
                prop_value = tosca_target.get_property_value(prop_name)
                if prop_value:
                    # The property value may itself contain intrinsic
                    # functions, so translate it recursively.
                    prop_value = self.translate_param_value(
                        prop_value, resource)
                    return self._unfold_value(prop_value, prop_arg)
        get_attr_args = None
        if isinstance(param_value, GetAttribute):
            get_attr_args = param_value.result().args
        # to remove when the parser is fixed to return GetAttribute
        elif isinstance(param_value, dict) and 'get_attribute' in param_value:
            get_attr_args = param_value['get_attribute']
        if get_attr_args is not None:
            # for the attribute
            # get the proper target type to perform the translation
            tosca_target, attr_name, attr_arg = \
                self.decipher_get_operation(get_attr_args, tosca_template)
            attr_args = []
            if attr_arg:
                attr_args += attr_arg
            if tosca_target:
                if tosca_target in self.hot_lookup:
                    attr_value = self.hot_lookup[tosca_target].\
                        get_hot_attribute(attr_name, attr_args)
                    attr_value = self.translate_param_value(
                        attr_value, resource)
                    return self._unfold_value(attr_value, attr_arg)
        elif isinstance(param_value, dict) and 'get_artifact' in param_value:
            get_artifact_args = param_value['get_artifact']
            tosca_target, artifact_name, _ = \
                self.decipher_get_operation(get_artifact_args,
                                            tosca_template)

            if tosca_target:
                artifacts = HotResource.get_all_artifacts(tosca_target)
                if artifact_name in artifacts:
                    artifact = artifacts[artifact_name]
                    # Only plain file artifacts are mapped to get_file.
                    if artifact.get('type', None) == 'tosca.artifacts.File':
                        return {'get_file': artifact.get('file')}
        get_input_args = None
        if isinstance(param_value, GetInput):
            get_input_args = param_value.args
        elif isinstance(param_value, dict) and 'get_input' in param_value:
            get_input_args = param_value['get_input']
        if get_input_args is not None:
            # A single-element arg list unwraps to a scalar get_param.
            if isinstance(get_input_args, list) \
                    and len(get_input_args) == 1:
                return {'get_param': self.translate_param_value(
                    get_input_args[0], resource)}
            else:
                return {'get_param': self.translate_param_value(
                    get_input_args, resource)}
        elif isinstance(param_value, dict) \
                and 'get_operation_output' in param_value:
            res = self._translate_get_operation_output_function(
                param_value['get_operation_output'], tosca_template)
            if res:
                return res
        concat_list = None
        if isinstance(param_value, Concat):
            concat_list = param_value.args
        elif isinstance(param_value, dict) and 'concat' in param_value:
            concat_list = param_value['concat']
        if concat_list is not None:
            res = self._translate_concat_function(concat_list, resource)
            if res:
                return res

        if isinstance(param_value, list):
            # Translate each element; elements that translate to a falsy
            # value are dropped from the result.
            translated_list = []
            for elem in param_value:
                translated_elem = self.translate_param_value(elem, resource)
                if translated_elem:
                    translated_list.append(translated_elem)
            return translated_list

        if isinstance(param_value, BASE_TYPES):
            return param_value

        return None
    def handle_properties(self, resources):
        """Convert scaling targets into Senlin profiles, clusters and
        scaling policies.

        Neutron ports/nets are folded into per-server network lists and
        removed; each target server becomes an OS::Senlin::Profile plus an
        OS::Senlin::Cluster; each trigger becomes an OS::Senlin::Policy
        (the last trigger configures this resource itself instead of
        creating a new one).
        """
        remove_resources = []
        networks = defaultdict(list)
        for index, resource in enumerate(resources):
            if resource.type == 'OS::Neutron::Port':
                for hot_resource in resource.depends_on_nodes:
                    # NOTE(review): networks are keyed by the port's
                    # non-Net dependencies (presumably the servers that
                    # use the port) -- confirm this keying is intended.
                    if hot_resource.type != 'OS::Neutron::Net':
                        networks[hot_resource.name].\
                            append(
                            {'network': '%s' % resource.properties['network']}
                            )
                remove_resources.append(resource)
            elif resource.type == 'OS::Neutron::Net':
                remove_resources.append(resource)
            elif resource.name in self.policy.properties['targets'] and \
                resource.type != 'OS::Senlin::Policy':
                props = {}
                # These Nova-server-only keys are dropped before embedding
                # the server spec in the Senlin profile.
                del resource.properties['user_data_format']
                del resource.properties['networks']
                props['type'] = SERVER_TYPE
                props['properties'] = resource.properties
                profile_resources = \
                    HotResource(resource,
                                type='OS::Senlin::Profile',
                                name=resource.name,
                                properties=props)
                # pop+insert at the same index keeps the list length
                # stable, so the enumerate iteration remains valid.
                resources.pop(index)
                resources.insert(index, profile_resources)
        for remove_resource in remove_resources:
            resources.remove(remove_resource)

        # Re-attach the collected network lists to the profiles.
        for index, resource in enumerate(resources):
            if resource.name in self.policy.properties['targets']:
                resource.properties['properties']['networks'] = \
                    networks[resource.name]

        # One Senlin cluster per target, sized from the policy properties.
        for node in self.policy.properties['targets']:
            props = {}
            props["profile"] = {'get_resource': '%s' % node}
            temp = self.policy.entity_tpl["properties"]
            props["min_size"] = temp["min_instances"]
            props["max_size"] = temp["max_instances"]
            props["desired_capacity"] = temp["default_instances"]
            self.cluster_name = '%s_cluster' % node
            cluster_resources = \
                HotResource(self.nodetemplate,
                            type='OS::Senlin::Cluster',
                            name=self.cluster_name,
                            properties=props)
            resources.append(cluster_resources)

        trigger_num = len(self.policy.entity_tpl['triggers'])
        for num, trigger in enumerate(self.policy.entity_tpl['triggers']):
            target_cluster_nodes = []
            # Last action's type wins if a trigger defines several.
            for action in self.policy.\
                entity_tpl['triggers'][trigger]['action']:
                scale_type = self.policy.\
                    entity_tpl['triggers'][trigger]['action'][action]['type']
            for node in self.policy.properties['targets']:
                target_cluster_nodes.\
                    append({"get_resource": "%s_cluster" % node})
            cluster_scale_type = SCALE_TYPE[scale_type]
            scale_in_props = \
                self._generate_scale_properties(target_cluster_nodes,
                                                cluster_scale_type)
            # The final trigger configures this policy resource itself
            # rather than creating an extra one.
            if num == trigger_num - 1:
                self.name = self.name + '_' + trigger
                self.properties = scale_in_props
                break
            policy_resources = \
                HotResource(self.nodetemplate,
                            type='OS::Senlin::Policy',
                            name=self.name + '_' + trigger,
                            properties=scale_in_props)
            resources.append(policy_resources)
        return resources
    def _translate_nodetemplates(self):

        log.debug(_('Translating the node templates.'))
        suffix = 0
        # Copy the TOSCA graph: nodetemplate
        for node in self.nodetemplates:
            base_type = HotResource.get_base_type(node.type_definition)
            hot_node = TOSCA_TO_HOT_TYPE[base_type.type](node)
            self.hot_resources.append(hot_node)
            self.hot_lookup[node] = hot_node

            # BlockStorage Attachment is a special case,
            # which doesn't match to Heat Resources 1 to 1.
            if base_type.type == "tosca.nodes.Compute":
                volume_name = None
                requirements = node.requirements
                if requirements:
                    # Find the name of associated BlockStorage node
                    for requires in requirements:
                        for value in requires.values():
                            if isinstance(value, dict):
                                for node_name in value.values():
                                    for n in self.nodetemplates:
                                        if n.name == node_name:
                                            volume_name = node_name
                                            break
                            else:  # unreachable code !
                                for n in self.nodetemplates:
                                    if n.name == node_name:
                                        volume_name = node_name
                                        break

                    suffix = suffix + 1
                    attachment_node = self._get_attachment_node(node,
                                                                suffix,
                                                                volume_name)
                    if attachment_node:
                        self.hot_resources.append(attachment_node)
                for i in self.tosca.inputs:
                    if (i.name == 'key_name' and
                            node.get_property_value('key_name') is None):
                        schema = {'type': i.type, 'default': i.default}
                        value = {"get_param": "key_name"}
                        prop = Property(i.name, value, schema)
                        node._properties.append(prop)

        for policy in self.policies:
            policy_type = policy.type_definition
            policy_node = TOSCA_TO_HOT_TYPE[policy_type.type](policy)
            self.hot_resources.append(policy_node)

        # Handle life cycle operations: this may expand each node
        # into multiple HOT resources and may change their name
        lifecycle_resources = []
        for resource in self.hot_resources:
            expanded = resource.handle_life_cycle()
            if expanded:
                lifecycle_resources += expanded
        self.hot_resources += lifecycle_resources

        # Handle configuration from ConnectsTo relationship in the TOSCA node:
        # this will generate multiple HOT resources, set of 2 for each
        # configuration
        connectsto_resources = []
        for node in self.nodetemplates:
            for requirement in node.requirements:
                for endpoint, details in six.iteritems(requirement):
                    relation = None
                    if isinstance(details, dict):
                        target = details.get('node')
                        relation = details.get('relationship')
                    else:
                        target = details
                    if (target and relation and
                            not isinstance(relation, six.string_types)):
                        interfaces = relation.get('interfaces')
                        connectsto_resources += \
                            self._create_connect_configs(node,
                                                         target,
                                                         interfaces)
        self.hot_resources += connectsto_resources

        # Copy the initial dependencies based on the relationship in
        # the TOSCA template
        for node in self.nodetemplates:
            for node_depend in node.related_nodes:
                # if the source of dependency is a server and the
                # relationship type is 'tosca.relationships.HostedOn',
                # add dependency as properties.server
                if node_depend.type == 'tosca.nodes.Compute' and \
                   node.related[node_depend].type == \
                   node.type_definition.HOSTEDON:
                    self.hot_lookup[node].properties['server'] = \
                        {'get_resource': self.hot_lookup[node_depend].name}
                # for all others, add dependency as depends_on
                else:
                    self.hot_lookup[node].depends_on.append(
                        self.hot_lookup[node_depend].top_of_chain())

                self.hot_lookup[node].depends_on_nodes.append(
                    self.hot_lookup[node_depend].top_of_chain())

        # handle hosting relationship
        for resource in self.hot_resources:
            resource.handle_hosting()

        # handle built-in properties of HOT resources
        # if a resource depends on other resources,
        # their properties need to be handled first.
        # Use recursion to handle the properties of the
        # dependent nodes in correct order
        self.processed_resources = []
        for resource in self.hot_resources:
            self._recursive_handle_properties(resource)

        # handle resources that need to expand to more than one HOT resource
        expansion_resources = []
        for resource in self.hot_resources:
            expanded = resource.handle_expansion()
            if expanded:
                expansion_resources += expanded
        self.hot_resources += expansion_resources

        # Resolve function calls:  GetProperty, GetAttribute, GetInput
        # at this point, all the HOT resources should have been created
        # in the graph.
        for resource in self.hot_resources:
            # traverse the reference chain to get the actual value
            inputs = resource.properties.get('input_values')
            if inputs:
                for name, value in six.iteritems(inputs):
                    inputs[name] = self._translate_input(value, resource)

        return self.hot_resources
    def _translate_nodetemplates(self):
        """Translate TOSCA node templates and policies into HOT resources.

        Builds the HotResource graph for all node templates, remaps
        scaling/monitoring/placement policy types onto their HOT
        counterparts, expands life-cycle operations, ConnectsTo
        configurations and multi-resource nodes, copies the TOSCA
        relationship graph into HOT dependencies, resolves intrinsic
        functions found in 'input_values', and finally prunes resources
        that ended up without a type.

        :returns: list of HotResource objects for the template
        :raises UnsupportedTypeError: if a node or policy type has no
            mapping in TOSCA_TO_HOT_TYPE
        """
        log.debug(_('Translating the node templates.'))
        suffix = 0
        # Copy the TOSCA graph: nodetemplate
        for node in self.nodetemplates:
            base_type = HotResource.get_base_type_str(node.type_definition)
            if base_type not in TOSCA_TO_HOT_TYPE:
                raise UnsupportedTypeError(type=_('%s') % base_type)
            hot_node = TOSCA_TO_HOT_TYPE[base_type](node,
                                                    csar_dir=self.csar_dir)
            self.hot_resources.append(hot_node)
            self.hot_lookup[node] = hot_node

            # BlockStorage Attachment is a special case,
            # which doesn't match to Heat Resources 1 to 1.
            if base_type == "tosca.nodes.Compute":
                requirements = node.requirements
                if requirements:
                    # Find the name of associated BlockStorage node
                    for requires in requirements:
                        volume_name = None
                        for value in requires.values():
                            if isinstance(value, dict):
                                for node_name in value.values():
                                    for n in self.nodetemplates:
                                        if n.name == node_name and \
                                            n.is_derived_from(
                                                "tosca.nodes.BlockStorage"):
                                            volume_name = node_name
                                            break
                            else:
                                # NOTE(review): 'node_name' below comes from
                                # the dict branch above and may be stale or
                                # unbound here; it looks like this should be
                                # 'value' — confirm before changing.
                                for n in self.nodetemplates:
                                    if n.name == value and \
                                        n.is_derived_from(
                                            "tosca.nodes.BlockStorage"):
                                        volume_name = node_name
                                        break

                        if volume_name:
                            suffix = suffix + 1
                            attachment_node = self._get_attachment_node(
                                node, suffix, volume_name)
                            if attachment_node:
                                self.hot_resources.append(attachment_node)
                # Compute nodes without an explicit key_name inherit the
                # template-level 'key_name' input as a get_param property.
                for i in self.tosca.inputs:
                    if (i.name == 'key_name'
                            and node.get_property_value('key_name') is None):
                        schema = {'type': i.type, 'default': i.default}
                        value = {"get_param": "key_name"}
                        prop = Property(i.name, value, schema)
                        node._properties.append(prop)

        # Map derived policy types onto the generic Scaling/Monitoring/
        # Placement handlers before instantiating the policy resources.
        for policy in self.policies:
            policy_type = policy.type_definition
            if policy.is_derived_from('tosca.policies.Scaling') and \
               policy_type.type != 'tosca.policies.Scaling.Cluster':
                TOSCA_TO_HOT_TYPE[policy_type.type] = \
                    TOSCA_TO_HOT_TYPE['tosca.policies.Scaling']
            if policy.is_derived_from('tosca.policies.Monitoring'):
                TOSCA_TO_HOT_TYPE[policy_type.type] = \
                    TOSCA_TO_HOT_TYPE['tosca.policies.Monitoring']
            if policy.is_derived_from('tosca.policies.Placement'):
                TOSCA_TO_HOT_TYPE[policy_type.type] = \
                    TOSCA_TO_HOT_TYPE['tosca.policies.Placement']
            if not policy.is_derived_from('tosca.policies.Monitoring') and \
                    not policy.is_derived_from('tosca.policies.Scaling') and \
                    policy_type.type not in TOSCA_TO_HOT_TYPE:
                raise UnsupportedTypeError(type=_('%s') % policy_type.type)
            elif policy_type.type == 'tosca.policies.Scaling.Cluster':
                # Senlin cluster scaling needs a newer HOT version.
                self.hot_template_version = '2016-04-08'
            policy_node = TOSCA_TO_HOT_TYPE[policy_type.type](policy)
            self.hot_resources.append(policy_node)

        # Handle life cycle operations: this may expand each node
        # into multiple HOT resources and may change their name
        lifecycle_resources = []
        for resource in self.hot_resources:
            expanded_resources, deploy_lookup, last_deploy = resource.\
                handle_life_cycle()
            if expanded_resources:
                lifecycle_resources += expanded_resources
            if deploy_lookup:
                self.hot_lookup.update(deploy_lookup)
            if last_deploy:
                self.last_deploy_map[resource] = last_deploy
        self.hot_resources += lifecycle_resources

        # Handle configuration from ConnectsTo relationship in the TOSCA node:
        # this will generate multiple HOT resources, set of 2 for each
        # configuration
        connectsto_resources = []
        for node in self.nodetemplates:
            for requirement in node.requirements:
                for endpoint, details in requirement.items():
                    relation = None
                    if isinstance(details, dict):
                        target = details.get('node')
                        relation = details.get('relationship')
                    else:
                        target = details
                    if (target and relation
                            and not isinstance(relation, six.string_types)):
                        interfaces = relation.get('interfaces')
                        connectsto_resources += \
                            self._create_connect_configs(node,
                                                         target,
                                                         interfaces)
        self.hot_resources += connectsto_resources

        # Copy the initial dependencies based on the relationship in
        # the TOSCA template
        for node in self.nodetemplates:
            for node_depend in node.related_nodes:
                # if the source of dependency is a server and the
                # relationship type is 'tosca.relationships.HostedOn',
                # add dependency as properties.server
                base_type = HotResource.get_base_type_str(
                    node_depend.type_definition)
                if base_type == 'tosca.nodes.Compute' and \
                   node.related[node_depend].type == \
                   node.type_definition.HOSTEDON:
                    self.hot_lookup[node].properties['server'] = \
                        {'get_resource': self.hot_lookup[node_depend].name}
                # for all others, add dependency as depends_on
                else:
                    self.hot_lookup[node].depends_on.append(
                        self.hot_lookup[node_depend].top_of_chain())

                self.hot_lookup[node].depends_on_nodes.append(
                    self.hot_lookup[node_depend].top_of_chain())

                # Also depend on the final deploy step of the dependency's
                # life-cycle chain, when one was recorded above.
                last_deploy = self.last_deploy_map.get(
                    self.hot_lookup[node_depend])
                if last_deploy and \
                    last_deploy not in self.hot_lookup[node].depends_on:
                    self.hot_lookup[node].depends_on.append(last_deploy)
                    self.hot_lookup[node].depends_on_nodes.append(last_deploy)

        # handle hosting relationship
        for resource in self.hot_resources:
            resource.handle_hosting()

        # handle built-in properties of HOT resources
        # if a resource depends on other resources,
        # their properties need to be handled first.
        # Use recursion to handle the properties of the
        # dependent nodes in correct order
        self.processed_resources = []
        for resource in self.hot_resources:
            if resource.type not in HOT_SCALING_POLICY_TYPE:
                self._recursive_handle_properties(resource)

        # handle resources that need to expand to more than one HOT resource
        expansion_resources = []
        for resource in self.hot_resources:
            expanded = resource.handle_expansion()
            if expanded:
                expansion_resources += expanded
        self.hot_resources += expansion_resources

        # Resolve function calls:  GetProperty, GetAttribute, GetInput
        # at this point, all the HOT resources should have been created
        # in the graph.
        for resource in self.hot_resources:
            # traverse the reference chain to get the actual value
            inputs = resource.properties.get('input_values')
            if inputs:
                for name, value in inputs.items():
                    inputs[name] = self.translate_param_value(value, resource)

        # remove resources without type defined
        # for example a SoftwareComponent without interfaces
        # would fall in this case
        to_remove = []
        for resource in self.hot_resources:
            if resource.type is None:
                to_remove.append(resource)

        for resource in to_remove:
            self.hot_resources.remove(resource)

        # Drop dangling depends_on references to the removed resources.
        for resource in self.hot_resources:
            for removed_resource in to_remove:
                if removed_resource in resource.depends_on:
                    resource.depends_on.remove(removed_resource)

        return self.hot_resources
    def translate_param_value(self, param_value, resource):
        """Resolve a TOSCA intrinsic function to its HOT equivalent.

        Recursively evaluates get_property, get_attribute, get_artifact,
        get_input, get_operation_output and concat expressions (given
        either as parsed function objects or as raw dicts) and returns
        the corresponding HOT value or function (e.g. get_param,
        get_file).  Lists are translated element-wise; plain values of a
        base type are returned unchanged; anything unresolvable yields
        None.

        Fix: the get_artifact branch previously called os.chdir(cwd) at
        a scope where 'cwd' could be unbound (NameError when the target
        or artifact was missing) and returned without restoring the
        working directory.  The chdir is now confined to a try/finally
        around the path resolution.

        :param param_value: value or intrinsic function to translate
        :param resource: HotResource providing the TOSCA node context
        :returns: translated HOT value, or None if not resolvable
        """
        tosca_template = None
        if resource:
            tosca_template = resource.nodetemplate

        get_property_args = None
        if isinstance(param_value, GetProperty):
            get_property_args = param_value.args
        # to remove when the parser is fixed to return GetProperty
        elif isinstance(param_value, dict) and 'get_property' in param_value:
            get_property_args = param_value['get_property']
        if get_property_args is not None:
            tosca_target, prop_name, prop_arg = \
                self.decipher_get_operation(get_property_args,
                                            tosca_template)
            if tosca_target:
                prop_value = tosca_target.get_property_value(prop_name)
                if prop_value is not None:
                    # The property itself may be another intrinsic
                    # function; resolve it recursively.
                    prop_value = self.translate_param_value(
                        prop_value, resource)
                    return self._unfold_value(prop_value, prop_arg)
        get_attr_args = None
        if isinstance(param_value, GetAttribute):
            get_attr_args = param_value.result().args
        # to remove when the parser is fixed to return GetAttribute
        elif isinstance(param_value, dict) and 'get_attribute' in param_value:
            get_attr_args = param_value['get_attribute']
        if get_attr_args is not None:
            # for the attribute
            # get the proper target type to perform the translation
            tosca_target, attr_name, attr_arg = \
                self.decipher_get_operation(get_attr_args, tosca_template)
            attr_args = []
            if attr_arg:
                attr_args += attr_arg
            if tosca_target:
                if tosca_target in self.hot_lookup:
                    attr_value = self.hot_lookup[tosca_target].\
                        get_hot_attribute(attr_name, attr_args)
                    attr_value = self.translate_param_value(
                        attr_value, resource)
                    return self._unfold_value(attr_value, attr_arg)
        elif isinstance(param_value, dict) and 'get_artifact' in param_value:
            get_artifact_args = param_value['get_artifact']
            tosca_target, artifact_name, _ = \
                self.decipher_get_operation(get_artifact_args,
                                            tosca_template)

            if tosca_target:
                artifacts = HotResource.get_all_artifacts(tosca_target)
                if artifact_name in artifacts:
                    artifact = artifacts[artifact_name]
                    if self.csar_dir:
                        # Resolve the artifact path relative to the CSAR
                        # directory, always restoring the original cwd.
                        cwd = os.getcwd()
                        try:
                            os.chdir(self.csar_dir)
                            get_file = os.path.abspath(artifact.get('file'))
                        finally:
                            os.chdir(cwd)
                    else:
                        get_file = artifact.get('file')
                    if artifact.get('type', None) == 'tosca.artifacts.File':
                        return {'get_file': get_file}
        get_input_args = None
        if isinstance(param_value, GetInput):
            get_input_args = param_value.args
        elif isinstance(param_value, dict) and 'get_input' in param_value:
            get_input_args = param_value['get_input']
        if get_input_args is not None:
            if isinstance(get_input_args, list) \
                    and len(get_input_args) == 1:
                return {
                    'get_param':
                    self.translate_param_value(get_input_args[0], resource)
                }
            else:
                return {
                    'get_param':
                    self.translate_param_value(get_input_args, resource)
                }
        elif isinstance(param_value, GetOperationOutput):
            res = self._translate_get_operation_output_function(
                param_value.args, tosca_template)
            if res:
                return res
        elif isinstance(param_value, dict) \
                and 'get_operation_output' in param_value:
            res = self._translate_get_operation_output_function(
                param_value['get_operation_output'], tosca_template)
            if res:
                return res
        concat_list = None
        if isinstance(param_value, Concat):
            concat_list = param_value.args
        elif isinstance(param_value, dict) and 'concat' in param_value:
            concat_list = param_value['concat']
        if concat_list is not None:
            res = self._translate_concat_function(concat_list, resource)
            if res:
                return res

        if isinstance(param_value, list):
            # Translate element-wise; elements resolving to a falsy
            # value are dropped (preserves the original behaviour).
            translated_list = []
            for elem in param_value:
                translated_elem = self.translate_param_value(elem, resource)
                if translated_elem:
                    translated_list.append(translated_elem)
            return translated_list

        if isinstance(param_value, BASE_TYPES):
            return param_value

        return None
    def _translate_nodetemplates(self):
        """Translate TOSCA node templates and policies to HOT resources.

        Builds the HotResource graph, creates BlockStorage attachment
        nodes for Compute nodes, expands life-cycle operations and
        ConnectsTo configurations, copies the TOSCA relationship graph
        into HOT dependencies, and resolves intrinsic functions in
        'input_values'.

        Fix: when a requirement value is a plain node-name string (the
        non-dict branch), the lookup previously compared and assigned
        'node_name' — a variable belonging to the dict branch that may
        be stale or unbound — making the branch dead code.  It now uses
        'value', the actual requirement target name.

        :returns: list of HotResource objects for the template
        """
        log.debug(_('Translating the node templates.'))
        suffix = 0
        # Copy the TOSCA graph: nodetemplate
        for node in self.nodetemplates:
            base_type = HotResource.get_base_type(node.type_definition)
            hot_node = TOSCA_TO_HOT_TYPE[base_type.type](node)
            self.hot_resources.append(hot_node)
            self.hot_lookup[node] = hot_node

            # BlockStorage Attachment is a special case,
            # which doesn't match to Heat Resources 1 to 1.
            if base_type.type == "tosca.nodes.Compute":
                volume_name = None
                requirements = node.requirements
                if requirements:
                    # Find the name of associated BlockStorage node
                    for requires in requirements:
                        for value in requires.values():
                            if isinstance(value, dict):
                                for node_name in value.values():
                                    for n in self.nodetemplates:
                                        if n.name == node_name:
                                            volume_name = node_name
                                            break
                            else:
                                # 'value' is the target node name itself.
                                for n in self.nodetemplates:
                                    if n.name == value:
                                        volume_name = value
                                        break

                    suffix = suffix + 1
                    attachment_node = self._get_attachment_node(
                        node, suffix, volume_name)
                    if attachment_node:
                        self.hot_resources.append(attachment_node)
                # Compute nodes without an explicit key_name inherit the
                # template-level 'key_name' input as a get_param property.
                for i in self.tosca.inputs:
                    if (i.name == 'key_name'
                            and node.get_property_value('key_name') is None):
                        schema = {'type': i.type, 'default': i.default}
                        value = {"get_param": "key_name"}
                        prop = Property(i.name, value, schema)
                        node._properties.append(prop)

        for policy in self.policies:
            policy_type = policy.type_definition
            policy_node = TOSCA_TO_HOT_TYPE[policy_type.type](policy)
            self.hot_resources.append(policy_node)

        # Handle life cycle operations: this may expand each node
        # into multiple HOT resources and may change their name
        lifecycle_resources = []
        for resource in self.hot_resources:
            expanded = resource.handle_life_cycle()
            if expanded:
                lifecycle_resources += expanded
        self.hot_resources += lifecycle_resources

        # Handle configuration from ConnectsTo relationship in the TOSCA node:
        # this will generate multiple HOT resources, set of 2 for each
        # configuration
        connectsto_resources = []
        for node in self.nodetemplates:
            for requirement in node.requirements:
                for endpoint, details in six.iteritems(requirement):
                    relation = None
                    if isinstance(details, dict):
                        target = details.get('node')
                        relation = details.get('relationship')
                    else:
                        target = details
                    if (target and relation
                            and not isinstance(relation, six.string_types)):
                        interfaces = relation.get('interfaces')
                        connectsto_resources += \
                            self._create_connect_configs(node,
                                                         target,
                                                         interfaces)
        self.hot_resources += connectsto_resources

        # Copy the initial dependencies based on the relationship in
        # the TOSCA template
        for node in self.nodetemplates:
            for node_depend in node.related_nodes:
                # if the source of dependency is a server and the
                # relationship type is 'tosca.relationships.HostedOn',
                # add dependency as properties.server
                if node_depend.type == 'tosca.nodes.Compute' and \
                   node.related[node_depend].type == \
                   node.type_definition.HOSTEDON:
                    self.hot_lookup[node].properties['server'] = \
                        {'get_resource': self.hot_lookup[node_depend].name}
                # for all others, add dependency as depends_on
                else:
                    self.hot_lookup[node].depends_on.append(
                        self.hot_lookup[node_depend].top_of_chain())

                self.hot_lookup[node].depends_on_nodes.append(
                    self.hot_lookup[node_depend].top_of_chain())

        # handle hosting relationship
        for resource in self.hot_resources:
            resource.handle_hosting()

        # handle built-in properties of HOT resources
        # if a resource depends on other resources,
        # their properties need to be handled first.
        # Use recursion to handle the properties of the
        # dependent nodes in correct order
        self.processed_resources = []
        for resource in self.hot_resources:
            self._recursive_handle_properties(resource)

        # handle resources that need to expand to more than one HOT resource
        expansion_resources = []
        for resource in self.hot_resources:
            expanded = resource.handle_expansion()
            if expanded:
                expansion_resources += expanded
        self.hot_resources += expansion_resources

        # Resolve function calls:  GetProperty, GetAttribute, GetInput
        # at this point, all the HOT resources should have been created
        # in the graph.
        for resource in self.hot_resources:
            # traverse the reference chain to get the actual value
            inputs = resource.properties.get('input_values')
            if inputs:
                for name, value in six.iteritems(inputs):
                    inputs[name] = self._translate_input(value, resource)

        return self.hot_resources
    def handle_properties(self, resources):
        """Split resources into scaling and non-scaling sets for an aspect.

        For the scaling aspect this policy handles, collects the target
        VDU and its related ports, storages and virtual links, wraps the
        scaling resources in an OS::Heat::AutoScalingGroup backed by a
        nested template ('<aspect>.hot.yaml'), and rewrites flavor and
        network references into nested-template parameters.

        Fixes: handle_expansion() was previously invoked twice per
        related resource (once as a filter, once for the value), doing
        the expansion work twice; it is now called exactly once per
        resource.  The "target vdu" warning message grammar is also
        corrected.

        :param resources: list of translated HOT resources
        :returns: tuple (non-scaling resources, nested template dict)
        """
        # Resources of non HotResource
        # (Exclude derived)
        non_hot_rsrcs = [r for r in resources if type(r) is not HotResource]

        # Extract resource name from VduScalingAspectDeltas
        vsad_rsrcs = [
            r for r in non_hot_rsrcs if r.toscatype == SCALING_ASPECT_DELTA]
        vsad_rsrcs = [
            r for r in vsad_rsrcs if r.aspect == self.aspect_name]
        vsad_rsrc = vsad_rsrcs[0]

        # The names of the resource associated with the VDU.
        # Supporting resources below.
        #  - tosca.nodes.nfv.Vdu.Compute
        #  - tosca.nodes.nfv.VduCp
        #  - tosca.nodes.nfv.Vdu.VirtualBlockStorage
        vdu_infos = [
            r for r in resources
            if vsad_rsrc.targets is not None and r.name in vsad_rsrc.targets]
        if vdu_infos == []:
            log.warning('Can not create %s node '
                        'because target vdu is not defined.'
                        % self.aspect_name)

        related_rsrc_names = []
        related_vl_names = []
        for vdu_info in vdu_infos:
            vdu_name = vdu_info.name
            port_rsrc_names = [p.name for p in vdu_info.assoc_port_resources]
            strg_rsrc_names = [s for s in vdu_info.virtual_storages]
            related_rsrc_names.append(vdu_name)
            related_rsrc_names.extend(port_rsrc_names)
            related_rsrc_names.extend(strg_rsrc_names)

            # Extract virtual_link mapping to vdu_name
            cp_rsrcs = [
                r for r in non_hot_rsrcs
                if r.toscatype == VDU_CP and r.virtual_binding == vdu_name]
            related_vl_names.extend([
                cp.virtual_link for cp in cp_rsrcs
                if cp.virtual_link is not None])

        # Parameters defined in referenced YAML
        parameters = {}

        # Resources of scaling/non-scaling
        scl_rsrcs = []
        non_scl_rsrcs = []

        # Properties of :AutoScalingGroup
        asg_props = {}

        # Resources named in related_rsrc_names do not get
        # handle_expansion() called elsewhere, so call it here and add
        # the generated resources.  Call it exactly once per resource:
        # each call constructs fresh HotResource objects.
        related_rsrcs = [
            r for r in resources if r.name in related_rsrc_names]
        exp_rsrcs = []
        for rsrc in related_rsrcs:
            expanded = rsrc.handle_expansion()
            if expanded is not None:
                exp_rsrcs.extend(expanded)

        # Allocate resources generated by handle_expansion()
        # to scaling or non-scaling resource.
        # Flavor is non-scaling resource.
        scl_rsrcs.extend([
            r for r in exp_rsrcs
            if r.type != HOT_FLAVOR])
        non_scl_rsrcs.extend([
            r for r in exp_rsrcs
            if r.type == HOT_FLAVOR])

        for resource in resources:
            # Allocate resources to scaling or non-scaling resource.
            if resource.type not in HEAT_NON_SCALING_RESOURCES and \
                    resource.name in related_rsrc_names:
                scl_rsrcs.append(resource)
            else:
                non_scl_rsrcs.append(resource)

            # Processing for VDU
            if resource.name in related_rsrc_names and \
                resource.type == 'OS::Nova::Server':
                self.vdu_name = resource.name

                # Target aspect
                target_aspect = \
                    self.policy.properties['aspects'][self.aspect_name]

                # Extract scaling_adjustment from VduScalingAspectDeltas.
                # VduScalingAspectDeltas can specify a delta for each scaling
                # step but only the first one is used and others are ignored.
                delta_names = target_aspect['step_deltas']
                self.delta_name = delta_names[0]
                self.scaling_adjustment = vsad_rsrc.deltas.get(self.delta_name)

                # Extract min_size from VduInitialDelta
                vid_rsrcs = [
                    r for r in non_hot_rsrcs
                    if r.toscatype == VDU_INITIAL_DELTA]
                initial_deltas = [
                    r for r in vid_rsrcs
                    if r.targets is not None and self.vdu_name in r.targets]
                min_size = None \
                    if initial_deltas == [] else initial_deltas[0].num

                res = {}
                if min_size is not None and \
                        self.scaling_adjustment is not None:
                    # Calculate max_size
                    max_scale_level = target_aspect['max_scale_level']
                    max_size = \
                        min_size + max_scale_level * self.scaling_adjustment
                    res["min_size"] = min_size
                    res["max_size"] = max_size
                else:
                    log.warning('No min_size or(and) max_size is found for '
                                'aspect_name:%s, VDU:%s' % (
                                    self.aspect_name, self.vdu_name))

                # desired_capacity needs to be replaced by users because it is
                # calculated using scaling aspect and instantiation level given
                # in instantiation VNF request.
                res["desired_capacity"] = self.DESIRED_CAPACITY
                res['resource'] = {'type': self.aspect_name + '.hot.yaml'}

                props = {}
                props['type'] = resource.type
                props['properties'] = resource.properties

                # Pass network ids of related virtual links into the
                # nested template via the AutoScalingGroup properties.
                for vl_name in related_vl_names:
                    vl_id = '%s_id' % (vl_name.lower())
                    asg_props.update({
                        vl_id: '{ get_resource: %s }' % (vl_name)})

                # Replace flavor id
                flvr_name = resource.flavor_resource_name
                flvr_id = '%s_id' % (flvr_name.lower())
                asg_props.update({
                    flvr_id: '{ get_resource: %s }' % (flvr_name)})
                resource.properties['flavor'] = \
                    '{ get_param: %s }' % (flvr_id)
                parameters[flvr_id] = {'type': 'string'}

                res['resource'].update({
                    'properties': asg_props
                })
                non_scl_rsrcs.append(
                    HotResource(resource,
                                type='OS::Heat::AutoScalingGroup',
                                name=self.aspect_name,
                                properties=res))

            # Processing for CP related to target VDU
            elif resource.name in related_rsrc_names and \
                resource.type == 'OS::Neutron::Port':
                for vl_name in related_vl_names:
                    if vl_name == resource.virtual_link:
                        # Replace network id
                        vl_id = '%s_id' % (vl_name.lower())
                        resource.properties['network'] = \
                            '{ get_param: %s }' % (vl_id)
                        parameters[vl_id] = {'type': 'string'}

        # Create referenced YAML which is defined scaling resources
        yaml_name = self.aspect_name + '.hot.yaml'
        parameters = None if parameters == {} else parameters
        nested_template = self._handle_nested_template(
            scl_rsrcs,
            yaml_name,
            self.hot_template_parameters,
            parameters=parameters)

        return non_scl_rsrcs, nested_template
    def handle_expansion(self):
        """Expand this virtual-link node into extra Neutron resources.

        When an IP protocol is configured, creates an
        OS::Neutron::Subnet for the network (taking the CIDR from the
        first l3_protocol_data entry of vl_profile, if present) and,
        when a leaf max_bitrate_requirements property exists, an
        OS::Neutron::QoSPolicy plus OS::Neutron::QoSBandwidthLimitRule.

        :returns: list of generated HotResource objects (possibly empty)
        """
        expanded = []

        # Without an IP protocol there is nothing to expand.
        if not self.ip_protocol:
            return expanded

        tosca_props = self.get_tosca_props()

        # Properties for the subnet attached to this network.
        subnet_props = {
            'ip_version': self.ip_map.get(self.ip_protocol[0]),
            'network': '{ get_resource: %s }' % (self.name),
        }

        # Pick up the CIDR from the VL profile, when one is given.
        if 'vl_profile' in tosca_props:
            profile = tosca_props['vl_profile']
            if 'virtual_link_protocol_data' in profile:
                protocol_data = profile['virtual_link_protocol_data']
                if 'l3_protocol_data' in protocol_data[0]:
                    l3_data = protocol_data[0]['l3_protocol_data']
                    subnet_props['cidr'] = l3_data['cidr']

        expanded.append(
            HotResource(
                self.nodetemplate,
                type='OS::Neutron::Subnet',
                name=self.name + self.SUBNET_SUFFIX,
                properties=subnet_props,
            ))

        # Create QoSPolicy and QoSBandwidthLimitRule resources
        # only when max_bitrate_requirements has leaf property.
        if self.is_leaf:
            expanded.append(
                HotResource(
                    self.nodetemplate,
                    type='OS::Neutron::QoSPolicy',
                    name=self.name + self.QOSPOLICY_SUFFIX,
                    properties={},
                ))

            # The bandwidth rule points back at the QoS policy above.
            bandwidth_props = {
                'policy': '{ get_resource: %s%s }' % (
                    self.name,
                    self.QOSPOLICY_SUFFIX,
                ),
                'max_kbps': self.max_kbps,
            }
            expanded.append(
                HotResource(
                    self.nodetemplate,
                    type='OS::Neutron::QoSBandwidthLimitRule',
                    name=self.name + self.BANDWIDTH_SUFFIX,
                    properties=bandwidth_props,
                ))

        return expanded