def post_process_heat_template(heat_tpl, mgmt_ports, res_tpl,
                               unsupported_res_prop=None):
    """Post-process a heat-translator generated HOT template.

    Adds a management-IP output for every port in mgmt_ports, merges the
    resource template via add_resources_tpl() and converts properties the
    target Heat does not support, then re-serializes to YAML.

    :param heat_tpl: HOT template as a YAML string
    :param mgmt_ports: mapping of output name -> port resource name
    :param res_tpl: resource data merged by add_resources_tpl()
    :param unsupported_res_prop: optional unsupported-property mapping
    :returns: the updated template as a YAML string
    """
    #
    # TODO(bobh) - remove when heat-translator can support literal strings.
    #
    def fix_user_data(user_data_string):
        # BUG FIX: re.sub()'s fourth positional argument is *count*, not
        # *flags* -- passing re.MULTILINE (== 8) there silently capped the
        # number of substitutions at 8.  Pass flags by keyword instead.
        user_data_string = re.sub('user_data: #', 'user_data: |\n #',
                                  user_data_string, flags=re.MULTILINE)
        return re.sub('\n\n', '\n', user_data_string, flags=re.MULTILINE)

    heat_tpl = fix_user_data(heat_tpl)
    #
    # End temporary workaround for heat-translator
    #
    heat_dict = yamlparser.simple_ordered_parse(heat_tpl)
    for outputname, portname in mgmt_ports.items():
        ipval = {'get_attr': [portname, 'fixed_ips', 0, 'ip_address']}
        output = {outputname: {'value': ipval}}
        if 'outputs' in heat_dict:
            heat_dict['outputs'].update(output)
        else:
            heat_dict['outputs'] = output
        LOG.debug(_('Added output for %s'), outputname)
    add_resources_tpl(heat_dict, res_tpl)
    if unsupported_res_prop:
        convert_unsupported_res_prop(heat_dict, unsupported_res_prop)
    return yaml.dump(heat_dict)
def _generate_hot_alarm_resource(self, topology_tpl_dict):
    """Convert TOSCA Alarming-policy triggers into Heat alarm resources.

    For every 'tosca.policies.tacker.Alarming' policy in the topology
    template, each trigger is converted via
    self._convert_to_heat_monitoring_resource() and merged into the
    'resources' section of the current heat template.

    :param topology_tpl_dict: parsed TOSCA topology_template dict
    :returns: tuple (is_enabled_alarm, alarm_resource, heat_tpl_yaml)
    """
    alarm_resource = dict()
    heat_tpl = self.heat_template_yaml
    heat_dict = yamlparser.simple_ordered_parse(heat_tpl)
    is_enabled_alarm = False

    if 'policies' in topology_tpl_dict:
        for policy_dict in topology_tpl_dict['policies']:
            # Each policy entry is a single-item mapping: {name: body}.
            name, policy_tpl_dict = list(policy_dict.items())[0]
            # need to parse triggers here: scaling in/out, respawn,...
            if policy_tpl_dict['type'] == \
                    'tosca.policies.tacker.Alarming':
                is_enabled_alarm = True
                triggers = policy_tpl_dict['triggers']
                for trigger_name, trigger_dict in triggers.items():
                    # One Heat monitoring resource per trigger, keyed
                    # by the trigger name.
                    alarm_resource[trigger_name] =\
                        self._convert_to_heat_monitoring_resource({
                            trigger_name: trigger_dict}, self.vnf)
                heat_dict['resources'].update(alarm_resource)

    heat_tpl_yaml = yaml.dump(heat_dict)
    return (is_enabled_alarm,
            alarm_resource,
            heat_tpl_yaml)
def generate_hot(self):
    """Generate the HOT template for this VNF and record its policies.

    Parses the VNFD (an OrderedDict), translates it to HOT, stores the
    template in self.fields, processes scaling/alarm policies and dumps
    the monitoring dictionaries into the vnf attributes.
    """
    self._get_vnfd()
    dev_attrs = self._update_fields()
    vnfd_dict = yamlparser.simple_ordered_parse(self.vnfd_yaml)
    LOG.debug('vnfd_dict %s', vnfd_dict)
    self._get_unsupported_resource_props(self.heatclient)
    self._generate_hot_from_tosca(vnfd_dict, dev_attrs)
    self.fields['template'] = self.heat_template_yaml
    # BUG FIX: removed a leftover debug `print("\n")` that wrote to
    # stdout, and the is_tosca_format flag that was unconditionally set
    # True right before being tested -- the template is always
    # TOSCA-derived here, so policies are always handled.
    self._handle_policies(vnfd_dict)
    if self.monitoring_dict:
        self.vnf['attributes']['monitoring_policy'] = jsonutils.dumps(
            self.monitoring_dict)
    if self.svcmonitoring_dict:
        self.vnf['attributes'][
            'service_monitoring_policy'] = jsonutils.dumps(
            self.svcmonitoring_dict)
def _test_samples(self, files):
    """Verify each sample template parses and translates all the way to HOT."""
    if not files:
        return
    for sample in self._get_list_of_sample(files):
        with open(sample, 'r') as sample_file:
            content = sample_file.read()

            def _attempt(action):
                # Run the action, returning None on any failure so the
                # assertion below reports which stage broke.
                try:
                    return action()
                except:  # noqa
                    return None

            yaml_dict = _attempt(
                lambda: yamlparser.simple_ordered_parse(content))
            self.assertIsNotNone(yaml_dict,
                                 "Yaml parser failed to parse %s" % sample)
            utils.updateimports(yaml_dict)

            tosca = _attempt(lambda: tosca_template.ToscaTemplate(
                a_file=False, yaml_dict_tpl=yaml_dict))
            self.assertIsNotNone(tosca,
                                 "Tosca parser failed to parse %s" % sample)
            utils.post_process_template(tosca)

            hot = _attempt(lambda: tosca_translator.TOSCATranslator(
                tosca, {}).translate())
            self.assertIsNotNone(
                hot, "Heat-translator failed to translate %s" % sample)
def post_process_heat_template(heat_tpl, mgmt_ports, res_tpl,
                               unsupported_res_prop=None):
    """Post-process a heat-translator generated HOT template.

    Adds a management-IP output per mgmt port, merges the resource
    template and converts unsupported resource properties, then returns
    the template re-serialized to YAML.
    """
    #
    # TODO(bobh) - remove when heat-translator can support literal strings.
    #
    def fix_user_data(user_data_string):
        # BUG FIX: re.sub()'s fourth positional argument is *count*, not
        # *flags*; re.MULTILINE (== 8) was silently used as a count.
        user_data_string = re.sub('user_data: #', 'user_data: |\n #',
                                  user_data_string, flags=re.MULTILINE)
        return re.sub('\n\n', '\n', user_data_string, flags=re.MULTILINE)

    heat_tpl = fix_user_data(heat_tpl)
    #
    # End temporary workaround for heat-translator
    #
    heat_dict = yamlparser.simple_ordered_parse(heat_tpl)
    for outputname, portname in mgmt_ports.items():
        ipval = {'get_attr': [portname, 'fixed_ips', 0, 'ip_address']}
        output = {outputname: {'value': ipval}}
        if 'outputs' in heat_dict:
            heat_dict['outputs'].update(output)
        else:
            heat_dict['outputs'] = output
        # BUG FIX: pass the argument lazily instead of eager '%'
        # formatting, so no work is done when debug logging is off.
        LOG.debug(_('Added output for %s'), outputname)
    add_resources_tpl(heat_dict, res_tpl)
    if unsupported_res_prop:
        convert_unsupported_res_prop(heat_dict, unsupported_res_prop)
    return yaml.dump(heat_dict)
def get_scaling_group_dict(ht_template, scaling_policy_names):
    """Map the first scaling policy name to the first scaling group resource.

    Scans the heat template for resources of type SCALE_GROUP_RESOURCE
    and, if any exist, returns {scaling_policy_names[0]: first_group_name};
    otherwise returns an empty dict.
    """
    heat_dict = yamlparser.simple_ordered_parse(ht_template)
    group_names = [
        res_name
        for res_name, res_body in heat_dict['resources'].items()
        if res_body['type'] == SCALE_GROUP_RESOURCE
    ]
    result = dict()
    if group_names:
        result[scaling_policy_names[0]] = group_names[0]
    return result
def generate_hot():
    """Generate HOT template(s) and record them on the vnf attributes.

    NOTE(review): reads/writes names from the enclosing scope
    (vnfd_yaml, fields, vnf) -- confirm where they are bound.  Handles
    the combinations of TOSCA/legacy input with scaling and alarming:
    scaling wraps the VNF template in a main template referencing
    'scaling.yaml'; alarming merges alarm resources into whichever
    template ends up as the top level.
    """
    vnfd_dict = yamlparser.simple_ordered_parse(vnfd_yaml)
    LOG.debug('vnfd_dict %s', vnfd_dict)
    is_tosca_format = False
    if 'tosca_definitions_version' in vnfd_dict:
        (heat_template_yaml,
         monitoring_dict) = generate_hot_from_tosca(vnfd_dict)
        is_tosca_format = True
    else:
        (heat_template_yaml,
         monitoring_dict) = generate_hot_from_legacy(vnfd_dict)
    fields['template'] = heat_template_yaml
    if is_tosca_format:
        (is_scaling_needed, scaling_group_names,
         main_dict) = generate_hot_scaling(
            vnfd_dict['topology_template'], 'scaling.yaml')
        (is_enabled_alarm, alarm_resource,
         heat_tpl_yaml, sub_heat_tpl_yaml) =\
            generate_hot_alarm_resource(vnfd_dict['topology_template'],
                                        heat_template_yaml)
        # Alarm only: the alarm-augmented template becomes the template.
        if is_enabled_alarm and not is_scaling_needed:
            heat_template_yaml = heat_tpl_yaml
            fields['template'] = heat_template_yaml
        if is_scaling_needed:
            # Scaling (optionally with alarms): the main template is the
            # top level and the VNF template becomes 'scaling.yaml'.
            if is_enabled_alarm:
                main_dict['resources'].update(alarm_resource)
            main_yaml = yaml.dump(main_dict)
            fields['template'] = main_yaml
            fields['files'] = {'scaling.yaml': sub_heat_tpl_yaml}\
                if is_enabled_alarm else {
                'scaling.yaml': heat_template_yaml}
            vnf['attributes']['heat_template'] = main_yaml
            # TODO(kanagaraj-manickam) when multiple groups are
            # supported, make this scaling attribute as
            # scaling name vs scaling template map and remove
            # scaling_group_names
            vnf['attributes']['scaling.yaml'] = heat_template_yaml
            vnf['attributes'][
                'scaling_group_names'] = jsonutils.dumps(
                scaling_group_names
            )
        elif not is_scaling_needed:
            if not vnf['attributes'].get('heat_template'):
                vnf['attributes'][
                    'heat_template'] = fields['template']
    if monitoring_dict:
        vnf['attributes']['monitoring_policy'] = \
            jsonutils.dumps(monitoring_dict)
def generate_hot():
    """Generate HOT template(s) and record them on the vnf attributes.

    NOTE(review): duplicate of the variant above; reads/writes names
    from the enclosing scope (vnfd_yaml, fields, vnf) -- confirm where
    they are bound.  Scaling wraps the VNF template in a main template
    referencing 'scaling.yaml'; alarming merges alarm resources into
    whichever template ends up as the top level.
    """
    vnfd_dict = yamlparser.simple_ordered_parse(vnfd_yaml)
    LOG.debug('vnfd_dict %s', vnfd_dict)
    is_tosca_format = False
    if 'tosca_definitions_version' in vnfd_dict:
        (heat_template_yaml,
         monitoring_dict) = generate_hot_from_tosca(vnfd_dict)
        is_tosca_format = True
    else:
        (heat_template_yaml,
         monitoring_dict) = generate_hot_from_legacy(vnfd_dict)
    fields['template'] = heat_template_yaml
    if is_tosca_format:
        (is_scaling_needed, scaling_group_names,
         main_dict) = generate_hot_scaling(
            vnfd_dict['topology_template'], 'scaling.yaml')
        (is_enabled_alarm, alarm_resource,
         heat_tpl_yaml, sub_heat_tpl_yaml) =\
            generate_hot_alarm_resource(vnfd_dict['topology_template'],
                                        heat_template_yaml)
        # Alarm only: the alarm-augmented template becomes the template.
        if is_enabled_alarm and not is_scaling_needed:
            heat_template_yaml = heat_tpl_yaml
            fields['template'] = heat_template_yaml
        if is_scaling_needed:
            # Scaling (optionally with alarms): the main template is the
            # top level and the VNF template becomes 'scaling.yaml'.
            if is_enabled_alarm:
                main_dict['resources'].update(alarm_resource)
            main_yaml = yaml.dump(main_dict)
            fields['template'] = main_yaml
            fields['files'] = {'scaling.yaml': sub_heat_tpl_yaml}\
                if is_enabled_alarm else {
                'scaling.yaml': heat_template_yaml}
            vnf['attributes']['heat_template'] = main_yaml
            # TODO(kanagaraj-manickam) when multiple groups are
            # supported, make this scaling attribute as
            # scaling name vs scaling template map and remove
            # scaling_group_names
            vnf['attributes']['scaling.yaml'] = heat_template_yaml
            vnf['attributes'][
                'scaling_group_names'] = jsonutils.dumps(
                scaling_group_names
            )
        elif not is_scaling_needed:
            if not vnf['attributes'].get('heat_template'):
                vnf['attributes'][
                    'heat_template'] = fields['template']
    if monitoring_dict:
        vnf['attributes']['monitoring_policy'] = \
            jsonutils.dumps(monitoring_dict)
def update_nested_scaling_resources(nested_resources, mgmt_ports, metadata,
                                    res_tpl, unsupported_res_prop=None,
                                    grant_info=None, inst_req_info=None):
    """Update every nested scaling template and return them re-serialized.

    For each nested template: injects per-VDU metadata, applies grant
    info, replaces external virtual links from inst_req_info, merges the
    resource template, inlines SoftwareConfig 'get_file' contents,
    converts unsupported properties and adds mgmt-IP outputs.

    :returns: dict of nested resource name -> updated YAML string
    """
    nested_tpl = dict()
    for nested_resource_name, nested_resources_yaml in \
            nested_resources.items():
        nested_resources_dict =\
            yamlparser.simple_ordered_parse(nested_resources_yaml)
        if metadata.get('vdus'):
            for vdu_name, metadata_dict in metadata['vdus'].items():
                if nested_resources_dict['resources'].get(vdu_name):
                    vdu_dict = nested_resources_dict['resources'][vdu_name]
                    vdu_dict['properties']['metadata'] = metadata_dict
        convert_grant_info(nested_resources_dict, grant_info)

        # Replace external virtual links if specified in the inst_req_info
        if inst_req_info is not None:
            for ext_vl in inst_req_info.ext_virtual_links:
                _convert_ext_vls(nested_resources_dict, ext_vl)

        add_resources_tpl(nested_resources_dict, res_tpl)
        for res in nested_resources_dict["resources"].values():
            if not res['type'] == HEAT_SOFTWARE_CONFIG:
                continue
            config = res["properties"]["config"]
            if 'get_file' in config:
                # BUG FIX: close the config file instead of leaking the
                # handle until garbage collection.
                with open(config["get_file"]) as cfg_file:
                    res["properties"]["config"] = cfg_file.read()

        if unsupported_res_prop:
            convert_unsupported_res_prop(nested_resources_dict,
                                         unsupported_res_prop)

        if mgmt_ports:
            for outputname, portname in mgmt_ports.items():
                ipval = {'get_attr': [portname, 'fixed_ips', 0,
                                      'ip_address']}
                output = {outputname: {'value': ipval}}
                if 'outputs' in nested_resources_dict:
                    nested_resources_dict['outputs'].update(output)
                else:
                    nested_resources_dict['outputs'] = output
                LOG.debug(_('Added output for %s'), outputname)

        yaml.SafeDumper.add_representer(
            OrderedDict, lambda dumper, value: represent_odict(
                dumper, u'tag:yaml.org,2002:map', value))
        nested_tpl[nested_resource_name] =\
            yaml.safe_dump(nested_resources_dict)
    return nested_tpl
def post_process_heat_template(heat_tpl, mgmt_ports, res_tpl):
    """Add management-IP outputs and merge resource data into heat_tpl.

    :returns: the updated template serialized back to YAML
    """
    heat_dict = yamlparser.simple_ordered_parse(heat_tpl)
    for outputname, portname in mgmt_ports.items():
        ipval = {'get_attr': [portname, 'fixed_ips', 0, 'ip_address']}
        output = {outputname: {'value': ipval}}
        if 'outputs' in heat_dict:
            heat_dict['outputs'].update(output)
        else:
            heat_dict['outputs'] = output
        # BUG FIX: pass the argument lazily instead of eager '%'
        # formatting, so no work is done when debug logging is off.
        LOG.debug(_('Added output for %s'), outputname)
    add_resources_tpl(heat_dict, res_tpl)
    return yaml.dump(heat_dict)
def update_nested_scaling_resources(name_yaml, nested_resources, mgmt_ports,
                                    metadata, res_tpl,
                                    unsupported_res_prop=None):
    """Update a nested scaling template and return {name_yaml: yaml}.

    Injects per-VDU metadata, merges the resource template, inlines
    SoftwareConfig 'get_file' contents, converts unsupported resource
    properties and adds management-IP outputs.
    """
    nested_tpl = dict()
    if nested_resources:
        nested_resources_dict =\
            yamlparser.simple_ordered_parse(nested_resources)
        LOG.debug(_('nested_resources_dict %s'), nested_resources_dict)
        if metadata:
            # BUG FIX: corrected 'metadada' typo in the debug message and
            # removed the block of commented-out dead code above.
            LOG.debug(_('metadata %s'), metadata)
            for vdu_name, metadata_dict in metadata['vdus'].items():
                LOG.debug(_('vdu_name %s |||| metadata_dict %s'),
                          vdu_name, metadata_dict)
                LOG.debug(_('nested_resources_dict %s'),
                          nested_resources_dict)
                nested_resources_dict['resources'][vdu_name][
                    'properties']['metadata'] = metadata_dict
        add_resources_tpl(nested_resources_dict, res_tpl)
        for res in nested_resources_dict["resources"].values():
            if not res['type'] == HEAT_SOFTWARE_CONFIG:
                continue
            config = res["properties"]["config"]
            if 'get_file' in config:
                # BUG FIX: close the config file instead of leaking the
                # handle until garbage collection.
                with open(config["get_file"]) as cfg_file:
                    res["properties"]["config"] = cfg_file.read()
        if unsupported_res_prop:
            convert_unsupported_res_prop(nested_resources_dict,
                                         unsupported_res_prop)
        for outputname, portname in mgmt_ports.items():
            ipval = {'get_attr': [portname, 'fixed_ips', 0, 'ip_address']}
            output = {outputname: {'value': ipval}}
            if 'outputs' in nested_resources_dict:
                nested_resources_dict['outputs'].update(output)
            else:
                nested_resources_dict['outputs'] = output
            LOG.debug(_('Added output for %s'), outputname)
        yaml.SafeDumper.add_representer(
            OrderedDict, lambda dumper, value: represent_odict(
                dumper, u'tag:yaml.org,2002:map', value))
        nested_tpl[name_yaml] =\
            yaml.safe_dump(nested_resources_dict)
    return nested_tpl
def generate_hot(self):
    """Build the HOT template for the VNF and store related attributes."""
    self._get_vnfd()
    dev_attrs = self._update_fields()

    vnfd_dict = yamlparser.simple_ordered_parse(self.vnfd_yaml)
    LOG.debug('vnfd_dict %s', vnfd_dict)

    self._get_unsupported_resource_props(self.heatclient)
    self._generate_hot_from_tosca(vnfd_dict, dev_attrs)
    self.fields['template'] = self.heat_template_yaml

    attrs = self.vnf['attributes']
    # Keep any pre-existing heat_template; only fill it in when absent.
    if not attrs.get('heat_template'):
        attrs['heat_template'] = self.fields['template']
    if self.monitoring_dict:
        attrs['monitoring_policy'] = jsonutils.dumps(self.monitoring_dict)
def post_process_heat_template(heat_tpl, mgmt_ports, metadata, res_tpl,
                               unsupported_res_prop=None):
    """Post-process the translated HOT template.

    Adds mgmt-IP outputs, injects per-VDU metadata, merges the resource
    template, inlines SoftwareConfig 'get_file' contents and converts
    unsupported resource properties before re-serializing with a
    SafeDumper that understands OrderedDict.
    """
    #
    # TODO(bobh) - remove when heat-translator can support literal strings.
    #
    def fix_user_data(user_data_string):
        # BUG FIX: re.sub()'s fourth positional argument is *count*, not
        # *flags*; re.MULTILINE (== 8) was silently used as a count.
        user_data_string = re.sub('user_data: #', 'user_data: |\n #',
                                  user_data_string, flags=re.MULTILINE)
        return re.sub('\n\n', '\n', user_data_string, flags=re.MULTILINE)

    heat_tpl = fix_user_data(heat_tpl)
    #
    # End temporary workaround for heat-translator
    #
    heat_dict = yamlparser.simple_ordered_parse(heat_tpl)
    for outputname, portname in mgmt_ports.items():
        ipval = {'get_attr': [portname, 'fixed_ips', 0, 'ip_address']}
        output = {outputname: {'value': ipval}}
        if 'outputs' in heat_dict:
            heat_dict['outputs'].update(output)
        else:
            heat_dict['outputs'] = output
        LOG.debug('Added output for %s', outputname)
    if metadata:
        for vdu_name, metadata_dict in metadata['vdus'].items():
            heat_dict['resources'][vdu_name]['properties']['metadata'] =\
                metadata_dict
    add_resources_tpl(heat_dict, res_tpl)
    for res in heat_dict["resources"].values():
        if not res['type'] == HEAT_SOFTWARE_CONFIG:
            continue
        config = res["properties"]["config"]
        if 'get_file' in config:
            # BUG FIX: close the config file instead of leaking the
            # handle until garbage collection.
            with open(config["get_file"]) as cfg_file:
                res["properties"]["config"] = cfg_file.read()
    if unsupported_res_prop:
        convert_unsupported_res_prop(heat_dict, unsupported_res_prop)
    yaml.SafeDumper.add_representer(
        OrderedDict, lambda dumper, value: represent_odict(
            dumper, u'tag:yaml.org,2002:map', value))
    return yaml.safe_dump(heat_dict)
def post_process_heat_template(heat_tpl, mgmt_ports, res_tpl,
                               unsupported_res_prop=None):
    """Add mgmt-IP outputs, merge resources and convert unsupported props."""
    heat_dict = yamlparser.simple_ordered_parse(heat_tpl)
    for out_name, port_name in mgmt_ports.items():
        ipval = {'get_attr': [port_name, 'fixed_ips', 0, 'ip_address']}
        # Create the outputs section lazily so an empty mgmt_ports leaves
        # the template untouched.
        heat_dict.setdefault('outputs', {})[out_name] = {'value': ipval}
        LOG.debug(_('Added output for %s'), out_name)
    add_resources_tpl(heat_dict, res_tpl)
    if unsupported_res_prop:
        convert_unsupported_res_prop(heat_dict, unsupported_res_prop)
    return yaml.dump(heat_dict)
def process_input(self):
    """Process input of vnfd template"""
    attrs = self.vnf['vnfd']['attributes'].copy()
    self.attributes = attrs
    self.vnfd_yaml = attrs.pop('vnfd', None)
    if self.vnfd_yaml is None:
        LOG.info("VNFD is not provided, so no vnf is created !!")
        return
    LOG.debug('vnfd_yaml %s', self.vnfd_yaml)

    parsed_vnfd = yamlparser.simple_ordered_parse(self.vnfd_yaml)
    LOG.debug('vnfd_dict %s', parsed_vnfd)

    # Read parameter and process inputs
    if 'get_input' in str(parsed_vnfd):
        self._process_parameterized_input(self.vnf['attributes'],
                                          parsed_vnfd)
    return parsed_vnfd
def post_process_heat_template(heat_tpl, mgmt_ports, res_tpl, vnfc_dict,
                               unsupported_res_prop=None):
    """Post-process the translated HOT template, wiring in VNFC configs.

    Besides the usual mgmt-IP outputs / resource merge / unsupported
    property conversion, converts each cloud-init VNFC entry into an
    OS::Heat::SoftwareConfig resource attached to its VDU.
    """
    #
    # TODO(bobh) - remove when heat-translator can support literal strings.
    #
    def fix_user_data(user_data_string):
        # BUG FIX: re.sub()'s fourth positional argument is *count*, not
        # *flags*; re.MULTILINE (== 8) was silently used as a count.
        user_data_string = re.sub('user_data: #', 'user_data: |\n #',
                                  user_data_string, flags=re.MULTILINE)
        return re.sub('\n\n', '\n', user_data_string, flags=re.MULTILINE)

    heat_tpl = fix_user_data(heat_tpl)
    #
    # End temporary workaround for heat-translator
    #
    heat_dict = yamlparser.simple_ordered_parse(heat_tpl)
    for outputname, portname in mgmt_ports.items():
        ipval = {'get_attr': [portname, 'fixed_ips', 0, 'ip_address']}
        output = {outputname: {'value': ipval}}
        if 'outputs' in heat_dict:
            heat_dict['outputs'].update(output)
        else:
            heat_dict['outputs'] = output
        LOG.debug(_('Added output for %s'), outputname)
    add_resources_tpl(heat_dict, res_tpl)
    if unsupported_res_prop:
        convert_unsupported_res_prop(heat_dict, unsupported_res_prop)

    # VNFC changes for VDU.
    # BUG FIX: iterate over a snapshot -- deleting from vnfc_dict while
    # iterating its live .items() view raises RuntimeError on Python 3.
    for VDU, vnfc_info in list(vnfc_dict.items()):
        if vnfc_info["vnfc_driver"] == "cloud-init":
            res_name = vnfc_info["name"] + "_create_config"
            # BUG FIX: close the script file instead of leaking the handle.
            with open(vnfc_info["create"]) as create_script:
                config_script = create_script.read()
            heat_dict["resources"][res_name] = {
                "type": "OS::Heat::SoftwareConfig",
                "properties": {
                    "config": config_script,
                    "group": "script"
                }
            }
            vdu_props = heat_dict["resources"][VDU]["properties"]
            vdu_props["user_data_format"] = "SOFTWARE_CONFIG"
            vdu_props["user_data"] = {"get_resource": res_name}
            del vnfc_dict[VDU]
    return yaml.dump(heat_dict)
def generate_hot():
    """Render the HOT template and record it on the device attributes.

    NOTE(review): relies on names from the enclosing scope
    (vnfd_yaml, fields, device) -- confirm where they are bound.
    """
    vnfd_dict = yamlparser.simple_ordered_parse(vnfd_yaml)
    LOG.debug('vnfd_dict %s', vnfd_dict)

    # Pick the translator based on whether the VNFD is TOSCA or legacy.
    translate = (generate_hot_from_tosca
                 if 'tosca_definitions_version' in vnfd_dict
                 else generate_hot_from_legacy)
    heat_template_yaml, monitoring_dict = translate(vnfd_dict)

    fields['template'] = heat_template_yaml
    dev_attrs = device['attributes']
    if not dev_attrs.get('heat_template'):
        dev_attrs['heat_template'] = fields['template']
    if monitoring_dict:
        dev_attrs['monitoring_policy'] = jsonutils.dumps(monitoring_dict)
def generate_hot(self):
    """Generate the HOT template and persist monitoring policies."""
    self._get_vnfd()
    dev_attrs = self._update_fields()

    vnfd_dict = yamlparser.simple_ordered_parse(self.vnfd_yaml)
    LOG.debug('vnfd_dict %s', vnfd_dict)

    self._get_unsupported_resource_props(self.heatclient)
    self._generate_hot_from_tosca(vnfd_dict, dev_attrs)
    self.fields['template'] = self.heat_template_yaml

    attrs = self.vnf['attributes']
    # Keep any pre-existing heat_template; only fill it in when absent.
    if not attrs.get('heat_template'):
        attrs['heat_template'] = self.fields['template']
    if self.monitoring_dict:
        attrs['monitoring_policy'] = jsonutils.dump_as_bytes(
            self.monitoring_dict)
    if self.appmonitoring_dict:
        attrs['app_monitoring_policy'] = jsonutils.dump_as_bytes(
            self.appmonitoring_dict)
def post_process_heat_template_for_scaling(
        heat_tpl, mgmt_ports, metadata, alarm_resources, res_tpl,
        vol_res=None, unsupported_res_prop=None, unique_id=None,
        inst_req_info=None, grant_info=None, tosca=None):
    """Post-process a scaling HOT template before stack creation.

    Applies instantiation-request and grant conversions, then dumps the
    template with a SafeDumper that understands OrderedDict.

    BUG FIX: vol_res previously defaulted to a shared mutable dict
    ({}); it is unused in this function, so the default is now None to
    avoid the mutable-default-argument pitfall (backward compatible).
    """
    heat_dict = yamlparser.simple_ordered_parse(heat_tpl)
    if inst_req_info:
        check_inst_req_info_for_scaling(heat_dict, inst_req_info)
        convert_inst_req_info(heat_dict, inst_req_info, tosca)
    if grant_info:
        convert_grant_info(heat_dict, grant_info)
    yaml.SafeDumper.add_representer(
        OrderedDict, lambda dumper, value: represent_odict(
            dumper, u'tag:yaml.org,2002:map', value))
    return yaml.safe_dump(heat_dict)
def update_nested_scaling_resources(nested_resources, mgmt_ports, metadata,
                                    res_tpl, unsupported_res_prop=None):
    """Update the (single) nested scaling template and return it as YAML.

    Injects per-VDU metadata, merges the resource template, inlines
    SoftwareConfig 'get_file' contents, converts unsupported properties
    and adds management-IP outputs.
    """
    nested_tpl = dict()
    if nested_resources:
        nested_resource_name, nested_resources_yaml =\
            list(nested_resources.items())[0]
        nested_resources_dict =\
            yamlparser.simple_ordered_parse(nested_resources_yaml)
        if metadata.get('vdus'):
            for vdu_name, metadata_dict in metadata['vdus'].items():
                if nested_resources_dict['resources'].get(vdu_name):
                    nested_resources_dict['resources'][vdu_name][
                        'properties']['metadata'] = metadata_dict
        add_resources_tpl(nested_resources_dict, res_tpl)
        for res in nested_resources_dict["resources"].values():
            if not res['type'] == HEAT_SOFTWARE_CONFIG:
                continue
            config = res["properties"]["config"]
            if 'get_file' in config:
                # BUG FIX: close the config file instead of leaking the
                # handle until garbage collection.
                with open(config["get_file"]) as cfg_file:
                    res["properties"]["config"] = cfg_file.read()
        if unsupported_res_prop:
            convert_unsupported_res_prop(nested_resources_dict,
                                         unsupported_res_prop)
        for outputname, portname in mgmt_ports.items():
            ipval = {'get_attr': [portname, 'fixed_ips', 0, 'ip_address']}
            output = {outputname: {'value': ipval}}
            if 'outputs' in nested_resources_dict:
                nested_resources_dict['outputs'].update(output)
            else:
                nested_resources_dict['outputs'] = output
            LOG.debug(_('Added output for %s'), outputname)
        yaml.SafeDumper.add_representer(
            OrderedDict, lambda dumper, value: represent_odict(
                dumper, u'tag:yaml.org,2002:map', value))
        nested_tpl[nested_resource_name] =\
            yaml.safe_dump(nested_resources_dict)
    return nested_tpl
def _test_samples(self, files):
    """Verify each sample template parses and translates all the way to HOT."""
    if not files:
        return
    for sample in self._get_list_of_sample(files):
        with open(sample, 'r') as sample_file:
            content = sample_file.read()

            def _attempt(action):
                # Run the action, returning None on any failure so the
                # assertion below reports which stage broke.
                try:
                    return action()
                except:  # noqa
                    return None

            yaml_dict = _attempt(
                lambda: yamlparser.simple_ordered_parse(content))
            self.assertIsNotNone(
                yaml_dict, "Yaml parser failed to parse %s" % sample)
            utils.updateimports(yaml_dict)

            tosca = _attempt(lambda: tosca_template.ToscaTemplate(
                a_file=False, yaml_dict_tpl=yaml_dict))
            self.assertIsNotNone(
                tosca, "Tosca parser failed to parse %s" % sample)
            utils.post_process_template(tosca)

            hot = _attempt(lambda: tosca_translator.TOSCATranslator(
                tosca, {}).translate())
            self.assertIsNotNone(
                hot, "Heat-translator failed to translate %s" % sample)
def create(self, plugin, context, device):
    """Create a Heat stack for the device and return its stack id.

    Builds the stack fields from the device template and per-device
    attributes, generating a HOT template from the VNFD when one is
    provided, then calls Heat to create the stack.
    """
    LOG.debug(_("device %s"), device)
    heatclient_ = HeatClient(context)
    attributes = device["device_template"]["attributes"].copy()
    vnfd_yaml = attributes.pop("vnfd", None)
    # Seed stack fields from the template attributes.
    fields = dict(
        (key, attributes.pop(key))
        for key in ("stack_name", "template_url", "template")
        if key in attributes
    )
    for key in ("files", "parameters"):
        if key in attributes:
            fields[key] = jsonutils.loads(attributes.pop(key))
    # overwrite parameters with given dev_attrs for device creation
    dev_attrs = device["attributes"].copy()
    fields.update(
        dict((key, dev_attrs.pop(key))
             for key in ("stack_name", "template_url", "template")
             if key in dev_attrs)
    )
    for key in ("files", "parameters"):
        if key in dev_attrs:
            fields.setdefault(key, {}).update(
                jsonutils.loads(dev_attrs.pop(key)))
    LOG.debug("vnfd_yaml %s", vnfd_yaml)
    if vnfd_yaml is not None:
        # A VNFD and an explicit template are mutually exclusive.
        assert "template" not in fields
        assert "template_url" not in fields
        # NOTE(review): yaml.load without an explicit Loader -- presumably
        # HEAT_TEMPLATE_BASE is a trusted constant; confirm and consider
        # yaml.safe_load.
        template_dict = yaml.load(HEAT_TEMPLATE_BASE)
        outputs_dict = {}
        template_dict["outputs"] = outputs_dict
        vnfd_dict = yamlparser.simple_ordered_parse(vnfd_yaml)
        LOG.debug("vnfd_dict %s", vnfd_dict)
        if "get_input" in vnfd_yaml:
            self._process_parameterized_input(dev_attrs, vnfd_dict)
        KEY_LIST = (("description", "description"),)
        for (key, vnfd_key) in KEY_LIST:
            if vnfd_key in vnfd_dict:
                template_dict[key] = vnfd_dict[vnfd_key]
        monitoring_dict = {"vdus": {}}
        # One OS::Nova::Server resource per VDU.
        for vdu_id, vdu_dict in vnfd_dict.get("vdus", {}).items():
            template_dict.setdefault("resources", {})[vdu_id] = {
                "type": "OS::Nova::Server"}
            resource_dict = template_dict["resources"][vdu_id]
            KEY_LIST = (("image", "vm_image"), ("flavor", "instance_type"))
            resource_dict["properties"] = {}
            properties = resource_dict["properties"]
            for (key, vdu_key) in KEY_LIST:
                properties[key] = vdu_dict[vdu_key]
            if "network_interfaces" in vdu_dict:
                self._process_vdu_network_interfaces(vdu_id, vdu_dict,
                                                     properties,
                                                     template_dict)
            # user_data and user_data_format must be given together.
            if "user_data" in vdu_dict and "user_data_format" in vdu_dict:
                properties["user_data_format"] = \
                    vdu_dict["user_data_format"]
                properties["user_data"] = vdu_dict["user_data"]
            elif "user_data" in vdu_dict or "user_data_format" in vdu_dict:
                raise vnfm.UserDataFormatNotFound()
            if ("placement_policy" in vdu_dict and
                    "availability_zone" in vdu_dict["placement_policy"]):
                properties["availability_zone"] = \
                    vdu_dict["placement_policy"]["availability_zone"]
            if "config" in vdu_dict:
                properties["config_drive"] = True
                metadata = properties.setdefault("metadata", {})
                metadata.update(vdu_dict["config"])
                # Nova metadata values are capped at 255 characters.
                for key, value in metadata.items():
                    metadata[key] = value[:255]
            monitoring_policy = vdu_dict.get("monitoring_policy", "noop")
            failure_policy = vdu_dict.get("failure_policy", "noop")
            # Convert the old monitoring specification to the new format
            # This should be removed after Mitaka
            if monitoring_policy == "ping" and failure_policy == "respawn":
                vdu_dict["monitoring_policy"] = {
                    "ping": {"actions": {"failure": "respawn"}}}
                vdu_dict.pop("failure_policy")
            if monitoring_policy != "noop":
                monitoring_dict["vdus"][vdu_id] = \
                    vdu_dict["monitoring_policy"]
            # to pass necessary parameters to plugin upwards.
            for key in ("service_type",):
                if key in vdu_dict:
                    device.setdefault("attributes", {})[vdu_id] = \
                        jsonutils.dumps({key: vdu_dict[key]})
        if monitoring_dict.keys():
            device["attributes"]["monitoring_policy"] = jsonutils.dumps(
                monitoring_dict)
        heat_template_yaml = yaml.dump(template_dict)
        fields["template"] = heat_template_yaml
        if not device["attributes"].get("heat_template"):
            device["attributes"]["heat_template"] = heat_template_yaml
    if "stack_name" not in fields:
        # Derive a unique stack name from module, class and device id.
        name = (__name__ + "_" + self.__class__.__name__ + "-" +
                device["id"])
        if device["attributes"].get("failure_count"):
            name += ("-%s") % str(device["attributes"]["failure_count"])
        fields["stack_name"] = name
    # service context is ignored
    LOG.debug(_("service_context: %s"), device.get("service_context", []))
    LOG.debug(_("fields: %s"), fields)
    LOG.debug(_("template: %s"), fields["template"])
    stack = heatclient_.create(fields)
    return stack["stack"]["id"]
def generate_hot_alarm_resource(topology_tpl_dict, heat_tpl):
    """Convert TOSCA alarm policies into Heat Aodh alarm resources.

    NOTE(review): reads `vnf` from the enclosing scope -- confirm where
    it is bound.  Also produces a deep-copied sub-template
    (sub_heat_dict) carrying per-VDU metering metadata for the scaling
    case.

    :returns: tuple (is_enabled_alarm, alarm_resource, heat_tpl_yaml,
              sub_heat_tpl_yaml)
    """
    alarm_resource = dict()
    heat_dict = yamlparser.simple_ordered_parse(heat_tpl)
    sub_heat_dict = copy.deepcopy(heat_dict)
    is_enabled_alarm = False

    def _convert_to_heat_monitoring_prop(mon_policy):
        # Build the Aodh alarm properties from the policy's
        # 'resize_compute' trigger definition.
        name, mon_policy_dict = list(mon_policy.items())[0]
        tpl_trigger_name = \
            mon_policy_dict['triggers']['resize_compute']
        tpl_condition = tpl_trigger_name['condition']
        properties = {}
        properties['meter_name'] = tpl_trigger_name['metrics']
        properties['comparison_operator'] = \
            tpl_condition['comparison_operator']
        properties['period'] = tpl_condition['period']
        properties['evaluation_periods'] = tpl_condition['evaluations']
        properties['statistic'] = tpl_condition['method']
        properties['description'] = tpl_condition['constraint']
        properties['threshold'] = tpl_condition['threshold']
        # alarm url process here
        alarm_url = str(vnf['attributes'].get('alarm_url'))
        if alarm_url:
            LOG.debug('Alarm url in heat %s', alarm_url)
            properties['alarm_actions'] = [alarm_url]
        return properties

    def _convert_to_heat_monitoring_resource(mon_policy):
        mon_policy_hot = {'type': 'OS::Aodh::Alarm'}
        mon_policy_hot['properties'] = \
            _convert_to_heat_monitoring_prop(mon_policy)
        # When a Scaling policy is also present, tag the sub-template's
        # VDU with metering metadata and make the alarm match on it.
        if 'policies' in topology_tpl_dict:
            for policies in topology_tpl_dict['policies']:
                policy_name, policy_dt = list(policies.items())[0]
                if policy_dt['type'] == \
                        'tosca.policy.tacker.Scaling':
                    # Fixed metadata. it will be fixed
                    # once targets are supported
                    metadata_dict = dict()
                    metadata_dict['metering.vnf_id'] = vnf['id']
                    sub_heat_dict['resources']['VDU1']['properties'][
                        'metadata'] = metadata_dict
                    matching_metadata_dict = dict()
                    matching_metadata_dict[
                        'metadata.user_metadata.vnf_id'] =\
                        vnf['id']
                    mon_policy_hot['properties']['matching_metadata'] =\
                        matching_metadata_dict
                    break
        return mon_policy_hot

    if 'policies' in topology_tpl_dict:
        for policy_dict in topology_tpl_dict['policies']:
            name, policy_tpl_dict = list(policy_dict.items())[0]
            if policy_tpl_dict['type'] == \
                    'tosca.policies.tacker.Alarming':
                is_enabled_alarm = True
                alarm_resource[name] =\
                    _convert_to_heat_monitoring_resource(policy_dict)
                heat_dict['resources'].update(alarm_resource)
                # Only the first Alarming policy is converted.
                break

    heat_tpl_yaml = yaml.dump(heat_dict)
    sub_heat_tpl_yaml = yaml.dump(sub_heat_dict)
    return (is_enabled_alarm, alarm_resource, heat_tpl_yaml,
            sub_heat_tpl_yaml)
def create(self, plugin, context, device):
    """Create a Heat stack for the device and return its stack id.

    Builds the stack fields from the device template and per-device
    attributes, generating a HOT template from the VNFD when one is
    provided, then calls Heat to create the stack.
    """
    LOG.debug(_('device %s'), device)
    heatclient_ = HeatClient(context)
    attributes = device['device_template']['attributes'].copy()
    vnfd_yaml = attributes.pop('vnfd', None)
    # Seed stack fields from the template attributes.
    fields = dict((key, attributes.pop(key)) for key
                  in ('stack_name', 'template_url', 'template')
                  if key in attributes)
    for key in ('files', 'parameters'):
        if key in attributes:
            fields[key] = jsonutils.loads(attributes.pop(key))
    # overwrite parameters with given dev_attrs for device creation
    dev_attrs = device['attributes'].copy()
    fields.update(dict((key, dev_attrs.pop(key)) for key
                  in ('stack_name', 'template_url', 'template')
                  if key in dev_attrs))
    for key in ('files', 'parameters'):
        if key in dev_attrs:
            fields.setdefault(key, {}).update(
                jsonutils.loads(dev_attrs.pop(key)))
    LOG.debug('vnfd_yaml %s', vnfd_yaml)
    if vnfd_yaml is not None:
        # A VNFD and an explicit template are mutually exclusive.
        assert 'template' not in fields
        assert 'template_url' not in fields
        # NOTE(review): yaml.load without an explicit Loader -- presumably
        # HEAT_TEMPLATE_BASE is a trusted constant; confirm and consider
        # yaml.safe_load.
        template_dict = yaml.load(HEAT_TEMPLATE_BASE)
        outputs_dict = {}
        template_dict['outputs'] = outputs_dict
        vnfd_dict = yamlparser.simple_ordered_parse(vnfd_yaml)
        LOG.debug('vnfd_dict %s', vnfd_dict)
        if 'get_input' in vnfd_yaml:
            self._process_parameterized_input(dev_attrs, vnfd_dict)
        KEY_LIST = (('description', 'description'), )
        for (key, vnfd_key) in KEY_LIST:
            if vnfd_key in vnfd_dict:
                template_dict[key] = vnfd_dict[vnfd_key]
        monitoring_dict = {'vdus': {}}
        # One OS::Nova::Server resource per VDU.
        for vdu_id, vdu_dict in vnfd_dict.get('vdus', {}).items():
            template_dict.setdefault('resources', {})[vdu_id] = {
                "type": "OS::Nova::Server"
            }
            resource_dict = template_dict['resources'][vdu_id]
            KEY_LIST = (('image', 'vm_image'),
                        ('flavor', 'instance_type'))
            resource_dict['properties'] = {}
            properties = resource_dict['properties']
            for (key, vdu_key) in KEY_LIST:
                properties[key] = vdu_dict[vdu_key]
            if 'network_interfaces' in vdu_dict:
                self._process_vdu_network_interfaces(vdu_id, vdu_dict,
                                                     properties,
                                                     template_dict)
            # user_data and user_data_format must be given together.
            if 'user_data' in vdu_dict and 'user_data_format' in vdu_dict:
                properties['user_data_format'] = vdu_dict[
                    'user_data_format']
                properties['user_data'] = vdu_dict['user_data']
            elif 'user_data' in vdu_dict or 'user_data_format' in vdu_dict:
                raise vnfm.UserDataFormatNotFound()
            if ('placement_policy' in vdu_dict and
                    'availability_zone' in vdu_dict['placement_policy']):
                properties['availability_zone'] = vdu_dict[
                    'placement_policy']['availability_zone']
            if 'config' in vdu_dict:
                properties['config_drive'] = True
                metadata = properties.setdefault('metadata', {})
                metadata.update(vdu_dict['config'])
                # Nova metadata values are capped at 255 characters.
                for key, value in metadata.items():
                    metadata[key] = value[:255]
            monitoring_policy = vdu_dict.get('monitoring_policy', 'noop')
            failure_policy = vdu_dict.get('failure_policy', 'noop')
            # Convert the old monitoring specification to the new format
            # This should be removed after Mitaka
            if monitoring_policy == 'ping' and failure_policy == 'respawn':
                vdu_dict['monitoring_policy'] = {'ping': {
                                                 'actions': {
                                                     'failure': 'respawn'
                                                 }}}
                vdu_dict.pop('failure_policy')
            if monitoring_policy != 'noop':
                monitoring_dict['vdus'][vdu_id] = \
                    vdu_dict['monitoring_policy']
            # to pass necessary parameters to plugin upwards.
            for key in ('service_type',):
                if key in vdu_dict:
                    device.setdefault(
                        'attributes', {})[vdu_id] = jsonutils.dumps(
                        {key: vdu_dict[key]})
        if monitoring_dict.keys():
            device['attributes']['monitoring_policy'] = jsonutils.dumps(
                monitoring_dict)
        heat_template_yaml = yaml.dump(template_dict)
        fields['template'] = heat_template_yaml
        if not device['attributes'].get('heat_template'):
            device['attributes']['heat_template'] = heat_template_yaml
    if 'stack_name' not in fields:
        # Derive a unique stack name from module, class and device id.
        name = (__name__ + '_' + self.__class__.__name__ + '-' +
                device['id'])
        if device['attributes'].get('failure_count'):
            name += ('-%s') % str(device['attributes']['failure_count'])
        fields['stack_name'] = name
    # service context is ignored
    LOG.debug(_('service_context: %s'), device.get('service_context', []))
    LOG.debug(_('fields: %s'), fields)
    LOG.debug(_('template: %s'), fields['template'])
    stack = heatclient_.create(fields)
    return stack['stack']['id']
def create(self, plugin, context, device, auth_attr):
    """Create a Heat stack for *device* and return the new stack id.

    Supports two VNFD flavors: a TOSCA template (parsed with
    tosca-parser and translated to HOT via heat-translator) and the
    legacy tacker VNFD format (HOT template assembled by hand below).

    :param plugin: VNFM plugin instance (unused here directly).
    :param context: request context (unused here directly).
    :param device: device dict carrying 'device_template' and
        'attributes'; mutated to record the generated heat template
        and monitoring policy.
    :param auth_attr: credentials passed through to HeatClient.
    :returns: the id of the created Heat stack.
    :raises vnfm.ToscaParserFailed: when tosca-parser rejects the VNFD.
    :raises vnfm.HeatTranslatorFailed: when heat-translator fails.
    :raises vnfm.UserDataFormatNotFound: when only one of user_data /
        user_data_format is supplied in a VDU.
    """
    LOG.debug(_('device %s'), device)
    # Seed the Heat stack-create fields from the device template...
    attributes = device['device_template']['attributes'].copy()
    vnfd_yaml = attributes.pop('vnfd', None)
    fields = dict((key, attributes.pop(key)) for key
                  in ('stack_name', 'template_url', 'template')
                  if key in attributes)
    for key in ('files', 'parameters'):
        if key in attributes:
            fields[key] = jsonutils.loads(attributes.pop(key))

    # overwrite parameters with given dev_attrs for device creation
    dev_attrs = device['attributes'].copy()
    fields.update(dict((key, dev_attrs.pop(key)) for key
                  in ('stack_name', 'template_url', 'template')
                  if key in dev_attrs))
    for key in ('files', 'parameters'):
        if key in dev_attrs:
            fields.setdefault(key, {}).update(
                jsonutils.loads(dev_attrs.pop(key)))

    region_name = device.get('placement_attr', {}).get('region_name', None)
    heatclient_ = HeatClient(auth_attr, region_name)
    # Properties the target Heat version cannot handle; forwarded to the
    # template post-processing helpers below.
    unsupported_res_prop = self.fetch_unsupported_resource_prop(
        heatclient_)

    LOG.debug('vnfd_yaml %s', vnfd_yaml)
    if vnfd_yaml is not None:
        vnfd_dict = yamlparser.simple_ordered_parse(vnfd_yaml)
        LOG.debug('vnfd_dict %s', vnfd_dict)

        monitoring_dict = {'vdus': {}}

        if 'tosca_definitions_version' in vnfd_dict:
            # TOSCA path: parse, extract monitoring/ports/resources,
            # then translate to HOT.
            parsed_params = dev_attrs.pop('param_values', {})

            toscautils.updateimports(vnfd_dict)

            try:
                tosca = ToscaTemplate(parsed_params=parsed_params,
                                      a_file=False, yaml_dict_tpl=vnfd_dict)
            except Exception as e:
                LOG.debug("tosca-parser error: %s", str(e))
                raise vnfm.ToscaParserFailed(error_msg_details=str(e))

            monitoring_dict = toscautils.get_vdu_monitoring(tosca)
            mgmt_ports = toscautils.get_mgmt_ports(tosca)
            res_tpl = toscautils.get_resources_dict(tosca,
                                                    STACK_FLAVOR_EXTRA)
            toscautils.post_process_template(tosca)
            try:
                translator = TOSCATranslator(tosca, parsed_params)
                heat_template_yaml = translator.translate()
            except Exception as e:
                LOG.debug("heat-translator error: %s", str(e))
                raise vnfm.HeatTranslatorFailed(error_msg_details=str(e))
            heat_template_yaml = toscautils.post_process_heat_template(
                heat_template_yaml, mgmt_ports, res_tpl,
                unsupported_res_prop)
        else:
            # Legacy path: hand-build the HOT template from the VNFD.
            assert 'template' not in fields
            assert 'template_url' not in fields
            # NOTE(review): yaml.load without an explicit Loader is
            # deprecated/unsafe in newer PyYAML; input here is the
            # module constant HEAT_TEMPLATE_BASE, so exposure is
            # limited — confirm before upgrading PyYAML.
            template_dict = yaml.load(HEAT_TEMPLATE_BASE)
            outputs_dict = {}
            template_dict['outputs'] = outputs_dict

            if 'get_input' in vnfd_yaml:
                self._process_parameterized_input(dev_attrs, vnfd_dict)

            KEY_LIST = (('description', 'description'), )
            for (key, vnfd_key) in KEY_LIST:
                if vnfd_key in vnfd_dict:
                    template_dict[key] = vnfd_dict[vnfd_key]

            for vdu_id, vdu_dict in vnfd_dict.get('vdus', {}).items():
                template_dict.setdefault('resources', {})[vdu_id] = {
                    "type": "OS::Nova::Server"
                }
                resource_dict = template_dict['resources'][vdu_id]
                KEY_LIST = (('image', 'vm_image'),
                            ('flavor', 'instance_type'))
                resource_dict['properties'] = {}
                properties = resource_dict['properties']
                for (key, vdu_key) in KEY_LIST:
                    properties[key] = vdu_dict[vdu_key]
                if 'network_interfaces' in vdu_dict:
                    self._process_vdu_network_interfaces(
                        vdu_id, vdu_dict, properties, template_dict,
                        unsupported_res_prop)
                # user_data and user_data_format must be given together.
                if ('user_data' in vdu_dict and
                        'user_data_format' in vdu_dict):
                    properties['user_data_format'] = vdu_dict[
                        'user_data_format']
                    properties['user_data'] = vdu_dict['user_data']
                elif ('user_data' in vdu_dict or
                        'user_data_format' in vdu_dict):
                    raise vnfm.UserDataFormatNotFound()
                if 'placement_policy' in vdu_dict:
                    if 'availability_zone' in vdu_dict['placement_policy']:
                        properties['availability_zone'] = vdu_dict[
                            'placement_policy']['availability_zone']
                if 'config' in vdu_dict:
                    properties['config_drive'] = True
                    metadata = properties.setdefault('metadata', {})
                    metadata.update(vdu_dict['config'])
                    # Nova metadata values are capped at 255 chars.
                    for key, value in metadata.items():
                        metadata[key] = value[:255]

                monitoring_policy = vdu_dict.get('monitoring_policy',
                                                 'noop')
                failure_policy = vdu_dict.get('failure_policy', 'noop')

                # Convert the old monitoring specification to the new
                # format. This should be removed after Mitaka
                if (monitoring_policy == 'ping' and
                        failure_policy == 'respawn'):
                    vdu_dict['monitoring_policy'] = {
                        'ping': {'actions': {'failure': 'respawn'}}}
                    vdu_dict.pop('failure_policy')

                if monitoring_policy != 'noop':
                    monitoring_dict['vdus'][vdu_id] = \
                        vdu_dict['monitoring_policy']

                # to pass necessary parameters to plugin upwards.
                for key in ('service_type',):
                    if key in vdu_dict:
                        device.setdefault(
                            'attributes', {})[vdu_id] = jsonutils.dumps(
                                {key: vdu_dict[key]})

            heat_template_yaml = yaml.dump(template_dict)
        fields['template'] = heat_template_yaml
        if not device['attributes'].get('heat_template'):
            device['attributes']['heat_template'] = \
                heat_template_yaml
        if monitoring_dict.keys():
            device['attributes']['monitoring_policy'] = \
                jsonutils.dumps(monitoring_dict)

    if 'stack_name' not in fields:
        # Derive a unique stack name; respawn attempts get a
        # failure-count suffix so they do not collide.
        name = (__name__ + '_' + self.__class__.__name__ + '-' +
                device['id'])
        if device['attributes'].get('failure_count'):
            name += ('-%s') % str(device['attributes']['failure_count'])
        fields['stack_name'] = name

    # service context is ignored
    LOG.debug(_('service_context: %s'), device.get('service_context', []))

    LOG.debug(_('fields: %s'), fields)
    LOG.debug(_('template: %s'), fields['template'])
    stack = heatclient_.create(fields)
    return stack['stack']['id']
def generate_hot_alarm_resource(topology_tpl_dict, heat_tpl):
    """Inject OS::Aodh::Alarm resources for tacker Alarming policies.

    Scans the TOSCA topology for a ``tosca.policies.tacker.Alarming``
    policy and, if found, converts its trigger definition into an Aodh
    alarm resource that is merged into the HOT template.

    NOTE(review): this function reads a free name ``vnf`` (for
    'alarm_url' and 'id') that is not a parameter — presumably a
    module-level or closure variable; verify where it is bound.

    :param topology_tpl_dict: TOSCA topology_template dict (reads the
        'policies' list).
    :param heat_tpl: HOT template YAML string to augment.
    :returns: tuple (is_enabled_alarm, alarm_resource, heat_tpl_yaml,
        sub_heat_tpl_yaml) — the flag, the generated alarm resource
        dict, the augmented template and an unaugmented deep copy used
        as the scaling sub-template.
    """
    alarm_resource = dict()
    heat_dict = yamlparser.simple_ordered_parse(heat_tpl)
    # Deep copy taken BEFORE alarm injection: the sub-template keeps the
    # original resources (plus metering metadata added below).
    sub_heat_dict = copy.deepcopy(heat_dict)
    is_enabled_alarm = False

    def _convert_to_heat_monitoring_prop(mon_policy):
        # Map the TOSCA 'resize_compute' trigger condition onto
        # OS::Aodh::Alarm properties.
        name, mon_policy_dict = list(mon_policy.items())[0]
        tpl_trigger_name = \
            mon_policy_dict['triggers']['resize_compute']
        tpl_condition = tpl_trigger_name['condition']
        properties = {}
        properties['meter_name'] = tpl_trigger_name['metrics']
        properties['comparison_operator'] = \
            tpl_condition['comparison_operator']
        properties['period'] = tpl_condition['period']
        properties['evaluation_periods'] = tpl_condition['evaluations']
        properties['statistic'] = tpl_condition['method']
        properties['description'] = tpl_condition['constraint']
        properties['threshold'] = tpl_condition['threshold']
        # alarm url process here
        alarm_url = str(vnf['attributes'].get('alarm_url'))
        if alarm_url:
            LOG.debug('Alarm url in heat %s', alarm_url)
            properties['alarm_actions'] = [alarm_url]
        return properties

    def _convert_to_heat_monitoring_resource(mon_policy):
        mon_policy_hot = {'type': 'OS::Aodh::Alarm'}
        mon_policy_hot['properties'] = \
            _convert_to_heat_monitoring_prop(mon_policy)
        if 'policies' in topology_tpl_dict:
            for policies in topology_tpl_dict['policies']:
                policy_name, policy_dt = list(policies.items())[0]
                # NOTE(review): 'tosca.policy.tacker.Scaling' (singular
                # 'policy') differs from the plural used for Alarming
                # below — confirm the intended type string.
                if policy_dt['type'] == \
                        'tosca.policy.tacker.Scaling':
                    # Fixed metadata. it will be fixed
                    # once targets are supported
                    metadata_dict = dict()
                    metadata_dict['metering.vnf_id'] = vnf['id']
                    # NOTE(review): resource name 'VDU1' is hard-coded;
                    # only single-VDU templates are handled here.
                    sub_heat_dict['resources']['VDU1']['properties']['metadata'] =\
                        metadata_dict
                    matching_metadata_dict = dict()
                    matching_metadata_dict['metadata.user_metadata.vnf_id'] =\
                        vnf['id']
                    mon_policy_hot['properties']['matching_metadata'] =\
                        matching_metadata_dict
                    break
        return mon_policy_hot

    if 'policies' in topology_tpl_dict:
        for policy_dict in topology_tpl_dict['policies']:
            name, policy_tpl_dict = list(policy_dict.items())[0]
            if policy_tpl_dict['type'] == \
                    'tosca.policies.tacker.Alarming':
                is_enabled_alarm = True
                alarm_resource[name] =\
                    _convert_to_heat_monitoring_resource(policy_dict)
                heat_dict['resources'].update(alarm_resource)
                # Only the first Alarming policy is honored.
                break

    heat_tpl_yaml = yaml.dump(heat_dict)
    sub_heat_tpl_yaml = yaml.dump(sub_heat_dict)
    return (is_enabled_alarm, alarm_resource, heat_tpl_yaml,
            sub_heat_tpl_yaml)
def create(self, plugin, context, device):
    """Create a Heat stack for *device* (legacy VNFD format only).

    Builds the stack-create ``fields`` from template and device
    attributes, hand-assembles a HOT template from the 'vnfd' YAML
    when present, then calls Heat and returns the new stack id.

    :param plugin: VNFM plugin instance (unused here directly).
    :param context: request context, used to build the HeatClient.
    :param device: device dict; mutated to record the generated heat
        template and monitoring policy under 'attributes'.
    :returns: the id of the created Heat stack.
    :raises vnfm.UserDataFormatNotFound: when only one of user_data /
        user_data_format is supplied in a VDU.
    """
    LOG.debug(_('device %s'), device)
    heatclient_ = HeatClient(context)
    attributes = device['device_template']['attributes'].copy()
    vnfd_yaml = attributes.pop('vnfd', None)
    fields = dict((key, attributes.pop(key)) for key
                  in ('stack_name', 'template_url', 'template')
                  if key in attributes)
    for key in ('files', 'parameters'):
        if key in attributes:
            fields[key] = jsonutils.loads(attributes.pop(key))

    # overwrite parameters with given dev_attrs for device creation
    dev_attrs = device['attributes'].copy()
    fields.update(
        dict((key, dev_attrs.pop(key)) for key
             in ('stack_name', 'template_url', 'template')
             if key in dev_attrs))
    for key in ('files', 'parameters'):
        if key in dev_attrs:
            fields.setdefault(key, {}).update(
                jsonutils.loads(dev_attrs.pop(key)))

    LOG.debug('vnfd_yaml %s', vnfd_yaml)
    if vnfd_yaml is not None:
        # A vnfd implies we generate the template ourselves; explicit
        # template/template_url would conflict.
        assert 'template' not in fields
        assert 'template_url' not in fields
        # NOTE(review): yaml.load without an explicit Loader is
        # deprecated/unsafe in newer PyYAML; input here is the module
        # constant HEAT_TEMPLATE_BASE, so exposure is limited.
        template_dict = yaml.load(HEAT_TEMPLATE_BASE)
        outputs_dict = {}
        template_dict['outputs'] = outputs_dict

        vnfd_dict = yamlparser.simple_ordered_parse(vnfd_yaml)
        LOG.debug('vnfd_dict %s', vnfd_dict)

        if 'get_input' in vnfd_yaml:
            self._process_parameterized_input(dev_attrs, vnfd_dict)

        KEY_LIST = (('description', 'description'), )
        for (key, vnfd_key) in KEY_LIST:
            if vnfd_key in vnfd_dict:
                template_dict[key] = vnfd_dict[vnfd_key]

        monitoring_dict = {'vdus': {}}

        for vdu_id, vdu_dict in vnfd_dict.get('vdus', {}).items():
            template_dict.setdefault('resources', {})[vdu_id] = {
                "type": "OS::Nova::Server"
            }
            resource_dict = template_dict['resources'][vdu_id]
            KEY_LIST = (('image', 'vm_image'),
                        ('flavor', 'instance_type'))
            resource_dict['properties'] = {}
            properties = resource_dict['properties']
            for (key, vdu_key) in KEY_LIST:
                properties[key] = vdu_dict[vdu_key]
            if 'network_interfaces' in vdu_dict:
                self._process_vdu_network_interfaces(
                    vdu_id, vdu_dict, properties, template_dict)
            # user_data and user_data_format must be given together.
            if 'user_data' in vdu_dict and 'user_data_format' in vdu_dict:
                properties['user_data_format'] = vdu_dict[
                    'user_data_format']
                properties['user_data'] = vdu_dict['user_data']
            elif 'user_data' in vdu_dict or 'user_data_format' in vdu_dict:
                raise vnfm.UserDataFormatNotFound()
            if ('placement_policy' in vdu_dict and
                    'availability_zone' in vdu_dict['placement_policy']):
                properties['availability_zone'] = vdu_dict[
                    'placement_policy']['availability_zone']
            if 'config' in vdu_dict:
                properties['config_drive'] = True
                metadata = properties.setdefault('metadata', {})
                metadata.update(vdu_dict['config'])
                # Nova metadata values are capped at 255 chars.
                for key, value in metadata.items():
                    metadata[key] = value[:255]

            monitoring_policy = vdu_dict.get('monitoring_policy', 'noop')
            failure_policy = vdu_dict.get('failure_policy', 'noop')

            # Convert the old monitoring specification to the new format
            # This should be removed after Mitaka
            if monitoring_policy == 'ping' and failure_policy == 'respawn':
                vdu_dict['monitoring_policy'] = {
                    'ping': {
                        'actions': {
                            'failure': 'respawn'
                        }
                    }
                }
                vdu_dict.pop('failure_policy')

            if monitoring_policy != 'noop':
                monitoring_dict['vdus'][vdu_id] = \
                    vdu_dict['monitoring_policy']

            # to pass necessary parameters to plugin upwards.
            for key in ('service_type', ):
                if key in vdu_dict:
                    device.setdefault('attributes',
                                      {})[vdu_id] = jsonutils.dumps(
                                          {key: vdu_dict[key]})

        if monitoring_dict.keys():
            device['attributes']['monitoring_policy'] = jsonutils.dumps(
                monitoring_dict)

        heat_template_yaml = yaml.dump(template_dict)
        fields['template'] = heat_template_yaml
        if not device['attributes'].get('heat_template'):
            device['attributes']['heat_template'] = heat_template_yaml

    if 'stack_name' not in fields:
        # Derive a unique stack name; respawn attempts get a
        # failure-count suffix so they do not collide.
        name = (__name__ + '_' + self.__class__.__name__ + '-' +
                device['id'])
        if device['attributes'].get('failure_count'):
            name += ('-%s') % str(device['attributes']['failure_count'])
        fields['stack_name'] = name

    # service context is ignored
    LOG.debug(_('service_context: %s'), device.get('service_context', []))

    LOG.debug(_('fields: %s'), fields)
    LOG.debug(_('template: %s'), fields['template'])
    stack = heatclient_.create(fields)
    return stack['stack']['id']
def post_process_heat_template(heat_tpl, mgmt_ports, metadata,
                               alarm_resources, res_tpl, vol_res=None,
                               unsupported_res_prop=None, unique_id=None):
    """Post-process a heat-translator generated HOT template.

    Adds management-IP outputs, per-VDU metering metadata, alarm query
    metadata / actions / event types, resource parameters, inlined
    software-config files and volume resources, then re-serializes the
    template to YAML.

    :param heat_tpl: HOT template string produced by heat-translator.
    :param mgmt_ports: mapping of output name -> port resource name.
    :param metadata: dict with an optional 'vdus' key mapping VDU name
        to its metadata dict.
    :param alarm_resources: dict with optional 'query_metadata',
        'alarm_actions' and 'event_types' sub-dicts keyed by trigger
        (alarm resource) name.
    :param res_tpl: resource parameters merged by add_resources_tpl().
    :param vol_res: optional volume description dict (checked for a
        'volumes' key); defaults to an empty dict.
    :param unsupported_res_prop: properties unsupported by the target
        Heat version, converted by convert_unsupported_res_prop().
    :param unique_id: suffix making 'metering.server_group' unique.
    :returns: the processed template as a YAML string.
    """
    # Avoid the shared mutable-default-argument pitfall; behavior is
    # unchanged because an empty vol_res is never read past .get().
    if vol_res is None:
        vol_res = {}

    #
    # TODO(bobh) - remove when heat-translator can support literal strings.
    #
    def fix_user_data(user_data_string):
        # BUG FIX: re.MULTILINE was being passed as re.sub()'s *count*
        # positional argument (re.MULTILINE == 8), silently limiting the
        # number of substitutions. Pass it as flags= instead.
        user_data_string = re.sub('user_data: #', 'user_data: |\n #',
                                  user_data_string, flags=re.MULTILINE)
        return re.sub('\n\n', '\n', user_data_string, flags=re.MULTILINE)

    heat_tpl = fix_user_data(heat_tpl)
    #
    # End temporary workaround for heat-translator
    #
    heat_dict = yamlparser.simple_ordered_parse(heat_tpl)

    # Expose each management port's fixed IP address as a stack output.
    for outputname, portname in mgmt_ports.items():
        ipval = {'get_attr': [portname, 'fixed_ips', 0, 'ip_address']}
        output = {outputname: {'value': ipval}}
        if 'outputs' in heat_dict:
            heat_dict['outputs'].update(output)
        else:
            heat_dict['outputs'] = output
        LOG.debug('Added output for %s', outputname)

    if metadata.get('vdus'):
        for vdu_name, metadata_dict in metadata['vdus'].items():
            # Make the metering server group unique per deployment;
            # truncated to 15 chars (presumably a Nova/Ceilometer name
            # length limit — TODO confirm).
            metadata_dict['metering.server_group'] = \
                (metadata_dict['metering.server_group'] + '-'
                 + unique_id)[:15]
            if heat_dict['resources'].get(vdu_name):
                heat_dict['resources'][vdu_name]['properties']['metadata'] =\
                    metadata_dict

    query_metadata = alarm_resources.get('query_metadata')
    alarm_actions = alarm_resources.get('alarm_actions')
    event_types = alarm_resources.get('event_types')
    if query_metadata:
        for trigger_name, matching_metadata_dict in query_metadata.items():
            if heat_dict['resources'].get(trigger_name):
                query_mtdata = dict()
                query_mtdata['query'] = \
                    query_metadata[trigger_name]
                heat_dict['resources'][trigger_name][
                    'properties'].update(query_mtdata)
    if alarm_actions:
        for trigger_name, alarm_actions_dict in alarm_actions.items():
            if heat_dict['resources'].get(trigger_name):
                heat_dict['resources'][trigger_name]['properties'].\
                    update(alarm_actions_dict)
    if event_types:
        for trigger_name, event_type in event_types.items():
            if heat_dict['resources'].get(trigger_name):
                heat_dict['resources'][trigger_name]['properties'].update(
                    event_type)

    add_resources_tpl(heat_dict, res_tpl)

    # Inline software-config scripts referenced via get_file so the
    # template is self-contained when sent to Heat.
    for res in heat_dict["resources"].values():
        if not res['type'] == HEAT_SOFTWARE_CONFIG:
            continue
        config = res["properties"]["config"]
        if 'get_file' in config:
            # BUG FIX: use a context manager so the file handle is
            # closed (was previously leaked by a bare open().read()).
            with open(config["get_file"]) as cfg_file:
                res["properties"]["config"] = cfg_file.read()

    if vol_res.get('volumes'):
        add_volume_resources(heat_dict, vol_res)
    if unsupported_res_prop:
        convert_unsupported_res_prop(heat_dict, unsupported_res_prop)

    # Teach the safe dumper to emit OrderedDict as a plain YAML map
    # (no python/object tags) so Heat can parse the result.
    yaml.SafeDumper.add_representer(
        OrderedDict,
        lambda dumper, value: represent_odict(dumper,
                                              u'tag:yaml.org,2002:map',
                                              value))

    return yaml.safe_dump(heat_dict)
def create(self, plugin, context, device, auth_attr):
    """Create a Heat stack for *device* and return the new stack id.

    Variant supporting both TOSCA VNFDs (translated via
    heat-translator) and the legacy tacker VNFD format; this version
    also honors per-VDU 'key_name' and tags respawn stacks with a
    '-RESPAWN-<count>' suffix.

    :param plugin: VNFM plugin instance (unused here directly).
    :param context: request context (unused here directly).
    :param device: device dict carrying 'device_template' and
        'attributes'; mutated to record the generated heat template
        and monitoring policy.
    :param auth_attr: credentials passed through to HeatClient.
    :returns: the id of the created Heat stack.
    :raises vnfm.ToscaParserFailed: when tosca-parser rejects the VNFD.
    :raises vnfm.HeatTranslatorFailed: when heat-translator fails.
    :raises vnfm.UserDataFormatNotFound: when only one of user_data /
        user_data_format is supplied in a VDU.
    """
    LOG.debug(_('device %s'), device)
    # Seed the Heat stack-create fields from the device template...
    attributes = device['device_template']['attributes'].copy()
    vnfd_yaml = attributes.pop('vnfd', None)
    fields = dict((key, attributes.pop(key)) for key
                  in ('stack_name', 'template_url', 'template')
                  if key in attributes)
    for key in ('files', 'parameters'):
        if key in attributes:
            fields[key] = jsonutils.loads(attributes.pop(key))

    # overwrite parameters with given dev_attrs for device creation
    dev_attrs = device['attributes'].copy()
    fields.update(
        dict((key, dev_attrs.pop(key)) for key
             in ('stack_name', 'template_url', 'template')
             if key in dev_attrs))
    for key in ('files', 'parameters'):
        if key in dev_attrs:
            fields.setdefault(key, {}).update(
                jsonutils.loads(dev_attrs.pop(key)))

    region_name = device.get('placement_attr', {}).get('region_name', None)
    heatclient_ = HeatClient(auth_attr, region_name)
    # Properties the target Heat version cannot handle; forwarded to
    # the template post-processing helpers below.
    unsupported_res_prop = self.fetch_unsupported_resource_prop(
        heatclient_)

    LOG.debug('vnfd_yaml %s', vnfd_yaml)
    if vnfd_yaml is not None:
        vnfd_dict = yamlparser.simple_ordered_parse(vnfd_yaml)
        LOG.debug('vnfd_dict %s', vnfd_dict)

        monitoring_dict = {'vdus': {}}

        if 'tosca_definitions_version' in vnfd_dict:
            # TOSCA path: parse, extract monitoring/ports/resources,
            # then translate to HOT.
            parsed_params = dev_attrs.pop('param_values', {})

            toscautils.updateimports(vnfd_dict)

            try:
                tosca = ToscaTemplate(parsed_params=parsed_params,
                                      a_file=False, yaml_dict_tpl=vnfd_dict)
            except Exception as e:
                LOG.debug("tosca-parser error: %s", str(e))
                raise vnfm.ToscaParserFailed(error_msg_details=str(e))

            monitoring_dict = toscautils.get_vdu_monitoring(tosca)
            mgmt_ports = toscautils.get_mgmt_ports(tosca)
            res_tpl = toscautils.get_resources_dict(
                tosca, STACK_FLAVOR_EXTRA)
            toscautils.post_process_template(tosca)
            try:
                translator = TOSCATranslator(tosca, parsed_params)
                heat_template_yaml = translator.translate()
            except Exception as e:
                LOG.debug("heat-translator error: %s", str(e))
                raise vnfm.HeatTranslatorFailed(error_msg_details=str(e))
            heat_template_yaml = toscautils.post_process_heat_template(
                heat_template_yaml, mgmt_ports, res_tpl,
                unsupported_res_prop)
        else:
            # Legacy path: hand-build the HOT template from the VNFD.
            assert 'template' not in fields
            assert 'template_url' not in fields
            # NOTE(review): yaml.load without an explicit Loader is
            # deprecated/unsafe in newer PyYAML; input here is the
            # module constant HEAT_TEMPLATE_BASE, so exposure is
            # limited — confirm before upgrading PyYAML.
            template_dict = yaml.load(HEAT_TEMPLATE_BASE)
            outputs_dict = {}
            template_dict['outputs'] = outputs_dict

            if 'get_input' in vnfd_yaml:
                self._process_parameterized_input(dev_attrs, vnfd_dict)

            KEY_LIST = (('description', 'description'), )
            for (key, vnfd_key) in KEY_LIST:
                if vnfd_key in vnfd_dict:
                    template_dict[key] = vnfd_dict[vnfd_key]

            for vdu_id, vdu_dict in vnfd_dict.get('vdus', {}).items():
                template_dict.setdefault('resources', {})[vdu_id] = {
                    "type": "OS::Nova::Server"
                }
                resource_dict = template_dict['resources'][vdu_id]
                KEY_LIST = (('image', 'vm_image'),
                            ('flavor', 'instance_type'))
                resource_dict['properties'] = {}
                properties = resource_dict['properties']
                for (key, vdu_key) in KEY_LIST:
                    properties[key] = vdu_dict[vdu_key]
                if 'network_interfaces' in vdu_dict:
                    self._process_vdu_network_interfaces(
                        vdu_id, vdu_dict, properties, template_dict,
                        unsupported_res_prop)
                # user_data and user_data_format must be given together.
                if ('user_data' in vdu_dict and
                        'user_data_format' in vdu_dict):
                    properties['user_data_format'] = vdu_dict[
                        'user_data_format']
                    properties['user_data'] = vdu_dict['user_data']
                elif ('user_data' in vdu_dict or
                        'user_data_format' in vdu_dict):
                    raise vnfm.UserDataFormatNotFound()
                if 'placement_policy' in vdu_dict:
                    if 'availability_zone' in vdu_dict['placement_policy']:
                        properties['availability_zone'] = vdu_dict[
                            'placement_policy']['availability_zone']
                if 'config' in vdu_dict:
                    properties['config_drive'] = True
                    metadata = properties.setdefault('metadata', {})
                    metadata.update(vdu_dict['config'])
                    # Nova metadata values are capped at 255 chars.
                    for key, value in metadata.items():
                        metadata[key] = value[:255]
                if 'key_name' in vdu_dict:
                    properties['key_name'] = vdu_dict['key_name']

                monitoring_policy = vdu_dict.get('monitoring_policy',
                                                 'noop')
                failure_policy = vdu_dict.get('failure_policy', 'noop')

                # Convert the old monitoring specification to the new
                # format. This should be removed after Mitaka
                if (monitoring_policy == 'ping' and
                        failure_policy == 'respawn'):
                    vdu_dict['monitoring_policy'] = {
                        'ping': {
                            'actions': {
                                'failure': 'respawn'
                            }
                        }
                    }
                    vdu_dict.pop('failure_policy')

                if monitoring_policy != 'noop':
                    monitoring_dict['vdus'][vdu_id] = \
                        vdu_dict['monitoring_policy']

                # to pass necessary parameters to plugin upwards.
                for key in ('service_type', ):
                    if key in vdu_dict:
                        device.setdefault('attributes',
                                          {})[vdu_id] = jsonutils.dumps(
                                              {key: vdu_dict[key]})

            heat_template_yaml = yaml.dump(template_dict)
        fields['template'] = heat_template_yaml
        if not device['attributes'].get('heat_template'):
            device['attributes']['heat_template'] = \
                heat_template_yaml
        if monitoring_dict.keys():
            device['attributes']['monitoring_policy'] = \
                jsonutils.dumps(monitoring_dict)

    if 'stack_name' not in fields:
        # Derive a unique stack name; respawn attempts are tagged with
        # the failure count so they do not collide.
        name = (__name__ + '_' + self.__class__.__name__ + '-' +
                device['id'])
        if device['attributes'].get('failure_count'):
            name += ('-RESPAWN-%s') % str(
                device['attributes']['failure_count'])
        fields['stack_name'] = name

    # service context is ignored
    LOG.debug(_('service_context: %s'), device.get('service_context', []))

    LOG.debug(_('fields: %s'), fields)
    LOG.debug(_('template: %s'), fields['template'])
    stack = heatclient_.create(fields)
    return stack['stack']['id']