def process_mgmt_intf(intf):
    """Record the management interface for this VNF.

    Populates self.mgmt_intf (protocol, port, vdu, dashboard params)
    and mirrors the port into self.props.  Only one management
    interface is allowed per VNF.

    Raises:
        ValidationError: if a management interface was already processed.
    """
    if len(self.mgmt_intf) > 0:
        # BUG FIX: err_msg was previously *called* as a function
        # (raising NameError instead of the intended ValidationError)
        # and the message referenced the misspelled self.msmg_intf.
        err_msg = _("{0}, Already processed another mgmt intf {1}, "
                    "got another {2}").format(self, self.mgmt_intf, intf)
        self.log.error(err_msg)
        raise ValidationError(message=err_msg)
    self.mgmt_intf['protocol'] = 'tcp'
    if self.VNF_CONFIG in intf:
        process_vnf_config(intf.pop(self.VNF_CONFIG))
    if self.PORT in intf:
        self.mgmt_intf[self.PORT] = intf.pop(self.PORT)
        self.props[self.PORT] = self.mgmt_intf[self.PORT]
    if 'vdu_id' in intf:
        # Resolve the yang vdu_id to the TOSCA node name of that VDU.
        for vdu in self.vdus:
            if intf['vdu_id'] == vdu.id:
                self.mgmt_intf[self.VDU] = vdu.get_name(self.name)
                intf.pop('vdu_id')
                break
    if self.DASHBOARD_PARAMS in intf:
        self.mgmt_intf[self.DASHBOARD_PARAMS] = \
            intf.pop(self.DASHBOARD_PARAMS)
    if len(intf):
        # Anything left in intf was not recognized above.
        self.log.warn(
            _("{0}, Did not process all in mgmt "
              "interface {1}").format(self, intf))
    self.log.debug(
        _("{0}, Management interface: {1}").format(
            self, self.mgmt_intf))
def set_vld(self, name, vld_name):
    """Attach the VLD called *vld_name* to the connection point *name*.

    Raises:
        ValidationError: if no connection point with that name exists.
    """
    conn_point = self.get_cp(name)
    if not conn_point:
        message = _("{0}, Did not find connection point {1}").format(
            self, name)
        self.log.error(message)
        raise ValidationError(message=message)
    conn_point[self.VLD] = vld_name
def set_cp_type(self, name, cp_type):
    """Set the type of the external connection point called *name*.

    Raises:
        ValidationError: if no external CP with that name exists.
    """
    for pos in range(len(self.ext_cp)):
        entry = self.ext_cp[pos]
        if entry[self.NAME] != name:
            continue
        entry[self.CP_TYPE] = cp_type
        self.ext_cp[pos] = entry
        self.log.debug(
            _("{0}, Updated CP: {1}").format(self, self.ext_cp[pos]))
        return
    message = _("{0}, Did not find connection point {1}").format(
        self, name)
    self.log.error(message)
    raise ValidationError(message=message)
def get_yangs(self):
    '''Get the descriptors and convert to yang instances.

    Reads each tar package listed in self.files, stores the package in
    self.pkgs, appends the descriptor dict to self.yangs keyed by
    descriptor type, and records the descriptor name in
    self.output_files under 'nsd' or 'vnfd'.

    Raises:
        ValidationError: if a descriptor has no name, or its type is
            neither NSD nor VNFD.
    '''
    type_to_key = {
        TranslateDescriptors.NSD: 'nsd',
        TranslateDescriptors.VNFD: 'vnfd',
    }
    for filename in self.files:
        self.log.debug(_("Load file {0}").format(filename))
        # Only one descriptor per file; non-tar files are skipped
        # silently (pre-existing behavior).
        if not tarfile.is_tarfile(filename):
            continue
        # NOTE(review): the tar handle is deliberately left open — the
        # package appended to self.pkgs extracts files from it later.
        tar = open(filename, "r+b")
        archive = TarPackageArchive(self.log, tar)
        pkg = archive.create_package()
        self.pkgs.append(pkg)
        desc_type = pkg.descriptor_type
        if desc_type not in type_to_key:
            raise ValidationError(
                "Unknown descriptor type: {}".format(desc_type))
        key = type_to_key[desc_type]
        # Hoisted: as_dict() was previously rebuilt on every access.
        desc = pkg.descriptor_msg.as_dict()
        self.yangs.setdefault(desc_type, []).append(desc)
        # BUG FIX: the original used the confusing chained comparison
        # "'name' in desc is not None", which only worked by accident.
        if 'name' in desc:
            self.output_files[key].append(desc['name'])
        else:
            raise ValidationError(
                message="{} Descriptor name attribute is not "
                        "populated ".format(key.upper()))
def process_scale_grp(dic):
    """Translate one yang scaling-group dict into self.scale_grps."""
    self.log.debug(_("{0}, scale group: {1}").format(self, dic))
    group = {}
    for field in (self.NAME, self.MIN_INST_COUNT, self.MAX_INST_COUNT):
        if field in dic:
            group[field] = dic.pop(field)
    # Map member VNFD indexes to names with their instance counts.
    members = {}
    for member in dic.pop(self.VNFD_MEMBERS):
        index = member[self.MEM_VNF_INDEX_REF]
        if index in self.vnfds:
            members[self.vnfds[index].name] = member[self.COUNT]
    group['vnfd_members'] = members
    if self.SCALE_ACT in dic:
        triggers = {}
        for action in dic.pop(self.SCALE_ACT):
            # Validate the primitive against the known config-primitives.
            primitive = action.pop(self.NS_CONF_PRIM_REF)
            for known in self.conf_prims:
                if known[self.NAME] == primitive:
                    triggers[action.pop(self.TRIGGER)] = primitive
                    break
            if len(action):
                # The trigger key was never consumed, so the primitive
                # was not found among self.conf_prims.
                message = _("{0}, Did not find config-primitive {1}").format(
                    self, primitive)
                self.log.error(message)
                raise ValidationError(message=message)
        group[self.CONFIG_ACTIONS] = triggers
    if len(dic):
        self.log.warn(
            _("{0}, Did not process all fields for {1}").format(
                self, dic))
    self.log.debug(_("{0}, Scale group {1}").format(self, group))
    self.scale_grps.append(group)
def _create_csar_files(self, output_dir, tmpl_out, archive=False):
    """Write the translated templates out as a CSAR directory tree.

    Creates <output_dir>/<name>/Definitions and TOSCA-Metadata, writes
    each template YAML, copies supporting files (images, scripts,
    cloud-init, icons) out of the source packages, and optionally zips
    the result.  Returns the zip path when *archive* is true and the
    zip succeeds; otherwise returns None.
    """
    # Disabled legacy pre-check, preserved from the original source:
    # for tmpl in tmpl_out:
    #     if ToscaTemplate.TOSCA not in tmpl:
    #         self.log.error(_("Did not find TOSCA template for {0}").
    #                        format(tmpl))
    #         return
    # Create sub for each NS template
    sub_folder_name = None
    if self.files:
        # Packages were uploaded: exactly one NSD (preferred) or one
        # VNFD decides the folder name.
        if len(self.output_files['nsd']) > 0:
            if len(self.output_files['nsd']) == 1:
                sub_folder_name = self.output_files['nsd'][0]
            else:
                raise ValidationError(
                    message="Multiple NSD Descriptor uploaded ")
        elif len(self.output_files['vnfd']) > 0:
            if len(self.output_files['vnfd']) == 1:
                sub_folder_name = self.output_files['vnfd'][0]
            else:
                raise ValidationError(
                    message=
                    "Multiple VNFDs Descriptors uploaded without NSD")
        else:
            raise ValidationError(message="No NSD or VNFD uploaded")
    else:
        # No packages: fall back to the first descriptor's short_name.
        if 'nsd' in self.yangs:
            sub_folder_name = self.yangs['nsd'][0]['short_name'].replace(
                ' ', '_')
        elif 'vnfd' in self.yangs:
            sub_folder_name = self.yangs['vnfd'][0]['short_name'].replace(
                ' ', '_')
    # Recreate the output subdirectory from scratch.
    subdir = os.path.join(output_dir, sub_folder_name)
    if os.path.exists(subdir):
        shutil.rmtree(subdir)
    os.makedirs(subdir)
    # RIFT common type definitions shipped with the installation.
    riftio_src_file = "{0}{1}".format(
        os.getenv('RIFT_INSTALL'),
        "/usr/rift/mano/common/riftiotypes.yaml")
    # Create the definitions dir
    def_dir = os.path.join(subdir, 'Definitions')
    os.makedirs(def_dir)
    shutil.copy2(riftio_src_file, def_dir + "/riftiotypes.yaml")
    tosca_meta_entry_file = None
    for tmpl_key in tmpl_out:
        tmpl = tmpl_out[tmpl_key]
        file_name = tmpl_key.replace(' ', '_')
        entry_file = os.path.join(def_dir, file_name + '.yaml')
        # The NSD template (if any) becomes the CSAR entry definition.
        if file_name.endswith('nsd'):
            tosca_meta_entry_file = file_name
        self.log.debug(_("Writing file {0}").format(entry_file))
        with open(entry_file, 'w+') as f:
            f.write(tmpl[ToscaTemplate.TOSCA])
    if tosca_meta_entry_file is None:
        tosca_meta_entry_file = sub_folder_name
    # Create the Tosca meta
    meta_dir = os.path.join(subdir, 'TOSCA-Metadata')
    os.makedirs(meta_dir)
    meta = '''TOSCA-Meta-File-Version: 1.0
CSAR-Version: 1.1
Created-By: RIFT.io
Entry-Definitions: Definitions/'''
    meta_data = "{}{}".format(meta, tosca_meta_entry_file + '.yaml')
    meta_file = os.path.join(meta_dir, 'TOSCA.meta')
    self.log.debug(
        _("Writing file {0}:\n{1}").format(meta_file, meta_data))
    with open(meta_file, 'w+') as f:
        f.write(meta_data)
    # Copy other supporting files
    for key in tmpl_out:
        tmpl = tmpl_out[key]
        if ToscaTemplate.FILES in tmpl:
            for f in tmpl[ToscaTemplate.FILES]:
                self.log.debug(_("Copy supporting file {0}").format(f))
                # Search in source packages
                if len(self.pkgs):
                    for pkg in self.pkgs:
                        # TODO(pjoseph): Need to add support for other
                        # file types
                        fname = f[ToscaResource.NAME]
                        dest_path = os.path.join(subdir,
                                                 f[ToscaResource.DEST])
                        ftype = f[ToscaResource.TYPE]
                        # Each extractor builds a name->member map for
                        # its file class; stop at the first package
                        # that contains the file.
                        if ftype == 'image':
                            image_file_map = \
                                rift.package.image.get_package_image_files(
                                    pkg)
                            if fname in image_file_map:
                                self.log.debug(
                                    _("Extracting image {0} to {1}").
                                    format(fname, dest_path))
                                pkg.extract_file(image_file_map[fname],
                                                 dest_path)
                                break
                        elif ftype == 'script':
                            script_file_map = \
                                rift.package.script.PackageScriptExtractor.package_script_files(pkg)
                            if fname in script_file_map:
                                self.log.debug(
                                    _("Extracting script {0} to {1}").
                                    format(fname, dest_path))
                                pkg.extract_file(script_file_map[fname],
                                                 dest_path)
                                break
                        elif ftype == 'cloud_init':
                            script_file_map = \
                                rift.package.cloud_init.PackageCloudInitExtractor.package_script_files(pkg)
                            if fname in script_file_map:
                                self.log.debug(
                                    _("Extracting script {0} to {1}").
                                    format(fname, dest_path))
                                pkg.extract_file(script_file_map[fname],
                                                 dest_path)
                                break
                        elif ftype == 'icons':
                            icon_file_map = \
                                rift.package.icon.PackageIconExtractor.package_icon_files(pkg)
                            if fname in icon_file_map:
                                self.log.debug(
                                    _("Extracting script {0} to {1}").
                                    format(fname, dest_path))
                                pkg.extract_file(icon_file_map[fname],
                                                 dest_path)
                                break
                        else:
                            self.log.warn(
                                _("Unknown file type {0}: {1}").format(
                                    ftype, f))
                #TODO(pjoseph): Search in other locations
    # Create the ZIP archive
    if archive:
        prev_dir = os.getcwd()
        os.chdir(subdir)
        try:
            zip_file = sub_folder_name + '.zip'
            zip_path = os.path.join(output_dir, zip_file)
            self.log.debug(_("Creating zip file {0}").format(zip_path))
            # NOTE(review): shell=True with an interpolated path is
            # shell-injection prone if sub_folder_name can contain
            # shell metacharacters — consider subprocess with a list
            # or the zipfile module.
            zip_cmd = "zip -r {}.partial ."
            subprocess.check_call(zip_cmd.format(zip_path),
                                  shell=True,
                                  stdout=subprocess.DEVNULL)
            # Atomic-ish rename of the partial archive to its final name.
            mv_cmd = "mv {0}.partial {0}"
            subprocess.check_call(mv_cmd.format(zip_path),
                                  shell=True,
                                  stdout=subprocess.DEVNULL)
            shutil.rmtree(subdir)
            return zip_path
        except subprocess.CalledProcessError as e:
            self.log.error(
                _("Creating CSAR archive failed: {0}").format(e))
        except Exception as e:
            self.log.exception(e)
        finally:
            # Always restore the working directory.
            os.chdir(prev_dir)
def handle_yang(self, vnfds):
    """Parse the NSD yang descriptor into this object's TOSCA-oriented
    structures (props, vnfds, vlds, vnffgds, scale groups, inputs, ...).

    Args:
        vnfds: the already-translated VNFD objects to match constituent
            VNFD references against.

    Raises:
        ValidationError: on any exception while processing the descriptor.
    """
    self.log.debug(
        _("Process NSD desc {0}: {1}").format(self.name, self.yang))

    def process_input_param(param):
        # Strip the yang xpath prefix and record as a TOSCA input name.
        if self.XPATH in param:
            val = param.pop(self.XPATH)
            # Strip namesapce, catalog and nsd part
            self.inputs.append({
                self.NAME:
                self.map_yang_name_to_tosca(
                    val.replace(
                        '/rw-project:project/project-nsd:nsd-catalog/project-nsd:nsd/nsd:',
                        ''))
            })
        if len(param):
            self.log.warn(
                _("{0}, Did not process the following for "
                  "input param {1}: {2}").format(self, self.inputs,
                                                 param))
        self.log.debug(_("{0}, inputs: {1}").format(self, self.inputs[-1]))

    def process_const_vnfd(cvnfd):
        # Get the matching VNFD
        vnfd_id = cvnfd.pop(self.VNFD_ID_REF)
        for vnfd in vnfds:
            if vnfd.type == self.VNFD and vnfd.id == vnfd_id:
                self.vnf_id_to_vnf_map[vnfd_id] = vnfd.name
                # Keyed by member-vnf-index, not by id.
                self.vnfds[cvnfd.pop(self.MEM_VNF_INDEX)] = vnfd
                if self.START_BY_DFLT in cvnfd:
                    vnfd.props[self.START_BY_DFLT] = \
                        cvnfd.pop(self.START_BY_DFLT)
                break
        if len(cvnfd):
            self.log.warn(
                _("{0}, Did not process the following for "
                  "constituent vnfd {1}: {2}").format(
                      self, vnfd_id, cvnfd))
        self.log.debug(_("{0}, VNFD: {1}").format(self, self.vnfds))

    def process_scale_grp(dic):
        # Translate one scaling-group dict into self.scale_grps.
        sg = {}
        self.log.debug(_("{0}, scale group: {1}").format(self, dic))
        fields = [self.NAME, self.MIN_INST_COUNT, self.MAX_INST_COUNT]
        for key in fields:
            if key in dic:
                sg[key] = dic.pop(key)
        membs = {}
        for vnfd_memb in dic.pop(self.VNFD_MEMBERS):
            vnfd_idx = vnfd_memb[self.MEM_VNF_INDEX_REF]
            if vnfd_idx in self.vnfds:
                membs[self.vnfds[vnfd_idx].name] = \
                    vnfd_memb[self.COUNT]
        sg['vnfd_members'] = membs
        trigs = {}
        if self.SCALE_ACT in dic:
            for sg_act in dic.pop(self.SCALE_ACT):
                # Validate the primitive
                prim = sg_act.pop(self.NS_CONF_PRIM_REF)
                for cprim in self.conf_prims:
                    if cprim[self.NAME] == prim:
                        trigs[sg_act.pop(self.TRIGGER)] = prim
                        break
                # Leftover trigger key means the primitive was unknown.
                if len(sg_act):
                    err_msg = (
                        _("{0}, Did not find config-primitive {1}").format(
                            self, prim))
                    self.log.error(err_msg)
                    raise ValidationError(message=err_msg)
            sg[self.CONFIG_ACTIONS] = trigs
        if len(dic):
            self.log.warn(
                _("{0}, Did not process all fields for {1}").format(
                    self, dic))
        self.log.debug(_("{0}, Scale group {1}").format(self, sg))
        self.scale_grps.append(sg)

    def process_initial_config(dic):
        # Collect name/seq/script plus name+value parameters.
        icp = {}
        self.log.debug(_("{0}, initial config: {1}").format(self, dic))
        for key in [self.NAME, self.SEQ, self.USER_DEF_SCRIPT]:
            if key in dic:
                icp[key] = dic.pop(key)
        params = []
        if self.PARAM in dic:
            for p in dic.pop(self.PARAM):
                if (self.NAME in p and self.VALUE in p):
                    params.append({
                        self.NAME: p[self.NAME],
                        self.VALUE: p[self.VALUE]
                    })
                else:
                    # TODO (pjoseph): Need to add support to read the
                    # config file and get the value from that
                    self.log.warn(
                        _("{0}, Got parameter without value: {1}").format(
                            self, p))
        if len(params):
            icp[self.PARAM] = params
        if len(dic):
            self.log.warn(
                _("{0}, Did not process all fields for {1}").format(
                    self, dic))
        self.log.debug(_("{0}, Initial config {1}").format(self, icp))
        self.initial_cfg.append({self.PROPERTIES: icp})

    def process_service_primitive(dic):
        # Build both the service primitive entry and the matching
        # config-primitive summary used by scale-group validation.
        prop = {}
        params = []
        for key in [self.NAME, self.USER_DEF_SCRIPT]:
            if key in dic:
                prop[key] = dic.pop(key)
        if self.PARAM in dic:
            for p in dic.pop(self.PARAM):
                p_entry = {}
                for name, value in p.items():
                    p_entry[name] = value
                params.append(p_entry)
        if len(params):
            prop[self.PARAM] = params
        conf_prim = {
            self.NAME: prop[self.NAME],
            self.DESC: 'TestDescription'
        }
        if self.USER_DEF_SCRIPT in prop:
            conf_prim[self.USER_DEF_SCRIPT] = prop[self.USER_DEF_SCRIPT]
        self.conf_prims.append(conf_prim)
        self.service_primitive.append({self.PROPERTIES: prop})

    def process_vld(vld, dic):
        # Translate one VLD: resolve its ip-profile, normalize the
        # name, and index which VNFs/CPs attach to it.
        vld_conf = {}
        vld_prop = {}
        ip_profile_vld = None
        vld_name = None
        if 'ip_profile_ref' in vld:
            ip_profile_name = vld['ip_profile_ref']
            if 'ip_profiles' in dic:
                for ip_prof in dic['ip_profiles']:
                    if ip_profile_name == ip_prof['name']:
                        ip_profile_vld = ip_prof
        if 'name' in vld:
            vld_name = vld['name'].replace('-', '_').replace(' ', '')
        if 'description' in vld:
            vld_conf['description'] = vld['description']
        if 'vendor' in vld:
            vld_conf['vendor'] = vld['vendor']
        if ip_profile_vld:
            if 'ip_profile_params' in ip_profile_vld:
                ip_param = ip_profile_vld['ip_profile_params']
                if 'gateway_address' in ip_param:
                    vld_conf['gateway_ip'] = ip_param['gateway_address']
                if 'subnet_address' in ip_param:
                    vld_conf['cidr'] = ip_param['subnet_address']
                if 'ip_version' in ip_param:
                    # yang uses 'ipv4'/'ipv6'; TOSCA wants '4'/'6'.
                    vld_conf['ip_version'] = ip_param[
                        'ip_version'].replace('ipv', '')
        if vld_name:
            vld_prop = {
                vld_name: {
                    'type': self.T_ELAN,
                    self.PROPERTIES: vld_conf
                }
            }
            self.vlds[vld_name] = {
                'type': self.T_ELAN,
                self.PROPERTIES: vld_conf
            }
            self.vld_to_vnf_map[vld_name] = []
        if 'vnfd_connection_point_ref' in vld:
            for vnfd_ref in vld['vnfd_connection_point_ref']:
                vnf_name = self.vnf_id_to_vnf_map[
                    vnfd_ref['vnfd_id_ref']]
                if vnf_name in self.vnf_to_vld_map:
                    self.vnf_to_vld_map[vnf_name].append(vld_name)
                    self._vnf_vld_conn_point_map[vnf_name].\
                        append((vld_name
                                ,vnfd_ref['vnfd_connection_point_ref']))
                else:
                    self.vnf_to_vld_map[vnf_name] = []
                    self._vnf_vld_conn_point_map[vnf_name] = []
                    self.vnf_to_vld_map[vnf_name].append(vld_name)
                    self._vnf_vld_conn_point_map[vnf_name].\
                        append((vld_name
                                ,vnfd_ref['vnfd_connection_point_ref']))

    def process_placement_group(placement_groups):
        # One TOSCA placement policy per yang placement group.
        for i in range(0, len(placement_groups)):
            placement_group = placement_groups[i]
            pg_name = "placement_{0}".format(i)
            pg_config = {}
            targets = []
            if 'name' in placement_group:
                pg_config['name'] = placement_group['name']
            if 'requirement' in placement_group:
                pg_config['requirement'] = placement_group['requirement']
            if 'strategy' in placement_group:
                pg_config['strategy'] = placement_group['strategy']
            if 'member_vnfd' in placement_group:
                for member_vnfd in placement_group['member_vnfd']:
                    targets.append(
                        self.vnf_id_to_vnf_map[member_vnfd['vnfd_id_ref']])
            placement = {
                pg_name: {
                    'type': self.T_PLACEMENT,
                    self.PROPERTIES: pg_config,
                    self.TARGETS: str(targets)
                }
            }
            self.placement_groups.append(placement)

    def process_vnffgd(vnffgs, dic):
        # Translate VNF forwarding graphs: first index RSPs and
        # classifiers per vnffg, then build the VNFFG node properties,
        # then build one forwarding-path node per RSP.
        # NOTE(review): identifier spellings ('unigue', 'conection',
        # 'constitent') are preserved from the original source.
        associated_cp_names = []
        all_cp_names = []
        vnfd_sfc_map = {}
        conn_point_to_conection_node = {}
        conn_point_to_vnf_name_map = {}
        unigue_id_forwarder_path_map = OrderedDict()
        forwarder_name_to_constitent_vnf_map = OrderedDict()
        unique_id_classifier_map = OrderedDict()
        fp_path_count = 1
        forwarder_count = 1
        vnffg_to_unique_id_rsp_map = OrderedDict()
        vnffg_to_unique_id_classifier_map = OrderedDict()
        vnffg_to_associated_cp_names = OrderedDict()
        rsp_associated_cp_names = OrderedDict()
        vnffg_to_forwarder_map = OrderedDict()
        # Pass 1: index RSPs and classifiers by id for each vnffg.
        for vnffg in vnffgs:
            unique_id_rsp_map = {}
            for rs in vnffg['rsp']:
                unique_id_rsp_map[str(rs['id'])] = rs
            for class_identifier in vnffg['classifier']:
                unique_id_classifier_map[str(
                    class_identifier['rsp_id_ref'])] = class_identifier
                associated_cp_names.append(
                    class_identifier['vnfd_connection_point_ref'])
                all_cp_names.append(
                    class_identifier['vnfd_connection_point_ref'])
                conn_point_to_vnf_name_map[class_identifier[
                    'vnfd_connection_point_ref']] = self.vnf_id_to_vnf_map[
                        class_identifier['vnfd_id_ref']]
                vnfd_sfc_map[self.vnf_id_to_vnf_map[
                    class_identifier['vnfd_id_ref']]] = class_identifier[
                        'vnfd_connection_point_ref']
                rsp_associated_cp_names[str(
                    class_identifier['rsp_id_ref']
                )] = class_identifier['vnfd_connection_point_ref']
            vnffg_to_unique_id_rsp_map[vnffg['name']] = unique_id_rsp_map
            vnffg_to_forwarder_map[vnffg['name']] = []
        # Pass 2: build one VNFFG node per vnffg.
        for vnffg in vnffgs:
            prop = {}
            fp_members = []
            prop['type'] = self.T_VNFFG
            prop[self.DESC] = "Test"
            prop[self.PROPERTIES] = {}
            if 'vendor' in vnffg:
                prop[self.PROPERTIES]['vendor'] = vnffg['vendor']
            if 'name' in vnffg:
                self.vnffgds[vnffg['name']] = prop
            for rs_id, rs in vnffg_to_unique_id_rsp_map[
                    vnffg['name']].items():
                associated_cp_node_names = []
                associated_vnf_names = []
                number_of_endpoints = 0
                if 'vnfd_connection_point_ref' in rs:
                    number_of_endpoints = number_of_endpoints + len(
                        rs['vnfd_connection_point_ref'])
                    for vnf in rs['vnfd_connection_point_ref']:
                        associated_vnf_names.append(
                            str(self.vnf_id_to_vnf_map[
                                vnf['vnfd_id_ref']]))
                        associated_cp_names.append(
                            vnf['vnfd_connection_point_ref'])
                        all_cp_names.append(
                            vnf['vnfd_connection_point_ref'])
                        conn_point_to_vnf_name_map[vnf[
                            'vnfd_connection_point_ref']] = self.vnf_id_to_vnf_map[
                                vnf['vnfd_id_ref']]
                    # Register a "forwarderN" entry for this RSP's VNFs.
                    if "forwarder{}".format(
                            fp_path_count
                    ) not in forwarder_name_to_constitent_vnf_map:
                        forwarder_name_to_constitent_vnf_map[
                            "forwarder{}".format(
                                fp_path_count)] = associated_vnf_names
                        vnffg_to_forwarder_map[vnffg['name']].append(
                            "forwarder{}".format(fp_path_count))
                    fp_path_count = fp_path_count + 1
                    associated_cp_names = list(set(associated_cp_names))
                    for cp_name in associated_cp_names:
                        for idx, vnfd in self.vnfds.items():
                            for vdu in vnfd.vdus:
                                if cp_name == rsp_associated_cp_names[rs_id]:
                                    if cp_name in vdu.conn_point_to_conection_node:
                                        associated_cp_node_names.append(
                                            vdu.conn_point_to_conection_node[
                                                cp_name])
                    #conn_point_to_conection_node[cp_name] = vdu.conn_point_to_conection_node[cp_name]
                    for cp_name in all_cp_names:
                        for idx, vnfd in self.vnfds.items():
                            for vdu in vnfd.vdus:
                                if cp_name in vdu.conn_point_to_conection_node:
                                    conn_point_to_conection_node[
                                        cp_name] = vdu.conn_point_to_conection_node[
                                            cp_name]
                    if len(associated_vnf_names) > 0:
                        associated_vnf_names = list(
                            set(associated_vnf_names))
                        vnf_str = ", ".join(associated_vnf_names)
                        prop[self.PROPERTIES][
                            'constituent_vnfs'] = "[{}]".format(vnf_str)
                    if len(associated_cp_node_names) > 0:
                        associated_cp_node_names = list(
                            set(associated_cp_node_names))
                        connection_point_str = ", ".join(
                            associated_cp_node_names)
                        prop[self.
                             PROPERTIES]['connection_point'] = "[{}]".format(
                                 ", ".join(associated_cp_node_names))
                        prop[self.PROPERTIES][
                            'number_of_endpoints'] = number_of_endpoints
                # Each RSP becomes one member forwarding path.
                fp_name = "Forwarding_path{}".format(forwarder_count)
                unigue_id_forwarder_path_map[fp_name] = rs_id
                fp_members.append(fp_name)
                forwarder_count = forwarder_count + 1
            if len(fp_members) > 0:
                prop['members'] = []
                for fp in fp_members:
                    prop['members'].append(fp)
        # Pass 3: build one forwarding-path node per recorded RSP.
        fp_count = 1
        for fp, idx in unigue_id_forwarder_path_map.items():
            for vnffg_name, unique_id_rsp_map in vnffg_to_unique_id_rsp_map.items(
            ):
                if idx in unique_id_rsp_map:
                    prop = {}
                    prop['type'] = self.T_FP
                    prop[self.PROPERTIES] = {}
                    prop[self.PROPERTIES][self.DESC] = "Forwarder"
                    prop[self.PROPERTIES]['policy'] = {}
                    prop[self.PROPERTIES]['policy']['type'] = 'ACL'
                    prop[self.PROPERTIES]['policy']['criteria'] = []
                    prop[self.PROPERTIES]['path'] = []
                    rsp = unique_id_rsp_map[idx]
                    classifier = unique_id_classifier_map[idx]
                    # Translate classifier match attributes to ACL criteria.
                    for match in classifier['match_attributes']:
                        match_prop = {}
                        if 'source_port' in match:
                            port = "'{}'".format((match['source_port']))
                            prop[self.
                                 PROPERTIES]['policy']['criteria'].append(
                                     {'source_port_range': port})
                        if 'destination_port' in match:
                            # NOTE(review): the computed port is discarded
                            # and a literal '5006' is emitted here —
                            # looks wrong, preserved as-is; confirm intent.
                            port = "'f'{}''".format(
                                (match['destination_port']))
                            prop[self.
                                 PROPERTIES]['policy']['criteria'].append(
                                     {'destination_port_range': '5006'})
                        if 'ip_proto' in match:
                            port = match['ip_proto']
                            prop[self.
                                 PROPERTIES]['policy']['criteria'].append(
                                     {'ip_proto': port})
                        if 'destination_ip_address' in match:
                            port = "'{}'".format(
                                (match['destination_ip_address']))
                            prop[self.
                                 PROPERTIES]['policy']['criteria'].append(
                                     {'ip_dst_prefix': port})
                    if 'vnfd_connection_point_ref' in classifier:
                        if classifier[
                                'vnfd_connection_point_ref'] in conn_point_to_vnf_name_map:
                            if 'cp' not in prop[self.PROPERTIES]:
                                prop[self.PROPERTIES]['cp'] = {}
                            prop[self.PROPERTIES]['cp'][
                                'forwarder'] = conn_point_to_vnf_name_map[
                                    classifier[
                                        'vnfd_connection_point_ref']]
                            prop[self.PROPERTIES]['cp'][
                                'capability'] = conn_point_to_conection_node[
                                    classifier[
                                        'vnfd_connection_point_ref']]
                    # Record (vnf, forwarder, capability) triples for
                    # substitution mappings.
                    for fp, vnf_list in forwarder_name_to_constitent_vnf_map.items(
                    ):
                        for vnf in vnf_list:
                            for cp, vnf_name in conn_point_to_vnf_name_map.items(
                            ):
                                if vnf == vnf_name:
                                    self.substitution_mapping_forwarder.append(
                                        (vnf, fp,
                                         conn_point_to_conection_node[cp]))
                    visited_forwarder = []
                    visited_path = None
                    for path, vnfs in forwarder_name_to_constitent_vnf_map.items(
                    ):
                        for vnf in vnfs:
                            if (vnf not in visited_forwarder) and (
                                    path in vnffg_to_forwarder_map[vnffg_name]):
                                path_prop = {}
                                path_prop['forwarder'] = vnf
                                path_prop['capability'] = path
                                prop[self.PROPERTIES]['path'].append(
                                    path_prop)
                                visited_forwarder.append(vnf)
                                visited_path = path
                    # NOTE(review): raises KeyError if no path matched
                    # (visited_path stays None) — preserved as-is.
                    forwarder_name_to_constitent_vnf_map.pop(visited_path)
                    self.forwarding_paths["Forwarding_path{}".format(
                        fp_count)] = prop
                    fp_count = fp_count + 1
        self.vnfd_sfc_map = vnfd_sfc_map

    # Work on a copy: processing pops keys as they are consumed so the
    # leftover keys can be reported at the end.
    dic = deepcopy(self.yang)
    try:
        for key in self.REQUIRED_FIELDS:
            if key in dic:
                self.props[key] = dic.pop(key)
        self.id = self.props[self.ID]
        # Process constituent VNFDs
        vnfd_name_list = []
        member_vnf_index_list = []
        if self.CONST_VNFD in dic:
            for cvnfd in dic.pop(self.CONST_VNFD):
                if cvnfd[self.VNFD_ID_REF] not in member_vnf_index_list:
                    member_vnf_index_list.append(cvnfd[self.VNFD_ID_REF])
                    process_const_vnfd(cvnfd)
                else:
                    self.duplicate_vnfd_name_list.append(
                        self.vnf_id_to_vnf_map[cvnfd[self.VNFD_ID_REF]])
        # Process VLDs
        if self.VLD in dic:
            for vld_dic in dic.pop(self.VLD):
                process_vld(vld_dic, dic)
                #self.vlds.append(vld)
        #Process VNFFG
        if self.VNFFGD in dic:
            process_vnffgd(dic[self.VNFFGD], dic)
        # Process initial config primitives
        if self.INITIAL_CFG in dic:
            for icp_dic in dic.pop(self.INITIAL_CFG):
                process_initial_config(icp_dic)
        # NS service prmitive
        if self.CONF_PRIM in dic:
            for icp_dic in dic.pop(self.CONF_PRIM):
                process_service_primitive(icp_dic)
        # Process scaling group
        if self.SCALE_GRP in dic:
            for sg_dic in dic.pop(self.SCALE_GRP):
                process_scale_grp(sg_dic)
        # Process the input params
        if self.INPUT_PARAM_XPATH in dic:
            for param in dic.pop(self.INPUT_PARAM_XPATH):
                process_input_param(param)
        if 'placement_groups' in dic:
            process_placement_group(dic['placement_groups'])
        self.remove_ignored_fields(dic)
        if len(dic):
            self.log.warn(
                _("{0}, Did not process the following for "
                  "NSD {1}: {2}").format(self, self.props, dic))
        self.log.debug(_("{0}, NSD: {1}").format(self, self.props))
    except Exception as e:
        err_msg = _("Exception processing NSD {0} : {1}"). \
            format(self.name, e)
        self.log.error(err_msg)
        self.log.exception(e)
        raise ValidationError(message=err_msg)
def handle_yang(self):
    """Parse the VNFD yang descriptor into this object's properties.

    Processes VDUs first (so connection points can later be attached
    to them), then the endpoint sections: management interface, HTTP
    endpoints, monitoring params and connection points.

    Raises:
        ValidationError: on any error while processing the descriptor.
    """
    self.log.debug(
        _("Process VNFD desc {0}: {1}").format(self.name, self.yang))

    def process_vnf_config(conf):
        # Flatten the yang config section into self.props[VNF_CONFIG].
        vnf_conf = {}
        if self.CONFIG_ATTR in conf:
            for key, value in conf.pop(self.CONFIG_ATTR).items():
                vnf_conf[key] = value
        if self.CONFIG_TMPL in conf:
            vnf_conf[self.CONFIG_TMPL] = conf.pop(self.CONFIG_TMPL)

        def copy_config_details(conf_type, conf_details):
            vnf_conf[self.CONFIG_TYPE] = conf_type
            vnf_conf[self.CONFIG_DETAILS] = conf_details

        # Only the first matching config type is recorded.
        for key in self.CONFIG_TYPES:
            if key in conf:
                copy_config_details(key, conf.pop(key))
                break
        if len(conf):
            self.log.warn(
                _("{0}, Did not process all in VNF "
                  "configuration {1}").format(self, conf))
        self.log.debug(_("{0}, vnf config: {1}").format(self, vnf_conf))
        self.props[self.VNF_CONFIG] = vnf_conf

    def process_mgmt_intf(intf):
        # Only one management interface is allowed per VNFD.
        if len(self.mgmt_intf) > 0:
            # BUG FIX: err_msg was previously *called* as a function
            # (NameError) and the message referenced the misspelled
            # attribute self.msmg_intf.
            err_msg = _("{0}, Already processed another mgmt intf {1}, "
                        "got another {2}").format(self, self.mgmt_intf,
                                                  intf)
            self.log.error(err_msg)
            raise ValidationError(message=err_msg)
        self.mgmt_intf['protocol'] = 'tcp'
        if self.VNF_CONFIG in intf:
            process_vnf_config(intf.pop(self.VNF_CONFIG))
        if self.PORT in intf:
            self.mgmt_intf[self.PORT] = intf.pop(self.PORT)
            self.props[self.PORT] = self.mgmt_intf[self.PORT]
        if 'vdu_id' in intf:
            # Resolve the yang vdu_id to the TOSCA node name of the VDU.
            for vdu in self.vdus:
                if intf['vdu_id'] == vdu.id:
                    self.mgmt_intf[self.VDU] = vdu.get_name(self.name)
                    intf.pop('vdu_id')
                    break
        if self.DASHBOARD_PARAMS in intf:
            self.mgmt_intf[self.DASHBOARD_PARAMS] = \
                intf.pop(self.DASHBOARD_PARAMS)
        if len(intf):
            self.log.warn(
                _("{0}, Did not process all in mgmt "
                  "interface {1}").format(self, intf))
        self.log.debug(
            _("{0}, Management interface: {1}").format(
                self, self.mgmt_intf))

    def process_http_ep(eps):
        self.log.debug("{}, HTTP EP: {}".format(self, eps))
        for ep in eps:
            http_ep = {'protocol': 'http'}  # Required for TOSCA
            http_ep[self.PATH] = ep.pop(self.PATH)
            http_ep[self.PORT] = ep.pop(self.PORT)
            http_ep[self.POLL_INTVL] = ep.pop(self.POLL_INTVL_SECS)
            if len(ep):
                self.log.warn(
                    _("{0}, Did not process the following for "
                      "http ep {1}").format(self, ep))
            self.log.debug(
                _("{0}, http endpoint: {1}").format(self, http_ep))
            self.http_ep.append(http_ep)

    def process_mon_param(params):
        # Copy the recognized monitoring-param fields; warn on leftovers.
        for param in params:
            monp = {}
            fields = [
                self.NAME, self.ID, 'value_type', 'units', 'group_tag',
                'json_query_method', 'http_endpoint_ref', 'widget_type',
                self.DESC
            ]
            for key in fields:
                if key in param:
                    monp[key] = param.pop(key)
            if len(param):
                self.log.warn(
                    _("{0}, Did not process the following for "
                      "monitporing-param {1}").format(self, param))
            self.log.debug(
                _("{0}, Monitoring param: {1}").format(self, monp))
            self.mon_param.append(monp)

    def process_cp(cps):
        # Attach each connection point to the VDU that declares it.
        for cp_dic in cps:
            self.log.debug("{}, CP: {}".format(self, cp_dic))
            name = cp_dic.pop(self.NAME)
            for vdu in self.vdus:
                if vdu.has_cp(name):
                    vdu.set_cp_type(name, cp_dic.pop(self.TYPE_Y))
                    break
            if len(cp_dic):
                self.log.warn(
                    _("{0}, Did not process the following for "
                      "connection-point {1}: {2}").format(
                          self, name, cp_dic))

    # Dispatch table: yang section key -> handler.
    ENDPOINTS_MAP = {
        self.MGMT_INTF: process_mgmt_intf,
        self.HTTP_EP: process_http_ep,
        self.MON_PARAM: process_mon_param,
        'connection_point': process_cp
    }
    dic = deepcopy(self.yang)
    try:
        for key in self.REQUIRED_FIELDS:
            self.props[key] = dic.pop(key)
        self.id = self.props[self.ID]
        # Process VDUs before CPs so as to update the CP struct in VDU
        # when we process CP later
        if self.VDU in dic:
            for vdu_dic in dic.pop(self.VDU):
                vdu = YangVdu(self.log, vdu_dic.pop(self.NAME),
                              self.VDU, vdu_dic)
                vdu.process_vdu()
                self.vdus.append(vdu)
        for key in ENDPOINTS_MAP.keys():
            if key in dic:
                ENDPOINTS_MAP[key](dic.pop(key))
        self.remove_ignored_fields(dic)
        if len(dic):
            self.log.warn(
                _("{0}, Did not process the following for "
                  "VNFD: {1}").format(self, dic))
        self.log.debug(_("{0}, VNFD: {1}").format(self, self.props))
    except Exception as e:
        err_msg = _("Exception processing VNFD {0} : {1}"). \
            format(self.name, e)
        self.log.error(err_msg)
        raise ValidationError(message=err_msg)
def handle_yang(self):
    """Parse the VNFD yang descriptor into this object's properties.

    Processes VDUs first (so connection points can later be attached
    to them), then the endpoint sections, the VNF configuration and
    the service-function type.

    Raises:
        ValidationError: on any error while processing the descriptor.
    """
    self.log.debug(
        _("Process VNFD desc {0}: {1}").format(self.name, self.yang))

    def process_vnf_config(conf):
        # Build self.vnf_configuration: a 'config' section plus the
        # translated initial-config primitives.
        vnf_conf = {}
        config = {}
        init_primitive_config = {}
        if 'config_template' in conf:
            config['config_template'] = conf['config_template']
        if 'config_attributes' in conf:
            if 'config_delay' in conf['config_attributes']:
                config['config_delay'] = conf['config_attributes'][
                    'config_delay']
            if 'config_priority' in conf['config_attributes']:
                config['config_priority'] = conf['config_attributes'][
                    'config_priority']
        if 'config_type' in conf:
            config['config_type'] = conf['config_type']
        if 'script' in conf:
            config['config_details'] = conf['script']
        # A presence container named after the type overrides config_type.
        for conf_type in self.CONFIG_TYPES:
            if conf_type in conf:
                config['config_type'] = conf_type
        if len(config) > 0:
            vnf_conf['config'] = config
        if 'initial_config_primitive' in conf:
            init_config_prims = []
            for init_conf_prim in conf['initial_config_primitive']:
                init_conf = {}
                if 'name' in init_conf_prim:
                    init_conf['name'] = init_conf_prim['name']
                if 'seq' in init_conf_prim:
                    init_conf['seq'] = init_conf_prim['seq']
                if 'user_defined_script' in init_conf_prim:
                    init_conf['user_defined_script'] = init_conf_prim[
                        'user_defined_script']
                    # Remember the script so it gets packaged later.
                    self.script_files.append(
                        init_conf_prim['user_defined_script'])
                if 'parameter' in init_conf_prim:
                    init_conf['parameter'] = []
                    for parameter in init_conf_prim['parameter']:
                        init_conf['parameter'].append(
                            {parameter['name']: parameter['value']})
                init_config_prims.append(init_conf)
            vnf_conf['initial_config'] = init_config_prims
        self.vnf_configuration = vnf_conf

    def process_mgmt_intf(intf):
        # Only one management interface is allowed per VNFD.
        if len(self.mgmt_intf) > 0:
            # BUG FIX: err_msg was previously *called* as a function
            # (NameError) and the message referenced the misspelled
            # attribute self.msmg_intf.
            err_msg = _("{0}, Already processed another mgmt intf {1}, "
                        "got another {2}").format(self, self.mgmt_intf,
                                                  intf)
            self.log.error(err_msg)
            raise ValidationError(message=err_msg)
        self.mgmt_intf['protocol'] = 'tcp'
        if self.PORT in intf:
            self.mgmt_intf[self.PORT] = intf.pop(self.PORT)
            self.props[self.PORT] = self.mgmt_intf[self.PORT]
        if 'vdu_id' in intf:
            # Resolve the yang vdu_id to the TOSCA node name of the VDU.
            for vdu in self.vdus:
                if intf['vdu_id'] == vdu.id:
                    self.mgmt_intf[self.VDU] = vdu.get_name(self.name)
                    intf.pop('vdu_id')
                    break
        if self.DASHBOARD_PARAMS in intf:
            self.mgmt_intf[self.DASHBOARD_PARAMS] = \
                intf.pop(self.DASHBOARD_PARAMS)
        if len(intf):
            self.log.warn(
                _("{0}, Did not process all in mgmt "
                  "interface {1}").format(self, intf))
        self.log.debug(
            _("{0}, Management interface: {1}").format(
                self, self.mgmt_intf))

    def process_http_ep(eps):
        self.log.debug("{}, HTTP EP: {}".format(self, eps))
        for ep in eps:
            http_ep = {'protocol': 'http'}  # Required for TOSCA
            http_ep[self.PATH] = ep.pop(self.PATH)
            http_ep[self.PORT] = ep.pop(self.PORT)
            # BUG FIX: the guard previously tested the freshly built
            # http_ep (always false), so the polling interval was
            # never copied; test the source dict instead.
            if self.POLL_INTVL_SECS in ep:
                http_ep[self.POLL_INTVL] = ep.pop(self.POLL_INTVL_SECS)
            if len(ep):
                self.log.warn(
                    _("{0}, Did not process the following for "
                      "http ep {1}").format(self, ep))
            self.log.debug(
                _("{0}, http endpoint: {1}").format(self, http_ep))
            self.http_ep.append(http_ep)

    def process_mon_param(params):
        # Build the monitoring-param dict plus its ui_data sub-dict.
        # NOTE(review): param is only read (not popped) here, so the
        # "did not process" warning below lists the whole input —
        # preserved, as silencing it could hide real leftovers.
        for param in params:
            mon_param = {}
            ui_param = {}
            if 'name' in param:
                mon_param['name'] = param['name']
            if 'description' in param:
                mon_param['description'] = param['description']
            if 'polling_interval' in param:
                mon_param['polling_interval'] = param['polling_interval']
            if 'http_endpoint_ref' in param:
                mon_param['url_path'] = param['http_endpoint_ref']
            if 'json_query_method' in param:
                mon_param['json_query_method'] = param[
                    'json_query_method'].lower()
            #if 'value_type' in param:
            #    mon_param['constraints'] = {}
            #    mon_param['constraints']['value_type'] = YangVnfd.VALUE_TYPE_CONVERSION_MAP[param['value_type'].upper()]
            if 'group_tag' in param:
                ui_param['group_tag'] = param['group_tag']
            if 'widget_type' in param:
                ui_param['widget_type'] = param['widget_type'].lower()
            if 'units' in param:
                ui_param['units'] = param['units']
            mon_param['ui_data'] = ui_param
            self.mon_param.append(mon_param)
            if len(param):
                self.log.warn(
                    _("{0}, Did not process the following for "
                      "monitporing-param {1}").format(self, param))
            # BUG FIX: previously logged an always-empty scratch dict;
            # log the parameter actually recorded.
            self.log.debug(
                _("{0}, Monitoring param: {1}").format(self, mon_param))

    def process_cp(cps):
        # Attach each connection point to the VDU that declares it.
        for cp_dic in cps:
            self.log.debug("{}, CP: {}".format(self, cp_dic))
            name = cp_dic.pop(self.NAME)
            for vdu in self.vdus:
                if vdu.has_cp(name):
                    vdu.set_cp_type(name, cp_dic.pop(self.TYPE_Y))
                    break
            if len(cp_dic):
                self.log.warn(
                    _("{0}, Did not process the following for "
                      "connection-point {1}: {2}").format(
                          self, name, cp_dic))

    def process_service_type(dic):
        # Record the SFC service function type of this VNF.
        self.service_function_type = dic['service_function_type']

    # Dispatch table: yang section key -> handler.
    ENDPOINTS_MAP = {
        self.MGMT_INTF: process_mgmt_intf,
        self.HTTP_EP: process_http_ep,
        self.MON_PARAM: process_mon_param,
        'connection_point': process_cp
    }
    dic = deepcopy(self.yang)
    try:
        for key in self.REQUIRED_FIELDS:
            if key in dic:
                self.props[key] = dic.pop(key)
        self.id = self.props[self.ID]
        # Process VDUs before CPs so as to update the CP struct in VDU
        # when we process CP later
        if self.VDU in dic:
            for vdu_dic in dic.pop(self.VDU):
                vdu = YangVdu(self.log, vdu_dic.pop(self.NAME),
                              self.VDU, vdu_dic)
                vdu.process_vdu()
                self.vdus.append(vdu)
        for key in ENDPOINTS_MAP.keys():
            if key in dic:
                ENDPOINTS_MAP[key](dic.pop(key))
        if self.VNF_CONFIG in dic:
            process_vnf_config(dic.pop(self.VNF_CONFIG))
        if 'service_function_type' in dic:
            process_service_type(dic)
        self.remove_ignored_fields(dic)
        if len(dic):
            self.log.warn(
                _("{0}, Did not process the following for "
                  "VNFD: {1}").format(self, dic))
        self.log.debug(_("{0}, VNFD: {1}").format(self, self.props))
    except Exception as e:
        err_msg = _("Exception processing VNFD {0} : {1}"). \
            format(self.name, e)
        self.log.error(err_msg)
        raise ValidationError(message=err_msg)
def handle_yang(self, vnfds):
    """Translate this NSD yang descriptor into the internal model.

    Works on a deep copy of ``self.yang``; each handled section is
    popped so any leftover (untranslated) fields can be logged at the
    end. Populates ``self.props``, ``self.vnfds``, ``self.vlds``,
    ``self.conf_prims``, ``self.scale_grps``, ``self.initial_cfg`` and
    ``self.inputs``.

    :param vnfds: already-translated VNFD objects, matched against the
        constituent-vnfd entries by ``id``.
    :raises ValidationError: on any failure while processing.
    """
    self.log.debug(
        _("Process NSD desc {0}: {1}").format(self.name, self.yang))

    def process_input_param(param):
        # Record one input parameter, mapping its yang xpath to a TOSCA
        # friendly name.
        if self.XPATH in param:
            val = param.pop(self.XPATH)
            # Strip namesapce, catalog and nsd part
            self.inputs.append({
                self.NAME: self.map_yang_name_to_tosca(
                    val.replace('/nsd:nsd-catalog/nsd:nsd/nsd:', ''))
            })
        if len(param):
            self.log.warn(
                _("{0}, Did not process the following for "
                  "input param {1}: {2}").format(self, self.inputs,
                                                 param))
        self.log.debug(_("{0}, inputs: {1}").format(self,
                                                    self.inputs[-1]))

    def process_const_vnfd(cvnfd):
        # Get the matching VNFD
        vnfd_id = cvnfd.pop(self.VNFD_ID_REF)
        for vnfd in vnfds:
            if vnfd.type == self.VNFD and vnfd.id == vnfd_id:
                # Index by member-vnf-index so scale groups can refer
                # back to it.
                self.vnfds[cvnfd.pop(self.MEM_VNF_INDEX)] = vnfd
                if self.START_BY_DFLT in cvnfd:
                    vnfd.props[self.START_BY_DFLT] = \
                        cvnfd.pop(self.START_BY_DFLT)
                break
        if len(cvnfd):
            self.log.warn(
                _("{0}, Did not process the following for "
                  "constituent vnfd {1}: {2}").format(
                      self, vnfd_id, cvnfd))
        self.log.debug(_("{0}, VNFD: {1}").format(self, self.vnfds))

    def process_scale_grp(dic):
        # Build one scaling-group dict: bounds, member counts and the
        # trigger -> config-primitive action map.
        sg = {}
        self.log.debug(_("{0}, scale group: {1}").format(self, dic))
        fields = [self.NAME, self.MIN_INST_COUNT, self.MAX_INST_COUNT]
        for key in fields:
            if key in dic:
                sg[key] = dic.pop(key)
        membs = {}
        for vnfd_memb in dic.pop(self.VNFD_MEMBERS):
            vnfd_idx = vnfd_memb[self.MEM_VNF_INDEX_REF]
            # Silently skip members whose index was not registered by
            # process_const_vnfd.
            if vnfd_idx in self.vnfds:
                membs[self.vnfds[vnfd_idx].name] = \
                    vnfd_memb[self.COUNT]
        sg['vnfd_members'] = membs
        trigs = {}
        if self.SCALE_ACT in dic:
            for sg_act in dic.pop(self.SCALE_ACT):
                # Validate the primitive
                prim = sg_act.pop(self.NS_CONF_PRIM_REF)
                for cprim in self.conf_prims:
                    if cprim[self.NAME] == prim:
                        trigs[sg_act.pop(self.TRIGGER)] = prim
                        break
                # If no primitive matched, TRIGGER was never popped and
                # sg_act is non-empty -> the reference is dangling.
                if len(sg_act):
                    err_msg = (
                        _("{0}, Did not find config-primitive {1}").format(
                            self, prim))
                    self.log.error(err_msg)
                    raise ValidationError(message=err_msg)
        sg[self.CONFIG_ACTIONS] = trigs
        if len(dic):
            self.log.warn(
                _("{0}, Did not process all fields for {1}").format(
                    self, dic))
        self.log.debug(_("{0}, Scale group {1}").format(self, sg))
        self.scale_grps.append(sg)

    def process_initial_config(dic):
        # Build one initial-config-primitive dict (name/seq/script plus
        # a name -> value parameter map).
        icp = {}
        self.log.debug(_("{0}, initial config: {1}").format(self, dic))
        for key in [self.NAME, self.SEQ, self.USER_DEF_SCRIPT]:
            if key in dic:
                icp[key] = dic.pop(key)
        params = {}
        if self.PARAM in dic:
            for p in dic.pop(self.PARAM):
                if (self.NAME in p and
                        self.VALUE in p):
                    params[p[self.NAME]] = p[self.VALUE]
                else:
                    # TODO (pjoseph): Need to add support to read the
                    # config file and get the value from that
                    self.log.warn(
                        _("{0}, Got parameter without value: {1}").
                        format(self, p))
            if len(params):
                icp[self.PARAM] = params
        if len(dic):
            self.log.warn(
                _("{0}, Did not process all fields for {1}").format(
                    self, dic))
        self.log.debug(_("{0}, Initial config {1}").format(self, icp))
        self.initial_cfg.append(icp)

    # Work on a copy so handlers can pop what they consume; leftovers
    # are reported at the end.
    dic = deepcopy(self.yang)
    try:
        for key in self.REQUIRED_FIELDS:
            self.props[key] = dic.pop(key)
        self.id = self.props[self.ID]

        # Process constituent VNFDs
        if self.CONST_VNFD in dic:
            for cvnfd in dic.pop(self.CONST_VNFD):
                process_const_vnfd(cvnfd)

        # Process VLDs
        if self.VLD in dic:
            for vld_dic in dic.pop(self.VLD):
                vld = YangVld(self.log, vld_dic.pop(self.NAME),
                              self.VLD, vld_dic)
                vld.process_vld(self.vnfds)
                self.vlds.append(vld)

        # Process config primitives
        if self.CONF_PRIM in dic:
            for cprim in dic.pop(self.CONF_PRIM):
                conf_prim = {self.NAME: cprim.pop(self.NAME)}
                if self.USER_DEF_SCRIPT in cprim:
                    conf_prim[self.USER_DEF_SCRIPT] = \
                        cprim.pop(self.USER_DEF_SCRIPT)
                    self.conf_prims.append(conf_prim)
                else:
                    # Fix: use explicit {1}/{2} placeholders -- the
                    # original mixed "{0}" with bare "{}", which makes
                    # str.format raise ValueError instead of producing
                    # this error message.
                    err_msg = (_(
                        "{0}, Only user defined script supported "
                        "in config-primitive for now {1}: {2}").format(
                            self, conf_prim, cprim))
                    self.log.error(err_msg)
                    raise ValidationError(message=err_msg)

        # Process scaling group
        if self.SCALE_GRP in dic:
            for sg_dic in dic.pop(self.SCALE_GRP):
                process_scale_grp(sg_dic)

        # Process initial config primitives
        if self.INITIAL_CFG in dic:
            for icp_dic in dic.pop(self.INITIAL_CFG):
                process_initial_config(icp_dic)

        # Process the input params
        if self.INPUT_PARAM_XPATH in dic:
            for param in dic.pop(self.INPUT_PARAM_XPATH):
                process_input_param(param)

        self.remove_ignored_fields(dic)
        if len(dic):
            self.log.warn(
                _("{0}, Did not process the following for "
                  "NSD {1}: {2}").format(self, self.props, dic))
        self.log.debug(_("{0}, NSD: {1}").format(self, self.props))
    except Exception as e:
        # Wrap any processing failure so callers see a single error type.
        err_msg = _("Exception processing NSD {0} : {1}"). \
            format(self.name, e)
        self.log.error(err_msg)
        self.log.exception(e)
        raise ValidationError(message=err_msg)