def create_vnf(self, context, vnf):
    vnf_info = vnf['vnf']
    name = vnf_info['name']

    # if vnfd_template specified, create vnfd from template
    # create template dictionary structure same as needed in create_vnfd()
    if vnf_info.get('vnfd_template'):
        vnfd_name = utils.generate_resource_name(name, 'inline')
        vnfd = {'vnfd': {'attributes': {'vnfd': vnf_info['vnfd_template']},
                         'name': vnfd_name,
                         'template_source': 'inline',
                         'service_types': [{'service_type': 'vnfd'}]}}
        vnf_info['vnfd_id'] = self.create_vnfd(context, vnfd).get('id')

    vnf_attributes = vnf_info['attributes']
    if vnf_attributes.get('param_values'):
        param = vnf_attributes['param_values']
        if isinstance(param, dict):
            # TODO(sripriya) remove this yaml dump once db supports storing
            # json format of yaml files in a separate column instead of
            # key value string pairs in vnf attributes table
            vnf_attributes['param_values'] = yaml.safe_dump(param)
        else:
            self._report_deprecated_yaml_str()
    if vnf_attributes.get('config'):
        config = vnf_attributes['config']
        if isinstance(config, dict):
            # TODO(sripriya) remove this yaml dump once db supports storing
            # json format of yaml files in a separate column instead of
            # key value string pairs in vnf attributes table
            vnf_attributes['config'] = yaml.safe_dump(config)
        else:
            self._report_deprecated_yaml_str()

    infra_driver, vim_auth = self._get_infra_driver(context, vnf_info)
    if infra_driver not in self._vnf_manager:
        LOG.debug('unknown vim driver '
                  '%(infra_driver)s in %(drivers)s',
                  {'infra_driver': infra_driver,
                   'drivers': cfg.CONF.tacker.infra_driver})
        raise vnfm.InvalidInfraDriver(vim_name=infra_driver)

    vnf_dict = self._create_vnf(context, vnf_info, vim_auth, infra_driver)

    def create_vnf_wait():
        self._create_vnf_wait(context, vnf_dict, vim_auth, infra_driver)
        if 'service_monitoring_policy' in vnf_dict['attributes']:
            self.add_vnf_to_svcmonitor(context, vnf_dict)
        # compare status by value, not identity
        if vnf_dict['status'] != constants.ERROR:
            self.add_vnf_to_monitor(context, vnf_dict)
        self.config_vnf(context, vnf_dict)
    self.spawn_n(create_vnf_wait)
    return vnf_dict

def create_vnffg(self, context, vnffg):
    vnffg_info = vnffg['vnffg']
    name = vnffg_info['name']

    if vnffg_info.get('vnffgd_template'):
        vnffgd_name = utils.generate_resource_name(name, 'inline')
        vnffgd = {
            'vnffgd': {
                'tenant_id': vnffg_info['tenant_id'],
                'name': vnffgd_name,
                'template': {
                    'vnffgd': vnffg_info['vnffgd_template']
                },
                'template_source': 'inline',
                'description': vnffg_info['description']
            }
        }
        vnffg_info['vnffgd_id'] = \
            self.create_vnffgd(context, vnffgd).get('id')

    vnffg_dict = super(NfvoPlugin, self)._create_vnffg_pre(context, vnffg)
    nfp = super(NfvoPlugin, self).get_nfp(context,
                                          vnffg_dict['forwarding_paths'])
    sfc = super(NfvoPlugin, self).get_sfc(context, nfp['chain_id'])
    match = super(NfvoPlugin, self).get_classifier(
        context, nfp['classifier_id'], fields='match')['match']
    # grab the first VNF to check its VIM type
    # we have already checked that all VNFs are in the same VIM
    vim_obj = self._get_vim_from_vnf(
        context, list(vnffg_dict['vnf_mapping'].values())[0])
    # TODO(trozet): figure out what auth info we actually need to pass
    # to the driver. Is it a session, or is full vim obj good enough?
    driver_type = vim_obj['type']
    try:
        fc_id = self._vim_drivers.invoke(driver_type,
                                         'create_flow_classifier',
                                         name=vnffg_dict['name'],
                                         fc=match,
                                         auth_attr=vim_obj['auth_cred'],
                                         symmetrical=sfc['symmetrical'])
        sfc_id = self._vim_drivers.invoke(driver_type,
                                          'create_chain',
                                          name=vnffg_dict['name'],
                                          vnfs=sfc['chain'],
                                          fc_id=fc_id,
                                          symmetrical=sfc['symmetrical'],
                                          auth_attr=vim_obj['auth_cred'])
    except Exception:
        with excutils.save_and_reraise_exception():
            self.delete_vnffg(context, vnffg_id=vnffg_dict['id'])
    super(NfvoPlugin, self)._create_vnffg_post(context, sfc_id, fc_id,
                                               vnffg_dict)
    super(NfvoPlugin, self)._create_vnffg_status(context, vnffg_dict)
    return vnffg_dict

def create_vnf(self, context, vnf):
    vnf_info = vnf['vnf']
    name = vnf_info['name']

    # if vnfd_template specified, create vnfd from template
    # create template dictionary structure same as needed in create_vnfd()
    if vnf_info.get('vnfd_template'):
        vnfd_name = utils.generate_resource_name(name, 'inline')
        vnfd = {'vnfd': {'attributes': {'vnfd': vnf_info['vnfd_template']},
                         'name': vnfd_name,
                         'template_source': 'inline',
                         'service_types': [{'service_type': 'vnfd'}]}}
        vnf_info['vnfd_id'] = self.create_vnfd(context, vnfd).get('id')

    infra_driver, vim_auth = self._get_infra_driver(context, vnf_info)
    if infra_driver not in self._vnf_manager:
        LOG.debug('unknown vim driver '
                  '%(infra_driver)s in %(drivers)s',
                  {'infra_driver': infra_driver,
                   'drivers': cfg.CONF.tacker.infra_driver})
        raise vnfm.InvalidInfraDriver(vim_name=infra_driver)

    vnf_attributes = vnf_info['attributes']
    if vnf_attributes.get('param_values'):
        param = vnf_attributes['param_values']
        if isinstance(param, dict):
            # TODO(sripriya) remove this yaml dump once db supports storing
            # json format of yaml files in a separate column instead of
            # key value string pairs in vnf attributes table
            vnf_attributes['param_values'] = yaml.safe_dump(param)
        else:
            raise vnfm.InvalidAPIAttributeType(atype=type(param))
    if vnf_attributes.get('config'):
        config = vnf_attributes['config']
        if isinstance(config, dict):
            # TODO(sripriya) remove this yaml dump once db supports storing
            # json format of yaml files in a separate column instead of
            # key value string pairs in vnf attributes table
            vnf_attributes['config'] = yaml.safe_dump(config)
        else:
            raise vnfm.InvalidAPIAttributeType(atype=type(config))

    vnf_dict = self._create_vnf(context, vnf_info, vim_auth, infra_driver)

    def create_vnf_wait():
        self._create_vnf_wait(context, vnf_dict, vim_auth, infra_driver)
        if 'app_monitoring_policy' in vnf_dict['attributes']:
            self.add_vnf_to_appmonitor(context, vnf_dict)
        # compare status by value, not identity
        if vnf_dict['status'] != constants.ERROR:
            self.add_vnf_to_monitor(context, vnf_dict)
        self.config_vnf(context, vnf_dict)
    self.spawn_n(create_vnf_wait)
    return vnf_dict

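# Illustrative sketch, not part of the plugin: the approximate shape of the
# ``vnf`` request body that create_vnf() above reads. Only the key names are
# taken from the code; all values below are hypothetical placeholders.
example_vnf_request = {
    'vnf': {
        'name': 'example-vnf',             # hypothetical name
        'vnfd_id': 'VNFD_UUID',            # or pass 'vnfd_template' with an inline VNFD dict
        'attributes': {
            # dict values are serialized with yaml.safe_dump() by create_vnf()
            'param_values': {'image': 'cirros'},   # hypothetical parameters
            'config': {'vdus': {}},                # hypothetical config
        },
    },
}
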
def create_ns(self, context, ns):
    """Create NS and corresponding VNFs.

    :param ns: ns dict which contains nsd_id and attributes
    This method has 3 steps:
    step-1: substitute all get_input params with their corresponding values
    step-2: Build params dict for substitution mappings case through which
            VNFs will actually substitute their requirements.
    step-3: Create mistral workflow and execute the workflow
    """
    ns_info = ns['ns']
    name = ns_info['name']

    if ns_info.get('nsd_template'):
        nsd_name = utils.generate_resource_name(name, 'inline')
        nsd = {
            'nsd': {
                'attributes': {
                    'nsd': ns_info['nsd_template']
                },
                'description': ns_info['description'],
                'name': nsd_name,
                'template_source': 'inline',
                'tenant_id': ns_info['tenant_id']
            }
        }
        ns_info['nsd_id'] = self.create_nsd(context, nsd).get('id')

    nsd = self.get_nsd(context, ns['ns']['nsd_id'])
    nsd_dict = yaml.safe_load(nsd['attributes']['nsd'])
    vnfm_plugin = manager.TackerManager.get_service_plugins()['VNFM']
    onboarded_vnfds = vnfm_plugin.get_vnfds(context, [])
    region_name = ns.setdefault('placement_attr', {}).get(
        'region_name', None)
    vim_res = self.vim_client.get_vim(context, ns['ns']['vim_id'],
                                      region_name)
    driver_type = vim_res['vim_type']
    if not ns['ns']['vim_id']:
        ns['ns']['vim_id'] = vim_res['vim_id']

    # Step-1
    param_values = ns['ns']['attributes'].get('param_values', {})
    if 'get_input' in str(nsd_dict):
        self._process_parameterized_input(ns['ns']['attributes'],
                                          nsd_dict)

    # Step-2
    vnfds = nsd['vnfds']
    # vnfd_dict is used while generating workflow
    vnfd_dict = dict()
    for node_name, node_val in \
            (nsd_dict['topology_template']['node_templates']).items():
        if node_val.get('type') not in vnfds.keys():
            continue
        vnfd_name = vnfds[node_val.get('type')]
        if not vnfd_dict.get(vnfd_name):
            vnfd_dict[vnfd_name] = {
                'id': self._get_vnfd_id(vnfd_name, onboarded_vnfds),
                'instances': [node_name]
            }
        else:
            vnfd_dict[vnfd_name]['instances'].append(node_name)
        if not node_val.get('requirements'):
            continue
        if not param_values.get(vnfd_name):
            param_values[vnfd_name] = {}
        param_values[vnfd_name]['substitution_mappings'] = dict()
        req_dict = dict()
        requirements = node_val.get('requirements')
        for requirement in requirements:
            req_name = list(requirement.keys())[0]
            req_val = list(requirement.values())[0]
            res_name = req_val + ns['ns']['nsd_id'][:11]
            req_dict[req_name] = res_name
            if req_val in nsd_dict['topology_template']['node_templates']:
                param_values[vnfd_name]['substitution_mappings'][
                    res_name] = nsd_dict['topology_template'][
                        'node_templates'][req_val]
        param_values[vnfd_name]['substitution_mappings'][
            'requirements'] = req_dict
    ns['vnfd_details'] = vnfd_dict

    # Step-3
    kwargs = {'ns': ns, 'params': param_values}

    # NOTE NoTasksException is raised if no tasks.
    workflow = self._vim_drivers.invoke(
        driver_type,
        'prepare_and_create_workflow',
        resource='vnf',
        action='create',
        auth_dict=self.get_auth_dict(context),
        kwargs=kwargs)
    try:
        mistral_execution = self._vim_drivers.invoke(
            driver_type,
            'execute_workflow',
            workflow=workflow,
            auth_dict=self.get_auth_dict(context))
    except Exception as ex:
        LOG.error('Error while executing workflow: %s', ex)
        self._vim_drivers.invoke(driver_type,
                                 'delete_workflow',
                                 workflow_id=workflow['id'],
                                 auth_dict=self.get_auth_dict(context))
        raise ex
    ns_dict = super(NfvoPlugin, self).create_ns(context, ns)

    def _create_ns_wait(self_obj, ns_id, execution_id):
        exec_state = "RUNNING"
        mistral_retries = MISTRAL_RETRIES
        while exec_state == "RUNNING" and mistral_retries > 0:
            time.sleep(MISTRAL_RETRY_WAIT)
            exec_state = self._vim_drivers.invoke(
                driver_type,
                'get_execution',
                execution_id=execution_id,
                auth_dict=self.get_auth_dict(context)).state
            LOG.debug('status: %s', exec_state)
            if exec_state == 'SUCCESS' or exec_state == 'ERROR':
                break
            mistral_retries = mistral_retries - 1
        error_reason = None
        if mistral_retries == 0 and exec_state == 'RUNNING':
            error_reason = _(
                "NS creation is not completed within"
                " {wait} seconds as creation of mistral"
                " execution {mistral} is not completed").format(
                    wait=MISTRAL_RETRIES * MISTRAL_RETRY_WAIT,
                    mistral=execution_id)
        exec_obj = self._vim_drivers.invoke(
            driver_type,
            'get_execution',
            execution_id=execution_id,
            auth_dict=self.get_auth_dict(context))
        self._vim_drivers.invoke(driver_type,
                                 'delete_execution',
                                 execution_id=execution_id,
                                 auth_dict=self.get_auth_dict(context))
        self._vim_drivers.invoke(driver_type,
                                 'delete_workflow',
                                 workflow_id=workflow['id'],
                                 auth_dict=self.get_auth_dict(context))
        super(NfvoPlugin, self).create_ns_post(context, ns_id, exec_obj,
                                               vnfd_dict, error_reason)
    self.spawn_n(_create_ns_wait, self, ns_dict['id'],
                 mistral_execution.id)
    return ns_dict

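# Illustrative sketch, not part of the plugin: a minimal ``ns`` request body
# for create_ns() above. Key names come from the code; values are hypothetical.
example_ns_request = {
    'ns': {
        'name': 'example-ns',        # hypothetical
        'nsd_id': 'NSD_UUID',        # or 'nsd_template' plus 'description' and 'tenant_id'
        'vim_id': '',                # if empty, the id resolved by vim_client is filled in
        'attributes': {
            # per-VNFD parameters consumed during get_input substitution (Step-1)
            'param_values': {},
        },
    },
}
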
def update_vnffg(self, context, vnffg_id, vnffg):
    vnffg_info = vnffg['vnffg']
    # put vnffg related objects in PENDING_UPDATE status
    vnffg_old = super(NfvoPlugin, self)._update_vnffg_status_pre(context,
                                                                 vnffg_id)
    name = vnffg_old['name']

    # create inline vnffgd if given by user
    if vnffg_info.get('vnffgd_template'):
        vnffgd_name = utils.generate_resource_name(name, 'inline')
        vnffgd = {
            'vnffgd': {
                'tenant_id': vnffg_old['tenant_id'],
                'name': vnffgd_name,
                'template': {
                    'vnffgd': vnffg_info['vnffgd_template']
                },
                'template_source': 'inline',
                'description': vnffg_old['description']
            }
        }
        try:
            vnffg_info['vnffgd_id'] = \
                self.create_vnffgd(context, vnffgd).get('id')
        except Exception:
            with excutils.save_and_reraise_exception():
                super(NfvoPlugin, self)._update_vnffg_status_post(
                    context, vnffg_old, error=True,
                    db_state=constants.ACTIVE)
    try:
        vnffg_dict = super(NfvoPlugin, self). \
            _update_vnffg_pre(context, vnffg, vnffg_id, vnffg_old)
    except (nfvo.VnfMappingNotFoundException,
            nfvo.VnfMappingNotValidException):
        with excutils.save_and_reraise_exception():
            if vnffg_info.get('vnffgd_template'):
                super(NfvoPlugin, self).delete_vnffgd(
                    context, vnffg_info['vnffgd_id'])
            super(NfvoPlugin, self)._update_vnffg_status_post(
                context, vnffg_old, error=True, db_state=constants.ACTIVE)
    except nfvo.UpdateVnffgException:
        with excutils.save_and_reraise_exception():
            super(NfvoPlugin, self).delete_vnffgd(context,
                                                  vnffg_info['vnffgd_id'])
            super(NfvoPlugin, self)._update_vnffg_status_post(context,
                                                              vnffg_old,
                                                              error=True)
    nfp = super(NfvoPlugin, self).get_nfp(context,
                                          vnffg_dict['forwarding_paths'])
    sfc = super(NfvoPlugin, self).get_sfc(context, nfp['chain_id'])

    classifier_dict = dict()
    classifier_update = []
    classifier_delete_ids = []
    classifier_ids = []
    for classifier_id in nfp['classifier_ids']:
        classifier_dict = super(NfvoPlugin, self).get_classifier(
            context, classifier_id, fields=['id', 'name', 'match',
                                            'instance_id', 'status'])
        if classifier_dict['status'] == constants.PENDING_DELETE:
            classifier_delete_ids.append(
                classifier_dict.pop('instance_id'))
        else:
            classifier_ids.append(classifier_dict.pop('id'))
            classifier_update.append(classifier_dict)

    # TODO(gongysh) support different vim for each vnf
    vim_obj = self._get_vim_from_vnf(
        context, list(vnffg_dict['vnf_mapping'].values())[0])
    driver_type = vim_obj['type']
    try:
        fc_ids = []
        self._vim_drivers.invoke(driver_type,
                                 'remove_and_delete_flow_classifiers',
                                 chain_id=sfc['instance_id'],
                                 fc_ids=classifier_delete_ids,
                                 auth_attr=vim_obj['auth_cred'])
        for item in classifier_update:
            fc_ids.append(
                self._vim_drivers.invoke(driver_type,
                                         'update_flow_classifier',
                                         chain_id=sfc['instance_id'],
                                         fc=item,
                                         auth_attr=vim_obj['auth_cred']))
        n_sfc_chain_id = self._vim_drivers.invoke(
            driver_type, 'update_chain', vnfs=sfc['chain'],
            fc_ids=fc_ids, chain_id=sfc['instance_id'],
            auth_attr=vim_obj['auth_cred'])
    except Exception:
        with excutils.save_and_reraise_exception():
            super(NfvoPlugin, self)._update_vnffg_status_post(context,
                                                              vnffg_dict,
                                                              error=True)
    classifiers_map = super(NfvoPlugin, self).create_classifiers_map(
        classifier_ids, fc_ids)
    super(NfvoPlugin, self)._update_vnffg_post(context, n_sfc_chain_id,
                                               classifiers_map,
                                               vnffg_dict)
    super(NfvoPlugin, self)._update_vnffg_status_post(context, vnffg_dict)
    return vnffg_dict

def create_ns(self, context, ns):
    """Create NS, corresponding VNFs, VNFFGs.

    :param ns: ns dict which contains nsd_id and attributes
    This method has 3 steps:
    step-1: substitute all get_input params with their corresponding values
    step-2: Build params dict for substitution mappings case through which
            VNFs will actually substitute their requirements.
    step-3: Create mistral workflow to create VNFs, VNFFG and execute the
            workflow
    """
    ns_info = ns['ns']
    name = ns_info['name']

    if ns_info.get('nsd_template'):
        nsd_name = utils.generate_resource_name(name, 'inline')
        nsd = {'nsd': {
            'attributes': {'nsd': ns_info['nsd_template']},
            'description': ns_info['description'],
            'name': nsd_name,
            'template_source': 'inline',
            'tenant_id': ns_info['tenant_id']}}
        ns_info['nsd_id'] = self.create_nsd(context, nsd).get('id')

    nsd = self.get_nsd(context, ns['ns']['nsd_id'])
    nsd_dict = yaml.safe_load(nsd['attributes']['nsd'])
    vnfm_plugin = manager.TackerManager.get_service_plugins()['VNFM']
    onboarded_vnfds = vnfm_plugin.get_vnfds(context, [])
    region_name = ns_info.get('placement_attr', {}). \
        get('region_name', None)
    vim_res = self.vim_client.get_vim(context, ns['ns']['vim_id'],
                                      region_name)
    driver_type = vim_res['vim_type']
    if not ns['ns']['vim_id']:
        ns['ns']['vim_id'] = vim_res['vim_id']

    # TODO(phuoc): currently, create_ns function does not have
    # create_ns_pre function, that pre-defines information of a network
    # service. Creating ns_uuid keeps ns_id for consistency, it should be
    # provided as return value of create_ns_pre function in ns db.
    # Generate ns_uuid
    ns['ns']['ns_id'] = uuidutils.generate_uuid()

    # Step-1
    param_values = ns['ns']['attributes'].get('param_values', {})
    if 'get_input' in str(nsd_dict):
        self._process_parameterized_input(ns['ns']['attributes'],
                                          nsd_dict)

    # Step-2
    vnfds = nsd['vnfds']
    # vnfd_dict is used while generating workflow
    vnfd_dict = dict()
    for node_name, node_val in \
            (nsd_dict['topology_template']['node_templates']).items():
        if node_val.get('type') not in vnfds.keys():
            continue
        vnfd_name = vnfds[node_val.get('type')]
        if not vnfd_dict.get(vnfd_name):
            vnfd_dict[vnfd_name] = {
                'id': self._get_vnfd_id(vnfd_name, onboarded_vnfds),
                'instances': [node_name]
            }
        else:
            vnfd_dict[vnfd_name]['instances'].append(node_name)
        if not node_val.get('requirements'):
            continue
        if not param_values.get(vnfd_name):
            param_values[vnfd_name] = {}
        param_values[vnfd_name]['substitution_mappings'] = dict()
        req_dict = dict()
        requirements = node_val.get('requirements')
        for requirement in requirements:
            req_name = list(requirement.keys())[0]
            req_val = list(requirement.values())[0]
            res_name = req_val + ns['ns']['nsd_id'][:11]
            req_dict[req_name] = res_name
            if req_val in nsd_dict['topology_template']['node_templates']:
                param_values[vnfd_name]['substitution_mappings'][
                    res_name] = nsd_dict['topology_template'][
                        'node_templates'][req_val]
        param_values[vnfd_name]['substitution_mappings'][
            'requirements'] = req_dict
    ns['vnfd_details'] = vnfd_dict

    vnffgd_templates = self._get_vnffgds_from_nsd(nsd_dict)
    LOG.debug('vnffgd_templates: %s', vnffgd_templates)
    ns['vnffgd_templates'] = vnffgd_templates

    # Step-3
    kwargs = {'ns': ns, 'params': param_values}

    # NOTE NoTasksException is raised if no tasks.
    workflow = self._vim_drivers.invoke(
        driver_type,
        'prepare_and_create_workflow',
        resource='ns',
        action='create',
        auth_dict=self.get_auth_dict(context),
        kwargs=kwargs)
    try:
        mistral_execution = self._vim_drivers.invoke(
            driver_type,
            'execute_workflow',
            workflow=workflow,
            auth_dict=self.get_auth_dict(context))
    except Exception as ex:
        LOG.error('Error while executing workflow: %s', ex)
        self._vim_drivers.invoke(driver_type,
                                 'delete_workflow',
                                 workflow_id=workflow['id'],
                                 auth_dict=self.get_auth_dict(context))
        raise ex
    ns_dict = super(NfvoPlugin, self).create_ns(context, ns)

    def _create_ns_wait(self_obj, ns_id, execution_id):
        exec_state = "RUNNING"
        mistral_retries = MISTRAL_RETRIES
        while exec_state == "RUNNING" and mistral_retries > 0:
            time.sleep(MISTRAL_RETRY_WAIT)
            exec_state = self._vim_drivers.invoke(
                driver_type,
                'get_execution',
                execution_id=execution_id,
                auth_dict=self.get_auth_dict(context)).state
            LOG.debug('status: %s', exec_state)
            if exec_state == 'SUCCESS' or exec_state == 'ERROR':
                break
            mistral_retries = mistral_retries - 1
        # TODO(phuoc): add more information about error reason in case
        # of exec_state is 'ERROR'
        error_reason = None
        if mistral_retries == 0 and exec_state == 'RUNNING':
            error_reason = _(
                "NS creation is not completed within"
                " {wait} seconds as creation of mistral"
                " execution {mistral} is not completed").format(
                    wait=MISTRAL_RETRIES * MISTRAL_RETRY_WAIT,
                    mistral=execution_id)
        exec_obj = self._vim_drivers.invoke(
            driver_type,
            'get_execution',
            execution_id=execution_id,
            auth_dict=self.get_auth_dict(context))
        self._vim_drivers.invoke(driver_type,
                                 'delete_execution',
                                 execution_id=execution_id,
                                 auth_dict=self.get_auth_dict(context))
        self._vim_drivers.invoke(driver_type,
                                 'delete_workflow',
                                 workflow_id=workflow['id'],
                                 auth_dict=self.get_auth_dict(context))
        super(NfvoPlugin, self).create_ns_post(
            context, ns_id, exec_obj, vnfd_dict, vnffgd_templates,
            error_reason)
    self.spawn_n(_create_ns_wait, self, ns_dict['id'],
                 mistral_execution.id)
    return ns_dict

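# Illustrative sketch, not part of the plugin: how Step-2 above shapes
# ``ns['vnfd_details']``. Names are hypothetical; the structure mirrors the
# vnfd_dict built while iterating nsd_dict['topology_template']['node_templates'].
example_vnfd_details = {
    'example-vnfd': {              # VNFD name resolved from nsd['vnfds']
        'id': 'VNFD_UUID',         # looked up via _get_vnfd_id()
        'instances': ['VNF1'],     # node template names of that VNFD type
    },
}
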
def update_vnffg(self, context, vnffg_id, vnffg):
    vnffg_info = vnffg['vnffg']
    # put vnffg related objects in PENDING_UPDATE status
    vnffg_old = super(NfvoPlugin, self)._update_vnffg_status_pre(
        context, vnffg_id)
    name = vnffg_old['name']

    # create inline vnffgd if given by user
    if vnffg_info.get('vnffgd_template'):
        vnffgd_name = utils.generate_resource_name(name, 'inline')
        vnffgd = {'vnffgd': {'tenant_id': vnffg_old['tenant_id'],
                             'name': vnffgd_name,
                             'template': {
                                 'vnffgd': vnffg_info['vnffgd_template']},
                             'template_source': 'inline',
                             'description': vnffg_old['description']}}
        try:
            vnffg_info['vnffgd_id'] = \
                self.create_vnffgd(context, vnffgd).get('id')
        except Exception:
            with excutils.save_and_reraise_exception():
                super(NfvoPlugin, self)._update_vnffg_status_post(
                    context, vnffg_old, error=True,
                    db_state=constants.ACTIVE)
    try:
        vnffg_dict = super(NfvoPlugin, self). \
            _update_vnffg_pre(context, vnffg, vnffg_id, vnffg_old)
    except (nfvo.VnfMappingNotFoundException,
            nfvo.VnfMappingNotValidException):
        with excutils.save_and_reraise_exception():
            if vnffg_info.get('vnffgd_template'):
                super(NfvoPlugin, self).delete_vnffgd(
                    context, vnffg_info['vnffgd_id'])
            super(NfvoPlugin, self)._update_vnffg_status_post(
                context, vnffg_old, error=True, db_state=constants.ACTIVE)
    except nfvo.UpdateVnffgException:
        with excutils.save_and_reraise_exception():
            super(NfvoPlugin, self).delete_vnffgd(context,
                                                  vnffg_info['vnffgd_id'])
            super(NfvoPlugin, self)._update_vnffg_status_post(context,
                                                              vnffg_old,
                                                              error=True)
    nfp = super(NfvoPlugin, self).get_nfp(context,
                                          vnffg_dict['forwarding_paths'])
    sfc = super(NfvoPlugin, self).get_sfc(context, nfp['chain_id'])

    classifier_dict = dict()
    classifier_update = []
    classifier_delete_ids = []
    classifier_ids = []
    for classifier_id in nfp['classifier_ids']:
        classifier_dict = super(NfvoPlugin, self).get_classifier(
            context, classifier_id, fields=['id', 'name', 'match',
                                            'instance_id', 'status'])
        if classifier_dict['status'] == constants.PENDING_DELETE:
            classifier_delete_ids.append(
                classifier_dict.pop('instance_id'))
        else:
            classifier_ids.append(classifier_dict.pop('id'))
            classifier_update.append(classifier_dict)

    # TODO(gongysh) support different vim for each vnf
    vim_obj = self._get_vim_from_vnf(context, list(vnffg_dict[
        'vnf_mapping'].values())[0])
    driver_type = vim_obj['type']
    try:
        fc_ids = []
        self._vim_drivers.invoke(driver_type,
                                 'remove_and_delete_flow_classifiers',
                                 chain_id=sfc['instance_id'],
                                 fc_ids=classifier_delete_ids,
                                 auth_attr=vim_obj['auth_cred'])
        for item in classifier_update:
            fc_ids.append(self._vim_drivers.invoke(
                driver_type,
                'update_flow_classifier',
                chain_id=sfc['instance_id'],
                fc=item,
                auth_attr=vim_obj['auth_cred']))
        n_sfc_chain_id = self._vim_drivers.invoke(
            driver_type, 'update_chain', vnfs=sfc['chain'],
            fc_ids=fc_ids, chain_id=sfc['instance_id'],
            auth_attr=vim_obj['auth_cred'])
    except Exception:
        with excutils.save_and_reraise_exception():
            super(NfvoPlugin, self)._update_vnffg_status_post(context,
                                                              vnffg_dict,
                                                              error=True)
    classifiers_map = super(NfvoPlugin, self).create_classifiers_map(
        classifier_ids, fc_ids)
    super(NfvoPlugin, self)._update_vnffg_post(context, n_sfc_chain_id,
                                               classifiers_map,
                                               vnffg_dict)
    super(NfvoPlugin, self)._update_vnffg_status_post(context, vnffg_dict)
    return vnffg_dict

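# Illustrative sketch, not part of the plugin: an update request body for
# update_vnffg() above. Only 'vnffgd_template' is read directly in this method;
# 'vnf_mapping' is assumed here based on the VnfMapping* exceptions handled by
# _update_vnffg_pre(). All values are hypothetical.
example_vnffg_update_request = {
    'vnffg': {
        'vnffgd_template': {},                 # optional inline VNFFGD (hypothetical content)
        'vnf_mapping': {'VNF1': 'VNF_UUID'},   # hypothetical mapping, validated by _update_vnffg_pre()
    },
}
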
def create_vnffg(self, context, vnffg):
    vnffg_info = vnffg['vnffg']
    name = vnffg_info['name']

    if vnffg_info.get('vnffgd_template'):
        vnffgd_name = utils.generate_resource_name(name, 'inline')
        vnffgd = {'vnffgd': {'tenant_id': vnffg_info['tenant_id'],
                             'name': vnffgd_name,
                             'template': {
                                 'vnffgd': vnffg_info['vnffgd_template']},
                             'template_source': 'inline',
                             'description': vnffg_info['description']}}
        vnffg_info['vnffgd_id'] = \
            self.create_vnffgd(context, vnffgd).get('id')

    vnffg_dict = super(NfvoPlugin, self)._create_vnffg_pre(context, vnffg)
    nfp = super(NfvoPlugin, self).get_nfp(context,
                                          vnffg_dict['forwarding_paths'])
    sfc = super(NfvoPlugin, self).get_sfc(context, nfp['chain_id'])

    name_match_list = []
    for classifier_id in nfp['classifier_ids']:
        classifier_dict = super(NfvoPlugin, self).get_classifier(
            context, classifier_id, fields=['name', 'match'])
        name_match_list.append(classifier_dict)

    # grab the first VNF to check its VIM type
    # we have already checked that all VNFs are in the same VIM
    vim_obj = self._get_vim_from_vnf(
        context, list(vnffg_dict['vnf_mapping'].values())[0])
    # TODO(trozet): figure out what auth info we actually need to pass
    # to the driver. Is it a session, or is full vim obj good enough?
    correlation = super(NfvoPlugin, self)._get_correlation_template(
        context, vnffg_info)
    driver_type = vim_obj['type']
    try:
        fc_ids = []
        for item in name_match_list:
            fc_ids.append(self._vim_drivers.invoke(
                driver_type,
                'create_flow_classifier',
                name=item['name'],
                fc=item['match'],
                auth_attr=vim_obj['auth_cred']))
        sfc_id, path_id = self._vim_drivers.invoke(
            driver_type,
            'create_chain',
            name=vnffg_dict['name'],
            path_id=sfc['path_id'],
            vnfs=sfc['chain'],
            fc_ids=fc_ids,
            symmetrical=sfc['symmetrical'],
            correlation=correlation,
            auth_attr=vim_obj['auth_cred'])
    except Exception:
        with excutils.save_and_reraise_exception():
            self.delete_vnffg(context, vnffg_id=vnffg_dict['id'])
    classifiers_map = super(NfvoPlugin, self). \
        create_classifiers_map(nfp['classifier_ids'], fc_ids)
    super(NfvoPlugin, self)._create_vnffg_post(context, sfc_id, path_id,
                                               classifiers_map,
                                               vnffg_dict)
    super(NfvoPlugin, self)._create_vnffg_status(context, vnffg_dict)
    return vnffg_dict

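# Illustrative sketch, not part of the plugin: the approximate ``vnffg``
# request body consumed by create_vnffg() above. Key names are taken from the
# code; values are hypothetical.
example_vnffg_request = {
    'vnffg': {
        'name': 'example-vnffg',       # hypothetical
        'tenant_id': 'TENANT_UUID',    # used when an inline 'vnffgd_template' is given
        'description': 'example forwarding graph',
        'vnffgd_id': 'VNFFGD_UUID',    # or 'vnffgd_template' with an inline VNFFGD dict
    },
}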