def _validate_pre_launch_executor_results(self, network_handler, device_data, image_name, image_id_result, provider_metadata_result, server_grp_id_result=None): interfaces = device_data.pop('interfaces', None) if not interfaces: LOG.exception(_LE('Failed to get interfaces for device creation.')) return None, _, _ image_id = image_id_result.get('result', None) if not image_id: LOG.error(_LE('Failed to get image id for device creation.')) self._delete_interfaces(device_data, interfaces, network_handler=network_handler) return None, _, _ if server_grp_id_result and not server_grp_id_result.get('result'): LOG.error( _LE('Validation failed for Nova anti-affinity ' 'server group.')) return None, _, _ provider_metadata = provider_metadata_result.get('result', None) if not provider_metadata: LOG.warning( _LW('Failed to get provider metadata for' ' device creation.')) provider_metadata = {} return interfaces, image_id, provider_metadata
def _validate_create_instance_executor_results(self, network_handler, device_data, interfaces, instance_id_result, port_details_result): token = device_data['token'] admin_tenant_id = device_data['admin_tenant_id'] instance_id = instance_id_result.get('result', None) if not instance_id: LOG.error( _LE('Failed to create instance with device data:' '%(data)s.'), {'data': device_data}) self._delete_interfaces(device_data, interfaces, network_handler=network_handler) return None, _ mgmt_neutron_port_info = port_details_result.get('result', None) if not mgmt_neutron_port_info: LOG.error(_LE('Failed to get management port details. ')) with nfp_ctx_mgr.NovaContextManager as ncm: ncm.retry(self.compute_handler_nova.delete_instance, token, admin_tenant_id, instance_id) self._delete_interfaces(device_data, interfaces, network_handler=network_handler) return None, _ return instance_id, mgmt_neutron_port_info
def _resource_owner_tenant_id():
    """Return the id of the resource-owner tenant.

    Admin credentials and the tenant name are read from the keystone
    configuration. Lookup failures are logged and re-raised.
    """
    user, pwd, tenant, auth_url = utils.get_keystone_creds()
    client = keyclient.Client(username=user, password=pwd,
                              auth_url=auth_url)
    try:
        return client.tenants.find(name=tenant).id
    except k_exceptions.NotFound:
        with excutils.save_and_reraise_exception(reraise=True):
            LOG.error(_LE('No tenant with name %s exists.'), tenant)
    except k_exceptions.NoUniqueMatch:
        with excutils.save_and_reraise_exception(reraise=True):
            LOG.error(_LE('Multiple tenants matches found for %s'), tenant)
def _call_on_drivers(self, method_name, context, continue_on_failure=False): """Helper method for calling a method across all policy drivers. :param method_name: name of the method to call :param context: context parameter to pass to each method call :param continue_on_failure: whether or not to continue to call all policy drivers once one has raised an exception :raises: neutron.services.group_policy.common.GroupPolicyDriverError if any policy driver call fails. """ error = False drivers = (self.ordered_policy_drivers if not method_name.startswith('delete') else self.reverse_ordered_policy_drivers) for driver in drivers: try: getattr(driver.obj, method_name)(context) except Exception as e: if db_api.is_retriable(e): with excutils.save_and_reraise_exception(): LOG.debug( "Policy driver '%(name)s' failed in" " %(method)s, operation will be retried", { 'name': driver.name, 'method': method_name }) elif isinstance(e, gp_exc.GroupPolicyException) or isinstance( e, n_exc.NeutronException) or isinstance( e, oslo_policy.PolicyNotAuthorized): with excutils.save_and_reraise_exception(): LOG.exception( _LE("Policy driver '%(name)s' failed in" " %(method)s"), { 'name': driver.name, 'method': method_name }) else: error = True # We are eating a non-GBP/non-Neutron exception here LOG.exception( _LE("Policy driver '%(name)s' failed in %(method)s"), { 'name': driver.name, 'method': method_name }) if not continue_on_failure: break if error: raise gp_exc.GroupPolicyDriverError(method=method_name)
def load_plugin(namespace, plugin):
    """Instantiate a plugin, resolving it by stevedore alias first and,
    failing that, by fully qualified class name.

    :raises: ImportError if neither resolution strategy works.
    """
    try:
        # Try to resolve the plugin by its stevedore alias first.
        plugin_class = driver.DriverManager(namespace, plugin).driver
    except RuntimeError as e1:
        # Fall back to treating `plugin` as a dotted class path.
        try:
            plugin_class = importutils.import_class(plugin)
        except ImportError as e2:
            LOG.exception(_LE("Error loading plugin by name, %s"), e1)
            LOG.exception(_LE("Error loading plugin by class, %s"), e2)
            raise ImportError(_("Plugin not found."))
    return plugin_class()
def create_servicechain_instance(self, context, servicechain_instance):
    """Instance created.

    When a Servicechain Instance is created, all its nodes need to be
    instantiated.
    """
    instance = self._process_commit_phase(context)
    if instance:
        return instance
    deployers = {}
    with context.session.begin(subtransactions=True):
        instance = super(NodeCompositionPlugin,
                         self).create_servicechain_instance(
                             context, servicechain_instance)
        if len(instance['servicechain_specs']) > 1:
            raise exc.OneSpecPerInstanceAllowed()
        deployers = self._get_scheduled_drivers(context, instance,
                                                'deploy')
    if not gutils.is_precommit_policy_driver_configured():
        # Actual node deploy
        try:
            self._deploy_servicechain_nodes(context, deployers)
        except Exception:
            # Some node could not be deployed
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Node deployment failed, "
                              "deleting servicechain_instance %s"),
                          instance['id'])
                self.delete_servicechain_instance(context, instance['id'])
    return instance
def _get_resource_status(self, context, resource_name, deployers=None): """ Invoke node drivers only for servicechain_instance. Node driver should implement get_status api to return status and status_details of servicechain_instance """ if resource_name == 'servicechain_instance': nodes_status = [] result = { 'status': 'BUILD', 'status_details': 'node deployment in progress' } if deployers: try: for deploy in deployers.values(): driver = deploy['driver'] nodes_status.append( driver.get_status(deploy['context'])) node_status = [node['status'] for node in nodes_status] if 'ERROR' in node_status: result['status'] = 'ERROR' result['status_details'] = 'node deployment failed' elif node_status.count('ACTIVE') == len( deployers.values()): result['status'] = 'ACTIVE' result['status_details'] = 'node deployment completed' except Exception as exc: LOG.error( _LE("Failed to get servicechain instance status " "from node driver, Error: %(exc)s"), {'exc': exc}) return return result result = {'status': 'ACTIVE', 'status_details': ''} return result
def request_endpoint_details(self, context, **kwargs):
    """Build the combined GBP + Neutron endpoint details response.

    Returns None (after logging) if gathering the details fails.
    """
    LOG.debug("APIC AIM handling get_endpoint_details for: %s", kwargs)
    try:
        request = kwargs.get('request')
        host = kwargs.get('host')
        return {
            'device': request['device'],
            'timestamp': request['timestamp'],
            'request_id': request['request_id'],
            'gbp_details': self._get_gbp_details(context, request, host),
            'neutron_details': ml2_rpc.RpcCallbacks(
                None, None).get_device_details(context, **request)}
    except Exception as e:
        LOG.error(_LE("An exception has occurred while requesting device "
                      "gbp details for %s"), request.get('device'))
        LOG.exception(e)
        return None
def create_instance(self, nova, token, admin_tenant_id, image_id, flavor,
                    interfaces_to_attach, instance_name, volume_support,
                    volume_size, files=None, user_data=None,
                    server_grp_id=None):
    """Launch a Nova instance and return its id.

    Returns None (after logging) when the create call raises.
    """
    try:
        return nova.create_instance(
            token, admin_tenant_id, image_id, flavor,
            interfaces_to_attach, instance_name, volume_support,
            volume_size, files=files, userdata=user_data,
            server_grp_id=server_grp_id)
    except Exception as e:
        LOG.error(_LE('Failed to create instance.'
                      'Error: %(error)s'), {'error': e})
def get_network_function_map(context, network_function_id):
    """Fetch the network function context over RPC and convert it into
    the request-data structure; returns None on failure.
    """
    request_data = None
    try:
        rpc_nso_client = transport.RPCClient(a_topics.NFP_NSO_TOPIC)
        nf_context = rpc_nso_client.cctxt.call(
            context, 'get_network_function_context',
            network_function_id=network_function_id)
        request_data = _prepare_structure(
            nf_context['network_function_details'],
            nf_context['ports_info'],
            nf_context['mngmt_port_info'],
            nf_context['monitor_port_info'])
        LOG.debug(" %s " % (request_data))
        return request_data
    except Exception as e:
        LOG.error(_LE("Failed to get network function map of "
                      "network_function_id %(network_function_id)s "
                      ": %(ex)s "),
                  {'ex': e, 'network_function_id': network_function_id})
        return request_data
def get_policy_target_group_status(self, context):
    """Mirror the status of the provider servicechain instance onto the
    policy target group held in context.current; failures only log.
    """
    try:
        current = context.current
        if not (current['provided_policy_rule_sets'] and
                not current.get('proxied_group_id')):
            return
        ptg_status = []
        for sci in self._get_chains_by_prs(
                context, current['provided_policy_rule_sets']):
            servicechain_instance = self._get_servicechain_instance(
                context._plugin_context, sci)
            if servicechain_instance['provider_ptg_id'] == current['id']:
                ptg_status.append(
                    {'status': servicechain_instance['status'],
                     'status_details':
                         servicechain_instance['status_details']})
        # REVISIT: For now assuming there will be only
        # one sci associated with this ptg
        if ptg_status:
            current['status'] = ptg_status[0]['status']
            current['status_details'] = ptg_status[0]['status_details']
    except Exception:
        LOG.error(_LE('Failed to update ptg status'))
def _group_policy_plugin(self):
    """Return the loaded GroupPolicy service plugin.

    :raises: GroupPolicyDeploymentError when the plugin is not loaded.
    """
    # REVISIT(rkukura): Need initialization method after all
    # plugins are loaded to grab and store plugin.
    plugin = directory.get_plugin(pconst.GROUP_POLICY)
    if plugin:
        return plugin
    LOG.error(_LE("No GroupPolicy service plugin found."))
    raise exc.GroupPolicyDeploymentError()
def _unmap(self, session, name, type_tag, prefix, enforce): pos = len(prefix) + len(type_tag) + 1 if self._map(session, "", type_tag, prefix) == name[:pos]: return name[pos:] elif enforce: LOG.error(_LE("Attempted to reverse-map invalid APIC name '%s'"), name) raise exceptions.InternalError()
def _servicechain_plugin(self):
    """Return the loaded Servicechain service plugin.

    :raises: GroupPolicyDeploymentError when the plugin is not loaded.
    """
    # REVISIT(rkukura): Need initialization method after all
    # plugins are loaded to grab and store plugin.
    plugin = directory.get_plugin(pconst.SERVICECHAIN)
    if plugin:
        return plugin
    LOG.error(_LE("No Servicechain service plugin found."))
    raise exc.GroupPolicyDeploymentError()
def _l3_plugin(self):
    """Return the loaded L3 router service plugin.

    :raises: GroupPolicyDeploymentError when the plugin is not loaded.
    """
    # REVISIT(rkukura): Need initialization method after all
    # plugins are loaded to grab and store plugin.
    plugin = directory.get_plugin(nl_const.L3)
    if plugin:
        return plugin
    LOG.error(_LE("No L3 router service plugin found."))
    raise exc.GroupPolicyDeploymentError()
def _extend_router_dict_apic(self, router_res, router_db):
    """Populate router_res with the APIC AIM extension attributes.

    Errors are logged with a traceback and then re-raised.
    """
    LOG.debug("APIC AIM L3 Plugin extending router dict: %s", router_res)
    db_session = inspect(router_db).session
    try:
        self._md.extend_router_dict(db_session, router_db, router_res)
        self._include_router_extn_attr(db_session, router_res)
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE("APIC AIM extend_router_dict failed"))
def ensure_tenant(self, plugin_context, tenant_id):
    """Give each mechanism driver a chance to prepare the tenant.

    A failing driver is logged and converted into a
    MechanismDriverError.
    """
    for driver in self.ordered_mech_drivers:
        if not isinstance(driver.obj, driver_api.MechanismDriver):
            continue
        try:
            driver.obj.ensure_tenant(plugin_context, tenant_id)
        except Exception:
            LOG.exception(_LE("Mechanism driver '%s' failed in "
                              "ensure_tenant"), driver.name)
            raise ml2_exc.MechanismDriverError(method="ensure_tenant")
def _qos_plugin(self):
    """Return the loaded QoS service plugin.

    :raises: GroupPolicyDeploymentError when the plugin is not loaded.
    """
    # REVISIT(rkukura): Need initialization method after all
    # plugins are loaded to grab and store plugin.
    plugin = directory.get_plugin(pconst.QOS)
    if plugin:
        return plugin
    LOG.error(_LE("No QoS service plugin found."))
    raise exc.GroupPolicyDeploymentError()
def _get_vrf_details(self, context, **kwargs):
    """Return vrf gbp details; on failure, log and return a stub dict
    containing only the l3_policy_id.
    """
    LOG.debug("APIC AIM handling _get_vrf_details for: %s", kwargs)
    try:
        details = self._retrieve_vrf_details(context, **kwargs)
    except Exception as e:
        vrf = kwargs.get('vrf_id')
        LOG.error(_LE("An exception has occurred while retrieving vrf "
                      "gbp details for %s"), vrf)
        LOG.exception(e)
        details = {'l3_policy_id': vrf}
    return details
def initialize(self):
    """Reset cached plugin references and verify that the proxy_group
    extension driver is configured.

    :raises: GroupPolicyDeploymentError when proxy_group is missing.
    """
    self._gbp_plugin = None
    self._sc_plugin = None
    # Verify that proxy_group extension is loaded
    if pg_ext.PROXY_GROUP in cfg.CONF.group_policy.extension_drivers:
        return
    LOG.error(_LE("proxy_group GBP driver extension is mandatory for "
                  "traffic stitching plumber."))
    raise exc.GroupPolicyDeploymentError()
def _get_interfaces_for_device_create(self, token, admin_tenant_id, network_handler, device_data): try: mgmt_interface = self._create_management_interface( token, admin_tenant_id, device_data, network_handler) device_data['interfaces'] = [mgmt_interface] except Exception as e: LOG.exception( _LE('Failed to get interfaces for device creation.' 'Error: %(error)s'), {'error': e})
def get_gbp_details(self, context, **kwargs):
    """Return device gbp details; on failure, log and return a stub
    dict containing only the device id.
    """
    LOG.debug("APIC AIM handling get_gbp_details for: %s", kwargs)
    try:
        details = self._get_gbp_details(context, kwargs,
                                        kwargs.get('host'))
    except Exception as e:
        device = kwargs.get('device')
        LOG.error(_LE("An exception has occurred while retrieving device "
                      "gbp details for %s"), device)
        LOG.exception(e)
        details = {'device': device}
    return details
def chain_tenant_id(reraise=False):
    """Return the id of the chain-owner tenant, or None when no chain
    keystone client is configured or the tenant cannot be resolved.

    Lookup failures are logged; re-raising is controlled by `reraise`.
    """
    try:
        keystone = ChainMappingDriver.chain_tenant_keystone_client()
    except cfg.NoSuchOptError:
        return None
    if not keystone:
        return
    tenant = cfg.CONF.chain_mapping.chain_owner_tenant_name
    try:
        # Can it be retrieved directly, without a further keystone
        # call?
        return keystone.tenants.find(name=tenant).id
    except k_exceptions.NotFound:
        with excutils.save_and_reraise_exception(reraise=reraise):
            LOG.error(_LE('No tenant with name %s exists.'), tenant)
    except k_exceptions.NoUniqueMatch:
        with excutils.save_and_reraise_exception(reraise=reraise):
            LOG.error(_LE('Multiple tenants matches found for %s'), tenant)
def delete_network_function_device(self, device_data, network_handler=None):
    """Delete the NFD (network function device).

    Invoked twice per deletion: first with device_data['id'] set, which
    issues the (retried, exception-suppressed) Nova instance delete; then
    again after the instance is gone, to clean up the management port(s).

    :param device_data: NFD description dict; must carry
        'service_details' and 'mgmt_port_id' sub-dicts
    :param network_handler: handler used to delete the interfaces
    :returns: None -- Both on success and Failure
    :raises: exceptions.IncompleteData,
        exceptions.ComputePolicyNotSupported
    """
    # Validate the minimum shape of device_data before doing anything.
    if (any(key not in device_data
            for key in ['service_details', 'mgmt_port_id']) or
            type(device_data['service_details']) is not dict or
            any(key not in device_data['service_details']
                for key in ['service_vendor', 'device_type',
                            'network_mode']) or
            type(device_data['mgmt_port_id']) is not dict or
            any(key not in device_data['mgmt_port_id']
                for key in ['id', 'port_classification', 'port_model'])):
        raise exceptions.IncompleteData()

    # Only Nova-backed devices can be deleted through this path.
    if (device_data['service_details']['device_type'] !=
            nfp_constants.NOVA_MODE):
        raise exceptions.ComputePolicyNotSupported(
            compute_policy=device_data['service_details']['device_type'])

    token = self._get_token(device_data.get('token'))
    if not token:
        # Without a token nothing can be done; treat as a no-op.
        return None

    if device_data.get('id'):
        # delete the device instance
        #
        # this method will be invoked again
        # once the device instance deletion is completed
        with nfp_ctx_mgr.NovaContextManager.new(
                suppress=(Exception, )) as ncm:
            ncm.retry(self.compute_handler_nova.delete_instance,
                      token,
                      device_data['tenant_id'],
                      device_data['id'])
    else:
        # device instance deletion is done, delete remaining resources
        try:
            interfaces = [device_data['mgmt_port_id']]
            self._delete_interfaces(device_data,
                                    interfaces,
                                    network_handler=network_handler)
        except Exception as e:
            # Best-effort cleanup: log and swallow, per the
            # "None on success and failure" contract above.
            LOG.error(_LE('Failed to delete the management data port(s). '
                          'Error: %(error)s'),
                      {'error': e})
def get_dhcp_agent_host(config):
    """Return the host of the first alive DHCP agent, or None when no
    agent is found or the lookup fails.
    """
    try:
        neutronclient = openstack_driver.NeutronClient(config)
        keystoneclient = openstack_driver.KeystoneClient(config)
        token = keystoneclient.get_admin_token()
        agents = neutronclient.get_agents(
            token, {'agent_type': 'DHCP agent', 'alive': True})
        if agents:
            return agents[0].get('host', None)
    except Exception as exc:
        LOG.error(_LE("Failed to get dhcp agent host : %(exc)s"),
                  {'exc': exc})
def get_image_id(self, nova, token, admin_tenant_id, image_name):
    """Resolve an image name to its id via Nova.

    Returns None (after logging) when the lookup fails.
    """
    try:
        return nova.get_image_id(token, admin_tenant_id, image_name)
    except Exception as e:
        LOG.error(_LE('Failed to get image id for device creation.'
                      ' image name: %(image_name)s. Error: %(error)s'),
                  {'image_name': image_name, 'error': e})
def _call_on_ext_drivers(self, method_name, session, data, result): """Helper method for calling a method across all extension drivers.""" for driver in self.ordered_ext_drivers: try: getattr(driver.obj, method_name)(session, data, result) except (gp_exc.GroupPolicyException, n_exc.NeutronException): with excutils.save_and_reraise_exception(): LOG.exception( _LE("Extension driver '%(name)s' " "failed in %(method)s"), { 'name': driver.name, 'method': method_name }) except Exception: LOG.exception( _LE("Extension driver '%(name)s' " "failed in %(method)s"), { 'name': driver.name, 'method': method_name }) # We are replacing a non-GBP/non-Neutron exception here raise gp_exc.GroupPolicyDriverError(method=method_name)
def _delete_port(self, token, port_id):
    """Delete a neutron port, logging (not raising) on failure."""
    try:
        handler = self.network_handlers[nfp_constants.NEUTRON_MODE]
        handler.delete_port(token, port_id)
    except Exception as exc:
        LOG.error(_LE("Failed to delete port %(port_id)s. Error: %(exc)s"),
                  {"port_id": port_id, 'exc': exc})
def _call_deploy_sc_node(self, context, instance): # Actual node deploy try: deployers = self._get_scheduled_drivers(context, instance, 'deploy') self._deploy_servicechain_nodes(context, deployers) except Exception: # Some node could not be deployed with excutils.save_and_reraise_exception(): LOG.error( _LE("Node deployment failed, " "servicechain_instance %s is in ERROR state"), instance['id'])
def _update_chains_consumer_modified(self, context, policy_target_group, instance_id, action): updaters = self._get_scheduled_drivers( context, self.get_servicechain_instance(context, instance_id), 'update') for update in updaters.values(): try: getattr(update['driver'], 'update_node_consumer_ptg_' + action)(update['context'], policy_target_group) except exc.NodeDriverError as ex: LOG.error( _LE("Node Update on policy target group modification " "failed, %s"), ex.message)