def _data_wrapper(self, context, tenant_id, nf, **kwargs):
    """Build the OTC request body for a VPN resource operation.

    Pulls the service description out of the network function,
    stamps the resource with it, prepares the neutron context
    dicts and hands everything to common.prepare_request_data.
    """
    desc, desc_str = utils.get_vpn_description_from_nf(nf)
    desc.update({'tenant_id': tenant_id})
    resource_ctx_data = self._get_resource_data(desc, const.VPN)
    rsrc_type = kwargs['rsrc_type']
    rsrc = kwargs['resource']
    # REVISIT(dpak): We need to avoid resource description
    # dependency in OTC and instead use neutron context description.
    rsrc['description'] = desc_str
    if rsrc_type.lower() == 'ipsec_site_connection':
        nfp_context = {'network_function_id': nf['id'],
                       'ipsec_site_connection_id': kwargs['rsrc_id']}
    else:
        nfp_context = {}
    ctx_dict, rsrc_ctx_dict = self._prepare_resource_context_dicts(
        context, tenant_id, rsrc_type, rsrc, resource_ctx_data)
    nfp_context['neutron_context'] = ctx_dict
    nfp_context['service_vm_context'] = utils.get_service_vm_context(
        desc['service_vendor'])
    nfp_context['requester'] = 'nas_service'
    nfp_context['logging_context'] = module_context.get()['log_context']
    kwargs.update({'neutron_context': rsrc_ctx_dict})
    body = common.prepare_request_data(nfp_context, rsrc_type, 'vpn',
                                       kwargs, desc['service_vendor'])
    self._update_request_data(body, desc)
    return body
def poll_event(self, event, spacing=2, max_times=sys.maxint):
    """To poll for an event.

    As a base class, it only does the polling descriptor preparation.
    NfpController class implements the required functionality.
    """
    module = context.get()['log_context']['namespace']
    handler, ev_spacing = self._event_handlers.get_poll_handler(
        event.id, module=module)
    assert handler, "No poll handler found for event %s" % (event.id)
    assert spacing or ev_spacing, "No spacing specified for polling"
    # A spacing registered with the handler wins over the caller's value.
    spacing = ev_spacing or spacing
    if event.desc.type != nfp_event.POLL_EVENT:
        # Not already a poll event - clone it and tag the uuid.
        event = self._make_new_event(event)
        event.desc.uuid = event.desc.uuid + ":" + "POLL_EVENT"
    event.desc.type = nfp_event.POLL_EVENT
    event.desc.target = module
    event.desc.flag = None
    event.desc.poll_desc = nfp_event.PollDesc(spacing=spacing,
                                              max_times=max_times)
    if not event.context:
        # Log nfp_context for event handling code
        event.context = context.purge()
    event.desc.path_type = event.context['event_desc'].get('path_type')
    event.desc.path_key = event.context['event_desc'].get('path_key')
    return event
def poll_event(self, event, spacing=2, max_times=sys.maxint):
    """To poll for an event.

    As a base class, it only does the polling descriptor preparation.
    NfpController class implements the required functionality.
    """
    nfp_context = context.get()
    module = nfp_context['log_context']['namespace']
    handler, ev_spacing = self._event_handlers.get_poll_handler(
        event.id, module=module)
    assert handler, "No poll handler found for event %s" % (event.id)
    assert spacing or ev_spacing, "No spacing specified for polling"
    if ev_spacing:
        spacing = ev_spacing
    # Wrap a non-poll event into a fresh event with a tagged uuid.
    if event.desc.type != nfp_event.POLL_EVENT:
        event = self._make_new_event(event)
        event.desc.uuid = event.desc.uuid + ":" + "POLL_EVENT"
    event.desc.type = nfp_event.POLL_EVENT
    event.desc.target = module
    event.desc.flag = None
    poll_kwargs = {'spacing': spacing, 'max_times': max_times}
    setattr(event.desc, 'poll_desc', nfp_event.PollDesc(**poll_kwargs))
    if not event.context:
        # Log nfp_context for event handling code
        event.context = context.purge()
    event_desc_ctx = event.context['event_desc']
    event.desc.path_type = event_desc_ctx.get('path_type')
    event.desc.path_key = event_desc_ctx.get('path_key')
    return event
def register_events(self, event_descs, priority=0):
    """Register event handlers with core. """
    # REVISIT (mak): change name to register_event_handlers() ?
    module = context.get()['log_context']['namespace']
    for desc in event_descs:
        self._event_handlers.register(desc.id, desc.handler, module,
                                      priority=priority)
def _data_wrapper(self, context, tenant_id, name, reason, nf, **kwargs):
    """Build the OTC request body for a LBaaSv2 resource operation.

    :param context: neutron context of the API operation.
    :param tenant_id: tenant owning the loadbalancer resources.
    :param name: resource name ('loadbalancer', 'listener', 'pool',
                 'member', 'healthmonitor', ...).
    :param reason: operation reason (unused here; kept for interface
                   compatibility with callers).
    :param nf: network function dict; the second line of its
               description carries the serialized service profile.
    :param kwargs: resource payload keyed by the resource name.
    :returns: request body produced by common.prepare_request_data.
    """
    nfp_context = {}
    description = ast.literal_eval((nf['description'].split('\n'))[1])
    description.update({'tenant_id': tenant_id})
    context_resource_data = df.get_network_function_info(
        description, const.LOADBALANCERV2)
    # Compute once; description is not mutated after this point.
    str_description = str(description)
    # BUG FIX: lb_id was left unbound when a 'member'/'healthmonitor'
    # payload carried no 'pool', raising NameError below. Default to
    # None so the request can still be assembled.
    lb_id = None
    # REVISIT(dpak): We need to avoid resource description
    # dependency in OTC and instead use neutron context description.
    if name.lower() == 'loadbalancer':
        lb_id = kwargs['loadbalancer']['id']
        kwargs['loadbalancer'].update({'description': str_description})
        nfp_context = {'network_function_id': nf['id'],
                       'loadbalancer_id': kwargs['loadbalancer']['id']}
    elif name.lower() == 'listener':
        lb_id = kwargs['listener'].get('loadbalancer_id')
        kwargs['listener']['description'] = str_description
    elif name.lower() == 'pool':
        lb_id = kwargs['pool'].get('loadbalancer_id')
        kwargs['pool']['description'] = str_description
    elif name.lower() == 'member':
        pool = kwargs['member'].get('pool')
        if pool:
            lb_id = pool.get('loadbalancer_id')
        kwargs['member']['description'] = str_description
    elif name.lower() == 'healthmonitor':
        pool = kwargs['healthmonitor'].get('pool')
        if pool:
            lb_id = pool.get('loadbalancer_id')
        kwargs['healthmonitor']['description'] = str_description
    else:
        kwargs[name.lower()].update({'description': str_description})
        lb_id = kwargs[name.lower()].get('loadbalancer_id')
    args = {'tenant_id': tenant_id,
            'lb_id': lb_id,
            'context': context,
            'description': str_description,
            'context_resource_data': context_resource_data}
    ctx_dict, rsrc_ctx_dict = self._prepare_resource_context_dicts(**args)
    service_vm_context = utils.get_service_vm_context(
        description['service_vendor'])
    nfp_context.update({'neutron_context': ctx_dict,
                        'requester': 'nas_service',
                        'logging_context':
                            module_context.get()['log_context'],
                        'service_vm_context': service_vm_context})
    resource_type = 'loadbalancerv2'
    resource = name
    resource_data = {'neutron_context': rsrc_ctx_dict}
    resource_data.update(**kwargs)
    body = common.prepare_request_data(nfp_context, resource,
                                       resource_type, resource_data,
                                       description['service_vendor'])
    return body
def path_complete_event(self):
    """Create event for path completion """
    nfp_context = context.get()
    event = self.new_event(id='PATH_COMPLETE')
    event_desc = nfp_context['event_desc']
    event.desc.path_type = event_desc.get('path_type')
    event.desc.path_key = event_desc.get('path_key')
    # Workers forward over the pipe; the distributor handles directly.
    if self.PROCESS_TYPE != "worker":
        self._manager.process_events([event])
    else:
        self.pipe_send(self._pipe, event)
def nfp_modules_post_init(conf, nfp_modules, nfp_controller):
    """Invoke nfp_module_post_init() on every loaded nfp module.

    Modules that do not implement the hook are skipped with a debug
    message.

    :param conf: oslo config object passed through to each module.
    :param nfp_modules: list of already-imported nfp python modules.
    :param nfp_controller: controller instance handed to each hook.
    """
    nfp_context = context.get()
    for module in nfp_modules:
        try:
            # Tag log messages with the short module name.
            namespace = module.__name__.split(".")[-1]
            nfp_context['log_context']['namespace'] = namespace
            module.nfp_module_post_init(nfp_controller, conf)
        except AttributeError:
            # BUG FIX: the adjacent string literals concatenated to
            # "implementnfp_module_post_init" - a space was missing.
            message = ("(module - %s) - does not implement "
                       "nfp_module_post_init(), ignoring") % (
                identify(module))
            LOG.debug(message)
def _get_nfp_msg(self, msg):
    """Return *msg* decorated with NFP log-context ids and component."""
    nfp_context = context.get()
    log_context = nfp_context['log_context']
    if log_context:
        tags = "[%s] [NFI:%s] [NFD:%s]" % (log_context.get('meta_id', '-'),
                                           log_context.get('nfi_id', '-'),
                                           log_context.get('nfd_id', '-'))
        msg = "%s %s" % (tags, msg)
    component = CONF.module if hasattr(CONF, 'module') else ''
    msg = "[%s] %s" % (component, msg)
    return msg
def _get_nfp_msg(self, msg):
    """Prefix *msg* with the logging context ids and component name."""
    log_context = context.get()['log_context']
    if log_context:
        prefix = "[%s] [NFI:%s] [NFD:%s]" % (
            log_context.get('meta_id', '-'),
            log_context.get('nfi_id', '-'),
            log_context.get('nfd_id', '-'))
        msg = "%s %s" % (prefix, msg)
    if hasattr(CONF, 'module'):
        component = CONF.module
    else:
        component = ''
    return "[%s] %s" % (component, msg)
def load_nfp_modules_from_path(conf, controller, path):
    """ Load all nfp modules from configured directory.

    Imports every *.py file under the package at *path*, calls its
    nfp_module_init(controller, conf) hook, and returns the list of
    modules that initialized successfully.

    :param conf: oslo config object handed to each module's init hook.
    :param controller: nfp controller handed to each module's init hook.
    :param path: dotted python path of the modules package.
    :returns: list of successfully initialized python modules.
    """
    pymodules = []
    nfp_context = context.get()
    try:
        base_module = __import__(path, globals(), locals(),
                                 ['modules'], -1)
        modules_dir = base_module.__path__[0]
        try:
            files = os.listdir(modules_dir)
            for pyfile in set([f for f in files if f.endswith(".py")]):
                try:
                    pymodule = __import__(path, globals(), locals(),
                                          [pyfile[:-3]], -1)
                    # BUG FIX: getattr() replaces eval() - same result,
                    # without evaluating a constructed code string.
                    pymodule = getattr(pymodule, pyfile[:-3])
                    try:
                        namespace = pyfile[:-3].split(".")[-1]
                        nfp_context['log_context']['namespace'] = namespace
                        pymodule.nfp_module_init(controller, conf)
                        pymodules += [pymodule]
                        message = "(module - %s) - Initialized" % (
                            identify(pymodule))
                        LOG.debug(message)
                    except AttributeError:
                        # Only the traceback is reported; drop the
                        # unused exc_type/exc_value unpacking.
                        exc_traceback = sys.exc_info()[2]
                        message = "Traceback: %s" % (exc_traceback)
                        LOG.error(message)
                        # BUG FIX: adjacent literals previously fused
                        # into "implementnfp_module_init" - add space.
                        message = ("(module - %s) - does not implement "
                                   "nfp_module_init()") % (
                            identify(pymodule))
                        LOG.warning(message)
                except ImportError:
                    message = "Failed to import module %s" % (pyfile)
                    LOG.error(message)
        except OSError:
            message = "Failed to read files from %s" % (modules_dir)
            LOG.error(message)
    except ImportError:
        message = "Failed to import module from path %s" % (path)
        LOG.error(message)
    return pymodules
def load_nfp_modules_from_path(conf, controller, path):
    """ Load all nfp modules from configured directory.

    Imports every *.py file under the package at *path*, calls its
    nfp_module_init(controller, conf) hook, and returns the list of
    modules that initialized successfully.

    :param conf: oslo config object handed to each module's init hook.
    :param controller: nfp controller handed to each module's init hook.
    :param path: dotted python path of the modules package.
    :returns: list of successfully initialized python modules.
    """
    pymodules = []
    nfp_context = context.get()
    try:
        base_module = __import__(path, globals(), locals(),
                                 ['modules'], -1)
        modules_dir = base_module.__path__[0]
        try:
            files = os.listdir(modules_dir)
            for pyfile in set([f for f in files if f.endswith(".py")]):
                try:
                    pymodule = __import__(path, globals(), locals(),
                                          [pyfile[:-3]], -1)
                    # BUG FIX: getattr() replaces eval() - same result,
                    # without evaluating a constructed code string.
                    pymodule = getattr(pymodule, pyfile[:-3])
                    try:
                        namespace = pyfile[:-3].split(".")[-1]
                        nfp_context['log_context']['namespace'] = namespace
                        pymodule.nfp_module_init(controller, conf)
                        pymodules += [pymodule]
                        message = "(module - %s) - Initialized" % (
                            identify(pymodule))
                        LOG.debug(message)
                    except AttributeError:
                        # Only the traceback is reported; drop the
                        # unused exc_type/exc_value unpacking.
                        exc_traceback = sys.exc_info()[2]
                        message = "Traceback: %s" % (exc_traceback)
                        LOG.error(message)
                        # BUG FIX: adjacent literals previously fused
                        # into "implementnfp_module_init" - add space.
                        # BUG FIX: LOG.warn is deprecated; use warning.
                        message = ("(module - %s) - does not implement "
                                   "nfp_module_init()") % (
                            identify(pymodule))
                        LOG.warning(message)
                except ImportError:
                    message = "Failed to import module %s" % (pyfile)
                    LOG.error(message)
        except OSError:
            message = "Failed to read files from %s" % (modules_dir)
            LOG.error(message)
    except ImportError:
        message = "Failed to import module from path %s" % (path)
        LOG.error(message)
    return pymodules
def _data_wrapper(self, context, firewall, host, nf, reason):
    """Assemble the firewall request body for the OTC service."""
    # Hardcoding the position for fetching data since we are owning
    # its positional change
    desc = ast.literal_eval((nf['description'].split('\n'))[1])
    desc.update({'tenant_id': firewall['tenant_id']})
    resource_ctx_data = self._get_resource_data(desc, const.FIREWALL)
    fw_mac = desc['provider_ptg_info'][0]
    desc_str = str(desc)
    # REVISIT(dpak): We need to avoid resource description
    # dependency in OTC and instead use neutron context description.
    firewall.update({'description': desc_str})
    ctx_dict, rsrc_ctx_dict = self._prepare_resource_context_dicts(
        context=context,
        context_resource_data=resource_ctx_data,
        firewall_policy_id=firewall['firewall_policy_id'],
        description=desc_str,
        tenant_id=firewall['tenant_id'])
    nfp_context = {
        'network_function_id': nf['id'],
        'neutron_context': ctx_dict,
        'fw_mac': fw_mac,
        'requester': 'nas_service',
        'logging_context': module_context.get()['log_context'],
        'service_vm_context': utils.get_service_vm_context(
            desc['service_vendor']),
    }
    resource_data = {'firewall': firewall,
                     'host': host,
                     'neutron_context': rsrc_ctx_dict}
    body = common.prepare_request_data(nfp_context, 'firewall', 'firewall',
                                       resource_data,
                                       desc['service_vendor'])
    self._update_request_data(body, desc)
    return body
def test_poll_event_with_decorator_spacing(self, mock_compress,
                                           mock_pipe_send):
    """Polling with decorator-declared spacing fires after ~2 seconds."""
    mock_pipe_send.side_effect = self.mocked_pipe_send
    mock_compress.side_effect = self.mocked_compress
    conf = oslo_config.CONF
    conf.nfp_modules_path = NFP_MODULES_PATH
    controller = nfp_controller.NfpController(conf, singleton=False)
    self.controller = controller
    nfp_controller.load_nfp_modules(conf, controller)
    # Mock launching of a worker
    controller.launch(1)
    controller._update_manager()
    setattr(controller, 'poll_event_dec_wait_obj', multiprocessing.Event())
    event = controller.create_event(id='POLL_EVENT_DECORATOR',
                                    data='NO_DATA')
    # Attach a fresh descriptor and explicitly clear the worker.
    setattr(event, 'desc', nfp_event.EventDesc(**{}))
    event.desc.worker = None
    ctx = nfp_context.get()
    ctx['log_context']['namespace'] = 'nfp_module'
    controller.poll_event(event)
    begin = time.time()
    # relinquish for 2secs
    time.sleep(2)
    controller.poll_event_dec_wait_obj.wait(0.1)
    fired = controller.poll_event_dec_wait_obj.is_set()
    finish = time.time()
    self.assertTrue(fired)
    self.assertTrue(round(finish - begin) == 2.0)
def _data_wrapper(self, context, tenant_id, nf, **kwargs):
    """Wrap a VPN resource operation into an OTC request body."""
    description, str_description = utils.get_vpn_description_from_nf(nf)
    description.update({'tenant_id': tenant_id})
    context_resource_data = self._get_resource_data(description,
                                                    const.VPN)
    resource = kwargs['rsrc_type']
    resource_data = kwargs['resource']
    # REVISIT(dpak): We need to avoid resource description
    # dependency in OTC and instead use neutron context description.
    resource_data['description'] = str_description
    nfp_context = {}
    if resource.lower() == 'ipsec_site_connection':
        nfp_context['network_function_id'] = nf['id']
        nfp_context['ipsec_site_connection_id'] = kwargs['rsrc_id']
    ctx_dict, rsrc_ctx_dict = self._prepare_resource_context_dicts(
        context, tenant_id, resource, resource_data,
        context_resource_data)
    nfp_context['neutron_context'] = ctx_dict
    nfp_context['service_vm_context'] = utils.get_service_vm_context(
        description['service_vendor'])
    nfp_context['requester'] = 'nas_service'
    nfp_context['logging_context'] = module_context.get()['log_context']
    kwargs['neutron_context'] = rsrc_ctx_dict
    body = common.prepare_request_data(nfp_context, resource, 'vpn',
                                       kwargs,
                                       description['service_vendor'])
    self._update_request_data(body, description)
    return body
def _data_wrapper(self, context, firewall, host, nf, reason):
    """Compose the firewall config request body for the OTC service."""
    # Hardcoding the position for fetching data since we are owning
    # its positional change
    description = ast.literal_eval((nf['description'].split('\n'))[1])
    description.update({'tenant_id': firewall['tenant_id']})
    context_resource_data = self._get_resource_data(description,
                                                    const.FIREWALL)
    fw_mac = description['provider_ptg_info'][0]
    description_str = str(description)
    # REVISIT(dpak): We need to avoid resource description
    # dependency in OTC and instead use neutron context description.
    firewall.update({'description': description_str})
    prepare_args = {
        'context': context,
        'context_resource_data': context_resource_data,
        'firewall_policy_id': firewall['firewall_policy_id'],
        'description': description_str,
        'tenant_id': firewall['tenant_id'],
    }
    ctx_dict, rsrc_ctx_dict = self._prepare_resource_context_dicts(
        **prepare_args)
    service_vm_context = utils.get_service_vm_context(
        description['service_vendor'])
    nfp_context = {'network_function_id': nf['id'],
                   'neutron_context': ctx_dict,
                   'fw_mac': fw_mac,
                   'requester': 'nas_service',
                   'logging_context': module_context.get()['log_context'],
                   'service_vm_context': service_vm_context}
    resource = resource_type = 'firewall'
    payload = {resource: firewall,
               'host': host,
               'neutron_context': rsrc_ctx_dict}
    body = common.prepare_request_data(nfp_context, resource,
                                       resource_type, payload,
                                       description['service_vendor'])
    self._update_request_data(body, description)
    return body
def _data_wrapper(self, context, tenant_id, name, reason, nf, **kwargs):
    """Wrap an LBaaSv2 resource operation into an OTC request body."""
    description = ast.literal_eval((nf['description'].split('\n'))[1])
    description.update({'tenant_id': tenant_id})
    context_resource_data = df.get_network_function_info(
        description, const.LOADBALANCERV2)
    desc_str = str(description)
    resource_name = name.lower()
    nfp_context = {}
    # REVISIT(dpak): We need to avoid resource description
    # dependency in OTC and instead use neutron context description.
    if resource_name == 'loadbalancer':
        lb_id = kwargs['loadbalancer']['id']
        kwargs['loadbalancer'].update({'description': desc_str})
        nfp_context = {'network_function_id': nf['id'],
                       'loadbalancer_id': kwargs['loadbalancer']['id']}
    elif resource_name == 'listener':
        lb_id = kwargs['listener'].get('loadbalancer_id')
        kwargs['listener']['description'] = desc_str
    elif resource_name == 'pool':
        lb_id = kwargs['pool'].get('loadbalancer_id')
        kwargs['pool']['description'] = desc_str
    elif resource_name == 'member':
        pool = kwargs['member'].get('pool')
        if pool:
            lb_id = pool.get('loadbalancer_id')
        kwargs['member']['description'] = desc_str
    elif resource_name == 'healthmonitor':
        pool = kwargs['healthmonitor'].get('pool')
        if pool:
            lb_id = pool.get('loadbalancer_id')
        kwargs['healthmonitor']['description'] = desc_str
    else:
        kwargs[resource_name].update({'description': desc_str})
        lb_id = kwargs[resource_name].get('loadbalancer_id')
    ctx_dict, rsrc_ctx_dict = self._prepare_resource_context_dicts(
        tenant_id=tenant_id, lb_id=lb_id, context=context,
        description=desc_str,
        context_resource_data=context_resource_data)
    nfp_context['neutron_context'] = ctx_dict
    nfp_context['requester'] = 'nas_service'
    nfp_context['logging_context'] = module_context.get()['log_context']
    nfp_context['service_vm_context'] = utils.get_service_vm_context(
        description['service_vendor'])
    resource_data = {'neutron_context': rsrc_ctx_dict}
    resource_data.update(**kwargs)
    body = common.prepare_request_data(nfp_context, name,
                                       'loadbalancerv2', resource_data,
                                       description['service_vendor'])
    return body
def test_events_sequencing_negative(self, mock_pipe_send):
    """Second serialized event must not fire before the first completes."""
    mock_pipe_send.side_effect = self.mocked_pipe_send
    conf = oslo_config.CONF
    conf.nfp_modules_path = NFP_MODULES_PATH
    controller = nfp_controller.NfpController(conf, singleton=False)
    self.controller = controller
    nfp_controller.load_nfp_modules(conf, controller)
    # Mock launching of a worker
    controller.launch(1)
    controller._update_manager()
    setattr(controller, 'sequence_event_1_wait_obj',
            multiprocessing.Event())
    setattr(controller, 'sequence_event_2_wait_obj',
            multiprocessing.Event())
    first = controller.create_event(
        id='SEQUENCE_EVENT_1', data='NO_DATA',
        serialize=True, binding_key='SEQUENCE')
    second = controller.create_event(
        id='SEQUENCE_EVENT_2', data='NO_DATA',
        serialize=True, binding_key='SEQUENCE')
    controller.post_event(first)
    controller.post_event(second)
    controller._manager.manager_run()
    controller.sequence_event_1_wait_obj.wait(1)
    self.assertTrue(controller.sequence_event_1_wait_obj.is_set())
    controller._manager.manager_run()
    controller.sequence_event_2_wait_obj.wait(1)
    # Should not be called
    self.assertFalse(controller.sequence_event_2_wait_obj.is_set())
    controller.event_complete(first)
    controller.event_complete(second)

@mock.patch(
    'gbpservice.nfp.core.controller.NfpController.pipe_send')
@mock.patch(
    'gbpservice.nfp.core.controller.NfpController.compress')
def test_poll_event(self, mock_compress, mock_pipe_send):
    """A polled event fires after the requested 1 second spacing."""
    mock_pipe_send.side_effect = self.mocked_pipe_send
    mock_compress.side_effect = self.mocked_compress
    conf = oslo_config.CONF
    conf.nfp_modules_path = NFP_MODULES_PATH
    controller = nfp_controller.NfpController(conf, singleton=False)
    self.controller = controller
    nfp_controller.load_nfp_modules(conf, controller)
    # Mock launching of a worker
    controller.launch(1)
    controller._update_manager()
    setattr(controller, 'poll_event_wait_obj', multiprocessing.Event())
    event = controller.create_event(id='POLL_EVENT', data='NO_DATA')
    # Attach a fresh descriptor and target the launched worker.
    setattr(event, 'desc', nfp_event.EventDesc(**{}))
    event.desc.worker = controller.get_childrens().keys()[0]
    ctx = nfp_context.get()
    ctx['log_context']['namespace'] = 'nfp_module'
    controller.poll_event(event, spacing=1)
    begin = time.time()
    # relinquish for 1sec
    time.sleep(1)
    controller.poll_event_wait_obj.wait(0.1)
    fired = controller.poll_event_wait_obj.is_set()
    finish = time.time()
    self.assertTrue(fired)
    self.assertTrue(round(finish - begin) == 1.0)