def test_rebalance(self, fake_repop, fake_get_all_sms, fake_add_rsc):
    """Rebalance should repopulate and update newly owned state machines.

    State machines owned after the rebalance but not before must receive
    a generic UPDATE event and be queued for work.
    """
    sm1 = mock.Mock(
        resource_id='sm1',
        send_message=mock.Mock(return_value=True),
    )
    sm2 = mock.Mock(
        resource_id='sm2',
        resource='sm2_resource',
        send_message=mock.Mock(return_value=True),
    )
    # First call: ownership before the rebalance; second: after.
    fake_get_all_sms.side_effect = [
        set([sm1]),
        set([sm1, sm2]),
    ]
    fake_hash = mock.Mock(rebalance=mock.Mock(), )
    self.w.hash_ring_mgr = fake_hash
    msg = event.Event(
        resource=self.resource,
        crud=event.REBALANCE,
        body={
            'members': ['foo', 'bar'],
        },
    )
    self.w.handle_message('*', msg)
    fake_hash.rebalance.assert_called_with(['foo', 'bar'])
    self.assertTrue(fake_repop.called)
    exp_event = event.Event(resource='sm2_resource', crud=event.UPDATE,
                            body={})
    sm2.send_message.assert_called_with(exp_event)
    # BUG FIX: the original ended with `sm2._add_resource_to_work_queue(sm2)`,
    # which merely calls a method on a Mock (always succeeds, asserts
    # nothing) and leaves the patched worker method unverified.  Assert the
    # patched _add_resource_to_work_queue was invoked with the new SM.
    fake_add_rsc.assert_called_with(sm2)
def test__should_process_no_router_id(self, fake_hash):
    """An event with no router id gets the tenant's router id filled in."""
    ring_mgr = fake_hash.HashRingManager()
    ring_mgr.ring.get_hosts.return_value = [self.w.host]
    self.w.hash_ring_mgr = ring_mgr
    self.fake_cache.get_by_tenant.return_value = (
        '9846d012-3c75-11e5-b476-8321b3ff1a1d')

    incoming = event.Event(
        resource=event.Resource(
            driver=router.Router.RESOURCE_NAME,
            id=None,
            tenant_id='fake_tenant_id',
        ),
        crud=event.CREATE,
        body={'key': 'value'},
    )
    # Same event, but with the resource id resolved from the cache.
    expected = event.Event(
        resource=event.Resource(
            driver=router.Router.RESOURCE_NAME,
            id='9846d012-3c75-11e5-b476-8321b3ff1a1d',
            tenant_id='fake_tenant_id',
        ),
        crud=event.CREATE,
        body={'key': 'value'},
    )
    self.assertEqual(
        expected,
        self.w._should_process_message(self.target, incoming))
def test_existing_resource_of_many(self):
    """A repeated resource id yields the identical state machine object."""
    machines = {}
    for rid in ('5678', 'ABCD', 'EFGH'):
        create_msg = event.Event(
            resource=event.Resource(
                tenant_id=self.tenant_id,
                id=rid,
                driver=router.Router.RESOURCE_NAME,
            ),
            crud=event.CREATE,
            body={'key': 'value'},
        )
        # The first lookup creates the state machine...
        machines[rid] = self.trm.get_state_machines(create_msg, self.ctx)[0]

    # ...and a later lookup for the same id must return the same object.
    repeat_msg = event.Event(
        resource=event.Resource(
            id='5678',
            tenant_id=self.tenant_id,
            driver=router.Router.RESOURCE_NAME,
        ),
        crud=event.CREATE,
        body={'key': 'value'},
    )
    again = self.trm.get_state_machines(repeat_msg, self.ctx)[0]
    self.assertIs(again, machines['5678'])
def setUp(self):
    """Seed routers for two tenants so wildcard delivery can be tested."""
    super(TestWildcardMessages, self).setUp()
    self.tenant_id_1 = 'a8f964d4-6631-11e5-a79f-525400cfc32a'
    self.tenant_id_2 = 'ef1a6e90-6631-11e5-83cb-525400cfc326'
    self.w._should_process_message = mock.MagicMock(return_value=self.msg)
    # Create some tenants
    seeds = (
        ('ABCD', self.tenant_id_1),
        ('EFGH', self.tenant_id_2),
    )
    for rid, tenant in seeds:
        msg = event.Event(
            resource=event.Resource(
                driver=router.Router.RESOURCE_NAME,
                id=rid,
                tenant_id=tenant,
            ),
            crud=event.CREATE,
            body={'key': 'value'},
        )
        self.w.handle_message(tenant, msg)
def test_report_status_dispatched(self):
    """WORKERS_DEBUG command should invoke report_status exactly once."""
    with mock.patch.object(self.w, 'report_status') as report:
        debug_evt = event.Event(
            '*', event.COMMAND, {'command': commands.WORKERS_DEBUG})
        self.w.handle_message('debug', debug_evt)
        report.assert_called_once_with()
def _repopulate(self):
    """Repopulate local state machines given the new DHT

    After the hash ring has been rebalanced, this ensures the workers' TRMs
    are populated with the correct set of state machines given the current
    layout of the ring. We also consult the dispatcher to ensure we're
    creating state machines on the correct worker process. This also
    cleans up state machines that are no longer mapped here.
    """
    LOG.debug('Running post-rebalance repopulate for worker %s',
              self.proc_name)
    for resource in populate.repopulate():
        target_hosts = self.hash_ring_mgr.ring.get_hosts(resource.id)
        if self.host not in target_hosts:
            # The resource now hashes to another host: drop our local
            # state machine for it (if we were managing one).
            tid = _normalize_uuid(resource.tenant_id)
            if tid in self.tenant_managers:
                trm = self.tenant_managers[tid]
                trm.unmanage_resource(resource.id)
            continue

        tgt = self.scheduler.dispatcher.pick_workers(resource.tenant_id)[0]
        if tgt['worker'].name != self.proc_name:
            # Typically, state machine creation doesn't happen until the
            # dispatcher has scheduled a msg to a single worker. rebalances
            # are scheduled to all workers so we need to consult the
            # dispatcher here to avoid creating state machines in all
            # workers.
            continue

        for trm in self._get_trms(resource.tenant_id):
            # creates a state machine if one does not exist.
            e = event.Event(resource=resource, crud=None, body={})
            trm.get_state_machines(e, self._context)
def _rebalance(self, message):
    """Handle a REBALANCE event: rebuild the ring and resource ownership.

    :param message: the REBALANCE Event; its body carries the current
                    cluster 'members' and, on startup, 'node_bootstrap'.
    """
    # rebalance the ring with the new membership.
    self.hash_ring_mgr.rebalance(message.body.get('members'))

    # We leverage the rebalance event to both seed the local node's
    # hash ring when it comes online, and to also rebalance it in
    # reaction to cluster events. Exit early if we're only responding
    # to a bootstrapping rebalance, we don't need to worry about adjusting
    # state because there is none yet.
    if message.body.get('node_bootstrap'):
        # replay any messages that may have accumulated while we were
        # waiting to finish cluster bootstrap
        self._replay_deferred_messages()
        return

    # track which SMs we initially owned
    orig_sms = self._get_all_state_machines()

    # rebuild the TRMs and SMs based on new ownership
    self._repopulate()

    # TODO(adam_g): Replace the UPDATE with a POST_REBALANCE command
    # that triggers a driver method instead of generic update.
    # for newly owned resources, issue a post-rebalance update.
    for sm in (self._get_all_state_machines() - orig_sms):
        post_rebalance = event.Event(
            resource=sm.resource, crud=event.UPDATE,
            body={})
        LOG.debug('Sending post-rebalance update for %s',
                  sm.resource_id)
        if sm.send_message(post_rebalance):
            self._add_resource_to_work_queue(sm)
def test_handle_message_report_status(self):
    """WORKERS_DEBUG should dump config values when coordination is off."""
    with mock.patch('astara.worker.cfg.CONF') as conf:
        conf.coordination = mock.Mock(enabled=False)
        debug_evt = event.Event(
            '*', event.COMMAND, {'command': commands.WORKERS_DEBUG})
        self.w.handle_message('debug', debug_evt)
        self.assertTrue(conf.log_opt_values.called)
def router_deleted(self, ctxt, router_id):
    """Queue a DELETE event for a router removed via RPC."""
    tenant_id = _get_tenant_id_for_message(ctxt)
    resource = event.Resource('router', router_id, tenant_id)
    deletion = event.Event(resource, event.DELETE, None)
    self.notification_queue.put((deletion.resource.tenant_id, deletion))
def testManage(self):
    """TENANT_MANAGE should pull a debugged tenant out of debug mode."""
    self.enable_debug(tenant_id='this-tenant-id')
    manage_cmd = event.Event('*', event.COMMAND, {
        'command': commands.TENANT_MANAGE,
        'tenant_id': 'this-tenant-id',
    })
    self.w.handle_message('*', manage_cmd)
    self.assert_not_in_debug(tenant_id='this-tenant-id')
def testManageNoLock(self):
    """RESOURCE_MANAGE works even when no lock exists for the resource."""
    self.enable_debug(resource_id='this-resource-id')
    manage_cmd = event.Event('*', event.COMMAND, {
        'command': commands.RESOURCE_MANAGE,
        'resource_id': 'this-resource-id',
    })
    self.w.handle_message('*', manage_cmd)
    self.assert_not_in_debug(resource_id='this-resource-id')
def process_notification(tenant_id, event_type, payload):
    """Process an incoming notification event

    This gets called from the notifications layer to determine whether
    this driver should process an incoming notification event. It is
    responsible for translating an incoming notification to an Event
    object appropriate for this driver.

    :param tenant_id: str The UUID tenant_id for the incoming event
    :param event_type: str event type, for example loadbalancer.create.end
    :param payload: The payload body of the incoming event
    :returns: A populated Event object if it should process, or None if not
    """
    if event_type.startswith('loadbalancerstatus.update'):
        # these are generated when we sync state
        return

    # The LB id may live in different places depending on which
    # notification this is.
    lb_id = (payload.get('loadbalancer', {}).get('id') or
             payload.get('listener', {}).get('loadbalancer_id') or
             payload.get('loadbalancer_id'))

    update_notifications = [
        'listener.create.start',
        'pool.create.start',
        'member.create.end',
        'member.delete.end',
    ]

    # some events do not contain a lb id.
    if not lb_id and event_type not in update_notifications:
        return

    if event_type == 'loadbalancer.create.end':
        crud = event.CREATE
    elif event_type == 'loadbalancer.delete.end':
        crud = event.DELETE
    elif event_type in update_notifications:
        crud = event.UPDATE
    else:
        crud = None

    if not crud:
        LOG.info('Could not determine CRUD for event: %s ', event_type)
        return

    resource = event.Resource(driver=LoadBalancer.RESOURCE_NAME,
                              id=lb_id,
                              tenant_id=tenant_id)
    e = event.Event(
        resource=resource,
        crud=crud,
        body=payload,
    )
    return e
def test_process_notification_router_delete(self):
    """router.delete.end should map to a DELETE event for the router."""
    payload = {'router_id': 'fake_router_id'}
    expected = event.Event(
        resource=event.Resource(driver=router.Router.RESOURCE_NAME,
                                id='fake_router_id',
                                tenant_id='fake_tenant_id'),
        crud=event.DELETE,
        body=payload,
    )
    self._test_notification('router.delete.end', payload, expected)
def test(self, mock_cfg):
    """CONFIG_RELOAD command should trigger a dump of config values."""
    mock_cfg.CONF = mock.MagicMock(log_opt_values=mock.MagicMock())
    mock_cfg.CONF.coordination.enabled = False
    reload_cmd = event.Event(resource='*',
                             crud=event.COMMAND,
                             body={'command': commands.CONFIG_RELOAD})
    self.w.handle_message('*', reload_cmd)
    self.assertTrue(mock_cfg.CONF.called)
    self.assertTrue(mock_cfg.CONF.log_opt_values.called)
def testWithDebugs(self):
    """TENANT_DEBUG should record the tenant as being in debug mode."""
    self.enable_debug(tenant_id='this-tenant-id')
    debug_cmd = event.Event('*', event.COMMAND, {
        'command': commands.TENANT_DEBUG,
        'tenant_id': 'this-tenant-id',
    })
    self.w.handle_message('*', debug_cmd)
    in_debug, _ = self.dbapi.tenant_in_debug('this-tenant-id')
    self.assertTrue(in_debug)
def test_process_notification_interesting_notifications(self):
    """Every 'interesting' router notification maps to an UPDATE event."""
    for notification in router._ROUTER_INTERESTING_NOTIFICATIONS:
        payload = {'router': {'id': 'fake_router_id'}}
        expected = event.Event(
            resource=event.Resource(driver=router.Router.RESOURCE_NAME,
                                    id='fake_router_id',
                                    tenant_id='fake_tenant_id'),
            crud=event.UPDATE,
            body=payload,
        )
        self._test_notification(notification, payload, expected)
def test_process_notification_lb_delete(self):
    """loadbalancer.delete.end should map to a DELETE event for the LB."""
    payload = {'loadbalancer': {'id': 'fake_lb_id'}}
    expected = event.Event(
        resource=event.Resource(
            driver=loadbalancer.LoadBalancer.RESOURCE_NAME,
            id='fake_lb_id',
            tenant_id='fake_tenant_id'),
        crud=event.DELETE,
        body=payload,
    )
    self._test_notification('loadbalancer.delete.end', payload, expected)
def test_cluster_changed(self, fake_members, fake_start):
    """A membership change should enqueue a wildcard REBALANCE event."""
    fake_members.__get__ = mock.Mock(return_value=['foo', 'bar'])
    self.coordinator = coordination.RugCoordinator(self.queue)
    self.coordinator.cluster_changed(event=None)

    rebalance_evt = event.Event(
        resource=event.Resource('*', '*', '*'),
        crud=event.REBALANCE,
        body={'members': ['foo', 'bar']})
    self.assertEqual(self.queue.get(), ('*', rebalance_evt))
def test__should_process_command_debug_config(self):
    """Debug/config commands are always processed regardless of target."""
    for cmd in (commands.WORKERS_DEBUG, commands.CONFIG_RELOAD):
        msg = event.Event(
            resource=event.Resource(
                tenant_id=self.tenant_id,
                id=self.router_id,
                driver='router',
            ),
            crud=event.COMMAND,
            body={'command': cmd},
        )
        self.assertTrue(self.w._should_process_command(msg))
def info(self, ctxt, publisher_id, event_type, payload, metadata):
    """Notification endpoint for INFO-level messages.

    Translates incoming notifications into internal Event objects and
    puts them on the notification queue keyed by tenant id.

    :param ctxt: context of the incoming notification
    :param publisher_id: id of the notification's publisher (unused here)
    :param event_type: str notification type, e.g. 'astara.command'
    :param payload: dict payload of the notification
    :param metadata: notification metadata (unused here)
    """
    tenant_id = _get_tenant_id_for_message(ctxt, payload)
    crud = event.UPDATE
    e = None
    events = []
    if event_type.startswith('astara.command'):
        LOG.debug('received a command: %r', payload)
        crud = event.COMMAND
        if payload.get('command') == commands.POLL:
            # POLL fans out to every driver/tenant/resource via wildcards.
            r = event.Resource(driver='*', id='*', tenant_id='*')
            e = event.Event(resource=r, crud=event.POLL, body={})
            self.notification_queue.put(('*', e))
            return
        else:
            # If the message does not specify a tenant, send it to everyone
            tenant_id = payload.get('tenant_id', '*')
            router_id = payload.get('router_id')
            resource = event.Resource(driver='*',
                                      id=router_id,
                                      tenant_id=tenant_id)
            events.append(event.Event(resource, crud, payload))
    else:
        # Non-command notifications: let each enabled driver decide
        # whether this notification is relevant to it.
        for driver in drivers.enabled_drivers():
            driver_event = driver.process_notification(
                tenant_id, event_type, payload)
            if driver_event:
                events.append(driver_event)
    if not events:
        LOG.debug('Could not construct any events from %s /w payload: %s',
                  event_type, payload)
        return
    LOG.debug('Generated %s events from %s /w payload: %s',
              len(events), event_type, payload)
    for e in events:
        self.notification_queue.put((e.resource.tenant_id, e))
def test_get_state_machine_no_resoruce_id(self):
    """A message without a resource id must raise InvalidIncomingMessage."""
    # NOTE(review): method name misspells "resource"; kept as-is so test
    # selection by name keeps working.
    msg = event.Event(
        resource=event.Resource(
            tenant_id=self.tenant_id,
            id=None,
            driver=router.Router.RESOURCE_NAME,
        ),
        crud=event.CREATE,
        body={'key': 'value'},
    )
    self.assertRaises(tenant.InvalidIncomingMessage,
                      self.trm.get_state_machines, msg, self.ctx)
def testManageUnlocked(self):
    """RESOURCE_MANAGE should work when the resource lock is not held."""
    self.enable_debug(resource_id='this-resource-id')
    self.w._resource_locks['this-resource-id'] = threading.Lock()
    manage_cmd = event.Event('*', event.COMMAND, {
        'command': commands.RESOURCE_MANAGE,
        'resource_id': 'this-resource-id',
    })
    self.w.handle_message('*', manage_cmd)
    self.assert_not_in_debug(resource_id='this-resource-id')
def testWithDebugs(self):
    """RESOURCE_DEBUG should record the resource and the given reason."""
    debug_cmd = event.Event('*', event.COMMAND, {
        'command': commands.RESOURCE_DEBUG,
        'resource_id': 'this-resource-id',
        'reason': 'foo',
    })
    self.w.handle_message('*', debug_cmd)
    self.enable_debug(resource_id='this-resource-id')
    self.assertIn(('this-resource-id', 'foo'),
                  self.dbapi.resources_in_debug())
def test__should_process_no_router_id_no_router_found(self):
    """If no router can be resolved for the tenant, drop the message."""
    self.fake_cache.get_by_tenant.return_value = None
    msg = event.Event(
        resource=event.Resource(
            driver=router.Router.RESOURCE_NAME,
            id=None,
            tenant_id='fake_tenant_id',
        ),
        crud=event.CREATE,
        body={'key': 'value'},
    )
    self.assertFalse(self.w._should_process_message(self.target, msg))
def setUp(self):
    """Pin the worker to a fake host and build a reusable router event."""
    super(TestRebalance, self).setUp()
    self.fake_host = 'fake_host'
    self.w.host = 'fake_host'
    self.resource_id = '56232034-a852-11e5-854e-035a3632659f'
    self.tenant_id = '601128de-a852-11e5-a09d-cf6fa26e6e6b'
    self.resource = event.Resource(
        'router', self.resource_id, self.tenant_id)
    self.msg = event.Event(
        resource=self.resource,
        crud=None,
        body={'key': 'value'},
    )
def test_handle_message_should_process(self, fake_should_process,
                                       fake_deliver):
    """Whatever _should_process_message returns is what gets delivered.

    Ensures the (possibly rewritten) message from the filter, not the
    original, is plumbed through to _deliver_message.
    """
    rewritten = event.Event(
        resource=self.resource,
        crud=event.CREATE,
        body={'key': 'value'},
    )
    fake_should_process.return_value = rewritten
    self.w.handle_message(self.target, self.msg)
    fake_should_process.assert_called_with(self.target, self.msg)
    fake_deliver.assert_called_with(self.target, rewritten)
def test_rebalance_bootstrap(self, fake_repop, fake_replay):
    """A bootstrap rebalance replays deferred messages but skips repopulate."""
    ring_mgr = mock.Mock(rebalance=mock.Mock())
    self.w.hash_ring_mgr = ring_mgr
    bootstrap_msg = event.Event(
        resource=self.resource,
        crud=event.REBALANCE,
        body={
            'members': ['foo', 'bar'],
            'node_bootstrap': True,
        },
    )
    self.w.handle_message('*', bootstrap_msg)
    ring_mgr.rebalance.assert_called_with(['foo', 'bar'])
    self.assertTrue(fake_replay.called)
    self.assertFalse(fake_repop.called)
def setUp(self):
    """Fixtures: a CREATE event for a single router owned by one tenant."""
    super(TestCreatingResource, self).setUp()
    self.tenant_id = '98dd9c41-d3ac-4fd6-8927-567afa0b8fc3'
    self.router_id = 'ac194fc5-f317-412e-8611-fb290629f624'
    self.hostname = 'astara'
    self.resource = event.Resource(router.Router.RESOURCE_NAME,
                                   self.router_id,
                                   self.tenant_id)
    self.msg = event.Event(
        resource=self.resource,
        crud=event.CREATE,
        body={'key': 'value'},
    )
    # Pass the message through unmodified by default.
    self.w._should_process_message = mock.MagicMock(return_value=self.msg)
def test_resource_cache_hit(self):
    """A cached tenant->resource mapping is returned without calling neutron."""
    self.resource_cache._tenant_resources = {
        router.Router.RESOURCE_NAME: {
            'fake_tenant_id': 'fake_cached_resource_id',
        }
    }
    resource = event.Resource(
        tenant_id='fake_tenant_id',
        id='fake_resource_id',
        driver=router.Router.RESOURCE_NAME,
    )
    result = self.resource_cache.get_by_tenant(
        resource=resource,
        worker_context=self.worker_context,
        message=event.Event(resource=resource, crud=event.UPDATE, body={}))
    self.assertEqual(result, 'fake_cached_resource_id')
    self.assertFalse(self.w._context.neutron.get_router_for_tenant.called)
def test_resource_cache_miss(self):
    """On a cache miss the resource is fetched from neutron by tenant id."""
    resource = event.Resource(
        tenant_id='fake_tenant_id',
        id='fake_fetched_resource_id',
        driver=router.Router.RESOURCE_NAME,
    )
    miss_msg = event.Event(resource=resource, crud=event.UPDATE, body={})
    result = self.resource_cache.get_by_tenant(
        resource=resource,
        worker_context=self.worker_context,
        message=miss_msg)
    self.assertEqual(result, 'fake_fetched_resource_id')
    self.w._context.neutron.get_router_for_tenant.assert_called_with(
        'fake_tenant_id')