def test_observe_state(self):
    watcher = k8s_watcher.K8sWatcher()
    watcher._renew_klient_watch()
    thd = 1
    watcher._observe_thread_state[thd] = {'watch_stop': False}
    resp = mock.Mock(closed=False)
    list_mock = mock.Mock(return_value=resp)
    with patch.object(watcher.klient, 'list', new=list_mock):
        watcher._observe_objects(watcher.klient.watch.stream,
                                 api_v1.AciContainersObject, 1)
        self.assertEqual(
            resp, watcher._observe_thread_state[thd]['http_resp'])

        resp.closed = True
        watcher._observe_objects(watcher.klient.watch.stream,
                                 api_v1.AciContainersObject, 1)
        ts = watcher._observe_thread_state[thd]['http_resp']
        self.assertEqual(True, ts.closed)

        stream_mock = mock.Mock(side_effect=Exception('FAKE ERROR'))
        with patch.object(watcher.klient.watch, 'stream', new=stream_mock):
            watcher._observe_objects(watcher.klient.watch.stream,
                                     api_v1.AciContainersObject, 1)
            exc = watcher._observe_thread_state[thd]['watch_exception']
            self.assertEqual(Exception, type(exc))

def test_no_tree_update_on_event(self):
    bd = resource.BridgeDomain(tenant_name='t1', name='bd1')
    bd_db_obj = self.ctx.store.make_db_obj(bd)
    bd_db_obj.update({'kind': bd_db_obj.kind,
                      'apiVersion': bd_db_obj.api_version})
    ev = {'type': 'ADDED', 'object': bd_db_obj}
    watcher = k8s_watcher.K8sWatcher()
    self.assertEqual(set(['tn-t1']), watcher._process_event(ev))

    # no-change event
    self.assertEqual(set(), watcher._process_event(ev))

    # no real change
    ev['type'] = 'MODIFIED'
    self.assertEqual(set(), watcher._process_event(ev))

    # change to irrelevant attribute
    ev['object']['spec']['someAttr'] = 'someValue'
    self.assertEqual(set(), watcher._process_event(ev))

    # delete
    ev['type'] = 'DELETED'
    self.assertEqual(set(['tn-t1']), watcher._process_event(ev))

def __init__(self, conf):
    self.run_daemon_loop = True
    self.host = conf.aim.aim_service_identifier

    aim_ctx = context.AimContext(store=api.get_store())
    # This config manager is shared between multiple threads. Therefore
    # all DB activity through this config manager will use the same
    # DB session which can result in conflicts.
    # TODO(amitbose) Fix ConfigManager to not use cached AimContext
    self.conf_manager = aim_cfg.ConfigManager(aim_ctx, self.host)
    self.k8s_watcher = None
    self.single_aid = False
    if conf.aim.aim_store == 'k8s':
        self.single_aid = True
        self.k8s_watcher = k8s_watcher.K8sWatcher()
        self.k8s_watcher.run()

    self.multiverse = []
    # Define multiverse pairs, First position is desired state
    self.multiverse += [
        # Configuration Universe (AIM to ACI)
        {DESIRED: aim_universe.AimDbUniverse().initialize(
            self.conf_manager, self.multiverse),
         CURRENT: aci_universe.AciUniverse().initialize(
             self.conf_manager, self.multiverse)},
        # Operational Universe (ACI to AIM)
        {DESIRED: aci_universe.AciOperationalUniverse().initialize(
            self.conf_manager, self.multiverse),
         CURRENT: aim_universe.AimDbOperationalUniverse().initialize(
             self.conf_manager, self.multiverse)},
        # Monitored Universe (ACI to AIM)
        {DESIRED: aci_universe.AciMonitoredUniverse().initialize(
            self.conf_manager, self.multiverse),
         CURRENT: aim_universe.AimDbMonitoredUniverse().initialize(
             self.conf_manager, self.multiverse)},
    ]
    # Operational Universes. ACI operational info will be synchronized into
    # AIM's
    self.manager = aim_manager.AimManager()
    self.tree_manager = tree_manager.HashTreeManager()
    self.agent_id = 'aid-%s' % self.host
    self.agent = resource.Agent(id=self.agent_id, agent_type=AGENT_TYPE,
                                host=self.host, binary_file=AGENT_BINARY,
                                description=AGENT_DESCRIPTION,
                                version=AGENT_VERSION)
    # Register agent
    self.agent = self.manager.create(aim_ctx, self.agent, overwrite=True)
    # Report procedure should happen asynchronously
    self.polling_interval = self.conf_manager.get_option_and_subscribe(
        self._change_polling_interval, 'agent_polling_interval',
        group='aim')
    self.report_interval = self.conf_manager.get_option_and_subscribe(
        self._change_report_interval, 'agent_report_interval', group='aim')
    self.squash_time = self.conf_manager.get_option_and_subscribe(
        self._change_squash_time, 'agent_event_squash_time', group='aim')
    self._spawn_heartbeat_loop()
    self.events = event_handler.EventHandler().initialize(
        self.conf_manager)
    self.max_down_time = 4 * self.report_interval

def test_observe_thread_dead(self):
    watcher = k8s_watcher.K8sWatcher()
    with patch.object(watcher, '_observe_objects'):
        watcher._start_observers(['a'])
        time.sleep(1)  # yield
        self.assertEqual(k8s_watcher.K8SObserverStopped,
                         type(watcher._check_observers()))

def test_check_observer_dies_timeout(self):
    watcher = k8s_watcher.K8sWatcher(self.ctx)
    watcher._observe_thread_state = {mock.Mock(): {}}
    # doesn't die
    self.assertIsNone(watcher._check_observers())
    watcher._check_time -= 30 * 60
    # dies
    self.assertIsNotNone(watcher._check_observers())

def setUp(self, initialize_hooks=True):
    super(TestAimDBBase, self).setUp()
    self.test_id = uuidutils.generate_uuid()
    aim_cfg.OPTION_SUBSCRIBER_MANAGER = None
    aci_universe.ws_context = None
    if not os.environ.get(K8S_STORE_VENV):
        CONF.set_override('aim_store', 'sql', 'aim')
        self.engine = api.get_engine()
        if not TestAimDBBase._TABLES_ESTABLISHED:
            model_base.Base.metadata.create_all(self.engine)
            TestAimDBBase._TABLES_ESTABLISHED = True

        # Uncomment the line below to log SQL statements. Additionally, to
        # log results of queries, change INFO to DEBUG
        #
        # logging.getLogger('sqlalchemy.engine').setLevel(logging.DEBUG)

        def clear_tables():
            with self.engine.begin() as conn:
                for table in reversed(
                        model_base.Base.metadata.sorted_tables):
                    conn.execute(table.delete())
        self.addCleanup(clear_tables)

        self.old_add_commit_hook = (
            aim_store.SqlAlchemyStore.add_commit_hook)
        aim_store.SqlAlchemyStore.add_commit_hook = _add_commit_hook

        def restore_commit_hook():
            aim_store.SqlAlchemyStore.add_commit_hook = (
                self.old_add_commit_hook)
        self.addCleanup(restore_commit_hook)
        aim_store.SqlAlchemyStore._after_transaction_end_2 = (
            _after_transaction_end_2)
    else:
        CONF.set_override('aim_store', 'k8s', 'aim')
        CONF.set_override('k8s_namespace', self.test_id, 'aim_k8s')
        k8s_config_path = os.environ.get(K8S_CONFIG_ENV)
        if k8s_config_path:
            CONF.set_override('k8s_config_path', k8s_config_path,
                              'aim_k8s')
        aim_store.K8sStore._post_delete = _k8s_post_delete
        aim_store.K8sStore._post_create = _k8s_post_create
        global k8s_watcher_instance
        k8s_watcher_instance = k8s_watcher.K8sWatcher()
        k8s_watcher_instance.event_handler = mock.Mock()
        k8s_watcher_instance._renew_klient_watch = mock.Mock()
        self.addCleanup(self._cleanup_objects)

    self.store = api.get_store(expire_on_commit=True,
                               initialize_hooks=initialize_hooks)
    self.ctx = context.AimContext(store=self.store)
    self.cfg_manager = aim_cfg.ConfigManager(self.ctx, '')
    self.tt_mgr = tree_manager.HashTreeManager()
    resource.ResourceBase.__eq__ = resource_equal
    self.cfg_manager.replace_all(CONF)
    self.sys_id = self.cfg_manager.get_option('aim_system_id', 'aim')

def test_pod_event_filter(self):
    watcher = k8s_watcher.K8sWatcher()
    watcher._renew_klient_watch()
    thd = 1
    watcher._observe_thread_state[thd] = {'watch_stop': False}
    self.assertTrue(watcher.q.empty())
    ev = {'type': 'ADDED',
          'object': {'kind': 'Pod',
                     'spec': {'hostNetwork': True}}}
    ev_exp = copy.copy(ev)
    ev_exp['type'] = 'DELETED'
    stream_mock = mock.Mock()
    with patch.object(watcher.klient.watch, 'stream', new=stream_mock):
        stream_mock.return_value = [copy.copy(ev)]
        watcher._observe_objects(watcher.klient.watch.stream,
                                 api_v1.Pod, 1, None)
        self.assertEqual(ev_exp, watcher.q.get_nowait())

        ev['object']['spec']['hostNetwork'] = False
        stream_mock.return_value = [copy.copy(ev)]
        watcher._observe_objects(watcher.klient.watch.stream,
                                 api_v1.Pod, 1, None)
        self.assertEqual(ev, watcher.q.get_nowait())

        ev['object']['spec'].pop('hostNetwork', None)
        stream_mock.return_value = [copy.copy(ev)]
        watcher._observe_objects(watcher.klient.watch.stream,
                                 api_v1.Pod, 1, None)
        self.assertEqual(ev, watcher.q.get_nowait())

        ev['type'] = 'MODIFIED'
        ev['object']['spec']['hostNetwork'] = True
        stream_mock.return_value = [copy.copy(ev)]
        watcher._observe_objects(watcher.klient.watch.stream,
                                 api_v1.Pod, 1, None)
        self.assertEqual(ev_exp, watcher.q.get_nowait())

def test_connection_monitor(self):
    watcher = k8s_watcher.K8sWatcher()
    resp = mock.Mock(closed=False)
    thd = 1
    watcher._observe_thread_state[thd] = {
        'http_resp': resp,
        'thread': mock.Mock(dead=False)}
    self.assertIsNone(watcher._check_observers())

    resp.closed = True
    self.assertEqual(k8s_watcher.K8SObserverStopped,
                     type(watcher._check_observers()))

    resp.closed = False
    watcher._observe_thread_state[thd]['watch_exception'] = Exception()
    self.assertEqual(Exception, type(watcher._check_observers()))

def test_endpoints_event_filter(self):
    watcher = k8s_watcher.K8sWatcher()
    watcher._renew_klient_watch()
    thd = 1
    watcher._observe_thread_state[thd] = {'watch_stop': False}
    self.assertTrue(watcher.q.empty())
    ev = {'type': 'MODIFIED',
          'object': {'kind': 'Endpoints',
                     'metadata': {}}}
    stream_mock = mock.Mock(return_value=[ev])
    with patch.object(watcher.klient.watch, 'stream', new=stream_mock):
        for n in ['kube-controller-manager', 'kube-scheduler']:
            ev['object']['metadata']['name'] = n
            watcher._observe_objects(watcher.klient.watch.stream,
                                     api_v1.Endpoints, 1)
            self.assertTrue(watcher.q.empty())

def run(ctx):
    event_handler.EventHandler().initialize(None)
    w = k8s_watcher.K8sWatcher()
    w.run()
    while True:
        time.sleep(5)

def test_process_pod_status_event(self):
    watcher = k8s_watcher.K8sWatcher()
    store = self.ctx.store
    ns = resource.VmmInjectedNamespace(domain_type='Kubernetes',
                                       domain_name='kubernetes',
                                       controller_name='kube-cluster',
                                       name='ns-%s' % self.test_id)
    pod = resource.VmmInjectedContGroup(
        domain_type=ns.domain_type,
        domain_name=ns.domain_name,
        controller_name=ns.controller_name,
        namespace_name=ns.name,
        name='pod1')
    pod_ht_key = watcher.tt_builder.tt_maker._build_hash_tree_key(pod)
    self.mgr.create(self.ctx, ns)
    pod_db_obj = store.make_db_obj(pod)
    store.add(pod_db_obj)
    pod_db_obj = store.query(api_v1.Pod, resource.VmmInjectedContGroup,
                             namespace_name=ns.name, name=pod.name)[0]

    pod.name = 'hidden-pod1'
    hidden_pod_ht_key = (
        watcher.tt_builder.tt_maker._build_hash_tree_key(pod))
    hidden_pod_db_obj = store.make_db_obj(pod)
    hidden_pod_db_obj['spec']['hostNetwork'] = True
    store.add(hidden_pod_db_obj)
    hidden_pod_db_obj = store.query(api_v1.Pod,
                                    resource.VmmInjectedContGroup,
                                    namespace_name=ns.name,
                                    name=pod.name)[0]

    # test pod that is not hidden
    stat = status.AciStatus(resource_type='VmmInjectedContGroup',
                            resource_id=pod_db_obj.aim_id,
                            resource_root=pod.root)
    for t in ['ADDED', 'MODIFIED', 'DELETED']:
        ev = {'event_type': t, 'resource': stat}
        exp_ev = copy.copy(ev)
        watcher._process_pod_status_event(ev)
        self.assertEqual(exp_ev, ev)

    # seed the hash-tree with the non-hidden pod
    ev = {'type': 'ADDED', 'object': pod_db_obj}
    watcher._process_event(ev)
    cfg_tree = watcher.trees['config'][pod.root]
    self.assertIsNotNone(cfg_tree.find(pod_ht_key))

    # test pod that is hidden
    stat.resource_id = hidden_pod_db_obj.aim_id
    for t in ['ADDED', 'MODIFIED', 'DELETED']:
        ev = {'event_type': t, 'resource': stat}
        exp_ev = copy.copy(ev)
        exp_ev['event_type'] = 'DELETED'
        watcher._process_pod_status_event(ev)
        self.assertEqual(exp_ev, ev)

        ev2 = {'type': t, 'object': store.make_db_obj(stat)}
        watcher._process_event(ev2)
        self.assertIsNone(cfg_tree.find(hidden_pod_ht_key))

def test_endpoints_event(self):
    watcher = k8s_watcher.K8sWatcher()
    store = self.ctx.store
    ns = resource.VmmInjectedNamespace(domain_type='Kubernetes',
                                       domain_name='kubernetes',
                                       controller_name='kube-cluster',
                                       name='ns-%s' % self.test_id)
    svc = resource.VmmInjectedService(
        domain_type=ns.domain_type,
        domain_name=ns.domain_name,
        controller_name=ns.controller_name,
        namespace_name=ns.name,
        name='svc1',
        service_ports=[{'port': '23', 'protocol': 'tcp',
                        'target_port': '45'}],
        endpoints=[{'ip': '1.2.3.4', 'pod_name': 'foo'},
                   {'ip': '2.1.3.4', 'pod_name': 'bar'}])
    svc_db_obj = store.make_db_obj(svc)
    ep_db_obj = svc_db_obj.endpoints
    ep_db_obj['subsets'][0]['ports'] = [{'port': 80}]
    ev_obj = {'kind': ep_db_obj.kind, 'apiVersion': ep_db_obj.api_version}
    ev_obj.update(ep_db_obj)
    ev = {'type': 'ADDED', 'object': ev_obj}

    # event with no Service object
    self.assertIsNone(watcher._parse_event(ev))

    def _verify_event_processing(exp_svc):
        res = watcher._parse_event(ev)
        self.assertEqual('modified', res['event_type'])
        self.assertEqual(resource.VmmInjectedService,
                         type(res['resource']))
        for attr in ['name', 'namespace_name', 'endpoints']:
            self.assertEqual(getattr(exp_svc, attr),
                             getattr(res['resource'], attr))

        aff_ten = watcher._process_event(ev)
        self.assertEqual(set(['vmmp-Kubernetes']), aff_ten)
        cfg_tree = watcher.trees['config']['vmmp-Kubernetes']
        ht_key = (
            watcher.tt_builder.tt_maker._build_hash_tree_key(exp_svc))
        ht_children = [x.key for x in cfg_tree.find(ht_key).get_children()
                       if 'vmmInjectedSvcEp|' in x.key[-1]]
        self.assertEqual(len(exp_svc.endpoints), len(ht_children))
        for e in exp_svc.endpoints:
            child_key = ht_key + ('vmmInjectedSvcEp|%s' % e['pod_name'],)
            self.assertTrue(child_key in ht_children, child_key)

    # create Service and Endpoints, send event
    self.mgr.create(self.ctx, ns)
    self.mgr.create(self.ctx, svc)
    store.klient.create(type(ep_db_obj),
                        ep_db_obj['metadata']['namespace'], ep_db_obj)
    _verify_event_processing(svc)

    # update Endpoints, send event
    ep_db_obj['subsets'][0]['addresses'] = (
        ep_db_obj['subsets'][0]['addresses'][:-1])
    store.klient.replace(type(ep_db_obj), ep_db_obj['metadata']['name'],
                         ep_db_obj['metadata']['namespace'], ep_db_obj)
    svc.endpoints = svc.endpoints[:-1]
    ev['type'] = 'MODIFIED'
    _verify_event_processing(svc)

    # delete Endpoints, send event
    store.klient.delete(type(ep_db_obj), ep_db_obj['metadata']['name'],
                        ep_db_obj['metadata']['namespace'], {})
    ev['type'] = 'DELETED'
    svc.endpoints = []
    _verify_event_processing(svc)