def setUp(self):
    """Create an EndpointManager wired up to mocked collaborators."""
    super(TestEndpointManager, self).setUp()
    # Real config object, with the hostname pinned via the environment.
    env = {"FELIX_FELIXHOSTNAME": "hostname"}
    self.config = load_config("felix_default.cfg", env_dict=env)
    # Mock every dependency so the manager is exercised in isolation.
    self.m_updater = Mock(spec=IptablesUpdater)
    self.m_dispatch = Mock(spec=DispatchChains)
    self.m_rules_mgr = Mock(spec=RulesManager)
    self.m_status_reporter = Mock(spec=EtcdStatusReporter)
    mgr = EndpointManager(self.config, "IPv4", self.m_updater,
                          self.m_dispatch, self.m_rules_mgr,
                          self.m_status_reporter)
    # Stub out the ref-counting hooks; tests assert against these mocks.
    mgr.get_and_incref = Mock()
    mgr.decref = Mock()
    self.mgr = mgr
def setUp(self):
    """Build an EndpointManager around fully-mocked dependencies."""
    super(TestEndpointManager, self).setUp()
    self.m_config = Mock(autospec=config.Config)
    # One mocked iptables updater per IP version.
    self.m_ipt_upds = {ver: Mock(autospec=IptablesUpdater)
                       for ver in (4, 6)}
    self.m_disp_chns = Mock(autospec=DispatchChains)
    self.m_rules_mgr = Mock(autospec=RulesManager)
    self.ep_mgr = EndpointManager(self.m_config,
                                  self.m_ipt_upds,
                                  self.m_disp_chns,
                                  self.m_rules_mgr)
def setUp(self):
    """Set up an EndpointManager with mocked collaborators for each test."""
    super(TestEndpointManager, self).setUp()
    self.config = load_config(
        "felix_default.cfg",
        env_dict={"FELIX_FELIXHOSTNAME": "hostname"},
    )
    # Isolate the manager from its real dependencies.
    self.m_updater = Mock(spec=IptablesUpdater)
    self.m_dispatch = Mock(spec=DispatchChains)
    self.m_rules_mgr = Mock(spec=RulesManager)
    self.m_status_reporter = Mock(spec=EtcdStatusReporter)
    self.mgr = EndpointManager(
        self.config,
        "IPv4",
        self.m_updater,
        self.m_dispatch,
        self.m_rules_mgr,
        self.m_status_reporter,
    )
    # Replace the ref-counting entry points so tests can assert on them.
    self.mgr.get_and_incref = Mock()
    self.mgr.decref = Mock()
def setUp(self):
    """Create an EndpointManager whose config and peers are all mocks."""
    super(TestEndpointManager, self).setUp()
    # Mocked config rather than a loaded one; only HOSTNAME is read here.
    m_config = Mock(spec=config.Config)
    m_config.HOSTNAME = "hostname"
    self.m_config = m_config
    self.m_updater = Mock(spec=IptablesUpdater)
    self.m_dispatch = Mock(spec=DispatchChains)
    self.m_rules_mgr = Mock(spec=RulesManager)
    self.m_status_reporter = Mock(spec=EtcdStatusReporter)
    self.mgr = EndpointManager(m_config, "IPv4", self.m_updater,
                               self.m_dispatch, self.m_rules_mgr,
                               self.m_status_reporter)
    # Stub the ref-counting hooks; individual tests assert their calls.
    self.mgr.get_and_incref = Mock()
    self.mgr.decref = Mock()
def _main_greenlet(config): """ The root of our tree of greenlets. Responsible for restarting its children if desired. """ try: _log.info("Connecting to etcd to get our configuration.") hosts_ipset_v4 = IpsetActor(HOSTS_IPSET_V4) etcd_api = EtcdAPI(config, hosts_ipset_v4) etcd_api.start() # Ask the EtcdAPI to fill in the global config object before we # proceed. We don't yet support config updates. config_loaded = etcd_api.load_config(async=False) config_loaded.wait() _log.info("Main greenlet: Configuration loaded, starting remaining " "actors...") v4_filter_updater = IptablesUpdater("filter", ip_version=4) v4_nat_updater = IptablesUpdater("nat", ip_version=4) v4_ipset_mgr = IpsetManager(IPV4) v4_masq_manager = MasqueradeManager(IPV4, v4_nat_updater) v4_rules_manager = RulesManager(4, v4_filter_updater, v4_ipset_mgr) v4_dispatch_chains = DispatchChains(config, 4, v4_filter_updater) v4_ep_manager = EndpointManager(config, IPV4, v4_filter_updater, v4_dispatch_chains, v4_rules_manager) v6_filter_updater = IptablesUpdater("filter", ip_version=6) v6_ipset_mgr = IpsetManager(IPV6) v6_rules_manager = RulesManager(6, v6_filter_updater, v6_ipset_mgr) v6_dispatch_chains = DispatchChains(config, 6, v6_filter_updater) v6_ep_manager = EndpointManager(config, IPV6, v6_filter_updater, v6_dispatch_chains, v6_rules_manager) update_splitter = UpdateSplitter(config, [v4_ipset_mgr, v6_ipset_mgr], [v4_rules_manager, v6_rules_manager], [v4_ep_manager, v6_ep_manager], [v4_filter_updater, v6_filter_updater], v4_masq_manager) iface_watcher = InterfaceWatcher(update_splitter) _log.info("Starting actors.") hosts_ipset_v4.start() update_splitter.start() v4_filter_updater.start() v4_nat_updater.start() v4_ipset_mgr.start() v4_masq_manager.start() v4_rules_manager.start() v4_dispatch_chains.start() v4_ep_manager.start() v6_filter_updater.start() v6_ipset_mgr.start() v6_rules_manager.start() v6_dispatch_chains.start() v6_ep_manager.start() iface_watcher.start() top_level_actors = [ 
hosts_ipset_v4, update_splitter, v4_nat_updater, v4_filter_updater, v4_nat_updater, v4_ipset_mgr, v4_masq_manager, v4_rules_manager, v4_dispatch_chains, v4_ep_manager, v6_filter_updater, v6_ipset_mgr, v6_rules_manager, v6_dispatch_chains, v6_ep_manager, iface_watcher, etcd_api, ] monitored_items = [actor.greenlet for actor in top_level_actors] # Install the global rules before we start polling for updates. _log.info("Installing global rules.") install_global_rules(config, v4_filter_updater, v6_filter_updater, v4_nat_updater) # Start polling for updates. These kicks make the actors poll # indefinitely. _log.info("Starting polling for interface and etcd updates.") f = iface_watcher.watch_interfaces(async=True) monitored_items.append(f) etcd_api.start_watch(update_splitter, async=True) # Register a SIG_USR handler to trigger a diags dump. def dump_top_level_actors(log): for a in top_level_actors: # The output will include queue length and the like. log.info("%s", a) futils.register_diags("Top-level actors", dump_top_level_actors) try: gevent.signal(signal.SIGUSR1, functools.partial(futils.dump_diags)) except AttributeError: # It doesn't matter too much if we fail to do this. _log.warning("Unable to install diag dump handler") pass # Wait for something to fail. _log.info("All top-level actors started, waiting on failures...") stopped_greenlets_iter = gevent.iwait(monitored_items) stopped_greenlet = next(stopped_greenlets_iter) try: stopped_greenlet.get() except Exception: _log.exception("Greenlet failed: %s", stopped_greenlet) raise else: _log.error("Greenlet %s unexpectedly returned.", stopped_greenlet) raise AssertionError("Greenlet unexpectedly returned") except: _log.exception("Exception killing main greenlet") raise
def _main_greenlet(config): """ The root of our tree of greenlets. Responsible for restarting its children if desired. """ try: _log.info("Connecting to etcd to get our configuration.") etcd_watcher = EtcdWatcher(config) etcd_watcher.start() # Ask the EtcdWatcher to fill in the global config object before we # proceed. We don't yet support config updates. etcd_watcher.load_config(async=False) _log.info("Main greenlet: Configuration loaded, starting remaining " "actors...") v4_filter_updater = IptablesUpdater("filter", ip_version=4) v4_nat_updater = IptablesUpdater("nat", ip_version=4) v4_ipset_mgr = IpsetManager(IPV4) v4_rules_manager = RulesManager(4, v4_filter_updater, v4_ipset_mgr) v4_dispatch_chains = DispatchChains(config, 4, v4_filter_updater) v4_ep_manager = EndpointManager(config, IPV4, v4_filter_updater, v4_dispatch_chains, v4_rules_manager) v6_filter_updater = IptablesUpdater("filter", ip_version=6) v6_ipset_mgr = IpsetManager(IPV6) v6_rules_manager = RulesManager(6, v6_filter_updater, v6_ipset_mgr) v6_dispatch_chains = DispatchChains(config, 6, v6_filter_updater) v6_ep_manager = EndpointManager(config, IPV6, v6_filter_updater, v6_dispatch_chains, v6_rules_manager) update_splitter = UpdateSplitter( config, [v4_ipset_mgr, v6_ipset_mgr], [v4_rules_manager, v6_rules_manager], [v4_ep_manager, v6_ep_manager], [v4_filter_updater, v6_filter_updater]) iface_watcher = InterfaceWatcher(update_splitter) _log.info("Starting actors.") update_splitter.start() v4_filter_updater.start() v4_nat_updater.start() v4_ipset_mgr.start() v4_rules_manager.start() v4_dispatch_chains.start() v4_ep_manager.start() v6_filter_updater.start() v6_ipset_mgr.start() v6_rules_manager.start() v6_dispatch_chains.start() v6_ep_manager.start() iface_watcher.start() monitored_items = [ update_splitter.greenlet, v4_nat_updater.greenlet, v4_filter_updater.greenlet, v4_nat_updater.greenlet, v4_ipset_mgr.greenlet, v4_rules_manager.greenlet, v4_dispatch_chains.greenlet, v4_ep_manager.greenlet, 
v6_filter_updater.greenlet, v6_ipset_mgr.greenlet, v6_rules_manager.greenlet, v6_dispatch_chains.greenlet, v6_ep_manager.greenlet, iface_watcher.greenlet, etcd_watcher.greenlet ] # Install the global rules before we start polling for updates. _log.info("Installing global rules.") install_global_rules(config, v4_filter_updater, v6_filter_updater, v4_nat_updater) # Start polling for updates. These kicks make the actors poll # indefinitely. _log.info("Starting polling for interface and etcd updates.") f = iface_watcher.watch_interfaces(async=True) monitored_items.append(f) f = etcd_watcher.watch_etcd(update_splitter, async=True) monitored_items.append(f) # Wait for something to fail. _log.info("All top-level actors started, waiting on failures...") stopped_greenlets_iter = gevent.iwait(monitored_items) stopped_greenlet = next(stopped_greenlets_iter) try: stopped_greenlet.get() except Exception: _log.exception("Greenlet failed: %s", stopped_greenlet) raise else: _log.error("Greenlet %s unexpectedly returned.", stopped_greenlet) raise AssertionError("Greenlet unexpectedly returned") except: _log.exception("Exception killing main greenlet") raise
def _main_greenlet(config): """ The root of our tree of greenlets. Responsible for restarting its children if desired. """ try: _log.info("Connecting to etcd to get our configuration.") etcd_watcher = EtcdWatcher(config) etcd_watcher.start() # Ask the EtcdWatcher to fill in the global config object before we # proceed. We don't yet support config updates. etcd_watcher.load_config(async=False) _log.info("Main greenlet: Configuration loaded, starting remaining " "actors...") v4_filter_updater = IptablesUpdater("filter", ip_version=4) v4_nat_updater = IptablesUpdater("nat", ip_version=4) v4_ipset_mgr = IpsetManager(IPV4) v4_rules_manager = RulesManager(4, v4_filter_updater, v4_ipset_mgr) v4_dispatch_chains = DispatchChains(config, 4, v4_filter_updater) v4_ep_manager = EndpointManager(config, IPV4, v4_filter_updater, v4_dispatch_chains, v4_rules_manager) v6_filter_updater = IptablesUpdater("filter", ip_version=6) v6_ipset_mgr = IpsetManager(IPV6) v6_rules_manager = RulesManager(6, v6_filter_updater, v6_ipset_mgr) v6_dispatch_chains = DispatchChains(config, 6, v6_filter_updater) v6_ep_manager = EndpointManager(config, IPV6, v6_filter_updater, v6_dispatch_chains, v6_rules_manager) update_splitter = UpdateSplitter(config, [v4_ipset_mgr, v6_ipset_mgr], [v4_rules_manager, v6_rules_manager], [v4_ep_manager, v6_ep_manager], [v4_filter_updater, v6_filter_updater]) iface_watcher = InterfaceWatcher(update_splitter) _log.info("Starting actors.") update_splitter.start() v4_filter_updater.start() v4_nat_updater.start() v4_ipset_mgr.start() v4_rules_manager.start() v4_dispatch_chains.start() v4_ep_manager.start() v6_filter_updater.start() v6_ipset_mgr.start() v6_rules_manager.start() v6_dispatch_chains.start() v6_ep_manager.start() iface_watcher.start() monitored_items = [ update_splitter.greenlet, v4_nat_updater.greenlet, v4_filter_updater.greenlet, v4_nat_updater.greenlet, v4_ipset_mgr.greenlet, v4_rules_manager.greenlet, v4_dispatch_chains.greenlet, v4_ep_manager.greenlet, 
v6_filter_updater.greenlet, v6_ipset_mgr.greenlet, v6_rules_manager.greenlet, v6_dispatch_chains.greenlet, v6_ep_manager.greenlet, iface_watcher.greenlet, etcd_watcher.greenlet ] # Install the global rules before we start polling for updates. _log.info("Installing global rules.") install_global_rules(config, v4_filter_updater, v6_filter_updater, v4_nat_updater) # Start polling for updates. These kicks make the actors poll # indefinitely. _log.info("Starting polling for interface and etcd updates.") f = iface_watcher.watch_interfaces(async=True) monitored_items.append(f) f = etcd_watcher.watch_etcd(update_splitter, async=True) monitored_items.append(f) # Wait for something to fail. _log.info("All top-level actors started, waiting on failures...") stopped_greenlets_iter = gevent.iwait(monitored_items) stopped_greenlet = next(stopped_greenlets_iter) try: stopped_greenlet.get() except Exception: _log.exception("Greenlet failed: %s", stopped_greenlet) raise else: _log.error("Greenlet %s unexpectedly returned.", stopped_greenlet) raise AssertionError("Greenlet unexpectedly returned") except: _log.exception("Exception killing main greenlet") raise
def _main_greenlet(config): """ The root of our tree of greenlets. Responsible for restarting its children if desired. """ try: _log.info("Connecting to etcd to get our configuration.") hosts_ipset_v4 = IpsetActor(HOSTS_IPSET_V4) etcd_api = EtcdAPI(config, hosts_ipset_v4) etcd_api.start() # Ask the EtcdAPI to fill in the global config object before we # proceed. We don't yet support config updates. config_loaded = etcd_api.load_config(async=False) config_loaded.wait() # Ensure the Kernel's global options are correctly configured for # Calico. devices.configure_global_kernel_config() _log.info("Main greenlet: Configuration loaded, starting remaining " "actors...") v4_filter_updater = IptablesUpdater("filter", ip_version=4, config=config) v4_nat_updater = IptablesUpdater("nat", ip_version=4, config=config) v4_ipset_mgr = IpsetManager(IPV4) v4_masq_manager = MasqueradeManager(IPV4, v4_nat_updater) v4_rules_manager = RulesManager(4, v4_filter_updater, v4_ipset_mgr) v4_dispatch_chains = DispatchChains(config, 4, v4_filter_updater) v4_ep_manager = EndpointManager(config, IPV4, v4_filter_updater, v4_dispatch_chains, v4_rules_manager, etcd_api.status_reporter) v6_raw_updater = IptablesUpdater("raw", ip_version=6, config=config) v6_filter_updater = IptablesUpdater("filter", ip_version=6, config=config) v6_ipset_mgr = IpsetManager(IPV6) v6_rules_manager = RulesManager(6, v6_filter_updater, v6_ipset_mgr) v6_dispatch_chains = DispatchChains(config, 6, v6_filter_updater) v6_ep_manager = EndpointManager(config, IPV6, v6_filter_updater, v6_dispatch_chains, v6_rules_manager, etcd_api.status_reporter) update_splitter = UpdateSplitter(config, [v4_ipset_mgr, v6_ipset_mgr], [v4_rules_manager, v6_rules_manager], [v4_ep_manager, v6_ep_manager], [v4_filter_updater, v6_filter_updater, v6_raw_updater, v4_nat_updater], v4_masq_manager) iface_watcher = InterfaceWatcher(update_splitter) _log.info("Starting actors.") hosts_ipset_v4.start() update_splitter.start() v4_filter_updater.start() 
v4_nat_updater.start() v4_ipset_mgr.start() v4_masq_manager.start() v4_rules_manager.start() v4_dispatch_chains.start() v4_ep_manager.start() v6_raw_updater.start() v6_filter_updater.start() v6_ipset_mgr.start() v6_rules_manager.start() v6_dispatch_chains.start() v6_ep_manager.start() iface_watcher.start() top_level_actors = [ hosts_ipset_v4, update_splitter, v4_nat_updater, v4_filter_updater, v4_nat_updater, v4_ipset_mgr, v4_masq_manager, v4_rules_manager, v4_dispatch_chains, v4_ep_manager, v6_raw_updater, v6_filter_updater, v6_ipset_mgr, v6_rules_manager, v6_dispatch_chains, v6_ep_manager, iface_watcher, etcd_api, ] monitored_items = [actor.greenlet for actor in top_level_actors] # Install the global rules before we start polling for updates. _log.info("Installing global rules.") install_global_rules(config, v4_filter_updater, v6_filter_updater, v4_nat_updater, v6_raw_updater) # Start polling for updates. These kicks make the actors poll # indefinitely. _log.info("Starting polling for interface and etcd updates.") f = iface_watcher.watch_interfaces(async=True) monitored_items.append(f) etcd_api.start_watch(update_splitter, async=True) # Register a SIG_USR handler to trigger a diags dump. def dump_top_level_actors(log): for a in top_level_actors: # The output will include queue length and the like. log.info("%s", a) futils.register_diags("Top-level actors", dump_top_level_actors) futils.register_process_statistics() try: gevent.signal(signal.SIGUSR1, functools.partial(futils.dump_diags)) except AttributeError: # It doesn't matter too much if we fail to do this. _log.warning("Unable to install diag dump handler") pass # Wait for something to fail. 
_log.info("All top-level actors started, waiting on failures...") stopped_greenlets_iter = gevent.iwait(monitored_items) stopped_greenlet = next(stopped_greenlets_iter) try: stopped_greenlet.get() except Exception: _log.exception("Greenlet failed: %s", stopped_greenlet) raise else: _log.error("Greenlet %s unexpectedly returned.", stopped_greenlet) raise AssertionError("Greenlet unexpectedly returned") except: _log.exception("Exception killing main greenlet") raise
def _main_greenlet(config):
    """
    The root of our tree of greenlets.  Responsible for restarting
    its children if desired.

    Creates and starts all the top-level actors (iptables updaters,
    ipset/rules/endpoint managers, watchers), installs the global
    iptables rules, then blocks until any monitored greenlet dies,
    at which point it raises so the whole process exits.

    :param config: the global Config object; filled in by the EtcdAPI
        before the remaining actors are created.
    """
    try:
        _log.info("Connecting to etcd to get our configuration.")
        hosts_ipset_v4 = IpsetActor(HOSTS_IPSET_V4)
        etcd_api = EtcdAPI(config, hosts_ipset_v4)
        etcd_api.start()
        # Ask the EtcdAPI to fill in the global config object before we
        # proceed.  We don't yet support config updates.
        config_loaded = etcd_api.load_config(async=False)
        config_loaded.wait()
        # Ensure the Kernel's global options are correctly configured for
        # Calico.
        devices.configure_global_kernel_config()
        _log.info("Main greenlet: Configuration loaded, starting remaining "
                  "actors...")
        monitored_items = []
        if config.PROM_METRICS_ENABLED:
            # Optional Prometheus metrics endpoint, served from its own
            # greenlet and monitored like the actors below.
            httpd = HTTPServer(("0.0.0.0", config.PROM_METRICS_PORT),
                               MetricsHandler)
            stats_server = gevent.Greenlet(httpd.serve_forever)
            stats_server.start()
            monitored_items.append(stats_server)
        # IPv4 actors are always created.
        v4_filter_updater = IptablesUpdater("filter", ip_version=4,
                                            config=config)
        v4_nat_updater = IptablesUpdater("nat", ip_version=4, config=config)
        v4_ipset_mgr = IpsetManager(IPV4, config)
        v4_masq_manager = MasqueradeManager(IPV4, v4_nat_updater)
        v4_rules_manager = RulesManager(config, 4, v4_filter_updater,
                                        v4_ipset_mgr)
        v4_dispatch_chains = DispatchChains(config, 4, v4_filter_updater)
        v4_fip_manager = FloatingIPManager(config, 4, v4_nat_updater)
        v4_ep_manager = EndpointManager(config,
                                        IPV4,
                                        v4_filter_updater,
                                        v4_dispatch_chains,
                                        v4_rules_manager,
                                        v4_fip_manager,
                                        etcd_api.status_reporter)
        cleanup_updaters = [v4_filter_updater, v4_nat_updater]
        cleanup_ip_mgrs = [v4_ipset_mgr]
        update_splitter_args = [v4_ipset_mgr, v4_rules_manager,
                                v4_ep_manager, v4_masq_manager,
                                v4_nat_updater]
        # IPv6 actors are only created if the kernel has IPv6 support.
        v6_enabled = os.path.exists("/proc/sys/net/ipv6")
        if v6_enabled:
            v6_raw_updater = IptablesUpdater("raw", ip_version=6,
                                             config=config)
            v6_filter_updater = IptablesUpdater("filter", ip_version=6,
                                                config=config)
            v6_nat_updater = IptablesUpdater("nat", ip_version=6,
                                             config=config)
            v6_ipset_mgr = IpsetManager(IPV6, config)
            v6_rules_manager = RulesManager(config, 6, v6_filter_updater,
                                            v6_ipset_mgr)
            v6_dispatch_chains = DispatchChains(config, 6, v6_filter_updater)
            v6_fip_manager = FloatingIPManager(config, 6, v6_nat_updater)
            v6_ep_manager = EndpointManager(config,
                                            IPV6,
                                            v6_filter_updater,
                                            v6_dispatch_chains,
                                            v6_rules_manager,
                                            v6_fip_manager,
                                            etcd_api.status_reporter)
            cleanup_updaters.append(v6_filter_updater)
            cleanup_ip_mgrs.append(v6_ipset_mgr)
            update_splitter_args += [v6_ipset_mgr, v6_rules_manager,
                                     v6_ep_manager, v6_raw_updater,
                                     v6_nat_updater]
        cleanup_mgr = CleanupManager(config, cleanup_updaters,
                                     cleanup_ip_mgrs)
        update_splitter_args.append(cleanup_mgr)
        # The splitter fans data-model updates out to all the actors above.
        update_splitter = UpdateSplitter(update_splitter_args)
        iface_watcher = InterfaceWatcher(update_splitter)
        _log.info("Starting actors.")
        hosts_ipset_v4.start()
        cleanup_mgr.start()
        v4_filter_updater.start()
        v4_nat_updater.start()
        v4_ipset_mgr.start()
        v4_masq_manager.start()
        v4_rules_manager.start()
        v4_dispatch_chains.start()
        v4_ep_manager.start()
        v4_fip_manager.start()
        if v6_enabled:
            v6_raw_updater.start()
            v6_filter_updater.start()
            v6_ipset_mgr.start()
            v6_nat_updater.start()
            v6_rules_manager.start()
            v6_dispatch_chains.start()
            v6_ep_manager.start()
            v6_fip_manager.start()
        iface_watcher.start()
        top_level_actors = [
            hosts_ipset_v4,
            cleanup_mgr,
            v4_filter_updater,
            v4_nat_updater,
            v4_ipset_mgr,
            v4_masq_manager,
            v4_rules_manager,
            v4_dispatch_chains,
            v4_ep_manager,
            v4_fip_manager,
            iface_watcher,
            etcd_api,
        ]
        if v6_enabled:
            top_level_actors += [
                v6_raw_updater,
                v6_filter_updater,
                v6_nat_updater,
                v6_ipset_mgr,
                v6_rules_manager,
                v6_dispatch_chains,
                v6_ep_manager,
                v6_fip_manager,
            ]
        # If any of these greenlets dies, we tear down the process below.
        monitored_items += [actor.greenlet for actor in top_level_actors]
        # Try to ensure that the nf_conntrack_netlink kernel module is
        # present.  This works around an issue[1] where the first call to
        # the "conntrack" command fails while waiting for the module to
        # load.
        # [1] https://github.com/projectcalico/calico/issues/986
        load_nf_conntrack()
        # Install the global rules before we start polling for updates.
        _log.info("Installing global rules.")
        install_global_rules(config, v4_filter_updater, v4_nat_updater,
                             ip_version=4)
        if v6_enabled:
            install_global_rules(config, v6_filter_updater, v6_nat_updater,
                                 ip_version=6, raw_updater=v6_raw_updater)
        # Start polling for updates.  These kicks make the actors poll
        # indefinitely.
        _log.info("Starting polling for interface and etcd updates.")
        f = iface_watcher.watch_interfaces(async=True)
        monitored_items.append(f)
        etcd_api.start_watch(update_splitter, async=True)

        # Register a SIG_USR handler to trigger a diags dump.
        def dump_top_level_actors(log):
            for a in top_level_actors:
                # The output will include queue length and the like.
                log.info("%s", a)
        futils.register_diags("Top-level actors", dump_top_level_actors)
        futils.register_process_statistics()
        try:
            gevent.signal(signal.SIGUSR1,
                          functools.partial(futils.dump_diags))
        except AttributeError:
            # It doesn't matter too much if we fail to do this.
            _log.warning("Unable to install diag dump handler")
            pass
        # Wait for something to fail.
        _log.info("All top-level actors started, waiting on failures...")
        stopped_greenlets_iter = gevent.iwait(monitored_items)
        stopped_greenlet = next(stopped_greenlets_iter)
        try:
            stopped_greenlet.get()
        except Exception:
            _log.exception("Greenlet failed: %s", stopped_greenlet)
            raise
        else:
            # These greenlets are expected to run forever, so a clean
            # return is just as fatal as an exception.
            _log.error("Greenlet %s unexpectedly returned.",
                       stopped_greenlet)
            raise AssertionError("Greenlet unexpectedly returned")
    except:
        # Bare except so that even a BaseException (e.g. GreenletExit)
        # gets logged before the process dies.
        _log.exception("Exception killing main greenlet")
        raise
def _main_greenlet(config):
    """
    The root of our tree of greenlets.  Responsible for restarting
    its children if desired.

    Creates and starts all the top-level actors (iptables updaters,
    ipset/rules/endpoint managers, watchers), installs the global
    iptables rules, then blocks until any monitored greenlet dies,
    at which point it raises so the whole process exits.

    :param config: the global Config object; filled in by the EtcdAPI
        before the remaining actors are created.
    """
    try:
        _log.info("Connecting to etcd to get our configuration.")
        hosts_ipset_v4 = IpsetActor(HOSTS_IPSET_V4)
        etcd_api = EtcdAPI(config, hosts_ipset_v4)
        etcd_api.start()
        # Ask the EtcdAPI to fill in the global config object before we
        # proceed.  We don't yet support config updates.
        config_loaded = etcd_api.load_config(async=False)
        config_loaded.wait()
        # Ensure the Kernel's global options are correctly configured for
        # Calico.
        devices.configure_global_kernel_config()
        _log.info("Main greenlet: Configuration loaded, starting remaining "
                  "actors...")
        monitored_items = []
        if config.PROM_METRICS_ENABLED:
            # Optional Prometheus metrics endpoint, served from its own
            # greenlet and monitored like the actors below.
            httpd = HTTPServer(("0.0.0.0", config.PROM_METRICS_PORT),
                               MetricsHandler)
            stats_server = gevent.Greenlet(httpd.serve_forever)
            stats_server.start()
            monitored_items.append(stats_server)
        # IPv4 actors are always created.
        v4_filter_updater = IptablesUpdater("filter", ip_version=4,
                                            config=config)
        v4_nat_updater = IptablesUpdater("nat", ip_version=4, config=config)
        v4_ipset_mgr = IpsetManager(IPV4, config)
        v4_masq_manager = MasqueradeManager(IPV4, v4_nat_updater)
        v4_rules_manager = RulesManager(config, 4, v4_filter_updater,
                                        v4_ipset_mgr)
        v4_dispatch_chains = DispatchChains(config, 4, v4_filter_updater)
        v4_fip_manager = FloatingIPManager(config, 4, v4_nat_updater)
        v4_ep_manager = EndpointManager(
            config,
            IPV4,
            v4_filter_updater,
            v4_dispatch_chains,
            v4_rules_manager,
            v4_fip_manager,
            etcd_api.status_reporter,
        )
        cleanup_updaters = [v4_filter_updater, v4_nat_updater]
        cleanup_ip_mgrs = [v4_ipset_mgr]
        update_splitter_args = [v4_ipset_mgr, v4_rules_manager,
                                v4_ep_manager, v4_masq_manager,
                                v4_nat_updater]
        # IPv6 actors are only created if the kernel has IPv6 support.
        v6_enabled = os.path.exists("/proc/sys/net/ipv6")
        if v6_enabled:
            v6_raw_updater = IptablesUpdater("raw", ip_version=6,
                                             config=config)
            v6_filter_updater = IptablesUpdater("filter", ip_version=6,
                                                config=config)
            v6_nat_updater = IptablesUpdater("nat", ip_version=6,
                                             config=config)
            v6_ipset_mgr = IpsetManager(IPV6, config)
            v6_rules_manager = RulesManager(config, 6, v6_filter_updater,
                                            v6_ipset_mgr)
            v6_dispatch_chains = DispatchChains(config, 6, v6_filter_updater)
            v6_fip_manager = FloatingIPManager(config, 6, v6_nat_updater)
            v6_ep_manager = EndpointManager(
                config,
                IPV6,
                v6_filter_updater,
                v6_dispatch_chains,
                v6_rules_manager,
                v6_fip_manager,
                etcd_api.status_reporter,
            )
            cleanup_updaters.append(v6_filter_updater)
            cleanup_ip_mgrs.append(v6_ipset_mgr)
            update_splitter_args += [v6_ipset_mgr, v6_rules_manager,
                                     v6_ep_manager, v6_raw_updater,
                                     v6_nat_updater]
        cleanup_mgr = CleanupManager(config, cleanup_updaters,
                                     cleanup_ip_mgrs)
        update_splitter_args.append(cleanup_mgr)
        # The splitter fans data-model updates out to all the actors above.
        update_splitter = UpdateSplitter(update_splitter_args)
        iface_watcher = InterfaceWatcher(update_splitter)
        _log.info("Starting actors.")
        hosts_ipset_v4.start()
        cleanup_mgr.start()
        v4_filter_updater.start()
        v4_nat_updater.start()
        v4_ipset_mgr.start()
        v4_masq_manager.start()
        v4_rules_manager.start()
        v4_dispatch_chains.start()
        v4_ep_manager.start()
        v4_fip_manager.start()
        if v6_enabled:
            v6_raw_updater.start()
            v6_filter_updater.start()
            v6_ipset_mgr.start()
            v6_nat_updater.start()
            v6_rules_manager.start()
            v6_dispatch_chains.start()
            v6_ep_manager.start()
            v6_fip_manager.start()
        iface_watcher.start()
        top_level_actors = [
            hosts_ipset_v4,
            cleanup_mgr,
            v4_filter_updater,
            v4_nat_updater,
            v4_ipset_mgr,
            v4_masq_manager,
            v4_rules_manager,
            v4_dispatch_chains,
            v4_ep_manager,
            v4_fip_manager,
            iface_watcher,
            etcd_api,
        ]
        if v6_enabled:
            top_level_actors += [
                v6_raw_updater,
                v6_filter_updater,
                v6_nat_updater,
                v6_ipset_mgr,
                v6_rules_manager,
                v6_dispatch_chains,
                v6_ep_manager,
                v6_fip_manager,
            ]
        # If any of these greenlets dies, we tear down the process below.
        monitored_items += [actor.greenlet for actor in top_level_actors]
        # Try to ensure that the nf_conntrack_netlink kernel module is
        # present.  This works around an issue[1] where the first call to
        # the "conntrack" command fails while waiting for the module to
        # load.
        # [1] https://github.com/projectcalico/calico/issues/986
        load_nf_conntrack()
        # Install the global rules before we start polling for updates.
        _log.info("Installing global rules.")
        install_global_rules(config, v4_filter_updater, v4_nat_updater,
                             ip_version=4)
        if v6_enabled:
            install_global_rules(config, v6_filter_updater, v6_nat_updater,
                                 ip_version=6, raw_updater=v6_raw_updater)
        # Start polling for updates.  These kicks make the actors poll
        # indefinitely.
        _log.info("Starting polling for interface and etcd updates.")
        f = iface_watcher.watch_interfaces(async=True)
        monitored_items.append(f)
        etcd_api.start_watch(update_splitter, async=True)

        # Register a SIG_USR handler to trigger a diags dump.
        def dump_top_level_actors(log):
            for a in top_level_actors:
                # The output will include queue length and the like.
                log.info("%s", a)
        futils.register_diags("Top-level actors", dump_top_level_actors)
        futils.register_process_statistics()
        try:
            gevent.signal(signal.SIGUSR1,
                          functools.partial(futils.dump_diags))
        except AttributeError:
            # It doesn't matter too much if we fail to do this.
            _log.warning("Unable to install diag dump handler")
            pass
        # Wait for something to fail.
        _log.info("All top-level actors started, waiting on failures...")
        stopped_greenlets_iter = gevent.iwait(monitored_items)
        stopped_greenlet = next(stopped_greenlets_iter)
        try:
            stopped_greenlet.get()
        except Exception:
            _log.exception("Greenlet failed: %s", stopped_greenlet)
            raise
        else:
            # These greenlets are expected to run forever, so a clean
            # return is just as fatal as an exception.
            _log.error("Greenlet %s unexpectedly returned.",
                       stopped_greenlet)
            raise AssertionError("Greenlet unexpectedly returned")
    except:
        # Bare except so that even a BaseException (e.g. GreenletExit)
        # gets logged before the process dies.
        _log.exception("Exception killing main greenlet")
        raise
class TestEndpointManager(BaseTestCase):
    """Unit tests for EndpointManager, with all collaborators mocked out."""

    def setUp(self):
        # Real config object with the hostname pinned via the environment;
        # everything else the manager talks to is a mock.
        super(TestEndpointManager, self).setUp()
        self.config = load_config("felix_default.cfg", env_dict={
            "FELIX_FELIXHOSTNAME": "hostname"})
        self.m_updater = Mock(spec=IptablesUpdater)
        self.m_dispatch = Mock(spec=DispatchChains)
        self.m_rules_mgr = Mock(spec=RulesManager)
        self.m_status_reporter = Mock(spec=EtcdStatusReporter)
        self.mgr = EndpointManager(self.config, "IPv4", self.m_updater,
                                   self.m_dispatch, self.m_rules_mgr,
                                   self.m_status_reporter)
        # Stub the ref-counting hooks; tests assert against these mocks.
        self.mgr.get_and_incref = Mock()
        self.mgr.decref = Mock()

    def test_create(self):
        # _create() should build a LocalEndpoint for the given ID.
        obj = self.mgr._create(ENDPOINT_ID)
        self.assertTrue(isinstance(obj, LocalEndpoint))

    def test_on_started(self):
        # Once an object starts, the manager should replay the cached
        # endpoint data into it.
        ep = {"name": "tap1234"}
        self.mgr.on_endpoint_update(ENDPOINT_ID, ep, async=True)
        self.step_actor(self.mgr)
        m_endpoint = Mock(spec=LocalEndpoint)
        self.mgr._on_object_started(ENDPOINT_ID, m_endpoint)
        self.assertEqual(
            m_endpoint.on_endpoint_update.mock_calls,
            [mock.call(ep, async=True)]
        )

    def test_on_datamodel_in_sync(self):
        # The first in-sync notification should push a snapshot of known
        # interface names to the dispatch chains...
        ep = {"name": "tap1234"}
        self.mgr.on_endpoint_update(ENDPOINT_ID, ep, async=True)
        self.step_actor(self.mgr)
        self.mgr.on_datamodel_in_sync(async=True)
        self.step_actor(self.mgr)
        self.assertEqual(
            self.m_dispatch.apply_snapshot.mock_calls,
            [mock.call(frozenset(["tap1234"]), async=True)]
        )
        # Second call should have no effect.
        self.m_dispatch.apply_snapshot.reset_mock()
        self.mgr.on_datamodel_in_sync(async=True)
        self.step_actor(self.mgr)
        self.assertEqual(self.m_dispatch.apply_snapshot.mock_calls, [])

    def test_endpoint_update_not_our_host(self):
        # Updates for endpoints on other hosts should be ignored entirely.
        ep = {"name": "tap1234"}
        with mock.patch.object(self.mgr, "_is_starting_or_live") as m_sol:
            self.mgr.on_endpoint_update(EndpointId("notus", "b", "c", "d"),
                                        ep,
                                        async=True)
            self.step_actor(self.mgr)
        self.assertFalse(m_sol.called)

    def test_endpoint_live_obj(self):
        ep = {"name": "tap1234"}
        # First send in an update to trigger creation.
        self.mgr.on_endpoint_update(ENDPOINT_ID, ep, async=True)
        self.step_actor(self.mgr)
        self.assertEqual(self.mgr.get_and_incref.mock_calls,
                         [mock.call(ENDPOINT_ID)])
        m_endpoint = Mock(spec=LocalEndpoint)
        self.mgr.objects_by_id[ENDPOINT_ID] = m_endpoint
        # Then send a second update to check that it gets passed on to the
        # LocalEndpoint.
        with mock.patch.object(self.mgr, "_is_starting_or_live") as m_sol:
            m_sol.return_value = True
            self.mgr.on_endpoint_update(ENDPOINT_ID, ep, async=True)
            self.step_actor(self.mgr)
            self.assertEqual(m_sol.mock_calls, [mock.call(ENDPOINT_ID)])
        self.assertEqual(m_endpoint.on_endpoint_update.mock_calls,
                         [mock.call(ep, force_reprogram=False, async=True)])
        self.assertTrue(ENDPOINT_ID in self.mgr.local_endpoint_ids)
        # Finally, send in a deletion.
        m_endpoint.on_endpoint_update.reset_mock()
        with mock.patch.object(self.mgr, "_is_starting_or_live") as m_sol:
            m_sol.return_value = True
            self.mgr.on_endpoint_update(ENDPOINT_ID, None, async=True)
            self.step_actor(self.mgr)
        self.assertEqual(m_endpoint.on_endpoint_update.mock_calls,
                         [mock.call(None, force_reprogram=False, async=True)])
        # Deletion should release the reference and forget the local ID.
        self.assertEqual(self.mgr.decref.mock_calls,
                         [mock.call(ENDPOINT_ID)])
        self.assertFalse(ENDPOINT_ID in self.mgr.local_endpoint_ids)

    def test_on_interface_update_unknown(self):
        # Interface updates for unknown interfaces should be ignored.
        with mock.patch.object(self.mgr, "_is_starting_or_live") as m_sol:
            self.mgr.on_interface_update("foo", True, async=True)
            self.step_actor(self.mgr)
        self.assertFalse(m_sol.called)

    def test_on_interface_update_known(self):
        # Interface updates for a live, known endpoint should be forwarded.
        ep = {"name": "tap1234"}
        m_endpoint = Mock(spec=LocalEndpoint)
        self.mgr.objects_by_id[ENDPOINT_ID] = m_endpoint
        with mock.patch.object(self.mgr, "_is_starting_or_live") as m_sol:
            m_sol.return_value = True
            self.mgr.on_endpoint_update(ENDPOINT_ID, ep, async=True)
            self.mgr.on_interface_update("tap1234", True, async=True)
            self.step_actor(self.mgr)
        self.assertEqual(
            m_endpoint.on_interface_update.mock_calls,
            [mock.call(True, async=True)]
        )

    def test_on_interface_update_known_but_not_live(self):
        # A known endpoint that isn't live should NOT receive interface
        # updates.
        ep = {"name": "tap1234"}
        m_endpoint = Mock(spec=LocalEndpoint)
        self.mgr.objects_by_id[ENDPOINT_ID] = m_endpoint
        with mock.patch.object(self.mgr, "_is_starting_or_live") as m_sol:
            m_sol.return_value = False
            self.mgr.on_endpoint_update(ENDPOINT_ID, ep, async=True)
            self.mgr.on_interface_update("tap1234", True, async=True)
            self.step_actor(self.mgr)
        self.assertEqual(m_endpoint.on_interface_update.mock_calls, [])
def _main_greenlet():
    """
    The root of our tree of greenlets.  Responsible for restarting
    its children if desired.

    Builds the whole actor tree (iptables updaters, ipset/rules/endpoint
    managers for IPv4 and optionally IPv6), starts every actor, installs
    the global iptables rules, then blocks until any monitored greenlet
    dies, at which point it logs and re-raises so the process restarts.

    NOTE(review): Python 2-era code — "async" is used as a keyword
    argument throughout; on Python 3.7+ that is a syntax error.
    The statement ORDER below is significant (config must be loaded
    before actors are built; dispatch chains must be configured before
    the global rules are installed); do not reorder.
    """
    try:
        _log.info("Connecting to etcd to get our configuration.")
        hosts_ipset_v4 = IpsetActor(HOSTS_IPSET_V4)
        # Greenlets whose death should bring the whole process down.
        monitored_items = []
        # The parent process sends us communication pipes as FD 3 and 4. Open
        # those as files. Wrap the resulting files in a FileObject to make
        # them cooperate with gevent.
        pipe_from_parent = FileObject(os.fdopen(3, 'rb', -1), 'rb')
        pipe_to_parent = FileObject(os.fdopen(4, 'wb', -1), 'wb')
        config = Config()
        datastore = DatastoreAPI(config, pipe_from_parent, pipe_to_parent,
                                 hosts_ipset_v4)
        datastore.start()
        monitored_items.append(datastore.greenlet)
        # Ask the DatastoreAPI to fill in the global config object before we
        # proceed. We don't yet support config updates.
        config_loaded = datastore.load_config(async=False)
        config_loaded.wait()
        # Ensure the Kernel's global options are correctly configured for
        # Calico.
        devices.configure_global_kernel_config(config)
        # Check the commands we require are present.
        futils.check_command_deps()
        _log.info("Main greenlet: Configuration loaded, starting remaining "
                  "actors...")
        if config.PROM_METRICS_ENABLED:
            # Optional Prometheus metrics endpoint, served from its own
            # greenlet and monitored like any other actor.
            httpd = HTTPServer(("0.0.0.0", config.PROM_METRICS_DRIVER_PORT),
                               MetricsHandler)
            stats_server = gevent.Greenlet(httpd.serve_forever)
            stats_server.start()
            monitored_items.append(stats_server)
        # --- IPv4 actor tree (always built) ---
        v4_filter_updater = IptablesUpdater("filter", ip_version=4,
                                            config=config)
        v4_nat_updater = IptablesUpdater("nat", ip_version=4, config=config)
        v4_ipset_mgr = IpsetManager(IPV4, config)
        v4_masq_manager = MasqueradeManager(IPV4, v4_nat_updater)
        v4_rules_manager = RulesManager(config, 4, v4_filter_updater,
                                        v4_ipset_mgr)
        v4_ep_dispatch_chains = WorkloadDispatchChains(config, 4,
                                                       v4_filter_updater)
        v4_if_dispatch_chains = HostEndpointDispatchChains(config, 4,
                                                           v4_filter_updater)
        v4_fip_manager = FloatingIPManager(config, 4, v4_nat_updater)
        v4_ep_manager = EndpointManager(config, IPV4, v4_filter_updater,
                                        v4_ep_dispatch_chains,
                                        v4_if_dispatch_chains,
                                        v4_rules_manager,
                                        v4_fip_manager,
                                        datastore.write_api)
        cleanup_updaters = [v4_filter_updater, v4_nat_updater]
        cleanup_ip_mgrs = [v4_ipset_mgr]
        # "managers" receive datastore updates via the UpdateSplitter below.
        managers = [v4_ipset_mgr, v4_rules_manager, v4_ep_manager,
                    v4_masq_manager, v4_nat_updater]
        actors_to_start = [
            hosts_ipset_v4,
            v4_filter_updater,
            v4_nat_updater,
            v4_ipset_mgr,
            v4_masq_manager,
            v4_rules_manager,
            v4_ep_dispatch_chains,
            v4_if_dispatch_chains,
            v4_ep_manager,
            v4_fip_manager,
        ]
        # Determine if ipv6 is enabled using the config option.
        if config.IPV6_SUPPORT == "true":
            v6_enabled = True
            ipv6_reason = None
        elif config.IPV6_SUPPORT == "auto":
            v6_enabled, ipv6_reason = futils.detect_ipv6_supported()
        else:
            v6_enabled = False
            ipv6_reason = "Ipv6Support is 'false'"
        if v6_enabled:
            # --- IPv6 actor tree, mirroring the IPv4 one (plus "raw") ---
            v6_raw_updater = IptablesUpdater("raw", ip_version=6,
                                             config=config)
            v6_filter_updater = IptablesUpdater("filter", ip_version=6,
                                                config=config)
            v6_nat_updater = IptablesUpdater("nat", ip_version=6,
                                             config=config)
            v6_ipset_mgr = IpsetManager(IPV6, config)
            v6_rules_manager = RulesManager(config, 6, v6_filter_updater,
                                            v6_ipset_mgr)
            v6_ep_dispatch_chains = WorkloadDispatchChains(config, 6,
                                                           v6_filter_updater)
            v6_if_dispatch_chains = HostEndpointDispatchChains(
                config, 6, v6_filter_updater)
            v6_fip_manager = FloatingIPManager(config, 6, v6_nat_updater)
            v6_ep_manager = EndpointManager(config, IPV6, v6_filter_updater,
                                            v6_ep_dispatch_chains,
                                            v6_if_dispatch_chains,
                                            v6_rules_manager,
                                            v6_fip_manager,
                                            datastore.write_api)
            cleanup_updaters.append(v6_filter_updater)
            cleanup_ip_mgrs.append(v6_ipset_mgr)
            managers += [v6_ipset_mgr, v6_rules_manager, v6_ep_manager,
                         v6_raw_updater, v6_nat_updater]
            actors_to_start += [
                v6_raw_updater,
                v6_filter_updater,
                v6_nat_updater,
                v6_ipset_mgr,
                v6_rules_manager,
                v6_ep_dispatch_chains,
                v6_if_dispatch_chains,
                v6_ep_manager,
                v6_fip_manager,
            ]
        else:
            # Keep the linter happy.
            # NOTE(review): _log.warn is a deprecated alias for warning().
            _log.warn("IPv6 support disabled: %s.", ipv6_reason)
            v6_filter_updater = None
            v6_nat_updater = None
            v6_raw_updater = None
            v6_if_dispatch_chains = None
        cleanup_mgr = CleanupManager(config, cleanup_updaters,
                                     cleanup_ip_mgrs)
        managers.append(cleanup_mgr)
        update_splitter = UpdateSplitter(managers)
        iface_watcher = InterfaceWatcher(update_splitter)
        actors_to_start += [
            cleanup_mgr,
            iface_watcher,
        ]
        _log.info("Starting actors.")
        for actor in actors_to_start:
            actor.start()
        monitored_items += [actor.greenlet for actor in actors_to_start]
        # Try to ensure that the nf_conntrack_netlink kernel module is present.
        # This works around an issue[1] where the first call to the "conntrack"
        # command fails while waiting for the module to load.
        # [1] https://github.com/projectcalico/felix/issues/986
        load_nf_conntrack()
        # Install the global rules before we start polling for updates.
        _log.info("Installing global rules.")
        # Dispatch chain needs to make its configuration before we insert the
        # top-level chains.
        v4_if_dispatch_chains.configure_iptables(async=False)
        install_global_rules(config, v4_filter_updater, v4_nat_updater,
                             ip_version=4)
        if v6_enabled:
            # Dispatch chain needs to make its configuration before we insert
            # the top-level chains.
            v6_if_dispatch_chains.configure_iptables(async=False)
            install_global_rules(config, v6_filter_updater, v6_nat_updater,
                                 ip_version=6, raw_updater=v6_raw_updater)
        # Start polling for updates. These kicks make the actors poll
        # indefinitely.
        _log.info("Starting polling for interface and etcd updates.")
        f = iface_watcher.watch_interfaces(async=True)
        monitored_items.append(f)
        datastore.start_watch(update_splitter, async=True)

        # Register a SIG_USR handler to trigger a diags dump.
        def dump_top_level_actors(log):
            for a in actors_to_start:
                # The output will include queue length and the like.
                log.info("%s", a)
        futils.register_diags("Top-level actors", dump_top_level_actors)
        futils.register_process_statistics()
        try:
            gevent.signal(signal.SIGUSR1,
                          functools.partial(futils.dump_diags))
        except AttributeError:
            # It doesn't matter too much if we fail to do this.
            _log.warning("Unable to install diag dump handler")
            pass
        gevent.signal(signal.SIGTERM,
                      functools.partial(shut_down, datastore))
        gevent.signal(signal.SIGINT,
                      functools.partial(shut_down, datastore))
        # Wait for something to fail.
        _log.info("All top-level actors started, waiting on failures...")
        stopped_greenlets_iter = gevent.iwait(monitored_items)
        stopped_greenlet = next(stopped_greenlets_iter)
        try:
            stopped_greenlet.get()
        except Exception:
            _log.exception("Greenlet failed: %s", stopped_greenlet)
            raise
        else:
            # A monitored greenlet returning cleanly is itself an error:
            # they are all expected to run forever.
            _log.error("Greenlet %s unexpectedly returned.", stopped_greenlet)
            raise AssertionError("Greenlet unexpectedly returned")
    # NOTE(review): bare "except:" also catches SystemExit/KeyboardInterrupt;
    # it only logs and re-raises, so propagation is preserved.
    except:
        _log.exception("Exception killing main greenlet")
        raise
class TestEndpointManager(BaseTestCase): def setUp(self): super(TestEndpointManager, self).setUp() self.config = load_config("felix_default.cfg", env_dict={"FELIX_FELIXHOSTNAME": "hostname"}) self.m_updater = Mock(spec=IptablesUpdater) self.m_dispatch = Mock(spec=DispatchChains) self.m_rules_mgr = Mock(spec=RulesManager) self.m_status_reporter = Mock(spec=EtcdStatusReporter) self.mgr = EndpointManager(self.config, "IPv4", self.m_updater, self.m_dispatch, self.m_rules_mgr, self.m_status_reporter) self.mgr.get_and_incref = Mock() self.mgr.decref = Mock() def test_create(self): obj = self.mgr._create(ENDPOINT_ID) self.assertTrue(isinstance(obj, LocalEndpoint)) def test_on_started(self): ep = {"name": "tap1234"} self.mgr.on_endpoint_update(ENDPOINT_ID, ep, async=True) self.step_actor(self.mgr) m_endpoint = Mock(spec=LocalEndpoint) self.mgr._on_object_started(ENDPOINT_ID, m_endpoint) self.assertEqual(m_endpoint.on_endpoint_update.mock_calls, [mock.call(ep, async=True)]) def test_on_datamodel_in_sync(self): ep = {"name": "tap1234"} self.mgr.on_endpoint_update(ENDPOINT_ID, ep, async=True) self.step_actor(self.mgr) self.mgr.on_datamodel_in_sync(async=True) self.step_actor(self.mgr) self.assertEqual(self.m_dispatch.apply_snapshot.mock_calls, [mock.call(frozenset(["tap1234"]), async=True)]) # Second call should have no effect. self.m_dispatch.apply_snapshot.reset_mock() self.mgr.on_datamodel_in_sync(async=True) self.step_actor(self.mgr) self.assertEqual(self.m_dispatch.apply_snapshot.mock_calls, []) def test_endpoint_update_not_our_host(self): ep = {"name": "tap1234"} with mock.patch.object(self.mgr, "_is_starting_or_live") as m_sol: self.mgr.on_endpoint_update(EndpointId("notus", "b", "c", "d"), ep, async=True) self.step_actor(self.mgr) self.assertFalse(m_sol.called) def test_endpoint_live_obj(self): ep = {"name": "tap1234"} # First send in an update to trigger creation. 
self.mgr.on_endpoint_update(ENDPOINT_ID, ep, async=True) self.step_actor(self.mgr) self.assertEqual(self.mgr.get_and_incref.mock_calls, [mock.call(ENDPOINT_ID)]) m_endpoint = Mock(spec=LocalEndpoint) self.mgr.objects_by_id[ENDPOINT_ID] = m_endpoint # Then send a second update to check that it gets passed on to the # LocalEndpoint. with mock.patch.object(self.mgr, "_is_starting_or_live") as m_sol: m_sol.return_value = True self.mgr.on_endpoint_update(ENDPOINT_ID, ep, async=True) self.step_actor(self.mgr) self.assertEqual(m_sol.mock_calls, [mock.call(ENDPOINT_ID)]) self.assertEqual(m_endpoint.on_endpoint_update.mock_calls, [mock.call(ep, force_reprogram=False, async=True)])
class TestEndpointManager(BaseTestCase):
    """
    Tests for EndpointManager: endpoint lifecycle, dispatch-chain
    snapshots, tiered-policy ordering and label inheritance.

    NOTE(review): this file contains several definitions named
    TestEndpointManager; under normal Python import semantics only the
    last binding of the name is the class the test loader sees.
    Tokens below are unchanged; only formatting restored and comments
    added.
    """

    def setUp(self):
        super(TestEndpointManager, self).setUp()
        # Pin the hostname so ENDPOINT_ID (host "hostname") counts as local.
        self.config = load_config("felix_default.cfg", env_dict={
            "FELIX_FELIXHOSTNAME": "hostname"})
        self.m_updater = Mock(spec=IptablesUpdater)
        self.m_dispatch = Mock(spec=DispatchChains)
        self.m_rules_mgr = Mock(spec=RulesManager)
        self.m_fip_manager = Mock(spec=FloatingIPManager)
        self.m_status_reporter = Mock(spec=EtcdStatusReporter)
        self.mgr = EndpointManager(self.config, "IPv4", self.m_updater,
                                   self.m_dispatch, self.m_rules_mgr,
                                   self.m_fip_manager,
                                   self.m_status_reporter)
        # Stub the ref-counting hooks so tests can assert on them directly.
        self.mgr.get_and_incref = Mock()
        self.mgr.decref = Mock()

    def test_create(self):
        obj = self.mgr._create(ENDPOINT_ID)
        self.assertTrue(isinstance(obj, LocalEndpoint))

    def test_on_started(self):
        ep = {"name": "tap1234"}
        self.mgr.on_endpoint_update(ENDPOINT_ID, ep, async=True)
        self.step_actor(self.mgr)
        m_endpoint = Mock(spec=LocalEndpoint)
        self.mgr.objects_by_id[ENDPOINT_ID] = m_endpoint
        self.mgr._on_object_started(ENDPOINT_ID, m_endpoint)
        # The queued endpoint data is replayed into the started object.
        self.assertEqual(
            m_endpoint.on_endpoint_update.mock_calls,
            [mock.call(ep, async=True)]
        )

    def test_on_datamodel_in_sync(self):
        ep = {"name": "tap1234"}
        self.mgr.on_endpoint_update(ENDPOINT_ID, ep, async=True)
        self.step_actor(self.mgr)
        self.mgr.on_datamodel_in_sync(async=True)
        self.step_actor(self.mgr)
        self.assertEqual(
            self.m_dispatch.apply_snapshot.mock_calls,
            [mock.call(frozenset(["tap1234"]), async=True)]
        )
        # Second call should have no effect.
        self.m_dispatch.apply_snapshot.reset_mock()
        self.mgr.on_datamodel_in_sync(async=True)
        self.step_actor(self.mgr)
        self.assertEqual(self.m_dispatch.apply_snapshot.mock_calls, [])

    def test_tiered_policy_ordering_and_updates(self):
        """
        Check that the tier_sequence ordering is updated correctly as we
        add and remove tiers and policies.
        """
        # Make sure we have an endpoint so that we can check that it gets
        # put in the dirty set.
        self.mgr.on_datamodel_in_sync(async=True)
        self.mgr.on_endpoint_update(ENDPOINT_ID, {"name": "tap12345"},
                                    async=True)
        self.step_actor(self.mgr)
        # Pretend that the endpoint is alive so that we'll send updates to id.
        m_endpoint = Mock(spec=LocalEndpoint)
        self.mgr.objects_by_id[ENDPOINT_ID] = m_endpoint
        self.mgr._is_starting_or_live = Mock(return_value=True)
        # Add a profile into the tier so it'll apply to the endpoint.
        pol_id_a = TieredPolicyId("a", "a1")
        self.mgr.on_policy_selector_update(pol_id_a, parse_selector("all()"),
                                           10, async=True)
        pol_id_b = TieredPolicyId("b", "b1")
        self.mgr.on_policy_selector_update(pol_id_b, parse_selector("all()"),
                                           10, async=True)
        pol_id_c1 = TieredPolicyId("c1", "c1")
        self.mgr.on_policy_selector_update(pol_id_c1, parse_selector("all()"),
                                           10, async=True)
        pol_id_c2 = TieredPolicyId("c2", "c2")
        self.mgr.on_policy_selector_update(pol_id_c2, parse_selector("all()"),
                                           10, async=True)
        pol_id_c3 = TieredPolicyId("c3", "c3")
        self.mgr.on_policy_selector_update(pol_id_c3, parse_selector("all()"),
                                           10, async=True)
        self.step_actor(self.mgr)
        # Since we haven't set the tier ID yet, the policy won't get applied...
        self.assertEqual(m_endpoint.on_tiered_policy_update.mock_calls,
                         [mock.call(OrderedDict(), async=True)] * 5)
        m_endpoint.on_tiered_policy_update.reset_mock()
        # Adding a tier should trigger an update, adding the tier and policy.
        self.mgr.on_tier_data_update("a", {"order": 1}, async=True)
        self.step_actor(self.mgr)
        self.assertEqual(self.mgr.endpoints_with_dirty_policy, set())
        tiers = OrderedDict()
        tiers["a"] = [pol_id_a]
        self.assertEqual(m_endpoint.on_tiered_policy_update.mock_calls,
                         [mock.call(tiers, async=True)])
        m_endpoint.on_tiered_policy_update.reset_mock()
        # Idempotent update should get squashed.
        self.mgr.on_tier_data_update("a", {"order": 2}, async=True)
        self.mgr.on_tier_data_update("a", {"order": 2}, async=True)
        self.step_actor(self.mgr)
        self.assertEqual(m_endpoint.on_tiered_policy_update.mock_calls, [])
        # Adding another tier should trigger an update.
        self.mgr.on_tier_data_update("b", {"order": 3}, async=True)
        self.step_actor(self.mgr)
        tiers = OrderedDict()
        tiers["a"] = [pol_id_a]
        tiers["b"] = [pol_id_b]
        self.assertEqual(m_endpoint.on_tiered_policy_update.mock_calls,
                         [mock.call(tiers, async=True)])
        m_endpoint.on_tiered_policy_update.reset_mock()
        # Swapping the order should trigger an update.
        self.mgr.on_tier_data_update("b", {"order": 1}, async=True)
        self.step_actor(self.mgr)
        tiers = OrderedDict()
        tiers["b"] = [pol_id_b]
        tiers["a"] = [pol_id_a]
        self.assertEqual(m_endpoint.on_tiered_policy_update.mock_calls,
                         [mock.call(tiers, async=True)])
        m_endpoint.on_tiered_policy_update.reset_mock()
        # Check deletion and that it's idempotent.
        self.mgr.on_tier_data_update("b", None, async=True)
        self.step_actor(self.mgr)
        self.mgr.on_policy_selector_update(pol_id_b, None, None, async=True)
        self.mgr.on_policy_selector_update(pol_id_b, None, None, async=True)
        self.step_actor(self.mgr)
        self.mgr.on_tier_data_update("b", None, async=True)
        self.step_actor(self.mgr)
        self.mgr.on_policy_selector_update(pol_id_b, None, None, async=True)
        self.mgr.on_policy_selector_update(pol_id_b, None, None, async=True)
        self.step_actor(self.mgr)
        tiers = OrderedDict()
        tiers["a"] = [pol_id_a]
        self.assertEqual(
            m_endpoint.on_tiered_policy_update.mock_calls,
            [mock.call(tiers, async=True)] * 2  # One for policy, one for tier.
        )
        m_endpoint.on_tiered_policy_update.reset_mock()
        # Check lexicographic tie-breaker.
        self.mgr.on_tier_data_update("c1", {"order": 0}, async=True)
        self.mgr.on_tier_data_update("c2", {"order": 0}, async=True)
        self.mgr.on_tier_data_update("c3", {"order": 0}, async=True)
        self.step_actor(self.mgr)
        tiers = OrderedDict()
        # All 'c's should sort before 'a' due to explicit ordering but 'c's
        # should sort in lexicographic order.
        tiers["c1"] = [pol_id_c1]
        tiers["c2"] = [pol_id_c2]
        tiers["c3"] = [pol_id_c3]
        tiers["a"] = [pol_id_a]
        actual_call = m_endpoint.on_tiered_policy_update.mock_calls[-1]
        expected_call = mock.call(tiers, async=True)
        self.assertEqual(actual_call, expected_call,
                         msg="\nExpected: %s\n Got: %s" %
                             (expected_call, actual_call))
        m_endpoint.on_tiered_policy_update.reset_mock()

    def test_label_inheritance(self):
        # Make sure we have an endpoint so that we can check that it gets
        # put in the dirty set. These have no labels at all so we test
        # that no labels gets translated to an empty dict.
        self.mgr.on_endpoint_update(ENDPOINT_ID,
                                    {"name": "tap12345",
                                     "profile_ids": ["prof1"]},
                                    async=True)
        self.mgr.on_endpoint_update(ENDPOINT_ID_2,
                                    {"name": "tap23456",
                                     "profile_ids": ["prof2"]},
                                    async=True)
        # And we need a selector to pick out one of the endpoints by the labels
        # attached to its parent.
        self.mgr.on_policy_selector_update(TieredPolicyId("a", "b"),
                                           parse_selector('a == "b"'),
                                           10, async=True)
        self.step_actor(self.mgr)
        with mock.patch.object(self.mgr, "_update_dirty_policy") as m_update:
            self.mgr.on_prof_labels_set("prof1", {"a": "b"}, async=True)
            self.step_actor(self.mgr)
            # Only the first endpoint should end up matching the selector.
            self.assertEqual(self.mgr.endpoints_with_dirty_policy,
                             set([ENDPOINT_ID]))
            # And an update should be triggered.
            self.assertEqual(m_update.mock_calls, [mock.call()])

    def test_endpoint_update_not_our_host(self):
        # Updates for other hosts are filtered before the liveness check.
        ep = {"name": "tap1234"}
        with mock.patch.object(self.mgr, "_is_starting_or_live") as m_sol:
            self.mgr.on_endpoint_update(EndpointId("notus", "b", "c", "d"),
                                        ep, async=True)
            self.step_actor(self.mgr)
        self.assertFalse(m_sol.called)

    def test_endpoint_live_obj(self):
        ep = {"name": "tap1234"}
        # First send in an update to trigger creation.
        self.mgr.on_endpoint_update(ENDPOINT_ID, ep, async=True)
        self.step_actor(self.mgr)
        self.assertEqual(self.mgr.get_and_incref.mock_calls,
                         [mock.call(ENDPOINT_ID)])
        m_endpoint = Mock(spec=LocalEndpoint)
        self.mgr.objects_by_id[ENDPOINT_ID] = m_endpoint
        # Then send a second update to check that it gets passed on to the
        # LocalEndpoint.
        with mock.patch.object(self.mgr, "_is_starting_or_live") as m_sol:
            m_sol.return_value = True
            self.mgr.on_endpoint_update(ENDPOINT_ID, ep, async=True)
            self.step_actor(self.mgr)
        self.assertEqual(m_sol.mock_calls, [mock.call(ENDPOINT_ID)])
        self.assertEqual(m_endpoint.on_endpoint_update.mock_calls,
                         [mock.call(ep, force_reprogram=False, async=True)])
        self.assertTrue(ENDPOINT_ID in self.mgr.local_endpoint_ids)
        # Finally, send in a deletion.
        m_endpoint.on_endpoint_update.reset_mock()
        with mock.patch.object(self.mgr, "_is_starting_or_live") as m_sol:
            m_sol.return_value = True
            self.mgr.on_endpoint_update(ENDPOINT_ID, None, async=True)
            self.step_actor(self.mgr)
        self.assertEqual(m_endpoint.on_endpoint_update.mock_calls,
                         [mock.call(None, force_reprogram=False, async=True)])
        self.assertEqual(self.mgr.decref.mock_calls,
                         [mock.call(ENDPOINT_ID)])
        self.assertFalse(ENDPOINT_ID in self.mgr.local_endpoint_ids)

    def test_on_interface_update_unknown(self):
        # Unknown interface names are ignored before the liveness check.
        with mock.patch.object(self.mgr, "_is_starting_or_live") as m_sol:
            self.mgr.on_interface_update("foo", True, async=True)
            self.step_actor(self.mgr)
        self.assertFalse(m_sol.called)

    def test_on_interface_update_known(self):
        # A known, live endpoint gets the interface state forwarded.
        ep = {"name": "tap1234"}
        m_endpoint = Mock(spec=LocalEndpoint)
        self.mgr.objects_by_id[ENDPOINT_ID] = m_endpoint
        with mock.patch.object(self.mgr, "_is_starting_or_live") as m_sol:
            m_sol.return_value = True
            self.mgr.on_endpoint_update(ENDPOINT_ID, ep, async=True)
            self.mgr.on_interface_update("tap1234", True, async=True)
            self.step_actor(self.mgr)
        self.assertEqual(
            m_endpoint.on_interface_update.mock_calls,
            [mock.call(True, async=True)]
        )

    def test_on_interface_update_known_but_not_live(self):
        # A known endpoint that is not live must NOT receive the update.
        ep = {"name": "tap1234"}
        m_endpoint = Mock(spec=LocalEndpoint)
        self.mgr.objects_by_id[ENDPOINT_ID] = m_endpoint
        with mock.patch.object(self.mgr, "_is_starting_or_live") as m_sol:
            m_sol.return_value = False
            self.mgr.on_endpoint_update(ENDPOINT_ID, ep, async=True)
            self.mgr.on_interface_update("tap1234", True, async=True)
            self.step_actor(self.mgr)
        self.assertEqual(m_endpoint.on_interface_update.mock_calls, [])
class TestEndpointManager(BaseTestCase): def setUp(self): super(TestEndpointManager, self).setUp() self.config = load_config("felix_default.cfg", env_dict={ "FELIX_FELIXHOSTNAME": "hostname"}) self.m_updater = Mock(spec=IptablesUpdater) self.m_dispatch = Mock(spec=DispatchChains) self.m_rules_mgr = Mock(spec=RulesManager) self.m_fip_manager = Mock(spec=FloatingIPManager) self.m_status_reporter = Mock(spec=EtcdStatusReporter) self.mgr = EndpointManager(self.config, "IPv4", self.m_updater, self.m_dispatch, self.m_rules_mgr, self.m_fip_manager, self.m_status_reporter) self.mgr.get_and_incref = Mock() self.mgr.decref = Mock() def test_create(self): obj = self.mgr._create(ENDPOINT_ID) self.assertTrue(isinstance(obj, LocalEndpoint)) def test_on_started(self): ep = {"name": "tap1234"} self.mgr.on_endpoint_update(ENDPOINT_ID, ep, async=True) self.step_actor(self.mgr) m_endpoint = Mock(spec=LocalEndpoint) self.mgr.objects_by_id[ENDPOINT_ID] = m_endpoint self.mgr._on_object_started(ENDPOINT_ID, m_endpoint) self.assertEqual( m_endpoint.on_endpoint_update.mock_calls, [mock.call(ep, async=True)] ) def test_on_datamodel_in_sync(self): ep = {"name": "tap1234"} self.mgr.on_endpoint_update(ENDPOINT_ID, ep, async=True) self.step_actor(self.mgr) self.mgr.on_datamodel_in_sync(async=True) self.step_actor(self.mgr) self.assertEqual( self.m_dispatch.apply_snapshot.mock_calls, [mock.call(frozenset(["tap1234"]), async=True)] ) # Second call should have no effect. self.m_dispatch.apply_snapshot.reset_mock() self.mgr.on_datamodel_in_sync(async=True) self.step_actor(self.mgr) self.assertEqual(self.m_dispatch.apply_snapshot.mock_calls, []) def test_tiered_policy_ordering_and_updates(self): """ Check that the tier_sequence ordering is updated correctly as we add and remove tiers and policies. """ # Make sure we have an endpoint so that we can check that it gets # put in the dirty set. 
self.mgr.on_datamodel_in_sync(async=True) self.mgr.on_endpoint_update(ENDPOINT_ID, {"name": "tap12345"}, async=True) self.step_actor(self.mgr) # Pretend that the endpoint is alive so that we'll send updates to id. m_endpoint = Mock(spec=LocalEndpoint) self.mgr.objects_by_id[ENDPOINT_ID] = m_endpoint self.mgr._is_starting_or_live = Mock(return_value=True) # Add a profile into the tier so it'll apply to the endpoint. pol_id_a = TieredPolicyId("a", "a1") self.mgr.on_policy_selector_update(pol_id_a, parse_selector("all()"), 10, async=True) pol_id_b = TieredPolicyId("b", "b1") self.mgr.on_policy_selector_update(pol_id_b, parse_selector("all()"), 10, async=True) pol_id_c1 = TieredPolicyId("c1", "c1") self.mgr.on_policy_selector_update(pol_id_c1, parse_selector("all()"), 10, async=True) pol_id_c2 = TieredPolicyId("c2", "c2") self.mgr.on_policy_selector_update(pol_id_c2, parse_selector("all()"), 10, async=True) pol_id_c3 = TieredPolicyId("c3", "c3") self.mgr.on_policy_selector_update(pol_id_c3, parse_selector("all()"), 10, async=True) self.step_actor(self.mgr) # Since we haven't set the tier ID yet, the policy won't get applied... self.assertEqual(m_endpoint.on_tiered_policy_update.mock_calls, [mock.call(OrderedDict(), async=True)] * 5) m_endpoint.on_tiered_policy_update.reset_mock() # Adding a tier should trigger an update, adding the tier and policy. self.mgr.on_tier_data_update("a", {"order": 1}, async=True) self.step_actor(self.mgr) self.assertEqual(self.mgr.endpoints_with_dirty_policy, set()) tiers = OrderedDict() tiers["a"] = [pol_id_a] self.assertEqual(m_endpoint.on_tiered_policy_update.mock_calls, [mock.call(tiers, async=True)]) m_endpoint.on_tiered_policy_update.reset_mock() # Idempotent update should get squashed. 
self.mgr.on_tier_data_update("a", {"order": 2}, async=True) self.mgr.on_tier_data_update("a", {"order": 2}, async=True) self.step_actor(self.mgr) self.assertEqual(m_endpoint.on_tiered_policy_update.mock_calls, []) # Adding another tier should trigger an update. self.mgr.on_tier_data_update("b", {"order": 3}, async=True) self.step_actor(self.mgr) tiers = OrderedDict() tiers["a"] = [pol_id_a] tiers["b"] = [pol_id_b] self.assertEqual(m_endpoint.on_tiered_policy_update.mock_calls, [mock.call(tiers, async=True)]) m_endpoint.on_tiered_policy_update.reset_mock() # Swapping the order should trigger an update. self.mgr.on_tier_data_update("b", {"order": 1}, async=True) self.step_actor(self.mgr) tiers = OrderedDict() tiers["b"] = [pol_id_b] tiers["a"] = [pol_id_a] self.assertEqual(m_endpoint.on_tiered_policy_update.mock_calls, [mock.call(tiers, async=True)]) m_endpoint.on_tiered_policy_update.reset_mock() # Check deletion and that it's idempotent. self.mgr.on_tier_data_update("b", None, async=True) self.step_actor(self.mgr) self.mgr.on_policy_selector_update(pol_id_b, None, None, async=True) self.mgr.on_policy_selector_update(pol_id_b, None, None, async=True) self.step_actor(self.mgr) self.mgr.on_tier_data_update("b", None, async=True) self.step_actor(self.mgr) self.mgr.on_policy_selector_update(pol_id_b, None, None, async=True) self.mgr.on_policy_selector_update(pol_id_b, None, None, async=True) self.step_actor(self.mgr) tiers = OrderedDict() tiers["a"] = [pol_id_a] self.assertEqual( m_endpoint.on_tiered_policy_update.mock_calls, [mock.call(tiers, async=True)] * 2 # One for policy, one for tier. ) m_endpoint.on_tiered_policy_update.reset_mock() # Check lexicographic tie-breaker. 
self.mgr.on_tier_data_update("c1", {"order": 0}, async=True) self.mgr.on_tier_data_update("c2", {"order": 0}, async=True) self.mgr.on_tier_data_update("c3", {"order": 0}, async=True) self.step_actor(self.mgr) tiers = OrderedDict() # All 'c's should sort before 'a' due to explicit ordering but 'c's # should sort in lexicographic order. tiers["c1"] = [pol_id_c1] tiers["c2"] = [pol_id_c2] tiers["c3"] = [pol_id_c3] tiers["a"] = [pol_id_a] actual_call = m_endpoint.on_tiered_policy_update.mock_calls[-1] expected_call = mock.call(tiers, async=True) self.assertEqual(actual_call, expected_call, msg="\nExpected: %s\n Got: %s" % (expected_call, actual_call)) m_endpoint.on_tiered_policy_update.reset_mock() def test_label_inheritance(self): # Make sure we have an endpoint so that we can check that it gets # put in the dirty set. These have no labels at all so we test # that no labels gets translated to an empty dict. self.mgr.on_endpoint_update(ENDPOINT_ID, {"name": "tap12345", "profile_ids": ["prof1"]}, async=True) self.mgr.on_endpoint_update(ENDPOINT_ID_2, {"name": "tap23456", "profile_ids": ["prof2"]}, async=True) # And we need a selector to pick out one of the endpoints by the labels # attached to its parent. self.mgr.on_policy_selector_update(TieredPolicyId("a", "b"), parse_selector('a == "b"'), 10, async=True) self.step_actor(self.mgr) with mock.patch.object(self.mgr, "_update_dirty_policy") as m_update: self.mgr.on_prof_labels_set("prof1", {"a": "b"}, async=True) self.step_actor(self.mgr) # Only the first endpoint should end up matching the selector. self.assertEqual(self.mgr.endpoints_with_dirty_policy, set([ENDPOINT_ID])) # And an update should be triggered. 
self.assertEqual(m_update.mock_calls, [mock.call()]) def test_endpoint_update_not_our_host(self): ep = {"name": "tap1234"} with mock.patch.object(self.mgr, "_is_starting_or_live") as m_sol: self.mgr.on_endpoint_update(EndpointId("notus", "b", "c", "d"), ep, async=True) self.step_actor(self.mgr) self.assertFalse(m_sol.called) def test_endpoint_live_obj(self): ep = {"name": "tap1234"} # First send in an update to trigger creation. self.mgr.on_endpoint_update(ENDPOINT_ID, ep, async=True) self.step_actor(self.mgr) self.assertEqual(self.mgr.get_and_incref.mock_calls, [mock.call(ENDPOINT_ID)]) m_endpoint = Mock(spec=LocalEndpoint) self.mgr.objects_by_id[ENDPOINT_ID] = m_endpoint # Then send a second update to check that it gets passed on to the # LocalEndpoint. with mock.patch.object(self.mgr, "_is_starting_or_live") as m_sol: m_sol.return_value = True self.mgr.on_endpoint_update(ENDPOINT_ID, ep, async=True) self.step_actor(self.mgr) self.assertEqual(m_sol.mock_calls, [mock.call(ENDPOINT_ID)]) self.assertEqual(m_endpoint.on_endpoint_update.mock_calls, [mock.call(ep, force_reprogram=False, async=True)])