def setUp(self):
    """Build an IptablesUpdater wired to an in-memory iptables stub."""
    super(TestIptablesUpdater, self).setUp()
    self.stub = IptablesStub("filter")
    # Minimal config: a zero refresh interval keeps the periodic refresh
    # greenlet from running during tests.
    config = Mock()
    config.REFRESH_INTERVAL = 0  # disable refresh thread
    self.m_config = config
    updater = IptablesUpdater("filter", self.m_config, 4)
    # Divert iptables-restore input into the stub instead of the real binary.
    updater._execute_iptables = self.stub.apply_iptables_restore
    self.ipt = updater
    # Fake out the read-back commands (iptables-save / iptables --list) too.
    patcher = patch("gevent.subprocess.check_output", autospec=True)
    self.check_output_patch = patcher
    mock_check_output = patcher.start()
    mock_check_output.side_effect = self.fake_check_output
    self.m_check_output = mock_check_output
def setUp(self):
    """Create an IptablesUpdater whose dataplane is an in-memory stub."""
    super(TestIptablesUpdater, self).setUp()
    self.stub = IptablesStub("filter")
    updater = IptablesUpdater("filter", 4)
    # Divert iptables-restore input into the stub instead of the real binary.
    updater._execute_iptables = self.stub.apply_iptables_restore
    self.ipt = updater
    # Fake out the read-back commands (iptables-save / iptables --list) too.
    patcher = patch("gevent.subprocess.check_output", autospec=True)
    self.check_output_patch = patcher
    mock_check_output = patcher.start()
    mock_check_output.side_effect = self.fake_check_output
    self.m_check_output = mock_check_output
def setUp(self):
    """Load a real default config (refresh disabled) and build the updater."""
    super(TestIptablesUpdater, self).setUp()
    self.stub = IptablesStub("filter")
    # FELIX_REFRESHINTERVAL=0 disables the periodic refresh greenlet.
    env_dict = {"FELIX_REFRESHINTERVAL": "0"}
    self.config = load_config("felix_default.cfg", env_dict=env_dict)
    updater = IptablesUpdater("filter", self.config, 4)
    # Divert iptables-restore input into the stub instead of the real binary.
    updater._execute_iptables = self.stub.apply_iptables_restore
    self.ipt = updater
    # Fake out the read-back commands (iptables-save / iptables --list) too.
    patcher = patch("gevent.subprocess.check_output", autospec=True)
    self.check_output_patch = patcher
    mock_check_output = patcher.start()
    mock_check_output.side_effect = self.fake_check_output
    self.m_check_output = mock_check_output
def _main_greenlet(config): """ The root of our tree of greenlets. Responsible for restarting its children if desired. """ try: _log.info("Connecting to etcd to get our configuration.") hosts_ipset_v4 = IpsetActor(HOSTS_IPSET_V4) etcd_api = EtcdAPI(config, hosts_ipset_v4) etcd_api.start() # Ask the EtcdAPI to fill in the global config object before we # proceed. We don't yet support config updates. config_loaded = etcd_api.load_config(async=False) config_loaded.wait() _log.info("Main greenlet: Configuration loaded, starting remaining " "actors...") v4_filter_updater = IptablesUpdater("filter", ip_version=4) v4_nat_updater = IptablesUpdater("nat", ip_version=4) v4_ipset_mgr = IpsetManager(IPV4) v4_masq_manager = MasqueradeManager(IPV4, v4_nat_updater) v4_rules_manager = RulesManager(4, v4_filter_updater, v4_ipset_mgr) v4_dispatch_chains = DispatchChains(config, 4, v4_filter_updater) v4_ep_manager = EndpointManager(config, IPV4, v4_filter_updater, v4_dispatch_chains, v4_rules_manager) v6_filter_updater = IptablesUpdater("filter", ip_version=6) v6_ipset_mgr = IpsetManager(IPV6) v6_rules_manager = RulesManager(6, v6_filter_updater, v6_ipset_mgr) v6_dispatch_chains = DispatchChains(config, 6, v6_filter_updater) v6_ep_manager = EndpointManager(config, IPV6, v6_filter_updater, v6_dispatch_chains, v6_rules_manager) update_splitter = UpdateSplitter(config, [v4_ipset_mgr, v6_ipset_mgr], [v4_rules_manager, v6_rules_manager], [v4_ep_manager, v6_ep_manager], [v4_filter_updater, v6_filter_updater], v4_masq_manager) iface_watcher = InterfaceWatcher(update_splitter) _log.info("Starting actors.") hosts_ipset_v4.start() update_splitter.start() v4_filter_updater.start() v4_nat_updater.start() v4_ipset_mgr.start() v4_masq_manager.start() v4_rules_manager.start() v4_dispatch_chains.start() v4_ep_manager.start() v6_filter_updater.start() v6_ipset_mgr.start() v6_rules_manager.start() v6_dispatch_chains.start() v6_ep_manager.start() iface_watcher.start() top_level_actors = [ 
hosts_ipset_v4, update_splitter, v4_nat_updater, v4_filter_updater, v4_nat_updater, v4_ipset_mgr, v4_masq_manager, v4_rules_manager, v4_dispatch_chains, v4_ep_manager, v6_filter_updater, v6_ipset_mgr, v6_rules_manager, v6_dispatch_chains, v6_ep_manager, iface_watcher, etcd_api, ] monitored_items = [actor.greenlet for actor in top_level_actors] # Install the global rules before we start polling for updates. _log.info("Installing global rules.") install_global_rules(config, v4_filter_updater, v6_filter_updater, v4_nat_updater) # Start polling for updates. These kicks make the actors poll # indefinitely. _log.info("Starting polling for interface and etcd updates.") f = iface_watcher.watch_interfaces(async=True) monitored_items.append(f) etcd_api.start_watch(update_splitter, async=True) # Register a SIG_USR handler to trigger a diags dump. def dump_top_level_actors(log): for a in top_level_actors: # The output will include queue length and the like. log.info("%s", a) futils.register_diags("Top-level actors", dump_top_level_actors) try: gevent.signal(signal.SIGUSR1, functools.partial(futils.dump_diags)) except AttributeError: # It doesn't matter too much if we fail to do this. _log.warning("Unable to install diag dump handler") pass # Wait for something to fail. _log.info("All top-level actors started, waiting on failures...") stopped_greenlets_iter = gevent.iwait(monitored_items) stopped_greenlet = next(stopped_greenlets_iter) try: stopped_greenlet.get() except Exception: _log.exception("Greenlet failed: %s", stopped_greenlet) raise else: _log.error("Greenlet %s unexpectedly returned.", stopped_greenlet) raise AssertionError("Greenlet unexpectedly returned") except: _log.exception("Exception killing main greenlet") raise
def _main_greenlet(config): """ The root of our tree of greenlets. Responsible for restarting its children if desired. """ try: _log.info("Connecting to etcd to get our configuration.") etcd_watcher = EtcdWatcher(config) etcd_watcher.start() # Ask the EtcdWatcher to fill in the global config object before we # proceed. We don't yet support config updates. etcd_watcher.load_config(async=False) _log.info("Main greenlet: Configuration loaded, starting remaining " "actors...") v4_filter_updater = IptablesUpdater("filter", ip_version=4) v4_nat_updater = IptablesUpdater("nat", ip_version=4) v4_ipset_mgr = IpsetManager(IPV4) v4_rules_manager = RulesManager(4, v4_filter_updater, v4_ipset_mgr) v4_dispatch_chains = DispatchChains(config, 4, v4_filter_updater) v4_ep_manager = EndpointManager(config, IPV4, v4_filter_updater, v4_dispatch_chains, v4_rules_manager) v6_filter_updater = IptablesUpdater("filter", ip_version=6) v6_ipset_mgr = IpsetManager(IPV6) v6_rules_manager = RulesManager(6, v6_filter_updater, v6_ipset_mgr) v6_dispatch_chains = DispatchChains(config, 6, v6_filter_updater) v6_ep_manager = EndpointManager(config, IPV6, v6_filter_updater, v6_dispatch_chains, v6_rules_manager) update_splitter = UpdateSplitter( config, [v4_ipset_mgr, v6_ipset_mgr], [v4_rules_manager, v6_rules_manager], [v4_ep_manager, v6_ep_manager], [v4_filter_updater, v6_filter_updater]) iface_watcher = InterfaceWatcher(update_splitter) _log.info("Starting actors.") update_splitter.start() v4_filter_updater.start() v4_nat_updater.start() v4_ipset_mgr.start() v4_rules_manager.start() v4_dispatch_chains.start() v4_ep_manager.start() v6_filter_updater.start() v6_ipset_mgr.start() v6_rules_manager.start() v6_dispatch_chains.start() v6_ep_manager.start() iface_watcher.start() monitored_items = [ update_splitter.greenlet, v4_nat_updater.greenlet, v4_filter_updater.greenlet, v4_nat_updater.greenlet, v4_ipset_mgr.greenlet, v4_rules_manager.greenlet, v4_dispatch_chains.greenlet, v4_ep_manager.greenlet, 
v6_filter_updater.greenlet, v6_ipset_mgr.greenlet, v6_rules_manager.greenlet, v6_dispatch_chains.greenlet, v6_ep_manager.greenlet, iface_watcher.greenlet, etcd_watcher.greenlet ] # Install the global rules before we start polling for updates. _log.info("Installing global rules.") install_global_rules(config, v4_filter_updater, v6_filter_updater, v4_nat_updater) # Start polling for updates. These kicks make the actors poll # indefinitely. _log.info("Starting polling for interface and etcd updates.") f = iface_watcher.watch_interfaces(async=True) monitored_items.append(f) f = etcd_watcher.watch_etcd(update_splitter, async=True) monitored_items.append(f) # Wait for something to fail. _log.info("All top-level actors started, waiting on failures...") stopped_greenlets_iter = gevent.iwait(monitored_items) stopped_greenlet = next(stopped_greenlets_iter) try: stopped_greenlet.get() except Exception: _log.exception("Greenlet failed: %s", stopped_greenlet) raise else: _log.error("Greenlet %s unexpectedly returned.", stopped_greenlet) raise AssertionError("Greenlet unexpectedly returned") except: _log.exception("Exception killing main greenlet") raise
class TestIptablesUpdater(BaseTestCase):
    """
    Tests for IptablesUpdater, driven against an in-memory IptablesStub
    rather than a real iptables install.
    """

    def setUp(self):
        super(TestIptablesUpdater, self).setUp()
        # Simulated "filter" table; records the iptables-restore input it
        # is given.
        self.stub = IptablesStub("filter")
        self.ipt = IptablesUpdater("filter", 4)
        # Divert the updater's iptables-restore calls into the stub.
        self.ipt._execute_iptables = self.stub.apply_iptables_restore
        # Read-back commands (iptables-save / iptables --list) are faked
        # via check_output.
        self.check_output_patch = patch("gevent.subprocess.check_output",
                                        autospec=True)
        self.m_check_output = self.check_output_patch.start()
        self.m_check_output.side_effect = self.fake_check_output

    def fake_check_output(self, cmd, *args, **kwargs):
        # Only the two read-back commands the updater issues are supported;
        # anything else is a test failure.
        if cmd == ["iptables-save", "--table", "filter"]:
            return self.stub.generate_iptables_save()
        elif cmd == ['iptables', '--wait', '--list', '--table', 'filter']:
            return self.stub.generate_iptables_list()
        else:
            raise AssertionError("Unexpected call %r" % cmd)

    def tearDown(self):
        # Undo the check_output patch before the base class cleans up.
        self.check_output_patch.stop()
        super(TestIptablesUpdater, self).tearDown()

    def test_rewrite_chains_stub(self):
        """
        Tests that referencing a chain causes it to get stubbed out.
        """
        self.ipt.rewrite_chains(
            {"foo": ["--append foo --jump bar"]},
            {"foo": set(["bar"])},
            async=True,
        )
        self.step_actor(self.ipt)
        # "bar" was referenced but never programmed, so it should be
        # replaced with a DROP stub.
        self.assertEqual(self.stub.chains_contents,
                         {"foo": ["--append foo --jump bar"],
                          'bar': [MISSING_CHAIN_DROP % "bar"]})

    def test_delete_required_chain_stub(self):
        """
        Tests that deleting a required chain stubs it out instead.
        """
        # Exit the graceful restart period, during which we do not stub out
        # chains.
        self.ipt.cleanup(async=True)
        # Install a couple of chains. foo depends on bar.
        self.ipt.rewrite_chains(
            {"foo": ["--append foo --jump bar"],
             "bar": ["--append bar --jump ACCEPT"]},
            {"foo": set(["bar"]), "bar": set()},
            async=True,
        )
        self.step_actor(self.ipt)
        # Both chains should be programmed as normal.
        self.assertEqual(self.stub.chains_contents,
                         {"foo": ["--append foo --jump bar"],
                          'bar': ["--append bar --jump ACCEPT"]})
        # Deleting bar should stub it out instead.
        self.ipt.delete_chains(["bar"], async=True)
        self.step_actor(self.ipt)
        self.assertEqual(self.stub.chains_contents,
                         {"foo": ["--append foo --jump bar"],
                          'bar': [MISSING_CHAIN_DROP % "bar"]})

    def test_cleanup_with_dependencies(self):
        """
        Tests that cleanup() stubs out or deletes left-over felix chains
        in dependency order while leaving non-felix chains alone.
        """
        # Set up the dataplane with some chains that the IptablesUpdater
        # doesn't know about and some that it will know about.
        self.stub.apply_iptables_restore("""
        *filter
        :INPUT DROP [10:505]
        :FORWARD DROP [0:0]
        :OUTPUT ACCEPT [40:1600]
        # These non-felix chains should be ignored
        :ignore-me -
        :ignore-me-too -
        # These are left-over felix chains.  Some depend on each other.  They
        # can only be cleaned up in the correct order.
        :felix-foo - [0:0]
        :felix-bar -
        :felix-foo -
        :felix-baz -
        :felix-biff -
        --append felix-foo --src 10.0.0.1/32 --jump felix-bar
        # baz depends on biff; cleanup needs to detect that.
        --append felix-baz --src 10.0.0.2/32 --jump felix-biff
        --append felix-biff --src 10.0.0.3/32 --jump DROP
        --append ignore-me --jump ignore-me-too
        --append ignore-me-too --jump DROP
        """.splitlines())
        # IptablesUpdater hears about some chains before the cleanup.  These
        # partially overlap with the ones that are already there.
        self.ipt.rewrite_chains(
            {"felix-foo": ["--append felix-foo --jump felix-bar",
                           "--append felix-foo --jump felix-baz",
                           "--append felix-foo --jump felix-boff"],
             "felix-bar": ["--append felix-bar --jump ACCEPT"]},
            # felix-foo depends on:
            # * a new chain that's also being programmed
            # * a pre-existing chain that is present at start of day
            # * a new chain that isn't present at all.
            {"felix-foo": set(["felix-bar", "felix-baz", "felix-boff"]),
             "felix-bar": set()},
            async=True,
        )
        self.step_actor(self.ipt)
        # Dataplane should now have all the new chains in place, including
        # a stub for felix-boff.  However, the old chains should not have been
        # cleaned up.
        self.stub.assert_chain_contents({
            "INPUT": [],
            "FORWARD": [],
            "OUTPUT": [],
            "ignore-me": ["--append ignore-me --jump ignore-me-too"],
            "ignore-me-too": ["--append ignore-me-too --jump DROP"],
            "felix-foo": ["--append felix-foo --jump felix-bar",
                          "--append felix-foo --jump felix-baz",
                          "--append felix-foo --jump felix-boff"],
            "felix-bar": ["--append felix-bar --jump ACCEPT"],
            "felix-baz": ["--append felix-baz --src 10.0.0.2/32 "
                          "--jump felix-biff"],
            "felix-boff": [MISSING_CHAIN_DROP % "felix-boff"],
            "felix-biff": ["--append felix-biff --src 10.0.0.3/32 --jump DROP"],
        })
        # Issue the cleanup.
        self.ipt.cleanup(async=True)
        self.step_actor(self.ipt)
        # Should now have stubbed-out chains for all the ones that are not
        # programmed.
        self.stub.assert_chain_contents({
            # Non felix chains ignored:
            "INPUT": [],
            "FORWARD": [],
            "OUTPUT": [],
            "ignore-me": ["--append ignore-me --jump ignore-me-too"],
            "ignore-me-too": ["--append ignore-me-too --jump DROP"],
            # Explicitly-programmed chains programmed.
            "felix-foo": ["--append felix-foo --jump felix-bar",
                          "--append felix-foo --jump felix-baz",
                          "--append felix-foo --jump felix-boff"],
            "felix-bar": ["--append felix-bar --jump ACCEPT"],
            # All required but unknown chains stubbed.
            "felix-baz": [MISSING_CHAIN_DROP % "felix-baz"],
            "felix-boff": [MISSING_CHAIN_DROP % "felix-boff"],
            # felix-biff deleted, even though it was referenced by felix-baz
            # before.
        })
class TestIptablesUpdater(BaseTestCase):
    """
    Tests for IptablesUpdater, driven against an in-memory IptablesStub
    rather than a real iptables install.
    """

    def setUp(self):
        super(TestIptablesUpdater, self).setUp()
        # Simulated "filter" table; records the iptables-restore input it
        # is given.
        self.stub = IptablesStub("filter")
        # FELIX_REFRESHINTERVAL=0 disables the periodic refresh greenlet.
        env_dict = {"FELIX_REFRESHINTERVAL": "0"}
        self.config = load_config("felix_default.cfg", env_dict=env_dict)
        self.ipt = IptablesUpdater("filter", self.config, 4)
        # Divert the updater's iptables-restore calls into the stub.
        self.ipt._execute_iptables = self.stub.apply_iptables_restore
        # Read-back commands (iptables-save / iptables --list) are faked
        # via check_output.
        self.check_output_patch = patch("gevent.subprocess.check_output",
                                        autospec=True)
        self.m_check_output = self.check_output_patch.start()
        self.m_check_output.side_effect = self.fake_check_output

    def fake_check_output(self, cmd, *args, **kwargs):
        # Only the two read-back commands the updater issues are supported;
        # anything else is a test failure.
        _log.info("Stubbing out call to %s", cmd)
        if cmd == ["iptables-save", "--table", "filter"]:
            return self.stub.generate_iptables_save()
        elif cmd == ['iptables', '--wait', '--list', '--numeric',
                     '--table', 'filter']:
            return self.stub.generate_iptables_list()
        else:
            raise AssertionError("Unexpected call %r" % cmd)

    def tearDown(self):
        # Undo the check_output patch before the base class cleans up.
        self.check_output_patch.stop()
        super(TestIptablesUpdater, self).tearDown()

    def test_rewrite_chains_stub(self):
        """
        Tests that referencing a chain causes it to get stubbed out.
        """
        self.ipt.rewrite_chains(
            {"foo": ["--append foo --jump bar"]},
            {"foo": set(["bar"])},
            async=True,
        )
        self.step_actor(self.ipt)
        # "bar" was referenced but never programmed, so it should be
        # replaced with drop rules.
        self.assertEqual(self.stub.chains_contents,
                         {"foo": ["--append foo --jump bar"],
                          'bar': drop_rules("bar")})

    def test_rewrite_chains_cover(self):
        """
        Hits remaining code paths in rewrite chains.
        """
        cb = Mock()
        self.ipt.rewrite_chains(
            {"foo": ["--append foo --jump bar"]},
            {"foo": set(["bar"])},
            async=True,
            callback=cb,
        )
        self.step_actor(self.ipt)
        # Success is reported to the callback as None.
        cb.assert_called_once_with(None)

    def test_delete_required_chain_stub(self):
        """
        Tests that deleting a required chain stubs it out instead.
        """
        # Exit the graceful restart period, during which we do not stub out
        # chains.
        self.ipt.cleanup(async=True)
        # Install a couple of chains. foo depends on bar.
        self.ipt.rewrite_chains(
            {"foo": ["--append foo --jump bar"],
             "bar": ["--append bar --jump ACCEPT"]},
            {"foo": set(["bar"]), "bar": set()},
            async=True,
        )
        self.step_actor(self.ipt)
        # Both chains should be programmed as normal.
        self.assertEqual(self.stub.chains_contents,
                         {"foo": ["--append foo --jump bar"],
                          'bar': ["--append bar --jump ACCEPT"]})
        # Deleting bar should stub it out instead.
        self.ipt.delete_chains(["bar"], async=True)
        self.step_actor(self.ipt)
        self.assertEqual(self.stub.chains_contents,
                         {"foo": ["--append foo --jump bar"],
                          'bar': drop_rules("bar")})

    def test_cleanup_with_dependencies(self):
        """
        Tests that cleanup() stubs out or deletes left-over felix chains
        in dependency order while leaving non-felix chains alone.
        """
        # Set up the dataplane with some chains that the IptablesUpdater
        # doesn't know about and some that it will know about.
        self.stub.apply_iptables_restore("""
        *filter
        :INPUT DROP [10:505]
        :FORWARD DROP [0:0]
        :OUTPUT ACCEPT [40:1600]
        # These non-felix chains should be ignored
        :ignore-me -
        :ignore-me-too -
        # These are left-over felix chains.  Some depend on each other.  They
        # can only be cleaned up in the correct order.
        :felix-foo - [0:0]
        :felix-bar -
        :felix-foo -
        :felix-baz -
        :felix-biff -
        --append felix-foo --src 10.0.0.1/32 --jump felix-bar
        # baz depends on biff; cleanup needs to detect that.
        --append felix-baz --src 10.0.0.2/32 --jump felix-biff
        --append felix-biff --src 10.0.0.3/32 --jump DROP
        --append ignore-me --jump ignore-me-too
        --append ignore-me-too --jump DROP
        """.splitlines())
        # IptablesUpdater hears about some chains before the cleanup.  These
        # partially overlap with the ones that are already there.
        self.ipt._load_chain_names_from_iptables(async=True)
        self.ipt.rewrite_chains(
            {"felix-foo": ["--append felix-foo --jump felix-bar",
                           "--append felix-foo --jump felix-baz",
                           "--append felix-foo --jump felix-boff"],
             "felix-bar": ["--append felix-bar --jump ACCEPT"]},
            # felix-foo depends on:
            # * a new chain that's also being programmed
            # * a pre-existing chain that is present at start of day
            # * a new chain that isn't present at all.
            {"felix-foo": set(["felix-bar", "felix-baz", "felix-boff"]),
             "felix-bar": set()},
            async=True,
        )
        self.step_actor(self.ipt)
        # Dataplane should now have all the new chains in place, including
        # a stub for felix-boff.  However, the old chains should not have been
        # cleaned up.
        self.stub.assert_chain_contents({
            "INPUT": [],
            "FORWARD": [],
            "OUTPUT": [],
            "ignore-me": ["--append ignore-me --jump ignore-me-too"],
            "ignore-me-too": ["--append ignore-me-too --jump DROP"],
            "felix-foo": ["--append felix-foo --jump felix-bar",
                          "--append felix-foo --jump felix-baz",
                          "--append felix-foo --jump felix-boff"],
            "felix-bar": ["--append felix-bar --jump ACCEPT"],
            "felix-baz": ["--append felix-baz --src 10.0.0.2/32 "
                          "--jump felix-biff"],
            "felix-boff": drop_rules("felix-boff"),
            "felix-biff": ["--append felix-biff --src 10.0.0.3/32 --jump DROP"],
        })
        # Issue the cleanup.
        self.ipt.cleanup(async=True)
        self.step_actor(self.ipt)
        # Should now have stubbed-out chains for all the ones that are not
        # programmed.
        self.stub.assert_chain_contents({
            # Non felix chains ignored:
            "INPUT": [],
            "FORWARD": [],
            "OUTPUT": [],
            "ignore-me": ["--append ignore-me --jump ignore-me-too"],
            "ignore-me-too": ["--append ignore-me-too --jump DROP"],
            # Explicitly-programmed chains programmed.
            "felix-foo": ["--append felix-foo --jump felix-bar",
                          "--append felix-foo --jump felix-baz",
                          "--append felix-foo --jump felix-boff"],
            "felix-bar": ["--append felix-bar --jump ACCEPT"],
            # All required but unknown chains stubbed.
            "felix-baz": drop_rules("felix-baz"),
            "felix-boff": drop_rules("felix-boff"),
            # felix-biff deleted, even though it was referenced by felix-baz
            # before.
        })

    def test_delete_during_grace_period(self):
        """
        Test explicit deletion of a referenced chain during the grace
        period.  The chain _should_ be stubbed out.
        """
        # Simulate a felix restart where the chains we're about to manipulate
        # already exist.
        self.stub.apply_iptables_restore("""
        *filter
        # These are left-over felix chains.  Some depend on each other.  They
        # can only be cleaned up in the correct order.
        :felix-foo - [0:0]
        :felix-bar -
        :felix-baz -
        --append felix-foo --src 10.0.0.1/32 --jump felix-bar
        --append felix-bar --src 10.0.0.2/32 --jump DROP
        --append felix-baz --src 10.0.0.3/32 --jump DROP
        """.splitlines())
        self.ipt._load_chain_names_from_iptables(async=True)
        # IptablesUpdater hears about all the chains before the cleanup.
        # Chains have dependencies.
        self.ipt.rewrite_chains(
            {"felix-foo": ["--append felix-foo --jump felix-bar"],
             "felix-bar": ["--append felix-bar --jump ACCEPT"],
             "felix-baz": ["--append felix-baz --jump ACCEPT"]},
            {"felix-foo": set(["felix-bar"]),
             "felix-bar": set(),
             "felix-baz": set()},
            async=True,
        )
        self.step_actor(self.ipt)
        # Dataplane should now have all the new chains in place.
        self.stub.assert_chain_contents({
            "felix-foo": ["--append felix-foo --jump felix-bar"],
            "felix-bar": ["--append felix-bar --jump ACCEPT"],
            "felix-baz": ["--append felix-baz --jump ACCEPT"],
        })
        # Then delete bar and baz.  The former should be stubbed because it
        # is required by chain foo.  The latter should be deleted.
        self.ipt.delete_chains(["felix-bar", "felix-baz"], async=True)
        self.step_actor(self.ipt)
        self.stub.assert_chain_contents({
            "felix-foo": ["--append felix-foo --jump felix-bar"],
            "felix-bar": drop_rules("felix-bar"),
        })

    def test_cleanup_bad_read_back(self):
        """
        Tests cleanup when the post-cleanup read-back shows chains missing:
        an error should be logged and the chains re-programmed.
        """
        # IptablesUpdater hears about some chains before the cleanup.
        self.ipt.rewrite_chains(
            {"felix-foo": ["--append felix-foo --jump felix-boff"]},
            {"felix-foo": set(["felix-boff"])},
            async=True,
        )
        self.step_actor(self.ipt)
        self.stub.assert_chain_contents({
            "felix-foo": ["--append felix-foo --jump felix-boff"],
            "felix-boff": drop_rules("felix-boff"),
        })
        # Some other process then breaks our chains.
        self.stub.chains_contents = {}
        self.stub.iptables_save_output = [
            None,  # Start of cleanup.
            # End of cleanup.  Out of sync:
            "*filter\n"
            ":INPUT DROP [68:4885]\n"
            ":FORWARD DROP [0:0]\n"
            ":OUTPUT ACCEPT [20:888]\n"
            ":DOCKER - [0:0]\n"
            "-A INPUT -i lxcbr0 -p tcp -m tcp --dport 53 -j ACCEPT\n"
            "-A FORWARD -o lxcbr0 -j ACCEPT\n"
            "COMMIT\n"
        ]
        _log.info("Forcing iptables-save to always return %s",
                  self.stub.iptables_save_output)
        # Issue the cleanup.
        with patch.object(fiptables._log, "error") as m_error:
            self.ipt.cleanup(async=True)
            self.step_actor(self.ipt)
            # The updater should notice our chains are missing and log it.
            m_error.assert_called_once_with(
                ANY,
                set([]),
                set([]),
                set(["felix-foo", "felix-boff"])
            )
        self.stub.assert_chain_contents({
            "felix-foo": ["--append felix-foo --jump felix-boff"],
            "felix-boff": drop_rules("felix-boff"),
        })

    def test_ensure_rule_inserted(self):
        """
        Tests that ensure_rule_inserted does a delete-then-insert and
        retries the insert if the delete fails (rule not present yet).
        """
        fragment = "FOO --jump DROP"
        with patch.object(self.ipt, "_execute_iptables") as m_exec:
            # First call (delete+insert) fails because the rule isn't there;
            # the retry does a plain insert.
            m_exec.side_effect = iter([FailedSystemCall("Message", [], 1, "",
                                                        "line 2 failed"),
                                       None,
                                       None])
            self.ipt.ensure_rule_inserted(fragment, async=True)
            self.step_actor(self.ipt)
            self.assertEqual(
                m_exec.mock_calls,
                [
                    call(["*filter",
                          "--delete FOO --jump DROP",
                          "--insert FOO --jump DROP",
                          "COMMIT"],
                         fail_log_level=logging.DEBUG),
                    call(["*filter",
                          "--insert FOO --jump DROP",
                          "COMMIT"]),
                ])
        self.assertTrue(fragment in self.ipt._inserted_rule_fragments)

    def test_insert_remove_tracking(self):
        """
        Tests that inserted/removed rule fragments are tracked so that a
        refresh can replay them.
        """
        fragment = "FOO --jump DROP"
        with patch.object(self.ipt, "_execute_iptables") as m_exec:
            m_exec.side_effect = [
                # Insert.
                None,
                # Remove: requires an exception to terminate loop.
                None,
                FailedSystemCall("Message", [], 1, "", "line 2 failed"),
                # Insert.
                None,
            ]
            self.ipt.ensure_rule_inserted(fragment, async=True)
            self.step_actor(self.ipt)
            self.assertTrue(fragment in self.ipt._inserted_rule_fragments)
            self.assertTrue(fragment not in self.ipt._removed_rule_fragments)
            self.ipt.ensure_rule_removed(fragment, async=True)
            self.step_actor(self.ipt)
            self.assertTrue(fragment not in self.ipt._inserted_rule_fragments)
            self.assertTrue(fragment in self.ipt._removed_rule_fragments)
            self.ipt.ensure_rule_inserted(fragment, async=True)
            self.step_actor(self.ipt)
            self.assertTrue(fragment in self.ipt._inserted_rule_fragments)
            self.assertTrue(fragment not in self.ipt._removed_rule_fragments)

    def test_ensure_rule_removed(self):
        """
        Tests that ensure_rule_removed loops deleting until the delete
        fails (meaning the rule is gone).
        """
        fragment = "FOO --jump DROP"
        with patch.object(self.ipt, "_execute_iptables") as m_exec:
            # First delete succeeds; second fails, terminating the loop.
            m_exec.side_effect = iter([None,
                                       FailedSystemCall("Message", [], 1, "",
                                                        "line 2 failed")])
            self.ipt.ensure_rule_removed(fragment, async=True)
            self.step_actor(self.ipt)
            exp_call = call([
                '*filter',
                '--delete FOO --jump DROP',
                'COMMIT',
            ], fail_log_level=logging.DEBUG)
            self.assertEqual(m_exec.mock_calls, [exp_call] * 2)

    def test_ensure_rule_removed_not_present(self):
        """
        Tests removal of a rule that was never present: single failed
        delete, treated as success.
        """
        with patch.object(self.ipt, "_execute_iptables") as m_exec:
            m_exec.side_effect = iter([FailedSystemCall("Message", [], 1, "",
                                                        "line 2 failed")])
            self.ipt.ensure_rule_removed("FOO --jump DROP", async=True)
            self.step_actor(self.ipt)
            exp_call = call([
                '*filter',
                '--delete FOO --jump DROP',
                'COMMIT',
            ], fail_log_level=logging.DEBUG)
            self.assertEqual(m_exec.mock_calls, [exp_call])

    def test_ensure_rule_removed_missing_dep(self):
        """
        Tests that a delete failing due to a missing ipset dependency is
        also treated as "rule not present".
        """
        with patch.object(self.ipt, "_execute_iptables") as m_exec:
            m_exec.side_effect = iter([
                FailedSystemCall("Message", [], 1, "",
                                 "at line: 2\n"
                                 "ipset doesn't exist")])
            self.ipt.ensure_rule_removed("FOO --jump DROP", async=True)
            self.step_actor(self.ipt)
            exp_call = call([
                '*filter',
                '--delete FOO --jump DROP',
                'COMMIT',
            ], fail_log_level=logging.DEBUG)
            self.assertEqual(m_exec.mock_calls, [exp_call])

    def test_ensure_rule_removed_error(self):
        """
        Tests that an unrecognised delete failure propagates to the caller.
        """
        with patch.object(self.ipt, "_execute_iptables") as m_exec:
            m_exec.side_effect = iter([FailedSystemCall("Message", [], 1, "",
                                                        "the foo is barred")])
            f = self.ipt.ensure_rule_removed("FOO --jump DROP", async=True)
            self.step_actor(self.ipt)
            # The failure surfaces via the returned AsyncResult.
            self.assertRaises(FailedSystemCall, f.get)
            exp_call = call([
                '*filter',
                '--delete FOO --jump DROP',
                'COMMIT',
            ], fail_log_level=logging.DEBUG)
            self.assertEqual(m_exec.mock_calls, [exp_call])

    def test_refresh_iptables(self):
        """
        Tests that refresh_iptables() replays tracked insertions and
        removals.
        """
        self.ipt.ensure_rule_inserted("INPUT -j ACCEPT", async=True)
        self.ipt.ensure_rule_inserted("INPUT -j DROP", async=True)
        self.ipt.ensure_rule_removed("INPUT -j DROP", async=True)
        self.step_actor(self.ipt)
        self.ipt.refresh_iptables(async=True)
        with patch.object(self.ipt, "_insert_rule") as m_insert_rule:
            with patch.object(self.ipt, "_remove_rule") as m_remove_rule:
                self.step_actor(self.ipt)
        # Only the still-inserted rule is re-inserted; the removed one is
        # re-removed.
        m_insert_rule.assert_called_once_with("INPUT -j ACCEPT",
                                              log_level=logging.DEBUG)
        m_remove_rule.assert_called_once_with("INPUT -j DROP",
                                              log_level=logging.DEBUG)
def _main_greenlet(config): """ The root of our tree of greenlets. Responsible for restarting its children if desired. """ try: _log.info("Connecting to etcd to get our configuration.") etcd_watcher = EtcdWatcher(config) etcd_watcher.start() # Ask the EtcdWatcher to fill in the global config object before we # proceed. We don't yet support config updates. etcd_watcher.load_config(async=False) _log.info("Main greenlet: Configuration loaded, starting remaining " "actors...") v4_filter_updater = IptablesUpdater("filter", ip_version=4) v4_nat_updater = IptablesUpdater("nat", ip_version=4) v4_ipset_mgr = IpsetManager(IPV4) v4_rules_manager = RulesManager(4, v4_filter_updater, v4_ipset_mgr) v4_dispatch_chains = DispatchChains(config, 4, v4_filter_updater) v4_ep_manager = EndpointManager(config, IPV4, v4_filter_updater, v4_dispatch_chains, v4_rules_manager) v6_filter_updater = IptablesUpdater("filter", ip_version=6) v6_ipset_mgr = IpsetManager(IPV6) v6_rules_manager = RulesManager(6, v6_filter_updater, v6_ipset_mgr) v6_dispatch_chains = DispatchChains(config, 6, v6_filter_updater) v6_ep_manager = EndpointManager(config, IPV6, v6_filter_updater, v6_dispatch_chains, v6_rules_manager) update_splitter = UpdateSplitter(config, [v4_ipset_mgr, v6_ipset_mgr], [v4_rules_manager, v6_rules_manager], [v4_ep_manager, v6_ep_manager], [v4_filter_updater, v6_filter_updater]) iface_watcher = InterfaceWatcher(update_splitter) _log.info("Starting actors.") update_splitter.start() v4_filter_updater.start() v4_nat_updater.start() v4_ipset_mgr.start() v4_rules_manager.start() v4_dispatch_chains.start() v4_ep_manager.start() v6_filter_updater.start() v6_ipset_mgr.start() v6_rules_manager.start() v6_dispatch_chains.start() v6_ep_manager.start() iface_watcher.start() monitored_items = [ update_splitter.greenlet, v4_nat_updater.greenlet, v4_filter_updater.greenlet, v4_nat_updater.greenlet, v4_ipset_mgr.greenlet, v4_rules_manager.greenlet, v4_dispatch_chains.greenlet, v4_ep_manager.greenlet, 
v6_filter_updater.greenlet, v6_ipset_mgr.greenlet, v6_rules_manager.greenlet, v6_dispatch_chains.greenlet, v6_ep_manager.greenlet, iface_watcher.greenlet, etcd_watcher.greenlet ] # Install the global rules before we start polling for updates. _log.info("Installing global rules.") install_global_rules(config, v4_filter_updater, v6_filter_updater, v4_nat_updater) # Start polling for updates. These kicks make the actors poll # indefinitely. _log.info("Starting polling for interface and etcd updates.") f = iface_watcher.watch_interfaces(async=True) monitored_items.append(f) f = etcd_watcher.watch_etcd(update_splitter, async=True) monitored_items.append(f) # Wait for something to fail. _log.info("All top-level actors started, waiting on failures...") stopped_greenlets_iter = gevent.iwait(monitored_items) stopped_greenlet = next(stopped_greenlets_iter) try: stopped_greenlet.get() except Exception: _log.exception("Greenlet failed: %s", stopped_greenlet) raise else: _log.error("Greenlet %s unexpectedly returned.", stopped_greenlet) raise AssertionError("Greenlet unexpectedly returned") except: _log.exception("Exception killing main greenlet") raise
def _main_greenlet(config): """ The root of our tree of greenlets. Responsible for restarting its children if desired. """ try: _log.info("Connecting to etcd to get our configuration.") hosts_ipset_v4 = IpsetActor(HOSTS_IPSET_V4) etcd_api = EtcdAPI(config, hosts_ipset_v4) etcd_api.start() # Ask the EtcdAPI to fill in the global config object before we # proceed. We don't yet support config updates. config_loaded = etcd_api.load_config(async=False) config_loaded.wait() # Ensure the Kernel's global options are correctly configured for # Calico. devices.configure_global_kernel_config() _log.info("Main greenlet: Configuration loaded, starting remaining " "actors...") v4_filter_updater = IptablesUpdater("filter", ip_version=4, config=config) v4_nat_updater = IptablesUpdater("nat", ip_version=4, config=config) v4_ipset_mgr = IpsetManager(IPV4) v4_masq_manager = MasqueradeManager(IPV4, v4_nat_updater) v4_rules_manager = RulesManager(4, v4_filter_updater, v4_ipset_mgr) v4_dispatch_chains = DispatchChains(config, 4, v4_filter_updater) v4_ep_manager = EndpointManager(config, IPV4, v4_filter_updater, v4_dispatch_chains, v4_rules_manager, etcd_api.status_reporter) v6_raw_updater = IptablesUpdater("raw", ip_version=6, config=config) v6_filter_updater = IptablesUpdater("filter", ip_version=6, config=config) v6_ipset_mgr = IpsetManager(IPV6) v6_rules_manager = RulesManager(6, v6_filter_updater, v6_ipset_mgr) v6_dispatch_chains = DispatchChains(config, 6, v6_filter_updater) v6_ep_manager = EndpointManager(config, IPV6, v6_filter_updater, v6_dispatch_chains, v6_rules_manager, etcd_api.status_reporter) update_splitter = UpdateSplitter(config, [v4_ipset_mgr, v6_ipset_mgr], [v4_rules_manager, v6_rules_manager], [v4_ep_manager, v6_ep_manager], [v4_filter_updater, v6_filter_updater, v6_raw_updater, v4_nat_updater], v4_masq_manager) iface_watcher = InterfaceWatcher(update_splitter) _log.info("Starting actors.") hosts_ipset_v4.start() update_splitter.start() v4_filter_updater.start() 
v4_nat_updater.start() v4_ipset_mgr.start() v4_masq_manager.start() v4_rules_manager.start() v4_dispatch_chains.start() v4_ep_manager.start() v6_raw_updater.start() v6_filter_updater.start() v6_ipset_mgr.start() v6_rules_manager.start() v6_dispatch_chains.start() v6_ep_manager.start() iface_watcher.start() top_level_actors = [ hosts_ipset_v4, update_splitter, v4_nat_updater, v4_filter_updater, v4_nat_updater, v4_ipset_mgr, v4_masq_manager, v4_rules_manager, v4_dispatch_chains, v4_ep_manager, v6_raw_updater, v6_filter_updater, v6_ipset_mgr, v6_rules_manager, v6_dispatch_chains, v6_ep_manager, iface_watcher, etcd_api, ] monitored_items = [actor.greenlet for actor in top_level_actors] # Install the global rules before we start polling for updates. _log.info("Installing global rules.") install_global_rules(config, v4_filter_updater, v6_filter_updater, v4_nat_updater, v6_raw_updater) # Start polling for updates. These kicks make the actors poll # indefinitely. _log.info("Starting polling for interface and etcd updates.") f = iface_watcher.watch_interfaces(async=True) monitored_items.append(f) etcd_api.start_watch(update_splitter, async=True) # Register a SIG_USR handler to trigger a diags dump. def dump_top_level_actors(log): for a in top_level_actors: # The output will include queue length and the like. log.info("%s", a) futils.register_diags("Top-level actors", dump_top_level_actors) futils.register_process_statistics() try: gevent.signal(signal.SIGUSR1, functools.partial(futils.dump_diags)) except AttributeError: # It doesn't matter too much if we fail to do this. _log.warning("Unable to install diag dump handler") pass # Wait for something to fail. 
_log.info("All top-level actors started, waiting on failures...") stopped_greenlets_iter = gevent.iwait(monitored_items) stopped_greenlet = next(stopped_greenlets_iter) try: stopped_greenlet.get() except Exception: _log.exception("Greenlet failed: %s", stopped_greenlet) raise else: _log.error("Greenlet %s unexpectedly returned.", stopped_greenlet) raise AssertionError("Greenlet unexpectedly returned") except: _log.exception("Exception killing main greenlet") raise
def _main_greenlet(config):
    """
    The root of our tree of greenlets.  Responsible for restarting
    its children if desired.

    Creates, wires together and starts all of the top-level actors
    (iptables updaters, ipset/rules/endpoint managers, etc.), installs
    the global iptables rules, then blocks until any monitored greenlet
    dies, at which point it re-raises so the process exits.

    :param config: global configuration object; populated by the EtcdAPI
        before the remaining actors are created.
    """
    try:
        _log.info("Connecting to etcd to get our configuration.")
        hosts_ipset_v4 = IpsetActor(HOSTS_IPSET_V4)
        etcd_api = EtcdAPI(config, hosts_ipset_v4)
        etcd_api.start()
        # Ask the EtcdAPI to fill in the global config object before we
        # proceed.  We don't yet support config updates.
        config_loaded = etcd_api.load_config(async=False)
        config_loaded.wait()
        # Ensure the Kernel's global options are correctly configured for
        # Calico.
        devices.configure_global_kernel_config()
        _log.info("Main greenlet: Configuration loaded, starting remaining "
                  "actors...")
        monitored_items = []
        if config.PROM_METRICS_ENABLED:
            # Serve Prometheus metrics from a dedicated greenlet; monitor it
            # so the process dies if the stats server dies.
            httpd = HTTPServer(("0.0.0.0", config.PROM_METRICS_PORT),
                               MetricsHandler)
            stats_server = gevent.Greenlet(httpd.serve_forever)
            stats_server.start()
            monitored_items.append(stats_server)
        # IPv4 actors are always created.
        v4_filter_updater = IptablesUpdater("filter", ip_version=4,
                                            config=config)
        v4_nat_updater = IptablesUpdater("nat", ip_version=4, config=config)
        v4_ipset_mgr = IpsetManager(IPV4, config)
        v4_masq_manager = MasqueradeManager(IPV4, v4_nat_updater)
        v4_rules_manager = RulesManager(config, 4, v4_filter_updater,
                                        v4_ipset_mgr)
        v4_dispatch_chains = DispatchChains(config, 4, v4_filter_updater)
        v4_fip_manager = FloatingIPManager(config, 4, v4_nat_updater)
        v4_ep_manager = EndpointManager(config, IPV4, v4_filter_updater,
                                        v4_dispatch_chains,
                                        v4_rules_manager,
                                        v4_fip_manager,
                                        etcd_api.status_reporter)
        cleanup_updaters = [v4_filter_updater, v4_nat_updater]
        cleanup_ip_mgrs = [v4_ipset_mgr]
        update_splitter_args = [v4_ipset_mgr, v4_rules_manager,
                                v4_ep_manager, v4_masq_manager,
                                v4_nat_updater]
        # IPv6 actors are only created if the kernel has IPv6 support.
        v6_enabled = os.path.exists("/proc/sys/net/ipv6")
        if v6_enabled:
            v6_raw_updater = IptablesUpdater("raw", ip_version=6,
                                             config=config)
            v6_filter_updater = IptablesUpdater("filter", ip_version=6,
                                                config=config)
            v6_nat_updater = IptablesUpdater("nat", ip_version=6,
                                             config=config)
            v6_ipset_mgr = IpsetManager(IPV6, config)
            v6_rules_manager = RulesManager(config, 6, v6_filter_updater,
                                            v6_ipset_mgr)
            v6_dispatch_chains = DispatchChains(config, 6, v6_filter_updater)
            v6_fip_manager = FloatingIPManager(config, 6, v6_nat_updater)
            v6_ep_manager = EndpointManager(config, IPV6, v6_filter_updater,
                                            v6_dispatch_chains,
                                            v6_rules_manager,
                                            v6_fip_manager,
                                            etcd_api.status_reporter)
            cleanup_updaters.append(v6_filter_updater)
            cleanup_ip_mgrs.append(v6_ipset_mgr)
            update_splitter_args += [v6_ipset_mgr, v6_rules_manager,
                                     v6_ep_manager, v6_raw_updater,
                                     v6_nat_updater]
        cleanup_mgr = CleanupManager(config, cleanup_updaters,
                                     cleanup_ip_mgrs)
        update_splitter_args.append(cleanup_mgr)
        update_splitter = UpdateSplitter(update_splitter_args)
        iface_watcher = InterfaceWatcher(update_splitter)
        _log.info("Starting actors.")
        hosts_ipset_v4.start()
        cleanup_mgr.start()
        v4_filter_updater.start()
        v4_nat_updater.start()
        v4_ipset_mgr.start()
        v4_masq_manager.start()
        v4_rules_manager.start()
        v4_dispatch_chains.start()
        v4_ep_manager.start()
        v4_fip_manager.start()
        if v6_enabled:
            v6_raw_updater.start()
            v6_filter_updater.start()
            v6_ipset_mgr.start()
            v6_nat_updater.start()
            v6_rules_manager.start()
            v6_dispatch_chains.start()
            v6_ep_manager.start()
            v6_fip_manager.start()
        iface_watcher.start()
        # Every actor here is monitored below: if any of their greenlets
        # dies, the whole process is torn down.
        top_level_actors = [
            hosts_ipset_v4,
            cleanup_mgr,
            v4_filter_updater,
            v4_nat_updater,
            v4_ipset_mgr,
            v4_masq_manager,
            v4_rules_manager,
            v4_dispatch_chains,
            v4_ep_manager,
            v4_fip_manager,
            iface_watcher,
            etcd_api,
        ]
        if v6_enabled:
            top_level_actors += [
                v6_raw_updater,
                v6_filter_updater,
                v6_nat_updater,
                v6_ipset_mgr,
                v6_rules_manager,
                v6_dispatch_chains,
                v6_ep_manager,
                v6_fip_manager,
            ]
        monitored_items += [actor.greenlet for actor in top_level_actors]
        # Try to ensure that the nf_conntrack_netlink kernel module is
        # present.  This works around an issue[1] where the first call to
        # the "conntrack" command fails while waiting for the module to
        # load.
        # [1] https://github.com/projectcalico/calico/issues/986
        load_nf_conntrack()
        # Install the global rules before we start polling for updates.
        _log.info("Installing global rules.")
        install_global_rules(config, v4_filter_updater, v4_nat_updater,
                             ip_version=4)
        if v6_enabled:
            install_global_rules(config, v6_filter_updater, v6_nat_updater,
                                 ip_version=6, raw_updater=v6_raw_updater)
        # Start polling for updates.  These kicks make the actors poll
        # indefinitely.
        _log.info("Starting polling for interface and etcd updates.")
        f = iface_watcher.watch_interfaces(async=True)
        monitored_items.append(f)
        etcd_api.start_watch(update_splitter, async=True)

        # Register a SIG_USR handler to trigger a diags dump.
        def dump_top_level_actors(log):
            for a in top_level_actors:
                # The output will include queue length and the like.
                log.info("%s", a)
        futils.register_diags("Top-level actors", dump_top_level_actors)
        futils.register_process_statistics()
        try:
            gevent.signal(signal.SIGUSR1,
                          functools.partial(futils.dump_diags))
        except AttributeError:
            # It doesn't matter too much if we fail to do this.
            _log.warning("Unable to install diag dump handler")
            pass
        # Wait for something to fail.
        _log.info("All top-level actors started, waiting on failures...")
        stopped_greenlets_iter = gevent.iwait(monitored_items)
        stopped_greenlet = next(stopped_greenlets_iter)
        try:
            stopped_greenlet.get()
        except Exception:
            _log.exception("Greenlet failed: %s", stopped_greenlet)
            raise
        else:
            # Top-level greenlets are expected to run forever; a clean
            # return is treated as a failure too.
            _log.error("Greenlet %s unexpectedly returned.",
                       stopped_greenlet)
            raise AssertionError("Greenlet unexpectedly returned")
    except:
        _log.exception("Exception killing main greenlet")
        raise
def _main_greenlet(config):
    """
    The root of our tree of greenlets.  Responsible for restarting
    its children if desired.

    Builds the full actor graph (iptables updaters, ipset/rules/endpoint
    managers, optional IPv6 stack), starts every actor, installs the
    global iptables rules and then blocks until any monitored greenlet
    dies, re-raising so the process exits and can be restarted.

    :param config: global configuration object; populated by the EtcdAPI
        before the remaining actors are created.
    """
    try:
        _log.info("Connecting to etcd to get our configuration.")
        hosts_ipset_v4 = IpsetActor(HOSTS_IPSET_V4)
        etcd_api = EtcdAPI(config, hosts_ipset_v4)
        etcd_api.start()
        # Ask the EtcdAPI to fill in the global config object before we
        # proceed.  We don't yet support config updates.
        config_loaded = etcd_api.load_config(async=False)
        config_loaded.wait()
        # Ensure the Kernel's global options are correctly configured for
        # Calico.
        devices.configure_global_kernel_config()
        _log.info("Main greenlet: Configuration loaded, starting remaining "
                  "actors...")
        monitored_items = []
        if config.PROM_METRICS_ENABLED:
            # Prometheus metrics server runs in its own greenlet, which is
            # monitored so a crash takes the process down.
            httpd = HTTPServer(("0.0.0.0", config.PROM_METRICS_PORT),
                               MetricsHandler)
            stats_server = gevent.Greenlet(httpd.serve_forever)
            stats_server.start()
            monitored_items.append(stats_server)
        # IPv4 actors are always created.
        v4_filter_updater = IptablesUpdater("filter", ip_version=4,
                                            config=config)
        v4_nat_updater = IptablesUpdater("nat", ip_version=4, config=config)
        v4_ipset_mgr = IpsetManager(IPV4, config)
        v4_masq_manager = MasqueradeManager(IPV4, v4_nat_updater)
        v4_rules_manager = RulesManager(config, 4, v4_filter_updater,
                                        v4_ipset_mgr)
        v4_dispatch_chains = DispatchChains(config, 4, v4_filter_updater)
        v4_fip_manager = FloatingIPManager(config, 4, v4_nat_updater)
        v4_ep_manager = EndpointManager(
            config,
            IPV4,
            v4_filter_updater,
            v4_dispatch_chains,
            v4_rules_manager,
            v4_fip_manager,
            etcd_api.status_reporter,
        )
        cleanup_updaters = [v4_filter_updater, v4_nat_updater]
        cleanup_ip_mgrs = [v4_ipset_mgr]
        update_splitter_args = [v4_ipset_mgr, v4_rules_manager,
                                v4_ep_manager, v4_masq_manager,
                                v4_nat_updater]
        # IPv6 actors are only created if the kernel has IPv6 support.
        v6_enabled = os.path.exists("/proc/sys/net/ipv6")
        if v6_enabled:
            v6_raw_updater = IptablesUpdater("raw", ip_version=6,
                                             config=config)
            v6_filter_updater = IptablesUpdater("filter", ip_version=6,
                                                config=config)
            v6_nat_updater = IptablesUpdater("nat", ip_version=6,
                                             config=config)
            v6_ipset_mgr = IpsetManager(IPV6, config)
            v6_rules_manager = RulesManager(config, 6, v6_filter_updater,
                                            v6_ipset_mgr)
            v6_dispatch_chains = DispatchChains(config, 6, v6_filter_updater)
            v6_fip_manager = FloatingIPManager(config, 6, v6_nat_updater)
            v6_ep_manager = EndpointManager(
                config,
                IPV6,
                v6_filter_updater,
                v6_dispatch_chains,
                v6_rules_manager,
                v6_fip_manager,
                etcd_api.status_reporter,
            )
            cleanup_updaters.append(v6_filter_updater)
            cleanup_ip_mgrs.append(v6_ipset_mgr)
            update_splitter_args += [v6_ipset_mgr, v6_rules_manager,
                                     v6_ep_manager, v6_raw_updater,
                                     v6_nat_updater]
        cleanup_mgr = CleanupManager(config, cleanup_updaters,
                                     cleanup_ip_mgrs)
        update_splitter_args.append(cleanup_mgr)
        update_splitter = UpdateSplitter(update_splitter_args)
        iface_watcher = InterfaceWatcher(update_splitter)
        _log.info("Starting actors.")
        hosts_ipset_v4.start()
        cleanup_mgr.start()
        v4_filter_updater.start()
        v4_nat_updater.start()
        v4_ipset_mgr.start()
        v4_masq_manager.start()
        v4_rules_manager.start()
        v4_dispatch_chains.start()
        v4_ep_manager.start()
        v4_fip_manager.start()
        if v6_enabled:
            v6_raw_updater.start()
            v6_filter_updater.start()
            v6_ipset_mgr.start()
            v6_nat_updater.start()
            v6_rules_manager.start()
            v6_dispatch_chains.start()
            v6_ep_manager.start()
            v6_fip_manager.start()
        iface_watcher.start()
        # Every actor here is monitored below: if any of their greenlets
        # dies, the whole process is torn down.
        top_level_actors = [
            hosts_ipset_v4,
            cleanup_mgr,
            v4_filter_updater,
            v4_nat_updater,
            v4_ipset_mgr,
            v4_masq_manager,
            v4_rules_manager,
            v4_dispatch_chains,
            v4_ep_manager,
            v4_fip_manager,
            iface_watcher,
            etcd_api,
        ]
        if v6_enabled:
            top_level_actors += [
                v6_raw_updater,
                v6_filter_updater,
                v6_nat_updater,
                v6_ipset_mgr,
                v6_rules_manager,
                v6_dispatch_chains,
                v6_ep_manager,
                v6_fip_manager,
            ]
        monitored_items += [actor.greenlet for actor in top_level_actors]
        # Try to ensure that the nf_conntrack_netlink kernel module is
        # present.  This works around an issue[1] where the first call to
        # the "conntrack" command fails while waiting for the module to
        # load.
        # [1] https://github.com/projectcalico/calico/issues/986
        load_nf_conntrack()
        # Install the global rules before we start polling for updates.
        _log.info("Installing global rules.")
        install_global_rules(config, v4_filter_updater, v4_nat_updater,
                             ip_version=4)
        if v6_enabled:
            install_global_rules(config, v6_filter_updater, v6_nat_updater,
                                 ip_version=6, raw_updater=v6_raw_updater)
        # Start polling for updates.  These kicks make the actors poll
        # indefinitely.
        _log.info("Starting polling for interface and etcd updates.")
        f = iface_watcher.watch_interfaces(async=True)
        monitored_items.append(f)
        etcd_api.start_watch(update_splitter, async=True)

        # Register a SIG_USR handler to trigger a diags dump.
        def dump_top_level_actors(log):
            for a in top_level_actors:
                # The output will include queue length and the like.
                log.info("%s", a)
        futils.register_diags("Top-level actors", dump_top_level_actors)
        futils.register_process_statistics()
        try:
            gevent.signal(signal.SIGUSR1,
                          functools.partial(futils.dump_diags))
        except AttributeError:
            # It doesn't matter too much if we fail to do this.
            _log.warning("Unable to install diag dump handler")
            pass
        # Wait for something to fail.
        _log.info("All top-level actors started, waiting on failures...")
        stopped_greenlets_iter = gevent.iwait(monitored_items)
        stopped_greenlet = next(stopped_greenlets_iter)
        try:
            stopped_greenlet.get()
        except Exception:
            _log.exception("Greenlet failed: %s", stopped_greenlet)
            raise
        else:
            # Top-level greenlets are expected to run forever; a clean
            # return is treated as a failure too.
            _log.error("Greenlet %s unexpectedly returned.",
                       stopped_greenlet)
            raise AssertionError("Greenlet unexpectedly returned")
    except:
        _log.exception("Exception killing main greenlet")
        raise
def _main_greenlet():
    """
    The root of our tree of greenlets.  Responsible for restarting
    its children if desired.

    This variant talks to the datastore via pipes inherited from the
    parent process (FDs 3 and 4) instead of connecting directly.  It
    builds the full actor graph, starts every actor, installs the global
    iptables rules and then blocks until any monitored greenlet dies,
    re-raising so the process exits.
    """
    try:
        _log.info("Connecting to etcd to get our configuration.")
        hosts_ipset_v4 = IpsetActor(HOSTS_IPSET_V4)
        monitored_items = []
        # The parent process sends us communication pipes as FD 3 and 4. Open
        # those as files. Wrap the resulting files in a FileObject to make
        # them cooperate with gevent.
        pipe_from_parent = FileObject(os.fdopen(3, 'rb', -1), 'rb')
        pipe_to_parent = FileObject(os.fdopen(4, 'wb', -1), 'wb')
        config = Config()
        datastore = DatastoreAPI(config, pipe_from_parent, pipe_to_parent,
                                 hosts_ipset_v4)
        datastore.start()
        monitored_items.append(datastore.greenlet)
        # Ask the DatastoreAPI to fill in the global config object before we
        # proceed. We don't yet support config updates.
        config_loaded = datastore.load_config(async=False)
        config_loaded.wait()
        # Ensure the Kernel's global options are correctly configured for
        # Calico.
        devices.configure_global_kernel_config(config)
        # Check the commands we require are present.
        futils.check_command_deps()
        _log.info("Main greenlet: Configuration loaded, starting remaining "
                  "actors...")
        if config.PROM_METRICS_ENABLED:
            # Prometheus metrics server runs in its own greenlet, which is
            # monitored so a crash takes the process down.
            httpd = HTTPServer(("0.0.0.0", config.PROM_METRICS_DRIVER_PORT),
                               MetricsHandler)
            stats_server = gevent.Greenlet(httpd.serve_forever)
            stats_server.start()
            monitored_items.append(stats_server)
        # IPv4 actors are always created.
        v4_filter_updater = IptablesUpdater("filter", ip_version=4,
                                            config=config)
        v4_nat_updater = IptablesUpdater("nat", ip_version=4, config=config)
        v4_ipset_mgr = IpsetManager(IPV4, config)
        v4_masq_manager = MasqueradeManager(IPV4, v4_nat_updater)
        v4_rules_manager = RulesManager(config, 4, v4_filter_updater,
                                        v4_ipset_mgr)
        # Separate dispatch chains for workload and host endpoints.
        v4_ep_dispatch_chains = WorkloadDispatchChains(config, 4,
                                                       v4_filter_updater)
        v4_if_dispatch_chains = HostEndpointDispatchChains(
            config, 4, v4_filter_updater)
        v4_fip_manager = FloatingIPManager(config, 4, v4_nat_updater)
        v4_ep_manager = EndpointManager(config, IPV4, v4_filter_updater,
                                        v4_ep_dispatch_chains,
                                        v4_if_dispatch_chains,
                                        v4_rules_manager,
                                        v4_fip_manager,
                                        datastore.write_api)
        cleanup_updaters = [v4_filter_updater, v4_nat_updater]
        cleanup_ip_mgrs = [v4_ipset_mgr]
        # "managers" feed the UpdateSplitter; "actors_to_start" are started
        # and monitored below.
        managers = [
            v4_ipset_mgr,
            v4_rules_manager,
            v4_ep_manager,
            v4_masq_manager,
            v4_nat_updater
        ]
        actors_to_start = [
            hosts_ipset_v4,
            v4_filter_updater,
            v4_nat_updater,
            v4_ipset_mgr,
            v4_masq_manager,
            v4_rules_manager,
            v4_ep_dispatch_chains,
            v4_if_dispatch_chains,
            v4_ep_manager,
            v4_fip_manager,
        ]
        # Determine if ipv6 is enabled using the config option.
        if config.IPV6_SUPPORT == "true":
            v6_enabled = True
            ipv6_reason = None
        elif config.IPV6_SUPPORT == "auto":
            v6_enabled, ipv6_reason = futils.detect_ipv6_supported()
        else:
            v6_enabled = False
            ipv6_reason = "Ipv6Support is 'false'"
        if v6_enabled:
            v6_raw_updater = IptablesUpdater("raw", ip_version=6,
                                             config=config)
            v6_filter_updater = IptablesUpdater("filter", ip_version=6,
                                                config=config)
            v6_nat_updater = IptablesUpdater("nat", ip_version=6,
                                             config=config)
            v6_ipset_mgr = IpsetManager(IPV6, config)
            v6_rules_manager = RulesManager(config, 6, v6_filter_updater,
                                            v6_ipset_mgr)
            v6_ep_dispatch_chains = WorkloadDispatchChains(
                config, 6, v6_filter_updater)
            v6_if_dispatch_chains = HostEndpointDispatchChains(
                config, 6, v6_filter_updater)
            v6_fip_manager = FloatingIPManager(config, 6, v6_nat_updater)
            v6_ep_manager = EndpointManager(config, IPV6, v6_filter_updater,
                                            v6_ep_dispatch_chains,
                                            v6_if_dispatch_chains,
                                            v6_rules_manager,
                                            v6_fip_manager,
                                            datastore.write_api)
            cleanup_updaters.append(v6_filter_updater)
            cleanup_ip_mgrs.append(v6_ipset_mgr)
            managers += [
                v6_ipset_mgr,
                v6_rules_manager,
                v6_ep_manager,
                v6_raw_updater,
                v6_nat_updater
            ]
            actors_to_start += [
                v6_raw_updater,
                v6_filter_updater,
                v6_nat_updater,
                v6_ipset_mgr,
                v6_rules_manager,
                v6_ep_dispatch_chains,
                v6_if_dispatch_chains,
                v6_ep_manager,
                v6_fip_manager,
            ]
        else:
            # Keep the linter happy.
            _log.warn("IPv6 support disabled: %s.", ipv6_reason)
            v6_filter_updater = None
            v6_nat_updater = None
            v6_raw_updater = None
            v6_if_dispatch_chains = None
        cleanup_mgr = CleanupManager(config, cleanup_updaters,
                                     cleanup_ip_mgrs)
        managers.append(cleanup_mgr)
        update_splitter = UpdateSplitter(managers)
        iface_watcher = InterfaceWatcher(update_splitter)
        actors_to_start += [
            cleanup_mgr,
            iface_watcher,
        ]
        _log.info("Starting actors.")
        for actor in actors_to_start:
            actor.start()
        # Monitor every started actor: if any greenlet dies, the whole
        # process is torn down below.
        monitored_items += [actor.greenlet for actor in actors_to_start]
        # Try to ensure that the nf_conntrack_netlink kernel module is
        # present.  This works around an issue[1] where the first call to
        # the "conntrack" command fails while waiting for the module to
        # load.
        # [1] https://github.com/projectcalico/felix/issues/986
        load_nf_conntrack()
        # Install the global rules before we start polling for updates.
        _log.info("Installing global rules.")
        # Dispatch chain needs to make its configuration before we insert the
        # top-level chains.
        v4_if_dispatch_chains.configure_iptables(async=False)
        install_global_rules(config, v4_filter_updater, v4_nat_updater,
                             ip_version=4)
        if v6_enabled:
            # Dispatch chain needs to make its configuration before we insert
            # the top-level chains.
            v6_if_dispatch_chains.configure_iptables(async=False)
            install_global_rules(config, v6_filter_updater, v6_nat_updater,
                                 ip_version=6, raw_updater=v6_raw_updater)
        # Start polling for updates. These kicks make the actors poll
        # indefinitely.
        _log.info("Starting polling for interface and etcd updates.")
        f = iface_watcher.watch_interfaces(async=True)
        monitored_items.append(f)
        datastore.start_watch(update_splitter, async=True)

        # Register a SIG_USR handler to trigger a diags dump.
        def dump_top_level_actors(log):
            for a in actors_to_start:
                # The output will include queue length and the like.
                log.info("%s", a)
        futils.register_diags("Top-level actors", dump_top_level_actors)
        futils.register_process_statistics()
        try:
            gevent.signal(signal.SIGUSR1,
                          functools.partial(futils.dump_diags))
        except AttributeError:
            # It doesn't matter too much if we fail to do this.
            _log.warning("Unable to install diag dump handler")
            pass
        # Shut down cleanly (via the datastore) on SIGTERM/SIGINT.
        gevent.signal(signal.SIGTERM, functools.partial(shut_down, datastore))
        gevent.signal(signal.SIGINT, functools.partial(shut_down, datastore))
        # Wait for something to fail.
        _log.info("All top-level actors started, waiting on failures...")
        stopped_greenlets_iter = gevent.iwait(monitored_items)
        stopped_greenlet = next(stopped_greenlets_iter)
        try:
            stopped_greenlet.get()
        except Exception:
            _log.exception("Greenlet failed: %s", stopped_greenlet)
            raise
        else:
            # Top-level greenlets are expected to run forever; a clean
            # return is treated as a failure too.
            _log.error("Greenlet %s unexpectedly returned.",
                       stopped_greenlet)
            raise AssertionError("Greenlet unexpectedly returned")
    except:
        _log.exception("Exception killing main greenlet")
        raise
class TestIptablesUpdater(BaseTestCase):
    """
    Unit tests for the IptablesUpdater actor.

    The real iptables commands are never run: the updater's
    _execute_iptables hook is pointed at an in-memory IptablesStub and
    gevent.subprocess.check_output is patched so that iptables-save /
    iptables --list output is generated from the stub's state.
    """

    def setUp(self):
        super(TestIptablesUpdater, self).setUp()
        # In-memory fake of the kernel's "filter" table.
        self.stub = IptablesStub("filter")
        self.m_config = Mock()
        self.m_config.REFRESH_INTERVAL = 0  # disable refresh thread
        self.ipt = IptablesUpdater("filter", self.m_config, 4)
        # Route iptables-restore input into the stub instead of the kernel.
        self.ipt._execute_iptables = self.stub.apply_iptables_restore
        self.check_output_patch = patch("gevent.subprocess.check_output",
                                        autospec=True)
        self.m_check_output = self.check_output_patch.start()
        self.m_check_output.side_effect = self.fake_check_output

    def fake_check_output(self, cmd, *args, **kwargs):
        """
        Replacement for gevent.subprocess.check_output: answers the two
        read-back commands the updater issues from the stub's state and
        fails the test on anything unexpected.
        """
        _log.info("Stubbing out call to %s", cmd)
        if cmd == ["iptables-save", "--table", "filter"]:
            return self.stub.generate_iptables_save()
        elif cmd == ['iptables', '--wait', '--list', '--table', 'filter']:
            return self.stub.generate_iptables_list()
        else:
            raise AssertionError("Unexpected call %r" % cmd)

    def tearDown(self):
        # Undo the check_output patch so it doesn't leak into other tests.
        self.check_output_patch.stop()
        super(TestIptablesUpdater, self).tearDown()

    def test_rewrite_chains_stub(self):
        """
        Tests that referencing a chain causes it to get stubbed out.
        """
        self.ipt.rewrite_chains(
            {"foo": ["--append foo --jump bar"]},
            {"foo": set(["bar"])},
            async=True,
        )
        self.step_actor(self.ipt)
        self.assertEqual(self.stub.chains_contents,
                         {"foo": ["--append foo --jump bar"],
                          'bar': [MISSING_CHAIN_DROP % "bar"]})

    def test_rewrite_chains_cover(self):
        """
        Hits remaining code paths in rewrite chains.
        """
        cb = Mock()
        self.ipt.rewrite_chains(
            {"foo": ["--append foo --jump bar"]},
            {"foo": set(["bar"])},
            suppress_upd_log=True,
            async=True,
            callback=cb,
        )
        self.step_actor(self.ipt)
        cb.assert_called_once_with(None)

    def test_delete_required_chain_stub(self):
        """
        Tests that deleting a required chain stubs it out instead.
        """
        # Exit the graceful restart period, during which we do not stub out
        # chains.
        self.ipt.cleanup(async=True)
        # Install a couple of chains.  foo depends on bar.
        self.ipt.rewrite_chains(
            {"foo": ["--append foo --jump bar"],
             "bar": ["--append bar --jump ACCEPT"]},
            {"foo": set(["bar"]),
             "bar": set()},
            async=True,
        )
        self.step_actor(self.ipt)
        # Both chains should be programmed as normal.
        self.assertEqual(self.stub.chains_contents,
                         {"foo": ["--append foo --jump bar"],
                          'bar': ["--append bar --jump ACCEPT"]})
        # Deleting bar should stub it out instead.
        self.ipt.delete_chains(["bar"], async=True)
        self.step_actor(self.ipt)
        self.assertEqual(self.stub.chains_contents,
                         {"foo": ["--append foo --jump bar"],
                          'bar': [MISSING_CHAIN_DROP % "bar"]})

    def test_cleanup_with_dependencies(self):
        """
        Tests that cleanup() stubs out required-but-unknown chains and
        deletes orphaned felix chains in dependency order, leaving
        non-felix chains untouched.
        """
        # Set up the dataplane with some chains that the IptablesUpdater
        # doesn't know about and some that it will know about.
        self.stub.apply_iptables_restore("""
*filter
:INPUT DROP [10:505]
:FORWARD DROP [0:0]
:OUTPUT ACCEPT [40:1600]
# These non-felix chains should be ignored
:ignore-me -
:ignore-me-too -
# These are left-over felix chains.  Some depend on each other.  They
# can only be cleaned up in the correct order.
:felix-foo - [0:0]
:felix-bar -
:felix-foo -
:felix-baz -
:felix-biff -
--append felix-foo --src 10.0.0.1/32 --jump felix-bar
# baz depends on biff; cleanup needs to detect that.
--append felix-baz --src 10.0.0.2/32 --jump felix-biff
--append felix-biff --src 10.0.0.3/32 --jump DROP
--append ignore-me --jump ignore-me-too
--append ignore-me-too --jump DROP
""".splitlines())
        # IptablesUpdater hears about some chains before the cleanup.  These
        # partially overlap with the ones that are already there.
        self.ipt.rewrite_chains(
            {"felix-foo": ["--append felix-foo --jump felix-bar",
                           "--append felix-foo --jump felix-baz",
                           "--append felix-foo --jump felix-boff"],
             "felix-bar": ["--append felix-bar --jump ACCEPT"]},
            # felix-foo depends on:
            # * a new chain that's also being programmed
            # * a pre-existing chain that is present at start of day
            # * a new chain that isn't present at all.
            {"felix-foo": set(["felix-bar", "felix-baz", "felix-boff"]),
             "felix-bar": set()},
            async=True,
        )
        self.step_actor(self.ipt)
        # Dataplane should now have all the new chains in place, including
        # a stub for felix-boff.  However, the old chains should not have
        # been cleaned up.
        self.stub.assert_chain_contents({
            "INPUT": [],
            "FORWARD": [],
            "OUTPUT": [],
            "ignore-me": ["--append ignore-me --jump ignore-me-too"],
            "ignore-me-too": ["--append ignore-me-too --jump DROP"],
            "felix-foo": ["--append felix-foo --jump felix-bar",
                          "--append felix-foo --jump felix-baz",
                          "--append felix-foo --jump felix-boff"],
            "felix-bar": ["--append felix-bar --jump ACCEPT"],
            "felix-baz": ["--append felix-baz --src 10.0.0.2/32 "
                          "--jump felix-biff"],
            "felix-boff": [MISSING_CHAIN_DROP % "felix-boff"],
            "felix-biff": ["--append felix-biff --src 10.0.0.3/32 "
                           "--jump DROP"],
        })
        # Issue the cleanup.
        self.ipt.cleanup(async=True)
        self.step_actor(self.ipt)
        # Should now have stubbed-out chains for all the ones that are not
        # programmed.
        self.stub.assert_chain_contents({
            # Non felix chains ignored:
            "INPUT": [],
            "FORWARD": [],
            "OUTPUT": [],
            "ignore-me": ["--append ignore-me --jump ignore-me-too"],
            "ignore-me-too": ["--append ignore-me-too --jump DROP"],
            # Explicitly-programmed chains programmed.
            "felix-foo": ["--append felix-foo --jump felix-bar",
                          "--append felix-foo --jump felix-baz",
                          "--append felix-foo --jump felix-boff"],
            "felix-bar": ["--append felix-bar --jump ACCEPT"],
            # All required but unknown chains stubbed.
            "felix-baz": [MISSING_CHAIN_DROP % "felix-baz"],
            "felix-boff": [MISSING_CHAIN_DROP % "felix-boff"],
            # felix-biff deleted, even though it was referenced by felix-baz
            # before.
        })

    def test_cleanup_bad_read_back(self):
        """
        Tests that cleanup logs an error if the post-cleanup read-back of
        iptables-save is out of sync with what we programmed.
        """
        # IptablesUpdater hears about some chains before the cleanup.
        self.ipt.rewrite_chains(
            {"felix-foo": ["--append felix-foo --jump felix-boff"]},
            {"felix-foo": set(["felix-boff"])},
            async=True,
        )
        self.step_actor(self.ipt)
        self.stub.assert_chain_contents({
            "felix-foo": ["--append felix-foo --jump felix-boff"],
            "felix-boff": [MISSING_CHAIN_DROP % "felix-boff"],
        })
        # Some other process then breaks our chains.
        self.stub.chains_contents = {}
        self.stub.iptables_save_output = [
            None,  # Start of cleanup.
            # End of cleanup.  Out of sync:
            "*filter\n"
            ":INPUT DROP [68:4885]\n"
            ":FORWARD DROP [0:0]\n"
            ":OUTPUT ACCEPT [20:888]\n"
            ":DOCKER - [0:0]\n"
            "-A INPUT -i lxcbr0 -p tcp -m tcp --dport 53 -j ACCEPT\n"
            "-A FORWARD -o lxcbr0 -j ACCEPT\n"
            "COMMIT\n"
        ]
        _log.info("Forcing iptables-save to always return %s",
                  self.stub.iptables_save_output)
        # Issue the cleanup.
        with patch.object(fiptables._log, "error") as m_error:
            self.ipt.cleanup(async=True)
            self.step_actor(self.ipt)
            m_error.assert_called_once_with(
                ANY,
                set([]),
                set([]),
                set(["felix-foo", "felix-boff"]))
        self.stub.assert_chain_contents({
            "felix-foo": ["--append felix-foo --jump felix-boff"],
            "felix-boff": [MISSING_CHAIN_DROP % "felix-boff"],
        })

    def test_ensure_rule_inserted(self):
        """
        Tests that ensure_rule_inserted retries a plain insert after the
        delete-and-insert transaction fails (rule not present yet).
        """
        fragment = "FOO --jump DROP"
        with patch.object(self.ipt, "_execute_iptables") as m_exec:
            # First call (delete+insert) fails because the rule isn't there;
            # the updater should fall back to a bare insert.
            m_exec.side_effect = iter([
                FailedSystemCall("Message", [], 1, "", "line 2 failed"),
                None,
                None
            ])
            self.ipt.ensure_rule_inserted(fragment, async=True)
            self.step_actor(self.ipt)
            self.assertEqual(
                m_exec.mock_calls,
                [
                    call(["*filter",
                          "--delete FOO --jump DROP",
                          "--insert FOO --jump DROP",
                          "COMMIT"],
                         fail_log_level=logging.DEBUG),
                    call(["*filter",
                          "--insert FOO --jump DROP",
                          "COMMIT"]),
                ])
            self.assertTrue(fragment in self.ipt._inserted_rule_fragments)

    def test_insert_remove_tracking(self):
        """
        Tests that insert/remove bookkeeping keeps the two fragment sets
        mutually exclusive as a rule is inserted, removed, re-inserted.
        """
        fragment = "FOO --jump DROP"
        with patch.object(self.ipt, "_execute_iptables") as m_exec:
            m_exec.side_effect = [
                # Insert.
                None,
                # Remove: requires an exception to terminate loop.
                None,
                FailedSystemCall("Message", [], 1, "", "line 2 failed"),
                # Insert.
                None,
            ]
            self.ipt.ensure_rule_inserted(fragment, async=True)
            self.step_actor(self.ipt)
            self.assertTrue(fragment in self.ipt._inserted_rule_fragments)
            self.assertTrue(fragment not in self.ipt._removed_rule_fragments)
            self.ipt.ensure_rule_removed(fragment, async=True)
            self.step_actor(self.ipt)
            self.assertTrue(fragment not in self.ipt._inserted_rule_fragments)
            self.assertTrue(fragment in self.ipt._removed_rule_fragments)
            self.ipt.ensure_rule_inserted(fragment, async=True)
            self.step_actor(self.ipt)
            self.assertTrue(fragment in self.ipt._inserted_rule_fragments)
            self.assertTrue(fragment not in self.ipt._removed_rule_fragments)

    def test_ensure_rule_removed(self):
        """
        Tests that ensure_rule_removed keeps deleting until the delete
        fails (i.e. all copies of the rule are gone).
        """
        fragment = "FOO --jump DROP"
        with patch.object(self.ipt, "_execute_iptables") as m_exec:
            # First delete succeeds, second fails => loop runs exactly twice.
            m_exec.side_effect = iter([
                None,
                FailedSystemCall("Message", [], 1, "", "line 2 failed")
            ])
            self.ipt.ensure_rule_removed(fragment, async=True)
            self.step_actor(self.ipt)
            exp_call = call([
                '*filter',
                '--delete FOO --jump DROP',
                'COMMIT',
            ], fail_log_level=logging.DEBUG)
            self.assertEqual(m_exec.mock_calls, [exp_call] * 2)

    def test_ensure_rule_removed_not_present(self):
        """
        Tests removal of a rule that isn't present: single failed delete,
        no retry, no error.
        """
        with patch.object(self.ipt, "_execute_iptables") as m_exec:
            m_exec.side_effect = iter(
                [FailedSystemCall("Message", [], 1, "", "line 2 failed")])
            self.ipt.ensure_rule_removed("FOO --jump DROP", async=True)
            self.step_actor(self.ipt)
            exp_call = call([
                '*filter',
                '--delete FOO --jump DROP',
                'COMMIT',
            ], fail_log_level=logging.DEBUG)
            self.assertEqual(m_exec.mock_calls, [exp_call])

    def test_ensure_rule_removed_missing_dep(self):
        """
        Tests removal when the delete fails due to a missing dependency
        (ipset doesn't exist): treated as already-removed, no retry.
        """
        with patch.object(self.ipt, "_execute_iptables") as m_exec:
            m_exec.side_effect = iter([
                FailedSystemCall("Message", [], 1, "",
                                 "at line: 2\n"
                                 "ipset doesn't exist")
            ])
            self.ipt.ensure_rule_removed("FOO --jump DROP", async=True)
            self.step_actor(self.ipt)
            exp_call = call([
                '*filter',
                '--delete FOO --jump DROP',
                'COMMIT',
            ], fail_log_level=logging.DEBUG)
            self.assertEqual(m_exec.mock_calls, [exp_call])

    def test_ensure_rule_removed_error(self):
        """
        Tests that an unrecognised delete failure propagates to the caller
        as a FailedSystemCall.
        """
        with patch.object(self.ipt, "_execute_iptables") as m_exec:
            m_exec.side_effect = iter(
                [FailedSystemCall("Message", [], 1, "", "the foo is barred")])
            f = self.ipt.ensure_rule_removed("FOO --jump DROP", async=True)
            self.step_actor(self.ipt)
            self.assertRaises(FailedSystemCall, f.get)
            exp_call = call([
                '*filter',
                '--delete FOO --jump DROP',
                'COMMIT',
            ], fail_log_level=logging.DEBUG)
            self.assertEqual(m_exec.mock_calls, [exp_call])

    def test_refresh_iptables(self):
        """
        Tests that refresh_iptables re-applies tracked inserted rules and
        re-removes tracked removed rules.
        """
        self.ipt.ensure_rule_inserted("INPUT -j ACCEPT", async=True)
        self.ipt.ensure_rule_inserted("INPUT -j DROP", async=True)
        self.ipt.ensure_rule_removed("INPUT -j DROP", async=True)
        self.step_actor(self.ipt)
        self.ipt.refresh_iptables(async=True)
        with patch.object(self.ipt, "_insert_rule") as m_insert_rule:
            with patch.object(self.ipt, "_remove_rule") as m_remove_rule:
                self.step_actor(self.ipt)
                m_insert_rule.assert_called_once_with(
                    "INPUT -j ACCEPT", log_level=logging.DEBUG)
                m_remove_rule.assert_called_once_with(
                    "INPUT -j DROP", log_level=logging.DEBUG)