Example #1
 def reset(self):
     self.created_refs = defaultdict(list)
     self.acquired_refs = {}
     self.config = Mock()
     self.config.MAX_IPSET_SIZE = 1234
     self.mgr = IpsetManager(IPV4, self.config)
     self.m_create = Mock(spec=self.mgr._create, side_effect=self.m_create)
     self.mgr._create = self.m_create
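
The reset() helper above swaps the manager's real _create for a Mock whose spec keeps the call signature honest and whose side_effect forwards each call to a test helper, so calls are both recorded and still produce a result. A minimal, self-contained sketch of that spec/side_effect pattern follows; the Greeter class is a made-up stand-in, not part of Felix:

from mock import Mock  # "from unittest.mock import Mock" on Python 3

class Greeter(object):
    def greet(self, name):
        # Real behaviour that we still want to run under the mock.
        return "hello, " + name

g = Greeter()
# The real bound method is captured as spec and side_effect *before* the
# attribute is overwritten, so calls are recorded and then forwarded.
g.greet = Mock(spec=g.greet, side_effect=g.greet)

assert g.greet("felix") == "hello, felix"
g.greet.assert_called_once_with("felix")
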
Example #2
 def test_create(self):
     with patch("calico.felix.ipsets.Ipset") as m_Ipset:
         mgr = IpsetManager(IPV4, self.config)
         tag_ipset = mgr._create("tagid")
     self.assertEqual(tag_ipset.name_stem, "tagid")
     m_Ipset.assert_called_once_with('felix-v4-tagid',
                                     'felix-tmp-v4-tagid',
                                     'inet', 'hash:ip',
                                     max_elem=1234)
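
test_create leans on mock.patch() replacing the Ipset class only for the duration of the with block, after which the constructor arguments can be asserted on the mock. A rough illustration of the same pattern against an ordinary standard-library name (subprocess.Popen is just an arbitrary stand-in here, not anything Felix uses this way):

from mock import patch  # "from unittest.mock import patch" on Python 3
import subprocess

# Inside the with block, subprocess.Popen is a Mock; the real class is
# restored automatically on exit.
with patch("subprocess.Popen") as m_popen:
    subprocess.Popen(["ipset", "list"], stdout=subprocess.PIPE)

m_popen.assert_called_once_with(["ipset", "list"], stdout=subprocess.PIPE)
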
Example #3
 def test_create(self):
     with patch("calico.felix.ipsets.Ipset") as m_Ipset:
         mgr = IpsetManager(IPV4, self.config)
         tag_ipset = mgr._create("tagid")
     self.assertEqual(tag_ipset.name_stem, "tagid")
     m_Ipset.assert_called_once_with('felix-v4-tagid',
                                     'felix-tmp-v4-tagid',
                                     'inet',
                                     'hash:ip',
                                     max_elem=1234)
Example #4
 def reset(self):
     self.created_refs = defaultdict(list)
     self.acquired_refs = {}
     self.mgr = IpsetManager(IPV4)
     self.m_create = Mock(spec=self.mgr._create,
                          side_effect=self.m_create)
     self.mgr._create = self.m_create
Example #5
 def reset(self):
     self.created_refs = defaultdict(list)
     self.acquired_refs = {}
     self.config = Mock()
     self.config.MAX_IPSET_SIZE = 1234
     self.mgr = IpsetManager(IPV4, self.config)
     self.m_create = Mock(spec=self.mgr._create,
                          side_effect=self.m_create)
     self.mgr._create = self.m_create
Example #6
class TestIpsetManager(BaseTestCase):
    def setUp(self):
        super(TestIpsetManager, self).setUp()
        self.reset()

    def reset(self):
        self.created_refs = defaultdict(list)
        self.acquired_refs = {}
        self.mgr = IpsetManager(IPV4)
        self.m_create = Mock(spec=self.mgr._create,
                             side_effect=self.m_create)
        self.mgr._create = self.m_create

    def m_create(self, tag_id):
        _log.info("Creating ipset %s", tag_id)
        ipset = Mock(spec=TagIpset)

        ipset._manager = None
        ipset._id = None
        ipset.ref_mgmt_state = CREATED
        ipset.ref_count = 0
        ipset.owned_ipset_names.return_value = ["felix-v4-" + tag_id,
                                                "felix-v4-tmp-" + tag_id]

        ipset.tag = tag_id
        self.created_refs[tag_id].append(ipset)
        return ipset

    def test_tag_then_endpoint(self):
        # Send in the messages.
        self.mgr.on_tags_update("prof1", ["tag1"], async=True)
        self.mgr.on_endpoint_update(EP_ID_1_1, EP_1_1, async=True)
        # Let the actor process them.
        self.step_mgr()
        self.assert_one_ep_one_tag()

    def test_endpoint_then_tag(self):
        # Send in the messages.
        self.mgr.on_endpoint_update(EP_ID_1_1, EP_1_1, async=True)
        self.mgr.on_tags_update("prof1", ["tag1"], async=True)
        # Let the actor process them.
        self.step_mgr()
        self.assert_one_ep_one_tag()

    def test_endpoint_then_tag_idempotent(self):
        for _ in xrange(3):
            # Send in the messages.
            self.mgr.on_endpoint_update(EP_ID_1_1, EP_1_1, async=True)
            self.mgr.on_tags_update("prof1", ["tag1"], async=True)
            # Let the actor process them.
            self.step_mgr()
            self.assert_one_ep_one_tag()

    def assert_one_ep_one_tag(self):
        self.assertEqual(self.mgr.endpoint_data_by_ep_id, {
            EP_ID_1_1: EP_DATA_1_1,
        })
        self.assertEqual(self.mgr.ip_owners_by_tag, {
            "tag1": {
                "10.0.0.1": {
                    "prof1": set([
                        EP_ID_1_1
                    ])
                }
            }
        })

    def test_change_ip(self):
        # Initial set-up.
        self.mgr.on_tags_update("prof1", ["tag1"], async=True)
        self.mgr.on_endpoint_update(EP_ID_1_1, EP_1_1, async=True)
        self.step_mgr()
        # Update the endpoint's IPs:
        self.mgr.on_endpoint_update(EP_ID_1_1, EP_1_1_NEW_IP, async=True)
        self.step_mgr()

        self.assertEqual(self.mgr.ip_owners_by_tag, {
            "tag1": {
                "10.0.0.2": {
                    "prof1": set([
                        EP_ID_1_1
                    ])
                },
                "10.0.0.3": {
                    "prof1": set([
                        EP_ID_1_1
                    ])
                }
            }
        })

    def test_tag_updates(self):
        # Initial set-up.
        self.mgr.on_endpoint_update(EP_ID_1_1, EP_1_1, async=True)
        self.mgr.on_tags_update("prof1", ["tag1"], async=True)
        self.step_mgr()

        # Add a tag, keep a tag.
        self.mgr.on_tags_update("prof1", ["tag1", "tag2"], async=True)
        self.step_mgr()
        self.assertEqual(self.mgr.ip_owners_by_tag, {
            "tag1": {
                "10.0.0.1": {
                    "prof1": set([
                        EP_ID_1_1
                    ])
                }
            },
            "tag2": {
                "10.0.0.1": {
                    "prof1": set([
                        EP_ID_1_1
                    ])
                }
            }
        })
        self.assertEqual(self.mgr.tags_by_prof_id, {"prof1": ["tag1", "tag2"]})

        # Remove a tag.
        self.mgr.on_tags_update("prof1", ["tag2"], async=True)
        self.step_mgr()
        self.assertEqual(self.mgr.ip_owners_by_tag, {
            "tag2": {
                "10.0.0.1": {
                    "prof1": set([
                        EP_ID_1_1
                    ])
                }
            }
        })

        # Delete the tags:
        self.mgr.on_tags_update("prof1", None, async=True)
        self.step_mgr()
        self.assertEqual(self.mgr.ip_owners_by_tag, {})
        self.assertEqual(self.mgr.tags_by_prof_id, {})

    def step_mgr(self):
        self.step_actor(self.mgr)
        self.assertEqual(self.mgr._dirty_tags, set())

    def test_update_profile_and_ips(self):
        # Initial set-up.
        self.mgr.on_endpoint_update(EP_ID_1_1, EP_1_1, async=True)
        self.mgr.on_tags_update("prof1", ["tag1"], async=True)
        self.mgr.on_tags_update("prof3", ["tag3"], async=True)
        self.step_mgr()

        self.mgr.on_endpoint_update(EP_ID_1_1, EP_1_1_NEW_PROF_IP, async=True)
        self.step_mgr()

        self.assertEqual(self.mgr.ip_owners_by_tag, {
            "tag3": {
                "10.0.0.3": {
                    "prof3": set([
                        EP_ID_1_1
                    ])
                }
            }
        })
        self.assertEqual(self.mgr.endpoint_ids_by_profile_id, {
            "prof3": set([EP_ID_1_1])
        })

    def test_optimize_out_v6(self):
        self.mgr.on_tags_update("prof1", ["tag1"], async=True)
        self.mgr.on_endpoint_update(EP_ID_1_1, EP_1_1, async=True)
        self.mgr.on_endpoint_update(EP_ID_2_1, EP_2_1_IPV6, async=True)
        self.step_mgr()
        # Index should contain only 1_1:
        self.assertEqual(self.mgr.endpoint_data_by_ep_id, {
            EP_ID_1_1: EP_DATA_1_1,
        })

    def test_optimize_out_no_nets(self):
        self.mgr.on_tags_update("prof1", ["tag1"], async=True)
        self.mgr.on_endpoint_update(EP_ID_1_1, EP_1_1, async=True)
        self.mgr.on_endpoint_update(EP_ID_2_1, EP_2_1_NO_NETS, async=True)
        self.step_mgr()
        # Index should contain only 1_1:
        self.assertEqual(self.mgr.endpoint_data_by_ep_id, {
            EP_ID_1_1: EP_DATA_1_1,
        })
        # Should be happy to then add it in.
        self.mgr.on_endpoint_update(EP_ID_2_1, EP_2_1, async=True)
        self.step_mgr()
        # Index should contain both:
        self.assertEqual(self.mgr.endpoint_data_by_ep_id, {
            EP_ID_1_1: EP_DATA_1_1,
            EP_ID_2_1: EP_DATA_2_1,
        })

    def test_duplicate_ips(self):
        # Add in two endpoints with the same IP.
        self.mgr.on_tags_update("prof1", ["tag1"], async=True)
        self.mgr.on_endpoint_update(EP_ID_1_1, EP_1_1, async=True)
        self.mgr.on_endpoint_update(EP_ID_2_1, EP_2_1, async=True)
        self.step_mgr()
        # Index should contain both:
        self.assertEqual(self.mgr.endpoint_data_by_ep_id, {
            EP_ID_1_1: EP_DATA_1_1,
            EP_ID_2_1: EP_DATA_2_1,
        })
        self.assertEqual(self.mgr.ip_owners_by_tag, {
            "tag1": {
                "10.0.0.1": {
                    "prof1": set([
                        EP_ID_1_1,
                        EP_ID_2_1,
                    ])
                }
            }
        })

        # Second profile tags arrive:
        self.mgr.on_tags_update("prof2", ["tag1", "tag2"], async=True)
        self.step_mgr()
        self.assertEqual(self.mgr.ip_owners_by_tag, {
            "tag1": {
                "10.0.0.1": {
                    "prof1": set([
                        EP_ID_1_1,
                        EP_ID_2_1,
                    ]),
                    "prof2": set([
                        EP_ID_1_1,
                    ])
                }
            },
            "tag2": {
                "10.0.0.1": {
                    "prof2": set([
                        EP_ID_1_1,
                    ])
                }
            },
        })

        # Remove one, check the index gets updated.
        self.mgr.on_endpoint_update(EP_ID_2_1, None, async=True)
        self.step_mgr()
        self.assertEqual(self.mgr.endpoint_data_by_ep_id, {
            EP_ID_1_1: EP_DATA_1_1,
        })
        self.assertEqual(self.mgr.ip_owners_by_tag, {
            "tag1": {
                "10.0.0.1": {
                    "prof1": set([
                        EP_ID_1_1,
                    ]),
                    "prof2": set([
                        EP_ID_1_1,
                    ])
                }
            },
            "tag2": {
                "10.0.0.1": {
                    "prof2": set([
                        EP_ID_1_1,
                    ])
                }
            },
        })

        # Remove the other, index should get completely cleaned up.
        self.mgr.on_endpoint_update(EP_ID_1_1, None, async=True)
        self.step_mgr()
        self.assertEqual(self.mgr.endpoint_data_by_ep_id, {})
        self.assertEqual(self.mgr.ip_owners_by_tag, {})

    def on_ref_acquired(self, tag_id, ipset):
        self.acquired_refs[tag_id] = ipset

    @patch("calico.felix.ipsets.list_ipset_names", autospec=True)
    @patch("calico.felix.futils.check_call", autospec=True)
    def test_cleanup(self, m_check_call, m_list_ipsets):
        # Start with a couple ipsets.
        self.mgr.get_and_incref("foo", callback=self.on_ref_acquired,
                                async=True)
        self.mgr.get_and_incref("bar", callback=self.on_ref_acquired,
                                async=True)
        self.step_mgr()
        self.assertEqual(set(self.created_refs.keys()),
                         set(["foo", "bar"]))

        # Notify ready so that the ipsets are marked as started.
        self._notify_ready(["foo", "bar"])
        self.step_mgr()

        # Then decref "bar" so that it gets marked as stopping.
        self.mgr.decref("bar", async=True)
        self.step_mgr()
        self.assertEqual(
            self.mgr.stopping_objects_by_id,
            {"bar": set(self.created_refs["bar"])}
        )

        # Return mix of expected and unexpected ipsets.
        m_list_ipsets.return_value = [
            "not-felix-foo",
            "felix-v6-foo",
            "felix-v6-bazzle",
            "felix-v4-foo",
            "felix-v4-bar",
            "felix-v4-baz",
            "felix-v4-biff",
        ]
        m_check_call.side_effect = iter([
            # Exception on any individual call should be ignored.
            FailedSystemCall("Dummy", [], None, None, None),
            None,
        ])
        self.mgr.cleanup(async=True)
        self.step_mgr()

        # Explicitly check that exactly the right delete calls were made.
        # assert_has_calls would ignore extra calls.
        self.assertEqual(sorted(m_check_call.mock_calls),
                         sorted([
                             call(["ipset", "destroy", "felix-v4-biff"]),
                             call(["ipset", "destroy", "felix-v4-baz"]),
                         ]))

    def test_apply_snapshot_mainline(self):
        self.mgr.apply_snapshot(
            {"prof1": ["tag1"], "prof2": ["B"], "prof3": ["B"]},
            {EP_ID_1_1: EP_1_1,
             EP_ID_2_1: EP_2_1},
            async=True,
        )
        self.mgr.get_and_incref("tag1",
                                callback=self.on_ref_acquired,
                                async=True)
        self.step_mgr()
        self.mgr.on_object_startup_complete("tag1",
                                            self.created_refs["tag1"][0],
                                            async=True)
        self.step_mgr()
        self.mgr.apply_snapshot(
            {"prof1": ["tag1", "tag2"]},
            {EP_ID_1_1: EP_1_1},
            async=True,
        )
        self.step_mgr()
        self.assertEqual(self.mgr.tags_by_prof_id,
                         {"prof1": ["tag1", "tag2"]})
        self.assertEqual(self.mgr.endpoint_data_by_ep_id,
                         {EP_ID_1_1: EP_DATA_1_1})
        ipset = self.acquired_refs["tag1"]
        self.assertEqual(
            ipset.replace_members.mock_calls,
            [
                call(set(['10.0.0.1']), force_reprogram=True, async=True),
                call(set(['10.0.0.1']), force_reprogram=True, async=True),
            ]
        )

    def test_apply_snapshot_forces_reprogram(self):
        # Apply a snapshot but mock the finish call so that we can check that
        # apply_snapshot set the flag...
        self.mgr.apply_snapshot(
            {"prof1": ["A"], "prof2": ["B"]},
            {EP_ID_1_1: EP_1_1,
             EP_ID_2_1: EP_2_1},
            async=True,
        )
        # noinspection PyUnresolvedReferences
        with patch.object(self.mgr, "_finish_msg_batch"):
            self.step_actor(self.mgr)
        self.assertTrue(self.mgr._force_reprogram)

    def test_finish_msg_batch_clears_reprogram_flag(self):
        # Apply a snapshot and step the actor for real, should clear the flag.
        self.mgr.apply_snapshot(
            {"prof1": ["A"]},
            {EP_ID_1_1: EP_1_1},
            async=True,
        )
        self.step_mgr()
        self.assertFalse(self.mgr._force_reprogram)

    def _notify_ready(self, tags):
        for tag in tags:
            self.mgr.on_object_startup_complete(tag, self.created_refs[tag][0],
                                                async=True)
        self.step_mgr()
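
test_cleanup above compares sorted(m_check_call.mock_calls) against the expected list rather than using assert_has_calls, because assert_has_calls would still pass if extra, unwanted delete calls were made. A small stand-alone demonstration of the difference (the mock below is only an illustration, not the Felix check_call helper):

from mock import Mock, call  # "from unittest.mock import ..." on Python 3

m = Mock()
m(["ipset", "destroy", "felix-v4-baz"])
m(["ipset", "destroy", "felix-v4-biff"])
m(["ipset", "destroy", "unexpected-extra"])  # an extra call slips in

# assert_has_calls is satisfied even though the extra call happened...
m.assert_has_calls([call(["ipset", "destroy", "felix-v4-baz"])])

# ...whereas an exact comparison of the full call list catches it.
expected = sorted([call(["ipset", "destroy", "felix-v4-baz"]),
                   call(["ipset", "destroy", "felix-v4-biff"])])
assert sorted(m.mock_calls) != expected  # the stray call makes these differ
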
Example #7
class TestIpsetManager(BaseTestCase):
    def setUp(self):
        super(TestIpsetManager, self).setUp()
        self.reset()

    def reset(self):
        self.created_refs = defaultdict(list)
        self.acquired_refs = {}
        self.config = Mock()
        self.config.MAX_IPSET_SIZE = 1234
        self.mgr = IpsetManager(IPV4, self.config)
        self.m_create = Mock(spec=self.mgr._create, side_effect=self.m_create)
        self.real_create = self.mgr._create
        self.mgr._create = self.m_create

    def m_create(self, tag_or_sel):
        _log.info("Creating ipset %s", tag_or_sel)

        # Do the real creation, to kick off selector indexing, for example.
        with patch("calico.felix.ipsets.RefCountedIpsetActor", autospec=True):
            self.real_create(tag_or_sel)

        # But return a mock...
        ipset = Mock(spec=RefCountedIpsetActor)

        ipset._manager = None
        ipset._id = None
        ipset.ref_mgmt_state = CREATED
        ipset.ref_count = 0
        if isinstance(tag_or_sel, SelectorExpression):
            name_stem = tag_or_sel.unique_id[:8]
        else:
            name_stem = tag_or_sel
        ipset.owned_ipset_names.return_value = [
            "felix-v4-" + name_stem, "felix-v4-tmp-" + name_stem
        ]
        ipset.name_stem = name_stem
        self.created_refs[tag_or_sel].append(ipset)
        return ipset

    def test_create(self):
        with patch("calico.felix.ipsets.Ipset") as m_Ipset:
            mgr = IpsetManager(IPV4, self.config)
            tag_ipset = mgr._create("tagid")
        self.assertEqual(tag_ipset.name_stem, "tagid")
        m_Ipset.assert_called_once_with('felix-v4-tagid',
                                        'felix-tmp-v4-tagid',
                                        'inet',
                                        'hash:ip',
                                        max_elem=1234)

    def test_maybe_start_gates_on_in_sync(self):
        with patch("calico.felix.refcount.ReferenceManager."
                   "_maybe_start") as m_maybe_start:
            self.mgr._maybe_start("tag-123")
            self.assertFalse(m_maybe_start.called)
            self.mgr.on_datamodel_in_sync(async=True)
            self.mgr.on_datamodel_in_sync(async=True)  # No-op
            self.step_mgr()
            self.mgr._maybe_start("tag-123")
            self.assertEqual(m_maybe_start.mock_calls, [call("tag-123")])

    def test_tag_then_endpoint(self):
        # Send in the messages.
        self.mgr.on_tags_update("prof1", ["tag1"], async=True)
        self.mgr.on_endpoint_update(EP_ID_1_1, EP_1_1, async=True)
        # Let the actor process them.
        self.step_mgr()
        self.assert_one_ep_one_tag()
        # Undo our messages to check that the index is correctly updated.
        self.mgr.on_tags_update("prof1", None, async=True)
        self.mgr.on_endpoint_update(EP_ID_1_1, None, async=True)
        self.step_mgr()
        self.assert_index_empty()

    def test_endpoint_then_tag(self):
        # Send in the messages.
        self.mgr.on_endpoint_update(EP_ID_1_1, EP_1_1, async=True)
        self.mgr.on_tags_update("prof1", ["tag1"], async=True)
        # Let the actor process them.
        self.step_mgr()
        self.assert_one_ep_one_tag()

    def test_endpoint_then_tag_idempotent(self):
        for _ in xrange(3):
            # Send in the messages.
            self.mgr.on_endpoint_update(EP_ID_1_1, EP_1_1, async=True)
            self.mgr.on_tags_update("prof1", ["tag1"], async=True)
            # Let the actor process them.
            self.step_mgr()
            self.assert_one_ep_one_tag()

    def assert_one_ep_one_tag(self):
        self.assertEqual(self.mgr.endpoint_data_by_ep_id, {
            EP_ID_1_1: EP_DATA_1_1,
        })
        self.assertEqual(self.mgr.tag_membership_index.ip_owners_by_tag,
                         {"tag1": {
                             "10.0.0.1": ("prof1", EP_ID_1_1),
                         }})

    def test_selector_then_endpoint(self):
        # Send in the messages.  This selector should match even though there
        # are no labels in the endpoint.
        selector = parse_selector("all()")
        self.mgr.get_and_incref(selector,
                                callback=self.on_ref_acquired,
                                async=True)
        self.mgr.on_endpoint_update(EP_ID_1_1, EP_1_1, async=True)
        # Let the actor process them.
        self.step_mgr()

        self.assert_one_selector_one_ep(selector)

        # Undo our messages to check that the index is correctly updated.
        self.mgr.decref(selector, async=True)
        self.mgr.on_endpoint_update(EP_ID_1_1, None, async=True)
        self.step_mgr()
        self.assert_index_empty()

    def test_endpoint_then_selector(self):
        # Send in the messages.
        self.mgr.on_endpoint_update(EP_ID_1_1, EP_1_1, async=True)
        selector = parse_selector("all()")
        self.mgr.get_and_incref(selector,
                                callback=self.on_ref_acquired,
                                async=True)
        # Let the actor process them.
        self.step_mgr()

        self.assert_one_selector_one_ep(selector)

        # Undo our messages to check that the index is correctly updated.
        self.mgr.on_endpoint_update(EP_ID_1_1, None, async=True)
        self.mgr.decref(selector, async=True)
        self.step_mgr()
        self.assert_index_empty()

    def test_non_trivial_selector_parent_match(self):
        """
        Test a selector that relies on both directly-set labels and
        inherited ones.
        """
        # Send in the messages.  The selector needs both a directly-set
        # endpoint label and an inherited profile label, so there is no
        # match until the profile labels arrive.
        selector = parse_selector("a == 'a1' && p == 'p1'")
        self.mgr.get_and_incref(selector,
                                callback=self.on_ref_acquired,
                                async=True)
        self.mgr.on_endpoint_update(EP_ID_1_1, EP_1_1_LABELS, async=True)
        # Let the actor process them.
        self.step_mgr()

        # Should be no match yet.
        self.assertEqual(self.mgr.tag_membership_index.ip_owners_by_tag, {})

        # Now fire in a parent label.
        self.mgr.on_prof_labels_set("prof1", {"p": "p1"}, async=True)
        self.step_mgr()

        # Should now have a match.
        self.assert_one_selector_one_ep(selector)

        # Undo our messages to check that the index is correctly updated.
        self.mgr.on_prof_labels_set("prof1", None, async=True)
        self.step_mgr()
        self.assertEqual(self.mgr.tag_membership_index.ip_owners_by_tag, {})
        self.mgr.decref(selector, async=True)
        self.mgr.on_endpoint_update(EP_ID_1_1, None, async=True)
        self.step_mgr()
        self.assert_index_empty()

    def test_endpoint_ip_update_with_selector_match(self):
        """
        Test a selector that relies on both directly-set labels and
        inherited ones.
        """
        # Send in the messages.  this selector should match even though there
        # are no labels in the endpoint.
        selector = parse_selector("a == 'a1'")
        self.mgr.get_and_incref(selector,
                                callback=self.on_ref_acquired,
                                async=True)
        self.mgr.on_endpoint_update(EP_ID_1_1, EP_1_1_LABELS, async=True)
        self.step_mgr()

        # Should now have a match.
        self.assert_one_selector_one_ep(selector)

        # Now update the IPs, should update the index.
        self.mgr.on_endpoint_update(EP_ID_1_1,
                                    EP_1_1_LABELS_NEW_IP,
                                    async=True)
        self.step_mgr()

        self.assertEqual(self.mgr.tag_membership_index.ip_owners_by_tag,
                         {selector: {
                             "10.0.0.2": ("dummy", EP_ID_1_1),
                         }})

        # Undo our messages to check that the index is correctly updated.
        self.mgr.decref(selector, async=True)
        self.mgr.on_endpoint_update(EP_ID_1_1, None, async=True)
        self.step_mgr()
        self.assert_index_empty()

    def assert_one_selector_one_ep(self, selector):
        self.assertEqual(self.mgr.endpoint_data_by_ep_id, {
            EP_ID_1_1: EP_DATA_1_1,
        })
        self.assertEqual(self.mgr.tag_membership_index.ip_owners_by_tag,
                         {selector: {
                             "10.0.0.1": ("dummy", EP_ID_1_1),
                         }})

    def assert_index_empty(self):
        self.assertEqual(self.mgr.endpoint_data_by_ep_id, {})
        self.assertEqual(self.mgr.tag_membership_index.ip_owners_by_tag, {})

    def test_change_ip(self):
        # Initial set-up.
        self.mgr.on_tags_update("prof1", ["tag1"], async=True)
        self.mgr.on_endpoint_update(EP_ID_1_1, EP_1_1, async=True)
        self.step_mgr()
        # Update the endpoint's IPs:
        self.mgr.on_endpoint_update(EP_ID_1_1, EP_1_1_NEW_IP, async=True)
        self.step_mgr()

        self.assertEqual(
            self.mgr.tag_membership_index.ip_owners_by_tag, {
                "tag1": {
                    "10.0.0.2": ("prof1", EP_ID_1_1),
                    "10.0.0.3": ("prof1", EP_ID_1_1),
                }
            })

    def test_tag_updates(self):
        # Initial set-up.
        self.mgr.on_endpoint_update(EP_ID_1_1, EP_1_1, async=True)
        self.mgr.on_tags_update("prof1", ["tag1"], async=True)
        self.step_mgr()

        # Add a tag, keep a tag.
        self.mgr.on_tags_update("prof1", ["tag1", "tag2"], async=True)
        self.step_mgr()
        self.assertEqual(
            self.mgr.tag_membership_index.ip_owners_by_tag, {
                "tag1": {
                    "10.0.0.1": ("prof1", EP_ID_1_1),
                },
                "tag2": {
                    "10.0.0.1": ("prof1", EP_ID_1_1),
                }
            })
        self.assertEqual(self.mgr.tags_by_prof_id, {"prof1": ["tag1", "tag2"]})

        # Remove a tag.
        self.mgr.on_tags_update("prof1", ["tag2"], async=True)
        self.step_mgr()
        self.assertEqual(self.mgr.tag_membership_index.ip_owners_by_tag,
                         {"tag2": {
                             "10.0.0.1": ("prof1", EP_ID_1_1),
                         }})

        # Delete the tags:
        self.mgr.on_tags_update("prof1", None, async=True)
        self.step_mgr()
        self.assertEqual(self.mgr.tag_membership_index.ip_owners_by_tag, {})
        self.assertEqual(self.mgr.tags_by_prof_id, {})

    def step_mgr(self):
        self.step_actor(self.mgr)

    def test_update_profile_and_ips(self):
        # Initial set-up.
        self.mgr.on_endpoint_update(EP_ID_1_1, EP_1_1, async=True)
        self.mgr.on_tags_update("prof1", ["tag1"], async=True)
        self.mgr.on_tags_update("prof3", ["tag3"], async=True)
        self.step_mgr()

        self.mgr.on_endpoint_update(EP_ID_1_1, EP_1_1_NEW_PROF_IP, async=True)
        self.step_mgr()

        self.assertEqual(self.mgr.tag_membership_index.ip_owners_by_tag,
                         {"tag3": {
                             "10.0.0.3": ("prof3", EP_ID_1_1)
                         }})
        self.assertEqual(self.mgr.endpoint_ids_by_profile_id,
                         {"prof3": set([EP_ID_1_1])})

    def test_optimize_out_v6(self):
        self.mgr.on_tags_update("prof1", ["tag1"], async=True)
        self.mgr.on_endpoint_update(EP_ID_1_1, EP_1_1, async=True)
        self.mgr.on_endpoint_update(EP_ID_2_1, EP_2_1_IPV6, async=True)
        self.step_mgr()
        # Index should contain only 1_1:
        self.assertEqual(self.mgr.endpoint_data_by_ep_id, {
            EP_ID_1_1: EP_DATA_1_1,
        })

    def test_optimize_out_no_nets(self):
        self.mgr.on_tags_update("prof1", ["tag1"], async=True)
        self.mgr.on_endpoint_update(EP_ID_1_1, EP_1_1, async=True)
        self.mgr.on_endpoint_update(EP_ID_2_1, EP_2_1_NO_NETS, async=True)
        self.step_mgr()
        # Index should contain only 1_1:
        self.assertEqual(self.mgr.endpoint_data_by_ep_id, {
            EP_ID_1_1: EP_DATA_1_1,
        })
        # Should be happy to then add it in.
        self.mgr.on_endpoint_update(EP_ID_2_1, EP_2_1, async=True)
        self.step_mgr()
        # Index should contain both:
        self.assertEqual(self.mgr.endpoint_data_by_ep_id, {
            EP_ID_1_1: EP_DATA_1_1,
            EP_ID_2_1: EP_DATA_2_1,
        })

    def test_duplicate_ips(self):
        # Add in two endpoints with the same IP.
        self.mgr.on_tags_update("prof1", ["tag1"], async=True)
        self.mgr.on_endpoint_update(EP_ID_1_1, EP_1_1, async=True)
        self.mgr.on_endpoint_update(EP_ID_2_1, EP_2_1, async=True)
        self.step_mgr()
        # Index should contain both:
        self.assertEqual(self.mgr.endpoint_data_by_ep_id, {
            EP_ID_1_1: EP_DATA_1_1,
            EP_ID_2_1: EP_DATA_2_1,
        })
        self.assertEqual(
            self.mgr.tag_membership_index.ip_owners_by_tag, {
                "tag1": {
                    "10.0.0.1": set([
                        ("prof1", EP_ID_1_1),
                        ("prof1", EP_ID_2_1),
                    ])
                }
            })

        # Second profile tags arrive:
        self.mgr.on_tags_update("prof2", ["tag1", "tag2"], async=True)
        self.step_mgr()
        self.assertEqual(
            self.mgr.tag_membership_index.ip_owners_by_tag, {
                "tag1": {
                    "10.0.0.1":
                    set([
                        ("prof1", EP_ID_1_1),
                        ("prof1", EP_ID_2_1),
                        ("prof2", EP_ID_1_1),
                    ])
                },
                "tag2": {
                    "10.0.0.1": ("prof2", EP_ID_1_1),
                },
            })

        # Remove one, check the index gets updated.
        self.mgr.on_endpoint_update(EP_ID_2_1, None, async=True)
        self.step_mgr()
        self.assertEqual(self.mgr.endpoint_data_by_ep_id, {
            EP_ID_1_1: EP_DATA_1_1,
        })
        self.assertEqual(
            self.mgr.tag_membership_index.ip_owners_by_tag, {
                "tag1": {
                    "10.0.0.1": set([
                        ("prof1", EP_ID_1_1),
                        ("prof2", EP_ID_1_1),
                    ])
                },
                "tag2": {
                    "10.0.0.1": ("prof2", EP_ID_1_1),
                },
            })

        # Remove the other, index should get completely cleaned up.
        self.mgr.on_endpoint_update(EP_ID_1_1, None, async=True)
        self.step_mgr()
        self.assertEqual(self.mgr.endpoint_data_by_ep_id, {})
        self.assertEqual(
            self.mgr.tag_membership_index.ip_owners_by_tag, {},
            "ip_owners_by_tag should be empty, not %s" %
            pformat(self.mgr.tag_membership_index.ip_owners_by_tag))

    def on_ref_acquired(self, tag_id, ipset):
        self.acquired_refs[tag_id] = ipset

    @patch("calico.felix.ipsets.list_ipset_names", autospec=True)
    @patch("calico.felix.futils.check_call", autospec=True)
    def test_cleanup(self, m_check_call, m_list_ipsets):
        # We're testing the in-sync processing
        self.mgr.on_datamodel_in_sync(async=True)
        # Start with a couple ipsets.
        self.mgr.get_and_incref("foo",
                                callback=self.on_ref_acquired,
                                async=True)
        self.mgr.get_and_incref("bar",
                                callback=self.on_ref_acquired,
                                async=True)
        self.step_mgr()
        self.assertEqual(set(self.created_refs.keys()), set(["foo", "bar"]))

        # Notify ready so that the ipsets are marked as started.
        self._notify_ready(["foo", "bar"])
        self.step_mgr()

        # Then decref "bar" so that it gets marked as stopping.
        self.mgr.decref("bar", async=True)
        self.step_mgr()
        self.assertEqual(self.mgr.stopping_objects_by_id,
                         {"bar": set(self.created_refs["bar"])})

        # Return mix of expected and unexpected ipsets.
        m_list_ipsets.return_value = [
            "not-felix-foo",
            "felix-v6-foo",
            "felix-v6-bazzle",
            "felix-v4-foo",
            "felix-v4-bar",
            "felix-v4-baz",
            "felix-v4-biff",
        ]
        m_check_call.side_effect = iter([
            # Exception on any individual call should be ignored.
            FailedSystemCall("Dummy", [], None, None, None),
            None,
        ])
        self.mgr.cleanup(async=True)
        self.step_mgr()

        # Explicitly check that exactly the right delete calls were made.
        # assert_has_calls would ignore extra calls.
        self.assertEqual(
            sorted(m_check_call.mock_calls),
            sorted([
                call(["ipset", "destroy", "felix-v4-biff"]),
                call(["ipset", "destroy", "felix-v4-baz"]),
            ]))

    def test_update_dirty(self):
        self.mgr._datamodel_in_sync = True
        m_ipset = Mock(spec=RefCountedIpsetActor)
        self.mgr.objects_by_id["tag-123"] = m_ipset
        with patch.object(self.mgr, "_is_starting_or_live",
                          autospec=True) as m_sol:
            m_sol.return_value = True
            with patch.object(self.mgr.tag_membership_index,
                              "get_and_reset_changes_by_tag",
                              autospec=True) as m_get_and_reset:
                m_get_and_reset.return_value = ({
                    "tag-123": set(["10.0.0.1"])
                }, {
                    "tag-123": set(["10.0.0.2"])
                })
                self.mgr._update_dirty_active_ipsets()
                self.assertEqual(m_ipset.add_members.mock_calls,
                                 [call(set(["10.0.0.1"]), async=True)])
                self.assertEqual(m_ipset.remove_members.mock_calls,
                                 [call(set(["10.0.0.2"]), async=True)])

    def _notify_ready(self, tags):
        for tag in tags:
            self.mgr.on_object_startup_complete(tag,
                                                self.created_refs[tag][0],
                                                async=True)
        self.step_mgr()
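
test_cleanup also assigns an iterator to m_check_call.side_effect so that the first delete call raises and the second succeeds: mock works through an iterable side_effect one element per call, raising any element that is an exception. A tiny stand-alone example of that behaviour (ValueError stands in for FailedSystemCall):

from mock import Mock  # "from unittest.mock import Mock" on Python 3

m = Mock(side_effect=iter([ValueError("first call fails"), "second result"]))
try:
    m()
except ValueError:
    pass                       # the first element was raised
assert m() == "second result"  # the second element was returned
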
Example #8
def _main_greenlet(config):
    """
    The root of our tree of greenlets.  Responsible for restarting
    its children if desired.
    """
    try:
        _log.info("Connecting to etcd to get our configuration.")
        etcd_watcher = EtcdWatcher(config)
        etcd_watcher.start()
        # Ask the EtcdWatcher to fill in the global config object before we
        # proceed.  We don't yet support config updates.
        etcd_watcher.load_config(async=False)

        _log.info("Main greenlet: Configuration loaded, starting remaining "
                  "actors...")
        v4_filter_updater = IptablesUpdater("filter", ip_version=4)
        v4_nat_updater = IptablesUpdater("nat", ip_version=4)
        v4_ipset_mgr = IpsetManager(IPV4)
        v4_rules_manager = RulesManager(4, v4_filter_updater, v4_ipset_mgr)
        v4_dispatch_chains = DispatchChains(config, 4, v4_filter_updater)
        v4_ep_manager = EndpointManager(config,
                                        IPV4,
                                        v4_filter_updater,
                                        v4_dispatch_chains,
                                        v4_rules_manager)

        v6_filter_updater = IptablesUpdater("filter", ip_version=6)
        v6_ipset_mgr = IpsetManager(IPV6)
        v6_rules_manager = RulesManager(6, v6_filter_updater, v6_ipset_mgr)
        v6_dispatch_chains = DispatchChains(config, 6, v6_filter_updater)
        v6_ep_manager = EndpointManager(config,
                                        IPV6,
                                        v6_filter_updater,
                                        v6_dispatch_chains,
                                        v6_rules_manager)

        update_splitter = UpdateSplitter(config,
                                         [v4_ipset_mgr, v6_ipset_mgr],
                                         [v4_rules_manager, v6_rules_manager],
                                         [v4_ep_manager, v6_ep_manager],
                                         [v4_filter_updater, v6_filter_updater])
        iface_watcher = InterfaceWatcher(update_splitter)

        _log.info("Starting actors.")
        update_splitter.start()

        v4_filter_updater.start()
        v4_nat_updater.start()
        v4_ipset_mgr.start()
        v4_rules_manager.start()
        v4_dispatch_chains.start()
        v4_ep_manager.start()

        v6_filter_updater.start()
        v6_ipset_mgr.start()
        v6_rules_manager.start()
        v6_dispatch_chains.start()
        v6_ep_manager.start()

        iface_watcher.start()

        monitored_items = [
            update_splitter.greenlet,

            v4_filter_updater.greenlet,
            v4_nat_updater.greenlet,
            v4_ipset_mgr.greenlet,
            v4_rules_manager.greenlet,
            v4_dispatch_chains.greenlet,
            v4_ep_manager.greenlet,

            v6_filter_updater.greenlet,
            v6_ipset_mgr.greenlet,
            v6_rules_manager.greenlet,
            v6_dispatch_chains.greenlet,
            v6_ep_manager.greenlet,

            iface_watcher.greenlet,
            etcd_watcher.greenlet
        ]

        # Install the global rules before we start polling for updates.
        _log.info("Installing global rules.")
        install_global_rules(config, v4_filter_updater, v6_filter_updater,
                             v4_nat_updater)

        # Start polling for updates. These kicks make the actors poll
        # indefinitely.
        _log.info("Starting polling for interface and etcd updates.")
        f = iface_watcher.watch_interfaces(async=True)
        monitored_items.append(f)
        f = etcd_watcher.watch_etcd(update_splitter, async=True)
        monitored_items.append(f)

        # Wait for something to fail.
        _log.info("All top-level actors started, waiting on failures...")
        stopped_greenlets_iter = gevent.iwait(monitored_items)

        stopped_greenlet = next(stopped_greenlets_iter)
        try:
            stopped_greenlet.get()
        except Exception:
            _log.exception("Greenlet failed: %s", stopped_greenlet)
            raise
        else:
            _log.error("Greenlet %s unexpectedly returned.", stopped_greenlet)
            raise AssertionError("Greenlet unexpectedly returned")
    except:
        _log.exception("Exception killing main greenlet")
        raise
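
_main_greenlet finishes by blocking until the first monitored greenlet dies: gevent.iwait() yields greenlets as they finish, and get() re-raises whatever killed the one that stopped. A minimal sketch of that idiom with two made-up workers:

import gevent

def steady_worker():
    while True:
        gevent.sleep(1)

def failing_worker():
    gevent.sleep(0.1)
    raise RuntimeError("boom")

workers = [gevent.spawn(steady_worker), gevent.spawn(failing_worker)]
# iwait() yields the greenlets in the order they finish; next() blocks
# until the first one stops.
first_stopped = next(gevent.iwait(workers))
try:
    first_stopped.get()  # re-raises the greenlet's exception, if any
except RuntimeError as e:
    print("greenlet died: %r" % e)
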
Example #9
def _main_greenlet(config):
    """
    The root of our tree of greenlets.  Responsible for restarting
    its children if desired.
    """
    try:
        _log.info("Connecting to etcd to get our configuration.")
        hosts_ipset_v4 = IpsetActor(HOSTS_IPSET_V4)

        etcd_api = EtcdAPI(config, hosts_ipset_v4)
        etcd_api.start()
        # Ask the EtcdAPI to fill in the global config object before we
        # proceed.  We don't yet support config updates.
        config_loaded = etcd_api.load_config(async=False)
        config_loaded.wait()

        # Ensure the Kernel's global options are correctly configured for
        # Calico.
        devices.configure_global_kernel_config()

        _log.info("Main greenlet: Configuration loaded, starting remaining "
                  "actors...")

        monitored_items = []
        if config.PROM_METRICS_ENABLED:
            httpd = HTTPServer(("0.0.0.0", config.PROM_METRICS_PORT),
                               MetricsHandler)
            stats_server = gevent.Greenlet(httpd.serve_forever)
            stats_server.start()
            monitored_items.append(stats_server)

        v4_filter_updater = IptablesUpdater("filter", ip_version=4,
                                            config=config)
        v4_nat_updater = IptablesUpdater("nat", ip_version=4, config=config)
        v4_ipset_mgr = IpsetManager(IPV4, config)
        v4_masq_manager = MasqueradeManager(IPV4, v4_nat_updater)
        v4_rules_manager = RulesManager(config,
                                        4,
                                        v4_filter_updater,
                                        v4_ipset_mgr)
        v4_dispatch_chains = DispatchChains(config, 4, v4_filter_updater)
        v4_fip_manager = FloatingIPManager(config, 4, v4_nat_updater)
        v4_ep_manager = EndpointManager(config,
                                        IPV4,
                                        v4_filter_updater,
                                        v4_dispatch_chains,
                                        v4_rules_manager,
                                        v4_fip_manager,
                                        etcd_api.status_reporter)

        cleanup_updaters = [v4_filter_updater, v4_nat_updater]
        cleanup_ip_mgrs = [v4_ipset_mgr]
        update_splitter_args = [v4_ipset_mgr,
                                v4_rules_manager,
                                v4_ep_manager,
                                v4_masq_manager,
                                v4_nat_updater]

        v6_enabled = os.path.exists("/proc/sys/net/ipv6")
        if v6_enabled:
            v6_raw_updater = IptablesUpdater("raw", ip_version=6, config=config)
            v6_filter_updater = IptablesUpdater("filter", ip_version=6,
                                                config=config)
            v6_nat_updater = IptablesUpdater("nat", ip_version=6, config=config)
            v6_ipset_mgr = IpsetManager(IPV6, config)
            v6_rules_manager = RulesManager(config,
                                            6,
                                            v6_filter_updater,
                                            v6_ipset_mgr)
            v6_dispatch_chains = DispatchChains(config, 6, v6_filter_updater)
            v6_fip_manager = FloatingIPManager(config, 6, v6_nat_updater)
            v6_ep_manager = EndpointManager(config,
                                            IPV6,
                                            v6_filter_updater,
                                            v6_dispatch_chains,
                                            v6_rules_manager,
                                            v6_fip_manager,
                                            etcd_api.status_reporter)
            cleanup_updaters.append(v6_filter_updater)
            cleanup_ip_mgrs.append(v6_ipset_mgr)
            update_splitter_args += [v6_ipset_mgr,
                                     v6_rules_manager,
                                     v6_ep_manager,
                                     v6_raw_updater,
                                     v6_nat_updater]

        cleanup_mgr = CleanupManager(config, cleanup_updaters, cleanup_ip_mgrs)
        update_splitter_args.append(cleanup_mgr)
        update_splitter = UpdateSplitter(update_splitter_args)
        iface_watcher = InterfaceWatcher(update_splitter)

        _log.info("Starting actors.")
        hosts_ipset_v4.start()
        cleanup_mgr.start()

        v4_filter_updater.start()
        v4_nat_updater.start()
        v4_ipset_mgr.start()
        v4_masq_manager.start()
        v4_rules_manager.start()
        v4_dispatch_chains.start()
        v4_ep_manager.start()
        v4_fip_manager.start()

        if v6_enabled:
            v6_raw_updater.start()
            v6_filter_updater.start()
            v6_ipset_mgr.start()
            v6_nat_updater.start()
            v6_rules_manager.start()
            v6_dispatch_chains.start()
            v6_ep_manager.start()
            v6_fip_manager.start()

        iface_watcher.start()

        top_level_actors = [
            hosts_ipset_v4,
            cleanup_mgr,

            v4_filter_updater,
            v4_nat_updater,
            v4_ipset_mgr,
            v4_masq_manager,
            v4_rules_manager,
            v4_dispatch_chains,
            v4_ep_manager,
            v4_fip_manager,

            iface_watcher,
            etcd_api,
        ]

        if v6_enabled:
            top_level_actors += [
                v6_raw_updater,
                v6_filter_updater,
                v6_nat_updater,
                v6_ipset_mgr,
                v6_rules_manager,
                v6_dispatch_chains,
                v6_ep_manager,
                v6_fip_manager,
            ]

        monitored_items += [actor.greenlet for actor in top_level_actors]

        # Try to ensure that the nf_conntrack_netlink kernel module is present.
        # This works around an issue[1] where the first call to the "conntrack"
        # command fails while waiting for the module to load.
        # [1] https://github.com/projectcalico/calico/issues/986
        load_nf_conntrack()

        # Install the global rules before we start polling for updates.
        _log.info("Installing global rules.")
        install_global_rules(config, v4_filter_updater, v4_nat_updater,
                             ip_version=4)
        if v6_enabled:
            install_global_rules(config, v6_filter_updater, v6_nat_updater,
                                 ip_version=6, raw_updater=v6_raw_updater)

        # Start polling for updates. These kicks make the actors poll
        # indefinitely.
        _log.info("Starting polling for interface and etcd updates.")
        f = iface_watcher.watch_interfaces(async=True)
        monitored_items.append(f)
        etcd_api.start_watch(update_splitter, async=True)

        # Register a SIG_USR handler to trigger a diags dump.
        def dump_top_level_actors(log):
            for a in top_level_actors:
                # The output will include queue length and the like.
                log.info("%s", a)
        futils.register_diags("Top-level actors", dump_top_level_actors)
        futils.register_process_statistics()
        try:
            gevent.signal(signal.SIGUSR1, functools.partial(futils.dump_diags))
        except AttributeError:
            # It doesn't matter too much if we fail to do this.
            _log.warning("Unable to install diag dump handler")
            pass

        # Wait for something to fail.
        _log.info("All top-level actors started, waiting on failures...")
        stopped_greenlets_iter = gevent.iwait(monitored_items)

        stopped_greenlet = next(stopped_greenlets_iter)
        try:
            stopped_greenlet.get()
        except Exception:
            _log.exception("Greenlet failed: %s", stopped_greenlet)
            raise
        else:
            _log.error("Greenlet %s unexpectedly returned.", stopped_greenlet)
            raise AssertionError("Greenlet unexpectedly returned")
    except:
        _log.exception("Exception killing main greenlet")
        raise
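
When PROM_METRICS_ENABLED is set, the snippet above serves Prometheus metrics by running a plain HTTPServer inside its own greenlet. A rough sketch of that pattern, assuming MetricsHandler is the prometheus_client request handler (its import isn't shown in the snippet) and using a made-up port number:

import gevent
from prometheus_client import MetricsHandler
try:
    from BaseHTTPServer import HTTPServer   # Python 2
except ImportError:
    from http.server import HTTPServer      # Python 3

# Serve /metrics on a hypothetical port without blocking the main greenlet
# (assumes the process is gevent monkey-patched, so the blocking socket
# calls inside serve_forever cooperate with other greenlets).
httpd = HTTPServer(("0.0.0.0", 9091), MetricsHandler)
stats_server = gevent.Greenlet(httpd.serve_forever)
stats_server.start()
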
Example #10
def _main_greenlet(config):
    """
    The root of our tree of greenlets.  Responsible for restarting
    its children if desired.
    """
    try:
        _log.info("Connecting to etcd to get our configuration.")
        hosts_ipset_v4 = IpsetActor(HOSTS_IPSET_V4)

        etcd_api = EtcdAPI(config, hosts_ipset_v4)
        etcd_api.start()
        # Ask the EtcdAPI to fill in the global config object before we
        # proceed.  We don't yet support config updates.
        config_loaded = etcd_api.load_config(async=False)
        config_loaded.wait()

        # Ensure the Kernel's global options are correctly configured for
        # Calico.
        devices.configure_global_kernel_config()

        _log.info("Main greenlet: Configuration loaded, starting remaining "
                  "actors...")
        v4_filter_updater = IptablesUpdater("filter", ip_version=4,
                                            config=config)
        v4_nat_updater = IptablesUpdater("nat", ip_version=4, config=config)
        v4_ipset_mgr = IpsetManager(IPV4)
        v4_masq_manager = MasqueradeManager(IPV4, v4_nat_updater)
        v4_rules_manager = RulesManager(4, v4_filter_updater, v4_ipset_mgr)
        v4_dispatch_chains = DispatchChains(config, 4, v4_filter_updater)
        v4_ep_manager = EndpointManager(config,
                                        IPV4,
                                        v4_filter_updater,
                                        v4_dispatch_chains,
                                        v4_rules_manager,
                                        etcd_api.status_reporter)

        v6_raw_updater = IptablesUpdater("raw", ip_version=6, config=config)
        v6_filter_updater = IptablesUpdater("filter", ip_version=6,
                                            config=config)
        v6_ipset_mgr = IpsetManager(IPV6)
        v6_rules_manager = RulesManager(6, v6_filter_updater, v6_ipset_mgr)
        v6_dispatch_chains = DispatchChains(config, 6, v6_filter_updater)
        v6_ep_manager = EndpointManager(config,
                                        IPV6,
                                        v6_filter_updater,
                                        v6_dispatch_chains,
                                        v6_rules_manager,
                                        etcd_api.status_reporter)

        update_splitter = UpdateSplitter(config,
                                         [v4_ipset_mgr, v6_ipset_mgr],
                                         [v4_rules_manager, v6_rules_manager],
                                         [v4_ep_manager, v6_ep_manager],
                                         [v4_filter_updater,
                                          v6_filter_updater,
                                          v6_raw_updater,
                                          v4_nat_updater],
                                         v4_masq_manager)
        iface_watcher = InterfaceWatcher(update_splitter)

        _log.info("Starting actors.")
        hosts_ipset_v4.start()
        update_splitter.start()

        v4_filter_updater.start()
        v4_nat_updater.start()
        v4_ipset_mgr.start()
        v4_masq_manager.start()
        v4_rules_manager.start()
        v4_dispatch_chains.start()
        v4_ep_manager.start()

        v6_raw_updater.start()
        v6_filter_updater.start()
        v6_ipset_mgr.start()
        v6_rules_manager.start()
        v6_dispatch_chains.start()
        v6_ep_manager.start()

        iface_watcher.start()

        top_level_actors = [
            hosts_ipset_v4,
            update_splitter,

            v4_filter_updater,
            v4_nat_updater,
            v4_ipset_mgr,
            v4_masq_manager,
            v4_rules_manager,
            v4_dispatch_chains,
            v4_ep_manager,

            v6_raw_updater,
            v6_filter_updater,
            v6_ipset_mgr,
            v6_rules_manager,
            v6_dispatch_chains,
            v6_ep_manager,

            iface_watcher,
            etcd_api,
        ]

        monitored_items = [actor.greenlet for actor in top_level_actors]

        # Install the global rules before we start polling for updates.
        _log.info("Installing global rules.")
        install_global_rules(config, v4_filter_updater, v6_filter_updater,
                             v4_nat_updater, v6_raw_updater)

        # Start polling for updates. These kicks make the actors poll
        # indefinitely.
        _log.info("Starting polling for interface and etcd updates.")
        f = iface_watcher.watch_interfaces(async=True)
        monitored_items.append(f)
        etcd_api.start_watch(update_splitter, async=True)

        # Register a SIG_USR handler to trigger a diags dump.
        def dump_top_level_actors(log):
            for a in top_level_actors:
                # The output will include queue length and the like.
                log.info("%s", a)
        futils.register_diags("Top-level actors", dump_top_level_actors)
        futils.register_process_statistics()
        try:
            gevent.signal(signal.SIGUSR1, functools.partial(futils.dump_diags))
        except AttributeError:
            # It doesn't matter too much if we fail to do this.
            _log.warning("Unable to install diag dump handler")
            pass

        # Wait for something to fail.
        _log.info("All top-level actors started, waiting on failures...")
        stopped_greenlets_iter = gevent.iwait(monitored_items)

        stopped_greenlet = next(stopped_greenlets_iter)
        try:
            stopped_greenlet.get()
        except Exception:
            _log.exception("Greenlet failed: %s", stopped_greenlet)
            raise
        else:
            _log.error("Greenlet %s unexpectedly returned.", stopped_greenlet)
            raise AssertionError("Greenlet unexpectedly returned")
    except:
        _log.exception("Exception killing main greenlet")
        raise
Example #11
 def reset(self):
     self.created_refs = defaultdict(list)
     self.acquired_refs = {}
     self.mgr = IpsetManager(IPV4)
     self.m_create = Mock(spec=self.mgr._create, side_effect=self.m_create)
     self.mgr._create = self.m_create
Example #12
def _main_greenlet(config):
    """
    The root of our tree of greenlets.  Responsible for restarting
    its children if desired.
    """
    try:
        _log.info("Connecting to etcd to get our configuration.")
        hosts_ipset_v4 = IpsetActor(HOSTS_IPSET_V4)

        etcd_api = EtcdAPI(config, hosts_ipset_v4)
        etcd_api.start()
        # Ask the EtcdAPI to fill in the global config object before we
        # proceed.  We don't yet support config updates.
        config_loaded = etcd_api.load_config(async=False)
        config_loaded.wait()

        # Ensure the Kernel's global options are correctly configured for
        # Calico.
        devices.configure_global_kernel_config()

        _log.info("Main greenlet: Configuration loaded, starting remaining " "actors...")

        monitored_items = []
        if config.PROM_METRICS_ENABLED:
            httpd = HTTPServer(("0.0.0.0", config.PROM_METRICS_PORT), MetricsHandler)
            stats_server = gevent.Greenlet(httpd.serve_forever)
            stats_server.start()
            monitored_items.append(stats_server)

        v4_filter_updater = IptablesUpdater("filter", ip_version=4, config=config)
        v4_nat_updater = IptablesUpdater("nat", ip_version=4, config=config)
        v4_ipset_mgr = IpsetManager(IPV4, config)
        v4_masq_manager = MasqueradeManager(IPV4, v4_nat_updater)
        v4_rules_manager = RulesManager(config, 4, v4_filter_updater, v4_ipset_mgr)
        v4_dispatch_chains = DispatchChains(config, 4, v4_filter_updater)
        v4_fip_manager = FloatingIPManager(config, 4, v4_nat_updater)
        v4_ep_manager = EndpointManager(
            config,
            IPV4,
            v4_filter_updater,
            v4_dispatch_chains,
            v4_rules_manager,
            v4_fip_manager,
            etcd_api.status_reporter,
        )

        cleanup_updaters = [v4_filter_updater, v4_nat_updater]
        cleanup_ip_mgrs = [v4_ipset_mgr]
        update_splitter_args = [v4_ipset_mgr, v4_rules_manager, v4_ep_manager, v4_masq_manager, v4_nat_updater]

        v6_enabled = os.path.exists("/proc/sys/net/ipv6")
        if v6_enabled:
            v6_raw_updater = IptablesUpdater("raw", ip_version=6, config=config)
            v6_filter_updater = IptablesUpdater("filter", ip_version=6, config=config)
            v6_nat_updater = IptablesUpdater("nat", ip_version=6, config=config)
            v6_ipset_mgr = IpsetManager(IPV6, config)
            v6_rules_manager = RulesManager(config, 6, v6_filter_updater, v6_ipset_mgr)
            v6_dispatch_chains = DispatchChains(config, 6, v6_filter_updater)
            v6_fip_manager = FloatingIPManager(config, 6, v6_nat_updater)
            v6_ep_manager = EndpointManager(
                config,
                IPV6,
                v6_filter_updater,
                v6_dispatch_chains,
                v6_rules_manager,
                v6_fip_manager,
                etcd_api.status_reporter,
            )
            cleanup_updaters.append(v6_filter_updater)
            cleanup_ip_mgrs.append(v6_ipset_mgr)
            update_splitter_args += [v6_ipset_mgr, v6_rules_manager, v6_ep_manager, v6_raw_updater, v6_nat_updater]

        cleanup_mgr = CleanupManager(config, cleanup_updaters, cleanup_ip_mgrs)
        update_splitter_args.append(cleanup_mgr)
        update_splitter = UpdateSplitter(update_splitter_args)
        iface_watcher = InterfaceWatcher(update_splitter)

        _log.info("Starting actors.")
        hosts_ipset_v4.start()
        cleanup_mgr.start()

        v4_filter_updater.start()
        v4_nat_updater.start()
        v4_ipset_mgr.start()
        v4_masq_manager.start()
        v4_rules_manager.start()
        v4_dispatch_chains.start()
        v4_ep_manager.start()
        v4_fip_manager.start()

        if v6_enabled:
            v6_raw_updater.start()
            v6_filter_updater.start()
            v6_ipset_mgr.start()
            v6_nat_updater.start()
            v6_rules_manager.start()
            v6_dispatch_chains.start()
            v6_ep_manager.start()
            v6_fip_manager.start()

        iface_watcher.start()

        top_level_actors = [
            hosts_ipset_v4,
            cleanup_mgr,
            v4_filter_updater,
            v4_nat_updater,
            v4_ipset_mgr,
            v4_masq_manager,
            v4_rules_manager,
            v4_dispatch_chains,
            v4_ep_manager,
            v4_fip_manager,
            iface_watcher,
            etcd_api,
        ]

        if v6_enabled:
            top_level_actors += [
                v6_raw_updater,
                v6_filter_updater,
                v6_nat_updater,
                v6_ipset_mgr,
                v6_rules_manager,
                v6_dispatch_chains,
                v6_ep_manager,
                v6_fip_manager,
            ]

        monitored_items += [actor.greenlet for actor in top_level_actors]

        # Try to ensure that the nf_conntrack_netlink kernel module is present.
        # This works around an issue[1] where the first call to the "conntrack"
        # command fails while waiting for the module to load.
        # [1] https://github.com/projectcalico/calico/issues/986
        load_nf_conntrack()

        # Install the global rules before we start polling for updates.
        _log.info("Installing global rules.")
        install_global_rules(config, v4_filter_updater, v4_nat_updater, ip_version=4)
        if v6_enabled:
            install_global_rules(config, v6_filter_updater, v6_nat_updater, ip_version=6, raw_updater=v6_raw_updater)

        # Start polling for updates. These kicks make the actors poll
        # indefinitely.
        _log.info("Starting polling for interface and etcd updates.")
        f = iface_watcher.watch_interfaces(async=True)
        monitored_items.append(f)
        etcd_api.start_watch(update_splitter, async=True)

        # Register a SIG_USR handler to trigger a diags dump.
        def dump_top_level_actors(log):
            for a in top_level_actors:
                # The output will include queue length and the like.
                log.info("%s", a)

        futils.register_diags("Top-level actors", dump_top_level_actors)
        futils.register_process_statistics()
        try:
            gevent.signal(signal.SIGUSR1, functools.partial(futils.dump_diags))
        except AttributeError:
            # It doesn't matter too much if we fail to do this.
            _log.warning("Unable to install diag dump handler")
            pass

        # Wait for something to fail.
        _log.info("All top-level actors started, waiting on failures...")
        stopped_greenlets_iter = gevent.iwait(monitored_items)

        stopped_greenlet = next(stopped_greenlets_iter)
        try:
            stopped_greenlet.get()
        except Exception:
            _log.exception("Greenlet failed: %s", stopped_greenlet)
            raise
        else:
            _log.error("Greenlet %s unexpectedly returned.", stopped_greenlet)
            raise AssertionError("Greenlet unexpectedly returned")
    except:
        _log.exception("Exception killing main greenlet")
        raise
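
The failure-handling tail of the function above (wait on gevent.iwait, re-raise the failure, treat a clean return as a bug) can be exercised in isolation. A minimal standalone sketch of that monitoring pattern, with throwaway worker greenlets standing in for Felix's actors:

import gevent

def worker(name, delay, fail=False):
    gevent.sleep(delay)
    if fail:
        raise RuntimeError("%s failed" % name)
    return name

greenlets = [gevent.spawn(worker, "a", 0.1),
             gevent.spawn(worker, "b", 0.05, fail=True)]

# iwait() yields greenlets as they stop; the first one to stop decides
# whether we die with its exception or flag an unexpected clean exit.
first_stopped = next(gevent.iwait(greenlets))
try:
    first_stopped.get()   # re-raises the greenlet's exception, if any
except Exception:
    print("greenlet failed, shutting down")
else:
    print("greenlet returned unexpectedly")
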
Example #13
0
def _main_greenlet(config):
    """
    The root of our tree of greenlets.  Responsible for restarting
    its children if desired.
    """
    try:
        _log.info("Connecting to etcd to get our configuration.")
        etcd_watcher = EtcdWatcher(config)
        etcd_watcher.start()
        # Ask the EtcdWatcher to fill in the global config object before we
        # proceed.  We don't yet support config updates.
        etcd_watcher.load_config(async=False)

        _log.info("Main greenlet: Configuration loaded, starting remaining "
                  "actors...")
        v4_filter_updater = IptablesUpdater("filter", ip_version=4)
        v4_nat_updater = IptablesUpdater("nat", ip_version=4)
        v4_ipset_mgr = IpsetManager(IPV4)
        v4_rules_manager = RulesManager(4, v4_filter_updater, v4_ipset_mgr)
        v4_dispatch_chains = DispatchChains(config, 4, v4_filter_updater)
        v4_ep_manager = EndpointManager(config, IPV4, v4_filter_updater,
                                        v4_dispatch_chains, v4_rules_manager)

        v6_filter_updater = IptablesUpdater("filter", ip_version=6)
        v6_ipset_mgr = IpsetManager(IPV6)
        v6_rules_manager = RulesManager(6, v6_filter_updater, v6_ipset_mgr)
        v6_dispatch_chains = DispatchChains(config, 6, v6_filter_updater)
        v6_ep_manager = EndpointManager(config, IPV6, v6_filter_updater,
                                        v6_dispatch_chains, v6_rules_manager)

        update_splitter = UpdateSplitter(
            config, [v4_ipset_mgr, v6_ipset_mgr],
            [v4_rules_manager, v6_rules_manager],
            [v4_ep_manager, v6_ep_manager],
            [v4_filter_updater, v6_filter_updater])
        iface_watcher = InterfaceWatcher(update_splitter)

        _log.info("Starting actors.")
        update_splitter.start()

        v4_filter_updater.start()
        v4_nat_updater.start()
        v4_ipset_mgr.start()
        v4_rules_manager.start()
        v4_dispatch_chains.start()
        v4_ep_manager.start()

        v6_filter_updater.start()
        v6_ipset_mgr.start()
        v6_rules_manager.start()
        v6_dispatch_chains.start()
        v6_ep_manager.start()

        iface_watcher.start()

        monitored_items = [
            update_splitter.greenlet, v4_filter_updater.greenlet,
            v4_nat_updater.greenlet, v4_ipset_mgr.greenlet,
            v4_rules_manager.greenlet, v4_dispatch_chains.greenlet,
            v4_ep_manager.greenlet, v6_filter_updater.greenlet,
            v6_ipset_mgr.greenlet, v6_rules_manager.greenlet,
            v6_dispatch_chains.greenlet, v6_ep_manager.greenlet,
            iface_watcher.greenlet, etcd_watcher.greenlet
        ]

        # Install the global rules before we start polling for updates.
        _log.info("Installing global rules.")
        install_global_rules(config, v4_filter_updater, v6_filter_updater,
                             v4_nat_updater)

        # Start polling for updates. These kicks make the actors poll
        # indefinitely.
        _log.info("Starting polling for interface and etcd updates.")
        f = iface_watcher.watch_interfaces(async=True)
        monitored_items.append(f)
        f = etcd_watcher.watch_etcd(update_splitter, async=True)
        monitored_items.append(f)

        # Wait for something to fail.
        _log.info("All top-level actors started, waiting on failures...")
        stopped_greenlets_iter = gevent.iwait(monitored_items)

        stopped_greenlet = next(stopped_greenlets_iter)
        try:
            stopped_greenlet.get()
        except Exception:
            _log.exception("Greenlet failed: %s", stopped_greenlet)
            raise
        else:
            _log.error("Greenlet %s unexpectedly returned.", stopped_greenlet)
            raise AssertionError("Greenlet unexpectedly returned")
    except:
        _log.exception("Exception killing main greenlet")
        raise
Example #14
0
 def reset(self):
     self.mgr = IpsetManager(IPV4)
     self.m_create = Mock(spec=self.mgr._create)
     self.mgr._create = self.m_create
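
A standalone sketch of the Mock(spec=...) replacement done in reset() above; the fuller test classes on this page additionally pass side_effect= so calls are delegated to a fake implementation. Manager and fake_create here are made-up stand-ins, not Felix code:

from mock import Mock   # on Python 3: from unittest.mock import Mock

class Manager(object):
    def _create(self, obj_id):
        return "real-" + obj_id

created = []

def fake_create(obj_id):
    # Stand-in for the tests' m_create(): record the id, return a stub.
    created.append(obj_id)
    return "fake-" + obj_id

mgr = Manager()
# spec= ties the mock to the real method (attribute access and, in newer
# mock versions, call signatures are checked); side_effect= delegates
# each call to the fake implementation.
mgr._create = Mock(spec=mgr._create, side_effect=fake_create)

assert mgr._create("tag1") == "fake-tag1"
assert created == ["tag1"]
mgr._create.assert_called_once_with("tag1")
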
Example #15
0
class TestIpsetManager(BaseTestCase):
    def setUp(self):
        super(TestIpsetManager, self).setUp()
        self.reset()

    def reset(self):
        self.mgr = IpsetManager(IPV4)
        self.m_create = Mock(spec=self.mgr._create)
        self.mgr._create = self.m_create

    def test_tag_then_endpoint(self):
        # Send in the messages.
        self.mgr.on_tags_update("prof1", ["tag1"], async=True)
        self.mgr.on_endpoint_update(EP_ID_1_1, EP_1_1, async=True)
        # Let the actor process them.
        self.step_mgr()
        self.assert_one_ep_one_tag()

    def test_endpoint_then_tag(self):
        # Send in the messages.
        self.mgr.on_endpoint_update(EP_ID_1_1, EP_1_1, async=True)
        self.mgr.on_tags_update("prof1", ["tag1"], async=True)
        # Let the actor process them.
        self.step_mgr()
        self.assert_one_ep_one_tag()

    def test_endpoint_then_tag_idempotent(self):
        for _ in xrange(3):
            # Send in the messages.
            self.mgr.on_endpoint_update(EP_ID_1_1, EP_1_1, async=True)
            self.mgr.on_tags_update("prof1", ["tag1"], async=True)
            # Let the actor process them.
            self.step_mgr()
            self.assert_one_ep_one_tag()

    def assert_one_ep_one_tag(self):
        self.assertEqual(self.mgr.endpoints_by_ep_id, {
            EP_ID_1_1: EP_1_1,
        })
        self.assertEqual(self.mgr.ip_owners_by_tag, {
            "tag1": {
                "10.0.0.1": {
                    "prof1": set([
                        EP_ID_1_1
                    ])
                }
            }
        })

    def test_change_ip(self):
        # Initial set-up.
        self.mgr.on_tags_update("prof1", ["tag1"], async=True)
        self.mgr.on_endpoint_update(EP_ID_1_1, EP_1_1, async=True)
        self.step_mgr()
        # Update the endpoint's IPs:
        self.mgr.on_endpoint_update(EP_ID_1_1, EP_1_1_NEW_IP, async=True)
        self.step_mgr()

        self.assertEqual(self.mgr.ip_owners_by_tag, {
            "tag1": {
                "10.0.0.2": {
                    "prof1": set([
                        EP_ID_1_1
                    ])
                },
                "10.0.0.3": {
                    "prof1": set([
                        EP_ID_1_1
                    ])
                }
            }
        })

    def test_tag_updates(self):
        # Initial set-up.
        self.mgr.on_endpoint_update(EP_ID_1_1, EP_1_1, async=True)
        self.mgr.on_tags_update("prof1", ["tag1"], async=True)
        self.step_mgr()

        # Add a tag, keep a tag.
        self.mgr.on_tags_update("prof1", ["tag1", "tag2"], async=True)
        self.step_mgr()
        self.assertEqual(self.mgr.ip_owners_by_tag, {
            "tag1": {
                "10.0.0.1": {
                    "prof1": set([
                        EP_ID_1_1
                    ])
                }
            },
            "tag2": {
                "10.0.0.1": {
                    "prof1": set([
                        EP_ID_1_1
                    ])
                }
            }
        })
        self.assertEqual(self.mgr.tags_by_prof_id, {"prof1": ["tag1", "tag2"]})

        # Remove a tag.
        self.mgr.on_tags_update("prof1", ["tag2"], async=True)
        self.step_mgr()
        self.assertEqual(self.mgr.ip_owners_by_tag, {
            "tag2": {
                "10.0.0.1": {
                    "prof1": set([
                        EP_ID_1_1
                    ])
                }
            }
        })

        # Delete the tags:
        self.mgr.on_tags_update("prof1", None, async=True)
        self.step_mgr()
        self.assertEqual(self.mgr.ip_owners_by_tag, {})
        self.assertEqual(self.mgr.tags_by_prof_id, {})

    def step_mgr(self):
        self.step_actor(self.mgr)
        self.assertEqual(self.mgr._dirty_tags, set())

    def test_update_profile_and_ips(self):
        # Initial set-up.
        self.mgr.on_endpoint_update(EP_ID_1_1, EP_1_1, async=True)
        self.mgr.on_tags_update("prof1", ["tag1"], async=True)
        self.mgr.on_tags_update("prof3", ["tag3"], async=True)
        self.step_mgr()

        self.mgr.on_endpoint_update(EP_ID_1_1, EP_1_1_NEW_PROF_IP, async=True)
        self.step_mgr()

        self.assertEqual(self.mgr.ip_owners_by_tag, {
            "tag3": {
                "10.0.0.3": {
                    "prof3": set([
                        EP_ID_1_1
                    ])
                }
            }
        })
        self.assertEqual(self.mgr.endpoint_ids_by_profile_id, {
            "prof3": set([EP_ID_1_1])
        })

    def test_duplicate_ips(self):
        # Add in two endpoints with the same IP.
        self.mgr.on_tags_update("prof1", ["tag1"], async=True)
        self.mgr.on_endpoint_update(EP_ID_1_1, EP_1_1, async=True)
        self.mgr.on_endpoint_update(EP_ID_2_1, EP_2_1, async=True)
        self.step_mgr()
        # Index should contain both:
        self.assertEqual(self.mgr.endpoints_by_ep_id, {
            EP_ID_1_1: EP_1_1,
            EP_ID_2_1: EP_2_1,
        })
        self.assertEqual(self.mgr.ip_owners_by_tag, {
            "tag1": {
                "10.0.0.1": {
                    "prof1": set([
                        EP_ID_1_1,
                        EP_ID_2_1,
                    ])
                }
            }
        })

        # Second profile tags arrive:
        self.mgr.on_tags_update("prof2", ["tag1", "tag2"], async=True)
        self.step_mgr()
        self.assertEqual(self.mgr.ip_owners_by_tag, {
            "tag1": {
                "10.0.0.1": {
                    "prof1": set([
                        EP_ID_1_1,
                        EP_ID_2_1,
                    ]),
                    "prof2": set([
                        EP_ID_1_1,
                    ])
                }
            },
            "tag2": {
                "10.0.0.1": {
                    "prof2": set([
                        EP_ID_1_1,
                    ])
                }
            },
        })

        # Remove one, check the index gets updated.
        self.mgr.on_endpoint_update(EP_ID_2_1, None, async=True)
        self.step_mgr()
        self.assertEqual(self.mgr.endpoints_by_ep_id, {
            EP_ID_1_1: EP_1_1,
        })
        self.assertEqual(self.mgr.ip_owners_by_tag, {
            "tag1": {
                "10.0.0.1": {
                    "prof1": set([
                        EP_ID_1_1,
                    ]),
                    "prof2": set([
                        EP_ID_1_1,
                    ])
                }
            },
            "tag2": {
                "10.0.0.1": {
                    "prof2": set([
                        EP_ID_1_1,
                    ])
                }
            },
        })

        # Remove the other, index should get completely cleaned up.
        self.mgr.on_endpoint_update(EP_ID_1_1, None, async=True)
        self.step_mgr()
        self.assertEqual(self.mgr.endpoints_by_ep_id, {})
        self.assertEqual(self.mgr.ip_owners_by_tag, {})
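
The assertions above all describe the same nested index shape: tag -> IP -> profile id -> set of endpoint ids, which empties out completely once the last owner is removed. An illustrative standalone sketch of that bookkeeping, not Felix's actual implementation:

from collections import defaultdict

# tag -> ip -> profile_id -> set of endpoint ids
ip_owners_by_tag = defaultdict(lambda: defaultdict(lambda: defaultdict(set)))

def add_owner(tag, ip, prof_id, ep_id):
    ip_owners_by_tag[tag][ip][prof_id].add(ep_id)

def remove_owner(tag, ip, prof_id, ep_id):
    # Prune empty levels so removing the last owner leaves the index empty,
    # matching the "completely cleaned up" assertions above.
    profs = ip_owners_by_tag[tag][ip]
    profs[prof_id].discard(ep_id)
    if not profs[prof_id]:
        del profs[prof_id]
    if not profs:
        del ip_owners_by_tag[tag][ip]
    if not ip_owners_by_tag[tag]:
        del ip_owners_by_tag[tag]

add_owner("tag1", "10.0.0.1", "prof1", "ep-1-1")
add_owner("tag1", "10.0.0.1", "prof1", "ep-2-1")
remove_owner("tag1", "10.0.0.1", "prof1", "ep-1-1")
remove_owner("tag1", "10.0.0.1", "prof1", "ep-2-1")
assert dict(ip_owners_by_tag) == {}
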
Example #16
0
def _main_greenlet():
    """
    The root of our tree of greenlets.  Responsible for restarting
    its children if desired.
    """
    try:
        _log.info("Connecting to etcd to get our configuration.")
        hosts_ipset_v4 = IpsetActor(HOSTS_IPSET_V4)

        monitored_items = []

        # The parent process sends us communication pipes as FD 3 and 4. Open
        # those as files.  Wrap the resulting files in a FileObject to make
        # them cooperate with gevent.
        pipe_from_parent = FileObject(os.fdopen(3, 'rb', -1), 'rb')
        pipe_to_parent = FileObject(os.fdopen(4, 'wb', -1), 'wb')

        config = Config()
        datastore = DatastoreAPI(config, pipe_from_parent, pipe_to_parent,
                                 hosts_ipset_v4)
        datastore.start()
        monitored_items.append(datastore.greenlet)

        # Ask the DatastoreAPI to fill in the global config object before we
        # proceed.  We don't yet support config updates.
        config_loaded = datastore.load_config(async=False)
        config_loaded.wait()

        # Ensure the Kernel's global options are correctly configured for
        # Calico.
        devices.configure_global_kernel_config(config)

        # Check the commands we require are present.
        futils.check_command_deps()

        _log.info("Main greenlet: Configuration loaded, starting remaining "
                  "actors...")

        if config.PROM_METRICS_ENABLED:
            httpd = HTTPServer(("0.0.0.0", config.PROM_METRICS_DRIVER_PORT),
                               MetricsHandler)
            stats_server = gevent.Greenlet(httpd.serve_forever)
            stats_server.start()
            monitored_items.append(stats_server)

        v4_filter_updater = IptablesUpdater("filter",
                                            ip_version=4,
                                            config=config)
        v4_nat_updater = IptablesUpdater("nat", ip_version=4, config=config)
        v4_ipset_mgr = IpsetManager(IPV4, config)
        v4_masq_manager = MasqueradeManager(IPV4, v4_nat_updater)
        v4_rules_manager = RulesManager(config, 4, v4_filter_updater,
                                        v4_ipset_mgr)
        v4_ep_dispatch_chains = WorkloadDispatchChains(config, 4,
                                                       v4_filter_updater)
        v4_if_dispatch_chains = HostEndpointDispatchChains(
            config, 4, v4_filter_updater)
        v4_fip_manager = FloatingIPManager(config, 4, v4_nat_updater)
        v4_ep_manager = EndpointManager(config, IPV4, v4_filter_updater,
                                        v4_ep_dispatch_chains,
                                        v4_if_dispatch_chains,
                                        v4_rules_manager, v4_fip_manager,
                                        datastore.write_api)

        cleanup_updaters = [v4_filter_updater, v4_nat_updater]
        cleanup_ip_mgrs = [v4_ipset_mgr]
        managers = [
            v4_ipset_mgr, v4_rules_manager, v4_ep_manager, v4_masq_manager,
            v4_nat_updater
        ]

        actors_to_start = [
            hosts_ipset_v4,
            v4_filter_updater,
            v4_nat_updater,
            v4_ipset_mgr,
            v4_masq_manager,
            v4_rules_manager,
            v4_ep_dispatch_chains,
            v4_if_dispatch_chains,
            v4_ep_manager,
            v4_fip_manager,
        ]

        # Determine if ipv6 is enabled using the config option.
        if config.IPV6_SUPPORT == "true":
            v6_enabled = True
            ipv6_reason = None
        elif config.IPV6_SUPPORT == "auto":
            v6_enabled, ipv6_reason = futils.detect_ipv6_supported()
        else:
            v6_enabled = False
            ipv6_reason = "Ipv6Support is 'false'"

        if v6_enabled:
            v6_raw_updater = IptablesUpdater("raw",
                                             ip_version=6,
                                             config=config)
            v6_filter_updater = IptablesUpdater("filter",
                                                ip_version=6,
                                                config=config)
            v6_nat_updater = IptablesUpdater("nat",
                                             ip_version=6,
                                             config=config)
            v6_ipset_mgr = IpsetManager(IPV6, config)
            v6_rules_manager = RulesManager(config, 6, v6_filter_updater,
                                            v6_ipset_mgr)
            v6_ep_dispatch_chains = WorkloadDispatchChains(
                config, 6, v6_filter_updater)
            v6_if_dispatch_chains = HostEndpointDispatchChains(
                config, 6, v6_filter_updater)
            v6_fip_manager = FloatingIPManager(config, 6, v6_nat_updater)
            v6_ep_manager = EndpointManager(config, IPV6, v6_filter_updater,
                                            v6_ep_dispatch_chains,
                                            v6_if_dispatch_chains,
                                            v6_rules_manager, v6_fip_manager,
                                            datastore.write_api)
            cleanup_updaters.append(v6_filter_updater)
            cleanup_ip_mgrs.append(v6_ipset_mgr)
            managers += [
                v6_ipset_mgr, v6_rules_manager, v6_ep_manager, v6_raw_updater,
                v6_nat_updater
            ]
            actors_to_start += [
                v6_raw_updater,
                v6_filter_updater,
                v6_nat_updater,
                v6_ipset_mgr,
                v6_rules_manager,
                v6_ep_dispatch_chains,
                v6_if_dispatch_chains,
                v6_ep_manager,
                v6_fip_manager,
            ]
        else:
            # Keep the linter happy.
            _log.warn("IPv6 support disabled: %s.", ipv6_reason)
            v6_filter_updater = None
            v6_nat_updater = None
            v6_raw_updater = None
            v6_if_dispatch_chains = None

        cleanup_mgr = CleanupManager(config, cleanup_updaters, cleanup_ip_mgrs)
        managers.append(cleanup_mgr)
        update_splitter = UpdateSplitter(managers)
        iface_watcher = InterfaceWatcher(update_splitter)
        actors_to_start += [
            cleanup_mgr,
            iface_watcher,
        ]

        _log.info("Starting actors.")
        for actor in actors_to_start:
            actor.start()

        monitored_items += [actor.greenlet for actor in actors_to_start]

        # Try to ensure that the nf_conntrack_netlink kernel module is present.
        # This works around an issue[1] where the first call to the "conntrack"
        # command fails while waiting for the module to load.
        # [1] https://github.com/projectcalico/felix/issues/986
        load_nf_conntrack()

        # Install the global rules before we start polling for updates.
        _log.info("Installing global rules.")
        # Dispatch chain needs to make its configuration before we insert the
        # top-level chains.
        v4_if_dispatch_chains.configure_iptables(async=False)
        install_global_rules(config,
                             v4_filter_updater,
                             v4_nat_updater,
                             ip_version=4)
        if v6_enabled:
            # Dispatch chain needs to make its configuration before we insert
            # the top-level chains.
            v6_if_dispatch_chains.configure_iptables(async=False)
            install_global_rules(config,
                                 v6_filter_updater,
                                 v6_nat_updater,
                                 ip_version=6,
                                 raw_updater=v6_raw_updater)

        # Start polling for updates. These kicks make the actors poll
        # indefinitely.
        _log.info("Starting polling for interface and etcd updates.")
        f = iface_watcher.watch_interfaces(async=True)
        monitored_items.append(f)
        datastore.start_watch(update_splitter, async=True)

        # Register a SIG_USR handler to trigger a diags dump.
        def dump_top_level_actors(log):
            for a in actors_to_start:
                # The output will include queue length and the like.
                log.info("%s", a)

        futils.register_diags("Top-level actors", dump_top_level_actors)
        futils.register_process_statistics()
        try:
            gevent.signal(signal.SIGUSR1, functools.partial(futils.dump_diags))
        except AttributeError:
            # It doesn't matter too much if we fail to do this.
            _log.warning("Unable to install diag dump handler")
            pass
        gevent.signal(signal.SIGTERM, functools.partial(shut_down, datastore))
        gevent.signal(signal.SIGINT, functools.partial(shut_down, datastore))

        # Wait for something to fail.
        _log.info("All top-level actors started, waiting on failures...")
        stopped_greenlets_iter = gevent.iwait(monitored_items)

        stopped_greenlet = next(stopped_greenlets_iter)
        try:
            stopped_greenlet.get()
        except Exception:
            _log.exception("Greenlet failed: %s", stopped_greenlet)
            raise
        else:
            _log.error("Greenlet %s unexpectedly returned.", stopped_greenlet)
            raise AssertionError("Greenlet unexpectedly returned")
    except:
        _log.exception("Exception killing main greenlet")
        raise
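
The pipe handling near the top of the function above relies on gevent's FileObject to make blocking reads cooperative. A small self-contained illustration of the same wrapping, using a fresh os.pipe() in place of the inherited FDs 3 and 4:

import os
import gevent
from gevent.fileobject import FileObject

# Stand-ins for the parent/child communication pipes.
read_fd, write_fd = os.pipe()
pipe_from_parent = FileObject(os.fdopen(read_fd, 'rb', -1), 'rb')
pipe_to_parent = FileObject(os.fdopen(write_fd, 'wb', -1), 'wb')

def reader():
    # Reading through FileObject yields to the gevent hub instead of
    # blocking the whole process, which is why the pipes are wrapped.
    print(pipe_from_parent.readline())

g = gevent.spawn(reader)
pipe_to_parent.write(b"hello from parent\n")
pipe_to_parent.flush()
g.join()
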
Example #17
0
class TestIpsetManager(BaseTestCase):
    def setUp(self):
        super(TestIpsetManager, self).setUp()
        self.reset()

    def reset(self):
        self.created_refs = defaultdict(list)
        self.acquired_refs = {}
        self.config = Mock()
        self.config.MAX_IPSET_SIZE = 1234
        self.mgr = IpsetManager(IPV4, self.config)
        self.m_create = Mock(spec=self.mgr._create,
                             side_effect = self.m_create)
        self.real_create = self.mgr._create
        self.mgr._create = self.m_create

    def m_create(self, tag_or_sel):
        _log.info("Creating ipset %s", tag_or_sel)

        # Do the real creation, to kick off selector indexing, for example.
        with patch("calico.felix.ipsets.RefCountedIpsetActor", autospec=True):
            self.real_create(tag_or_sel)

        # But return a mock...
        ipset = Mock(spec=RefCountedIpsetActor)

        ipset._manager = None
        ipset._id = None
        ipset.ref_mgmt_state = CREATED
        ipset.ref_count = 0
        if isinstance(tag_or_sel, SelectorExpression):
            name_stem = tag_or_sel.unique_id[:8]
        else:
            name_stem = tag_or_sel
        ipset.owned_ipset_names.return_value = ["felix-v4-" + name_stem,
                                                "felix-v4-tmp-" + name_stem]
        ipset.name_stem = name_stem
        self.created_refs[tag_or_sel].append(ipset)
        return ipset

    def test_create(self):
        with patch("calico.felix.ipsets.Ipset") as m_Ipset:
            mgr = IpsetManager(IPV4, self.config)
            tag_ipset = mgr._create("tagid")
        self.assertEqual(tag_ipset.name_stem, "tagid")
        m_Ipset.assert_called_once_with('felix-v4-tagid',
                                        'felix-tmp-v4-tagid',
                                        'inet', 'hash:ip',
                                        max_elem=1234)

    def test_maybe_start_gates_on_in_sync(self):
        with patch("calico.felix.refcount.ReferenceManager."
                   "_maybe_start") as m_maybe_start:
            self.mgr._maybe_start("tag-123")
            self.assertFalse(m_maybe_start.called)
            self.mgr.on_datamodel_in_sync(async=True)
            self.mgr.on_datamodel_in_sync(async=True)  # No-op
            self.step_mgr()
            self.mgr._maybe_start("tag-123")
            self.assertEqual(m_maybe_start.mock_calls,
                             [call("tag-123")])

    def test_tag_then_endpoint(self):
        # Send in the messages.
        self.mgr.on_tags_update("prof1", ["tag1"], async=True)
        self.mgr.on_endpoint_update(EP_ID_1_1, EP_1_1, async=True)
        # Let the actor process them.
        self.step_mgr()
        self.assert_one_ep_one_tag()
        # Undo our messages to check that the index is correctly updated.
        self.mgr.on_tags_update("prof1", None, async=True)
        self.mgr.on_endpoint_update(EP_ID_1_1, None, async=True)
        self.step_mgr()
        self.assert_index_empty()

    def test_endpoint_then_tag(self):
        # Send in the messages.
        self.mgr.on_endpoint_update(EP_ID_1_1, EP_1_1, async=True)
        self.mgr.on_tags_update("prof1", ["tag1"], async=True)
        # Let the actor process them.
        self.step_mgr()
        self.assert_one_ep_one_tag()

    def test_endpoint_then_tag_idempotent(self):
        for _ in xrange(3):
            # Send in the messages.
            self.mgr.on_endpoint_update(EP_ID_1_1, EP_1_1, async=True)
            self.mgr.on_tags_update("prof1", ["tag1"], async=True)
            # Let the actor process them.
            self.step_mgr()
            self.assert_one_ep_one_tag()

    def assert_one_ep_one_tag(self):
        self.assertEqual(self.mgr.endpoint_data_by_ep_id, {
            EP_ID_1_1: EP_DATA_1_1,
        })
        self.assertEqual(self.mgr.tag_membership_index.ip_owners_by_tag, {
            "tag1": {
                "10.0.0.1": ("prof1", EP_ID_1_1),
            }
        })

    def test_selector_then_endpoint(self):
        # Send in the messages.  This selector should match even though there
        # are no labels in the endpoint.
        selector = parse_selector("all()")
        self.mgr.get_and_incref(selector,
                                callback=self.on_ref_acquired,
                                async=True)
        self.mgr.on_endpoint_update(EP_ID_1_1, EP_1_1, async=True)
        # Let the actor process them.
        self.step_mgr()

        self.assert_one_selector_one_ep(selector)

        # Undo our messages to check that the index is correctly updated.
        self.mgr.decref(selector, async=True)
        self.mgr.on_endpoint_update(EP_ID_1_1, None, async=True)
        self.step_mgr()
        self.assert_index_empty()

    def test_endpoint_then_selector(self):
        # Send in the messages.
        self.mgr.on_endpoint_update(EP_ID_1_1, EP_1_1, async=True)
        selector = parse_selector("all()")
        self.mgr.get_and_incref(selector,
                                callback=self.on_ref_acquired,
                                async=True)
        # Let the actor process them.
        self.step_mgr()

        self.assert_one_selector_one_ep(selector)

        # Undo our messages to check that the index is correctly updated.
        self.mgr.on_endpoint_update(EP_ID_1_1, None, async=True)
        self.mgr.decref(selector, async=True)
        self.step_mgr()
        self.assert_index_empty()

    def test_non_trivial_selector_parent_match(self):
        """
        Test a selector that relies on both directly-set labels and
        inherited ones.
        """
        # Send in the messages.  This selector needs both a directly-set
        # label and one inherited from the profile, so it shouldn't match
        # until the profile labels arrive.
        selector = parse_selector("a == 'a1' && p == 'p1'")
        self.mgr.get_and_incref(selector,
                                callback=self.on_ref_acquired,
                                async=True)
        self.mgr.on_endpoint_update(EP_ID_1_1, EP_1_1_LABELS, async=True)
        # Let the actor process them.
        self.step_mgr()

        # Should be no match yet.
        self.assertEqual(self.mgr.tag_membership_index.ip_owners_by_tag, {})

        # Now fire in a parent label.
        self.mgr.on_prof_labels_set("prof1", {"p": "p1"}, async=True)
        self.step_mgr()

        # Should now have a match.
        self.assert_one_selector_one_ep(selector)

        # Undo our messages to check that the index is correctly updated.
        self.mgr.on_prof_labels_set("prof1", None, async=True)
        self.step_mgr()
        self.assertEqual(self.mgr.tag_membership_index.ip_owners_by_tag, {})
        self.mgr.decref(selector, async=True)
        self.mgr.on_endpoint_update(EP_ID_1_1, None, async=True)
        self.step_mgr()
        self.assert_index_empty()

    def test_endpoint_ip_update_with_selector_match(self):
        """
        Test a selector that relies on both directly-set labels and
        inherited ones.
        """
        # Send in the messages.  this selector should match even though there
        # are no labels in the endpoint.
        selector = parse_selector("a == 'a1'")
        self.mgr.get_and_incref(selector,
                                callback=self.on_ref_acquired,
                                async=True)
        self.mgr.on_endpoint_update(EP_ID_1_1, EP_1_1_LABELS, async=True)
        self.step_mgr()

        # Should now have a match.
        self.assert_one_selector_one_ep(selector)

        # Now update the IPs, should update the index.
        self.mgr.on_endpoint_update(EP_ID_1_1, EP_1_1_LABELS_NEW_IP,
                                    async=True)
        self.step_mgr()

        self.assertEqual(self.mgr.tag_membership_index.ip_owners_by_tag, {
            selector: {
                "10.0.0.2": ("dummy", EP_ID_1_1),
            }
        })

        # Undo our messages to check that the index is correctly updated.
        self.mgr.decref(selector, async=True)
        self.mgr.on_endpoint_update(EP_ID_1_1, None, async=True)
        self.step_mgr()
        self.assert_index_empty()

    def assert_one_selector_one_ep(self, selector):
        self.assertEqual(self.mgr.endpoint_data_by_ep_id, {
            EP_ID_1_1: EP_DATA_1_1,
        })
        self.assertEqual(self.mgr.tag_membership_index.ip_owners_by_tag, {
            selector: {
                "10.0.0.1": ("dummy", EP_ID_1_1),
            }
        })

    def assert_index_empty(self):
        self.assertEqual(self.mgr.endpoint_data_by_ep_id, {})
        self.assertEqual(self.mgr.tag_membership_index.ip_owners_by_tag, {})

    def test_change_ip(self):
        # Initial set-up.
        self.mgr.on_tags_update("prof1", ["tag1"], async=True)
        self.mgr.on_endpoint_update(EP_ID_1_1, EP_1_1, async=True)
        self.step_mgr()
        # Update the endpoint's IPs:
        self.mgr.on_endpoint_update(EP_ID_1_1, EP_1_1_NEW_IP, async=True)
        self.step_mgr()

        self.assertEqual(self.mgr.tag_membership_index.ip_owners_by_tag, {
            "tag1": {
                "10.0.0.2": ("prof1", EP_ID_1_1),
                "10.0.0.3": ("prof1", EP_ID_1_1),
            }
        })

    def test_tag_updates(self):
        # Initial set-up.
        self.mgr.on_endpoint_update(EP_ID_1_1, EP_1_1, async=True)
        self.mgr.on_tags_update("prof1", ["tag1"], async=True)
        self.step_mgr()

        # Add a tag, keep a tag.
        self.mgr.on_tags_update("prof1", ["tag1", "tag2"], async=True)
        self.step_mgr()
        self.assertEqual(self.mgr.tag_membership_index.ip_owners_by_tag, {
            "tag1": {
                "10.0.0.1": ("prof1", EP_ID_1_1),
            },
            "tag2": {
                "10.0.0.1": ("prof1", EP_ID_1_1),
            }
        })
        self.assertEqual(self.mgr.tags_by_prof_id, {"prof1": ["tag1", "tag2"]})

        # Remove a tag.
        self.mgr.on_tags_update("prof1", ["tag2"], async=True)
        self.step_mgr()
        self.assertEqual(self.mgr.tag_membership_index.ip_owners_by_tag, {
            "tag2": {
                "10.0.0.1": ("prof1", EP_ID_1_1),
            }
        })

        # Delete the tags:
        self.mgr.on_tags_update("prof1", None, async=True)
        self.step_mgr()
        self.assertEqual(self.mgr.tag_membership_index.ip_owners_by_tag, {})
        self.assertEqual(self.mgr.tags_by_prof_id, {})

    def step_mgr(self):
        self.step_actor(self.mgr)

    def test_update_profile_and_ips(self):
        # Initial set-up.
        self.mgr.on_endpoint_update(EP_ID_1_1, EP_1_1, async=True)
        self.mgr.on_tags_update("prof1", ["tag1"], async=True)
        self.mgr.on_tags_update("prof3", ["tag3"], async=True)
        self.step_mgr()

        self.mgr.on_endpoint_update(EP_ID_1_1, EP_1_1_NEW_PROF_IP, async=True)
        self.step_mgr()

        self.assertEqual(self.mgr.tag_membership_index.ip_owners_by_tag, {
            "tag3": {
                "10.0.0.3": ("prof3", EP_ID_1_1)
            }
        })
        self.assertEqual(self.mgr.endpoint_ids_by_profile_id, {
            "prof3": set([EP_ID_1_1])
        })

    def test_optimize_out_v6(self):
        self.mgr.on_tags_update("prof1", ["tag1"], async=True)
        self.mgr.on_endpoint_update(EP_ID_1_1, EP_1_1, async=True)
        self.mgr.on_endpoint_update(EP_ID_2_1, EP_2_1_IPV6, async=True)
        self.step_mgr()
        # Index should contain only 1_1:
        self.assertEqual(self.mgr.endpoint_data_by_ep_id, {
            EP_ID_1_1: EP_DATA_1_1,
        })

    def test_optimize_out_no_nets(self):
        self.mgr.on_tags_update("prof1", ["tag1"], async=True)
        self.mgr.on_endpoint_update(EP_ID_1_1, EP_1_1, async=True)
        self.mgr.on_endpoint_update(EP_ID_2_1, EP_2_1_NO_NETS, async=True)
        self.step_mgr()
        # Index should contain only 1_1:
        self.assertEqual(self.mgr.endpoint_data_by_ep_id, {
            EP_ID_1_1: EP_DATA_1_1,
        })
        # Should be happy to then add it in.
        self.mgr.on_endpoint_update(EP_ID_2_1, EP_2_1, async=True)
        self.step_mgr()
        # Index should contain both:
        self.assertEqual(self.mgr.endpoint_data_by_ep_id, {
            EP_ID_1_1: EP_DATA_1_1,
            EP_ID_2_1: EP_DATA_2_1,
        })

    def test_duplicate_ips(self):
        # Add in two endpoints with the same IP.
        self.mgr.on_tags_update("prof1", ["tag1"], async=True)
        self.mgr.on_endpoint_update(EP_ID_1_1, EP_1_1, async=True)
        self.mgr.on_endpoint_update(EP_ID_2_1, EP_2_1, async=True)
        self.step_mgr()
        # Index should contain both:
        self.assertEqual(self.mgr.endpoint_data_by_ep_id, {
            EP_ID_1_1: EP_DATA_1_1,
            EP_ID_2_1: EP_DATA_2_1,
        })
        self.assertEqual(self.mgr.tag_membership_index.ip_owners_by_tag, {
            "tag1": {
                "10.0.0.1": set([
                    ("prof1", EP_ID_1_1),
                    ("prof1", EP_ID_2_1),
                ])
            }
        })

        # Second profile tags arrive:
        self.mgr.on_tags_update("prof2", ["tag1", "tag2"], async=True)
        self.step_mgr()
        self.assertEqual(self.mgr.tag_membership_index.ip_owners_by_tag, {
            "tag1": {
                "10.0.0.1": set([
                    ("prof1", EP_ID_1_1),
                    ("prof1", EP_ID_2_1),
                    ("prof2", EP_ID_1_1),
                ])
            },
            "tag2": {
                "10.0.0.1": ("prof2", EP_ID_1_1),
            },
        })

        # Remove one, check the index gets updated.
        self.mgr.on_endpoint_update(EP_ID_2_1, None, async=True)
        self.step_mgr()
        self.assertEqual(self.mgr.endpoint_data_by_ep_id, {
            EP_ID_1_1: EP_DATA_1_1,
        })
        self.assertEqual(self.mgr.tag_membership_index.ip_owners_by_tag, {
            "tag1": {
                "10.0.0.1": set([
                    ("prof1", EP_ID_1_1),
                    ("prof2", EP_ID_1_1),
                ])
            },
            "tag2": {
                "10.0.0.1": ("prof2", EP_ID_1_1),
            },
        })

        # Remove the other, index should get completely cleaned up.
        self.mgr.on_endpoint_update(EP_ID_1_1, None, async=True)
        self.step_mgr()
        self.assertEqual(self.mgr.endpoint_data_by_ep_id, {})
        self.assertEqual(self.mgr.tag_membership_index.ip_owners_by_tag, {},
                         "ip_owners_by_tag should be empty, not %s" %
                         pformat(self.mgr.tag_membership_index.ip_owners_by_tag))

    def on_ref_acquired(self, tag_id, ipset):
        self.acquired_refs[tag_id] = ipset

    @patch("calico.felix.ipsets.list_ipset_names", autospec=True)
    @patch("calico.felix.futils.check_call", autospec=True)
    def test_cleanup(self, m_check_call, m_list_ipsets):
        # We're testing the in-sync processing
        self.mgr.on_datamodel_in_sync(async=True)
        # Start with a couple ipsets.
        self.mgr.get_and_incref("foo", callback=self.on_ref_acquired,
                                async=True)
        self.mgr.get_and_incref("bar", callback=self.on_ref_acquired,
                                async=True)
        self.step_mgr()
        self.assertEqual(set(self.created_refs.keys()),
                         set(["foo", "bar"]))

        # Notify ready so that the ipsets are marked as started.
        self._notify_ready(["foo", "bar"])
        self.step_mgr()

        # Then decref "bar" so that it gets marked as stopping.
        self.mgr.decref("bar", async=True)
        self.step_mgr()
        self.assertEqual(
            self.mgr.stopping_objects_by_id,
            {"bar": set(self.created_refs["bar"])}
        )

        # Return mix of expected and unexpected ipsets.
        m_list_ipsets.return_value = [
            "not-felix-foo",
            "felix-v6-foo",
            "felix-v6-bazzle",
            "felix-v4-foo",
            "felix-v4-bar",
            "felix-v4-baz",
            "felix-v4-biff",
        ]
        m_check_call.side_effect = iter([
            # Exception on any individual call should be ignored.
            FailedSystemCall("Dummy", [], None, None, None),
            None,
        ])
        self.mgr.cleanup(async=True)
        self.step_mgr()

        # Explicitly check that exactly the right delete calls were made.
        # assert_has_calls would ignore extra calls.
        self.assertEqual(sorted(m_check_call.mock_calls),
                         sorted([
                             call(["ipset", "destroy", "felix-v4-biff"]),
                             call(["ipset", "destroy", "felix-v4-baz"]),
                         ]))

    def test_update_dirty(self):
        self.mgr._datamodel_in_sync = True
        m_ipset = Mock(spec=RefCountedIpsetActor)
        self.mgr.objects_by_id["tag-123"] = m_ipset
        with patch.object(self.mgr, "_is_starting_or_live",
                          autospec=True) as m_sol:
            m_sol.return_value = True
            with patch.object(self.mgr.tag_membership_index,
                              "get_and_reset_changes_by_tag",
                              autospec=True) as m_get_and_reset:
                m_get_and_reset.return_value = ({"tag-123": set(["10.0.0.1"])},
                                                {"tag-123": set(["10.0.0.2"])})
                self.mgr._update_dirty_active_ipsets()
                self.assertEqual(
                    m_ipset.add_members.mock_calls,
                    [call(set(["10.0.0.1"]), async=True)]
                )
                self.assertEqual(
                    m_ipset.remove_members.mock_calls,
                    [call(set(["10.0.0.2"]), async=True)]
                )

    def _notify_ready(self, tags):
        for tag in tags:
            self.mgr.on_object_startup_complete(tag, self.created_refs[tag][0],
                                                async=True)
        self.step_mgr()
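
test_cleanup above sets side_effect to an iterator of results, which is how mock lets one call raise and the next succeed. A standalone sketch of that pattern, with run_cmd as a made-up stand-in for futils.check_call:

from mock import Mock, call   # on Python 3: from unittest.mock import Mock, call

run_cmd = Mock()
# With an iterable side_effect, each call consumes the next item: an
# exception instance is raised, any other value is returned.
run_cmd.side_effect = iter([RuntimeError("first call fails"), None])

results = []
for name in ["felix-v4-baz", "felix-v4-biff"]:
    try:
        results.append(run_cmd(["ipset", "destroy", name]))
    except RuntimeError:
        # Mirrors the cleanup logic under test: a failure on one ipset is
        # ignored and the remaining deletions still happen.
        results.append("ignored failure")

assert results == ["ignored failure", None]
assert run_cmd.mock_calls == [call(["ipset", "destroy", "felix-v4-baz"]),
                              call(["ipset", "destroy", "felix-v4-biff"])]
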
Example #18
0
def _main_greenlet(config):
    """
    The root of our tree of greenlets.  Responsible for restarting
    its children if desired.
    """
    try:
        _log.info("Connecting to etcd to get our configuration.")
        hosts_ipset_v4 = IpsetActor(HOSTS_IPSET_V4)

        etcd_api = EtcdAPI(config, hosts_ipset_v4)
        etcd_api.start()
        # Ask the EtcdAPI to fill in the global config object before we
        # proceed.  We don't yet support config updates.
        config_loaded = etcd_api.load_config(async=False)
        config_loaded.wait()

        _log.info("Main greenlet: Configuration loaded, starting remaining "
                  "actors...")
        v4_filter_updater = IptablesUpdater("filter", ip_version=4)
        v4_nat_updater = IptablesUpdater("nat", ip_version=4)
        v4_ipset_mgr = IpsetManager(IPV4)
        v4_masq_manager = MasqueradeManager(IPV4, v4_nat_updater)
        v4_rules_manager = RulesManager(4, v4_filter_updater, v4_ipset_mgr)
        v4_dispatch_chains = DispatchChains(config, 4, v4_filter_updater)
        v4_ep_manager = EndpointManager(config,
                                        IPV4,
                                        v4_filter_updater,
                                        v4_dispatch_chains,
                                        v4_rules_manager)

        v6_filter_updater = IptablesUpdater("filter", ip_version=6)
        v6_ipset_mgr = IpsetManager(IPV6)
        v6_rules_manager = RulesManager(6, v6_filter_updater, v6_ipset_mgr)
        v6_dispatch_chains = DispatchChains(config, 6, v6_filter_updater)
        v6_ep_manager = EndpointManager(config,
                                        IPV6,
                                        v6_filter_updater,
                                        v6_dispatch_chains,
                                        v6_rules_manager)

        update_splitter = UpdateSplitter(config,
                                         [v4_ipset_mgr, v6_ipset_mgr],
                                         [v4_rules_manager, v6_rules_manager],
                                         [v4_ep_manager, v6_ep_manager],
                                         [v4_filter_updater,
                                          v6_filter_updater],
                                         v4_masq_manager)
        iface_watcher = InterfaceWatcher(update_splitter)

        _log.info("Starting actors.")
        hosts_ipset_v4.start()
        update_splitter.start()

        v4_filter_updater.start()
        v4_nat_updater.start()
        v4_ipset_mgr.start()
        v4_masq_manager.start()
        v4_rules_manager.start()
        v4_dispatch_chains.start()
        v4_ep_manager.start()

        v6_filter_updater.start()
        v6_ipset_mgr.start()
        v6_rules_manager.start()
        v6_dispatch_chains.start()
        v6_ep_manager.start()

        iface_watcher.start()

        top_level_actors = [
            hosts_ipset_v4,
            update_splitter,

            v4_filter_updater,
            v4_nat_updater,
            v4_ipset_mgr,
            v4_masq_manager,
            v4_rules_manager,
            v4_dispatch_chains,
            v4_ep_manager,

            v6_filter_updater,
            v6_ipset_mgr,
            v6_rules_manager,
            v6_dispatch_chains,
            v6_ep_manager,

            iface_watcher,
            etcd_api,
        ]

        monitored_items = [actor.greenlet for actor in top_level_actors]

        # Install the global rules before we start polling for updates.
        _log.info("Installing global rules.")
        install_global_rules(config, v4_filter_updater, v6_filter_updater,
                             v4_nat_updater)

        # Start polling for updates. These kicks make the actors poll
        # indefinitely.
        _log.info("Starting polling for interface and etcd updates.")
        f = iface_watcher.watch_interfaces(async=True)
        monitored_items.append(f)
        etcd_api.start_watch(update_splitter, async=True)

        # Register a SIG_USR handler to trigger a diags dump.
        def dump_top_level_actors(log):
            for a in top_level_actors:
                # The output will include queue length and the like.
                log.info("%s", a)
        futils.register_diags("Top-level actors", dump_top_level_actors)
        try:
            gevent.signal(signal.SIGUSR1, functools.partial(futils.dump_diags))
        except AttributeError:
            # It doesn't matter too much if we fail to do this.
            _log.warning("Unable to install diag dump handler")
            pass

        # Wait for something to fail.
        _log.info("All top-level actors started, waiting on failures...")
        stopped_greenlets_iter = gevent.iwait(monitored_items)

        stopped_greenlet = next(stopped_greenlets_iter)
        try:
            stopped_greenlet.get()
        except Exception:
            _log.exception("Greenlet failed: %s", stopped_greenlet)
            raise
        else:
            _log.error("Greenlet %s unexpectedly returned.", stopped_greenlet)
            raise AssertionError("Greenlet unexpectedly returned")
    except:
        _log.exception("Exception killing main greenlet")
        raise
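
The SIGUSR1 registration above wraps the diagnostics dump in functools.partial. A stdlib-only sketch of the same idea; dump_diags here is a hypothetical stand-in for futils.dump_diags, and the (signum, frame) parameters follow the signal.signal handler contract, which is not identical to gevent's helper:

import functools
import os
import signal

def dump_diags(log_prefix, signum, frame):
    # Hypothetical stand-in: Felix's real handler walks the registered
    # diagnostics providers and logs each top-level actor.
    print("%s: signal %s received, dumping diagnostics" % (log_prefix, signum))

# functools.partial pre-binds log_prefix, leaving the (signum, frame)
# signature that signal.signal() requires.
signal.signal(signal.SIGUSR1, functools.partial(dump_diags, "felix"))

os.kill(os.getpid(), signal.SIGUSR1)   # deliver the signal to ourselves
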
Example #19
0
class TestIpsetManager(BaseTestCase):
    def setUp(self):
        super(TestIpsetManager, self).setUp()
        self.reset()

    def reset(self):
        self.created_refs = defaultdict(list)
        self.acquired_refs = {}
        self.mgr = IpsetManager(IPV4)
        self.m_create = Mock(spec=self.mgr._create, side_effect=self.m_create)
        self.mgr._create = self.m_create

    def m_create(self, tag_id):
        _log.info("Creating ipset %s", tag_id)
        ipset = Mock(spec=TagIpset)

        ipset._manager = None
        ipset._id = None
        ipset.ref_mgmt_state = CREATED
        ipset.ref_count = 0
        ipset.owned_ipset_names.return_value = [
            "felix-v4-" + tag_id, "felix-v4-tmp-" + tag_id
        ]

        ipset.tag = tag_id
        self.created_refs[tag_id].append(ipset)
        return ipset

    def test_tag_then_endpoint(self):
        # Send in the messages.
        self.mgr.on_tags_update("prof1", ["tag1"], async=True)
        self.mgr.on_endpoint_update(EP_ID_1_1, EP_1_1, async=True)
        # Let the actor process them.
        self.step_mgr()
        self.assert_one_ep_one_tag()

    def test_endpoint_then_tag(self):
        # Send in the messages.
        self.mgr.on_endpoint_update(EP_ID_1_1, EP_1_1, async=True)
        self.mgr.on_tags_update("prof1", ["tag1"], async=True)
        # Let the actor process them.
        self.step_mgr()
        self.assert_one_ep_one_tag()

    def test_endpoint_then_tag_idempotent(self):
        for _ in xrange(3):
            # Send in the messages.
            self.mgr.on_endpoint_update(EP_ID_1_1, EP_1_1, async=True)
            self.mgr.on_tags_update("prof1", ["tag1"], async=True)
            # Let the actor process them.
            self.step_mgr()
            self.assert_one_ep_one_tag()

    def assert_one_ep_one_tag(self):
        self.assertEqual(self.mgr.endpoint_data_by_ep_id, {
            EP_ID_1_1: EP_DATA_1_1,
        })
        self.assertEqual(self.mgr.ip_owners_by_tag,
                         {"tag1": {
                             "10.0.0.1": ("prof1", EP_ID_1_1),
                         }})

    def test_change_ip(self):
        # Initial set-up.
        self.mgr.on_tags_update("prof1", ["tag1"], async=True)
        self.mgr.on_endpoint_update(EP_ID_1_1, EP_1_1, async=True)
        self.step_mgr()
        # Update the endpoint's IPs:
        self.mgr.on_endpoint_update(EP_ID_1_1, EP_1_1_NEW_IP, async=True)
        self.step_mgr()

        self.assertEqual(
            self.mgr.ip_owners_by_tag, {
                "tag1": {
                    "10.0.0.2": ("prof1", EP_ID_1_1),
                    "10.0.0.3": ("prof1", EP_ID_1_1),
                }
            })

    def test_tag_updates(self):
        # Initial set-up.
        self.mgr.on_endpoint_update(EP_ID_1_1, EP_1_1, async=True)
        self.mgr.on_tags_update("prof1", ["tag1"], async=True)
        self.step_mgr()

        # Add a tag, keep a tag.
        self.mgr.on_tags_update("prof1", ["tag1", "tag2"], async=True)
        self.step_mgr()
        self.assertEqual(
            self.mgr.ip_owners_by_tag, {
                "tag1": {
                    "10.0.0.1": ("prof1", EP_ID_1_1),
                },
                "tag2": {
                    "10.0.0.1": ("prof1", EP_ID_1_1),
                }
            })
        self.assertEqual(self.mgr.tags_by_prof_id, {"prof1": ["tag1", "tag2"]})

        # Remove a tag.
        self.mgr.on_tags_update("prof1", ["tag2"], async=True)
        self.step_mgr()
        self.assertEqual(self.mgr.ip_owners_by_tag,
                         {"tag2": {
                             "10.0.0.1": ("prof1", EP_ID_1_1),
                         }})

        # Delete the tags:
        self.mgr.on_tags_update("prof1", None, async=True)
        self.step_mgr()
        self.assertEqual(self.mgr.ip_owners_by_tag, {})
        self.assertEqual(self.mgr.tags_by_prof_id, {})

    def step_mgr(self):
        self.step_actor(self.mgr)
        self.assertEqual(self.mgr._dirty_tags, set())

    def test_update_profile_and_ips(self):
        # Initial set-up.
        self.mgr.on_endpoint_update(EP_ID_1_1, EP_1_1, async=True)
        self.mgr.on_tags_update("prof1", ["tag1"], async=True)
        self.mgr.on_tags_update("prof3", ["tag3"], async=True)
        self.step_mgr()

        self.mgr.on_endpoint_update(EP_ID_1_1, EP_1_1_NEW_PROF_IP, async=True)
        self.step_mgr()

        self.assertEqual(self.mgr.ip_owners_by_tag,
                         {"tag3": {
                             "10.0.0.3": ("prof3", EP_ID_1_1)
                         }})
        self.assertEqual(self.mgr.endpoint_ids_by_profile_id,
                         {"prof3": set([EP_ID_1_1])})

    def test_optimize_out_v6(self):
        self.mgr.on_tags_update("prof1", ["tag1"], async=True)
        self.mgr.on_endpoint_update(EP_ID_1_1, EP_1_1, async=True)
        self.mgr.on_endpoint_update(EP_ID_2_1, EP_2_1_IPV6, async=True)
        self.step_mgr()
        # Index should contain only 1_1:
        self.assertEqual(self.mgr.endpoint_data_by_ep_id, {
            EP_ID_1_1: EP_DATA_1_1,
        })

    def test_optimize_out_no_nets(self):
        self.mgr.on_tags_update("prof1", ["tag1"], async=True)
        self.mgr.on_endpoint_update(EP_ID_1_1, EP_1_1, async=True)
        self.mgr.on_endpoint_update(EP_ID_2_1, EP_2_1_NO_NETS, async=True)
        self.step_mgr()
        # Index should contain only 1_1:
        self.assertEqual(self.mgr.endpoint_data_by_ep_id, {
            EP_ID_1_1: EP_DATA_1_1,
        })
        # Should be happy to then add it in.
        self.mgr.on_endpoint_update(EP_ID_2_1, EP_2_1, async=True)
        self.step_mgr()
        # Index should contain both:
        self.assertEqual(self.mgr.endpoint_data_by_ep_id, {
            EP_ID_1_1: EP_DATA_1_1,
            EP_ID_2_1: EP_DATA_2_1,
        })

    def test_duplicate_ips(self):
        # Add in two endpoints with the same IP.
        self.mgr.on_tags_update("prof1", ["tag1"], async=True)
        self.mgr.on_endpoint_update(EP_ID_1_1, EP_1_1, async=True)
        self.mgr.on_endpoint_update(EP_ID_2_1, EP_2_1, async=True)
        self.step_mgr()
        # Index should contain both:
        self.assertEqual(self.mgr.endpoint_data_by_ep_id, {
            EP_ID_1_1: EP_DATA_1_1,
            EP_ID_2_1: EP_DATA_2_1,
        })
        self.assertEqual(
            self.mgr.ip_owners_by_tag, {
                "tag1": {
                    "10.0.0.1": set([
                        ("prof1", EP_ID_1_1),
                        ("prof1", EP_ID_2_1),
                    ])
                }
            })

        # Second profile tags arrive:
        self.mgr.on_tags_update("prof2", ["tag1", "tag2"], async=True)
        self.step_mgr()
        self.assertEqual(
            self.mgr.ip_owners_by_tag, {
                "tag1": {
                    "10.0.0.1":
                    set([
                        ("prof1", EP_ID_1_1),
                        ("prof1", EP_ID_2_1),
                        ("prof2", EP_ID_1_1),
                    ])
                },
                "tag2": {
                    "10.0.0.1": ("prof2", EP_ID_1_1),
                },
            })

        # Remove one, check the index gets updated.
        self.mgr.on_endpoint_update(EP_ID_2_1, None, async=True)
        self.step_mgr()
        self.assertEqual(self.mgr.endpoint_data_by_ep_id, {
            EP_ID_1_1: EP_DATA_1_1,
        })
        self.assertEqual(
            self.mgr.ip_owners_by_tag, {
                "tag1": {
                    "10.0.0.1": set([
                        ("prof1", EP_ID_1_1),
                        ("prof2", EP_ID_1_1),
                    ])
                },
                "tag2": {
                    "10.0.0.1": ("prof2", EP_ID_1_1),
                },
            })

        # Remove the other, index should get completely cleaned up.
        self.mgr.on_endpoint_update(EP_ID_1_1, None, async=True)
        self.step_mgr()
        self.assertEqual(self.mgr.endpoint_data_by_ep_id, {})
        self.assertEqual(
            self.mgr.ip_owners_by_tag, {},
            "ip_owners_by_tag should be empty, not %s" %
            pformat(self.mgr.ip_owners_by_tag))

    def on_ref_acquired(self, tag_id, ipset):
        self.acquired_refs[tag_id] = ipset

    @patch("calico.felix.ipsets.list_ipset_names", autospec=True)
    @patch("calico.felix.futils.check_call", autospec=True)
    def test_cleanup(self, m_check_call, m_list_ipsets):
        # Start with a couple ipsets.
        self.mgr.get_and_incref("foo",
                                callback=self.on_ref_acquired,
                                async=True)
        self.mgr.get_and_incref("bar",
                                callback=self.on_ref_acquired,
                                async=True)
        self.step_mgr()
        self.assertEqual(set(self.created_refs.keys()), set(["foo", "bar"]))

        # Notify ready so that the ipsets are marked as started.
        self._notify_ready(["foo", "bar"])
        self.step_mgr()

        # Then decref "bar" so that it gets marked as stopping.
        self.mgr.decref("bar", async=True)
        self.step_mgr()
        self.assertEqual(self.mgr.stopping_objects_by_id,
                         {"bar": set(self.created_refs["bar"])})

        # Return mix of expected and unexpected ipsets.
        m_list_ipsets.return_value = [
            "not-felix-foo",
            "felix-v6-foo",
            "felix-v6-bazzle",
            "felix-v4-foo",
            "felix-v4-bar",
            "felix-v4-baz",
            "felix-v4-biff",
        ]
        m_check_call.side_effect = iter([
            # Exception on any individual call should be ignored.
            FailedSystemCall("Dummy", [], None, None, None),
            None,
        ])
        self.mgr.cleanup(async=True)
        self.step_mgr()

        # Explicitly check that exactly the right delete calls were made.
        # assert_has_calls would ignore extra calls.
        self.assertEqual(
            sorted(m_check_call.mock_calls),
            sorted([
                call(["ipset", "destroy", "felix-v4-biff"]),
                call(["ipset", "destroy", "felix-v4-baz"]),
            ]))

    def test_apply_snapshot_mainline(self):
        self.mgr.apply_snapshot(
            {
                "prof1": ["tag1"],
                "prof2": ["B"],
                "prof3": ["B"]
            },
            {
                EP_ID_1_1: EP_1_1,
                EP_ID_2_1: EP_2_1
            },
            async=True,
        )
        self.mgr.get_and_incref("tag1",
                                callback=self.on_ref_acquired,
                                async=True)
        self.step_mgr()
        self.mgr.on_object_startup_complete("tag1",
                                            self.created_refs["tag1"][0],
                                            async=True)
        self.step_mgr()
        self.mgr.apply_snapshot(
            {"prof1": ["tag1", "tag2"]},
            {EP_ID_1_1: EP_1_1},
            async=True,
        )
        self.step_mgr()
        self.assertEqual(self.mgr.tags_by_prof_id, {"prof1": ["tag1", "tag2"]})
        self.assertEqual(self.mgr.endpoint_data_by_ep_id,
                         {EP_ID_1_1: EP_DATA_1_1})
        ipset = self.acquired_refs["tag1"]
        self.assertEqual(ipset.replace_members.mock_calls, [
            call(set(['10.0.0.1']), force_reprogram=True, async=True),
            call(set(['10.0.0.1']), force_reprogram=True, async=True),
        ])

    def test_apply_snapshot_forces_reprogram(self):
        # Apply a snapshot but mock the finish call so that we can check that
        # apply_snapshot set the flag...
        self.mgr.apply_snapshot(
            {
                "prof1": ["A"],
                "prof2": ["B"]
            },
            {
                EP_ID_1_1: EP_1_1,
                EP_ID_2_1: EP_2_1
            },
            async=True,
        )
        # noinspection PyUnresolvedReferences
        with patch.object(self.mgr, "_finish_msg_batch"):
            self.step_actor(self.mgr)
        self.assertTrue(self.mgr._force_reprogram)

    def test_finish_msg_batch_clears_reprogram_flag(self):
        # Apply a snapshot and step the actor for real, should clear the flag.
        self.mgr.apply_snapshot(
            {"prof1": ["A"]},
            {EP_ID_1_1: EP_1_1},
            async=True,
        )
        self.step_mgr()
        self.assertFalse(self.mgr._force_reprogram)

    def _notify_ready(self, tags):
        for tag in tags:
            self.mgr.on_object_startup_complete(tag,
                                                self.created_refs[tag][0],
                                                async=True)
        self.step_mgr()
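
The comment in test_cleanup above ("assert_has_calls would ignore extra calls") is the reason these tests compare mock_calls directly instead of using the convenience assertion. A small illustration of the difference:

from mock import Mock, call   # on Python 3: from unittest.mock import Mock, call

m = Mock()
m("expected")
m("unexpected extra call")

expected = [call("expected")]
# assert_has_calls() only checks that the expected calls appear somewhere,
# so the extra call above goes unnoticed.
m.assert_has_calls(expected)
# Comparing mock_calls directly (sorted, as in test_cleanup) catches it.
assert sorted(m.mock_calls) != sorted(expected)
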