def test_clean_up_endpoint_status(self): self.m_config.REPORT_ENDPOINT_STATUS = True ep_id = WloadEndpointId("foo", "openstack", "workloadid", "endpointid") empty_dir = Mock() empty_dir.key = ("/calico/felix/v1/host/foo/workload/" "openstack/foobar") empty_dir.dir = True missing_ep = Mock() missing_ep.key = ("/calico/felix/v1/host/foo/workload/" "openstack/aworkload/endpoint/anendpoint") self.m_client.read.return_value.leaves = [ empty_dir, missing_ep, ] with patch.object(self.rep, "_mark_endpoint_dirty") as m_mark: self.rep.clean_up_endpoint_statuses(async=True) self.step_actor(self.rep) # Missing endpoint should have been marked for cleanup. m_mark.assert_called_once_with( WloadEndpointId("foo", "openstack", "aworkload", "anendpoint") )
def test_endpoint_set_invalid(self):
    """An unparseable endpoint document is passed downstream as None."""
    self.dispatch("/calico/v1/host/h1/workload/o1/w1/endpoint/e1",
                  "set", value="{}")
    self.m_splitter.on_endpoint_update.assert_called_once_with(
        WloadEndpointId("h1", "o1", "w1", "e1"),
        None,
    )
def test_endpoint_set(self):
    """A valid endpoint document is parsed and passed to the splitter."""
    self.dispatch("/calico/v1/host/h1/workload/o1/w1/endpoint/e1",
                  "set", value=ENDPOINT_STR)
    self.m_splitter.on_endpoint_update.assert_called_once_with(
        WloadEndpointId("h1", "o1", "w1", "e1"),
        VALID_ENDPOINT,
    )
def test_on_endpoint_status_v4_v6(self): # Send in endpoint status updates for v4 and v6. endpoint_id = WloadEndpointId("foo", "bar", "baz", "biff") with patch("gevent.spawn_later", autospec=True) as m_spawn: self.rep.on_endpoint_status_changed(endpoint_id, IPV4, {"status": "up"}, async=True) self.rep.on_endpoint_status_changed(endpoint_id, IPV6, {"status": "down"}, async=True) self.step_actor(self.rep) # Should record the status. self.assertEqual( self.rep._endpoint_status, { IPV4: {endpoint_id: {"status": "up"}}, IPV6: {endpoint_id: {"status": "down"}}, } ) # And do a write. self.assertEqual( self.m_client.set.mock_calls, [call("/calico/felix/v1/host/foo/workload/bar/baz/endpoint/biff", JSONString({"status": "down"}))] )
def on_endpoint_delete(self, response, hostname, orchestrator,
                       workload_id, endpoint_id):
    """Handler for endpoint deleted, passes the update to the splitter."""
    combined_id = WloadEndpointId(hostname, orchestrator,
                                  workload_id, endpoint_id)
    _log.debug("Endpoint %s deleted", combined_id)
    _stats.increment("Endpoint deleted")
    # Deletion is signalled downstream as an update with endpoint=None.
    self.splitter.on_endpoint_update(combined_id, None)
def test_resync(self): endpoint_id = WloadEndpointId("foo", "bar", "baz", "biff") self.rep.on_endpoint_status_changed(endpoint_id, IPV4, {"status": "up"}, async=True) endpoint_id_2 = WloadEndpointId("foo", "bar", "baz", "boff") self.rep.on_endpoint_status_changed(endpoint_id_2, IPV6, {"status": "up"}, async=True) with patch("gevent.spawn_later", autospec=True) as m_spawn: self.step_actor(self.rep) self.rep._on_timer_pop(async=True) self.step_actor(self.rep) self.assertEqual(self.rep._older_dirty_endpoints, set()) self.assertEqual(self.rep._newer_dirty_endpoints, set()) self.rep.resync(async=True) self.step_actor(self.rep) self.assertEqual(self.rep._older_dirty_endpoints, set()) self.assertEqual(self.rep._newer_dirty_endpoints, set([endpoint_id, endpoint_id_2]))
def on_endpoint_set(self, response, hostname, orchestrator,
                    workload_id, endpoint_id):
    """Handler for endpoint updates, passes the update to the splitter."""
    combined_id = WloadEndpointId(hostname, orchestrator,
                                  workload_id, endpoint_id)
    _log.debug("Endpoint %s updated", combined_id)
    _stats.increment("Endpoint created/updated")
    # Parse/validate the raw etcd value before handing it downstream.
    parsed = parse_endpoint(self._config, combined_id, response.value)
    self.splitter.on_endpoint_update(combined_id, parsed)
def test_endpoint_del(self):
    """
    Test endpoint-only deletion.
    """
    self.dispatch("/calico/v1/host/h1/workload/o1/w1/endpoint/e1",
                  action="delete")
    self.m_splitter.on_endpoint_update.assert_called_once_with(
        WloadEndpointId("h1", "o1", "w1", "e1"),
        None,
    )
def on_wl_endpoint_remove(self, msg):
    """Handler for endpoint removals, passes the update to the splitter.

    :param msg felixbackend_pb2.WorkloadEndpointUpdate
    """
    combined_id = WloadEndpointId(self._config.HOSTNAME,
                                  msg.id.orchestrator_id,
                                  msg.id.workload_id,
                                  msg.id.endpoint_id)
    _log.debug("Endpoint %s removed", combined_id)
    _stats.increment("Endpoint removed")
    # Removal is signalled downstream as an update with endpoint=None.
    self.splitter.on_endpoint_update(combined_id, None)
def test_on_endpoint_status_changed_disabled(self): self.m_config.REPORT_ENDPOINT_STATUS = False endpoint_id = WloadEndpointId("foo", "bar", "baz", "biff") with patch("gevent.spawn_later", autospec=True) as m_spawn: self.rep.on_endpoint_status_changed(endpoint_id, IPV4, {"status": "up"}, async=True) self.step_actor(self.rep) self.assertFalse(m_spawn.called) self.assertEqual(self.rep._endpoint_status[IPV4], {}) # Nothing queued. self.assertEqual(self.rep._newer_dirty_endpoints, set()) self.assertEqual(self.rep._older_dirty_endpoints, set())
def test_on_endpoint_status_failure(self): # Send in an endpoint status update. endpoint_id = WloadEndpointId("foo", "bar", "baz", "biff") self.m_client.set.side_effect = EtcdException() with patch("gevent.spawn_later", autospec=True) as m_spawn: self.rep.on_endpoint_status_changed(endpoint_id, IPV4, {"status": "up"}, async=True) self.step_actor(self.rep) # Should do the write. self.assertEqual(self.m_client.set.mock_calls, [ call("/calico/felix/v1/host/foo/workload/bar/baz/endpoint/biff", JSONString({"status": "up"})) ]) # But endpoint should be re-queued in the newer set. self.assertEqual(self.rep._newer_dirty_endpoints, set([endpoint_id])) self.assertEqual(self.rep._older_dirty_endpoints, set())
def on_wl_endpoint_update(self, msg):
    """Handler for endpoint updates, passes the update to the splitter.

    :param msg felixbackend_pb2.WorkloadEndpointUpdate
    """
    combined_id = WloadEndpointId(self._config.HOSTNAME,
                                  msg.id.orchestrator_id,
                                  msg.id.workload_id,
                                  msg.id.endpoint_id)
    _log.debug("Endpoint %s updated", combined_id)
    _stats.increment("Endpoint created/updated")
    ep = msg.endpoint
    # Convert the protobuf message into the dict format used internally.
    endpoint_dict = {
        "state": ep.state,
        "name": ep.name,
        # Empty MAC string is normalised to None.
        "mac": ep.mac or None,
        "profile_ids": ep.profile_ids,
        "ipv4_nets": ep.ipv4_nets,
        "ipv6_nets": ep.ipv6_nets,
        "tiers": convert_pb_tiers(ep.tiers),
    }
    self.splitter.on_endpoint_update(combined_id, endpoint_dict)
from netaddr import IPAddress from calico.datamodel_v1 import WloadEndpointId, HostEndpointId from calico.felix.futils import IPV4, FailedSystemCall, CommandOutput, IPV6 from calico.felix.ipsets import (EndpointData, IpsetManager, IpsetActor, RefCountedIpsetActor, EMPTY_ENDPOINT_DATA, Ipset, list_ipset_names) from calico.felix.refcount import CREATED from calico.felix.test.base import BaseTestCase # Logger _log = logging.getLogger(__name__) patch.object = getattr(patch, "object") # Keep PyCharm linter happy. EP_ID_1_1 = WloadEndpointId("host1", "orch", "wl1_1", "ep1_1") EP_1_1 = { "profile_ids": ["prof1", "prof2"], "ipv4_nets": ["10.0.0.1/32"], } EP_1_1_LABELS = { "profile_ids": ["prof1", "prof2"], "ipv4_nets": ["10.0.0.1/32"], "labels": { "a": "a1", } } EP_1_1_LABELS_NEW_IP = { "profile_ids": ["prof1", "prof2"], "ipv4_nets": ["10.0.0.2/32"], "labels": {
def create_id(self):
    """Return a fresh workload endpoint ID for use in tests."""
    return WloadEndpointId("localhost", "orchestrator", "workload",
                           "endpoint")
def test_mark_endpoint_dirty_already_dirty(self):
    """An endpoint already in the older set is not re-queued as newer."""
    endpoint_id = WloadEndpointId("a", "b", "c", "d")
    self.rep._older_dirty_endpoints.add(endpoint_id)
    self.rep._mark_endpoint_dirty(endpoint_id)
    self.assertNotIn(endpoint_id, self.rep._newer_dirty_endpoints)
def test_validate_endpoint(self):
    # This test method hits a few cases that we don't hit above but it's
    # hard to understand. Please don't add more tests like this!
    combined_id = WloadEndpointId("host", "orchestrator", "workload",
                                  "valid_name-ok.")
    endpoint_dict = {'profile_id': "valid.prof-name",
                     'state': "active",
                     'name': "tapabcdef",
                     'mac': "78:2b:cb:9f:ae:1c",
                     'ipv4_nets': [],
                     'ipv6_nets': []}
    config = Config('tap', 'localhost')
    # Mainline: validation normalises the singular profile_id field into
    # the plural profile_ids list.
    ep_copy = endpoint_dict.copy()
    self.validate_endpoint(config, combined_id, ep_copy)
    self.assertTrue(ep_copy.get('profile_id') is None)
    self.assertEqual(ep_copy.get('profile_ids'), ["valid.prof-name"])

    # Now break it various ways.
    # Bad endpoint ID.
    for bad_str in ("with spaces", "$stuff", "^%@"):
        bad_id = WloadEndpointId("host", "orchestrator", "workload",
                                 bad_str)
        with self.assertRaisesRegexp(ValidationFailed,
                                     "Invalid endpoint ID"):
            self.validate_endpoint(config, bad_id, endpoint_dict.copy())

    # Bad dictionary.
    with self.assertRaisesRegexp(ValidationFailed,
                                 "Expected endpoint to be a dict"):
        self.validate_endpoint(config, combined_id, [1, 2, 3])

    # No state, invalid state.
    bad_dict = endpoint_dict.copy()
    del bad_dict['state']
    with self.assertRaisesRegexp(ValidationFailed,
                                 "Missing 'state' field"):
        self.validate_endpoint(config, combined_id, bad_dict)
    bad_dict['state'] = "invalid"
    with self.assertRaisesRegexp(ValidationFailed,
                                 "Expected 'state' to be"):
        self.validate_endpoint(config, combined_id, bad_dict)

    # Missing name.
    bad_dict = endpoint_dict.copy()
    del bad_dict['name']
    with self.assertRaisesRegexp(ValidationFailed,
                                 "Missing 'name' field"):
        self.validate_endpoint(config, combined_id, bad_dict)

    # It's OK to be missing a MAC.
    ok_dict = endpoint_dict.copy()
    del ok_dict['mac']
    self.validate_endpoint(config, combined_id, ok_dict)

    # Two failures at once: both should be reported in the message.
    # (NOTE: bad_dict still lacks 'name' from the case above.)
    bad_dict['name'] = [1, 2, 3]
    bad_dict['mac'] = 73
    with self.assertRaisesRegexp(ValidationFailed,
                                 "Expected 'name' to be a string.*" +
                                 "Invalid MAC"):
        self.validate_endpoint(config, combined_id, bad_dict)

    # Bad profile ID
    bad_dict = endpoint_dict.copy()
    bad_dict['profile_id'] = "str£ing"
    with self.assertRaisesRegexp(ValidationFailed,
                                 "Invalid profile ID"):
        self.validate_endpoint(config, combined_id, bad_dict)
    bad_dict = endpoint_dict.copy()
    del bad_dict['profile_id']
    bad_dict['profile_ids'] = [1, 2, 3]
    with self.assertRaisesRegexp(ValidationFailed,
                                 "Expected profile IDs to be strings"):
        self.validate_endpoint(config, combined_id, bad_dict)

    # Bad interface name - acceptable if not local.
    bad_dict = endpoint_dict.copy()
    bad_dict['name'] = "vethabcdef"
    self.validate_endpoint(config, combined_id, bad_dict)
    # ...but rejected for a local endpoint (hostname matches config).
    local_id = WloadEndpointId("localhost", "orchestrator", "workload",
                               "valid_name-ok.")
    with self.assertRaisesRegexp(ValidationFailed,
                                 "does not start with"):
        self.validate_endpoint(config, local_id, bad_dict)

    # Valid networks.
    good_dict = endpoint_dict.copy()
    good_dict['ipv4_nets'] = ["1.2.3.4/32", "172.0.0.0/8", "3.4.5.6"]
    good_dict['ipv6_nets'] = ["::1/128", "::", "2001:db8:abc:1400::/54"]
    self.validate_endpoint(config, combined_id, good_dict.copy())

    # Invalid networks
    bad_dict = good_dict.copy()
    bad_dict['ipv4_nets'] = ["1.2.3.4/32", "172.0.0.0/8",
                             "2001:db8:abc:1400::/54"]
    with self.assertRaisesRegexp(ValidationFailed,
                                 "not a valid IPv4 CIDR"):
        self.validate_endpoint(config, combined_id, bad_dict.copy())
    bad_dict['ipv4_nets'] = ["1.2.3.4/32", "172.0.0.0/8", "nonsense"]
    with self.assertRaisesRegexp(ValidationFailed,
                                 "not a valid IPv4 CIDR"):
        self.validate_endpoint(config, combined_id, bad_dict.copy())

    bad_dict = good_dict.copy()
    bad_dict['ipv6_nets'] = ["::1/128", "::", "1.2.3.4/8"]
    with self.assertRaisesRegexp(ValidationFailed,
                                 "not a valid IPv6 CIDR"):
        self.validate_endpoint(config, combined_id, bad_dict.copy())
    bad_dict['ipv6_nets'] = ["::1/128", "::", "nonsense"]
    with self.assertRaisesRegexp(ValidationFailed,
                                 "not a valid IPv6 CIDR"):
        self.validate_endpoint(config, combined_id, bad_dict.copy())

    # Gateway IPs.
    good_dict['ipv4_gateway'] = "1.2.3.4"
    good_dict['ipv6_gateway'] = "2001:db8:abc:1400::"
    self.validate_endpoint(config, combined_id, good_dict.copy())

    bad_dict = good_dict.copy()
    bad_dict['ipv4_gateway'] = "2001:db8:abc:1400::"
    with self.assertRaisesRegexp(ValidationFailed,
                                 "not a valid IPv4 gateway"):
        self.validate_endpoint(config, combined_id, bad_dict.copy())
    bad_dict['ipv4_gateway'] = "nonsense"
    with self.assertRaisesRegexp(ValidationFailed,
                                 "not a valid IPv4 gateway"):
        self.validate_endpoint(config, combined_id, bad_dict.copy())

    bad_dict = good_dict.copy()
    bad_dict['ipv6_gateway'] = "1.2.3.4"
    with self.assertRaisesRegexp(ValidationFailed,
                                 "not a valid IPv6 gateway"):
        self.validate_endpoint(config, combined_id, bad_dict.copy())
    bad_dict['ipv6_gateway'] = "nonsense"
    with self.assertRaisesRegexp(ValidationFailed,
                                 "not a valid IPv6 gateway"):
        self.validate_endpoint(config, combined_id, bad_dict.copy())

    # Labels, empty.
    good_dict["labels"] = {}
    self.validate_endpoint(config, combined_id, good_dict)
    self.assertEqual(good_dict["labels"], {})
    # Labels, valid.
    good_dict["labels"] = {"a": "b"}
    self.validate_endpoint(config, combined_id, good_dict)
    self.assertEqual(good_dict["labels"], {"a": "b"})
    # Labels, bad type.
    bad_dict = good_dict.copy()
    bad_dict["labels"] = []
    with self.assertRaisesRegexp(ValidationFailed,
                                 "Expected labels to be a dict"):
        self.validate_endpoint(config, combined_id, bad_dict.copy())
    # Labels, bad value.
    bad_dict = good_dict.copy()
    bad_dict["labels"] = {"a": {}}
    with self.assertRaisesRegexp(ValidationFailed,
                                 "Invalid label value"):
        self.validate_endpoint(config, combined_id, bad_dict.copy())
    # Labels, bad key.
    bad_dict = good_dict.copy()
    bad_dict["labels"] = {"a+|%": {}}
    with self.assertRaisesRegexp(ValidationFailed,
                                 "Invalid label name 'a+|%'."):
        self.validate_endpoint(config, combined_id, bad_dict.copy())
def test_on_endpoint_status_mainline(self): # Send in an endpoint status update. endpoint_id = WloadEndpointId("foo", "bar", "baz", "biff") with patch("gevent.spawn_later", autospec=True) as m_spawn: self.rep.on_endpoint_status_changed(endpoint_id, IPV4, {"status": "up"}, async=True) self.step_actor(self.rep) # Should record the status. self.assertEqual( self.rep._endpoint_status[IPV4], { endpoint_id: {"status": "up"} } ) # And do a write. self.assertEqual( self.m_client.set.mock_calls, [call("/calico/felix/v1/host/foo/workload/bar/baz/endpoint/biff", JSONString({"status": "up"}))] ) # Since we did a write, the rate limit timer should be scheduled. self.assertEqual( m_spawn.mock_calls, [call(ANY, self.rep._on_timer_pop, async=True)] ) self.assertTrue(self.rep._timer_scheduled) self.assertFalse(self.rep._reporting_allowed) # Send in another update, shouldn't get written until we pop the timer. self.m_client.reset_mock() with patch("gevent.spawn_later", autospec=True) as m_spawn: self.rep.on_endpoint_status_changed(endpoint_id, IPV4, None, async=True) self.step_actor(self.rep) self.assertFalse(self.m_client.set.called) # Timer already scheduled, shouldn't get rescheduled. self.assertFalse(m_spawn.called) # Pop the timer, should trigger write and reschedule. with patch("gevent.spawn_later", autospec=True) as m_spawn: self.rep._on_timer_pop(async=True) self.step_actor(self.rep) self.maxDiff = 10000 self.assertEqual( self.m_client.delete.mock_calls, [ call("/calico/felix/v1/host/foo/workload/bar/baz/endpoint/" "biff"), call("calico/felix/v1/host/foo/workload/bar/baz/endpoint", dir=True, timeout=5), call("calico/felix/v1/host/foo/workload/bar/baz", dir=True, timeout=5), call("calico/felix/v1/host/foo/workload/bar", dir=True, timeout=5), call("calico/felix/v1/host/foo/workload", dir=True, timeout=5), ] ) # Rate limit timer should be scheduled. 
self.assertEqual( m_spawn.mock_calls, [call(ANY, self.rep._on_timer_pop, async=True)] ) spawn_delay = m_spawn.call_args[0][0] self.assertTrue(spawn_delay >= 0.89999) self.assertTrue(spawn_delay <= 1.10001) self.assertTrue(self.rep._timer_scheduled) self.assertFalse(self.rep._reporting_allowed) # Cache should be cleaned up. self.assertEqual(self.rep._endpoint_status[IPV4], {}) # Nothing queued. self.assertEqual(self.rep._newer_dirty_endpoints, set()) self.assertEqual(self.rep._older_dirty_endpoints, set())