def __init__(self, name, start_calico=True, dind=True):
    """
    Create a test host.

    :param name: Name of the host; also used as the dind container name.
    :param start_calico: If True, start the calico node service once the
        host is up.
    :param dind: If True, run the host as a privileged Docker-in-Docker
        container; otherwise use the local machine.
    """
    self.name = name
    self.dind = dind
    # Workloads created on this host, tracked for cleanup.
    self.workloads = set()
    # This variable is used to assert on destruction that this object was
    # cleaned up. If not used as a context manager, users of this object
    # are expected to trigger cleanup explicitly.
    self._cleaned = False
    if dind:
        # TODO use pydocker
        # Remove any stale container of the same name (rc 1 = not found),
        # then start a fresh privileged dind container with the source
        # tree mounted at /code.
        docker.rm("-f", self.name, _ok_code=[0, 1])
        docker.run("--privileged", "-v", os.getcwd()+":/code", "--name", self.name, "-tid", "calico/dind")
        # Record the container's IPv4 and IPv6 addresses.
        self.ip = docker.inspect("--format", "{{ .NetworkSettings.IPAddress }}", self.name).stdout.rstrip()
        self.ip6 = docker.inspect("--format", "{{ .NetworkSettings."
                                  "GlobalIPv6Address }}", self.name).stdout.rstrip()
        # Make sure docker is up
        docker_ps = partial(self.execute, "docker ps")
        retry_until_success(docker_ps, ex_class=CalledProcessError, retries=100)
        # Preload the images the tests need into the inner docker daemon.
        self.execute("docker load --input /code/calico_containers/calico-node.tar && "
                     "docker load --input /code/calico_containers/busybox.tar")
    else:
        self.ip = get_ip()
    if start_calico:
        self.start_calico_node()
def __init__(self, name, start_calico=True, dind=True):
    """
    Create a test host backed by a consul kv-store.

    :param name: Name of the host; also used as the dind container name.
    :param start_calico: If True, start the calico node service (and
        verify the docker network driver) once the host is up.
    :param dind: If True, run the host as a privileged Docker-in-Docker
        container; otherwise use the local machine.
    """
    self.name = name
    self.dind = dind
    # Workloads created on this host, tracked for cleanup.
    self.workloads = set()
    # This variable is used to assert on destruction that this object was
    # cleaned up. If not used as a context manager, users of this object
    # are expected to trigger cleanup explicitly.
    self._cleaned = False
    if dind:
        # Remove any stale container of the same name (rc 1 = not found),
        # then start a fresh privileged dind container whose inner docker
        # daemon points at the consul cluster store on this machine.
        docker.rm("-f", self.name, _ok_code=[0, 1])
        docker.run("--privileged", "-v", os.getcwd()+":/code", "--name", self.name,
                   "-e", "DOCKER_DAEMON_ARGS="
                   "--kv-store=consul:%s:8500" % utils.get_ip(),
                   "-tid", "calico/dind")
        # Record the container's IPv4 and IPv6 addresses.
        self.ip = docker.inspect("--format", "{{ .NetworkSettings.IPAddress }}", self.name).stdout.rstrip()
        self.ip6 = docker.inspect("--format", "{{ .NetworkSettings."
                                  "GlobalIPv6Address }}", self.name).stdout.rstrip()
        # Make sure docker is up
        docker_ps = partial(self.execute, "docker ps")
        retry_until_success(docker_ps, ex_class=CalledProcessError)
        # Preload the images the tests need into the inner docker daemon.
        self.execute("docker load --input /code/calico_containers/calico-node.tar && "
                     "docker load --input /code/calico_containers/busybox.tar")
    else:
        self.ip = get_ip()
    if start_calico:
        self.start_calico_node()
        self.assert_driver_up()
def test_empty_policy_for_forward_traffic(self):
    """
    Verify that an empty policy on the host endpoints denies both local
    and forwarded traffic.
    """
    # Start from a known-good, fully connected state.
    self.test_can_connect_by_default()
    self.add_gateway_external_iface()
    self.add_gateway_internal_iface()

    # An empty policy (no ingress/egress rules) on the host endpoints;
    # applyOnForward extends the default-deny to forwarded traffic too.
    empty_forward = {
        'apiVersion': 'v1',
        'kind': 'policy',
        'metadata': {'name': 'empty-forward'},
        'spec': {
            'order': 500,
            'selector': 'has(nodeEth)',
            'ingress': [],
            'egress': [],
            'applyOnForward': True,
            'types': ['ingress', 'egress']
        }
    }
    self.add_policy(empty_forward)

    # Every path should now be blocked.
    for blocked in (self.assert_host_can_not_curl_local,
                    self.assert_gateway_can_not_curl_ext,
                    self.assert_host_can_not_curl_ext,
                    self.assert_hostwl_can_not_access_workload,
                    self.assert_workload_can_not_curl_ext):
        retry_until_success(blocked, 3)
def test_egress_policy_can_allow_through_traffic(self):
    """
    Egress-only allow (tcp/80) on the external gateway endpoint permits
    outbound traffic even though all ingress is denied.
    """
    deny_all_ingress = [
        {
            'action': 'deny',
        },
    ]
    allow_port80_egress = [
        {
            'protocol': 'tcp',
            'destination': {'ports': [80]},
            'action': 'allow'
        },
    ]
    self.add_policy({
        'apiVersion': 'v1',
        'kind': 'policy',
        'metadata': {'name': 'port80-ext'},
        'spec': {
            'order': 10,
            'ingress': deny_all_ingress,
            'egress': allow_port80_egress,
            'selector': 'role == "gateway-ext"'
        }
    })
    self.add_gateway_external_iface()
    retry_until_success(self.assert_host_can_curl_ext, 3)
def assert_driver_up(self):
    """
    Verify the Calico Docker driver is running by checking that its unix
    socket exists.
    """
    check_sock_cmd = "[ -e %s ]" % CALICO_DRIVER_SOCK
    retry_until_success(partial(self.execute, check_sock_cmd),
                        ex_class=CalledProcessError)
def __init__(self, name):
    """
    Create a dind test host running the libnetwork-enabled dind image.

    :param name: Name of the host; also used as the dind container name.
    """
    self.name = name
    # Workloads created on this host, tracked for cleanup.
    self.workloads = set()
    # This variable is used to assert on destruction that this object was
    # cleaned up. If not used as a context manager, users of this object
    self._cleaned = False

    # Remove any stale container with the same name (ignore failure).
    log_and_run("docker rm -f %s || true" % self.name)
    # BUGFIX: the original adjacent string literals joined "-tid" and
    # "-v" with no separator, producing the invalid docker option
    # "-tid-v"; a trailing space after "-tid " fixes the command.
    log_and_run("docker run --privileged -tid "
                "-v `pwd`/docker:/usr/local/bin/docker "
                "-v %s:/code --name %s "
                "calico/dind:libnetwork --cluster-store=etcd://%s:2379" %
                (os.getcwd(), self.name, utils.get_ip()))
    self.ip = log_and_run("docker inspect --format "
                          "'{{ .NetworkSettings.IPAddress }}' %s" % self.name)

    # Make sure docker is up
    docker_ps = partial(self.execute, "docker ps")
    retry_until_success(docker_ps, ex_class=CalledProcessError, retries=10)

    # Preload the images the tests need into the inner docker daemon.
    self.execute("gunzip -c /code/calico-node.tgz | docker load")
    self.execute("gunzip -c /code/busybox.tgz | docker load")
    self.execute(
        "gunzip -c /code/calico-node-libnetwork.tgz | docker load")
    self.start_calico_node()
def test_empty_policy_for_forward_traffic(self):
    """
    Verify that an empty GlobalNetworkPolicy on the host endpoints denies
    both local and forwarded traffic.
    """
    # Start from a known-good, fully connected state.
    self.test_can_connect_by_default()
    self.add_gateway_external_iface()
    self.add_gateway_internal_iface()

    # An empty policy (no ingress/egress rules) on the host endpoints;
    # applyOnForward extends the default-deny to forwarded traffic too.
    empty_forward = {
        'apiVersion': 'projectcalico.org/v3',
        'kind': 'GlobalNetworkPolicy',
        'metadata': {
            'name': 'empty-forward',
        },
        'spec': {
            'order': 500,
            'selector': 'has(nodeEth)',
            'ingress': [],
            'egress': [],
            'applyOnForward': True,
            'types': ['Ingress', 'Egress']
        }
    }
    self.add_policy(empty_forward)

    # Every path should now be blocked.
    for blocked in (self.assert_host_can_not_curl_local,
                    self.assert_gateway_can_not_curl_ext,
                    self.assert_host_can_not_curl_ext,
                    self.assert_hostwl_can_not_access_workload,
                    self.assert_workload_can_not_curl_ext):
        retry_until_success(blocked, 3)
def test_policy_only_selectors_allow_traffic(self):
    """
    Selector-only policies: workloads labelled foo=="bar" may reach
    baz=="bop"; other combinations between these workloads are denied.
    """
    allow_foo_bar_to_baz_bop = {
        'apiVersion': 'v1',
        'kind': 'policy',
        'metadata': {'name': 'allowFooBarToBazBop'},
        'spec': {
            'ingress': [
                {
                    'source': {'selector': 'foo == "bar"'},
                    'action': 'allow',
                },
            ],
            'egress': [{'action': 'deny'}],
            'selector': 'baz == "bop"'
        }
    }
    allow_foo_bar_egress = {
        'apiVersion': 'v1',
        'kind': 'policy',
        'metadata': {'name': 'allowFooBarEgress'},
        'spec': {
            'selector': 'foo == "bar"',
            'egress': [{'action': 'allow'}]
        }
    }
    self.host1.add_resource([allow_foo_bar_to_baz_bop,
                             allow_foo_bar_egress])

    def check_connectivity():
        self.assert_ip_connectivity(
            workload_list=[self.workload1_nw1_foo_bar,
                           self.workload4_nw2_foo_bar],
            ip_pass_list=[self.workload2_nw2_baz_bop.ip],
            ip_fail_list=[self.workload3_nw1_foo_bing.ip])

    retry_until_success(check_connectivity, 3)
def __init__(self, name):
    """
    Create a dind test host running the libnetwork-enabled dind image.

    :param name: Name of the host; also used as the dind container name.
    """
    self.name = name
    # Workloads created on this host, tracked for cleanup.
    self.workloads = set()
    # This variable is used to assert on destruction that this object was
    # cleaned up. If not used as a context manager, users of this object
    self._cleaned = False

    # Remove any stale container with the same name (ignore failure).
    log_and_run("docker rm -f %s || true" % self.name)
    # BUGFIX: the original adjacent string literals joined "-tid" and
    # "-v" with no separator, producing the invalid docker option
    # "-tid-v"; a trailing space after "-tid " fixes the command.
    log_and_run("docker run --privileged -tid "
                "-v `pwd`/docker:/usr/local/bin/docker "
                "-v %s:/code --name %s "
                "calico/dind:libnetwork --cluster-store=etcd://%s:2379" %
                (os.getcwd(), self.name, utils.get_ip()))
    self.ip = log_and_run("docker inspect --format "
                          "'{{ .NetworkSettings.IPAddress }}' %s" % self.name)

    # Make sure docker is up
    docker_ps = partial(self.execute, "docker ps")
    retry_until_success(docker_ps, ex_class=CalledProcessError, retries=10)

    # Preload the images the tests need into the inner docker daemon.
    self.execute("gunzip -c /code/calico-node.tgz | docker load")
    self.execute("gunzip -c /code/busybox.tgz | docker load")
    self.execute("gunzip -c /code/calico-node-libnetwork.tgz | docker load")
    self.start_calico_node()
def test_readiness(self):
    """
    A simple base case to check if calico/node becomes ready.
    """
    with DockerHost('host1',
                    additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS) as host:
        # Readiness can take a while after startup; keep polling.
        retry_until_success(host.assert_is_ready, retries=30)
def test_node_status(self):
    """
    Test that the status command can be executed.
    """
    with DockerHost('host', dind=False, start_calico=True) as host:
        # Retry: the node may need a moment to come up after start.
        run_status = lambda: host.calicoctl("node status")
        retry_until_success(run_status, retries=10, ex_class=Exception)
def test_liveness_multihost(self):
    """
    A simple base case to check if calico/node becomes live.
    """
    opts = CLUSTER_STORE_DOCKER_OPTIONS
    with DockerHost('host1', additional_docker_options=opts) as host_a, \
            DockerHost('host2', additional_docker_options=opts) as host_b:
        # Both nodes must report live.
        for node in (host_a, host_b):
            retry_until_success(node.assert_is_live, retries=30)
def test_readiness(self):
    """
    A simple base case to check if calico/node becomes ready.
    """
    readiness_cmd = "docker exec calico-node /bin/readiness -bird -felix"
    with DockerHost('host1',
                    additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS) as host:
        # Poll the in-container readiness binary until it succeeds.
        retry_until_success(host.execute, retries=10, command=readiness_cmd)
def test_liveness_env_port(self):
    """
    A simple base case to check if calico/node becomes live.
    Uses environment variable as port number.
    """
    health_env = "-e FELIX_HEALTHPORT=9011 -e FELIX_HEALTHENABLED=true"
    with DockerHost('host1',
                    additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS,
                    start_calico=False) as host:
        # Start the node manually so the health env vars take effect.
        host.start_calico_node(env_options=health_env)
        retry_until_success(host.assert_is_live, retries=30)
def _test_single_route_reflector(self, backend='bird', bgpconfig_as_num=64514, peer_as_num=64514):
    """
    Run a multi-host test using a single route reflector and global
    peering.

    :param backend: BGP backend to start calico-node with.
    :param bgpconfig_as_num: Default AS number set in BGP config (used by
        the RR mesh).
    :param peer_as_num: AS number used for the global peering to the RR.
    """
    with DockerHost('host1',
                    additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS,
                    start_calico=False) as host1, \
         DockerHost('host2',
                    additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS,
                    start_calico=False) as host2, \
         RouteReflectorCluster(1, 1) as rrc:
        # Start both hosts using specific backends.
        host1.start_calico_node("--backend=%s" % backend)
        host2.start_calico_node("--backend=%s" % backend)

        # Set the default AS number - as this is used by the RR mesh, and
        # turn off the node-to-node mesh (do this from any host).
        update_bgp_config(host1, nodeMesh=False, asNum=bgpconfig_as_num)

        # Create a workload on each host in the same network.
        network1 = host1.create_network("subnet1")
        workload_host1 = host1.create_workload("workload1", network=network1)
        workload_host2 = host2.create_workload("workload2", network=network1)

        # Allow network to converge (which it won't — the mesh is off and
        # no RR peering exists yet, so the ping must fail).
        self.assert_false(
            workload_host1.check_can_ping(workload_host2.ip, retries=5))

        # Set global config telling all calico nodes to peer with the
        # route reflector.  This can be run from either host.
        rg = rrc.get_redundancy_group()
        assert len(rg) == 1
        create_bgp_peer(host1, "global", rg[0].ip, peer_as_num)

        # Allow network to converge (which it now will).
        retry_until_success(host1.assert_is_ready, retries=30, felix=False)
        retry_until_success(host2.assert_is_ready, retries=30, felix=False)
        check_bird_status(host1, [("global", rg[0].ip, "Established")])
        check_bird_status(host2, [("global", rg[0].ip, "Established")])
        self.assert_true(
            workload_host1.check_can_ping(workload_host2.ip, retries=20))

        # And check connectivity in both directions.
        self.assert_ip_connectivity(
            workload_list=[workload_host1, workload_host2],
            ip_pass_list=[workload_host1.ip, workload_host2.ip])
def assert_ipip_routing(self, host1, workload_host1, workload_host2, expect_ipip):
    """
    Test whether IPIP is being used as expected on host1 when pinging
    workload_host2 from workload_host1.
    """
    def _check_tunl_counters():
        # Snapshot the tunnel TX counter, ping twice, then verify the
        # counter moved by exactly the pinged packet count (or not at all).
        before = self.get_tunl_tx(host1)
        workload_host1.execute("ping -c 2 -W 1 %s" % workload_host2.ip)
        expected = before + 2 if expect_ipip else before
        self.assertEqual(self.get_tunl_tx(host1), expected)

    retry_until_success(_check_tunl_counters, retries=10)
def assert_ipip_routing(self, host1, workload_host1, workload_host2, expect_ipip):
    """
    Test whether IPIP is being used as expected on host1 when pinging
    workload_host2 from workload_host1.
    """
    def check():
        orig_tx = self.get_tunl_tx(host1)
        workload_host1.execute("ping -c 2 -W 1 %s" % workload_host2.ip)
        # Use self.assertEqual rather than bare assert: bare asserts are
        # stripped under `python -O` and give no diagnostic on failure;
        # this also matches the sibling implementation of this method.
        if expect_ipip:
            self.assertEqual(self.get_tunl_tx(host1), orig_tx + 2)
        else:
            self.assertEqual(self.get_tunl_tx(host1), orig_tx)
    retry_until_success(check, retries=10)
def test_no_policy_allows_no_traffic(self):
    """
    With no policy configured, no workload can reach any other workload.
    """
    everyone = [self.workload1_nw1_foo_bar,
                self.workload2_nw2_baz_bop,
                self.workload3_nw1_foo_bing,
                self.workload4_nw2_foo_bar]
    # Each workload in turn is the (unreachable) target; the senders are
    # the remaining three, in their original declaration order.
    for target in (everyone[3], everyone[0], everyone[1], everyone[2]):
        senders = [w for w in everyone if w is not target]
        retry_until_success(
            lambda t=target, s=senders: self.assert_ip_connectivity(
                workload_list=s,
                ip_pass_list=[],
                ip_fail_list=[t.ip]),
            2)
def test_default_deny_for_local_traffic(self):
    """
    Test default deny for local traffic after host endpoint been created.
    """
    self.test_can_connect_by_default()
    self.add_gateway_external_iface()
    self.add_gateway_internal_iface()
    # Local traffic to the new host endpoints is denied by default;
    # forwarded/workload traffic is unaffected.
    for expectation in (self.assert_host_can_not_curl_local,
                        self.assert_gateway_can_not_curl_ext,
                        self.assert_host_can_curl_ext,
                        self.assert_hostwl_can_access_workload,
                        self.assert_workload_can_curl_ext):
        retry_until_success(expectation, 3)
def test_liveness_bird_down(self):
    """
    Simulate bird service to be down and verify the bird liveness check
    reports failure.
    """
    with DockerHost('host1',
                    additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS
                    ) as host1:
        # Wait for the node to be fully up before breaking it.
        retry_until_success(host1.assert_is_ready, retries=30)
        # Stop the bird service inside the calico-node container.
        host1.execute(
            "docker exec -it calico-node sv stop /etc/service/enabled/bird"
        )
        # Check that the bird liveness check now fails with the expected
        # message.  (The message text itself says "not ready" — that is
        # the binary's own wording, matched verbatim here.)
        self.assertRaisesRegexp(
            CalledProcessError,
            "calico/node is not ready: bird/confd is not live: Service bird is not running.",
            host1.execute,
            "docker exec calico-node /bin/calico-node -bird-live")
def test_conflicting_ingress_and_egress_policy(self, in_action, out_action):
    """
    When policy exists on both the ingress and egress interfaces, both
    are enforced, so a 'deny' on either side wins.
    """
    self.add_host_iface()
    self.add_gateway_external_iface()
    self.add_gateway_internal_iface()

    int_policy = {
        'apiVersion': 'v1',
        'kind': 'policy',
        'metadata': {'name': 'port80-int'},
        'spec': {
            'order': 10,
            'ingress': [
                {'action': in_action},
            ],
            'egress': [],
            'selector': 'role == "gateway-int"'
        }
    }
    ext_policy = {
        'apiVersion': 'v1',
        'kind': 'policy',
        'metadata': {'name': 'port80-ext'},
        'spec': {
            'order': 10,
            'ingress': [],
            'egress': [
                {'action': out_action},
            ],
            'selector': 'role == "gateway-ext"'
        }
    }
    for policy in (int_policy, ext_policy):
        self.add_policy(policy)

    retry_until_success(self.assert_host_can_not_curl_ext, 3)
def test_local_forward_opposite_policy_1(self):
    """
    Test local and forward got opposite allow/deny rules.
    """
    self.test_can_connect_by_default()
    self.add_gateway_external_iface()
    self.add_gateway_internal_iface()

    # Local: ingress Deny / egress Allow.
    # Forward (higher order value): ingress Allow / egress Deny.
    for order, action, forward in ((200, 'Deny', False),
                                   (500, 'Allow', True)):
        self.add_ingress_policy(order, action, forward)
    for order, action, forward in ((200, 'Allow', False),
                                   (500, 'Deny', True)):
        self.add_egress_policy(order, action, forward)

    for expectation in (self.assert_host_can_not_curl_local,
                        self.assert_gateway_can_curl_ext,
                        self.assert_host_can_not_curl_ext,
                        self.assert_hostwl_can_access_workload,
                        self.assert_workload_can_not_curl_ext):
        retry_until_success(expectation, 3)
def test_add_container(self):
    """
    Test adding container to calico networking after it exists.
    """
    with DockerHost('host', dind=False) as host:
        # Start a --net=none workload, then attach a calico interface to
        # it so felix programs a route.
        workload = host.create_workload("node", network=NET_NONE)
        host.calicoctl("container add %s 192.168.1.1" % workload)

        # Create a profile and attach it to the workload's endpoint so
        # felix will pick it up.
        host.calicoctl("profile add TEST_GROUP")
        endpoint_id = host.calicoctl("container %s endpoint-id show" % workload)
        host.calicoctl("endpoint %s profile set TEST_GROUP" % endpoint_id)

        # Wait for felix to program down the route.
        check_route = partial(host.execute,
                              "ip route | grep '192\.168\.1\.1'")
        retry_until_success(check_route, ex_class=CalledProcessError)
def test_local_forward_opposite_policy_1(self):
    """
    Test local and forward got opposite allow/deny rules.
    """
    self.test_can_connect_by_default()
    self.add_gateway_external_iface()
    self.add_gateway_internal_iface()

    # Local: ingress deny / egress allow.
    # Forward (higher order value): ingress allow / egress deny.
    for order, action, forward in ((200, 'deny', False),
                                   (500, 'allow', True)):
        self.add_ingress_policy(order, action, forward)
    for order, action, forward in ((200, 'allow', False),
                                   (500, 'deny', True)):
        self.add_egress_policy(order, action, forward)

    for expectation in (self.assert_host_can_not_curl_local,
                        self.assert_gateway_can_curl_ext,
                        self.assert_host_can_not_curl_ext,
                        self.assert_hostwl_can_access_workload,
                        self.assert_workload_can_not_curl_ext):
        retry_until_success(expectation, 3)
def test_ingress_policy_can_allow_through_traffic(self):
    """
    Ingress allow (tcp/80) on the internal gateway endpoint lets the host
    reach the external server even though egress is denied.
    """
    allow_port80_ingress = [
        {
            'protocol': 'tcp',
            'destination': {'ports': [80]},
            'action': 'allow'
        },
    ]
    deny_all_egress = [
        {'action': 'deny'},
    ]
    self.add_policy({
        'apiVersion': 'v1',
        'kind': 'policy',
        'metadata': {'name': 'port80-int'},
        'spec': {
            'order': 10,
            'ingress': allow_port80_ingress,
            'egress': deny_all_egress,
            'selector': 'role == "gateway-int"'
        }
    })
    self.add_gateway_internal_iface()
    retry_until_success(self.assert_host_can_curl_ext, 3)
def test_conflicting_ingress_and_egress_policy(self, in_action, out_action):
    """
    When policy exists on both the ingress and egress interfaces, both
    are enforced, so a 'deny' on either side wins.
    """
    self.add_host_iface()
    self.add_gateway_external_iface()
    self.add_gateway_internal_iface()

    ingress_side = {
        'apiVersion': 'v1',
        'kind': 'policy',
        'metadata': {'name': 'port80-int'},
        'spec': {
            'order': 10,
            'ingress': [
                {'action': in_action},
            ],
            'egress': [],
            'selector': 'role == "gateway-int"'
        }
    }
    egress_side = {
        'apiVersion': 'v1',
        'kind': 'policy',
        'metadata': {'name': 'port80-ext'},
        'spec': {
            'order': 10,
            'ingress': [],
            'egress': [
                {'action': out_action},
            ],
            'selector': 'role == "gateway-ext"'
        }
    }
    for policy in (ingress_side, egress_side):
        self.add_policy(policy)

    retry_until_success(self.assert_host_can_not_curl_ext, 3)
def __init__(self, host, name, driver="calico", ipam_driver="calico-ipam",
             subnet=None):
    """
    Create the network.

    :param host: The Docker Host which creates the network.
    :param name: The name of the network.  This must be unique per
        cluster and is the user-facing identifier for the network.
        (Calico itself will get a UUID for the network via the driver
        API and will not get the name.)
    :param driver: The name of the network driver to use.  (The Calico
        driver is the default.)
    :param ipam_driver: The name of the IPAM driver to use, or None to
        use the default driver.
    :param subnet: The subnet IP pool to assign IPs from.
    :return: A DockerNetwork object.
    """
    self.name = name
    self.driver = driver
    self.deleted = False
    # The host which created the network.
    self.init_host = host

    driver_option = "--driver %s" % driver if driver else ""
    ipam_option = "--ipam-driver %s" % ipam_driver if ipam_driver else ""
    subnet_option = "--subnet %s" % subnet if subnet else ""

    # If a network with this name already exists, remove it first so the
    # create below starts clean.
    try:
        host.execute("docker network inspect %s" % name)
        # Network exists - delete it
        host.execute("docker network rm " + name)
    except CommandExecError:
        # Network didn't exist, no problem.
        pass

    # Create the network,
    create_cmd = "docker network create %s %s %s %s" % \
                 (driver_option, ipam_option, subnet_option, name)
    self.uuid = retry_until_success(partial(host.execute, create_cmd))
def __init__(self, host, name, driver="calico", ipam_driver="calico-ipam",
             subnet=None):
    """
    Create the network.

    :param host: The Docker Host which creates the network.
    :param name: The name of the network.  This must be unique per
        cluster and is the user-facing identifier for the network.
        (Calico itself will get a UUID for the network via the driver
        API and will not get the name.)
    :param driver: The name of the network driver to use.  (The Calico
        driver is the default.)
    :param ipam_driver: The name of the IPAM driver to use, or None to
        use the default driver.
    :param subnet: The subnet IP pool to assign IPs from.
    :return: A DockerNetwork object.
    """
    self.name = name
    self.driver = driver
    self.deleted = False
    # The host which created the network.
    self.init_host = host

    driver_option = "--driver %s" % driver if driver else ""
    ipam_option = "--ipam-driver %s" % ipam_driver if ipam_driver else ""
    subnet_option = "--subnet %s" % subnet if subnet else ""

    # Create the network, if this fails - attempt deletion and then
    # try again.
    create_cmd = "docker network create %s %s %s %s" % \
                 (driver_option, ipam_option, subnet_option, name)
    try:
        self.uuid = retry_until_success(partial(host.execute, create_cmd))
    except CommandExecError:
        host.execute("docker network rm " + name,
                     raise_exception_on_failure=False)
        self.uuid = host.execute(create_cmd)
def test_can_connect_by_default(self):
    """
    Test if traffic is allowed with no policy setup.
    """
    for expectation in (self.assert_host_can_curl_local,
                        self.assert_gateway_can_curl_ext,
                        self.assert_host_can_curl_ext,
                        self.assert_hostwl_can_access_workload,
                        self.assert_workload_can_curl_ext):
        retry_until_success(expectation, 3)

    self.add_host_iface()

    # Adding the host endpoints should break connectivity until we add policy back in.
    # Add allow policy for host, make sure it applies to forward and has order lower than
    # empty forward.
    self.add_policy({
        'apiVersion': 'v1',
        'kind': 'policy',
        'metadata': {'name': 'host-out'},
        'spec': {
            'order': 100,
            'selector': 'nodeEth == "host"',
            'egress': [{'action': 'allow'}],
            'ingress': [{'action': 'allow'}],
            'applyOnForward': True,
        }
    })
    retry_until_success(self.assert_host_can_curl_ext, 3)
def test_local_allow_with_forward_empty(self):
    """
    Test local allow does not affect forward traffic with empty policy.
    """
    self.test_empty_policy_for_forward_traffic()

    # Local-only allow: local traffic recovers but forwarded traffic
    # stays blocked by the empty forward policy.
    self.add_ingress_policy(200, 'allow', False)
    self.add_egress_policy(200, 'allow', False)
    for expectation in (self.assert_host_can_curl_local,
                        self.assert_gateway_can_curl_ext,
                        self.assert_host_can_not_curl_ext,
                        self.assert_hostwl_can_not_access_workload,
                        self.assert_workload_can_not_curl_ext):
        retry_until_success(expectation, 3)

    # Allow applied to forwarded traffic as well: everything recovers.
    self.add_ingress_policy(200, 'allow', True)
    self.add_egress_policy(200, 'allow', True)
    for expectation in (self.assert_host_can_curl_local,
                        self.assert_gateway_can_curl_ext,
                        self.assert_host_can_curl_ext,
                        self.assert_hostwl_can_access_workload,
                        self.assert_workload_can_curl_ext):
        retry_until_success(expectation, 3)
def test_node_status_resilience(self, test_host, pid_name):
    """
    Test that newly restarted BGP backend processes consistently
    transition to an Established state.

    Test using different BGP backends.  We run a multi-host test for this
    to test peering between two gobgp backends and a single BIRD backend.

    :param test_host: Index (0-2) of the host whose BGP process is
        killed each iteration.
    :param pid_name: Name of the BGP backend process to pkill.
    """
    with DockerHost('host1',
                    additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS,
                    start_calico=False) as host1, \
         DockerHost('host2',
                    additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS,
                    start_calico=False) as host2, \
         DockerHost('host3',
                    additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS,
                    start_calico=True) as host3:
        # Set the default AS number.
        update_bgp_config(host1, asNum=LARGE_AS_NUM)

        # Start host1 using the inherited AS, and host2 using a specified
        # AS (same as default).  These hosts use the gobgp backend,
        # whereas host3 uses BIRD.
        host1.start_calico_node("--backend=gobgp")
        host2.start_calico_node("--backend=gobgp --as=%s" % LARGE_AS_NUM)

        # Create a network and a couple of workloads on each host.
        network1 = host1.create_network("subnet1")
        workload_host1 = host1.create_workload("workload1", network=network1)
        workload_host2 = host2.create_workload("workload2", network=network1)
        workload_host3 = host3.create_workload("workload3", network=network1)

        # Allow network to converge
        self.assert_true(
            workload_host1.check_can_ping(workload_host2.ip, retries=10))

        # Check connectivity in both directions
        self.assert_ip_connectivity(
            workload_list=[workload_host1, workload_host2, workload_host3],
            ip_pass_list=[
                workload_host1.ip, workload_host2.ip, workload_host3.ip
            ])

        hosts = [host1, host2, host3]
        workloads = [workload_host1, workload_host2, workload_host3]

        _log.debug("==== docker exec -it calico-node ps -a ====")
        _log.debug(
            hosts[test_host].execute("docker exec -it calico-node ps -a"))

        # Check the BGP status on the BIRD/GoBGP host.
        def check_connected():
            # Every host should have an Established mesh session to each
            # of the other two hosts.
            for target in hosts:
                expected = [("node-to-node mesh", h.ip, "Established")
                            for h in hosts if h is not target]
                _log.debug("expected : %s", expected)
                check_bird_status(target, expected)

        def delete_workload(host, host_workload):
            # Release the workload's IP and remove its container.
            host.calicoctl("ipam release --ip=%s" % host_workload.ip)
            host.execute("docker rm -f %s" % host_workload.name)
            host.workloads.remove(host_workload)

        def pid_parse(pid_str):
            # pgrep output may contain several CRLF-separated pids.
            if '\r\n' in pid_str:
                pid_list = pid_str.split('\r\n')
                return pid_list
            else:
                return [pid_str]

        iterations = 3
        for iteration in range(1, iterations + 1):
            _log.debug("Iteration %s", iteration)
            _log.debug("Host under test: %s", hosts[test_host].name)
            _log.debug("Identify and pkill process: %s", pid_name)
            pre_pkill = hosts[test_host].execute(
                "docker exec -it calico-node pgrep %s" % pid_name)
            pre_pkill_list = pid_parse(pre_pkill)
            _log.debug("Pre pkill list: %s", pre_pkill_list)
            hosts[test_host].execute(
                "docker exec -it calico-node pkill %s" % pid_name)
            _log.debug('check connected and retry until "Established"')
            retry_until_success(check_connected, retries=20, ex_class=Exception)
            post_pkill = hosts[test_host].execute(
                "docker exec -it calico-node pgrep %s" % pid_name)
            post_pkill_list = pid_parse(post_pkill)
            _log.debug("Post pkill list: %s", post_pkill_list)
            # The process must have been restarted (new pid(s)).
            assert pre_pkill_list != post_pkill_list, "The pids should not be the same after pkill"

            new_workloads = []
            for workload in workloads:
                new_workload = "%s_%s" % (workload, iteration)
                new_workloads.append(new_workload)

            # create new workloads
            index = 0
            for new_workload in new_workloads:
                new_workload = hosts[index].create_workload(
                    new_workload, network=network1)
                _log.debug("host: %s and workload: %s",
                           hosts[index].name, new_workload.name)
                # Wait for the workload to be networked.
                self.assert_true(
                    new_workload.check_can_ping(workload_host3.ip, retries=10))

                # Check connectivity in both directions
                self.assert_ip_connectivity(workload_list=[
                    workload_host1, workload_host2, workload_host3,
                    new_workload
                ], ip_pass_list=[
                    workload_host1.ip, workload_host2.ip, workload_host3.ip,
                    new_workload.ip
                ])
                delete_workload(hosts[index], new_workload)
                index += 1
def test_local_deny_with_lower_forward_allow(self):
    """
    Test local deny with lower order does not affect forward allow policy.
    """
    self.test_empty_policy_for_forward_traffic()  # setup a deny for all traffic

    # Allow on local and forward: everything works again.
    self.add_ingress_policy(300, 'allow', True)
    self.add_egress_policy(300, 'allow', True)
    for expectation in (self.assert_host_can_curl_local,
                        self.assert_gateway_can_curl_ext,
                        self.assert_host_can_curl_ext,
                        self.assert_hostwl_can_access_workload,
                        self.assert_workload_can_curl_ext):
        retry_until_success(expectation, 3)

    # Local-only deny at a lower order: local traffic blocked, forwarded
    # traffic still allowed by the forward policy above.
    self.add_ingress_policy(200, 'deny', False)
    self.add_egress_policy(200, 'deny', False)
    for expectation in (self.assert_host_can_not_curl_local,
                        self.assert_gateway_can_not_curl_ext,
                        self.assert_host_can_curl_ext,
                        self.assert_hostwl_can_access_workload,
                        self.assert_workload_can_curl_ext):
        retry_until_success(expectation, 3)
def setUpClass(cls):
    """
    One-time fixture: wipe etcd, create two calico hosts, and populate
    them with nine workloads across three namespaces.
    """
    # Wipe etcd once before any test in this class runs.
    _log.debug("Wiping etcd")
    wipe_etcd(HOST_IPV4)

    # We set up 2 hosts on top of which running nine workloads in three namespaces.
    # Host1 has 5 workloads.
    # 2 in namespace nsa: [nsa_h1_wl0] [nsa_h1_wl1]
    # 1 in namespace nsb: [nsb_h1_wl0]
    # 2 in default namespace: [default_h1_wl0] [omit_h1_wl0]
    # *omit* means 'namespace' field is not specified during workload setup.
    #
    # Host2 has 4 workloads.
    # 1 in namespace nsa: [nsa_h2_wl0]
    # 2 in namespace nsb: [nsb_h2_wl0] [nsb_h2_wl1]
    # 1 in namespace default: [default_h2_wl0]
    #
    # Global network policies and network policies then apply on namespaced
    # workload endpoints with mixed orders. The test checks connectivity of
    # 4 workloads [nsa_h1_wl0, nsb_h2_wl0, default_h1_wl0, omit_h1_wl0] from
    # other workloads.

    # Create two hosts.
    cls.hosts = []
    cls.host1 = DockerHost("cali-host1",
                           additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS,
                           post_docker_commands=POST_DOCKER_COMMANDS,
                           start_calico=False)
    cls.host1_hostname = cls.host1.execute("hostname")
    cls.host2 = DockerHost("cali-host2",
                           additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS,
                           post_docker_commands=POST_DOCKER_COMMANDS,
                           start_calico=False)
    cls.host2_hostname = cls.host2.execute("hostname")
    cls.hosts.append(cls.host1)
    cls.hosts.append(cls.host2)

    # Start calico node on hosts.
    for host in cls.hosts:
        host.start_calico_node(env_options=" -e FELIX_HEALTHENABLED=true ")
    handle_failure(lambda: retry_until_success(cls.host1.assert_is_ready, retries=20))
    handle_failure(lambda: retry_until_success(cls.host2.assert_is_ready, retries=20))

    # Prepare namespace profile so that we can use namespaceSelector for non-k8s deployment.
    # CNI will use the existing profile which is setup here instead of creating its own.
    cls.add_ns_profile('nsa')
    cls.add_ns_profile('nsb')
    cls.add_ns_profile('default')

    # Create calico network.
    cls.calinet = cls.host1.create_network("calinet")

    # Create workloads for host1
    # For CNI, network is used for cni_name but nothing else.
    # We set network to same value as namespace name to let cni program a
    # namespace profile for us.
    cls.nsa_wl = cls.host1.create_workload(
        "nsa_h1_wl0",
        image="workload",
        network="nsa",
        labels=["wep=nsa_h1_wl0"],
        namespace="nsa")
    cls.host1.create_workload(
        "nsa_h1_wl1",
        image="workload",
        network="nsa",
        labels=["wep=nsa_h1_wl1"],
        namespace="nsa")
    cls.host1.create_workload(
        "nsb_h1_wl0",
        image="workload",
        network="nsb",
        labels=["wep=nsb_h1_wl0"],
        namespace="nsb")
    cls.default_wl = cls.host1.create_workload(
        "default_h1_wl0",
        image="workload",
        network="default",
        labels=["wep=default_h1_wl0"],
        namespace="default")
    # Workload with no namespace specified ("omit").
    cls.omit_wl = cls.host1.create_workload(
        "omit_h1_wl0",
        image="workload",
        network="default",
        labels=["wep=omit_h1_wl0"],
        namespace=None)

    # Create workloads for host2
    cls.nsb_wl = cls.host2.create_workload(
        "nsb_h2_wl0",
        image="workload",
        network="nsb",
        labels=["wep=nsb_h2_wl0"],
        namespace="nsb")
    cls.host2.create_workload(
        "nsb_h2_wl1",
        image="workload",
        network="nsb",
        labels=["wep=nsb_h2_wl1"],
        namespace="nsb")
    cls.host2.create_workload(
        "nsa_h2_wl0",
        image="workload",
        network="nsa",
        labels=["wep=nsa_h2_wl0"],
        namespace="nsa")
    cls.host2.create_workload(
        "default_h2_wl0",
        image="workload",
        network="default",
        labels=["wep=default_h2_wl0"],
        namespace="default")

    # Work out workload set for different namespaces.
    cls.all_workloads = cls.host1.workloads.union(cls.host2.workloads)
    cls.wl_nsa = filter(lambda x: x.namespace == "nsa", cls.all_workloads)
    cls.wl_nsb = filter(lambda x: x.namespace == "nsb", cls.all_workloads)
    # "omit" workloads (namespace is None) count as default.
    cls.wl_default = filter(lambda x: x.namespace == "default" or x.namespace is None,
                            cls.all_workloads)

    # Register per-host diagnostics to run if a test fails.
    clear_on_failures()
    add_on_failure(cls.host1.log_extra_diags)
    add_on_failure(cls.host2.log_extra_diags)
def test_host_endpoint_combinations(self):
    """
    Test combinations of untracked, preDNAT, normal and forward policies.

    Exercises the policy precedence chain on the gateway's host endpoints:
    untracked policy bypasses conntrack/preDNAT/normal/forward; preDNAT
    policy, when it accepts, skips normal and forward policy.
    """
    self.test_can_connect_by_default()
    self.add_gateway_external_iface()
    self.add_gateway_internal_iface()

    # Test untracked policy.
    self.add_untrack_gw_int(500, 'allow')
    self.add_untrack_gw_ext(500, 'allow')
    retry_until_success(self.assert_host_can_curl_local, 3)
    retry_until_success(self.assert_gateway_can_curl_ext, 3)
    # Untracked packets skip masquerade rule for packet from host
    # via gateway to ext server.
    retry_until_success(self.assert_host_can_not_curl_ext, 3)
    # Conntrack state invalid, default workload policy will drop packet.
    retry_until_success(self.assert_hostwl_can_not_access_workload, 3)
    # Packet from workload will be masqueraded by cali-nat-outgoing. It
    # can reach external server but return packet will be dropped by not having
    # a conntrack entry to do a reverse SNAT.
    retry_until_success(self.assert_workload_can_not_curl_ext, 3)

    # Configure external server to use gateway as default gateway.
    # So we dont need to masquerade internal ip.
    # External server sees internal ip and knows how to send response
    # back.
    self.set_ext_container_default_route("cali-st-ext-nginx")
    retry_until_success(self.assert_host_can_curl_ext, 3)

    # Remove the untracked policies before testing normal policy.
    self.del_untrack_gw_int()
    self.del_untrack_gw_ext()

    # Deny host endpoint ingress.
    # Ingress packet dropped. Egress packet accepted.
    self.add_ingress_policy(200, 'deny', True)
    self.add_egress_policy(200, 'allow', True)
    retry_until_success(self.assert_host_can_not_curl_local, 3)
    retry_until_success(self.assert_host_can_not_curl_ext, 3)
    retry_until_success(self.assert_hostwl_can_not_access_workload, 3)
    retry_until_success(self.assert_gateway_can_curl_ext, 3)
    retry_until_success(self.assert_workload_can_curl_ext, 3)

    # Skip normal and forward policy if preDNAT policy accept packet.
    self.add_prednat_ingress(500, 'allow')
    retry_until_success(self.assert_host_can_curl_local, 3)
    retry_until_success(self.assert_host_can_curl_ext, 3)
    retry_until_success(self.assert_hostwl_can_access_workload, 3)
    retry_until_success(self.assert_gateway_can_curl_ext, 3)
    retry_until_success(self.assert_workload_can_curl_ext, 3)

    # A lower-order (higher-priority) preDNAT deny overrides the allow above.
    self.add_prednat_ingress(200, 'deny')
    retry_until_success(self.assert_host_can_not_curl_local, 3)
    retry_until_success(self.assert_host_can_not_curl_ext, 3)
    retry_until_success(self.assert_hostwl_can_not_access_workload, 3)
    retry_until_success(self.assert_gateway_can_curl_ext, 3)
    retry_until_success(self.assert_workload_can_curl_ext, 3)

    # Skip preDNAT, normal and forward policy if untracked policy accept packet.
    self.add_untrack_gw_int(500, 'allow')
    retry_until_success(self.assert_host_can_curl_local, 3)
    retry_until_success(self.assert_gateway_can_curl_ext, 3)
    retry_until_success(self.assert_host_can_not_curl_ext, 3)

    # We need to add egress allow because if host send request to external server,
    # return traffic will not match any conntrack entry hence been dropped by
    # cali-fhfw-eth1. An untracked egress allow skips normal forward policy.
    self.add_untrack_gw_ext(500, 'allow')
    # Traffic to/from workload will be dropped by workload default policy
    # since conntrack entry is invalid.
    retry_until_success(self.assert_host_can_curl_ext, 3)
    # Traffic to/from workload will be dropped by workload default policy
    # since conntrack entry is invalid.
    retry_until_success(self.assert_hostwl_can_not_access_workload, 3)
    retry_until_success(self.assert_workload_can_not_curl_ext, 3)
def test_ingress_and_egress_policy_can_allow_through_traffic(self): self.add_gateway_external_iface() self.add_gateway_internal_iface() self.add_host_iface() # Adding the host endpoints should break connectivity until we add policy back in. retry_until_success(self.assert_host_can_not_curl_ext, 3) # Add in the policy... self.add_policy({ 'apiVersion': 'v1', 'kind': 'policy', 'metadata': {'name': 'host-out'}, 'spec': { 'order': 10, 'selector': 'role == "host"', 'egress': [{'action': 'allow'}], 'ingress': [{'action': 'allow'}], } }) self.add_policy({ 'apiVersion': 'v1', 'kind': 'policy', 'metadata': {'name': 'port80-int'}, 'spec': { 'order': 10, 'ingress': [ { 'protocol': 'tcp', 'destination': { 'ports': [80], 'net': self.ext_server_ip + "/32", }, 'source': { 'selector': 'role == "host"', }, 'action': 'allow' }, ], 'egress': [], 'selector': 'role == "gateway-int"' } }) self.add_policy({ 'apiVersion': 'v1', 'kind': 'policy', 'metadata': {'name': 'port80-ext'}, 'spec': { 'order': 10, 'ingress': [], 'egress': [ { 'protocol': 'tcp', 'destination': { 'ports': [80], 'net': self.ext_server_ip + "/32", }, 'source': { 'selector': 'role == "host"', }, 'action': 'allow' }, ], 'selector': 'role == "gateway-ext"' } }) retry_until_success(self.assert_host_can_curl_ext, 3)
def wait_for_node_log(self, expected_log): check = functools.partial(self.assert_calico_node_log_contains, expected_log) retry_until_success(check, 5, ex_class=AssertionError)
def remove_pol_and_endpoints(self): self.delete_all("pol") self.delete_all("hostEndpoint") # Wait for felix to remove the policy and allow traffic through the gateway. retry_until_success(self.assert_host_can_curl_ext)
def test_local_egress_allow_with_lower_egress_forward_deny(self): """ Test local egress allow does not affect forward egress deny with lower order. """ self.test_can_connect_by_default() self.add_gateway_external_iface() self.add_gateway_internal_iface() # Add local egress allow and forward egress deny self.add_egress_policy(200, 'Allow', False) self.add_egress_policy(500, 'Deny', True) retry_until_success(self.assert_host_can_not_curl_local, 3) retry_until_success(self.assert_gateway_can_curl_ext, 3) retry_until_success(self.assert_host_can_not_curl_ext, 3) retry_until_success(self.assert_hostwl_can_access_workload, 3) retry_until_success(self.assert_workload_can_not_curl_ext, 3) # Add workload ingress deny self.add_workload_ingress(800, 'Deny') retry_until_success(self.assert_host_can_not_curl_local, 3) retry_until_success(self.assert_gateway_can_curl_ext, 3) retry_until_success(self.assert_host_can_not_curl_ext, 3) retry_until_success(self.assert_hostwl_can_not_access_workload, 3) retry_until_success(self.assert_workload_can_not_curl_ext, 3)
def test_disable_bgp_export(self):
    """
    Verify that disableBGPExport in an IP pool makes bird not export it correctly.

    Creates three /24 pools (one with disableBGPExport=true, one explicitly
    false, one defaulted) and inspects bird's export/noexport route lists
    for the BGP session towards host2.
    """
    with DockerHost('host1',
                    additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS) as host1, \
            DockerHost('host2',
                       additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS) as host2:
        # Wait until both hosts are ready
        retry_until_success(host1.assert_is_ready, retries=30)
        retry_until_success(host2.assert_is_ready, retries=30)

        # Create IPPool pool1 with disableBGPExport=true
        pool1 = {
            'apiVersion': 'projectcalico.org/v3',
            'kind': 'IPPool',
            'metadata': {
                'name': 'ippool-name-1'
            },
            'spec': {
                'cidr': '192.168.1.0/24',
                'ipipMode': 'Always',
                'disableBGPExport': True
            },
        }
        host1.writejson("pool1.json", pool1)
        host1.calicoctl("create -f pool1.json")

        # Create IPPool pool2 with explicit disableBGPExport=false
        pool2 = {
            'apiVersion': 'projectcalico.org/v3',
            'kind': 'IPPool',
            'metadata': {
                'name': 'ippool-name-2'
            },
            'spec': {
                'cidr': '192.168.2.0/24',
                'ipipMode': 'Always',
                'disableBGPExport': False
            },
        }
        host1.writejson("pool2.json", pool2)
        host1.calicoctl("create -f pool2.json")

        # Create IPPool pool3 with no disableBGPExport (false is the default)
        pool3 = {
            'apiVersion': 'projectcalico.org/v3',
            'kind': 'IPPool',
            'metadata': {
                'name': 'ippool-name-3'
            },
            'spec': {
                'cidr': '192.168.3.0/24',
                'ipipMode': 'Always'
            },
        }
        host1.writejson("pool3.json", pool3)
        host1.calicoctl("create -f pool3.json")

        # Create one workload on each IP pool.
        # NOTE(review): the returned handles are unused; the workloads
        # presumably exist only so bird has routes to advertise — confirm.
        network1 = host1.create_network("subnet1")
        workload1 = host1.create_workload("workload1", network=network1,
                                          ip='192.168.1.1')
        workload2 = host1.create_workload("workload2", network=network1,
                                          ip='192.168.2.1')
        workload3 = host1.create_workload("workload3", network=network1,
                                          ip='192.168.3.1')

        # host2's name in host1's bird cfg is Mesh_xxx_xxx_xxx_xxx (based on its IP address)
        nameHost2 = 'Mesh_' + host2.ip.replace('.', '_')

        def _get_re_from_pool(pool):
            """
            Get a regex for blocks in 'birdcl show route' output from an
            IP pool CIDR.
            """
            # Strip the mask, then the last octet, leaving e.g. '192.168.1';
            # the regex then matches any blackhole block inside that /24.
            no_mask = pool.rsplit('/', 1)[0]
            no_last_octet = no_mask.rsplit('.', 1)[0]
            regex = re.escape(
                no_last_octet) + r'\.\d{1,3}/\d{1,2}\s+blackhole'
            return regex

        # Verify that pool2 and pool3 are exported and pool1 is not
        output = host1.execute(
            "docker exec calico-node birdcl show route export %s" % nameHost2)
        for pool in ['192.168.2.0/24', '192.168.3.0/24']:
            self.assertRegexpMatches(
                output, _get_re_from_pool(pool),
                "pool '%s' should be present in 'birdcl show route export' output" % pool)
        for pool in ['192.168.1.0/24']:
            self.assertNotRegexpMatches(
                output, _get_re_from_pool(pool),
                "pool '%s' should not be present in 'birdcl show route export' output" % pool)

        # Verify that pool1 is filtered from being exported and pool2 and pool3 are not
        output = host1.execute(
            "docker exec calico-node birdcl show route noexport %s" % nameHost2)
        for pool in ['192.168.1.0/24']:
            self.assertRegexpMatches(
                output, _get_re_from_pool(pool),
                "pool '%s' should be present in 'birdcl show route noexport' output" % pool)
        for pool in ['192.168.2.0/24', '192.168.3.0/24']:
            self.assertNotRegexpMatches(
                output, _get_re_from_pool(pool),
                "pool '%s' should not be present in 'birdcl show route noexport' output" % pool)
def test_host_endpoint_combinations(self):
    """
    Test combinations of untracked, preDNAT, normal and forward policies.

    Capitalized action names ('Allow'/'Deny') — the v3-API variant of this
    scenario. Untracked policy bypasses conntrack and all later tiers;
    an accepting preDNAT policy skips normal and forward policy.
    """
    self.test_can_connect_by_default()
    self.add_gateway_external_iface()
    self.add_gateway_internal_iface()

    # Test untracked policy.
    self.add_untrack_gw_int(500, 'Allow')
    self.add_untrack_gw_ext(500, 'Allow')
    retry_until_success(self.assert_host_can_curl_local, 3)
    retry_until_success(self.assert_gateway_can_curl_ext, 3)
    # Untracked packets skip masquerade rule for packet from host
    # via gateway to ext server.
    retry_until_success(self.assert_host_can_not_curl_ext, 3)
    # Conntrack state invalid, default workload policy will drop packet.
    retry_until_success(self.assert_hostwl_can_not_access_workload, 3)
    # Packet from workload will be masqueraded by cali-nat-outgoing. It
    # can reach external server but return packet will be dropped by not having
    # a conntrack entry to do a reverse SNAT.
    retry_until_success(self.assert_workload_can_not_curl_ext, 3)

    # Configure external server to use gateway as default gateway.
    # So we dont need to masquerade internal ip.
    # External server sees internal ip and knows how to send response
    # back.
    self.set_ext_container_default_route("cali-st-ext-nginx")
    retry_until_success(self.assert_host_can_curl_ext, 3)

    # Remove the untracked policies before testing normal policy.
    self.del_untrack_gw_int()
    self.del_untrack_gw_ext()

    # Deny host endpoint ingress.
    # Ingress packet dropped. Egress packet accepted.
    self.add_ingress_policy(200, 'Deny', True)
    self.add_egress_policy(200, 'Allow', True)
    retry_until_success(self.assert_host_can_not_curl_local, 3)
    retry_until_success(self.assert_host_can_not_curl_ext, 3)
    retry_until_success(self.assert_hostwl_can_not_access_workload, 3)
    retry_until_success(self.assert_gateway_can_curl_ext, 3)
    retry_until_success(self.assert_workload_can_curl_ext, 3)

    # Skip normal and forward policy if preDNAT policy accept packet.
    self.add_prednat_ingress(500, 'Allow')
    retry_until_success(self.assert_host_can_curl_local, 3)
    retry_until_success(self.assert_host_can_curl_ext, 3)
    retry_until_success(self.assert_hostwl_can_access_workload, 3)
    retry_until_success(self.assert_gateway_can_curl_ext, 3)
    retry_until_success(self.assert_workload_can_curl_ext, 3)

    # A lower-order (higher-priority) preDNAT deny overrides the allow above.
    self.add_prednat_ingress(200, 'Deny')
    retry_until_success(self.assert_host_can_not_curl_local, 3)
    retry_until_success(self.assert_host_can_not_curl_ext, 3)
    retry_until_success(self.assert_hostwl_can_not_access_workload, 3)
    retry_until_success(self.assert_gateway_can_curl_ext, 3)
    retry_until_success(self.assert_workload_can_curl_ext, 3)

    # Skip preDNAT, normal and forward policy if untracked policy accept packet.
    self.add_untrack_gw_int(500, 'Allow')
    retry_until_success(self.assert_host_can_curl_local, 3)
    retry_until_success(self.assert_gateway_can_curl_ext, 3)
    retry_until_success(self.assert_host_can_not_curl_ext, 3)

    # We need to add egress allow because if host send request to external server,
    # return traffic will not match any conntrack entry hence been dropped by
    # cali-fhfw-eth1. An untracked egress allow skips normal forward policy.
    self.add_untrack_gw_ext(500, 'Allow')
    # Traffic to/from workload will be dropped by workload default policy
    # since conntrack entry is invalid.
    retry_until_success(self.assert_host_can_curl_ext, 3)
    # Traffic to/from workload will be dropped by workload default policy
    # since conntrack entry is invalid.
    retry_until_success(self.assert_hostwl_can_not_access_workload, 3)
    retry_until_success(self.assert_workload_can_not_curl_ext, 3)
def test_node_status_resilience(self, test_host, pid_name):
    """
    Test that newly restarted BGP backend processes consistently
    transition to an Established state.

    Test using different BGP backends.
    We run a multi-host test for this to test peering between two gobgp
    backends and a single BIRD backend.

    :param test_host: index into the hosts list of the host whose BGP
        process is killed each iteration.
    :param pid_name: process name passed to pgrep/pkill inside calico-node.
    """
    with DockerHost('host1',
                    additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS,
                    start_calico=False) as host1, \
            DockerHost('host2',
                       additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS,
                       start_calico=False) as host2, \
            DockerHost('host3',
                       additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS,
                       start_calico=True) as host3:

        # Set the default AS number.
        host1.calicoctl("config set asNumber %s" % LARGE_AS_NUM)

        # Start host1 using the inherited AS, and host2 using a specified
        # AS (same as default). These hosts use the gobgp backend, whereas
        # host3 uses BIRD.
        host1.start_calico_node("--backend=gobgp")
        host2.start_calico_node("--backend=gobgp --as=%s" % LARGE_AS_NUM)

        # Create a network and a couple of workloads on each host.
        network1 = host1.create_network("subnet1")
        workload_host1 = host1.create_workload("workload1", network=network1)
        workload_host2 = host2.create_workload("workload2", network=network1)
        workload_host3 = host3.create_workload("workload3", network=network1)

        # Allow network to converge
        self.assert_true(workload_host1.check_can_ping(workload_host2.ip, retries=10))

        # Check connectivity in both directions
        self.assert_ip_connectivity(workload_list=[workload_host1,
                                                   workload_host2,
                                                   workload_host3],
                                    ip_pass_list=[workload_host1.ip,
                                                  workload_host2.ip,
                                                  workload_host3.ip])

        hosts = [host1, host2, host3]
        workloads = [workload_host1, workload_host2, workload_host3]

        _log.debug("==== docker exec -it calico-node ps -a ====")
        _log.debug(hosts[test_host].execute("docker exec -it calico-node ps -a"))

        # Check the BGP status on the BIRD/GoBGP host.
        def check_connected():
            # Each host should show an Established mesh session with
            # every other host.
            for target in hosts:
                expected = [("node-to-node mesh", h.ip, "Established")
                            for h in hosts if h is not target]
                _log.debug("expected : %s", expected)
                check_bird_status(target, expected)

        def delete_workload(host, host_workload):
            # Release the workload's IP before removing the container so
            # IPAM bookkeeping stays consistent.
            host.calicoctl("ipam release --ip=%s" % host_workload.ip)
            host.execute("docker rm -f %s" % host_workload.name)
            host.workloads.remove(host_workload)

        def pid_parse(pid_str):
            # "docker exec -it" output uses \r\n separators; split a
            # multi-pid pgrep result into a list, else wrap the single pid.
            if '\r\n' in pid_str:
                pid_list = pid_str.split('\r\n')
                return pid_list
            else:
                return [pid_str]

        iterations = 3
        for iteration in range(1, iterations+1):
            _log.debug("Iteration %s", iteration)
            _log.debug("Host under test: %s", hosts[test_host].name)
            _log.debug("Identify and pkill process: %s", pid_name)

            # Record the BGP process pids, kill the process, then wait
            # for the mesh to re-establish.
            pre_pkill = hosts[test_host].execute("docker exec -it calico-node pgrep %s" % pid_name)
            pre_pkill_list = pid_parse(pre_pkill)
            _log.debug("Pre pkill list: %s", pre_pkill_list)

            hosts[test_host].execute("docker exec -it calico-node pkill %s" % pid_name)

            _log.debug('check connected and retry until "Established"')
            retry_until_success(check_connected, retries=20, ex_class=Exception)

            post_pkill = hosts[test_host].execute("docker exec -it calico-node pgrep %s" % pid_name)
            post_pkill_list = pid_parse(post_pkill)
            _log.debug("Post pkill list: %s", post_pkill_list)

            # A changed pid set proves the process actually restarted.
            assert pre_pkill_list != post_pkill_list, "The pids should not be the same after pkill"

            # Build per-iteration workload names.
            # NOTE(review): "%s" formats the workload object itself, not
            # workload.name — presumably its __str__ is the name; confirm.
            new_workloads = []
            for workload in workloads:
                new_workload = "%s_%s" % (workload, iteration)
                new_workloads.append(new_workload)

            # create new workloads
            index = 0
            for new_workload in new_workloads:
                new_workload = hosts[index].create_workload(new_workload, network=network1)
                _log.debug("host: %s and workload: %s", hosts[index].name, new_workload.name)

                # Check connectivity in both directions
                self.assert_ip_connectivity(workload_list=[workload_host1,
                                                           workload_host2,
                                                           workload_host3,
                                                           new_workload],
                                            ip_pass_list=[workload_host1.ip,
                                                          workload_host2.ip,
                                                          workload_host3.ip,
                                                          new_workload.ip])
                # Clean up the temporary workload before the next host.
                delete_workload(hosts[index], new_workload)
                index += 1
def test_local_egress_allow_with_lower_egress_forward_deny(self): """ Test local egress allow does not affect forward egress deny with lower order. """ self.test_can_connect_by_default() self.add_gateway_external_iface() self.add_gateway_internal_iface() # Add local egress allow and forward egress deny self.add_egress_policy(200, 'allow', False) self.add_egress_policy(500, 'deny', True) retry_until_success(self.assert_host_can_not_curl_local, 3) retry_until_success(self.assert_gateway_can_curl_ext, 3) retry_until_success(self.assert_host_can_not_curl_ext, 3) retry_until_success(self.assert_hostwl_can_access_workload, 3) retry_until_success(self.assert_workload_can_not_curl_ext, 3) # Add workload ingress deny self.add_workload_ingress(800, 'deny') retry_until_success(self.assert_host_can_not_curl_local, 3) retry_until_success(self.assert_gateway_can_curl_ext, 3) retry_until_success(self.assert_host_can_not_curl_ext, 3) retry_until_success(self.assert_hostwl_can_not_access_workload, 3) retry_until_success(self.assert_workload_can_not_curl_ext, 3)
def test_bird_readiness(self):
    """
    Test readiness when BGP connections are severed.

    Blocks BGP (TCP/179) between two hosts, kills bird on both, checks
    that each node's readiness probe reports not-ready, then restores
    connectivity and checks both nodes recover to Established.
    """
    with DockerHost('host1',
                    additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS) as host1, \
            DockerHost('host2',
                       additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS) as host2:
        retry_until_success(host1.assert_is_ready, retries=30)
        retry_until_success(host2.assert_is_ready, retries=30)

        # Create a network and a couple of workloads on each host.
        network1 = host1.create_network("subnet1")
        workload_host1 = host1.create_workload("workload1", network=network1)
        workload_host2 = host2.create_workload("workload2", network=network1)

        # Allow network to converge
        self.assert_true(
            workload_host1.check_can_ping(workload_host2.ip, retries=10))

        # Check connectivity in both directions
        self.assert_ip_connectivity(
            workload_list=[workload_host1, workload_host2],
            ip_pass_list=[workload_host1.ip, workload_host2.ip])

        # Block bgp connectivity between hosts.  Use the canonical
        # multiport option "--dports" so the insert below and the delete
        # further down name the identical rule spec (the original mixed
        # "--dport" on insert with "--dports" on delete).
        host1.execute(
            "iptables -t raw -I PREROUTING -p tcp -m multiport --dports 179 -j DROP"
        )
        host2.execute(
            "iptables -t raw -I PREROUTING -p tcp -m multiport --dports 179 -j DROP"
        )
        host1.execute("docker exec -it calico-node pkill -9 bird")
        host2.execute("docker exec -it calico-node pkill -9 bird")

        # Check that the readiness script is reporting 'not ready' on
        # BOTH hosts.  (Bug fix: the second check previously ran against
        # host1 again, leaving host2's readiness unverified.)
        self.assertRaisesRegexp(
            CalledProcessError,
            "calico/node is not ready: BIRD is not ready: BGP not established with",
            host1.execute,
            "docker exec calico-node /bin/calico-node -bird-ready -felix-ready"
        )
        self.assertRaisesRegexp(
            CalledProcessError,
            "calico/node is not ready: BIRD is not ready: BGP not established with",
            host2.execute,
            "docker exec calico-node /bin/calico-node -bird-ready -felix-ready"
        )

        # Restore connectivity by deleting the DROP rules added above.
        host1.execute(
            "iptables -t raw -D PREROUTING -p tcp -m multiport --dports 179 -j DROP"
        )
        host2.execute(
            "iptables -t raw -D PREROUTING -p tcp -m multiport --dports 179 -j DROP"
        )

        _log.debug('check connected and retry until "Established"')
        retry_until_success(host1.assert_is_ready, retries=30)
        retry_until_success(host2.assert_is_ready, retries=30)
        check_bird_status(host1, [("node-to-node mesh", host2.ip, "Established")])
        check_bird_status(host2, [("node-to-node mesh", host1.ip, "Established")])
def test_local_deny_with_lower_forward_allow(self): """ Test local deny with lower order does not affect forward allow policy. """ self.test_empty_policy_for_forward_traffic( ) # setup a deny for all traffic # Add local&forward ingress/egress allow. self.add_ingress_policy(300, 'Allow', True) self.add_egress_policy(300, 'Allow', True) retry_until_success(self.assert_host_can_curl_local, 3) retry_until_success(self.assert_gateway_can_curl_ext, 3) retry_until_success(self.assert_host_can_curl_ext, 3) retry_until_success(self.assert_hostwl_can_access_workload, 3) retry_until_success(self.assert_workload_can_curl_ext, 3) # Add local ingress/egress deny. self.add_ingress_policy(200, 'Deny', False) self.add_egress_policy(200, 'Deny', False) retry_until_success(self.assert_host_can_not_curl_local, 3) retry_until_success(self.assert_gateway_can_not_curl_ext, 3) retry_until_success(self.assert_host_can_curl_ext, 3) retry_until_success(self.assert_hostwl_can_access_workload, 3) retry_until_success(self.assert_workload_can_curl_ext, 3)
def remove_pol_and_endpoints(self): self.delete_all("globalnetworkpolicy") self.delete_all("hostEndpoint") # Wait for felix to remove the policy and allow traffic through the gateway. retry_until_success(self.assert_host_can_curl_ext)