def test_defaults(self):
    """
    Test default BGP configuration commands.

    Verifies that the global default AS number can be set and read
    back, that invalid AS numbers are rejected, and that the
    node-to-node mesh can be toggled off and back on.
    """
    with DockerHost('host', start_calico=False, dind=False) as host:
        # TODO: Re-enable or remove after decision is made on the defaults
        # Check default AS command
        #response = host.calicoctl("get BGPConfiguration -o yaml")
        #bgpcfg = yaml.safe_load(response)
        #self.assertEqual(bgpcfg['items'][0]['spec']['asNumber'], 64512)

        # Set the default AS number and check it is reflected in the spec.
        # (assertEqual, not the deprecated assertEquals alias.)
        update_bgp_config(host, asNum=12345)
        self.assertEqual(get_bgp_spec(host)['asNumber'], 12345)

        # AS numbers outside the valid range must be rejected.
        with self.assertRaises(CommandExecError):
            update_bgp_config(host, asNum=99999999999999999999999)
        # Non-numeric AS numbers must be rejected.
        with self.assertRaises(CommandExecError):
            update_bgp_config(host, asNum='abcde')

        # Check BGP mesh command: when the field is present it defaults to
        # enabled, and it can be toggled off and back on.
        if 'nodeToNodeMeshEnabled' in get_bgp_spec(host):
            self.assertEqual(
                get_bgp_spec(host)['nodeToNodeMeshEnabled'], True)

        update_bgp_config(host, nodeMesh=False)
        self.assertEqual(
            get_bgp_spec(host)['nodeToNodeMeshEnabled'], False)
        update_bgp_config(host, nodeMesh=True)
        self.assertEqual(
            get_bgp_spec(host)['nodeToNodeMeshEnabled'], True)
def test_bgp_backends(self):
    """
    Test using different BGP backends.

    We run a multi-host test for this to test peering between two gobgp
    backends and a single BIRD backend.
    """
    with DockerHost('host1',
                    additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS,
                    start_calico=False) as host1, \
            DockerHost('host2',
                       additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS,
                       start_calico=False) as host2, \
            DockerHost('host3',
                       additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS,
                       start_calico=True) as host3:

        # Set the default AS number.
        update_bgp_config(host1, asNum=LARGE_AS_NUM)

        # Start host1 using the inherited AS, and host2 using a specified
        # AS (same as default).  These hosts use the gobgp backend, whereas
        # host3 uses BIRD.
        host1.start_calico_node("--backend=gobgp")
        host2.start_calico_node("--backend=gobgp --as=%s" % LARGE_AS_NUM)

        # Create a network and a couple of workloads on each host, with
        # fixed IPs so connectivity can be asserted by address.
        network1 = host1.create_network("subnet1",
                                        subnet=DEFAULT_IPV4_POOL_CIDR)
        workload_host1 = host1.create_workload("workload1",
                                               network=network1,
                                               ip=DEFAULT_IPV4_ADDR_1)
        workload_host2 = host2.create_workload("workload2",
                                               network=network1,
                                               ip=DEFAULT_IPV4_ADDR_2)
        workload_host3 = host3.create_workload("workload3",
                                               network=network1,
                                               ip=DEFAULT_IPV4_ADDR_3)

        # Allow network to converge
        self.assert_true(
            workload_host1.check_can_ping(workload_host2.ip, retries=10))

        # Check connectivity in both directions
        self.assert_ip_connectivity(
            workload_list=[workload_host1,
                           workload_host2,
                           workload_host3],
            ip_pass_list=[workload_host1.ip,
                          workload_host2.ip,
                          workload_host3.ip])

        # Check the BGP status on the BIRD/GoBGP host.  Every node should
        # report an Established mesh session with each of the other nodes.
        hosts = [host1, host2, host3]
        for target in hosts:
            expected = [("node-to-node mesh", h.ip, "Established")
                        for h in hosts if h is not target]
            check_bird_status(target, expected)
def _test_global_peers(self, backend='bird'):
    """
    Test global BGP peer configuration.

    Test by turning off the mesh and configuring the mesh as a set of
    global peers.
    """
    with DockerHost('host1',
                    additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS,
                    start_calico=False) as host1, \
            DockerHost('host2',
                       additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS,
                       start_calico=False) as host2:

        # Start both hosts using specific AS numbers.
        host1.start_calico_node("--backend=%s --as=%s" % (backend, LARGE_AS_NUM))
        host2.start_calico_node("--backend=%s --as=%s" % (backend, LARGE_AS_NUM))

        # Create a network and a couple of workloads on each host.
        network1 = host1.create_network("subnet1")
        workload_host1 = host1.create_workload("workload1",
                                               network=network1,
                                               ip=DEFAULT_IPV4_ADDR_1)
        workload_host2 = host2.create_workload("workload2",
                                               network=network1,
                                               ip=DEFAULT_IPV4_ADDR_2)

        # Allow network to converge
        self.assert_true(
            workload_host1.check_can_ping(DEFAULT_IPV4_ADDR_2, retries=10))

        # Turn the node-to-node mesh off and wait for connectivity to drop.
        update_bgp_config(host1, nodeMesh=False)
        self.assert_true(
            workload_host1.check_cant_ping(DEFAULT_IPV4_ADDR_2, retries=10))

        # Configure global peers to explicitly set up a mesh.
        create_bgp_peer(host1, 'global', host2.ip, LARGE_AS_NUM)
        create_bgp_peer(host2, 'global', host1.ip, LARGE_AS_NUM)

        # Allow network to converge
        self.assert_true(
            workload_host1.check_can_ping(DEFAULT_IPV4_ADDR_2, retries=10))

        # Check connectivity in both directions
        self.assert_ip_connectivity(
            workload_list=[workload_host1, workload_host2],
            ip_pass_list=[DEFAULT_IPV4_ADDR_1, DEFAULT_IPV4_ADDR_2])

        # Check the BGP status on each host.  Connections from a node to
        # itself will be idle since this is invalid BGP configuration.
        check_bird_status(host1, [("global", host2.ip, "Established")])
        check_bird_status(host2, [("global", host1.ip, "Established")])
def _test_route_reflector_cluster(self, backend='bird'):
    """
    Run a multi-host test using a cluster of route reflectors and node
    specific peerings.
    """
    with DockerHost('host1',
                    additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS,
                    start_calico=False) as host1, \
            DockerHost('host2',
                       additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS,
                       start_calico=False) as host2, \
            DockerHost('host3',
                       additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS,
                       start_calico=False) as host3, \
            RouteReflectorCluster(2, 2) as rrc:

        # Start all three hosts using the specified backend.
        host1.start_calico_node("--backend=%s" % backend)
        host2.start_calico_node("--backend=%s" % backend)
        host3.start_calico_node("--backend=%s" % backend)

        # Set the default AS number - as this is used by the RR mesh, and
        # turn off the node-to-node mesh (do this from any host).
        update_bgp_config(host1, asNum=64513, nodeMesh=False)

        # Create a workload on each host in the same network.
        network1 = host1.create_network("subnet1")
        workload_host1 = host1.create_workload("workload1", network=network1)
        workload_host2 = host2.create_workload("workload2", network=network1)
        workload_host3 = host3.create_workload("workload3", network=network1)

        # Allow network to converge (which it won't - no mesh and no
        # peerings configured yet).
        self.assert_false(workload_host1.check_can_ping(workload_host2.ip,
                                                        retries=5))
        self.assert_true(workload_host1.check_cant_ping(workload_host3.ip))
        self.assert_true(workload_host2.check_cant_ping(workload_host3.ip))

        # Set distributed peerings between the hosts, each host peering
        # with a different set of redundant route reflectors.
        for host in [host1, host2, host3]:
            for rr in rrc.get_redundancy_group():
                # NOTE(review): other RR tests build the peer name with
                # rr.name.lower() - confirm whether case matters here.
                create_bgp_peer(host, "node", rr.ip, 64513,
                                metadata={'name': host.name + rr.name})

        # Allow network to converge (which it now will).
        self.assert_true(workload_host1.check_can_ping(workload_host2.ip,
                                                       retries=10))
        self.assert_true(workload_host1.check_can_ping(workload_host3.ip,
                                                       retries=10))
        self.assert_true(workload_host2.check_can_ping(workload_host3.ip,
                                                       retries=10))

        # And check connectivity in both directions.
        self.assert_ip_connectivity(
            workload_list=[workload_host1,
                           workload_host2,
                           workload_host3],
            ip_pass_list=[workload_host1.ip,
                          workload_host2.ip,
                          workload_host3.ip])
def test_route_reflector_cluster_resilience(self):
    """
    Runs a cluster of route reflectors, brings one node down, and
    ensures that traffic still flows.
    """
    with DockerHost('host1',
                    additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS,
                    start_calico=False) as host1, \
            DockerHost('host2',
                       additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS,
                       start_calico=False) as host2, \
            RouteReflectorCluster(2, 1) as rrc:

        # Start both hosts using specific backends.
        host1.start_calico_node("--backend=bird")
        host2.start_calico_node("--backend=bird")

        # Use AS 64513 for the RR mesh, and turn off the node-to-node
        # mesh (global config, so setting it from one host suffices).
        update_bgp_config(host1, asNum=64513, nodeMesh=False)

        # Create a workload on each host in the same network.
        network1 = host1.create_network("subnet1")
        workload_host1 = host1.create_workload("workload1", network=network1)
        workload_host2 = host2.create_workload("workload2", network=network1)

        # Assert no network connectivity - no mesh and no peerings yet.
        self.assert_false(
            workload_host1.check_can_ping(workload_host2.ip, retries=5))

        # Peer the hosts with the route reflectors
        for host in [host1, host2]:
            for rr in rrc.get_redundancy_group():
                create_bgp_peer(
                    host, "node", rr.ip, 64513,
                    metadata={'name': host.name + rr.name.lower()})

        # Assert network connectivity
        self.assert_true(
            workload_host1.check_can_ping(workload_host2.ip, retries=10))
        self.assert_ip_connectivity(
            workload_list=[workload_host1, workload_host2],
            ip_pass_list=[workload_host1.ip, workload_host2.ip])

        # Bring down one route reflector node.
        rrc.redundancy_groups[0][0].cleanup()

        # Assert that network is still connected via the remaining RRs.
        self.assert_true(
            workload_host1.check_can_ping(workload_host2.ip, retries=10))
        self.assert_ip_connectivity(
            workload_list=[workload_host1, workload_host2],
            ip_pass_list=[workload_host1.ip, workload_host2.ip])
def _test_single_route_reflector(self, backend='bird',
                                 bgpconfig_as_num=64514,
                                 peer_as_num=64514):
    """
    Run a multi-host test using a single route reflector and global
    peering.
    """
    with DockerHost('host1',
                    additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS,
                    start_calico=False) as host1, \
            DockerHost('host2',
                       additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS,
                       start_calico=False) as host2, \
            RouteReflectorCluster(1, 1) as rrc:

        # Start both hosts using specific backends.
        host1.start_calico_node("--backend=%s" % backend)
        host2.start_calico_node("--backend=%s" % backend)

        # Set the default AS number - as this is used by the RR mesh, and
        # turn off the node-to-node mesh (do this from any host).
        update_bgp_config(host1, nodeMesh=False, asNum=bgpconfig_as_num)

        # Create a workload on each host in the same network.
        network1 = host1.create_network("subnet1")
        workload_host1 = host1.create_workload("workload1", network=network1)
        workload_host2 = host2.create_workload("workload2", network=network1)

        # Allow network to converge (which it won't - no mesh, no peers).
        self.assert_false(
            workload_host1.check_can_ping(workload_host2.ip, retries=5))

        # Set global config telling all calico nodes to peer with the
        # route reflector.  This can be run from either host.  A (1, 1)
        # cluster should have exactly one RR in its redundancy group.
        rg = rrc.get_redundancy_group()
        assert len(rg) == 1
        create_bgp_peer(host1, "global", rg[0].ip, peer_as_num)

        # Allow network to converge (which it now will).  Wait until both
        # nodes report ready (BIRD only, not felix) before checking the
        # BGP session state.
        retry_until_success(host1.assert_is_ready, retries=30, felix=False)
        retry_until_success(host2.assert_is_ready, retries=30, felix=False)
        check_bird_status(host1, [("global", rg[0].ip, "Established")])
        check_bird_status(host2, [("global", rg[0].ip, "Established")])
        self.assert_true(
            workload_host1.check_can_ping(workload_host2.ip, retries=20))

        # And check connectivity in both directions.
        self.assert_ip_connectivity(
            workload_list=[workload_host1, workload_host2],
            ip_pass_list=[workload_host1.ip, workload_host2.ip])
def _test_as_num(self, backend='bird'):
    """
    Test using different AS number for the node-to-node mesh.

    We run a multi-host test for this as we need to set up real BGP
    peers.
    """
    docker_opts = CLUSTER_STORE_DOCKER_OPTIONS
    with DockerHost('host1',
                    additional_docker_options=docker_opts,
                    start_calico=False) as host1, \
            DockerHost('host2',
                       additional_docker_options=docker_opts,
                       start_calico=False) as host2:

        # Configure the global default AS number before starting nodes.
        update_bgp_config(host1, asNum=LARGE_AS_NUM)

        # host1 inherits the default AS; host2 is given the same AS
        # explicitly on its command line.
        host1.start_calico_node("--backend=%s" % backend)
        host2.start_calico_node("--backend=%s --as=%s" %
                                (backend, LARGE_AS_NUM))

        # One shared network; one workload per host, with fixed IPs.
        net = host1.create_network("subnet1",
                                   subnet=DEFAULT_IPV4_POOL_CIDR)
        wl1 = host1.create_workload("workload1", network=net,
                                    ip=DEFAULT_IPV4_ADDR_1)
        wl2 = host2.create_workload("workload2", network=net,
                                    ip=DEFAULT_IPV4_ADDR_2)

        # Wait for the mesh to converge.
        self.assert_true(
            wl1.check_can_ping(DEFAULT_IPV4_ADDR_2, retries=10))

        # Verify connectivity in both directions.
        self.assert_ip_connectivity(
            workload_list=[wl1, wl2],
            ip_pass_list=[DEFAULT_IPV4_ADDR_1, DEFAULT_IPV4_ADDR_2])

        # Each node should report an Established node-to-node mesh
        # session with its peer.
        for local, remote in ((host1, host2), (host2, host1)):
            check_bird_status(
                local,
                [("node-to-node mesh", remote.ip, "Established")])
def test_defaults(self):
    """
    Test default BGP configuration commands.
    """
    with DockerHost('host', start_calico=False, dind=False) as host:
        # As the v3 data model now stands, there is no way to query what
        # the default AS number is, in the absence of any resources.  Also,
        # if you create a BGPConfiguration resource that does not specify
        # an AS number, and then read it back, the output does not include
        # the default AS number.
        #
        # So we can't test the default AS number directly with calicoctl
        # operations.  We can of course test it indirectly: see
        # test_bird_single_route_reflector_default_as in
        # test_single_route_reflector.py.

        # Set the global-default AS number and check it is reflected in
        # the spec.  (assertEqual, not the deprecated assertEquals alias.)
        update_bgp_config(host, asNum=12345)
        self.assertEqual(get_bgp_spec(host)['asNumber'], 12345)

        # Out-of-range and non-numeric AS numbers must be rejected.
        with self.assertRaises(CommandExecError):
            update_bgp_config(host, asNum=99999999999999999999999)
        with self.assertRaises(CommandExecError):
            update_bgp_config(host, asNum='abcde')

        # Check BGP mesh command: when the field is present it defaults
        # to enabled, and it can be toggled off and back on.
        if 'nodeToNodeMeshEnabled' in get_bgp_spec(host):
            self.assertEqual(
                get_bgp_spec(host)['nodeToNodeMeshEnabled'], True)

        update_bgp_config(host, nodeMesh=False)
        self.assertEqual(
            get_bgp_spec(host)['nodeToNodeMeshEnabled'], False)
        update_bgp_config(host, nodeMesh=True)
        self.assertEqual(
            get_bgp_spec(host)['nodeToNodeMeshEnabled'], True)
def test_node_status_resilience(self, test_host, pid_name):
    """
    Test that newly restarted BGP backend processes consistently
    transition to an Established state.

    Test using different BGP backends.
    We run a multi-host test for this to test peering between two gobgp
    backends and a single BIRD backend.
    """
    with DockerHost('host1',
                    additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS,
                    start_calico=False) as host1, \
            DockerHost('host2',
                       additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS,
                       start_calico=False) as host2, \
            DockerHost('host3',
                       additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS,
                       start_calico=True) as host3:

        # Set the default AS number.
        update_bgp_config(host1, asNum=LARGE_AS_NUM)

        # Start host1 using the inherited AS, and host2 using a specified
        # AS (same as default).  These hosts use the gobgp backend, whereas
        # host3 uses BIRD.
        host1.start_calico_node("--backend=gobgp")
        host2.start_calico_node("--backend=gobgp --as=%s" % LARGE_AS_NUM)

        # Create a network and a couple of workloads on each host.
        network1 = host1.create_network("subnet1")
        workload_host1 = host1.create_workload("workload1", network=network1)
        workload_host2 = host2.create_workload("workload2", network=network1)
        workload_host3 = host3.create_workload("workload3", network=network1)

        # Allow network to converge
        self.assert_true(
            workload_host1.check_can_ping(workload_host2.ip, retries=10))

        # Check connectivity in both directions
        self.assert_ip_connectivity(
            workload_list=[workload_host1,
                           workload_host2,
                           workload_host3],
            ip_pass_list=[workload_host1.ip,
                          workload_host2.ip,
                          workload_host3.ip])

        hosts = [host1, host2, host3]
        workloads = [workload_host1, workload_host2, workload_host3]

        _log.debug("==== docker exec -it calico-node ps -a ====")
        _log.debug(
            hosts[test_host].execute("docker exec -it calico-node ps -a"))

        # Check the BGP status on the BIRD/GoBGP host.
        def check_connected():
            # Every node must report an Established mesh session with
            # every other node; raises via check_bird_status otherwise.
            for target in hosts:
                expected = [("node-to-node mesh", h.ip, "Established")
                            for h in hosts if h is not target]
                _log.debug("expected : %s", expected)
                check_bird_status(target, expected)

        def delete_workload(host, host_workload):
            # Release the workload's IP allocation, remove its container,
            # and forget it on the host object.
            host.calicoctl("ipam release --ip=%s" % host_workload.ip)
            host.execute("docker rm -f %s" % host_workload.name)
            host.workloads.remove(host_workload)

        def pid_parse(pid_str):
            # pgrep output may contain several pids separated by CRLF;
            # always return a list.
            if '\r\n' in pid_str:
                pid_list = pid_str.split('\r\n')
                return pid_list
            else:
                return [pid_str]

        iterations = 3
        for iteration in range(1, iterations + 1):
            _log.debug("Iteration %s", iteration)
            _log.debug("Host under test: %s", hosts[test_host].name)
            _log.debug("Identify and pkill process: %s", pid_name)
            # Record the BGP process pids before killing the process.
            pre_pkill = hosts[test_host].execute(
                "docker exec -it calico-node pgrep %s" % pid_name)
            pre_pkill_list = pid_parse(pre_pkill)
            _log.debug("Pre pkill list: %s", pre_pkill_list)

            hosts[test_host].execute(
                "docker exec -it calico-node pkill %s" % pid_name)

            _log.debug('check connected and retry until "Established"')
            retry_until_success(check_connected, retries=20,
                                ex_class=Exception)

            # The process must have been restarted with new pids.
            post_pkill = hosts[test_host].execute(
                "docker exec -it calico-node pgrep %s" % pid_name)
            post_pkill_list = pid_parse(post_pkill)
            _log.debug("Post pkill list: %s", post_pkill_list)
            assert pre_pkill_list != post_pkill_list, \
                "The pids should not be the same after pkill"

            new_workloads = []
            for workload in workloads:
                # NOTE(review): this formats the workload object itself
                # (relies on its __str__) rather than workload.name -
                # confirm these are equivalent.
                new_workload = "%s_%s" % (workload, iteration)
                new_workloads.append(new_workload)

            # create new workloads
            index = 0
            for new_workload in new_workloads:
                new_workload = hosts[index].create_workload(
                    new_workload, network=network1)
                _log.debug("host: %s and workload: %s",
                           hosts[index].name, new_workload.name)

                # Wait for the workload to be networked.
                self.assert_true(
                    new_workload.check_can_ping(workload_host3.ip,
                                                retries=10))

                # Check connectivity in both directions
                self.assert_ip_connectivity(
                    workload_list=[workload_host1,
                                   workload_host2,
                                   workload_host3,
                                   new_workload],
                    ip_pass_list=[workload_host1.ip,
                                  workload_host2.ip,
                                  workload_host3.ip,
                                  new_workload.ip])
                # Clean the temporary workload up again so the next
                # iteration starts from the same state.
                delete_workload(hosts[index], new_workload)
                index += 1
def _test_internal_route_reflector(self, backend='bird',
                                   bgpconfig_as_num=64514,
                                   peer_as_num=64514):
    """
    Run a multi-host test using an internal route reflector.
    """
    # NOTE(review): peer_as_num is accepted but unused in this body -
    # peering is configured via selectors below; confirm intent.
    with DockerHost('host1',
                    additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS,
                    start_calico=False) as host1, \
            DockerHost('host2',
                       additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS,
                       start_calico=False) as host2, \
            DockerHost('host3',
                       additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS,
                       start_calico=False) as host3:

        # Start all hosts using specific backends.
        host1.start_calico_node("--backend=%s" % backend)
        host2.start_calico_node("--backend=%s" % backend)
        host3.start_calico_node("--backend=%s" % backend)

        # Set the default AS number - as this is used by the RR mesh, and
        # turn off the node-to-node mesh (do this from any host).
        update_bgp_config(host1, nodeMesh=False, asNum=bgpconfig_as_num)

        # Create a workload on each host in the same network.
        network1 = host1.create_network("subnet1")
        workload_host1 = host1.create_workload("workload1", network=network1)
        workload_host2 = host2.create_workload("workload2", network=network1)
        workload_host3 = host3.create_workload("workload3", network=network1)

        # Allow network to converge (which it won't - no mesh, no peers).
        self.assert_false(
            workload_host1.check_can_ping(workload_host2.ip, retries=5))

        # Make host2 act as a route reflector: read its Node resource,
        # give it a route reflector cluster ID, and label it so the
        # peerSelector below can match it.
        node2 = host2.calicoctl("get Node %s -o yaml" % host2.get_hostname())
        node2cfg = yaml.safe_load(node2)
        logger.info("host2 Node: %s", node2cfg)
        node2cfg['spec']['bgp']['routeReflectorClusterID'] = '224.0.0.2'
        node2cfg['metadata']['labels'] = {
            'routeReflectorClusterID':
                node2cfg['spec']['bgp']['routeReflectorClusterID'],
        }
        host2.add_resource(node2cfg)

        # Configure peerings - note, NOT a full mesh - from the
        # other nodes to the route reflector.
        host2.add_resource({
            'apiVersion': 'projectcalico.org/v3',
            'kind': 'BGPPeer',
            'metadata': {
                'name': 'rr-peerings',
            },
            'spec': {
                # Non-RR nodes peer with RR nodes.
                'nodeSelector': '!has(routeReflectorClusterID)',
                'peerSelector': 'has(routeReflectorClusterID)',
            },
        })

        # Allow network to converge (which it now will).
        self.assert_true(
            workload_host1.check_can_ping(workload_host2.ip, retries=10))

        # And check connectivity in both directions.
        self.assert_ip_connectivity(
            workload_list=[workload_host1,
                           workload_host2,
                           workload_host3],
            ip_pass_list=[workload_host1.ip,
                          workload_host2.ip,
                          workload_host3.ip],
            retries=5)
def _test_gce_int(self, with_ipip, backend, host1, host2, rrc):
    """
    Run a two-host connectivity test with and without IP-in-IP.

    The hosts are created by the caller.  If rrc (a route reflector
    cluster) is provided, the node-to-node mesh is disabled and both
    hosts peer with the reflectors instead.  Two iterations are run,
    flipping the IP-in-IP setting in between, so both states are
    exercised whatever the starting value of with_ipip.
    """
    host1.start_calico_node("--backend={0}".format(backend))
    host2.start_calico_node("--backend={0}".format(backend))

    # Before creating any workloads, set the initial IP-in-IP state.
    host1.set_ipip_enabled(with_ipip)

    if rrc:
        # Set the default AS number - as this is used by the RR mesh,
        # and turn off the node-to-node mesh (do this from any host).
        update_bgp_config(host1, asNum=64513, nodeMesh=False)
        # Peer from each host to the route reflector.
        for host in [host1, host2]:
            for rr in rrc.get_redundancy_group():
                create_bgp_peer(host, "node", rr.ip, 64513,
                                metadata={'name': host.name})

    # Create a network and a workload on each host.
    network1 = host1.create_network("subnet1")
    workload_host1 = host1.create_workload("workload1", network=network1)
    workload_host2 = host2.create_workload("workload2", network=network1)

    for _ in [1, 2]:
        # Check we do or don't have connectivity between the workloads,
        # according to the IP-in-IP setting.
        if with_ipip:
            # Allow network to converge.
            self.assert_true(
                workload_host1.check_can_ping(workload_host2.ip,
                                              retries=10))

            # Check connectivity in both directions
            self.assert_ip_connectivity(
                workload_list=[workload_host1, workload_host2],
                ip_pass_list=[workload_host1.ip, workload_host2.ip])

            for host in [host1, host2]:
                # Check that we are using IP-in-IP for some routes.
                assert "tunl0" in host.execute("ip r")

                # Get current links and addresses, as a baseline for the
                # following changes test.
                host.execute("ip l")
                host.execute("ip a")

                # Check that routes are not flapping, on either host, by
                # running 'ip monitor' for 10s and checking that it does
                # not indicate any changes other than those associated with
                # an IP address being added to the tunl0 device.
                ip_changes = host.execute(
                    "timeout -t 10 ip -t monitor 2>&1 || true")
                lines = ip_changes.split("\n")
                assert len(lines) >= 1, "No output from ip monitor"
                # Some expected events span two lines; this counter skips
                # the follow-on line(s) of an already-accepted event.
                expected_lines_following = 0
                for line in lines:
                    if expected_lines_following > 0:
                        expected_lines_following -= 1
                    elif "Terminated" in line:
                        # "Terminated" - emitted when the 10s timeout
                        # kills 'ip monitor'.
                        pass
                    elif "Timestamp" in line:
                        # e.g. "Timestamp: Wed Nov 1 18:02:38 2017 689895 usec"
                        pass
                    elif ": tunl0" in line:
                        # e.g. "2: tunl0 inet 192.168.128.1/32 scope global tunl0"
                        #      "    valid_lft forever preferred_lft forever"
                        # Indicates IP address being added to the tunl0
                        # device; the next line belongs to the same event.
                        expected_lines_following = 1
                    elif line.startswith("local") and "dev tunl0" in line:
                        # e.g. "local 192.168.128.1 dev tunl0 table local ..."
                        # Local routing table entry associated with tunl0
                        # device IP address.
                        pass
                    elif "172.17.0.1 dev eth0 lladdr" in line:
                        # Ex: "172.17.0.1 dev eth0 lladdr 02:03:04:05:06:07 REACHABLE"
                        # Indicates that the host just learned the MAC
                        # address of its default gateway.
                        pass
                    else:
                        assert False, \
                            "Unexpected ip monitor line: %r" % line
        else:
            # Expect non-connectivity between workloads on different hosts.
            self.assert_false(
                workload_host1.check_can_ping(workload_host2.ip,
                                              retries=10))

        if not rrc:
            # Check the BGP status on each host.
            check_bird_status(
                host1, [("node-to-node mesh", host2.ip, "Established")])
            check_bird_status(
                host2, [("node-to-node mesh", host1.ip, "Established")])

        # Flip the IP-in-IP state for the next iteration.
        with_ipip = not with_ipip
        host1.set_ipip_enabled(with_ipip)
def _test_gce_int(self, with_ipip, backend, host1, host2, rrc):
    """
    Run a two-host connectivity test with and without IP-in-IP.

    The hosts are created by the caller.  If rrc (a route reflector
    cluster) is provided, the node-to-node mesh is disabled and both
    hosts peer with the reflectors instead.  Two iterations are run,
    flipping the IP-in-IP setting in between, so both states are
    exercised whatever the starting value of with_ipip.
    """
    host1.start_calico_node("--backend={0}".format(backend))
    host2.start_calico_node("--backend={0}".format(backend))

    # Before creating any workloads, set the initial IP-in-IP state.
    host1.set_ipip_enabled(with_ipip)

    if rrc:
        # Set the default AS number - as this is used by the RR mesh,
        # and turn off the node-to-node mesh (do this from any host).
        update_bgp_config(host1, asNum=64513, nodeMesh=False)
        # Peer from each host to the route reflector.
        for host in [host1, host2]:
            for rr in rrc.get_redundancy_group():
                create_bgp_peer(host, "node", rr.ip, 64513,
                                metadata={'name': host.name})

    # Create a network and a workload on each host.
    network1 = host1.create_network("subnet1")
    workload_host1 = host1.create_workload("workload1", network=network1)
    workload_host2 = host2.create_workload("workload2", network=network1)

    for _ in [1, 2]:
        # Check we do or don't have connectivity between the workloads,
        # according to the IP-in-IP setting.
        if with_ipip:
            # Allow network to converge.
            self.assert_true(
                workload_host1.check_can_ping(workload_host2.ip,
                                              retries=10))

            # Check connectivity in both directions
            self.assert_ip_connectivity(
                workload_list=[workload_host1, workload_host2],
                ip_pass_list=[workload_host1.ip, workload_host2.ip])

            # Check that we are using IP-in-IP for some routes.
            assert "tunl0" in host1.execute("ip r")
            assert "tunl0" in host2.execute("ip r")

            # Check that routes are not flapping: the following shell
            # script checks that there is no output for 10s from 'ip
            # monitor', on either host.  The "-le 1" is to allow for
            # something (either 'timeout' or 'ip monitor', not sure)
            # saying 'Terminated' when the 10s are up.  (Note that all
            # commands here are Busybox variants; I tried 'grep -v' to
            # eliminate the Terminated line, but for some reason it
            # didn't work.)
            for host in [host1, host2]:
                host.execute(
                    "changes=`timeout -t 10 ip -t monitor 2>&1`; " +
                    "echo \"$changes\"; " +
                    "test `echo \"$changes\" | wc -l` -le 1")
        else:
            # Expect non-connectivity between workloads on different hosts.
            self.assert_false(
                workload_host1.check_can_ping(workload_host2.ip,
                                              retries=10))

        if not rrc:
            # Check the BGP status on each host.
            check_bird_status(
                host1, [("node-to-node mesh", host2.ip, "Established")])
            check_bird_status(
                host2, [("node-to-node mesh", host1.ip, "Established")])

        # Flip the IP-in-IP state for the next iteration.
        with_ipip = not with_ipip
        host1.set_ipip_enabled(with_ipip)