def setUp(self): super(_TestBGPAdvert, self).setUp() # Create bgp test namespace self.ns = "bgp-test" self.create_namespace(self.ns) self.nodes, self.ips, _ = node_info() self.external_node_ip = start_external_node_with_bgp( "kube-node-extra", bird_peer_config=self.get_bird_conf(), ) # Enable debug logging self.update_ds_env("calico-node", "kube-system", "BGP_LOGSEVERITYSCREEN", "debug") # Establish BGPPeer from cluster nodes to node-extra calicoctl("""apply -f - << EOF apiVersion: projectcalico.org/v3 kind: BGPPeer metadata: name: node-extra.peer%s EOF """ % self.get_extra_peer_spec())
def setUp(self):
    """Create the test namespace plus two long-lived client pods.

    The pods live for the whole test; later steps use 'kubectl exec'
    against them. They are created once up front because, in this test
    setup, Calico routing and policy take a relatively long time
    (7 seconds?) to converge for a freshly created pod.
    """
    TestBase.setUp(self)

    self.ns_name = generate_unique_id(5, prefix="spoof")
    self.create_namespace(self.ns_name)

    cluster_nodes, _, _ = node_info()

    # 'access' listens with netcat on UDP 5000, snooping anything it
    # receives into /root/snoop.txt.
    access_cmd = (
        "run "
        "access "
        "-n %s "
        "--image busybox "
        "--overrides='{\"spec\": {\"nodeName\":\"%s\"}}' "
        "--command /bin/sh -- -c \"nc -l -u -p 5000 &> /root/snoop.txt\""
        % (self.ns_name, cluster_nodes[1])
    )
    kubectl(access_cmd)

    # 'scapy' just sleeps; packets are crafted in it via exec later.
    scapy_cmd = (
        "run "
        "scapy "
        "-n %s "
        "--image calico/scapy:v2.4.0 "
        "--overrides='{\"spec\": {\"nodeName\":\"%s\"}}' "
        "--command /bin/sleep -- 3600"
        % (self.ns_name, cluster_nodes[2])
    )
    kubectl(scapy_cmd)

    # Block until both pods are ready.
    for pod_name in ("scapy", "access"):
        kubectl("wait --timeout=2m --for=condition=ready pod/%s -n %s"
                % (pod_name, self.ns_name))
def setUp(self):
    """Pick a worker node and create a node status resource for it."""
    TestBase.setUp(self)

    # node_info() lists the master first, so index 1 is the first
    # worker; record its name and addresses for the tests.
    self.nodes, self.ips, self.ip6s = node_info()
    worker = 1
    self.test_node = self.nodes[worker]
    self.test_node_ip = self.ips[worker]
    self.test_node_ip6s = self.ip6s[worker]

    # NOTE(review): the trailing 10 is presumably an update interval in
    # seconds — confirm against create_status's definition.
    self.status_name = "node-status-0"
    create_status(self.status_name, self.test_node, 10)
def _test_restart_route_churn(self, num_repeats, restart_func, expect_churn):
    """Restart calico-node repeatedly and check for IPv4 route churn.

    Runs 'ip monitor route' on one worker while calico-node on a
    different worker is restarted num_repeats times by restart_func,
    then asserts the monitor output is non-empty (expect_churn=True)
    or empty (expect_churn=False).
    """
    with DiagsCollector():
        # Need at least two workers beyond the master: one to watch
        # routes and one whose calico-node gets restarted.
        cluster_nodes, node_ips, _ = node_info()
        self.assertGreater(len(cluster_nodes), 2)
        watcher = cluster_nodes[1]
        self.restart_node = cluster_nodes[2]
        self.restart_node_ip = node_ips[2]

        # Watch IPv4 route changes on the watcher node. "fd00:10:244"
        # filters out IPv6 workload block routes such as
        # fd00:10:244:0:1cc0:b1ac:ad47:e7c0/122; those definitely _do_
        # flap when their host restarts, for reasons not yet
        # understood — in particular it's unknown whether that points
        # to a problem in calico/node's GR setup. Mailing list thread:
        # https://marc.info/?l=bird-users&m=158298182509702&w=2
        run("docker exec -d %s sh -c 'stdbuf -oL ip -ts monitor route | stdbuf -oL grep -v fd00:10:244 > rmon.txt'" % watcher)

        # Find the name of the calico-node pod on the restart node.
        self.get_restart_node_pod_name()

        # Repeatedly restart calico-node on the other node.
        for iteration in range(num_repeats):
            _log.info("Iteration %d: restart pod %s", iteration, self.restart_pod_name)
            restart_func(self)

        # Stop the monitor and collect what it saw.
        run("docker exec %s pkill ip" % watcher)
        monitor_output = run("docker exec %s cat rmon.txt" % watcher)

        if not expect_churn:
            # No route flaps should have been observed at all.
            self.assertEqual(monitor_output, "")
        else:
            self.assertNotEqual(monitor_output, "")