Example #1
    def setUp(self):
        TestBase.setUp(self)
        self.create_namespace("policy-demo")
        self.deploy("nginx:1.7.9", "nginx", "policy-demo", 80)

        # Create two client pods that live for the duration of the
        # test.  We will use 'kubectl exec' to try wgets from these at
        # particular times.
        #
        # We do it this way - instead of one-shot pods that are
        # created, run a single wget, and then exit - because it takes a
        # relatively long time (7 seconds?) in this test setup for
        # Calico routing and policy to be set up correctly for a newly
        # created pod.  In particular it's possible that connection
        # from a just-created pod will fail because that pod's IP has
        # not yet propagated to the IP set for the ingress policy on
        # the server pod - which can confuse test code that is
        # expecting connection failure for some other reason.
        kubectl("run --generator=run-pod/v1 access -n policy-demo" +
                " --image busybox --command /bin/sleep -- 3600")
        kubectl("run --generator=run-pod/v1 no-access -n policy-demo" +
                " --image busybox --command /bin/sleep -- 3600")
        kubectl("wait --timeout=2m --for=condition=available" +
                " deployment/nginx -n policy-demo")
        kubectl("wait --timeout=2m --for=condition=ready" +
                " pod/access -n policy-demo")
        kubectl("wait --timeout=2m --for=condition=ready" +
                " pod/no-access -n policy-demo")
Example #2
 def send_and_check_vxlan_spoof():
     self.send_spoofed_vxlan_packet(self.ns_name, "scapy",
                                    "10.192.0.3", remote_pod_ip,
                                    "vxlan-spoofed")
     kubectl(
         "exec -t -n %s access grep -- vxlan-spoofed /root/snoop.txt"
         % self.ns_name)
Example #3
 def send_and_check_ipip_spoof():
     self.send_spoofed_ipip_packet(self.ns_name, "scapy",
                                   "10.192.0.3", remote_pod_ip,
                                   "ipip-spoofed")
     kubectl(
         "exec -t -n %s access -- grep ipip-spoofed /root/snoop.txt"
         % self.ns_name)
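
Both senders are expected to fail once Calico drops spoofed traffic, so tests wrap them in a negative assertion such as assert_cannot_spoof_ipip in Example #9. A sketch of the VXLAN counterpart, assuming subprocess is imported at module level:

 def assert_cannot_spoof_vxlan():
     try:
         send_and_check_vxlan_spoof()
     except subprocess.CalledProcessError:
         # Expected: the spoofed packet never appeared in snoop.txt.
         return
     raise ConnectionError("succeeded in sending spoofed VXLAN packet")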
Example #4
 def check_connected(name):
     try:
         kubectl("exec " + name + " -n policy-demo" +
                 " -- /bin/wget -O /dev/null -q --timeout=1 nginx")
     except subprocess.CalledProcessError:
         _log.exception("Failed to wget from nginx service")
         return False
     _log.debug("Contacted service")
     return True
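
A usage sketch for this helper, pairing it with the retry_until_success utility seen throughout these examples (the surrounding policy state is an assumption): once ingress to nginx is restricted to the 'access' pod, the checks read

 retry_until_success(lambda: self.assertTrue(check_connected("access")))
 retry_until_success(lambda: self.assertFalse(check_connected("no-access")))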
Example #5
 def clear_conntrack(self):
     node_dict = json.loads(kubectl("get po "
                                    "-n kube-system "
                                    "-l k8s-app=calico-node "
                                    "-o json"))
     # Flush conntrack in every calico-node pod
     for entry in node_dict["items"]:
         node = entry["metadata"]["name"]
         kubectl("exec -n kube-system %s conntrack -- -F" % node)
Example #6
 def tearDown(self):
     # Delete the test namespace
     self.delete_and_confirm(self.ns_name, "ns")
     # Change pool to use IPIP
     default_pool = json.loads(calicoctl("get ippool default-ipv4-ippool -o json"))
     default_pool["spec"]["vxlanMode"] = "Never"
     default_pool["spec"]["ipipMode"] = "Always"
     calicoctl_apply_dict(default_pool)
     # restart calico-nodes
     kubectl("delete po -n kube-system -l k8s-app=calico-node")
     kubectl("wait --timeout=2m --for=condition=ready" +
             " pods -l k8s-app=calico-node -n kube-system")
Example #7
 def send_packet(self, ns_name, name, remote_pod_ip, message):
     try:
         kubectl("exec " + name + " -ti -n %s -- "
                 "scapy << EOF\n"
                 "send("
                 "IP(dst='%s')/"
                 "UDP(dport=5000, sport=5000)/"
                 "Raw(load='%s'))\n" % (ns_name, remote_pod_ip, message))
     except subprocess.CalledProcessError:
         _log.exception("Failed to send from scapy")
         return False
     _log.debug("scapy sent direct packet")
     return True
Example #8
    def setUp(self):
        TestBase.setUp(self)
        self.ns_name = generate_unique_id(5, prefix="spoof")
        self.create_namespace(self.ns_name)
        # Create two client pods that live for the duration of the
        # test.  We will use 'kubectl exec' to try sending/receiving
        # from these at particular times.
        #
        # We do it this way because it takes a
        # relatively long time (7 seconds?) in this test setup for
        # Calico routing and policy to be set up correctly for a newly
        # created pod.
        kubectl(
            "run --generator=run-pod/v1 "
            "access "
            "-n %s "
            "--image busybox "
            "--overrides='{\"spec\": {\"nodeName\":\"kube-node-1\"}}' "
            "--command /bin/sh -- -c \"nc -l -u -p 5000 &> /root/snoop.txt\"" %
            self.ns_name)
        kubectl("run --generator=run-pod/v1 "
                "scapy "
                "-n %s "
                "--image ehlers/scapy "
                "--overrides='{\"spec\": {\"nodeName\":\"kube-node-2\"}}' "
                "--command /bin/sleep -- 3600" % self.ns_name)

        kubectl("wait --timeout=2m --for=condition=ready" +
                " pod/scapy -n %s" % self.ns_name)
        kubectl("wait --timeout=2m --for=condition=ready" +
                " pod/access -n %s" % self.ns_name)
Example #9
    def test_ipip_spoof(self):
        with DiagsCollector():
            # Change pool to use IPIP if necessary
            default_pool = json.loads(
                calicoctl("get ippool default-ipv4-ippool -o json"))
            if default_pool["spec"]["vxlanMode"] != "Never" or default_pool[
                    "spec"]["ipipMode"] != "Always":
                default_pool["spec"]["vxlanMode"] = "Never"
                default_pool["spec"]["ipipMode"] = "Always"
                calicoctl_apply_dict(default_pool)
                # restart calico-nodes
                kubectl("delete po -n kube-system -l k8s-app=calico-node")
                kubectl("wait --timeout=2m --for=condition=ready" +
                        " pods -l k8s-app=calico-node -n kube-system")

            # get busybox pod IP
            remote_pod_ip = retry_until_success(
                self.get_pod_ip, function_args=["access", self.ns_name])
            print(remote_pod_ip)

            # clear conntrack table on all hosts
            self.clear_conntrack()
            # test connectivity works pod-pod
            retry_until_success(self.send_and_check,
                                function_args=["ipip-normal", remote_pod_ip])

            # clear conntrack table on all hosts
            self.clear_conntrack()

            def send_and_check_ipip_spoof():
                self.send_spoofed_ipip_packet(self.ns_name, "scapy",
                                              "10.192.0.3", remote_pod_ip,
                                              "ipip-spoofed")
                kubectl(
                    "exec -t -n %s access grep -- ipip-spoofed /root/snoop.txt"
                    % self.ns_name)

            def assert_cannot_spoof_ipip():
                failed = True
                try:
                    send_and_check_ipip_spoof()
                except subprocess.CalledProcessError:
                    failed = False
                if failed:
                    print("ERROR - succeeded in sending spoofed IPIP packet")
                    raise ConnectionError

            # test connectivity does NOT work when spoofing
            retry_until_success(assert_cannot_spoof_ipip)
Example #10
def create_status(name, node, interval):
    kubectl("""apply -f - <<EOF
apiVersion: projectcalico.org/v3
kind: CalicoNodeStatus
metadata:
    name: %s
spec:
    node: %s
    classes:
    - Agent
    - BGP
    - Routes
    updatePeriodSeconds: %d
EOF
""" % (name, node, interval))
Example #11
 def get_svc_host_ipv6(self, svc, ns):
     # Map the pod's host IPv4 address to the corresponding host IPv6 address.
     ipv4 = kubectl("get po -l app=%s -n %s -o json | jq -r .items[0].status.hostIP" %
                    (svc, ns)).strip()
     for host_ipv4, host_ipv6 in zip(self.ipv4s, self.ipv6s):
         if ipv4 == host_ipv4:
             return host_ipv6
     assert False, "No IPv6 address known for host IP %s" % ipv4
Example #12
 def send_spoofed_vxlan_packet(self, ns_name, name, remote_node_ip, remote_pod_ip, message):
     try:
         kubectl("exec " + name + " -ti -n %s -- "
                                  "scapy3 << EOF\n"
                                  "send("
                                  "IP(dst='%s')/"
                                  "UDP(dport=4789)/"
                                  "VXLAN(vni=4096)/"
                                  "Ether()/"
                                  "IP(dst='%s')/"
                                  "UDP(dport=5000, sport=5000)/"
                                  "Raw(load='%s'))\n" % (ns_name, remote_node_ip, remote_pod_ip, message))
     except subprocess.CalledProcessError:
         _log.exception("Failed to send spoofed VXLAN packet from scapy")
         return False
     _log.debug("scapy sent spoofed VXLAN packet")
     return True
Example #13
 def get_svc_loadbalancer_ip(self, svc, ns):
     for i in range(10):
         lb_ip = kubectl(
             "get svc %s -n %s -o json | jq -r .status.loadBalancer.ingress[0].ip"
             % (svc, ns)).strip()
         if lb_ip != "null":
             return lb_ip
         time.sleep(1)
     raise Exception("No LoadBalancer IP found for service: %s/%s" %
                     (ns, svc))
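
A possible use of this helper, mirroring how cluster IPs are exercised in Example #15; the service name "nginx-lb" is hypothetical, and curl/retry_until_success come from the surrounding test utilities:

 lb_ip = self.get_svc_loadbalancer_ip("nginx-lb", "bgp-test")
 retry_until_success(curl, function_args=[lb_ip])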
Example #14
    def setUp(self):
        super(_TestBGPAdvert, self).setUp()

        # Create bgp test namespace
        self.ns = "bgp-test"
        self.create_namespace(self.ns)

        self.nodes, self.ips, _ = node_info()
        self.external_node_ip = start_external_node_with_bgp(
            "kube-node-extra",
            bird_peer_config=self.get_bird_conf(),
        )

        # Enable debug logging
        self.update_ds_env("calico-node", "kube-system",
                           {"BGP_LOGSEVERITYSCREEN": "debug"})

        # Establish BGPPeer from cluster nodes to node-extra
        calicoctl("""apply -f - << EOF
apiVersion: projectcalico.org/v3
kind: BGPPeer
metadata:
  name: node-extra.peer%s
EOF
""" % self.get_extra_peer_spec())

        kubectl("""apply -f - <<EOF
apiVersion: v1
kind: Secret
metadata:
  name: bgp-secrets
  namespace: kube-system
type: Opaque
stringData:
  rr-password: very-secret
EOF
""")
Example #15
    def test_cluster_ip_advertisement(self):
        """
        Runs the tests for service cluster IP advertisement
        - Create both a Local and a Cluster type NodePort service with a single replica.
          - assert only local and cluster CIDR routes are advertised.
          - assert /32 routes are used, source IP is preserved.
        - Scale the Local NP service so it is running on multiple nodes, assert ECMP routing, source IP is preserved.
        - Delete both services, assert only cluster CIDR route is advertised.
        """
        with DiagsCollector():

            calicoctl("""apply -f - << EOF
apiVersion: projectcalico.org/v3
kind: BGPConfiguration
metadata:
  name: default
spec:
  serviceClusterIPs:
  - cidr: 10.96.0.0/12
EOF
""")

            # Assert that a route to the service IP range is present.
            retry_until_success(
                lambda: self.assertIn("10.96.0.0/12", self.get_routes()))

            # Create both a Local and a Cluster type NodePort service with a single replica.
            local_svc = "nginx-local"
            cluster_svc = "nginx-cluster"
            self.deploy("nginx:1.7.9", local_svc, self.ns, 80)
            self.deploy("nginx:1.7.9",
                        cluster_svc,
                        self.ns,
                        80,
                        traffic_policy="Cluster")
            self.wait_until_exists(local_svc, "svc", self.ns)
            self.wait_until_exists(cluster_svc, "svc", self.ns)

            # Get clusterIPs.
            local_svc_ip = self.get_svc_cluster_ip(local_svc, self.ns)
            cluster_svc_ip = self.get_svc_cluster_ip(cluster_svc, self.ns)

            # Wait for the deployments to roll out.
            self.wait_for_deployment(local_svc, self.ns)
            self.wait_for_deployment(cluster_svc, self.ns)

            # Assert that both nginx services can be curled from the external node.
            retry_until_success(curl, function_args=[local_svc_ip])
            retry_until_success(curl, function_args=[cluster_svc_ip])

            # Assert that local clusterIP is an advertised route and cluster clusterIP is not.
            retry_until_success(
                lambda: self.assertIn(local_svc_ip, self.get_routes()))
            retry_until_success(
                lambda: self.assertNotIn(cluster_svc_ip, self.get_routes()))

            # Create a network policy that only accepts traffic from the external node.
            kubectl("""apply -f - << EOF
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: allow-tcp-80-ex
  namespace: bgp-test
spec:
  podSelector: {}
  policyTypes:
  - Ingress
  ingress:
  - from:
    - ipBlock: { cidr: %s/32 }
    ports:
    - protocol: TCP
      port: 80
EOF
""" % self.external_node_ip)

            # Connectivity to nginx-local should always succeed.
            for i in range(attempts):
                retry_until_success(curl, function_args=[local_svc_ip])

            # Connectivity to nginx-cluster will rarely succeed because it is load-balanced across all nodes.
            # When the traffic hits a node that doesn't host one of the service's pods, it is re-routed
            # to another node, and the SNAT applied on the way causes the policy above to drop the traffic.
            # Try to curl `attempts` times; at least one attempt should fail.
            try:
                for i in range(attempts):
                    curl(cluster_svc_ip)
                self.fail(
                    "external node should not be able to consistently access the cluster svc"
                )
            except subprocess.CalledProcessError:
                pass

            # Scale the local_svc to 4 replicas
            self.scale_deployment(local_svc, self.ns, 4)
            self.wait_for_deployment(local_svc, self.ns)
            self.assert_ecmp_routes(local_svc_ip,
                                    [self.ips[1], self.ips[2], self.ips[3]])
            for i in range(attempts):
                retry_until_success(curl, function_args=[local_svc_ip])

            # Delete both services.
            self.delete_and_confirm(local_svc, "svc", self.ns)
            self.delete_and_confirm(cluster_svc, "svc", self.ns)

            # Assert that clusterIP is no longer an advertised route.
            retry_until_success(
                lambda: self.assertNotIn(local_svc_ip, self.get_routes()))
Example #16
 def add_svc_external_ips(self, svc, ns, ips):
     ipsStr = ','.join('"{0}"'.format(ip) for ip in ips)
     patchStr = "{\"spec\": {\"externalIPs\": [%s]}}" % (ipsStr)
     return kubectl("patch svc %s -n %s --patch '%s'" %
                    (svc, ns, patchStr)).strip()
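
For instance, the call made in Example #23 expands to a plain kubectl patch:

 # Issues: kubectl patch svc nginx-local -n bgp-test \
 #             --patch '{"spec": {"externalIPs": ["175.200.1.1"]}}'
 self.add_svc_external_ips("nginx-local", "bgp-test", ["175.200.1.1"])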
Example #17
 def get_svc_host_ip(self, svc, ns):
     return kubectl(
         "get po -l app=%s -n %s -o json | jq -r .items[0].status.hostIP" %
         (svc, ns)).strip()
Example #18
 def get_svc_cluster_ip(self, svc, ns):
     return kubectl("get svc %s -n %s -o json | jq -r .spec.clusterIP" %
                    (svc, ns)).strip()
Example #19
 def tearDown(self):
     # Delete the client pods and the test namespace
     kubectl("delete --grace-period 0 pod access -n policy-demo")
     kubectl("delete --grace-period 0 pod no-access -n policy-demo")
     self.delete_and_confirm("policy-demo", "ns")
Example #20
    def test_rr(self):
        self.tearDown()
        self.setUpRR()

        # Create ExternalTrafficPolicy Local service with one endpoint on node-1
        kubectl("""apply -f - << EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-rr
  namespace: bgp-test
  labels:
    app: nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx
      run: nginx-rr
  template:
    metadata:
      labels:
        app: nginx
        run: nginx-rr
    spec:
      containers:
      - name: nginx-rr
        image: nginx:1.7.9
        ports:
        - containerPort: 80
      nodeSelector:
        beta.kubernetes.io/os: linux
        kubernetes.io/hostname: kube-node-1
---
apiVersion: v1
kind: Service
metadata:
  name: nginx-rr
  namespace: bgp-test
  labels:
    app: nginx
    run: nginx-rr
spec:
  ports:
  - port: 80
    targetPort: 80
  selector:
    app: nginx
    run: nginx-rr
  type: NodePort
  externalTrafficPolicy: Local
EOF
""")

        calicoctl("get nodes -o yaml")
        calicoctl("get bgppeers -o yaml")
        calicoctl("get bgpconfigs -o yaml")

        # Update node-2 to behave as a route reflector
        json_str = calicoctl("get node kube-node-2 -o json")
        node_dict = json.loads(json_str)
        node_dict['metadata']['labels']['i-am-a-route-reflector'] = 'true'
        node_dict['spec']['bgp']['routeReflectorClusterID'] = '224.0.0.1'
        calicoctl("""apply -f - << EOF
%s
EOF
""" % json.dumps(node_dict))

        # Disable node-to-node mesh and configure bgp peering
        # between node-1 and RR and also between external node and RR
        calicoctl("""apply -f - << EOF
apiVersion: projectcalico.org/v3
kind: BGPConfiguration
metadata: {name: default}
spec:
  nodeToNodeMeshEnabled: false
  asNumber: 64512
EOF
""")
        calicoctl("""apply -f - << EOF
apiVersion: projectcalico.org/v3
kind: BGPPeer
metadata: {name: kube-node-1}
spec:
  node: kube-node-1
  peerIP: 10.192.0.4
  asNumber: 64512
EOF
""")
        svc_json = kubectl("get svc nginx-rr -n bgp-test -o json")
        svc_dict = json.loads(svc_json)
        svcRoute = svc_dict['spec']['clusterIP']
        retry_until_success(lambda: self.assertIn(svcRoute, self.get_routes()))
Example #21
def delete_status(name):
    kubectl("delete caliconodestatuses.crd.projectcalico.org %s" % name)
Example #22
    def test_cluster_ip_advertisement(self):
        """
        Runs the tests for service cluster IPv6 advertisement
        - Create both a Local and a Cluster type NodePort service with a single replica.
          - assert only local and cluster CIDR routes are advertised.
          - assert /128 routes are used, source IP is preserved.
        - Scale the Local NP service so it is running on multiple nodes, assert ECMP routing, source IP is preserved.
        - Delete both services, assert only cluster CIDR route is advertised.
        """
        with DiagsCollector():

            calicoctl("""apply -f - << EOF
apiVersion: projectcalico.org/v3
kind: BGPConfiguration
metadata:
  name: default
spec:
  serviceClusterIPs:
  - cidr: fd00:10:96::/112
EOF
""")

            # Assert that a route to the service IP range is present.
            retry_until_success(lambda: self.assertIn("fd00:10:96::/112", self.get_routes()))

            # Create both a Local and a Cluster type NodePort service with a single replica.
            local_svc = "nginx-local"
            cluster_svc = "nginx-cluster"
            self.deploy("gcr.io/kubernetes-e2e-test-images/test-webserver:1.0", local_svc, self.ns, 80, ipv6=True)
            self.deploy("gcr.io/kubernetes-e2e-test-images/test-webserver:1.0", cluster_svc, self.ns, 80, traffic_policy="Cluster", ipv6=True)
            self.wait_until_exists(local_svc, "svc", self.ns)
            self.wait_until_exists(cluster_svc, "svc", self.ns)

            # Get clusterIPs.
            local_svc_ip = self.get_svc_cluster_ip(local_svc, self.ns)
            cluster_svc_ip = self.get_svc_cluster_ip(cluster_svc, self.ns)

            # Wait for the deployments to roll out.
            self.wait_for_deployment(local_svc, self.ns)
            self.wait_for_deployment(cluster_svc, self.ns)

            # Assert that both nginx services can be curled from the external node.
            retry_until_success(curl, function_args=[local_svc_ip])
            retry_until_success(curl, function_args=[cluster_svc_ip])

            # Assert that local clusterIP is an advertised route and cluster clusterIP is not.
            retry_until_success(lambda: self.assertIn(local_svc_ip, self.get_routes()))
            retry_until_success(lambda: self.assertNotIn(cluster_svc_ip, self.get_routes()))

            # Create a network policy that only accepts traffic from the external node.
            kubectl("""apply -f - << EOF
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: allow-tcp-80-ex
  namespace: bgp-test
spec:
  podSelector: {}
  policyTypes:
  - Ingress
  ingress:
  - from:
    - ipBlock: { cidr: %s/128 }
    ports:
    - protocol: TCP
      port: 80
EOF
""" % self.external_node_ip)

            # Connectivity to nginx-local should always succeed.
            for i in range(attempts):
                retry_until_success(curl, function_args=[local_svc_ip])

            # NOTE: Unlike in the IPv4 case (in test_bgp_advert.py) we cannot successfully test that
            # connectivity to nginx-cluster is load-balanced across all nodes (and hence, with the
            # above policy in place, will sometimes fail and sometimes succeed), because our current
            # observation is that Linux's IPv6 ECMP route choice does _not_ depend on source port,
            # even though it is documented as such when fib_multipath_hash_policy == 1.

            # Scale the local_svc to 4 replicas
            self.scale_deployment(local_svc, self.ns, 4)
            self.wait_for_deployment(local_svc, self.ns)
            self.assert_ecmp_routes(local_svc_ip, [self.ipv6s[1], self.ipv6s[2], self.ipv6s[3]])
            for i in range(attempts):
                retry_until_success(curl, function_args=[local_svc_ip])

            # Delete both services.
            self.delete_and_confirm(local_svc, "svc", self.ns)
            self.delete_and_confirm(cluster_svc, "svc", self.ns)

            # Assert that clusterIP is no longer an advertised route.
            retry_until_success(lambda: self.assertNotIn(local_svc_ip, self.get_routes()))
Example #23
    def test_external_ip_advertisement(self):
        """
        Runs the tests for service external IP advertisement
        """
        with DiagsCollector():

            # Whitelist two IP ranges for the external IPs we'll test with
            calicoctl("""apply -f - << EOF
apiVersion: projectcalico.org/v3
kind: BGPConfiguration
metadata:
  name: default
spec:
  serviceExternalIPs:
  - cidr: 175.200.0.0/16
  - cidr: 200.255.0.0/24
EOF
""")

            # Create both a Local and a Cluster type NodePort service with a single replica.
            local_svc = "nginx-local"
            cluster_svc = "nginx-cluster"
            self.deploy("nginx:1.7.9", local_svc, self.ns, 80)
            self.deploy("nginx:1.7.9",
                        cluster_svc,
                        self.ns,
                        80,
                        traffic_policy="Cluster")
            self.wait_until_exists(local_svc, "svc", self.ns)
            self.wait_until_exists(cluster_svc, "svc", self.ns)

            # Get clusterIPs.
            local_svc_ip = self.get_svc_cluster_ip(local_svc, self.ns)
            cluster_svc_ip = self.get_svc_cluster_ip(cluster_svc, self.ns)

            # Wait for the deployments to roll out.
            self.wait_for_deployment(local_svc, self.ns)
            self.wait_for_deployment(cluster_svc, self.ns)

            # Assert that clusterIPs are not advertised.
            retry_until_success(
                lambda: self.assertNotIn(local_svc_ip, self.get_routes()))
            retry_until_success(
                lambda: self.assertNotIn(cluster_svc_ip, self.get_routes()))

            # Create a network policy that only accepts traffic from the external node.
            kubectl("""apply -f - << EOF
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: allow-tcp-80-ex
  namespace: bgp-test
spec:
  podSelector: {}
  policyTypes:
  - Ingress
  ingress:
  - from:
    - ipBlock: { cidr: %s/32 }
    ports:
    - protocol: TCP
      port: 80
EOF
""" % self.external_node_ip)

            # Get host IPs for the nginx pods.
            local_svc_host_ip = self.get_svc_host_ip(local_svc, self.ns)
            cluster_svc_host_ip = self.get_svc_host_ip(cluster_svc, self.ns)

            # Select an IP from each external IP CIDR.
            local_svc_external_ip = "175.200.1.1"
            cluster_svc_external_ip = "200.255.255.1"

            # Add external IPs to the two services.
            self.add_svc_external_ips(local_svc, self.ns,
                                      [local_svc_external_ip])
            self.add_svc_external_ips(cluster_svc, self.ns,
                                      [cluster_svc_external_ip])

            # Verify that the external IP for the local service is advertised, but not the one for the cluster service.
            local_svc_externalips_route = "%s via %s" % (local_svc_external_ip,
                                                         local_svc_host_ip)
            cluster_svc_externalips_route = "%s via %s" % (
                cluster_svc_external_ip, cluster_svc_host_ip)
            retry_until_success(lambda: self.assertIn(
                local_svc_externalips_route, self.get_routes()))
            retry_until_success(lambda: self.assertNotIn(
                cluster_svc_externalips_route, self.get_routes()))

            # Scale the local_svc to 4 replicas.
            self.scale_deployment(local_svc, self.ns, 4)
            self.wait_for_deployment(local_svc, self.ns)

            # Verify that we have ECMP routes for the external IP of the local service.
            retry_until_success(lambda: self.assert_ecmp_routes(
                local_svc_external_ip, [self.ips[1], self.ips[2], self.ips[3]])
                                )

            # Delete both services, assert only cluster CIDR route is advertised.
            self.delete_and_confirm(local_svc, "svc", self.ns)
            self.delete_and_confirm(cluster_svc, "svc", self.ns)

            # Assert that external IP is no longer an advertised route.
            retry_until_success(lambda: self.assertNotIn(
                local_svc_externalips_route, self.get_routes()))
Example #24
 def send_and_check(self, payload, remote_pod_ip):
     self.send_packet(self.ns_name, "scapy", remote_pod_ip, payload)
     kubectl("exec -t -n %s access grep -- %s /root/snoop.txt" %
             (self.ns_name, payload))
Example #25
def read_status(name):
    status_json = kubectl("get caliconodestatus %s -o json" % name)
    status_dict = json.loads(status_json)
    return status_dict['status']
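
A sketch of how create_status, read_status, and delete_status combine in a test; retry_until_success comes from the surrounding utilities, and the exact status keys follow the CalicoNodeStatus API (treat them as assumptions):

 create_status("node-1-status", "kube-node-1", 10)

 def check_agent_reported():
     status = read_status("node-1-status")
     assert "agent" in status, "agent status not reported yet"

 retry_until_success(check_agent_reported)
 delete_status("node-1-status")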
Example #26
 def get_pod_ip(self, podname, ns_name):
     pod = json.loads(
         kubectl("get po -n %s %s -o json" % (ns_name, podname)))
     return pod["status"]["podIP"]
Example #27
    def test_rr(self):
        # Create ExternalTrafficPolicy Local service with one endpoint on node-1
        kubectl("""apply -f - << EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-rr
  namespace: bgp-test
  labels:
    app: nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx
      run: nginx-rr
  template:
    metadata:
      labels:
        app: nginx
        run: nginx-rr
    spec:
      containers:
      - name: nginx-rr
        image: nginx:1.7.9
        ports:
        - containerPort: 80
      nodeSelector:
        beta.kubernetes.io/os: linux
        kubernetes.io/hostname: %s
---
apiVersion: v1
kind: Service
metadata:
  name: nginx-rr
  namespace: bgp-test
  labels:
    app: nginx
    run: nginx-rr
spec:
  externalIPs:
  - 175.200.1.1
  ports:
  - port: 80
    targetPort: 80
  selector:
    app: nginx
    run: nginx-rr
  type: NodePort
  externalTrafficPolicy: Local
EOF
""" % self.nodes[1])

        calicoctl("get nodes -o yaml")
        calicoctl("get bgppeers -o yaml")
        calicoctl("get bgpconfigs -o yaml")

        # Update node-2 to behave as a route reflector
        json_str = calicoctl("get node %s -o json" % self.nodes[2])
        node_dict = json.loads(json_str)
        node_dict['metadata']['labels']['i-am-a-route-reflector'] = 'true'
        node_dict['spec']['bgp']['routeReflectorClusterID'] = '224.0.0.1'
        calicoctl("""apply -f - << EOF
%s
EOF
""" % json.dumps(node_dict))

        # Disable node-to-node mesh, add cluster and external IP CIDRs to
        # advertise, and configure BGP peering between the cluster nodes and the
        # RR.  (The BGP peering from the external node to the RR is included in
        # bird_conf_rr above.)
        calicoctl("""apply -f - << EOF
apiVersion: projectcalico.org/v3
kind: BGPConfiguration
metadata:
  name: default
spec:
  nodeToNodeMeshEnabled: false
  asNumber: 64512
  serviceClusterIPs:
  - cidr: 10.96.0.0/12
  serviceExternalIPs:
  - cidr: 175.200.0.0/16
EOF
""")

        calicoctl("""apply -f - << EOF
apiVersion: projectcalico.org/v3
kind: BGPPeer
metadata: {name: peer-with-rr}
spec:
  peerIP: %s
  asNumber: 64512
EOF
""" % self.ips[2])
        svc_json = kubectl("get svc nginx-rr -n bgp-test -o json")
        svc_dict = json.loads(svc_json)
        cluster_ip = svc_dict['spec']['clusterIP']
        external_ip = svc_dict['spec']['externalIPs'][0]
        retry_until_success(
            lambda: self.assertIn(cluster_ip, self.get_routes()))
        retry_until_success(
            lambda: self.assertIn(external_ip, self.get_routes()))
Example #28
    def test_node_exclusion(self):
        """
        Tests the node exclusion label.
        - Create services, assert advertised from all nodes.
        - Label one node so that it is excluded, assert that routes are withdrawn from that node.
        - Delete / recreate service, assert it is still advertised from the correct nodes.
        - Remove the exclusion label, assert that the node re-advertises the svc.
        """
        with DiagsCollector():

            calicoctl("""apply -f - << EOF
apiVersion: projectcalico.org/v3
kind: BGPConfiguration
metadata:
  name: default
spec:
  serviceClusterIPs:
  - cidr: 10.96.0.0/12
  serviceExternalIPs:
  - cidr: 175.200.0.0/16
EOF
""")

            # Assert that a route to the service IP range is present.
            cluster_cidr = "10.96.0.0/12"
            retry_until_success(
                lambda: self.assertIn(cluster_cidr, self.get_routes()))

            # Create both a Local and a Cluster type NodePort service with a single replica.
            local_svc = "nginx-local"
            cluster_svc = "nginx-cluster"
            self.deploy("nginx:1.7.9", local_svc, self.ns, 80)
            self.deploy("nginx:1.7.9",
                        cluster_svc,
                        self.ns,
                        80,
                        traffic_policy="Cluster")
            self.wait_until_exists(local_svc, "svc", self.ns)
            self.wait_until_exists(cluster_svc, "svc", self.ns)

            # Get clusterIPs.
            local_svc_ip = self.get_svc_cluster_ip(local_svc, self.ns)
            cluster_svc_ip = self.get_svc_cluster_ip(cluster_svc, self.ns)

            # Wait for the deployments to roll out.
            self.wait_for_deployment(local_svc, self.ns)
            self.wait_for_deployment(cluster_svc, self.ns)

            # Assert that both nginx services can be curled from the external node.
            retry_until_success(curl, function_args=[local_svc_ip])
            retry_until_success(curl, function_args=[cluster_svc_ip])

            # Assert that local clusterIP is an advertised route and cluster clusterIP is not.
            retry_until_success(
                lambda: self.assertIn(local_svc_ip, self.get_routes()))
            retry_until_success(
                lambda: self.assertNotIn(cluster_svc_ip, self.get_routes()))

            # Connectivity should always succeed.
            for i in range(attempts):
                retry_until_success(curl, function_args=[local_svc_ip])
                retry_until_success(curl, function_args=[cluster_svc_ip])

            # Scale local service to 4 replicas
            self.scale_deployment(local_svc, self.ns, 4)
            self.wait_for_deployment(local_svc, self.ns)
            self.wait_for_deployment(cluster_svc, self.ns)

            # Assert routes are correct and services are accessible.
            # Local service should only be advertised from nodes that can run pods.
            # The cluster CIDR should be advertised from all nodes.
            self.assert_ecmp_routes(local_svc_ip,
                                    [self.ips[1], self.ips[2], self.ips[3]])
            self.assert_ecmp_routes(
                cluster_cidr,
                [self.ips[0], self.ips[1], self.ips[2], self.ips[3]])
            for i in range(attempts):
                retry_until_success(curl, function_args=[local_svc_ip])

            # Label one node in order to exclude it from service advertisement.
            # After this, we should expect that all routes from that node are
            # withdrawn.
            kubectl(
                "label node %s node.kubernetes.io/exclude-from-external-load-balancers=true"
                % self.nodes[1])

            # Assert routes are correct and services are accessible.
            # Routes should no longer go via self.nodes[1].
            self.assert_ecmp_routes(local_svc_ip, [self.ips[2], self.ips[3]])
            self.assert_ecmp_routes(cluster_cidr,
                                    [self.ips[0], self.ips[2], self.ips[3]])

            # The same should hold for the external IP CIDR.
            external_ip_cidr = "175.200.0.0/16"
            self.assert_ecmp_routes(external_ip_cidr,
                                    [self.ips[0], self.ips[2], self.ips[3]])

            # Should still be reachable through other nodes.
            for i in range(attempts):
                retry_until_success(curl, function_args=[local_svc_ip])
                retry_until_success(curl, function_args=[cluster_svc_ip])

            # Delete the local service, confirm that it is no longer advertised.
            self.delete_and_confirm(local_svc, "svc", self.ns)
            retry_until_success(
                lambda: self.assertNotIn(local_svc_ip, self.get_routes()))

            # Re-create the local service. Assert it is advertised from the correct nodes,
            # but not from the excluded node.
            self.create_service(local_svc, local_svc, self.ns, 80)
            self.wait_until_exists(local_svc, "svc", self.ns)
            local_svc_ip = self.get_svc_cluster_ip(local_svc, self.ns)
            self.assert_ecmp_routes(local_svc_ip, [self.ips[2], self.ips[3]])
            for i in range(attempts):
                retry_until_success(curl, function_args=[local_svc_ip])

            # Add an external IP to the local svc and assert it follows the same
            # advertisement rules.
            local_svc_external_ip = "175.200.1.1"
            self.add_svc_external_ips(local_svc, self.ns,
                                      [local_svc_external_ip])
            self.assert_ecmp_routes(local_svc_external_ip,
                                    [self.ips[2], self.ips[3]])

            # Enable the excluded node. Assert that the node starts
            # advertising service routes again.
            kubectl(
                "label node %s node.kubernetes.io/exclude-from-external-load-balancers=false --overwrite"
                % self.nodes[1])
            self.assert_ecmp_routes(local_svc_ip,
                                    [self.ips[1], self.ips[2], self.ips[3]])
            self.assert_ecmp_routes(local_svc_external_ip,
                                    [self.ips[1], self.ips[2], self.ips[3]])
            self.assert_ecmp_routes(
                cluster_cidr,
                [self.ips[0], self.ips[1], self.ips[2], self.ips[3]])
            for i in range(attempts):
                retry_until_success(curl, function_args=[local_svc_ip])

            # Delete both services.
            self.delete_and_confirm(local_svc, "svc", self.ns)
            self.delete_and_confirm(cluster_svc, "svc", self.ns)

            # Assert that clusterIP is no longer an advertised route.
            retry_until_success(
                lambda: self.assertNotIn(local_svc_ip, self.get_routes()))