Example #1
    def test_single_host_ipv6(self):
        """
        Test mainline functionality without using an orchestrator plugin
        """
        with DockerHost('host', dind=False) as host:
            host.calicoctl("profile add TEST_GROUP")

            # Create two workloads on the host.
            workload1 = host.create_workload("workload1")
            workload2 = host.create_workload("workload2")

            # Add the nodes to Calico networking.
            host.calicoctl("container add %s fd80:24e2:f998:72d6::1" %
                           workload1)
            host.calicoctl("container add %s fd80:24e2:f998:72d6::2" %
                           workload2)

            # Now add the profiles - one using set and one using append
            host.calicoctl("container %s profile set TEST_GROUP" % workload1)
            host.calicoctl("container %s profile append TEST_GROUP" %
                           workload2)

            # Check it works
            workload1.assert_can_ping("fd80:24e2:f998:72d6::2", retries=3)
            workload2.assert_can_ping("fd80:24e2:f998:72d6::1", retries=3)
Example #2
    def test_container_to_host(self):
        """
        Test that a container can ping the host.

        This function is important for Mesos, since the containerized executor
        needs to exchange messages with the Mesos Slave process on the host.

        Note also that we do not use the Docker Network driver for this test.
        The Docker Container Network Model defines a "network" as a group of
        endpoints that can communicate with each other, but are isolated from
        everything else.  Thus, an endpoint of a Docker network should not be
        able to ping the host.
        """
        with DockerHost('host', dind=False) as host:
            host.calicoctl("profile add TEST")

            # Use standard docker bridge networking.
            node1 = host.create_workload("node1")

            # Add the container to Calico networking.
            host.calicoctl("container add %s 192.168.100.1" % node1)

            # Get the endpoint ID for the container
            ep1 = host.calicoctl("container %s endpoint-id show" % node1)

            # Now add the profiles.
            host.calicoctl("endpoint %s profile set TEST" % ep1)

            # Check it works.  Note that the profile allows all outgoing
            # traffic by default, and conntrack should allow the reply.
            node1.assert_can_ping(host.ip, retries=10)
Example #3
    def test_single_host_ipv4(self):
        """
        Test mainline functionality without using an orchestrator plugin
        """
        with DockerHost('host', dind=False) as host:
            host.calicoctl("profile add TEST_GROUP")

            # Create two workloads on the host.
            workload1 = host.create_workload("workload1")
            workload2 = host.create_workload("workload2")

            # Add the nodes to Calico networking.
            host.calicoctl("container add %s 192.168.1.1" % workload1)
            host.calicoctl("container add %s 192.168.1.2" % workload2)

            # Now add the profiles - one using set and one using append
            host.calicoctl("container %s profile set TEST_GROUP" % workload1)
            host.calicoctl("container %s profile append TEST_GROUP" %
                           workload2)

            # TODO - assert on output of endpoint show and endpoint profile
            # show commands.

            # Check it works
            workload1.assert_can_ping("192.168.1.2", retries=3)
            workload2.assert_can_ping("192.168.1.1", retries=3)

            # Test the teardown commands
            host.calicoctl("profile remove TEST_GROUP")
            host.calicoctl("container remove %s" % workload1)
            host.calicoctl("container remove %s" % workload2)
            host.calicoctl("pool remove 192.168.0.0/16")
            host.calicoctl("node stop")
            host.calicoctl("node remove")
Example #4
 def setUpClass(cls):
     super(MultiHostIpam, cls).setUpClass()
     cls.hosts = []
     cls.hosts.append(
         DockerHost("host1",
                    additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS,
                    post_docker_commands=POST_DOCKER_COMMANDS,
                    start_calico=False))
     cls.hosts.append(
         DockerHost("host2",
                    additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS,
                    post_docker_commands=POST_DOCKER_COMMANDS,
                    start_calico=False))
     cls.hosts[0].start_calico_node()
     cls.hosts[1].start_calico_node()
     cls.network = cls.hosts[0].create_network("testnet1")
Example #5
    def test_gce_rr(self, with_ipip):
        """As test_gce except with a route reflector instead of mesh config."""
        with DockerHost('host1',
                        additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS,
                        simulate_gce_routing=True,
                        start_calico=False) as host1, \
             DockerHost('host2',
                        additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS,
                        simulate_gce_routing=True,
                        start_calico=False) as host2, \
             RouteReflectorCluster(1, 1) as rrc:

            rr_ip = None
            for rr in rrc.get_redundancy_group():
                rr_ip = rr.ip
            self._test_gce_int(with_ipip, 'bird', host1, host2, rr_ip)
Example #6
    def _test_add_autoassigned(self, version):
        """
        Test "calicoctl container add <container> ipv<version>"
        """
        with DockerHost('host', dind=False) as host:
            # Test that auto-assigning IP addresses gives what we expect
            ip = "ipv" + str(version)
            workloads = self._setup_env(host, count=2, ip=ip)

            # IPs are assigned sequentially from the selected block.
            first_ip = IPAddress(workloads[0].ip)
            assert first_ip.version == version
            assert IPAddress(workloads[1].ip) == first_ip + 1, \
                "Assigned %s, expected %s" % (workloads[1].ip, first_ip + 1)

            # Test each workload can ping the other
            workloads[0].assert_can_ping(workloads[1].ip, retries=3)
            workloads[1].assert_can_ping(workloads[0].ip, retries=3)

            host.calicoctl("container remove {0}".format("workload0"))
            host.calicoctl("container remove {0}".format("workload1"))

            host.remove_workloads()

            # Test that recreating returns the next two IPs (IPs are not
            # reassigned automatically unless we have run out of IPs).
            workloads = self._setup_env(host, count=2, ip=ip)
            assert IPAddress(workloads[0].ip) == first_ip + 2, \
                "Assigned %s, expected %s" % (workloads[0].ip, first_ip + 2)
            assert IPAddress(workloads[1].ip) == first_ip + 3, \
                "Assigned %s, expected %s" % (workloads[1].ip, first_ip + 3)

            # Test each workload can ping the other
            workloads[0].assert_can_ping(workloads[1].ip, retries=3)
            workloads[1].assert_can_ping(workloads[0].ip, retries=3)
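
The autoassign test above (and the pool autoassign tests further down) call a _setup_env helper that is not shown on this page. A minimal sketch of what such a helper might look like, assuming it simply wraps the calicoctl commands already used in these examples; the real helper in the test suite may differ:

    def _setup_env(self, host, count, ip):
        """
        Hypothetical sketch: create `count` workloads named workload0,
        workload1, ..., add each one to Calico networking with the given
        IP argument ("ipv4", "ipv6" or a pool CIDR), and join them all to
        one shared profile so that they can reach each other.
        """
        host.calicoctl("profile add TEST_GROUP")
        workloads = []
        for idx in range(count):
            workload = host.create_workload("workload%d" % idx)
            host.calicoctl("container add %s %s" % (workload, ip))
            host.calicoctl("container %s profile append TEST_GROUP" %
                           workload)
            # The real helper presumably also records the assigned address
            # on the workload object, since the tests read workload.ip.
            workloads.append(workload)
        return workloads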
Example #7
    def test_ipip_addr_assigned(self):
        with DockerHost('host', dind=False, start_calico=False) as host:
            # Set up first pool before Node is started, to ensure we get tunl IP on boot
            ipv4_pool = IPNetwork("10.0.1.0/24")
            self.pool_action(host, "create", ipv4_pool, ipip_mode="Always")
            host.start_calico_node()
            self.assert_tunl_ip(host, ipv4_pool, expect=True)

            # Disable the IP Pool, and make sure the tunl IP is not from this IP pool anymore.
            self.pool_action(host, "apply", ipv4_pool, ipip_mode="Always", disabled=True)
            self.assert_tunl_ip(host, ipv4_pool, expect=False)

            # Re-enable the IP pool and make sure the tunl IP is assigned from that IP pool again.
            self.pool_action(host, "apply", ipv4_pool, ipip_mode="Always")
            self.assert_tunl_ip(host, ipv4_pool, expect=True)

            # Test that removing pool removes the tunl IP.
            self.pool_action(host, "delete", ipv4_pool, ipip_mode="Always")
            self.assert_tunl_ip(host, ipv4_pool, expect=False)

            # Test that re-adding the pool triggers the confd watch and we get an IP
            self.pool_action(host, "create", ipv4_pool, ipip_mode="Always")
            self.assert_tunl_ip(host, ipv4_pool, expect=True)

            # Test that by adding another pool, then deleting the first,
            # we remove the original IP, and allocate a new one from the new pool
            new_ipv4_pool = IPNetwork("192.168.0.0/16")
            self.pool_action(host, "create", new_ipv4_pool, ipip_mode="Always", pool_name="pool-b")
            self.pool_action(host, "delete", ipv4_pool)
            self.assert_tunl_ip(host, new_ipv4_pool)
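
pool_action and assert_tunl_ip are helpers of this test class and are not included here. For reference, assert_tunl_ip could plausibly check the address on the host's tunl0 interface against the pool, along the lines of the sketch below (written as a standalone function; in the suite it is a method of the test class, and this is an assumption, not the suite's actual code):

import re
from netaddr import IPAddress

def assert_tunl_ip(host, pool, expect=True):
    # The IPIP tunnel address appears on the host's tunl0 interface;
    # check whether that address falls inside the given pool.
    output = host.execute("ip addr show tunl0")
    match = re.search(r"inet (\d+\.\d+\.\d+\.\d+)", output)
    has_pool_ip = match is not None and IPAddress(match.group(1)) in pool
    assert has_pool_ip == expect, \
        "tunl0 address check failed (expect=%s):\n%s" % (expect, output)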
Example #8
 def test_readiness_multihost(self):
     """
     A simple base case to check if calico/node becomes ready.
     """
     with DockerHost('host1',
                     additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS) as host1, \
             DockerHost('host2',
                        additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS) as host2:
         retry_until_success(
             host1.execute,
             retries=10,
             command="docker exec calico-node /bin/readiness -bird -felix")
         retry_until_success(
             host2.execute,
             retries=10,
             command="docker exec calico-node /bin/readiness -bird -felix")
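
retry_until_success is a small utility used throughout these tests. Judging from the call sites (a callable, a retry count, an optional exception class, and keyword arguments forwarded to the callable), it presumably looks roughly like the following sketch; the pause between attempts is an assumption:

import time

def retry_until_success(fn, retries=10, ex_class=Exception, **kwargs):
    # Keep calling fn(**kwargs) until it stops raising ex_class,
    # re-raising after the final attempt.
    for attempt in range(retries + 1):
        try:
            return fn(**kwargs)
        except ex_class:
            if attempt == retries:
                raise
            time.sleep(1)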
Example #9
    def test_defaults(self):
        """
        Test default BGP configuration commands.
        """
        with DockerHost('host', start_calico=False, dind=False) as host:
            # TODO: Re-enable or remove after a decision is made on the defaults
            # Check default AS command
            #response = host.calicoctl("get BGPConfiguration -o yaml")
            #bgpcfg = yaml.safe_load(response)
            #self.assertEquals(bgpcfg['items'][0]['spec']['asNumber'], 64512)

            # Set the default AS number.
            update_bgp_config(host, asNum=12345)

            self.assertEquals(get_bgp_spec(host)['asNumber'], 12345)

            with self.assertRaises(CommandExecError):
                update_bgp_config(host, asNum=99999999999999999999999)
            with self.assertRaises(CommandExecError):
                update_bgp_config(host, asNum='abcde')

            # Check BGP mesh command
            if 'nodeToNodeMeshEnabled' in get_bgp_spec(host):
                self.assertEquals(
                    get_bgp_spec(host)['nodeToNodeMeshEnabled'], True)

            update_bgp_config(host, nodeMesh=False)
            self.assertEquals(
                get_bgp_spec(host)['nodeToNodeMeshEnabled'], False)

            update_bgp_config(host, nodeMesh=True)
            self.assertEquals(
                get_bgp_spec(host)['nodeToNodeMeshEnabled'], True)
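
update_bgp_config and get_bgp_spec come from the suite's test utilities and are not shown on this page. Going by the commented-out code above, they presumably wrap calicoctl get/apply of the default BGPConfiguration resource; a rough, hypothetical sketch (the real helpers almost certainly merge with any existing configuration and handle errors more carefully):

import yaml

def get_bgp_spec(host):
    # Read back the BGPConfiguration list and return the spec of the
    # first (default) entry.
    response = host.calicoctl("get BGPConfiguration -o yaml")
    return yaml.safe_load(response)['items'][0]['spec']

def update_bgp_config(host, asNum=None, nodeMesh=None):
    # Build and apply a "default" BGPConfiguration with the requested
    # fields.  calicoctl rejects invalid values (e.g. an out-of-range AS
    # number), which surfaces as a CommandExecError in the tests.
    spec = {}
    if asNum is not None:
        spec['asNumber'] = asNum
    if nodeMesh is not None:
        spec['nodeToNodeMeshEnabled'] = nodeMesh
    config = {'apiVersion': 'projectcalico.org/v3',
              'kind': 'BGPConfiguration',
              'metadata': {'name': 'default'},
              'spec': spec}
    host.writefile("bgpconfig.yaml", yaml.dump(config))
    host.calicoctl("apply -f bgpconfig.yaml")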
Example #10
 def test_diags(self):
     """
     Test that the diags command successfully creates a tar.gz file.
     """
     with DockerHost('host', dind=False, start_calico=False) as host:
         results = host.calicoctl("diags")
         self.assertIn(".tar.gz", results)
Example #11
    def test_pool_crud(self):
        """
        Test a basic CRUD flow for the pool commands.
        """
        with DockerHost('host', dind=False, start_calico=False) as host:

            # Set up the ipv4 and ipv6 pools to use
            ipv4_pool = "10.0.1.0/24"
            ipv6_pool = "fed0:8001::/64"

            # Run pool commands to add the ipv4 pool and show the pools
            host.calicoctl("pool add %s" % ipv4_pool)
            pool_out = host.calicoctl("pool show")

            # Assert output contains the ipv4 pool, but not the ipv6
            self.assertIn(ipv4_pool, pool_out)
            self.assertNotIn(ipv6_pool, pool_out)

            # Run pool commands to add the ipv6 pool and show the pools
            host.calicoctl("pool add %s" % ipv6_pool)
            pool_out = host.calicoctl("pool show")

            # Assert output contains both the ipv4 pool and the ipv6
            self.assertIn(ipv4_pool, pool_out)
            self.assertIn(ipv6_pool, pool_out)

            # Remove both the ipv4 pool and ipv6 pool
            host.calicoctl("pool remove %s" % ipv4_pool)
            host.calicoctl("pool remove %s" % ipv6_pool)
            pool_out = host.calicoctl("pool show")

            # Assert the pool show output does not contain either pool
            self.assertNotIn(ipv4_pool, pool_out)
            self.assertNotIn(ipv6_pool, pool_out)
Example #12
 def setUpClass(cls):
     # First, create a (fake) host to run things in
     cls.host = DockerHost(
         "host",
         additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS,
         start_calico=False,
         dind=False)
Example #13
    def test_as_num(self):
        """
        Test using different AS number for the node-to-node mesh.

        We run a multi-host test for this as we need to set up real BGP peers.
        """
        with DockerHost('host1', start_calico=False) as host1, \
             DockerHost('host2', start_calico=False) as host2:

            # Set the default AS number.
            host1.calicoctl("bgp default-node-as %s" % LARGE_AS_NUM)

            # Start host1 using the inherited AS, and host2 using a specified
            # AS (same as default).
            host1.start_calico_node()
            host2.start_calico_node("--as=%s" % LARGE_AS_NUM)

            # Create a profile to associate with both workloads
            host1.calicoctl("profile add TEST_GROUP")

            # Create a workload on each host.
            workload_host1 = host1.create_workload("workload1")
            workload_host2 = host2.create_workload("workload2")

            # Add the workloads to Calico networking
            host1.calicoctl("container add %s %s" % (workload_host1,
                                                     DEFAULT_IPV4_ADDR_1))
            host2.calicoctl("container add %s %s" % (workload_host2,
                                                     DEFAULT_IPV4_ADDR_2))

            # Now add the profiles - one using set and one using append
            host1.calicoctl("container %s profile set TEST_GROUP" % workload_host1)
            host2.calicoctl("container %s profile append TEST_GROUP" % workload_host2)

            # Allow network to converge
            workload_host1.assert_can_ping(DEFAULT_IPV4_ADDR_2, retries=10)

            # Check connectivity in both directions
            self.assert_ip_connectivity(workload_list=[workload_host1,
                                                       workload_host2],
                                        ip_pass_list=[DEFAULT_IPV4_ADDR_1,
                                                      DEFAULT_IPV4_ADDR_2])

            # Check the BGP status on each host.
            check_bird_status(host1, [("node-to-node mesh", host2.ip, "Established")])
            check_bird_status(host2, [("node-to-node mesh", host1.ip, "Established")])
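
assert_ip_connectivity is a TestBase helper. Based on how it is called in these examples, a plausible (hypothetical) shape is the sketch below; the ip_fail_list parameter is assumed for symmetry:

    def assert_ip_connectivity(self, workload_list, ip_pass_list,
                               ip_fail_list=None):
        # Every workload must be able to ping every address in the pass
        # list, and must not be able to ping anything in the fail list.
        for workload in workload_list:
            for ip in ip_pass_list:
                workload.assert_can_ping(ip, retries=3)
            for ip in (ip_fail_list or []):
                workload.assert_cant_ping(ip, retries=3)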
Example #14
    def _test_single_route_reflector(self, backend='bird'):
        """
        Run a multi-host test using a single route reflector and global
        peering.
        """
        with DockerHost('host1',
                        additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS,
                        start_calico=False) as host1, \
             DockerHost('host2',
                        additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS,
                        start_calico=False) as host2, \
             RouteReflectorCluster(1, 1) as rrc:

            # Start both hosts using specific backends.
            host1.start_calico_node("--backend=%s" % backend)
            host2.start_calico_node("--backend=%s" % backend)

            # Set the default AS number - as this is used by the RR mesh, and
            # turn off the node-to-node mesh (do this from any host).
            host1.calicoctl("config set asNumber 64514")
            host1.calicoctl("config set nodeToNodeMesh off")

            # Create a workload on each host in the same network.
            network1 = host1.create_network("subnet1")
            workload_host1 = host1.create_workload("workload1",
                                                   network=network1)
            workload_host2 = host2.create_workload("workload2",
                                                   network=network1)

            # Allow network to converge (which it won't)
            self.assert_false(
                workload_host1.check_can_ping(workload_host2.ip, retries=5))

            # Set global config telling all calico nodes to peer with the
            # route reflector.  This can be run from either host.
            rg = rrc.get_redundancy_group()
            assert len(rg) == 1
            create_bgp_peer(host1, "global", rg[0].ip, 64514)

            # Allow network to converge (which it now will).
            self.assert_true(
                workload_host1.check_can_ping(workload_host2.ip, retries=10))

            # And check connectivity in both directions.
            self.assert_ip_connectivity(
                workload_list=[workload_host1, workload_host2],
                ip_pass_list=[workload_host1.ip, workload_host2.ip])
Example #15
    def setUpClass(cls):
        wipe_etcd(get_ip())

        # Rough idea for setup
        #
        #    Network1                  Network2
        #
        #   container1                 container2
        #    foo = bar                  baz = bop
        #
        #   container3                 container4
        #    foo = bing                 foo = bar

        cls.hosts = []
        cls.host1 = DockerHost(
                "host1",
                additional_docker_options=ADDITIONAL_DOCKER_OPTIONS,
                post_docker_commands=POST_DOCKER_COMMANDS,
                start_calico=False)
        cls.host1_hostname = cls.host1.execute("hostname")
        cls.hosts.append(cls.host1)
        cls.host2 = DockerHost(
                "host2",
                additional_docker_options=ADDITIONAL_DOCKER_OPTIONS,
                post_docker_commands=POST_DOCKER_COMMANDS,
                start_calico=False)
        cls.host2_hostname = cls.host2.execute("hostname")
        cls.hosts.append(cls.host2)

        for host in cls.hosts:
            host.start_calico_node(options='--use-docker-networking-container-labels')

        cls.network1 = cls.host1.create_network("network1")
        cls.network2 = cls.host1.create_network("network2")

        cls.workload1_nw1_foo_bar = cls.host1.create_workload(
                "workload1", network=cls.network1,
                labels=["org.projectcalico.label.foo=bar"])
        cls.workload2_nw2_baz_bop = cls.host1.create_workload(
                "workload2", network=cls.network2,
                labels=["org.projectcalico.label.baz=bop"])
        cls.workload3_nw1_foo_bing = cls.host2.create_workload(
                "workload3", network=cls.network1,
                labels=["org.projectcalico.label.foo=bing"])
        cls.workload4_nw2_foo_bar = cls.host2.create_workload(
                "workload4", network=cls.network2,
                labels=["org.projectcalico.label.foo=bar"])
Example #16
 def setUpClass(cls):
     super(TestBase, cls).setUpClass()
     cls.hosts = []
     cls.hosts.append(
         DockerHost("host1",
                    additional_docker_options=ADDITIONAL_DOCKER_OPTIONS,
                    post_docker_commands=POST_DOCKER_COMMANDS,
                    start_calico=False))
     cls.hosts.append(
         DockerHost("host2",
                    additional_docker_options=ADDITIONAL_DOCKER_OPTIONS,
                    post_docker_commands=POST_DOCKER_COMMANDS,
                    start_calico=False))
     cls.hosts[0].start_calico_node()
     cls.hosts[1].start_calico_node()
     cls.network = cls.hosts[0].create_network("testnet1",
                                               ipam_driver="calico-ipam")
Example #17
 def test_readiness(self):
     """
     A simple base case to check if calico/node becomes ready.
     """
     with DockerHost('host1',
                     additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS
                     ) as host1:
         retry_until_success(host1.assert_is_ready, retries=30)
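
assert_is_ready (and the assert_is_live used in a later example) are DockerHost methods that are not shown here. Given the raw readiness command used in the multi-host variant above, assert_is_ready presumably boils down to something like this sketch:

    def assert_is_ready(self):
        # Run the readiness probe inside the calico-node container.
        # execute() raises on a non-zero exit code, so retry_until_success
        # keeps calling this until the node reports ready.
        self.execute("docker exec calico-node /bin/readiness -bird -felix")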
Example #18
    def _test_as_num(self, backend='bird'):
        """
        Test using different AS number for the node-to-node mesh.

        We run a multi-host test for this as we need to set up real BGP peers.
        """
        with DockerHost('host1',
                        additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS,
                        start_calico=False) as host1, \
             DockerHost('host2',
                        additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS,
                        start_calico=False) as host2:

            # Set the default AS number.
            update_bgp_config(host1, asNum=LARGE_AS_NUM)

            # Start host1 using the inherited AS, and host2 using a specified
            # AS (same as default).
            host1.start_calico_node("--backend=%s" % backend)
            host2.start_calico_node("--backend=%s --as=%s" %
                                    (backend, LARGE_AS_NUM))

            # Create a network and a couple of workloads on each host.
            network1 = host1.create_network("subnet1",
                                            subnet=DEFAULT_IPV4_POOL_CIDR)
            workload_host1 = host1.create_workload("workload1",
                                                   network=network1,
                                                   ip=DEFAULT_IPV4_ADDR_1)
            workload_host2 = host2.create_workload("workload2",
                                                   network=network1,
                                                   ip=DEFAULT_IPV4_ADDR_2)

            # Allow network to converge
            self.assert_true(
                workload_host1.check_can_ping(DEFAULT_IPV4_ADDR_2, retries=10))

            # Check connectivity in both directions
            self.assert_ip_connectivity(
                workload_list=[workload_host1, workload_host2],
                ip_pass_list=[DEFAULT_IPV4_ADDR_1, DEFAULT_IPV4_ADDR_2])

            # Check the BGP status on each host.
            check_bird_status(host1,
                              [("node-to-node mesh", host2.ip, "Established")])
            check_bird_status(host2,
                              [("node-to-node mesh", host1.ip, "Established")])
Example #19
    def _test_global_peers(self, backend='bird'):
        """
        Test global BGP peer configuration.

        Test by turning off the mesh and configuring the mesh as
        a set of global peers.
        """
        with DockerHost('host1',
                        additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS,
                        start_calico=False) as host1, \
             DockerHost('host2',
                        additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS,
                        start_calico=False) as host2:
            # Start both hosts using specific AS numbers.
            host1.start_calico_node("--backend=%s --as=%s" % (backend, LARGE_AS_NUM))
            host2.start_calico_node("--backend=%s --as=%s" % (backend, LARGE_AS_NUM))

            # Create a network and a couple of workloads on each host.
            network1 = host1.create_network("subnet1", subnet=DEFAULT_IPV4_POOL_CIDR)
            workload_host1 = host1.create_workload("workload1", network=network1, ip=DEFAULT_IPV4_ADDR_1)
            workload_host2 = host2.create_workload("workload2", network=network1, ip=DEFAULT_IPV4_ADDR_2)

            # Allow network to converge
            self.assert_true(workload_host1.check_can_ping(DEFAULT_IPV4_ADDR_2, retries=10))

            # Turn the node-to-node mesh off and wait for connectivity to drop.
            update_bgp_config(host1, nodeMesh=False)
            self.assert_true(workload_host1.check_cant_ping(DEFAULT_IPV4_ADDR_2, retries=10))

            # Configure global peers to explicitly set up a mesh.
            create_bgp_peer(host1, 'global', host2.ip, LARGE_AS_NUM)
            create_bgp_peer(host2, 'global', host1.ip, LARGE_AS_NUM)

            # Allow network to converge
            self.assert_true(workload_host1.check_can_ping(DEFAULT_IPV4_ADDR_2, retries=10))

            # Check connectivity in both directions
            self.assert_ip_connectivity(workload_list=[workload_host1,
                                                       workload_host2],
                                        ip_pass_list=[DEFAULT_IPV4_ADDR_1,
                                                      DEFAULT_IPV4_ADDR_2])

            # Check the BGP status on each host.  Connections from a node to
            # itself will be idle since this is invalid BGP configuration.
            check_bird_status(host1, [("global", host2.ip, "Established")])
            check_bird_status(host2, [("global", host1.ip, "Established")])
Example #20
    def test_assign_specific_ip(self):
        """
        Test that a libnetwork assigned IP is allocated to the container with
        Calico when using the '--ip' flag on docker run.
        """
        with DockerHost('host1',
                        additional_docker_options=ADDITIONAL_DOCKER_OPTIONS,
                        post_docker_commands=POST_DOCKER_COMMANDS,
                        start_calico=False) as host1, \
            DockerHost('host2',
                       additional_docker_options=ADDITIONAL_DOCKER_OPTIONS,
                       post_docker_commands=POST_DOCKER_COMMANDS,
                       start_calico=False) as host2:

            host1.start_calico_node("--libnetwork")
            host2.start_calico_node("--libnetwork")

            # Set up one endpoint on each host
            workload1_ip = "192.168.1.101"
            workload2_ip = "192.168.1.102"
            subnet = "192.168.0.0/16"
            network = host1.create_network("testnet", subnet=subnet)
            workload1 = host1.create_workload("workload1",
                                              network=network,
                                              ip=workload1_ip)
            workload2 = host2.create_workload("workload2",
                                              network=network,
                                              ip=workload2_ip)

            self.assertEquals(workload1_ip, workload1.ip)
            self.assertEquals(workload2_ip, workload2.ip)

            # Allow network to converge
            # Check connectivity with assigned IPs
            workload1.assert_can_ping(workload2_ip, retries=5)
            workload2.assert_can_ping(workload1_ip, retries=5)

            # Disconnect endpoints from the network
            # Assert can't ping and endpoints are removed from Calico
            network.disconnect(host1, workload1)
            network.disconnect(host2, workload2)
            workload1.assert_cant_ping(workload2_ip, retries=5)
            assert_number_endpoints(host1, 0)
            assert_number_endpoints(host2, 0)
            network.delete()
Example #21
    def test_global_peers(self):
        """
        Test global BGP peer configuration by turning off the mesh and
        configuring the mesh as a set of global peers.
        """
        with DockerHost('host1', start_calico=False) as host1, \
             DockerHost('host2', start_calico=False) as host2:

            # Start both hosts using specific AS numbers.
            host1.start_calico_node(as_num="64513")
            host1.assert_driver_up()
            host2.start_calico_node(as_num="64513")
            host2.assert_driver_up()

            # Create the network on host1, but it should be usable from all
            # hosts.
            net = host1.create_network(str(uuid.uuid4()))
            workload_host1 = host1.create_workload("workload1", network=net)
            workload_host2 = host2.create_workload("workload2", network=net)

            # Allow network to converge
            workload_host1.assert_can_ping(workload_host2.ip, retries=10)

            # Turn the node-to-node mesh off and wait for connectivity to drop.
            host1.calicoctl("bgp node-mesh off")
            workload_host1.assert_cant_ping(workload_host2.ip, retries=10)

            # Configure global peers to explicitly set up a mesh.  This means
            # each node will try to peer with itself which will fail.
            host1.calicoctl("bgp peer add %s as 64513" % host2.ip)
            host1.calicoctl("bgp peer add %s as 64513" % host1.ip)

            # Allow network to converge
            workload_host1.assert_can_ping(workload_host2.ip, retries=10)

            # And check connectivity in both directions.
            self.assert_connectivity(
                pass_list=[workload_host1, workload_host2])

            # Check the BGP status on each host.  Connections from a node to
            # itself will be idle since this is invalid BGP configuration.
            self._check_status(host1, [("global", host1.ip, "Idle"),
                                       ("global", host2.ip, "Established")])
            self._check_status(host2, [("global", host1.ip, "Established"),
                                       ("global", host2.ip, "Idle")])
Example #22
 def test_node_run_dryrun(self):
     """
     Test that dryrun does not output ETCD_AUTHORITY or ETCD_SCHEME.
     """
     with DockerHost('host', dind=False, start_calico=False) as host:
         output = host.calicoctl("node run --dryrun")
         assert "ETCD_AUTHORITY" not in output
         assert "ETCD_SCHEME" not in output
         assert "ETCD_ENDPOINTS" in output
Example #23
    def test_node_status(self):
        """
        Test that the status command can be executed.
        """
        with DockerHost('host', dind=False, start_calico=True) as host:

            def node_status():
                host.calicoctl("node status")

            retry_until_success(node_status, retries=10, ex_class=Exception)
Example #24
    def setUpClass(cls):
        cls.log_banner("TEST SET UP STARTING: %s", cls.__name__)

        cls.hosts = []
        cls.hosts.append(
            DockerHost("host1",
                       additional_docker_options=ADDITIONAL_DOCKER_OPTIONS,
                       post_docker_commands=POST_DOCKER_COMMANDS,
                       start_calico=False))
        cls.hosts.append(
            DockerHost("host2",
                       additional_docker_options=ADDITIONAL_DOCKER_OPTIONS,
                       post_docker_commands=POST_DOCKER_COMMANDS,
                       start_calico=False))
        for host in cls.hosts:
            host.execute("mkdir -p /var/log/calico/felix/")
            host.writefile(felix_logfile, before_data)
            host.attach_log_analyzer()
        cls.expect_errors = False
Example #25
    def test_multi_host(self):
        """
        Test mainline functionality without using an orchestrator plugin on
        multiple hosts.
        """
        with DockerHost('host1') as host1, DockerHost(
                'host2', start_calico=False) as host2:
            # Start calico manually on host2
            host2.start_calico_node_with_docker()

            # TODO ipv6 too
            host1.calicoctl("profile add TEST_GROUP")

            # Use standard docker bridge networking for one and --net=none
            # for the other
            workload1 = host1.create_workload("workload1")
            workload2 = host2.create_workload("workload2", network=NET_NONE)

            # Add the nodes to Calico networking.
            host1.calicoctl("container add %s 192.168.1.1" % workload1)
            host2.calicoctl("container add %s 192.168.1.2" % workload2)

            # Now add the profiles - one using set and one using append
            host1.calicoctl("container %s profile set TEST_GROUP" % workload1)
            host2.calicoctl("container %s profile append TEST_GROUP" %
                            workload2)

            # TODO - assert on output of endpoint show and endpoint profile
            # show commands.

            # Check it works
            workload1.assert_can_ping("192.168.1.2", retries=3)
            workload2.assert_can_ping("192.168.1.1", retries=3)

            # Test the teardown commands
            host1.calicoctl("profile remove TEST_GROUP")
            host1.calicoctl("container remove %s" % workload1)
            host2.calicoctl("container remove %s" % workload2)
            host1.calicoctl("pool remove 192.168.0.0/16")
            host1.calicoctl("node stop")
            host1.calicoctl("node remove")
            host2.calicoctl("node stop")
            host2.calicoctl("node remove")
Example #26
 def test_liveness_env_port(self):
     """
     A simple base case to check if calico/node becomes live. Uses environment variable as port number.
     """
     with DockerHost('host1',
                     additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS,
                     start_calico=False) as host1:
         host1.start_calico_node(
             env_options=
             "-e FELIX_HEALTHPORT=9011 -e FELIX_HEALTHENABLED=true")
         retry_until_success(host1.assert_is_live, retries=30)
Example #27
    def test_single_route_reflector(self):
        """
        Run a multi-host test using a single route reflector and global
        peering.
        """
        with DockerHost('host1') as host1, \
             DockerHost('host2') as host2, \
             RouteReflectorCluster(1, 1) as rrc:

            # Create the network on host1, but it should be usable from all
            # hosts.
            net = host1.create_network(str(uuid.uuid4()))

            # Turn off the node-to-node mesh (do this from any host), and
            # change the default AS Number (arbitrary choice).
            host1.calicoctl("bgp default-node-as 64514")
            host1.calicoctl("bgp node-mesh off")

            workload_host1 = host1.create_workload("workload1", network=net)
            workload_host2 = host2.create_workload("workload2", network=net)

            # Allow network to converge (which it won't)
            try:
                workload_host1.assert_can_ping(workload_host2.ip, retries=5)
            except AssertionError:
                pass
            else:
                raise AssertionError("Hosts can ping each other")

            # Set global config telling all calico nodes to peer with the
            # route reflector.  This can be run from either host.
            rg = rrc.get_redundancy_group()
            assert len(rg) == 1
            host1.calicoctl("bgp peer add %s as 64514" % rg[0].ip)

            # Allow network to converge (which it now will).
            workload_host1.assert_can_ping(workload_host2.ip, retries=10)

            # And check connectivity in both directions.
            self.assert_connectivity(
                pass_list=[workload_host1, workload_host2])
Example #28
    def test_multi_host(self):
        """
        Test mainline functionality without using an orchestrator plugin on
        multiple hosts.
        """
        with DockerHost('host1') as host1, DockerHost('host2') as host2:
            # TODO ipv6 too
            host1.calicoctl("profile add TEST_GROUP")

            # Use standard docker bridge networking for one and --net=none
            # for the other
            node1 = host1.create_workload("node1")
            node2 = host2.create_workload("node2", network=NET_NONE)

            # Add the nodes to Calico networking.
            host1.calicoctl("container add %s 192.168.1.1" % node1)
            host2.calicoctl("container add %s 192.168.1.2" % node2)

            # Get the endpoint IDs for the containers
            ep1 = host1.calicoctl("container %s endpoint-id show" % node1)
            ep2 = host2.calicoctl("container %s endpoint-id show" % node2)

            # Now add the profiles - one using set and one using append
            host1.calicoctl("endpoint %s profile set TEST_GROUP" % ep1)
            host2.calicoctl("endpoint %s profile append TEST_GROUP" % ep2)

            # TODO - assert on output of endpoint show and endpoint profile
            # show commands.

            # Check it works
            node1.assert_can_ping("192.168.1.2", retries=3)
            node2.assert_can_ping("192.168.1.1", retries=3)


            # Test the teardown commands
            host1.calicoctl("profile remove TEST_GROUP")
            host1.calicoctl("container remove %s" % node1)
            host2.calicoctl("container remove %s" % node2)
            host1.calicoctl("pool remove 192.168.0.0/16")
            host1.calicoctl("node stop")
            host2.calicoctl("node stop")
Example #29
    def test_add_autoassigned_pool_ipv4(self):
        """
        Test "calicoctl container add <container> <IPv4 CIDR>"
        (192.168.0.0/16)
        """
        with DockerHost('host', dind=False) as host:
            # Test that auto-assigning IPv4 addresses gives what we expect
            workloads = self._setup_env(host, count=2,
                                        ip=self.DEFAULT_IPV4_POOL)

            workloads[0].assert_can_ping("192.168.0.1", retries=3)
            workloads[1].assert_can_ping("192.168.0.0", retries=3)
Example #30
    def test_add_autoassigned_pool_ipv6(self):
        """
        Test "calicoctl container add <container> <IPv6 CIDR>"
        (fd80:24e2:f998:72d6::/64)
        """
        with DockerHost('host', dind=False) as host:
            # Test that auto-assigning IPv6 addresses gives what we expect
            workloads = self._setup_env(host, count=2,
                                        ip=self.DEFAULT_IPV6_POOL)

            workloads[0].assert_can_ping("fd80:24e2:f998:72d6::1", retries=3)
            workloads[1].assert_can_ping("fd80:24e2:f998:72d6::", retries=3)
Example #31
    def test_endpoint_commands(self):
        """
        Run a mainline multi-host test using endpoint commands

        Performs more complicated endpoint profile assignments to test
        the append, set, and remove commands in situations where the commands
        specify multiple profiles at once.
        """
        host1 = DockerHost('host1')
        host2 = DockerHost('host2')

        ip_main = "192.168.1.1"
        ip_a = "192.168.1.2"
        ip_b = "192.168.1.3"
        ip_c = "192.168.1.4"

        workload_main = host1.create_workload("workload_main", ip_main)
        host2.create_workload("workload_a", ip_a)
        host2.create_workload("workload_b", ip_b)
        host2.create_workload("workload_c", ip_c)

        host1.calicoctl("profile add PROF_A")
        host1.calicoctl("profile add PROF_B")
        host1.calicoctl("profile add PROF_C")

        host2.calicoctl("container workload_a profile set PROF_A")
        host2.calicoctl("container workload_b profile set PROF_B")
        host2.calicoctl("container workload_c profile set PROF_C")

        # Test set single profile
        host1.calicoctl("container %s profile set PROF_A" % workload_main)
        workload_main.assert_can_ping(ip_a, retries=4)
        workload_main.assert_cant_ping(ip_b)
        workload_main.assert_cant_ping(ip_c)

        # Test set multiple profiles (note: PROF_A should now be removed)
        host1.calicoctl("container %s profile set PROF_B PROF_C" % workload_main)
        workload_main.assert_cant_ping(ip_a, retries=4)
        workload_main.assert_can_ping(ip_b)
        workload_main.assert_can_ping(ip_c)

        # Get the endpoint ID for the main workload so that the remaining
        # commands can address the endpoint directly.
        workload_main_endpoint_id = host1.calicoctl(
            "container %s endpoint-id show" % workload_main)

        # Test set profile to None
        host1.calicoctl("endpoint %s profile set" % workload_main_endpoint_id)
        workload_main.assert_cant_ping(ip_a, retries=4)
        workload_main.assert_cant_ping(ip_b)
        workload_main.assert_cant_ping(ip_c)

        # Append a single profile
        host1.calicoctl("endpoint %s profile append PROF_A" % workload_main_endpoint_id)
        workload_main.assert_can_ping(ip_a, retries=4)
        workload_main.assert_cant_ping(ip_b)
        workload_main.assert_cant_ping(ip_c)

        # Append two profiles at once
        host1.calicoctl("endpoint %s profile append PROF_B PROF_C" % workload_main_endpoint_id)
        workload_main.assert_can_ping(ip_a, retries=4)
        workload_main.assert_can_ping(ip_b)
        workload_main.assert_can_ping(ip_c)

        # Remove a single profile
        host1.calicoctl("endpoint %s profile remove PROF_C" % workload_main_endpoint_id)
        workload_main.assert_can_ping(ip_a, retries=4)
        workload_main.assert_can_ping(ip_b)
        workload_main.assert_cant_ping(ip_c)

        # Remove two profiles at once
        host1.calicoctl("endpoint %s profile remove PROF_A PROF_B" % workload_main_endpoint_id)
        workload_main.assert_cant_ping(ip_a, retries=4)
        workload_main.assert_cant_ping(ip_b)
        workload_main.assert_cant_ping(ip_c)
Example #32
    def test_endpoint_commands_mainline(self):
        """
        Run a mainline multi-host test using endpoint commands.

        This test uses the "endpoint profile set" command to assign
        endpoints to profiles according to the following topology:
            Host1: [workload_A, workload_B, workload_C]
            Host2: [workload_D, workload_E]
            Creates a profile that connects A, C, & E
            Creates an additional isolated profile for B.
            Creates an additional isolated profile for D.
        IP Connectivity is then tested to ensure that only workloads
        in the same profile can ping one another
        """
        host1 = DockerHost('host1')
        host2 = DockerHost('host2')

        ip_a = "192.168.1.1"
        ip_b = "192.168.1.2"
        ip_c = "192.168.1.3"
        ip_d = "192.168.1.4"
        ip_e = "192.168.1.5"

        workload_a = host1.create_workload("workload_a", ip_a)
        workload_b = host1.create_workload("workload_b", ip_b)
        workload_c = host1.create_workload("workload_c", ip_c)
        workload_d = host2.create_workload("workload_d", ip_d)
        workload_e = host2.create_workload("workload_e", ip_e)

        host1.calicoctl("profile add PROF_1_3_5")
        host1.calicoctl("profile add PROF_2")
        host1.calicoctl("profile add PROF_4")

        host1.calicoctl("container %s profile set PROF_1_3_5" % workload_a)
        host1.calicoctl("container %s profile set PROF_2" % workload_b)
        host1.calicoctl("container %s profile set PROF_1_3_5" % workload_c)
        host2.calicoctl("container %s profile set PROF_4" % workload_d)
        host2.calicoctl("container %s profile set PROF_1_3_5" % workload_e)

        self.assert_connectivity(pass_list=[workload_a, workload_c, workload_e],
                                 fail_list=[workload_b, workload_d])

        self.assert_connectivity(pass_list=[workload_b],
                                 fail_list=[workload_a, workload_c, workload_d, workload_e])

        self.assert_connectivity(pass_list=[workload_d],
                                 fail_list=[workload_a, workload_b, workload_c, workload_e])