def test_converter(testname, fail_expected, error_text=None):
    """ Convert a v1 object to v3, then apply the result and read it back. """
    # Rename the generated test so failures report the data-driven case name.
    test_converter.__name__ = testname

    # Every case starts from an empty datastore.
    wipe_etcd(get_ip())
    manifest_v1 = data[testname]

    # Run the conversion under test on the v1 manifest.
    convert_rc = calicoctl("convert", data=manifest_v1)

    # Failure cases only need the expected error text checked.
    if fail_expected:
        convert_rc.assert_error(error_text)
        return

    logger.debug("Trying to convert manifest from V1 to V3")
    convert_rc.assert_no_error()

    # Parse the converted yaml and strip fields we don't care about.
    converted = clean_calico_data(yaml.safe_load(convert_rc.output))

    # Apply the converted manifest to the datastore.
    create_rc = calicoctl("create", data=convert_rc.output)
    logger.debug("Trying to create resource using converted manifest")
    create_rc.assert_no_error()

    # Read the resource back out again.
    get_rc = calicoctl("get %s %s -o yaml" % (converted['kind'], name(converted)))

    # Compare cleaned versions of both documents so fields such as the
    # creation timestamp don't cause spurious mismatches.
    logger.debug("Comparing 'get'ted output with original converted yaml")
    cleaned_output = yaml.safe_dump(
        clean_calico_data(
            yaml.safe_load(get_rc.output),
            extra_keys_to_remove=['projectcalico.org/orchestrator', 'namespace']
        )
    )
    convert_rc.assert_data(cleaned_output)
def setUp(self):
    """Remove any leftover calico-node container, then reset etcd."""
    try:
        self.host.execute("docker rm -f calico-node")
    except CommandExecError:
        pass  # The container was not running; nothing to remove.
    # Start each test from an empty datastore.
    wipe_etcd(get_ip())
def setUpClass(cls):
    """One-time fixture: wipe etcd, create two calico Docker hosts,
    start calico-node on each, and register extra-diagnostics hooks.
    """
    # Wipe etcd once before any test in this class runs.
    _log.debug("Wiping etcd")
    wipe_etcd(HOST_IPV4)

    # Test felix configurations.
    # Create two hosts.
    cls.hosts = []
    cls.host1 = DockerHost(
        "cali-host1",
        additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS,
        post_docker_commands=POST_DOCKER_COMMANDS,
        start_calico=False)
    cls.host1_hostname = cls.host1.execute("hostname")
    cls.host2 = DockerHost(
        "cali-host2",
        additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS,
        post_docker_commands=POST_DOCKER_COMMANDS,
        start_calico=False)
    cls.host2_hostname = cls.host2.execute("hostname")
    cls.hosts.append(cls.host1)
    cls.hosts.append(cls.host2)

    # Start calico node on hosts (deferred from construction via
    # start_calico=False so both hosts exist first).
    for host in cls.hosts:
        host.start_calico_node()

    _log.info("host1 IP: %s , host2 IP: %s", cls.host1.ip, cls.host2.ip)

    # Reset failure hooks, then arrange for extra diagnostics from both
    # hosts to be logged if any test in this class fails.
    clear_on_failures()
    add_on_failure(cls.host1.log_extra_diags)
    add_on_failure(cls.host2.log_extra_diags)
def setUp(self):
    """Per-test fixture: remove any stale calico-node container and
    clear the etcd datastore so each test starts from a clean slate.
    """
    try:
        self.host.execute("docker rm -f calico-node")
    except CommandExecError:
        # Presumably calico-node wasn't running
        pass
    wipe_etcd(get_ip())
def setUpClass(cls):
    """One-time fixture: wipe etcd, bring up two libnetwork-enabled
    Docker hosts, create two networks, and start four labelled
    workloads spread across the hosts and networks.
    """
    wipe_etcd(get_ip())

    # Rough idea for setup
    #
    #    Network1                 Network2
    #
    #  container1                container2
    #  foo = bar                 baz = bop
    #
    #  container3                container4
    #  foo = bing                foo = bar

    cls.hosts = []
    cls.host1 = DockerHost(
        "host1",
        additional_docker_options=ADDITIONAL_DOCKER_OPTIONS,
        post_docker_commands=POST_DOCKER_COMMANDS,
        start_calico=False,
        networking=NETWORKING_LIBNETWORK)
    cls.host1_hostname = cls.host1.execute("hostname")
    cls.hosts.append(cls.host1)

    cls.host2 = DockerHost(
        "host2",
        additional_docker_options=ADDITIONAL_DOCKER_OPTIONS,
        post_docker_commands=POST_DOCKER_COMMANDS,
        start_calico=False,
        networking=NETWORKING_LIBNETWORK)
    # Bug fix: this previously ran "hostname" on host1, so
    # host2_hostname recorded host1's hostname instead of host2's.
    cls.host2_hostname = cls.host2.execute("hostname")
    cls.hosts.append(cls.host2)

    # Start calico-node on both hosts, using container labels (rather
    # than container inspection) for docker-networking endpoints.
    for host in cls.hosts:
        host.start_calico_node(
            options='--use-docker-networking-container-labels')

    cls.network1 = cls.host1.create_network("network1")
    cls.network2 = cls.host1.create_network("network2")

    cls.workload1_nw1_foo_bar = cls.host1.create_workload(
        "workload1",
        network=cls.network1,
        labels=["org.projectcalico.label.foo=bar"])
    cls.workload2_nw2_baz_bop = cls.host1.create_workload(
        "workload2",
        network=cls.network2,
        labels=["org.projectcalico.label.baz=bop"])
    cls.workload3_nw1_foo_bing = cls.host2.create_workload(
        "workload3",
        network=cls.network1,
        labels=["org.projectcalico.label.foo=bing"])
    cls.workload4_nw2_foo_bar = cls.host2.create_workload(
        "workload4",
        network=cls.network2,
        labels=["org.projectcalico.label.foo=bar"])
def setUp(self, clear_etcd=True):
    """Per-test fixture: record the host IP, optionally wipe etcd,
    and reset the on-failure diagnostic hooks.
    """
    self.ip = HOST_IPV4
    if clear_etcd:
        wipe_etcd(self.ip)
    # Emit an empty record so the first real log line of the test is
    # not glued onto the end of the previous output.
    logger.info("")
    clear_on_failures()
def setUpClass(cls):
    """One-time fixture: wipe etcd, bring up two libnetwork-enabled
    Docker hosts, create two networks, and start four labelled
    workloads spread across the hosts and networks.
    """
    wipe_etcd(get_ip())

    # Rough idea for setup
    #
    #    Network1                 Network2
    #
    #  container1                container2
    #  foo = bar                 baz = bop
    #
    #  container3                container4
    #  foo = bing                foo = bar

    cls.hosts = []
    cls.host1 = DockerHost(
        "host1",
        additional_docker_options=ADDITIONAL_DOCKER_OPTIONS,
        post_docker_commands=POST_DOCKER_COMMANDS,
        start_calico=False,
        networking=NETWORKING_LIBNETWORK)
    cls.host1_hostname = cls.host1.execute("hostname")
    cls.hosts.append(cls.host1)

    cls.host2 = DockerHost(
        "host2",
        additional_docker_options=ADDITIONAL_DOCKER_OPTIONS,
        post_docker_commands=POST_DOCKER_COMMANDS,
        start_calico=False,
        networking=NETWORKING_LIBNETWORK)
    # Bug fix: this previously ran "hostname" on host1, so
    # host2_hostname recorded host1's hostname instead of host2's.
    cls.host2_hostname = cls.host2.execute("hostname")
    cls.hosts.append(cls.host2)

    # Start calico-node on both hosts, using container labels (rather
    # than container inspection) for docker-networking endpoints.
    for host in cls.hosts:
        host.start_calico_node(options='--use-docker-networking-container-labels')

    cls.network1 = cls.host1.create_network("network1")
    cls.network2 = cls.host1.create_network("network2")

    cls.workload1_nw1_foo_bar = cls.host1.create_workload(
        "workload1",
        network=cls.network1,
        labels=["org.projectcalico.label.foo=bar"])
    cls.workload2_nw2_baz_bop = cls.host1.create_workload(
        "workload2",
        network=cls.network2,
        labels=["org.projectcalico.label.baz=bop"])
    cls.workload3_nw1_foo_bing = cls.host2.create_workload(
        "workload3",
        network=cls.network1,
        labels=["org.projectcalico.label.foo=bing"])
    cls.workload4_nw2_foo_bar = cls.host2.create_workload(
        "workload4",
        network=cls.network2,
        labels=["org.projectcalico.label.foo=bar"])
def wipe_etcd(self):
    # Clear the etcd datastore at this test's host IP.  The unqualified
    # name inside a method body resolves to the module-level wipe_etcd()
    # helper, not recursively to this method — NOTE(review): the
    # shadowing is easy to misread; consider renaming the method.
    wipe_etcd(self.ip)
def setUpClass(cls):
    """One-time fixture: wipe etcd, create two calico hosts, set up
    namespace profiles, and start nine namespaced workloads used by the
    policy-ordering connectivity tests.
    """
    # Wipe etcd once before any test in this class runs.
    _log.debug("Wiping etcd")
    wipe_etcd(HOST_IPV4)

    # We set up 2 hosts on top of which running nine workloads in three namespaces.
    # Host1 has 5 workloads.
    #     2 in namespace nsa: [nsa_h1_wl0] [nsa_h1_wl1]
    #     1 in namespace nsb: [nsb_h1_wl0]
    #     2 in default namespace: [default_h1_wl0] [omit_h1_wl0]
    #     *omit* means 'namespace' field is not specified during workload setup.
    #
    # Host2 has 4 workloads.
    #     1 in namespace nsa: [nsa_h2_wl0]
    #     2 in namespace nsb: [nsb_h2_wl0] [nsb_h2_wl1]
    #     1 in namespace default: [default_h2_wl0]
    #
    # Global network policies and network policies then apply on namespaced
    # workload endpoints with mixed orders. The test checks connectivity of
    # 4 workloads [nsa_h1_wl0, nsb_h2_wl0, default_h1_wl0, omit_h1_wl0] from
    # other workloads.

    # Create two hosts.
    cls.hosts = []
    cls.host1 = DockerHost("cali-host1",
                           additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS,
                           post_docker_commands=POST_DOCKER_COMMANDS,
                           start_calico=False)
    cls.host1_hostname = cls.host1.execute("hostname")
    cls.host2 = DockerHost("cali-host2",
                           additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS,
                           post_docker_commands=POST_DOCKER_COMMANDS,
                           start_calico=False)
    cls.host2_hostname = cls.host2.execute("hostname")
    cls.hosts.append(cls.host1)
    cls.hosts.append(cls.host2)

    # Start calico node on hosts, with felix health reporting enabled so
    # we can wait for readiness below.
    for host in cls.hosts:
        host.start_calico_node(env_options=" -e FELIX_HEALTHENABLED=true ")
    handle_failure(lambda: retry_until_success(cls.host1.assert_is_ready, retries=20))
    handle_failure(lambda: retry_until_success(cls.host2.assert_is_ready, retries=20))

    # Prepare namespace profile so that we can use namespaceSelector for non-k8s deployment.
    # CNI will use the existing profile which is setup here instead of creating its own.
    cls.add_ns_profile('nsa')
    cls.add_ns_profile('nsb')
    cls.add_ns_profile('default')

    # Create calico network.
    cls.calinet = cls.host1.create_network("calinet")

    # Create workloads for host1
    # For CNI, network is used for cni_name but nothing else.
    # We set network to same value as namespace name to let cni program a
    # namespace profile for us.
    cls.nsa_wl = cls.host1.create_workload(
        "nsa_h1_wl0",
        image="workload",
        network="nsa",
        labels=["wep=nsa_h1_wl0"],
        namespace="nsa")
    cls.host1.create_workload(
        "nsa_h1_wl1",
        image="workload",
        network="nsa",
        labels=["wep=nsa_h1_wl1"],
        namespace="nsa")
    cls.host1.create_workload(
        "nsb_h1_wl0",
        image="workload",
        network="nsb",
        labels=["wep=nsb_h1_wl0"],
        namespace="nsb")
    cls.default_wl = cls.host1.create_workload(
        "default_h1_wl0",
        image="workload",
        network="default",
        labels=["wep=default_h1_wl0"],
        namespace="default")
    cls.omit_wl = cls.host1.create_workload(
        "omit_h1_wl0",
        image="workload",
        network="default",
        labels=["wep=omit_h1_wl0"],
        namespace=None)

    # Create workloads for host2
    cls.nsb_wl = cls.host2.create_workload(
        "nsb_h2_wl0",
        image="workload",
        network="nsb",
        labels=["wep=nsb_h2_wl0"],
        namespace="nsb")
    cls.host2.create_workload(
        "nsb_h2_wl1",
        image="workload",
        network="nsb",
        labels=["wep=nsb_h2_wl1"],
        namespace="nsb")
    cls.host2.create_workload(
        "nsa_h2_wl0",
        image="workload",
        network="nsa",
        labels=["wep=nsa_h2_wl0"],
        namespace="nsa")
    cls.host2.create_workload(
        "default_h2_wl0",
        image="workload",
        network="default",
        labels=["wep=default_h2_wl0"],
        namespace="default")

    # Work out workload set for different namespaces.
    # Bug fix: use list comprehensions instead of filter().  On Python 3,
    # filter() returns a one-shot iterator that would be exhausted after
    # the first test iterated these collections; on Python 2 filter()
    # already returned a list, so behaviour there is unchanged.
    cls.all_workloads = cls.host1.workloads.union(cls.host2.workloads)
    cls.wl_nsa = [w for w in cls.all_workloads if w.namespace == "nsa"]
    cls.wl_nsb = [w for w in cls.all_workloads if w.namespace == "nsb"]
    cls.wl_default = [w for w in cls.all_workloads
                      if w.namespace == "default" or w.namespace is None]

    clear_on_failures()
    add_on_failure(cls.host1.log_extra_diags)
    add_on_failure(cls.host2.log_extra_diags)
def setUpClass(cls):
    """One-time fixture: build the NAT-gateway topology used by the
    host-endpoint tests — an internal calico network, an external
    docker bridge with an nginx server, and a gateway container
    connected to both that forwards and NATs between them.
    """
    # Wipe etcd once before any test in this class runs.
    _log.debug("Wiping etcd")
    wipe_etcd(HOST_IPV4)

    # We set up an additional docker network to act as the external
    # network.  The Gateway container is connected to both networks.
    # and we configure it as a NAT gateway.
    #
    #  "cali-st-ext" host
    #      container
    #          |
    #  "cali-st-ext" docker
    #        bridge
    #          |
    #  Gateway         Host
    #  container    container
    #          \      /
    #       default docker
    #           bridge

    # We are testing two host endpoints including
    # gw_int connecting gateway with host through internal network.
    # gw_ext connecting gateway with external server.
    #
    # We are testing five access patterns.
    # Host to external server through gateway.
    # Host -> gw_int(untracked ingress, preDNAT) -> gw_int(forward ingress) ->
    #         gw_ext(forward egress) -> gw_ext(untracked egress) -> external server.
    #
    # Host to workload running on gateway.
    # Host -> gw_int(untracked ingress, preDNAT) -> gw_int(forward ingress) ->
    #         workload (workload ingress)
    #
    # Host to process running on gateway.
    # Host -> gw_int(untracked ingress, preDNAT) -> gw_int(normal ingress)
    #
    # Process running on gateway to external server.
    # Process -> gw_ext(normal egress) -> gw_ext(untracked egress)
    #
    # Workload running on gateway to external server.
    # Workload (workload egress) -> gw_ext(forward egress) -> gw_ext(untracked egress)

    # First, create the hosts and the gateway.
    cls.hosts = []
    cls.gateway = DockerHost(
        "cali-st-gw",
        additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS,
        post_docker_commands=POST_DOCKER_COMMANDS,
        start_calico=False)
    cls.gateway_hostname = cls.gateway.execute("hostname")
    cls.host = DockerHost(
        "cali-st-host",
        additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS,
        post_docker_commands=POST_DOCKER_COMMANDS,
        start_calico=False)
    cls.host_hostname = cls.host.execute("hostname")
    cls.hosts.append(cls.gateway)
    cls.hosts.append(cls.host)

    # Delete the nginx container if it still exists. We need to do this
    # before we try to remove the network.
    log_and_run("docker rm -f cali-st-ext-nginx || true")

    # Create the external network.
    log_and_run("docker network rm cali-st-ext || true")
    # Use 172.19.0.0 to avoid clash with normal docker subnet and
    # docker-in-docker subnet
    log_and_run(
        "docker network create --driver bridge --subnet 172.19.0.0/16 cali-st-ext"
    )

    # And an nginx server on the external network only.
    log_and_run("docker run"
                " --network=cali-st-ext"
                " -d"
                " --name=cali-st-ext-nginx"
                " nginx")

    for host in cls.hosts:
        host.start_calico_node()

    # Run local httpd server on gateway.
    cls.gateway.execute(
        "echo '<HTML> Local process </HTML>' > $HOME/index.html && httpd -p 80 -h $HOME"
    )

    # Get the internal IP of the gateway. We do this before we add the second
    # network since it means we don't have to figure out which IP is which.
    int_ip = str(cls.gateway.ip)
    cls.gateway_int_ip = int_ip
    _log.info("Gateway internal IP: %s", cls.gateway_int_ip)

    # Add the gateway to the external network.
    log_and_run("docker network connect cali-st-ext cali-st-gw")

    # Get the external IP of the gateway.
    ext_ip = log_and_run(
        "docker inspect --format "
        "'{{with index .NetworkSettings.Networks"
        " \"cali-st-ext\"}}{{.IPAddress}}{{end}}' cali-st-gw")
    cls.gateway_ext_ip = ext_ip
    _log.info("Gateway external IP: %s", cls.gateway_ext_ip)

    # Get the IP of the external server.
    ext_ip = cls.get_container_ip("cali-st-ext-nginx")
    cls.ext_server_ip = ext_ip
    _log.info("External server IP: %s", cls.ext_server_ip)

    # Configure the internal host to use the gateway for the external IP.
    cls.host.execute("ip route add %s via %s" %
                     (cls.ext_server_ip, cls.gateway_int_ip))

    # Configure the gateway to forward and NAT.
    cls.gateway.execute("sysctl -w net.ipv4.ip_forward=1")
    cls.gateway.execute(
        "iptables -t nat -A POSTROUTING --destination %s -j MASQUERADE" %
        cls.ext_server_ip)

    # Internal calico network with one workload on each side of the gateway.
    cls.calinet = cls.gateway.create_network("calinet")
    cls.gateway_workload = cls.gateway.create_workload(
        "gw-wl",
        image="workload",
        network=cls.calinet,
        labels=["org.projectcalico.label.wep=gateway"])
    cls.host_workload = cls.host.create_workload(
        "host-wl",
        image="workload",
        network=cls.calinet,
        labels=["org.projectcalico.label.wep=host"])

    # Log extra diagnostics from both containers if a test fails.
    clear_on_failures()
    add_on_failure(cls.host.log_extra_diags)
    add_on_failure(cls.gateway.log_extra_diags)
def wipe_etcd(self):
    # Clear the etcd datastore at this test's host IP.  The unqualified
    # name inside a method body resolves to the module-level wipe_etcd()
    # helper, not recursively to this method — NOTE(review): the
    # shadowing is easy to misread; consider renaming the method.
    wipe_etcd(self.ip)
def setUpClass(cls):
    """Run once per class: clear the etcd datastore before any tests."""
    wipe_etcd(HOST_IPV4)
def setUpClass(cls):
    """Start this test class from an empty etcd datastore."""
    etcd_host = HOST_IPV4
    wipe_etcd(etcd_host)