Example 1
 def test_pod_mult_intf_with_kubelet_restart_on_master(self):
     """
         1. Verify multi-interface pods can reach the public network when fabric forwarding is enabled
         2. Restart the kubelet service on the master
         3. Re-verify the pods can reach the public network when fabric forwarding is enabled
     """
     namespace = self.setup_namespace(isolation=True)
     pod1, pod2 = self.common_setup_for_multi_intf(namespace=namespace.name)
     self.verify_rechability_between_multi_intf_pods(pod1, pod2)
     #check the cluster status before the kubelet restart
     cluster_status, error_nodes_before = ContrailStatusChecker(
         self.inputs).wait_till_contrail_cluster_stable()
     #Check the default route on both the pods before the kubelet restart
     self.verify_default_route(pod1[0])
     self.verify_default_route(pod2[0])
     #check the interface index before the kubelet restart
     self.verify_pod_intf_index(pod1)
     self.verify_pod_intf_index(pod2)
     #restart the kubelet service on master
     self.inputs.restart_service(service_name="kubelet",
                                 host_ips=[self.inputs.k8s_master_ip])
     cluster_status, error_nodes_after = ContrailStatusChecker(
         self.inputs).wait_till_contrail_cluster_stable()
     assert error_nodes_before == error_nodes_after, \
         "cluster is not stable after kubelet restart"
     self.verify_default_route(pod1[0])
     self.verify_default_route(pod2[0])
     self.verify_pod_intf_index(pod1)
     self.verify_pod_intf_index(pod2)
     self.verify_rechability_between_multi_intf_pods(pod1, pod2)
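The restart tests in this section all repeat the same skeleton: record the cluster error-node state, restart one service, wait for the cluster to stabilize, then compare the before/after state and re-run the pod verifications. Below is a minimal sketch of how that skeleton could be factored into a shared helper; the helper name is hypothetical, and it assumes only the ContrailStatusChecker and self.inputs.restart_service interfaces already used in these tests.

 def _restart_and_verify_stable(self, service_name, host_ips, container=None):
     # Hypothetical helper: snapshot cluster state, restart a service and
     # verify the cluster returns to the same stable state afterwards.
     checker = ContrailStatusChecker(self.inputs)
     _, error_nodes_before = checker.wait_till_contrail_cluster_stable()
     kwargs = {'container': container} if container else {}
     self.inputs.restart_service(service_name, host_ips, **kwargs)
     cluster_status, error_nodes_after = checker.wait_till_contrail_cluster_stable()
     assert cluster_status, 'cluster is not stable after %s restart' % service_name
     assert error_nodes_before == error_nodes_after, \
         'cluster is not stable after %s restart' % service_name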
 def test_pod_with_multi_intf_agent_restart(self):
     '''
     Test ping between 2 PODs created in 2 different namespaces
     with multiple network attachment definitions
     Ping should pass in default mode
     Ping should fail when namespace isolation is enabled
     Restart contrail-vrouter-agent
     Ping between the 2 PODs again
     Ping should pass in default mode
     Ping should fail when namespace isolation is enabled
     '''
     pod1, pod2 = self.common_setup_for_multi_intf()
     self.verify_rechability_between_multi_intf_pods(pod1, pod2)
     #check the cluster status before the vrouter-agent restart
     cluster_status, error_nodes_before = ContrailStatusChecker(
         self.inputs).wait_till_contrail_cluster_stable()
     #Check the default route on both the pods before the vrouter-agent restart
     self.verify_default_route(pod1[0])
     self.verify_default_route(pod2[0])
     #check the interface index before the vrouter-agent restart
     self.verify_pod_intf_index(pod1)
     self.verify_pod_intf_index(pod2)
     #restart the contrail-vrouter-agent on all compute nodes
     for compute_ip in self.inputs.compute_ips:
         self.inputs.restart_service('contrail-vrouter-agent', [compute_ip],
                                     container='agent')
     cluster_status, error_nodes_after = ContrailStatusChecker(
         self.inputs).wait_till_contrail_cluster_stable()
     assert error_nodes_before == error_nodes_after, \
         "cluster is not stable after vrouter-agent restart"
     self.verify_default_route(pod1[0])
     self.verify_default_route(pod2[0])
     self.verify_pod_intf_index(pod1)
     self.verify_pod_intf_index(pod2)
     self.verify_rechability_between_multi_intf_pods(pod1, pod2)
Example 3
 def test_pod_multi_intf_with_contrail_apiserver_restart(self):
     '''
         Verifies that Kube API events are correctly received and processed by the
         Contrail API server after a contrail-api restart.
         Steps:
            1. Create multi-interface pods in an isolated namespace and verify reachability
            2. Restart the contrail-api service on all config nodes
            3. Re-verify default routes, interface indexes and reachability between the pods
     '''
     namespace = self.setup_namespace(isolation=True)
     pod1, pod2 = self.common_setup_for_multi_intf(namespace=namespace.name)
     self.verify_default_route(pod1[0])
     self.verify_default_route(pod2[0])
     self.verify_pod_intf_index(pod1)
     self.verify_pod_intf_index(pod2)
     cluster_status, error_nodes_before = ContrailStatusChecker(
         self.inputs).wait_till_contrail_cluster_stable()
     self.verify_rechability_between_multi_intf_pods(pod1, pod2)
     self.inputs.restart_service("contrail-api",
                                 self.inputs.cfgm_ips,
                                 container="api-server")
     cluster_status, error_nodes_after = ContrailStatusChecker(
         self.inputs).wait_till_contrail_cluster_stable()
     assert error_nodes_before == error_nodes_after, \
         "cluster is not stable after contrail-api restart"
     self.verify_default_route(pod1[0])
     self.verify_default_route(pod2[0])
     self.verify_pod_intf_index(pod1)
     self.verify_pod_intf_index(pod2)
     self.verify_rechability_between_multi_intf_pods(pod1, pod2)
Example 4
    def test_vcenter_compute_vm_reboot(self):
        '''
        Description: Reboot the Contrail compute VMs and verify compute services.
        Test steps:
               1. Create two VMs and ping between them
               2. Reboot the contrail-compute VMs
               3. Verify contrail-status
               4. Ping between the existing VMs
               5. Create two more guest VMs
               6. Ping between the new VMs after the compute VM reboot
        Pass criteria: Contrail status should be active after the reboot and ping between the VMs has to work.
        Maintainer : [email protected]
        '''

        vn1_name = get_random_name('vn50')
        vn1_vm1_name = get_random_name('vm1_reboot')
        vn1_vm2_name = get_random_name('vm2_reboot')
        vn1_fixture = self.create_vn(vn1_name, [get_random_cidr()])
        vm1_fixture = self.create_vm(vn_fixture=vn1_fixture,
                                     vm_name=vn1_vm1_name,
                                     image_name='vcenter_tiny_vm')
        vm2_fixture = self.create_vm(vn_fixture=vn1_fixture,
                                     vm_name=vn1_vm2_name,
                                     image_name='vcenter_tiny_vm')
        vm1_fixture.wait_till_vm_is_up()
        vm2_fixture.wait_till_vm_is_up()
        self.vm_host1 = vm1_fixture.vm_obj.host
        self.vm_host2 = vm2_fixture.vm_obj.host
        cluster_status, error_nodes = ContrailStatusChecker(
            self.inputs).wait_till_contrail_cluster_stable()
        assert cluster_status, 'Cluster is not stable...'
        for compute_vm in self.inputs.compute_ips:
            self.inputs.run_cmd_on_server(compute_vm, 'reboot')
        sleep(60)
        for compute_vm in self.inputs.compute_ips:
            self.inputs.run_cmd_on_server(compute_vm, 'ifconfig ens192 up')
        cluster_status, error_nodes = ContrailStatusChecker(
            self.inputs).wait_till_contrail_cluster_stable()
        assert cluster_status, 'Cluster is not stable after reboot...'
        vm1_fixture.wait_till_vm_is_up()
        vm2_fixture.wait_till_vm_is_up()
        assert vm1_fixture.ping_with_certainty(dst_vm_fixture=vm2_fixture),\
            "Ping from %s to %s failed" % (vn1_vm1_name, vn1_vm2_name)
        assert vm2_fixture.ping_with_certainty(dst_vm_fixture=vm1_fixture),\
            "Ping from %s to %s failed" % (vn1_vm2_name, vn1_vm1_name)

        vn1_vm1_name = get_random_name('vm1_after_reboot')
        vn1_vm2_name = get_random_name('vm2_after_reboot')
        vm1_fixture = self.create_vm(vn_fixture=vn1_fixture,
                                     vm_name=vn1_vm1_name,
                                     image_name='vcenter_tiny_vm')
        vm2_fixture = self.create_vm(vn_fixture=vn1_fixture,
                                     vm_name=vn1_vm2_name,
                                     image_name='vcenter_tiny_vm')
        vm1_fixture.wait_till_vm_is_up()
        vm2_fixture.wait_till_vm_is_up()
        assert vm1_fixture.ping_with_certainty(dst_vm_fixture=vm2_fixture),\
            "Ping from %s to %s failed" % (vn1_vm1_name, vn1_vm2_name)
        assert vm2_fixture.ping_with_certainty(dst_vm_fixture=vm1_fixture),\
            "Ping from %s to %s failed" % (vn1_vm2_name, vn1_vm1_name)
        return True
 def test_pod_with_multi_intf_kube_manager_restart(self):
     """
     :return: None
     Test ping between 2 PODs created in a given namespace
     Each pod is spawned with multiple interfaces using network attachment definitions
     Ping should pass over the corresponding network attachments
     Restart contrail-kube-manager
     Ping between the 2 PODs again across the management, left and right networks
     Contrail components should be stable
     Ping should pass
     """
     pod1, pod2 = self.common_setup_for_multi_intf()
     self.verify_rechability_between_multi_intf_pods(pod1, pod2)
     #check the cluster status before kube manager restart
     cluster_status, error_nodes_before = ContrailStatusChecker(
         self.inputs).wait_till_contrail_cluster_stable()
     #Check the default route on both the pods before kube manager restart
     self.verify_default_route(pod1[0])
     self.verify_default_route(pod2[0])
     #check the interface index before kube manager restart
     self.verify_pod_intf_index(pod1)
     self.verify_pod_intf_index(pod2)
     #Now restart the kube manager container
     self.restart_kube_manager()
     cluster_status, error_nodes_after = ContrailStatusChecker(
         self.inputs).wait_till_contrail_cluster_stable()
     assert error_nodes_before == error_nodes_after, \
         "cluster is not stable after kube manager restart"
     #check if the default route is intact with pod default network
     self.verify_default_route(pod1[0])
     self.verify_default_route(pod2[0])
     self.verify_pod_intf_index(pod1)
     self.verify_pod_intf_index(pod2)
     self.verify_rechability_between_multi_intf_pods(pod1, pod2)
Example 6
 def test_ingress_isolation_vrouter_agent_restart(self):
     """
      This test case verifies ingress operations after a restart of vrouter-agent
      Verify:
      1. Connectivity to an ingress in an isolated namespace
      2. Connectivity to an ingress in a non-isolated namespace from a pod in the isolated namespace
      Restart vrouter-agent and verify both points again
     """
     client1, client2, client3 = self.setup_common_namespaces_pods(
         prov_service=True, prov_ingress=True)
     assert self.validate_nginx_lb([client3[0], client3[1]],
                                   client3[5].cluster_ip,
                                   test_pod=client1[2])
     client1[4].disable_service_isolation()
     assert self.validate_nginx_lb([client1[0], client1[1]],
                                   client1[5].external_ips[0])
     for compute_ip in self.inputs.compute_ips:
         self.inputs.restart_service('contrail-vrouter-agent', [compute_ip],
                                     container='agent')
     cluster_status, error_nodes = ContrailStatusChecker(
     ).wait_till_contrail_cluster_stable()
     assert cluster_status, 'Cluster is not stable after restart'
     self.sleep(5)
     assert self.validate_nginx_lb([client3[0], client3[1]],
                                   client3[5].cluster_ip,
                                   test_pod=client1[2])
     assert self.validate_nginx_lb([client1[0], client1[1]],
                                   client1[5].external_ips[0])
    def test_hbs_with_docker_restart_on_kube_master(self):
        pod1, pod2, pod3, pod4, namespace_name = self.run_test(
            vn1_name='myvn',
            vn2_name="myvn2",
            tag_type='tier',
            tag_value='web_kube',
            tag2_value='db_kube',
            tag_obj_name='vmi',
            inter_compute=True)
        self.inputs.restart_service(service_name="docker",
                                    host_ips=[self.inputs.k8s_master_ip])
        time.sleep(200)
        cluster_status, error_nodes = ContrailStatusChecker(
            self.inputs).wait_till_contrail_cluster_stable()
        #assert cluster_status, 'All nodes and services not up. Failure nodes are: %s' % (
        #            error_nodes)

        assert pod1.ping_with_certainty(pod2.pod_ip,
                                        expectation=True,
                                        count='5',
                                        hbf_enabled=True)
        assert pod2.ping_with_certainty(pod1.pod_ip,
                                        expectation=True,
                                        count='5',
                                        hbf_enabled=True)
        assert pod3.ping_with_certainty(pod4.pod_ip,
                                        expectation=False,
                                        count='5',
                                        hbf_enabled=True)
        assert pod4.ping_with_certainty(pod3.pod_ip,
                                        expectation=False,
                                        count='5',
                                        hbf_enabled=True)
Example 8
    def test_hbs_with_contrail_apiserver_restart(self):
        pod1, pod2, pod3, pod4, namespace_name = self.run_test(
            vn1_name='myvn',
            vn2_name="myvn2",
            tag_type='tier',
            tag_value='web_api',
            tag2_value='db_api',
            tag_obj_name='vmi',
            inter_compute=True)
        self.inputs.restart_service("contrail-api",
                                    self.inputs.cfgm_ips,
                                    container="api-server")
        time.sleep(200)
        cluster_status, error_nodes = ContrailStatusChecker(
            self.inputs).wait_till_contrail_cluster_stable()
        # some services go to Initializing; seems like a setup issue, so the assert is removed for now
        #assert cluster_status, 'All nodes and services not up. Failure nodes are: %s' % (
        #            error_nodes)

        assert pod1.ping_with_certainty(pod2.pod_ip,
                                        expectation=True,
                                        count='5',
                                        hbf_enabled=True)
        assert pod2.ping_with_certainty(pod1.pod_ip,
                                        expectation=True,
                                        count='5',
                                        hbf_enabled=True)
        assert pod3.ping_with_certainty(pod4.pod_ip,
                                        expectation=False,
                                        count='5',
                                        hbf_enabled=True)
        assert pod4.ping_with_certainty(pod3.pod_ip,
                                        expectation=False,
                                        count='5',
                                        hbf_enabled=True)
Example 9
 def test_vrouter_xconnect(self):
     '''Test vrouter cross connect mode by taking vrouter agent down
     1. get compute node ip
     2. stop vrouter_agent
     3. Try to ssh to compute node from cfgm
     4. Verify Xconnect mode
     5. start vrouter-agent'''
     result = True
     cmd_stop = 'service contrail-vrouter-agent stop'
     cmd_start = 'service contrail-vrouter-agent start'
     verify_Xconnect = "vif --list | grep Flags:X"
     compute_ip = self.inputs.compute_ips[0]
     self.inputs.run_cmd_on_server(compute_ip, cmd_stop, container='agent')
     self.logger.info('Verify Xconnect mode ')
     output = self.inputs.run_cmd_on_server(compute_ip,
                                            issue_cmd=verify_Xconnect)
     if not output:
         result = result and False
     else:
         self.logger.info('Xconnect mode got enabled')
     self.inputs.run_cmd_on_server(compute_ip,
                                   issue_cmd=cmd_start,
                                   container='agent')
     status = ContrailStatusChecker(self.inputs)
     status.wait_till_contrail_cluster_stable([compute_ip])
     assert result, 'Xconnect mode not enabled'
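The X-flag check above runs only once, right after the agent is stopped. Below is a hedged sketch of the same check wrapped in a small retry loop; the function name, retry count and interval are illustrative, and it reuses the exact vif command from the test.

import time

def wait_for_xconnect(inputs, compute_ip, tries=6, delay=5):
    # Poll the same 'vif --list | grep Flags:X' check until the X (cross-connect) flag shows up.
    for _ in range(tries):
        if inputs.run_cmd_on_server(compute_ip,
                                    issue_cmd="vif --list | grep Flags:X"):
            return True
        time.sleep(delay)
    return False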
Example 10
    def test_single_node_failover(self):
        '''
        Stop all contrail containers contrail-kube-manager, controller,
        analytics, analyticsdb on one of the HA-enabled controller nodes
        Validate that a new kube-manager from among the other nodes is active
        We should be able to delete and add pods during this time and the pods
        should get their IPs

        '''
        css = ContrailStatusChecker(self.inputs)
        containers = [
            'contrail-kube-manager', 'api-server', 'schema',
            'analyticsdb', 'analytics-api', 'collector'
        ]
        km_h = self.connections.get_kube_manager_h()
        node = km_h.ip
        # Setup pods
        pods = self.setup_my_pods(2)
        self.verify_pods(pods)

        self.stop_containers(node, containers, wait=2)
        self.delete_pods(pods)
        self.verify_pods_are_not_in_k8s(pods)

        # Setup pods again now
        pods = self.setup_my_pods(2)
        assert css.wait_till_contrail_cluster_stable()[0]
        self.verify_pods(pods)
        self.verify_mesh_ping(pods)
        # Delete the pods now and verify cleanup
        self.delete_pods(pods)
        self.verify_pods_are_deleted(pods)
Example 11
    def test_vcenter_vrouter_agent_restart(self):
        ''' Test steps:
               1. Create two guest VM's
               2. ping between the vms
               3. restart the vrouter agent on the Contrail VMs
               4. Verify contrail-status
               5. ping between the vms
        Pass criteria: Contrail status should be active after the restart and ping between the VMs has to work.'''

        vn1_name = get_random_name('vn50')
        vn1_vm1_name = get_random_name('vm1')
        vn1_vm2_name = get_random_name('vm2')
        vn1_fixture = self.create_vn(vn1_name, [get_random_cidr()])
        vm1_fixture = self.create_vm(vn_fixture=vn1_fixture, vm_name=vn1_vm1_name, image_name='vcenter_tiny_vm')
        vm2_fixture = self.create_vm(vn_fixture=vn1_fixture, vm_name=vn1_vm2_name, image_name='vcenter_tiny_vm')
        vm1_fixture.wait_till_vm_is_up()
        vm2_fixture.wait_till_vm_is_up()
        assert vm1_fixture.ping_with_certainty(dst_vm_fixture=vm2_fixture),\
            "Ping from %s to %s failed" % (vn1_vm1_name, vn1_vm2_name)
        assert vm2_fixture.ping_with_certainty(dst_vm_fixture=vm1_fixture),\
            "Ping from %s to %s failed" % (vn1_vm2_name, vn1_vm1_name)
        contrail_vms = self.inputs.compute_ips
        self.inputs.restart_service('contrail-vrouter-agent', contrail_vms,
                                 container='agent',
                                 verify_service=True)
        cluster_status, error_nodes = ContrailStatusChecker(self.inputs).wait_till_contrail_cluster_stable()
        assert cluster_status, 'Cluster is not stable after restart...'
        assert vm1_fixture.ping_with_certainty(dst_vm_fixture=vm2_fixture),\
            "Ping from %s to %s failed" % (vn1_vm1_name, vn1_vm2_name)
        assert vm2_fixture.ping_with_certainty(dst_vm_fixture=vm1_fixture),\
            "Ping from %s to %s failed" % (vn1_vm2_name, vn1_vm1_name)
Example 12
 def test_pod_with_node_reboot_compute(self):
     '''
     Verify setup of 2 PODs created in 2 different namespaces
     Test ping between the pods
     Ping should pass
     Reboot the compute node
     Re-verify the setup of the 2 PODs across the 2 different namespaces
     Re-verify ping between the pods
     Ping should pass
     '''
     namespace1 = self.setup_namespace()
     pod1 = self.setup_busybox_pod(namespace=namespace1.name)
     assert pod1.verify_on_setup()
     namespace2 = self.setup_namespace()
     pod2 = self.setup_busybox_pod(namespace=namespace2.name)
     assert pod2.verify_on_setup()
     assert pod1.ping_to_ip(pod2.pod_ip, expectation=True)
     compute_node = pod1.nodename
     # Reboot the node
     self.inputs.reboot(compute_node)
     #time.sleep(70)
     # Verify after reboot
     status, svcs = ContrailStatusChecker(
         self.inputs).wait_till_contrail_cluster_stable([compute_node],
                                                        refresh=True)
     assert status, 'Cluster is not stable after the compute reboot'
     assert pod1.verify_on_setup()
     assert pod2.verify_on_setup()
     assert pod1.ping_with_certainty(pod2.pod_ip, expectation=True)
Example 13
    def add_remove_server(self, operation, server_ip, section, option,
                           client_role, client_process, container_name, index = 0):
        ''' This function adds or removes an entry from the list of servers
        configured in the .conf file of the client.
        It reads the .conf file to get the list.
        It then checks whether the entry already exists and performs the operation.
        Values for argument "client_process" can be any process which we
                see under contrail-status output, e.g. "contrail-control"
        Values for argument "operation" can be:
                "add" or "remove"
        Values for argument "client_role" can be:
                "agent", "control", "config", "analytics" and "database"
        '''
        config_file = 'entrypoint.sh'
        client_conf_file = client_process + ".conf"
        cmd_set = "openstack-config --get /etc/contrail/" + client_conf_file
        cmd = cmd_set + " " + section + " " + option

        if client_role == "agent":
            for ip in self.inputs.compute_ips:
                server_list = self.get_new_server_list(operation, ip,
                                                       cmd, server_ip, index,
                                                       container = "agent")
                self.configure_server_list(ip, client_process,
                        section, option, server_list, config_file, container_name, container = "agent")
        elif client_role in ["control", "dns"]:
            for ip in self.inputs.bgp_ips:
                server_list = self.get_new_server_list(operation, ip,
                                                       cmd, server_ip, index,
                                                       container = client_role)
                self.configure_server_list(ip, client_process,
                        section, option, server_list, config_file, container_name, container = client_role)
        elif client_role == "config":
            for ip in self.inputs.cfgm_ips:
                server_list = self.get_new_server_list(operation, ip,
                                                       cmd, server_ip, index,
                                                       container = 'config-nodemgr')
                self.configure_server_list(ip, client_process,
                        section, option, server_list, config_file, container_name, container = "config-nodemgr")
        elif client_role == "analytics":
            for ip in self.inputs.collector_ips:
                server_list = self.get_new_server_list(operation, ip,
                                                       cmd, server_ip, index,
                                                       container = "analytics-api")
                self.configure_server_list(ip, client_process,
                        section, option, server_list, config_file, container_name, container = "analytics-api")
        elif client_role == "database":
            for ip in self.inputs.database_ips:
                server_list = self.get_new_server_list(operation, ip,
                                                       cmd, server_ip, index,
                                                       container = "analytics-cassandra")
                self.configure_server_list(ip, client_process,
                        section, option, server_list, config_file, container_name, container = "analytics-cassandra")
        status_checker = ContrailStatusChecker(self.inputs)
        result = status_checker.wait_till_contrail_cluster_stable()[0]
        assert result, "Contrail cluster not up after add/remove of entry"
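A hypothetical invocation of the helper above is sketched below; the section and option names are illustrative values for the openstack-config layout it reads, not taken from the test code.

    # Hypothetical usage: point every vrouter agent's contrail-vrouter-agent.conf at an
    # additional control node, then remove the entry again (all values are illustrative).
    new_control_ip = '10.0.0.10'
    self.add_remove_server('add', new_control_ip, 'CONTROL-NODE', 'servers',
                           'agent', 'contrail-vrouter-agent', 'agent')
    self.add_remove_server('remove', new_control_ip, 'CONTROL-NODE', 'servers',
                           'agent', 'contrail-vrouter-agent', 'agent')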
 def reload_vrouter(self, wait=True):
     '''Reload vrouter module without restarting the compute node
     '''
     self.logger.info('Reloading vrouter module on %s' % (self.ip))
     self.execute_cmd('service supervisor-vrouter stop; '
                      'modprobe -r vrouter || rmmod vrouter; '
                      'service supervisor-vrouter start')
     if wait:
         status = ContrailStatusChecker(self.inputs)
         status.wait_till_contrail_cluster_stable([self.ip])
Example 15
    def test_deployment_with_agent_restart(self):
        ''' Create a deployment object with 3 pod replicas and verify the http service works across the pod replicas
            Verify deletion of the deployment object cleans up all the pods it had created
            Restart the vrouter agent on all the nodes and verify that redeploying the deployment object with pod replicas takes effect
            Re-verify the deployment passes and the pods work as expected using the http service with a new set of replicas
        '''
        client_pod = self.setup_busybox_pod()
        namespace = 'default'
        labels = {'deployment': 'test'}
        dep = self.setup_nginx_deployment(name='dep-test',
                                          replicas=3,
                                          pod_labels=labels)
        assert dep.verify_on_setup()
        service = self.setup_http_service(namespace=namespace, labels=labels)
        server_pods = dep.get_pods_list()
        s_pod_fixtures = []
        for x in server_pods:
            s_pod_fixture = self.setup_nginx_pod(name=x.metadata.name)
            self.verify_nginx_pod(s_pod_fixture)
            s_pod_fixtures.append(s_pod_fixture)
        assert self.validate_nginx_lb(s_pod_fixtures,
                                      service.cluster_ip,
                                      test_pod=client_pod)
        for compute_ip in self.inputs.compute_ips:
            self.inputs.restart_service('contrail-vrouter-agent', [compute_ip],
                                        container='agent')

        cluster_status, error_nodes = ContrailStatusChecker(
        ).wait_till_contrail_cluster_stable()
        assert cluster_status, 'Cluster is not stable after restart'
        self.sleep(5)
        assert self.validate_nginx_lb(s_pod_fixtures,
                                      service.cluster_ip,
                                      test_pod=client_pod)
        self.perform_cleanup(dep)
        self.sleep(1)
        # After the restart of the vrouter agent, recreate the deployment object
        # with additional pod replicas
        dep = self.setup_nginx_deployment(name='dep-test',
                                          replicas=5,
                                          pod_labels=labels)

        assert dep.verify_on_setup()
        service = self.setup_http_service(namespace=namespace, labels=labels)
        server_pods = dep.get_pods_list()
        s_pod_fixtures = []
        for x in server_pods:
            s_pod_fixture = self.setup_nginx_pod(name=x.metadata.name)
            self.verify_nginx_pod(s_pod_fixture)
            s_pod_fixtures.append(s_pod_fixture)
        assert self.validate_nginx_lb(s_pod_fixtures,
                                      service.cluster_ip,
                                      test_pod=client_pod)
Example 16
    def test_vrouter_kernal_module_unload_reload(self):
        '''
        1. Create a VN and two VMs
        2. Verify ping between the VMs; ping should pass
        3. Stop vrouter services, unload the vrouter kernel module and verify the status
        4. Reload the vrouter module and start the vrouter services
        5. Verify ping between the VMs; ping should pass
        '''
        compute_ip = self.inputs.compute_ips[0]
        compute_control_ip = self.inputs.compute_control_ips[0]
        if compute_ip == compute_control_ip:
            raise self.skipTest('Skipping Test. Need multi_interface testbed')
        result = True
        cmd_vr_unload = 'modprobe -r vrouter'
        cmd_vr_reload = 'modprobe -a vrouter'
        vn1_fixture = self.create_vn()
        vm1_fixture = self.create_vm(vn1_fixture, image_name='cirros')
        vm2_fixture = self.create_vm(vn1_fixture, image_name='cirros')
        vm1_fixture.wait_till_vm_is_up()
        vm2_fixture.wait_till_vm_is_up()
        compute_ip = vm1_fixture.vm_node_ip
        assert vm1_fixture.ping_with_certainty(vm2_fixture.vm_ip)
        self.inputs.stop_service('supervisor-vrouter',
                                 host_ips=[compute_ip],
                                 container='agent')
        self.inputs.run_cmd_on_server(compute_ip, issue_cmd=cmd_vr_unload)
        status = self.inputs.run_cmd_on_server(
            compute_ip, issue_cmd='lsmod | grep vrouter')
        if status:
            result = result and False
            self.logger.error('Vrouter kernel module failed to unload')
        else:
            self.logger.info('Vrouter kernel module unloaded successfully')
        self.logger.info('Reloading vrouter kernel module')
        self.inputs.run_cmd_on_server(compute_ip, issue_cmd=cmd_vr_reload)
        status = self.inputs.run_cmd_on_server(
            compute_ip, issue_cmd='lsmod | grep vrouter')
        if not status:
            result = result and False
            self.logger.error('Vrouter kernel module failed to reload')
        else:
            self.logger.info('Vrouter kernel module reloaded successfully')
        self.inputs.start_service('supervisor-vrouter',
                                  host_ips=[compute_ip],
                                  container='agent')
        status = ContrailStatusChecker(self.inputs)
        status.wait_till_contrail_cluster_stable()
        assert result, 'Vrouter kernel module failed to unload and reload'

        #Get the latest metadata ip of the instance after vrouter reload
        vm1_fixture.get_local_ip(refresh=True)
        assert vm1_fixture.ping_with_certainty(vm2_fixture.vm_ip)
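The lsmod-based load check appears twice in the test above; a minimal sketch of it factored into a helper, assuming the same run_cmd_on_server interface (the helper name is hypothetical):

    def _is_vrouter_module_loaded(self, node_ip):
        # Hypothetical helper: True when the vrouter kernel module appears in lsmod.
        output = self.inputs.run_cmd_on_server(node_ip,
                                               issue_cmd='lsmod | grep vrouter')
        return bool(output)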
Example 17
 def test_pod_multi_intf_with_kube_apiserver_restart(self):
     '''
        Create an isolated namespace
        Spawn the multi-interface pods
        Verify the multiple interfaces are associated with the pods
        Verify reachability across the pods
        Verify the indexes of the interfaces
        Verify the default route through eth0
        Restart the kube-apiserver service
        Once the cluster stabilizes,
        verify pod reachability and the interface indexes along with the default routes
     '''
     namespace = self.setup_namespace(isolation=True)
     pod1, pod2 = self.common_setup_for_multi_intf(namespace=namespace.name)
     self.verify_rechability_between_multi_intf_pods(pod1, pod2)
     #check the cluster status before the kube-apiserver restart
     cluster_status, error_nodes_before = ContrailStatusChecker(
         self.inputs).wait_till_contrail_cluster_stable()
     #Check the default route on both the pods before the kube-apiserver restart
     self.verify_default_route(pod1[0])
     self.verify_default_route(pod2[0])
     #check the interface index before the kube-apiserver restart
     self.verify_pod_intf_index(pod1)
     self.verify_pod_intf_index(pod2)
     #restart the kube-apiserver service on the master
     self.inputs.restart_service("kube-apiserver",
                                 [self.inputs.k8s_master_ip],
                                 container="kube-apiserver",
                                 verify_service=False)
     #Wait till the cluster stabilizes
     cluster_status, error_nodes_after = ContrailStatusChecker(
         self.inputs).wait_till_contrail_cluster_stable()
     assert error_nodes_before == error_nodes_after, \
         "cluster is not stable after kube-apiserver restart"
     self.verify_default_route(pod1[0])
     self.verify_default_route(pod2[0])
     self.verify_pod_intf_index(pod1)
     self.verify_pod_intf_index(pod2)
     self.verify_rechability_between_multi_intf_pods(pod1, pod2)
Example 18
 def test_pod_with_multi_intf_defualt_route(self):
     """
        :return: None
        Test ping between 2 PODs created in a given namespace
        Each pod is spawned with multiple interfaces using network attachment definitions
        Ping should pass over the corresponding network attachments
        Verify the default route is installed via eth0 on the default pod network
     """
     pod1, pod2 = self.common_setup_for_multi_intf()
     cluster_status, error_nodes = ContrailStatusChecker(
         self.inputs).wait_till_contrail_cluster_stable()
     self.verify_rechability_between_multi_intf_pods(pod1, pod2)
     self.verify_default_route(pod1[0])
Example 19
 def start_containers(self, node_ip, containers):
     '''
     containers: list of container names to be started
     node_ip: Node on which containers need to be started
     '''
     self.remove_from_cleanups(self.start_containers,
         (self, node_ip, containers))
     for container in containers:
         self.logger.info('Starting container %s on %s' %(container, node_ip))
         self.inputs.start_container(node_ip, container)
     self.sleep(60)
     assert ContrailStatusChecker().wait_till_contrail_cluster_stable(
         nodes=[node_ip])[0]
 def reload_vrouter(self, wait=True):
     '''Reload vrouter module without restarting the compute node
     '''
     self.logger.info('Reloading vrouter module on %s' % (self.ip))
     if self.inputs.host_data[self.ip].get('containers', {}).get('agent'):
         stop_cmd = 'docker exec -it agent service supervisor-vrouter stop'
         start_cmd = 'docker exec -it agent service supervisor-vrouter start'
     else:
         stop_cmd = 'service supervisor-vrouter stop'
         start_cmd = 'service supervisor-vrouter start'
     self.execute_cmd('%s; '
         'modprobe -r vrouter || rmmod vrouter; '
         '%s ' % (stop_cmd, start_cmd), container=None)
     if wait:
         status = ContrailStatusChecker(self.inputs)
         status.wait_till_contrail_cluster_stable([self.ip])
Example 21
    def test_ipam_persistence_across_restart_reboots(self):
        '''
        Description: Test to validate IPAM persistence across restarts and reboots of nodes.
        Test steps:
                   1. Create a IPAM.
                   2. Create a VN and launch VMs in it.
                   3. Restart the contrail-vrouter-agent and contrail-control services.
        Pass criteria: The VMs should be back to ACTIVE state and the ping between them should PASS.
        Maintainer : [email protected]
        '''
        ipam_obj=self.useFixture( IPAMFixture(connections= self.connections, name = get_random_name('my-ipam')))
        assert ipam_obj.verify_on_setup()

        ts = time.time()
        vn_name = get_random_name('vn')
        vn_fixture=self.useFixture( VNFixture(project_name= self.project.project_name, connections= self.connections,
                                 vn_name= vn_name, inputs= self.inputs, subnets=['22.1.1.0/24'], ipam_fq_name = ipam_obj.fq_name))
        assert vn_fixture.verify_on_setup()

        vm1_fixture = self.useFixture(VMFixture(connections=self.connections,project_name = self.inputs.project_name,
                                                vn_obj=vn_fixture.obj, vm_name = get_random_name('vm1')))
        vm2_fixture = self.useFixture(VMFixture(connections=self.connections,project_name = self.inputs.project_name,
                                                vn_obj=vn_fixture.obj, vm_name = get_random_name('vm2')))

        assert vm1_fixture.verify_on_setup()
        assert vm2_fixture.verify_on_setup()

        self.nova_h.wait_till_vm_is_up( vm1_fixture.vm_obj )
        self.nova_h.wait_till_vm_is_up( vm2_fixture.vm_obj )
        assert vm1_fixture.ping_with_certainty(vm2_fixture.vm_ip)
        self.logger.info('Will restart the services now')
        for compute_ip in self.inputs.compute_ips:
            self.inputs.restart_service('contrail-vrouter-agent',[compute_ip])
        for bgp_ip in self.inputs.bgp_ips:
            self.inputs.restart_service('contrail-control',[bgp_ip])

        cluster_status, error_nodes = ContrailStatusChecker().wait_till_contrail_cluster_stable()
        assert cluster_status, 'Cluster is not stable after restart'
        self.logger.info('Will check if the ipam persists and ping b/w VMs is still successful')
        assert ipam_obj.verify_on_setup()
        msg = 'VM verification failed after process restarts'
        assert vm1_fixture.verify_on_setup(), msg
        assert vm2_fixture.verify_on_setup(), msg
        assert vm1_fixture.ping_with_certainty(vm2_fixture.vm_ip)
        return True
Example 22
 def test_snat_with_docker_restart_on_master(self):
     """
         1. Verify pods can reach the public network when SNAT is enabled
         2. Restart the docker service on the master
         3. Re-verify the pods can reach the public network when SNAT is enabled
     """
     client1, client2, client3, client4 = self.setup_common_namespaces_pods(
         isolation=True, ip_fabric_snat=True, ip_fabric_forwarding=True)
     self.verify_ping_between_pods_across_namespaces_and_public_network(
         client1, client2, client3, client4)
     self.inputs.restart_service(service_name="docker",
                                 host_ips=[self.inputs.k8s_master_ip])
     time.sleep(30)
     cluster_status, error_nodes = ContrailStatusChecker(
         self.inputs).wait_till_contrail_cluster_stable()
     assert cluster_status, 'All nodes and services not up. Failure nodes are: %s' % (
         error_nodes)
     self.verify_ping_between_pods_across_namespaces_and_public_network(
         client1, client2, client3, client4)
Example 23
    def test_km_active_backup(self):
        '''
        Create a pod A
        Restart an active km, check one of the others becomes active
        Create another pod B. Check B can reach A
        Restart the active km, check one of the others becomes active
        Create another pod C. Check C can reach A, B
        Restart the active km again, check one of the others becomes active
        Create another pod D. Check D can reach A, B, C
        '''
        css = ContrailStatusChecker(self.inputs)
        pod1 = self.setup_busybox_pod()
        assert pod1.wait_till_pod_is_up()

        (active_km, backup_kms) = self.get_active_backup_kms(refresh=True)
        self.restart_kube_manager([active_km])
        css.wait_till_contrail_cluster_stable(nodes=[active_km])
        (active_km_1, backup_kms_1) = self.get_active_backup_kms(refresh=True)
        assert active_km_1 in backup_kms, 'New KM was not chosen as active'
        pod2 = self.setup_busybox_pod()
        assert pod2.wait_till_pod_is_up()
        assert self.verify_reachability(pod2, [pod1])

        self.restart_kube_manager([active_km_1])
        css.wait_till_contrail_cluster_stable(nodes=[active_km_1])
        (active_km_2, backup_kms_2) = self.get_active_backup_kms(refresh=True)
        assert active_km_2 in backup_kms_1, 'New KM was not chosen as active'
        pod3 = self.setup_busybox_pod()
        assert pod3.wait_till_pod_is_up()
        assert self.verify_reachability(pod3, [pod1, pod2])

        self.restart_kube_manager([active_km_2])
        css.wait_till_contrail_cluster_stable(nodes=[active_km_2])
        (active_km_3, backup_kms_3) = self.get_active_backup_kms(refresh=True)
        assert active_km_3 in backup_kms_2, 'New KM was not chosen as active'
        pod4 = self.setup_busybox_pod()
        assert pod4.wait_till_pod_is_up()
        assert self.verify_reachability(pod4, [pod1, pod2, pod3])
Example 24
    def test_process_restart_in_policy_between_vns(self):
        ''' Test to validate that, with a policy having a rule allowing icmp forwarding between VMs on different VNs, ping between the VMs passes
        even with process restarts
            1. Pick 2 VN's from resource pool which has one VM each
            2. Create policy with icmp allow rule between those VN's and bind it networks
            3. Ping from one VM to another VM
            4. Restart process 'vrouter' and 'control' on setup
            5. Ping again between VM's after process restart
        Pass criteria: Step 2,3,4 and 5 should pass
        '''
        vn1_name = get_random_name('vn1')
        vn1_subnets = ["192.168.1.0/24"]
        vn2_name = get_random_name('vn2')
        vn2_subnets = ["192.168.2.0/24"]
        policy1_name = 'policy1'
        policy2_name = 'policy2'
        rules = [
            {
                'direction': '<>',
                'simple_action': 'pass',
                'protocol': 'icmp',
                'source_network': vn1_name,
                'dest_network': vn2_name,
            },
        ]
        rev_rules = [
            {
                'direction': '<>',
                'simple_action': 'pass',
                'protocol': 'icmp',
                'source_network': vn2_name,
                'dest_network': vn1_name,
            },
        ]
        policy1_fixture = self.useFixture(
            PolicyFixture(policy_name=policy1_name,
                          rules_list=rules,
                          inputs=self.inputs,
                          connections=self.connections))
        policy2_fixture = self.useFixture(
            PolicyFixture(policy_name=policy2_name,
                          rules_list=rev_rules,
                          inputs=self.inputs,
                          connections=self.connections))
        vn1_fixture = self.create_vn(vn1_name, vn1_subnets, option='contrail')
        assert vn1_fixture.verify_on_setup()
        vn1_fixture.bind_policies([policy1_fixture.policy_fq_name],
                                  vn1_fixture.vn_id)
        self.addCleanup(vn1_fixture.unbind_policies, vn1_fixture.vn_id,
                        [policy1_fixture.policy_fq_name])
        vn2_fixture = self.create_vn(vn2_name, vn2_subnets, option='contrail')
        assert vn2_fixture.verify_on_setup()
        vn2_fixture.bind_policies([policy2_fixture.policy_fq_name],
                                  vn2_fixture.vn_id)
        self.addCleanup(vn2_fixture.unbind_policies, vn2_fixture.vn_id,
                        [policy2_fixture.policy_fq_name])
        vn1_vm1_name = get_random_name('vn1_vm1')
        vn2_vm1_name = get_random_name('vn2_vm1')
        vm1_fixture = self.create_vm(vn1_fixture, vn1_vm1_name)
        vm2_fixture = self.create_vm(vn2_fixture, vn2_vm1_name)
        assert vm1_fixture.wait_till_vm_is_up()
        assert vm2_fixture.wait_till_vm_is_up()
        assert vm1_fixture.ping_with_certainty(vm2_fixture.vm_ip)

        for compute_ip in self.inputs.compute_ips:
            self.inputs.restart_service('contrail-vrouter-agent', [compute_ip],
                                        container='agent')
        for bgp_ip in self.inputs.bgp_ips:
            self.inputs.restart_service('contrail-control', [bgp_ip],
                                        container='controller')
        for cfgm_ip in self.inputs.cfgm_ips:
            self.inputs.restart_service('contrail-api', [cfgm_ip],
                                        container='controller')

        # Wait for cluster to be stable
        cs_obj = ContrailStatusChecker(self.inputs)
        clusterstatus, error_nodes = cs_obj.wait_till_contrail_cluster_stable()
        assert clusterstatus, ('Hash of error nodes and services : %s' %
                               (error_nodes))

        assert self.verification_after_process_restart_in_policy_between_vns()
        for cfgm_name in self.inputs.cfgm_names:
            assert self.analytics_obj.verify_cfgm_uve_module_state\
                        (self.inputs.collector_names[0],
                        cfgm_name,'contrail-api')

        vn1_vm2_name = get_random_name('vn1_vm2')
        vn2_vm2_name = get_random_name('vn2_vm2')

        vn3_name = get_random_name('vn3')
        vn3_subnets = ["192.168.4.0/24"]
        vn3_fixture = self.create_vn(vn3_name, vn3_subnets, option='contrail')
        assert vn3_fixture.verify_on_setup()

        vm3_fixture = self.create_vm(vn1_fixture, vn1_vm2_name)
        assert vm3_fixture.verify_on_setup()
        vm4_fixture = self.create_vm(vn2_fixture, vn2_vm2_name)
        assert vm4_fixture.verify_on_setup()
        vm3_fixture.wait_till_vm_is_up()
        vm4_fixture.wait_till_vm_is_up()
        assert vm3_fixture.ping_with_certainty(vm4_fixture.vm_ip)
Example 25
    def test_vdns_with_diff_zone(self):
        ''' Test vdns in different zones with multi projects '''
        var_obj = self.InitForZoneTests()
        vdns_fixt1 = {}
        ipam_mgmt_obj = {}
        for project in var_obj.project_list:
            dns_server_name = var_obj.proj_vdns[project]
            self.logger.info(
                'Creating vdns server:%s in project:%s',
                dns_server_name,
                project)
            domain_name = '%s.net' % (project)
            ttl = 100
            # VDNS creation
            dns_data = VirtualDnsType(
                domain_name=domain_name, dynamic_records_from_client=True,
                default_ttl_seconds=ttl, record_order='random')
            vdns_fixt1[project] = self.useFixture(
                VdnsFixture(
                    self.inputs,
                    self.connections,
                    vdns_name=dns_server_name,
                    dns_data=dns_data))
            result, msg = vdns_fixt1[project].verify_on_setup()
            self.assertTrue(result, msg)
            dns_server = IpamDnsAddressType(
                virtual_dns_server_name=vdns_fixt1[project].vdns_fq_name)
            ipam_mgmt_obj[project] = IpamType(
                ipam_dns_method='virtual-dns-server',
                ipam_dns_server=dns_server)
        ipam_fixt = {}
        vn_fixt = {}
        vm_fix = {}
        pol_fixt = {}
        for proj in var_obj.project_list:
            # User creation
            user_fixture = self.useFixture(
                UserFixture(
                    connections=self.admin_connections,
                    username=var_obj.proj_user[proj],
                    password=var_obj.proj_pass[proj]))
            # Project creation
            project_fixture = self.useFixture(
                ProjectFixture(
                    project_name=proj,
                    username=var_obj.proj_user[proj],
                    password=var_obj.proj_pass[proj],
                    connections=self.admin_connections))
            user_fixture.add_user_to_tenant(proj, var_obj.proj_user[proj], 'admin')
            project_fixture.set_user_creds(var_obj.proj_user[proj], var_obj.proj_pass[proj])
            project_inputs = ContrailTestInit(
                    self.ini_file,
                    stack_user=project_fixture.project_username,
                    stack_password=project_fixture.project_user_password,
                    stack_tenant=proj,
                    logger=self.logger)
            project_connections = ContrailConnections(project_inputs,
                                                      logger=self.logger)
            self.logger.info(
                'Default SG to be edited for allow all on project: %s' % proj)
            project_fixture.set_sec_group_for_allow_all(proj, 'default')
            # Ipam creation
            ipam_fixt[proj] = self.useFixture(IPAMFixture(var_obj.ipam_list[proj], vdns_obj= vdns_fixt1[proj].obj,
                        project_obj=project_fixture, ipamtype=ipam_mgmt_obj[proj]))
            # VN Creation
            vn_fixt[proj] = self.useFixture(
                VNFixture(
                    project_name=proj,
                    connections=project_connections,
                    vn_name=var_obj.vn_list[proj],
                    inputs=project_inputs,
                    subnets=var_obj.vn_nets[proj],
                    ipam_fq_name=ipam_fixt[proj].getObj().get_fq_name()))
            vn_quantum_obj = self.orch.get_vn_obj_if_present(vn_name=var_obj.vn_list[proj], project_id=project_fixture.uuid)
            # VM creation
            vm_fix[proj] = self.useFixture(
                VMFixture(
                    project_name=proj,
                    connections=project_connections,
                    vn_obj=vn_quantum_obj,
                    vm_name=var_obj.vm_list[proj]))
            vm_fix[proj].verify_vm_launched()
            vm_fix[proj].verify_on_setup()
            vm_fix[proj].wait_till_vm_is_up()
            msg = "Ping using name %s failed. The DNS server \
                  should resolve the VM name to an IP" % (var_obj.vm_list[proj])
            self.assertTrue(
                vm_fix[proj].ping_with_certainty(ip=var_obj.vm_list[proj]), msg)
            vm_ip = vm_fix[proj].get_vm_ip_from_vm(
                vn_fq_name=vm_fix[proj].vn_fq_name)
            vm_rev_ip = vm_ip.split('.')
            vm_rev_ip = '.'.join(
                (vm_rev_ip[3], vm_rev_ip[2], vm_rev_ip[1], vm_rev_ip[0]))
            vm_rev_ip = vm_rev_ip + '.in-addr.arpa'
            rev_zone = var_obj.vn_nets[proj][0].split('/')[0].split('.')
            rev_zone = '.'.join((rev_zone[0], rev_zone[1], rev_zone[2]))
            rev_zone = rev_zone + '.in-addr.arpa'
            # Frame the Expected DNS data for VM, one for 'A' record and
            # another 'PTR' record.
            domain_name = '%s.net' % (proj)
            rec_name = var_obj.vm_list[proj] + "." + domain_name
            agent_inspect_h = self.agent_inspect[vm_fix[proj].vm_node_ip]
            assigned_dns_ips = agent_inspect_h.get_vna_discovered_dns_server()
            vm_dns_exp_data = [{'rec_data': vm_ip,
                                'rec_type': 'A',
                                'rec_class': 'IN',
                                'rec_ttl': str(ttl),
                                'rec_name': rec_name,
                                'installed': 'yes',
                                'zone': domain_name},
                               {'rec_data': rec_name,
                                'rec_type': 'PTR',
                                'rec_class': 'IN',
                                'rec_ttl': str(ttl),
                                'rec_name': vm_rev_ip,
                                'installed': 'yes',
                                'zone': rev_zone}]
            self.verify_vm_dns_data(vm_dns_exp_data, assigned_dns_ips[0])
            vm_dns_exp_data = []
        self.logger.info(
            'Restart supervisor-config & supervisor-control and test ping')
        for bgp_ip in self.inputs.bgp_ips:
            self.inputs.restart_service('supervisor-control', [bgp_ip],
                                        container='controller')
        for cfgm_ip in self.inputs.cfgm_ips:
            self.inputs.restart_service('supervisor-config', [cfgm_ip],
                                        container='controller')
        status_checker = ContrailStatusChecker(self.inputs)
        self.logger.debug("Waiting for all the services to be UP")
        assert status_checker.wait_till_contrail_cluster_stable()[0],\
                "All services could not come UP after restart"
        for proj in var_obj.project_list:
            msg = "Ping using name %s failed. The DNS server \
                  should resolve the VM name to an IP" % (var_obj.vm_list[proj])
            self.assertTrue(
                vm_fix[proj].ping_with_certainty(ip=var_obj.vm_list[proj]), msg)
        return True
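The reverse-record name assembled by hand in the test above (splitting the VM IP into octets and appending .in-addr.arpa) can also be derived with the standard library; a small standalone sketch, independent of the test fixtures:

import ipaddress

def reverse_dns_name(ip):
    # e.g. '22.1.1.5' -> '5.1.1.22.in-addr.arpa'
    return ipaddress.ip_address(ip).reverse_pointer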
Example 26
    def test_ecmp_svc_in_network_with_3_instance_service_restarts(self):
        """
        Description: Validate ECMP after restarting control and vrouter services with service chaining in-network mode datapath having service instance
        Test steps:
                   1.Creating vm's - vm1 and vm2 in networks vn1 and vn2.
                   2.Creating a service instance in in-network mode with 3 instances.
                   3.Creating a service chain by applying the service instance as a service in a policy between the VNs.
                   4.Checking for ping and traffic between vm1 and vm2.
        Pass criteria: Ping between the VMs should be successful and TCP traffic should reach vm2 from vm1 and vice-versa even after the restarts.
        Maintainer : [email protected]
        """
        ret_dict = self.verify_svc_chain(max_inst=3,
                                         service_mode='in-network',
                                         create_svms=True,
                                         **self.common_args)
        si_fixture = ret_dict['si_fixture']
        svm_ids = si_fixture.svm_ids
        self.get_rt_info_tap_intf_list(self.left_vn_fixture,
                                       self.left_vm_fixture,
                                       self.right_vm_fixture, svm_ids,
                                       si_fixture)
        dst_vm_list = [self.right_vm_fixture]
        self.verify_traffic_flow(self.left_vm_fixture, dst_vm_list, si_fixture,
                                 self.left_vn_fixture)
        for compute_ip in self.inputs.compute_ips:
            self.inputs.restart_service('contrail-vrouter-agent', [compute_ip],
                                        container='agent')

        # Wait for service stability
        cs_checker = ContrailStatusChecker(self.inputs)
        cluster_status, error_nodes = cs_checker.wait_till_contrail_cluster_stable(
            self.inputs.compute_ips)
        assert cluster_status, 'Hash of error nodes and services : %s' % (
            error_nodes)

        self.left_vm_fixture.wait_till_vm_is_up(refresh=True)
        self.right_vm_fixture.wait_till_vm_is_up(refresh=True)

        self.get_rt_info_tap_intf_list(self.left_vn_fixture,
                                       self.left_vm_fixture,
                                       self.right_vm_fixture, svm_ids,
                                       si_fixture)
        fab_connections.clear()
        self.verify_traffic_flow(self.left_vm_fixture, dst_vm_list, si_fixture,
                                 self.left_vn_fixture)
        for bgp_ip in self.inputs.bgp_ips:
            self.inputs.restart_service('contrail-control', [bgp_ip],
                                        container='control')

        cluster_status, error_nodes = cs_checker.wait_till_contrail_cluster_stable(
            self.inputs.bgp_ips)
        assert cluster_status, 'Hash of error nodes and services : %s' % (
            error_nodes)

        self.get_rt_info_tap_intf_list(self.left_vn_fixture,
                                       self.left_vm_fixture,
                                       self.right_vm_fixture, svm_ids,
                                       si_fixture)

        self.verify_traffic_flow(self.left_vm_fixture, dst_vm_list, si_fixture,
                                 self.left_vn_fixture)
Example 27
    def test_ingress_fanout_with_vrouter_agent_restart(self):
        '''Create a fanout ingress with 2 different hosts having 2 different paths along with a default backend.
           These hosts are backed by their respective services. Each service has the required backend pods with the required path
           mentioned in the ingress rules. From the local node, do a wget on the ingress public ip.
           Validate that the services and their loadbalancing work.
           Restart the vrouter agent on all the compute nodes
           Re-verify that the loadbalancing works after the restart
        '''

        app1 = 'http_test1'
        app2 = 'http_test2'
        labels1 = {'app': app1}
        labels2 = {'app': app2}
        service_name1 = 's1'
        service_name2 = 's2'
        path1 = 'foo'
        path2 = 'bar'
        host1 = 'foo.bar.com'
        host2 = 'bar.foo.com'
        ingress_name = 'testingress'
        namespace = self.setup_namespace(name='default')
        assert namespace.verify_on_setup()

        service1 = self.setup_http_service(namespace=namespace.name,
                                           labels=labels1,
                                           name=service_name1)

        service2 = self.setup_http_service(namespace=namespace.name,
                                           labels=labels2,
                                           name=service_name2)

        pod1 = self.setup_nginx_pod(namespace=namespace.name, labels=labels1)
        pod2 = self.setup_nginx_pod(namespace=namespace.name, labels=labels1)
        pod3 = self.setup_nginx_pod(namespace=namespace.name, labels=labels2)
        pod4 = self.setup_nginx_pod(namespace=namespace.name, labels=labels2)

        rules = [{
            'host': host1,
            'http': {
                'paths': [{
                    'path': '/' + path1,
                    'backend': {
                        'service_name': service_name1,
                        'service_port': 80
                    }
                }]
            }
        }, {
            'host': host2,
            'http': {
                'paths': [{
                    'path': '/' + path2,
                    'backend': {
                        'service_name': service_name2,
                        'service_port': 80
                    }
                }]
            }
        }]
        default_backend = {'service_name': service_name1, 'service_port': 80}

        ingress = self.setup_ingress(name=ingress_name,
                                     namespace=namespace.name,
                                     rules=rules,
                                     default_backend=default_backend)
        assert ingress.verify_on_setup()

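        # Busybox pod acts as the client for in-cluster validation of the ingress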
        pod5 = self.setup_busybox_pod(namespace=namespace.name)
        self.verify_nginx_pod(pod1, path=path1)
        self.verify_nginx_pod(pod2, path=path1)
        self.verify_nginx_pod(pod3, path=path2)
        self.verify_nginx_pod(pod4, path=path2)

        assert pod5.verify_on_setup()

        # Now validate ingress from within the cluster network
        assert self.validate_nginx_lb([pod1, pod2],
                                      ingress.cluster_ip,
                                      test_pod=pod5,
                                      path=path1,
                                      host=host1)
        assert self.validate_nginx_lb([pod3, pod4],
                                      ingress.cluster_ip,
                                      test_pod=pod5,
                                      path=path2,
                                      host=host2)

        # Now validate ingress from public network
        assert self.validate_nginx_lb([pod1, pod2],
                                      ingress.external_ips[0],
                                      path=path1,
                                      host=host1)
        assert self.validate_nginx_lb([pod3, pod4],
                                      ingress.external_ips[0],
                                      path=path2,
                                      host=host2)
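        # Restart contrail-vrouter-agent on all compute nodes and wait for the
        # vrouter role to stabilize before re-validating load balancing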
        for compute_ip in self.inputs.compute_ips:
            self.inputs.restart_service('contrail-vrouter-agent', [compute_ip],
                                        container='agent')
        cluster_status, error_nodes = ContrailStatusChecker(
            self.inputs).wait_till_contrail_cluster_stable(
                nodes=self.inputs.compute_ips, roles="vrouter")
        assert cluster_status, 'Cluster is not stable after restart'
        assert self.validate_nginx_lb([pod1, pod2],
                                      ingress.cluster_ip,
                                      test_pod=pod5,
                                      path=path1,
                                      host=host1)
        assert self.validate_nginx_lb([pod3, pod4],
                                      ingress.cluster_ip,
                                      test_pod=pod5,
                                      path=path2,
                                      host=host2)
Example n. 28
0
    def test_process_restart_with_rr_set(self):
        ''' Test to validate that a route reflector (RR) works fine across process restarts
            1. Create a VN with two VMs and verify they can reach each other
            2. Set a control node as the RR
            3. Ping from one VM to the other
            4. Restart the 'vrouter', 'control' and 'api' processes on the setup
            5. Ping again between the VMs after the restarts
        Pass criteria: Steps 2, 3, 4 and 5 should pass
        '''
        if len(set(self.inputs.bgp_ips)) < 3:
            skip_msg = ("Skipping test. At least 3 control nodes are "
                        "required to run this test")
            self.logger.info(skip_msg)
            raise self.skipTest(skip_msg)

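        # Create a VN with two VMs and verify they can reach each other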
        vn1_name = get_random_name('vn1')
        vn1_subnets = ['192.168.1.0/24']
        vn1_vm1_name = get_random_name('vn1_vm1')
        vn1_vm2_name = get_random_name('vn1_vm2')
        vn1_fixture = self.create_vn(vn1_name, vn1_subnets)
        assert vn1_fixture.verify_on_setup()
        vm1_fixture = self.create_vm(vn1_fixture, vn1_vm1_name)
        vm2_fixture = self.create_vm(vn1_fixture, vn1_vm2_name)
        assert vm1_fixture.wait_till_vm_is_up()
        assert vm2_fixture.wait_till_vm_is_up()
        assert vm1_fixture.ping_to_ip(vm2_fixture.vm_ip)
        assert vm2_fixture.ping_to_ip(vm1_fixture.vm_ip)
        # Take the first BGP node
        ctrl_node_name = self.inputs.bgp_names[0]
        ctrl_node_ip = self.inputs.host_data[ctrl_node_name]['control-ip']
        ctrl_node_host_ip = self.inputs.host_data[ctrl_node_name]['host_ip']
        # Set it as the RR
        ctrl_fixture = self.useFixture(
            control_node.CNFixture(connections=self.connections,
                                   inputs=self.inputs,
                                   router_name=ctrl_node_name,
                                   router_ip=ctrl_node_ip))
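        # Derive a numeric cluster ID from the control node IP and configure it,
        # which makes this control node the route reflector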
        cluster_id = ipv4_to_decimal(ctrl_node_ip)

        if ctrl_fixture.set_cluster_id(cluster_id):
            self.logger.info("cluster id set")
        else:
            self.logger.error("cluster id not set")
            assert False
        # Calculate the connection matrix; the full-mesh connections should be removed
        connection_dicts = get_connection_matrix(self.inputs, ctrl_node_name)
        # Verify BGP connections: each non-RR node should have only one BGP connection, to the RR,
        # and the RR should have BGP connections to both non-RR nodes
        for entry in connection_dicts:
            if verify_peer_in_control_nodes(self.cn_inspect, entry,
                                            connection_dicts[entry],
                                            self.logger):
                self.logger.info("BGP connections are proper")
            else:
                self.logger.error("BGP connections are not proper")

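        # Restart the vrouter agent on every compute node; on DPDK computes the
        # agent is stopped first, the dpdk container restarted, then the agent started again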
        for compute_ip in self.inputs.compute_ips:
            if compute_ip in self.inputs.dpdk_ips:
                self.inputs.stop_service('contrail-vrouter-agent',
                                         [compute_ip],
                                         container='agent')
                self.inputs.restart_service('contrail-vrouter-agent-dpdk',
                                            [compute_ip],
                                            container='agent-dpdk')
                self.inputs.start_service('contrail-vrouter-agent',
                                          [compute_ip],
                                          container='agent')
            else:
                self.inputs.restart_service('contrail-vrouter-agent',
                                            [compute_ip],
                                            container='agent')

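        # Restart contrail-control on all control nodes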
        for bgp_ip in self.inputs.bgp_ips:
            self.inputs.restart_service('contrail-control', [bgp_ip],
                                        container='control')
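        # Restart contrail-api on all config (cfgm) nodes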
        for cfgm_ip in self.inputs.cfgm_ips:
            self.inputs.restart_service('contrail-api', [cfgm_ip],
                                        container='api-server')

        # Wait for cluster to be stable
        cs_obj = ContrailStatusChecker(self.inputs)
        clusterstatus, error_nodes = cs_obj.wait_till_contrail_cluster_stable()
        assert clusterstatus, ('Hash of error nodes and services : %s' %
                               (error_nodes))

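        # Post-restart sanity checks specific to the RR topology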
        assert self.verification_after_process_restart_in_rr()
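        # Each contrail-api instance should report its module state in the cfgm UVE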
        for cfgm_name in self.inputs.cfgm_names:
            assert self.analytics_obj.verify_cfgm_uve_module_state(
                self.inputs.collector_names[0], cfgm_name, 'contrail-api')
        # The RR should have BGP connections to both non-RR nodes
        for entry in connection_dicts:
            if verify_peer_in_control_nodes(self.cn_inspect, entry,
                                            connection_dicts[entry],
                                            self.logger):
                self.logger.info("BGP connections are proper after restarts")
            else:
                self.logger.error(
                    "BGP connections are not proper after restarts")
        assert vm1_fixture.wait_till_vm_is_up()
        assert vm2_fixture.wait_till_vm_is_up()
        assert vm1_fixture.ping_to_ip(vm2_fixture.vm_ip)
        assert vm2_fixture.ping_to_ip(vm1_fixture.vm_ip)