Code example #1
File: test_km_ha.py Project: Juniper/contrail-test
    def test_single_node_failover(self):
        '''
        Stop all contrail containers (contrail-kube-manager, controller,
        analytics, analyticsdb) on one of the HA-enabled controller nodes.
        Validate that a kube-manager on one of the other nodes becomes active.
        We should be able to delete and add pods during this time, and the
        pods should get their IPs.

        '''
        css = ContrailStatusChecker(self.inputs)
        containers = ['contrail-kube-manager', 'api-server', 'schema',
                      'analyticsdb', 'analytics-api', 'collector']
        km_h = self.connections.get_kube_manager_h()
        node = km_h.ip
        # Setup pods
        pods = self.setup_my_pods(2)
        self.verify_pods(pods)

        self.stop_containers(node, containers, wait=2)
        self.delete_pods(pods)
        self.verify_pods_are_not_in_k8s(pods)

        # Setup pods again now
        pods = self.setup_my_pods(2)
        assert css.wait_till_contrail_cluster_stable()[0]
        self.verify_pods(pods)
        self.verify_mesh_ping(pods)
        # Delete the pods now and verify cleanup
        self.delete_pods(pods)
        self.verify_pods_are_deleted(pods)
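
All of the examples on this page share one stabilization pattern: disrupt a service, then block on wait_till_contrail_cluster_stable() before re-validating the datapath. A minimal sketch of that pattern, assuming the usual self.inputs fixture wiring seen in these examples:

    # Minimal sketch, not tied to any single test above. Judging from the
    # examples on this page, wait_till_contrail_cluster_stable() returns a
    # (status, error_nodes) tuple: True with an empty map once contrail-status
    # is clean on the given nodes, else False with a {node: [failed services]}
    # map.
    css = ContrailStatusChecker(self.inputs)
    self.inputs.restart_service('contrail-vrouter-agent',
                                self.inputs.compute_ips, container='agent')
    stable, error_nodes = css.wait_till_contrail_cluster_stable(
        self.inputs.compute_ips)
    assert stable, 'Services still down: %s' % error_nodes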
Code example #2
 def reload_vrouter(self, wait=True):
     '''Reload vrouter module without restarting the compute node
     '''
     self.logger.info('Reloading vrouter module on %s' % (self.ip))
     self.execute_cmd('service supervisor-vrouter stop; '
         'modprobe -r vrouter || rmmod vrouter; '
         'service supervisor-vrouter start')
     if wait:
         status = ContrailStatusChecker(self.inputs)
         status.wait_till_contrail_cluster_stable([self.ip])
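
A hedged usage sketch for this helper: iterate over the compute fixtures and reload each node, letting wait=True block until contrail-status is clean again. The fixture lookup name below is hypothetical, not from this page.

     # self.compute_fixtures_dict is an assumed mapping from compute IP to
     # the node fixture that defines reload_vrouter().
     for compute_ip in self.inputs.compute_ips:
         self.compute_fixtures_dict[compute_ip].reload_vrouter(wait=True)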
Code example #3
File: base.py Project: Juniper/contrail-test
 def add_remove_server(self, operation, server_ip, section, option,
                       client_role, client_process, index=0):
     ''' This function adds or removes an entry from the list of servers
     configured in the .conf file of the client.
     It reads the .conf file to get the list, then checks whether the entry
     already exists and performs the requested operation.
     Values for argument "client_process" can be any process which we
             see under contrail-status output, eg "contrail-control"
     Values for argument "operation" can be:
             "add" or "remove"
     Values for argument "client_role" can be:
             "agent", "control", "config", "analytics" and "database"
     '''
     client_conf_file = client_process + ".conf"
     cmd_set = "openstack-config --get /etc/contrail/" + client_conf_file
     cmd = cmd_set + " " + section + " " + option
     if client_role == "agent":
         for ip in self.inputs.compute_ips:
             server_list = self.get_new_server_list(operation, ip,
                                                    cmd, server_ip, index,
                                                    container = "agent")
             self.configure_server_list(ip, client_process,
                     section, option, server_list, container = "agent")
     elif client_role in ["control", "dns"]:
         for ip in self.inputs.bgp_ips:
             server_list = self.get_new_server_list(operation, ip,
                                                    cmd, server_ip, index,
                                                    container = client_role)
             self.configure_server_list(ip, client_process,
                     section, option, server_list, container = client_role)
     elif client_role == "config":
         for ip in self.inputs.cfgm_ips:
             server_list = self.get_new_server_list(operation, ip,
                                                    cmd, server_ip, index,
                                                    container = "api-server")
             self.configure_server_list(ip, client_process,
                     section, option, server_list, container = "api-server")
     elif client_role == "analytics":
         for ip in self.inputs.collector_ips:
             server_list = self.get_new_server_list(operation, ip,
                                                    cmd, server_ip, index,
                                                    container = "analytics-api")
             self.configure_server_list(ip, client_process,
                     section, option, server_list, container = "analytics-api")
     elif client_role == "database":
         for ip in self.inputs.database_ips:
             server_list = self.get_new_server_list(operation, ip,
                                                    cmd, server_ip, index,
                                                    container = "analytics-cassandra")
             self.configure_server_list(ip, client_process,
                     section, option, server_list, container = "analytics-cassandra")
     status_checker = ContrailStatusChecker(self.inputs)
     result = status_checker.wait_till_contrail_cluster_stable()[0]
     assert result, "Contrail cluster not up after add/remove of entry"
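
For symmetry, a hedged sketch of what the configure_server_list() counterpart could look like: write the updated list back with openstack-config --set (the mirror of the --get read above) and restart the client so the change takes effect. The helper body below is an assumption, not the project's actual implementation.

     def configure_server_list_sketch(self, ip, client_process, section,
                                      option, server_list, container=None):
         # Hypothetical mirror of the --get read above: write the new server
         # list back and restart the client process to pick it up.
         conf_file = '/etc/contrail/' + client_process + '.conf'
         value = ' '.join(server_list)
         cmd = "openstack-config --set %s %s %s '%s'" % (
             conf_file, section, option, value)
         self.inputs.run_cmd_on_server(ip, issue_cmd=cmd, container=container)
         self.inputs.restart_service(client_process, [ip], container=container)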
Code example #4
    def test_vrouter_kernal_module_unload_reload(self):
        '''
        1. Create a VN and two VMs
        2. Verify that ping between the VMs passes
        3. Stop vrouter services, unload the vrouter kernel module and verify status
        4. Reload the vrouter module and start vrouter services
        5. Verify that ping between the VMs passes again
        '''
        compute_ip = self.inputs.compute_ips[0]
        compute_control_ip = self.inputs.compute_control_ips[0]
        if compute_ip == compute_control_ip:
            raise self.skipTest(
                    'Skipping Test. Need multi_interface testbed')
        result = True
        cmd_vr_unload = 'modprobe -r vrouter'
        cmd_vr_reload = 'modprobe -a vrouter'
        vn1_fixture = self.create_vn()
        vm1_fixture = self.create_vm(vn1_fixture,
                                              image_name='cirros')
        vm2_fixture = self.create_vm(vn1_fixture,
                                              image_name='cirros')
        vm1_fixture.wait_till_vm_is_up()
        vm2_fixture.wait_till_vm_is_up()
        compute_ip = vm1_fixture.vm_node_ip
        assert vm1_fixture.ping_with_certainty(vm2_fixture.vm_ip)
        self.inputs.stop_service('supervisor-vrouter', host_ips=[compute_ip],
                                 container='agent')
        self.inputs.run_cmd_on_server(compute_ip, issue_cmd=cmd_vr_unload)
        status = self.inputs.run_cmd_on_server(compute_ip,
                                               issue_cmd='lsmod | grep vrouter')
        if status:
            result = result and False
            self.logger.error('Vrouter kernel module failed to unload')
        else:
            self.logger.info('Vrouter kernel module unloaded successfully')
        self.logger.info('Reloading vrouter kernel module')
        self.inputs.run_cmd_on_server(compute_ip, issue_cmd=cmd_vr_reload)
        status = self.inputs.run_cmd_on_server(compute_ip,
                                               issue_cmd='lsmod | grep vrouter')
        if not status:
            result = result and False
            self.logger.error('Vrouter kernel module failed to reload')
        else:
            self.logger.info('Vrouter kernel module reloaded successfully')
        self.inputs.start_service('supervisor-vrouter', host_ips=[compute_ip],
                                  container='agent')
        status = ContrailStatusChecker(self.inputs)
        status.wait_till_contrail_cluster_stable()
        assert result, 'Vrouter kernel module failed to unload and reload'

        #Get the latest metadata ip of the instance after vrouter reload
        vm1_fixture.get_local_ip(refresh=True)
        assert vm1_fixture.ping_with_certainty(vm2_fixture.vm_ip)
Code example #5
 def reload_vrouter(self, wait=True):
     '''Reload vrouter module without restarting the compute node
     '''
     self.logger.info('Reloading vrouter module on %s' % (self.ip))
     #ToDo msenthil - Need to check how to reload kernel module
     if self.inputs.host_data[self.ip].get('containers', {}).get('agent'):
         stop_cmd = 'docker exec -it agent service supervisor-vrouter stop'
         start_cmd = 'docker exec -it agent service supervisor-vrouter start'
     else:
         stop_cmd = 'service supervisor-vrouter stop'
         start_cmd = 'service supervisor-vrouter start'
     self.execute_cmd('%s; '
         'modprobe -r vrouter || rmmod vrouter; '
         '%s ' % (stop_cmd, start_cmd), container=None)
     if wait:
         status = ContrailStatusChecker(self.inputs)
         status.wait_till_contrail_cluster_stable([self.ip])
Code example #6
 def test_vrouter_xconnect(self):
     '''Test vrouter cross connect mode by taking vrouter agent down
     1. get compute node ip
     2. stop vrouter_agent
     3. Try to ssh to compute node from cfgm
     4. Verify Xconnect mode
     5. start vrouter-agent'''
     result = True
     verify_xconnect = "vif --list | grep Flags:X"
     compute_ip = self.inputs.compute_ips[0]
     self.inputs.stop_service('contrail-vrouter-agent', [compute_ip],
                              container='agent')
     self.logger.info('Verify Xconnect mode')
     output = self.inputs.run_cmd_on_server(compute_ip,
                                            issue_cmd=verify_xconnect)
     if not output:
         result = result and False
     else:
         self.logger.info('Xconnect mode got enabled')
     self.inputs.start_service('contrail-vrouter-agent', [compute_ip],
                               container='agent')
     status = ContrailStatusChecker(self.inputs)
     status.wait_till_contrail_cluster_stable([compute_ip])
     assert result, 'Xconnect mode not enabled'
Code example #7
File: test_km_ha.py Project: Juniper/contrail-test
    def test_km_active_backup(self):
        '''
        Create a pod A
        Restart an active km, check one of the others becomes active
        Create another pod B. Check B can reach A
        Restart the active km, check one of the others becomes active
        Create another pod C. Check C can reach A, B
        Restart the active km again, check one of the others becomes active
        Create another pod D. Check D can reach A, B, C
        '''
        css = ContrailStatusChecker(self.inputs)
        pod1 = self.setup_busybox_pod()
        assert pod1.wait_till_pod_is_up()

        (active_km, backup_kms) = self.get_active_backup_kms(refresh=True)
        self.restart_kube_manager([active_km])
        css.wait_till_contrail_cluster_stable(nodes=[active_km])
        (active_km_1, backup_kms_1) = self.get_active_backup_kms(refresh=True)
        assert active_km_1 in backup_kms, 'New KM was not chosen as active'
        pod2 = self.setup_busybox_pod()
        assert pod2.wait_till_pod_is_up()
        assert self.verify_reachability(pod2, [pod1])

        self.restart_kube_manager([active_km_1])
        css.wait_till_contrail_cluster_stable(nodes=[active_km_1])
        (active_km_2, backup_kms_2) = self.get_active_backup_kms(refresh=True)
        assert active_km_2 in backup_kms_1, 'New KM was not chosen as active'
        pod3 = self.setup_busybox_pod()
        assert pod3.wait_till_pod_is_up()
        assert self.verify_reachability(pod3, [pod1, pod2])

        self.restart_kube_manager([active_km_2])
        css.wait_till_contrail_cluster_stable(nodes=[active_km_2])
        (active_km_3, backup_kms_3) = self.get_active_backup_kms(refresh=True)
        assert active_km_3 in backup_kms_2, 'New KM was not chosen as active'
        pod4 = self.setup_busybox_pod()
        assert pod4.wait_till_pod_is_up()
        assert self.verify_reachability(pod4, [pod1, pod2, pod3])
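
A hedged sketch of how the active/backup classification used above can be derived: contrail-status reports the active contrail-kube-manager as "active" and the others as "backup". The kube_manager_ips inventory attribute is an assumption.

        def get_active_backup_kms_sketch(self):
            # Hypothetical reimplementation: classify each kube-manager node
            # by the state contrail-status reports for contrail-kube-manager.
            active, backups = None, []
            for ip in self.inputs.kube_manager_ips:  # assumed inventory field
                out = self.inputs.run_cmd_on_server(
                    ip,
                    issue_cmd='contrail-status | grep contrail-kube-manager')
                if 'active' in out:
                    active = ip
                else:
                    backups.append(ip)
            return (active, backups)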
Code example #8
 def test_vdns_with_diff_zone(self):
     ''' Test vdns in different zones with multi projects '''
     var_obj = self.InitForZoneTests()
     vdns_fixt1 = {}
     ipam_mgmt_obj = {}
     for project in var_obj.project_list:
         dns_server_name = var_obj.proj_vdns[project]
         self.logger.info(
             'Creating vdns server:%s in project:%s',
             dns_server_name,
             project)
         domain_name = '%s.net' % (project)
         ttl = 100
         # VDNS creation
         dns_data = VirtualDnsType(
             domain_name=domain_name, dynamic_records_from_client=True,
             default_ttl_seconds=ttl, record_order='random')
         vdns_fixt1[project] = self.useFixture(
             VdnsFixture(
                 self.inputs,
                 self.connections,
                 vdns_name=dns_server_name,
                 dns_data=dns_data))
         result, msg = vdns_fixt1[project].verify_on_setup()
         self.assertTrue(result, msg)
         dns_server = IpamDnsAddressType(
             virtual_dns_server_name=vdns_fixt1[project].vdns_fq_name)
         ipam_mgmt_obj[project] = IpamType(
             ipam_dns_method='virtual-dns-server',
             ipam_dns_server=dns_server)
     ipam_fixt = {}
     vn_fixt = {}
     vm_fix = {}
     pol_fixt = {}
     for proj in var_obj.project_list:
         # User creation
         user_fixture = self.useFixture(
             UserFixture(
                 connections=self.admin_connections,
                 username=var_obj.proj_user[proj],
                 password=var_obj.proj_pass[proj]))
         # Project creation
         project_fixture = self.useFixture(
             ProjectFixture(
                 project_name=proj,
                 username=var_obj.proj_user[proj],
                 password=var_obj.proj_pass[proj],
                 connections=self.admin_connections))
         user_fixture.add_user_to_tenant(proj, var_obj.proj_user[proj], 'admin')
         project_fixture.set_user_creds(var_obj.proj_user[proj], var_obj.proj_pass[proj])
         project_inputs = ContrailTestInit(
                 self.ini_file,
                 stack_user=project_fixture.project_username,
                 stack_password=project_fixture.project_user_password,
                 stack_tenant=proj,
                 logger=self.logger)
         project_connections = ContrailConnections(project_inputs,
                                                   logger=self.logger)
         self.logger.info(
             'Default SG to be edited for allow all on project: %s' % proj)
         project_fixture.set_sec_group_for_allow_all(proj, 'default')
         # Ipam creation
         ipam_fixt[proj] = self.useFixture(
             IPAMFixture(var_obj.ipam_list[proj],
                         vdns_obj=vdns_fixt1[proj].obj,
                         project_obj=project_fixture,
                         ipamtype=ipam_mgmt_obj[proj]))
         # VN Creation
         vn_fixt[proj] = self.useFixture(
             VNFixture(
                 project_name=proj,
                 connections=project_connections,
                 vn_name=var_obj.vn_list[proj],
                 inputs=project_inputs,
                 subnets=var_obj.vn_nets[proj],
                 ipam_fq_name=ipam_fixt[proj].getObj().get_fq_name()))
         vn_quantum_obj = self.orch.get_vn_obj_if_present(vn_name=var_obj.vn_list[proj], project_id=project_fixture.uuid)
         # VM creation
         vm_fix[proj] = self.useFixture(
             VMFixture(
                 project_name=proj,
                 connections=project_connections,
                 vn_obj=vn_quantum_obj,
                 vm_name=var_obj.vm_list[proj]))
         vm_fix[proj].verify_vm_launched()
         vm_fix[proj].verify_on_setup()
         vm_fix[proj].wait_till_vm_is_up()
         msg = "Ping by using name %s is failed. Dns server \
               should resolve VM name to IP" % (var_obj.vm_list[proj])
         self.assertTrue(
             vm_fix[proj].ping_with_certainty(ip=var_obj.vm_list[proj]), msg)
         vm_ip = vm_fix[proj].get_vm_ip_from_vm(
             vn_fq_name=vm_fix[proj].vn_fq_name)
         vm_rev_ip = vm_ip.split('.')
         vm_rev_ip = '.'.join(
             (vm_rev_ip[3], vm_rev_ip[2], vm_rev_ip[1], vm_rev_ip[0]))
         vm_rev_ip = vm_rev_ip + '.in-addr.arpa'
         rev_zone = var_obj.vn_nets[proj][0].split('/')[0].split('.')
         rev_zone = '.'.join((rev_zone[0], rev_zone[1], rev_zone[2]))
         rev_zone = rev_zone + '.in-addr.arpa'
         # Frame the Expected DNS data for VM, one for 'A' record and
         # another 'PTR' record.
         domain_name = '%s.net' % (proj)
         rec_name = var_obj.vm_list[proj] + "." + domain_name
         agent_inspect_h = self.agent_inspect[vm_fix[proj].vm_node_ip]
         assigned_dns_ips = agent_inspect_h.get_vna_discovered_dns_server()
         vm_dns_exp_data = [{'rec_data': vm_ip,
                             'rec_type': 'A',
                             'rec_class': 'IN',
                             'rec_ttl': str(ttl),
                             'rec_name': rec_name,
                             'installed': 'yes',
                             'zone': domain_name},
                            {'rec_data': rec_name,
                             'rec_type': 'PTR',
                             'rec_class': 'IN',
                             'rec_ttl': str(ttl),
                             'rec_name': vm_rev_ip,
                             'installed': 'yes',
                             'zone': rev_zone}]
         self.verify_vm_dns_data(vm_dns_exp_data, assigned_dns_ips[0])
         vm_dns_exp_data = []
     self.logger.info(
         'Restart supervisor-config & supervisor-control and test ping')
     for bgp_ip in self.inputs.bgp_ips:
         self.inputs.restart_service('supervisor-control', [bgp_ip])
     for cfgm_ip in self.inputs.cfgm_ips:
         self.inputs.restart_service('supervisor-config', [cfgm_ip])
     status_checker = ContrailStatusChecker(self.inputs)
     self.logger.debug("Waiting for all the services to be UP")
     assert status_checker.wait_till_contrail_cluster_stable()[0],\
             "All services could not come UP after restart"
     for proj in var_obj.project_list:
         msg = "Ping by using name %s is failed. Dns server \
               should resolve VM name to IP" % (var_obj.vm_list[proj])
         self.assertTrue(
             vm_fix[proj].ping_with_certainty(ip=var_obj.vm_list[proj]), msg)
     return True
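
The reverse-DNS name arithmetic above (split the IP, re-join it backwards, append .in-addr.arpa) has a standard-library equivalent worth knowing; a sketch assuming Python 3.5+:

     import ipaddress

     # Equivalent of the manual split/join above: reverse_pointer yields the
     # PTR record name, e.g. '4.1.168.192.in-addr.arpa' for '192.168.1.4'.
     vm_rev_ip = ipaddress.ip_address(vm_ip).reverse_pointer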
Code example #9
File: test_ecmp.py Project: Ankitja/contrail-test
    def test_ecmp_svc_in_network_with_3_instance_service_restarts(self):
        """
        Description: Validate ECMP after restarting control and vrouter services
        with service chaining in in-network mode datapath having service instance
        Test steps:
                   1. Creating vm's - vm1 and vm2 in networks vn1 and vn2.
                   2. Creating a service instance in in-network mode with 3 instances.
                   3. Creating a service chain by applying the service instance as a
                      service in a policy between the VNs.
                   4. Checking for ping and traffic between vm1 and vm2.
        Pass criteria: Ping between the VMs should be successful and TCP traffic
        should reach vm2 from vm1 and vice-versa even after the restarts.
        Maintainer : [email protected]
        """
        ret_dict = self.verify_svc_chain(max_inst=3,
                                         service_mode='in-network',
                                         create_svms=True,
                                         **self.common_args)
        si_fixture = ret_dict['si_fixture']
        svm_ids = si_fixture.svm_ids
        self.get_rt_info_tap_intf_list(
            self.left_vn_fixture, self.left_vm_fixture, self.right_vm_fixture,
            svm_ids, si_fixture)
        dst_vm_list = [self.right_vm_fixture]
        self.verify_traffic_flow(self.left_vm_fixture, dst_vm_list,
            si_fixture, self.left_vn_fixture)
        for compute_ip in self.inputs.compute_ips:
            self.inputs.restart_service('contrail-vrouter', [compute_ip],
                                        container='agent')

        # Wait for service stability
        cs_checker = ContrailStatusChecker()
        cluster_status, error_nodes = cs_checker.wait_till_contrail_cluster_stable(
                                          self.inputs.compute_ips)
        assert cluster_status, 'Hash of error nodes and services : %s' % (
                                    error_nodes)

        self.left_vm_fixture.wait_till_vm_is_up()
        self.right_vm_fixture.wait_till_vm_is_up()

        self.get_rt_info_tap_intf_list(
            self.left_vn_fixture, self.left_vm_fixture, self.right_vm_fixture,
            svm_ids, si_fixture)
        fab_connections.clear()
        self.verify_traffic_flow(self.left_vm_fixture, dst_vm_list,
            si_fixture, self.left_vn_fixture)
        for bgp_ip in self.inputs.bgp_ips:
            self.inputs.restart_service('contrail-control', [bgp_ip],
                                        container='control')

        cluster_status, error_nodes = cs_checker.wait_till_contrail_cluster_stable(
                                          self.inputs.bgp_ips)
        assert cluster_status, 'Hash of error nodes and services : %s' % (
                                    error_nodes)

        self.left_vm_fixture.wait_till_vm_is_up()
        self.right_vm_fixture.wait_till_vm_is_up()

        self.get_rt_info_tap_intf_list(
            self.left_vn_fixture, self.left_vm_fixture, self.right_vm_fixture,
            svm_ids, si_fixture)

        fab_connections.clear()
        self.verify_traffic_flow(self.left_vm_fixture, dst_vm_list,
            si_fixture, self.left_vn_fixture)
Code example #10
    def test_process_restart_in_policy_between_vns(self):
        ''' Test to validate that, with a policy containing an icmp allow rule
        between VMs on different VNs, ping between the VMs passes across
        process restarts.
            1. Pick 2 VN's from resource pool which has one VM each
            2. Create policy with icmp allow rule between those VN's and bind
               it to the networks
            3. Ping from one VM to another VM
            4. Restart processes 'vrouter' and 'control' on the setup
            5. Ping again between VM's after the process restarts
        Pass criteria: Steps 2, 3, 4 and 5 should pass
        '''
        vn1_name = get_random_name('vn1')
        vn1_subnets = ["192.168.1.0/24"]
        vn2_name = get_random_name('vn2')
        vn2_subnets = ["192.168.2.0/24"]
        policy1_name = 'policy1'
        policy2_name = 'policy2'
        rules = [
            {
                'direction': '<>', 'simple_action': 'pass',
                'protocol': 'icmp',
                'source_network': vn1_name,
                'dest_network': vn2_name,
            },
        ]
        rev_rules = [
            {
                'direction': '<>', 'simple_action': 'pass',
                'protocol': 'icmp',
                'source_network': vn2_name,
                'dest_network': vn1_name,
            },
        ]
        policy1_fixture = self.useFixture(
            PolicyFixture(
                policy_name=policy1_name, rules_list=rules, inputs=self.inputs,
                connections=self.connections))
        policy2_fixture = self.useFixture(
            PolicyFixture(
                policy_name=policy2_name, 
                rules_list=rev_rules, inputs=self.inputs,
                connections=self.connections))
        vn1_fixture = self.create_vn(vn1_name, vn1_subnets, option='contrail')
        assert vn1_fixture.verify_on_setup()
        vn1_fixture.bind_policies(
            [policy1_fixture.policy_fq_name], vn1_fixture.vn_id)
        self.addCleanup(vn1_fixture.unbind_policies,
                        vn1_fixture.vn_id, [policy1_fixture.policy_fq_name])
        vn2_fixture = self.create_vn(vn2_name, vn2_subnets, option='contrail')
        assert vn2_fixture.verify_on_setup()
        vn2_fixture.bind_policies(
            [policy2_fixture.policy_fq_name], vn2_fixture.vn_id)
        self.addCleanup(vn2_fixture.unbind_policies,
                        vn2_fixture.vn_id, [policy2_fixture.policy_fq_name])
        vn1_vm1_name = get_random_name('vn1_vm1')
        vn2_vm1_name = get_random_name('vn2_vm1')
        vm1_fixture = self.create_vm(vn1_fixture, vn1_vm1_name)
        vm2_fixture = self.create_vm(vn2_fixture, vn2_vm1_name)
        assert vm1_fixture.wait_till_vm_is_up()
        assert vm2_fixture.wait_till_vm_is_up()
        assert vm1_fixture.ping_with_certainty(vm2_fixture.vm_ip)

        for compute_ip in self.inputs.compute_ips:
            self.inputs.restart_service('contrail-vrouter-agent', [compute_ip])
        for bgp_ip in self.inputs.bgp_ips:
            self.inputs.restart_service('contrail-control', [bgp_ip])
        for cfgm_ip in self.inputs.cfgm_ips:
            self.inputs.restart_service('contrail-api', [cfgm_ip])

        # Wait for cluster to be stable
        cs_obj = ContrailStatusChecker(self.inputs)
        clusterstatus, error_nodes = cs_obj.wait_till_contrail_cluster_stable()
        assert clusterstatus, (
            'Hash of error nodes and services : %s' % (error_nodes))

        assert self.verification_after_process_restart_in_policy_between_vns()
        for cfgm_name in self.inputs.cfgm_names:
            assert self.analytics_obj.verify_cfgm_uve_module_state\
                        (self.inputs.collector_names[0],
                        cfgm_name,'contrail-api')

        vn1_vm2_name = get_random_name('vn1_vm2')
        vn2_vm2_name = get_random_name('vn2_vm2')

        vn3_name = get_random_name('vn3')
        vn3_subnets = ["192.168.4.0/24"]
        vn3_fixture = self.create_vn(vn3_name, vn3_subnets, option='contrail')
        assert vn3_fixture.verify_on_setup()

        vm3_fixture = self.create_vm(vn1_fixture, vn1_vm2_name)
        assert vm3_fixture.verify_on_setup()
        vm4_fixture = self.create_vm(vn2_fixture, vn2_vm2_name)
        assert vm4_fixture.verify_on_setup()
        vm3_fixture.wait_till_vm_is_up()
        vm4_fixture.wait_till_vm_is_up()
        assert vm3_fixture.ping_with_certainty(vm4_fixture.vm_ip)
Code example #11
File: test_rr.py Project: Juniper/contrail-test
    def test_process_restart_with_rr_set(self):
        ''' Test to validate that RR works fine with process restarts.
            1. Pick 2 VN's from resource pool which has one VM each
            2. Set control node as RR
            3. Ping from one VM to another VM
            4. Restart process 'vrouter' and 'control' on setup
            5. Ping again between VM's after process restart
        Pass criteria: Steps 2, 3, 4 and 5 should pass
        '''
        if len(set(self.inputs.bgp_ips)) < 3:
            self.logger.info(
                "Skipping Test. At least 3 control nodes are required to run the test")
            raise self.skipTest(
                "Skipping Test. At least 3 control nodes are required to run the test")

        vn1_name = get_random_name('vn1')
        vn1_subnets = ['192.168.1.0/24']
        vn1_vm1_name = get_random_name('vn1_vm1')
        vn1_vm2_name = get_random_name('vn1_vm2')
        vn1_fixture = self.create_vn(vn1_name, vn1_subnets)
        assert vn1_fixture.verify_on_setup()
        vm1_fixture = self.create_vm(vn1_fixture, vn1_vm1_name)
        vm2_fixture = self.create_vm(vn1_fixture, vn1_vm2_name)
        assert vm1_fixture.wait_till_vm_is_up()
        assert vm2_fixture.wait_till_vm_is_up()
        assert vm1_fixture.ping_to_ip(vm2_fixture.vm_ip)
        assert vm2_fixture.ping_to_ip(vm1_fixture.vm_ip)
        # Take the first BGP node
        ctrl_node_name = self.inputs.bgp_names[0]
        ctrl_node_ip = self.inputs.host_data[ctrl_node_name]['control-ip']
        ctrl_node_host_ip = self.inputs.host_data[ctrl_node_name]['host_ip']
        # Set it as the RR
        ctrl_fixture = self.useFixture(
                control_node.CNFixture(
                          connections=self.connections,
                          inputs=self.inputs,
                          router_name=ctrl_node_name,
                          router_ip=ctrl_node_ip
                          )) 
        cluster_id = ipv4_to_decimal(ctrl_node_ip)
       
        if ctrl_fixture.set_cluster_id(cluster_id):
            self.logger.info("cluster id set")
        else:
            self.logger.error("cluster id not set")
            assert False
        # Calculating connection matrix. The mesh connections should be removed
        connection_dicts = get_connection_matrix(self.inputs, ctrl_node_name)
        # Verifying bgp connections. The non-rr nodes should have only one bgp
        # connection to the RR; the RR should have bgp connections to both non-rrs
        for entry in connection_dicts:
            if verify_peer_in_control_nodes(self.cn_inspect, entry,
                                            connection_dicts[entry], self.logger):
                self.logger.info("BGP connections are proper")
            else:
                self.logger.error("BGP connections are not proper")

        for compute_ip in self.inputs.compute_ips:
            if compute_ip in self.inputs.dpdk_ips:
                self.inputs.stop_service('contrail-vrouter-agent', [compute_ip],
                                         container='agent')
                self.inputs.restart_service('contrail-vrouter-agent-dpdk', [compute_ip],
                                            container='agent-dpdk')
                self.inputs.start_service('contrail-vrouter-agent', [compute_ip],
                                          container='agent')
            else:
                self.inputs.restart_service('contrail-vrouter-agent', [compute_ip],
                                        container='agent')

        for bgp_ip in self.inputs.bgp_ips:
            self.inputs.restart_service('contrail-control', [bgp_ip],
                                        container='control')
        for cfgm_ip in self.inputs.cfgm_ips:
            self.inputs.restart_service('contrail-api', [cfgm_ip],
                                        container='api-server')

        # Wait for cluster to be stable
        cs_obj = ContrailStatusChecker(self.inputs)
        clusterstatus, error_nodes = cs_obj.wait_till_contrail_cluster_stable()
        assert clusterstatus, (
            'Hash of error nodes and services : %s' % (error_nodes))

        assert self.verification_after_process_restart_in_rr()
        for cfgm_name in self.inputs.cfgm_names:
            assert self.analytics_obj.verify_cfgm_uve_module_state\
                        (self.inputs.collector_names[0],
                        cfgm_name,'contrail-api')
        # RR should have bgp connections to both the non-rrs
        for entry in connection_dicts:
            if verify_peer_in_control_nodes(self.cn_inspect, entry,
                                            connection_dicts[entry], self.logger):
                self.logger.info("BGP connections are proper after restarts")
            else:
                self.logger.error("BGP connections are not proper after restarts")
        assert vm1_fixture.wait_till_vm_is_up()
        assert vm2_fixture.wait_till_vm_is_up()
        assert vm1_fixture.ping_to_ip(vm2_fixture.vm_ip)
        assert vm2_fixture.ping_to_ip(vm1_fixture.vm_ip)
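
ipv4_to_decimal() above turns the dotted quad into the 32-bit integer that BGP expects for a cluster ID. A plausible self-contained equivalent (not necessarily the project's implementation):

        import socket
        import struct

        def ipv4_to_decimal_sketch(ip):
            # Pack the dotted quad in network byte order and read it back as
            # an unsigned 32-bit integer, e.g. '10.0.0.1' -> 167772161.
            return struct.unpack('!I', socket.inet_aton(ip))[0]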
Code example #12
 def test_vdns_with_same_zone(self):
     ''' Test vdns in same zone with multi projects/vdns-servers '''
     var_obj = self.InitForZoneTests()
     vdns_fixt1 = {}
     ipam_mgmt_obj = {}
     for project in var_obj.project_list:
         dns_server_name = var_obj.proj_vdns[project]
         self.logger.info('Creating vdns server:%s in project:%s',
                          dns_server_name, project)
         domain_name = 'juniper.net'
         ttl = 100
         # VDNS creation
         dns_data = VirtualDnsType(domain_name=domain_name,
                                   dynamic_records_from_client=True,
                                   default_ttl_seconds=ttl,
                                   record_order='random')
         vdns_fixt1[project] = self.useFixture(
             VdnsFixture(self.inputs,
                         self.connections,
                         vdns_name=dns_server_name,
                         dns_data=dns_data))
         result, msg = vdns_fixt1[project].verify_on_setup()
         self.assertTrue(result, msg)
         dns_server = IpamDnsAddressType(
             virtual_dns_server_name=vdns_fixt1[project].vdns_fq_name)
         ipam_mgmt_obj[project] = IpamType(
             ipam_dns_method='virtual-dns-server',
             ipam_dns_server=dns_server)
     ipam_fixt = {}
     vn_fixt = {}
     vm_fix = {}
     pol_fixt = {}
     for proj in var_obj.project_list:
         # User creation
         user_fixture = self.useFixture(
             UserFixture(connections=self.admin_connections,
                         username=var_obj.proj_user[proj],
                         password=var_obj.proj_pass[proj]))
         # Project creation
         project_fixture = self.useFixture(
             ProjectFixture(project_name=proj,
                            username=var_obj.proj_user[proj],
                            password=var_obj.proj_pass[proj],
                            connections=self.admin_connections))
         user_fixture.add_user_to_tenant(proj, var_obj.proj_user[proj],
                                         'admin')
         project_fixture.set_user_creds(var_obj.proj_user[proj],
                                        var_obj.proj_pass[proj])
         project_inputs = ContrailTestInit(
             self.ini_file,
             stack_user=project_fixture.project_username,
             stack_password=project_fixture.project_user_password,
             stack_tenant=proj,
             logger=self.logger)
         project_connections = ContrailConnections(project_inputs,
                                                   logger=self.logger)
         self.logger.info(
             'Default SG to be edited for allow all on project: %s' % proj)
         project_fixture.set_sec_group_for_allow_all(proj, 'default')
         # Ipam creation
         ipam_fixt[proj] = self.useFixture(
             IPAMFixture(var_obj.ipam_list[proj],
                         vdns_obj=vdns_fixt1[proj].obj,
                         connections=project_connections,
                         ipamtype=ipam_mgmt_obj[proj]))
         # VN Creation
         vn_fixt[proj] = self.useFixture(
             VNFixture(project_name=proj,
                       connections=project_connections,
                       vn_name=var_obj.vn_list[proj],
                       inputs=project_inputs,
                       subnets=var_obj.vn_nets[proj],
                       ipam_fq_name=ipam_fixt[proj].getObj().get_fq_name()))
         vn_quantum_obj = self.orch.get_vn_obj_if_present(
             vn_name=var_obj.vn_list[proj], project_id=project_fixture.uuid)
         # VM creation
         vm_fix[proj] = self.useFixture(
             VMFixture(project_name=proj,
                       connections=project_connections,
                       vn_obj=vn_quantum_obj,
                       vm_name=var_obj.vm_list[proj]))
         vm_fix[proj].verify_vm_launched()
         vm_fix[proj].verify_on_setup()
         vm_fix[proj].wait_till_vm_is_up()
         msg = "Ping by using name %s is failed. Dns server \
               should resolve VM name to IP" % (var_obj.vm_list[proj])
         self.assertTrue(
             vm_fix[proj].ping_with_certainty(ip=var_obj.vm_list[proj]),
             msg)
         vm_ip = vm_fix[proj].get_vm_ip_from_vm(
             vn_fq_name=vm_fix[proj].vn_fq_name)
         vm_rev_ip = vm_ip.split('.')
         vm_rev_ip = '.'.join(
             (vm_rev_ip[3], vm_rev_ip[2], vm_rev_ip[1], vm_rev_ip[0]))
         vm_rev_ip = vm_rev_ip + '.in-addr.arpa'
         rev_zone = var_obj.vn_nets[proj][0].split('/')[0].split('.')
         rev_zone = '.'.join((rev_zone[0], rev_zone[1], rev_zone[2]))
         rev_zone = rev_zone + '.in-addr.arpa'
         # Frame the Expected DNS data for VM, one for 'A' record and
         # another 'PTR' record.
         rec_name = var_obj.vm_list[proj] + "." + domain_name
         agent_inspect_h = self.agent_inspect[vm_fix[proj].vm_node_ip]
         assigned_dns_ips = agent_inspect_h.get_vna_dns_server()
         vm_dns_exp_data = [{
             'rec_data': vm_ip,
             'rec_type': 'A',
             'rec_class': 'IN',
             'rec_ttl': str(ttl),
             'rec_name': rec_name,
             'installed': 'yes',
             'zone': domain_name
         }, {
             'rec_data': rec_name,
             'rec_type': 'PTR',
             'rec_class': 'IN',
             'rec_ttl': str(ttl),
             'rec_name': vm_rev_ip,
             'installed': 'yes',
             'zone': rev_zone
         }]
         self.verify_vm_dns_data(vm_dns_exp_data, assigned_dns_ips[0])
         vm_dns_exp_data = []
     self.logger.info(
         'Restart supervisor-config & supervisor-control and test ping')
     for bgp_ip in self.inputs.bgp_ips:
         self.inputs.restart_service('supervisor-control', [bgp_ip],
                                     container='controller')
     for cfgm_ip in self.inputs.cfgm_ips:
         self.inputs.restart_service('supervisor-config', [cfgm_ip],
                                     container='controller')
     status_checker = ContrailStatusChecker(self.inputs)
     self.logger.debug("Waiting for all the services to be UP")
     assert status_checker.wait_till_contrail_cluster_stable()[0],\
             "All services could not come UP after restart"
     for proj in var_obj.project_list:
         msg = "Ping by using name %s is failed. Dns server \
               should resolve VM name to IP" % (var_obj.vm_list[proj])
         self.assertTrue(
             vm_fix[proj].ping_with_certainty(ip=var_obj.vm_list[proj]),
             msg)
     return True
Code example #13
File: test_ingress.py Project: pulkitt/contrail-test
    def test_ingress_fanout_with_vrouter_agent_restart(self):
        '''Create a fanout ingress with 2 different hosts, each with a different
           path, along with a default backend. Each host is backed by its
           respective service, and each service has the required backend pods
           serving the path mentioned in the ingress rule. From the local node,
           do a wget on the ingress public ip and validate that the service and
           its load balancing work.
           Restart the vrouter agent on all computes.
           Re-verify that the load balancing works after the restart.
        '''

        app1 = 'http_test1'
        app2 = 'http_test2'
        labels1 = {'app': app1}
        labels2 = {'app': app2}
        service_name1 = 's1'
        service_name2 = 's2'
        path1 = 'foo'
        path2 = 'bar'
        host1 = 'foo.bar.com'
        host2 = 'bar.foo.com'
        ingress_name = 'testingress'
        namespace = self.setup_namespace(name='default')
        assert namespace.verify_on_setup()

        service1 = self.setup_http_service(namespace=namespace.name,
                                           labels=labels1,
                                           name=service_name1)

        service2 = self.setup_http_service(namespace=namespace.name,
                                           labels=labels2,
                                           name=service_name2)

        pod1 = self.setup_nginx_pod(namespace=namespace.name, labels=labels1)
        pod2 = self.setup_nginx_pod(namespace=namespace.name, labels=labels1)
        pod3 = self.setup_nginx_pod(namespace=namespace.name, labels=labels2)
        pod4 = self.setup_nginx_pod(namespace=namespace.name, labels=labels2)

        rules = [{
            'host': host1,
            'http': {
                'paths': [{
                    'path': '/' + path1,
                    'backend': {
                        'service_name': service_name1,
                        'service_port': 80
                    }
                }]
            }
        }, {
            'host': host2,
            'http': {
                'paths': [{
                    'path': '/' + path2,
                    'backend': {
                        'service_name': service_name2,
                        'service_port': 80
                    }
                }]
            }
        }]
        default_backend = {'service_name': service_name1, 'service_port': 80}

        ingress = self.setup_ingress(name=ingress_name,
                                     namespace=namespace.name,
                                     rules=rules,
                                     default_backend=default_backend)
        assert ingress.verify_on_setup()

        pod5 = self.setup_busybox_pod(namespace=namespace.name)
        self.verify_nginx_pod(pod1, path=path1)
        self.verify_nginx_pod(pod2, path=path1)
        self.verify_nginx_pod(pod3, path=path2)
        self.verify_nginx_pod(pod4, path=path2)

        assert pod5.verify_on_setup()

        # Now validate ingress from within the cluster network
        assert self.validate_nginx_lb([pod1, pod2],
                                      ingress.cluster_ip,
                                      test_pod=pod5,
                                      path=path1,
                                      host=host1)
        assert self.validate_nginx_lb([pod3, pod4],
                                      ingress.cluster_ip,
                                      test_pod=pod5,
                                      path=path2,
                                      host=host2)

        # Now validate ingress from public network
        assert self.validate_nginx_lb([pod1, pod2],
                                      ingress.external_ips[0],
                                      path=path1,
                                      host=host1)
        assert self.validate_nginx_lb([pod3, pod4],
                                      ingress.external_ips[0],
                                      path=path2,
                                      host=host2)
        for compute_ip in self.inputs.compute_ips:
            self.inputs.restart_service('contrail-vrouter-agent', [compute_ip],
                                        container='agent')
        cluster_status, error_nodes = ContrailStatusChecker(
        ).wait_till_contrail_cluster_stable()
        assert cluster_status, 'Cluster is not stable after restart'
        assert self.validate_nginx_lb([pod1, pod2],
                                      ingress.cluster_ip,
                                      test_pod=pod5,
                                      path=path1,
                                      host=host1)
        assert self.validate_nginx_lb([pod3, pod4],
                                      ingress.cluster_ip,
                                      test_pod=pod5,
                                      path=path2,
                                      host=host2)
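
A hedged sketch of the kind of probe validate_nginx_lb() presumably issues for a fanout rule: wget against the ingress IP with the Host header set, so the ingress routes the request by host and path. The pod run_cmd method name is assumed.

        # Hypothetical single probe; repeating it should spread responses
        # across the backend pods if load balancing works.
        cmd = "wget -qO- --header 'Host: %s' http://%s/%s" % (
            host1, ingress.cluster_ip, path1)
        output = pod5.run_cmd(cmd)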
Code example #14
    def test_ecmp_svc_in_network_with_3_instance_service_restarts(self):
        """
        Description: Validate ECMP after restarting control and vrouter services
        with service chaining in in-network mode datapath having service instance
        Test steps:
                   1. Creating vm's - vm1 and vm2 in networks vn1 and vn2.
                   2. Creating a service instance in in-network mode with 3 instances.
                   3. Creating a service chain by applying the service instance as a
                      service in a policy between the VNs.
                   4. Checking for ping and traffic between vm1 and vm2.
        Pass criteria: Ping between the VMs should be successful and TCP traffic
        should reach vm2 from vm1 and vice-versa even after the restarts.
        Maintainer : [email protected]
        """
        ret_dict = self.verify_svc_chain(max_inst=3,
                                         service_mode='in-network',
                                         create_svms=True,
                                         **self.common_args)
        si_fixture = ret_dict['si_fixture']
        svm_ids = si_fixture.svm_ids
        self.get_rt_info_tap_intf_list(self.left_vn_fixture,
                                       self.left_vm_fixture,
                                       self.right_vm_fixture, svm_ids,
                                       si_fixture)
        dst_vm_list = [self.right_vm_fixture]
        self.verify_traffic_flow(self.left_vm_fixture, dst_vm_list, si_fixture,
                                 self.left_vn_fixture)
        for compute_ip in self.inputs.compute_ips:
            self.inputs.restart_service('contrail-vrouter', [compute_ip],
                                        container='agent')

        # Wait for service stability
        cs_checker = ContrailStatusChecker()
        cluster_status, error_nodes = cs_checker.wait_till_contrail_cluster_stable(
            self.inputs.compute_ips)
        assert cluster_status, 'Hash of error nodes and services : %s' % (
            error_nodes)

        self.left_vm_fixture.wait_till_vm_is_up()
        self.right_vm_fixture.wait_till_vm_is_up()

        self.get_rt_info_tap_intf_list(self.left_vn_fixture,
                                       self.left_vm_fixture,
                                       self.right_vm_fixture, svm_ids,
                                       si_fixture)
        fab_connections.clear()
        self.verify_traffic_flow(self.left_vm_fixture, dst_vm_list, si_fixture,
                                 self.left_vn_fixture)
        for bgp_ip in self.inputs.bgp_ips:
            self.inputs.restart_service('contrail-control', [bgp_ip],
                                        container='controller')

        cluster_status, error_nodes = cs_checker.wait_till_contrail_cluster_stable(
            self.inputs.bgp_ips)
        assert cluster_status, 'Hash of error nodes and services : %s' % (
            error_nodes)

        self.left_vm_fixture.wait_till_vm_is_up()
        self.right_vm_fixture.wait_till_vm_is_up()

        self.get_rt_info_tap_intf_list(self.left_vn_fixture,
                                       self.left_vm_fixture,
                                       self.right_vm_fixture, svm_ids,
                                       si_fixture)

        fab_connections.clear()
        self.verify_traffic_flow(self.left_vm_fixture, dst_vm_list, si_fixture,
                                 self.left_vn_fixture)
Code example #15
    def add_remove_server(self,
                          operation,
                          server_ip,
                          section,
                          option,
                          client_role,
                          client_process,
                          container_name,
                          index=0):
        ''' This function adds or removes an entry from the list of servers
        configured in the .conf file of the client.
        It reads the .conf file to get the list, then checks whether the entry
        already exists and performs the requested operation.
        Values for argument "client_process" can be any process which we
                see under contrail-status output, eg "contrail-control"
        Values for argument "operation" can be:
                "add" or "remove"
        Values for argument "client_role" can be:
                "agent", "control", "config", "analytics" and "database"
        '''
        config_file = 'entrypoint.sh'
        client_conf_file = client_process + ".conf"
        cmd_set = "openstack-config --get /etc/contrail/" + client_conf_file
        cmd = cmd_set + " " + section + " " + option

        if client_role == "agent":
            for ip in self.inputs.compute_ips:
                server_list = self.get_new_server_list(operation,
                                                       ip,
                                                       cmd,
                                                       server_ip,
                                                       index,
                                                       container="agent")
                self.configure_server_list(ip,
                                           client_process,
                                           section,
                                           option,
                                           server_list,
                                           config_file,
                                           container_name,
                                           container="agent")
        elif client_role in ["control", "dns"]:
            for ip in self.inputs.bgp_ips:
                server_list = self.get_new_server_list(operation,
                                                       ip,
                                                       cmd,
                                                       server_ip,
                                                       index,
                                                       container=client_role)
                self.configure_server_list(ip,
                                           client_process,
                                           section,
                                           option,
                                           server_list,
                                           config_file,
                                           container_name,
                                           container=client_role)
        elif client_role == "config":
            for ip in self.inputs.cfgm_ips:
                server_list = self.get_new_server_list(
                    operation,
                    ip,
                    cmd,
                    server_ip,
                    index,
                    container='config-nodemgr')
                self.configure_server_list(ip,
                                           client_process,
                                           section,
                                           option,
                                           server_list,
                                           config_file,
                                           container_name,
                                           container="config-nodemgr")
        elif client_role == "analytics":
            for ip in self.inputs.collector_ips:
                server_list = self.get_new_server_list(
                    operation,
                    ip,
                    cmd,
                    server_ip,
                    index,
                    container="analytics-api")
                self.configure_server_list(ip,
                                           client_process,
                                           section,
                                           option,
                                           server_list,
                                           config_file,
                                           container_name,
                                           container="analytics-api")
        elif client_role == "database":
            for ip in self.inputs.database_ips:
                server_list = self.get_new_server_list(
                    operation,
                    ip,
                    cmd,
                    server_ip,
                    index,
                    container="analytics-cassandra")
                self.configure_server_list(ip,
                                           client_process,
                                           section,
                                           option,
                                           server_list,
                                           config_file,
                                           container_name,
                                           container="analytics-cassandra")
        status_checker = ContrailStatusChecker(self.inputs)
        result = status_checker.wait_till_contrail_cluster_stable()[0]
        assert result, "Contrail cluster not up after add/remove of entry"
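
A hedged usage sketch of this containerized variant; the section, option, and server values below are illustrative only, not taken from this page.

        # Point every compute agent's server list at an extra control node
        # and wait for the cluster to settle (values are examples).
        self.add_remove_server('add', '10.10.10.5', 'CONTROL-NODE', 'servers',
                               'agent', 'contrail-vrouter-agent', 'agent')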
Code example #16
File: test_rr.py Project: rofra/contrail-test
    def test_process_restart_with_rr_set(self):
        ''' Test to validate that RR works fine with process restarts.
            1. Pick 2 VN's from resource pool which has one VM each
            2. Set control node as RR
            3. Ping from one VM to another VM
            4. Restart process 'vrouter' and 'control' on setup
            5. Ping again between VM's after process restart
        Pass criteria: Steps 2, 3, 4 and 5 should pass
        '''
        if len(set(self.inputs.bgp_ips)) < 3:
            self.logger.info(
                "Skipping Test. At least 3 control nodes are required to run the test"
            )
            raise self.skipTest(
                "Skipping Test. At least 3 control nodes are required to run the test"
            )

        vn1_name = get_random_name('vn1')
        vn1_subnets = ['192.168.1.0/24']
        vn1_vm1_name = get_random_name('vn1_vm1')
        vn1_vm2_name = get_random_name('vn1_vm2')
        vn1_fixture = self.create_vn(vn1_name, vn1_subnets)
        assert vn1_fixture.verify_on_setup()
        vm1_fixture = self.create_vm(vn1_fixture, vn1_vm1_name)
        vm2_fixture = self.create_vm(vn1_fixture, vn1_vm2_name)
        assert vm1_fixture.wait_till_vm_is_up()
        assert vm2_fixture.wait_till_vm_is_up()
        assert vm1_fixture.ping_to_ip(vm2_fixture.vm_ip)
        assert vm2_fixture.ping_to_ip(vm1_fixture.vm_ip)
        # Take the first BGP node
        ctrl_node_name = self.inputs.bgp_names[0]
        ctrl_node_ip = self.inputs.host_data[ctrl_node_name]['control-ip']
        ctrl_node_host_ip = self.inputs.host_data[ctrl_node_name]['host_ip']
        # Set it as the RR
        ctrl_fixture = self.useFixture(
            control_node.CNFixture(connections=self.connections,
                                   inputs=self.inputs,
                                   router_name=ctrl_node_name,
                                   router_ip=ctrl_node_ip))
        cluster_id = ipv4_to_decimal(ctrl_node_ip)

        if ctrl_fixture.set_cluster_id(cluster_id):
            self.logger.info("cluster id set")
        else:
            self.logger.error("cluster id not set")
            assert False
        # Calculating connection matrix. The mesh connections should be removed
        connection_dicts = get_connection_matrix(self.inputs, ctrl_node_name)
        # Verifying bgp connections. The non-rr nodes should have only one bgp
        # connection to the RR; the RR should have bgp connections to both non-rrs
        for entry in connection_dicts:
            if verify_peer_in_control_nodes(self.cn_inspect, entry,
                                            connection_dicts[entry],
                                            self.logger):
                self.logger.info("BGP connections are proper")
            else:
                self.logger.error("BGP connections are not proper")

        for compute_ip in self.inputs.compute_ips:
            if compute_ip in self.inputs.dpdk_ips:
                self.inputs.stop_service('contrail-vrouter-agent',
                                         [compute_ip],
                                         container='agent')
                self.inputs.restart_service('contrail-vrouter-agent-dpdk',
                                            [compute_ip],
                                            container='agent-dpdk')
                self.inputs.start_service('contrail-vrouter-agent',
                                          [compute_ip],
                                          container='agent')
            else:
                self.inputs.restart_service('contrail-vrouter-agent',
                                            [compute_ip],
                                            container='agent')

        for bgp_ip in self.inputs.bgp_ips:
            self.inputs.restart_service('contrail-control', [bgp_ip],
                                        container='control')
        for cfgm_ip in self.inputs.cfgm_ips:
            self.inputs.restart_service('contrail-api', [cfgm_ip],
                                        container='api-server')

        # Wait for cluster to be stable
        cs_obj = ContrailStatusChecker(self.inputs)
        clusterstatus, error_nodes = cs_obj.wait_till_contrail_cluster_stable()
        assert clusterstatus, ('Hash of error nodes and services : %s' %
                               (error_nodes))

        assert self.verification_after_process_restart_in_rr()
        for cfgm_name in self.inputs.cfgm_names:
            assert self.analytics_obj.verify_cfgm_uve_module_state\
                        (self.inputs.collector_names[0],
                        cfgm_name,'contrail-api')
        # RR should have bgp connections to both the non-rrs
        for entry in connection_dicts:
            if verify_peer_in_control_nodes(self.cn_inspect, entry,
                                            connection_dicts[entry],
                                            self.logger):
                self.logger.info("BGP connections are proper after restarts")
            else:
                self.logger.error(
                    "BGP connections are not proper after restarts")
        assert vm1_fixture.wait_till_vm_is_up()
        assert vm2_fixture.wait_till_vm_is_up()
        assert vm1_fixture.ping_to_ip(vm2_fixture.vm_ip)
        assert vm2_fixture.ping_to_ip(vm1_fixture.vm_ip)