Example #1
0
    def cold_reboot(self, ip, option):
        '''Power-cycle the node at *ip* via IPMI.

        ip     : management IP of the node; its BMC address is resolved
                 through get_ipmi_address().
        option : ipmitool "chassis power" option ('on', 'off', 'cycle', ...).
        Returns True once the power command has been issued and stale
        handles/connections have been refreshed.
        '''
        if option != 'on':
            # Before taking the node down, make GRUB boot unattended:
            # cap the recordfail timeout and force 'nomodeset' so a failed
            # boot does not hang at the GRUB menu or on graphics init.
            cmd = 'if ! grep -Rq "GRUB_RECORDFAIL_TIMEOUT" /etc/default/grub; then echo "GRUB_RECORDFAIL_TIMEOUT=10" >> /etc/default/grub; update-grub ; fi ;sed -i s/GRUB_CMDLINE_LINUX_DEFAULT.*/GRUB_CMDLINE_LINUX_DEFAULT=\"nomodeset\"/g /etc/default/grub ; update-grub;'
            self.logger.info('command executed  %s' % cmd)
            self.inputs.run_cmd_on_server(ip, cmd)
            # This is required for hardware initialization failure
            # (blacklist the Intel MEI modules in several places so the
            # next boot cannot stall on them).
            cmd = 'echo  "blacklist mei" > /etc/modprobe.d/mei.conf;'
            self.inputs.run_cmd_on_server(ip, cmd)
            cmd = 'echo  "blacklist mei_me" > /etc/modprobe.d/mei_me.conf;'
            self.inputs.run_cmd_on_server(ip, cmd)
            cmd = 'if ! grep -Rq "mei_me" /etc/modprobe.d/blacklist.conf ; then echo "blacklist mei_me" >> /etc/modprobe.d/blacklist.conf; fi ;'
            self.inputs.run_cmd_on_server(ip, cmd)

        # Issue the power command locally on the test runner via ipmitool.
        ipmi_addr = self.get_ipmi_address(ip)
        cmd = 'ipmitool -H "%s" -U %s -P %s chassis power "%s"' % (
            ipmi_addr, self.inputs.ipmi_username, self.inputs.ipmi_password,
            option)
        self.logger.info('command executed  %s' % cmd)
        local(cmd)
        # clear the fab connections
        sleep(20)
        self.connections.update_inspect_handles()
        fab_connections.clear()
        # Allow ample time for the node to complete its power cycle and boot.
        sleep(420)
        return True
Example #2
0
 def reset_handles(self, hosts, service=None):
     '''Undo update_handles: restore IP lists that were pointed at the
     contrail VIP back to the real host addresses after an HA failure
     test, and drop the hosts from the temporary HA list.'''
     vip = self.inputs.vip['contrail']
     for node in hosts:
         # Lists to patch, in the same order the original checks them.
         ip_lists = [self.inputs.cfgm_ips, self.inputs.cfgm_control_ips]
         if service != 'haproxy':
             ip_lists.append(self.inputs.bgp_ips)
         ip_lists.extend([self.inputs.collector_ips,
                          self.inputs.ds_server_ip])
         for ip_list in ip_lists:
             if vip in ip_list:
                 ip_list[ip_list.index(vip)] = node
         if vip == self.inputs.cfgm_ip:
             self.inputs.cfgm_ip = node
             self.connections.get_vnc_lib_handle()
     self.connections.update_inspect_handles()
     for node in hosts:
         if node in self.inputs.ha_tmp_list:
             self.inputs.ha_tmp_list.remove(node)
     fab_connections.clear()
Example #3
0
    def isolate_node(self,ip,state):
        '''Isolate (or un-isolate) a node with iptables while keeping ssh open.

        Whitelists tcp/22 in both directions, then applies *state*
        (e.g. DROP / ACCEPT) to the INPUT, OUTPUT and FORWARD chains.
        When state is ACCEPT the tables are flushed to lift the isolation.
        '''
        creds = self.inputs.host_data[ip]
        user = creds['username']
        pwd = creds['password']
        # block all traffic except ssh port for isolating the node
        allow_ssh = ['iptables -A INPUT -p tcp --dport 22 -j ACCEPT',
                     'iptables -A OUTPUT -p tcp --sport 22 -j ACCEPT']
        chain_rules = ['iptables -A %s -j %s' % (chain, state)
                       for chain in ('INPUT', 'OUTPUT', 'FORWARD')]
        for rule in allow_ssh + chain_rules:
            self.inputs.run_cmd_on_server(ip, rule, username=user, password=pwd)
            self.logger.info('command executed  %s' % rule)
        # Capture the kernel routing table so the gateway can be recorded.
        route_dump = self.inputs.run_cmd_on_server(ip, 'cat /proc/net/route',
                                                   username=user, password=pwd)
        self.get_gw(route_dump)
        if state == 'ACCEPT':
            flush_cmd = 'iptables -F '
            self.inputs.run_cmd_on_server(ip, flush_cmd,
                                          username=user, password=pwd)
            self.logger.info('command executed  %s' % flush_cmd)
        fab_connections.clear()

        return True
Example #4
0
 def update_handles(self, hosts, service=None):
     '''Point the handles of the given hosts at the contrail VIP while
     those nodes are isolated or removed; schedules reset_handles as a
     cleanup when *service* is supplied.'''
     vip = self.inputs.vip['contrail']
     for node in hosts:
         targets = [self.inputs.cfgm_ips, self.inputs.cfgm_control_ips]
         if service != 'haproxy':
             targets.append(self.inputs.bgp_ips)
         targets += [self.inputs.collector_ips, self.inputs.ds_server_ip]
         for ip_list in targets:
             if node in ip_list:
                 ip_list[ip_list.index(node)] = vip
         self.inputs.ha_tmp_list.append(node)
         if node == self.inputs.cfgm_ip:
             self.inputs.cfgm_ip = vip
             self.connections.get_vnc_lib_handle()
     self.connections.update_inspect_handles()
     if service:
         self.addCleanup(self.reset_handles, hosts, service=service)
     fab_connections.clear()
    def verify_traffic_flow(self,
                            src_vm,
                            dst_vm_list,
                            si_fix,
                            src_vn,
                            src_ip=None,
                            dst_ip=None):
        '''Send traffic from src_vm to every VM in dst_vm_list and verify
        it flows through the given service instance.

        src_vm      : source VM fixture (traffic sender).
        dst_vm_list : list of destination VM fixtures.
        si_fix      : service-instance fixture the flow must traverse.
        src_vn      : source virtual-network fixture.
        src_ip      : source IP; defaults to src_vm.vm_ip.
        dst_ip      : destination IP; defaults to the first dst VM's IP.
        Returns True when traffic, the SI path and flow records all verify.
        '''
        fab_connections.clear()
        # Bug fix: honour a caller-supplied src_ip instead of always
        # overwriting it with src_vm.vm_ip (the parameter was dead before).
        if src_ip is None:
            src_ip = src_vm.vm_ip
        if dst_ip is None:
            dst_ip = dst_vm_list[0].vm_ip
        src_vm.install_pkg("Traffic")
        for vm in dst_vm_list:
            vm.install_pkg("Traffic")
        sleep(5)
        stream_list = self.setup_streams(src_vm,
                                         dst_vm_list,
                                         src_ip=src_ip,
                                         dst_ip=dst_ip)
        sender, receiver = self.start_traffic(src_vm,
                                              dst_vm_list,
                                              stream_list,
                                              src_ip=src_ip,
                                              dst_ip=dst_ip)
        self.verify_flow_thru_si(si_fix, src_vn)
        self.verify_flow_records(src_vm, src_ip=src_ip, dst_ip=dst_ip)
        self.stop_traffic(sender, receiver, dst_vm_list, stream_list)

        return True
Example #6
0
 def reboot(self,ip):
     '''Reboot the node at *ip* and wait for it to come back, then
     refresh stale inspect handles and fab connections.'''
     reboot_cmd = 'reboot'
     self.inputs.run_cmd_on_server(ip, reboot_cmd)
     sleep(420)  # give the node ample time to go down and boot again
     self.connections.update_inspect_handles()
     fab_connections.clear()
     return True
Example #7
0
 def reboot(self, ip):
     '''Issue a reboot on the given node; blocks until the node is
     expected to be back up, then clears cached connections.'''
     self.inputs.run_cmd_on_server(ip, 'reboot')
     # The fixed wait covers shutdown plus a full boot cycle.
     sleep(420)
     self.connections.update_inspect_handles()
     fab_connections.clear()
     return True
Example #8
0
 def isolate_node(self,ctrl_ip,state):
     '''Isolate a node by setting the interface that owns *ctrl_ip* to the
     given ifconfig *state* (up/down).  Only meaningful on multi-interface
     setups where control and data IPs differ.'''
     host_ip = self.inputs.host_data[ctrl_ip]['host_ip']
     # Single-interface node: control == data, so isolation is unsupported.
     if host_ip == ctrl_ip:
         raise self.skipTest("This test is not supported when data/control ip are same")
     creds = self.inputs.host_data[host_ip]
     cmd = 'intf=$(ip addr show | grep %s | awk \'{print $7}\') ; ifconfig $intf %s' %(ctrl_ip,state)
     self.logger.info('command executed  %s' %cmd)
     self.inputs.run_cmd_on_server(host_ip, cmd,
                                   username=creds['username'],
                                   password=creds['password'])
     fab_connections.clear()
     sleep(420)
     return True
Example #9
0
 def test_ecmp_svc_in_network_with_3_instance_reboot_nodes(self):
     """Validate ECMP after restarting control and vrouter services with service chaining in-network mode datapath having
     service instance. Check the ECMP behaviour after rebooting the nodes"""
     cmd = 'reboot'
     # Bring up the in-network service chain with 3 service instances.
     self.verify_svc_in_network_datapath(
         si_count=1, svc_scaling=True, max_inst=3, flavor='contrail_flavor_2cpu')
     svm_ids = self.si_fixtures[0].svm_ids
     self.get_rt_info_tap_intf_list(
         self.vn1_fixture, self.vm1_fixture, self.vm2_fixture, svm_ids)

     dst_vm_list= [self.vm2_fixture]
     # Baseline: traffic must flow through the SI before any reboots.
     self.verify_traffic_flow(self.vm1_fixture, dst_vm_list, self.si_fixtures[0], self.vn1_fixture)
     self.logger.info('Will reboot the Compute and Control nodes')
     nodes= []
     nodes= list(set(self.inputs.compute_ips + self.inputs.bgp_ips))
     for node in nodes:
         # Never reboot the first cfgm node -- the test is driven from it.
         if node != self.inputs.cfgm_ips[0]:
             self.logger.info('Will reboot the node %s' %
                              socket.gethostbyaddr(node)[0])
             # NOTE(review): credentials appear sanitized ('******') --
             # presumably real defaults are substituted at runtime; verify.
             self.inputs.run_cmd_on_server(
                 node, cmd, username='******', password='******')
         else:
             self.logger.info(
                 'Node %s is the first cfgm. Will skip rebooting it.' %
                 socket.gethostbyaddr(node)[0])
     self.logger.info('Sleeping for 240 seconds')
     sleep(240)
     self.logger.info(
         'Will check the state of the SIs and power it ON, if it is in SHUTOFF state')
     # We need to check the status of only those VMs associated with this project
     si_svms= []
     si_svms= self.get_svms_in_si(self.si_fixtures[0], self.inputs.project_name)
     vms= [self.vm1_fixture, self.vm2_fixture]
     # Service VMs and test VMs may not auto-start after a host reboot;
     # power on anything that did not come back ACTIVE.
     for svm in si_svms:
         if self.nova_fixture.wait_till_vm_is_active(svm)[1] != 'ACTIVE':
             self.logger.info('Will Power-On %s' % svm.name)
             svm.start()
     for vm in vms:
         if self.nova_fixture.wait_till_vm_is_active(vm.vm_obj)[1] != 'ACTIVE':
             self.logger.info('Will Power-On %s' % vm.vm_obj.name)
             vm.vm_obj.start()
     self.logger.info('Sleeping for 30 seconds')
     sleep(30)
     self.get_rt_info_tap_intf_list(
         self.vn1_fixture, self.vm1_fixture, self.vm2_fixture, svm_ids)
     # Drop stale fab connections to the rebooted nodes before re-verifying.
     fab_connections.clear()
     self.vm1_fixture.wait_till_vm_is_up()
     self.vm2_fixture.wait_till_vm_is_up()
     self.verify_traffic_flow(self.vm1_fixture, dst_vm_list, self.si_fixtures[0], self.vn1_fixture)
     return True
Example #10
0
 def isolate_node(self,ctrl_ip,state):
     '''Toggle the data interface carrying *ctrl_ip* to *state* (up/down)
     in order to isolate or restore the node.'''
     host_ip = self.inputs.host_data[ctrl_ip]['host_ip']
     if host_ip == ctrl_ip:
         # Control and data share one interface; isolating it would cut
         # off all access, so skip instead.
         raise self.skipTest("This test is not supported when data/control ip are same")
     login = self.inputs.host_data[host_ip]
     toggle_cmd = ('intf=$(ip addr show | grep %s | awk \'{print $7}\')'
                   ' ; ifconfig $intf %s') % (ctrl_ip, state)
     self.logger.info('command executed  %s' % toggle_cmd)
     self.inputs.run_cmd_on_server(host_ip, toggle_cmd,
                                   username=login['username'],
                                   password=login['password'])
     fab_connections.clear()
     sleep(420)
     return True
Example #11
0
    def test_ecmp_svc_in_network_with_3_instance_service_restarts(self):
        """
        Description: Validate ECMP after restarting control and vrouter services with service chainin
        g in-network mode datapath having service instance
        Test steps:
                   1.Creating vm's - vm1 and vm2 in networks vn1 and vn2.
                   2.Creating a service instance in in-network mode with 3 instances.
                   3.Creating a service chain by applying the service instance as a service in a po
        licy between the VNs.
                   4.Checking for ping and traffic between vm1 and vm2.
        Pass criteria: Ping between the VMs should be successful and TCP traffic should reach vm2 fr
        om vm1 and vice-versa even after the restarts.
        Maintainer : [email protected]
        """
        # Build the in-network service chain with 3 service instances.
        self.verify_svc_in_network_datapath(si_count=1,
                                            svc_scaling=True,
                                            max_inst=3)
        svm_ids = self.si_fixtures[0].svm_ids
        self.get_rt_info_tap_intf_list(self.vn1_fixture, self.vm1_fixture,
                                       self.vm2_fixture, svm_ids)
        dst_vm_list = [self.vm2_fixture]
        # Baseline traffic verification before any restarts.
        self.verify_traffic_flow(self.vm1_fixture, dst_vm_list,
                                 self.si_fixtures[0], self.vn1_fixture)
        # Pass 1: restart the vrouter agent on every compute, then re-verify.
        for compute_ip in self.inputs.compute_ips:
            self.inputs.restart_service('contrail-vrouter', [compute_ip])
        self.logger.info('Sleeping for 30 seconds')
        sleep(30)

        self.vm1_fixture.wait_till_vm_is_up()
        self.vm2_fixture.wait_till_vm_is_up()

        self.get_rt_info_tap_intf_list(self.vn1_fixture, self.vm1_fixture,
                                       self.vm2_fixture, svm_ids)
        # Stale fab connections must be dropped after service restarts.
        fab_connections.clear()
        self.verify_traffic_flow(self.vm1_fixture, dst_vm_list,
                                 self.si_fixtures[0], self.vn1_fixture)
        # Pass 2: restart the control service on every control node, re-verify.
        for bgp_ip in self.inputs.bgp_ips:
            self.inputs.restart_service('contrail-control', [bgp_ip])
        self.logger.info('Sleeping for 30 seconds')
        sleep(30)

        self.vm1_fixture.wait_till_vm_is_up()
        self.vm2_fixture.wait_till_vm_is_up()

        self.get_rt_info_tap_intf_list(self.vn1_fixture, self.vm1_fixture,
                                       self.vm2_fixture, svm_ids)
        fab_connections.clear()
        self.verify_traffic_flow(self.vm1_fixture, dst_vm_list,
                                 self.si_fixtures[0], self.vn1_fixture)
        return True
Example #12
0
    def verify_traffic_flow(self, src_vm, dst_vm_list, si_fix, src_vn, src_ip= None, dst_ip= None):
        '''Send traffic from src_vm to dst_vm_list and verify it traverses
        the given service instance.

        src_ip defaults to src_vm.vm_ip and dst_ip to the first dst VM's
        IP when not supplied.  Returns True when the SI path and flow
        records verify.
        '''
        fab_connections.clear()
        # Bug fix: only default src_ip when the caller did not supply one;
        # previously it was unconditionally overwritten with src_vm.vm_ip.
        if src_ip is None:
            src_ip = src_vm.vm_ip
        if dst_ip is None:
            dst_ip = dst_vm_list[0].vm_ip
        src_vm.install_pkg("Traffic")
        for vm in dst_vm_list:
            vm.install_pkg("Traffic")
        sleep(5)
        stream_list= self.setup_streams(src_vm, dst_vm_list, src_ip= src_ip, dst_ip= dst_ip)
        sender, receiver= self.start_traffic(src_vm, dst_vm_list, stream_list, src_ip= src_ip, dst_ip= dst_ip)
        self.verify_flow_thru_si(si_fix, src_vn)
        self.verify_flow_records(src_vm, src_ip= src_ip, dst_ip= dst_ip)
        self.stop_traffic(sender, receiver, dst_vm_list, stream_list)

        return True
Example #13
0
    def test_ecmp_svc_in_network_with_3_instance_service_restarts(self):
        """
        Description: Validate ECMP after restarting control and vrouter services with service chainin
        g in-network mode datapath having service instance
        Test steps:
                   1.Creating vm's - vm1 and vm2 in networks vn1 and vn2.
                   2.Creating a service instance in in-network mode with 3 instances.
                   3.Creating a service chain by applying the service instance as a service in a po
        licy between the VNs.
                   4.Checking for ping and traffic between vm1 and vm2.
        Pass criteria: Ping between the VMs should be successful and TCP traffic should reach vm2 fr
        om vm1 and vice-versa even after the restarts.
        Maintainer : [email protected]
        """
        # Bring up the in-network service chain with 3 service instances.
        self.verify_svc_in_network_datapath(
            si_count=1, svc_scaling=True, max_inst=3)
        svm_ids = self.si_fixtures[0].svm_ids
        self.get_rt_info_tap_intf_list(
            self.vn1_fixture, self.vm1_fixture, self.vm2_fixture, svm_ids)
        dst_vm_list= [self.vm2_fixture]
        # Baseline verification before any restarts.
        self.verify_traffic_flow(self.vm1_fixture, dst_vm_list, self.si_fixtures[0], self.vn1_fixture)
        # Pass 1: restart the vrouter agent on every compute node.
        for compute_ip in self.inputs.compute_ips:
            self.inputs.restart_service('contrail-vrouter', [compute_ip])
        self.logger.info('Sleeping for 30 seconds')
        sleep(30)

        self.vm1_fixture.wait_till_vm_is_up()
        self.vm2_fixture.wait_till_vm_is_up()

        self.get_rt_info_tap_intf_list(
            self.vn1_fixture, self.vm1_fixture, self.vm2_fixture, svm_ids)
        # Cached fab connections are stale after the restarts; drop them.
        fab_connections.clear()
        self.verify_traffic_flow(self.vm1_fixture, dst_vm_list, self.si_fixtures[0], self.vn1_fixture)
        # Pass 2: restart the control service on every control node.
        for bgp_ip in self.inputs.bgp_ips:
            self.inputs.restart_service('contrail-control', [bgp_ip])
        self.logger.info('Sleeping for 30 seconds')
        sleep(30)

        self.vm1_fixture.wait_till_vm_is_up()
        self.vm2_fixture.wait_till_vm_is_up()

        self.get_rt_info_tap_intf_list(
           self.vn1_fixture, self.vm1_fixture, self.vm2_fixture, svm_ids)
        fab_connections.clear()
        self.verify_traffic_flow(self.vm1_fixture, dst_vm_list, self.si_fixtures[0], self.vn1_fixture)
        return True
Example #14
0
 def update_handles(self, hosts, service):
     '''Replace each failed host with the contrail VIP in every handle
     list and register reset_handles as a cleanup.'''
     vip = self.inputs.vip['contrail']
     handle_lists = (self.inputs.cfgm_ips, self.inputs.cfgm_control_ips,
                     self.inputs.bgp_ips, self.inputs.collector_ips,
                     self.inputs.ds_server_ip)
     for node in hosts:
         for ip_list in handle_lists:
             if node in ip_list:
                 ip_list[ip_list.index(node)] = vip
         self.inputs.ha_tmp_list.append(node)
     self.connections.update_inspect_handles()
     self.addCleanup(self.reset_handles, hosts, service=service)
     fab_connections.clear()
Example #15
0
 def reset_handles(self, hosts, service):
     '''Restore the IP handle lists that were pointed at the contrail VIP
     back to the real host addresses and drop the hosts from the
     temporary HA list.'''
     vip = self.inputs.vip['contrail']
     handle_lists = (self.inputs.cfgm_ips, self.inputs.cfgm_control_ips,
                     self.inputs.bgp_ips, self.inputs.collector_ips,
                     self.inputs.ds_server_ip)
     for node in hosts:
         for ip_list in handle_lists:
             if vip in ip_list:
                 ip_list[ip_list.index(vip)] = node
     self.connections.update_inspect_handles()
     for node in hosts:
         if node in self.inputs.ha_tmp_list:
             self.inputs.ha_tmp_list.remove(node)
     fab_connections.clear()
Example #16
0
    def test_ecmp_svc_in_network_with_3_instance_service_restarts(self):
        """Validate ECMP after restarting control and vrouter services with service chaining in-network mode datapath having
        service instance"""
        # Bring up the in-network service chain with 3 service instances.
        self.verify_svc_in_network_datapath(
            si_count=1, svc_scaling=True, max_inst=3)
        svm_ids = self.si_fixtures[0].svm_ids
        self.get_rt_info_tap_intf_list(
            self.vn1_fixture, self.vm1_fixture, svm_ids)
        dst_vm_list= [self.vm2_fixture]
#        self.verify_traffic_flow(self.vm1_fixture, dst_vm_list)
        # Baseline traffic verification before any restarts.
        self.verify_traffic_flow(self.vm1_fixture, dst_vm_list, self.si_fixtures[0], self.vn1_fixture)
        # Pass 1: restart the vrouter agent on every compute node.
        for compute_ip in self.inputs.compute_ips:
            self.inputs.restart_service('contrail-vrouter', [compute_ip])
        self.logger.info('Sleeping for 30 seconds')
        sleep(30)

        self.vm1_fixture.verify_vm_in_agent()
        self.vm1_fixture.wait_till_vm_is_up()
        self.vm2_fixture.verify_vm_in_agent()
        self.vm2_fixture.wait_till_vm_is_up()

        self.get_rt_info_tap_intf_list(
            self.vn1_fixture, self.vm1_fixture, svm_ids)
        # Cached fab connections are stale after restarts; drop them.
        fab_connections.clear()
#        self.verify_traffic_flow(self.vm1_fixture, dst_vm_list)
        self.verify_traffic_flow(self.vm1_fixture, dst_vm_list, self.si_fixtures[0], self.vn1_fixture)
        # Pass 2: restart the control service on every control node.
        for bgp_ip in self.inputs.bgp_ips:
            self.inputs.restart_service('contrail-control', [bgp_ip])
        self.logger.info('Sleeping for 30 seconds')
        sleep(30)

        self.vm1_fixture.verify_vm_in_agent()
        self.vm1_fixture.wait_till_vm_is_up()
        self.vm2_fixture.verify_vm_in_agent()
        self.vm2_fixture.wait_till_vm_is_up()

        self.get_rt_info_tap_intf_list(
            self.vn1_fixture, self.vm1_fixture, svm_ids)
        fab_connections.clear()
#        self.verify_traffic_flow(self.vm1_fixture, dst_vm_list)
        self.verify_traffic_flow(self.vm1_fixture, dst_vm_list, self.si_fixtures[0], self.vn1_fixture)
        return True
    def execCmd(self, cmd, username, password, node, local_ip):
        '''Run *cmd* on *node*, hopping through *local_ip*.

        Commands containing 'show' are executed over netconf with JSON
        output; everything else is run over fab as sudo.  Returns the
        command output.
        '''
        fab_connections.clear()
        with hide('everything'):
            with settings(
                    host_string='%s@%s' % (username, local_ip),
                    password=password,
                    warn_only=True, abort_on_prompts=False, debug=True):
                if 'show' in cmd:
                    result = run_netconf_on_node(
                        host_string='%s@%s' % (
                                    username, node),
                        password=password,
                        cmds=cmd, op_format='json')
                #ssh_conf_file_alternate = "-o UserKnownHostsFile=/dev/null -o strictHostKeyChecking=no"
                else:
                    # Bug fix: this branch previously stored its output in a
                    # separate 'output' variable, leaving 'result' unbound
                    # and raising NameError at the return below.
                    result = run_fab_cmd_on_node(
                        host_string='%s@%s' % (username, node),
                        password=password, cmd=cmd, as_sudo=True)

        return result
    def execCmd(self, cmd, username, password, node, local_ip):
        '''Execute *cmd* on *node* via the gateway *local_ip* and return
        its output.

        'show' commands go over netconf (JSON format); all other commands
        run over fab with sudo.
        '''
        fab_connections.clear()
        with hide('everything'):
            with settings(
                    host_string='%s@%s' % (username, local_ip),
                    password=password,
                    warn_only=True, abort_on_prompts=False, debug=True):
                if 'show' in cmd:
                    result = run_netconf_on_node(
                        host_string='%s@%s' % (
                                    username, node),
                        password=password,
                        cmds=cmd, op_format='json')
                #ssh_conf_file_alternate = "-o UserKnownHostsFile=/dev/null -o strictHostKeyChecking=no"
                else:
                    # Bug fix: assign to 'result' so the function returns the
                    # fab command's output instead of raising NameError on
                    # the previously-unbound 'result'.
                    result = run_fab_cmd_on_node(
                        host_string='%s@%s' % (username, node),
                        password=password, cmd=cmd, as_sudo=True)

        return result
Example #19
0
 def cold_reboot(self,ip,option):
     '''Power-cycle the node at *ip* through IPMI, driving ipmitool from
     the first cfgm node.'''
     ipmi_addr = self.get_ipmi_address(ip)
     # ToDo: Use python based ipmi shutdown wrapper rather than ipmitool
     runner_ip = self.inputs.cfgm_ips[0]
     # Install ipmitool on the runner node, then remove the package file.
     setup_cmds = (
         'wget http://us.archive.ubuntu.com/ubuntu/pool/universe/i/ipmitool/ipmitool_1.8.13-1ubuntu0.1_amd64.deb',
         'dpkg -i /root/ipmitool_1.8.13-1ubuntu0.1_amd64.deb',
         'rm -rf /root/ipmitool_1.8.13-1ubuntu0.1_amd64.deb',
     )
     for step in setup_cmds:
         self.logger.info('command executed  %s' % step)
         self.inputs.run_cmd_on_server(runner_ip, step)
     # TODO removed later , when support is there to execute test from test node.
     power_cmd = '/usr/bin/ipmitool -H "%s" -U %s -P %s chassis power "%s"' % (
         ipmi_addr, self.inputs.ipmi_username,
         self.inputs.ipmi_password, option)
     self.logger.info('command executed  %s' % power_cmd)
     self.inputs.run_cmd_on_server(runner_ip, power_cmd)
     # clear the fab connections
     sleep(10)
     fab_connections.clear()
     return True
Example #20
0
    def cold_reboot(self, ip, option):
        '''Power-cycle the node at *ip* via IPMI, running ipmitool from the
        first cfgm node.

        ip     : node whose BMC address is resolved via get_ipmi_address().
        option : ipmitool "chassis power" option ('on', 'off', 'cycle', ...).
        Returns True after the power command is issued and handles refreshed.
        '''
        if option != 'on':
            # Before powering off, make GRUB boot unattended: cap the
            # recordfail timeout and force 'nomodeset'.
            cmd = 'if ! grep -Rq "GRUB_RECORDFAIL_TIMEOUT" /etc/default/grub; then echo "GRUB_RECORDFAIL_TIMEOUT=10" >> /etc/default/grub; update-grub ; fi ;sed -i s/GRUB_CMDLINE_LINUX_DEFAULT.*/GRUB_CMDLINE_LINUX_DEFAULT=\"nomodeset\"/g /etc/default/grub ; update-grub;'
            self.logger.info('command executed  %s' % cmd)
            self.inputs.run_cmd_on_server(ip, cmd)
            # This is required for hardware initialization failure
            # (blacklist Intel MEI modules so the next boot cannot stall).
            cmd = 'echo  "blacklist mei" > /etc/modprobe.d/mei.conf;'
            self.inputs.run_cmd_on_server(ip, cmd)
            cmd = 'echo  "blacklist mei_me" > /etc/modprobe.d/mei_me.conf;'
            self.inputs.run_cmd_on_server(ip, cmd)
            cmd = 'if ! grep -Rq "mei_me" /etc/modprobe.d/blacklist.conf ; then echo "blacklist mei_me" >> /etc/modprobe.d/blacklist.conf; fi ;'
            self.inputs.run_cmd_on_server(ip, cmd)

        ipmi_addr = self.get_ipmi_address(ip)
        # ToDo: Use python based ipmi shutdown wrapper rather than ipmitool
        test_ip = self.inputs.cfgm_ips[0]
        # Install ipmitool on the first cfgm node, then clean up the .deb.
        cmd = 'wget http://us.archive.ubuntu.com/ubuntu/pool/universe/i/ipmitool/ipmitool_1.8.13-1ubuntu0.2_amd64.deb'
        self.logger.info('command executed  %s' % cmd)
        self.inputs.run_cmd_on_server(test_ip, cmd)
        cmd = 'dpkg -i /root/ipmitool_1.8.13-1ubuntu0.2_amd64.deb'
        self.logger.info('command executed  %s' % cmd)
        self.inputs.run_cmd_on_server(test_ip, cmd)
        cmd = 'rm -rf /root/ipmitool_1.8.13-1ubuntu0.2_amd64.deb'
        self.logger.info('command executed  %s' % cmd)
        self.inputs.run_cmd_on_server(test_ip, cmd)
        # TODO removed later , when support is there to execute test from test node.
        cmd = '/usr/bin/ipmitool -H "%s" -U %s -P %s chassis power "%s"' % (
            ipmi_addr, self.inputs.ipmi_username, self.inputs.ipmi_password,
            option)
        self.logger.info('command executed  %s' % cmd)
        self.inputs.run_cmd_on_server(test_ip, cmd)
        # clear the fab connections
        sleep(20)
        self.connections.update_inspect_handles()
        fab_connections.clear()
        # Allow ample time for the node to complete its power cycle and boot.
        sleep(420)
        return True
Example #21
0
 def reset_handles(self, hosts, service=None):
     '''Restore handle lists that were pointed at the internal VIP back
     to the real host addresses after an HA failure test.'''
     vip = self.inputs.contrail_internal_vip
     for node in hosts:
         # Lists to patch, in the order the original checks them.
         ip_lists = [self.inputs.cfgm_ips, self.inputs.cfgm_control_ips]
         if service != 'haproxy':
             ip_lists.append(self.inputs.bgp_ips)
         ip_lists.extend([self.inputs.collector_ips,
                          self.inputs.ds_server_ip])
         for ip_list in ip_lists:
             if vip in ip_list:
                 ip_list[ip_list.index(vip)] = node
         if vip == self.inputs.cfgm_ip:
             self.inputs.cfgm_ip = node
             self.connections.update_vnc_lib_fixture()
     self.connections.update_inspect_handles()
     for node in hosts:
         if node in self.inputs.ha_tmp_list:
             self.inputs.ha_tmp_list.remove(node)
     fab_connections.clear()
Example #22
0
 def update_handles(self, hosts, service=None):
     '''Point the handles of the given hosts at the internal VIP while
     those nodes are isolated; schedules reset_handles when *service*
     is supplied.'''
     vip = self.inputs.contrail_internal_vip
     for node in hosts:
         targets = [self.inputs.cfgm_ips, self.inputs.cfgm_control_ips]
         if service != 'haproxy':
             targets.append(self.inputs.bgp_ips)
         targets += [self.inputs.collector_ips, self.inputs.ds_server_ip]
         for ip_list in targets:
             if node in ip_list:
                 ip_list[ip_list.index(node)] = vip
         self.inputs.ha_tmp_list.append(node)
         if node == self.inputs.cfgm_ip:
             self.inputs.cfgm_ip = vip
             self.connections.update_vnc_lib_fixture()
     self.connections.update_inspect_handles()
     if service:
         self.addCleanup(self.reset_handles, hosts, service=service)
     fab_connections.clear()
Example #23
0
    def cold_reboot(self,ip,option):
        '''Power-cycle the node at *ip* via IPMI, running ipmitool from the
        first cfgm node.

        ip     : node whose BMC address is resolved via get_ipmi_address().
        option : ipmitool "chassis power" option ('on', 'off', 'cycle', ...).
        Returns True after the power command is issued and handles refreshed.
        '''
        if option != 'on':
            # Before powering off, make GRUB boot unattended: cap the
            # recordfail timeout and force 'nomodeset'.
            cmd = 'if ! grep -Rq "GRUB_RECORDFAIL_TIMEOUT" /etc/default/grub; then echo "GRUB_RECORDFAIL_TIMEOUT=10" >> /etc/default/grub; update-grub ; fi ;sed -i s/GRUB_CMDLINE_LINUX_DEFAULT.*/GRUB_CMDLINE_LINUX_DEFAULT=\"nomodeset\"/g /etc/default/grub ; update-grub;'
            self.logger.info('command executed  %s' %cmd)
            self.inputs.run_cmd_on_server(ip, cmd)
            # This is required for hardware initialization failure
            # (blacklist Intel MEI modules so the next boot cannot stall).
            cmd = 'echo  "blacklist mei" > /etc/modprobe.d/mei.conf;'
            self.inputs.run_cmd_on_server(ip, cmd)
            cmd = 'echo  "blacklist mei_me" > /etc/modprobe.d/mei_me.conf;'
            self.inputs.run_cmd_on_server(ip, cmd)
            cmd = 'if ! grep -Rq "mei_me" /etc/modprobe.d/blacklist.conf ; then echo "blacklist mei_me" >> /etc/modprobe.d/blacklist.conf; fi ;'
            self.inputs.run_cmd_on_server(ip, cmd)

        ipmi_addr = self.get_ipmi_address(ip)
        # ToDo: Use python based ipmi shutdown wrapper rather than ipmitool
        test_ip = self.inputs.cfgm_ips[0]
        # Install ipmitool on the first cfgm node, then clean up the .deb.
        cmd = 'wget http://us.archive.ubuntu.com/ubuntu/pool/universe/i/ipmitool/ipmitool_1.8.13-1ubuntu0.2_amd64.deb'
        self.logger.info('command executed  %s' %cmd)
        self.inputs.run_cmd_on_server(test_ip,cmd)
        cmd = 'dpkg -i /root/ipmitool_1.8.13-1ubuntu0.2_amd64.deb'
        self.logger.info('command executed  %s' %cmd)
        self.inputs.run_cmd_on_server(test_ip,cmd)
        cmd = 'rm -rf /root/ipmitool_1.8.13-1ubuntu0.2_amd64.deb'
        self.logger.info('command executed  %s' %cmd)
        self.inputs.run_cmd_on_server(test_ip,cmd)
        # TODO removed later , when support is there to execute test from test node.
        cmd = '/usr/bin/ipmitool -H "%s" -U %s -P %s chassis power "%s"'%(ipmi_addr,self.inputs.ipmi_username,self.inputs.ipmi_password,option)
        self.logger.info('command executed  %s' %cmd)
        self.inputs.run_cmd_on_server(test_ip,cmd)
        # clear the fab connections
        sleep(20)
        self.connections.update_inspect_handles()
        fab_connections.clear()
        # Allow ample time for the node to complete its power cycle and boot.
        sleep(420)
        return True
Example #24
0
 def cold_reboot(self,ip,option):
     '''Power-cycle the node at *ip* via ipmitool executed from the first
     cfgm node.

     ip     : node whose BMC address is resolved via get_ipmi_address().
     option : ipmitool "chassis power" option ('on', 'off', 'cycle', ...).
     Returns True after the power command has been issued.
     '''
     self.set_ipmi_address()
     ipmi_addr = self.get_ipmi_address(ip)
     username= self.inputs.host_data[self.inputs.cfgm_ips[0]]['username']
     password= self.inputs.host_data[self.inputs.cfgm_ips[0]]['password']
     # Move this one to install script
     test_ip = self.inputs.cfgm_ips[0]
     # Install ipmitool on the first cfgm node, then clean up the .deb.
     cmd = 'wget http://us.archive.ubuntu.com/ubuntu/pool/universe/i/ipmitool/ipmitool_1.8.13-1ubuntu0.1_amd64.deb'
     self.logger.info('command executed  %s' %cmd)
     self.inputs.run_cmd_on_server(test_ip,cmd , username=username ,password=password)
     cmd = 'dpkg -i /root/ipmitool_1.8.13-1ubuntu0.1_amd64.deb'
     self.logger.info('command executed  %s' %cmd)
     self.inputs.run_cmd_on_server(test_ip,cmd , username=username ,password=password)
     cmd = 'rm -rf /root/ipmitool_1.8.13-1ubuntu0.1_amd64.deb'
     self.logger.info('command executed  %s' %cmd)
     self.inputs.run_cmd_on_server(test_ip,cmd , username=username ,password=password)
     # TODO removed later , when support is there to execute test from test node.
     # Bug fix: removed an accidental duplicate run_cmd_on_server call that
     # re-executed the previous 'rm' command a second time here.
     cmd = '/usr/bin/ipmitool -H "%s" -U %s -P %s chassis power "%s"'%(ipmi_addr,self.inputs.ipmi_username,self.inputs.ipmi_password,option)
     self.logger.info('command executed  %s' %cmd)
     self.inputs.run_cmd_on_server(test_ip,cmd , username=username ,password=password)
     # clear the fab connections
     fab_connections.clear()
     return True
Example #25
0
    def cold_reboot(self,ip,option):
        '''Power-cycle a node over IPMI after preparing it to boot
        unattended (GRUB timeout cap, nomodeset, MEI module blacklist).'''
        if option != 'on':
            # Make GRUB non-interactive so a failed boot cannot hang at the menu.
            grub_cmd = ('if ! grep -Rq "GRUB_RECORDFAIL_TIMEOUT" /etc/default/grub; then echo "GRUB_RECORDFAIL_TIMEOUT=10" >> /etc/default/grub; update-grub ; fi ;'
                        'sed -i s/GRUB_CMDLINE_LINUX_DEFAULT.*/GRUB_CMDLINE_LINUX_DEFAULT=\"nomodeset\"/g /etc/default/grub ; update-grub;')
            self.logger.info('command executed  %s' % grub_cmd)
            self.inputs.run_cmd_on_server(ip, grub_cmd)
            # Blacklist the Intel MEI modules: required for hardware
            # initialization failure on the next boot.
            for mei_cmd in (
                    'echo  "blacklist mei" > /etc/modprobe.d/mei.conf;',
                    'echo  "blacklist mei_me" > /etc/modprobe.d/mei_me.conf;',
                    'if ! grep -Rq "mei_me" /etc/modprobe.d/blacklist.conf ; then echo "blacklist mei_me" >> /etc/modprobe.d/blacklist.conf; fi ;'):
                self.inputs.run_cmd_on_server(ip, mei_cmd)

        ipmi_addr = self.get_ipmi_address(ip)
        power_cmd = 'ipmitool -H "%s" -U %s -P %s chassis power "%s"' % (
            ipmi_addr, self.inputs.ipmi_username,
            self.inputs.ipmi_password, option)
        self.logger.info('command executed  %s' % power_cmd)
        local(power_cmd)
        # clear the fab connections
        sleep(20)
        self.connections.update_inspect_handles()
        fab_connections.clear()
        sleep(420)
        return True
Example #26
0
def run_cmd_through_node(host_string, cmd, password=None, gateway=None,
                         gateway_password=None, with_sudo=False, timeout=120,
                         as_daemon=False, raw=False, cd=None, warn_only=True,
                         logger=None):
    """ Run command on remote node through another node (gateway).
        This is useful to run commands on VMs through compute node
    Args:
        host_string: host_string on which the command to run
        password: Password
        cmd: command
        gateway: host_string of the node through which host_string will connect
        gateway_password: Password of gateway hoststring
        with_sudo: use Sudo
        timeout: timeout
        cd: change directory to provided parameter
        as_daemon: run in background
        raw: If raw is True, will return the fab _AttributeString object itself without removing any unwanted output
        warn_only: run fab with warn_only (do not abort on non-zero exit)
        logger: logger to use; a module-level logger is created when None
    Returns:
        Command output (cleaned via remove_unwanted_output unless raw=True);
        None when the command timed out without producing output.
    """
    logger = logger or contrail_logging.getLogger(__name__)
    # Drop cached fabric connections so a stale handle is never reused.
    fab_connections.clear()
    kwargs = {}
    if as_daemon:
        # Background the command via nohup; pty disabled so the
        # daemonized process is not tied to the closing session.
        cmd = 'nohup ' + cmd + ' &'
        kwargs['pty']=False

    if cd:
        cmd = 'cd %s; %s' % (cd, cmd)

    (username, host_ip) = host_string.split('@')

    # root never needs sudo.
    if username == 'root':
        with_sudo = False

    shell = '/bin/bash -l -c'

    # cirros images ship only /bin/sh.
    if username == 'cirros':
        shell = '/bin/sh -l -c'

    _run = safe_sudo if with_sudo else safe_run

    #with hide('everything'), settings(host_string=host_string,
    with settings(host_string=host_string,
                                      gateway=gateway,
                                      warn_only=warn_only,
                                      shell=shell,
                                      disable_known_hosts=True,
                                      abort_on_prompts=False):
        # Forward the ssh agent so the hop from gateway to target can
        # authenticate with the test node's keys.
        env.forward_agent = True
        # Normalize both host strings to user@ip:port form, which is the
        # key format fabric's env.passwords lookup expects.
        gateway_hoststring = gateway if re.match(r'\w+@[\d\.]+:\d+', gateway) else gateway + ':22'
        node_hoststring = host_string if re.match(r'\w+@[\d\.]+:\d+', host_string) else host_string + ':22'
        if password:
            env.passwords.update({node_hoststring: password})
            # If gateway_password is not set, guess same password
            # (if key is used, it will be tried before password)
            if not gateway_password:
                env.passwords.update({gateway_hoststring: password})

        if gateway_password:
            env.passwords.update({gateway_hoststring: gateway_password})
            if not password:
                env.passwords.update({node_hoststring: gateway_password})

        logger.debug(cmd)
        # tries starts at 1, so a 'Fatal error' in the output ends the
        # loop after one decrement (no actual retry); a CommandTimeout
        # leaves output as None and simply falls through to break.
        tries = 1
        output = None
        while tries > 0:
            try:
                output = _run(cmd, timeout=timeout, **kwargs)
            except CommandTimeout:
                pass
            if (output) and ('Fatal error' in output):
                tries -= 1
                time.sleep(5)
            else:
                break
        # end while

        if not raw:
            real_output = remove_unwanted_output(output)
        else:
            real_output = output
        return real_output
Example #27
0
def remote_cmd(host_string, cmd, password=None, gateway=None,
               gateway_password=None, with_sudo=False, timeout=120,
               as_daemon=False, raw=False, cwd=None, warn_only=True, tries=1,
               pidfile=None, logger=None, abort_on_prompts=True):
    """ Run command on remote node.
    remote_cmd method to be used to run command on any remote nodes - whether it
    is a remote server or VM or between VMs. This method has capability to
    handle:

    1. run remote command on remote server from test node
    2. Run remote command on node-a through node-b from the test node
      * in this case node-a is the target node, node-b is gateway, and the
    nodes will be connect from testnode
      * This is to avoid situation to login to remote node (node-b in this
        case) and run script (fab script or pexpect or any such code) on
        that remote node (node-b) against running command on target node
        (node-a)
    3. Run remote command on VM thorugh compute node - Same usecase as of #2
    4. Run remote commands between VMs - say copy a file from vm1 to vm2
    through compute node of vm1.
      * This will use ssh-agent forward to avoid copying ssh private keys to
        subsequent servers - Previously we used to copy ssh private keys to
        compute node and then copy the same file to vm1 in order to be able
        to connect from vm1 to vm2.
      * The commands will be running sitting on the test node then run an
      * "ssh/scp" command on vm1 through compute node of vm1 with
        agent_forward on
      * in this case flow is like this: test_node ->
        compute_of_vm1(gateway - passthrough) -> vm1 (run ssh/scp there) ->
        vm2 (final command is run)

    Args:
        tries: Number of retries in case of failure
        host_string: host_string on which the command to run
        password: Password
        cmd: command
        gateway: host_string of the node through which host_string will connect
        gateway_password: Password of gateway hoststring
        with_sudo: use Sudo
        timeout: timeout
        cwd: change directory to provided parameter
        as_daemon: run in background
        warn_only: run fab with warn_only
        raw: If raw is True, will return the fab _AttributeString object itself without removing any unwanted output
        pidfile : When run in background, use pidfile to store the pid of the
                  running process
        abort_on_prompts : Run command with abort_on_prompts set to True
                           Note that this SystemExit does get caught and
                           counted against `tries`
    """
    if not logger:
        logger = contrail_logging.getLogger(__name__)
    # NOTE(review): the two adjacent string literals below concatenate
    # without a separator, so the message reads "password: Xgateway: ..."
    # — runtime string left untouched here.
    logger.debug('Running remote_cmd, Cmd : %s, host_string: %s, password: %s'
        'gateway: %s, gateway password: %s' %(cmd, host_string, password,
            gateway, gateway_password))
    # Drop cached fabric connections so a stale handle is never reused.
    fab_connections.clear()
    if as_daemon:
        # Background via nohup; optionally record the background pid.
        cmd = 'nohup ' + cmd + ' & '
        if pidfile:
            cmd = '%s echo $! > %s' % (cmd, pidfile)

    if cwd:
        cmd = 'cd %s; %s' % (cwd, cmd)

    (username, host_ip) = host_string.split('@')

    # root never needs sudo.
    if username == 'root':
        with_sudo = False

    shell = '/bin/bash -l -c'

    # cirros images ship only /bin/sh.
    if username == 'cirros':
        shell = '/bin/sh -l -c'

    # For tiny images, running the commnads in sudo requires this
    if username == 'tc' and with_sudo:
        shell = False

    _run = sudo if with_sudo else run

    # with hide('everything'), settings(host_string=host_string,
    #with hide('everything'), settings(
    with settings(
            host_string=host_string,
            gateway=gateway,
            warn_only=warn_only,
            shell=shell,
            disable_known_hosts=True,
            abort_on_prompts=abort_on_prompts):
        update_env_passwords(host_string, password, gateway, gateway_password)

        logger.debug(cmd)
        output = None
        # Retry up to `tries` times on timeout / network error / fabric
        # prompt-abort (SystemExit), or when the output carries the
        # fabric 'Fatal error' marker.
        while tries > 0:
            try:
                # pty off for daemonized commands so the background
                # process survives the session teardown.
                output = _run(cmd, timeout=timeout, pty=not as_daemon, shell=shell)
            except (CommandTimeout, NetworkError, SystemExit) as e:
                logger.exception('Unable to run command %s: %s' % (cmd, str(e)))
                tries -= 1
                time.sleep(5)
                continue

            if output and 'Fatal error' in output:
                tries -= 1
                time.sleep(5)
            else:
                break
        # end while

        if raw:
            real_output = output
        else:
            real_output = remove_unwanted_output(output)

        return real_output
Example #28
0
def remote_copy(src, dest, src_password=None, src_gw=None, src_gw_password=None,
                dest_password=None, dest_gw=None, dest_gw_password=None,
                with_sudo=False, warn_only=True):
    """ Copy a file or directory between local and/or remote endpoints.

    An endpoint starting with '/' is a local path; 'node:/path' is a
    remote spec (reached through the optional gateway host_string, e.g.
    a VM through its compute node). Remote-to-remote copies are staged
    through a local temporary directory.

    Args:
        src: '/tmp/source_file' (local) or 'node1:/tmp/src_dir' (remote)
        dest: '/tmp/' (local) or 'node1:/tmp/dest_dir' (remote)
        src_password: source node password if required
        src_gw: host_string of the node through which source connects
        src_gw_password: src_gw password if required
        dest_password: destination node password if required
        dest_gw: host_string of the node through which destination connects
        dest_gw_password: dest_gw password if required
        with_sudo: check remote source existence with sudo
        warn_only: run fab with warn_only
    """
    # Stale cached fabric connections must not leak into settings() below.
    fab_connections.clear()

    # --- classify destination ---
    if re.match(r"^[\t\s]*/", dest):
        dest_node = None
        dest_path = dest
    elif re.match(r"^.*:", dest):
        dest_parts = dest.split(':')
        dest_node = dest_parts[0]
        dest_path = dest_parts[1]
    else:
        raise AttributeError("Invalid destination path - %s " % dest)

    # --- classify source ---
    if re.match(r"^[\t\s]*/", src):
        if not os.path.exists(src):
            raise IOError("Source not found - %s No such file or directory" % src)
        src_node = None
        src_path = src
    elif re.match(r"^.*:", src):
        src_parts = src.split(':')
        src_node = src_parts[0]
        src_path = src_parts[1]
    else:
        raise AttributeError("Invalid source path - %s" % src)

    if src_node:
        # Fetch from the remote source (possibly via its gateway).
        with settings(host_string=src_node, gateway=src_gw,
                      warn_only=warn_only, disable_known_hosts=True,
                      abort_on_prompts=False):
            update_env_passwords(src_node, src_password, src_gw, src_gw_password)
            try:
                if not exists(src_path, use_sudo=with_sudo):
                    # Keeps the historical message, which interpolates
                    # the split [node, path] list.
                    raise IOError("Source not found - %s No such file or directory" % src_parts)
                if dest_node:
                    # Remote-to-remote: stage through a local temp dir.
                    staging_dir = tempfile.mkdtemp()
                    get(src_path, staging_dir, use_sudo=True)
                    src_path = os.path.join(staging_dir, os.listdir(staging_dir)[0])
                else:
                    # Remote source, local destination: done after the get.
                    get(src_path, dest_path, use_sudo=True)
                    return True
            except NetworkError:
                pass

    if dest_node:
        # Push the (local or staged) source to the remote destination.
        with settings(host_string=dest_node, gateway=dest_gw,
                      warn_only=warn_only, disable_known_hosts=True,
                      abort_on_prompts=False):
            update_env_passwords(dest_node, dest_password, dest_gw, dest_gw_password)
            try:
                put(src_path, dest_path, use_sudo=True)
                return True
            except NetworkError:
                pass
    else:
        # Both endpoints are local: plain recursive copy.
        local("cp -r %s %s" % (src_path, dest_path))
        return True
Example #29
0
    def test_upgrade_only(self):
        """ Upgrade contrail software from the existing build to a new build
        (package staged under /tmp/temp on the cfgm node), verify every
        contrail module reports the new build id, then power on any
        SHUTOFF resource VMs.
        """
        result = True

        # cfgm and compute roles must be on separate nodes for this test.
        if set(self.inputs.compute_ips) & set(self.inputs.cfgm_ips):
            raise self.skipTest("Skipping Test. Cfgm and Compute nodes should be different to run  this test case")
        self.logger.info("STARTING UPGRADE")
        username = self.inputs.host_data[self.inputs.cfgm_ip]["username"]
        password = self.inputs.host_data[self.inputs.cfgm_ip]["password"]
        with settings(
            host_string="%s@%s" % (username, self.inputs.cfgm_ip),
            # BUG FIX: 'password' was passed positionally after a keyword
            # argument, which is a SyntaxError; it must be a keyword.
            password=password,
            warn_only=True,
            abort_on_prompts=False,
            debug=True,
        ):
            # The new contrail-install-packages artifact must already be
            # staged under /tmp/temp on the cfgm node.
            status = run("cd /tmp/temp/;ls")
            self.logger.debug("%s" % status)

            # group(0) is the package file name; group(3) says deb vs rpm.
            m = re.search(r"contrail-install-packages(-|_)(.*)(_all.deb|.noarch.rpm)", status)
            assert m, "Failed in importing rpm"
            rpms = m.group(0)
            rpm_type = m.group(3)

            if re.search(r"noarch.rpm", rpm_type):
                status = run("yum -y localinstall /tmp/temp/" + rpms)
                self.logger.debug("LOG for yum -y localinstall command: \n %s" % status)
                assert not (status.return_code), "Failed in running: yum -y localinstall /tmp/temp/" + rpms

            else:
                status = run("dpkg -i /tmp/temp/" + rpms)
                self.logger.debug("LOG for dpkg -i debfile  command: \n %s" % status)
                assert not (status.return_code), "Failed in running: dpkg -i /tmp/temp/" + rpms

            # setup.sh unpacks the contrail fab utilities used below.
            status = run("cd /opt/contrail/contrail_packages;./setup.sh")
            self.logger.debug("LOG for /opt/contrail/contrail_packages;./setup.sh command: \n %s" % status)
            assert not (status.return_code), "Failed in running : cd /opt/contrail/contrail_packages;./setup.sh"

            # BUG FIX: 'base_rel' was an undefined name here; the sibling
            # upgrade test reads it from self.res.base_rel — confirm that
            # attribute is the intended source of the from-release.
            upgrade_cmd = "cd /opt/contrail/utils;fab upgrade_contrail:%s,/tmp/temp/%s" % (self.res.base_rel, rpms)
            status = run(upgrade_cmd)
            self.logger.debug("LOG for fab upgrade_contrail command: \n %s" % status)
            assert not (status.return_code), (
                "Failed in running : cd /opt/contrail/utils;fab upgrade_contrail:/tmp/temp/" + rpms
            )

            # Extract the numeric build id from the package name and check
            # that every contrail module (dashboard excluded) reports it.
            m = re.search("contrail-install-packages(.*)([0-9]{3,4})(.*)(_all.deb|.el6.noarch.rpm)", rpms)
            build_id = m.group(2)
            status = run("contrail-version | awk '{if (NR!=1 && NR!=2) {print $1, $2, $3}}'")
            self.logger.debug("contrail-version :\n %s" % status)
            assert not (status.return_code)
            lists = status.split("\r\n")
            for module in lists:
                success = re.search(build_id, module)
                if not success:
                    # Non-contrail rows (and the dashboard package) are
                    # not expected to carry the new build id.
                    contrail_mod = re.search("contrail-", module) and not (
                        re.search("contrail-openstack-dashboard", module)
                    )

                    if not contrail_mod:
                        success = True
                result = result and success
                if not (result):
                    self.logger.error(" Failure while upgrading " + module + "should have upgraded to " + build_id)
                    assert result, "Failed to Upgrade " + module

            if result:
                self.logger.info("Successfully upgraded all modules")

            # Let services settle, then drop cached connections — handles
            # are stale after the upgrade.
            time.sleep(90)
            connections.clear()
            self.logger.info("Will REBOOT the SHUTOFF VMs")
            for vm in self.nova_h.get_vm_list():
                if vm.status != "ACTIVE":
                    self.logger.info("Will Power-On %s" % vm.name)
                    vm.start()
                    self.nova_h.wait_till_vm_is_active(vm)

            # Clean up the staged package and the generated testbed file.
            run("rm -rf /tmp/temp")
            run("rm -rf /opt/contrail/utils/fabfile/testbeds/testbed.py")

        return result
Example #30
0
    def test_ecmp_svc_in_network_with_3_instance_service_restarts(self):
        """
        Description: Validate ECMP after restarting control and vrouter
        services with service chaining in-network mode datapath having
        service instance.
        Test steps:
            1. Create vm1 and vm2 in networks vn1 and vn2.
            2. Create a service instance in in-network mode with 3 instances.
            3. Create a service chain by applying the service instance as a
               service in a policy between the VNs.
            4. Check for ping and traffic between vm1 and vm2.
        Pass criteria: Ping between the VMs should be successful and TCP
        traffic should reach vm2 from vm1 and vice-versa even after the
        restarts.
        Maintainer : [email protected]
        """
        # Bring up the in-network chain with three service VMs.
        chain = self.verify_svc_chain(max_inst=3,
                                      service_mode='in-network',
                                      create_svms=True,
                                      **self.common_args)
        svc_instance = chain['si_fixture']
        instance_ids = svc_instance.svm_ids
        receivers = [self.right_vm_fixture]

        # Baseline: routes present and traffic flows before any restart.
        self.get_rt_info_tap_intf_list(
            self.left_vn_fixture, self.left_vm_fixture, self.right_vm_fixture,
            instance_ids, svc_instance)
        self.verify_traffic_flow(self.left_vm_fixture, receivers,
            svc_instance, self.left_vn_fixture)

        # Bounce the vrouter agent on every compute node.
        for node_ip in self.inputs.compute_ips:
            self.inputs.restart_service('contrail-vrouter', [node_ip],
                                        container='agent')

        # Wait for service stability
        status_checker = ContrailStatusChecker()
        is_stable, bad_nodes = status_checker.wait_till_contrail_cluster_stable(
                                   self.inputs.compute_ips)
        assert is_stable, 'Hash of error nodes and services : %s' % (
                              bad_nodes)

        self.left_vm_fixture.wait_till_vm_is_up()
        self.right_vm_fixture.wait_till_vm_is_up()

        # Re-check routes/traffic; drop stale ssh handles first.
        self.get_rt_info_tap_intf_list(
            self.left_vn_fixture, self.left_vm_fixture, self.right_vm_fixture,
            instance_ids, svc_instance)
        fab_connections.clear()
        self.verify_traffic_flow(self.left_vm_fixture, receivers,
            svc_instance, self.left_vn_fixture)

        # Bounce contrail-control on every control node.
        for ctrl_ip in self.inputs.bgp_ips:
            self.inputs.restart_service('contrail-control', [ctrl_ip],
                                        container='control')

        is_stable, bad_nodes = status_checker.wait_till_contrail_cluster_stable(
                                   self.inputs.bgp_ips)
        assert is_stable, 'Hash of error nodes and services : %s' % (
                              bad_nodes)

        self.left_vm_fixture.wait_till_vm_is_up()
        self.right_vm_fixture.wait_till_vm_is_up()

        # Final verification after the control restarts.
        self.get_rt_info_tap_intf_list(
            self.left_vn_fixture, self.left_vm_fixture, self.right_vm_fixture,
            instance_ids, svc_instance)

        fab_connections.clear()
        self.verify_traffic_flow(self.left_vm_fixture, receivers,
            svc_instance, self.left_vn_fixture)
Example #31
0
    def test_to_upgrade(self):
        '''Test to upgrade contrail software from existing build to new build and then rebooting resource vm's
        '''
        result = True

        # cfgm and compute roles must be on separate nodes for this test.
        if(set(self.inputs.compute_ips) & set(self.inputs.cfgm_ips)):
            raise self.skipTest(
                "Skipping Test. Cfgm and Compute nodes should be different to run  this test case")
        self.logger.info("STARTING UPGRADE")
        username = self.inputs.host_data[self.inputs.cfgm_ip]['username']
        password = self.inputs.host_data[self.inputs.cfgm_ip]['password']
        with settings(
            host_string='%s@%s' % (
                username, self.inputs.cfgm_ips[0]),
                password = password, warn_only=True, abort_on_prompts=False, debug=True):
            # The new contrail-install-packages artifact is expected to be
            # staged under /tmp/temp on the cfgm node beforehand.
            status = run("cd /tmp/temp/;ls")
            self.logger.debug("%s" % status)

            # group(0) is the package file name; group(3) says deb vs rpm.
            m = re.search(
                r'contrail-install-packages(-|_)(.*)(_all.deb|.noarch.rpm)', status)
            assert m, 'Failed in importing rpm'
            rpms = m.group(0)
            rpm_type = m.group(3)

            if re.search(r'noarch.rpm', rpm_type):
                status = run("yum -y localinstall /tmp/temp/" + rpms)
                self.logger.debug(
                    "LOG for yum -y localinstall command: \n %s" % status)
                assert not(
                    status.return_code), 'Failed in running: yum -y localinstall /tmp/temp/' + rpms

            else:
                status = run("dpkg -i /tmp/temp/" + rpms)
                self.logger.debug(
                    "LOG for dpkg -i debfile  command: \n %s" % status)
                assert not(
                    status.return_code), 'Failed in running: dpkg -i /tmp/temp/' + rpms

            # setup.sh unpacks the contrail fab utilities used below.
            status = run("cd /opt/contrail/contrail_packages;./setup.sh")
            self.logger.debug(
                "LOG for /opt/contrail/contrail_packages;./setup.sh command: \n %s" % status)
            assert not(
                status.return_code), 'Failed in running : cd /opt/contrail/contrail_packages;./setup.sh'

            # fab upgrade_contrail:<from-release>,<package> runs the upgrade.
            status = run("cd /opt/contrail/utils" + ";" +
                         "fab upgrade_contrail:%s,/tmp/temp/%s" % (self.res.base_rel, rpms))
            self.logger.debug(
                "LOG for fab upgrade_contrail command: \n %s" % status)
            assert not(
                status.return_code), 'Failed in running : cd /opt/contrail/utils;fab upgrade_contrail:/tmp/temp/' + rpms

            # Extract the numeric build id from the package name, then list
            # installed contrail modules (dashboard excluded) and check
            # that every one of them carries the new build id.
            m = re.search(
                'contrail-install-packages(.*)([0-9]{2,4})(.*)(_all.deb|.el6.noarch.rpm)', rpms)
            build_id = m.group(2)
            status = run(
                "contrail-version | grep contrail- | grep -v contrail-openstack-dashboard | awk '{print $1, $2, $3}'")
            self.logger.debug("contrail-version :\n %s" % status)
            assert not(status.return_code)
            lists = status.split('\r\n')
            for module in lists:
                success = re.search(build_id, module)
                result = result and success
                if not (result):
                    self.logger.error(' Failure while upgrading ' +
                                      module + 'should have upgraded to ' + build_id)
                    assert result, 'Failed to Upgrade ' + module

            if result:
                self.logger.info("Successfully upgraded all modules")

            # Let services settle, then drop cached connections — handles
            # are stale after the upgrade.
            time.sleep(90)
            connections.clear()
            self.logger.info('Will REBOOT the SHUTOFF VMs')
            for vm in self.nova_h.get_vm_list():
                if vm.status != 'ACTIVE':
                    self.logger.info('Will Power-On %s' % vm.name)
                    vm.start()
                    self.nova_h.wait_till_vm_is_active(vm)

            # Clean up the staged package and the generated testbed file.
            run("rm -rf /tmp/temp")
            run("rm -rf /opt/contrail/utils/fabfile/testbeds/testbed.py")

        return result
Example #32
0
    def test_ecmp_svc_in_network_with_3_instance_service_restarts(self):
        """
        Description: Validate ECMP after restarting control and vrouter services with service chainin
        g in-network mode datapath having service instance
        Test steps:
                   1.Creating vm's - vm1 and vm2 in networks vn1 and vn2.
                   2.Creating a service instance in in-network mode with 3 instances.
                   3.Creating a service chain by applying the service instance as a service in a po
        licy between the VNs.
                   4.Checking for ping and traffic between vm1 and vm2.
        Pass criteria: Ping between the VMs should be successful and TCP traffic should reach vm2 fr
        om vm1 and vice-versa even after the restarts.
        Maintainer : [email protected]
        """
        # Bring up the in-network chain with three service VMs.
        ret_dict = self.verify_svc_chain(max_inst=3,
                                         service_mode='in-network',
                                         create_svms=True,
                                         **self.common_args)
        si_fixture = ret_dict['si_fixture']
        svm_ids = si_fixture.svm_ids
        # Baseline: routes present and traffic flows before any restart.
        self.get_rt_info_tap_intf_list(self.left_vn_fixture,
                                       self.left_vm_fixture,
                                       self.right_vm_fixture, svm_ids,
                                       si_fixture)
        dst_vm_list = [self.right_vm_fixture]
        self.verify_traffic_flow(self.left_vm_fixture, dst_vm_list, si_fixture,
                                 self.left_vn_fixture)
        # Bounce the vrouter agent on every compute node.
        for compute_ip in self.inputs.compute_ips:
            self.inputs.restart_service('contrail-vrouter', [compute_ip],
                                        container='agent')

        # Wait for service stability
        cs_checker = ContrailStatusChecker(self.inputs)
        cluster_status, error_nodes = cs_checker.wait_till_contrail_cluster_stable(
            self.inputs.compute_ips)
        assert cluster_status, 'Hash of error nodes and services : %s' % (
            error_nodes)

        self.left_vm_fixture.wait_till_vm_is_up(refresh=True)
        self.right_vm_fixture.wait_till_vm_is_up(refresh=True)

        # Re-check routes/traffic; stale ssh handles dropped first.
        self.get_rt_info_tap_intf_list(self.left_vn_fixture,
                                       self.left_vm_fixture,
                                       self.right_vm_fixture, svm_ids,
                                       si_fixture)
        fab_connections.clear()
        self.verify_traffic_flow(self.left_vm_fixture, dst_vm_list, si_fixture,
                                 self.left_vn_fixture)
        # Bounce contrail-control on every control node.
        for bgp_ip in self.inputs.bgp_ips:
            self.inputs.restart_service('contrail-control', [bgp_ip],
                                        container='control')

        cluster_status, error_nodes = cs_checker.wait_till_contrail_cluster_stable(
            self.inputs.bgp_ips)
        assert cluster_status, 'Hash of error nodes and services : %s' % (
            error_nodes)

        # NOTE(review): unlike the vrouter phase above, this final check
        # has no wait_till_vm_is_up and no fab_connections.clear() before
        # the traffic verification — confirm that is intentional.
        self.get_rt_info_tap_intf_list(self.left_vn_fixture,
                                       self.left_vm_fixture,
                                       self.right_vm_fixture, svm_ids,
                                       si_fixture)

        self.verify_traffic_flow(self.left_vm_fixture, dst_vm_list, si_fixture,
                                 self.left_vn_fixture)
Example #33
0
def remote_copy(src,
                dest,
                src_password=None,
                src_gw=None,
                src_gw_password=None,
                dest_password=None,
                dest_gw=None,
                dest_gw_password=None,
                with_sudo=False,
                warn_only=True):
    """ Copy files/folders to remote server or VM (in case of VM,
        copy will happen through gateway node - i.e compute node)

    Args:
        src: source can be remote or local
                in case of remote node, it should be in the form of
                    node1:/tmp/source_directory
                in case of local node, it can be just a file/directory path
                    /tmp/source_file
        dest: Can be remote or local
                in case of remote node, it should be in the form of
                    node1:/tmp/destination_directory
                in case of local node, it can be just a file/directory path
                    /tmp/
        src_password: source node password if required
        src_gw: host_string of the node through which source will be connecting
        src_gw_password: src_gw password if required
        dest_password: destination node password if required
        dest_gw: host_string of the node through which destination will be connecting
        dest_gw_password: src_gw password if required
        with_sudo: use Sudo
        warn_only: run fab with warn_only
    Returns:
        True on success; None when a NetworkError was swallowed on the
        remote destination leg.
    """
    # Stale cached fabric connections must not leak into settings() below.
    fab_connections.clear()

    # dest is local file path
    if re.match(r"^[\t\s]*/", dest):
        dest_node = None
        dest_path = dest
    # dest is remote path
    elif re.match(r"^.*:", dest):
        # NOTE: re.split(':') splits on every colon; only the first two
        # parts (node, path) are used.
        dest = re.split(':', dest)
        dest_node = dest[0]
        dest_path = dest[1]
    else:
        raise AttributeError("Invalid destination path - %s " % dest)

    # src is local file path
    if re.match(r"^[\t\s]*/", src):
        if os.path.exists(src):
            src_node = None
            src_path = src
        else:
            raise IOError("Source not found - %s No such file or directory" %
                          src)
    # src is remote path
    elif re.match(r"^.*:", src):
        src = re.split(':', src)
        src_node = src[0]
        src_path = src[1]
    else:
        raise AttributeError("Invalid source path - %s" % src)

    if src_node:
        # Source is remote
        with settings(host_string=src_node,
                      gateway=src_gw,
                      warn_only=warn_only,
                      disable_known_hosts=True,
                      abort_on_prompts=False):
            update_env_passwords(src_node, src_password, src_gw,
                                 src_gw_password)
            try:
                if exists(src_path, use_sudo=with_sudo):
                    if dest_node:
                        # Both source and destination are remote
                        local_dest = tempfile.mkdtemp()
                        get(src_path, local_dest, use_sudo=True)
                        src_path = os.path.join(local_dest,
                                                os.listdir(local_dest)[0])
                    else:
                        # Source is remote and destination is local
                        # Copied to destination
                        get(src_path, dest_path, use_sudo=True)
                        return True
                else:
                    # NOTE(review): src was reassigned to the re.split list
                    # above, so this message interpolates the [node, path]
                    # list rather than the original spec.
                    raise IOError(
                        "Source not found - %s No such file or directory" %
                        src)
            except NetworkError:
                # Best-effort: fall through to the destination leg.
                pass

    if dest_node:
        # Source is either local or remote
        with settings(host_string=dest_node,
                      gateway=dest_gw,
                      warn_only=warn_only,
                      disable_known_hosts=True,
                      abort_on_prompts=False):
            update_env_passwords(dest_node, dest_password, dest_gw,
                                 dest_gw_password)
            try:
                put(src_path, dest_path, use_sudo=True)
                return True
            except NetworkError:
                # Best-effort: swallowed; caller sees None return.
                pass
    else:
        # Both are local
        local("cp -r %s %s" % (src_path, dest_path))
        return True
Example #34
0
def remote_cmd(host_string,
               cmd,
               password=None,
               gateway=None,
               gateway_password=None,
               with_sudo=False,
               timeout=120,
               as_daemon=False,
               raw=False,
               cwd=None,
               warn_only=True,
               tries=1,
               pidfile=None,
               logger=None):
    """ Run command on remote node.
    remote_cmd method to be used to run command on any remote nodes - whether it
    is a remote server or VM or between VMs. This method has capability to
    handle:

    1. run remote command on remote server from test node
    2. Run remote command on node-a through node-b from the test node
      * in this case node-a is the target node, node-b is gateway, and the
    nodes will be connect from testnode
      * This is to avoid situation to login to remote node (node-b in this
        case) and run script (fab script or pexpect or any such code) on
        that remote node (node-b) against running command on target node
        (node-a)
    3. Run remote command on VM through compute node - Same usecase as of #2
    4. Run remote commands between VMs - say copy a file from vm1 to vm2
    through compute node of vm1.
      * This will use ssh-agent forward to avoid copying ssh private keys to
        subsequent servers - Previously we used to copy ssh private keys to
        compute node and then copy the same file to vm1 in order to be able
        to connect from vm1 to vm2.
      * The commands will be running sitting on the test node then run an
      * "ssh/scp" command on vm1 through compute node of vm1 with
        agent_forward on
      * in this case flow is like this: test_node ->
        compute_of_vm1(gateway - passthrough) -> vm1 (run ssh/scp there) ->
        vm2 (final command is run)

    Args:
        tries: Number of retries in case of failure
        host_string: host_string on which the command to run
        password: Password
        cmd: command
        gateway: host_string of the node through which host_string will connect
        gateway_password: Password of gateway hoststring
        with_sudo: use Sudo
        timeout: timeout
        cwd: change directory to provided parameter
        as_daemon: run in background
        warn_only: run fab with warn_only
        raw: If raw is True, will return the fab _AttributeString object itself without removing any unwanted output
        pidfile : When run in background, use pidfile to store the pid of the
                  running process
    """
    if not logger:
        logger = contrail_logging.getLogger(__name__)
    # Mask credentials so they never land in the debug log; the previous
    # format string also lacked a separator between the password and
    # gateway fields ("password: %sgateway: %s").
    logger.debug('Running remote_cmd, Cmd : %s, host_string: %s, '
                 'password: %s, gateway: %s, gateway password: %s' %
                 (cmd, host_string,
                  '****' if password else None,
                  gateway,
                  '****' if gateway_password else None))
    # Drop any cached fab connections; they may point at stale sessions.
    fab_connections.clear()
    kwargs = {}
    if as_daemon:
        # Detach the command and disable pty allocation so fabric does
        # not kill the background process when the session closes.
        cmd = 'nohup ' + cmd + ' & '
        kwargs.update({'pty': False})
        if pidfile:
            # cmd already ends with '& ', so this appends a foreground
            # echo that records the pid of the daemonized process.
            cmd = '%s echo $! > %s' % (cmd, pidfile)

    if cwd:
        cmd = 'cd %s; %s' % (cwd, cmd)

    (username, host_ip) = host_string.split('@')

    # root needs no sudo; forcing it off avoids pointless sudo prompts.
    if username == 'root':
        with_sudo = False

    shell = '/bin/bash -l -c'

    # cirros images ship busybox only; bash is unavailable there.
    if username == 'cirros':
        shell = '/bin/sh -l -c'

    _run = sudo if with_sudo else run

    with hide('everything'), settings(host_string=host_string,
                                      gateway=gateway,
                                      warn_only=warn_only,
                                      shell=shell,
                                      disable_known_hosts=True,
                                      abort_on_prompts=False):
        update_env_passwords(host_string, password, gateway, gateway_password)

        logger.debug(cmd)
        output = None
        while tries > 0:
            try:
                output = _run(cmd, timeout=timeout, **kwargs)
            except (CommandTimeout, NetworkError) as e:
                logger.warn('Unable to run command %s: %s' % (cmd, str(e)))
                tries -= 1
                time.sleep(5)
                continue

            # Fabric surfaces some remote-side failures only in the
            # command output; retry those as well.
            if output and 'Fatal error' in output:
                tries -= 1
                time.sleep(5)
            else:
                break
        # end while

        # NOTE(review): if every try fails, output stays None and is
        # passed to remove_unwanted_output() as-is (original behavior).
        if raw:
            real_output = output
        else:
            real_output = remove_unwanted_output(output)

        return real_output
    def test_upgrade_only(self):
        ''' Upgrade contrail software on the cfgm node from the existing
        build to the new build staged under /tmp/temp, verify that every
        contrail module reports the new build id, then power on any
        SHUTOFF VMs.

        Returns:
            True on success; asserts on any failed upgrade step.
        '''
        result = True

        # The upgrade flow assumes dedicated roles; a combined
        # cfgm/compute node is not supported by this test.
        if(set(self.inputs.compute_ips) & set(self.inputs.cfgm_ips)):
            raise self.skipTest(
                "Skipping Test. Cfgm and Compute nodes should be different to run  this test case")
        self.logger.info("STARTING UPGRADE")
        username = self.inputs.host_data[self.inputs.cfgm_ip]['username']
        password = self.inputs.host_data[self.inputs.cfgm_ip]['password']
        # BUG FIX: 'password' was passed positionally after the keyword
        # argument 'host_string', which is a SyntaxError in Python; it
        # must be passed by keyword.
        with settings(
                host_string='%s@%s' % (username, self.inputs.cfgm_ip),
                password=password, warn_only=True, abort_on_prompts=False,
                debug=True):
            status = run("cd /tmp/temp/;ls")
            self.logger.debug("%s" % status)

            # Locate the staged contrail-install package (deb or rpm).
            m = re.search(
                r'contrail-install-packages(-|_)(.*)(_all.deb|.noarch.rpm)', status)
            assert m, 'Failed in importing rpm'
            rpms = m.group(0)
            rpm_type = m.group(3)

            if re.search(r'noarch.rpm', rpm_type):
                status = run("yum -y localinstall /tmp/temp/" + rpms)
                self.logger.debug(
                    "LOG for yum -y localinstall command: \n %s" % status)
                assert not(
                    status.return_code), 'Failed in running: yum -y localinstall /tmp/temp/' + rpms

            else:
                status = run("dpkg -i /tmp/temp/" + rpms)
                self.logger.debug(
                    "LOG for dpkg -i debfile  command: \n %s" % status)
                assert not(
                    status.return_code), 'Failed in running: dpkg -i /tmp/temp/' + rpms

            status = run("cd /opt/contrail/contrail_packages;./setup.sh")
            self.logger.debug(
                "LOG for /opt/contrail/contrail_packages;./setup.sh command: \n %s" % status)
            assert not(
                status.return_code), 'Failed in running : cd /opt/contrail/contrail_packages;./setup.sh'

            # NOTE(review): base_rel is not defined in this method;
            # presumably a module-level constant - confirm it exists.
            upgrade_cmd = "cd /opt/contrail/utils;fab upgrade_contrail:%s,/tmp/temp/%s" % (base_rel, rpms)
            status = run(upgrade_cmd)
            self.logger.debug(
                "LOG for fab upgrade_contrail command: \n %s" % status)
            assert not(
                status.return_code), 'Failed in running : cd /opt/contrail/utils;fab upgrade_contrail:/tmp/temp/' + rpms

            # Extract the new build id (3-4 digit group) from the
            # package name to compare against installed module versions.
            m = re.search(
                'contrail-install-packages(.*)([0-9]{3,4})(.*)(_all.deb|.el6.noarch.rpm)', rpms)
            build_id = m.group(2)
            status = run(
                "contrail-version | awk '{if (NR!=1 && NR!=2) {print $1, $2, $3}}'")
            self.logger.debug("contrail-version :\n %s" % status)
            assert not(status.return_code)
            lists = status.split('\r\n')
            for module in lists:
                success = re.search(build_id, module)
                if not success:
                    # Only contrail-* packages (except the dashboard)
                    # are expected to carry the new build id.
                    contrail_mod = re.search(
                        'contrail-', module) and not(re.search('contrail-openstack-dashboard', module))

                    if not contrail_mod:
                        success = True
                result = result and success
                if not (result):
                    self.logger.error(' Failure while upgrading ' +
                                      module + 'should have upgraded to ' + build_id)
                    assert result, 'Failed to Upgrade ' + module

            if result:
                self.logger.info("Successfully upgraded all modules")

            time.sleep(90)
            # Consistency fix: this module clears the fabric connection
            # cache via fab_connections elsewhere; 'connections' is not
            # a name defined in this file.
            fab_connections.clear()
            self.logger.info('Will REBOOT the SHUTOFF VMs')
            for vm in self.nova_h.get_vm_list():
                if vm.status != 'ACTIVE':
                    self.logger.info('Will Power-On %s' % vm.name)
                    vm.start()
                    self.nova_h.wait_till_vm_is_active(vm)

            # Clean up the staged package and the generated testbed file.
            run("rm -rf /tmp/temp")
            run("rm -rf /opt/contrail/utils/fabfile/testbeds/testbed.py")

        return result
Example #36
0
def run_cmd_through_node(host_string,
                         cmd,
                         password=None,
                         gateway=None,
                         gateway_password=None,
                         with_sudo=False,
                         timeout=120,
                         as_daemon=False,
                         raw=False,
                         cd=None,
                         warn_only=True,
                         logger=None):
    """ Run command on remote node through another node (gateway).
        This is useful to run commands on VMs through compute node
    Args:
        host_string: host_string on which the command to run
        password: Password
        cmd: command
        gateway: host_string of the node through which host_string will connect
        gateway_password: Password of gateway hoststring
        with_sudo: use Sudo
        timeout: timeout
        cd: change directory to provided parameter
        as_daemon: run in background
        raw: If raw is True, will return the fab _AttributeString object itself without removing any unwanted output
    """
    if logger is None:
        logger = contrail_logging.getLogger(__name__)
    # Drop any cached fab connections before dialing through the gateway.
    fab_connections.clear()

    exec_kwargs = {}
    if as_daemon:
        # Detach the command; no pty so fabric leaves it running.
        cmd = 'nohup ' + cmd + ' &'
        exec_kwargs['pty'] = False

    if cd:
        cmd = 'cd %s; %s' % (cd, cmd)

    username, _host_ip = host_string.split('@')

    # root never needs sudo.
    if username == 'root':
        with_sudo = False

    # cirros images only provide a busybox /bin/sh.
    shell = '/bin/sh -l -c' if username == 'cirros' else '/bin/bash -l -c'

    executor = safe_sudo if with_sudo else safe_run

    with settings(host_string=host_string,
                  gateway=gateway,
                  warn_only=warn_only,
                  shell=shell,
                  disable_known_hosts=True,
                  abort_on_prompts=False):
        # Forward the local ssh agent so keys need not be copied around.
        env.forward_agent = True

        # Normalize both host strings to user@ip:port form for the
        # fabric password table.
        hoststring_pattern = r'\w+@[\d\.]+:\d+'
        gateway_hoststring = (gateway if re.match(hoststring_pattern, gateway)
                              else gateway + ':22')
        node_hoststring = (host_string
                           if re.match(hoststring_pattern, host_string)
                           else host_string + ':22')

        if password:
            env.passwords[node_hoststring] = password
            # If gateway_password is not set, guess same password
            # (if key is used, it will be tried before password)
            if not gateway_password:
                env.passwords[gateway_hoststring] = password

        if gateway_password:
            env.passwords[gateway_hoststring] = gateway_password
            if not password:
                env.passwords[node_hoststring] = gateway_password

        logger.debug(cmd)
        output = None
        attempts_left = 1
        while attempts_left > 0:
            try:
                output = executor(cmd, timeout=timeout, **exec_kwargs)
            except CommandTimeout:
                pass
            # Retry only when fabric reported a fatal error in-band.
            if output and 'Fatal error' in output:
                attempts_left -= 1
                time.sleep(5)
            else:
                break
        # end while

        return output if raw else remove_unwanted_output(output)
Example #37
0
    def verify_traffic_flow(self, src_vm, dst_vm):
        '''Send three parallel TCP streams from src_vm to dst_vm, verify a
        reverse flow record appears on some agent, and check for packet loss.

        Returns True on success. If either VM never comes up, returns a
        dict {'result': False, 'msg': ...} instead (NOTE(review):
        inconsistent return type - callers must handle both shapes).
        Asserts if packet loss is seen or no reverse flow is found.
        '''
        # Drop cached fab connections before talking to the VMs.
        fab_connections.clear()
        vm_list = [src_vm, dst_vm]
        for vm in vm_list:
            out = vm.wait_till_vm_is_up()
            if out == False:
                return {'result': out, 'msg': "%s failed to come up" % vm.vm_name}
            else:
                self.logger.info('Installing Traffic package on %s ...' %
                                 vm.vm_name)
                vm.install_pkg("Traffic")

        self.logger.info("-" * 80)
        self.logger.info('Starting TCP Traffic from %s to %s' %
                         (src_vm.vm_ip, dst_vm.vm_ip))
        self.logger.info("-" * 80)
        stream_list = []
        profile = {}
        sender = {}
        receiver = {}

        # Build Host handles for the compute nodes hosting each VM and
        # for the VMs themselves (traffic utils run on the VM via the
        # compute node).
        tx_vm_node_ip = self.inputs.host_data[
            self.nova_fixture.get_nova_host_of_vm(src_vm.vm_obj)]['host_ip']
        tx_local_host = Host(
            tx_vm_node_ip, self.inputs.username, self.inputs.password)
        send_host = Host(src_vm.local_ip, src_vm.vm_username,
                         src_vm.vm_password)

        rx_vm_node_ip = self.inputs.host_data[
            self.nova_fixture.get_nova_host_of_vm(dst_vm.vm_obj)]['host_ip']
        rx_local_host = Host(
            rx_vm_node_ip, self.inputs.username, self.inputs.password)
        recv_host = Host(dst_vm.local_ip, dst_vm.vm_username,
                         dst_vm.vm_password)

        # Three TCP streams: same source port 8000, destination ports
        # 9000-9002, so each creates a distinct flow record.
        stream1 = Stream(protocol="ip", proto="tcp", src=src_vm.vm_ip,
                         dst=dst_vm.vm_ip, sport=8000, dport=9000)
        stream2 = Stream(protocol="ip", proto="tcp", src=src_vm.vm_ip,
                         dst=dst_vm.vm_ip, sport=8000, dport=9001)
        stream3 = Stream(protocol="ip", proto="tcp", src=src_vm.vm_ip,
                         dst=dst_vm.vm_ip, sport=8000, dport=9002)
        count = 0
        stream_list = [stream1, stream2, stream3]

        for stream in stream_list:
            profile[stream] = {}
            sender[stream] = {}
            receiver[stream] = {}
            count = count + 1
            send_filename = 'sendtcp_%s' % count
            recv_filename = 'recvtcp_%s' % count
            profile[stream] = ContinuousProfile(
                stream=stream, listener=dst_vm.vm_ip, chksum=True)
            sender[stream] = Sender(
                send_filename, profile[stream], tx_local_host, send_host, self.inputs.logger)
            receiver[stream] = Receiver(
                recv_filename, profile[stream], rx_local_host, recv_host, self.inputs.logger)
            # Start receiver before sender so no packets are missed.
            receiver[stream].start()
            sender[stream].start()
        self.logger.info('Sending traffic for 10 seconds')
        time.sleep(10)
        self.logger.info('Checking Flow records')

        flow_result = False
        flow_result2 = False
        flow_result3 = False

        rev_flow_result = False
        rev_flow_result1 = False
        rev_flow_result2 = False
        src_vm_vrf_name = src_vm.vn_fq_name + ':' + src_vm.vn_name
        vn_vrf_id = src_vm.get_vrf_id(src_vm.vn_fq_name, src_vm_vrf_name)

        # NOTE(review): unicode() is Python 2 only; agent flow records
        # apparently report ports as unicode strings - confirm.
        src_port = unicode(8000)
        dpi1 = unicode(9000)
        dpi2 = unicode(9001)
        dpi3 = unicode(9002)
        dpi_list = [dpi1, dpi2, dpi3]

        vm_node_ips = []
        vm_node_ips.append(src_vm.vm_node_ip)
        if (src_vm.vm_node_ip != dst_vm.vm_node_ip):
            vm_node_ips.append(dst_vm.vm_node_ip)

#        inspect_h100= self.agent_inspect[src_vm.vm_node_ip]
#        flow_rec1= inspect_h100.get_vna_fetchflowrecord(vrf=vn_vrf_id,sip=src_vm.vm_ip,dip=dst_vm.vm_ip,sport=src_port,dport=dpi1,protocol='6')
#        flow_rec2= inspect_h100.get_vna_fetchflowrecord(vrf=vn_vrf_id,sip=src_vm.vm_ip,dip=dst_vm.vm_ip,sport=src_port,dport=dpi2,protocol='6')
#        flow_rec3= inspect_h100.get_vna_fetchflowrecord(vrf=vn_vrf_id,sip=src_vm.vm_ip,dip=dst_vm.vm_ip,sport=src_port,dport=dpi3,protocol='6')
#        flow_recs= []
#        flow_recs= [flow_rec1, flow_rec2, flow_rec3]
#        print flow_recs
#        flow_result= True
#        i= 0
#        for flow_rec in flow_recs:
#            if flow_rec is None:
#                flow_result= False
#            if flow_result is True:
#                i += 1
#        self.logger.info('%s Flows from %s to %s exist on Agent %s'%(i, src_vm.vm_ip, dst_vm.vm_ip, src_vm.vm_node_ip))

        # Poll every compute agent (up to 25 x 10s) for a reverse TCP
        # flow record whose source is dst_vm; stop at the first match.
        for agent_ip in self.inputs.compute_ips:
            inspect_h = self.agent_inspect[agent_ip]
            rev_flow_result = False
            # NOTE(review): 'iter' shadows the builtin, and the manual
            # 'iter += 1' below has no effect on the range() loop.
            for iter in range(25):
                self.logger.debug('**** Iteration %s *****' % iter)
                reverseflowrecords = []
                reverseflowrecords = inspect_h.get_vna_fetchallflowrecords()
                # NOTE(review): types.NoneType is Python 2 only.
                if type(reverseflowrecords) == types.NoneType:
                    self.logger.debug('No flows on %s.' % agent_ip)
                    break
                else:
                    for rec in reverseflowrecords:
                        if ((rec['sip'] == dst_vm.vm_ip) and (rec['protocol'] == '6')):
                            self.logger.info(
                                'Reverse Flow from %s to %s exists.' %
                                (dst_vm.vm_ip, src_vm.vm_ip))
                            rev_flow_result = True
                            break
                        else:
                            rev_flow_result = False
                    if rev_flow_result:
                        break
                    else:
                        iter += 1
                        sleep(10)
            if rev_flow_result:
                break

        # Stop senders first, then receivers, so receivers can drain.
        self.logger.info('Stopping Traffic now')
        for stream in stream_list:
            sender[stream].stop()
            time.sleep(5)
        for stream in stream_list:
            receiver[stream].stop()
            time.sleep(5)
        stream_sent_count = {}
        stream_recv_count = {}
        result = True
        for stream in stream_list:
            # Treat missing counters as zero so the arithmetic below works.
            if sender[stream].sent == None:
                sender[stream].sent = 0
            if receiver[stream].recv == None:
                receiver[stream].recv = 0
            stream_sent_count[stream] = sender[stream].sent
            stream_recv_count[stream] = receiver[stream].recv
            pkt_diff = (stream_sent_count[stream] - stream_recv_count[stream])
            if pkt_diff < 0:
                # More received than sent points at a tooling problem.
                self.logger.debug('Some problem with Scapy. Please check')
            elif pkt_diff in range(0, 6):
                # Up to 5 lost packets per stream is tolerated.
                self.logger.info(
                    '%s packets sent and %s packets received in Stream%s. No Packet Loss seen.' %
                    (stream_sent_count[stream], stream_recv_count[stream], stream_list.index(stream)))
            else:
                result = False
                assert result, '%s packets sent and %s packets received in Stream%s. Packet Loss.' % (
                    stream_sent_count[stream], stream_recv_count[stream], stream_list.index(stream))
#        if i < 1:
#            flow_result= False
#        assert flow_result,'Flows from %s to %s not seen on Agent %s'%(src_vm.vm_ip, dst_vm.vm_ip, src_vm.vm_node_ip)
        assert rev_flow_result, 'Reverse Flow from %s to %s not seen' % (
            dst_vm.vm_ip, src_vm.vm_ip)

        return True