Example 1
 def start_tcpdump(self, session, tap_intf, vlan=None, vm_fixtures=[], pcap_on_vm=False, no_header=False):
     filt_str = ''
     if not no_header:
         filt_str = 'udp port 8099'
     if not pcap_on_vm:
         pcap = '/tmp/mirror-%s_%s.pcap' % (tap_intf, get_random_name())
         cmd = 'rm -f %s' % pcap
         execute_cmd(session, cmd, self.logger)
         assert check_pcap_file_exists(session, pcap, expect=False),'pcap file still exists'
         if vlan:
             filt_str = 'greater 1200'
         cmd = "sudo tcpdump -ni %s -U %s -w %s" % (tap_intf, filt_str, pcap)
         self.logger.info("Starting tcpdump to capture the mirrored packets.")
         execute_cmd(session, cmd, self.logger)
         assert check_pcap_file_exists(session, pcap),'pcap file does not exist'
         return pcap
     else:
         pcap = '/tmp/%s.pcap' % (get_random_name())
         cmd_to_tcpdump = ['tcpdump -ni %s %s -w %s 1>/dev/null 2>/dev/null' % (tap_intf, filt_str, pcap)]
         pidfile = pcap + '.pid'
         vm_fix_pcap_pid_files = []
         for vm_fixture in vm_fixtures:
             vm_fixture.run_cmd_on_vm(cmds=cmd_to_tcpdump, as_daemon=True, pidfile=pidfile, as_sudo=True)
             vm_fix_pcap_pid_files.append((vm_fixture, pcap, pidfile))
         return vm_fix_pcap_pid_files
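A minimal usage sketch for the compute-node path above, assuming the test class also provides the stop_tcpdump counterpart shown in Example 32 and that session is an existing SSH session to the compute node; tap_intf is a placeholder interface name:
    # Hypothetical start/stop pairing on a compute node (not from the source).
    pcap = self.start_tcpdump(session, tap_intf)        # capture mirrored packets (udp port 8099)
    # ... trigger the traffic that should be mirrored ...
    count = self.stop_tcpdump(session, pcap)            # returns the number of captured packets
    assert count > 0, 'No mirrored packets captured on %s' % tap_intf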
Example 2
 def stop_tcpdump(self, session):
     self.logger.info("Stopping any tcpdump process running")
     cmd = 'kill $(pidof tcpdump)'
     execute_cmd(session, cmd, self.logger)
     self.logger.info("Removing any encap-pcap files in /tmp")
     cmd = 'rm -f /tmp/encap*pcap'
     execute_cmd(session, cmd, self.logger)
Example 3
def stop_tcpdump_for_intf(session, pcap, logger=None):
    if not logger:
        logger = contrail_logging.getLogger(__name__)
    cmd = 'sudo kill $(ps -ef|grep tcpdump | grep pcap| awk \'{print $2}\')'
    execute_cmd(session, cmd, logger)
    sleep(2)
    return True
Example 4
 def set_cpu_performance(self, hosts):
     sessions = {}
     cmd = 'for f in /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor ; do echo performance > $f; cat $f; done'
     for i in range(0, 2):
         session = ssh(hosts[i]['host_ip'], hosts[i]['username'], hosts[i]['password'])
         execute_cmd(session, cmd, self.logger)
     return
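The loop above only configures the first two entries of hosts and expects each entry to carry host_ip, username, and password; a hedged sketch of the expected input, with placeholder addresses and credentials:
    # Hypothetical hosts list (placeholder values); only hosts[0] and hosts[1] are configured.
    hosts = [
        {'host_ip': '10.0.0.1', 'username': 'root', 'password': 'secret'},
        {'host_ip': '10.0.0.2', 'username': 'root', 'password': 'secret'},
    ]
    self.set_cpu_performance(hosts)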
Example 7
 def verify_mirroring(self, si_fix, src_vm, dst_vm, mirr_vm=None):
     result = True
     if mirr_vm:
         svm = mirr_vm.vm_obj
     else:
         svms = self.get_svms_in_si(si_fix)
         svm = svms[0]
     if svm.status == 'ACTIVE':
         svm_name = svm.name
         host = self.get_svm_compute(svm_name)
         if mirr_vm:
             tapintf = self.get_svm_tapintf(svm_name)
         else:
             tapintf = self.get_bridge_svm_tapintf(svm_name, 'left')
         session = ssh(host['host_ip'], host['username'], host['password'])
         cmd = 'sudo tcpdump -nni %s -c 5 > /tmp/%s_out.log' % (tapintf, tapintf)
         execute_cmd(session, cmd, self.logger)
         assert src_vm.ping_with_certainty(dst_vm.vm_ip)
         sleep(10)
         output_cmd = 'sudo cat /tmp/%s_out.log' % tapintf
         out, err = execute_cmd_out(session, output_cmd, self.logger)
         print(out)
         if '8099' in out:
             self.logger.info('Mirroring action verified')
         else:
             result = False
             self.logger.warning('No mirroring action seen')
     return result
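For context, a hedged sketch of how a verification method like this might be invoked from a test; all fixture names are placeholders:
    # Hypothetical invocation (fixture names are not from the source).
    assert self.verify_mirroring(si_fixture, src_vm_fixture, dst_vm_fixture,
                                 mirr_vm=mirror_vm_fixture), 'Mirroring verification failed'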
Example 8
 def verify_mirroring(self, si_fix, src_vm, dst_vm, mirr_vm=None):
     result = True
     if mirr_vm:
         svm = mirr_vm.vm_obj
     else:
         svms = self.get_svms_in_si(si_fix)
         svm = svms[0]
     if svm.status == 'ACTIVE':
         svm_name = svm.name
         host = self.get_svm_compute(svm_name)
         if mirr_vm:
             tapintf = self.get_svm_tapintf(svm_name)
         else:
             tapintf = self.get_bridge_svm_tapintf(svm_name, 'left')
         session = ssh(host['host_ip'], host['username'], host['password'])
         cmd = 'sudo tcpdump -nni %s -c 5 > /tmp/%s_out.log' % (tapintf, tapintf)
         execute_cmd(session, cmd, self.logger)
         assert src_vm.ping_with_certainty(dst_vm.vm_ip)
         sleep(10)
         output_cmd = 'sudo cat /tmp/%s_out.log' % tapintf
         out, err = execute_cmd_out(session, output_cmd, self.logger)
         print(out)
         if '8099' in out:
             self.logger.info('Mirroring action verified')
         else:
             result = False
             self.logger.warning('No mirroring action seen')
     return result
Example 10
 def start_tcpdump(self, server_ip, tap_intf):
     session = ssh(server_ip, self.inputs.host_data[server_ip]['username'], self.inputs.host_data[server_ip]['password'])
     pcap = '/tmp/%s.pcap' % tap_intf
     cmd = "tcpdump -nei %s tcp -w %s" % (tap_intf, pcap)
     self.logger.info("Starting tcpdump to capture the packets on server %s" % (server_ip))
     execute_cmd(session, cmd, self.logger)
     return pcap, session
Example 11
 def verify_vrrp_action(self, src_vm, dst_vm, ip, vsrx=False):
     result = False
     self.logger.info('Will ping %s from %s and check if %s responds' % (
         ip, src_vm.vm_name, dst_vm.vm_name))
     compute_ip = dst_vm.vm_node_ip
     compute_user = self.inputs.host_data[compute_ip]['username']
     compute_password = self.inputs.host_data[compute_ip]['password']
     session = ssh(compute_ip, compute_user, compute_password)
     if vsrx:
         vm_tapintf = dst_vm.tap_intf[dst_vm.vn_fq_names[1]]['name']
     else:
         vm_tapintf = dst_vm.tap_intf[dst_vm.vn_fq_name]['name']
     cmd = 'sudo tcpdump -nni %s -c 2 icmp > /tmp/%s_out.log' % (
         vm_tapintf, vm_tapintf)
     execute_cmd(session, cmd, self.logger)
     assert src_vm.ping_with_certainty(ip)
     output_cmd = 'cat /tmp/%s_out.log' % vm_tapintf
     output, err = execute_cmd_out(session, output_cmd, self.logger)
     if src_vm.vm_ip in output:
         result = True
         self.logger.info(
             '%s is seen responding to ICMP Requests' % dst_vm.vm_name)
     else:
         self.logger.error(
             'ICMP Requests to %s not seen on the VRRP Master' % ip)
         result = False
     return result
Example 12
 def verify_vrrp_action(self, src_vm, dst_vm, ip, vsrx=False):
     result = False
     self.logger.info('Will ping %s from %s and check if %s responds' % (
         ip, src_vm.vm_name, dst_vm.vm_name))
     compute_ip = dst_vm.vm_node_ip
     compute_user = self.inputs.host_data[compute_ip]['username']
     compute_password = self.inputs.host_data[compute_ip]['password']
     session = ssh(compute_ip, compute_user, compute_password)
     if vsrx:
         vm_tapintf = dst_vm.tap_intf[dst_vm.vn_fq_names[1]]['name']
     else:
         vm_tapintf = dst_vm.tap_intf[dst_vm.vn_fq_name]['name']
     cmd = 'tcpdump -nni %s -c 10 > /tmp/%s_out.log' % (
         vm_tapintf, vm_tapintf)
     execute_cmd(session, cmd, self.logger)
     assert src_vm.ping_with_certainty(ip), 'Ping to vIP failure'
     output_cmd = 'cat /tmp/%s_out.log' % vm_tapintf
     output, err = execute_cmd_out(session, output_cmd, self.logger)
     if ip in output:
         result = True
         self.logger.info(
             '%s is seen responding to ICMP Requests' % dst_vm.vm_name)
     else:
         self.logger.error('ICMP Requests not seen on the VRRP Master')
         result = False
     return result
Example 14
def start_tcpdump_for_intf(ip, username, password, interface, filters='-v', logger=None):
    if not logger:
        logger = logging.getLogger(__name__)
    session = ssh(ip, username, password)
    pcap = '/tmp/%s_%s.pcap' % (interface, get_random_name())
    cmd = 'tcpdump -ni %s -U %s -w %s' % (interface, filters, pcap)
    execute_cmd(session, cmd, logger)
    return (session, pcap)
Example 15
def start_tcpdump_for_intf(ip, username, password, interface, filters='-v', logger=None):
    if not logger:
        logger = contrail_logging.getLogger(__name__)
    session = ssh(ip, username, password)
    pcap = '/tmp/%s_%s.pcap' % (interface, get_random_name())
    cmd = 'tcpdump -nni %s -U %s -w %s' % (interface, filters, pcap)
    execute_cmd(session, cmd, logger)
    return (session, pcap)
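A hedged sketch pairing the module-level start helpers above with stop_tcpdump_for_intf from Example 3; the host address, credentials, interface name, and filter are placeholders:
    # Hypothetical capture on a compute-node interface.
    session, pcap = start_tcpdump_for_intf('10.0.0.1', 'root', 'secret',
                                           'tapXXXX', filters='udp port 8099')
    # ... generate the traffic under test ...
    stop_tcpdump_for_intf(session, pcap)   # kills the tcpdump started above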
Example 16
 def start_tcpdump(self, session, tap_intf):
     pcap = '/tmp/mirror-%s.pcap' % tap_intf
     cmd = 'rm -f %s' % pcap
     execute_cmd(session, cmd, self.logger)
     cmd = "tcpdump -ni %s udp port 8099 -w %s" % (tap_intf, pcap)
     self.logger.info("Staring tcpdump to capture the mirrored packets.")
     execute_cmd(session, cmd, self.logger)
     return pcap
Example 17
 def start_tcpdump(self, session, tap_intf):
     pcap = "/tmp/mirror-%s.pcap" % tap_intf
     cmd = "rm -f %s" % pcap
     execute_cmd(session, cmd, self.logger)
     sleep(5)
     cmd = "tcpdump -ni %s udp port 8099 -w %s" % (tap_intf, pcap)
     self.logger.info("Staring tcpdump to capture the mirrored packets.")
     execute_cmd(session, cmd, self.logger)
     return pcap
Example 18
 def start_tcpdump(self, server_ip, tap_intf):
     session = ssh(server_ip, self.inputs.host_data[server_ip]['username'],
                   self.inputs.host_data[server_ip]['password'])
     pcap = '/tmp/%s.pcap' % tap_intf
     cmd = "tcpdump -nei %s tcp -w %s" % (tap_intf, pcap)
     self.logger.info(
         "Staring tcpdump to capture the packets on server %s" %
         (server_ip))
     execute_cmd(session, cmd, self.logger)
     return pcap, session
Example 19
 def stop_tcpdump(self, session, pcap):
     self.logger.info("Waiting for the tcpdump write to complete.")
     sleep(30)
     cmd = 'kill $(pidof tcpdump)'
     execute_cmd(session, cmd, self.logger)
     cmd = 'tcpdump -r %s | wc -l' % pcap
     out, err = execute_cmd_out(session, cmd, self.logger)
     count = int(out.strip('\n'))
     cmd = 'rm -f %s' % pcap
     execute_cmd(session, cmd, self.logger)
     return count
Example 20
 def start_tcpdump(self, session, tap_intf, vlan=None):
     pcap = '/tmp/mirror-%s.pcap' % tap_intf
     cmd = 'rm -f %s' % pcap
     execute_cmd(session, cmd, self.logger)
     filt_str = ''
     if not vlan:
         filt_str = 'udp port 8099'
     cmd = "sudo tcpdump -ni %s %s -w %s" % (tap_intf, filt_str, pcap)
     self.logger.info("Starting tcpdump to capture the mirrored packets.")
     execute_cmd(session, cmd, self.logger)
     return pcap
Example 21
def start_tcpdump_for_vm_intf(obj, vm_fix, vn_fq_name, filters='-v'):
    compute_ip = vm_fix.vm_node_ip
    compute_user = obj.inputs.host_data[compute_ip]['username']
    compute_password = obj.inputs.host_data[compute_ip]['password']
    session = ssh(compute_ip, compute_user, compute_password)
    vm_tapintf = vm_fix.tap_intf[vn_fq_name]['name']
    pcap = '/tmp/%s_%s.pcap' % (vm_tapintf, get_random_name())
    cmd = 'tcpdump -ni %s -U %s -w %s' % (vm_tapintf, filters, pcap)
    execute_cmd(session, cmd, obj.logger)

    return (session, pcap)
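A hedged sketch of how this helper might be combined with the count-verification helper stop_tcpdump_on_vm_verify_cnt shown later (Example 27), assuming obj is a test object exposing inputs and logger and that the fixture names are placeholders:
    # Hypothetical capture-and-verify flow on a VM's tap interface.
    session, pcap = start_tcpdump_for_vm_intf(self, vm_fixture, vn_fq_name, filters='icmp')
    # ... ping the VM so ICMP packets hit the tap interface ...
    assert stop_tcpdump_on_vm_verify_cnt(self, session, pcap)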
Example 22
 def stop_tcpdump(self, session, pcap):
     self.logger.info("Waiting for the tcpdump write to complete.")
     sleep(30)
     cmd = 'sudo kill $(pidof tcpdump)'
     execute_cmd(session, cmd, self.logger)
     cmd = 'sudo tcpdump -r %s | wc -l' % pcap
     out, err = execute_cmd_out(session, cmd, self.logger)
     count = int(out.strip('\n'))
     cmd = 'sudo rm -f %s' % pcap
     execute_cmd(session, cmd, self.logger)
     return count
Example 23
 def stop_tcp_dump(self, sessions):
     self.logger.info("Waiting for the tcpdump write to complete.")
     sleep(30)
     cmd = 'kill $(pidof tcpdump)'
     execute_cmd(sessions[0], cmd, self.logger)
     execute_cmd(sessions[0], 'sync', self.logger)
     cmd = 'tcpdump -r %s | wc -l' % sessions[1]
     out, err = execute_cmd_out(sessions[0], cmd, self.logger)
     count = int(out.strip('\n'))
     #cmd = 'rm -f %s' % sessions[1]
     #execute_cmd(sessions[0], cmd, self.logger)
     return count
Example 25
 def start_tcp_dump(self, vm_fixture):
     sessions = []
     vm_name = vm_fixture.vm_name
     host = self.inputs.host_data[vm_fixture.vm_node_ip]
     inspect_h = self.agent_inspect[vm_fixture.vm_node_ip]
     tapintf = inspect_h.get_vna_tap_interface_by_ip(vm_fixture.vm_ip)[0]['name']
     pcap = '/tmp/%s.pcap' % tapintf
     cmd = "tcpdump -ni %s udp -w %s" % (tapintf, pcap)
     session = ssh(host['host_ip'], host['username'], host['password'])
     self.logger.info("Staring tcpdump to capture the packets.")
     execute_cmd(session, cmd, self.logger)
     sessions.extend((session, pcap))
     return sessions
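The returned list packs the SSH session and the pcap path positionally, which is how stop_tcp_dump (Example 23) consumes them; a hedged usage sketch with placeholder traffic generation:
    # Hypothetical pairing with stop_tcp_dump: sessions[0] is the SSH session, sessions[1] the pcap path.
    sessions = self.start_tcp_dump(vm_fixture)
    # ... send the UDP traffic under test ...
    count = self.stop_tcp_dump(sessions)
    assert count > 0, 'No packets captured on the tap interface'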
Example 26
 def start_tcp_dump(self, vm_fixture):
     sessions = []
     vm_name = vm_fixture.vm_name
     host = self.inputs.host_data[vm_fixture.vm_node_ip]
     inspect_h = self.agent_inspect[vm_fixture.vm_node_ip]
     tapintf = inspect_h.get_vna_tap_interface_by_ip(vm_fixture.vm_ip)[0]['name']
     pcap = '/tmp/%s.pcap' % tapintf
     cmd = "sudo tcpdump -ni %s udp -w %s" % (tapintf, pcap)
     session = ssh(host['host_ip'], host['username'], host['password'])
     self.logger.info("Staring tcpdump to capture the packets.")
     execute_cmd(session, cmd, self.logger)
     sessions.extend((session, pcap))
     return sessions
Example 27
def stop_tcpdump_on_vm_verify_cnt(obj, session, pcap, exp_count=None):

    cmd = 'tcpdump -r %s | wc -l' % pcap
    out, err = execute_cmd_out(session, cmd, obj.logger)
    count = int(out.strip('\n'))
    if exp_count and count != exp_count:
        obj.logger.warn("%s packets are found in tcpdump output but expected %s" % (count, exp_count))
        return False
    elif count == 0:
        obj.logger.warn("No packets are found in tcpdump output but expected something")
        return False

    obj.logger.info("%s packets are found in tcpdump output", count)
    cmd = 'rm -f %s' % pcap
    execute_cmd(session, cmd, obj.logger)
    cmd = 'kill $(pidof tcpdump)'
    execute_cmd(session, cmd, obj.logger)
    return True 
Example 28
    def stop_tcpdump(self, session, pcap, filt=''):
        self.logger.debug("Waiting for the tcpdump write to complete.")
        sleep(5)
        cmd = 'kill $(pidof tcpdump)'
        execute_cmd(session, cmd, self.logger)
        execute_cmd(session, 'sync', self.logger)

        cmd = 'tcpdump -n -r %s %s ' % (pcap, filt)
        out, err = execute_cmd_out(session, cmd, self.logger)
        self.logger.debug('Stopped tcpdump, out : %s, err : %s' % (out, err))

        cmd = 'tcpdump -n -r %s %s | wc -l' % (pcap, filt)
        out, err = execute_cmd_out(session, cmd, self.logger)
        count = int(out.strip('\n'))
        cmd = 'rm -f %s' % pcap
        #TODO
        # Temporary for debugging
#        execute_cmd(session, cmd, self.logger)
        return count
Example 29
 def verify_flow_thru_si(self, si_fix, src_vn):
     self.logger.info('Will start a tcpdump on the left-interfaces of the Service Instances to find out which flow is entering which Service Instance')
     flowcount = 0
     result = True
     si_count = si_fix.max_inst
     for i in range(1, si_count + 1):
         svm_name = si_fix.si_name + '_%s' % i
         host = self.get_svm_compute(svm_name)
         tapintf = self.get_svm_tapintf_of_vn(svm_name, src_vn)
         session = ssh(host['host_ip'], host['username'], host['password'])
         cmd = 'tcpdump -ni %s proto 17 -vvv -c 5 > /tmp/%s_out.log' % (tapintf, tapintf)
         execute_cmd(session, cmd, self.logger)
     sleep(5)
     self.logger.info('***** Will check the result of tcpdump *****')
     for i in range(1, si_count + 1):
         svm_name = si_fix.si_name + '_%s' % i
         host = self.get_svm_compute(svm_name)
         tapintf = self.get_svm_tapintf_of_vn(svm_name, src_vn)
         session = ssh(host['host_ip'], host['username'], host['password'])
         output_cmd = 'cat /tmp/%s_out.log' % tapintf
         out, err = execute_cmd_out(session, output_cmd, self.logger)
         if '9000' in out:
             flowcount = flowcount + 1
             self.logger.info('Flow with dport 9000 seen flowing inside %s' % svm_name)
         if '9001' in out:
             flowcount = flowcount + 1
             self.logger.info('Flow with dport 9001 seen flowing inside %s' % svm_name)
         if '9002' in out:
             flowcount = flowcount + 1
             self.logger.info('Flow with dport 9002 seen flowing inside %s' % svm_name)
     if flowcount > 1:
         self.logger.info('Flows are distributed across the Service Instances')
     else:
         result = False
     assert result, 'No Flow distribution seen'
Example 30
 def stop_tcpdump(self, session, pcap, filt=""):
     self.logger.info("Waiting for the tcpdump write to complete.")
     sleep(30)
     cmd = "kill $(pidof tcpdump)"
     execute_cmd(session, cmd, self.logger)
     execute_cmd(session, "sync", self.logger)
     cmd = "tcpdump -r %s %s | wc -l" % (pcap, filt)
     out, err = execute_cmd_out(session, cmd, self.logger)
     count = int(out.strip("\n"))
     cmd = "rm -f %s" % pcap
     execute_cmd(session, cmd, self.logger)
     return count
Example 31
 def stop_tcpdump(self, session, pcap, filt=''):
     self.logger.debug("Waiting for the tcpdump write to complete.")
     sleep(2)
     cmd = 'sudo kill $(ps -ef|grep tcpdump | grep %s| awk \'{print $2}\')' % pcap
     execute_cmd(session, cmd, self.logger)
     execute_cmd(session, 'sync', self.logger)
     sleep(3)
     cmd = 'sudo tcpdump -n -r %s %s | wc -l' % (pcap, filt)
     out, err = execute_cmd_out(session, cmd, self.logger)
     count = int(out.strip('\n'))
     cmd = 'sudo tcpdump -n -r %s' % pcap
     #TODO
     # Temporary for debugging
     execute_cmd(session, cmd, self.logger)
     return count
Example 32
 def stop_tcpdump(self, session, pcap, filt='', vm_fix_pcap_pid_files=[], pcap_on_vm=False):
     self.logger.debug("Waiting for the tcpdump write to complete.")
     sleep(2)
     if not pcap_on_vm:
         cmd = 'sudo kill $(ps -ef|grep tcpdump | grep %s| awk \'{print $2}\')' % pcap
         execute_cmd(session, cmd, self.logger)
         execute_cmd(session, 'sync', self.logger)
         sleep(3)
         cmd = 'sudo tcpdump -n -r %s %s | wc -l' % (pcap, filt)
         out, err = execute_cmd_out(session, cmd, self.logger)
         count = int(out.strip('\n'))
         cmd = 'sudo tcpdump -n -r %s' % pcap
         #TODO
         # Temporary for debugging
         execute_cmd(session, cmd, self.logger)
         return count
     else:
         output = []
         pkt_count = []
         for vm_fix, pcap, pidfile in vm_fix_pcap_pid_files:
             cmd_to_output = 'tcpdump -nr %s %s' % (pcap, filt)
             cmd_to_kill = 'cat %s | xargs kill ' % (pidfile)
             count = cmd_to_output + '| wc -l'
             vm_fix.run_cmd_on_vm(cmds=[cmd_to_kill], as_sudo=True)
             sleep(2)
             vm_fix.run_cmd_on_vm(cmds=[cmd_to_output], as_sudo=True)
             output.append(vm_fix.return_output_cmd_dict[cmd_to_output])
             vm_fix.run_cmd_on_vm(cmds=[count], as_sudo=True)
             pkt_count_list = vm_fix.return_output_cmd_dict[count].split('\n')
             try:
                 pkts = pkt_count_list[2]
             except IndexError:
                 pkts = pkt_count_list[1]
             pkts = int(pkts)
             pkt_count.append(pkts)
             total_pkts = sum(pkt_count)
         return output, total_pkts
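A hedged sketch of the on-VM capture mode, pairing the pcap_on_vm branch of start_tcpdump (Example 1) with this method; the interface name and VM fixtures are placeholders:
    # Hypothetical on-VM capture: start on two VMs, then collect output and packet counts.
    pcap_files = self.start_tcpdump(None, 'eth0', vm_fixtures=[vm1_fixture, vm2_fixture],
                                    pcap_on_vm=True)
    # ... generate the traffic to be captured ...
    output, total_pkts = self.stop_tcpdump(None, None, vm_fix_pcap_pid_files=pcap_files,
                                           pcap_on_vm=True)
    assert total_pkts > 0, 'No packets captured on the VMs'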
Example 34
 def start_tcpdump_on_intf(self, host, tapintf):
     session = ssh(host['host_ip'], host['username'], host['password'])
     cmd = 'sudo tcpdump -nni %s -c 1 proto 1 > /tmp/%s_out.log 2>&1' % (
         tapintf, tapintf)
     execute_cmd(session, cmd, self.logger)
Example 35
 def start_tcpdump(self, session, cmd):
     self.logger.info("Starting tcpdump to capture the packets.")
     result = execute_cmd(session, cmd, self.logger)
Example 36
def stop_tcpdump_for_vm_intf(obj, session, pcap):
    cmd = 'rm -f %s' % pcap
    execute_cmd(session, cmd, obj.logger)
    cmd = 'kill $(ps -ef|grep tcpdump | grep pcap| awk \'{print $2}\')'
    execute_cmd(session, cmd, obj.logger)
    return True
Example 37
 def start_tcpdump_on_intf(self, host, tapintf):
     session = ssh(host['host_ip'], host['username'], host['password'])
     cmd = 'tcpdump -nni %s -c 10 > /tmp/%s_out.log' % (tapintf, tapintf)
     execute_cmd(session, cmd, self.logger)
Example 38
    def verify_flow_thru_si(self, si_fix, src_vn=None, ecmp_hash=None, flow_count=3):
        self.logger.info(
            'Will start a tcpdump on the left-interfaces of the Service Instances to find out which flow is entering which Service Instance')
        flowcount = 0
        result = True
        flow_pattern = {}
        svms = self.get_svms_in_si(si_fix)
        svms = sorted(set(svms))
        svm_list = si_fix.svm_list
        svm_index = 0
        vm_fix_pcap_pid_files = []

        # Capturing packets based upon source port
        src_port = "8000"
        filters = '\'(src port %s)\'' % (src_port)
        if None in svms:
            svms.remove(None)
        for svm_fixture in svm_list:
            svm = svm_fixture.vm_obj
            self.logger.info('SVM %s is in %s state' % (svm.name, svm.status))
            if svm.status == 'ACTIVE':
                svm_name = svm.name
                host_name = self.connections.orch.get_host_of_vm(svm)
                host = self.inputs.host_data[host_name]
                if src_vn is not None:
                    #tapintf = svm_fixture.tap_intf[src_vn.vn_fq_name]['name']
                    tapintf = self.connections.orch.get_vm_tap_interface(\
                                      svm_fixture.tap_intf[src_vn.vn_fq_name])
                else:
                    tapintf = self.connections.orch.get_vm_tap_interface(svm_fixture.tap_intf[si_fix.left_vn_fq_name])
                    filters = ''
                if self.inputs.pcap_on_vm:
                    tcpdump_files = start_tcpdump_for_vm_intf(
                        None, [svm_list[svm_index]], None, filters=filters, pcap_on_vm=True, vm_intf='eth1', svm=True)
                    svm_index = svm_index + 1
                    vm_fix_pcap_pid_files.append(tcpdump_files)
                else:
                    session = ssh(
                        host['host_ip'], host['username'], host['password'])
                    cmd = 'sudo tcpdump -nni %s %s -c 20 > /tmp/%s_out.log' % (
                        tapintf, filters, tapintf)
                    execute_cmd(session, cmd, self.logger)
            else:
                self.logger.info('%s is not in ACTIVE state' % svm.name)
        sleep(15)

        self.logger.info('%%%%% Will check the result of tcpdump %%%%%')
        svms = self.get_svms_in_si(si_fix)
        svms = sorted(set(svms))
        if None in svms:
            svms.remove(None)
        svm_index = 0
        for svm_fixture in svm_list:
            svm = svm_fixture.vm_obj
            self.logger.info('SVM %s is in %s state' % (svm.name, svm.status))
            if svm.status == 'ACTIVE':
                svm_name = svm.name
                host_name = self.connections.orch.get_host_of_vm(svm)
                host = self.inputs.host_data[host_name]
                if src_vn is not None:
                    tapintf = self.connections.orch.get_vm_tap_interface(\
                                      svm_fixture.tap_intf[src_vn.vn_fq_name])
                    #tapintf = svm_fixture.tap_intf[src_vn.vn_fq_name]['name']
                else:
                    direction = 'left'
                    tapintf = self.connections.orch.get_vm_tap_interface(svm_fixture.tap_intf[si_fix.left_vn_fq_name])
                if not self.inputs.pcap_on_vm:
                    session = ssh(
                        host['host_ip'], host['username'], host['password'])
                    output_cmd = 'cat /tmp/%s_out.log' % tapintf
                    out, err = execute_cmd_out(session, output_cmd, self.logger)
                else:
                    out, pkt_count = stop_tcpdump_for_vm_intf(
                        None, None, None, vm_fix_pcap_pid_files=vm_fix_pcap_pid_files[svm_index], svm=True)
                    svm_index = svm_index + 1
                    out = out[0]
                for i in range(0, flow_count):
                    dport = str(9000 + i)
                    if dport in out:
                        flowcount = flowcount + 1
                        self.logger.info(
                            'Flow with dport %s seen flowing inside %s' % (dport, svm_name))
                        flow_pattern[dport] = svm_name
            else:
                self.logger.info('%s is not in ACTIVE state' % svm.name)
        if flowcount > 0:
            self.logger.info(
                'Flows are distributed across the Service Instances as :')
            self.logger.info('%s' % flow_pattern)
        else:
            result = False

        if ecmp_hash and ecmp_hash != 'default':
            # count the number of hash fields set
            hash_var_count = sum(ecmp_hash.values())

            # In case only one hash field is set, all flows should go through a
            # single service instance. One exception here is destination_port:
            # the destination port varies across traffic streams, so traffic
            # will get load balanced even if the ecmp hash is configured with
            # "destination_port" alone.
            if hash_var_count == 1 and ('destination_port' not in ecmp_hash):
                flow_pattern_ref = flow_pattern['9000']
                if all(flow_pattern_ref == item for item in flow_pattern.values()):
                    self.logger.info(
                        'Flows are flowing through Single Service Instance: %s, as per config hash: %s' % (flow_pattern_ref, ecmp_hash))
                    self.logger.info('%s' % flow_pattern)
                else:
                    result = False
                    self.logger.error(
                        'Flows are flowing through multiple Service Instances:%s, whereas it should not as per config hash:%s' % (flow_pattern, ecmp_hash))
                    assert result, 'Config hash is not working for: %s' % ( ecmp_hash)
            # In case multiple ecmp hash fields are configured, the default ecmp
            # hash is present, or only 'destination_port' is configured in
            # ecmp_hash
            else:
                flow_pattern_ref = flow_pattern['9000']
                if all(flow_pattern_ref == item for item in flow_pattern.values()):
                    result = False
                    self.logger.error(
                        'Flows are flowing through Single Service Instance:%s, whereas it should not as per config hash:%s' % (flow_pattern_ref, ecmp_hash))
                    #assert result, 'Config hash is not working fine.'
                else:
                    self.logger.info(
                        'Flows are flowing through multiple Service Instances:%s, as per config hash: %s' % (flow_pattern, ecmp_hash))
                    self.logger.info('%s' % flow_pattern)
        else:
            assert result, 'No Flow distribution seen'
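A hedged example of passing ecmp_hash, based only on what the method above checks (boolean-valued fields whose sum is counted, with 'destination_port' treated specially); si_fixture and left_vn_fixture are placeholder fixtures and any other field names would be assumptions:
    # Hypothetical call: a single configured hash field that is the destination port,
    # so flows are still expected to be distributed across service instances.
    ecmp_hash = {'destination_port': True}
    self.verify_flow_thru_si(si_fixture, src_vn=left_vn_fixture, ecmp_hash=ecmp_hash, flow_count=3)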
Example 39
    def verify_flow_thru_si(self, si_fix, src_vn=None):
        self.logger.info(
            'Will start a tcpdump on the left-interfaces of the Service Instances to find out which flow is entering which Service Instance')
        flowcount = 0
        result = True
        flow_pattern = {}
        svms = self.get_svms_in_si(si_fix, self.inputs.project_name)
        svms = sorted(set(svms))
        if None in svms:
            svms.remove(None)
        for svm in svms:
            self.logger.info('SVM %s is in %s state' % (svm.name, svm.status))
            if svm.status == 'ACTIVE':
                svm_name = svm.name
                host = self.get_svm_compute(svm_name)
                if src_vn is not None:
                    tapintf = self.get_svm_tapintf_of_vn(svm_name, src_vn)
                else:
                    direction = 'left'
                    tapintf = self.get_bridge_svm_tapintf(svm_name, direction)
                session = ssh(
                    host['host_ip'], host['username'], host['password'])
                cmd = 'tcpdump -nni %s -c 10 > /tmp/%s_out.log' % (
                    tapintf, tapintf)
                execute_cmd(session, cmd, self.logger)
            else:
                self.logger.info('%s is not in ACTIVE state' % svm.name)
        sleep(15)

        self.logger.info('%%%%% Will check the result of tcpdump %%%%%')
        svms = self.get_svms_in_si(si_fix, self.inputs.project_name)
        svms = sorted(set(svms))
        if None in svms:
            svms.remove(None)
        for svm in svms:
            self.logger.info('SVM %s is in %s state' % (svm.name, svm.status))
            if svm.status == 'ACTIVE':
                svm_name = svm.name
                host = self.get_svm_compute(svm_name)
                if src_vn is not None:
                    tapintf = self.get_svm_tapintf_of_vn(svm_name, src_vn)
                else:
                    direction = 'left'
                    tapintf = self.get_bridge_svm_tapintf(svm_name, direction)
                session = ssh(
                    host['host_ip'], host['username'], host['password'])
                output_cmd = 'cat /tmp/%s_out.log' % tapintf
                out, err = execute_cmd_out(session, output_cmd, self.logger)
                if '9000' in out:
                    flowcount = flowcount + 1
                    self.logger.info(
                        'Flow with dport 9000 seen flowing inside %s' % svm_name)
                    flow_pattern['9000'] = svm_name
                if '9001' in out:
                    flowcount = flowcount + 1
                    self.logger.info(
                        'Flow with dport 9001 seen flowing inside %s' % svm_name)
                    flow_pattern['9001'] = svm_name
                if '9002' in out:
                    flowcount = flowcount + 1
                    self.logger.info(
                        'Flow with dport 9002 seen flowing inside %s' % svm_name)
                    flow_pattern['9002'] = svm_name
            else:
                self.logger.info('%s is not in ACTIVE state' % svm.name)
        if flowcount > 0:
            self.logger.info(
                'Flows are distributed across the Service Instances as :')
            self.logger.info('%s' % flow_pattern)
        else:
            result = False
        assert result, 'No Flow distribution seen'
Example 40
 def start_tcpdump(self, session, cmd, pcap=None):
     self.logger.info("Starting tcpdump to capture the packets.")
     result = execute_cmd(session, cmd, self.logger)
     if pcap:
         assert check_pcap_file_exists(session, pcap), 'pcap file was not created'
Example 41
    def verify_flow_thru_si(self, si_fix, src_vn=None):
        self.logger.info(
            'Will start a tcpdump on the left-interfaces of the Service Instances to find out which flow is entering which Service Instance'
        )
        flowcount = 0
        result = True
        flow_pattern = {}
        svms = self.get_svms_in_si(si_fix, self.inputs.project_name)
        svms = sorted(set(svms))
        if None in svms:
            svms.remove(None)
        for svm in svms:
            self.logger.info('SVM %s is in %s state' % (svm.name, svm.status))
            if svm.status == 'ACTIVE':
                svm_name = svm.name
                host = self.get_svm_compute(svm_name)
                if src_vn is not None:
                    tapintf = self.get_svm_tapintf_of_vn(svm_name, src_vn)
                else:
                    direction = 'left'
                    tapintf = self.get_bridge_svm_tapintf(svm_name, direction)
                session = ssh(host['host_ip'], host['username'],
                              host['password'])
                cmd = 'tcpdump -nni %s -c 10 > /tmp/%s_out.log' % (tapintf,
                                                                   tapintf)
                execute_cmd(session, cmd, self.logger)
            else:
                self.logger.info('%s is not in ACTIVE state' % svm.name)
        sleep(15)

        self.logger.info('%%%%% Will check the result of tcpdump %%%%%')
        svms = self.get_svms_in_si(si_fix, self.inputs.project_name)
        svms = sorted(set(svms))
        if None in svms:
            svms.remove(None)
        for svm in svms:
            self.logger.info('SVM %s is in %s state' % (svm.name, svm.status))
            if svm.status == 'ACTIVE':
                svm_name = svm.name
                host = self.get_svm_compute(svm_name)
                if src_vn is not None:
                    tapintf = self.get_svm_tapintf_of_vn(svm_name, src_vn)
                else:
                    direction = 'left'
                    tapintf = self.get_bridge_svm_tapintf(svm_name, direction)
                session = ssh(host['host_ip'], host['username'],
                              host['password'])
                output_cmd = 'cat /tmp/%s_out.log' % tapintf
                out, err = execute_cmd_out(session, output_cmd, self.logger)
                if '9000' in out:
                    flowcount = flowcount + 1
                    self.logger.info(
                        'Flow with dport 9000 seen flowing inside %s' %
                        svm_name)
                    flow_pattern['9000'] = svm_name
                if '9001' in out:
                    flowcount = flowcount + 1
                    self.logger.info(
                        'Flow with dport 9001 seen flowing inside %s' %
                        svm_name)
                    flow_pattern['9001'] = svm_name
                if '9002' in out:
                    flowcount = flowcount + 1
                    self.logger.info(
                        'Flow with dport 9002 seen flowing inside %s' %
                        svm_name)
                    flow_pattern['9002'] = svm_name
            else:
                self.logger.info('%s is not in ACTIVE state' % svm.name)
        if flowcount > 0:
            self.logger.info(
                'Flows are distributed across the Service Instances as :')
            self.logger.info('%s' % flow_pattern)
        else:
            result = False
        assert result, 'No Flow distribution seen'
Example 42
 def start_tcpdump_on_intf(self, host, tapintf):
     session = ssh(host["host_ip"], host["username"], host["password"])
     cmd = "tcpdump -nni %s -c 10 > /tmp/%s_out.log" % (tapintf, tapintf)
     execute_cmd(session, cmd, self.logger)
Example 43
 def start_tcpdump(self, session, cmd, pcap=None):
     self.logger.info("Starting tcpdump to capture the packets.")
     result = execute_cmd(session, cmd, self.logger)
     if pcap:
         assert check_pcap_file_exists(session,
                                       pcap), 'pcap file was not created'
Example 44
    def verify_flow_thru_si(self,
                            si_fix,
                            src_vn=None,
                            ecmp_hash=None,
                            flow_count=3):
        self.logger.info(
            'Will start a tcpdump on the left-interfaces of the Service Instances to find out which flow is entering which Service Instance'
        )
        flowcount = 0
        result = True
        flow_pattern = {}
        svms = self.get_svms_in_si(si_fix)
        svms = sorted(set(svms))
        if None in svms:
            svms.remove(None)
        for svm in svms:
            self.logger.info('SVM %s is in %s state' % (svm.name, svm.status))
            if svm.status == 'ACTIVE':
                svm_name = svm.name
                host = self.get_svm_compute(svm_name)
                if src_vn is not None:
                    tapintf = self.get_svm_tapintf_of_vn(svm_name, src_vn)
                else:
                    direction = 'left'
                    tapintf = self.get_bridge_svm_tapintf(svm_name, direction)
                session = ssh(host['host_ip'], host['username'],
                              host['password'])
                cmd = 'tcpdump -nni %s -c 10 > /tmp/%s_out.log' % (tapintf,
                                                                   tapintf)
                execute_cmd(session, cmd, self.logger)
            else:
                self.logger.info('%s is not in ACTIVE state' % svm.name)
        sleep(15)

        self.logger.info('%%%%% Will check the result of tcpdump %%%%%')
        svms = self.get_svms_in_si(si_fix)
        svms = sorted(set(svms))
        if None in svms:
            svms.remove(None)
        for svm in svms:
            self.logger.info('SVM %s is in %s state' % (svm.name, svm.status))
            if svm.status == 'ACTIVE':
                svm_name = svm.name
                host = self.get_svm_compute(svm_name)
                if src_vn is not None:
                    tapintf = self.get_svm_tapintf_of_vn(svm_name, src_vn)
                else:
                    direction = 'left'
                    tapintf = self.get_bridge_svm_tapintf(svm_name, direction)
                session = ssh(host['host_ip'], host['username'],
                              host['password'])
                output_cmd = 'cat /tmp/%s_out.log' % tapintf
                out, err = execute_cmd_out(session, output_cmd, self.logger)

                for i in range(0, flow_count):
                    dport = str(9000 + i)
                    if dport in out:
                        flowcount = flowcount + 1
                        self.logger.info(
                            'Flow with dport %s seen flowing inside %s' %
                            (dport, svm_name))
                        flow_pattern[dport] = svm_name
            else:
                self.logger.info('%s is not in ACTIVE state' % svm.name)
        if flowcount > 0:
            self.logger.info(
                'Flows are distributed across the Service Instances as :')
            self.logger.info('%s' % flow_pattern)
        else:
            result = False

        if ecmp_hash and ecmp_hash != 'default':
            # count the number of hash fields set
            hash_var_count = sum(ecmp_hash.values())

            # In case only one hash field is set, all flows should go through a
            # single service instance. One exception here is destination_port:
            # the destination port varies across traffic streams, so traffic
            # will get load balanced even if the ecmp hash is configured with
            # "destination_port" alone.
            if hash_var_count == 1 and ('destination_port' not in ecmp_hash):
                flow_pattern_ref = flow_pattern['9000']
                if all(flow_pattern_ref == item
                       for item in flow_pattern.values()):
                    self.logger.info(
                        'Flows are flowing through Single Service Instance: %s, as per config hash: %s'
                        % (flow_pattern_ref, ecmp_hash))
                    self.logger.info('%s' % flow_pattern)
                else:
                    result = False
                    self.logger.error(
                        'Flows are flowing through multiple Service Instances:%s, whereas it should not as per config hash:%s'
                        % (flow_pattern, ecmp_hash))
                    assert result, 'Config hash is not working for: %s' % (
                        ecmp_hash)
            # In case multiple ecmp hash fields are configured, the default ecmp
            # hash is present, or only 'destination_port' is configured in
            # ecmp_hash
            else:
                flow_pattern_ref = flow_pattern['9000']
                if all(flow_pattern_ref == item
                       for item in flow_pattern.values()):
                    result = False
                    self.logger.error(
                        'Flows are flowing through Single Service Instance:%s, whereas it should not as per config hash:%s'
                        % (flow_pattern_ref, ecmp_hash))
                    #assert result, 'Config hash is not working fine.'
                else:
                    self.logger.info(
                        'Flows are flowing through multiple Service Instances:%s, as per config hash: %s'
                        % (flow_pattern, ecmp_hash))
                    self.logger.info('%s' % flow_pattern)
        else:
            assert result, 'No Flow distribution seen'
Example 45
    def verify_flow_thru_si(self, si_fix, src_vn=None, ecmp_hash=None, flow_count=3):
        self.logger.info(
            'Will start a tcpdump on the left-interfaces of the Service Instances to find out which flow is entering which Service Instance')
        flowcount = 0
        result = True
        flow_pattern = {}
        svms = self.get_svms_in_si(si_fix)
        svms = sorted(set(svms))
        svm_list = si_fix.svm_list
        svm_index = 0
        vm_fix_pcap_pid_files = []

        # Capturing packets based upon source port
        src_port = "8000"
        filters = '\'(src port %s)\'' % (src_port)
        if None in svms:
            svms.remove(None)
        for svm_fixture in svm_list:
            svm = svm_fixture.vm_obj
            self.logger.info('SVM %s is in %s state' % (svm.name, svm.status))
            if svm.status == 'ACTIVE':
                svm_name = svm.name
                host_name = self.connections.orch.get_host_of_vm(svm)
                host = self.inputs.host_data[host_name]
                if src_vn is not None:
                    #tapintf = svm_fixture.tap_intf[src_vn.vn_fq_name]['name']
                    tapintf = self.connections.orch.get_vm_tap_interface(\
                                      svm_fixture.tap_intf[src_vn.vn_fq_name])
                else:
                    direction = 'left'
                    tapintf = self.get_bridge_svm_tapintf(svm_name, direction)
                if self.inputs.pcap_on_vm:
                    tcpdump_files = start_tcpdump_for_vm_intf(
                        None, [svm_list[svm_index]], None, filters=filters, pcap_on_vm=True, vm_intf='eth1', svm=True)
                    svm_index = svm_index + 1
                    vm_fix_pcap_pid_files.append(tcpdump_files)
                else:
                    session = ssh(
                        host['host_ip'], host['username'], host['password'])
                    cmd = 'sudo tcpdump -nni %s %s -c 20 > /tmp/%s_out.log' % (
                        tapintf, filters, tapintf)
                    execute_cmd(session, cmd, self.logger)
            else:
                self.logger.info('%s is not in ACTIVE state' % svm.name)
        sleep(15)

        self.logger.info('%%%%% Will check the result of tcpdump %%%%%')
        svms = self.get_svms_in_si(si_fix)
        svms = sorted(set(svms))
        if None in svms:
            svms.remove(None)
        svm_index = 0
        for svm_fixture in svm_list:
            svm = svm_fixture.vm_obj
            self.logger.info('SVM %s is in %s state' % (svm.name, svm.status))
            if svm.status == 'ACTIVE':
                svm_name = svm.name
                host_name = self.connections.orch.get_host_of_vm(svm)
                host = self.inputs.host_data[host_name]
                if src_vn is not None:
                    tapintf = self.connections.orch.get_vm_tap_interface(\
                                      svm_fixture.tap_intf[src_vn.vn_fq_name])
                    #tapintf = svm_fixture.tap_intf[src_vn.vn_fq_name]['name']
                else:
                    direction = 'left'
                    tapintf = self.get_bridge_svm_tapintf(svm_name, direction)
                if not self.inputs.pcap_on_vm:
                    session = ssh(
                        host['host_ip'], host['username'], host['password'])
                    output_cmd = 'cat /tmp/%s_out.log' % tapintf
                    out, err = execute_cmd_out(session, output_cmd, self.logger)
                else:
                    out, pkt_count = stop_tcpdump_for_vm_intf(
                        None, None, None, vm_fix_pcap_pid_files=vm_fix_pcap_pid_files[svm_index], svm=True)
                    svm_index = svm_index + 1
                    out = out[0]
                for i in range(0, flow_count):
                    dport = str(9000 + i)
                    if dport in out:
                        flowcount = flowcount + 1
                        self.logger.info(
                            'Flow with dport %s seen flowing inside %s' % (dport, svm_name))
                        flow_pattern[dport] = svm_name
            else:
                self.logger.info('%s is not in ACTIVE state' % svm.name)
        if flowcount > 0:
            self.logger.info(
                'Flows are distributed across the Service Instances as :')
            self.logger.info('%s' % flow_pattern)
        else:
            result = False

        if ecmp_hash and ecmp_hash != 'default':
            # count the number of hash fields set
            hash_var_count = sum(ecmp_hash.values())

            # In case only one hash field is set, all flows should go through a
            # single service instance. One exception here is destination_port:
            # the destination port varies across traffic streams, so traffic
            # will get load balanced even if the ecmp hash is configured with
            # "destination_port" alone.
            if hash_var_count == 1 and ('destination_port' not in ecmp_hash):
                flow_pattern_ref = flow_pattern['9000']
                if all(flow_pattern_ref == item for item in flow_pattern.values()):
                    self.logger.info(
                        'Flows are flowing through Single Service Instance: %s, as per config hash: %s' % (flow_pattern_ref, ecmp_hash))
                    self.logger.info('%s' % flow_pattern)
                else:
                    result = False
                    self.logger.error(
                        'Flows are flowing through multiple Service Instances:%s, whereas it should not as per config hash:%s' % (flow_pattern, ecmp_hash))
                    assert result, 'Config hash is not working for: %s' % ( ecmp_hash)
            # In case multiple ecmp hash fields are configured, the default ecmp
            # hash is present, or only 'destination_port' is configured in
            # ecmp_hash
            else:
                flow_pattern_ref = flow_pattern['9000']
                if all(flow_pattern_ref == item for item in flow_pattern.values()):
                    result = False
                    self.logger.error(
                        'Flows are flowing through Single Service Instance:%s, whereas it should not as per config hash:%s' % (flow_pattern_ref, ecmp_hash))
                    #assert result, 'Config hash is not working fine.'
                else:
                    self.logger.info(
                        'Flows are flowing through multiple Service Instances:%s, as per config hash: %s' % (flow_pattern, ecmp_hash))
                    self.logger.info('%s' % flow_pattern)
        else:
            assert result, 'No Flow distribution seen'