def search_in_pcap(session, pcap, search_string):
    """Return True if *search_string* appears in the decoded output of *pcap*.

    :param session: ssh session to the host that holds the pcap file.
    :param pcap: path of the pcap file to read with ``tcpdump -r``.
    :param search_string: substring to look for in the tcpdump output.
    """
    cmd = 'tcpdump -v -r %s | grep "%s"' % (pcap, search_string)
    out, err = execute_cmd_out(session, cmd)
    # grep already filters the remote output; the membership test on the
    # captured stdout gives the boolean verdict directly.
    return search_string in out
示例#2
0
def verify_tcpdump_count(obj, session, pcap, exp_count=None, exact_match=True, mac=None):
    """Count packets recorded in *pcap* and compare against *exp_count*.

    :param obj: fixture carrying a ``logger`` attribute.
    :param session: ssh session to the host holding the pcap.
    :param exp_count: expected packet count; None means "any non-zero count".
    :param exact_match: when False, at least *exp_count* packets is enough.
    :param mac: optional MAC address used to grep-filter the tcpdump output.
    :returns: True when the count matches the expectation; the capture is
        stopped via ``stop_tcpdump_for_vm_intf`` only on success.
    """
    # Build the remote counting pipeline, optionally narrowed to one MAC.
    if mac:
        cmd = 'tcpdump -r %s | grep %s | wc -l' % (pcap,mac)
    else:
        cmd = 'tcpdump -r %s | wc -l' % pcap
    out, err = execute_cmd_out(session, cmd, obj.logger)
    count = int(out.strip('\n'))
    result = True
    if exp_count is None:
        # No expectation given: any packets at all is a pass.
        if count == 0:
            obj.logger.warn("No packets are found in tcpdump output file %s but \
                                        expected some packets" % (pcap))
            result = False
    elif exact_match:
        if count != exp_count:
            obj.logger.warn("%s packets are found in tcpdump output file %s but \
                                        expected %s" % (count, pcap, exp_count))
            result = False
    elif count > exp_count:
        obj.logger.debug("%s packets are found in tcpdump output file %s but \
                             expected %s, which is fine" % (count, pcap, exp_count))
    elif count < exp_count:
        obj.logger.warn("%s packets are found in tcpdump output file %s but \
                             expected atleast %s" % (count, pcap, exp_count))
        result = False

    if result:
        obj.logger.info(
            "%s packets are found in tcpdump output as expected",
            count)
        stop_tcpdump_for_vm_intf(obj, session, pcap)
    return result
示例#3
0
 def verify_vrrp_action(self, src_vm, dst_vm, ip, vsrx=False):
     """Ping *ip* from *src_vm* and confirm *dst_vm* (the expected VRRP
     master) answers, by sniffing its tap interface on the compute node.

     :param vsrx: when True the tap of the second VN (vn_fq_names[1]) is
         sniffed instead of the primary VN's tap.
     :returns: True if *ip* shows up in the captured traffic.
     """
     self.logger.info('Will ping %s from %s and check if %s responds' % (
         ip, src_vm.vm_name, dst_vm.vm_name))
     compute_ip = dst_vm.vm_node_ip
     host_info = self.inputs.host_data[compute_ip]
     session = ssh(compute_ip, host_info['username'], host_info['password'])
     # vSRX instances expose the relevant tap on their second network.
     vn_key = dst_vm.vn_fq_names[1] if vsrx else dst_vm.vn_fq_name
     vm_tapintf = dst_vm.tap_intf[vn_key]['name']
     cmd = 'tcpdump -nni %s -c 10 > /tmp/%s_out.log' % (
         vm_tapintf, vm_tapintf)
     execute_cmd(session, cmd, self.logger)
     assert src_vm.ping_with_certainty(ip), 'Ping to vIP failure'
     output, err = execute_cmd_out(
         session, 'cat /tmp/%s_out.log' % vm_tapintf, self.logger)
     if ip not in output:
         self.logger.error('ICMP Requests not seen on the VRRP Master')
         return False
     self.logger.info(
         '%s is seen responding to ICMP Requests' % dst_vm.vm_name)
     return True
示例#4
0
 def verify_mirroring(self, si_fix, src_vm, dst_vm, mirr_vm=None):
     """Verify that mirrored traffic reaches the analyzer VM.

     Sniffs the analyzer's tap interface while pinging from *src_vm* to
     *dst_vm*, then looks for the mirroring UDP port ('8099') in the capture.

     :param mirr_vm: explicit mirror VM fixture; when None the first SVM of
         *si_fix* is used and its bridge/left tap is sniffed.
     :returns: True when mirrored packets are seen (or the SVM is not ACTIVE,
         in which case nothing is checked and the initial True is returned).
     """
     result = True
     if mirr_vm:
         svm = mirr_vm.vm_obj
     else:
         svms = self.get_svms_in_si(si_fix)
         svm = svms[0]
     if svm.status == 'ACTIVE':
         svm_name = svm.name
         host = self.get_svm_compute(svm_name)
         if mirr_vm:
             tapintf = self.get_svm_tapintf(svm_name)
         else:
             tapintf = self.get_bridge_svm_tapintf(svm_name, 'left')
         session = ssh(host['host_ip'], host['username'], host['password'])
         cmd = 'sudo tcpdump -nni %s -c 5 > /tmp/%s_out.log' % (tapintf, tapintf)
         execute_cmd(session, cmd, self.logger)
         assert src_vm.ping_with_certainty(dst_vm.vm_ip)
         sleep(10)
         output_cmd = 'sudo cat /tmp/%s_out.log' % tapintf
         out, err = execute_cmd_out(session, output_cmd, self.logger)
         # Fix: 'print out' was Python 2 statement syntax (SyntaxError on py3).
         print(out)
         if '8099' in out:
             self.logger.info('Mirroring action verified')
         else:
             result = False
             self.logger.warning('No mirroring action seen')
     return result
示例#5
0
 def stop_tcpdump_on_intf(self, host, tapintf):
     """Wait for the tcpdump on *tapintf* to finish and return its output.

     :param host: dict with 'host_ip', 'username' and 'password' keys.
     :param tapintf: tap interface name; the capture is read from
         /tmp/<tapintf>_out.log on that host.
     """
     session = ssh(host['host_ip'], host['username'], host['password'])
     self.logger.info('Waiting for tcpdump to complete')
     time.sleep(20)
     out, err = execute_cmd_out(
         session, 'cat /tmp/%s_out.log' % tapintf, self.logger)
     return out
示例#6
0
 def stop_tcpdump_on_intf(self, host, tapintf):
     """Give tcpdump time to finish, then return the capture log contents."""
     ssh_session = ssh(host["host_ip"], host["username"], host["password"])
     self.logger.info("Waiting for tcpdump to complete")
     time.sleep(20)
     log_path_cmd = "cat /tmp/%s_out.log" % tapintf
     out, err = execute_cmd_out(ssh_session, log_path_cmd, self.logger)
     return out
示例#7
0
def check_pcap_file_exists(session, pcap, expect=True):
    """Check whether the pcap file exists under /tmp on the remote host.

    :param session: ssh session to the remote host.
    :param pcap: file name to look for (grep -w match against /tmp listing).
    :param expect: the desired presence state.
    :returns: True when the file's presence matches *expect*.
    """
    cmd = 'ls -d /tmp/* | grep -w %s ' % (pcap)
    out, err = execute_cmd_out(session, cmd)
    # Non-empty grep output means the file is there.
    exists = bool(out)
    logger = contrail_logging.getLogger(__name__)
    if expect and not exists:
        logger.warn("Pcap file not created yet..waiting")
    # Clearer than the old precedence-dependent 'a and b or c and d' chain:
    # the check passes exactly when reality agrees with the expectation.
    return exists == bool(expect)
示例#8
0
 def stop_tcpdump(self, session, pcap):
     """Kill tcpdump, count the packets written to *pcap*, delete the file
     and return the packet count.
     """
     self.logger.info("Waiting for the tcpdump write to complete.")
     sleep(30)
     execute_cmd(session, 'sudo kill $(pidof tcpdump)', self.logger)
     out, err = execute_cmd_out(
         session, 'sudo tcpdump -r %s | wc -l' % pcap, self.logger)
     pkt_count = int(out.strip('\n'))
     # Remove the capture file once counted.
     execute_cmd(session, 'sudo rm -f %s' % pcap, self.logger)
     return pkt_count
示例#9
0
 def stop_tcpdump(self, session, pcap):
     """Terminate the running tcpdump and report how many packets it wrote.

     The pcap file is removed after counting.
     """
     self.logger.info("Waiting for the tcpdump write to complete.")
     sleep(30)
     for shell_cmd in ('sudo kill $(pidof tcpdump)',):
         execute_cmd(session, shell_cmd, self.logger)
     count_cmd = 'sudo tcpdump -r %s | wc -l' % pcap
     out, err = execute_cmd_out(session, count_cmd, self.logger)
     total = int(out.strip('\n'))
     execute_cmd(session, 'sudo rm -f %s' % pcap, self.logger)
     return total
示例#10
0
 def check_bfd_packets(self, vm, vn):
     """Sniff *vm*'s tap interface for 30 seconds and look for BFD packets.

     :param vm: VM fixture whose tap interface (for *vn*) is sniffed on its
         compute node.
     :param vn: VN fixture used to pick the right tap interface.
     :returns: True when any BFD packet was captured.
     """
     interface = vm.tap_intf[vn.vn_fq_name]['name']
     ip = self.inputs.host_data[vm.vm_node_ip]['host_ip']
     session = ssh(ip,self.inputs.host_data[ip]['username'],self.inputs.host_data[ip]['password'])
     # 'timeout 30' bounds the capture; grep keeps only BFD lines.
     cmd = "sudo timeout 30 tcpdump -nei %s ip | grep BFD" % (interface)
     self.logger.info("Starting tcpdump to capture the BFD packets on %s in server %s" % (interface, ip))
     out, err = execute_cmd_out(session, cmd, self.logger)
     # Any grep output at all means BFD traffic was seen.
     return bool(out)
示例#11
0
 def stop_tcp_dump(self, sessions):
     """Stop tcpdump and return the number of packets in the capture.

     :param sessions: pair of (ssh session, pcap path).
     """
     self.logger.info("Waiting for the tcpdump write to complete.")
     sleep(30)
     ssh_session, pcap_path = sessions[0], sessions[1]
     execute_cmd(ssh_session, 'kill $(pidof tcpdump)', self.logger)
     execute_cmd(ssh_session, 'sync', self.logger)
     out, err = execute_cmd_out(
         ssh_session, 'tcpdump -r %s | wc -l' % pcap_path, self.logger)
     # NOTE: removal of the pcap file is intentionally left disabled.
     return int(out.strip('\n'))
示例#12
0
 def stop_tcpdump(self, session, pcap, filt=""):
     """Kill tcpdump, count packets in *pcap* matching the optional read
     filter *filt*, remove the capture file, and return the count.
     """
     self.logger.info("Waiting for the tcpdump write to complete.")
     sleep(30)
     for shell_cmd in ("kill $(pidof tcpdump)", "sync"):
         execute_cmd(session, shell_cmd, self.logger)
     out, err = execute_cmd_out(
         session, "tcpdump -r %s %s | wc -l" % (pcap, filt), self.logger)
     packet_total = int(out.strip("\n"))
     execute_cmd(session, "rm -f %s" % pcap, self.logger)
     return packet_total
示例#13
0
 def stop_tcp_dump(self, sessions):
     """Stop the tcpdump started earlier and report the captured packet count.

     ``sessions[0]`` is the ssh session, ``sessions[1]`` the pcap path.
     """
     self.logger.info("Waiting for the tcpdump write to complete.")
     sleep(30)
     execute_cmd(sessions[0], 'kill $(pidof tcpdump)', self.logger)
     execute_cmd(sessions[0], 'sync', self.logger)
     count_cmd = 'tcpdump -r %s | wc -l' % sessions[1]
     out, err = execute_cmd_out(sessions[0], count_cmd, self.logger)
     # The pcap file is deliberately not removed here.
     return int(out.strip('\n'))
示例#14
0
 def stop_tcpdump(self, session, pcap, filt=''):
     """Kill the tcpdump writing *pcap* and return the (filtered) packet count.

     The capture file is left on disk and re-read once so its contents land
     in the log for debugging.
     """
     self.logger.debug("Waiting for the tcpdump write to complete.")
     sleep(2)
     # Kill only the tcpdump process attached to this particular pcap.
     kill_cmd = 'sudo kill $(ps -ef|grep tcpdump | grep %s| awk \'{print $2}\')' % pcap
     execute_cmd(session, kill_cmd, self.logger)
     execute_cmd(session, 'sync', self.logger)
     sleep(3)
     out, err = execute_cmd_out(
         session, 'sudo tcpdump -n -r %s %s | wc -l' % (pcap, filt), self.logger)
     num_pkts = int(out.strip('\n'))
     # Temporary debugging aid: dump the capture contents to the log.
     execute_cmd(session, 'sudo tcpdump -n -r %s' % pcap, self.logger)
     return num_pkts
示例#15
0
def stop_tcpdump_on_vm_verify_cnt(obj, session, pcap, exp_count=None):
    """Count packets in *pcap*, verify the count, then clean up the capture.

    :param obj: fixture carrying a ``logger`` attribute.
    :param exp_count: expected packet count; None means "any non-zero count".
    :returns: True on success; the pcap file and the tcpdump process are
        cleaned up only on the success path (preserved original behaviour).

    Fixes: the two lines inside the first warn-branch were tab-indented in a
    space-indented file (a TabError under Python 3); the expectation check
    now uses ``is not None`` so an explicit ``exp_count=0`` is honoured.
    """
    cmd = 'tcpdump -r %s | wc -l' % pcap
    out, err = execute_cmd_out(session, cmd, obj.logger)
    count = int(out.strip('\n'))
    if exp_count is not None and count != exp_count:
        obj.logger.warn("%s packets are found in tcpdump output but expected %s" % (count, exp_count))
        return False
    elif count == 0:
        obj.logger.warn("No packets are found in tcpdump output but expected something")
        return False

    obj.logger.info("%s packets are found in tcpdump output", count)
    cmd = 'rm -f %s' % pcap
    execute_cmd(session, cmd, obj.logger)
    cmd = 'kill $(pidof tcpdump)'
    execute_cmd(session, cmd, obj.logger)
    return True
示例#16
0
def verify_tcpdump_count(obj, session, pcap, exp_count=None, mac=None, raw_count=False,
        exact_match=True, vm_fix_pcap_pid_files=None, svm=False, grep_string=None):
    """Count packets captured in *pcap* and verify against *exp_count*.

    :param mac: when given, count only frames to/from this MAC (ether host).
    :param raw_count: count all lines (wc -l) instead of grepping for
        *grep_string* (defaults to 'length').
    :param exact_match: when False, at least *exp_count* packets is enough.
    :param vm_fix_pcap_pid_files: when given, counting is delegated to
        ``stop_tcpdump_for_vm_intf`` for captures running inside VMs.
    :returns: True when the count matches the expectation.

    Fix: the mutable default argument ``vm_fix_pcap_pid_files=[]`` is
    replaced with None to avoid the shared-default pitfall.
    """
    vm_fix_pcap_pid_files = vm_fix_pcap_pid_files or []
    grep_string = grep_string or 'length'
    if raw_count:
        new_grep_string = 'wc -l'
    else:
        new_grep_string = 'grep -c %s' % grep_string
    if mac:
        cmd = 'sudo tcpdump -nnr %s ether host %s | %s' % (pcap, mac, new_grep_string)
    else:
        cmd = 'sudo tcpdump -nnr %s | %s' % (pcap, new_grep_string)
    if not vm_fix_pcap_pid_files:
        out, err = execute_cmd_out(session, cmd, obj.logger)
        count = int(out.strip('\n'))
    else:
        output, count = stop_tcpdump_for_vm_intf(
            None, None, pcap, vm_fix_pcap_pid_files=vm_fix_pcap_pid_files, svm=svm)
    result = True
    if exp_count is not None:
        if count != exp_count and exact_match:
            obj.logger.warn("%s packets are found in tcpdump output file %s but \
                                        expected %s" % (count, pcap, exp_count))
            result = False
        elif count > exp_count and not exact_match:
            obj.logger.debug("%s packets are found in tcpdump output file %s but \
                             expected %s, which is fine" % (count, pcap, exp_count))
        elif count < exp_count and not exact_match:
            obj.logger.warn("%s packets are found in tcpdump output file %s but \
                             expected atleast %s" % (count, pcap, exp_count))
            result = False
    else:
        if count == 0:
            obj.logger.warn("No packets are found in tcpdump output file %s but \
                                        expected some packets" % (pcap))
            result = False

    if result:
        obj.logger.info(
            "%s packets are found in tcpdump output as expected",
            count)
        if not vm_fix_pcap_pid_files:
            stop_tcpdump_for_vm_intf(obj, session, pcap)
    return result
示例#17
0
 def verify_vrrp_action(self, src_vm, dst_vm, ip):
     """Ping *ip* from *src_vm* and confirm *dst_vm* (expected VRRP master)
     answers on its tap interface.

     :returns: True when *ip* appears in the capture taken on dst_vm's tap.
     """
     self.logger.info('Will ping %s from %s and check if %s responds' %
                      (ip, src_vm.vm_name, dst_vm.vm_name))
     compute_ip = dst_vm.vm_node_ip
     host_info = self.inputs.host_data[compute_ip]
     session = ssh(compute_ip, host_info['username'], host_info['password'])
     tap = dst_vm.tap_intf[dst_vm.vn_fq_name]['name']
     execute_cmd(session,
                 'tcpdump -nni %s -c 10 > /tmp/%s_out.log' % (tap, tap),
                 self.logger)
     assert src_vm.ping_with_certainty(ip), 'Ping to vIP failure'
     output, err = execute_cmd_out(
         session, 'cat /tmp/%s_out.log' % tap, self.logger)
     if ip not in output:
         self.logger.error('ICMP Requests not seen on the VRRP Master')
         return False
     self.logger.info('%s is seen responding to ICMP Requests' %
                      dst_vm.vm_name)
     return True
示例#18
0
def verify_tcpdump_count(obj, session, pcap, exp_count=None):
    """Read the packet count of *pcap* and check it against *exp_count*.

    :param exp_count: exact expected count; None means "any non-zero count".
    :returns: True on match; on success the capture is also stopped via
        ``stop_tcpdump_for_vm_intf``.
    """
    out, err = execute_cmd_out(
        session, 'tcpdump -r %s | wc -l' % pcap, obj.logger)
    count = int(out.strip('\n'))
    ok = True
    if exp_count is None:
        if count == 0:
            obj.logger.warn("No packets are found in tcpdump output file %s but \
                                        expected some packets" % (pcap))
            ok = False
    elif count != exp_count:
        obj.logger.warn("%s packets are found in tcpdump output file %s but \
                                        expected %s" % (count, pcap, exp_count))
        ok = False

    if ok:
        obj.logger.info(
            "%s packets are found in tcpdump output as expected",
            count)
        stop_tcpdump_for_vm_intf(obj, session, pcap)
    return ok
示例#19
0
File: verify.py  Project: nuthanc/tf-test
 def stop_tcpdump(self, session, pcap, filt='', vm_fix_pcap_pid_files=None, pcap_on_vm=False):
     """Stop tcpdump and report packet counts.

     Host mode (``pcap_on_vm`` False): kill the tcpdump attached to *pcap*
     on the compute node and return the packet count (int).
     VM mode: for each (vm_fixture, pcap, pidfile) triple, kill the tcpdump
     inside the VM, collect its output, and return (outputs, total_packets).

     Fixes: mutable default argument ``[]`` → None; bare ``except:`` →
     ``except IndexError``; ``total_pkts`` initialised so an empty triple
     list no longer raises NameError.
     """
     self.logger.debug("Waiting for the tcpdump write to complete.")
     sleep(2)
     if not pcap_on_vm:
         cmd = 'sudo kill $(ps -ef|grep tcpdump | grep %s| awk \'{print $2}\')' %pcap
         execute_cmd(session, cmd, self.logger)
         execute_cmd(session, 'sync', self.logger)
         sleep(3)
         cmd = 'sudo tcpdump -n -r %s %s | wc -l' % (pcap, filt)
         out, err = execute_cmd_out(session, cmd, self.logger)
         count = int(out.strip('\n'))
         # Temporary for debugging: dump the capture contents to the log.
         cmd = 'sudo tcpdump -n -r %s' % pcap
         execute_cmd(session, cmd, self.logger)
         return count
     else:
         output = []
         pkt_count = []
         total_pkts = 0
         for vm_fix, pcap, pidfile in (vm_fix_pcap_pid_files or []):
             cmd_to_output  = 'tcpdump -nr %s %s' % (pcap, filt)
             cmd_to_kill = 'cat %s | xargs kill ' % (pidfile)
             count = cmd_to_output + '| wc -l'
             vm_fix.run_cmd_on_vm(cmds=[cmd_to_kill], as_sudo=True)
             sleep(2)
             vm_fix.run_cmd_on_vm(cmds=[cmd_to_output], as_sudo=True)
             output.append(vm_fix.return_output_cmd_dict[cmd_to_output])
             vm_fix.run_cmd_on_vm(cmds=[count], as_sudo=True)
             pkt_count_list = vm_fix.return_output_cmd_dict[count].split('\n')
             # The count line's position varies with the command echo; fall
             # back to the earlier line when index 2 is absent.
             try:
                 pkts = pkt_count_list[2]
             except IndexError:
                 pkts = pkt_count_list[1]
             pkts = int(pkts)
             pkt_count.append(pkts)
             total_pkts = sum(pkt_count)
         return output, total_pkts
示例#20
0
 def stop_tcpdump(self, session, pcap, filt='', vm_fix_pcap_pid_files=None, pcap_on_vm=False):
     """Stop tcpdump on the compute host or inside VMs and return counts.

     With ``pcap_on_vm`` False: returns the packet count (int) of *pcap* on
     the host reached via *session*. With ``pcap_on_vm`` True: iterates the
     (vm_fixture, pcap, pidfile) triples and returns (outputs, total_packets).

     Fixes applied: mutable default argument, bare ``except:`` narrowed to
     ``IndexError``, and ``total_pkts`` pre-initialised for the empty case.
     """
     self.logger.debug("Waiting for the tcpdump write to complete.")
     sleep(2)
     if not pcap_on_vm:
         cmd = 'sudo kill $(ps -ef|grep tcpdump | grep %s| awk \'{print $2}\')' %pcap
         execute_cmd(session, cmd, self.logger)
         execute_cmd(session, 'sync', self.logger)
         sleep(3)
         cmd = 'sudo tcpdump -n -r %s %s | wc -l' % (pcap, filt)
         out, err = execute_cmd_out(session, cmd, self.logger)
         count = int(out.strip('\n'))
         # Temporary for debugging: log the raw capture contents.
         cmd = 'sudo tcpdump -n -r %s' % pcap
         execute_cmd(session, cmd, self.logger)
         return count
     else:
         output = []
         pkt_count = []
         total_pkts = 0
         for vm_fix, pcap, pidfile in (vm_fix_pcap_pid_files or []):
             cmd_to_output  = 'tcpdump -nr %s %s' % (pcap, filt)
             cmd_to_kill = 'cat %s | xargs kill ' % (pidfile)
             count = cmd_to_output + '| wc -l'
             vm_fix.run_cmd_on_vm(cmds=[cmd_to_kill], as_sudo=True)
             sleep(2)
             vm_fix.run_cmd_on_vm(cmds=[cmd_to_output], as_sudo=True)
             output.append(vm_fix.return_output_cmd_dict[cmd_to_output])
             vm_fix.run_cmd_on_vm(cmds=[count], as_sudo=True)
             pkt_count_list = vm_fix.return_output_cmd_dict[count].split('\n')
             # Output layout varies; prefer line 2, fall back to line 1.
             try:
                 pkts = pkt_count_list[2]
             except IndexError:
                 pkts = pkt_count_list[1]
             pkts = int(pkts)
             pkt_count.append(pkts)
             total_pkts = sum(pkt_count)
         return output, total_pkts
示例#21
0
 def verify_flow_thru_si(self, si_fix, src_vn):
     """Verify UDP flows (dports 9000-9002) are spread across the SIs.

     Starts a short tcpdump on each SI's tap for *src_vn*, then counts how
     many distinct dports were seen across the instances. Asserts when all
     traffic landed on a single instance.
     """
     self.logger.info('Will start a tcpdump on the left-interfaces of the Service Instances to find out which flow is entering which Service Instance')
     flowcount = 0
     result = True
     si_count = si_fix.max_inst
     # Phase 1: launch a bounded capture on every instance's tap.
     for idx in range(1, si_count + 1):
         svm_name = si_fix.si_name + '_%s' % idx
         host = self.get_svm_compute(svm_name)
         tapintf = self.get_svm_tapintf_of_vn(svm_name, src_vn)
         session = ssh(host['host_ip'], host['username'], host['password'])
         cmd = 'tcpdump -ni %s proto 17 -vvv -c 5 > /tmp/%s_out.log' % (tapintf,tapintf)
         execute_cmd(session, cmd, self.logger)
     sleep(5)
     # Phase 2: inspect each capture for the three well-known dports.
     self.logger.info('***** Will check the result of tcpdump *****')
     for idx in range(1, si_count + 1):
         svm_name = si_fix.si_name + '_%s' % idx
         host = self.get_svm_compute(svm_name)
         tapintf = self.get_svm_tapintf_of_vn(svm_name, src_vn)
         session = ssh(host['host_ip'], host['username'], host['password'])
         out, err = execute_cmd_out(
             session, 'cat /tmp/%s_out.log' % tapintf, self.logger)
         for dport in ('9000', '9001', '9002'):
             if dport in out:
                 flowcount = flowcount + 1
                 self.logger.info('Flow with dport %s seen flowing inside %s' % (dport, svm_name))
     if flowcount > 1:
         self.logger.info('Flows are distributed across the Service Instances')
     else:
         result = False
     assert result, 'No Flow distribution seen'
示例#22
0
    def verify_flow_thru_si(self, si_fix, src_vn=None, ecmp_hash=None, flow_count=3):
        """Verify how traffic flows are distributed across the SVMs of *si_fix*.

        Phase 1 starts a tcpdump (filtered on src port 8000) on the relevant
        tap of every ACTIVE service VM (or inside the VM itself when
        ``self.inputs.pcap_on_vm`` is set). Phase 2 inspects each capture for
        destination ports 9000 .. 9000+flow_count-1 and records which dport
        landed on which SVM.

        :param si_fix: service-instance fixture whose SVMs are inspected.
        :param src_vn: when given, the SVM tap for this VN is sniffed;
            otherwise the 'left' bridge tap is used.
        :param ecmp_hash: ECMP hash-field dict or 'default'/None. With exactly
            one hash field set (and it is not destination_port), all flows are
            expected on a single SVM; otherwise they must spread.
        :param flow_count: number of dport-based flows (9000, 9001, ...) to
            look for.
        :raises AssertionError: when the observed distribution contradicts
            the expectation derived from *ecmp_hash*.
        """
        self.logger.info(
            'Will start a tcpdump on the left-interfaces of the Service Instances to find out which flow is entering which Service Instance')
        flowcount = 0
        result = True
        # Maps dport -> SVM name that received it.
        flow_pattern = {}
        svms = self.get_svms_in_si(si_fix)
        svms = sorted(set(svms))
        svm_list = si_fix.svm_list
        svm_index = 0
        vm_fix_pcap_pid_files = []

        # Capturing packets based upon source port
        src_port = "8000"
        filters = '\'(src port %s)\'' % (src_port)
        if None in svms:
            svms.remove(None)
        for svm_fixture in svm_list:
            svm = svm_fixture.vm_obj
            self.logger.info('SVM %s is in %s state' % (svm.name, svm.status))
            if svm.status == 'ACTIVE':
                svm_name = svm.name
                host_name = self.connections.orch.get_host_of_vm(svm)
                host = self.inputs.host_data[host_name]
                if src_vn is not None:
                    #tapintf = svm_fixture.tap_intf[src_vn.vn_fq_name]['name']
                    tapintf = self.connections.orch.get_vm_tap_interface(\
                                      svm_fixture.tap_intf[src_vn.vn_fq_name])
                else:
                    direction = 'left'
                    tapintf = self.get_bridge_svm_tapintf(svm_name, direction)
                if self.inputs.pcap_on_vm:
                    # Capture inside the guest on eth1 instead of on the tap.
                    tcpdump_files = start_tcpdump_for_vm_intf(
                        None, [svm_list[svm_index]], None, filters=filters, pcap_on_vm=True, vm_intf='eth1', svm=True)
                    svm_index = svm_index + 1
                    vm_fix_pcap_pid_files.append(tcpdump_files)
                else:
                    session = ssh(
                        host['host_ip'], host['username'], host['password'])
                    cmd = 'sudo tcpdump -nni %s %s -c 20 > /tmp/%s_out.log' % (
                        tapintf, filters, tapintf)
                    execute_cmd(session, cmd, self.logger)
            else:
                self.logger.info('%s is not in ACTIVE state' % svm.name)
        # Give the captures time to accumulate traffic.
        sleep(15)

        self.logger.info('%%%%% Will check the result of tcpdump %%%%%')
        svms = self.get_svms_in_si(si_fix)
        svms = sorted(set(svms))
        if None in svms:
            svms.remove(None)
        svm_index = 0
        for svm_fixture in svm_list:
            svm = svm_fixture.vm_obj
            self.logger.info('SVM %s is in %s state' % (svm.name, svm.status))
            if svm.status == 'ACTIVE':
                svm_name = svm.name
                host_name = self.connections.orch.get_host_of_vm(svm)
                host = self.inputs.host_data[host_name]
                if src_vn is not None:
                    tapintf = self.connections.orch.get_vm_tap_interface(\
                                      svm_fixture.tap_intf[src_vn.vn_fq_name])
                    #tapintf = svm_fixture.tap_intf[src_vn.vn_fq_name]['name']
                else:
                    direction = 'left'
                    tapintf = self.get_bridge_svm_tapintf(svm_name, direction)
                if not self.inputs.pcap_on_vm:
                    session = ssh(
                        host['host_ip'], host['username'], host['password'])
                    output_cmd = 'cat /tmp/%s_out.log' % tapintf
                    out, err = execute_cmd_out(session, output_cmd, self.logger)
                else:
                    out, pkt_count = stop_tcpdump_for_vm_intf(
                        None, None, None, vm_fix_pcap_pid_files=vm_fix_pcap_pid_files[svm_index], svm=True)
                    svm_index = svm_index + 1
                    out = out[0]
                # Tally which of the expected dports this SVM saw.
                for i in range(0,flow_count):
                    dport = str(9000+i)
                    if dport in out:
                        flowcount = flowcount + 1
                        self.logger.info(
                            'Flow with dport %s seen flowing inside %s' % (dport,svm_name))
                        flow_pattern[dport] = svm_name
            else:
                self.logger.info('%s is not in ACTIVE state' % svm.name)
        if flowcount > 0:
            self.logger.info(
                'Flows are distributed across the Service Instances as :')
            self.logger.info('%s' % flow_pattern)
        else:
            result = False

        if ecmp_hash and ecmp_hash != 'default':
            # count the number of hash fields set
            hash_var_count = sum(ecmp_hash.values())

            # Incase, only one hash field is set, all flows should go through
            # single service instance. One exception here is destination_port.
            # Destination port varies for traffic streams.So, for destination
            # port, traffic will get load balanced even if ecmp hash is
            # configured "destination_port" alone.
            if hash_var_count == 1 and (not 'destination_port' in ecmp_hash):
                flow_pattern_ref = flow_pattern['9000']
                if all(flow_pattern_ref  == item for item in flow_pattern.values()):
                    self.logger.info(
                        'Flows are flowing through Single Service Instance: %s, as per config hash: %s' % (flow_pattern_ref, ecmp_hash))
                    self.logger.info('%s' % flow_pattern)
                else:
                    result = False
                    self.logger.error(
                        'Flows are flowing through multiple Service Instances:%s, where as it should not as per config hash:%s' % (flow_pattern, ecmp_hash))
                    assert result, 'Config hash is not working for: %s' % ( ecmp_hash)
            # Incase, multiple ecmp hash fields are configured or default ecmp
            # hash is present or only 'destionation_port' is configured in
            # ecmp_hash
            else:
                flow_pattern_ref = flow_pattern['9000']
                if all(flow_pattern_ref  == item for item in flow_pattern.values()):
                    result = False
                    self.logger.error(
                        'Flows are flowing through Single Service Instance:%s, where as it should not as per config hash:%s' % (flow_pattern_ref, ecmp_hash))
                    #assert result, 'Config hash is not working fine.'
                else:
                    self.logger.info(
                        'Flows are flowing through multiple Service Instances:%s, as per config hash: %s' % (flow_pattern, ecmp_hash))
                    self.logger.info('%s' % flow_pattern)
        else:
            assert result, 'No Flow distribution seen'
示例#23
0
    def tcpdump_analyze_on_compute(
            self,
            comp_ip,
            pcaptype,
            vxlan_id=None,
            vlan_id=None):
        """Analyze the encapsulation pcaps previously captured on *comp_ip*.

        Reads the fixed capture files /tmp/encap-udp.pcap, /tmp/encap-gre.pcap
        and (for VXLAN) /tmp/encap-vxlan.pcap on the compute node and checks
        that only the encapsulation named by *pcaptype* ('UDP', 'GRE' or
        'VXLAN') carried traffic.

        :param vxlan_id: when given (VXLAN mode), every vxlan packet must
            also contain this id in its hex dump.
        :param vlan_id: when given (VXLAN mode), same check for the vlan id.
        :returns: True on success.
        :raises AssertionError: with a descriptive message on any mismatch.

        NOTE(review): tcpdump is stopped via tcpdump_stop_on_compute only on
        some paths (GRE and VXLAN branches, not the UDP success path) —
        presumably intentional, but worth confirming with callers.
        """
        sleep(2)
        sessions = {}
        compute_user = self.inputs.host_data[comp_ip]['username']
        compute_password = self.inputs.host_data[comp_ip]['password']
        session = ssh(comp_ip, compute_user, compute_password)
        self.logger.info("Analyzing on compute node %s" % comp_ip)
        if pcaptype == 'UDP':
            pcaps1 = '/tmp/encap-udp.pcap'
            pcaps2 = '/tmp/encap-gre.pcap'
            # Expect UDP-encapsulated packets and no GRE packets.
            cmd2 = 'tcpdump  -r %s | grep UDP |wc -l' % pcaps1
            out2, err = execute_cmd_out(session, cmd2, self.logger)
            cmd3 = 'tcpdump  -r %s | grep GRE | wc -l' % pcaps2
            out3, err = execute_cmd_out(session, cmd3, self.logger)
            count2 = int(out2.strip('\n'))
            count3 = int(out3.strip('\n'))
            if count2 != 0 and count3 == 0:
                self.logger.info(
                    "%s UDP encapsulated packets are seen and %s GRE encapsulated packets are seen as expected" %
                    (count2, count3))
                return True
            else:
                errmsg = "%s UDP encapsulated packets are seen and %s GRE encapsulated packets are seen.Not expected" % (
                    count2, count3)
                self.logger.error(errmsg)
                assert False, errmsg
        elif pcaptype == 'GRE':
            pcaps1 = '/tmp/encap-udp.pcap'
            pcaps2 = '/tmp/encap-gre.pcap'
            # Expect GRE-encapsulated packets and no UDP packets.
            cmd2 = 'tcpdump  -r %s | grep UDP |wc -l' % pcaps1
            out2, err = execute_cmd_out(session, cmd2, self.logger)
            cmd3 = 'tcpdump  -r %s | grep GRE | wc -l' % pcaps2
            out3, err = execute_cmd_out(session, cmd3, self.logger)
            count2 = int(out2.strip('\n'))
            count3 = int(out3.strip('\n'))
            if count2 == 0 and count3 != 0:
                self.logger.info(
                    "%s GRE encapsulated packets are seen and %s UDP encapsulated packets are seen as expected" %
                    (count3, count2))
                # self.tcpdump_stop_on_all_compute()
                self.tcpdump_stop_on_compute(comp_ip)
                return True
            else:
                errmsg = "%s UDP encapsulated packets are seen and %s GRE encapsulated packets are seen.Not expected" % (
                    count2, count3)
                self.logger.error(errmsg)
                # self.tcpdump_stop_on_all_compute()
                self.tcpdump_stop_on_compute(comp_ip)
                assert False, errmsg

        elif pcaptype == 'VXLAN':
            pcaps1 = '/tmp/encap-udp.pcap'
            pcaps2 = '/tmp/encap-gre.pcap'
            pcaps3 = '/tmp/encap-vxlan.pcap'
            # Expect vxlan traffic only: the plain-UDP and GRE captures must
            # be empty while the vxlan capture carries UDP-framed packets.
            cmd2 = 'tcpdump  -r %s | grep UDP |wc -l' % pcaps1
            out2, err = execute_cmd_out(session, cmd2, self.logger)
            cmd3 = 'tcpdump  -r %s | grep GRE | wc -l' % pcaps2
            out3, err = execute_cmd_out(session, cmd3, self.logger)
            count2 = int(out2.strip('\n'))
            count3 = int(out3.strip('\n'))

            cmd3 = 'tcpdump  -r %s | grep UDP |wc -l' % pcaps3
            out3, err = execute_cmd_out(session, cmd3, self.logger)
            count = int(out3.strip('\n'))

            if count2 == 0 and count3 == 0 and count != 0:
                self.logger.info(
                    "%s GRE encapsulated packets are seen and %s UDP encapsulated packets are seen and %s vxlan packets are seen  as expected" %
                    (count3, count2, count))
                # self.tcpdump_stop_on_all_compute()
                if vxlan_id is not None:
                    # Every vxlan packet must carry the expected vxlan id in
                    # its ASCII/hex dump (-AX).
                    cmd4 = 'tcpdump -AX -r %s | grep ' % pcaps3 + \
                        vxlan_id + ' |wc -l'
                    out4, err = execute_cmd_out(session, cmd4, self.logger)
                    count_vxlan_id = int(out4.strip('\n'))

                    if count_vxlan_id < count:
                        errmsg = "%s vxlan packet are seen with %s vxlan_id . Not Expected . " % (
                            count, count_vxlan_id)
                        self.tcpdump_stop_on_compute(comp_ip)
                        self.logger.error(errmsg)
                        assert False, errmsg
                    else:
                        self.logger.info(
                            "%s vxlan packets are seen with %s vxlan_id as expexted . " %
                            (count, count_vxlan_id))
                        self.tcpdump_stop_on_compute(comp_ip)
            else:
                errmsg = "%s UDP encapsulated packets are seen and %s GRE encapsulated packets are seen.Not expected, %s vxlan packet seen" % (
                    count2, count3, count)
                self.logger.error(errmsg)
                # self.tcpdump_stop_on_all_compute()
                self.tcpdump_stop_on_compute(comp_ip)
                assert False, errmsg
            if vlan_id is not None:
                # Same containment check for the vlan id, when requested.
                cmd5 = 'tcpdump -AX -r %s | grep %s |wc -l' % (pcaps3, vlan_id)
                out5, err = execute_cmd_out(session, cmd5, self.logger)
                count_vlan_id = int(out5.strip('\n'))

                if count_vlan_id < count:
                    errmsg = "%s vxlan packet are seen with %s vlan_id . Not Expected . " % (
                        count, count_vlan_id)
                    self.logger.error(errmsg)
                    assert False, errmsg
                else:
                    self.logger.info(
                        "%s vxlan packets are seen with %s vlan_id as expexted . " %
                        (count, count_vlan_id))
        return True
def read_tcpdump(obj, session, pcap):
    """Decode *pcap* with ``tcpdump -n`` on the remote host and return the text."""
    out, err = execute_cmd_out(session, 'tcpdump -n -r %s' % pcap, obj.logger)
    return out
def delete_pcap(session, pcap):
    '''Remove the capture file *pcap* on the host reachable via *session*.'''
    cmd = 'rm -f %s' % pcap
    execute_cmd_out(session, cmd)
Example #26
0
    def verify_flow_thru_si(self, si_fix, src_vn=None):
        '''Verify that flows are distributed across the active SVMs of *si_fix*.

        Starts a 10-packet tcpdump on the left (or *src_vn*) tap interface of
        every ACTIVE SVM, waits for traffic, then reads each capture and maps
        destination ports 9000-9002 to the SVM that carried them.
        Raises AssertionError if no flow is seen on any SVM.
        '''
        self.logger.info(
            'Will start a tcpdump on the left-interfaces of the Service Instances to find out which flow is entering which Service Instance'
        )
        flowcount = 0
        result = True
        flow_pattern = {}
        svms = self.get_svms_in_si(si_fix, self.inputs.project_name)
        svms = sorted(set(svms))
        if None in svms:
            svms.remove(None)
        for svm in svms:
            self.logger.info('SVM %s is in %s state' % (svm.name, svm.status))
            if svm.status == 'ACTIVE':
                svm_name = svm.name
                host = self.get_svm_compute(svm_name)
                if src_vn is not None:
                    tapintf = self.get_svm_tapintf_of_vn(svm_name, src_vn)
                else:
                    direction = 'left'
                    tapintf = self.get_bridge_svm_tapintf(svm_name, direction)
                session = ssh(host['host_ip'], host['username'],
                              host['password'])
                cmd = 'tcpdump -nni %s -c 10 > /tmp/%s_out.log' % (tapintf,
                                                                   tapintf)
                execute_cmd(session, cmd, self.logger)
            else:
                self.logger.info('%s is not in ACTIVE state' % svm.name)
        sleep(15)

        self.logger.info('%%%%% Will check the result of tcpdump %%%%%')
        svms = self.get_svms_in_si(si_fix, self.inputs.project_name)
        svms = sorted(set(svms))
        if None in svms:
            svms.remove(None)
        for svm in svms:
            self.logger.info('SVM %s is in %s state' % (svm.name, svm.status))
            if svm.status == 'ACTIVE':
                svm_name = svm.name
                host = self.get_svm_compute(svm_name)
                if src_vn is not None:
                    tapintf = self.get_svm_tapintf_of_vn(svm_name, src_vn)
                else:
                    direction = 'left'
                    tapintf = self.get_bridge_svm_tapintf(svm_name, direction)
                session = ssh(host['host_ip'], host['username'],
                              host['password'])
                output_cmd = 'cat /tmp/%s_out.log' % tapintf
                out, err = execute_cmd_out(session, output_cmd, self.logger)
                # The three traffic streams use dports 9000-9002; loop over
                # them instead of three copy-pasted checks.
                for dport in ('9000', '9001', '9002'):
                    if dport in out:
                        flowcount = flowcount + 1
                        self.logger.info(
                            'Flow with dport %s seen flowing inside %s' %
                            (dport, svm_name))
                        flow_pattern[dport] = svm_name
            else:
                self.logger.info('%s is not in ACTIVE state' % svm.name)
        if flowcount > 0:
            self.logger.info(
                'Flows are distributed across the Service Instances as :')
            self.logger.info('%s' % flow_pattern)
        else:
            result = False
        assert result, 'No Flow distribution seen'
Example #27
0
    def verify_flow_thru_si(self,
                            si_fix,
                            src_vn=None,
                            ecmp_hash=None,
                            flow_count=3):
        '''Verify flow distribution across the active SVMs of *si_fix*.

        Captures 10 packets on each ACTIVE SVM's left (or *src_vn*) tap
        interface, maps dports 9000..9000+flow_count-1 to the SVM that
        carried them, then (when *ecmp_hash* is given) checks the observed
        distribution against the configured hash fields.
        Raises AssertionError on verification failure.
        '''
        self.logger.info(
            'Will start a tcpdump on the left-interfaces of the Service Instances to find out which flow is entering which Service Instance'
        )
        flowcount = 0
        result = True
        flow_pattern = {}
        svms = self.get_svms_in_si(si_fix)
        svms = sorted(set(svms))
        if None in svms:
            svms.remove(None)
        for svm in svms:
            self.logger.info('SVM %s is in %s state' % (svm.name, svm.status))
            if svm.status == 'ACTIVE':
                svm_name = svm.name
                host = self.get_svm_compute(svm_name)
                if src_vn is not None:
                    tapintf = self.get_svm_tapintf_of_vn(svm_name, src_vn)
                else:
                    direction = 'left'
                    tapintf = self.get_bridge_svm_tapintf(svm_name, direction)
                session = ssh(host['host_ip'], host['username'],
                              host['password'])
                cmd = 'tcpdump -nni %s -c 10 > /tmp/%s_out.log' % (tapintf,
                                                                   tapintf)
                execute_cmd(session, cmd, self.logger)
            else:
                self.logger.info('%s is not in ACTIVE state' % svm.name)
        sleep(15)

        self.logger.info('%%%%% Will check the result of tcpdump %%%%%')
        svms = self.get_svms_in_si(si_fix)
        svms = sorted(set(svms))
        if None in svms:
            svms.remove(None)
        for svm in svms:
            self.logger.info('SVM %s is in %s state' % (svm.name, svm.status))
            if svm.status == 'ACTIVE':
                svm_name = svm.name
                host = self.get_svm_compute(svm_name)
                if src_vn is not None:
                    tapintf = self.get_svm_tapintf_of_vn(svm_name, src_vn)
                else:
                    direction = 'left'
                    tapintf = self.get_bridge_svm_tapintf(svm_name, direction)
                session = ssh(host['host_ip'], host['username'],
                              host['password'])
                output_cmd = 'cat /tmp/%s_out.log' % tapintf
                out, err = execute_cmd_out(session, output_cmd, self.logger)

                for i in range(0, flow_count):
                    dport = str(9000 + i)
                    if dport in out:
                        flowcount = flowcount + 1
                        self.logger.info(
                            'Flow with dport %s seen flowing inside %s' %
                            (dport, svm_name))
                        flow_pattern[dport] = svm_name
            else:
                self.logger.info('%s is not in ACTIVE state' % svm.name)
        if flowcount > 0:
            self.logger.info(
                'Flows are distributed across the Service Instances as :')
            self.logger.info('%s' % flow_pattern)
        else:
            result = False

        if ecmp_hash and ecmp_hash != 'default':
            # count the number of hash fields set
            hash_var_count = sum(ecmp_hash.values())

            # Fix: flow_pattern['9000'] below raised a bare KeyError when the
            # reference flow was never captured; fail with a clear message
            # instead.
            assert '9000' in flow_pattern, (
                'Flow with dport 9000 not seen on any SVM; cannot verify '
                'ecmp hash %s' % ecmp_hash)

            # Incase, only one hash field is set, all flows should go through
            # single service instance. One exception here is destination_port.
            # Destination port varies for traffic streams.So, for destination
            # port, traffic will get load balanced even if ecmp hash is
            # configured "destination_port" alone.
            if hash_var_count == 1 and (not 'destination_port' in ecmp_hash):
                flow_pattern_ref = flow_pattern['9000']
                if all(flow_pattern_ref == item
                       for item in flow_pattern.values()):
                    self.logger.info(
                        'Flows are flowing through Single Service Instance: %s, as per config hash: %s'
                        % (flow_pattern_ref, ecmp_hash))
                    self.logger.info('%s' % flow_pattern)
                else:
                    result = False
                    self.logger.error(
                        'Flows are flowing through multiple Service Instances:%s, where as it should not as per config hash:%s'
                        % (flow_pattern, ecmp_hash))
                    assert result, 'Config hash is not working for: %s' % (
                        ecmp_hash)
            # Incase, multiple ecmp hash fields are configured or default ecmp
            # hash is present or only 'destionation_port' is configured in
            # ecmp_hash
            else:
                flow_pattern_ref = flow_pattern['9000']
                if all(flow_pattern_ref == item
                       for item in flow_pattern.values()):
                    result = False
                    self.logger.error(
                        'Flows are flowing through Single Service Instance:%s, where as it should not as per config hash:%s'
                        % (flow_pattern_ref, ecmp_hash))
                    #assert result, 'Config hash is not working fine.'
                else:
                    self.logger.info(
                        'Flows are flowing through multiple Service Instances:%s, as per config hash: %s'
                        % (flow_pattern, ecmp_hash))
                    self.logger.info('%s' % flow_pattern)
        else:
            assert result, 'No Flow distribution seen'
Example #28
0
def read_tcpdump(obj, session, pcap):
    '''Dump the pcap file *pcap* (with sudo) over *session* and return the text.'''
    read_cmd = 'sudo tcpdump -n -r %s' % pcap
    output, error = execute_cmd_out(session, read_cmd, obj.logger)
    return output
def delete_pcap(session, pcap):
    '''Delete the remote capture file *pcap* via *session* (best effort).'''
    remove_cmd = 'rm -f %s' % pcap
    execute_cmd_out(session, remove_cmd)
Example #30
0
    def verify_flow_thru_si(self, si_fix, src_vn=None):
        '''Verify that flows are distributed across the active SVMs of *si_fix*.

        Starts a 10-packet tcpdump on each ACTIVE SVM's left (or *src_vn*)
        tap interface, waits, then inspects each capture and records which
        SVM carried each of the dport-9000/9001/9002 streams.
        Raises AssertionError when no flow is seen anywhere.
        '''
        self.logger.info(
            'Will start a tcpdump on the left-interfaces of the Service Instances to find out which flow is entering which Service Instance')
        flowcount = 0
        result = True
        flow_pattern = {}
        svms = self.get_svms_in_si(si_fix, self.inputs.project_name)
        svms = sorted(set(svms))
        if None in svms:
            svms.remove(None)
        for svm in svms:
            self.logger.info('SVM %s is in %s state' % (svm.name, svm.status))
            if svm.status == 'ACTIVE':
                svm_name = svm.name
                host = self.get_svm_compute(svm_name)
                if src_vn is not None:
                    tapintf = self.get_svm_tapintf_of_vn(svm_name, src_vn)
                else:
                    direction = 'left'
                    tapintf = self.get_bridge_svm_tapintf(svm_name, direction)
                session = ssh(
                    host['host_ip'], host['username'], host['password'])
                cmd = 'tcpdump -nni %s -c 10 > /tmp/%s_out.log' % (
                    tapintf, tapintf)
                execute_cmd(session, cmd, self.logger)
            else:
                self.logger.info('%s is not in ACTIVE state' % svm.name)
        sleep(15)

        self.logger.info('%%%%% Will check the result of tcpdump %%%%%')
        svms = self.get_svms_in_si(si_fix, self.inputs.project_name)
        svms = sorted(set(svms))
        if None in svms:
            svms.remove(None)
        for svm in svms:
            self.logger.info('SVM %s is in %s state' % (svm.name, svm.status))
            if svm.status == 'ACTIVE':
                svm_name = svm.name
                host = self.get_svm_compute(svm_name)
                if src_vn is not None:
                    tapintf = self.get_svm_tapintf_of_vn(svm_name, src_vn)
                else:
                    direction = 'left'
                    tapintf = self.get_bridge_svm_tapintf(svm_name, direction)
                session = ssh(
                    host['host_ip'], host['username'], host['password'])
                output_cmd = 'cat /tmp/%s_out.log' % tapintf
                out, err = execute_cmd_out(session, output_cmd, self.logger)
                # Deduplicated: the three streams use dports 9000-9002.
                for dport in ('9000', '9001', '9002'):
                    if dport in out:
                        flowcount = flowcount + 1
                        self.logger.info(
                            'Flow with dport %s seen flowing inside %s' % (
                                dport, svm_name))
                        flow_pattern[dport] = svm_name
            else:
                self.logger.info('%s is not in ACTIVE state' % svm.name)
        if flowcount > 0:
            self.logger.info(
                'Flows are distributed across the Service Instances as :')
            self.logger.info('%s' % flow_pattern)
        else:
            result = False
        assert result, 'No Flow distribution seen'
Example #31
0
    def tcpdump_analyze_on_compute(self,
                                   comp_ip,
                                   pcaptype,
                                   vxlan_id=None,
                                   vlan_id=None):
        '''Analyze the encapsulation pcap files captured on compute *comp_ip*.

        *pcaptype* selects the expected encapsulation: 'UDP', 'GRE' or
        'VXLAN'.  The /tmp/encap-{udp,gre,vxlan}.pcap files must already
        exist on the node (written by the matching capture-start helper).
        For 'VXLAN', *vxlan_id* and *vlan_id*, when given, must each match
        at least as many packets as the total vxlan count.  Returns True on
        success; raises AssertionError otherwise.
        '''
        sleep(2)  # give tcpdump a moment to flush the capture files
        compute_user = self.inputs.host_data[comp_ip]['username']
        compute_password = self.inputs.host_data[comp_ip]['password']
        session = ssh(comp_ip, compute_user, compute_password)
        self.logger.info("Analyzing on compute node %s" % comp_ip)
        if pcaptype == 'UDP':
            pcaps1 = '/tmp/encap-udp.pcap'
            pcaps2 = '/tmp/encap-gre.pcap'
            # Count UDP packets in the udp pcap and GRE packets in the gre
            # pcap; only the former should be non-zero for this mode.
            cmd2 = 'tcpdump  -r %s | grep UDP |wc -l' % pcaps1
            out2, err = execute_cmd_out(session, cmd2, self.logger)
            cmd3 = 'tcpdump  -r %s | grep GRE | wc -l' % pcaps2
            out3, err = execute_cmd_out(session, cmd3, self.logger)
            count2 = int(out2.strip('\n'))
            count3 = int(out3.strip('\n'))
            if count2 != 0 and count3 == 0:
                self.logger.info(
                    "%s UDP encapsulated packets are seen and %s GRE encapsulated packets are seen as expected"
                    % (count2, count3))
                # NOTE(review): unlike the GRE/VXLAN branches, the capture is
                # not stopped here on success -- confirm this is intended.
                return True
            else:
                errmsg = "%s UDP encapsulated packets are seen and %s GRE encapsulated packets are seen.Not expected" % (
                    count2, count3)
                self.logger.error(errmsg)
                assert False, errmsg
        elif pcaptype == 'GRE':
            pcaps1 = '/tmp/encap-udp.pcap'
            pcaps2 = '/tmp/encap-gre.pcap'
            cmd2 = 'tcpdump  -r %s | grep UDP |wc -l' % pcaps1
            out2, err = execute_cmd_out(session, cmd2, self.logger)
            cmd3 = 'tcpdump  -r %s | grep GRE | wc -l' % pcaps2
            out3, err = execute_cmd_out(session, cmd3, self.logger)
            count2 = int(out2.strip('\n'))
            count3 = int(out3.strip('\n'))
            if count2 == 0 and count3 != 0:
                self.logger.info(
                    "%s GRE encapsulated packets are seen and %s UDP encapsulated packets are seen as expected"
                    % (count3, count2))
                # self.tcpdump_stop_on_all_compute()
                self.tcpdump_stop_on_compute(comp_ip)
                return True
            else:
                errmsg = "%s UDP encapsulated packets are seen and %s GRE encapsulated packets are seen.Not expected" % (
                    count2, count3)
                self.logger.error(errmsg)
                # self.tcpdump_stop_on_all_compute()
                self.tcpdump_stop_on_compute(comp_ip)
                assert False, errmsg

        elif pcaptype == 'VXLAN':
            pcaps1 = '/tmp/encap-udp.pcap'
            pcaps2 = '/tmp/encap-gre.pcap'
            pcaps3 = '/tmp/encap-vxlan.pcap'
            cmd2 = 'tcpdump  -r %s | grep UDP |wc -l' % pcaps1
            out2, err = execute_cmd_out(session, cmd2, self.logger)
            cmd3 = 'tcpdump  -r %s | grep GRE | wc -l' % pcaps2
            out3, err = execute_cmd_out(session, cmd3, self.logger)
            count2 = int(out2.strip('\n'))
            count3 = int(out3.strip('\n'))

            # VXLAN is UDP-encapsulated, hence the vxlan pcap is grepped for
            # 'UDP'.  cmd3/out3 are reused here; count3 was already extracted
            # above.
            cmd3 = 'tcpdump  -r %s | grep UDP |wc -l' % pcaps3
            out3, err = execute_cmd_out(session, cmd3, self.logger)
            count = int(out3.strip('\n'))

            if count2 == 0 and count3 == 0 and count != 0:
                self.logger.info(
                    "%s GRE encapsulated packets are seen and %s UDP encapsulated packets are seen and %s vxlan packets are seen  as expected"
                    % (count3, count2, count))
                # self.tcpdump_stop_on_all_compute()
                if vxlan_id is not None:
                    # Every vxlan packet should carry the expected vxlan_id.
                    cmd4 = 'tcpdump -AX -r %s | grep ' % pcaps3 + \
                        vxlan_id + ' |wc -l'
                    out4, err = execute_cmd_out(session, cmd4, self.logger)
                    count_vxlan_id = int(out4.strip('\n'))

                    if count_vxlan_id < count:
                        errmsg = "%s vxlan packet are seen with %s vxlan_id . Not Expected . " % (
                            count, count_vxlan_id)
                        self.tcpdump_stop_on_compute(comp_ip)
                        self.logger.error(errmsg)
                        assert False, errmsg
                    else:
                        self.logger.info(
                            "%s vxlan packets are seen with %s vxlan_id as expexted . "
                            % (count, count_vxlan_id))
                        self.tcpdump_stop_on_compute(comp_ip)
            else:
                errmsg = "%s UDP encapsulated packets are seen and %s GRE encapsulated packets are seen.Not expected, %s vxlan packet seen" % (
                    count2, count3, count)
                self.logger.error(errmsg)
                # self.tcpdump_stop_on_all_compute()
                self.tcpdump_stop_on_compute(comp_ip)
                assert False, errmsg
            if vlan_id is not None:
                # Likewise every vxlan packet should carry the expected vlan_id.
                cmd5 = 'tcpdump -AX -r %s | grep %s |wc -l' % (pcaps3, vlan_id)
                out5, err = execute_cmd_out(session, cmd5, self.logger)
                count_vlan_id = int(out5.strip('\n'))

                if count_vlan_id < count:
                    errmsg = "%s vxlan packet are seen with %s vlan_id . Not Expected . " % (
                        count, count_vlan_id)
                    self.logger.error(errmsg)
                    assert False, errmsg
                else:
                    self.logger.info(
                        "%s vxlan packets are seen with %s vlan_id as expexted . "
                        % (count, count_vlan_id))
        return True
 def verify_packets(self, packet_type, pcap_path_with_file_name,
                    expected_count =None, dot1p = None, dscp = None, 
                    mpls_exp = None):
     '''
     This function parses tcpdump file.
     It verifies that field in packet in pcap file are same as expected by user or not.
     "packet_type" is mandatory and can be set to any string containing "exp", "dot1p",
     "dscp" or any or all of them.
     Verification done for following values:
         1. DSCP field
         2. VLAN Priority code point
         3. MPLS EXP bits
     This function can also be used to parse any .pcap present on any node
     even if the start capture was not done by 'TestQosTraffic' object.
     '''
     # NOTE(review): 'mpls_exp' is accepted but never checked in this body --
     # the "exp" handling appears to be missing or truncated.
     # Lazily open an ssh session when the caller did not supply one.
     if self.session == None:
         if not self.username and not self.node_ip and not self.password:
             self.logger.error("Either of IP, username or password not"
                               " specified")
             self.logger.error("Cannot open ssh session to the node")
             return False
         else:
             self.session = ssh(self.node_ip, self.username,\
                                    self.password)
         # Resolve the supplied pcap path to a concrete file name via 'ls'.
         cmd = "ls -al %s" % pcap_path_with_file_name
         out, err = execute_cmd_out(self.session, cmd)
         if out:
             self.pcap = out.split('\n')[0].split()[-1]
         elif err:
             self.logger.error("%s" % err)
             return False
     # Optionally require a minimum packet count before parsing fields.
     if expected_count:
         result = verify_tcpdump_count(self, self.session, self.pcap,
                                       expected_count, exact_match=False)
         if not result:
             return result
     # Pull the pcap to the local machine so dpkt can parse it.
     file_transfer = self.compute_node_fixture.file_transfer(
                         "get", self.pcap, self.pcap.split('/')[-1])
     if not file_transfer:
         self.logger.error("Unable to transfer file to local system")
         return False
     file_name = self.pcap.split('/')[-1]
     if self.encap_type:
         if not self.verify_encap_type(self.encap_type, file_name):
             return False
     # NOTE(review): pcap files are binary -- 'rb' would be safer than 'r';
     # the file handle is also never closed.
     f = open(file_name, 'r')
     pcap = dpkt.pcap.Reader(f)
     count = 0
     # Check every packet; bail out on the first mismatch.
     for ts,buff in pcap:
         ether = dpkt.ethernet.Ethernet(buff)
         self.logger.debug("Verifying for packet number %d" % count)
         count = count+1
         if "dot1p" in packet_type and\
         self._check_underlay_interface_is_tagged():
             if isinstance(dot1p,int):
                 # NOTE(review): 'string' is never used.
                 string = ''
                 try:
                     # Python 2 'except X, e' syntax; this module targets py2.
                     priority = ether.vlan_tags[0].pri
                 except AttributeError, e:
                     self.logger.error(e)
                     return False
                 if priority == dot1p:
                     self.logger.debug("Validated dot1p marking of %s" 
                                       % (dot1p))
                 else:
                     self.logger.error("Mismatch between actual and"
                                       " expected PCP")
                     self.logger.error("Expected PCP : %s, Actual PCP :%s"\
                                       % (dot1p,priority))
                     return False
             else:
                 self.logger.error("dot1p to be compared not mentioned")
                 return False
         if "dscp" in packet_type:
             if isinstance(dscp,int):
                 ip = ether.data
                 try:
                     # DSCP is the upper 6 bits of the IP TOS byte.
                     actual_dscp = int(bin(ip.tos >> 2), 2)
                 except AttributeError, e:
                     self.logger.error(e)
                     return False
                 if dscp == actual_dscp:
                     self.logger.debug("Validated DSCP marking of %s" % 
                                       (dscp))
                 else:
                     self.logger.error("Mismatch between actual and"
                                       " expected DSCP")
                     self.logger.error("Expected DSCP: %s, Actual DSCP:%s"\
                                       % (dscp,actual_dscp))
                     return False
             else:
                 self.logger.error("dscp to be compared not mentioned")
                 return False
Example #33
0
 def verify_packets(self,
                    packet_type,
                    pcap_path_with_file_name,
                    expected_count=None,
                    dot1p=None,
                    dscp=None,
                    mpls_exp=None):
     '''
     This function parses tcpdump file.
     It verifies that field in packet in pcap file are same as expected by user or not.
     "packet_type" is mandatory and can be set to any string containing "exp", "dot1p",
     "dscp" or any or all of them.
     Verification done for following values:
         1. DSCP field
         2. VLAN Priority code point
         3. MPLS EXP bits
     This function can also be used to parse any .pcap present on any node
     even if the start capture was not done by 'TestQosTraffic' object.
     '''
     # NOTE(review): 'mpls_exp' is accepted but never checked in this body --
     # the "exp" handling appears to be missing or truncated.
     # Lazily open an ssh session when the caller did not supply one.
     if self.session == None:
         if not self.username and not self.node_ip and not self.password:
             self.logger.error("Either of IP, username or password not"
                               " specified")
             self.logger.error("Cannot open ssh session to the node")
             return False
         else:
             self.session = ssh(self.node_ip, self.username,\
                                    self.password)
         # Resolve the supplied pcap path to a concrete file name via 'ls'.
         cmd = "ls -al %s" % pcap_path_with_file_name
         out, err = execute_cmd_out(self.session, cmd)
         if out:
             self.pcap = out.split('\n')[0].split()[-1]
         elif err:
             self.logger.error("%s" % err)
             return False
     # Optionally require a minimum packet count before parsing fields.
     if expected_count:
         result = verify_tcpdump_count(self,
                                       self.session,
                                       self.pcap,
                                       expected_count,
                                       exact_match=False)
         if not result:
             return result
     # Pull the pcap to the local machine so dpkt can parse it.
     file_transfer = self.compute_node_fixture.file_transfer(
         "get", self.pcap,
         self.pcap.split('/')[-1])
     if not file_transfer:
         self.logger.error("Unable to transfer file to local system")
         return False
     file_name = self.pcap.split('/')[-1]
     # "MPLS_any" skips the encapsulation check (any MPLS encap accepted).
     if self.encap_type and self.encap_type != "MPLS_any":
         if not self.verify_encap_type(self.encap_type, file_name):
             return False
     # NOTE(review): pcap files are binary -- 'rb' would be safer than 'r';
     # the file handle is also never closed.
     f = open(file_name, 'r')
     pcap = dpkt.pcap.Reader(f)
     count = 0
     # Check every packet; bail out on the first mismatch.
     for ts, buff in pcap:
         ether = dpkt.ethernet.Ethernet(buff)
         self.logger.debug("Verifying for packet number %d" % count)
         count = count + 1
         if "dot1p" in packet_type and\
         self._check_underlay_interface_is_tagged():
             if isinstance(dot1p, int):
                 # NOTE(review): 'string' is never used.
                 string = ''
                 try:
                     # Python 2 'except X, e' syntax; this module targets py2.
                     priority = ether.vlan_tags[0].pri
                 except AttributeError, e:
                     self.logger.error(e)
                     return False
                 if priority == dot1p:
                     self.logger.debug("Validated dot1p marking of %s" %
                                       (dot1p))
                 else:
                     self.logger.error("Mismatch between actual and"
                                       " expected PCP")
                     self.logger.error("Expected PCP : %s, Actual PCP :%s"\
                                       % (dot1p,priority))
                     return False
             else:
                 self.logger.error("dot1p to be compared not mentioned")
                 return False
         if "dscp" in packet_type:
             if isinstance(dscp, int):
                 ip = ether.data
                 try:
                     # DSCP is the upper 6 bits of the IP TOS byte.
                     actual_dscp = int(bin(ip.tos >> 2), 2)
                 except AttributeError, e:
                     self.logger.error(e)
                     return False
                 if dscp == actual_dscp:
                     self.logger.debug("Validated DSCP marking of %s" %
                                       (dscp))
                 else:
                     self.logger.error("Mismatch between actual and"
                                       " expected DSCP")
                     self.logger.error("Expected DSCP: %s, Actual DSCP:%s"\
                                       % (dscp,actual_dscp))
                     return False
             else:
                 self.logger.error("dscp to be compared not mentioned")
                 return False
Example #34
0
    def verify_flow_thru_si(self, si_fix, src_vn=None, ecmp_hash=None, flow_count=3):
        '''Verify flow distribution across the SVMs of *si_fix*.

        Captures traffic (src port 8000 when *src_vn* is given) on each
        ACTIVE SVM's tap interface -- on the compute node, or inside the SVM
        when inputs.pcap_on_vm is set -- then maps dports
        9000..9000+flow_count-1 to the SVM that carried them.  When
        *ecmp_hash* is given, the observed distribution is checked against
        the configured hash fields.  Raises AssertionError on failure.
        '''
        self.logger.info(
            'Will start a tcpdump on the left-interfaces of the Service Instances to find out which flow is entering which Service Instance')
        flowcount = 0
        result = True
        flow_pattern = {}
        svms = self.get_svms_in_si(si_fix)
        svms = sorted(set(svms))
        svm_list = si_fix.svm_list
        svm_index = 0
        vm_fix_pcap_pid_files = []

        # Capturing packets based upon source port
        src_port = "8000"
        filters = '\'(src port %s)\'' % (src_port)
        if None in svms:
            svms.remove(None)
        for svm_fixture in svm_list:
            svm = svm_fixture.vm_obj
            self.logger.info('SVM %s is in %s state' % (svm.name, svm.status))
            if svm.status == 'ACTIVE':
                svm_name = svm.name
                host_name = self.connections.orch.get_host_of_vm(svm)
                host = self.inputs.host_data[host_name]
                if src_vn is not None:
                    #tapintf = svm_fixture.tap_intf[src_vn.vn_fq_name]['name']
                    tapintf = self.connections.orch.get_vm_tap_interface(\
                                      svm_fixture.tap_intf[src_vn.vn_fq_name])
                else:
                    tapintf = self.connections.orch.get_vm_tap_interface(svm_fixture.tap_intf[si_fix.left_vn_fq_name])
                    filters = ''
                if self.inputs.pcap_on_vm:
                    tcpdump_files = start_tcpdump_for_vm_intf(
                        None, [svm_list[svm_index]], None, filters=filters, pcap_on_vm=True, vm_intf='eth1', svm=True)
                    svm_index = svm_index + 1
                    vm_fix_pcap_pid_files.append(tcpdump_files)
                else:
                    session = ssh(
                        host['host_ip'], host['username'], host['password'])
                    cmd = 'sudo tcpdump -nni %s %s -c 20 > /tmp/%s_out.log' % (
                        tapintf, filters, tapintf)
                    execute_cmd(session, cmd, self.logger)
            else:
                self.logger.info('%s is not in ACTIVE state' % svm.name)
        sleep(15)

        self.logger.info('%%%%% Will check the result of tcpdump %%%%%')
        svms = self.get_svms_in_si(si_fix)
        svms = sorted(set(svms))
        if None in svms:
            svms.remove(None)
        svm_index = 0
        for svm_fixture in svm_list:
            svm = svm_fixture.vm_obj
            self.logger.info('SVM %s is in %s state' % (svm.name, svm.status))
            if svm.status == 'ACTIVE':
                svm_name = svm.name
                host_name = self.connections.orch.get_host_of_vm(svm)
                host = self.inputs.host_data[host_name]
                if src_vn is not None:
                    tapintf = self.connections.orch.get_vm_tap_interface(\
                                      svm_fixture.tap_intf[src_vn.vn_fq_name])
                    #tapintf = svm_fixture.tap_intf[src_vn.vn_fq_name]['name']
                else:
                    direction = 'left'
                    tapintf = self.connections.orch.get_vm_tap_interface(svm_fixture.tap_intf[si_fix.left_vn_fq_name])
                if not self.inputs.pcap_on_vm:
                    session = ssh(
                        host['host_ip'], host['username'], host['password'])
                    output_cmd = 'cat /tmp/%s_out.log' % tapintf
                    out, err = execute_cmd_out(session, output_cmd, self.logger)
                else:
                    out, pkt_count = stop_tcpdump_for_vm_intf(
                        None, None, None, vm_fix_pcap_pid_files=vm_fix_pcap_pid_files[svm_index], svm=True)
                    svm_index = svm_index + 1
                    out = out[0]
                for i in range(0,flow_count):
                    dport = str(9000+i)
                    if dport in out:
                        flowcount = flowcount + 1
                        self.logger.info(
                            'Flow with dport %s seen flowing inside %s' % (dport,svm_name))
                        flow_pattern[dport] = svm_name
            else:
                self.logger.info('%s is not in ACTIVE state' % svm.name)
        if flowcount > 0:
            self.logger.info(
                'Flows are distributed across the Service Instances as :')
            self.logger.info('%s' % flow_pattern)
        else:
            result = False

        if ecmp_hash and ecmp_hash != 'default':
            # count the number of hash fields set
            hash_var_count = sum(ecmp_hash.values())

            # Fix: flow_pattern['9000'] below raised a bare KeyError when the
            # reference flow was never captured; fail with a clear message
            # instead.
            assert '9000' in flow_pattern, (
                'Flow with dport 9000 not seen on any SVM; cannot verify '
                'ecmp hash %s' % ecmp_hash)

            # Incase, only one hash field is set, all flows should go through
            # single service instance. One exception here is destination_port.
            # Destination port varies for traffic streams.So, for destination
            # port, traffic will get load balanced even if ecmp hash is
            # configured "destination_port" alone.
            if hash_var_count == 1 and (not 'destination_port' in ecmp_hash):
                flow_pattern_ref = flow_pattern['9000']
                if all(flow_pattern_ref  == item for item in flow_pattern.values()):
                    self.logger.info(
                        'Flows are flowing through Single Service Instance: %s, as per config hash: %s' % (flow_pattern_ref, ecmp_hash))
                    self.logger.info('%s' % flow_pattern)
                else:
                    result = False
                    self.logger.error(
                        'Flows are flowing through multiple Service Instances:%s, where as it should not as per config hash:%s' % (flow_pattern, ecmp_hash))
                    assert result, 'Config hash is not working for: %s' % ( ecmp_hash)
            # Incase, multiple ecmp hash fields are configured or default ecmp
            # hash is present or only 'destionation_port' is configured in
            # ecmp_hash
            else:
                flow_pattern_ref = flow_pattern['9000']
                if all(flow_pattern_ref  == item for item in flow_pattern.values()):
                    result = False
                    self.logger.error(
                        'Flows are flowing through Single Service Instance:%s, where as it should not as per config hash:%s' % (flow_pattern_ref, ecmp_hash))
                    #assert result, 'Config hash is not working fine.'
                else:
                    self.logger.info(
                        'Flows are flowing through multiple Service Instances:%s, as per config hash: %s' % (flow_pattern, ecmp_hash))
                    self.logger.info('%s' % flow_pattern)
        else:
            assert result, 'No Flow distribution seen'