Example #1
 def stop_tcpdump_on_intf(self, host, tapintf):
     session = ssh(host["host_ip"], host["username"], host["password"])
     self.logger.info("Waiting for tcpdump to complete")
     time.sleep(20)
     output_cmd = "cat /tmp/%s_out.log" % tapintf
     out, err = execute_cmd_out(session, output_cmd, self.logger)
     return out
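The helper above only reads back /tmp/&lt;tapintf&gt;_out.log; the capture itself is started elsewhere (see the start_tcpdump_on_intf variants in Examples #42, #43 and #47 below). A minimal hedged usage sketch, assuming those helpers and the same host dict layout; the wrapper name and arguments are illustrative only:

 def example_capture_and_check(self, host, tapintf, src_vm, dst_ip):
     # Hypothetical wrapper: start a short capture on the tap interface,
     # generate traffic, then read the capture log back and inspect it.
     self.start_tcpdump_on_intf(host, tapintf)       # writes /tmp/<tapintf>_out.log
     assert src_vm.ping_with_certainty(dst_ip)
     out = self.stop_tcpdump_on_intf(host, tapintf)  # waits, then cats the log
     return src_vm.vm_ip in out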
Example #2
 def verify_port_mirroring(self, src_vm, dst_vm, mirr_vm):
     result = True
     svm = mirr_vm.vm_obj
     if svm.status == 'ACTIVE':
         svm_name = svm.name
         host = self.get_svm_compute(svm_name)
         tapintf = self.get_svm_tapintf(svm_name)
     session = ssh(host['host_ip'], host['username'], host['password'])
     pcap = self.start_tcpdump(session, tapintf)
     assert src_vm.ping_with_certainty(dst_vm.vm_ip, count=5, size='1400')
     self.logger.info('Ping from %s to %s executed with c=5, expected mirrored packets 5 Ingress,5 Egress count = 10'
         % (src_vm.vm_ip, dst_vm.vm_ip))
     exp_count = 10
     filt = '| grep \"length [1-9][4-9][0-9][0-9][0-9]*\"'
     mirror_pkt_count = self.stop_tcpdump(session, pcap, filt)
     sleep(10)
     errmsg = "%s ICMP Packets mirrored to the analyzer VM %s,"\
              "Expected %s packets" % (
                  mirror_pkt_count, svm_name, exp_count)
     if mirror_pkt_count < exp_count:
         self.logger.error(errmsg)
         assert False, errmsg
     self.logger.info("%s ICMP packets are mirrored to the analyzer "
                      "service VM '%s'", mirror_pkt_count, svm_name)
     return result
Example #3
 def tcpdump_stop_on_all_compute(self):
     sessions = {}
     for compute_ip in self.inputs.compute_ips:
         compute_user = self.inputs.host_data[compute_ip]['username']
         compute_password = self.inputs.host_data[compute_ip]['password']
         session = ssh(compute_ip, compute_user, compute_password)
         self.stop_tcpdump(session)
Example #4
 def start_tcpdump(self, server_ip, tap_intf):
     session = ssh(server_ip,self.inputs.host_data[server_ip]['username'],self.inputs.host_data[server_ip]['password'])
     pcap = '/tmp/%s.pcap' % tap_intf
     cmd = "tcpdump -nei %s tcp -w %s" % (tap_intf, pcap)
     self.logger.info("Staring tcpdump to capture the packets on server %s" % (server_ip))
     execute_cmd(session, cmd, self.logger)
     return pcap, session
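Example #4 only starts the capture and hands back (pcap, session); the matching stop step is not among these snippets. A hedged sketch of what a counterpart could look like, reusing the same execute_cmd/execute_cmd_out helpers; the real stop_tcpdump used by these tests may differ:

 def stop_tcpdump_example(self, session, pcap, filt=''):
     # Kill the background tcpdump, then count captured packets by replaying
     # the pcap through tcpdump -r; filt may append e.g. '| grep "length ..."'.
     execute_cmd(session, 'sudo pkill -f tcpdump', self.logger)
     cmd = 'sudo tcpdump -nr %s 2>/dev/null %s | wc -l' % (pcap, filt)
     out, err = execute_cmd_out(session, cmd, self.logger)
     try:
         return int(out.strip().split()[-1])
     except (ValueError, IndexError):
         return 0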
Example #5
    def tcpdump_on_all_analyzer(self, si_fixture):
        sessions = {}
        svm_fixtures = si_fixture.svm_list
        for svm in svm_fixtures:
            svm_name = svm.vm_name
            host = self.inputs.host_data[svm.vm_node_ip]
            #tapintf = self.get_svm_tapintf(svm_name)
            tapintf = list(svm.tap_intf.values())[0]['name']
            session = ssh(host['host_ip'], host['username'], host['password'])
            pcap_vm = self.inputs.pcap_on_vm
            pcap = self.start_tcpdump(session, tapintf, pcap_on_vm=pcap_vm)
            sessions.update({svm_name: (session, pcap)})

        if self.inputs.pcap_on_vm:
            conn_list = []
            svm_list = si_fixture._svm_list
            vm_fix_pcap_pid_files = self.start_tcpdump(None,
                                                       tap_intf='eth0',
                                                       vm_fixtures=svm_list,
                                                       pcap_on_vm=True)
            conn_list.append(vm_fix_pcap_pid_files)
            conn_list.append(sessions)
            return conn_list

        return sessions
Example #6
 def verify_vrrp_action(self, src_vm, dst_vm, ip, vsrx=False):
     result = False
     self.logger.info('Will ping %s from %s and check if %s responds' % (
         ip, src_vm.vm_name, dst_vm.vm_name))
     compute_ip = dst_vm.vm_node_ip
     compute_user = self.inputs.host_data[compute_ip]['username']
     compute_password = self.inputs.host_data[compute_ip]['password']
     session = ssh(compute_ip, compute_user, compute_password)
     if vsrx:
         vm_tapintf = dst_vm.tap_intf[dst_vm.vn_fq_names[1]]['name']
     else:
         vm_tapintf = dst_vm.tap_intf[dst_vm.vn_fq_name]['name']
     cmd = 'sudo tcpdump -nni %s -c 2 icmp > /tmp/%s_out.log' % (
         vm_tapintf, vm_tapintf)
     execute_cmd(session, cmd, self.logger)
     assert src_vm.ping_with_certainty(ip)
     output_cmd = 'cat /tmp/%s_out.log' % vm_tapintf
     output, err = execute_cmd_out(session, output_cmd, self.logger)
     if src_vm.vm_ip in output:
         result = True
         self.logger.info(
             '%s is seen responding to ICMP Requests' % dst_vm.vm_name)
     else:
         self.logger.error(
             'ICMP Requests to %s not seen on the VRRP Master' % ip)
         result = False
     return result
Example #7
 def set_cpu_performance(self, hosts):
     sessions = {}
     cmd = 'for f in /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor ; do echo performance > $f; cat $f; done'
     for i in range(0, 2):
         session = ssh(hosts[i]['host_ip'], hosts[i]['username'], hosts[i]['password'])
         execute_cmd(session, cmd, self.logger)
     return
Example #8
    def tcpdump_start_on_all_compute(self):
        for compute_ip in self.inputs.compute_ips:
            compute_user = self.inputs.host_data[compute_ip]['username']
            compute_password = self.inputs.host_data[compute_ip]['password']
            session = ssh(compute_ip, compute_user, compute_password)
            self.stop_tcpdump(session)
            inspect_h = self.agent_inspect[compute_ip]
            comp_intf = inspect_h.get_vna_interface_by_type('eth')
            if len(comp_intf) == 1:
                comp_intf = comp_intf[0]
            self.logger.info('Agent interface name: %s' % comp_intf)
            pcap1 = '/tmp/encap-udp.pcap'
            pcap2 = '/tmp/encap-gre.pcap'
            pcap3 = '/tmp/encap-vxlan.pcap'
            cmd1 = 'tcpdump -ni %s udp port 51234 -w %s -s 0' % (
                comp_intf, pcap1)
            cmd_udp = "nohup " + cmd1 + " >& /dev/null < /dev/null &"
            cmd2 = 'tcpdump -ni %s proto 47 -w %s -s 0' % (comp_intf, pcap2)
            cmd_gre = "nohup " + cmd2 + " >& /dev/null < /dev/null &"
            cmd3 = 'tcpdump -ni %s dst port 4789 -w %s -s 0' % (
                comp_intf, pcap3)
            cmd_vxlan = "nohup " + cmd3 + " >& /dev/null < /dev/null &"

            self.start_tcpdump(session, cmd_udp)
            self.start_tcpdump(session, cmd_gre)
            self.start_tcpdump(session, cmd_vxlan)
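Once the background captures from Example #8 are torn down, the per-encapsulation pcaps can be inspected on each compute. A hedged sketch, assuming the same inputs/ssh/execute_cmd_out helpers; the method name is illustrative:

    def count_encap_packets_example(self, compute_ip, pcap='/tmp/encap-udp.pcap'):
        # Hypothetical check: count the packets tcpdump wrote into one encap pcap.
        host = self.inputs.host_data[compute_ip]
        session = ssh(compute_ip, host['username'], host['password'])
        cmd = 'tcpdump -nr %s 2>/dev/null | wc -l' % pcap
        out, err = execute_cmd_out(session, cmd, self.logger)
        return int(out.strip() or 0)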
Example #9
 def verify_mirroring(self, si_fix, src_vm, dst_vm, mirr_vm=None):
     result = True
     if mirr_vm:
         svm = mirr_vm.vm_obj
     else:
         svms = self.get_svms_in_si(si_fix)
         svm = svms[0]
     if svm.status == 'ACTIVE':
         svm_name = svm.name
         host = self.get_svm_compute(svm_name)
         if mirr_vm:
             tapintf = self.get_svm_tapintf(svm_name)
         else:
            tapintf = self.get_bridge_svm_tapintf(svm_name, 'left')
         session = ssh(host['host_ip'], host['username'], host['password'])
         cmd = 'sudo tcpdump -nni %s -c 5 > /tmp/%s_out.log' % (tapintf, tapintf)
         execute_cmd(session, cmd, self.logger)
         assert src_vm.ping_with_certainty(dst_vm.vm_ip)
         sleep(10)
         output_cmd = 'sudo cat /tmp/%s_out.log' % tapintf
         out, err = execute_cmd_out(session, output_cmd, self.logger)
         print(out)
         if '8099' in out:
             self.logger.info('Mirroring action verified')
         else:
             result = False
             self.logger.warning('No mirroring action seen')
     return result
Example #10
 def verify_mirroring(self, si_fix, src_vm, dst_vm, mirr_vm=None):
     result = True
     if mirr_vm:
         svm = mirr_vm.vm_obj
     else:
         svms = self.get_svms_in_si(si_fix)
         svm = svms[0]
     if svm.status == 'ACTIVE':
         svm_name = svm.name
         host = self.get_svm_compute(svm_name)
         if mirr_vm:
             tapintf = self.get_svm_tapintf(svm_name)
         else:
             tapintf = self.get_bridge_svm_tapintf(svm_name, 'left')
         session = ssh(host['host_ip'], host['username'], host['password'])
         cmd = 'sudo tcpdump -nni %s -c 5 > /tmp/%s_out.log' % (tapintf, tapintf)
         execute_cmd(session, cmd, self.logger)
         assert src_vm.ping_with_certainty(dst_vm.vm_ip)
         sleep(10)
         output_cmd = 'sudo cat /tmp/%s_out.log' % tapintf
         out, err = execute_cmd_out(session, output_cmd, self.logger)
         print(out)
         if '8099' in out:
             self.logger.info('Mirroring action verified')
         else:
             result = False
             self.logger.warning('No mirroring action seen')
     return result
Example #11
    def config_intf_mirroring(self, src_vm_fixture, analyzer_ip_address, analyzer_name, routing_instance, \
            src_port=None, sub_intf=False, parent_intf=False, nic_mirror=False, header = 1, nh_mode = 'dynamic', direction = 'both', analyzer_mac_address = '', mirror_vm_fixture = None):

        # Short description of the header values:
        # header 1 is the default (header enabled); all present test cases use it, so legacy cases are unaffected
        # header 2 is dynamic mirroring with the Juniper header and a traffic direction, where header verification is also wanted
        # header 3 means header disabled; routes then have to be imported from the other VN's VRF, so the VN properties need to be changed
        if header == 3:
            self.add_vn_mirror_properties()
            analyzer_mac_address = mirror_vm_fixture.mac_addr[self.vn3_fixture.vn_fq_name]

        vnc = src_vm_fixture.vnc_lib_h
        vlan = None
        tap_intf_obj = None
        parent_tap_intf_obj = None
        vlan = None
        tap_intf_objs = src_vm_fixture.get_tap_intf_of_vm()
        for tap_intf_obj in tap_intf_objs:
            intf_type = 'tap'
            if self.inputs.ns_agilio_vrouter_data:
                intf_type = 'nfp'
            if intf_type in tap_intf_obj['name']:
                parent_tap_intf_uuid = tap_intf_obj['uuid']
            else:
                sub_intf_tap_intf_uuid = tap_intf_obj['uuid']
        if not sub_intf:
            tap_intf_uuid = src_vm_fixture.get_tap_intf_of_vm()[0]['uuid']
            tap_intf_obj = vnc.virtual_machine_interface_read(id=tap_intf_uuid)
        else:
            tap_intf_obj = src_port
            vlan = self.vlan

        if parent_intf:
            parent_tap_intf_obj = vnc.virtual_machine_interface_read(id=parent_tap_intf_uuid)
        if header == 1 or header == 2:
            header_value = True
        else:
            header_value = False
        if not nic_mirror:
            self.enable_intf_mirroring(vnc, tap_intf_obj, analyzer_ip_address, analyzer_name, routing_instance, header = header_value, nh_mode = nh_mode, direction = direction, analyzer_mac_address = analyzer_mac_address)
            if parent_intf:
                self.logger.info("Intf mirroring enabled on both sub intf port and parent port")
                self.enable_intf_mirroring(vnc, parent_tap_intf_obj, analyzer_ip_address, analyzer_name, routing_instance, header = header_value, nh_mode = nh_mode, direction = direction, analyzer_mac_address = analyzer_mac_address)
            return vnc, tap_intf_obj, parent_tap_intf_obj, vlan
        else:
            self.enable_intf_mirroring(vnc, tap_intf_obj, analyzer_ip_address=None, analyzer_name=analyzer_name, \
                routing_instance=None, udp_port=None, nic_assisted_mirroring=True, nic_assisted_mirroring_vlan=100, header = header_value, nh_mode = nh_mode, direction = direction, analyzer_mac_address = analyzer_mac_address)
            if src_vm_fixture.vm_obj.status == 'ACTIVE':
                host = self.get_svm_compute(src_vm_fixture.vm_obj.name)
            session = ssh(host['host_ip'], host['username'], host['password'])
            agent_physical_interface = src_vm_fixture.agent_inspect[host['host_ip']].get_agent_physical_interface()
            pcap = self.start_tcpdump(session, agent_physical_interface, vlan=100)
            src_vm_fixture.ping_with_certainty(mirror_vm_fixture.vm_ip, count=11, size='1400')
            filt = '-e | grep \"vlan 100\"'
            mirror_pkt_count = self.stop_tcpdump(session, pcap, filt)
            if mirror_pkt_count == 0:
                self.logger.error("Nic mirroring doesn't works correctly")
                result = result and False
            else:
                self.logger.info("Nic mirroring works correctly")
Example #12
 def stop_tcpdump_on_intf(self, host, tapintf):
     session = ssh(host['host_ip'], host['username'], host['password'])
     self.logger.info('Waiting for tcpdump to complete')
     time.sleep(20)
     output_cmd = 'cat /tmp/%s_out.log' % tapintf
     out, err = execute_cmd_out(session, output_cmd, self.logger)
     return out
Example #13
    def tcpdump_start_on_all_compute(self):
        for compute_ip in self.inputs.compute_ips:
            compute_user = self.inputs.host_data[compute_ip]['username']
            compute_password = self.inputs.host_data[compute_ip]['password']
            session = ssh(compute_ip, compute_user, compute_password)
            self.stop_tcpdump(session)
            inspect_h = self.agent_inspect[compute_ip]
            comp_intf = inspect_h.get_vna_interface_by_type('eth')
            if len(comp_intf) >= 1:
                comp_intf = comp_intf[0]
            self.logger.info('Agent interface name: %s' % comp_intf)
            pcap1 = '/tmp/encap-udp.pcap'
            pcap2 = '/tmp/encap-gre.pcap'
            pcap3 = '/tmp/encap-vxlan.pcap'
            cmd1 = 'tcpdump -ni %s udp port 6635 -w %s -s 0' % (comp_intf,
                                                                pcap1)
            cmd_udp = "nohup " + cmd1 + " >& /dev/null < /dev/null &"
            cmd2 = 'tcpdump -ni %s proto 47 -w %s -s 0' % (comp_intf, pcap2)
            cmd_gre = "nohup " + cmd2 + " >& /dev/null < /dev/null &"
            cmd3 = 'tcpdump -ni %s dst port 4789 -w %s -s 0' % (comp_intf,
                                                                pcap3)
            cmd_vxlan = "nohup " + cmd3 + " >& /dev/null < /dev/null &"

            self.start_tcpdump(session, cmd_udp, pcap1)
            self.start_tcpdump(session, cmd_gre, pcap2)
            self.start_tcpdump(session, cmd_vxlan, pcap3)
        #just to make sure tcpdump starts listening
        sleep(5)
Example #14
 def tcpdump_stop_on_all_compute(self):
     sessions = {}
     for compute_ip in self.inputs.compute_ips:
         compute_user = self.inputs.host_data[compute_ip]['username']
         compute_password = self.inputs.host_data[compute_ip]['password']
         session = ssh(compute_ip, compute_user, compute_password)
         self.stop_tcpdump(session)
Example #15
 def set_cpu_performance(self, hosts):
     sessions = {}
     cmd = 'for f in /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor ; do echo performance > $f; cat $f; done'
     for i in range(0, 2):
         session = ssh(hosts[i]['host_ip'], hosts[i]['username'], hosts[i]['password'])
         execute_cmd(session, cmd, self.logger)
     return
Example #16
 def verify_vrrp_action(self, src_vm, dst_vm, ip, vsrx=False):
     result = False
     self.logger.info('Will ping %s from %s and check if %s responds' % (
         ip, src_vm.vm_name, dst_vm.vm_name))
     compute_ip = dst_vm.vm_node_ip
     compute_user = self.inputs.host_data[compute_ip]['username']
     compute_password = self.inputs.host_data[compute_ip]['password']
     session = ssh(compute_ip, compute_user, compute_password)
     if vsrx:
         vm_tapintf = dst_vm.tap_intf[dst_vm.vn_fq_names[1]]['name']
     else:
         vm_tapintf = dst_vm.tap_intf[dst_vm.vn_fq_name]['name']
     cmd = 'tcpdump -nni %s -c 10 > /tmp/%s_out.log' % (
         vm_tapintf, vm_tapintf)
     execute_cmd(session, cmd, self.logger)
     assert src_vm.ping_with_certainty(ip), 'Ping to vIP failure'
     output_cmd = 'cat /tmp/%s_out.log' % vm_tapintf
     output, err = execute_cmd_out(session, output_cmd, self.logger)
     if ip in output:
         result = True
         self.logger.info(
             '%s is seen responding to ICMP Requests' % dst_vm.vm_name)
     else:
         self.logger.error('ICMP Requests not seen on the VRRP Master')
         result = False
     return result
Example #17
 def stop_tcpdump_on_intf(self, host, tapintf):
     session = ssh(host['host_ip'], host['username'], host['password'])
     self.logger.info('Waiting for tcpdump to complete')
     time.sleep(10)
     output_cmd = 'sudo cat /tmp/%s_out.log' % tapintf
     out, err = execute_cmd_out(session, output_cmd, self.logger)
     return out
Example #18
    def verify_port_mirroring(self,
                              src_vm,
                              dst_vm,
                              mirr_vm,
                              vlan=None,
                              parent=False):
        result = True
        svm = mirr_vm.vm_obj
        if svm.status == 'ACTIVE':
            svm_name = svm.name
            host = self.get_svm_compute(svm_name)
            tapintf = self.get_svm_tapintf(svm_name)
        # Intf mirroring enabled on either sub intf or parent port
        exp_count = 10
        if parent:
            # Intf mirroring enabled on both sub intf and parent port
            exp_count = 20
        if self.inputs.pcap_on_vm:
            vm_fix_pcap_pid_files = start_tcpdump_for_vm_intf(
                None, [mirr_vm],
                None,
                filters='udp port 8099',
                pcap_on_vm=True)
        else:
            session = ssh(host['host_ip'], host['username'], host['password'])
            pcap = self.start_tcpdump(session, tapintf, vlan=vlan)
        src_ip = src_vm.vm_ip
        dst_ip = dst_vm.vm_ip
        if vlan:
            sub_intf = 'eth0.' + str(vlan)
            cmds = "/sbin/ifconfig " + sub_intf + " | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1}'"
            src_ip = list(src_vm.run_cmd_on_vm(cmds=[cmds]).values())[0]
            dst_ip = list(dst_vm.run_cmd_on_vm(cmds=[cmds]).values())[0]
        assert src_vm.ping_with_certainty(dst_ip, count=5, size='1200')
        self.logger.info(
            'Ping from %s to %s executed with c=5, expected mirrored packets 5 Ingress,5 Egress count = 10'
            % (src_ip, dst_ip))
        filters = '| grep \"length [1-9][2-9][0-9][0-9][0-9]*\"'
        if self.inputs.pcap_on_vm:
            output, mirror_pkt_count = stop_tcpdump_for_vm_intf(
                None,
                None,
                None,
                vm_fix_pcap_pid_files=vm_fix_pcap_pid_files,
                filters=filters)
            mirror_pkt_count = int(mirror_pkt_count[0])
        else:
            mirror_pkt_count = self.stop_tcpdump(session, pcap, filters)
        errmsg = "%s ICMP Packets mirrored to the analyzer VM %s,"\
                 "Expected %s packets" % (
                     mirror_pkt_count, svm_name, exp_count)
        if mirror_pkt_count < exp_count:
            self.logger.error(errmsg)
            assert False, errmsg

        self.logger.info(
            "%s ICMP packets are mirrored to the analyzer "
            "service VM '%s'", mirror_pkt_count, svm_name)
        return result
Example #19
def start_tcpdump_for_intf(ip, username, password, interface, filters='-v', logger=None):
    if not logger:
        logger = logging.getLogger(__name__)
    session = ssh(ip, username, password)
    pcap = '/tmp/%s_%s.pcap' % (interface, get_random_name())
    cmd = 'tcpdump -ni %s -U %s -w %s' % (interface, filters, pcap)
    execute_cmd(session, cmd, logger)
    return (session, pcap)
Example #20
def start_tcpdump_for_intf(ip, username, password, interface, filters='-v', logger=None):
    if not logger:
        logger = contrail_logging.getLogger(__name__)
    session = ssh(ip, username, password)
    pcap = '/tmp/%s_%s.pcap' % (interface, get_random_name())
    cmd = 'tcpdump -nni %s -U %s -w %s' % (interface, filters, pcap)
    execute_cmd(session, cmd, logger)
    return (session, pcap)
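Both start_tcpdump_for_intf variants return (session, pcap), but no matching stop helper appears among these snippets. A hedged counterpart sketch, reusing execute_cmd/execute_cmd_out; the function name and the pkill pattern are assumptions:

def stop_tcpdump_for_intf_example(session, pcap, logger=None):
    if not logger:
        logger = logging.getLogger(__name__)
    # Stop the capture that was started with '-w <pcap>' and count its packets.
    execute_cmd(session, 'sudo pkill -f "%s"' % pcap, logger)
    out, err = execute_cmd_out(session, 'tcpdump -nr %s 2>/dev/null | wc -l' % pcap,
                               logger)
    return int(out.strip() or 0)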
Example #21
    def config_intf_mirroring(self, src_vm_fixture, analyzer_ip_address, analyzer_name, routing_instance, \
            src_port=None, sub_intf=False, parent_intf=False, nic_mirror=False, header = 1, nh_mode = 'dynamic', direction = 'both', analyzer_mac_address = '', mirror_vm_fixture = None):

        # Short description of the header values:
        # header 1 is the default (header enabled); all present test cases use it, so legacy cases are unaffected
        # header 2 is dynamic mirroring with the Juniper header and a traffic direction, where header verification is also wanted
        # header 3 means header disabled; routes then have to be imported from the other VN's VRF, so the VN properties need to be changed
        if header == 3:
            self.add_vn_mirror_properties()
            analyzer_mac_address = mirror_vm_fixture.mac_addr[self.vn3_fixture.vn_fq_name]

        vnc = src_vm_fixture.vnc_lib_h
        vlan = None
        tap_intf_obj = None
        parent_tap_intf_obj = None
        vlan = None
        tap_intf_objs = src_vm_fixture.get_tap_intf_of_vm()
        for tap_intf_obj in tap_intf_objs:
            if 'tap' in tap_intf_obj['name']:
                parent_tap_intf_uuid = tap_intf_obj['uuid']
            else:
                sub_intf_tap_intf_uuid = tap_intf_obj['uuid']
        if not sub_intf:
            tap_intf_uuid = src_vm_fixture.get_tap_intf_of_vm()[0]['uuid']
            tap_intf_obj = vnc.virtual_machine_interface_read(id=tap_intf_uuid)
        else:
            tap_intf_obj = src_port
            vlan = self.vlan

        if parent_intf:
            parent_tap_intf_obj = vnc.virtual_machine_interface_read(id=parent_tap_intf_uuid)
        if header == 1 or header == 2:
            header_value = True
        else:
            header_value = False
        if not nic_mirror:
            self.enable_intf_mirroring(vnc, tap_intf_obj, analyzer_ip_address, analyzer_name, routing_instance, header = header_value, nh_mode = nh_mode, direction = direction, analyzer_mac_address = analyzer_mac_address)
            if parent_intf:
                self.logger.info("Intf mirroring enabled on both sub intf port and parent port")
                self.enable_intf_mirroring(vnc, parent_tap_intf_obj, analyzer_ip_address, analyzer_name, routing_instance, header = header_value, nh_mode = nh_mode, direction = direction, analyzer_mac_address = analyzer_mac_address)
            return vnc, tap_intf_obj, parent_tap_intf_obj, vlan
        else:
            self.enable_intf_mirroring(vnc, tap_intf_obj, analyzer_ip_address=None, analyzer_name=analyzer_name, \
                routing_instance=None, udp_port=None, nic_assisted_mirroring=True, nic_assisted_mirroring_vlan=100, header = header_value, nh_mode = nh_mode, direction = direction, analyzer_mac_address = analyzer_mac_address)
            if src_vm_fixture.vm_obj.status == 'ACTIVE':
                host = self.get_svm_compute(src_vm_fixture.vm_obj.name)
            session = ssh(host['host_ip'], host['username'], host['password'])
            agent_physical_interface = src_vm_fixture.agent_inspect[host['host_ip']].get_agent_physical_interface()
            pcap = self.start_tcpdump(session, agent_physical_interface, vlan=100)
            src_vm_fixture.ping_with_certainty(mirror_vm_fixture.vm_ip, count=11, size='1400')
            filt = '-e | grep \"vlan 100\"'
            mirror_pkt_count = self.stop_tcpdump(session, pcap, filt)
            if mirror_pkt_count == 0:
                self.logger.error("Nic mirroring doesn't works correctly")
                result = result and False
            else:
                self.logger.info("Nic mirroring works correctly")
Example #22
    def config_intf_mirroring(self, src_vm_fixture, analyzer_ip_address, analyzer_name, routing_instance, \
            src_port=None, sub_intf=False, parent_intf=False, nic_mirror=False):

        vnc = src_vm_fixture.vnc_lib_h
        vlan = None
        tap_intf_obj = None
        parent_tap_intf_obj = None
        vlan = None
        tap_intf_objs = src_vm_fixture.get_tap_intf_of_vm()
        for tap_intf_obj in tap_intf_objs:
            if 'tap' in tap_intf_obj['name']:
                parent_tap_intf_uuid = tap_intf_obj['uuid']
            else:
                sub_intf_tap_intf_uuid = tap_intf_obj['uuid']
        if not sub_intf:
            tap_intf_uuid = src_vm_fixture.get_tap_intf_of_vm()[0]['uuid']
            tap_intf_obj = vnc.virtual_machine_interface_read(id=tap_intf_uuid)
        else:
            tap_intf_obj = src_port
            vlan = self.vlan

        if parent_intf:
            parent_tap_intf_obj = vnc.virtual_machine_interface_read(
                id=parent_tap_intf_uuid)
        if not nic_mirror:
            self.enable_intf_mirroring(vnc, tap_intf_obj, analyzer_ip_address,
                                       analyzer_name, routing_instance)
            if parent_intf:
                self.logger.info(
                    "Intf mirroring enabled on both sub intf port and parent port"
                )
                self.enable_intf_mirroring(vnc, parent_tap_intf_obj,
                                           analyzer_ip_address, analyzer_name,
                                           routing_instance)
            return vnc, tap_intf_obj, parent_tap_intf_obj, vlan
        else:
            self.enable_intf_mirroring(vnc, tap_intf_obj, analyzer_ip_address=None, analyzer_name=analyzer_name, \
                routing_instance=None, udp_port=None, nic_assisted_mirroring=True, nic_assisted_mirroring_vlan=100)
            if src_vm_fixture.vm_obj.status == 'ACTIVE':
                host = self.get_svm_compute(src_vm_fixture.vm_obj.name)
            session = ssh(host['host_ip'], host['username'], host['password'])
            agent_physical_interface = src_vm_fixture.agent_inspect[
                host['host_ip']].get_agent_physical_interface()
            pcap = self.start_tcpdump(session,
                                      agent_physical_interface,
                                      vlan=100)
            src_vm_fixture.ping_with_certainty(mirror_vm_fixture.vm_ip,
                                               count=11,
                                               size='1400')
            filt = '-e | grep \"vlan 100\"'
            mirror_pkt_count = self.stop_tcpdump(session, pcap, filt)
            if mirror_pkt_count == 0:
                self.logger.error("Nic mirroring doesn't works correctly")
                result = result and False
            else:
                self.logger.info("Nic mirroring works correctly")
Example #23
 def start_tcpdump(self, server_ip, tap_intf):
     session = ssh(server_ip, self.inputs.host_data[server_ip]['username'],
                   self.inputs.host_data[server_ip]['password'])
     pcap = '/tmp/%s.pcap' % tap_intf
     cmd = "tcpdump -nei %s tcp -w %s" % (tap_intf, pcap)
     self.logger.info(
         "Staring tcpdump to capture the packets on server %s" %
         (server_ip))
     execute_cmd(session, cmd, self.logger)
     return pcap, session
Example #24
    def tcpdump_on_analyzer(self, si_prefix):
        sessions = {}
        svm_name = si_prefix + '_1'
        host = self.get_svm_compute(svm_name)
        tapintf = self.get_svm_tapintf(svm_name)
        session = ssh(host['host_ip'], host['username'], host['password'])
        pcap = self.start_tcpdump(session, tapintf)
        sessions.update({svm_name: (session, pcap)})

        return sessions
Example #25
    def tcpdump_on_analyzer(self, si_prefix):
        sessions = {}
        svm_name = si_prefix + '_1'
        host = self.get_svm_compute(svm_name)
        tapintf = self.get_svm_tapintf(svm_name)
        session = ssh(host['host_ip'], host['username'], host['password'])
        pcap = self.start_tcpdump(session, tapintf)
        sessions.update({svm_name: (session, pcap)})

        return sessions
Example #26
    def tcpdump_on_all_analyzer(self, si_prefix, si_count=1):
        sessions = {}
        for i in range(0, si_count):
            svm_name = si_prefix + str(i + 1) + '_1'
            host = self.get_svm_compute(svm_name)
            tapintf = self.get_svm_tapintf(svm_name)
            session = ssh(host['host_ip'], host['username'], host['password'])
            pcap = self.start_tcpdump(session, tapintf)
            sessions.update({svm_name: (session, pcap)})

        return sessions
Example #27
 def check_bfd_packets(self, vm, vn):
     interface = vm.tap_intf[vn.vn_fq_name]['name']
     ip = self.inputs.host_data[vm.vm_node_ip]['host_ip']
     session = ssh(ip,self.inputs.host_data[ip]['username'],self.inputs.host_data[ip]['password'])
     cmd = "sudo timeout 30 tcpdump -nei %s ip | grep BFD" % (interface)
     self.logger.info("Starting tcpdump to capture the BFD packets on %s in server %s" % (interface, ip))
     out, err = execute_cmd_out(session, cmd, self.logger)
     result = False
     if out:
         result = True
     return result
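A short hedged usage note for Example #27: the helper already waits up to 30 seconds on the compute via timeout, so a test only needs to assert on its boolean result. vm1 and vn1 below are placeholder fixtures:

 # Hypothetical assertion inside a test; vm1 and vn1 are placeholder fixtures.
 assert self.check_bfd_packets(vm1, vn1), \
     'No BFD packets captured on the tap interface of %s' % vm1.vm_name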
Example #28
def start_tcpdump_for_vm_intf(obj, vm_fix, vn_fq_name, filters='-v'):
    compute_ip = vm_fix.vm_node_ip
    compute_user = obj.inputs.host_data[compute_ip]['username']
    compute_password = obj.inputs.host_data[compute_ip]['password']
    session = ssh(compute_ip, compute_user, compute_password)
    vm_tapintf = vm_fix.tap_intf[vn_fq_name]['name']
    pcap = '/tmp/%s_%s.pcap' % (vm_tapintf, get_random_name())
    cmd = 'tcpdump -ni %s -U %s -w %s' % (vm_tapintf, filters, pcap)
    execute_cmd(session, cmd, obj.logger)

    return (session, pcap)
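Example #28 starts a capture on a VM's tap interface and returns (session, pcap). A hedged sketch of how that result might be consumed, assuming the same execute_cmd_out helper; the function name and checks are illustrative:

def example_check_vm_intf_capture(obj, vm_fix, vn_fq_name, peer_ip):
    # Capture ICMP on the VM's tap, generate traffic, then count what was seen.
    session, pcap = start_tcpdump_for_vm_intf(obj, vm_fix, vn_fq_name,
                                              filters='icmp')
    assert vm_fix.ping_with_certainty(peer_ip, count=5)
    cmd = 'tcpdump -nr %s 2>/dev/null | wc -l' % pcap
    out, err = execute_cmd_out(session, cmd, obj.logger)
    return int(out.strip() or 0) > 0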
Example #29
    def tcpdump_on_all_analyzer(self, si_fixture):
        sessions = {}
        svms = self.get_svms_in_si(si_fixture)
        for svm in svms:
            svm_name = svm.name
            host = self.get_svm_compute(svm_name)
            tapintf = self.get_svm_tapintf(svm_name)
            session = ssh(host['host_ip'], host['username'], host['password'])
            pcap = self.start_tcpdump(session, tapintf)
            sessions.update({svm_name: (session, pcap)})

        return sessions
Example #30
    def tcpdump_on_all_analyzer(self, si_fixtures, si_prefix, si_count=1):
        sessions = {}
        for i in range(0, si_count):
            si_fixture = si_fixtures[i]
            svm_name = "__".join(si_fixture.si_fq_name) + "__" + str(1)
            host = self.get_svm_compute(svm_name)
            tapintf = self.get_svm_tapintf(svm_name)
            session = ssh(host['host_ip'], host['username'], host['password'])
            pcap = self.start_tcpdump(session, tapintf)
            sessions.update({svm_name: (session, pcap)})

        return sessions
Example #31
    def tcpdump_on_all_analyzer(self, si_fixtures, si_prefix, si_count=1):
        sessions = {}
        for i in range(0, si_count):
            si_fixture = si_fixtures[i]
            svm_name = "__".join(si_fixture.si_fq_name) + "__" + str(1)
            host = self.get_svm_compute(svm_name)
            tapintf = self.get_svm_tapintf(svm_name)
            session = ssh(host['host_ip'], host['username'], host['password'])
            pcap = self.start_tcpdump(session, tapintf)
            sessions.update({svm_name: (session, pcap)})

        return sessions
Example #32
    def verify_port_mirroring(self, src_vm, dst_vm, mirr_vm, vlan=None, parent=False, direction = 'both', no_header = False):
        result = True
        svm = mirr_vm.vm_obj
        if svm.status == 'ACTIVE':
            svm_name = svm.name
            host = self.get_svm_compute(svm_name)
            tapintf = self.get_svm_tapintf(svm_name)
        # Intf mirroring enabled on either sub intf or parent port
        exp_count = 10
        if direction != 'both':
            exp_count = 5
        if parent:
            # Intf mirroring enabled on both sub intf and parent port
            exp_count = 20
        if self.inputs.pcap_on_vm:
            filters = ''
            if not no_header:
                filters='udp port 8099'
            vm_fix_pcap_pid_files = start_tcpdump_for_vm_intf(
                None, [mirr_vm], None, filters=filters, pcap_on_vm=True)
        else:
            session = ssh(host['host_ip'], host['username'], host['password'])
            pcap = self.start_tcpdump(session, tapintf, vlan=vlan, no_header = no_header)
        src_ip = src_vm.vm_ip
        dst_ip = dst_vm.vm_ip
        if vlan:
            sub_intf = 'eth0.' + str(vlan)
            cmds = "/sbin/ifconfig " + sub_intf + " | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1}'"
            src_ip = list(src_vm.run_cmd_on_vm(cmds=[cmds]).values())[0]
            dst_ip = list(dst_vm.run_cmd_on_vm(cmds=[cmds]).values())[0]
        assert src_vm.ping_with_certainty(dst_ip, count=5, size='1200')
        #lets wait 10 sec for tcpdump to capture all the packets
        sleep(10)
        self.logger.info('Ping from %s to %s executed with c=5, expected mirrored packets 5 Ingress,5 Egress count = 10'
            % (src_ip, dst_ip))
        filters = '| grep \"length [1-9][2-9][0-9][0-9][0-9]*\"'
        if self.inputs.pcap_on_vm:
            output, mirror_pkt_count = stop_tcpdump_for_vm_intf(
                None, None, None, vm_fix_pcap_pid_files=vm_fix_pcap_pid_files, filters=filters, verify_on_all=True)
            mirror_pkt_count = int(mirror_pkt_count[0])
        else:
            mirror_pkt_count = self.stop_tcpdump(session, pcap, filters)
        errmsg = "%s ICMP Packets mirrored to the analyzer VM %s,"\
                 "Expected %s packets" % (
                     mirror_pkt_count, svm_name, exp_count)
        if mirror_pkt_count < exp_count:
            self.logger.error(errmsg)
            assert False, errmsg

        self.logger.info("%s ICMP packets are mirrored to the analyzer "
                         "service VM '%s'", mirror_pkt_count, svm_name)
        return result
Example #33
 def start_tcp_dump(self, vm_fixture):
     sessions =[]
     vm_name = vm_fixture.vm_name
     host = self.inputs.host_data[vm_fixture.vm_node_ip]
     inspect_h = self.agent_inspect[vm_fixture.vm_node_ip]
     tapintf = inspect_h.get_vna_tap_interface_by_ip(vm_fixture.vm_ip)[0]['name']
     pcap = '/tmp/%s.pcap' % tapintf
     cmd = "sudo tcpdump -ni %s udp -w %s" % (tapintf, pcap)
     session = ssh(host['host_ip'], host['username'], host['password'])
     self.logger.info("Staring tcpdump to capture the packets.")
     execute_cmd(session, cmd, self.logger)
     sessions.extend((session, pcap))
     return sessions
Example #34
 def start_tcp_dump(self, vm_fixture):
     sessions =[]
     vm_name = vm_fixture.vm_name
     host = self.inputs.host_data[vm_fixture.vm_node_ip]
     inspect_h = self.agent_inspect[vm_fixture.vm_node_ip]
     tapintf = inspect_h.get_vna_tap_interface_by_ip(vm_fixture.vm_ip)[0]['name']
     pcap = '/tmp/%s.pcap' % tapintf
     cmd = "tcpdump -ni %s udp -w %s" % (tapintf, pcap)
     session = ssh(host['host_ip'], host['username'], host['password'])
     self.logger.info("Staring tcpdump to capture the packets.")
     execute_cmd(session, cmd, self.logger)
     sessions.extend((session, pcap))
     return sessions
Example #35
    def tcpdump_on_all_analyzer(self, si_fixtures, si_prefix, si_count=1):
        sessions = {}
        for i in range(0, si_count):
            si_fixture = si_fixtures[i]
            svm_name = si_fixture.si_obj.uuid + '__' + str(i + 1)
            svm_name=self.inputs.domain_name + '__' + self.inputs.project_name + '__' + svm_name
            host = self.get_svm_compute(svm_name)
            tapintf = self.get_svm_tapintf(svm_name)
            session = ssh(host['host_ip'], host['username'], host['password'])
            pcap = self.start_tcpdump(session, tapintf)
            sessions.update({svm_name: (session, pcap)})

        return sessions
Example #36
    def tcpdump_on_all_analyzer(self, si_fixtures, si_prefix, si_count=1):
        sessions = {}
        for i in range(0, si_count):
            si_fixture = si_fixtures[i]
            svms = self.get_svms_in_si(si_fixture, self.inputs.project_name)
        for svm in svms:
            svm_name = svm.name
            host = self.get_svm_compute(svm_name)
            tapintf = self.get_svm_tapintf(svm_name)
            session = ssh(host['host_ip'], host['username'], host['password'])
            pcap = self.start_tcpdump(session, tapintf)
            sessions.update({svm_name: (session, pcap)})

        return sessions
Example #37
    def tcpdump_on_all_analyzer(self, si_fixture):
        sessions = {}
        svms = self.get_svms_in_si(si_fixture)
        svm_fixtures = si_fixture.svm_list
        for svm in svm_fixtures:
            svm_name = svm.vm_name
            host = self.inputs.host_data[svm.vm_node_ip]
            #tapintf = self.get_svm_tapintf(svm_name)
            tapintf = list(svm.tap_intf.values())[0]['name']
            session = ssh(host['host_ip'], host['username'], host['password'])
            pcap = self.start_tcpdump(session, tapintf)
            sessions.update({svm_name: (session, pcap)})

        return sessions
Example #38
    def tcpdump_on_all_analyzer(self, si_fixtures, si_prefix, si_count=1):
        sessions = {}
        for i in range(0, si_count):
            si_fixture = si_fixtures[i]
            svms = self.get_svms_in_si(si_fixture, self.inputs.project_name)
        for svm in svms:
            svm_name = svm.name
            host = self.get_svm_compute(svm_name)
            tapintf = self.get_svm_tapintf(svm_name)
            session = ssh(host["host_ip"], host["username"], host["password"])
            pcap = self.start_tcpdump(session, tapintf)
            sessions.update({svm_name: (session, pcap)})

        return sessions
Example #39
 def verify_flow_thru_si(self, si_fix, src_vn):
     
     self.logger.info('Will start a tcpdump on the left-interfaces of the Service Instances to find out which flow is entering which Service Instance')
     flowcount= 0
     result= True
     si_count= si_fix.max_inst
     for i in range(1, si_count+1):
         svm_name = si_fix.si_name + '_%s'%i
         host = self.get_svm_compute(svm_name)
         tapintf = self.get_svm_tapintf_of_vn(svm_name, src_vn)
         session = ssh(host['host_ip'], host['username'], host['password'])
         cmd = 'tcpdump -ni %s proto 17 -vvv -c 5 > /tmp/%s_out.log' % (tapintf,tapintf)
         execute_cmd(session, cmd, self.logger)
     sleep(5)
     self.logger.info('***** Will check the result of tcpdump *****')
     for i in range(1, si_count+1):
         svm_name = si_fix.si_name + '_%s'%i
         host = self.get_svm_compute(svm_name)
         tapintf = self.get_svm_tapintf_of_vn(svm_name, src_vn)
         session = ssh(host['host_ip'], host['username'], host['password'])
         output_cmd = 'cat /tmp/%s_out.log' % tapintf 
         out, err = execute_cmd_out(session, output_cmd, self.logger)
         if '9000' in out:
             flowcount= flowcount + 1
             self.logger.info('Flow with dport 9000 seen flowing inside %s'%svm_name)
         if '9001' in out:
             flowcount= flowcount + 1
             self.logger.info('Flow with dport 9001 seen flowing inside %s'%svm_name)
         if '9002' in out:
             flowcount= flowcount + 1
             self.logger.info('Flow with dport 9002 seen flowing inside %s'%svm_name)
     if flowcount > 1:
         self.logger.info('Flows are distributed across the Service Instances')
     else:
         result= False
     assert result, 'No Flow distribution seen' 
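Example #39 assumes UDP flows with destination ports 9000-9002 are already running between the end VMs; generating them is outside these snippets. One purely illustrative way such flows could be produced from the source VM (netcat-based; not a helper from this suite, and dst_vm_ip/src_vm_fixture are placeholders):

     # Illustrative only: fire short UDP streams at dports 9000-9002.
     for dport in (9000, 9001, 9002):
         cmd = 'echo payload | nc -u -w 2 %s %s' % (dst_vm_ip, dport)
         src_vm_fixture.run_cmd_on_vm(cmds=[cmd])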
Example #40
    def config_intf_mirroring(self, src_vm_fixture, analyzer_ip_address, analyzer_name, routing_instance, \
            src_port=None, sub_intf=False, parent_intf=False, nic_mirror=False):

        vnc = src_vm_fixture.vnc_lib_h
        vlan = None
        tap_intf_obj = None
        parent_tap_intf_obj = None
        vlan = None
        tap_intf_objs = src_vm_fixture.get_tap_intf_of_vm()
        for tap_intf_obj in tap_intf_objs:
            if 'tap' in tap_intf_obj['name']:
                parent_tap_intf_uuid = tap_intf_obj['uuid']
            else:
                sub_intf_tap_intf_uuid = tap_intf_obj['uuid']
        if not sub_intf:
            tap_intf_uuid = src_vm_fixture.get_tap_intf_of_vm()[0]['uuid']
            tap_intf_obj = vnc.virtual_machine_interface_read(id=tap_intf_uuid)
        else:
            tap_intf_obj = src_port
            vlan = self.vlan

        if parent_intf:
            parent_tap_intf_obj = vnc.virtual_machine_interface_read(id=parent_tap_intf_uuid)
        if not nic_mirror:
            self.enable_intf_mirroring(vnc, tap_intf_obj, analyzer_ip_address, analyzer_name, routing_instance)
            if parent_intf:
                self.logger.info("Intf mirroring enabled on both sub intf port and parent port")
                self.enable_intf_mirroring(vnc, parent_tap_intf_obj, analyzer_ip_address, analyzer_name, routing_instance)
            return vnc, tap_intf_obj, parent_tap_intf_obj, vlan
        else:
            self.enable_intf_mirroring(vnc, tap_intf_obj, analyzer_ip_address=None, analyzer_name=analyzer_name, \
                routing_instance=None, udp_port=None, nic_assisted_mirroring=True, nic_assisted_mirroring_vlan=100)
            if src_vm_fixture.vm_obj.status == 'ACTIVE':
                host = self.get_svm_compute(src_vm_fixture.vm_obj.name)
            session = ssh(host['host_ip'], host['username'], host['password'])
            agent_physical_interface = src_vm_fixture.agent_inspect[host['host_ip']].get_agent_physical_interface()
            pcap = self.start_tcpdump(session, agent_physical_interface, vlan=100)
            src_vm_fixture.ping_with_certainty(mirror_vm_fixture.vm_ip, count=11, size='1400')
            filt = '-e | grep \"vlan 100\"'
            mirror_pkt_count = self.stop_tcpdump(session, pcap, filt)
            if mirror_pkt_count == 0:
                self.logger.error("Nic mirroring doesn't works correctly")
                result = result and False
            else:
                self.logger.info("Nic mirroring works correctly")
Example #41
    def tcpdump_on_all_analyzer(self, si_fixture):
        sessions = {}
        svms = self.get_svms_in_si(si_fixture)
        svm_fixtures = si_fixture.svm_list
        for svm in svm_fixtures:
            svm_name = svm.vm_name
            host = self.inputs.host_data[svm.vm_node_ip]
            #tapintf = self.get_svm_tapintf(svm_name)
            tapintf = list(svm.tap_intf.values())[0]['name']
            session = ssh(host['host_ip'], host['username'], host['password'])
            pcap = self.start_tcpdump(session, tapintf)
            sessions.update({svm_name: (session, pcap)})

        if self.inputs.pcap_on_vm:
            conn_list = []
            svm_list = si_fixture._svm_list
            vm_fix_pcap_pid_files = self.start_tcpdump(None, tap_intf='eth0', vm_fixtures=svm_list, pcap_on_vm=True)
            conn_list.append(vm_fix_pcap_pid_files)
            conn_list.append(sessions)
            return conn_list

        return sessions
Example #42
 def start_tcpdump_on_intf(self, host, tapintf):
     session = ssh(host['host_ip'], host['username'], host['password'])
     cmd = 'sudo tcpdump -nni %s -c 1 proto 1 > /tmp/%s_out.log 2>&1' % (
         tapintf, tapintf)
     execute_cmd(session, cmd, self.logger)
Example #43
 def start_tcpdump_on_intf(self, host, tapintf):
     session = ssh(host["host_ip"], host["username"], host["password"])
     cmd = "tcpdump -nni %s -c 10 > /tmp/%s_out.log" % (tapintf, tapintf)
     execute_cmd(session, cmd, self.logger)
Example #44
    def verify_flow_thru_si(self, si_fix, src_vn=None, ecmp_hash=None, flow_count=3):
        self.logger.info(
            'Will start a tcpdump on the left-interfaces of the Service Instances to find out which flow is entering which Service Instance')
        flowcount = 0
        result = True
        flow_pattern = {}
        svms = self.get_svms_in_si(si_fix)
        svms = sorted(set(svms))
        svm_list = si_fix.svm_list
        svm_index = 0
        vm_fix_pcap_pid_files = []

        # Capturing packets based upon source port
        src_port = "8000"
        filters = '\'(src port %s)\'' % (src_port)
        if None in svms:
            svms.remove(None)
        for svm_fixture in svm_list:
            svm = svm_fixture.vm_obj
            self.logger.info('SVM %s is in %s state' % (svm.name, svm.status))
            if svm.status == 'ACTIVE':
                svm_name = svm.name
                host_name = self.connections.orch.get_host_of_vm(svm)
                host = self.inputs.host_data[host_name]
                if src_vn is not None:
                    #tapintf = svm_fixture.tap_intf[src_vn.vn_fq_name]['name']
                    tapintf = self.connections.orch.get_vm_tap_interface(\
                                      svm_fixture.tap_intf[src_vn.vn_fq_name])
                else:
                    tapintf = self.connections.orch.get_vm_tap_interface(svm_fixture.tap_intf[si_fix.left_vn_fq_name])
                    filters = ''
                if self.inputs.pcap_on_vm:
                    tcpdump_files = start_tcpdump_for_vm_intf(
                        None, [svm_list[svm_index]], None, filters=filters, pcap_on_vm=True, vm_intf='eth1', svm=True)
                    svm_index = svm_index + 1
                    vm_fix_pcap_pid_files.append(tcpdump_files)
                else:
                    session = ssh(
                        host['host_ip'], host['username'], host['password'])
                    cmd = 'sudo tcpdump -nni %s %s -c 20 > /tmp/%s_out.log' % (
                        tapintf, filters, tapintf)
                    execute_cmd(session, cmd, self.logger)
            else:
                self.logger.info('%s is not in ACTIVE state' % svm.name)
        sleep(15)

        self.logger.info('%%%%% Will check the result of tcpdump %%%%%')
        svms = self.get_svms_in_si(si_fix)
        svms = sorted(set(svms))
        if None in svms:
            svms.remove(None)
        svm_index = 0
        for svm_fixture in svm_list:
            svm = svm_fixture.vm_obj
            self.logger.info('SVM %s is in %s state' % (svm.name, svm.status))
            if svm.status == 'ACTIVE':
                svm_name = svm.name
                host_name = self.connections.orch.get_host_of_vm(svm)
                host = self.inputs.host_data[host_name]
                if src_vn is not None:
                    tapintf = self.connections.orch.get_vm_tap_interface(\
                                      svm_fixture.tap_intf[src_vn.vn_fq_name])
                    #tapintf = svm_fixture.tap_intf[src_vn.vn_fq_name]['name']
                else:
                    direction = 'left'
                    tapintf = self.connections.orch.get_vm_tap_interface(svm_fixture.tap_intf[si_fix.left_vn_fq_name])
                if not self.inputs.pcap_on_vm:
                    session = ssh(
                        host['host_ip'], host['username'], host['password'])
                    output_cmd = 'cat /tmp/%s_out.log' % tapintf
                    out, err = execute_cmd_out(session, output_cmd, self.logger)
                else:
                    out, pkt_count = stop_tcpdump_for_vm_intf(
                        None, None, None, vm_fix_pcap_pid_files=vm_fix_pcap_pid_files[svm_index], svm=True)
                    svm_index = svm_index + 1
                    out = out[0]
                for i in range(0,flow_count):
                    dport = str(9000+i)
                    if dport in out:
                        flowcount = flowcount + 1
                        self.logger.info(
                            'Flow with dport %s seen flowing inside %s' % (dport,svm_name))
                        flow_pattern[dport] = svm_name
            else:
                self.logger.info('%s is not in ACTIVE state' % svm.name)
        if flowcount > 0:
            self.logger.info(
                'Flows are distributed across the Service Instances as :')
            self.logger.info('%s' % flow_pattern)
        else:
            result = False

        if ecmp_hash and ecmp_hash != 'default':
            # count the number of hash fields set
            hash_var_count = sum(ecmp_hash.values())

            # In case only one hash field is set, all flows should go through a
            # single service instance. One exception here is destination_port.
            # Destination port varies across traffic streams, so for destination
            # port, traffic will get load balanced even if the ecmp hash is
            # configured with "destination_port" alone.
            if hash_var_count == 1 and (not 'destination_port' in ecmp_hash):
                flow_pattern_ref = flow_pattern['9000']
                if all(flow_pattern_ref  == item for item in flow_pattern.values()):
                    self.logger.info(
                        'Flows are flowing through Single Service Instance: %s, as per config hash: %s' % (flow_pattern_ref, ecmp_hash))
                    self.logger.info('%s' % flow_pattern)
                else:
                    result = False
                    self.logger.error(
                        'Flows are flowing through multiple Service Instances:%s, where as it should not as per config hash:%s' % (flow_pattern, ecmp_hash))
                    assert result, 'Config hash is not working for: %s' % ( ecmp_hash)
            # In case multiple ecmp hash fields are configured, the default ecmp
            # hash is present, or only 'destination_port' is configured in
            # ecmp_hash
            else:
                flow_pattern_ref = flow_pattern['9000']
                if all(flow_pattern_ref  == item for item in flow_pattern.values()):
                    result = False
                    self.logger.error(
                        'Flows are flowing through Single Service Instance:%s, where as it should not as per config hash:%s' % (flow_pattern_ref, ecmp_hash))
                    #assert result, 'Config hash is not working fine.'
                else:
                    self.logger.info(
                        'Flows are flowing through multiple Service Instances:%s, as per config hash: %s' % (flow_pattern, ecmp_hash))
                    self.logger.info('%s' % flow_pattern)
        else:
            assert result, 'No Flow distribution seen'
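The comments in Example #44 reason about how many ECMP hash fields are enabled. A tiny hedged illustration of that counting, with a placeholder hash dict (the real ecmp_hash comes from the test configuration):

# Placeholder hash config: only one field is enabled, so it is the only key.
ecmp_hash = {'source_ip': True}
hash_var_count = sum(ecmp_hash.values())   # -> 1
# With a single field set and 'destination_port' not among the keys, all flows
# are expected to land on one service instance, per the comments above.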
Example #45
    def verify_flow_thru_si(self, si_fix, src_vn=None):
        self.logger.info(
            'Will start a tcpdump on the left-interfaces of the Service Instances to find out which flow is entering which Service Instance'
        )
        flowcount = 0
        result = True
        flow_pattern = {}
        svms = self.get_svms_in_si(si_fix, self.inputs.project_name)
        svms = sorted(set(svms))
        if None in svms:
            svms.remove(None)
        for svm in svms:
            self.logger.info('SVM %s is in %s state' % (svm.name, svm.status))
            if svm.status == 'ACTIVE':
                svm_name = svm.name
                host = self.get_svm_compute(svm_name)
                if src_vn is not None:
                    tapintf = self.get_svm_tapintf_of_vn(svm_name, src_vn)
                else:
                    direction = 'left'
                    tapintf = self.get_bridge_svm_tapintf(svm_name, direction)
                session = ssh(host['host_ip'], host['username'],
                              host['password'])
                cmd = 'tcpdump -nni %s -c 10 > /tmp/%s_out.log' % (tapintf,
                                                                   tapintf)
                execute_cmd(session, cmd, self.logger)
            else:
                self.logger.info('%s is not in ACTIVE state' % svm.name)
        sleep(15)

        self.logger.info('%%%%% Will check the result of tcpdump %%%%%')
        svms = self.get_svms_in_si(si_fix, self.inputs.project_name)
        svms = sorted(set(svms))
        if None in svms:
            svms.remove(None)
        for svm in svms:
            self.logger.info('SVM %s is in %s state' % (svm.name, svm.status))
            if svm.status == 'ACTIVE':
                svm_name = svm.name
                host = self.get_svm_compute(svm_name)
                if src_vn is not None:
                    tapintf = self.get_svm_tapintf_of_vn(svm_name, src_vn)
                else:
                    direction = 'left'
                    tapintf = self.get_bridge_svm_tapintf(svm_name, direction)
                session = ssh(host['host_ip'], host['username'],
                              host['password'])
                output_cmd = 'cat /tmp/%s_out.log' % tapintf
                out, err = execute_cmd_out(session, output_cmd, self.logger)
                if '9000' in out:
                    flowcount = flowcount + 1
                    self.logger.info(
                        'Flow with dport 9000 seen flowing inside %s' %
                        svm_name)
                    flow_pattern['9000'] = svm_name
                if '9001' in out:
                    flowcount = flowcount + 1
                    self.logger.info(
                        'Flow with dport 9001 seen flowing inside %s' %
                        svm_name)
                    flow_pattern['9001'] = svm_name
                if '9002' in out:
                    flowcount = flowcount + 1
                    self.logger.info(
                        'Flow with dport 9002 seen flowing inside %s' %
                        svm_name)
                    flow_pattern['9002'] = svm_name
            else:
                self.logger.info('%s is not in ACTIVE state' % svm.name)
        if flowcount > 0:
            self.logger.info(
                'Flows are distributed across the Service Instances as :')
            self.logger.info('%s' % flow_pattern)
        else:
            result = False
        assert result, 'No Flow distribution seen'
Example #46
    def verify_port_mirroring(self, src_vm, dst_vm, mirr_vm, vlan=None, parent=False, direction = 'both', no_header = False):
        result = True
        svm = mirr_vm.vm_obj
        if svm.status == 'ACTIVE':
            svm_name = svm.name
            host = self.get_svm_compute(svm_name)
            tapintf = self.get_svm_tapintf(svm_name)
        # Intf mirroring enabled on either sub intf or parent port
        exp_count = 10
        if direction != 'both':
            exp_count = 5
        if parent:
            # Intf mirroring enabled on both sub intf and parent port
            exp_count = 20
        if self.inputs.pcap_on_vm:
            filters = ''
            if not no_header:
                filters='udp port 8099'
            vm_fix_pcap_pid_files = start_tcpdump_for_vm_intf(
                None, [mirr_vm], None, filters=filters, pcap_on_vm=True)
        else:
            session = ssh(host['host_ip'], host['username'], host['password'])
            pcap = self.start_tcpdump(session, tapintf, vlan=vlan, no_header = no_header)
        src_ip = src_vm.vm_ip
        dst_ip = dst_vm.vm_ip
        if vlan:
            sub_intf = 'eth0.' + str(vlan)
            cmds = "/sbin/ifconfig " + sub_intf + " | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1}'"
            if self.inputs.get_af() == 'v6':
               cmds = "/sbin/ifconfig " + sub_intf + " | grep 'inet6 addr:'"
            src_ip_list = list(src_vm.run_cmd_on_vm(cmds=[cmds]).values())
            src_ip = src_ip_list[0]
            dst_ip_list = list(dst_vm.run_cmd_on_vm(cmds=[cmds]).values())
            dst_ip = dst_ip_list[0]
            if self.inputs.get_af() == 'v6':
                src_list = src_ip.split(' ')
                dst_list = dst_ip.split(' ')
                for line in src_list:
                    if line.find('/') != -1 and line.find('fe80') == -1:
                        src_ip = line
                        break
                src_ip = line.split('/')[0].strip()
                for line in dst_list:
                    if line.find('/') != -1 and line.find('fe80') == -1:
                        dst_ip = line
                        break
                dst_ip = line.split('/')[0].strip()
        assert src_vm.ping_with_certainty(dst_ip, count=5, size='1200')
        #lets wait 10 sec for tcpdump to capture all the packets
        sleep(10)
        self.logger.info('Ping from %s to %s executed with c=5, expected mirrored packets 5 Ingress,5 Egress count = 10'
            % (src_ip, dst_ip))
        filters = '| grep \"length [1-9][2-9][0-9][0-9][0-9]*\"'
        if self.inputs.pcap_on_vm:
            output, mirror_pkt_count = stop_tcpdump_for_vm_intf(
                None, None, None, vm_fix_pcap_pid_files=vm_fix_pcap_pid_files, filters=filters, verify_on_all=True)
            mirror_pkt_count = int(mirror_pkt_count[0])
        else:
            mirror_pkt_count = self.stop_tcpdump(session, pcap, filters)
        errmsg = "%s ICMP Packets mirrored to the analyzer VM %s,"\
                 "Expected %s packets" % (
                     mirror_pkt_count, svm_name, exp_count)
        if mirror_pkt_count < exp_count:
            self.logger.error(errmsg)
            assert False, errmsg

        self.logger.info("%s ICMP packets are mirrored to the analyzer "
                         "service VM '%s'", mirror_pkt_count, svm_name)
        return result
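
A note on the expected-count arithmetic above: five ICMP requests are sent, each mirrored packet shows up once per mirrored direction, and mirroring on both the sub-interface and its parent port doubles the total. A minimal sketch of that logic (the helper name is hypothetical, not part of the test code):

def expected_mirrored_packets(direction='both', parent=False):
    # Mirroring enabled on both the sub-interface and the parent port
    # doubles the capture to 20 packets.
    if parent:
        return 20
    # 5 pings -> 5 mirrored packets per direction.
    return 10 if direction == 'both' else 5

assert expected_mirrored_packets() == 10
assert expected_mirrored_packets(direction='ingress') == 5
assert expected_mirrored_packets(parent=True) == 20
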
Example #47
0
 def start_tcpdump_on_intf(self, host, tapintf):
     session = ssh(host['host_ip'], host['username'], host['password'])
     cmd = 'tcpdump -nni %s -c 10 > /tmp/%s_out.log' % (tapintf, tapintf)
     execute_cmd(session, cmd, self.logger)
Example #48
0
 def verify_packets(self, packet_type, pcap_path_with_file_name,
                    expected_count=None, dot1p=None, dscp=None,
                    mpls_exp=None):
     '''
     This function parses a tcpdump capture file.
     It verifies that the relevant fields in the packets of the pcap file match
     the values expected by the user.
     "packet_type" is mandatory and can be set to any string containing "exp", "dot1p",
     "dscp", or any combination of them.
     Verification is done for the following values:
         1. DSCP field
         2. VLAN priority code point
         3. MPLS EXP bits
     This function can also be used to parse any .pcap present on any node,
     even if the capture was not started by a 'TestQosTraffic' object.
     '''
     if self.session is None:
         if not self.username or not self.node_ip or not self.password:
             self.logger.error("Either of IP, username or password not"
                               " specified")
             self.logger.error("Cannot open ssh session to the node")
             return False
         else:
             self.session = ssh(self.node_ip, self.username,\
                                    self.password)
         cmd = "ls -al %s" % pcap_path_with_file_name
         out, err = execute_cmd_out(self.session, cmd)
         if out:
             self.pcap = out.split('\n')[0].split()[-1]
         elif err:
             self.logger.error("%s" % err)
             return False
     if expected_count:
         result = verify_tcpdump_count(self, self.session, self.pcap,
                                       expected_count, exact_match=False)
         if not result:
             return result
     file_transfer = self.compute_node_fixture.file_transfer(
                         "get", self.pcap, self.pcap.split('/')[-1])
     if not file_transfer:
         self.logger.error("Unable to transfer file to local system")
         return False
     file_name = self.pcap.split('/')[-1]
     if self.encap_type:
         if not self.verify_encap_type(self.encap_type, file_name):
             return False
     f = open(file_name, 'rb')
     pcap = dpkt.pcap.Reader(f)
     count = 0
     for ts,buff in pcap:
         ether = dpkt.ethernet.Ethernet(buff)
         self.logger.debug("Verifying for packet number %d" % count)
         count = count+1
         if "dot1p" in packet_type and\
         self._check_underlay_interface_is_tagged():
             if isinstance(dot1p,int):
                 string = ''
                 try:
                     priority = ether.vlan_tags[0].pri
                  except AttributeError as e:
                     self.logger.error(e)
                     return False
                 if priority == dot1p:
                     self.logger.debug("Validated dot1p marking of %s" 
                                       % (dot1p))
                 else:
                     self.logger.error("Mismatch between actual and"
                                       " expected PCP")
                     self.logger.error("Expected PCP : %s, Actual PCP :%s"\
                                       % (dot1p,priority))
                     return False
             else:
                 self.logger.error("dot1p to be compared not mentioned")
                 return False
         if "dscp" in packet_type:
             if isinstance(dscp,int):
                 ip = ether.data
                 try:
                     actual_dscp = int(bin(ip.tos >> 2), 2)
                  except AttributeError as e:
                     self.logger.error(e)
                     return False
                 if dscp == actual_dscp:
                     self.logger.debug("Validated DSCP marking of %s" % 
                                       (dscp))
                 else:
                     self.logger.error("Mismatch between actual and"
                                       " expected DSCP")
                     self.logger.error("Expected DSCP: %s, Actual DSCP:%s"\
                                       % (dscp,actual_dscp))
                     return False
             else:
                 self.logger.error("dscp to be compared not mentioned")
                 return False
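
The docstring above describes pulling the DSCP and 802.1p values out of a capture with dpkt. A self-contained sketch of that extraction, assuming dpkt is installed and using a hypothetical file path, could look like this:

import dpkt

def read_qos_fields(pcap_file='capture.pcap'):
    fields = []
    with open(pcap_file, 'rb') as f:  # dpkt's pcap Reader needs binary mode
        for ts, buf in dpkt.pcap.Reader(f):
            ether = dpkt.ethernet.Ethernet(buf)
            # 802.1p priority code point, if the frame carries a VLAN tag
            pcp = ether.vlan_tags[0].pri if getattr(ether, 'vlan_tags', None) else None
            ip = ether.data
            # DSCP is the upper six bits of the IPv4 TOS byte
            dscp = ip.tos >> 2 if isinstance(ip, dpkt.ip.IP) else None
            fields.append({'dscp': dscp, 'pcp': pcp})
    return fields
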
Example #49
0
    def tcpdump_analyze_on_compute(self,
                                   comp_ip,
                                   pcaptype,
                                   vxlan_id=None,
                                   vlan_id=None):
        sleep(2)
        sessions = {}
        compute_user = self.inputs.host_data[comp_ip]['username']
        compute_password = self.inputs.host_data[comp_ip]['password']
        session = ssh(comp_ip, compute_user, compute_password)
        self.logger.info("Analyzing on compute node %s" % comp_ip)
        if pcaptype == 'UDP':
            pcaps1 = '/tmp/encap-udp.pcap'
            pcaps2 = '/tmp/encap-gre.pcap'
            cmd2 = 'tcpdump  -r %s | grep UDP |wc -l' % pcaps1
            out2, err = execute_cmd_out(session, cmd2, self.logger)
            cmd3 = 'tcpdump  -r %s | grep GRE | wc -l' % pcaps2
            out3, err = execute_cmd_out(session, cmd3, self.logger)
            count2 = int(out2.strip('\n'))
            count3 = int(out3.strip('\n'))
            if count2 != 0 and count3 == 0:
                self.logger.info(
                    "%s UDP encapsulated packets are seen and %s GRE encapsulated packets are seen as expected"
                    % (count2, count3))
                return True
            else:
                errmsg = "%s UDP encapsulated packets are seen and %s GRE encapsulated packets are seen.Not expected" % (
                    count2, count3)
                self.logger.error(errmsg)
                assert False, errmsg
        elif pcaptype == 'GRE':
            pcaps1 = '/tmp/encap-udp.pcap'
            pcaps2 = '/tmp/encap-gre.pcap'
            cmd2 = 'tcpdump  -r %s | grep UDP |wc -l' % pcaps1
            out2, err = execute_cmd_out(session, cmd2, self.logger)
            cmd3 = 'tcpdump  -r %s | grep GRE | wc -l' % pcaps2
            out3, err = execute_cmd_out(session, cmd3, self.logger)
            count2 = int(out2.strip('\n'))
            count3 = int(out3.strip('\n'))
            if count2 == 0 and count3 != 0:
                self.logger.info(
                    "%s GRE encapsulated packets are seen and %s UDP encapsulated packets are seen as expected"
                    % (count3, count2))
                # self.tcpdump_stop_on_all_compute()
                self.tcpdump_stop_on_compute(comp_ip)
                return True
            else:
                errmsg = "%s UDP encapsulated packets are seen and %s GRE encapsulated packets are seen.Not expected" % (
                    count2, count3)
                self.logger.error(errmsg)
                # self.tcpdump_stop_on_all_compute()
                self.tcpdump_stop_on_compute(comp_ip)
                assert False, errmsg

        elif pcaptype == 'VXLAN':
            pcaps1 = '/tmp/encap-udp.pcap'
            pcaps2 = '/tmp/encap-gre.pcap'
            pcaps3 = '/tmp/encap-vxlan.pcap'
            cmd2 = 'tcpdump  -r %s | grep UDP |wc -l' % pcaps1
            out2, err = execute_cmd_out(session, cmd2, self.logger)
            cmd3 = 'tcpdump  -r %s | grep GRE | wc -l' % pcaps2
            out3, err = execute_cmd_out(session, cmd3, self.logger)
            count2 = int(out2.strip('\n'))
            count3 = int(out3.strip('\n'))

            cmd3 = 'tcpdump  -r %s | grep UDP |wc -l' % pcaps3
            out3, err = execute_cmd_out(session, cmd3, self.logger)
            count = int(out3.strip('\n'))

            if count2 == 0 and count3 == 0 and count != 0:
                self.logger.info(
                    "%s GRE encapsulated packets are seen and %s UDP encapsulated packets are seen and %s vxlan packets are seen  as expected"
                    % (count3, count2, count))
                # self.tcpdump_stop_on_all_compute()
                if vxlan_id is not None:
                    cmd4 = 'tcpdump -AX -r %s | grep ' % pcaps3 + \
                        vxlan_id + ' |wc -l'
                    out4, err = execute_cmd_out(session, cmd4, self.logger)
                    count_vxlan_id = int(out4.strip('\n'))

                    if count_vxlan_id < count:
                        errmsg = "%s vxlan packet are seen with %s vxlan_id . Not Expected . " % (
                            count, count_vxlan_id)
                        self.tcpdump_stop_on_compute(comp_ip)
                        self.logger.error(errmsg)
                        assert False, errmsg
                    else:
                        self.logger.info(
                            "%s vxlan packets are seen with %s vxlan_id as expexted . "
                            % (count, count_vxlan_id))
                        self.tcpdump_stop_on_compute(comp_ip)
            else:
                errmsg = "%s UDP encapsulated packets are seen and %s GRE encapsulated packets are seen.Not expected, %s vxlan packet seen" % (
                    count2, count3, count)
                self.logger.error(errmsg)
                # self.tcpdump_stop_on_all_compute()
                self.tcpdump_stop_on_compute(comp_ip)
                assert False, errmsg
            if vlan_id is not None:
                cmd5 = 'tcpdump -AX -r %s | grep %s |wc -l' % (pcaps3, vlan_id)
                out5, err = execute_cmd_out(session, cmd5, self.logger)
                count_vlan_id = int(out5.strip('\n'))

                if count_vlan_id < count:
                    errmsg = "%s vxlan packet are seen with %s vlan_id . Not Expected . " % (
                        count, count_vlan_id)
                    self.logger.error(errmsg)
                    assert False, errmsg
                else:
                    self.logger.info(
                        "%s vxlan packets are seen with %s vlan_id as expexted . "
                        % (count, count_vlan_id))
        return True
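
The UDP/GRE/VXLAN branches above repeat the same "read the pcap, grep, count" shell pipeline. A small refactoring sketch, reusing the ssh()/execute_cmd_out() helpers already used in these examples (the helper name here is hypothetical):

def count_packets_in_pcap(session, pcap, pattern, logger):
    # Read the capture on the compute node and count the lines matching the pattern
    cmd = 'tcpdump -r %s | grep %s | wc -l' % (pcap, pattern)
    out, err = execute_cmd_out(session, cmd, logger)
    return int(out.strip('\n'))

# e.g. udp_count = count_packets_in_pcap(session, '/tmp/encap-udp.pcap', 'UDP', logger)
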
Example #50
0
    def verify_flow_thru_si(self,
                            si_fix,
                            src_vn=None,
                            ecmp_hash=None,
                            flow_count=3):
        self.logger.info(
            'Will start a tcpdump on the left-interfaces of the Service Instances to find out which flow is entering which Service Instance'
        )
        flowcount = 0
        result = True
        flow_pattern = {}
        svms = self.get_svms_in_si(si_fix)
        svms = sorted(set(svms))
        if None in svms:
            svms.remove(None)
        for svm in svms:
            self.logger.info('SVM %s is in %s state' % (svm.name, svm.status))
            if svm.status == 'ACTIVE':
                svm_name = svm.name
                host = self.get_svm_compute(svm_name)
                if src_vn is not None:
                    tapintf = self.get_svm_tapintf_of_vn(svm_name, src_vn)
                else:
                    direction = 'left'
                    tapintf = self.get_bridge_svm_tapintf(svm_name, direction)
                session = ssh(host['host_ip'], host['username'],
                              host['password'])
                cmd = 'tcpdump -nni %s -c 10 > /tmp/%s_out.log' % (tapintf,
                                                                   tapintf)
                execute_cmd(session, cmd, self.logger)
            else:
                self.logger.info('%s is not in ACTIVE state' % svm.name)
        sleep(15)

        self.logger.info('%%%%% Will check the result of tcpdump %%%%%')
        svms = self.get_svms_in_si(si_fix)
        svms = sorted(set(svms))
        if None in svms:
            svms.remove(None)
        for svm in svms:
            self.logger.info('SVM %s is in %s state' % (svm.name, svm.status))
            if svm.status == 'ACTIVE':
                svm_name = svm.name
                host = self.get_svm_compute(svm_name)
                if src_vn is not None:
                    tapintf = self.get_svm_tapintf_of_vn(svm_name, src_vn)
                else:
                    direction = 'left'
                    tapintf = self.get_bridge_svm_tapintf(svm_name, direction)
                session = ssh(host['host_ip'], host['username'],
                              host['password'])
                output_cmd = 'cat /tmp/%s_out.log' % tapintf
                out, err = execute_cmd_out(session, output_cmd, self.logger)

                for i in range(0, flow_count):
                    dport = str(9000 + i)
                    if dport in out:
                        flowcount = flowcount + 1
                        self.logger.info(
                            'Flow with dport %s seen flowing inside %s' %
                            (dport, svm_name))
                        flow_pattern[dport] = svm_name
            else:
                self.logger.info('%s is not in ACTIVE state' % svm.name)
        if flowcount > 0:
            self.logger.info(
                'Flows are distributed across the Service Instances as :')
            self.logger.info('%s' % flow_pattern)
        else:
            result = False

        if ecmp_hash and ecmp_hash != 'default':
            # count the number of hash fields set
            hash_var_count = sum(ecmp_hash.values())

            # In case only one hash field is set, all flows should go through a
            # single service instance. One exception is destination_port:
            # the destination port varies across traffic streams, so traffic
            # will get load balanced even if the ECMP hash is configured with
            # "destination_port" alone.
            if hash_var_count == 1 and ('destination_port' not in ecmp_hash):
                flow_pattern_ref = flow_pattern['9000']
                if all(flow_pattern_ref == item
                       for item in flow_pattern.values()):
                    self.logger.info(
                        'Flows are flowing through Single Service Instance: %s, as per config hash: %s'
                        % (flow_pattern_ref, ecmp_hash))
                    self.logger.info('%s' % flow_pattern)
                else:
                    result = False
                    self.logger.error(
                        'Flows are flowing through multiple Service Instances: %s, whereas they should not as per config hash: %s'
                        % (flow_pattern, ecmp_hash))
                    assert result, 'Config hash is not working for: %s' % (
                        ecmp_hash)
            # In case multiple ECMP hash fields are configured, the default ECMP
            # hash is present, or only 'destination_port' is configured in
            # ecmp_hash
            else:
                flow_pattern_ref = flow_pattern['9000']
                if all(flow_pattern_ref == item
                       for item in flow_pattern.values()):
                    result = False
                    self.logger.error(
                        'Flows are flowing through a single Service Instance: %s, whereas they should not as per config hash: %s'
                        % (flow_pattern_ref, ecmp_hash))
                    #assert result, 'Config hash is not working fine.'
                else:
                    self.logger.info(
                        'Flows are flowing through multiple Service Instances:%s, as per config hash: %s'
                        % (flow_pattern, ecmp_hash))
                    self.logger.info('%s' % flow_pattern)
        else:
            assert result, 'No Flow distribution seen'
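
The ECMP-hash verification above reduces to one question: did all flows land on a single service VM, or did they spread across several? A standalone sketch of that predicate (the function name and sample data below are hypothetical):

def flows_match_hash_config(flow_pattern, ecmp_hash):
    # A single configured hash field (other than destination_port) should pin
    # all flows to one SVM; otherwise flows should spread across SVMs.
    single_field = sum(ecmp_hash.values()) == 1 and 'destination_port' not in ecmp_hash
    svm_names = set(flow_pattern.values())
    return len(svm_names) == 1 if single_field else len(svm_names) > 1

assert flows_match_hash_config({'9000': 'svm1', '9001': 'svm1'}, {'source_ip': True})
assert flows_match_hash_config({'9000': 'svm1', '9001': 'svm2'},
                               {'source_ip': True, 'destination_port': True})
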
Example #51
0
    def verify_flow_thru_si(self, si_fix, src_vn=None, ecmp_hash=None, flow_count=3):
        self.logger.info(
            'Will start a tcpdump on the left-interfaces of the Service Instances to find out which flow is entering which Service Instance')
        flowcount = 0
        result = True
        flow_pattern = {}
        svms = self.get_svms_in_si(si_fix)
        svms = sorted(set(svms))
        svm_list = si_fix.svm_list
        svm_index = 0
        vm_fix_pcap_pid_files = []

        # Capturing packets based upon source port
        src_port = "8000"
        filters = '\'(src port %s)\'' % (src_port)
        if None in svms:
            svms.remove(None)
        for svm_fixture in svm_list:
            svm = svm_fixture.vm_obj
            self.logger.info('SVM %s is in %s state' % (svm.name, svm.status))
            if svm.status == 'ACTIVE':
                svm_name = svm.name
                host_name = self.connections.orch.get_host_of_vm(svm)
                host = self.inputs.host_data[host_name]
                if src_vn is not None:
                    #tapintf = svm_fixture.tap_intf[src_vn.vn_fq_name]['name']
                    tapintf = self.connections.orch.get_vm_tap_interface(\
                                      svm_fixture.tap_intf[src_vn.vn_fq_name])
                else:
                    direction = 'left'
                    tapintf = self.get_bridge_svm_tapintf(svm_name, direction)
                if self.inputs.pcap_on_vm:
                    tcpdump_files = start_tcpdump_for_vm_intf(
                        None, [svm_list[svm_index]], None, filters=filters, pcap_on_vm=True, vm_intf='eth1', svm=True)
                    svm_index = svm_index + 1
                    vm_fix_pcap_pid_files.append(tcpdump_files)
                else:
                    session = ssh(
                        host['host_ip'], host['username'], host['password'])
                    cmd = 'sudo tcpdump -nni %s %s -c 20 > /tmp/%s_out.log' % (
                        tapintf, filters, tapintf)
                    execute_cmd(session, cmd, self.logger)
            else:
                self.logger.info('%s is not in ACTIVE state' % svm.name)
        sleep(15)

        self.logger.info('%%%%% Will check the result of tcpdump %%%%%')
        svms = self.get_svms_in_si(si_fix)
        svms = sorted(set(svms))
        if None in svms:
            svms.remove(None)
        svm_index = 0
        for svm_fixture in svm_list:
            svm = svm_fixture.vm_obj
            self.logger.info('SVM %s is in %s state' % (svm.name, svm.status))
            if svm.status == 'ACTIVE':
                svm_name = svm.name
                host_name = self.connections.orch.get_host_of_vm(svm)
                host = self.inputs.host_data[host_name]
                if src_vn is not None:
                    tapintf = self.connections.orch.get_vm_tap_interface(\
                                      svm_fixture.tap_intf[src_vn.vn_fq_name])
                    #tapintf = svm_fixture.tap_intf[src_vn.vn_fq_name]['name']
                else:
                    direction = 'left'
                    tapintf = self.get_bridge_svm_tapintf(svm_name, direction)
                if not self.inputs.pcap_on_vm:
                    session = ssh(
                        host['host_ip'], host['username'], host['password'])
                    output_cmd = 'cat /tmp/%s_out.log' % tapintf
                    out, err = execute_cmd_out(session, output_cmd, self.logger)
                else:
                    out, pkt_count = stop_tcpdump_for_vm_intf(
                        None, None, None, vm_fix_pcap_pid_files=vm_fix_pcap_pid_files[svm_index], svm=True)
                    svm_index = svm_index + 1
                    out = out[0]
                for i in range(0,flow_count):
                    dport = str(9000+i)
                    if dport in out:
                        flowcount = flowcount + 1
                        self.logger.info(
                            'Flow with dport %s seen flowing inside %s' % (dport,svm_name))
                        flow_pattern[dport] = svm_name
            else:
                self.logger.info('%s is not in ACTIVE state' % svm.name)
        if flowcount > 0:
            self.logger.info(
                'Flows are distributed across the Service Instances as :')
            self.logger.info('%s' % flow_pattern)
        else:
            result = False

        if ecmp_hash and ecmp_hash != 'default':
            # count the number of hash fields set
            hash_var_count = sum(ecmp_hash.values())

            # In case only one hash field is set, all flows should go through a
            # single service instance. One exception is destination_port:
            # the destination port varies across traffic streams, so traffic
            # will get load balanced even if the ECMP hash is configured with
            # "destination_port" alone.
            if hash_var_count == 1 and ('destination_port' not in ecmp_hash):
                flow_pattern_ref = flow_pattern['9000']
                if all(flow_pattern_ref == item for item in flow_pattern.values()):
                    self.logger.info(
                        'Flows are flowing through Single Service Instance: %s, as per config hash: %s' % (flow_pattern_ref, ecmp_hash))
                    self.logger.info('%s' % flow_pattern)
                else:
                    result = False
                    self.logger.error(
                        'Flows are flowing through multiple Service Instances: %s, whereas they should not as per config hash: %s' % (flow_pattern, ecmp_hash))
                    assert result, 'Config hash is not working for: %s' % ( ecmp_hash)
            # In case multiple ECMP hash fields are configured, the default ECMP
            # hash is present, or only 'destination_port' is configured in
            # ecmp_hash
            else:
                flow_pattern_ref = flow_pattern['9000']
                if all(flow_pattern_ref == item for item in flow_pattern.values()):
                    result = False
                    self.logger.error(
                        'Flows are flowing through a single Service Instance: %s, whereas they should not as per config hash: %s' % (flow_pattern_ref, ecmp_hash))
                    #assert result, 'Config hash is not working fine.'
                else:
                    self.logger.info(
                        'Flows are flowing through multiple Service Instances:%s, as per config hash: %s' % (flow_pattern, ecmp_hash))
                    self.logger.info('%s' % flow_pattern)
        else:
            assert result, 'No Flow distribution seen'
Example #52
0
 def verify_packets(self,
                    packet_type,
                    pcap_path_with_file_name,
                    expected_count=None,
                    dot1p=None,
                    dscp=None,
                    mpls_exp=None):
     '''
     This function parses a tcpdump capture file.
     It verifies that the relevant fields in the packets of the pcap file match
     the values expected by the user.
     "packet_type" is mandatory and can be set to any string containing "exp", "dot1p",
     "dscp", or any combination of them.
     Verification is done for the following values:
         1. DSCP field
         2. VLAN priority code point
         3. MPLS EXP bits
     This function can also be used to parse any .pcap present on any node,
     even if the capture was not started by a 'TestQosTraffic' object.
     '''
     if self.session is None:
         if not self.username or not self.node_ip or not self.password:
             self.logger.error("Either of IP, username or password not"
                               " specified")
             self.logger.error("Cannot open ssh session to the node")
             return False
         else:
             self.session = ssh(self.node_ip, self.username,\
                                    self.password)
         cmd = "ls -al %s" % pcap_path_with_file_name
         out, err = execute_cmd_out(self.session, cmd)
         if out:
             self.pcap = out.split('\n')[0].split()[-1]
         elif err:
             self.logger.error("%s" % err)
             return False
     if expected_count:
         result = verify_tcpdump_count(self,
                                       self.session,
                                       self.pcap,
                                       expected_count,
                                       exact_match=False)
         if not result:
             return result
     file_transfer = self.compute_node_fixture.file_transfer(
         "get", self.pcap,
         self.pcap.split('/')[-1])
     if not file_transfer:
         self.logger.error("Unable to transfer file to local system")
         return False
     file_name = self.pcap.split('/')[-1]
     if self.encap_type and self.encap_type != "MPLS_any":
         if not self.verify_encap_type(self.encap_type, file_name):
             return False
     f = open(file_name, 'rb')
     pcap = dpkt.pcap.Reader(f)
     count = 0
     for ts, buff in pcap:
         ether = dpkt.ethernet.Ethernet(buff)
         self.logger.debug("Verifying for packet number %d" % count)
         count = count + 1
         if "dot1p" in packet_type and\
         self._check_underlay_interface_is_tagged():
             if isinstance(dot1p, int):
                 string = ''
                 try:
                     priority = ether.vlan_tags[0].pri
                  except AttributeError as e:
                     self.logger.error(e)
                     return False
                 if priority == dot1p:
                     self.logger.debug("Validated dot1p marking of %s" %
                                       (dot1p))
                 else:
                     self.logger.error("Mismatch between actual and"
                                       " expected PCP")
                     self.logger.error("Expected PCP : %s, Actual PCP :%s"\
                                       % (dot1p,priority))
                     return False
             else:
                 self.logger.error("dot1p to be compared not mentioned")
                 return False
         if "dscp" in packet_type:
             if isinstance(dscp, int):
                 ip = ether.data
                 try:
                     actual_dscp = int(bin(ip.tos >> 2), 2)
                  except AttributeError as e:
                     self.logger.error(e)
                     return False
                 if dscp == actual_dscp:
                     self.logger.debug("Validated DSCP marking of %s" %
                                       (dscp))
                 else:
                     self.logger.error("Mismatch between actual and"
                                       " expected DSCP")
                     self.logger.error("Expected DSCP: %s, Actual DSCP:%s"\
                                       % (dscp,actual_dscp))
                     return False
             else:
                 self.logger.error("dscp to be compared not mentioned")
                 return False
Example #53
0
    def verify_flow_thru_si(self, si_fix, src_vn=None):
        self.logger.info(
            'Will start a tcpdump on the left-interfaces of the Service Instances to find out which flow is entering which Service Instance')
        flowcount = 0
        result = True
        flow_pattern = {}
        svms = self.get_svms_in_si(si_fix, self.inputs.project_name)
        svms = sorted(set(svms))
        if None in svms:
            svms.remove(None)
        for svm in svms:
            self.logger.info('SVM %s is in %s state' % (svm.name, svm.status))
            if svm.status == 'ACTIVE':
                svm_name = svm.name
                host = self.get_svm_compute(svm_name)
                if src_vn is not None:
                    tapintf = self.get_svm_tapintf_of_vn(svm_name, src_vn)
                else:
                    direction = 'left'
                    tapintf = self.get_bridge_svm_tapintf(svm_name, direction)
                session = ssh(
                    host['host_ip'], host['username'], host['password'])
                cmd = 'tcpdump -nni %s -c 10 > /tmp/%s_out.log' % (
                    tapintf, tapintf)
                execute_cmd(session, cmd, self.logger)
            else:
                self.logger.info('%s is not in ACTIVE state' % svm.name)
        sleep(15)

        self.logger.info('%%%%% Will check the result of tcpdump %%%%%')
        svms = self.get_svms_in_si(si_fix, self.inputs.project_name)
        svms = sorted(set(svms))
        if None in svms:
            svms.remove(None)
        for svm in svms:
            self.logger.info('SVM %s is in %s state' % (svm.name, svm.status))
            if svm.status == 'ACTIVE':
                svm_name = svm.name
                host = self.get_svm_compute(svm_name)
                if src_vn is not None:
                    tapintf = self.get_svm_tapintf_of_vn(svm_name, src_vn)
                else:
                    direction = 'left'
                    tapintf = self.get_bridge_svm_tapintf(svm_name, direction)
                session = ssh(
                    host['host_ip'], host['username'], host['password'])
                output_cmd = 'cat /tmp/%s_out.log' % tapintf
                out, err = execute_cmd_out(session, output_cmd, self.logger)
                if '9000' in out:
                    flowcount = flowcount + 1
                    self.logger.info(
                        'Flow with dport 9000 seen flowing inside %s' % svm_name)
                    flow_pattern['9000'] = svm_name
                if '9001' in out:
                    flowcount = flowcount + 1
                    self.logger.info(
                        'Flow with dport 9001 seen flowing inside %s' % svm_name)
                    flow_pattern['9001'] = svm_name
                if '9002' in out:
                    flowcount = flowcount + 1
                    self.logger.info(
                        'Flow with dport 9002 seen flowing inside %s' % svm_name)
                    flow_pattern['9002'] = svm_name
            else:
                self.logger.info('%s is not in ACTIVE state' % svm.name)
        if flowcount > 0:
            self.logger.info(
                'Flows are distributed across the Service Instances as :')
            self.logger.info('%s' % flow_pattern)
        else:
            result = False
        assert result, 'No Flow distribution seen'
Example #54
0
    def tcpdump_analyze_on_compute(
            self,
            comp_ip,
            pcaptype,
            vxlan_id=None,
            vlan_id=None):
        sleep(2)
        sessions = {}
        compute_user = self.inputs.host_data[comp_ip]['username']
        compute_password = self.inputs.host_data[comp_ip]['password']
        session = ssh(comp_ip, compute_user, compute_password)
        self.logger.info("Analyzing on compute node %s" % comp_ip)
        if pcaptype == 'UDP':
            pcaps1 = '/tmp/encap-udp.pcap'
            pcaps2 = '/tmp/encap-gre.pcap'
            cmd2 = 'tcpdump  -r %s | grep UDP |wc -l' % pcaps1
            out2, err = execute_cmd_out(session, cmd2, self.logger)
            cmd3 = 'tcpdump  -r %s | grep GRE | wc -l' % pcaps2
            out3, err = execute_cmd_out(session, cmd3, self.logger)
            count2 = int(out2.strip('\n'))
            count3 = int(out3.strip('\n'))
            if count2 != 0 and count3 == 0:
                self.logger.info(
                    "%s UDP encapsulated packets are seen and %s GRE encapsulated packets are seen as expected" %
                    (count2, count3))
                return True
            else:
                errmsg = "%s UDP encapsulated packets are seen and %s GRE encapsulated packets are seen.Not expected" % (
                    count2, count3)
                self.logger.error(errmsg)
                assert False, errmsg
        elif pcaptype == 'GRE':
            pcaps1 = '/tmp/encap-udp.pcap'
            pcaps2 = '/tmp/encap-gre.pcap'
            cmd2 = 'tcpdump  -r %s | grep UDP |wc -l' % pcaps1
            out2, err = execute_cmd_out(session, cmd2, self.logger)
            cmd3 = 'tcpdump  -r %s | grep GRE | wc -l' % pcaps2
            out3, err = execute_cmd_out(session, cmd3, self.logger)
            count2 = int(out2.strip('\n'))
            count3 = int(out3.strip('\n'))
            if count2 == 0 and count3 != 0:
                self.logger.info(
                    "%s GRE encapsulated packets are seen and %s UDP encapsulated packets are seen as expected" %
                    (count3, count2))
                # self.tcpdump_stop_on_all_compute()
                self.tcpdump_stop_on_compute(comp_ip)
                return True
            else:
                errmsg = "%s UDP encapsulated packets are seen and %s GRE encapsulated packets are seen.Not expected" % (
                    count2, count3)
                self.logger.error(errmsg)
                # self.tcpdump_stop_on_all_compute()
                self.tcpdump_stop_on_compute(comp_ip)
                assert False, errmsg

        elif pcaptype == 'VXLAN':
            pcaps1 = '/tmp/encap-udp.pcap'
            pcaps2 = '/tmp/encap-gre.pcap'
            pcaps3 = '/tmp/encap-vxlan.pcap'
            cmd2 = 'tcpdump  -r %s | grep UDP |wc -l' % pcaps1
            out2, err = execute_cmd_out(session, cmd2, self.logger)
            cmd3 = 'tcpdump  -r %s | grep GRE | wc -l' % pcaps2
            out3, err = execute_cmd_out(session, cmd3, self.logger)
            count2 = int(out2.strip('\n'))
            count3 = int(out3.strip('\n'))

            cmd3 = 'tcpdump  -r %s | grep UDP |wc -l' % pcaps3
            out3, err = execute_cmd_out(session, cmd3, self.logger)
            count = int(out3.strip('\n'))

            if count2 == 0 and count3 == 0 and count != 0:
                self.logger.info(
                    "%s GRE encapsulated packets are seen and %s UDP encapsulated packets are seen and %s vxlan packets are seen  as expected" %
                    (count3, count2, count))
                # self.tcpdump_stop_on_all_compute()
                if vxlan_id is not None:
                    cmd4 = 'tcpdump -AX -r %s | grep ' % pcaps3 + \
                        vxlan_id + ' |wc -l'
                    out4, err = execute_cmd_out(session, cmd4, self.logger)
                    count_vxlan_id = int(out4.strip('\n'))

                    if count_vxlan_id < count:
                        errmsg = "%s vxlan packet are seen with %s vxlan_id . Not Expected . " % (
                            count, count_vxlan_id)
                        self.tcpdump_stop_on_compute(comp_ip)
                        self.logger.error(errmsg)
                        assert False, errmsg
                    else:
                        self.logger.info(
                            "%s vxlan packets are seen with %s vxlan_id as expexted . " %
                            (count, count_vxlan_id))
                        self.tcpdump_stop_on_compute(comp_ip)
            else:
                errmsg = "%s UDP encapsulated packets are seen and %s GRE encapsulated packets are seen.Not expected, %s vxlan packet seen" % (
                    count2, count3, count)
                self.logger.error(errmsg)
                # self.tcpdump_stop_on_all_compute()
                self.tcpdump_stop_on_compute(comp_ip)
                assert False, errmsg
            if vlan_id is not None:
                cmd5 = 'tcpdump -AX -r %s | grep %s |wc -l' % (pcaps3, vlan_id)
                out5, err = execute_cmd_out(session, cmd5, self.logger)
                count_vlan_id = int(out5.strip('\n'))

                if count_vlan_id < count:
                    errmsg = "%s vxlan packet are seen with %s vlan_id . Not Expected . " % (
                        count, count_vlan_id)
                    self.logger.error(errmsg)
                    assert False, errmsg
                else:
                    self.logger.info(
                        "%s vxlan packets are seen with %s vlan_id as expexted . " %
                        (count, count_vlan_id))
        return True