Example #1
    def sles_pnfs_test(self, sles, sles_mounts, sles_client, sles_paths, require_inband):
       fnames = []
       for (sles_client, path) in zip(sles, sles_paths):
             print "sles_mounts.path  is :", path
             fnames.append(join(path, 'sles-'+sles_client.address))

       fnames = dispatcher_tuple(fnames)
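       # Start an NFS packet capture on every SLES client; get_nfs_ops() below returns one list of operations per client.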
       pcaps = PE(sles)
       fds = sles.open_file(fnames, os.O_CREAT | os.O_RDWR | os.O_SYNC | os.O_DIRECT, 0644)
       sles.write_file(fds, 0, 32, 5)
       sles.read_file(fds, 0, 32, 5)
       sles.close_file(fds)
       time.sleep(1) # DELME
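       # Collect the captured NFS operations and narrow them down to call packets, then to WRITE calls and READ calls.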
       results = pcaps.get_nfs_ops(False)
       calls_only = pcaps.get_nfs_ops_by_and_filter(results, is_call=True)
       writes_only = pcaps.get_nfs_ops_by_and_filter(results, op_type=PE_NfsOpType.WRITE, is_call=True)
       read_only = pcaps.get_nfs_ops_by_and_filter(results, op_type=PE_NfsOpType.READ, is_call=True)
       assert len(writes_only) > 0, "No writes detected during the recorded period"
       assert len(read_only) > 0, "No read detected during the recorded period"
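       # Check the I/O path per client: with require_inband every READ/WRITE call must target the MDS (ctx.cluster.address); without it the calls must bypass the MDS.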
       for (sles_client, single_client_nfs_ops) in zip(sles, calls_only):
            self._logger.debug("RHEL CLIENT {}".format(sles_client.address))
            for nfs_op in single_client_nfs_ops:
                if nfs_op.op_type == PE_NfsOpType.WRITE or nfs_op.op_type == PE_NfsOpType.READ:
                    self._logger.info('  NFS OP %s at %s' % (nfs_op.op_type.name, nfs_op.time_relative))
                    self._logger.info('  OP DETAILS:\n%s' % pprint.pformat(nfs_op.__dict__))
                    if require_inband:
                        assert nfs_op.ip_dst == ctx.cluster.address, "I/O that doesn't go to the MDS found"
                    else:
                        assert nfs_op.ip_dst != ctx.cluster.address, "I/O that goes to the MDS found"
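
The in-band/out-of-band check above is repeated almost verbatim in Example #5 below. A minimal sketch of how it could be factored into a shared helper - the helper name and signature are assumptions made here for illustration; only the nfs_op attributes (op_type, ip_dst) and PE_NfsOpType come from the examples:

    def assert_io_path(clients, calls_only, mds_address, require_inband, logger):
        # Hypothetical helper: every READ/WRITE call must either target the MDS
        # address (require_inband=True) or bypass it (require_inband=False).
        for (client, client_ops) in zip(clients, calls_only):
            logger.debug("CLIENT {}".format(client.address))
            for nfs_op in client_ops:
                if nfs_op.op_type in (PE_NfsOpType.READ, PE_NfsOpType.WRITE):
                    if require_inband:
                        assert nfs_op.ip_dst == mds_address, "I/O that doesn't go to the MDS found"
                    else:
                        assert nfs_op.ip_dst != mds_address, "I/O that goes to the MDS found"
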
Example #2
    def test_group_test6(self, conf_3u_2g):

# pcap from dd - check that write and read to the same directory are possible:
# write from the first user, then close; read-only from the second user and deleg....;
# RDWR from the third user with different permissions.
        directory = 'user_dir'
        print 'mounts', self.mounts
        for mount in self.mounts:
            print 'path from mount', mount.path
        path = "{}/{}".format(self.mounts[0].path,directory)
        print "print", path
        users = []
        users = self.udb.get_users(self.groups[0])
        ctx.clients[0].execute(['mkdir', '-p',path], user=users[0])
        ctx.clients[0].chmod(path, 0777)
        fname = path + '/user_file'
        res = ctx.clients[0].execute(['id'], user=users[0])
        self._logger.info(res.stdout)
        self._logger.debug("Starting PCAPs")
        pcaps=PE(ctx.cluster.get_active_dd(),tmp_dir='/opt')
        fd0 = ctx.clients[0].open_file(fname, os.O_CREAT | os.O_RDWR , 0777, users[0])
 # write in the loop
        ctx.clients[0].write_file(fd0, 32, 64, 4, users[0])
        ctx.clients[0].read_file(fd0, 0, 32, 4, users[0])
        ctx.clients[0].close_file(fd0, users[0])
        ctx.clients[0].close_agent()

# second user from the second group writes to each directory
        path2 = "{}/{}".format(self.mounts[1].path,directory)
        print "path2", path2
        users2 = []
        users2 = self.udb.get_users(self.groups[1])
        ctx.clients[1].execute(['chmod', '-R', '777', path2])
        fname2 = '{}/{}'.format(path2, 'user_file')
        print "file_name", fname2
        fd1 = ctx.clients[1].open_file(fname2, os.O_CREAT | os.O_RDONLY | os.O_ASYNC, 0777, users2[0])
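        # _read_loop with block=False runs in the background and returns a handle (fv) that is killed and collected further down, once the third user has written.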
        fv = self._read_loop(ctx.clients[1], fd1, 0, 32, 4, users2[0], block=False)

#        third user from the second group writes to each directory
        path3 = "{}/{}".format(self.mounts[2].path,directory)
        print "path3", path3
        print "user2[1] = ", users2[1]
        ctx.clients[2].execute(['chmod', '-R', '777', path3])
        fname3 = '{}/{}'.format(path3, 'user_file')
        print "file_name", fname3
        fd2 = ctx.clients[2].open_file(fname3, os.O_CREAT | os.O_RDWR | os.O_ASYNC , 0777, users2[1])
        ctx.clients[2].write_file(fd2, 32, 64, 4, users2[1])

        time.sleep(1)
        fv.kill()
        fv.get()

        ctx.clients[2].close_file(fd2, users2[1])
        ctx.clients[1].close_file(fd1, users2[0])
        self._logger.info("AFTER CLOSE")

        results = pcaps.get_nfs_ops(False)
        ctx.clients[2].close_agent()
        ctx.clients[1].close_agent()
Example #3
    def test_group_test4(self, conf_3u_2g):

# pcap from dd - check that write and read to the same directory are possible from different users with different permissions.
        directory = 'user_dir'
        print 'mounts', self.mounts
        for mount in self.mounts:
            print 'path from mount', mount.path
        path = "{}/{}".format(self.mounts[0].path,directory)
        print "print", path
        users = []
        users = self.udb.get_users(self.groups[0])
        ctx.clients[0].execute(['mkdir', '-p',path], user=users[0])
        ctx.clients[0].chmod(path, 0777)
        fname = path + '/user_file'
        res = ctx.clients[0].execute(['id'], user=users[0])
        self._logger.info(res.stdout)
        self._logger.debug("Starting PCAPs")
        pcaps=PE(ctx.cluster.get_active_dd())
        fd0 = ctx.clients[0].open_file(fname, os.O_CREAT | os.O_RDWR | os.O_SYNC | os.O_DIRECT, 0777, users[0])
 # write in the loop
        fv = self._write_loop(ctx.clients[0], fd0, 0, 32, 4, users[0], block=False)

        ctx.clients[0].read_file(fd0, 0, 32, 4, users[0])

# second user from the second group writes to each directory
        path2 = "{}/{}".format(self.mounts[1].path,directory)
        print "path2", path2
        users2 = []
        users2 = self.udb.get_users(self.groups[1])
        #ctx.clients[1].execute(['cd',path2], user=users[1])
        ctx.clients[1].execute(['chmod', '-R', '777', path2])
        fname2 = '{}/{}'.format(path2, 'user_file')
        print "file_name", fname2
 #       ctx.clients[1].chmod(path, 0777)
        fd1 = ctx.clients[1].open_file(fname2, os.O_CREAT | os.O_RDWR | os.O_SYNC | os.O_DIRECT, 0777, users2[1])
        ctx.clients[1].write_file(fd1, 32, 64, 4, users2[1])

        time.sleep(1)
        fv.kill()
        fv.get()

        ctx.clients[0].close_file(fd0, users[0])
        ctx.clients[1].close_file(fd1, users2[1])
        self._logger.info("AFTER CLOSE")

        results = pcaps.get_nfs_ops(False)
        ctx.clients[0].close_agent()
        ctx.clients[1].close_agent()
Example #4
    def test_group_test3(self, conf_3u_2g):
        directory = 'create_user1'
        path = "{0}{1}{2}".format(self.mounts[0].path, "/", directory)
        users = []
        users = self.udb.get_users(self.groups[0])
        ctx.clients[0].execute(['mkdir', '-p', path], user=users[0])
        ctx.clients[0].chmod(path, 0777)
        fname1 = path + '/create_user1'
        self._logger.debug("Starting PCAPs")
        pcaps = PE(ctx.cluster.get_active_dd())
        fd0 = ctx.clients[0].open_file(fname1, os.O_CREAT | os.O_RDWR | os.O_SYNC | os.O_DIRECT, 0777, users[0])
        directory = 'create_user2'
        path = "{0}{1}{2}".format(self.mounts[1].path, "/", directory)
        users = self.udb.get_users(self.groups[1])
        ctx.clients[1].execute(['mkdir', '-p', path], user=users[0])
        ctx.clients[1].chmod(path, 0777)
        fname2 = path + '/create_user2'
        fd1 = ctx.clients[1].open_file(fname2, os.O_CREAT | os.O_RDWR | os.O_SYNC | os.O_DIRECT, 0655, users[1])
        results = pcaps.get_nfs_ops(False)
        ctx.clients[0].close_agent()
        ctx.clients[1].close_agent()
Example #5
    def test_rpm_uninstall_install(self, get_pd_share):
   # test that all clients are still in pNFS mode
       mnt_tmpl = Mount(get_pd_share, NfsVersion.pnfs4_2) # can change path=''
       mounts = ctx.clients.nfs.mount(mnt_tmpl)

       fedora = []
       sles = []
       rhel = []
       fedora_mounts = []
       sles_mounts = []
       rhel_mounts = []
       rhel_paths = []
       sles_paths = []
       for client, mount in zip(ctx.clients, mounts):
            if client.os.type is OsType.Fedora:
                fedora.append(client)
                fedora_mounts.append(mount)
            if client.os.type is OsType.Sles:
                sles.append(client)
                sles_mounts.append(mount)
                sles_paths.append(mount.path)

            if client.os.type is OsType.Rhel:
                rhel.append(client)
                rhel_mounts.append(mount)
                rhel_paths.append(mount.path)

       fedora = dispatcher_tuple(fedora)

       rhel = dispatcher_tuple(rhel)
       rhel_mounts = dispatcher_tuple(rhel_mounts)
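       # Record the installed pd-nfs package name on each RHEL client, then unmount and erase the package before rebooting.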
       rhel_rpm = rhel.execute(['rpm', '-qa', '|', 'grep', 'pd-nfs']).stdout

       rhel.nfs.umount(rhel_mounts)
       rhel.execute(['rpm', '-e', '%s' % (rhel_rpm)])
       print "rhel-rpm  is :", rhel_rpm

       rhel.close_agent()
       self._logger.info("Rebooting RHEL clients")
       rhel.reboot()
       self._logger.info("Waiting for RHEL clients to start")
       time.sleep(5)
       rhel.wait_for(attempt=10, interval=10)
       time.sleep(60)

       fnames = []
       rhel.nfs.mount(rhel_mounts)
       for (rhel_client, path) in zip(rhel, rhel_paths):
             print "rhel_mounts.path  is :", path
             fnames.append(join(path, 'rhel-'+rhel_client.address))

       time.sleep(2)
       fnames = dispatcher_tuple(fnames)
       pcaps = PE(rhel)

       fds = rhel.open_file(fnames, os.O_CREAT | os.O_RDWR | os.O_SYNC | os.O_DIRECT, 0644)
       rhel.write_file(fds, 0, 32, 5)
       rhel.read_file(fds, 0, 32, 5)
       rhel.close_file(fds)
       time.sleep(1) # DELME

       results = pcaps.get_nfs_ops(False)
       calls_only = pcaps.get_nfs_ops_by_and_filter(results, is_call=True)
       writes_only = pcaps.get_nfs_ops_by_and_filter(results, op_type=PE_NfsOpType.WRITE, is_call=True)
       read_only = pcaps.get_nfs_ops_by_and_filter(results, op_type=PE_NfsOpType.READ, is_call=True)
       assert len(writes_only) > 0, "No writes detected during the recorded period"
       assert len(read_only) > 0, "No read detected during the recorded period"

       for (rhel_client, single_client_nfs_ops) in zip(rhel, calls_only):
         self._logger.debug("RHEL CLIENT {}".format(rhel_client.address))
         for nfs_op in single_client_nfs_ops:
                 if nfs_op.op_type == PE_NfsOpType.WRITE or nfs_op.op_type == PE_NfsOpType.READ:
                     self._logger.info('  NFS OP %s at %s' % (nfs_op.op_type.name, nfs_op.time_relative))
                     self._logger.info('  OP DETAILS:\n%s' % pprint.pformat(nfs_op.__dict__))
                     assert nfs_op.ip_dst == ctx.cluster.address, "I/O that doesn't go to the MDS found"
       print "rhel is ok"

 # sles tests
       sles = dispatcher_tuple(sles)
       sles_mounts = dispatcher_tuple(sles_mounts)
       sles_rpm = sles.execute(['rpm', '-qa', '|', 'grep', 'pd-nfs']).stdout
       sles.nfs.umount(sles_mounts)
       sles.execute(['rpm', '-e', '%s' % (sles_rpm)])
       print "sles-rpm  is :", sles_rpm
       sles.close_agent()
       self._logger.info("Rebooting RHEL clients")
       sles.reboot()
       sles.wait_for(attempt=10, interval=10)
       time.sleep(60)
#       sles.execute(['rmmod', 'nfs_layout_flexfiles',])
       fnames = []
       sles.nfs.mount(sles_mounts)
       for (sles_client, path) in zip(sles, sles_paths):
             print "sles_mounts.path  is :", path
             fnames.append(join(path, 'sles-'+sles_client.address))

       time.sleep(2)
       fnames = dispatcher_tuple(fnames)
       pcaps = PE(sles)

       fds = sles.open_file(fnames, os.O_CREAT | os.O_RDWR | os.O_SYNC | os.O_DIRECT, 0644)
       sles.write_file(fds, 0, 32, 5)
       sles.read_file(fds, 0, 32, 5)
       sles.close_file(fds)
       time.sleep(1) # DELME

       results = pcaps.get_nfs_ops(False)
       calls_only = pcaps.get_nfs_ops_by_and_filter(results, is_call=True)
       writes_only = pcaps.get_nfs_ops_by_and_filter(results, op_type=PE_NfsOpType.WRITE, is_call=True)
       read_only = pcaps.get_nfs_ops_by_and_filter(results, op_type=PE_NfsOpType.READ, is_call=True)
       assert len(writes_only) > 0, "No writes detected during the recorded period"
       assert len(read_only) > 0, "No read detected during the recorded period"

       for (sles_client, single_client_nfs_ops) in zip(sles, calls_only):
            self._logger.debug("RHEL CLIENT {}".format(sles_client.address))
            for nfs_op in single_client_nfs_ops:
                if nfs_op.op_type == PE_NfsOpType.WRITE or nfs_op.op_type == PE_NfsOpType.READ:
                    self._logger.info('  NFS OP %s at %s' % (nfs_op.op_type.name, nfs_op.time_relative))
                    self._logger.info('  OP DETAILS:\n%s' % pprint.pformat(nfs_op.__dict__))
                    assert nfs_op.ip_dst == ctx.cluster.address, "I/O that doesn't go to the MDS found"

       print "sles is ok"

       c=fedora.execute(['uname', '-r']).stdout
       print c

 # rpm_install_back(self, get_pd_share):
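       # Reinstall the previously removed RPM from the copy kept under /opt, reboot, remount, and verify that I/O now bypasses the MDS (require_inband=False).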
       rhel.execute(['rpm', '-ivh', '/opt/' + rhel_rpm[0].rstrip() + '.rpm'])
        # Add reboot
       rhel.close_agent()
       self._logger.info("Rebooting RHEL clients")
       rhel.reboot()
       self._logger.info("Waiting for RHEL clients to start")
       time.sleep(5)
       rhel.wait_for(attempt=10, interval=10)
       time.sleep(60)
       self._logger.info("Mounting RHEL clients")
       rhel.nfs.mount(rhel_mounts)

       self.rhel_pnfs_test(rhel, rhel_mounts, rhel_client, rhel_paths, require_inband=False)
       print "END rhel_pnfs_test"

  # rpm_install_back(self, get_pd_share):
       print "Sles begining"
       sles.execute(['rpm', '-ivh', '/opt/' + sles_rpm[0].rstrip() + '.x86_64.rpm'])
       print "Sles"
        # Add reboot
       sles.close_agent()
       self._logger.info("Rebooting Sles clients")
       sles.reboot()
       self._logger.info("Waiting for Sles clients to start")
       sles.wait_for(attempt=10, interval=10)
       time.sleep(60)
       self._logger.info("Mounting Sles clients")
       sles.nfs.mount(sles_mounts)
       print "START sles_pnfs_test"
       self.sles_pnfs_test(sles, sles_mounts, sles_client, sles_paths, require_inband=False)
       print "END sles_pnfs_test"
    def test_layout_pipeline_basic_readwrite(self, setup_env):
        """
        Client C1 opens file F1
        C1 writes to F1
        Trigger mobility
        C1 Reads from F1
        C1 writes to F1
        The test also validates the captured packets to make sure there was a CB_LAYOUTRECALL, a LAYOUTGET
        and a READ. The relative_time from the traces is tracked to ensure the LAYOUTGET is sent 'immediately'
        after the RECALL, and that the READ happens 'immediately' after the LAYOUTGET
        (verifying that there was no time lag in traffic resumption).
        """
        threshold = 2.00
        pcaps = PE(self.clientA)
        self._logger.info("Creating file {}".format(self.fnameA))
        self.clientA.write_to_file(self.fnameA,
                                   '/dev/urandom',
                                   bs='3G',
                                   count=1,
                                   timeout=600)
        file_details = self.clientA.file_declare(self.fnameA, self.mount.share)
        inode_id = file_details.inode
        share_id = self.mounts[0].share.internal_id
        inode_info_before = ctx.cluster.execute_tool(PrTestCaps().get_inode(
            share_id, inode_id))
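        # Force the cluster to recall the client's RW layout, reusing the iomode and generation reported by get_inode above.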
        ctx.cluster.execute_tool(PrTestCaps().recall_layout(
            share_id, inode_id,
            inode_info_before.last_result['layout_rw']['rw_iomode'],
            inode_info_before.last_result['layout_rw']['rw_gen']))
        self.clientA.open_file(
            self.fnameA, fileflags.O_CREAT | fileflags.O_RDWR
            | fileflags.O_SYNC | fileflags.O_DIRECT, 0644)

        self.clientA.write_to_file(self.fnameA,
                                   '/dev/urandom',
                                   bs='2G',
                                   count=1,
                                   timeout=600)

        self._logger.info(
            'Triggering Mobility in the background and sleep for 1 sec')
        mobility_ctx = self.trigger_mobility(self.clientA, self.mount,
                                             self.fnameA)

        self.clientA.read_from_file(self.fnameA,
                                    bs='4k',
                                    count=10,
                                    timeout=600)
        self.clientA.write_to_file(self.fnameA,
                                   '/dev/urandom',
                                   bs='1G',
                                   count=1,
                                   timeout=600)
        self._logger.info('Checking to see if mobility is complete')
        self.wait_for_mobility_to_finish(self.clientA, self.mount, self.fnameA,
                                         mobility_ctx['target_volume'])

        nfs_ops = pcaps.get_nfs_ops()
        conditions = [{
            'op_type': PE_NfsOpType.LAYOUTGET,
            'is_call': True,
            'ip_src': self.clientA.address,
            'ip_dst': ctx.cluster.address
        }]
        pcaps.verify_basic_conditions(nfs_ops, conditions, client_id=0)
        loget_packets_call = pcaps.get_nfs_ops_by_and_filter(
            nfs_ops,
            op_type=PE_NfsOpType.LAYOUTGET,
            ip=self.clientA.address,
            ip_origin='src',
            is_call=True)
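        # Filtered results are indexed [client][packet]; only clientA is captured here, so client index 0 is used throughout.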
        # Expect the first LAYOUT get to have RW iomode, note down the SEQ ID
        loget_call_seqid_one = loget_packets_call[0][1].layout_seqid
        loget_call_stateid_one = loget_packets_call[0][1].layout_stateid
        loget_call_layout_iomode_one = loget_packets_call[0][1].layout_iomode

        loget_call_seqid_two = loget_packets_call[0][2].layout_seqid
        loget_call_stateid_two = loget_packets_call[0][2].layout_stateid
        loget_call_reqtime_two = loget_packets_call[0][2].time_relative

        loget_packets_resp = pcaps.get_nfs_ops_by_and_filter(
            nfs_ops,
            op_type=PE_NfsOpType.LAYOUTGET,
            ip=self.clientA.address,
            ip_origin='dst',
            is_call=False)

        loget_resp_mirrorfh_one = loget_packets_resp[0][2].layout_mirror_fh
        loget_resp_stateid_one = loget_packets_resp[0][1].layout_stateid
        loget_resp_mirrorfh_two = loget_packets_resp[0][3].layout_mirror_fh

        assert loget_call_layout_iomode_one == 1, "Expected layout iomode: 1, found {}".\
            format(loget_call_layout_iomode_one)

        packet_index = self._search_for_seqid(loget_packets_call,
                                              int(loget_call_seqid_one, 16))
        assert packet_index is not None, 'Could not find a LAYOUTGET response from the server'

        read_req_packet = pcaps.get_nfs_ops_by_and_filter(
            nfs_ops,
            op_type=PE_NfsOpType.READ,
            ip=self.clientA.address,
            ip_origin='src',
            is_call=True)
        # Note down the read request time
        read_req_time = read_req_packet[0][0].time_relative
        read_req_dst_check_packet = pcaps.get_nfs_ops_by_and_filter(
            read_req_packet,
            op_type=PE_NfsOpType.READ,
            ip=ctx.data_stores.address[0],
            ip_origin='dst',
            is_call=True)
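        # The READ that reaches the data store must carry the mirror filehandle from the first LAYOUTGET reply and be sent as NFSv3.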
        expected_packet_attributes = {
            'filehandle': loget_resp_mirrorfh_one,
            'nfsvers': 3,
            'op_type': PE_NfsOpType.READ
        }
        self._check_packets_for_values(read_req_dst_check_packet[0][0],
                                       expected_packet_attributes)

        # Look for CB_LAYOUTRECALL request and response
        conditions = [{
            'op_type': PE_NfsOpType.CB_LAYOUTRECALL,
            'ip_src': ctx.cluster.address,
            'ip_dst': self.clientA.address,
            'is_call': True
        }]

        layoutrecall_packet = pcaps.get_nfs_ops_by_and_filter(
            nfs_ops,
            op_type=PE_NfsOpType.CB_LAYOUTRECALL,
            ip=ctx.cluster.address,
            ip_origin='src',
            is_call=True)
        # Note down the RECALL time
        layoutrecall_time = layoutrecall_packet[0][1].time_relative
        pcaps.verify_basic_conditions(nfs_ops, conditions, client_id=0)

        expected_packet_attributes = {
            'layout_iomode': 2,
            'layout_stateid': loget_resp_stateid_one
        }
        self._check_packets_for_values(loget_packets_call[0][2],
                                       expected_packet_attributes)
        expected_packet_attributes = {
            'layout_seqid': int(loget_call_seqid_one, 16) + 1
        }
        self._check_packets_for_values(loget_packets_resp[0][2],
                                       expected_packet_attributes)
        # Note down the LAYOUT REQUEST time
        # Validate that there is a LAYOUTGET resp from the DD to client

        # If the layout stateid between the call and response is different, the layout seq id on the response should
        # not bump by 1.
        loget_call_seqid = int(loget_call_seqid_two, 16)
        if loget_call_stateid_one == loget_call_stateid_two:
            packet_index = self._search_for_seqid(loget_packets_resp,
                                                  loget_call_seqid)
        elif loget_call_stateid_one != loget_call_stateid_two:
            packet_index = self._search_for_seqid(loget_packets_resp,
                                                  loget_call_seqid + 1)
        assert packet_index is not None, 'Could not find a LAYOUTGET response from the server'

        assert loget_resp_mirrorfh_one != loget_resp_mirrorfh_two, \
            "We have seen a successful LAYOUTRECALL but the 2nd LAYOUTGET seems to have gotten the same DS fh!"

        # Capture the WRITE request
        write_req_packet = pcaps.get_nfs_ops_by_and_filter(
            nfs_ops,
            op_type=PE_NfsOpType.WRITE,
            ip=self.clientA.address,
            ip_origin='src',
            is_call=True)
        # Verify that the WRITE request is going to the DS
        write_req_dst_check_packet = pcaps.get_nfs_ops_by_and_filter(
            write_req_packet,
            op_type=PE_NfsOpType.WRITE,
            ip=ctx.data_stores.address[0],
            ip_origin='dst',
            is_call=True)
        # Verify that the WRITE req has the DS fh, the nfs version is v3
        expected_packet_attributes = {
            'filehandle': loget_packets_resp[0][1].layout_mirror_fh,
            'nfsvers': 3,
            'op_type': PE_NfsOpType.WRITE
        }
        self._check_packets_for_values(write_req_dst_check_packet[0][0],
                                       expected_packet_attributes)

        # Validate the response/request times
        self._logger.info(
            'Layout was recalled at: {}'.format(layoutrecall_time))
        self._logger.info(
            'Layout was requested at: {}'.format(loget_call_reqtime_two))
        self._logger.info('READ request was at {}'.format(read_req_time))
        assert float(loget_call_reqtime_two) - float(layoutrecall_time) < threshold, \
            'The Layout was recalled at: {} and a new Layout was requested at: {}'.format(
                layoutrecall_time, loget_call_reqtime_two)
        assert float(read_req_time)-float(loget_call_reqtime_two) < threshold, \
            'Possible delay in I/O resuming. Read request was sent by the client at: {}'.format(read_req_time)
        self.clientA.close_agent()
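
The test above calls self._search_for_seqid, but the helper itself is not part of this excerpt. A minimal sketch of what it presumably does, under the assumption that it scans the captured client's LAYOUTGET packets for a matching sequence id and returns the packet index (or None when nothing matches):

    def _search_for_seqid(self, loget_packets, seqid):
        # Hypothetical implementation: compare each packet's layout_seqid (a hex
        # string in the captures) for client 0 against the integer seqid we want.
        for index, packet in enumerate(loget_packets[0]):
            if int(packet.layout_seqid, 16) == seqid:
                return index
        return None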