Example #1
    def test_umount_client_in_the_w_r_time(self, get_pd_share):
        mnt_tmpl = Mount(get_pd_share, NfsVersion.pnfs4_2) # can change path=''
        mounts = ctx.clients.nfs.mount(mnt_tmpl)
        fnames = []

        ctx.clients[0].mkdirs(mounts[0].path + '/test_umount')
        for (client, path) in zip(ctx.clients, mounts.path):
            fname = join(path + '/test_umount', "fileA-" + client.address)
            fnames.append(fname)
        fnames = dispatcher_tuple(fnames)

        fds = ctx.clients.open_file(fnames, os.O_CREAT | os.O_RDWR | os.O_SYNC | os.O_DIRECT, 0644)
        fvs = ctx.clients.write_file(fds, 0, 32, 2, block=False)
        time.sleep(1) # Let the writes start

        for client, mount in zip(ctx.clients, mounts):
            try:
                self._logger.debug("client: {0} mount: {1}".format(client, mount))
                res = client.nfs.umount(mount)
                raise Exception("umount succeeded while it was expected to fail")
            except CliOperationFailed as x:
                # rc 16 == "Device busy": umount must fail while writes are in flight
                assert x.rc == 16, "Unexpected umount behavior"

        for fv in fvs:
            fv.get()

        ctx.clients.close_file(fds)
        ctx.clients.close_agent()
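
The pattern in Example #1 (an operation that must fail with a specific rc) recurs often enough to be worth a helper. A minimal sketch, assuming only the CliOperationFailed exception already used above; expect_cli_failure is a hypothetical name:

    from contextlib import contextmanager

    @contextmanager
    def expect_cli_failure(rc):
        # Fail if the wrapped operation succeeds, or fails with the wrong rc.
        try:
            yield
        except CliOperationFailed as x:
            assert x.rc == rc, "Unexpected rc: {0}".format(x.rc)
        else:
            raise AssertionError("operation succeeded while it was expected to fail")

    # Usage: with expect_cli_failure(16): client.nfs.umount(mount)
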
Example #2
    def test_owner_create_w_r_others_w_r_as_root(self, get_pd_share):

        mnt_tmpl = Mount(get_pd_share, NfsVersion.pnfs4_2) # can change path=''
        mounts = ctx.clients.nfs.mount(mnt_tmpl)

        # The owner writes; other users read

        udb = UserGroupGen(ctx.clients)
        udb.generate(3,2)
        groups = udb.get_groups()
        users = udb.get_users(groups[0])
        directory = 'owner_1'
        path = "{0}{1}{2}".format(mounts[0].path,"/",directory)

        ctx.clients[0].execute(['mkdir', '-p',path], user=users[0])
        ctx.clients[0].chmod(path, 0666)
        fname = path + '/basic_3'
        fd0 = ctx.clients[0].open_file(fname, os.O_CREAT | os.O_RDWR | os.O_SYNC | os.O_DIRECT, 0777, users[0])
        self._logger.info("BEFORE OPEN")
        fd = ctx.clients.open_file(fname, os.O_CREAT | os.O_RDWR | os.O_SYNC | os.O_DIRECT, 0755)
        self._logger.info("BEFORE WRITE")
        ctx.clients.write_file(fd, 0, 32, 2)
        self._logger.info("BEFORE READ")
        ctx.clients.read_file(fd, 32, 32, 2)
        ctx.clients.close_file(fd)
        self._logger.info("AFTER CLOSE")
Example #3
    def test_pnfs_verify(self, get_pd_share):
        mnt_tmpl = Mount(get_pd_share, NfsVersion.pnfs) # can change path=''
        mounts = ctx.clients.nfs.mount(mnt_tmpl)
        for (client, path) in zip(ctx.clients, mounts.path):
            fname = "verify-pnfs-" + client.address

            nfsstat_file = join("/tmp", "nfsstat.out")
            client.execute(["nfsstat", "-Z", "1", "-l", ">", nfsstat_file, "&", "KILLPID=$!", "&&", "echo", "blah", ">", join(path, fname),\
                         "&&", "sync", "&&", "kill", "-2", "$KILLPID" ])
            res = client.execute(['cat', nfsstat_file])

            nfsinfo = dict()
            for line in res.stdout.splitlines():
                if not line.startswith('nfs'):
                    continue
                _, version, _, opt, count = line.split()

                try:
                    nfsinfo[version].update([(opt.strip()[:-1], int(count.strip()))])
                except KeyError:
                    nfsinfo[version] = {opt.strip()[:-1]: int(count.strip())}

            # No v3 writes means all the I/O went inband through the MDS,
            # which fails pNFS verification.
            try:
                if nfsinfo['v3']['write'] < 1:
                    raise KeyError
            except KeyError:
                raise Exception('Test Failed - inband IO {}'.format(nfsinfo))
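
Examples #3, #4, and #19 all parse `nfsstat -Z 1 -l` output with the same inline loop. A standalone sketch of that parsing step, assuming the "nfs <version> <class> <op>: <count>" line shape the tests rely on (the sample line in the comment is illustrative):

    def parse_nfsstat(output):
        # Build {version: {op_name: count}} from nfsstat -l style output.
        nfsinfo = dict()
        for line in output.splitlines():
            if not line.startswith('nfs'):
                continue
            _, version, _, opt, count = line.split()
            op = opt.strip()[:-1]  # drop the trailing ':'
            nfsinfo.setdefault(version, {})[op] = int(count.strip())
        return nfsinfo

    # parse_nfsstat("nfs v3 client write: 5") -> {'v3': {'write': 5}}
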
Example #4
    def test_write_from_all_clients_to_same_file(self, get_pd_share):
        num_of_files = 1000
        mnt_tmpl = Mount(get_pd_share, NfsVersion.pnfs4_2) # can change path=''
        mounts = ctx.clients.nfs.mount(mnt_tmpl)

        nfsstat_file = join("/tmp", "nfsstat.out")
        ctx.clients.mkdirs(mounts.path + '/test_smoke')
        for i in xrange(num_of_files):
            ctx.clients.execute(["nfsstat", "-Z", "1", "-l", ">", nfsstat_file, "&", "KILLPID=$!", "&&",
                                 'dd', 'if=/dev/zero', 'of=%s/test_smoke/smoke_%s' % (mounts[0].path, i),
                                 'bs=24k', 'count=1', 'conv=fsync', 'conv=notrunc',
                                 "&&", "sync", "&&", "kill", "-2", "$KILLPID"])
        res = ctx.clients[0].execute(['cat', nfsstat_file])
        nfsinfo = dict()
        for line in res.stdout.splitlines():
            if not line.startswith('nfs'):
                continue
            _, version, _, opt, count = line.split()
            try:
                nfsinfo[version].update([(opt.strip()[:-1], int(count.strip()))])
            except KeyError:
                nfsinfo[version] = {opt.strip()[:-1]: int(count.strip())}

        # No v3 writes means all the I/O went inband through the MDS.
        try:
            if nfsinfo['v3']['write'] < 1:
                raise KeyError
        except KeyError:
            raise Exception('Test Failed - inband IO {}'.format(nfsinfo))
Example #5
 def verify_failed_share_creation(self, path, check_mount=True):
     before_shares = dict(ctx.cluster.shares)
     res = ctx.cluster.execute([
         "find",
         "%s" % pdfs, "-maxdepth", "1", "-type", "d", "-name", "0x\*"
     ])
     before_counter = len(res.stdout.split('\n'))
     yield
     after_shares = dict(ctx.cluster.shares)
     # Verify that the share was not created at the SM level
     assert len(set(after_shares.values()) - set(before_shares.values())
                ) == 0, "share should not have been created"
     res = ctx.cluster.execute([
         "find",
         "%s" % pdfs, "-maxdepth", "1", "-type", "d", "-name", "0x\*"
     ])
     after_counter = len(res.stdout.split('\n'))
     # Verify that the share was not created at the PDFS level
     assert after_counter - before_counter == 0, "irrelevant KVS directory was found"
     share = ShareView(path=path)
     mnt_template = Mount(share, NfsVersion.nfs4_1)
     # Verify that the share was not created at the PROTOD level
     if check_mount:
         try:
             mount = ctx.clients[0].nfs.mount(mnt_template)
         except Exception:
             # Mount is expected to fail for a share that was never created
             return
         ctx.clients[0].nfs.umount(mount)
         raise AssertionError("mount should not have succeeded")
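
verify_failed_share_creation runs its pre-checks before the yield and its assertions after it, which is the generator fixture / context-manager shape. A minimal self-contained sketch of the same before/after pattern, with get_count as a hypothetical counter callback:

    from contextlib import contextmanager

    @contextmanager
    def verify_no_new_entries(get_count):
        # Snapshot a counter, run the body, then assert nothing was added.
        before = get_count()
        yield
        assert get_count() - before == 0, "unexpected new entries"

    # Usage:
    # with verify_no_new_entries(lambda: len(ctx.cluster.shares)):
    #     ...attempt a share creation that should fail...
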
Example #6
    def test_stat_dir(self, get_pd_share):
        share = get_pd_share
        mnt_tmpl = Mount(share, NfsVersion.pnfs)
        mounts = ctx.clients.nfs.mount(mnt_tmpl)
        dirnames = []
        for (client, path) in zip(ctx.clients, mounts.path):
            dirname = os.path.join(path, "dirA-" + client.address)
            dirnames.append(dirname)

        dirnames = dispatcher_tuple(dirnames)
        self._logger.info('Creating directory')
        res = ctx.clients.makedir(dirnames, 0755)

        self._logger.info('Listing directory')
        res = ctx.clients.stat(dirnames)
        for expected_dir in dirnames:
            dir_found = False
            for actual_dir in res:
                actual_dir_basename = actual_dir['Name']
                expected_dir_basename = os.path.basename(expected_dir)
                if actual_dir_basename == expected_dir_basename:
                    self._logger.debug("Dir {0} found in actual results")
                    dir_found = True
                    break
            assert dir_found, "Directory {0} not found".format(expected_dir)
            assert actual_dir['IsDir'], "Entry {0} is not a directory".format(
                expected_dir)

        self._logger.info('Removing directory')
        res = ctx.clients.rmdir(dirnames)

        self._logger.debug('Closing agent')
        ctx.clients.close_agent()
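
The nested search in Example #6 can also be done with a single dict lookup over the stat results. An equivalent sketch, assuming each entry exposes the same 'Name' and 'IsDir' keys used above:

    actual = dict((d['Name'], d['IsDir']) for d in res)
    for expected_dir in dirnames:
        name = os.path.basename(expected_dir)
        assert name in actual, "Directory {0} not found".format(expected_dir)
        assert actual[name], "Entry {0} is not a directory".format(expected_dir)
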
Example #7
 def call_mount(self, share_name, mount_point):
     ctx.clients[0].mkdirs(mount_point)
     share = ctx.cluster.shares[share_name]
     mnt_objs = Mount(share, NfsVersion.pnfs,
                      path=str(mount_point))
     mnt_cl = ctx.clients[0].nfs.mount(mnt_objs)
     return mnt_cl
Example #8
 def call_mount(self, share_name, mount_point):
     if not ctx.clients[0].exists(mount_point):
         ctx.clients[0].mkdirs(mount_point)
     share = ctx.cluster.shares[share_name]
     mnt = Mount(share, NfsVersion.pnfs, path=str(mount_point))
     mnt_cl = ctx.clients[0].nfs.mount(mnt)
     self._logger.info('mnt_cl is: %s' % mnt_cl)
     return mnt_cl
Example #9
 def conf_3u_2g(self, get_pd_share):
     self.mnt_tmpl = Mount(get_pd_share, NfsVersion.pnfs4_2) # can change path=''
     print 'mnt_tmpl', self.mnt_tmpl
     self.mounts = ctx.clients.nfs.mount(self.mnt_tmpl)
     self.udb = UserGroupGen(ctx.clients)
     self.udb.generate(3,2)
     self.groups = self.udb.get_groups()
     yield
     self.udb.clean()
Example #10
 def conf_2u_1g(self, get_pd_share):
     mnt_tmpl = Mount(get_pd_share, NfsVersion.pnfs4_2) # can change path=''
     mounts = ctx.clients.nfs.mount(mnt_tmpl)
     udb = UserGroupGen(ctx.clients)
     udb.generate(2,1)
     groups = udb.get_groups()
     users = udb.get_users(groups[0])
     yield
     udb.clean()
Example #11
    def test_write_from_one_list_dir_from_others(self, get_pd_share):
        num_of_files = 1000
        mnt_tmpl = Mount(get_pd_share, NfsVersion.pnfs4_2) # can change path=''
        mounts = ctx.clients.nfs.mount(mnt_tmpl)

        ctx.clients[0].mkdirs(mounts[0].path + '/test_02')
        for i in xrange(num_of_files):
            ctx.clients[0].execute(['dd', 'if=/dev/zero', 'of=%s/test_02/file_%s' % (mounts[0].path, i),
                                    'bs=24k', 'count=1', 'conv=fsync', 'conv=notrunc'])
        for (client, mount) in zip(ctx.clients, mounts):
            ret = client.listdir(mount.path + '/test_02')
            assert len(ret) == num_of_files, 'client {0} sees an incomplete file listing under dir test_02'.format(client.address)
Example #12
    def test_create_many_files_from_all_clients_and_list(self, get_pd_share):
        num_of_files = 25000
        mnt_tmpl = Mount(get_pd_share, NfsVersion.pnfs4_2) # can change path=''
        mounts = ctx.clients.nfs.mount(mnt_tmpl)

        ctx.clients[0].mkdirs(mounts[0].path + '/test_03')
        for i in xrange(num_of_files):
            for (client, mount) in zip(ctx.clients, mounts):
                client.execute(['dd', 'if=/dev/zero', 'of=%s/test_03/file_%s_%s' % (mount.path, client.address, i),
                                     'bs=24k', 'count=5', 'conv=fsync', 'conv=notrunc'])
        for (client, mount) in zip(ctx.clients, mounts):
            ret = client.listdir(mount.path + '/test_03')
            print " # of actual files =", len(ret)
            print " # of expected files =", num_of_files * len(ctx.clients)
            assert len(ret) == num_of_files * len(ctx.clients), 'client {0} sees an incomplete file listing under dir test_03'.format(client.address)
Example #13
 def setup(self):
     num_of_shares = max(len(ctx.clients), SHARES)
     for _ in xrange(num_of_shares):
         ctx.cluster.cli.share_create_by_obj(RandDdShare())
     mnt_templates = []
     clients = []
     for idx, share in enumerate(ctx.cluster.shares.values()):
         mnt_templates.append(Mount(share, NfsVersion.nfs4_1))
         clients.append(ctx.clients[idx % len(ctx.clients)])
     mnt_templates = dispatcher_tuple(mnt_templates)
     clients = dispatcher_tuple(clients)
     mounts = clients.nfs.mount(mnt_templates)
     yield mounts, clients
     clients[0].clean_dir(mounts[0].path, timeout=1200)
     clients.nfs.umount(mounts)
     clients.remove(mounts.path)
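
Example #13 assigns shares to clients round-robin via idx % len(ctx.clients), so with more shares than clients each client mounts several shares. A tiny illustration of the mapping, with hypothetical names:

    clients = ['c0', 'c1']
    shares = ['s0', 's1', 's2', 's3', 's4']
    mapping = [(s, clients[i % len(clients)]) for i, s in enumerate(shares)]
    # [('s0', 'c0'), ('s1', 'c1'), ('s2', 'c0'), ('s3', 'c1'), ('s4', 'c0')]
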
Example #14
    def test_create_user_basic(self, get_pd_share):

        mnt_tmpl = Mount(get_pd_share, NfsVersion.pnfs4_2) # can change path=''
        mounts = ctx.clients.nfs.mount(mnt_tmpl)
        udb = UserGroupGen(ctx.clients)
        udb.generate(3,2)
        groups = udb.get_groups()
        users = udb.get_users(groups[0])
        directory = 'create_user1'
        path = "{0}{1}{2}".format(mounts[0].path,"/",directory)

        ctx.clients[0].execute(['mkdir', '-p',path], user=users[0])
        ctx.clients[0].chmod(path, 0777)
        fname = path + '/create_user1'
        fd0 = ctx.clients[0].open_file(fname, os.O_CREAT | os.O_RDWR | os.O_SYNC | os.O_DIRECT, 0777, users[0])
        ctx.clients[0].close_file(fd0)
        udb.clean()
        ctx.clients[0].close_agent()
Example #15
    def test_create_remove_dir(self, get_pd_share):
        share = get_pd_share
        mnt_tmpl = Mount(share, NfsVersion.pnfs)
        mounts = ctx.clients.nfs.mount(mnt_tmpl)
        dirnames = []
        for (client, path) in zip(ctx.clients, mounts.path):
            dirname = os.path.join(path, "dirA-" + client.address)
            dirnames.append(dirname)

        dirnames = dispatcher_tuple(dirnames)
        self._logger.debug('Creating directory')
        res = ctx.clients.makedir(dirnames, 0755)

        self._logger.debug('Removing directory')
        res = ctx.clients.rmdir(dirnames)

        self._logger.debug('Closing agent')
        ctx.clients.close_agent()
Example #16
    def test_create_open_write_from_first_user_read_from_secound_root(self, get_pd_share):

        mnt_tmpl = Mount(get_pd_share, NfsVersion.pnfs4_2) # can change path=''
        mounts = ctx.clients.nfs.mount(mnt_tmpl)

        # Create users on the clients
        udb = UserGroupGen(ctx.clients)
        udb.generate(3,2)
        groups = udb.get_groups()
        users = udb.get_users(groups[0])
        directory = 'group_1'
        path = "{0}{1}{2}".format(mounts[0].path,"/",directory)

        print "kyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy"

        ctx.clients[0].execute(['mkdir', '-p',path], user=users[0])
        ctx.clients[0].chmod(path, 0666)
        fname = path + '/write_from_first_user_read_from_secound'
        res = ctx.clients[0].execute(['id'], user=users[0])
        self._logger.info(res.stdout)
        fd0 = ctx.clients[0].open_file(fname, os.O_CREAT | os.O_RDWR | os.O_SYNC | os.O_DIRECT, 0777, users[0])
        dirname = '/group_1'
        ctx.clients[0].mkdirs(dirname)
        ctx.clients[0].execute(['chown', '-h', '-R', '--from=' + users[0] + ':' + groups[0],
                                users[0], directory])
        fname0 = self.get_test_file(mounts[0], dirname, fname)
        fname1 = self.get_test_file(mounts[1], dirname, fname)

        self._logger.info("BEFORE OPEN")

        fd0 = ctx.clients[0].open_file(fname0, os.O_CREAT | os.O_RDWR | os.O_SYNC | os.O_DIRECT, 0644, 'client0')
        time.sleep(2)
        fd1 = ctx.clients[1].open_file(fname1, os.O_RDWR | os.O_SYNC | os.O_DIRECT, 0644)
        #fds = dispatcher_tuple((fd0,) + fds)
        self._logger.info("BEFORE WRITE")
        ctx.clients[0].write_file(fd0, 0, 32, 2)
#         self._logger.info("BEFORE CLOSE")
        ctx.clients[1].read_file(fd1, 0, 32, 2)
        ctx.clients[0].close_file(fd0)
        ctx.clients[1].close_file(fd1)
        self._logger.info("AFTER CLOSE")
        ctx.clients.close_agent()
Example #17
    def test_wrie_from_owner_cyclically_write_from_others(self, get_pd_share):
        # Check only that the system does not crash.
        # Requires at least 3 clients (verified by the assert below).

        mnt_tmpl = Mount(get_pd_share, NfsVersion.pnfs4_2) # can change path=''
        mounts = ctx.clients.nfs.mount(mnt_tmpl)

        assert len(ctx.clients) >= 3, "Not enough clients for the test; at least 3 are required, found only " + str(len(ctx.clients))

        ctx.clients[0].mkdirs(mounts[0].path + '/test_04')
        fv = ctx.clients[0].execute(['dd', 'if=/dev/zero', 'of=%s/test_04/file' % (mounts[0].path),
                                     'bs=1M', 'count=1000', 'conv=fsync', 'conv=notrunc'], block=False)
        time.sleep(1)
        # Loop over all clients
        for i in range(1,len(ctx.clients)-1):
            ctx.clients[i].execute(['dd', 'of=/dev/null', 'if=%s/test_04/file' % (mounts[i].path),
                                'bs=1M', 'count=1000', 'iflag=direct'])
            ctx.clients[i+1].execute(['dd', 'if=/dev/zero', 'of=%s/test_04/file' % (mounts[i+1].path),
                                     'bs=4k', 'count=1', 'conv=fsync', 'conv=notrunc'])
        fv.get()
Example #18
    def test_user_write_to_specific_group(self, get_pd_share):
        num_of_files = 10000

        mnt_tmpl = Mount(get_pd_share, NfsVersion.pnfs4_2) # can change path=''
        mounts = ctx.clients.nfs.mount(mnt_tmpl)
        udb = UserGroupGen(ctx.clients)
        udb.generate(3,2)
        groups = udb.get_groups()
        users = udb.get_users(groups[0])
        directory = 'user_write_to_specific_group'
        path = "{0}{1}{2}".format(mounts[0].path,"/",directory)
        ctx.clients[0].execute(['mkdir', '-p',path], user=users[0])
        ctx.clients[0].chmod(path, 0666)
        fname = path + '/specific_group'
        fd0 = ctx.clients[0].open_file(fname, os.O_CREAT | os.O_RDWR | os.O_SYNC | os.O_DIRECT, 0777, users[0])
        ctx.clients[0].close_file(fd0)
        ctx.clients[0].execute(['chown', '{0}:smoke'.format(users[0]), path])
        ctx.clients[0].execute(['dd', 'if=/dev/zero', 'of=%s/TTTTTT_%s' % (path, 1),
                                'bs=24k', 'count=1', 'conv=fsync', 'conv=notrunc'], user=users[0])
        ctx.clients[0].execute(['dd', 'of=/dev/null', 'if=%s/TTTTTT_%s' % (path, 1),
                                'bs=14k', 'count=3', 'iflag=direct'], user=users[0])
        udb.clean()
        ctx.clients[0].close_agent()
Example #19
    def test_pNFS_behaviour_one_client(self, get_pd_share):
        mnt_tmpl = Mount(get_pd_share, NfsVersion.pnfs4_2) # can change path=''
        mount = ctx.clients[0].nfs.mount(mnt_tmpl)
        fname = ctx.clients[0].address
        nfsstat_file = join("/tmp", "nfsstat.out")
        path = mount.path + '/test_pNFS_behaviour_01'
        ctx.clients[0].mkdirs(path)

        ctx.clients[0].execute(["nfsstat", "-Z", "1", "-l", ">", nfsstat_file, "&", "KILLPID=$!", "&&", "echo", "verify-pnfs-one", ">", join(path, fname),\
                         "&&", "sync", "&&", "kill", "-2", "$KILLPID" ])
        res = ctx.clients[0].execute(['cat', nfsstat_file])

        nfsinfo = dict()
        for line in res.stdout.splitlines():
            if not line.startswith('nfs'):
                continue
            _, version, _, opt, count = line.split()
            try:
                nfsinfo[version].update([(opt.strip()[:-1], int(count.strip()))])
            except KeyError:
                nfsinfo[version] = {opt.strip()[:-1]: int(count.strip())}

        assert nfsinfo['v3']['write'] > 0, 'Test Failed - inband IO {}'.format(nfsinfo)
Example #20
 def verify_share_creation(self):
     before_shares = dict(ctx.cluster.shares)
     res = ctx.cluster.execute([
         "find",
         "%s" % pdfs, "-maxdepth", "1", "-type", "d", "-name", "0x\*"
     ])
     before_counter = len(res.stdout.split('\n'))
     yield
     after_shares = dict(ctx.cluster.shares)
     added = set(after_shares.values()) - set(before_shares.values())
     # Verify that the share was created at the SM level
     assert len(added) == 1, "share was not created or something weird happened"
     res = ctx.cluster.execute([
         "find",
         "%s" % pdfs, "-maxdepth", "1", "-type", "d", "-name", "0x\*"
     ])
     after_counter = len(res.stdout.split('\n'))
     # Verify that the share was created at the PDFS level
     assert after_counter - before_counter == 1, "KVS was not created or something weird happened"
     # Verify that the share was created at the PROTOD level
     mnt_template = Mount(list(added)[0], NfsVersion.nfs4_1)
     mount = ctx.clients[0].nfs.mount(mnt_template)
     ctx.clients[0].nfs.umount(mount)
Example #21
    def test_poc_script_phase_3(self, get_pd_share):

        hypervisor = ctx.hypervisor
        clients = [
            hypervisor.get_first_vm(c.name)
            for c in hypervisor._get_vms_in_folder(self._CLIENTS_DIR)
        ]
        if not ctx.clients and not clients:
            with self.step('Create vm on VVOL'):
                client, address = self._clone_vm_on_vvol(hypervisor)
                ctx.clients += Client(
                    address=address,
                    username='******',
                    password='******',
                    hw_mgmt=(hypervisor._address, hypervisor._username,
                             hypervisor._password, client.vm.name))
        else:
            for c in clients:
                ctx.clients += Client(address=c.wait_for_ip(),
                                      username='******',
                                      password='******',
                                      hw_mgmt=(hypervisor._address,
                                               hypervisor._username,
                                               hypervisor._password, c.name))

        with self.step('share-objective-remove'):
            obj = self._POC_SMART_SHARE
            if obj:
                ctx.cluster.cli.share_objective_remove(name=obj.name, path='/')
            self._POC_SMART_SHARE = None

        with self.step('Deploy tools on clients'):
            ctx.clients.deploy()

        with self.step('limit datastore bandwidth'):
            # get non vm ds caps bw (pdfs cli on vmdk file)
            vmdk_file = self._get_largest_vmdk(get_pd_share,
                                               ctx.clients[0].hw._vm_name)
            vmdk_inode = ctx.cluster.file_inode(vmdk_file)
            fi = ctx.cluster.get_file_info(get_pd_share, vmdk_inode)
            vm_ds = fi.instances[0].data_store
            vm_ds_ip = vm_ds.node.mgmt_ip_address.address
            ds = choice(
                [ds for ds in ctx.cluster.data_stores.values() if ds != vm_ds])
            ds_write_bw = ds.storage_capabilities.performance.write_bandwidth
            # set TC on vm ds to half
            with ctx.get_ds('address', vm_ds_ip).tc.limit('pddata',
                                                          bw=ds_write_bw / 2):
                with self.step('share-objective-add'):
                    poc_share = get_pd_share
                    poc_smart = obj or ctx.cluster.smart_objectives.\
                        get('poc_smart_obj')
                    obj = ctx.cluster.cli.\
                        share_objective_add(name=poc_share.name,
                                            objective=poc_smart.name,
                                            path='/')
                    self._POC_SMART_SHARE = obj

                with self.step('run IO on VM'):
                    dir_name = str(RandPath())
                    ctx.clients.mkdirs(dir_name)
                    dirs = dispatcher_tuple([
                        Mount(poc_share, NfsVersion.no_nfs, path=dir_name)
                        for _ in ctx.clients
                    ])
                    fio = FioCaps().basic(teardown=False)
                    ctx.clients.execute_tool(fio, dirs)

            with self.step('Wait for move'):
                # wait for move, timeout - 300 seconds (pdfs cli on vmdk file)
                t2 = t1 = time()
                while t2 - t1 < self._COPY_TIMEOUT:
                    new_fi = ctx.cluster.get_file_info(get_pd_share,
                                                       vmdk_inode)
                    if {vm_ds} != set(
                        [inst.data_store for inst in new_fi.instances]):
                        break
                    sleep(1)
                    t2 = time()
                assert t2 - t1 < self._COPY_TIMEOUT, \
                    'the move process took more than {} seconds' \
                    ''.format(self._COPY_TIMEOUT)
Example #22
    def test_rpm_uninstall_install(self, get_pd_share):
       # Verify that all clients are still in pNFS mode
       mnt_tmpl = Mount(get_pd_share, NfsVersion.pnfs4_2) # can change path=''
       mounts = ctx.clients.nfs.mount(mnt_tmpl)

       fedora = []
       sles = []
       rhel = []
       fedora_mounts = []
       sles_mounts = []
       rhel_mounts = []
       rhel_paths = []
       sles_paths = []
       for client, mount in zip(ctx.clients, mounts):
            if client.os.type is OsType.Fedora:
                fedora.append(client)
                fedora_mounts.append(mount)
            if client.os.type is OsType.Sles:
                sles.append(client)
                sles_mounts.append(mount)
                sles_paths.append(mount.path)

            if client.os.type is OsType.Rhel:
                rhel.append(client)
                rhel_mounts.append(mount)
                rhel_paths.append(mount.path)

       fedora = dispatcher_tuple(fedora)

       rhel = dispatcher_tuple(rhel)
       rhel_mounts = dispatcher_tuple(rhel_mounts)
       rhel_rpm = rhel.execute(['rpm', '-qa', '|', 'grep', 'pd-nfs']).stdout

       rhel.nfs.umount(rhel_mounts)
       rhel.execute(['rpm', '-e', '%s' % (rhel_rpm)])
       print "rhel-rpm  is :", rhel_rpm

       rhel.close_agent()
       self._logger.info("Rebooting RHEL clients")
       rhel.reboot()
       self._logger.info("Waiting for RHEL clients to start")
       time.sleep(5)
       rhel.wait_for(attempt=10, interval=10)
       time.sleep(60)

       fnames = []
       rhel.nfs.mount(rhel_mounts)
       for (rhel_client, path) in zip(rhel, rhel_paths):
           print "rhel_mounts.path is:", path
           fnames.append(join(path, 'rhel-' + rhel_client.address))

       time.sleep(2)
       fnames = dispatcher_tuple(fnames)
       pcaps = PE(rhel)

       fds = rhel.open_file(fnames, os.O_CREAT | os.O_RDWR | os.O_SYNC | os.O_DIRECT, 0644)
       rhel.write_file(fds, 0, 32, 5)
       rhel.read_file(fds, 0, 32, 5)
       rhel.close_file(fds)
       time.sleep(1) # DELME

       results = pcaps.get_nfs_ops(False)
       calls_only = pcaps.get_nfs_ops_by_and_filter(results, is_call=True)
       writes_only = pcaps.get_nfs_ops_by_and_filter(results, op_type=PE_NfsOpType.WRITE, is_call=True)
       read_only = pcaps.get_nfs_ops_by_and_filter(results, op_type=PE_NfsOpType.READ, is_call=True)
       assert len(writes_only) > 0, "No writes detected during the recorded period"
       assert len(read_only) > 0, "No read detected during the recorded period"

       for (rhel_client, single_client_nfs_ops) in zip(rhel, calls_only):
           self._logger.debug("RHEL CLIENT {}".format(rhel_client.address))
           for nfs_op in single_client_nfs_ops:
               if nfs_op.op_type == PE_NfsOpType.WRITE or nfs_op.op_type == PE_NfsOpType.READ:
                   self._logger.info('  NFS OP %s at %s' % (nfs_op.op_type.name, nfs_op.time_relative))
                   self._logger.info('  OP DETAILS:\n%s' % pprint.pformat(nfs_op.__dict__))
                   assert nfs_op.ip_dst == ctx.cluster.address, "I/O that doesn't go to the MDS found"
       print "rhel is ok"

       # SLES tests
       sles = dispatcher_tuple(sles)
       sles_mounts = dispatcher_tuple(sles_mounts)
       sles_rpm = sles.execute(['rpm', '-qa', '|', 'grep', 'pd-nfs']).stdout
       sles.nfs.umount(sles_mounts)
       sles.execute(['rpm', '-e', '%s' % (sles_rpm)])
       print "sles-rpm  is :", sles_rpm
       sles.close_agent()
       self._logger.info("Rebooting RHEL clients")
       sles.reboot()
       sles.wait_for(attempt=10, interval=10)
       time.sleep(60)
#       sles.execute(['rmmod', 'nfs_layout_flexfiles',])
       fnames = []
       sles.nfs.mount(sles_mounts)
       for (sles_client, path) in zip(sles, sles_paths):
           print "sles_mounts.path is:", path
           fnames.append(join(path, 'sles-' + sles_client.address))

       time.sleep(2)
       fnames = dispatcher_tuple(fnames)
       pcaps = PE(sles)

       fds = sles.open_file(fnames, os.O_CREAT | os.O_RDWR | os.O_SYNC | os.O_DIRECT, 0644)
       sles.write_file(fds, 0, 32, 5)
       sles.read_file(fds, 0, 32, 5)
       sles.close_file(fds)
       time.sleep(1) # DELME

       results = pcaps.get_nfs_ops(False)
       calls_only = pcaps.get_nfs_ops_by_and_filter(results, is_call=True)
       writes_only = pcaps.get_nfs_ops_by_and_filter(results, op_type=PE_NfsOpType.WRITE, is_call=True)
       read_only = pcaps.get_nfs_ops_by_and_filter(results, op_type=PE_NfsOpType.READ, is_call=True)
       assert len(writes_only) > 0, "No writes detected during the recorded period"
       assert len(read_only) > 0, "No read detected during the recorded period"

       for (sles_client, single_client_nfs_ops) in zip(sles, calls_only):
            self._logger.debug("RHEL CLIENT {}".format(sles_client.address))
            for nfs_op in single_client_nfs_ops:
                if nfs_op.op_type == PE_NfsOpType.WRITE or nfs_op.op_type == PE_NfsOpType.READ:
                    self._logger.info('  NFS OP %s at %s' % (nfs_op.op_type.name, nfs_op.time_relative))
                    self._logger.info('  OP DETAILS:\n%s' % pprint.pformat(nfs_op.__dict__))
                    assert nfs_op.ip_dst == ctx.cluster.address, "I/O that doesn't go to the MDS found"

       print "sles is ok"

       kernel = fedora.execute(['uname', '-r']).stdout
       print kernel

       # Reinstall the pd-nfs RPM on the RHEL clients
       rhel.execute(['rpm', '-ivh', '/opt/' + rhel_rpm[0].rstrip() + '.rpm'])
        # Add reboot
       rhel.close_agent()
       self._logger.info("Rebooting RHEL clients")
       rhel.reboot()
       self._logger.info("Waiting for RHEL clients to start")
       time.sleep(5)
       rhel.wait_for(attempt=10, interval=10)
       time.sleep(60)
       self._logger.info("Mounting RHEL clients")
       rhel.nfs.mount(rhel_mounts)

       self.rhel_pnfs_test(rhel, rhel_mounts, rhel_client, rhel_paths, require_inband=False)
       print "END rhel_pnfs_test"

       # Reinstall the pd-nfs RPM on the SLES clients
       print "SLES beginning"
       sles.execute(['rpm', '-ivh', '/opt/' + sles_rpm[0].rstrip() + '.x86_64.rpm'])
       print "Sles"
        # Add reboot
       sles.close_agent()
       self._logger.info("Rebooting Sles clients")
       sles.reboot()
       self._logger.info("Waiting for Sles clients to start")
       sles.wait_for(attempt=10, interval=10)
       time.sleep(60)
       self._logger.info("Mounting Sles clients")
       sles.nfs.mount(sles_mounts)
       print "START sles_pnfs_test"
       self.sles_pnfs_test(sles, sles_mounts, sles_client, sles_paths, require_inband=False)
       print "END sles_pnfs_test"