Example #1
def test_get_correct_tuple():
    class O(object):
        pass

    ll = dispatcher_tuple([O, O])
    assert type(ll) == _CallableDispatcherTuple
    ll = dispatcher_tuple([O(), O()])
    assert type(ll) == _DispatcherTuple
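Example #1 only checks the concrete types returned; the behavior those types provide is the fan-out of attribute access and method calls exercised by Examples #18-#21 further down. A minimal sketch of that usage pattern (imports are omitted here just as in the snippets of this listing, and the Worker class is purely illustrative):

class Worker(object):  # illustrative helper, not part of the project
    def __init__(self, name):
        self.name = name

    def greet(self):
        return 'hello from {0}'.format(self.name)

workers = dispatcher_tuple([Worker('a'), Worker('b')])
# Attribute access and method calls are dispatched to every element, and the
# per-element results come back as a plain tuple (compare Example #20).
assert workers.name == ('a', 'b')
assert workers.greet() == ('hello from a', 'hello from b')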
Example #2
 def __init__(self):
     self.__servers = dispatcher_tuple([])
     self.__clients = dispatcher_tuple([])
     self.__cluster = None
     self.__data_stores = dispatcher_tuple([])
     self.__data_portals = dispatcher_tuple([])
     self.__da_dss = dispatcher_tuple([])
     self.__variables = None
     self.__seed = None
     self.__working_dir = None
     self._results = []
Example #3
 def _loader_data_stores(self, attr):
     data_stores = [
         get_correct_ds(node_update=partial(self.cluster.rest.node_list,
                                            use_cache=False),
                        **cl) for cl in attr
     ]
     self.__data_stores = dispatcher_tuple(data_stores)
Example #4
    def test_read_completed_ops_dio(self, get_pnfs4_2_mount_points):
        mounts = get_pnfs4_2_mount_points
        fnames = []

        # TODO - cleanup to existing file
        ctx.clients[0].mkdirs(mounts[0].path + '/test_layoutstat')
        for (client, path) in zip(ctx.clients, mounts.path):
            fname = join(path + '/test_layoutstat', "write-test1-" + client.address)
            fnames.append(fname)

        fnames = dispatcher_tuple(fnames)
        fds = ctx.clients.open_file(fnames, os.O_CREAT | os.O_RDWR | os.O_SYNC | os.O_DIRECT, 0644)
        ctx.clients.write_file(fds, 0, 1024, 1)
        # Loop for 4 ReadOpsCompleted intervals and one ReadOpsRequested
        res = self.io_spread(fds, io_type=IoType.READ, interval_count=4, blocks_count=1024)
        self._logger.debug("io_count_completed: {}".format(res['io_count_completed']))
        ctx.clients.close_file(fds)
        ctx.clients.close_agent()

        inode = client.file_inode(fname)
        self._logger.debug('fname {}, inode: {}'.format(fname, inode))
        sleep(self.DB_UPDATE_INTERVAL)
        stats = ctx.cluster.influx_db.layout_stats(inode=inode)
        stats_entry = stats[0]
        self._logger.debug('rawReadOpsCompleted LayoutStats: {}'.format(stats_entry.rawReadOpsCompleted))

        assert stats_entry.rawReadOpsCompleted[-1] == res['io_count_completed'], \
            'inode {}: test completed {} read ops, but LAYOUTSTATS reported {}'.format(
                inode, res['io_count_completed'], stats_entry.rawReadOpsCompleted[-1])
Example #5
 def setup(self):
     num_of_shares = max(len(ctx.clients), SHARES)
     for _ in xrange(num_of_shares):
         ctx.cluster.cli.share_create_by_obj(RandDdShare())
     mnt_templates = []
     clients = []
     for idx, share in enumerate(ctx.cluster.shares.values()):
         mnt_templates.append(Mount(share, NfsVersion.nfs4_1))
         clients.append(ctx.clients[idx % len(ctx.clients)])
     mnt_templates = dispatcher_tuple(mnt_templates)
     clients = dispatcher_tuple(clients)
     mounts = clients.nfs.mount(mnt_templates)
     yield mounts, clients
     clients[0].clean_dir(mounts[0].path, timeout=1200)
     clients.nfs.umount(mounts)
     clients.remove(mounts.path)
Example #6
    def test_stat_dir(self, get_pd_share):
        share = get_pd_share
        mnt_tmpl = Mount(share, NfsVersion.pnfs)
        mounts = ctx.clients.nfs.mount(mnt_tmpl)
        dirnames = []
        for (client, path) in zip(ctx.clients, mounts.path):
            dirname = os.path.join(path, "dirA-" + client.address)
            dirnames.append(dirname)

        dirnames = dispatcher_tuple(dirnames)
        self._logger.info('Creating directory')
        res = ctx.clients.makedir(dirnames, 0755)

        self._logger.info('Listing directory')
        res = ctx.clients.stat(dirnames)
        for expected_dir in dirnames:
            dir_found = False
            for actual_dir in res:
                actual_dir_basename = actual_dir['Name']
                expected_dir_basename = os.path.basename(expected_dir)
                if actual_dir_basename == expected_dir_basename:
                    self._logger.debug("Dir {0} found in actual results")
                    dir_found = True
                    break
            assert dir_found, "Directory {0} not found".format(expected_dir)
            assert actual_dir['IsDir'], "Entry {0} is not a directory".format(
                expected_dir)

        self._logger.info('Removing directory')
        res = ctx.clients.rmdir(dirnames)

        self._logger.debug('Closing agent')
        ctx.clients.close_agent()
Example #7
    def test_dme_recovery(self, get_nfs4_1_mount_points):
        mount = get_nfs4_1_mount_points[0]
        fvs = []
        filenames = []
        instances = set([])
        num_of_files = 10
        for _ in xrange(num_of_files):
            filename = str(RandPath(mount.path))
            filenames.append(filename)
            fvs.append(ctx.clients[0].write_to_file(filename,
                                                    bs='4k',
                                                    count=1024,
                                                    block=False))
        fvs = dispatcher_tuple(fvs)

        sleep(3)
        ctx.cluster.service.stop('pd-dme')
        sleep(5)
        ctx.cluster.service.wait_for('pd-dme')
        fvs.get()

        for filename in filenames:
            fd = ctx.clients[0].file_declare(filename, mount.share)
            instances.add(fd.instances[0].instance_id)
        assert len(instances) == num_of_files, 'files got same instance_id'
Example #8
 def agg_multiple_ds_multiple_client(self, mounts, test_type, fio_parameter,
                                     influx_parameter, files_per_client):
     """
     :param mounts: the mounts related to the clients
     :param test_type:string, which kind of operations to perform -
     write/read ops
     :param fio_parameter: string, parameter to extract from FIO results
     :param influx_parameter: string, parameter to extract from Influx DB
     results
     :param files_per_client: int, how many files to create per client
     :return: two dicts: aggregated FIO results per DS and aggregated InfluxDB results per DS.
     """
     agg_fio = {}
     agg_influx = {}
     clients_results = []
     clients = ctx.clients
     for client, mount in zip(clients, mounts):
         clients_results.append(
             self.agg_multiple_ds_single_client(mount,
                                                test_type,
                                                fio_parameter,
                                                influx_parameter,
                                                files_per_client,
                                                client,
                                                block=False))
     clients_results = dispatcher_tuple(clients_results).get()
     for client_result in clients_results:
         for (ds_id, (fio_res, influx_res)) in client_result.items():
             if ds_id in agg_fio:
                 agg_fio[ds_id] += fio_res
             else:
                 agg_fio[ds_id] = fio_res
             agg_influx[ds_id] = influx_res
     return agg_fio, agg_influx
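The block=False / .get() pairing used here is the listing's recurring pattern (also in Examples #7, #16, and #22) for starting per-client work concurrently and then joining on all of it at once. A stripped-down sketch of the pattern, where clients and mounts are the same per-client collections used above and do_work stands in for any call that accepts block=False (the name is a placeholder, not a real API):

futures = []
for client, mount in zip(clients, mounts):
    # With block=False each call returns immediately with a future-like handle.
    futures.append(client.do_work(mount, block=False))  # do_work is hypothetical
# Wrapping the handles lets a single .get() wait for all of them and
# return their results as one tuple.
results = dispatcher_tuple(futures).get()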
Example #9
    def sles_pnfs_test(self, sles, sles_mounts, sles_client, sles_paths, require_inband):
        fnames = []
        for (sles_client, path) in zip(sles, sles_paths):
            print "sles_mounts.path is:", path
            fnames.append(join(path, 'sles-' + sles_client.address))

        fnames = dispatcher_tuple(fnames)
        pcaps = PE(sles)
        fds = sles.open_file(fnames, os.O_CREAT | os.O_RDWR | os.O_SYNC | os.O_DIRECT, 0644)
        sles.write_file(fds, 0, 32, 5)
        sles.read_file(fds, 0, 32, 5)
        sles.close_file(fds)
        time.sleep(1) # DELME
        results = pcaps.get_nfs_ops(False)
        calls_only = pcaps.get_nfs_ops_by_and_filter(results, is_call=True)
        writes_only = pcaps.get_nfs_ops_by_and_filter(results, op_type=PE_NfsOpType.WRITE, is_call=True)
        read_only = pcaps.get_nfs_ops_by_and_filter(results, op_type=PE_NfsOpType.READ, is_call=True)
        assert len(writes_only) > 0, "No writes detected during the recorded period"
        assert len(read_only) > 0, "No reads detected during the recorded period"
        for (sles_client, single_client_nfs_ops) in zip(sles, calls_only):
            self._logger.debug("SLES CLIENT {}".format(sles_client.address))
            for nfs_op in single_client_nfs_ops:
                if nfs_op.op_type == PE_NfsOpType.WRITE or nfs_op.op_type == PE_NfsOpType.READ:
                    self._logger.info('  NFS OP %s at %s' % (nfs_op.op_type.name, nfs_op.time_relative))
                    self._logger.info('  OP DETAILS:\n%s' % pprint.pformat(nfs_op.__dict__))
                    if require_inband:
                        assert nfs_op.ip_dst == ctx.cluster.address, "I/O that doesn't go to the MDS found"
                    else:
                        assert nfs_op.ip_dst != ctx.cluster.address, "I/O that goes to the MDS found"
Example #10
    def test_umount_client_in_the_w_r_time(self, get_pd_share):
        mnt_tmpl = Mount(get_pd_share, NfsVersion.pnfs4_2) # can change path=''
        mounts = ctx.clients.nfs.mount(mnt_tmpl)
        fnames = []

        ctx.clients[0].mkdirs(mounts[0].path + '/test_umount')
        for (client, path) in zip(ctx.clients, mounts.path):
            fname = join(path + '/test_umount', "fileA-" + client.address)
            fnames.append(fname)
        fnames = dispatcher_tuple(fnames)

        fds = ctx.clients.open_file(fnames, os.O_CREAT | os.O_RDWR | os.O_SYNC | os.O_DIRECT, 0644)
        fvs = ctx.clients.write_file(fds, 0, 32, 2, block=False)
        time.sleep(1) # Let the writes start

        for client, mount in zip(ctx.clients, mounts):
            try:
                self._logger.debug("client: {0} mount: {1}".format(client, mount))
                res = client.nfs.umount(mount)
                raise Exception("umount succeeded while it was expected to fail")
            except CliOperationFailed as x:
                #print x
                assert x.rc == 16, "Unexpected umount behavior"
#                 if x.rc == 16: # Device busy
#                     pass
#                 else:
#                     raise Fatal

        for fv in fvs:
            fv.get()

        ctx.clients.close_file(fds)
        ctx.clients.close_agent()
Example #11
 def _loader_data_portals(self, attr):
     data_portals = [
         DataPortal(node_update=partial(self.cluster.rest.node_list,
                                        use_cache=False),
                    **cl) for cl in attr
     ]
     self.__data_portals = dispatcher_tuple(data_portals)
Example #12
    def test_vcenter_e2e_basic(self, vcenter, get_mount_points):
        share = vcenter.share
        mounts = get_mount_points
        mount = mounts[0]

        assert len(ctx.clients) > 1, 'Need at least 2 clients'
        pdclient = ctx.clients[0]
        vcclient = ctx.clients[-1]

        self.fio = VcenterFio().vcenter()
        params = self.fio._conf_params
        directory = params['directory']
        fname = params['filename']
        vcclient.mkdirs(directory)
        filename = os.path.join(directory, fname)

        with self.step('deploy on clients'):
            ctx.clients.deploy(['fio'])

        with self.step('gather info'):
            vmdk_path = self._get_vmdk_path(pdclient, mount.path,
                                            vcenter.vm_name)
            vmdk_details = pdclient.file_declare(vmdk_path, mount.share)
            data_stores = set(ctx.cluster.data_stores.values())
            source_obses = set(
                (instance.data_store for instance in vmdk_details.instances))
            non_source_obses = data_stores - source_obses
            target_obs = random.choice(tuple(non_source_obses))

        with self.step('execute tool'):
            execute_task = vcclient.execute_tool(self.fio, mount, block=False)

        with self.step('mob job'):
            ctx.cluster.dmc.enqueue_mobility_job(vmdk_details.obj_id,
                                                 mount.share, [target_obs])

            mobwait = self.mobwait(vmdk_details,
                                   target_obs,
                                   attempt=300,
                                   interval=10,
                                   block=False)

        with self.step('run in parallel and wait for everything to finish'):
            dispatcher_tuple((execute_task.get, mobwait.get))()
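The final step leans on the callable form of the tuple: per Example #1, wrapping callables (here the bound get methods) produces a _CallableDispatcherTuple, and invoking it calls every element. A tiny sketch of that idea, using plain functions purely for illustration:

def first():
    return 1

def second():
    return 2

both = dispatcher_tuple((first, second))
# Calling the tuple invokes each element; by analogy with the method fan-out
# in Example #20, the per-callable results are expected back as a tuple.
results = both()  # expected to be (1, 2)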
Example #13
    def test_iops_fio_write(self, get_pnfs4_2_mount_points):
        # Create mount and file name
        mounts = get_pnfs4_2_mount_points
        fnames = []
        inodes = []
        tolerance = 15 # %
        ctx.clients[0].mkdirs(mounts[0].path + '/test_layoutstat')

        # TODO: Add support for multiple clients
        for (client, path) in zip(ctx.clients, mounts.path):
            fname = join(path + '/test_layoutstat', "iops_fio_write_" + client.address)
            fnames.append(fname)
        fnames = dispatcher_tuple(fnames)

        # Create file with size=2G using fio tool
        self._logger.info("Creating and laying out file")
        self.fio_run(mounts, fname, runtime='1s', teardown=False)

        inode = client.file_inode(fname)
        inodes.append(inode)
        self._logger.debug('fname {}, inode: {}'.format(fname, inode))

        # TODO: Check if sleep is needed
        # TODO: Check if the sleep doesn't cover up a delta calculation bug
        sleep(self.DB_UPDATE_INTERVAL)

        # Running fio-with existing file again and take results from fio tools
        self._logger.info("Performing I/O")
        fio_res = self.fio_run(mounts, fname, runtime='121s')
        last_write_iops = fio_res.last_result.values()[0]['write_iops']

        self._logger.debug("Waiting for {}".format(self.DB_UPDATE_INTERVAL))
        sleep(self.DB_UPDATE_INTERVAL)

        self._logger.debug("Fetching DB values")
        for inode in inodes:
            stats = ctx.cluster.influx_db.layout_stats(inode=inode)
        stats_entry = stats[0]
        LastRawWriteOpsCompleted = stats_entry.rawWriteOpsCompleted[-1]
        LastRawDuration = self.nano2sec(stats_entry.rawDuration[-1])

        # Calculation: DB write_iops = LastRawWriteOpsCompleted / LastRawDuration, compared against fio's write_iops
        # TODO: Catch potential division by zero
        DBiops = LastRawWriteOpsCompleted / LastRawDuration
        self._logger.debug("Last LastRawDuration: {} nanosec".format(stats_entry.rawDuration[-1]))
        self._logger.debug("Last LastRawDuration: {} sec".format(LastRawDuration))
        self._logger.debug("Last LastRawWriteOpsCompleted: {}".format(LastRawWriteOpsCompleted))
        self._logger.debug("DB write_iops: {} (LastRawWriteOpsCompleted / LastRawDuration)".format(DBiops))
        self._logger.debug("FIO TOOL write_iops: {} ".format(last_write_iops))

        self._logger.info("Comparing values returned by fio to LAYOUTSTATS sent to InfluxDB")
        DBiops = int(DBiops)
        last_write_iops = int(last_write_iops)
        assert self.fuzzy_compare(DBiops, last_write_iops, tolerance), \
            "Comparing {} to {} with tolerance {}% failed".format(DBiops, last_write_iops, tolerance)
Example #14
    def test_latency_fio_read(self, get_pnfs4_2_mount_points):
        # Create mount and file name
        mounts = get_pnfs4_2_mount_points
        fnames = []
        inodes = []
        tolerance = 15 # %
        ctx.clients[0].mkdirs(mounts[0].path + '/test_layoutstat')
        for (client, path) in zip(ctx.clients, mounts.path):
            fname = join(path + '/test_layoutstat', "latency_fio_read_" + client.address)
            fnames.append(fname)
        fnames = dispatcher_tuple(fnames)
        # Create file with size=2G using fio tool
        self._logger.info("Creating and laying out file")
        self.fio_run(mounts, fname, runtime='1s', teardown=False)

        inode = client.file_inode(fname)
        inodes.append(inode)
        self._logger.debug('fname {}, inode: {}'.format(fname, inode))

        # TODO: Check if sleep is needed
        # TODO: Check if the sleep doesn't cover up a delta calculation bug
        sleep(self.DB_UPDATE_INTERVAL)

        # Running fio-with existing file again and take results from fio tools
        self._logger.info("Performing I/O")
        fio_res = self.fio_run(mounts, fname, runtime='121s')
        fio_latency_read = fio_res.last_result.values()[0]['completion_read_mean']
        fio_latency_read = float(fio_latency_read)*1000 # Converted to nano
        self._logger.debug("Waiting for {}".format(self.DB_UPDATE_INTERVAL))
        sleep(self.DB_UPDATE_INTERVAL)

        self._logger.debug("Fetching DB values")
        for inode in inodes:
            stats = ctx.cluster.influx_db.layout_stats(inode=inode)
        stats_entry = stats[0]
        LastRawReadOpsCompleted = stats_entry.rawReadOpsCompleted[-1]
        LastRawReadAggrCompletionTime = stats_entry.rawReadAggrCompletionTime[-1] # Time in nano

        # Calculation: DB read latency = LastRawReadAggrCompletionTime / LastRawReadOpsCompleted, compared against fio's completion_read_mean
        # TODO: Catch potential division by zero
        DB_latency_read = LastRawReadAggrCompletionTime / LastRawReadOpsCompleted
        self._logger.debug("Last rawReadAggrCompletionTime: {} nanosec".format(stats_entry.rawReadAggrCompletionTime[-1]))
        self._logger.debug("Last rawReadAggrCompletionTime: {} sec".format(LastRawReadAggrCompletionTime))
        self._logger.debug("Last LastRawReadOpsCompleted: {}".format(LastRawReadOpsCompleted))
        self._logger.debug("DB latency_read: {} (LastRawReadAggrCompletionTime / LastRawReadOpsCompleted)".format(DB_latency_read))
        self._logger.debug("FIO TOOL latency_read: {} ".format(DB_latency_read))
        DB_latency_read = int(DB_latency_read)
        self._logger.info("Comparing values returned by fio to LAYOUTSTATS sent to InfluxDB")
        assert self.fuzzy_compare(DB_latency_read, fio_latency_read, tolerance), \
            "Comparing {} to {} with tolerance {}% failed".format(DB_latency_read, fio_latency_read, tolerance)
Example #15
    def test_group_user_and_root(self, get_pd_share):
        self.conf_3u_2g(get_pd_share)

        fd0 = ctx.clients[0].open_file(fnames[0], os.O_CREAT | os.O_RDWR | os.O_SYNC | os.O_DIRECT, 0644)
        fds = ctx.clients[1:].open_file(fnames[1:], os.O_RDWR | os.O_SYNC | os.O_DIRECT, 0644)
        fds = dispatcher_tuple((fd0,) + fds)
        self._logger.info("BEFORE WRITE")
        ctx.clients.write_file(fds, 0, 32, 2)
        self._logger.info("BEFORE CLOSE")
        ctx.clients.read_file(fds, 0, 32, 2)
        ctx.clients.close_file(fds)
        self._logger.info("AFTER CLOSE")
        ctx.clients.close_agent()
        ctx.clients[0].execute(['exit'], block=True)
        ctx.clients[0].execute(['userdel', 'client0'], block=True)
        ctx.clients[0].execute(['groupdel', 'smoke'], block=True)
Example #16
 def files_location_verification(self, fds, dest):
     fvs = []
     try:
         for fd in fds:
             fvs.append(
                 fd.wait_for_mobility_process(dest,
                                              attempt=30,
                                              interval=30,
                                              block=False))
         fvs = dispatcher_tuple(fvs)
         fvs.get()
     except CompoundException:
         self.dbg_assert(False, "set A Files should reside in destination "
                         "Vol",
                         dest_vol=dest,
                         set_a_files=fds)
Example #17
    def test_create_remove_dir(self, get_pd_share):
        share = get_pd_share
        mnt_tmpl = Mount(share, NfsVersion.pnfs)
        mounts = ctx.clients.nfs.mount(mnt_tmpl)
        dirnames = []
        for (client, path) in zip(ctx.clients, mounts.path):
            dirname = os.path.join(path, "dirA-" + client.address)
            dirnames.append(dirname)

        dirnames = dispatcher_tuple(dirnames)
        self._logger.debug('Creating directory')
        res = ctx.clients.makedir(dirnames, 0755)

        self._logger.debug('Removing directory')
        res = ctx.clients.rmdir(dirnames)

        self._logger.debug('Closing agent')
        ctx.clients.close_agent()
Example #18
def test_multi_level_set():
    class O(object):
        def __init__(self, i):
            self.p = dispatcher_tuple([P(i), P(i+1)])
            self.ii = 5

        def ooo(self):
            return 'ooo'

    class P(object):
        def __init__(self, i):
            self.i = i

        def ppp(self, a):
            return 'ppp {0}'.format(a)

    ll = dispatcher_tuple([O(0), O(2), O(4)])
    ll.p.yyy = 5
    assert ll.p.yyy == ((5, 5), (5, 5), (5, 5))
Example #19
def test_inner_exception():
    class MyExcp(Exception):
        pass

    class O(object):
        def foo(self):
            raise MyExcp('MyExcp')

    class P(object):
        def foo(self):
            return 'foo'
    ll = dispatcher_tuple([O(), P(), O()])
    try:
        ll.foo()
        assert False
    except CompoundException as e:
        assert e.routines[1].get() == 'foo'
        try:
            e.routines[0].get()
            assert False
        except MyExcp:
            pass
Example #20
def test_multi_level():
    class O(object):
        def __init__(self, i):
            self.p = dispatcher_tuple([P(i), P(i+1)])
            self.ii = 5

        def ooo(self):
            return 'ooo'

    class P(object):
        def __init__(self, i):
            self.i = i

        def ppp(self, a):
            return 'ppp {0}'.format(a)

    ll = dispatcher_tuple([O(0), O(2), O(4)])
    assert ll.ii == (5, 5, 5)
    assert ll.ooo() == ('ooo', 'ooo', 'ooo')
    assert ll.p.i == ((0, 1), (2, 3), (4, 5))
    assert ll.p.ppp(1) == (('ppp 1', 'ppp 1'), ('ppp 1', 'ppp 1'),
                           ('ppp 1', 'ppp 1'))
Example #21
def test_multi_level_del():
    class O(object):
        def __init__(self, i):
            self.p = dispatcher_tuple([P(i), P(i+1)])
            self.ii = 5

        def ooo(self):
            return 'ooo'

    class P(object):
        def __init__(self, i):
            self.i = i

        def ppp(self, a):
            return 'ppp {0}'.format(a)

    ll = dispatcher_tuple([O(0), O(2), O(4)])
    for lobject in ll:
        for pobject in lobject.p:
            assert hasattr(pobject, 'i')
    del ll.p.i
    for lobject in ll:
        for pobject in lobject.p:
            assert not hasattr(pobject, 'i')
Example #22
    def test_playbook6(self, playbook5_fixture):
        """
        function which creates mobility, based on Place On by Volume
        """
        vols = ctx.cluster.data_stores.values()
        vol_a, vol_b = vols[0], vols[1]
        obj_vol_a_files, vol_a_path = self.dir_and_files_creator(
            self.TEST_PATH, self.SET_A_SUFFIX,
            self.PLAYBOOK6_SET_A_FILE_NUMERS)
        obj_vol_b_files, vol_b_path = self.dir_and_files_creator(
            self.TEST_PATH,
            self.SET_B_SUFFIX,
            self.PLAYBOOK6_SET_B_FILE_NUMERS,
            extension='.txt')
        self.define_objective(name=self.OBJECTIVE_A,
                              assignment_path='/',
                              place_on_list='volume:{0}'.format(vol_a.name))
        set_a_res, set_a_fds = self.scatter(obj_vol_a_files)
        set_b_res, set_b_fds = self.scatter(obj_vol_b_files)
        # Expected result: every file should reside in VolA, since
        # the objective was set to VolA under the global share.
        self.dispersed_between_some_volumes(set_a_res, [vol_a.internal_id],
                                            self.PLAYBOOK6_SET_A_FILE_NUMERS)
        self.dispersed_between_some_volumes(set_b_res, [vol_a.internal_id],
                                            self.PLAYBOOK6_SET_B_FILE_NUMERS)
        # Expect the test files (suffix .txt, i.e. set B) to move to VolB.
        self.define_objective(name=self.OBJECTIVE_B,
                              assignment_path='/',
                              place_on_list='volume:{0}'.format(vol_b.name))
        self.condition = ctx.cluster.cli.condition_create(name='test_files',
                                                          pattern='*.txt')
        self.smart_obj.append(
            ctx.cluster.cli.smart_objective_create(name='test_files',
                                                   rule='{0}:{1}'.format(
                                                       self.condition.name,
                                                       self.OBJECTIVE_B)))

        sleep(120)
        fvs = []
        try:
            for fd_b in set_b_fds:
                fvs.append(
                    fd_b.wait_for_mobility_process(vol_b,
                                                   attempt=30,
                                                   interval=30,
                                                   block=False))
            fvs = dispatcher_tuple(fvs)
            fvs.get()
        except CompoundException:
            raise

        # Negative check: set A is expected to stay in VolA despite the objective update.
        try:
            for fd_a in set_a_fds:
                fd_a.wait_for_mobility_process(vol_a, attempt=1)
        except Exception:
            # an exception here means set A is not in VolA, which is an error.
            self.dbg_assert(False,
                            "set A Files should reside in VolA",
                            files=set_a_fds,
                            vol_a=vol_a)
        self.define_objective(name=self.OBJECTIVE_VOL_A_RETURN,
                              assignment_path='/',
                              place_on_list='volume:{0}'.format(vol_a.name))
        self.smart_obj.append(
            ctx.cluster.cli.smart_objective_create(
                name='vol_a_return',
                rule='{0}:{1}'.format(self.condition.name,
                                      self.OBJECTIVE_VOL_A_RETURN)))
        sleep(120)
        fvs = []
        try:
            for fd_b in set_b_fds:
                fvs.append(
                    fd_b.wait_for_mobility_process(vol_a,
                                                   attempt=30,
                                                   interval=30,
                                                   block=False))
            fvs = dispatcher_tuple(fvs)
            fvs.get()
        except CompoundException:
            self.dbg_assert(False, "set b files should reside in vol_a due to "
                            "VolAReturnObjective",
                            vol=vol_a,
                            set_b=set_b_fds)
Example #23
    def test_poc_script_phase_3(self, get_pd_share):

        hypervisor = ctx.hypervisor
        clients = [
            hypervisor.get_first_vm(c.name)
            for c in hypervisor._get_vms_in_folder(self._CLIENTS_DIR)
        ]
        if not ctx.clients and not clients:
            with self.step('Create vm on VVOL'):
                client, address = self._clone_vm_on_vvol(hypervisor)
                ctx.clients += Client(
                    address=address,
                    username='******',
                    password='******',
                    hw_mgmt=(hypervisor._address, hypervisor._username,
                             hypervisor._password, client.vm.name))
        else:
            for c in clients:
                ctx.clients += Client(address=c.wait_for_ip(),
                                      username='******',
                                      password='******',
                                      hw_mgmt=(hypervisor._address,
                                               hypervisor._username,
                                               hypervisor._password, c.name))

        with self.step('share-objective-remove'):
            obj = self._POC_SMART_SHARE
            if obj:
                ctx.cluster.cli.share_objective_remove(name=obj.name, path='/')
            self._POC_SMART_SHARE = None

        with self.step('Deploy tools on clients'):
            ctx.clients.deploy()

        with self.step('limit datastore bandwidth'):
            # get non vm ds caps bw (pdfs cli on vmdk file)
            vmdk_file = self._get_largest_vmdk(get_pd_share,
                                               ctx.clients[0].hw._vm_name)
            vmdk_inode = ctx.cluster.file_inode(vmdk_file)
            fi = ctx.cluster.get_file_info(get_pd_share, vmdk_inode)
            vm_ds = fi.instances[0].data_store
            vm_ds_ip = vm_ds.node.mgmt_ip_address.address
            ds = choice(
                [ds for ds in ctx.cluster.data_stores.values() if ds != vm_ds])
            ds_write_bw = ds.storage_capabilities.performance.write_bandwidth
            # set TC on vm ds to half
            with ctx.get_ds('address', vm_ds_ip).tc.limit('pddata',
                                                          bw=ds_write_bw / 2):
                with self.step('share-objective-add'):
                    poc_share = get_pd_share
                    poc_smart = obj or ctx.cluster.smart_objectives.\
                        get('poc_smart_obj')
                    obj = ctx.cluster.cli.\
                        share_objective_add(name=poc_share.name,
                                            objective=poc_smart.name,
                                            path='/')
                    self._POC_SMART_SHARE = obj

                with self.step('run IO on VM'):
                    dir_name = str(RandPath())
                    ctx.clients.mkdirs(dir_name)
                    dirs = dispatcher_tuple([
                        Mount(poc_share, NfsVersion.no_nfs, path=dir_name)
                        for _ in ctx.clients
                    ])
                    fio = FioCaps().basic(teardown=False)
                    ctx.clients.execute_tool(fio, dirs)

            with self.step('Wait for move'):
                # wait for move, timeout - 300 seconds (pdfs cli on vmdk file)
                t2 = t1 = time()
                while t2 - t1 < self._COPY_TIMEOUT:
                    new_fi = ctx.cluster.get_file_info(get_pd_share,
                                                       vmdk_inode)
                    if {vm_ds} != set(
                        [inst.data_store for inst in new_fi.instances]):
                        break
                    sleep(1)
                    t2 = time()
                assert t2 - t1 < self._COPY_TIMEOUT, \
                    'the move process took more than {} seconds' \
                    ''.format(self._COPY_TIMEOUT)
Example #24
 def _loader_servers(self, attr):
     servers = [Server(**cl) for cl in attr]
     self.__servers = dispatcher_tuple(servers)
Example #25
 def __init__(self, i):
     self.p = dispatcher_tuple([P(i), P(i+1)])
     self.ii = 5
Example #26
 def _loader_clients(self, attr):
     clients = [Client(cluster=self.cluster, **cl) for cl in attr]
     self.__clients = dispatcher_tuple(clients)
Example #27
def test_slicing():
    ll = dispatcher_tuple([1, 2])
    assert ll[1:] == (2,)
Example #28
 def data_directors(self):
     return self.__cluster.data_directors \
         if self.__cluster else dispatcher_tuple([])
Example #29
 def _loader_da_dss(self, attr):
     da_dss = [DaDs(**cl) for cl in attr]
     self.__da_dss = dispatcher_tuple(da_dss)
Example #30
 def _loader_hypervisors(self, attr):
     hypervisors = [Hypervisor(**cl) for cl in attr]
     self.__hypervisors = dispatcher_tuple(hypervisors)