    def fio_run_read_delay_write(self,
                                 client,
                                 mount,
                                 fname,
                                 runtime,
                                 direct=1,
                                 size='100M',
                                 teardown=True):
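        """
        :summary: Run two overlapping fio jobs on the same file - a randread
            job and a randwrite job that starts after a 2 second delay
        :return: result of client.execute_tool for the generated fio run
        """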

        global_conf = copy(self._FIO_GLOBALS)
        global_conf.update({
            'filename': fname,
            'runtime': runtime,
            'direct': direct
        })

        jobs = {}
        job_reads = copy(self._FIO_IOPS_OPT)
        job_reads.update({'size': size, 'rw': 'randread'})
        jobs['reads'] = job_reads
        job_writes = copy(self._FIO_IOPS_OPT)
        job_writes.update({'size': size, 'rw': 'randwrite', 'startdelay': 2})
        jobs['writes'] = job_writes

        self._logger.debug("Initializing fio read-delay-write settings")
        fio_cap = FioCaps(conf_file='pipelining.fio')
        fio_cap.edit_conf_file(global_conf, **jobs)
        fio = fio_cap.generic(teardown=teardown)

        self._logger.info(
            "Running fio read-delay-write on file {} for {}".format(
                fname, runtime))
        fio_res = client.execute_tool(fio, mount)
        return fio_res
 def fio_init(self,
              fname,
              runtime='30s',
              direct=1,
              size='1G',
              rw='randrw',
              iodepth='8',
              teardown=True):
     """
     :summary: Start fio with the provided arguments
     :param fname: Name of the file to perform I/O to/from
     :param runtime: Time to run (after laying out the file)
     :param direct: Use O_DIRECT
     :param size: File size
     :param rw: I/O type by fio definitions, e.g. read, write, randread, randwrite, randrw
     :param iodepth: I/O queue depth passed to fio
     :param teardown: Passed directly to fio, leave at default unless you want to keep the file
     :return: FIO object to be used with execute_tool method
     """
     fio_cap = FioCaps(conf_file='fio-conf')
     global_conf = copy(self._FIO_GLOBALS)
     global_conf.update({
         'filename': fname,
         'runtime': runtime,
         'direct': direct,
         'iodepth': iodepth
     })
     job_conf = copy(self._FIO_IOPS_OPT)
     job_conf.update({'size': size, 'rw': rw})
     fio_cap.edit_conf_file(global_sec=global_conf, job_a=job_conf)
     fio = fio_cap.generic(teardown=teardown)
     return fio
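For context, a minimal usage sketch of the FIO object returned by fio_init, assuming the ctx.clients / mount fixtures used in the other examples here (the file name and runtime below are illustrative placeholders, not values from the original suite):

    # hypothetical call site inside the same test class;
    # 'mount' stands for a mount fixture as in the other examples
    fio = self.fio_init(fname='init_testfile', runtime='60s', rw='randwrite')
    result = ctx.clients[0].execute_tool(fio, mount)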
Example 3
    def test_dme_restart(self, get_pnfs_mount_points):
        mounts = get_pnfs_mount_points
        fio = FioCaps().short()
        ctx.clients.execute_tool(fio, mounts)

        ctx.cluster.service.stop('pd-dme')
        sleep(5)
        ctx.cluster.service.wait_for('pd-dme')

        fio = FioCaps().short()
        ctx.clients.execute_tool(fio, mounts)
Example 4
 def fio_process(self, frequency, duration=420):
     """
     :param frequency: expected avg frequency
     :param duration: duration of execution
     :return: list of non-blocking execute_tool results, one per client
     """
     f = self.create_fio_conf_file(self.fname,
                                   frequency / (len(ctx.clients) * 2),
                                   duration)
     fio = FioCaps(conf_file=f)
     fio.generic([])
     return [
         ctx.clients[index].execute_tool(fio, mount, block=False)
         for index, mount in enumerate(self.mounts)
     ]
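Since execute_tool is called with block=False, fio_process returns one future-like result per client. A minimal sketch of collecting them, following the .get() pattern used in the other examples (the frequency value is an illustrative placeholder):

    # hypothetical call site inside the same test class
    futures = self.fio_process(frequency=2000)
    for fv in futures:
        fv.get()  # block until each client's fio run finishes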
Example 5
 def test_1_parallel_io_all_shares(self, setup):
     mounts = setup[0]
     clients = setup[1]
     fvs = []
     for client, mount in zip(clients, mounts):
         fio = FioCaps(conf_file='tonian_settings/' +
                       utils.uld_mix_generator(15)).short()
         fvs.append(client.execute_tool(fio, mount, block=False))
     for fv in fvs:
         fv.get()
Example 6
    def test_poc_script_phase_3(self, get_pd_share):

        hypervisor = ctx.hypervisor
        clients = [
            hypervisor.get_first_vm(c.name)
            for c in hypervisor._get_vms_in_folder(self._CLIENTS_DIR)
        ]
        if not ctx.clients and not clients:
            with self.step('Create vm on VVOL'):
                client, address = self._clone_vm_on_vvol(hypervisor)
                ctx.clients += Client(
                    address=address,
                    username='******',
                    password='******',
                    hw_mgmt=(hypervisor._address, hypervisor._username,
                             hypervisor._password, client.vm.name))
        else:
            for c in clients:
                ctx.clients += Client(address=c.wait_for_ip(),
                                      username='******',
                                      password='******',
                                      hw_mgmt=(hypervisor._address,
                                               hypervisor._username,
                                               hypervisor._password, c.name))

        with self.step('share-objective-remove'):
            obj = self._POC_SMART_SHARE
            if obj:
                ctx.cluster.cli.share_objective_remove(name=obj.name, path='/')
            self._POC_SMART_SHARE = None

        with self.step('Deploy tools on clients'):
            ctx.clients.deploy()

        with self.step('limit datastore bandwidth'):
            # get non vm ds caps bw (pdfs cli on vmdk file)
            vmdk_file = self._get_largest_vmdk(get_pd_share,
                                               ctx.clients[0].hw._vm_name)
            vmdk_inode = ctx.cluster.file_inode(vmdk_file)
            fi = ctx.cluster.get_file_info(get_pd_share, vmdk_inode)
            vm_ds = fi.instances[0].data_store
            vm_ds_ip = vm_ds.node.mgmt_ip_address.address
            ds = choice(
                [ds for ds in ctx.cluster.data_stores.values() if ds != vm_ds])
            ds_write_bw = ds.storage_capabilities.performance.write_bandwidth
            # set TC on vm ds to half
            with ctx.get_ds('address', vm_ds_ip).tc.limit('pddata',
                                                          bw=ds_write_bw / 2):
                with self.step('share-objective-add'):
                    poc_share = get_pd_share
                    poc_smart = obj or ctx.cluster.smart_objectives.\
                        get('poc_smart_obj')
                    obj = ctx.cluster.cli.\
                        share_objective_add(name=poc_share.name,
                                            objective=poc_smart.name,
                                            path='/')
                    self._POC_SMART_SHARE = obj

                with self.step('run IO on VM'):
                    dir_name = str(RandPath())
                    ctx.clients.mkdirs(dir_name)
                    dirs = dispatcher_tuple([
                        Mount(poc_share, NfsVersion.no_nfs, path=dir_name)
                        for _ in ctx.clients
                    ])
                    fio = FioCaps().basic(teardown=False)
                    ctx.clients.execute_tool(fio, dirs)

            with self.step('Wait for move'):
                # wait for move, timeout - 300 seconds (pdfs cli on vmdk file)
                t2 = t1 = time()
                while t2 - t1 < self._COPY_TIMEOUT:
                    new_fi = ctx.cluster.get_file_info(get_pd_share,
                                                       vmdk_inode)
                    if {vm_ds} != set(
                        [inst.data_store for inst in new_fi.instances]):
                        break
                    sleep(1)
                    t2 = time()
                assert t2 - t1 < self._COPY_TIMEOUT, \
                    'the move process took more than {} seconds' \
                    ''.format(self._COPY_TIMEOUT)
    def test_performance_mobility_playbook1(self, get_slow_ds,
                                            get_mount_points):
        assert len(ctx.cluster.data_stores) == 1, \
            'Test cannot run with more than a single storage-volume'
        ctx.clients.execute(['rpcdebug', '-m', 'nfs', '-s', 'all'])
        mount = get_mount_points[0]
        slow_ds = get_slow_ds
        slow_ds_ip = slow_ds.node.mgmt_ip_address.address
        fast_ds_physical = choice(
            [ds for ds in ctx.data_stores if ds.address != slow_ds_ip])

        with self.step('Create set_a files'):
            files = [
                join(mount.path, "a_file" + str(file_index))
                for file_index in xrange(3)
            ]
            a_fds = []
            for f in files:
                ctx.clients[0].truncate(f, '500M')
                fd = ctx.clients[0].file_declare(f, mount.share)
                a_fds.append(fd)
                fd.verify_location(slow_ds)

        # Create an objective asking for a multiple of the slow_ds read_iops, so that
        # when the fast_ds is added later the files automatically move to fast_ds, as the
        # requested read_iops can be met by the fast_ds
        with self.step('Create smart-objective on set_b files'):
            slow_ds_read_iops = int(
                slow_ds.storage_capabilities.performance.read_performance.iops)
            min_read_iops = slow_ds_read_iops * 2
            iopso = ctx.cluster.cli.basic_objective_create(
                name='any_iops', min_read_iops=min_read_iops)
            cond = ctx.cluster.cli.condition_create(
                name='bfile_activity',
                pattern='b_file*',
            )
            rule = '{}:{}'.format(cond.name, iopso.name)
            so = ctx.cluster.cli.smart_objective_create(name='Smart_Perf_B',
                                                        rule=rule)
            ctx.cluster.cli.share_objective_add(name=mount.share.name,
                                                objective=so.name,
                                                path='/')
            # SLO updates are not reflected in real time, so sleep for a few seconds before file creation
            sleep(3)

        with self.step('Create set_b files'):
            files = [
                join(mount.path, "b_file" + str(file_index))
                for file_index in xrange(3)
            ]
            b_fds = []
            for f in files:
                ctx.clients[0].truncate(f, '500M')
                fd = ctx.clients[0].file_declare(f, mount.share)
                b_fds.append(fd)
                fd.verify_location(slow_ds)

        with self.step('Add second faster ds'):
            ds_name = fast_ds_physical.get_hostname()
            un_node = ctx.cluster.unauthenticated_nodes[ds_name]
            ctx.cluster.cli._node_add(fast_ds_physical, name=un_node.name)
            lv = fast_ds_physical.get_free_export()
            fast_ds = ctx.cluster.cli.ds_add(node_name=ds_name,
                                             logical_volume_name=lv.name)

        with self.step('Wait for set_b files to move to faster ds'):
            fvs = []
            for fd in b_fds:
                fvs.append(
                    fd.wait_for_mobility(fast_ds,
                                         attempt=60,
                                         interval=30,
                                         block=False))
            fvs = dispatcher_tuple(fvs)
            fvs.get()

        with self.step('Create smart-objective on set_a files'):
            cond_a = ctx.cluster.cli.condition_create(name='afile_activity',
                                                      pattern='a_file*',
                                                      activity='100')
            rule_a = '{}:{}'.format(cond_a.name, iopso.name)
            so_a = ctx.cluster.cli.smart_objective_create(name='Smart_Perf_A',
                                                          rule=rule_a)
            ctx.cluster.cli.share_objective_add(name=mount.share.name,
                                                objective=so_a.name,
                                                path='/')

        with self.step('Run FIO on set_a_IO files'):
            fio_cap = FioCaps(conf_file='fio-set-a')
            conf = copy(self._FIO_GLOBALS)
            conf.update({'filename': a_fds[0].filename})
            fio_cap.edit_conf_file(global_sec=conf, job_a=self._FIO_IOPS_OPT)
            fio = fio_cap.generic(options={'runtime': 300}, teardown=False)
            fio_fv = ctx.clients[0].execute_tool(fio, mount, block=False)

        with self.step('Wait for set_a_io_files files to move to faster ds'):
            a_fds[0].wait_for_mobility(fast_ds, attempt=60)
            fio_fv.get()

        with self.step('Verify set_a non IO files didn\'t move'):
            for fd in a_fds[1:]:
                fd.verify_location(slow_ds)
Example 8
 def fiocaps(self):
     return FioCaps()
    def agg_multiple_ds_single_client(self,
                                      mount,
                                      test_type,
                                      fio_parameter,
                                      influx_parameter,
                                      files_per_client,
                                      client=None):
        """
        :param mount: the mount object related to the single client
        :param test_type: string, which kind of operations to perform -
        write/read ops
        :param fio_parameter: string, parameter to extract from FIO results
        :param influx_parameter: string, parameter to extract from Influx DB
        results
        :param files_per_client: int, how many files to create per client
        :param client: client to run on, defaults to ctx.clients[0]
        :return: dictionary containing FIO and InfluxDB aggregated results per DS.
        """
        client_results = {}
        client = client or ctx.clients[0]
        absolute_path = join(mount.path, "layoutstat", client.address)
        with self.step("Open directory, and run truncate."):
            client.mkdirs(absolute_path)
            file_names = [
                join(absolute_path, 'file' + str(file_index))
                for file_index in range(files_per_client)
            ]
            ds_id_path_dict = self.truncate_function(client, file_names,
                                                     mount.share, self.SIZE)

        with self.step("Run FIO & Gather statistics about the files."):
            fio_cap = FioCaps(conf_file='layoutstat-aggregate-fio-conf')
            for ds_id in ds_id_path_dict:
                fio_cap.edit_conf_file(global_sec=self._FIO_GLOBALS,
                                       job1={
                                           'rw': test_type,
                                           'nrfiles': len(ds_id_path_dict[ds_id]),
                                           'filename': ':'.join(ds_id_path_dict[ds_id])
                                       })
                fio = fio_cap.generic()
                self._logger.info(
                    "client {0} is starting FIO for DS ID {1}".format(
                        client.address, str(ds_id)))
                fio_res = client.execute_tool(fio, mount)
                ds_fio_res = float(
                    fio_res.last_result.values()[0][fio_parameter])
                # Wait until DB is up to date.
                sleep(self.DB_UPDATE_INTERVAL)
                # Extract the related rows from the Influx DB.
                ds_influx_res = getattr(
                    ctx.cluster.influx_db.aggregate_performance(
                        type='ds',
                        id=str(ds_id),
                        order_by='time desc',
                        limit=str(self.LAYOUTSTATS_ROWS))[0], influx_parameter)
                # Calculating average
                ds_influx_res = sum(ds_influx_res) / self.LAYOUTSTATS_ROWS
                # Convert units from nanoseconds to seconds
                ds_influx_res = convert_time_units(
                    ds_influx_res, source_units='N', dest_units='S')
                client_results[ds_id] = (ds_fio_res, ds_influx_res)
                self._logger.info("The results on client {0} for DS ID {1} "
                                  "is DS_Agg_Influx: {2} & DS_FIO: {3}".format(
                                      client.address, str(ds_id),
                                      ds_influx_res, ds_fio_res))

            return client_results
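A minimal sketch of consuming the returned dictionary, comparing the FIO result against the InfluxDB aggregate per data store (the parameter names and tolerance below are illustrative assumptions, not values from the original suite):

    # hypothetical call site inside the same test class;
    # 'mount' stands for a mount fixture as in the other examples
    results = self.agg_multiple_ds_single_client(
        mount=mount,
        test_type='randread',
        fio_parameter='read_latency_mean',
        influx_parameter='read_latency',
        files_per_client=4)
    for ds_id, (fio_val, influx_val) in results.items():
        # allow a 20% deviation between the two measurement sources
        assert abs(fio_val - influx_val) <= 0.2 * fio_val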
Example 10
 def fio_init_rate_iops(self, fname, runtime, teardown):
     fio_cap = FioCaps(conf_file='fio-conf')
     conf = copy(self._FIO_GLOBALS)
     conf.update({'filename': fname, 'runtime': runtime, 'rate_iops': 100})
     fio_cap.edit_conf_file(global_sec=conf, job_a=self._FIO_IOPS_OPT)
     self.fio = fio_cap.generic(teardown=teardown)
Example 11
 def fio_init(self, fname, runtime, direct=1, teardown=True):
     fio_cap = FioCaps(conf_file='fio-conf')
     conf = copy(self._FIO_GLOBALS)
     conf.update({'filename': fname, 'runtime': runtime, 'direct': direct})
     fio_cap.edit_conf_file(global_sec=conf, job_a=self._FIO_IOPS_OPT)
     self.fio = fio_cap.generic(teardown=teardown)