Example #1
    def initialize(self):
        super(LibrbdFio, self).initialize()

        # Clean and Create the run directory
        common.clean_remote_dir(self.run_dir)
        common.make_remote_dir(self.run_dir)

        logger.info('Pausing for 60s for idle monitoring.')
        monitoring.start("%s/idle_monitoring" % self.run_dir)
        time.sleep(60)
        monitoring.stop()

        common.sync_files('%s/*' % self.run_dir, self.out_dir)

        self.mkimages()

        # populate the fio files
        ps = []
        logger.info('Attempting to populate fio files...')
        if not self.use_existing_volumes:
            for volnum in range(self.volumes_per_client):
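                # The backticks are expanded on each remote host: pdsh ships the
                # literal command, so the shell substitutes each client's own
                # FQDN (from common.get_fqdn_cmd()) into the image name.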
                rbd_name = 'cbt-librbdfio-`%s`-%d' % (common.get_fqdn_cmd(), volnum)
                pre_cmd = 'sudo %s --ioengine=rbd --clientname=admin --pool=%s --rbdname=%s --invalidate=0  --rw=write --numjobs=%s --bs=4M --size %dM %s --output-format=%s > /dev/null' % (self.cmd_path, self.pool_name, rbd_name, self.numjobs, self.vol_size, self.names, self.fio_out_format)
                p = common.pdsh(settings.getnodes('clients'), pre_cmd)
                ps.append(p)
            for p in ps:
                p.wait()
        return True
Example #2
    def mkgetputcmd(self, cred_file, gwnum):
        # grab the executable to use
        getput_cmd = '%s ' % self.cmd_path

        # Set the options
        if self.container_prefix is not None:
            container_prefix_flag = '-c%s' % self.container_prefix
            if self.ctype in ('byproc', 'bynodegw'):
                container_prefix_flag = '%s-gw%s' % (container_prefix_flag, gwnum)
            getput_cmd += '%s ' % container_prefix_flag

        # For now we'll only test distinct objects per client/gw 
        if self.object_prefix is not None:
            getput_cmd += '-o%s-`%s`-gw%s ' % (self.object_prefix, common.get_fqdn_cmd(), gwnum)
        else:
            getput_cmd += '-o`%s`-gw%s ' % (common.get_fqdn_cmd(), gwnum)

        getput_cmd += '-s%s ' % self.op_size
        getput_cmd += '-t%s ' % self.test
        getput_cmd += '--procs %s ' % self.procs
        if self.ops_per_proc is not None:
            getput_cmd += '-n%s ' % self.ops_per_proc
        if self.runtime is not None:
            getput_cmd += '--runtime %s ' % self.runtime
        if self.ctype is not None:
            getput_cmd += '--ctype %s ' % self.ctype
        if self.debug is not None:
            getput_cmd += '--debug %s ' % self.debug
        if self.logops is not None:
            getput_cmd += '--logops %s ' % self.logops
        if self.grace is not None:
            getput_cmd += '--grace %s ' % self.grace

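        # point getput at the credentials file it should authenticate with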
        getput_cmd += '--cred %s ' % cred_file

        # End the getput_cmd
        getput_cmd += '> %s/output.gw%s' % (self.run_dir, gwnum)

        return getput_cmd
Example #3
    def __init__(self, archive_dir, cluster, config):
        super(LibrbdFio, self).__init__(archive_dir, cluster, config)

        # FIXME there are too many permutations, need to put results in SQLITE3
        self.cmd_path = config.get('cmd_path', '/usr/bin/fio')
        self.pool_profile = config.get('pool_profile', 'default')
        self.data_pool_profile = config.get('data_pool_profile', None)
        self.time = config.get('time', None)
        self.time_based = bool(config.get('time_based', False))
        self.ramp = config.get('ramp', None)
        self.iodepth = config.get('iodepth', 16)
        self.numjobs = config.get('numjobs', 1)
        self.end_fsync = config.get('end_fsync', 0)
        self.mode = config.get('mode', 'write')
        self.rwmixread = config.get('rwmixread', 50)
        self.rwmixwrite = 100 - self.rwmixread
        self.log_avg_msec = config.get('log_avg_msec', None)
        #        self.ioengine = config.get('ioengine', 'libaio')
        self.op_size = config.get('op_size', 4194304)
        self.pgs = config.get('pgs', 2048)
        self.vol_size = config.get('vol_size', 65536)
        self.vol_object_size = config.get('vol_object_size', 22)
        self.volumes_per_client = config.get('volumes_per_client', 1)
        self.procs_per_volume = config.get('procs_per_volume', 1)
        self.random_distribution = config.get('random_distribution', None)
        self.rate_iops = config.get('rate_iops', None)
        self.fio_out_format = "json,normal"
        self.data_pool = None
        # use_existing_volumes needs to be true to set the pool and rbd names
        self.use_existing_volumes = config.get('use_existing_volumes', False)
        self.pool_name = config.get("poolname", "cbt-librbdfio")
        self.rbdname = config.get('rbdname', '')

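        # Total concurrent fio processes = procs_per_volume x
        # volumes_per_client x number of client nodes.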
        self.total_procs = self.procs_per_volume * self.volumes_per_client * len(
            settings.getnodes('clients').split(','))
        self.run_dir = '%s/osd_ra-%08d/op_size-%08d/concurrent_procs-%03d/iodepth-%03d/%s' % (
            self.run_dir, int(self.osd_ra), int(self.op_size),
            int(self.total_procs), int(self.iodepth), self.mode)
        self.out_dir = self.archive_dir

        self.norandommap = config.get("norandommap", False)
        self.wait_pgautoscaler_timeout = config.get(
            "wait_pgautoscaler_timeout", -1)
        # Make the file names string (repeated across volumes)
        self.names = ''
        for proc_num in range(self.procs_per_volume):
            rbd_name = 'cbt-librbdfio-`%s`-file-%d' % (common.get_fqdn_cmd(),
                                                       proc_num)
            self.names += '--name=%s ' % rbd_name
Example #4
    def mkfiocmd(self, volnum):
        rbdname = 'cbt-librbdfio-`%s`-%d' % (common.get_fqdn_cmd(), volnum)
        out_file = '%s/output.%d' % (self.run_dir, volnum)

        fio_cmd = 'sudo %s --ioengine=rbd --clientname=admin --pool=%s --rbdname=%s --invalidate=0' % (
            self.cmd_path_full, self.pool_name, rbdname)
        fio_cmd += ' --rw=%s' % self.mode
        fio_cmd += ' --output-format=%s' % self.fio_out_format
        if self.mode in ('readwrite', 'randrw'):
            fio_cmd += ' --rwmixread=%s --rwmixwrite=%s' % (self.rwmixread,
                                                            self.rwmixwrite)
#        fio_cmd += ' --ioengine=%s' % self.ioengine
        if self.time is not None:
            fio_cmd += ' --runtime=%s' % self.time
        if self.time_based:
            fio_cmd += ' --time_based'
        if self.ramp is not None:
            fio_cmd += ' --ramp_time=%s' % self.ramp
        fio_cmd += ' --numjobs=%s' % self.numjobs
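        # direct=1 bypasses the page cache so results reflect RBD itself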
        fio_cmd += ' --direct=1'
        fio_cmd += ' --bs=%dB' % self.op_size
        fio_cmd += ' --iodepth=%d' % self.iodepth
        fio_cmd += ' --end_fsync=%s' % self.end_fsync
        #        if self.vol_size:
        #            fio_cmd += ' -- size=%dM' % self.vol_size
        if self.norandommap:
            fio_cmd += ' --norandommap'
        if self.log_iops:
            fio_cmd += ' --write_iops_log=%s' % out_file
        if self.log_bw:
            fio_cmd += ' --write_bw_log=%s' % out_file
        if self.log_lat:
            fio_cmd += ' --write_lat_log=%s' % out_file
        if 'recovery_test' in self.cluster.config:
            fio_cmd += ' --time_based'
        if self.random_distribution is not None:
            fio_cmd += ' --random_distribution=%s' % self.random_distribution
        if self.log_avg_msec is not None:
            fio_cmd += ' --log_avg_msec=%s' % self.log_avg_msec
        if self.rate_iops is not None:
            fio_cmd += ' --rate_iops=%s' % self.rate_iops

        # End the fio_cmd
        fio_cmd += ' %s > %s' % (self.names, out_file)
        return fio_cmd
Example #5
    def fio_command_extra(self, ep_num):
        cmd = ''

        # typical directory endpoints
        if self.endpoint_type == 'directory':
            for proc_num in range(self.procs_per_endpoint):
                cmd += ' --name=%s/`%s`-%s-%s' % (self.endpoints[ep_num], common.get_fqdn_cmd(), ep_num, proc_num)

        # handle rbd endpoints with the librbd engine.
        elif self.endpoint_type == 'rbd':
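            # rbd endpoints are given as "pool/rbdname"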
            pool_name, rbd_name = self.endpoints[ep_num].split("/")
            cmd += ' --clientname=admin'
            cmd += ' --pool=%s' % pool_name
            cmd += ' --rbdname=%s' % rbd_name
            cmd += ' --invalidate=0'
            for proc_num in range(self.procs_per_endpoint):
                rbd_name = '%s-%d' % (self.endpoints[ep_num], proc_num)
                cmd += ' --name=%s' % rbd_name
        return cmd
Example #6
    def __init__(self, cluster, config):
        super(LibrbdFio, self).__init__(cluster, config)

        # FIXME there are too many permutations, need to put results in SQLITE3 
        self.cmd_path = config.get('cmd_path', '/usr/bin/fio')
        self.pool_profile = config.get('pool_profile', 'default')
        self.data_pool_profile = config.get('data_pool_profile', None)
        self.time = config.get('time', None)
        self.time_based = bool(config.get('time_based', False))
        self.ramp = config.get('ramp', None)
        self.iodepth = config.get('iodepth', 16)
        self.numjobs = config.get('numjobs', 1)
        self.end_fsync = str(config.get('end_fsync', 0))
        self.mode = config.get('mode', 'write')
        self.rwmixread = config.get('rwmixread', 50)
        self.rwmixwrite = 100 - self.rwmixread
        self.log_avg_msec = config.get('log_avg_msec', None)
#        self.ioengine = config.get('ioengine', 'libaio')
        self.op_size = config.get('op_size', 4194304)
        self.pgs = config.get('pgs', 2048)
        self.vol_size = config.get('vol_size', 65536)
        self.vol_order = config.get('vol_order', 22)
        self.volumes_per_client = config.get('volumes_per_client', 1)
        self.procs_per_volume = config.get('procs_per_volume', 1)
        self.random_distribution = config.get('random_distribution', None)
        self.rate_iops = config.get('rate_iops', None)
        self.pool_name = "cbt-librbdfio"
        self.fio_out_format = "json,normal"
        self.data_pool = None 
        self.use_existing_volumes = config.get('use_existing_volumes', False)

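        # Total concurrent fio processes = procs_per_volume x
        # volumes_per_client x number of client nodes.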
        self.total_procs = self.procs_per_volume * self.volumes_per_client * len(settings.getnodes('clients').split(','))
        self.run_dir = '%s/osd_ra-%08d/op_size-%08d/concurrent_procs-%03d/iodepth-%03d/%s' % (self.run_dir, int(self.osd_ra), int(self.op_size), int(self.total_procs), int(self.iodepth), self.mode)
        self.out_dir = self.archive_dir

        self.norandommap = config.get("norandommap", False)
        # Make the file names string (repeated across volumes)
        self.names = ''
        for proc_num in range(self.procs_per_volume):
            rbd_name = 'cbt-librbdfio-`%s`-file-%d' % (common.get_fqdn_cmd(), proc_num)
            self.names += '--name=%s ' % rbd_name
Example #7
    def mkfiocmd(self, volnum):
        rbdname = 'cbt-librbdfio-`%s`-%d' % (common.get_fqdn_cmd(), volnum)
        out_file = '%s/output.%d' % (self.run_dir, volnum)

        fio_cmd = 'sudo %s --ioengine=rbd --clientname=admin --pool=%s --rbdname=%s --invalidate=0' % (self.cmd_path_full, self.pool_name, rbdname)
        fio_cmd += ' --rw=%s' % self.mode
        fio_cmd += ' --output-format=%s' % self.fio_out_format
        if self.mode in ('readwrite', 'randrw'):
            fio_cmd += ' --rwmixread=%s --rwmixwrite=%s' % (self.rwmixread, self.rwmixwrite)
#        fio_cmd += ' --ioengine=%s' % self.ioengine
        if self.time is not None:
            fio_cmd += ' --runtime=%s' % self.time
        if self.time_based:
            fio_cmd += ' --time_based'
        if self.ramp is not None:
            fio_cmd += ' --ramp_time=%s' % self.ramp
        fio_cmd += ' --numjobs=%s' % self.numjobs
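        # direct=1 bypasses the page cache so results reflect RBD itself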
        fio_cmd += ' --direct=1'
        fio_cmd += ' --bs=%dB' % self.op_size
        fio_cmd += ' --iodepth=%d' % self.iodepth
        fio_cmd += ' --end_fsync=%s' % self.end_fsync
#        if self.vol_size:
#            fio_cmd += ' -- size=%dM' % self.vol_size
        if self.norandommap:
            fio_cmd += ' --norandommap' 
        fio_cmd += ' --write_iops_log=%s' % out_file
        fio_cmd += ' --write_bw_log=%s' % out_file
        fio_cmd += ' --write_lat_log=%s' % out_file
        if 'recovery_test' in self.cluster.config:
            fio_cmd += ' --time_based'
        if self.random_distribution is not None:
            fio_cmd += ' --random_distribution=%s' % self.random_distribution
        if self.log_avg_msec is not None:
            fio_cmd += ' --log_avg_msec=%s' % self.log_avg_msec
        if self.rate_iops is not None:
            fio_cmd += ' --rate_iops=%s' % self.rate_iops

        # End the fio_cmd
        fio_cmd += ' %s > %s' % (self.names, out_file)
        return fio_cmd
Example #8
    def initialize(self):
        super(KvmRbdFio, self).initialize()
        common.pdsh(settings.getnodes('clients', 'osds', 'mons', 'rgws'),
                    'sudo rm -rf %s' % self.run_dir,
                    continue_if_error=False).communicate()
        common.make_remote_dir(self.run_dir)
        clnts = settings.getnodes('clients')
        logger.info('creating mountpoints...')
        for b in self.block_devices:
            bnm = os.path.basename(b)
            mtpt = '/srv/rbdfio-`%s`-%s' % (common.get_fqdn_cmd(), bnm)
            common.pdsh(clnts,
                        'sudo mkfs.ext4 %s' % b,
                        continue_if_error=False).communicate()
            common.pdsh(clnts,
                        'sudo mkdir -p %s' % mtpt,
                        continue_if_error=False).communicate()
            common.pdsh(clnts,
                        'sudo mount -t ext4 -o noatime %s %s' % (b, mtpt),
                        continue_if_error=False).communicate()
        logger.info('Attempting to initialize fio files...')
        initializer_list = []
        for i in range(self.concurrent_procs):
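            # stripe the fio files round-robin across the block devices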
            b = self.block_devices[i % len(self.block_devices)]
            bnm = os.path.basename(b)
            mtpt = '/srv/rbdfio-`%s`-%s' % (common.get_fqdn_cmd(), bnm)
            fiopath = os.path.join(mtpt, 'fio%d.img' % i)
            pre_cmd = 'sudo %s --rw=write --ioengine=sync --bs=4M ' % self.fio_cmd
            pre_cmd = '%s --size %dM --name=%s > /dev/null' % (
                pre_cmd, self.vol_size, fiopath)
            initializer_list.append(
                common.pdsh(clnts, pre_cmd, continue_if_error=False))
        for p in initializer_list:
            p.communicate()

        # Create the run directory
        common.pdsh(clnts, 'rm -rf %s' % self.run_dir,
                    continue_if_error=False).communicate()
        common.make_remote_dir(self.run_dir)
Example #9
    def initialize(self):
        super(LibrbdFio, self).initialize()

        # Clean and Create the run directory
        common.clean_remote_dir(self.run_dir)
        common.make_remote_dir(self.run_dir)

        logger.info('Running scrub monitoring.')
        monitoring.start("%s/scrub_monitoring" % self.run_dir)
        self.cluster.check_scrub()
        monitoring.stop()

        logger.info('Pausing for 60s for idle monitoring.')
        monitoring.start("%s/idle_monitoring" % self.run_dir)
        time.sleep(60)
        monitoring.stop()

        common.sync_files('%s/*' % self.run_dir, self.out_dir)

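        # set up the RBD images that fio will run against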
        self.mkimages()

        # populate the fio files
        ps = []
        logger.info('Attempting to populate fio files...')
        if not self.use_existing_volumes:
            for volnum in range(self.volumes_per_client):
                rbd_name = 'cbt-librbdfio-`%s`-%d' % (common.get_fqdn_cmd(),
                                                      volnum)
                pre_cmd = 'sudo %s --ioengine=rbd --clientname=admin --pool=%s --rbdname=%s --invalidate=0  --rw=write --numjobs=%s --bs=4M --size %dM %s > /dev/null' % (
                    self.cmd_path, self.pool_name, rbd_name, self.numjobs,
                    self.vol_size, self.names)
                p = common.pdsh(settings.getnodes('clients'), pre_cmd)
                ps.append(p)
            for p in ps:
                p.wait()
        return True
Example #10
    def initialize(self):
        super(KvmRbdFio, self).initialize()
        common.pdsh(settings.getnodes('clients', 'osds', 'mons', 'rgws'),
                    'sudo rm -rf %s' % self.run_dir,
                    continue_if_error=False).communicate()
        common.make_remote_dir(self.run_dir)
        clnts = settings.getnodes('clients')
        logger.info('creating mountpoints...')
        for b in self.block_devices:
            bnm = os.path.basename(b)
            mtpt = '/srv/rbdfio-`%s`-%s' % (common.get_fqdn_cmd(), bnm)
            common.pdsh(clnts, 'sudo mkfs.ext4 %s' % b,
                        continue_if_error=False).communicate()
            common.pdsh(clnts, 'sudo mkdir -p %s' % mtpt,
                        continue_if_error=False).communicate()
            common.pdsh(clnts, 'sudo mount -t ext4 -o noatime %s %s' % (b, mtpt),
                        continue_if_error=False).communicate()
        logger.info('Attempting to initialize fio files...')
        initializer_list = []
        for i in range(self.concurrent_procs):
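            # stripe the fio files round-robin across the block devices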
            b = self.block_devices[i % len(self.block_devices)]
            bnm = os.path.basename(b)
            mtpt = '/srv/rbdfio-`%s`-%s' % (common.get_fqdn_cmd(), bnm)
            fiopath = os.path.join(mtpt, 'fio%d.img' % i)
            pre_cmd = 'sudo %s --rw=write --ioengine=sync --bs=4M ' % self.fio_cmd
            pre_cmd = '%s --size %dM --name=%s > /dev/null' % (
                       pre_cmd, self.vol_size, fiopath)
            initializer_list.append(common.pdsh(clnts, pre_cmd,
                                    continue_if_error=False))
        for p in initializer_list:
            p.communicate()

        # Create the run directory
        common.pdsh(clnts, 'rm -rf %s' % self.run_dir, 
                    continue_if_error=False).communicate()
        common.make_remote_dir(self.run_dir)
Example #11
    def get_local_rbd_name(self, ep_num):
        return '`%s`-%d' % (common.get_fqdn_cmd(), ep_num)
Example #12
    def _run(self, mode, run_dir, out_dir, max_objects, runtime):
        # We'll always drop caches for rados bench
        self.dropcaches()

        concurrent_ops_str = ''
        if self.concurrent_ops:
            concurrent_ops_str = '--concurrent-ios %s' % self.concurrent_ops

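        # the rados version gates flags (--max-objects, --write-omap) that
        # only newer releases understand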
        rados_version = self.get_rados_version()

        # Max Objects
        max_objects_str = ''
        if max_objects:
            if rados_version < 10:
                raise ValueError('max_objects not supported by rados_version < 10')
            max_objects_str = '--max-objects %s' % max_objects

        # Operation type 
        op_type = mode
        if mode == 'prefill':
            op_type = 'write'

        if op_type == 'write':
            op_size_str = '-b %s' % self.op_size
        else:
            op_size_str = ''  

        # Write to OMAP
        write_omap_str = ''
        if self.write_omap:
            if rados_version < 10:
                raise ValueError('write_omap not supported by rados_version < 10')
            write_omap_str = '--write-omap'

        run_dir = os.path.join(self.run_dir, run_dir)
        common.make_remote_dir(run_dir)

        # dump the cluster config
        self.cluster.dump_config(run_dir)

        # Run the backfill testing thread if requested (but not for prefill)
        if mode != 'prefill' and 'recovery_test' in self.cluster.config:
            recovery_callback = self.recovery_callback
            self.cluster.create_recovery_test(run_dir, recovery_callback)

        # Run rados bench
        with monitoring.monitor(run_dir) as monitor:
            logger.info('Running radosbench %s test.' % mode)
            ps = []
            for i in range(self.concurrent_procs):
                out_file = '%s/output.%s' % (run_dir, i)
                objecter_log = '%s/objecter.%s.log' % (run_dir, i)
                if self.pool_per_proc:
                    # support previous behavior of 1 storage pool per rados process
                    pool_name_cmd = 'rados-bench-`{fqdn_cmd}`-{i}'
                    pool_name = pool_name_cmd.format(fqdn_cmd=common.get_fqdn_cmd(), i=i)
                    run_name = ''
                else:
                    # default behavior is to use a single storage pool
                    pool_name = self.pool
                    run_name_fmt = '--run-name {object_set_id} `{fqdn_cmd}`-{i}'
                    run_name = run_name_fmt.format(
                        object_set_id=self.object_set_id,
                        fqdn_cmd=common.get_fqdn_cmd(),
                        i=i)
                rados_bench_cmd_fmt = \
                    '{cmd} -c {conf} -p {pool} bench {op_size_arg} {duration} ' \
                    '{op_type} {concurrent_ops_arg} {max_objects_arg} ' \
                    '{write_omap_arg} {run_name} --no-cleanup ' \
                    '2> {stderr} > {stdout}'
                rados_bench_cmd = rados_bench_cmd_fmt.format(
                    cmd=self.cmd_path_full,
                    conf=self.tmp_conf,
                    pool=pool_name,
                    op_size_arg=op_size_str,
                    duration=runtime,
                    op_type=op_type,
                    concurrent_ops_arg=concurrent_ops_str,
                    max_objects_arg=max_objects_str,
                    write_omap_arg=write_omap_str,
                    run_name=run_name,
                    stderr=objecter_log,
                    stdout=out_file)
                p = common.pdsh(settings.getnodes('clients'), rados_bench_cmd)
                ps.append(p)
            for p in ps:
                p.wait()

        # If we were doing recovery, wait until it's done (but not for prefill).
        if mode != 'prefill' and 'recovery_test' in self.cluster.config:
            self.cluster.wait_recovery_done()

        # Finally, get the historic ops
        self.cluster.dump_historic_ops(run_dir)

        out_dir = os.path.join(self.out_dir, out_dir)
        common.sync_files('%s/*' % run_dir, out_dir)
        self.analyze(out_dir)
Example #13
    def _run(self, mode, run_dir, out_dir):
        # We'll always drop caches for rados bench
        self.dropcaches()

        concurrent_ops_str = ''
        if self.concurrent_ops:
            concurrent_ops_str = '--concurrent-ios %s' % self.concurrent_ops
        # determine the rados version
        rados_version_str = self.get_rados_version()

        m = re.findall(r"version (\d+)", rados_version_str)
        if not m:
            m = re.findall(r"version v(\d+)", rados_version_str)

        rados_version = int(m[0])

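        # -b (op size) applies to writes; pre-v9 rados expects it for all modes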
        if mode == 'write' or rados_version < 9:
            op_size_str = '-b %s' % self.op_size
        else:
            op_size_str = ''

        # Max Objects
        max_objects_str = ''
        if self.max_objects:
            if rados_version < 9:
                raise ValueError('max_objects not supported by rados_version < 9')
            max_objects_str = '--max-objects %s' % self.max_objects

        # Write to OMAP
        write_omap_str = ''
        if self.write_omap:
            if rados_version < 9:
                raise ValueError('write_omap not supported by rados_version < 9')
            write_omap_str = '--write-omap'

        common.make_remote_dir(run_dir)

        # dump the cluster config
        self.cluster.dump_config(run_dir)

        # Run the backfill testing thread if requested
        if 'recovery_test' in self.cluster.config:
            recovery_callback = self.recovery_callback
            self.cluster.create_recovery_test(run_dir, recovery_callback)

        # Run rados bench
        monitoring.start(run_dir)
        logger.info('Running radosbench %s test.' % mode)
        ps = []
        for i in range(self.concurrent_procs):
            out_file = '%s/output.%s' % (run_dir, i)
            objecter_log = '%s/objecter.%s.log' % (run_dir, i)
            # default behavior is to use a single storage pool 
            pool_name = self.pool

            run_name = '--run-name %s`%s`-%s' % (self.object_set_id, common.get_fqdn_cmd(), i)
            if self.pool_per_proc:  # support previous behavior of 1 storage pool per rados process
                pool_name = 'rados-bench-`%s`-%s' % (common.get_fqdn_cmd(), i)
                run_name = ''
            rados_bench_cmd = '%s -c %s -p %s bench %s %s %s %s %s %s %s --no-cleanup 2> %s > %s' % \
                 (self.cmd_path_full, self.tmp_conf, pool_name, op_size_str, self.time, mode, concurrent_ops_str, max_objects_str, write_omap_str, run_name, objecter_log, out_file)
            p = common.pdsh(settings.getnodes('clients'), rados_bench_cmd)
            ps.append(p)
        for p in ps:
            p.wait()
        monitoring.stop(run_dir)

        # If we were doing recovery, wait until it's done.
        if 'recovery_test' in self.cluster.config:
            self.cluster.wait_recovery_done()

        # Finally, get the historic ops
        self.cluster.dump_historic_ops(run_dir)
        common.sync_files('%s/*' % run_dir, out_dir)
        self.analyze(out_dir)
Example #14
    def _run(self, mode, run_dir, out_dir):
        # We'll always drop caches for rados bench
        self.dropcaches()

        concurrent_ops_str = ''
        if self.concurrent_ops:
            concurrent_ops_str = '--concurrent-ios %s' % self.concurrent_ops
        # determine the rados version
        rados_version_str = self.get_rados_version()

        m = re.findall(r"version (\d+)", rados_version_str)
        if not m:
            m = re.findall(r"version v(\d+)", rados_version_str)

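        # keep just the major version number for the feature checks below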
        rados_version = int(m[0])

        if mode == 'write' or rados_version < 9:
            op_size_str = '-b %s' % self.op_size
        else:
            op_size_str = ''

        # Max Objects
        max_objects_str = ''
        if self.max_objects:
            if rados_version < 9:
                raise ValueError('max_objects not supported by rados_version < 9')
            max_objects_str = '--max-objects %s' % self.max_objects

        # Write to OMAP
        write_omap_str = ''
        if self.write_omap:
            if rados_version < 9:
                raise ValueError('write_omap not supported by rados_version < 9')
            write_omap_str = '--write-omap'

        common.make_remote_dir(run_dir)

        # dump the cluster config
        self.cluster.dump_config(run_dir)

        # Run the backfill testing thread if requested
        if 'recovery_test' in self.cluster.config:
            recovery_callback = self.recovery_callback
            self.cluster.create_recovery_test(run_dir, recovery_callback)

        # Run rados bench
        monitoring.start(run_dir)
        logger.info('Running radosbench %s test.' % mode)
        ps = []
        for i in range(self.concurrent_procs):
            out_file = '%s/output.%s' % (run_dir, i)
            objecter_log = '%s/objecter.%s.log' % (run_dir, i)
            # default behavior is to use a single storage pool
            pool_name = self.pool

            run_name = '--run-name %s`%s`-%s' % (self.object_set_id,
                                                 common.get_fqdn_cmd(), i)
            if self.pool_per_proc:  # support previous behavior of 1 storage pool per rados process
                pool_name = 'rados-bench-`%s`-%s' % (common.get_fqdn_cmd(), i)
                run_name = ''
            rados_bench_cmd = '%s -c %s -p %s bench %s %s %s %s %s %s %s --no-cleanup 2> %s > %s' % \
                 (self.cmd_path_full, self.tmp_conf, pool_name, op_size_str, self.time, mode, concurrent_ops_str, max_objects_str, write_omap_str, run_name, objecter_log, out_file)
            p = common.pdsh(settings.getnodes('clients'), rados_bench_cmd)
            ps.append(p)
        for p in ps:
            p.wait()
        monitoring.stop(run_dir)

        # If we were doing recovery, wait until it's done.
        if 'recovery_test' in self.cluster.config:
            self.cluster.wait_recovery_done()

        # Finally, get the historic ops
        self.cluster.dump_historic_ops(run_dir)
        common.sync_files('%s/*' % run_dir, out_dir)
        self.analyze(out_dir)