Example #1
File: cosbench.py Project: x-ion-de/cbt
    def run(self):
        super(Cosbench, self).run()
        self.dropcaches()
        self.cluster.dump_config(self.run_dir)
        monitoring.start(self.run_dir)

        # Run cosbench test
        try:
            self._run()
        except KeyboardInterrupt:
            logger.warning("accept keyboard interrupt, cancel this run")
            conf = self.config
            stdout, stderr = common.pdsh(
                "%s@%s" % (self.user, conf["controller"]),
                'sh %s/cli.sh cancel %s' %
                (conf["cosbench_dir"], self.runid)).communicate()
            logger.info("%s", stdout)

        self.check_workload_status()
        self.check_cosbench_res_dir()

        monitoring.stop(self.run_dir)
        self.cluster.dump_historic_ops(self.run_dir)
        common.sync_files('%s/*' % self.run_dir, self.out_dir)
        common.sync_files(
            '%s/archive/%s*' % (self.config["cosbench_dir"], self.runid),
            self.out_dir)
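
Taken together, the run() examples in this listing share one shape: drop caches, dump the cluster configuration, wrap the workload between monitoring.start() and monitoring.stop(), then collect historic ops and sync the results. Below is a minimal sketch of that pattern, not code from cbt itself: MyBenchmark and its echo workload are hypothetical placeholders, the Benchmark base-class import is an assumption, and monitoring, common, and settings are the cbt modules used throughout these examples.

    import common
    import monitoring
    import settings
    from benchmark import Benchmark  # assumption: cbt's Benchmark base class


    class MyBenchmark(Benchmark):
        """Hypothetical benchmark illustrating the monitoring pattern seen in these examples."""

        def run(self):
            super(MyBenchmark, self).run()
            self.dropcaches()                        # start from cold caches, as every example does
            self.cluster.dump_config(self.run_dir)   # record the cluster config next to the results

            monitoring.start(self.run_dir)           # begin system monitoring for this run
            # Placeholder workload: run a command on every client node and wait for it.
            common.pdsh(settings.getnodes('clients'),
                        'echo running my workload').communicate()
            monitoring.stop(self.run_dir)            # stop monitoring before collecting results

            self.cluster.dump_historic_ops(self.run_dir)             # collect historic op data, as the examples do
            common.sync_files('%s/*' % self.run_dir, self.out_dir)   # archive everything to the output directory
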
Example #2
    def initialize(self):
        super(LibrbdFio, self).initialize()

        logger.info('Running scrub monitoring.')
        monitoring.start("%s/scrub_monitoring" % self.run_dir)
        self.cluster.check_scrub()
        monitoring.stop()

        logger.info('Pausing for 60s for idle monitoring.')
        monitoring.start("%s/idle_monitoring" % self.run_dir)
        time.sleep(60)
        monitoring.stop()

        common.sync_files('%s/*' % self.run_dir, self.out_dir)

        self.mkimages()

        # Create the run directory
        common.make_remote_dir(self.run_dir)

        # populate the fio files
        ps = []
        logger.info('Attempting to populating fio files...')
        for i in xrange(self.volumes_per_client):
            pre_cmd = 'sudo %s --ioengine=rbd --clientname=admin --pool=%s --rbdname=cbt-librbdfio-`hostname -s`-%d --invalidate=0  --rw=write --numjobs=%s --bs=4M --size %dM %s > /dev/null' % (
                self.cmd_path, self.poolname, i, self.numjobs, self.vol_size,
                self.names)
            p = common.pdsh(settings.getnodes('clients'), pre_cmd)
            ps.append(p)
        for p in ps:
            p.wait()
        return True
Example #3
File: librbdfio.py Project: varshar16/cbt
    def initialize(self):
        super(LibrbdFio, self).initialize()

        # Clean and Create the run directory
        common.clean_remote_dir(self.run_dir)
        common.make_remote_dir(self.run_dir)

        logger.info('Pausing for 60s for idle monitoring.')
        monitoring.start("%s/idle_monitoring" % self.run_dir)
        time.sleep(60)
        monitoring.stop()

        common.sync_files('%s/*' % self.run_dir, self.out_dir)

        self.mkimages()

        # populate the fio files
        ps = []
        logger.info('Attempting to populating fio files...')
        if (self.use_existing_volumes == False):
            for volnum in range(self.volumes_per_client):
                rbd_name = 'cbt-librbdfio-`%s`-%d' % (common.get_fqdn_cmd(),
                                                      volnum)
                pre_cmd = 'sudo %s --ioengine=rbd --clientname=admin --pool=%s --rbdname=%s --invalidate=0  --rw=write --numjobs=%s --bs=4M --size %dM %s --output-format=%s > /dev/null' % (
                    self.cmd_path, self.pool_name, rbd_name, self.numjobs,
                    self.vol_size, self.names, self.fio_out_format)
                p = common.pdsh(settings.getnodes('clients'), pre_cmd)
                ps.append(p)
            for p in ps:
                p.wait()
Example #4
    def run(self):
        super(KvmRbdFio, self).run()
        # We'll always drop caches for rados bench
        self.dropcaches()

        monitoring.start(self.run_dir)

        time.sleep(5)
        names = ""
        for i in xrange(self.concurrent_procs):
            names += "--name=/srv/rbdfio-`hostname -s`-%d/cbt-kvmrbdfio " % i
        out_file = '%s/output' % self.run_dir
        pre_cmd = 'sudo fio --rw=read -ioengine=sync --numjobs=1 --bs=4M --runtime=1 --size %dM %s > /dev/null' % (self.vol_size * 9/10, names)
        fio_cmd = 'sudo fio --rw=%s -ioengine=%s --runtime=%s --numjobs=1 --direct=1 --bs=%dB --iodepth=%d --size %dM %s > %s' %  (self.mode, self.ioengine, self.time, self.op_size, self.iodepth, self.vol_size * 9/10, names, out_file)
        print 'Attempting to populating fio files...'
        common.pdsh(settings.cluster.get('clients'), pre_cmd).communicate()
        print 'Running rbd fio %s test.' % self.mode
        common.pdsh(settings.cluster.get('clients'), fio_cmd).communicate()
#        ps = []
#        for i in xrange(self.concurrent_procs):
#            out_file = '%s/output.%s' % (self.run_dir, i)
#            p = common.pdsh(settings.cluster.get('clients'), 'sudo fio --rw=%s -ioengine=%s --runtime=%s --name=/srv/rbdfio-`hostname -s`-%d/cbt-rbdfio --numjobs=1 --direct=1 --bs=%dB --iodepth=%d --size %dM > %s' % (self.mode, self.ioengine, self.time, i, self.op_size, self.iodepth, self.vol_size * 9/10, out_file))
#            ps.append(p)
#        for p in ps:
#            p.wait()
        monitoring.stop(self.run_dir)
        common.sync_files('%s/*' % self.run_dir, self.out_dir)
Example #5
File: rbdfio.py Project: zeeshan-07/cbt
 def mkimages(self):
     monitoring.start("%s/pool_monitoring" % self.run_dir)
     self.cluster.rmpool(self.poolname, self.pool_profile)
     self.cluster.mkpool(self.poolname, self.pool_profile, 'rbd')
     common.pdsh(
         settings.getnodes('clients'),
         '/usr/bin/rbd create cbt-kernelrbdfio-`hostname -s` --size %s --pool %s'
         % (self.vol_size, self.poolname)).communicate()
     common.pdsh(
         settings.getnodes('clients'),
         'sudo rbd map cbt-kernelrbdfio-`hostname -s` --pool %s --id admin'
         % self.poolname).communicate()
     common.pdsh(
         settings.getnodes('clients'),
         'sudo mkfs.xfs /dev/rbd/cbt-kernelrbdfio/cbt-kernelrbdfio-`hostname -s`'
     ).communicate()
     common.pdsh(
         settings.getnodes('clients'),
         'sudo mkdir -p -m0755 -- %s/cbt-kernelrbdfio-`hostname -s`' %
         self.cluster.mnt_dir).communicate()
     common.pdsh(
         settings.getnodes('clients'),
         'sudo mount -t xfs -o noatime,inode64 /dev/rbd/cbt-kernelrbdfio/cbt-kernelrbdfio-`hostname -s` %s/cbt-kernelrbdfio-`hostname -s`'
         % self.cluster.mnt_dir).communicate()
     monitoring.stop()
Example #6
    def initialize(self): 
        super(RbdFio, self).initialize()
        self.cleanup()

        if not self.use_existing:
            self.cluster.initialize()
            self.cluster.dump_config(self.run_dir)

            # Setup the pools
            monitoring.start("%s/pool_monitoring" % self.run_dir)
            common.pdsh(settings.getnodes('head'), 'sudo ceph -c %s osd pool create rbdfio %d %d' % (self.tmp_conf, self.pgs, self.pgs)).communicate()
            common.pdsh(settings.getnodes('head'), 'sudo ceph -c %s osd pool set rbdfio size 1' % self.tmp_conf).communicate()
            print 'Checking Healh after pool creation.'
            self.cluster.check_health()
            monitoring.stop()

            # Mount the filesystem
            common.pdsh(settings.getnodes('clients'), 'sudo modprobe rbd').communicate()
            for i in xrange(self.concurrent_procs):
                common.pdsh(settings.getnodes('clients'), 'sudo rbd -c %s create rbdfio/rbdfio-`hostname -s`-%d --size %d' % (self.tmp_conf, i, self.vol_size)).communicate()
                common.pdsh(settings.getnodes('clients'), 'sudo echo "%s %s rbdfio rbdfio-`hostname -s`-%d" | sudo tee /sys/bus/rbd/add && sudo /sbin/udevadm settle' % (self.rbdadd_mons, self.rbdadd_options, i)).communicate()
                common.pdsh(settings.getnodes('clients'), 'sudo mkfs.xfs /dev/rbd/rbdfio/rbdfio-`hostname -s`-%d' % i).communicate()
                common.pdsh(settings.getnodes('clients'), 'sudo mkdir -p -m0755 -- %s/mnt/rbdfio-`hostname -s`-%d' % (self.tmp_dir, i)).communicate()
                common.pdsh(settings.getnodes('clients'), 'sudo mount -t xfs -o noatime,inode64 /dev/rbd/rbdfio/rbdfio-`hostname -s`-%d %s/mnt/rbdfio-`hostname -s`-%d' % (i, self.tmp_dir, i)).communicate()

        print 'Running scrub monitoring'
        monitoring.start("%s/scrub_monitoring" % self.run_dir)
        self.cluster.check_scrub()
        monitoring.stop()

        # Create the run directory
        common.make_remote_dir(self.run_dir)
Example #7
File: getput.py Project: bengland2/cbt
    def run(self):
        # First create a credential file for each gateway
        self.mkcredfiles()

        # We'll always drop caches for rados bench
        self.dropcaches()
        
        # dump the cluster config
        self.cluster.dump_config(self.run_dir)

        # Run the backfill testing thread if requested
        if 'recovery_test' in self.cluster.config:
            recovery_callback = self.recovery_callback
            self.cluster.create_recovery_test(self.run_dir, recovery_callback)

        # Run getput 
        monitoring.start(self.run_dir)
        logger.info('Running getput %s test.' % self.test)

        ps = []
        for i in xrange(0, len(self.auth_urls)):
            cmd = self.mkgetputcmd("%s/gw%02d.cred" % (self.run_dir, i), i)
            p = common.pdsh(settings.getnodes('clients'), cmd)
            ps.append(p)
        for p in ps:
            p.wait()
        monitoring.stop(self.run_dir)

        # If we were doing recovery, wait until it's done.
        if 'recovery_test' in self.cluster.config:
            self.cluster.wait_recovery_done()

        # Finally, get the historic ops
        self.cluster.dump_historic_ops(self.run_dir)
        common.sync_files('%s/*' % self.run_dir, self.out_dir)
Example #8
    def run(self):
        super(KvmRbdFio, self).run()
        # We'll always drop caches for rados bench
        self.dropcaches()

        monitoring.start(self.run_dir)

        time.sleep(5)
        names = ""
        for i in xrange(self.concurrent_procs):
            names += "--name=/srv/rbdfio-`hostname -s`-%d/cbt-kvmrbdfio " % i
        out_file = '%s/output' % self.run_dir
        pre_cmd = 'sudo fio --rw=read -ioengine=sync --numjobs=1 --bs=4M --runtime=1 --size %dM %s > /dev/null' % (
            self.vol_size * 9 / 10, names)
        fio_cmd = 'sudo fio --rw=%s -ioengine=%s --runtime=%s --numjobs=1 --direct=1 --bs=%dB --iodepth=%d --size %dM %s > %s' % (
            self.mode, self.ioengine, self.time, self.op_size, self.iodepth,
            self.vol_size * 9 / 10, names, out_file)
        print 'Attempting to populating fio files...'
        common.pdsh(settings.getnodes('clients'), pre_cmd).communicate()
        print 'Running rbd fio %s test.' % self.mode
        common.pdsh(settings.getnodes('clients'), fio_cmd).communicate()
        #        ps = []
        #        for i in xrange(self.concurrent_procs):
        #            out_file = '%s/output.%s' % (self.run_dir, i)
        #            p = common.pdsh(settings.cluster.get('clients'), 'sudo fio --rw=%s -ioengine=%s --runtime=%s --name=/srv/rbdfio-`hostname -s`-%d/cbt-rbdfio --numjobs=1 --direct=1 --bs=%dB --iodepth=%d --size %dM > %s' % (self.mode, self.ioengine, self.time, i, self.op_size, self.iodepth, self.vol_size * 9/10, out_file))
        #            ps.append(p)
        #        for p in ps:
        #            p.wait()
        monitoring.stop(self.run_dir)
        common.sync_files('%s/*' % self.run_dir, self.out_dir)
Example #9
    def initialize(self):
        super(Cosbench, self).initialize()

        logger.debug('Running cosbench and radosgw check.')
        self.prerun_check()

        logger.debug('Running scrub monitoring.')
        monitoring.start("%s/scrub_monitoring" % self.run_dir)
        self.cluster.check_scrub()
        monitoring.stop()

        logger.debug('Pausing for 60s for idle monitoring.')
        monitoring.start("%s/idle_monitoring" % self.run_dir)
        time.sleep(60)
        monitoring.stop()

        common.sync_files('%s' % self.run_dir, self.out_dir)

        # Create the run directory
        common.make_remote_dir(self.run_dir)

        conf = self.config
        if not self.config["template"]:
            self.config["template"] = "default"
        self.config["workload"] = self.choose_template("default", conf)
        self.prepare_xml(self.config["workload"])
        return True
Example #10
    def run(self):
        super(RbdFio, self).run()
        # Set client readahead
        self.set_client_param('read_ahead_kb', self.client_ra)

        # We'll always drop caches for rados bench
        self.dropcaches()

        common.make_remote_dir(self.run_dir)
        monitoring.start(self.run_dir)
        # Run rados bench
        print 'Running rbd fio %s test.' % self.mode
        names = ""
        for i in xrange(self.concurrent_procs):
            names += "--name=%s/mnt/rbdfio-`hostname -s`-%d/cbt-rbdfio " % (self.tmp_dir, i)
        out_file = '%s/output' % self.run_dir
        fio_cmd = 'sudo fio --rw=%s -ioengine=%s --runtime=%s --numjobs=1 --direct=1 --bs=%dB --iodepth=%d --size %dM %s > %s' %  (self.mode, self.ioengine, self.time, self.op_size, self.iodepth, self.vol_size * 9/10, names, out_file)
        common.pdsh(settings.getnodes('clients'), fio_cmd).communicate()
#        ps = []
#        for i in xrange(self.concurrent_procs):
#            out_file = '%s/output.%s' % (self.run_dir, i)
#            p = common.pdsh(settings.cluster.get('clients'), 'sudo fio --rw=%s -ioengine=%s --runtime=%s --name=/srv/rbdfio-`hostname -s`-%d/cbt-rbdfio --numjobs=1 --direct=1 --bs=%dB --iodepth=%d --size %dM > %s' % (self.mode, self.ioengine, self.time, i, self.op_size, self.iodepth, self.vol_size * 9/10, out_file))
#            ps.append(p)
#        for p in ps:
#            p.wait()
        monitoring.stop(self.run_dir)
        common.sync_files('%s/*' % self.run_dir, self.out_dir)
Example #11
    def _run(self, mode, run_dir, out_dir):
        # We'll always drop caches for rados bench
        self.dropcaches()

        if self.concurrent_ops:
            concurrent_ops_str = '--concurrent-ios %s' % self.concurrent_ops
        op_size_str = '-b %s' % self.op_size

        common.make_remote_dir(run_dir)

        # dump the cluster config
        common.dump_config(run_dir)

        monitoring.start(run_dir)
        # Run rados bench
        print 'Running radosbench read test.'
        ps = []
        for i in xrange(self.concurrent_procs):
            out_file = '%s/output.%s' % (run_dir, i)
            objecter_log = '%s/objecter.%s.log' % (run_dir, i)
            p = common.pdsh(settings.getnodes('clients'), '/usr/bin/rados -p rados-bench-`hostname -s`-%s %s bench %s %s %s --no-cleanup 2> %s > %s' % (i, op_size_str, self.time, mode, concurrent_ops_str, objecter_log, out_file))
            ps.append(p)
        for p in ps:
            p.wait()
        monitoring.stop(run_dir)

        # Get the historic ops
        common.dump_historic_ops(run_dir)
        common.sync_files('%s/*' % run_dir, out_dir)
Example #12
    def initialize(self):
        super(Cosbench, self).initialize()

        logger.debug('Running cosbench and radosgw check.')
        self.prerun_check()

        logger.debug('Running scrub monitoring.')
        monitoring.start("%s/scrub_monitoring" % self.run_dir)
        self.cluster.check_scrub()
        monitoring.stop()

        logger.debug('Pausing for 60s for idle monitoring.')
        monitoring.start("%s/idle_monitoring" % self.run_dir)
        time.sleep(60)
        monitoring.stop()

        common.sync_files('%s' % self.run_dir, self.out_dir)

        # Create the run directory
        common.make_remote_dir(self.run_dir)

        conf = self.config
        if not self.config["template"]:
            self.config["template"] = "default"
        self.config["workload"] = self.choose_template("default", conf)
        self.prepare_xml(self.config["workload"])
        return True
Example #13
    def _run(self, mode, run_dir, out_dir):
        # We'll always drop caches for rados bench
        self.dropcaches()

        if self.concurrent_ops:
            concurrent_ops_str = "--concurrent-ios %s" % self.concurrent_ops
        op_size_str = "-b %s" % self.op_size

        common.make_remote_dir(run_dir)
        monitoring.start(run_dir)
        # Run rados bench
        print "Running radosbench read test."
        ps = []
        for i in xrange(self.concurrent_procs):
            out_file = "%s/output.%s" % (run_dir, i)
            objecter_log = "%s/objecter.%s.log" % (run_dir, i)
            p = common.pdsh(
                settings.cluster.get("clients"),
                "/usr/bin/rados -p rados-bench-%s %s bench %s %s %s --no-cleanup 2> %s > %s"
                % (i, op_size_str, self.time, mode, concurrent_ops_str, objecter_log, out_file),
            )
            ps.append(p)
        for p in ps:
            p.wait()
        monitoring.stop(run_dir)
        common.sync_files("%s/*" % run_dir, out_dir)
Example #14
    def initialize(self):
        super(LibrbdFio, self).initialize()

        print "Running scrub monitoring."
        monitoring.start("%s/scrub_monitoring" % self.run_dir)
        self.cluster.check_scrub()
        monitoring.stop()

        print "Pausing for 60s for idle monitoring."
        monitoring.start("%s/idle_monitoring" % self.run_dir)
        time.sleep(60)
        monitoring.stop()

        common.sync_files("%s/*" % self.run_dir, self.out_dir)

        self.mkimages()

        # Create the run directory
        common.make_remote_dir(self.run_dir)

        # populate the fio files
        ps = []
        print "Attempting to populating fio files..."
        for i in xrange(self.volumes_per_client):
            pre_cmd = (
                "sudo %s --ioengine=rbd --clientname=admin --pool=%s --rbdname=cbt-librbdfio-`hostname -s`-%d --invalidate=0  --rw=write --numjobs=%s --bs=4M --size %dM %s > /dev/null"
                % (self.cmd_path, self.poolname, i, self.numjobs, self.vol_size, self.names)
            )
            p = common.pdsh(settings.getnodes("clients"), pre_cmd)
            ps.append(p)
        for p in ps:
            p.wait()
        return True
Example #15
    def run(self):
        super(Fio, self).run()

        # We'll always drop caches for rados bench
        self.dropcaches()

        # dump the cluster config
        self.cluster.dump_config(self.run_dir)

        monitoring.start(self.run_dir)

        time.sleep(5)

        # Run the backfill testing thread if requested
        if 'recovery_test' in self.cluster.config:
            recovery_callback = self.recovery_callback
            self.cluster.create_recovery_test(self.run_dir, recovery_callback)

        logger.info('Running fio %s test.', self.mode)
        ps = []
        for i in xrange(self.endpoints_per_client):
            p = common.pdsh(settings.getnodes('clients'), self.run_command(i))
            ps.append(p)
        for p in ps:
            p.wait()
        # If we were doing recovery, wait until it's done.
        if 'recovery_test' in self.cluster.config:
            self.cluster.wait_recovery_done()

        monitoring.stop(self.run_dir)

        # Finally, get the historic ops
        self.cluster.dump_historic_ops(self.run_dir)
        common.sync_files('%s/*' % self.run_dir, self.out_dir)
        self.analyze(self.out_dir)
Example #16
    def run(self):
        super(LibrbdFio, self).run()

        # We'll always drop caches for rados bench
        self.dropcaches()

        # dump the cluster config
        self.cluster.dump_config(self.run_dir)

        monitoring.start(self.run_dir)

        time.sleep(5)

        # Run the backfill testing thread if requested
        if "recovery_test" in self.cluster.config:
            recovery_callback = self.recovery_callback
            self.cluster.create_recovery_test(self.run_dir, recovery_callback)

        print "Running rbd fio %s test." % self.mode
        ps = []
        for i in xrange(self.volumes_per_client):
            fio_cmd = self.mkfiocmd(i)
            p = common.pdsh(settings.getnodes("clients"), fio_cmd)
            ps.append(p)
        for p in ps:
            p.wait()
        # If we were doing recovery, wait until it's done.
        if "recovery_test" in self.cluster.config:
            self.cluster.wait_recovery_done()

        monitoring.stop(self.run_dir)

        # Finally, get the historic ops
        self.cluster.dump_historic_ops(self.run_dir)
        common.sync_files("%s/*" % self.run_dir, self.out_dir)
Example #17
    def run(self):
        super(CephTestRados, self).run()
        
        # Remake the pool
        self.mkpool()
        self.dropcaches()
        self.cluster.dump_config(self.run_dir)
        monitoring.start(self.run_dir)
        time.sleep(5)
        # Run the backfill testing thread if requested
        if 'recovery_test' in self.cluster.config:
            recovery_callback = self.recovery_callback
            self.cluster.create_recovery_test(self.run_dir, recovery_callback)

        logger.info('Running ceph_test_rados.')
        ps = []
        for i in xrange(1):
            p = common.pdsh(settings.getnodes('clients'), self.mkcmd())
            ps.append(p)
        for p in ps:
            p.wait()
        # If we were doing recovery, wait until it's done.
        if 'recovery_test' in self.cluster.config:
            self.cluster.wait_recovery_done()

        monitoring.stop(self.run_dir)

        # Finally, get the historic ops
        self.cluster.dump_historic_ops(self.run_dir)
        common.sync_files('%s/*' % self.run_dir, self.out_dir)
Example #18
    def run(self):
        super(KvmRbdFio, self).run()
        # Set client readahead
        self.set_client_param('read_ahead_kb', self.client_ra)
        clnts = settings.getnodes('clients')

        # We'll always drop caches for rados bench
        self.dropcaches()

        monitoring.start(self.run_dir)

        time.sleep(5)
        # Run the backfill testing thread if requested
        if 'recovery_test' in self.cluster.config:
            recovery_callback = self.recovery_callback
            self.cluster.create_recovery_test(self.run_dir, recovery_callback)

        logger.info('Starting rbd fio %s test.', self.mode)

        fio_process_list = []
        for i in range(self.concurrent_procs):
            b = self.block_devices[i % len(self.block_devices)]
            bnm = os.path.basename(b)
            mtpt = '/srv/rbdfio-`hostname -s`-%s' % bnm
            fiopath = os.path.join(mtpt, 'fio%d.img' % i)
            out_file = '%s/output.%d' % (self.run_dir, i)
            fio_cmd = 'sudo %s' % self.fio_cmd
            fio_cmd += ' --rw=%s' % self.mode
            if (self.mode == 'readwrite' or self.mode == 'randrw'):
                fio_cmd += ' --rwmixread=%s --rwmixwrite=%s' % (
                    self.rwmixread, self.rwmixwrite)
            fio_cmd += ' --ioengine=%s' % self.ioengine
            fio_cmd += ' --runtime=%s' % self.time
            fio_cmd += ' --ramp_time=%s' % self.ramp
            if self.startdelay:
                fio_cmd += ' --startdelay=%s' % self.startdelay
            if self.rate_iops:
                fio_cmd += ' --rate_iops=%s' % self.rate_iops
            fio_cmd += ' --numjobs=%s' % self.numjobs
            fio_cmd += ' --direct=1'
            fio_cmd += ' --bs=%dB' % self.op_size
            fio_cmd += ' --iodepth=%d' % self.iodepth
            fio_cmd += ' --size=%dM' % self.vol_size
            if self.log_iops:
                fio_cmd += ' --write_iops_log=%s' % out_file
            if self.log_bw:
                fio_cmd += ' --write_bw_log=%s' % out_file
            if self.log_lat:
                fio_cmd += ' --write_lat_log=%s' % out_file
            if 'recovery_test' in self.cluster.config:
                fio_cmd += ' --time_based'
            fio_cmd += ' --name=%s > %s' % (fiopath, out_file)
            fio_process_list.append(
                common.pdsh(clnts, fio_cmd, continue_if_error=False))
        for p in fio_process_list:
            p.communicate()
        monitoring.stop(self.run_dir)
        logger.info('Finished rbd fio test')

        common.sync_files('%s/*' % self.run_dir, self.out_dir)
Example #19
    def run(self):
        super(RbdFio, self).run()
        # We'll always drop caches for rados bench
        self.dropcaches()

        common.make_remote_dir(self.run_dir)
        monitoring.start(self.run_dir)
        # Run rados bench
        print 'Running rbd fio %s test.' % self.mode
        names = ""
        for i in xrange(self.concurrent_procs):
            names += "--name=/srv/rbdfio-`hostname -s`-%d/cbt-rbdfio " % i
        out_file = '%s/output' % self.run_dir
        fio_cmd = 'sudo fio --rw=%s -ioengine=%s --runtime=%s --numjobs=1 --direct=1 --bs=%dB --iodepth=%d --size %dM %s > %s' % (
            self.mode, self.ioengine, self.time, self.op_size, self.iodepth,
            self.vol_size * 9 / 10, names, out_file)
        common.pdsh(settings.getnodes('clients'), fio_cmd).communicate()
        #        ps = []
        #        for i in xrange(self.concurrent_procs):
        #            out_file = '%s/output.%s' % (self.run_dir, i)
        #            p = common.pdsh(settings.cluster.get('clients'), 'sudo fio --rw=%s -ioengine=%s --runtime=%s --name=/srv/rbdfio-`hostname -s`-%d/cbt-rbdfio --numjobs=1 --direct=1 --bs=%dB --iodepth=%d --size %dM > %s' % (self.mode, self.ioengine, self.time, i, self.op_size, self.iodepth, self.vol_size * 9/10, out_file))
        #            ps.append(p)
        #        for p in ps:
        #            p.wait()
        monitoring.stop(self.run_dir)
        common.sync_files('%s/*' % self.run_dir, self.out_dir)
Example #20
    def initialize(self): 
        super(RbdFio, self).initialize()

        logger.info('Running scrub monitoring.')
        monitoring.start("%s/scrub_monitoring" % self.run_dir)
        self.cluster.check_scrub()
        monitoring.stop()

        logger.info('Pausing for 60s for idle monitoring.')
        monitoring.start("%s/idle_monitoring" % self.run_dir)
        time.sleep(60)
        monitoring.stop()

        common.sync_files('%s/*' % self.run_dir, self.out_dir)

        self.mkimages()
 
        # Create the run directory
        common.make_remote_dir(self.run_dir)

        # populate the fio files
        logger.info('Attempting to populating fio files...')
        pre_cmd = 'sudo %s --ioengine=%s --rw=write --numjobs=%s --bs=4M --size %dM %s > /dev/null' % (self.cmd_path, self.ioengine, self.numjobs, self.vol_size*0.9, self.names)
        common.pdsh(settings.getnodes('clients'), pre_cmd).communicate()

        return True
Example #21
    def run(self):
        super(CephTestRados, self).run()

        # Remake the pool
        self.mkpool()
        self.dropcaches()
        self.cluster.dump_config(self.run_dir)
        monitoring.start(self.run_dir)
        time.sleep(5)
        # Run the backfill testing thread if requested
        if 'recovery_test' in self.cluster.config:
            recovery_callback = self.recovery_callback
            self.cluster.create_recovery_test(self.run_dir, recovery_callback)

        logger.info('Running ceph_test_rados.')
        ps = []
        for i in xrange(1):
            p = common.pdsh(settings.getnodes('clients'), self.mkcmd())
            ps.append(p)
        for p in ps:
            p.wait()
        # If we were doing recovery, wait until it's done.
        if 'recovery_test' in self.cluster.config:
            self.cluster.wait_recovery_done()

        monitoring.stop(self.run_dir)

        # Finally, get the historic ops
        self.cluster.dump_historic_ops(self.run_dir)
        common.sync_files('%s/*' % self.run_dir, self.out_dir)
Example #22
File: librbdfio.py Project: bengland2/cbt
    def initialize(self): 
        super(LibrbdFio, self).initialize()

        # Clean and Create the run directory
        common.clean_remote_dir(self.run_dir)
        common.make_remote_dir(self.run_dir)

        logger.info('Pausing for 60s for idle monitoring.')
        monitoring.start("%s/idle_monitoring" % self.run_dir)
        time.sleep(60)
        monitoring.stop()

        common.sync_files('%s/*' % self.run_dir, self.out_dir)

        self.mkimages()

        # populate the fio files
        ps = []
        logger.info('Attempting to populating fio files...')
        if (self.use_existing_volumes == False):
          for volnum in xrange(self.volumes_per_client):
              rbd_name = 'cbt-librbdfio-`%s`-%d' % (common.get_fqdn_cmd(), volnum)
              pre_cmd = 'sudo %s --ioengine=rbd --clientname=admin --pool=%s --rbdname=%s --invalidate=0  --rw=write --numjobs=%s --bs=4M --size %dM %s --output-format=%s > /dev/null' % (self.cmd_path, self.pool_name, rbd_name, self.numjobs, self.vol_size, self.names, self.fio_out_format)
              p = common.pdsh(settings.getnodes('clients'), pre_cmd)
              ps.append(p)
          for p in ps:
              p.wait()
        return True
Example #23
File: getput.py Project: yunfeiguan/cbt
    def run(self):
        # First create a credential file for each gateway
        self.mkcredfiles()

        # We'll always drop caches for rados bench
        self.dropcaches()

        # dump the cluster config
        self.cluster.dump_config(self.run_dir)

        # Run the backfill testing thread if requested
        if 'recovery_test' in self.cluster.config:
            recovery_callback = self.recovery_callback
            self.cluster.create_recovery_test(self.run_dir, recovery_callback)

        # Run getput
        monitoring.start(self.run_dir)
        logger.info('Running getput %s test.' % self.test)

        ps = []
        for i in range(0, len(self.auth_urls)):
            cmd = self.mkgetputcmd("%s/gw%02d.cred" % (self.run_dir, i), i)
            p = common.pdsh(settings.getnodes('clients'), cmd)
            ps.append(p)
        for p in ps:
            p.wait()
        monitoring.stop(self.run_dir)

        # If we were doing recovery, wait until it's done.
        if 'recovery_test' in self.cluster.config:
            self.cluster.wait_recovery_done()

        # Finally, get the historic ops
        self.cluster.dump_historic_ops(self.run_dir)
        common.sync_files('%s/*' % self.run_dir, self.out_dir)
Example #24
    def initialize(self): 
        super(LibrbdFio, self).initialize()

        print 'Running scrub monitoring.'
        monitoring.start("%s/scrub_monitoring" % self.run_dir)
        self.cluster.check_scrub()
        monitoring.stop()

        print 'Pausing for 60s for idle monitoring.'
        monitoring.start("%s/idle_monitoring" % self.run_dir)
        time.sleep(60)
        monitoring.stop()

        common.sync_files('%s/*' % self.run_dir, self.out_dir)

        self.mkimages()

        # Create the run directory
        common.make_remote_dir(self.run_dir)

        # populate the fio files
        print 'Attempting to populating fio files...'
        pre_cmd = 'sudo %s --ioengine=rbd --clientname=admin --pool=%s --rbdname=cbt-librbdfio-`hostname -s` --invalidate=0  --rw=write --numjobs=%s --bs=4M --size %dM %s > /dev/null' % (self.cmd_path, self.poolname, self.numjobs, self.vol_size, self.names)
        common.pdsh(settings.getnodes('clients'), pre_cmd).communicate()

        return True
Example #25
    def run(self):
        super(RbdFio, self).run()

        # Set client readahead
        self.set_client_param('read_ahead_kb', self.client_ra)

        # We'll always drop caches for rados bench
        self.dropcaches()

        monitoring.start(self.run_dir)

        # Run the backfill testing thread if requested
        if 'recovery_test' in self.cluster.config:
            recovery_callback = self.recovery_callback
            self.cluster.create_recovery_test(self.run_dir, recovery_callback)

        time.sleep(5)
        out_file = '%s/output' % self.run_dir
        fio_cmd = 'sudo %s' % (self.cmd_path_full)
        fio_cmd += ' --rw=%s' % self.mode
        if (self.mode == 'readwrite' or self.mode == 'randrw'):
            fio_cmd += ' --rwmixread=%s --rwmixwrite=%s' % (self.rwmixread,
                                                            self.rwmixwrite)
        fio_cmd += ' --ioengine=%s' % self.ioengine
        if self.time is not None:
            fio_cmd += ' --runtime=%s' % self.time
        if self.ramp is not None:
            fio_cmd += ' --ramp_time=%s' % self.ramp
        fio_cmd += ' --numjobs=%s' % self.numjobs
        fio_cmd += ' --direct=%s' % self.direct
        fio_cmd += ' --bs=%dB' % self.op_size
        fio_cmd += ' --iodepth=%d' % self.iodepth
        if self.vol_size:
            fio_cmd += ' --size=%dM' % (int(self.vol_size) * 0.9)
        if self.log_iops:
            fio_cmd += ' --write_iops_log=%s' % out_file
        if self.log_bw:
            fio_cmd += ' --write_bw_log=%s' % out_file
        if self.log_lat:
            fio_cmd += ' --write_lat_log=%s' % out_file
        if 'recovery_test' in self.cluster.config:
            fio_cmd += ' --time_based'
        if self.random_distribution is not None:
            fio_cmd += ' --random_distribution=%s' % self.random_distribution
        fio_cmd += ' %s > %s' % (self.names, out_file)
        if self.log_avg_msec is not None:
            fio_cmd += ' --log_avg_msec=%s' % self.log_avg_msec
        logger.info('Running rbd fio %s test.', self.mode)
        common.pdsh(settings.getnodes('clients'), fio_cmd).communicate()

        # If we were doing recovery, wait until it's done.
        if 'recovery_test' in self.cluster.config:
            self.cluster.wait_recovery_done()

        monitoring.stop(self.run_dir)

        # Finally, get the historic ops
        self.cluster.dump_historic_ops(self.run_dir)
        common.sync_files('%s/*' % self.run_dir, self.out_dir)
Example #26
    def run(self):
        super(LibrbdFio, self).run()

        # We'll always drop caches for rados bench
        self.dropcaches()

        # dump the cluster config
        self.cluster.dump_config(self.run_dir)

        monitoring.start(self.run_dir)

        time.sleep(5)
        out_file = '%s/output' % self.run_dir
        fio_cmd = 'sudo %s --ioengine=rbd --clientname=admin --pool=%s --rbdname=cbt-librbdfio-`hostname -s` --invalidate=0' % (
            self.cmd_path_full, self.poolname)
        fio_cmd += ' --rw=%s' % self.mode
        if (self.mode == 'readwrite' or self.mode == 'randrw'):
            fio_cmd += ' --rwmixread=%s --rwmixwrite=%s' % (self.rwmixread,
                                                            self.rwmixwrite)
#        fio_cmd += ' --ioengine=%s' % self.ioengine
        if self.time is not None:
            fio_cmd += ' --runtime=%s' % self.time
        if self.ramp is not None:
            fio_cmd += ' --ramp_time=%s' % self.ramp
        fio_cmd += ' --numjobs=%s' % self.numjobs
        fio_cmd += ' --direct=1'
        fio_cmd += ' --bs=%dB' % self.op_size
        fio_cmd += ' --iodepth=%d' % self.iodepth
        fio_cmd += ' --end_fsync=%s' % self.end_fsync
        #        if self.vol_size:
        #            fio_cmd += ' -- size=%dM' % self.vol_size
        fio_cmd += ' --write_iops_log=%s' % out_file
        fio_cmd += ' --write_bw_log=%s' % out_file
        fio_cmd += ' --write_lat_log=%s' % out_file
        if 'recovery_test' in self.cluster.config:
            fio_cmd += ' --time_based'
        if self.random_distribution is not None:
            fio_cmd += ' --random_distribution=%s' % self.random_distribution
        if self.log_avg_msec is not None:
            fio_cmd += ' --log_avg_msec=%s' % self.log_avg_msec
        fio_cmd += ' %s > %s' % (self.names, out_file)

        # Run the backfill testing thread if requested
        if 'recovery_test' in self.cluster.config:
            recovery_callback = self.recovery_callback
            self.cluster.create_recovery_test(self.run_dir, recovery_callback)

        print 'Running rbd fio %s test.' % self.mode
        common.pdsh(settings.getnodes('clients'), fio_cmd).communicate()

        # If we were doing recovery, wait until it's done.
        if 'recovery_test' in self.cluster.config:
            self.cluster.wait_recovery_done()

        monitoring.stop(self.run_dir)

        # Finally, get the historic ops
        self.cluster.dump_historic_ops(self.run_dir)
        common.sync_files('%s/*' % self.run_dir, self.out_dir)
Example #27
 def mkimages(self):
     monitoring.start("%s/pool_monitoring" % self.run_dir)
     self.cluster.rmpool(self.poolname, self.pool_profile)
     self.cluster.mkpool(self.poolname, self.pool_profile)
     for node in settings.getnodes('clients').split(','):
         node = node.rpartition("@")[2]
         common.pdsh(settings.getnodes('head'), '/usr/bin/rbd create cbt-librbdfio-%s --size %s --pool %s --order %s' % (node, self.vol_size, self.poolname, self.vol_order)).communicate()
     monitoring.stop()
Example #28
 def mkpools(self):
     monitoring.start("%s/pool_monitoring" % self.run_dir)
     for i in xrange(self.concurrent_procs):
         for node in settings.getnodes('clients').split(','):
             node = node.rpartition("@")[2]
             self.cluster.rmpool('rados-bench-%s-%s' % (node, i), self.pool_profile)
             self.cluster.mkpool('rados-bench-%s-%s' % (node, i), self.pool_profile)
     monitoring.stop()
Example #29
 def mkpools(self):
     monitoring.start("%s/pool_monitoring" % self.run_dir)
     for i in xrange(self.concurrent_procs):
         for node in settings.getnodes('clients').split(','):
             node = node.rpartition("@")[2]
             self.cluster.rmpool('rados-bench-%s-%s' % (node, i), self.pool_profile)
             self.cluster.mkpool('rados-bench-%s-%s' % (node, i), self.pool_profile)
     monitoring.stop()
Example #30
    def run(self):
        super(LibrbdFio, self).run()

        # We'll always drop caches for rados bench
        self.dropcaches()

        # dump the cluster config
        self.cluster.dump_config(self.run_dir)

        monitoring.start(self.run_dir)

        time.sleep(5)
        out_file = '%s/output' % self.run_dir
        fio_cmd = 'sudo %s --ioengine=rbd --clientname=admin --pool=%s --rbdname=cbt-librbdfio-`hostname -s` --invalidate=0' % (self.cmd_path_full, self.poolname)
        fio_cmd += ' --rw=%s' % self.mode
        if (self.mode == 'readwrite' or self.mode == 'randrw'):
            fio_cmd += ' --rwmixread=%s --rwmixwrite=%s' % (self.rwmixread, self.rwmixwrite)
#        fio_cmd += ' --ioengine=%s' % self.ioengine
        if self.time is not None:
            fio_cmd += ' --runtime=%s' % self.time
        if self.ramp is not None:
            fio_cmd += ' --ramp_time=%s' % self.ramp
        fio_cmd += ' --numjobs=%s' % self.numjobs
        fio_cmd += ' --direct=1'
        fio_cmd += ' --bs=%dB' % self.op_size
        fio_cmd += ' --iodepth=%d' % self.iodepth
        fio_cmd += ' --end_fsync=%s' % self.end_fsync
#        if self.vol_size:
#            fio_cmd += ' -- size=%dM' % self.vol_size
        fio_cmd += ' --write_iops_log=%s' % out_file
        fio_cmd += ' --write_bw_log=%s' % out_file
        fio_cmd += ' --write_lat_log=%s' % out_file
        if 'recovery_test' in self.cluster.config:
            fio_cmd += ' --time_based'
        if self.random_distribution is not None:
            fio_cmd += ' --random_distribution=%s' % self.random_distribution
        if self.log_avg_msec is not None:
            fio_cmd += ' --log_avg_msec=%s' % self.log_avg_msec
        fio_cmd += ' %s > %s' % (self.names, out_file)

        # Run the backfill testing thread if requested
        if 'recovery_test' in self.cluster.config:
            recovery_callback = self.recovery_callback
            self.cluster.create_recovery_test(self.run_dir, recovery_callback)

        print 'Running rbd fio %s test.' % self.mode
        common.pdsh(settings.getnodes('clients'), fio_cmd).communicate()


        # If we were doing recovery, wait until it's done.
        if 'recovery_test' in self.cluster.config:
            self.cluster.wait_recovery_done()

        monitoring.stop(self.run_dir)

        # Finally, get the historic ops
        self.cluster.dump_historic_ops(self.run_dir)
        common.sync_files('%s/*' % self.run_dir, self.out_dir)
Example #31
    def _run(self, mode, run_dir, out_dir):
        # We'll always drop caches for rados bench
        #self.dropcaches()

        if self.concurrent_ops:
            concurrent_ops_str = '--concurrent-ios %s' % self.concurrent_ops
        #determine rados version
        #rados_version_str = subprocess.check_output(["rados", "-v"])
        rados_version_str, err = common.pdsh(
            settings.getnodes('head'), '/usr/bin/rados -v').communicate()
        m = re.findall("version (\d+)", rados_version_str)
        rados_version = int(m[0])

        if mode in ['write'] or rados_version < 9:
            op_size_str = '-b %s' % self.op_size
        else:
            op_size_str = ''

        common.make_remote_dir(run_dir)

        # dump the cluster config
        self.cluster.dump_config(run_dir)

        # Run the backfill testing thread if requested
        if 'recovery_test' in self.cluster.config:
            recovery_callback = self.recovery_callback
            self.cluster.create_recovery_test(run_dir, recovery_callback)

        # Run rados bench
        monitoring.start(run_dir)
        logger.info('Running radosbench %s test.' % mode)
        ps = []
        for i in xrange(self.concurrent_procs):
            out_file = '%s/output.%s' % (run_dir, i)
            objecter_log = '%s/objecter.%s.log' % (run_dir, i)
            # default behavior is to use a single storage pool
            pool_name = self.pool
            run_name = '--run-name %s`hostname -s`-%s' % (self.object_set_id,
                                                          i)
            if self.pool_per_proc:  # support previous behavior of 1 storage pool per rados process
                pool_name = 'rados-bench-`hostname -s`-%s' % i
                run_name = ''
            rados_bench_cmd = '%s -c %s -p %s bench %s %s %s %s %s --no-cleanup 2> %s > %s' % \
                 (self.cmd_path_full, self.tmp_conf, pool_name, op_size_str, self.time, mode, concurrent_ops_str, run_name, objecter_log, out_file)
            p = common.pdsh(settings.getnodes('clients'), rados_bench_cmd)
            ps.append(p)
        for p in ps:
            p.wait()
        monitoring.stop(run_dir)

        # If we were doing recovery, wait until it's done.
        if 'recovery_test' in self.cluster.config:
            self.cluster.wait_recovery_done()

        # Finally, get the historic ops
        self.cluster.dump_historic_ops(run_dir)
        common.sync_files('%s/*' % run_dir, out_dir)
Example #32
    def initialize(self):
        super(Cosbench, self).initialize()

        logger.debug('Running cosbench and radosgw check.')
        self.prerun_check()

        logger.debug('Pausing for 60s for idle monitoring.')
        monitoring.start("%s/idle_monitoring" % self.run_dir)
        time.sleep(60)
        monitoring.stop()

        common.sync_files('%s' % self.run_dir, self.out_dir)

        # Create the run directory
        common.make_remote_dir(self.run_dir)

        conf = self.config
        if not self.config["template"]:
            self.config["template"] = "default"
        self.config["workload"] = self.choose_template("default", conf)

        # add a "prepare" stage if mode is read or mix
        if not self.container_prepare_check():
            workstage_init = {
                "name": "init",
                "work": {
                    "type":
                    "init",
                    "workers":
                    conf["workers"],
                    "config":
                    "containers=r(1,%s);cprefix=%s-%s-%s" %
                    (conf["containers_max"], conf["obj_size"], conf["mode"],
                     conf["objects_max"])
                }
            }
            workstage_prepare = {
                "name": "prepare",
                "work": {
                    "type":
                    "prepare",
                    "workers":
                    conf["workers"],
                    "config":
                    "containers=r(1,%s);objects=r(1,%s);cprefix=%s-%s-%s;sizes=c(%s)%s"
                    % (conf["containers_max"], conf["objects_max"],
                       conf["obj_size"], conf["mode"], conf["objects_max"],
                       conf["obj_size_num"], conf["obj_size_unit"])
                }
            }
            self.config["workload"]["workflow"]["workstage"].insert(
                0, workstage_prepare)
            self.config["workload"]["workflow"]["workstage"].insert(
                0, workstage_init)

        self.prepare_xml(self.config["workload"])
        return True
Example #33
    def run(self):
        super(StdFioBench, self).run()
        # Set client readahead
        self.set_client_param('read_ahead_kb', self.client_ra)

        # We'll always drop caches for rados bench
        self.dropcaches()

        monitoring.start(self.run_dir)

        time.sleep(5)
        out_file = '%s/output' % self.run_dir
        fio_cmd = 'sudo %s' % self.fio_cmd
        fio_cmd += ' --rw=%s' % self.mode
        if (self.mode == 'readwrite' or self.mode == 'randrw'):
            fio_cmd += ' --rwmixread=%s --rwmixwrite=%s' % (self.rwmixread,
                                                            self.rwmixwrite)
        fio_cmd += ' --ioengine=%s' % self.ioengine
        fio_cmd += ' --runtime=%s' % self.time
        fio_cmd += ' --ramp_time=%s' % self.ramp
        fio_cmd += ' --numjobs=%s' % self.numjobs
        fio_cmd += ' --direct=1'
        fio_cmd += ' --randrepeat=0'
        fio_cmd += ' --group_reporting'
        fio_cmd += ' --bs=%dB' % self.op_size
        fio_cmd += ' --iodepth=%d' % self.iodepth
        fio_cmd += ' --size=%dM' % self.vol_size
        fio_cmd += ' --output-format=%s' % self.output_format
        if (self.output_format == 'normal'):
            fio_cmd += ' --write_iops_log=%s' % out_file
            fio_cmd += ' --write_bw_log=%s' % out_file
            fio_cmd += ' --write_lat_log=%s' % out_file
        if 'recovery_test' in self.cluster.config:
            fio_cmd += ' --time_based'
        fio_cmd += ' %s > %s 2> %s/error_log' % (self.names, out_file,
                                                 self.run_dir)

        # Run the backfill testing thread if requested
        if 'recovery_test' in self.cluster.config:
            recovery_callback = self.recovery_callback
            self.cluster.create_recovery_test(self.run_dir, recovery_callback)

        logger.info('Running fio %s test.', self.mode)
        common.pdsh(settings.getnodes('clients'), fio_cmd).communicate()

        # FIO output Parsing logic
        if (self.output_format == 'terse'):
            hostname = '`hostname -s`'
            parse_cmd = 'sudo sed "s/$/;%s;%s;%s;%s;%s;%s;%s/" ' % (
                hostname, self.mode, self.op_size, self.iodepth, self.numjobs,
                self.client_ra, self.concurrent_procs)
            parse_cmd += ' %s > %s/terse_output' % (out_file, self.run_dir)
            common.pdsh(settings.getnodes('clients'), parse_cmd).communicate()

        monitoring.stop(self.run_dir)
        common.sync_files('%s/*' % self.run_dir, self.out_dir)
Example #34
File: rbdfio.py Project: sirspock/cbt
 def mkimages(self):
     monitoring.start("%s/pool_monitoring" % self.run_dir)
     self.cluster.rmpool(self.poolname, self.pool_profile)
     self.cluster.mkpool(self.poolname, self.pool_profile)
     common.pdsh(settings.getnodes('clients'), '/usr/bin/rbd create cbt-kernelrbdfio-`hostname -s` --size %s --pool %s' % (self.vol_size, self.poolname)).communicate()
     common.pdsh(settings.getnodes('clients'), 'sudo rbd map cbt-kernelrbdfio-`hostname -s` --pool %s --id admin' % self.poolname).communicate()
     common.pdsh(settings.getnodes('clients'), 'sudo mkfs.xfs /dev/rbd/cbt-kernelrbdfio/cbt-kernelrbdfio-`hostname -s`').communicate()
     common.pdsh(settings.getnodes('clients'), 'sudo mkdir -p -m0755 -- %s/cbt-kernelrbdfio-`hostname -s`' % self.cluster.mnt_dir).communicate()
     common.pdsh(settings.getnodes('clients'), 'sudo mount -t xfs -o noatime,inode64 /dev/rbd/cbt-kernelrbdfio/cbt-kernelrbdfio-`hostname -s` %s/cbt-kernelrbdfio-`hostname -s`' % self.cluster.mnt_dir).communicate()
     monitoring.stop()
Example #35
File: rbdfio.py Project: sirspock/cbt
    def run(self):
        super(RbdFio, self).run()

        # Set client readahead
        self.set_client_param('read_ahead_kb', self.client_ra)

        # We'll always drop caches for rados bench
        self.dropcaches()

        monitoring.start(self.run_dir)

        # Run the backfill testing thread if requested
        if 'recovery_test' in self.cluster.config:
            recovery_callback = self.recovery_callback
            self.cluster.create_recovery_test(self.run_dir, recovery_callback)

        time.sleep(5)
        out_file = '%s/output' % self.run_dir
        fio_cmd = 'sudo %s' % (self.cmd_path_full)
        fio_cmd += ' --rw=%s' % self.mode
        if (self.mode == 'readwrite' or self.mode == 'randrw'):
            fio_cmd += ' --rwmixread=%s --rwmixwrite=%s' % (self.rwmixread, self.rwmixwrite)
        fio_cmd += ' --ioengine=%s' % self.ioengine
        if self.time is not None:
            fio_cmd += ' --runtime=%s' % self.time
        if self.ramp is not None:
            fio_cmd += ' --ramp_time=%s' % self.ramp
        fio_cmd += ' --numjobs=%s' % self.numjobs
        fio_cmd += ' --direct=1'
        fio_cmd += ' --bs=%dB' % self.op_size
        fio_cmd += ' --iodepth=%d' % self.iodepth
        if self.vol_size:
            fio_cmd += ' --size=%dM' % (int(self.vol_size) * 0.9)
        fio_cmd += ' --write_iops_log=%s' % out_file
        fio_cmd += ' --write_bw_log=%s' % out_file
        fio_cmd += ' --write_lat_log=%s' % out_file
        if 'recovery_test' in self.cluster.config:
            fio_cmd += ' --time_based'
        if self.random_distribution is not None:
            fio_cmd += ' --random_distribution=%s' % self.random_distribution
        fio_cmd += ' %s > %s' % (self.names, out_file)
        if self.log_avg_msec is not None:
            fio_cmd += ' --log_avg_msec=%s' % self.log_avg_msec
        logger.info('Running rbd fio %s test.', self.mode)
        common.pdsh(settings.getnodes('clients'), fio_cmd).communicate()

        # If we were doing recovery, wait until it's done.
        if 'recovery_test' in self.cluster.config:
            self.cluster.wait_recovery_done()

        monitoring.stop(self.run_dir)

        # Finally, get the historic ops
        self.cluster.dump_historic_ops(self.run_dir)
        common.sync_files('%s/*' % self.run_dir, self.out_dir)
Example #36
File: rawfio.py Project: rzarzynski/cbt
    def run(self):
        super(RawFio, self).run()
        # Set client readahead
        clnts = settings.getnodes('clients')

        # We'll always drop caches for rados bench
        self.dropcaches()

        monitoring.start(self.run_dir)

        time.sleep(5)

        logger.info('Starting raw fio %s test.', self.mode)

        fio_process_list = []
        for i in range(self.concurrent_procs):
            b = self.block_devices[i % len(self.block_devices)]
            fiopath = b
            out_file = '%s/output.%d' % (self.run_dir, i)
            fio_cmd = 'sudo %s' % self.fio_cmd
            fio_cmd += ' --rw=%s' % self.mode
            if (self.mode == 'readwrite' or self.mode == 'randrw'):
                fio_cmd += ' --rwmixread=%s --rwmixwrite=%s' % (
                    self.rwmixread, self.rwmixwrite)
            fio_cmd += ' --ioengine=%s' % self.ioengine
            fio_cmd += ' --runtime=%s' % self.time
            fio_cmd += ' --ramp_time=%s' % self.ramp
            if self.startdelay:
                fio_cmd += ' --startdelay=%s' % self.startdelay
            if self.rate_iops:
                fio_cmd += ' --rate_iops=%s' % self.rate_iops
            fio_cmd += ' --numjobs=%s' % self.numjobs
            fio_cmd += ' --direct=%s' % self.direct
            fio_cmd += ' --bs=%dB' % self.op_size
            fio_cmd += ' --iodepth=%d' % self.iodepth
            fio_cmd += ' --size=%dM' % self.vol_size
            if self.log_iops:
                fio_cmd += ' --write_iops_log=%s' % out_file
            if self.log_bw:
                fio_cmd += ' --write_bw_log=%s' % out_file
            if self.log_lat:
                fio_cmd += ' --write_lat_log=%s' % out_file
            fio_cmd += ' --output-format=%s' % self.fio_out_format
            if 'recovery_test' in self.cluster.config:
                fio_cmd += ' --time_based'
            fio_cmd += ' --name=%s > %s' % (fiopath, out_file)
            logger.debug("FIO CMD: %s" % fio_cmd)
            fio_process_list.append(
                common.pdsh(clnts, fio_cmd, continue_if_error=False))
        for p in fio_process_list:
            p.communicate()
        monitoring.stop(self.run_dir)
        logger.info('Finished raw fio test')

        common.sync_files('%s/*' % self.run_dir, self.out_dir)
Example #37
File: kvmrbdfio.py Project: ASBishop/cbt
    def run(self):
        super(KvmRbdFio, self).run()
        # Set client readahead
        self.set_client_param('read_ahead_kb', self.client_ra)
        clnts = settings.getnodes('clients')

        # We'll always drop caches for rados bench
        self.dropcaches()

        monitoring.start(self.run_dir)

        time.sleep(5)
        # Run the backfill testing thread if requested
        if 'recovery_test' in self.cluster.config:
            recovery_callback = self.recovery_callback
            self.cluster.create_recovery_test(self.run_dir, recovery_callback)

        logger.info('Starting rbd fio %s test.', self.mode)

        fio_process_list = []
        for i in range(self.concurrent_procs):
            b = self.block_devices[i % len(self.block_devices)]
            bnm = os.path.basename(b)
            mtpt = '/srv/rbdfio-`hostname -s`-%s' % bnm
            fiopath = os.path.join(mtpt, 'fio%d.img' % i)
            out_file = '%s/output.%d' % (self.run_dir, i)
            fio_cmd = 'sudo %s' % self.fio_cmd
            fio_cmd += ' --rw=%s' % self.mode
            if (self.mode == 'readwrite' or self.mode == 'randrw'):
                fio_cmd += ' --rwmixread=%s --rwmixwrite=%s' % (self.rwmixread, self.rwmixwrite)
            fio_cmd += ' --ioengine=%s' % self.ioengine
            fio_cmd += ' --runtime=%s' % self.time
            fio_cmd += ' --ramp_time=%s' % self.ramp
            if self.startdelay:
                fio_cmd += ' --startdelay=%s' % self.startdelay
            if self.rate_iops:
                fio_cmd += ' --rate_iops=%s' % self.rate_iops
            fio_cmd += ' --numjobs=%s' % self.numjobs
            fio_cmd += ' --direct=1'
            fio_cmd += ' --bs=%dB' % self.op_size
            fio_cmd += ' --iodepth=%d' % self.iodepth
            fio_cmd += ' --size=%dM' % self.vol_size 
            fio_cmd += ' --write_iops_log=%s' % out_file
            fio_cmd += ' --write_bw_log=%s' % out_file
            fio_cmd += ' --write_lat_log=%s' % out_file
            if 'recovery_test' in self.cluster.config:
                fio_cmd += ' --time_based'
            fio_cmd += ' --name=%s > %s' % (fiopath, out_file)
            fio_process_list.append(common.pdsh(clnts, fio_cmd, continue_if_error=False))
        for p in fio_process_list:
            p.communicate()
        monitoring.stop(self.run_dir)
        logger.info('Finished rbd fio test')

        common.sync_files('%s/*' % self.run_dir, self.out_dir)
Example #38
File: radosbench.py Project: JevonQ/cbt
    def _run(self, mode, run_dir, out_dir):
        # We'll always drop caches for rados bench
        self.dropcaches()

        if self.concurrent_ops:
            concurrent_ops_str = '--concurrent-ios %s' % self.concurrent_ops
        #determine rados version
        rados_version_str, err = common.pdsh(settings.getnodes('head'), '/usr/bin/rados -v').communicate()
        m = re.findall("version (\d+)", rados_version_str)
        rados_version = int(m[0])

        if mode in ['write'] or rados_version < 9:
            op_size_str = '-b %s' % self.op_size
        else:
            op_size_str = ''


        common.make_remote_dir(run_dir)

        # dump the cluster config
        self.cluster.dump_config(run_dir)

        # Run the backfill testing thread if requested
        if 'recovery_test' in self.cluster.config:
            recovery_callback = self.recovery_callback
            self.cluster.create_recovery_test(run_dir, recovery_callback)

        # Run rados bench
        monitoring.start(run_dir)
        logger.info('Running radosbench %s test.' % mode)
        ps = []
        for i in xrange(self.concurrent_procs):
            out_file = '%s/output.%s' % (run_dir, i)
            objecter_log = '%s/objecter.%s.log' % (run_dir, i)
            # default behavior is to use a single storage pool 
            pool_name = self.pool
            run_name = '--run-name %s`hostname -s`-%s'%(self.object_set_id, i)
            if self.pool_per_proc: # support previous behavior of 1 storage pool per rados process
                pool_name = 'rados-bench-`hostname -s`-%s'%i
                run_name = ''
            rados_bench_cmd = '%s -c %s -p %s bench %s %s %s %s %s --no-cleanup 2> %s > %s' % \
                 (self.cmd_path_full, self.tmp_conf, pool_name, op_size_str, self.time, mode, concurrent_ops_str, run_name, objecter_log, out_file)
            p = common.pdsh(settings.getnodes('clients'), rados_bench_cmd)
            ps.append(p)
        for p in ps:
            p.wait()
        monitoring.stop(run_dir)

        # If we were doing recovery, wait until it's done.
        if 'recovery_test' in self.cluster.config:
            self.cluster.wait_recovery_done()

        # Finally, get the historic ops
        self.cluster.dump_historic_ops(run_dir)
        common.sync_files('%s/*' % run_dir, out_dir)
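The only non-obvious step above is the version probe: the major Ceph release is parsed out of the `rados -v` banner to decide whether an explicit block size must be passed. A self-contained sketch of that parsing, with a representative (made-up) version string:

    import re

    # Representative `/usr/bin/rados -v` output; the real string depends on the installed release.
    rados_version_str = 'ceph version 14.2.22 (0123456789abcdef) nautilus (stable)'
    rados_version = int(re.findall(r'version (\d+)', rados_version_str)[0])   # -> 14
    # As in the snippet above, -b is only needed for write workloads or pre-9 rados builds.
    op_size_str = '-b 4194304' if rados_version < 9 else ''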
Example #39
 def mkimages(self):
     monitoring.start("%s/pool_monitoring" % self.run_dir)
     self.cluster.rmpool(self.poolname, self.pool_profile)
     self.cluster.mkpool(self.poolname, self.pool_profile)
     for node in settings.getnodes('clients').split(','):
         node = node.rpartition("@")[2]
         common.pdsh(
             settings.getnodes('head'),
             '/usr/bin/rbd create cbt-librbdfio-%s --size %s --pool %s --order %s'
             % (node, self.vol_size, self.poolname, self.vol_order))
     monitoring.stop()
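With placeholder values filled in, the per-node command that this loop hands to pdsh expands to something like the following (all values are illustrative; --size is in MB and --order is the RBD object-size order):

    # Hypothetical values; the real ones come from the benchmark configuration.
    node, vol_size, poolname, vol_order = 'client01', 65536, 'cbt-librbdfio', 22
    cmd = '/usr/bin/rbd create cbt-librbdfio-%s --size %s --pool %s --order %s' % (
        node, vol_size, poolname, vol_order)
    # -> /usr/bin/rbd create cbt-librbdfio-client01 --size 65536 --pool cbt-librbdfio --order 22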
Example #40
File: radosbench.py Project: bengland2/cbt
    def initialize(self): 
        super(Radosbench, self).initialize()

        logger.info('Pausing for 60s for idle monitoring.')
        monitoring.start("%s/idle_monitoring" % self.run_dir)
        time.sleep(60)
        monitoring.stop()

        common.sync_files('%s/*' % self.run_dir, self.out_dir)

        return True
Example #41
    def initialize(self):
        super(Radosbench, self).initialize()

        logger.info('Pausing for 60s for idle monitoring.')
        monitoring.start("%s/idle_monitoring" % self.run_dir)
        time.sleep(60)
        monitoring.stop()

        common.sync_files('%s/*' % self.run_dir, self.out_dir)

        return True
Example #42
File: librbdfio.py Project: ASBishop/cbt
    def mkimages(self):
        monitoring.start("%s/pool_monitoring" % self.run_dir)
        if not self.use_existing_volumes:
          self.cluster.rmpool(self.poolname, self.pool_profile)
          self.cluster.mkpool(self.poolname, self.pool_profile)
          for node in settings.getnodes('clients').split(','):
              for volnum in xrange(0, self.volumes_per_client):
                  node = node.rpartition("@")[2]
#                  common.pdsh(settings.getnodes('head'), '/usr/bin/rbd create cbt-librbdfio-%s-%d --size %s --pool %s --order %s' % (node, volnum, self.vol_size, self.poolname, self.vol_order)).communicate()
                  self.cluster.mkimage('cbt-librbdfio-%s-%d' % (node,volnum), self.vol_size, self.poolname, self.vol_order)
        monitoring.stop()
Example #43
File: radosbench.py Project: JevonQ/cbt
 def mkpools(self):
     monitoring.start("%s/pool_monitoring" % self.run_dir)
     if self.pool_per_proc: # allow use of a separate storage pool per process
         for i in xrange(self.concurrent_procs):
             for node in settings.getnodes('clients').split(','):
                 node = node.rpartition("@")[2]
                 self.cluster.rmpool('rados-bench-%s-%s' % (node, i), self.pool_profile)
                 self.cluster.mkpool('rados-bench-%s-%s' % (node, i), self.pool_profile)
     else: # the default behavior is to use a single Ceph storage pool for all rados bench processes
         self.cluster.rmpool('rados-bench-cbt', self.pool_profile)
         self.cluster.mkpool('rados-bench-cbt', self.pool_profile)
     monitoring.stop()
 def mkimages(self):
     monitoring.start("%s/pool_monitoring" % self.run_dir)
     if not self.use_existing_volumes:
         self.cluster.rmpool(self.poolname, self.pool_profile)
         self.cluster.mkpool(self.poolname, self.pool_profile)
         for node in settings.getnodes('clients').split(','):
             for volnum in xrange(0, self.volumes_per_client):
                 node = node.rpartition("@")[2]
                 #                  common.pdsh(settings.getnodes('head'), '/usr/bin/rbd create cbt-librbdfio-%s-%d --size %s --pool %s --order %s' % (node, volnum, self.vol_size, self.poolname, self.vol_order)).communicate()
                 self.cluster.mkimage(
                     'cbt-librbdfio-%s-%d' % (node, volnum), self.vol_size,
                     self.poolname, self.vol_order)
     monitoring.stop()
Example #45
 def mkimages(self):
     monitoring.start("%s/pool_monitoring" % self.run_dir)
     self.cluster.rmpool(self.poolname, self.pool_profile)
     self.cluster.mkpool(self.poolname, self.pool_profile)
     for node in settings.getnodes("clients").split(","):
         for volnum in xrange(0, self.volumes_per_client):
             node = node.rpartition("@")[2]
             common.pdsh(
                 settings.getnodes("head"),
                 "/usr/bin/rbd create cbt-librbdfio-%s-%d --size %s --pool %s --order %s"
                 % (node, volnum, self.vol_size, self.poolname, self.vol_order),
             ).communicate()
     monitoring.stop()
 def mkpools(self):
     monitoring.start("%s/pool_monitoring" % self.run_dir)
     if self.pool_per_proc:  # allow use of a separate storage pool per process
         for i in xrange(self.concurrent_procs):
             for node in settings.getnodes('clients').split(','):
                 node = node.rpartition("@")[2]
                 # self.cluster.rmpool('rados-bench-%s-%s' % (node, i), self.pool_profile)
                 self.cluster.mkpool('rados-bench-%s-%s' % (node, i),
                                     self.pool_profile)
     else:  # the default behavior is to use a single Ceph storage pool for all rados bench processes
         # self.cluster.rmpool('rados-bench-cbt', self.pool_profile)
         self.cluster.mkpool('rados-bench-cbt', self.pool_profile)
     monitoring.stop()
Example #47
 def mkimages(self):
     monitoring.start("%s/pool_monitoring" % self.run_dir)
     if not self.use_existing_volumes:
       self.cluster.rmpool(self.pool_name, self.pool_profile)
       self.cluster.mkpool(self.pool_name, self.pool_profile, 'rbd')
       if self.data_pool_profile:
           self.data_pool = self.pool_name + "-data"
           self.cluster.rmpool(self.data_pool, self.data_pool_profile)
           self.cluster.mkpool(self.data_pool, self.data_pool_profile, 'rbd')
       for node in common.get_fqdn_list('clients'):
           for volnum in xrange(0, self.volumes_per_client):
               node = node.rpartition("@")[2]
               self.cluster.mkimage('cbt-librbdfio-%s-%d' % (node,volnum), self.vol_size, self.pool_name, self.data_pool, self.vol_order)
     monitoring.stop()
Example #48
File: rawfio.py Project: bengland2/cbt
    def run(self):
        super(RawFio, self).run()
        # Set client readahead
        clnts = settings.getnodes('clients')

        # We'll always drop caches for rados bench
        self.dropcaches()

        monitoring.start(self.run_dir)

        time.sleep(5)

        logger.info('Starting raw fio %s test.', self.mode)

        fio_process_list = []
        for i in range(self.concurrent_procs):
            b = self.block_devices[i % len(self.block_devices)]
            fiopath = b
            out_file = '%s/output.%d' % (self.run_dir, i)
            fio_cmd = 'sudo %s' % self.fio_cmd
            fio_cmd += ' --rw=%s' % self.mode
            if (self.mode == 'readwrite' or self.mode == 'randrw'):
                fio_cmd += ' --rwmixread=%s --rwmixwrite=%s' % (self.rwmixread, self.rwmixwrite)
            fio_cmd += ' --ioengine=%s' % self.ioengine
            fio_cmd += ' --runtime=%s' % self.time
            fio_cmd += ' --ramp_time=%s' % self.ramp
            if self.startdelay:
                fio_cmd += ' --startdelay=%s' % self.startdelay
            if self.rate_iops:
                fio_cmd += ' --rate_iops=%s' % self.rate_iops
            fio_cmd += ' --numjobs=%s' % self.numjobs
            fio_cmd += ' --direct=%s' % self.direct
            fio_cmd += ' --bs=%dB' % self.op_size
            fio_cmd += ' --iodepth=%d' % self.iodepth
            fio_cmd += ' --size=%dM' % self.vol_size 
            fio_cmd += ' --write_iops_log=%s' % out_file
            fio_cmd += ' --write_bw_log=%s' % out_file
            fio_cmd += ' --write_lat_log=%s' % out_file
            fio_cmd += ' --output-format=%s' % self.fio_out_format
            if 'recovery_test' in self.cluster.config:
                fio_cmd += ' --time_based'
            fio_cmd += ' --name=%s > %s' % (fiopath, out_file)
            logger.debug("FIO CMD: %s" % fio_cmd)
            fio_process_list.append(common.pdsh(clnts, fio_cmd, continue_if_error=False))
        for p in fio_process_list:
            p.communicate()
        monitoring.stop(self.run_dir)
        logger.info('Finished raw fio test')

        common.sync_files('%s/*' % self.run_dir, self.out_dir)
Example #49
    def initialize(self): 
        super(Radosbench, self).initialize()

        logger.info('Running scrub monitoring.')
        monitoring.start("%s/scrub_monitoring" % self.run_dir)
        self.cluster.check_scrub()
        monitoring.stop()

        logger.info('Pausing for 60s for idle monitoring.')
        monitoring.start("%s/idle_monitoring" % self.run_dir)
        time.sleep(60)
        monitoring.stop()

        common.sync_files('%s/*' % self.run_dir, self.out_dir)

        return True
Example #50
File: getput.py Project: yunfeiguan/cbt
    def initialize(self):
        super(Getput, self).initialize()

        # create the user and key
        self.cluster.add_swift_user(self.user, self.subuser, self.key)

        # Clean and Create the run directory
        common.clean_remote_dir(self.run_dir)
        common.make_remote_dir(self.run_dir)

        logger.info('Pausing for 60s for idle monitoring.')
        monitoring.start("%s/idle_monitoring" % self.run_dir)
        time.sleep(60)
        monitoring.stop()

        common.sync_files('%s/*' % self.run_dir, self.out_dir)
Example #51
    def initialize(self): 
        super(Radosbench, self).initialize()

        logger.info('Running scrub monitoring.')
        monitoring.start("%s/scrub_monitoring" % self.run_dir)
        self.cluster.check_scrub()
        monitoring.stop()

        logger.info('Pausing for 60s for idle monitoring.')
        monitoring.start("%s/idle_monitoring" % self.run_dir)
        time.sleep(60)
        monitoring.stop()

        common.sync_files('%s/*' % self.run_dir, self.out_dir)

        return True
Example #52
    def _run(self, mode, run_dir, out_dir):
        # We'll always drop caches for rados bench
        self.dropcaches()

        if self.concurrent_ops:
            concurrent_ops_str = '--concurrent-ios %s' % self.concurrent_ops
        else:
            concurrent_ops_str = ''
        op_size_str = '-b %s' % self.op_size

        common.make_remote_dir(run_dir)

        # dump the cluster config
        self.cluster.dump_config(run_dir)

        # Run the backfill testing thread if requested
        if 'recovery_test' in self.cluster.config:
            recovery_callback = self.recovery_callback
            self.cluster.create_recovery_test(run_dir, recovery_callback)

        # Run rados bench
        monitoring.start(run_dir)
        logger.info('Running radosbench %s test.' % mode)
        ps = []
        for i in xrange(self.concurrent_procs):
            out_file = '%s/output.%s' % (run_dir, i)
            objecter_log = '%s/objecter.%s.log' % (run_dir, i)
            # default behavior is to use a single storage pool
            pool_name = 'rados-bench-cbt'
            run_name = '--run-name `hostname -s`-%s' % i
            if self.pool_per_proc:  # support previous behavior of 1 storage pool per rados process
                pool_name = 'rados-bench-`hostname -s`-%s' % i
                run_name = ''
            rados_bench_cmd = '%s -c %s -p %s bench %s %s %s %s %s --no-cleanup 2> %s > %s' % \
                 (self.cmd_path_full, self.tmp_conf, pool_name, op_size_str, self.time, mode, concurrent_ops_str, run_name, objecter_log, out_file)
            p = common.pdsh(settings.getnodes('clients'), rados_bench_cmd)
            ps.append(p)
        for p in ps:
            p.wait()
        monitoring.stop(run_dir)

        # If we were doing recovery, wait until it's done.
        if 'recovery_test' in self.cluster.config:
            self.cluster.wait_recovery_done()

        # Finally, get the historic ops
        self.cluster.dump_historic_ops(run_dir)
        common.sync_files('%s/*' % run_dir, out_dir)
Example #53
File: cosbench.py Project: ASBishop/cbt
    def initialize(self):
        super(Cosbench, self).initialize()

        logger.debug('Running cosbench and radosgw check.')
        self.prerun_check()

        logger.debug('Running scrub monitoring.')
        monitoring.start("%s/scrub_monitoring" % self.run_dir)
        self.cluster.check_scrub()
        monitoring.stop()

        logger.debug('Pausing for 60s for idle monitoring.')
        monitoring.start("%s/idle_monitoring" % self.run_dir)
        time.sleep(60)
        monitoring.stop()

        common.sync_files('%s' % self.run_dir, self.out_dir)

        # Create the run directory
        common.make_remote_dir(self.run_dir)

        conf = self.config
        if not self.config["template"]:
            self.config["template"] = "default"
        self.config["workload"] = self.choose_template("default", conf)

        # add a "prepare" stage if mode is read or mix
        if not self.container_prepare_check():
            workstage_init = {
                "name": "init",
                "work": {"type":"init", "workers":conf["workers"], "config":"containers=r(1,%s);cprefix=%s-%s-%s" % (conf["containers_max"], conf["obj_size"], conf["mode"], conf["objects_max"])}
            }
            workstage_prepare = {
                "name":"prepare",
                "work": {
                    "type":"prepare",
                    "workers":conf["workers"],
                    "config":"containers=r(1,%s);objects=r(1,%s);cprefix=%s-%s-%s;sizes=c(%s)%s" %
                    (conf["containers_max"], conf["objects_max"], conf["obj_size"], conf["mode"], conf["objects_max"], conf["obj_size_num"], conf["obj_size_unit"])
                }
            }
            self.config["workload"]["workflow"]["workstage"].insert(0, workstage_prepare)
            self.config["workload"]["workflow"]["workstage"].insert(0, workstage_init)

        self.prepare_xml(self.config["workload"])
        return True
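To make the generated workstages concrete, here is a small standalone sketch of the config strings the init and prepare stages end up with, using hypothetical values in place of the cosbench section of the CBT config:

    # Hypothetical cosbench settings; the real values come from self.config.
    conf = {"containers_max": 10, "objects_max": 100, "obj_size": "4KB",
            "mode": "read", "obj_size_num": 4, "obj_size_unit": "KB"}
    init_config = "containers=r(1,%s);cprefix=%s-%s-%s" % (
        conf["containers_max"], conf["obj_size"], conf["mode"], conf["objects_max"])
    prepare_config = "containers=r(1,%s);objects=r(1,%s);cprefix=%s-%s-%s;sizes=c(%s)%s" % (
        conf["containers_max"], conf["objects_max"], conf["obj_size"], conf["mode"],
        conf["objects_max"], conf["obj_size_num"], conf["obj_size_unit"])
    print(init_config)     # containers=r(1,10);cprefix=4KB-read-100
    print(prepare_config)  # containers=r(1,10);objects=r(1,100);cprefix=4KB-read-100;sizes=c(4)KB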
Example #54
File: librbdfio.py Project: yunfeiguan/cbt
    def run(self):
        super(LibrbdFio, self).run()

        # We'll always drop caches for rados bench
        self.dropcaches()

        # dump the cluster config
        self.cluster.dump_config(self.run_dir)

        monitoring.start(self.run_dir)

        time.sleep(5)

        # If the pg autoscaler kicks in before starting the test,
        # wait for it to complete. Otherwise, results may be skewed.
        ret = self.cluster.check_pg_autoscaler(
            self.wait_pgautoscaler_timeout,
            "%s/pgautoscaler.log" % self.run_dir)
        if ret == 1:
            logger.warning(
                "PG autoscaler is taking longer to complete. Continuing anyway; results may be skewed."
            )

        # Run the backfill testing thread if requested
        if 'recovery_test' in self.cluster.config:
            recovery_callback = self.recovery_callback
            self.cluster.create_recovery_test(self.run_dir, recovery_callback)

        logger.info('Running rbd fio %s test.', self.mode)
        ps = []
        for i in range(self.volumes_per_client):
            fio_cmd = self.mkfiocmd(i)
            p = common.pdsh(settings.getnodes('clients'), fio_cmd)
            ps.append(p)
        for p in ps:
            p.wait()
        # If we were doing recovery, wait until it's done.
        if 'recovery_test' in self.cluster.config:
            self.cluster.wait_recovery_done()

        monitoring.stop(self.run_dir)

        # Finally, get the historic ops
        self.cluster.dump_historic_ops(self.run_dir)
        common.sync_files('%s/*' % self.run_dir, self.out_dir)
        self.analyze(self.out_dir)
Example #55
    def run(self):
        super(KvmRbdFio, self).run()
        # Set client readahead
        self.set_client_param('read_ahead_kb', self.client_ra)

        # We'll always drop caches for rados bench
        self.dropcaches()

        monitoring.start(self.run_dir)

        time.sleep(5)
 #       names = ''
 #       for i in xrange(self.concurrent_procs):
 #           names += "--name=/srv/rbdfio-`hostname -s`-%d/cbt-kvmrbdfio " % i
 #           names += '--name=/srv/rbdfio-`hostname -s`-0/cbt-kvmrbdfio-%d ' % i 
        out_file = '%s/output' % self.run_dir
#        pre_cmd = 'sudo fio --rw=write -ioengine=sync --numjobs=%s --bs=4M --size %dM %s > /dev/null' % (self.numjobs, self.vol_size, self.names)
        fio_cmd = 'sudo %s' % self.fio_cmd
        fio_cmd += ' --rw=%s' % self.mode
        if (self.mode == 'readwrite' or self.mode == 'randrw'):
            fio_cmd += ' --rwmixread=%s --rwmixwrite=%s' % (self.rwmixread, self.rwmixwrite)
        fio_cmd += ' --ioengine=%s' % self.ioengine
        fio_cmd += ' --runtime=%s' % self.time
        fio_cmd += ' --ramp_time=%s' % self.ramp
        fio_cmd += ' --numjobs=%s' % self.numjobs
        fio_cmd += ' --direct=1'
        fio_cmd += ' --bs=%dB' % self.op_size
        fio_cmd += ' --iodepth=%d' % self.iodepth
        fio_cmd += ' --size=%dM' % self.vol_size 
        fio_cmd += ' --write_iops_log=%s' % out_file 
        fio_cmd += ' --write_bw_log=%s' % out_file
        fio_cmd += ' --write_lat_log=%s' % out_file
        if 'recovery_test' in self.cluster.config:
            fio_cmd += ' --time_based'
        fio_cmd += ' %s > %s' % (self.names, out_file)

        # Run the backfill testing thread if requested
        if 'recovery_test' in self.cluster.config:
            recovery_callback = self.recovery_callback
            self.cluster.create_recovery_test(self.run_dir, recovery_callback)

        logger.info('Running rbd fio %s test.', self.mode)
        common.pdsh(settings.getnodes('clients'), fio_cmd).communicate()
        monitoring.stop(self.run_dir)

        common.sync_files('%s/*' % self.run_dir, self.out_dir)
Example #56
File: getput.py Project: bengland2/cbt
    def initialize(self): 
        super(Getput, self).initialize()

        # create the user and key
        self.cluster.add_swift_user(self.user, self.subuser, self.key)

        # Clean and Create the run directory
        common.clean_remote_dir(self.run_dir)
        common.make_remote_dir(self.run_dir)

        logger.info('Pausing for 60s for idle monitoring.')
        monitoring.start("%s/idle_monitoring" % self.run_dir)
        time.sleep(60)
        monitoring.stop()

        common.sync_files('%s/*' % self.run_dir, self.out_dir)

        return True