Example #1
    def initialize(self): 
        super(LibrbdFio, self).initialize()

        print 'Running scrub monitoring.'
        monitoring.start("%s/scrub_monitoring" % self.run_dir)
        self.cluster.check_scrub()
        monitoring.stop()

        print 'Pausing for 60s for idle monitoring.'
        monitoring.start("%s/idle_monitoring" % self.run_dir)
        time.sleep(60)
        monitoring.stop()

        common.sync_files('%s/*' % self.run_dir, self.out_dir)

        self.mkimages()

        # Create the run directory
        common.make_remote_dir(self.run_dir)

        # populate the fio files
        print 'Attempting to populate fio files...'
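        # Prefill each client's RBD image with 4M sequential writes so later benchmark passes read fully allocated data.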
        pre_cmd = 'sudo %s --ioengine=rbd --clientname=admin --pool=%s --rbdname=cbt-librbdfio-`hostname -s` --invalidate=0  --rw=write --numjobs=%s --bs=4M --size %dM %s > /dev/null' % (self.cmd_path, self.poolname, self.numjobs, self.vol_size, self.names)
        common.pdsh(settings.getnodes('clients'), pre_cmd).communicate()

        return True
Example #2
    def initialize(self):
        super(LibrbdFio, self).initialize()

        # Clean and Create the run directory
        common.clean_remote_dir(self.run_dir)
        common.make_remote_dir(self.run_dir)

        logger.info('Pausing for 60s for idle monitoring.')
        monitoring.start("%s/idle_monitoring" % self.run_dir)
        time.sleep(60)
        monitoring.stop()

        common.sync_files('%s/*' % self.run_dir, self.out_dir)

        self.mkimages()

        # populate the fio files
        ps = []
        logger.info('Attempting to populate fio files...')
        if (self.use_existing_volumes == False):
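            # Prefill only the volumes cbt just created; pre-existing volumes are left untouched.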
            for volnum in range(self.volumes_per_client):
                rbd_name = 'cbt-librbdfio-`%s`-%d' % (common.get_fqdn_cmd(),
                                                      volnum)
                pre_cmd = 'sudo %s --ioengine=rbd --clientname=admin --pool=%s --rbdname=%s --invalidate=0  --rw=write --numjobs=%s --bs=4M --size %dM %s --output-format=%s > /dev/null' % (
                    self.cmd_path, self.pool_name, rbd_name, self.numjobs,
                    self.vol_size, self.names, self.fio_out_format)
                p = common.pdsh(settings.getnodes('clients'), pre_cmd)
                ps.append(p)
            for p in ps:
                p.wait()
Example #3
    def initialize(self):
        pass
        #        self.cleanup()
        #        super(KvmRbdFio, self).initialize()
        common.setup_cluster()
        #        common.setup_ceph()

        # Setup the pools
        #        common.pdsh(settings.cluster.get('head'), 'sudo ceph osd pool create rbdfio %d %d' % (self.pgs, self.pgs)).communicate()
        #        common.pdsh(settings.cluster.get('head'), 'sudo ceph osd pool set rbdfio size 1').communicate()
        #        print 'Checking Health after pool creation.'
        #        common.check_health()

        #        common.pdsh(settings.cluster.get('clients'), 'sudo modprobe rbd').communicate()
        #        for i in xrange(self.concurrent_procs):
        names = ""
        for i in xrange(self.concurrent_procs):
            letter = string.ascii_lowercase[i + 1]
            #            common.pdsh(settings.cluster.get('clients'), 'sudo rbd create rbdfio/rbdfio-`hostname -s`-%d --size %d' % (i, self.vol_size)).communicate()
            #            common.pdsh(settings.cluster.get('clients'), 'sudo rbd map rbdfio-`hostname -s`-%d  --pool rbdfio --id admin' % i).communicate()
            #            common.pdsh(settings.cluster.get('clients'), 'sudo echo "%s %s rbdfio rbdfio-`hostname -s`-%d" | sudo tee /sys/bus/rbd/add && sudo /sbin/udevadm settle' % (self.rbdadd_mons, self.rbdadd_options, i)).communicate()
            common.pdsh(settings.getnodes('clients'),
                        'sudo mkfs.xfs /dev/vd%s' % letter).communicate()
            common.pdsh(settings.getnodes('clients'),
                        'sudo mkdir /srv/rbdfio-`hostname -s`-%d' %
                        i).communicate()
            common.pdsh(
                settings.getnodes('clients'),
                'sudo mount -t xfs -o noatime,inode64 /dev/vd%s /srv/rbdfio-`hostname -s`-%d'
                % (letter, i)).communicate()

        # Create the run directory
        common.make_remote_dir(self.run_dir)
Example #4
    def initialize(self):
        super(Cosbench, self).initialize()

        logger.debug('Running cosbench and radosgw check.')
        self.prerun_check()

        logger.debug('Running scrub monitoring.')
        monitoring.start("%s/scrub_monitoring" % self.run_dir)
        self.cluster.check_scrub()
        monitoring.stop()

        logger.debug('Pausing for 60s for idle monitoring.')
        monitoring.start("%s/idle_monitoring" % self.run_dir)
        time.sleep(60)
        monitoring.stop()

        common.sync_files('%s' % self.run_dir, self.out_dir)

        # Create the run directory
        common.make_remote_dir(self.run_dir)

        conf = self.config
        if not self.config["template"]:
            self.config["template"] = "default"
        self.config["workload"] = self.choose_template("default", conf)
        self.prepare_xml(self.config["workload"])
        return True
Example #5
    def run(self):
        super(RbdFio, self).run()
        # We'll always drop caches for rados bench
        self.dropcaches()

        common.make_remote_dir(self.run_dir)
        monitoring.start(self.run_dir)
        # Run rados bench
        print 'Running rbd fio %s test.' % self.mode
        names = ""
        for i in xrange(self.concurrent_procs):
            names += "--name=/srv/rbdfio-`hostname -s`-%d/cbt-rbdfio " % i
        out_file = '%s/output' % self.run_dir
        fio_cmd = 'sudo fio --rw=%s -ioengine=%s --runtime=%s --numjobs=1 --direct=1 --bs=%dB --iodepth=%d --size %dM %s > %s' % (
            self.mode, self.ioengine, self.time, self.op_size, self.iodepth,
            self.vol_size * 9 / 10, names, out_file)
        common.pdsh(settings.getnodes('clients'), fio_cmd).communicate()
        #        ps = []
        #        for i in xrange(self.concurrent_procs):
        #            out_file = '%s/output.%s' % (self.run_dir, i)
        #            p = common.pdsh(settings.cluster.get('clients'), 'sudo fio --rw=%s -ioengine=%s --runtime=%s --name=/srv/rbdfio-`hostname -s`-%d/cbt-rbdfio --numjobs=1 --direct=1 --bs=%dB --iodepth=%d --size %dM > %s' % (self.mode, self.ioengine, self.time, i, self.op_size, self.iodepth, self.vol_size * 9/10, out_file))
        #            ps.append(p)
        #        for p in ps:
        #            p.wait()
        monitoring.stop(self.run_dir)
        common.sync_files('%s/*' % self.run_dir, self.out_dir)
Example #6
File: rawfio.py Project: nasirkamal/cbt
    def initialize(self):
        super(RawFio, self).initialize()
        common.pdsh(settings.getnodes('clients'),
                    'sudo rm -rf %s' % self.run_dir,
                    continue_if_error=False).communicate()
        common.make_remote_dir(self.run_dir)
        clnts = settings.getnodes('clients')
        logger.info('creating mountpoints...')

        logger.info('Attempting to initialize fio files...')
        initializer_list = []
        for i in range(self.concurrent_procs):
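            # Distribute the fio jobs round-robin across the configured block devices.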
            b = self.block_devices[i % len(self.block_devices)]
            fiopath = b
            pre_cmd = 'sudo %s --rw=write -ioengine=%s --bs=%s ' % (self.fio_cmd, self.ioengine, self.op_size)
            pre_cmd = '%s --size %dM --name=%s --output-format=%s> /dev/null' % (
                pre_cmd, self.vol_size, fiopath, self.fio_out_format)
            initializer_list.append(common.pdsh(clnts, pre_cmd,
                                                continue_if_error=False))
        for p in initializer_list:
            p.communicate()

        # Create the run directory
        common.pdsh(clnts, 'rm -rf %s' % self.run_dir,
                    continue_if_error=False).communicate()
        common.make_remote_dir(self.run_dir)
Example #7
    def initialize(self): 
        super(LibrbdFio, self).initialize()

        # Clean and Create the run directory
        common.clean_remote_dir(self.run_dir)
        common.make_remote_dir(self.run_dir)

        logger.info('Pausing for 60s for idle monitoring.')
        monitoring.start("%s/idle_monitoring" % self.run_dir)
        time.sleep(60)
        monitoring.stop()

        common.sync_files('%s/*' % self.run_dir, self.out_dir)

        self.mkimages()

        # populate the fio files
        ps = []
        logger.info('Attempting to populate fio files...')
        if (self.use_existing_volumes == False):
          for volnum in xrange(self.volumes_per_client):
              rbd_name = 'cbt-librbdfio-`%s`-%d' % (common.get_fqdn_cmd(), volnum)
              pre_cmd = 'sudo %s --ioengine=rbd --clientname=admin --pool=%s --rbdname=%s --invalidate=0  --rw=write --numjobs=%s --bs=4M --size %dM %s --output-format=%s > /dev/null' % (self.cmd_path, self.pool_name, rbd_name, self.numjobs, self.vol_size, self.names, self.fio_out_format)
              p = common.pdsh(settings.getnodes('clients'), pre_cmd)
              ps.append(p)
          for p in ps:
              p.wait()
        return True
Example #8
    def run(self):
        super(RbdFio, self).run()
        # Set client readahead
        self.set_client_param('read_ahead_kb', self.client_ra)

        # We'll always drop caches for rados bench
        self.dropcaches()

        common.make_remote_dir(self.run_dir)
        monitoring.start(self.run_dir)
        # Run rados bench
        print 'Running rbd fio %s test.' % self.mode
        names = ""
        for i in xrange(self.concurrent_procs):
            names += "--name=%s/mnt/rbdfio-`hostname -s`-%d/cbt-rbdfio " % (self.tmp_dir, i)
        out_file = '%s/output' % self.run_dir
        fio_cmd = 'sudo fio --rw=%s -ioengine=%s --runtime=%s --numjobs=1 --direct=1 --bs=%dB --iodepth=%d --size %dM %s > %s' %  (self.mode, self.ioengine, self.time, self.op_size, self.iodepth, self.vol_size * 9/10, names, out_file)
        common.pdsh(settings.getnodes('clients'), fio_cmd).communicate()
#        ps = []
#        for i in xrange(self.concurrent_procs):
#            out_file = '%s/output.%s' % (self.run_dir, i)
#            p = common.pdsh(settings.cluster.get('clients'), 'sudo fio --rw=%s -ioengine=%s --runtime=%s --name=/srv/rbdfio-`hostname -s`-%d/cbt-rbdfio --numjobs=1 --direct=1 --bs=%dB --iodepth=%d --size %dM > %s' % (self.mode, self.ioengine, self.time, i, self.op_size, self.iodepth, self.vol_size * 9/10, out_file))
#            ps.append(p)
#        for p in ps:
#            p.wait()
        monitoring.stop(self.run_dir)
        common.sync_files('%s/*' % self.run_dir, self.out_dir)
Example #9
    def initialize(self): 
        pass
#        self.cleanup()
#        super(KvmRbdFio, self).initialize()
        common.setup_cluster()
#        common.setup_ceph()

        # Setup the pools
#        common.pdsh(settings.cluster.get('head'), 'sudo ceph osd pool create rbdfio %d %d' % (self.pgs, self.pgs)).communicate()
#        common.pdsh(settings.cluster.get('head'), 'sudo ceph osd pool set rbdfio size 1').communicate()
#        print 'Checking Health after pool creation.'
#        common.check_health()

#        common.pdsh(settings.cluster.get('clients'), 'sudo modprobe rbd').communicate()
#        for i in xrange(self.concurrent_procs):
        names = ""
        for i in xrange(self.concurrent_procs):
            letter = string.ascii_lowercase[i+1]
#            common.pdsh(settings.cluster.get('clients'), 'sudo rbd create rbdfio/rbdfio-`hostname -s`-%d --size %d' % (i, self.vol_size)).communicate()
#            common.pdsh(settings.cluster.get('clients'), 'sudo rbd map rbdfio-`hostname -s`-%d  --pool rbdfio --id admin' % i).communicate()
#            common.pdsh(settings.cluster.get('clients'), 'sudo echo "%s %s rbdfio rbdfio-`hostname -s`-%d" | sudo tee /sys/bus/rbd/add && sudo /sbin/udevadm settle' % (self.rbdadd_mons, self.rbdadd_options, i)).communicate()
            common.pdsh(settings.cluster.get('clients'), 'sudo mkfs.xfs /dev/vd%s' % letter).communicate()
            common.pdsh(settings.cluster.get('clients'), 'sudo mkdir /srv/rbdfio-`hostname -s`-%d' % i).communicate()
            common.pdsh(settings.cluster.get('clients'), 'sudo mount -t xfs -o noatime,inode64 /dev/vd%s /srv/rbdfio-`hostname -s`-%d' %(letter, i)).communicate()

        # Create the run directory
        common.make_remote_dir(self.run_dir)
Example #10
    def initialize(self):
        super(StdFioBench, self).initialize()
        for i in xrange(1):
            letter = string.ascii_lowercase[i + 1]
            if not self.use_existing:
                common.pdsh(settings.getnodes('clients'), 'sudo umount -f %s' %
                            (self.block_dev_name)).communicate()
                common.pdsh(
                    settings.getnodes('clients'), 'sudo mkfs.%s -f  %s' %
                    (self.filesystem, self.block_dev_name)).communicate()
            common.pdsh(settings.getnodes('clients'), 'sudo mkdir -p %s ' %
                        (self.mount_point_name)).communicate()
            common.pdsh(
                settings.getnodes('clients'),
                'sudo mount -t %s -o noatime %s %s' %
                (self.filesystem, self.block_dev_name,
                 self.mount_point_name)).communicate()
            common.pdsh(
                settings.getnodes('clients'),
                'sudo mkdir -p %s/`hostname -s`-%d' %
                (self.mount_point_name, i)).communicate()

        # Create the run directory
        common.make_remote_dir(self.run_dir)

        # populate the fio files
        logger.info('Attempting to populate fio files...')
        pre_cmd = 'sudo %s --rw=write --ioengine=sync --numjobs=%s --bs=8M --size %dM %s > /dev/null ' % (
            self.fio_cmd, self.numjobs, self.vol_size, self.names)
        common.pdsh(settings.getnodes('clients'), pre_cmd).communicate()
Example #11
    def initialize(self):
        super(LibrbdFio, self).initialize()

        logger.info('Running scrub monitoring.')
        monitoring.start("%s/scrub_monitoring" % self.run_dir)
        self.cluster.check_scrub()
        monitoring.stop()

        logger.info('Pausing for 60s for idle monitoring.')
        monitoring.start("%s/idle_monitoring" % self.run_dir)
        time.sleep(60)
        monitoring.stop()

        common.sync_files('%s/*' % self.run_dir, self.out_dir)

        self.mkimages()

        # Create the run directory
        common.make_remote_dir(self.run_dir)

        # populate the fio files
        ps = []
        logger.info('Attempting to populate fio files...')
        for i in xrange(self.volumes_per_client):
            pre_cmd = 'sudo %s --ioengine=rbd --clientname=admin --pool=%s --rbdname=cbt-librbdfio-`hostname -s`-%d --invalidate=0  --rw=write --numjobs=%s --bs=4M --size %dM %s > /dev/null' % (
                self.cmd_path, self.poolname, i, self.numjobs, self.vol_size,
                self.names)
            p = common.pdsh(settings.getnodes('clients'), pre_cmd)
            ps.append(p)
        for p in ps:
            p.wait()
        return True
Example #12
File: rawfio.py Project: bengland2/cbt
    def initialize(self): 
        super(RawFio, self).initialize()
        common.pdsh(settings.getnodes('clients'),
                    'sudo rm -rf %s' % self.run_dir,
                    continue_if_error=False).communicate()
        common.make_remote_dir(self.run_dir)
        clnts = settings.getnodes('clients')
        logger.info('creating mountpoints...')

        logger.info('Attempting to initialize fio files...')
        initializer_list = []
        for i in range(self.concurrent_procs):
            b = self.block_devices[i % len(self.block_devices)]
            fiopath = b
            pre_cmd = 'sudo %s --rw=write -ioengine=%s --bs=%s ' % (self.fio_cmd, self.ioengine, self.op_size)
            pre_cmd = '%s --size %dM --name=%s --output-format=%s> /dev/null' % (
                       pre_cmd, self.vol_size, fiopath, self.fio_out_format)
            initializer_list.append(common.pdsh(clnts, pre_cmd,
                                    continue_if_error=False))
        for p in initializer_list:
             p.communicate()

        # Create the run directory
        common.pdsh(clnts, 'rm -rf %s' % self.run_dir,
                    continue_if_error=False).communicate()
        common.make_remote_dir(self.run_dir)
Example #13
    def _run(self, mode, run_dir, out_dir):
        # We'll always drop caches for rados bench
        self.dropcaches()

        if self.concurrent_ops:
            concurrent_ops_str = '--concurrent-ios %s' % self.concurrent_ops
        op_size_str = '-b %s' % self.op_size

        common.make_remote_dir(run_dir)

        # dump the cluster config
        common.dump_config(run_dir)

        monitoring.start(run_dir)
        # Run rados bench
        print 'Running radosbench read test.'
        ps = []
        for i in xrange(self.concurrent_procs):
            out_file = '%s/output.%s' % (run_dir, i)
            objecter_log = '%s/objecter.%s.log' % (run_dir, i)
            p = common.pdsh(settings.getnodes('clients'), '/usr/bin/rados -p rados-bench-`hostname -s`-%s %s bench %s %s %s --no-cleanup 2> %s > %s' % (i, op_size_str, self.time, mode, concurrent_ops_str, objecter_log, out_file))
            ps.append(p)
        for p in ps:
            p.wait()
        monitoring.stop(run_dir)

        # Get the historic ops
        common.dump_historic_ops(run_dir)
        common.sync_files('%s/*' % run_dir, out_dir)
Example #14
    def _run(self, mode, run_dir, out_dir):
        # We'll always drop caches for rados bench
        self.dropcaches()

        if self.concurrent_ops:
            concurrent_ops_str = "--concurrent-ios %s" % self.concurrent_ops
        op_size_str = "-b %s" % self.op_size

        common.make_remote_dir(run_dir)
        monitoring.start(run_dir)
        # Run rados bench
        print "Running radosbench read test."
        ps = []
        for i in xrange(self.concurrent_procs):
            out_file = "%s/output.%s" % (run_dir, i)
            objecter_log = "%s/objecter.%s.log" % (run_dir, i)
            p = common.pdsh(
                settings.cluster.get("clients"),
                "/usr/bin/rados -p rados-bench-%s %s bench %s %s %s --no-cleanup 2> %s > %s"
                % (i, op_size_str, self.time, mode, concurrent_ops_str, objecter_log, out_file),
            )
            ps.append(p)
        for p in ps:
            p.wait()
        monitoring.stop(run_dir)
        common.sync_files("%s/*" % run_dir, out_dir)
Example #15
    def initialize(self):
        super(Cosbench, self).initialize()

        logger.debug('Running cosbench and radosgw check.')
        self.prerun_check()

        logger.debug('Running scrub monitoring.')
        monitoring.start("%s/scrub_monitoring" % self.run_dir)
        self.cluster.check_scrub()
        monitoring.stop()

        logger.debug('Pausing for 60s for idle monitoring.')
        monitoring.start("%s/idle_monitoring" % self.run_dir)
        time.sleep(60)
        monitoring.stop()

        common.sync_files('%s' % self.run_dir, self.out_dir)

        # Create the run directory
        common.make_remote_dir(self.run_dir)

        conf = self.config
        if not self.config["template"]:
            self.config["template"] = "default"
        self.config["workload"] = self.choose_template("default", conf)
        self.prepare_xml(self.config["workload"])
        return True
Example #16
    def initialize(self): 
        common.cleanup_tests()
        if not self.use_existing:
            common.setup_cluster()
            common.setup_ceph()

            # Create the run directory
            common.make_remote_dir(self.run_dir)

            # Setup the pools

            monitoring.start("%s/pool_monitoring" % self.run_dir)
            for i in xrange(self.concurrent_procs):
                for node in settings.getnodes('clients').split(','):
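                    # Client entries may be given as "user@host"; keep only the host part for the pool name.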
                    node = node.rpartition("@")[2]
                    common.pdsh(settings.getnodes('head'), 'sudo ceph osd pool create rados-bench-%s-%s %d %d' % (node, i, self.pgs_per_pool, self.pgs_per_pool)).communicate()
                    common.pdsh(settings.getnodes('head'), 'sudo ceph osd pool set rados-bench-%s-%s size 1' % (node, i)).communicate()
                    # check the health for each pool.
                    print 'Checking Health after pool creation.'
                    common.check_health()
            monitoring.stop()

        print 'Running scrub monitoring.'
        monitoring.start("%s/scrub_monitoring" % self.run_dir)
        common.check_scrub()
        monitoring.stop()

        print 'Pausing for 60s for idle monitoring.'
        monitoring.start("%s/idle_monitoring" % self.run_dir)
        time.sleep(60)
        monitoring.stop()

        common.sync_files('%s/*' % self.run_dir, self.out_dir)

        return True
Example #17
    def initialize(self): 
        super(RbdFio, self).initialize()
        self.cleanup()

        if not self.use_existing:
            self.cluster.initialize()
            self.cluster.dump_config(self.run_dir)

            # Setup the pools
            monitoring.start("%s/pool_monitoring" % self.run_dir)
            common.pdsh(settings.getnodes('head'), 'sudo ceph -c %s osd pool create rbdfio %d %d' % (self.tmp_conf, self.pgs, self.pgs)).communicate()
            common.pdsh(settings.getnodes('head'), 'sudo ceph -c %s osd pool set rbdfio size 1' % self.tmp_conf).communicate()
            print 'Checking Health after pool creation.'
            self.cluster.check_health()
            monitoring.stop()

            # Mount the filesystem
            common.pdsh(settings.getnodes('clients'), 'sudo modprobe rbd').communicate()
            for i in xrange(self.concurrent_procs):
                common.pdsh(settings.getnodes('clients'), 'sudo rbd -c %s create rbdfio/rbdfio-`hostname -s`-%d --size %d' % (self.tmp_conf, i, self.vol_size)).communicate()
                common.pdsh(settings.getnodes('clients'), 'sudo echo "%s %s rbdfio rbdfio-`hostname -s`-%d" | sudo tee /sys/bus/rbd/add && sudo /sbin/udevadm settle' % (self.rbdadd_mons, self.rbdadd_options, i)).communicate()
                common.pdsh(settings.getnodes('clients'), 'sudo mkfs.xfs /dev/rbd/rbdfio/rbdfio-`hostname -s`-%d' % i).communicate()
                common.pdsh(settings.getnodes('clients'), 'sudo mkdir -p -m0755 -- %s/mnt/rbdfio-`hostname -s`-%d' % (self.tmp_dir, i)).communicate()
                common.pdsh(settings.getnodes('clients'), 'sudo mount -t xfs -o noatime,inode64 /dev/rbd/rbdfio/rbdfio-`hostname -s`-%d %s/mnt/rbdfio-`hostname -s`-%d' % (i, self.tmp_dir, i)).communicate()

        print 'Running scrub monitoring'
        monitoring.start("%s/scrub_monitoring" % self.run_dir)
        self.cluster.check_scrub()
        monitoring.stop()

        # Create the run directory
        common.make_remote_dir(self.run_dir)
Example #18
    def initialize(self):
        super(LibrbdFio, self).initialize()

        print "Running scrub monitoring."
        monitoring.start("%s/scrub_monitoring" % self.run_dir)
        self.cluster.check_scrub()
        monitoring.stop()

        print "Pausing for 60s for idle monitoring."
        monitoring.start("%s/idle_monitoring" % self.run_dir)
        time.sleep(60)
        monitoring.stop()

        common.sync_files("%s/*" % self.run_dir, self.out_dir)

        self.mkimages()

        # Create the run directory
        common.make_remote_dir(self.run_dir)

        # populate the fio files
        ps = []
        print "Attempting to populating fio files..."
        for i in xrange(self.volumes_per_client):
            pre_cmd = (
                "sudo %s --ioengine=rbd --clientname=admin --pool=%s --rbdname=cbt-librbdfio-`hostname -s`-%d --invalidate=0  --rw=write --numjobs=%s --bs=4M --size %dM %s > /dev/null"
                % (self.cmd_path, self.poolname, i, self.numjobs, self.vol_size, self.names)
            )
            p = common.pdsh(settings.getnodes("clients"), pre_cmd)
            ps.append(p)
        for p in ps:
            p.wait()
        return True
Example #19
    def initialize(self): 
        super(RbdFio, self).initialize()

        logger.info('Running scrub monitoring.')
        monitoring.start("%s/scrub_monitoring" % self.run_dir)
        self.cluster.check_scrub()
        monitoring.stop()

        logger.info('Pausing for 60s for idle monitoring.')
        monitoring.start("%s/idle_monitoring" % self.run_dir)
        time.sleep(60)
        monitoring.stop()

        common.sync_files('%s/*' % self.run_dir, self.out_dir)

        self.mkimages()
 
        # Create the run directory
        common.make_remote_dir(self.run_dir)

        # populate the fio files
        logger.info('Attempting to populate fio files...')
        pre_cmd = 'sudo %s --ioengine=%s --rw=write --numjobs=%s --bs=4M --size %dM %s > /dev/null' % (self.cmd_path, self.ioengine, self.numjobs, self.vol_size*0.9, self.names)
        common.pdsh(settings.getnodes('clients'), pre_cmd).communicate()

        return True
Example #20
    def initialize(self): 
        self.cleanup()
        super(RbdFio, self).initialize()
        common.setup_cluster()
        common.setup_ceph()
        common.dump_config(self.run_dir)
        # Setup the pools
        common.pdsh(settings.getnodes('head'), 'sudo ceph osd pool create rbdfio %d %d' % (self.pgs, self.pgs)).communicate()
        common.pdsh(settings.getnodes('head'), 'sudo ceph osd pool set rbdfio size 1').communicate()
        print 'Checking Health after pool creation.'
        common.check_health()

        common.pdsh(settings.getnodes('clients'), 'sudo modprobe rbd').communicate()
        for i in xrange(self.concurrent_procs):
            common.pdsh(settings.getnodes('clients'), 'sudo rbd create rbdfio/rbdfio-`hostname -s`-%d --size %d' % (i, self.vol_size)).communicate()
#            common.pdsh(settings.cluster.get('clients'), 'sudo rbd map rbdfio-`hostname -s`-%d  --pool rbdfio --id admin' % i).communicate()
            common.pdsh(settings.getnodes('clients'), 'sudo echo "%s %s rbdfio rbdfio-`hostname -s`-%d" | sudo tee /sys/bus/rbd/add && sudo /sbin/udevadm settle' % (self.rbdadd_mons, self.rbdadd_options, i)).communicate()
            common.pdsh(settings.getnodes('clients'), 'sudo mkfs.xfs /dev/rbd/rbdfio/rbdfio-`hostname -s`-%d' % i).communicate()
            common.pdsh(settings.getnodes('clients'), 'sudo mkdir /srv/rbdfio-`hostname -s`-%d' % i).communicate()
            common.pdsh(settings.getnodes('clients'), 'sudo mount -t xfs -o noatime,inode64 /dev/rbd/rbdfio/rbdfio-`hostname -s`-%d /srv/rbdfio-`hostname -s`-%d' %(i, i)).communicate()

        common.check_scrub()

        # Create the run directory
        common.make_remote_dir(self.run_dir)
Example #21
    def _run(self, mode, run_dir, out_dir):
        # We'll always drop caches for rados bench
        #self.dropcaches()

        if self.concurrent_ops:
            concurrent_ops_str = '--concurrent-ios %s' % self.concurrent_ops
        #determine rados version
        #rados_version_str = subprocess.check_output(["rados", "-v"])
        rados_version_str, err = common.pdsh(
            settings.getnodes('head'), '/usr/bin/rados -v').communicate()
        m = re.findall("version (\d+)", rados_version_str)
        rados_version = int(m[0])

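        # rados bench on newer releases (v9+) only accepts an explicit object size (-b) for write tests,
        # so pass it for writes or when running against an older rados.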
        if mode in ['write'] or rados_version < 9:
            op_size_str = '-b %s' % self.op_size
        else:
            op_size_str = ''

        common.make_remote_dir(run_dir)

        # dump the cluster config
        self.cluster.dump_config(run_dir)

        # Run the backfill testing thread if requested
        if 'recovery_test' in self.cluster.config:
            recovery_callback = self.recovery_callback
            self.cluster.create_recovery_test(run_dir, recovery_callback)

        # Run rados bench
        monitoring.start(run_dir)
        logger.info('Running radosbench %s test.' % mode)
        ps = []
        for i in xrange(self.concurrent_procs):
            out_file = '%s/output.%s' % (run_dir, i)
            objecter_log = '%s/objecter.%s.log' % (run_dir, i)
            # default behavior is to use a single storage pool
            pool_name = self.pool
            run_name = '--run-name %s`hostname -s`-%s' % (self.object_set_id,
                                                          i)
            if self.pool_per_proc:  # support previous behavior of 1 storage pool per rados process
                pool_name = 'rados-bench-`hostname -s`-%s' % i
                run_name = ''
            rados_bench_cmd = '%s -c %s -p %s bench %s %s %s %s %s --no-cleanup 2> %s > %s' % \
                 (self.cmd_path_full, self.tmp_conf, pool_name, op_size_str, self.time, mode, concurrent_ops_str, run_name, objecter_log, out_file)
            p = common.pdsh(settings.getnodes('clients'), rados_bench_cmd)
            ps.append(p)
        for p in ps:
            p.wait()
        monitoring.stop(run_dir)

        # If we were doing recovery, wait until it's done.
        if 'recovery_test' in self.cluster.config:
            self.cluster.wait_recovery_done()

        # Finally, get the historic ops
        self.cluster.dump_historic_ops(run_dir)
        common.sync_files('%s/*' % run_dir, out_dir)
Example #22
    def initialize(self):
        super(Cosbench, self).initialize()

        logger.debug('Running cosbench and radosgw check.')
        self.prerun_check()

        logger.debug('Pausing for 60s for idle monitoring.')
        monitoring.start("%s/idle_monitoring" % self.run_dir)
        time.sleep(60)
        monitoring.stop()

        common.sync_files('%s' % self.run_dir, self.out_dir)

        # Create the run directory
        common.make_remote_dir(self.run_dir)

        conf = self.config
        if not self.config["template"]:
            self.config["template"] = "default"
        self.config["workload"] = self.choose_template("default", conf)

        # add a "prepare" stage if mode is read or mix
        if not self.container_prepare_check():
            workstage_init = {
                "name": "init",
                "work": {
                    "type":
                    "init",
                    "workers":
                    conf["workers"],
                    "config":
                    "containers=r(1,%s);cprefix=%s-%s-%s" %
                    (conf["containers_max"], conf["obj_size"], conf["mode"],
                     conf["objects_max"])
                }
            }
            workstage_prepare = {
                "name": "prepare",
                "work": {
                    "type":
                    "prepare",
                    "workers":
                    conf["workers"],
                    "config":
                    "containers=r(1,%s);objects=r(1,%s);cprefix=%s-%s-%s;sizes=c(%s)%s"
                    % (conf["containers_max"], conf["objects_max"],
                       conf["obj_size"], conf["mode"], conf["objects_max"],
                       conf["obj_size_num"], conf["obj_size_unit"])
                }
            }
            self.config["workload"]["workflow"]["workstage"].insert(
                0, workstage_prepare)
            self.config["workload"]["workflow"]["workstage"].insert(
                0, workstage_init)

        self.prepare_xml(self.config["workload"])
        return True
Example #23
    def initialize(self):
        self.cluster.cleanup()
        use_existing = settings.cluster.get('use_existing', True)
        if not use_existing:
            self.cluster.initialize()

        self.cleanup()
        # Create the run directory
        common.make_remote_dir(self.run_dir)
Example #24
File: radosbench.py Project: JevonQ/cbt
    def _run(self, mode, run_dir, out_dir):
        # We'll always drop caches for rados bench
        self.dropcaches()

        if self.concurrent_ops:
            concurrent_ops_str = '--concurrent-ios %s' % self.concurrent_ops
        #determine rados version
        rados_version_str, err = common.pdsh(settings.getnodes('head'), '/usr/bin/rados -v').communicate()
        m = re.findall("version (\d+)", rados_version_str)
        rados_version = int(m[0])

        if mode in ['write'] or rados_version < 9:
            op_size_str = '-b %s' % self.op_size
        else:
            op_size_str = ''


        common.make_remote_dir(run_dir)

        # dump the cluster config
        self.cluster.dump_config(run_dir)

        # Run the backfill testing thread if requested
        if 'recovery_test' in self.cluster.config:
            recovery_callback = self.recovery_callback
            self.cluster.create_recovery_test(run_dir, recovery_callback)

        # Run rados bench
        monitoring.start(run_dir)
        logger.info('Running radosbench %s test.' % mode)
        ps = []
        for i in xrange(self.concurrent_procs):
            out_file = '%s/output.%s' % (run_dir, i)
            objecter_log = '%s/objecter.%s.log' % (run_dir, i)
            # default behavior is to use a single storage pool 
            pool_name = self.pool
            run_name = '--run-name %s`hostname -s`-%s'%(self.object_set_id, i)
            if self.pool_per_proc: # support previous behavior of 1 storage pool per rados process
                pool_name = 'rados-bench-`hostname -s`-%s'%i
                run_name = ''
            rados_bench_cmd = '%s -c %s -p %s bench %s %s %s %s %s --no-cleanup 2> %s > %s' % \
                 (self.cmd_path_full, self.tmp_conf, pool_name, op_size_str, self.time, mode, concurrent_ops_str, run_name, objecter_log, out_file)
            p = common.pdsh(settings.getnodes('clients'), rados_bench_cmd)
            ps.append(p)
        for p in ps:
            p.wait()
        monitoring.stop(run_dir)

        # If we were doing recovery, wait until it's done.
        if 'recovery_test' in self.cluster.config:
            self.cluster.wait_recovery_done()

        # Finally, get the historic ops
        self.cluster.dump_historic_ops(run_dir)
        common.sync_files('%s/*' % run_dir, out_dir)
Example #25
    def initialize(self): 
        super(KvmRbdFio, self).initialize()
        for i in xrange(1):
             letter = string.ascii_lowercase[i+1]
             common.pdsh(settings.getnodes('clients'), 'sudo mkfs.ext4 /dev/vd%s' % letter).communicate()
             common.pdsh(settings.getnodes('clients'), 'sudo mkdir /srv/rbdfio-`hostname -s`-%d' % i).communicate()
             common.pdsh(settings.getnodes('clients'), 'sudo mount -t ext4 -o noatime /dev/vd%s /srv/rbdfio-`hostname -s`-%d' %(letter, i)).communicate()

        # Create the run directory
        common.make_remote_dir(self.run_dir)

        # populate the fio files
        logger.info('Attempting to populate fio files...')
        pre_cmd = 'sudo fio --rw=write -ioengine=sync --numjobs=%s --bs=4M --size %dM %s > /dev/null' % (self.numjobs, self.vol_size, self.names)
        common.pdsh(settings.getnodes('clients'), pre_cmd).communicate()
Example #26
File: getput.py Project: yunfeiguan/cbt
    def initialize(self):
        super(Getput, self).initialize()

        # create the user and key
        self.cluster.add_swift_user(self.user, self.subuser, self.key)

        # Clean and Create the run directory
        common.clean_remote_dir(self.run_dir)
        common.make_remote_dir(self.run_dir)

        logger.info('Pausing for 60s for idle monitoring.')
        monitoring.start("%s/idle_monitoring" % self.run_dir)
        time.sleep(60)
        monitoring.stop()

        common.sync_files('%s/*' % self.run_dir, self.out_dir)
Example #27
    def _run(self, mode, run_dir, out_dir):
        # We'll always drop caches for rados bench
        self.dropcaches()

        if self.concurrent_ops:
            concurrent_ops_str = '--concurrent-ios %s' % self.concurrent_ops
        op_size_str = '-b %s' % self.op_size

        common.make_remote_dir(run_dir)

        # dump the cluster config
        self.cluster.dump_config(run_dir)

        # Run the backfill testing thread if requested
        if 'recovery_test' in self.cluster.config:
            recovery_callback = self.recovery_callback
            self.cluster.create_recovery_test(run_dir, recovery_callback)

        # Run rados bench
        monitoring.start(run_dir)
        logger.info('Running radosbench read test.')
        ps = []
        for i in xrange(self.concurrent_procs):
            out_file = '%s/output.%s' % (run_dir, i)
            objecter_log = '%s/objecter.%s.log' % (run_dir, i)
            # default behavior is to use a single storage pool
            pool_name = 'rados-bench-cbt'
            run_name = '--run-name `hostname -s`-%s' % i
            if self.pool_per_proc:  # support previous behavior of 1 storage pool per rados process
                pool_name = 'rados-bench-`hostname -s`-%s' % i
                run_name = ''
            rados_bench_cmd = '%s -c %s -p %s bench %s %s %s %s %s --no-cleanup 2> %s > %s' % \
                 (self.cmd_path_full, self.tmp_conf, pool_name, op_size_str, self.time, mode, concurrent_ops_str, run_name, objecter_log, out_file)
            p = common.pdsh(settings.getnodes('clients'), rados_bench_cmd)
            ps.append(p)
        for p in ps:
            p.wait()
        monitoring.stop(run_dir)

        # If we were doing recovery, wait until it's done.
        if 'recovery_test' in self.cluster.config:
            self.cluster.wait_recovery_done()

        # Finally, get the historic ops
        self.cluster.dump_historic_ops(run_dir)
        common.sync_files('%s/*' % run_dir, out_dir)
Example #28
File: cosbench.py Project: ASBishop/cbt
    def initialize(self):
        super(Cosbench, self).initialize()

        logger.debug('Running cosbench and radosgw check.')
        self.prerun_check()

        logger.debug('Running scrub monitoring.')
        monitoring.start("%s/scrub_monitoring" % self.run_dir)
        self.cluster.check_scrub()
        monitoring.stop()

        logger.debug('Pausing for 60s for idle monitoring.')
        monitoring.start("%s/idle_monitoring" % self.run_dir)
        time.sleep(60)
        monitoring.stop()

        common.sync_files('%s' % self.run_dir, self.out_dir)

        # Create the run directory
        common.make_remote_dir(self.run_dir)

        conf = self.config
        if not self.config["template"]:
            self.config["template"] = "default"
        self.config["workload"] = self.choose_template("default", conf)

        # add a "prepare" stage if mode is read or mix
        if not self.container_prepare_check():
            workstage_init = {
                "name": "init",
                "work": {"type":"init", "workers":conf["workers"], "config":"containers=r(1,%s);cprefix=%s-%s-%s" % (conf["containers_max"], conf["obj_size"], conf["mode"], conf["objects_max"])}
            }
            workstage_prepare = {
                "name":"prepare",
                "work": {
                    "type":"prepare",
                    "workers":conf["workers"],
                    "config":"containers=r(1,%s);objects=r(1,%s);cprefix=%s-%s-%s;sizes=c(%s)%s" %
                    (conf["containers_max"], conf["objects_max"], conf["obj_size"], conf["mode"], conf["objects_max"], conf["obj_size_num"], conf["obj_size_unit"])
                }
            }
            self.config["workload"]["workflow"]["workstage"].insert(0, workstage_prepare)
            self.config["workload"]["workflow"]["workstage"].insert(0, workstage_init)

        self.prepare_xml(self.config["workload"])
        return True
Example #29
File: getput.py Project: bengland2/cbt
    def initialize(self): 
        super(Getput, self).initialize()

        # create the user and key
        self.cluster.add_swift_user(self.user, self.subuser, self.key)

        # Clean and Create the run directory
        common.clean_remote_dir(self.run_dir)
        common.make_remote_dir(self.run_dir)

        logger.info('Pausing for 60s for idle monitoring.')
        monitoring.start("%s/idle_monitoring" % self.run_dir)
        time.sleep(60)
        monitoring.stop()

        common.sync_files('%s/*' % self.run_dir, self.out_dir)

        return True
Example #30
    def initialize(self):
        self.cleanup()
        super(RbdFio, self).initialize()
        common.setup_cluster()
        common.setup_ceph()
        common.dump_config(self.run_dir)
        # Setup the pools
        common.pdsh(
            settings.getnodes('head'),
            'sudo ceph osd pool create rbdfio %d %d' %
            (self.pgs, self.pgs)).communicate()
        common.pdsh(settings.getnodes('head'),
                    'sudo ceph osd pool set rbdfio size 1').communicate()
        print 'Checking Health after pool creation.'
        common.check_health()

        common.pdsh(settings.getnodes('clients'),
                    'sudo modprobe rbd').communicate()
        for i in xrange(self.concurrent_procs):
            common.pdsh(
                settings.getnodes('clients'),
                'sudo rbd create rbdfio/rbdfio-`hostname -s`-%d --size %d' %
                (i, self.vol_size)).communicate()
            #            common.pdsh(settings.cluster.get('clients'), 'sudo rbd map rbdfio-`hostname -s`-%d  --pool rbdfio --id admin' % i).communicate()
            common.pdsh(
                settings.getnodes('clients'),
                'sudo echo "%s %s rbdfio rbdfio-`hostname -s`-%d" | sudo tee /sys/bus/rbd/add && sudo /sbin/udevadm settle'
                % (self.rbdadd_mons, self.rbdadd_options, i)).communicate()
            common.pdsh(
                settings.getnodes('clients'),
                'sudo mkfs.xfs /dev/rbd/rbdfio/rbdfio-`hostname -s`-%d' %
                i).communicate()
            common.pdsh(settings.getnodes('clients'),
                        'sudo mkdir /srv/rbdfio-`hostname -s`-%d' %
                        i).communicate()
            common.pdsh(
                settings.getnodes('clients'),
                'sudo mount -t xfs -o noatime,inode64 /dev/rbd/rbdfio/rbdfio-`hostname -s`-%d /srv/rbdfio-`hostname -s`-%d'
                % (i, i)).communicate()

        common.check_scrub()

        # Create the run directory
        common.make_remote_dir(self.run_dir)
Example #31
    def initialize(self):
        super(Radosbench, self).initialize()
        if settings.cluster.get("rebuild_every_test", False):
            # Setup the pools
            for i in xrange(self.concurrent_procs):
                common.pdsh(
                    settings.cluster.get("head"),
                    "sudo ceph osd pool create rados-bench-%s %d %d" % (i, self.pgs_per_pool, self.pgs_per_pool),
                ).communicate()
                common.pdsh(
                    settings.cluster.get("head"), "sudo ceph osd pool set rados-bench-%s size 1" % i
                ).communicate()

        # Create the run directory
        common.make_remote_dir(self.run_dir)

        # Check the health before we start the benchmarks
        print "Checking Health."
        common.check_health()
Example #32
    def _run(self, mode, run_dir, out_dir):
        # We'll always drop caches for rados bench
        self.dropcaches()

        if self.concurrent_ops:
            concurrent_ops_str = '--concurrent-ios %s' % self.concurrent_ops
        op_size_str = '-b %s' % self.op_size

        common.make_remote_dir(run_dir)

        # dump the cluster config
        self.cluster.dump_config(run_dir)

        # Run the backfill testing thread if requested
        if 'recovery_test' in self.cluster.config:
            recovery_callback = self.recovery_callback
            self.cluster.create_recovery_test(run_dir, recovery_callback)

        # Run rados bench
        monitoring.start(run_dir)
        print 'Running radosbench read test.'
        ps = []
        for i in xrange(self.concurrent_procs):
            out_file = '%s/output.%s' % (run_dir, i)
            objecter_log = '%s/objecter.%s.log' % (run_dir, i)
            p = common.pdsh(
                settings.getnodes('clients'),
                '%s -c %s -p rados-bench-`hostname -s`-%s %s bench %s %s %s --no-cleanup 2> %s > %s'
                %
                (self.cmd_path_full, self.tmp_conf, i, op_size_str, self.time,
                 mode, concurrent_ops_str, objecter_log, out_file))
            ps.append(p)
        for p in ps:
            p.wait()
        monitoring.stop(run_dir)

        # If we were doing recovery, wait until it's done.
        if 'recovery_test' in self.cluster.config:
            self.cluster.wait_recovery_done()

        # Finally, get the historic ops
        self.cluster.dump_historic_ops(run_dir)
        common.sync_files('%s/*' % run_dir, out_dir)
Example #33
    def initialize(self):
        super(KvmRbdFio, self).initialize()
        common.pdsh(settings.getnodes('clients', 'osds', 'mons', 'rgws'),
                    'sudo rm -rf %s' % self.run_dir,
                    continue_if_error=False).communicate()
        common.make_remote_dir(self.run_dir)
        clnts = settings.getnodes('clients')
        logger.info('creating mountpoints...')
        for b in self.block_devices:
            bnm = os.path.basename(b)
            mtpt = '/srv/rbdfio-`%s`-%s' % (common.get_fqdn_cmd(), bnm)
            common.pdsh(clnts,
                        'sudo mkfs.ext4 %s' % b,
                        continue_if_error=False).communicate()
            common.pdsh(clnts,
                        'sudo mkdir -p %s' % mtpt,
                        continue_if_error=False).communicate()
            common.pdsh(clnts,
                        'sudo mount -t ext4 -o noatime %s %s' % (b, mtpt),
                        continue_if_error=False).communicate()
        logger.info('Attempting to initialize fio files...')
        initializer_list = []
        for i in range(self.concurrent_procs):
            b = self.block_devices[i % len(self.block_devices)]
            bnm = os.path.basename(b)
            mtpt = '/srv/rbdfio-`hostname -s`-%s' % bnm
            fiopath = os.path.join(mtpt, 'fio%d.img' % i)
            pre_cmd = 'sudo %s --rw=write -ioengine=sync --bs=4M ' % self.fio_cmd
            pre_cmd = '%s --size %dM --name=%s > /dev/null' % (
                pre_cmd, self.vol_size, fiopath)
            initializer_list.append(
                common.pdsh(clnts, pre_cmd, continue_if_error=False))
        for p in initializer_list:
            p.communicate()

        # Create the run directory
        common.pdsh(clnts, 'rm -rf %s' % self.run_dir,
                    continue_if_error=False).communicate()
        common.make_remote_dir(self.run_dir)
Example #34
    def _run(self, mode, run_dir, out_dir):
        # We'll always drop caches for rados bench
        self.dropcaches()

        if self.concurrent_ops:
            concurrent_ops_str = '--concurrent-ios %s' % self.concurrent_ops
        op_size_str = '-b %s' % self.op_size

        common.make_remote_dir(run_dir)

        # dump the cluster config
        self.cluster.dump_config(run_dir)

        # Run the backfill testing thread if requested
        if 'recovery_test' in self.cluster.config:
            recovery_callback = self.recovery_callback
            self.cluster.create_recovery_test(run_dir, recovery_callback)

        # Run rados bench
        monitoring.start(run_dir)
        print 'Running radosbench read test.'
        ps = []
        for i in xrange(self.concurrent_procs):
            out_file = '%s/output.%s' % (run_dir, i)
            objecter_log = '%s/objecter.%s.log' % (run_dir, i)
            p = common.pdsh(settings.getnodes('clients'), '%s -c %s -p rados-bench-`hostname -s`-%s %s bench %s %s %s --no-cleanup 2> %s > %s' % (self.cmd_path_full, self.tmp_conf, i, op_size_str, self.time, mode, concurrent_ops_str, objecter_log, out_file))
            ps.append(p)
        for p in ps:
            p.wait()
        monitoring.stop(run_dir)

        # If we were doing recovery, wait until it's done.
        if 'recovery_test' in self.cluster.config:
            self.cluster.wait_recovery_done()

        # Finally, get the historic ops
        self.cluster.dump_historic_ops(run_dir)
        common.sync_files('%s/*' % run_dir, out_dir)
Example #35
File: kvmrbdfio.py Project: ASBishop/cbt
    def initialize(self): 
        super(KvmRbdFio, self).initialize()
        common.pdsh(settings.getnodes('clients', 'osds', 'mons', 'rgws'),
                    'sudo rm -rf %s' % self.run_dir,
                    continue_if_error=False).communicate()
        common.make_remote_dir(self.run_dir)
        clnts = settings.getnodes('clients')
        logger.info('creating mountpoints...')
        for b in self.block_devices:
            bnm = os.path.basename(b)
            mtpt = '/srv/rbdfio-`hostname -s`-%s' % bnm
            common.pdsh(clnts, 'sudo mkfs.ext4 %s' % b,
                        continue_if_error=False).communicate()
            common.pdsh(clnts, 'sudo mkdir -p %s' % mtpt,
                        continue_if_error=False).communicate()
            common.pdsh(clnts, 'sudo mount -t ext4 -o noatime %s %s' % (b,mtpt),
                        continue_if_error=False).communicate()
        logger.info('Attempting to initialize fio files...')
        initializer_list = []
        for i in range(self.concurrent_procs):
            b = self.block_devices[i % len(self.block_devices)]
            bnm = os.path.basename(b)
            mtpt = '/srv/rbdfio-`hostname -s`-%s' % bnm
            fiopath = os.path.join(mtpt, 'fio%d.img' % i)
            pre_cmd = 'sudo %s --rw=write -ioengine=sync --bs=4M ' % self.fio_cmd
            pre_cmd = '%s --size %dM --name=%s > /dev/null' % (
                       pre_cmd, self.vol_size, fiopath)
            initializer_list.append(common.pdsh(clnts, pre_cmd,
                                    continue_if_error=False))
        for p in initializer_list:
             p.communicate()

        # Create the run directory
        common.pdsh(clnts, 'rm -rf %s' % self.run_dir, 
                    continue_if_error=False).communicate()
        common.make_remote_dir(self.run_dir)
Example #36
    def initialize(self):
        super(Fio, self).initialize()

        # Clean and Create the run directory
        common.clean_remote_dir(self.run_dir)
        common.make_remote_dir(self.run_dir)
Example #37
    def initialize(self): 
        super(MysqlSysBench, self).initialize()

        if not self.no_rbd:
            print 'Running scrub monitoring.'
            monitoring.start("%s/scrub_monitoring" % self.run_dir)
            self.cluster.check_scrub()
            monitoring.stop()

            print 'Pausing for 60s for idle monitoring.'
            monitoring.start("%s/idle_monitoring" % self.run_dir)
            time.sleep(60)
            monitoring.stop()

        # Create the run directory
        common.make_remote_dir(self.run_dir)
        
        # Create the out directory
        common.make_remote_dir(self.out_dir)

        common.sync_files('%s/*' % self.run_dir, self.out_dir)

        self.mkimages()
 
        if not self.no_create_db:
            # Initialize the datadir
            print 'Running mysql_install_db.'
            mysql_install_cmd = 'sudo /usr/bin/mysql_install_db --no-defaults --datadir=%s --user=mysql --force > %s/mysql_install.out 2> %s/mysqlinstall.err ' % (self.mysql_datadir,self.out_dir,self.out_dir)
            common.pdsh(settings.getnodes('clients'), mysql_install_cmd).communicate()
        
            time.sleep(5)
        
            # Starting MySQL on all nodes
            print 'Starting MySQL'
            mysql_cmd = 'sudo chmod 777 %s; ' % self.out_dir
            mysql_cmd += 'sudo /usr/sbin/mysqld --no-defaults --user=mysql --datadir=%s ' % self.mysql_datadir
            mysql_cmd += '--pid-file=/tmp/mysqlsysbench.pid '
            mysql_cmd += '--innodb-buffer-pool-size=%s ' % self.mycnf_innodb_buffer_pool_size
            mysql_cmd += '--innodb-log-file-size=%s ' % self.mycnf_innodb_log_file_size
            mysql_cmd += '--innodb-log-buffer-size=%s ' % self.mycnf_innodb_log_buffer_size
            mysql_cmd += '--innodb-read-io-threads=%s ' % self.mycnf_innodb_read_io_threads
            mysql_cmd += '--innodb-write-io-threads=%s ' % self.mycnf_innodb_write_io_threads
            mysql_cmd += '--innodb-purge-threads=%s ' % self.mycnf_innodb_purge_threads
            mysql_cmd += '--innodb-doublewrite=%s ' % self.mycnf_innodb_doublewrite
            mysql_cmd += '--innodb-file-format=%s ' % self.mycnf_innodb_file_format
            mysql_cmd += '--innodb-flush-method=%s ' % self.mycnf_innodb_flush_method
            mysql_cmd += '--innodb-flush-log-at-trx-commit=%s ' % self.mycnf_innodb_flush_log_at_trx_commit
            #mysql_cmd += '--innodb-flush-neighbors=%s ' % self.mycnf_innodb_flush_neighbors  # only for percona server
            mysql_cmd += '--log-error=%s/mysqld.log ' % self.out_dir
            mysql_cmd += '--socket=%s ' % self.mysql_socket
            mysql_cmd += '--skip-networking '
            mysql_cmd += '--query-cache-size=0 '
            mysql_cmd += '--innodb-file-per-table ' 
            mysql_cmd += '--skip-performance-schema '
            mysql_cmd += ' > %s/mysql_start.out 2> %s/mysql_start.err ' % (self.out_dir,self.out_dir) 
            mysql_cmd += '&'
            common.pdsh(settings.getnodes('clients'), mysql_cmd).communicate()
        
            #give it time to start up
            print 'Waiting for 60s for mysql to start...'
            time.sleep(60)

            # Create the sysbench tables
            print 'Creating the Sysbench database...'
            mysql_cmd = '/usr/bin/mysql -e "create database sbtest;" '
            mysql_cmd += '-u root '
            mysql_cmd += '--socket=%s ' % self.mysql_socket
            common.pdsh(settings.getnodes('clients'),  mysql_cmd).communicate()
        
        if self.existing_database_is_preloaded == 0:
            # Creation of the benchmark tables
            print 'Creating the Sysbench tables...'
            pre_cmd = '%s ' % self.cmd_path_full
            pre_cmd += '--test=%s ' % self.prepare_path
            if not self.no_create_db:
                pre_cmd += '--mysql-user=root '
            else:
                pre_cmd += '--mysql-user=%s --mysql-password=%s ' % (self.mysql_user, self.mysql_pass)
            pre_cmd += '--mysql-socket=%s ' % self.mysql_socket
            pre_cmd += '--mysql-db=%s ' % self.mysql_database
            pre_cmd += '--mysql-table-engine=%s ' % self.mysql_engine
            pre_cmd += '--oltp-tables-count=%s ' % self.oltp_table_count
            pre_cmd += '--oltp-table-size=%s ' % self.oltp_table_size
            pre_cmd += '--num-threads=%s run ' % self.threads
            pre_cmd += ' > %s/sysbench_prepare.out 2> %s/sysbench_prepare.err ' % (self.out_dir,self.out_dir)
            common.pdsh(settings.getnodes('clients'), pre_cmd).communicate()
Example #38
    def _run(self, mode, run_dir, out_dir):
        # We'll always drop caches for rados bench
        self.dropcaches()

        if self.concurrent_ops:
            concurrent_ops_str = '--concurrent-ios %s' % self.concurrent_ops
        #determine rados version

        rados_version_str = self.get_rados_version()

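        # Pull the major version out of the "rados -v" banner; some builds prefix it with "v", hence the fallback below.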
        m = re.findall("version (\d+)", rados_version_str)
        if not m:
            m = re.findall("version v(\d+)", rados_version_str)

        rados_version = int(m[0])

        if mode in ['write'] or rados_version < 9:
            op_size_str = '-b %s' % self.op_size
        else:
            op_size_str = ''

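        # The flags below only exist in newer rados releases, so gate them on the parsed version.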
        # Max Objects
        max_objects_str = ''
        if self.max_objects and rados_version < 9:
            raise ValueError('max_objects not supported by rados_version < 9')
        if self.max_objects and rados_version > 9:
            max_objects_str = '--max-objects %s' % self.max_objects

        # Write to OMAP
        write_omap_str = ''
        if self.write_omap and rados_version < 9:
            raise ValueError('write_omap not supported by rados_version < 9')
        if self.write_omap and rados_version > 9:
            write_omap_str = '--write-omap'

        common.make_remote_dir(run_dir)

        # dump the cluster config
        self.cluster.dump_config(run_dir)

        # Run the backfill testing thread if requested
        if 'recovery_test' in self.cluster.config:
            recovery_callback = self.recovery_callback
            self.cluster.create_recovery_test(run_dir, recovery_callback)

        # Run rados bench
        monitoring.start(run_dir)
        logger.info('Running radosbench %s test.' % mode)
        ps = []
        for i in xrange(self.concurrent_procs):
            out_file = '%s/output.%s' % (run_dir, i)
            objecter_log = '%s/objecter.%s.log' % (run_dir, i)
            # default behavior is to use a single storage pool
            pool_name = self.pool

            run_name = '--run-name %s`%s`-%s' % (self.object_set_id,
                                                 common.get_fqdn_cmd(), i)
            if self.pool_per_proc:  # support previous behavior of 1 storage pool per rados process
                pool_name = 'rados-bench-`%s`-%d' % (common.get_fqdn_cmd(), i)
                run_name = ''
            rados_bench_cmd = '%s -c %s -p %s bench %s %s %s %s %s %s %s --no-cleanup 2> %s > %s' % \
                 (self.cmd_path_full, self.tmp_conf, pool_name, op_size_str, self.time, mode, concurrent_ops_str, max_objects_str, write_omap_str, run_name, objecter_log, out_file)
            p = common.pdsh(settings.getnodes('clients'), rados_bench_cmd)
            ps.append(p)
        for p in ps:
            p.wait()
        monitoring.stop(run_dir)

        # If we were doing recovery, wait until it's done.
        if 'recovery_test' in self.cluster.config:
            self.cluster.wait_recovery_done()

        # Finally, get the historic ops
        self.cluster.dump_historic_ops(run_dir)
        common.sync_files('%s/*' % run_dir, out_dir)
        self.analyze(out_dir)
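The two re.findall passes above exist because older releases report the version as "version 0.94.x" while newer ones prepend a "v". A small helper, sketched here under the assumption that get_rados_version() returns the raw version string (the helper name is hypothetical), folds the fallback into one pattern:

import re

def parse_rados_major_version(version_str):
    # Accepts both "... version 10.2.11 ..." and "... version v14.2.22 ..." forms.
    m = re.findall(r"version v?(\d+)", version_str)
    if not m:
        raise ValueError('unable to parse rados version from: %s' % version_str)
    return int(m[0])

# e.g. parse_rados_major_version('ceph version 10.2.11 (...)')  -> 10
#      parse_rados_major_version('ceph version v14.2.22 (...)') -> 14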
Example #39
    def _run(self, mode, run_dir, out_dir):
        # We'll always drop caches for rados bench
        self.dropcaches()

        concurrent_ops_str = ''
        if self.concurrent_ops:
            concurrent_ops_str = '--concurrent-ios %s' % self.concurrent_ops
        # determine rados version

        rados_version_str = self.get_rados_version()

        m = re.findall("version (\d+)", rados_version_str)
        if not m:
           m = re.findall("version v(\d+)", rados_version_str)

        rados_version = int(m[0])

        if mode in ['write'] or rados_version < 9:
            op_size_str = '-b %s' % self.op_size
        else:
            op_size_str = ''

        # Max Objects
        max_objects_str = ''
        if self.max_objects and rados_version < 9:
            raise ValueError('max_objects not supported by rados_version < 9')
        if self.max_objects and rados_version > 9:
            max_objects_str = '--max-objects %s' % self.max_objects

        # Write to OMAP
        write_omap_str = ''
        if self.write_omap and rados_version < 9:
            raise ValueError('write_omap not supported by rados_version < 9')
        if self.write_omap and rados_version > 9:
            write_omap_str = '--write-omap'


        common.make_remote_dir(run_dir)

        # dump the cluster config
        self.cluster.dump_config(run_dir)

        # Run the backfill testing thread if requested
        if 'recovery_test' in self.cluster.config:
            recovery_callback = self.recovery_callback
            self.cluster.create_recovery_test(run_dir, recovery_callback)

        # Run rados bench
        monitoring.start(run_dir)
        logger.info('Running radosbench %s test.' % mode)
        ps = []
        for i in xrange(self.concurrent_procs):
            out_file = '%s/output.%s' % (run_dir, i)
            objecter_log = '%s/objecter.%s.log' % (run_dir, i)
            # default behavior is to use a single storage pool 
            pool_name = self.pool

            run_name = '--run-name %s`%s`-%s'%(self.object_set_id, common.get_fqdn_cmd(), i)
            if self.pool_per_proc: # support previous behavior of 1 storage pool per rados process
                pool_name = 'rados-bench-`%s`-%d' % (common.get_fqdn_cmd(), i)
                run_name = ''
            rados_bench_cmd = '%s -c %s -p %s bench %s %s %s %s %s %s %s --no-cleanup 2> %s > %s' % \
                 (self.cmd_path_full, self.tmp_conf, pool_name, op_size_str, self.time, mode, concurrent_ops_str, max_objects_str, write_omap_str, run_name, objecter_log, out_file)
            p = common.pdsh(settings.getnodes('clients'), rados_bench_cmd)
            ps.append(p)
        for p in ps:
            p.wait()
        monitoring.stop(run_dir)

        # If we were doing recovery, wait until it's done.
        if 'recovery_test' in self.cluster.config:
            self.cluster.wait_recovery_done()

        # Finally, get the historic ops
        self.cluster.dump_historic_ops(run_dir)
        common.sync_files('%s/*' % run_dir, out_dir)
        self.analyze(out_dir)
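Both rados bench examples pass --no-cleanup, so the benchmark objects stay in the pool after the run. If they are not wanted afterwards, a follow-up cleanup pass can be issued per client; the sketch below assumes the installed rados binary provides the cleanup subcommand and that the same --run-name is used as during the bench run (the method name is hypothetical):

    def cleanup_bench_objects(self, pool_name, run_name):
        # Sketch only: remove objects left behind by `rados bench --no-cleanup`.
        # run_name is expected in the same '--run-name <name>' form built above,
        # or an empty string when pool_per_proc was used.
        cleanup_cmd = '%s -c %s -p %s cleanup %s' % (
            self.cmd_path_full, self.tmp_conf, pool_name, run_name)
        common.pdsh(settings.getnodes('clients'), cleanup_cmd).communicate()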
Example #40
    def cleandir(self):
        # Wipe and create the run directory
        common.clean_remote_dir(self.run_dir)
        common.make_remote_dir(self.run_dir)
Example #41
    def _run(self, mode, run_dir, out_dir, max_objects, runtime):
        # We'll always drop caches for rados bench
        self.dropcaches()

        concurrent_ops_str = ''
        if self.concurrent_ops:
            concurrent_ops_str = '--concurrent-ios %s' % self.concurrent_ops

        rados_version = self.get_rados_version()

        # Max Objects
        max_objects_str = ''
        if max_objects:
            if rados_version < 10:
                raise ValueError('max_objects not supported by rados_version < 10')
            max_objects_str = '--max-objects %s' % max_objects

        # Operation type 
        op_type = mode
        if mode == 'prefill':
            op_type = 'write'

        if op_type == 'write':
            op_size_str = '-b %s' % self.op_size
        else:
            op_size_str = ''  

        # Write to OMAP
        write_omap_str = ''
        if self.write_omap:
            if rados_version < 10:
                raise ValueError('write_omap not supported by rados_version < 10')
            write_omap_str = '--write-omap'

        run_dir = os.path.join(self.run_dir, run_dir)
        common.make_remote_dir(run_dir)

        # dump the cluster config
        self.cluster.dump_config(run_dir)

        # Run the backfill testing thread if requested (but not for prefill)
        if mode != 'prefill' and 'recovery_test' in self.cluster.config:
            recovery_callback = self.recovery_callback
            self.cluster.create_recovery_test(run_dir, recovery_callback)

        # Run rados bench
        with monitoring.monitor(run_dir) as monitor:
            logger.info('Running radosbench %s test.' % mode)
            ps = []
            for i in range(self.concurrent_procs):
                out_file = '%s/output.%s' % (run_dir, i)
                objecter_log = '%s/objecter.%s.log' % (run_dir, i)
                if self.pool_per_proc:
                    # support previous behavior of 1 storage pool per rados process
                    pool_name_cmd = 'rados-bench-`{fqdn_cmd}`-{i}'
                    pool_name = pool_name_cmd.format(fqdn_cmd=common.get_fqdn_cmd(), i=i)
                    run_name = ''
                else:
                    # default behavior is to use a single storage pool
                    pool_name = self.pool
                    run_name_fmt = '--run-name {object_set_id} `{fqdn_cmd}`-{i}'
                    run_name = run_name_fmt.format(
                        object_set_id=self.object_set_id,
                        fqdn_cmd=common.get_fqdn_cmd(),
                        i=i)
                rados_bench_cmd_fmt = \
                    '{cmd} -c {conf} -p {pool} bench {op_size_arg} {duration} ' \
                    '{op_type} {concurrent_ops_arg} {max_objects_arg} ' \
                    '{write_omap_arg} {run_name} --no-cleanup ' \
                    '2> {stderr} > {stdout}'
                rados_bench_cmd = rados_bench_cmd_fmt.format(
                    cmd=self.cmd_path_full,
                    conf=self.tmp_conf,
                    pool=pool_name,
                    op_size_arg=op_size_str,
                    duration=runtime,
                    op_type=op_type,
                    concurrent_ops_arg=concurrent_ops_str,
                    max_objects_arg=max_objects_str,
                    write_omap_arg=write_omap_str,
                    run_name=run_name,
                    stderr=objecter_log,
                    stdout=out_file)
                p = common.pdsh(settings.getnodes('clients'), rados_bench_cmd)
                ps.append(p)
            for p in ps:
                p.wait()

        # If we were doing recovery, wait until it's done (but not for prefill).
        if mode != 'prefill' and 'recovery_test' in self.cluster.config:
            self.cluster.wait_recovery_done()

        # Finally, get the historic ops
        self.cluster.dump_historic_ops(run_dir)

        out_dir = os.path.join(self.out_dir, out_dir)
        common.sync_files('%s/*' % run_dir, out_dir)
        self.analyze(out_dir)
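Unlike the earlier examples, which call monitoring.start() and monitoring.stop() around the benchmark explicitly, this version uses monitoring.monitor(run_dir) as a context manager so the collectors are stopped even if the benchmark raises. A minimal sketch of how such a wrapper could be layered over the start/stop API shown earlier (the real helper may return a richer handle):

import contextlib

@contextlib.contextmanager
def monitor(run_dir):
    # Sketch only: start collectors on entry, always stop them on exit.
    monitoring.start(run_dir)
    try:
        yield run_dir
    finally:
        monitoring.stop(run_dir)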
Example #42
    def cleandir(self):
        # Wipe and create the run directory
        common.clean_remote_dir(self.run_dir)
        common.make_remote_dir(self.run_dir)