def initialize(self): common.cleanup_tests() if not self.use_existing: common.setup_cluster() common.setup_ceph() # Create the run directory common.make_remote_dir(self.run_dir) # Setup the pools monitoring.start("%s/pool_monitoring" % self.run_dir) for i in xrange(self.concurrent_procs): for node in settings.getnodes('clients').split(','): node = node.rpartition("@")[2] common.pdsh(settings.getnodes('head'), 'sudo ceph osd pool create rados-bench-%s-%s %d %d' % (node, i, self.pgs_per_pool, self.pgs_per_pool)).communicate() common.pdsh(settings.getnodes('head'), 'sudo ceph osd pool set rados-bench-%s-%s size 1' % (node, i)).communicate() # check the health for each pool. print 'Checking Healh after pool creation.' common.check_health() monitoring.stop() print 'Running scrub monitoring.' monitoring.start("%s/scrub_monitoring" % self.run_dir) common.check_scrub() monitoring.stop() print 'Pausing for 60s for idle monitoring.' monitoring.start("%s/idle_monitoring" % self.run_dir) time.sleep(60) monitoring.stop() common.sync_files('%s/*' % self.run_dir, self.out_dir) return True
def initialize(self): self.cleanup() super(RbdFio, self).initialize() common.setup_cluster() common.setup_ceph() common.dump_config(self.run_dir) # Setup the pools common.pdsh(settings.getnodes('head'), 'sudo ceph osd pool create rbdfio %d %d' % (self.pgs, self.pgs)).communicate() common.pdsh(settings.getnodes('head'), 'sudo ceph osd pool set rbdfio size 1').communicate() print 'Checking Healh after pool creation.' common.check_health() common.pdsh(settings.getnodes('clients'), 'sudo modprobe rbd').communicate() for i in xrange(self.concurrent_procs): common.pdsh(settings.getnodes('clients'), 'sudo rbd create rbdfio/rbdfio-`hostname -s`-%d --size %d' % (i, self.vol_size)).communicate() # common.pdsh(settings.cluster.get('clients'), 'sudo rbd map rbdfio-`hostname -s`-%d --pool rbdfio --id admin' % i).communicate() common.pdsh(settings.getnodes('clients'), 'sudo echo "%s %s rbdfio rbdfio-`hostname -s`-%d" | sudo tee /sys/bus/rbd/add && sudo /sbin/udevadm settle' % (self.rbdadd_mons, self.rbdadd_options, i)).communicate() common.pdsh(settings.getnodes('clients'), 'sudo mkfs.xfs /dev/rbd/rbdfio/rbdfio-`hostname -s`-%d' % i).communicate() common.pdsh(settings.getnodes('clients'), 'sudo mkdir /srv/rbdfio-`hostname -s`-%d' % i).communicate() common.pdsh(settings.getnodes('clients'), 'sudo mount -t xfs -o noatime,inode64 /dev/rbd/rbdfio/rbdfio-`hostname -s`-%d /srv/rbdfio-`hostname -s`-%d' %(i, i)).communicate() common.check_scrub() # Create the run directory common.make_remote_dir(self.run_dir)
def initialize(self):
    """Set up the cluster, the rbdfio pool, and per-process RBD volumes.

    For each of self.concurrent_procs: create an RBD image, attach it
    through the kernel sysfs interface, make an xfs filesystem on it,
    and mount it under /srv on every client node.
    """
    self.cleanup()
    super(RbdFio, self).initialize()
    common.setup_cluster()
    common.setup_ceph()
    common.dump_config(self.run_dir)

    # Setup the pools
    common.pdsh(
        settings.getnodes('head'),
        'sudo ceph osd pool create rbdfio %d %d' % (self.pgs, self.pgs)).communicate()
    common.pdsh(settings.getnodes('head'),
                'sudo ceph osd pool set rbdfio size 1').communicate()
    # NOTE(review): "Healh" typo below is in a runtime message — left verbatim.
    print 'Checking Healh after pool creation.'
    common.check_health()

    # Load the kernel rbd driver on all clients before attaching volumes.
    common.pdsh(settings.getnodes('clients'), 'sudo modprobe rbd').communicate()
    for i in xrange(self.concurrent_procs):
        # One image per concurrent process, named rbdfio-<host>-<i>.
        common.pdsh(
            settings.getnodes('clients'),
            'sudo rbd create rbdfio/rbdfio-`hostname -s`-%d --size %d' % (i, self.vol_size)).communicate()
        # common.pdsh(settings.cluster.get('clients'), 'sudo rbd map rbdfio-`hostname -s`-%d --pool rbdfio --id admin' % i).communicate()
        # Attach via sysfs add, then wait for udev to create device nodes.
        common.pdsh(
            settings.getnodes('clients'),
            'sudo echo "%s %s rbdfio rbdfio-`hostname -s`-%d" | sudo tee /sys/bus/rbd/add && sudo /sbin/udevadm settle' % (self.rbdadd_mons, self.rbdadd_options, i)).communicate()
        common.pdsh(
            settings.getnodes('clients'),
            'sudo mkfs.xfs /dev/rbd/rbdfio/rbdfio-`hostname -s`-%d' % i).communicate()
        common.pdsh(settings.getnodes('clients'),
                    'sudo mkdir /srv/rbdfio-`hostname -s`-%d' % i).communicate()
        common.pdsh(
            settings.getnodes('clients'),
            'sudo mount -t xfs -o noatime,inode64 /dev/rbd/rbdfio/rbdfio-`hostname -s`-%d /srv/rbdfio-`hostname -s`-%d' % (i, i)).communicate()
    common.check_scrub()

    # Create the run directory
    common.make_remote_dir(self.run_dir)
def initialize(self):
    """Redeploy Ceph before this test when the cluster configuration
    requests a rebuild for every test iteration."""
    rebuild_requested = settings.cluster.get('rebuild_every_test', False)
    if rebuild_requested:
        common.setup_ceph()
help = 'The ceph.conf file to use.', ) parser.add_argument( 'config_file', help = 'YAML config file.', ) args = parser.parse_args() return args if __name__ == '__main__': ctx = parse_args() settings.initialize(ctx) common.setup_cluster() if not settings.cluster.get("rebuild_every_test", False): common.setup_ceph() # setup_radosbench(rb_config) print 'Checking Health.' check_health() iteration = 0 print settings.cluster while (iteration < settings.cluster.get("iterations", 0)): if os.path.exists(os.path.join(settings.cluster.get("archive_dir"), '%08d' % iteration)): print 'Skipping existing iteration %d.' % iteration iteration += 1 continue benchmarks = benchmarkfactory.getAll(iteration) for b in benchmarks: # print b