Example No. 1
    def run(self):
        try:
            ceph_conf = self.cl_obj.tmp_conf
            phost = sshtarget(settings.cluster.get('user'), self.host)

            # Setup the keyring directory
            data_dir = '%s/osd.%s' % (self.tmp_dir, self.osdnum)
            common.pdsh(phost, 'sudo rm -rf %s' % data_dir).communicate()
            common.pdsh(phost, 'mkdir -p %s' % data_dir).communicate()
            key_fn = '%s/keyring' % data_dir

            # Setup crush and the keyring
            common.pdsh(phost, 'sudo %s auth get-or-create osd.%s mon \'allow rwx\' osd \'allow *\' -o %s' % (self.cl_obj.ceph_cmd, self.osdnum, key_fn)).communicate()

            common.pdsh(phost, 'sudo %s -c %s osd crush add osd.%d 1.0 host=%s rack=localrack root=default' % (self.cl_obj.ceph_cmd, ceph_conf, self.osdnum, self.host)).communicate()
            cmd='ulimit -n 16384 && ulimit -c unlimited && exec %s -c %s -i %d --mkfs --osd-uuid %s' % (self.cl_obj.ceph_osd_cmd, ceph_conf, self.osdnum, self.osduuid)
            common.pdsh(phost, 'sudo sh -c "%s"' % cmd).communicate()

            # Start the OSD
            pidfile="%s/ceph-osd.%d.pid" % (self.cl_obj.pid_dir, self.osdnum)
            cmd = '%s -c %s -i %d --pid-file=%s' % (self.cl_obj.ceph_osd_cmd, ceph_conf, self.osdnum, pidfile)
            if self.cl_obj.osd_valgrind:
                cmd = common.setup_valgrind(self.cl_obj.osd_valgrind, 'osd.%d' % self.osdnum, self.cl_obj.tmp_dir) + ' ' + cmd
            else:
                cmd = '%s %s' % (self.cl_obj.ceph_run_cmd, cmd)
            stderr_file = "%s/osd.%d.stderr" % (self.cl_obj.tmp_dir, self.osdnum)
            common.pdsh(phost, 'sudo sh -c "ulimit -n 16384 && ulimit -c unlimited && exec %s 2> %s"' % (cmd, stderr_file)).communicate()
        except Exception as e:
            self.exc = e
        finally:
            self.response_time = time.time() - self.start_time
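Every example on this page wraps the daemon command in the string returned by common.setup_valgrind(mode, name, tmp_dir). The helper sketched below is only a guess at what such a prefix builder looks like, inferred from how the callers use it; the flags and log-file layout are assumptions for illustration, not CBT's actual implementation.

import os

def setup_valgrind(mode, name, tmp_dir):
    # Hedged sketch: return a valgrind prefix whose log file lives under tmp_dir.
    # 'mode' is the valgrind tool requested in the config (e.g. 'memcheck'),
    # 'name' identifies the daemon instance (e.g. 'osd.3') so log files stay apart.
    log_file = os.path.join(tmp_dir, 'valgrind', '%s.log' % name)
    return 'valgrind --tool=%s --log-file=%s' % (mode, log_file)

A caller then prepends it exactly as in the examples: cmd = '%s %s' % (setup_valgrind('memcheck', 'osd.0', '/tmp/cbt'), cmd).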
Example No. 2
    def start_mgrs(self):
        user = settings.cluster.get('user')
        mgrhosts = settings.cluster.get('mgrs')

        if not mgrhosts:
            return

        for mgrhost, manager in mgrhosts.iteritems():
            for mgrname, mgrsettings in manager.iteritems():
                cmd = '%s -i %s' % (self.ceph_mgr_cmd, mgrname)
                if self.mgr_valgrind:
                    cmd = "%s %s" % (common.setup_valgrind(
                        self.mgr_valgrind, mgrname, self.tmp_dir), cmd)
                else:
                    cmd = "%s %s" % (self.ceph_run_cmd, cmd)
                if user:
                    pdshhost = '%s@%s' % (user, mgrhost)
                data_dir = "%s/mgr.%s" % (self.tmp_dir, mgrname)
                common.pdsh(pdshhost,
                            'sudo mkdir -p %s' % data_dir).communicate()
                common.pdsh(
                    pdshhost,
                    'sudo %s auth get-or-create mgr.%s mon \'allow profile mgr\' mds \'allow *\' osd \'allow *\' -o %s/keyring'
                    % (self.ceph_cmd, mgrname, data_dir)).communicate()
                common.pdsh(
                    pdshhost,
                    'sudo sh -c "ulimit -n 16384 && ulimit -c unlimited && exec %s"'
                    % cmd).communicate()
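Every remote step above goes through common.pdsh(host, cmd) followed by .communicate(). As a rough sketch only (not CBT's real code), such a helper can be a thin subprocess wrapper around the pdsh binary; the user@host strings built as pdshhost fit pdsh's -w target syntax directly.

import subprocess

def pdsh(hosts, cmd, continue_if_error=True):
    # Hypothetical minimal wrapper: run 'cmd' on the comma-separated 'hosts'
    # via pdsh over ssh and return the Popen so callers can .communicate().
    # 'continue_if_error' mirrors the keyword seen in later examples; error
    # handling is omitted in this sketch.
    args = ['pdsh', '-R', 'ssh', '-w', hosts, cmd]
    return subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)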
Example No. 3
    def run(self):
        if self.osd_ra and self.osd_ra_changed:
            logger.info('Setting OSD Read Ahead to: %s', self.osd_ra)
            self.cluster.set_osd_param('read_ahead_kb', self.osd_ra)

        logger.debug('Cleaning existing temporary run directory: %s',
                     self.run_dir)
        common.pdsh(settings.getnodes('clients', 'osds', 'mons', 'rgws'),
                    'sudo rm -rf %s' % self.run_dir).communicate()
        if self.valgrind is not None:
            logger.debug('Adding valgrind to the command path.')
            self.cmd_path_full = common.setup_valgrind(self.valgrind,
                                                       self.getclass(),
                                                       self.run_dir)
        # Set the full command path
        self.cmd_path_full += self.cmd_path

        # Store the parameters of the test run
        config_file = os.path.join(self.archive_dir, 'benchmark_config.yaml')
        if not os.path.exists(self.archive_dir):
            os.makedirs(self.archive_dir)
        if not os.path.exists(config_file):
            config_dict = dict(cluster=self.config)
            with open(config_file, 'w') as fd:
                yaml.dump(config_dict, fd, default_flow_style=False)
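The benchmark_config.yaml written at the end of this run() is simply the benchmark's config dictionary nested under a cluster key. With a made-up config, the dump would look roughly like this:

import yaml

# Hypothetical config values for illustration only.
config_dict = dict(cluster={'iteration': 0, 'osd_ra': 128, 'valgrind': None})
print(yaml.dump(config_dict, default_flow_style=False))
# cluster:
#   iteration: 0
#   osd_ra: 128
#   valgrind: null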
Example No. 4
    def make_osds(self):
        osdnum = 0
        osdhosts = settings.cluster.get('osds')

        for host in osdhosts:
            user = settings.cluster.get('user')
            if user:
                pdshhost = '%s@%s' % (user, host)

            for i in xrange(0, settings.cluster.get('osds_per_node')):            
                # Build the OSD
                osduuid = str(uuid.uuid4())
                key_fn = '%s/osd-device-%s-data/keyring' % (self.mnt_dir, i)
                common.pdsh(pdshhost, 'sudo ceph -c %s osd create %s' % (self.tmp_conf, osduuid)).communicate()
                common.pdsh(pdshhost, 'sudo ceph -c %s osd crush add osd.%d 1.0 host=%s rack=localrack root=default' % (self.tmp_conf, osdnum, host)).communicate()
                common.pdsh(pdshhost, 'sudo sh -c "ulimit -n 16384 && ulimit -c unlimited && exec %s -c %s -i %d --mkfs --mkkey --osd-uuid %s"' % (self.ceph_osd_cmd, self.tmp_conf, osdnum, osduuid)).communicate()
                common.pdsh(pdshhost, 'sudo ceph -c %s -i %s auth add osd.%d osd "allow *" mon "allow profile osd"' % (self.tmp_conf, key_fn, osdnum)).communicate()

                # Start the OSD
                pidfile="%s/ceph-osd.%d.pid" % (self.pid_dir, osdnum)
                cmd = '%s -c %s -i %d --pid-file=%s' % (self.ceph_osd_cmd, self.tmp_conf, osdnum, pidfile)
                if self.osd_valgrind:
                    cmd = "%s %s" % (common.setup_valgrind(self.osd_valgrind, 'osd.%d' % osdnum, self.tmp_dir), cmd)
                else:
                    cmd = 'ceph-run %s' % cmd

                common.pdsh(pdshhost, 'sudo sh -c "ulimit -n 16384 && ulimit -c unlimited && exec %s"' % cmd).communicate()
                osdnum = osdnum+1
Example No. 5
    def make_osds(self):
        osdnum = 0
        osdhosts = settings.cluster.get('osds')

        for host in osdhosts:
            user = settings.cluster.get('user')
            if user:
                pdshhost = '%s@%s' % (user, host)

            for i in xrange(0, settings.cluster.get('osds_per_node')):            
                # Build the OSD
                osduuid = str(uuid.uuid4())
                key_fn = '%s/osd-device-%s-data/keyring' % (self.mnt_dir, i)
                common.pdsh(pdshhost, 'sudo %s -c %s osd create %s' % (self.ceph_cmd, self.tmp_conf, osduuid)).communicate()
                common.pdsh(pdshhost, 'sudo %s -c %s osd crush add osd.%d 1.0 host=%s rack=localrack root=default' % (self.ceph_cmd, self.tmp_conf, osdnum, host)).communicate()
                common.pdsh(pdshhost, 'sudo sh -c "ulimit -n 16384 && ulimit -c unlimited && exec %s -c %s -i %d --mkfs --mkkey --osd-uuid %s"' % (self.ceph_osd_cmd, self.tmp_conf, osdnum, osduuid)).communicate()
                common.pdsh(pdshhost, 'sudo %s -c %s -i %s auth add osd.%d osd "allow *" mon "allow profile osd"' % (self.ceph_cmd, self.tmp_conf, key_fn, osdnum)).communicate()

                # Start the OSD
                pidfile="%s/ceph-osd.%d.pid" % (self.pid_dir, osdnum)
                cmd = '%s -c %s -i %d --pid-file=%s' % (self.ceph_osd_cmd, self.tmp_conf, osdnum, pidfile)
                if self.osd_valgrind:
                    cmd = "%s %s" % (common.setup_valgrind(self.osd_valgrind, 'osd.%d' % osdnum, self.tmp_dir), cmd)
                else:
                    cmd = '%s %s' % (self.ceph_run_cmd, cmd)
                stderr_file = "%s/osd.%d.stderr" % (self.tmp_dir, osdnum)

                common.pdsh(pdshhost, 'sudo sh -c "ulimit -n 16384 && ulimit -c unlimited && exec %s 2> %s"' % (cmd, stderr_file)).communicate()
                osdnum = osdnum+1
Example No. 6
    def __init__(self, archive_dir, cluster, config):
        self.acceptable = config.pop('acceptable', {})
        self.config = config
        self.cluster = cluster
        hashable = json.dumps(sorted(self.config.items())).encode()
        digest = hashlib.sha1(hashable).hexdigest()[:8]
        self.archive_dir = os.path.join(
            archive_dir, 'results', '{:0>8}'.format(config.get('iteration')),
            'id-{}'.format(digest))
        self.run_dir = os.path.join(settings.cluster.get('tmp_dir'),
                                    '{:0>8}'.format(config.get('iteration')),
                                    self.getclass())
        self.osd_ra = config.get('osd_ra', None)
        self.cmd_path = ''
        self.valgrind = config.get('valgrind', None)
        self.cmd_path_full = ''
        self.log_iops = config.get('log_iops', True)
        self.log_bw = config.get('log_bw', True)
        self.log_lat = config.get('log_lat', True)
        if self.valgrind is not None:
            self.cmd_path_full = common.setup_valgrind(self.valgrind,
                                                       self.getclass(),
                                                       self.run_dir)

        self.osd_ra_changed = False
        if self.osd_ra:
            self.osd_ra_changed = True
        else:
            self.osd_ra = common.get_osd_ra()
Example No. 7
    def __init__(self, cluster, config):
        self.config = config
        self.cluster = cluster
        #        self.cluster = Ceph(settings.cluster)
        self.archive_dir = "%s/%s/%08d/%s%s" % (
            settings.cluster.get('archive_dir'), "results",
            config.get('iteration'), "id",
            hash(frozenset((self.config).items())))
        self.run_dir = "%s/%08d/%s" % (settings.cluster.get('tmp_dir'),
                                       config.get('iteration'),
                                       self.getclass())
        self.osd_ra = config.get('osd_ra', None)
        self.cmd_path = ''
        self.valgrind = config.get('valgrind', None)
        self.cmd_path_full = ''
        self.log_iops = config.get('log_iops', True)
        self.log_bw = config.get('log_bw', True)
        self.log_lat = config.get('log_lat', True)
        if self.valgrind is not None:
            self.cmd_path_full = common.setup_valgrind(self.valgrind,
                                                       self.getclass(),
                                                       self.run_dir)

        self.osd_ra_changed = False
        if self.osd_ra:
            self.osd_ra_changed = True
        else:
            self.osd_ra = common.get_osd_ra()
Example No. 8
 def run(self):
     print "Setting OSD Read Ahead to: %s" % self.osd_ra
     self.cluster.set_osd_param("read_ahead_kb", self.osd_ra)
     print "Cleaning existing temporary run directory: %s" % self.run_dir
     common.pdsh(settings.getnodes("clients", "osds", "mons", "rgws"), "sudo rm -rf %s" % self.run_dir).communicate()
     if self.valgrind is not None:
         print "Adding valgrind to the command path."
         self.cmd_path_full = common.setup_valgrind(self.valgrind, self.getclass(), self.run_dir)
Example No. 9
    def start_rgw(self):
        user = settings.cluster.get('user')
        rgwhosts = settings.cluster.get('rgws')

        if not rgwhosts:
            return

        # If we are starting RGW, make the RGW pools
        self.make_rgw_pools()

        for rgwhost, gateways in rgwhosts.items():
            for rgwname, rgwsettings in gateways.items():
                host = rgwsettings.get('host', rgwhost)
                port = rgwsettings.get('port', None)
                ssl_certificate = rgwsettings.get('ssl_certificate', None)

                # Build the urls (s3)
                url = "http://" if ssl_certificate is None else "https://"
                url += host
                url += ":7480" if port is None else ":%s" % port
                self.urls.append(url)

                # Build the auth urls (swift)
                auth_url = url + "/auth/v1.0"
                self.auth_urls.append(auth_url)

                # set the rgw_frontends
                rgw_frontends = None
                if ssl_certificate is not None:
                    rgw_frontends = "civetweb ssl_certificate=%s" % ssl_certificate
                if port is not None:
                    if rgw_frontends is None:
                        rgw_frontends = "civetweb"
                    rgw_frontends += " port=%s" % port

                cmd = '%s -c %s -n %s --log-file=%s/rgw.log' % (self.ceph_rgw_cmd, self.tmp_conf, rgwname, self.log_dir)
                if rgw_frontends is not None:
                    cmd += " --rgw-frontends='%s'" % rgw_frontends
                if self.rgw_valgrind:
                    cmd = "%s %s" % (common.setup_valgrind(self.rgw_valgrind, 'rgw.%s' % host, self.tmp_dir), cmd)
                else:
                    cmd = '%s %s' % (self.ceph_run_cmd, cmd)

                if user:
                    pdshhost = '%s@%s' % (user, rgwhost)
                common.pdsh(pdshhost, 'sudo sh -c "ulimit -n 16384 && ulimit -c unlimited && exec %s"' % cmd).communicate()

                # set min_size of pools to 1, when there is only one osd
                num_osds = len(settings.cluster.get('osds'))
                rgw_default_pools = ['.rgw.root', 'default.rgw.control', 'default.rgw.meta', 'default.rgw.log']
                pool_min_repl_size = 1

                if num_osds == 1:
                    time.sleep(5)
                    for pool in rgw_default_pools:
                        common.pdsh(settings.getnodes('head'),
                                    'sudo %s -c %s osd pool set %s min_size %d' % (self.ceph_cmd, self.tmp_conf, pool, pool_min_repl_size),
                                    continue_if_error=False).communicate()
                        time.sleep(5)
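To make the URL handling above concrete, here is the same string logic with made-up gateway settings; with an SSL certificate and an explicit port the loop yields an https URL on that port, otherwise plain http on 7480:

# Illustrative values only; real ones come from the rgws section of the cluster config.
host, port, ssl_certificate = 'gw1', 8080, '/etc/ceph/rgw.pem'

url = "http://" if ssl_certificate is None else "https://"
url += host
url += ":7480" if port is None else ":%s" % port
auth_url = url + "/auth/v1.0"
print(url)       # https://gw1:8080
print(auth_url)  # https://gw1:8080/auth/v1.0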
Example No. 10
 def run(self):
     print 'Setting OSD Read Ahead to: %s' % self.osd_ra
     self.cluster.set_osd_param('read_ahead_kb', self.osd_ra)
     print 'Cleaning existing temporary run directory: %s' % self.run_dir
     common.pdsh(settings.getnodes('clients', 'osds', 'mons', 'rgws'), 'sudo rm -rf %s' % self.run_dir)
     if self.valgrind is not None:
         print 'Adding valgrind to the command path.'
         self.cmd_path_full = common.setup_valgrind(self.valgrind, self.getclass(), self.run_dir)
     # Set the full command path
     self.cmd_path_full += self.cmd_path
Example No. 11
    def start_rgw(self):
        user = settings.cluster.get('user')
        rgwhosts = settings.cluster.get('rgws')

        if not rgwhosts:
            return

        # If we are starting RGW, make the RGW pools
        self.make_rgw_pools()

        for rgwhost, gateways in rgwhosts.iteritems():
            for rgwname, rgwsettings in gateways.iteritems():
                host = rgwsettings.get('host', rgwhost)
                port = rgwsettings.get('port', None)
                ssl_certificate = rgwsettings.get('ssl_certificate', None)

                # Build the auth_url
                auth_url = "http://" if ssl_certificate is None else "https://"
                auth_url += host
                auth_url += ":7480" if port is None else ":%s" % port
                auth_url += "/auth/v1.0"
                self.auth_urls.append(auth_url)

                # set the rgw_frontends
                rgw_frontends = None
                if ssl_certificate is not None:
                    rgw_frontends = "civetweb ssl_certificate=%s" % ssl_certificate
                if port is not None:
                    if rgw_frontends is None:
                        rgw_frontends = "civetweb"
                    rgw_frontends += " port=%s" % port

                cmd = '%s -c %s -n %s --log-file=%s/rgw.log' % (self.ceph_rgw_cmd, self.tmp_conf, rgwname, self.log_dir)
                if rgw_frontends is not None:
                    cmd += " --rgw-frontends='%s'" % rgw_frontends
                if self.rgw_valgrind:
                    cmd = "%s %s" % (common.setup_valgrind(self.rgw_valgrind, 'rgw.%s' % host, self.tmp_dir), cmd)
                else:
                    cmd = '%s %s' % (self.ceph_run_cmd, cmd)

                if user:
                    pdshhost = '%s@%s' % (user, rgwhost)
                common.pdsh(pdshhost, 'sudo sh -c "ulimit -n 16384 && ulimit -c unlimited && exec %s"' % cmd).communicate()

                # set min_size of pools to 1, when there is only one osd
                num_osds = len(settings.cluster.get('osds'))
                rgw_default_pools = ['.rgw.root', 'default.rgw.control', 'default.rgw.meta', 'default.rgw.log']
                pool_min_repl_size = 1

                if num_osds == 1:
                    time.sleep(5)
                    for pool in rgw_default_pools:
                        common.pdsh(settings.getnodes('head'),
                                    'sudo %s -c %s osd pool set %s min_size %d' % (self.ceph_cmd, self.tmp_conf, pool, pool_min_repl_size),
                                    continue_if_error=False).communicate()
                        time.sleep(5)
Example No. 12
    def __init__(self, cluster, config):
        self.config = config
        self.cluster = cluster
#        self.cluster = Ceph(settings.cluster)
        self.archive_dir = "%s/%08d/%s" % (settings.cluster.get('archive_dir'), config.get('iteration'), self.getclass())
        self.run_dir = "%s/%08d/%s" % (settings.cluster.get('tmp_dir'), config.get('iteration'), self.getclass())
        self.osd_ra = config.get('osd_ra', 128)
        self.cmd_path = ''
        self.valgrind = config.get('valgrind', None)
        self.cmd_path_full = '' 
        if self.valgrind is not None:
            self.cmd_path_full = common.setup_valgrind(self.valgrind, self.getclass(), self.run_dir)
    def run(self):
        if self.osd_ra and self.osd_ra_changed:
            logger.info('Setting OSD Read Ahead to: %s', self.osd_ra)
            self.cluster.set_osd_param('read_ahead_kb', self.osd_ra)

        logger.debug('Cleaning existing temporary run directory: %s', self.run_dir)
        common.pdsh(settings.getnodes('clients', 'osds', 'mons', 'rgws'), 'sudo rm -rf %s' % self.run_dir).communicate()
        if self.valgrind is not None:
            logger.debug('Adding valgrind to the command path.')
            self.cmd_path_full = common.setup_valgrind(self.valgrind, self.getclass(), self.run_dir)
        # Set the full command path
        self.cmd_path_full += self.cmd_path
Example No. 14
    def make_mons(self):
        # Build and distribute the client keyring
        client_admin_dir = "%s/client.admin" % self.tmp_dir
        common.pdsh(settings.getnodes('head', 'clients', 'osds', 'mons', 'rgws', 'mgrs'), 'rm -rf %s' % client_admin_dir).communicate()
        common.pdsh(settings.getnodes('head', 'clients', 'osds', 'mons', 'rgws', 'mgrs'), 'mkdir -p %s' % client_admin_dir).communicate()

        keyring_fn = os.path.join(client_admin_dir, "keyring")
        common.pdsh(settings.getnodes('head'), '%s --create-keyring --gen-key --name=mon. %s --cap mon \'allow *\'' % (self.ceph_authtool_cmd, keyring_fn)).communicate()
        common.pdsh(settings.getnodes('head'), '%s --gen-key --name=client.admin --cap mon \'allow *\' --cap osd \'allow *\' --cap mds \'allow *\' --cap mgr \'allow *\' %s' % (self.ceph_authtool_cmd, keyring_fn)).communicate()
        common.rscp(settings.getnodes('head'), keyring_fn, '%s.tmp' % keyring_fn).communicate()
        common.pdcp(settings.getnodes('head', 'clients', 'osds', 'mons', 'rgws', 'mgrs'), '', '%s.tmp' % keyring_fn, keyring_fn).communicate()
        common.pdsh(settings.getnodes('head', 'clients', 'osds', 'mons', 'rgws', 'mgrs'), 'sudo mv %s %s.cbt.bak' % (self.client_keyring, self.client_keyring)).communicate()
        common.pdsh(settings.getnodes('head', 'clients', 'osds', 'mons', 'rgws', 'mgrs'), 'sudo ln -s %s %s' % (keyring_fn, self.client_keyring)).communicate()
        common.pdsh(settings.getnodes('head', 'clients', 'osds', 'mons', 'rgws', 'mgrs'), 'sudo mv %s %s.cbt.bak' % (self.client_secret, self.client_secret)).communicate()
        common.pdsh(settings.getnodes('head', 'clients', 'osds', 'mons', 'rgws', 'mgrs'), 'sudo sh -c \'%s --print-key %s > %s\'' % (self.ceph_authtool_cmd, self.client_keyring, self.client_secret)).communicate()
        # Build the monmap, retrieve it, and distribute it
        mons = settings.getnodes('mons').split(',')
        cmd = 'monmaptool --create --clobber'
        monhosts = self.get_mon_hosts()
        logger.info(monhosts)
        for monhost, mons in monhosts.items():
            for mon, addr in mons.items():
                cmd = cmd + ' --add %s %s' % (mon, addr)
        cmd = cmd + ' --print %s' % self.monmap_fn
        common.pdsh(settings.getnodes('head'), cmd).communicate()
        common.rscp(settings.getnodes('head'), self.monmap_fn, '%s.tmp' % self.monmap_fn).communicate()
        common.pdcp(settings.getnodes('mons'), '', '%s.tmp' % self.monmap_fn, self.monmap_fn).communicate()

        # Build the ceph-mons
        user = settings.cluster.get('user')
        for monhost, mons in monhosts.items():
            if user:
                monhost = '%s@%s' % (user, monhost)
            for mon, addr in mons.items():
                common.pdsh(monhost, 'sudo rm -rf %s/mon.%s' % (self.tmp_dir, mon)).communicate()
                common.pdsh(monhost, 'mkdir -p %s/mon.%s' % (self.tmp_dir, mon)).communicate()
                common.pdsh(monhost, 'sudo sh -c "ulimit -c unlimited && exec %s --mkfs -c %s -i %s --monmap=%s --keyring=%s"' % (self.ceph_mon_cmd, self.tmp_conf, mon, self.monmap_fn, keyring_fn)).communicate()
                common.pdsh(monhost, 'cp %s %s/mon.%s/keyring' % (keyring_fn, self.tmp_dir, mon)).communicate()
            
        # Start the mons
        for monhost, mons in monhosts.items():
            if user:
                monhost = '%s@%s' % (user, monhost)
            for mon, addr in mons.items():
                pidfile="%s/%s.pid" % (self.pid_dir, monhost)
                cmd = 'sudo sh -c "ulimit -n 16384 && ulimit -c unlimited && exec %s -c %s -i %s --keyring=%s --pid-file=%s"' % (self.ceph_mon_cmd, self.tmp_conf, mon, keyring_fn, pidfile)
                if self.mon_valgrind:
                    cmd = "%s %s" % (common.setup_valgrind(self.mon_valgrind, 'mon.%s' % monhost, self.tmp_dir), cmd)
                else:
                    cmd = '%s %s' % (self.ceph_run_cmd, cmd)
                common.pdsh(monhost, 'sudo %s' % cmd).communicate()
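The monmap step in this example boils down to one monmaptool invocation. With a made-up monhosts mapping (the real one comes from get_mon_hosts()), the command string grows like this:

# Hypothetical mon layout and path for illustration only.
monhosts = {'mon-a': {'a': '10.0.0.1:6789'}, 'mon-b': {'b': '10.0.0.2:6789'}}
monmap_fn = '/tmp/cbt/monmap'

cmd = 'monmaptool --create --clobber'
for monhost, mons in monhosts.items():
    for mon, addr in mons.items():
        cmd = cmd + ' --add %s %s' % (mon, addr)
cmd = cmd + ' --print %s' % monmap_fn
print(cmd)
# monmaptool --create --clobber --add a 10.0.0.1:6789 --add b 10.0.0.2:6789 --print /tmp/cbt/monmap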
Example No. 15
    def start_rgw(self):
        rgwhosts = settings.cluster.get('rgws', [])

        for host in rgwhosts:
            pdshhost = host
            user = settings.cluster.get('user')
            if user:
                pdshhost = '%s@%s' % (user, host)
            cmd = '%s -c %s -n client.radosgw.gateway --log-file=%s/rgw.log' % (self.ceph_rgw_cmd, self.tmp_conf, self.log_dir)
            if self.rgw_valgrind:
                cmd = "%s %s" % (common.setup_valgrind(self.rgw_valgrind, 'rgw.%s' % host, self.tmp_dir), cmd)
            else:
                cmd = '%s %s' % (self.ceph_run_cmd, cmd)

            common.pdsh(pdshhost, 'sudo sh -c "ulimit -n 16384 && ulimit -c unlimited && exec %s"' % cmd).communicate()
Example No. 16
    def start_rgw(self):
        rgwhosts = settings.cluster.get('rgws', [])

        for host in rgwhosts:
            pdshhost = host
            user = settings.cluster.get('user')
            if user:
                pdshhost = '%s@%s' % (user, host)
            cmd = '%s -c %s -n client.radosgw.gateway --log-file=%s/rgw.log' % (self.ceph_rgw_cmd, self.tmp_conf, self.log_dir)
            if self.rgw_valgrind:
                cmd = "%s %s" % (common.setup_valgrind(self.rgw_valgrind, 'rgw.%s' % host, self.tmp_dir), cmd)
            else:
                cmd = 'ceph-run %s' % cmd

            common.pdsh(pdshhost, 'sudo sh -c "ulimit -n 16384 && ulimit -c unlimited && exec %s"' % cmd).communicate()
Example No. 17
    def start_rgw(self):
        user = settings.cluster.get('user')
        rgwhosts = settings.cluster.get('rgws')

        if not rgwhosts:
            return

        # If we are starting RGW, make the RGW pools
        self.make_rgw_pools()

        for rgwhost, gateways in rgwhosts.iteritems():
            for rgwname, rgwsettings in gateways.iteritems():
                host = rgwsettings.get('host', rgwhost)
                port = rgwsettings.get('port', None)
                ssl_certificate = rgwsettings.get('ssl_certificate', None)

                # Build the auth_url
                auth_url = "http://" if ssl_certificate is None else "https://"
                auth_url += host
                auth_url += ":7480" if port is None else ":%s" % port
                auth_url += "/auth/v1.0"
                self.auth_urls.append(auth_url)

                # set the rgw_frontends
                rgw_frontends = None
                if ssl_certificate is not None:
                    rgw_frontends = "civetweb ssl_certificate=%s" % ssl_certificate
                if port is not None:
                    if rgw_frontends is None:
                        rgw_frontends = "civetweb"
                    rgw_frontends += " port=%s" % port

                cmd = '%s -c %s -n %s --log-file=%s/rgw.log' % (
                    self.ceph_rgw_cmd, self.tmp_conf, rgwname, self.log_dir)
                if rgw_frontends is not None:
                    cmd += " --rgw-frontends='%s'" % rgw_frontends
                if self.rgw_valgrind:
                    cmd = "%s %s" % (common.setup_valgrind(
                        self.rgw_valgrind, 'rgw.%s' % host, self.tmp_dir), cmd)
                else:
                    cmd = '%s %s' % (self.ceph_run_cmd, cmd)

                if user:
                    pdshhost = '%s@%s' % (user, rgwhost)
                common.pdsh(
                    pdshhost,
                    'sudo sh -c "ulimit -n 16384 && ulimit -c unlimited && exec %s"'
                    % cmd).communicate()
Example No. 18
 def __init__(self, cluster, config):
     self.config = config
     self.cluster = cluster
     #        self.cluster = Ceph(settings.cluster)
     self.archive_dir = "%s/%08d/%s" % (
         settings.cluster.get("archive_dir"),
         config.get("iteration"),
         self.getclass(),
     )
     self.run_dir = "%s/%08d/%s" % (settings.cluster.get("tmp_dir"), config.get("iteration"), self.getclass())
     self.osd_ra = config.get("osd_ra", 128)
     self.cmd_path = ""
     self.valgrind = config.get("valgrind", None)
     self.cmd_path_full = ""
     if self.valgrind is not None:
         self.cmd_path_full = common.setup_valgrind(self.valgrind, self.getclass(), self.run_dir)
Example No. 19
    def start_mgrs(self):
        user = settings.cluster.get('user')
        mgrhosts = settings.cluster.get('mgrs')

        if not mgrhosts:
            return

        for mgrhost, manager in mgrhosts.iteritems():
            for mgrname, mgrsettings in manager.iteritems():
                cmd = '%s -i %s' % (self.ceph_mgr_cmd, mgrname)
                if self.mgr_valgrind:
                    cmd = "%s %s" % (common.setup_valgrind(self.mgr_valgrind, mgrname, self.tmp_dir), cmd)
                else:
                    cmd = "%s %s" % (self.ceph_run_cmd, cmd)
                if user:
                    pdshhost = '%s@%s' % (user, mgrhost)
                common.pdsh(pdshhost, 'sudo sh -c "ulimit -n 16384 && ulimit -c unlimited && exec %s"' % cmd).communicate()
Example No. 20
    def start_mgrs(self):
        user = settings.cluster.get('user')
        mgrhosts = settings.cluster.get('mgrs')

        if not mgrhosts:
            return

        for mgrhost, manager in mgrhosts.iteritems():
            for mgrname, mgrsettings in manager.iteritems():
                cmd = '%s -i %s' % (self.ceph_mgr_cmd, mgrname)
                if self.mgr_valgrind:
                    cmd = "%s %s" % (common.setup_valgrind(self.mgr_valgrind, mgrname, self.tmp_dir), cmd)
                else:
                    cmd = "%s %s" % (self.ceph_run_cmd, cmd)
                if user:
                    pdshhost = '%s@%s' % (user, mgrhost)
                common.pdsh(pdshhost, 'sudo sh -c "ulimit -n 16384 && ulimit -c unlimited && exec %s"' % cmd).communicate()
Example No. 21
    def make_mons(self):
        # Build and distribute the keyring
        common.pdsh(settings.getnodes('head'), 'ceph-authtool --create-keyring --gen-key --name=mon. %s --cap mon \'allow *\'' % self.keyring_fn).communicate()
        common.pdsh(settings.getnodes('head'), 'ceph-authtool --gen-key --name=client.admin --set-uid=0 --cap mon \'allow *\' --cap osd \'allow *\' --cap mds allow %s' % self.keyring_fn).communicate()
        common.rscp(settings.getnodes('head'), self.keyring_fn, '%s.tmp' % self.keyring_fn).communicate()
        common.pdcp(settings.getnodes('mons', 'osds', 'rgws', 'mds'), '', '%s.tmp' % self.keyring_fn, self.keyring_fn).communicate()

        # Build the monmap, retrieve it, and distribute it
        mons = settings.getnodes('mons').split(',')
        cmd = 'monmaptool --create --clobber'
        monhosts = settings.cluster.get('mons')
        logger.info(monhosts)
        for monhost, mons in monhosts.iteritems():
            for mon, addr in mons.iteritems():
                cmd = cmd + ' --add %s %s' % (mon, addr)
        cmd = cmd + ' --print %s' % self.monmap_fn
        common.pdsh(settings.getnodes('head'), cmd).communicate()
        common.rscp(settings.getnodes('head'), self.monmap_fn, '%s.tmp' % self.monmap_fn).communicate()
        common.pdcp(settings.getnodes('mons'), '', '%s.tmp' % self.monmap_fn, self.monmap_fn).communicate()

        # Build the ceph-mons
        user = settings.cluster.get('user')
        for monhost, mons in monhosts.iteritems():
            if user:
                monhost = '%s@%s' % (user, monhost)
            for mon, addr in mons.iteritems():
                common.pdsh(monhost, 'sudo rm -rf %s/mon.%s' % (self.tmp_dir, mon)).communicate()
                common.pdsh(monhost, 'mkdir -p %s/mon.%s' % (self.tmp_dir, mon)).communicate()
                common.pdsh(monhost, 'sudo sh -c "ulimit -c unlimited && exec %s --mkfs -c %s -i %s --monmap=%s --keyring=%s"' % (self.ceph_mon_cmd, self.tmp_conf, mon, self.monmap_fn, self.keyring_fn)).communicate()
                common.pdsh(monhost, 'cp %s %s/mon.%s/keyring' % (self.keyring_fn, self.tmp_dir, mon)).communicate()
            
        # Start the mons
        for monhost, mons in monhosts.iteritems():
            if user:
                monhost = '%s@%s' % (user, monhost)
            for mon, addr in mons.iteritems():
                pidfile="%s/%s.pid" % (self.pid_dir, monhost)
                cmd = 'sudo sh -c "ulimit -n 16384 && ulimit -c unlimited && exec %s -c %s -i %s --keyring=%s --pid-file=%s"' % (self.ceph_mon_cmd, self.tmp_conf, mon, self.keyring_fn, pidfile)
                if self.mon_valgrind:
                    cmd = "%s %s" % (common.setup_valgrind(self.mon_valgrind, 'mon.%s' % monhost, self.tmp_dir), cmd)
                else:
                    cmd = '%s %s' % (self.ceph_run_cmd, cmd)
                common.pdsh(monhost, 'sudo %s' % cmd).communicate()
Example No. 22
    def make_mons(self):
        # Build and distribute the keyring
        common.pdsh(settings.getnodes('head'), 'ceph-authtool --create-keyring --gen-key --name=mon. %s --cap mon \'allow *\'' % self.keyring_fn).communicate()
        common.pdsh(settings.getnodes('head'), 'ceph-authtool --gen-key --name=client.admin --set-uid=0 --cap mon \'allow *\' --cap osd \'allow *\' --cap mds allow %s' % self.keyring_fn).communicate()
        common.rscp(settings.getnodes('head'), self.keyring_fn, '%s.tmp' % self.keyring_fn).communicate()
        common.pdcp(settings.getnodes('mons', 'osds', 'rgws', 'mds'), '', '%s.tmp' % self.keyring_fn, self.keyring_fn).communicate()

        # Build the monmap, retrieve it, and distribute it
        mons = settings.getnodes('mons').split(',')
        cmd = 'monmaptool --create --clobber'
        monhosts = settings.cluster.get('mons')
        print monhosts
        for monhost, mons in monhosts.iteritems():
            for mon, addr in mons.iteritems():
                cmd = cmd + ' --add %s %s' % (mon, addr)
        cmd = cmd + ' --print %s' % self.monmap_fn
        common.pdsh(settings.getnodes('head'), cmd).communicate()
        common.rscp(settings.getnodes('head'), self.monmap_fn, '%s.tmp' % self.monmap_fn).communicate()
        common.pdcp(settings.getnodes('mons'), '', '%s.tmp' % self.monmap_fn, self.monmap_fn).communicate()

        # Build the ceph-mons
        user = settings.cluster.get('user')
        for monhost, mons in monhosts.iteritems():
            if user:
                monhost = '%s@%s' % (user, monhost)
            for mon, addr in mons.iteritems():
                common.pdsh(monhost, 'sudo rm -rf %s/mon.%s' % (self.tmp_dir, mon)).communicate()
                common.pdsh(monhost, 'mkdir -p %s/mon.%s' % (self.tmp_dir, mon)).communicate()
                common.pdsh(monhost, 'sudo sh -c "ulimit -c unlimited && exec %s --mkfs -c %s -i %s --monmap=%s --keyring=%s"' % (self.ceph_mon_cmd, self.tmp_conf, mon, self.monmap_fn, self.keyring_fn)).communicate()
                common.pdsh(monhost, 'cp %s %s/mon.%s/keyring' % (self.keyring_fn, self.tmp_dir, mon)).communicate()
            
        # Start the mons
        for monhost, mons in monhosts.iteritems():
            if user:
                monhost = '%s@%s' % (user, monhost)
            for mon, addr in mons.iteritems():
                pidfile="%s/%s.pid" % (self.pid_dir, monhost)
                cmd = 'sudo sh -c "ulimit -c unlimited && exec %s -c %s -i %s --keyring=%s --pid-file=%s"' % (self.ceph_mon_cmd, self.tmp_conf, mon, self.keyring_fn, pidfile)
                if self.mon_valgrind:
                    cmd = "%s %s" % (common.setup_valgrind(self.mon_valgrind, 'mon.%s' % monhost, self.tmp_dir), cmd)
                else:
                    cmd = 'ceph-run %s' % cmd
                common.pdsh(monhost, 'sudo %s' % cmd).communicate()
Example No. 23
    def __init__(self, cluster, config):
        self.config = config
        self.cluster = cluster
#        self.cluster = Ceph(settings.cluster)
        self.archive_dir = "%s/%s/%08d/%s%s" % (settings.cluster.get('archive_dir'),
                                                "results", config.get('iteration'), "id",
                                                hash(frozenset((self.config).items())))
        self.run_dir = "%s/%08d/%s" % (settings.cluster.get('tmp_dir'), config.get('iteration'), self.getclass())
        self.osd_ra = config.get('osd_ra', None)
        self.cmd_path = ''
        self.valgrind = config.get('valgrind', None)
        self.cmd_path_full = '' 
        if self.valgrind is not None:
            self.cmd_path_full = common.setup_valgrind(self.valgrind, self.getclass(), self.run_dir)

        self.osd_ra_changed = False
        if self.osd_ra:
            self.osd_ra_changed = True
        else:
            self.osd_ra = common.get_osd_ra()
Example No. 24
    def run(self):
        if self.osd_ra and self.osd_ra_changed:
            logger.info('Setting OSD Read Ahead to: %s', self.osd_ra)
            self.cluster.set_osd_param('read_ahead_kb', self.osd_ra)

        logger.debug('Cleaning existing temporary run directory: %s', self.run_dir)
        common.pdsh(settings.getnodes('clients', 'osds', 'mons', 'rgws'), 'sudo rm -rf %s' % self.run_dir).communicate()
        if self.valgrind is not None:
            logger.debug('Adding valgrind to the command path.')
            self.cmd_path_full = common.setup_valgrind(self.valgrind, self.getclass(), self.run_dir)
        # Set the full command path
        self.cmd_path_full += self.cmd_path

        # Store the parameters of the test run
        config_file = os.path.join(self.archive_dir, 'benchmark_config.yaml')
        if not os.path.exists(self.archive_dir):
            os.makedirs(self.archive_dir)
        if not os.path.exists(config_file):
            config_dict = dict(cluster=self.config)
            with open(config_file, 'w') as fd:
                yaml.dump(config_dict, fd, default_flow_style=False)
Example No. 25
    def start_rgw(self):
        user = settings.cluster.get('user')
        rgwhosts = settings.cluster.get('rgws')

        if not rgwhosts:
            return

        for rgwhost, gateways in rgwhosts.iteritems():
            for rgwname, rgwsettings in gateways.iteritems():
                host = rgwsettings.get('host', rgwhost)
                port = rgwsettings.get('port', None)
                ssl_certificate = rgwsettings.get('ssl_certificate', None)

                # Build the auth_url
                auth_url = "http://" if ssl_certificate is None else "https://"
                auth_url += host
                auth_url += ":7480" if port is None else ":%s" % port
                auth_url += "/auth/v1.0"
                self.auth_urls.append(auth_url)

                # set the rgw_frontends
                rgw_frontends = None
                if ssl_certificate is not None:
                    rgw_frontends = "civetweb ssl_certificate=%s" % ssl_certificate
                if port is not None:
                    if rgw_frontends is None:
                        rgw_frontends = "civetweb"
                    rgw_frontends += " port=%s" % port

                cmd = '%s -c %s -n %s --log-file=%s/rgw.log' % (self.ceph_rgw_cmd, self.tmp_conf, rgwname, self.log_dir)
                if rgw_frontends is not None:
                    cmd += " --rgw-frontends='%s'" % rgw_frontends
                if self.rgw_valgrind:
                    cmd = "%s %s" % (common.setup_valgrind(self.rgw_valgrind, 'rgw.%s' % host, self.tmp_dir), cmd)
                else:
                    cmd = '%s %s' % (self.ceph_run_cmd, cmd)

                if user:
                    pdshhost = '%s@%s' % (user, rgwhost)
                common.pdsh(pdshhost, 'sudo sh -c "ulimit -n 16384 && ulimit -c unlimited && exec %s"' % cmd).communicate()
Example No. 26
    def run(self):
        try:
            key_fn = '%s/keyring' % self.osddir
            ceph_conf = self.cl_obj.tmp_conf
            phost = sshtarget(settings.cluster.get('user'), self.host)
            common.pdsh(
                phost,
                'sudo %s -c %s osd crush add osd.%d 1.0 host=%s rack=localrack root=default'
                % (self.cl_obj.ceph_cmd, ceph_conf, self.osdnum,
                   self.host)).communicate()
            cmd = 'ulimit -n 16384 && ulimit -c unlimited && exec %s -c %s -i %d --mkfs --mkkey --osd-uuid %s' % (
                self.cl_obj.ceph_osd_cmd, ceph_conf, self.osdnum, self.osduuid)
            common.pdsh(phost, 'sudo sh -c "%s"' % cmd).communicate()
            common.pdsh(
                phost,
                'sudo %s -c %s -i %s auth add osd.%d osd "allow *" mon "allow profile osd" mgr "allow"'
                % (self.cl_obj.ceph_cmd, ceph_conf, key_fn,
                   self.osdnum)).communicate()

            # Start the OSD
            pidfile = "%s/ceph-osd.%d.pid" % (self.cl_obj.pid_dir, self.osdnum)
            cmd = '%s -c %s -i %d --pid-file=%s' % (
                self.cl_obj.ceph_osd_cmd, ceph_conf, self.osdnum, pidfile)
            if self.cl_obj.osd_valgrind:
                cmd = common.setup_valgrind(self.cl_obj.osd_valgrind,
                                            'osd.%d' % self.osdnum,
                                            self.cl_obj.tmp_dir) + ' ' + cmd
            else:
                cmd = '%s %s' % (self.cl_obj.ceph_run_cmd, cmd)
            stderr_file = "%s/osd.%d.stderr" % (self.cl_obj.tmp_dir,
                                                self.osdnum)
            common.pdsh(
                phost,
                'sudo sh -c "ulimit -n 16384 && ulimit -c unlimited && exec %s 2> %s"'
                % (cmd, stderr_file)).communicate()
        except Exception as e:
            self.exc = e
        finally:
            self.response_time = time.time() - self.start_time
Example No. 27
    def run(self):
        try:
            key_fn = '%s/keyring'%self.osddir
            ceph_conf = self.cl_obj.tmp_conf
            phost = sshtarget(settings.cluster.get('user'), self.host)
            common.pdsh(phost, 'sudo %s -c %s osd crush add osd.%d 1.0 host=%s rack=localrack root=default' % (self.cl_obj.ceph_cmd, ceph_conf, self.osdnum, self.host)).communicate()
            cmd='ulimit -n 16384 && ulimit -c unlimited && exec %s -c %s -i %d --mkfs --mkkey --osd-uuid %s' % (self.cl_obj.ceph_osd_cmd, ceph_conf, self.osdnum, self.osduuid)
            common.pdsh(phost, 'sudo sh -c "%s"' % cmd).communicate()
            common.pdsh(phost, 'sudo %s -c %s -i %s auth add osd.%d osd "allow *" mon "allow profile osd" mgr "allow"' % (self.cl_obj.ceph_cmd, ceph_conf, key_fn, self.osdnum)).communicate()

            # Start the OSD
            pidfile="%s/ceph-osd.%d.pid" % (self.cl_obj.pid_dir, self.osdnum)
            cmd = '%s -c %s -i %d --pid-file=%s' % (self.cl_obj.ceph_osd_cmd, ceph_conf, self.osdnum, pidfile)
            if self.cl_obj.osd_valgrind:
                cmd = common.setup_valgrind(self.cl_obj.osd_valgrind, 'osd.%d' % self.osdnum, self.cl_obj.tmp_dir) + ' ' + cmd
            else:
                cmd = '%s %s' % (self.cl_obj.ceph_run_cmd, cmd)
            stderr_file = "%s/osd.%d.stderr" % (self.cl_obj.tmp_dir, self.osdnum)
            common.pdsh(phost, 'sudo sh -c "ulimit -n 16384 && ulimit -c unlimited && exec %s 2> %s"' % (cmd, stderr_file)).communicate()
        except Exception as e:
            self.exc = e
        finally:
            self.response_time = time.time() - self.start_time
Example No. 28
    def make_mons(self):
        # Build and distribute the keyring
        common.pdsh(
            settings.getnodes("head"),
            "ceph-authtool --create-keyring --gen-key --name=mon. %s --cap mon 'allow *'" % self.keyring_fn,
        ).communicate()
        common.pdsh(
            settings.getnodes("head"),
            "ceph-authtool --gen-key --name=client.admin --set-uid=0 --cap mon 'allow *' --cap osd 'allow *' --cap mds allow %s"
            % self.keyring_fn,
        ).communicate()
        common.rscp(settings.getnodes("head"), self.keyring_fn, "%s.tmp" % self.keyring_fn).communicate()
        common.pdcp(
            settings.getnodes("mons", "osds", "rgws", "mds"), "", "%s.tmp" % self.keyring_fn, self.keyring_fn
        ).communicate()

        # Build the monmap, retrieve it, and distribute it
        mons = settings.getnodes("mons").split(",")
        cmd = "monmaptool --create --clobber"
        monhosts = settings.cluster.get("mons")
        print monhosts
        for monhost, mons in monhosts.iteritems():
            for mon, addr in mons.iteritems():
                cmd = cmd + " --add %s %s" % (mon, addr)
        cmd = cmd + " --print %s" % self.monmap_fn
        common.pdsh(settings.getnodes("head"), cmd).communicate()
        common.rscp(settings.getnodes("head"), self.monmap_fn, "%s.tmp" % self.monmap_fn).communicate()
        common.pdcp(settings.getnodes("mons"), "", "%s.tmp" % self.monmap_fn, self.monmap_fn).communicate()

        # Build the ceph-mons
        user = settings.cluster.get("user")
        for monhost, mons in monhosts.iteritems():
            if user:
                monhost = "%s@%s" % (user, monhost)
            for mon, addr in mons.iteritems():
                common.pdsh(monhost, "sudo rm -rf %s/mon.%s" % (self.tmp_dir, mon)).communicate()
                common.pdsh(monhost, "mkdir -p %s/mon.%s" % (self.tmp_dir, mon)).communicate()
                common.pdsh(
                    monhost,
                    'sudo sh -c "ulimit -c unlimited && exec %s --mkfs -c %s -i %s --monmap=%s --keyring=%s"'
                    % (self.ceph_mon_cmd, self.tmp_conf, mon, self.monmap_fn, self.keyring_fn),
                ).communicate()
                common.pdsh(monhost, "cp %s %s/mon.%s/keyring" % (self.keyring_fn, self.tmp_dir, mon)).communicate()

        # Start the mons
        for monhost, mons in monhosts.iteritems():
            if user:
                monhost = "%s@%s" % (user, monhost)
            for mon, addr in mons.iteritems():
                pidfile = "%s/%s.pid" % (self.pid_dir, monhost)
                cmd = 'sudo sh -c "ulimit -c unlimited && exec %s -c %s -i %s --keyring=%s --pid-file=%s"' % (
                    self.ceph_mon_cmd,
                    self.tmp_conf,
                    mon,
                    self.keyring_fn,
                    pidfile,
                )
                if self.mon_valgrind:
                    cmd = "%s %s" % (common.setup_valgrind(self.mon_valgrind, "mon.%s" % monhost, self.tmp_dir), cmd)
                else:
                    cmd = "ceph-run %s" % cmd
                common.pdsh(monhost, "sudo %s" % cmd).communicate()