def run_rest_api_daemon(ctx, api_clients):
    """
    Start a ceph-rest-api daemon (via daemon-helper) for every client role
    listed in ``api_clients``, then yield; on exit stop all 'restapi'
    daemons that were started.

    :param ctx: Context (provides ``cluster`` and, after this call,
                ``daemons``)
    :param api_clients: iterable of client role names that should run the
                        rest api
    """
    if not hasattr(ctx, 'daemons'):
        # First daemon-starting task to run; create the registry lazily.
        ctx.daemons = CephState()
    remotes = ctx.cluster.only(teuthology.is_type('client')).remotes
    # NOTE: testdir/coverage_dir were computed here historically but never
    # used by the rest-api command line, so they have been removed.
    for rems, roles in remotes.iteritems():
        for whole_id_ in roles:
            if whole_id_ in api_clients:
                # Strip the leading 'client.' prefix to get the bare id.
                id_ = whole_id_[len('client.'):]
                run_cmd = [
                    'sudo',
                    'daemon-helper',
                    'kill',
                    'ceph-rest-api',
                    '-n',
                    'client.rest{id}'.format(id=id_),
                ]
                cl_rest_id = 'client.rest{id}'.format(id=id_)
                ctx.daemons.add_daemon(rems, 'restapi', cl_rest_id,
                                       args=run_cmd,
                                       logger=log.getChild(cl_rest_id),
                                       stdin=run.PIPE,
                                       wait=False,
                                       )
    try:
        yield
    finally:
        # TODO: destroy only the daemons started above -- would require
        # modifying iter_daemons_of_role.
        teuthology.stop_daemons_of_type(ctx, 'restapi')
def run_rest_api_daemon(ctx, api_clients):
    """
    Start a ceph-rest-api daemon for every client role in ``api_clients``
    and block until each daemon answers an HTTP status request, then
    yield; on exit stop all 'restapi' daemons.

    :param ctx: Context (provides ``cluster`` and, after this call,
                ``daemons``)
    :param api_clients: iterable of client role names that should run the
                        rest api
    :raises RuntimeError: if a started daemon never answers on port 5000
    """
    if not hasattr(ctx, 'daemons'):
        # First daemon-starting task to run; create the registry lazily.
        ctx.daemons = CephState()
    remotes = ctx.cluster.only(teuthology.is_type('client')).remotes
    for rems, roles in remotes.iteritems():
        for whole_id_ in roles:
            if whole_id_ in api_clients:
                # NOTE(review): len('clients') == 7 == len('client.'), so
                # this slices off a presumed 'client.' prefix -- confirm
                # that is the intended prefix.
                id_ = whole_id_[len('clients'):]
                run_cmd = [
                    'sudo',
                    'daemon-helper',
                    'kill',
                    'ceph-rest-api',
                    '-n',
                    'client.rest{id}'.format(id=id_),
                ]
                cl_rest_id = 'client.rest{id}'.format(id=id_)
                ctx.daemons.add_daemon(rems, 'restapi', cl_rest_id,
                                       args=run_cmd,
                                       logger=log.getChild(cl_rest_id),
                                       stdin=run.PIPE,
                                       wait=False,
                                       )
                # Poll the api with wget (up to 11 tries, 5s apart) until
                # it starts answering; add_daemon() returns before the
                # daemon is ready.
                for i in range(1, 12):
                    log.info('testing for ceph-rest-api try {0}'.format(i))
                    run_cmd = [
                        'wget',
                        '-O',
                        '/dev/null',
                        '-q',
                        'http://localhost:5000/api/v0.1/status'
                    ]
                    proc = rems.run(
                        args=run_cmd,
                        check_status=False  # exit status checked below
                    )
                    if proc.exitstatus == 0:
                        break
                    time.sleep(5)
                # proc holds the last attempt; nonzero means every try failed.
                if proc.exitstatus != 0:
                    raise RuntimeError('Cannot contact ceph-rest-api')
    try:
        yield
    finally:
        """
        TO DO: destroy daemons started -- modify iter_daemons_of_role
        """
        teuthology.stop_daemons_of_type(ctx, 'restapi')
def run_daemon(ctx, config, type_):
    """
    Run daemons for a role type.  Handle the startup and termination of
    a daemon.  On startup -- set coverage, cpu_profile and valgrind
    values for all remotes.  On cleanup -- stop all existing daemons of
    this type.

    :param ctx: Context
    :param config: Configuration
    :param type_: Role type (e.g. 'mon', 'osd', 'mds')
    """
    log.info('Starting %s daemons...' % type_)
    testdir = teuthology.get_testdir(ctx)
    daemons = ctx.cluster.only(teuthology.is_type(type_))

    # check whether any daemons of this type are configured
    if daemons is None:
        return
    coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir)

    # valgrind and coverage runs need a graceful SIGTERM so results are
    # flushed; otherwise SIGKILL is fine.
    daemon_signal = 'kill'
    if config.get('coverage') or config.get('valgrind') is not None:
        daemon_signal = 'term'

    for remote, roles_for_host in daemons.remotes.iteritems():
        for id_ in teuthology.roles_of_type(roles_for_host, type_):
            name = '%s.%s' % (type_, id_)

            # Wrapper commands run before the daemon binary itself.
            run_cmd = [
                'sudo',
                'adjust-ulimits',
                'ceph-coverage',
                coverage_dir,
                'daemon-helper',
                daemon_signal,
            ]
            # The actual daemon invocation (-f keeps it in the foreground
            # so daemon-helper can supervise it).
            run_cmd_tail = [
                'ceph-%s' % (type_),
                '-f',
                '-i', id_]

            if type_ in config.get('cpu_profile', []):
                profile_path = '/var/log/ceph/profiling-logger/%s.%s.prof' % (type_, id_)
                run_cmd.extend(['env', 'CPUPROFILE=%s' % profile_path])

            if config.get('valgrind') is not None:
                valgrind_args = None
                if type_ in config['valgrind']:
                    valgrind_args = config['valgrind'][type_]
                # A per-daemon entry (e.g. 'osd.0') overrides the per-type one.
                if name in config['valgrind']:
                    valgrind_args = config['valgrind'][name]
                run_cmd = teuthology.get_valgrind_args(testdir, name, run_cmd, valgrind_args)

            run_cmd.extend(run_cmd_tail)

            ctx.daemons.add_daemon(remote, type_, id_,
                                   args=run_cmd,
                                   logger=log.getChild(name),
                                   stdin=run.PIPE,
                                   wait=False,
                                   )

    try:
        yield
    finally:
        teuthology.stop_daemons_of_type(ctx, type_)
def start_rgw(ctx, config):
    """
    Start an rgw (radosgw) daemon for every client in ``config`` and
    yield; on exit stop all 'rgw' daemons and remove their ops-log
    sockets.

    :param ctx: Context (provides ``cluster``, ``daemons`` and ``rgw``)
    :param config: per-client configuration dicts keyed by client role
    """
    log.info('Starting rgw...')
    testdir = teuthology.get_testdir(ctx)
    for client in config.iterkeys():
        # Each client role maps to exactly one remote host.
        (remote,) = ctx.cluster.only(client).remotes.iterkeys()

        client_config = config.get(client)
        if client_config is None:
            client_config = {}
        log.info("rgw %s config is %s", client, client_config)
        # id_ is the part after 'client.' in the role name.
        id_ = client.split('.', 1)[1]
        log.info('client {client} is id {id}'.format(client=client, id=id_))
        # Wrapper commands run in front of the radosgw binary.
        cmd_prefix = [
            'sudo',
            'adjust-ulimits',
            'ceph-coverage',
            '{tdir}/archive/coverage'.format(tdir=testdir),
            'daemon-helper',
            'term',
        ]

        rgw_cmd = ['radosgw']

        # Frontend-specific arguments: apache talks over a fastcgi unix
        # socket, civetweb listens directly on the configured port.
        if ctx.rgw.frontend == 'apache':
            rgw_cmd.extend([
                '--rgw-socket-path',
                '{tdir}/apache/tmp.{client}/fastcgi_sock/rgw_sock'.format(
                    tdir=testdir,
                    client=client,
                ),
            ])
        elif ctx.rgw.frontend == 'civetweb':
            host, port = ctx.rgw.role_endpoints[client]
            rgw_cmd.extend([
                '--rgw-frontends',
                'civetweb port={port}'.format(port=port),
            ])

        # Common arguments; stdout is teed to a per-client log file.
        rgw_cmd.extend([
            '-n', client,
            '-k', '/etc/ceph/ceph.{client}.keyring'.format(client=client),
            '--log-file',
            '/var/log/ceph/rgw.{client}.log'.format(client=client),
            '--rgw_ops_log_socket_path',
            '{tdir}/rgw.opslog.{client}.sock'.format(tdir=testdir,
                                                     client=client),
            '--foreground',
            run.Raw('|'),
            'sudo',
            'tee',
            '/var/log/ceph/rgw.{client}.stdout'.format(tdir=testdir,
                                                       client=client),
            run.Raw('2>&1'),
        ])

        if client_config.get('valgrind'):
            cmd_prefix = teuthology.get_valgrind_args(
                testdir,
                client,
                cmd_prefix,
                client_config.get('valgrind'))

        run_cmd = list(cmd_prefix)
        run_cmd.extend(rgw_cmd)

        ctx.daemons.add_daemon(
            remote, 'rgw', client,
            args=run_cmd,
            logger=log.getChild(client),
            stdin=run.PIPE,
            wait=False,
        )

    try:
        yield
    finally:
        teuthology.stop_daemons_of_type(ctx, 'rgw')
        # Clean up the ops-log sockets created by the daemons.
        for client in config.iterkeys():
            ctx.cluster.only(client).run(args=[
                'rm',
                '-f',
                '{tdir}/rgw.opslog.{client}.sock'.format(tdir=testdir,
                                                         client=client),
            ],
            )
def start_rgw(ctx, config, on_client=None, except_client=None):
    """
    Start an rgw (radosgw) daemon for the selected clients, wait until
    each gateway accepts HTTP connections, then yield; on exit stop all
    'rgw' daemons and remove their ops-log sockets.

    :param ctx: Context (provides ``cluster``, ``daemons`` and
                ``multisite``)
    :param config: per-client configuration dicts keyed by client role
    :param on_client: if given, start rgw only on this client; otherwise
                      on every client in ``config``
    :param except_client: client to skip, if any
    """
    log.info('Starting rgw...')
    log.debug('client %r', on_client)
    clients_to_run = [on_client]
    if on_client is None:
        clients_to_run = config.keys()
    testdir = teuthology.get_testdir(ctx)
    for client in clients_to_run:
        if client == except_client:
            continue
        # Each client role maps to exactly one remote host.
        (remote,) = ctx.cluster.only(client).remotes.iterkeys()
        cluster_name, daemon_type, client_id = teuthology.split_role(client)
        client_with_id = daemon_type + '.' + client_id
        client_with_cluster = cluster_name + '.' + client_with_id
        zone = rgw_utils.zone_for_client(ctx, client)
        log.debug('zone %s', zone)
        client_config = config.get(client)
        if client_config is None:
            client_config = {}
        log.info("rgw %s config is %s", client, client_config)
        id_ = client.split('.', 1)[1]
        log.info('client {client} is id {id}'.format(client=client, id=id_))
        # Wrapper commands run in front of the radosgw binary.
        cmd_prefix = [
            'sudo',
            'adjust-ulimits',
            'ceph-coverage',
            '{tdir}/archive/coverage'.format(tdir=testdir),
            'daemon-helper',
            'term',
        ]

        rgw_cmd = ['radosgw']

        # This variant always uses the civetweb frontend on the endpoint
        # assigned to the client in ctx.multisite.
        host, port = ctx.multisite.role_endpoints[client]
        rgw_cmd.extend([
            '--rgw-frontends',
            'civetweb port={port}'.format(port=port),
        ])

        if zone is not None:
            rgw_cmd.extend(['--rgw-zone', zone])

        # Common arguments; file names include the cluster name so
        # multiple clusters can coexist on one host.
        rgw_cmd.extend([
            '-n', client_with_id,
            '--cluster', cluster_name,
            '-k', '/etc/ceph/{client_with_cluster}.keyring'.format(
                client_with_cluster=client_with_cluster),
            '--log-file',
            '/var/log/ceph/rgw.{client_with_cluster}.log'.format(
                client_with_cluster=client_with_cluster),
            '--rgw_ops_log_socket_path',
            '{tdir}/rgw.opslog.{client_with_cluster}.sock'.format(
                tdir=testdir, client_with_cluster=client_with_cluster),
            '--foreground',
            run.Raw('|'),
            'sudo',
            'tee',
            '/var/log/ceph/rgw.{client_with_cluster}.stdout'.format(
                tdir=testdir, client_with_cluster=client_with_cluster),
            run.Raw('2>&1'),
        ])

        run_cmd = list(cmd_prefix)
        run_cmd.extend(rgw_cmd)

        ctx.daemons.add_daemon(
            remote, 'rgw', client,
            args=run_cmd,
            logger=log.getChild(client),
            stdin=run.PIPE,
            wait=False,
        )

    # XXX: add_daemon() doesn't let us wait until radosgw finishes startup
    # use a connection pool with retry/backoff to poll each gateway until
    # it starts listening
    http = PoolManager(retries=Retry(connect=8, backoff_factor=1))
    for client in clients_to_run:
        if client == except_client:
            continue
        host, port = ctx.multisite.role_endpoints[client]
        endpoint = 'http://{host}:{port}/'.format(host=host, port=port)
        log.info(
            'Polling {client} until it starts accepting connections on {endpoint}'
            .format(client=client, endpoint=endpoint))
        # Raises if the gateway never starts listening (after retries).
        http.request('GET', endpoint)

    try:
        yield
    finally:
        teuthology.stop_daemons_of_type(ctx, 'rgw')
        # Clean up the ops-log sockets created by the daemons.
        for client in config.iterkeys():
            ctx.cluster.only(client).run(args=[
                'rm',
                '-f',
                '{tdir}/rgw.opslog.{client}.sock'.format(tdir=testdir,
                                                         client=client),
            ],
            )
def start_rgw(ctx, config):
    """
    Start an rgw (radosgw) daemon for every client in ``config`` and
    yield; on exit stop all 'rgw' daemons and remove their ops-log
    sockets.

    :param ctx: Context (provides ``cluster``, ``daemons`` and ``rgw``)
    :param config: per-client configuration dicts keyed by client role
    """
    log.info('Starting rgw...')
    testdir = teuthology.get_testdir(ctx)
    for client in config.iterkeys():
        # Each client role maps to exactly one remote host.
        (remote,) = ctx.cluster.only(client).remotes.iterkeys()

        client_config = config.get(client)
        if client_config is None:
            client_config = {}
        log.info("rgw %s config is %s", client, client_config)
        # id_ is the part after 'client.' in the role name.
        id_ = client.split('.', 1)[1]
        log.info('client {client} is id {id}'.format(client=client, id=id_))
        # Wrapper commands run in front of the radosgw binary.
        cmd_prefix = [
            'sudo',
            'adjust-ulimits',
            'ceph-coverage',
            '{tdir}/archive/coverage'.format(tdir=testdir),
            'daemon-helper',
            'term',
        ]

        rgw_cmd = ['radosgw']

        # Frontend-specific arguments: apache talks over a fastcgi unix
        # socket, civetweb listens directly on the configured port.
        if ctx.rgw.frontend == 'apache':
            rgw_cmd.extend([
                '--rgw-socket-path',
                '{tdir}/apache/tmp.{client}/fastcgi_sock/rgw_sock'.format(
                    tdir=testdir,
                    client=client,
                ),
            ])
        elif ctx.rgw.frontend == 'civetweb':
            host, port = ctx.rgw.role_endpoints[client]
            rgw_cmd.extend([
                '--rgw-frontends',
                'civetweb port={port}'.format(port=port),
            ])

        # Common arguments; stdout is teed to a per-client log file.
        rgw_cmd.extend([
            '-n', client,
            '-k', '/etc/ceph/ceph.{client}.keyring'.format(client=client),
            '--log-file',
            '/var/log/ceph/rgw.{client}.log'.format(client=client),
            '--rgw_ops_log_socket_path',
            '{tdir}/rgw.opslog.{client}.sock'.format(tdir=testdir,
                                                     client=client),
            '--foreground',
            run.Raw('|'),
            'sudo',
            'tee',
            '/var/log/ceph/rgw.{client}.stdout'.format(tdir=testdir,
                                                       client=client),
            run.Raw('2>&1'),
        ])

        if client_config.get('valgrind'):
            cmd_prefix = teuthology.get_valgrind_args(
                testdir,
                client,
                cmd_prefix,
                client_config.get('valgrind')
            )

        run_cmd = list(cmd_prefix)
        run_cmd.extend(rgw_cmd)

        ctx.daemons.add_daemon(
            remote, 'rgw', client,
            args=run_cmd,
            logger=log.getChild(client),
            stdin=run.PIPE,
            wait=False,
        )

    try:
        yield
    finally:
        teuthology.stop_daemons_of_type(ctx, 'rgw')
        # Clean up the ops-log sockets created by the daemons.
        for client in config.iterkeys():
            ctx.cluster.only(client).run(
                args=[
                    'rm',
                    '-f',
                    '{tdir}/rgw.opslog.{client}.sock'.format(tdir=testdir,
                                                             client=client),
                ],
            )
def run_daemon(ctx, config, type_):
    """
    Run daemons for a role type in a named cluster.  Handle the startup
    and termination of a daemon.  On startup -- set coverage,
    cpu_profile and valgrind values for all remotes.  On cleanup -- stop
    all existing daemons of this type in this cluster.

    :param ctx: Context
    :param config: Configuration (must contain a 'cluster' key)
    :param type_: Role type (e.g. 'mon', 'osd', 'mds')
    """
    cluster_name = config['cluster']
    log.info('Starting %s daemons in cluster %s...', type_, cluster_name)
    testdir = teuthology.get_testdir(ctx)
    daemons = ctx.cluster.only(teuthology.is_type(type_, cluster_name))

    # check whether any daemons of this type are configured
    if daemons is None:
        return
    coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir)

    # valgrind and coverage runs need a graceful SIGTERM so results are
    # flushed; otherwise SIGKILL is fine.
    daemon_signal = 'kill'
    if config.get('coverage') or config.get('valgrind') is not None:
        daemon_signal = 'term'

    for remote, roles_for_host in daemons.remotes.iteritems():
        is_type_ = teuthology.is_type(type_, cluster_name)
        for role in roles_for_host:
            # A host may carry roles of other types/clusters; skip those.
            if not is_type_(role):
                continue
            _, _, id_ = teuthology.split_role(role)
            # Wrapper commands run before the daemon binary itself.
            run_cmd = [
                'sudo',
                'adjust-ulimits',
                'ceph-coverage',
                coverage_dir,
                'daemon-helper',
                daemon_signal,
            ]
            # The actual daemon invocation (-f keeps it in the foreground
            # so daemon-helper can supervise it).
            run_cmd_tail = [
                'ceph-%s' % (type_),
                '-f',
                '--cluster', cluster_name,
                '-i', id_
            ]

            if type_ in config.get('cpu_profile', []):
                profile_path = '/var/log/ceph/profiling-logger/%s.prof' % (
                    role)
                run_cmd.extend(['env', 'CPUPROFILE=%s' % profile_path])

            if config.get('valgrind') is not None:
                valgrind_args = None
                if type_ in config['valgrind']:
                    valgrind_args = config['valgrind'][type_]
                # A per-role entry overrides the per-type one.
                if role in config['valgrind']:
                    valgrind_args = config['valgrind'][role]
                run_cmd = teuthology.get_valgrind_args(testdir, role,
                                                       run_cmd, valgrind_args)

            run_cmd.extend(run_cmd_tail)

            ctx.daemons.add_daemon(
                remote, type_, id_,
                cluster=cluster_name,
                args=run_cmd,
                logger=log.getChild(role),
                stdin=run.PIPE,
                wait=False,
            )

    try:
        yield
    finally:
        teuthology.stop_daemons_of_type(ctx, type_, cluster_name)
def start_rgw(ctx, config, on_client=None, except_client=None):
    """
    Start an rgw (radosgw) daemon for the selected clients and yield; on
    exit stop all 'rgw' daemons and remove their ops-log sockets.

    :param ctx: Context (provides ``cluster``, ``daemons`` and ``rgw``)
    :param config: per-client configuration dicts keyed by client role
    :param on_client: if given, start rgw only on this client; otherwise
                      on every client in ``config``
    :param except_client: client to skip, if any
    """
    log.info('Starting rgw...')
    log.debug('client %r', on_client)
    clients_to_run = [on_client]
    if on_client is None:
        clients_to_run = config.keys()
    testdir = teuthology.get_testdir(ctx)
    for client in clients_to_run:
        if client == except_client:
            continue
        # Each client role maps to exactly one remote host.
        (remote,) = ctx.cluster.only(client).remotes.iterkeys()
        zone = rgw_utils.zone_for_client(ctx, client)
        log.debug('zone %s', zone)
        client_config = config.get(client)
        if client_config is None:
            client_config = {}
        log.info("rgw %s config is %s", client, client_config)
        # id_ is the part after 'client.' in the role name.
        id_ = client.split('.', 1)[1]
        log.info('client {client} is id {id}'.format(client=client, id=id_))
        # Wrapper commands run in front of the radosgw binary.
        cmd_prefix = [
            'sudo',
            'adjust-ulimits',
            'ceph-coverage',
            '{tdir}/archive/coverage'.format(tdir=testdir),
            'daemon-helper',
            'term',
        ]

        rgw_cmd = ['radosgw']

        # Frontend-specific arguments.
        if ctx.rgw.frontend == 'apache':
            # Apache may talk to rgw over a unix-domain fastcgi socket or
            # over tcp (mod_proxy_fcgi), depending on configuration.
            if ctx.rgw.use_fastcgi or _use_uds_with_fcgi(remote):
                rgw_cmd.extend([
                    '--rgw-socket-path',
                    '{tdir}/apache/tmp.{client}/fastcgi_sock/rgw_sock'.format(
                        tdir=testdir,
                        client=client,
                    ),
                    '--rgw-frontends',
                    'fastcgi',
                ])
            else:
                # for mod_proxy_fcgi, using tcp
                rgw_cmd.extend([
                    '--rgw-socket-path', '',
                    '--rgw-print-continue', 'false',
                    '--rgw-frontends',
                    'fastcgi socket_port=9000 socket_host=0.0.0.0',
                ])
        elif ctx.rgw.frontend == 'civetweb':
            host, port = ctx.rgw.role_endpoints[client]
            rgw_cmd.extend([
                '--rgw-frontends',
                'civetweb port={port}'.format(port=port),
            ])

        if zone is not None:
            rgw_cmd.extend(['--rgw-zone', zone])

        # Common arguments; stdout is teed to a per-client log file.
        rgw_cmd.extend([
            '-n', client,
            '-k', '/etc/ceph/ceph.{client}.keyring'.format(client=client),
            '--log-file',
            '/var/log/ceph/rgw.{client}.log'.format(client=client),
            '--rgw_ops_log_socket_path',
            '{tdir}/rgw.opslog.{client}.sock'.format(tdir=testdir,
                                                     client=client),
            '--foreground',
            run.Raw('|'),
            'sudo',
            'tee',
            '/var/log/ceph/rgw.{client}.stdout'.format(tdir=testdir,
                                                       client=client),
            run.Raw('2>&1'),
        ])

        if client_config.get('valgrind'):
            cmd_prefix = teuthology.get_valgrind_args(
                testdir,
                client,
                cmd_prefix,
                client_config.get('valgrind')
            )

        run_cmd = list(cmd_prefix)
        run_cmd.extend(rgw_cmd)

        ctx.daemons.add_daemon(
            remote, 'rgw', client,
            args=run_cmd,
            logger=log.getChild(client),
            stdin=run.PIPE,
            wait=False,
        )

    try:
        yield
    finally:
        teuthology.stop_daemons_of_type(ctx, 'rgw')
        # Clean up the ops-log sockets created by the daemons.
        for client in config.iterkeys():
            ctx.cluster.only(client).run(
                args=[
                    'rm',
                    '-f',
                    '{tdir}/rgw.opslog.{client}.sock'.format(tdir=testdir,
                                                             client=client),
                ],
            )
def run_daemon(ctx, config, type_):
    """
    Run daemons for a role type.  Handle the startup and termination of
    a daemon.  On startup -- set coverage, cpu_profile and valgrind
    values for all remotes, and a max_mds value for one mds.  On cleanup
    -- stop all existing daemons of this type.

    :param ctx: Context
    :param config: Configuration
    :param type_: Role type (e.g. 'mon', 'osd', 'mds')
    """
    log.info('Starting %s daemons...' % type_)
    testdir = teuthology.get_testdir(ctx)
    daemons = ctx.cluster.only(teuthology.is_type(type_))

    # check whether any daemons of this type are configured
    if daemons is None:
        return
    coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir)

    # valgrind and coverage runs need a graceful SIGTERM so results are
    # flushed; otherwise SIGKILL is fine.
    daemon_signal = 'kill'
    if config.get('coverage') or config.get('valgrind') is not None:
        daemon_signal = 'term'

    num_active = 0
    for remote, roles_for_host in daemons.remotes.iteritems():
        for id_ in teuthology.roles_of_type(roles_for_host, type_):
            name = '%s.%s' % (type_, id_)

            # Ids containing '-s' mark standby daemons; count only the
            # active ones (used below for mds set_max_mds).
            if not (id_.endswith('-s')) and (id_.find('-s-') == -1):
                num_active += 1

            # Wrapper commands run before the daemon binary itself.
            run_cmd = [
                'sudo',
                'adjust-ulimits',
                'ceph-coverage',
                coverage_dir,
                'daemon-helper',
                daemon_signal,
            ]
            # The actual daemon invocation (-f keeps it in the foreground
            # so daemon-helper can supervise it).
            run_cmd_tail = ['ceph-%s' % (type_), '-f', '-i', id_]

            if type_ in config.get('cpu_profile', []):
                profile_path = '/var/log/ceph/profiling-logger/%s.%s.prof' % (
                    type_, id_)
                run_cmd.extend(['env', 'CPUPROFILE=%s' % profile_path])

            if config.get('valgrind') is not None:
                valgrind_args = None
                if type_ in config['valgrind']:
                    valgrind_args = config['valgrind'][type_]
                # A per-daemon entry (e.g. 'mds.a') overrides the
                # per-type one.
                if name in config['valgrind']:
                    valgrind_args = config['valgrind'][name]
                run_cmd = teuthology.get_valgrind_args(testdir, name,
                                                       run_cmd, valgrind_args)

            run_cmd.extend(run_cmd_tail)

            ctx.daemons.add_daemon(
                remote, type_, id_,
                args=run_cmd,
                logger=log.getChild(name),
                stdin=run.PIPE,
                wait=False,
            )

    # For mds, tell the cluster how many active mds daemons to expect.
    if type_ == 'mds':
        firstmon = teuthology.get_first_mon(ctx, config)
        (mon0_remote, ) = ctx.cluster.only(firstmon).remotes.keys()
        mon0_remote.run(args=[
            'adjust-ulimits',
            'ceph-coverage',
            coverage_dir,
            'ceph',
            'mds', 'set_max_mds', str(num_active)
        ])

    try:
        yield
    finally:
        teuthology.stop_daemons_of_type(ctx, type_)
def run_daemon(ctx, config, type_):
    """
    Run daemons for a role type in a named cluster.  Handle the startup
    and termination of a daemon.  On startup -- set coverage,
    cpu_profile and valgrind values for all remotes.  On cleanup -- stop
    all existing daemons of this type in this cluster.

    :param ctx: Context
    :param config: Configuration (must contain a 'cluster' key)
    :param type_: Role type (e.g. 'mon', 'osd', 'mds')
    """
    cluster_name = config["cluster"]
    log.info("Starting %s daemons in cluster %s...", type_, cluster_name)
    testdir = teuthology.get_testdir(ctx)
    daemons = ctx.cluster.only(teuthology.is_type(type_, cluster_name))

    # check whether any daemons of this type are configured
    if daemons is None:
        return
    coverage_dir = "{tdir}/archive/coverage".format(tdir=testdir)

    # valgrind and coverage runs need a graceful SIGTERM so results are
    # flushed; otherwise SIGKILL is fine.
    daemon_signal = "kill"
    if config.get("coverage") or config.get("valgrind") is not None:
        daemon_signal = "term"

    for remote, roles_for_host in daemons.remotes.iteritems():
        is_type_ = teuthology.is_type(type_, cluster_name)
        for role in roles_for_host:
            # A host may carry roles of other types/clusters; skip those.
            if not is_type_(role):
                continue
            _, _, id_ = teuthology.split_role(role)
            # Wrapper commands run before the daemon binary itself.
            run_cmd = ["sudo",
                       "adjust-ulimits",
                       "ceph-coverage",
                       coverage_dir,
                       "daemon-helper",
                       daemon_signal]
            # The actual daemon invocation (-f keeps it in the foreground
            # so daemon-helper can supervise it).
            run_cmd_tail = ["ceph-%s" % (type_),
                            "-f",
                            "--cluster", cluster_name,
                            "-i", id_]

            if type_ in config.get("cpu_profile", []):
                profile_path = "/var/log/ceph/profiling-logger/%s.prof" % (role)
                run_cmd.extend(["env", "CPUPROFILE=%s" % profile_path])

            if config.get("valgrind") is not None:
                valgrind_args = None
                if type_ in config["valgrind"]:
                    valgrind_args = config["valgrind"][type_]
                # A per-role entry overrides the per-type one.
                if role in config["valgrind"]:
                    valgrind_args = config["valgrind"][role]
                run_cmd = teuthology.get_valgrind_args(testdir, role,
                                                       run_cmd, valgrind_args)

            run_cmd.extend(run_cmd_tail)

            ctx.daemons.add_daemon(
                remote, type_, id_,
                cluster=cluster_name,
                args=run_cmd,
                logger=log.getChild(role),
                stdin=run.PIPE,
                wait=False,
            )

    try:
        yield
    finally:
        teuthology.stop_daemons_of_type(ctx, type_, cluster_name)
def start_rgw(ctx, config):
    """
    Start an rgw (radosgw) daemon behind apache for every client in
    ``config`` and yield; on exit stop all 'rgw' daemons and remove
    their ops-log sockets.

    :param ctx: Context (provides ``cluster`` and ``daemons``)
    :param config: per-client configuration dicts keyed by client role
    """
    log.info('Starting rgw...')
    testdir = teuthology.get_testdir(ctx)
    for client in config.iterkeys():
        # Each client role maps to exactly one remote host.
        (remote,) = ctx.cluster.only(client).remotes.iterkeys()

        client_config = config.get(client)
        if client_config is None:
            client_config = {}
        log.info("rgw %s config is %s", client, client_config)
        # id_ is the part after 'client.' in the role name.
        id_ = client.split('.', 1)[1]
        log.info('client {client} is id {id}'.format(client=client, id=id_))
        # Wrapper commands run in front of the radosgw binary.  In this
        # variant the helper scripts live inside the test dir.
        run_cmd = [
            'sudo',
            '{tdir}/adjust-ulimits'.format(tdir=testdir),
            'ceph-coverage',
            '{tdir}/archive/coverage'.format(tdir=testdir),
            '{tdir}/daemon-helper'.format(tdir=testdir),
            'term',
        ]
        # The radosgw invocation; stdout is teed to a per-client log file.
        run_cmd_tail = [
            'radosgw',
            '-n', client,
            '-k', '/etc/ceph/ceph.{client}.keyring'.format(client=client),
            '--log-file',
            '/var/log/ceph/rgw.{client}.log'.format(client=client),
            '--rgw_ops_log_socket_path',
            '{tdir}/rgw.opslog.{client}.sock'.format(tdir=testdir,
                                                     client=client),
            '{tdir}/apache/apache.{client}.conf'.format(tdir=testdir,
                                                        client=client),
            '--foreground',
            run.Raw('|'),
            'sudo',
            'tee',
            '/var/log/ceph/rgw.{client}.stdout'.format(tdir=testdir,
                                                       client=client),
            run.Raw('2>&1'),
        ]

        # NOTE(review): called unconditionally -- presumably
        # get_valgrind_args returns an empty list when the valgrind
        # config is None; confirm against the helper.
        run_cmd.extend(
            teuthology.get_valgrind_args(
                testdir,
                client,
                client_config.get('valgrind')
            )
        )

        run_cmd.extend(run_cmd_tail)

        ctx.daemons.add_daemon(
            remote, 'rgw', client,
            args=run_cmd,
            logger=log.getChild(client),
            stdin=run.PIPE,
            wait=False,
        )

    try:
        yield
    finally:
        teuthology.stop_daemons_of_type(ctx, 'rgw')
        # Clean up the ops-log sockets created by the daemons.
        for client in config.iterkeys():
            ctx.cluster.only(client).run(
                args=[
                    'rm',
                    '-f',
                    '{tdir}/rgw.opslog.{client}.sock'.format(tdir=testdir,
                                                             client=client),
                ],
            )
def run_daemon(ctx, config, type_):
    """
    Run daemons for a role type.  On startup -- set coverage,
    cpu_profile and valgrind values for all remotes, and a max_mds value
    for one mds.  On cleanup -- stop all existing daemons of this type.

    :param ctx: Context
    :param config: Configuration
    :param type_: Role type (e.g. 'mon', 'osd', 'mds')
    """
    log.info('Starting %s daemons...' % type_)
    testdir = teuthology.get_testdir(ctx)
    daemons = ctx.cluster.only(teuthology.is_type(type_))
    coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir)

    # valgrind and coverage runs need a graceful SIGTERM so results are
    # flushed; otherwise SIGKILL is fine.
    daemon_signal = 'kill'
    if config.get('coverage') or config.get('valgrind') is not None:
        daemon_signal = 'term'

    num_active = 0
    for remote, roles_for_host in daemons.remotes.iteritems():
        for id_ in teuthology.roles_of_type(roles_for_host, type_):
            name = '%s.%s' % (type_, id_)

            # Ids containing '-s' mark standby daemons; count only the
            # active ones (used below for mds set_max_mds).
            if not (id_.endswith('-s')) and (id_.find('-s-') == -1):
                num_active += 1

            # Wrapper commands run before the daemon binary itself.
            run_cmd = [
                'sudo',
                'adjust-ulimits',
                'ceph-coverage',
                coverage_dir,
                'daemon-helper',
                daemon_signal,
            ]
            # The actual daemon invocation (-f keeps it in the foreground
            # so daemon-helper can supervise it).
            run_cmd_tail = [
                'ceph-%s' % (type_),
                '-f',
                '-i', id_]

            if type_ in config.get('cpu_profile', []):
                profile_path = '/var/log/ceph/profiling-logger/%s.%s.prof' % (type_, id_)
                run_cmd.extend(['env', 'CPUPROFILE=%s' % profile_path])

            if config.get('valgrind') is not None:
                valgrind_args = None
                if type_ in config['valgrind']:
                    valgrind_args = config['valgrind'][type_]
                # A per-daemon entry (e.g. 'mds.a') overrides the
                # per-type one.
                if name in config['valgrind']:
                    valgrind_args = config['valgrind'][name]
                run_cmd = teuthology.get_valgrind_args(testdir, name,
                                                       run_cmd, valgrind_args)

            run_cmd.extend(run_cmd_tail)

            ctx.daemons.add_daemon(remote, type_, id_,
                                   args=run_cmd,
                                   logger=log.getChild(name),
                                   stdin=run.PIPE,
                                   wait=False,
                                   )

    # For mds, tell the cluster how many active mds daemons to expect.
    if type_ == 'mds':
        firstmon = teuthology.get_first_mon(ctx, config)
        (mon0_remote,) = ctx.cluster.only(firstmon).remotes.keys()
        mon0_remote.run(args=[
            'adjust-ulimits',
            'ceph-coverage',
            coverage_dir,
            'ceph',
            'mds', 'set_max_mds', str(num_active)])

    try:
        yield
    finally:
        teuthology.stop_daemons_of_type(ctx, type_)