def __init__(self, ctx, admin_remote=None):
    """
    Bind this helper to a running cluster described by ``ctx``.

    :param ctx: teuthology context (provides cluster roles and daemons)
    :param admin_remote: optional remote to run admin commands from;
        defaults to the first monitor's node
    :raises RuntimeError: if the cluster has no MDS roles
    """
    self._ctx = ctx

    # Discover all MDS roles; at least one is required for this task.
    self.mds_ids = list(misc.all_roles_of_type(ctx.cluster, 'mds'))
    if len(self.mds_ids) == 0:
        raise RuntimeError("This task requires at least one MDS")

    first_mon = misc.get_first_mon(ctx, None)
    if admin_remote is None:
        # Default to running admin commands from the first monitor's node.
        (self.admin_remote, ) = ctx.cluster.only(first_mon).remotes.iterkeys()
    else:
        self.admin_remote = admin_remote
    self.mon_manager = ceph_manager.CephManager(
        self.admin_remote, ctx=ctx, logger=log.getChild('ceph_manager'))
    if hasattr(self._ctx, "daemons"):
        # Presence of 'daemons' attribute implies ceph task rather than ceph_deploy task
        self.mds_daemons = dict([(mds_id, self._ctx.daemons.get_daemon('mds', mds_id))
                                 for mds_id in self.mds_ids])

    # Use the first client role to drive client-side operations.
    client_list = list(misc.all_roles_of_type(self._ctx.cluster, 'client'))
    self.client_id = client_list[0]
    self.client_remote = list(
        misc.get_clients(ctx=ctx,
                         roles=["client.{0}".format(self.client_id)]))[0][1]
def __init__(self, ctx, config):
    """Capture context/config, bind to the single MDS, and pick a client to drive the test."""
    self._ctx = ctx
    self._config = config

    mds_roles = list(misc.all_roles_of_type(ctx.cluster, 'mds'))
    # Require exactly one MDS, the code path for creation failure when
    # a standby is available is different
    if len(mds_roles) != 1:
        raise RuntimeError("This task requires exactly one MDS")
    self.mds_id = mds_roles[0]

    (mds_remote, ) = ctx.cluster.only(
        'mds.{_id}'.format(_id=self.mds_id)).remotes.iterkeys()
    self.mds_manager = ceph_manager.CephManager(
        mds_remote, ctx=ctx, logger=log.getChild('ceph_manager'))

    client_roles = list(misc.all_roles_of_type(self._ctx.cluster, 'client'))
    self.client_id = client_roles[0]
    client_pairs = list(misc.get_clients(
        ctx=ctx, roles=["client.{0}".format(self.client_id)]))
    self.client_remote = client_pairs[0][1]

    # File names exercised by the test workload.
    self.test_files = ['a', 'b', 'c']
def __init__(self, ctx, fscid=None, create=None):
    """
    Handle to a CephFS filesystem: attach to an existing one (``fscid``)
    or create one (``create``).

    :param ctx: teuthology context
    :param fscid: id of an existing filesystem to attach to
    :param create: True to create a filesystem with the default name
        'cephfs', or a string to create one with that name
    :raises RuntimeError: if both ``fscid`` and ``create`` are given
    """
    super(Filesystem, self).__init__(ctx)

    self.id = None
    self.name = None
    self.metadata_pool_name = None
    self.data_pools = None

    # Use the first client role for running filesystem admin commands.
    client_list = list(misc.all_roles_of_type(self._ctx.cluster, 'client'))
    self.client_id = client_list[0]
    self.client_remote = list(
        misc.get_clients(ctx=ctx,
                         roles=["client.{0}".format(self.client_id)]))[0][1]

    if create is not None:
        if fscid is not None:
            raise RuntimeError("cannot specify fscid when creating fs")
        if create is True:
            # create=True means "create with the default name"
            self.name = 'cephfs'
        else:
            # create=<str> names the new filesystem
            self.name = create
        if not self.legacy_configured():
            self.create()
    elif fscid is not None:
        self.id = fscid
    # Refresh cached metadata (id, pools) from the cluster.
    self.getinfo(refresh=True)
def __init__(self, ctx, fscid=None, name=None, create=False, ec_profile=None):
    """
    Handle to a CephFS filesystem.

    :param ctx: teuthology context
    :param fscid: id of an existing filesystem to attach to
    :param name: filesystem name (implies creating/operating by name)
    :param create: when ``name`` is given, create the fs if not already
        configured via the legacy mechanism
    :param ec_profile: erasure-code profile for the data pool, if any
    :raises RuntimeError: if both ``name`` and ``fscid`` are given
    """
    super(Filesystem, self).__init__(ctx)

    self.name = name
    self.ec_profile = ec_profile
    self.id = None
    self.metadata_pool_name = None
    self.metadata_overlay = False
    self.data_pool_name = None
    self.data_pools = None

    # Use the first client role for running filesystem admin commands.
    client_list = list(misc.all_roles_of_type(self._ctx.cluster, 'client'))
    self.client_id = client_list[0]
    self.client_remote = list(
        misc.get_clients(ctx=ctx,
                         roles=["client.{0}".format(self.client_id)]))[0][1]

    if name is not None:
        if fscid is not None:
            raise RuntimeError("cannot specify fscid when creating fs")
        if create and not self.legacy_configured():
            self.create()
    else:
        if fscid is not None:
            self.id = fscid
        # Refresh cached metadata (id, pools) from the cluster.
        self.getinfo(refresh = True)

    # Stash a reference to the first created filesystem on ctx, so
    # that if someone drops to the interactive shell they can easily
    # poke our methods.
    if not hasattr(self._ctx, "filesystem"):
        self._ctx.filesystem = self
def task(ctx, config):
    """
    Mount/unmount a ``kernel`` client.

    The config is optional and defaults to mounting on all clients. If
    a config is given, it is expected to be a list of clients to do
    this operation on. This lets you e.g. set up one client with
    ``ceph-fuse`` and another with ``kclient``.

    Example that mounts all clients::

        tasks:
        - ceph:
        - kclient:
        - interactive:

    Example that uses both ``kclient`` and ``ceph-fuse``::

        tasks:
        - ceph:
        - ceph-fuse: [client.0]
        - kclient: [client.1]
        - interactive:

    :param ctx: Context
    :param config: Configuration
    """
    log.info('Mounting kernel clients...')
    assert config is None or isinstance(config, list), \
        "task kclient got invalid config"

    if config is None:
        # Default to every client role in the cluster.
        config = ['client.{id}'.format(id=id_)
                  for id_ in misc.all_roles_of_type(ctx.cluster, 'client')]
    clients = list(misc.get_clients(ctx=ctx, roles=config))

    test_dir = misc.get_testdir(ctx)

    # Assemble mon addresses from each remote's SSH peer address.
    remotes_and_roles = ctx.cluster.remotes.items()
    roles = [roles for (remote_, roles) in remotes_and_roles]
    ips = [remote_.ssh.get_transport().getpeername()[0]
           for (remote_, _) in remotes_and_roles]
    mons = misc.get_mons(roles, ips).values()

    mounts = {}
    for id_, remote in clients:
        kernel_mount = KernelMount(mons, test_dir, id_, remote)
        mounts[id_] = kernel_mount
        kernel_mount.mount()

    # Expose mounts to nested tasks, then tear down on exit.
    ctx.mounts = mounts
    try:
        yield mounts
    finally:
        log.info('Unmounting kernel clients...')
        for mount in mounts.values():
            mount.umount()
def task(ctx, config):
    """
    Enable most ceph console logging

    Example that enables logging on all clients::

        tasks:
        - ceph:
        - kclient:
        - kcon_most
        - interactive:

    Example that enables logging only on the client using kclient::

        tasks:
        - ceph:
        - kclient: [client.0]
        - kcon_most [client.0]
        - interactive:

    :param ctx: Context
    :param config: optional list of client roles; defaults to all clients
    """
    log.info('Enable additional kernel logging...')
    assert config is None or isinstance(config, list), \
        "task kcon_most got invalid config"

    if config is None:
        # Default to every client role in the cluster.
        config = ['client.{id}'.format(id=id_)
                  for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
    clients = list(teuthology.get_clients(ctx=ctx, roles=config))

    testdir = teuthology.get_testdir(ctx)

    for id_, remote in clients:
        # TODO: Don't have to run this more than once per node (remote)
        log.info('Enable logging on client.{id} at {remote} ...'.format(
            id=id_, remote=remote))
        remote.run(
            args=[
                'sudo',
                'kcon_most',
                'on'
                ],
            )

    try:
        yield
    finally:
        # Symmetric teardown: turn the extra logging back off.
        log.info('Disable extra kernel logging on clients...')
        for id_, remote in clients:
            log.debug('Disable extra kernel logging on client.{id}...'.format(id=id_))
            remote.run(
                args=[
                    'sudo',
                    'kcon_most',
                    'off'
                    ],
                )
def task(ctx, config):
    """
    Create a mount dir 'client' that is just the local disk:

    Example that "mounts" all clients:

        tasks:
        - localdir:
        - interactive:

    Example for a specific client:

        tasks:
        - localdir: [client.2]
        - interactive:

    :param ctx: Context
    :param config: Configuration
    """
    log.info('Creating local mnt dirs...')

    testdir = teuthology.get_testdir(ctx)

    if config is None:
        # Default to every client role in the cluster.
        config = list('client.{id}'.format(id=id_)
                      for id_ in teuthology.all_roles_of_type(ctx.cluster,
                                                              'client'))

    clients = list(teuthology.get_clients(ctx=ctx, roles=config))
    for id_, remote in clients:
        # Plain local directory standing in for a mount point.
        mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
        log.info('Creating dir {remote} {mnt}...'.format(
            remote=remote, mnt=mnt))
        remote.run(
            args=[
                'mkdir',
                '--',
                mnt,
                ],
            )

    try:
        yield
    finally:
        # Remove the directories (and contents) created above.
        log.info('Removing local mnt dirs...')
        for id_, remote in clients:
            mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
            remote.run(
                args=[
                    'rm',
                    '-rf',
                    '--',
                    mnt,
                    ],
                )
def test_get_clients_simple():
    """get_clients yields an (id, remote) pair for the requested role, then stops."""
    fake_remote = FakeRemote()
    ctx = argparse.Namespace()
    ctx.cluster = cluster.Cluster(
        remotes=[(fake_remote, ['client.0', 'client.1'])],
    )

    gen = misc.get_clients(ctx=ctx, roles=['client.1'])
    pair = next(gen)
    # Exactly one (id, remote) pair for client.1.
    assert len(pair) == 2
    assert pair[0] == '1'
    assert pair[1] is fake_remote

    # Only one role was requested, so the generator must now be exhausted.
    with pytest.raises(StopIteration):
        next(gen)
def __init__(self, ctx, name=None):
    """Wrap a named CephFS filesystem (default "cephfs") and pick a client to drive it."""
    super(Filesystem, self).__init__(ctx)

    # Pool names are derived from the filesystem name by convention.
    if name is None:
        name = "cephfs"
    self.name = name
    self.metadata_pool_name = "{0}_metadata".format(name)
    self.data_pool_name = "{0}_data".format(name)

    # Run client-side commands via the first client role in the cluster.
    client_roles = list(misc.all_roles_of_type(self._ctx.cluster, 'client'))
    self.client_id = client_roles[0]
    client_pairs = list(misc.get_clients(
        ctx=ctx, roles=["client.{0}".format(self.client_id)]))
    self.client_remote = client_pairs[0][1]
def __init__(self, ctx):
    """Gather MDS daemon handles, a mon-side manager, and a client remote."""
    self._ctx = ctx

    self.mds_ids = list(misc.all_roles_of_type(ctx.cluster, 'mds'))
    if not self.mds_ids:
        raise RuntimeError("This task requires at least one MDS")

    # Admin commands are issued from the first monitor's node.
    first_mon = misc.get_first_mon(ctx, None)
    (self.mon_remote,) = ctx.cluster.only(first_mon).remotes.iterkeys()
    self.mon_manager = ceph_manager.CephManager(
        self.mon_remote, ctx=ctx, logger=log.getChild('ceph_manager'))

    # One daemon handle per MDS role.
    self.mds_daemons = dict(
        (mds_id, self._ctx.daemons.get_daemon('mds', mds_id))
        for mds_id in self.mds_ids)

    client_roles = list(misc.all_roles_of_type(self._ctx.cluster, 'client'))
    self.client_id = client_roles[0]
    self.client_remote = list(misc.get_clients(
        ctx=ctx, roles=["client.{0}".format(self.client_id)]))[0][1]
def task(ctx, config):
    """
    Enable most ceph console logging

    Example that enables logging on all clients::

        tasks:
        - ceph:
        - kclient:
        - kcon_most
        - interactive:

    Example that enables logging only on the client using kclient::

        tasks:
        - ceph:
        - kclient: [client.0]
        - kcon_most [client.0]
        - interactive:

    :param ctx: Context
    :param config: optional list of client roles; defaults to all clients
    """
    log.info('Enable additional kernel logging...')
    assert config is None or isinstance(config, list), \
        "task kcon_most got invalid config"

    if config is None:
        # Default to every client role in the cluster.
        config = [
            'client.{id}'.format(id=id_)
            for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')
        ]
    clients = list(teuthology.get_clients(ctx=ctx, roles=config))

    for id_, remote in clients:
        # TODO: Don't have to run this more than once per node (remote)
        log.info('Enable logging on client.{id} at {remote} ...'.format(
            id=id_, remote=remote))
        remote.run(args=['sudo', 'kcon_most', 'on'], )

    try:
        yield
    finally:
        # Symmetric teardown: turn the extra logging back off.
        log.info('Disable extra kernel logging on clients...')
        for id_, remote in clients:
            log.debug('Disable extra kernel logging on client.{id}...'.format(
                id=id_))
            remote.run(args=['sudo', 'kcon_most', 'off'], )
def __init__(self, ctx, admin_remote=None):
    """Collect MDS roles, an admin remote, and a client remote for this run."""
    self._ctx = ctx

    self.mds_ids = list(misc.all_roles_of_type(ctx.cluster, 'mds'))
    if not self.mds_ids:
        raise RuntimeError("This task requires at least one MDS")

    first_mon = misc.get_first_mon(ctx, None)
    if admin_remote is not None:
        self.admin_remote = admin_remote
    else:
        # Fall back to the first monitor's node for admin commands.
        (self.admin_remote,) = ctx.cluster.only(first_mon).remotes.iterkeys()
    self.mon_manager = ceph_manager.CephManager(
        self.admin_remote, ctx=ctx, logger=log.getChild('ceph_manager'))

    if hasattr(self._ctx, "daemons"):
        # Presence of 'daemons' attribute implies ceph task rather than ceph_deploy task
        self.mds_daemons = dict(
            (mds_id, self._ctx.daemons.get_daemon('mds', mds_id))
            for mds_id in self.mds_ids)

    client_roles = list(misc.all_roles_of_type(self._ctx.cluster, 'client'))
    self.client_id = client_roles[0]
    self.client_remote = list(misc.get_clients(
        ctx=ctx, roles=["client.{0}".format(self.client_id)]))[0][1]
def __init__(self, ctx):
    """Resolve MDS daemons, a mon-side manager, and a client remote from ctx."""
    self._ctx = ctx

    self.mds_ids = list(misc.all_roles_of_type(ctx.cluster, 'mds'))
    if not self.mds_ids:
        raise RuntimeError("This task requires at least one MDS")

    first_mon = misc.get_first_mon(ctx, None)
    (self.mon_remote, ) = ctx.cluster.only(first_mon).remotes.iterkeys()
    self.mon_manager = ceph_manager.CephManager(
        self.mon_remote,
        ctx=ctx,
        logger=log.getChild('ceph_manager'))

    # One daemon handle per MDS role.
    self.mds_daemons = dict(
        (mds_id, self._ctx.daemons.get_daemon('mds', mds_id))
        for mds_id in self.mds_ids)

    client_roles = list(misc.all_roles_of_type(self._ctx.cluster, 'client'))
    self.client_id = client_roles[0]
    client_pairs = list(misc.get_clients(
        ctx=ctx, roles=["client.{0}".format(self.client_id)]))
    self.client_remote = client_pairs[0][1]
def __init__(self, ctx, config):
    """Stash context/config, bind to the single MDS, and pick a driving client."""
    self._ctx = ctx
    self._config = config

    candidates = list(misc.all_roles_of_type(ctx.cluster, 'mds'))
    if len(candidates) != 1:
        # Require exactly one MDS, the code path for creation failure when
        # a standby is available is different
        raise RuntimeError("This task requires exactly one MDS")
    self.mds_id = candidates[0]

    mds_role = 'mds.{_id}'.format(_id=self.mds_id)
    (mds_remote,) = ctx.cluster.only(mds_role).remotes.iterkeys()
    self.mds_manager = ceph_manager.CephManager(
        mds_remote,
        ctx=ctx,
        logger=log.getChild('ceph_manager'),
    )

    clients = list(misc.all_roles_of_type(self._ctx.cluster, 'client'))
    self.client_id = clients[0]
    self.client_remote = list(misc.get_clients(
        ctx=ctx, roles=["client.{0}".format(self.client_id)]))[0][1]

    # File names exercised by the test workload.
    self.test_files = ['a', 'b', 'c']
def task(ctx, config):
    """
    Export/Unexport a ``nfs server`` client.

    The config is optional and defaults to exporting on all clients. If
    a config is given, it is expected to be a list or dict of clients to do
    this operation on. You must have specified ``ceph-fuse`` or
    ``kclient`` on all clients specified for knfsd.

    Example that exports all clients::

        tasks:
        - ceph:
        - kclient:
        - knfsd:
        - interactive:

    Example that uses both ``kclient`` and ``ceph-fuse``::

        tasks:
        - ceph:
        - ceph-fuse: [client.0]
        - kclient: [client.1]
        - knfsd: [client.0, client.1]
        - interactive:

    Example that specifies export options::

        tasks:
        - ceph:
        - kclient: [client.0, client.1]
        - knfsd:
            client.0:
              options: [rw,root_squash]
            client.1:
        - interactive:

    Note that when options aren't specified, rw,no_root_squash is the default.
    When you specify options, the defaults are as specified by exports(5).

    So if empty options are specified, i.e. options: [] these are the defaults:
        ro,sync,wdelay,hide,nocrossmnt,secure,root_squash,no_all_squash,
        no_subtree_check,secure_locks,acl,anonuid=65534,anongid=65534

    :param ctx: Context
    :param config: Configuration
    """
    log.info('Exporting nfs server...')

    # Normalize config to a dict of role -> per-client options.
    if config is None:
        config = dict(('client.{id}'.format(id=id_), None)
                      for id_ in teuthology.all_roles_of_type(ctx.cluster,
                                                              'client'))
    elif isinstance(config, list):
        config = dict((name, None) for name in config)

    clients = list(teuthology.get_clients(ctx=ctx, roles=config.keys()))

    for id_, remote in clients:
        mnt = os.path.join(teuthology.get_testdir(ctx),
                           'mnt.{id}'.format(id=id_))
        client_config = config.get("client.%s" % id_)
        if client_config is None:
            client_config = {}
        log.debug("Client client.%s config is %s" % (id_, client_config))

        if client_config.get('options') is not None:
            opts = ','.join(client_config.get('options'))
        else:
            opts = 'rw,no_root_squash'

        # Undocumented option to export to any client in case
        # testing in interactive mode from other unspecified clients.
        wildcard = False
        if client_config.get('wildcard') is not None:
            wildcard = True

        log.info('Exporting knfsd client.{id} at {remote} *:{mnt} ({opt})...'.format(
            id=id_, remote=remote, mnt=mnt, opt=opts))

        """
        Should the user want to run with root_squash enabled, there is no
        way to write anything to the initial ceph root dir which is set to
        rwxr-xr-x root root.
        This could possibly break test cases that make assumptions about
        the initial state of the root dir.
        """
        remote.run(
            args=[
                'sudo',
                'chmod',
                "777",
                '{MNT}'.format(MNT=mnt),
                ],
            )

        """ Start NFS kernel server """
        remote.run(
            args=get_nfsd_args(remote, 'restart')
            )

        args=[
            'sudo',
            "exportfs",
            '-o',
            'fsid=123{id},{opt}'.format(id=id_,opt=opts),
            ]
        if wildcard:
            args += ['*:{MNT}'.format(MNT=mnt)]
        else:
            """
            DEFAULT
            Prevent bogus clients from old runs from access our export.
            Specify all specify node addresses for this run.
            """
            ips = [host for (host, _) in
                   (remote.ssh.get_transport().getpeername()
                    for (remote, roles) in ctx.cluster.remotes.items())]
            for ip in ips:
                args += ['{ip}:{MNT}'.format(ip=ip, MNT=mnt)]

        log.info('remote run {args}'.format(args=args))
        remote.run(
            args=args
            )

    try:
        yield
    finally:
        log.info('Unexporting nfs server...')
        for id_, remote in clients:
            log.debug('Unexporting client client.{id}...'.format(id=id_))
            mnt = os.path.join(teuthology.get_testdir(ctx),
                               'mnt.{id}'.format(id=id_))
            try:
                # Log which files are still open on the mount before stopping;
                # failures here are tolerated (check_status=False).
                log.debug('Checking active files on mount {mnt}'.format(mnt=mnt))
                remote.run(
                    args=[
                        'sudo',
                        'lsof', '-V', '+D',
                        '{mnt}'.format(mnt=mnt),
                        ],
                    check_status=False
                    )
            finally:
                log.debug('Stopping NFS server on client.{id}...'.format(id=id_))
                remote.run(
                    args=get_nfsd_args(remote, 'stop')
                    )
                log.debug('Syncing client client.{id}'.format(id=id_))
                remote.run(
                    args=[
                        'sync'
                        ]
                    )
def task(ctx, config):
    """
    Export/Unexport a ``nfs server`` client.

    The config is optional and defaults to exporting on all clients. If
    a config is given, it is expected to be a list or dict of clients to do
    this operation on. You must have specified ``ceph-fuse`` or
    ``kclient`` on all clients specified for knfsd.

    Example that exports all clients::

        tasks:
        - ceph:
        - kclient:
        - knfsd:
        - interactive:

    Example that uses both ``kclient`` and ``ceph-fuse``::

        tasks:
        - ceph:
        - ceph-fuse: [client.0]
        - kclient: [client.1]
        - knfsd: [client.0, client.1]
        - interactive:

    Example that specifies export options::

        tasks:
        - ceph:
        - kclient: [client.0, client.1]
        - knfsd:
            client.0:
              options: [rw,root_squash]
            client.1:
        - interactive:

    Note that when options aren't specified, rw,no_root_squash is the default.
    When you specify options, the defaults are as specified by exports(5).

    So if empty options are specified, i.e. options: [] these are the defaults:
        ro,sync,wdelay,hide,nocrossmnt,secure,root_squash,no_all_squash,
        no_subtree_check,secure_locks,acl,anonuid=65534,anongid=65534
    """
    log.info("Exporting nfs server...")

    # Normalize config to a dict of role -> per-client options.
    if config is None:
        config = dict(
            ("client.{id}".format(id=id_), None)
            for id_ in teuthology.all_roles_of_type(ctx.cluster, "client")
        )
    elif isinstance(config, list):
        config = dict((name, None) for name in config)

    clients = list(teuthology.get_clients(ctx=ctx, roles=config.keys()))

    for id_, remote in clients:
        mnt = os.path.join("/tmp/cephtest", "mnt.{id}".format(id=id_))
        client_config = config.get("client.%s" % id_)
        if client_config is None:
            client_config = {}
        log.debug("Client client.%s config is %s" % (id_, client_config))

        if client_config.get("options") is not None:
            opts = ",".join(client_config.get("options"))
        else:
            opts = "rw,no_root_squash"

        # Undocumented option to export to any client in case
        # testing in interactive mode from other unspecified clients.
        wildcard = False
        if client_config.get("wildcard") is not None:
            wildcard = True

        log.info(
            "Exporting knfsd client.{id} at {remote} *:{mnt} ({opt})...".format(
                id=id_, remote=remote, mnt=mnt, opt=opts
            )
        )

        """
        Should the user want to run with root_squash enabled, there is no
        way to write anything to the initial ceph root dir which is set to
        rwxr-xr-x root root.
        This could possibly break test cases that make assumptions about
        the initial state of the root dir.
        """
        remote.run(args=["sudo", "chmod", "777", "{MNT}".format(MNT=mnt)])

        args = ["sudo", "exportfs", "-o",
                "fsid=123{id},{opt}".format(id=id_, opt=opts)]
        if wildcard:
            args += ["*:{MNT}".format(MNT=mnt)]
        else:
            """
            DEFAULT
            Prevent bogus clients from old runs from access our export.
            Specify all specify node addresses for this run.
            """
            ips = [
                host
                for (host, port) in (
                    remote.ssh.get_transport().getpeername()
                    for (remote, roles) in ctx.cluster.remotes.items()
                )
            ]
            for ip in ips:
                args += ["{ip}:{MNT}".format(ip=ip, MNT=mnt)]

        log.info("remote run {args}".format(args=args))
        remote.run(args=args)

    try:
        yield
    finally:
        # Unexport everything on teardown.
        log.info("Unexporting nfs server...")
        for id_, remote in clients:
            log.debug("Unexporting client client.{id}...".format(id=id_))
            mnt = os.path.join("/tmp/cephtest", "mnt.{id}".format(id=id_))
            remote.run(args=["sudo", "exportfs", "-au"])
def task(ctx, config):
    """
    Given a Ceph cluster has already been set up, exercise the migration
    of the CephFS journal from an older format to the latest format.  On
    successful completion the filesystem will be running with a journal
    in the new format.

    Optionally specify which client to use like this:

    - mds-journal_migration:
        client: client.0

    :param ctx: Context (must be nested inside 'ceph' and a mount task)
    :param config: optional dict; may carry a 'client' role name
    :raises RuntimeError: on missing prerequisites, or if the journal
        fails to reach the new format after the MDS restart
    """
    if not hasattr(ctx, 'ceph'):
        raise RuntimeError("This task must be nested in 'ceph' task")

    if not hasattr(ctx, 'mounts'):
        raise RuntimeError("This task must be nested inside 'kclient' or 'ceph_fuse' task")

    # Determine which client we will use
    if config and 'client' in config:
        # Use client specified in config
        client_role = config['client']
        client_list = list(misc.get_clients(ctx, [client_role]))
        try:
            client_id = client_list[0][0]
        except IndexError:
            raise RuntimeError("Client role '{0}' not found".format(client_role))
    else:
        # Pick one arbitrary client to use
        client_list = list(misc.all_roles_of_type(ctx.cluster, 'client'))
        try:
            client_id = client_list[0]
        except IndexError:
            raise RuntimeError("This task requires at least one client")

    fs = Filesystem(ctx)
    ctx.fs = fs
    old_journal_version = JOURNAL_FORMAT_LEGACY
    new_journal_version = JOURNAL_FORMAT_RESILIENT

    fs.set_ceph_conf('mds', 'mds journal format', old_journal_version)

    # Create a filesystem using the older journal format.
    for mount in ctx.mounts.values():
        mount.umount_wait()
    fs.mds_stop()
    fs.reset()
    fs.mds_restart()

    # Do some client work so that the log is populated with something.
    mount = ctx.mounts[client_id]
    with mount.mounted():
        mount.create_files()
        mount.check_files()  # sanity, this should always pass

        # Run a more substantial workunit so that the length of the log to be
        # converted is going to span at least a few segments
        workunit(ctx, {
            'clients': {
                "client.{0}".format(client_id): ["suites/fsstress.sh"],
            },
            "timeout": "3h"
        })

    # Modify the ceph.conf to ask the MDS to use the new journal format.
    fs.set_ceph_conf('mds', 'mds journal format', new_journal_version)

    # Restart the MDS.
    fs.mds_fail_restart()

    # This ensures that all daemons come up into a valid state
    # (the original called wait_for_daemons twice back to back; once is enough)
    fs.wait_for_daemons()

    # Check that files created in the initial client workload are still visible
    # in a client mount.
    with mount.mounted():
        mount.check_files()

    # Verify that the journal really has been rewritten.
    journal_version = fs.get_journal_version()
    if journal_version != new_journal_version:
        # BUGFIX: the original formatted 'journal_version()' — calling an
        # int — which raised TypeError instead of the intended message.
        raise RuntimeError("Journal was not upgraded, version should be {0} but is {1}".format(
            new_journal_version, journal_version
        ))

    # Verify that cephfs-journal-tool can now read the rewritten journal
    proc = mount.client_remote.run(
        args=["cephfs-journal-tool", "journal", "inspect"],
        stdout=StringIO())
    if not proc.stdout.getvalue().strip().endswith(": OK"):
        raise RuntimeError("Unexpected journal-tool result: '{0}'".format(
            proc.stdout.getvalue()
        ))

    mount.client_remote.run(
        args=["sudo", "cephfs-journal-tool", "event", "get", "json", "--path", "/tmp/journal.json"])
    proc = mount.client_remote.run(
        args=[
            "python",
            "-c",
            "import json; print len(json.load(open('/tmp/journal.json')))"
        ],
        stdout=StringIO())
    event_count = int(proc.stdout.getvalue().strip())
    if event_count < 1000:
        # Approximate value of "lots", expected from having run fsstress
        raise RuntimeError("Unexpectedly few journal events: {0}".format(event_count))

    # Leave all MDSs and clients running for any child tasks
    for mount in ctx.mounts.values():
        mount.mount()
        mount.wait_until_mounted()

    yield
def task(ctx, config):
    """
    Mount/unmount a ``kernel`` client.

    The config is optional and defaults to mounting on all clients. If
    a config is given, it is expected to be a list of clients to do
    this operation on. This lets you e.g. set up one client with
    ``ceph-fuse`` and another with ``kclient``.

    Example that mounts all clients::

        tasks:
        - ceph:
        - kclient:
        - interactive:

    Example that uses both ``kclient`` and ``ceph-fuse``::

        tasks:
        - ceph:
        - ceph-fuse: [client.0]
        - kclient: [client.1]
        - interactive:

    Pass a dictionary instead of lists to specify per-client config:

        tasks:
        -kclient:
            client.0:
                debug: true

    :param ctx: Context
    :param config: Configuration
    """
    log.info('Mounting kernel clients...')
    assert config is None or isinstance(config, list) or isinstance(config, dict), \
        "task kclient got invalid config"

    if config is None:
        # Default to every client role in the cluster.
        config = ['client.{id}'.format(id=id_)
                  for id_ in misc.all_roles_of_type(ctx.cluster, 'client')]

    if isinstance(config, list):
        client_roles = config
        config = dict([r, dict()] for r in client_roles)
    elif isinstance(config, dict):
        client_roles = config.keys()
    else:
        raise ValueError("Invalid config object: {0} ({1})".format(config, config.__class__))

    # config has been converted to a dict by this point
    overrides = ctx.config.get('overrides', {})
    deep_merge(config, overrides.get('kclient', {}))

    clients = list(misc.get_clients(ctx=ctx, roles=client_roles))

    test_dir = misc.get_testdir(ctx)

    # Assemble mon addresses from each remote's SSH peer address.
    remotes_and_roles = ctx.cluster.remotes.items()
    roles = [roles for (remote_, roles) in remotes_and_roles]
    ips = [remote_.ssh.get_transport().getpeername()[0]
           for (remote_, _) in remotes_and_roles]
    mons = misc.get_mons(roles, ips).values()

    mounts = {}
    for id_, remote in clients:
        # IPMI credentials allow power-cycling a hung kernel client.
        kernel_mount = KernelMount(
            mons,
            test_dir,
            id_,
            remote,
            ctx.teuthology_config.get('ipmi_user', None),
            ctx.teuthology_config.get('ipmi_password', None),
            ctx.teuthology_config.get('ipmi_domain', None)
        )

        mounts[id_] = kernel_mount

        client_config = config["client.{0}".format(id_)]
        if client_config.get('debug', False):
            # Turn on kernel dynamic debug for the ceph modules.
            remote.run(args=["sudo", "bash", "-c", "echo 'module ceph +p' > /sys/kernel/debug/dynamic_debug/control"])
            remote.run(args=["sudo", "bash", "-c", "echo 'module libceph +p' > /sys/kernel/debug/dynamic_debug/control"])

        kernel_mount.mount()

    # Expose mounts to nested tasks, then tear down on exit.
    ctx.mounts = mounts
    try:
        yield mounts
    finally:
        log.info('Unmounting kernel clients...')
        for mount in mounts.values():
            mount.umount()
def task(ctx, config):
    """
    Mount/unmount a ``kernel`` client.

    The config is optional and defaults to mounting on all clients. If
    a config is given, it is expected to be a list of clients to do
    this operation on. This lets you e.g. set up one client with
    ``ceph-fuse`` and another with ``kclient``.

    Example that mounts all clients::

        tasks:
        - ceph:
        - kclient:
        - interactive:

    Example that uses both ``kclient`` and ``cfuse``::

        tasks:
        - ceph:
        - cfuse: [client.0]
        - kclient: [client.1]
        - interactive:

    :param ctx: Context
    :param config: optional list of client roles; defaults to all clients
    """
    log.info('Mounting kernel clients...')
    assert config is None or isinstance(config, list), \
        "task kclient got invalid config"

    if config is None:
        # Default to every client role in the cluster.
        config = [
            'client.{id}'.format(id=id_)
            for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')
        ]
    clients = list(teuthology.get_clients(ctx=ctx, roles=config))
    for id_, remote in clients:
        mnt = os.path.join('/tmp/cephtest', 'mnt.{id}'.format(id=id_))
        log.info('Mounting kclient client.{id} at {remote} {mnt}...'.format(
            id=id_, remote=remote, mnt=mnt))

        # figure mon ips
        remotes_and_roles = ctx.cluster.remotes.items()
        roles = [roles for (remote_, roles) in remotes_and_roles]
        ips = [
            host for (host, port) in
            (remote_.ssh.get_transport().getpeername()
             for (remote_, roles) in remotes_and_roles)
        ]
        mons = teuthology.get_mons(roles, ips).values()

        # Write the client's secret key where mount.ceph can read it.
        secret = '/tmp/cephtest/data/client.{id}.secret'.format(id=id_)
        teuthology.write_secret_file(remote, 'client.{id}'.format(id=id_),
                                     secret)

        remote.run(args=[
            'mkdir',
            '--',
            mnt,
        ],
        )

        # Mount via the coverage/coredump wrappers around mount.ceph.
        remote.run(args=[
            'sudo',
            '/tmp/cephtest/enable-coredump',
            '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
            '/tmp/cephtest/archive/coverage',
            '/tmp/cephtest/binary/usr/local/sbin/mount.ceph',
            '{mons}:/'.format(mons=','.join(mons)),
            mnt,
            '-v',
            '-o',
            'name={id},secretfile={secret}'.format(id=id_, secret=secret),
        ],
        )

    try:
        yield
    finally:
        log.info('Unmounting kernel clients...')
        for id_, remote in clients:
            log.debug('Unmounting client client.{id}...'.format(id=id_))
            mnt = os.path.join('/tmp/cephtest', 'mnt.{id}'.format(id=id_))
            remote.run(args=[
                'sudo',
                'umount',
                mnt,
            ],
            )
            remote.run(args=[
                'rmdir',
                '--',
                mnt,
            ],
            )
def task(ctx, config):
    """
    Mount/unmount a ``kernel`` client.

    The config is optional and defaults to mounting on all clients. If
    a config is given, it is expected to be a list of clients to do
    this operation on. This lets you e.g. set up one client with
    ``ceph-fuse`` and another with ``kclient``.

    Example that mounts all clients::

        tasks:
        - ceph:
        - kclient:
        - interactive:

    Example that uses both ``kclient`` and ``cfuse``::

        tasks:
        - ceph:
        - cfuse: [client.0]
        - kclient: [client.1]
        - interactive:

    :param ctx: Context
    :param config: optional list of client roles; defaults to all clients
    """
    log.info('Mounting kernel clients...')
    assert config is None or isinstance(config, list), \
        "task kclient got invalid config"

    if config is None:
        # Default to every client role in the cluster.
        config = ['client.{id}'.format(id=id_)
                  for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
    clients = list(teuthology.get_clients(ctx=ctx, roles=config))
    for id_, remote in clients:
        mnt = os.path.join('/tmp/cephtest', 'mnt.{id}'.format(id=id_))
        log.info('Mounting kclient client.{id} at {remote} {mnt}...'.format(
            id=id_, remote=remote, mnt=mnt))

        # figure mon ips
        remotes_and_roles = ctx.cluster.remotes.items()
        roles = [roles for (remote_, roles) in remotes_and_roles]
        ips = [host for (host, port) in
               (remote_.ssh.get_transport().getpeername()
                for (remote_, roles) in remotes_and_roles)]
        mons = teuthology.get_mons(roles, ips).values()

        # Write the client's secret key where mount.ceph can read it.
        secret = '/tmp/cephtest/data/client.{id}.secret'.format(id=id_)
        teuthology.write_secret_file(remote, 'client.{id}'.format(id=id_),
                                     secret)

        remote.run(
            args=[
                'mkdir',
                '--',
                mnt,
                ],
            )

        # Mount via the coverage/coredump wrappers around mount.ceph.
        remote.run(
            args=[
                'sudo',
                '/tmp/cephtest/enable-coredump',
                '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
                '/tmp/cephtest/archive/coverage',
                '/tmp/cephtest/binary/usr/local/sbin/mount.ceph',
                '{mons}:/'.format(mons=','.join(mons)),
                mnt,
                '-v',
                '-o',
                'name={id},secretfile={secret}'.format(id=id_, secret=secret),
                ],
            )

    try:
        yield
    finally:
        log.info('Unmounting kernel clients...')
        for id_, remote in clients:
            log.debug('Unmounting client client.{id}...'.format(id=id_))
            mnt = os.path.join('/tmp/cephtest', 'mnt.{id}'.format(id=id_))
            remote.run(
                args=[
                    'sudo',
                    'umount',
                    mnt,
                    ],
                )
            remote.run(
                args=[
                    'rmdir',
                    '--',
                    mnt,
                    ],
                )
def task(ctx, config):
    """
    Mount/unmount a ``kernel`` client.

    The config is optional and defaults to mounting on all clients. If
    a config is given, it is expected to be a list of clients to do
    this operation on. This lets you e.g. set up one client with
    ``ceph-fuse`` and another with ``kclient``.

    ``brxnet`` should be a Private IPv4 Address range, default range is
    [192.168.0.0/16]

    Example that mounts all clients::

        tasks:
        - ceph:
        - kclient:
        - interactive:
        - brxnet: [192.168.0.0/16]

    Example that uses both ``kclient`` and ``ceph-fuse``::

        tasks:
        - ceph:
        - ceph-fuse: [client.0]
        - kclient: [client.1]
        - interactive:

    Pass a dictionary instead of lists to specify per-client config:

        tasks:
        -kclient:
            client.0:
                debug: true

    :param ctx: Context
    :param config: Configuration
    """
    log.info('Mounting kernel clients...')

    # Normalize config to a dict of client role -> per-client config.
    if config is None:
        ids = misc.all_roles_of_type(ctx.cluster, 'client')
        client_roles = [f'client.{id_}' for id_ in ids]
        config = dict([r, dict()] for r in client_roles)
    elif isinstance(config, list):
        client_roles = config
        config = dict([r, dict()] for r in client_roles)
    elif isinstance(config, dict):
        client_roles = filter(lambda x: 'client.' in x, config.keys())
    else:
        raise ValueError(
            f"Invalid config object: {config} ({config.__class__})")
    log.info(f"config is {config}")

    clients = list(misc.get_clients(ctx=ctx, roles=client_roles))

    test_dir = misc.get_testdir(ctx)

    # Clean up leftover network namespaces/bridges from previous runs.
    for id_, remote in clients:
        KernelMount.cleanup_stale_netnses_and_bridge(remote)

    mounts = {}
    overrides = ctx.config.get('overrides', {}).get('kclient', {})
    # Overrides whose keys are not 'client.*' apply to every client.
    top_overrides = dict(filter(lambda x: 'client.' not in x[0],
                                overrides.items()))
    for id_, remote in clients:
        entity = f"client.{id_}"
        client_config = config.get(entity)
        if client_config is None:
            client_config = {}
        # top level overrides
        for k, v in top_overrides.items():
            if v is not None:
                client_config[k] = v
        # mount specific overrides
        client_config_overrides = overrides.get(entity)
        deep_merge(client_config, client_config_overrides)
        log.info(f"{entity} config is {client_config}")

        cephfs_name = client_config.get("cephfs_name")

        # Skip clients explicitly disabled or marked unmounted.
        if config.get("disabled", False) or \
           not client_config.get('mounted', True):
            continue

        kernel_mount = KernelMount(ctx=ctx,
                                   test_dir=test_dir,
                                   client_id=id_,
                                   client_remote=remote,
                                   brxnet=ctx.teuthology_config.get(
                                       'brxnet', None),
                                   config=client_config,
                                   cephfs_name=cephfs_name)

        mounts[id_] = kernel_mount

        if client_config.get('debug', False):
            # Turn on kernel dynamic debug for the ceph modules.
            remote.run(args=[
                "sudo", "bash", "-c",
                "echo 'module ceph +p' > /sys/kernel/debug/dynamic_debug/control"
            ])
            remote.run(args=[
                "sudo", "bash", "-c",
                "echo 'module libceph +p' > /sys/kernel/debug/dynamic_debug/control"
            ])

        kernel_mount.mount()

    def umount_all():
        # Unmount everything; returns True when a force-umount was needed.
        log.info('Unmounting kernel clients...')

        forced = False
        for mount in mounts.values():
            if mount.is_mounted():
                try:
                    mount.umount()
                except (CommandFailedError, MaxWhileTries):
                    log.warning("Ordinary umount failed, forcing...")
                    forced = True
                    mount.umount_wait(force=True)

        for id_, remote in clients:
            KernelMount.cleanup_stale_netnses_and_bridge(remote)

        return forced

    ctx.mounts = mounts
    try:
        yield mounts
    except:
        # NOTE(review): bare except with no re-raise — the original
        # suppresses the propagating exception here; confirm intended.
        umount_all()  # ignore forced retval, we are already in error handling
    finally:
        forced = umount_all()
        if forced:
            # The context managers within the kclient manager worked (i.e.
            # the test workload passed) but for some reason we couldn't
            # umount, so turn this into a test failure.
            raise RuntimeError("Kernel mounts did not umount cleanly")
def task(ctx, config):
    """
    Mount/unmount a ``ceph-fuse`` client (legacy ``/tmp/cephtest`` variant,
    task name ``cfuse``).

    The config is optional and defaults to mounting on all clients. If a
    config is given, it is expected to be a list of clients to do this
    operation on. This lets you e.g. set up one client with ``ceph-fuse``
    and another with ``kclient``.

    Example that mounts all clients::

        tasks:
        - ceph:
        - cfuse:
        - interactive:

    Example that uses both ``kclient`` and ``cfuse``::

        tasks:
        - ceph:
        - cfuse: [client.0]
        - kclient: [client.1]
        - interactive:

    Example that enables valgrind::

        tasks:
        - ceph:
        - cfuse:
            client.0:
              valgrind: --tool=memcheck
        - interactive:

    :param ctx: Context
    :param config: Configuration (None, list of roles, or dict)
    """
    log.info('Mounting ceph-fuse clients...')
    cfuse_daemons = {}

    # Normalize config into {"client.N": per-client-config} form.
    if config is None:
        config = dict(('client.{id}'.format(id=id_), None)
                      for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client'))
    elif isinstance(config, list):
        config = dict((name, None) for name in config)

    clients = list(teuthology.get_clients(ctx=ctx, roles=config.keys()))

    for id_, remote in clients:
        mnt = os.path.join('/tmp/cephtest', 'mnt.{id}'.format(id=id_))
        log.info('Mounting ceph-fuse client.{id} at {remote} {mnt}...'.format(
            id=id_, remote=remote, mnt=mnt))

        client_config = config.get("client.%s" % id_)
        if client_config is None:
            client_config = {}
        log.info("Client client.%s config is %s" % (id_, client_config))

        # 'term' lets the daemon flush coverage data on shutdown.
        daemon_signal = 'kill'
        if client_config.get('coverage'):
            log.info('Recording coverage for this run.')
            daemon_signal = 'term'

        remote.run(
            args=[
                'mkdir', '--', mnt,
            ],
        )

        run_cmd = [
            '/tmp/cephtest/enable-coredump',
            '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
            '/tmp/cephtest/archive/coverage',
            '/tmp/cephtest/daemon-helper',
        ]
        run_cmd_tail = [
            '/tmp/cephtest/binary/usr/local/bin/ceph-fuse',
            '-f',
            '--name', 'client.{id}'.format(id=id_),
            '-c', '/tmp/cephtest/ceph.conf',
            # TODO ceph-fuse doesn't understand dash dash '--',
            mnt,
        ]

        extra_args = None
        if client_config.get('valgrind') is not None:
            log.debug('Running client.{id} under valgrind'.format(id=id_))
            val_path = '/tmp/cephtest/archive/log/valgrind'
            daemon_signal = 'term'
            remote.run(
                args=[
                    'mkdir', '-p', '--', val_path,
                ],
                wait=True,
            )
            extra_args = [
                'valgrind',
                '--log-file={vdir}/client.{id}.log'.format(vdir=val_path, id=id_),
                client_config.get('valgrind')
            ]

        run_cmd.append(daemon_signal)
        if extra_args is not None:
            run_cmd.extend(extra_args)
        run_cmd.extend(run_cmd_tail)

        proc = remote.run(
            args=run_cmd,
            logger=log.getChild('cfuse.{id}'.format(id=id_)),
            stdin=run.PIPE,
            wait=False,
        )
        cfuse_daemons[id_] = proc

    for id_, remote in clients:
        mnt = os.path.join('/tmp/cephtest', 'mnt.{id}'.format(id=id_))
        teuthology.wait_until_fuse_mounted(
            remote=remote,
            fuse=cfuse_daemons[id_],
            mountpoint=mnt,
        )

    try:
        yield
    finally:
        log.info('Unmounting ceph-fuse clients...')
        for id_, remote in clients:
            mnt = os.path.join('/tmp/cephtest', 'mnt.{id}'.format(id=id_))
            remote.run(
                args=[
                    'fusermount', '-u', mnt,
                ],
            )
        # BUGFIX: dict.itervalues() is Python 2 only and raises
        # AttributeError under Python 3 (this file uses f-strings elsewhere,
        # so it runs on Python 3); .values() behaves identically here.
        run.wait(cfuse_daemons.values())
        for id_, remote in clients:
            mnt = os.path.join('/tmp/cephtest', 'mnt.{id}'.format(id=id_))
            remote.run(
                args=[
                    'rmdir', '--', mnt,
                ],
            )
def task(ctx, config):
    """
    Mount/unmount a ``ceph-fuse`` client (modern variant with ``auth_id``,
    ``skip``, ``mount_path``/``mountpoint`` and ``disabled`` support).

    The config is optional and defaults to mounting on all clients. If a
    config is given, it is expected to be a list of clients to do this
    operation on. This lets you e.g. set up one client with ``ceph-fuse``
    and another with ``kclient``.

    Example that mounts all clients::

        tasks:
        - ceph:
        - ceph-fuse:
        - interactive:

    Example that enables valgrind::

        tasks:
        - ceph:
        - ceph-fuse:
            client.0:
              valgrind: [--tool=memcheck, --leak-check=full, --show-reachable=yes]
        - interactive:

    Example that stops an already-mounted client::

        tasks:
        - ceph:
        - ceph-fuse: [client.0]
        - ... do something that requires the FS mounted ...
        - ceph-fuse:
            client.0:
                mounted: false
        - ... do something that requires the FS unmounted ...

    Example that adds more generous wait time for mount (for virtual machines)::

        tasks:
        - ceph:
        - ceph-fuse:
            client.0:
              mount_wait: 60      # default is 0, do not wait before checking /sys/
              mount_timeout: 120  # default is 30, give up if /sys/ is not populated
        - interactive:

    :param ctx: Context
    :param config: Configuration
    """
    log.info('Running ceph_fuse task...')

    testdir = teuthology.get_testdir(ctx)
    log.info("config is {}".format(str(config)))
    config = get_client_configs(ctx, config)
    log.info("new config is {}".format(str(config)))

    # List clients we will configure mounts for, default is all clients
    clients = list(teuthology.get_clients(ctx=ctx,
                                          roles=filter(lambda x: 'client.' in x, config.keys())))

    all_mounts = getattr(ctx, 'mounts', {})
    mounted_by_me = {}
    skipped = {}

    # Construct any new FuseMount instances
    for id_, remote in clients:
        client_config = config.get("client.%s" % id_)
        if client_config is None:
            client_config = {}

        # auth_id lets a mount authenticate as a different client entity.
        auth_id = client_config.get("auth_id", id_)

        skip = client_config.get("skip", False)
        if skip:
            skipped[id_] = skip
            continue

        if id_ not in all_mounts:
            fuse_mount = FuseMount(ctx, client_config, testdir, auth_id, remote)
            all_mounts[id_] = fuse_mount
        else:
            # Catch bad configs where someone has e.g. tried to use ceph-fuse
            # and kcephfs for the same client
            assert isinstance(all_mounts[id_], FuseMount)

        if not config.get("disabled", False) and client_config.get('mounted', True):
            mounted_by_me[id_] = {"config": client_config, "mount": all_mounts[id_]}

    ctx.mounts = all_mounts

    # Mount any clients we have been asked to (default to mount all)
    log.info('Mounting ceph-fuse clients...')
    for info in mounted_by_me.values():
        config = info["config"]
        mount_path = config.get("mount_path")
        mountpoint = config.get("mountpoint")
        info["mount"].mount(mountpoint=mountpoint, mount_path=mount_path)

    for info in mounted_by_me.values():
        info["mount"].wait_until_mounted()

    # Umount any pre-existing clients that we have not been asked to mount
    for client_id in set(all_mounts.keys()) - set(mounted_by_me.keys()) - set(skipped.keys()):
        mount = all_mounts[client_id]
        if mount.is_mounted():
            mount.umount_wait()

    try:
        yield all_mounts
    finally:
        log.info('Unmounting ceph-fuse clients...')
        for info in mounted_by_me.values():
            # Conditional because an inner context might have umounted it
            mount = info["mount"]
            if mount.is_mounted():
                mount.umount_wait()
def task(ctx, config):
    """
    Mount/unmount a ``ceph-fuse`` client (testdir variant with overrides
    and valgrind support).

    The config is optional and defaults to mounting on all clients. If a
    config is given, it is expected to be a list of clients to do this
    operation on. This lets you e.g. set up one client with ``ceph-fuse``
    and another with ``kclient``.

    Example that mounts all clients::

        tasks:
        - ceph:
        - ceph-fuse:
        - interactive:

    Example that enables valgrind::

        tasks:
        - ceph:
        - ceph-fuse:
            client.0:
              valgrind: [--tool=memcheck, --leak-check=full, --show-reachable=yes]
        - interactive:

    :param ctx: Context
    :param config: Configuration (None, list of roles, or dict)
    """
    log.info('Mounting ceph-fuse clients...')
    fuse_daemons = {}

    testdir = teuthology.get_testdir(ctx)

    # Normalize config into {"client.N": per-client-config} form.
    if config is None:
        config = dict(('client.{id}'.format(id=id_), None)
                      for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client'))
    elif isinstance(config, list):
        config = dict((name, None) for name in config)

    overrides = ctx.config.get('overrides', {})
    teuthology.deep_merge(config, overrides.get('ceph-fuse', {}))

    clients = list(teuthology.get_clients(ctx=ctx, roles=config.keys()))

    for id_, remote in clients:
        client_config = config.get("client.%s" % id_)
        if client_config is None:
            client_config = {}
        log.info("Client client.%s config is %s" % (id_, client_config))

        # 'term' lets the daemon shut down cleanly so coverage/valgrind data
        # gets flushed.
        daemon_signal = 'kill'
        if client_config.get('coverage') or client_config.get('valgrind') is not None:
            daemon_signal = 'term'

        mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
        log.info('Mounting ceph-fuse client.{id} at {remote} {mnt}...'.format(
            id=id_, remote=remote, mnt=mnt))

        remote.run(
            args=[
                'mkdir', '--', mnt,
            ],
        )

        run_cmd = [
            'sudo',
            'adjust-ulimits',
            'ceph-coverage',
            '{tdir}/archive/coverage'.format(tdir=testdir),
            'daemon-helper',
            daemon_signal,
        ]
        run_cmd_tail = [
            'ceph-fuse',
            '-f',
            '--name', 'client.{id}'.format(id=id_),
            # TODO ceph-fuse doesn't understand dash dash '--',
            mnt,
        ]

        if client_config.get('valgrind') is not None:
            run_cmd = teuthology.get_valgrind_args(
                testdir,
                'client.{id}'.format(id=id_),
                run_cmd,
                client_config.get('valgrind'),
            )

        run_cmd.extend(run_cmd_tail)

        proc = remote.run(
            args=run_cmd,
            logger=log.getChild('ceph-fuse.{id}'.format(id=id_)),
            stdin=run.PIPE,
            wait=False,
        )
        fuse_daemons[id_] = proc

    for id_, remote in clients:
        mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
        teuthology.wait_until_fuse_mounted(
            remote=remote,
            fuse=fuse_daemons[id_],
            mountpoint=mnt,
        )
        remote.run(
            args=['sudo', 'chmod', '1777', '{tdir}/mnt.{id}'.format(tdir=testdir, id=id_)],
        )

    try:
        yield
    finally:
        log.info('Unmounting ceph-fuse clients...')
        for id_, remote in clients:
            mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
            try:
                remote.run(
                    args=[
                        'sudo', 'fusermount', '-u', mnt,
                    ],
                )
            except run.CommandFailedError:
                log.info('Failed to unmount ceph-fuse on {name}, aborting...'.format(
                    name=remote.name))
                # abort the fuse mount, killing all hung processes
                remote.run(
                    args=[
                        'if', 'test', '-e', '/sys/fs/fuse/connections/*/abort',
                        run.Raw(';'), 'then',
                        'echo', '1', run.Raw('>'), run.Raw('/sys/fs/fuse/connections/*/abort'),
                        run.Raw(';'), 'fi',
                    ],
                )
                # make sure its unmounted
                remote.run(
                    args=[
                        'sudo', 'umount', '-l', '-f', mnt,
                    ],
                )

        # BUGFIX: dict.itervalues() is Python 2 only and raises
        # AttributeError under Python 3; .values() behaves identically here.
        run.wait(fuse_daemons.values())

        for id_, remote in clients:
            mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
            remote.run(
                args=[
                    'rmdir', '--', mnt,
                ],
            )
def task(ctx, config):
    """
    Mount/unmount a ``ceph-fuse`` client (FuseMount-object variant with
    ``mounted`` toggling support).

    The config is optional and defaults to mounting on all clients. If a
    config is given, it is expected to be a list of clients to do this
    operation on. This lets you e.g. set up one client with ``ceph-fuse``
    and another with ``kclient``.

    Example that mounts all clients::

        tasks:
        - ceph:
        - ceph-fuse:
        - interactive:

    Example that enables valgrind::

        tasks:
        - ceph:
        - ceph-fuse:
            client.0:
              valgrind: [--tool=memcheck, --leak-check=full, --show-reachable=yes]
        - interactive:

    Example that stops an already-mounted client::

        tasks:
        - ceph:
        - ceph-fuse: [client.0]
        - ... do something that requires the FS mounted ...
        - ceph-fuse:
            client.0:
                mounted: false
        - ... do something that requires the FS unmounted ...

    :param ctx: Context
    :param config: Configuration
    """
    log.info('Mounting ceph-fuse clients...')

    testdir = teuthology.get_testdir(ctx)
    config = get_client_configs(ctx, config)

    # List clients we will configure mounts for, default is all clients
    clients = list(teuthology.get_clients(ctx=ctx, roles=config.keys()))

    # ctx.mounts may already hold mounts from an earlier ceph-fuse task
    # invocation in the same job.
    all_mounts = getattr(ctx, 'mounts', {})
    mounted_by_me = {}

    # Construct any new FuseMount instances
    for id_, remote in clients:
        client_config = config.get("client.%s" % id_)
        if client_config is None:
            client_config = {}

        if id_ not in all_mounts:
            fuse_mount = FuseMount(client_config, testdir, id_, remote)
            all_mounts[id_] = fuse_mount
        else:
            # Catch bad configs where someone has e.g. tried to use ceph-fuse
            # and kcephfs for the same client
            assert isinstance(all_mounts[id_], FuseMount)

        if client_config.get('mounted', True):
            mounted_by_me[id_] = all_mounts[id_]

    # Mount any clients we have been asked to (default to mount all)
    for mount in mounted_by_me.values():
        mount.mount()

    for mount in mounted_by_me.values():
        mount.wait_until_mounted()

    # Umount any pre-existing clients that we have not been asked to mount
    for client_id in set(all_mounts.keys()) - set(mounted_by_me.keys()):
        mount = all_mounts[client_id]
        if mount.is_mounted():
            mount.umount_wait()

    ctx.mounts = all_mounts
    try:
        yield all_mounts
    finally:
        log.info('Unmounting ceph-fuse clients...')
        for mount in mounted_by_me.values():
            # Conditional because an inner context might have umounted it
            if mount.is_mounted():
                mount.umount_wait()
def task(ctx, config):
    """
    Mount/unmount a ``kernel`` client (mon-address variant with IPMI
    credentials for forced-umount recovery).

    The config is optional and defaults to mounting on all clients. If a
    config is given, it is expected to be a list of clients to do this
    operation on. This lets you e.g. set up one client with ``ceph-fuse``
    and another with ``kclient``.

    Example that mounts all clients::

        tasks:
        - ceph:
        - kclient:
        - interactive:

    Pass a dictionary instead of lists to specify per-client config::

        tasks:
        - kclient:
            client.0:
                debug: true

    :param ctx: Context
    :param config: Configuration (None, list of roles, or dict)
    """
    log.info('Mounting kernel clients...')
    assert config is None or isinstance(config, list) or isinstance(config, dict), \
        "task kclient got invalid config"

    if config is None:
        config = ['client.{id}'.format(id=id_)
                  for id_ in misc.all_roles_of_type(ctx.cluster, 'client')]

    if isinstance(config, list):
        client_roles = config
        config = dict([r, dict()] for r in client_roles)
    elif isinstance(config, dict):
        client_roles = filter(lambda x: 'client.' in x, config.keys())
    else:
        raise ValueError("Invalid config object: {0} ({1})".format(config, config.__class__))

    # config has been converted to a dict by this point
    overrides = ctx.config.get('overrides', {})
    deep_merge(config, overrides.get('kclient', {}))

    clients = list(misc.get_clients(ctx=ctx, roles=client_roles))

    test_dir = misc.get_testdir(ctx)

    # Assemble mon addresses
    remotes_and_roles = ctx.cluster.remotes.items()
    roles = [roles for (remote_, roles) in remotes_and_roles]
    ips = [remote_.ssh.get_transport().getpeername()[0]
           for (remote_, _) in remotes_and_roles]
    mons = misc.get_mons(roles, ips).values()

    mounts = {}
    for id_, remote in clients:
        client_config = config.get("client.%s" % id_)
        if client_config is None:
            client_config = {}

        # Skip clients explicitly disabled (globally or per-client).
        if config.get("disabled", False) or not client_config.get('mounted', True):
            continue

        kernel_mount = KernelMount(
            mons,
            test_dir,
            id_,
            remote,
            ctx.teuthology_config.get('ipmi_user', None),
            ctx.teuthology_config.get('ipmi_password', None),
            ctx.teuthology_config.get('ipmi_domain', None)
        )

        mounts[id_] = kernel_mount

        if client_config.get('debug', False):
            # Enable kernel dynamic debug for the ceph/libceph modules.
            remote.run(args=["sudo", "bash", "-c",
                             "echo 'module ceph +p' > /sys/kernel/debug/dynamic_debug/control"])
            remote.run(args=["sudo", "bash", "-c",
                             "echo 'module libceph +p' > /sys/kernel/debug/dynamic_debug/control"])

        kernel_mount.mount()

    def umount_all():
        # Unmount everything we mounted; returns True if a force was needed.
        log.info('Unmounting kernel clients...')

        forced = False
        for mount in mounts.values():
            if mount.is_mounted():
                try:
                    mount.umount()
                except (CommandFailedError, MaxWhileTries):
                    # BUGFIX: Logger.warn is deprecated (removed in Python
                    # 3.13); use warning() as other blocks in this file do.
                    log.warning("Ordinary umount failed, forcing...")
                    forced = True
                    mount.umount_wait(force=True)

        return forced

    ctx.mounts = mounts
    try:
        yield mounts
    except:
        umount_all()  # ignore forced retval, we are already in error handling
    finally:
        forced = umount_all()
        if forced:
            # The context managers within the kclient manager worked (i.e.
            # the test workload passed) but for some reason we couldn't
            # umount, so turn this into a test failure.
            raise RuntimeError("Kernel mounts did not umount cleanly")
def task(ctx, config):
    """
    Mount nfs client (requires nfs server export like knfsd or ganesha).

    Example that mounts a single nfs client::

        tasks:
        - ceph:
        - kclient: [client.0]
        - knfsd: [client.0]
        - nfs:
            client.1:
                server: client.0
        - interactive:

    Example that mounts multiple nfs clients with options::

        tasks:
        - ceph:
        - kclient: [client.0, client.1]
        - knfsd: [client.0, client.1]
        - nfs:
            client.2:
                server: client.0
                options: [rw,hard,intr,nfsvers=3]
            client.3:
                server: client.1
                options: [ro]
        - workunit:
            clients:
                client.2:
                    - suites/dbench.sh
                client.3:
                    - suites/blogbench.sh

    It is not recommended that the nfs client and nfs server reside on the
    same node.  So in the example above client.0-3 should be on 4 distinct
    nodes.  The client nfs testing would be using only client.2 and client.3.

    :param ctx: Context
    :param config: Configuration; each client entry must carry a ``server``
        role name ("client.N") and may carry a list of mount ``options``.
    """
    log.info('Mounting nfs clients...')
    assert isinstance(config, dict)

    clients = list(teuthology.get_clients(ctx=ctx, roles=config.keys()))

    testdir = teuthology.get_testdir(ctx)

    # Hoisted out of the per-client loop: the full role->remote mapping is
    # loop-invariant, so compute it once instead of once per client.
    all_config = ['client.{id}'.format(id=tmpid)
                  for tmpid in teuthology.all_roles_of_type(ctx.cluster, 'client')]
    all_clients = list(teuthology.get_clients(ctx=ctx, roles=all_config))

    for id_, remote in clients:
        mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
        client_config = config.get("client.%s" % id_)
        if client_config is None:
            client_config = {}
        log.debug("Client client.%s config is %s" % (id_, client_config))

        assert client_config.get('server') is not None
        server = client_config.get('server')

        # "client.N" -> "N"; the server exports testdir/mnt.N.
        svr_id = server[len('client.'):]
        svr_mnt = os.path.join(testdir, 'mnt.{id}'.format(id=svr_id))

        svr_remote = None
        for tmpid, tmpremote in all_clients:
            if tmpid == svr_id:
                svr_remote = tmpremote
                break

        assert svr_remote is not None
        # Keep only the host part of "user@host".
        svr_remote = svr_remote.name.split('@', 2)[1]

        if client_config.get('options') is not None:
            opts = ','.join(client_config.get('options'))
        else:
            opts = 'rw'

        log.info('Mounting client.{id} from client.{sid}'.format(id=id_, sid=svr_id))
        log.debug('mount -o {opts} {remote}:{svr_mnt} {mnt}'.format(
            remote=svr_remote, svr_mnt=svr_mnt, opts=opts, mnt=mnt))

        remote.run(
            args=[
                'mkdir', '--', mnt,
            ],
        )

        remote.run(
            args=[
                'sudo', "mount", "-o", opts,
                '{remote}:{mnt}'.format(remote=svr_remote, mnt=svr_mnt),
                mnt
            ],
        )

    try:
        yield
    finally:
        log.info('Unmounting nfs clients...')
        for id_, remote in clients:
            log.debug('Unmounting nfs client client.{id}...'.format(id=id_))
            mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
            try:
                # Sync first, and list any processes still holding the mount
                # (best effort: check_status=False).
                log.debug('First, syncing client client.{id}'.format(id=id_))
                remote.run(
                    args=[
                        'sync'
                    ]
                )
                remote.run(
                    args=[
                        'sudo', 'lsof', '-V', '+D',
                        '{mnt}'.format(mnt=mnt),
                    ],
                    check_status=False
                )
            finally:
                remote.run(
                    args=[
                        'sudo', 'umount', mnt,
                    ],
                )
                remote.run(
                    args=[
                        'rmdir', '--', mnt,
                    ],
                )
def task(ctx, config):
    """
    Mount/unmount a ``ceph-fuse`` client (double-quoted string variant of
    the testdir-era task).

    The config is optional and defaults to mounting on all clients. If a
    config is given, it is expected to be a list of clients to do this
    operation on. This lets you e.g. set up one client with ``ceph-fuse``
    and another with ``kclient``.

    Example that mounts all clients::

        tasks:
        - ceph:
        - ceph-fuse:
        - interactive:

    Example that enables valgrind::

        tasks:
        - ceph:
        - ceph-fuse:
            client.0:
              valgrind: [--tool=memcheck, --leak-check=full, --show-reachable=yes]
        - interactive:

    :param ctx: Context
    :param config: Configuration (None, list of roles, or dict)
    """
    log.info("Mounting ceph-fuse clients...")
    fuse_daemons = {}

    testdir = teuthology.get_testdir(ctx)

    # Normalize config into {"client.N": per-client-config} form.
    if config is None:
        config = dict(
            ("client.{id}".format(id=id_), None)
            for id_ in teuthology.all_roles_of_type(ctx.cluster, "client")
        )
    elif isinstance(config, list):
        config = dict((name, None) for name in config)

    overrides = ctx.config.get("overrides", {})
    teuthology.deep_merge(config, overrides.get("ceph-fuse", {}))

    clients = list(teuthology.get_clients(ctx=ctx, roles=config.keys()))

    for id_, remote in clients:
        client_config = config.get("client.%s" % id_)
        if client_config is None:
            client_config = {}
        log.info("Client client.%s config is %s" % (id_, client_config))

        # 'term' lets the daemon shut down cleanly so coverage/valgrind data
        # gets flushed.
        daemon_signal = "kill"
        if client_config.get("coverage") or client_config.get("valgrind") is not None:
            daemon_signal = "term"

        mnt = os.path.join(testdir, "mnt.{id}".format(id=id_))
        log.info("Mounting ceph-fuse client.{id} at {remote} {mnt}...".format(
            id=id_, remote=remote, mnt=mnt))

        remote.run(args=["mkdir", "--", mnt])

        run_cmd = [
            "sudo",
            "adjust-ulimits",
            "ceph-coverage",
            "{tdir}/archive/coverage".format(tdir=testdir),
            "daemon-helper",
            daemon_signal,
        ]
        run_cmd_tail = [
            "ceph-fuse",
            "-f",
            "--name",
            "client.{id}".format(id=id_),
            # TODO ceph-fuse doesn't understand dash dash '--',
            mnt,
        ]

        if client_config.get("valgrind") is not None:
            run_cmd = teuthology.get_valgrind_args(
                testdir, "client.{id}".format(id=id_), run_cmd, client_config.get("valgrind")
            )

        run_cmd.extend(run_cmd_tail)

        proc = remote.run(
            args=run_cmd,
            logger=log.getChild("ceph-fuse.{id}".format(id=id_)),
            stdin=run.PIPE,
            wait=False
        )
        fuse_daemons[id_] = proc

    for id_, remote in clients:
        mnt = os.path.join(testdir, "mnt.{id}".format(id=id_))
        teuthology.wait_until_fuse_mounted(remote=remote, fuse=fuse_daemons[id_], mountpoint=mnt)
        remote.run(args=["sudo", "chmod", "1777",
                         "{tdir}/mnt.{id}".format(tdir=testdir, id=id_)])

    try:
        yield
    finally:
        log.info("Unmounting ceph-fuse clients...")
        for id_, remote in clients:
            mnt = os.path.join(testdir, "mnt.{id}".format(id=id_))
            try:
                remote.run(args=["sudo", "fusermount", "-u", mnt])
            except run.CommandFailedError:
                log.info("Failed to unmount ceph-fuse on {name}, aborting...".format(
                    name=remote.name))
                # abort the fuse mount, killing all hung processes
                remote.run(
                    args=[
                        "if", "test", "-e", "/sys/fs/fuse/connections/*/abort",
                        run.Raw(";"), "then",
                        "echo", "1", run.Raw(">"), run.Raw("/sys/fs/fuse/connections/*/abort"),
                        run.Raw(";"), "fi",
                    ]
                )
                # make sure its unmounted
                remote.run(args=["sudo", "umount", "-l", "-f", mnt])

        # BUGFIX: dict.itervalues() is Python 2 only and raises
        # AttributeError under Python 3; .values() behaves identically here.
        run.wait(fuse_daemons.values())

        for id_, remote in clients:
            mnt = os.path.join(testdir, "mnt.{id}".format(id=id_))
            remote.run(args=["rmdir", "--", mnt])
def task(ctx, config):
    """
    Mount/unmount a cifs client.

    The config is optional and defaults to mounting on all clients. If a
    config is given, it is expected to be a list of clients to do this
    operation on.

    Example that starts smbd and mounts cifs on all nodes::

        tasks:
        - ceph:
        - samba:
        - cifs-mount:
        - interactive:

    Example that splits smbd and cifs::

        tasks:
        - ceph:
        - samba: [samba.0]
        - cifs-mount: [client.0]
        - ceph-fuse: [client.1]
        - interactive:

    Example that specifies the share name::

        tasks:
        - ceph:
        - ceph-fuse:
        - samba:
            samba.0:
                cephfuse: "{testdir}/mnt.0"
        - cifs-mount:
            client.0:
                share: cephfuse

    :param ctx: Context
    :param config: Configuration
    """
    log.info('Mounting cifs clients...')

    # Normalize config into {"client.N": per-client-config} form.
    if config is None:
        config = dict(('client.{id}'.format(id=id_), None)
                      for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client'))
    elif isinstance(config, list):
        config = dict((name, None) for name in config)

    clients = list(teuthology.get_clients(ctx=ctx, roles=config.keys()))

    from teuthology.task.samba import get_sambas
    samba_roles = ['samba.{id_}'.format(id_=id_)
                   for id_ in teuthology.all_roles_of_type(ctx.cluster, 'samba')]
    sambas = list(get_sambas(ctx=ctx, roles=samba_roles))
    # All clients mount the first samba server.
    (ip, _) = sambas[0][1].ssh.get_transport().getpeername()
    log.info('samba ip: {ip}'.format(ip=ip))

    for id_, remote in clients:
        mnt = os.path.join(teuthology.get_testdir(ctx), 'mnt.{id}'.format(id=id_))
        log.info('Mounting cifs client.{id} at {remote} {mnt}...'.format(
            id=id_, remote=remote, mnt=mnt))

        remote.run(
            args=[
                'mkdir', '--', mnt,
            ],
        )

        rolestr = 'client.{id_}'.format(id_=id_)
        unc = "ceph"
        log.info("config: {c}".format(c=config))
        if config[rolestr] is not None and 'share' in config[rolestr]:
            unc = config[rolestr]['share']

        remote.run(
            args=[
                'sudo', 'mount', '-t', 'cifs',
                '//{sambaip}/{unc}'.format(sambaip=ip, unc=unc),
                '-o', 'username=ubuntu,password=ubuntu',
                mnt,
            ],
        )

        remote.run(
            args=[
                'sudo', 'chown', 'ubuntu:ubuntu', '{m}/'.format(m=mnt),
            ],
        )

    try:
        yield
    finally:
        log.info('Unmounting cifs clients...')
        for id_, remote in clients:
            # BUGFIX: recompute each client's own mountpoint.  The original
            # reused the stale `mnt` left over from the mount loop, so every
            # client tried to umount/rmdir the LAST client's path.
            mnt = os.path.join(teuthology.get_testdir(ctx), 'mnt.{id}'.format(id=id_))
            remote.run(
                args=[
                    'sudo', 'umount', mnt,
                ],
            )
        for id_, remote in clients:
            mnt = os.path.join(teuthology.get_testdir(ctx), 'mnt.{id}'.format(id=id_))
            while True:
                try:
                    # rmdir fails while busy; grep exits non-zero (raising)
                    # once "Device or resource busy" no longer appears,
                    # which breaks the retry loop.
                    remote.run(
                        args=[
                            'rmdir', '--', mnt,
                            run.Raw('2>&1'), run.Raw('|'),
                            'grep', 'Device or resource busy',
                        ],
                    )
                    import time
                    time.sleep(1)
                except Exception:
                    break
def task(ctx, config):
    """
    Mount/unmount a cifs client (second variant).

    The config is optional and defaults to mounting on all clients. If a
    config is given, it is expected to be a list of clients to do this
    operation on.

    Example that starts smbd and mounts cifs on all nodes::

        tasks:
        - ceph:
        - samba:
        - cifs-mount:
        - interactive:

    Example that specifies the share name::

        tasks:
        - ceph:
        - ceph-fuse:
        - samba:
            samba.0:
                cephfuse: "{testdir}/mnt.0"
        - cifs-mount:
            client.0:
                share: cephfuse
    """
    log.info('Mounting cifs clients...')

    # Normalize config into {"client.N": per-client-config} form.
    if config is None:
        config = dict(('client.{id}'.format(id=id_), None)
                      for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client'))
    elif isinstance(config, list):
        config = dict((name, None) for name in config)

    clients = list(teuthology.get_clients(ctx=ctx, roles=config.keys()))

    from teuthology.task.samba import get_sambas
    samba_roles = ['samba.{id_}'.format(id_=id_)
                   for id_ in teuthology.all_roles_of_type(ctx.cluster, 'samba')]
    sambas = list(get_sambas(ctx=ctx, roles=samba_roles))
    # All clients mount the first samba server.
    (ip, port) = sambas[0][1].ssh.get_transport().getpeername()
    log.info('samba ip: {ip}'.format(ip=ip))

    for id_, remote in clients:
        mnt = os.path.join(teuthology.get_testdir(ctx), 'mnt.{id}'.format(id=id_))
        log.info('Mounting cifs client.{id} at {remote} {mnt}...'.format(
            id=id_, remote=remote, mnt=mnt))

        remote.run(
            args=[
                'mkdir', '--', mnt,
            ],
        )

        rolestr = 'client.{id_}'.format(id_=id_)
        unc = "ceph"
        log.info("config: {c}".format(c=config))
        if config[rolestr] is not None and 'share' in config[rolestr]:
            unc = config[rolestr]['share']

        remote.run(
            args=[
                'sudo', 'mount', '-t', 'cifs',
                '//{sambaip}/{unc}'.format(sambaip=ip, unc=unc),
                '-o', 'username=ubuntu,password=ubuntu',
                mnt,
            ],
        )

        remote.run(
            args=[
                'sudo', 'chown', 'ubuntu:ubuntu', '{m}/'.format(m=mnt),
            ],
        )

    try:
        yield
    finally:
        log.info('Unmounting cifs clients...')
        for id_, remote in clients:
            # BUGFIX: recompute each client's own mountpoint.  The original
            # reused the stale `mnt` left over from the mount loop, so every
            # client tried to umount/rmdir the LAST client's path.
            mnt = os.path.join(teuthology.get_testdir(ctx), 'mnt.{id}'.format(id=id_))
            remote.run(
                args=[
                    'sudo', 'umount', mnt,
                ],
            )
        for id_, remote in clients:
            mnt = os.path.join(teuthology.get_testdir(ctx), 'mnt.{id}'.format(id=id_))
            while True:
                try:
                    # rmdir fails while busy; grep exits non-zero (raising)
                    # once "Device or resource busy" no longer appears,
                    # which breaks the retry loop.
                    remote.run(
                        args=[
                            'rmdir', '--', mnt,
                            run.Raw('2>&1'), run.Raw('|'),
                            'grep', 'Device or resource busy',
                        ],
                    )
                    import time
                    time.sleep(1)
                except Exception:
                    # BUGFIX: narrowed from bare `except:`, which also
                    # swallowed KeyboardInterrupt/SystemExit.
                    break
def task(ctx, config):
    """
    Mount/unmount a ``kernel`` client (ctx-based KernelMount variant with
    IPMI credentials for forced-umount recovery).

    The config is optional and defaults to mounting on all clients. If a
    config is given, it is expected to be a list of clients to do this
    operation on. This lets you e.g. set up one client with ``ceph-fuse``
    and another with ``kclient``.

    Example that mounts all clients::

        tasks:
        - ceph:
        - kclient:
        - interactive:

    Pass a dictionary instead of lists to specify per-client config::

        tasks:
        - kclient:
            client.0:
                debug: true

    :param ctx: Context
    :param config: Configuration
    """
    log.info('Mounting kernel clients...')
    assert config is None or isinstance(config, list) or isinstance(config, dict), \
        "task kclient got invalid config"

    if config is None:
        config = ['client.{id}'.format(id=id_)
                  for id_ in misc.all_roles_of_type(ctx.cluster, 'client')]

    # Normalize config into {"client.N": {per-client dict}} form.
    if isinstance(config, list):
        client_roles = config
        config = dict([r, dict()] for r in client_roles)
    elif isinstance(config, dict):
        client_roles = filter(lambda x: 'client.' in x, config.keys())
    else:
        raise ValueError("Invalid config object: {0} ({1})".format(config, config.__class__))

    # config has been converted to a dict by this point
    overrides = ctx.config.get('overrides', {})
    deep_merge(config, overrides.get('kclient', {}))

    clients = list(misc.get_clients(ctx=ctx, roles=client_roles))

    test_dir = misc.get_testdir(ctx)

    mounts = {}
    for id_, remote in clients:
        client_config = config.get("client.%s" % id_)
        if client_config is None:
            client_config = {}

        # Skip clients explicitly disabled (globally or per-client).
        if config.get("disabled", False) or not client_config.get('mounted', True):
            continue

        kernel_mount = KernelMount(
            ctx,
            test_dir,
            id_,
            remote,
            ctx.teuthology_config.get('ipmi_user', None),
            ctx.teuthology_config.get('ipmi_password', None),
            ctx.teuthology_config.get('ipmi_domain', None)
        )

        mounts[id_] = kernel_mount

        if client_config.get('debug', False):
            # Enable kernel dynamic debug for the ceph/libceph modules.
            remote.run(args=["sudo", "bash", "-c", "echo 'module ceph +p' > /sys/kernel/debug/dynamic_debug/control"])
            remote.run(args=["sudo", "bash", "-c", "echo 'module libceph +p' > /sys/kernel/debug/dynamic_debug/control"])

        kernel_mount.mount()

    def umount_all():
        # Unmount everything we mounted; returns True if a force was needed.
        log.info('Unmounting kernel clients...')

        forced = False
        for mount in mounts.values():
            if mount.is_mounted():
                try:
                    mount.umount()
                except (CommandFailedError, MaxWhileTries):
                    log.warning("Ordinary umount failed, forcing...")
                    forced = True
                    mount.umount_wait(force=True)

        return forced

    ctx.mounts = mounts
    try:
        yield mounts
    except:
        umount_all()  # ignore forced retval, we are already in error handling
    finally:
        forced = umount_all()
        if forced:
            # The context managers within the kclient manager worked (i.e.
            # the test workload passed) but for some reason we couldn't
            # umount, so turn this into a test failure.
            raise RuntimeError("Kernel mounts did not umount cleanly")
def task(ctx, config):
    """
    Mount/unmount a ``ceph-fuse`` client (variant with ``disabled`` flag,
    ``mounted`` toggling and ``mount_wait``/``mount_timeout`` support).

    The config is optional and defaults to mounting on all clients. If a
    config is given, it is expected to be a list of clients to do this
    operation on. This lets you e.g. set up one client with ``ceph-fuse``
    and another with ``kclient``.

    Example that mounts all clients::

        tasks:
        - ceph:
        - ceph-fuse:
        - interactive:

    Example that enables valgrind::

        tasks:
        - ceph:
        - ceph-fuse:
            client.0:
              valgrind: [--tool=memcheck, --leak-check=full, --show-reachable=yes]
        - interactive:

    Example that stops an already-mounted client::

        tasks:
        - ceph:
        - ceph-fuse: [client.0]
        - ... do something that requires the FS mounted ...
        - ceph-fuse:
            client.0:
                mounted: false
        - ... do something that requires the FS unmounted ...

    Example that adds more generous wait time for mount (for virtual machines)::

        tasks:
        - ceph:
        - ceph-fuse:
            client.0:
              mount_wait: 60      # default is 0, do not wait before checking /sys/
              mount_timeout: 120  # default is 30, give up if /sys/ is not populated
        - interactive:

    :param ctx: Context
    :param config: Configuration
    """
    log.info('Mounting ceph-fuse clients...')

    testdir = teuthology.get_testdir(ctx)
    config = get_client_configs(ctx, config)

    # List clients we will configure mounts for, default is all clients
    clients = list(teuthology.get_clients(ctx=ctx,
                                          roles=filter(lambda x: 'client.' in x, config.keys())))

    # ctx.mounts may already hold mounts from an earlier ceph-fuse task
    # invocation in the same job.
    all_mounts = getattr(ctx, 'mounts', {})
    mounted_by_me = {}

    # Construct any new FuseMount instances
    for id_, remote in clients:
        client_config = config.get("client.%s" % id_)
        if client_config is None:
            client_config = {}

        if id_ not in all_mounts:
            fuse_mount = FuseMount(client_config, testdir, id_, remote)
            all_mounts[id_] = fuse_mount
        else:
            # Catch bad configs where someone has e.g. tried to use ceph-fuse
            # and kcephfs for the same client
            assert isinstance(all_mounts[id_], FuseMount)

        if not config.get("disabled", False) and client_config.get('mounted', True):
            mounted_by_me[id_] = all_mounts[id_]

    ctx.mounts = all_mounts

    # Mount any clients we have been asked to (default to mount all)
    for mount in mounted_by_me.values():
        mount.mount()

    for mount in mounted_by_me.values():
        mount.wait_until_mounted()

    # Umount any pre-existing clients that we have not been asked to mount
    for client_id in set(all_mounts.keys()) - set(mounted_by_me.keys()):
        mount = all_mounts[client_id]
        if mount.is_mounted():
            mount.umount_wait()

    try:
        yield all_mounts
    finally:
        log.info('Unmounting ceph-fuse clients...')
        for mount in mounted_by_me.values():
            # Conditional because an inner context might have umounted it
            if mount.is_mounted():
                mount.umount_wait()
def task(ctx, config):
    """
    Mount/unmount a ``ceph-fuse`` client.

    The config is optional and defaults to mounting on all clients. If
    a config is given, it is expected to be a list of clients to do
    this operation on. This lets you e.g. set up one client with
    ``ceph-fuse`` and another with ``kclient``.

    Example that mounts all clients::

        tasks:
        - ceph:
        - ceph-fuse:
        - interactive:

    Example that uses both ``kclient`` and ``ceph-fuse``::

        tasks:
        - ceph:
        - ceph-fuse: [client.0]
        - kclient: [client.1]
        - interactive:

    Example that enables valgrind:

        tasks:
        - ceph:
        - ceph-fuse:
            client.0:
              valgrind: [--tool=memcheck, --leak-check=full, --show-reachable=yes]
        - interactive:

    :param ctx: Context
    :param config: Configuration (None, list of client roles, or dict)
    """
    log.info('Mounting ceph-fuse clients...')
    fuse_daemons = {}

    # Normalize config to a dict of "client.<id>" -> per-client config.
    if config is None:
        config = dict(('client.{id}'.format(id=id_), None)
                      for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client'))
    elif isinstance(config, list):
        config = dict((name, None) for name in config)

    # Fold in any job-level 'ceph-fuse' overrides.
    overrides = ctx.config.get('overrides', {})
    teuthology.deep_merge(config, overrides.get('ceph-fuse', {}))

    clients = list(teuthology.get_clients(ctx=ctx, roles=config.keys()))

    for id_, remote in clients:
        mnt = os.path.join('/tmp/cephtest', 'mnt.{id}'.format(id=id_))
        log.info('Mounting ceph-fuse client.{id} at {remote} {mnt}...'.format(
                id=id_, remote=remote, mnt=mnt))

        client_config = config.get("client.%s" % id_)
        if client_config is None:
            client_config = {}
        log.info("Client client.%s config is %s" % (id_, client_config))

        # Under coverage or valgrind the daemon must exit cleanly on
        # SIGTERM so results get flushed; otherwise just kill it.
        daemon_signal = 'kill'
        if client_config.get('coverage') or client_config.get('valgrind') is not None:
            daemon_signal = 'term'

        remote.run(
            args=[
                'mkdir',
                '--',
                mnt,
                ],
            )

        run_cmd = [
            '/tmp/cephtest/enable-coredump',
            '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
            '/tmp/cephtest/archive/coverage',
            '/tmp/cephtest/daemon-helper',
            daemon_signal,
            ]
        run_cmd_tail = [
            '/tmp/cephtest/binary/usr/local/bin/ceph-fuse',
            '-f',
            '--name', 'client.{id}'.format(id=id_),
            '-c', '/tmp/cephtest/ceph.conf',
            # TODO ceph-fuse doesn't understand dash dash
            '--',
            mnt,
            ]

        # Valgrind wraps the daemon command, so it goes before the tail.
        if client_config.get('valgrind') is not None:
            run_cmd.extend(
                teuthology.get_valgrind_args(
                    'client.{id}'.format(id=id_),
                    client_config.get('valgrind'),
                    )
                )

        run_cmd.extend(run_cmd_tail)

        # Launch asynchronously; we wait for the fuse mounts below.
        proc = remote.run(
            args=run_cmd,
            logger=log.getChild('ceph-fuse.{id}'.format(id=id_)),
            stdin=run.PIPE,
            wait=False,
            )
        fuse_daemons[id_] = proc

    for id_, remote in clients:
        mnt = os.path.join('/tmp/cephtest', 'mnt.{id}'.format(id=id_))
        teuthology.wait_until_fuse_mounted(
            remote=remote,
            fuse=fuse_daemons[id_],
            mountpoint=mnt,
            )
        # World-writable + sticky so any test user can create files.
        remote.run(args=['sudo', 'chmod', '1777', mnt],)

    try:
        yield
    finally:
        log.info('Unmounting ceph-fuse clients...')
        for id_, remote in clients:
            mnt = os.path.join('/tmp/cephtest', 'mnt.{id}'.format(id=id_))
            try:
                remote.run(
                    args=[
                        'fusermount',
                        '-u',
                        mnt,
                        ],
                    )
            except CommandFailedError:
                log.info('Failed to unmount ceph-fuse on {name}, aborting...'.format(name=remote.name))
                # abort the fuse mount, killing all hung processes
                remote.run(
                    args=[
                        'echo', '1', run.Raw('>'), run.Raw('/sys/fs/fuse/connections/*/abort'),
                        ],
                    )
                # make sure its unmounted
                remote.run(
                    args=[
                        'sudo', 'umount', '-l', '-f', mnt,
                        ],
                    )

        run.wait(fuse_daemons.itervalues())

        for id_, remote in clients:
            mnt = os.path.join('/tmp/cephtest', 'mnt.{id}'.format(id=id_))
            remote.run(
                args=[
                    'rmdir',
                    '--',
                    mnt,
                    ],
                )
def task(ctx, config):
    """
    Given a Ceph cluster has already been set up, exercise the migration
    of the CephFS journal from an older format to the latest format.  On
    successful completion the filesystem will be running with a journal
    in the new format.

    Optionally specify which client to use like this:

    - mds-journal_migration:
        client: client.0

    :param ctx: Context (must be nested inside 'ceph' and a client mount task)
    :param config: Optional dict, may contain a 'client' role name
    :raises RuntimeError: on bad nesting, missing client, or failed upgrade
    """
    if not hasattr(ctx, 'ceph'):
        raise RuntimeError("This task must be nested in 'ceph' task")
    if not hasattr(ctx, 'mounts'):
        raise RuntimeError("This task must be nested inside 'kclient' or 'ceph_fuse' task")

    # Determine which client we will use
    if config and 'client' in config:
        # Use client specified in config
        client_role = config['client']
        client_list = list(misc.get_clients(ctx, [client_role]))
        try:
            client_id = client_list[0][0]
        except IndexError:
            raise RuntimeError("Client role '{0}' not found".format(client_role))
    else:
        # Pick one arbitrary client to use
        client_list = list(misc.all_roles_of_type(ctx.cluster, 'client'))
        try:
            client_id = client_list[0]
        except IndexError:
            raise RuntimeError("This task requires at least one client")

    fs = Filesystem(ctx, config)
    ctx.fs = fs
    old_journal_version = JOURNAL_FORMAT_LEGACY
    new_journal_version = JOURNAL_FORMAT_RESILIENT

    # Set config so that journal will be created in older format
    if 'mds' not in ctx.ceph.conf:
        ctx.ceph.conf['mds'] = {}
    ctx.ceph.conf['mds']['mds journal format'] = old_journal_version
    write_conf(ctx)  # XXX because we don't have the ceph task's config object,
                     # if they used a different config path this won't work.

    # Create a filesystem using the older journal format.
    for mount in ctx.mounts.values():
        mount.umount_wait()
    fs.mds_stop()
    fs.reset()
    fs.mds_restart()

    # Do some client work so that the log is populated with something.
    mount = ctx.mounts[client_id]
    with mount.mounted():
        mount.create_files()
        mount.check_files()  # sanity, this should always pass

    # Modify the ceph.conf to ask the MDS to use the new journal format.
    ctx.ceph.conf['mds']['mds journal format'] = new_journal_version
    write_conf(ctx)

    # Restart the MDS.
    fs.mds_fail_restart()
    fs.wait_for_daemons()

    # This ensures that all daemons come up into a valid state
    fs.wait_for_daemons()

    # Check that files created in the initial client workload are still
    # visible in a client mount.
    with mount.mounted():
        mount.check_files()

    # Verify that the journal really has been rewritten.
    journal_version = fs.get_journal_version()
    if journal_version != new_journal_version:
        # Note: journal_version is a plain int; previously this format call
        # invoked journal_version() and raised TypeError instead of the
        # intended RuntimeError.
        raise RuntimeError("Journal was not upgraded, version should be {0} but is {1}".format(
            new_journal_version, journal_version
        ))

    # Leave all MDSs and clients running for any child tasks
    for mount in ctx.mounts.values():
        mount.mount()
        mount.wait_until_mounted()

    yield
def task(ctx, config):
    """
    Mount nfs client (requires nfs server export like knfsd or ganesh)

    Example that mounts a single nfs client::

        tasks:
        - ceph:
        - kclient: [client.0]
        - knfsd: [client.0]
        - nfs:
            client.1:
                server: client.0
        - interactive:

    Example that mounts multiple nfs clients with options::

        tasks:
        - ceph:
        - kclient: [client.0, client.1]
        - knfsd: [client.0, client.1]
        - nfs:
            client.2:
                server: client.0
                options: [rw,hard,intr,nfsvers=3]
            client.3:
                server: client.1
                options: [ro]
        - workunit:
            clients:
                client.2:
                    - suites/dbench.sh
                client.3:
                    - suites/blogbench.sh

    It is not recommended that the nfs client and nfs server reside on the
    same node.  So in the example above client.0-3 should be on 4 distinct
    nodes.  The client nfs testing would be using only client.2 and client.3.

    :param ctx: Context
    :param config: Configuration dict; each entry must carry a 'server' role
    """
    log.info('Mounting nfs clients...')
    # Unlike the ceph-fuse tasks, this one requires an explicit dict config
    # because every client needs a 'server' entry.
    assert isinstance(config, dict)
    clients = list(teuthology.get_clients(ctx=ctx, roles=config.keys()))

    testdir = teuthology.get_testdir(ctx)
    for id_, remote in clients:
        mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
        client_config = config.get("client.%s" % id_)
        if client_config is None:
            client_config = {}
        log.debug("Client client.%s config is %s" % (id_, client_config))

        assert client_config.get('server') is not None
        server = client_config.get('server')

        # The server role is "client.<id>"; its mount dir follows the same
        # mnt.<id> naming convention on the server node.
        svr_id = server[len('client.'):]
        svr_mnt = os.path.join(testdir, 'mnt.{id}'.format(id=svr_id))

        # Find the Remote object for the server role among all clients.
        svr_remote = None
        all_config = ['client.{id}'.format(id=tmpid)
                      for tmpid in teuthology.all_roles_of_type(ctx.cluster, 'client')]
        all_clients = list(teuthology.get_clients(ctx=ctx, roles=all_config))
        for tmpid, tmpremote in all_clients:
            if tmpid == svr_id:
                svr_remote = tmpremote
                break

        assert svr_remote is not None
        # Keep only the host part of "user@host" for the mount source.
        svr_remote = svr_remote.name.split('@', 2)[1]

        if client_config.get('options') is not None:
            opts = ','.join(client_config.get('options'))
        else:
            opts = 'rw'

        log.info('Mounting client.{id} from client.{sid}'.format(id=id_, sid=svr_id))
        log.debug('mount -o {opts} {remote}:{svr_mnt} {mnt}'.format(
                remote=svr_remote, svr_mnt=svr_mnt, opts=opts, mnt=mnt))

        remote.run(
            args=[
                'mkdir',
                '--',
                mnt,
                ],
            )

        remote.run(
            args=[
                'sudo',
                "mount",
                "-o",
                opts,
                '{remote}:{mnt}'.format(remote=svr_remote, mnt=svr_mnt),
                mnt
                ],
            )

    try:
        yield
    finally:
        log.info('Unmounting nfs clients...')
        for id_, remote in clients:
            log.debug('Unmounting nfs client client.{id}...'.format(id=id_))
            mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
            try:
                log.debug('First, syncing client client.{id}'.format(id=id_))
                remote.run(
                    args=[
                        'sync'
                        ],
                    )
                # lsof is diagnostic only (check_status=False): list anything
                # still holding the mount open before we unmount.
                remote.run(
                    args=[
                        'sudo',
                        'lsof', '-V', '+D',
                        '{mnt}'.format(mnt=mnt),
                        ],
                    check_status=False
                    )
            finally:
                # Always unmount and remove the mountpoint, even if the
                # sync/lsof step raised.
                remote.run(
                    args=[
                        'sudo',
                        'umount',
                        mnt,
                        ],
                    )
                remote.run(
                    args=[
                        'rmdir',
                        '--',
                        mnt,
                        ],
                    )
def task(ctx, config):
    """
    Mount/unmount a ``ceph-fuse`` client.

    The config is optional and defaults to mounting on all clients. If
    a config is given, it is expected to be a list of clients to do
    this operation on. This lets you e.g. set up one client with
    ``ceph-fuse`` and another with ``kclient``.

    Example that mounts all clients::

        tasks:
        - ceph:
        - ceph-fuse:
        - interactive:

    Example that uses both ``kclient`` and ``ceph-fuse``::

        tasks:
        - ceph:
        - ceph-fuse: [client.0]
        - kclient: [client.1]
        - interactive:

    Example that enables valgrind:

        tasks:
        - ceph:
        - ceph-fuse:
            client.0:
              valgrind: --tool=memcheck
        - interactive:

    :param ctx: Context
    :param config: Configuration (None, list of client roles, or dict)
    """
    log.info("Mounting ceph-fuse clients...")
    fuse_daemons = {}

    # Normalize config to a dict of "client.<id>" -> per-client config.
    if config is None:
        config = dict(
            ("client.{id}".format(id=id_), None)
            for id_ in teuthology.all_roles_of_type(ctx.cluster, "client")
        )
    elif isinstance(config, list):
        config = dict((name, None) for name in config)

    clients = list(teuthology.get_clients(ctx=ctx, roles=config.keys()))

    for id_, remote in clients:
        mnt = os.path.join("/tmp/cephtest", "mnt.{id}".format(id=id_))
        log.info("Mounting ceph-fuse client.{id} at {remote} {mnt}...".format(id=id_, remote=remote, mnt=mnt))

        client_config = config.get("client.%s" % id_)
        if client_config is None:
            client_config = {}
        log.info("Client client.%s config is %s" % (id_, client_config))

        # Use SIGTERM for coverage/valgrind so the daemon can flush results;
        # otherwise the daemon-helper kills it.
        daemon_signal = "kill"
        if client_config.get("coverage") or client_config.get("valgrind") is not None:
            daemon_signal = "term"

        remote.run(args=["mkdir", "--", mnt])

        run_cmd = [
            "/tmp/cephtest/enable-coredump",
            "/tmp/cephtest/binary/usr/local/bin/ceph-coverage",
            "/tmp/cephtest/archive/coverage",
            "/tmp/cephtest/daemon-helper",
            daemon_signal,
        ]
        run_cmd_tail = [
            "/tmp/cephtest/binary/usr/local/bin/ceph-fuse",
            "-f",
            "--name", "client.{id}".format(id=id_),
            "-c", "/tmp/cephtest/ceph.conf",
            # TODO ceph-fuse doesn't understand dash dash
            '--',
            mnt,
        ]

        # Valgrind wraps the ceph-fuse invocation, so its args come first.
        if client_config.get("valgrind") is not None:
            run_cmd.extend(teuthology.get_valgrind_args("client.{id}".format(id=id_),
                                                        client_config.get("valgrind")))
        run_cmd.extend(run_cmd_tail)

        # Start asynchronously; mounts are awaited in the next loop.
        proc = remote.run(
            args=run_cmd,
            logger=log.getChild("ceph-fuse.{id}".format(id=id_)),
            stdin=run.PIPE,
            wait=False
        )
        fuse_daemons[id_] = proc

    for id_, remote in clients:
        mnt = os.path.join("/tmp/cephtest", "mnt.{id}".format(id=id_))
        teuthology.wait_until_fuse_mounted(remote=remote, fuse=fuse_daemons[id_], mountpoint=mnt)

    try:
        yield
    finally:
        log.info("Unmounting ceph-fuse clients...")
        for id_, remote in clients:
            mnt = os.path.join("/tmp/cephtest", "mnt.{id}".format(id=id_))
            remote.run(args=["fusermount", "-u", mnt])
        # Wait for all daemon processes to exit before removing mountpoints.
        run.wait(fuse_daemons.itervalues())
        for id_, remote in clients:
            mnt = os.path.join("/tmp/cephtest", "mnt.{id}".format(id=id_))
            remote.run(args=["rmdir", "--", mnt])
def task(ctx, config):
    """
    Export/Unexport a ``nfs server`` client.

    The config is optional and defaults to exporting on all clients. If
    a config is given, it is expected to be a list or dict of clients to do
    this operation on. You must have specified ``ceph-fuse`` or
    ``kclient`` on all clients specified for knfsd.

    Example that exports all clients::

        tasks:
        - ceph:
        - kclient:
        - knfsd:
        - interactive:

    Example that uses both ``kclient`` and ``ceph-fuse``::

        tasks:
        - ceph:
        - ceph-fuse: [client.0]
        - kclient: [client.1]
        - knfsd: [client.0, client.1]
        - interactive:

    Example that specifies export options::

        tasks:
        - ceph:
        - kclient: [client.0, client.1]
        - knfsd:
            client.0:
                options: [rw,root_squash]
            client.1:
        - interactive:

    Note that when options aren't specified, rw,no_root_squash is the default.
    When you specify options, the defaults are as specified by exports(5).

    So if empty options are specified, i.e. options: [] these are the defaults:
        ro,sync,wdelay,hide,nocrossmnt,secure,root_squash,no_all_squash,
        no_subtree_check,secure_locks,acl,anonuid=65534,anongid=65534

    :param ctx: Context
    :param config: Configuration (None, list of client roles, or dict)
    """
    log.info('Exporting nfs server...')

    # Normalize config to a dict of "client.<id>" -> per-client config.
    if config is None:
        config = dict(('client.{id}'.format(id=id_), None)
                      for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client'))
    elif isinstance(config, list):
        config = dict((name, None) for name in config)

    clients = list(teuthology.get_clients(ctx=ctx, roles=config.keys()))

    for id_, remote in clients:
        mnt = os.path.join(teuthology.get_testdir(ctx), 'mnt.{id}'.format(id=id_))
        client_config = config.get("client.%s" % id_)
        if client_config is None:
            client_config = {}
        log.debug("Client client.%s config is %s" % (id_, client_config))

        if client_config.get('options') is not None:
            opts = ','.join(client_config.get('options'))
        else:
            opts = 'rw,no_root_squash'

        # Undocumented option to export to any client in case
        # testing in interactive mode from other unspecified clients.
        wildcard = False
        if client_config.get('wildcard') is not None:
            wildcard = True

        log.info('Exporting knfsd client.{id} at {remote} *:{mnt} ({opt})...'.format(
                id=id_, remote=remote, mnt=mnt, opt=opts))

        """
        Should the user want to run with root_squash enabled, there is no
        way to write anything to the initial ceph root dir which is set to
        rwxr-xr-x root root.

        This could possibly break test cases that make assumptions about
        the initial state of the root dir.
        """
        remote.run(
            args=[
                'sudo',
                'chmod',
                "777",
                '{MNT}'.format(MNT=mnt),
                ],
            )
        # Build the exportfs command; the fsid is derived from the client id
        # (prefixed with 123) so each export gets a distinct fsid.
        args = [
            'sudo',
            "exportfs",
            '-o',
            'fsid=123{id},{opt}'.format(id=id_, opt=opts),
            ]

        if wildcard:
            # Export to any host.
            args += ['*:{MNT}'.format(MNT=mnt)]
        else:
            """
            DEFAULT
            Prevent bogus clients from old runs from access our
            export.  Specify all specify node addresses for this run.
            """
            # Collect the peer IP of every remote in this run so the export
            # is restricted to this run's nodes.
            ips = [host for (host, port) in
                   (remote.ssh.get_transport().getpeername()
                    for (remote, roles) in ctx.cluster.remotes.items())]
            for ip in ips:
                args += ['{ip}:{MNT}'.format(ip=ip, MNT=mnt)]

        log.info('remote run {args}'.format(args=args))
        remote.run(args=args)

    try:
        yield
    finally:
        log.info('Unexporting nfs server...')
        for id_, remote in clients:
            log.debug('Unexporting client client.{id}...'.format(id=id_))
            mnt = os.path.join(teuthology.get_testdir(ctx),
                               'mnt.{id}'.format(id=id_))
            # 'exportfs -au' unexports everything on the node, not only
            # this client's mount.
            remote.run(
                args=[
                    'sudo',
                    'exportfs',
                    '-au',
                    ],
                )
def task(ctx, config):
    """
    Mount/unmount a ``ceph-fuse`` client.

    The config is optional and defaults to mounting on all clients. If
    a config is given, it is expected to be a list of clients to do
    this operation on. This lets you e.g. set up one client with
    ``ceph-fuse`` and another with ``kclient``.

    Example that mounts all clients::

        tasks:
        - ceph:
        - cfuse:
        - interactive:

    Example that uses both ``kclient`` and ``cfuse``::

        tasks:
        - ceph:
        - cfuse: [client.0]
        - kclient: [client.1]
        - interactive:

    Example that enables valgrind:

        tasks:
        - ceph:
        - cfuse:
            client.0:
              valgrind: --tool=memcheck
        - interactive:

    :param ctx: Context
    :param config: Configuration (None, list of client roles, or dict)
    """
    log.info('Mounting ceph-fuse clients...')
    cfuse_daemons = {}

    # Normalize config to a dict of "client.<id>" -> per-client config.
    if config is None:
        config = dict(('client.{id}'.format(id=id_), None)
                      for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client'))
    elif isinstance(config, list):
        config = dict((name, None) for name in config)

    clients = list(teuthology.get_clients(ctx=ctx, roles=config.keys()))

    for id_, remote in clients:
        mnt = os.path.join('/tmp/cephtest', 'mnt.{id}'.format(id=id_))
        log.info('Mounting ceph-fuse client.{id} at {remote} {mnt}...'.format(
                id=id_, remote=remote,mnt=mnt))

        client_config = config.get("client.%s" % id_)
        if client_config is None:
            client_config = {}
        log.info("Client client.%s config is %s" % (id_, client_config))

        # SIGTERM lets the daemon flush coverage data on shutdown.
        daemon_signal = 'kill'
        if client_config.get('coverage'):
            log.info('Recording coverage for this run.')
            daemon_signal = 'term'

        remote.run(
            args=[
                'mkdir',
                '--',
                mnt,
                ],
            )

        # daemon_signal is appended to run_cmd later, because the valgrind
        # branch below may still change it to 'term'.
        run_cmd=[
            '/tmp/cephtest/enable-coredump',
            '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
            '/tmp/cephtest/archive/coverage',
            '/tmp/cephtest/daemon-helper',
            ]
        run_cmd_tail=[
            '/tmp/cephtest/binary/usr/local/bin/ceph-fuse',
            '-f',
            '--name', 'client.{id}'.format(id=id_),
            '-c', '/tmp/cephtest/ceph.conf',
            # TODO ceph-fuse doesn't understand dash dash
            '--',
            mnt,
            ]

        extra_args = None
        if client_config.get('valgrind') is not None:
            log.debug('Running client.{id} under valgrind'.format(id=id_))
            val_path = '/tmp/cephtest/archive/log/valgrind'
            # Valgrind also needs a clean SIGTERM shutdown.
            daemon_signal = 'term'
            remote.run(
                args=[
                    'mkdir', '-p', '--', val_path,
                    ],
                wait=True,
                )
            extra_args = [
                'valgrind',
                '--log-file={vdir}/client.{id}.log'.format(vdir=val_path, id=id_),
                client_config.get('valgrind')
                ]

        # Assemble: helper + signal [+ valgrind] + ceph-fuse invocation.
        run_cmd.append(daemon_signal)
        if extra_args is not None:
            run_cmd.extend(extra_args)
        run_cmd.extend(run_cmd_tail)

        # Start asynchronously; mounts are awaited in the next loop.
        proc = remote.run(
            args=run_cmd,
            logger=log.getChild('cfuse.{id}'.format(id=id_)),
            stdin=run.PIPE,
            wait=False,
            )
        cfuse_daemons[id_] = proc

    for id_, remote in clients:
        mnt = os.path.join('/tmp/cephtest', 'mnt.{id}'.format(id=id_))
        teuthology.wait_until_fuse_mounted(
            remote=remote,
            fuse=cfuse_daemons[id_],
            mountpoint=mnt,
            )

    try:
        yield
    finally:
        log.info('Unmounting ceph-fuse clients...')
        for id_, remote in clients:
            mnt = os.path.join('/tmp/cephtest', 'mnt.{id}'.format(id=id_))
            remote.run(
                args=[
                    'fusermount',
                    '-u',
                    mnt,
                    ],
                )
        # Wait for all daemon processes to exit before removing mountpoints.
        run.wait(cfuse_daemons.itervalues())
        for id_, remote in clients:
            mnt = os.path.join('/tmp/cephtest', 'mnt.{id}'.format(id=id_))
            remote.run(
                args=[
                    'rmdir',
                    '--',
                    mnt,
                    ],
                )
def task(ctx, config):
    """
    Mount/unmount a ``ceph-fuse`` client.

    The config is optional and defaults to mounting on all clients. If
    a config is given, it is expected to be a list of clients to do
    this operation on. This lets you e.g. set up one client with
    ``ceph-fuse`` and another with ``kclient``.

    ``brxnet`` should be a Private IPv4 Address range, default range is
    [192.168.0.0/16]

    Example that mounts all clients::

        tasks:
        - ceph:
        - ceph-fuse:
        - interactive:
        - brxnet: [192.168.0.0/16]

    Example that uses both ``kclient`` and ``ceph-fuse``::

        tasks:
        - ceph:
        - ceph-fuse: [client.0]
        - kclient: [client.1]
        - interactive:

    Example that enables valgrind:

        tasks:
        - ceph:
        - ceph-fuse:
            client.0:
              valgrind: [--tool=memcheck, --leak-check=full, --show-reachable=yes]
        - interactive:

    Example that stops an already-mounted client:

    ::

        tasks:
            - ceph:
            - ceph-fuse: [client.0]
            - ... do something that requires the FS mounted ...
            - ceph-fuse:
                client.0:
                    mounted: false
            - ... do something that requires the FS unmounted ...

    Example that adds more generous wait time for mount (for virtual machines):

        tasks:
        - ceph:
        - ceph-fuse:
            client.0:
              mount_wait: 60 # default is 0, do not wait before checking /sys/
              mount_timeout: 120 # default is 30, give up if /sys/ is not populated
        - interactive:

    Example that creates and mounts a subvol:

        overrides:
          ceph:
            subvols:
              create: 2
              subvol_options: "--namespace-isolated --size 25000000000"
          ceph-fuse:
            client.0:
              mount_subvol_num: 0
          kclient:
            client.1:
              mount_subvol_num: 1

    :param ctx: Context
    :param config: Configuration
    """
    log.info('Running ceph_fuse task...')

    # Normalize config to a dict of "client.<id>" -> per-client config dict,
    # and derive the list of client roles to operate on.
    if config is None:
        ids = misc.all_roles_of_type(ctx.cluster, 'client')
        client_roles = [f'client.{id_}' for id_ in ids]
        config = dict([r, dict()] for r in client_roles)
    elif isinstance(config, list):
        client_roles = config
        config = dict([r, dict()] for r in client_roles)
    elif isinstance(config, dict):
        client_roles = filter(lambda x: 'client.' in x, config.keys())
    else:
        raise ValueError(f"Invalid config object: {config} ({config.__class__})")
    log.info(f"config is {config}")

    clients = list(misc.get_clients(ctx=ctx, roles=client_roles))
    testdir = misc.get_testdir(ctx)
    all_mounts = getattr(ctx, 'mounts', {})
    mounted_by_me = {}
    skipped = {}
    remotes = set()

    # Private network range for the per-mount network namespaces/bridge.
    brxnet = config.get("brxnet", None)

    # Construct any new FuseMount instances
    overrides = ctx.config.get('overrides', {}).get('ceph-fuse', {})
    # Overrides not keyed by a client role apply to every client.
    top_overrides = dict(filter(lambda x: 'client.' not in x[0], overrides.items()))
    for id_, remote in clients:
        entity = f"client.{id_}"
        client_config = config.get(entity)
        if client_config is None:
            client_config = {}

        # top level overrides
        misc.deep_merge(client_config, top_overrides)

        # mount specific overrides
        client_config_overrides = overrides.get(entity)
        misc.deep_merge(client_config, client_config_overrides)
        log.info(f"{entity} config is {client_config}")

        remotes.add(remote)
        # auth_id lets a mount authenticate as a different cephx user than
        # its role id.
        auth_id = client_config.get("auth_id", id_)
        cephfs_name = client_config.get("cephfs_name")

        # 'skip: true' leaves this client untouched (neither mounted nor
        # unmounted below).
        skip = client_config.get("skip", False)
        if skip:
            skipped[id_] = skip
            continue

        if id_ not in all_mounts:
            fuse_mount = FuseMount(ctx=ctx, client_config=client_config,
                                   test_dir=testdir, client_id=auth_id,
                                   client_remote=remote, brxnet=brxnet,
                                   cephfs_name=cephfs_name)
            all_mounts[id_] = fuse_mount
        else:
            # Catch bad configs where someone has e.g. tried to use
            # ceph-fuse and kcephfs for the same client
            assert isinstance(all_mounts[id_], FuseMount)

        if not config.get("disabled", False) and client_config.get(
                'mounted', True):
            mounted_by_me[id_] = {
                "config": client_config,
                "mount": all_mounts[id_]
            }

    ctx.mounts = all_mounts

    # Umount any pre-existing clients that we have not been asked to mount
    # (skipped clients are left exactly as they are).
    for client_id in set(all_mounts.keys()) - set(mounted_by_me.keys()) - set(
            skipped.keys()):
        mount = all_mounts[client_id]
        if mount.is_mounted():
            mount.umount_wait()

    # Clean up network namespaces/bridges left behind by earlier runs.
    for remote in remotes:
        FuseMount.cleanup_stale_netnses_and_bridge(remote)

    # Mount any clients we have been asked to (default to mount all)
    log.info('Mounting ceph-fuse clients...')
    for info in mounted_by_me.values():
        # NOTE: rebinds the outer 'config' name to the per-client config;
        # the task-level config is not used after this point.
        config = info["config"]
        mount_x = info['mount']
        if config.get("mount_path"):
            mount_x.cephfs_mntpt = config.get("mount_path")
        if config.get("mountpoint"):
            mount_x.hostfs_mntpt = config.get("mountpoint")
        mount_x.mount()

    for info in mounted_by_me.values():
        info["mount"].wait_until_mounted()

    try:
        yield all_mounts
    finally:
        log.info('Unmounting ceph-fuse clients...')
        for info in mounted_by_me.values():
            # Conditional because an inner context might have umounted it
            mount = info["mount"]
            if mount.is_mounted():
                mount.umount_wait()
        for remote in remotes:
            FuseMount.cleanup_stale_netnses_and_bridge(remote)
def task(ctx, config):
    """
    Mount/unmount a ``ceph-fuse`` client.

    The config is optional and defaults to mounting on all clients. If
    a config is given, it is expected to be a list of clients to do
    this operation on. This lets you e.g. set up one client with
    ``ceph-fuse`` and another with ``kclient``.

    Example that mounts all clients::

        tasks:
        - ceph:
        - ceph-fuse:
        - interactive:

    Example that uses both ``kclient`` and ``ceph-fuse``::

        tasks:
        - ceph:
        - ceph-fuse: [client.0]
        - kclient: [client.1]
        - interactive:

    Example that enables valgrind:

        tasks:
        - ceph:
        - ceph-fuse:
            client.0:
              valgrind: [--tool=memcheck, --leak-check=full, --show-reachable=yes]
        - interactive:

    Example that stops an already-mounted client:

    ::

        tasks:
            - ceph:
            - ceph-fuse: [client.0]
            - ... do something that requires the FS mounted ...
            - ceph-fuse:
                client.0:
                    mounted: false
            - ... do something that requires the FS unmounted ...

    Example that adds more generous wait time for mount (for virtual machines):

        tasks:
        - ceph:
        - ceph-fuse:
            client.0:
              mount_wait: 60 # default is 0, do not wait before checking /sys/
              mount_timeout: 120 # default is 30, give up if /sys/ is not populated
        - interactive:

    :param ctx: Context
    :param config: Configuration
    """
    log.info('Mounting ceph-fuse clients...')

    testdir = teuthology.get_testdir(ctx)
    config = get_client_configs(ctx, config)

    # Only roles of the form "client.<id>" receive a mount; by default
    # (no config) that is every client in the cluster.
    client_roles = filter(lambda role: 'client.' in role, config.keys())
    clients = list(teuthology.get_clients(ctx=ctx, roles=client_roles))

    # ctx.mounts may already hold mounts created by an earlier
    # ceph-fuse/kclient task in the same job.
    all_mounts = getattr(ctx, 'mounts', {})
    requested = {}

    # Let the MDS cluster settle before mounting anything on top of it.
    log.info('Wait for MDS to reach steady state...')
    mds_cluster = MDSCluster(ctx)
    status = mds_cluster.status()
    for filesystem in status.get_filesystems():
        fs_obj = Filesystem(ctx, fscid=filesystem['id'])
        fs_obj.wait_for_daemons()
    log.info('Ready to start ceph-fuse...')

    # Create a FuseMount for each client that does not already have one.
    for client_id, client_remote in clients:
        client_config = config.get("client.%s" % client_id)
        if client_config is None:
            client_config = {}

        if client_id in all_mounts:
            # Catch bad configs where someone has e.g. tried to use
            # ceph-fuse and kcephfs for the same client.
            assert isinstance(all_mounts[client_id], FuseMount)
        else:
            all_mounts[client_id] = FuseMount(client_config, testdir,
                                              client_id, client_remote)

        # 'mounted: false' lets a later invocation of this task unmount a
        # client without forgetting about it.
        wants_mount = client_config.get('mounted', True)
        if not config.get("disabled", False) and wants_mount:
            requested[client_id] = all_mounts[client_id]

    ctx.mounts = all_mounts

    # Kick off every requested mount first, then wait for each one, so the
    # mounts proceed in parallel.
    for mount in requested.values():
        mount.mount()
    for mount in requested.values():
        mount.wait_until_mounted()

    # Unmount any pre-existing clients that we were not asked to mount.
    for stale_id in set(all_mounts) - set(requested):
        stale_mount = all_mounts[stale_id]
        if stale_mount.is_mounted():
            stale_mount.umount_wait()

    try:
        yield all_mounts
    finally:
        log.info('Unmounting ceph-fuse clients...')
        for mount in requested.values():
            # Conditional because an inner context might have umounted it.
            if mount.is_mounted():
                mount.umount_wait()