def task(ctx, config):
    """
    Mount/unmount a ``kernel`` client.

    The config is optional and defaults to mounting on all clients. If
    a config is given, it is expected to be a list of clients to do
    this operation on. This lets you e.g. set up one client with
    ``ceph-fuse`` and another with ``kclient``.

    Example that mounts all clients::

        tasks:
        - ceph:
        - kclient:
        - interactive:

    Example that uses both ``kclient`` and ``cfuse``::

        tasks:
        - ceph:
        - cfuse: [client.0]
        - kclient: [client.1]
        - interactive:
    """
    log.info('Mounting kernel clients...')
    assert config is None or isinstance(config, list), \
        "task kclient got invalid config"

    # Default to every client role in the cluster.
    if config is None:
        config = ['client.{id}'.format(id=id_)
                  for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
    clients = list(teuthology.get_clients(ctx=ctx, roles=config))

    for id_, remote in clients:
        mnt = os.path.join('/tmp/cephtest', 'mnt.{id}'.format(id=id_))
        log.info('Mounting kclient client.{id} at {remote} {mnt}...'.format(
            id=id_, remote=remote, mnt=mnt))

        # figure mon ips: collect (roles, peer address) for every remote,
        # then let teuthology map mon roles to their IPs
        remotes_and_roles = ctx.cluster.remotes.items()
        roles = [roles for (remote_, roles) in remotes_and_roles]
        ips = [host for (host, port) in
               (remote_.ssh.get_transport().getpeername()
                for (remote_, roles) in remotes_and_roles)]
        mons = teuthology.get_mons(roles, ips).values()

        # mount.ceph needs the client key in a file on the remote host
        secret = '/tmp/cephtest/data/client.{id}.secret'.format(id=id_)
        teuthology.write_secret_file(remote, 'client.{id}'.format(id=id_),
                                     secret)

        remote.run(
            args=[
                'mkdir',
                '--',
                mnt,
                ],
            )

        remote.run(
            args=[
                'sudo',
                '/tmp/cephtest/enable-coredump',
                '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
                '/tmp/cephtest/archive/coverage',
                '/tmp/cephtest/binary/usr/local/sbin/mount.ceph',
                '{mons}:/'.format(mons=','.join(mons)),
                mnt,
                '-v',
                '-o',
                'name={id},secretfile={secret}'.format(id=id_, secret=secret),
                ],
            )

    try:
        yield
    finally:
        # Always unmount and remove the mountpoints, even on failure.
        log.info('Unmounting kernel clients...')
        for id_, remote in clients:
            log.debug('Unmounting client client.{id}...'.format(id=id_))
            mnt = os.path.join('/tmp/cephtest', 'mnt.{id}'.format(id=id_))
            remote.run(
                args=[
                    'sudo',
                    'umount',
                    mnt,
                    ],
                )
            remote.run(
                args=[
                    'rmdir',
                    '--',
                    mnt,
                    ],
                )
def task(ctx, config):
    """
    Mount/unmount a ``kernel`` client.

    The config is optional and defaults to mounting on all clients. If
    a config is given, it is expected to be a list of clients to do
    this operation on. This lets you e.g. set up one client with
    ``ceph-fuse`` and another with ``kclient``.

    Example that mounts all clients::

        tasks:
        - ceph:
        - kclient:
        - interactive:

    Example that uses both ``kclient`` and ``cfuse``::

        tasks:
        - ceph:
        - cfuse: [client.0]
        - kclient: [client.1]
        - interactive:
    """
    log.info('Mounting kernel clients...')
    assert config is None or isinstance(config, list), \
        "task kclient got invalid config"

    # Default to every client role in the cluster.
    if config is None:
        config = [
            'client.{id}'.format(id=id_)
            for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')
        ]
    clients = list(teuthology.get_clients(ctx=ctx, roles=config))

    for id_, remote in clients:
        mnt = os.path.join('/tmp/cephtest', 'mnt.{id}'.format(id=id_))
        log.info('Mounting kclient client.{id} at {remote} {mnt}...'.format(
            id=id_, remote=remote, mnt=mnt))

        # figure mon ips: collect (roles, peer address) for every remote,
        # then let teuthology map mon roles to their IPs
        remotes_and_roles = ctx.cluster.remotes.items()
        roles = [roles for (remote_, roles) in remotes_and_roles]
        ips = [
            host for (host, port) in
            (remote_.ssh.get_transport().getpeername()
             for (remote_, roles) in remotes_and_roles)
        ]
        mons = teuthology.get_mons(roles, ips).values()

        # mount.ceph needs the client key in a file on the remote host
        secret = '/tmp/cephtest/data/client.{id}.secret'.format(id=id_)
        teuthology.write_secret_file(remote, 'client.{id}'.format(id=id_),
                                     secret)

        remote.run(args=[
            'mkdir',
            '--',
            mnt,
        ],
        )
        remote.run(args=[
            'sudo',
            '/tmp/cephtest/enable-coredump',
            '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
            '/tmp/cephtest/archive/coverage',
            '/tmp/cephtest/binary/usr/local/sbin/mount.ceph',
            '{mons}:/'.format(mons=','.join(mons)),
            mnt,
            '-v',
            '-o',
            'name={id},secretfile={secret}'.format(id=id_, secret=secret),
        ],
        )

    try:
        yield
    finally:
        # Always unmount and remove the mountpoints, even on failure.
        log.info('Unmounting kernel clients...')
        for id_, remote in clients:
            log.debug('Unmounting client client.{id}...'.format(id=id_))
            mnt = os.path.join('/tmp/cephtest', 'mnt.{id}'.format(id=id_))
            remote.run(args=[
                'sudo',
                'umount',
                mnt,
            ],
            )
            remote.run(args=[
                'rmdir',
                '--',
                mnt,
            ],
            )
def dev_create(ctx, config):
    """
    Map block devices to rbd images.

    For example::

        tasks:
        - ceph:
        - rbd.create_image: [client.0]
        - rbd.modprobe: [client.0]
        - rbd.dev_create:
            client.0: testimage.client.0
    """
    assert isinstance(config, dict) or isinstance(config, list), \
        "task dev_create only supports a list or dictionary for configuration"

    # A dict maps role -> image name; a list means "use the default
    # image name for each role".
    if isinstance(config, dict):
        role_images = config.items()
    else:
        role_images = [(role, None) for role in config]

    log.info('Creating rbd block devices...')
    for role, image in role_images:
        if image is None:
            image = default_image_name(role)
        (remote, ) = ctx.cluster.only(role).remotes.keys()

        # add udev rule for creating /dev/rbd/pool/image
        remote.run(args=[
            'echo',
            'KERNEL=="rbd[0-9]*", PROGRAM="/tmp/cephtest/binary/usr/local/bin/ceph-rbdnamer %n", SYMLINK+="rbd/%c{1}/%c{2}"',
            run.Raw('>'),
            '/tmp/cephtest/51-rbd.rules',
        ],
        )
        remote.run(args=[
            'sudo',
            'mv',
            '/tmp/cephtest/51-rbd.rules',
            '/etc/udev/rules.d/',
        ],
        )

        # rbd map authenticates with the role's key from a secret file
        secretfile = '/tmp/cephtest/data/{role}.secret'.format(role=role)
        teuthology.write_secret_file(remote, role, secretfile)

        remote.run(
            args=[
                'sudo',
                'LD_LIBRARY_PATH=/tmp/cephtest/binary/usr/local/lib',
                '/tmp/cephtest/enable-coredump',
                '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
                '/tmp/cephtest/archive/coverage',
                '/tmp/cephtest/binary/usr/local/bin/rbd',
                '-c', '/tmp/cephtest/ceph.conf',
                '--user', role.rsplit('.')[-1],
                '--secret', secretfile,
                '-p', 'rbd',
                'map',
                image,
                run.Raw('&&'),
                # wait for the symlink to be created by udev
                'while', 'test', '!', '-e',
                '/dev/rbd/rbd/{image}'.format(image=image),
                run.Raw(';'),
                'do',
                'sleep', '1', run.Raw(';'),
                'done',
                ],
            )
    try:
        yield
    finally:
        log.info('Unmapping rbd devices...')
        for role, image in role_images:
            if image is None:
                image = default_image_name(role)
            (remote, ) = ctx.cluster.only(role).remotes.keys()
            remote.run(
                args=[
                    'sudo',
                    'LD_LIBRARY_PATH=/tmp/cephtest/binary/usr/local/lib',
                    '/tmp/cephtest/enable-coredump',
                    '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
                    '/tmp/cephtest/archive/coverage',
                    '/tmp/cephtest/binary/usr/local/bin/rbd',
                    '-c', '/tmp/cephtest/ceph.conf',
                    '-p', 'rbd',
                    'unmap',
                    '/dev/rbd/rbd/{imgname}'.format(imgname=image),
                    run.Raw('&&'),
                    # wait for the symlink to be deleted by udev
                    'while', 'test', '-e',
                    '/dev/rbd/rbd/{image}'.format(image=image),
                    run.Raw(';'),
                    'do',
                    'sleep', '1', run.Raw(';'),
                    'done',
                    ],
                )
            # -f so cleanup succeeds even if the rules file was never
            # installed (e.g. setup failed part-way through)
            remote.run(
                args=[
                    'sudo',
                    'rm',
                    '-f',
                    '/etc/udev/rules.d/51-rbd.rules',
                    ],
                wait=False,
                )
def _run_tests(ctx, role, tests):
    """
    Fetch the qa/workunits tree onto the given client and run every
    workunit matching each spec in ``tests``.

    Each spec is either an exact workunit path or a directory prefix;
    a spec that matches nothing raises RuntimeError.
    """
    assert isinstance(role, basestring)
    PREFIX = 'client.'
    assert role.startswith(PREFIX)
    id_ = role[len(PREFIX):]
    (remote,) = ctx.cluster.only(role).remotes.iterkeys()
    mnt = os.path.join('/tmp/cephtest', 'mnt.{id}'.format(id=id_))
    # subdir so we can remove and recreate this a lot without sudo
    scratch_tmp = os.path.join(mnt, 'client.{id}'.format(id=id_), 'tmp')
    srcdir = '/tmp/cephtest/workunit.{role}'.format(role=role)

    secretfile = '/tmp/cephtest/data/{role}.secret'.format(role=role)
    teuthology.write_secret_file(remote, role, secretfile)

    # Download the workunits, build them if a Makefile is present, and
    # write a NUL-separated list of the executable workunit files.
    remote.run(
        logger=log.getChild(role),
        args=[
            'mkdir', '--', srcdir,
            run.Raw('&&'),
            'wget',
            '-q',
            '-O-',
            # TODO make branch/tag/sha1 used configurable
            'https://github.com/NewDreamNetwork/ceph/tarball/HEAD',
            run.Raw('|'),
            'tar',
            '-C', srcdir,
            '-x',
            '-z',
            '-f-',
            '--wildcards',
            '--no-wildcards-match-slash',
            '--strip-components=3',
            '--',
            '*/qa/workunits/',
            run.Raw('&&'),
            'cd', '--', srcdir,
            run.Raw('&&'),
            'if', 'test', '-e', 'Makefile', run.Raw(';'),
            'then', 'make', run.Raw(';'),
            'fi',
            run.Raw('&&'),
            # was r'%P\0'.format(srcdir=srcdir): the template has no
            # placeholders, so the .format call was a no-op -- dropped
            'find', '-executable', '-type', 'f', '-printf', r'%P\0',
            run.Raw('>/tmp/cephtest/workunits.list'),
            ],
        )
    workunits = sorted(teuthology.get_file(
        remote, '/tmp/cephtest/workunits.list').split('\0'))
    assert workunits

    try:
        assert isinstance(tests, list)
        for spec in tests:
            log.info('Running workunits matching %s on %s...', spec, role)
            prefix = '{spec}/'.format(spec=spec)
            to_run = [w for w in workunits
                      if w == spec or w.startswith(prefix)]
            if not to_run:
                raise RuntimeError(
                    'Spec did not match any workunits: {spec!r}'.format(
                        spec=spec))
            for workunit in to_run:
                log.info('Running workunit %s...', workunit)
                remote.run(
                    logger=log.getChild(role),
                    args=[
                        'mkdir', '--', scratch_tmp,
                        run.Raw('&&'),
                        'cd', '--', scratch_tmp,
                        run.Raw('&&'),
                        run.Raw('PATH="$PATH:/tmp/cephtest/binary/usr/local/bin"'),
                        run.Raw('LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/tmp/cephtest/binary/usr/local/lib"'),
                        run.Raw('CEPH_CONF="/tmp/cephtest/ceph.conf"'),
                        run.Raw('CEPH_SECRET_FILE="{file}"'.format(file=secretfile)),
                        run.Raw('CEPH_ID="{id}"'.format(id=id_)),
                        run.Raw('PYTHONPATH="$PYTHONPATH:/tmp/cephtest/binary/usr/local/lib/python2.6/dist-packages"'),
                        '/tmp/cephtest/enable-coredump',
                        '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
                        '/tmp/cephtest/archive/coverage',
                        '{srcdir}/{workunit}'.format(
                            srcdir=srcdir,
                            workunit=workunit,
                            ),
                        run.Raw('&&'),
                        'rm', '-rf', '--', scratch_tmp,
                        ],
                    )
    finally:
        # Clean up the downloaded tree and list file regardless of outcome.
        remote.run(
            logger=log.getChild(role),
            args=[
                'rm', '-rf', '--',
                '/tmp/cephtest/workunits.list',
                srcdir,
                ],
            )
def dev_create(ctx, config):
    """
    Map block devices to rbd images.

    For example::

        tasks:
        - ceph:
        - rbd.create_image: [client.0]
        - rbd.modprobe: [client.0]
        - rbd.dev_create:
            client.0: testimage.client.0
    """
    assert isinstance(config, dict) or isinstance(config, list), \
        "task dev_create only supports a list or dictionary for configuration"

    # Normalize config into (role, image-or-None) pairs; a plain list
    # means each role gets its default image name.
    role_images = (config.items() if isinstance(config, dict)
                   else [(r, None) for r in config])

    log.info('Creating rbd block devices...')
    for role, img in role_images:
        if img is None:
            img = default_image_name(role)
        (remote,) = ctx.cluster.only(role).remotes.keys()

        # add udev rule for creating /dev/rbd/pool/image
        remote.run(
            args=[
                'echo',
                'KERNEL=="rbd[0-9]*", PROGRAM="/tmp/cephtest/binary/usr/local/bin/ceph-rbdnamer %n", SYMLINK+="rbd/%c{1}/%c{2}"',
                run.Raw('>'),
                '/tmp/cephtest/51-rbd.rules',
                ],
            )
        remote.run(
            args=[
                'sudo',
                'mv',
                '/tmp/cephtest/51-rbd.rules',
                '/etc/udev/rules.d/',
                ],
            )

        # rbd map needs the role's key available in a secret file
        keyfile = '/tmp/cephtest/data/{role}.secret'.format(role=role)
        teuthology.write_secret_file(remote, role, keyfile)

        map_cmd = [
            'sudo',
            'LD_LIBRARY_PATH=/tmp/cephtest/binary/usr/local/lib',
            '/tmp/cephtest/enable-coredump',
            '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
            '/tmp/cephtest/archive/coverage',
            '/tmp/cephtest/binary/usr/local/bin/rbd',
            '-c', '/tmp/cephtest/ceph.conf',
            '--user', role.rsplit('.')[-1],
            '--secret', keyfile,
            '-p', 'rbd',
            'map',
            img,
            run.Raw('&&'),
            # wait for the symlink to be created by udev
            'while', 'test', '!', '-e', '/dev/rbd/rbd/' + img,
            run.Raw(';'),
            'do', 'sleep', '1', run.Raw(';'), 'done',
            ]
        remote.run(args=map_cmd)

    try:
        yield
    finally:
        log.info('Unmapping rbd devices...')
        for role, img in role_images:
            if img is None:
                img = default_image_name(role)
            (remote,) = ctx.cluster.only(role).remotes.keys()

            unmap_cmd = [
                'sudo',
                'LD_LIBRARY_PATH=/tmp/cephtest/binary/usr/local/lib',
                '/tmp/cephtest/enable-coredump',
                '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
                '/tmp/cephtest/archive/coverage',
                '/tmp/cephtest/binary/usr/local/bin/rbd',
                '-c', '/tmp/cephtest/ceph.conf',
                '-p', 'rbd',
                'unmap',
                '/dev/rbd/rbd/' + img,
                run.Raw('&&'),
                # wait for the symlink to be deleted by udev
                'while', 'test', '-e', '/dev/rbd/rbd/' + img,
                run.Raw(';'),
                'do', 'sleep', '1', run.Raw(';'), 'done',
                ]
            remote.run(args=unmap_cmd)

            # fire-and-forget removal of the udev rule (-f: ignore absence)
            remote.run(
                args=['sudo', 'rm', '-f', '/etc/udev/rules.d/51-rbd.rules'],
                wait=False,
                )
def _run_tests(ctx, role, tests):
    """
    Fetch the qa/workunits tree onto the given client and run every
    workunit matching each spec in ``tests``.

    Each spec is either an exact workunit path or a directory prefix;
    a spec that matches nothing raises RuntimeError.
    """
    assert isinstance(role, basestring)
    PREFIX = 'client.'
    assert role.startswith(PREFIX)
    id_ = role[len(PREFIX):]
    (remote, ) = ctx.cluster.only(role).remotes.iterkeys()
    mnt = os.path.join('/tmp/cephtest', 'mnt.{id}'.format(id=id_))
    # subdir so we can remove and recreate this a lot without sudo
    scratch_tmp = os.path.join(mnt, 'client.{id}'.format(id=id_), 'tmp')
    srcdir = '/tmp/cephtest/workunit.{role}'.format(role=role)

    secretfile = '/tmp/cephtest/data/{role}.secret'.format(role=role)
    teuthology.write_secret_file(remote, role, secretfile)

    # Download the workunits, build them if a Makefile is present, and
    # write a NUL-separated list of the executable workunit files.
    remote.run(
        logger=log.getChild(role),
        args=[
            'mkdir', '--', srcdir,
            run.Raw('&&'),
            'wget',
            '-q',
            '-O-',
            # TODO make branch/tag/sha1 used configurable
            'https://github.com/NewDreamNetwork/ceph/tarball/HEAD',
            run.Raw('|'),
            'tar',
            '-C', srcdir,
            '-x',
            '-z',
            '-f-',
            '--wildcards',
            '--no-wildcards-match-slash',
            '--strip-components=3',
            '--',
            '*/qa/workunits/',
            run.Raw('&&'),
            'cd', '--', srcdir,
            run.Raw('&&'),
            'if', 'test', '-e', 'Makefile', run.Raw(';'),
            'then', 'make', run.Raw(';'),
            'fi',
            run.Raw('&&'),
            # was r'%P\0'.format(srcdir=srcdir): the template has no
            # placeholders, so the .format call was a no-op -- dropped
            'find', '-executable', '-type', 'f', '-printf', r'%P\0',
            run.Raw('>/tmp/cephtest/workunits.list'),
        ],
    )
    workunits = sorted(
        teuthology.get_file(remote,
                            '/tmp/cephtest/workunits.list').split('\0'))
    assert workunits

    try:
        assert isinstance(tests, list)
        for spec in tests:
            log.info('Running workunits matching %s on %s...', spec, role)
            prefix = '{spec}/'.format(spec=spec)
            to_run = [
                w for w in workunits if w == spec or w.startswith(prefix)
            ]
            if not to_run:
                raise RuntimeError(
                    'Spec did not match any workunits: {spec!r}'.format(
                        spec=spec))
            for workunit in to_run:
                log.info('Running workunit %s...', workunit)
                remote.run(
                    logger=log.getChild(role),
                    args=[
                        'mkdir', '--', scratch_tmp,
                        run.Raw('&&'),
                        'cd', '--', scratch_tmp,
                        run.Raw('&&'),
                        run.Raw(
                            'PATH="$PATH:/tmp/cephtest/binary/usr/local/bin"'),
                        run.Raw(
                            'LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/tmp/cephtest/binary/usr/local/lib"'
                        ),
                        run.Raw('CEPH_CONF="/tmp/cephtest/ceph.conf"'),
                        run.Raw('CEPH_SECRET_FILE="{file}"'.format(
                            file=secretfile)),
                        run.Raw('CEPH_ID="{id}"'.format(id=id_)),
                        run.Raw(
                            'PYTHONPATH="$PYTHONPATH:/tmp/cephtest/binary/usr/local/lib/python2.6/dist-packages"'
                        ),
                        '/tmp/cephtest/enable-coredump',
                        '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
                        '/tmp/cephtest/archive/coverage',
                        '{srcdir}/{workunit}'.format(
                            srcdir=srcdir,
                            workunit=workunit,
                        ),
                        run.Raw('&&'),
                        'rm', '-rf', '--', scratch_tmp,
                    ],
                )
    finally:
        # Clean up the downloaded tree and list file regardless of outcome.
        remote.run(
            logger=log.getChild(role),
            args=[
                'rm', '-rf', '--',
                '/tmp/cephtest/workunits.list',
                srcdir,
            ],
        )
def _run_tests(ctx, refspec, role, tests, env, subdir=None):
    """
    Fetch the qa/workunits tree at ``refspec`` onto the given client and
    run every workunit matching each spec in ``tests``.

    :param refspec: git ref (branch/tag/sha1) of the ceph tarball to fetch
    :param role: client role, e.g. ``client.0``
    :param tests: list of workunit specs (exact path or directory prefix)
    :param env: optional dict of extra environment variables, or None
    :param subdir: scratch subdir under the mount; defaults to
                   ``client.<id>/tmp``
    """
    assert isinstance(role, basestring)
    PREFIX = 'client.'
    assert role.startswith(PREFIX)
    id_ = role[len(PREFIX):]
    (remote,) = ctx.cluster.only(role).remotes.iterkeys()
    mnt = os.path.join('/tmp/cephtest', 'mnt.{id}'.format(id=id_))
    # subdir so we can remove and recreate this a lot without sudo
    if subdir is None:
        scratch_tmp = os.path.join(mnt, 'client.{id}'.format(id=id_), 'tmp')
    else:
        scratch_tmp = os.path.join(mnt, subdir)
    srcdir = '/tmp/cephtest/workunit.{role}'.format(role=role)
    secretfile = '/tmp/cephtest/data/{role}.secret'.format(role=role)
    teuthology.write_secret_file(remote, role, secretfile)

    # exported to the workunits as CEPH_REF
    ceph_ref = ctx.summary.get('ceph-sha1', 'master')

    # Download the workunits, build them if a Makefile is present, and
    # write a NUL-separated list of the executable workunit files.
    remote.run(
        logger=log.getChild(role),
        args=[
            'mkdir', '--', srcdir,
            run.Raw('&&'),
            'wget',
            '-q',
            '-O-',
            'https://github.com/ceph/ceph/tarball/%s' % refspec,
            run.Raw('|'),
            'tar',
            '-C', srcdir,
            '-x',
            '-z',
            '-f-',
            '--wildcards',
            '--no-wildcards-match-slash',
            '--strip-components=3',
            '--',
            '*/qa/workunits/',
            run.Raw('&&'),
            'cd', '--', srcdir,
            run.Raw('&&'),
            'if', 'test', '-e', 'Makefile', run.Raw(';'),
            'then', 'make', run.Raw(';'),
            'fi',
            run.Raw('&&'),
            # was r'%P\0'.format(srcdir=srcdir): the template has no
            # placeholders, so the .format call was a no-op -- dropped
            'find', '-executable', '-type', 'f', '-printf', r'%P\0',
            run.Raw('>/tmp/cephtest/workunits.list'),
            ],
        )
    workunits = sorted(teuthology.get_file(
        remote, '/tmp/cephtest/workunits.list').split('\0'))
    assert workunits

    try:
        assert isinstance(tests, list)
        for spec in tests:
            log.info('Running workunits matching %s on %s...', spec, role)
            prefix = '{spec}/'.format(spec=spec)
            to_run = [w for w in workunits
                      if w == spec or w.startswith(prefix)]
            if not to_run:
                raise RuntimeError(
                    'Spec did not match any workunits: {spec!r}'.format(
                        spec=spec))
            for workunit in to_run:
                log.info('Running workunit %s...', workunit)
                args = [
                    'mkdir', '-p', '--', scratch_tmp,
                    run.Raw('&&'),
                    'cd', '--', scratch_tmp,
                    run.Raw('&&'),
                    run.Raw('CEPH_REF={ref}'.format(ref=ceph_ref)),
                    run.Raw('PATH="$PATH:/tmp/cephtest/binary/usr/local/bin"'),
                    run.Raw('LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/tmp/cephtest/binary/usr/local/lib"'),
                    run.Raw('CEPH_CONF="/tmp/cephtest/ceph.conf"'),
                    run.Raw('CEPH_SECRET_FILE="{file}"'.format(file=secretfile)),
                    run.Raw('CEPH_ID="{id}"'.format(id=id_)),
                    run.Raw('PYTHONPATH="$PYTHONPATH:/tmp/cephtest/binary/usr/local/lib/python2.7/dist-packages:/tmp/cephtest/binary/usr/local/lib/python2.6/dist-packages"'),
                    ]
                if env is not None:
                    # quote values so they survive the remote shell
                    for var, val in env.iteritems():
                        quoted_val = pipes.quote(val)
                        env_arg = '{var}={val}'.format(var=var, val=quoted_val)
                        args.append(run.Raw(env_arg))
                args.extend([
                    '/tmp/cephtest/enable-coredump',
                    '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
                    '/tmp/cephtest/archive/coverage',
                    '{srcdir}/{workunit}'.format(
                        srcdir=srcdir,
                        workunit=workunit,
                        ),
                    ])
                remote.run(
                    logger=log.getChild(role),
                    args=args,
                    )
                remote.run(
                    logger=log.getChild(role),
                    args=['rm', '-rf', '--', scratch_tmp],
                    )
    finally:
        # Log 'tests' rather than the loop variable 'spec': 'spec' is
        # unbound here when tests is empty or the assert above fired,
        # and referencing it raised a NameError masking the real error.
        log.info('Stopping %s on %s...', tests, role)
        remote.run(
            logger=log.getChild(role),
            args=[
                'rm', '-rf', '--',
                '/tmp/cephtest/workunits.list',
                srcdir,
                ],
            )