Example #1
def task(ctx, config):
    """
    Run an admin socket command, make sure the output is json, and run
    a test program on it. The test program should read json from
    stdin. This task succeeds if the test program exits with status 0.

    To run the same test on all clients::

        tasks:
        - ceph:
        - rados:
        - admin_socket:
            all:
              dump_requests:
                test: http://example.com/script

    To restrict it to certain clients::

        tasks:
        - ceph:
        - rados: [client.1]
        - admin_socket:
            client.1:
              dump_requests:
                test: http://example.com/script

    If an admin socket command has arguments, they can be specified as
    a list::

        tasks:
        - ceph:
        - rados: [client.0]
        - admin_socket:
            client.0:
              dump_requests:
                test: http://example.com/script
              help:
                test: http://example.com/test_help_version
                args: [version]

    Note that there must be a ceph client with an admin socket running
    before this task is run. The tests are parallelized at the client
    level. Tests for a single client are run serially.

    :param ctx: Context
    :param config: Configuration
    """
    assert isinstance(config, dict), \
        'admin_socket task requires a dict for configuration'
    teuthology.replace_all_with_clients(ctx.cluster, config)

    with parallel() as ptask:
        for client, tests in config.items():
            ptask.spawn(_run_tests, ctx, client, tests)
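
The per-client helper `_run_tests` is not part of this excerpt. As a rough sketch of the check the docstring describes, assuming a hypothetical helper name and the standard `ceph --admin-daemon` invocation: run the admin socket command, verify the output parses as JSON, then feed it to the test program on stdin and require exit status 0.

import json
import subprocess

def check_admin_socket_json(socket_path, command, test_program):
    # Hypothetical sketch, not teuthology's actual _run_tests helper.
    out = subprocess.check_output(
        ['ceph', '--admin-daemon', socket_path] + list(command))
    json.loads(out)  # make sure the output is json
    # The test program reads the json from stdin; check=True enforces
    # exit status 0.
    subprocess.run([test_program], input=out, check=True)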
Example #3
def task(ctx, config):
    """
    Run an autotest test on the ceph cluster.

    Only autotest client tests are supported.

    The config is a mapping from role name to list of tests to run on
    that client.

    For example::

        tasks:
        - ceph:
        - ceph-fuse: [client.0, client.1]
        - autotest:
            client.0: [dbench]
            client.1: [bonnie]

    You can also specify a list of tests to run on all clients::

        tasks:
        - ceph:
        - ceph-fuse:
        - autotest:
            all: [dbench]
    """
    assert isinstance(config, dict)
    config = teuthology.replace_all_with_clients(ctx.cluster, config)
    log.info('Setting up autotest...')
    testdir = teuthology.get_testdir(ctx)
    with parallel() as p:
        for role in config.keys():
            (remote,) = ctx.cluster.only(role).remotes.keys()
            p.spawn(_download, testdir, remote)

    log.info('Making a separate scratch dir for every client...')
    for role in config.keys():
        assert isinstance(role, str)
        PREFIX = 'client.'
        assert role.startswith(PREFIX)
        id_ = role[len(PREFIX):]
        (remote,) = ctx.cluster.only(role).remotes.keys()
        mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
        scratch = os.path.join(mnt, 'client.{id}'.format(id=id_))
        remote.run(
            args=[
                'sudo',
                'install',
                '-d',
                '-m', '0755',
                '--owner={user}'.format(user='******'), #TODO
                '--',
                scratch,
                ],
            )

    with parallel() as p:
        for role, tests in config.items():
            (remote,) = ctx.cluster.only(role).remotes.keys()
            p.spawn(_run_tests, testdir, remote, role, tests)
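
`parallel()` is teuthology's fan-out helper: tasks spawned inside the `with` block run concurrently, and the block does not exit until all of them have finished. A minimal thread-based stand-in illustrating that contract (not the actual teuthology implementation):

from concurrent.futures import ThreadPoolExecutor

class parallel_sketch:
    """Thread-based stand-in for teuthology's parallel() helper."""
    def __enter__(self):
        self._pool = ThreadPoolExecutor()
        self._futures = []
        return self

    def spawn(self, fn, *args):
        # Schedule fn(*args) to run concurrently.
        self._futures.append(self._pool.submit(fn, *args))

    def __exit__(self, *exc_info):
        # Join all workers; result() re-raises any worker exception.
        for future in self._futures:
            future.result()
        self._pool.shutdown()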
Example #4
def task(ctx, config):
    """
    Run an autotest test on the ceph cluster.

    Only autotest client tests are supported.

    The config is a mapping from role name to list of tests to run on
    that client.

    For example::

        tasks:
        - ceph:
        - ceph-fuse: [client.0, client.1]
        - autotest:
            client.0: [dbench]
            client.1: [bonnie]

    You can also specify a list of tests to run on all clients::

        tasks:
        - ceph:
        - ceph-fuse:
        - autotest:
            all: [dbench]
    """
    assert isinstance(config, dict)
    config = teuthology.replace_all_with_clients(ctx.cluster, config)
    log.info('Setting up autotest...')
    testdir = teuthology.get_testdir(ctx)
    with parallel() as p:
        for role in config.keys():
            (remote,) = ctx.cluster.only(role).remotes.keys()
            p.spawn(_download, testdir, remote)

    log.info('Making a separate scratch dir for every client...')
    for role in config.keys():
        assert isinstance(role, str)
        PREFIX = 'client.'
        assert role.startswith(PREFIX)
        id_ = role[len(PREFIX):]
        (remote,) = ctx.cluster.only(role).remotes.keys()
        mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
        scratch = os.path.join(mnt, 'client.{id}'.format(id=id_))
        remote.run(
            args=[
                'sudo',
                'install',
                '-d',
                '-m', '0755',
                '--owner={user}'.format(user='******'), #TODO
                '--',
                scratch,
                ],
            )

    with parallel() as p:
        for role, tests in config.items():
            (remote,) = ctx.cluster.only(role).remotes.keys()
            p.spawn(_run_tests, testdir, remote, role, tests)
Example #5
def task(ctx, config):
    """
    Create and mount an rbd image.

    For example, you can specify which clients to run on::

        tasks:
        - ceph:
        - rbd: [client.0, client.1]

    There are a few image options::

        tasks:
        - ceph:
        - rbd:
            client.0: # uses defaults
            client.1:
                image_name: foo
                image_size: 2048
                fs_type: xfs

    To use default options on all clients::

        tasks:
        - ceph:
        - rbd:
            all:

    To create 20GiB images and format them with xfs on all clients::

        tasks:
        - ceph:
        - rbd:
            all:
              image_size: 20480
              fs_type: xfs
    """
    if config is None:
        config = {'all': None}
    norm_config = config
    if isinstance(config, dict):
        norm_config = teuthology.replace_all_with_clients(ctx.cluster, config)
    if isinstance(norm_config, dict):
        role_images = {}
        for role, properties in norm_config.items():
            if properties is None:
                properties = {}
            role_images[role] = properties.get('image_name')
    else:
        role_images = norm_config

    with contextutil.nested(
            lambda: create_image(ctx=ctx, config=norm_config),
            lambda: modprobe(ctx=ctx, config=norm_config),
            lambda: dev_create(ctx=ctx, config=role_images),
            lambda: mkfs(ctx=ctx, config=norm_config),
            lambda: mount(ctx=ctx, config=role_images),
    ):
        yield
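
`contextutil.nested` receives zero-argument callables so that each context manager is constructed only once the previous ones have been entered, and everything unwinds in reverse order on exit. A minimal `ExitStack`-based sketch of that behavior, not the real teuthology implementation:

from contextlib import ExitStack, contextmanager

@contextmanager
def nested_sketch(*mgr_factories):
    # Each factory is a zero-argument callable returning a context
    # manager; deferring the call means a failure while entering
    # manager N still unwinds managers 1..N-1 correctly.
    with ExitStack() as stack:
        yield [stack.enter_context(factory()) for factory in mgr_factories]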
Example #6
def task(ctx, config):
    """
    This task starts immutable_object_cache.
    """
    assert isinstance(config, dict), \
           "task immutable_object_cache only supports a dictionary for configuration"

    managers = []
    config = teuthology.replace_all_with_clients(ctx.cluster, config)
    managers.append(lambda: immutable_object_cache(ctx=ctx, config=config))

    with contextutil.nested(*managers):
        yield
Example #7
def task(ctx, config):
    """
    This task tests persistent write log cache recovery.
    """
    assert isinstance(config, dict), \
            "task rbd_pwl_cache_recovery only supports a dictionary for configuration"

    managers = []
    config = teuthology.replace_all_with_clients(ctx.cluster, config)
    managers.append(
        lambda: thrashes_rbd_bench_on_persistent_cache(ctx=ctx, config=config)
        )

    with contextutil.nested(*managers):
        yield
Example #8
def task(ctx, config):
    """
    Run a test inside of QEMU on top of rbd. Only one test
    is supported per client.

    For example, you can specify which clients to run on::

        tasks:
        - ceph:
        - qemu:
            client.0:
              test: http://ceph.com/qa/test.sh
            client.1:
              test: http://ceph.com/qa/test2.sh

    Or use the same settings on all clients::

        tasks:
        - ceph:
        - qemu:
            all:
              test: http://ceph.com/qa/test.sh

    For tests that don't need a filesystem, set type to block::

        tasks:
        - ceph:
        - qemu:
            client.0:
              test: http://ceph.com/qa/test.sh
              type: block

    The test should be configured to run on /dev/vdb and later
    devices.

    If you want to run a test that uses more than one rbd image,
    specify how many images to use::

        tasks:
        - ceph:
        - qemu:
            client.0:
              test: http://ceph.com/qa/test.sh
              type: block
              num_rbd: 2

    You can set the amount of memory the VM has (default is 1024 MB)::

        tasks:
        - ceph:
        - qemu:
            client.0:
              test: http://ceph.com/qa/test.sh
              memory: 512 # megabytes
    """
    assert isinstance(config, dict), \
           "task qemu only supports a dictionary for configuration"

    config = teuthology.replace_all_with_clients(ctx.cluster, config)

    managers = []
    for client, client_config in config.items():
        num_rbd = client_config.get('num_rbd', 1)
        assert num_rbd > 0, 'at least one rbd device must be used'
        for i in range(num_rbd):
            create_config = {
                client: {
                    'image_name':
                    '{client}.{num}'.format(client=client, num=i),
                    }
                }
            managers.append(
                lambda create_config=create_config:
                rbd.create_image(ctx=ctx, config=create_config)
                )

    managers.extend([
        lambda: create_dirs(ctx=ctx, config=config),
        lambda: generate_iso(ctx=ctx, config=config),
        lambda: download_image(ctx=ctx, config=config),
        lambda: run_qemu(ctx=ctx, config=config),
        ])

    with contextutil.nested(*managers):
        yield
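
The `lambda create_config=create_config:` form above is deliberate: a plain closure would bind the loop variable late, so every manager would see the last iteration's config. A quick self-contained demonstration of the difference:

# Late binding: every lambda sees the final value of i.
late = [lambda: i for i in range(3)]
print([f() for f in late])      # [2, 2, 2]

# Default argument: the current value is captured at each iteration.
early = [lambda i=i: i for i in range(3)]
print([f() for f in early])     # [0, 1, 2]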
Example #9
def task(ctx, config):
    """
    Run all cram tests from the specified urls on the specified
    clients. Each client runs tests in parallel.

    Limitations:
    Tests must have a .t suffix. Tests with duplicate names will
    overwrite each other, so only the last one will run.

    For example::

        tasks:
        - ceph:
        - cram:
            clients:
              client.0:
              - http://ceph.com/qa/test.t
              - http://ceph.com/qa/test2.t
              client.1: [http://ceph.com/qa/test.t]

    You can also run a list of cram tests on all clients::

        tasks:
        - ceph:
        - cram:
            clients:
              all: [http://ceph.com/qa/test.t]

    :param ctx: Context
    :param config: Configuration
    """
    assert isinstance(config, dict)
    assert 'clients' in config and isinstance(config['clients'], dict), \
           'configuration must contain a dictionary of clients'

    clients = teuthology.replace_all_with_clients(ctx.cluster,
                                                  config['clients'])
    testdir = teuthology.get_testdir(ctx)

    try:
        for client, tests in clients.items():
            (remote,) = ctx.cluster.only(client).remotes.keys()
            client_dir = '{tdir}/archive/cram.{role}'.format(tdir=testdir, role=client)
            remote.run(
                args=[
                    'mkdir', '--', client_dir,
                    run.Raw('&&'),
                    'virtualenv', '{tdir}/virtualenv'.format(tdir=testdir),
                    run.Raw('&&'),
                    '{tdir}/virtualenv/bin/pip'.format(tdir=testdir),
                    'install', 'cram',
                    ],
                )
            for test in tests:
                log.info('fetching test %s for %s', test, client)
                assert test.endswith('.t'), 'tests must end in .t'
                remote.run(
                    args=[
                        'wget', '-nc', '-nv', '-P', client_dir, '--', test,
                        ],
                    )

        with parallel() as p:
            for role in clients.keys():
                p.spawn(_run_tests, ctx, role)
    finally:
        for client, tests in clients.items():
            (remote,) = ctx.cluster.only(client).remotes.keys()
            client_dir = '{tdir}/archive/cram.{role}'.format(tdir=testdir, role=client)
            test_files = set([test.rsplit('/', 1)[1] for test in tests])

            # remove test files unless they failed
            for test_file in test_files:
                abs_file = os.path.join(client_dir, test_file)
                remote.run(
                    args=[
                        'test', '-f', abs_file + '.err',
                        run.Raw('||'),
                        'rm', '-f', '--', abs_file,
                        ],
                    )

            # ignore failure since more than one client may
            # be run on a host, and the client dir should be
            # non-empty if the test failed
            remote.run(
                args=[
                    'rm', '-rf', '--',
                    '{tdir}/virtualenv'.format(tdir=testdir),
                    run.Raw(';'),
                    'rmdir', '--ignore-fail-on-non-empty', client_dir,
                    ],
                )
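
For reference, a cram test is a plain-text file that pairs shell commands with their expected output; cram re-runs the commands and diffs the results. A minimal test.t might contain:

  $ echo hello
  hello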
Example #10
def task(ctx, config):
    """
    Run all cram tests from the specified urls on the specified
    clients. Each client runs tests in parallel.

    Limitations:
    Tests must have a .t suffix. Tests with duplicate names will
    overwrite each other, so only the last one will run.

    For example::

        tasks:
        - ceph:
        - cram:
            clients:
              client.0:
              - http://ceph.com/qa/test.t
              - http://ceph.com/qa/test2.t
              client.1: [http://ceph.com/qa/test.t]
            branch: foo

    You can also run a list of cram tests on all clients::

        tasks:
        - ceph:
        - cram:
            clients:
              all: [http://ceph.com/qa/test.t]

    :param ctx: Context
    :param config: Configuration
    """
    assert isinstance(config, dict)
    assert "clients" in config and isinstance(
        config["clients"], dict
    ), "configuration must contain a dictionary of clients"

    clients = teuthology.replace_all_with_clients(ctx.cluster, config["clients"])
    testdir = teuthology.get_testdir(ctx)

    overrides = ctx.config.get("overrides", {})
    teuthology.deep_merge(config, overrides.get("workunit", {}))

    refspec = config.get("branch")
    if refspec is None:
        refspec = config.get("tag")
    if refspec is None:
        refspec = config.get("sha1")
    if refspec is None:
        refspec = "HEAD"

    try:
        for client, tests in clients.items():
            (remote,) = ctx.cluster.only(client).remotes.keys()
            client_dir = "{tdir}/archive/cram.{role}".format(tdir=testdir, role=client)
            remote.run(
                args=[
                    "mkdir",
                    "--",
                    client_dir,
                    run.Raw("&&"),
                    "virtualenv",
                    "{tdir}/virtualenv".format(tdir=testdir),
                    run.Raw("&&"),
                    "{tdir}/virtualenv/bin/pip".format(tdir=testdir),
                    "install",
                    "cram==0.6",
                ]
            )
            for test in tests:
                log.info("fetching test %s for %s", test, client)
                assert test.endswith(".t"), "tests must end in .t"
                remote.run(args=["wget", "-nc", "-nv", "-P", client_dir, "--", test.format(branch=refspec)])

        with parallel() as p:
            for role in clients.keys():
                p.spawn(_run_tests, ctx, role)
    finally:
        for client, tests in clients.items():
            (remote,) = ctx.cluster.only(client).remotes.keys()
            client_dir = "{tdir}/archive/cram.{role}".format(tdir=testdir, role=client)
            test_files = set([test.rsplit("/", 1)[1] for test in tests])

            # remove test files unless they failed
            for test_file in test_files:
                abs_file = os.path.join(client_dir, test_file)
                remote.run(args=["test", "-f", abs_file + ".err", run.Raw("||"), "rm", "-f", "--", abs_file])

            # ignore failure since more than one client may
            # be run on a host, and the client dir should be
            # non-empty if the test failed
            remote.run(
                args=[
                    "rm",
                    "-rf",
                    "--",
                    "{tdir}/virtualenv".format(tdir=testdir),
                    run.Raw(";"),
                    "rmdir",
                    "--ignore-fail-on-non-empty",
                    client_dir,
                ]
            )
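
Unlike Example #9, this variant resolves a refspec (branch, tag, or sha1, falling back to HEAD) and substitutes it into each test URL via `test.format(branch=refspec)`, so a config can embed the branch in the URL. A small illustration with a hypothetical URL:

# With "branch: foo" in the config, refspec resolves to 'foo':
test = 'http://example.com/qa/{branch}/test.t'
print(test.format(branch='foo'))  # http://example.com/qa/foo/test.t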
Example #11
def xfstests(ctx, config):
    """
    Run xfstests over rbd devices.  This interface sets up all
    required configuration automatically if not otherwise specified.
    Note that only one instance of xfstests can run on a single host
    at a time.  By default, the set of tests specified is run once.
    If a (non-zero) count value is supplied, the complete set of
    tests will be run that number of times.

    For example::

        tasks:
        - ceph:
        # Image sizes are in MB
        - rbd.xfstests:
            client.0:
                count: 3
                test_image: 'test_image'
                test_size: 250
                test_format: 2
                scratch_image: 'scratch_image'
                scratch_size: 250
                scratch_format: 1
                fs_type: 'xfs'
                tests: '1-9 11-15 17 19-21 26-28 31-34 41 45-48'
    """
    if config is None:
        config = { 'all': None }
    assert isinstance(config, dict) or isinstance(config, list), \
        "task xfstests only supports a list or dictionary for configuration"
    if isinstance(config, dict):
        config = teuthology.replace_all_with_clients(ctx.cluster, config)
        runs = config.items()
    else:
        runs = [(role, None) for role in config]

    running_xfstests = {}
    for role, properties in runs:
        assert role.startswith('client.'), \
            "task xfstests can only run on client nodes"
        for host, roles_for_host in ctx.cluster.remotes.items():
            if role in roles_for_host:
                assert host not in running_xfstests, \
                    "task xfstests allows only one instance at a time per host"
                running_xfstests[host] = True

    images_config = {}
    scratch_config = {}
    modprobe_config = {}
    image_map_config = {}
    scratch_map_config = {}
    xfstests_config = {}
    for role, properties in runs:
        if properties is None:
            properties = {}

        test_image = properties.get('test_image', 'test_image.{role}'.format(role=role))
        test_size = properties.get('test_size', 2000) # 2G
        test_fmt = properties.get('test_format', 1)
        scratch_image = properties.get('scratch_image', 'scratch_image.{role}'.format(role=role))
        scratch_size = properties.get('scratch_size', 10000) # 10G
        scratch_fmt = properties.get('scratch_format', 1)

        images_config[role] = dict(
            image_name=test_image,
            image_size=test_size,
            image_format=test_fmt,
            )

        scratch_config[role] = dict(
            image_name=scratch_image,
            image_size=scratch_size,
            image_format=scratch_fmt,
            )

        xfstests_config[role] = dict(
            count=properties.get('count', 1),
            test_dev='/dev/rbd/rbd/{image}'.format(image=test_image),
            scratch_dev='/dev/rbd/rbd/{image}'.format(image=scratch_image),
            fs_type=properties.get('fs_type', 'xfs'),
            tests=properties.get('tests'),
            )

        log.info('Setting up xfstests using RBD images:')
        log.info('      test ({size} MB): {image}'.format(size=test_size,
                                                        image=test_image))
        log.info('   scratch ({size} MB): {image}'.format(size=scratch_size,
                                                        image=scratch_image))
        modprobe_config[role] = None
        image_map_config[role] = test_image
        scratch_map_config[role] = scratch_image

    with contextutil.nested(
        lambda: create_image(ctx=ctx, config=images_config),
        lambda: create_image(ctx=ctx, config=scratch_config),
        lambda: modprobe(ctx=ctx, config=modprobe_config),
        lambda: dev_create(ctx=ctx, config=image_map_config),
        lambda: dev_create(ctx=ctx, config=scratch_map_config),
        lambda: run_xfstests(ctx=ctx, config=xfstests_config),
        ):
        yield
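
The normalization at the top of `xfstests` means both accepted config shapes reduce to the same `(role, properties)` pairs, for instance:

dict_config = {'client.0': {'count': 3}}
list_config = ['client.0', 'client.1']

print(list(dict_config.items()))
# [('client.0', {'count': 3})]
print([(role, None) for role in list_config])
# [('client.0', None), ('client.1', None)]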
Example #12
def task(ctx, config):
    """
    Run a test inside of QEMU on top of rbd. Only one test
    is supported per client.

    For example, you can specify which clients to run on::

        tasks:
        - ceph:
        - qemu:
            client.0:
              test: http://ceph.com/qa/test.sh
            client.1:
              test: http://ceph.com/qa/test2.sh

    Or use the same settings on all clients::

        tasks:
        - ceph:
        - qemu:
            all:
              test: http://ceph.com/qa/test.sh

    For tests that don't need a filesystem, set type to block::

        tasks:
        - ceph:
        - qemu:
            client.0:
              test: http://ceph.com/qa/test.sh
              type: block

    The test should be configured to run on /dev/vdb and later
    devices.

    If you want to run a test that uses more than one rbd image,
    specify how many images to use::

        tasks:
        - ceph:
        - qemu:
            client.0:
              test: http://ceph.com/qa/test.sh
              type: block
              num_rbd: 2

    You can set the amount of memory the VM has (default is 1024 MB)::

        tasks:
        - ceph:
        - qemu:
            client.0:
              test: http://ceph.com/qa/test.sh
              memory: 512 # megabytes

    If you want to run a test against a cloned rbd image, set clone to true::

        tasks:
        - ceph:
        - qemu:
            client.0:
              test: http://ceph.com/qa/test.sh
              clone: true
    """
    assert isinstance(config, dict), \
           "task qemu only supports a dictionary for configuration"

    config = teuthology.replace_all_with_clients(ctx.cluster, config)

    managers = []
    create_images(ctx=ctx, config=config, managers=managers)
    managers.extend([
        lambda: create_dirs(ctx=ctx, config=config),
        lambda: generate_iso(ctx=ctx, config=config),
        lambda: download_image(ctx=ctx, config=config),
    ])
    create_clones(ctx=ctx, config=config, managers=managers)
    managers.append(lambda: run_qemu(ctx=ctx, config=config), )

    with contextutil.nested(*managers):
        yield
Example #13
def task(ctx, config):
    """
    Run all cram tests from the specified urls on the specified
    clients. Each client runs tests in parallel.

    Limitations:
    Tests must have a .t suffix. Tests with duplicate names will
    overwrite each other, so only the last one will run.

    For example::

        tasks:
        - ceph:
        - cram:
            clients:
              client.0:
              - http://ceph.com/qa/test.t
              - http://ceph.com/qa/test2.t
              client.1: [http://ceph.com/qa/test.t]
            branch: foo

    You can also run a list of cram tests on all clients::

        tasks:
        - ceph:
        - cram:
            clients:
              all: [http://ceph.com/qa/test.t]

    :param ctx: Context
    :param config: Configuration
    """
    assert isinstance(config, dict)
    assert 'clients' in config and isinstance(config['clients'], dict), \
           'configuration must contain a dictionary of clients'

    clients = teuthology.replace_all_with_clients(ctx.cluster,
                                                  config['clients'])
    testdir = teuthology.get_testdir(ctx)

    overrides = ctx.config.get('overrides', {})
    teuthology.deep_merge(config, overrides.get('workunit', {}))

    refspec = config.get('branch')
    if refspec is None:
        refspec = config.get('tag')
    if refspec is None:
        refspec = config.get('sha1')
    if refspec is None:
        refspec = 'HEAD'

    # hack: the git_url is always ceph-ci or ceph
    git_url = teuth_config.get_ceph_git_url()
    repo_name = 'ceph.git'
    if 'ceph-ci' in git_url:
        repo_name = 'ceph-ci.git'

    try:
        for client, tests in clients.items():
            (remote, ) = ctx.cluster.only(client).remotes.keys()
            client_dir = '{tdir}/archive/cram.{role}'.format(tdir=testdir,
                                                             role=client)
            remote.run(args=[
                'mkdir',
                '--',
                client_dir,
                run.Raw('&&'),
                'virtualenv',
                '{tdir}/virtualenv'.format(tdir=testdir),
                run.Raw('&&'),
                '{tdir}/virtualenv/bin/pip'.format(tdir=testdir),
                'install',
                'cram==0.6',
            ], )
            for test in tests:
                url = test.format(repo=repo_name, branch=refspec)
                log.info('fetching test %s for %s', url, client)
                assert test.endswith('.t'), 'tests must end in .t'
                remote.run(args=[
                    'wget',
                    '-nc',
                    '-nv',
                    '-P',
                    client_dir,
                    '--',
                    url,
                ], )

        with parallel() as p:
            for role in clients.keys():
                p.spawn(_run_tests, ctx, role)
    finally:
        for client, tests in clients.items():
            (remote, ) = ctx.cluster.only(client).remotes.keys()
            client_dir = '{tdir}/archive/cram.{role}'.format(tdir=testdir,
                                                             role=client)
            test_files = set([test.rsplit('/', 1)[1] for test in tests])

            # remove test files unless they failed
            for test_file in test_files:
                abs_file = os.path.join(client_dir, test_file)
                remote.run(args=[
                    'test',
                    '-f',
                    abs_file + '.err',
                    run.Raw('||'),
                    'rm',
                    '-f',
                    '--',
                    abs_file,
                ], )

            # ignore failure since more than one client may
            # be run on a host, and the client dir should be
            # non-empty if the test failed
            remote.run(args=[
                'rm',
                '-rf',
                '--',
                '{tdir}/virtualenv'.format(tdir=testdir),
                run.Raw(';'),
                'rmdir',
                '--ignore-fail-on-non-empty',
                client_dir,
            ], )
Example #14
def task(ctx, config):
    """
    Run a test inside of QEMU on top of rbd. Only one test
    is supported per client.

    For example, you can specify which clients to run on::

        tasks:
        - ceph:
        - qemu:
            client.0:
              test: http://ceph.com/qa/test.sh
            client.1:
              test: http://ceph.com/qa/test2.sh

    Or use the same settings on all clients::

        tasks:
        - ceph:
        - qemu:
            all:
              test: http://ceph.com/qa/test.sh

    For tests that don't need a filesystem, set type to block::

        tasks:
        - ceph:
        - qemu:
            client.0:
              test: http://ceph.com/qa/test.sh
              type: block

    The test should be configured to run on /dev/vdb and later
    devices.

    If you want to run a test that uses more than one rbd image,
    specify how many images to use::

        tasks:
        - ceph:
        - qemu:
            client.0:
              test: http://ceph.com/qa/test.sh
              type: block
              num_rbd: 2

    You can set the amount of memory the VM has (default is 1024 MB)::

        tasks:
        - ceph:
        - qemu:
            client.0:
              test: http://ceph.com/qa/test.sh
              memory: 512 # megabytes
    """
    assert isinstance(config, dict), \
           "task qemu only supports a dictionary for configuration"

    config = teuthology.replace_all_with_clients(ctx.cluster, config)

    managers = []
    for client, client_config in config.items():
        num_rbd = client_config.get('num_rbd', 1)
        assert num_rbd > 0, 'at least one rbd device must be used'
        for i in range(num_rbd):
            create_config = {
                client: {
                    'image_name':
                    '{client}.{num}'.format(client=client, num=i),
                    }
                }
            managers.append(
                lambda create_config=create_config:
                rbd.create_image(ctx=ctx, config=create_config)
                )

    managers.extend([
        lambda: create_dirs(ctx=ctx, config=config),
        lambda: generate_iso(ctx=ctx, config=config),
        lambda: download_image(ctx=ctx, config=config),
        lambda: run_qemu(ctx=ctx, config=config),
        ])

    with contextutil.nested(*managers):
        yield
Example #15
def task(ctx, config):
    """
    Run a test inside of QEMU on top of rbd. Only one test
    is supported per client.

    For example, you can specify which clients to run on::

        tasks:
        - ceph:
        - qemu:
            client.0:
              test: http://download.ceph.com/qa/test.sh
            client.1:
              test: http://download.ceph.com/qa/test2.sh

    Or use the same settings on all clients::

        tasks:
        - ceph:
        - qemu:
            all:
              test: http://download.ceph.com/qa/test.sh

    For tests that don't need a filesystem, set type to block::

        tasks:
        - ceph:
        - qemu:
            client.0:
              test: http://download.ceph.com/qa/test.sh
              type: block

    The test should be configured to run on /dev/vdb and later
    devices.

    If you want to run a test that uses more than one rbd image,
    specify how many images to use::

        tasks:
        - ceph:
        - qemu:
            client.0:
              test: http://download.ceph.com/qa/test.sh
              type: block
              disks: 2

    - or -

        tasks:
        - ceph:
        - qemu:
            client.0:
              test: http://ceph.com/qa/test.sh
              type: block
              disks:
                - image_size: 1024
                - image_size: 2048

    You can set the number of CPUs and the amount of memory the VM has
    (default is 1 CPU and 4096 MB)::

        tasks:
        - ceph:
        - qemu:
            client.0:
              test: http://download.ceph.com/qa/test.sh
              cpus: 4
              memory: 512 # megabytes

    If you want to run a test against a cloned rbd image, set clone to true::

        tasks:
        - ceph:
        - qemu:
            client.0:
              test: http://download.ceph.com/qa/test.sh
              clone: true

    If you need to configure additional cloud-config options, set cloud_config
    to the required data set::

        tasks:
        - ceph
        - qemu:
            client.0:
                test: http://ceph.com/qa/test.sh
                cloud_config_archive:
                    - |
                      #!/bin/bash
                      touch foo1
                    - content: |
                        test data
                      type: text/plain
                      filename: /tmp/data

    If you need to override the default cloud image, set image_url::

        tasks:
        - ceph
        - qemu:
            client.0:
                test: http://ceph.com/qa/test.sh
                image_url: https://cloud-images.ubuntu.com/releases/16.04/release/ubuntu-16.04-server-cloudimg-amd64-disk1.img
    """
    assert isinstance(config, dict), \
           "task qemu only supports a dictionary for configuration"

    config = teuthology.replace_all_with_clients(ctx.cluster, config)

    managers = []
    create_images(ctx=ctx, config=config, managers=managers)
    managers.extend([
        lambda: create_dirs(ctx=ctx, config=config),
        lambda: generate_iso(ctx=ctx, config=config),
        lambda: download_image(ctx=ctx, config=config),
        ])
    create_clones(ctx=ctx, config=config, managers=managers)
    managers.append(
        lambda: run_qemu(ctx=ctx, config=config),
        )

    with contextutil.nested(*managers):
        yield
Example #16
def task(ctx, config):
    """
    Run all cram tests from the specified urls on the specified
    clients. Each client runs tests in parallel.

    Limitations:
    Tests must have a .t suffix. Tests with duplicate names will
    overwrite each other, so only the last one will run.

    For example::

        tasks:
        - ceph:
        - cram:
            clients:
              client.0:
              - http://ceph.com/qa/test.t
              - http://ceph.com/qa/test2.t
              client.1: [http://ceph.com/qa/test.t]

    You can also run a list of cram tests on all clients::

        tasks:
        - ceph:
        - cram:
            clients:
              all: [http://ceph.com/qa/test.t]
    """
    assert isinstance(config, dict)
    assert 'clients' in config and isinstance(config['clients'], dict), \
           'configuration must contain a dictionary of clients'

    clients = teuthology.replace_all_with_clients(ctx.cluster,
                                                  config['clients'])
    testdir = teuthology.get_testdir(ctx)

    try:
        for client, tests in clients.items():
            (remote, ) = ctx.cluster.only(client).remotes.keys()
            client_dir = '{tdir}/archive/cram.{role}'.format(tdir=testdir,
                                                             role=client)
            remote.run(args=[
                'mkdir',
                '--',
                client_dir,
                run.Raw('&&'),
                'virtualenv',
                '{tdir}/virtualenv'.format(tdir=testdir),
                run.Raw('&&'),
                '{tdir}/virtualenv/bin/pip'.format(tdir=testdir),
                'install',
                'cram',
            ], )
            for test in tests:
                log.info('fetching test %s for %s', test, client)
                assert test.endswith('.t'), 'tests must end in .t'
                remote.run(args=[
                    'wget',
                    '-nc',
                    '-nv',
                    '-P',
                    client_dir,
                    '--',
                    test,
                ], )

        with parallel() as p:
            for role in clients.keys():
                p.spawn(_run_tests, ctx, role)
    finally:
        for client, tests in clients.items():
            (remote, ) = ctx.cluster.only(client).remotes.keys()
            client_dir = '{tdir}/archive/cram.{role}'.format(tdir=testdir,
                                                             role=client)
            test_files = set([test.rsplit('/', 1)[1] for test in tests])

            # remove test files unless they failed
            for test_file in test_files:
                abs_file = os.path.join(client_dir, test_file)
                remote.run(args=[
                    'test',
                    '-f',
                    abs_file + '.err',
                    run.Raw('||'),
                    'rm',
                    '-f',
                    '--',
                    abs_file,
                ], )

            # ignore failure since more than one client may
            # be run on a host, and the client dir should be
            # non-empty if the test failed
            remote.run(args=[
                'rm',
                '-rf',
                '--',
                '{tdir}/virtualenv'.format(tdir=testdir),
                run.Raw(';'),
                'rmdir',
                '--ignore-fail-on-non-empty',
                client_dir,
            ], )
Example #17
def task(ctx, config):
    """
    Run a test inside of QEMU on top of rbd. Only one test
    is supported per client.

    For example, you can specify which clients to run on::

        tasks:
        - ceph:
        - qemu:
            client.0:
              test: http://ceph.com/qa/test.sh
            client.1:
              test: http://ceph.com/qa/test2.sh

    Or use the same settings on all clients::

        tasks:
        - ceph:
        - qemu:
            all:
              test: http://ceph.com/qa/test.sh

    For tests that don't need a filesystem, set type to block::

        tasks:
        - ceph:
        - qemu:
            client.0:
              test: http://ceph.com/qa/test.sh
              type: block

    The test should be configured to run on /dev/vdb and later
    devices.

    If you want to run a test that uses more than one rbd image,
    specify how many images to use::

        tasks:
        - ceph:
        - qemu:
            client.0:
              test: http://ceph.com/qa/test.sh
              type: block
              num_rbd: 2

    You can set the amount of memory the VM has (default is 1024 MB)::

        tasks:
        - ceph:
        - qemu:
            client.0:
              test: http://ceph.com/qa/test.sh
              memory: 512 # megabytes

    If you want to run a test against a cloned rbd image, set clone to true::

        tasks:
        - ceph:
        - qemu:
            client.0:
              test: http://ceph.com/qa/test.sh
              clone: true
    """
    assert isinstance(config, dict), \
           "task qemu only supports a dictionary for configuration"

    config = teuthology.replace_all_with_clients(ctx.cluster, config)

    managers = []
    create_images(ctx=ctx, config=config, managers=managers)
    managers.extend([
        lambda: create_dirs(ctx=ctx, config=config),
        lambda: generate_iso(ctx=ctx, config=config),
        lambda: download_image(ctx=ctx, config=config),
        ])
    create_clones(ctx=ctx, config=config, managers=managers)
    managers.append(
        lambda: run_qemu(ctx=ctx, config=config),
        )

    with contextutil.nested(*managers):
        yield
Example #18
File: cram.py Project: zxgm/ceph
def task(ctx, config):
    """
    Run all cram tests from the specified paths on the specified
    clients. Each client runs tests in parallel.

    Limitations:
    Tests must have a .t suffix. Tests with duplicate names will
    overwrite each other, so only the last one will run.

    For example::

        tasks:
        - ceph:
        - cram:
            clients:
              client.0:
              - qa/test.t
              - qa/test2.t
              client.1: [qa/test.t]
            branch: foo

    You can also run a list of cram tests on all clients::

        tasks:
        - ceph:
        - cram:
            clients:
              all: [qa/test.t]

    :param ctx: Context
    :param config: Configuration
    """
    assert isinstance(config, dict)
    assert 'clients' in config and isinstance(config['clients'], dict), \
           'configuration must contain a dictionary of clients'

    clients = teuthology.replace_all_with_clients(ctx.cluster,
                                                  config['clients'])
    testdir = teuthology.get_testdir(ctx)

    overrides = ctx.config.get('overrides', {})
    refspec = get_refspec_after_overrides(config, overrides)

    git_url = teuth_config.get_ceph_qa_suite_git_url()
    log.info('Pulling tests from %s ref %s', git_url, refspec)

    try:
        for client, tests in clients.items():
            (remote, ) = ctx.cluster.only(client).remotes.keys()
            client_dir = '{tdir}/archive/cram.{role}'.format(tdir=testdir,
                                                             role=client)
            remote.run(args=[
                'mkdir',
                '--',
                client_dir,
                run.Raw('&&'),
                'virtualenv',
                '{tdir}/virtualenv'.format(tdir=testdir),
                run.Raw('&&'),
                '{tdir}/virtualenv/bin/pip'.format(tdir=testdir),
                'install',
                'cram==0.6',
            ], )
            clone_dir = '{tdir}/clone.{role}'.format(tdir=testdir, role=client)
            remote.run(args=refspec.clone(git_url, clone_dir))

            for test in tests:
                assert test.endswith('.t'), 'tests must end in .t'
                remote.run(args=[
                    'cp',
                    '--',
                    os.path.join(clone_dir, test),
                    client_dir,
                ], )

        with parallel() as p:
            for role in clients.keys():
                p.spawn(_run_tests, ctx, role)
    finally:
        for client, tests in clients.items():
            (remote, ) = ctx.cluster.only(client).remotes.keys()
            client_dir = '{tdir}/archive/cram.{role}'.format(tdir=testdir,
                                                             role=client)
            test_files = set([test.rsplit('/', 1)[1] for test in tests])

            # remove test files unless they failed
            for test_file in test_files:
                abs_file = os.path.join(client_dir, test_file)
                remote.run(args=[
                    'test',
                    '-f',
                    abs_file + '.err',
                    run.Raw('||'),
                    'rm',
                    '-f',
                    '--',
                    abs_file,
                ], )

            # ignore failure since more than one client may
            # be run on a host, and the client dir should be
            # non-empty if the test failed
            remote.run(args=[
                'rm',
                '-rf',
                '--',
                '{tdir}/virtualenv'.format(tdir=testdir),
                clone_dir,
                run.Raw(';'),
                'rmdir',
                '--ignore-fail-on-non-empty',
                client_dir,
            ], )
Example #19
def task(ctx, config):
    """
    Run a test inside of QEMU on top of rbd. Only one test
    is supported per client.

    For example, you can specify which clients to run on::

        tasks:
        - ceph:
        - qemu:
            client.0:
              test: http://download.ceph.com/qa/test.sh
            client.1:
              test: http://download.ceph.com/qa/test2.sh

    Or use the same settings on all clients::

        tasks:
        - ceph:
        - qemu:
            all:
              test: http://download.ceph.com/qa/test.sh

    For tests that don't need a filesystem, set type to block::

        tasks:
        - ceph:
        - qemu:
            client.0:
              test: http://download.ceph.com/qa/test.sh
              type: block

    The test should be configured to run on /dev/vdb and later
    devices.

    If you want to run a test that uses more than one rbd image,
    specify how many images to use::

        tasks:
        - ceph:
        - qemu:
            client.0:
              test: http://download.ceph.com/qa/test.sh
              type: block
              disks: 2

    - or -

        tasks:
        - ceph:
        - qemu:
            client.0:
              test: http://ceph.com/qa/test.sh
              type: block
              disks:
                - image_size: 1024
                - image_size: 2048

    You can set the number of CPUs and the amount of memory the VM has
    (default is 1 CPU and 4096 MB)::

        tasks:
        - ceph:
        - qemu:
            client.0:
              test: http://download.ceph.com/qa/test.sh
              cpus: 4
              memory: 512 # megabytes

    If you want to run a test against a cloned rbd image, set clone to true::

        tasks:
        - ceph:
        - qemu:
            client.0:
              test: http://download.ceph.com/qa/test.sh
              clone: true

    If you need to configure additional cloud-config options, set cloud_config
    to the required data set::

        tasks:
        - ceph
        - qemu:
            client.0:
                test: http://ceph.com/qa/test.sh
                cloud_config_archive:
                    - |
                      #!/bin/bash
                      touch foo1
                    - content: |
                        test data
                      type: text/plain
                      filename: /tmp/data

    If you need to override the default cloud image, set image_url::

        tasks:
        - ceph
        - qemu:
            client.0:
                test: http://ceph.com/qa/test.sh
                image_url: https://cloud-images.ubuntu.com/releases/16.04/release/ubuntu-16.04-server-cloudimg-amd64-disk1.img
    """
    assert isinstance(config, dict), \
           "task qemu only supports a dictionary for configuration"

    config = teuthology.replace_all_with_clients(ctx.cluster, config)

    managers = []
    create_images(ctx=ctx, config=config, managers=managers)
    managers.extend([
        lambda: create_dirs(ctx=ctx, config=config),
        lambda: generate_iso(ctx=ctx, config=config),
        lambda: download_image(ctx=ctx, config=config),
        ])
    create_clones(ctx=ctx, config=config, managers=managers)
    managers.append(
        lambda: run_qemu(ctx=ctx, config=config),
        )

    with contextutil.nested(*managers):
        yield
Example #20
def task(ctx, config):
    """
    Create and mount an rbd image.

    For example, you can specify which clients to run on::

        tasks:
        - ceph:
        - rbd: [client.0, client.1]

    There are a few image options::

        tasks:
        - ceph:
        - rbd:
            client.0: # uses defaults
            client.1:
                image_name: foo
                image_size: 2048
                image_format: 2
                fs_type: xfs

    To use default options on all clients::

        tasks:
        - ceph:
        - rbd:
            all:

    To create 20GiB images and format them with xfs on all clients::

        tasks:
        - ceph:
        - rbd:
            all:
              image_size: 20480
              fs_type: xfs
    """
    if config is None:
        config = { 'all': None }
    norm_config = config
    if isinstance(config, dict):
        norm_config = teuthology.replace_all_with_clients(ctx.cluster, config)
    if isinstance(norm_config, dict):
        role_images = {}
        for role, properties in norm_config.items():
            if properties is None:
                properties = {}
            role_images[role] = properties.get('image_name')
    else:
        role_images = norm_config

    log.debug('rbd config is: %s', norm_config)

    with contextutil.nested(
        lambda: create_image(ctx=ctx, config=norm_config),
        lambda: modprobe(ctx=ctx, config=norm_config),
        lambda: dev_create(ctx=ctx, config=role_images),
        lambda: generic_mkfs(ctx=ctx, config=norm_config,
                devname_rtn=rbd_devname_rtn),
        lambda: generic_mount(ctx=ctx, config=role_images,
                devname_rtn=rbd_devname_rtn),
        ):
        yield
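
`rbd_devname_rtn` is not shown in this excerpt; it is the callback `generic_mkfs` and `generic_mount` use to map a client id to its block device. Assuming the `/dev/rbd/rbd/<image>` naming used elsewhere in these examples, a sketch (both the signature and the default image name are assumptions) might look like:

def rbd_devname_rtn_sketch(ctx, id_):
    # Hypothetical: resolve the device node the kernel rbd driver
    # created for this client's default image.
    return '/dev/rbd/rbd/testimage.client.{id}'.format(id=id_)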
Example #21
def task(ctx, config):
    """
    Run a test inside of QEMU on top of rbd. Only one test
    is supported per client.

    For example, you can specify which clients to run on::

        tasks:
        - ceph:
        - qemu:
            client.0:
              test: http://download.ceph.com/qa/test.sh
            client.1:
              test: http://download.ceph.com/qa/test2.sh

    Or use the same settings on all clients::

        tasks:
        - ceph:
        - qemu:
            all:
              test: http://download.ceph.com/qa/test.sh

    For tests that want to explicitly describe the RBD images to connect::

        tasks:
        - ceph:
        - qemu:
            client.0:
                test: http://download.ceph.com/qa/test.sh
                clone: True/False (optionally clone all created disks),
                image_url: <URL> (optional default image URL)
                type: filesystem / block (optional default device type)
                disks: [
                    {
                        action: create / clone / none (optional, defaults to create)
                        image_name: <image name> (optional)
                        parent_name: <parent_name> (if action == clone),
                        type: filesystem / block (optional, defaults to filesystem)
                        image_url: <URL> (optional),
                        image_size: <MiB> (optional)
                        encryption_format: luks1 / luks2 / none (optional, defaults to none)
                    }, ...
                ]

    You can set the number of CPUs and the amount of memory the VM has
    (default is 1 CPU and 4096 MB)::

        tasks:
        - ceph:
        - qemu:
            client.0:
              test: http://download.ceph.com/qa/test.sh
              cpus: 4
              memory: 512 # megabytes

    If you need to configure additional cloud-config options, set cloud_config
    to the required data set::

        tasks:
        - ceph
        - qemu:
            client.0:
                test: http://ceph.com/qa/test.sh
                cloud_config_archive:
                    - |
                      #!/bin/bash
                      touch foo1
                    - content: |
                        test data
                      type: text/plain
                      filename: /tmp/data
    """
    assert isinstance(config, dict), \
           "task qemu only supports a dictionary for configuration"

    config = teuthology.replace_all_with_clients(ctx.cluster, config)
    normalize_disks(config)

    managers = []
    create_images(ctx=ctx, config=config, managers=managers)
    managers.extend([
        lambda: create_dirs(ctx=ctx, config=config),
        lambda: generate_iso(ctx=ctx, config=config),
        lambda: download_image(ctx=ctx, config=config),
    ])
    create_clones(ctx=ctx, config=config, managers=managers)
    create_encrypted_devices(ctx=ctx, config=config, managers=managers)
    managers.append(lambda: run_qemu(ctx=ctx, config=config), )

    with contextutil.nested(*managers):
        yield
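
`normalize_disks` is also outside this excerpt. Based purely on the schema in the docstring (an integer disk count or a list of per-disk dicts, with client-level `type` and `image_url` defaults), a hypothetical sketch of the normalization:

def normalize_disks_sketch(client_config):
    # Hypothetical: expand an integer count into default entries and
    # fill per-disk fields from the client-level defaults named in
    # the docstring above.
    disks = client_config.get('disks', 1)
    if isinstance(disks, int):
        disks = [{} for _ in range(disks)]
    for disk in disks:
        disk.setdefault('action', 'create')
        disk.setdefault('type', client_config.get('type', 'filesystem'))
        if 'image_url' in client_config:
            disk.setdefault('image_url', client_config['image_url'])
    return disks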
Example #22
def xfstests(ctx, config):
    """
    Run xfstests over rbd devices.  This interface sets up all
    required configuration automatically if not otherwise specified.
    Note that only one instance of xfstests can run on a single host
    at a time.  By default, the set of tests specified is run once.
    If a (non-zero) count value is supplied, the complete set of
    tests will be run that number of times.

    For example::

        tasks:
        - ceph:
        # Image sizes are in MB
        - rbd.xfstests:
            client.0:
                count: 3
                test_image: 'test_image'
                test_size: 250
                test_format: 2
                scratch_image: 'scratch_image'
                scratch_size: 250
                scratch_format: 1
                fs_type: 'xfs'
                tests: 'generic/100 xfs/003 xfs/005 xfs/006 generic/015'
                exclude:
                - generic/42
                randomize: true
                xfstests_branch: master
                xfstests_url: 'https://raw.github.com/ceph/branch/master/qa'
    """
    if config is None:
        config = { 'all': None }
    assert isinstance(config, dict) or isinstance(config, list), \
        "task xfstests only supports a list or dictionary for configuration"
    if isinstance(config, dict):
        config = teuthology.replace_all_with_clients(ctx.cluster, config)
        runs = config.items()
    else:
        runs = [(role, None) for role in config]

    running_xfstests = {}
    for role, properties in runs:
        assert role.startswith('client.'), \
            "task xfstests can only run on client nodes"
        for host, roles_for_host in ctx.cluster.remotes.items():
            if role in roles_for_host:
                assert host not in running_xfstests, \
                    "task xfstests allows only one instance at a time per host"
                running_xfstests[host] = True

    images_config = {}
    scratch_config = {}
    modprobe_config = {}
    image_map_config = {}
    scratch_map_config = {}
    xfstests_config = {}
    for role, properties in runs:
        if properties is None:
            properties = {}

        test_image = properties.get('test_image', 'test_image.{role}'.format(role=role))
        test_size = properties.get('test_size', 10000) # 10G
        test_fmt = properties.get('test_format', 1)
        scratch_image = properties.get('scratch_image', 'scratch_image.{role}'.format(role=role))
        scratch_size = properties.get('scratch_size', 10000) # 10G
        scratch_fmt = properties.get('scratch_format', 1)

        images_config[role] = dict(
            image_name=test_image,
            image_size=test_size,
            image_format=test_fmt,
            )

        scratch_config[role] = dict(
            image_name=scratch_image,
            image_size=scratch_size,
            image_format=scratch_fmt,
            )

        xfstests_branch = properties.get('xfstests_branch', 'master')
        xfstests_url = properties.get('xfstests_url', 'https://raw.github.com/ceph/ceph/{branch}/qa'.format(branch=xfstests_branch))

        xfstests_config[role] = dict(
            count=properties.get('count', 1),
            test_dev='/dev/rbd/rbd/{image}'.format(image=test_image),
            scratch_dev='/dev/rbd/rbd/{image}'.format(image=scratch_image),
            fs_type=properties.get('fs_type', 'xfs'),
            randomize=properties.get('randomize', False),
            tests=properties.get('tests'),
            exclude=properties.get('exclude', []),
            xfstests_url=xfstests_url,
            )

        log.info('Setting up xfstests using RBD images:')
        log.info('      test ({size} MB): {image}'.format(size=test_size,
                                                        image=test_image))
        log.info('   scratch ({size} MB): {image}'.format(size=scratch_size,
                                                        image=scratch_image))
        modprobe_config[role] = None
        image_map_config[role] = test_image
        scratch_map_config[role] = scratch_image

    with contextutil.nested(
        lambda: create_image(ctx=ctx, config=images_config),
        lambda: create_image(ctx=ctx, config=scratch_config),
        lambda: modprobe(ctx=ctx, config=modprobe_config),
        lambda: dev_create(ctx=ctx, config=image_map_config),
        lambda: dev_create(ctx=ctx, config=scratch_map_config),
        lambda: run_xfstests(ctx=ctx, config=xfstests_config),
        ):
        yield
Example #23
def xfstests(ctx, config):
    """
    Run xfstests over rbd devices.  This interface sets up all
    required configuration automatically if not otherwise specified.
    Note that only one instance of xfstests can run on a single host
    at a time.

    For example::

        tasks:
        - ceph:
        # Image sizes are in MB
        - rbd.xfstests:
            client.0:
                test_image: 'test_image'
                test_size: 250
                scratch_image: 'scratch_image'
                scratch_size: 250
                fs_type: 'xfs'
                tests: '1-9 11-15 17 19-21 26-28 31-34 41 45-48'
    """
    if config is None:
        config = { 'all': None }
    assert isinstance(config, dict) or isinstance(config, list), \
        "task xfstests only supports a list or dictionary for configuration"
    if isinstance(config, dict):
        config = teuthology.replace_all_with_clients(ctx.cluster, config)
        runs = config.items()
    else:
        runs = [(role, None) for role in config]

    running_xfstests = {}
    for role, properties in runs:
        assert role.startswith('client.'), \
            "task xfstests can only run on client nodes"
        for host, roles_for_host in ctx.cluster.remotes.items():
            if role in roles_for_host:
                assert host not in running_xfstests, \
                    "task xfstests allows only one instance at a time per host"
                running_xfstests[host] = True

    for role, properties in runs:
        if properties is None:
            properties = {}

        test_image = properties.get('test_image', 'test_image')
        test_size = properties.get('test_size', 250)
        scratch_image = properties.get('scratch_image', 'scratch_image')
        scratch_size = properties.get('scratch_size', 250)

        test_image_config = {}
        test_image_config['image_name'] = test_image
        test_image_config['image_size'] = test_size

        scratch_image_config = {}
        scratch_image_config['image_name'] = scratch_image
        scratch_image_config['image_size'] = scratch_size


        test_config = {}
        test_config['test_dev'] = \
                '/dev/rbd/rbd/{image}'.format(image=test_image)
        test_config['scratch_dev'] = \
                '/dev/rbd/rbd/{image}'.format(image=scratch_image)
        test_config['fs_type'] = properties.get('fs_type', 'xfs')
        test_config['tests'] = properties.get('tests', None)

        log.info('Setting up xfstests using RBD images:')
        log.info('      test ({size} MB): {image}'.format(size=test_size,
                                                        image=test_image))
        log.info('   scratch ({size} MB): {image}'.format(size=scratch_size,
                                                        image=scratch_image))
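        # Note: yielding inside the per-role loop makes this generator
        # yield more than once for multi-role configs, which is not valid
        # for a context-manager-style task. Later variants (see Example
        # #11) build all per-role configs up front and yield exactly once.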
        with contextutil.nested(
            lambda: create_image(ctx=ctx, \
                        config={ role: test_image_config }),
            lambda: create_image(ctx=ctx, \
                        config={ role: scratch_image_config }),
            lambda: modprobe(ctx=ctx, config={ role: None }),
            lambda: dev_create(ctx=ctx, config={ role: test_image }),
            lambda: dev_create(ctx=ctx, config={ role: scratch_image }),
            lambda: run_xfstests(ctx=ctx, config={ role: test_config }),
            ):
            yield