Example #1
File: cram.py Project: zxgm/ceph
def task(ctx, config):
    """
    Run all cram tests from the specified paths on the specified
    clients. Each client runs tests in parallel.

    Limitations:
    Tests must have a .t suffix. Tests with duplicate names will
    overwrite each other, so only the last one will run.

    For example::

        tasks:
        - ceph:
        - cram:
            clients:
              client.0:
              - qa/test.t
              - qa/test2.t
              client.1: [qa/test.t]
            branch: foo

    You can also run a list of cram tests on all clients::

        tasks:
        - ceph:
        - cram:
            clients:
              all: [qa/test.t]

    :param ctx: Context
    :param config: Configuration
    """
    assert isinstance(config, dict)
    assert 'clients' in config and isinstance(config['clients'], dict), \
           'configuration must contain a dictionary of clients'

    clients = teuthology.replace_all_with_clients(ctx.cluster,
                                                  config['clients'])
    testdir = teuthology.get_testdir(ctx)

    overrides = ctx.config.get('overrides', {})
    refspec = get_refspec_after_overrides(config, overrides)

    git_url = teuth_config.get_ceph_qa_suite_git_url()
    log.info('Pulling tests from %s ref %s', git_url, refspec)

    try:
        for client, tests in clients.iteritems():
            (remote, ) = ctx.cluster.only(client).remotes.keys()
            client_dir = '{tdir}/archive/cram.{role}'.format(tdir=testdir,
                                                             role=client)
            remote.run(args=[
                'mkdir',
                '--',
                client_dir,
                run.Raw('&&'),
                'virtualenv',
                '{tdir}/virtualenv'.format(tdir=testdir),
                run.Raw('&&'),
                '{tdir}/virtualenv/bin/pip'.format(tdir=testdir),
                'install',
                'cram==0.6',
            ], )
            clone_dir = '{tdir}/clone.{role}'.format(tdir=testdir, role=client)
            remote.run(args=refspec.clone(git_url, clone_dir))

            for test in tests:
                assert test.endswith('.t'), 'tests must end in .t'
                remote.run(args=[
                    'cp',
                    '--',
                    os.path.join(clone_dir, test),
                    client_dir,
                ], )

        with parallel() as p:
            for role in clients.keys():
                p.spawn(_run_tests, ctx, role)
    finally:
        for client, tests in clients.iteritems():
            (remote, ) = ctx.cluster.only(client).remotes.keys()
            client_dir = '{tdir}/archive/cram.{role}'.format(tdir=testdir,
                                                             role=client)
            # recompute clone_dir per client; reusing the loop variable left
            # over from the try block would only point at the last clone
            clone_dir = '{tdir}/clone.{role}'.format(tdir=testdir, role=client)
            test_files = set([test.rsplit('/', 1)[1] for test in tests])

            # remove test files unless they failed
            for test_file in test_files:
                abs_file = os.path.join(client_dir, test_file)
                remote.run(args=[
                    'test',
                    '-f',
                    abs_file + '.err',
                    run.Raw('||'),
                    'rm',
                    '-f',
                    '--',
                    abs_file,
                ], )

            # ignore failure since more than one client may
            # be run on a host, and the client dir should be
            # non-empty if the test failed
            remote.run(args=[
                'rm',
                '-rf',
                '--',
                '{tdir}/virtualenv'.format(tdir=testdir),
                clone_dir,
                run.Raw(';'),
                'rmdir',
                '--ignore-fail-on-non-empty',
                client_dir,
            ], )
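
The _run_tests helper that p.spawn invokes is not part of this excerpt. A minimal sketch of what it plausibly does, assuming the virtualenv and archive/cram.<role> layout prepared above (the real helper may also set environment variables or wrap the run in coverage tooling):

def _run_tests(ctx, role):
    """Hypothetical sketch: run every staged .t file for one client
    using the cram binary installed into the task's virtualenv."""
    testdir = teuthology.get_testdir(ctx)
    (remote,) = ctx.cluster.only(role).remotes.keys()
    log.info('Running cram tests for %s...', role)
    remote.run(args=[
        '{tdir}/virtualenv/bin/cram'.format(tdir=testdir),
        '-v', '--',
        # cram leaves a <test>.t.err file beside each failing test;
        # the cleanup in the 'finally' block above keys off those files
        run.Raw('{tdir}/archive/cram.{role}/*.t'.format(tdir=testdir,
                                                        role=role)),
    ])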
Example #2
@contextlib.contextmanager
def generate_iso(ctx, config):
    """Execute system commands to generate iso"""
    log.info('generating iso...')
    testdir = teuthology.get_testdir(ctx)

    # use ctx.config instead of config, because config has been
    # through teuthology.replace_all_with_clients()
    refspec = get_refspec_after_overrides(ctx.config, {})

    git_url = teuth_config.get_ceph_qa_suite_git_url()
    log.info('Pulling tests from %s ref %s', git_url, refspec)

    for client, client_config in config.iteritems():
        assert 'test' in client_config, 'You must specify a test to run'
        test = client_config['test']

        (remote,) = ctx.cluster.only(client).remotes.keys()

        clone_dir = '{tdir}/qemu_clone.{role}'.format(tdir=testdir, role=client)
        remote.run(args=refspec.clone(git_url, clone_dir))

        src_dir = os.path.dirname(__file__)
        userdata_path = os.path.join(testdir, 'qemu', 'userdata.' + client)
        metadata_path = os.path.join(testdir, 'qemu', 'metadata.' + client)

        with file(os.path.join(src_dir, 'userdata_setup.yaml'), 'rb') as f:
            test_setup = ''.join(f.readlines())
            # configuring the commands to setup the nfs mount
            mnt_dir = "/export/{client}".format(client=client)
            test_setup = test_setup.format(
                mnt_dir=mnt_dir
            )

        with file(os.path.join(src_dir, 'userdata_teardown.yaml'), 'rb') as f:
            test_teardown = ''.join(f.readlines())

        user_data = test_setup
        if client_config.get('type', 'filesystem') == 'filesystem':
            num_disks = client_config.get('disks', DEFAULT_NUM_DISKS)
            if isinstance(num_disks, list):
                num_disks = len(num_disks)
            for i in xrange(1, num_disks):
                dev_letter = chr(ord('a') + i)
                user_data += """
- |
  #!/bin/bash
  mkdir /mnt/test_{dev_letter}
  mkfs -t xfs /dev/vd{dev_letter}
  mount -t xfs /dev/vd{dev_letter} /mnt/test_{dev_letter}
""".format(dev_letter=dev_letter)

        user_data += """
- |
  #!/bin/bash
  test -d /etc/ceph || mkdir /etc/ceph
  cp /mnt/cdrom/ceph.* /etc/ceph/
"""

        cloud_config_archive = client_config.get('cloud_config_archive', [])
        if cloud_config_archive:
            user_data += yaml.safe_dump(cloud_config_archive, default_style='|',
                                        default_flow_style=False)

        # this may change later to pass the directories as args to the
        # script or something. xfstests needs that.
        user_data += """
- |
  #!/bin/bash
  test -d /mnt/test_b && cd /mnt/test_b
  /mnt/cdrom/test.sh > /mnt/log/test.log 2>&1 && touch /mnt/log/success
""" + test_teardown

        user_data = user_data.format(
            ceph_branch=ctx.config.get('branch'),
            ceph_sha1=ctx.config.get('sha1'))
        teuthology.write_file(remote, userdata_path, StringIO(user_data))

        with file(os.path.join(src_dir, 'metadata.yaml'), 'rb') as f:
            teuthology.write_file(remote, metadata_path, f)

        test_file = '{tdir}/qemu/{client}.test.sh'.format(tdir=testdir, client=client)

        log.info('fetching test %s for %s', test, client)
        remote.run(
            args=[
                'cp', '--', os.path.join(clone_dir, test), test_file,
                run.Raw('&&'),
                'chmod', '755', test_file,
                ],
            )
        remote.run(
            args=[
                'genisoimage', '-quiet', '-input-charset', 'utf-8',
                '-volid', 'cidata', '-joliet', '-rock',
                '-o', '{tdir}/qemu/{client}.iso'.format(tdir=testdir, client=client),
                '-graft-points',
                'user-data={userdata}'.format(userdata=userdata_path),
                'meta-data={metadata}'.format(metadata=metadata_path),
                'ceph.conf=/etc/ceph/ceph.conf',
                'ceph.keyring=/etc/ceph/ceph.keyring',
                'test.sh={file}'.format(file=test_file),
                ],
            )
    try:
        yield
    finally:
        for client in config.iterkeys():
            (remote,) = ctx.cluster.only(client).remotes.keys()
            remote.run(
                args=[
                    'rm', '-rf',
                    '{tdir}/qemu/{client}.iso'.format(tdir=testdir, client=client),
                    os.path.join(testdir, 'qemu', 'userdata.' + client),
                    os.path.join(testdir, 'qemu', 'metadata.' + client),
                    '{tdir}/qemu/{client}.test.sh'.format(tdir=testdir, client=client),
                    '{tdir}/qemu_clone.{client}'.format(tdir=testdir, client=client),
                    ],
                )
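
The genisoimage invocation above builds a cloud-init "NoCloud" seed image: cloud-init looks for a volume labelled cidata containing user-data and meta-data files, and -graft-points lets each name=path pair place a local file under that name in the ISO root. A standalone sketch of the same invocation, with illustrative paths and an assumed helper name:

import subprocess

def build_seed_iso(out_iso, userdata_path, metadata_path, extra_files=None):
    """Sketch: build a cloud-init NoCloud seed ISO like the task above."""
    args = [
        'genisoimage', '-quiet', '-input-charset', 'utf-8',
        '-volid', 'cidata', '-joliet', '-rock',
        '-o', out_iso,
        '-graft-points',
        'user-data={0}'.format(userdata_path),
        'meta-data={0}'.format(metadata_path),
    ]
    # extra_files maps a name inside the ISO to a local path,
    # e.g. {'test.sh': '/tmp/test.sh'}
    for name, path in (extra_files or {}).items():
        args.append('{0}={1}'.format(name, path))
    subprocess.check_call(args)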
Example #3
def task(ctx, config):
    """
    Run ceph on all workunits found under the specified path.

    For example::

        tasks:
        - ceph:
        - ceph-fuse: [client.0]
        - workunit:
            clients:
              client.0: [direct_io, xattrs.sh]
              client.1: [snaps]
            branch: foo

    You can also run a list of workunits on all clients::

        tasks:
        - ceph:
        - ceph-fuse:
        - workunit:
            tag: v0.47
            clients:
              all: [direct_io, xattrs.sh, snaps]

    If you have an "all" section it will run all the workunits
    on each client simultaneously, AFTER running any workunits specified
    for individual clients. (This prevents unintended simultaneous runs.)

    To customize tests, you can specify environment variables as a dict. You
    can also specify a time limit for each work unit (defaults to 3h)::

        tasks:
        - ceph:
        - ceph-fuse:
        - workunit:
            sha1: 9b28948635b17165d17c1cf83d4a870bd138ddf6
            clients:
              all: [snaps]
            env:
              FOO: bar
              BAZ: quux
            timeout: 3h

    This task supports roles that include a ceph cluster, e.g.::

        tasks:
        - ceph:
        - workunit:
            clients:
              backup.client.0: [foo]
              client.1: [bar] # cluster is implicitly 'ceph'

    You can also specify an alternative top-level dir to 'qa/workunits', like
    'qa/standalone', with::

        tasks:
        - install:
        - workunit:
            basedir: qa/standalone
            clients:
              client.0:
                - test-ceph-helpers.sh

    :param ctx: Context
    :param config: Configuration
    """
    assert isinstance(config, dict)
    assert isinstance(config.get('clients'), dict), \
        'configuration must contain a dictionary of clients'

    overrides = ctx.config.get('overrides', {})
    refspec = get_refspec_after_overrides(config, overrides)
    timeout = config.get('timeout', '3h')
    cleanup = config.get('cleanup', True)

    log.info('Pulling workunits from ref %s', refspec)

    created_mountpoint = {}

    if config.get('env') is not None:
        assert isinstance(config['env'], dict), 'env must be a dictionary'
    clients = config['clients']

    # Create scratch dirs for any non-all workunits
    log.info('Making a separate scratch dir for every client...')
    for role in clients.iterkeys():
        assert isinstance(role, basestring)
        if role == "all":
            continue

        assert 'client' in role
        created_mnt_dir = _make_scratch_dir(ctx, role, config.get('subdir'))
        created_mountpoint[role] = created_mnt_dir

    # Execute any non-all workunits
    log.info("timeout={}".format(timeout))
    log.info("cleanup={}".format(cleanup))
    with parallel() as p:
        for role, tests in clients.iteritems():
            if role != "all":
                p.spawn(_run_tests,
                        ctx,
                        refspec,
                        role,
                        tests,
                        config.get('env'),
                        basedir=config.get('basedir', 'qa/workunits'),
                        timeout=timeout,
                        cleanup=cleanup)

    if cleanup:
        # Clean up dirs from any non-all workunits
        for role, created in created_mountpoint.items():
            _delete_dir(ctx, role, created)

    # Execute any 'all' workunits
    if 'all' in clients:
        all_tasks = clients["all"]
        _spawn_on_all_clients(ctx,
                              refspec,
                              all_tasks,
                              config.get('env'),
                              config.get('basedir', 'qa/workunits'),
                              config.get('subdir'),
                              timeout=timeout,
                              cleanup=cleanup)
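
_spawn_on_all_clients is referenced but not shown here. A plausible minimal sketch, built only from the helpers this excerpt already uses (_make_scratch_dir, _run_tests, _delete_dir) and the ordering the docstring promises: each 'all' workunit runs on every client simultaneously, one workunit after another:

def _spawn_on_all_clients(ctx, refspec, tests, env, basedir, subdir,
                          timeout=None, cleanup=True):
    """Hypothetical sketch: run each 'all' workunit on every client
    at the same time, one workunit at a time."""
    client_roles = []
    for roles_for_host in ctx.cluster.remotes.values():
        client_roles.extend(r for r in roles_for_host if 'client' in r)
    created = {}
    for role in client_roles:
        created[role] = _make_scratch_dir(ctx, role, subdir)
    for unit in tests:
        # one workunit at a time, but on all clients in parallel
        with parallel() as p:
            for role in client_roles:
                p.spawn(_run_tests, ctx, refspec, role, [unit], env,
                        basedir=basedir, timeout=timeout)
    if cleanup:
        for role, created_mnt in created.items():
            _delete_dir(ctx, role, created_mnt)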
Example #4
def task(ctx, config):
    """
    Run all cram tests from the specified paths on the specified
    clients. Each client runs tests in parallel.

    Limitations:
    Tests must have a .t suffix. Tests with duplicate names will
    overwrite each other, so only the last one will run.

    For example::

        tasks:
        - ceph:
        - cram:
            clients:
              client.0:
              - qa/test.t
              - qa/test2.t
              client.1: [qa/test.t]
            branch: foo

    You can also run a list of cram tests on all clients::

        tasks:
        - ceph:
        - cram:
            clients:
              all: [qa/test.t]

    :param ctx: Context
    :param config: Configuration
    """
    assert isinstance(config, dict)
    assert 'clients' in config and isinstance(config['clients'], dict), \
           'configuration must contain a dictionary of clients'

    clients = teuthology.replace_all_with_clients(ctx.cluster,
                                                  config['clients'])
    testdir = teuthology.get_testdir(ctx)

    overrides = ctx.config.get('overrides', {})
    refspec = get_refspec_after_overrides(config, overrides)

    git_url = teuth_config.get_ceph_qa_suite_git_url()
    log.info('Pulling tests from %s ref %s', git_url, refspec)

    try:
        for client, tests in clients.iteritems():
            (remote,) = ctx.cluster.only(client).remotes.iterkeys()
            client_dir = '{tdir}/archive/cram.{role}'.format(tdir=testdir, role=client)
            remote.run(
                args=[
                    'mkdir', '--', client_dir,
                    run.Raw('&&'),
                    'virtualenv', '{tdir}/virtualenv'.format(tdir=testdir),
                    run.Raw('&&'),
                    '{tdir}/virtualenv/bin/pip'.format(tdir=testdir),
                    'install', 'cram==0.6',
                    ],
                )
            clone_dir = '{tdir}/clone.{role}'.format(tdir=testdir, role=client)
            remote.run(args=refspec.clone(git_url, clone_dir))

            for test in tests:
                assert test.endswith('.t'), 'tests must end in .t'
                remote.run(
                    args=[
                        'cp', '--', os.path.join(clone_dir, test), client_dir,
                        ],
                    )

        with parallel() as p:
            for role in clients.iterkeys():
                p.spawn(_run_tests, ctx, role)
    finally:
        for client, tests in clients.iteritems():
            (remote,) = ctx.cluster.only(client).remotes.iterkeys()
            client_dir = '{tdir}/archive/cram.{role}'.format(tdir=testdir, role=client)
            # recompute clone_dir per client; reusing the loop variable left
            # over from the try block would only point at the last clone
            clone_dir = '{tdir}/clone.{role}'.format(tdir=testdir, role=client)
            test_files = set([test.rsplit('/', 1)[1] for test in tests])

            # remove test files unless they failed
            for test_file in test_files:
                abs_file = os.path.join(client_dir, test_file)
                remote.run(
                    args=[
                        'test', '-f', abs_file + '.err',
                        run.Raw('||'),
                        'rm', '-f', '--', abs_file,
                        ],
                    )

            # ignore failure since more than one client may
            # be run on a host, and the client dir should be
            # non-empty if the test failed
            remote.run(
                args=[
                    'rm', '-rf', '--',
                    '{tdir}/virtualenv'.format(tdir=testdir),
                    clone_dir,
                    run.Raw(';'),
                    'rmdir', '--ignore-fail-on-non-empty', client_dir,
                    ],
                )
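
The test -f <name>.err || rm guard above leans on cram's failure convention: a failing foo.t leaves a foo.t.err file with the actual output beside it, while a passing test leaves nothing. The same check, expressed locally as a small sketch:

import os

def failed_cram_tests(client_dir):
    """Sketch: list the tests that failed, i.e. those with an .err
    file, mirroring the shell-level check in the cleanup above."""
    return sorted(f[:-len('.err')] for f in os.listdir(client_dir)
                  if f.endswith('.t.err'))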
Example #5
def task(ctx, config):
    """
    Run ceph on all workunits found under the specified path.

    For example::

        tasks:
        - ceph:
        - ceph-fuse: [client.0]
        - workunit:
            clients:
              client.0: [direct_io, xattrs.sh]
              client.1: [snaps]
            branch: foo

    You can also run a list of workunits on all clients::

        tasks:
        - ceph:
        - ceph-fuse:
        - workunit:
            tag: v0.47
            clients:
              all: [direct_io, xattrs.sh, snaps]

    If you have an "all" section it will run all the workunits
    on each client simultaneously, AFTER running any workunits specified
    for individual clients. (This prevents unintended simultaneous runs.)

    To customize tests, you can specify environment variables as a dict. You
    can also specify a time limit for each work unit (defaults to 3h)::

        tasks:
        - ceph:
        - ceph-fuse:
        - workunit:
            sha1: 9b28948635b17165d17c1cf83d4a870bd138ddf6
            clients:
              all: [snaps]
            env:
              FOO: bar
              BAZ: quux
            timeout: 3h

    This task supports roles that include a ceph cluster, e.g.::

        tasks:
        - ceph:
        - workunit:
            clients:
              backup.client.0: [foo]
              client.1: [bar] # cluster is implicitly 'ceph'

    You can also specify an alternative top-level dir to 'qa/workunits', like
    'qa/standalone', with::

        tasks:
        - install:
        - workunit:
            basedir: qa/standalone
            clients:
              client.0:
                - test-ceph-helpers.sh

    :param ctx: Context
    :param config: Configuration
    """
    assert isinstance(config, dict)
    assert isinstance(config.get('clients'), dict), \
        'configuration must contain a dictionary of clients'

    overrides = ctx.config.get('overrides', {})
    refspec = get_refspec_after_overrides(config, overrides)
    timeout = config.get('timeout', '3h')
    cleanup = config.get('cleanup', True)

    log.info('Pulling workunits from ref %s', refspec)

    created_mountpoint = {}

    if config.get('env') is not None:
        assert isinstance(config['env'], dict), 'env must be a dictionary'
    clients = config['clients']

    # Create scratch dirs for any non-all workunits
    log.info('Making a separate scratch dir for every client...')
    for role in clients.iterkeys():
        assert isinstance(role, basestring)
        if role == "all":
            continue

        assert 'client' in role
        created_mnt_dir = _make_scratch_dir(ctx, role, config.get('subdir'))
        created_mountpoint[role] = created_mnt_dir

    # Execute any non-all workunits
    log.info("timeout={}".format(timeout))
    log.info("cleanup={}".format(cleanup))
    with parallel() as p:
        for role, tests in clients.iteritems():
            if role != "all":
                p.spawn(_run_tests, ctx, refspec, role, tests,
                        config.get('env'),
                        basedir=config.get('basedir', 'qa/workunits'),
                        timeout=timeout, cleanup=cleanup)

    if cleanup:
        # Clean up dirs from any non-all workunits
        for role, created in created_mountpoint.items():
            _delete_dir(ctx, role, created)

    # Execute any 'all' workunits
    if 'all' in clients:
        all_tasks = clients["all"]
        _spawn_on_all_clients(ctx, refspec, all_tasks, config.get('env'),
                              config.get('basedir', 'qa/workunits'),
                              config.get('subdir'), timeout=timeout,
                              cleanup=cleanup)
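
_delete_dir is referenced but not shown. A minimal sketch of the cleanup counterpart to _make_scratch_dir, under two assumptions that this excerpt does not confirm: the client mount lives at <testdir>/mnt.<id>, and created_mountpoint records whether this task had to create the mount dir itself:

def _delete_dir(ctx, role, created_mountpoint):
    """Hypothetical sketch: remove the per-client scratch dir, and
    the mountpoint too if _make_scratch_dir had to create it."""
    (remote,) = ctx.cluster.only(role).remotes.keys()
    testdir = teuthology.get_testdir(ctx)
    id_ = role.rsplit('.', 1)[-1]
    mnt = os.path.join(testdir, 'mnt.{0}'.format(id_))
    # the scratch dir was created inside the mounted filesystem
    remote.run(args=['sudo', 'rm', '-rf', '--',
                     os.path.join(mnt, 'client.{0}'.format(id_))])
    if created_mountpoint:
        # the mount dir was ours to create, so it is ours to remove
        remote.run(args=['rmdir', '--', mnt])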
Example #6
File: qemu.py Project: anlaneg/ceph
@contextlib.contextmanager
def generate_iso(ctx, config):
    """Execute system commands to generate iso"""
    log.info('generating iso...')
    testdir = teuthology.get_testdir(ctx)

    # use ctx.config instead of config, because config has been
    # through teuthology.replace_all_with_clients()
    refspec = get_refspec_after_overrides(ctx.config, {})

    git_url = teuth_config.get_ceph_qa_suite_git_url()
    log.info('Pulling tests from %s ref %s', git_url, refspec)

    for client, client_config in config.iteritems():
        assert 'test' in client_config, 'You must specify a test to run'
        test = client_config['test']

        (remote,) = ctx.cluster.only(client).remotes.keys()

        clone_dir = '{tdir}/clone.{role}'.format(tdir=testdir, role=client)
        remote.run(args=refspec.clone(git_url, clone_dir))

        src_dir = os.path.dirname(__file__)
        userdata_path = os.path.join(testdir, 'qemu', 'userdata.' + client)
        metadata_path = os.path.join(testdir, 'qemu', 'metadata.' + client)

        with file(os.path.join(src_dir, 'userdata_setup.yaml'), 'rb') as f:
            test_setup = ''.join(f.readlines())
            # configuring the commands to setup the nfs mount
            mnt_dir = "/export/{client}".format(client=client)
            test_setup = test_setup.format(
                mnt_dir=mnt_dir
            )

        with file(os.path.join(src_dir, 'userdata_teardown.yaml'), 'rb') as f:
            test_teardown = ''.join(f.readlines())

        user_data = test_setup
        if client_config.get('type', 'filesystem') == 'filesystem':
            num_disks = client_config.get('disks', DEFAULT_NUM_DISKS)
            if isinstance(num_disks, list):
                num_disks = len(num_disks)
            for i in xrange(1, num_disks):
                dev_letter = chr(ord('a') + i)
                user_data += """
- |
  #!/bin/bash
  mkdir /mnt/test_{dev_letter}
  mkfs -t xfs /dev/vd{dev_letter}
  mount -t xfs /dev/vd{dev_letter} /mnt/test_{dev_letter}
""".format(dev_letter=dev_letter)

        user_data += """
- |
  #!/bin/bash
  test -d /etc/ceph || mkdir /etc/ceph
  cp /mnt/cdrom/ceph.* /etc/ceph/
"""

        cloud_config_archive = client_config.get('cloud_config_archive', [])
        if cloud_config_archive:
            user_data += yaml.safe_dump(cloud_config_archive, default_style='|',
                                        default_flow_style=False)

        # this may change later to pass the directories as args to the
        # script or something. xfstests needs that.
        user_data += """
- |
  #!/bin/bash
  test -d /mnt/test_b && cd /mnt/test_b
  /mnt/cdrom/test.sh > /mnt/log/test.log 2>&1 && touch /mnt/log/success
""" + test_teardown

        user_data = user_data.format(
            ceph_branch=ctx.config.get('branch'),
            ceph_sha1=ctx.config.get('sha1'))
        teuthology.write_file(remote, userdata_path, StringIO(user_data))

        with file(os.path.join(src_dir, 'metadata.yaml'), 'rb') as f:
            teuthology.write_file(remote, metadata_path, f)

        test_file = '{tdir}/qemu/{client}.test.sh'.format(tdir=testdir, client=client)

        log.info('fetching test %s for %s', test, client)
        remote.run(
            args=[
                'cp', '--', os.path.join(clone_dir, test), test_file,
                run.Raw('&&'),
                'chmod', '755', test_file,
                ],
            )
        remote.run(
            args=[
                'genisoimage', '-quiet', '-input-charset', 'utf-8',
                '-volid', 'cidata', '-joliet', '-rock',
                '-o', '{tdir}/qemu/{client}.iso'.format(tdir=testdir, client=client),
                '-graft-points',
                'user-data={userdata}'.format(userdata=userdata_path),
                'meta-data={metadata}'.format(metadata=metadata_path),
                'ceph.conf=/etc/ceph/ceph.conf',
                'ceph.keyring=/etc/ceph/ceph.keyring',
                'test.sh={file}'.format(file=test_file),
                ],
            )
    try:
        yield
    finally:
        for client in config.iterkeys():
            (remote,) = ctx.cluster.only(client).remotes.keys()
            remote.run(
                args=[
                    'rm', '-rf',
                    '{tdir}/qemu/{client}.iso'.format(tdir=testdir, client=client),
                    os.path.join(testdir, 'qemu', 'userdata.' + client),
                    os.path.join(testdir, 'qemu', 'metadata.' + client),
                    '{tdir}/qemu/{client}.test.sh'.format(tdir=testdir, client=client),
                    '{tdir}/clone.{client}'.format(tdir=testdir, client=client),
                    ],
                )
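
One subtlety in the user-data assembly above: dev_letter placeholders are resolved fragment by fragment inside the loop, while ceph_branch and ceph_sha1 are resolved only once, by the final user_data.format(...) over the whole concatenated document. Any other literal brace surviving to that point would be misread as a placeholder. A tiny illustration of the failure mode (all strings hypothetical):

ok = 'git clone -b {ceph_branch} ceph.git'.format(
    ceph_branch='main', ceph_sha1='abc123')  # unused kwargs are fine

bad = "awk '{ print $1 }'"  # literal braces in a shell fragment
try:
    bad.format(ceph_branch='main', ceph_sha1='abc123')
except KeyError:
    # str.format treats '{ print $1 }' as a placeholder lookup
    print('literal braces break the final format() pass')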