Example #1
def fetch_qa_suite(branch, lock=True):
    """
    Make sure ceph-qa-suite is checked out.

    :param branch: The branch to fetch
    :param lock:   Passed through to fetch_repo()
    :returns:      The destination path
    """
    return fetch_repo(config.get_ceph_qa_suite_git_url(), branch, lock=lock)
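
A minimal usage sketch for the helper above; the branch name is an assumption for illustration:

# Hypothetical call sites (the branch name 'main' is an assumption).
suite_path = fetch_qa_suite('main')            # check out HEAD of 'main'
unlocked = fetch_qa_suite('main', lock=False)  # skip locking the shared repo
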
Example #2
def build_git_url(project, project_owner='ceph'):
    """
    Return the git URL to clone the project
    """
    if project == 'ceph-qa-suite':
        base = config.get_ceph_qa_suite_git_url()
    elif project == 'ceph':
        base = config.get_ceph_git_url()
    else:
        base = 'https://github.com/{project_owner}/{project}'
    url_templ = re.sub(r'\.git$', '', base)
    return url_templ.format(project_owner=project_owner, project=project)
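
A quick illustration of the templating above, using a hypothetical config-style base URL that ends in '.git':

import re

# Sketch of build_git_url()'s trailing-'.git' stripping and placeholder fill.
base = 'https://github.com/{project_owner}/{project}.git'  # assumed base URL
url_templ = re.sub(r'\.git$', '', base)
assert url_templ.format(project_owner='ceph', project='teuthology') == \
    'https://github.com/ceph/teuthology'
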
Example #3
def fetch_qa_suite(branch, commit=None, lock=True):
    """
    Make sure ceph-qa-suite is checked out.

    :param branch: The branch to fetch
    :param commit: The sha1 to check out. Defaults to None, which uses the
                   HEAD of the branch.
    :param lock:   Passed through to fetch_repo()
    :returns:      The destination path
    """
    return fetch_repo(config.get_ceph_qa_suite_git_url(),
                      branch,
                      commit,
                      lock=lock)
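
Hypothetical usage showing the commit pin; the branch and sha1 are placeholders:

suite_path = fetch_qa_suite('main')                    # HEAD of the branch
pinned_path = fetch_qa_suite('main', commit='abc123')  # pin to an assumed sha1
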
Example #4
    def setup(self):
        super(RGWMultisiteTests, self).setup()

        overrides = self.ctx.config.get('overrides', {})
        misc.deep_merge(self.config, overrides.get('rgw-multisite-tests', {}))

        if not self.ctx.rgw_multisite:
            raise ConfigError(
                'rgw-multisite-tests must run after the rgw-multisite task')
        realm = self.ctx.rgw_multisite.realm
        master_zone = realm.meta_master_zone()

        branch = self.config.get('branch')
        if not branch:
            # run from suite_path
            suite_path = self.ctx.config.get('suite_path')
            self.module_path = suite_path + '/../src/test/rgw/rgw_multi'
        else:
            # clone the qa branch
            repo = self.config.get('repo',
                                   teuth_config.get_ceph_qa_suite_git_url())
            log.info("cloning suite branch %s from %s...", branch, repo)
            clonedir = fetch_repo(repo, branch)
            # import its version of rgw_multi
            self.module_path = clonedir + '/src/test/rgw/rgw_multi'

        log.info("importing tests from %s", self.module_path)
        spec = importlib.util.spec_from_file_location(
            'rgw_multi', self.module_path + '/__init__.py')
        module = importlib.util.module_from_spec(spec)
        sys.modules[spec.name] = module
        spec.loader.exec_module(module)

        from rgw_multi import multisite, tests

        # create the test user
        log.info('creating test user...')
        user = multisite.User('rgw-multisite-test-user')
        user.create(master_zone, [
            '--display-name', 'Multisite Test User', '--gen-access-key',
            '--gen-secret', '--caps', 'roles=*'
        ])

        config = self.config.get('config', {})
        tests.init_multi(realm, user, tests.Config(**config))
        tests.realm_meta_checkpoint(realm)
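
The import dance in setup() is the stock importlib recipe for loading a package from an explicit path; registering the module in sys.modules before exec_module() is what lets the later `from rgw_multi import multisite, tests` resolve. A self-contained sketch, with a hypothetical path:

import importlib.util
import sys

module_path = '/tmp/suite/src/test/rgw/rgw_multi'  # hypothetical location
spec = importlib.util.spec_from_file_location(
    'rgw_multi', module_path + '/__init__.py')
module = importlib.util.module_from_spec(spec)
sys.modules[spec.name] = module  # register first so package imports resolve
spec.loader.exec_module(module)
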
Example #5
    def create_initial_config(self):
        """
        Put together the config file used as the basis for each job in the run.
        Grabs hashes for the latest ceph, kernel, and teuthology versions in
        the specified branches and pins them so we know exactly what we're
        testing.

        :returns: A JobConfig object
        """
        self.kernel_dict = self.choose_kernel()
        ceph_hash = self.choose_ceph_hash()
        # We don't store ceph_version because we don't use it yet outside of
        # logging.
        self.choose_ceph_version(ceph_hash)
        suite_branch = self.choose_suite_branch()
        suite_hash = self.choose_suite_hash(suite_branch)
        if self.args.suite_dir:
            self.suite_repo_path = self.args.suite_dir
        else:
            self.suite_repo_path = util.fetch_repos(suite_branch,
                                                    test_name=self.name)
        teuthology_branch, teuthology_sha1 = self.choose_teuthology_branch()

        if self.args.distro_version:
            self.args.distro_version, _ = \
                OS.version_codename(self.args.distro, self.args.distro_version)
        self.config_input = dict(
            suite=self.args.suite,
            suite_branch=suite_branch,
            suite_hash=suite_hash,
            ceph_branch=self.args.ceph_branch,
            ceph_hash=ceph_hash,
            ceph_repo=config.get_ceph_git_url(),
            teuthology_branch=teuthology_branch,
            teuthology_sha1=teuthology_sha1,
            machine_type=self.args.machine_type,
            distro=self.args.distro,
            distro_version=self.args.distro_version,
            archive_upload=config.archive_upload,
            archive_upload_key=config.archive_upload_key,
            suite_repo=config.get_ceph_qa_suite_git_url(),
            suite_relpath=self.args.suite_relpath,
            flavor=self.args.flavor,
        )
        return self.build_base_config()
Example #6
def process_args(args):
    conf = YamlConfig()
    rename_args = {
        'ceph': 'ceph_branch',
        'sha1': 'ceph_sha1',
        'kernel': 'kernel_branch',
        # FIXME: ceph flavor and kernel flavor are separate things
        'flavor': 'kernel_flavor',
        '<config_yaml>': 'base_yaml_paths',
        'filter': 'filter_in',
    }
    for (key, value) in args.items():
        # Translate --foo-bar to foo_bar
        key = key.lstrip('--').replace('-', '_')
        # Rename the key if necessary
        key = rename_args.get(key) or key
        if key == 'suite_branch':
            value = value or override_arg_defaults('--suite-branch', None)
        if key == 'suite' and value is not None:
            value = normalize_suite_name(value)
        if key == 'suite_relpath' and value is None:
            value = ''
        elif key in ('limit', 'priority', 'num', 'newest', 'seed', 'job_threshold'):
            value = int(value)
        elif key == 'subset' and value is not None:
            # take input string '2/3' and turn into (2, 3)
            value = tuple(map(int, value.split('/')))
        elif key in ('filter_all', 'filter_in', 'filter_out', 'rerun_statuses'):
            if not value:
                value = []
            else:
                value = [x.strip() for x in value.split(',')]
        elif key == 'ceph_repo':
            value = expand_short_repo_name(
                value,
                config.get_ceph_git_url())
        elif key == 'suite_repo':
            value = expand_short_repo_name(
                value,
                config.get_ceph_qa_suite_git_url())
        elif key in ('validate_sha1', 'filter_fragments'):
            value = strtobool(value)
        conf[key] = value
    return conf
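
Two of the transforms above, as a tiny self-contained demo:

# '--suite-branch' becomes the key 'suite_branch'.
key = '--suite-branch'.lstrip('-').replace('-', '_')
assert key == 'suite_branch'

# A '--subset' string like '2/3' becomes the tuple (2, 3).
assert tuple(map(int, '2/3'.split('/'))) == (2, 3)
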
Example #7
def _run_tests(ctx,
               refspec,
               role,
               tests,
               env,
               basedir,
               subdir=None,
               timeout=None,
               cleanup=True,
               coverage_and_limits=True):
    """
    Run the individual test. Create a scratch directory and then extract the
    workunits from git. Make the executables, and then run the tests.
    Clean up (remove files created) after the tests are finished.

    :param ctx:     Context
    :param refspec: branch, sha1, or version tag used to identify this
                    build
    :param tests:   specific tests specified.
    :param env:     environment set in yaml file.  Could be None.
    :param subdir:  subdirectory set in yaml file.  Could be None
    :param timeout: If present, use the 'timeout' command on the remote host
                    to limit execution time. Must be specified by a number
                    followed by 's' for seconds, 'm' for minutes, 'h' for
                    hours, or 'd' for days. If '0' or anything that evaluates
                    to False is passed, the 'timeout' command is not used.
    """
    testdir = misc.get_testdir(ctx)
    assert isinstance(role, str)
    cluster, type_, id_ = misc.split_role(role)
    assert type_ == 'client'
    remote = get_remote_for_role(ctx, role)
    mnt = _client_mountpoint(ctx, cluster, id_)
    # subdir so we can remove and recreate this a lot without sudo
    if subdir is None:
        scratch_tmp = os.path.join(mnt, 'client.{id}'.format(id=id_), 'tmp')
    else:
        scratch_tmp = os.path.join(mnt, subdir)
    clonedir = '{tdir}/clone.{role}'.format(tdir=testdir, role=role)
    srcdir = '{cdir}/{basedir}'.format(cdir=clonedir, basedir=basedir)

    git_url = teuth_config.get_ceph_qa_suite_git_url()
    # If we are running an upgrade test, ceph-ci may not have branches like
    # `jewel`, so fall back to ceph.git as an alternative.
    try:
        remote.run(logger=log.getChild(role),
                   args=refspec.clone(git_url, clonedir))
    except CommandFailedError:
        if git_url.endswith('/ceph-ci.git'):
            alt_git_url = git_url.replace('/ceph-ci.git', '/ceph.git')
        elif git_url.endswith('/ceph-ci'):
            alt_git_url = re.sub(r'/ceph-ci$', '/ceph.git', git_url)
        else:
            raise
        log.info(
            "failed to check out '%s' from %s; will also try in %s",
            refspec,
            git_url,
            alt_git_url,
        )
        remote.run(logger=log.getChild(role),
                   args=refspec.clone(alt_git_url, clonedir))
    remote.run(
        logger=log.getChild(role),
        args=[
            'cd',
            '--',
            srcdir,
            run.Raw('&&'),
            'if',
            'test',
            '-e',
            'Makefile',
            run.Raw(';'),
            'then',
            'make',
            run.Raw(';'),
            'fi',
            run.Raw('&&'),
            'find',
            '-executable',
            '-type',
            'f',
            '-printf',
            r'%P\0',
            run.Raw('>{tdir}/workunits.list.{role}'.format(tdir=testdir,
                                                           role=role)),
        ],
    )

    workunits_file = '{tdir}/workunits.list.{role}'.format(tdir=testdir,
                                                           role=role)
    workunits = sorted(
        misc.get_file(remote, workunits_file).decode().split('\0'))
    assert workunits

    try:
        assert isinstance(tests, list)
        for spec in tests:
            dir_or_fname, *optional_args = shlex.split(spec)
            log.info('Running workunits matching %s on %s...', dir_or_fname,
                     role)
            # match executables named "foo" or "foo/*" with workunit named
            # "foo"
            to_run = [
                w for w in workunits
                if os.path.commonpath([w, dir_or_fname]) == dir_or_fname
            ]
            if not to_run:
                raise RuntimeError(
                    'Spec did not match any workunits: {spec!r}'.format(
                        spec=spec))
            for workunit in to_run:
                log.info('Running workunit %s...', workunit)
                args = [
                    'mkdir',
                    '-p',
                    '--',
                    scratch_tmp,
                    run.Raw('&&'),
                    'cd',
                    '--',
                    scratch_tmp,
                    run.Raw('&&'),
                    run.Raw('CEPH_CLI_TEST_DUP_COMMAND=1'),
                    run.Raw('CEPH_REF={ref}'.format(ref=refspec)),
                    run.Raw('TESTDIR="{tdir}"'.format(tdir=testdir)),
                    run.Raw('CEPH_ARGS="--cluster {0}"'.format(cluster)),
                    run.Raw('CEPH_ID="{id}"'.format(id=id_)),
                    run.Raw('PATH=$PATH:/usr/sbin'),
                    run.Raw('CEPH_BASE={dir}'.format(dir=clonedir)),
                    run.Raw('CEPH_ROOT={dir}'.format(dir=clonedir)),
                ]
                if env is not None:
                    for var, val in env.items():
                        quoted_val = pipes.quote(val)
                        env_arg = '{var}={val}'.format(var=var, val=quoted_val)
                        args.append(run.Raw(env_arg))
                if coverage_and_limits:
                    args.extend([
                        'adjust-ulimits', 'ceph-coverage',
                        '{tdir}/archive/coverage'.format(tdir=testdir)
                    ])
                if timeout and timeout != '0':
                    args.extend(['timeout', timeout])
                args.extend([
                    '{srcdir}/{workunit}'.format(
                        srcdir=srcdir,
                        workunit=workunit,
                    ),
                ])
                remote.run(
                    logger=log.getChild(role),
                    args=args + optional_args,
                    label="workunit test {workunit}".format(workunit=workunit))
                if cleanup:
                    args = ['sudo', 'rm', '-rf', '--', scratch_tmp]
                    remote.run(logger=log.getChild(role),
                               args=args,
                               timeout=(60 * 60))
    finally:
        log.info('Stopping %s on %s...', tests, role)
        args = ['sudo', 'rm', '-rf', '--', workunits_file, clonedir]
        # N.B. don't cleanup scratch_tmp! If the mount is broken then rm will hang.
        remote.run(
            logger=log.getChild(role),
            args=args,
        )
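
The os.path.commonpath() test above matches an executable named 'foo' or anything under 'foo/', without the false positive a naive prefix check would give for 'foobar'. A small demonstration:

import os

workunits = ['foo', 'foo/bar.sh', 'foobar/baz.sh']
dir_or_fname = 'foo'
to_run = [w for w in workunits
          if os.path.commonpath([w, dir_or_fname]) == dir_or_fname]
assert to_run == ['foo', 'foo/bar.sh']  # 'foobar/baz.sh' is excluded
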
Example #8
def generate_iso(ctx, config):
    """Execute system commands to generate iso"""
    log.info('generating iso...')
    testdir = teuthology.get_testdir(ctx)

    # use ctx.config instead of config, because config has been
    # through teuthology.replace_all_with_clients()
    refspec = get_refspec_after_overrides(ctx.config, {})

    git_url = teuth_config.get_ceph_qa_suite_git_url()
    log.info('Pulling tests from %s ref %s', git_url, refspec)

    for client, client_config in config.iteritems():
        assert 'test' in client_config, 'You must specify a test to run'
        test = client_config['test']

        (remote,) = ctx.cluster.only(client).remotes.keys()

        clone_dir = '{tdir}/qemu_clone.{role}'.format(tdir=testdir, role=client)
        remote.run(args=refspec.clone(git_url, clone_dir))

        src_dir = os.path.dirname(__file__)
        userdata_path = os.path.join(testdir, 'qemu', 'userdata.' + client)
        metadata_path = os.path.join(testdir, 'qemu', 'metadata.' + client)

        with file(os.path.join(src_dir, 'userdata_setup.yaml'), 'rb') as f:
            test_setup = ''.join(f.readlines())
            # configuring the commands to setup the nfs mount
            mnt_dir = "/export/{client}".format(client=client)
            test_setup = test_setup.format(
                mnt_dir=mnt_dir
            )

        with file(os.path.join(src_dir, 'userdata_teardown.yaml'), 'rb') as f:
            test_teardown = ''.join(f.readlines())

        user_data = test_setup
        if client_config.get('type', 'filesystem') == 'filesystem':
            num_disks = client_config.get('disks', DEFAULT_NUM_DISKS)
            if isinstance(num_disks, list):
                num_disks = len(num_disks)
            for i in xrange(1, num_disks):
                dev_letter = chr(ord('a') + i)
                user_data += """
- |
  #!/bin/bash
  mkdir /mnt/test_{dev_letter}
  mkfs -t xfs /dev/vd{dev_letter}
  mount -t xfs /dev/vd{dev_letter} /mnt/test_{dev_letter}
""".format(dev_letter=dev_letter)

        user_data += """
- |
  #!/bin/bash
  test -d /etc/ceph || mkdir /etc/ceph
  cp /mnt/cdrom/ceph.* /etc/ceph/
"""

        cloud_config_archive = client_config.get('cloud_config_archive', [])
        if cloud_config_archive:
            user_data += yaml.safe_dump(cloud_config_archive, default_style='|',
                                        default_flow_style=False)

        # this may change later to pass the directories as args to the
        # script or something. xfstests needs that.
        user_data += """
- |
  #!/bin/bash
  test -d /mnt/test_b && cd /mnt/test_b
  /mnt/cdrom/test.sh > /mnt/log/test.log 2>&1 && touch /mnt/log/success
""" + test_teardown

        user_data = user_data.format(
            ceph_branch=ctx.config.get('branch'),
            ceph_sha1=ctx.config.get('sha1'))
        teuthology.write_file(remote, userdata_path, StringIO(user_data))

        with file(os.path.join(src_dir, 'metadata.yaml'), 'rb') as f:
            teuthology.write_file(remote, metadata_path, f)

        test_file = '{tdir}/qemu/{client}.test.sh'.format(tdir=testdir, client=client)

        log.info('fetching test %s for %s', test, client)
        remote.run(
            args=[
                'cp', '--', os.path.join(clone_dir, test), test_file,
                run.Raw('&&'),
                'chmod', '755', test_file,
                ],
            )
        remote.run(
            args=[
                'genisoimage', '-quiet', '-input-charset', 'utf-8',
                '-volid', 'cidata', '-joliet', '-rock',
                '-o', '{tdir}/qemu/{client}.iso'.format(tdir=testdir, client=client),
                '-graft-points',
                'user-data={userdata}'.format(userdata=userdata_path),
                'meta-data={metadata}'.format(metadata=metadata_path),
                'ceph.conf=/etc/ceph/ceph.conf',
                'ceph.keyring=/etc/ceph/ceph.keyring',
                'test.sh={file}'.format(file=test_file),
                ],
            )
    try:
        yield
    finally:
        for client in config.iterkeys():
            (remote,) = ctx.cluster.only(client).remotes.keys()
            remote.run(
                args=[
                    'rm', '-rf',
                    '{tdir}/qemu/{client}.iso'.format(tdir=testdir, client=client),
                    os.path.join(testdir, 'qemu', 'userdata.' + client),
                    os.path.join(testdir, 'qemu', 'metadata.' + client),
                    '{tdir}/qemu/{client}.test.sh'.format(tdir=testdir, client=client),
                    '{tdir}/qemu_clone.{client}'.format(tdir=testdir, client=client),
                    ],
                )
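
The disk loop above starts at index 1, presumably because the first disk (/dev/vda) holds the system image, so data disks begin at /dev/vdb:

# Demo of the device-letter arithmetic above.
num_disks = 3  # assumed value
letters = [chr(ord('a') + i) for i in range(1, num_disks)]
assert letters == ['b', 'c']  # i.e. /dev/vdb and /dev/vdc
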
Example #9
def task(ctx, config):
    """
    Run all cram tests from the specified urls on the specified
    clients. Each client runs tests in parallel.

    Limitations:
    Tests must have a .t suffix. Tests with duplicate names will
    overwrite each other, so only the last one will run.

    For example::

        tasks:
        - ceph:
        - cram:
            clients:
              client.0:
              - http://download.ceph.com/qa/test.t
              - http://download.ceph.com/qa/test2.t
              client.1: [http://download.ceph.com/qa/test.t]
            branch: foo

    You can also run a list of cram tests on all clients::

        tasks:
        - ceph:
        - cram:
            clients:
              all: [http://download.ceph.com/qa/test.t]

    :param ctx: Context
    :param config: Configuration
    """
    assert isinstance(config, dict)
    assert 'clients' in config and isinstance(config['clients'], dict), \
           'configuration must contain a dictionary of clients'

    clients = teuthology.replace_all_with_clients(ctx.cluster,
                                                  config['clients'])
    testdir = teuthology.get_testdir(ctx)

    overrides = ctx.config.get('overrides', {})
    teuthology.deep_merge(config, overrides.get('workunit', {}))

    refspec = config.get('branch')
    if refspec is None:
        refspec = config.get('tag')
    if refspec is None:
        refspec = config.get('sha1')
    if refspec is None:
        refspec = 'HEAD'

    git_url = teuth_config.get_ceph_qa_suite_git_url()
    if git_url.endswith('.git'):
        git_url = git_url[:-4]

    try:
        for client, tests in clients.iteritems():
            (remote, ) = ctx.cluster.only(client).remotes.iterkeys()
            client_dir = '{tdir}/archive/cram.{role}'.format(tdir=testdir,
                                                             role=client)
            remote.run(args=[
                'mkdir',
                '--',
                client_dir,
                run.Raw('&&'),
                'virtualenv',
                '{tdir}/virtualenv'.format(tdir=testdir),
                run.Raw('&&'),
                '{tdir}/virtualenv/bin/pip'.format(tdir=testdir),
                'install',
                'cram==0.6',
            ], )
            for test in tests:
                url = test.format(gh_url=git_url, branch=refspec)
                log.info('fetching test %s for %s', url, client)
                assert test.endswith('.t'), 'tests must end in .t'
                remote.run(args=[
                    'wget',
                    '-nc',
                    '-nv',
                    '-P',
                    client_dir,
                    '--',
                    url,
                ], )

        with parallel() as p:
            for role in clients.iterkeys():
                p.spawn(_run_tests, ctx, role)
    finally:
        for client, tests in clients.iteritems():
            (remote, ) = ctx.cluster.only(client).remotes.iterkeys()
            client_dir = '{tdir}/archive/cram.{role}'.format(tdir=testdir,
                                                             role=client)
            test_files = set([test.rsplit('/', 1)[1] for test in tests])

            # remove test files unless they failed
            for test_file in test_files:
                abs_file = os.path.join(client_dir, test_file)
                remote.run(args=[
                    'test',
                    '-f',
                    abs_file + '.err',
                    run.Raw('||'),
                    'rm',
                    '-f',
                    '--',
                    abs_file,
                ], )

            # ignore failure since more than one client may
            # be run on a host, and the client dir should be
            # non-empty if the test failed
            remote.run(args=[
                'rm',
                '-rf',
                '--',
                '{tdir}/virtualenv'.format(tdir=testdir),
                run.Raw(';'),
                'rmdir',
                '--ignore-fail-on-non-empty',
                client_dir,
            ], )
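
In this URL-based variant, test entries may embed {gh_url} and {branch} placeholders that test.format() expands per run. A hypothetical example:

# Illustration only; the URL pattern here is an assumption.
git_url = 'https://github.com/ceph/ceph-qa-suite'  # '.git' already stripped
refspec = 'foo'
test = '{gh_url}/raw/{branch}/qa/test.t'
url = test.format(gh_url=git_url, branch=refspec)
assert url == 'https://github.com/ceph/ceph-qa-suite/raw/foo/qa/test.t'
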
Example #10
def task(ctx, config):
    """
    Run all cram tests from the specified paths on the specified
    clients. Each client runs tests in parallel.

    Limitations:
    Tests must have a .t suffix. Tests with duplicate names will
    overwrite each other, so only the last one will run.

    For example::

        tasks:
        - ceph:
        - cram:
            clients:
              client.0:
              - qa/test.t
              - qa/test2.t
              client.1: [qa/test.t]
            branch: foo

    You can also run a list of cram tests on all clients::

        tasks:
        - ceph:
        - cram:
            clients:
              all: [qa/test.t]

    :param ctx: Context
    :param config: Configuration
    """
    assert isinstance(config, dict)
    assert 'clients' in config and isinstance(config['clients'], dict), \
           'configuration must contain a dictionary of clients'

    clients = teuthology.replace_all_with_clients(ctx.cluster,
                                                  config['clients'])
    testdir = teuthology.get_testdir(ctx)

    overrides = ctx.config.get('overrides', {})
    refspec = get_refspec_after_overrides(config, overrides)

    git_url = teuth_config.get_ceph_qa_suite_git_url()
    log.info('Pulling tests from %s ref %s', git_url, refspec)

    try:
        for client, tests in clients.iteritems():
            (remote,) = ctx.cluster.only(client).remotes.iterkeys()
            client_dir = '{tdir}/archive/cram.{role}'.format(tdir=testdir, role=client)
            remote.run(
                args=[
                    'mkdir', '--', client_dir,
                    run.Raw('&&'),
                    'virtualenv', '{tdir}/virtualenv'.format(tdir=testdir),
                    run.Raw('&&'),
                    '{tdir}/virtualenv/bin/pip'.format(tdir=testdir),
                    'install', 'cram==0.6',
                    ],
                )
            clone_dir = '{tdir}/clone.{role}'.format(tdir=testdir, role=client)
            remote.run(args=refspec.clone(git_url, clone_dir))

            for test in tests:
                assert test.endswith('.t'), 'tests must end in .t'
                remote.run(
                    args=[
                        'cp', '--', os.path.join(clone_dir, test), client_dir,
                        ],
                    )

        with parallel() as p:
            for role in clients.iterkeys():
                p.spawn(_run_tests, ctx, role)
    finally:
        for client, tests in clients.iteritems():
            (remote,) = ctx.cluster.only(client).remotes.iterkeys()
            client_dir = '{tdir}/archive/cram.{role}'.format(tdir=testdir, role=client)
            test_files = set([test.rsplit('/', 1)[1] for test in tests])

            # remove test files unless they failed
            for test_file in test_files:
                abs_file = os.path.join(client_dir, test_file)
                remote.run(
                    args=[
                        'test', '-f', abs_file + '.err',
                        run.Raw('||'),
                        'rm', '-f', '--', abs_file,
                        ],
                    )

            # ignore failure since more than one client may
            # be run on a host, and the client dir should be
            # non-empty if the test failed
            remote.run(
                args=[
                    'rm', '-rf', '--',
                    '{tdir}/virtualenv'.format(tdir=testdir),
                    clone_dir,
                    run.Raw(';'),
                    'rmdir', '--ignore-fail-on-non-empty', client_dir,
                    ],
                )
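
This newer variant clones the suite repo instead of fetching individual files. refspec.clone(git_url, clone_dir) is assumed to return the argument vector handed to remote.run(); below is a rough, illustrative stand-in, not teuthology's actual Refspec class:

# Illustrative stand-in only; the real Refspec API may differ.
class FakeBranchRefspec(object):
    def __init__(self, branch):
        self.branch = branch

    def clone(self, git_url, clone_dir):
        # An argument vector suitable for remote.run(args=...).
        return ['git', 'clone', '--depth', '1',
                '--branch', self.branch, git_url, clone_dir]

args = FakeBranchRefspec('foo').clone(
    'https://github.com/ceph/ceph-qa-suite', '/tmp/clone.client.0')
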
Example #11
def _run_tests(ctx, refspec, role, tests, env, basedir,
               subdir=None, timeout=None, cleanup=True):
    """
    Run the individual test. Create a scratch directory and then extract the
    workunits from git. Make the executables, and then run the tests.
    Clean up (remove files created) after the tests are finished.

    :param ctx:     Context
    :param refspec: branch, sha1, or version tag used to identify this
                    build
    :param tests:   specific tests specified.
    :param env:     environment set in yaml file.  Could be None.
    :param subdir:  subdirectory set in yaml file.  Could be None
    :param timeout: If present, use the 'timeout' command on the remote host
                    to limit execution time. Must be specified by a number
                    followed by 's' for seconds, 'm' for minutes, 'h' for
                    hours, or 'd' for days. If '0' or anything that evaluates
                    to False is passed, the 'timeout' command is not used.
    """
    testdir = misc.get_testdir(ctx)
    assert isinstance(role, basestring)
    cluster, type_, id_ = misc.split_role(role)
    assert type_ == 'client'
    remote = get_remote_for_role(ctx, role)
    mnt = _client_mountpoint(ctx, cluster, id_)
    # subdir so we can remove and recreate this a lot without sudo
    if subdir is None:
        scratch_tmp = os.path.join(mnt, 'client.{id}'.format(id=id_), 'tmp')
    else:
        scratch_tmp = os.path.join(mnt, subdir)
    clonedir = '{tdir}/clone.{role}'.format(tdir=testdir, role=role)
    srcdir = '{cdir}/{basedir}'.format(cdir=clonedir,
                                       basedir=basedir)

    git_url = teuth_config.get_ceph_qa_suite_git_url()
    # If we are running an upgrade test, ceph-ci may not have branches like
    # `jewel`, so fall back to ceph.git as an alternative.
    try:
        remote.run(logger=log.getChild(role),
                   args=refspec.clone(git_url, clonedir))
    except CommandFailedError:
        if git_url.endswith('/ceph-ci.git'):
            alt_git_url = git_url.replace('/ceph-ci.git', '/ceph.git')
        elif git_url.endswith('/ceph-ci'):
            alt_git_url = re.sub(r'/ceph-ci$', '/ceph.git', git_url)
        else:
            raise
        log.info(
            "failed to check out '%s' from %s; will also try in %s",
            refspec,
            git_url,
            alt_git_url,
        )
        remote.run(logger=log.getChild(role),
                   args=refspec.clone(alt_git_url, clonedir))
    remote.run(
        logger=log.getChild(role),
        args=[
            'cd', '--', srcdir,
            run.Raw('&&'),
            'if', 'test', '-e', 'Makefile', run.Raw(';'), 'then', 'make', run.Raw(';'), 'fi',
            run.Raw('&&'),
            'find', '-executable', '-type', 'f', '-printf', r'%P\0',
            run.Raw('>{tdir}/workunits.list.{role}'.format(tdir=testdir, role=role)),
        ],
    )

    workunits_file = '{tdir}/workunits.list.{role}'.format(tdir=testdir, role=role)
    workunits = sorted(misc.get_file(remote, workunits_file).split('\0'))
    assert workunits

    try:
        assert isinstance(tests, list)
        for spec in tests:
            log.info('Running workunits matching %s on %s...', spec, role)
            prefix = '{spec}/'.format(spec=spec)
            to_run = [w for w in workunits if w == spec or w.startswith(prefix)]
            if not to_run:
                raise RuntimeError('Spec did not match any workunits: {spec!r}'.format(spec=spec))
            for workunit in to_run:
                log.info('Running workunit %s...', workunit)
                args = [
                    'mkdir', '-p', '--', scratch_tmp,
                    run.Raw('&&'),
                    'cd', '--', scratch_tmp,
                    run.Raw('&&'),
                    run.Raw('CEPH_CLI_TEST_DUP_COMMAND=1'),
                    run.Raw('CEPH_REF={ref}'.format(ref=refspec)),
                    run.Raw('TESTDIR="{tdir}"'.format(tdir=testdir)),
                    run.Raw('CEPH_ARGS="--cluster {0}"'.format(cluster)),
                    run.Raw('CEPH_ID="{id}"'.format(id=id_)),
                    run.Raw('PATH=$PATH:/usr/sbin'),
                    run.Raw('CEPH_BASE={dir}'.format(dir=clonedir)),
                    run.Raw('CEPH_ROOT={dir}'.format(dir=clonedir)),
                ]
                if env is not None:
                    for var, val in env.iteritems():
                        quoted_val = pipes.quote(val)
                        env_arg = '{var}={val}'.format(var=var, val=quoted_val)
                        args.append(run.Raw(env_arg))
                args.extend([
                    'adjust-ulimits',
                    'ceph-coverage',
                    '{tdir}/archive/coverage'.format(tdir=testdir)])
                if timeout and timeout != '0':
                    args.extend(['timeout', timeout])
                args.extend([
                    '{srcdir}/{workunit}'.format(
                        srcdir=srcdir,
                        workunit=workunit,
                    ),
                ])
                remote.run(
                    logger=log.getChild(role),
                    args=args,
                    label="workunit test {workunit}".format(workunit=workunit)
                )
                if cleanup:
                    args=['sudo', 'rm', '-rf', '--', scratch_tmp]
                    remote.run(logger=log.getChild(role), args=args)
    finally:
        log.info('Stopping %s on %s...', tests, role)
        args=['sudo', 'rm', '-rf', '--', workunits_file, clonedir]
        if cleanup:
            log.info("and cleaning up scratch: {}".format(scratch_tmp))
            args.append(scratch_tmp)
        remote.run(
            logger=log.getChild(role),
            args=args,
        )
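
This older variant uses plain string-prefix matching instead of the os.path.commonpath() check seen in the newer version; the effect is the same for well-formed specs:

workunits = ['foo', 'foo/bar.sh', 'foobar/baz.sh']
spec = 'foo'
prefix = '{spec}/'.format(spec=spec)
to_run = [w for w in workunits if w == spec or w.startswith(prefix)]
assert to_run == ['foo', 'foo/bar.sh']
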
Example #12
File: qemu.py Project: anlaneg/ceph
def generate_iso(ctx, config):
    """Execute system commands to generate iso"""
    log.info('generating iso...')
    testdir = teuthology.get_testdir(ctx)

    # use ctx.config instead of config, because config has been
    # through teuthology.replace_all_with_clients()
    refspec = get_refspec_after_overrides(ctx.config, {})

    git_url = teuth_config.get_ceph_qa_suite_git_url()
    log.info('Pulling tests from %s ref %s', git_url, refspec)

    for client, client_config in config.iteritems():
        assert 'test' in client_config, 'You must specify a test to run'
        test = client_config['test']

        (remote,) = ctx.cluster.only(client).remotes.keys()

        clone_dir = '{tdir}/clone.{role}'.format(tdir=testdir, role=client)
        remote.run(args=refspec.clone(git_url, clone_dir))

        src_dir = os.path.dirname(__file__)
        userdata_path = os.path.join(testdir, 'qemu', 'userdata.' + client)
        metadata_path = os.path.join(testdir, 'qemu', 'metadata.' + client)

        with file(os.path.join(src_dir, 'userdata_setup.yaml'), 'rb') as f:
            test_setup = ''.join(f.readlines())
            # configuring the commands to setup the nfs mount
            mnt_dir = "/export/{client}".format(client=client)
            test_setup = test_setup.format(
                mnt_dir=mnt_dir
            )

        with file(os.path.join(src_dir, 'userdata_teardown.yaml'), 'rb') as f:
            test_teardown = ''.join(f.readlines())

        user_data = test_setup
        if client_config.get('type', 'filesystem') == 'filesystem':
            num_disks = client_config.get('disks', DEFAULT_NUM_DISKS)
            if isinstance(num_disks, list):
                num_disks = len(num_disks)
            for i in xrange(1, num_disks):
                dev_letter = chr(ord('a') + i)
                user_data += """
- |
  #!/bin/bash
  mkdir /mnt/test_{dev_letter}
  mkfs -t xfs /dev/vd{dev_letter}
  mount -t xfs /dev/vd{dev_letter} /mnt/test_{dev_letter}
""".format(dev_letter=dev_letter)

        user_data += """
- |
  #!/bin/bash
  test -d /etc/ceph || mkdir /etc/ceph
  cp /mnt/cdrom/ceph.* /etc/ceph/
"""

        cloud_config_archive = client_config.get('cloud_config_archive', [])
        if cloud_config_archive:
            user_data += yaml.safe_dump(cloud_config_archive, default_style='|',
                                        default_flow_style=False)

        # this may change later to pass the directories as args to the
        # script or something. xfstests needs that.
        user_data += """
- |
  #!/bin/bash
  test -d /mnt/test_b && cd /mnt/test_b
  /mnt/cdrom/test.sh > /mnt/log/test.log 2>&1 && touch /mnt/log/success
""" + test_teardown

        user_data = user_data.format(
            ceph_branch=ctx.config.get('branch'),
            ceph_sha1=ctx.config.get('sha1'))
        teuthology.write_file(remote, userdata_path, StringIO(user_data))

        with file(os.path.join(src_dir, 'metadata.yaml'), 'rb') as f:
            teuthology.write_file(remote, metadata_path, f)

        test_file = '{tdir}/qemu/{client}.test.sh'.format(tdir=testdir, client=client)

        log.info('fetching test %s for %s', test, client)
        remote.run(
            args=[
                'cp', '--', os.path.join(clone_dir, test), test_file,
                run.Raw('&&'),
                'chmod', '755', test_file,
                ],
            )
        remote.run(
            args=[
                'genisoimage', '-quiet', '-input-charset', 'utf-8',
                '-volid', 'cidata', '-joliet', '-rock',
                '-o', '{tdir}/qemu/{client}.iso'.format(tdir=testdir, client=client),
                '-graft-points',
                'user-data={userdata}'.format(userdata=userdata_path),
                'meta-data={metadata}'.format(metadata=metadata_path),
                'ceph.conf=/etc/ceph/ceph.conf',
                'ceph.keyring=/etc/ceph/ceph.keyring',
                'test.sh={file}'.format(file=test_file),
                ],
            )
    try:
        yield
    finally:
        for client in config.iterkeys():
            (remote,) = ctx.cluster.only(client).remotes.keys()
            remote.run(
                args=[
                    'rm', '-rf',
                    '{tdir}/qemu/{client}.iso'.format(tdir=testdir, client=client),
                    os.path.join(testdir, 'qemu', 'userdata.' + client),
                    os.path.join(testdir, 'qemu', 'metadata.' + client),
                    '{tdir}/qemu/{client}.test.sh'.format(tdir=testdir, client=client),
                    '{tdir}/clone.{client}'.format(tdir=testdir, client=client),
                    ],
                )
Example #13
                              entirely new suite and including only jobs whose
                              descriptions match the selected ones. It does so
                              using the same logic as --filter.
                              Of all the flags that were passed when scheduling
                              the original run, the resulting one will only
                              inherit the suite value. Any others must be
                              passed as normal while scheduling with this
                              feature. For random tests involving a facet
                              whose path ends with the '$' operator, you may
                              want to use the --seed argument to repeat them.
 -R, --rerun-statuses <statuses>
                              A comma-separated list of statuses to be used
                              with --rerun. Supported statuses are: 'dead',
                              'fail', 'pass', 'queued', 'running', 'waiting'
                              [default: fail,dead]
 --seed SEED                  A random number, mostly useful along with the
                              --rerun argument. This number can be found in
                              the output of the teuthology-suite command.

""".format(
    default_machine_type=config.default_machine_type,
    default_results_timeout=config.results_timeout,
    default_ceph_repo=config.get_ceph_git_url(),
    default_suite_repo=config.get_ceph_qa_suite_git_url(),
)


def main(argv=sys.argv[1:]):
    args = docopt.docopt(doc, argv=argv)
    return teuthology.suite.main(args)
Example #14
                              passed as normal while scheduling with this
                              feature. For random tests involving a facet
                              whose path ends with the '$' operator, you may
                              want to use the --seed argument to repeat them.
 -R, --rerun-statuses <statuses>
                              A comma-separated list of statuses to be used
                              with --rerun. Supported statuses are: 'dead',
                              'fail', 'pass', 'queued', 'running', 'waiting'
                              [default: fail,dead]
 --seed SEED                  A random number, mostly useful along with the
                              --rerun argument. This number can be found in
                              the output of the teuthology-suite command. Use
                              -1 for a random seed [default: -1].

""".format(
    default_machine_type=config.default_machine_type,
    default_results_timeout=config.results_timeout,
    default_ceph_repo=defaults('--ceph-repo',
                            config.get_ceph_git_url()),
    default_suite_repo=defaults('--suite-repo',
                            config.get_ceph_qa_suite_git_url()),
    default_ceph_branch=defaults('--ceph-branch', 'master'),
    default_suite_branch=defaults('--suite-branch', 'master'),
    default_teuthology_branch=defaults('--teuthology-branch', 'master'),
)


def main(argv=sys.argv[1:]):
    args = docopt.docopt(doc, argv=argv)
    return teuthology.suite.main(args)
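
These excerpts are docopt usage strings: the [default: ...] annotations are parsed out of the help text itself. A minimal, self-contained sketch of the same pattern, with a hypothetical usage string:

import docopt

doc = """Usage: demo [options]

Options:
 --seed SEED  A random number [default: -1].
"""

args = docopt.docopt(doc, argv=['--seed', '42'])
assert args['--seed'] == '42'
assert docopt.docopt(doc, argv=[])['--seed'] == '-1'
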
Example #15
                              inherit the suite value. Any others must be
                              passed as normal while scheduling with this
                              feature. For random tests involving a facet
                              whose path ends with the '$' operator, you may
                              want to use the --seed argument to repeat them.
 -R, --rerun-statuses <statuses>
                              A comma-separated list of statuses to be used
                              with --rerun. Supported statuses are: 'dead',
                              'fail', 'pass', 'queued', 'running', 'waiting'
                              [default: fail,dead]
 --seed SEED                  A random number, mostly useful along with the
                              --rerun argument. This number can be found in
                              the output of the teuthology-suite command. Use
                              -1 for a random seed [default: -1].

""".format(
    default_machine_type=config.default_machine_type,
    default_results_timeout=config.results_timeout,
    default_ceph_repo=defaults('--ceph-repo',
                            config.get_ceph_git_url()),
    default_suite_repo=defaults('--suite-repo',
                            config.get_ceph_qa_suite_git_url()),
    default_ceph_branch=defaults('--ceph-branch', 'master'),
    default_teuthology_branch=defaults('--teuthology-branch', 'master'),
)


def main(argv=sys.argv[1:]):
    args = docopt.docopt(doc, argv=argv)
    return teuthology.suite.main(args)
Example #16
File: cram.py Project: zxgm/ceph
def task(ctx, config):
    """
    Run all cram tests from the specified paths on the specified
    clients. Each client runs tests in parallel.

    Limitations:
    Tests must have a .t suffix. Tests with duplicate names will
    overwrite each other, so only the last one will run.

    For example::

        tasks:
        - ceph:
        - cram:
            clients:
              client.0:
              - qa/test.t
              - qa/test2.t
              client.1: [qa/test.t]
            branch: foo

    You can also run a list of cram tests on all clients::

        tasks:
        - ceph:
        - cram:
            clients:
              all: [qa/test.t]

    :param ctx: Context
    :param config: Configuration
    """
    assert isinstance(config, dict)
    assert 'clients' in config and isinstance(config['clients'], dict), \
           'configuration must contain a dictionary of clients'

    clients = teuthology.replace_all_with_clients(ctx.cluster,
                                                  config['clients'])
    testdir = teuthology.get_testdir(ctx)

    overrides = ctx.config.get('overrides', {})
    refspec = get_refspec_after_overrides(config, overrides)

    git_url = teuth_config.get_ceph_qa_suite_git_url()
    log.info('Pulling tests from %s ref %s', git_url, refspec)

    try:
        for client, tests in clients.iteritems():
            (remote, ) = ctx.cluster.only(client).remotes.keys()
            client_dir = '{tdir}/archive/cram.{role}'.format(tdir=testdir,
                                                             role=client)
            remote.run(args=[
                'mkdir',
                '--',
                client_dir,
                run.Raw('&&'),
                'virtualenv',
                '{tdir}/virtualenv'.format(tdir=testdir),
                run.Raw('&&'),
                '{tdir}/virtualenv/bin/pip'.format(tdir=testdir),
                'install',
                'cram==0.6',
            ], )
            clone_dir = '{tdir}/clone.{role}'.format(tdir=testdir, role=client)
            remote.run(args=refspec.clone(git_url, clone_dir))

            for test in tests:
                assert test.endswith('.t'), 'tests must end in .t'
                remote.run(args=[
                    'cp',
                    '--',
                    os.path.join(clone_dir, test),
                    client_dir,
                ], )

        with parallel() as p:
            for role in clients.keys():
                p.spawn(_run_tests, ctx, role)
    finally:
        for client, tests in clients.iteritems():
            (remote, ) = ctx.cluster.only(client).remotes.keys()
            client_dir = '{tdir}/archive/cram.{role}'.format(tdir=testdir,
                                                             role=client)
            test_files = set([test.rsplit('/', 1)[1] for test in tests])

            # remove test files unless they failed
            for test_file in test_files:
                abs_file = os.path.join(client_dir, test_file)
                remote.run(args=[
                    'test',
                    '-f',
                    abs_file + '.err',
                    run.Raw('||'),
                    'rm',
                    '-f',
                    '--',
                    abs_file,
                ], )

            # ignore failure since more than one client may
            # be run on a host, and the client dir should be
            # non-empty if the test failed
            remote.run(args=[
                'rm',
                '-rf',
                '--',
                '{tdir}/virtualenv'.format(tdir=testdir),
                clone_dir,
                run.Raw(';'),
                'rmdir',
                '--ignore-fail-on-non-empty',
                client_dir,
            ], )
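
The cleanup bookkeeping above maps each test path to its basename, which is the name it was copied under in client_dir:

tests = ['qa/test.t', 'qa/dir/test2.t']
test_files = set(test.rsplit('/', 1)[1] for test in tests)
assert test_files == set(['test.t', 'test2.t'])
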
Example #17
                              limits (server creation per minute for instance).
  -r, --rerun <name>          Attempt to reschedule a run, selecting only those
                              jobs whose statuses are mentioned by
                              --rerun-statuses.
                              Note that this is implemented by scheduling an
                              entirely new suite and including only jobs whose
                              descriptions match the selected ones. It does so
                              using the same logic as --filter.
                              Of all the flags that were passed when scheduling
                              the original run, the resulting one will only
                              inherit the suite value. Any others must be
                              passed as normal while scheduling with this
                              feature.
 -R, --rerun-statuses <statuses>
                              A comma-separated list of statuses to be used
                              with --rerun. Supported statuses are: 'dead',
                              'fail', 'pass', 'queued', 'running', 'waiting'
                              [default: fail,dead]

""".format(
    default_machine_type=config.default_machine_type,
    default_results_timeout=config.results_timeout,
    default_ceph_repo=config.get_ceph_git_url(),
    default_suite_repo=config.get_ceph_qa_suite_git_url(),
)


def main(argv=sys.argv[1:]):
    args = docopt.docopt(doc, argv=argv)
    return teuthology.suite.main(args)