def get_tag_branch_sha1(gitbuilder):
    """The install config may have contradicting tag/branch and sha1.
    When suite.py prepares the jobs, it always overrides the sha1 with
    whatever default is provided on the command line with --distro
    and what is found in the gitbuilder. If it turns out that the
    tag or the branch in the install config task is about another sha1,
    it will override anyway. For instance:

    install:
       tag: v0.94.1

    will be changed into

    install:
       tag: v0.94.1
       sha1: 12345

    even though v0.94.1 is not sha1 12345. This does not cause a
    problem for the install task because
    GitbuilderProject._get_uri_reference is used to figure out what to
    install from the gitbuilder, and that function gives priority to
    the tag; if no tag is found, the branch; if no branch is found,
    the sha1.

    It is, however, confusing, so this function returns a sha1 that is
    consistent with the tag or the branch being used.

    """

    uri_reference = gitbuilder.uri_reference
    url = gitbuilder.base_url
    assert '/' + uri_reference in url, \
        (url + ' (from template ' + teuth_config.baseurl_template +
         ') does not contain /' + uri_reference)
    log.info('uri_reference ' + uri_reference)
    if uri_reference.startswith('ref/'):
        ref = re.sub('^ref/', '', uri_reference) # do not use basename because the ref may contain a /
        ceph_git_url = teuth_config.get_ceph_git_url()
        cmd = "git ls-remote " + ceph_git_url + " " + ref
        output = check_output(cmd, shell=True)
        if not output:
            raise Exception(cmd + " returns nothing")
        lines = output.splitlines()
        if len(lines) != 1:
            raise Exception(
                cmd + " returns " + output +
                " which contains " + str(len(lines)) +
                " lines instead of exactly one")
        log.info(cmd + " returns " + lines[0])
        (sha1, ref) = lines[0].split()
        if ref.startswith('refs/heads/'):
            tag = None
            branch = re.sub('^refs/heads/', '', ref)
        elif ref.startswith('refs/tags/'):
            tag = re.sub('^refs/tags/', '', ref)
            branch = None
        else:
            # guard against refs that are neither heads nor tags (e.g.
            # refs/pull/...), which would otherwise leave tag/branch undefined
            raise Exception(cmd + " returned unexpected ref " + ref)
    else:
        sha1 = os.path.basename(uri_reference)
        tag = None
        branch = None
    return (tag, branch, sha1)
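
To make the parsing concrete, here is a minimal offline sketch of the ref-handling branch above; parse_ls_remote_line is a hypothetical helper and the sha1 values are made up for illustration:

import re

def parse_ls_remote_line(line):
    """Split one line of `git ls-remote` output ('<sha1>\t<ref>')
    into (tag, branch, sha1), mirroring the logic above."""
    (sha1, ref) = line.split()
    tag = branch = None
    if ref.startswith('refs/heads/'):
        branch = re.sub('^refs/heads/', '', ref)
    elif ref.startswith('refs/tags/'):
        tag = re.sub('^refs/tags/', '', ref)
    return (tag, branch, sha1)

# A tag ref yields (tag, None, sha1):
assert parse_ls_remote_line('1234abcd\trefs/tags/v0.94.1') == \
    ('v0.94.1', None, '1234abcd')
# A branch ref yields (None, branch, sha1), keeping any '/' in the name,
# which is why the code above avoids os.path.basename:
assert parse_ls_remote_line('feedc0de\trefs/heads/wip/foo') == \
    (None, 'wip/foo', 'feedc0de')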
def build_git_url(project, project_owner='ceph'):
    """
    Return the git URL to clone the project
    """
    if project == 'ceph-qa-suite':
        base = config.get_ceph_qa_suite_git_url()
    elif project == 'ceph':
        base = config.get_ceph_git_url()
    else:
        base = 'https://github.com/{project_owner}/{project}'
    url_templ = re.sub(r'\.git$', '', base)
    return url_templ.format(project_owner=project_owner, project=project)
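
A hedged usage sketch: for projects other than ceph and ceph-qa-suite the URL comes from the GitHub template above, so a call like the following (hypothetical project name) yields a plain https clone URL:

build_git_url('teuthology', project_owner='ceph')
# -> 'https://github.com/ceph/teuthology'
build_git_url('ceph')
# -> config.get_ceph_git_url() with any trailing '.git' stripped;
#    {project_owner}/{project} placeholders, if present, are filled in.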
Example #4
def download_ceph_daemon(ctx, config, ref):
    cluster_name = config['cluster']
    testdir = teuthology.get_testdir(ctx)

    if config.get('ceph_daemon_mode') != 'packaged-ceph-daemon':
        ref = config.get('ceph_daemon_branch', ref)
        git_url = teuth_config.get_ceph_git_url()
        log.info('Downloading ceph-daemon (repo %s ref %s)...' %
                 (git_url, ref))
        ctx.cluster.run(args=[
            'git',
            'archive',
            '--remote=' + git_url,
            ref,
            'src/ceph-daemon/ceph-daemon',
            run.Raw('|'),
            'tar',
            '-xO',
            'src/ceph-daemon/ceph-daemon',
            run.Raw('>'),
            ctx.ceph_daemon,
            run.Raw('&&'),
            'test',
            '-s',
            ctx.ceph_daemon,
            run.Raw('&&'),
            'chmod',
            '+x',
            ctx.ceph_daemon,
        ], )

    try:
        yield
    finally:
        log.info('Removing cluster...')
        ctx.cluster.run(args=[
            'sudo',
            ctx.ceph_daemon,
            'rm-cluster',
            '--fsid',
            ctx.ceph[cluster_name].fsid,
            '--force',
        ])

        if config.get('ceph_daemon_mode') == 'root':
            log.info('Removing ceph-daemon ...')
            ctx.cluster.run(args=[
                'rm',
                '-rf',
                ctx.ceph_daemon,
            ], )
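
run.Raw marks tokens that must reach the remote shell unquoted, so the args list above is really composing a shell pipeline. Roughly, each remote ends up executing something like the following (a sketch; $CEPH_DAEMON stands for the ctx.ceph_daemon path):

# git archive --remote=$GIT_URL $REF src/ceph-daemon/ceph-daemon \
#   | tar -xO src/ceph-daemon/ceph-daemon > $CEPH_DAEMON \
#   && test -s $CEPH_DAEMON \
#   && chmod +x $CEPH_DAEMON
#
# i.e. stream the single script out of the remote repo, refuse an empty
# result (test -s), then mark it executable.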
Example #5
    def create_initial_config(self):
        """
        Put together the config file used as the basis for each job in the run.
        Grabs hashes for the latest ceph, kernel and teuthology versions in the
        branches specified and specifies them so we know exactly what we're
        testing.

        :returns: A JobConfig object
        """
        self.kernel_dict = self.choose_kernel()
        ceph_hash = self.choose_ceph_hash()
        # We don't store ceph_version because we don't use it yet outside of
        # logging.
        self.choose_ceph_version(ceph_hash)
        suite_branch = self.choose_suite_branch()
        suite_hash = self.choose_suite_hash(suite_branch)
        if self.args.suite_dir:
            self.suite_repo_path = self.args.suite_dir
        else:
            self.suite_repo_path = util.fetch_repos(suite_branch,
                                                    test_name=self.name)
        teuthology_branch, teuthology_sha1 = self.choose_teuthology_branch()

        if self.args.distro_version:
            self.args.distro_version, _ = \
                OS.version_codename(self.args.distro, self.args.distro_version)
        self.config_input = dict(
            suite=self.args.suite,
            suite_branch=suite_branch,
            suite_hash=suite_hash,
            ceph_branch=self.args.ceph_branch,
            ceph_hash=ceph_hash,
            ceph_repo=config.get_ceph_git_url(),
            teuthology_branch=teuthology_branch,
            teuthology_sha1=teuthology_sha1,
            machine_type=self.args.machine_type,
            distro=self.args.distro,
            distro_version=self.args.distro_version,
            archive_upload=config.archive_upload,
            archive_upload_key=config.archive_upload_key,
            suite_repo=config.get_ceph_qa_suite_git_url(),
            suite_relpath=self.args.suite_relpath,
            flavor=self.args.flavor,
        )
        return self.build_base_config()
Example #6
def process_args(args):
    conf = YamlConfig()
    rename_args = {
        'ceph': 'ceph_branch',
        'sha1': 'ceph_sha1',
        'kernel': 'kernel_branch',
        # FIXME: ceph flavor and kernel flavor are separate things
        'flavor': 'kernel_flavor',
        '<config_yaml>': 'base_yaml_paths',
        'filter': 'filter_in',
    }
    for (key, value) in args.items():
        # Translate --foo-bar to foo_bar
        key = key.lstrip('--').replace('-', '_')
        # Rename the key if necessary
        key = rename_args.get(key) or key
        if key == 'suite_branch':
            value = value or override_arg_defaults('--suite-branch', None)
        if key == 'suite' and value is not None:
            value = normalize_suite_name(value)
        if key == 'suite_relpath' and value is None:
            value = ''
        elif key in ('limit', 'priority', 'num', 'newest', 'seed', 'job_threshold'):
            value = int(value)
        elif key == 'subset' and value is not None:
            # take input string '2/3' and turn into (2, 3)
            value = tuple(map(int, value.split('/')))
        elif key in ('filter_all', 'filter_in', 'filter_out', 'rerun_statuses'):
            if not value:
                value = []
            else:
                value = [x.strip() for x in value.split(',')]
        elif key == 'ceph_repo':
            value = expand_short_repo_name(
                value,
                config.get_ceph_git_url())
        elif key == 'suite_repo':
            value = expand_short_repo_name(
                value,
                config.get_ceph_qa_suite_git_url())
        elif key in ('validate_sha1', 'filter_fragments'):
            value = strtobool(value)
        conf[key] = value
    return conf
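
A minimal sketch of how process_args reshapes a docopt-style dict; the keys and values here are hypothetical, and only keys actually present in the dict are touched:

args = {
    '--ceph': 'hammer',           # renamed to ceph_branch
    '--subset': '2/3',            # parsed into a tuple of ints
    '--filter': 'rados, rbd',     # renamed to filter_in, split on commas
    '<config_yaml>': ['a.yaml'],  # renamed to base_yaml_paths
}
conf = process_args(args)
# conf['ceph_branch'] == 'hammer'
# conf['subset'] == (2, 3)
# conf['filter_in'] == ['rados', 'rbd']
# conf['base_yaml_paths'] == ['a.yaml']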
Example #7
def get_sha1(ref):
    url = teuth_config.get_ceph_git_url()
    ls_remote = misc.sh("git ls-remote " + url + " " + ref)
    return ls_remote.split()[0]
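
Worth noting: get_sha1 passes the ref straight to git ls-remote and keeps only the first field of the first output line, so an ambiguous ref (one matching both a tag and a branch) silently resolves to whichever line git prints first; compare get_tag_branch_sha1 above, which insists on exactly one line.

# Hypothetical call; needs network access to the configured ceph git URL.
# `git ls-remote <url> v0.94.1` prints lines like '<sha1>\trefs/tags/v0.94.1';
# split()[0] keeps just the leading sha1.
sha1 = get_sha1('v0.94.1')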
Example #8
def task(ctx, config):
    """
    Build Ceph packages. This task will automagically be run
    before the tasks that need to install packages (this is taken
    care of by the internal teuthology task).

    The config should be as follows:

    buildpackages:
      good_machine:
        disk: 40 # GB
        ram: 48000 # MB
        cpus: 16
      min_machine:
        disk: 40 # GB
        ram: 8000 # MB
        cpus: 1

    example:

    tasks:
    - buildpackages:
        good_machine:
          disk: 40 # GB
          ram: 15000 # MB
          cpus: 16
        min_machine:
          disk: 40 # GB
          ram: 8000 # MB
          cpus: 1
    - install:

    When a buildpackages task is already included, the values it contains can be
    overridden with:

    overrides:
      buildpackages:
        good_machine:
          disk: 20 # GB
          ram: 2000 # MB
          cpus: 2
        min_machine:
          disk: 10 # GB
          ram: 1000 # MB
          cpus: 1

    """
    log.info('Beginning buildpackages...')
    if config is None:
        config = {}
    assert isinstance(config, dict), \
        'task only accepts a dict for config not ' + str(config)
    overrides = ctx.config.get('overrides', {})
    misc.deep_merge(config, overrides.get('buildpackages', {}))
    d = os.path.join(os.path.dirname(__file__), 'buildpackages')
    os_type = misc.get_distro(ctx)
    os_version = misc.get_distro_version(ctx)
    arch = ctx.config.get('arch', OpenStack().get_default_arch())
    dist = LocalGitbuilderProject()._get_distro(distro=os_type,
                                                version=os_version)
    pkg_type = get_pkg_type(os_type)
    misc.sh(
        "flock --close /tmp/buildpackages " +
        "make -C " + d + " " + os.environ['HOME'] + "/.ssh_agent")
    for (flavor, tag, branch, sha1) in lookup_configs(ctx, ctx.config):
        if tag:
            sha1 = get_sha1(tag)
        elif branch:
            sha1 = get_sha1(branch)
        log.info("building flavor = " + flavor + "," +
                 " tag = " + tag + "," +
                 " branch = " + branch + "," +
                 " sha1 = " + sha1)
        target = ('ceph-' +
                  pkg_type + '-' +
                  dist + '-' +
                  arch + '-' +
                  flavor + '-' +
                  sha1)
        openstack = OpenStack()
        openstack.set_provider()
        if openstack.provider == 'ovh':
            select = '^(vps|hg)-.*ssd'
        else:
            select = ''
        network = openstack.net()
        if network != "":
            network = " OPENSTACK_NETWORK='" + network + "' "
        openstack.image(os_type, os_version, arch) # create if it does not exist
        build_flavor = openstack.flavor_range(
            config['min_machine'], config['good_machine'], arch, select)
        default_arch = openstack.get_default_arch()
        http_flavor = openstack.flavor({
            'disk': 30, # GB
            'ram': 1024, # MB
            'cpus': 1,
        }, default_arch, select)
        lock = "/tmp/buildpackages-" + sha1 + "-" + os_type + "-" + os_version
        cmd = (". " + os.environ['HOME'] + "/.ssh_agent ; " +
               " flock --close " + lock +
               " make -C " + d +
               network +
               " CEPH_GIT_URL=" + teuth_config.get_ceph_git_url() +
               " CEPH_PKG_TYPE=" + pkg_type +
               " CEPH_OS_TYPE=" + os_type +
               " CEPH_OS_VERSION=" + os_version +
               " CEPH_DIST=" + dist +
               " CEPH_ARCH=" + arch +
               " CEPH_SHA1=" + sha1 +
               " CEPH_TAG=" + tag +
               " CEPH_BRANCH=" + branch +
               " CEPH_FLAVOR=" + flavor +
               " BUILD_FLAVOR=" + build_flavor +
               " HTTP_FLAVOR=" + http_flavor +
               " HTTP_ARCH=" + default_arch +
               " " + target +
               " ")
        log.info("buildpackages: " + cmd)
        misc.sh(cmd)
    teuth_config.gitbuilder_host = openstack.get_ip('packages-repository', '')
    log.info('Finished buildpackages')
Example #9
def task(ctx, config):
    """
    Build Ceph packages. This task will automagically be run
    before the tasks that need to install packages (this is taken
    care of by the internal teuthology task).

    The config should be as follows:

    buildpackages:
      good_machine:
        disk: 40 # GB
        ram: 48000 # MB
        cpus: 16
      min_machine:
        disk: 40 # GB
        ram: 8000 # MB
        cpus: 1

    example:

    tasks:
    - buildpackages:
        good_machine:
          disk: 40 # GB
          ram: 15000 # MB
          cpus: 16
        min_machine:
          disk: 40 # GB
          ram: 8000 # MB
          cpus: 1
    - install:

    When a buildpackages task is already included, the values it contains can be
    overridden with:

    overrides:
      buildpackages:
        good_machine:
          disk: 20 # GB
          ram: 2000 # MB
          cpus: 2
        min_machine:
          disk: 10 # GB
          ram: 1000 # MB
          cpus: 1

    """
    log.info('Beginning buildpackages...')
    if config is None:
        config = {}
    assert isinstance(config, dict), \
        'task only accepts a dict for config not ' + str(config)
    overrides = ctx.config.get('overrides', {})
    misc.deep_merge(config, overrides.get('buildpackages', {}))
    d = os.path.join(os.path.dirname(__file__), 'buildpackages')
    os_type = misc.get_distro(ctx)
    os_version = misc.get_distro_version(ctx)
    arch = ctx.config.get('arch', OpenStack().get_default_arch())
    dist = LocalGitbuilderProject()._get_distro(distro=os_type,
                                                version=os_version)
    pkg_type = get_pkg_type(os_type)
    misc.sh("flock --close /tmp/buildpackages " + "make -C " + d + " " +
            os.environ['HOME'] + "/.ssh_agent")
    for (flavor, tag, branch, sha1) in lookup_configs(ctx, ctx.config):
        if tag:
            sha1 = get_sha1(tag)
        elif branch:
            sha1 = get_sha1(branch)
        log.info("building flavor = " + flavor + "," + " tag = " + tag + "," +
                 " branch = " + branch + "," + " sha1 = " + sha1)
        self_name = 'teuthology'
        key_name = 'teuthology'
        pkg_repo = 'packages-repository'
        security_group = 'teuthology'
        if 'selfname' in teuth_config.openstack:
            self_name = teuth_config.openstack['selfname']
        if 'keypair' in teuth_config.openstack:
            key_name = teuth_config.openstack['keypair']
        if 'package_repo' in teuth_config.openstack:
            pkg_repo = teuth_config.openstack['package_repo']
        if 'server_group' in teuth_config.openstack:
            security_group = teuth_config.openstack['server_group']
        target = (self_name + '-ceph-' + pkg_type + '-' + dist + '-' + arch +
                  '-' + flavor + '-' + sha1)
        openstack = OpenStack()
        openstack.set_provider()
        network = openstack.net()
        if network != "":
            network = " OPENSTACK_NETWORK='" + network + "' "
        openstack.image(os_type, os_version,
                        arch)  # create if it does not exist
        build_flavor = openstack.flavor_range(config['min_machine'],
                                              config['good_machine'], arch)
        default_arch = openstack.get_default_arch()
        http_flavor = openstack.flavor(
            {
                'disk': 30,  # GB
                'ram': 1024,  # MB
                'cpus': 1,
            },
            default_arch)

        lock = "/tmp/buildpackages-" + sha1 + "-" + os_type + "-" + os_version
        cmd = (". " + os.environ['HOME'] + "/.ssh_agent ; " +
               " flock --close " + lock + " make -C " + d + network +
               " SELFNAME=" + self_name + " KEY_NAME=" + key_name +
               " PKG_REPO=" + pkg_repo + " SEC_GROUP=" + security_group +
               " CEPH_GIT_URL=" + teuth_config.get_ceph_git_url() +
               " CEPH_PKG_TYPE=" + pkg_type + " CEPH_OS_TYPE=" + os_type +
               " CEPH_OS_VERSION=" + os_version + " CEPH_DIST=" + dist +
               " CEPH_ARCH=" + arch + " CEPH_SHA1=" + sha1 + " CEPH_TAG=" +
               tag + " CEPH_BRANCH=" + branch + " CEPH_FLAVOR=" + flavor +
               " BUILD_FLAVOR=" + build_flavor + " HTTP_FLAVOR=" +
               http_flavor + " HTTP_ARCH=" + default_arch +
               " BUILDPACKAGES_CANONICAL_TAGS=" +
               ("true" if teuth_config.canonical_tags else "false") + " " +
               target + " ")
        log.info("Executing the following make command to build {} packages. " \
                 "Note that some values in the command, like CEPH_GIT_URL " \
                 "and BUILDPACKAGES_CANONICAL_TAGS, may differ from similar " \
                 "command-line parameter values. This is because " \
                 "the values used by this task are taken from the teuthology " \
                 "configuration file. If in doubt, tear down your teuthology " \
                 "instance and start again from scratch.".format(pkg_type))
        log.info("buildpackages make command: " + cmd)
        misc.sh(cmd)
    teuth_config.gitbuilder_host = openstack.get_ip(pkg_repo, '')
    log.info('Finished buildpackages')
Example #10
def task(ctx, config):
    """
    Run all cram tests from the specified urls on the specified
    clients. Each client runs tests in parallel.

    Limitations:
    Tests must have a .t suffix. Tests with duplicate names will
    overwrite each other, so only the last one will run.

    For example::

        tasks:
        - ceph:
        - cram:
            clients:
              client.0:
              - http://download.ceph.com/qa/test.t
              - http://download.ceph.com/qa/test2.t
              client.1: [http://download.ceph.com/qa/test.t]
            branch: foo

    You can also run a list of cram tests on all clients::

        tasks:
        - ceph:
        - cram:
            clients:
              all: [http://download.ceph.com/qa/test.t]

    :param ctx: Context
    :param config: Configuration
    """
    assert isinstance(config, dict)
    assert 'clients' in config and isinstance(config['clients'], dict), \
           'configuration must contain a dictionary of clients'

    clients = teuthology.replace_all_with_clients(ctx.cluster,
                                                  config['clients'])
    testdir = teuthology.get_testdir(ctx)

    overrides = ctx.config.get('overrides', {})
    teuthology.deep_merge(config, overrides.get('workunit', {}))

    refspec = config.get('branch')
    if refspec is None:
        refspec = config.get('tag')
    if refspec is None:
        refspec = config.get('sha1')
    if refspec is None:
        refspec = 'HEAD'

    # hack: the git_url is always ceph-ci or ceph
    git_url = teuth_config.get_ceph_git_url()
    repo_name = 'ceph.git'
    if git_url.count('ceph-ci'):
        repo_name = 'ceph-ci.git'

    try:
        for client, tests in clients.iteritems():
            (remote,) = ctx.cluster.only(client).remotes.iterkeys()
            client_dir = '{tdir}/archive/cram.{role}'.format(tdir=testdir, role=client)
            remote.run(
                args=[
                    'mkdir', '--', client_dir,
                    run.Raw('&&'),
                    'virtualenv', '{tdir}/virtualenv'.format(tdir=testdir),
                    run.Raw('&&'),
                    '{tdir}/virtualenv/bin/pip'.format(tdir=testdir),
                    'install', 'cram==0.6',
                    ],
                )
            for test in tests:
                url = test.format(repo=repo_name, branch=refspec)
                log.info('fetching test %s for %s', url, client)
                assert test.endswith('.t'), 'tests must end in .t'
                remote.run(
                    args=[
                        'wget', '-nc', '-nv', '-P', client_dir, '--', url,
                        ],
                    )

        with parallel() as p:
            for role in clients.iterkeys():
                p.spawn(_run_tests, ctx, role)
    finally:
        for client, tests in clients.iteritems():
            (remote,) = ctx.cluster.only(client).remotes.iterkeys()
            client_dir = '{tdir}/archive/cram.{role}'.format(tdir=testdir, role=client)
            test_files = set([test.rsplit('/', 1)[1] for test in tests])

            # remove test files unless they failed
            for test_file in test_files:
                abs_file = os.path.join(client_dir, test_file)
                remote.run(
                    args=[
                        'test', '-f', abs_file + '.err',
                        run.Raw('||'),
                        'rm', '-f', '--', abs_file,
                        ],
                    )

            # ignore failure since more than one client may
            # be run on a host, and the client dir should be
            # non-empty if the test failed
            remote.run(
                args=[
                    'rm', '-rf', '--',
                    '{tdir}/virtualenv'.format(tdir=testdir),
                    run.Raw(';'),
                    'rmdir', '--ignore-fail-on-non-empty', client_dir,
                    ],
                )
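
The branch/tag/sha1 lookup above is a simple precedence chain that recurs throughout this page (generate_iso below uses the identical pattern). A standalone sketch of the same logic, assuming a plain dict config:

def choose_refspec(config):
    # Precedence: branch, then tag, then sha1, then HEAD.
    for key in ('branch', 'tag', 'sha1'):
        if config.get(key) is not None:
            return config[key]
    return 'HEAD'

assert choose_refspec({'tag': 'v0.94.1'}) == 'v0.94.1'
assert choose_refspec({'branch': 'foo', 'sha1': '1234abcd'}) == 'foo'
assert choose_refspec({}) == 'HEAD'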
Example #11
def task(ctx, config):
    """
    Build Ceph packages. This task will automagically be run
    before the tasks that need to install packages (this is taken
    care of by the internal teuthology task).

    The config should be as follows:

    buildpackages:
      machine:
        disk: 40 # GB
        ram: 15000 # MB
        cpus: 16

    example:

    tasks:
    - buildpackages:
        machine:
          disk: 40 # GB
          ram: 15000 # MB
          cpus: 16
    - install:
    """
    log.info('Beginning buildpackages...')
    if config is None:
        config = {}
    assert isinstance(config, dict), \
        'task only accepts a dict for config not ' + str(config)
    d = os.path.join(os.path.dirname(__file__), 'buildpackages')
    for remote in ctx.cluster.remotes.iterkeys():
        for install_config in lookup_configs(ctx, ctx.config):
            gitbuilder = install._get_gitbuilder_project(
                ctx, remote, install_config)
            (tag, branch, sha1) = get_tag_branch_sha1(gitbuilder)
            check_call(
                "flock --close /tmp/buildpackages " +
                "make -C " + d + " " + os.environ['HOME'] + "/.ssh_agent",
                shell=True)
            url = gitbuilder.base_url
            target = os.path.dirname(urlparse.urlparse(url).path.strip('/'))
            target = os.path.dirname(target) + '-' + sha1
            openstack = OpenStack()
            if 'cloud.ovh.net' in os.environ['OS_AUTH_URL']:
                select = '^(vps|eg)-'
            else:
                select = ''
            build_flavor = openstack.flavor(config['machine'], select)
            http_flavor = openstack.flavor({
                'disk': 10, # GB
                'ram': 1024, # MB
                'cpus': 1,
            }, select)
            cmd = (". " + os.environ['HOME'] + "/.ssh_agent ; " +
                   " flock --close /tmp/buildpackages-" + sha1 +
                   " make -C " + d +
                   " CEPH_GIT_URL=" + teuth_config.get_ceph_git_url() +
                   " CEPH_PKG_TYPE=" + gitbuilder.pkg_type +
                   " CEPH_OS_TYPE=" + gitbuilder.os_type +
                   # os_version is from the remote and will be 7.1.23 for CentOS 7
                   # instead of the expected 7.0 for all 7.* CentOS
                   " CEPH_OS_VERSION=" + gitbuilder._get_version() +
                   " CEPH_DIST=" + gitbuilder.distro +
                   " CEPH_ARCH=" + gitbuilder.arch +
                   " CEPH_SHA1=" + sha1 +
                   " CEPH_TAG=" + (tag or '') +
                   " CEPH_BRANCH=" + (branch or '') +
                   " GITBUILDER_URL=" + url +
                   " BUILD_FLAVOR=" + build_flavor +
                   " HTTP_FLAVOR=" + http_flavor +
                   " " + target +
                   " ")
            log.info("buildpackages: " + cmd)
            check_call(cmd, shell=True)
        teuth_config.gitbuilder_host = openstack.get_ip('packages-repository', '')
        log.info('Finished buildpackages')
Example #12
                              entirely new suite and including only jobs whose
                              descriptions match the selected ones. It does so
                              using the same logic as --filter.
                              Of all the flags that were passed when scheduling
                              the original run, the resulting one will only
                              inherit the suite value. Any others must be
                              passed as normal while scheduling with this
                              feature. For random tests involving a facet whose
                              path ends with the '$' operator, you might want to
                              use the --seed argument to repeat them.
 -R, --rerun-statuses <statuses>
                              A comma-separated list of statuses to be used
                              with --rerun. Supported statuses are: 'dead',
                              'fail', 'pass', 'queued', 'running', 'waiting'
                              [default: fail,dead]
 --seed SEED                  A random number, mostly useful along with the
                              --rerun argument. This number can be found
                              in the output of the teuthology-suite command.

""".format(
    default_machine_type=config.default_machine_type,
    default_results_timeout=config.results_timeout,
    default_ceph_repo=config.get_ceph_git_url(),
    default_suite_repo=config.get_ceph_qa_suite_git_url(),
)


def main(argv=sys.argv[1:]):
    args = docopt.docopt(doc, argv=argv)
    return teuthology.suite.main(args)
Example #13
def task(ctx, config):
    """
    Build Ceph packages. This task will automagically be run
    before the tasks that need to install packages (this is taken
    care of by the internal teuthology task).

    The config should be as follows:

    buildpackages:
      machine:
        disk: 40 # GB
        ram: 15000 # MB
        cpus: 16

    example:

    tasks:
    - buildpackages:
        machine:
          disk: 40 # GB
          ram: 15000 # MB
          cpus: 16
    - install:
    """
    log.info('Beginning buildpackages...')
    if config is None:
        config = {}
    assert isinstance(config, dict), \
        'task only accepts a dict for config not ' + str(config)
    d = os.path.join(os.path.dirname(__file__), 'buildpackages')
    for remote in ctx.cluster.remotes.iterkeys():
        for install_config in lookup_configs(ctx, ctx.config):
            gitbuilder = install._get_gitbuilder_project(
                ctx, remote, install_config)
            (tag, branch, sha1) = get_tag_branch_sha1(gitbuilder)
            check_call("flock --close /tmp/buildpackages " + "make -C " + d +
                       " " + os.environ['HOME'] + "/.ssh_agent",
                       shell=True)
            url = gitbuilder.base_url
            target = os.path.dirname(urlparse.urlparse(url).path.strip('/'))
            target = os.path.dirname(target) + '-' + sha1
            openstack = OpenStack()
            if 'cloud.ovh.net' in os.environ['OS_AUTH_URL']:
                select = '^(vps|eg)-'
            else:
                select = ''
            build_flavor = openstack.flavor(config['machine'], select)
            http_flavor = openstack.flavor(
                {
                    'disk': 10,  # GB
                    'ram': 1024,  # MB
                    'cpus': 1,
                },
                select)
            cmd = (
                ". " + os.environ['HOME'] + "/.ssh_agent ; " +
                " flock --close /tmp/buildpackages-" + sha1 + " make -C " + d +
                " CEPH_GIT_URL=" + teuth_config.get_ceph_git_url() +
                " CEPH_PKG_TYPE=" + gitbuilder.pkg_type + " CEPH_OS_TYPE=" +
                gitbuilder.os_type +
                # os_version is from the remote and will be 7.1.23 for CentOS 7
                # instead of the expected 7.0 for all 7.* CentOS
                " CEPH_OS_VERSION=" + gitbuilder._get_version() +
                " CEPH_DIST=" + gitbuilder.distro + " CEPH_ARCH=" +
                gitbuilder.arch + " CEPH_SHA1=" + sha1 + " CEPH_TAG=" +
                (tag or '') + " CEPH_BRANCH=" + (branch or '') +
                " CEPH_FLAVOR=" + gitbuilder.flavor + " GITBUILDER_URL=" +
                url + " BUILD_FLAVOR=" + build_flavor + " HTTP_FLAVOR=" +
                http_flavor + " " + target + " ")
            log.info("buildpackages: " + cmd)
            check_call(cmd, shell=True)
        teuth_config.gitbuilder_host = openstack.get_ip(
            'packages-repository', '')
        log.info('Finished buildpackages')
Example #14
def download_cephadm(ctx, config, ref):
    cluster_name = config['cluster']

    if config.get('cephadm_mode') != 'cephadm-package':
        ref = config.get('cephadm_branch', ref)
        git_url = teuth_config.get_ceph_git_url()
        log.info('Downloading cephadm (repo %s ref %s)...' % (git_url, ref))
        if git_url.startswith('https://github.com/'):
            # git archive doesn't like https:// URLs, which we use with github.
            rest = git_url.split('https://github.com/', 1)[1]
            rest = rest.replace('.git/', '/')  # no .git suffix (assign the result; str.replace does not mutate)
            ctx.cluster.run(args=[
                'curl',
                '--silent',
                'https://raw.githubusercontent.com/' + rest + '/' + ref +
                '/src/cephadm/cephadm',
                run.Raw('>'),
                ctx.cephadm,
                run.Raw('&&'),
                'test',
                '-s',
                ctx.cephadm,
                run.Raw('&&'),
                'chmod',
                '+x',
                ctx.cephadm,
            ], )
        else:
            ctx.cluster.run(args=[
                'git',
                'archive',
                '--remote=' + git_url,
                ref,
                'src/cephadm/cephadm',
                run.Raw('|'),
                'tar',
                '-xO',
                'src/cephadm/cephadm',
                run.Raw('>'),
                ctx.cephadm,
                run.Raw('&&'),
                'test',
                '-s',
                ctx.cephadm,
                run.Raw('&&'),
                'chmod',
                '+x',
                ctx.cephadm,
            ], )

    try:
        yield
    finally:
        log.info('Removing cluster...')
        ctx.cluster.run(args=[
            'sudo',
            ctx.cephadm,
            'rm-cluster',
            '--fsid',
            ctx.ceph[cluster_name].fsid,
            '--force',
        ])

        if config.get('cephadm_mode') == 'root':
            log.info('Removing cephadm ...')
            ctx.cluster.run(args=[
                'rm',
                '-rf',
                ctx.cephadm,
            ], )
Example #15
def generate_iso(ctx, config):
    """Execute system commands to generate iso"""
    log.info('generating iso...')
    testdir = teuthology.get_testdir(ctx)

    # use ctx.config instead of config, because config has been
    # through teuthology.replace_all_with_clients()
    refspec = ctx.config.get('branch')
    if refspec is None:
        refspec = ctx.config.get('tag')
    if refspec is None:
        refspec = ctx.config.get('sha1')
    if refspec is None:
        refspec = 'HEAD'

    # hack: the git_url is always ceph-ci or ceph
    git_url = teuth_config.get_ceph_git_url()
    repo_name = 'ceph.git'
    if git_url.count('ceph-ci'):
        repo_name = 'ceph-ci.git'

    for client, client_config in config.iteritems():
        assert 'test' in client_config, 'You must specify a test to run'
        test_url = client_config['test'].format(repo=repo_name, branch=refspec)
        (remote, ) = ctx.cluster.only(client).remotes.keys()
        src_dir = os.path.dirname(__file__)
        userdata_path = os.path.join(testdir, 'qemu', 'userdata.' + client)
        metadata_path = os.path.join(testdir, 'qemu', 'metadata.' + client)

        with file(os.path.join(src_dir, 'userdata_setup.yaml'), 'rb') as f:
            test_setup = ''.join(f.readlines())
            # configuring the commands to setup the nfs mount
            mnt_dir = "/export/{client}".format(client=client)
            test_setup = test_setup.format(mnt_dir=mnt_dir)

        with file(os.path.join(src_dir, 'userdata_teardown.yaml'), 'rb') as f:
            test_teardown = ''.join(f.readlines())

        user_data = test_setup
        if client_config.get('type', 'filesystem') == 'filesystem':
            for i in xrange(0, client_config.get('num_rbd', DEFAULT_NUM_RBD)):
                dev_letter = chr(ord('b') + i)
                user_data += """
- |
  #!/bin/bash
  mkdir /mnt/test_{dev_letter}
  mkfs -t xfs /dev/vd{dev_letter}
  mount -t xfs /dev/vd{dev_letter} /mnt/test_{dev_letter}
""".format(dev_letter=dev_letter)

        # this may change later to pass the directories as args to the
        # script or something. xfstests needs that.
        user_data += """
- |
  #!/bin/bash
  test -d /mnt/test_b && cd /mnt/test_b
  /mnt/cdrom/test.sh > /mnt/log/test.log 2>&1 && touch /mnt/log/success
""" + test_teardown

        teuthology.write_file(remote, userdata_path, StringIO(user_data))

        with file(os.path.join(src_dir, 'metadata.yaml'), 'rb') as f:
            teuthology.write_file(remote, metadata_path, f)

        test_file = '{tdir}/qemu/{client}.test.sh'.format(tdir=testdir,
                                                          client=client)

        log.info('fetching test %s for %s', test_url, client)
        remote.run(args=[
            'wget',
            '-nv',
            '-O',
            test_file,
            test_url,
            run.Raw('&&'),
            'chmod',
            '755',
            test_file,
        ], )
        remote.run(args=[
            'genisoimage',
            '-quiet',
            '-input-charset',
            'utf-8',
            '-volid',
            'cidata',
            '-joliet',
            '-rock',
            '-o',
            '{tdir}/qemu/{client}.iso'.format(tdir=testdir, client=client),
            '-graft-points',
            'user-data={userdata}'.format(userdata=userdata_path),
            'meta-data={metadata}'.format(metadata=metadata_path),
            'test.sh={file}'.format(file=test_file),
        ], )
    try:
        yield
    finally:
        for client in config.iterkeys():
            (remote, ) = ctx.cluster.only(client).remotes.keys()
            remote.run(args=[
                'rm',
                '-f',
                '{tdir}/qemu/{client}.iso'.format(tdir=testdir, client=client),
                os.path.join(testdir, 'qemu', 'userdata.' + client),
                os.path.join(testdir, 'qemu', 'metadata.' + client),
                '{tdir}/qemu/{client}.test.sh'.format(tdir=testdir,
                                                      client=client),
            ], )
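
A small aside on the disk loop above: the guest's root disk is vda, so the extra RBD disks are lettered from vdb upward. A sketch (DEFAULT_NUM_RBD is a module constant; 4 is assumed here for illustration):

num_rbd = 4  # stand-in for client_config.get('num_rbd', DEFAULT_NUM_RBD)
letters = [chr(ord('b') + i) for i in range(0, num_rbd)]
# -> ['b', 'c', 'd', 'e'], i.e. /dev/vdb .. /dev/vde each get an xfs
#    filesystem mounted at /mnt/test_b .. /mnt/test_e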
Example #16
def generate_iso(ctx, config):
    """Execute system commands to generate iso"""
    log.info('generating iso...')
    testdir = teuthology.get_testdir(ctx)

    # use ctx.config instead of config, because config has been
    # through teuthology.replace_all_with_clients()
    refspec = ctx.config.get('branch')
    if refspec is None:
        refspec = ctx.config.get('tag')
    if refspec is None:
        refspec = ctx.config.get('sha1')
    if refspec is None:
        refspec = 'HEAD'

    # hack: the git_url is always ceph-ci or ceph
    git_url = teuth_config.get_ceph_git_url()
    repo_name = 'ceph.git'
    if git_url.count('ceph-ci'):
        repo_name = 'ceph-ci.git'

    for client, client_config in config.iteritems():
        assert 'test' in client_config, 'You must specify a test to run'
        test_url = client_config['test'].format(repo=repo_name, branch=refspec)
        (remote,) = ctx.cluster.only(client).remotes.keys()
        src_dir = os.path.dirname(__file__)
        userdata_path = os.path.join(testdir, 'qemu', 'userdata.' + client)
        metadata_path = os.path.join(testdir, 'qemu', 'metadata.' + client)

        with file(os.path.join(src_dir, 'userdata_setup.yaml'), 'rb') as f:
            test_setup = ''.join(f.readlines())
            # configuring the commands to setup the nfs mount
            mnt_dir = "/export/{client}".format(client=client)
            test_setup = test_setup.format(
                mnt_dir=mnt_dir
            )

        with file(os.path.join(src_dir, 'userdata_teardown.yaml'), 'rb') as f:
            test_teardown = ''.join(f.readlines())

        user_data = test_setup
        if client_config.get('type', 'filesystem') == 'filesystem':
            num_disks = client_config.get('disks', DEFAULT_NUM_DISKS)
            if isinstance(num_disks, list):
                num_disks = len(num_disks)
            for i in xrange(1, num_disks):
                dev_letter = chr(ord('a') + i)
                user_data += """
- |
  #!/bin/bash
  mkdir /mnt/test_{dev_letter}
  mkfs -t xfs /dev/vd{dev_letter}
  mount -t xfs /dev/vd{dev_letter} /mnt/test_{dev_letter}
""".format(dev_letter=dev_letter)

        user_data += """
- |
  #!/bin/bash
  test -d /etc/ceph || mkdir /etc/ceph
  cp /mnt/cdrom/ceph.* /etc/ceph/
"""

        cloud_config_archive = client_config.get('cloud_config_archive', [])
        if cloud_config_archive:
            user_data += yaml.safe_dump(cloud_config_archive, default_style='|',
                                        default_flow_style=False)

        # this may change later to pass the directories as args to the
        # script or something. xfstests needs that.
        user_data += """
- |
  #!/bin/bash
  test -d /mnt/test_b && cd /mnt/test_b
  /mnt/cdrom/test.sh > /mnt/log/test.log 2>&1 && touch /mnt/log/success
""" + test_teardown

        user_data = user_data.format(
            ceph_branch=ctx.config.get('branch'),
            ceph_sha1=ctx.config.get('sha1'))
        teuthology.write_file(remote, userdata_path, StringIO(user_data))

        with file(os.path.join(src_dir, 'metadata.yaml'), 'rb') as f:
            teuthology.write_file(remote, metadata_path, f)

        test_file = '{tdir}/qemu/{client}.test.sh'.format(tdir=testdir, client=client)

        log.info('fetching test %s for %s', test_url, client)
        remote.run(
            args=[
                'wget', '-nv', '-O', test_file,
                test_url,
                run.Raw('&&'),
                'chmod', '755', test_file,
                ],
            )
        remote.run(
            args=[
                'genisoimage', '-quiet', '-input-charset', 'utf-8',
                '-volid', 'cidata', '-joliet', '-rock',
                '-o', '{tdir}/qemu/{client}.iso'.format(tdir=testdir, client=client),
                '-graft-points',
                'user-data={userdata}'.format(userdata=userdata_path),
                'meta-data={metadata}'.format(metadata=metadata_path),
                'ceph.conf=/etc/ceph/ceph.conf',
                'ceph.keyring=/etc/ceph/ceph.keyring',
                'test.sh={file}'.format(file=test_file),
                ],
            )
    try:
        yield
    finally:
        for client in config.iterkeys():
            (remote,) = ctx.cluster.only(client).remotes.keys()
            remote.run(
                args=[
                    'rm', '-f',
                    '{tdir}/qemu/{client}.iso'.format(tdir=testdir, client=client),
                    os.path.join(testdir, 'qemu', 'userdata.' + client),
                    os.path.join(testdir, 'qemu', 'metadata.' + client),
                    '{tdir}/qemu/{client}.test.sh'.format(tdir=testdir, client=client),
                    ],
                )
Example #17
def task(ctx, config):
    """
    Run all cram tests from the specified urls on the specified
    clients. Each client runs tests in parallel.

    Limitations:
    Tests must have a .t suffix. Tests with duplicate names will
    overwrite each other, so only the last one will run.

    For example::

        tasks:
        - ceph:
        - cram:
            clients:
              client.0:
              - http://ceph.com/qa/test.t
              - http://ceph.com/qa/test2.t
              client.1: [http://ceph.com/qa/test.t]
            branch: foo

    You can also run a list of cram tests on all clients::

        tasks:
        - ceph:
        - cram:
            clients:
              all: [http://ceph.com/qa/test.t]

    :param ctx: Context
    :param config: Configuration
    """
    assert isinstance(config, dict)
    assert 'clients' in config and isinstance(config['clients'], dict), \
           'configuration must contain a dictionary of clients'

    clients = teuthology.replace_all_with_clients(ctx.cluster,
                                                  config['clients'])
    testdir = teuthology.get_testdir(ctx)

    overrides = ctx.config.get('overrides', {})
    teuthology.deep_merge(config, overrides.get('workunit', {}))

    refspec = config.get('branch')
    if refspec is None:
        refspec = config.get('tag')
    if refspec is None:
        refspec = config.get('sha1')
    if refspec is None:
        refspec = 'HEAD'

    # hack: the git_url is always ceph-ci or ceph
    git_url = teuth_config.get_ceph_git_url()
    repo_name = 'ceph.git'
    if git_url.count('ceph-ci'):
        repo_name = 'ceph-ci.git'

    try:
        for client, tests in clients.iteritems():
            (remote, ) = ctx.cluster.only(client).remotes.iterkeys()
            client_dir = '{tdir}/archive/cram.{role}'.format(tdir=testdir,
                                                             role=client)
            remote.run(args=[
                'mkdir',
                '--',
                client_dir,
                run.Raw('&&'),
                'virtualenv',
                '{tdir}/virtualenv'.format(tdir=testdir),
                run.Raw('&&'),
                '{tdir}/virtualenv/bin/pip'.format(tdir=testdir),
                'install',
                'cram==0.6',
            ], )
            for test in tests:
                url = test.format(repo=repo_name, branch=refspec)
                log.info('fetching test %s for %s', url, client)
                assert test.endswith('.t'), 'tests must end in .t'
                remote.run(args=[
                    'wget',
                    '-nc',
                    '-nv',
                    '-P',
                    client_dir,
                    '--',
                    url,
                ], )

        with parallel() as p:
            for role in clients.iterkeys():
                p.spawn(_run_tests, ctx, role)
    finally:
        for client, tests in clients.iteritems():
            (remote, ) = ctx.cluster.only(client).remotes.iterkeys()
            client_dir = '{tdir}/archive/cram.{role}'.format(tdir=testdir,
                                                             role=client)
            test_files = set([test.rsplit('/', 1)[1] for test in tests])

            # remove test files unless they failed
            for test_file in test_files:
                abs_file = os.path.join(client_dir, test_file)
                remote.run(args=[
                    'test',
                    '-f',
                    abs_file + '.err',
                    run.Raw('||'),
                    'rm',
                    '-f',
                    '--',
                    abs_file,
                ], )

            # ignore failure since more than one client may
            # be run on a host, and the client dir should be
            # non-empty if the test failed
            remote.run(args=[
                'rm',
                '-rf',
                '--',
                '{tdir}/virtualenv'.format(tdir=testdir),
                run.Raw(';'),
                'rmdir',
                '--ignore-fail-on-non-empty',
                client_dir,
            ], )
Example #18
def task(ctx, config):
    """
    Build Ceph packages. This task will automagically be run
    before the tasks that need to install packages (this is taken
    care of by the internal teuthology task).

    The config should be as follows:

    buildpackages:
      machine:
        disk: 40 # GB
        ram: 15000 # MB
        cpus: 16

    example:

    tasks:
    - buildpackages:
        machine:
          disk: 40 # GB
          ram: 15000 # MB
          cpus: 16
    - install:
    """
    log.info('Beginning buildpackages...')
    if config is None:
        config = {}
    assert isinstance(config, dict), \
        'task only accepts a dict for config not ' + str(config)
    d = os.path.join(os.path.dirname(__file__), 'buildpackages')
    os_type = misc.get_distro(ctx)
    os_version = misc.get_distro_version(ctx)
    arch = ctx.config.get('arch', 'x86_64')
    dist = LocalGitbuilderProject()._get_distro(distro=os_type,
                                                version=os_version)
    pkg_type = get_pkg_type(os_type)
    misc.sh(
        "flock --close /tmp/buildpackages " +
        "make -C " + d + " " + os.environ['HOME'] + "/.ssh_agent")
    for (flavor, tag, branch, sha1) in lookup_configs(ctx, ctx.config):
        if tag:
            sha1 = get_sha1(tag)
        elif branch:
            sha1 = get_sha1(branch)
        log.info("building flavor = " + flavor + "," +
                 " tag = " + tag + "," +
                 " branch = " + branch + "," +
                 " sha1 = " + sha1)
        target = ('ceph-' +
                  pkg_type + '-' +
                  dist + '-' +
                  arch + '-' +
                  flavor + '-' +
                  sha1)
        openstack = OpenStack()
        openstack.set_provider()
        if openstack.provider == 'ovh':
            select = '^(vps|eg)-'
        else:
            select = ''
        openstack.image(os_type, os_version) # create if it does not exist
        build_flavor = openstack.flavor(config['machine'], select)
        http_flavor = openstack.flavor({
            'disk': 40, # GB
            'ram': 1024, # MB
            'cpus': 1,
        }, select)
        cmd = (". " + os.environ['HOME'] + "/.ssh_agent ; " +
               " flock --close /tmp/buildpackages-" + sha1 +
               " make -C " + d +
               " CEPH_GIT_URL=" + teuth_config.get_ceph_git_url() +
               " CEPH_PKG_TYPE=" + pkg_type +
               " CEPH_OS_TYPE=" + os_type +
               " CEPH_OS_VERSION=" + os_version +
               " CEPH_DIST=" + dist +
               " CEPH_ARCH=" + arch +
               " CEPH_SHA1=" + sha1 +
               " CEPH_TAG=" + tag +
               " CEPH_BRANCH=" + branch +
               " CEPH_FLAVOR=" + flavor +
               " BUILD_FLAVOR=" + build_flavor +
               " HTTP_FLAVOR=" + http_flavor +
               " " + target +
               " ")
        log.info("buildpackages: " + cmd)
        misc.sh(cmd)
    teuth_config.gitbuilder_host = openstack.get_ip('packages-repository', '')
    log.info('Finished buildpackages')
Example #19
                              inherit the suite value. Any others must be
                              passed as normal while scheduling with this
                              feature. For random tests involving a facet whose
                              path ends with the '$' operator, you might want to
                              use the --seed argument to repeat them.
 -R, --rerun-statuses <statuses>
                              A comma-separated list of statuses to be used
                              with --rerun. Supported statuses are: 'dead',
                              'fail', 'pass', 'queued', 'running', 'waiting'
                              [default: fail,dead]
 --seed SEED                  A random number, mostly useful along with the
                              --rerun argument. This number can be found
                              in the output of the teuthology-suite command.
                              -1 for a random seed [default: -1].

""".format(
    default_machine_type=config.default_machine_type,
    default_results_timeout=config.results_timeout,
    default_ceph_repo=defaults('--ceph-repo', config.get_ceph_git_url()),
    default_suite_repo=defaults('--suite-repo',
                                config.get_ceph_qa_suite_git_url()),
    default_ceph_branch=defaults('--ceph-branch', 'master'),
    default_suite_branch=defaults('--suite-branch', 'master'),
    default_teuthology_branch=defaults('--teuthology-branch', 'master'),
)


def main(argv=sys.argv[1:]):
    args = docopt.docopt(doc, argv=argv)
    return teuthology.suite.main(args)
Example #20
def _run_tests(ctx, refspec, role, tests, env, subdir=None, timeout=None):
    """
    Run the individual test. Create a scratch directory and then extract the
    workunits from git. Make the executables, and then run the tests.
    Clean up (remove files created) after the tests are finished.

    :param ctx:     Context
    :param refspec: branch, sha1, or version tag used to identify this
                    build
    :param tests:   specific tests specified.
    :param env:     environment set in yaml file.  Could be None.
    :param subdir:  subdirectory set in yaml file.  Could be None
    :param timeout: If present, use the 'timeout' command on the remote host
                    to limit execution time. Must be specified by a number
                    followed by 's' for seconds, 'm' for minutes, 'h' for
                    hours, or 'd' for days. If '0' or anything that evaluates
                    to False is passed, the 'timeout' command is not used.
    """
    testdir = misc.get_testdir(ctx)
    assert isinstance(role, basestring)
    cluster, type_, id_ = misc.split_role(role)
    assert type_ == 'client'
    remote = get_remote_for_role(ctx, role)
    mnt = _client_mountpoint(ctx, cluster, id_)
    # subdir so we can remove and recreate this a lot without sudo
    if subdir is None:
        scratch_tmp = os.path.join(mnt, 'client.{id}'.format(id=id_), 'tmp')
    else:
        scratch_tmp = os.path.join(mnt, subdir)
    clonedir = '{tdir}/clone.{role}'.format(tdir=testdir, role=role)
    srcdir = '{cdir}/qa/workunits'.format(cdir=clonedir)

    git_url = teuth_config.get_ceph_git_url()
    try:
        remote.run(
            logger=log.getChild(role),
            args=[
                'rm',
                '-rf',
                clonedir,
                run.Raw('&&'),
                'git',
                'clone',
                git_url,
                clonedir,
                run.Raw('&&'),
                'cd',
                '--',
                clonedir,
                run.Raw('&&'),
                'git',
                'checkout',
                refspec,
            ],
        )
    except CommandFailedError:
        alt_git_url = git_url.replace('ceph-ci', 'ceph')
        log.info(
            "failed to check out '%s' from %s; will also try in %s",
            refspec,
            git_url,
            alt_git_url,
        )
        remote.run(
            logger=log.getChild(role),
            args=[
                'rm',
                '-rf',
                clonedir,
                run.Raw('&&'),
                'git',
                'clone',
                alt_git_url,
                clonedir,
                run.Raw('&&'),
                'cd',
                '--',
                clonedir,
                run.Raw('&&'),
                'git',
                'checkout',
                refspec,
            ],
        )

    remote.run(
        logger=log.getChild(role),
        args=[
            'cd',
            '--',
            srcdir,
            run.Raw('&&'),
            'if',
            'test',
            '-e',
            'Makefile',
            run.Raw(';'),
            'then',
            'make',
            run.Raw(';'),
            'fi',
            run.Raw('&&'),
            'find',
            '-executable',
            '-type',
            'f',
            '-printf',
            r'%P\0',
            run.Raw('>{tdir}/workunits.list.{role}'.format(tdir=testdir,
                                                           role=role)),
        ],
    )

    workunits_file = '{tdir}/workunits.list.{role}'.format(tdir=testdir,
                                                           role=role)
    workunits = sorted(misc.get_file(remote, workunits_file).split('\0'))
    assert workunits

    try:
        assert isinstance(tests, list)
        for spec in tests:
            log.info('Running workunits matching %s on %s...', spec, role)
            prefix = '{spec}/'.format(spec=spec)
            to_run = [
                w for w in workunits if w == spec or w.startswith(prefix)
            ]
            if not to_run:
                raise RuntimeError(
                    'Spec did not match any workunits: {spec!r}'.format(
                        spec=spec))
            for workunit in to_run:
                log.info('Running workunit %s...', workunit)
                args = [
                    'mkdir',
                    '-p',
                    '--',
                    scratch_tmp,
                    run.Raw('&&'),
                    'cd',
                    '--',
                    scratch_tmp,
                    run.Raw('&&'),
                    run.Raw('CEPH_CLI_TEST_DUP_COMMAND=1'),
                    run.Raw('CEPH_REF={ref}'.format(ref=refspec)),
                    run.Raw('TESTDIR="{tdir}"'.format(tdir=testdir)),
                    run.Raw('CEPH_ARGS="--cluster {0}"'.format(cluster)),
                    run.Raw('CEPH_ID="{id}"'.format(id=id_)),
                    run.Raw('PATH=$PATH:/usr/sbin'),
                    run.Raw('CEPH_BASE={dir}'.format(dir=clonedir)),
                ]
                if env is not None:
                    for var, val in env.iteritems():
                        quoted_val = pipes.quote(val)
                        env_arg = '{var}={val}'.format(var=var, val=quoted_val)
                        args.append(run.Raw(env_arg))
                args.extend([
                    'adjust-ulimits', 'ceph-coverage',
                    '{tdir}/archive/coverage'.format(tdir=testdir)
                ])
                if timeout and timeout != '0':
                    args.extend(['timeout', timeout])
                args.extend([
                    '{srcdir}/{workunit}'.format(
                        srcdir=srcdir,
                        workunit=workunit,
                    ),
                ])
                remote.run(
                    logger=log.getChild(role),
                    args=args,
                    label="workunit test {workunit}".format(workunit=workunit))
                remote.run(
                    logger=log.getChild(role),
                    args=['sudo', 'rm', '-rf', '--', scratch_tmp],
                )
    finally:
        log.info('Stopping %s on %s...', tests, role)
        remote.run(
            logger=log.getChild(role),
            args=[
                'rm',
                '-rf',
                '--',
                workunits_file,
                clonedir,
            ],
        )
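
For reference, a minimal sketch of the single shell command the args list above assembles for one workunit; every path and value below is a hypothetical example, not taken from a real job:

# Illustrative only: the command line the args list above expands to, with
# hypothetical values (mountpoint, testdir, refspec and workunit are examples).
example_cmd = (
    'mkdir -p -- /mnt/client.0/client.0/tmp && '
    'cd -- /mnt/client.0/client.0/tmp && '
    'CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=master '
    'TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" '
    'CEPH_ID="0" PATH=$PATH:/usr/sbin '
    'CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 '
    'adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage '
    'timeout 3h '
    '/home/ubuntu/cephtest/clone.client.0/qa/workunits/suites/fsstress.sh'
)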
Example #21
                              inherit the suite value. Any others must be
                              passed as normal while scheduling with this
                              feature. For random tests involving a facet
                              whose path ends with the '$' operator, you
                              might want to use the --seed argument to
                              repeat them.
 -R, --rerun-statuses <statuses>
                              A comma-separated list of statuses to be used
                              with --rerun. Supported statuses are: 'dead',
                              'fail', 'pass', 'queued', 'running', 'waiting'
                              [default: fail,dead]
 --seed SEED                  A random number, mostly useful when used along
                              with the --rerun argument. This number can be
                              found in the output of the teuthology-suite
                              command. -1 for a random seed [default: -1].

""".format(
    default_machine_type=config.default_machine_type,
    default_results_timeout=config.results_timeout,
    default_ceph_repo=defaults('--ceph-repo',
                            config.get_ceph_git_url()),
    default_suite_repo=defaults('--suite-repo',
                            config.get_ceph_qa_suite_git_url()),
    default_ceph_branch=defaults('--ceph-branch', 'master'),
    default_teuthology_branch=defaults('--teuthology-branch', 'master'),
)


def main(argv=sys.argv[1:]):
    args = docopt.docopt(doc, argv=argv)
    return teuthology.suite.main(args)
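
A minimal usage sketch for this entry point, driving docopt directly; the run name and seed are hypothetical values, and the flags are the ones documented in the usage string above:

# Illustrative only: reschedule the failed and dead jobs of a (hypothetical)
# earlier run, repeating its random facet choices via the recorded seed.
if __name__ == '__main__':
    sys.exit(main(['--rerun', 'my-earlier-run',
                   '--rerun-statuses', 'fail,dead',
                   '--seed', '42']))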
Example #22
                              passed as normal while scheduling with this
                              feature. For random tests involving a facet
                              whose path ends with the '$' operator, you
                              might want to use the --seed argument to
                              repeat them.
 -R, --rerun-statuses <statuses>
                              A comma-separated list of statuses to be used
                              with --rerun. Supported statuses are: 'dead',
                              'fail', 'pass', 'queued', 'running', 'waiting'
                              [default: fail,dead]
 --seed SEED                  A random number, mostly useful when used along
                              with the --rerun argument. This number can be
                              found in the output of the teuthology-suite
                              command. -1 for a random seed [default: -1].

""".format(
    default_machine_type=config.default_machine_type,
    default_results_timeout=config.results_timeout,
    default_ceph_repo=defaults('--ceph-repo',
                            config.get_ceph_git_url()),
    default_suite_repo=defaults('--suite-repo',
                            config.get_ceph_qa_suite_git_url()),
    default_ceph_branch=defaults('--ceph-branch', 'master'),
    default_suite_branch=defaults('--suite-branch', 'master'),
    default_teuthology_branch=defaults('--teuthology-branch', 'master'),
)


def main(argv=sys.argv[1:]):
    args = docopt.docopt(doc, argv=argv)
    return teuthology.suite.main(args)
Example #23
                              limits (server creation per minute for instance).
  -r, --rerun <name>          Attempt to reschedule a run, selecting only those
                              jobs whose status is mentioned by
                              --rerun-statuses.
                              Note that this is implemented by scheduling an
                              entirely new suite and including only jobs whose
                              descriptions match the selected ones. It does so
                              using the same logic as --filter.
                              Of all the flags that were passed when scheduling
                              the original run, the resulting one will only
                              inherit the suite value. Any others must be
                              passed as normal while scheduling with this
                              feature.
 -R, --rerun-statuses <statuses>
                              A comma-separated list of statuses to be used
                              with --rerun. Supported statuses are: 'dead',
                              'fail', 'pass', 'queued', 'running', 'waiting'
                              [default: fail,dead]

""".format(
    default_machine_type=config.default_machine_type,
    default_results_timeout=config.results_timeout,
    default_ceph_repo=config.get_ceph_git_url(),
    default_suite_repo=config.get_ceph_qa_suite_git_url(),
)


def main(argv=sys.argv[1:]):
    args = docopt.docopt(doc, argv=argv)
    return teuthology.suite.main(args)
Example #24
def _run_tests(ctx, client, tests):
    """
    Create a temp directory and wait for a client socket to be created.
    For each test, copy the executable locally and run the test.
    Remove temp directory when finished.

    :param ctx: Context
    :param client: client machine to run the test
    :param tests: list of tests to run
    """
    testdir = teuthology.get_testdir(ctx)
    log.debug('Running admin socket tests on %s', client)
    (remote,) = ctx.cluster.only(client).remotes.iterkeys()
    socket_path = '/var/run/ceph/ceph-{name}.asok'.format(name=client)
    overrides = ctx.config.get('overrides', {}).get('admin_socket', {})

    try:
        tmp_dir = os.path.join(
            testdir,
            'admin_socket_{client}'.format(client=client),
            )
        remote.run(
            args=[
                'mkdir',
                '--',
                tmp_dir,
                run.Raw('&&'),
                # wait for client process to create the socket
                'while', 'test', '!', '-e', socket_path, run.Raw(';'),
                'do', 'sleep', '1', run.Raw(';'), 'done',
                ],
            )

        for command, config in tests.iteritems():
            if config is None:
                config = {}
            teuthology.deep_merge(config, overrides)
            log.debug('Testing %s with config %s', command, str(config))

            test_path = None
            if 'test' in config:
                # hack: the git_url is always ceph-ci or ceph
                git_url = teuth_config.get_ceph_git_url()
                repo_name = 'ceph.git'
                if 'ceph-ci' in git_url:
                    repo_name = 'ceph-ci.git'
                url = config['test'].format(
                    branch=config.get('branch', 'master'),
                    repo=repo_name,
                    )
                test_path = os.path.join(tmp_dir, command)
                remote.run(
                    args=[
                        'wget',
                        '-q',
                        '-O',
                        test_path,
                        '--',
                        url,
                        run.Raw('&&'),
                        'chmod',
                        'u=rx',
                        '--',
                        test_path,
                        ],
                    )

            args = config.get('args', [])
            assert isinstance(args, list), \
                'admin socket command args must be a list'
            sock_out = _socket_command(ctx, remote, socket_path, command, args)
            if test_path is not None:
                remote.run(
                    args=[
                        test_path,
                        ],
                    stdin=json.dumps(sock_out),
                    )

    finally:
        remote.run(
            args=[
                'rm', '-rf', '--', tmp_dir,
                ],
            )
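
A minimal sketch of the tests mapping this helper consumes; the command names, branch, and URL template are hypothetical, but the {repo} and {branch} placeholders are exactly the ones substituted by the format() call above:

# Illustrative only: 'version' runs with no extra config; 'git_version'
# downloads a checker script (hypothetical URL) and feeds it the JSON output
# of the admin socket command on stdin.
example_tests = {
    'version': None,
    'git_version': {
        'branch': 'master',
        'test': 'https://example.com/{repo}/raw/{branch}/qa/check_version.py',
        'args': [],
    },
}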
Example #25
def _run_tests(ctx, refspec, role, tests, env, subdir=None, timeout=None):
    """
    Run the individual test. Create a scratch directory and then extract the
    workunits from git. Make the executables, and then run the tests.
    Clean up (remove files created) after the tests are finished.

    :param ctx:     Context
    :param refspec: branch, sha1, or version tag used to identify this
                    build
    :param role:    role (must be a client role) whose remote runs the tests
    :param tests:   list of workunit specs to run.
    :param env:     environment set in yaml file.  Could be None.
    :param subdir:  subdirectory set in yaml file.  Could be None.
    :param timeout: If present, use the 'timeout' command on the remote host
                    to limit execution time. Must be specified by a number
                    followed by 's' for seconds, 'm' for minutes, 'h' for
                    hours, or 'd' for days. If '0' or anything that evaluates
                    to False is passed, the 'timeout' command is not used.
    """
    testdir = misc.get_testdir(ctx)
    assert isinstance(role, basestring)
    cluster, type_, id_ = misc.split_role(role)
    assert type_ == 'client'
    remote = get_remote_for_role(ctx, role)
    mnt = _client_mountpoint(ctx, cluster, id_)
    # subdir so we can remove and recreate this a lot without sudo
    if subdir is None:
        scratch_tmp = os.path.join(mnt, 'client.{id}'.format(id=id_), 'tmp')
    else:
        scratch_tmp = os.path.join(mnt, subdir)
    clonedir = '{tdir}/clone.{role}'.format(tdir=testdir, role=role)
    srcdir = '{cdir}/qa/workunits'.format(cdir=clonedir)

    git_url = teuth_config.get_ceph_git_url()
    try:
        remote.run(
            logger=log.getChild(role),
            args=[
                'rm',
                '-rf',
                clonedir,
                run.Raw('&&'),
                'git',
                'clone',
                git_url,
                clonedir,
                run.Raw('&&'),
                'cd', '--', clonedir,
                run.Raw('&&'),
                'git', 'checkout', refspec,
            ],
        )
    except CommandFailedError:
        alt_git_url = git_url.replace('ceph-ci', 'ceph')
        log.info(
            "failed to check out '%s' from %s; will also try in %s",
            refspec,
            git_url,
            alt_git_url,
        )
        remote.run(
            logger=log.getChild(role),
            args=[
                'rm',
                '-rf',
                clonedir,
                run.Raw('&&'),
                'git',
                'clone',
                alt_git_url,
                clonedir,
                run.Raw('&&'),
                'cd', '--', clonedir,
                run.Raw('&&'),
                'git', 'checkout', refspec,
            ],
        )

    remote.run(
        logger=log.getChild(role),
        args=[
            'cd', '--', srcdir,
            run.Raw('&&'),
            'if', 'test', '-e', 'Makefile', run.Raw(';'), 'then', 'make', run.Raw(';'), 'fi',
            run.Raw('&&'),
            'find', '-executable', '-type', 'f', '-printf', r'%P\0',
            run.Raw('>{tdir}/workunits.list.{role}'.format(tdir=testdir, role=role)),
        ],
    )

    workunits_file = '{tdir}/workunits.list.{role}'.format(tdir=testdir, role=role)
    workunits = sorted(misc.get_file(remote, workunits_file).split('\0'))
    assert workunits

    try:
        assert isinstance(tests, list)
        for spec in tests:
            log.info('Running workunits matching %s on %s...', spec, role)
            prefix = '{spec}/'.format(spec=spec)
            to_run = [w for w in workunits if w == spec or w.startswith(prefix)]
            if not to_run:
                raise RuntimeError('Spec did not match any workunits: {spec!r}'.format(spec=spec))
            for workunit in to_run:
                log.info('Running workunit %s...', workunit)
                args = [
                    'mkdir', '-p', '--', scratch_tmp,
                    run.Raw('&&'),
                    'cd', '--', scratch_tmp,
                    run.Raw('&&'),
                    run.Raw('CEPH_CLI_TEST_DUP_COMMAND=1'),
                    run.Raw('CEPH_REF={ref}'.format(ref=refspec)),
                    run.Raw('TESTDIR="{tdir}"'.format(tdir=testdir)),
                    run.Raw('CEPH_ARGS="--cluster {0}"'.format(cluster)),
                    run.Raw('CEPH_ID="{id}"'.format(id=id_)),
                    run.Raw('PATH=$PATH:/usr/sbin'),
                    run.Raw('CEPH_BASE={dir}'.format(dir=clonedir)),
                ]
                if env is not None:
                    for var, val in env.iteritems():
                        quoted_val = pipes.quote(val)
                        env_arg = '{var}={val}'.format(var=var, val=quoted_val)
                        args.append(run.Raw(env_arg))
                args.extend([
                    'adjust-ulimits',
                    'ceph-coverage',
                    '{tdir}/archive/coverage'.format(tdir=testdir)])
                if timeout and timeout != '0':
                    args.extend(['timeout', timeout])
                args.extend([
                    '{srcdir}/{workunit}'.format(
                        srcdir=srcdir,
                        workunit=workunit,
                    ),
                ])
                remote.run(
                    logger=log.getChild(role),
                    args=args,
                    label="workunit test {workunit}".format(workunit=workunit)
                )
                remote.run(
                    logger=log.getChild(role),
                    args=['sudo', 'rm', '-rf', '--', scratch_tmp],
                )
    finally:
        log.info('Stopping %s on %s...', tests, role)
        remote.run(
            logger=log.getChild(role),
            args=[
                'rm', '-rf', '--', workunits_file, clonedir,
            ],
        )
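
A minimal usage sketch, with a hypothetical role and workunit spec; the timeout string follows the docstring above ('3h' caps each workunit at three hours):

# Illustrative only, assuming `ctx` comes from the surrounding teuthology
# task: run an example workunit spec as client.0 with a three-hour cap.
_run_tests(ctx, refspec='master', role='client.0',
           tests=['suites/fsstress.sh'], env=None, timeout='3h')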
Example #26
def _run_tests(ctx, client, tests):
    """
    Create a temp directory and wait for a client socket to be created.
    For each test, copy the executable locally and run the test.
    Remove temp directory when finished.

    :param ctx: Context
    :param client: client machine to run the test
    :param tests: list of tests to run
    """
    testdir = teuthology.get_testdir(ctx)
    log.debug('Running admin socket tests on %s', client)
    (remote, ) = ctx.cluster.only(client).remotes.keys()
    socket_path = '/var/run/ceph/ceph-{name}.asok'.format(name=client)
    overrides = ctx.config.get('overrides', {}).get('admin_socket', {})

    try:
        tmp_dir = os.path.join(
            testdir,
            'admin_socket_{client}'.format(client=client),
        )
        remote.run(
            args=[
                'mkdir',
                '--',
                tmp_dir,
                run.Raw('&&'),
                # wait for client process to create the socket
                'while',
                'test',
                '!',
                '-e',
                socket_path,
                run.Raw(';'),
                'do',
                'sleep',
                '1',
                run.Raw(';'),
                'done',
            ],
        )

        for command, config in tests.items():
            if config is None:
                config = {}
            teuthology.deep_merge(config, overrides)
            log.debug('Testing %s with config %s', command, str(config))

            test_path = None
            if 'test' in config:
                # hack: the git_url is always ceph-ci or ceph
                git_url = teuth_config.get_ceph_git_url()
                repo_name = 'ceph.git'
                if 'ceph-ci' in git_url:
                    repo_name = 'ceph-ci.git'
                url = config['test'].format(
                    branch=config.get('branch', 'master'),
                    repo=repo_name,
                )
                test_path = os.path.join(tmp_dir, command)
                remote.run(args=[
                    'wget',
                    '-q',
                    '-O',
                    test_path,
                    '--',
                    url,
                    run.Raw('&&'),
                    'chmod',
                    'u=rx',
                    '--',
                    test_path,
                ])

            args = config.get('args', [])
            assert isinstance(args, list), \
                'admin socket command args must be a list'
            sock_out = _socket_command(ctx, remote, socket_path, command, args)
            if test_path is not None:
                remote.run(
                    args=[
                        test_path,
                    ],
                    stdin=json.dumps(sock_out),
                )

    finally:
        remote.run(args=[
            'rm',
            '-rf',
            '--',
            tmp_dir,
        ])
Example #27
def download_cephadm(ctx, config, ref):
    cluster_name = config['cluster']

    if config.get('cephadm_mode') != 'cephadm-package':
        ref = config.get('cephadm_branch', ref)
        git_url = teuth_config.get_ceph_git_url()
        log.info('Downloading cephadm (repo %s ref %s)...' % (git_url, ref))
        if git_url.startswith('https://github.com/'):
            # git archive doesn't like https:// URLs, which we use with github.
            rest = git_url.split('https://github.com/', 1)[1]
            rest = re.sub(r'\.git/?$', '', rest).strip() # no .git suffix
            ctx.cluster.run(
                args=[
                    'curl', '--silent',
                    'https://raw.githubusercontent.com/' + rest + '/' + ref + '/src/cephadm/cephadm',
                    run.Raw('>'),
                    ctx.cephadm,
                    run.Raw('&&'),
                    'ls', '-l',
                    ctx.cephadm,
                ],
            )
        else:
            ctx.cluster.run(
                args=[
                    'git', 'archive',
                    '--remote=' + git_url,
                    ref,
                    'src/cephadm/cephadm',
                    run.Raw('|'),
                    'tar', '-xO', 'src/cephadm/cephadm',
                    run.Raw('>'),
                    ctx.cephadm,
                ],
            )
        # sanity-check the resulting file and set executable bit
        cephadm_file_size = '$(stat -c%s {})'.format(ctx.cephadm)
        ctx.cluster.run(
            args=[
                'test', '-s', ctx.cephadm,
                run.Raw('&&'),
                'test', run.Raw(cephadm_file_size), '-gt', run.Raw('1000'),
                run.Raw('&&'),
                'chmod', '+x', ctx.cephadm,
            ],
        )

    try:
        yield
    finally:
        log.info('Removing cluster...')
        ctx.cluster.run(args=[
            'sudo',
            ctx.cephadm,
            'rm-cluster',
            '--fsid', ctx.ceph[cluster_name].fsid,
            '--force',
        ])

        if config.get('cephadm_mode') == 'root':
            log.info('Removing cephadm ...')
            ctx.cluster.run(
                args=[
                    'rm',
                    '-rf',
                    ctx.cephadm,
                ],
            )
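
To make the github branch above concrete, a minimal sketch of the URL rewrite it performs, using an illustrative git_url and ref:

# Illustrative only: how the github branch above turns a clone URL into a
# raw.githubusercontent.com download URL for the cephadm script.
import re
git_url = 'https://github.com/ceph/ceph-ci.git'  # example value
ref = 'master'                                   # example value
rest = git_url.split('https://github.com/', 1)[1]
rest = re.sub(r'\.git/?$', '', rest).strip()     # 'ceph/ceph-ci'
url = ('https://raw.githubusercontent.com/' + rest + '/' + ref +
       '/src/cephadm/cephadm')
# url == 'https://raw.githubusercontent.com/ceph/ceph-ci/master/src/cephadm/cephadm'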
def get_sha1(ref):
    url = teuth_config.get_ceph_git_url()
    ls_remote = check_output("git ls-remote " + url + " " + ref, shell=True)
    return ls_remote.split()[0]
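
A minimal usage sketch; the ref is an illustrative branch name, resolved against the configured ceph git URL:

# Illustrative only: resolve a branch tip to its sha1.
master_sha1 = get_sha1('master')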