def task(ctx, config):
    """
    Run an autotest test on the ceph cluster.

    Only autotest client tests are supported.

    The config is a mapping from role name to list of tests to run on
    that client.

    For example::

        tasks:
        - ceph:
        - ceph-fuse: [client.0, client.1]
        - autotest:
            client.0: [dbench]
            client.1: [bonnie]

    You can also specify a list of tests to run on all clients::

        tasks:
        - ceph:
        - ceph-fuse:
        - autotest:
            all: [dbench]
    """
    assert isinstance(config, dict)
    config = teuthology.replace_all_with_clients(ctx.cluster, config)
    log.info('Setting up autotest...')
    testdir = teuthology.get_testdir(ctx)
    with parallel() as p:
        for role in config.iterkeys():
            (remote,) = ctx.cluster.only(role).remotes.keys()
            p.spawn(_download, testdir, remote)

    log.info('Making a separate scratch dir for every client...')
    for role in config.iterkeys():
        assert isinstance(role, basestring)
        PREFIX = 'client.'
        assert role.startswith(PREFIX)
        id_ = role[len(PREFIX):]
        (remote,) = ctx.cluster.only(role).remotes.iterkeys()
        mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
        scratch = os.path.join(mnt, 'client.{id}'.format(id=id_))
        remote.run(
            args=[
                'sudo',
                'install',
                '-d',
                '-m', '0755',
                '--owner={user}'.format(user='******'), #TODO
                '--',
                scratch,
                ],
            )

    with parallel() as p:
        for role, tests in config.iteritems():
            (remote,) = ctx.cluster.only(role).remotes.keys()
            p.spawn(_run_tests, testdir, remote, role, tests)
Example #2
def remove_sources(ctx, config):
    """
    Removes repo source files from each remote in ctx.

    :param ctx: the argparse.Namespace object
    :param config: the config dict
    """
    remove_sources_pkgs = {
        'deb': _remove_sources_list_deb,
        'rpm': _remove_sources_list_rpm,
    }
    with parallel() as p:
        project = config.get('project', 'ceph')
        log.info("Removing {proj} sources lists".format(
            proj=project))
        for remote in ctx.cluster.remotes.iterkeys():
            remove_fn = remove_sources_pkgs[remote.os.package_type]
            p.spawn(remove_fn, remote, project)

    with parallel() as p:
        project = 'calamari'
        log.info("Removing {proj} sources lists".format(
            proj=project))
        for remote in ctx.cluster.remotes.iterkeys():
            remove_fn = remove_sources_pkgs[remote.os.package_type]
            p.spawn(remove_fn, remote, project)
Example #3
def task(ctx, config):
    """
    Run a group of tasks in parallel.

    example::
    - parallel:
       - tasktest:
       - tasktest:

    You can also reference the job from elsewhere:

    foo:
      tasktest:
    tasks:
    - parallel:
      - foo
      - tasktest:

    That is, if the entry is not a dict, we will look it up in the top-level
    config.

    Sequential tasks and parallel tasks can be nested.
    """

    log.info('starting parallel...')
    with parallel.parallel() as p:
        for entry in config:
            if not isinstance(entry, dict):
                entry = ctx.config.get(entry, {})
            ((taskname, confg),) = entry.iteritems()
            p.spawn(_run_spawned, ctx, confg, taskname)
Example #4
def vm_setup(ctx, config):
    """
    Look for virtual machines and handle their initialization
    """
    all_tasks = [x.keys()[0] for x in ctx.config['tasks']]
    need_chef = False
    if 'chef' in all_tasks or 'kernel' in all_tasks:
        need_chef = True
    with parallel() as p:
        editinfo = os.path.join(os.path.dirname(__file__),'edit_sudoers.sh')
        for rem in ctx.cluster.remotes.iterkeys():
            mname = rem.shortname
            if misc.is_vm(mname):
                r = rem.run(args=['test', '-e', '/ceph-qa-ready',],
                        stdout=StringIO(),
                        check_status=False,)
                if r.returncode != 0:
                    p1 = subprocess.Popen(['cat', editinfo], stdout=subprocess.PIPE)
                    p2 = subprocess.Popen(
                        [
                            'ssh',
                            '-o', 'StrictHostKeyChecking=no',
                            '-t', '-t',
                            str(rem),
                            'sudo',
                            'sh'
                        ],
                        stdin=p1.stdout, stdout=subprocess.PIPE
                    )
                    _, err = p2.communicate()
                    if err:
                        log.info("Edit of /etc/sudoers failed: %s", err)
                    if need_chef:
                        p.spawn(_download_and_run_chef, rem)
def task(ctx, config):
    """
    Run fsx on an rbd image.

    Currently this requires running as client.admin
    to create a pool.

    Specify which clients to run on as a list::

      tasks:
        ceph:
        rbd_fsx:
          clients: [client.0, client.1]

    You can optionally change some properties of fsx:

      tasks:
        ceph:
        rbd_fsx:
          clients: <list of clients>
          seed: <random seed number, or 0 to use the time>
          ops: <number of operations to do>
          size: <maximum image size in bytes>
          valgrind: [--tool=<valgrind tool>]
    """
    log.info('starting rbd_fsx...')
    with parallel() as p:
        for role in config['clients']:
            p.spawn(_run_one_client, ctx, config, role)
    yield
Example #6
def _spawn_on_all_clients(ctx, refspec, tests, env, subdir):
    """
    Make a scratch directory for each client in the cluster, and then for each
    test spawn _run_tests for each role.

    :param ctx: Context
    :param refspec: branch, sha1, or version tag used to identify this
                    build
    :param tests: specific tests specified.
    :param env: environment set in yaml file.  Could be None.
    :param subdir: subdirectory set in yaml file.  Could be None.
    """
    client_generator = teuthology.all_roles_of_type(ctx.cluster, 'client')
    client_remotes = list()
    for client in client_generator:
        (client_remote,) = ctx.cluster.only('client.{id}'.format(id=client)).remotes.iterkeys()
        client_remotes.append((client_remote, 'client.{id}'.format(id=client)))
        _make_scratch_dir(ctx, "client.{id}".format(id=client), subdir)

    for unit in tests:
        with parallel() as p:
            for remote, role in client_remotes:
                p.spawn(_run_tests, ctx, refspec, role, [unit], env, subdir)

    # cleanup the generated client directories
    client_generator = teuthology.all_roles_of_type(ctx.cluster, 'client')
    for client in client_generator:
        _delete_dir(ctx, 'client.{id}'.format(id=client))
Example #7
def _spawn_on_all_clients(ctx, refspec, tests, env, subdir, timeout=None):
    """
    Make a scratch directory for each client in the cluster, and then for each
    test spawn _run_tests() for each role.

    See run_tests() for parameter documentation.
    """
    client_generator = misc.all_roles_of_type(ctx.cluster, 'client')
    client_remotes = list()

    created_mountpoint = {}
    for client in client_generator:
        (client_remote,) = ctx.cluster.only('client.{id}'.format(id=client)).remotes.iterkeys()
        client_remotes.append((client_remote, 'client.{id}'.format(id=client)))
        created_mountpoint[client] = _make_scratch_dir(ctx, "client.{id}".format(id=client), subdir)

    for unit in tests:
        with parallel() as p:
            for remote, role in client_remotes:
                p.spawn(_run_tests, ctx, refspec, role, [unit], env, subdir,
                        timeout=timeout)

    # cleanup the generated client directories
    client_generator = misc.all_roles_of_type(ctx.cluster, 'client')
    for client in client_generator:
        _delete_dir(ctx, 'client.{id}'.format(id=client), created_mountpoint[client])
Example #8
def task(ctx, config):
    """
    Execute commands on multiple roles in parallel

        tasks:
        - ceph:
        - ceph-fuse: [client.0, client.1]
        - pexec:
            client.0:
              - while true; do echo foo >> bar; done
            client.1:
              - sleep 1
              - tail -f bar
        - interactive:

    """
    log.info('Executing custom commands...')
    assert isinstance(config, dict), "task pexec got invalid config"

    sudo = False
    if 'sudo' in config:
        sudo = config['sudo']
        del config['sudo']

    if 'all' in config and len(config) == 1:
        a = config['all']
        roles = teuthology.all_roles(ctx.cluster)
        config = dict((id_, a) for id_ in roles)

    with parallel() as p:
        for role, ls in config.iteritems():
            (remote,) = ctx.cluster.only(role).remotes.iterkeys()
            p.spawn(_exec_role, remote, role, sudo, ls)
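
For orientation, a standalone sketch of the 'all' expansion performed a few lines above; the commands and role names are illustrative:

config = {'all': ['sleep 1', 'tail -f bar']}
roles = ['client.0', 'client.1']
a = config['all']
config = dict((id_, a) for id_ in roles)
# config is now {'client.0': ['sleep 1', 'tail -f bar'],
#                'client.1': ['sleep 1', 'tail -f bar']}
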
Example #9
def run_xfstests(ctx, config):
    """
    Run xfstests over specified devices.

    Warning: both the test and scratch devices specified will be
    overwritten.  Normally xfstests modifies (but does not destroy)
    the test device, but for now the run script used here re-makes
    both filesystems.

    Note: Only one instance of xfstests can run on a single host at
    a time, although this is not enforced.

    This task in its current form needs some improvement.  For
    example, it assumes all roles provided in the config are
    clients, and that the config provided is a list of key/value
    pairs.  For now please use the xfstests() interface, below.

    For example::

        tasks:
        - ceph:
        - rbd.run_xfstests:
            client.0:
                count: 2
                test_dev: 'test_dev'
                scratch_dev: 'scratch_dev'
                fs_type: 'xfs'
                tests: '1-9 11-15 17 19-21 26-28 31-34 41 45-48'
    """
    with parallel() as p:
        for role, properties in config.items():
            p.spawn(run_xfstests_one_client, ctx, role, properties)
    yield
Example #10
def vm_setup(ctx, config):
    """
    Look for virtual machines and handle their initialization
    """
    with parallel() as p:
        editinfo = os.path.join(os.path.dirname(__file__), 'edit_sudoers.sh')
        for remote in ctx.cluster.remotes.iterkeys():
            mname = re.match(".*@([^\.]*)\.?.*", str(remote)).group(1)
            if teuthology.is_vm(mname):
                r = remote.run(
                    args=[
                        'test',
                        '-e',
                        '/ceph-qa-ready',
                    ],
                    stdout=StringIO(),
                    check_status=False,
                )
                if r.exitstatus != 0:
                    p1 = subprocess.Popen(['cat', editinfo],
                                          stdout=subprocess.PIPE)
                    p2 = subprocess.Popen(
                        ['ssh', '-t', '-t',
                         str(remote), 'sudo', 'sh'],
                        stdin=p1.stdout,
                        stdout=subprocess.PIPE)
                    _, err = p2.communicate()
                    if err:
                        log.info("Edit of /etc/sudoers failed: %s", err)
                    p.spawn(_handle_vm_init, remote)
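
The subprocess pipeline above streams a local script into 'sudo sh' on the remote over ssh. A minimal standalone sketch of that idiom, with a placeholder host and script path (not teuthology-specific):

import subprocess

# Pipe a local shell script into 'sudo sh' on a remote host over ssh.
p1 = subprocess.Popen(['cat', 'edit_sudoers.sh'], stdout=subprocess.PIPE)
p2 = subprocess.Popen(
    ['ssh', '-o', 'StrictHostKeyChecking=no', '-t', '-t',
     'user@host', 'sudo', 'sh'],
    stdin=p1.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p1.stdout.close()  # let p1 receive SIGPIPE if the ssh process exits early
out, err = p2.communicate()
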
Example #11
def rh_install(ctx, config):
    """
    Installs rh ceph on all hosts in ctx.

    :param ctx: the argparse.Namespace object
    :param config: the config dict
    """
    version = config["rhbuild"]
    rh_versions = ["1.3.0", "1.3.1"]
    if version in rh_versions:
        log.info("%s is a supported version", version)
    else:
        raise RuntimeError("Unsupported RH Ceph version %s" % version)

    with parallel() as p:
        for remote in ctx.cluster.remotes.iterkeys():
            if remote.os.name == "rhel":
                log.info("Installing on RHEL node: %s", remote.shortname)
                p.spawn(rh_install_pkgs, ctx, remote, version)
            else:
                log.info("Node %s is not RHEL", remote.shortname)
                raise RuntimeError("Test requires RHEL nodes")
    try:
        yield
    finally:
        if config.get("skip_uninstall"):
            log.info("Skipping uninstall of Ceph")
        else:
            rh_uninstall(ctx=ctx, config=config)
Example #12
def _spawn_on_all_clients(ctx, refspec, tests, env, subdir, timeout=None):
    """
    Make a scratch directory for each client in the cluster, and then for each
    test spawn _run_tests() for each role.

    See run_tests() for parameter documentation.
    """
    is_client = misc.is_type('client')
    client_remotes = {}
    created_mountpoint = {}
    for remote, roles_for_host in ctx.cluster.remotes.items():
        for role in roles_for_host:
            if is_client(role):
                client_remotes[role] = remote
                created_mountpoint[role] = _make_scratch_dir(ctx, role, subdir)

    for unit in tests:
        with parallel() as p:
            for role, remote in client_remotes.items():
                p.spawn(_run_tests,
                        ctx,
                        refspec,
                        role, [unit],
                        env,
                        subdir,
                        timeout=timeout)

    # cleanup the generated client directories
    for role, _ in client_remotes.items():
        _delete_dir(ctx, role, created_mountpoint[role])
Example #13
def nuke(ctx, log, should_unlock, sync_clocks=True, reboot_all=True,
         noipmi=False):
    from teuthology.parallel import parallel
    from teuthology.lock import list_locks
    total_unnuked = {}
    targets = dict(ctx.config['targets'])
    if ctx.name:
        log.info('Checking targets against current locks')
        locks = list_locks(ctx)
        # Remove targets whose description doesn't match the archive name.
        for lock in locks:
            for target in targets:
                if target == lock['name']:
                    if ctx.name not in lock['description']:
                        del ctx.config['targets'][lock['name']]
                        log.info('Not nuking %s because description doesn\'t match', lock['name'])
    with parallel() as p:
        for target, hostkey in ctx.config['targets'].iteritems():
            p.spawn(
                nuke_one,
                ctx,
                {target: hostkey},
                log,
                should_unlock,
                sync_clocks,
                reboot_all,
                ctx.config.get('check-locks', True),
                noipmi,
                )
        for unnuked in p:
            if unnuked:
                total_unnuked.update(unnuked)
    if total_unnuked:
        log.error('Could not nuke the following targets:\n' + '\n  '.join(['targets:', ] + yaml.safe_dump(total_unnuked, default_flow_style=False).splitlines()))
Example #14
def remove_sources(ctx, config):
    remove_sources_pkgs = {"deb": _remove_sources_list_deb, "rpm": _remove_sources_list_rpm}
    log.info("Removing {proj} sources lists".format(proj=config.get("project", "ceph")))
    with parallel() as p:
        for remote in ctx.cluster.remotes.iterkeys():
            system_type = teuthology.get_system_type(remote)
            p.spawn(remove_sources_pkgs[system_type], remote, config.get("project", "ceph"))
Example #15
def _spawn_on_all_clients(ctx, refspec, tests, env, subdir, timeout=None):
    """
    Make a scratch directory for each client in the cluster, and then for each
    test spawn _run_tests() for each role.

    See run_tests() for parameter documentation.
    """
    is_client = misc.is_type('client')
    client_remotes = {}
    created_mountpoint = {}
    for remote, roles_for_host in ctx.cluster.remotes.items():
        for role in roles_for_host:
            if is_client(role):
                client_remotes[role] = remote
                created_mountpoint[role] = _make_scratch_dir(ctx, role, subdir)

    for unit in tests:
        with parallel() as p:
            for role, remote in client_remotes.items():
                p.spawn(_run_tests, ctx, refspec, role, [unit], env, subdir,
                        timeout=timeout)

    # cleanup the generated client directories
    for role, _ in client_remotes.items():
        _delete_dir(ctx, role, created_mountpoint[role])
Example #16
def purge_data(ctx):
    """
    Purge /var/lib/ceph
    """
    with parallel() as p:
        for remote in ctx.cluster.remotes.iterkeys():
            p.spawn(_purge_data, remote)
Example #17
def run_xfstests(ctx, config):
    """
    Run xfstests over specified devices.

    Warning: both the test and scratch devices specified will be
    overwritten.  Normally xfstests modifies (but does not destroy)
    the test device, but for now the run script used here re-makes
    both filesystems.

    Note: Only one instance of xfstests can run on a single host at
    a time, although this is not enforced.

    This task in its current form needs some improvement.  For
    example, it assumes all roles provided in the config are
    clients, and that the config provided is a list of key/value
    pairs.  For now please use the xfstests() interface, below.

    For example::

        tasks:
        - ceph:
        - rbd.run_xfstests:
            client.0:
                count: 2
                test_dev: 'test_dev'
                scratch_dev: 'scratch_dev'
                fs_type: 'xfs'
                tests: '1-9 11-15 17 19-21 26-28 31-34 41 45-48'
    """
    with parallel() as p:
        for role, properties in config.items():
            p.spawn(run_xfstests_one_client, ctx, role, properties)
    yield
Example #18
def rh_install(ctx, config):
    """
    Installs rh ceph on all hosts in ctx.

    :param ctx: the argparse.Namespace object
    :param config: the config dict
    """
    version = config['rhbuild']
    rh_versions = ['1.3.0', '1.3.1', '1.3.2', '2.0']
    if version in rh_versions:
        log.info("%s is a supported version", version)
    else:
        raise RuntimeError("Unsupported RH Ceph version %s" % version)

    with parallel() as p:
        for remote in ctx.cluster.remotes.iterkeys():
            if remote.os.name == 'rhel':
                log.info("Installing on RHEL node: %s", remote.shortname)
                p.spawn(rh_install_pkgs, ctx, remote, version)
            else:
                log.info("Node %s is not RHEL", remote.shortname)
                raise RuntimeError("Test requires RHEL nodes")
    try:
        yield
    finally:
        if config.get('skip_uninstall'):
            log.info("Skipping uninstall of Ceph")
        else:
            rh_uninstall(ctx=ctx, config=config)
Example #19
def vm_setup(ctx, config):
    """
    Look for virtual machines and handle their initialization
    """
    all_tasks = [x.keys()[0] for x in ctx.config["tasks"]]
    need_ansible = False
    if "kernel" in all_tasks and "ansible.cephlab" not in all_tasks:
        need_ansible = True
    ansible_hosts = set()
    with parallel():
        editinfo = os.path.join(os.path.dirname(__file__), "edit_sudoers.sh")
        for rem in ctx.cluster.remotes.iterkeys():
            if misc.is_vm(rem.shortname):
                ansible_hosts.add(rem.shortname)
                r = rem.run(args=["test", "-e", "/ceph-qa-ready"], stdout=StringIO(), check_status=False)
                if r.returncode != 0:
                    p1 = subprocess.Popen(["cat", editinfo], stdout=subprocess.PIPE)
                    p2 = subprocess.Popen(
                        ["ssh", "-o", "StrictHostKeyChecking=no", "-t", "-t", str(rem), "sudo", "sh"],
                        stdin=p1.stdout,
                        stdout=subprocess.PIPE,
                    )
                    _, err = p2.communicate()
                    if err:
                        log.error("Edit of /etc/sudoers failed: %s", err)
    if need_ansible and ansible_hosts:
        log.info("Running ansible on %s", list(ansible_hosts))
        ansible_config = dict(hosts=list(ansible_hosts))
        with ansible.CephLab(ctx, config=ansible_config):
            pass
Example #20
    def test_simultaneous_access(self):
        count = 5
        with parallel.parallel() as p:
            for i in range(count):
                p.spawn(repo_utils.enforce_repo_state, self.repo_url,
                        self.dest_path, 'master')
            for result in p:
                assert result is None
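
For orientation, a minimal sketch of the parallel() pattern these examples share, assuming teuthology is importable: spawned calls run concurrently, iterating the object yields their return values as they finish, and an exception in any spawned call is re-raised in the caller. The square() function is illustrative.

from teuthology.parallel import parallel

def square(n):
    return n * n

with parallel() as p:
    for i in range(4):
        p.spawn(square, i)
    results = list(p)  # e.g. [0, 1, 4, 9], in completion order
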
Example #21
def binaries(ctx, config):
    path = config.get("path")

    if path is None:
        # fetch Apache Hadoop from gitbuilder
        log.info("Fetching and unpacking Apache Hadoop binaries from gitbuilder...")
        apache_sha1, apache_hadoop_bindir_url = teuthology.get_ceph_binary_url(
            package="apache-hadoop",
            branch=config.get("apache_branch"),
            tag=config.get("tag"),
            sha1=config.get("sha1"),
            flavor=config.get("flavor"),
            format=config.get("format"),
            dist=config.get("dist"),
            arch=config.get("arch"),
        )
        log.info("apache_hadoop_bindir_url %s" % (apache_hadoop_bindir_url))
        ctx.summary["apache-hadoop-sha1"] = apache_sha1

        # fetch Inktank Hadoop from gitbuilder
        log.info("Fetching and unpacking Inktank Hadoop binaries from gitbuilder...")
        inktank_sha1, inktank_hadoop_bindir_url = teuthology.get_ceph_binary_url(
            package="hadoop",
            branch=config.get("inktank_branch"),
            tag=config.get("tag"),
            sha1=config.get("sha1"),
            flavor=config.get("flavor"),
            format=config.get("format"),
            dist=config.get("dist"),
            arch=config.get("arch"),
        )
        log.info("inktank_hadoop_bindir_url %s" % (inktank_hadoop_bindir_url))
        ctx.summary["inktank-hadoop-sha1"] = inktank_sha1

    else:
        raise Exception("The hadoop task does not support the path argument at present")

    with parallel() as p:
        hadoopNodes = ctx.cluster.only(teuthology.is_type("hadoop"))
        # these can happen independently
        for remote in hadoopNodes.remotes.iterkeys():
            p.spawn(_node_binaries, ctx, config, remote, inktank_hadoop_bindir_url, apache_hadoop_bindir_url)

    try:
        yield
    finally:
        log.info("Removing hadoop binaries...")
        run.wait(
            ctx.cluster.run(
                args=["rm", "-rf", "--", "{tdir}/apache_hadoop".format(tdir=teuthology.get_testdir(ctx))], wait=False
            )
        )
        run.wait(
            ctx.cluster.run(
                args=["rm", "-rf", "--", "{tdir}/inktank_hadoop".format(tdir=teuthology.get_testdir(ctx))], wait=False
            )
        )
Example #22
def purge_data(ctx):
    """
    Purge /var/lib/ceph on every remote in ctx.

    :param ctx: the argparse.Namespace object
    """
    with parallel() as p:
        for remote in ctx.cluster.remotes.iterkeys():
            p.spawn(_purge_data, remote)
Example #23
def install_packages(ctx, pkgs, config):
    """
    Installs deb or rpm packages on each remote in ctx.
    """
    install_pkgs = {"deb": _update_deb_package_list_and_install, "rpm": _update_rpm_package_list_and_install}
    with parallel() as p:
        for remote in ctx.cluster.remotes.iterkeys():
            system_type = teuthology.get_system_type(remote)
            p.spawn(install_pkgs[system_type], ctx, remote, pkgs[system_type], config)
Example #24
def remove_packages(ctx, config, pkgs):
    remove_pkgs = {
        "deb": _remove_deb,
        "rpm": _remove_rpm,
    }
    with parallel() as p:
        for remote in ctx.cluster.remotes.iterkeys():
            system_type = teuthology.get_system_type(remote)
            p.spawn(remove_pkgs[system_type], ctx, config, remote, pkgs[system_type])
Example #25
def purge_data(ctx):
    """
    Purge /var/lib/ceph on every remote in ctx.

    :param ctx: the argparse.Namespace object
    """
    with parallel() as p:
        for remote in ctx.cluster.remotes.iterkeys():
            p.spawn(_purge_data, remote)
Example #26
def task(ctx, config):
    """
    client.0:
       fio-io-size: 100g or 80% or 100m
       fio-version: 2.2.9
       formats: [2]
       features: [[layering],[striping],[layering,exclusive-lock,object-map]]
       test-clone-io: 1  # remove this option to skip creating an rbd clone and running io on it
       io-engine: "sync or rbd or any io-engine"
       rw: randrw
    client.1:
       fio-io-size: 100g
       fio-version: 2.2.9
       rw: read
       image-size: 20480

or
    all:
       fio-io-size: 400g
       rw: randrw
       formats: [2]
       features: [[layering],[striping]]
       io-engine: libaio

    Create an rbd image + device and exercise IO for the formats/features
    provided in the config file. The config can be per client, or one config
    can be used for all clients; fio jobs are run in parallel across the
    clients provided.

    """
    if config.get('all'):
        client_config = config['all']
    clients = ctx.cluster.only(teuthology.is_type('client'))
    rbd_test_dir = teuthology.get_testdir(ctx) + "/rbd_fio_test"
    for remote, role in clients.remotes.iteritems():
        if 'client_config' in locals():
            with parallel() as p:
                p.spawn(run_fio, remote, client_config, rbd_test_dir)
        else:
            for client_config in config:
                if client_config in role:
                    with parallel() as p:
                        p.spawn(run_fio, remote, config[client_config],
                                rbd_test_dir)

    yield
Example #27
def remove_sources(ctx, config):
    remove_sources_pkgs = {
        'deb': _remove_sources_list_deb,
        'rpm': _remove_sources_list_rpm,
    }
    log.info("Removing {proj} sources lists".format(proj=config.get('project', 'ceph')))
    with parallel() as p:
        for remote in ctx.cluster.remotes.iterkeys():
            system_type = teuthology.get_system_type(remote)
            p.spawn(remove_sources_pkgs[system_type], remote, config.get('project', 'ceph'))
Example #28
def setup_stage_cdn(ctx, config):
    """
    Configure internal stage cdn
    """
    rhbuild = ctx.config.get('redhat').get('rhbuild')
    if not rhbuild:
        raise ConfigError("Provide rhbuild attribute")
    teuthconfig.rhbuild = str(rhbuild)
    with parallel() as p:
        for remote in ctx.cluster.remotes.keys():
            if remote.os.name == 'rhel':
                log.info("subscribing stage cdn on : %s", remote.shortname)
                p.spawn(_subscribe_stage_cdn, remote)
    try:
        yield
    finally:
        with parallel() as p:
            for remote in ctx.cluster.remotes.keys():
                p.spawn(_unsubscribe_stage_cdn, remote)
Example #29
def task(ctx, config):
    """
    client.0:
       fio-io-size: 100g or 80% or 100m
       fio-version: 2.2.9
       formats: [2]
       features: [[layering],[striping],[layering,exclusive-lock,object-map]]
       test-clone-io: 1  # remove this option to skip creating an rbd clone and running io on it
       io-engine: "sync or rbd or any io-engine"
       rw: randrw
    client.1:
       fio-io-size: 100g
       fio-version: 2.2.9
       rw: read
       image-size: 20480

or
    all:
       fio-io-size: 400g
       rw: randrw
       formats: [2]
       features: [[layering],[striping]]
       io-engine: libaio

    Create an rbd image + device and exercise IO for the formats/features
    provided in the config file. The config can be per client, or one config
    can be used for all clients; fio jobs are run in parallel across the
    clients provided.

    """
    if config.get('all'):
        client_config = config['all']
    clients = ctx.cluster.only(teuthology.is_type('client'))
    rbd_test_dir = teuthology.get_testdir(ctx) + "/rbd_fio_test"
    for remote, role in clients.remotes.iteritems():
        if 'client_config' in locals():
            with parallel() as p:
                p.spawn(run_fio, remote, client_config, rbd_test_dir)
        else:
            for client_config in config:
                if client_config in role:
                    with parallel() as p:
                        p.spawn(run_fio, remote, config[client_config],
                                rbd_test_dir)

    yield
Example #30
def setup_stage_cdn(ctx, config):
    """
    Configure internal stage cdn
    """
    suite_path = ctx.config.get('suite_path')
    if not suite_path:
        raise ConfigError("suite_path missing")
    teuthconfig.suite_path = suite_path

    with parallel() as p:
        for remote in ctx.cluster.remotes.keys():
            if remote.os.name == 'rhel':
                log.info("subscribing stage cdn on : %s", remote.shortname)
                p.spawn(_subscribe_stage_cdn, remote)
    try:
        yield
    finally:
        with parallel() as p:
            for remote in ctx.cluster.remotes.keys():
                p.spawn(_unsubscribe_stage_cdn, remote)
Example #31
def set_cdn_repo(ctx, config):
    build = config.get('rhbuild')
    with parallel() as p:
        for remote in ctx.cluster.remotes.iterkeys():
            if remote.os.package_type == 'rpm':
                if build == '1.3.2' or build == '1.3.3':
                    p.spawn(enable_cdn_repo, remote, repos_13x)
                elif build == '2.0':
                    p.spawn(enable_cdn_repo, remote, repos_20)
                elif build == '3.0':
                    p.spawn(enable_cdn_repo, remote, repos_30)
Example #32
def _setup_latest_repo(ctx, config):
    """
    Set up repos on the Red Hat nodes
    """
    with parallel():
        for remote in ctx.cluster.remotes.keys():
            if remote.os.package_type == 'rpm':
                # pre-cleanup
                remote.run(args=['sudo', 'rm', run.Raw('/etc/yum.repos.d/rh*')],
                           check_status=False)
                remote.run(args=['sudo', 'yum', 'clean', 'metadata'])
                if not remote.os.version.startswith('8'):
                    remote.run(args=['sudo', 'yum', 'update', 'metadata'])
                # skip is required for beta iso testing
                if config.get('skip-subscription-manager', False) is True:
                    log.info("Skipping subscription-manager command")
                else:
                    remote.run(args=['sudo', 'subscription-manager', 'repos',
                                    run.Raw('--disable=*ceph*')],
                               check_status=False
                               )
                base_url = config.get('base-repo-url', '')
                installer_url = config.get('installer-repo-url', '')
                repos = ['MON', 'OSD', 'Tools', 'Calamari', 'Installer']
                installer_repos = ['Agent', 'Main', 'Installer']
                if config.get('base-rh-repos'):
                    repos = ctx.config.get('base-rh-repos')
                if config.get('installer-repos'):
                    installer_repos = ctx.config.get('installer-repos')
                # create base repo
                if base_url.startswith('http'):
                    repo_to_use = _get_repos_to_use(base_url, repos)
                    base_repo_file = NamedTemporaryFile(mode='w', delete=False)
                    _create_temp_repo_file(repo_to_use, base_repo_file)
                    remote.put_file(base_repo_file.name, base_repo_file.name)
                    remote.run(args=['sudo', 'cp', base_repo_file.name,
                                     '/etc/yum.repos.d/rh_ceph.repo'])
                    remote.run(args=['sudo', 'yum', 'clean', 'metadata'])
                if installer_url.startswith('http'):
                    irepo_to_use = _get_repos_to_use(
                        installer_url, installer_repos)
                    installer_file = NamedTemporaryFile(delete=False)
                    _create_temp_repo_file(irepo_to_use, installer_file)
                    remote.put_file(installer_file.name, installer_file.name)
                    remote.run(args=['sudo', 'cp', installer_file.name,
                                     '/etc/yum.repos.d/rh_inst.repo'])
                    remote.run(args=['sudo', 'yum', 'clean', 'metadata'])
                    if not remote.os.version.startswith('8'):
                        remote.run(args=['sudo', 'yum', 'update', 'metadata'])
            else:
                if config.get('deb-repo-url'):
                    deb_repo = config.get('deb-repo-url')
                    deb_gpg_key = config.get('deb-gpg-key', None)
                    set_deb_repo(remote, deb_repo, deb_gpg_key)
Example #33
def rh_uninstall(ctx, config):
    """
    Uninstalls rh ceph on all hosts.
    It actually spawns rh_uninstall_pkgs() on the remotes for uninstall.

    :param ctx: the argparse.Namespace object
    :param config: the config dict
    """
    with parallel() as p:
        for remote in ctx.cluster.remotes.iterkeys():
            p.spawn(rh_uninstall_pkgs, ctx, remote)
Example #34
def task(ctx, config):
    """
    Run an admin socket command, make sure the output is json, and run
    a test program on it. The test program should read json from
    stdin. This task succeeds if the test program exits with status 0.

    To run the same test on all clients::

        tasks:
        - ceph:
        - rados:
        - admin_socket:
            all:
              dump_requests:
                test: http://example.com/script

    To restrict it to certain clients::

        tasks:
        - ceph:
        - rados: [client.1]
        - admin_socket:
            client.1:
              dump_requests:
                test: http://example.com/script

    If an admin socket command has arguments, they can be specified as
    a list::

        tasks:
        - ceph:
        - rados: [client.0]
        - admin_socket:
            client.0:
              dump_requests:
                test: http://example.com/script
              help:
                test: http://example.com/test_help_version
                args: [version]

    Note that there must be a ceph client with an admin socket running
    before this task is run. The tests are parallelized at the client
    level. Tests for a single client are run serially.

    :param ctx: Context
    :param config: Configuration
    """
    assert isinstance(config, dict), \
        'admin_socket task requires a dict for configuration'
    teuthology.replace_all_with_clients(ctx.cluster, config)

    with parallel() as ptask:
        for client, tests in config.iteritems():
            ptask.spawn(_run_tests, ctx, client, tests)
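
A hypothetical example of the kind of test program the 'test:' entries point to; per the docstring it only needs to read JSON from stdin and exit 0 on success (the checks below are illustrative):

#!/usr/bin/env python
import json
import sys

data = json.load(sys.stdin)  # non-zero exit (traceback) if stdin is not valid JSON
assert isinstance(data, (dict, list))
sys.exit(0)
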
Example #36
def rh_uninstall(ctx, config):
    """
     Uninstalls rh ceph on all hosts.
     It actually spawns rh_uninstall_pkgs() on the remotes for uninstall.

    :param ctx: the argparse.Namespace object
    :param config: the config dict
    """
    with parallel() as p:
        for remote in ctx.cluster.remotes.iterkeys():
            p.spawn(rh_uninstall_pkgs, ctx, remote)
Example #37
def install_latest_rh_kernel(ctx, config):
    """
    Installs the latest z-stream kernel.
    Reboot for the new kernel to take effect.
    """
    if config is None:
        config = {}
    if config.get('skip'):
        return
    with parallel() as p:
        for remote in ctx.cluster.remotes.iterkeys():
            p.spawn(update_rh_kernel, remote)
Example #38
def install_latest_rh_kernel(ctx, config):
    """
    Installs the latest z-stream kernel.
    Reboot for the new kernel to take effect.
    """
    if config is None:
        config = {}
    if config.get('skip'):
        return
    with parallel() as p:
        for remote in ctx.cluster.remotes.keys():
            p.spawn(update_rh_kernel, remote)
Example #39
def _spawn_on_all_clients(ctx, tests):
    client_generator = teuthology.all_roles_of_type(ctx.cluster, 'client')
    client_remotes = list()
    for client in client_generator:
        (client_remote,) = ctx.cluster.only('client.{id}'.format(id=client)).remotes.iterkeys()
        client_remotes.append((client_remote, 'client.{id}'.format(id=client)))
        _make_scratch_dir(ctx, "client.{id}".format(id=client))
        
    for unit in tests:
        with parallel() as p:
            for remote, role in client_remotes:
                p.spawn(_run_tests, ctx, role, [unit])
Example #40
    def _one_or_all(self, mds_id, cb):
        """
        Call a callback for a single named MDS, or for all

        :param mds_id: MDS daemon name, or None
        :param cb: Callback taking single argument of MDS daemon name
        """
        if mds_id is None:
            with parallel() as p:
                for mds_id in self.mds_ids:
                    p.spawn(cb, mds_id)
        else:
            cb(mds_id)
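
A hedged usage sketch for the helper above; 'fs' stands in for a hypothetical object exposing _one_or_all, and restart_daemon is an illustrative callback:

def restart_daemon(mds_id):
    print("restarting mds.%s" % mds_id)

fs._one_or_all(None, restart_daemon)  # fan out to every MDS in parallel
fs._one_or_all('a', restart_daemon)   # run inline for just mds.a
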
Example #42
def _spawn_on_all_clients(ctx, tests):
    client_generator = teuthology.all_roles_of_type(ctx.cluster, 'client')
    client_remotes = list()
    for client in client_generator:
        (client_remote, ) = ctx.cluster.only(
            'client.{id}'.format(id=client)).remotes.iterkeys()
        client_remotes.append((client_remote, 'client.{id}'.format(id=client)))
        _make_scratch_dir(ctx, "client.{id}".format(id=client))

    for unit in tests:
        with parallel() as p:
            for remote, role in client_remotes:
                p.spawn(_run_tests, ctx, role, [unit])
Example #43
def remove_packages(ctx, config, pkgs):
    """
    Removes packages from each remote in ctx.

    :param ctx: the argparse.Namespace object
    :param config: the config dict
    :param pkgs: list of packages names to remove
    """
    remove_pkgs = {"deb": _remove_deb, "rpm": _remove_rpm}
    with parallel() as p:
        for remote in ctx.cluster.remotes.iterkeys():
            system_type = teuthology.get_system_type(remote)
            p.spawn(remove_pkgs[system_type], ctx, config, remote, pkgs[system_type])
Example #44
def remove_sources(ctx, config):
    """
    Removes repo source files from each remote in ctx.

    :param ctx: the argparse.Namespace object
    :param config: the config dict
    """
    remove_sources_pkgs = {"deb": _remove_sources_list_deb, "rpm": _remove_sources_list_rpm}
    log.info("Removing {proj} sources lists".format(proj=config.get("project", "ceph")))
    with parallel() as p:
        for remote in ctx.cluster.remotes.iterkeys():
            system_type = teuthology.get_system_type(remote)
            p.spawn(remove_sources_pkgs[system_type], remote, config.get("project", "ceph"))
Example #45
def install_packages(ctx, pkgs, config):
    """
    Installs packages on each remote in ctx.

    :param ctx: the argparse.Namespace object
    :param pkgs: list of packages names to install
    :param config: the config dict
    """
    install_pkgs = {"deb": _update_deb_package_list_and_install, "rpm": _update_rpm_package_list_and_install}
    with parallel() as p:
        for remote in ctx.cluster.remotes.iterkeys():
            system_type = teuthology.get_system_type(remote)
            p.spawn(install_pkgs[system_type], ctx, remote, pkgs[system_type], config)
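
For orientation, the pkgs argument is keyed by package type with a list of package names per type; a hypothetical call (package names are illustrative, ctx and config come from the surrounding task):

pkgs = {
    'deb': ['ceph', 'ceph-common', 'ceph-test'],
    'rpm': ['ceph', 'ceph-common', 'ceph-test'],
}
install_packages(ctx, pkgs, config)
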
Example #46
def install_packages(ctx, pkgs, config):
    """
    Installs deb or rpm packages on each remote in ctx.
    """
    install_pkgs = {
        "deb": _update_deb_package_list_and_install,
        "rpm": _update_rpm_package_list_and_install,
    }
    with parallel() as p:
        for remote in ctx.cluster.remotes.iterkeys():
            system_type = teuthology.get_system_type(remote)
            p.spawn(
                install_pkgs[system_type],
                ctx, remote, pkgs[system_type], config)
Example #47
def remove_sources(ctx, config):
    """
    Removes repo source files from each remote in ctx.

    :param ctx: the argparse.Namespace object
    :param config: the config dict
    """
    remove_sources_pkgs = {
        'deb': _remove_sources_list_deb,
        'rpm': _remove_sources_list_rpm,
    }
    with parallel() as p:
        project = config.get('project', 'ceph')
        log.info("Removing {proj} sources lists".format(proj=project))
        for remote in ctx.cluster.remotes.iterkeys():
            remove_fn = remove_sources_pkgs[remote.os.package_type]
            p.spawn(remove_fn, remote, project)

    with parallel() as p:
        project = 'calamari'
        log.info("Removing {proj} sources lists".format(proj=project))
        for remote in ctx.cluster.remotes.iterkeys():
            remove_fn = remove_sources_pkgs[remote.os.package_type]
            p.spawn(remove_fn, remote, project)
Example #48
def main(args):
    if (args['--verbose']):
        teuthology.log.setLevel(logging.DEBUG)

    ctx = argparse.Namespace()
    ctx.os_type = args['--os-type']
    ctx.os_version = args['--os-version']

    nodes = args['<nodes>']

    reimage_types = get_reimage_types()
    statuses = query.get_statuses(nodes)
    owner = args['--owner'] or get_user()
    unlocked = [shortname(_['name']) for _ in statuses if not _['locked']]
    if unlocked:
        log.error("Some of the nodes are not locked: %s", unlocked)
        exit(1)

    improper = [
        shortname(_['name']) for _ in statuses if _['locked_by'] != owner
    ]
    if improper:
        log.error("Some of the nodes are not owned by '%s': %s", owner,
                  improper)
        exit(1)

    irreimageable = [
        shortname(_['name']) for _ in statuses
        if _['machine_type'] not in reimage_types
    ]
    if irreimageable:
        log.error(
            "The following nodes cannot be reimaged because their machine "
            "type is not reimageable: %s", irreimageable)
        exit(1)

    def reimage_node(ctx, machine_name, machine_type):
        ops.update_nodes([machine_name], True)
        reimage(ctx, machine_name, machine_type)
        ops.update_nodes([machine_name])
        log.debug("Node '%s' reimaging is complete", machine_name)

    with parallel() as p:
        for node in statuses:
            log.debug("Start node '%s' reimaging", node['name'])
            p.spawn(reimage_node, ctx, shortname(node['name']),
                    node['machine_type'])
Example #49
def run_xfstests(ctx, config):
    """
    Run xfstests over specified devices.

    Warning: both the test and scratch devices specified will be
    overwritten.  Normally xfstests modifies (but does not destroy)
    the test device, but for now the run script used here re-makes
    both filesystems.

    Note: Only one instance of xfstests can run on a single host at
    a time, although this is not enforced.

    This task in its current form needs some improvement.  For
    example, it assumes all roles provided in the config are
    clients, and that the config provided is a list of key/value
    pairs.  For now please use the xfstests() interface, below.

    For example::

        tasks:
        - ceph:
        - rbd.run_xfstests:
            client.0:
                count: 2
                test_dev: 'test_dev'
                scratch_dev: 'scratch_dev'
                fs_type: 'xfs'
                tests: 'generic/100 xfs/003 xfs/005 xfs/006 generic/015'
                exclude:
                - generic/42
                randomize: true
    """
    with parallel() as p:
        for role, properties in config.items():
            p.spawn(run_xfstests_one_client, ctx, role, properties)
        exc_info = None
        while True:
            try:
                p.next()
            except StopIteration:
                break
            except:
                exc_info = sys.exc_info()
        if exc_info:
            raise exc_info[0], exc_info[1], exc_info[2]
    yield
Example #50
def run_xfstests(ctx, config):
    """
    Run xfstests over specified devices.

    Warning: both the test and scratch devices specified will be
    overwritten.  Normally xfstests modifies (but does not destroy)
    the test device, but for now the run script used here re-makes
    both filesystems.

    Note: Only one instance of xfstests can run on a single host at
    a time, although this is not enforced.

    This task in its current form needs some improvement.  For
    example, it assumes all roles provided in the config are
    clients, and that the config provided is a list of key/value
    pairs.  For now please use the xfstests() interface, below.

    For example::

        tasks:
        - ceph:
        - rbd.run_xfstests:
            client.0:
                count: 2
                test_dev: 'test_dev'
                scratch_dev: 'scratch_dev'
                fs_type: 'xfs'
                tests: 'generic/100 xfs/003 xfs/005 xfs/006 generic/015'
                exclude:
                - generic/42
                randomize: true
    """
    with parallel() as p:
        for role, properties in config.items():
            p.spawn(run_xfstests_one_client, ctx, role, properties)
        exc_info = None
        while True:
            try:
                p.next()
            except StopIteration:
                break
            except:
                exc_info = sys.exc_info()
        if exc_info:
            raise exc_info[0], exc_info[1], exc_info[2]
    yield
Example #51
def _spawn_on_all_clients(ctx, refspec, tests, env, subdir):
    client_generator = teuthology.all_roles_of_type(ctx.cluster, 'client')
    client_remotes = list()
    for client in client_generator:
        (client_remote,) = ctx.cluster.only('client.{id}'.format(id=client)).remotes.iterkeys()
        client_remotes.append((client_remote, 'client.{id}'.format(id=client)))
        _make_scratch_dir(ctx, "client.{id}".format(id=client), subdir)

    for unit in tests:
        with parallel() as p:
            for remote, role in client_remotes:
                p.spawn(_run_tests, ctx, refspec, role, [unit], env, subdir)

    # cleanup the generated client directories
    client_generator = teuthology.all_roles_of_type(ctx.cluster, 'client')
    for client in client_generator:
        _delete_dir(ctx, 'client.{id}'.format(id=client), subdir)
Example #52
def remove_packages(ctx, config, pkgs):
    """
    Removes packages from each remote in ctx.

    :param ctx: the argparse.Namespace object
    :param config: the config dict
    :param pkgs: list of packages names to remove
    """
    remove_pkgs = {
        "deb": deb._remove,
        "rpm": rpm._remove,
    }
    with parallel() as p:
        for remote in ctx.cluster.remotes.iterkeys():
            system_type = teuthology.get_system_type(remote)
            p.spawn(remove_pkgs[system_type], ctx, config, remote,
                    pkgs[system_type])
Example #53
def install_packages(ctx, pkgs, config):
    """
    Installs packages on each remote in ctx.

    :param ctx: the argparse.Namespace object
    :param pkgs: list of packages names to install
    :param config: the config dict
    """
    install_pkgs = {
        "deb": _update_deb_package_list_and_install,
        "rpm": _update_rpm_package_list_and_install,
    }
    with parallel() as p:
        for remote in ctx.cluster.remotes.iterkeys():
            system_type = teuthology.get_system_type(remote)
            p.spawn(install_pkgs[system_type], ctx, remote, pkgs[system_type],
                    config)
Example #54
def task(ctx, config):
    """
    Run all ceph workunits found under the specified path.

    For example::

        tasks:
        - ceph:
        - cfuse: [client.0]
        - workunit:
            client.0: [direct_io, xattrs.sh]
            client.1: [snaps]

    You can also run a list of workunits on all clients::
        tasks:
        - ceph:
        - cfuse:
        - workunit:
            all: [direct_io, xattrs.sh, snaps]

    If you have an "all" section it will run all the workunits
    on each client simultaneously, AFTER running any workunits specified
    for individual clients. (This prevents unintended simultaneous runs.)
    """
    assert isinstance(config, dict)

    log.info('Making a separate scratch dir for every client...')
    for role in config.iterkeys():
        assert isinstance(role, basestring)
        if role == "all":
            continue
        PREFIX = 'client.'
        assert role.startswith(PREFIX)
        _make_scratch_dir(ctx, role)
    all_spec = False  #is there an all grouping?
    with parallel() as p:
        for role, tests in config.iteritems():
            if role != "all":
                p.spawn(_run_tests, ctx, role, tests)
            else:
                all_spec = True

    if all_spec:
        all_tasks = config["all"]
        _spawn_on_all_clients(ctx, all_tasks)
Example #55
def _spawn_on_all_clients(ctx, refspec, tests, env, subdir):
    client_generator = teuthology.all_roles_of_type(ctx.cluster, 'client')
    client_remotes = list()
    for client in client_generator:
        (client_remote, ) = ctx.cluster.only(
            'client.{id}'.format(id=client)).remotes.iterkeys()
        client_remotes.append((client_remote, 'client.{id}'.format(id=client)))
        _make_scratch_dir(ctx, "client.{id}".format(id=client), subdir)

    for unit in tests:
        with parallel() as p:
            for remote, role in client_remotes:
                p.spawn(_run_tests, ctx, refspec, role, [unit], env, subdir)

    # cleanup the generated client directories
    client_generator = teuthology.all_roles_of_type(ctx.cluster, 'client')
    for client in client_generator:
        _delete_dir(ctx, 'client.{id}'.format(id=client), subdir)
Example #56
def remove_sources(ctx, config):
    """
    Removes repo source files from each remote in ctx.

    :param ctx: the argparse.Namespace object
    :param config: the config dict
    """
    remove_sources_pkgs = {
        'deb': _remove_sources_list_deb,
        'rpm': _remove_sources_list_rpm,
    }
    log.info("Removing {proj} sources lists".format(
        proj=config.get('project', 'ceph')))
    with parallel() as p:
        for remote in ctx.cluster.remotes.iterkeys():
            system_type = teuthology.get_system_type(remote)
            p.spawn(remove_sources_pkgs[
                    system_type], remote, config.get('project', 'ceph'))
Example #57
def nuke(ctx, should_unlock, sync_clocks=True, noipmi=False, keep_logs=False, should_reboot=True):
    if 'targets' not in ctx.config:
        return
    total_unnuked = {}
    targets = dict(ctx.config['targets'])
    if ctx.name:
        log.info('Checking targets against current locks')
        locks = list_locks()
        # Remove targets whose description doesn't match the archive name.
        for lock in locks:
            for target in targets:
                if target == lock['name']:
                    if ctx.name not in lock['description']:
                        del ctx.config['targets'][lock['name']]
                        log.info(
                            "Not nuking %s because description doesn't match",
                            lock['name'])
                    elif lock.get('up') is False:
                        del ctx.config['targets'][lock['name']]
                        log.info(
                            "Not nuking %s because it is down",
                            lock['name'])
    with parallel() as p:
        for target, hostkey in ctx.config['targets'].items():
            p.spawn(
                nuke_one,
                ctx,
                {target: hostkey},
                should_unlock,
                sync_clocks,
                ctx.config.get('check-locks', True),
                noipmi,
                keep_logs,
                should_reboot,
            )
        for unnuked in p:
            if unnuked:
                total_unnuked.update(unnuked)
    if total_unnuked:
        log.error('Could not nuke the following targets:\n' +
                  '\n  '.join(['targets:', ] +
                              yaml.safe_dump(
                                  total_unnuked,
                                  default_flow_style=False).splitlines()))
Example #58
def _setup_latest_repo(ctx, config):
    """
    Set up repos on the Red Hat nodes
    """
    with parallel():
        for remote in ctx.cluster.remotes.keys():
            if remote.os.package_type == 'rpm':
                remote.run(args=[
                    'sudo', 'subscription-manager', 'repos',
                    run.Raw('--disable=*ceph*')
                ])
                base_url = config.get('base-repo-url', '')
                installer_url = config.get('installer-repo-url', '')
                repos = ['MON', 'OSD', 'Tools', 'Calamari', 'Installer']
                installer_repos = ['Agent', 'Main', 'Installer']
                if config.get('base-rh-repos'):
                    repos = ctx.config.get('base-rh-repos')
                if config.get('installer-repos'):
                    installer_repos = ctx.config.get('installer-repos')
                # create base repo
                if base_url.startswith('http'):
                    repo_to_use = _get_repos_to_use(base_url, repos)
                    base_repo_file = NamedTemporaryFile(delete=False)
                    _create_temp_repo_file(repo_to_use, base_repo_file)
                    remote.put_file(base_repo_file.name, base_repo_file.name)
                    remote.run(args=[
                        'sudo', 'cp', base_repo_file.name,
                        '/etc/yum.repos.d/rh_ceph.repo'
                    ])
                if installer_url.startswith('http'):
                    irepo_to_use = _get_repos_to_use(installer_url,
                                                     installer_repos)
                    installer_file = NamedTemporaryFile(delete=False)
                    _create_temp_repo_file(irepo_to_use, installer_file)
                    remote.put_file(installer_file.name, installer_file.name)
                    remote.run(args=[
                        'sudo', 'cp', installer_file.name,
                        '/etc/yum.repos.d/rh_inst.repo'
                    ])
            else:
                if config.get('deb-repo-url'):
                    deb_repo = config.get('deb-repo-url')
                    deb_gpg_key = config.get('deb-gpg-key', None)
                    set_deb_repo(remote, deb_repo, deb_gpg_key)
Example #59
def task(ctx, config):
    """
    Run a group of tasks in parallel.

    example::

        - parallel:
           - tasktest:
           - tasktest:

    You can also define tasks in a top-level section outside of
    'tasks:', and reference them here.

    The referenced section must contain a list of tasks to run
    sequentially, or a single task as a dict. The latter is only
    available for backwards compatibility with existing suites::

        tasks:
        - parallel:
          - tasktest: # task inline
          - foo       # reference to top-level 'foo' section
          - bar       # reference to top-level 'bar' section
        foo:
        - tasktest1:
        - tasktest2:
        bar:
          tasktest: # note the list syntax from 'foo' is preferred

    That is, if the entry is not a dict, we will look it up in the top-level
    config.

    Sequential tasks and parallel tasks can be nested.
    """

    log.info('starting parallel...')
    with parallel.parallel() as p:
        for entry in config:
            if not isinstance(entry, dict):
                entry = ctx.config.get(entry, {})
                # support the usual list syntax for tasks
                if isinstance(entry, list):
                    entry = dict(sequential=entry)
            ((taskname, confg), ) = entry.items()
            p.spawn(_run_spawned, ctx, confg, taskname)