Code example #1
File: ceph_deploy.py Project: bkmcfarland/ceph
def single_node_test(ctx, config):
    """
    - ceph-deploy.single_node_test: null

    #rhbuild testing
    - ceph-deploy.single_node_test:
        rhbuild: 1.2.3

    """
    log.info("Testing ceph-deploy on single node")
    if config is None:
        config = {}
    overrides = ctx.config.get('overrides', {})
    teuthology.deep_merge(config, overrides.get('ceph-deploy', {}))

    if config.get('rhbuild'):
        log.info("RH Build, Skip Download")
        with contextutil.nested(
            lambda: cli_test(ctx=ctx, config=config),
        ):
            yield
    else:
        with contextutil.nested(
            lambda: install_fn.ship_utilities(ctx=ctx, config=None),
            lambda: download_ceph_deploy(ctx=ctx, config=config),
            lambda: cli_test(ctx=ctx, config=config),
        ):
            yield
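
All of the examples collected here follow the same pattern: a task written as a generator whose sub-steps are composed with contextutil.nested(). The helper takes zero-argument callables, enters the context manager each one returns in order, and unwinds them in reverse on exit or error. The sketch below illustrates that composition pattern with the standard library's contextlib.ExitStack; it is a minimal stand-in for illustration, not teuthology's actual contextutil module.

import contextlib

@contextlib.contextmanager
def nested(*managers):
    # Enter each callable's context manager in order; ExitStack unwinds
    # them in reverse order when the block exits, even on error.
    with contextlib.ExitStack() as stack:
        yield [stack.enter_context(factory()) for factory in managers]

@contextlib.contextmanager
def step(name):
    print('setup', name)
    try:
        yield name
    finally:
        print('teardown', name)

if __name__ == '__main__':
    with nested(lambda: step('ship_utilities'), lambda: step('cli_test')):
        print('task body runs here')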
Code example #2
File: iscsi.py Project: hughsaunders/teuthology
def task(ctx, config):
    """
    handle iscsi admin login after a tgt connection has been established.

    Assume a default host client of client.0 and a sending client of
    client.0 if not specified otherwise.

    Sample tests could be:

    iscsi:

        This sets up a tgt link from client.0 to client.0

    iscsi: [client.1, client.2]

        This sets up a tgt link from client.1 to client.0 and a tgt link
        from client.2 to client.0

    iscsi:
        client.0: client.1
        client.1: client.0

        This sets up a tgt link from client.0 to client.1 and a tgt link
        from client.1 to client.0

    Note that the iscsi image name is iscsi-image, so this only works
    for one image being tested at any one time.
    """
    try:
        pairs = config.items()
    except AttributeError:
        pairs = [('client.0', 'client.0')]
    with contextutil.nested(
            lambda: start_iscsi_initiators(ctx=ctx, tgt_link=pairs),):
        yield
Code example #3
File: ssh_keys.py Project: AsherBond/teuthology
def task(ctx, config):
    """
    Creates a set of RSA keys, distributes the same key pair
    to all hosts listed in ctx.cluster, and adds every host
    to every other host's authorized_keys list.

    During cleanup it will delete .ssh/id_rsa, .ssh/id_rsa.pub 
    and remove the entries in .ssh/authorized_keys while leaving
    pre-existing entries in place. 
    """

    if config is None:
        config = {}
    assert isinstance(config, dict), \
        "task hadoop only supports a dictionary for configuration"

    # this does not need to do cleanup and does not depend on 
    # ctx, so I'm keeping it outside of the nested calls
    public_key_string, private_key_string = generate_keys()

    with contextutil.nested(
        lambda: tweak_ssh_config(ctx, config),
        lambda: push_keys_to_host(ctx, config, public_key_string, private_key_string),
        #lambda: tweak_ssh_config(ctx, config),
        ):
        yield
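
generate_keys() itself is not shown in this snippet. Below is a minimal sketch of what such a helper could look like, assuming the paramiko library is available; the real implementation in ssh_keys.py may differ.

import io

import paramiko

def generate_keys(bits=2048):
    # Hypothetical sketch: build one RSA key pair and return it as
    # (public_key_string, private_key_string), matching the unpacking above.
    key = paramiko.RSAKey.generate(bits)
    public_key_string = 'ssh-rsa {}'.format(key.get_base64())
    buf = io.StringIO()
    key.write_private_key(buf)  # PEM-encoded private key text
    return public_key_string, buf.getvalue()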
Code example #4
File: devstack.py Project: tsg-/ceph-qa-suite
def task(ctx, config):
    if config is None:
        config = {}
    if not isinstance(config, dict):
        raise TypeError("config must be a dict")
    with nested(lambda: install(ctx=ctx, config=config), lambda: smoke(ctx=ctx, config=config)):
        yield
Code example #5
File: ceph_deploy.py Project: bkmcfarland/ceph
def task(ctx, config):
    """
    Set up and tear down a Ceph cluster.

    For example::

        tasks:
        - install:
             extras: yes
        - ssh_keys:
        - ceph-deploy:
             branch:
                stable: bobtail
             mon_initial_members: 1
             only_mon: true
             keep_running: true

        tasks:
        - install:
             extras: yes
        - ssh_keys:
        - ceph-deploy:
             branch:
                dev: master
             conf:
                mon:
                   debug mon = 20

        tasks:
        - install:
             extras: yes
        - ssh_keys:
        - ceph-deploy:
             branch:
                testing:
             dmcrypt: yes
             separate_journal_disk: yes

    """
    if config is None:
        config = {}

    assert isinstance(config, dict), \
        "task ceph-deploy only supports a dictionary for configuration"

    overrides = ctx.config.get('overrides', {})
    teuthology.deep_merge(config, overrides.get('ceph-deploy', {}))

    if config.get('branch') is not None:
        assert isinstance(
            config['branch'], dict), 'branch must be a dictionary'

    log.info('task ceph-deploy with config ' + str(config))

    with contextutil.nested(
        lambda: install_fn.ship_utilities(ctx=ctx, config=None),
        lambda: download_ceph_deploy(ctx=ctx, config=config),
        lambda: build_ceph_cluster(ctx=ctx, config=config),
    ):
        yield
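
Several of these tasks fold values from an overrides section into their own config with teuthology.deep_merge(config, overrides.get('ceph-deploy', {})). The simplified stand-in below shows the recursive-merge behaviour relied on here; the real teuthology helper also handles lists and None, and in this sketch values from the second argument win on conflicts.

def deep_merge(a, b):
    # Merge dict b into dict a in place; nested dicts are merged key by
    # key rather than replaced wholesale.
    for key, value in b.items():
        if isinstance(value, dict) and isinstance(a.get(key), dict):
            deep_merge(a[key], value)
        else:
            a[key] = value
    return a

config = {'branch': {'stable': 'bobtail'}, 'conf': {'mon': {'debug mon': 20}}}
deep_merge(config, {'conf': {'mon': {'debug ms': 1}}})
assert config['conf']['mon'] == {'debug mon': 20, 'debug ms': 1}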
Code example #6
File: ceph-deploy.py Project: AsherBond/teuthology
def task(ctx, config):
    """
    Set up and tear down a Ceph cluster.

    For example::

        tasks:
        - install:
             extras: yes
        - ssh_keys:
        - ceph-deploy:
             branch:
                stable: bobtail
             mon_initial_members: 1

        tasks:
        - install:
             extras: yes
        - ssh_keys:
        - ceph-deploy:
             branch:
                dev: master
             conf:
                mon:
                   debug mon = 20

        tasks:
        - install:
             extras: yes
        - ssh_keys:
        - ceph-deploy:
             branch:
                testing:
    """
    if config is None:
        config = {}

    overrides = ctx.config.get('overrides', {})
    teuthology.deep_merge(config, overrides.get('ceph-deploy', {}))

    assert isinstance(config, dict), \
        "task ceph-deploy only supports a dictionary for configuration"

    if config.get('branch') is not None:
        assert isinstance(config['branch'], dict), 'branch must be a dictionary'

    with contextutil.nested(
         lambda: ceph_fn.ship_utilities(ctx=ctx, config=None),
         lambda: download_ceph_deploy(ctx=ctx, config=config),
         lambda: build_ceph_cluster(ctx=ctx, config=dict(
                 conf=config.get('conf', {}),
                 branch=config.get('branch',{}),
                 mon_initial_members=config.get('mon_initial_members', None),
                 test_mon_destroy=config.get('test_mon_destroy', None),
                 )),
        ):
        yield
Code example #7
File: rgw.py Project: calebamiles/teuthology
def task(ctx, config):
    """
    Spin up apache configured to run a rados gateway.
    Only one should be run per machine, since it uses a hard-coded port for now.

    For example, to run rgw on all clients::

        tasks:
        - ceph:
        - rgw:

    To only run on certain clients::

        tasks:
        - ceph:
        - rgw: [client.0, client.3]

    or

        tasks:
        - ceph:
        - rgw:
            client.0:
            client.3:

    To run radosgw through valgrind:

        tasks:
        - ceph:
        - rgw:
            client.0:
              valgrind: [--tool=memcheck]
            client.3:
              valgrind: [--tool=memcheck]

    """
    if config is None:
        config = dict(('client.{id}'.format(id=id_), None)
                  for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client'))
    elif isinstance(config, list):
        config = dict((name, None) for name in config)

    for _, roles_for_host in ctx.cluster.remotes.iteritems():
        running_rgw = False
        for role in roles_for_host:
            if role in config.iterkeys():
                assert not running_rgw, "Only one client per host can run rgw."
                running_rgw = True

    with contextutil.nested(
        lambda: create_dirs(ctx=ctx, config=config),
        lambda: ship_config(ctx=ctx, config=config),
        lambda: start_rgw(ctx=ctx, config=config),
        lambda: start_apache(ctx=ctx, config=config),
        ):
        yield
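
The config normalization at the top of this task (None means "all clients", a list is turned into a dict whose values are None) recurs in most of the examples on this page; some of them spell it dict.fromkeys(config), which is equivalent to the generator expression used here. A standalone illustration of the same idea:

def normalize_config(config, all_clients):
    # None -> every client; list -> dict keyed by client with None values;
    # anything else (already a dict) is returned unchanged.
    if config is None:
        return dict.fromkeys(all_clients)
    if isinstance(config, list):
        return dict.fromkeys(config)
    return config

assert normalize_config(None, ['client.0', 'client.3']) == {'client.0': None, 'client.3': None}
assert normalize_config(['client.3'], ['client.0', 'client.3']) == {'client.3': None}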
Code example #8
File: dnsmasq.py Project: Carudy/ceph
def task(ctx, config):
    """
    Configures dnsmasq to add cnames for teuthology remotes. The task expects a
    dictionary, where each key is a role. If all cnames for that role use the
    same address as that role, the cnames can be given as a list. For example,
    this entry configures dnsmasq on the remote associated with client.0, adding
    two cnames for the ip address associated with client.0:

        - dnsmasq:
            client.0:
            - client0.example.com
            - c0.example.com

    If the addresses do not all match the given role, a dictionary can be given
    to specify the ip address by its target role. For example:

        - dnsmasq:
            client.0:
              client.0.example.com: client.0
              client.1.example.com: client.1
    """
    # apply overrides
    overrides = config.get('overrides', {})
    misc.deep_merge(config, overrides.get('dnsmasq', {}))

    # multiple roles may map to the same remote, so collect names by remote
    remote_names = {}
    for role, cnames in config.iteritems():
        remote = get_remote_for_role(ctx, role)
        if remote is None:
            raise ConfigError('no remote for role %s' % role)

        names = remote_names.get(remote, {})

        if isinstance(cnames, list):
            # when given a list of cnames, point to local ip
            for cname in cnames:
                names[cname] = remote.ip_address
        elif isinstance(cnames, dict):
            # when given a dict, look up the remote ip for each
            for cname, client in cnames.iteritems():
                r = get_remote_for_role(ctx, client)
                if r is None:
                    raise ConfigError('no remote for role %s' % client)
                names[cname] = r.ip_address

        remote_names[remote] = names

    # run a subtask for each unique remote
    subtasks = []
    for remote, cnames in remote_names.iteritems():
        subtasks.extend([ lambda r=remote, cn=cnames: setup_dnsmasq(r, cn) ])

    with contextutil.nested(*subtasks):
        yield
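
Note the default arguments in the subtask lambdas above (lambda r=remote, cn=cnames: ...). They freeze the current loop values at definition time; without them every lambda would close over the same variables and see only the values from the last iteration by the time contextutil.nested invokes it. A small self-contained demonstration of the difference:

def late_bound(items):
    # Every lambda closes over the same 'item' variable, so all of them
    # return whatever it held on the final iteration.
    return [lambda: item for item in items]

def early_bound(items):
    # A default argument captures the value of 'item' at definition time,
    # which is what the dnsmasq subtask list relies on.
    return [lambda it=item: it for item in items]

assert [f() for f in late_bound(['a', 'b', 'c'])] == ['c', 'c', 'c']
assert [f() for f in early_bound(['a', 'b', 'c'])] == ['a', 'b', 'c']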
Code example #9
File: install.py Project: kawaguchi-s/teuthology
def task(ctx, config):
    """
    Install packages for a given project.

    tasks:
    - install:
        project: ceph
        branch: bar
    - install:
        project: samba
        branch: foo
        extra_packages: ['samba']

    Overrides are project specific:

    overrides:
      install:
        ceph:
          sha1: ...

    :param ctx: the argparse.Namespace object
    :param config: the config dict
    """
    if config is None:
        config = {}
    assert isinstance(config, dict), \
        "task install only supports a dictionary for configuration"

    project, = config.get('project', 'ceph'),
    log.debug('project %s' % project)
    overrides = ctx.config.get('overrides')
    if overrides:
        install_overrides = overrides.get('install', {})
        teuthology.deep_merge(config, install_overrides.get(project, {}))
    log.debug('config %s' % config)

    flavor = get_flavor(config)
    log.info("Using flavor: %s", flavor)

    ctx.summary['flavor'] = flavor

    with contextutil.nested(
        lambda: install(ctx=ctx, config=dict(
            branch=config.get('branch'),
            tag=config.get('tag'),
            sha1=config.get('sha1'),
            flavor=flavor,
            extra_packages=config.get('extra_packages', []),
            extras=config.get('extras', None),
            wait_for_package=ctx.config.get('wait_for_package', False),
            project=project,
        )),
        lambda: ship_utilities(ctx=ctx, config=None),
    ):
        yield
Code example #10
File: tgt.py Project: Abhishekvrshny/ceph-qa-suite
def task(ctx, config):
    """
    Start up tgt.

    To start on all clients::

        tasks:
        - ceph:
        - tgt:

    To start on certain clients::

        tasks:
        - ceph:
        - tgt: [client.0, client.3]

    or

        tasks:
        - ceph:
        - tgt:
            client.0:
            client.3:

    An image block size can also be specified::

        tasks:
        - ceph:
        - tgt:
            image_size: 20480

    The general flow of things here is:
        1. Find clients on which tgt is supposed to run (start_tgtd)
        2. Remotely start up tgt daemon
    On cleanup:
        3. Stop tgt daemon

    The iscsi administration is handled by the iscsi task.
    """
    if config:
        config = {key : val for key, val in config.items()
                if key.startswith('client')}
    # config at this point should only contain keys starting with 'client'
    start_tgtd = []
    remotes = ctx.cluster.only(teuthology.is_type('client')).remotes
    log.info(remotes)
    if not config:
        start_tgtd = ['client.{id}'.format(id=id_)
            for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
    else:
        start_tgtd = config
    log.info(start_tgtd)
    with contextutil.nested(
            lambda: start_tgt_remotes(ctx=ctx, start_tgtd=start_tgtd),):
        yield
Code example #11
File: blktrace.py Project: AsherBond/teuthology
def task(ctx, config):
    if config is None:
        config = dict(('client.{id}'.format(id=id_), None)
                  for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client'))
    elif isinstance(config, list):
        config = dict.fromkeys(config)

    with contextutil.nested(
        lambda: setup(ctx=ctx, config=config),
        lambda: execute(ctx=ctx, config=config),
        ):
        yield
Code example #12
File: rgw-logsocket.py Project: athanatos/teuthology
def task(ctx, config):
    """
    Run some s3-tests suite against rgw, verify opslog socket returns data

    Must restrict testing to a particular client::

        tasks:
        - ceph:
        - rgw: [client.0]
        - s3tests: [client.0]

    To pass extra arguments to nose (e.g. to run a certain test)::

        tasks:
        - ceph:
        - rgw: [client.0]
        - s3tests:
            client.0:
              extra_args: ['test_s3:test_object_acl_grand_public_read']
            client.1:
              extra_args: ['--exclude', 'test_100_continue']
    """
    assert (
        config is None or isinstance(config, list) or isinstance(config, dict)
    ), "task s3tests only supports a list or dictionary for configuration"
    all_clients = ["client.{id}".format(id=id_) for id_ in teuthology.all_roles_of_type(ctx.cluster, "client")]
    if config is None:
        config = all_clients
    if isinstance(config, list):
        config = dict.fromkeys(config)
    clients = config.keys()

    overrides = ctx.config.get("overrides", {})
    # merge each client section, not the top level.
    for (client, cconf) in config.iteritems():
        teuthology.deep_merge(cconf, overrides.get("rgw-logsocket", {}))

    log.debug("config is %s", config)

    s3tests_conf = {}
    for client in clients:
        s3tests_conf[client] = ConfigObj(
            indent_type="",
            infile={"DEFAULT": {"port": 7280, "is_secure": "no"}, "fixtures": {}, "s3 main": {}, "s3 alt": {}},
        )

    with contextutil.nested(
        lambda: download(ctx=ctx, config=config),
        lambda: create_users(ctx=ctx, config=dict(clients=clients, s3tests_conf=s3tests_conf)),
        lambda: configure(ctx=ctx, config=dict(clients=config, s3tests_conf=s3tests_conf)),
        lambda: run_tests(ctx=ctx, config=config),
    ):
        yield
Code example #13
File: iscsi.py Project: AlfredChenxf/teuthology
def start_iscsi_initiators(ctx, tgt_link):
    """
    This is the sub-task that assigns an rbd to an iscsiadm control and
    performs a login (thereby creating a /dev/sd device).  It performs
    a logout when finished.
    """
    remotes = ctx.cluster.only(teuthology.is_type('client')).remotes
    tgtd_list = []
    for role, host in tgt_link:
        rem = _get_remote(remotes, role)
        rem_name = _get_remote_name(remotes, host)
        rem.run(
            args=[
                'sudo',
                'iscsiadm',
                '-m',
                'discovery',
                '-t',
                'st',
                '-p',
                rem_name,
        ])
        proc = rem.run(
            args=[
                'sudo',
                'iscsiadm',
                '-m',
                'node',
                '--login',
        ])
        if proc.exitstatus == 0:
            tgtd_list.append((rem, rem_name))
        general_io_test(ctx, rem, host)
    try:
        with contextutil.nested(
            lambda: generic_mkfs(ctx=ctx, config={host: {'fs_type': 'xfs'}},
                    devname_rtn=tgt_devname_rtn),
            lambda: generic_mount(ctx=ctx, config={host: None},
                    devname_rtn=tgt_devname_rtn),
            ):
            yield
    finally:
        for rem_info in tgtd_list:
            rem = rem_info[0]
            rem_name = rem_info[1]
            rem.run(
                args=[
                    'sudo',
                    'iscsiadm',
                    '-m',
                    'node',
                    '--logout',
            ])
Code example #14
def task(ctx, config):
    """
    Do the setup of a calamari server.

    - calamari_setup:
        version: 'v80.1'
        ice_tool_dir: <directory>
        iceball_location: <directory>

    Options are:

    version -- ceph version we are testing against (defaults to 80.1)
    ice_tool_dir -- optional local directory where ice-tool exists or will
                    be loaded (defaults to src in home directory)
    ice_version  -- version of ICE we're testing (with default)
    iceball_location -- Can be an HTTP URL, in which case fetch from this
                        location, using 'ice_version' and distro information
                        to select the right tarball.  Can also be a local
                        path.  If local path is '.', and iceball is
                        not already present, then we try to build
                        an iceball using the ice_tool_dir commands.
    ice_git_location -- location of ice tool on git
    start_browser -- If True, start a browser.  To be used by runs that will
                     bring up a browser quickly for human use.  Set to False
                     for overnight suites that are testing for problems in
                     the installation itself (defaults to False).
    email -- email address for the user (defaults to [email protected])
    no_epel -- indicates if we should remove epel files prior to yum
               installations.  Defaults to True.
    calamari_user -- user name to log into gui (defaults to admin)
    calamari_password -- calamari user password (defaults to admin)
    """
    cal_svr = None
    start_browser = config.get('start_browser', False)
    no_epel = config.get('no_epel', True)
    for remote_, roles in ctx.cluster.remotes.items():
        if 'client.0' in roles:
            cal_svr = remote_
            break
    if not cal_svr:
        raise RuntimeError('client.0 not found in roles')
    with contextutil.nested(
        lambda: adjust_yum_repos(ctx, cal_svr, no_epel),
        lambda: calamari_install(config, cal_svr),
        lambda: ceph_install(ctx, cal_svr),
        lambda: calamari_connect(ctx, cal_svr),
        lambda: browser(start_browser, cal_svr.hostname),
    ):
        yield
Code example #15
File: hadoop.py Project: ErwanAliasr1/teuthology
def task(ctx, config):
    if config is None:
        config = {}
    assert isinstance(config, dict), "task hadoop config must be dictionary"

    overrides = ctx.config.get('overrides', {})
    teuthology.deep_merge(config, overrides.get('hadoop', {}))

    tasks = [
        lambda: install_hadoop(ctx=ctx, config=config),
        lambda: start_hadoop(ctx=ctx, config=config),
    ]

    with contextutil.nested(*tasks):
        yield
Code example #16
def single_node_test(ctx, config):
    """
    - ceph-deploy.single_node_test: null
    
    #rhbuild testing
    - ceph-deploy.single_node_test: 
        rhbuild: 1.2.3
        
    """
    log.info("Testing ceph-deploy on single node")
    if config is None:
        config = {}

    if config.get("rhbuild"):
        log.info("RH Build, Skip Download")
        with contextutil.nested(lambda: cli_test(ctx=ctx, config=config)):
            yield
    else:
        with contextutil.nested(
            lambda: install_fn.ship_utilities(ctx=ctx, config=None),
            lambda: download_ceph_deploy(ctx=ctx, config=config),
            lambda: cli_test(ctx=ctx, config=config),
        ):
            yield
Code example #17
def task(ctx, config):
    """This is the main body of the task that gets run."""

    """Take car of some yaml parsing here"""
    if config is not None and not isinstance(config, list) and not isinstance(config, dict):
        assert False, "task parallel_example only supports a list or dictionary for configuration"
    if config is None:
        config = ["client.{id}".format(id=id_) for id_ in teuthology.all_roles_of_type(ctx.cluster, "client")]
    if isinstance(config, list):
        config = dict.fromkeys(config)
    clients = config.keys()

    """Run Multiple contextmanagers sequentially by nesting them."""
    with contextutil.nested(
        lambda: parallel_test(ctx=ctx, config=clients), lambda: sequential_test(ctx=ctx, config=clients)
    ):
        yield
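
parallel_test and sequential_test are not shown here; like the other sub-steps on this page they are expected to be context managers, since that is what contextutil.nested enters. A generic, hypothetical skeleton of such a subtask follows (illustrative name and log messages, not the real parallel_example code):

import contextlib
import logging

log = logging.getLogger(__name__)

@contextlib.contextmanager
def example_subtask(ctx, config):
    # Everything before the yield runs during setup; the finally block
    # runs during teardown, even if a later task fails.
    log.info('setting up example_subtask for %s', config)
    try:
        yield
    finally:
        log.info('tearing down example_subtask')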
Code example #18
File: ceph.py Project: alsall/teuthology
def task(ctx, config):
    """
    Set up and tear down a Ceph cluster.

    For example::

        tasks:
        - ceph:
        - interactive:

    You can also specify what branch to run::

        tasks:
        - ceph:
            branch: foo

    Or a tag::

        tasks:
        - ceph:
            tag: v0.42.13

    Or a sha1::

        tasks:
        - ceph:
            sha1: 1376a5ab0c89780eab39ffbbe436f6a6092314ed

    Or a local source dir::

        tasks:
        - ceph:
            path: /home/sage/ceph

    To capture code coverage data, use::

        tasks:
        - ceph:
            coverage: true

    To use btrfs, ext4, or xfs on the target's scratch disks, use::

        tasks:
        - ceph:
            fs: xfs
            mkfs_options: [-b,size=65536,-l,logdev=/dev/sdc1]
            mount_options: [nobarrier, inode64]

    Note, this will cause the task to check the /scratch_devs file on each node
    for available devices.  If no such file is found, /dev/sdb will be used.

    To run some daemons under valgrind, include their names
    and the tool/args to use in a valgrind section::

        tasks:
        - ceph:
          valgrind:
            mds.1: --tool=memcheck
            osd.1: [--tool=memcheck, --leak-check=no]

    Those nodes which are using memcheck or valgrind will get
    checked for bad results.

    To adjust or modify config options, use::

        tasks:
        - ceph:
            conf:
              section:
                key: value

    For example::

        tasks:
        - ceph:
            conf:
              mds.0:
                some option: value
                other key: other value
              client.0:
                debug client: 10
                debug ms: 1

    By default, the cluster log is checked for errors and warnings,
    and the run marked failed if any appear. You can ignore log
    entries by giving a list of egrep compatible regexes, i.e.:

        tasks:
        - ceph:
            log-whitelist: ['foo.*bar', 'bad message']

    :param ctx: Context
    :param config: Configuration
    """
    if config is None:
        config = {}
    assert isinstance(config, dict), \
        "task ceph only supports a dictionary for configuration"

    overrides = ctx.config.get('overrides', {})
    teuthology.deep_merge(config, overrides.get('ceph', {}))

    ctx.daemons = CephState()

    testdir = teuthology.get_testdir(ctx)
    if config.get('coverage'):
        coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir)
        log.info('Creating coverage directory...')
        run.wait(
            ctx.cluster.run(
                args=[
                    'install',
                    '-d',
                    '-m0755',
                    '--',
                    coverage_dir,
                ],
                wait=False,
            ))

    with contextutil.nested(
            lambda: ceph_log(ctx=ctx, config=None),
            lambda: ship_utilities(ctx=ctx, config=None),
            lambda: valgrind_post(ctx=ctx, config=config),
            lambda: cluster(
                ctx=ctx,
                config=dict(
                    conf=config.get('conf', {}),
                    fs=config.get('fs', None),
                    mkfs_options=config.get('mkfs_options', None),
                    mount_options=config.get('mount_options', None),
                    block_journal=config.get('block_journal', None),
                    tmpfs_journal=config.get('tmpfs_journal', None),
                    log_whitelist=config.get('log-whitelist', []),
                    cpu_profile=set(config.get('cpu_profile', [])),
                )),
            lambda: run_daemon(ctx=ctx, config=config, type_='mon'),
            lambda: run_daemon(ctx=ctx, config=config, type_='osd'),
            lambda: run_daemon(ctx=ctx, config=config, type_='mds'),
    ):
        try:
            if config.get('wait-for-healthy', True):
                healthy(ctx=ctx, config=None)
            yield
        finally:
            osd_scrub_pgs(ctx, config)
Code example #19
File: rook.py Project: gyaozhou/ceph-read
def task(ctx, config):
    """
    Deploy rook-ceph cluster

      tasks:
      - kubeadm:
      - rook:
          branch: wip-foo
          spec:
            mon:
              count: 1

    The spec item is deep-merged against the cluster.yaml.  The branch, sha1, or
    image items are used to determine the Ceph container image.
    """
    if not config:
        config = {}
    assert isinstance(config, dict), \
        "task only supports a dictionary for configuration"

    log.info('Rook start')

    overrides = ctx.config.get('overrides', {})
    teuthology.deep_merge(config, overrides.get('ceph', {}))
    teuthology.deep_merge(config, overrides.get('rook', {}))
    log.info('Config: ' + str(config))

    # set up cluster context
    if not hasattr(ctx, 'rook'):
        ctx.rook = {}
    if 'cluster' not in config:
        config['cluster'] = 'ceph'
    cluster_name = config['cluster']
    if cluster_name not in ctx.rook:
        ctx.rook[cluster_name] = argparse.Namespace()

    ctx.rook[cluster_name].remote = list(ctx.cluster.remotes.keys())[0]

    # image
    teuth_defaults = teuth_config.get('defaults', {})
    cephadm_defaults = teuth_defaults.get('cephadm', {})
    containers_defaults = cephadm_defaults.get('containers', {})
    container_image_name = containers_defaults.get('image', None)
    if 'image' in config:
        ctx.rook[cluster_name].image = config.get('image')
    else:
        sha1 = config.get('sha1')
        flavor = config.get('flavor', 'default')
        if sha1:
            if flavor == "crimson":
                ctx.rook[
                    cluster_name].image = container_image_name + ':' + sha1 + '-' + flavor
            else:
                ctx.rook[
                    cluster_name].image = container_image_name + ':' + sha1
        else:
            # hmm, fall back to branch?
            branch = config.get('branch', 'master')
            ctx.rook[cluster_name].image = container_image_name + ':' + branch
    log.info('Ceph image is %s' % ctx.rook[cluster_name].image)

    with contextutil.nested(
            lambda: rook_operator(ctx, config),
            lambda: ceph_log(ctx, config),
            lambda: rook_cluster(ctx, config),
            lambda: rook_toolbox(ctx, config),
            lambda: wait_for_orch(ctx, config),
            lambda: rook_post_config(ctx, config),
            lambda: wait_for_osds(ctx, config),
            lambda: ceph_config_keyring(ctx, config),
            lambda: ceph_clients(ctx, config),
    ):
        if not hasattr(ctx, 'managers'):
            ctx.managers = {}
        ctx.managers[cluster_name] = CephManager(
            ctx.rook[cluster_name].remote,
            ctx=ctx,
            logger=log.getChild('ceph_manager.' + cluster_name),
            cluster=cluster_name,
            rook=True,
        )
        try:
            if config.get('wait-for-healthy', True):
                healthy(ctx=ctx, config=config)
            log.info('Rook complete, yielding')
            yield

        finally:
            to_remove = []
            ret = _shell(ctx,
                         config, ['ceph', 'orch', 'ls', '-f', 'json'],
                         stdout=BytesIO())
            if ret.exitstatus == 0:
                r = json.loads(ret.stdout.getvalue().decode('utf-8'))
                for service in r:
                    if service['service_type'] in ['rgw', 'mds', 'nfs']:
                        _shell(ctx, config,
                               ['ceph', 'orch', 'rm', service['service_name']])
                        to_remove.append(service['service_name'])
                with safe_while(
                        sleep=10, tries=90,
                        action="waiting for service removal") as proceed:
                    while proceed():
                        ret = _shell(ctx,
                                     config,
                                     ['ceph', 'orch', 'ls', '-f', 'json'],
                                     stdout=BytesIO())
                        if ret.exitstatus == 0:
                            r = json.loads(
                                ret.stdout.getvalue().decode('utf-8'))
                            still_up = [
                                service['service_name'] for service in r
                            ]
                            matches = set(still_up).intersection(to_remove)
                            if not matches:
                                break
            log.info('Tearing down rook')
Code example #20
def task(ctx, config):
    """
    Install packages for a given project.

    tasks:
    - install:
        project: ceph
        branch: bar
    - install:
        project: samba
        branch: foo
        extra_packages: ['samba']
    - install:
        rhbuild: 1.2.3

    Overrides are project specific:

    overrides:
      install:
        ceph:
          sha1: ...

    When passed 'rhbuild' as a key, it will attempt to install an rh ceph build using ceph-deploy

    :param ctx: the argparse.Namespace object
    :param config: the config dict
    """
    if config is None:
        config = {}
    assert isinstance(config, dict), \
        "task install only supports a dictionary for configuration"

    rhbuild = None
    if config.get('rhbuild'):
        rhbuild = config.get('rhbuild')
        log.info("Build is %s " % rhbuild)

    project, = config.get('project', 'ceph'),
    log.debug('project %s' % project)
    overrides = ctx.config.get('overrides')
    if overrides:
        install_overrides = overrides.get('install', {})
        teuthology.deep_merge(config, install_overrides.get(project, {}))
    log.debug('config %s' % config)

    flavor = get_flavor(config)
    log.info("Using flavor: %s", flavor)

    ctx.summary['flavor'] = flavor

    if config.get('rhbuild'):
        with contextutil.nested(lambda: rh_install(ctx=ctx, config=config),
                                lambda: ship_utilities(ctx=ctx, config=None)):
            yield
    else:
        with contextutil.nested(
                lambda: install(ctx=ctx,
                                config=dict(
                                    branch=config.get('branch'),
                                    tag=config.get('tag'),
                                    sha1=config.get('sha1'),
                                    flavor=flavor,
                                    extra_packages=config.get(
                                        'extra_packages', []),
                                    extras=config.get('extras', None),
                                    wait_for_package=ctx.config.get(
                                        'wait_for_package', False),
                                    project=project,
                                )),
                lambda: ship_utilities(ctx=ctx, config=None),
        ):
            yield
Code example #21
File: s3tests.py Project: zxgm/ceph
def task(ctx, config):
    """
    Run the s3-tests suite against rgw.

    To run all tests on all clients::

        tasks:
        - ceph:
        - rgw:
        - s3tests:

    To restrict testing to particular clients::

        tasks:
        - ceph:
        - rgw: [client.0]
        - s3tests: [client.0]

    To run against a server on client.1 and increase the boto timeout to 10m::

        tasks:
        - ceph:
        - rgw: [client.1]
        - s3tests:
            client.0:
              rgw_server: client.1
              idle_timeout: 600

    To pass extra arguments to nose (e.g. to run a certain test)::

        tasks:
        - ceph:
        - rgw: [client.0]
        - s3tests:
            client.0:
              extra_args: ['test_s3:test_object_acl_grand_public_read']
            client.1:
              extra_args: ['--exclude', 'test_100_continue']
    """
    assert hasattr(ctx, 'rgw'), 's3tests must run after the rgw task'
    assert config is None or isinstance(config, list) \
        or isinstance(config, dict), \
        "task s3tests only supports a list or dictionary for configuration"
    all_clients = ['client.{id}'.format(id=id_)
                   for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
    if config is None:
        config = all_clients
    if isinstance(config, list):
        config = dict.fromkeys(config)
    clients = config.keys()

    overrides = ctx.config.get('overrides', {})
    # merge each client section, not the top level.
    for client in config.keys():
        if not config[client]:
            config[client] = {}
        teuthology.deep_merge(config[client], overrides.get('s3tests', {}))

    log.debug('s3tests config is %s', config)

    s3tests_conf = {}
    for client in clients:
        endpoint = ctx.rgw.role_endpoints.get(client)
        assert endpoint, 's3tests: no rgw endpoint for {}'.format(client)

        s3tests_conf[client] = ConfigObj(
            indent_type='',
            infile={
                'DEFAULT':
                    {
                    'port'      : endpoint.port,
                    'is_secure' : endpoint.cert is not None,
                    'api_name'  : 'default',
                    },
                'fixtures' : {},
                's3 main'  : {},
                's3 alt'   : {},
                's3 tenant': {},
                }
            )

    with contextutil.nested(
        lambda: download(ctx=ctx, config=config),
        lambda: create_users(ctx=ctx, config=dict(
                clients=clients,
                s3tests_conf=s3tests_conf,
                )),
        lambda: configure(ctx=ctx, config=dict(
                clients=config,
                s3tests_conf=s3tests_conf,
                )),
        lambda: run_tests(ctx=ctx, config=config),
        lambda: scan_for_leaked_encryption_keys(ctx=ctx, config=config),
        ):
        pass
    yield
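
The s3tests_conf entries built above are ConfigObj objects seeded from an in-memory infile dict; the later subtasks (create_users, configure) presumably fill in the user sections and write the result out as an INI-style file on the remote. A minimal sketch of how such an object renders, assuming the configobj package is installed (the user_id value is purely illustrative):

from configobj import ConfigObj

conf = ConfigObj(
    indent_type='',
    infile={
        'DEFAULT': {'port': 7280, 'is_secure': 'no'},
        'fixtures': {},
        's3 main': {},
        's3 alt': {},
    },
)
conf['s3 main']['user_id'] = 'foo'  # hypothetical value, for illustration only

# With no filename configured, write() returns the rendered lines.
print('\n'.join(conf.write()))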
Code example #22
def task(ctx, config):
    """
    Install packages for a given project.

    tasks:
    - install:
        project: ceph
        branch: bar
    - install:
        project: samba
        branch: foo
        extra_packages: ['samba']
    - install:
        extra_packages:
           deb: ['librados-dev', 'libradosstriper-dev']
           rpm: ['librados-devel', 'libradosstriper-devel']
        extra_system_packages:
           deb: ['libboost-system-dev']
           rpm: ['boost-devel']
    - install:
        rhbuild: 1.3.0
        playbook: downstream_setup.yml
        vars:
           yum_repos:
             - url: "http://location.repo"
               name: "ceph_repo"

    Add repos before trying to install any package (all Shaman-related tasks
    will be ignored):

    - install:
        repos:
          - name: "repo-alias"
            priority: 1
            url: "http://location.repo"

    Note: The 'repos' are supported for SUSE-based distros only, but patches
    are welcome to add support for other distros.


    Overrides are project specific:

    overrides:
      install:
        ceph:
          sha1: ...


    Debug packages may optionally be installed:

    overrides:
      install:
        ceph:
          debuginfo: true


    Default package lists (which come from packages.yaml) may be overridden:

    overrides:
      install:
        ceph:
          packages:
            deb:
            - ceph-osd
            - ceph-mon
            rpm:
            - ceph-devel
            - rbd-fuse

    When tag, branch and sha1 do not reference the same commit hash, the
    tag takes precedence over the branch and the branch takes precedence
    over the sha1.

    When the overrides have a sha1 that is different from the sha1 of
    the project to be installed, it will be a noop if the project has
    a branch or tag, because they take precedence over the sha1. For
    instance:

    overrides:
      install:
        ceph:
          sha1: 1234

    tasks:
    - install:
        project: ceph
        sha1: 4567
        branch: foobar # which has sha1 4567

    The override will transform the tasks as follows:

    tasks:
    - install:
        project: ceph
        sha1: 1234
        branch: foobar # which has sha1 4567

    But the branch takes precedence over the sha1 and foobar
    will be installed. The override of the sha1 has no effect.

    When passed 'rhbuild' as a key, it will attempt to install an rh ceph build
    using ceph-deploy

    Normally, the package management system will try to install or upgrade
    specified packages as instructed. But if newer versions of these packages
    to be installed have been installed on test node, we will have to uninstall
    or downgrade them. To downgrade multiple packages in a single shot:

    tasks:
    - install:
        project: ceph
        branch: hammer
        downgrade_packages: ['librados2', 'librbd1']

    Reminder regarding teuthology-suite side effects:

    The teuthology-suite command always adds the following:

    overrides:
      install:
        ceph:
          sha1: 1234

    where sha1 matches the --ceph argument. For instance if
    teuthology-suite is called with --ceph master, the sha1 will be
    the tip of master. If called with --ceph v0.94.1, the sha1 will be
    the v0.94.1 (as returned by git rev-parse v0.94.1 which is not to
    be confused with git rev-parse v0.94.1^{commit})

    :param ctx: the argparse.Namespace object
    :param config: the config dict
    """
    if config is None:
        config = {}
    assert isinstance(config, dict), \
        "task install only supports a dictionary for configuration"

    project, = config.get('project', 'ceph'),
    log.debug('project %s' % project)
    overrides = ctx.config.get('overrides')
    repos = None
    if overrides:
        install_overrides = overrides.get('install', {})
        teuthology.deep_merge(config, install_overrides.get(project, {}))
        repos = install_overrides.get('repos', None)
        log.debug('INSTALL overrides: %s' % install_overrides)
    log.debug('config %s' % config)

    rhbuild = None
    if config.get('rhbuild'):
        rhbuild = config.get('rhbuild')
        log.info("Build is %s " % rhbuild)

    flavor = get_flavor(config)
    log.info("Using flavor: %s", flavor)

    ctx.summary['flavor'] = flavor
    nested_tasks = [
        lambda: redhat.install(ctx=ctx, config=config),
        lambda: ship_utilities(ctx=ctx, config=None)
    ]

    if config.get('rhbuild'):
        if config.get('playbook'):
            ansible_config = dict(config)
            # remove key not required by ansible task
            del ansible_config['rhbuild']
            nested_tasks.insert(
                0, lambda: ansible.CephLab(ctx, config=ansible_config))
        with contextutil.nested(*nested_tasks):
            yield
    else:
        nested_config = dict(
            branch=config.get('branch'),
            tag=config.get('tag'),
            sha1=config.get('sha1'),
            debuginfo=config.get('debuginfo'),
            flavor=flavor,
            downgrade_packages=config.get('downgrade_packages', []),
            extra_packages=config.get('extra_packages', []),
            extra_system_packages=config.get('extra_system_packages', []),
            exclude_packages=config.get('exclude_packages', []),
            extras=config.get('extras', None),
            wait_for_package=config.get('wait_for_package', False),
            project=project,
            packages=config.get('packages', dict()),
            install_ceph_packages=config.get('install_ceph_packages', True),
            repos_only=config.get('repos_only', False),
        )
        if repos:
            nested_config['repos'] = repos
        with contextutil.nested(
                lambda: install(ctx=ctx, config=nested_config),
                lambda: ship_utilities(ctx=ctx, config=None),
        ):
            yield
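
The docstring above states that the tag takes precedence over the branch, and the branch over the sha1. The tiny hypothetical helper below (not part of install.py) just encodes that precedence to make the rule concrete; the 'master' fallback is an assumption.

def choose_ref(config):
    # Documented precedence: tag > branch > sha1; fall back to a branch
    # named 'master' if none of them is set (fallback is an assumption).
    for key in ('tag', 'branch', 'sha1'):
        if config.get(key):
            return key, config[key]
    return 'branch', 'master'

assert choose_ref({'sha1': '1234', 'branch': 'foobar'}) == ('branch', 'foobar')
assert choose_ref({'tag': 'v0.94.1', 'branch': 'hammer'}) == ('tag', 'v0.94.1')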
Code example #23
def task(ctx, config):
    """
    Deploy and run Tempest's object storage campaign

    Example of configuration:

      overrides:
        ceph:
          conf:
            client:
              rgw keystone api version: 3
              rgw keystone accepted roles: admin,Member
              rgw keystone implicit tenants: true
              rgw keystone accepted admin roles: admin
              rgw swift enforce content length: true
              rgw swift account in url: true
              rgw swift versioning enabled: true
              rgw keystone admin domain: Default
              rgw keystone admin user: admin
              rgw keystone admin password: ADMIN
              rgw keystone admin project: admin
      tasks:
      # typically, the task should be preceded with install, ceph, tox,
      # keystone and rgw. Tox and Keystone are specific requirements
      # of tempest.py.
      - rgw:
          # it's important to match the prefix with the endpoint's URL
          # in Keystone. Additionally, if we want to test /info and its
          # accompanying stuff, the whole Swift API must be put in root
          # of the whole URL  hierarchy (read: frontend_prefix == /swift).
          frontend_prefix: /swift
          client.0:
            use-keystone-role: client.0
      - tempest:
          client.0:
            force-branch: master
            use-keystone-role: client.0
            auth:
              admin_username: admin
              admin_project_name: admin
              admin_password: ADMIN
              admin_domain_name: Default
            identity:
              uri: http://{keystone_public_host}:{keystone_public_port}/v2.0/
              uri_v3: http://{keystone_public_host}:{keystone_public_port}/v3/
              admin_role: admin
            object-storage:
              reseller_admin_role: admin
            object-storage-feature-enabled:
              container_sync: false
              discoverability: false
            blacklist:
              # please strip half of these items after merging PRs #15369
              # and #12704
              - .*test_list_containers_reverse_order.*
              - .*test_list_container_contents_with_end_marker.*
              - .*test_delete_non_empty_container.*
              - .*test_container_synchronization.*
              - .*test_get_object_after_expiration_time.*
              - .*test_create_object_with_transfer_encoding.*
    """
    assert config is None or isinstance(config, list) \
        or isinstance(config, dict), \
        'task tempest only supports a list or dictionary for configuration'

    if not ctx.tox:
        raise ConfigError('tempest must run after the tox task')
    if not ctx.keystone:
        raise ConfigError('tempest must run after the keystone task')

    all_clients = [
        'client.{id}'.format(id=id_)
        for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')
    ]
    if config is None:
        config = all_clients
    if isinstance(config, list):
        config = dict.fromkeys(config)

    overrides = ctx.config.get('overrides', {})
    # merge each client section, not the top level.
    for client in config.keys():
        if not config[client]:
            config[client] = {}
        teuthology.deep_merge(config[client], overrides.get('keystone', {}))

    log.debug('Tempest config is %s', config)

    with contextutil.nested(
            lambda: download(ctx=ctx, config=config),
            lambda: setup_venv(ctx=ctx, config=config),
            lambda: configure_instance(ctx=ctx, config=config),
            lambda: run_tempest(ctx=ctx, config=config),
    ):
        yield
Code example #24
File: rgw.py Project: gerald-yang/ceph
def task(ctx, config):
    """
    For example, to run rgw on all clients::

        tasks:
        - ceph:
        - rgw:

    To only run on certain clients::

        tasks:
        - ceph:
        - rgw: [client.0, client.3]

    or

        tasks:
        - ceph:
        - rgw:
            client.0:
            client.3:

    To run radosgw through valgrind:

        tasks:
        - ceph:
        - rgw:
            client.0:
              valgrind: [--tool=memcheck]
            client.3:
              valgrind: [--tool=memcheck]

    To configure data or index pool pg_size:

        overrides:
          rgw:
            data_pool_pg_size: 256
            index_pool_pg_size: 128
    """
    if config is None:
        config = dict(
            ('client.{id}'.format(id=id_), None)
            for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client'))
    elif isinstance(config, list):
        config = dict((name, None) for name in config)

    clients = config.keys()  # http://tracker.ceph.com/issues/20417

    overrides = ctx.config.get('overrides', {})
    teuthology.deep_merge(config, overrides.get('rgw', {}))

    ctx.rgw = argparse.Namespace()

    ctx.rgw.ec_data_pool = bool(config.pop('ec-data-pool', False))
    ctx.rgw.erasure_code_profile = config.pop('erasure_code_profile', {})
    ctx.rgw.cache_pools = bool(config.pop('cache-pools', False))
    ctx.rgw.frontend = config.pop('frontend', 'civetweb')
    ctx.rgw.compression_type = config.pop('compression type', None)
    ctx.rgw.storage_classes = config.pop('storage classes', None)
    default_cert = config.pop('ssl certificate', None)
    ctx.rgw.data_pool_pg_size = config.pop('data_pool_pg_size', 64)
    ctx.rgw.index_pool_pg_size = config.pop('index_pool_pg_size', 64)
    ctx.rgw.config = config

    log.debug("config is {}".format(config))
    log.debug("client list is {}".format(clients))

    ctx.rgw.role_endpoints = assign_endpoints(ctx, config, default_cert)

    subtasks = [
        lambda: create_pools(ctx=ctx, clients=clients),
    ]
    if ctx.rgw.compression_type:
        subtasks.extend([
            lambda: configure_compression(
                ctx=ctx, clients=clients, compression=ctx.rgw.compression_type
            ),
        ])
    if ctx.rgw.storage_classes:
        subtasks.extend([
            lambda: configure_storage_classes(ctx=ctx,
                                              clients=clients,
                                              storage_classes=ctx.rgw.
                                              storage_classes),
        ])
    subtasks.extend([
        lambda: start_rgw(ctx=ctx, config=config, clients=clients),
    ])

    with contextutil.nested(*subtasks):
        yield
Code example #25
File: ceph.py Project: tv42/teuthology
def task(ctx, config):
    """
    Set up and tear down a Ceph cluster.

    For example::

        tasks:
        - ceph:
        - interactive:

    You can also specify what branch to run::

        tasks:
        - ceph:
            branch: foo

    Or a tag::

        tasks:
        - ceph:
            tag: v0.42.13

    Or a sha1::

        tasks:
        - ceph:
            sha1: 1376a5ab0c89780eab39ffbbe436f6a6092314ed

    Or a local source dir::

        tasks:
        - ceph:
            path: /home/sage/ceph

    To capture code coverage data, use::

        tasks:
        - ceph:
            coverage: true

    To use btrfs on the osds, use::

        tasks:
        - ceph:
            btrfs: true

    Note, this will cause the task to check the /scratch_devs file on each node
    for available devices.  If no such file is found, /dev/sdb will be used.

    To run some daemons under valgrind, include their names
    and the tool/args to use in a valgrind section::

        tasks:
        - ceph:
          valgrind:
            mds.1: --tool=memcheck
            osd.1: [--tool=memcheck, --leak-check=no]

    Those nodes which are using memcheck or helgrind will get
    checked for bad results.

    To adjust or modify config options, use::

        tasks:
        - ceph:
            conf:
              section:
                key: value

    For example::

        tasks:
        - ceph:
            conf:
              mds.0:
                some option: value
                other key: other value
              client.0:
                debug client: 10
                debug ms: 1

    By default, the cluster log is checked for errors and warnings,
    and the run marked failed if any appear. You can ignore log
    entries by giving a list of egrep compatible regexes, i.e.:

        tasks:
        - ceph:
            log-whitelist: ['foo.*bar', 'bad message']

    """
    if config is None:
        config = {}
    assert isinstance(config, dict), \
        "task ceph only supports a dictionary for configuration"

    overrides = ctx.config.get('overrides', {})
    teuthology.deep_merge(config, overrides.get('ceph', {}))

    ctx.daemons = CephState()

    # Flavor tells us what gitbuilder to fetch the prebuilt software
    # from. It's a combination of possible keywords, in a specific
    # order, joined by dashes. It is used as a URL path name. If a
    # match is not found, the teuthology run fails. This is ugly,
    # and should be cleaned up at some point.

    flavor = []

    # First element: controlled by user (or not there, by default):
    # used to choose the right distribution, e.g. "oneiric".
    flavor_user = config.get('flavor')
    if flavor_user is not None:
        flavor.append(flavor_user)

    if config.get('path'):
        # local dir precludes any other flavors
        flavor = ['local']
    else:
        if config.get('coverage'):
            log.info('Recording coverage for this run.')
            flavor.append('gcov')
        else:
            if config.get('valgrind'):
                log.info('Using notcmalloc flavor and running some daemons under valgrind')
                flavor.append('notcmalloc')

    flavor = '-'.join(flavor)
    if flavor == '':
        flavor = None
    ctx.summary['flavor'] = flavor or 'default'

    if config.get('coverage'):
        coverage_dir = '/tmp/cephtest/archive/coverage'
        log.info('Creating coverage directory...')
        run.wait(
            ctx.cluster.run(
                args=[
                    'install', '-d', '-m0755', '--',
                    coverage_dir,
                    ],
                wait=False,
                )
            )

    if config.get('valgrind'):
        val_path = '/tmp/cephtest/archive/log/{val_dir}'.format(val_dir=config.get('valgrind').get('logs', "valgrind"))
        run.wait(
            ctx.cluster.run(
                args=[
                    'mkdir', '-p', val_path
                    ],
                wait=False,
                )
            )


    with contextutil.nested(
        lambda: ceph_log(ctx=ctx, config=None),
        lambda: ship_utilities(ctx=ctx, config=None),
        lambda: binaries(ctx=ctx, config=dict(
                branch=config.get('branch'),
                tag=config.get('tag'),
                sha1=config.get('sha1'),
                path=config.get('path'),
                flavor=flavor,
                )),
        lambda: valgrind_post(ctx=ctx, config=config),
        lambda: cluster(ctx=ctx, config=dict(
                conf=config.get('conf', {}),
                btrfs=config.get('btrfs', False),
                log_whitelist=config.get('log-whitelist', []),
                )),
        lambda: run_daemon(ctx=ctx, config=config, type='mon'),
        lambda: run_daemon(ctx=ctx, config=config, type='osd'),
        lambda: run_daemon(ctx=ctx, config=config, type='mds'),
        ):
        healthy(ctx=ctx, config=None)
        yield
Code example #26
File: install.py Project: Mirantis/teuthology
def task(ctx, config):
    """
    Install packages for a given project.

    tasks:
    - install:
        project: ceph
        branch: bar
    - install:
        project: samba
        branch: foo
        extra_packages: ['samba']
    - install:
        rhbuild: 1.3.0
        playbook: downstream_setup.yml
        vars:
           yum_repos:
             - url: "http://location.repo"
               name: "ceph_repo"

    Overrides are project specific:

    overrides:
      install:
        ceph:
          sha1: ...


    Debug packages may optionally be installed:

    overrides:
      install:
        ceph:
          debuginfo: true


    Default package lists (which come from packages.yaml) may be overridden:

    overrides:
      install:
        ceph:
          packages:
            deb:
            - ceph-osd
            - ceph-mon
            rpm:
            - ceph-devel
            - rbd-fuse

    When tag, branch and sha1 do not reference the same commit hash, the
    tag takes precedence over the branch and the branch takes precedence
    over the sha1.

    When the overrides have a sha1 that is different from the sha1 of
    the project to be installed, it will be a noop if the project has
    a branch or tag, because they take precedence over the sha1. For
    instance:

    overrides:
      install:
        ceph:
          sha1: 1234

    tasks:
    - install:
        project: ceph
        sha1: 4567
        branch: foobar # which has sha1 4567

    The override will transform the tasks as follows:

    tasks:
    - install:
        project: ceph
        sha1: 1234
        branch: foobar # which has sha1 4567

    But the branch takes precedence over the sha1 and foobar
    will be installed. The override of the sha1 has no effect.

    When passed 'rhbuild' as a key, it will attempt to install an rh ceph build using ceph-deploy

    Reminder regarding teuthology-suite side effects:

    The teuthology-suite command always adds the following:

    overrides:
      install:
        ceph:
          sha1: 1234

    where sha1 matches the --ceph argument. For instance if
    teuthology-suite is called with --ceph master, the sha1 will be
    the tip of master. If called with --ceph v0.94.1, the sha1 will be
    the v0.94.1 (as returned by git rev-parse v0.94.1 which is not to
    be confused with git rev-parse v0.94.1^{commit})

    :param ctx: the argparse.Namespace object
    :param config: the config dict
    """
    if config is None:
        config = {}
    assert isinstance(config, dict), \
        "task install only supports a dictionary for configuration"

    project, = config.get('project', 'ceph'),
    log.debug('project %s' % project)
    overrides = ctx.config.get('overrides')
    if overrides:
        install_overrides = overrides.get('install', {})
        teuthology.deep_merge(config, install_overrides.get(project, {}))
    log.debug('config %s' % config)

    rhbuild = None
    if config.get('rhbuild'):
        rhbuild = config.get('rhbuild')
        log.info("Build is %s " % rhbuild)

    flavor = get_flavor(config)
    log.info("Using flavor: %s", flavor)

    ctx.summary['flavor'] = flavor
    nested_tasks = [
        lambda: rh_install(ctx=ctx, config=config),
        lambda: ship_utilities(ctx=ctx, config=None)
    ]

    if config.get('rhbuild'):
        if config.get('playbook'):
            ansible_config = dict(config)
            # remove key not required by ansible task
            del ansible_config['rhbuild']
            nested_tasks.insert(
                0, lambda: ansible.CephLab(ctx, config=ansible_config))
        with contextutil.nested(*nested_tasks):
            yield
    else:
        with contextutil.nested(
                lambda: install(
                    ctx=ctx,
                    config=dict(
                        branch=config.get('branch'),
                        tag=config.get('tag'),
                        sha1=config.get('sha1'),
                        debuginfo=config.get('debuginfo'),
                        flavor=flavor,
                        extra_packages=config.get('extra_packages', []),
                        extras=config.get('extras', None),
                        wait_for_package=config.get('wait_for_package', False),
                        project=project,
                        packages=config.get('packages', dict()),
                    )),
                lambda: ship_utilities(ctx=ctx, config=None),
        ):
            yield
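
The override behaviour described in the docstring above comes down to a recursive dictionary merge. Below is a minimal, self-contained sketch (a simplified stand-in for teuthology.deep_merge, not the real implementation) showing why an overridden sha1 is a no-op when a branch is also set:

# Simplified stand-in for teuthology.deep_merge, for illustration only.
def deep_merge_sketch(base, overrides):
    """Recursively fold `overrides` into `base` and return `base`."""
    for key, value in overrides.items():
        if isinstance(value, dict) and isinstance(base.get(key), dict):
            deep_merge_sketch(base[key], value)
        else:
            base[key] = value
    return base

task_config = {'project': 'ceph', 'sha1': '4567', 'branch': 'foobar'}
override = {'sha1': '1234'}
print(deep_merge_sketch(task_config, override))
# {'project': 'ceph', 'sha1': '1234', 'branch': 'foobar'}
# The sha1 is rewritten, but since 'branch' survives the merge and branch
# takes precedence over sha1, branch foobar is what actually gets installed.
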
Code example #27
def task(ctx, config):
    """
    Run some s3-tests suite against rgw, verify opslog socket returns data

    Must restrict testing to a particular client::

        tasks:
        - ceph:
        - rgw: [client.0]
        - s3tests: [client.0]

    To pass extra arguments to nose (e.g. to run a certain test)::

        tasks:
        - ceph:
        - rgw: [client.0]
        - s3tests:
            client.0:
              extra_args: ['test_s3:test_object_acl_grand_public_read']
            client.1:
              extra_args: ['--exclude', 'test_100_continue']
    """
    assert hasattr(ctx, 'rgw'), 'rgw-logsocket must run after the rgw task'
    assert config is None or isinstance(config, list) \
        or isinstance(config, dict), \
        "task rgw-logsocket only supports a list or dictionary for configuration"
    all_clients = [
        'client.{id}'.format(id=id_)
        for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')
    ]
    if config is None:
        config = all_clients
    if isinstance(config, list):
        config = dict.fromkeys(config)
    clients = config.keys()

    overrides = ctx.config.get('overrides', {})
    # merge each client section, not the top level.
    for (client, cconf) in config.iteritems():
        teuthology.deep_merge(cconf, overrides.get('rgw-logsocket', {}))

    log.debug('config is %s', config)

    s3tests_conf = {}
    for client in clients:
        endpoint = ctx.rgw.role_endpoints.get(client)
        assert endpoint, 'rgw-logsocket: no rgw endpoint for {}'.format(client)

        s3tests_conf[client] = ConfigObj(indent_type='',
                                         infile={
                                             'DEFAULT': {
                                                 'port':
                                                 endpoint.port,
                                                 'is_secure':
                                                 endpoint.cert is not None,
                                             },
                                             'fixtures': {},
                                             's3 main': {},
                                             's3 alt': {},
                                         })

    with contextutil.nested(
            lambda: download(ctx=ctx, config=config),
            lambda: create_users(ctx=ctx,
                                 config=dict(
                                     clients=clients,
                                     s3tests_conf=s3tests_conf,
                                 )),
            lambda: configure(ctx=ctx,
                              config=dict(
                                  clients=config,
                                  s3tests_conf=s3tests_conf,
                              )),
            lambda: run_tests(ctx=ctx, config=config),
    ):
        yield
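
For reference, the per-client ConfigObj built above renders to a small INI-style file. A rough sketch of what it produces (assuming the third-party configobj package used by the task; the exact values are filled in later by create_users/configure):

from configobj import ConfigObj

conf = ConfigObj(indent_type='', infile={
    'DEFAULT': {'port': 7280, 'is_secure': False},
    'fixtures': {},
    's3 main': {},
    's3 alt': {},
})
# With no filename set, write() returns the rendered lines.
for line in conf.write():
    print(line)
# [DEFAULT]
# port = 7280
# is_secure = False
# [fixtures]
# [s3 main]
# [s3 alt]
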
Code example #28
File: s3tests.py Project: smanjara/ceph
def task(ctx, config):
    """
    Run the s3-tests suite against rgw.

    To run all tests on all clients::

        tasks:
        - ceph:
        - rgw:
        - s3tests:

    To restrict testing to particular clients::

        tasks:
        - ceph:
        - rgw: [client.0]
        - s3tests: [client.0]

    To run against a server on client.1 and increase the boto timeout to 10m::

        tasks:
        - ceph:
        - rgw: [client.1]
        - s3tests:
            client.0:
              rgw_server: client.1
              idle_timeout: 600

    To pass extra arguments to nose (e.g. to run a certain test)::

        tasks:
        - ceph:
        - rgw: [client.0]
        - s3tests:
            client.0:
              extra_args: ['test_s3:test_object_acl_grand_public_read']
            client.1:
              extra_args: ['--exclude', 'test_100_continue']

    To run any sts-tests, set a config variable named 'sts_tests' to 'True' as follows::

        tasks:
        - ceph:
        - rgw: [client.0]
        - s3tests:
            client.0:
              sts_tests: True
              rgw_server: client.0

    To run any cloud-transition tests, set a config variable named 'cloudtier_tests' to 'True' as follows::

        tasks:
        - ceph:
        - rgw: [client.0, client.1]
        - s3tests:
            client.0:
              cloudtier_tests: True
              rgw_server: client.0

    """
    assert hasattr(ctx, 'rgw'), 's3tests must run after the rgw task'
    assert config is None or isinstance(config, list) \
        or isinstance(config, dict), \
        "task s3tests only supports a list or dictionary for configuration"
    all_clients = ['client.{id}'.format(id=id_)
                   for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
    if config is None:
        config = all_clients
    if isinstance(config, list):
        config = dict.fromkeys(config)
    clients = config.keys()

    overrides = ctx.config.get('overrides', {})
    # merge each client section, not the top level.
    for client in config.keys():
        if not config[client]:
            config[client] = {}
        teuthology.deep_merge(config[client], overrides.get('s3tests', {}))

    log.debug('s3tests config is %s', config)

    s3tests_conf = {}

    for client, client_config in config.items():
        if 'sts_tests' in client_config:
            ctx.sts_variable = True
        else:
            ctx.sts_variable = False

        if 'cloudtier_tests' in client_config:
            ctx.cloudtier_variable = True
        else:
            ctx.cloudtier_variable = False

        if 'dbstore_tests' in client_config:
            ctx.dbstore_variable = True
        else:
            ctx.dbstore_variable = False

        #This will be the structure of config file when you want to run webidentity_test (sts-test)
        if ctx.sts_variable and "TOKEN" in os.environ:
            for client in clients:
                endpoint = ctx.rgw.role_endpoints.get(client)
                assert endpoint, 's3tests: no rgw endpoint for {}'.format(client)

                s3tests_conf[client] = ConfigObj(
                    indent_type='',
                    infile={
                        'DEFAULT':
                            {
                            'port'      : endpoint.port,
                            'is_secure' : endpoint.cert is not None,
                            'api_name'  : 'default',
                            },
                        'fixtures'   : {},
                        's3 main'    : {},
                        's3 alt'     : {},
                        's3 tenant'  : {},
                        'iam'        : {},
                        'webidentity': {},
                    }
                )

        elif ctx.sts_variable:
            #This will be the structure of config file when you want to run assume_role_test and get_session_token_test (sts-test)
            for client in clients:
                endpoint = ctx.rgw.role_endpoints.get(client)
                assert endpoint, 's3tests: no rgw endpoint for {}'.format(client)

                s3tests_conf[client] = ConfigObj(
                    indent_type='',
                    infile={
                        'DEFAULT':
                            {
                            'port'      : endpoint.port,
                            'is_secure' : endpoint.cert is not None,
                            'api_name'  : 'default',
                            },
                        'fixtures'   : {},
                        's3 main'    : {},
                        's3 alt'     : {},
                        's3 tenant'  : {},
                        'iam'        : {},
                        }
                    ) 

        elif ctx.cloudtier_variable:
            #This will be the structure of the config file when you want to run cloud-transition tests (cloudtier tests)
            for client in clients:
                endpoint = ctx.rgw.role_endpoints.get(client)
                assert endpoint, 's3tests: no rgw endpoint for {}'.format(client)

                s3tests_conf[client] = ConfigObj(
                    indent_type='',
                    infile={
                        'DEFAULT':
                            {
                            'port'      : endpoint.port,
                            'is_secure' : endpoint.cert is not None,
                            'api_name'  : 'default',
                            },
                        'fixtures'   : {},
                        's3 main'    : {},
                        's3 alt'     : {},
                        's3 tenant'  : {},
                        's3 cloud'   : {},
                        }
                    ) 
        else:
            #This will be the structure of config file when you want to run normal s3-tests
            for client in clients:
                endpoint = ctx.rgw.role_endpoints.get(client)
                assert endpoint, 's3tests: no rgw endpoint for {}'.format(client)

                s3tests_conf[client] = ConfigObj(
                    indent_type='',
                    infile={
                        'DEFAULT':
                            {
                            'port'      : endpoint.port,
                            'is_secure' : endpoint.cert is not None,
                            'api_name'  : 'default',
                            },
                        'fixtures'   : {},
                        's3 main'    : {},
                        's3 alt'     : {},
                        's3 tenant'  : {},
                        }
                    )

    with contextutil.nested(
        lambda: download(ctx=ctx, config=config),
        lambda: create_users(ctx=ctx, config=dict(
                clients=clients,
                s3tests_conf=s3tests_conf,
                )),
        lambda: configure(ctx=ctx, config=dict(
                clients=config,
                s3tests_conf=s3tests_conf,
                )),
        lambda: run_tests(ctx=ctx, config=config),
        lambda: scan_for_leaked_encryption_keys(ctx=ctx, config=config),
        ):
        pass
    yield
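
The if/elif chain above builds almost identical ConfigObj structures that differ only in which extra sections are present. A hypothetical helper (a refactoring sketch, not the upstream task code) makes that relationship explicit:

# Hypothetical sketch: derive the section layout from the test flags.
def build_s3tests_infile(port, is_secure, sts=False, webidentity=False,
                         cloudtier=False):
    infile = {'DEFAULT': {'port': port,
                          'is_secure': is_secure,
                          'api_name': 'default'}}
    sections = ['fixtures', 's3 main', 's3 alt', 's3 tenant']
    if sts:
        sections.append('iam')
        if webidentity:
            sections.append('webidentity')
    if cloudtier:
        sections.append('s3 cloud')
    infile.update({name: {} for name in sections})
    return infile

print(build_s3tests_infile(7280, False, sts=True, webidentity=True))
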
Code example #29
def task(ctx, config):
    if config is None:
        config = {}

    assert isinstance(config, dict), \
        "task only supports a dictionary for configuration"

    overrides = ctx.config.get('overrides', {})
    teuthology.deep_merge(config, overrides.get('ceph', {}))
    log.info('Config: ' + str(config))

    testdir = teuthology.get_testdir(ctx)

    # set up cluster context
    first_ceph_cluster = False
    if not hasattr(ctx, 'daemons'):
        first_ceph_cluster = True
    if not hasattr(ctx, 'ceph'):
        ctx.ceph = {}
        ctx.managers = {}
    if 'cluster' not in config:
        config['cluster'] = 'ceph'
    cluster_name = config['cluster']
    ctx.ceph[cluster_name] = argparse.Namespace()

    ctx.ceph[cluster_name].thrashers = []
    # fixme: setup watchdog, ala ceph.py

    # cephadm mode?
    if 'cephadm_mode' not in config:
        config['cephadm_mode'] = 'root'
    assert config['cephadm_mode'] in ['root', 'cephadm-package']
    if config['cephadm_mode'] == 'root':
        ctx.cephadm = testdir + '/cephadm'
    else:
        ctx.cephadm = 'cephadm'  # in the path

    if first_ceph_cluster:
        # FIXME: this is global for all clusters
        ctx.daemons = DaemonGroup(use_cephadm=ctx.cephadm)

    # image
    ctx.ceph[cluster_name].image = config.get('image')
    ref = None
    if not ctx.ceph[cluster_name].image:
        sha1 = config.get('sha1')
        if sha1:
            ctx.ceph[cluster_name].image = 'quay.io/ceph-ci/ceph:%s' % sha1
            ref = sha1
        else:
            # hmm, fall back to branch?
            branch = config.get('branch', 'master')
            ref = branch
            ctx.ceph[cluster_name].image = 'quay.io/ceph-ci/ceph:%s' % branch
    log.info('Cluster image is %s' % ctx.ceph[cluster_name].image)

    # uuid
    fsid = str(uuid.uuid1())
    log.info('Cluster fsid is %s' % fsid)
    ctx.ceph[cluster_name].fsid = fsid

    # mon ips
    log.info('Choosing monitor IPs and ports...')
    remotes_and_roles = ctx.cluster.remotes.items()
    roles = [role_list for (remote, role_list) in remotes_and_roles]
    ips = [
        host for (host, port) in (remote.ssh.get_transport().getpeername()
                                  for (remote, role_list) in remotes_and_roles)
    ]
    ctx.ceph[cluster_name].mons = get_mons(
        roles,
        ips,
        cluster_name,
        mon_bind_msgr2=config.get('mon_bind_msgr2', True),
        mon_bind_addrvec=config.get('mon_bind_addrvec', True),
    )
    log.info('Monitor IPs: %s' % ctx.ceph[cluster_name].mons)

    with contextutil.nested(
            lambda: ceph_initial(),
            lambda: normalize_hostnames(ctx=ctx),
            lambda: download_cephadm(ctx=ctx, config=config, ref=ref),
            lambda: ceph_log(ctx=ctx, config=config),
            lambda: ceph_crash(ctx=ctx, config=config),
            lambda: ceph_bootstrap(ctx=ctx, config=config),
            lambda: crush_setup(ctx=ctx, config=config),
            lambda: ceph_mons(ctx=ctx, config=config),
            lambda: distribute_config_and_admin_keyring(ctx=ctx, config=config
                                                        ),
            lambda: ceph_mgrs(ctx=ctx, config=config),
            lambda: ceph_osds(ctx=ctx, config=config),
            lambda: ceph_mdss(ctx=ctx, config=config),
            lambda: ceph_rgw(ctx=ctx, config=config),
            lambda: ceph_monitoring('prometheus', ctx=ctx, config=config),
            lambda: ceph_monitoring('node-exporter', ctx=ctx, config=config),
            lambda: ceph_monitoring('alertmanager', ctx=ctx, config=config),
            lambda: ceph_monitoring('grafana', ctx=ctx, config=config),
            lambda: ceph_clients(ctx=ctx, config=config),
    ):
        ctx.managers[cluster_name] = CephManager(
            ctx.ceph[cluster_name].bootstrap_remote,
            ctx=ctx,
            logger=log.getChild('ceph_manager.' + cluster_name),
            cluster=cluster_name,
            cephadm=True,
        )

        try:
            if config.get('wait-for-healthy', True):
                healthy(ctx=ctx, config=config)

            log.info('Setup complete, yielding')
            yield

        finally:
            log.info('Teardown begin')
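
The container image selection above follows a simple precedence: an explicit image wins, then a sha1 tag, then a branch tag (defaulting to master). A small standalone helper illustrating that order (illustrative only, not the task's own code):

def resolve_ceph_image(config, repo='quay.io/ceph-ci/ceph'):
    """Return (container image, git ref) using the same precedence as above."""
    image = config.get('image')
    if image:
        return image, None
    sha1 = config.get('sha1')
    if sha1:
        return '%s:%s' % (repo, sha1), sha1
    branch = config.get('branch', 'master')
    return '%s:%s' % (repo, branch), branch

print(resolve_ceph_image({'sha1': 'abc123'}))   # ('quay.io/ceph-ci/ceph:abc123', 'abc123')
print(resolve_ceph_image({}))                   # ('quay.io/ceph-ci/ceph:master', 'master')
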
Code example #30
File: qemu.py Project: whaddock/ceph
def task(ctx, config):
    """
    Run a test inside of QEMU on top of rbd. Only one test
    is supported per client.

    For example, you can specify which clients to run on::

        tasks:
        - ceph:
        - qemu:
            client.0:
              test: http://download.ceph.com/qa/test.sh
            client.1:
              test: http://download.ceph.com/qa/test2.sh

    Or use the same settings on all clients:

        tasks:
        - ceph:
        - qemu:
            all:
              test: http://download.ceph.com/qa/test.sh

    For tests that want to explicitly describe the RBD images to connect:

        tasks:
        - ceph:
        - qemu:
            client.0:
                test: http://download.ceph.com/qa/test.sh
                clone: True/False (optionally clone all created disks),
                image_url: <URL> (optional default image URL)
                type: filesystem / block (optional default device type)
                disks: [
                    {
                        action: create / clone / none (optional, defaults to create)
                        image_name: <image name> (optional)
                        parent_name: <parent_name> (if action == clone),
                        type: filesystem / block (optional, defaults to filesystem)
                        image_url: <URL> (optional),
                        image_size: <MiB> (optional)
                        encryption_format: luks1 / luks2 / none (optional, defaults to none)
                    }, ...
                ]

    You can set the amount of CPUs and memory the VM has (default is 1 CPU and
    4096 MB)::

        tasks:
        - ceph:
        - qemu:
            client.0:
              test: http://download.ceph.com/qa/test.sh
              cpus: 4
              memory: 512 # megabytes

    If you need to configure additional cloud-config options, set cloud_config
    to the required data set::

        tasks:
        - ceph
        - qemu:
            client.0:
                test: http://ceph.com/qa/test.sh
                cloud_config_archive:
                    - |
                      #!/bin/bash
                      touch foo1
                    - content: |
                        test data
                      type: text/plain
                      filename: /tmp/data
    """
    assert isinstance(config, dict), \
           "task qemu only supports a dictionary for configuration"

    config = teuthology.replace_all_with_clients(ctx.cluster, config)
    normalize_disks(config)

    managers = []
    create_images(ctx=ctx, config=config, managers=managers)
    managers.extend([
        lambda: create_dirs(ctx=ctx, config=config),
        lambda: install_block_rbd_driver(ctx=ctx, config=config),
        lambda: generate_iso(ctx=ctx, config=config),
        lambda: download_image(ctx=ctx, config=config),
        ])
    create_clones(ctx=ctx, config=config, managers=managers)
    create_encrypted_devices(ctx=ctx, config=config, managers=managers)
    managers.append(
        lambda: run_qemu(ctx=ctx, config=config),
        )

    with contextutil.nested(*managers):
        yield
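
normalize_disks() is not shown in this excerpt; the sketch below only illustrates the kind of defaulting the docstring describes, where per-client clone/image_url/type settings are pushed down into each disks entry unless the entry overrides them (names and defaults here are assumptions):

def normalize_disks_sketch(client_config):
    defaults = {
        'action': 'clone' if client_config.get('clone') else 'create',
        'image_url': client_config.get('image_url'),
        'type': client_config.get('type', 'filesystem'),
    }
    normalized = []
    for disk in client_config.get('disks', [{}]):
        entry = dict(defaults)   # start from the client-wide defaults
        entry.update(disk)       # per-disk keys win
        normalized.append(entry)
    client_config['disks'] = normalized
    return client_config

print(normalize_disks_sketch({'clone': True, 'disks': [{'image_size': 1024}]}))
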
Code example #31
File: qemu.py Project: wuhongsong/ceph-qa-suite
def task(ctx, config):
    """
    Run a test inside of QEMU on top of rbd. Only one test
    is supported per client.

    For example, you can specify which clients to run on::

        tasks:
        - ceph:
        - qemu:
            client.0:
              test: http://ceph.com/qa/test.sh
            client.1:
              test: http://ceph.com/qa/test2.sh

    Or use the same settings on all clients:

        tasks:
        - ceph:
        - qemu:
            all:
              test: http://ceph.com/qa/test.sh

    For tests that don't need a filesystem, set type to block::

        tasks:
        - ceph:
        - qemu:
            client.0:
              test: http://ceph.com/qa/test.sh
              type: block

    The test should be configured to run on /dev/vdb and later
    devices.

    If you want to run a test that uses more than one rbd image,
    specify how many images to use::

        tasks:
        - ceph:
        - qemu:
            client.0:
              test: http://ceph.com/qa/test.sh
              type: block
              num_rbd: 2

    You can set the amount of memory the VM has (default is 1024 MB)::

        tasks:
        - ceph:
        - qemu:
            client.0:
              test: http://ceph.com/qa/test.sh
              memory: 512 # megabytes
    """
    assert isinstance(config, dict), \
           "task qemu only supports a dictionary for configuration"

    config = teuthology.replace_all_with_clients(ctx.cluster, config)

    managers = []
    for client, client_config in config.iteritems():
        num_rbd = client_config.get('num_rbd', 1)
        assert num_rbd > 0, 'at least one rbd device must be used'
        for i in xrange(num_rbd):
            create_config = {
                client: {
                    'image_name':
                    '{client}.{num}'.format(client=client, num=i),
                    }
                }
            managers.append(
                lambda create_config=create_config:
                rbd.create_image(ctx=ctx, config=create_config)
                )

    managers.extend([
        lambda: create_dirs(ctx=ctx, config=config),
        lambda: generate_iso(ctx=ctx, config=config),
        lambda: download_image(ctx=ctx, config=config),
        lambda: run_qemu(ctx=ctx, config=config),
        ])

    with contextutil.nested(*managers):
        yield
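
The `lambda create_config=create_config:` idiom in the loop above is deliberate: Python closures capture variables by reference, so without the default-argument binding every lambda would see the final loop value. A standalone illustration:

naive = [lambda: i for i in range(3)]      # all three share the same 'i'
bound = [lambda i=i: i for i in range(3)]  # each binds 'i' at definition time

print([f() for f in naive])   # [2, 2, 2]
print([f() for f in bound])   # [0, 1, 2]
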
Code example #32
File: cephadm.py Project: doitroot/ceph_source
def task(ctx, config):
    """
    Deploy ceph cluster using cephadm

    Setup containers' mirrors before the bootstrap, if corresponding
    config provided in teuthology server config yaml file.

    For example, teuthology.yaml can contain the 'defaults' section:

        defaults:
          cephadm:
            containers:
              registry_mirrors:
                docker.io: 'registry.mirror.example.com:5000'
              image: 'quay.io/ceph-ci/ceph'

    Using overrides makes it possible to customize it per run.
    The equivalent 'overrides' section looks like:

        overrides:
          cephadm:
            containers:
              registry_mirrors:
                docker.io: 'registry.mirror.example.com:5000'
              image: 'quay.io/ceph-ci/ceph'

    :param ctx: the argparse.Namespace object
    :param config: the config dict
    """
    if config is None:
        config = {}

    assert isinstance(config, dict), \
        "task only supports a dictionary for configuration"

    overrides = ctx.config.get('overrides', {})
    teuthology.deep_merge(config, overrides.get('ceph', {}))
    teuthology.deep_merge(config, overrides.get('cephadm', {}))
    log.info('Config: ' + str(config))

    # set up cluster context
    if not hasattr(ctx, 'ceph'):
        ctx.ceph = {}
        ctx.managers = {}
    if 'cluster' not in config:
        config['cluster'] = 'ceph'
    cluster_name = config['cluster']
    if cluster_name not in ctx.ceph:
        ctx.ceph[cluster_name] = argparse.Namespace()
        ctx.ceph[cluster_name].bootstrapped = False
 
    # image
    teuth_defaults = teuth_config.get('defaults', {})
    cephadm_defaults = teuth_defaults.get('cephadm', {})
    containers_defaults = cephadm_defaults.get('containers', {})
    mirrors_defaults = containers_defaults.get('registry_mirrors', {})
    container_registry_mirror = mirrors_defaults.get('docker.io', None)
    container_image_name = containers_defaults.get('image', None)

    containers = config.get('containers', {})
    mirrors = containers.get('registry_mirrors', {})
    container_image_name = containers.get('image', container_image_name)
    container_registry_mirror = mirrors.get('docker.io',
                                            container_registry_mirror)


    if not hasattr(ctx.ceph[cluster_name], 'image'):
        ctx.ceph[cluster_name].image = config.get('image')
    ref = None
    if not ctx.ceph[cluster_name].image:
        if not container_image_name:
            raise Exception("Configuration error occurred. "
                            "The 'image' value is undefined for 'cephadm' task. "
                            "Please provide corresponding options in the task's "
                            "config, task 'overrides', or teuthology 'defaults' "
                            "section.")
        sha1 = config.get('sha1')
        flavor = config.get('flavor', 'default')

        if sha1:
            if flavor == "crimson":
                ctx.ceph[cluster_name].image = container_image_name + ':' + sha1 + '-' + flavor
            else:
                ctx.ceph[cluster_name].image = container_image_name + ':' + sha1
            ref = sha1
        else:
            # hmm, fall back to branch?
            branch = config.get('branch', 'master')
            ref = branch
            ctx.ceph[cluster_name].image = container_image_name + ':' + branch
    log.info('Cluster image is %s' % ctx.ceph[cluster_name].image)


    with contextutil.nested(
            #if the cluster is already bootstrapped bypass corresponding methods
            lambda: _bypass() if (ctx.ceph[cluster_name].bootstrapped)\
                              else initialize_config(ctx=ctx, config=config),
            lambda: ceph_initial(),
            lambda: normalize_hostnames(ctx=ctx),
            lambda: _bypass() if (ctx.ceph[cluster_name].bootstrapped)\
                              else download_cephadm(ctx=ctx, config=config, ref=ref),
            lambda: ceph_log(ctx=ctx, config=config),
            lambda: ceph_crash(ctx=ctx, config=config),
            lambda: _bypass() if (ctx.ceph[cluster_name].bootstrapped)\
                              else ceph_bootstrap(ctx, config,
                                                  container_registry_mirror),
            lambda: crush_setup(ctx=ctx, config=config),
            lambda: ceph_mons(ctx=ctx, config=config),
            lambda: distribute_config_and_admin_keyring(ctx=ctx, config=config),
            lambda: ceph_mgrs(ctx=ctx, config=config),
            lambda: ceph_osds(ctx=ctx, config=config),
            lambda: ceph_mdss(ctx=ctx, config=config),
            lambda: ceph_rgw(ctx=ctx, config=config),
            lambda: ceph_monitoring('prometheus', ctx=ctx, config=config),
            lambda: ceph_monitoring('node-exporter', ctx=ctx, config=config),
            lambda: ceph_monitoring('alertmanager', ctx=ctx, config=config),
            lambda: ceph_monitoring('grafana', ctx=ctx, config=config),
            lambda: ceph_clients(ctx=ctx, config=config),
    ):
        ctx.managers[cluster_name] = CephManager(
            ctx.ceph[cluster_name].bootstrap_remote,
            ctx=ctx,
            logger=log.getChild('ceph_manager.' + cluster_name),
            cluster=cluster_name,
            cephadm=True,
        )

        try:
            if config.get('wait-for-healthy', True):
                healthy(ctx=ctx, config=config)

            log.info('Setup complete, yielding')
            yield

        finally:
            log.info('Teardown begin')
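
The container settings above are resolved in layers: the per-run task config wins over the teuthology 'defaults' section. collections.ChainMap expresses the same per-key precedence that the task code does by hand (the second image value below is just an example):

from collections import ChainMap

defaults_containers = {
    'image': 'quay.io/ceph-ci/ceph',
    'registry_mirrors': {'docker.io': 'registry.mirror.example.com:5000'},
}
task_containers = {'image': 'quay.io/ceph-ci/ceph:custom'}  # example override

containers = ChainMap(task_containers, defaults_containers)
print(containers['image'])             # the per-run value wins
print(containers['registry_mirrors'])  # falls back to the defaults section
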
Code example #33
def task(ctx, config):
    """
    Set up and tear down a Hadoop cluster.

    This depends on either having ceph installed prior to hadoop, like so:

    roles:
    - [mon.0, mds.0, osd.0, hadoop.master.0]
    - [mon.1, osd.1, hadoop.slave.0]
    - [mon.2, hadoop.slave.1]

    tasks:
    - ceph:
    - hadoop:

    Or if you want to use HDFS under Hadoop, this will configure Hadoop
    for HDFS and start it along with MapReduce. Note that it does not 
    require Ceph be installed.

    roles:
    - [hadoop.master.0]
    - [hadoop.slave.0]
    - [hadoop.slave.1]

    tasks:
    - hadoop:
        hdfs: True

    This task requires exactly one hadoop.master be specified 
    and at least one hadoop.slave.

    This does *not* do anything with the Hadoop setup. To run wordcount, 
    you could use pexec like so (after the hadoop task):

    - pexec: 
        hadoop.slave.0:
          - mkdir -p /tmp/hadoop_input
          - wget http://ceph.com/qa/hadoop_input_files.tar -O /tmp/hadoop_input/files.tar
          - cd /tmp/hadoop_input/; tar -xf /tmp/hadoop_input/files.tar
          - {tdir}/hadoop/bin/hadoop fs -mkdir wordcount_input
          - {tdir}/hadoop/bin/hadoop fs -put /tmp/hadoop_input/*txt wordcount_input/
          - {tdir}/hadoop/bin/hadoop jar {tdir}/hadoop/build/hadoop-example*jar wordcount wordcount_input wordcount_output
          - rm -rf /tmp/hadoop_input
    """.format(tdir=teuthology.get_testdir(ctx))

    if config is None:
        config = {}
    assert isinstance(config, dict), \
        "task hadoop only supports a dictionary for configuration"

    dist = 'precise'
    format = 'jar'
    arch = 'x86_64'
    flavor = config.get('flavor', 'basic')

    ctx.summary['flavor'] = flavor

    overrides = ctx.config.get('overrides', {})
    teuthology.deep_merge(config, overrides.get('hadoop', {}))

    apache_branch = None
    if config.get('apache_hadoop_branch') is not None:
        apache_branch = config.get('apache_hadoop_branch')
    else:
        apache_branch = 'branch-1.0'  # hadoop branch to acquire

    inktank_branch = None
    if config.get('inktank_hadoop_branch') is not None:
        inktank_branch = config.get('inktank_hadoop_branch')
    else:
        inktank_branch = 'cephfs/branch-1.0'  # default branch name

    # replace any '/' with a '_' to match the artifact paths
    inktank_branch = inktank_branch.replace('/', '_')
    apache_branch = apache_branch.replace('/', '_')

    with contextutil.nested(
            lambda: validate_config(ctx=ctx, config=config),
            lambda: binaries(ctx=ctx,
                             config=dict(
                                 tag=config.get('tag'),
                                 sha1=config.get('sha1'),
                                 path=config.get('path'),
                                 flavor=flavor,
                                 dist=config.get('dist', dist),
                                 format=format,
                                 arch=arch,
                                 apache_branch=apache_branch,
                                 inktank_branch=inktank_branch,
                             )),
            lambda: configure_hadoop(ctx=ctx, config=config),
            lambda: start_hadoop(ctx=ctx, config=config),
            lambda: out_of_safemode(ctx=ctx, config=config),
    ):
        yield
Code example #34
File: qemu.py Project: yehudasa/ceph-pr-archive
def task(ctx, config):
    """
    Run a test inside of QEMU on top of rbd. Only one test
    is supported per client.

    For example, you can specify which clients to run on::

        tasks:
        - ceph:
        - qemu:
            client.0:
              test: http://download.ceph.com/qa/test.sh
            client.1:
              test: http://download.ceph.com/qa/test2.sh

    Or use the same settings on all clients:

        tasks:
        - ceph:
        - qemu:
            all:
              test: http://download.ceph.com/qa/test.sh

    For tests that don't need a filesystem, set type to block::

        tasks:
        - ceph:
        - qemu:
            client.0:
              test: http://download.ceph.com/qa/test.sh
              type: block

    The test should be configured to run on /dev/vdb and later
    devices.

    If you want to run a test that uses more than one rbd image,
    specify how many images to use::

        tasks:
        - ceph:
        - qemu:
            client.0:
              test: http://download.ceph.com/qa/test.sh
              type: block
              disks: 2

    - or -

        tasks:
        - ceph:
        - qemu:
            client.0:
              test: http://ceph.com/qa/test.sh
              type: block
              disks:
                - image_size: 1024
                - image_size: 2048

    You can set the amount of CPUs and memory the VM has (default is 1 CPU and
    4096 MB)::

        tasks:
        - ceph:
        - qemu:
            client.0:
              test: http://download.ceph.com/qa/test.sh
              cpus: 4
              memory: 512 # megabytes

    If you want to run a test against a cloned rbd image, set clone to true::

        tasks:
        - ceph:
        - qemu:
            client.0:
              test: http://download.ceph.com/qa/test.sh
              clone: true

    If you need to configure additional cloud-config options, set cloud_config
    to the required data set::

        tasks:
        - ceph
        - qemu:
            client.0:
                test: http://ceph.com/qa/test.sh
                cloud_config_archive:
                    - |
                      #!/bin/bash
                      touch foo1
                    - content: |
                        test data
                      type: text/plain
                      filename: /tmp/data

    If you need to override the default cloud image, set image_url:

        tasks:
        - ceph
        - qemu:
            client.0:
                test: http://ceph.com/qa/test.sh
                image_url: https://cloud-images.ubuntu.com/releases/16.04/release/ubuntu-16.04-server-cloudimg-amd64-disk1.img
    """
    assert isinstance(config, dict), \
           "task qemu only supports a dictionary for configuration"

    config = teuthology.replace_all_with_clients(ctx.cluster, config)

    managers = []
    create_images(ctx=ctx, config=config, managers=managers)
    managers.extend([
        lambda: create_dirs(ctx=ctx, config=config),
        lambda: generate_iso(ctx=ctx, config=config),
        lambda: download_image(ctx=ctx, config=config),
    ])
    create_clones(ctx=ctx, config=config, managers=managers)
    managers.append(lambda: run_qemu(ctx=ctx, config=config), )

    with contextutil.nested(*managers):
        yield
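
The docstring above shows two forms for 'disks': a plain count and a list of per-disk dicts. A small sketch of normalizing the count form into the list form (the default size here is an assumption, for illustration only):

DEFAULT_IMAGE_SIZE_MB = 10240  # assumed default, illustrative only

def normalize_disk_spec(disks):
    if isinstance(disks, int):
        return [{'image_size': DEFAULT_IMAGE_SIZE_MB} for _ in range(disks)]
    return list(disks)

print(normalize_disk_spec(2))
print(normalize_disk_spec([{'image_size': 1024}, {'image_size': 2048}]))
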
Code example #35
def task(ctx, config):
    """
    Run the testswift suite against rgw.

    To run all tests on all clients::

        tasks:
        - ceph:
        - rgw:
        - testswift:

    To restrict testing to particular clients::

        tasks:
        - ceph:
        - rgw: [client.0]
        - testswift: [client.0]

    To run against a server on client.1::

        tasks:
        - ceph:
        - rgw: [client.1]
        - testswift:
            client.0:
              rgw_server: client.1

    To pass extra arguments to nose (e.g. to run a certain test)::

        tasks:
        - ceph:
        - rgw: [client.0]
        - testswift:
            client.0:
              extra_args: ['test.functional.tests:TestFileUTF8', '-m', 'testCopy']
            client.1:
              extra_args: ['--exclude', 'TestFile']
    """
    assert config is None or isinstance(config, list) \
        or isinstance(config, dict), \
        "task testswift only supports a list or dictionary for configuration"
    all_clients = [
        'client.{id}'.format(id=id_)
        for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')
    ]
    if config is None:
        config = all_clients
    if isinstance(config, list):
        config = dict.fromkeys(config)
    clients = config.keys()

    log.info('clients={c}'.format(c=clients))

    testswift_conf = {}
    for client in clients:
        testswift_conf[client] = ConfigObj(indent_type='',
                                           infile={
                                               'func_test': {
                                                   'auth_port': 7280,
                                                   'auth_ssl': 'no',
                                                   'auth_prefix': '/auth/',
                                               },
                                           })

    with contextutil.nested(
            lambda: download(ctx=ctx, config=clients),
            lambda: create_users(ctx=ctx,
                                 config=dict(
                                     clients=clients,
                                     testswift_conf=testswift_conf,
                                 )),
            lambda: configure(ctx=ctx,
                              config=dict(
                                  clients=config,
                                  testswift_conf=testswift_conf,
                              )),
            lambda: run_tests(ctx=ctx, config=config),
    ):
        pass
    yield
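
Most of these rgw-related tasks share the same config normalization: None means "all clients", a list becomes a dict with empty per-client settings, and a dict is used as-is. Isolated for clarity:

def normalize_client_config(config, all_clients):
    if config is None:
        config = all_clients
    if isinstance(config, list):
        config = dict.fromkeys(config)
    return config

print(normalize_client_config(None, ['client.0', 'client.1']))
print(normalize_client_config(['client.0'], ['client.0', 'client.1']))
print(normalize_client_config({'client.0': {'rgw_server': 'client.1'}},
                              ['client.0', 'client.1']))
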
Code example #36
def task(ctx, config):
    """
    Set up and tear down a Ceph cluster.

    For example::

        tasks:
        - install:
             extras: yes
        - ssh_keys:
        - ceph-deploy:
             branch:
                stable: bobtail
             mon_initial_members: 1
             only_mon: true
             keep_running: true
             # either choose bluestore or filestore, default is bluestore
             bluestore: True
             # or
             filestore: True

        tasks:
        - install:
             extras: yes
        - ssh_keys:
        - ceph-deploy:
             branch:
                dev: master
             conf:
                mon:
                   debug mon = 20

        tasks:
        - install:
             extras: yes
        - ssh_keys:
        - ceph-deploy:
             branch:
                testing:
             dmcrypt: yes
             separate_journal_disk: yes

    """
    if config is None:
        config = {}

    assert isinstance(config, dict), \
        "task ceph-deploy only supports a dictionary for configuration"

    overrides = ctx.config.get('overrides', {})
    teuthology.deep_merge(config, overrides.get('ceph-deploy', {}))

    if config.get('branch') is not None:
        assert isinstance(config['branch'],
                          dict), 'branch must be a dictionary'

    log.info('task ceph-deploy with config ' + str(config))

    with contextutil.nested(
            lambda: install_fn.ship_utilities(ctx=ctx, config=None),
            lambda: download_ceph_deploy(ctx=ctx, config=config),
            lambda: build_ceph_cluster(ctx=ctx, config=config),
    ):
        yield
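
Every task here hands a list of `lambda: <context manager>` callables to contextutil.nested(), which enters them in order and unwinds them in reverse when the block exits. contextlib.ExitStack gives the same shape in standard Python; the sketch below illustrates that behaviour and is not teuthology's implementation:

import contextlib

@contextlib.contextmanager
def step(name):
    print('enter', name)
    try:
        yield
    finally:
        print('exit', name)

subtasks = [lambda: step('ship_utilities'),
            lambda: step('download_ceph_deploy'),
            lambda: step('build_ceph_cluster')]

with contextlib.ExitStack() as stack:
    for subtask in subtasks:
        stack.enter_context(subtask())
    print('cluster up, yielding to the next task')
# enter ship_utilities
# enter download_ceph_deploy
# enter build_ceph_cluster
# cluster up, yielding to the next task
# exit build_ceph_cluster
# exit download_ceph_deploy
# exit ship_utilities
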
Code example #37
File: ragweed.py Project: zhxin/ceph-14.2.13
def task(ctx, config):
    """
    Run the ragweed suite against rgw.

    To run all tests on all clients::

        tasks:
        - ceph:
        - rgw:
        - ragweed:

    To restrict testing to particular clients::

        tasks:
        - ceph:
        - rgw: [client.0]
        - ragweed: [client.0]

    To run against a server on client.1 and increase the boto timeout to 10m::

        tasks:
        - ceph:
        - rgw: [client.1]
        - ragweed:
            client.0:
              rgw_server: client.1
              idle_timeout: 600
              stages: prepare,check

    To pass extra arguments to nose (e.g. to run a certain test)::

        tasks:
        - ceph:
        - rgw: [client.0]
        - ragweed:
            client.0:
              extra_args: ['test_s3:test_object_acl_grand_public_read']
            client.1:
              extra_args: ['--exclude', 'test_100_continue']
    """
    assert hasattr(ctx, 'rgw'), 'ragweed must run after the rgw task'
    assert config is None or isinstance(config, list) \
        or isinstance(config, dict), \
        "task ragweed only supports a list or dictionary for configuration"
    all_clients = [
        'client.{id}'.format(id=id_)
        for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')
    ]
    if config is None:
        config = all_clients
    if isinstance(config, list):
        config = dict.fromkeys(config)
    clients = config.keys()

    overrides = ctx.config.get('overrides', {})
    # merge each client section, not the top level.
    for client in config.keys():
        if not config[client]:
            config[client] = {}
        teuthology.deep_merge(config[client], overrides.get('ragweed', {}))

    log.debug('ragweed config is %s', config)

    ragweed_conf = {}
    for client in clients:
        endpoint = ctx.rgw.role_endpoints.get(client)
        assert endpoint, 'ragweed: no rgw endpoint for {}'.format(client)

        ragweed_conf[client] = ConfigObj(indent_type='',
                                         infile={
                                             'rgw': {
                                                 'port':
                                                 endpoint.port,
                                                 'is_secure':
                                                 endpoint.cert is not None,
                                             },
                                             'fixtures': {},
                                             'user system': {},
                                             'user regular': {},
                                             'rados': {
                                                 'ceph_conf':
                                                 '/etc/ceph/ceph.conf',
                                             },
                                         })

    run_stages = {}

    with contextutil.nested(
            lambda: download(ctx=ctx, config=config),
            lambda: create_users(ctx=ctx,
                                 config=dict(
                                     clients=clients,
                                     ragweed_conf=ragweed_conf,
                                     config=config,
                                 ),
                                 run_stages=run_stages),
            lambda: configure(ctx=ctx,
                              config=dict(
                                  clients=config,
                                  ragweed_conf=ragweed_conf,
                              ),
                              run_stages=run_stages),
            lambda: run_tests(ctx=ctx, config=config, run_stages=run_stages),
    ):
        pass
    yield
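
The 'stages' option in the docstring is a comma-separated string (e.g. 'prepare,check'). A simple sketch of turning it into a per-client list; the default of running both stages is an assumption for illustration:

def parse_stages(client_config, default=('prepare', 'check')):
    stages = (client_config or {}).get('stages')
    if not stages:
        return list(default)
    return [s.strip() for s in stages.split(',') if s.strip()]

print(parse_stages({'stages': 'prepare,check'}))  # ['prepare', 'check']
print(parse_stages({}))                           # ['prepare', 'check']
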
Code example #38
def task(ctx, config):
    """
    Run the s3tests-test-readwrite suite against rgw.

    To run all tests on all clients::

        tasks:
        - ceph:
        - rgw:
        - s3readwrite:

    To restrict testing to particular clients::

        tasks:
        - ceph:
        - rgw: [client.0]
        - s3readwrite: [client.0]

    To run against a server on client.1::

        tasks:
        - ceph:
        - rgw: [client.1]
        - s3readwrite:
            client.0:
              rgw_server: client.1

    To pass extra test arguments

        tasks:
        - ceph:
        - rgw: [client.0]
        - s3readwrite:
            client.0:
              readwrite:
                bucket: mybucket
                readers: 10
                writers: 3
                duration: 600
                files:
                  num: 10
                  size: 2000
                  stddev: 500
            client.1:
              ...

    To override s3 configuration

        tasks:
        - ceph:
        - rgw: [client.0]
        - s3readwrite:
            client.0:
              s3:
                user_id: myuserid
                display_name: myname
                email: my@email
                access_key: myaccesskey
                secret_key: mysecretkey

    """
    assert config is None or isinstance(config, list) \
        or isinstance(config, dict), \
        "task s3tests only supports a list or dictionary for configuration"
    all_clients = [
        'client.{id}'.format(id=id_)
        for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')
    ]
    if config is None:
        config = all_clients
    if isinstance(config, list):
        config = dict.fromkeys(config)
    clients = config.keys()

    s3tests_conf = {}
    for client in clients:
        if config[client] is None:
            config[client] = {}
        config[client].setdefault('s3', {})
        config[client].setdefault('readwrite', {})

        s3tests_conf[client] = ({
            'DEFAULT': {
                'port': 7280,
                'is_secure': False,
            },
            'readwrite': config[client]['readwrite'],
            's3': config[client]['s3'],
        })

    with contextutil.nested(
            lambda: download(ctx=ctx, config=clients),
            lambda: create_users(ctx=ctx,
                                 config=dict(
                                     clients=clients,
                                     s3tests_conf=s3tests_conf,
                                 )),
            lambda: configure(ctx=ctx,
                              config=dict(
                                  clients=config,
                                  s3tests_conf=s3tests_conf,
                              )),
            lambda: run_tests(ctx=ctx, config=config),
    ):
        yield
Code example #39
def task(ctx, config):
    """
    Deploy and configure Keystone

    Example of configuration:

      - install:
      - ceph:
      - tox: [ client.0 ]
      - keystone:
          client.0:
            force-branch: master
            tenants:
              - name: admin
                description:  Admin Tenant
            users:
              - name: admin
                password: ADMIN
                project: admin
            roles: [ name: admin, name: Member ]
            role-mappings:
              - name: admin
                user: admin
                project: admin
            services:
              - name: keystone
                type: identity
                description: Keystone Identity Service
              - name: swift
                type: object-store
                description: Swift Service
    """
    assert config is None or isinstance(config, list) \
        or isinstance(config, dict), \
        "task keystone only supports a list or dictionary for configuration"

    if not hasattr(ctx, 'tox'):
        raise ConfigError('keystone must run after the tox task')

    all_clients = [
        'client.{id}'.format(id=id_)
        for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')
    ]
    if config is None:
        config = all_clients
    if isinstance(config, list):
        config = dict.fromkeys(config)

    log.debug('Keystone config is %s', config)

    ctx.keystone = argparse.Namespace()
    ctx.keystone.public_endpoints = assign_ports(ctx, config, 5000)
    ctx.keystone.admin_endpoints = assign_ports(ctx, config, 35357)

    with contextutil.nested(
            lambda: download(ctx=ctx, config=config),
            lambda: install_packages(ctx=ctx, config=config),
            lambda: setup_venv(ctx=ctx, config=config),
            lambda: configure_instance(ctx=ctx, config=config),
            lambda: run_keystone(ctx=ctx, config=config),
            lambda: fill_keystone(ctx=ctx, config=config),
    ):
        yield
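
assign_ports() itself is not shown in this excerpt. A plausible, purely illustrative sketch is that it hands out ports counting up from the given base (5000 for the public endpoints, 35357 for the admin endpoints), one per client:

def assign_ports_sketch(clients, base_port):
    """Illustrative only: one port per client, counting up from base_port."""
    return {client: base_port + i for i, client in enumerate(sorted(clients))}

print(assign_ports_sketch(['client.0', 'client.1'], 5000))
# {'client.0': 5000, 'client.1': 5001}
print(assign_ports_sketch(['client.0', 'client.1'], 35357))
# {'client.0': 35357, 'client.1': 35358}
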
Code example #40
File: qemu.py Project: chuang-he/ceph
def task(ctx, config):
    """
    Run a test inside of QEMU on top of rbd. Only one test
    is supported per client.

    For example, you can specify which clients to run on::

        tasks:
        - ceph:
        - qemu:
            client.0:
              test: http://ceph.com/qa/test.sh
            client.1:
              test: http://ceph.com/qa/test2.sh

    Or use the same settings on all clients:

        tasks:
        - ceph:
        - qemu:
            all:
              test: http://ceph.com/qa/test.sh

    For tests that don't need a filesystem, set type to block::

        tasks:
        - ceph:
        - qemu:
            client.0:
              test: http://ceph.com/qa/test.sh
              type: block

    The test should be configured to run on /dev/vdb and later
    devices.

    If you want to run a test that uses more than one rbd image,
    specify how many images to use::

        tasks:
        - ceph:
        - qemu:
            client.0:
              test: http://ceph.com/qa/test.sh
              type: block
              num_rbd: 2

    You can set the amount of memory the VM has (default is 1024 MB)::

        tasks:
        - ceph:
        - qemu:
            client.0:
              test: http://ceph.com/qa/test.sh
              memory: 512 # megabytes

    If you want to run a test against a cloned rbd image, set clone to true::

        tasks:
        - ceph:
        - qemu:
            client.0:
              test: http://ceph.com/qa/test.sh
              clone: true
    """
    assert isinstance(config, dict), \
           "task qemu only supports a dictionary for configuration"

    config = teuthology.replace_all_with_clients(ctx.cluster, config)

    managers = []
    create_images(ctx=ctx, config=config, managers=managers)
    managers.extend([
        lambda: create_dirs(ctx=ctx, config=config),
        lambda: generate_iso(ctx=ctx, config=config),
        lambda: download_image(ctx=ctx, config=config),
    ])
    create_clones(ctx=ctx, config=config, managers=managers)
    managers.append(lambda: run_qemu(ctx=ctx, config=config), )

    with contextutil.nested(*managers):
        yield
Code example #41
def task(ctx, config):
    """
    Either configure apache to run a rados gateway, or use the built-in
    civetweb server.
    Only one should be run per machine, since it uses a hard-coded port for
    now.

    For example, to run rgw on all clients::

        tasks:
        - ceph:
        - rgw:

    To only run on certain clients::

        tasks:
        - ceph:
        - rgw: [client.0, client.3]

    or

        tasks:
        - ceph:
        - rgw:
            client.0:
            client.3:

    You can adjust the idle timeout for fastcgi (default is 30 seconds):

        tasks:
        - ceph:
        - rgw:
            client.0:
              idle_timeout: 90

    To run radosgw through valgrind:

        tasks:
        - ceph:
        - rgw:
            client.0:
              valgrind: [--tool=memcheck]
            client.3:
              valgrind: [--tool=memcheck]

    To use civetweb instead of apache:

        tasks:
        - ceph:
        - rgw:
          - client.0
        overrides:
          rgw:
            frontend: civetweb

    Note that without a modified fastcgi module, e.g. with the default
    one on CentOS, you must have rgw print continue = false in ceph.conf::

        tasks:
        - ceph:
            conf:
              global:
                rgw print continue: false
        - rgw: [client.0]

    To use mod_proxy_fcgi instead of mod_fastcgi:

        overrides:
          rgw:
            use_fcgi: true

    To run rgws for multiple regions or zones, describe the regions
    and their zones in a regions section. The endpoints will be
    generated by this task. Each client must have a region, zone,
    and pools assigned in ceph.conf::

        tasks:
        - install:
        - ceph:
            conf:
              client.0:
                rgw region: foo
                rgw zone: foo-1
                rgw region root pool: .rgw.rroot.foo
                rgw zone root pool: .rgw.zroot.foo
                rgw log meta: true
                rgw log data: true
              client.1:
                rgw region: bar
                rgw zone: bar-master
                rgw region root pool: .rgw.rroot.bar
                rgw zone root pool: .rgw.zroot.bar
                rgw log meta: true
                rgw log data: true
              client.2:
                rgw region: bar
                rgw zone: bar-secondary
                rgw region root pool: .rgw.rroot.bar
                rgw zone root pool: .rgw.zroot.bar-secondary
        - rgw:
            default_idle_timeout: 30
            ec-data-pool: true
            erasure_code_profile:
              k: 2
              m: 1
              ruleset-failure-domain: osd
            realm: foo
            regions:
              foo:
                api name: api_name # default: region name
                is master: true    # default: false
                master zone: foo-1 # default: first zone
                zones: [foo-1]
                log meta: true
                log data: true
                placement targets: [target1, target2] # default: []
                default placement: target2            # default: ''
              bar:
                api name: bar-api
                zones: [bar-master, bar-secondary]
            client.0:
              system user:
                name: foo-system
                access key: X2IYPSTY1072DDY1SJMC
                secret key: YIMHICpPvT+MhLTbSsiBJ1jQF15IFvJA8tgwJEcm
            client.1:
              system user:
                name: bar1
                access key: Y2IYPSTY1072DDY1SJMC
                secret key: XIMHICpPvT+MhLTbSsiBJ1jQF15IFvJA8tgwJEcm
            client.2:
              system user:
                name: bar2
                access key: Z2IYPSTY1072DDY1SJMC
                secret key: ZIMHICpPvT+MhLTbSsiBJ1jQF15IFvJA8tgwJEcm
    """
    if config is None:
        config = dict(('client.{id}'.format(id=id_), None)
                      for id_ in teuthology.all_roles_of_type(
                          ctx.cluster, 'client'))
    elif isinstance(config, list):
        config = dict((name, None) for name in config)

    overrides = ctx.config.get('overrides', {})
    teuthology.deep_merge(config, overrides.get('rgw', {}))

    regions = {}
    if 'regions' in config:
        # separate region info so only clients are keys in config
        regions = config['regions']
        del config['regions']

    role_endpoints = assign_ports(ctx, config)
    ctx.rgw = argparse.Namespace()
    ctx.rgw.role_endpoints = role_endpoints
    # stash the region info for later, since it was deleted from the config
    # structure
    ctx.rgw.regions = regions

    realm = None
    if 'realm' in config:
        # separate region info so only clients are keys in config
        realm = config['realm']
        del config['realm']
    ctx.rgw.realm = realm

    ctx.rgw.ec_data_pool = False
    if 'ec-data-pool' in config:
        ctx.rgw.ec_data_pool = bool(config['ec-data-pool'])
        del config['ec-data-pool']
    ctx.rgw.erasure_code_profile = {}
    if 'erasure_code_profile' in config:
        ctx.rgw.erasure_code_profile = config['erasure_code_profile']
        del config['erasure_code_profile']
    ctx.rgw.default_idle_timeout = 30
    if 'default_idle_timeout' in config:
        ctx.rgw.default_idle_timeout = int(config['default_idle_timeout'])
        del config['default_idle_timeout']
    ctx.rgw.cache_pools = False
    if 'cache-pools' in config:
        ctx.rgw.cache_pools = bool(config['cache-pools'])
        del config['cache-pools']

    ctx.rgw.frontend = 'civetweb'
    if 'frontend' in config:
        ctx.rgw.frontend = config['frontend']
        del config['frontend']

    ctx.rgw.use_fastcgi = True
    if "use_fcgi" in config:
        ctx.rgw.use_fastcgi = False
        log.info("Using mod_proxy_fcgi instead of mod_fastcgi...")
        del config['use_fcgi']

    subtasks = [
        lambda: create_nonregion_pools(
            ctx=ctx, config=config, regions=regions),
        ]

    multisite = len(regions) > 1

    if not multisite:
        for zonegroup, zonegroup_info in regions.iteritems():
            log.debug("zonegroup_info =%r", zonegroup_info)
            if len(zonegroup_info['zones']) > 1:
                multisite = True
                break

    log.debug('multisite %s', multisite)
    multi_cluster = multisite and len(ctx.config['roles']) > 1
    log.debug('multi_cluster %s', multi_cluster)
    master_client = None

    if multi_cluster:
        log.debug('multi cluster run')

        master_client = get_config_master_client(ctx=ctx,
                                                 config=config,
                                                 regions=regions)
        log.debug('master_client %r', master_client)
        subtasks.extend([
            lambda: configure_multisite_regions_and_zones(
                ctx=ctx,
                config=config,
                regions=regions,
                role_endpoints=role_endpoints,
                realm=realm,
                master_client = master_client,
            )
        ])

        subtasks.extend([
            lambda: configure_users_for_client(
                ctx=ctx,
                config=config,
                client=master_client,
                everywhere=False,
            ),
        ])

        if ctx.rgw.frontend == 'apache':
            subtasks.insert(0,
                            lambda: create_apache_dirs(ctx=ctx, config=config,
                                                       on_client=master_client))
            subtasks.extend([
                lambda: ship_apache_configs(ctx=ctx, config=config,
                                            role_endpoints=role_endpoints, on_client=master_client),
                lambda: start_rgw(ctx=ctx, config=config, on_client=master_client),
                lambda: start_apache(ctx=ctx, config=config, on_client=master_client),
            ])
        elif ctx.rgw.frontend == 'civetweb':
            subtasks.extend([
                lambda: start_rgw(ctx=ctx, config=config, on_client=master_client),
            ])
        else:
            raise ValueError("frontend must be 'apache' or 'civetweb'")

        subtasks.extend([lambda: wait_for_master(),])
        subtasks.extend([
            lambda: pull_configuration(ctx=ctx,
                                       config=config,
                                       regions=regions,
                                       role_endpoints=role_endpoints,
                                       realm=realm,
                                       master_client=master_client
            ),
        ])

        subtasks.extend([
            lambda: configure_users_for_client(
                ctx=ctx,
                config=config,
                client=master_client,
                everywhere=True
            ),
        ])

        if ctx.rgw.frontend == 'apache':
            subtasks.insert(0,
                            lambda: create_apache_dirs(ctx=ctx, config=config,
                                                       on_client=None,
                                                       except_client = master_client))
            subtasks.extend([
                lambda: ship_apache_configs(ctx=ctx, config=config,
                                            role_endpoints=role_endpoints,
                                            on_client=None,
                                            except_client = master_client,
                ),
                lambda: start_rgw(ctx=ctx,
                                  config=config,
                                  on_client=None,
                                  except_client = master_client),
                lambda: start_apache(ctx=ctx,
                                     config = config,
                                     on_client=None,
                                     except_client = master_client,
                ),
            ])
        elif ctx.rgw.frontend == 'civetweb':
            subtasks.extend([
                lambda: start_rgw(ctx=ctx,
                                  config=config,
                                  on_client=None,
                                  except_client = master_client),
            ])
        else:
            raise ValueError("frontend must be 'apache' or 'civetweb'")
                
    else:
        log.debug('single cluster run')
        subtasks.extend([
            lambda: configure_regions_and_zones(
                ctx=ctx,
                config=config,
                regions=regions,
                role_endpoints=role_endpoints,
                realm=realm,
            ),
            lambda: configure_users(
                ctx=ctx,
                config=config,
                everywhere=True,
            ),
        ])
        if ctx.rgw.frontend == 'apache':
            subtasks.insert(0, lambda: create_apache_dirs(ctx=ctx, config=config))
            subtasks.extend([
                lambda: ship_apache_configs(ctx=ctx, config=config,
                                            role_endpoints=role_endpoints),
                lambda: start_rgw(ctx=ctx,
                                  config=config),
                lambda: start_apache(ctx=ctx, config=config),
                ])
        elif ctx.rgw.frontend == 'civetweb':
            subtasks.extend([
                lambda: start_rgw(ctx=ctx,
                                  config=config),
            ])
        else:
            raise ValueError("frontend must be 'apache' or 'civetweb'")

    log.info("Using %s as radosgw frontend", ctx.rgw.frontend)
    with contextutil.nested(*subtasks):
        yield
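
The option handling at the top of this rgw task boils down to pulling a few well-known keys off the task config and stashing them on ctx.rgw before the subtasks run. A minimal standalone sketch of that pattern (the config values and the use of pop() are illustrative, not taken from the task itself):

import argparse

# Hypothetical task config; the keys mirror the ones consumed above.
config = {
    'frontend': 'civetweb',        # or 'apache'
    'default_idle_timeout': 60,    # seconds
    'cache-pools': True,
}

ctx = argparse.Namespace()
ctx.rgw = argparse.Namespace()

# Equivalent to the "if key in config: ... del config[key]" blocks above.
ctx.rgw.default_idle_timeout = int(config.pop('default_idle_timeout', 30))
ctx.rgw.cache_pools = bool(config.pop('cache-pools', False))
ctx.rgw.frontend = config.pop('frontend', 'civetweb')

print(ctx.rgw.frontend, ctx.rgw.default_idle_timeout, ctx.rgw.cache_pools)
# civetweb 60 True
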
Code example #42
File: swift.py Project: varshar16/ceph-ci
def task(ctx, config):
    """
    Run the testswift suite against rgw.

    To run all tests on all clients::

        tasks:
        - ceph:
        - rgw:
        - testswift:

    To restrict testing to particular clients::

        tasks:
        - ceph:
        - rgw: [client.0]
        - testswift: [client.0]

    To run against a server on client.1::

        tasks:
        - ceph:
        - rgw: [client.1]
        - testswift:
            client.0:
              rgw_server: client.1

    To pass extra arguments to nose (e.g. to run a certain test)::

        tasks:
        - ceph:
        - rgw: [client.0]
        - testswift:
            client.0:
              extra_args: ['test.functional.tests:TestFileUTF8', '-m', 'testCopy']
            client.1:
              extra_args: ['--exclude', 'TestFile']
    """
    assert hasattr(ctx, 'rgw'), 'swift must run after the rgw task'
    assert config is None or isinstance(config, list) \
        or isinstance(config, dict), \
        "task testswift only supports a list or dictionary for configuration"
    all_clients = ['client.{id}'.format(id=id_)
                   for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
    if config is None:
        config = all_clients
    if isinstance(config, list):
        config = dict.fromkeys(config)

    testswift_conf = {}
    clients = []
    for client, client_config in config.items():
        if client_config is None:
            client_config = {}
        # http://tracker.ceph.com/issues/40304 can't bootstrap on rhel 7.6+
        (remote,) = ctx.cluster.only(client).remotes.keys()
        if remote.os.name == 'rhel' and LooseVersion(remote.os.version) >= LooseVersion('7.6'):
            log.warning('Swift tests cannot run on rhel 7.6+, skipping client {}'.format(client))
            continue

        clients.append(client)

        server = client_config.get('rgw_server', client)
        endpoint = ctx.rgw.role_endpoints.get(server)
        assert endpoint, 'swift: no rgw endpoint for {}'.format(server)

        testswift_conf[client] = ConfigObj(
                indent_type='',
                infile={
                    'func_test':
                        {
                        'auth_host' : endpoint.hostname,
                        'auth_port' : endpoint.port,
                        'auth_ssl' : 'yes' if endpoint.cert else 'no',
                        'auth_prefix' : '/auth/',
                        },
                    }
                )
    # only take config for valid clients
    config = {c: config[c] for c in clients}

    log.info('clients={c}'.format(c=config.keys()))
    with contextutil.nested(
        lambda: download(ctx=ctx, config=config),
        lambda: create_users(ctx=ctx, config=testswift_conf),
        lambda: configure(ctx=ctx, config=testswift_conf),
        lambda: run_tests(ctx=ctx, config=config),
        ):
        pass
    yield
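
For reference, the loop above builds one small INI-style section per client via ConfigObj. A hedged sketch of what that section looks like for a made-up endpoint (hostname and port are assumptions; requires the configobj package):

from configobj import ConfigObj

conf = ConfigObj(
    indent_type='',
    infile={
        'func_test': {
            'auth_host': 'rgw.example.com',   # endpoint.hostname (made up)
            'auth_port': 7280,                # endpoint.port (made up)
            'auth_ssl': 'no',
            'auth_prefix': '/auth/',
        },
    },
)

# ConfigObj.write() with no file argument returns the rendered lines, roughly:
#   [func_test]
#   auth_host = rgw.example.com
#   auth_port = 7280
#   auth_ssl = no
#   auth_prefix = /auth/
print('\n'.join(conf.write()))
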
Code example #43
File: rbd.py Project: zli091/ceph
def xfstests(ctx, config):
    """
    Run xfstests over rbd devices.  This interface sets up all
    required configuration automatically if not otherwise specified.
    Note that only one instance of xfstests can run on a single host
    at a time.  By default, the set of tests specified is run once.
    If a (non-zero) count value is supplied, the complete set of
    tests will be run that number of times.

    For example::

        tasks:
        - ceph:
        # Image sizes are in MB
        - rbd.xfstests:
            client.0:
                count: 3
                test_image: 'test_image'
                test_size: 250
                test_format: 2
                scratch_image: 'scratch_image'
                scratch_size: 250
                scratch_format: 1
                fs_type: 'xfs'
                tests: 'generic/100 xfs/003 xfs/005 xfs/006 generic/015'
                exclude:
                - generic/42
                randomize: true
                xfstests_branch: master
                xfstests_url: 'https://raw.github.com/ceph/ceph/master/qa'
    """
    if config is None:
        config = {'all': None}
    assert isinstance(config, dict) or isinstance(config, list), \
        "task xfstests only supports a list or dictionary for configuration"
    if isinstance(config, dict):
        config = teuthology.replace_all_with_clients(ctx.cluster, config)
        runs = config.items()
    else:
        runs = [(role, None) for role in config]

    running_xfstests = {}
    for role, properties in runs:
        assert role.startswith('client.'), \
            "task xfstests can only run on client nodes"
        for host, roles_for_host in ctx.cluster.remotes.items():
            if role in roles_for_host:
                assert host not in running_xfstests, \
                    "task xfstests allows only one instance at a time per host"
                running_xfstests[host] = True

    images_config = {}
    scratch_config = {}
    modprobe_config = {}
    image_map_config = {}
    scratch_map_config = {}
    xfstests_config = {}
    for role, properties in runs:
        if properties is None:
            properties = {}

        test_image = properties.get('test_image',
                                    'test_image.{role}'.format(role=role))
        test_size = properties.get('test_size', 10000)  # 10G
        test_fmt = properties.get('test_format', 1)
        scratch_image = properties.get(
            'scratch_image', 'scratch_image.{role}'.format(role=role))
        scratch_size = properties.get('scratch_size', 10000)  # 10G
        scratch_fmt = properties.get('scratch_format', 1)

        images_config[role] = dict(
            image_name=test_image,
            image_size=test_size,
            image_format=test_fmt,
        )

        scratch_config[role] = dict(
            image_name=scratch_image,
            image_size=scratch_size,
            image_format=scratch_fmt,
        )

        xfstests_branch = properties.get('xfstests_branch', 'master')
        xfstests_url = properties.get(
            'xfstests_url',
            'https://raw.github.com/ceph/ceph/{branch}/qa'.format(
                branch=xfstests_branch))

        xfstests_config[role] = dict(
            count=properties.get('count', 1),
            test_dev='/dev/rbd/rbd/{image}'.format(image=test_image),
            scratch_dev='/dev/rbd/rbd/{image}'.format(image=scratch_image),
            fs_type=properties.get('fs_type', 'xfs'),
            randomize=properties.get('randomize', False),
            tests=properties.get('tests'),
            exclude=properties.get('exclude', []),
            xfstests_url=xfstests_url,
        )

        log.info('Setting up xfstests using RBD images:')
        log.info('      test ({size} MB): {image}'.format(size=test_size,
                                                          image=test_image))
        log.info('   scratch ({size} MB): {image}'.format(size=scratch_size,
                                                          image=scratch_image))
        modprobe_config[role] = None
        image_map_config[role] = {'image_name': test_image}
        scratch_map_config[role] = {'image_name': scratch_image}

    with contextutil.nested(
            lambda: create_image(ctx=ctx, config=images_config),
            lambda: create_image(ctx=ctx, config=scratch_config),
            lambda: modprobe(ctx=ctx, config=modprobe_config),
            lambda: dev_create(ctx=ctx, config=image_map_config),
            lambda: dev_create(ctx=ctx, config=scratch_map_config),
            lambda: run_xfstests(ctx=ctx, config=xfstests_config),
    ):
        yield
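
To make the defaults above concrete, here is a small sketch (illustrative only) of how a single run entry for client.0 expands into the per-role xfstests config the task builds, including the /dev/rbd/rbd/<image> device paths:

role, properties = 'client.0', {'count': 3, 'tests': 'generic/100'}

test_image = properties.get('test_image', 'test_image.{role}'.format(role=role))
scratch_image = properties.get('scratch_image', 'scratch_image.{role}'.format(role=role))

xfstests_entry = dict(
    count=properties.get('count', 1),
    test_dev='/dev/rbd/rbd/{image}'.format(image=test_image),
    scratch_dev='/dev/rbd/rbd/{image}'.format(image=scratch_image),
    fs_type=properties.get('fs_type', 'xfs'),
    randomize=properties.get('randomize', False),
)

print(xfstests_entry['test_dev'])     # /dev/rbd/rbd/test_image.client.0
print(xfstests_entry['scratch_dev'])  # /dev/rbd/rbd/scratch_image.client.0
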
Code example #44
File: barbican.py Project: zxgm/ceph
def task(ctx, config):
    """
    Deploy and configure Barbican

    Example of configuration:

    tasks:
      - local_cluster:
          cluster_path: /home/adam/ceph-1/build
      - local_rgw:
      - tox: [ client.0 ]
      - keystone:
          client.0:
            sha1: 12.0.0.0b2
            force-branch: master
            tenants:
              - name: admin
                description:  Admin Tenant
              - name: rgwcrypt
                description: Encryption Tenant
              - name: barbican
                description: Barbican
              - name: s3
                description: S3 project
            users:
              - name: admin
                password: ADMIN
                project: admin
              - name: rgwcrypt-user
                password: rgwcrypt-pass
                project: rgwcrypt
              - name: barbican-user
                password: barbican-pass
                project: barbican
              - name: s3-user
                password: s3-pass
                project: s3
            roles: [ name: admin, name: Member, name: creator ]
            role-mappings:
              - name: admin
                user: admin
                project: admin
              - name: Member
                user: rgwcrypt-user
                project: rgwcrypt
              - name: admin
                user: barbican-user
                project: barbican
              - name: creator
                user: s3-user
                project: s3
            services:
              - name: keystone
                type: identity
                description: Keystone Identity Service
      - barbican:
          client.0:
            force-branch: master
            use-keystone-role: client.0
            keystone_authtoken:
              auth_plugin: password
              username: barbican-user
              password: barbican-pass
              user_domain_name: Default
            rgw_user:
              tenantName: rgwcrypt
              username: rgwcrypt-user
              password: rgwcrypt-pass
            secrets:
              - name: my-key-1
                base64: a2V5MS5GcWVxKzhzTGNLaGtzQkg5NGVpb1FKcFpGb2c=
                tenantName: s3
                username: s3-user
                password: s3-pass
              - name: my-key-2
                base64: a2V5Mi5yNUNNMGFzMVdIUVZxcCt5NGVmVGlQQ1k4YWg=
                tenantName: s3
                username: s3-user
                password: s3-pass
      - s3tests:
          client.0:
            force-branch: master
            kms_key: my-key-1
      - rgw:
          client.0:
            use-keystone-role: client.0
            use-barbican-role: client.0
    """
    assert config is None or isinstance(config, list) \
        or isinstance(config, dict), \
        "task keystone only supports a list or dictionary for configuration"
    all_clients = ['client.{id}'.format(id=id_)
                   for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
    if config is None:
        config = all_clients
    if isinstance(config, list):
        config = dict.fromkeys(config)
    clients = config.keys()

    overrides = ctx.config.get('overrides', {})
    # merge each client section, not the top level.
    for client in config.keys():
        if not config[client]:
            config[client] = {}
        teuthology.deep_merge(config[client], overrides.get('barbican', {}))

    log.debug('Barbican config is %s', config)

    if not hasattr(ctx, 'keystone'):
        raise ConfigError('barbican must run after the keystone task')


    ctx.barbican = argparse.Namespace()
    ctx.barbican.endpoints = assign_ports(ctx, config, 9311)
    ctx.barbican.keys = {}
    
    with contextutil.nested(
        lambda: download(ctx=ctx, config=config),
        lambda: setup_venv(ctx=ctx, config=config),
        lambda: configure_barbican(ctx=ctx, config=config),
        lambda: run_barbican(ctx=ctx, config=config),
        lambda: create_secrets(ctx=ctx, config=config),
        ):
        yield
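
Unlike most tasks in this collection, the barbican task merges overrides into each client section rather than into the top-level config. A simplified stand-in for teuthology.deep_merge (an assumption about its behaviour, not the real helper) shows the effect:

def deep_merge(a, b):
    """Simplified recursive merge: values from b win over values in a."""
    for key, value in b.items():
        if isinstance(value, dict) and isinstance(a.get(key), dict):
            deep_merge(a[key], value)
        else:
            a[key] = value
    return a

config = {'client.0': {'force-branch': 'master'}}
overrides = {'barbican': {'use-keystone-role': 'client.0'}}

for client in config:
    deep_merge(config[client], overrides.get('barbican', {}))

print(config)
# {'client.0': {'force-branch': 'master', 'use-keystone-role': 'client.0'}}
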
Code example #45
File: rbd.py Project: zli091/ceph
def task(ctx, config):
    """
    Create and mount an rbd image.

    For example, you can specify which clients to run on::

        tasks:
        - ceph:
        - rbd: [client.0, client.1]

    There are a few image options::

        tasks:
        - ceph:
        - rbd:
            client.0: # uses defaults
            client.1:
                image_name: foo
                image_size: 2048
                image_format: 2
                fs_type: xfs

    To use default options on all clients::

        tasks:
        - ceph:
        - rbd:
            all:

    To create 20GiB images and format them with xfs on all clients::

        tasks:
        - ceph:
        - rbd:
            all:
              image_size: 20480
              fs_type: xfs
    """
    if config is None:
        config = {'all': None}
    norm_config = config
    if isinstance(config, dict):
        norm_config = teuthology.replace_all_with_clients(ctx.cluster, config)
    if isinstance(norm_config, dict):
        role_images = {}
        for role, properties in norm_config.items():
            if properties is None:
                properties = {}
            role_images[role] = {'image_name': properties.get('image_name')}
    else:
        role_images = norm_config

    log.debug('rbd config is: %s', norm_config)

    with contextutil.nested(
            lambda: create_image(ctx=ctx, config=norm_config),
            lambda: modprobe(ctx=ctx, config=norm_config),
            lambda: dev_create(ctx=ctx, config=role_images),
            lambda: generic_mkfs(
                ctx=ctx, config=norm_config, devname_rtn=rbd_devname_rtn),
            lambda: generic_mount(
                ctx=ctx, config=role_images, devname_rtn=rbd_devname_rtn),
    ):
        yield
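
The 'all' shorthand in the YAML examples above is expanded by teuthology.replace_all_with_clients before the per-role dicts are built. A rough stand-in (an assumption about its behaviour; the real helper takes a cluster object rather than a list of roles):

def replace_all_with_clients(client_roles, config):
    """Expand an 'all' key into one identical entry per client role."""
    if 'all' not in config:
        return config
    value = config['all']
    return {role: value for role in client_roles}

print(replace_all_with_clients(
    ['client.0', 'client.1'],
    {'all': {'image_size': 20480, 'fs_type': 'xfs'}}))
# {'client.0': {'image_size': 20480, 'fs_type': 'xfs'},
#  'client.1': {'image_size': 20480, 'fs_type': 'xfs'}}
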
Code example #46
def task(ctx, config):
    """
    Configures dnsmasq to add cnames for teuthology remotes. The task expects a
    dictionary, where each key is a role. If all cnames for that role use the
    same address as that role, the cnames can be given as a list. For example,
    this entry configures dnsmasq on the remote associated with client.0, adding
    two cnames for the ip address associated with client.0:

        - dnsmasq:
            client.0:
            - client0.example.com
            - c0.example.com

    If the addresses do not all match the given role, a dictionary can be given
    to specify the ip address by its target role. For example:

        - dnsmasq:
            client.0:
              client.0.example.com: client.0
              client.1.example.com: client.1

    Cnames that end with a . are treated as prefix for the existing hostname.
    For example, if the remote for client.0 has a hostname of 'example.com',
    this task will add cnames for dev.example.com and test.example.com:

        - dnsmasq:
            client.0: [dev., test.]
    """
    # apply overrides
    overrides = ctx.config.get('overrides', {})
    misc.deep_merge(config, overrides.get('dnsmasq', {}))

    # multiple roles may map to the same remote, so collect names by remote
    remote_names = {}
    for role, cnames in config.items():
        remote = get_remote_for_role(ctx, role)
        if remote is None:
            raise ConfigError('no remote for role %s' % role)

        names = remote_names.get(remote, {})

        if isinstance(cnames, list):
            # when given a list of cnames, point to local ip
            for cname in cnames:
                if cname.endswith('.'):
                    cname += remote.hostname
                names[cname] = remote.ip_address
        elif isinstance(cnames, dict):
            # when given a dict, look up the remote ip for each
            for cname, client in cnames.items():
                r = get_remote_for_role(ctx, client)
                if r is None:
                    raise ConfigError('no remote for role %s' % client)
                if cname.endswith('.'):
                    cname += r.hostname
                names[cname] = r.ip_address

        remote_names[remote] = names

    testdir = misc.get_testdir(ctx)
    resolv_bak = '/'.join((testdir, 'resolv.bak'))
    resolv_tmp = '/'.join((testdir, 'resolv.tmp'))

    # run subtasks for each unique remote
    subtasks = []
    for remote, cnames in remote_names.items():
        subtasks.extend([lambda r=remote: install_dnsmasq(r)])
        subtasks.extend([lambda r=remote: backup_resolv(r, resolv_bak)])
        subtasks.extend([lambda r=remote: replace_resolv(r, resolv_tmp)])
        subtasks.extend(
            [lambda r=remote, cn=cnames: setup_dnsmasq(r, testdir, cn)])

    with contextutil.nested(*subtasks):
        yield
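
The r=remote and cn=cnames default arguments in the subtask lambdas above are what pin each lambda to its own remote; without them, Python's late binding would make every subtask act on the last remote in the loop. A self-contained demonstration (hostnames are made up):

remotes = ['smithi001', 'smithi002', 'smithi003']

late_bound = [lambda: r for r in remotes]
early_bound = [lambda r=r: r for r in remotes]

print([f() for f in late_bound])   # ['smithi003', 'smithi003', 'smithi003']
print([f() for f in early_bound])  # ['smithi001', 'smithi002', 'smithi003']
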
Code example #47
def task(ctx, config):
    """
    Set up and tear down a Ceph cluster.

    For example::

        tasks:
        - install:
             extras: yes
        - ssh_keys:
        - ceph-deploy:
             branch:
                stable: bobtail
             mon_initial_members: 1
             ceph-deploy-branch: my-ceph-deploy-branch
             only_mon: true
             keep_running: true
             # either choose bluestore or filestore, default is bluestore
             bluestore: True
             # or
             filestore: True
             # skip install of mgr for old release using below flag
             skip-mgr: True  ( default is False )
             # to use ceph-volume instead of ceph-disk
             # ceph-disk can only be used with old ceph-deploy release from pypi
             use-ceph-volume: true

        tasks:
        - install:
             extras: yes
        - ssh_keys:
        - ceph-deploy:
             branch:
                dev: master
             conf:
                mon:
                   debug mon = 20

        tasks:
        - install:
             extras: yes
        - ssh_keys:
        - ceph-deploy:
             branch:
                testing:
             dmcrypt: yes
             separate_journal_disk: yes

    """
    if config is None:
        config = {}

    assert isinstance(config, dict), \
        "task ceph-deploy only supports a dictionary for configuration"

    overrides = ctx.config.get('overrides', {})
    teuthology.deep_merge(config, overrides.get('ceph-deploy', {}))

    if config.get('branch') is not None:
        assert isinstance(config['branch'],
                          dict), 'branch must be a dictionary'

    log.info('task ceph-deploy with config ' + str(config))

    # we need to use 1.5.39-stable for testing jewel or master branch with
    # ceph-disk
    if config.get('use-ceph-volume', False) is False:
        # check we are not testing specific branch
        if config.get('ceph-deploy-branch', False) is False:
            config['ceph-deploy-branch'] = '1.5.39-stable'

    with contextutil.nested(
            lambda: install_fn.ship_utilities(ctx=ctx, config=None),
            lambda: download_ceph_deploy(ctx=ctx, config=config),
            lambda: build_ceph_cluster(ctx=ctx, config=config),
    ):
        yield
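
The branch pinning near the end of this task can be restated on its own: unless ceph-volume is requested or a ceph-deploy branch is given explicitly, the config falls back to 1.5.39-stable, which the comments above note is needed for ceph-disk testing. A standalone sketch of that decision (the function name is illustrative):

def pick_ceph_deploy_branch(config):
    """Mirror of the pinning logic above, for illustration only."""
    if config.get('use-ceph-volume', False) is False:
        if config.get('ceph-deploy-branch', False) is False:
            config['ceph-deploy-branch'] = '1.5.39-stable'
    return config

print(pick_ceph_deploy_branch({}))
# {'ceph-deploy-branch': '1.5.39-stable'}
print(pick_ceph_deploy_branch({'use-ceph-volume': True}))
# {'use-ceph-volume': True}
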