Code example #1
File: ceph_deploy.py Project: zjs1224522500/ceph
def single_node_test(ctx, config):
    """
    - ceph-deploy.single_node_test: null

    #rhbuild testing
    - ceph-deploy.single_node_test:
        rhbuild: 1.2.3

    """
    log.info("Testing ceph-deploy on single node")
    if config is None:
        config = {}
    overrides = ctx.config.get('overrides', {})
    teuthology.deep_merge(config, overrides.get('ceph-deploy', {}))

    if config.get('rhbuild'):
        log.info("RH Build, Skip Download")
        with contextutil.nested(
            lambda: cli_test(ctx=ctx, config=config),
        ):
            yield
    else:
        with contextutil.nested(
            lambda: install_fn.ship_utilities(ctx=ctx, config=None),
            lambda: download_ceph_deploy(ctx=ctx, config=config),
            lambda: cli_test(ctx=ctx, config=config),
        ):
            yield
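
Nearly every example on this page follows the same pattern: pull the task's section out of ctx.config['overrides'] and fold it into the task's own config with deep_merge, so that override values win. As a rough mental model only (an illustrative sketch, not teuthology's actual implementation):

# Illustrative sketch of the override merge these tasks rely on: values from
# `overrides` win over values already in `config`, and nested dicts are merged
# recursively. Not the real teuthology.deep_merge().
def deep_merge(config, overrides):
    for key, value in overrides.items():
        if isinstance(value, dict) and isinstance(config.get(key), dict):
            deep_merge(config[key], value)
        else:
            config[key] = value
    return config

config = {'rhbuild': None, 'conf': {'mon': {'debug mon': 10}}}
overrides = {'rhbuild': '1.2.3', 'conf': {'mon': {'debug ms': 1}}}
deep_merge(config, overrides)
# config is now:
# {'rhbuild': '1.2.3', 'conf': {'mon': {'debug mon': 10, 'debug ms': 1}}}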
Code example #2
File: ceph_deploy.py Project: LargerPanda/ceph
def task(ctx, config):
    """
    Set up and tear down a Ceph cluster.

    For example::

        tasks:
        - install:
             extras: yes
        - ssh_keys:
        - ceph-deploy:
             branch:
                stable: bobtail
             mon_initial_members: 1
             only_mon: true
             keep_running: true

        tasks:
        - install:
             extras: yes
        - ssh_keys:
        - ceph-deploy:
             branch:
                dev: master
             conf:
                mon:
                   debug mon = 20

        tasks:
        - install:
             extras: yes
        - ssh_keys:
        - ceph-deploy:
             branch:
                testing:
             dmcrypt: yes
             separate_journal_disk: yes

    """
    if config is None:
        config = {}

    assert isinstance(config, dict), \
        "task ceph-deploy only supports a dictionary for configuration"

    overrides = ctx.config.get('overrides', {})
    teuthology.deep_merge(config, overrides.get('ceph-deploy', {}))

    if config.get('branch') is not None:
        assert isinstance(
            config['branch'], dict), 'branch must be a dictionary'

    log.info('task ceph-deploy with config ' + str(config))

    with contextutil.nested(
        lambda: install_fn.ship_utilities(ctx=ctx, config=None),
        lambda: download_ceph_deploy(ctx=ctx, config=config),
        lambda: build_ceph_cluster(ctx=ctx, config=config),
    ):
        yield
Code example #3
File: ceph-deploy.py Project: AsherBond/teuthology
def task(ctx, config):
    """
    Set up and tear down a Ceph cluster.

    For example::

        tasks:
        - install:
             extras: yes
        - ssh_keys:
        - ceph-deploy:
             branch:
                stable: bobtail
             mon_initial_members: 1

        tasks:
        - install:
             extras: yes
        - ssh_keys:
        - ceph-deploy:
             branch:
                dev: master
             conf:
                mon:
                   debug mon = 20

        tasks:
        - install:
             extras: yes
        - ssh_keys:
        - ceph-deploy:
             branch:
                testing:
    """
    if config is None:
        config = {}

    assert isinstance(config, dict), \
        "task ceph-deploy only supports a dictionary for configuration"

    overrides = ctx.config.get('overrides', {})
    teuthology.deep_merge(config, overrides.get('ceph-deploy', {}))

    if config.get('branch') is not None:
        assert isinstance(config['branch'], dict), 'branch must be a dictionary'

    with contextutil.nested(
         lambda: ceph_fn.ship_utilities(ctx=ctx, config=None),
         lambda: download_ceph_deploy(ctx=ctx, config=config),
         lambda: build_ceph_cluster(ctx=ctx, config=dict(
                 conf=config.get('conf', {}),
                 branch=config.get('branch',{}),
                 mon_initial_members=config.get('mon_initial_members', None),
                 test_mon_destroy=config.get('test_mon_destroy', None),
                 )),
        ):
        yield
Code example #4
File: install.py Project: jebtang/teuthology
def upgrade_common(ctx, config, deploy_style):
    """
    Common code for upgrading
    """

    assert config is None or isinstance(config, dict), \
        "install.upgrade only supports a dictionary for configuration"

    for i in config.keys():
        assert config.get(i) is None or isinstance(
            config.get(i), dict), 'host supports dictionary'

    project = config.get('project', 'ceph')

    # use 'install' overrides here, in case the upgrade target is left
    # unspecified/implicit.
    install_overrides = ctx.config.get(
        'overrides', {}).get('install', {}).get(project, {})
    log.info('project %s config %s overrides %s', project, config, install_overrides)

    # FIXME: extra_pkgs is not distro-agnostic
    extra_pkgs = config.get('extra_packages', [])
    log.info('extra packages: {packages}'.format(packages=extra_pkgs))

    # build a normalized remote -> config dict
    remotes = {}
    if 'all' in config:
        for remote in ctx.cluster.remotes.iterkeys():
            remotes[remote] = config.get('all')
    else:
        for role in config.keys():
            (remote,) = ctx.cluster.only(role).remotes.iterkeys()
            if remote in remotes:
                log.warn('remote %s came up twice (role %s)', remote, role)
                continue
            remotes[remote] = config.get(role)

    for remote, node in remotes.iteritems():
        if not node:
            node = {}

        this_overrides = copy.deepcopy(install_overrides)
        if 'sha1' in node or 'tag' in node or 'branch' in node:
            log.info('config contains sha1|tag|branch, removing those keys from override')
            this_overrides.pop('sha1', None)
            this_overrides.pop('tag', None)
            this_overrides.pop('branch', None)
        teuthology.deep_merge(node, this_overrides)
        log.info('remote %s config %s', remote, node)

        system_type = teuthology.get_system_type(remote)
        assert system_type in ('deb', 'rpm')
        pkgs = PACKAGES[project][system_type]
        log.info("Upgrading {proj} {system_type} packages: {pkgs}".format(
            proj=project, system_type=system_type, pkgs=', '.join(pkgs)))
        # FIXME: again, make extra_pkgs distro-agnostic
        pkgs += extra_pkgs
        node['project'] = project

        deploy_style(ctx, node, remote, pkgs, system_type)
Code example #5
File: ceph_deploy.py Project: bkmcfarland/ceph
def single_node_test(ctx, config):
    """
    - ceph-deploy.single_node_test: null

    #rhbuild testing
    - ceph-deploy.single_node_test:
        rhbuild: 1.2.3

    """
    log.info("Testing ceph-deploy on single node")
    if config is None:
        config = {}
    overrides = ctx.config.get('overrides', {})
    teuthology.deep_merge(config, overrides.get('ceph-deploy', {}))

    if config.get('rhbuild'):
        log.info("RH Build, Skip Download")
        with contextutil.nested(
            lambda: cli_test(ctx=ctx, config=config),
        ):
            yield
    else:
        with contextutil.nested(
            lambda: install_fn.ship_utilities(ctx=ctx, config=None),
            lambda: download_ceph_deploy(ctx=ctx, config=config),
            lambda: cli_test(ctx=ctx, config=config),
        ):
            yield
Code example #6
File: workunit.py Project: zwj262310/ceph
def get_refspec_after_overrides(config, overrides):
    # mimic the behavior of the "install" task, where the "overrides" are
    # actually the defaults of that task. in other words, if none of "sha1",
    # "tag", or "branch" is specified by a "workunit" tasks, we will update
    # it with the information in the "workunit" sub-task nested in "overrides".
    overrides = copy.deepcopy(overrides.get('workunit', {}))
    refspecs = {
        'suite_sha1': Refspec,
        'suite_branch': Branch,
        'sha1': Refspec,
        'tag': Refspec,
        'branch': Branch
    }
    if any(map(lambda i: i in config, refspecs.keys())):
        for i in refspecs.keys():
            overrides.pop(i, None)
    misc.deep_merge(config, overrides)

    for spec, cls in refspecs.items():
        refspec = config.get(spec)
        if refspec:
            refspec = cls(refspec)
            break
    if refspec is None:
        refspec = Head()
    return refspec
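
The precedence here is easy to misread, so the following hedged walkthrough uses plain dicts (Refspec, Branch and Head are assumed to be the classes from workunit.py above) to show the three possible outcomes:

# Case 1: the workunit task names its own refspec, so every refspec key in the
# override is dropped and the task's value wins.
config = {'sha1': 'abc123'}
overrides = {'workunit': {'branch': 'main'}}
# get_refspec_after_overrides(config, overrides) -> Refspec('abc123')

# Case 2: the task names nothing, so the override's refspec is merged in and used.
config = {}
overrides = {'workunit': {'branch': 'main'}}
# -> Branch('main')

# Case 3: neither side names a refspec, so the fallback is Head().
config = {}
overrides = {}
# -> Head()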
Code example #7
File: ceph_deploy.py Project: bkmcfarland/ceph
def task(ctx, config):
    """
    Set up and tear down a Ceph cluster.

    For example::

        tasks:
        - install:
             extras: yes
        - ssh_keys:
        - ceph-deploy:
             branch:
                stable: bobtail
             mon_initial_members: 1
             only_mon: true
             keep_running: true

        tasks:
        - install:
             extras: yes
        - ssh_keys:
        - ceph-deploy:
             branch:
                dev: master
             conf:
                mon:
                   debug mon = 20

        tasks:
        - install:
             extras: yes
        - ssh_keys:
        - ceph-deploy:
             branch:
                testing:
             dmcrypt: yes
             separate_journal_disk: yes

    """
    if config is None:
        config = {}

    assert isinstance(config, dict), \
        "task ceph-deploy only supports a dictionary for configuration"

    overrides = ctx.config.get('overrides', {})
    teuthology.deep_merge(config, overrides.get('ceph-deploy', {}))

    if config.get('branch') is not None:
        assert isinstance(
            config['branch'], dict), 'branch must be a dictionary'

    log.info('task ceph-deploy with config ' + str(config))

    with contextutil.nested(
        lambda: install_fn.ship_utilities(ctx=ctx, config=None),
        lambda: download_ceph_deploy(ctx=ctx, config=config),
        lambda: build_ceph_cluster(ctx=ctx, config=config),
    ):
        yield
Code example #8
File: rbd_fsx.py Project: ApusApp/ceph
def _run_one_client(ctx, config, role):
    """Spawned task that runs the client"""
    krbd = config.get('krbd', False)
    nbd = config.get('nbd', False)
    testdir = teuthology.get_testdir(ctx)
    (remote,) = ctx.cluster.only(role).remotes.iterkeys()

    args = []
    if krbd or nbd:
        args.append('sudo') # rbd(-nbd) map/unmap need privileges
    args.extend([
        'adjust-ulimits',
        'ceph-coverage',
        '{tdir}/archive/coverage'.format(tdir=testdir)
    ])

    overrides = ctx.config.get('overrides', {})
    teuthology.deep_merge(config, overrides.get('rbd_fsx', {}))

    if config.get('valgrind'):
        args = teuthology.get_valgrind_args(
            testdir,
            'fsx_{id}'.format(id=role),
            args,
            config.get('valgrind')
        )

    args.extend([
        'ceph_test_librbd_fsx',
        '-d', # debug output for all operations
        '-W', '-R', # mmap doesn't work with rbd
        '-p', str(config.get('progress_interval', 100)), # show progress
        '-P', '{tdir}/archive'.format(tdir=testdir),
        '-r', str(config.get('readbdy',1)),
        '-w', str(config.get('writebdy',1)),
        '-t', str(config.get('truncbdy',1)),
        '-h', str(config.get('holebdy',1)),
        '-l', str(config.get('size', 250000000)),
        '-S', str(config.get('seed', 0)),
        '-N', str(config.get('ops', 1000)),
    ])
    if krbd:
        args.append('-K') # -K enables krbd mode
    if nbd:
        args.append('-M') # -M enables nbd mode
    if config.get('direct_io', False):
        args.append('-Z') # -Z use direct IO
    if not config.get('randomized_striping', True):
        args.append('-U') # -U disables randomized striping
    if not config.get('punch_holes', True):
        args.append('-H') # -H disables discard ops
    if config.get('journal_replay', False):
        args.append('-j') # -j replay all IO events from journal
    args.extend([
        'pool_{pool}'.format(pool=role),
        'image_{image}'.format(image=role),
    ])

    remote.run(args=args)
Code example #9
File: check_counter.py Project: viduship/ceph-1
    def end(self):
        overrides = self.ctx.config.get('overrides', {})
        misc.deep_merge(self.config, overrides.get('check-counter', {}))

        cluster_name = self.config.get('cluster_name', None)
        dry_run = self.config.get('dry_run', False)
        targets = self.config.get('counters', {})

        if cluster_name is None:
            cluster_name = self.ctx.managers.keys()[0]

        for daemon_type, counters in targets.items():
            # List of 'a', 'b', 'c'...
            daemon_ids = list(misc.all_roles_of_type(self.ctx.cluster, daemon_type))
            daemons = dict([(daemon_id,
                             self.ctx.daemons.get_daemon(daemon_type, daemon_id))
                            for daemon_id in daemon_ids])

            seen = set()

            for daemon_id, daemon in daemons.items():
                if not daemon.running():
                    log.info("Ignoring daemon {0}, it isn't running".format(daemon_id))
                    continue
                else:
                    log.debug("Getting stats from {0}".format(daemon_id))

                manager = self.ctx.managers[cluster_name]
                proc = manager.admin_socket(daemon_type, daemon_id, ["perf", "dump"])
                response_data = proc.stdout.getvalue().strip()
                if response_data:
                    perf_dump = json.loads(response_data)
                else:
                    log.warning("No admin socket response from {0}, skipping".format(daemon_id))
                    continue

                for counter in counters:
                    subsys, counter_id = counter.split(".")
                    if subsys not in perf_dump or counter_id not in perf_dump[subsys]:
                        log.warning("Counter '{0}' not found on daemon {1}.{2}".format(
                            counter, daemon_type, daemon_id))
                        continue
                    value = perf_dump[subsys][counter_id]

                    log.info("Daemon {0}.{1} {2}={3}".format(
                        daemon_type, daemon_id, counter, value
                    ))

                    if value > 0:
                        seen.add(counter)

            if not dry_run:
                unseen = set(counters) - set(seen)
                if unseen:
                    raise RuntimeError("The following counters failed to be set "
                                       "on {0} daemons: {1}".format(
                        daemon_type, unseen
                    ))
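
For reference, the config this end() method walks maps daemon types to perf counter names; a hypothetical shape (the counter names are illustrative only, not taken from the original) looks like:

# Hypothetical 'check-counter' config; only the structure matters here.
config = {
    'cluster_name': 'ceph',
    'dry_run': False,
    'counters': {
        'mds': ['mds_cache.num_strays', 'mds.exported'],
    },
}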
Code example #10
File: ststests.py Project: TRYTOBE8TME/sts-tests
def task(ctx, config):
    assert hasattr(ctx, 'rgw'), 'ststests must run after the rgw task'
    assert config is None or isinstance(config, list) \
        or isinstance(config, dict), \
        "task ststests only supports a list or dictionary for configuration"
    all_clients = ['client.{id}'.format(id=id_)
                   for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
    if config is None:
        config = all_clients
    if isinstance(config, list):
        config = dict.fromkeys(config)
    clients = config.keys()

    overrides = ctx.config.get('overrides', {})
    # merge each client section, not the top level.
    for client in config.keys():
        if not config[client]:
            config[client] = {}
        teuthology.deep_merge(config[client], overrides.get('ststests', {}))

    log.debug('ststests config is %s', config)

    ststests_conf = {}
    for client in clients:
        endpoint = ctx.rgw.role_endpoints.get(client)
        assert endpoint, 'ststests: no rgw endpoint for {}'.format(client)

        ststests_conf[client] = ConfigObj(
            indent_type='',
            infile={
                'DEFAULT':
                    {
                    'port'      : endpoint.port,
                    'is_secure' : endpoint.cert is not None,
                    'api_name'  : 'default',
                    },
                'fixtures' : {},
                'sts'  : {},
                'iam'   : {},
                's3 main': {},
                }
            )

    with contextutil.nested(
        lambda: download(ctx=ctx, config=config),
        lambda: create_users(ctx=ctx, config=dict(
                clients=clients,
                ststests_conf=ststests_conf,
                )),
        lambda: configure(ctx=ctx, config=dict(
                clients=config,
                ststests_conf=ststests_conf,
                )),
        lambda: run_tests(ctx=ctx, config=config),
        ):
        pass
    yield
Code example #11
def _run_one_client(ctx, config, role):
    """Spawned task that runs the client"""
    krbd = config.get('krbd', False)
    testdir = teuthology.get_testdir(ctx)
    (remote,) = ctx.cluster.only(role).remotes.iterkeys()

    args = []
    if krbd:
        args.append('sudo') # rbd map/unmap need privileges
    args.extend([
        'adjust-ulimits',
        'ceph-coverage',
        '{tdir}/archive/coverage'.format(tdir=testdir)
    ])

    overrides = ctx.config.get('overrides', {})
    teuthology.deep_merge(config, overrides.get('rbd_fsx', {}))

    if config.get('valgrind'):
        args = teuthology.get_valgrind_args(
            testdir,
            'fsx_{id}'.format(id=role),
            args,
            config.get('valgrind')
        )

    args.extend([
        'ceph_test_librbd_fsx',
        '-d', # debug output for all operations
        '-W', '-R', # mmap doesn't work with rbd
        '-p', str(config.get('progress_interval', 100)), # show progress
        '-P', '{tdir}/archive'.format(tdir=testdir),
        '-r', str(config.get('readbdy',1)),
        '-w', str(config.get('writebdy',1)),
        '-t', str(config.get('truncbdy',1)),
        '-h', str(config.get('holebdy',1)),
        '-l', str(config.get('size', 250000000)),
        '-S', str(config.get('seed', 0)),
        '-N', str(config.get('ops', 1000)),
    ])
    if krbd:
        args.append('-K') # -K enables krbd mode
    if config.get('direct_io', False):
        args.append('-Z') # -Z use direct IO
    if not config.get('randomized_striping', True):
        args.append('-U') # -U disables randomized striping
    if not config.get('punch_holes', True):
        args.append('-H') # -H disables discard ops
    if config.get('journal_replay', False):
        args.append('-j') # -j replay all IO events from journal
    args.extend([
        'pool_{pool}'.format(pool=role),
        'image_{image}'.format(image=role),
    ])

    remote.run(args=args)
Code example #12
File: calamari.py Project: AlfredChenxf/teuthology
def reposetup(ctx, config):
    """
    task reposetup
    Sets up calamari repository on all 'osd', 'mon', and 'calamari.' remotes;
     cleans up when done

    calamari.reposetup:
        pkgdir:
        username:
        password:

    Supply the above in an override file if you need to manage the
    secret repo credentials separately from the test definition (likely).

    pkgdir encodes package directory (possibly more than one path component)
    as in https://<username>:<password>@SERVER/<pkgdir>/{deb,rpm}{..}

    """
    overrides = ctx.config.get('overrides', {})
    # XXX deep_merge returns the result, which matters if either is None
    # make sure that doesn't happen
    if config is None:
        config = {'dummy': 'dummy'}
    teuthology.deep_merge(config, overrides.get('calamari.reposetup', {}))

    try:
        pkgdir = config['pkgdir']
        username = config['username']
        password = config['password']
    except KeyError:
        raise RuntimeError('requires pkgdir, username, and password')

    # repo gets installed on any remote with role mon, osd, or calamari
    def needs_repo(role):
        for type in 'mon.', 'osd.', 'calamari.':
            if role.startswith(type):
                return True
        return False

    remotes = _remotes(ctx, needs_repo)
    if remotes is None:
        raise RuntimeError('No roles configured')

    try:
        for rem in remotes:
            log.info(rem)
            keypath = 'http://download.inktank.com/keys/release.asc'
            pkg.install_repokey(rem, keypath)
            pkg.install_repo(rem, 'download.inktank.com', pkgdir,
                             username, password)
        yield

    finally:
        for rem in remotes:
            pkg.remove_repo(rem)
Code example #13
def task(ctx, config):
    """
    Configures dnsmasq to add cnames for teuthology remotes. The task expects a
    dictionary, where each key is a role. If all cnames for that role use the
    same address as that role, the cnames can be given as a list. For example,
    this entry configures dnsmasq on the remote associated with client.0, adding
    two cnames for the ip address associated with client.0:

        - dnsmasq:
            client.0:
            - client0.example.com
            - c0.example.com

    If the addresses do not all match the given role, a dictionary can be given
    to specify the ip address by its target role. For example:

        - dnsmasq:
            client.0:
              client.0.example.com: client.0
              client.1.example.com: client.1
    """
    # apply overrides
    overrides = config.get('overrides', {})
    misc.deep_merge(config, overrides.get('dnsmasq', {}))

    # multiple roles may map to the same remote, so collect names by remote
    remote_names = {}
    for role, cnames in config.iteritems():
        remote = get_remote_for_role(ctx, role)
        if remote is None:
            raise ConfigError('no remote for role %s' % role)

        names = remote_names.get(remote, {})

        if isinstance(cnames, list):
            # when given a list of cnames, point to local ip
            for cname in cnames:
                names[cname] = remote.ip_address
        elif isinstance(cnames, dict):
            # when given a dict, look up the remote ip for each
            for cname, client in cnames.iteritems():
                r = get_remote_for_role(ctx, client)
                if r is None:
                    raise ConfigError('no remote for role %s' % client)
                names[cname] = r.ip_address

        remote_names[remote] = names

    # run a subtask for each unique remote
    subtasks = []
    for remote, cnames in remote_names.iteritems():
        subtasks.extend([lambda r=remote, cn=cnames: setup_dnsmasq(r, cn)])

    with contextutil.nested(*subtasks):
        yield
Code example #14
File: dnsmasq.py Project: Carudy/ceph
def task(ctx, config):
    """
    Configures dnsmasq to add cnames for teuthology remotes. The task expects a
    dictionary, where each key is a role. If all cnames for that role use the
    same address as that role, the cnames can be given as a list. For example,
    this entry configures dnsmasq on the remote associated with client.0, adding
    two cnames for the ip address associated with client.0:

        - dnsmasq:
            client.0:
            - client0.example.com
            - c0.example.com

    If the addresses do not all match the given role, a dictionary can be given
    to specify the ip address by its target role. For example:

        - dnsmasq:
            client.0:
              client.0.example.com: client.0
              client.1.example.com: client.1
    """
    # apply overrides
    overrides = config.get('overrides', {})
    misc.deep_merge(config, overrides.get('dnsmasq', {}))

    # multiple roles may map to the same remote, so collect names by remote
    remote_names = {}
    for role, cnames in config.iteritems():
        remote = get_remote_for_role(ctx, role)
        if remote is None:
            raise ConfigError('no remote for role %s' % role)

        names = remote_names.get(remote, {})

        if isinstance(cnames, list):
            # when given a list of cnames, point to local ip
            for cname in cnames:
                names[cname] = remote.ip_address
        elif isinstance(cnames, dict):
            # when given a dict, look up the remote ip for each
            for cname, client in cnames.iteritems():
                r = get_remote_for_role(ctx, client)
                if r is None:
                    raise ConfigError('no remote for role %s' % client)
                names[cname] = r.ip_address

        remote_names[remote] = names

    # run a subtask for each unique remote
    subtasks = []
    for remote, cnames in remote_names.iteritems():
        subtasks.extend([ lambda r=remote, cn=cnames: setup_dnsmasq(r, cn) ])

    with contextutil.nested(*subtasks):
        yield
Code example #15
File: calamari.py Project: alsall/teuthology
def reposetup(ctx, config):
    """
    task reposetup
    Sets up calamari repository on all 'osd', 'mon', and 'calamari.' remotes;
     cleans up when done

    calamari.reposetup:
        pkgdir:
        username:
        password:

    Supply the above in an override file if you need to manage the
    secret repo credentials separately from the test definition (likely).

    pkgdir encodes package directory (possibly more than one path component)
    as in https://<username>:<password>@SERVER/<pkgdir>/{deb,rpm}{..}

    """
    overrides = ctx.config.get('overrides', {})
    # XXX deep_merge returns the result, which matters if either is None
    # make sure that doesn't happen
    if config is None:
        config = {'dummy': 'dummy'}
    teuthology.deep_merge(config, overrides.get('calamari.reposetup', {}))

    try:
        pkgdir = config['pkgdir']
        username = config['username']
        password = config['password']
    except KeyError:
        raise RuntimeError('requires pkgdir, username, and password')

    # repo gets installed on any remote with role mon, osd, or calamari
    def needs_repo(role):
        for type in 'mon.', 'osd.', 'calamari.':
            if role.startswith(type):
                return True
        return False

    remotes = _remotes(ctx, needs_repo)
    if remotes is None:
        raise RuntimeError('No roles configured')

    try:
        for rem in remotes:
            log.info(rem)
            keypath = 'http://download.inktank.com/keys/release.asc'
            pkg.install_repokey(rem, keypath)
            pkg.install_repo(rem, 'download.inktank.com', pkgdir, username,
                             password)
        yield

    finally:
        for rem in remotes:
            pkg.remove_repo(rem)
Code example #16
File: install.py Project: kawaguchi-s/teuthology
def task(ctx, config):
    """
    Install packages for a given project.

    tasks:
    - install:
        project: ceph
        branch: bar
    - install:
        project: samba
        branch: foo
        extra_packages: ['samba']

    Overrides are project specific:

    overrides:
      install:
        ceph:
          sha1: ...

    :param ctx: the argparse.Namespace object
    :param config: the config dict
    """
    if config is None:
        config = {}
    assert isinstance(config, dict), \
        "task install only supports a dictionary for configuration"

    project, = config.get('project', 'ceph'),
    log.debug('project %s' % project)
    overrides = ctx.config.get('overrides')
    if overrides:
        install_overrides = overrides.get('install', {})
        teuthology.deep_merge(config, install_overrides.get(project, {}))
    log.debug('config %s' % config)

    flavor = get_flavor(config)
    log.info("Using flavor: %s", flavor)

    ctx.summary['flavor'] = flavor

    with contextutil.nested(
        lambda: install(ctx=ctx, config=dict(
            branch=config.get('branch'),
            tag=config.get('tag'),
            sha1=config.get('sha1'),
            flavor=flavor,
            extra_packages=config.get('extra_packages', []),
            extras=config.get('extras', None),
            wait_for_package=ctx.config.get('wait_for_package', False),
            project=project,
        )),
        lambda: ship_utilities(ctx=ctx, config=None),
    ):
        yield
Code example #17
File: calamari.py Project: AlfredChenxf/teuthology
def server(ctx, config):
    """
    task server:

    Calamari server setup.  Add role 'calamari.server' to the remote
    that will run the webapp.  'calamari.restapi' role must be present
    to serve as the cluster-api target for calamari-server.  Only one
    of calamari.server and calamari.restapi roles is supported currently.

    For example::

        roles:
        - [calamari.server]
        - [mon.0, calamari.restapi]
        - [osd.0, osd.1]
        tasks:
        - calamari.restapi:
        - calamari.server:
    """
    overrides = ctx.config.get('overrides', {})
    teuthology.deep_merge(config, overrides.get('calamari.server', {}))

    remote = _remotes(ctx, lambda r: r.startswith('calamari.server'))
    if not remote:
        raise RuntimeError('No role configured')

    restapi_remote = _remotes(ctx, lambda r: r.startswith('calamari.restapi'))
    if not restapi_remote:
        raise RuntimeError('Must supply calamari.restapi role')

    remote = remote[0]
    restapi_remote = restapi_remote[0]

    try:
        # sqlite3 command is required; on some platforms it's already
        # there and not removable (required for, say yum)
        sqlite_package = pkg.get_package_name('sqlite', remote)
        if sqlite_package and not pkg.install_package(sqlite_package, remote):
            raise RuntimeError('{} install failed'.format(sqlite_package))

        if not pkg.install_package('calamari-server', remote) or \
           not pkg.install_package('calamari-clients', remote) or \
           not _disable_default_nginx(remote) or \
           not _setup_calamari_cluster(remote, restapi_remote):
            raise RuntimeError('Server installation failure')

        log.info('client/server setup complete')
        yield
    finally:
        pkg.remove_package('calamari-server', remote)
        pkg.remove_package('calamari-clients', remote)
        if sqlite_package:
            pkg.remove_package(sqlite_package, remote)
Code example #18
File: rgw-logsocket.py Project: athanatos/teuthology
def task(ctx, config):
    """
    Run some s3-tests suite against rgw, verify opslog socket returns data

    Must restrict testing to a particular client::

        tasks:
        - ceph:
        - rgw: [client.0]
        - s3tests: [client.0]

    To pass extra arguments to nose (e.g. to run a certain test)::

        tasks:
        - ceph:
        - rgw: [client.0]
        - s3tests:
            client.0:
              extra_args: ['test_s3:test_object_acl_grand_public_read']
            client.1:
              extra_args: ['--exclude', 'test_100_continue']
    """
    assert (
        config is None or isinstance(config, list) or isinstance(config, dict)
    ), "task s3tests only supports a list or dictionary for configuration"
    all_clients = ["client.{id}".format(id=id_) for id_ in teuthology.all_roles_of_type(ctx.cluster, "client")]
    if config is None:
        config = all_clients
    if isinstance(config, list):
        config = dict.fromkeys(config)
    clients = config.keys()

    overrides = ctx.config.get("overrides", {})
    # merge each client section, not the top level.
    for (client, cconf) in config.iteritems():
        teuthology.deep_merge(cconf, overrides.get("rgw-logsocket", {}))

    log.debug("config is %s", config)

    s3tests_conf = {}
    for client in clients:
        s3tests_conf[client] = ConfigObj(
            indent_type="",
            infile={"DEFAULT": {"port": 7280, "is_secure": "no"}, "fixtures": {}, "s3 main": {}, "s3 alt": {}},
        )

    with contextutil.nested(
        lambda: download(ctx=ctx, config=config),
        lambda: create_users(ctx=ctx, config=dict(clients=clients, s3tests_conf=s3tests_conf)),
        lambda: configure(ctx=ctx, config=dict(clients=config, s3tests_conf=s3tests_conf)),
        lambda: run_tests(ctx=ctx, config=config),
    ):
        yield
Code example #19
File: calamari.py Project: alsall/teuthology
def server(ctx, config):
    """
    task server:

    Calamari server setup.  Add role 'calamari.server' to the remote
    that will run the webapp.  'calamari.restapi' role must be present
    to serve as the cluster-api target for calamari-server.  Only one
    of calamari.server and calamari.restapi roles is supported currently.

    For example::

        roles:
        - [calamari.server]
        - [mon.0, calamari.restapi]
        - [osd.0, osd.1]
        tasks:
        - calamari.restapi:
        - calamari.server:
    """
    overrides = ctx.config.get('overrides', {})
    teuthology.deep_merge(config, overrides.get('calamari.server', {}))

    remote = _remotes(ctx, lambda r: r.startswith('calamari.server'))
    if not remote:
        raise RuntimeError('No role configured')

    restapi_remote = _remotes(ctx, lambda r: r.startswith('calamari.restapi'))
    if not restapi_remote:
        raise RuntimeError('Must supply calamari.restapi role')

    remote = remote[0]
    restapi_remote = restapi_remote[0]

    try:
        # sqlite3 command is required; on some platforms it's already
        # there and not removable (required for, say yum)
        sqlite_package = pkg.get_package_name('sqlite', remote)
        if sqlite_package and not pkg.install_package(sqlite_package, remote):
            raise RuntimeError('{} install failed'.format(sqlite_package))

        if not pkg.install_package('calamari-server', remote) or \
           not pkg.install_package('calamari-clients', remote) or \
           not _disable_default_nginx(remote) or \
           not _setup_calamari_cluster(remote, restapi_remote):
            raise RuntimeError('Server installation failure')

        log.info('client/server setup complete')
        yield
    finally:
        pkg.remove_package('calamari-server', remote)
        pkg.remove_package('calamari-clients', remote)
        if sqlite_package:
            pkg.remove_package(sqlite_package, remote)
Code example #20
File: __init__.py Project: sunnyku/teuthology
def upgrade_remote_to_config(ctx, config):
    assert config is None or isinstance(config, dict), \
        "install.upgrade only supports a dictionary for configuration"

    project = config.get('project', 'ceph')

    # use 'install' overrides here, in case the upgrade target is left
    # unspecified/implicit.
    install_overrides = ctx.config.get(
        'overrides', {}).get('install', {}).get(project, {})
    log.info('project %s config %s overrides %s', project, config,
             install_overrides)

    # build a normalized remote -> config dict
    remotes = {}
    if 'all' in config:
        for remote in ctx.cluster.remotes.keys():
            remotes[remote] = config.get('all')
    else:
        for role in config.keys():
            remotes_dict = ctx.cluster.only(role).remotes
            if not remotes_dict:
                # This is a regular config argument, not a role
                continue
            # take any remote in the dict
            remote = next(iter(remotes_dict))
            if remote in remotes:
                log.warn('remote %s came up twice (role %s)', remote, role)
                continue
            remotes[remote] = config.get(role)

    result = {}
    for remote, node in remotes.items():
        if not node:
            node = {}

        this_overrides = copy.deepcopy(install_overrides)
        if 'sha1' in node or 'tag' in node or 'branch' in node:
            log.info("config contains sha1|tag|branch, "
                     "removing those keys from override")
            this_overrides.pop('sha1', None)
            this_overrides.pop('tag', None)
            this_overrides.pop('branch', None)
        teuthology.deep_merge(node, this_overrides)
        log.info('remote %s config %s', remote, node)
        node['project'] = project

        result[remote] = node

    return result
Code example #21
File: __init__.py Project: ceph/teuthology
def upgrade_remote_to_config(ctx, config):
    assert config is None or isinstance(config, dict), \
        "install.upgrade only supports a dictionary for configuration"

    project = config.get('project', 'ceph')

    # use 'install' overrides here, in case the upgrade target is left
    # unspecified/implicit.
    install_overrides = ctx.config.get(
        'overrides', {}).get('install', {}).get(project, {})
    log.info('project %s config %s overrides %s', project, config,
             install_overrides)

    # build a normalized remote -> config dict
    remotes = {}
    if 'all' in config:
        for remote in ctx.cluster.remotes.iterkeys():
            remotes[remote] = config.get('all')
    else:
        for role in config.keys():
            remotes_dict = ctx.cluster.only(role).remotes
            if not remotes_dict:
                # This is a regular config argument, not a role
                continue
            remote = remotes_dict.keys()[0]
            if remote in remotes:
                log.warn('remote %s came up twice (role %s)', remote, role)
                continue
            remotes[remote] = config.get(role)

    result = {}
    for remote, node in remotes.iteritems():
        if not node:
            node = {}

        this_overrides = copy.deepcopy(install_overrides)
        if 'sha1' in node or 'tag' in node or 'branch' in node:
            log.info("config contains sha1|tag|branch, "
                     "removing those keys from override")
            this_overrides.pop('sha1', None)
            this_overrides.pop('tag', None)
            this_overrides.pop('branch', None)
        teuthology.deep_merge(node, this_overrides)
        log.info('remote %s config %s', remote, node)
        node['project'] = project

        result[remote] = node

    return result
Code example #22
File: calamari.py Project: AlfredChenxf/teuthology
def agent(ctx, config):
    """
    task agent
    calamari.agent: install stats collection
       (for each role of type 'mon.' or 'osd.')

    For example::

        roles:
        - [osd.0, mon.a]
        - [osd.1]
        tasks:
        - calamari.agent:
    """

    log.info('calamari.agent starting')
    overrides = ctx.config.get('overrides', {})
    teuthology.deep_merge(config, overrides.get('calamari.agent', {}))

    # agent gets installed on any remote with role mon or osd
    def needs_agent(role):
        for type in 'mon.', 'osd.':
            if role.startswith(type):
                return True
        return False

    remotes = _remotes(ctx, needs_agent)
    if remotes is None:
        raise RuntimeError('No role configured')
    try:
        for rem in remotes:
            log.info('Installing calamari-agent on %s', rem)
            pkg.install_package('calamari-agent', rem)
            server_remote = _remotes(ctx,
                lambda r: r.startswith('calamari.server'))
            if not server_remote:
                raise RuntimeError('No calamari.server role available')
            server_remote = server_remote[0]
            # why isn't shortname available by default?
            serverhost = server_remote.name.split('@')[1]
            log.info('configuring Diamond for {}'.format(serverhost))
            if not _edit_diamond_config(rem, serverhost):
                raise RuntimeError(
                    'Diamond config edit failed on {0}'.format(rem)
                )
        yield
    finally:
        for rem in remotes:
            pkg.remove_package('calamari-agent', rem)
Code example #23
File: hadoop.py Project: ErwanAliasr1/teuthology
def task(ctx, config):
    if config is None:
        config = {}
    assert isinstance(config, dict), "task hadoop config must be dictionary"

    overrides = ctx.config.get('overrides', {})
    teuthology.deep_merge(config, overrides.get('hadoop', {}))

    tasks = [
        lambda: install_hadoop(ctx=ctx, config=config),
        lambda: start_hadoop(ctx=ctx, config=config),
    ]

    with contextutil.nested(*tasks):
        yield
Code example #24
    def apply_overrides(self):
        """
        Look for an 'overrides' dict in self.ctx.config; look inside that for a
        dict with the same name as this task. Override any settings in
        self.config with those overrides
        """
        all_overrides = self.ctx.config.get('overrides', dict())
        if not all_overrides:
            return
        task_overrides = all_overrides.get(self.name)
        if task_overrides:
            self.log.debug(
                "Applying overrides for task {name}: {overrides}".format(
                    name=self.name, overrides=task_overrides))
            deep_merge(self.config, task_overrides)
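
To make the effect concrete, here is a minimal self-contained sketch; the SimpleTask class is hypothetical and uses a flat dict.update(), whereas the real base class merges recursively with deep_merge:

# Hypothetical stand-in for the Task base class above: the task's own config is
# updated in place from ctx.config['overrides'][<task name>].
class SimpleTask:
    name = 'hadoop'

    def __init__(self, ctx_config, config=None):
        self.ctx_config = ctx_config
        self.config = config or {}

    def apply_overrides(self):
        task_overrides = self.ctx_config.get('overrides', {}).get(self.name)
        if task_overrides:
            # the real code calls deep_merge(self.config, task_overrides)
            self.config.update(task_overrides)

task = SimpleTask({'overrides': {'hadoop': {'version': '2.7.3'}}},
                  {'cluster': 'hadoop-test'})
task.apply_overrides()
# task.config == {'cluster': 'hadoop-test', 'version': '2.7.3'}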
Code example #25
File: hadoop.py Project: x-ion-de/teuthology
def task(ctx, config):
    if config is None:
        config = {}
    assert isinstance(config, dict), "task hadoop config must be dictionary"

    overrides = ctx.config.get('overrides', {})
    teuthology.deep_merge(config, overrides.get('hadoop', {}))

    tasks = [
        lambda: install_hadoop(ctx=ctx, config=config),
        lambda: start_hadoop(ctx=ctx, config=config),
    ]

    with contextutil.nested(*tasks):
        yield
Code example #26
File: calamari.py Project: alsall/teuthology
def agent(ctx, config):
    """
    task agent
    calamari.agent: install stats collection
       (for each role of type 'mon.' or 'osd.')

    For example::

        roles:
        - [osd.0, mon.a]
        - [osd.1]
        tasks:
        - calamari.agent:
    """

    log.info('calamari.agent starting')
    overrides = ctx.config.get('overrides', {})
    teuthology.deep_merge(config, overrides.get('calamari.agent', {}))

    # agent gets installed on any remote with role mon or osd
    def needs_agent(role):
        for type in 'mon.', 'osd.':
            if role.startswith(type):
                return True
        return False

    remotes = _remotes(ctx, needs_agent)
    if remotes is None:
        raise RuntimeError('No role configured')
    try:
        for rem in remotes:
            log.info('Installing calamari-agent on %s', rem)
            pkg.install_package('calamari-agent', rem)
            server_remote = _remotes(ctx,
                                     lambda r: r.startswith('calamari.server'))
            if not server_remote:
                raise RuntimeError('No calamari.server role available')
            server_remote = server_remote[0]
            # why isn't shortname available by default?
            serverhost = server_remote.name.split('@')[1]
            log.info('configuring Diamond for {}'.format(serverhost))
            if not _edit_diamond_config(rem, serverhost):
                raise RuntimeError(
                    'Diamond config edit failed on {0}'.format(rem))
        yield
    finally:
        for rem in remotes:
            pkg.remove_package('calamari-agent', rem)
Code example #27
File: vault.py Project: zwj262310/ceph
def task(ctx, config):
    """
    Deploy and configure Vault

    Example of configuration:

    tasks:
    - vault:
        client.0:
          version: 1.2.2
          root_token: test_root_token
          engine: kv
          prefix: /v1/kv/data/
          secrets:
            - path: kv/teuthology/key_a
              secret: YmluCmJvb3N0CmJvb3N0LWJ1aWxkCmNlcGguY29uZgo=
            - path: kv/teuthology/key_b
              secret: aWIKTWFrZWZpbGUKbWFuCm91dApzcmMKVGVzdGluZwo=
    """
    all_clients = [
        'client.{id}'.format(id=id_)
        for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')
    ]
    if config is None:
        config = all_clients
    if isinstance(config, list):
        config = dict.fromkeys(config)

    overrides = ctx.config.get('overrides', {})
    # merge each client section, not the top level.
    for client in config.keys():
        if not config[client]:
            config[client] = {}
        teuthology.deep_merge(config[client], overrides.get('vault', {}))

    log.debug('Vault config is %s', config)

    ctx.vault = argparse.Namespace()
    ctx.vault.endpoints = assign_ports(ctx, config, 8200)
    ctx.vault.root_token = None
    ctx.vault.prefix = config[client].get('prefix')
    ctx.vault.engine = config[client].get('engine')

    with contextutil.nested(lambda: download(ctx=ctx, config=config),
                            lambda: run_vault(ctx=ctx, config=config),
                            lambda: setup_vault(ctx=ctx, config=config),
                            lambda: create_secrets(ctx=ctx, config=config)):
        yield
Code example #28
File: buildpackages.py Project: leseb/ceph-ci
def apply_overrides(ctx, config):
    if config is None:
        config = {}
    else:
        config = copy.deepcopy(config)

    assert isinstance(config, dict), \
        "task install only supports a dictionary for configuration"

    project, = config.get('project', 'ceph'),
    log.debug('project %s' % project)
    overrides = ctx.config.get('overrides')
    if overrides:
        install_overrides = overrides.get('install', {})
        misc.deep_merge(config, install_overrides.get(project, {}))
    return config
Code example #29
File: __init__.py Project: BlaXpirit/teuthology
    def apply_overrides(self):
        """
        Look for an 'overrides' dict in self.ctx.config; look inside that for a
        dict with the same name as this task. Override any settings in
        self.config with those overrides
        """
        all_overrides = self.ctx.config.get('overrides', dict())
        if not all_overrides:
            return
        task_overrides = all_overrides.get(self.name)
        if task_overrides:
            self.log.debug(
                "Applying overrides for task {name}: {overrides}".format(
                    name=self.name, overrides=task_overrides)
            )
            deep_merge(self.config, task_overrides)
Code example #30
File: rgw_multisite_tests.py Project: drunkard/ceph
    def setup(self):
        super(RGWMultisiteTests, self).setup()

        overrides = self.ctx.config.get('overrides', {})
        misc.deep_merge(self.config, overrides.get('rgw-multisite-tests', {}))

        if not self.ctx.rgw_multisite:
            raise ConfigError(
                'rgw-multisite-tests must run after the rgw-multisite task')
        realm = self.ctx.rgw_multisite.realm
        master_zone = realm.meta_master_zone()

        branch = self.config.get('branch')
        if not branch:
            # run from suite_path
            suite_path = self.ctx.config.get('suite_path')
            self.module_path = suite_path + '/../src/test/rgw/rgw_multi'
        else:
            # clone the qa branch
            repo = self.config.get('repo',
                                   teuth_config.get_ceph_qa_suite_git_url())
            log.info("cloning suite branch %s from %s...", branch, repo)
            clonedir = fetch_repo(repo, branch)
            # import its version of rgw_multi
            self.module_path = clonedir + '/src/test/rgw/rgw_multi'

        log.info("importing tests from %s", self.module_path)
        spec = importlib.util.spec_from_file_location(
            'rgw_multi', self.module_path + '/__init__.py')
        module = importlib.util.module_from_spec(spec)
        sys.modules[spec.name] = module
        spec.loader.exec_module(module)

        from rgw_multi import multisite, tests

        # create the test user
        log.info('creating test user..')
        user = multisite.User('rgw-multisite-test-user')
        user.create(master_zone, [
            '--display-name', 'Multisite Test User', '--gen-access-key',
            '--gen-secret', '--caps', 'roles=*'
        ])

        config = self.config.get('config', {})
        tests.init_multi(realm, user, tests.Config(**config))
        tests.realm_meta_checkpoint(realm)
Code example #31
def get_client_configs(ctx, config):
    """
    Get a map of the configuration for each FUSE client in the configuration by
    combining the configuration of the current task with any global overrides.

    :param ctx: Context instance
    :param config: configuration for this task
    :return: dict of client name to config or to None
    """
    if config is None:
        config = dict(('client.{id}'.format(id=id_), None)
                      for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client'))
    elif isinstance(config, list):
        config = dict((name, None) for name in config)

    overrides = ctx.config.get('overrides', {})
    teuthology.deep_merge(config, overrides.get('ceph-fuse', {}))

    return config
Code example #32
File: ceph_fuse.py Project: ApusApp/ceph
def get_client_configs(ctx, config):
    """
    Get a map of the configuration for each FUSE client in the configuration by
    combining the configuration of the current task with any global overrides.

    :param ctx: Context instance
    :param config: configuration for this task
    :return: dict of client name to config or to None
    """
    if config is None:
        config = dict(('client.{id}'.format(id=id_), None)
                      for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client'))
    elif isinstance(config, list):
        config = dict((name, None) for name in config)

    overrides = ctx.config.get('overrides', {})
    teuthology.deep_merge(config, overrides.get('ceph-fuse', {}))

    return config
Code example #33
    def setup(self):
        super(RGWMultisiteTests, self).setup()

        overrides = self.ctx.config.get('overrides', {})
        misc.deep_merge(self.config, overrides.get('rgw-multisite-tests', {}))

        if not self.ctx.rgw_multisite:
            raise ConfigError('rgw-multisite-tests must run after the rgw-multisite task')
        realm = self.ctx.rgw_multisite.realm
        master_zone = realm.meta_master_zone()

        # create the test user
        log.info('creating test user..')
        user = multisite.User('rgw-multisite-test-user')
        user.create(master_zone, ['--display-name', 'Multisite Test User',
                                  '--gen-access-key', '--gen-secret'])

        config = self.config.get('config', {})
        tests.init_multi(realm, user, tests.Config(**config))
        tests.realm_meta_checkpoint(realm)
Code example #34
File: workunit.py Project: Yan-waller/ceph
def get_refspec_after_overrides(config, overrides):
    # mimic the behavior of the "install" task, where the "overrides" are
    # actually the defaults of that task. in other words, if none of "sha1",
    # "tag", or "branch" is specified by a "workunit" tasks, we will update
    # it with the information in the "workunit" sub-task nested in "overrides".
    overrides = copy.deepcopy(overrides.get('workunit', {}))
    refspecs = {'branch': Branch, 'tag': Refspec, 'sha1': Refspec}
    if any(map(lambda i: i in config, refspecs.iterkeys())):
        for i in refspecs.iterkeys():
            overrides.pop(i, None)
    misc.deep_merge(config, overrides)

    for spec, cls in refspecs.iteritems():
        refspec = config.get(spec)
        if refspec:
            refspec = cls(refspec)
            break
    if refspec is None:
        refspec = Head()
    return refspec
Code example #35
    def setup(self):
        super(RGWMultisiteTests, self).setup()

        overrides = self.ctx.config.get('overrides', {})
        misc.deep_merge(self.config, overrides.get('rgw-multisite-tests', {}))

        if not self.ctx.rgw_multisite:
            raise ConfigError('rgw-multisite-tests must run after the rgw-multisite task')
        realm = self.ctx.rgw_multisite.realm
        master_zone = realm.meta_master_zone()

        # create the test user
        log.info('creating test user..')
        user = multisite.User('rgw-multisite-test-user')
        user.create(master_zone, ['--display-name', 'Multisite Test User',
                                  '--gen-access-key', '--gen-secret'])

        config = self.config.get('config', {})
        tests.init_multi(realm, user, tests.Config(**config))
        tests.realm_meta_checkpoint(realm)
Code example #36
File: util.py Project: yongseokoh/teuthology
def get_install_task_flavor(job_config):
    """
    Pokes through the install task's configuration (including its overrides) to
    figure out which flavor it will want to install.

    Only looks at the first instance of the install task in job_config.
    """
    project, = job_config.get('project', 'ceph'),
    tasks = job_config.get('tasks', dict())
    overrides = job_config.get('overrides', dict())
    install_overrides = overrides.get('install', dict())
    project_overrides = install_overrides.get(project, dict())
    first_install_config = dict()
    for task in tasks:
        if list(task.keys())[0] == 'install':
            first_install_config = list(task.values())[0] or dict()
            break
    first_install_config = copy.deepcopy(first_install_config)
    deep_merge(first_install_config, install_overrides)
    deep_merge(first_install_config, project_overrides)
    return get_flavor(first_install_config)
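
As a small illustration (the flavor values are hypothetical), this is the kind of job_config the helper inspects; only the first install task is considered, and overrides.install.<project> is merged on top of it before get_flavor() is consulted:

# Hypothetical job_config for get_install_task_flavor().
job_config = {
    'project': 'ceph',
    'overrides': {'install': {'ceph': {'flavor': 'notcmalloc'}}},
    'tasks': [
        {'install': None},                  # first install task: this one is used
        {'install': {'flavor': 'basic'}},   # later install tasks are ignored
    ],
}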
Code example #37
File: kubeadm.py Project: gyaozhou/ceph-read
def task(ctx, config):
    if not config:
        config = {}
    assert isinstance(config, dict), \
        "task only supports a dictionary for configuration"

    log.info('Kubeadm start')

    overrides = ctx.config.get('overrides', {})
    teuthology.deep_merge(config, overrides.get('kubeadm', {}))
    log.info('Config: ' + str(config))

    # set up cluster context
    if not hasattr(ctx, 'kubeadm'):
        ctx.kubeadm = {}
    if 'cluster' not in config:
        config['cluster'] = 'kubeadm'
    cluster_name = config['cluster']
    if cluster_name not in ctx.kubeadm:
        ctx.kubeadm[cluster_name] = argparse.Namespace()

    with contextutil.nested(
            lambda: preflight(ctx, config),
            lambda: allocate_pod_subnet(ctx, config),
            lambda: kubeadm_install(ctx, config),
            lambda: kubeadm_init_join(ctx, config),
            lambda: kubectl_config(ctx, config),
            lambda: pod_network(ctx, config),
            lambda: setup_pvs(ctx, config),
            lambda: final(ctx, config),
    ):
        try:
            log.info('Kubeadm complete, yielding')
            yield

        finally:
            log.info('Tearing down kubeadm')
Code example #38
def normalize_and_apply_overrides(ctx, config, overrides):
    """
    kernel task config is hierarchical and needs to be transformed into
    a normal form, see normalize_config() for details.  Applying overrides is
    also more involved compared to other tasks because of the number of ways
    a version of the kernel to install can be specified.

    Returns a (normalized config, timeout) tuple.

    :param ctx: Context
    :param config: Configuration
    """
    timeout = TIMEOUT_DEFAULT
    if 'timeout' in config:
        timeout = config.pop('timeout')
    config = normalize_config(ctx, config)
    log.debug('normalized config %s' % config)

    if 'timeout' in overrides:
        timeout = overrides.pop('timeout')
    if overrides:
        overrides = normalize_config(ctx, overrides)
        log.debug('normalized overrides %s' % overrides)

        # Handle a case when a version specified with one type of version key
        # is overridden by a version specified with another type of version key
        # (e.g. 'branch: foo' is overridden with 'tag: bar').  To be able to
        # use deep_merge(), drop all version keys from the original config if
        # the corresponding override has a version key.
        for role, role_config in config.iteritems():
            if (role in overrides and
                    any(k in overrides[role] for k in VERSION_KEYS)):
                for k in VERSION_KEYS:
                    role_config.pop(k, None)
        teuthology.deep_merge(config, overrides)

    return (config, timeout)
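
A small before/after picture of the version-key handling described above, assuming 'branch' and 'tag' are both listed in VERSION_KEYS (the role name and values are made up for illustration):

# Before: the role's config pins a branch, but the override pins a tag.
config = {'client.0': {'branch': 'foo'}}
overrides = {'client.0': {'tag': 'v0.1'}}
# The loop above strips 'branch' from config['client.0'] because the override
# carries a version key, so after deep_merge() the result is:
# {'client.0': {'tag': 'v0.1'}}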
Code example #39
File: calamari.py Project: alsall/teuthology
def restapi(ctx, config):
    """
    task restapi

    Calamari Rest API

    For example::

        roles:
        - [mon.a, osd.0, osd.1, calamari.restapi]
        - [osd.2, osd.3]
        tasks:
        - calamari.restapi:
    """
    overrides = ctx.config.get('overrides', {})
    teuthology.deep_merge(config, overrides.get('calamari.restapi', {}))

    remotes_and_roles = \
        ctx.cluster.only(lambda r: r.startswith('calamari.restapi')).remotes
    if remotes_and_roles is None:
        raise RuntimeError('No role configured')

    # check that the role selected also has at least one mon role
    for rem, roles in remotes_and_roles.iteritems():
        if not any([r for r in roles if r.startswith('mon.')]):
            raise RuntimeError('no mon on remote with roles %s' % roles)

    try:
        for rem in remotes_and_roles.iterkeys():
            log.info(rem)
            pkg.install_package('calamari-restapi', rem)
        yield

    finally:
        for rem in remotes_and_roles.iterkeys():
            pkg.remove_package('calamari-restapi', rem)
コード例 #40
0
ファイル: calamari.py プロジェクト: AlfredChenxf/teuthology
def restapi(ctx, config):
    """
    task restapi

    Calamari Rest API

    For example::

        roles:
        - [mon.a, osd.0, osd.1, calamari.restapi]
        - [osd.2, osd.3]
        tasks:
        - calamari.restapi:
    """
    overrides = ctx.config.get('overrides', {})
    teuthology.deep_merge(config, overrides.get('calamari.restapi', {}))

    remotes_and_roles = \
        ctx.cluster.only(lambda r: r.startswith('calamari.restapi')).remotes
    if remotes_and_roles is None:
        raise RuntimeError('No role configured')

    # check that the role selected also has at least one mon role
    for rem, roles in remotes_and_roles.iteritems():
        if not any([r for r in roles if r.startswith('mon.')]):
            raise RuntimeError('no mon on remote with roles %s' % roles)

    try:
        for rem in remotes_and_roles.iterkeys():
            log.info(rem)
            pkg.install_package('calamari-restapi', rem)
        yield

    finally:
        for rem in remotes_and_roles.iterkeys():
            pkg.remove_package('calamari-restapi', rem)
コード例 #41
0
ファイル: schedule.py プロジェクト: hjwsm1989/teuthology
def build_config(args):
    """
    Given a dict of arguments, build a job config
    """
    config_paths = args.get('<conf_file>', list())
    conf_dict = dict()
    for conf_path in config_paths:
        with file(conf_path) as partial_file:
            partial_dict = yaml.safe_load(partial_file)
        conf_dict = deep_merge(conf_dict, partial_dict)
    # strip out targets; the worker will allocate new ones when we run
    # the job with --lock.
    if 'targets' in conf_dict:
        del conf_dict['targets']
    args['config'] = conf_dict

    owner = args['--owner']
    if owner is None:
        owner = 'scheduled_{user}'.format(user=get_user())

    job_config = dict(
        name=args['--name'],
        last_in_suite=args['--last-in-suite'],
        email=args['--email'],
        description=args['--description'],
        owner=owner,
        verbose=args['--verbose'],
        machine_type=args['--worker'],
        tube=args['--worker'],
        priority=int(args['--priority']),
    )
    # Update the dict we just created, and not the other way around, to let
    # settings in the yaml override what's passed on the command line. This is
    # primarily to accommodate jobs with multiple machine types.
    job_config.update(conf_dict)
    if args['--timeout'] is not None:
        job_config['results_timeout'] = args['--timeout']
    return job_config
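
A small sketch of the precedence rule in the comment above, with hypothetical values:
the YAML fragments (conf_dict) win over the command-line defaults because the dict
built from the CLI arguments is updated with the YAML, not the other way around:

job_config = {'machine_type': 'plana', 'priority': 100}  # from CLI args (hypothetical)
conf_dict = {'machine_type': 'smithi'}                    # from a YAML fragment (hypothetical)
job_config.update(conf_dict)
# job_config == {'machine_type': 'smithi', 'priority': 100}
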
コード例 #42
0
ファイル: ceph_fuse.py プロジェクト: drunkard/ceph
def task(ctx, config):
    """
    Mount/unmount a ``ceph-fuse`` client.

    The config is optional and defaults to mounting on all clients. If
    a config is given, it is expected to be a list of clients to do
    this operation on. This lets you e.g. set up one client with
    ``ceph-fuse`` and another with ``kclient``.

    ``brxnet`` should be a private IPv4 address range; the default range is
    [192.168.0.0/16]

    Example that mounts all clients::

        tasks:
        - ceph:
        - ceph-fuse:
        - interactive:
        - brxnet: [192.168.0.0/16]

    Example that uses both ``kclient`` and ``ceph-fuse``::

        tasks:
        - ceph:
        - ceph-fuse: [client.0]
        - kclient: [client.1]
        - interactive:

    Example that enables valgrind::

        tasks:
        - ceph:
        - ceph-fuse:
            client.0:
              valgrind: [--tool=memcheck, --leak-check=full, --show-reachable=yes]
        - interactive:

    Example that stops an already-mounted client:

    ::

        tasks:
            - ceph:
            - ceph-fuse: [client.0]
            - ... do something that requires the FS mounted ...
            - ceph-fuse:
                client.0:
                    mounted: false
            - ... do something that requires the FS unmounted ...

    Example that adds more generous wait time for mount (for virtual machines)::

        tasks:
        - ceph:
        - ceph-fuse:
            client.0:
              mount_wait: 60 # default is 0, do not wait before checking /sys/
              mount_timeout: 120 # default is 30, give up if /sys/ is not populated
        - interactive:

    Example that creates and mounts a subvol::

        overrides:
          ceph:
            subvols:
              create: 2
              subvol_options: "--namespace-isolated --size 25000000000"
          ceph-fuse:
            client.0:
              mount_subvol_num: 0
          kclient:
            client.1:
              mount_subvol_num: 1

    :param ctx: Context
    :param config: Configuration
    """
    log.info('Running ceph_fuse task...')

    if config is None:
        ids = misc.all_roles_of_type(ctx.cluster, 'client')
        client_roles = [f'client.{id_}' for id_ in ids]
        config = dict([r, dict()] for r in client_roles)
    elif isinstance(config, list):
        client_roles = config
        config = dict([r, dict()] for r in client_roles)
    elif isinstance(config, dict):
        client_roles = filter(lambda x: 'client.' in x, config.keys())
    else:
        raise ValueError(
            f"Invalid config object: {config} ({config.__class__})")
    log.info(f"config is {config}")

    clients = list(misc.get_clients(ctx=ctx, roles=client_roles))
    testdir = misc.get_testdir(ctx)
    all_mounts = getattr(ctx, 'mounts', {})
    mounted_by_me = {}
    skipped = {}
    remotes = set()

    brxnet = config.get("brxnet", None)

    # Construct any new FuseMount instances
    overrides = ctx.config.get('overrides', {}).get('ceph-fuse', {})
    top_overrides = dict(
        filter(lambda x: 'client.' not in x[0], overrides.items()))
    for id_, remote in clients:
        entity = f"client.{id_}"
        client_config = config.get(entity)
        if client_config is None:
            client_config = {}
        # top level overrides
        misc.deep_merge(client_config, top_overrides)
        # mount specific overrides
        client_config_overrides = overrides.get(entity)
        misc.deep_merge(client_config, client_config_overrides)
        log.info(f"{entity} config is {client_config}")

        remotes.add(remote)
        auth_id = client_config.get("auth_id", id_)
        cephfs_name = client_config.get("cephfs_name")

        skip = client_config.get("skip", False)
        if skip:
            skipped[id_] = skip
            continue

        if id_ not in all_mounts:
            fuse_mount = FuseMount(ctx=ctx,
                                   client_config=client_config,
                                   test_dir=testdir,
                                   client_id=auth_id,
                                   client_remote=remote,
                                   brxnet=brxnet,
                                   cephfs_name=cephfs_name)
            all_mounts[id_] = fuse_mount
        else:
            # Catch bad configs where someone has e.g. tried to use ceph-fuse and kcephfs for the same client
            assert isinstance(all_mounts[id_], FuseMount)

        if not config.get("disabled", False) and client_config.get(
                'mounted', True):
            mounted_by_me[id_] = {
                "config": client_config,
                "mount": all_mounts[id_]
            }

    ctx.mounts = all_mounts

    # Umount any pre-existing clients that we have not been asked to mount
    for client_id in set(all_mounts.keys()) - set(mounted_by_me.keys()) - set(
            skipped.keys()):
        mount = all_mounts[client_id]
        if mount.is_mounted():
            mount.umount_wait()

    for remote in remotes:
        FuseMount.cleanup_stale_netnses_and_bridge(remote)

    # Mount any clients we have been asked to (default to mount all)
    log.info('Mounting ceph-fuse clients...')
    for info in mounted_by_me.values():
        config = info["config"]
        mount_x = info['mount']
        if config.get("mount_path"):
            mount_x.cephfs_mntpt = config.get("mount_path")
        if config.get("mountpoint"):
            mount_x.hostfs_mntpt = config.get("mountpoint")
        mount_x.mount()

    for info in mounted_by_me.values():
        info["mount"].wait_until_mounted()

    try:
        yield all_mounts
    finally:
        log.info('Unmounting ceph-fuse clients...')

        for info in mounted_by_me.values():
            # Conditional because an inner context might have umounted it
            mount = info["mount"]
            if mount.is_mounted():
                mount.umount_wait()
        for remote in remotes:
            FuseMount.cleanup_stale_netnses_and_bridge(remote)
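
A hedged sketch of the two-level override merge used above: keys that do not look like
'client.*' act as defaults for every mount, while 'client.<id>' sections apply only to
that client. The override values and the use of dict.update() as a stand-in for
misc.deep_merge() are hypothetical simplifications:

overrides = {'mount_wait': 30, 'client.0': {'mount_timeout': 120}}
top_overrides = dict(filter(lambda x: 'client.' not in x[0], overrides.items()))
# top_overrides == {'mount_wait': 30}

client_config = {'mounted': True}
client_config.update(top_overrides)                  # merged into every client
client_config.update(overrides.get('client.0', {}))  # merged into client.0 only
# client_config == {'mounted': True, 'mount_wait': 30, 'mount_timeout': 120}
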
コード例 #43
0
ファイル: kclient.py プロジェクト: Abhishekvrshny/ceph
def task(ctx, config):
    """
    Mount/unmount a ``kernel`` client.

    The config is optional and defaults to mounting on all clients. If
    a config is given, it is expected to be a list of clients to do
    this operation on. This lets you e.g. set up one client with
    ``ceph-fuse`` and another with ``kclient``.

    Example that mounts all clients::

        tasks:
        - ceph:
        - kclient:
        - interactive:

    Example that uses both ``kclient`` and ``ceph-fuse``::

        tasks:
        - ceph:
        - ceph-fuse: [client.0]
        - kclient: [client.1]
        - interactive:


    Pass a dictionary instead of a list to specify per-client config::

        tasks:
        - kclient:
            client.0:
                debug: true

    :param ctx: Context
    :param config: Configuration
    """
    log.info('Mounting kernel clients...')
    assert config is None or isinstance(config, list) or isinstance(config, dict), \
        "task kclient got invalid config"

    if config is None:
        config = ['client.{id}'.format(id=id_)
                  for id_ in misc.all_roles_of_type(ctx.cluster, 'client')]

    if isinstance(config, list):
        client_roles = config
        config = dict([r, dict()] for r in client_roles)
    elif isinstance(config, dict):
        client_roles = filter(lambda x: 'client.' in x, config.keys())
    else:
        raise ValueError("Invalid config object: {0} ({1})".format(config, config.__class__))

    # config has been converted to a dict by this point
    overrides = ctx.config.get('overrides', {})
    deep_merge(config, overrides.get('kclient', {}))

    clients = list(misc.get_clients(ctx=ctx, roles=client_roles))

    test_dir = misc.get_testdir(ctx)

    # Assemble mon addresses
    remotes_and_roles = ctx.cluster.remotes.items()
    roles = [roles for (remote_, roles) in remotes_and_roles]
    ips = [remote_.ssh.get_transport().getpeername()[0]
           for (remote_, _) in remotes_and_roles]
    mons = misc.get_mons(roles, ips).values()

    mounts = {}
    for id_, remote in clients:
        client_config = config.get("client.%s" % id_)
        if client_config is None:
            client_config = {}

        if config.get("disabled", False) or not client_config.get('mounted', True):
            continue

        kernel_mount = KernelMount(
            mons,
            test_dir,
            id_,
            remote,
            ctx.teuthology_config.get('ipmi_user', None),
            ctx.teuthology_config.get('ipmi_password', None),
            ctx.teuthology_config.get('ipmi_domain', None)
        )

        mounts[id_] = kernel_mount

        if client_config.get('debug', False):
            remote.run(args=["sudo", "bash", "-c", "echo 'module ceph +p' > /sys/kernel/debug/dynamic_debug/control"])
            remote.run(args=["sudo", "bash", "-c", "echo 'module libceph +p' > /sys/kernel/debug/dynamic_debug/control"])

        kernel_mount.mount()


    def umount_all():
        log.info('Unmounting kernel clients...')

        forced = False
        for mount in mounts.values():
            if mount.is_mounted():
                try:
                    mount.umount()
                except (CommandFailedError, MaxWhileTries):
                    log.warn("Ordinary umount failed, forcing...")
                    forced = True
                    mount.umount_wait(force=True)

        return forced

    ctx.mounts = mounts
    try:
        yield mounts
    except:
        umount_all()  # ignore forced retval, we are already in error handling
    finally:

        forced = umount_all()
        if forced:
            # The context managers within the kclient manager worked (i.e.
            # the test workload passed) but for some reason we couldn't
            # umount, so turn this into a test failure.
            raise RuntimeError("Kernel mounts did not umount cleanly")
コード例 #44
0
ファイル: __init__.py プロジェクト: sunnyku/teuthology
def task(ctx, config):
    """
    Install packages for a given project.

    tasks:
    - install:
        project: ceph
        branch: bar
    - install:
        project: samba
        branch: foo
        extra_packages: ['samba']
    - install:
        extra_packages:
           deb: ['librados-dev', 'libradosstriper-dev']
           rpm: ['librados-devel', 'libradosstriper-devel']
        extra_system_packages:
           deb: ['libboost-system-dev']
           rpm: ['boost-devel']
    - install:
        rhbuild: 1.3.0
        playbook: downstream_setup.yml
        vars:
           yum_repos:
             - url: "http://location.repo"
               name: "ceph_repo"

    Add repos before trying to install any package (all Shaman-related tasks
    will be ignored):

    - install:
        repos:
          - name: "repo-alias"
            priority: 1
            url: "http://location.repo"

    Note: 'repos' is supported for SUSE-based distros only; patches to add
    support for other distros are welcome.


    Overrides are project specific:

    overrides:
      install:
        ceph:
          sha1: ...


    Debug packages may optionally be installed:

    overrides:
      install:
        ceph:
          debuginfo: true


    Default package lists (which come from packages.yaml) may be overridden:

    overrides:
      install:
        ceph:
          packages:
            deb:
            - ceph-osd
            - ceph-mon
            rpm:
            - ceph-devel
            - rbd-fuse

    When tag, branch and sha1 do not reference the same commit hash, the
    tag takes precedence over the branch and the branch takes precedence
    over the sha1.

    When the overrides have a sha1 that is different from the sha1 of
    the project to be installed, it will be a noop if the project has
    a branch or tag, because they take precedence over the sha1. For
    instance:

    overrides:
      install:
        ceph:
          sha1: 1234

    tasks:
    - install:
        project: ceph
        sha1: 4567
        branch: foobar # which has sha1 4567

    The override will transform the tasks as follows:

    tasks:
    - install:
        project: ceph
        sha1: 1234
        branch: foobar # which has sha1 4567

    But the branch takes precedence over the sha1 and foobar
    will be installed. The override of the sha1 has no effect.

    When passed 'rhbuild' as a key, it will attempt to install an rh ceph build
    using ceph-deploy

    Normally, the package management system will try to install or upgrade
    the specified packages as instructed. But if newer versions of these
    packages are already installed on the test node, they must be uninstalled
    or downgraded first. To downgrade multiple packages in a single shot:

    tasks:
    - install:
        project: ceph
        branch: hammer
        downgrade_packages: ['librados2', 'librbd1']

    Reminder regarding teuthology-suite side effects:

    The teuthology-suite command always adds the following:

    overrides:
      install:
        ceph:
          sha1: 1234

    where sha1 matches the --ceph argument. For instance if
    teuthology-suite is called with --ceph master, the sha1 will be
    the tip of master. If called with --ceph v0.94.1, the sha1 will be
    the v0.94.1 (as returned by git rev-parse v0.94.1 which is not to
    be confused with git rev-parse v0.94.1^{commit})

    :param ctx: the argparse.Namespace object
    :param config: the config dict
    """
    if config is None:
        config = {}
    assert isinstance(config, dict), \
        "task install only supports a dictionary for configuration"

    project, = config.get('project', 'ceph'),
    log.debug('project %s' % project)
    overrides = ctx.config.get('overrides')
    repos = None
    if overrides:
        install_overrides = overrides.get('install', {})
        teuthology.deep_merge(config, install_overrides.get(project, {}))
        repos = install_overrides.get('repos', None)
        log.debug('INSTALL overrides: %s' % install_overrides)
    log.debug('config %s' % config)

    rhbuild = None
    if config.get('rhbuild'):
        rhbuild = config.get('rhbuild')
        log.info("Build is %s " % rhbuild)

    flavor = get_flavor(config)
    log.info("Using flavor: %s", flavor)

    ctx.summary['flavor'] = flavor
    nested_tasks = [lambda: redhat.install(ctx=ctx, config=config),
                    lambda: ship_utilities(ctx=ctx, config=None)]

    if config.get('rhbuild'):
        if config.get('playbook'):
            ansible_config = dict(config)
            # remove key not required by ansible task
            del ansible_config['rhbuild']
            nested_tasks.insert(
                0,
                lambda: ansible.CephLab(ctx, config=ansible_config)
            )
        with contextutil.nested(*nested_tasks):
                yield
    else:
        nested_config = dict(
                branch=config.get('branch'),
                cleanup=config.get('cleanup'),
                debuginfo=config.get('debuginfo'),
                downgrade_packages=config.get('downgrade_packages', []),
                exclude_packages=config.get('exclude_packages', []),
                extra_packages=config.get('extra_packages', []),
                extra_system_packages=config.get('extra_system_packages', []),
                extras=config.get('extras', None),
                flavor=flavor,
                install_ceph_packages=config.get('install_ceph_packages', True),
                packages=config.get('packages', dict()),
                project=project,
                repos_only=config.get('repos_only', False),
                sha1=config.get('sha1'),
                tag=config.get('tag'),
                wait_for_package=config.get('wait_for_package', False),
        )
        if repos:
            nested_config['repos'] = repos
        with contextutil.nested(
            lambda: install(ctx=ctx, config=nested_config),
            lambda: ship_utilities(ctx=ctx, config=None),
        ):
            yield
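
A hedged sketch of the precedence described in the docstring above (tag over branch
over sha1); the helper name pick_ref_sketch and the sample values are hypothetical,
not the install task's actual resolution code:

def pick_ref_sketch(config):
    """Return the first version key present, in precedence order."""
    for key in ('tag', 'branch', 'sha1'):
        if config.get(key):
            return key, config[key]
    return None, None

print(pick_ref_sketch({'sha1': '1234', 'branch': 'foobar'}))  # ('branch', 'foobar')
print(pick_ref_sketch({'sha1': '1234'}))                      # ('sha1', '1234')
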
コード例 #45
0
ファイル: ceph-fuse.py プロジェクト: hughsaunders/teuthology
def task(ctx, config):
    """
    Mount/unmount a ``ceph-fuse`` client.

    The config is optional and defaults to mounting on all clients. If
    a config is given, it is expected to be a list of clients to do
    this operation on. This lets you e.g. set up one client with
    ``ceph-fuse`` and another with ``kclient``.

    Example that mounts all clients::

        tasks:
        - ceph:
        - ceph-fuse:
        - interactive:

    Example that uses both ``kclient`` and ``ceph-fuse``::

        tasks:
        - ceph:
        - ceph-fuse: [client.0]
        - kclient: [client.1]
        - interactive:

    Example that enables valgrind::

        tasks:
        - ceph:
        - ceph-fuse:
            client.0:
              valgrind: [--tool=memcheck, --leak-check=full, --show-reachable=yes]
        - interactive:

    :param ctx: Context
    :param config: Configuration
    """
    log.info("Mounting ceph-fuse clients...")
    fuse_daemons = {}

    testdir = teuthology.get_testdir(ctx)

    if config is None:
        config = dict(
            ("client.{id}".format(id=id_), None) for id_ in teuthology.all_roles_of_type(ctx.cluster, "client")
        )
    elif isinstance(config, list):
        config = dict((name, None) for name in config)

    overrides = ctx.config.get("overrides", {})
    teuthology.deep_merge(config, overrides.get("ceph-fuse", {}))

    clients = list(teuthology.get_clients(ctx=ctx, roles=config.keys()))

    for id_, remote in clients:
        client_config = config.get("client.%s" % id_)
        if client_config is None:
            client_config = {}
        log.info("Client client.%s config is %s" % (id_, client_config))

        daemon_signal = "kill"
        if client_config.get("coverage") or client_config.get("valgrind") is not None:
            daemon_signal = "term"

        mnt = os.path.join(testdir, "mnt.{id}".format(id=id_))
        log.info("Mounting ceph-fuse client.{id} at {remote} {mnt}...".format(id=id_, remote=remote, mnt=mnt))

        remote.run(args=["mkdir", "--", mnt])

        run_cmd = [
            "sudo",
            "adjust-ulimits",
            "ceph-coverage",
            "{tdir}/archive/coverage".format(tdir=testdir),
            "daemon-helper",
            daemon_signal,
        ]
        run_cmd_tail = [
            "ceph-fuse",
            "-f",
            "--name",
            "client.{id}".format(id=id_),
            # TODO ceph-fuse doesn't understand dash dash '--',
            mnt,
        ]

        if client_config.get("valgrind") is not None:
            run_cmd = teuthology.get_valgrind_args(
                testdir, "client.{id}".format(id=id_), run_cmd, client_config.get("valgrind")
            )

        run_cmd.extend(run_cmd_tail)

        proc = remote.run(
            args=run_cmd, logger=log.getChild("ceph-fuse.{id}".format(id=id_)), stdin=run.PIPE, wait=False
        )
        fuse_daemons[id_] = proc

    for id_, remote in clients:
        mnt = os.path.join(testdir, "mnt.{id}".format(id=id_))
        teuthology.wait_until_fuse_mounted(remote=remote, fuse=fuse_daemons[id_], mountpoint=mnt)
        remote.run(args=["sudo", "chmod", "1777", "{tdir}/mnt.{id}".format(tdir=testdir, id=id_)])

    try:
        yield
    finally:
        log.info("Unmounting ceph-fuse clients...")
        for id_, remote in clients:
            mnt = os.path.join(testdir, "mnt.{id}".format(id=id_))
            try:
                remote.run(args=["sudo", "fusermount", "-u", mnt])
            except run.CommandFailedError:
                log.info("Failed to unmount ceph-fuse on {name}, aborting...".format(name=remote.name))
                # abort the fuse mount, killing all hung processes
                remote.run(
                    args=[
                        "if",
                        "test",
                        "-e",
                        "/sys/fs/fuse/connections/*/abort",
                        run.Raw(";"),
                        "then",
                        "echo",
                        "1",
                        run.Raw(">"),
                        run.Raw("/sys/fs/fuse/connections/*/abort"),
                        run.Raw(";"),
                        "fi",
                    ]
                )
                # make sure its unmounted
                remote.run(args=["sudo", "umount", "-l", "-f", mnt])

        run.wait(fuse_daemons.itervalues())

        for id_, remote in clients:
            mnt = os.path.join(testdir, "mnt.{id}".format(id=id_))
            remote.run(args=["rmdir", "--", mnt])
コード例 #46
0
def task(ctx, config):
    if config is None:
        config = {}

    assert isinstance(config, dict), \
        "task only supports a dictionary for configuration"

    overrides = ctx.config.get('overrides', {})
    teuthology.deep_merge(config, overrides.get('ceph', {}))
    log.info('Config: ' + str(config))

    testdir = teuthology.get_testdir(ctx)

    # set up cluster context
    if not hasattr(ctx, 'ceph'):
        ctx.ceph = {}
        ctx.managers = {}
    if 'cluster' not in config:
        config['cluster'] = 'ceph'
    cluster_name = config['cluster']
    if cluster_name not in ctx.ceph:
        ctx.ceph[cluster_name] = argparse.Namespace()
        ctx.ceph[cluster_name].bootstrapped = False
 
    # image
    if not hasattr(ctx.ceph[cluster_name], 'image'):
        ctx.ceph[cluster_name].image = config.get('image')
    ref = None
    if not ctx.ceph[cluster_name].image:
        sha1 = config.get('sha1')
        if sha1:
            ctx.ceph[cluster_name].image = 'quay.io/ceph-ci/ceph:%s' % sha1
            ref = sha1
        else:
            # hmm, fall back to branch?
            branch = config.get('branch', 'master')
            ref = branch
            ctx.ceph[cluster_name].image = 'quay.io/ceph-ci/ceph:%s' % branch
    log.info('Cluster image is %s' % ctx.ceph[cluster_name].image)


    with contextutil.nested(
            # if the cluster is already bootstrapped, bypass the corresponding methods
            lambda: _bypass() if (ctx.ceph[cluster_name].bootstrapped)\
                              else initialize_config(ctx=ctx, config=config),
            lambda: ceph_initial(),
            lambda: normalize_hostnames(ctx=ctx),
            lambda: _bypass() if (ctx.ceph[cluster_name].bootstrapped)\
                              else download_cephadm(ctx=ctx, config=config, ref=ref),
            lambda: ceph_log(ctx=ctx, config=config),
            lambda: ceph_crash(ctx=ctx, config=config),
            lambda: _bypass() if (ctx.ceph[cluster_name].bootstrapped)\
                              else ceph_bootstrap(ctx=ctx, config=config),
            lambda: crush_setup(ctx=ctx, config=config),
            lambda: ceph_mons(ctx=ctx, config=config),
            lambda: distribute_config_and_admin_keyring(ctx=ctx, config=config),
            lambda: ceph_mgrs(ctx=ctx, config=config),
            lambda: ceph_osds(ctx=ctx, config=config),
            lambda: ceph_mdss(ctx=ctx, config=config),
            lambda: ceph_rgw(ctx=ctx, config=config),
            lambda: ceph_monitoring('prometheus', ctx=ctx, config=config),
            lambda: ceph_monitoring('node-exporter', ctx=ctx, config=config),
            lambda: ceph_monitoring('alertmanager', ctx=ctx, config=config),
            lambda: ceph_monitoring('grafana', ctx=ctx, config=config),
            lambda: ceph_clients(ctx=ctx, config=config),
    ):
        ctx.managers[cluster_name] = CephManager(
            ctx.ceph[cluster_name].bootstrap_remote,
            ctx=ctx,
            logger=log.getChild('ceph_manager.' + cluster_name),
            cluster=cluster_name,
            cephadm=True,
        )

        try:
            if config.get('wait-for-healthy', True):
                healthy(ctx=ctx, config=config)

            log.info('Setup complete, yielding')
            yield

        finally:
            log.info('Teardown begin')
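
A hedged sketch of the image selection above: an explicit image wins, otherwise a sha1
pins the ceph-ci image, otherwise the branch (default 'master') is used. The helper
name resolve_image_sketch is hypothetical:

def resolve_image_sketch(config):
    image = config.get('image')
    if image:
        return image, None
    sha1 = config.get('sha1')
    if sha1:
        return 'quay.io/ceph-ci/ceph:%s' % sha1, sha1
    branch = config.get('branch', 'master')
    return 'quay.io/ceph-ci/ceph:%s' % branch, branch

print(resolve_image_sketch({'sha1': 'abc123'}))  # ('quay.io/ceph-ci/ceph:abc123', 'abc123')
print(resolve_image_sketch({}))                  # ('quay.io/ceph-ci/ceph:master', 'master')
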
コード例 #47
0
def _run_tests(ctx, client, tests):
    """
    Create a temp directory and wait for a client socket to be created.
    For each test, copy the executable locally and run the test.
    Remove temp directory when finished.

    :param ctx: Context
    :param client: client machine to run the test
    :param tests: list of tests to run
    """
    testdir = teuthology.get_testdir(ctx)
    log.debug('Running admin socket tests on %s', client)
    (remote,) = ctx.cluster.only(client).remotes.iterkeys()
    socket_path = '/var/run/ceph/ceph-{name}.asok'.format(name=client)
    overrides = ctx.config.get('overrides', {}).get('admin_socket', {})

    try:
        tmp_dir = os.path.join(
            testdir,
            'admin_socket_{client}'.format(client=client),
            )
        remote.run(
            args=[
                'mkdir',
                '--',
                tmp_dir,
                run.Raw('&&'),
                # wait for client process to create the socket
                'while', 'test', '!', '-e', socket_path, run.Raw(';'),
                'do', 'sleep', '1', run.Raw(';'), 'done',
                ],
            )

        for command, config in tests.iteritems():
            if config is None:
                config = {}
            teuthology.deep_merge(config, overrides)
            log.debug('Testing %s with config %s', command, str(config))

            test_path = None
            if 'test' in config:
                url = config['test'].format(
                    branch=config.get('branch', 'master')
                    )
                test_path = os.path.join(tmp_dir, command)
                remote.run(
                    args=[
                        'wget',
                        '-q',
                        '-O',
                        test_path,
                        '--',
                        url,
                        run.Raw('&&'),
                        'chmod',
                        'u=rx',
                        '--',
                        test_path,
                        ],
                    )

            args = config.get('args', [])
            assert isinstance(args, list), \
                'admin socket command args must be a list'
            sock_out = _socket_command(ctx, remote, socket_path, command, args)
            if test_path is not None:
                remote.run(
                    args=[
                        test_path,
                        ],
                    stdin=json.dumps(sock_out),
                    )

    finally:
        remote.run(
            args=[
                'rm', '-rf', '--', tmp_dir,
                ],
            )
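
A hedged sketch of the per-command merge above: each admin socket command gets the
shared 'admin_socket' overrides folded into its own config before it runs. The test
names, URL and override values are hypothetical, and dict.update() stands in for
teuthology.deep_merge():

overrides = {'branch': 'wip-fix'}
tests = {
    'version': None,
    'objecter_requests': {'test': 'http://example.com/{branch}/check.py'},
}
merged = {}
for command, cfg in tests.items():
    cfg = dict(cfg or {})
    cfg.update(overrides)
    merged[command] = cfg
# merged['version'] == {'branch': 'wip-fix'}
# merged['objecter_requests'] keeps its 'test' URL and picks up the overridden branch
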
コード例 #48
0
ファイル: barbican.py プロジェクト: hoang0310/ceph-1
def task(ctx, config):
    """
    Deploy and configure Barbican

    Example of configuration:

    tasks:
      - local_cluster:
          cluster_path: /home/adam/ceph-1/build
      - local_rgw:
      - tox: [ client.0 ]
      - keystone:
          client.0:
            sha1: 12.0.0.0b2
            force-branch: master
            tenants:
              - name: admin
                description:  Admin Tenant
              - name: rgwcrypt
                description: Encryption Tenant
              - name: barbican
                description: Barbican
              - name: s3
                description: S3 project
            users:
              - name: admin
                password: ADMIN
                project: admin
              - name: rgwcrypt-user
                password: rgwcrypt-pass
                project: rgwcrypt
              - name: barbican-user
                password: barbican-pass
                project: barbican
              - name: s3-user
                password: s3-pass
                project: s3
            roles: [ name: admin, name: Member, name: creator ]
            role-mappings:
              - name: admin
                user: admin
                project: admin
              - name: Member
                user: rgwcrypt-user
                project: rgwcrypt
              - name: admin
                user: barbican-user
                project: barbican
              - name: creator
                user: s3-user
                project: s3
            services:
              - name: keystone
                type: identity
                description: Keystone Identity Service
      - barbican:
          client.0:
            force-branch: master
            use-keystone-role: client.0
            keystone_authtoken:
              auth_plugin: password
              username: barbican-user
              password: barbican-pass
              user_domain_name: Default
            rgw_user:
              tenantName: rgwcrypt
              username: rgwcrypt-user
              password: rgwcrypt-pass
            secrets:
              - name: my-key-1
                base64: a2V5MS5GcWVxKzhzTGNLaGtzQkg5NGVpb1FKcFpGb2c=
                tenantName: s3
                username: s3-user
                password: s3-pass
              - name: my-key-2
                base64: a2V5Mi5yNUNNMGFzMVdIUVZxcCt5NGVmVGlQQ1k4YWg=
                tenantName: s3
                username: s3-user
                password: s3-pass
      - s3tests:
          client.0:
            force-branch: master
            kms_key: my-key-1
      - rgw:
          client.0:
            use-keystone-role: client.0
            use-barbican-role: client.0
    """
    assert config is None or isinstance(config, list) \
        or isinstance(config, dict), \
        "task keystone only supports a list or dictionary for configuration"
    all_clients = [
        'client.{id}'.format(id=id_)
        for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')
    ]
    if config is None:
        config = all_clients
    if isinstance(config, list):
        config = dict.fromkeys(config)
    clients = config.keys()

    overrides = ctx.config.get('overrides', {})
    # merge each client section, not the top level.
    for client in config.iterkeys():
        if not config[client]:
            config[client] = {}
        teuthology.deep_merge(config[client], overrides.get('barbican', {}))

    log.debug('Barbican config is %s', config)

    if not hasattr(ctx, 'keystone'):
        raise ConfigError('barbican must run after the keystone task')

    ctx.barbican = argparse.Namespace()
    ctx.barbican.endpoints = assign_ports(ctx, config, 9311)
    ctx.barbican.token = {}
    ctx.barbican.keys = {}

    with contextutil.nested(
            lambda: download(ctx=ctx, config=config),
            lambda: setup_venv(ctx=ctx, config=config),
            lambda: configure_barbican(ctx=ctx, config=config),
            lambda: run_barbican(ctx=ctx, config=config),
            lambda: create_secrets(ctx=ctx, config=config),
    ):
        yield
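
A hedged sketch of the per-client merge above: the 'barbican' overrides are folded into
each 'client.<id>' section rather than into the top level of the task config. The
override value is hypothetical and dict.update() stands in for teuthology.deep_merge():

config = {'client.0': {'force-branch': 'master'}, 'client.1': None}
overrides = {'use-keystone-role': 'client.0'}
for client in config:
    if not config[client]:
        config[client] = {}
    config[client].update(overrides)
# both client.0 and client.1 now carry 'use-keystone-role: client.0'
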
コード例 #49
0
ファイル: install.py プロジェクト: smanjara/teuthology
def upgrade_common(ctx, config, deploy_style):
    """
    Common code for upgrading
    """
    assert config is None or isinstance(config, dict), "install.upgrade only supports a dictionary for configuration"

    project = config.get("project", "ceph")

    # use 'install' overrides here, in case the upgrade target is left
    # unspecified/implicit.
    install_overrides = ctx.config.get("overrides", {}).get("install", {}).get(project, {})
    log.info("project %s config %s overrides %s", project, config, install_overrides)

    # FIXME: extra_pkgs is not distro-agnostic
    extra_pkgs = config.get("extra_packages", [])
    log.info("extra packages: {packages}".format(packages=extra_pkgs))

    # build a normalized remote -> config dict
    remotes = {}
    if "all" in config:
        for remote in ctx.cluster.remotes.iterkeys():
            remotes[remote] = config.get("all")
    else:
        for role in config.keys():
            remotes_dict = ctx.cluster.only(role).remotes
            if not remotes_dict:
                # This is a regular config argument, not a role
                continue
            remote = remotes_dict.keys()[0]
            if remote in remotes:
                log.warn("remote %s came up twice (role %s)", remote, role)
                continue
            remotes[remote] = config.get(role)

    for remote, node in remotes.iteritems():
        if not node:
            node = {}

        this_overrides = copy.deepcopy(install_overrides)
        if "sha1" in node or "tag" in node or "branch" in node:
            log.info("config contains sha1|tag|branch, removing those keys from override")
            this_overrides.pop("sha1", None)
            this_overrides.pop("tag", None)
            this_overrides.pop("branch", None)
        teuthology.deep_merge(node, this_overrides)
        log.info("remote %s config %s", remote, node)

        system_type = teuthology.get_system_type(remote)
        assert system_type in ("deb", "rpm")
        pkgs = PACKAGES[project][system_type]
        excluded_packages = config.get("exclude_packages", list())
        pkgs = list(set(pkgs).difference(set(excluded_packages)))
        log.info(
            "Upgrading {proj} {system_type} packages: {pkgs}".format(
                proj=project, system_type=system_type, pkgs=", ".join(pkgs)
            )
        )
        # FIXME: again, make extra_pkgs distro-agnostic
        pkgs += extra_pkgs
        node["project"] = project

        deploy_style(ctx, node, remote, pkgs, system_type)
        verify_package_version(ctx, node, remote)
コード例 #50
0
ファイル: install.py プロジェクト: smanjara/teuthology
def task(ctx, config):
    """
    Install packages for a given project.

    tasks:
    - install:
        project: ceph
        branch: bar
    - install:
        project: samba
        branch: foo
        extra_packages: ['samba']
    - install:
        rhbuild: 1.3.0 
        playbook: downstream_setup.yml
        vars:
           yum_repos:
             - url: "http://location.repo"
               name: "ceph_repo"

    Overrides are project specific:

    overrides:
      install:
        ceph:
          sha1: ...

    When passed 'rhbuild' as a key, it will attempt to install an rh ceph build using ceph-deploy

    :param ctx: the argparse.Namespace object
    :param config: the config dict
    """
    if config is None:
        config = {}
    assert isinstance(config, dict), "task install only supports a dictionary for configuration"

    project, = (config.get("project", "ceph"),)
    log.debug("project %s" % project)
    overrides = ctx.config.get("overrides")
    if overrides:
        install_overrides = overrides.get("install", {})
        teuthology.deep_merge(config, install_overrides.get(project, {}))
    log.debug("config %s" % config)

    rhbuild = None
    if config.get("rhbuild"):
        rhbuild = config.get("rhbuild")
        log.info("Build is %s " % rhbuild)

    flavor = get_flavor(config)
    log.info("Using flavor: %s", flavor)

    ctx.summary["flavor"] = flavor
    nested_tasks = [lambda: rh_install(ctx=ctx, config=config), lambda: ship_utilities(ctx=ctx, config=None)]

    if config.get("rhbuild"):
        if config.get("playbook"):
            ansible_config = dict(config)
            # remove key not required by ansible task
            del ansible_config["rhbuild"]
            nested_tasks.insert(0, lambda: ansible.CephLab(ctx, config=ansible_config))
        with contextutil.nested(*nested_tasks):
            yield
    else:
        with contextutil.nested(
            lambda: install(
                ctx=ctx,
                config=dict(
                    branch=config.get("branch"),
                    tag=config.get("tag"),
                    sha1=config.get("sha1"),
                    flavor=flavor,
                    extra_packages=config.get("extra_packages", []),
                    extras=config.get("extras", None),
                    wait_for_package=ctx.config.get("wait_for_package", False),
                    project=project,
                ),
            ),
            lambda: ship_utilities(ctx=ctx, config=None),
        ):
            yield
コード例 #51
0
ファイル: cephadm.py プロジェクト: wuhen110/ceph
def task(ctx, config):
    """
    Deploy ceph cluster using cephadm

    Set up container registry mirrors before bootstrap, if the corresponding
    config is provided in the teuthology server config yaml file.

    For example, teuthology.yaml can contain the 'defaults' section:

        defaults:
          cephadm:
            containers:
              registry_mirrors:
                docker.io: 'registry.mirror.example.com:5000'
              image: 'quay.io/ceph-ci/ceph'

    Using overrides makes it possible to customize it per run.
    The equivalent 'overrides' section looks like:

        overrides:
          cephadm:
            containers:
              registry_mirrors:
                docker.io: 'registry.mirror.example.com:5000'
              image: 'quay.io/ceph-ci/ceph'

    :param ctx: the argparse.Namespace object
    :param config: the config dict
    """
    if config is None:
        config = {}

    assert isinstance(config, dict), \
        "task only supports a dictionary for configuration"

    overrides = ctx.config.get('overrides', {})
    teuthology.deep_merge(config, overrides.get('ceph', {}))
    teuthology.deep_merge(config, overrides.get('cephadm', {}))
    log.info('Config: ' + str(config))

    # set up cluster context
    if not hasattr(ctx, 'ceph'):
        ctx.ceph = {}
        ctx.managers = {}
    if 'cluster' not in config:
        config['cluster'] = 'ceph'
    cluster_name = config['cluster']
    if cluster_name not in ctx.ceph:
        ctx.ceph[cluster_name] = argparse.Namespace()
        ctx.ceph[cluster_name].bootstrapped = False

    # image
    teuth_defaults = teuth_config.get('defaults', {})
    cephadm_defaults = teuth_defaults.get('cephadm', {})
    containers_defaults = cephadm_defaults.get('containers', {})
    mirrors_defaults = containers_defaults.get('registry_mirrors', {})
    container_registry_mirror = mirrors_defaults.get('docker.io', None)
    container_image_name = containers_defaults.get('image', None)

    containers = config.get('containers', {})
    mirrors = containers.get('registry_mirrors', {})
    container_image_name = containers.get('image', container_image_name)
    container_registry_mirror = mirrors.get('docker.io',
                                            container_registry_mirror)

    if not hasattr(ctx.ceph[cluster_name], 'image'):
        ctx.ceph[cluster_name].image = config.get('image')
    ref = None
    if not ctx.ceph[cluster_name].image:
        if not container_image_name:
            raise Exception(
                "Configuration error occurred. "
                "The 'image' value is undefined for 'cephadm' task. "
                "Please provide corresponding options in the task's "
                "config, task 'overrides', or teuthology 'defaults' "
                "section.")
        sha1 = config.get('sha1')
        flavor = config.get('flavor', 'default')

        if sha1:
            if flavor == "crimson":
                ctx.ceph[
                    cluster_name].image = container_image_name + ':' + sha1 + '-' + flavor
            else:
                ctx.ceph[
                    cluster_name].image = container_image_name + ':' + sha1
            ref = sha1
        else:
            # hmm, fall back to branch?
            branch = config.get('branch', 'master')
            ref = branch
            ctx.ceph[cluster_name].image = container_image_name + ':' + branch
    log.info('Cluster image is %s' % ctx.ceph[cluster_name].image)


    with contextutil.nested(
            # if the cluster is already bootstrapped, bypass the corresponding methods
            lambda: _bypass() if (ctx.ceph[cluster_name].bootstrapped)\
                              else initialize_config(ctx=ctx, config=config),
            lambda: ceph_initial(),
            lambda: normalize_hostnames(ctx=ctx),
            lambda: _bypass() if (ctx.ceph[cluster_name].bootstrapped)\
                              else download_cephadm(ctx=ctx, config=config, ref=ref),
            lambda: ceph_log(ctx=ctx, config=config),
            lambda: ceph_crash(ctx=ctx, config=config),
            lambda: _bypass() if (ctx.ceph[cluster_name].bootstrapped)\
                              else ceph_bootstrap(ctx, config,
                                                  container_registry_mirror),
            lambda: crush_setup(ctx=ctx, config=config),
            lambda: ceph_mons(ctx=ctx, config=config),
            lambda: distribute_config_and_admin_keyring(ctx=ctx, config=config),
            lambda: ceph_mgrs(ctx=ctx, config=config),
            lambda: ceph_osds(ctx=ctx, config=config),
            lambda: ceph_mdss(ctx=ctx, config=config),
            lambda: ceph_rgw(ctx=ctx, config=config),
            lambda: ceph_monitoring('prometheus', ctx=ctx, config=config),
            lambda: ceph_monitoring('node-exporter', ctx=ctx, config=config),
            lambda: ceph_monitoring('alertmanager', ctx=ctx, config=config),
            lambda: ceph_monitoring('grafana', ctx=ctx, config=config),
            lambda: ceph_clients(ctx=ctx, config=config),
    ):
        ctx.managers[cluster_name] = CephManager(
            ctx.ceph[cluster_name].bootstrap_remote,
            ctx=ctx,
            logger=log.getChild('ceph_manager.' + cluster_name),
            cluster=cluster_name,
            cephadm=True,
        )

        try:
            if config.get('wait-for-healthy', True):
                healthy(ctx=ctx, config=config)

            log.info('Setup complete, yielding')
            yield

        finally:
            log.info('Teardown begin')
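
A hedged sketch of the container tag construction above: a 'crimson' flavor is appended
to the sha1-based tag, otherwise the plain sha1 (or, failing that, the branch) is used.
The helper name image_tag_sketch and the sample values are hypothetical:

def image_tag_sketch(name, sha1=None, branch='master', flavor='default'):
    if sha1:
        tag = sha1 + ('-' + flavor if flavor == 'crimson' else '')
    else:
        tag = branch
    return '%s:%s' % (name, tag)

print(image_tag_sketch('quay.io/ceph-ci/ceph', sha1='abc123', flavor='crimson'))
# quay.io/ceph-ci/ceph:abc123-crimson
print(image_tag_sketch('quay.io/ceph-ci/ceph'))
# quay.io/ceph-ci/ceph:master
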
コード例 #52
0
def task(ctx, config):
    """
    Run the ragweed suite against rgw.

    To run all tests on all clients::

        tasks:
        - ceph:
        - rgw:
        - ragweed:

    To restrict testing to particular clients::

        tasks:
        - ceph:
        - rgw: [client.0]
        - ragweed: [client.0]

    To run against a server on client.1 and increase the boto timeout to 10m::

        tasks:
        - ceph:
        - rgw: [client.1]
        - ragweed:
            client.0:
              rgw_server: client.1
              idle_timeout: 600
              stages: prepare,check

    To pass extra arguments to nose (e.g. to run a certain test)::

        tasks:
        - ceph:
        - rgw: [client.0]
        - ragweed:
            client.0:
              extra_args: ['test_s3:test_object_acl_grand_public_read']
            client.1:
              extra_args: ['--exclude', 'test_100_continue']
    """
    assert hasattr(ctx, 'rgw'), 'ragweed must run after the rgw task'
    assert config is None or isinstance(config, list) \
        or isinstance(config, dict), \
        "task ragweed only supports a list or dictionary for configuration"
    all_clients = [
        'client.{id}'.format(id=id_)
        for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')
    ]
    if config is None:
        config = all_clients
    if isinstance(config, list):
        config = dict.fromkeys(config)
    clients = config.keys()

    overrides = ctx.config.get('overrides', {})
    # merge each client section, not the top level.
    for client in config.iterkeys():
        if not config[client]:
            config[client] = {}
        teuthology.deep_merge(config[client], overrides.get('ragweed', {}))

    log.debug('ragweed config is %s', config)

    ragweed_conf = {}
    for client in clients:
        endpoint = ctx.rgw.role_endpoints.get(client)
        assert endpoint, 'ragweed: no rgw endpoint for {}'.format(client)

        ragweed_conf[client] = ConfigObj(indent_type='',
                                         infile={
                                             'rgw': {
                                                 'port':
                                                 endpoint.port,
                                                 'is_secure':
                                                 endpoint.cert is not None,
                                             },
                                             'fixtures': {},
                                             'user system': {},
                                             'user regular': {},
                                             'rados': {
                                                 'ceph_conf':
                                                 '/etc/ceph/ceph.conf',
                                             },
                                         })

    run_stages = {}

    with contextutil.nested(
            lambda: download(ctx=ctx, config=config),
            lambda: create_users(ctx=ctx,
                                 config=dict(
                                     clients=clients,
                                     ragweed_conf=ragweed_conf,
                                     config=config,
                                 ),
                                 run_stages=run_stages),
            lambda: configure(ctx=ctx,
                              config=dict(
                                  clients=config,
                                  ragweed_conf=ragweed_conf,
                              ),
                              run_stages=run_stages),
            lambda: run_tests(ctx=ctx, config=config, run_stages=run_stages),
    ):
        pass
    yield
コード例 #53
0
ファイル: workunit.py プロジェクト: leseb/ceph-ci
def task(ctx, config):
    """
    Run ceph on all workunits found under the specified path.

    For example::

        tasks:
        - ceph:
        - ceph-fuse: [client.0]
        - workunit:
            clients:
              client.0: [direct_io, xattrs.sh]
              client.1: [snaps]
            branch: foo

    You can also run a list of workunits on all clients:
        tasks:
        - ceph:
        - ceph-fuse:
        - workunit:
            tag: v0.47
            clients:
              all: [direct_io, xattrs.sh, snaps]

    If you have an "all" section it will run all the workunits
    on each client simultaneously, AFTER running any workunits specified
    for individual clients. (This prevents unintended simultaneous runs.)

    To customize tests, you can specify environment variables as a dict. You
    can also specify a time limit for each work unit (defaults to 3h):

        tasks:
        - ceph:
        - ceph-fuse:
        - workunit:
            sha1: 9b28948635b17165d17c1cf83d4a870bd138ddf6
            clients:
              all: [snaps]
            env:
              FOO: bar
              BAZ: quux
            timeout: 3h

    This task supports roles that include a ceph cluster, e.g.::

        tasks:
        - ceph:
        - workunit:
            clients:
              backup.client.0: [foo]
              client.1: [bar] # cluster is implicitly 'ceph'

    :param ctx: Context
    :param config: Configuration
    """
    assert isinstance(config, dict)
    assert isinstance(config.get('clients'), dict), \
        'configuration must contain a dictionary of clients'

    overrides = ctx.config.get('overrides', {})
    misc.deep_merge(config, overrides.get('workunit', {}))

    refspec = config.get('branch')
    if refspec is None:
        refspec = config.get('tag')
    if refspec is None:
        refspec = config.get('sha1')
    if refspec is None:
        refspec = 'HEAD'

    timeout = config.get('timeout', '3h')

    log.info('Pulling workunits from ref %s', refspec)

    created_mountpoint = {}

    if config.get('env') is not None:
        assert isinstance(config['env'], dict), 'env must be a dictionary'
    clients = config['clients']

    # Create scratch dirs for any non-all workunits
    log.info('Making a separate scratch dir for every client...')
    for role in clients.iterkeys():
        assert isinstance(role, basestring)
        if role == "all":
            continue

        assert 'client' in role
        created_mnt_dir = _make_scratch_dir(ctx, role, config.get('subdir'))
        created_mountpoint[role] = created_mnt_dir

    # Execute any non-all workunits
    with parallel() as p:
        for role, tests in clients.iteritems():
            if role != "all":
                p.spawn(_run_tests,
                        ctx,
                        refspec,
                        role,
                        tests,
                        config.get('env'),
                        timeout=timeout)

    # Clean up dirs from any non-all workunits
    for role, created in created_mountpoint.items():
        _delete_dir(ctx, role, created)

    # Execute any 'all' workunits
    if 'all' in clients:
        all_tasks = clients["all"]
        _spawn_on_all_clients(ctx,
                              refspec,
                              all_tasks,
                              config.get('env'),
                              config.get('subdir'),
                              timeout=timeout)
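
A hedged, roughly equivalent one-liner for the refspec fallback above (branch, then
tag, then sha1, then HEAD); the helper name pick_refspec_sketch is hypothetical:

def pick_refspec_sketch(config):
    return config.get('branch') or config.get('tag') or config.get('sha1') or 'HEAD'

print(pick_refspec_sketch({'tag': 'v0.47'}))  # v0.47
print(pick_refspec_sketch({}))                # HEAD
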
コード例 #54
0
def task(ctx, config):
    """
    Run the s3-tests suite against rgw.

    To run all tests on all clients::

        tasks:
        - ceph:
        - rgw:
        - s3tests:

    To restrict testing to particular clients::

        tasks:
        - ceph:
        - rgw: [client.0]
        - s3tests: [client.0]

    To run against a server on client.1 and increase the boto timeout to 10m::

        tasks:
        - ceph:
        - rgw: [client.1]
        - s3tests:
            client.0:
              rgw_server: client.1
              idle_timeout: 600

    To pass extra arguments to nose (e.g. to run a certain test)::

        tasks:
        - ceph:
        - rgw: [client.0]
        - s3tests:
            client.0:
              extra_args: ['test_s3:test_object_acl_grand_public_read']
            client.1:
              extra_args: ['--exclude', 'test_100_continue']

    To run any sts-tests, set a config variable named 'sts_tests' to 'True' as follows::

        tasks:
        - ceph:
        - rgw: [client.0]
        - s3tests:
            client.0:
              sts_tests: True
              rgw_server: client.0

    """
    assert hasattr(ctx, 'rgw'), 's3tests must run after the rgw task'
    assert config is None or isinstance(config, list) \
        or isinstance(config, dict), \
        "task s3tests only supports a list or dictionary for configuration"
    all_clients = [
        'client.{id}'.format(id=id_)
        for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')
    ]
    if config is None:
        config = all_clients
    if isinstance(config, list):
        config = dict.fromkeys(config)
    clients = config.keys()

    overrides = ctx.config.get('overrides', {})
    # merge each client section, not the top level.
    for client in config.keys():
        if not config[client]:
            config[client] = {}
        teuthology.deep_merge(config[client], overrides.get('s3tests', {}))

    log.debug('s3tests config is %s', config)

    s3tests_conf = {}

    for client, client_config in config.items():
        if 'sts_tests' in client_config:
            ctx.sts_variable = True
        else:
            ctx.sts_variable = False
        # This will be the structure of the config file when you want to run webidentity_test (sts-test)
        if ctx.sts_variable and "TOKEN" in os.environ:
            for client in clients:
                endpoint = ctx.rgw.role_endpoints.get(client)
                assert endpoint, 's3tests: no rgw endpoint for {}'.format(
                    client)

                s3tests_conf[client] = ConfigObj(indent_type='',
                                                 infile={
                                                     'DEFAULT': {
                                                         'port':
                                                         endpoint.port,
                                                         'is_secure':
                                                         endpoint.cert
                                                         is not None,
                                                         'api_name':
                                                         'default',
                                                     },
                                                     'fixtures': {},
                                                     's3 main': {},
                                                     's3 alt': {},
                                                     's3 tenant': {},
                                                     'iam': {},
                                                     'webidentity': {},
                                                 })

        elif ctx.sts_variable:
            # This will be the structure of the config file when you want to run assume_role_test and get_session_token_test (sts-test)
            for client in clients:
                endpoint = ctx.rgw.role_endpoints.get(client)
                assert endpoint, 's3tests: no rgw endpoint for {}'.format(
                    client)

                s3tests_conf[client] = ConfigObj(
                    indent_type='',
                    infile={
                        'DEFAULT': {
                            'port': endpoint.port,
                            'is_secure': endpoint.cert is not None,
                            'api_name': 'default',
                        },
                        'fixtures': {},
                        's3 main': {},
                        's3 alt': {},
                        's3 tenant': {},
                        'iam': {},
                    })

        else:
            # This is the structure of the config file when running the normal s3-tests
            for client in clients:
                endpoint = ctx.rgw.role_endpoints.get(client)
                assert endpoint, 's3tests: no rgw endpoint for {}'.format(
                    client)

                s3tests_conf[client] = ConfigObj(
                    indent_type='',
                    infile={
                        'DEFAULT': {
                            'port': endpoint.port,
                            'is_secure': endpoint.cert is not None,
                            'api_name': 'default',
                        },
                        'fixtures': {},
                        's3 main': {},
                        's3 alt': {},
                        's3 tenant': {},
                    })

    with contextutil.nested(
            lambda: download(ctx=ctx, config=config),
            lambda: create_users(ctx=ctx,
                                 config=dict(
                                     clients=clients,
                                     s3tests_conf=s3tests_conf,
                                 )),
            lambda: configure(ctx=ctx,
                              config=dict(
                                  clients=config,
                                  s3tests_conf=s3tests_conf,
                              )),
            lambda: run_tests(ctx=ctx, config=config),
            lambda: scan_for_leaked_encryption_keys(ctx=ctx, config=config),
    ):
        pass
    yield
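
(Side note, not part of the listing above: a minimal sketch of how one of the generated per-client ConfigObj objects could be filled in and written out. The port, user names, keys, and output path are illustrative assumptions, not values taken from the task; presumably the configure() step in the nested context does the equivalent on each client node.)

from configobj import ConfigObj

conf = ConfigObj(indent_type='', infile={
    'DEFAULT': {'port': 80, 'is_secure': False, 'api_name': 'default'},
    'fixtures': {'bucket prefix': 'test-'},
    's3 main': {'user_id': 'main', 'access_key': 'AKEXAMPLE', 'secret_key': 'SKEXAMPLE'},
    's3 alt': {},
    's3 tenant': {},
})
# ConfigObj.write() emits the sections as an ini-style file
with open('/tmp/s3tests.client.0.conf', 'wb') as fp:
    conf.write(fp)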
Code Example #55
File: admin_socket.py Project: XinzeChi/teuthology
def _run_tests(ctx, client, tests):
    """
    Create a temp directory and wait for a client socket to be created.
    For each test, copy the executable locally and run the test.
    Remove temp directory when finished.

    :param ctx: Context
    :param client: client machine to run the test
    :param tests: list of tests to run
    """
    testdir = teuthology.get_testdir(ctx)
    log.debug("Running admin socket tests on %s", client)
    (remote,) = ctx.cluster.only(client).remotes.iterkeys()
    socket_path = "/var/run/ceph/ceph-{name}.asok".format(name=client)
    overrides = ctx.config.get("overrides", {}).get("admin_socket", {})

    try:
        tmp_dir = os.path.join(testdir, "admin_socket_{client}".format(client=client))
        remote.run(
            args=[
                "mkdir",
                "--",
                tmp_dir,
                run.Raw("&&"),
                # wait for client process to create the socket
                "while",
                "test",
                "!",
                "-e",
                socket_path,
                run.Raw(";"),
                "do",
                "sleep",
                "1",
                run.Raw(";"),
                "done",
            ]
        )

        for command, config in tests.iteritems():
            if config is None:
                config = {}
            teuthology.deep_merge(config, overrides)
            log.debug("Testing %s with config %s", command, str(config))

            test_path = None
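            # if the test entry names a helper script, fetch it for the
            # requested branch and make it executable before running it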
            if "test" in config:
                url = config["test"].format(branch=config.get("branch", "master"))
                test_path = os.path.join(tmp_dir, command)
                remote.run(
                    args=["wget", "-q", "-O", test_path, "--", url, run.Raw("&&"), "chmod", "u=rx", "--", test_path]
                )

            args = config.get("args", [])
            assert isinstance(args, list), "admin socket command args must be a list"
            sock_out = _socket_command(ctx, remote, socket_path, command, args)
            if test_path is not None:
                remote.run(args=[test_path], stdin=json.dumps(sock_out))

    finally:
        remote.run(args=["rm", "-rf", "--", tmp_dir])
Code Example #56
File: s3a_hadoop.py Project: Carudy/ceph
def task(ctx, config):
    """
     Run Hadoop S3A tests using Ceph.

     usage::

      tasks:
      - ceph-ansible:
      - s3a-hadoop:
          maven-version: '3.3.9' (default)
          hadoop-version: '2.7.3'
          bucket-name: 's3atest' (default)
          access-key: 'anykey' (uses a default value)
          secret-key: 'secretkey' (uses a default value)
    """
    if config is None:
        config = {}

    assert isinstance(config, dict), \
        "task only supports a dictionary for configuration"

    overrides = ctx.config.get('overrides', {})
    misc.deep_merge(config, overrides.get('s3a-hadoop', {}))
    testdir = misc.get_testdir(ctx)
    rgws = ctx.cluster.only(misc.is_type('rgw'))
    # use the first rgw node to test s3a
    rgw_node = rgws.remotes.keys()[0]
    # get versions
    maven_major = config.get('maven-major', 'maven-3')
    maven_version = config.get('maven-version', '3.3.9')
    hadoop_ver = config.get('hadoop-version', '2.7.3')
    bucket_name = config.get('bucket-name', 's3atest')
    access_key = config.get('access-key', 'EGAQRD2ULOIFKFSKCT4F')
    secret_key = config.get(
        'secret-key',
        'zi816w1vZKfaSM85Cl0BxXTwSLyN7zB4RbTswrGb')

    # set versions for cloning the repo
    apache_maven = 'apache-maven-{maven_version}-bin.tar.gz'.format(
        maven_version=maven_version)
    maven_link = 'http://mirror.jax.hugeserver.com/apache/maven/' + \
        '{maven_major}/{maven_version}/binaries/'.format(maven_major=maven_major, maven_version=maven_version) + apache_maven
    hadoop_git = 'https://github.com/apache/hadoop'
    hadoop_rel = 'hadoop-{ver} rel/release-{ver}'.format(ver=hadoop_ver)
    install_prereq(rgw_node)
    rgw_node.run(
        args=[
            'cd',
            testdir,
            run.Raw('&&'),
            'wget',
            maven_link,
            run.Raw('&&'),
            'tar',
            '-xvf',
            apache_maven,
            run.Raw('&&'),
            'git',
            'clone',
            run.Raw(hadoop_git),
            run.Raw('&&'),
            'cd',
            'hadoop',
            run.Raw('&&'),
            'git',
            'checkout',
            '-b',
            run.Raw(hadoop_rel)
        ]
    )
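    # give the rgw node a wildcard-able DNS name so S3A can use
    # virtual-hosted-style bucket URLs (bucket.s3.ceph.com): dnsmasq resolves
    # the name to this node and fix_rgw_config() makes rgw accept it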
    dnsmasq_name = 's3.ceph.com'
    configure_s3a(rgw_node, dnsmasq_name, access_key, secret_key, bucket_name, testdir)
    setup_dnsmasq(rgw_node, dnsmasq_name)
    fix_rgw_config(rgw_node, dnsmasq_name)
    setup_user_bucket(rgw_node, dnsmasq_name, access_key, secret_key, bucket_name, testdir)
    if hadoop_ver.startswith('2.8'):
        test_options = '-Dit.test=ITestS3A* -Dparallel-tests -Dscale -Dfs.s3a.scale.test.huge.filesize=128M verify'
    else:
        test_options = 'test -Dtest=S3a*,TestS3A*'
    try:
        run_s3atest(rgw_node, maven_version, testdir, test_options)
        yield
    finally:
        log.info("Done s3a testing, Cleaning up")
        for fil in ['apache*', 'hadoop*', 'venv*', 'create*']:
            rgw_node.run(args=['rm', run.Raw('-rf'), run.Raw('{tdir}/{file}'.format(tdir=testdir, file=fil))])
        # restart and let NM restore original config
        rgw_node.run(args=['sudo', 'systemctl', 'stop', 'dnsmasq'])
        rgw_node.run(args=['sudo', 'systemctl', 'restart', 'network.service'], check_status=False)
        rgw_node.run(args=['sudo', 'systemctl', 'status', 'network.service'], check_status=False)
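
(Side note, not part of the listing above: a rough, hypothetical sketch of the maven invocation that run_s3atest presumably ends up issuing, based only on the pieces prepared above. The maven path and the hadoop-aws module directory are assumptions, not taken from the real helper.)

from teuthology.orchestra import run  # same orchestration helpers as the listing

def run_s3atest_sketch(node, maven_version, testdir, test_options):
    # run the S3A integration tests from hadoop's hadoop-aws module with the
    # locally unpacked maven
    mvn = '{tdir}/apache-maven-{ver}/bin/mvn'.format(tdir=testdir, ver=maven_version)
    node.run(args=[
        'cd', '{tdir}/hadoop/hadoop-tools/hadoop-aws'.format(tdir=testdir),
        run.Raw('&&'),
        run.Raw(mvn),
        run.Raw(test_options),
    ])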
Code Example #57
File: workunit.py Project: beess/ceph
def task(ctx, config):
    """
    Run ceph on all workunits found under the specified path.

    For example::

        tasks:
        - ceph:
        - ceph-fuse: [client.0]
        - workunit:
            clients:
              client.0: [direct_io, xattrs.sh]
              client.1: [snaps]
            branch: foo

    You can also run a list of workunits on all clients:
        tasks:
        - ceph:
        - ceph-fuse:
        - workunit:
            tag: v0.47
            clients:
              all: [direct_io, xattrs.sh, snaps]

    If you have an "all" section it will run all the workunits
    on each client simultaneously, AFTER running any workunits specified
    for individual clients. (This prevents unintended simultaneous runs.)

    To customize tests, you can specify environment variables as a dict. You
    can also specify a time limit for each work unit (defaults to 3h):

        tasks:
        - ceph:
        - ceph-fuse:
        - workunit:
            sha1: 9b28948635b17165d17c1cf83d4a870bd138ddf6
            clients:
              all: [snaps]
            env:
              FOO: bar
              BAZ: quux
            timeout: 3h

    This task supports roles that include a ceph cluster, e.g.::

        tasks:
        - ceph:
        - workunit:
            clients:
              backup.client.0: [foo]
              client.1: [bar] # cluster is implicitly 'ceph'

    :param ctx: Context
    :param config: Configuration
    """
    assert isinstance(config, dict)
    assert isinstance(config.get('clients'), dict), \
        'configuration must contain a dictionary of clients'

    overrides = ctx.config.get('overrides', {})
    misc.deep_merge(config, overrides.get('workunit', {}))

    refspec = config.get('branch')
    if refspec is None:
        refspec = config.get('tag')
    if refspec is None:
        refspec = config.get('sha1')
    if refspec is None:
        refspec = 'HEAD'

    timeout = config.get('timeout', '3h')

    log.info('Pulling workunits from ref %s', refspec)

    created_mountpoint = {}

    if config.get('env') is not None:
        assert isinstance(config['env'], dict), 'env must be a dictionary'
    clients = config['clients']

    # Create scratch dirs for any non-all workunits
    log.info('Making a separate scratch dir for every client...')
    for role in clients.iterkeys():
        assert isinstance(role, basestring)
        if role == "all":
            continue

        assert 'client' in role
        created_mnt_dir = _make_scratch_dir(ctx, role, config.get('subdir'))
        created_mountpoint[role] = created_mnt_dir

    # Execute any non-all workunits
    with parallel() as p:
        for role, tests in clients.iteritems():
            if role != "all":
                p.spawn(_run_tests, ctx, refspec, role, tests,
                        config.get('env'), timeout=timeout)

    # Clean up dirs from any non-all workunits
    for role, created in created_mountpoint.items():
        _delete_dir(ctx, role, created)

    # Execute any 'all' workunits
    if 'all' in clients:
        all_tasks = clients["all"]
        _spawn_on_all_clients(ctx, refspec, all_tasks, config.get('env'),
                              config.get('subdir'), timeout=timeout)
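
(Side note, not part of the listing above: a hedged sketch of how the 'all' workunits are presumably fanned out, mirroring the parallel() pattern used earlier in this function. The real _spawn_on_all_clients also creates and removes per-client scratch directories, which this sketch omits.)

def _spawn_on_all_clients_sketch(ctx, refspec, tests, env, subdir, timeout):
    # run the same workunit list on every client role at the same time;
    # subdir would feed the scratch-dir setup that this sketch leaves out
    client_ids = misc.all_roles_of_type(ctx.cluster, 'client')
    with parallel() as p:
        for id_ in client_ids:
            p.spawn(_run_tests, ctx, refspec, 'client.{id}'.format(id=id_),
                    tests, env, timeout=timeout)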