Code Example #1
File: workunit.py Project: hughsaunders/teuthology
def _spawn_on_all_clients(ctx, refspec, tests, env, subdir):
    """
    Make a scratch directory for each client in the cluster, and then for each
    test spawn _run_tests for each role.    

    :param ctx: Context
    :param refspec: branch, sha1, or version tag used to identify this
                    build 
    :param tests: specific tests specified.
    :param env: evnironment set in yaml file.  Could be None.
    :param subdir: subdirectory set in yaml file.  Could be None
    """
    client_generator = teuthology.all_roles_of_type(ctx.cluster, 'client')
    client_remotes = list()
    for client in client_generator:
        (client_remote,) = ctx.cluster.only('client.{id}'.format(id=client)).remotes.iterkeys()
        client_remotes.append((client_remote, 'client.{id}'.format(id=client)))
        _make_scratch_dir(ctx, "client.{id}".format(id=client), subdir)

    for unit in tests:
        with parallel() as p:
            for remote, role in client_remotes:
                p.spawn(_run_tests, ctx, refspec, role, [unit], env, subdir)

    # cleanup the generated client directories
    client_generator = teuthology.all_roles_of_type(ctx.cluster, 'client')
    for client in client_generator:
        _delete_dir(ctx, 'client.{id}'.format(id=client))
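
All of these examples revolve around the same helper: all_roles_of_type() walks the cluster's role map and yields the bare id of every role of the requested type. A minimal sketch of that behaviour, for orientation only (an illustrative reimplementation, not the teuthology source):

def all_roles_of_type(cluster, type_):
    # Roles are named '<type>.<id>', e.g. 'client.0' or 'osd.3'.
    # For type_='client' this yields '0', '1', ... as strings.
    prefix = '{type}.'.format(type=type_)
    for remote, roles_for_host in cluster.remotes.items():
        for role in roles_for_host:
            if role.startswith(prefix):
                yield role[len(prefix):]
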
Code Example #2
File: workunit.py Project: yghannam/ceph-qa-suite
def _spawn_on_all_clients(ctx, refspec, tests, env, subdir, timeout=None):
    """
    Make a scratch directory for each client in the cluster, and then for each
    test spawn _run_tests() for each role.

    See run_tests() for parameter documentation.
    """
    client_generator = misc.all_roles_of_type(ctx.cluster, 'client')
    client_remotes = list()

    created_mountpoint = {}
    for client in client_generator:
        (client_remote,) = ctx.cluster.only('client.{id}'.format(id=client)).remotes.iterkeys()
        client_remotes.append((client_remote, 'client.{id}'.format(id=client)))
        created_mountpoint[client] = _make_scratch_dir(ctx, "client.{id}".format(id=client), subdir)

    for unit in tests:
        with parallel() as p:
            for remote, role in client_remotes:
                p.spawn(_run_tests, ctx, refspec, role, [unit], env, subdir,
                        timeout=timeout)

    # cleanup the generated client directories
    client_generator = misc.all_roles_of_type(ctx.cluster, 'client')
    for client in client_generator:
        _delete_dir(ctx, 'client.{id}'.format(id=client), created_mountpoint[client])
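
The parallel() context manager used in both variants spawns one task per p.spawn() call and joins them all when the with-block exits, propagating failures. A rough stand-in built on the standard library, for readers without the teuthology source (an approximation; the real helper is gevent-based):

from concurrent.futures import ThreadPoolExecutor

def run_all(fn, work_items):
    # Run fn(*item) for every item concurrently and wait for all,
    # re-raising the first failure, much like parallel() does.
    with ThreadPoolExecutor() as pool:
        futures = [pool.submit(fn, *item) for item in work_items]
        for future in futures:
            future.result()
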
Code Example #3
File: filesystem.py Project: yghannam/ceph-qa-suite
    def __init__(self, ctx):
        self._ctx = ctx

        self.mds_ids = list(misc.all_roles_of_type(ctx.cluster, 'mds'))
        if len(self.mds_ids) == 0:
            raise RuntimeError("This task requires at least one MDS")

        first_mon = misc.get_first_mon(ctx, None)
        (self.mon_remote,) = ctx.cluster.only(first_mon).remotes.iterkeys()
        self.mon_manager = ceph_manager.CephManager(self.mon_remote, ctx=ctx, logger=log.getChild('ceph_manager'))
        self.mds_daemons = dict([(mds_id, self._ctx.daemons.get_daemon('mds', mds_id)) for mds_id in self.mds_ids])

        client_list = list(misc.all_roles_of_type(self._ctx.cluster, 'client'))
        self.client_id = client_list[0]
        self.client_remote = list(misc.get_clients(ctx=ctx, roles=["client.{0}".format(self.client_id)]))[0][1]
Code Example #4
File: kernel.py Project: dzafman/teuthology
def normalize_config(ctx, config):
    """
    Returns a config whose keys are all real roles.
    Generic roles (client, mon, osd, etc.) are replaced with
    the actual roles (client.0, client.1, etc.). If the config
    specifies a different version for a specific role, this is
    unchanged.

    For example, with 3 OSDs this::

         osd:
           tag: v3.0
           kdb: true
         osd.1:
           branch: new_btrfs
           kdb: false
         osd.3:
           deb: /path/to/linux-whatever.deb

    is transformed into::

         osd.0:
           tag: v3.0
           kdb: true
         osd.1:
           branch: new_btrfs
           kdb: false
         osd.2:
           tag: v3.0
           kdb: true
         osd.3:
           deb: /path/to/linux-whatever.deb

    If config is None or just specifies a version to use,
    it is applied to all nodes.
    """
    if config is None or \
            len(filter(lambda x: x in ['tag', 'branch', 'sha1', 'kdb',
                                       'deb'],
                       config.keys())) == len(config.keys()):
        new_config = {}
        if config is None:
            config = {'branch': 'master'}
        for _, roles_for_host in ctx.cluster.remotes.iteritems():
            new_config[roles_for_host[0]] = config
        return new_config

    new_config = {}
    for role, role_config in config.iteritems():
        if role_config is None:
            role_config = {'branch': 'master'}
        if '.' in role:
            new_config[role] = role_config
        else:
            for id_ in teuthology.all_roles_of_type(ctx.cluster, role):
                name = '{type}.{id}'.format(type=role, id=id_)
                # specific overrides generic
                if name not in config:
                    new_config[name] = role_config
    return new_config
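
The expansion step is easiest to see in isolation. Below is a self-contained rendition of the generic-to-specific logic above, with hard-coded ids standing in for what all_roles_of_type() would return:

def expand_generic_roles(config, ids_by_type):
    # Mirror of the second half of normalize_config(): explicit
    # 'osd.1'-style keys pass through, generic 'osd' keys fan out to
    # every id of that type that has no explicit entry.
    new_config = {}
    for role, role_config in config.items():
        if '.' in role:
            new_config[role] = role_config
        else:
            for id_ in ids_by_type[role]:
                name = '{type}.{id}'.format(type=role, id=id_)
                if name not in config:  # specific overrides generic
                    new_config[name] = role_config
    return new_config

expand_generic_roles(
    {'osd': {'tag': 'v3.0', 'kdb': True},
     'osd.1': {'branch': 'new_btrfs', 'kdb': False},
     'osd.3': {'deb': '/path/to/linux-whatever.deb'}},
    {'osd': ['0', '1', '2', '3']})
# -> osd.0 and osd.2 get the generic {'tag': 'v3.0', 'kdb': True};
#    osd.1 and osd.3 keep their specific entries, matching the docstring.
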
Code Example #5
File: filesystem.py Project: xiaoxichen/ceph
    def __init__(self, ctx, fscid=None, name=None, create=False,
                 ec_profile=None):
        super(Filesystem, self).__init__(ctx)

        self.name = name
        self.ec_profile = ec_profile
        self.id = None
        self.metadata_pool_name = None
        self.metadata_overlay = False
        self.data_pool_name = None
        self.data_pools = None

        client_list = list(misc.all_roles_of_type(self._ctx.cluster, 'client'))
        self.client_id = client_list[0]
        self.client_remote = list(misc.get_clients(ctx=ctx, roles=["client.{0}".format(self.client_id)]))[0][1]

        if name is not None:
            if fscid is not None:
                raise RuntimeError("cannot specify fscid when creating fs")
            if create and not self.legacy_configured():
                self.create()
        else:
            if fscid is not None:
                self.id = fscid
                self.getinfo(refresh = True)

        # Stash a reference to the first created filesystem on ctx, so
        # that if someone drops to the interactive shell they can easily
        # poke our methods.
        if not hasattr(self._ctx, "filesystem"):
            self._ctx.filesystem = self
Code Example #6
File: kclient.py Project: andrewschoen/ceph-qa-suite
def task(ctx, config):
    """
    Mount/unmount a ``kernel`` client.

    The config is optional and defaults to mounting on all clients. If
    a config is given, it is expected to be a list of clients to do
    this operation on. This lets you e.g. set up one client with
    ``ceph-fuse`` and another with ``kclient``.

    Example that mounts all clients::

        tasks:
        - ceph:
        - kclient:
        - interactive:

    Example that uses both ``kclient`` and ``ceph-fuse``::

        tasks:
        - ceph:
        - ceph-fuse: [client.0]
        - kclient: [client.1]
        - interactive:

    :param ctx: Context
    :param config: Configuration
    """
    log.info('Mounting kernel clients...')
    assert config is None or isinstance(config, list), \
        "task kclient got invalid config"

    if config is None:
        config = ['client.{id}'.format(id=id_)
                  for id_ in misc.all_roles_of_type(ctx.cluster, 'client')]
    clients = list(misc.get_clients(ctx=ctx, roles=config))

    test_dir = misc.get_testdir(ctx)

    # Assemble mon addresses
    remotes_and_roles = ctx.cluster.remotes.items()
    roles = [roles for (remote_, roles) in remotes_and_roles]
    ips = [remote_.ssh.get_transport().getpeername()[0]
           for (remote_, _) in remotes_and_roles]
    mons = misc.get_mons(roles, ips).values()

    mounts = {}
    for id_, remote in clients:
        kernel_mount = KernelMount(mons, test_dir, id_, remote)
        mounts[id_] = kernel_mount

        kernel_mount.mount()

    ctx.mounts = mounts
    try:
        yield mounts
    finally:
        log.info('Unmounting kernel clients...')
        for mount in mounts.values():
            mount.umount()
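
misc.get_mons() above turns the (roles, ips) pairs into a mapping from each 'mon.X' role to an 'ip:port' address string. A plausible sketch of that mapping, under the assumption that ports are handed out sequentially from the default mon port when several mons share a host:

def get_mons(roles, ips, base_port=6789):
    # Assumed behaviour: one 'ip:port' entry per mon role, bumping
    # the port for each additional mon on the same host.
    mons = {}
    next_offset = {}
    for roles_for_host, ip in zip(roles, ips):
        for role in roles_for_host:
            if role.startswith('mon.'):
                offset = next_offset.get(ip, 0)
                mons[role] = '{ip}:{port}'.format(ip=ip, port=base_port + offset)
                next_offset[ip] = offset + 1
    return mons
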
Code Example #7
File: filesystem.py Project: tsg-/ceph-qa-suite
    def create(self):
        pg_warn_min_per_osd = int(self.get_config("mon_pg_warn_min_per_osd"))
        osd_count = len(list(misc.all_roles_of_type(self._ctx.cluster, "osd")))
        pgs_per_fs_pool = pg_warn_min_per_osd * osd_count

        self.admin_remote.run(args=["sudo", "ceph", "osd", "pool", "create", "metadata", pgs_per_fs_pool.__str__()])
        self.admin_remote.run(args=["sudo", "ceph", "osd", "pool", "create", "data", pgs_per_fs_pool.__str__()])
        self.admin_remote.run(args=["sudo", "ceph", "fs", "new", "default", "metadata", "data"])
Code Example #8
File: filesystem.py Project: yghannam/ceph-qa-suite
    def create(self):
        pg_warn_min_per_osd = int(self.get_config('mon_pg_warn_min_per_osd'))
        osd_count = len(list(misc.all_roles_of_type(self._ctx.cluster, 'osd')))
        pgs_per_fs_pool = pg_warn_min_per_osd * osd_count

        self.mon_remote.run(args=['sudo', 'ceph', 'osd', 'pool', 'create', 'metadata', pgs_per_fs_pool.__str__()])
        self.mon_remote.run(args=['sudo', 'ceph', 'osd', 'pool', 'create', 'data', pgs_per_fs_pool.__str__()])
        self.mon_remote.run(args=['sudo', 'ceph', 'fs', 'new', 'default', 'metadata', 'data'])
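
The sizing rule in both create() variants is simply mon_pg_warn_min_per_osd times the OSD count, so each pool is created with just enough PGs to stay above the warning threshold. With hypothetical numbers:

# Hypothetical values for illustration; the real ones come from
# get_config() and the cluster's role map.
pg_warn_min_per_osd = 30
osd_count = 3
pgs_per_fs_pool = pg_warn_min_per_osd * osd_count
assert pgs_per_fs_pool == 90  # 'metadata' and 'data' each get 90 PGs
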
Code Example #9
File: workunit.py Project: AsherBond/teuthology
def _spawn_on_all_clients(ctx, refspec, tests, env, subdir):
    client_generator = teuthology.all_roles_of_type(ctx.cluster, 'client')
    client_remotes = list()
    for client in client_generator:
        (client_remote,) = ctx.cluster.only('client.{id}'.format(id=client)).remotes.iterkeys()
        client_remotes.append((client_remote, 'client.{id}'.format(id=client)))
        _make_scratch_dir(ctx, "client.{id}".format(id=client), subdir)

    for unit in tests:
        with parallel() as p:
            for remote, role in client_remotes:
                p.spawn(_run_tests, ctx, refspec, role, [unit], env, subdir)

    # cleanup the generated client directories
    client_generator = teuthology.all_roles_of_type(ctx.cluster, 'client')
    for client in client_generator:
        _delete_dir(ctx, 'client.{id}'.format(id=client), subdir)
Code Example #10
File: check_counter.py Project: Abhishekvrshny/ceph
    def end(self):
        overrides = self.ctx.config.get('overrides', {})
        misc.deep_merge(self.config, overrides.get('check-counter', {}))

        cluster_name = self.config.get('cluster_name', None)
        dry_run = self.config.get('dry_run', False)
        targets = self.config.get('counters', {})

        if cluster_name is None:
            cluster_name = self.ctx.managers.keys()[0]

        for daemon_type, counters in targets.items():
            # List of 'a', 'b', 'c'...
            daemon_ids = list(misc.all_roles_of_type(self.ctx.cluster, daemon_type))
            daemons = dict([(daemon_id,
                             self.ctx.daemons.get_daemon(daemon_type, daemon_id))
                            for daemon_id in daemon_ids])

            seen = set()

            for daemon_id, daemon in daemons.items():
                if not daemon.running():
                    log.info("Ignoring daemon {0}, it isn't running".format(daemon_id))
                    continue
                else:
                    log.debug("Getting stats from {0}".format(daemon_id))

                manager = self.ctx.managers[cluster_name]
                proc = manager.admin_socket(daemon_type, daemon_id, ["perf", "dump"])
                response_data = proc.stdout.getvalue().strip()
                if response_data:
                    perf_dump = json.loads(response_data)
                else:
                    log.warning("No admin socket response from {0}, skipping".format(daemon_id))
                    continue

                for counter in counters:
                    subsys, counter_id = counter.split(".")
                    if subsys not in perf_dump or counter_id not in perf_dump[subsys]:
                        log.warning("Counter '{0}' not found on daemon {1}.{2}".format(
                            counter, daemon_type, daemon_id))
                        continue
                    value = perf_dump[subsys][counter_id]

                    log.info("Daemon {0}.{1} {2}={3}".format(
                        daemon_type, daemon_id, counter, value
                    ))

                    if value > 0:
                        seen.add(counter)

            if not dry_run:
                unseen = set(counters) - set(seen)
                if unseen:
                    raise RuntimeError("The following counters failed to be set "
                                       "on {0} daemons: {1}".format(
                        daemon_type, unseen
                    ))
Code Example #11
File: filesystem.py Project: xiaoxichen/ceph
    def get_config(self, key, service_type=None):
        """
        Get config from mon by default, or a specific service if caller asks for it
        """
        if service_type is None:
            service_type = 'mon'

        service_id = sorted(misc.all_roles_of_type(self._ctx.cluster, service_type))[0]
        return self.json_asok(['config', 'get', key], service_type, service_id)[key]
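
Call sites simply name the setting and, optionally, a service type; the helper picks the lowest-numbered daemon of that type and reads the value over its admin socket. Illustrative usage only (fs stands for an instance of the class above, and the second key is a hypothetical example):

warn_min = fs.get_config('mon_pg_warn_min_per_osd')                 # asks a mon
cache_size = fs.get_config('mds_cache_size', service_type='mds')    # asks an mds
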
Code Example #12
File: rgw.py Project: calebamiles/teuthology
def task(ctx, config):
    """
    Spin up apache configured to run a rados gateway.
    Only one should be run per machine, since it uses a hard-coded port for now.

    For example, to run rgw on all clients::

        tasks:
        - ceph:
        - rgw:

    To only run on certain clients::

        tasks:
        - ceph:
        - rgw: [client.0, client.3]

    or

        tasks:
        - ceph:
        - rgw:
            client.0:
            client.3:

    To run radosgw through valgrind::

        tasks:
        - ceph:
        - rgw:
            client.0:
              valgrind: [--tool=memcheck]
            client.3:
              valgrind: [--tool=memcheck]

    """
    if config is None:
        config = dict(('client.{id}'.format(id=id_), None)
                  for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client'))
    elif isinstance(config, list):
        config = dict((name, None) for name in config)

    for _, roles_for_host in ctx.cluster.remotes.iteritems():
        running_rgw = False
        for role in roles_for_host:
            if role in config.iterkeys():
                assert not running_rgw, "Only one client per host can run rgw."
                running_rgw = True

    with contextutil.nested(
        lambda: create_dirs(ctx=ctx, config=config),
        lambda: ship_config(ctx=ctx, config=config),
        lambda: start_rgw(ctx=ctx, config=config),
        lambda: start_apache(ctx=ctx, config=config),
        ):
        yield
Code Example #13
File: kcon_most.py Project: kri5/teuthology
def task(ctx, config):
    """
    Enable most ceph console logging

    Example that enables logging on all clients::

        tasks:
        - ceph:
        - kclient:
        - kcon_most
        - interactive:

    Example that enables logging only on the client using kclient::

        tasks:
        - ceph:
        - kclient: [client.0]
        - kcon_most: [client.0]
        - interactive:
    """
    log.info('Enable additional kernel logging...')
    assert config is None or isinstance(config, list), \
        "task kcon_most got invalid config"

    if config is None:
        config = ['client.{id}'.format(id=id_)
                  for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
    clients = list(teuthology.get_clients(ctx=ctx, roles=config))

    testdir = teuthology.get_testdir(ctx)

    for id_, remote in clients:
        # TODO: Don't have to run this more than once per node (remote)
        log.info('Enable logging on client.{id} at {remote} ...'.format(
                id=id_, remote=remote))
        remote.run(
            args=[
                'sudo',
                'kcon_most',
                'on'
                ],
            )

    try:
        yield
    finally:
        log.info('Disable extra kernel logging on clients...')
        for id_, remote in clients:
            log.debug('Disable extra kernel logging on client.{id}...'.format(id=id_))
            remote.run(
                args=[
                    'sudo',
                    'kcon_most',
                    'off'
                    ],
                )
Code Example #14
File: localdir.py Project: AlfredChenxf/teuthology
def task(ctx, config):
    """
    Create a mount dir 'client' that is just the local disk:

    Example that "mounts" all clients:

        tasks:
        - localdir:
        - interactive:

    Example for a specific client:

        tasks:
        - localdir: [client.2]
        - interactive:

    :param ctx: Context
    :param config: Configuration
    """
    log.info('Creating local mnt dirs...')

    testdir = teuthology.get_testdir(ctx)

    if config is None:
        config = list('client.{id}'.format(id=id_)
                      for id_ in teuthology.all_roles_of_type(ctx.cluster,
                                                              'client'))

    clients = list(teuthology.get_clients(ctx=ctx, roles=config))
    for id_, remote in clients:
        mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
        log.info('Creating dir {remote} {mnt}...'.format(
                remote=remote, mnt=mnt))
        remote.run(
            args=[
                'mkdir',
                '--',
                mnt,
                ],
            )

    try:
        yield

    finally:
        log.info('Removing local mnt dirs...')
        for id_, remote in clients:
            mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
            remote.run(
                args=[
                    'rm',
                    '-rf',
                    '--',
                    mnt,
                    ],
                )
Code Example #15
def task(ctx, config):
    """
    Go through filesystem creation with a synthetic failure in an MDS
    in its 'up:creating' state, to exercise the retry behaviour.
    """
    # Grab handles to the teuthology objects of interest
    mdslist = list(misc.all_roles_of_type(ctx.cluster, 'mds'))
    if len(mdslist) != 1:
        # Require exactly one MDS, the code path for creation failure when
        # a standby is available is different
        raise RuntimeError("This task requires exactly one MDS")

    mds_id = mdslist[0]
    (mds_remote,) = ctx.cluster.only('mds.{_id}'.format(_id=mds_id)).remotes.iterkeys()
    manager = ceph_manager.CephManager(
        mds_remote, ctx=ctx, logger=log.getChild('ceph_manager'),
    )

    # Stop MDS
    self.fs.set_max_mds(0)
    self.fs.mds_stop(mds_id)
    self.fs.mds_fail(mds_id)

    # Reset the filesystem so that next start will go into CREATING
    manager.raw_cluster_cmd('fs', 'rm', "default", "--yes-i-really-mean-it")
    manager.raw_cluster_cmd('fs', 'new', "default", "metadata", "data")

    # Start the MDS with mds_kill_create_at set, it will crash during creation
    mds.restart_with_args(["--mds_kill_create_at=1"])
    try:
        mds.wait_for_exit()
    except CommandFailedError as e:
        if e.exitstatus == 1:
            log.info("MDS creation killed as expected")
        else:
            log.error("Unexpected status code %s" % e.exitstatus)
            raise

    # Since I have intentionally caused a crash, I will clean up the resulting core
    # file to avoid task.internal.coredump seeing it as a failure.
    log.info("Removing core file from synthetic MDS failure")
    mds_remote.run(args=['rm', '-f', Raw("{archive}/coredump/*.core".format(archive=misc.get_archive_dir(ctx)))])

    # It should have left the MDS map state still in CREATING
    status = self.fs.status().get_mds(mds_id)
    assert status['state'] == 'up:creating'

    # Start the MDS again without the kill flag set, it should proceed with creation successfully
    mds.restart()

    # Wait for state ACTIVE
    self.fs.wait_for_state("up:active", timeout=120, mds_id=mds_id)

    # The system should be back up in a happy healthy state, go ahead and run any further tasks
    # inside this context.
    yield
Code Example #16
File: filesystem.py Project: xiaoxichen/ceph
    def get_pgs_per_fs_pool(self):
        """
        Calculate how many PGs to use when creating a pool, in order to avoid raising any
        health warnings about mon_pg_warn_min_per_osd

        :return: an integer number of PGs
        """
        pg_warn_min_per_osd = int(self.get_config('mon_pg_warn_min_per_osd'))
        osd_count = len(list(misc.all_roles_of_type(self._ctx.cluster, 'osd')))
        return pg_warn_min_per_osd * osd_count
Code Example #17
File: tgt.py Project: Abhishekvrshny/ceph-qa-suite
def task(ctx, config):
    """
    Start up tgt.

    To start on all clients::

        tasks:
        - ceph:
        - tgt:

    To start on certain clients::

        tasks:
        - ceph:
        - tgt: [client.0, client.3]

    or

        tasks:
        - ceph:
        - tgt:
            client.0:
            client.3:

    An image block size can also be specified::

        tasks:
        - ceph:
        - tgt:
            image_size = 20480

    The general flow of things here is:
        1. Find clients on which tgt is supposed to run (start_tgtd)
        2. Remotely start up tgt daemon
    On cleanup:
        3. Stop tgt daemon

    The iscsi administration is handled by the iscsi task.
    """
    if config:
        config = {key : val for key, val in config.items()
                if key.startswith('client')}
    # config at this point should only contain keys starting with 'client'
    start_tgtd = []
    remotes = ctx.cluster.only(teuthology.is_type('client')).remotes
    log.info(remotes)
    if not config:
        start_tgtd = ['client.{id}'.format(id=id_)
            for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
    else:
        start_tgtd = config
    log.info(start_tgtd)
    with contextutil.nested(
            lambda: start_tgt_remotes(ctx=ctx, start_tgtd=start_tgtd),):
        yield
Code Example #18
File: rados.py Project: bkmcfarland/ceph
    def thread():
        """Thread spawned by gevent"""
        clients = ['client.{id}'.format(id=id_) for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
        log.info('clients are %s' % clients)
        manager = ctx.managers['ceph']
        if config.get('ec_pool', False):
            profile = config.get('erasure_code_profile', {})
            profile_name = profile.get('name', 'teuthologyprofile')
            manager.create_erasure_code_profile(profile_name, profile)
        else:
            profile_name = None
        for i in range(int(config.get('runs', '1'))):
            log.info("starting run %s out of %s", str(i), config.get('runs', '1'))
            tests = {}
            existing_pools = config.get('pools', [])
            created_pools = []
            for role in config.get('clients', clients):
                assert isinstance(role, basestring)
                PREFIX = 'client.'
                assert role.startswith(PREFIX)
                id_ = role[len(PREFIX):]

                pool = config.get('pool', None)
                if not pool and existing_pools:
                    pool = existing_pools.pop()
                else:
                    pool = manager.create_pool_with_unique_name(
                        erasure_code_profile_name=profile_name,
                        erasure_code_use_overwrites=
                          config.get('erasure_code_use_overwrites', False)
                    )
                    created_pools.append(pool)
                    if config.get('fast_read', False):
                        manager.raw_cluster_cmd(
                            'osd', 'pool', 'set', pool, 'fast_read', 'true')
                    min_size = config.get('min_size', None)
                    if min_size is not None:
                        manager.raw_cluster_cmd(
                            'osd', 'pool', 'set', pool, 'min_size', str(min_size))

                (remote,) = ctx.cluster.only(role).remotes.iterkeys()
                proc = remote.run(
                    args=["CEPH_CLIENT_ID={id_}".format(id_=id_)] + args +
                    ["--pool", pool],
                    logger=log.getChild("rados.{id}".format(id=id_)),
                    stdin=run.PIPE,
                    wait=False
                    )
                tests[id_] = proc
            run.wait(tests.itervalues())

            for pool in created_pools:
                manager.wait_snap_trimming_complete(pool)
                manager.remove_pool(pool)
Code Example #19
File: filesystem.py Project: dreamhost/ceph-qa-suite
    def __init__(self, ctx, admin_remote=None):
        self._ctx = ctx

        self.mds_ids = list(misc.all_roles_of_type(ctx.cluster, 'mds'))
        if len(self.mds_ids) == 0:
            raise RuntimeError("This task requires at least one MDS")

        first_mon = misc.get_first_mon(ctx, None)
        if admin_remote is None:
            (self.admin_remote,) = ctx.cluster.only(first_mon).remotes.iterkeys()
        else:
            self.admin_remote = admin_remote
        self.mon_manager = ceph_manager.CephManager(self.admin_remote, ctx=ctx, logger=log.getChild('ceph_manager'))
        if hasattr(self._ctx, "daemons"):
            # Presence of 'daemons' attribute implies ceph task rather than ceph_deploy task
            self.mds_daemons = dict([(mds_id, self._ctx.daemons.get_daemon('mds', mds_id)) for mds_id in self.mds_ids])

        client_list = list(misc.all_roles_of_type(self._ctx.cluster, 'client'))
        self.client_id = client_list[0]
        self.client_remote = list(misc.get_clients(ctx=ctx, roles=["client.{0}".format(self.client_id)]))[0][1]
Code Example #20
    def __init__(self, ctx):
        super(MgrCluster, self).__init__(ctx)
        self.mgr_ids = list(misc.all_roles_of_type(ctx.cluster, 'mgr'))

        if len(self.mgr_ids) == 0:
            raise RuntimeError(
                "This task requires at least one manager daemon")

        self.mgr_daemons = dict(
            [(mgr_id, self._ctx.daemons.get_daemon('mgr', mgr_id)) for mgr_id
             in self.mgr_ids])
Code Example #21
File: filesystem.py Project: xiaoxichen/ceph
    def __init__(self, ctx):
        super(MDSCluster, self).__init__(ctx)

        self.mds_ids = list(misc.all_roles_of_type(ctx.cluster, 'mds'))

        if len(self.mds_ids) == 0:
            raise RuntimeError("This task requires at least one MDS")

        if hasattr(self._ctx, "daemons"):
            # Presence of 'daemons' attribute implies ceph task rather than ceph_deploy task
            self.mds_daemons = dict([(mds_id, self._ctx.daemons.get_daemon('mds', mds_id)) for mds_id in self.mds_ids])
Code Example #22
File: filesystem.py Project: dvanders/ceph-qa-suite
    def __init__(self, ctx):
        self.mds_ids = list(misc.all_roles_of_type(ctx.cluster, 'mds'))
        self._ctx = ctx

        if len(self.mds_ids) == 0:
            raise RuntimeError("This task requires at least one MDS")

        self.mon_manager = ceph_manager.CephManager(self.admin_remote, ctx=ctx, logger=log.getChild('ceph_manager'))
        if hasattr(self._ctx, "daemons"):
            # Presence of 'daemons' attribute implies ceph task rather than ceph_deploy task
            self.mds_daemons = dict([(mds_id, self._ctx.daemons.get_daemon('mds', mds_id)) for mds_id in self.mds_ids])
Code Example #23
File: rgw-logsocket.py Project: athanatos/teuthology
def task(ctx, config):
    """
    Run some s3-tests suite against rgw, verify opslog socket returns data

    Must restrict testing to a particular client::

        tasks:
        - ceph:
        - rgw: [client.0]
        - s3tests: [client.0]

    To pass extra arguments to nose (e.g. to run a certain test)::

        tasks:
        - ceph:
        - rgw: [client.0]
        - s3tests:
            client.0:
              extra_args: ['test_s3:test_object_acl_grand_public_read']
            client.1:
              extra_args: ['--exclude', 'test_100_continue']
    """
    assert (
        config is None or isinstance(config, list) or isinstance(config, dict)
    ), "task s3tests only supports a list or dictionary for configuration"
    all_clients = ["client.{id}".format(id=id_) for id_ in teuthology.all_roles_of_type(ctx.cluster, "client")]
    if config is None:
        config = all_clients
    if isinstance(config, list):
        config = dict.fromkeys(config)
    clients = config.keys()

    overrides = ctx.config.get("overrides", {})
    # merge each client section, not the top level.
    for (client, cconf) in config.iteritems():
        teuthology.deep_merge(cconf, overrides.get("rgw-logsocket", {}))

    log.debug("config is %s", config)

    s3tests_conf = {}
    for client in clients:
        s3tests_conf[client] = ConfigObj(
            indent_type="",
            infile={"DEFAULT": {"port": 7280, "is_secure": "no"}, "fixtures": {}, "s3 main": {}, "s3 alt": {}},
        )

    with contextutil.nested(
        lambda: download(ctx=ctx, config=config),
        lambda: create_users(ctx=ctx, config=dict(clients=clients, s3tests_conf=s3tests_conf)),
        lambda: configure(ctx=ctx, config=dict(clients=config, s3tests_conf=s3tests_conf)),
        lambda: run_tests(ctx=ctx, config=config),
    ):
        yield
Code Example #24
File: blktrace.py Project: AsherBond/teuthology
def task(ctx, config):
    if config is None:
        config = dict(('client.{id}'.format(id=id_), None)
                  for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client'))
    elif isinstance(config, list):
        config = dict.fromkeys(config)

    with contextutil.nested(
        lambda: setup(ctx=ctx, config=config),
        lambda: execute(ctx=ctx, config=config),
        ):
        yield
Code Example #25
File: workunit.py Project: tv42/teuthology
def _spawn_on_all_clients(ctx, tests):
    client_generator = teuthology.all_roles_of_type(ctx.cluster, 'client')
    client_remotes = list()
    for client in client_generator:
        (client_remote,) = ctx.cluster.only('client.{id}'.format(id=client)).remotes.iterkeys()
        client_remotes.append((client_remote, 'client.{id}'.format(id=client)))
        _make_scratch_dir(ctx, "client.{id}".format(id=client))
        
    for unit in tests:
        with parallel() as p:
            for remote, role in client_remotes:
                p.spawn(_run_tests, ctx, role, [unit])
Code Example #26
File: filesystem.py Project: dvanders/ceph-qa-suite
    def __init__(self, ctx, name=None):
        super(Filesystem, self).__init__(ctx)

        if name is None:
            name = "cephfs"

        self.name = name
        self.metadata_pool_name = "{0}_metadata".format(name)
        self.data_pool_name = "{0}_data".format(name)

        client_list = list(misc.all_roles_of_type(self._ctx.cluster, 'client'))
        self.client_id = client_list[0]
        self.client_remote = list(misc.get_clients(ctx=ctx, roles=["client.{0}".format(self.client_id)]))[0][1]
Code Example #27
    def __init__(self, ctx, config):
        self._ctx = ctx
        self._config = config

        mds_list = list(misc.all_roles_of_type(ctx.cluster, 'mds'))
        if len(mds_list) != 1:
            # Require exactly one MDS, the code path for creation failure when
            # a standby is available is different
            raise RuntimeError("This task requires exactly one MDS")

        self.mds_id = mds_list[0]

        (mds_remote,) = ctx.cluster.only('mds.{_id}'.format(_id=self.mds_id)).remotes.iterkeys()
        manager = ceph_manager.CephManager(
            mds_remote, ctx=ctx, logger=log.getChild('ceph_manager'),
        )
        self.mds_manager = manager

        client_list = list(misc.all_roles_of_type(self._ctx.cluster, 'client'))
        self.client_id = client_list[0]
        self.client_remote = list(misc.get_clients(ctx=ctx, roles=["client.{0}".format(self.client_id)]))[0][1]

        self.test_files = ['a', 'b', 'c']
Code Example #28
def task(ctx, config):
    """This is the main body of the task that gets run."""

    """Take car of some yaml parsing here"""
    if config is not None and not isinstance(config, list) and not isinstance(config, dict):
        assert False, "task parallel_example only supports a list or dictionary for configuration"
    if config is None:
        config = ["client.{id}".format(id=id_) for id_ in teuthology.all_roles_of_type(ctx.cluster, "client")]
    if isinstance(config, list):
        config = dict.fromkeys(config)
    clients = config.keys()

    """Run Multiple contextmanagers sequentially by nesting them."""
    with contextutil.nested(
        lambda: parallel_test(ctx=ctx, config=clients), lambda: sequential_test(ctx=ctx, config=clients)
    ):
        yield
Code Example #29
File: pexec.py Project: AsherBond/teuthology
def _generate_remotes(ctx, config):
    if 'all' in config and len(config) == 1:
        ls = config['all']
        for remote in ctx.cluster.remotes.iterkeys():
            yield (remote, ls)
    elif 'clients' in config:
        ls = config['clients']
        for role in teuthology.all_roles_of_type(ctx.cluster, 'client'):
            (remote,) = ctx.cluster.only('client.{r}'.format(r=role)).remotes.iterkeys()
            yield (remote, ls)
        del config['clients']
        for role, ls in config.iteritems():
            (remote,) = ctx.cluster.only(role).remotes.iterkeys()
            yield (remote, ls)
    else:
        for role, ls in config.iteritems():
            (remote,) = ctx.cluster.only(role).remotes.iterkeys()
            yield (remote, ls)
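
The generator accepts three config shapes, and which branch runs depends entirely on the keys present. The possibilities, sketched as data (the commands are arbitrary placeholders):

# 1. {'all': [...]}: every remote in the cluster runs the list.
config_all = {'all': ['uptime']}
# 2. {'clients': [...], ...}: every client role runs the 'clients' list,
#    and any remaining keys are treated as explicit per-role entries.
config_clients = {'clients': ['uptime'], 'mon.a': ['df -h']}
# 3. Otherwise: only the named roles run their lists.
config_roles = {'client.0': ['uptime'], 'osd.1': ['df -h']}
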
Code Example #30
File: tgt.py Project: hughsaunders/teuthology
def task(ctx, config):
    """
    Start up tgt.

    To start on all clients::

        tasks:
        - ceph:
        - tgt:

    To start on certain clients::

        tasks:
        - ceph:
        - tgt: [client.0, client.3]

    or

        tasks:
        - ceph:
        - tgt:
            client.0:
            client.3:

    The general flow of things here is:
        1. Find clients on which tgt is supposed to run (start_tgtd)
        2. Remotely start up tgt daemon
    On cleanup:
        3. Stop tgt daemon

    The iscsi administration is handled by the iscsi task.
    """
    start_tgtd = []
    remotes = ctx.cluster.only(teuthology.is_type('client')).remotes
    log.info(remotes)
    if config is None:
        start_tgtd = ['client.{id}'.format(id=id_)
            for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
    else:
        start_tgtd = config
    log.info(start_tgtd)
    with contextutil.nested(
            lambda: start_tgt_remotes(ctx=ctx, start_tgtd=start_tgtd),):
        yield
Code Example #31
File: tempest.py Project: shili1992/ceph_read
def task(ctx, config):
    """
    Deploy and run Tempest's object storage campaign

    Example of configuration:

      overrides:
        ceph:
          conf:
            client:
              rgw keystone admin token: ADMIN
              rgw keystone accepted roles: admin,Member
              rgw keystone implicit tenants: true
              rgw keystone accepted admin roles: admin
              rgw swift enforce content length: true
              rgw swift account in url: true
              rgw swift versioning enabled: true
      tasks:
      # typically, the task should be preceded with install, ceph, tox,
      # keystone and rgw. Tox and Keystone are specific requirements
      # of tempest.py.
      - rgw:
          # it's important to match the prefix with the endpoint's URL
          # in Keystone. Additionally, if we want to test /info and its
          # accompanying stuff, the whole Swift API must be put in root
          # of the whole URL  hierarchy (read: frontend_prefix == /swift).
          frontend_prefix: /swift
          client.0:
            use-keystone-role: client.0
      - tempest:
          client.0:
            force-branch: master
            use-keystone-role: client.0
            auth:
              admin_username: admin
              admin_project_name: admin
              admin_password: ADMIN
              admin_domain_name: Default
            identity:
              uri: http://{keystone_public_host}:{keystone_public_port}/v2.0/
              uri_v3: http://{keystone_public_host}:{keystone_public_port}/v3/
              admin_role: admin
            object-storage:
              reseller_admin_role: admin
            object-storage-feature-enabled:
              container_sync: false
              discoverability: false
            blacklist:
              # please strip half of these items after merging PRs #15369
              # and #12704
              - .*test_list_containers_reverse_order.*
              - .*test_list_container_contents_with_end_marker.*
              - .*test_delete_non_empty_container.*
              - .*test_container_synchronization.*
              - .*test_get_object_after_expiration_time.*
              - .*test_create_object_with_transfer_encoding.*
    """
    assert config is None or isinstance(config, list) \
        or isinstance(config, dict), \
        'task tempest only supports a list or dictionary for configuration'

    if not ctx.tox:
        raise ConfigError('tempest must run after the tox task')
    if not ctx.keystone:
        raise ConfigError('tempest must run after the keystone task')

    all_clients = [
        'client.{id}'.format(id=id_)
        for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')
    ]
    if config is None:
        config = all_clients
    if isinstance(config, list):
        config = dict.fromkeys(config)

    overrides = ctx.config.get('overrides', {})
    # merge each client section, not the top level.
    for client in config.keys():
        if not config[client]:
            config[client] = {}
        teuthology.deep_merge(config[client], overrides.get('keystone', {}))

    log.debug('Tempest config is %s', config)

    with contextutil.nested(
            lambda: download(ctx=ctx, config=config),
            lambda: setup_venv(ctx=ctx, config=config),
            lambda: configure_instance(ctx=ctx, config=config),
            lambda: run_tempest(ctx=ctx, config=config),
    ):
        yield
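
teuthology.deep_merge() folds the override section into each client's config recursively, with the override's leaves winning on conflict. An approximation of that behaviour (the real helper also deals with lists and None operands):

def deep_merge(a, b):
    # Recursively merge dict b into dict a; b wins on scalar conflicts.
    for key, value in b.items():
        if isinstance(value, dict) and isinstance(a.get(key), dict):
            deep_merge(a[key], value)
        else:
            a[key] = value
    return a
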
Code Example #32
def task(ctx, config):
    """
    Create the specified number of pools and write 16 objects to them (thereby forcing
    the PG creation on each OSD). This task creates pools from all the clients,
    in parallel. It is easy to add other daemon types which have the appropriate
    permissions, but I don't think anything else does.
    The config is just the number of pools to create. I recommend setting
    "mon create pg interval" to a very low value in your ceph config to speed
    this up.

    You probably want to do this to look at memory consumption, and
    maybe to test how performance changes with the number of PGs. For example:

    tasks:
    - ceph:
        config:
          mon:
            mon create pg interval: 1
    - manypools: 3000
    - radosbench:
        clients: [client.0]
        time: 360
    """

    log.info('creating {n} pools'.format(n=config))

    poolnum = int(config)
    creator_remotes = []
    client_roles = teuthology.all_roles_of_type(ctx.cluster, 'client')
    log.info('got client_roles={client_roles_}'.format(client_roles_=client_roles))
    for role in client_roles:
        log.info('role={role_}'.format(role_=role))
        (creator_remote, ) = ctx.cluster.only('client.{id}'.format(id=role)).remotes.iterkeys()
        creator_remotes.append((creator_remote, 'client.{id}'.format(id=role)))

    remaining_pools = poolnum
    poolprocs = dict()
    while (remaining_pools > 0):
        log.info('{n} pools remaining to create'.format(n=remaining_pools))
        for remote, role_ in creator_remotes:
            poolnum = remaining_pools
            remaining_pools -= 1
            if remaining_pools < 0:
                continue
            log.info('creating pool{num} on {role}'.format(num=poolnum, role=role_))
            proc = remote.run(
                args=[
                    'ceph',
                    '--name', role_,
                    'osd', 'pool', 'create', 'pool{num}'.format(num=poolnum), '8',
                    run.Raw('&&'),
                    'rados',
                    '--name', role_,
                    '--pool', 'pool{num}'.format(num=poolnum),
                    'bench', '0', 'write', '-t', '16', '--block-size', '1'
                    ],
                wait=False,
            )
            log.info('waiting for pool and object creates')
            poolprocs[remote] = proc

        run.wait(poolprocs.itervalues())

    log.info('created all {n} pools and wrote 16 objects to each'.format(n=poolnum))
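
To see how the countdown hands out pool numbers, here is a toy trace of the loop with hypothetical inputs (3 pools, two creator clients):

remaining = 3
creators = ['client.0', 'client.1']
while remaining > 0:
    for role in creators:
        num = remaining
        remaining -= 1
        if remaining < 0:
            continue
        print('{role} creates pool{num}'.format(role=role, num=num))
# client.0 creates pool3
# client.1 creates pool2
# client.0 creates pool1
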
Code Example #33
File: mpi.py Project: hualongfeng/teuthology
def task(ctx, config):
    """
    Setup MPI and execute commands

    Example that starts an MPI process on specific clients::

        tasks:
        - ceph:
        - ceph-fuse: [client.0, client.1]
        - ssh_keys:
        - mpi: 
            nodes: [client.0, client.1]
            exec: ior ...

    Example that starts MPI processes on all clients::

        tasks:
        - ceph:
        - ceph-fuse:
        - ssh_keys:
        - mpi:
            exec: ior ...

    Example that starts MPI processes on all roles::

        tasks:
        - ceph:
        - ssh_keys:
        - mpi:
            nodes: all
            exec: ...

    Example that specifies a working directory for MPI processes:

        tasks:
        - ceph:
        - ceph-fuse:
        - pexec:
            clients:
              - ln -s {testdir}/mnt.* {testdir}/gmnt
        - ssh_keys:
        - mpi:
            exec: fsx-mpi
            workdir: {testdir}/gmnt
        - pexec:
            clients:
              - rm -f {testdir}/gmnt

    :param ctx: Context
    :param config: Configuration
    """
    assert isinstance(config, dict), 'task mpi got invalid config'
    assert 'exec' in config, 'task mpi got invalid config, missing exec'

    testdir = teuthology.get_testdir(ctx)

    mpiexec = config['exec'].replace('$TESTDIR', testdir)
    hosts = []
    remotes = []
    master_remote = None
    if 'nodes' in config:
        if isinstance(config['nodes'], basestring) and config['nodes'] == 'all':
            for role in teuthology.all_roles(ctx.cluster):
                (remote,) = ctx.cluster.only(role).remotes.keys()
                ip,port = remote.ssh.get_transport().getpeername()
                hosts.append(ip)
                remotes.append(remote)
            (master_remote,) = ctx.cluster.only(config['nodes'][0]).remotes.keys()
        elif isinstance(config['nodes'], list):
            for role in config['nodes']:
                (remote,) = ctx.cluster.only(role).remotes.keys()
                ip,port = remote.ssh.get_transport().getpeername()
                hosts.append(ip)
                remotes.append(remote)
            (master_remote,) = ctx.cluster.only(config['nodes'][0]).remotes.keys()
    else:
        roles = ['client.{id}'.format(id=id_) for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
        (master_remote,) = ctx.cluster.only(roles[0]).remotes.keys()
        for role in roles:
            (remote,) = ctx.cluster.only(role).remotes.keys()
            ip,port = remote.ssh.get_transport().getpeername()
            hosts.append(ip)
            remotes.append(remote)

    # mpich is sensitive to different versions on different nodes
    _check_mpi_version(remotes)

    workdir = []
    if 'workdir' in config:
        workdir = ['-wdir', config['workdir'].replace('$TESTDIR', testdir) ]

    log.info('mpi rank 0 is: {name}'.format(name=master_remote.name))

    # write out the mpi hosts file
    log.info('mpi nodes: [%s]' % (', '.join(hosts)))
    teuthology.write_file(remote=master_remote,
                          path='{tdir}/mpi-hosts'.format(tdir=testdir),
                          data='\n'.join(hosts))
    log.info('mpiexec on {name}: {cmd}'.format(name=master_remote.name, cmd=mpiexec))
    args=['mpiexec', '-f', '{tdir}/mpi-hosts'.format(tdir=testdir)]
    args.extend(workdir)
    args.extend(mpiexec.split(' '))
    master_remote.run(args=args, )
    log.info('mpi task completed')
    master_remote.run(args=['rm', '{tdir}/mpi-hosts'.format(tdir=testdir)])
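
Concretely, the task writes one IP per line to the hosts file and launches everything from rank 0's remote. A hypothetical rendering of the final invocation, for two nodes and a configured workdir:

# {testdir}/mpi-hosts (hypothetical addresses):
#     10.0.0.1
#     10.0.0.2
# Command run on the master remote:
#     mpiexec -f {testdir}/mpi-hosts -wdir {testdir}/gmnt fsx-mpi
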
Code Example #34
def task(ctx, config):
    """
    Go through filesystem creation with a synthetic failure in an MDS
    in its 'up:creating' state, to exercise the retry behaviour.
    """
    # Grab handles to the teuthology objects of interest
    mdslist = list(misc.all_roles_of_type(ctx.cluster, 'mds'))
    if len(mdslist) != 1:
        # Require exactly one MDS, the code path for creation failure when
        # a standby is available is different
        raise RuntimeError("This task requires exactly one MDS")

    mds_id = mdslist[0]
    (mds_remote, ) = ctx.cluster.only(
        'mds.{_id}'.format(_id=mds_id)).remotes.iterkeys()
    manager = ceph_manager.CephManager(
        mds_remote,
        ctx=ctx,
        logger=log.getChild('ceph_manager'),
    )

    # Stop MDS
    self.fs.set_max_mds(0)
    self.fs.mds_stop(mds_id)
    self.fs.mds_fail(mds_id)

    # Reset the filesystem so that next start will go into CREATING
    manager.raw_cluster_cmd('fs', 'rm', "default", "--yes-i-really-mean-it")
    manager.raw_cluster_cmd('fs', 'new', "default", "metadata", "data")

    # Start the MDS with mds_kill_create_at set, it will crash during creation
    mds.restart_with_args(["--mds_kill_create_at=1"])
    try:
        mds.wait_for_exit()
    except CommandFailedError as e:
        if e.exitstatus == 1:
            log.info("MDS creation killed as expected")
        else:
            log.error("Unexpected status code %s" % e.exitstatus)
            raise

    # Since I have intentionally caused a crash, I will clean up the resulting core
    # file to avoid task.internal.coredump seeing it as a failure.
    log.info("Removing core file from synthetic MDS failure")
    mds_remote.run(args=[
        'rm', '-f',
        Raw("{archive}/coredump/*.core".format(
            archive=misc.get_archive_dir(ctx)))
    ])

    # It should have left the MDS map state still in CREATING
    status = self.fs.status().get_mds(mds_id)
    assert status['state'] == 'up:creating'

    # Start the MDS again without the kill flag set, it should proceed with creation successfully
    mds.restart()

    # Wait for state ACTIVE
    self.fs.wait_for_state("up:active", timeout=120, mds_id=mds_id)

    # The system should be back up in a happy healthy state, go ahead and run any further tasks
    # inside this context.
    yield
Code Example #35
File: keystone.py Project: LargerPanda/ceph-1
def task(ctx, config):
    """
    Deploy and configure Keystone

    Example of configuration:

      - install:
      - ceph:
      - tox: [ client.0 ]
      - keystone:
          client.0:
            force-branch: master
            domains:
              - name: default
                description: Default Domain
            projects:
              - name: admin
                description:  Admin Tenant
            users:
              - name: admin
                password: ADMIN
                project: admin
            roles: [ name: admin, name: Member ]
            role-mappings:
              - name: admin
                user: admin
                project: admin
            services:
              - name: keystone
                type: identity
                description: Keystone Identity Service
              - name: swift
                type: object-store
                description: Swift Service
    """
    assert config is None or isinstance(config, list) \
        or isinstance(config, dict), \
        "task keystone only supports a list or dictionary for configuration"

    if not hasattr(ctx, 'tox'):
        raise ConfigError('keystone must run after the tox task')

    all_clients = ['client.{id}'.format(id=id_)
                   for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
    if config is None:
        config = all_clients
    if isinstance(config, list):
        config = dict.fromkeys(config)

    log.debug('Keystone config is %s', config)

    ctx.keystone = argparse.Namespace()
    ctx.keystone.public_endpoints = assign_ports(ctx, config, 5000)
    ctx.keystone.admin_endpoints = assign_ports(ctx, config, 35357)

    with contextutil.nested(
        lambda: download(ctx=ctx, config=config),
        lambda: install_packages(ctx=ctx, config=config),
        lambda: setup_venv(ctx=ctx, config=config),
        lambda: configure_instance(ctx=ctx, config=config),
        lambda: run_keystone(ctx=ctx, config=config),
        lambda: fill_keystone(ctx=ctx, config=config),
        ):
        yield
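
assign_ports() gives each configured client an endpoint starting from the supplied base port (5000 for the public API, 35357 for the admin API). A plausible sketch, assuming ports are handed out in remote/role iteration order:

def assign_ports(ctx, config, initial_port):
    # Assumed behaviour: walk the cluster and give every role that
    # appears in config the next port after initial_port.
    port = initial_port
    endpoints = {}
    for remote, roles_for_host in ctx.cluster.remotes.items():
        for role in roles_for_host:
            if role in config:
                endpoints[role] = (remote, port)
                port += 1
    return endpoints
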
Code Example #36
File: dump_stuck.py Project: zwj262310/ceph
def task(ctx, config):
    """
    Test the dump_stuck command.

    :param ctx: Context
    :param config: Configuration
    """
    assert config is None, \
        'dump_stuck requires no configuration'
    assert teuthology.num_instances_of_type(ctx.cluster, 'osd') == 2, \
        'dump_stuck requires exactly 2 osds'

    timeout = 60
    first_mon = teuthology.get_first_mon(ctx, config)
    (mon, ) = ctx.cluster.only(first_mon).remotes.keys()

    manager = ceph_manager.CephManager(
        mon,
        ctx=ctx,
        logger=log.getChild('ceph_manager'),
    )

    manager.flush_pg_stats([0, 1])
    manager.wait_for_clean(timeout)

    manager.raw_cluster_cmd(
        'tell',
        'mon.a',
        'injectargs',
        '--',
        #                            '--mon-osd-report-timeout 90',
        '--mon-pg-stuck-threshold 10')

    # all active+clean
    check_stuck(
        manager,
        num_inactive=0,
        num_unclean=0,
        num_stale=0,
    )
    num_pgs = manager.get_num_pgs()

    manager.mark_out_osd(0)
    time.sleep(timeout)
    manager.flush_pg_stats([1])
    manager.wait_for_recovery(timeout)

    # all active+clean+remapped
    check_stuck(
        manager,
        num_inactive=0,
        num_unclean=0,
        num_stale=0,
    )

    manager.mark_in_osd(0)
    manager.flush_pg_stats([0, 1])
    manager.wait_for_clean(timeout)

    # all active+clean
    check_stuck(
        manager,
        num_inactive=0,
        num_unclean=0,
        num_stale=0,
    )

    log.info('stopping first osd')
    manager.kill_osd(0)
    manager.mark_down_osd(0)
    manager.wait_for_active(timeout)

    log.info('waiting for all to be unclean')
    starttime = time.time()
    done = False
    while not done:
        try:
            check_stuck(
                manager,
                num_inactive=0,
                num_unclean=num_pgs,
                num_stale=0,
            )
            done = True
        except AssertionError:
            # wait up to 15 minutes to become stale
            if time.time() - starttime > 900:
                raise

    log.info('stopping second osd')
    manager.kill_osd(1)
    manager.mark_down_osd(1)

    log.info('waiting for all to be stale')
    starttime = time.time()
    done = False
    while not done:
        try:
            check_stuck(
                manager,
                num_inactive=0,
                num_unclean=num_pgs,
                num_stale=num_pgs,
            )
            done = True
        except AssertionError:
            # wait up to 15 minutes to become stale
            if time.time() - starttime > 900:
                raise

    log.info('reviving')
    for id_ in teuthology.all_roles_of_type(ctx.cluster, 'osd'):
        manager.revive_osd(id_)
        manager.mark_in_osd(id_)
    while True:
        try:
            manager.flush_pg_stats([0, 1])
            break
        except Exception:
            log.exception('osds must not be started yet, waiting...')
            time.sleep(1)
    manager.wait_for_clean(timeout)

    check_stuck(
        manager,
        num_inactive=0,
        num_unclean=0,
        num_stale=0,
    )
Code Example #37
File: swift.py Project: varshar16/ceph-ci
def task(ctx, config):
    """
    Run the testswift suite against rgw.

    To run all tests on all clients::

        tasks:
        - ceph:
        - rgw:
        - testswift:

    To restrict testing to particular clients::

        tasks:
        - ceph:
        - rgw: [client.0]
        - testswift: [client.0]

    To run against a server on client.1::

        tasks:
        - ceph:
        - rgw: [client.1]
        - testswift:
            client.0:
              rgw_server: client.1

    To pass extra arguments to nose (e.g. to run a certain test)::

        tasks:
        - ceph:
        - rgw: [client.0]
        - testswift:
            client.0:
              extra_args: ['test.functional.tests:TestFileUTF8', '-m', 'testCopy']
            client.1:
              extra_args: ['--exclude', 'TestFile']
    """
    assert hasattr(ctx, 'rgw'), 'swift must run after the rgw task'
    assert config is None or isinstance(config, list) \
        or isinstance(config, dict), \
        "task testswift only supports a list or dictionary for configuration"
    all_clients = ['client.{id}'.format(id=id_)
                   for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
    if config is None:
        config = all_clients
    if isinstance(config, list):
        config = dict.fromkeys(config)

    testswift_conf = {}
    clients = []
    for client, client_config in config.items():
        # http://tracker.ceph.com/issues/40304 can't bootstrap on rhel 7.6+
        (remote,) = ctx.cluster.only(client).remotes.keys()
        if remote.os.name == 'rhel' and LooseVersion(remote.os.version) >= LooseVersion('7.6'):
            log.warning('Swift tests cannot run on rhel 7.6+, skipping client {}'.format(client))
            continue

        clients.append(client)

        server = client_config.get('rgw_server', client)
        endpoint = ctx.rgw.role_endpoints.get(server)
        assert endpoint, 'swift: no rgw endpoint for {}'.format(server)

        testswift_conf[client] = ConfigObj(
                indent_type='',
                infile={
                    'func_test':
                        {
                        'auth_host' : endpoint.hostname,
                        'auth_port' : endpoint.port,
                        'auth_ssl' : 'yes' if endpoint.cert else 'no',
                        'auth_prefix' : '/auth/',
                        },
                    }
                )
    # only take config for valid clients
    config = {c: config[c] for c in clients}

    log.info('clients={c}'.format(c=config.keys()))
    with contextutil.nested(
        lambda: download(ctx=ctx, config=config),
        lambda: create_users(ctx=ctx, config=testswift_conf),
        lambda: configure(ctx=ctx, config=testswift_conf),
        lambda: run_tests(ctx=ctx, config=config),
        ):
        pass
    yield
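
Each ConfigObj built above serializes to a small INI file consumed by the testswift suite. For a non-SSL endpoint it would look roughly like this (hostname and port are placeholders taken from the rgw task's endpoint):

# [func_test]
# auth_host = rgw-host.example.com
# auth_port = 7280
# auth_ssl = no
# auth_prefix = /auth/
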
Code Example #38
File: barbican.py Project: zxgm/ceph
def task(ctx, config):
    """
    Deploy and configure Barbican

    Example of configuration:

    tasks:
      - local_cluster:
          cluster_path: /home/adam/ceph-1/build
      - local_rgw:
      - tox: [ client.0 ]
      - keystone:
          client.0:
            sha1: 12.0.0.0b2
            force-branch: master
            tenants:
              - name: admin
                description:  Admin Tenant
              - name: rgwcrypt
                description: Encryption Tenant
              - name: barbican
                description: Barbican
              - name: s3
                description: S3 project
            users:
              - name: admin
                password: ADMIN
                project: admin
              - name: rgwcrypt-user
                password: rgwcrypt-pass
                project: rgwcrypt
              - name: barbican-user
                password: barbican-pass
                project: barbican
              - name: s3-user
                password: s3-pass
                project: s3
            roles: [ name: admin, name: Member, name: creator ]
            role-mappings:
              - name: admin
                user: admin
                project: admin
              - name: Member
                user: rgwcrypt-user
                project: rgwcrypt
              - name: admin
                user: barbican-user
                project: barbican
              - name: creator
                user: s3-user
                project: s3
            services:
              - name: keystone
                type: identity
                description: Keystone Identity Service
      - barbican:
          client.0:
            force-branch: master
            use-keystone-role: client.0
            keystone_authtoken:
              auth_plugin: password
              username: barbican-user
              password: barbican-pass
              user_domain_name: Default
            rgw_user:
              tenantName: rgwcrypt
              username: rgwcrypt-user
              password: rgwcrypt-pass
            secrets:
              - name: my-key-1
                base64: a2V5MS5GcWVxKzhzTGNLaGtzQkg5NGVpb1FKcFpGb2c=
                tenantName: s3
                username: s3-user
                password: s3-pass
              - name: my-key-2
                base64: a2V5Mi5yNUNNMGFzMVdIUVZxcCt5NGVmVGlQQ1k4YWg=
                tenantName: s3
                username: s3-user
                password: s3-pass
      - s3tests:
          client.0:
            force-branch: master
            kms_key: my-key-1
      - rgw:
          client.0:
            use-keystone-role: client.0
            use-barbican-role: client.0
    """
    assert config is None or isinstance(config, list) \
        or isinstance(config, dict), \
        "task keystone only supports a list or dictionary for configuration"
    all_clients = ['client.{id}'.format(id=id_)
                   for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
    if config is None:
        config = all_clients
    if isinstance(config, list):
        config = dict.fromkeys(config)
    clients = config.keys()

    overrides = ctx.config.get('overrides', {})
    # merge each client section, not the top level.
    for client in config.keys():
        if not config[client]:
            config[client] = {}
        teuthology.deep_merge(config[client], overrides.get('barbican', {}))

    log.debug('Barbican config is %s', config)

    if not hasattr(ctx, 'keystone'):
        raise ConfigError('barbican must run after the keystone task')


    ctx.barbican = argparse.Namespace()
    ctx.barbican.endpoints = assign_ports(ctx, config, 9311)
    ctx.barbican.keys = {}
    
    with contextutil.nested(
        lambda: download(ctx=ctx, config=config),
        lambda: setup_venv(ctx=ctx, config=config),
        lambda: configure_barbican(ctx=ctx, config=config),
        lambda: run_barbican(ctx=ctx, config=config),
        lambda: create_secrets(ctx=ctx, config=config),
        ):
        yield
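
assign_ports() itself is not shown in this listing; the sketch below is only
a plausible reading of it (one consecutive port per configured client,
starting at the base port, 9311 for Barbican). The helper name and return
shape are illustrative, not the real teuthology API:

def assign_ports_sketch(clients, base_port):
    # Hypothetical stand-in for assign_ports(); the real helper may differ.
    endpoints = {}
    port = base_port
    for client in sorted(clients):
        endpoints[client] = ('localhost', port)  # (host, port) per client
        port += 1
    return endpoints

print(assign_ports_sketch(['client.0', 'client.1'], 9311))
# {'client.0': ('localhost', 9311), 'client.1': ('localhost', 9312)}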
Code example #39
File: kclient.py Project: LargerPanda/ceph
def task(ctx, config):
    """
    Mount/unmount a ``kernel`` client.

    The config is optional and defaults to mounting on all clients. If
    a config is given, it is expected to be a list of clients to do
    this operation on. This lets you e.g. set up one client with
    ``ceph-fuse`` and another with ``kclient``.

    Example that mounts all clients::

        tasks:
        - ceph:
        - kclient:
        - interactive:

    Example that uses both ``kclient`` and ``ceph-fuse``::

        tasks:
        - ceph:
        - ceph-fuse: [client.0]
        - kclient: [client.1]
        - interactive:


    Pass a dictionary instead of lists to specify per-client config::

        tasks:
        - kclient:
            client.0:
                debug: true

    :param ctx: Context
    :param config: Configuration
    """
    log.info('Mounting kernel clients...')
    assert config is None or isinstance(config, list) or isinstance(config, dict), \
        "task kclient got invalid config"

    if config is None:
        config = ['client.{id}'.format(id=id_)
                  for id_ in misc.all_roles_of_type(ctx.cluster, 'client')]

    if isinstance(config, list):
        client_roles = config
        config = dict([r, dict()] for r in client_roles)
    elif isinstance(config, dict):
        client_roles = config.keys()
    else:
        raise ValueError("Invalid config object: {0} ({1})".format(config, config.__class__))

    # config has been converted to a dict by this point
    overrides = ctx.config.get('overrides', {})
    deep_merge(config, overrides.get('kclient', {}))

    clients = list(misc.get_clients(ctx=ctx, roles=client_roles))

    test_dir = misc.get_testdir(ctx)

    # Assemble mon addresses
    remotes_and_roles = ctx.cluster.remotes.items()
    roles = [roles for (remote_, roles) in remotes_and_roles]
    ips = [remote_.ssh.get_transport().getpeername()[0]
           for (remote_, _) in remotes_and_roles]
    mons = misc.get_mons(roles, ips).values()

    mounts = {}
    for id_, remote in clients:
        kernel_mount = KernelMount(
            mons,
            test_dir,
            id_,
            remote,
            ctx.teuthology_config.get('ipmi_user', None),
            ctx.teuthology_config.get('ipmi_password', None),
            ctx.teuthology_config.get('ipmi_domain', None)
        )

        mounts[id_] = kernel_mount

        client_config = config["client.{0}".format(id_)]
        if client_config.get('debug', False):
            remote.run(args=["sudo", "bash", "-c", "echo 'module ceph +p' > /sys/kernel/debug/dynamic_debug/control"])
            remote.run(args=["sudo", "bash", "-c", "echo 'module libceph +p' > /sys/kernel/debug/dynamic_debug/control"])

        kernel_mount.mount()

    ctx.mounts = mounts
    try:
        yield mounts
    finally:
        log.info('Unmounting kernel clients...')
        for mount in mounts.values():
            mount.umount()
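
The list-to-dict normalization near the top of this task is a recurring
teuthology idiom: a bare list of roles becomes a dict mapping each role to an
empty per-client config, so later code can treat both input shapes the same
way. In isolation:

client_roles = ['client.0', 'client.1']
config = dict((r, {}) for r in client_roles)
assert config == {'client.0': {}, 'client.1': {}}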
Code example #40
def task(ctx, config):
    """
    Mount/unmount a ``kernel`` client.

    The config is optional and defaults to mounting on all clients. If
    a config is given, it is expected to be a list of clients to do
    this operation on. This lets you e.g. set up one client with
    ``ceph-fuse`` and another with ``kclient``.

    ``brxnet`` should be a private IPv4 address range; the default is
    192.168.0.0/16.

    Example that mounts all clients::

        tasks:
        - ceph:
        - kclient:
        - interactive:
        - brxnet: [192.168.0.0/16]

    Example that uses both ``kclient`` and ``ceph-fuse``::

        tasks:
        - ceph:
        - ceph-fuse: [client.0]
        - kclient: [client.1]
        - interactive:


    Pass a dictionary instead of lists to specify per-client config::

        tasks:
        - kclient:
            client.0:
                debug: true

    :param ctx: Context
    :param config: Configuration
    """
    log.info('Mounting kernel clients...')
    assert config is None or isinstance(config, list) or isinstance(config, dict), \
        "task kclient got invalid config"

    if config is None:
        config = [
            'client.{id}'.format(id=id_)
            for id_ in misc.all_roles_of_type(ctx.cluster, 'client')
        ]

    if isinstance(config, list):
        client_roles = config
        config = dict([r, dict()] for r in client_roles)
    elif isinstance(config, dict):
        client_roles = filter(lambda x: 'client.' in x, config.keys())
    else:
        raise ValueError("Invalid config object: {0} ({1})".format(
            config, config.__class__))

    # config has been converted to a dict by this point
    overrides = ctx.config.get('overrides', {})
    deep_merge(config, overrides.get('kclient', {}))

    clients = list(misc.get_clients(ctx=ctx, roles=client_roles))

    test_dir = misc.get_testdir(ctx)

    mounts = {}
    for id_, remote in clients:
        client_config = config.get("client.%s" % id_)
        if client_config is None:
            client_config = {}

        if config.get("disabled",
                      False) or not client_config.get('mounted', True):
            continue

        kernel_mount = KernelMount(
            ctx,
            test_dir,
            id_,
            remote,
            ctx.teuthology_config.get('brxnet', None),
        )

        mounts[id_] = kernel_mount

        if client_config.get('debug', False):
            remote.run(args=[
                "sudo", "bash", "-c",
                "echo 'module ceph +p' > /sys/kernel/debug/dynamic_debug/control"
            ])
            remote.run(args=[
                "sudo", "bash", "-c",
                "echo 'module libceph +p' > /sys/kernel/debug/dynamic_debug/control"
            ])

        kernel_mount.mount()

    def umount_all():
        log.info('Unmounting kernel clients...')

        forced = False
        for mount in mounts.values():
            if mount.is_mounted():
                try:
                    mount.umount()
                except (CommandFailedError, MaxWhileTries):
                    log.warning("Ordinary umount failed, forcing...")
                    forced = True
                    mount.umount_wait(force=True)

        return forced

    ctx.mounts = mounts
    try:
        yield mounts
    except:
        umount_all()  # ignore forced retval, we are already in error handling
        raise
    finally:
        forced = umount_all()
        if forced:
            # The context managers within the kclient manager worked (i.e.
            # the test workload passed) but for some reason we couldn't
            # umount, so turn this into a test failure.
            raise RuntimeError("Kernel mounts did not umount cleanly")
Code example #41
File: mds_thrash.py Project: AsherBond/teuthology
def task(ctx, config):
    """
    Stress test the mds by thrashing while another task/workunit
    is running.

    Please refer to the MDSThrasher class for further information on the
    available options.
    """
    if config is None:
        config = {}
    assert isinstance(config, dict), \
        'mds_thrash task only accepts a dict for configuration'
    mdslist = list(teuthology.all_roles_of_type(ctx.cluster, 'mds'))
    assert len(mdslist) > 1, \
        'mds_thrash task requires at least 2 metadata servers'

    # choose random seed
    seed = None
    if 'seed' in config:
        seed = int(config['seed'])
    else:
        seed = int(time.time())
    log.info('mds thrasher using random seed: {seed}'.format(seed=seed))
    random.seed(seed)

    max_thrashers = config.get('max_thrash', 1)
    thrashers = {}

    (first, ) = ctx.cluster.only(
        'mds.{_id}'.format(_id=mdslist[0])).remotes.iterkeys()
    manager = ceph_manager.CephManager(
        first,
        ctx=ctx,
        logger=log.getChild('ceph_manager'),
    )

    # make sure everyone is in active, standby, or standby-replay
    log.info('Wait for all MDSs to reach steady state...')
    statuses = None
    statuses_by_rank = None
    while True:
        statuses = {m: manager.get_mds_status(m) for m in mdslist}
        statuses_by_rank = {s['rank']: s for (_, s) in statuses.iteritems()}

        ready = filter(
            lambda (_, s): s['state'] == 'up:active' or s['state']
            == 'up:standby' or s['state'] == 'up:standby-replay',
            statuses.items())
        if len(ready) == len(statuses):
            break
        time.sleep(2)
    log.info('Ready to start thrashing')

    # setup failure groups
    failure_groups = {}
    actives = {
        s['name']: s
        for (_, s) in statuses.iteritems() if s['state'] == 'up:active'
    }
    log.info('Actives is: {d}'.format(d=actives))
    log.info('Statuses is: {d}'.format(d=statuses_by_rank))
    for active in actives:
        for (r, s) in statuses.iteritems():
            if s['standby_for_name'] == active:
                if active not in failure_groups:
                    failure_groups[active] = []
                log.info('Assigning mds rank {r} to failure group {g}'.format(
                    r=r, g=active))
                failure_groups[active].append(r)

    for (active, standbys) in failure_groups.iteritems():

        weight = 1.0
        if 'thrash_weights' in config:
            weight = float(config['thrash_weights'].get(
                'mds.{_id}'.format(_id=active), '0.0'))

        failure_group = [active]
        failure_group.extend(standbys)
        thrashers[active] = MDSThrasher(
            ctx,
            manager,
            config,
            logger=log.getChild(
                'mds_thrasher.failure_group.[{a}, {sbs}]'.format(
                    a=active, sbs=', '.join(standbys))),
            failure_group=failure_group,
            weight=weight)
        # if thrash_weights isn't specified and we've reached max_thrash,
        # we're done
        if 'thrash_weights' not in config and len(thrashers) == max_thrashers:
            break

    try:
        log.debug('Yielding')
        yield
    finally:
        log.info('joining mds_thrashers')
        for t in thrashers:
            log.info('join thrasher for failure group led by {a}'.format(a=t))
            thrashers[t].do_join()
        log.info('done joining')
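
Logging the seed, as this task does, is what makes a failed thrash run
reproducible: re-running with the seed printed in the log replays the same
random failure schedule. A minimal demonstration:

import random
import time

seed = int(time.time())
random.seed(seed)
first_run = [random.random() for _ in range(3)]

random.seed(seed)  # re-seed with the logged value to replay the schedule
assert first_run == [random.random() for _ in range(3)]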
Code example #42
File: radosgw_admin.py Project: leseb/ceph-ci
def task(ctx, config):
    """
    Test radosgw-admin functionality against a running rgw instance.
    """
    global log
    assert config is None or isinstance(config, list) \
        or isinstance(config, dict), \
        "task s3tests only supports a list or dictionary for configuration"
    all_clients = ['client.{id}'.format(id=id_)
                   for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
    if config is None:
        config = all_clients
    if isinstance(config, list):
        config = dict.fromkeys(config)
    clients = config.keys()

    multi_region_run = rgw_utils.multi_region_enabled(ctx)

    client = clients[0]  # default choice; multi-region code may overwrite this
    if multi_region_run:
        client = rgw_utils.get_master_client(ctx, clients)

    # once the client is chosen, pull the host name and assigned port out of
    # the role_endpoints that were assigned by the rgw task
    (remote_host, remote_port) = ctx.rgw.role_endpoints[client]

    realm = ctx.rgw.realm
    log.debug('radosgw-admin: realm %r', realm)
    
    ##
    user1='foo'
    user2='fud'
    subuser1='foo:foo1'
    subuser2='foo:foo2'
    display_name1='Foo'
    display_name2='Fud'
    email='*****@*****.**'
    email2='*****@*****.**'
    access_key='9te6NH5mcdcq0Tc5i8i1'
    secret_key='Ny4IOauQoL18Gp2zM7lC1vLmoawgqcYP/YGcWfXu'
    access_key2='p5YnriCv1nAtykxBrupQ'
    secret_key2='Q8Tk6Q/27hfbFSYdSkPtUqhqx1GgzvpXa4WARozh'
    swift_secret1='gpS2G9RREMrnbqlp29PP2D36kgPR1tm72n5fPYfL'
    swift_secret2='ri2VJQcKSYATOY6uaDUX7pxgkW+W1YmC6OCxPHwy'

    bucket_name='myfoo'
    bucket_name2='mybar'

    # connect to rgw
    connection = boto.s3.connection.S3Connection(
        aws_access_key_id=access_key,
        aws_secret_access_key=secret_key,
        is_secure=False,
        port=remote_port,
        host=remote_host,
        calling_format=boto.s3.connection.OrdinaryCallingFormat(),
        )
    connection2 = boto.s3.connection.S3Connection(
        aws_access_key_id=access_key2,
        aws_secret_access_key=secret_key2,
        is_secure=False,
        port=remote_port,
        host=remote_host,
        calling_format=boto.s3.connection.OrdinaryCallingFormat(),
        )

    # legend (test cases can be easily grep-ed out)
    # TESTCASE 'testname','object','method','operation','assertion'
    # TESTCASE 'info-nosuch','user','info','non-existent user','fails'
    (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1])
    assert err

    # TESTCASE 'create-ok','user','create','w/all valid info','succeeds'
    (err, out) = rgwadmin(ctx, client, [
            'user', 'create',
            '--uid', user1,
            '--display-name', display_name1,
            '--email', email,
            '--access-key', access_key,
            '--secret', secret_key,
            '--max-buckets', '4'
            ],
            check_status=True)

    # TESTCASE 'duplicate email','user','create','existing user email','fails'
    (err, out) = rgwadmin(ctx, client, [
            'user', 'create',
            '--uid', user2,
            '--display-name', display_name2,
            '--email', email,
            ])
    assert err

    # TESTCASE 'info-existing','user','info','existing user','returns correct info'
    (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1], check_status=True)
    assert out['user_id'] == user1
    assert out['email'] == email
    assert out['display_name'] == display_name1
    assert len(out['keys']) == 1
    assert out['keys'][0]['access_key'] == access_key
    assert out['keys'][0]['secret_key'] == secret_key
    assert not out['suspended']

    # this whole block should only be run if regions have been configured
    if multi_region_run:
        rgw_utils.radosgw_agent_sync_all(ctx)
        # post-sync, validate that user1 exists on the sync destination host
        for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
            dest_client = c_config['dest']
            (err, out) = rgwadmin(ctx, dest_client, ['metadata', 'list', 'user'])
            (err, out) = rgwadmin(ctx, dest_client, ['user', 'info', '--uid', user1], check_status=True)
            assert out['user_id'] == user1
            assert out['email'] == email
            assert out['display_name'] == display_name1
            assert len(out['keys']) == 1
            assert out['keys'][0]['access_key'] == access_key
            assert out['keys'][0]['secret_key'] == secret_key
            assert not out['suspended']

        # compare the metadata between different regions, make sure it matches
        log.debug('compare the metadata between different regions, make sure it matches')
        for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
            source_client = c_config['src']
            dest_client = c_config['dest']
            (err1, out1) = rgwadmin(ctx, source_client,
                ['metadata', 'get', 'user:{uid}'.format(uid=user1)], check_status=True)
            (err2, out2) = rgwadmin(ctx, dest_client,
                ['metadata', 'get', 'user:{uid}'.format(uid=user1)], check_status=True)
            assert out1 == out2

        # suspend a user on the master, then check the status on the destination
        log.debug('suspend a user on the master, then check the status on the destination')
        for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
            source_client = c_config['src']
            dest_client = c_config['dest']
            (err, out) = rgwadmin(ctx, source_client, ['user', 'suspend', '--uid', user1])
            rgw_utils.radosgw_agent_sync_all(ctx)
            (err, out) = rgwadmin(ctx, dest_client, ['user', 'info', '--uid', user1], check_status=True)
            assert out['suspended']

        # delete a user on the master, then check that it's gone on the destination
        log.debug('delete a user on the master, then check that it\'s gone on the destination')
        for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
            source_client = c_config['src']
            dest_client = c_config['dest']
            (err, out) = rgwadmin(ctx, source_client, ['user', 'rm', '--uid', user1], check_status=True)
            rgw_utils.radosgw_agent_sync_all(ctx)
            (err, out) = rgwadmin(ctx, source_client, ['user', 'info', '--uid', user1])
            assert out is None
            (err, out) = rgwadmin(ctx, dest_client, ['user', 'info', '--uid', user1])
            assert out is None

            # then recreate it so later tests pass
            (err, out) = rgwadmin(ctx, client, [
                'user', 'create',
                '--uid', user1,
                '--display-name', display_name1,
                '--email', email,
                '--access-key', access_key,
                '--secret', secret_key,
                '--max-buckets', '4'
                ],
                check_status=True)

        # now do the multi-region bucket tests
        log.debug('now do the multi-region bucket tests')

        # Create a second user for the following tests
        log.debug('Create a second user for the following tests')
        (err, out) = rgwadmin(ctx, client, [
            'user', 'create',
            '--uid', user2,
            '--display-name', display_name2,
            '--email', email2,
            '--access-key', access_key2,
            '--secret', secret_key2,
            '--max-buckets', '4'
            ],
            check_status=True)
        (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user2], check_status=True)
        assert out is not None

        # create a bucket and do a sync
        log.debug('create a bucket and do a sync')
        bucket = connection.create_bucket(bucket_name2)
        rgw_utils.radosgw_agent_sync_all(ctx)

        # compare the metadata for the bucket between different regions, make sure it matches
        log.debug('compare the metadata for the bucket between different regions, make sure it matches')
        for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
            source_client = c_config['src']
            dest_client = c_config['dest']
            (err1, out1) = rgwadmin(ctx, source_client,
                ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)],
                check_status=True)
            (err2, out2) = rgwadmin(ctx, dest_client,
                ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)],
                check_status=True)
            log.debug('metadata 1 %r', out1)
            log.debug('metadata 2 %r', out2)
            assert out1 == out2

            # get the bucket.instance info and compare that
            src_bucket_id = out1['data']['bucket']['bucket_id']
            dest_bucket_id = out2['data']['bucket']['bucket_id']
            (err1, out1) = rgwadmin(ctx, source_client, ['metadata', 'get',
                'bucket.instance:{bucket_name}:{bucket_instance}'.format(
                bucket_name=bucket_name2,bucket_instance=src_bucket_id)],
                check_status=True)
            (err2, out2) = rgwadmin(ctx, dest_client, ['metadata', 'get',
                'bucket.instance:{bucket_name}:{bucket_instance}'.format(
                bucket_name=bucket_name2,bucket_instance=dest_bucket_id)],
                check_status=True)
            del out1['data']['bucket_info']['bucket']['pool']
            del out1['data']['bucket_info']['bucket']['index_pool']
            del out1['data']['bucket_info']['bucket']['data_extra_pool']
            del out2['data']['bucket_info']['bucket']['pool']
            del out2['data']['bucket_info']['bucket']['index_pool']
            del out2['data']['bucket_info']['bucket']['data_extra_pool']
            assert out1 == out2

        same_region = 0
        for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
            source_client = c_config['src']
            dest_client = c_config['dest']

            source_region = rgw_utils.region_for_client(ctx, source_client)
            dest_region = rgw_utils.region_for_client(ctx, dest_client)

            # 301 is only returned for requests to something in a different region
            if source_region == dest_region:
                log.debug('301 is only returned for requests to something in a different region')
                same_region += 1
                continue

            # Attempt to create a new connection with user1 to the destination RGW
            log.debug('Attempt to create a new connection with user1 to the destination RGW')
            # and use that to attempt a delete (that should fail)

            (dest_remote_host, dest_remote_port) = ctx.rgw.role_endpoints[dest_client]
            connection_dest = boto.s3.connection.S3Connection(
                aws_access_key_id=access_key,
                aws_secret_access_key=secret_key,
                is_secure=False,
                port=dest_remote_port,
                host=dest_remote_host,
                calling_format=boto.s3.connection.OrdinaryCallingFormat(),
                )

            # this should fail
            r, content = send_raw_http_request(connection_dest, 'DELETE', bucket_name2, '', follow_redirects = False)
            assert r.status == 301

            # now delete the bucket on the source RGW and do another sync
            log.debug('now delete the bucket on the source RGW and do another sync')
            bucket.delete()
            rgw_utils.radosgw_agent_sync_all(ctx)

        if same_region == len(ctx.radosgw_agent.config):
            bucket.delete()
            rgw_utils.radosgw_agent_sync_all(ctx)

        # make sure that the bucket no longer exists in either region
        log.debug('make sure that the bucket no longer exists in either region')
        for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
            source_client = c_config['src']
            dest_client = c_config['dest']
            (err1, out1) = rgwadmin(ctx, source_client, ['metadata', 'get',
                'bucket:{bucket_name}'.format(bucket_name=bucket_name2)])
            (err2, out2) = rgwadmin(ctx, dest_client, ['metadata', 'get',
                'bucket:{bucket_name}'.format(bucket_name=bucket_name2)])
            # Both of the previous calls should have errors due to requesting
            # metadata for non-existent buckets
            assert err1
            assert err2

        # create a bucket and then sync it
        log.debug('create a bucket and then sync it')
        bucket = connection.create_bucket(bucket_name2)
        rgw_utils.radosgw_agent_sync_all(ctx)

        # compare the metadata for the bucket between different regions, make sure it matches
        log.debug('compare the metadata for the bucket between different regions, make sure it matches')
        for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
            source_client = c_config['src']
            dest_client = c_config['dest']
            (err1, out1) = rgwadmin(ctx, source_client,
                ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)],
                check_status=True)
            (err2, out2) = rgwadmin(ctx, dest_client,
                ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)],
                check_status=True)
            assert out1 == out2

        # Now delete the bucket and recreate it with a different user
        log.debug('Now delete the bucket and recreate it with a different user')
        # within the same window of time and then sync.
        bucket.delete()
        bucket = connection2.create_bucket(bucket_name2)
        rgw_utils.radosgw_agent_sync_all(ctx)

        # compare the metadata for the bucket between different regions, make sure it matches
        log.debug('compare the metadata for the bucket between different regions, make sure it matches')
        # user2 should own the bucket in both regions
        for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
            source_client = c_config['src']
            dest_client = c_config['dest']
            (err1, out1) = rgwadmin(ctx, source_client,
                ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)],
                check_status=True)
            (err2, out2) = rgwadmin(ctx, dest_client,
                ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)],
                check_status=True)
            assert out1 == out2
            assert out1['data']['owner'] == user2
            assert out1['data']['owner'] != user1

        # now we're going to use this bucket to test meta-data update propagation
        log.debug('now we\'re going to use this bucket to test meta-data update propagation')
        for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
            source_client = c_config['src']
            dest_client = c_config['dest']

            # get the metadata so we can tweak it
            log.debug('get the metadata so we can tweak it')
            (err, orig_data) = rgwadmin(ctx, source_client,
                ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)],
                check_status=True)

            # manually edit mtime for this bucket to be 300 seconds in the past
            log.debug('manually edit mtime for this bucket to be 300 seconds in the past')
            new_data = copy.deepcopy(orig_data)
            mtime = datetime.datetime.strptime(orig_data['mtime'], "%Y-%m-%d %H:%M:%S.%fZ") - datetime.timedelta(300)
            new_data['mtime'] =  unicode(mtime.strftime("%Y-%m-%d %H:%M:%S.%fZ"))
            log.debug("new mtime ", mtime)
            assert new_data != orig_data
            (err, out) = rgwadmin(ctx, source_client,
                ['metadata', 'put', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)],
                stdin=StringIO(json.dumps(new_data)),
                check_status=True)

            # get the metadata and make sure that the 'put' worked
            log.debug('get the metadata and make sure that the \'put\' worked')
            (err, out) = rgwadmin(ctx, source_client,
                ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)],
                check_status=True)
            assert out == new_data

            # sync to propagate the new metadata
            log.debug('sync to propagate the new metadata')
            rgw_utils.radosgw_agent_sync_all(ctx)

            # get the metadata from the dest and compare it to what we just set
            log.debug('get the metadata from the dest and compare it to what we just set')
            # and what the source region has.
            (err1, out1) = rgwadmin(ctx, source_client,
                ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)],
                check_status=True)
            (err2, out2) = rgwadmin(ctx, dest_client,
                ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)],
                check_status=True)
            # yeah for the transitive property
            assert out1 == out2
            assert out1 == new_data

        # now we delete the bucket
        log.debug('now we delete the bucket')
        bucket.delete()

        log.debug('sync to propagate the deleted bucket')
        rgw_utils.radosgw_agent_sync_all(ctx)

        # Delete user2 as later tests do not expect it to exist.
        # Verify that it is gone on both regions
        for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
            source_client = c_config['src']
            dest_client = c_config['dest']
            (err, out) = rgwadmin(ctx, source_client,
                ['user', 'rm', '--uid', user2], check_status=True)
            rgw_utils.radosgw_agent_sync_all(ctx)
            # The two 'user info' calls should fail and not return any data
            # since we just deleted this user.
            (err, out) = rgwadmin(ctx, source_client, ['user', 'info', '--uid', user2])
            assert out is None
            (err, out) = rgwadmin(ctx, dest_client, ['user', 'info', '--uid', user2])
            assert out is None

        # Test data sync

        # First create a bucket for data sync test purpose
        bucket = connection.create_bucket(bucket_name + 'data')

        # Create a tiny file and check if in sync
        for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
            if c_config.get('metadata-only'):
                continue

            for full in (True, False):
                source_client = c_config['src']
                dest_client = c_config['dest']
                k = boto.s3.key.Key(bucket)
                k.key = 'tiny_file'
                k.set_contents_from_string("123456789")
                safety_window = rgw_utils.radosgw_data_log_window(ctx, source_client)
                time.sleep(safety_window)
                rgw_utils.radosgw_agent_sync_all(ctx, data=True, full=full)
                (dest_host, dest_port) = ctx.rgw.role_endpoints[dest_client]
                dest_connection = boto.s3.connection.S3Connection(
                    aws_access_key_id=access_key,
                    aws_secret_access_key=secret_key,
                    is_secure=False,
                    port=dest_port,
                    host=dest_host,
                    calling_format=boto.s3.connection.OrdinaryCallingFormat(),
                )
                dest_k = dest_connection.get_bucket(bucket_name + 'data').get_key('tiny_file')
                assert k.get_contents_as_string() == dest_k.get_contents_as_string()

                # check that deleting it removes it from the dest zone
                k.delete()
                time.sleep(safety_window)
                # full sync doesn't handle deleted objects yet
                rgw_utils.radosgw_agent_sync_all(ctx, data=True, full=False)

                dest_bucket = dest_connection.get_bucket(bucket_name + 'data')
                dest_k = dest_bucket.get_key('tiny_file')
                assert dest_k is None, 'object not deleted from destination zone'

        # finally we delete the bucket
        bucket.delete()

        bucket = connection.create_bucket(bucket_name + 'data2')
        for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
            if c_config.get('metadata-only'):
                continue

            for full in (True, False):
                source_client = c_config['src']
                dest_client = c_config['dest']
                (dest_host, dest_port) = ctx.rgw.role_endpoints[dest_client]
                dest_connection = boto.s3.connection.S3Connection(
                    aws_access_key_id=access_key,
                    aws_secret_access_key=secret_key,
                    is_secure=False,
                    port=dest_port,
                    host=dest_host,
                    calling_format=boto.s3.connection.OrdinaryCallingFormat(),
                )
                for i in range(20):
                    k = boto.s3.key.Key(bucket)
                    k.key = 'tiny_file_' + str(i)
                    k.set_contents_from_string(str(i) * 100)

                safety_window = rgw_utils.radosgw_data_log_window(ctx, source_client)
                time.sleep(safety_window)
                rgw_utils.radosgw_agent_sync_all(ctx, data=True, full=full)

                for i in range(20):
                    dest_k = dest_connection.get_bucket(bucket_name + 'data2').get_key('tiny_file_' + str(i))
                    assert (str(i) * 100) == dest_k.get_contents_as_string()
                    k = boto.s3.key.Key(bucket)
                    k.key = 'tiny_file_' + str(i)
                    k.delete()

                # check that deleting removes the objects from the dest zone
                time.sleep(safety_window)
                # full sync doesn't delete deleted objects yet
                rgw_utils.radosgw_agent_sync_all(ctx, data=True, full=False)

                for i in range(20):
                    dest_bucket = dest_connection.get_bucket(bucket_name + 'data2')
                    dest_k = dest_bucket.get_key('tiny_file_' + str(i))
                    assert dest_k is None, 'object %d not deleted from destination zone' % i
        bucket.delete()

    # end of 'if multi_region_run:'

    # TESTCASE 'suspend-ok','user','suspend','active user','succeeds'
    (err, out) = rgwadmin(ctx, client, ['user', 'suspend', '--uid', user1],
        check_status=True)

    # TESTCASE 'suspend-suspended','user','suspend','suspended user','succeeds w/advisory'
    (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1], check_status=True)
    assert out['suspended']

    # TESTCASE 're-enable','user','enable','suspended user','succeeds'
    (err, out) = rgwadmin(ctx, client, ['user', 'enable', '--uid', user1], check_status=True)

    # TESTCASE 'info-re-enabled','user','info','re-enabled user','no longer suspended'
    (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1], check_status=True)
    assert not out['suspended']

    # TESTCASE 'add-keys','key','create','w/valid info','succeeds'
    (err, out) = rgwadmin(ctx, client, [
            'key', 'create', '--uid', user1,
            '--access-key', access_key2, '--secret', secret_key2,
            ], check_status=True)

    # TESTCASE 'info-new-key','user','info','after key addition','returns all keys'
    (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1],
        check_status=True)
    assert len(out['keys']) == 2
    assert out['keys'][0]['access_key'] == access_key2 or out['keys'][1]['access_key'] == access_key2
    assert out['keys'][0]['secret_key'] == secret_key2 or out['keys'][1]['secret_key'] == secret_key2

    # TESTCASE 'rm-key','key','rm','newly added key','succeeds, key is removed'
    (err, out) = rgwadmin(ctx, client, [
            'key', 'rm', '--uid', user1,
            '--access-key', access_key2,
            ], check_status=True)
    assert len(out['keys']) == 1
    assert out['keys'][0]['access_key'] == access_key
    assert out['keys'][0]['secret_key'] == secret_key

    # TESTCASE 'add-swift-key','key','create','swift key','succeeds'
    subuser_access = 'full'
    subuser_perm = 'full-control'

    (err, out) = rgwadmin(ctx, client, [
            'subuser', 'create', '--subuser', subuser1,
            '--access', subuser_access
            ], check_status=True)

    # TESTCASE 'add-swift-key','key','create','swift key','succeeds'
    (err, out) = rgwadmin(ctx, client, [
            'subuser', 'modify', '--subuser', subuser1,
            '--secret', swift_secret1,
            '--key-type', 'swift',
            ], check_status=True)

    # TESTCASE 'subuser-perm-mask', 'subuser', 'info', 'test subuser perm mask durability', 'succeeds'
    (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1])

    assert out['subusers'][0]['permissions'] == subuser_perm

    # TESTCASE 'info-swift-key','user','info','after key addition','returns all keys'
    (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1], check_status=True)
    assert len(out['swift_keys']) == 1
    assert out['swift_keys'][0]['user'] == subuser1
    assert out['swift_keys'][0]['secret_key'] == swift_secret1

    # TESTCASE 'add-swift-subuser','key','create','swift sub-user key','succeeds'
    (err, out) = rgwadmin(ctx, client, [
            'subuser', 'create', '--subuser', subuser2,
            '--secret', swift_secret2,
            '--key-type', 'swift',
            ], check_status=True)

    # TESTCASE 'info-swift-subuser','user','info','after key addition','returns all sub-users/keys'
    (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1], check_status=True)
    assert len(out['swift_keys']) == 2
    assert out['swift_keys'][0]['user'] == subuser2 or out['swift_keys'][1]['user'] == subuser2
    assert out['swift_keys'][0]['secret_key'] == swift_secret2 or out['swift_keys'][1]['secret_key'] == swift_secret2

    # TESTCASE 'rm-swift-key1','key','rm','subuser','succeeds, one key is removed'
    (err, out) = rgwadmin(ctx, client, [
            'key', 'rm', '--subuser', subuser1,
            '--key-type', 'swift',
            ], check_status=True)
    assert len(out['swift_keys']) == 1

    # TESTCASE 'rm-subuser','subuser','rm','subuser','success, subuser is removed'
    (err, out) = rgwadmin(ctx, client, [
            'subuser', 'rm', '--subuser', subuser1,
            ], check_status=True)
    assert len(out['subusers']) == 1

    # TESTCASE 'rm-subuser-with-keys','subuser','rm','subuser','succeeds, second subuser and key are removed'
    (err, out) = rgwadmin(ctx, client, [
            'subuser', 'rm', '--subuser', subuser2,
            '--key-type', 'swift', '--purge-keys',
            ], check_status=True)
    assert len(out['swift_keys']) == 0
    assert len(out['subusers']) == 0

    # TESTCASE 'bucket-stats','bucket','stats','no session/buckets','succeeds, empty list'
    (err, out) = rgwadmin(ctx, client, ['bucket', 'stats', '--uid', user1],
        check_status=True)
    assert len(out) == 0

    if multi_region_run:
        rgw_utils.radosgw_agent_sync_all(ctx)

    # TESTCASE 'bucket-stats2','bucket','stats','no buckets','succeeds, empty list'
    (err, out) = rgwadmin(ctx, client, ['bucket', 'list', '--uid', user1], check_status=True)
    assert len(out) == 0

    # create a first bucket
    bucket = connection.create_bucket(bucket_name)

    # TESTCASE 'bucket-list','bucket','list','one bucket','succeeds, expected list'
    (err, out) = rgwadmin(ctx, client, ['bucket', 'list', '--uid', user1], check_status=True)
    assert len(out) == 1
    assert out[0] == bucket_name

    # TESTCASE 'bucket-list-all','bucket','list','all buckets','succeeds, expected list'
    (err, out) = rgwadmin(ctx, client, ['bucket', 'list'], check_status=True)
    assert len(out) >= 1
    assert bucket_name in out

    # TESTCASE 'max-bucket-limit','bucket','create','4 buckets','5th bucket fails due to max buckets == 4'
    bucket2 = connection.create_bucket(bucket_name + '2')
    bucket3 = connection.create_bucket(bucket_name + '3')
    bucket4 = connection.create_bucket(bucket_name + '4')
    # the 5th should fail.
    failed = False
    try:
        connection.create_bucket(bucket_name + '5')
    except Exception:
        failed = True
    assert failed

    # delete the buckets
    bucket2.delete()
    bucket3.delete()
    bucket4.delete()

    # TESTCASE 'bucket-stats3','bucket','stats','new empty bucket','succeeds, empty list'
    (err, out) = rgwadmin(ctx, client, [
            'bucket', 'stats', '--bucket', bucket_name], check_status=True)
    assert out['owner'] == user1
    bucket_id = out['id']

    # TESTCASE 'bucket-stats4','bucket','stats','new empty bucket','succeeds, expected bucket ID'
    (err, out) = rgwadmin(ctx, client, ['bucket', 'stats', '--uid', user1], check_status=True)
    assert len(out) == 1
    assert out[0]['id'] == bucket_id    # does it return the same ID twice in a row?

    # use some space
    key = boto.s3.key.Key(bucket)
    key.set_contents_from_string('one')

    # TESTCASE 'bucket-stats5','bucket','stats','after creating key','succeeds, lists one non-empty object'
    (err, out) = rgwadmin(ctx, client, [
            'bucket', 'stats', '--bucket', bucket_name], check_status=True)
    assert out['id'] == bucket_id
    assert out['usage']['rgw.main']['num_objects'] == 1
    assert out['usage']['rgw.main']['size_kb'] > 0

    # reclaim it
    key.delete()

    # TESTCASE 'bucket unlink', 'bucket', 'unlink', 'unlink bucket from user', 'succeeds, bucket unlinked'
    (err, out) = rgwadmin(ctx, client,
        ['bucket', 'unlink', '--uid', user1, '--bucket', bucket_name],
        check_status=True)

    # create a second user to link the bucket to
    (err, out) = rgwadmin(ctx, client, [
            'user', 'create',
            '--uid', user2,
            '--display-name', display_name2,
            '--access-key', access_key2,
            '--secret', secret_key2,
            '--max-buckets', '1',
            ],
            check_status=True)

    # try creating an object with the first user before the bucket is relinked
    denied = False
    key = boto.s3.key.Key(bucket)

    try:
        key.set_contents_from_string('two')
    except boto.exception.S3ResponseError:
        denied = True

    assert not denied

    # delete the object
    key.delete()

    # fetch the bucket metadata so we can link it to another user
    (err, out) = rgwadmin(ctx, client, ['metadata', 'get', 'bucket:{n}'.format(n=bucket_name)],
        check_status=True)

    bucket_data = out['data']
    assert bucket_data['bucket']['name'] == bucket_name

    bucket_id = bucket_data['bucket']['bucket_id']

    # link the bucket to another user
    (err, out) = rgwadmin(ctx, client, ['bucket', 'link', '--uid', user2, '--bucket', bucket_name, '--bucket-id', bucket_id],
        check_status=True)

    # try to remove user, should fail (has a linked bucket)
    (err, out) = rgwadmin(ctx, client, ['user', 'rm', '--uid', user2])
    assert err

    # TESTCASE 'bucket unlink', 'bucket', 'unlink', 'unlink bucket from user', 'succeeds, bucket unlinked'
    (err, out) = rgwadmin(ctx, client, ['bucket', 'unlink', '--uid', user2, '--bucket', bucket_name],
        check_status=True)

    # relink the bucket to the first user and delete the second user
    (err, out) = rgwadmin(ctx, client,
        ['bucket', 'link', '--uid', user1, '--bucket', bucket_name, '--bucket-id', bucket_id],
        check_status=True)

    (err, out) = rgwadmin(ctx, client, ['user', 'rm', '--uid', user2],
        check_status=True)

    # TESTCASE 'object-rm', 'object', 'rm', 'remove object', 'succeeds, object is removed'

    # upload an object
    object_name = 'four'
    key = boto.s3.key.Key(bucket, object_name)
    key.set_contents_from_string(object_name)

    # now delete it
    (err, out) = rgwadmin(ctx, client,
        ['object', 'rm', '--bucket', bucket_name, '--object', object_name],
        check_status=True)

    # TESTCASE 'bucket-stats6','bucket','stats','after deleting key','succeeds, lists no objects'
    (err, out) = rgwadmin(ctx, client, [
            'bucket', 'stats', '--bucket', bucket_name],
            check_status=True)
    assert out['id'] == bucket_id
    assert out['usage']['rgw.main']['num_objects'] == 0

    # list log objects
    # TESTCASE 'log-list','log','list','after activity','succeeds, lists at least one object'
    (err, out) = rgwadmin(ctx, client, ['log', 'list'], check_status=True)
    assert len(out) > 0

    for obj in out:
        # TESTCASE 'log-show','log','show','after activity','returns expected info'
        if obj[:4] == 'meta' or obj[:4] == 'data' or obj[:18] == 'obj_delete_at_hint':
            continue

        (err, rgwlog) = rgwadmin(ctx, client, ['log', 'show', '--object', obj],
            check_status=True)
        assert len(rgwlog) > 0

        # exempt bucket_name2 from checking as it was only used for multi-region tests
        assert rgwlog['bucket'].find(bucket_name) == 0 or rgwlog['bucket'].find(bucket_name2) == 0
        assert rgwlog['bucket'] != bucket_name or rgwlog['bucket_id'] == bucket_id
        assert rgwlog['bucket_owner'] == user1 or rgwlog['bucket'] == bucket_name + '5' or rgwlog['bucket'] == bucket_name2
        for entry in rgwlog['log_entries']:
            log.debug('checking log entry: %s', entry)
            assert entry['bucket'] == rgwlog['bucket']
            possible_buckets = [bucket_name + '5', bucket_name2]
            user = entry['user']
            assert user == user1 or user.endswith('system-user') or \
                rgwlog['bucket'] in possible_buckets

        # TESTCASE 'log-rm','log','rm','delete log objects','succeeds'
        (err, out) = rgwadmin(ctx, client, ['log', 'rm', '--object', obj],
            check_status=True)

    # TODO: show log by bucket+date

    # need to wait for all usage data to get flushed, should take up to 30 seconds
    timestamp = time.time()
    while time.time() - timestamp <= (20 * 60):      # wait up to 20 minutes
        (err, out) = rgwadmin(ctx, client, ['usage', 'show', '--categories', 'delete_obj'])  # last operation we did is delete obj, wait for it to flush
        if get_user_successful_ops(out, user1) > 0:
            break
        time.sleep(1)

    assert time.time() - timestamp <= (20 * 60)

    # TESTCASE 'usage-show' 'usage' 'show' 'all usage' 'succeeds'
    (err, out) = rgwadmin(ctx, client, ['usage', 'show'], check_status=True)
    assert len(out['entries']) > 0
    assert len(out['summary']) > 0

    user_summary = get_user_summary(out, user1)

    total = user_summary['total']
    assert total['successful_ops'] > 0

    # TESTCASE 'usage-show2' 'usage' 'show' 'user usage' 'succeeds'
    (err, out) = rgwadmin(ctx, client, ['usage', 'show', '--uid', user1],
        check_status=True)
    assert len(out['entries']) > 0
    assert len(out['summary']) > 0
    user_summary = out['summary'][0]
    for entry in user_summary['categories']:
        assert entry['successful_ops'] > 0
    assert user_summary['user'] == user1

    # TESTCASE 'usage-show3' 'usage' 'show' 'user usage categories' 'succeeds'
    test_categories = ['create_bucket', 'put_obj', 'delete_obj', 'delete_bucket']
    for cat in test_categories:
        (err, out) = rgwadmin(ctx, client, ['usage', 'show', '--uid', user1, '--categories', cat],
            check_status=True)
        assert len(out['summary']) > 0
        user_summary = out['summary'][0]
        assert user_summary['user'] == user1
        assert len(user_summary['categories']) == 1
        entry = user_summary['categories'][0]
        assert entry['category'] == cat
        assert entry['successful_ops'] > 0

    # the usage flush interval is 30 seconds; wait that long and then some
    # to make sure everything has been flushed
    time.sleep(35)

    # TESTCASE 'usage-trim' 'usage' 'trim' 'user usage' 'succeeds, usage removed'
    (err, out) = rgwadmin(ctx, client, ['usage', 'trim', '--uid', user1],
        check_status=True)
    (err, out) = rgwadmin(ctx, client, ['usage', 'show', '--uid', user1],
        check_status=True)
    assert len(out['entries']) == 0
    assert len(out['summary']) == 0

    # TESTCASE 'user-suspend2','user','suspend','existing user','succeeds'
    (err, out) = rgwadmin(ctx, client, ['user', 'suspend', '--uid', user1],
        check_status=True)

    # TESTCASE 'user-suspend3','user','suspend','suspended user','cannot write objects'
    try:
        key = boto.s3.key.Key(bucket)
        key.set_contents_from_string('five')
    except boto.exception.S3ResponseError as e:
        assert e.status == 403

    # TESTCASE 'user-renable2','user','enable','suspended user','succeeds'
    (err, out) = rgwadmin(ctx, client, ['user', 'enable', '--uid', user1],
        check_status=True)

    # TESTCASE 'user-renable3','user','enable','reenabled user','can write objects'
    key = boto.s3.key.Key(bucket)
    key.set_contents_from_string('six')

    # TESTCASE 'gc-list', 'gc', 'list', 'get list of objects ready for garbage collection'

    # create an object large enough to be split into multiple parts
    test_string = 'foo'*10000000

    big_key = boto.s3.key.Key(bucket)
    big_key.set_contents_from_string(test_string)

    # now delete the head
    big_key.delete()

    # wait a bit to give the garbage collector time to cycle
    time.sleep(15)

    (err, out) = rgwadmin(ctx, client, ['gc', 'list'])

    assert len(out) > 0

    # TESTCASE 'gc-process', 'gc', 'process', 'manually collect garbage'
    (err, out) = rgwadmin(ctx, client, ['gc', 'process'], check_status=True)

    # confirm
    (err, out) = rgwadmin(ctx, client, ['gc', 'list'])

    assert len(out) == 0

    # TESTCASE 'rm-user-buckets','user','rm','existing user','fails, still has buckets'
    (err, out) = rgwadmin(ctx, client, ['user', 'rm', '--uid', user1])
    assert err

    # delete should fail because ``key`` still exists
    try:
        bucket.delete()
    except boto.exception.S3ResponseError as e:
        assert e.status == 409

    key.delete()
    bucket.delete()

    # TESTCASE 'policy', 'bucket', 'policy', 'get bucket policy', 'returns S3 policy'
    bucket = connection.create_bucket(bucket_name)

    # create an object
    key = boto.s3.key.Key(bucket)
    key.set_contents_from_string('seven')

    # should be private already but guarantee it
    key.set_acl('private')

    (err, out) = rgwadmin(ctx, client,
        ['policy', '--bucket', bucket.name, '--object', key.key],
        check_status=True, format='xml')

    acl = get_acl(key)

    assert acl == out.strip('\n')

    # add another grantee by making the object public read
    key.set_acl('public-read')

    (err, out) = rgwadmin(ctx, client,
        ['policy', '--bucket', bucket.name, '--object', key.key],
        check_status=True, format='xml')

    acl = get_acl(key)

    assert acl == out.strip('\n')

    # TESTCASE 'rm-bucket', 'bucket', 'rm', 'bucket with objects', 'succeeds'
    bucket = connection.create_bucket(bucket_name)
    key_name = ['eight', 'nine', 'ten', 'eleven']
    for i in range(4):
        key = boto.s3.key.Key(bucket)
        key.set_contents_from_string(key_name[i])

    (err, out) = rgwadmin(ctx, client,
        ['bucket', 'rm', '--bucket', bucket_name, '--purge-objects'],
        check_status=True)

    # TESTCASE 'caps-add', 'caps', 'add', 'add user cap', 'succeeds'
    caps='user=read'
    (err, out) = rgwadmin(ctx, client, ['caps', 'add', '--uid', user1, '--caps', caps])

    assert out['caps'][0]['perm'] == 'read'

    # TESTCASE 'caps-rm', 'caps', 'rm', 'remove existing cap from user', 'succeeds'
    (err, out) = rgwadmin(ctx, client, ['caps', 'rm', '--uid', user1, '--caps', caps])

    assert not out['caps']

    # TESTCASE 'rm-user','user','rm','existing user','fails, still has buckets'
    bucket = connection.create_bucket(bucket_name)
    key = boto.s3.key.Key(bucket)

    (err, out) = rgwadmin(ctx, client, ['user', 'rm', '--uid', user1])
    assert err

    # TESTCASE 'rm-user2', 'user', 'rm', 'user with data', 'succeeds'
    bucket = connection.create_bucket(bucket_name)
    key = boto.s3.key.Key(bucket)
    key.set_contents_from_string('twelve')

    (err, out) = rgwadmin(ctx, client,
        ['user', 'rm', '--uid', user1, '--purge-data' ],
        check_status=True)

    # TESTCASE 'rm-user3','user','rm','deleted user','fails'
    (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1])
    assert err

    # TESTCASE 'zone-info', 'zone', 'get', 'get zone info', 'succeeds, has default placement rule'
    #

    if realm is None:
        (err, out) = rgwadmin(ctx, client, ['zone', 'get','--rgw-zone','default'])
    else:
        (err, out) = rgwadmin(ctx, client, ['zone', 'get'])
    orig_placement_pools = len(out['placement_pools'])

    # removed this test, it is not correct to assume that zone has default placement, it really
    # depends on how we set it up before
    #
    # assert len(out) > 0
    # assert len(out['placement_pools']) == 1

    # default_rule = out['placement_pools'][0]
    # assert default_rule['key'] == 'default-placement'

    rule={'key': 'new-placement', 'val': {'data_pool': '.rgw.buckets.2', 'index_pool': '.rgw.buckets.index.2'}}

    out['placement_pools'].append(rule)

    (err, out) = rgwadmin(ctx, client, ['zone', 'set'],
        stdin=StringIO(json.dumps(out)),
        check_status=True)

    if realm is None:
        (err, out) = rgwadmin(ctx, client, ['zone', 'get','--rgw-zone','default'])
    else:
        (err, out) = rgwadmin(ctx, client, ['zone', 'get'])
    assert len(out) > 0
    assert len(out['placement_pools']) == orig_placement_pools + 1
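
The closing zone edit is a read-modify-write of JSON piped through
radosgw-admin's stdin. The same transformation in isolation, as pure data
with no rgw involved:

import json

zone = {'placement_pools': [{'key': 'default-placement', 'val': {}}]}
rule = {'key': 'new-placement',
        'val': {'data_pool': '.rgw.buckets.2',
                'index_pool': '.rgw.buckets.index.2'}}
zone['placement_pools'].append(rule)
payload = json.dumps(zone)  # this is what `zone set` reads from stdin
assert len(json.loads(payload)['placement_pools']) == 2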
Code example #43
def task(ctx, config):
    """
    Stress test the mds by running scrub iterations while another task/workunit
    is running.
    Example config:

    - fwd_scrub:
      scrub_timeout: 300
      sleep_between_iterations: 1
    """

    mds_cluster = MDSCluster(ctx)

    if config is None:
        config = {}
    assert isinstance(config, dict), \
        'fwd_scrub task only accepts a dict for configuration'
    mdslist = list(teuthology.all_roles_of_type(ctx.cluster, 'mds'))
    assert len(mdslist) > 0, \
        'fwd_scrub task requires at least 1 metadata server'

    (first, ) = ctx.cluster.only(f'mds.{mdslist[0]}').remotes.keys()
    manager = ceph_manager.CephManager(
        first,
        ctx=ctx,
        logger=log.getChild('ceph_manager'),
    )

    # make sure everyone is in active, standby, or standby-replay
    log.info('Wait for all MDSs to reach steady state...')
    status = mds_cluster.status()
    while True:
        steady = True
        for info in status.get_all():
            state = info['state']
            if state not in ('up:active', 'up:standby', 'up:standby-replay'):
                steady = False
                break
        if steady:
            break
        sleep(2)
        status = mds_cluster.status()

    log.info('Ready to start scrub thrashing')

    manager.wait_for_clean()
    assert manager.is_clean()

    if 'cluster' not in config:
        config['cluster'] = 'ceph'

    for fs in status.get_filesystems():
        fwd_scrubber = ForwardScrubber(Filesystem(ctx, fscid=fs['id']),
                                       config['scrub_timeout'],
                                       config['sleep_between_iterations'])
        fwd_scrubber.start()
        ctx.ceph[config['cluster']].thrashers.append(fwd_scrubber)

    try:
        log.debug('Yielding')
        yield
    finally:
        log.info('joining ForwardScrubbers')
        stop_all_fwd_scrubbers(ctx.ceph[config['cluster']].thrashers)
        log.info('done joining')
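
The steady-state wait in this task polls without a deadline. A hedged sketch
of the same loop with an explicit timeout, which a harness may prefer; the
helper below is illustrative and not part of teuthology:

import time

def wait_for_steady(get_states, timeout=300, interval=2):
    """Poll get_states() until every MDS state is acceptable, or time out."""
    ok = ('up:active', 'up:standby', 'up:standby-replay')
    deadline = time.time() + timeout
    while time.time() < deadline:
        if all(state in ok for state in get_states()):
            return
        time.sleep(interval)
    raise RuntimeError('MDS daemons never reached steady state')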
Code example #44
0
ファイル: s3readwrite.py プロジェクト: AsherBond/teuthology
def task(ctx, config):
    """
    Run the s3tests-test-readwrite suite against rgw.

    To run all tests on all clients::

        tasks:
        - ceph:
        - rgw:
        - s3readwrite:

    To restrict testing to particular clients::

        tasks:
        - ceph:
        - rgw: [client.0]
        - s3readwrite: [client.0]

    To run against a server on client.1::

        tasks:
        - ceph:
        - rgw: [client.1]
        - s3readwrite:
            client.0:
              rgw_server: client.1

    To pass extra test arguments::

        tasks:
        - ceph:
        - rgw: [client.0]
        - s3readwrite:
            client.0:
              readwrite:
                bucket: mybucket
                readers: 10
                writers: 3
                duration: 600
                files:
                  num: 10
                  size: 2000
                  stddev: 500
            client.1:
              ...

    To override s3 configuration::

        tasks:
        - ceph:
        - rgw: [client.0]
        - s3readwrite:
            client.0:
              s3:
                user_id: myuserid
                display_name: myname
                email: my@email
                access_key: myaccesskey
                secret_key: mysecretkey

    """
    assert config is None or isinstance(config, list) \
        or isinstance(config, dict), \
        "task s3readwrite only supports a list or dictionary for configuration"
    all_clients = [
        'client.{id}'.format(id=id_)
        for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')
    ]
    if config is None:
        config = all_clients
    if isinstance(config, list):
        config = dict.fromkeys(config)
    clients = config.keys()

    overrides = ctx.config.get('overrides', {})
    # merge each client section, not the top level.
    for client in config.iterkeys():
        if not config[client]:
            config[client] = {}
        teuthology.deep_merge(config[client], overrides.get('s3readwrite', {}))

    log.debug('in s3readwrite, config is %s', config)

    s3tests_conf = {}
    for client in clients:
        if config[client] is None:
            config[client] = {}
        config[client].setdefault('s3', {})
        config[client].setdefault('readwrite', {})

        s3tests_conf[client] = ({
            'DEFAULT': {
                'port': 7280,
                'is_secure': False,
            },
            'readwrite': config[client]['readwrite'],
            's3': config[client]['s3'],
        })

    with contextutil.nested(
            lambda: download(ctx=ctx, config=config),
            lambda: create_users(ctx=ctx,
                                 config=dict(
                                     clients=clients,
                                     s3tests_conf=s3tests_conf,
                                 )),
            lambda: configure(ctx=ctx,
                              config=dict(
                                  clients=config,
                                  s3tests_conf=s3tests_conf,
                              )),
            lambda: run_tests(ctx=ctx, config=config),
    ):
        pass
    yield
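
Most tasks in this file repeat the same config normalization: None means all clients, a list becomes a dict with empty sections, and per-client overrides are deep-merged. A hedged sketch of that pattern as one helper (the helper name is illustrative; teuthology helpers are as used above):

def normalize_client_config(ctx, config, overrides_key):
    all_clients = ['client.{id}'.format(id=id_)
                   for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
    if config is None:
        config = all_clients
    if isinstance(config, list):
        config = dict.fromkeys(config)
    overrides = ctx.config.get('overrides', {})
    for client in config:
        config[client] = config[client] or {}
        # merge each client section, not the top level
        teuthology.deep_merge(config[client], overrides.get(overrides_key, {}))
    return config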
Code Example #45
def task(ctx, config):
    """
    Either configure apache to run a rados gateway, or use the built-in
    civetweb server.
    Only one should be run per machine, since it uses a hard-coded port for
    now.

    For example, to run rgw on all clients::

        tasks:
        - ceph:
        - rgw:

    To only run on certain clients::

        tasks:
        - ceph:
        - rgw: [client.0, client.3]

    or

        tasks:
        - ceph:
        - rgw:
            client.0:
            client.3:

    You can adjust the idle timeout for fastcgi (default is 30 seconds)::

        tasks:
        - ceph:
        - rgw:
            client.0:
              idle_timeout: 90

    To run radosgw through valgrind::

        tasks:
        - ceph:
        - rgw:
            client.0:
              valgrind: [--tool=memcheck]
            client.3:
              valgrind: [--tool=memcheck]

    To use civetweb instead of apache::

        tasks:
        - ceph:
        - rgw:
          - client.0
        overrides:
          rgw:
            frontend: civetweb

    Note that without a modified fastcgi module, e.g. with the default
    one on CentOS, you must have rgw print continue = false in ceph.conf::

        tasks:
        - ceph:
            conf:
              global:
                rgw print continue: false
        - rgw: [client.0]

    To use mod_proxy_fcgi instead of mod_fastcgi::

        overrides:
          rgw:
            use_fcgi: true

    To run rgws for multiple regions or zones, describe the regions
    and their zones in a regions section. The endpoints will be
    generated by this task. Each client must have a region, zone,
    and pools assigned in ceph.conf::

        tasks:
        - install:
        - ceph:
            conf:
              client.0:
                rgw region: foo
                rgw zone: foo-1
                rgw region root pool: .rgw.rroot.foo
                rgw zone root pool: .rgw.zroot.foo
                rgw log meta: true
                rgw log data: true
              client.1:
                rgw region: bar
                rgw zone: bar-master
                rgw region root pool: .rgw.rroot.bar
                rgw zone root pool: .rgw.zroot.bar
                rgw log meta: true
                rgw log data: true
              client.2:
                rgw region: bar
                rgw zone: bar-secondary
                rgw region root pool: .rgw.rroot.bar
                rgw zone root pool: .rgw.zroot.bar-secondary
        - rgw:
            default_idle_timeout: 30
            ec-data-pool: true
            erasure_code_profile:
              k: 2
              m: 1
              ruleset-failure-domain: osd
            regions:
              foo:
                api name: api_name # default: region name
                is master: true    # default: false
                master zone: foo-1 # default: first zone
                zones: [foo-1]
                log meta: true
                log data: true
                placement targets: [target1, target2] # default: []
                default placement: target2            # default: ''
              bar:
                api name: bar-api
                zones: [bar-master, bar-secondary]
            client.0:
              system user:
                name: foo-system
                access key: X2IYPSTY1072DDY1SJMC
                secret key: YIMHICpPvT+MhLTbSsiBJ1jQF15IFvJA8tgwJEcm
            client.1:
              system user:
                name: bar1
                access key: Y2IYPSTY1072DDY1SJMC
                secret key: XIMHICpPvT+MhLTbSsiBJ1jQF15IFvJA8tgwJEcm
            client.2:
              system user:
                name: bar2
                access key: Z2IYPSTY1072DDY1SJMC
                secret key: ZIMHICpPvT+MhLTbSsiBJ1jQF15IFvJA8tgwJEcm
    """
    if config is None:
        config = dict(('client.{id}'.format(id=id_), None)
                      for id_ in teuthology.all_roles_of_type(
                          ctx.cluster, 'client'))
    elif isinstance(config, list):
        config = dict((name, None) for name in config)

    overrides = ctx.config.get('overrides', {})
    teuthology.deep_merge(config, overrides.get('rgw', {}))

    regions = {}
    if 'regions' in config:
        # separate region info so only clients are keys in config
        regions = config['regions']
        del config['regions']

    role_endpoints = assign_ports(ctx, config)
    ctx.rgw = argparse.Namespace()
    ctx.rgw.role_endpoints = role_endpoints
    # stash the region info for later, since it was deleted from the config
    # structure
    ctx.rgw.regions = regions

    ctx.rgw.ec_data_pool = False
    if 'ec-data-pool' in config:
        ctx.rgw.ec_data_pool = bool(config['ec-data-pool'])
        del config['ec-data-pool']
    ctx.rgw.erasure_code_profile = {}
    if 'erasure_code_profile' in config:
        ctx.rgw.erasure_code_profile = config['erasure_code_profile']
        del config['erasure_code_profile']
    ctx.rgw.default_idle_timeout = 30
    if 'default_idle_timeout' in config:
        ctx.rgw.default_idle_timeout = int(config['default_idle_timeout'])
        del config['default_idle_timeout']
    ctx.rgw.cache_pools = False
    if 'cache-pools' in config:
        ctx.rgw.cache_pools = bool(config['cache-pools'])
        del config['cache-pools']

    ctx.rgw.frontend = 'apache'
    if 'frontend' in config:
        ctx.rgw.frontend = config['frontend']
        del config['frontend']

    ctx.rgw.use_fastcgi = True
    if "use_fcgi" in config:
        ctx.rgw.use_fastcgi = False
        log.info("Using mod_proxy_fcgi instead of mod_fastcgi...")
        del config['use_fcgi']

    subtasks = [
        lambda: configure_regions_and_zones(
            ctx=ctx,
            config=config,
            regions=regions,
            role_endpoints=role_endpoints,
            ),
        lambda: configure_users(
            ctx=ctx,
            config=config,
            everywhere=bool(regions),
            ),
        lambda: create_nonregion_pools(
            ctx=ctx, config=config, regions=regions),
    ]
    if ctx.rgw.frontend == 'apache':
        subtasks.insert(0, lambda: create_apache_dirs(ctx=ctx, config=config))
        subtasks.extend([
            lambda: ship_apache_configs(ctx=ctx, config=config,
                                        role_endpoints=role_endpoints),
            lambda: start_rgw(ctx=ctx, config=config),
            lambda: start_apache(ctx=ctx, config=config),
        ])
    elif ctx.rgw.frontend == 'civetweb':
        subtasks.extend([
            lambda: start_rgw(ctx=ctx, config=config),
        ])
    else:
        raise ValueError("frontend must be 'apache' or 'civetweb'")

    log.info("Using %s as radosgw frontend", ctx.rgw.frontend)
    with contextutil.nested(*subtasks):
        yield
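
The "pop a known key into a ctx attribute, with a default and a cast" block repeats five times above; a sketch of the same behavior driven by a table (the helper name and spec format are made up for illustration):

def pop_settings(config, namespace, spec):
    # spec maps a config key to (attribute name, default value, cast)
    for key, (attr, default, cast) in spec.items():
        if key in config:
            setattr(namespace, attr, cast(config.pop(key)))
        else:
            setattr(namespace, attr, default)

# equivalent to the inline code above:
# pop_settings(config, ctx.rgw, {
#     'ec-data-pool': ('ec_data_pool', False, bool),
#     'erasure_code_profile': ('erasure_code_profile', {}, dict),
#     'default_idle_timeout': ('default_idle_timeout', 30, int),
#     'cache-pools': ('cache_pools', False, bool),
# })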
Code Example #46
File: check_counter.py Project: LargerPanda/luminous
    def end(self):
        cluster_name = self.config.get('cluster_name', None)
        dry_run = self.config.get('dry_run', False)
        targets = self.config.get('counters', {})

        if cluster_name is None:
            cluster_name = self.ctx.managers.keys()[0]

        for daemon_type, counters in targets.items():
            # List of 'a', 'b', 'c'...
            daemon_ids = list(
                misc.all_roles_of_type(self.ctx.cluster, daemon_type))
            daemons = dict([
                (daemon_id,
                 self.ctx.daemons.get_daemon(daemon_type, daemon_id))
                for daemon_id in daemon_ids
            ])

            seen = set()

            for daemon_id, daemon in daemons.items():
                if not daemon.running():
                    log.info("Ignoring daemon {0}, it isn't running".format(
                        daemon_id))
                    continue
                else:
                    log.debug("Getting stats from {0}".format(daemon_id))

                manager = self.ctx.managers[cluster_name]
                proc = manager.admin_socket(daemon_type, daemon_id,
                                            ["perf", "dump"])
                response_data = proc.stdout.getvalue().strip()
                if response_data:
                    perf_dump = json.loads(response_data)
                else:
                    log.warning(
                        "No admin socket response from {0}, skipping".format(
                            daemon_id))
                    continue

                for counter in counters:
                    subsys, counter_id = counter.split(".")
                    if subsys not in perf_dump or counter_id not in perf_dump[
                            subsys]:
                        log.warning(
                            "Counter '{0}' not found on daemon {1}.{2}".format(
                                counter, daemon_type, daemon_id))
                        continue
                    value = perf_dump[subsys][counter_id]

                    log.info("Daemon {0}.{1} {2}={3}".format(
                        daemon_type, daemon_id, counter, value))

                    if value > 0:
                        seen.add(counter)

            if not dry_run:
                unseen = set(counters) - set(seen)
                if unseen:
                    raise RuntimeError(
                        "The following counters failed to be set "
                        "on {0} daemons: {1}".format(daemon_type, unseen))
Code Example #47
def normalize_config(ctx, config):
    """
    Returns a config whose keys are all real roles.
    Generic roles (client, mon, osd, etc.) are replaced with
    the actual roles (client.0, client.1, etc.). If the config
    specifies a different version for a specific role, this is
    unchanged.

    For example, with 4 OSDs this::

         osd:
           tag: v3.0
           kdb: true
         osd.1:
           branch: new_btrfs
           kdb: false
         osd.3:
           deb: /path/to/linux-whatever.deb

    is transformed into::

         osd.0:
           tag: v3.0
           kdb: true
         osd.1:
           branch: new_btrfs
           kdb: false
         osd.2:
           tag: v3.0
           kdb: true
         osd.3:
           deb: /path/to/linux-whatever.deb

    If config is None or just specifies a version to use,
    it is applied to all nodes.

    :param ctx: Context
    :param config: Configuration
    """
    if not config or all(k in VERSION_KEYS + ['kdb', 'flavor']
                         for k in config.keys()):
        new_config = {}
        if not config:
            config = CONFIG_DEFAULT
        for role in teuthology.all_roles(ctx.cluster):
            new_config[role] = config.copy()
        return new_config

    new_config = {}
    for role, role_config in config.items():
        if role_config is None:
            role_config = CONFIG_DEFAULT
        if '.' in role:
            new_config[role] = role_config.copy()
        else:
            for id_ in teuthology.all_roles_of_type(ctx.cluster, role):
                name = '{type}.{id}'.format(type=role, id=id_)
                # specific overrides generic
                if name not in config:
                    new_config[name] = role_config.copy()
    return new_config
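
The docstring's transformation is easy to check on plain dicts; a sketch of the generic-to-specific expansion with the cluster roles stubbed out (expand and roles are illustrative names, defaults omitted):

def expand(config, roles):
    out = {}
    for role, role_config in config.items():
        if '.' in role:
            out[role] = dict(role_config)
        else:
            for id_ in roles.get(role, []):
                name = '{type}.{id}'.format(type=role, id=id_)
                if name not in config:  # specific overrides generic
                    out[name] = dict(role_config)
    return out

# expand({'osd': {'tag': 'v3.0'}, 'osd.1': {'branch': 'new_btrfs'}},
#        {'osd': [0, 1, 2]})
# -> {'osd.0': {'tag': 'v3.0'}, 'osd.1': {'branch': 'new_btrfs'},
#     'osd.2': {'tag': 'v3.0'}}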
Code Example #48
File: ragweed.py Project: varshar16/ceph-ci
def task(ctx, config):
    """
    Run the ragweed suite against rgw.

    To run all tests on all clients::

        tasks:
        - ceph:
        - rgw:
        - ragweed:

    To restrict testing to particular clients::

        tasks:
        - ceph:
        - rgw: [client.0]
        - ragweed: [client.0]

    To run against a server on client.1 and increase the boto timeout to 10m::

        tasks:
        - ceph:
        - rgw: [client.1]
        - ragweed:
            client.0:
              rgw_server: client.1
              idle_timeout: 600
              stages: prepare,check

    To pass extra arguments to nose (e.g. to run a certain test)::

        tasks:
        - ceph:
        - rgw: [client.0]
        - ragweed:
            client.0:
              extra_args: ['test_s3:test_object_acl_grand_public_read']
            client.1:
              extra_args: ['--exclude', 'test_100_continue']
    """
    assert hasattr(ctx, 'rgw'), 'ragweed must run after the rgw task'
    assert config is None or isinstance(config, list) \
        or isinstance(config, dict), \
        "task ragweed only supports a list or dictionary for configuration"
    all_clients = [
        'client.{id}'.format(id=id_)
        for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')
    ]
    if config is None:
        config = all_clients
    if isinstance(config, list):
        config = dict.fromkeys(config)
    clients = config.keys()

    overrides = ctx.config.get('overrides', {})
    # merge each client section, not the top level.
    for client in config.keys():
        if not config[client]:
            config[client] = {}
        teuthology.deep_merge(config[client], overrides.get('ragweed', {}))

    log.debug('ragweed config is %s', config)

    ragweed_conf = {}
    for client in clients:
        # use rgw_server endpoint if given, or default to same client
        target = config[client].get('rgw_server', client)

        endpoint = ctx.rgw.role_endpoints.get(target)
        assert endpoint, 'ragweed: no rgw endpoint for {}'.format(target)

        ragweed_conf[client] = ConfigObj(indent_type='',
                                         infile={
                                             'rgw': {
                                                 'host': endpoint.dns_name,
                                                 'port': endpoint.port,
                                                 'is_secure': endpoint.cert is not None,
                                             },
                                             'fixtures': {},
                                             'user system': {},
                                             'user regular': {},
                                             'rados': {
                                                 'ceph_conf': '/etc/ceph/ceph.conf',
                                             },
                                         })

    run_stages = {}

    with contextutil.nested(
            lambda: download(ctx=ctx, config=config),
            lambda: create_users(ctx=ctx,
                                 config=dict(
                                     clients=clients,
                                     ragweed_conf=ragweed_conf,
                                     config=config,
                                 ),
                                 run_stages=run_stages),
            lambda: configure(ctx=ctx,
                              config=dict(
                                  clients=config,
                                  ragweed_conf=ragweed_conf,
                              ),
                              run_stages=run_stages),
            lambda: run_tests(ctx=ctx, config=config, run_stages=run_stages),
    ):
        pass
    yield
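
For reference, ConfigObj with indent_type='' serializes the nested dict above into the flat ini file ragweed reads; a small standalone illustration (the host value is a placeholder):

from configobj import ConfigObj

conf = ConfigObj(indent_type='', infile={
    'rgw': {'host': 'rgw.example.com', 'port': 7280, 'is_secure': False},
    'fixtures': {},
})
print('\n'.join(conf.write()))
# [rgw]
# host = rgw.example.com
# port = 7280
# is_secure = False
# [fixtures]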
Code Example #49
File: pykmip.py Project: ekmixon/ceph
def task(ctx, config):
    """
    Deploy and configure PyKMIP

    Example of configuration:

    tasks:
    - install:
    - ceph:
       conf:
        client:
         rgw crypt s3 kms backend: kmip
         rgw crypt kmip ca path: /home/ubuntu/cephtest/ca/kmiproot.crt
         rgw crypt kmip client cert: /home/ubuntu/cephtest/ca/kmip-client.crt
         rgw crypt kmip client key: /home/ubuntu/cephtest/ca/kmip-client.key
         rgw crypt kmip kms key template: pykmip-$keyid
    - openssl_keys:
       kmiproot:
         client: client.0
         cn: kmiproot
         key-type: rsa:4096
    - openssl_keys:
       kmip-server:
         client: client.0
         ca: kmiproot
       kmip-client:
         client: client.0
         ca: kmiproot
         cn: rgw-client
    - pykmip:
        client.0:
          force-branch: master
          clientca: kmiproot
          servercert: kmip-server
          clientcert: kmip-client
          secrets:
          - name: pykmip-key-1
          - name: pykmip-key-2
    - rgw:
        client.0:
          use-pykmip-role: client.0
    - s3tests:
        client.0:
          force-branch: master
    """
    assert config is None or isinstance(config, list) \
        or isinstance(config, dict), \
        "task pykmip only supports a list or dictionary for configuration"
    all_clients = [
        'client.{id}'.format(id=id_)
        for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')
    ]
    if config is None:
        config = all_clients
    if isinstance(config, list):
        config = dict.fromkeys(config)

    overrides = ctx.config.get('overrides', {})
    # merge each client section, not the top level.
    for client in config.keys():
        if not config[client]:
            config[client] = {}
        teuthology.deep_merge(config[client], overrides.get('pykmip', {}))

    log.debug('PyKMIP config is %s', config)

    if not hasattr(ctx, 'ssl_certificates'):
        raise ConfigError('pykmip must run after the openssl_keys task')

    ctx.pykmip = argparse.Namespace()
    ctx.pykmip.endpoints = assign_ports(ctx, config, 5696)
    ctx.pykmip.keys = {}

    with contextutil.nested(
            lambda: download(ctx=ctx, config=config),
            lambda: setup_venv(ctx=ctx, config=config),
            lambda: install_packages(ctx=ctx, config=config),
            lambda: configure_pykmip(ctx=ctx, config=config),
            lambda: run_pykmip(ctx=ctx, config=config),
            lambda: create_secrets(ctx=ctx, config=config),
    ):
        yield
Code Example #50
def task(ctx, config):
    """
    Run the testswift suite against rgw.

    To run all tests on all clients::

        tasks:
        - ceph:
        - rgw:
        - testswift:

    To restrict testing to particular clients::

        tasks:
        - ceph:
        - rgw: [client.0]
        - testswift: [client.0]

    To run against a server on client.1::

        tasks:
        - ceph:
        - rgw: [client.1]
        - testswift:
            client.0:
              rgw_server: client.1

    To pass extra arguments to nose (e.g. to run a certain test)::

        tasks:
        - ceph:
        - rgw: [client.0]
        - testswift:
            client.0:
              extra_args: ['test.functional.tests:TestFileUTF8', '-m', 'testCopy']
            client.1:
              extra_args: ['--exclude', 'TestFile']
    """
    assert config is None or isinstance(config, list) \
        or isinstance(config, dict), \
        "task testswift only supports a list or dictionary for configuration"
    all_clients = [
        'client.{id}'.format(id=id_)
        for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')
    ]
    if config is None:
        config = all_clients
    if isinstance(config, list):
        config = dict.fromkeys(config)
    clients = config.keys()

    log.info('clients={c}'.format(c=clients))

    testswift_conf = {}
    for client in clients:
        testswift_conf[client] = ConfigObj(indent_type='',
                                           infile={
                                               'func_test': {
                                                   'auth_port': 7280,
                                                   'auth_ssl': 'no',
                                                   'auth_prefix': '/auth/',
                                               },
                                           })

    with contextutil.nested(
            lambda: download(ctx=ctx, config=clients),
            lambda: create_users(ctx=ctx,
                                 config=dict(
                                     clients=clients,
                                     testswift_conf=testswift_conf,
                                 )),
            lambda: configure(ctx=ctx,
                              config=dict(
                                  clients=config,
                                  testswift_conf=testswift_conf,
                              )),
            lambda: run_tests(ctx=ctx, config=config),
    ):
        pass
    yield
Code Example #51
File: samba.py Project: AsherBond/teuthology
def task(ctx, config):
    """
    Set up samba smbd with the ceph vfs module.  This task assumes the samba
    package has already been installed via the install task.

    The config is optional and defaults to starting samba on all nodes.
    If a config is given, it is expected to be a list of
    samba nodes to start smbd servers on.

    Example that starts smbd on all samba nodes::

        tasks:
        - install:
        - install:
            project: samba
            extra_packages: ['samba']
        - ceph:
        - samba:
        - interactive:

    Example that starts smbd on just one of the samba nodes and cifs on the other::

        tasks:
        - samba: [samba.0]
        - cifs: [samba.1]

    An optional backend can be specified, and requires a path which smbd will
    use as the backend storage location::

        roles:
            - [osd.0, osd.1, osd.2, mon.0, mon.1, mon.2, mds.a]
            - [client.0, samba.0]

        tasks:
        - ceph:
        - ceph-fuse: [client.0]
        - samba:
            samba.0:
              cephfuse: "{testdir}/mnt.0"

    This mounts ceph to {testdir}/mnt.0 using fuse, and starts smbd with
    a UNC of //localhost/cephfuse.  Access through that UNC will be on
    the ceph fuse mount point.

    If no arguments are specified in the samba
    role, the default behavior is to enable the ceph UNC //localhost/ceph
    and use the ceph vfs module as the smbd backend.

    """
    log.info("Setting up smbd with ceph vfs...")
    assert config is None or isinstance(config, list) or isinstance(config, dict), \
        "task samba got invalid config"

    if config is None:
        config = dict(
            ('samba.{id}'.format(id=id_), None)
            for id_ in teuthology.all_roles_of_type(ctx.cluster, 'samba'))
    elif isinstance(config, list):
        config = dict((name, None) for name in config)

    samba_servers = list(get_sambas(ctx=ctx, roles=config.keys()))

    testdir = teuthology.get_testdir(ctx)

    from teuthology.task.ceph import CephState
    if not hasattr(ctx, 'daemons'):
        ctx.daemons = CephState()

    for id_, remote in samba_servers:

        rolestr = "samba.{id_}".format(id_=id_)

        confextras = """vfs objects = ceph
  ceph:config_file = /etc/ceph/ceph.conf"""

        unc = "ceph"
        backend = "/"

        if config[rolestr] is not None:
            # verify that there's just one parameter in role
            if len(config[rolestr]) != 1:
                log.error(
                    "samba config for role samba.{id_} must have only one parameter"
                    .format(id_=id_))
                raise Exception('invalid config')
            confextras = ""
            (unc, backendstr) = config[rolestr].items()[0]
            backend = backendstr.format(testdir=testdir)

        # on first samba role, set ownership and permissions of ceph root
        # so that samba tests succeed
        if config[rolestr] is None and id_ == samba_servers[0][0]:
            remote.run(args=[
                'mkdir',
                '-p',
                '/tmp/cmnt',
                run.Raw('&&'),
                'sudo',
                'ceph-fuse',
                '/tmp/cmnt',
                run.Raw('&&'),
                'sudo',
                'chown',
                'ubuntu:ubuntu',
                '/tmp/cmnt/',
                run.Raw('&&'),
                'sudo',
                'chmod',
                '1777',
                '/tmp/cmnt/',
                run.Raw('&&'),
                'sudo',
                'umount',
                '/tmp/cmnt/',
                run.Raw('&&'),
                'rm',
                '-rf',
                '/tmp/cmnt',
            ], )
        else:
            remote.run(args=[
                'sudo',
                'chown',
                'ubuntu:ubuntu',
                backend,
                run.Raw('&&'),
                'sudo',
                'chmod',
                '1777',
                backend,
            ], )

        teuthology.sudo_write_file(
            remote, "/usr/local/samba/etc/smb.conf", """
[global]
  workgroup = WORKGROUP
  netbios name = DOMAIN

[{unc}]
  path = {backend}
  {extras}
  writeable = yes
  valid users = ubuntu
""".format(extras=confextras, unc=unc, backend=backend))

        # create ubuntu user
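        # `smbpasswd -e` enables the account if it already exists; on failure
        # the `||` branch pipes the password twice ("ubuntu\nubuntu\n") into
        # `smbpasswd -s -a` to create the account non-interactively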
        remote.run(args=[
            'sudo', '/usr/local/samba/bin/smbpasswd', '-e', 'ubuntu',
            run.Raw('||'), 'printf',
            run.Raw('"ubuntu\nubuntu\n"'),
            run.Raw('|'), 'sudo', '/usr/local/samba/bin/smbpasswd', '-s', '-a',
            'ubuntu'
        ])

        smbd_cmd = [
            'sudo',
            'daemon-helper',
            'kill',
            'nostdin',
            '/usr/local/samba/sbin/smbd',
            '-F',
        ]
        ctx.daemons.add_daemon(
            remote,
            'smbd',
            id_,
            args=smbd_cmd,
            logger=log.getChild("smbd.{id_}".format(id_=id_)),
            stdin=run.PIPE,
            wait=False,
        )

        # let smbd initialize, probably a better way...
        import time
        time.sleep(5)
    try:
        yield
    finally:
        log.info('Stopping smbd processes...')
        exc_info = (None, None, None)
        for d in ctx.daemons.iter_daemons_of_role('smbd'):
            try:
                d.stop()
            except (run.CommandFailedError, run.CommandCrashedError,
                    run.ConnectionLostError):
                exc_info = sys.exc_info()
                log.exception('Saw exception from %s.%s', d.role, d.id_)
        if exc_info != (None, None, None):
            raise exc_info[0], exc_info[1], exc_info[2]

        for id_, remote in samba_servers:
            remote.run(args=[
                'sudo',
                'rm',
                '-rf',
                '/usr/local/samba/etc/smb.conf',
                '/usr/local/samba/private/*',
                '/usr/local/samba/var/run/',
                '/usr/local/samba/var/locks',
                '/usr/local/samba/var/lock',
            ], )
            # make sure daemons are gone
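            # `killall -9 smbd` exits non-zero once no processes are left,
            # ending the loop; lsof/fuser below then report anything still
            # holding the backend path open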
            try:
                remote.run(args=[
                    'while',
                    'sudo',
                    'killall',
                    '-9',
                    'smbd',
                    run.Raw(';'),
                    'do',
                    'sleep',
                    '1',
                    run.Raw(';'),
                    'done',
                ], )

                remote.run(args=[
                    'sudo',
                    'lsof',
                    backend,
                ], )
                remote.run(args=[
                    'sudo',
                    'fuser',
                    '-M',
                    backend,
                ], )
            except Exception:
                log.exception("Saw exception")
                pass
Code Example #52
def task(ctx, config):
    """
    Stress test the mds by thrashing while another task/workunit
    is running.

    Please refer to MDSThrasher class for further information on the
    available options.
    """

    mds_cluster = MDSCluster(ctx)

    if config is None:
        config = {}
    assert isinstance(config, dict), \
        'mds_thrash task only accepts a dict for configuration'
    mdslist = list(teuthology.all_roles_of_type(ctx.cluster, 'mds'))
    assert len(mdslist) > 1, \
        'mds_thrash task requires at least 2 metadata servers'

    # choose random seed
    if 'seed' in config:
        seed = int(config['seed'])
    else:
        seed = int(time.time())
    log.info('mds thrasher using random seed: {seed}'.format(seed=seed))
    random.seed(seed)

    (first, ) = ctx.cluster.only(
        'mds.{_id}'.format(_id=mdslist[0])).remotes.keys()
    manager = ceph_manager.CephManager(
        first,
        ctx=ctx,
        logger=log.getChild('ceph_manager'),
    )

    # make sure everyone is in active, standby, or standby-replay
    log.info('Wait for all MDSs to reach steady state...')
    status = mds_cluster.status()
    while True:
        steady = True
        for info in status.get_all():
            state = info['state']
            if state not in ('up:active', 'up:standby', 'up:standby-replay'):
                steady = False
                break
        if steady:
            break
        sleep(2)
        status = mds_cluster.status()
    log.info('Ready to start thrashing')

    manager.wait_for_clean()
    assert manager.is_clean()

    if 'cluster' not in config:
        config['cluster'] = 'ceph'

    thrashers = []
    for fs in status.get_filesystems():
        thrasher = MDSThrasher(ctx, manager, config,
                               Filesystem(ctx, fscid=fs['id']),
                               fs['mdsmap']['max_mds'])
        thrasher.start()
        thrashers.append(thrasher)
        ctx.ceph[config['cluster']].thrashers.append(thrasher)

    try:
        log.debug('Yielding')
        yield
    finally:
        log.info('joining mds_thrashers')
        # join every thrasher started above, not just the last one created
        for thrasher in thrashers:
            thrasher.stop()
            if thrasher.exception is not None:
                raise RuntimeError('error during thrashing')
            thrasher.join()
        log.info('done joining')
Code Example #53
File: ceph_fuse.py Project: remy3123/re-65-632
def task(ctx, config):
    """
    Mount/unmount a ``ceph-fuse`` client.

    The config is optional and defaults to mounting on all clients. If
    a config is given, it is expected to be a list of clients to do
    this operation on. This lets you e.g. set up one client with
    ``ceph-fuse`` and another with ``kclient``.

    ``brxnet`` should be a Private IPv4 Address range, default range is
    [192.168.0.0/16]

    Example that mounts all clients::

        tasks:
        - ceph:
        - ceph-fuse:
        - interactive:
        - brxnet: [192.168.0.0/16]

    Example that uses both ``kclient`` and ``ceph-fuse``::

        tasks:
        - ceph:
        - ceph-fuse: [client.0]
        - kclient: [client.1]
        - interactive:

    Example that enables valgrind::

        tasks:
        - ceph:
        - ceph-fuse:
            client.0:
              valgrind: [--tool=memcheck, --leak-check=full, --show-reachable=yes]
        - interactive:

    Example that stops an already-mounted client:

    ::

        tasks:
            - ceph:
            - ceph-fuse: [client.0]
            - ... do something that requires the FS mounted ...
            - ceph-fuse:
                client.0:
                    mounted: false
            - ... do something that requires the FS unmounted ...

    Example that adds more generous wait time for mount (for virtual machines)::

        tasks:
        - ceph:
        - ceph-fuse:
            client.0:
              mount_wait: 60 # default is 0, do not wait before checking /sys/
              mount_timeout: 120 # default is 30, give up if /sys/ is not populated
        - interactive:

    :param ctx: Context
    :param config: Configuration
    """
    log.info('Running ceph_fuse task...')

    if config is None:
        ids = misc.all_roles_of_type(ctx.cluster, 'client')
        client_roles = [f'client.{id_}' for id_ in ids]
        config = dict([r, dict()] for r in client_roles)
    elif isinstance(config, list):
        client_roles = config
        config = dict([r, dict()] for r in client_roles)
    elif isinstance(config, dict):
        client_roles = filter(lambda x: 'client.' in x, config.keys())
    else:
        raise ValueError(
            f"Invalid config object: {config} ({config.__class__})")
    log.info(f"config is {config}")

    clients = list(misc.get_clients(ctx=ctx, roles=client_roles))
    testdir = misc.get_testdir(ctx)
    all_mounts = getattr(ctx, 'mounts', {})
    mounted_by_me = {}
    skipped = {}
    remotes = set()

    brxnet = config.get("brxnet", None)

    # Construct any new FuseMount instances
    overrides = ctx.config.get('overrides', {}).get('ceph-fuse', {})
    top_overrides = dict(
        filter(lambda x: 'client.' not in x[0], overrides.items()))
    for id_, remote in clients:
        entity = f"client.{id_}"
        client_config = config.get(entity)
        if client_config is None:
            client_config = {}
        # top level overrides
        for k, v in top_overrides.items():
            if v is not None:
                client_config[k] = v
        # mount specific overrides
        client_config_overrides = overrides.get(entity)
        misc.deep_merge(client_config, client_config_overrides)
        log.info(f"{entity} config is {client_config}")

        remotes.add(remote)
        auth_id = client_config.get("auth_id", id_)
        cephfs_name = client_config.get("cephfs_name")

        skip = client_config.get("skip", False)
        if skip:
            skipped[id_] = skip
            continue

        if id_ not in all_mounts:
            fuse_mount = FuseMount(ctx=ctx,
                                   client_config=client_config,
                                   test_dir=testdir,
                                   client_id=auth_id,
                                   client_remote=remote,
                                   brxnet=brxnet,
                                   cephfs_name=cephfs_name)
            all_mounts[id_] = fuse_mount
        else:
            # Catch bad configs where someone has e.g. tried to use ceph-fuse and kcephfs for the same client
            assert isinstance(all_mounts[id_], FuseMount)

        if not config.get("disabled", False) and client_config.get(
                'mounted', True):
            mounted_by_me[id_] = {
                "config": client_config,
                "mount": all_mounts[id_]
            }

    ctx.mounts = all_mounts

    # Umount any pre-existing clients that we have not been asked to mount
    for client_id in set(all_mounts.keys()) - set(mounted_by_me.keys()) - set(
            skipped.keys()):
        mount = all_mounts[client_id]
        if mount.is_mounted():
            mount.umount_wait()

    for remote in remotes:
        FuseMount.cleanup_stale_netnses_and_bridge(remote)

    # Mount any clients we have been asked to (default to mount all)
    log.info('Mounting ceph-fuse clients...')
    for info in mounted_by_me.values():
        config = info["config"]
        mount_x = info['mount']
        if config.get("mount_path"):
            mount_x.cephfs_mntpt = config.get("mount_path")
        if config.get("mountpoint"):
            mount_x.hostfs_mntpt = config.get("mountpoint")
        mount_x.mount()

    for info in mounted_by_me.values():
        info["mount"].wait_until_mounted()

    try:
        yield all_mounts
    finally:
        log.info('Unmounting ceph-fuse clients...')

        for info in mounted_by_me.values():
            # Conditional because an inner context might have umounted it
            mount = info["mount"]
            if mount.is_mounted():
                mount.umount_wait()
        for remote in remotes:
            FuseMount.cleanup_stale_netnses_and_bridge(remote)
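
The mount bookkeeping above boils down to set arithmetic: anything mounted by an earlier task but neither requested now nor skipped gets unmounted. A tiny sketch of that rule (the function name is illustrative):

def clients_to_unmount(all_mounts, mounted_by_me, skipped):
    # keys are client ids; values don't matter for the decision
    return set(all_mounts) - set(mounted_by_me) - set(skipped)

# clients_to_unmount({'0': 'mnt0', '1': 'mnt1'}, {'0': {}}, {})  ->  {'1'}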
Code Example #54
def task(ctx, config):
    """
    Test radosgw-admin functionality through the RESTful interface
    """
    assert config is None or isinstance(config, list) \
        or isinstance(config, dict), \
        "task radosgw-admin-rest only supports a list or dictionary for configuration"
    all_clients = [
        'client.{id}'.format(id=id_)
        for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')
    ]
    if config is None:
        config = all_clients
    if isinstance(config, list):
        config = dict.fromkeys(config)
    clients = config.keys()

    # just use the first client...
    client = clients[0]

    ##
    admin_user = '******'
    admin_display_name = 'Ms. Admin User'
    admin_access_key = 'MH1WC2XQ1S8UISFDZC8W'
    admin_secret_key = 'dQyrTPA0s248YeN5bBv4ukvKU0kh54LWWywkrpoG'
    admin_caps = 'users=read, write; usage=read, write; buckets=read, write; zone=read, write'

    user1 = 'foo'
    user2 = 'fud'
    subuser1 = 'foo:foo1'
    subuser2 = 'foo:foo2'
    display_name1 = 'Foo'
    display_name2 = 'Fud'
    email = '*****@*****.**'
    access_key = '9te6NH5mcdcq0Tc5i8i1'
    secret_key = 'Ny4IOauQoL18Gp2zM7lC1vLmoawgqcYP/YGcWfXu'
    access_key2 = 'p5YnriCv1nAtykxBrupQ'
    secret_key2 = 'Q8Tk6Q/27hfbFSYdSkPtUqhqx1GgzvpXa4WARozh'
    swift_secret1 = 'gpS2G9RREMrnbqlp29PP2D36kgPR1tm72n5fPYfL'
    swift_secret2 = 'ri2VJQcKSYATOY6uaDUX7pxgkW+W1YmC6OCxPHwy'

    bucket_name = 'myfoo'

    # legend (test cases can be easily grep-ed out)
    # TESTCASE 'testname','object','method','operation','assertion'
    # TESTCASE 'create-admin-user','user','create','administrative user','succeeds'
    (err, out) = rgwadmin(ctx, client, [
        'user', 'create', '--uid', admin_user, '--display-name',
        admin_display_name, '--access-key', admin_access_key, '--secret',
        admin_secret_key, '--max-buckets', '0', '--caps', admin_caps
    ])
    logging.error(out)
    logging.error(err)
    assert not err

    (remote, ) = ctx.cluster.only(client).remotes.iterkeys()
    remote_host = remote.name.split('@')[1]
    admin_conn = boto.s3.connection.S3Connection(
        aws_access_key_id=admin_access_key,
        aws_secret_access_key=admin_secret_key,
        is_secure=False,
        port=7280,
        host=remote_host,
        calling_format=boto.s3.connection.OrdinaryCallingFormat(),
    )

    # TESTCASE 'info-nosuch','user','info','non-existent user','fails'
    (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {"uid": user1})
    assert ret == 404

    # TESTCASE 'create-ok','user','create','w/all valid info','succeeds'
    (ret, out) = rgwadmin_rest(
        admin_conn, ['user', 'create'], {
            'uid': user1,
            'display-name': display_name1,
            'email': email,
            'access-key': access_key,
            'secret-key': secret_key,
            'max-buckets': '4'
        })

    assert ret == 200

    # TESTCASE 'info-existing','user','info','existing user','returns correct info'
    (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid': user1})

    assert out['user_id'] == user1
    assert out['email'] == email
    assert out['display_name'] == display_name1
    assert len(out['keys']) == 1
    assert out['keys'][0]['access_key'] == access_key
    assert out['keys'][0]['secret_key'] == secret_key
    assert not out['suspended']

    # TESTCASE 'suspend-ok','user','suspend','active user','succeeds'
    (ret, out) = rgwadmin_rest(admin_conn, ['user', 'modify'], {
        'uid': user1,
        'suspended': True
    })
    assert ret == 200

    # TESTCASE 'suspend-suspended','user','suspend','suspended user','succeeds w/advisory'
    (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid': user1})
    assert ret == 200
    assert out['suspended']
    assert out['email'] == email

    # TESTCASE 're-enable','user','enable','suspended user','succeeds'
    (ret, out) = rgwadmin_rest(admin_conn, ['user', 'modify'], {
        'uid': user1,
        'suspended': 'false'
    })
    assert ret == 200

    # TESTCASE 'info-re-enabled','user','info','re-enabled user','no longer suspended'
    (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid': user1})
    assert ret == 200
    assert not out['suspended']

    # TESTCASE 'add-keys','key','create','w/valid info','succeeds'
    (ret, out) = rgwadmin_rest(admin_conn, ['key', 'create'], {
        'uid': user1,
        'access-key': access_key2,
        'secret-key': secret_key2
    })

    assert ret == 200

    # TESTCASE 'info-new-key','user','info','after key addition','returns all keys'
    (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid': user1})
    assert ret == 200
    assert len(out['keys']) == 2
    assert out['keys'][0]['access_key'] == access_key2 or out['keys'][1][
        'access_key'] == access_key2
    assert out['keys'][0]['secret_key'] == secret_key2 or out['keys'][1][
        'secret_key'] == secret_key2

    # TESTCASE 'rm-key','key','rm','newly added key','succeeds, key is removed'
    (ret, out) = rgwadmin_rest(admin_conn, ['key', 'rm'], {
        'uid': user1,
        'access-key': access_key2
    })

    assert ret == 200

    (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid': user1})

    assert len(out['keys']) == 1
    assert out['keys'][0]['access_key'] == access_key
    assert out['keys'][0]['secret_key'] == secret_key

    # TESTCASE 'add-swift-key','key','create','swift key','succeeds'
    (ret, out) = rgwadmin_rest(admin_conn, ['subuser', 'create'], {
        'subuser': subuser1,
        'secret-key': swift_secret1,
        'key-type': 'swift'
    })

    assert ret == 200

    # TESTCASE 'info-swift-key','user','info','after key addition','returns all keys'
    (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid': user1})
    assert ret == 200
    assert len(out['swift_keys']) == 1
    assert out['swift_keys'][0]['user'] == subuser1
    assert out['swift_keys'][0]['secret_key'] == swift_secret1

    # TESTCASE 'add-swift-subuser','key','create','swift sub-user key','succeeds'
    (ret, out) = rgwadmin_rest(admin_conn, ['subuser', 'create'], {
        'subuser': subuser2,
        'secret-key': swift_secret2,
        'key-type': 'swift'
    })

    assert ret == 200

    # TESTCASE 'info-swift-subuser','user','info','after key addition','returns all sub-users/keys'
    (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid': user1})
    assert ret == 200
    assert len(out['swift_keys']) == 2
    assert out['swift_keys'][0]['user'] == subuser2 or out['swift_keys'][1][
        'user'] == subuser2
    assert out['swift_keys'][0]['secret_key'] == swift_secret2 or out[
        'swift_keys'][1]['secret_key'] == swift_secret2

    # TESTCASE 'rm-swift-key1','key','rm','subuser','succeeds, one key is removed'
    (ret, out) = rgwadmin_rest(admin_conn, ['key', 'rm'], {
        'subuser': subuser1,
        'key-type': 'swift'
    })

    assert ret == 200

    (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid': user1})
    assert len(out['swift_keys']) == 1

    # TESTCASE 'rm-subuser','subuser','rm','subuser','success, subuser is removed'
    (ret, out) = rgwadmin_rest(admin_conn, ['subuser', 'rm'],
                               {'subuser': subuser1})

    assert ret == 200

    (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid': user1})
    assert len(out['subusers']) == 1

    # TESTCASE 'rm-subuser-with-keys','subuser','rm','subuser','succeeds, second subuser and its key are removed'
    (ret, out) = rgwadmin_rest(admin_conn, ['subuser', 'rm'], {
        'subuser': subuser2,
        'key-type': 'swift',
        'purge-keys': True
    })

    assert ret == 200

    (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid': user1})
    assert len(out['swift_keys']) == 0
    assert len(out['subusers']) == 0

    # TESTCASE 'bucket-stats','bucket','info','no session/buckets','succeeds, empty list'
    (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'info'], {'uid': user1})
    assert ret == 200
    assert len(out) == 0

    # connect to rgw
    connection = boto.s3.connection.S3Connection(
        aws_access_key_id=access_key,
        aws_secret_access_key=secret_key,
        is_secure=False,
        port=7280,
        host=remote_host,
        calling_format=boto.s3.connection.OrdinaryCallingFormat(),
    )

    # TESTCASE 'bucket-stats2','bucket','stats','no buckets','succeeds, empty list'
    (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'info'], {
        'uid': user1,
        'stats': True
    })
    assert ret == 200
    assert len(out) == 0

    # create a first bucket
    bucket = connection.create_bucket(bucket_name)

    # TESTCASE 'bucket-list','bucket','list','one bucket','succeeds, expected list'
    (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'info'], {'uid': user1})
    assert ret == 200
    assert len(out) == 1
    assert out[0] == bucket_name

    # TESTCASE 'bucket-stats3','bucket','stats','new empty bucket','succeeds, empty list'
    (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'info'], {
        'bucket': bucket_name,
        'stats': True
    })

    assert ret == 200
    assert out['owner'] == user1
    bucket_id = out['id']

    # TESTCASE 'bucket-stats4','bucket','stats','new empty bucket','succeeds, expected bucket ID'
    (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'info'], {
        'uid': user1,
        'stats': True
    })
    assert ret == 200
    assert len(out) == 1
    assert out[0][
        'id'] == bucket_id  # does it return the same ID twice in a row?

    # use some space
    key = boto.s3.key.Key(bucket)
    key.set_contents_from_string('one')

    # TESTCASE 'bucket-stats5','bucket','stats','after creating key','succeeds, lists one non-empty object'
    (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'info'], {
        'bucket': bucket_name,
        'stats': True
    })
    assert ret == 200
    assert out['id'] == bucket_id
    assert out['usage']['rgw.main']['num_objects'] == 1
    assert out['usage']['rgw.main']['size_kb'] > 0

    # reclaim it
    key.delete()

    # TESTCASE 'bucket unlink', 'bucket', 'unlink', 'unlink bucket from user', 'fails', 'access denied error'
    (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'unlink'], {
        'uid': user1,
        'bucket': bucket_name
    })

    assert ret == 200

    # create a second user to link the bucket to
    (ret, out) = rgwadmin_rest(
        admin_conn, ['user', 'create'], {
            'uid': user2,
            'display-name': display_name2,
            'access-key': access_key2,
            'secret-key': secret_key2,
            'max-buckets': '1',
        })

    assert ret == 200

    # try creating an object with the first user before the bucket is relinked
    denied = False
    key = boto.s3.key.Key(bucket)

    try:
        key.set_contents_from_string('two')
    except boto.exception.S3ResponseError:
        denied = True

    assert not denied

    # delete the object
    key.delete()

    # link the bucket to another user
    (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'link'], {
        'uid': user2,
        'bucket': bucket_name
    })

    assert ret == 200

    # try creating an object with the first user which should cause an error
    key = boto.s3.key.Key(bucket)

    try:
        key.set_contents_from_string('three')
    except boto.exception.S3ResponseError:
        denied = True

    assert denied

    # relink the bucket to the first user and delete the second user
    (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'link'], {
        'uid': user1,
        'bucket': bucket_name
    })
    assert ret == 200

    (ret, out) = rgwadmin_rest(admin_conn, ['user', 'rm'], {'uid': user2})
    assert ret == 200

    # TESTCASE 'object-rm', 'object', 'rm', 'remove object', 'succeeds, object is removed'

    # upload an object
    object_name = 'four'
    key = boto.s3.key.Key(bucket, object_name)
    key.set_contents_from_string(object_name)

    # now delete it
    (ret, out) = rgwadmin_rest(admin_conn, ['object', 'rm'], {
        'bucket': bucket_name,
        'object': object_name
    })
    assert ret == 200

    # TESTCASE 'bucket-stats6','bucket','stats','after deleting key','succeeds, lists one no objects'
    (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'info'], {
        'bucket': bucket_name,
        'stats': True
    })
    assert ret == 200
    assert out['id'] == bucket_id
    assert out['usage']['rgw.main']['num_objects'] == 0

    # create a bucket for deletion stats
    useless_bucket = connection.create_bucket('useless_bucket')
    useless_key = useless_bucket.new_key('useless_key')
    useless_key.set_contents_from_string('useless string')

    # delete it
    useless_key.delete()
    useless_bucket.delete()

    # wait for the statistics to flush
    time.sleep(60)

    # need to wait for all usage data to get flushed, should take up to 30 seconds
    timestamp = time.time()
    while time.time() - timestamp <= (20 * 60):  # wait up to 20 minutes
        (ret, out) = rgwadmin_rest(
            admin_conn, ['usage', 'show'],
            {'categories': 'delete_obj'
             })  # last operation we did is delete obj, wait for it to flush

        if get_user_successful_ops(out, user1) > 0:
            break
        time.sleep(1)

    assert time.time() - timestamp <= (20 * 60)

    # TESTCASE 'usage-show' 'usage' 'show' 'all usage' 'succeeds'
    (ret, out) = rgwadmin_rest(admin_conn, ['usage', 'show'])
    assert ret == 200
    assert len(out['entries']) > 0
    assert len(out['summary']) > 0
    user_summary = get_user_summary(out, user1)
    total = user_summary['total']
    assert total['successful_ops'] > 0

    # TESTCASE 'usage-show2' 'usage' 'show' 'user usage' 'succeeds'
    (ret, out) = rgwadmin_rest(admin_conn, ['usage', 'show'], {'uid': user1})
    assert ret == 200
    assert len(out['entries']) > 0
    assert len(out['summary']) > 0
    user_summary = out['summary'][0]
    for entry in user_summary['categories']:
        assert entry['successful_ops'] > 0
    assert user_summary['user'] == user1

    # TESTCASE 'usage-show3' 'usage' 'show' 'user usage categories' 'succeeds'
    test_categories = [
        'create_bucket', 'put_obj', 'delete_obj', 'delete_bucket'
    ]
    for cat in test_categories:
        (ret, out) = rgwadmin_rest(admin_conn, ['usage', 'show'], {
            'uid': user1,
            'categories': cat
        })
        assert ret == 200
        assert len(out['summary']) > 0
        user_summary = out['summary'][0]
        assert user_summary['user'] == user1
        assert len(user_summary['categories']) == 1
        entry = user_summary['categories'][0]
        assert entry['category'] == cat
        assert entry['successful_ops'] > 0

    # TESTCASE 'usage-trim' 'usage' 'trim' 'user usage' 'succeeds, usage removed'
    (ret, out) = rgwadmin_rest(admin_conn, ['usage', 'trim'], {'uid': user1})
    assert ret == 200
    (ret, out) = rgwadmin_rest(admin_conn, ['usage', 'show'], {'uid': user1})
    assert ret == 200
    assert len(out['entries']) == 0
    assert len(out['summary']) == 0

    # TESTCASE 'user-suspend2','user','suspend','existing user','succeeds'
    (ret, out) = rgwadmin_rest(admin_conn, ['user', 'modify'], {
        'uid': user1,
        'suspended': True
    })
    assert ret == 200

    # TESTCASE 'user-suspend3','user','suspend','suspended user','cannot write objects'
    try:
        key = boto.s3.key.Key(bucket)
        key.set_contents_from_string('five')
    except boto.exception.S3ResponseError as e:
        assert e.status == 403

    # TESTCASE 'user-renable2','user','enable','suspended user','succeeds'
    (ret, out) = rgwadmin_rest(admin_conn, ['user', 'modify'], {
        'uid': user1,
        'suspended': 'false'
    })
    assert ret == 200

    # TESTCASE 'user-renable3','user','enable','reenabled user','can write objects'
    key = boto.s3.key.Key(bucket)
    key.set_contents_from_string('six')

    # TESTCASE 'garbage-list', 'garbage', 'list', 'get list of objects ready for garbage collection'

    # create an object large enough to be split into multiple parts
    test_string = 'foo' * 10000000

    big_key = boto.s3.key.Key(bucket)
    big_key.set_contents_from_string(test_string)

    # now delete the head
    big_key.delete()

    # TESTCASE 'rm-user-buckets','user','rm','existing user','fails, still has buckets'
    (ret, out) = rgwadmin_rest(admin_conn, ['user', 'rm'], {'uid': user1})
    assert ret == 409

    # delete should fail because ``key`` still exists
    try:
        bucket.delete()
    except boto.exception.S3ResponseError as e:
        assert e.status == 409

    key.delete()
    bucket.delete()

    # TESTCASE 'policy', 'bucket', 'policy', 'get bucket policy', 'returns S3 policy'
    bucket = connection.create_bucket(bucket_name)

    # create an object
    key = boto.s3.key.Key(bucket)
    key.set_contents_from_string('seven')

    # should be private already but guarantee it
    key.set_acl('private')

    (ret, out) = rgwadmin_rest(admin_conn, ['policy', 'show'], {
        'bucket': bucket.name,
        'object': key.key
    })
    assert ret == 200

    acl = key.get_xml_acl()
    assert acl == out.strip('\n')

    # add another grantee by making the object public read
    key.set_acl('public-read')

    (ret, out) = rgwadmin_rest(admin_conn, ['policy', 'show'], {
        'bucket': bucket.name,
        'object': key.key
    })
    assert ret == 200

    acl = key.get_xml_acl()
    assert acl == out.strip('\n')

    # TESTCASE 'rm-bucket', 'bucket', 'rm', 'bucket with objects', 'succeeds'
    bucket = connection.create_bucket(bucket_name)
    key_name = ['eight', 'nine', 'ten', 'eleven']
    for i in range(4):
        key = boto.s3.key.Key(bucket)
        key.set_contents_from_string(key_name[i])

    (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'rm'], {
        'bucket': bucket_name,
        'purge-objects': True
    })
    assert ret == 200

    # TESTCASE 'caps-add', 'caps', 'add', 'add user cap', 'succeeds'
    caps = 'usage=read'
    (ret, out) = rgwadmin_rest(admin_conn, ['caps', 'add'], {
        'uid': user1,
        'user-caps': caps
    })
    assert ret == 200
    assert out[0]['perm'] == 'read'

    # TESTCASE 'caps-rm', 'caps', 'rm', 'remove existing cap from user', 'succeeds'
    (ret, out) = rgwadmin_rest(admin_conn, ['caps', 'rm'], {
        'uid': user1,
        'user-caps': caps
    })
    assert ret == 200
    assert not out

    # TESTCASE 'rm-user','user','rm','existing user','fails, still has buckets'
    bucket = connection.create_bucket(bucket_name)
    key = boto.s3.key.Key(bucket)

    (ret, out) = rgwadmin_rest(admin_conn, ['user', 'rm'], {'uid': user1})
    assert ret == 409

    # TESTCASE 'rm-user2', 'user', 'rm', 'user with data', 'succeeds'
    bucket = connection.create_bucket(bucket_name)
    key = boto.s3.key.Key(bucket)
    key.set_contents_from_string('twelve')

    (ret, out) = rgwadmin_rest(admin_conn, ['user', 'rm'], {
        'uid': user1,
        'purge-data': True
    })
    assert ret == 200

    # TESTCASE 'rm-user3','user','info','deleted user','fails'
    (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid': user1})
    assert ret == 404
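
All of the checks above go through the rgwadmin_rest helper defined earlier in this suite, which issues requests against the radosgw admin API and returns an (http_status, decoded_body) pair. A rough, GET-only sketch of that shape (the endpoint URL and the unsigned `requests` call are illustrative assumptions; the real helper signs every request with the admin user's S3 credentials and maps subcommands such as 'trim' and 'rm' onto other HTTP verbs):

import requests  # assumed available; request signing is omitted for brevity

def rgwadmin_rest(endpoint, cmd, params):
    # GET-only sketch: hit the radosgw admin API, e.g.
    # GET /admin/usage?uid=foo&format=json, and return (status, body).
    resource = 'admin/{0}'.format(cmd[0])
    query = dict(params, format='json')
    r = requests.get('{0}/{1}'.format(endpoint, resource), params=query)
    try:
        body = r.json()
    except ValueError:
        body = r.text
    return r.status_code, body

# Hypothetical usage mirroring the assertions above:
# ret, out = rgwadmin_rest('http://localhost:7280', ['usage', 'show'], {'uid': 'foo'})
# assert ret == 200
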
Code example #55
0
def task(ctx, config):
    """
    Given a Ceph cluster that has already been set up, exercise the migration
    of the CephFS journal from an older format to the latest format.  On
    successful completion the filesystem will be running with a journal
    in the new format.

    Optionally specify which client to use like this:

    - mds_journal_migration:
        client: client.0

    """
    if not hasattr(ctx, 'ceph'):
        raise RuntimeError("This task must be nested in 'ceph' task")

    if not hasattr(ctx, 'mounts'):
        raise RuntimeError("This task must be nested inside 'kclient' or 'ceph_fuse' task")

    # Determine which client we will use
    if config and 'client' in config:
        # Use client specified in config
        client_role = config['client']
        client_list = list(misc.get_clients(ctx, [client_role]))
        try:
            client_id = client_list[0][0]
        except IndexError:
            raise RuntimeError("Client role '{0}' not found".format(client_role))
    else:
        # Pick one arbitrary client to use
        client_list = list(misc.all_roles_of_type(ctx.cluster, 'client'))
        try:
            client_id = client_list[0]
        except IndexError:
            raise RuntimeError("This task requires at least one client")

    fs = Filesystem(ctx)
    ctx.fs = fs
    old_journal_version = JOURNAL_FORMAT_LEGACY
    new_journal_version = JOURNAL_FORMAT_RESILIENT

    fs.set_ceph_conf('mds', 'mds journal format', old_journal_version)

    # Create a filesystem using the older journal format.
    for mount in ctx.mounts.values():
        mount.umount_wait()
    fs.mds_stop()
    fs.reset()
    fs.mds_restart()

    # Do some client work so that the log is populated with something.
    mount = ctx.mounts[client_id]
    with mount.mounted():
        mount.create_files()
        mount.check_files()  # sanity, this should always pass

        # Run a more substantial workunit so that the length of the log to be
        # converted will span at least a few segments
        workunit(ctx, {
            'clients': {
                "client.{0}".format(client_id): ["suites/fsstress.sh"],
            },
            "timeout": "3h"
        })

    # Modify the ceph.conf to ask the MDS to use the new journal format.
    fs.set_ceph_conf('mds', 'mds journal format', new_journal_version)

    # Restart the MDS and wait for all daemons to come back up into a
    # valid state.
    fs.mds_fail_restart()
    fs.wait_for_daemons()

    # Check that files created in the initial client workload are still visible
    # in a client mount.
    with mount.mounted():
        mount.check_files()

    # Verify that the journal really has been rewritten.
    journal_version = fs.get_journal_version()
    if journal_version != new_journal_version:
        raise RuntimeError("Journal was not upgraded, version should be {0} but is {1}".format(
            new_journal_version, journal_version()
        ))

    # Verify that cephfs-journal-tool can now read the rewritten journal
    proc = mount.client_remote.run(
        args=["cephfs-journal-tool", "journal", "inspect"],
        stdout=StringIO())
    if not proc.stdout.getvalue().strip().endswith(": OK"):
        raise RuntimeError("Unexpected journal-tool result: '{0}'".format(
            proc.stdout.getvalue()
        ))

    mount.client_remote.run(
        args=["sudo", "cephfs-journal-tool", "event", "get", "json", "--path", "/tmp/journal.json"])
    proc = mount.client_remote.run(
        args=[
            "python",
            "-c",
            "import json; print len(json.load(open('/tmp/journal.json')))"
        ],
        stdout=StringIO())
    event_count = int(proc.stdout.getvalue().strip())
    if event_count < 1000:
        # Approximate value of "lots", expected from having run fsstress
        raise RuntimeError("Unexpectedly few journal events: {0}".format(event_count))

    # Leave all MDSs and clients running for any child tasks
    for mount in ctx.mounts.values():
        mount.mount()
        mount.wait_until_mounted()

    yield
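
The event-count check above shells out to a Python one-liner against the JSON dump written by cephfs-journal-tool. A standalone equivalent, assuming the dump is a top-level JSON list of event objects:

import json

def count_journal_events(path='/tmp/journal.json'):
    # Load the dump written by `cephfs-journal-tool event get json --path ...`
    # and return the number of events it contains.
    with open(path) as f:
        return len(json.load(f))
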
Code example #56
0
File: rgw.py Project: perrynzhou/ceph-note
def task(ctx, config):
    """
    For example, to run rgw on all clients::

        tasks:
        - ceph:
        - rgw:

    To only run on certain clients::

        tasks:
        - ceph:
        - rgw: [client.0, client.3]

    or

        tasks:
        - ceph:
        - rgw:
            client.0:
            client.3:

    To run radosgw through valgrind:

        tasks:
        - ceph:
        - rgw:
            client.0:
              valgrind: [--tool=memcheck]
            client.3:
              valgrind: [--tool=memcheck]

    To configure data or index pool pg_size:

        overrides:
          rgw:
            data_pool_pg_size: 256
            index_pool_pg_size: 128
    """
    if config is None:
        config = dict(
            ('client.{id}'.format(id=id_), None)
            for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client'))
    elif isinstance(config, list):
        config = dict((name, None) for name in config)

    clients = config.keys()  # http://tracker.ceph.com/issues/20417

    overrides = ctx.config.get('overrides', {})
    teuthology.deep_merge(config, overrides.get('rgw', {}))

    ctx.rgw = argparse.Namespace()

    ctx.rgw.ec_data_pool = bool(config.pop('ec-data-pool', False))
    ctx.rgw.erasure_code_profile = config.pop('erasure_code_profile', {})
    ctx.rgw.cache_pools = bool(config.pop('cache-pools', False))
    ctx.rgw.frontend = config.pop('frontend', 'civetweb')
    ctx.rgw.compression_type = config.pop('compression type', None)
    ctx.rgw.storage_classes = config.pop('storage classes', None)
    default_cert = config.pop('ssl certificate', None)
    ctx.rgw.data_pool_pg_size = config.pop('data_pool_pg_size', 64)
    ctx.rgw.index_pool_pg_size = config.pop('index_pool_pg_size', 64)
    ctx.rgw.config = config

    log.debug("config is {}".format(config))
    log.debug("client list is {}".format(clients))

    ctx.rgw.role_endpoints = assign_endpoints(ctx, config, default_cert)

    subtasks = [
        lambda: create_pools(ctx=ctx, clients=clients),
    ]
    if ctx.rgw.compression_type:
        subtasks.extend([
            lambda: configure_compression(
                ctx=ctx, clients=clients, compression=ctx.rgw.compression_type
            ),
        ])
    if ctx.rgw.storage_classes:
        subtasks.extend([
            lambda: configure_storage_classes(ctx=ctx,
                                              clients=clients,
                                              storage_classes=ctx.rgw.storage_classes),
        ])
    subtasks.extend([
        lambda: start_rgw(ctx=ctx, config=config, clients=clients),
    ])

    with contextutil.nested(*subtasks):
        yield
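
The task builds its setup steps as a list of zero-argument lambdas and hands them to contextutil.nested(), which enters each resulting context manager in order and unwinds them in reverse on exit. A minimal Python 3 sketch of the same pattern, using the standard library's contextlib.ExitStack as a stand-in for teuthology's contextutil.nested:

import contextlib

@contextlib.contextmanager
def step(name):
    # Stand-in for a subtask such as create_pools() or start_rgw().
    print('setting up %s' % name)
    try:
        yield
    finally:
        print('tearing down %s' % name)

# Deferring construction behind lambdas lets the task decide ordering (and
# whether a step runs at all) before any context manager is entered.
subtasks = [lambda: step('create_pools'), lambda: step('start_rgw')]

with contextlib.ExitStack() as stack:
    for factory in subtasks:
        stack.enter_context(factory())
    print('cluster ready; the nested task body runs here')
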
Code example #57
0
File: rgw.py Project: lixiaoy1/rdma-cm-from-amirv
def task(ctx, config):
    """
    Either configure apache to run a rados gateway, or use the built-in
    civetweb server.
    Only one should be run per machine, since it uses a hard-coded port for
    now.

    For example, to run rgw on all clients::

        tasks:
        - ceph:
        - rgw:

    To only run on certain clients::

        tasks:
        - ceph:
        - rgw: [client.0, client.3]

    or

        tasks:
        - ceph:
        - rgw:
            client.0:
            client.3:

    You can adjust the idle timeout for fastcgi (default is 30 seconds):

        tasks:
        - ceph:
        - rgw:
            client.0:
              idle_timeout: 90

    To run radosgw through valgrind:

        tasks:
        - ceph:
        - rgw:
            client.0:
              valgrind: [--tool=memcheck]
            client.3:
              valgrind: [--tool=memcheck]

    To use civetweb instead of apache:

        tasks:
        - ceph:
        - rgw:
          - client.0
        overrides:
          rgw:
            frontend: civetweb

    Note that without a modified fastcgi module (e.g. with the default
    one on CentOS), you must set rgw print continue = false in ceph.conf::

        tasks:
        - ceph:
            conf:
              global:
                rgw print continue: false
        - rgw: [client.0]

    To use mod_proxy_fcgi instead of mod_fastcgi:

        overrides:
          rgw:
            use_fcgi: true

    To run rgws for multiple regions or zones, describe the regions
    and their zones in a regions section. The endpoints will be
    generated by this task. Each client must have a region, zone,
    and pools assigned in ceph.conf::

        tasks:
        - install:
        - ceph:
            conf:
              client.0:
                rgw region: foo
                rgw zone: foo-1
                rgw region root pool: .rgw.rroot.foo
                rgw zone root pool: .rgw.zroot.foo
                rgw log meta: true
                rgw log data: true
              client.1:
                rgw region: bar
                rgw zone: bar-master
                rgw region root pool: .rgw.rroot.bar
                rgw zone root pool: .rgw.zroot.bar
                rgw log meta: true
                rgw log data: true
              client.2:
                rgw region: bar
                rgw zone: bar-secondary
                rgw region root pool: .rgw.rroot.bar
                rgw zone root pool: .rgw.zroot.bar-secondary
        - rgw:
            default_idle_timeout: 30
            ec-data-pool: true
            erasure_code_profile:
              k: 2
              m: 1
              ruleset-failure-domain: osd
            realm: foo
            regions:
              foo:
                api name: api_name # default: region name
                is master: true    # default: false
                master zone: foo-1 # default: first zone
                zones: [foo-1]
                log meta: true
                log data: true
                placement targets: [target1, target2] # default: []
                default placement: target2            # default: ''
              bar:
                api name: bar-api
                zones: [bar-master, bar-secondary]
            client.0:
              system user:
                name: foo-system
                access key: X2IYPSTY1072DDY1SJMC
                secret key: YIMHICpPvT+MhLTbSsiBJ1jQF15IFvJA8tgwJEcm
            client.1:
              system user:
                name: bar1
                access key: Y2IYPSTY1072DDY1SJMC
                secret key: XIMHICpPvT+MhLTbSsiBJ1jQF15IFvJA8tgwJEcm
            client.2:
              system user:
                name: bar2
                access key: Z2IYPSTY1072DDY1SJMC
                secret key: ZIMHICpPvT+MhLTbSsiBJ1jQF15IFvJA8tgwJEcm
    """
    if config is None:
        config = dict(
            ('client.{id}'.format(id=id_), None)
            for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client'))
    elif isinstance(config, list):
        config = dict((name, None) for name in config)

    overrides = ctx.config.get('overrides', {})
    teuthology.deep_merge(config, overrides.get('rgw', {}))

    regions = config.pop('regions', {})
    realm = config.pop('realm', None)

    role_endpoints = assign_ports(ctx, config)
    ctx.rgw = argparse.Namespace()
    ctx.rgw.role_endpoints = role_endpoints
    ctx.rgw.regions = regions
    ctx.rgw.realm = realm

    ctx.rgw.ec_data_pool = bool(config.pop('ec-data-pool', False))
    ctx.rgw.erasure_code_profile = config.pop('erasure_code_profile', {})
    ctx.rgw.default_idle_timeout = int(config.pop('default_idle_timeout', 30))
    ctx.rgw.cache_pools = bool(config.pop('cache-pools', False))
    ctx.rgw.frontend = config.pop('frontend', 'civetweb')

    ctx.rgw.use_fastcgi = not config.pop('use_fcgi', False)
    if not ctx.rgw.use_fastcgi:
        log.info("Using mod_proxy_fcgi instead of mod_fastcgi...")

    ctx.rgw.compression_type = config.pop('compression type', None)

    subtasks = [
        lambda: create_nonregion_pools(ctx=ctx, config=config, regions=regions),
    ]
    log.debug('Queued creation of non-region pools')

    multisite = len(regions) > 1

    if not multisite:
        for zonegroup, zonegroup_info in regions.iteritems():
            log.debug("zonegroup_info =%r", zonegroup_info)
            if len(zonegroup_info['zones']) > 1:
                multisite = True
                break

    log.debug('multisite %s', multisite)

    multi_cluster = False
    if multisite:
        prev_cluster_name = None
        roles = ctx.config['roles']
        # check if any roles have a different cluster_name from each other
        for lst in roles:
            for role in lst:
                cluster_name, daemon_type, client_id = teuthology.split_role(
                    role)
                if cluster_name != prev_cluster_name and prev_cluster_name is not None:
                    multi_cluster = True
                    break
                prev_cluster_name = cluster_name
            if multi_cluster:
                break

    log.debug('multi_cluster %s', multi_cluster)
    ctx.rgw.config = config
    master_client = None

    if multi_cluster:
        log.debug('multi cluster run')

        master_client = get_config_master_client(ctx=ctx,
                                                 config=config,
                                                 regions=regions)
        log.debug('master_client %r', master_client)
        subtasks.extend([
            lambda: configure_multisite_regions_and_zones(
                ctx=ctx,
                config=config,
                regions=regions,
                role_endpoints=role_endpoints,
                realm=realm,
                master_client=master_client,
            )
        ])

        subtasks.extend([
            lambda: configure_users_for_client(
                ctx=ctx,
                config=config,
                client=master_client,
                everywhere=False,
            ),
        ])

        if ctx.rgw.frontend == 'apache':
            subtasks.insert(
                0, lambda: create_apache_dirs(
                    ctx=ctx, config=config, on_client=master_client))
            subtasks.extend([
                lambda: ship_apache_configs(ctx=ctx,
                                            config=config,
                                            role_endpoints=role_endpoints,
                                            on_client=master_client),
                lambda: start_apache(
                    ctx=ctx, config=config, on_client=master_client),
                lambda: start_rgw(
                    ctx=ctx, config=config, on_client=master_client),
            ])
        elif ctx.rgw.frontend == 'civetweb':
            subtasks.extend([
                lambda: start_rgw(
                    ctx=ctx, config=config, on_client=master_client),
            ])
        else:
            raise ValueError("frontend must be 'apache' or 'civetweb'")

        subtasks.extend([
            lambda: pull_configuration(ctx=ctx,
                                       config=config,
                                       regions=regions,
                                       role_endpoints=role_endpoints,
                                       realm=realm,
                                       master_client=master_client),
        ])

        subtasks.extend([
            lambda: configure_users_for_client(
                ctx=ctx, config=config, client=master_client, everywhere=True),
        ])

        if ctx.rgw.frontend == 'apache':
            subtasks.insert(
                0, lambda: create_apache_dirs(ctx=ctx,
                                              config=config,
                                              on_client=None,
                                              except_client=master_client))
            subtasks.extend([
                lambda: ship_apache_configs(
                    ctx=ctx,
                    config=config,
                    role_endpoints=role_endpoints,
                    on_client=None,
                    except_client=master_client,
                ),
                lambda: start_apache(
                    ctx=ctx,
                    config=config,
                    on_client=None,
                    except_client=master_client,
                ),
                lambda: start_rgw(ctx=ctx,
                                  config=config,
                                  on_client=None,
                                  except_client=master_client),
            ])
        elif ctx.rgw.frontend == 'civetweb':
            subtasks.extend([
                lambda: start_rgw(ctx=ctx,
                                  config=config,
                                  on_client=None,
                                  except_client=master_client),
            ])
        else:
            raise ValueError("frontend must be 'apache' or 'civetweb'")

    else:
        log.debug('single cluster run')
        subtasks.extend([
            lambda: configure_regions_and_zones(
                ctx=ctx,
                config=config,
                regions=regions,
                role_endpoints=role_endpoints,
                realm=realm,
            ),
            lambda: configure_users(
                ctx=ctx,
                config=config,
                everywhere=True,
            ),
        ])
        if ctx.rgw.frontend == 'apache':
            subtasks.insert(0,
                            lambda: create_apache_dirs(ctx=ctx, config=config))
            subtasks.extend([
                lambda: ship_apache_configs(
                    ctx=ctx, config=config, role_endpoints=role_endpoints),
                lambda: start_apache(ctx=ctx, config=config),
                lambda: start_rgw(ctx=ctx, config=config),
            ])
        elif ctx.rgw.frontend == 'civetweb':
            subtasks.extend([
                lambda: start_rgw(ctx=ctx, config=config),
            ])
        else:
            raise ValueError("frontend must be 'apache' or 'civetweb'")

    log.info("Using %s as radosgw frontend", ctx.rgw.frontend)
    with contextutil.nested(*subtasks):
        yield
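
The multi-cluster detection above leans on teuthology.split_role() decomposing a role name into a (cluster, type, id) triple, where the cluster prefix is optional. A self-contained sketch of that scan; the split_role stand-in here is illustrative, not teuthology's actual implementation:

def split_role(role, default_cluster='ceph'):
    # Illustrative stand-in: roles look like 'client.0' or, with an
    # explicit cluster prefix, 'backup.client.0'.
    parts = role.split('.')
    if len(parts) == 3:
        return tuple(parts)
    return (default_cluster, parts[0], parts[1])

def is_multi_cluster(roles):
    # True as soon as two roles name different clusters.
    clusters = set(split_role(role)[0] for host in roles for role in host)
    return len(clusters) > 1

print(is_multi_cluster([['mon.a', 'client.0'], ['client.1']]))  # False
print(is_multi_cluster([['client.0'], ['backup.client.1']]))    # True
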
Code example #58
0
File: s3tests.py Project: smanjara/ceph
def task(ctx, config):
    """
    Run the s3-tests suite against rgw.

    To run all tests on all clients::

        tasks:
        - ceph:
        - rgw:
        - s3tests:

    To restrict testing to particular clients::

        tasks:
        - ceph:
        - rgw: [client.0]
        - s3tests: [client.0]

    To run against a server on client.1 and increase the boto timeout to 10m::

        tasks:
        - ceph:
        - rgw: [client.1]
        - s3tests:
            client.0:
              rgw_server: client.1
              idle_timeout: 600

    To pass extra arguments to nose (e.g. to run a certain test)::

        tasks:
        - ceph:
        - rgw: [client.0]
        - s3tests:
            client.0:
              extra_args: ['test_s3:test_object_acl_grand_public_read']
            client.1:
              extra_args: ['--exclude', 'test_100_continue']

    To run the sts-tests, set a config variable named 'sts_tests' to 'True', as follows::

        tasks:
        - ceph:
        - rgw: [client.0]
        - s3tests:
            client.0:
              sts_tests: True
              rgw_server: client.0

    To run the cloud-transition tests, set a config variable named 'cloudtier_tests' to 'True', as follows::

        tasks:
        - ceph:
        - rgw: [client.0, client.1]
        - s3tests:
            client.0:
              cloudtier_tests: True
              rgw_server: client.0

    """
    assert hasattr(ctx, 'rgw'), 's3tests must run after the rgw task'
    assert config is None or isinstance(config, list) \
        or isinstance(config, dict), \
        "task s3tests only supports a list or dictionary for configuration"
    all_clients = ['client.{id}'.format(id=id_)
                   for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
    if config is None:
        config = all_clients
    if isinstance(config, list):
        config = dict.fromkeys(config)
    clients = config.keys()

    overrides = ctx.config.get('overrides', {})
    # merge each client section, not the top level.
    for client in config.keys():
        if not config[client]:
            config[client] = {}
        teuthology.deep_merge(config[client], overrides.get('s3tests', {}))

    log.debug('s3tests config is %s', config)

    s3tests_conf = {}

    for client, client_config in config.items():
        if 'sts_tests' in client_config:
            ctx.sts_variable = True
        else:
            ctx.sts_variable = False

        if 'cloudtier_tests' in client_config:
            ctx.cloudtier_variable = True
        else:
            ctx.cloudtier_variable = False

        if 'dbstore_tests' in client_config:
            ctx.dbstore_variable = True
        else:
            ctx.dbstore_variable = False

        # This is the structure of the config file used to run webidentity_test (an sts-test)
        if ctx.sts_variable and "TOKEN" in os.environ:
            for client in clients:
                endpoint = ctx.rgw.role_endpoints.get(client)
                assert endpoint, 's3tests: no rgw endpoint for {}'.format(client)

                s3tests_conf[client] = ConfigObj(
                    indent_type='',
                    infile={
                        'DEFAULT':
                            {
                            'port'      : endpoint.port,
                            'is_secure' : endpoint.cert is not None,
                            'api_name'  : 'default',
                            },
                        'fixtures'   : {},
                        's3 main'    : {},
                        's3 alt'     : {},
                        's3 tenant'  : {},
                        'iam'        : {},
                        'webidentity': {},
                    }
                )

        elif ctx.sts_variable:
            # This is the structure of the config file used to run assume_role_test and get_session_token_test (sts-tests)
            for client in clients:
                endpoint = ctx.rgw.role_endpoints.get(client)
                assert endpoint, 's3tests: no rgw endpoint for {}'.format(client)

                s3tests_conf[client] = ConfigObj(
                    indent_type='',
                    infile={
                        'DEFAULT':
                            {
                            'port'      : endpoint.port,
                            'is_secure' : endpoint.cert is not None,
                            'api_name'  : 'default',
                            },
                        'fixtures'   : {},
                        's3 main'    : {},
                        's3 alt'     : {},
                        's3 tenant'  : {},
                        'iam'        : {},
                        }
                    )

        elif ctx.cloudtier_variable:
            # This is the structure of the config file used to run the cloud-transition tests
            for client in clients:
                endpoint = ctx.rgw.role_endpoints.get(client)
                assert endpoint, 's3tests: no rgw endpoint for {}'.format(client)

                s3tests_conf[client] = ConfigObj(
                    indent_type='',
                    infile={
                        'DEFAULT':
                            {
                            'port'      : endpoint.port,
                            'is_secure' : endpoint.cert is not None,
                            'api_name'  : 'default',
                            },
                        'fixtures'   : {},
                        's3 main'    : {},
                        's3 alt'     : {},
                        's3 tenant'  : {},
                        's3 cloud'   : {},
                        }
                    )
        else:
            # This is the structure of the config file used to run the normal s3-tests
            for client in clients:
                endpoint = ctx.rgw.role_endpoints.get(client)
                assert endpoint, 's3tests: no rgw endpoint for {}'.format(client)

                s3tests_conf[client] = ConfigObj(
                    indent_type='',
                    infile={
                        'DEFAULT':
                            {
                            'port'      : endpoint.port,
                            'is_secure' : endpoint.cert is not None,
                            'api_name'  : 'default',
                            },
                        'fixtures'   : {},
                        's3 main'    : {},
                        's3 alt'     : {},
                        's3 tenant'  : {},
                        }
                    )

    with contextutil.nested(
        lambda: download(ctx=ctx, config=config),
        lambda: create_users(ctx=ctx, config=dict(
                clients=clients,
                s3tests_conf=s3tests_conf,
                )),
        lambda: configure(ctx=ctx, config=dict(
                clients=config,
                s3tests_conf=s3tests_conf,
                )),
        lambda: run_tests(ctx=ctx, config=config),
        lambda: scan_for_leaked_encryption_keys(ctx=ctx, config=config),
        ):
        pass
    yield
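
Each per-client section above is accumulated in a ConfigObj so it can later be rendered into the INI-style s3tests.conf that the suite consumes. A small sketch of how such a structure serializes, assuming the configobj package:

from configobj import ConfigObj  # assumes the configobj package is installed

conf = ConfigObj(indent_type='', infile={
    'DEFAULT': {'port': 7280, 'is_secure': 'no'},
    'fixtures': {},
    's3 main': {},
})
# With no output file configured, write() returns the rendered lines,
# e.g. '[DEFAULT]', 'port = 7280', ...
for line in conf.write():
    print(line)
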
Code example #59
0
File: s3tests.py Project: Josephucas/ceph-old
def task(ctx, config):
    """
    Run the s3-tests suite against rgw.

    To run all tests on all clients::

        tasks:
        - ceph:
        - rgw:
        - s3tests:

    To restrict testing to particular clients::

        tasks:
        - ceph:
        - rgw: [client.0]
        - s3tests: [client.0]

    To run against a server on client.1 and increase the boto timeout to 10m::

        tasks:
        - ceph:
        - rgw: [client.1]
        - s3tests:
            client.0:
              rgw_server: client.1
              idle_timeout: 600

    To pass extra arguments to nose (e.g. to run a certain test)::

        tasks:
        - ceph:
        - rgw: [client.0]
        - s3tests:
            client.0:
              extra_args: ['test_s3:test_object_acl_grand_public_read']
            client.1:
              extra_args: ['--exclude', 'test_100_continue']
    """
    assert hasattr(ctx, 'rgw'), 's3tests must run after the rgw task'
    assert config is None or isinstance(config, list) \
        or isinstance(config, dict), \
        "task s3tests only supports a list or dictionary for configuration"
    all_clients = [
        'client.{id}'.format(id=id_)
        for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')
    ]
    if config is None:
        config = all_clients
    if isinstance(config, list):
        config = dict.fromkeys(config)
    clients = config.keys()

    overrides = ctx.config.get('overrides', {})
    # merge each client section, not the top level.
    for client in config.iterkeys():
        if not config[client]:
            config[client] = {}
        teuthology.deep_merge(config[client], overrides.get('s3tests', {}))

    log.debug('s3tests config is %s', config)

    s3tests_conf = {}
    for client in clients:
        endpoint = ctx.rgw.role_endpoints.get(client)
        assert endpoint, 's3tests: no rgw endpoint for {}'.format(client)

        s3tests_conf[client] = ConfigObj(
            indent_type='',
            infile={
                'DEFAULT': {
                    'port': endpoint.port,
                    'is_secure': 'yes' if endpoint.cert else 'no',
                },
                'fixtures': {},
                's3 main': {},
                's3 alt': {},
                's3 tenant': {},
            })

    with contextutil.nested(
            lambda: download(ctx=ctx, config=config),
            lambda: create_users(ctx=ctx,
                                 config=dict(
                                     clients=clients,
                                     s3tests_conf=s3tests_conf,
                                 )),
            lambda: configure(ctx=ctx,
                              config=dict(
                                  clients=config,
                                  s3tests_conf=s3tests_conf,
                              )),
            lambda: run_tests(ctx=ctx, config=config),
            lambda: scan_for_leaked_encryption_keys(ctx=ctx, config=config),
    ):
        pass
    yield
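
Both s3tests variants fold the overrides['s3tests'] section into each client's config with teuthology.deep_merge. A simplified stand-in showing the intended recursive-merge behavior (teuthology's real helper differs in detail):

def deep_merge(dest, src):
    # Illustrative recursive merge: nested dicts are merged key by key,
    # anything else in src overwrites the value in dest.
    for key, value in src.items():
        if isinstance(value, dict) and isinstance(dest.get(key), dict):
            deep_merge(dest[key], value)
        else:
            dest[key] = value
    return dest

config = {'client.0': {'idle_timeout': 30}}
deep_merge(config, {'client.0': {'idle_timeout': 600}})
print(config)  # {'client.0': {'idle_timeout': 600}}
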
Code example #60
0
def task(ctx, config):
    """
    Run the s3-tests suite against rgw.

    To run all tests on all clients::

        tasks:
        - ceph:
        - rgw:
        - s3tests:

    To restrict testing to particular clients::

        tasks:
        - ceph:
        - rgw: [client.0]
        - s3tests: [client.0]

    To run against a server on client.1::

        tasks:
        - ceph:
        - rgw: [client.1]
        - s3tests:
            client.0:
              rgw_server: client.1

    To pass extra arguments to nose (e.g. to run a certain test)::

        tasks:
        - ceph:
        - rgw: [client.0]
        - s3tests:
            client.0:
              extra_args: ['test_s3:test_object_acl_grand_public_read']
            client.1:
              extra_args: ['--exclude', 'test_100_continue']
    """
    assert config is None or isinstance(config, list) \
        or isinstance(config, dict), \
        "task s3tests only supports a list or dictionary for configuration"
    all_clients = [
        'client.{id}'.format(id=id_)
        for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')
    ]
    if config is None:
        config = all_clients
    if isinstance(config, list):
        config = dict.fromkeys(config)
    clients = config.keys()

    s3tests_conf = {}
    for client in clients:
        s3tests_conf[client] = ConfigObj(indent_type='',
                                         infile={
                                             'DEFAULT': {
                                                 'port': 7280,
                                                 'is_secure': 'no',
                                             },
                                             'fixtures': {},
                                             's3 main': {},
                                             's3 alt': {},
                                         })

    with contextutil.nested(
            lambda: download(ctx=ctx, config=clients),
            lambda: create_users(ctx=ctx,
                                 config=dict(
                                     clients=clients,
                                     s3tests_conf=s3tests_conf,
                                 )),
            lambda: configure(ctx=ctx,
                              config=dict(
                                  clients=config,
                                  s3tests_conf=s3tests_conf,
                              )),
            lambda: run_tests(ctx=ctx, config=config),
    ):
        yield
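
All of these tasks normalize their config the same way: None expands to one entry per client role in the cluster, and a plain list becomes a dict with None values via dict.fromkeys. A compact sketch of that normalization, with an illustrative stand-in for teuthology.all_roles_of_type:

def all_roles_of_type(cluster_roles, type_):
    # Illustrative stand-in: yield the id of every role of the given
    # type, e.g. 'client.0' -> '0'.
    for host_roles in cluster_roles:
        for role in host_roles:
            if role.startswith(type_ + '.'):
                yield role[len(type_) + 1:]

def normalize(config, cluster_roles):
    if config is None:
        config = ['client.{0}'.format(i)
                  for i in all_roles_of_type(cluster_roles, 'client')]
    if isinstance(config, list):
        config = dict.fromkeys(config)
    return config

print(normalize(None, [['mon.a', 'client.0'], ['client.1']]))
# {'client.0': None, 'client.1': None}
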