Example 1
    def __init__(self, ctx, manager, config, name, logger):
        super(MonitorThrasher, self).__init__()

        self.ctx = ctx
        self.manager = manager
        self.manager.wait_for_clean()

        self.stopping = False
        self.logger = logger
        self.config = config
        self.name = name

        if self.config is None:
            self.config = dict()

        """ Test reproducibility """
        self.random_seed = self.config.get('seed', None)

        if self.random_seed is None:
            self.random_seed = int(time.time())

        self.rng = random.Random()
        self.rng.seed(int(self.random_seed))

        """ Monitor thrashing """
        self.revive_delay = float(self.config.get('revive_delay', 10.0))
        self.thrash_delay = float(self.config.get('thrash_delay', 0.0))

        self.thrash_many = self.config.get('thrash_many', False)
        self.maintain_quorum = self.config.get('maintain_quorum', True)

        self.scrub = self.config.get('scrub', True)

        self.freeze_mon_probability = float(self.config.get('freeze_mon_probability', 10))
        self.freeze_mon_duration = float(self.config.get('freeze_mon_duration', 15.0))

        assert self.max_killable() > 0, \
            'Unable to kill at least one monitor with the current config.'

        """ Store thrashing """
        self.store_thrash = self.config.get('store_thrash', False)
        self.store_thrash_probability = int(
            self.config.get('store_thrash_probability', 50))
        if self.store_thrash:
            assert self.store_thrash_probability > 0, \
                'store_thrash is set, probability must be > 0'
            assert self.maintain_quorum, \
                'store_thrash = true must imply maintain_quorum = true'

        #MDS failover
        self.mds_failover = self.config.get('check_mds_failover', False)

        if self.mds_failover:
            self.mds_cluster = MDSCluster(ctx)

        self.thread = gevent.spawn(self.do_thrash)
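
The constructor above (an excerpt; the full MonitorThrasher class appears in Example 7) centers on one reproducibility trick: take the seed from the config when given, otherwise derive it from the clock, and feed it to a private random.Random so a failing run can be replayed. A minimal standard-library sketch of that pattern, independent of the Ceph code:

import random
import time

def make_rng(config):
    """Return (seed, rng). A configured seed makes the run reproducible;
    otherwise fall back to the current wall-clock time."""
    seed = config.get('seed')
    if seed is None:
        seed = int(time.time())
    rng = random.Random()
    rng.seed(int(seed))
    return int(seed), rng

seed, rng = make_rng({'seed': 31337})
# Re-seeding with the logged value reproduces the same decision sequence.
assert rng.randrange(0, 101) == random.Random(31337).randrange(0, 101)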
Example 2
def ready(ctx, config):
    """
    That the file system is ready for clients.
    """

    if config is None:
        config = {}
    assert isinstance(config, dict), \
        'task only accepts a dict for configuration'

    timeout = config.get('timeout', 300)

    mdsc = MDSCluster(ctx)
    status = mdsc.status()

    for filesystem in status.get_filesystems():
        fs = Filesystem(ctx, fscid=filesystem['id'])
        fs.wait_for_daemons(timeout=timeout, status=status)
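
wait_for_daemons(timeout=timeout, ...) is, at heart, a poll-until-ready loop bounded by the configured timeout (300 seconds by default here). A generic sketch of that pattern, with the predicate left as a hypothetical stand-in:

import time

def wait_until(predicate, timeout=300, interval=2):
    """Poll a zero-argument predicate until it returns True, or raise
    once `timeout` seconds have elapsed."""
    deadline = time.time() + timeout
    while not predicate():
        if time.time() > deadline:
            raise RuntimeError('condition not met within %ds' % timeout)
        time.sleep(interval)

# Hypothetical usage:
# wait_until(lambda: fs_is_ready(), timeout=config.get('timeout', 300))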
Example 3
def pre_upgrade_save(ctx, config):
    """
    That the upgrade procedure doesn't clobber state: save state.
    """

    mdsc = MDSCluster(ctx)
    status = mdsc.status()

    state = {}
    ctx['mds-upgrade-state'] = state

    for fs in list(status.get_filesystems()):
        fscid = fs['id']
        mdsmap = fs['mdsmap']
        fs_state = {}
        fs_state['epoch'] = mdsmap['epoch']
        fs_state['max_mds'] = mdsmap['max_mds']
        fs_state['flags'] = mdsmap['flags'] & UPGRADE_FLAGS_MASK
        state[fscid] = fs_state
        log.debug(f"fs fscid={fscid},name={mdsmap['fs_name']} state = {fs_state}")
Example 4
def post_upgrade_checks(ctx, config):
    """
    That the upgrade procedure doesn't clobber state.
    """

    state = ctx['mds-upgrade-state']

    mdsc = MDSCluster(ctx)
    status = mdsc.status()

    for fs in list(status.get_filesystems()):
        fscid = fs['id']
        mdsmap = fs['mdsmap']
        fs_state = state[fscid]
        log.debug(f"checking fs fscid={fscid},name={mdsmap['fs_name']} state = {fs_state}")

        # check state was restored to previous values
        assert fs_state['max_mds'] == mdsmap['max_mds']
        assert fs_state['flags'] == (mdsmap['flags'] & UPGRADE_FLAGS_MASK)

        # now confirm that the upgrade procedure was followed
        epoch = mdsmap['epoch']
        pre_upgrade_epoch = fs_state['epoch']
        assert pre_upgrade_epoch < epoch
        should_decrease_max_mds = fs_state['max_mds'] > 1
        did_decrease_max_mds = False
        should_disable_allow_standby_replay = fs_state['flags'] & CEPH_MDSMAP_ALLOW_STANDBY_REPLAY
        did_disable_allow_standby_replay = False
        for i in range(pre_upgrade_epoch+1, mdsmap['epoch']):
            old_status = mdsc.status(epoch=i)
            old_fs = old_status.get_fsmap(fscid)
            old_mdsmap = old_fs['mdsmap']
            if should_decrease_max_mds and old_mdsmap['max_mds'] == 1:
                log.debug(f"max_mds reduced in epoch {i}")
                did_decrease_max_mds = True
            if should_disable_allow_standby_replay and not (old_mdsmap['flags'] & CEPH_MDSMAP_ALLOW_STANDBY_REPLAY):
                log.debug(f"allow_standby_replay disabled in epoch {i}")
                did_disable_allow_standby_replay = True
        assert not should_decrease_max_mds or did_decrease_max_mds
        assert not should_disable_allow_standby_replay or did_disable_allow_standby_replay
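
Two small idioms carry the final checks: scanning every epoch between the saved one and the current one for the expected intermediate state, and expressing "if the step was required, it must have been observed" as assert not should or did, i.e. logical implication. The implication form in isolation:

def check_implication(should, did):
    """A => B written as `not A or B`: fails only when the step was
    required (should) but never observed (did)."""
    assert not should or did

check_implication(should=False, did=False)   # nothing required: passes
check_implication(should=True, did=True)     # required and observed: passes
# check_implication(should=True, did=False)  # would raise AssertionError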
Example 5
def task(ctx, config):
    """
    Run the CephFS test cases.

    Run everything in tasks/cephfs/test_*.py:

    ::

        tasks:
          - install:
          - ceph:
          - ceph-fuse:
          - cephfs_test_runner:

    `modules` argument allows running only some specific modules:

    ::

        tasks:
            ...
          - cephfs_test_runner:
              modules:
                - tasks.cephfs.test_sessionmap
                - tasks.cephfs.test_auto_repair

    By default, any cases that can't be run on the current cluster configuration
    will generate a failure.  When the optional `fail_on_skip` argument is set
    to false, any tests that can't be run on the current configuration will
    simply be skipped:

    ::

        tasks:
            ...
          - cephfs_test_runner:
              fail_on_skip: false

    """

    ceph_cluster = CephCluster(ctx)

    if len(list(misc.all_roles_of_type(ctx.cluster, 'mds'))):
        mds_cluster = MDSCluster(ctx)
        fs = Filesystem(ctx)
    else:
        mds_cluster = None
        fs = None

    if len(list(misc.all_roles_of_type(ctx.cluster, 'mgr'))):
        mgr_cluster = MgrCluster(ctx)
    else:
        mgr_cluster = None

    # Mount objects, sorted by ID
    if hasattr(ctx, 'mounts'):
        mounts = [
            v for k, v in sorted(ctx.mounts.items(),
                                 key=lambda a: a[0])
        ]
    else:
        # The test configuration has a filesystem but no fuse/kclient mounts
        mounts = []

    decorating_loader = DecoratingLoader({
        "ctx": ctx,
        "mounts": mounts,
        "fs": fs,
        "ceph_cluster": ceph_cluster,
        "mds_cluster": mds_cluster,
        "mgr_cluster": mgr_cluster,
    })

    fail_on_skip = config.get('fail_on_skip', True)

    # Put useful things onto ctx for interactive debugging
    ctx.fs = fs
    ctx.mds_cluster = mds_cluster
    ctx.mgr_cluster = mgr_cluster

    # Depending on config, either load specific modules, or scan for modules
    if config and 'modules' in config and config['modules']:
        module_suites = []
        for mod_name in config['modules']:
            # Test names like cephfs.test_auto_repair
            module_suites.append(decorating_loader.loadTestsFromName(mod_name))
        overall_suite = suite.TestSuite(module_suites)
    else:
        # Default, run all tests
        overall_suite = decorating_loader.discover(
            os.path.join(os.path.dirname(os.path.abspath(__file__)),
                         "cephfs/"))

    if ctx.config.get("interactive-on-error", False):
        InteractiveFailureResult.ctx = ctx
        result_class = InteractiveFailureResult
    else:
        result_class = unittest.TextTestResult

    class LoggingResult(result_class):
        def startTest(self, test):
            log.info("Starting test: {0}".format(self.getDescription(test)))
            return super(LoggingResult, self).startTest(test)

        def addSkip(self, test, reason):
            if fail_on_skip:
                # Don't just call addFailure because that requires a traceback
                self.failures.append((test, reason))
            else:
                super(LoggingResult, self).addSkip(test, reason)

    # Execute!
    result = unittest.TextTestRunner(stream=LogStream(),
                                     resultclass=LoggingResult,
                                     verbosity=2,
                                     failfast=True).run(overall_suite)

    if not result.wasSuccessful():
        result.printErrors()  # duplicate output at end for convenience

        bad_tests = []
        for test, error in result.errors:
            bad_tests.append(str(test))
        for test, failure in result.failures:
            bad_tests.append(str(test))

        raise RuntimeError("Test failure: {0}".format(", ".join(bad_tests)))

    yield
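
The interesting part of this runner is the LoggingResult class: it logs each test as it starts and, when fail_on_skip is enabled, records skips as plain failures so the run fails fast (hence failfast=True). A self-contained sketch of the same idea against a toy test case, using only the standard library (here fail_on_skip is a class attribute rather than the closure variable the task uses):

import logging
import unittest

logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)

class LoggingResult(unittest.TextTestResult):
    """Log each test as it starts; optionally record skips as failures."""
    fail_on_skip = True

    def startTest(self, test):
        log.info("Starting test: %s", self.getDescription(test))
        return super().startTest(test)

    def addSkip(self, test, reason):
        if self.fail_on_skip:
            # A reason string instead of a traceback is enough to fail the run.
            self.failures.append((test, reason))
        else:
            super().addSkip(test, reason)

class ToyTest(unittest.TestCase):
    def test_skip(self):
        self.skipTest("environment not suitable")

suite = unittest.defaultTestLoader.loadTestsFromTestCase(ToyTest)
result = unittest.TextTestRunner(resultclass=LoggingResult, verbosity=2).run(suite)
print("successful:", result.wasSuccessful())   # False: the skip was recorded as a failure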
Example 6
def task(ctx, config):
    """
    Stress test the mds by thrashing while another task/workunit
    is running.

    Please refer to MDSThrasher class for further information on the
    available options.
    """

    mds_cluster = MDSCluster(ctx)

    if config is None:
        config = {}
    assert isinstance(config, dict), \
        'mds_thrash task only accepts a dict for configuration'
    mdslist = list(teuthology.all_roles_of_type(ctx.cluster, 'mds'))
    assert len(mdslist) > 1, \
        'mds_thrash task requires at least 2 metadata servers'

    # choose random seed
    if 'seed' in config:
        seed = int(config['seed'])
    else:
        seed = int(time.time())
    log.info('mds thrasher using random seed: {seed}'.format(seed=seed))
    random.seed(seed)

    max_thrashers = config.get('max_thrash', 1)
    thrashers = {}

    (first,) = ctx.cluster.only('mds.{_id}'.format(_id=mdslist[0])).remotes.iterkeys()
    manager = ceph_manager.CephManager(
        first, ctx=ctx, logger=log.getChild('ceph_manager'),
    )

    # make sure everyone is in active, standby, or standby-replay
    log.info('Wait for all MDSs to reach steady state...')
    statuses = None
    statuses_by_rank = None
    while True:
        statuses = {m: mds_cluster.get_mds_info(m) for m in mdslist}
        statuses_by_rank = {}
        for _, s in statuses.iteritems():
            if isinstance(s, dict):
                statuses_by_rank[s['rank']] = s

        ready = filter(lambda (_, s): s is not None and (s['state'] == 'up:active'
                                                         or s['state'] == 'up:standby'
                                                         or s['state'] == 'up:standby-replay'),
                       statuses.items())
        if len(ready) == len(statuses):
            break
        time.sleep(2)
    log.info('Ready to start thrashing')

    # setup failure groups
    failure_groups = {}
    actives = {s['name']: s for (_, s) in statuses.iteritems() if s['state'] == 'up:active'}
    log.info('Actives is: {d}'.format(d=actives))
    log.info('Statuses is: {d}'.format(d=statuses_by_rank))
    for active in actives:
        for (r, s) in statuses.iteritems():
            if s['standby_for_name'] == active:
                if not active in failure_groups:
                    failure_groups[active] = []
                log.info('Assigning mds rank {r} to failure group {g}'.format(r=r, g=active))
                failure_groups[active].append(r)

    manager.wait_for_clean()
    for (active, standbys) in failure_groups.iteritems():
        weight = 1.0
        if 'thrash_weights' in config:
            weight = float(config['thrash_weights'].get('mds.{_id}'.format(_id=active), '0.0'))

        failure_group = [active]
        failure_group.extend(standbys)

        thrasher = MDSThrasher(
            ctx, manager, mds_cluster, config,
            logger=log.getChild('mds_thrasher.failure_group.[{a}, {sbs}]'.format(
                a=active,
                sbs=', '.join(standbys)
            )
            ),
            failure_group=failure_group,
            weight=weight)
        thrasher.start()
        thrashers[active] = thrasher

        # if thrash_weights isn't specified and we've reached max_thrash,
        # we're done
        if 'thrash_weights' not in config and len(thrashers) == max_thrashers:
            break

    try:
        log.debug('Yielding')
        yield
    finally:
        log.info('joining mds_thrashers')
        for t in thrashers:
            log.info('join thrasher for failure group [{fg}]'.format(fg=', '.join(failure_group)))
            thrashers[t].stop()
            thrashers[t].join()
        log.info('done joining')
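
This older variant of the mds_thrash task (written for Python 2: iterkeys, iteritems, tuple-unpacking lambdas) spends most of its setup building failure groups: every MDS whose standby_for_name points at an active gets filed under that active. The grouping step in isolation, as a Python 3 sketch over a simplified status dict (the data shape is an assumption, trimmed down from what get_mds_info() returns):

statuses = {
    'a': {'name': 'a', 'state': 'up:active',  'standby_for_name': ''},
    'b': {'name': 'b', 'state': 'up:standby', 'standby_for_name': 'a'},
    'c': {'name': 'c', 'state': 'up:standby', 'standby_for_name': 'a'},
}

actives = {s['name'] for s in statuses.values() if s['state'] == 'up:active'}

failure_groups = {}
for rank, s in statuses.items():
    if s['standby_for_name'] in actives:
        # file each standby under the active it follows
        failure_groups.setdefault(s['standby_for_name'], []).append(rank)

print(failure_groups)   # {'a': ['b', 'c']}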
Example 7
class MonitorThrasher(Thrasher):
    """
    How it works::

    - pick a monitor
    - kill it
    - wait for quorum to be formed
    - sleep for 'revive_delay' seconds
    - revive monitor
    - wait for quorum to be formed
    - sleep for 'thrash_delay' seconds

    Options::

    seed                Seed to use on the RNG to reproduce a previous
                        behaviour (default: None; i.e., not set)
    revive_delay        Number of seconds to wait before reviving
                        the monitor (default: 10)
    thrash_delay        Number of seconds to wait in-between
                        test iterations (default: 0)
    store_thrash        Thrash monitor store before killing the monitor being thrashed (default: False)
    store_thrash_probability  Probability of thrashing a monitor's store
                              (default: 50)
    thrash_many         Thrash multiple monitors instead of just one. If
                        'maintain_quorum' is set to False, then we will
                        thrash up to as many monitors as are available.
                        (default: False)
    maintain_quorum     Always maintain quorum, limiting how many
                        monitors we kill during thrashing. If only one
                        or two monitors are configured and this option
                        is set to True, the task will not run, as quorum
                        cannot be guaranteed. Setting it to False allows
                        the task to run with as few as a single monitor.
                        (default: True)
    freeze_mon_probability: how often to freeze the mon instead of killing it,
                        in % (default: 0)
    freeze_mon_duration: how many seconds to freeze the mon (default: 15)
    scrub               Scrub after each iteration (default: True)
    check_mds_failover  Check if mds failover happened (default: False)

    Note: if 'store_thrash' is set to True, then 'maintain_quorum' must also
          be set to True.

    For example::

    tasks:
    - ceph:
    - mon_thrash:
        revive_delay: 20
        thrash_delay: 1
        store_thrash: true
        store_thrash_probability: 40
        seed: 31337
        maintain_quorum: true
        thrash_many: true
        check_mds_failover: True
    - ceph-fuse:
    - workunit:
        clients:
          all:
            - mon/workloadgen.sh
    """
    def __init__(self, ctx, manager, config, logger):
        super(MonitorThrasher, self).__init__()
        self.ctx = ctx
        self.manager = manager
        self.manager.wait_for_clean()

        self.stopping = False
        self.logger = logger
        self.config = config

        if self.config is None:
            self.config = dict()

        """ Test reproducibility """
        self.random_seed = self.config.get('seed', None)

        if self.random_seed is None:
            self.random_seed = int(time.time())

        self.rng = random.Random()
        self.rng.seed(int(self.random_seed))

        """ Monitor thrashing """
        self.revive_delay = float(self.config.get('revive_delay', 10.0))
        self.thrash_delay = float(self.config.get('thrash_delay', 0.0))

        self.thrash_many = self.config.get('thrash_many', False)
        self.maintain_quorum = self.config.get('maintain_quorum', True)

        self.scrub = self.config.get('scrub', True)

        self.freeze_mon_probability = float(self.config.get('freeze_mon_probability', 10))
        self.freeze_mon_duration = float(self.config.get('freeze_mon_duration', 15.0))

        assert self.max_killable() > 0, \
            'Unable to kill at least one monitor with the current config.'

        """ Store thrashing """
        self.store_thrash = self.config.get('store_thrash', False)
        self.store_thrash_probability = int(
            self.config.get('store_thrash_probability', 50))
        if self.store_thrash:
            assert self.store_thrash_probability > 0, \
                'store_thrash is set, probability must be > 0'
            assert self.maintain_quorum, \
                'store_thrash = true must imply maintain_quorum = true'

        #MDS failover
        self.mds_failover = self.config.get('check_mds_failover', False)

        if self.mds_failover:
            self.mds_cluster = MDSCluster(ctx)

        self.thread = gevent.spawn(self.do_thrash)

    def log(self, x):
        """
        locally log info messages
        """
        self.logger.info(x)

    def do_join(self):
        """
        Break out of this process's thrashing loop.
        """
        self.stopping = True
        self.thread.get()

    def should_thrash_store(self):
        """
        If allowed, indicate that we should thrash a certain percentage of
        the time as determined by the store_thrash_probability value.
        """
        if not self.store_thrash:
            return False
        return self.rng.randrange(0, 101) < self.store_thrash_probability

    def thrash_store(self, mon):
        """
        Thrash the monitor specified.
        :param mon: monitor to thrash
        """
        addr = self.ctx.ceph['ceph'].mons['mon.%s' % mon]
        self.log('thrashing mon.{id}@{addr} store'.format(id=mon, addr=addr))
        out = self.manager.raw_cluster_cmd('-m', addr, 'sync', 'force',
                                           '--yes-i-really-mean-it',
                                           '--i-know-what-i-am-doing')
        j = json.loads(out)
        assert j['ret'] == 0, \
            'error forcing store sync on mon.{id}:\n{ret}'.format(
                id=mon,ret=out)

    def should_freeze_mon(self):
        """
        Indicate that we should freeze a certain percentage of the time
        as determined by the freeze_mon_probability value.
        """
        return self.rng.randrange(0, 101) < self.freeze_mon_probability

    def freeze_mon(self, mon):
        """
        Send STOP signal to freeze the monitor.
        """
        log.info('Sending STOP to mon %s', mon)
        self.manager.signal_mon(mon, 19)  # STOP

    def unfreeze_mon(self, mon):
        """
        Send CONT signal to unfreeze the monitor.
        """
        log.info('Sending CONT to mon %s', mon)
        self.manager.signal_mon(mon, 18)  # CONT

    def kill_mon(self, mon):
        """
        Kill the monitor specified
        """
        self.log('killing mon.{id}'.format(id=mon))
        self.manager.kill_mon(mon)

    def revive_mon(self, mon):
        """
        Revive the monitor specified
        """
        self.log('reviving mon.{id}'.format(id=mon))
        self.manager.revive_mon(mon)

    def max_killable(self):
        """
        Return the maximum number of monitors we can kill.
        """
        m = len(_get_mons(self.ctx))
        if self.maintain_quorum:
            return max(math.ceil(m/2.0)-1, 0)
        else:
            return m

    def do_thrash(self):
        """
        _do_thrash() wrapper.
        """
        try:
            self._do_thrash()
        except Exception as e:
            # See _run exception comment for MDSThrasher
            self.exception = e
            self.logger.exception("exception:")
            # Allow successful completion so gevent doesn't see an exception.
            # The DaemonWatchdog will observe the error and tear down the test.

    def _do_thrash(self):
        """
        Continuously loop and thrash the monitors.
        """
        #status before mon thrashing
        if self.mds_failover:
            oldstatus = self.mds_cluster.status()

        self.log('start thrashing')
        self.log('seed: {s}, revive delay: {r}, thrash delay: {t} '\
                   'thrash many: {tm}, maintain quorum: {mq} '\
                   'store thrash: {st}, probability: {stp} '\
                   'freeze mon: prob {fp} duration {fd}'.format(
                s=self.random_seed,r=self.revive_delay,t=self.thrash_delay,
                tm=self.thrash_many, mq=self.maintain_quorum,
                st=self.store_thrash,stp=self.store_thrash_probability,
                fp=self.freeze_mon_probability,fd=self.freeze_mon_duration,
                ))

        while not self.stopping:
            mons = _get_mons(self.ctx)
            self.manager.wait_for_mon_quorum_size(len(mons))
            self.log('making sure all monitors are in the quorum')
            for m in mons:
                s = self.manager.get_mon_status(m)
                assert s['state'] == 'leader' or s['state'] == 'peon'
                assert len(s['quorum']) == len(mons)

            kill_up_to = self.rng.randrange(1, self.max_killable()+1)
            mons_to_kill = self.rng.sample(mons, kill_up_to)
            self.log('monitors to thrash: {m}'.format(m=mons_to_kill))

            mons_to_freeze = []
            for mon in mons:
                if mon in mons_to_kill:
                    continue
                if self.should_freeze_mon():
                    mons_to_freeze.append(mon)
            self.log('monitors to freeze: {m}'.format(m=mons_to_freeze))

            for mon in mons_to_kill:
                self.log('thrashing mon.{m}'.format(m=mon))

                """ we only thrash stores if we are maintaining quorum """
                if self.should_thrash_store() and self.maintain_quorum:
                    self.thrash_store(mon)

                self.kill_mon(mon)

            if mons_to_freeze:
                for mon in mons_to_freeze:
                    self.freeze_mon(mon)
                self.log('waiting for {delay} secs to unfreeze mons'.format(
                    delay=self.freeze_mon_duration))
                time.sleep(self.freeze_mon_duration)
                for mon in mons_to_freeze:
                    self.unfreeze_mon(mon)

            if self.maintain_quorum:
                self.manager.wait_for_mon_quorum_size(len(mons)-len(mons_to_kill))
                for m in mons:
                    if m in mons_to_kill:
                        continue
                    s = self.manager.get_mon_status(m)
                    assert s['state'] == 'leader' or s['state'] == 'peon'
                    assert len(s['quorum']) == len(mons)-len(mons_to_kill)

            self.log('waiting for {delay} secs before reviving monitors'.format(
                delay=self.revive_delay))
            time.sleep(self.revive_delay)

            for mon in mons_to_kill:
                self.revive_mon(mon)
            # do more freezes
            if mons_to_freeze:
                for mon in mons_to_freeze:
                    self.freeze_mon(mon)
                self.log('waiting for {delay} secs to unfreeze mons'.format(
                    delay=self.freeze_mon_duration))
                time.sleep(self.freeze_mon_duration)
                for mon in mons_to_freeze:
                    self.unfreeze_mon(mon)

            self.manager.wait_for_mon_quorum_size(len(mons))
            for m in mons:
                s = self.manager.get_mon_status(m)
                assert s['state'] == 'leader' or s['state'] == 'peon'
                assert len(s['quorum']) == len(mons)

            if self.scrub:
                self.log('triggering scrub')
                try:
                    self.manager.raw_cluster_cmd('scrub')
                except Exception:
                    log.exception("Saw exception while triggering scrub")

            if self.thrash_delay > 0.0:
                self.log('waiting for {delay} secs before continuing thrashing'.format(
                    delay=self.thrash_delay))
                time.sleep(self.thrash_delay)

        #status after thrashing
        if self.mds_failover:
            status = self.mds_cluster.status()
            assert not oldstatus.hadfailover(status), \
                'MDS Failover'
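
max_killable() encodes the quorum arithmetic that drives the whole loop: with m monitors, quorum needs floor(m/2) + 1 of them, so at most ceil(m/2) - 1 may be down at once when maintain_quorum is set. A quick check of the formula for small clusters:

import math

def max_killable(m, maintain_quorum=True):
    """Largest number of monitors that can be down while quorum survives."""
    if maintain_quorum:
        return max(int(math.ceil(m / 2.0)) - 1, 0)
    return m

for m in range(1, 6):
    print(m, '->', max_killable(m))
# 1 -> 0, 2 -> 0, 3 -> 1, 4 -> 1, 5 -> 2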
Example 8
def task(ctx, config):
    """
    Stress test the mds by thrashing while another task/workunit
    is running.

    Please refer to MDSThrasher class for further information on the
    available options.
    """

    mds_cluster = MDSCluster(ctx)

    if config is None:
        config = {}
    assert isinstance(config, dict), \
        'mds_thrash task only accepts a dict for configuration'
    mdslist = list(teuthology.all_roles_of_type(ctx.cluster, 'mds'))
    assert len(mdslist) > 1, \
        'mds_thrash task requires at least 2 metadata servers'

    # choose random seed
    if 'seed' in config:
        seed = int(config['seed'])
    else:
        seed = int(time.time())
    log.info('mds thrasher using random seed: {seed}'.format(seed=seed))
    random.seed(seed)

    (first, ) = ctx.cluster.only(
        'mds.{_id}'.format(_id=mdslist[0])).remotes.keys()
    manager = ceph_manager.CephManager(
        first,
        ctx=ctx,
        logger=log.getChild('ceph_manager'),
    )

    # make sure everyone is in active, standby, or standby-replay
    log.info('Wait for all MDSs to reach steady state...')
    status = mds_cluster.status()
    while True:
        steady = True
        for info in status.get_all():
            state = info['state']
            if state not in ('up:active', 'up:standby', 'up:standby-replay'):
                steady = False
                break
        if steady:
            break
        sleep(2)
        status = mds_cluster.status()
    log.info('Ready to start thrashing')

    manager.wait_for_clean()
    assert manager.is_clean()

    if 'cluster' not in config:
        config['cluster'] = 'ceph'

    for fs in status.get_filesystems():
        thrasher = MDSThrasher(ctx, manager, config, Filesystem(ctx, fs['id']),
                               fs['mdsmap']['max_mds'])
        thrasher.start()
        ctx.ceph[config['cluster']].thrashers.append(thrasher)

    try:
        log.debug('Yielding')
        yield
    finally:
        log.info('joining mds_thrasher')
        thrasher.stop()
        if thrasher.exception is not None:
            raise RuntimeError('error during thrashing')
        thrasher.join()
        log.info('done joining')
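
The shape of this task is a context manager around background thrashers: start one per filesystem, yield so the workload can run, then stop and join them in the finally block and re-raise anything they recorded. A threading-based stand-in for that lifecycle (the real MDSThrasher is a gevent greenlet driven through CephManager; ToyThrasher here is purely illustrative):

import contextlib
import threading

class ToyThrasher(threading.Thread):
    """Loop until stop() is called, remembering any exception for the caller."""
    def __init__(self):
        super().__init__()
        self.stopping = threading.Event()
        self.exception = None

    def run(self):
        try:
            while not self.stopping.wait(0.1):
                pass  # a real thrasher would fail/revive a daemon here
        except Exception as e:
            self.exception = e

    def stop(self):
        self.stopping.set()

@contextlib.contextmanager
def thrash():
    thrasher = ToyThrasher()
    thrasher.start()
    try:
        yield  # the workload runs while the thrasher is active
    finally:
        thrasher.stop()
        thrasher.join()
        if thrasher.exception is not None:
            raise RuntimeError('error during thrashing') from thrasher.exception

# Usage: run the workload inside the context; thrashing happens around it.
with thrash():
    pass  # stand-in for the workunit that runs while thrashing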
Example 9
def task(ctx, config):
    """
    Stress test the mds by thrashing while another task/workunit
    is running.

    Please refer to MDSThrasher class for further information on the
    available options.
    """

    mds_cluster = MDSCluster(ctx)

    if config is None:
        config = {}
    assert isinstance(config, dict), \
        'mds_thrash task only accepts a dict for configuration'
    mdslist = list(teuthology.all_roles_of_type(ctx.cluster, 'mds'))
    assert len(mdslist) > 1, \
        'mds_thrash task requires at least 2 metadata servers'

    # choose random seed
    if 'seed' in config:
        seed = int(config['seed'])
    else:
        seed = int(time.time())
    log.info('mds thrasher using random seed: {seed}'.format(seed=seed))
    random.seed(seed)

    (first,) = ctx.cluster.only('mds.{_id}'.format(_id=mdslist[0])).remotes.iterkeys()
    manager = ceph_manager.CephManager(
        first, ctx=ctx, logger=log.getChild('ceph_manager'),
    )

    # make sure everyone is in active, standby, or standby-replay
    log.info('Wait for all MDSs to reach steady state...')
    status = mds_cluster.status()
    while True:
        steady = True
        for info in status.get_all():
            state = info['state']
            if state not in ('up:active', 'up:standby', 'up:standby-replay'):
                steady = False
                break
        if steady:
            break
        sleep(2)
        status = mds_cluster.status()
    log.info('Ready to start thrashing')

    thrashers = []

    watchdog = DaemonWatchdog(ctx, manager, config, thrashers)
    watchdog.start()

    manager.wait_for_clean()
    assert manager.is_clean()
    for fs in status.get_filesystems():
        thrasher = MDSThrasher(ctx, manager, config, Filesystem(ctx, fs['id']), fs['mdsmap']['max_mds'])
        thrasher.start()
        thrashers.append(thrasher)

    try:
        log.debug('Yielding')
        yield
    finally:
        log.info('joining mds_thrashers')
        for thrasher in thrashers:
            thrasher.stop()
            if thrasher.e:
                raise RuntimeError('error during thrashing')
            thrasher.join()
        log.info('done joining')

        watchdog.stop()
        watchdog.join()
Example 10
def task(ctx, config):
    """
    Stress test the mds by thrashing while another task/workunit
    is running.

    Please refer to MDSThrasher class for further information on the
    available options.
    """

    mds_cluster = MDSCluster(ctx)

    if config is None:
        config = {}
    assert isinstance(config, dict), \
        'mds_thrash task only accepts a dict for configuration'
    mdslist = list(teuthology.all_roles_of_type(ctx.cluster, 'mds'))
    assert len(mdslist) > 1, \
        'mds_thrash task requires at least 2 metadata servers'

    # choose random seed
    if 'seed' in config:
        seed = int(config['seed'])
    else:
        seed = int(time.time())
    log.info('mds thrasher using random seed: {seed}'.format(seed=seed))
    random.seed(seed)

    max_thrashers = config.get('max_thrash', 1)
    thrashers = {}

    (first, ) = ctx.cluster.only(
        'mds.{_id}'.format(_id=mdslist[0])).remotes.iterkeys()
    manager = ceph_manager.CephManager(
        first,
        ctx=ctx,
        logger=log.getChild('ceph_manager'),
    )

    # make sure everyone is in active, standby, or standby-replay
    log.info('Wait for all MDSs to reach steady state...')
    statuses = None
    statuses_by_rank = None
    while True:
        statuses = {m: mds_cluster.get_mds_info(m) for m in mdslist}
        statuses_by_rank = {}
        for _, s in statuses.iteritems():
            if isinstance(s, dict):
                statuses_by_rank[s['rank']] = s

        ready = filter(
            lambda (_, s): s is not None and (s['state'] == 'up:active' or s[
                'state'] == 'up:standby' or s['state'] == 'up:standby-replay'),
            statuses.items())
        if len(ready) == len(statuses):
            break
        time.sleep(2)
    log.info('Ready to start thrashing')

    # setup failure groups
    failure_groups = {}
    actives = {
        s['name']: s
        for (_, s) in statuses.iteritems() if s['state'] == 'up:active'
    }
    log.info('Actives is: {d}'.format(d=actives))
    log.info('Statuses is: {d}'.format(d=statuses_by_rank))
    for active in actives:
        for (r, s) in statuses.iteritems():
            if s['standby_for_name'] == active:
                if not active in failure_groups:
                    failure_groups[active] = []
                log.info('Assigning mds rank {r} to failure group {g}'.format(
                    r=r, g=active))
                failure_groups[active].append(r)

    manager.wait_for_clean()
    for (active, standbys) in failure_groups.iteritems():
        weight = 1.0
        if 'thrash_weights' in config:
            weight = float(config['thrash_weights'].get(
                'mds.{_id}'.format(_id=active), '0.0'))

        failure_group = [active]
        failure_group.extend(standbys)

        thrasher = MDSThrasher(
            ctx,
            manager,
            mds_cluster,
            config,
            logger=log.getChild(
                'mds_thrasher.failure_group.[{a}, {sbs}]'.format(
                    a=active, sbs=', '.join(standbys))),
            failure_group=failure_group,
            weight=weight)
        thrasher.start()
        thrashers[active] = thrasher

        # if thrash_weights isn't specified and we've reached max_thrash,
        # we're done
        if 'thrash_weights' not in config and len(thrashers) == max_thrashers:
            break

    try:
        log.debug('Yielding')
        yield
    finally:
        log.info('joining mds_thrashers')
        for t in thrashers:
            log.info('join thrasher for failure group [{fg}]'.format(
                fg=', '.join(failure_group)))
            thrashers[t].stop()
            thrashers[t].get()  # Raise any exception from _run()
            thrashers[t].join()
        log.info('done joining')
Example 11
def task(ctx, config):
    """
    Stress test the mds by running scrub iterations while another task/workunit
    is running.
    Example config:

    - fwd_scrub:
      scrub_timeout: 300
      sleep_between_iterations: 1
    """

    mds_cluster = MDSCluster(ctx)

    if config is None:
        config = {}
    assert isinstance(config, dict), \
        'fwd_scrub task only accepts a dict for configuration'
    mdslist = list(teuthology.all_roles_of_type(ctx.cluster, 'mds'))
    assert len(mdslist) > 0, \
        'fwd_scrub task requires at least 1 metadata server'

    (first, ) = ctx.cluster.only(f'mds.{mdslist[0]}').remotes.keys()
    manager = ceph_manager.CephManager(
        first,
        ctx=ctx,
        logger=log.getChild('ceph_manager'),
    )

    # make sure everyone is in active, standby, or standby-replay
    log.info('Wait for all MDSs to reach steady state...')
    status = mds_cluster.status()
    while True:
        steady = True
        for info in status.get_all():
            state = info['state']
            if state not in ('up:active', 'up:standby', 'up:standby-replay'):
                steady = False
                break
        if steady:
            break
        sleep(2)
        status = mds_cluster.status()

    log.info('Ready to start scrub thrashing')

    manager.wait_for_clean()
    assert manager.is_clean()

    if 'cluster' not in config:
        config['cluster'] = 'ceph'

    for fs in status.get_filesystems():
        fwd_scrubber = ForwardScrubber(Filesystem(ctx, fscid=fs['id']),
                                       config['scrub_timeout'],
                                       config['sleep_between_iterations'])
        fwd_scrubber.start()
        ctx.ceph[config['cluster']].thrashers.append(fwd_scrubber)

    try:
        log.debug('Yielding')
        yield
    finally:
        log.info('joining ForwardScrubbers')
        stop_all_fwd_scrubbers(ctx.ceph[config['cluster']].thrashers)
        log.info('done joining')
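
Note that, unlike most of the other tasks here, the two ForwardScrubber arguments are read with plain indexing, so scrub_timeout and sleep_between_iterations are mandatory and a config that omits them fails with KeyError. A defensive variant would fall back to the values from the docstring example (the defaults below are an assumption, not what the task actually does):

config = {}  # e.g. `- fwd_scrub:` given with no options
scrub_timeout = config.get('scrub_timeout', 300)                      # assumed default
sleep_between_iterations = config.get('sleep_between_iterations', 1)  # assumed default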
Example 12
def task(ctx, config):
    """
    Mount/unmount a ``ceph-fuse`` client.

    The config is optional and defaults to mounting on all clients. If
    a config is given, it is expected to be a list of clients to do
    this operation on. This lets you e.g. set up one client with
    ``ceph-fuse`` and another with ``kclient``.

    Example that mounts all clients::

        tasks:
        - ceph:
        - ceph-fuse:
        - interactive:

    Example that uses both ``kclient`` and ``ceph-fuse``::

        tasks:
        - ceph:
        - ceph-fuse: [client.0]
        - kclient: [client.1]
        - interactive:

    Example that enables valgrind:

        tasks:
        - ceph:
        - ceph-fuse:
            client.0:
              valgrind: [--tool=memcheck, --leak-check=full, --show-reachable=yes]
        - interactive:

    Example that stops an already-mounted client:

    ::

        tasks:
            - ceph:
            - ceph-fuse: [client.0]
            - ... do something that requires the FS mounted ...
            - ceph-fuse:
                client.0:
                    mounted: false
            - ... do something that requires the FS unmounted ...

    Example that adds more generous wait time for mount (for virtual machines):

        tasks:
        - ceph:
        - ceph-fuse:
            client.0:
              mount_wait: 60 # default is 0, do not wait before checking /sys/
              mount_timeout: 120 # default is 30, give up if /sys/ is not populated
        - interactive:

    :param ctx: Context
    :param config: Configuration
    """
    log.info('Mounting ceph-fuse clients...')

    testdir = teuthology.get_testdir(ctx)
    config = get_client_configs(ctx, config)

    # List clients we will configure mounts for, default is all clients
    clients = list(teuthology.get_clients(ctx=ctx, roles=filter(lambda x: 'client.' in x, config.keys())))

    all_mounts = getattr(ctx, 'mounts', {})
    mounted_by_me = {}

    log.info('Wait for MDS to reach steady state...')
    mds_cluster = MDSCluster(ctx)
    status = mds_cluster.status()
    for filesystem in status.get_filesystems():
        fs = Filesystem(ctx, fscid=filesystem['id']) 
        fs.wait_for_daemons()
    log.info('Ready to start ceph-fuse...')

    # Construct any new FuseMount instances
    for id_, remote in clients:
        client_config = config.get("client.%s" % id_)
        if client_config is None:
            client_config = {}

        if id_ not in all_mounts:
            fuse_mount = FuseMount(client_config, testdir, id_, remote)
            all_mounts[id_] = fuse_mount
        else:
            # Catch bad configs where someone has e.g. tried to use ceph-fuse and kcephfs for the same client
            assert isinstance(all_mounts[id_], FuseMount)

        if not config.get("disabled", False) and client_config.get('mounted', True):
            mounted_by_me[id_] = all_mounts[id_]

    ctx.mounts = all_mounts

    # Mount any clients we have been asked to (default to mount all)
    for mount in mounted_by_me.values():
        mount.mount()

    for mount in mounted_by_me.values():
        mount.wait_until_mounted()

    # Umount any pre-existing clients that we have not been asked to mount
    for client_id in set(all_mounts.keys()) - set(mounted_by_me.keys()):
        mount = all_mounts[client_id]
        if mount.is_mounted():
            mount.umount_wait()

    try:
        yield all_mounts
    finally:
        log.info('Unmounting ceph-fuse clients...')

        for mount in mounted_by_me.values():
            # Conditional because an inner context might have umounted it
            if mount.is_mounted():
                mount.umount_wait()
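
The bookkeeping around all_mounts versus mounted_by_me is what lets this task both mount newly requested clients and unmount previously-mounted ones it was not asked about: anything in the first set but not the second gets unmounted. The set arithmetic in miniature:

all_mounts = {'0': 'mount-0', '1': 'mount-1', '2': 'mount-2'}   # everything ctx knows about
mounted_by_me = {'0': 'mount-0', '2': 'mount-2'}                # what this invocation asked for

to_unmount = set(all_mounts) - set(mounted_by_me)
print(sorted(to_unmount))   # ['1']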
Example 13
def task(ctx, config):
    """
    Stress test the mds by thrashing while another task/workunit
    is running.

    Please refer to MDSThrasher class for further information on the
    available options.
    """

    mds_cluster = MDSCluster(ctx)

    if config is None:
        config = {}
    assert isinstance(config, dict), "mds_thrash task only accepts a dict for configuration"
    mdslist = list(teuthology.all_roles_of_type(ctx.cluster, "mds"))
    assert len(mdslist) > 1, "mds_thrash task requires at least 2 metadata servers"

    # choose random seed
    if "seed" in config:
        seed = int(config["seed"])
    else:
        seed = int(time.time())
    log.info("mds thrasher using random seed: {seed}".format(seed=seed))
    random.seed(seed)

    max_thrashers = config.get("max_thrash", 1)
    thrashers = {}

    (first,) = ctx.cluster.only("mds.{_id}".format(_id=mdslist[0])).remotes.iterkeys()
    manager = ceph_manager.CephManager(first, ctx=ctx, logger=log.getChild("ceph_manager"))

    # make sure everyone is in active, standby, or standby-replay
    log.info("Wait for all MDSs to reach steady state...")
    statuses = None
    statuses_by_rank = None
    while True:
        statuses = {m: mds_cluster.get_mds_info(m) for m in mdslist}
        statuses_by_rank = {}
        for _, s in statuses.iteritems():
            if isinstance(s, dict):
                statuses_by_rank[s["rank"]] = s

        ready = filter(
            lambda (_, s): s is not None
            and (s["state"] == "up:active" or s["state"] == "up:standby" or s["state"] == "up:standby-replay"),
            statuses.items(),
        )
        if len(ready) == len(statuses):
            break
        time.sleep(2)
    log.info("Ready to start thrashing")

    # setup failure groups
    failure_groups = {}
    actives = {s["name"]: s for (_, s) in statuses.iteritems() if s["state"] == "up:active"}
    log.info("Actives is: {d}".format(d=actives))
    log.info("Statuses is: {d}".format(d=statuses_by_rank))
    for active in actives:
        for (r, s) in statuses.iteritems():
            if s["standby_for_name"] == active:
                if not active in failure_groups:
                    failure_groups[active] = []
                log.info("Assigning mds rank {r} to failure group {g}".format(r=r, g=active))
                failure_groups[active].append(r)

    manager.wait_for_clean()
    for (active, standbys) in failure_groups.iteritems():
        weight = 1.0
        if "thrash_weights" in config:
            weight = int(config["thrash_weights"].get("mds.{_id}".format(_id=active), "0.0"))

        failure_group = [active]
        failure_group.extend(standbys)

        thrasher = MDSThrasher(
            ctx,
            manager,
            mds_cluster,
            config,
            logger=log.getChild("mds_thrasher.failure_group.[{a}, {sbs}]".format(a=active, sbs=", ".join(standbys))),
            failure_group=failure_group,
            weight=weight,
        )
        thrasher.start()
        thrashers[active] = thrasher

        # if thrash_weights isn't specified and we've reached max_thrash,
        # we're done
        if "thrash_weights" not in config and len(thrashers) == max_thrashers:
            break

    try:
        log.debug("Yielding")
        yield
    finally:
        log.info("joining mds_thrashers")
        for t in thrashers:
            log.info("join thrasher for failure group [{fg}]".format(fg=", ".join(failure_group)))
            thrashers[t].stop()
            thrashers[t].get()  # Raise any exception from _run()
            thrashers[t].join()
        log.info("done joining")
Example 14
def task(ctx, config):
    """
    Mount/unmount a ``ceph-fuse`` client.

    The config is optional and defaults to mounting on all clients. If
    a config is given, it is expected to be a list of clients to do
    this operation on. This lets you e.g. set up one client with
    ``ceph-fuse`` and another with ``kclient``.

    Example that mounts all clients::

        tasks:
        - ceph:
        - ceph-fuse:
        - interactive:

    Example that uses both ``kclient`` and ``ceph-fuse``::

        tasks:
        - ceph:
        - ceph-fuse: [client.0]
        - kclient: [client.1]
        - interactive:

    Example that enables valgrind:

        tasks:
        - ceph:
        - ceph-fuse:
            client.0:
              valgrind: [--tool=memcheck, --leak-check=full, --show-reachable=yes]
        - interactive:

    Example that stops an already-mounted client:

    ::

        tasks:
            - ceph:
            - ceph-fuse: [client.0]
            - ... do something that requires the FS mounted ...
            - ceph-fuse:
                client.0:
                    mounted: false
            - ... do something that requires the FS unmounted ...

    Example that adds more generous wait time for mount (for virtual machines):

        tasks:
        - ceph:
        - ceph-fuse:
            client.0:
              mount_wait: 60 # default is 0, do not wait before checking /sys/
              mount_timeout: 120 # default is 30, give up if /sys/ is not populated
        - interactive:

    :param ctx: Context
    :param config: Configuration
    """
    log.info('Mounting ceph-fuse clients...')

    testdir = teuthology.get_testdir(ctx)
    config = get_client_configs(ctx, config)

    # List clients we will configure mounts for, default is all clients
    clients = list(
        teuthology.get_clients(ctx=ctx,
                               roles=filter(lambda x: 'client.' in x,
                                            config.keys())))

    all_mounts = getattr(ctx, 'mounts', {})
    mounted_by_me = {}

    log.info('Wait for MDS to reach steady state...')
    mds_cluster = MDSCluster(ctx)
    status = mds_cluster.status()
    for filesystem in status.get_filesystems():
        fs = Filesystem(ctx, fscid=filesystem['id'])
        fs.wait_for_daemons()
    log.info('Ready to start ceph-fuse...')

    # Construct any new FuseMount instances
    for id_, remote in clients:
        client_config = config.get("client.%s" % id_)
        if client_config is None:
            client_config = {}

        if id_ not in all_mounts:
            fuse_mount = FuseMount(client_config, testdir, id_, remote)
            all_mounts[id_] = fuse_mount
        else:
            # Catch bad configs where someone has e.g. tried to use ceph-fuse and kcephfs for the same client
            assert isinstance(all_mounts[id_], FuseMount)

        if not config.get("disabled", False) and client_config.get(
                'mounted', True):
            mounted_by_me[id_] = all_mounts[id_]

    ctx.mounts = all_mounts

    # Mount any clients we have been asked to (default to mount all)
    for mount in mounted_by_me.values():
        mount.mount()

    for mount in mounted_by_me.values():
        mount.wait_until_mounted()

    # Umount any pre-existing clients that we have not been asked to mount
    for client_id in set(all_mounts.keys()) - set(mounted_by_me.keys()):
        mount = all_mounts[client_id]
        if mount.is_mounted():
            mount.umount_wait()

    try:
        yield all_mounts
    finally:
        log.info('Unmounting ceph-fuse clients...')

        for mount in mounted_by_me.values():
            # Conditional because an inner context might have umounted it
            if mount.is_mounted():
                mount.umount_wait()