Example #1
    def get_host_vars(self, remote):
        extra_vars = self.config.get('vars', dict())
        host_vars = dict()
        if not extra_vars.get('osd_auto_discovery', False):
            roles = self.ctx.cluster.remotes[remote]
            dev_needed = len(
                [role for role in roles if role.startswith('osd')])
            if teuth_config.get('ceph_ansible') and \
                    self.ctx.machine_type in teuth_config['ceph_ansible']['has_lvm_scratch_disks']:
                devices = get_file(remote, "/scratch_devs").split()
                vols = []

                for dev in devices:
                    if 'vg_nvme' in dev:
                        splitpath = dev.split('/')
                        vol = dict()
                        vol['data_vg'] = splitpath[2]
                        vol['data'] = splitpath[3]
                        vols.append(vol)
                extra_vars['lvm_volumes'] = vols
                self.config.update({'vars': extra_vars})
            else:
                host_vars['devices'] = get_scratch_devices(
                    remote)[0:dev_needed]
        if 'monitor_interface' not in extra_vars:
            host_vars['monitor_interface'] = remote.interface
        if 'radosgw_interface' not in extra_vars:
            host_vars['radosgw_interface'] = remote.interface
        if 'public_network' not in extra_vars:
            host_vars['public_network'] = remote.cidr
        return host_vars
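
A quick standalone sketch of the path-splitting above; the device path is a hypothetical example of what a /scratch_devs entry might look like:

dev = '/dev/vg_nvme/lv_1'          # hypothetical scratch device path
splitpath = dev.split('/')         # ['', 'dev', 'vg_nvme', 'lv_1']
vol = {'data_vg': splitpath[2],    # 'vg_nvme'
       'data': splitpath[3]}       # 'lv_1'
print(vol)                         # {'data_vg': 'vg_nvme', 'data': 'lv_1'}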
Example #2
def setup_container_registry(ctx, config):
    """
     setup container registry if setup_container_registry in config

     redhat:
      setup_container_registry: <registry.io>   # registry-name
    """
    if ctx.config.get('redhat').get('setup_container_registry', None):
        registry = ctx.config['redhat']['setup_container_registry']

        # fetch credentials from teuth_config
        creds = teuthconfig.get('registries', dict()).get(registry)
        if not creds:
            raise ConfigError("Registry '%s' not found in teuthology config" % registry)

        # container-tool login
        for remote in ctx.cluster.remotes.keys():
            container_tool = "podman"
            if remote.os.version.startswith('7'):
                container_tool = "docker"

            remote.run(args=[
                'sudo', container_tool,
                'login', registry,
                '--username', creds['username'],
                '--password', creds['password'],
                ]
            )
    yield
Example #3
def _get_config_value_for_remote(ctx, remote, config, key):
    """
    Look through config, and attempt to determine the "best" value to use
    for a given key. For example, given::

        config = {
            'all':
                {'branch': 'master'},
            'branch': 'next'
        }
        _get_config_value_for_remote(ctx, remote, config, 'branch')

    would return 'master'.

    :param ctx: the argparse.Namespace object
    :param remote: the teuthology.orchestra.remote.Remote object
    :param config: the config dict
    :param key: the name of the value to retrieve
    """
    roles = ctx.cluster.remotes[remote] if ctx else None
    if 'all' in config:
        return config['all'].get(key)
    elif roles:
        for role in roles:
            if role in config and key in config[role]:
                return config[role].get(key)
    return config.get(key)
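
The lookup order (an 'all' block beats role-specific keys, which beat top-level keys) can be checked with plain dicts; a self-contained sketch of the same precedence, minus the remote plumbing:

def lookup(config, roles, key):
    # same precedence as _get_config_value_for_remote above
    if 'all' in config:
        return config['all'].get(key)
    for role in roles or []:
        if role in config and key in config[role]:
            return config[role].get(key)
    return config.get(key)

config = {'all': {'branch': 'master'}, 'branch': 'next'}
assert lookup(config, ['mon.a'], 'branch') == 'master'     # 'all' wins
assert lookup({'branch': 'next'}, ['mon.a'], 'branch') == 'next'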
Example #4
def get_testdir(ctx=None):
    """
    :param ctx: Unused; accepted for compatibility
    :returns: A test directory
    """
    return config.get('test_path', '/home/%s/cephtest' % get_test_user())
Example #5
    def dashboard_prequisites(self):
        """
        Enable all dashboard prerequisites:
            1) Container tool login (podman)
            2) Return dashboard images
        Returns:
            dashboard_info
        """
        creds = teuth_config.get('registries', {})
        (ceph_first_mon, ) = iter(
            self.ctx.cluster.only(
                misc.get_first_mon(self.ctx, self.config,
                                   self.cluster_name)).remotes.keys())

        container = "docker"
        if ceph_first_mon.os.version.startswith('8'):
            container = "podman"

        # container tool login
        for remote, _ in list(self.ctx.cluster.remotes.items()):
            if "docker" in container:
                remote.sh("sudo yum install -y docker")
                remote.sh("sudo systemctl restart docker")
            for registry, cred in creds.items():
                remote.sh(
                    "sudo {container} login -p {passwd} -u {user} {registry}".
                    format(container=container,
                           passwd=cred.get('password'),
                           user=cred.get('username'),
                           registry=registry))

        # Look for dashboard information in <suite_path>/rh/downstream.yaml
        ds_yaml = os.path.join(
            teuth_config.get('ds_yaml_dir'),
            '{}.yaml'.format(self.rhbuild),
        )
        with open(ds_yaml) as f:
            dashboard_info = yaml.safe_load(f)
        dashboard = dashboard_info.get('dashboard')

        for build, images in dashboard.items():
            if build in self.rhbuild:
                return images
        return dict()
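
A hedged sketch of a downstream.yaml shaped the way the dashboard lookup above expects; the build key and image name are hypothetical placeholders, not values from the excerpt:

import yaml

ds_text = """
dashboard:
  '4.1':
    grafana: registry.example.com/grafana:4.1
"""
dashboard = yaml.safe_load(ds_text)['dashboard']
rhbuild = '4.1-rhel-8'  # hypothetical build string
images = next((imgs for build, imgs in dashboard.items() if build in rhbuild), {})
print(images)  # {'grafana': 'registry.example.com/grafana:4.1'}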
Example #6
    def choose_teuthology_branch(self):
        """Select teuthology branch, check if it is present in repo and return
        tuple (branch, hash) where hash is commit sha1 corresponding
        to the HEAD of the branch.

        The branch name value is determined in the following order:

        Use ``--teuthology-branch`` argument value if supplied.

        Use ``TEUTH_BRANCH`` environment variable value if declared.

        If file ``qa/.teuthology_branch`` can be found in the suite repo
        supplied with ``--suite-repo`` or ``--suite-dir`` and contains
        non-empty string then use it as the branch name.

        Use the ``teuthology_branch`` value if it is set in one of the
        teuthology config files, ``$HOME/teuthology.yaml`` or
        ``/etc/teuthology.yaml``, checked in that order.

        Otherwise, use ``master``.

        Fail scheduling if the branch is not present in the repo.

        """
        teuthology_branch = self.args.teuthology_branch
        if not teuthology_branch:
            teuthology_branch = os.environ.get('TEUTH_BRANCH', None)
        if not teuthology_branch:
            branch_file_path = self.suite_repo_path + '/qa/.teuthology_branch'
            log.debug('Check file %s exists', branch_file_path)
            if os.path.exists(branch_file_path):
                log.debug('Found teuthology branch config file %s',
                          branch_file_path)
                with open(branch_file_path) as f:
                    teuthology_branch = f.read().strip()
                    if teuthology_branch:
                        log.debug('The teuthology branch is overridden with %s',
                                  teuthology_branch)
                    else:
                        log.warning(
                            'The teuthology branch config is empty, skipping')
        if not teuthology_branch:
            teuthology_branch = config.get('teuthology_branch', 'master')

        teuthology_sha1 = util.git_ls_remote(
            'teuthology',
            teuthology_branch
        )
        if not teuthology_sha1:
            exc = BranchNotFoundError(teuthology_branch, build_git_url('teuthology'))
            util.schedule_fail(message=str(exc), name=self.name)
        log.info("teuthology branch: %s %s", teuthology_branch, teuthology_sha1)
        return teuthology_branch, teuthology_sha1
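
The resolution order reduces to a chain of fallbacks; a condensed standalone sketch, where read_branch_file is a hypothetical helper standing in for the file check above:

import os

def read_branch_file(path):
    # hypothetical helper: stripped file contents, or None if absent/empty
    if os.path.exists(path):
        with open(path) as f:
            return f.read().strip() or None
    return None

def resolve_branch(arg_branch, suite_repo_path, config_branch=None):
    return (arg_branch
            or os.environ.get('TEUTH_BRANCH')
            or read_branch_file(suite_repo_path + '/qa/.teuthology_branch')
            or config_branch
            or 'master')

print(resolve_branch(None, '/nonexistent'))  # 'master' unless TEUTH_BRANCH is set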
Example #7
def map_vnet(mip):
    for mapping in teuth_config.get('vnet', []):
        mnet = ipaddress.ip_network(mapping['machine_subnet'])
        vnet = ipaddress.ip_network(mapping['virtual_subnet'])
        if vnet.prefixlen >= mnet.prefixlen:
            log.error(f"virtual_subnet {vnet} prefix >= machine_subnet {mnet} prefix")
            return None
        if mip in mnet:
            pos = list(mnet.hosts()).index(mip)
            log.info(f"{mip} is in {mnet} at pos {pos}")
            sub = list(vnet.subnets(32 - mnet.prefixlen))[pos]
            return sub
    return None
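
The mapping is pure ipaddress arithmetic: the machine IP's position within machine_subnet selects the slice of virtual_subnet at the same index. A standalone sketch with made-up subnets:

import ipaddress

mnet = ipaddress.ip_network('172.21.0.0/24')   # made-up machine_subnet
vnet = ipaddress.ip_network('10.10.0.0/16')    # made-up virtual_subnet
mip = ipaddress.ip_address('172.21.0.5')

pos = list(mnet.hosts()).index(mip)            # 4: .1 is host 0
sub = list(vnet.subnets(32 - mnet.prefixlen))[pos]
print(sub)                                     # 10.10.4.0/24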
Example #8
def _enable_rhel_repos(remote):
    # Look for RH-specific repos
    ds_yaml = os.path.join(teuthconfig.get('ds_yaml_dir'),
                           teuthconfig.rhbuild + ".yaml")

    with open(ds_yaml) as f:
        rhel_repos = yaml.safe_load(f)
    repos_to_subscribe = rhel_repos.get('rhel_repos', {}).get(
        remote.os.version[0], [])

    for repo in repos_to_subscribe:
        remote.run(args=[
            'sudo', 'subscription-manager', 'repos', '--enable={r}'.format(
                r=repo)
        ])
Example #9
File: pcp.py, Projects: BlaXpirit/teuthology, zhengqin/teuthology
    def __init__(self, ctx, config):
        super(PCP, self).__init__(ctx, config)
        if teuth_config.get('pcp_host') is None:
            self.enabled = False
        self.log = log
        self.job_id = self.ctx.config.get('job_id')
        # until the job stops, we may want to render graphs reflecting the most
        # current data
        self.stop_time = 'now'
        self.use_graphite = self.config.get('graphite', True)
        self.use_grafana = self.config.get('grafana', True)
        # fetch_archives defaults to False for now because of various bugs in
        # pmlogextract
        self.fetch_archives = self.config.get('fetch_archives', False)
Example #10
File: fog.py, Project: xiling/teuthology
def get_types():
    """
    Fetch and parse config.fog['machine_types']

    :returns: The list of FOG-configured machine types. An empty list if FOG is
              not configured.
    """
    if not enabled():
        return []
    fog_conf = config.get('fog', dict())
    types = fog_conf.get('machine_types', '')
    if not isinstance(types, list):
        types = types.split(',')
    return [type_ for type_ in types if type_]
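
machine_types may arrive as a YAML list or as a comma-separated string; the normalization is the same either way. A quick sketch:

def normalize(types):
    # split comma-separated strings, then drop empty entries
    if not isinstance(types, list):
        types = types.split(',')
    return [t for t in types if t]

assert normalize('smithi,mira,') == ['smithi', 'mira']
assert normalize(['smithi', '', 'mira']) == ['smithi', 'mira']
assert normalize('') == []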
Example #11
def get_types():
    """
    Fetch and parse config.pelagos['machine_types']

    :returns: The list of Pelagos-configured machine types. An empty list
              if Pelagos is not configured.
    """
    if not enabled():
        return []
    conf = config.get(config_section, dict())
    types = conf.get('machine_types', '')
    if not isinstance(types, list):
        types = types.split(',')
    return [t for t in types if t]
Example #12
File: fog.py, Project: xiling/teuthology
def enabled(warn=False):
    """
    Check for required FOG settings

    :param warn: Whether or not to log a message containing unset parameters
    :returns: True if they are present; False if they are not
    """
    fog_conf = config.get('fog', dict())
    params = ['endpoint', 'api_token', 'user_token', 'machine_types']
    unset = [param for param in params if not fog_conf.get(param)]
    if unset and warn:
        log.warning(
            "FOG disabled; set the following config options to enable: %s",
            ' '.join(unset),
        )
    return (unset == [])
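
Note that the check tests truthiness rather than key presence, so empty strings count as unset just like missing keys; a quick sketch with hypothetical values:

fog_conf = {'endpoint': 'http://fog.example.com/fog',  # hypothetical values
            'api_token': 'abc', 'user_token': ''}
params = ['endpoint', 'api_token', 'user_token', 'machine_types']
unset = [p for p in params if not fog_conf.get(p)]
print(unset)  # ['user_token', 'machine_types'] -> FOG stays disabled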
Example #13
def enabled(warn=False):
    """
    Check for required Pelagos settings

    :param warn: Whether or not to log a message containing unset parameters
    :returns: True if they are present; False if they are not
    """
    conf = config.get(config_section, dict())
    params = ['endpoint', 'machine_types']
    unset = [_ for _ in params if not conf.get(_)]
    if unset and warn:
        log.warning(
            "Pelagos is disabled; set the following config options to enable: %s",
            ' '.join(unset),
        )
    return (unset == [])
Example #14
def _subscribe_stage_cdn(remote):
    _unsubscribe_stage_cdn(remote)
    cdn_config = teuthconfig.get('cdn-config', dict())
    server_url = cdn_config.get(
        'server-url', 'subscription.rhsm.stage.redhat.com:443/subscription')
    base_url = cdn_config.get('base-url', 'https://cdn.stage.redhat.com')
    username = cdn_config.get('username', 'cephuser')
    password = cdn_config.get('password')
    remote.run(args=[
        'sudo', 'subscription-manager', '--force', 'register',
        run.Raw('--serverurl=' + server_url),
        run.Raw('--baseurl=' + base_url),
        run.Raw('--username=' + username),
        run.Raw('--password=' + password),
        '--auto-attach'
    ], timeout=720)
    _enable_rhel_repos(remote)
Example #15
def map_vips(mip, count):
    for mapping in teuth_config.get('vip', []):
        mnet = ipaddress.ip_network(mapping['machine_subnet'])
        vnet = ipaddress.ip_network(mapping['virtual_subnet'])
        if vnet.prefixlen >= mnet.prefixlen:
            log.error(
                f"virtual_subnet {vnet} prefix >= machine_subnet {mnet} prefix"
            )
            return None
        if mip in mnet:
            pos = list(mnet.hosts()).index(mip)
            log.info(f"{mip} in {mnet}, pos {pos}")
            r = []
            for sub in vnet.subnets(new_prefix=mnet.prefixlen):
                r += [list(sub.hosts())[pos]]
                count -= 1
                if count == 0:
                    break
            return vnet, r
    return None
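
Unlike map_vnet, which returns a single subnet, map_vips collects the host at the same position from each of the first count virtual subnets. A standalone sketch with made-up subnets and position:

import ipaddress

mnet = ipaddress.ip_network('172.21.0.0/24')   # made-up machine_subnet
vnet = ipaddress.ip_network('10.20.0.0/16')    # made-up virtual_subnet
pos, count, vips = 4, 2, []

for sub in vnet.subnets(new_prefix=mnet.prefixlen):  # /24 slices of the /16
    vips.append(list(sub.hosts())[pos])
    if len(vips) == count:
        break
print(vips)  # [IPv4Address('10.20.0.5'), IPv4Address('10.20.1.5')]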
Example #16
File: redhat.py, Project: zmc/teuthology
def install(ctx, config):
    """
    Installs rh ceph on all hosts in ctx.

    :param ctx: the argparse.Namespace object
    :param config: the config dict

    Uses a yaml file defined in the qa suite or in the user's
    home dir to check for supported versions and packages to
    install.

    The format of the yaml is:
    versions:
        supported:
           - '1.3.0'
        rpm:
            mapped:
               '1.3.0' : '0.94.1'
        deb:
            mapped:
               '1.3.0' : '0.94.1'
        pkgs:
            rpm:
             - ceph-mon
             - ceph-osd
            deb:
             - ceph-osd
             - ceph-mds
    """
    # Look for rh specific packages
    ds_yaml = os.path.join(
        teuth_config.get('ds_yaml_dir'),
        config.get('rhbuild') + ".yaml",
    )
    if not os.path.exists(ds_yaml):
        raise FileNotFoundError(f'Downstream rh version yaml file missing: {ds_yaml}')
    log.info("using yaml path %s", ds_yaml)
    with open(ds_yaml) as f:
        downstream_config = yaml.safe_load(f)
    rh_versions = downstream_config.get('versions', dict()).get('supported', [])
    external_config = dict(extra_system_packages=config.get('extra_system_packages'),
                           extra_packages=config.get('extra_packages'),
                           )
    downstream_config.update(external_config)
    version = config.get('rhbuild')
    if version in rh_versions:
        log.info("%s is a supported version", version)
    else:
        raise RuntimeError("Unsupported RH Ceph version %s" % version)
    with parallel() as p:
        for remote in ctx.cluster.remotes.keys():
            if remote.os.name == 'rhel':
                log.info("Installing on RHEL node: %s", remote.shortname)
                p.spawn(install_pkgs, ctx, remote, version, downstream_config)
            else:
                log.info("Install on Ubuntu node: %s", remote.shortname)
                p.spawn(install_deb_pkgs, ctx, remote, version,
                        downstream_config)
    try:
        yield
    finally:
        if config.get('skip_uninstall'):
            log.info("Skipping uninstall of Ceph")
        else:
            with parallel() as p:
                for remote in ctx.cluster.remotes.keys():
                    p.spawn(uninstall_pkgs, ctx, remote, downstream_config)
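
A quick sketch of parsing the yaml schema from the docstring above, including the rpm version mapping that the excerpt itself does not show; the values come straight from the docstring:

import yaml

downstream = yaml.safe_load("""
versions:
  supported: ['1.3.0']
  rpm:
    mapped: {'1.3.0': '0.94.1'}
""")
rhbuild = '1.3.0'
assert rhbuild in downstream['versions']['supported']
print(downstream['versions']['rpm']['mapped'][rhbuild])  # 0.94.1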
Example #17
File: cephadm.py, Project: wuhen110/ceph
def task(ctx, config):
    """
    Deploy ceph cluster using cephadm

    Setup containers' mirrors before the bootstrap, if corresponding
    config provided in teuthology server config yaml file.

    For example, teuthology.yaml can contain the 'defaults' section:

        defaults:
          cephadm:
            containers:
              registry_mirrors:
                docker.io: 'registry.mirror.example.com:5000'
              image: 'quay.io/ceph-ci/ceph'

    Using overrides makes it possible to customize it per run.
    The equivalent 'overrides' section looks like:

        overrides:
          cephadm:
            containers:
              registry_mirrors:
                docker.io: 'registry.mirror.example.com:5000'
              image: 'quay.io/ceph-ci/ceph'

    :param ctx: the argparse.Namespace object
    :param config: the config dict
    """
    if config is None:
        config = {}

    assert isinstance(config, dict), \
        "task only supports a dictionary for configuration"

    overrides = ctx.config.get('overrides', {})
    teuthology.deep_merge(config, overrides.get('ceph', {}))
    teuthology.deep_merge(config, overrides.get('cephadm', {}))
    log.info('Config: ' + str(config))

    # set up cluster context
    if not hasattr(ctx, 'ceph'):
        ctx.ceph = {}
        ctx.managers = {}
    if 'cluster' not in config:
        config['cluster'] = 'ceph'
    cluster_name = config['cluster']
    if cluster_name not in ctx.ceph:
        ctx.ceph[cluster_name] = argparse.Namespace()
        ctx.ceph[cluster_name].bootstrapped = False

    # image
    teuth_defaults = teuth_config.get('defaults', {})
    cephadm_defaults = teuth_defaults.get('cephadm', {})
    containers_defaults = cephadm_defaults.get('containers', {})
    mirrors_defaults = containers_defaults.get('registry_mirrors', {})
    container_registry_mirror = mirrors_defaults.get('docker.io', None)
    container_image_name = containers_defaults.get('image', None)

    containers = config.get('containers', {})
    mirrors = containers.get('registry_mirrors', {})
    container_image_name = containers.get('image', container_image_name)
    container_registry_mirror = mirrors.get('docker.io',
                                            container_registry_mirror)

    if not hasattr(ctx.ceph[cluster_name], 'image'):
        ctx.ceph[cluster_name].image = config.get('image')
    ref = None
    if not ctx.ceph[cluster_name].image:
        if not container_image_name:
            raise Exception(
                "Configuration error occurred. "
                "The 'image' value is undefined for 'cephadm' task. "
                "Please provide corresponding options in the task's "
                "config, task 'overrides', or teuthology 'defaults' "
                "section.")
        sha1 = config.get('sha1')
        flavor = config.get('flavor', 'default')

        if sha1:
            if flavor == "crimson":
                ctx.ceph[cluster_name].image = \
                    container_image_name + ':' + sha1 + '-' + flavor
            else:
                ctx.ceph[cluster_name].image = container_image_name + ':' + sha1
            ref = sha1
        else:
            # hmm, fall back to branch?
            branch = config.get('branch', 'master')
            ref = branch
            ctx.ceph[cluster_name].image = container_image_name + ':' + branch
    log.info('Cluster image is %s' % ctx.ceph[cluster_name].image)


    with contextutil.nested(
            # if the cluster is already bootstrapped, bypass the corresponding methods
            lambda: _bypass() if (ctx.ceph[cluster_name].bootstrapped)\
                              else initialize_config(ctx=ctx, config=config),
            lambda: ceph_initial(),
            lambda: normalize_hostnames(ctx=ctx),
            lambda: _bypass() if (ctx.ceph[cluster_name].bootstrapped)\
                              else download_cephadm(ctx=ctx, config=config, ref=ref),
            lambda: ceph_log(ctx=ctx, config=config),
            lambda: ceph_crash(ctx=ctx, config=config),
            lambda: _bypass() if (ctx.ceph[cluster_name].bootstrapped)\
                              else ceph_bootstrap(ctx, config,
                                                  container_registry_mirror),
            lambda: crush_setup(ctx=ctx, config=config),
            lambda: ceph_mons(ctx=ctx, config=config),
            lambda: distribute_config_and_admin_keyring(ctx=ctx, config=config),
            lambda: ceph_mgrs(ctx=ctx, config=config),
            lambda: ceph_osds(ctx=ctx, config=config),
            lambda: ceph_mdss(ctx=ctx, config=config),
            lambda: ceph_rgw(ctx=ctx, config=config),
            lambda: ceph_monitoring('prometheus', ctx=ctx, config=config),
            lambda: ceph_monitoring('node-exporter', ctx=ctx, config=config),
            lambda: ceph_monitoring('alertmanager', ctx=ctx, config=config),
            lambda: ceph_monitoring('grafana', ctx=ctx, config=config),
            lambda: ceph_clients(ctx=ctx, config=config),
    ):
        ctx.managers[cluster_name] = CephManager(
            ctx.ceph[cluster_name].bootstrap_remote,
            ctx=ctx,
            logger=log.getChild('ceph_manager.' + cluster_name),
            cluster=cluster_name,
            cephadm=True,
        )

        try:
            if config.get('wait-for-healthy', True):
                healthy(ctx=ctx, config=config)

            log.info('Setup complete, yielding')
            yield

        finally:
            log.info('Teardown begin')
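
The container image resolution above layers teuthology defaults, task config, and a sha1 or branch tag. A condensed sketch of that order with hypothetical config values (the crimson flavor suffix is omitted):

teuth_defaults = {'cephadm': {'containers': {'image': 'quay.io/ceph-ci/ceph'}}}
config = {'sha1': 'abc123', 'flavor': 'default'}

name = teuth_defaults['cephadm']['containers'].get('image')
name = config.get('containers', {}).get('image', name)      # task config wins
tag = config.get('sha1') or config.get('branch', 'master')  # sha1 beats branch
print(name + ':' + tag)  # quay.io/ceph-ci/ceph:abc123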
Example #18
File: rook.py, Project: rhcs-dashboard/ceph
def task(ctx, config):
    """
    Deploy rook-ceph cluster

      tasks:
      - kubeadm:
      - rook:
          branch: wip-foo
          spec:
            mon:
              count: 1

    The spec item is deep-merged against the cluster.yaml.  The branch, sha1, or
    image items are used to determine the Ceph container image.
    """
    if not config:
        config = {}
    assert isinstance(config, dict), \
        "task only supports a dictionary for configuration"

    log.info('Rook start')

    overrides = ctx.config.get('overrides', {})
    teuthology.deep_merge(config, overrides.get('ceph', {}))
    teuthology.deep_merge(config, overrides.get('rook', {}))
    log.info('Config: ' + str(config))

    # set up cluster context
    if not hasattr(ctx, 'rook'):
        ctx.rook = {}
    if 'cluster' not in config:
        config['cluster'] = 'ceph'
    cluster_name = config['cluster']
    if cluster_name not in ctx.rook:
        ctx.rook[cluster_name] = argparse.Namespace()

    ctx.rook[cluster_name].remote = list(ctx.cluster.remotes.keys())[0]

    # image
    teuth_defaults = teuth_config.get('defaults', {})
    cephadm_defaults = teuth_defaults.get('cephadm', {})
    containers_defaults = cephadm_defaults.get('containers', {})
    container_image_name = containers_defaults.get('image', None)
    if 'image' in config:
        ctx.rook[cluster_name].image = config.get('image')
    else:
        sha1 = config.get('sha1')
        flavor = config.get('flavor', 'default')
        if sha1:
            if flavor == "crimson":
                ctx.rook[cluster_name].image = \
                    container_image_name + ':' + sha1 + '-' + flavor
            else:
                ctx.rook[cluster_name].image = container_image_name + ':' + sha1
        else:
            # hmm, fall back to branch?
            branch = config.get('branch', 'master')
            ctx.rook[cluster_name].image = container_image_name + ':' + branch
    log.info('Ceph image is %s' % ctx.rook[cluster_name].image)

    with contextutil.nested(
            lambda: rook_operator(ctx, config),
            lambda: ceph_log(ctx, config),
            lambda: rook_cluster(ctx, config),
            lambda: rook_toolbox(ctx, config),
            lambda: wait_for_orch(ctx, config),
            lambda: rook_post_config(ctx, config),
            lambda: wait_for_osds(ctx, config),
            lambda: ceph_config_keyring(ctx, config),
            lambda: ceph_clients(ctx, config),
    ):
        if not hasattr(ctx, 'managers'):
            ctx.managers = {}
        ctx.managers[cluster_name] = CephManager(
            ctx.rook[cluster_name].remote,
            ctx=ctx,
            logger=log.getChild('ceph_manager.' + cluster_name),
            cluster=cluster_name,
            rook=True,
        )
        try:
            if config.get('wait-for-healthy', True):
                healthy(ctx=ctx, config=config)
            log.info('Rook complete, yielding')
            yield

        finally:
            to_remove = []
            ret = _shell(ctx,
                         config, ['ceph', 'orch', 'ls', '-f', 'json'],
                         stdout=BytesIO())
            if ret.exitstatus == 0:
                r = json.loads(ret.stdout.getvalue().decode('utf-8'))
                for service in r:
                    if service['service_type'] in [
                            'rgw', 'mds', 'nfs', 'rbd-mirror'
                    ]:
                        _shell(ctx, config,
                               ['ceph', 'orch', 'rm', service['service_name']])
                        to_remove.append(service['service_name'])
                with safe_while(
                        sleep=10, tries=90,
                        action="waiting for service removal") as proceed:
                    while proceed():
                        ret = _shell(ctx,
                                     config,
                                     ['ceph', 'orch', 'ls', '-f', 'json'],
                                     stdout=BytesIO())
                        if ret.exitstatus == 0:
                            r = json.loads(
                                ret.stdout.getvalue().decode('utf-8'))
                            still_up = [
                                service['service_name'] for service in r
                            ]
                            matches = set(still_up).intersection(to_remove)
                            if not matches:
                                break
            log.info('Tearing down rook')
Example #19
def get_test_user(ctx=None):
    """
    :param ctx: Unused; accepted for compatibility
    :returns:   str -- the user to run tests as on remote hosts
    """
    return config.get('test_user', 'ubuntu')
Example #20
def task(ctx, config):
    """
    Deploy rook-ceph cluster

      tasks:
      - kubeadm:
      - rook:
          branch: wip-foo
          spec:
            mon:
              count: 1

    The spec item is deep-merged against the cluster.yaml.  The branch, sha1, or
    image items are used to determine the Ceph container image.
    """
    if not config:
        config = {}
    assert isinstance(config, dict), \
        "task only supports a dictionary for configuration"

    log.info('Rook start')

    overrides = ctx.config.get('overrides', {})
    teuthology.deep_merge(config, overrides.get('ceph', {}))
    teuthology.deep_merge(config, overrides.get('rook', {}))
    log.info('Config: ' + str(config))

    # set up cluster context
    if not hasattr(ctx, 'rook'):
        ctx.rook = {}
    if 'cluster' not in config:
        config['cluster'] = 'ceph'
    cluster_name = config['cluster']
    if cluster_name not in ctx.rook:
        ctx.rook[cluster_name] = argparse.Namespace()

    ctx.rook[cluster_name].remote = list(ctx.cluster.remotes.keys())[0]

    # image
    teuth_defaults = teuth_config.get('defaults', {})
    cephadm_defaults = teuth_defaults.get('cephadm', {})
    containers_defaults = cephadm_defaults.get('containers', {})
    container_image_name = containers_defaults.get('image', None)
    if 'image' in config:
        ctx.rook[cluster_name].image = config.get('image')
    else:
        sha1 = config.get('sha1')
        flavor = config.get('flavor', 'default')
        if sha1:
            if flavor == "crimson":
                ctx.rook[cluster_name].image = container_image_name + ':' + sha1 + '-' + flavor
            else:
                ctx.rook[cluster_name].image = container_image_name + ':' + sha1
        else:
            # hmm, fall back to branch?
            branch = config.get('branch', 'master')
            ctx.rook[cluster_name].image = container_image_name + ':' + branch
    log.info('Ceph image is %s' % ctx.rook[cluster_name].image)
    
    with contextutil.nested(
            lambda: rook_operator(ctx, config),
            lambda: ceph_log(ctx, config),
            lambda: rook_cluster(ctx, config),
            lambda: rook_toolbox(ctx, config),
            lambda: wait_for_osds(ctx, config),
            lambda: ceph_config_keyring(ctx, config),
            lambda: ceph_clients(ctx, config),
    ):
        if not hasattr(ctx, 'managers'):
            ctx.managers = {}
        ctx.managers[cluster_name] = CephManager(
            ctx.rook[cluster_name].remote,
            ctx=ctx,
            logger=log.getChild('ceph_manager.' + cluster_name),
            cluster=cluster_name,
            rook=True,
        )
        try:
            if config.get('wait-for-healthy', True):
                healthy(ctx=ctx, config=config)
            log.info('Rook complete, yielding')
            yield

        finally:
            log.info('Tearing down rook')