Code example #1
def check_optional_config_and_relations(configs):
    """Validate optional configuration and relations when present.

    This function is called from assess_status/set_os_workload_status as the
    charm_func and needs to return ('unknown', '') if there is no problem, or
    the (status, message) pair describing the problem.

    :param configs: an OSConfigRender() instance.
    :return 2-tuple: (string, string) = (status, message)
    """
    if relation_ids('ceph'):
        # Check that provided Ceph BlueStore configuration is valid.
        try:
            bluestore_compression = context.CephBlueStoreCompressionContext()
            bluestore_compression.validate()
        except AttributeError:
            # The charm does late installation of the `ceph-common` package
            # and the class initializer above will raise an exception until
            # the package is installed.
            pass
        except ValueError as e:
            return ('blocked', 'Invalid configuration: {}'.format(str(e)))

    # return 'unknown' as the lowest priority to not clobber an existing
    # status.
    return "unknown", ""
Code example #2
def check_optional_config_and_relations(configs):
    """Check that if we have a relation_id for high availability that we can
    get the hacluster config.  If we can't then we are blocked.  This function
    is called from assess_status/set_os_workload_status as the charm_func and
    needs to return either 'unknown', '' if there is no problem or the status,
    message if there is a problem.

    :param configs: an OSConfigRender() instance.
    :return 2-tuple: (string, string) = (status, message)
    """
    if relation_ids('ha'):
        try:
            get_hacluster_config()
        except Exception:
            return ('blocked', 'hacluster missing configuration: '
                    'vip, vip_iface, vip_cidr')
    # NOTE: misc multi-site relation and config checks
    multisite_config = (config('realm'), config('zonegroup'), config('zone'))
    if relation_ids('master') or relation_ids('slave'):
        if not all(multisite_config):
            return ('blocked', 'multi-site configuration incomplete '
                    '(realm={realm}, zonegroup={zonegroup}'
                    ', zone={zone})'.format(**config()))
    if (all(multisite_config)
            and not (relation_ids('master') or relation_ids('slave'))):
        return ('blocked', 'multi-site configuration but master/slave '
                'relation missing')
    if (all(multisite_config) and relation_ids('slave')):
        multisite_ready = False
        for rid in relation_ids('slave'):
            for unit in related_units(rid):
                if relation_get('url', unit=unit, rid=rid):
                    multisite_ready = True
                    continue
        if not multisite_ready:
            return ('waiting', 'multi-site master relation incomplete')
    master_configured = (
        leader_get('access_key'),
        leader_get('secret'),
        leader_get('restart_nonce'),
    )
    if (all(multisite_config) and relation_ids('master')
            and not all(master_configured)):
        return ('waiting', 'waiting for configuration of master zone')

    # Check that provided Ceph BlueStore configuration is valid.
    try:
        bluestore_compression = context.CephBlueStoreCompressionContext()
        bluestore_compression.validate()
    except ValueError as e:
        return ('blocked', 'Invalid configuration: {}'.format(str(e)))

    # return 'unknown' as the lowest priority to not clobber an existing
    # status.
    return 'unknown', ''
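The multi-site checks in the example above are driven entirely by Juju hook
tools (config, relation_ids, related_units, leader_get). As a hedged
illustration only, the same gating logic can be written as a pure function
that is easy to exercise outside a hook environment; multisite_status and its
parameters are hypothetical names, not part of the charm.

def multisite_status(realm, zonegroup, zone, has_master, has_slave,
                     slave_urls=(), master_configured=False):
    """Reproduce the multi-site gating decisions as a pure function."""
    multisite_config = (realm, zonegroup, zone)
    if (has_master or has_slave) and not all(multisite_config):
        return ('blocked', 'multi-site configuration incomplete')
    if all(multisite_config) and not (has_master or has_slave):
        return ('blocked', 'multi-site configuration but master/slave '
                'relation missing')
    if all(multisite_config) and has_slave and not any(slave_urls):
        return ('waiting', 'multi-site master relation incomplete')
    if all(multisite_config) and has_master and not master_configured:
        return ('waiting', 'waiting for configuration of master zone')
    return ('unknown', '')


# A slave relation whose units have not yet published a url is still waiting:
assert multisite_status('realm', 'zonegroup', 'zone',
                        has_master=False, has_slave=True,
                        slave_urls=[None]) == (
    'waiting', 'multi-site master relation incomplete')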
Code example #3
File: classes.py Project: openstack/charms.openstack
    def _get_bluestore_compression():
        """Get BlueStore Compression charm configuration if present.

        :returns: Dictionary of options suitable for passing on as keyword
                  arguments or None.
        :rtype: Optional[Dict[str,any]]
        :raises: ValueError
        """
        try:
            bluestore_compression = (
                ch_context.CephBlueStoreCompressionContext())
            bluestore_compression.validate()
        except KeyError:
            # The charm does not have BlueStore Compression options defined
            bluestore_compression = None
        if bluestore_compression:
            return bluestore_compression.get_kwargs()
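_get_bluestore_compression() deliberately returns either a dictionary of
keyword arguments or None. A minimal sketch of how a caller might fold that
result into pool-creation arguments follows; build_pool_op and the
compression_mode key in the demo are illustrative, not the charm's real
broker API.

def build_pool_op(pool_name, replica_count, compression_kwargs=None):
    """Assemble keyword arguments for a pool-creation broker operation."""
    kwargs = {
        'name': pool_name,
        'replica_count': replica_count,
        'app_name': 'rbd',
    }
    # Only fold in BlueStore compression options when the charm defines them.
    kwargs.update(compression_kwargs or {})
    return kwargs


print(build_pool_op('cinder-ceph', 3))
print(build_pool_op('cinder-ceph', 3, {'compression_mode': 'aggressive'}))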
Code example #4
def check_optional_config_and_relations(configs):
    """Validate optional configuration and relations when present.

    This function is called from assess_status/set_os_workload_status as the
    charm_func and needs to return ('unknown', '') if there is no problem, or
    the (status, message) pair describing the problem.

    :param configs: an OSConfigRender() instance.
    :return 2-tuple: (string, string) = (status, message)
    """
    # Check that if we have a relation_id for high availability that we can
    # get the hacluster config.  If we can't then we are blocked.
    if relation_ids('ha'):
        try:
            get_hacluster_config()
        except Exception:
            return ('blocked', 'hacluster missing configuration: '
                    'vip, vip_iface, vip_cidr')

    if relation_ids('ceph'):
        # Check that provided Ceph BlueStore configuration is valid.
        try:
            bluestore_compression = context.CephBlueStoreCompressionContext()
            bluestore_compression.validate()
        except AttributeError:
            # The charm does late installation of the `ceph-common` package
            # and the class initializer above will raise an exception until
            # the package is installed.
            pass
        except ValueError as e:
            return ('blocked', 'Invalid configuration: {}'.format(str(e)))
        # Ceph packages are only installed after the ceph relation is
        # established, so gate checking broker requests on the ceph relation
        # being completed.
        if ('ceph' in configs.complete_contexts()
                and not is_request_complete(get_ceph_request())):
            return ('waiting', 'Ceph broker request incomplete')
    # return 'unknown' as the lowest priority to not clobber an existing
    # status.
    return "unknown", ""
Code example #5
def assess_status():
    """Assess status of current unit"""
    # check to see if the unit is paused.
    application_version_set(get_upstream_version(VERSION_PACKAGE))
    if is_unit_upgrading_set():
        status_set(
            "blocked", "Ready for do-release-upgrade and reboot. "
            "Set complete when finished.")
        return
    if is_unit_paused_set():
        status_set('maintenance',
                   "Paused. Use 'resume' action to resume normal service.")
        return
    # Check for mon relation
    if len(relation_ids('mon')) < 1:
        status_set('blocked', 'Missing relation: monitor')
        return

    # Check for monitors with presented addresses
    # Check for bootstrap key presentation
    monitors = get_mon_hosts()
    if len(monitors) < 1 or not get_conf('osd_bootstrap_key'):
        status_set('waiting', 'Incomplete relation: monitor')
        return

    # Check for vault
    if use_vaultlocker():
        if not relation_ids('secrets-storage'):
            status_set('blocked', 'Missing relation: vault')
            return
        try:
            if not vaultlocker.vault_relation_complete():
                status_set('waiting', 'Incomplete relation: vault')
                return
        except Exception as e:
            status_set('blocked', "Warning: couldn't verify vault relation")
            log("Exception when verifying vault relation - maybe it was "
                "offline?:\n{}".format(str(e)))
            log("Traceback: {}".format(traceback.format_exc()))

    # Check for OSD device creation parity i.e. at least some devices
    # must have been presented and used for this charm to be operational
    (prev_status, prev_message) = status_get()
    running_osds = ceph.get_running_osds()
    if not prev_message.startswith('Non-pristine'):
        if not running_osds:
            status_set(
                'blocked',
                'No block devices detected using current configuration')
        else:
            status_set('active',
                       'Unit is ready ({} OSD)'.format(len(running_osds)))
    else:
        pristine = True
        osd_journals = get_journal_devices()
        for dev in list(set(ceph.unmounted_disks()) - set(osd_journals)):
            if (not ceph.is_active_bluestore_device(dev)
                    and not ceph.is_pristine_disk(dev)):
                pristine = False
                break
        if pristine:
            status_set('active',
                       'Unit is ready ({} OSD)'.format(len(running_osds)))

    try:
        get_bdev_enable_discard()
    except ValueError as ex:
        status_set('blocked', str(ex))

    try:
        bluestore_compression = ch_context.CephBlueStoreCompressionContext()
        bluestore_compression.validate()
    except ValueError as e:
        status_set('blocked', 'Invalid configuration: {}'.format(str(e)))
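The 'Non-pristine' branch above only promotes the unit back to 'active' when
every unmounted, non-journal device is either an active BlueStore device or
still pristine. A pure-function sketch of that check follows; the predicate
arguments are illustrative stand-ins for ceph.is_active_bluestore_device and
ceph.is_pristine_disk.

def all_devices_pristine(unmounted_disks, osd_journals,
                         is_active_bluestore, is_pristine):
    """Return True when no leftover data is found on candidate devices."""
    for dev in set(unmounted_disks) - set(osd_journals):
        if not is_active_bluestore(dev) and not is_pristine(dev):
            return False
    return True


# /dev/vdc still carries stale data, so the unit stays 'Non-pristine':
print(all_devices_pristine(
    ['/dev/vdb', '/dev/vdc'], ['/dev/vdb'],
    is_active_bluestore=lambda dev: False,
    is_pristine=lambda dev: dev != '/dev/vdc'))  # -> False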
Code example #6
def get_ceph_context(upgrading=False):
    """Returns the current context dictionary for generating ceph.conf

    :param upgrading: bool - determines if the context is invoked as
                      part of an upgrade procedure.  Setting this to True
                      causes settings useful during an upgrade to be
                      defined in the ceph.conf file.
    """
    mon_hosts = get_mon_hosts()
    log('Monitor hosts are ' + repr(mon_hosts))

    networks = get_networks('ceph-public-network')
    public_network = ', '.join(networks)

    networks = get_networks('ceph-cluster-network')
    cluster_network = ', '.join(networks)

    cephcontext = {
        'auth_supported': get_auth(),
        'mon_hosts': ' '.join(mon_hosts),
        'fsid': get_fsid(),
        'old_auth': cmp_pkgrevno('ceph', "0.51") < 0,
        'crush_initial_weight': config('crush-initial-weight'),
        'osd_journal_size': config('osd-journal-size'),
        'osd_max_backfills': config('osd-max-backfills'),
        'osd_recovery_max_active': config('osd-recovery-max-active'),
        'use_syslog': str(config('use-syslog')).lower(),
        'ceph_public_network': public_network,
        'ceph_cluster_network': cluster_network,
        'loglevel': config('loglevel'),
        'dio': str(config('use-direct-io')).lower(),
        'short_object_len': use_short_objects(),
        'upgrade_in_progress': upgrading,
        'bluestore': use_bluestore(),
        'bluestore_experimental': cmp_pkgrevno('ceph', '12.1.0') < 0,
        'bluestore_block_wal_size': config('bluestore-block-wal-size'),
        'bluestore_block_db_size': config('bluestore-block-db-size'),
    }

    try:
        cephcontext['bdev_discard'] = get_bdev_enable_discard()
    except ValueError as ex:
        # The user set bdev-enable-discard to an invalid value, so log the
        # issue as a warning and fall back to False (disabled).
        log(str(ex), level=WARNING)
        cephcontext['bdev_discard'] = False

    if config('prefer-ipv6'):
        dynamic_ipv6_address = get_ipv6_addr()[0]
        if not public_network:
            cephcontext['public_addr'] = dynamic_ipv6_address
        if not cluster_network:
            cephcontext['cluster_addr'] = dynamic_ipv6_address
    else:
        cephcontext['public_addr'] = get_public_addr()
        cephcontext['cluster_addr'] = get_cluster_addr()

    if config('customize-failure-domain'):
        az = az_info()
        if az:
            cephcontext['crush_location'] = "root=default {} host={}" \
                .format(az, socket.gethostname())
        else:
            log("Your Juju environment doesn't"
                "have support for Availability Zones")

    # NOTE(dosaboy): these sections must correspond to what is supported in the
    #                config template.
    sections = ['global', 'osd']
    cephcontext.update(
        ch_ceph.CephOSDConfContext(permitted_sections=sections)())
    cephcontext.update(ch_context.CephBlueStoreCompressionContext()())
    return cephcontext
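The last few lines of get_ceph_context() rely on the charms.openstack and
charmhelpers convention that context classes are callable and return a dict,
so several contexts can be folded into one template context. A minimal,
self-contained sketch of that pattern follows; FakeCompressionContext and the
key it returns are stand-ins, not the real
ch_context.CephBlueStoreCompressionContext.

class FakeCompressionContext:
    """Callable context object returning template variables as a dict."""

    def __call__(self):
        return {'bluestore_compression_mode': 'aggressive'}


cephcontext = {
    'fsid': '6547bd3e-1397-11e2-82e5-53567c8d32dc',
    'mon_hosts': '10.0.0.1 10.0.0.2 10.0.0.3',
}
# Calling the context instance yields a dict that merges into the template
# context, mirroring the cephcontext.update(...) calls above.
cephcontext.update(FakeCompressionContext()())
print(cephcontext['bluestore_compression_mode'])  # -> aggressive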
Code example #7
def get_ceph_request():
    rq = CephBrokerRq()
    if (config('libvirt-image-backend') == 'rbd'
            and assert_libvirt_rbd_imagebackend_allowed()):
        pool_name = config('rbd-pool')
        replicas = config('ceph-osd-replication-count')
        weight = config('ceph-pool-weight')
        bluestore_compression = ch_context.CephBlueStoreCompressionContext()

        if config('pool-type') == 'erasure-coded':
            # General EC plugin config
            plugin = config('ec-profile-plugin')
            technique = config('ec-profile-technique')
            device_class = config('ec-profile-device-class')
            metadata_pool_name = (config('ec-rbd-metadata-pool')
                                  or "{}-metadata".format(pool_name))
            bdm_k = config('ec-profile-k')
            bdm_m = config('ec-profile-m')
            # LRC plugin config
            bdm_l = config('ec-profile-locality')
            crush_locality = config('ec-profile-crush-locality')
            # SHEC plugin config
            bdm_c = config('ec-profile-durability-estimator')
            # CLAY plugin config
            bdm_d = config('ec-profile-helper-chunks')
            scalar_mds = config('ec-profile-scalar-mds')
            # Profile name
            profile_name = (config('ec-profile-name')
                            or "{}-profile".format(pool_name))
            # Metadata sizing is approximately 1% of overall data weight
            # but is in effect driven by the number of rbd's rather than
            # their size - so it can be very lightweight.
            metadata_weight = weight * 0.01
            # Resize data pool weight to accommodate metadata weight
            weight = weight - metadata_weight
            # Create metadata pool
            rq.add_op_create_pool(name=metadata_pool_name,
                                  replica_count=replicas,
                                  weight=metadata_weight,
                                  group='vms',
                                  app_name='rbd')

            # Create erasure profile
            rq.add_op_create_erasure_profile(name=profile_name,
                                             k=bdm_k,
                                             m=bdm_m,
                                             lrc_locality=bdm_l,
                                             lrc_crush_locality=crush_locality,
                                             shec_durability_estimator=bdm_c,
                                             clay_helper_chunks=bdm_d,
                                             clay_scalar_mds=scalar_mds,
                                             device_class=device_class,
                                             erasure_type=plugin,
                                             erasure_technique=technique)

            # Create EC data pool

            # NOTE(fnordahl): once we deprecate Python 3.5 support we can do
            # the unpacking of the BlueStore compression arguments as part of
            # the function arguments. Until then we need to build the dict
            # prior to the function call.
            kwargs = {
                'name': pool_name,
                'erasure_profile': profile_name,
                'weight': weight,
                'group': "vms",
                'app_name': "rbd",
                'allow_ec_overwrites': True
            }
            kwargs.update(bluestore_compression.get_kwargs())
            rq.add_op_create_erasure_pool(**kwargs)
        else:
            kwargs = {
                'name': pool_name,
                'replica_count': replicas,
                'weight': weight,
                'group': 'vms',
                'app_name': 'rbd',
            }
            kwargs.update(bluestore_compression.get_kwargs())
            rq.add_op_create_replicated_pool(**kwargs)

    if config('restrict-ceph-pools'):
        rq.add_op_request_access_to_group(
            name="volumes",
            object_prefix_permissions={'class-read': ['rbd_children']},
            permission='rwx')
        rq.add_op_request_access_to_group(
            name="images",
            object_prefix_permissions={'class-read': ['rbd_children']},
            permission='rwx')
        rq.add_op_request_access_to_group(
            name="vms",
            object_prefix_permissions={'class-read': ['rbd_children']},
            permission='rwx')
    return rq
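In the erasure-coded branch above, the metadata pool is sized at roughly 1%
of the configured pool weight and the data pool gets the remainder. A small
worked example of that split follows; split_pool_weight is an illustrative
helper, not part of the charm.

def split_pool_weight(weight, metadata_fraction=0.01):
    """Split a pool weight between the data pool and its metadata pool."""
    metadata_weight = weight * metadata_fraction
    return weight - metadata_weight, metadata_weight


data_weight, metadata_weight = split_pool_weight(30)  # ceph-pool-weight=30
print(data_weight, metadata_weight)  # -> 29.7 0.3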
Code example #8
File: ceph_rgw.py Project: jardon/charm-ceph-radosgw
def get_create_rgw_pools_rq(prefix=None):
    """Pre-create RGW pools so that they have the correct settings.

    If a prefix is provided it will be prepended to each pool name.

    When RGW creates its own pools it will create them with non-optimal
    settings (LP: #1476749).

    NOTE: see http://docs.ceph.com/docs/master/radosgw/config-ref/#pools and
          http://docs.ceph.com/docs/master/radosgw/config/#create-pools for
          list of supported/required pools.
    """
    def _add_light_pool(rq, pool, pg_num, prefix=None):
        # Per the Ceph PG Calculator, all of the lightweight pools get 0.10%
        # of the data by default and only the .rgw.buckets.* get higher values
        weights = {'.rgw.buckets.index': 3.00, '.rgw.buckets.extra': 1.00}
        w = weights.get(pool, 0.10)
        if prefix:
            pool = "{prefix}{pool}".format(prefix=prefix, pool=pool)
        if pg_num > 0:
            rq.add_op_create_pool(name=pool,
                                  replica_count=replicas,
                                  pg_num=pg_num,
                                  group='objects',
                                  app_name=CEPH_POOL_APP_NAME)
        else:
            rq.add_op_create_pool(name=pool,
                                  replica_count=replicas,
                                  weight=w,
                                  group='objects',
                                  app_name=CEPH_POOL_APP_NAME)

    rq = CephBrokerRq()
    replicas = config('ceph-osd-replication-count')

    prefix = prefix or 'default'
    # Buckets likely to contain the most data and therefore
    # requiring the most PGs
    heavy = ['.rgw.buckets.data']
    bucket_weight = config('rgw-buckets-pool-weight')
    bluestore_compression = ch_context.CephBlueStoreCompressionContext()

    if config('pool-type') == 'erasure-coded':
        # General EC plugin config
        plugin = config('ec-profile-plugin')
        technique = config('ec-profile-technique')
        device_class = config('ec-profile-device-class')
        bdm_k = config('ec-profile-k')
        bdm_m = config('ec-profile-m')
        # LRC plugin config
        bdm_l = config('ec-profile-locality')
        crush_locality = config('ec-profile-crush-locality')
        # SHEC plugin config
        bdm_c = config('ec-profile-durability-estimator')
        # CLAY plugin config
        bdm_d = config('ec-profile-helper-chunks')
        scalar_mds = config('ec-profile-scalar-mds')
        # Profile name
        service = service_name()
        profile_name = (config('ec-profile-name')
                        or "{}-profile".format(service))
        rq.add_op_create_erasure_profile(name=profile_name,
                                         k=bdm_k,
                                         m=bdm_m,
                                         lrc_locality=bdm_l,
                                         lrc_crush_locality=crush_locality,
                                         shec_durability_estimator=bdm_c,
                                         clay_helper_chunks=bdm_d,
                                         clay_scalar_mds=scalar_mds,
                                         device_class=device_class,
                                         erasure_type=plugin,
                                         erasure_technique=technique)

        for pool in heavy:
            pool = "{prefix}{pool}".format(prefix=prefix, pool=pool)
            # NOTE(fnordahl): once we deprecate Python 3.5 support we can do
            # the unpacking of the BlueStore compression arguments as part of
            # the function arguments. Until then we need to build the dict
            # prior to the function call.
            kwargs = {
                'name': pool,
                'erasure_profile': profile_name,
                'weight': bucket_weight,
                'group': "objects",
                'app_name': CEPH_POOL_APP_NAME,
            }
            kwargs.update(bluestore_compression.get_kwargs())
            rq.add_op_create_erasure_pool(**kwargs)
    else:
        for pool in heavy:
            pool = "{prefix}{pool}".format(prefix=prefix, pool=pool)
            # NOTE(fnordahl): once we deprecate Python 3.5 support we can do
            # the unpacking of the BlueStore compression arguments as part of
            # the function arguments. Until then we need to build the dict
            # prior to the function call.
            kwargs = {
                'name': pool,
                'replica_count': replicas,
                'weight': bucket_weight,
                'group': 'objects',
                'app_name': CEPH_POOL_APP_NAME,
            }
            kwargs.update(bluestore_compression.get_kwargs())
            rq.add_op_create_replicated_pool(**kwargs)

    # NOTE: we want these pools to have a smaller pg_num/pgp_num than the
    # others since they are not expected to contain as much data
    light = [
        '.rgw.control',
        '.rgw.data.root',
        '.rgw.gc',
        '.rgw.log',
        '.rgw.intent-log',
        '.rgw.meta',
        '.rgw.usage',
        '.rgw.users.keys',
        '.rgw.users.email',
        '.rgw.users.swift',
        '.rgw.users.uid',
        '.rgw.buckets.extra',
        '.rgw.buckets.index',
    ]
    pg_num = config('rgw-lightweight-pool-pg-num')
    for pool in light:
        _add_light_pool(rq, pool, pg_num, prefix)

    _add_light_pool(rq, '.rgw.root', pg_num)

    if config('restrict-ceph-pools'):
        rq.add_op_request_access_to_group(name="objects",
                                          permission='rwx',
                                          key_name='radosgw.gateway')

    return rq
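_add_light_pool() above sizes the auxiliary RGW pools at 0.10% of the data by
default, gives only the bucket index/extra pools larger shares, and lets an
explicit rgw-lightweight-pool-pg-num override weight-based sizing. A
pure-function sketch of that rule follows; light_pool_params is an
illustrative helper, not part of the charm.

def light_pool_params(pool, pg_num, prefix=None):
    """Return the name and sizing parameter for a lightweight RGW pool."""
    weights = {'.rgw.buckets.index': 3.00, '.rgw.buckets.extra': 1.00}
    weight = weights.get(pool, 0.10)
    name = "{}{}".format(prefix, pool) if prefix else pool
    if pg_num > 0:
        return {'name': name, 'pg_num': pg_num}
    return {'name': name, 'weight': weight}


print(light_pool_params('.rgw.control', -1, prefix='default'))
# -> {'name': 'default.rgw.control', 'weight': 0.1}
print(light_pool_params('.rgw.buckets.index', -1, prefix='default'))
# -> {'name': 'default.rgw.buckets.index', 'weight': 3.0}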