Example No. 1
    def _get_conf_ceph_overrides(self):
        ceph_backend = self._get_primary_ceph_backend()
        if not ceph_backend:
            return {}

        replication, min_replication = \
            StorageBackendConfig.get_ceph_pool_replication(self.dbapi)

        # We don't use the chart to configure the cinder-volumes
        # pool, so these values don't have any impact right now.
        ruleset = 0

        conf_ceph = {
            'monitors': self._get_formatted_ceph_monitor_ips(),
            'admin_keyring': 'null',
            'pools': {
                'backup': {
                    # We use the chart to configure the pool for backups, so
                    # it's safe to use the same replication as for the primary
                    # tier pools.
                    'replication': replication,
                    'crush_rule': ruleset,
                },
                'volume': {
                    # The cinder chart doesn't currently support specifying
                    # the config for multiple volume/backup pools.
                    'replication': replication,
                    'crush_rule': ruleset,
                }
            }
        }

        return conf_ceph
Example No. 2
    def _get_rbd_ephemeral_storage(self):
        ephemeral_storage_conf = {}
        ephemeral_pools = []

        # Get the values for replication and min replication from the storage
        # backend attributes.
        replication, min_replication = \
            StorageBackendConfig.get_ceph_pool_replication(self.dbapi)

        # For now, the ephemeral pool will only be on the primary Ceph tier
        rule_name = "{0}{1}{2}".format(
            constants.SB_TIER_DEFAULT_NAMES[constants.SB_TIER_TYPE_CEPH],
            constants.CEPH_CRUSH_TIER_SUFFIX, "-ruleset").replace('-', '_')

        # Form the dictionary with the info for the ephemeral pool.
        # If needed, multiple pools can be specified.
        ephemeral_pool = {
            'rbd_pool_name': constants.CEPH_POOL_EPHEMERAL_NAME,
            'rbd_user': RBD_POOL_USER,
            'rbd_crush_rule': rule_name,
            'rbd_replication': replication,
            'rbd_chunk_size': constants.CEPH_POOL_EPHEMERAL_PG_NUM
        }
        ephemeral_pools.append(ephemeral_pool)

        ephemeral_storage_conf = {'type': 'rbd', 'rbd_pools': ephemeral_pools}

        return ephemeral_storage_conf
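For reference, the crush rule name assembled in Example No. 2 is just the primary tier name, the crush tier suffix and "-ruleset" concatenated, with dashes turned into underscores. A minimal standalone sketch of that string manipulation, assuming a default tier name of "storage" and a "-tier" suffix (consistent with the hard-coded "storage_tier_ruleset" fallback in Example No. 5; the real values live in sysinv's constants module):

    # Hypothetical stand-in values; the real ones come from sysinv's
    # constants module (SB_TIER_DEFAULT_NAMES, CEPH_CRUSH_TIER_SUFFIX).
    tier_name = "storage"     # assumed default primary tier name
    tier_suffix = "-tier"     # assumed crush tier suffix

    rule_name = "{0}{1}{2}".format(
        tier_name, tier_suffix, "-ruleset").replace('-', '_')

    print(rule_name)  # -> storage_tier_ruleset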
Example No. 3
    def _get_conf_ceph_overrides(self):
        ceph_backend = self._get_primary_ceph_backend()
        if not ceph_backend:
            return {}

        replication, min_replication = \
            StorageBackendConfig.get_ceph_pool_replication(self.dbapi)

        rule_name = "{0}{1}{2}".format(
            constants.SB_TIER_DEFAULT_NAMES[constants.SB_TIER_TYPE_CEPH],
            constants.CEPH_CRUSH_TIER_SUFFIX, "-ruleset").replace('-', '_')

        conf_ceph = {
            'monitors': self._get_formatted_ceph_monitor_ips(),
            'admin_keyring': 'null',
            'pools': {
                'backup': {
                    # We use the chart to configure the pool for backups, so
                    # it's safe to use the same replication as for the primary
                    # tier pools.
                    'replication': replication,
                    'crush_rule': rule_name,
                },
                'volume': {
                    # The cinder chart doesn't currently support specifying
                    # the config for multiple volume/backup pools.
                    'replication': replication,
                    'crush_rule': rule_name,
                }
            }
        }

        return conf_ceph
Example No. 4
    def _get_rbd_ephemeral_storage(self):
        ephemeral_storage_conf = {}
        ephemeral_pools = []

        # Get the values for replication and min replication from the storage
        # backend attributes.
        replication, min_replication = \
            StorageBackendConfig.get_ceph_pool_replication(self.dbapi)

        # For now, the ephemeral pool will only be on the primary Ceph tier
        # that's using the 0 crush ruleset.
        ruleset = 0

        # Form the dictionary with the info for the ephemeral pool.
        # If needed, multiple pools can be specified.
        ephemeral_pool = {
            'rbd_pool_name': constants.CEPH_POOL_EPHEMERAL_NAME,
            'rbd_user': RBD_POOL_USER,
            'rbd_crush_rule': ruleset,
            'rbd_replication': replication,
            'rbd_chunk_size': constants.CEPH_POOL_EPHEMERAL_PG_NUM
        }
        ephemeral_pools.append(ephemeral_pool)

        ephemeral_storage_conf = {'type': 'rbd', 'rbd_pools': ephemeral_pools}

        return ephemeral_storage_conf
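Both variants of _get_rbd_ephemeral_storage (Examples No. 2 and No. 4) return the same shape: a 'type': 'rbd' marker plus a list of pool descriptors under 'rbd_pools'. With purely illustrative placeholder values for the pool name, user and chunk size (the real values come from the constants referenced above, and the crush rule is either the numeric ruleset 0 or the derived rule name), the result looks roughly like:

    ephemeral_storage_conf = {
        'type': 'rbd',
        'rbd_pools': [
            {
                'rbd_pool_name': 'ephemeral',   # placeholder for CEPH_POOL_EPHEMERAL_NAME
                'rbd_user': 'ephemeral-user',   # placeholder for RBD_POOL_USER
                'rbd_crush_rule': 0,            # or 'storage_tier_ruleset' in Example No. 2
                'rbd_replication': 2,           # from get_ceph_pool_replication()
                'rbd_chunk_size': 512,          # placeholder for CEPH_POOL_EPHEMERAL_PG_NUM
            },
        ],
    }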
Example No. 5
    def _get_conf_overrides(self):
        ceph_backend = self._get_primary_ceph_backend()
        if not ceph_backend and not self._rook_ceph:
            rbd_store_pool = ""
            rbd_store_user = ""
            replication = 1
        elif self._rook_ceph:
            rbd_store_pool = constants.CEPH_POOL_IMAGES_NAME
            rbd_store_user = RBD_STORE_USER

            replication = 2
            if utils.is_aio_simplex_system(self.dbapi):
                replication = 1
        else:
            rbd_store_pool = app_constants.CEPH_POOL_IMAGES_NAME
            rbd_store_user = RBD_STORE_USER
            replication, min_replication = \
                StorageBackendConfig.get_ceph_pool_replication(self.dbapi)

        if not self._rook_ceph:
            # Only the primary Ceph tier is used for the glance images pool
            rule_name = "{0}{1}{2}".format(
                constants.SB_TIER_DEFAULT_NAMES[constants.SB_TIER_TYPE_CEPH],
                constants.CEPH_CRUSH_TIER_SUFFIX,
                "-ruleset").replace('-', '_')
        else:
            rule_name = "storage_tier_ruleset"

        conf = {
            'glance': {
                'DEFAULT': {
                    'graceful_shutdown': True,
                    'show_image_direct_url': True,
                },
                'glance_store': {
                    'chunk_size': app_constants.CEPH_POOL_IMAGES_CHUNK_SIZE,
                    'filesystem_store_datadir': constants.GLANCE_IMAGE_PATH,
                    'rbd_store_pool': rbd_store_pool,
                    'rbd_store_user': rbd_store_user,
                    'rbd_store_replication': replication,
                    'rbd_store_crush_rule': rule_name,
                }
            }
        }

        if ceph_backend:
            conf['ceph'] = self._get_ceph_overrides()
        elif self._rook_ceph:
            conf['ceph'] = {
                'admin_keyring': self._get_rook_ceph_admin_keyring()
            }

        return conf
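The replication choice in Example No. 5 boils down to three cases: no Ceph backend at all means an empty pool/user and replication 1, Rook-managed Ceph defaults to 2 (dropped to 1 on an all-in-one simplex system, which only has a single host to place replicas on), and a host-based Ceph backend takes whatever the storage backend attributes report. A hedged sketch of just that decision, with the database and StorageBackendConfig lookups stubbed out as plain arguments:

    def pick_rbd_store_replication(has_ceph_backend, is_rook_ceph,
                                   is_aio_simplex, backend_replication):
        # Sketch only: the real method queries self.dbapi, utils and
        # StorageBackendConfig; the arguments stand in for those lookups.
        if not has_ceph_backend and not is_rook_ceph:
            return 1                        # no Ceph: images use the filesystem store
        if is_rook_ceph:
            return 1 if is_aio_simplex else 2
        return backend_replication          # host-based Ceph: use the backend attributes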
Example No. 6
    def _get_conf_ceph_overrides(self):
        ceph_backend = self._get_primary_ceph_backend()
        if not ceph_backend:
            return {}

        primary_tier_name = \
            constants.SB_TIER_DEFAULT_NAMES[constants.SB_TIER_TYPE_CEPH]

        replication, min_replication = \
            StorageBackendConfig.get_ceph_pool_replication(self.dbapi)

        pools = {}
        for backend in self.dbapi.storage_ceph_get_list():
            if backend.tier_name == primary_tier_name:
                pool_name = app_constants.CEPH_POOL_VOLUMES_NAME
            else:
                pool_name = "%s-%s" % (app_constants.CEPH_POOL_VOLUMES_NAME,
                                      backend.tier_name)
            rule_name = "{0}{1}{2}".format(
                backend.tier_name, constants.CEPH_CRUSH_TIER_SUFFIX,
                "-ruleset").replace('-', '_')
            pool = {
                'replication': replication,
                'crush_rule': rule_name.encode('utf8', 'strict'),
                'chunk_size': app_constants.CEPH_POOL_VOLUMES_CHUNK_SIZE,
                'app_name': app_constants.CEPH_POOL_VOLUMES_APP_NAME
            }
            pools[pool_name.encode('utf8', 'strict')] = pool
            if backend.name == constants.SB_DEFAULT_NAMES[constants.SB_TYPE_CEPH]:
                # Backup uses the same replication and crush rule as
                # the default storage backend
                pool_backup = {
                    'replication': replication,
                    'crush_rule': rule_name.encode('utf8', 'strict'),
                    'chunk_size': app_constants.CEPH_POOL_BACKUP_CHUNK_SIZE,
                    'app_name': app_constants.CEPH_POOL_BACKUP_APP_NAME
                }
                pools['backup'] = dict(pool_backup)

        return {
            'monitors': self._get_formatted_ceph_monitor_ips(),
            'admin_keyring': 'null',
            'pools': pools
        }
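Example No. 6 names the volume pools per tier: the primary tier keeps the base volumes pool name, a secondary tier gets "<base-name>-<tier-name>", and each tier's crush rule follows the same "<tier><suffix>-ruleset" pattern seen earlier. As an illustration, assuming a base pool name of "cinder-volumes", a "-tier" suffix and a secondary tier called "gold" (all hypothetical stand-ins; the real values come from app_constants and the storage backend records):

    pool_name = "%s-%s" % ("cinder-volumes", "gold")    # -> cinder-volumes-gold
    rule_name = "{0}{1}{2}".format(
        "gold", "-tier", "-ruleset").replace('-', '_')  # -> gold_tier_ruleset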
Example No. 7
    def _get_conf_overrides(self):
        ceph_backend = self._get_primary_ceph_backend()
        if not ceph_backend:
            rbd_store_pool = ""
            rbd_store_user = ""
            replication = 1
        else:
            rbd_store_pool = constants.CEPH_POOL_IMAGES_NAME
            rbd_store_user = RBD_STORE_USER
            replication, min_replication = \
                StorageBackendConfig.get_ceph_pool_replication(self.dbapi)

        # Only the primary Ceph tier is used for the glance images pool
        rule_name = "{0}{1}{2}".format(
            constants.SB_TIER_DEFAULT_NAMES[constants.SB_TIER_TYPE_CEPH],
            constants.CEPH_CRUSH_TIER_SUFFIX, "-ruleset").replace('-', '_')

        conf = {
            'glance': {
                'DEFAULT': {
                    'graceful_shutdown': True,
                    'show_image_direct_url': True,
                },
                'glance_store': {
                    'filesystem_store_datadir': constants.GLANCE_IMAGE_PATH,
                    'rbd_store_pool': rbd_store_pool,
                    'rbd_store_user': rbd_store_user,
                    'rbd_store_replication': replication,
                    'rbd_store_crush_rule': rule_name,
                }
            }
        }

        if ceph_backend:
            conf['ceph'] = self._get_ceph_overrides()

        return conf
Example No. 8
    def _get_conf_overrides(self):
        ceph_backend = self._get_primary_ceph_backend()
        if not ceph_backend:
            rbd_store_pool = ""
            rbd_store_user = ""
            replication = 1
        else:
            rbd_store_pool = constants.CEPH_POOL_IMAGES_NAME
            rbd_store_user = RBD_STORE_USER
            replication, min_replication = \
                StorageBackendConfig.get_ceph_pool_replication(self.dbapi)

        # Only the primary Ceph tier is used for the glance images pool, so
        # the crush ruleset is 0.
        ruleset = 0

        conf = {
            'glance': {
                'DEFAULT': {
                    'graceful_shutdown': True,
                    'show_image_direct_url': True,
                },
                'glance_store': {
                    'filesystem_store_datadir': constants.GLANCE_IMAGE_PATH,
                    'rbd_store_pool': rbd_store_pool,
                    'rbd_store_user': rbd_store_user,
                    'rbd_store_replication': replication,
                    'rbd_store_crush_rule': ruleset,
                }
            }
        }

        if ceph_backend:
            conf['ceph'] = self._get_ceph_overrides()

        return conf