def get_ceph_request():
    """Build the Ceph broker request for this unit.

    When the libvirt image backend is rbd (and that backend is allowed),
    request creation of the configured rbd pool in the 'vms' group.
    If 'restrict-ceph-pools' is set, additionally request rwx access to
    the 'volumes', 'images' and 'vms' groups with class-read on
    'rbd_children' object prefixes.

    :returns: CephBrokerRq describing the required pools/permissions.
    """
    rq = CephBrokerRq()
    rbd_backend = (config('libvirt-image-backend') == 'rbd' and
                   assert_libvirt_rbd_imagebackend_allowed())
    if rbd_backend:
        rq.add_op_create_pool(
            name=config('rbd-pool'),
            replica_count=config('ceph-osd-replication-count'),
            weight=config('ceph-pool-weight'),
            group='vms')
    if config('restrict-ceph-pools'):
        # Same restricted grant for every group this service touches.
        for group in ('volumes', 'images', 'vms'):
            rq.add_op_request_access_to_group(
                name=group,
                object_prefix_permissions={'class-read': ['rbd_children']},
                permission='rwx')
    return rq
def get_ceph_request():
    """Assemble the Ceph broker request for this service.

    Requests a replicated rbd pool (named by 'rbd-pool-name' config,
    falling back to the service name) in the 'volumes' group, and —
    when 'restrict-ceph-pools' is enabled — rwx access to the
    'volumes', 'images' and 'vms' groups with class-read permission
    on the 'rbd_children' object prefix.

    :returns: populated CephBrokerRq
    """
    service = service_name()
    rq = CephBrokerRq()
    # Explicit pool-name config wins over the service-name default.
    pool_name = config('rbd-pool-name') or service
    rq.add_op_create_pool(
        name=pool_name,
        replica_count=config('ceph-osd-replication-count'),
        weight=config('ceph-pool-weight'),
        group='volumes',
        app_name='rbd')
    if config('restrict-ceph-pools'):
        for group in ('volumes', 'images', 'vms'):
            rq.add_op_request_access_to_group(
                name=group,
                object_prefix_permissions={'class-read': ['rbd_children']},
                permission='rwx')
    return rq
def get_ceph_request():
    """Build a broker request creating one pool named after this service.

    Replica count and weight come from the charm's
    'ceph-osd-replication-count' and 'ceph-pool-weight' config options.

    :returns: CephBrokerRq with a single create-pool op
    """
    request = CephBrokerRq()
    request.add_op_create_pool(
        name=service_name(),
        replica_count=config('ceph-osd-replication-count'),
        weight=config('ceph-pool-weight'))
    return request
def create_pool(self, name, replicas=3):
    """
    Request pool setup

    Sends a create-pool broker request the first time a pool name is
    seen; on subsequent calls for the same name, re-sends the stored
    request (charmhelpers' send_request_if_needed de-duplicates).

    @param name: name of pool to create
    @param replicas: number of replicas for supporting pools
    """
    # json.dumps of the CephBrokerRq(), keyed by pool name
    requests = self.get_local(key='broker_reqs') or {}
    if name not in requests:
        rq = CephBrokerRq()
        rq.add_op_create_pool(name="{}".format(name),
                              replica_count=replicas,
                              weight=None)
        # NOTE: removed dead `if not requests: requests = {}` — the
        # `or {}` above already guarantees requests is a dict.
        requests[name] = rq.request
        self.set_local(key='broker_reqs', value=requests)
        send_request_if_needed(rq, relation=self.relation_name)
        # Pools are no longer known-available until the broker answers.
        self.remove_state('{relation_name}.pools.available')
    else:
        # Rebuild the previously stored request and re-send if needed.
        rq = CephBrokerRq()
        try:
            j = json.loads(requests[name])
            rq.ops = j['ops']
            send_request_if_needed(rq, relation=self.relation_name)
        except ValueError as err:
            # Stored request is corrupt; log and carry on (best-effort).
            log("Unable to decode broker_req: {}. Error: {}".format(
                requests[name], err))
def get_ceph_request():
    """Broker request for this service's pool in the 'images' group.

    Requests a replicated pool named after the service; when
    'restrict-ceph-pools' is set, also requests rwx access to the
    'images' group.

    :returns: CephBrokerRq describing the required pool/permissions
    """
    service = service_name()
    rq = CephBrokerRq()
    pool_kwargs = {
        'name': service,
        'replica_count': config('ceph-osd-replication-count'),
        'weight': config('ceph-pool-weight'),
        'group': 'images',
    }
    rq.add_op_create_pool(**pool_kwargs)
    if config('restrict-ceph-pools'):
        rq.add_op_request_access_to_group(name="images", permission='rwx')
    return rq
def get_ceph_request():
    """Broker request for this service's rbd pool in the 'images' group.

    Requests a replicated pool tagged with app_name 'rbd'; when
    'restrict-ceph-pools' is set, also requests rwx access to the
    'images' group with class-read on the 'rbd_children' object prefix.

    :returns: CephBrokerRq describing the required pool/permissions
    """
    service = service_name()
    rq = CephBrokerRq()
    pool_kwargs = {
        'name': service,
        'replica_count': config('ceph-osd-replication-count'),
        'weight': config('ceph-pool-weight'),
        'group': 'images',
        'app_name': 'rbd',
    }
    rq.add_op_create_pool(**pool_kwargs)
    if config('restrict-ceph-pools'):
        rq.add_op_request_access_to_group(
            name="images",
            object_prefix_permissions={'class-read': ['rbd_children']},
            permission='rwx')
    return rq
def get_create_rgw_pools_rq(prefix):
    """Pre-create RGW pools so that they have the correct settings.

    This will prepend a prefix onto the pools if specified in the config.yaml

    When RGW creates its own pools it will create them with non-optimal
    settings (LP: #1476749).

    NOTE: see http://docs.ceph.com/docs/master/radosgw/config-ref/#pools and
          http://docs.ceph.com/docs/master/radosgw/config/#create-pools for
          list of supported/required pools.

    :param prefix: optional string prepended to each lightweight pool name;
                   falsy value means no prefix.
    :returns: CephBrokerRq with one create-pool op per RGW pool.
    """
    rq = CephBrokerRq()
    replicas = config('ceph-osd-replication-count')

    # Buckets likely to contain the most data and therefore requiring the most
    # PGs
    heavy = ['.rgw.buckets']
    for pool in heavy:
        # NOTE(review): prefix is applied only to the light pools below, not
        # here — confirm this asymmetry is intended.
        rq.add_op_create_pool(name=pool, replica_count=replicas)

    # NOTE: we want these pools to have a smaller pg_num/pgp_num than the
    # others since they are not expected to contain as much data
    # BUG FIX: the original list was missing commas between several entries,
    # so Python's implicit string-literal concatenation silently produced
    # bogus pool names such as '.intent-log.usage' and
    # '.users.users.email.users.swift.users.uid'.
    light = ['.rgw',
             '.rgw.root',
             '.rgw.control',
             '.rgw.gc',
             '.rgw.buckets',
             '.rgw.buckets.index',
             '.rgw.buckets.extra',
             '.log',
             '.intent-log',
             '.usage',
             '.users',
             '.users.email',
             '.users.swift',
             '.users.uid']
    pg_num = config('rgw-lightweight-pool-pg-num')
    for pool in light:
        if prefix:
            pool = "{prefix}{pool}".format(prefix=prefix, pool=pool)
        rq.add_op_create_pool(name=pool, replica_count=replicas,
                              pg_num=pg_num)

    return rq
def get_ceph_request():
    """Build the Ceph broker request for this unit.

    When the libvirt image backend is rbd (and allowed), request the
    configured rbd pool in the 'vms' group; when 'restrict-ceph-pools'
    is enabled, request rwx access to the 'volumes', 'images' and 'vms'
    groups.

    :returns: CephBrokerRq describing the required pools/permissions.
    """
    rq = CephBrokerRq()
    use_rbd = (config('libvirt-image-backend') == 'rbd' and
               assert_libvirt_rbd_imagebackend_allowed())
    if use_rbd:
        rq.add_op_create_pool(
            name=config('rbd-pool'),
            replica_count=config('ceph-osd-replication-count'),
            weight=config('ceph-pool-weight'),
            group='vms')
    if config('restrict-ceph-pools'):
        for group in ('volumes', 'images', 'vms'):
            rq.add_op_request_access_to_group(name=group, permission='rwx')
    return rq
def get_ceph_request():
    """Broker request for this service's pool in the 'volumes' group.

    Requests a replicated pool named after the service; when
    'restrict-ceph-pools' is set, also requests rwx access to the
    'volumes', 'images' and 'vms' groups with class-read on the
    'rbd_children' object prefix.

    :returns: CephBrokerRq describing the required pool/permissions
    """
    service = service_name()
    rq = CephBrokerRq()
    rq.add_op_create_pool(
        name=service,
        replica_count=config('ceph-osd-replication-count'),
        group="volumes")
    if config('restrict-ceph-pools'):
        for group in ("volumes", "images", "vms"):
            rq.add_op_request_access_to_group(
                name=group,
                object_prefix_permissions={'class-read': ['rbd_children']},
                permission='rwx')
    return rq
def initialize_mds(self, name, replicas=3): """ Request pool setup and mds creation @param name: name of mds pools to create @param replicas: number of replicas for supporting pools """ # json.dumps of the CephBrokerRq() json_rq = self.get_local(key='broker_req') if not json_rq: rq = CephBrokerRq() rq.add_op_create_pool(name="{}_data".format(name), replica_count=replicas, weight=None) rq.add_op_create_pool(name="{}_metadata".format(name), replica_count=replicas, weight=None) # Create CephFS rq.ops.append({ 'op': 'create-cephfs', 'mds_name': name, 'data_pool': "{}_data".format(name), 'metadata_pool': "{}_metadata".format(name), }) self.set_local(key='broker_req', value=rq.request) send_request_if_needed(rq, relation=self.relation_name) else: rq = CephBrokerRq() try: j = json.loads(json_rq) log("Json request: {}".format(json_rq)) rq.ops = j['ops'] send_request_if_needed(rq, relation=self.relation_name) except ValueError as err: log("Unable to decode broker_req: {}. Error: {}".format( json_rq, err))
def create_pools(self, names, replicas=3):
    """
    Request pools setup

    Pool names already present in the locally stored requests are
    skipped; all new names are batched into a single broker request.

    @param names: list of pool names to create
    @param replicas: number of replicas for supporting pools
    """
    # json.dumps of the CephBrokerRq(), keyed by pool name
    requests = self.get_local(key='broker_reqs') or {}
    new_names = [name for name in names if name not in requests]
    # existing names get ignored here
    # new names get added to a single request
    if new_names:
        rq = CephBrokerRq()
        for name in new_names:
            rq.add_op_create_pool(name="{}".format(name),
                                  replica_count=replicas,
                                  weight=None)
        # BUG FIX: record the request under EVERY new name; previously the
        # assignment sat outside the loop, so only the last name in
        # new_names was recorded and earlier pools were re-requested on
        # every call.
        for name in new_names:
            requests[name] = rq.request
        self.set_local(key='broker_reqs', value=requests)
        send_request_if_needed(rq, relation=self.relation_name)
        # Pools are no longer known-available until the broker answers.
        self.remove_state('{relation_name}.pools.available')
def get_ceph_request():
    """Build the Ceph broker request for this unit's rbd pools.

    Only requests pools when the libvirt image backend is rbd (and that
    backend is allowed). Depending on 'pool-type' config, requests either
    an erasure-coded data pool (plus replicated metadata pool and erasure
    profile) or a plain replicated pool, both in the 'vms' group with
    BlueStore compression settings merged in. When 'restrict-ceph-pools'
    is set, also requests rwx access to the 'volumes', 'images' and 'vms'
    groups.

    :returns: CephBrokerRq describing the required pools/permissions.
    """
    rq = CephBrokerRq()
    if (config('libvirt-image-backend') == 'rbd' and
            assert_libvirt_rbd_imagebackend_allowed()):
        pool_name = config('rbd-pool')
        replicas = config('ceph-osd-replication-count')
        weight = config('ceph-pool-weight')
        bluestore_compression = ch_context.CephBlueStoreCompressionContext()

        if config('pool-type') == 'erasure-coded':
            # General EC plugin config
            plugin = config('ec-profile-plugin')
            technique = config('ec-profile-technique')
            device_class = config('ec-profile-device-class')
            metadata_pool_name = (
                config('ec-rbd-metadata-pool') or
                "{}-metadata".format(pool_name))
            bdm_k = config('ec-profile-k')
            bdm_m = config('ec-profile-m')
            # LRC plugin config
            bdm_l = config('ec-profile-locality')
            crush_locality = config('ec-profile-crush-locality')
            # SHEC plugin config
            bdm_c = config('ec-profile-durability-estimator')
            # CLAY plugin config
            bdm_d = config('ec-profile-helper-chunks')
            scalar_mds = config('ec-profile-scalar-mds')
            # Profile name
            profile_name = (
                config('ec-profile-name') or
                "{}-profile".format(pool_name))
            # Metadata sizing is approximately 1% of overall data weight
            # but is in effect driven by the number of rbd's rather than
            # their size - so it can be very lightweight.
            metadata_weight = weight * 0.01
            # Resize data pool weight to accomodate metadata weight
            weight = weight - metadata_weight
            # Create metadata pool
            rq.add_op_create_pool(
                name=metadata_pool_name, replica_count=replicas,
                weight=metadata_weight, group='vms', app_name='rbd')
            # Create erasure profile
            rq.add_op_create_erasure_profile(
                name=profile_name,
                k=bdm_k, m=bdm_m,
                lrc_locality=bdm_l,
                lrc_crush_locality=crush_locality,
                shec_durability_estimator=bdm_c,
                clay_helper_chunks=bdm_d,
                clay_scalar_mds=scalar_mds,
                device_class=device_class,
                erasure_type=plugin,
                erasure_technique=technique)
            # Create EC data pool
            # NOTE(fnordahl): once we deprecate Python 3.5 support we can do
            # the unpacking of the BlueStore compression arguments as part of
            # the function arguments. Until then we need to build the dict
            # prior to the function call.
            kwargs = {
                'name': pool_name,
                'erasure_profile': profile_name,
                'weight': weight,
                'group': "vms",
                'app_name': "rbd",
                'allow_ec_overwrites': True
            }
            kwargs.update(bluestore_compression.get_kwargs())
            rq.add_op_create_erasure_pool(**kwargs)
        else:
            # Replicated pool path; compression kwargs merged the same way.
            kwargs = {
                'name': pool_name,
                'replica_count': replicas,
                'weight': weight,
                'group': 'vms',
                'app_name': 'rbd',
            }
            kwargs.update(bluestore_compression.get_kwargs())
            rq.add_op_create_replicated_pool(**kwargs)
    if config('restrict-ceph-pools'):
        rq.add_op_request_access_to_group(
            name="volumes",
            object_prefix_permissions={'class-read': ['rbd_children']},
            permission='rwx')
        rq.add_op_request_access_to_group(
            name="images",
            object_prefix_permissions={'class-read': ['rbd_children']},
            permission='rwx')
        rq.add_op_request_access_to_group(
            name="vms",
            object_prefix_permissions={'class-read': ['rbd_children']},
            permission='rwx')
    return rq
def get_ceph_request():
    """Broker request creating the configured 'rbd-pool'.

    :returns: CephBrokerRq with a single create-pool op using the
        'ceph-osd-replication-count' config for replicas.
    """
    request = CephBrokerRq()
    request.add_op_create_pool(
        name=config('rbd-pool'),
        replica_count=config('ceph-osd-replication-count'))
    return request
def get_create_rgw_pools_rq(prefix=None):
    """Pre-create RGW pools so that they have the correct settings.

    If a prefix is provided it will be prepended to each pool name.

    When RGW creates its own pools it will create them with non-optimal
    settings (LP: #1476749).

    NOTE: see http://docs.ceph.com/docs/master/radosgw/config-ref/#pools and
          http://docs.ceph.com/docs/master/radosgw/config/#create-pools for
          list of supported/required pools.

    :param prefix: optional pool-name prefix; defaults to 'default'.
    :returns: CephBrokerRq with create-pool (and, for erasure coding,
        create-erasure-profile) ops for all RGW pools.
    """
    def _add_light_pool(rq, pool, pg_num, prefix=None):
        # Per the Ceph PG Calculator, all of the lightweight pools get 0.10%
        # of the data by default and only the .rgw.buckets.* get higher values
        weights = {
            '.rgw.buckets.index': 3.00,
            '.rgw.buckets.extra': 1.00
        }
        w = weights.get(pool, 0.10)
        if prefix:
            pool = "{prefix}{pool}".format(prefix=prefix, pool=pool)
        # An explicit pg_num config overrides weight-based sizing.
        if pg_num > 0:
            rq.add_op_create_pool(name=pool, replica_count=replicas,
                                  pg_num=pg_num, group='objects',
                                  app_name=CEPH_POOL_APP_NAME)
        else:
            rq.add_op_create_pool(name=pool, replica_count=replicas,
                                  weight=w, group='objects',
                                  app_name=CEPH_POOL_APP_NAME)

    rq = CephBrokerRq()
    replicas = config('ceph-osd-replication-count')
    prefix = prefix or 'default'

    # Buckets likely to contain the most data and therefore
    # requiring the most PGs
    heavy = [
        '.rgw.buckets.data'
    ]
    bucket_weight = config('rgw-buckets-pool-weight')

    if config('pool-type') == 'erasure-coded':
        # The heavy data pool is erasure-coded; an erasure profile is
        # created first from the ec-profile-* config options.
        # General EC plugin config
        plugin = config('ec-profile-plugin')
        technique = config('ec-profile-technique')
        device_class = config('ec-profile-device-class')
        bdm_k = config('ec-profile-k')
        bdm_m = config('ec-profile-m')
        # LRC plugin config
        bdm_l = config('ec-profile-locality')
        crush_locality = config('ec-profile-crush-locality')
        # SHEC plugin config
        bdm_c = config('ec-profile-durability-estimator')
        # CLAY plugin config
        bdm_d = config('ec-profile-helper-chunks')
        scalar_mds = config('ec-profile-scalar-mds')
        # Profile name
        service = service_name()
        profile_name = (
            config('ec-profile-name') or "{}-profile".format(service)
        )
        rq.add_op_create_erasure_profile(
            name=profile_name,
            k=bdm_k, m=bdm_m,
            lrc_locality=bdm_l,
            lrc_crush_locality=crush_locality,
            shec_durability_estimator=bdm_c,
            clay_helper_chunks=bdm_d,
            clay_scalar_mds=scalar_mds,
            device_class=device_class,
            erasure_type=plugin,
            erasure_technique=technique
        )
        for pool in heavy:
            pool = "{prefix}{pool}".format(prefix=prefix, pool=pool)
            rq.add_op_create_erasure_pool(
                name=pool,
                erasure_profile=profile_name,
                weight=bucket_weight,
                group="objects",
                app_name=CEPH_POOL_APP_NAME
            )
    else:
        # Replicated heavy data pool.
        for pool in heavy:
            pool = "{prefix}{pool}".format(prefix=prefix, pool=pool)
            rq.add_op_create_pool(name=pool, replica_count=replicas,
                                  weight=bucket_weight, group='objects',
                                  app_name=CEPH_POOL_APP_NAME)

    # NOTE: we want these pools to have a smaller pg_num/pgp_num than the
    # others since they are not expected to contain as much data
    light = [
        '.rgw.control',
        '.rgw.data.root',
        '.rgw.gc',
        '.rgw.log',
        '.rgw.intent-log',
        '.rgw.meta',
        '.rgw.usage',
        '.rgw.users.keys',
        '.rgw.users.email',
        '.rgw.users.swift',
        '.rgw.users.uid',
        '.rgw.buckets.extra',
        '.rgw.buckets.index',
    ]
    pg_num = config('rgw-lightweight-pool-pg-num')
    for pool in light:
        _add_light_pool(rq, pool, pg_num, prefix)
    # .rgw.root is deliberately created without the prefix.
    _add_light_pool(rq, '.rgw.root', pg_num)

    if config('restrict-ceph-pools'):
        rq.add_op_request_access_to_group(name="objects",
                                          permission='rwx',
                                          key_name='radosgw.gateway')
    return rq
def get_create_rgw_pools_rq(prefix=None):
    """Pre-create RGW pools so that they have the correct settings.

    If a prefix is provided it will be prepended to each pool name.

    When RGW creates its own pools it will create them with non-optimal
    settings (LP: #1476749).

    NOTE: see http://docs.ceph.com/docs/master/radosgw/config-ref/#pools and
          http://docs.ceph.com/docs/master/radosgw/config/#create-pools for
          list of supported/required pools.
    """
    def _add_light_pool(rq, pool, pg_num, prefix=None):
        # Per the Ceph PG Calculator, all of the lightweight pools get 0.10%
        # of the data by default and only the .rgw.buckets.* get higher values
        heavier = {
            '.rgw.buckets.index': 3.00,
            '.rgw.buckets.extra': 1.00,
        }
        weight = heavier.get(pool, 0.10)
        if prefix:
            pool = "{prefix}{pool}".format(prefix=prefix, pool=pool)
        kwargs = {
            'name': pool,
            'replica_count': replicas,
            'group': 'objects',
            'app_name': CEPH_POOL_APP_NAME,
        }
        # An explicit pg_num overrides weight-based sizing.
        if pg_num > 0:
            kwargs['pg_num'] = pg_num
        else:
            kwargs['weight'] = weight
        rq.add_op_create_pool(**kwargs)

    from apt import apt_pkg
    apt_pkg.init()

    rq = CephBrokerRq()
    replicas = config('ceph-osd-replication-count')
    prefix = prefix or 'default'

    # Buckets likely to contain the most data and therefore
    # requiring the most PGs
    bucket_weight = config('rgw-buckets-pool-weight')
    for pool in ['.rgw.buckets.data']:
        rq.add_op_create_pool(
            name="{prefix}{pool}".format(prefix=prefix, pool=pool),
            replica_count=replicas,
            weight=bucket_weight,
            group='objects',
            app_name=CEPH_POOL_APP_NAME)

    # NOTE: we want these pools to have a smaller pg_num/pgp_num than the
    # others since they are not expected to contain as much data
    light = (
        '.rgw.control',
        '.rgw.data.root',
        '.rgw.gc',
        '.rgw.log',
        '.rgw.intent-log',
        '.rgw.meta',
        '.rgw.usage',
        '.rgw.users.keys',
        '.rgw.users.email',
        '.rgw.users.swift',
        '.rgw.users.uid',
        '.rgw.buckets.extra',
        '.rgw.buckets.index',
    )
    pg_num = config('rgw-lightweight-pool-pg-num')
    for pool in light:
        _add_light_pool(rq, pool, pg_num, prefix)
    # .rgw.root is deliberately created without the prefix.
    _add_light_pool(rq, '.rgw.root', pg_num)

    if config('restrict-ceph-pools'):
        rq.add_op_request_access_to_group(name="objects",
                                          permission='rwx',
                                          key_name='radosgw.gateway')
    return rq