def create_pool(self, name, replicas=3):
    """Request creation of a pool via the Ceph broker.

    Outstanding broker requests are cached locally (keyed by pool name)
    so that a repeated call for the same pool re-sends the stored
    request instead of creating a duplicate one.

    @param name: name of pool to create
    @param replicas: number of replicas for supporting pools
    """
    # Locally cached json.dumps of each CephBrokerRq(), keyed by pool name.
    requests = self.get_local(key='broker_reqs') or {}
    if name not in requests:
        rq = CephBrokerRq()
        rq.add_op_create_pool(name=name, replica_count=replicas,
                              weight=None)
        # NOTE: the original re-initialised 'requests' here when empty;
        # that was dead code since it already defaults to {} above.
        requests[name] = rq.request
        self.set_local(key='broker_reqs', value=requests)
        send_request_if_needed(rq, relation=self.relation_name)
        # Pools are no longer known-available until the broker replies.
        self.remove_state('{relation_name}.pools.available')
    else:
        # Re-send the previously recorded request for this pool.
        rq = CephBrokerRq()
        try:
            j = json.loads(requests[name])
            rq.ops = j['ops']
            send_request_if_needed(rq, relation=self.relation_name)
        except ValueError as err:
            log("Unable to decode broker_req: {}. Error: {}".format(
                requests[name], err))
def get_ceph_request():
    """Build the Ceph broker request creating this service's pool."""
    request = CephBrokerRq()
    request.add_op_create_pool(
        name=service_name(),
        replica_count=config('ceph-osd-replication-count'),
        weight=config('ceph-pool-weight'))
    return request
def get_ceph_request():
    """Build the broker request: create the volumes pool and, when
    configured, restrict this service's key to the expected groups."""
    request = CephBrokerRq()
    service = service_name()
    pool = config('rbd-pool-name') or service
    request.add_op_create_pool(
        name=pool,
        replica_count=config('ceph-osd-replication-count'),
        weight=config('ceph-pool-weight'),
        group='volumes', app_name='rbd')
    if config('restrict-ceph-pools'):
        for pool_group in ('volumes', 'images', 'vms'):
            request.add_op_request_access_to_group(
                name=pool_group,
                object_prefix_permissions={'class-read': ['rbd_children']},
                permission='rwx')
    return request
def request_access_to_group(self, name, namespace=None, permission=None,
                            key_name=None, object_prefix_permissions=None):
    """Ask the broker to extend this service's Ceph key permissions.

    The requested permissions restrict the key to the named group's
    pools, or to specific object prefixes within them.

    ``object_prefix_permissions`` maps a permission onto the list of
    prefixes it applies to, e.g.::

        {'rwx': ['prefix1', 'prefix2'], 'class-read': ['prefix3']}

    @param name: Target group name for permissions request.
    @param namespace: namespace to further restrict pool access.
    @param permission: Permission to be requested against pool
    @param key_name: userid to grant permission to
    @param object_prefix_permissions: Add object_prefix permissions.
    """
    # Extend the in-flight request when one exists so all ops travel
    # to the broker together.
    request = self.get_current_request() or CephBrokerRq()
    request.add_op_request_access_to_group(
        name,
        namespace=namespace,
        permission=permission,
        key_name=key_name,
        object_prefix_permissions=object_prefix_permissions)
    self.send_request_if_needed(request)
def _get_request(relation_data):
    """Yield (request_id, CephBrokerRq) for a published broker request.

    When no 'broker_req' key is present nothing is yielded; per PEP 479
    the caller then receives an empty generator rather than None.
    """
    if 'broker_req' in relation_data:
        request = CephBrokerRq(raw_request_data=relation_data['broker_req'])
        yield request.request_id, request
def get_ceph_request():
    """Build the broker request for the rbd pool (only when the rbd
    image backend is configured and allowed), plus optional group ACLs."""
    request = CephBrokerRq()
    rbd_enabled = (config('libvirt-image-backend') == 'rbd' and
                   assert_libvirt_rbd_imagebackend_allowed())
    if rbd_enabled:
        request.add_op_create_pool(
            name=config('rbd-pool'),
            replica_count=config('ceph-osd-replication-count'),
            weight=config('ceph-pool-weight'),
            group='vms')
        if config('restrict-ceph-pools'):
            for pool_group in ("volumes", "images", "vms"):
                request.add_op_request_access_to_group(
                    name=pool_group,
                    object_prefix_permissions={
                        'class-read': ['rbd_children']},
                    permission='rwx')
    return request
def create_erasure_profile(self, name,
                           erasure_type='jerasure',
                           erasure_technique=None,
                           k=None, m=None,
                           failure_domain=None,
                           lrc_locality=None,
                           shec_durability_estimator=None,
                           clay_helper_chunks=None,
                           device_class=None,
                           clay_scalar_mds=None,
                           lrc_crush_locality=None):
    """Request creation of an erasure coding profile.

    @param name: Name of erasure coding profile
    @param erasure_type: Erasure coding plugin to use
    @param erasure_technique: Erasure coding technique to use
    @param k: Number of data chunks
    @param m: Number of coding chunks
    @param failure_domain: Failure domain to use for PG placement
    @param lrc_locality: Group the coding and data chunks into sets of
                         size locality (lrc plugin)
    @param shec_durability_estimator: The number of parity chunks each
                                      of which includes a data chunk in
                                      its calculation range (shec plugin)
    @param clay_helper_chunks: The number of helper chunks to use for
                               recovery operations (clay plugin)
    @param device_class: Device class to use for profile (ssd, hdd, nvme)
    @param clay_scalar_mds: Plugin to use for CLAY layered construction
                            (jerasure|isa|shec)
    @param lrc_crush_locality: Type of crush bucket in which the set of
                               chunks defined by lrc_locality will be
                               stored.
    """
    # Extend the in-flight request if there is one; otherwise start fresh.
    request = self.get_current_request() or CephBrokerRq()
    profile_op_kwargs = {
        'name': name,
        'erasure_type': erasure_type,
        'erasure_technique': erasure_technique,
        'k': k,
        'm': m,
        'failure_domain': failure_domain,
        'lrc_locality': lrc_locality,
        'shec_durability_estimator': shec_durability_estimator,
        'clay_helper_chunks': clay_helper_chunks,
        'device_class': device_class,
        'clay_scalar_mds': clay_scalar_mds,
        'lrc_crush_locality': lrc_crush_locality,
    }
    request.add_op_create_erasure_profile(**profile_op_kwargs)
    self.send_request_if_needed(request)
    # A new op invalidates any previously signalled pool availability.
    reactive.clear_flag(
        self.expand_name('{endpoint_name}.pools.available'))
def get_ceph_request():
    """Build the broker request creating this service's pool in the
    images group, optionally restricting the key to that group."""
    request = CephBrokerRq()
    request.add_op_create_pool(
        name=service_name(),
        replica_count=config('ceph-osd-replication-count'),
        weight=config('ceph-pool-weight'),
        group='images')
    if config('restrict-ceph-pools'):
        request.add_op_request_access_to_group(name="images",
                                               permission='rwx')
    return request
def request_ceph_permissions(self, ceph):
    """Append a set-key-permissions op for this application's client key
    to the current broker request and send it if needed."""
    app = ch_core.hookenv.application_name()
    request = ceph.get_current_request() or CephBrokerRq()
    log("Requesting ceph permissions for client: {}".format(app),
        level=ch_core.hookenv.INFO)
    request.add_op({
        'op': 'set-key-permissions',
        'permissions': CEPH_CAPABILITIES,
        'client': app,
    })
    ceph.send_request_if_needed(request)
def get_current_request(self):
    """Return the broker request published on the relations, if any.

    Every relation is expected to carry the same request; an
    AssertionError is raised if they disagree. Returns None when no
    request has been published.
    """
    published = []
    for relation in self.relations:
        raw = relation.to_publish.get('broker_req', {})
        if raw:
            request = CephBrokerRq()
            request.set_ops(raw['ops'])
            published.append(request)
    # All relations must agree on the request being tracked.
    assert all(r == published[0] for r in published)
    if published:
        return published[0]
def create_replicated_pool(self, name, replicas=3, weight=None,
                           pg_num=None, group=None, namespace=None,
                           app_name=None, **kwargs):
    """Request creation of a replicated pool via the Ceph broker.

    :param name: Name of pool to create
    :type name: str
    :param replicas: Number of replicas for supporting pools
    :type replicas: int
    :param weight: The percentage of data the pool makes up
    :type weight: Optional[float]
    :param pg_num: If not provided, this value will be calculated by the
                   broker based on how many OSDs are in the cluster at
                   the time of creation. Note that, if provided, this
                   value will be capped at the current available maximum.
    :type pg_num: Optional[int]
    :param group: Group to add pool to.
    :type group: Optional[str]
    :param namespace: A group can optionally have a namespace defined
                      that will be used to further restrict pool access.
    :type namespace: Optional[str]
    :param app_name: (Optional) Tag pool with application name. Note
                     that there is certain protocols emerging upstream
                     with regard to meaningful application names to use.
                     Examples are ``rbd`` and ``rgw``.
    :type app_name: Optional[str]
    :param kwargs: Additional keyword arguments subject to validation.
                   Refer to CephBrokerRq.add_op_create_replicated_pool
                   method for documentation.
    :type kwargs: Dict[str,any]
    """
    # Extend the in-flight request when one exists.
    request = self.get_current_request() or CephBrokerRq()
    # Named parameters take precedence over anything passed via kwargs.
    op_args = dict(kwargs)
    op_args.update({
        'name': name,
        'replica_count': replicas,
        'pg_num': pg_num,
        'weight': weight,
        'group': group,
        'namespace': namespace,
        'app_name': app_name,
    })
    request.add_op_create_replicated_pool(**op_args)
    self.send_request_if_needed(request)
    # New ops invalidate any previously signalled pool availability.
    reactive.clear_flag(
        self.expand_name('{endpoint_name}.pools.available'))
def setUp(self):
    # Minimal charm metadata providing the ceph-client interface.
    self.harness = Harness(CharmBase, meta='''
name: client
provides:
  ceph-client:
    interface: ceph-client
''')
    # An unrelated request, used to exercise mismatch handling.
    self.random_request = CephBrokerRq()
    self.random_request.add_op_create_replicated_pool(
        name='another-pool', replica_count=3)
    # A known request with a fixed id: one pool op plus a
    # key-permission op.
    self.client_req = CephBrokerRq()
    self.client_req.add_op_create_replicated_pool(
        name='tmbtil', replica_count=3)
    self.client_req.add_op({
        'op': 'set-key-permissions',
        'permissions': ['osd', 'allow *', 'mon', 'allow *', 'mgr', 'allow r'],
        'client': 'ceph-iscsi'
    })
    self.client_req.request_id = 'a3ad24dd-7e2f-11ea-8ba2-e5a5b68b415f'
def initialize_mds(self, name, replicas=3):
    """Request the pools and MDS needed for a Ceph filesystem.

    @param name: name of mds pools to create
    @param replicas: number of replicas for supporting pools
    """
    # Locally cached json.dumps of the CephBrokerRq(), if already sent.
    json_rq = self.get_local(key='broker_req')
    if json_rq:
        # Re-send the previously recorded request.
        rq = CephBrokerRq()
        try:
            decoded = json.loads(json_rq)
            log("Json request: {}".format(json_rq))
            rq.ops = decoded['ops']
            send_request_if_needed(rq, relation=self.relation_name)
        except ValueError as err:
            log("Unable to decode broker_req: {}. Error: {}".format(
                json_rq, err))
    else:
        data_pool = "{}_data".format(name)
        metadata_pool = "{}_metadata".format(name)
        rq = CephBrokerRq()
        rq.add_op_create_pool(name=data_pool, replica_count=replicas,
                              weight=None)
        rq.add_op_create_pool(name=metadata_pool, replica_count=replicas,
                              weight=None)
        # Create the CephFS on top of the two pools.
        rq.ops.append({
            'op': 'create-cephfs',
            'mds_name': name,
            'data_pool': data_pool,
            'metadata_pool': metadata_pool,
        })
        self.set_local(key='broker_req', value=rq.request)
        send_request_if_needed(rq, relation=self.relation_name)
def create_erasure_pool(self, name, erasure_profile=None, weight=None,
                        group=None, app_name=None, max_bytes=None,
                        max_objects=None, allow_ec_overwrites=False,
                        **kwargs):
    """ Request erasure coded pool setup

    :param name: Name of pool to create
    :type name: str
    :param erasure_profile: Name of erasure profile for pool
    :type erasure_profile: str
    :param weight: The percentage of data the pool makes up
    :type weight: Optional[float]
    :param group: Group to add pool to.
    :type group: Optional[str]
    :param app_name: Name of application using pool
    :type app_name: Optional[str]
    :param max_bytes: Maximum bytes of quota to apply
    :type max_bytes: Optional[int]
    :param max_objects: Maximum object quota to apply
    :type max_objects: Optional[int]
    :param allow_ec_overwrites: Allow EC pools to be overwritten
    :type allow_ec_overwrites: bool
    :param kwargs: Additional keyword arguments subject to validation.
                   Refer to CephBrokerRq.add_op_create_erasure_pool
                   method for documentation.
    :type kwargs: Dict[str,any]
    """
    # Extend the in-flight request when one exists so all ops travel
    # to the broker together.
    rq = self.get_current_request() or CephBrokerRq()
    kwargs.update({
        'name': name,
        'erasure_profile': erasure_profile,
        'weight': weight,
        'group': group,
        'app_name': app_name,
        'max_bytes': max_bytes,
        'max_objects': max_objects,
        'allow_ec_overwrites': allow_ec_overwrites,
    })
    rq.add_op_create_erasure_pool(**kwargs)
    self.send_request_if_needed(rq)
    # New ops invalidate any previously signalled pool availability.
    reactive.clear_flag(
        self.expand_name('{endpoint_name}.pools.available'))
def request_cephfs(self, name, extra_pools=None):
    """Request creation of Ceph FS

    :param name: Name of mds to create
    :type name: str
    :param extra_pools: Additional pools to add to FS.
    :type extra_pools: List[str]
    """
    # Extend the in-flight request when one exists.
    request = self.get_current_request() or CephBrokerRq()
    cephfs_op = {
        'op': 'create-cephfs',
        'mds_name': name,
        'data_pool': "{}_data".format(name),
        'extra_pools': extra_pools,
        'metadata_pool': "{}_metadata".format(name),
    }
    request.add_op(cephfs_op)
    self.send_request_if_needed(request)
def request_ceph_permissions(self, ceph):
    """Append a set-key-permissions op to the locally stored broker
    request and (re-)send it over the ceph relation."""
    rq = CephBrokerRq()
    json_rq = ceph.get_local(key='broker_req')
    if json_rq:
        # Carry forward any ops recorded by a previous request.
        try:
            decoded = json.loads(json_rq)
            log("Json request: {}".format(json_rq))
            rq.set_ops(decoded['ops'])
        except ValueError as err:
            log("Unable to decode broker_req: {}. Error {}".format(
                json_rq, err))
    rq.add_op({'op': 'set-key-permissions',
               'permissions': CEPH_CAPABILITIES,
               'client': 'manila-ganesha'})
    ceph.set_local(key='broker_req', value=rq.request)
    send_request_if_needed(rq, relation='ceph')
def get_ceph_request():
    """Build the broker request creating this service's pool in the
    volumes group, with optional group access restrictions."""
    request = CephBrokerRq()
    request.add_op_create_pool(
        name=service_name(),
        replica_count=config('ceph-osd-replication-count'),
        group="volumes")
    if config('restrict-ceph-pools'):
        for pool_group in ("volumes", "images", "vms"):
            request.add_op_request_access_to_group(
                name=pool_group,
                object_prefix_permissions={'class-read': ['rbd_children']},
                permission='rwx')
    return request
def changed(self):
    """Handle relation data changes: set the .available state when all
    connection data is present, and .pools.available once the broker
    has completed the locally recorded request."""
    data = {
        'mds_key': self.mds_key(),
        'fsid': self.fsid(),
        'auth': self.auth(),
        'mon_hosts': self.mon_hosts()
    }
    if all(data.values()):
        self.set_state('{relation_name}.available')
    json_rq = self.get_local(key='broker_req')
    if json_rq:
        rq = CephBrokerRq()
        # Guard the decode, consistent with the other broker_req
        # consumers in this file: log and bail out rather than raising
        # on corrupt locally-stored data.
        try:
            j = json.loads(json_rq)
        except ValueError as err:
            log("Unable to decode broker_req: {}. Error: {}".format(
                json_rq, err))
            return
        rq.ops = j['ops']
        log("changed broker_req: {}".format(rq.ops))
        if rq and is_request_complete(rq, relation=self.relation_name):
            log("Setting ceph-mds.pools.available")
            self.set_state('{relation_name}.pools.available')
    else:
        log("incomplete request. broker_req not found")
def create_pools(self, names, replicas=3):
    """Request creation of multiple pools in a single broker request.

    @param names: list of pool names to create
    @param replicas: number of replicas for supporting pools
    """
    # Locally cached json.dumps of each CephBrokerRq(), keyed by pool name.
    requests = self.get_local(key='broker_reqs') or {}
    # Pools already requested are skipped; all new names share one request.
    new_names = [name for name in names if name not in requests]
    if new_names:
        rq = CephBrokerRq()
        for name in new_names:
            rq.add_op_create_pool(name=name, replica_count=replicas,
                                  weight=None)
        # BUG FIX: record the (complete) request for EVERY new pool.
        # Previously this assignment was outside the loop, so only the
        # last name was recorded and earlier pools were re-requested on
        # every subsequent call.
        for name in new_names:
            requests[name] = rq.request
        self.set_local(key='broker_reqs', value=requests)
        send_request_if_needed(rq, relation=self.relation_name)
        # Pools are no longer known-available until the broker replies.
        self.remove_state('{relation_name}.pools.available')
def get_ceph_request():
    """Build the broker request for the rbd pool, using either an
    erasure-coded layout (data pool + metadata pool + EC profile) or a
    plain replicated pool depending on the 'pool-type' option.

    Only acts when the rbd image backend is configured and allowed;
    otherwise an empty request is returned.
    """
    rq = CephBrokerRq()
    if (config('libvirt-image-backend') == 'rbd' and
            assert_libvirt_rbd_imagebackend_allowed()):
        pool_name = config('rbd-pool')
        replicas = config('ceph-osd-replication-count')
        weight = config('ceph-pool-weight')
        bluestore_compression = ch_context.CephBlueStoreCompressionContext()
        if config('pool-type') == 'erasure-coded':
            # General EC plugin config
            plugin = config('ec-profile-plugin')
            technique = config('ec-profile-technique')
            device_class = config('ec-profile-device-class')
            metadata_pool_name = (config('ec-rbd-metadata-pool') or
                                  "{}-metadata".format(pool_name))
            bdm_k = config('ec-profile-k')
            bdm_m = config('ec-profile-m')
            # LRC plugin config
            bdm_l = config('ec-profile-locality')
            crush_locality = config('ec-profile-crush-locality')
            # SHEC plugin config
            bdm_c = config('ec-profile-durability-estimator')
            # CLAY plugin config
            bdm_d = config('ec-profile-helper-chunks')
            scalar_mds = config('ec-profile-scalar-mds')
            # Profile name
            profile_name = (config('ec-profile-name') or
                            "{}-profile".format(pool_name))
            # Metadata sizing is approximately 1% of overall data weight
            # but is in effect driven by the number of rbd's rather than
            # their size - so it can be very lightweight.
            metadata_weight = weight * 0.01
            # Resize data pool weight to accomodate metadata weight
            weight = weight - metadata_weight
            # Create metadata pool
            rq.add_op_create_pool(name=metadata_pool_name,
                                  replica_count=replicas,
                                  weight=metadata_weight,
                                  group='vms', app_name='rbd')
            # Create erasure profile
            rq.add_op_create_erasure_profile(
                name=profile_name,
                k=bdm_k, m=bdm_m,
                lrc_locality=bdm_l,
                lrc_crush_locality=crush_locality,
                shec_durability_estimator=bdm_c,
                clay_helper_chunks=bdm_d,
                clay_scalar_mds=scalar_mds,
                device_class=device_class,
                erasure_type=plugin,
                erasure_technique=technique)
            # Create EC data pool
            # NOTE(fnordahl): once we deprecate Python 3.5 support we can do
            # the unpacking of the BlueStore compression arguments as part of
            # the function arguments. Until then we need to build the dict
            # prior to the function call.
            kwargs = {
                'name': pool_name,
                'erasure_profile': profile_name,
                'weight': weight,
                'group': "vms",
                'app_name': "rbd",
                'allow_ec_overwrites': True
            }
            kwargs.update(bluestore_compression.get_kwargs())
            rq.add_op_create_erasure_pool(**kwargs)
        else:
            kwargs = {
                'name': pool_name,
                'replica_count': replicas,
                'weight': weight,
                'group': 'vms',
                'app_name': 'rbd',
            }
            kwargs.update(bluestore_compression.get_kwargs())
            rq.add_op_create_replicated_pool(**kwargs)
        # NOTE(review): original indentation was lost; this block is
        # placed inside the rbd-enabled branch to match the sibling
        # implementation of this function elsewhere in this file —
        # confirm against upstream.
        if config('restrict-ceph-pools'):
            rq.add_op_request_access_to_group(
                name="volumes",
                object_prefix_permissions={'class-read': ['rbd_children']},
                permission='rwx')
            rq.add_op_request_access_to_group(
                name="images",
                object_prefix_permissions={'class-read': ['rbd_children']},
                permission='rwx')
            rq.add_op_request_access_to_group(
                name="vms",
                object_prefix_permissions={'class-read': ['rbd_children']},
                permission='rwx')
    return rq
def get_create_rgw_pools_rq(prefix=None):
    """Pre-create RGW pools so that they have the correct settings.

    If a prefix is provided it will be prepended to each pool name.

    When RGW creates its own pools it will create them with non-optimal
    settings (LP: #1476749).

    NOTE: see http://docs.ceph.com/docs/master/radosgw/config-ref/#pools and
          http://docs.ceph.com/docs/master/radosgw/config/#create-pools for
          list of supported/required pools.
    """
    def _add_light_pool(rq, pool, pg_num, prefix=None):
        # Per the Ceph PG Calculator, all of the lightweight pools get 0.10%
        # of the data by default and only the .rgw.buckets.* get higher values
        weights = {
            '.rgw.buckets.index': 3.00,
            '.rgw.buckets.extra': 1.00
        }
        w = weights.get(pool, 0.10)
        if prefix:
            pool = "{prefix}{pool}".format(prefix=prefix, pool=pool)
        # An explicit pg_num overrides weight-based sizing.
        if pg_num > 0:
            rq.add_op_create_pool(name=pool, replica_count=replicas,
                                  pg_num=pg_num, group='objects',
                                  app_name=CEPH_POOL_APP_NAME)
        else:
            rq.add_op_create_pool(name=pool, replica_count=replicas,
                                  weight=w, group='objects',
                                  app_name=CEPH_POOL_APP_NAME)

    rq = CephBrokerRq()
    replicas = config('ceph-osd-replication-count')
    prefix = prefix or 'default'
    # Buckets likely to contain the most data and therefore
    # requiring the most PGs
    heavy = [
        '.rgw.buckets.data'
    ]
    bucket_weight = config('rgw-buckets-pool-weight')
    if config('pool-type') == 'erasure-coded':
        # General EC plugin config
        plugin = config('ec-profile-plugin')
        technique = config('ec-profile-technique')
        device_class = config('ec-profile-device-class')
        bdm_k = config('ec-profile-k')
        bdm_m = config('ec-profile-m')
        # LRC plugin config
        bdm_l = config('ec-profile-locality')
        crush_locality = config('ec-profile-crush-locality')
        # SHEC plugin config
        bdm_c = config('ec-profile-durability-estimator')
        # CLAY plugin config
        bdm_d = config('ec-profile-helper-chunks')
        scalar_mds = config('ec-profile-scalar-mds')
        # Profile name
        service = service_name()
        profile_name = (
            config('ec-profile-name') or "{}-profile".format(service)
        )
        rq.add_op_create_erasure_profile(
            name=profile_name,
            k=bdm_k, m=bdm_m,
            lrc_locality=bdm_l,
            lrc_crush_locality=crush_locality,
            shec_durability_estimator=bdm_c,
            clay_helper_chunks=bdm_d,
            clay_scalar_mds=scalar_mds,
            device_class=device_class,
            erasure_type=plugin,
            erasure_technique=technique
        )
        # Heavy pools are erasure coded and reference the profile above.
        for pool in heavy:
            pool = "{prefix}{pool}".format(prefix=prefix, pool=pool)
            rq.add_op_create_erasure_pool(
                name=pool,
                erasure_profile=profile_name,
                weight=bucket_weight,
                group="objects",
                app_name=CEPH_POOL_APP_NAME
            )
    else:
        for pool in heavy:
            pool = "{prefix}{pool}".format(prefix=prefix, pool=pool)
            rq.add_op_create_pool(name=pool, replica_count=replicas,
                                  weight=bucket_weight, group='objects',
                                  app_name=CEPH_POOL_APP_NAME)

    # NOTE: we want these pools to have a smaller pg_num/pgp_num than the
    # others since they are not expected to contain as much data
    light = [
        '.rgw.control',
        '.rgw.data.root',
        '.rgw.gc',
        '.rgw.log',
        '.rgw.intent-log',
        '.rgw.meta',
        '.rgw.usage',
        '.rgw.users.keys',
        '.rgw.users.email',
        '.rgw.users.swift',
        '.rgw.users.uid',
        '.rgw.buckets.extra',
        '.rgw.buckets.index',
    ]
    pg_num = config('rgw-lightweight-pool-pg-num')
    for pool in light:
        _add_light_pool(rq, pool, pg_num, prefix)
    # '.rgw.root' is deliberately created without the prefix.
    _add_light_pool(rq, '.rgw.root', pg_num)

    if config('restrict-ceph-pools'):
        rq.add_op_request_access_to_group(name="objects",
                                          permission='rwx',
                                          key_name='radosgw.gateway')
    return rq
    ceph.tune_dev(dev)
    # NOTE(review): on Python 3 ``filter`` returns an iterator, so the
    # truthiness test below would always be True and ``mounts[0]`` would
    # raise TypeError; this appears to assume Python 2 list semantics —
    # confirm the target interpreter.
    mounts = filter(lambda disk: device_path in disk.device,
                    psutil.disk_partitions())
    if mounts:
        osd = mounts[0]
        # Mountpoint ends in .../ceph-<id>; extract the numeric OSD id.
        osd_id = osd.mountpoint.split('/')[-1].split('-')[-1]
        request.ops.append({
            'op': 'move-osd-to-bucket',
            'osd': "osd.{}".format(osd_id),
            'bucket': bucket})
    return request


def get_devices():
    """Return the absolute paths listed in the 'osd-devices' action
    parameter (space-separated; non-absolute entries are dropped)."""
    devices = []
    for path in action_get('osd-devices').split(' '):
        path = path.strip()
        if os.path.isabs(path):
            devices.append(path)
    return devices


if __name__ == "__main__":
    # Build a single broker request covering every requested device and
    # send it over the 'mon' relation.
    request = CephBrokerRq()
    for dev in get_devices():
        request = add_device(request=request, device_path=dev,
                             bucket=action_get("bucket"))
    send_request_if_needed(request, relation='mon')