Example #1
def get_ceph_request():
    service = service_name()
    rq = CephBrokerRq()
    replicas = config('ceph-osd-replication-count')
    weight = config('ceph-pool-weight')
    rq.add_op_create_pool(name=service, replica_count=replicas, weight=weight)
    return rq
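Once built, a request like the one above is typically sent over the ceph
relation and checked for completion before the charm proceeds. A minimal
sketch, assuming the charmhelpers send/check helpers used elsewhere in this
section (the configure_ceph_storage() follow-up is hypothetical):

from charmhelpers.contrib.storage.linux.ceph import (
    is_request_complete,
    send_request_if_needed,
)


def ceph_changed():
    rq = get_ceph_request()
    if is_request_complete(rq):
        # The broker has satisfied the request; carry on with
        # charm-specific configuration (hypothetical helper).
        configure_ceph_storage()
    else:
        send_request_if_needed(rq)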
Example #2
def get_ceph_request():
    service = service_name()
    rq = CephBrokerRq()
    replicas = config('ceph-osd-replication-count')
    weight = config('ceph-pool-weight')
    pool_name = config('rbd-pool-name') or service
    rq.add_op_create_pool(name=pool_name,
                          replica_count=replicas,
                          weight=weight,
                          group='volumes',
                          app_name='rbd')
    if config('restrict-ceph-pools'):
        rq.add_op_request_access_to_group(
            name='volumes',
            object_prefix_permissions={'class-read': ['rbd_children']},
            permission='rwx')
        rq.add_op_request_access_to_group(
            name='images',
            object_prefix_permissions={'class-read': ['rbd_children']},
            permission='rwx')
        rq.add_op_request_access_to_group(
            name='vms',
            object_prefix_permissions={'class-read': ['rbd_children']},
            permission='rwx')
    return rq
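For reference, a CephBrokerRq such as the one above is serialized to JSON
under the broker_req relation key. A trimmed, illustrative payload; the field
names follow the broker_req string shown in TEST_CASE_1 later in this
section, while the values here are assumptions:

# Illustrative broker_req payload (trimmed); values are assumptions.
example_broker_req = {
    "api-version": 1,
    "request-id": "a3ad24dd-...",  # UUID assigned by CephBrokerRq
    "ops": [{"op": "create-pool", "name": "cinder-ceph", "replicas": 3,
             "weight": 40, "group": "volumes", "app-name": "rbd"}],
}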
Example #3
def get_ceph_request():
    rq = CephBrokerRq()
    if (config('libvirt-image-backend') == 'rbd'
            and assert_libvirt_rbd_imagebackend_allowed()):
        name = config('rbd-pool')
        replicas = config('ceph-osd-replication-count')
        weight = config('ceph-pool-weight')
        rq.add_op_create_pool(name=name,
                              replica_count=replicas,
                              weight=weight,
                              group='vms')
    if config('restrict-ceph-pools'):
        rq.add_op_request_access_to_group(
            name="volumes",
            object_prefix_permissions={'class-read': ['rbd_children']},
            permission='rwx')
        rq.add_op_request_access_to_group(
            name="images",
            object_prefix_permissions={'class-read': ['rbd_children']},
            permission='rwx')
        rq.add_op_request_access_to_group(
            name="vms",
            object_prefix_permissions={'class-read': ['rbd_children']},
            permission='rwx')
    return rq
Example #4
def get_ceph_request():
    service = service_name()
    rq = CephBrokerRq()
    replicas = config('ceph-osd-replication-count')
    weight = config('ceph-pool-weight')
    rq.add_op_create_pool(name=service, replica_count=replicas,
                          weight=weight)
    return rq
Example #5
    def get_current_request(self):
        broker_reqs = []
        for relation in self.relations:
            broker_req = relation.to_publish.get('broker_req', {})
            if broker_req:
                rq = CephBrokerRq()
                rq.set_ops(broker_req['ops'])
                broker_reqs.append(rq)
        # Check that, if there are multiple requests, they are all the same.
        assert all(x == broker_reqs[0] for x in broker_reqs)
        if broker_reqs:
            return broker_reqs[0]
Example #6
def get_ceph_request():
    service = service_name()
    rq = CephBrokerRq()
    replicas = config('ceph-osd-replication-count')
    rq.add_op_create_pool(name=service,
                          replica_count=replicas,
                          group="volumes")
    if config('restrict-ceph-pools'):
        rq.add_op_request_access_to_group(name="volumes", permission='rwx')
        rq.add_op_request_access_to_group(name="images", permission='rwx')
        rq.add_op_request_access_to_group(name="vms", permission='rwx')
    return rq
Example #7
def get_ceph_request():
    service = service_name()
    rq = CephBrokerRq()
    replicas = config('ceph-osd-replication-count')
    weight = config('ceph-pool-weight')
    rq.add_op_create_pool(name=service, replica_count=replicas,
                          weight=weight, group='images', app_name='rbd')
    if config('restrict-ceph-pools'):
        rq.add_op_request_access_to_group(
            name="images",
            object_prefix_permissions={'class-read': ['rbd_children']},
            permission='rwx')
    return rq
Example #8
    def _get_request(relation_data):
        if 'broker_req' in relation_data:
            rq = CephBrokerRq(raw_request_data=relation_data['broker_req'])
            yield rq.request_id, rq
        # Note that a bare return from a generator produces an empty
        # generator, not None (see PEP 479).
        return
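Because _get_request yields (request_id, request) pairs, a caller can collect
every request found in one unit's relation data in a single pass. A minimal,
illustrative consumption:

# Illustrative: build a request-id -> CephBrokerRq mapping from one
# unit's relation data.
known_requests = dict(_get_request(relation_data))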
Example #9
    def request_access_to_group(self,
                                name,
                                namespace=None,
                                permission=None,
                                key_name=None,
                                object_prefix_permissions=None):
        """
        Adds the requested permissions to the service's Ceph key.

        Adds the requested permissions to the current service's Ceph key,
        allowing the key to access only the specified pools or
        object prefixes. object_prefix_permissions should be a dictionary
        keyed on the permission with the corresponding value being a list
        of prefixes to apply that permission to.
            {
                'rwx': ['prefix1', 'prefix2'],
                'class-read': ['prefix3']}
        @param name: Target group name for permissions request.
        @param namespace: namespace to further restrict pool access.
        @param permission: Permission to be requested against pool
        @param key_name: userid to grant permission to
        @param object_prefix_permissions: Add object_prefix permissions.
        """
        current_request = self.get_current_request() or CephBrokerRq()
        current_request.add_op_request_access_to_group(
            name,
            namespace=namespace,
            permission=permission,
            key_name=key_name,
            object_prefix_permissions=object_prefix_permissions)
        self.send_request_if_needed(current_request)
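A usage sketch for the endpoint method above, assuming a charms.reactive
endpoint bound to a 'ceph-client' relation (the handler name and flag are
illustrative):

from charms import reactive


@reactive.when('ceph-client.available')
def restrict_pool_access():
    ceph_client = reactive.endpoint_from_flag('ceph-client.available')
    ceph_client.request_access_to_group(
        'volumes',
        object_prefix_permissions={'class-read': ['rbd_children']},
        permission='rwx')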
Example #10
def get_create_rgw_pools_rq(prefix):
    """Pre-create RGW pools so that they have the correct settings. This
    will prepend a prefix onto the pools if specified in the config.yaml

    When RGW creates its own pools it will create them with non-optimal
    settings (LP: #1476749).

    NOTE: see http://docs.ceph.com/docs/master/radosgw/config-ref/#pools and
          http://docs.ceph.com/docs/master/radosgw/config/#create-pools for
          list of supported/required pools.
    """
    rq = CephBrokerRq()
    replicas = config('ceph-osd-replication-count')

    # Buckets likely to contain the most data and therefore requiring the most
    # PGs
    heavy = ['.rgw.buckets']

    for pool in heavy:
        if prefix:
            pool = "{prefix}{pool}".format(prefix=prefix, pool=pool)
        rq.add_op_create_pool(name=pool, replica_count=replicas)

    # NOTE: we want these pools to have a smaller pg_num/pgp_num than the
    # others since they are not expected to contain as much data
    # '.rgw.buckets' is already created above as a heavy pool.
    light = ['.rgw',
             '.rgw.root',
             '.rgw.control',
             '.rgw.gc',
             '.rgw.buckets.index',
             '.rgw.buckets.extra',
             '.log',
             '.intent-log',
             '.usage',
             '.users',
             '.users.email',
             '.users.swift',
             '.users.uid']
    pg_num = config('rgw-lightweight-pool-pg-num')
    for pool in light:
        if prefix:
            pool = "{prefix}{pool}".format(
                prefix=prefix,
                pool=pool)

        rq.add_op_create_pool(name=pool, replica_count=replicas, pg_num=pg_num)

    return rq
Example #11
    def create_pool(self, name, replicas=3):
        """
        Request pool setup

        @param name: name of pool to create
        @param replicas: number of replicas for supporting pools
        """
        # json.dumps of the CephBrokerRq()
        requests = self.get_local(key='broker_reqs') or {}

        if name not in requests:
            rq = CephBrokerRq()
            rq.add_op_create_pool(name="{}".format(name),
                                  replica_count=replicas,
                                  weight=None)

            requests[name] = rq.request
            self.set_local(key='broker_reqs', value=requests)
            send_request_if_needed(rq, relation=self.relation_name)
            self.remove_state('{relation_name}.pools.available')
        else:
            rq = CephBrokerRq()
            try:
                j = json.loads(requests[name])
                rq.ops = j['ops']
                send_request_if_needed(rq, relation=self.relation_name)
            except ValueError as err:
                log("Unable to decode broker_req: {}.  Error: {}".format(
                    requests[name], err))
Example #12
    def create_erasure_profile(self,
                               name,
                               erasure_type='jerasure',
                               erasure_technique=None,
                               k=None,
                               m=None,
                               failure_domain=None,
                               lrc_locality=None,
                               shec_durability_estimator=None,
                               clay_helper_chunks=None,
                               device_class=None,
                               clay_scalar_mds=None,
                               lrc_crush_locality=None):
        """
        Create erasure coding profile

        @param name: Name of erasure coding profile
        @param erasure_type: Erasure coding plugin to use
        @param erasure_technique: Erasure coding technique to use
        @param k: Number of data chunks
        @param m: Number of coding chunks
        @param failure_domain: Failure domain to use for PG placement
        @param lrc_locality:
            Group the coding and data chunks into sets
            of size locality (lrc plugin)
        @param shec_durability_estimator:
            The number of parity chunks, each of which includes
            a data chunk in its calculation range (shec plugin)
        @param clay_helper_chunks:
            The number of helper chunks to use for recovery operations
            (clay plugin)
        @param device_class:
            Device class to use for profile (ssd, hdd, nvme)
        @param clay_scalar_mds:
            Plugin to use for CLAY layered construction
            (jerasure|isa|shec)
        @param lrc_crush_locality:
            Type of crush bucket in which set of chunks
            defined by lrc_locality will be stored.
        """
        rq = self.get_current_request() or CephBrokerRq()
        rq.add_op_create_erasure_profile(
            name=name,
            erasure_type=erasure_type,
            erasure_technique=erasure_technique,
            k=k,
            m=m,
            failure_domain=failure_domain,
            lrc_locality=lrc_locality,
            shec_durability_estimator=shec_durability_estimator,
            clay_helper_chunks=clay_helper_chunks,
            device_class=device_class,
            clay_scalar_mds=clay_scalar_mds,
            lrc_crush_locality=lrc_crush_locality)
        self.send_request_if_needed(rq)
        reactive.clear_flag(
            self.expand_name('{endpoint_name}.pools.available'))
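An illustrative call against the method above; the profile name, plugin and
k/m values are assumptions for the sketch, not interface defaults:

# ceph_client is assumed to be an instance of the endpoint class above.
ceph_client.create_erasure_profile(
    name='my-ec-profile',
    erasure_type='jerasure',
    erasure_technique='reed_sol_van',
    k=4, m=2,
    failure_domain='host')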
Example #13
    def request_ceph_permissions(self, ceph):
        rq = ceph.get_current_request() or CephBrokerRq()
        log("Requesting ceph permissions for client: {}".format(
            ch_core.hookenv.application_name()),
            level=ch_core.hookenv.INFO)
        rq.add_op({
            'op': 'set-key-permissions',
            'permissions': CEPH_CAPABILITIES,
            'client': ch_core.hookenv.application_name()
        })
        ceph.send_request_if_needed(rq)
Example #14
def get_ceph_request():
    rq = CephBrokerRq()
    if (config('libvirt-image-backend') == 'rbd' and
            assert_libvirt_rbd_imagebackend_allowed()):
        name = config('rbd-pool')
        replicas = config('ceph-osd-replication-count')
        weight = config('ceph-pool-weight')
        rq.add_op_create_pool(name=name, replica_count=replicas, weight=weight,
                              group='vms')
    if config('restrict-ceph-pools'):
        rq.add_op_request_access_to_group(name="volumes",
                                          permission='rwx')
        rq.add_op_request_access_to_group(name="images",
                                          permission='rwx')
        rq.add_op_request_access_to_group(name="vms",
                                          permission='rwx')
    return rq
Example #15
    def create_replicated_pool(self,
                               name,
                               replicas=3,
                               weight=None,
                               pg_num=None,
                               group=None,
                               namespace=None,
                               app_name=None,
                               **kwargs):
        """
        Request pool setup

        :param name: Name of pool to create
        :type name: str
        :param replicas: Number of replicas for supporting pools
        :type replicas: int
        :param weight: The percentage of data the pool makes up
        :type weight: Optional[float]
        :param pg_num: If not provided, this value will be calculated by the
                       broker based on how many OSDs are in the cluster at the
                       time of creation. Note that, if provided, this value
                       will be capped at the current available maximum.
        :type pg_num: Optional[int]
        :param group: Group to add pool to.
        :type group: Optional[str]
        :param namespace: A group can optionally have a namespace defined that
                          will be used to further restrict pool access.
        :type namespace: Optional[str]
        :param app_name: (Optional) Tag pool with application name.  Note that
                         there are certain protocols emerging upstream with
                         regard to meaningful application names to use.
                         Examples are ``rbd`` and ``rgw``.
        :type app_name: Optional[str]
        :param kwargs: Additional keyword arguments subject to validation.
                       Refer to CephBrokerRq.add_op_create_replicated_pool
                       method for documentation.
        :type kwargs: Dict[str,any]
        """
        rq = self.get_current_request() or CephBrokerRq()
        kwargs.update({
            'name': name,
            'replica_count': replicas,
            'pg_num': pg_num,
            'weight': weight,
            'group': group,
            'namespace': namespace,
            'app_name': app_name,
        })
        rq.add_op_create_replicated_pool(**kwargs)
        self.send_request_if_needed(rq)
        reactive.clear_flag(
            self.expand_name('{endpoint_name}.pools.available'))
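A short usage sketch for create_replicated_pool; the pool name, weight and
endpoint instance are illustrative:

# ceph_client is assumed to be an instance of the endpoint class above.
ceph_client.create_replicated_pool(name='glance', replicas=3,
                                   weight=20, app_name='rbd')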
Example #16
    def changed(self):
        data = {
            'mds_key': self.mds_key(),
            'fsid': self.fsid(),
            'auth': self.auth(),
            'mon_hosts': self.mon_hosts()
        }
        if all(data.values()):
            self.set_state('{relation_name}.available')

        json_rq = self.get_local(key='broker_req')
        if json_rq:
            rq = CephBrokerRq()
            j = json.loads(json_rq)
            rq.ops = j['ops']
            log("changed broker_req: {}".format(rq.ops))

            if rq and is_request_complete(rq, relation=self.relation_name):
                log("Setting ceph-mds.pools.available")
                self.set_state('{relation_name}.pools.available')
            else:
                log("incomplete request. broker_req not found")
Example #17
    def setUp(self):
        self.harness = Harness(CharmBase,
                               meta='''
            name: client
            provides:
              ceph-client:
                interface: ceph-client
        ''')
        self.client_req = CephBrokerRq()
        self.client_req.add_op_create_replicated_pool(name='tmbtil',
                                                      replica_count=3)
        self.client_req.add_op({
            'op': 'set-key-permissions',
            'permissions':
                ['osd', 'allow *', 'mon', 'allow *', 'mgr', 'allow r'],
            'client': 'ceph-iscsi'
        })
        self.client_req.request_id = 'a3ad24dd-7e2f-11ea-8ba2-e5a5b68b415f'
        self.random_request = CephBrokerRq()
        self.random_request.add_op_create_replicated_pool(name='another-pool',
                                                          replica_count=3)
Example #18
    def create_erasure_pool(self,
                            name,
                            erasure_profile=None,
                            weight=None,
                            group=None,
                            app_name=None,
                            max_bytes=None,
                            max_objects=None,
                            allow_ec_overwrites=False,
                            **kwargs):
        """
        Request erasure coded pool setup

        :param name: Name of pool to create
        :type name: str
        :param erasure_profile: Name of erasure profile for pool
        :type erasure_profile: str
        :param weight: The percentage of data the pool makes up
        :type weight: Optional[float]
        :param group: Group to add pool to.
        :type group: Optional[str]
        :param app_name: Name of application using pool
        :type app_name: Optional[str]
        :param max_bytes: Maximum bytes of quota to apply
        :type max_bytes: Optional[int]
        :param max_objects: Maximum object quota to apply
        :type max_objects: Optional[int]
        :param allow_ec_overwrites: Allow EC pools to be overwritten
        :type allow_ec_overwrites: bool
        :param kwargs: Additional keyword arguments subject to validation.
                       Refer to CephBrokerRq.add_op_create_erasure_pool
                       method for documentation.
        :type kwargs: Dict[str,any]
        """
        rq = self.get_current_request() or CephBrokerRq()
        kwargs.update({
            'name': name,
            'erasure_profile': erasure_profile,
            'weight': weight,
            'group': group,
            'app_name': app_name,
            'max_bytes': max_bytes,
            'max_objects': max_objects,
            'allow_ec_overwrites': allow_ec_overwrites,
        })
        rq.add_op_create_erasure_pool(**kwargs)
        self.send_request_if_needed(rq)
        reactive.clear_flag(
            self.expand_name('{endpoint_name}.pools.available'))
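And the erasure coded counterpart. Note that overwrites generally have to be
enabled on an erasure coded pool before RBD can use it; pool and profile
names below are illustrative:

# ceph_client is assumed to be an instance of the endpoint class above.
ceph_client.create_erasure_pool(
    name='glance-data',
    erasure_profile='my-ec-profile',
    allow_ec_overwrites=True,  # required for RBD on EC pools
    app_name='rbd')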
Example #19
    def create_pools(self, names, replicas=3):
        """
        Request pools setup

        @param names: list of pool names to create
        @param replicas: number of replicas for supporting pools
        """
        # json.dumps of the CephBrokerRq()
        requests = self.get_local(key='broker_reqs') or {}

        new_names = [name for name in names if name not in requests]

        # existing names get ignored here
        # new names get added to a single request
        if new_names:
            rq = CephBrokerRq()
            for name in new_names:
                rq.add_op_create_pool(name="{}".format(name),
                                      replica_count=replicas,
                                      weight=None)
                requests[name] = rq.request
            self.set_local(key='broker_reqs', value=requests)
            send_request_if_needed(rq, relation=self.relation_name)
            self.remove_state('{relation_name}.pools.available')
Example #20
    def request_cephfs(self, name, extra_pools=None):
        """Request creation of Ceph FS

        :param name: Name of mds to create
        :type name: str
        :param extra_pools: Additional pools to add to FS.
        :type extra_pools: List[str]
        """
        rq = self.get_current_request() or CephBrokerRq()
        rq.add_op({
            'op': 'create-cephfs',
            'mds_name': name,
            'data_pool': "{}_data".format(name),
            'extra_pools': extra_pools,
            'metadata_pool': "{}_metadata".format(name)
        })
        self.send_request_if_needed(rq)
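A one-line usage sketch; the filesystem name is illustrative and the
{name}_data/{name}_metadata pool names are derived by the method above:

# ceph_mds is assumed to be an instance of the endpoint class above.
ceph_mds.request_cephfs('myfs')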
Example #21
    def initialize_mds(self, name, replicas=3):
        """
        Request pool setup and mds creation

        @param name: name of the mds; used to derive pool names
        @param replicas: number of replicas for supporting pools
        """
        # json.dumps of the CephBrokerRq()
        json_rq = self.get_local(key='broker_req')

        if not json_rq:
            rq = CephBrokerRq()
            rq.add_op_create_pool(name="{}_data".format(name),
                                  replica_count=replicas,
                                  weight=None)
            rq.add_op_create_pool(name="{}_metadata".format(name),
                                  replica_count=replicas,
                                  weight=None)
            # Create CephFS
            rq.ops.append({
                'op': 'create-cephfs',
                'mds_name': name,
                'data_pool': "{}_data".format(name),
                'metadata_pool': "{}_metadata".format(name),
            })
            self.set_local(key='broker_req', value=rq.request)
            send_request_if_needed(rq, relation=self.relation_name)
        else:
            rq = CephBrokerRq()
            try:
                j = json.loads(json_rq)
                log("Json request: {}".format(json_rq))
                rq.ops = j['ops']
                send_request_if_needed(rq, relation=self.relation_name)
            except ValueError as err:
                log("Unable to decode broker_req: {}.  Error: {}".format(
                    json_rq, err))
Example #22
    def request_ceph_permissions(self, ceph):
        rq = CephBrokerRq()

        json_rq = ceph.get_local(key='broker_req')
        if json_rq:
            try:
                j = json.loads(json_rq)
                log("Json request: {}".format(json_rq))
                rq.set_ops(j['ops'])
            except ValueError as err:
                log("Unable to decode broker_req: {}. Error {}".format(
                    json_rq, err))

        rq.add_op({'op': 'set-key-permissions',
                   'permissions': CEPH_CAPABILITIES,
                   'client': 'manila-ganesha'})
        ceph.set_local(key='broker_req', value=rq.request)
        send_request_if_needed(rq, relation='ceph')
Example #23
def get_ceph_request():
    rq = CephBrokerRq()
    if (config('libvirt-image-backend') == 'rbd'
            and assert_libvirt_rbd_imagebackend_allowed()):
        pool_name = config('rbd-pool')
        replicas = config('ceph-osd-replication-count')
        weight = config('ceph-pool-weight')
        bluestore_compression = ch_context.CephBlueStoreCompressionContext()

        if config('pool-type') == 'erasure-coded':
            # General EC plugin config
            plugin = config('ec-profile-plugin')
            technique = config('ec-profile-technique')
            device_class = config('ec-profile-device-class')
            metadata_pool_name = (config('ec-rbd-metadata-pool')
                                  or "{}-metadata".format(pool_name))
            bdm_k = config('ec-profile-k')
            bdm_m = config('ec-profile-m')
            # LRC plugin config
            bdm_l = config('ec-profile-locality')
            crush_locality = config('ec-profile-crush-locality')
            # SHEC plugin config
            bdm_c = config('ec-profile-durability-estimator')
            # CLAY plugin config
            bdm_d = config('ec-profile-helper-chunks')
            scalar_mds = config('ec-profile-scalar-mds')
            # Profile name
            profile_name = (config('ec-profile-name')
                            or "{}-profile".format(pool_name))
            # Metadata sizing is approximately 1% of overall data weight
            # but is in effect driven by the number of rbd's rather than
            # their size - so it can be very lightweight.
            metadata_weight = weight * 0.01
            # Resize data pool weight to accommodate metadata weight
            weight = weight - metadata_weight
            # Create metadata pool
            rq.add_op_create_pool(name=metadata_pool_name,
                                  replica_count=replicas,
                                  weight=metadata_weight,
                                  group='vms',
                                  app_name='rbd')

            # Create erasure profile
            rq.add_op_create_erasure_profile(name=profile_name,
                                             k=bdm_k,
                                             m=bdm_m,
                                             lrc_locality=bdm_l,
                                             lrc_crush_locality=crush_locality,
                                             shec_durability_estimator=bdm_c,
                                             clay_helper_chunks=bdm_d,
                                             clay_scalar_mds=scalar_mds,
                                             device_class=device_class,
                                             erasure_type=plugin,
                                             erasure_technique=technique)

            # Create EC data pool

            # NOTE(fnordahl): once we deprecate Python 3.5 support we can do
            # the unpacking of the BlueStore compression arguments as part of
            # the function arguments. Until then we need to build the dict
            # prior to the function call.
            kwargs = {
                'name': pool_name,
                'erasure_profile': profile_name,
                'weight': weight,
                'group': "vms",
                'app_name': "rbd",
                'allow_ec_overwrites': True
            }
            kwargs.update(bluestore_compression.get_kwargs())
            rq.add_op_create_erasure_pool(**kwargs)
        else:
            kwargs = {
                'name': pool_name,
                'replica_count': replicas,
                'weight': weight,
                'group': 'vms',
                'app_name': 'rbd',
            }
            kwargs.update(bluestore_compression.get_kwargs())
            rq.add_op_create_replicated_pool(**kwargs)

    if config('restrict-ceph-pools'):
        rq.add_op_request_access_to_group(
            name="volumes",
            object_prefix_permissions={'class-read': ['rbd_children']},
            permission='rwx')
        rq.add_op_request_access_to_group(
            name="images",
            object_prefix_permissions={'class-read': ['rbd_children']},
            permission='rwx')
        rq.add_op_request_access_to_group(
            name="vms",
            object_prefix_permissions={'class-read': ['rbd_children']},
            permission='rwx')
    return rq
Example #24
def get_ceph_request():
    rq = CephBrokerRq()
    replicas = config('ceph-osd-replication-count')
    rq.add_op_create_pool(name=config('rbd-pool'), replica_count=replicas)
    return rq
Example #25
class TestCephClientRequires(unittest.TestCase):

    TEST_CASE_0 = {
        'ceph-mon/0': {
            'remote_unit_data': {
                'ingress-address': '192.0.2.1',
                'ceph-public-address': '192.0.2.1'
            }
        },
        'ceph-mon/1': {
            'remote_unit_data': {
                'ingress-address': '192.0.2.2',
                'ceph-public-address': '192.0.2.2'
            }
        },
        'ceph-mon/2': {
            'remote_unit_data': {
                'ingress-address': '192.0.2.3',
                'ceph-public-address': '192.0.2.3'
            }
        },
        'client/0': {
            'remote_unit_data': {
                'ingress-address': '192.0.2.4'
            }
        }
    }

    TEST_CASE_1 = {
        'ceph-mon/0': {
            'remote_unit_data': {
                'auth': 'cephx',
                'key': 'AQBUfpVeNl7CHxAA8/f6WTcYFxW2dJ5VyvWmJg==',
                'ingress-address': '192.0.2.1',
                'ceph-public-address': '192.0.2.1'
            }
        },
        'ceph-mon/1': {
            'remote_unit_data': {
                'auth': 'cephx',
                'key': 'AQBUfpVeNl7CHxAA8/f6WTcYFxW2dJ5VyvWmJg==',
                'ingress-address': '192.0.2.2',
                'ceph-public-address': '192.0.2.2',
                'broker-rsp-client-0':
                ('{"exit-code": 0, '
                 '"request-id": "a3ad24dd-7e2f-11ea-8ba2-e5a5b68b415f"}'),
                'broker-rsp-client-1':
                ('{"exit-code": 0, '
                 '"request-id": "c729e333-7e2f-11ea-8b3c-09dfcfc90070"}'),
                'broker_rsp':
                ('{"exit-code": 0, '
                 '"request-id": "c729e333-7e2f-11ea-8b3c-09dfcfc90070"}')
            }
        },
        'ceph-mon/2': {
            'remote_unit_data': {
                'auth': 'cephx',
                'key': 'AQBUfpVeNl7CHxAA8/f6WTcYFxW2dJ5VyvWmJg==',
                'ingress-address': '192.0.2.3',
                'ceph-public-address': '192.0.2.3'
            }
        },
        'client/0': {
            'remote_unit_data': {
                'ingress-address': '192.0.2.4',
                'broker_req':
                ('{"api-version": 1, '
                 '"ops": [{"op": "create-pool", "name": "tmbtil", '
                 '"replicas": 3, "pg_num": null, "weight": null, '
                 '"group": null, "group-namespace": null, '
                 '"app-name": null, '
                 '"max-bytes": null, "max-objects": null}, '
                 '{"op": "set-key-permissions", '
                 '"permissions": ["osd", "allow *", "mon", "allow *", '
                 '"mgr", '
                 '"allow r"], "client": "ceph-iscsi"}], '
                 '"request-id": "a3ad24dd-7e2f-11ea-8ba2-e5a5b68b415f"}')
            }
        }
    }

    def setUp(self):
        self.harness = Harness(CharmBase,
                               meta='''
            name: client
            provides:
              ceph-client:
                interface: ceph-client
        ''')
        self.client_req = CephBrokerRq()
        self.client_req.add_op_create_replicated_pool(name='tmbtil',
                                                      replica_count=3)
        self.client_req.add_op({
            'op': 'set-key-permissions',
            'permissions':
                ['osd', 'allow *', 'mon', 'allow *', 'mgr', 'allow r'],
            'client': 'ceph-iscsi'
        })
        self.client_req.request_id = 'a3ad24dd-7e2f-11ea-8ba2-e5a5b68b415f'
        self.random_request = CephBrokerRq()
        self.random_request.add_op_create_replicated_pool(name='another-pool',
                                                          replica_count=3)

    def apply_unit_data(self, test_case, rel_id,
                        load_request_from_client=True):
        for unit_name, data in test_case.items():
            if (not load_request_from_client
                    and unit_name.startswith('client')):
                continue
            self.harness.add_relation_unit(rel_id, unit_name)
            self.harness.update_relation_data(
                rel_id, unit_name, test_case[unit_name]['remote_unit_data'])

    def harness_setup(self, test_case, load_request_from_client=False):
        rel_id = self.harness.add_relation('ceph-client', 'ceph-mon')
        self.apply_unit_data(test_case, rel_id)
        self.harness.begin()
        ceph_client = CephClientRequires(self.harness.charm, 'ceph-client')
        if load_request_from_client:
            raw_rq = test_case['client/0']['remote_unit_data']['broker_req']
            ceph_client.state.broker_req = raw_rq
        return ceph_client

    def test_request_osd_settings(self):
        self.harness.begin()
        self.ceph_client = CephClientRequires(self.harness.charm,
                                              'ceph-client')
        relation_id = self.harness.add_relation('ceph-client', 'ceph-mon')
        self.harness.add_relation_unit(relation_id, 'ceph-mon/0')
        self.harness.update_relation_data(relation_id, 'ceph-mon/0',
                                          {'ingress-address': '192.0.2.2'})
        settings = {'osd heartbeat grace': 20, 'osd heartbeat interval': 5}
        self.ceph_client.request_osd_settings(settings)

        rel = self.harness.charm.model.get_relation('ceph-client')
        rel_data = rel.data[self.harness.charm.model.unit]
        self.assertEqual(json.loads(rel_data['osd-settings']), settings)

    def test_mon_hosts(self):
        self.harness.begin()
        self.ceph_client = CephClientRequires(self.harness.charm,
                                              'ceph-client')
        mon_ips = ['192.0.2.1', '192.0.2.2', '2001:DB8::1']
        mon_hosts = self.ceph_client.mon_hosts(mon_ips)
        self.assertEqual(mon_hosts,
                         ['192.0.2.1', '192.0.2.2', '[2001:DB8::1]'])

    def test_mon_hosts_ceph_proxy(self):
        self.harness.begin()
        self.ceph_client = CephClientRequires(self.harness.charm,
                                              'ceph-client')
        proxy_mon_ips = ['192.0.2.1 192.0.2.2 2001:DB8::1']
        mon_hosts = self.ceph_client.mon_hosts(proxy_mon_ips)
        self.assertEqual(mon_hosts,
                         ['192.0.2.1', '192.0.2.2', '[2001:DB8::1]'])

    def test_get_relation_data(self):
        relation_id_a = self.harness.add_relation('ceph-client', 'ceph-monA')
        relation_id_b = self.harness.add_relation('ceph-client', 'ceph-monB')
        self.harness.begin()
        self.harness.add_relation_unit(relation_id_a, 'ceph-monA/0')
        self.harness.update_relation_data(
            relation_id_a,
            'ceph-monA/0',
            {
                'ingress-address': '192.0.2.2',
                'ceph-public-address': '192.0.2.2',
                'key': 'foo',
                'auth': 'bar'
            },
        )
        self.harness.add_relation_unit(relation_id_a, 'ceph-monA/1')
        self.harness.update_relation_data(
            relation_id_a,
            'ceph-monA/1',
            {'ingress-address': '192.0.2.3'},
        )
        self.harness.add_relation_unit(relation_id_b, 'ceph-monB/0')
        self.harness.update_relation_data(
            relation_id_b,
            'ceph-monB/0',
            {
                'ingress-address': '2001:DB8::1',
                'ceph-public-address': '2001:DB8::1',
                'key': 'foo',
                'auth': 'bar'
            },
        )
        self.harness.add_relation_unit(relation_id_b, 'ceph-monB/1')
        self.harness.update_relation_data(
            relation_id_b,
            'ceph-monB/1',
            {
                'ingress-address': '2001:DB8::2',
                'ceph-public-address': '2001:DB8::2'
            },
        )

        self.ceph_client = CephClientRequires(self.harness.charm,
                                              'ceph-client')
        rel_data = self.ceph_client.get_relation_data()
        self.assertEqual(
            rel_data, {
                'mon_hosts': ['192.0.2.2', '[2001:DB8::1]', '[2001:DB8::2]'],
                'key': 'foo',
                'auth': 'bar',
            })

    def test_existing_request_complete(self):
        ceph_client = self.harness_setup(self.TEST_CASE_1,
                                         load_request_from_client=True)
        self.assertTrue(ceph_client.existing_request_complete())

    def test_existing_request_false(self):
        test_case = copy.deepcopy(self.TEST_CASE_1)
        test_case['ceph-mon/1']['remote_unit_data'] = {}
        ceph_client = self.harness_setup(test_case,
                                         load_request_from_client=True)
        self.assertFalse(ceph_client.existing_request_complete())

    def test_on_changed(self):
        class TestReceiver(framework.Object):
            def __init__(self, parent, key):
                super().__init__(parent, key)
                self.observed_events = []

            def on_broker_available(self, event):
                self.observed_events.append(event)

        self.harness.begin()
        self.ceph_client = CephClientRequires(self.harness.charm,
                                              'ceph-client')
        receiver = TestReceiver(self.harness.framework, 'receiver')
        self.harness.framework.observe(self.ceph_client.on.broker_available,
                                       receiver)
        # No data yet.
        relation_id = self.harness.add_relation('ceph-client', 'ceph-mon')
        # No broker_available events until mon unit data is present.
        self.assertEqual(len(receiver.observed_events), 0)
        self.harness.add_relation_unit(relation_id, 'ceph-mon/0')
        self.harness.update_relation_data(
            relation_id,
            'ceph-mon/0',
            {
                'ingress-address': '192.0.2.2',
                'ceph-public-address': '192.0.2.2'
            },
        )

        # Got the necessary data - should get a BrokerAvailable event.
        self.apply_unit_data(self.TEST_CASE_1,
                             relation_id,
                             load_requst_from_client=False)
        # 1 broker_available event per mon and 1 completed request: 4 events
        self.assertEqual(len(receiver.observed_events), 4)
        self.assertIsInstance(receiver.observed_events[0],
                              BrokerAvailableEvent)

    @mock.patch.object(CephClientRequires, 'send_request_if_needed')
    def test_create_replicated_pool(self, _send_request_if_needed):
        # TODO: Replace mocking with real calls. Otherwise this test is not
        # very useful.
        self.harness.begin()
        self.ceph_client = CephClientRequires(self.harness.charm,
                                              'ceph-client')

        self.ceph_client.create_replicated_pool('ceph-client')
        _send_request_if_needed.assert_not_called()

        self.harness.add_relation('ceph-client', 'ceph-mon')
        self.ceph_client.create_replicated_pool('ceph-client')
        _send_request_if_needed.assert_called()

    @mock.patch.object(CephClientRequires, 'send_request_if_needed')
    def test_create_request_ceph_permissions(self, _send_request_if_needed):
        # TODO: Replace mocking with real calls. Otherwise this test is not
        # very useful.
        self.harness.begin()
        self.ceph_client = CephClientRequires(self.harness.charm,
                                              'ceph-client')
        CEPH_CAPABILITIES = [
            "osd", "allow *", "mon", "allow *", "mgr", "allow r"
        ]
        self.ceph_client.request_ceph_permissions('ceph-iscsi',
                                                  CEPH_CAPABILITIES)
        _send_request_if_needed.assert_not_called()

        self.harness.add_relation('ceph-client', 'ceph-mon')
        self.ceph_client.create_replicated_pool('ceph-client')
        _send_request_if_needed.assert_called()

    def test_get_previous_request(self):
        ceph_client = self.harness_setup(self.TEST_CASE_1,
                                         load_request_from_client=False)
        rel = self.harness.charm.model.get_relation('ceph-client')
        self.assertEqual(
            ceph_client.get_previous_request(rel).request_id,
            'a3ad24dd-7e2f-11ea-8ba2-e5a5b68b415f')

    def test_get_previous_request_no_request(self):
        ceph_client = self.harness_setup(self.TEST_CASE_0,
                                         load_request_from_client=False)
        rel = self.harness.charm.model.get_relation('ceph-client')
        self.assertEqual(ceph_client.get_previous_request(rel), None)

    def test_get_request_states(self):
        ceph_client = self.harness_setup(self.TEST_CASE_1,
                                         load_request_from_client=False)
        relations = [self.harness.charm.model.get_relation('ceph-client')]
        self.assertEqual(
            ceph_client.get_request_states(self.client_req, relations),
            {'ceph-client:0': {
                'complete': True,
                'sent': True
            }})

    def test_get_request_states_new_request(self):
        ceph_client = self.harness_setup(self.TEST_CASE_1,
                                         load_request_from_client=False)
        relations = [self.harness.charm.model.get_relation('ceph-client')]
        self.assertEqual(
            ceph_client.get_request_states(self.random_request, relations),
            {'ceph-client:0': {
                'complete': False,
                'sent': False
            }})

    def test_is_request_complete_for_relation(self):
        ceph_client = self.harness_setup(self.TEST_CASE_1,
                                         load_request_from_client=False)
        relation = self.harness.charm.model.get_relation('ceph-client')
        self.assertTrue(
            ceph_client.is_request_complete_for_relation(
                self.client_req, relation))

    def test_is_request_complete(self):
        ceph_client = self.harness_setup(self.TEST_CASE_1,
                                         load_request_from_client=False)
        relations = [self.harness.charm.model.get_relation('ceph-client')]
        self.assertTrue(
            ceph_client.is_request_complete(self.client_req, relations))

    def test_is_request_complete_similar_req(self):
        ceph_client = self.harness_setup(self.TEST_CASE_1,
                                         load_request_from_client=False)
        relations = [self.harness.charm.model.get_relation('ceph-client')]
        similar_req = copy.deepcopy(self.client_req)
        similar_req.request_id = '2234234234'
        self.assertTrue(ceph_client.is_request_complete(
            similar_req, relations))

    def test_is_request_complete_new_req(self):
        ceph_client = self.harness_setup(self.TEST_CASE_1,
                                         load_request_from_client=False)
        relations = [self.harness.charm.model.get_relation('ceph-client')]
        self.assertFalse(
            ceph_client.is_request_complete(self.random_request, relations))

    def test_is_request_sent(self):
        ceph_client = self.harness_setup(self.TEST_CASE_1,
                                         load_request_from_client=False)
        relations = [self.harness.charm.model.get_relation('ceph-client')]
        self.assertTrue(ceph_client.is_request_sent(self.client_req,
                                                    relations))

    def test_is_request_sent_similar_req(self):
        ceph_client = self.harness_setup(self.TEST_CASE_1,
                                         load_request_from_client=False)
        relations = [self.harness.charm.model.get_relation('ceph-client')]
        similar_req = copy.deepcopy(self.client_req)
        similar_req.request_id = '2234234234'
        self.assertTrue(ceph_client.is_request_sent(similar_req, relations))

    def test_is_request_sent_new_req(self):
        ceph_client = self.harness_setup(self.TEST_CASE_1,
                                         load_request_from_client=False)
        relations = [self.harness.charm.model.get_relation('ceph-client')]
        self.assertFalse(
            ceph_client.is_request_sent(self.random_request, relations))

    def test_send_request_if_needed(self):
        ceph_client = self.harness_setup(self.TEST_CASE_0,
                                         load_request_from_client=False)
        relations = [self.harness.charm.model.get_relation('ceph-client')]
        self.assertIsNone(
            relations[0].data[self.harness.charm.model.unit].get('broker_req'))
        ceph_client.send_request_if_needed(self.random_request, relations)
        self.assertIsNotNone(
            relations[0].data[self.harness.charm.model.unit]['broker_req'])

    def test_send_request_if_needed_duplicate(self):
        ceph_client = self.harness_setup(self.TEST_CASE_1,
                                         load_request_from_client=False)
        relations = [self.harness.charm.model.get_relation('ceph-client')]
        similar_req = copy.deepcopy(self.client_req)
        similar_req.request_id = '2234234234'
        orig_req_data = relations[0].data[self.harness.charm.model.unit].get(
            'broker_req')
        ceph_client.send_request_if_needed(similar_req, relations)
        self.assertEqual(
            relations[0].data[self.harness.charm.model.unit]['broker_req'],
            orig_req_data)
Example #26
def get_create_rgw_pools_rq(prefix=None):
    """Pre-create RGW pools so that they have the correct settings.

    If a prefix is provided it will be prepended to each pool name.

    When RGW creates its own pools it will create them with non-optimal
    settings (LP: #1476749).

    NOTE: see http://docs.ceph.com/docs/master/radosgw/config-ref/#pools and
          http://docs.ceph.com/docs/master/radosgw/config/#create-pools for
          list of supported/required pools.
    """
    def _add_light_pool(rq, pool, pg_num, prefix=None):
        # Per the Ceph PG Calculator, all of the lightweight pools get 0.10%
        # of the data by default and only the .rgw.buckets.* get higher values
        weights = {
            '.rgw.buckets.index': 3.00,
            '.rgw.buckets.extra': 1.00
        }
        w = weights.get(pool, 0.10)
        if prefix:
            pool = "{prefix}{pool}".format(prefix=prefix, pool=pool)
        if pg_num > 0:
            rq.add_op_create_pool(name=pool, replica_count=replicas,
                                  pg_num=pg_num, group='objects',
                                  app_name=CEPH_POOL_APP_NAME)
        else:
            rq.add_op_create_pool(name=pool, replica_count=replicas,
                                  weight=w, group='objects',
                                  app_name=CEPH_POOL_APP_NAME)

    rq = CephBrokerRq()
    replicas = config('ceph-osd-replication-count')

    prefix = prefix or 'default'
    # Buckets likely to contain the most data and therefore
    # requiring the most PGs
    heavy = [
        '.rgw.buckets.data'
    ]
    bucket_weight = config('rgw-buckets-pool-weight')

    if config('pool-type') == 'erasure-coded':
        # General EC plugin config
        plugin = config('ec-profile-plugin')
        technique = config('ec-profile-technique')
        device_class = config('ec-profile-device-class')
        bdm_k = config('ec-profile-k')
        bdm_m = config('ec-profile-m')
        # LRC plugin config
        bdm_l = config('ec-profile-locality')
        crush_locality = config('ec-profile-crush-locality')
        # SHEC plugin config
        bdm_c = config('ec-profile-durability-estimator')
        # CLAY plugin config
        bdm_d = config('ec-profile-helper-chunks')
        scalar_mds = config('ec-profile-scalar-mds')
        # Profile name
        service = service_name()
        profile_name = (
            config('ec-profile-name') or "{}-profile".format(service)
        )
        rq.add_op_create_erasure_profile(
            name=profile_name,
            k=bdm_k, m=bdm_m,
            lrc_locality=bdm_l,
            lrc_crush_locality=crush_locality,
            shec_durability_estimator=bdm_c,
            clay_helper_chunks=bdm_d,
            clay_scalar_mds=scalar_mds,
            device_class=device_class,
            erasure_type=plugin,
            erasure_technique=technique
        )

        for pool in heavy:
            pool = "{prefix}{pool}".format(prefix=prefix, pool=pool)
            rq.add_op_create_erasure_pool(
                name=pool,
                erasure_profile=profile_name,
                weight=bucket_weight,
                group="objects",
                app_name=CEPH_POOL_APP_NAME
            )
    else:
        for pool in heavy:
            pool = "{prefix}{pool}".format(prefix=prefix, pool=pool)
            rq.add_op_create_pool(name=pool, replica_count=replicas,
                                  weight=bucket_weight, group='objects',
                                  app_name=CEPH_POOL_APP_NAME)

    # NOTE: we want these pools to have a smaller pg_num/pgp_num than the
    # others since they are not expected to contain as much data
    light = [
        '.rgw.control',
        '.rgw.data.root',
        '.rgw.gc',
        '.rgw.log',
        '.rgw.intent-log',
        '.rgw.meta',
        '.rgw.usage',
        '.rgw.users.keys',
        '.rgw.users.email',
        '.rgw.users.swift',
        '.rgw.users.uid',
        '.rgw.buckets.extra',
        '.rgw.buckets.index',
    ]
    pg_num = config('rgw-lightweight-pool-pg-num')
    for pool in light:
        _add_light_pool(rq, pool, pg_num, prefix)

    _add_light_pool(rq, '.rgw.root', pg_num)

    if config('restrict-ceph-pools'):
        rq.add_op_request_access_to_group(name="objects",
                                          permission='rwx',
                                          key_name='radosgw.gateway')

    return rq
Example #27
def get_create_rgw_pools_rq(prefix=None):
    """Pre-create RGW pools so that they have the correct settings.

    If a prefix is provided it will be prepended to each pool name.

    When RGW creates its own pools it will create them with non-optimal
    settings (LP: #1476749).

    NOTE: see http://docs.ceph.com/docs/master/radosgw/config-ref/#pools and
          http://docs.ceph.com/docs/master/radosgw/config/#create-pools for
          list of supported/required pools.
    """
    def _add_light_pool(rq, pool, pg_num, prefix=None):
        # Per the Ceph PG Calculator, all of the lightweight pools get 0.10%
        # of the data by default and only the .rgw.buckets.* get higher values
        weights = {
            '.rgw.buckets.index': 3.00,
            '.rgw.buckets.extra': 1.00
        }
        w = weights.get(pool, 0.10)
        if prefix:
            pool = "{prefix}{pool}".format(prefix=prefix, pool=pool)
        if pg_num > 0:
            rq.add_op_create_pool(name=pool, replica_count=replicas,
                                  pg_num=pg_num, group='objects',
                                  app_name=CEPH_POOL_APP_NAME)
        else:
            rq.add_op_create_pool(name=pool, replica_count=replicas,
                                  weight=w, group='objects',
                                  app_name=CEPH_POOL_APP_NAME)

    from apt import apt_pkg

    apt_pkg.init()
    rq = CephBrokerRq()
    replicas = config('ceph-osd-replication-count')

    prefix = prefix or 'default'

    # Buckets likely to contain the most data and therefore
    # requiring the most PGs
    heavy = [
        '.rgw.buckets.data'
    ]
    bucket_weight = config('rgw-buckets-pool-weight')
    for pool in heavy:
        pool = "{prefix}{pool}".format(prefix=prefix, pool=pool)
        rq.add_op_create_pool(name=pool, replica_count=replicas,
                              weight=bucket_weight, group='objects',
                              app_name=CEPH_POOL_APP_NAME)

    # NOTE: we want these pools to have a smaller pg_num/pgp_num than the
    # others since they are not expected to contain as much data
    light = [
        '.rgw.control',
        '.rgw.data.root',
        '.rgw.gc',
        '.rgw.log',
        '.rgw.intent-log',
        '.rgw.meta',
        '.rgw.usage',
        '.rgw.users.keys',
        '.rgw.users.email',
        '.rgw.users.swift',
        '.rgw.users.uid',
        '.rgw.buckets.extra',
        '.rgw.buckets.index',
    ]
    pg_num = config('rgw-lightweight-pool-pg-num')
    for pool in light:
        _add_light_pool(rq, pool, pg_num, prefix)

    _add_light_pool(rq, '.rgw.root', pg_num)

    if config('restrict-ceph-pools'):
        rq.add_op_request_access_to_group(name="objects",
                                          permission='rwx',
                                          key_name='radosgw.gateway')

    return rq
Example #28
        ceph.tune_dev(dev)
    # filter() returns a lazy iterator on Python 3; materialise it so the
    # truthiness check and indexing below work as intended.
    mounts = list(filter(lambda disk: device_path
                         in disk.device, psutil.disk_partitions()))
    if mounts:
        osd = mounts[0]
        osd_id = osd.mountpoint.split('/')[-1].split('-')[-1]
        request.ops.append({
            'op': 'move-osd-to-bucket',
            'osd': "osd.{}".format(osd_id),
            'bucket': bucket})
    return request


def get_devices():
    devices = []
    for path in action_get('osd-devices').split(' '):
        path = path.strip()
        if os.path.isabs(path):
            devices.append(path)

    return devices


if __name__ == "__main__":
    request = CephBrokerRq()
    for dev in get_devices():
        request = add_device(request=request,
                             device_path=dev,
                             bucket=action_get("bucket"))
    send_request_if_needed(request, relation='mon')