class TestCephClientRequires(unittest.TestCase):
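    """Tests for the CephClientRequires relation endpoint.

    TEST_CASE_0 models relation data before any broker exchange has
    happened; TEST_CASE_1 models a completed exchange, including the
    client's broker_req and the mons' broker responses.
    """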

    TEST_CASE_0 = {
        'ceph-mon/0': {
            'remote_unit_data': {
                'ingress-address': '192.0.2.1',
                'ceph-public-address': '192.0.2.1'
            }
        },
        'ceph-mon/1': {
            'remote_unit_data': {
                'ingress-address': '192.0.2.2',
                'ceph-public-address': '192.0.2.2'
            }
        },
        'ceph-mon/2': {
            'remote_unit_data': {
                'ingress-address': '192.0.2.3',
                'ceph-public-address': '192.0.2.3'
            }
        },
        'client/0': {
            'remote_unit_data': {
                'ingress-address': '192.0.2.4'
            }
        }
    }

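    # Response key convention: mons reply per requesting unit under
    # 'broker-rsp-<unit name with "/" replaced by "-">', alongside the
    # legacy shared 'broker_rsp' key.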
    TEST_CASE_1 = {
        'ceph-mon/0': {
            'remote_unit_data': {
                'auth': 'cephx',
                'key': 'AQBUfpVeNl7CHxAA8/f6WTcYFxW2dJ5VyvWmJg==',
                'ingress-address': '192.0.2.1',
                'ceph-public-address': '192.0.2.1'
            }
        },
        'ceph-mon/1': {
            'remote_unit_data': {
                'auth': 'cephx',
                'key': 'AQBUfpVeNl7CHxAA8/f6WTcYFxW2dJ5VyvWmJg==',
                'ingress-address': '192.0.2.2',
                'ceph-public-address': '192.0.2.2',
                'broker-rsp-client-0':
                ('{"exit-code": 0, '
                 '"request-id": "a3ad24dd-7e2f-11ea-8ba2-e5a5b68b415f"}'),
                'broker-rsp-client-1':
                ('{"exit-code": 0, '
                 '"request-id": "c729e333-7e2f-11ea-8b3c-09dfcfc90070"}'),
                'broker_rsp':
                ('{"exit-code": 0, '
                 '"request-id": "c729e333-7e2f-11ea-8b3c-09dfcfc90070"}')
            }
        },
        'ceph-mon/2': {
            'remote_unit_data': {
                'auth': 'cephx',
                'key': 'AQBUfpVeNl7CHxAA8/f6WTcYFxW2dJ5VyvWmJg==',
                'ingress-address': '192.0.2.3',
                'ceph-public-address': '192.0.2.3'
            }
        },
        'client/0': {
            'remote_unit_data': {
                'ingress-address': '192.0.2.4',
                'broker_req':
                ('{"api-version": 1, '
                 '"ops": [{"op": "create-pool", "name": "tmbtil", '
                 '"replicas": 3, "pg_num": null, "weight": null, '
                 '"group": null, "group-namespace": null, '
                 '"app-name": null, '
                 '"max-bytes": null, "max-objects": null}, '
                 '{"op": "set-key-permissions", '
                 '"permissions": ["osd", "allow *", "mon", "allow *", '
                 '"mgr", '
                 '"allow r"], "client": "ceph-iscsi"}], '
                 '"request-id": "a3ad24dd-7e2f-11ea-8ba2-e5a5b68b415f"}')
            }
        }
    }

    def setUp(self):
        self.harness = Harness(CharmBase,
                               meta='''
            name: client
            requires:
              ceph-client:
                interface: ceph-client
        ''')
        self.client_req = CephBrokerRq()
        self.client_req.add_op_create_replicated_pool(name='tmbtil',
                                                      replica_count=3)
        self.client_req.add_op({
            'op': 'set-key-permissions',
            'permissions': ['osd', 'allow *', 'mon', 'allow *', 'mgr',
                            'allow r'],
            'client': 'ceph-iscsi'
        })
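        # Pin the request-id so this request matches the canned broker_req
        # and broker-rsp payloads in TEST_CASE_1.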
        self.client_req.request_id = 'a3ad24dd-7e2f-11ea-8ba2-e5a5b68b415f'
        self.random_request = CephBrokerRq()
        self.random_request.add_op_create_replicated_pool(name='another-pool',
                                                          replica_count=3)

    def apply_unit_data(self, test_case, rel_id,
                        load_request_from_client=True):
        for unit_name, data in test_case.items():
            if (not load_request_from_client
                    and unit_name.startswith('client')):
                continue
            self.harness.add_relation_unit(rel_id, unit_name)
            self.harness.update_relation_data(rel_id, unit_name,
                                              data['remote_unit_data'])

    def harness_setup(self, test_case, load_request_from_client=False):
        rel_id = self.harness.add_relation('ceph-client', 'ceph-mon')
        self.apply_unit_data(test_case, rel_id)
        self.harness.begin()
        ceph_client = CephClientRequires(self.harness.charm, 'ceph-client')
        if load_request_from_client:
            # Prime the local state with the request already present in the
            # relation data, as if this unit had sent it earlier.
            raw_rq = test_case['client/0']['remote_unit_data']['broker_req']
            ceph_client.state.broker_req = raw_rq
        return ceph_client

    def test_request_osd_settings(self):
        self.harness.begin()
        self.ceph_client = CephClientRequires(self.harness.charm,
                                              'ceph-client')
        relation_id = self.harness.add_relation('ceph-client', 'ceph-mon')
        self.harness.add_relation_unit(relation_id, 'ceph-mon/0')
        self.harness.update_relation_data(relation_id, 'ceph-mon/0',
                                          {'ingress-address': '192.0.2.2'})
        settings = {'osd heartbeat grace': 20, 'osd heartbeat interval': 5}
        self.ceph_client.request_osd_settings(settings)

        rel = self.harness.charm.model.get_relation('ceph-client')
        rel_data = rel.data[self.harness.charm.model.unit]
        self.assertEqual(json.loads(rel_data['osd-settings']), settings)

    def test_mon_hosts(self):
        self.harness.begin()
        self.ceph_client = CephClientRequires(self.harness.charm,
                                              'ceph-client')
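        # IPv6 mon addresses are bracket-wrapped, as the mon host syntax
        # in ceph.conf requires.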
        mon_ips = ['192.0.2.1', '192.0.2.2', '2001:DB8::1']
        mon_hosts = self.ceph_client.mon_hosts(mon_ips)
        self.assertEqual(mon_hosts,
                         ['192.0.2.1', '192.0.2.2', '[2001:DB8::1]'])

    def test_mon_hosts_ceph_proxy(self):
        self.harness.begin()
        self.ceph_client = CephClientRequires(self.harness.charm,
                                              'ceph-client')
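        # ceph-proxy publishes all mon addresses as a single
        # space-separated string, which mon_hosts() must split.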
        proxy_mon_ips = ['192.0.2.1 192.0.2.2 2001:DB8::1']
        mon_hosts = self.ceph_client.mon_hosts(proxy_mon_ips)
        self.assertEqual(mon_hosts,
                         ['192.0.2.1', '192.0.2.2', '[2001:DB8::1]'])

    def test_get_relation_data(self):
        relation_id_a = self.harness.add_relation('ceph-client', 'ceph-monA')
        relation_id_b = self.harness.add_relation('ceph-client', 'ceph-monB')
        self.harness.begin()
        self.harness.add_relation_unit(relation_id_a, 'ceph-monA/0')
        self.harness.update_relation_data(
            relation_id_a,
            'ceph-monA/0',
            {
                'ingress-address': '192.0.2.2',
                'ceph-public-address': '192.0.2.2',
                'key': 'foo',
                'auth': 'bar'
            },
        )
        self.harness.add_relation_unit(relation_id_a, 'ceph-monA/1')
        self.harness.update_relation_data(
            relation_id_a,
            'ceph-monA/1',
            {'ingress-address': '192.0.2.3'},
        )
        self.harness.add_relation_unit(relation_id_b, 'ceph-monB/0')
        self.harness.update_relation_data(
            relation_id_b,
            'ceph-monB/0',
            {
                'ingress-address': '2001:DB8::1',
                'ceph-public-address': '2001:DB8::1',
                'key': 'foo',
                'auth': 'bar'
            },
        )
        self.harness.add_relation_unit(relation_id_b, 'ceph-monB/1')
        self.harness.update_relation_data(
            relation_id_b,
            'ceph-monB/1',
            {
                'ingress-address': '2001:DB8::2',
                'ceph-public-address': '2001:DB8::2'
            },
        )

        self.ceph_client = CephClientRequires(self.harness.charm,
                                              'ceph-client')
        rel_data = self.ceph_client.get_relation_data()
        self.assertEqual(
            rel_data, {
                'mon_hosts': ['192.0.2.2', '[2001:DB8::1]', '[2001:DB8::2]'],
                'key': 'foo',
                'auth': 'bar',
            })

    def test_existing_request_complete(self):
        ceph_client = self.harness_setup(self.TEST_CASE_1,
                                         load_request_from_client=True)
        self.assertTrue(ceph_client.existing_request_complete())

    def test_existing_request_false(self):
        test_case = copy.deepcopy(self.TEST_CASE_1)
        test_case['ceph-mon/1']['remote_unit_data'] = {}
        ceph_client = self.harness_setup(test_case,
                                         load_request_from_client=True)
        self.assertFalse(ceph_client.existing_request_complete())

    def test_on_changed(self):
        class TestReceiver(framework.Object):
            def __init__(self, parent, key):
                super().__init__(parent, key)
                self.observed_events = []

            def on_broker_available(self, event):
                self.observed_events.append(event)

        self.harness.begin()
        self.ceph_client = CephClientRequires(self.harness.charm,
                                              'ceph-client')
        receiver = TestReceiver(self.harness.framework, 'receiver')
        self.harness.framework.observe(self.ceph_client.on.broker_available,
                                       receiver.on_broker_available)
        # No relation data yet, so no events should have fired.
        relation_id = self.harness.add_relation('ceph-client', 'ceph-mon')
        self.assertEqual(len(receiver.observed_events), 0)
        self.harness.add_relation_unit(relation_id, 'ceph-mon/0')
        self.harness.update_relation_data(
            relation_id,
            'ceph-mon/0',
            {
                'ingress-address': '192.0.2.2',
                'ceph-public-address': '192.0.2.2'
            },
        )

        # Got the necessary data - should get a BrokerAvailable event.
        self.apply_unit_data(self.TEST_CASE_1,
                             relation_id,
                             load_requst_from_client=False)
        # 1 broker_available event per mon and 1 completed request: 4 events
        self.assertEqual(len(receiver.observed_events), 4)
        self.assertIsInstance(receiver.observed_events[0],
                              BrokerAvailableEvent)

    @mock.patch.object(CephClientRequires, 'send_request_if_needed')
    def test_create_replicated_pool(self, _send_request_if_needed):
        # TODO: Replace mocking with real calls. Otherwise this test is not
        # very useful.
        self.harness.begin()
        self.ceph_client = CephClientRequires(self.harness.charm,
                                              'ceph-client')

        self.ceph_client.create_replicated_pool('ceph-client')
        _send_request_if_needed.assert_not_called()

        self.harness.add_relation('ceph-client', 'ceph-mon')
        self.ceph_client.create_replicated_pool('ceph-client')
        _send_request_if_needed.assert_called()

    @mock.patch.object(CephClientRequires, 'send_request_if_needed')
    def test_create_request_ceph_permissions(self, _send_request_if_needed):
        # TODO: Replace mocking with real calls. Otherwise this test is not
        # very useful.
        self.harness.begin()
        self.ceph_client = CephClientRequires(self.harness.charm,
                                              'ceph-client')
        CEPH_CAPABILITIES = [
            "osd", "allow *", "mon", "allow *", "mgr", "allow r"
        ]
        self.ceph_client.request_ceph_permissions('ceph-iscsi',
                                                  CEPH_CAPABILITIES)
        _send_request_if_needed.assert_not_called()

        self.harness.add_relation('ceph-client', 'ceph-mon')
        self.ceph_client.request_ceph_permissions('ceph-iscsi',
                                                  CEPH_CAPABILITIES)
        _send_request_if_needed.assert_called()

    def test_get_previous_request(self):
        ceph_client = self.harness_setup(self.TEST_CASE_1,
                                         load_request_from_client=False)
        rel = self.harness.charm.model.get_relation('ceph-client')
        self.assertEqual(
            ceph_client.get_previous_request(rel).request_id,
            'a3ad24dd-7e2f-11ea-8ba2-e5a5b68b415f')

    def test_get_previous_request_no_request(self):
        ceph_client = self.harness_setup(self.TEST_CASE_0,
                                         load_request_from_client=False)
        rel = self.harness.charm.model.get_relation('ceph-client')
        self.assertEqual(ceph_client.get_previous_request(rel), None)

    def test_get_request_states(self):
        ceph_client = self.harness_setup(self.TEST_CASE_1,
                                         load_request_from_client=False)
        relations = [self.harness.charm.model.get_relation('ceph-client')]
        self.assertEqual(
            ceph_client.get_request_states(self.client_req, relations),
            {'ceph-client:0': {
                'complete': True,
                'sent': True
            }})

    def test_get_request_states_new_request(self):
        ceph_client = self.harness_setup(self.TEST_CASE_1,
                                         load_request_from_client=False)
        relations = [self.harness.charm.model.get_relation('ceph-client')]
        self.assertEqual(
            ceph_client.get_request_states(self.random_request, relations),
            {'ceph-client:0': {
                'complete': False,
                'sent': False
            }})

    def test_is_request_complete_for_relation(self):
        ceph_client = self.harness_setup(self.TEST_CASE_1,
                                         load_request_from_client=False)
        relation = self.harness.charm.model.get_relation('ceph-client')
        self.assertTrue(
            ceph_client.is_request_complete_for_relation(
                self.client_req, relation))

    def test_is_request_complete(self):
        ceph_client = self.harness_setup(self.TEST_CASE_1,
                                         load_request_from_client=False)
        relations = [self.harness.charm.model.get_relation('ceph-client')]
        self.assertTrue(
            ceph_client.is_request_complete(self.client_req, relations))

    def test_is_request_complete_similar_req(self):
        ceph_client = self.harness_setup(self.TEST_CASE_1,
                                         load_request_from_client=False)
        relations = [self.harness.charm.model.get_relation('ceph-client')]
        similar_req = copy.deepcopy(self.client_req)
        similar_req.request_id = '2234234234'
        self.assertTrue(ceph_client.is_request_complete(
            similar_req, relations))

    def test_is_request_complete_new_req(self):
        ceph_client = self.harness_setup(self.TEST_CASE_1,
                                         load_request_from_client=False)
        relations = [self.harness.charm.model.get_relation('ceph-client')]
        self.assertFalse(
            ceph_client.is_request_complete(self.random_request, relations))

    def test_is_request_sent(self):
        ceph_client = self.harness_setup(self.TEST_CASE_1,
                                         load_request_from_client=False)
        relations = [self.harness.charm.model.get_relation('ceph-client')]
        self.assertTrue(ceph_client.is_request_sent(self.client_req,
                                                    relations))

    def test_is_request_sent_similar_req(self):
        ceph_client = self.harness_setup(self.TEST_CASE_1,
                                         load_request_from_client=False)
        relations = [self.harness.charm.model.get_relation('ceph-client')]
        similar_req = copy.deepcopy(self.client_req)
        similar_req.request_id = '2234234234'
        self.assertTrue(ceph_client.is_request_sent(similar_req, relations))

    def test_is_request_sent_new_req(self):
        ceph_client = self.harness_setup(self.TEST_CASE_1,
                                         load_request_from_client=False)
        relations = [self.harness.charm.model.get_relation('ceph-client')]
        self.assertFalse(
            ceph_client.is_request_sent(self.random_request, relations))

    def test_send_request_if_needed(self):
        ceph_client = self.harness_setup(self.TEST_CASE_0,
                                         load_request_from_client=False)
        relations = [self.harness.charm.model.get_relation('ceph-client')]
        self.assertIsNone(
            relations[0].data[self.harness.charm.model.unit].get('broker_req'))
        ceph_client.send_request_if_needed(self.random_request, relations)
        self.assertIsNotNone(
            relations[0].data[self.harness.charm.model.unit]['broker_req'])

    def test_send_request_if_needed_duplicate(self):
        ceph_client = self.harness_setup(self.TEST_CASE_1,
                                         load_request_from_client=False)
        relations = [self.harness.charm.model.get_relation('ceph-client')]
        similar_req = copy.deepcopy(self.client_req)
        similar_req.request_id = '2234234234'
        orig_req_data = relations[0].data[self.harness.charm.model.unit].get(
            'broker_req')
        ceph_client.send_request_if_needed(similar_req, relations)
        self.assertEqual(
            relations[0].data[self.harness.charm.model.unit]['broker_req'],
            orig_req_data)
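

# A minimal consumer sketch, not exercised by the tests above: one way a
# charm might wire up CephClientRequires using only the API these tests
# demonstrate. The class and pool names are illustrative assumptions.
class ExampleCephConsumerCharm(CharmBase):

    def __init__(self, *args):
        super().__init__(*args)
        self.ceph_client = CephClientRequires(self, 'ceph-client')
        self.framework.observe(self.ceph_client.on.broker_available,
                               self._on_broker_available)

    def _on_broker_available(self, event):
        # Only sends once the relation exists; an identical request is
        # never re-sent (see the send_request_if_needed tests above).
        self.ceph_client.create_replicated_pool('example-pool')
        # Once the mons respond, key/auth/mon_hosts can be read via
        # self.ceph_client.get_relation_data().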


def get_ceph_request():
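    """Build a CephBrokerRq for the pools this charm needs.

    Depending on the 'pool-type' option this creates either a replicated
    pool or an erasure-coded pool (plus its replicated metadata pool and
    erasure profile), and optionally restricts access to the standard
    volumes/images/vms groups.
    """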
    rq = CephBrokerRq()
    if (config('libvirt-image-backend') == 'rbd'
            and assert_libvirt_rbd_imagebackend_allowed()):
        pool_name = config('rbd-pool')
        replicas = config('ceph-osd-replication-count')
        weight = config('ceph-pool-weight')
        bluestore_compression = ch_context.CephBlueStoreCompressionContext()
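        # Gathers the bluestore-compression-* charm options; merged into
        # the pool-create ops below via get_kwargs().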

        if config('pool-type') == 'erasure-coded':
            # General EC plugin config
            plugin = config('ec-profile-plugin')
            technique = config('ec-profile-technique')
            device_class = config('ec-profile-device-class')
            metadata_pool_name = (config('ec-rbd-metadata-pool')
                                  or "{}-metadata".format(pool_name))
            bdm_k = config('ec-profile-k')
            bdm_m = config('ec-profile-m')
            # LRC plugin config
            bdm_l = config('ec-profile-locality')
            crush_locality = config('ec-profile-crush-locality')
            # SHEC plugin config
            bdm_c = config('ec-profile-durability-estimator')
            # CLAY plugin config
            bdm_d = config('ec-profile-helper-chunks')
            scalar_mds = config('ec-profile-scalar-mds')
            # Profile name
            profile_name = (config('ec-profile-name')
                            or "{}-profile".format(pool_name))
            # Metadata sizing is approximately 1% of overall data weight
            # but is in effect driven by the number of rbd's rather than
            # their size - so it can be very lightweight.
            metadata_weight = weight * 0.01
            # Resize data pool weight to accommodate metadata weight
            weight = weight - metadata_weight
            # Create metadata pool
            rq.add_op_create_pool(name=metadata_pool_name,
                                  replica_count=replicas,
                                  weight=metadata_weight,
                                  group='vms',
                                  app_name='rbd')

            # Create erasure profile
            rq.add_op_create_erasure_profile(name=profile_name,
                                             k=bdm_k,
                                             m=bdm_m,
                                             lrc_locality=bdm_l,
                                             lrc_crush_locality=crush_locality,
                                             shec_durability_estimator=bdm_c,
                                             clay_helper_chunks=bdm_d,
                                             clay_scalar_mds=scalar_mds,
                                             device_class=device_class,
                                             erasure_type=plugin,
                                             erasure_technique=technique)

            # Create EC data pool

            # NOTE(fnordahl): once we deprecate Python 3.5 support we can do
            # the unpacking of the BlueStore compression arguments as part of
            # the function arguments. Until then we need to build the dict
            # prior to the function call.
            kwargs = {
                'name': pool_name,
                'erasure_profile': profile_name,
                'weight': weight,
                'group': "vms",
                'app_name': "rbd",
                'allow_ec_overwrites': True
            }
            kwargs.update(bluestore_compression.get_kwargs())
            rq.add_op_create_erasure_pool(**kwargs)
        else:
            kwargs = {
                'name': pool_name,
                'replica_count': replicas,
                'weight': weight,
                'group': 'vms',
                'app_name': 'rbd',
            }
            kwargs.update(bluestore_compression.get_kwargs())
            rq.add_op_create_replicated_pool(**kwargs)

    if config('restrict-ceph-pools'):
        rq.add_op_request_access_to_group(
            name="volumes",
            object_prefix_permissions={'class-read': ['rbd_children']},
            permission='rwx')
        rq.add_op_request_access_to_group(
            name="images",
            object_prefix_permissions={'class-read': ['rbd_children']},
            permission='rwx')
        rq.add_op_request_access_to_group(
            name="vms",
            object_prefix_permissions={'class-read': ['rbd_children']},
            permission='rwx')
    return rq
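

# Illustrative hook-level driver for get_ceph_request(), assuming the
# standard charmhelpers broker helpers from
# charmhelpers.contrib.storage.linux.ceph; the function name here is
# hypothetical.
def process_ceph_request():
    from charmhelpers.contrib.storage.linux.ceph import (
        is_request_complete,
        send_request_if_needed,
    )
    rq = get_ceph_request()
    if is_request_complete(rq):
        # Every op in the request has been acked by the broker.
        return True
    # Safe to call repeatedly; an already-sent request is not re-sent.
    send_request_if_needed(rq)
    return False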


def get_create_rgw_pools_rq(prefix=None):
    """Pre-create RGW pools so that they have the correct settings.

    If a prefix is provided it will be prepended to each pool name; without
    one the 'default' prefix is used, e.g. 'default.rgw.buckets.data'.

    When RGW creates its own pools it will create them with non-optimal
    settings (LP: #1476749).

    NOTE: see http://docs.ceph.com/docs/master/radosgw/config-ref/#pools and
          http://docs.ceph.com/docs/master/radosgw/config/#create-pools for
          list of supported/required pools.
    """
    def _add_light_pool(rq, pool, pg_num, prefix=None):
        # Per the Ceph PG Calculator, all of the lightweight pools get 0.10%
        # of the data by default and only the .rgw.buckets.* get higher values
        weights = {'.rgw.buckets.index': 3.00, '.rgw.buckets.extra': 1.00}
        w = weights.get(pool, 0.10)
        if prefix:
            pool = "{prefix}{pool}".format(prefix=prefix, pool=pool)
        if pg_num > 0:
            rq.add_op_create_pool(name=pool,
                                  replica_count=replicas,
                                  pg_num=pg_num,
                                  group='objects',
                                  app_name=CEPH_POOL_APP_NAME)
        else:
            rq.add_op_create_pool(name=pool,
                                  replica_count=replicas,
                                  weight=w,
                                  group='objects',
                                  app_name=CEPH_POOL_APP_NAME)

    rq = CephBrokerRq()
    replicas = config('ceph-osd-replication-count')

    prefix = prefix or 'default'
    # Buckets likely to contain the most data and therefore
    # requiring the most PGs
    heavy = ['.rgw.buckets.data']
    bucket_weight = config('rgw-buckets-pool-weight')
    bluestore_compression = ch_context.CephBlueStoreCompressionContext()

    if config('pool-type') == 'erasure-coded':
        # General EC plugin config
        plugin = config('ec-profile-plugin')
        technique = config('ec-profile-technique')
        device_class = config('ec-profile-device-class')
        bdm_k = config('ec-profile-k')
        bdm_m = config('ec-profile-m')
        # LRC plugin config
        bdm_l = config('ec-profile-locality')
        crush_locality = config('ec-profile-crush-locality')
        # SHEC plugin config
        bdm_c = config('ec-profile-durability-estimator')
        # CLAY plugin config
        bdm_d = config('ec-profile-helper-chunks')
        scalar_mds = config('ec-profile-scalar-mds')
        # Profile name
        service = service_name()
        profile_name = (config('ec-profile-name')
                        or "{}-profile".format(service))
        rq.add_op_create_erasure_profile(name=profile_name,
                                         k=bdm_k,
                                         m=bdm_m,
                                         lrc_locality=bdm_l,
                                         lrc_crush_locality=crush_locality,
                                         shec_durability_estimator=bdm_c,
                                         clay_helper_chunks=bdm_d,
                                         clay_scalar_mds=scalar_mds,
                                         device_class=device_class,
                                         erasure_type=plugin,
                                         erasure_technique=technique)

        for pool in heavy:
            pool = "{prefix}{pool}".format(prefix=prefix, pool=pool)
            # NOTE(fnordahl): once we deprecate Python 3.5 support we can do
            # the unpacking of the BlueStore compression arguments as part of
            # the function arguments. Until then we need to build the dict
            # prior to the function call.
            kwargs = {
                'name': pool,
                'erasure_profile': profile_name,
                'weight': bucket_weight,
                'group': "objects",
                'app_name': CEPH_POOL_APP_NAME,
            }
            kwargs.update(bluestore_compression.get_kwargs())
            rq.add_op_create_erasure_pool(**kwargs)
    else:
        for pool in heavy:
            pool = "{prefix}{pool}".format(prefix=prefix, pool=pool)
            # NOTE(fnordahl): once we deprecate Python 3.5 support we can do
            # the unpacking of the BlueStore compression arguments as part of
            # the function arguments. Until then we need to build the dict
            # prior to the function call.
            kwargs = {
                'name': pool,
                'replica_count': replicas,
                'weight': bucket_weight,
                'group': 'objects',
                'app_name': CEPH_POOL_APP_NAME,
            }
            kwargs.update(bluestore_compression.get_kwargs())
            rq.add_op_create_replicated_pool(**kwargs)

    # NOTE: we want these pools to have a smaller pg_num/pgp_num than the
    # others since they are not expected to contain as much data
    light = [
        '.rgw.control',
        '.rgw.data.root',
        '.rgw.gc',
        '.rgw.log',
        '.rgw.intent-log',
        '.rgw.meta',
        '.rgw.usage',
        '.rgw.users.keys',
        '.rgw.users.email',
        '.rgw.users.swift',
        '.rgw.users.uid',
        '.rgw.buckets.extra',
        '.rgw.buckets.index',
    ]
    pg_num = config('rgw-lightweight-pool-pg-num')
    for pool in light:
        _add_light_pool(rq, pool, pg_num, prefix)

    _add_light_pool(rq, '.rgw.root', pg_num)

    if config('restrict-ceph-pools'):
        rq.add_op_request_access_to_group(name="objects",
                                          permission='rwx',
                                          key_name='radosgw.gateway')

    return rq
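

# Illustrative only: pre-create pools for a hypothetical second zone and
# submit the request. send_request_if_needed() is the charmhelpers helper
# shown above; relation='mon' assumes the ceph-radosgw charm's relation
# naming.
#
#     rq = get_create_rgw_pools_rq(prefix='zone2')
#     send_request_if_needed(rq, relation='mon')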