def test_not_ready_for_service(self):
    _relation_data = {
        'mon:1': {
            'ceph-mon/0': {},
            'ceph-mon/1': {},
            'ceph-mon/2': {},
        }
    }
    self._setup_relation_data(_relation_data)
    self.assertFalse(utils.ready_for_service())
def test_ready_for_service_legacy_skip(self, mock_exists):
    mock_exists.return_value = True
    _relation_data = {
        'mon:1': {
            'ceph-mon/0': {
                'radosgw_key': 'testkey',
            },
            'ceph-mon/1': {
                'radosgw_key': 'testkey',
            },
            'ceph-mon/2': {
                'radosgw_key': 'testkey',
            },
        }
    }
    self._setup_relation_data(_relation_data)
    self.assertFalse(utils.ready_for_service(legacy=False))
def test_ready_for_service_legacy(self, mock_exists):
    mock_exists.return_value = True
    _relation_data = {
        'mon:1': {
            'ceph-mon/0': {
                'radosgw_key': 'testkey',
            },
            'ceph-mon/1': {
                'radosgw_key': 'testkey',
            },
            'ceph-mon/2': {
                'radosgw_key': 'testkey',
            },
        }
    }
    self._setup_relation_data(_relation_data)
    self.assertTrue(utils.ready_for_service())
    mock_exists.assert_called_with('/etc/ceph/keyring.rados.gateway')
def test_ready_for_service_no_keyring(self, mock_exists):
    mock_exists.return_value = False
    _relation_data = {
        'mon:1': {
            'ceph-mon/0': {
                'rgw.testhost_key': 'testkey',
            },
            'ceph-mon/1': {
                'rgw.testhost_key': 'testkey',
            },
            'ceph-mon/2': {
                'rgw.testhost_key': 'testkey',
            },
        }
    }
    self._setup_relation_data(_relation_data)
    self.assertFalse(utils.ready_for_service())
    mock_exists.assert_called_with(
        '/etc/ceph/ceph.client.rgw.testhost.keyring')
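# The tests above depend on scaffolding that is not shown in this
# section. Below is a minimal sketch of the _setup_relation_data()
# helper they call, assuming the hookenv relation_ids/related_units/
# relation_get functions are patched as Mocks on the test case (and that
# mock_exists is injected by a patch on os.path.exists). This is an
# illustration of the assumed wiring, not the charm's actual helper.
def _setup_relation_data(self, relation_data):
    # Serve the canned relation data through the mocked hookenv helpers
    # so that utils.ready_for_service() sees the units and keys above.
    self.relation_ids.return_value = relation_data.keys()
    self.related_units.side_effect = (
        lambda rid: relation_data[rid].keys())
    self.relation_get.side_effect = (
        lambda attribute=None, unit=None, rid=None:
            relation_data[rid][unit].get(attribute))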
def slave_relation_changed(relation_id=None, unit=None):
    if not is_leader():
        return
    if not ready_for_service(legacy=False):
        log('unit not ready, deferring multisite configuration')
        return

    master_data = relation_get(rid=relation_id, unit=unit)
    if not all((master_data.get('realm'),
                master_data.get('zonegroup'),
                master_data.get('access_key'),
                master_data.get('secret'),
                master_data.get('url'))):
        log("Defer processing until master RGW has provided required data")
        return

    internal_url = '{}:{}'.format(
        canonical_url(CONFIGS, INTERNAL),
        listen_port(),
    )
    endpoints = [internal_url]
    realm = config('realm')
    zonegroup = config('zonegroup')
    zone = config('zone')

    if (realm, zonegroup) != (master_data['realm'],
                              master_data['zonegroup']):
        log("Mismatched configuration so stop multi-site configuration now")
        return

    if not leader_get('restart_nonce'):
        # NOTE(jamespage):
        # This is an ugly kludge to force creation of the required data
        # items in the .rgw.root pool prior to the radosgw process being
        # started; radosgw-admin does not currently have a way of doing
        # this operation but a period update will force it to be created.
        multisite.update_period(fatal=False)

    mutation = False

    if realm not in multisite.list_realms():
        multisite.pull_realm(url=master_data['url'],
                             access_key=master_data['access_key'],
                             secret=master_data['secret'])
        multisite.pull_period(url=master_data['url'],
                              access_key=master_data['access_key'],
                              secret=master_data['secret'])
        multisite.set_default_realm(realm)
        mutation = True

    if zone not in multisite.list_zones():
        multisite.create_zone(zone,
                              endpoints=endpoints,
                              default=False, master=False,
                              zonegroup=zonegroup,
                              access_key=master_data['access_key'],
                              secret=master_data['secret'])
        mutation = True

    if mutation:
        multisite.update_period()
        service_restart(service_name())
        leader_set(restart_nonce=str(uuid.uuid4()))
def master_relation_joined(relation_id=None):
    if not ready_for_service(legacy=False):
        log('unit not ready, deferring multisite configuration')
        return

    internal_url = '{}:{}'.format(
        canonical_url(CONFIGS, INTERNAL),
        listen_port(),
    )
    endpoints = [internal_url]
    realm = config('realm')
    zonegroup = config('zonegroup')
    zone = config('zone')
    access_key = leader_get('access_key')
    secret = leader_get('secret')

    if not all((realm, zonegroup, zone)):
        return

    relation_set(relation_id=relation_id,
                 realm=realm,
                 zonegroup=zonegroup,
                 url=endpoints[0],
                 access_key=access_key,
                 secret=secret)

    if not is_leader():
        return

    if not leader_get('restart_nonce'):
        # NOTE(jamespage):
        # This is an ugly kludge to force creation of the required data
        # items in the .rgw.root pool prior to the radosgw process being
        # started; radosgw-admin does not currently have a way of doing
        # this operation but a period update will force it to be created.
        multisite.update_period(fatal=False)

    mutation = False

    if realm not in multisite.list_realms():
        multisite.create_realm(realm, default=True)
        mutation = True

    if zonegroup not in multisite.list_zonegroups():
        multisite.create_zonegroup(zonegroup,
                                   endpoints=endpoints,
                                   default=True,
                                   master=True,
                                   realm=realm)
        mutation = True

    if zone not in multisite.list_zones():
        multisite.create_zone(zone,
                              endpoints=endpoints,
                              default=True,
                              master=True,
                              zonegroup=zonegroup)
        mutation = True

    if MULTISITE_SYSTEM_USER not in multisite.list_users():
        access_key, secret = multisite.create_system_user(
            MULTISITE_SYSTEM_USER)
        multisite.modify_zone(zone,
                              access_key=access_key,
                              secret=secret)
        leader_set(access_key=access_key,
                   secret=secret)
        mutation = True

    if mutation:
        multisite.update_period()
        service_restart(service_name())
        leader_set(restart_nonce=str(uuid.uuid4()))

    relation_set(relation_id=relation_id,
                 access_key=access_key,
                 secret=secret)
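# Both handlers above build their endpoint URL from listen_port(). A
# minimal sketch of that helper, assuming an explicitly configured port
# wins and the scheme default applies otherwise; the real helper's
# rules may differ:
def listen_port():
    # Assumption: config('port') takes precedence when set, otherwise
    # fall back to 443/80 depending on whether TLS is in use.
    port = config('port')
    if not port:
        port = 443 if https() else 80
    return port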
def _mon_relation():
    # NOTE: rid and unit are expected to be provided by the enclosing
    # mon relation hook's scope.
    key_name = 'rgw.{}'.format(socket.gethostname())
    legacy = True
    if request_per_unit_key():
        legacy = False
        relation_set(relation_id=rid,
                     key_name=key_name)
    try:
        rq = ceph.get_create_rgw_pools_rq(
            prefix=config('zone') or config('pool-prefix'))
    except ValueError as e:
        # The end user has most likely provided an invalid value for
        # a configuration option. Just log the traceback here; the
        # end user will be notified by assess_status() called at
        # the end of the hook execution.
        log('Caught ValueError, invalid value provided for '
            'configuration?: "{}"'.format(str(e)),
            level=DEBUG)
        return

    if is_request_complete(rq, relation='mon'):
        log('Broker request complete', level=DEBUG)
        CONFIGS.write_all()
        # New style per unit keys
        key = relation_get(attribute='{}_key'.format(key_name),
                           rid=rid, unit=unit)
        if not key:
            # Fallback to old style global key
            key = relation_get(attribute='radosgw_key',
                               rid=rid, unit=unit)
            key_name = None

        if key:
            new_keyring = ceph.import_radosgw_key(key, name=key_name)
            # NOTE(jamespage):
            # Deal with switch from radosgw init script to
            # systemd named units for radosgw instances by
            # stopping and disabling the radosgw unit
            if systemd_based_radosgw():
                service_stop('radosgw')
                service('disable', 'radosgw')
                # Update the nrpe config. If we wait for the below
                # to be called elsewhere, there exists a period
                # where nagios will report the radosgw service as
                # down, and also not be monitoring the per
                # host services.
                update_nrpe_config(checks_to_remove=['radosgw'])

            # NOTE(jamespage):
            # Multi-site deployments need to defer restart as the
            # zone is not created until the master relation is
            # joined; restarting here will cause a restart burst
            # in systemd and stop the process restarting once
            # zone configuration is complete.
            if (not is_unit_paused_set() and
                    new_keyring and
                    not multisite_deployment()):
                log('Resume service "{}" as we now have keys for it.'
                    .format(service_name()), level=DEBUG)
                service_resume(service_name())

        if multisite_deployment():
            process_multisite_relations()
        elif (ready_for_service(legacy=legacy) and
                is_leader() and
                'mon' in CONFIGS.complete_contexts()):
            # In a non multi-site deployment create the
            # zone using the default zonegroup and restart the service
            internal_url = '{}:{}'.format(
                canonical_url(CONFIGS, INTERNAL),
                listen_port(),
            )
            endpoints = [internal_url]
            zonegroup = 'default'
            zone = config('zone')
            existing_zones = multisite.list_zones()
            log('Existing zones {}'.format(existing_zones), level=DEBUG)
            if zone not in existing_zones:
                log("Zone '{}' doesn't exist, creating".format(zone))
                try:
                    multisite.create_zone(zone,
                                          endpoints=endpoints,
                                          default=True, master=True,
                                          zonegroup=zonegroup)
                except subprocess.CalledProcessError as e:
                    if 'File exists' in e.stderr.decode('UTF-8'):
                        # NOTE(lourot): may have been created in the
                        # background by the Rados Gateway daemon, see
                        # lp:1856106
                        log("Zone '{}' existed already after all".format(
                            zone))
                    else:
                        raise

                existing_zones = multisite.list_zones(retry_on_empty=True)
                log('Existing zones {}'.format(existing_zones),
                    level=DEBUG)
                if zone not in existing_zones:
                    raise RuntimeError(
                        "Could not create zone '{}'".format(zone))

            service_restart(service_name())
    else:
        send_request_if_needed(rq, relation='mon')
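# _mon_relation() above hands off to process_multisite_relations() in
# multi-site deployments. A minimal sketch of what that dispatch might
# look like, assuming 'master' and 'slave' relation names; this is an
# illustration, not necessarily the charm's exact implementation:
def process_multisite_relations():
    # Re-run the multi-site handlers for any relations already joined,
    # now that keys are in place and the unit is ready for service.
    for r_id in relation_ids('master'):
        master_relation_joined(r_id)
    for r_id in relation_ids('slave'):
        for unit in related_units(r_id):
            slave_relation_changed(r_id, unit)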