def ceph_changed():
    if 'ceph' not in CONFIGS.complete_contexts():
        log('ceph relation incomplete. Peer not ready?')
        return

    service = service_name()
    if not ensure_ceph_keyring(service=service, user='******',
                               group='cinder'):
        log('Could not create ceph keyring: peer not ready?')
        return

    try:
        if is_request_complete(get_ceph_request()):
            log('Request complete')
            CONFIGS.write_all()
            for rid in relation_ids('storage-backend'):
                storage_backend(rid)
            for r_id in relation_ids('ceph-access'):
                ceph_access_joined(r_id)
            # Ensure that cinder-volume is restarted since only now can we
            # guarantee that ceph resources are ready.
            service_restart('cinder-volume')
        else:
            send_request_if_needed(get_ceph_request())
    except ValueError as e:
        # The end user has most likely provided an invalid value for a
        # configuration option. Just log the traceback here; the end user
        # will be notified by assess_status() called at the end of the
        # hook execution.
        log('Caught ValueError, invalid value provided for configuration?: '
            '"{}"'.format(str(e)), level=DEBUG)
def ceph_changed(rid=None, unit=None):
    if 'ceph' not in CONFIGS.complete_contexts():
        log('ceph relation incomplete. Peer not ready?')
        return

    if not ensure_ceph_keyring(service=service_name(), user='******',
                               group='nova'):
        log('Could not create ceph keyring: peer not ready?')
        return

    CONFIGS.write(ceph_config_file())
    CONFIGS.write(CEPH_SECRET)
    CONFIGS.write(NOVA_CONF)

    # With some refactoring, this could move into NovaComputeCephContext
    # and be easily extended to support other compute flavors.
    key = relation_get(attribute='key', rid=rid, unit=unit)
    if config('virt-type') in ['kvm', 'qemu', 'lxc'] and key:
        create_libvirt_secret(secret_file=CEPH_SECRET,
                              secret_uuid=CEPH_SECRET_UUID,
                              key=key)

    if is_request_complete(get_ceph_request()):
        log('Request complete')
        # Ensure that nova-compute is restarted since only now can we
        # guarantee that ceph resources are ready, but only if not paused.
        if (not is_unit_paused_set() and
                not is_broker_action_done('nova_compute_restart',
                                          rid, unit)):
            service_restart('nova-compute')
            mark_broker_action_done('nova_compute_restart', rid, unit)
    else:
        send_request_if_needed(get_ceph_request())
def ceph_changed(rid=None, unit=None):
    if 'ceph' not in CONFIGS.complete_contexts():
        log('ceph relation incomplete. Peer not ready?')
        return

    if not ensure_ceph_keyring(service=service_name(), user='******',
                               group='nova'):
        log('Could not create ceph keyring: peer not ready?')
        return

    CONFIGS.write(ceph_config_file())
    CONFIGS.write(CEPH_SECRET)
    CONFIGS.write(NOVA_CONF)

    # With some refactoring, this could move into NovaComputeCephContext
    # and be easily extended to support other compute flavors.
    key = relation_get(attribute='key', rid=rid, unit=unit)
    if config('virt-type') in ['kvm', 'qemu', 'lxc'] and key:
        create_libvirt_secret(secret_file=CEPH_SECRET,
                              secret_uuid=CEPH_SECRET_UUID,
                              key=key)

    if is_request_complete(get_ceph_request()):
        log('Request complete')
        # Ensure that nova-compute is restarted since only now can we
        # guarantee that ceph resources are ready, but only if not paused.
        if (not is_unit_paused_set() and
                not is_broker_action_done('nova_compute_restart',
                                          rid, unit)):
            service_restart('nova-compute')
            mark_broker_action_done('nova_compute_restart', rid, unit)
    else:
        send_request_if_needed(get_ceph_request())
def ceph_changed():
    if 'ceph' not in CONFIGS.complete_contexts():
        log('ceph relation incomplete. Peer not ready?')
        return

    service = service_name()
    if not ensure_ceph_keyring(service=service, user='******',
                               group='cinder'):
        log('Could not create ceph keyring: peer not ready?')
        return

    if is_request_complete(get_ceph_request()):
        log('Request complete')
        CONFIGS.write_all()
        set_ceph_env_variables(service=service)
        for rid in relation_ids('backup-backend'):
            backup_backend_joined(rid)
        # Ensure that cinder services are restarted since only now can we
        # guarantee that ceph resources are ready. Note that the order of
        # restart is important here.
        for svc in ['cinder-volume', 'cinder-backup']:
            service_restart(svc)
    else:
        send_request_if_needed(get_ceph_request())
def ceph_changed():
    if 'ceph' not in CONFIGS.complete_contexts():
        log('ceph relation incomplete. Peer not ready?')
        return

    if not ensure_ceph_keyring(service=service_name(), user='******',
                               group='nova'):
        log('Could not create ceph keyring: peer not ready?')
        return

    CONFIGS.write(ceph_config_file())
    CONFIGS.write(CEPH_SECRET)
    CONFIGS.write(NOVA_CONF)

    # With some refactoring, this could move into NovaComputeCephContext
    # and be easily extended to support other compute flavors.
    if config('virt-type') in ['kvm', 'qemu', 'lxc'] and relation_get('key'):
        create_libvirt_secret(secret_file=CEPH_SECRET,
                              secret_uuid=CEPH_SECRET_UUID,
                              key=relation_get('key'))

    if (config('libvirt-image-backend') == 'rbd' and
            assert_libvirt_imagebackend_allowed()):
        if is_request_complete(get_ceph_request()):
            log('Request complete')
            # Ensure that nova-compute is restarted since only now can we
            # guarantee that ceph resources are ready.
            service_restart('nova-compute')
        else:
            send_request_if_needed(get_ceph_request())
def mon_relation(rid=None, unit=None):
    rq = ceph.get_create_rgw_pools_rq(prefix=config('pool-prefix'))
    if is_request_complete(rq, relation='mon'):
        log('Broker request complete', level=DEBUG)
        CONFIGS.write_all()
        key = relation_get(attribute='radosgw_key', rid=rid, unit=unit)
        if key:
            ceph.import_radosgw_key(key)
            if not is_unit_paused_set():
                restart()  # TODO: figure out a better way to do this
    else:
        send_request_if_needed(rq, relation='mon')
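# The hooks above all follow the same charmhelpers broker pattern: build
# a CephBrokerRq, act only once the mons report it complete, and
# otherwise (re)send it. A minimal, self-contained sketch of that
# round-trip follows; the pool name and replica count are illustrative
# assumptions, not taken from any charm above.
from charmhelpers.contrib.storage.linux.ceph import (
    CephBrokerRq,
    is_request_complete,
    send_request_if_needed,
)


def handle_ceph_broker_request():
    rq = CephBrokerRq()
    rq.add_op_create_pool(name='example-pool', replica_count=3)
    if is_request_complete(rq):
        # The mons have satisfied the request; it is now safe to write
        # configuration and restart services that depend on the pool.
        pass
    else:
        # Publish the request on the relation; this is a no-op if an
        # identical request is already pending.
        send_request_if_needed(rq)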
def _mon_relation():
    # NOTE: rid and unit come from the enclosing hook's scope; this is
    # a nested helper in the original charm.
    key_name = 'rgw.{}'.format(socket.gethostname())
    if request_per_unit_key():
        relation_set(relation_id=rid, key_name=key_name)
    # NOTE: prefer zone name if in use over pool-prefix.
    rq = ceph.get_create_rgw_pools_rq(
        prefix=config('zone') or config('pool-prefix'))
    if is_request_complete(rq, relation='mon'):
        log('Broker request complete', level=DEBUG)
        CONFIGS.write_all()
        # New style per-unit keys
        key = relation_get(attribute='{}_key'.format(key_name),
                           rid=rid, unit=unit)
        if not key:
            # Fall back to old style global key
            key = relation_get(attribute='radosgw_key',
                               rid=rid, unit=unit)
            key_name = None
        if key:
            new_keyring = ceph.import_radosgw_key(key, name=key_name)
            # NOTE(jamespage):
            # Deal with the switch from the radosgw init script to
            # systemd named units for radosgw instances by stopping
            # and disabling the radosgw unit.
            if systemd_based_radosgw():
                service_stop('radosgw')
                service('disable', 'radosgw')
                # Update the nrpe config. If we wait for the below to
                # be called elsewhere, there exists a period where
                # nagios will report the radosgw service as down, and
                # also not be monitoring the per-host services.
                update_nrpe_config(checks_to_remove=['radosgw'])
            service('enable', service_name())
            # NOTE(jamespage):
            # Multi-site deployments need to defer restart as the
            # zone is not created until the master relation is
            # joined; restarting here will cause a restart burst
            # in systemd and stop the process restarting once
            # zone configuration is complete.
            if (not is_unit_paused_set() and
                    new_keyring and
                    not multisite_deployment()):
                service_restart(service_name())

        process_multisite_relations()
    else:
        send_request_if_needed(rq, relation='mon')
def _mon_relation():
    # NOTE: rid and unit come from the enclosing hook's scope; this is
    # a nested helper in the original charm.
    key_name = 'rgw.{}'.format(socket.gethostname())
    if request_per_unit_key():
        relation_set(relation_id=rid, key_name=key_name)
    # NOTE: prefer zone name if in use over pool-prefix.
    rq = ceph.get_create_rgw_pools_rq(
        prefix=config('zone') or config('pool-prefix'))
    if is_request_complete(rq, relation='mon'):
        log('Broker request complete', level=DEBUG)
        CONFIGS.write_all()
        # New style per-unit keys
        key = relation_get(attribute='{}_key'.format(key_name),
                           rid=rid, unit=unit)
        if not key:
            # Fall back to old style global key
            key = relation_get(attribute='radosgw_key',
                               rid=rid, unit=unit)
            key_name = None
        if key:
            new_keyring = ceph.import_radosgw_key(key, name=key_name)
            # NOTE(jamespage):
            # Deal with the switch from the radosgw init script to
            # systemd named units for radosgw instances by stopping
            # and disabling the radosgw unit.
            if systemd_based_radosgw():
                service_stop('radosgw')
                service('disable', 'radosgw')
                # Update the nrpe config. If we wait for the below to
                # be called elsewhere, there exists a period where
                # nagios will report the radosgw service as down, and
                # also not be monitoring the per-host services.
                update_nrpe_config(checks_to_remove=['radosgw'])
            # NOTE(jamespage):
            # Multi-site deployments need to defer restart as the
            # zone is not created until the master relation is
            # joined; restarting here will cause a restart burst
            # in systemd and stop the process restarting once
            # zone configuration is complete.
            if (not is_unit_paused_set() and
                    new_keyring and
                    not multisite_deployment()):
                log('Resume service "{}" as we now have keys for it.'
                    .format(service_name()), level=DEBUG)
                service_resume(service_name())

        process_multisite_relations()
    else:
        send_request_if_needed(rq, relation='mon')
def changed(self):
    data = self.initial_ceph_response()
    if all(data.values()):
        reactive.set_flag(self.expand_name('{endpoint_name}.available'))

    rq = self.get_current_request()
    if rq:
        log("changed broker_req: {}".format(rq.ops))

    if rq and is_request_complete(rq, relation=self.relation_name):
        log("Setting ceph-client.pools.available")
        reactive.set_flag(
            self.expand_name('{endpoint_name}.pools.available'))
    else:
        log("incomplete request. broker_req not found")
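# A hypothetical reactive consumer of the flags set by the endpoint
# above. The flag name matches the "ceph-client.pools.available" log
# message; the handler name and body are illustrative only.
from charms.reactive import when


@when('ceph-client.pools.available')
def configure_storage_backend():
    # At this point the broker request is complete, so the pools exist
    # and backend configuration can safely proceed.
    pass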
def ceph_changed():
    if 'ceph' not in CONFIGS.complete_contexts():
        juju_log('ceph relation incomplete. Peer not ready?')
        return

    service = service_name()
    if not ensure_ceph_keyring(service=service, user='******',
                               group='glance'):
        juju_log('Could not create ceph keyring: peer not ready?')
        return

    if is_request_complete(get_ceph_request()):
        juju_log('Request complete')
        CONFIGS.write(GLANCE_API_CONF)
        CONFIGS.write(ceph_config_file())
        # Ensure that glance-api is restarted since only now can we
        # guarantee that ceph resources are ready.
        service_restart('glance-api')
    else:
        send_request_if_needed(get_ceph_request())
def ceph_changed(relation_id=None):
    if 'ceph' not in CONFIGS.complete_contexts():
        juju_log('ceph relation incomplete. Peer not ready?')
        return

    service = service_name()
    if not ensure_ceph_keyring(service=service, user='******',
                               group='cinder'):
        juju_log('Could not create ceph keyring: peer not ready?')
        return

    if is_request_complete(get_ceph_request()):
        log('Request complete')
        CONFIGS.write(CINDER_CONF)
        CONFIGS.write(ceph_config_file())
        # Ensure that cinder-volume is restarted since only now can we
        # guarantee that ceph resources are ready.
        if not is_unit_paused_set():
            service_restart('cinder-volume')
    else:
        send_request_if_needed(get_ceph_request())
def changed(self):
    data = {
        'key': self.key(),
        'auth': self.auth(),
        'mon_hosts': self.mon_hosts()
    }
    if all(data.values()):
        self.set_state('{relation_name}.available')

    all_requests = self.get_local(key='broker_reqs')
    if all_requests:
        incomplete = []
        for name, json_rq in all_requests.items():
            req = json.loads(json_rq)
            if not is_request_complete(req['ops']):
                incomplete.append(name)
        if len(incomplete) == 0:
            self.set_state('{relation_name}.pools.available')
        else:
            log("incomplete requests {}.".format(', '.join(incomplete)))
def ceph_changed(relation_id=None):
    if 'ceph' not in CONFIGS.complete_contexts():
        juju_log('ceph relation incomplete. Peer not ready?')
        return

    service = service_name()
    if not ensure_ceph_keyring(service=service, user='******',
                               group='cinder'):
        juju_log('Could not create ceph keyring: peer not ready?')
        return

    if is_request_complete(get_ceph_request()):
        log('Request complete')
        set_ceph_env_variables(service=service)
        CONFIGS.write(CINDER_CONF)
        CONFIGS.write(ceph_config_file())
        # Ensure that cinder-volume is restarted since only now can we
        # guarantee that ceph resources are ready.
        service_restart('cinder-volume')
    else:
        send_request_if_needed(get_ceph_request())
def changed(self):
    data = {
        'mds_key': self.mds_key(),
        'fsid': self.fsid(),
        'auth': self.auth(),
        'mon_hosts': self.mon_hosts()
    }
    if all(data.values()):
        self.set_state('{relation_name}.available')

    json_rq = self.get_local(key='broker_req')
    if json_rq:
        rq = CephBrokerRq()
        j = json.loads(json_rq)
        rq.ops = j['ops']
        log("changed broker_req: {}".format(rq.ops))

        if rq and is_request_complete(rq, relation=self.relation_name):
            log("Setting ceph-mds.pools.available")
            self.set_state('{relation_name}.pools.available')
        else:
            log("incomplete request. broker_req not found")
def ceph_changed():
    if 'ceph' not in CONFIGS.complete_contexts():
        log('ceph relation incomplete. Peer not ready?')
        return

    service = service_name()
    if not ensure_ceph_keyring(service=service, user='******',
                               group='cinder'):
        log('Could not create ceph keyring: peer not ready?')
        return

    if is_request_complete(get_ceph_request()):
        log('Request complete')
        CONFIGS.write_all()
        for rid in relation_ids('storage-backend'):
            storage_backend(rid)
        for r_id in relation_ids('ceph-access'):
            ceph_access_joined(r_id)
        # Ensure that cinder-volume is restarted since only now can we
        # guarantee that ceph resources are ready.
        service_restart('cinder-volume')
    else:
        send_request_if_needed(get_ceph_request())
def check_optional_config_and_relations(configs):
    """Validate optional configuration and relations when present.

    This function is called from assess_status/set_os_workload_status
    as the charm_func and needs to return either (None, None) if there
    is no problem, or (status, message) if there is a problem.

    :param configs: an OSConfigRender() instance.
    :return 2-tuple: (string, string) = (status, message)
    """
    # Check that, if we have a relation_id for high availability, we can
    # get the hacluster config. If we can't, then we are blocked.
    if relation_ids('ha'):
        try:
            get_hacluster_config()
        except Exception:
            return ('blocked',
                    'hacluster missing configuration: '
                    'vip, vip_iface, vip_cidr')

    if relation_ids('ceph'):
        # Check that the provided Ceph BlueStore configuration is valid.
        try:
            bluestore_compression = \
                context.CephBlueStoreCompressionContext()
            bluestore_compression.validate()
        except AttributeError:
            # The charm does late installation of the `ceph-common`
            # package and the class initializer above will throw an
            # exception until it is installed.
            pass
        except ValueError as e:
            return ('blocked', 'Invalid configuration: {}'.format(str(e)))

    # Ceph packages are only installed after the ceph relation is
    # established, so gate checking broker requests on the ceph relation
    # being completed.
    if ('ceph' in configs.complete_contexts() and
            not is_request_complete(get_ceph_request())):
        return ('waiting', 'Ceph broker request incomplete')

    # Return 'unknown' as the lowest priority so as not to clobber an
    # existing status.
    return "unknown", ""
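# get_ceph_request() is referenced throughout these hooks but not shown
# in this listing; in the charms above it builds the CephBrokerRq that
# is_request_complete()/send_request_if_needed() operate on. A minimal
# sketch, assuming a single replicated pool named after the service
# (the replica count is an assumption):
from charmhelpers.contrib.storage.linux.ceph import CephBrokerRq
from charmhelpers.core.hookenv import service_name


def get_ceph_request():
    rq = CephBrokerRq()
    rq.add_op_create_pool(name=service_name(), replica_count=3)
    return rq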
def _mon_relation():
    # NOTE: rid and unit come from the enclosing hook's scope; this is
    # a nested helper in the original charm.
    key_name = 'rgw.{}'.format(socket.gethostname())
    legacy = True
    if request_per_unit_key():
        legacy = False
        relation_set(relation_id=rid, key_name=key_name)
    try:
        rq = ceph.get_create_rgw_pools_rq(
            prefix=config('zone') or config('pool-prefix'))
    except ValueError as e:
        # The end user has most likely provided an invalid value for
        # a configuration option. Just log the traceback here; the
        # end user will be notified by assess_status() called at
        # the end of the hook execution.
        log('Caught ValueError, invalid value provided for '
            'configuration?: "{}"'.format(str(e)), level=DEBUG)
        return

    if is_request_complete(rq, relation='mon'):
        log('Broker request complete', level=DEBUG)
        CONFIGS.write_all()
        # New style per-unit keys
        key = relation_get(attribute='{}_key'.format(key_name),
                           rid=rid, unit=unit)
        if not key:
            # Fall back to old style global key
            key = relation_get(attribute='radosgw_key',
                               rid=rid, unit=unit)
            key_name = None
        if key:
            new_keyring = ceph.import_radosgw_key(key, name=key_name)
            # NOTE(jamespage):
            # Deal with the switch from the radosgw init script to
            # systemd named units for radosgw instances by stopping
            # and disabling the radosgw unit.
            if systemd_based_radosgw():
                service_stop('radosgw')
                service('disable', 'radosgw')
                # Update the nrpe config. If we wait for the below to
                # be called elsewhere, there exists a period where
                # nagios will report the radosgw service as down, and
                # also not be monitoring the per-host services.
                update_nrpe_config(checks_to_remove=['radosgw'])
            # NOTE(jamespage):
            # Multi-site deployments need to defer restart as the
            # zone is not created until the master relation is
            # joined; restarting here will cause a restart burst
            # in systemd and stop the process restarting once
            # zone configuration is complete.
            if (not is_unit_paused_set() and
                    new_keyring and
                    not multisite_deployment()):
                log('Resume service "{}" as we now have keys for it.'
                    .format(service_name()), level=DEBUG)
                service_resume(service_name())

        if multisite_deployment():
            process_multisite_relations()
        elif (ready_for_service(legacy=legacy) and is_leader() and
              'mon' in CONFIGS.complete_contexts()):
            # In a non-multi-site deployment, create the zone using the
            # default zonegroup and restart the service.
            internal_url = '{}:{}'.format(
                canonical_url(CONFIGS, INTERNAL),
                listen_port(),
            )
            endpoints = [internal_url]
            zonegroup = 'default'
            zone = config('zone')
            existing_zones = multisite.list_zones()
            log('Existing zones {}'.format(existing_zones), level=DEBUG)
            if zone not in existing_zones:
                log("Zone '{}' doesn't exist, creating".format(zone))
                try:
                    multisite.create_zone(zone,
                                          endpoints=endpoints,
                                          default=True, master=True,
                                          zonegroup=zonegroup)
                except subprocess.CalledProcessError as e:
                    if 'File exists' in e.stderr.decode('UTF-8'):
                        # NOTE(lourot): may have been created in the
                        # background by the Rados Gateway daemon, see
                        # lp:1856106
                        log("Zone '{}' existed already after all".format(
                            zone))
                    else:
                        raise

                existing_zones = multisite.list_zones(retry_on_empty=True)
                log('Existing zones {}'.format(existing_zones),
                    level=DEBUG)
                if zone not in existing_zones:
                    raise RuntimeError(
                        "Could not create zone '{}'".format(zone))

                service_restart(service_name())
    else:
        send_request_if_needed(rq, relation='mon')