def update(self, pool_id, attributes):
    """
    Build a request that applies ``attributes`` to the pool ``pool_id``.

    :param pool_id: ID of an existing pool.
    :param attributes: dict of pool attribute changes; may include
        'pg_num'/'pgp_num' (which trigger the PG-creation workflow) and
        a decorator key 'id' left over from a bulk PATCH.
    :return: a PgCreatingRequest when pg_num is being changed, otherwise
        an OsdMapModifyingRequest.
    :raises NotImplementedError: if none of the attributes map to a
        known mon command.
    """
    osd_map = rest_plugin().get_sync_object(OsdMap)
    pool = self._resolve_pool(pool_id)
    pool_name = pool['pool_name']

    if 'pg_num' in attributes:
        # Special case when setting pg_num: have to do some extra work
        # to wait for PG creation between setting these two fields.
        final_pg_count = attributes['pg_num']

        # If pgp_num wasn't given explicitly, it tracks pg_num.
        if 'pgp_num' in attributes:
            pgp_num = attributes['pgp_num']
            del attributes['pgp_num']
        else:
            pgp_num = attributes['pg_num']

        # Strip pg_num so the remaining attributes can be issued as
        # ordinary commands before the PG-creation phase starts.
        del attributes['pg_num']

        pre_create_commands = self._pool_attribute_commands(pool_name, attributes)

        # This setting is new in Ceph Firefly, where it defaults to 32.
        # For older revisions, we simply pretend that the setting exists
        # with a default setting.
        mon_osd_max_split_count = int(rest_plugin().get_sync_object(Config).data.get(
            'mon_osd_max_split_count', LEGACY_MON_OSD_MAX_SPLIT_COUNT))

        initial_pg_count = pool['pg_num']
        n_osds = min(initial_pg_count, len(osd_map.osds_by_id))

        # The rules about creating PGs:
        #   where N_osds = min(old_pg_count, osd_count)
        #   the number of new PGs divided by N_osds may not be greater
        #   than mon_osd_max_split_count
        block_size = mon_osd_max_split_count * n_osds

        return PgCreatingRequest(
            "Growing pool '{name}' to {size} PGs".format(
                name=pool_name, size=final_pg_count),
            pre_create_commands,
            pool_id, pool_name, pgp_num,
            initial_pg_count, final_pg_count,
            block_size)
    else:
        commands = self._pool_attribute_commands(pool_name, attributes)
        if not commands:
            raise NotImplementedError(attributes)

        # TODO: provide some machine-readable indication of which objects
        # are affected by a particular request.
        # Perhaps subclass Request for each type of object, and have that
        # subclass provide both the patches->commands mapping and the
        # human readable and machine readable descriptions of it?

        # Objects may be decorated with 'id' from use in a bulk PATCH, but
        # we don't want anything from this point onwards to see that.
        if 'id' in attributes:
            del attributes['id']

        return OsdMapModifyingRequest(
            "Modifying pool '{name}' ({attrs})".format(
                name=pool_name,
                attrs=", ".join("%s=%s" % (k, v) for k, v in attributes.items())),
            commands)
def update(self, pool_id, attributes):
    """
    Build a request that applies ``attributes`` to the pool ``pool_id``.

    Returns a PgCreatingRequest when pg_num is being changed (extra work
    is needed to wait for PG creation between setting pg_num and
    pgp_num), otherwise an OsdMapModifyingRequest.  Raises
    NotImplementedError if none of the attributes map to a command.
    """
    osd_map = rest_plugin().get_sync_object(OsdMap)
    pool = self._resolve_pool(pool_id)
    pool_name = pool['pool_name']

    if 'pg_num' not in attributes:
        # Ordinary attribute change: translate attributes to commands.
        commands = self._pool_attribute_commands(pool_name, attributes)
        if not commands:
            raise NotImplementedError(attributes)

        # Objects may be decorated with 'id' from use in a bulk PATCH,
        # but nothing from this point onwards should see it.
        attributes.pop('id', None)

        attr_text = ", ".join(
            "%s=%s" % (k, v) for k, v in attributes.items())
        return OsdMapModifyingRequest(
            "Modifying pool '{name}' ({attrs})".format(
                name=pool_name, attrs=attr_text),
            commands)

    # Special case when setting pg_num: extra work is needed to wait
    # for PG creation between setting pg_num and pgp_num.
    final_pg_count = attributes['pg_num']
    # pgp_num tracks pg_num unless given explicitly.
    pgp_num = attributes.pop('pgp_num', final_pg_count)
    del attributes['pg_num']

    pre_create_commands = self._pool_attribute_commands(pool_name, attributes)

    # mon_osd_max_split_count is new in Ceph Firefly (default 32); for
    # older revisions pretend the setting exists with a legacy default.
    config_data = rest_plugin().get_sync_object(Config).data
    mon_osd_max_split_count = int(config_data.get(
        'mon_osd_max_split_count', LEGACY_MON_OSD_MAX_SPLIT_COUNT))

    initial_pg_count = pool['pg_num']

    # PG creation rule: with N_osds = min(old_pg_count, osd_count), the
    # number of new PGs divided by N_osds may not exceed
    # mon_osd_max_split_count.
    n_osds = min(initial_pg_count, len(osd_map.osds_by_id))
    block_size = mon_osd_max_split_count * n_osds

    return PgCreatingRequest(
        "Growing pool '{name}' to {size} PGs".format(
            name=pool_name, size=final_pg_count),
        pre_create_commands,
        pool_id, pool_name, pgp_num,
        initial_pg_count, final_pg_count,
        block_size)
def run(self):
    """Dequeue the next command and submit it via the rest plugin."""
    # Take the head of the queue; rebind (not mutate) the remainder,
    # matching the original's slicing semantics.
    cmd, self._commands = self._commands[0], self._commands[1:]
    self.result = CommandResult(self._tag)
    log.debug("cmd={0}".format(cmd))

    # Commands come in as a (prefix, args) 2-tuple; send_command wants
    # the args dict with the prefix folded in under 'prefix'.
    prefix, command = cmd
    command['prefix'] = prefix
    rest_plugin().send_command(self.result, json.dumps(command), self._tag)
def _request(self, method, obj_type, *args, **kwargs):
    """
    Create and submit UserRequest for an apply, create, update or delete.
    """
    factory = self.get_request_factory(obj_type)
    request = getattr(factory, method)(*args, **kwargs)
    if not request:
        return None
    # sleeps permitted during terminal phase of submitting, because we're
    # doing I/O to the salt master to kick off
    rest_plugin().requests.submit(request)
    return {'request_id': request.id}
def on_completion(self, tag):
    """
    Callback for when a salt/job/<jid>/ret event is received, in which
    we find the UserRequest that created the job, and inform it of
    completion so that it can progress.

    :param tag: the job tag, used to look up the originating UserRequest.
    """
    with self._lock:
        log.info("RequestCollection.on_completion: {0}".format(tag))
        try:
            request = self.get_by_id(tag)
        except KeyError:
            # BUG FIX: original combined a str.format-style template
            # with the '%' operator ("...{0}" % tag), which raises
            # TypeError at runtime instead of logging.
            log.warning("on_completion: unknown tag {0}".format(tag))
            return

        request.rados_commands.advance()
        if request.rados_commands.is_complete():
            if request.rados_commands.r == 0:
                try:
                    request.complete_jid()
                except Exception as e:
                    log.exception("Request %s threw exception in on_map",
                                  request.id)
                    request.set_error("Internal error %s" % e)
                    request.complete()

                # The request may be waiting for an epoch that we already
                # have; if so give it to the request right away.
                for sync_type, want_version in request.awaiting_versions.items():
                    sync_object = rest_plugin().get_sync_object(sync_type)
                    if want_version and sync_type.cmp(
                            sync_object.version, want_version) >= 0:
                        log.info("Awaited %s %s is immediately available" %
                                 (sync_type, want_version))
                        request.on_map(sync_type, sync_object)
            else:
                request.set_error(request.rados_commands.outs)
                request.complete()
def list_requests(self, filter_args):
    """
    Dump all UserRequests matching the given filters, newest first.

    :param filter_args: dict optionally containing 'state' and/or 'fsid'
        values to filter on; a missing/None value matches everything.
    :return: list of request dumps sorted by 'requested_at' descending.
    """
    state = filter_args.get('state', None)
    fsid = filter_args.get('fsid', None)
    requests = rest_plugin().requests.get_all()
    filtered = [
        self._dump_request(r) for r in requests
        if (state is None or r.state == state)
        and (fsid is None or r.fsid == fsid)
    ]
    # BUG FIX: the original passed a Python 2-only cmp function
    # positionally to sorted(); use key=/reverse=True, which expresses
    # the same descending order and also works on Python 3.
    return sorted(filtered, key=lambda d: d['requested_at'], reverse=True)
def _request(self, method, obj_type, *args, **kwargs):
    """
    Create and submit UserRequest for an apply, create, update or delete.
    """
    request = getattr(self.get_request_factory(obj_type),
                      method)(*args, **kwargs)
    if request:
        # sleeps permitted during terminal phase of submitting, because
        # we're doing I/O to the salt master to kick off
        rest_plugin().requests.submit(request)
        return {'request_id': request.id}
    return None
def get_request(self, request_id):
    """
    Get a JSON representation of a UserRequest
    """
    try:
        request = rest_plugin().requests.get_by_id(request_id)
        return self._dump_request(request)
    except KeyError:
        raise NotFound('request', request_id)
def update(self, osd_id, attributes):
    """
    Build a request to modify one OSD.

    :param osd_id: ID of the OSD to modify.
    :param attributes: dict of changes; recognised keys are 'in', 'up'
        and 'reweight', plus the bulk-PATCH decorator key 'id'.
    :return: an OsdMapModifyingRequest, or None for a no-op.
    :raises RuntimeError: if asked to mark a down OSD as up.
    """
    commands = []
    osd_map = rest_plugin().get_sync_object(OsdMap)

    # in/out/down take a vector of strings called 'ids', while
    # 'reweight' takes a single integer.
    if 'in' in attributes and bool(attributes['in']) != bool(
            osd_map.osds_by_id[osd_id]['in']):
        if attributes['in']:
            commands.append(('osd in', {'ids': [str(attributes['id'])]}))
        else:
            commands.append(('osd out', {'ids': [str(attributes['id'])]}))

    if 'up' in attributes and bool(attributes['up']) != bool(
            osd_map.osds_by_id[osd_id]['up']):
        if not attributes['up']:
            commands.append(('osd down', {'ids': [str(attributes['id'])]}))
        else:
            raise RuntimeError("It is not valid to set a down OSD to be up")

    if 'reweight' in attributes:
        if attributes['reweight'] != float(
                osd_map.osd_tree_node_by_id[osd_id]['reweight']):
            commands.append(('osd reweight', {
                'id': osd_id,
                'weight': attributes['reweight']
            }))

    if not commands:
        # Returning None indicates no-op
        return None

    msg_attrs = attributes.copy()
    del msg_attrs['id']

    # BUG FIX: comparing dict.keys() to a list only works on Python 2
    # (on Python 3 a keys view never equals a list); compare key sets
    # instead, which is equivalent on both.
    changed = set(msg_attrs)
    if changed == {'in'}:
        message = "Marking osd.{id} {state}".format(
            id=osd_id, state=("in" if msg_attrs['in'] else "out"))
    elif changed == {'up'}:
        message = "Marking osd.{id} down".format(id=osd_id)
    elif changed == {'reweight'}:
        message = "Re-weighting osd.{id} to {pct}%".format(
            id=osd_id,
            pct="{0:.1f}".format(msg_attrs['reweight'] * 100.0))
    else:
        message = "Modifying osd.{id} ({attrs})".format(
            id=osd_id,
            attrs=", ".join("%s=%s" % (k, v) for k, v in msg_attrs.items()))

    return OsdMapModifyingRequest(message, commands)
def list_requests(self, filter_args):
    """
    Dump all UserRequests matching the given filters, newest first.

    :param filter_args: dict optionally containing 'state' and/or 'fsid'
        values to filter on; a missing/None value matches everything.
    :return: list of request dumps sorted by 'requested_at' descending.
    """
    state = filter_args.get('state', None)
    fsid = filter_args.get('fsid', None)
    requests = rest_plugin().requests.get_all()
    dumps = [
        self._dump_request(r) for r in requests
        if (state is None or r.state == state)
        and (fsid is None or r.fsid == fsid)
    ]
    # BUG FIX: the original passed a Python 2-only cmp function
    # positionally to sorted(); use key=/reverse=True, which expresses
    # the same descending order and also works on Python 3.
    return sorted(dumps, key=lambda d: d['requested_at'], reverse=True)
def get_request(self, request_id):
    """
    Get a JSON representation of a UserRequest
    """
    collection = rest_plugin().requests
    try:
        return self._dump_request(collection.get_by_id(request_id))
    except KeyError:
        raise NotFound('request', request_id)
def get_valid_commands(self, osds):
    """
    For each OSD in osds list valid commands
    """
    osd_map = rest_plugin().get_sync_object(OsdMap)
    # Up OSDs accept the implemented command set; down OSDs accept none.
    return {
        osd_id: {
            'valid_commands':
            OSD_IMPLEMENTED_COMMANDS
            if osd_map.osds_by_id[osd_id]['up'] else []
        }
        for osd_id in osds
    }
def update_config(self, _, attributes):
    """
    Build a request that sets/clears OSD map flags per ``attributes``.

    Returns None when the attributes produce no commands (no-op).
    """
    osd_map = rest_plugin().get_sync_object(OsdMap)
    commands = self._commands_to_set_flags(osd_map, attributes)
    if not commands:
        return None
    attr_text = ", ".join("%s=%s" % (k, v) for k, v in attributes.items())
    return OsdMapModifyingRequest(
        "Modifying OSD config ({attrs})".format(attrs=attr_text), commands)
def update_config(self, _, attributes):
    """
    Build a request that sets/clears OSD map flags per ``attributes``.

    Returns None when the attributes produce no commands (no-op).
    """
    flag_commands = self._commands_to_set_flags(
        rest_plugin().get_sync_object(OsdMap), attributes)
    if flag_commands:
        message = "Modifying OSD config ({attrs})".format(
            attrs=", ".join(
                "%s=%s" % (k, v) for k, v in attributes.items()))
        return OsdMapModifyingRequest(message, flag_commands)
    return None
def update(self, osd_id, attributes):
    """
    Build a request to modify one OSD.

    :param osd_id: ID of the OSD to modify.
    :param attributes: dict of changes; recognised keys are 'in', 'up'
        and 'reweight', plus the bulk-PATCH decorator key 'id'.
    :return: an OsdMapModifyingRequest, or None for a no-op.
    :raises RuntimeError: if asked to mark a down OSD as up.
    """
    commands = []
    osd_map = rest_plugin().get_sync_object(OsdMap)

    # in/out/down take a vector of strings called 'ids', while
    # 'reweight' takes a single integer.
    if 'in' in attributes and bool(attributes['in']) != bool(
            osd_map.osds_by_id[osd_id]['in']):
        if attributes['in']:
            commands.append(('osd in', {'ids': [str(attributes['id'])]}))
        else:
            commands.append(('osd out', {'ids': [str(attributes['id'])]}))

    if 'up' in attributes and bool(attributes['up']) != bool(
            osd_map.osds_by_id[osd_id]['up']):
        if not attributes['up']:
            commands.append(('osd down', {'ids': [str(attributes['id'])]}))
        else:
            raise RuntimeError("It is not valid to set a down OSD to be up")

    if 'reweight' in attributes:
        if attributes['reweight'] != float(
                osd_map.osd_tree_node_by_id[osd_id]['reweight']):
            commands.append(('osd reweight', {
                'id': osd_id,
                'weight': attributes['reweight']
            }))

    if not commands:
        # Returning None indicates no-op
        return None

    msg_attrs = attributes.copy()
    del msg_attrs['id']

    # BUG FIX: comparing dict.keys() to a list only works on Python 2
    # (on Python 3 a keys view never equals a list); compare key sets
    # instead, which is equivalent on both.
    changed_keys = set(msg_attrs)
    if changed_keys == {'in'}:
        message = "Marking osd.{id} {state}".format(
            id=osd_id, state=("in" if msg_attrs['in'] else "out"))
    elif changed_keys == {'up'}:
        message = "Marking osd.{id} down".format(id=osd_id)
    elif changed_keys == {'reweight'}:
        message = "Re-weighting osd.{id} to {pct}%".format(
            id=osd_id,
            pct="{0:.1f}".format(msg_attrs['reweight'] * 100.0))
    else:
        message = "Modifying osd.{id} ({attrs})".format(
            id=osd_id,
            attrs=", ".join("%s=%s" % (k, v) for k, v in msg_attrs.items()))

    return OsdMapModifyingRequest(message, commands)
def on_completion(self, tag):
    """
    Callback for when a salt/job/<jid>/ret event is received, in which
    we find the UserRequest that created the job, and inform it of
    completion so that it can progress.

    :param tag: the job tag, used to look up the originating UserRequest.
    """
    with self._lock:
        log.info("RequestCollection.on_completion: {0}".format(tag))
        try:
            request = self.get_by_id(tag)
        except KeyError:
            # BUG FIX: original combined a str.format-style template
            # with the '%' operator ("...{0}" % tag), which raises
            # TypeError at runtime instead of logging.
            log.warning("on_completion: unknown tag {0}".format(tag))
            return

        request.rados_commands.advance()
        if request.rados_commands.is_complete():
            if request.rados_commands.r == 0:
                try:
                    request.complete_jid()
                except Exception as e:
                    log.exception("Request %s threw exception in on_map",
                                  request.id)
                    request.set_error("Internal error %s" % e)
                    request.complete()

                # The request may be waiting for an epoch that we already
                # have; if so give it to the request right away.
                for sync_type, want_version in request.awaiting_versions.items():
                    sync_object = rest_plugin().get_sync_object(sync_type)
                    if want_version and sync_type.cmp(
                            sync_object.version, want_version) >= 0:
                        log.info("Awaited %s %s is immediately available" %
                                 (sync_type, want_version))
                        request.on_map(sync_type, sync_object)
            else:
                request.set_error(request.rados_commands.outs)
                request.complete()
def get_sync_object(self, object_type, path=None):
    """Fetch a sync object (optionally a sub-path of it) from the plugin."""
    plugin = rest_plugin()
    return plugin.get_sync_object(object_type, path)
def get_authenticators(self):
    """Return the authenticators provided by the rest plugin."""
    plugin = rest_plugin()
    return plugin.get_authenticators()
def server_list(self):
    """List all known servers via the rest plugin."""
    plugin = rest_plugin()
    return plugin.list_servers()
def server_get(self, fqdn):
    """Look up a single server by its FQDN via the rest plugin."""
    plugin = rest_plugin()
    return plugin.get_server(fqdn)
def cancel_request(self, request_id):
    """Cancel a request and return its updated dump; NotFound if unknown."""
    try:
        rest_plugin().requests.cancel(request_id)
    except KeyError:
        raise NotFound('request', request_id)
    return self.get_request(request_id)
def _resolve_pool(self, pool_id):
    """Return the OSD map's pool record for ``pool_id`` (KeyError if absent)."""
    pools = rest_plugin().get_sync_object(OsdMap).pools_by_id
    return pools[pool_id]
def get_metadata(self, svc_type, svc_id):
    """Fetch metadata for one service instance from the rest plugin."""
    plugin = rest_plugin()
    return plugin.get_metadata(svc_type, svc_id)