def scrub(self, deep_scrub):
    api = MonApi(self.cluster.fsid)
    osds = [daemon for daemon in self.daemons if daemon.startswith('osd.')]
    if not osds:
        raise NotSupportedError('Node {} does not have any OSDs'.format(self.hostname))
    api_scrub = api.osd_deep_scrub if deep_scrub else api.osd_scrub
    return {osd: api_scrub(osd) for osd in osds}
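# A hypothetical usage sketch (the lookup and hostname below are assumptions,
# not part of this module): `scrub()` returns a dict mapping each OSD daemon
# name to the result of its scrub command, so callers can report per-OSD
# outcomes.
#
#   node = Node.objects.get(hostname='ceph-node-1')  # hypothetical lookup
#   results = node.scrub(deep_scrub=True)            # deep-scrub all OSDs on the node
#   for osd, result in results.items():
#       logger.info('scrub scheduled on %s: %s', osd, result)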
def finish_task(self, result, status=STATUS_FINISHED):
    if TaskQueue.objects.get(pk=self.pk).status in [
        TaskQueue.STATUS_FINISHED, TaskQueue.STATUS_EXCEPTION, TaskQueue.STATUS_ABORTED
    ]:
        raise NotSupportedError('Task is not running')
    logger.info(u'Task finished: {}'.format(result))
    self.result = json.dumps(result)
    self.percent = 100
    self.transition(status)
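# A minimal sketch of the intended call pattern (the task_id variable is an
# assumption for illustration): a worker finishes a queued task exactly once;
# a second call hits the terminal-state guard above and raises
# NotSupportedError.
#
#   task = TaskQueue.objects.get(pk=task_id)
#   task.finish_task({'success': True})   # transitions to STATUS_FINISHED
#   task.finish_task({'success': True})   # raises NotSupportedError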
def delete(self, using=None):
    context = self.get_context()
    api = self.mon_api(context.fsid)
    try:
        api.osd_erasure_code_profile_rm(self.name)
    except ExternalCommandError as e:
        # TODO: Catching ExternalCommandError here is a bit unsatisfying;
        # ExternalCommandError should default to an internal server error.
        logger.exception('Failed to delete ECP')
        raise NotSupportedError(e)
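# Hedged usage sketch (the lookup below is an assumption for illustration):
# Ceph refuses to remove an erasure-code profile that is still referenced by
# a pool, so the ExternalCommandError translated above surfaces to callers as
# NotSupportedError.
#
#   ecp = CephErasureCodeProfile.objects.get(name='myprofile')
#   try:
#       ecp.delete()
#   except NotSupportedError as e:
#       logger.warning('cannot delete profile: %s', e)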
def save(self, force_insert=False, force_update=False, using=None, update_fields=None):
    context = self.get_context()
    api = self.mon_api(context.fsid)
    if not force_insert:
        raise NotImplementedError('Updating is not supported.')
    profile = ['k={}'.format(self.k), 'm={}'.format(self.m)]
    if self.ruleset_failure_domain:
        profile.append('crush-failure-domain={}'.format(self.ruleset_failure_domain))
    try:
        api.osd_erasure_code_profile_set(self.name, profile)
    except ExternalCommandError as e:
        # TODO: Catching ExternalCommandError here is a bit unsatisfying;
        # ExternalCommandError should default to an internal server error.
        logger.exception('Failed to create ECP')
        raise NotSupportedError(e)
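# Sketch of the argument that reaches `osd_erasure_code_profile_set`: for
# k=4, m=2 and a host failure domain, the list built above is
# ['k=4', 'm=2', 'crush-failure-domain=host']. A hypothetical create call
# (constructor arguments assumed from the fields used above):
#
#   ecp = CephErasureCodeProfile(name='ecp-4-2', k=4, m=2,
#                                ruleset_failure_domain='host')
#   ecp.save(force_insert=True)   # updates are rejected with NotImplementedError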
def save(self, *args, **kwargs):
    """
    This method serves three purposes:

    1. It implements the functionality originally provided by Django
       (e.g. setting ``id`` on ``self``).
    2. It modifies the Ceph state machine in a sane way.
    3. It provides a RESTful API.
    """
    if self.cluster is None:
        self.cluster = CephPool.objects.nodb_context.cluster

    insert = getattr(self, 'id', None) is None
    with undo_transaction(self.mon_api(),
                          exception_type=(ExternalCommandError, NotSupportedError),
                          re_raise_exception=True) as api:
        if insert:
            api.osd_pool_create(
                self.name,
                self.pg_num,
                self.pg_num,  # second pg_num is in fact pgp_num, but we don't
                              # want to allow different values here.
                self.type,
                self.erasure_code_profile.name
                if (self.erasure_code_profile and self.type == 'erasure') else None)

        diff, original = (self.get_modified_fields(name=self.name)
                          if insert else self.get_modified_fields())
        self.set_read_only_fields(original)

        if insert:
            for attr, value in diff.items():
                if not hasattr(self, attr):
                    setattr(self, attr, value)
            self._task_queue = ceph.tasks.track_pg_creation.delay(
                self.cluster.fsid, self.id, 0, self.pg_num)

        if not insert and 'cluster' in diff:
            raise ValueError({'cluster': ["Cluster cannot be changed."]})

        def schwartzian_transform(obj):
            key, val = obj
            if key == 'tier_of_id':
                return (1 if val is None else -1), obj  # move to start or end.
            return 0, obj

        for key, value in sorted(diff.items(), key=schwartzian_transform):
            if key == 'pg_num':
                if not insert:
                    api.osd_pool_set(self.name, "pg_num", value,
                                     undo_previous_value=original.pg_num)
                    api.osd_pool_set(self.name, "pgp_num", value,
                                     undo_previous_value=original.pg_num)
            elif key == 'cache_mode':
                api.osd_tier_cache_mode(self.name, value,
                                        undo_previous_mode=original.cache_mode)
            elif key == 'tier_of_id':
                if self.tier_of is None:
                    tier_of_target = original.tier_of
                    api.osd_tier_remove(tier_of_target.name, self.name)
                else:
                    tier_of_target = self.tier_of
                    api.osd_tier_add(tier_of_target.name, self.name)
            elif key == 'read_tier_id':
                if self.read_tier is None:
                    read_tier_target = original.read_tier
                    api.osd_tier_remove_overlay(
                        self.name, undo_previous_overlay=read_tier_target.name)
                else:
                    read_tier_target = self.read_tier
                    api.osd_tier_set_overlay(self.name, read_tier_target.name)
            elif key == 'flags':
                for flag in value:
                    if flag in ('allow_ec_overwrites', 'ec_overwrites'):
                        api.osd_pool_set(self.name, 'allow_ec_overwrites', 'true')
                    else:
                        msg = "Unknown flag '{}'.".format(flag)
                        logger.warning(msg)
                        raise NotSupportedError(msg)
            elif key == 'compression_required_ratio':
                api.osd_pool_set(self.name, key, str(value),
                                 undo_previous_value=str(getattr(original, key)))
            elif key == 'application_metadata':
                for app in set(original.application_metadata) - set(value):
                    api.osd_pool_application_disable(self.name, app)
                for app in set(value) - set(original.application_metadata):
                    api.osd_pool_application_enable(self.name, app)
            elif key == 'crush_ruleset':
                logger.info('Setting `crush_ruleset` is not yet supported.')
            elif (self.type == 'replicated'
                  and key not in ['name', 'erasure_code_profile_id']
                  and value is not None):
                api.osd_pool_set(self.name, key, value,
                                 undo_previous_value=getattr(original, key))
            elif (self.type == 'erasure'
                  and key not in ['name', 'size', 'min_size']
                  and value is not None):
                api.osd_pool_set(self.name, key, value,
                                 undo_previous_value=getattr(original, key))
            else:
                logger.warning('Tried to set "{}" to "{}" on pool "{}" aka "{}", '
                               'which is not supported'.format(key, value,
                                                               self.id, self.name))

    super(CephPool, self).save(*args, **kwargs)
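# A hedged end-to-end sketch (constructor arguments are assumptions based on
# the fields used above): creating a pool issues `osd_pool_create` and tracks
# PG creation asynchronously; a later save with a changed `pg_num` issues
# `osd_pool_set` for both pg_num and pgp_num, each wrapped in the undo
# transaction so a failure rolls the earlier calls back.
#
#   pool = CephPool(name='rbd-pool', pg_num=64, type='replicated')
#   pool.save()        # insert: osd_pool_create + track_pg_creation task
#   pool.pg_num = 128
#   pool.save()        # update: pg_num and pgp_num are kept in sync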