class ContainerReconciler(Daemon):
    """
    Move objects that are in the wrong storage policy.
    """

    def __init__(self, conf):
        self.conf = conf
        # This option defines how long an un-processable misplaced object
        # marker will be retried before it is abandoned.  It is not coupled
        # with the tombstone reclaim age in the consistency engine.
        self.reclaim_age = int(conf.get('reclaim_age', 86400 * 7))
        self.interval = int(conf.get('interval', 30))
        conf_path = conf.get('__file__') or \
            '/etc/swift/container-reconciler.conf'
        self.logger = get_logger(conf, log_route='container-reconciler')
        request_tries = int(conf.get('request_tries') or 3)
        self.swift = InternalClient(conf_path,
                                    'Swift Container Reconciler',
                                    request_tries)
        self.stats = defaultdict(int)
        self.last_stat_time = time.time()

    def stats_log(self, metric, msg, *args, **kwargs):
        """
        Update stats tracking for metric and emit log message.
        """
        level = kwargs.pop('level', logging.DEBUG)
        log_message = '%s: ' % metric + msg
        self.logger.log(level, log_message, *args, **kwargs)
        self.stats[metric] += 1

    def log_stats(self, force=False):
        """
        Dump stats to logger; a no-op when stats have already been logged
        in the last minute.
        """
        now = time.time()
        should_log = force or (now - self.last_stat_time > 60)
        if should_log:
            self.last_stat_time = now
            self.logger.info('Reconciler Stats: %r', dict(**self.stats))

    def pop_queue(self, container, obj, q_ts, q_record):
        """
        Issue a delete object request to the container for the misplaced
        object queue entry.

        :param container: the misplaced objects container
        :param obj: the name of the misplaced object
        :param q_ts: the timestamp of the misplaced object
        :param q_record: the timestamp of the queue entry

        N.B. q_ts will normally be the same time as q_record except when
        an object was manually re-enqueued.
        """
        q_path = '/%s/%s/%s' % (MISPLACED_OBJECTS_ACCOUNT, container, obj)
        x_timestamp = slightly_later_timestamp(max(q_record, q_ts))
        self.stats_log('pop_queue', 'remove %r (%f) from the queue (%s)',
                       q_path, q_ts, x_timestamp)
        headers = {'X-Timestamp': x_timestamp}
        direct_delete_container_entry(
            self.swift.container_ring, MISPLACED_OBJECTS_ACCOUNT,
            container, obj, headers=headers)

    def throw_tombstones(self, account, container, obj, timestamp,
                         policy_index, path):
        """
        Issue a delete object request to the given storage_policy.

        :param account: the account name
        :param container: the container name
        :param obj: the object name
        :param timestamp: the timestamp of the object to delete
        :param policy_index: the policy index to direct the request
        :param path: the path to be used for logging
        """
        x_timestamp = slightly_later_timestamp(timestamp)
        self.stats_log('cleanup_attempt', '%r (%f) from policy_index '
                       '%s (%s) will be deleted',
                       path, timestamp, policy_index, x_timestamp)
        headers = {
            'X-Timestamp': x_timestamp,
            'X-Backend-Storage-Policy-Index': policy_index,
        }
        success = False
        try:
            self.swift.delete_object(account, container, obj,
                                     acceptable_statuses=(2, 404),
                                     headers=headers)
        except UnexpectedResponse as err:
            self.stats_log('cleanup_failed', '%r (%f) was not cleaned up '
                           'in storage_policy %s (%s)', path, timestamp,
                           policy_index, err)
        else:
            success = True
            self.stats_log('cleanup_success', '%r (%f) was successfully '
                           'removed from policy_index %s', path, timestamp,
                           policy_index)
        return success

    def _reconcile_object(self, account, container, obj, q_policy_index,
                          q_ts, q_op, path, **kwargs):
        """
        Perform object reconciliation.

        :param account: the account name of the misplaced object
        :param container: the container name of the misplaced object
        :param obj: the object name
        :param q_policy_index: the policy index of the source indicated by
                               the queue entry
        :param q_ts: the timestamp of the misplaced object
        :param q_op: the operation of the misplaced request
        :param path: the full path of the misplaced object for logging

        :returns: True to indicate the request is fully processed
                  successfully, otherwise False.
        """
        container_policy_index = direct_get_container_policy_index(
            self.swift.container_ring, account, container)
        if container_policy_index is None:
            self.stats_log('unavailable_container', '%r (%f) unable to '
                           'determine the destination policy_index',
                           path, q_ts)
            return False
        if container_policy_index == q_policy_index:
            self.stats_log('noop_object', '%r (%f) container policy_index '
                           '%s matches queue policy index %s', path, q_ts,
                           container_policy_index, q_policy_index)
            return True

        # check if object exists in the destination already
        self.logger.debug('checking for %r (%f) in destination '
                          'policy_index %s', path, q_ts,
                          container_policy_index)
        headers = {
            'X-Backend-Storage-Policy-Index': container_policy_index}
        dest_obj = self.swift.get_object_metadata(
            account, container, obj, headers=headers,
            acceptable_statuses=(2, 4))
        dest_ts = Timestamp(dest_obj.get('x-backend-timestamp', 0))
        if dest_ts >= q_ts:
            self.stats_log('found_object', '%r (%f) in policy_index %s '
                           'is newer than queue (%f)', path, dest_ts,
                           container_policy_index, q_ts)
            return self.throw_tombstones(account, container, obj, q_ts,
                                         q_policy_index, path)

        # object is misplaced
        self.stats_log('misplaced_object', '%r (%f) in policy_index %s '
                       'should be in policy_index %s', path, q_ts,
                       q_policy_index, container_policy_index)

        # fetch object from the source location
        self.logger.debug('fetching %r (%f) from storage policy %s', path,
                          q_ts, q_policy_index)
        headers = {
            'X-Backend-Storage-Policy-Index': q_policy_index}
        try:
            source_obj_status, source_obj_info, source_obj_iter = \
                self.swift.get_object(account, container, obj,
                                      headers=headers,
                                      acceptable_statuses=(2, 4))
        except UnexpectedResponse as err:
            source_obj_status = err.resp.status_int
            source_obj_info = {}
            source_obj_iter = None

        source_ts = Timestamp(source_obj_info.get('x-backend-timestamp', 0))
        if source_obj_status == 404 and q_op == 'DELETE':
            return self.ensure_tombstone_in_right_location(
                q_policy_index, account, container, obj, q_ts, path,
                container_policy_index, source_ts)
        else:
            return self.ensure_object_in_right_location(
                q_policy_index, account, container, obj, q_ts, path,
                container_policy_index, source_ts, source_obj_status,
                source_obj_info, source_obj_iter)

    def ensure_object_in_right_location(self, q_policy_index, account,
                                        container, obj, q_ts, path,
                                        container_policy_index, source_ts,
                                        source_obj_status, source_obj_info,
                                        source_obj_iter, **kwargs):
        """
        Validate that the source object will satisfy the misplaced object
        queue entry and move it to the destination.

        :param q_policy_index: the policy_index for the source object
        :param account: the account name of the misplaced object
        :param container: the container name of the misplaced object
        :param obj: the name of the misplaced object
        :param q_ts: the timestamp of the misplaced object
        :param path: the full path of the misplaced object for logging
        :param container_policy_index: the policy_index of the destination
        :param source_ts: the timestamp of the source object
        :param source_obj_status: the HTTP status of the source object
                                  request
        :param source_obj_info: the HTTP headers of the source object
                                request
        :param source_obj_iter: the body iter of the source object request
        """
        if source_obj_status // 100 != 2 or source_ts < q_ts:
            if q_ts < time.time() - self.reclaim_age:
                # it's old and there are no tombstones or anything; give up
                self.stats_log('lost_source', '%r (%s) was not available in '
                               'policy_index %s and has expired', path,
                               q_ts.internal, q_policy_index,
                               level=logging.CRITICAL)
                return True
            # the source object is unavailable or older than the queue
            # entry; a version that will satisfy the queue entry hopefully
            # exists somewhere in the cluster, so wait and try again
            self.stats_log('unavailable_source', '%r (%s) in '
                           'policy_index %s responded %s (%s)', path,
                           q_ts.internal, q_policy_index, source_obj_status,
                           source_ts.internal, level=logging.WARNING)
            return False

        # optimistically move any source with a timestamp >= q_ts
        ts = max(Timestamp(source_ts), q_ts)
        # move the object
        put_timestamp = slightly_later_timestamp(ts, offset=2)
        self.stats_log('copy_attempt', '%r (%f) in policy_index %s will be '
                       'moved to policy_index %s (%s)', path, source_ts,
                       q_policy_index, container_policy_index,
                       put_timestamp)
        headers = source_obj_info.copy()
        headers['X-Backend-Storage-Policy-Index'] = container_policy_index
        headers['X-Timestamp'] = put_timestamp

        try:
            self.swift.upload_object(
                FileLikeIter(source_obj_iter), account, container, obj,
                headers=headers)
        except UnexpectedResponse as err:
            self.stats_log('copy_failed', 'upload %r (%f) from '
                           'policy_index %s to policy_index %s '
                           'returned %s', path, source_ts, q_policy_index,
                           container_policy_index, err,
                           level=logging.WARNING)
            return False
        except:  # noqa
            self.stats_log('unhandled_error', 'unable to upload %r (%f) '
                           'from policy_index %s to policy_index %s ',
                           path, source_ts, q_policy_index,
                           container_policy_index, level=logging.ERROR,
                           exc_info=True)
            return False

        self.stats_log('copy_success', '%r (%f) moved from policy_index %s '
                       'to policy_index %s (%s)', path, source_ts,
                       q_policy_index, container_policy_index,
                       put_timestamp)
        return self.throw_tombstones(account, container, obj, q_ts,
                                     q_policy_index, path)

    def ensure_tombstone_in_right_location(self, q_policy_index, account,
                                           container, obj, q_ts, path,
                                           container_policy_index,
                                           source_ts, **kwargs):
        """
        Issue a DELETE request against the destination to match the
        misplaced DELETE against the source.
        """
        delete_timestamp = slightly_later_timestamp(q_ts, offset=2)
        self.stats_log('delete_attempt', '%r (%f) in policy_index %s '
                       'will be deleted from policy_index %s (%s)', path,
                       source_ts, q_policy_index, container_policy_index,
                       delete_timestamp)
        headers = {
            'X-Backend-Storage-Policy-Index': container_policy_index,
            'X-Timestamp': delete_timestamp,
        }
        try:
            self.swift.delete_object(account, container, obj,
                                     headers=headers)
        except UnexpectedResponse as err:
            self.stats_log('delete_failed', 'delete %r (%f) from '
                           'policy_index %s (%s) returned %s', path,
                           source_ts, container_policy_index,
                           delete_timestamp, err, level=logging.WARNING)
            return False
        except:  # noqa
            self.stats_log('unhandled_error', 'unable to delete %r (%f) '
                           'from policy_index %s (%s)', path, source_ts,
                           container_policy_index, delete_timestamp,
                           level=logging.ERROR, exc_info=True)
            return False

        self.stats_log('delete_success', '%r (%f) deleted from '
                       'policy_index %s (%s)', path, source_ts,
                       container_policy_index, delete_timestamp,
                       level=logging.INFO)
        return self.throw_tombstones(account, container, obj, q_ts,
                                     q_policy_index, path)

    def reconcile_object(self, info):
        """
        Process a possibly misplaced object write request.  Determine the
        correct destination storage policy by checking with primary
        containers.  Check source and destination, copying or deleting
        into the destination and cleaning up the source as needed.

        This method wraps _reconcile_object for exception handling.

        :param info: a queue entry dict

        :returns: True to indicate the request is fully processed
                  successfully, otherwise False.
        """
        self.logger.debug('checking placement for %r (%f) '
                          'in policy_index %s', info['path'],
                          info['q_ts'], info['q_policy_index'])
        success = False
        try:
            success = self._reconcile_object(**info)
        except:  # noqa
            self.logger.exception('Unhandled Exception trying to '
                                  'reconcile %r (%f) in policy_index %s',
                                  info['path'], info['q_ts'],
                                  info['q_policy_index'])
        if success:
            metric = 'success'
            msg = 'was handled successfully'
        else:
            metric = 'retry'
            msg = 'must be retried'
        msg = '%(path)r (%(q_ts)f) in policy_index %(q_policy_index)s ' + msg
        self.stats_log(metric, msg, info, level=logging.INFO)
        self.log_stats()
        return success

    def _iter_containers(self):
        """
        Generate a list of containers to process.
        """
        # hit most recent container first instead of waiting on the updaters
        current_container = get_reconciler_container_name(time.time())
        yield current_container
        container_gen = self.swift.iter_containers(MISPLACED_OBJECTS_ACCOUNT)
        self.logger.debug('looking for containers in %s',
                          MISPLACED_OBJECTS_ACCOUNT)
        while True:
            one_page = None
            try:
                one_page = list(itertools.islice(
                    container_gen, constraints.CONTAINER_LISTING_LIMIT))
            except UnexpectedResponse as err:
                self.logger.error('Error listing containers in '
                                  'account %s (%s)',
                                  MISPLACED_OBJECTS_ACCOUNT, err)

            if not one_page:
                # don't generally expect more than one page
                break
            # reversed order since we expect older containers to be empty
            for c in reversed(one_page):
                # encoding here is defensive
                container = c['name'].encode('utf8')
                if container == current_container:
                    continue  # we've already hit this one this pass
                yield container

    def _iter_objects(self, container):
        """
        Generate a list of objects to process.

        :param container: the name of the container to process

        If the given container is empty and older than reclaim_age this
        processor will attempt to reap it.
        """
        self.logger.debug('looking for objects in %s', container)
        found_obj = False
        try:
            for raw_obj in self.swift.iter_objects(
                    MISPLACED_OBJECTS_ACCOUNT, container):
                found_obj = True
                yield raw_obj
        except UnexpectedResponse as err:
            self.logger.error('Error listing objects in container %s (%s)',
                              container, err)
        if float(container) < time.time() - self.reclaim_age and \
                not found_obj:
            # Try to delete old empty containers so the queue doesn't
            # grow without bound.  It's ok if there's a conflict.
            self.swift.delete_container(
                MISPLACED_OBJECTS_ACCOUNT, container,
                acceptable_statuses=(2, 404, 409, 412))

    def reconcile(self):
        """
        Main entry point for processing misplaced objects.

        Iterate over all queue entries and delegate to reconcile_object.
        """
        self.logger.debug('pulling items from the queue')
        for container in self._iter_containers():
            for raw_obj in self._iter_objects(container):
                try:
                    obj_info = parse_raw_obj(raw_obj)
                except Exception:
                    self.stats_log('invalid_record',
                                   'invalid queue record: %r', raw_obj,
                                   level=logging.ERROR, exc_info=True)
                    continue
                finished = self.reconcile_object(obj_info)
                if finished:
                    self.pop_queue(container, raw_obj['name'],
                                   obj_info['q_ts'],
                                   obj_info['q_record'])
            self.log_stats()
            self.logger.debug('finished container %s', container)

    def run_once(self, *args, **kwargs):
        """
        Process every entry in the queue.
        """
        try:
            self.reconcile()
        except:  # noqa
            self.logger.exception('Unhandled Exception trying to reconcile')
        self.log_stats(force=True)

    def run_forever(self, *args, **kwargs):
        while True:
            self.run_once(*args, **kwargs)
            self.stats = defaultdict(int)
            self.logger.info('sleeping between intervals (%ss)',
                             self.interval)
            time.sleep(self.interval)
class ObjectExpirer(Daemon):
    """
    Daemon that queries the internal hidden expiring_objects_account to
    discover objects that need to be deleted.

    :param conf: The daemon configuration.
    """

    def __init__(self, conf):
        self.conf = conf
        self.logger = get_logger(conf, log_route='object-expirer')
        self.interval = int(conf.get('interval') or 300)
        self.expiring_objects_account = \
            (conf.get('auto_create_account_prefix') or '.') + \
            'expiring_objects'
        conf_path = conf.get('__file__') or '/etc/swift/object-expirer.conf'
        request_tries = int(conf.get('request_tries') or 3)
        self.swift = InternalClient(conf_path, 'Swift Object Expirer',
                                    request_tries)
        self.report_interval = int(conf.get('report_interval') or 300)
        self.report_first_time = self.report_last_time = time()
        self.report_objects = 0
        self.recon_cache_path = conf.get('recon_cache_path',
                                         '/var/cache/swift')
        self.rcache = join(self.recon_cache_path, 'object.recon')
        self.concurrency = int(conf.get('concurrency', 1))
        if self.concurrency < 1:
            raise ValueError("concurrency must be set to at least 1")
        self.processes = int(self.conf.get('processes', 0))
        self.process = int(self.conf.get('process', 0))

    def report(self, final=False):
        """
        Emits a log line report of the progress so far, or the final
        progress if final=True.

        :param final: Set to True for the last report once the expiration
                      pass has completed.
        """
        if final:
            elapsed = time() - self.report_first_time
            self.logger.info(_('Pass completed in %ds; %d objects expired') %
                             (elapsed, self.report_objects))
            dump_recon_cache({'object_expiration_pass': elapsed,
                              'expired_last_pass': self.report_objects},
                             self.rcache, self.logger)
        elif time() - self.report_last_time >= self.report_interval:
            elapsed = time() - self.report_first_time
            self.logger.info(_('Pass so far %ds; %d objects expired') %
                             (elapsed, self.report_objects))
            self.report_last_time = time()

    def run_once(self, *args, **kwargs):
        """
        Executes a single pass, looking for objects to expire.

        :param args: Extra args to fulfill the Daemon interface; this
                     daemon has no additional args.
        :param kwargs: Extra keyword args to fulfill the Daemon interface;
                       this daemon accepts processes and process keyword
                       args.  These will override the values from the
                       config file if provided.
        """
        processes, process = self.get_process_values(kwargs)
        pool = GreenPool(self.concurrency)
        containers_to_delete = []
        self.report_first_time = self.report_last_time = time()
        self.report_objects = 0
        try:
            self.logger.debug(_('Run begin'))
            containers, objects = \
                self.swift.get_account_info(self.expiring_objects_account)
            self.logger.info(_('Pass beginning; %s possible containers; %s '
                               'possible objects') % (containers, objects))

            for c in self.swift.iter_containers(
                    self.expiring_objects_account):
                container = c['name']
                timestamp = int(container)
                if timestamp > int(time()):
                    break
                containers_to_delete.append(container)
                for o in self.swift.iter_objects(
                        self.expiring_objects_account, container):
                    obj = o['name'].encode('utf8')
                    if processes > 0:
                        obj_process = int(
                            hashlib.md5('%s/%s' % (container, obj)).
                            hexdigest(), 16)
                        if obj_process % processes != process:
                            continue
                    timestamp, actual_obj = obj.split('-', 1)
                    timestamp = int(timestamp)
                    if timestamp > int(time()):
                        break
                    pool.spawn_n(self.delete_object, actual_obj, timestamp,
                                 container, obj)
            pool.waitall()

            for container in containers_to_delete:
                try:
                    self.swift.delete_container(
                        self.expiring_objects_account, container,
                        acceptable_statuses=(2, HTTP_NOT_FOUND,
                                             HTTP_CONFLICT))
                except (Exception, Timeout) as err:
                    self.logger.exception(
                        _('Exception while deleting container %s %s') %
                        (container, str(err)))
            self.logger.debug(_('Run end'))
            self.report(final=True)
        except (Exception, Timeout):
            self.logger.exception(_('Unhandled exception'))

    def run_forever(self, *args, **kwargs):
        """
        Executes passes forever, looking for objects to expire.

        :param args: Extra args to fulfill the Daemon interface; this
                     daemon has no additional args.
        :param kwargs: Extra keyword args to fulfill the Daemon interface;
                       this daemon has no additional keyword args.
        """
        sleep(random() * self.interval)
        while True:
            begin = time()
            try:
                self.run_once(*args, **kwargs)
            except (Exception, Timeout):
                self.logger.exception(_('Unhandled exception'))
            elapsed = time() - begin
            if elapsed < self.interval:
                sleep(random() * (self.interval - elapsed))

    def get_process_values(self, kwargs):
        """
        Gets the processes, process from the kwargs if those values exist.

        Otherwise, return processes, process set in the config file.

        :param kwargs: Keyword args passed into the run_forever(),
                       run_once() methods.  They have values specified on
                       the command line when the daemon is run.
        """
        if kwargs.get('processes') is not None:
            processes = int(kwargs['processes'])
        else:
            processes = self.processes

        if kwargs.get('process') is not None:
            process = int(kwargs['process'])
        else:
            process = self.process

        if process < 0:
            raise ValueError(
                'process must be an integer greater than or equal to 0')

        if processes < 0:
            raise ValueError(
                'processes must be an integer greater than or equal to 0')

        if processes and process >= processes:
            raise ValueError('process must be less than processes')

        return processes, process

    def delete_object(self, actual_obj, timestamp, container, obj):
        start_time = time()
        try:
            self.delete_actual_object(actual_obj, timestamp)
            self.swift.delete_object(self.expiring_objects_account,
                                     container, obj)
            self.report_objects += 1
            self.logger.increment('objects')
        except (Exception, Timeout) as err:
            self.logger.increment('errors')
            self.logger.exception(
                _('Exception while deleting object %s %s %s') %
                (container, obj, str(err)))
        self.logger.timing_since('timing', start_time)
        self.report()

    def delete_actual_object(self, actual_obj, timestamp):
        """
        Deletes the end-user object indicated by the actual object name
        given '<account>/<container>/<object>' if and only if the
        X-Delete-At value of the object is exactly the timestamp given.

        :param actual_obj: The name of the end-user object to delete:
                           '<account>/<container>/<object>'
        :param timestamp: The timestamp the X-Delete-At value must match to
                          perform the actual delete.
        """
        path = '/v1/' + urllib.quote(actual_obj.lstrip('/'))
        self.swift.make_request('DELETE', path,
                                {'X-If-Delete-At': str(timestamp)},
                                (2, HTTP_NOT_FOUND,
                                 HTTP_PRECONDITION_FAILED))
def test_reconciler_move_object_twice(self):
    # select some policies
    old_policy = random.choice(ENABLED_POLICIES)
    new_policy = random.choice([p for p in ENABLED_POLICIES
                                if p != old_policy])

    # setup a split brain
    self.brain.stop_handoff_half()
    # get old_policy on two primaries
    self.brain.put_container(policy_index=int(old_policy))
    self.brain.start_handoff_half()
    self.brain.stop_primary_half()
    # force a recreate on handoffs
    self.brain.put_container(policy_index=int(old_policy))
    self.brain.delete_container()
    self.brain.put_container(policy_index=int(new_policy))
    self.brain.put_object()  # populate memcache with new_policy
    self.brain.start_primary_half()

    # at this point two primaries have old policy
    container_part, container_nodes = self.container_ring.get_nodes(
        self.account, self.container_name)
    head_responses = []
    for node in container_nodes:
        metadata = direct_client.direct_head_container(
            node, container_part, self.account, self.container_name)
        head_responses.append((node, metadata))
    old_container_node_ids = [
        node['id'] for node, metadata in head_responses
        if int(old_policy) ==
        int(metadata['X-Backend-Storage-Policy-Index'])]
    self.assertEqual(2, len(old_container_node_ids))

    # hopefully memcache still has the new policy cached
    self.brain.put_object(headers={'x-object-meta-test': 'custom-meta'},
                          contents='VERIFY')
    # double-check object correctly written to new policy
    conf_files = []
    for server in Manager(['container-reconciler']).servers:
        conf_files.extend(server.conf_files())
    conf_file = conf_files[0]
    int_client = InternalClient(conf_file, 'probe-test', 3)
    int_client.get_object_metadata(
        self.account, self.container_name, self.object_name,
        headers={'X-Backend-Storage-Policy-Index': int(new_policy)})
    int_client.get_object_metadata(
        self.account, self.container_name, self.object_name,
        acceptable_statuses=(4,),
        headers={'X-Backend-Storage-Policy-Index': int(old_policy)})

    # shutdown the containers that know about the new policy
    self.brain.stop_handoff_half()

    # and get rows enqueued from old nodes
    for server_type in ('container-replicator', 'container-updater'):
        server = Manager([server_type])
        tuple(server.once(number=n + 1) for n in old_container_node_ids)

    # verify entry in the queue for the "misplaced" new_policy
    for container in int_client.iter_containers('.misplaced_objects'):
        for obj in int_client.iter_objects('.misplaced_objects',
                                           container['name']):
            expected = '%d:/%s/%s/%s' % (new_policy, self.account,
                                         self.container_name,
                                         self.object_name)
            self.assertEqual(obj['name'], expected)

    Manager(['container-reconciler']).once()

    # verify object in old_policy
    int_client.get_object_metadata(
        self.account, self.container_name, self.object_name,
        headers={'X-Backend-Storage-Policy-Index': int(old_policy)})

    # verify object is *not* in new_policy
    int_client.get_object_metadata(
        self.account, self.container_name, self.object_name,
        acceptable_statuses=(4,),
        headers={'X-Backend-Storage-Policy-Index': int(new_policy)})

    self.get_to_final_state()

    # verify entry in the queue
    for container in int_client.iter_containers('.misplaced_objects'):
        for obj in int_client.iter_objects('.misplaced_objects',
                                           container['name']):
            expected = '%d:/%s/%s/%s' % (old_policy, self.account,
                                         self.container_name,
                                         self.object_name)
            self.assertEqual(obj['name'], expected)

    Manager(['container-reconciler']).once()

    # and now it flops back
    int_client.get_object_metadata(
        self.account, self.container_name, self.object_name,
        headers={'X-Backend-Storage-Policy-Index': int(new_policy)})
    int_client.get_object_metadata(
        self.account, self.container_name, self.object_name,
        acceptable_statuses=(4,),
        headers={'X-Backend-Storage-Policy-Index': int(old_policy)})

    # make sure the queue is settled
    self.get_to_final_state()
    for container in int_client.iter_containers('.misplaced_objects'):
        for obj in int_client.iter_objects('.misplaced_objects',
                                           container['name']):
            self.fail('Found unexpected object %r in the queue' % obj)

    # verify that the object data read by external client is correct
    headers, data = self._get_object_patiently(int(new_policy))
    self.assertEqual('VERIFY', data)
    self.assertEqual('custom-meta', headers['x-object-meta-test'])
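
# ---------------------------------------------------------------------------
# The .misplaced_objects containers iterated by the test above are named by
# coarse timestamp buckets, which is why the reconciler's _iter_containers()
# can seed its listing with get_reconciler_container_name(time.time()) and
# why _iter_objects() can compare float(container) against reclaim_age.  A
# hypothetical sketch of that bucketing, assuming an hour-wide bucket (the
# divisor actually used by get_reconciler_container_name() may differ):

BUCKET_SECONDS = 3600  # assumed bucket width


def reconciler_container_name(obj_timestamp):
    # every object enqueued within the same bucket shares a queue container
    return str(int(float(obj_timestamp)) // BUCKET_SECONDS * BUCKET_SECONDS)

# e.g. reconciler_container_name(1400000123.45) == '1399996800'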
class UtilizationAggregator(Daemon): def __init__(self, conf): self.conf = conf self.logger = get_logger(conf, log_route='utilization-aggregator') self.interval = int(conf.get('interval') or 60) self.aggregate_account = '.utilization' self.sample_account = '.transfer_record' conf_path = conf.get('__file__') or \ '/etc/swift/swift-utilization-aggregator.conf' request_tries = int(conf.get('request_tries') or 3) self.swift = InternalClient(conf_path, 'Swift Utilization Aggregator', request_tries) self.report_interval = int(conf.get('report_interval') or 60) self.report_first_time = self.report_last_time = time() self.report_containers = 0 self.report_objects = 0 self.recon_cache_path = conf.get('recon_cache_path', '/var/cache/swift') self.rcache = join(self.recon_cache_path, 'object.recon') self.concurrency = int(conf.get('concurrency', 1)) if self.concurrency < 1: raise ValueError("concurrency must be set to at least 1") self.processes = int(self.conf.get('processes', 0)) self.process = int(self.conf.get('process', 0)) self.container_ring = Ring('/etc/swift', ring_name='container') self.sample_rate = int(self.conf.get('sample_rate', 600)) self.last_chk = iso8601_to_timestamp(self.conf.get( 'service_start')) self.kinx_api_url = self.conf.get('kinx_api_url') def report(self, final=False): if final: elapsed = time() - self.report_first_time self.logger.info(_('Pass completed in %ds; %d containers,' ' %d objects aggregated') % (elapsed, self.report_containers, self.report_objects)) dump_recon_cache({'object_aggregation_pass': elapsed, 'aggregation_last_pass': self.report_containers}, self.rcache, self.logger) elif time() - self.report_last_time >= self.report_interval: elapsed = time() - self.report_first_time self.logger.info(_('Pass so far %ds; %d objects aggregated') % (elapsed, self.report_objects)) self.report_last_time = time() def run_once(self, *args, **kwargs): processes, process = self.get_process_values(kwargs) pool = GreenPool(self.concurrency) self.report_first_time = self.report_last_time = time() self.report_objects = 0 self.report_containers = 0 containers_to_delete = [] try: self.logger.debug(_('Run begin')) containers, objects = \ self.swift.get_account_info(self.sample_account) self.logger.info(_('Pass beginning; %s possible containers; %s ' 'possible objects') % (containers, objects)) for c in self.swift.iter_containers(self.sample_account): container = c['name'] try: timestamp, account = container.split('_', 1) timestamp = float(timestamp) except ValueError: self.logger.debug('ValueError: %s, ' 'need more than 1 value to unpack' % \ container) else: if processes > 0: obj_proc = int(hashlib.md5(container).hexdigest(), 16) if obj_proc % processes != process: continue n = (float(time()) // self.sample_rate) * self.sample_rate if timestamp <= n: containers_to_delete.append(container) pool.spawn_n(self.aggregate_container, container) pool.waitall() for container in containers_to_delete: try: self.logger.debug('delete container: %s' % container) self.swift.delete_container(self.sample_account, container, acceptable_statuses=( 2, HTTP_NOT_FOUND, HTTP_CONFLICT)) except (Exception, Timeout) as err: self.logger.exception( _('Exception while deleting container %s %s') % (container, str(err))) tenants_to_fillup = list() for c in self.swift.iter_containers(self.aggregate_account): tenant_id = c['name'] if processes > 0: c_proc = int(hashlib.md5(tenant_id).hexdigest(), 16) if c_proc % processes != process: continue tenants_to_fillup.append(tenant_id) # fillup lossed usage data 
            self.fillup_lossed_usage_data(tenants_to_fillup)
            self.logger.debug(_('Run end'))
            self.report(final=True)
        except (Exception, Timeout):
            self.logger.exception(_('Unhandled exception'))

    def run_forever(self, *args, **kwargs):
        """
        Executes passes forever, looking for transfer records to aggregate.

        :param args: Extra args to fulfill the Daemon interface; this daemon
                     has no additional args.
        :param kwargs: Extra keyword args to fulfill the Daemon interface;
                       this daemon has no additional keyword args.
        """
        sleep(random() * self.interval)
        while True:
            begin = time()
            try:
                self.run_once(*args, **kwargs)
            except (Exception, Timeout):
                self.logger.exception(_('Unhandled exception'))
            elapsed = time() - begin
            if elapsed < self.interval:
                sleep(random() * (self.interval - elapsed))

    def get_process_values(self, kwargs):
        """
        Gets the processes, process from the kwargs if those values exist.
        Otherwise, returns the processes, process set in the config file.

        :param kwargs: Keyword args passed into the run_forever(), run_once()
                       methods. They have values specified on the command
                       line when the daemon is run.
        """
        if kwargs.get('processes') is not None:
            processes = int(kwargs['processes'])
        else:
            processes = self.processes
        if kwargs.get('process') is not None:
            process = int(kwargs['process'])
        else:
            process = self.process
        if process < 0:
            raise ValueError(
                'process must be an integer greater than or equal to 0')
        if processes < 0:
            raise ValueError(
                'processes must be an integer greater than or equal to 0')
        if processes and process >= processes:
            # process is a zero-based index, so it must be strictly smaller
            raise ValueError('process must be less than processes')
        return processes, process

    def aggregate_container(self, container):
        start_time = time()
        try:
            objs_to_delete = list()
            bytes_recvs = dict()
            bytes_sents = dict()
            ts, tenant_id, account = container.split('_', 2)
            ts = int(float(ts))
            for o in self.swift.iter_objects(self.sample_account,
                                             container):
                name = o['name']
                objs_to_delete.append(name)
                # sample names: '<ts>/<recv>/<sent>/<trans_id>/<client_ip>'
                ts, bytes_rv, bytes_st, trans_id, client_ip = \
                    name.split('/')
                ts = int(ts)  # split() yields strings; arithmetic needs int
                bill_type = self.get_billtype_by_client_ip(client_ip, ts)
                bytes_recvs[bill_type] = \
                    bytes_recvs.get(bill_type, 0) + int(bytes_rv)
                bytes_sents[bill_type] = \
                    bytes_sents.get(bill_type, 0) + int(bytes_st)
                self.report_objects += 1
            for o in objs_to_delete:
                self.swift.delete_object(self.sample_account, container, o)
            for bill_type, bt_rv in bytes_recvs.items():
                t_object = 'transfer/%d/%d/%d_%d_%d' % \
                    (ts, bill_type, bt_rv, bytes_sents[bill_type],
                     self.report_objects)
                self._hidden_update(tenant_id, t_object)
        except (Exception, Timeout) as err:
            self.logger.increment('errors')
            self.logger.exception(
                _('Exception while aggregating sample %s %s') %
                (container, str(err)))
        self.logger.timing_since('timing', start_time)
        self.report()

    def account_info(self, tenant_id, timestamp):
        path = '/v1/%s/%s?prefix=usage/%d&limit=1' % \
            (self.aggregate_account, tenant_id, timestamp)
        resp = self.swift.make_request('GET', path, {}, (2,))
        if len(resp.body) == 0:
            return 0, 0, 0
        usages = resp.body.split('/', 2)[2].rstrip()
        cont_cnt, obj_cnt, bt_used = usages.split('_')
        return int(cont_cnt), int(obj_cnt), int(bt_used)

    def _hidden_update(self, container, obj, method='PUT'):
        hidden_path = '/%s/%s/%s' % (self.aggregate_account, container, obj)
        part, nodes = self.container_ring.get_nodes(self.aggregate_account,
                                                    container)
        for node in nodes:
            ip = node['ip']
            port = node['port']
            dev = node['device']
            action_headers = dict()
            action_headers['user-agent'] = 'aggregator'
            action_headers['X-Timestamp'] = normalize_timestamp(time())
            action_headers['referer'] = 'aggregator-daemon'
            action_headers['x-size'] = '0'
            action_headers['x-content-type'] = 'text/plain'
            action_headers['x-etag'] = 'd41d8cd98f00b204e9800998ecf8427e'
            conn = http_connect(ip, port, dev, part, method, hidden_path,
                                action_headers)
            response = conn.getresponse()
            response.read()

    def fillup_lossed_usage_data(self, tenants):
        now = (float(time()) // self.sample_rate) * self.sample_rate
        path = '/v1/%s/%s?prefix=usage/%d&limit=1'
        for t in tenants:
            last = self.last_chk
            cont_cnt = obj_cnt = bt_used = -1
            while last <= now:
                p = path % (self.aggregate_account, t, last)
                resp = self.swift.make_request('GET', p, {}, (2,))
                if len(resp.body) != 0:
                    usages = resp.body.split('/', 2)[2].rstrip()
                    c, o, bt = usages.split('_')
                    cont_cnt = int(c)
                    obj_cnt = int(o)
                    bt_used = int(bt)
                else:
                    before = last - self.sample_rate
                    if cont_cnt == -1:
                        # look up the tenant's last known usage; pass the
                        # tenant id, account_info() builds the path itself
                        cont_cnt, obj_cnt, bt_used = \
                            self.account_info(t, before)
                    obj = 'usage/%d/%d_%d_%d' % (last, cont_cnt, obj_cnt,
                                                 bt_used)
                    self._hidden_update(t, obj)
                last += self.sample_rate
        self.last_chk = now

    def get_billtype_by_client_ip(self, client_ip, timestamp):
        end_ts = timestamp_to_iso8601(timestamp + self.sample_rate - 1)
        start_ts = timestamp_to_iso8601(timestamp)
        params = {'start': start_ts, 'end': end_ts}
        path = self.kinx_api_url + '/?%s' % (urllib.urlencode(params))
        data = json.loads(urllib.urlopen(path).read())
        bill_type = -1
        for r in data['ip_ranges']:
            bill_type = r['bill_type']
            for cidr in r['ip_range']:
                if self.ip_in_cidr(client_ip, cidr):
                    return bill_type
        # no range matched; fall back to the last bill_type seen (or -1)
        return bill_type

    def ip_in_cidr(self, client_ip, cidr):
        # compare the first <snet> bits of both addresses as binary strings
        bt_to_bits = lambda b: bin(int(b))[2:].rjust(8, '0')
        ip_to_bits = lambda ip: ''.join([bt_to_bits(b)
                                         for b in ip.split('.')])
        client_ip_bits = ip_to_bits(client_ip)
        ip, snet = cidr.split('/')
        ip_bits = ip_to_bits(ip)
        return client_ip_bits[:int(snet)] == ip_bits[:int(snet)]
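
# NOTE (illustrative sketch): ip_in_cidr() above matches by comparing
# binary-string prefixes. An equivalent formulation using integer netmask
# arithmetic, shown only to clarify the logic (the helper name is ours,
# not part of the daemon):
import socket
import struct


def ip_in_cidr_int(client_ip, cidr):
    """Integer/netmask equivalent of UtilizationAggregator.ip_in_cidr()."""
    net, bits = cidr.split('/')
    bits = int(bits)
    # e.g. /24 -> 0xffffff00; /0 yields a mask of 0 and matches everything
    mask = (0xffffffff << (32 - bits)) & 0xffffffff if bits else 0
    ip_int = struct.unpack('!I', socket.inet_aton(client_ip))[0]
    net_int = struct.unpack('!I', socket.inet_aton(net))[0]
    return ip_int & mask == net_int & mask

# e.g. ip_in_cidr_int('10.0.1.7', '10.0.0.0/16') -> True
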
class ObjectExpirer(Daemon):
    def __init__(self, conf):
        super(ObjectExpirer, self).__init__(conf)
        self.conf = conf
        self.logger = get_logger(conf, log_route='s3-object-expirer')
        self.logger.set_statsd_prefix('s3-object-expirer')
        self.interval = int(conf.get('interval') or 300)
        self.s3_expiring_objects_account = \
            (conf.get('auto_create_account_prefix') or '.') + \
            (conf.get('expiring_objects_account_name') or
             's3_expiring_objects')
        conf_path = conf.get('__file__') or \
            '/etc/swift/s3-object-expirer.conf'
        request_tries = int(conf.get('request_tries') or 3)
        self.swift = InternalClient(conf_path,
                                    'Swift Object Expirer',
                                    request_tries)
        self.glacier = self._init_glacier()
        self.glacier_account_prefix = '.glacier_'
        self.report_interval = int(conf.get('report_interval') or 300)
        self.report_first_time = self.report_last_time = time()
        self.report_objects = 0
        self.recon_cache_path = conf.get('recon_cache_path',
                                         '/var/cache/swift')
        self.rcache = join(self.recon_cache_path, 'object.recon')
        self.concurrency = int(conf.get('concurrency', 1))
        if self.concurrency < 1:
            raise ValueError("concurrency must be set to at least 1")
        self.processes = int(self.conf.get('processes', 0))
        self.process = int(self.conf.get('process', 0))
        self.client = Client(self.conf.get('sentry_sdn', ''))

    def _init_glacier(self):
        con = Layer2(region_name='ap-northeast-1')
        return con.get_vault('swift-s3-transition')

    def report(self, final=False):
        """
        Emits a log line report of the progress so far, or the final progress
        if final=True.

        :param final: Set to True for the last report once the expiration
                      pass has completed.
        """
        if final:
            elapsed = time() - self.report_first_time
            self.logger.info(_('Pass completed in %ds; %d objects expired') %
                             (elapsed, self.report_objects))
            dump_recon_cache({'object_expiration_pass': elapsed,
                              'expired_last_pass': self.report_objects},
                             self.rcache, self.logger)
        elif time() - self.report_last_time >= self.report_interval:
            elapsed = time() - self.report_first_time
            self.logger.info(_('Pass so far %ds; %d objects expired') %
                             (elapsed, self.report_objects))
            self.report_last_time = time()

    def run_once(self, *args, **kwargs):
        """
        Executes a single pass, looking for objects to expire.

        :param args: Extra args to fulfill the Daemon interface; this daemon
                     has no additional args.
        :param kwargs: Extra keyword args to fulfill the Daemon interface;
                       this daemon accepts processes and process keyword
                       args. These will override the values from the config
                       file if provided.
        """
        processes, process = self.get_process_values(kwargs)
        pool = GreenPool(self.concurrency)
        containers_to_delete = []
        self.report_first_time = self.report_last_time = time()
        self.report_objects = 0
        try:
            self.logger.debug(_('Run begin'))
            containers, objects = \
                self.swift.get_account_info(self.s3_expiring_objects_account)
            self.logger.info(_('Pass beginning; %s possible containers; %s '
                               'possible objects') % (containers, objects))
            for c in self.swift.iter_containers(
                    self.s3_expiring_objects_account):
                container = c['name']
                timestamp = int(container)
                if timestamp > int(time()):
                    break
                containers_to_delete.append(container)
                for o in self.swift.iter_objects(
                        self.s3_expiring_objects_account, container):
                    obj = o['name'].encode('utf8')
                    if processes > 0:
                        obj_process = int(
                            hashlib.md5('%s/%s' % (container, obj)).
                            hexdigest(), 16)
                        if obj_process % processes != process:
                            continue
                    pool.spawn_n(self.delete_object, container, obj)
            pool.waitall()
            for container in containers_to_delete:
                try:
                    self.swift.delete_container(
                        self.s3_expiring_objects_account,
                        container,
                        acceptable_statuses=(2, HTTP_NOT_FOUND,
                                             HTTP_CONFLICT))
                except (Exception, Timeout) as err:
                    report_exception(self.logger,
                                     _('Exception while deleting '
                                       'container %s %s') %
                                     (container, str(err)),
                                     self.client)
            self.logger.debug(_('Run end'))
            self.report(final=True)
        except (Exception, Timeout):
            report_exception(self.logger, _('Unhandled exception'),
                             self.client)

    def run_forever(self, *args, **kwargs):
        """
        Executes passes forever, looking for objects to expire.

        :param args: Extra args to fulfill the Daemon interface; this daemon
                     has no additional args.
        :param kwargs: Extra keyword args to fulfill the Daemon interface;
                       this daemon has no additional keyword args.
        """
        sleep(random() * self.interval)
        while True:
            begin = time()
            try:
                self.run_once(*args, **kwargs)
            except (Exception, Timeout):
                report_exception(self.logger, _('Unhandled exception'),
                                 self.client)
            elapsed = time() - begin
            if elapsed < self.interval:
                sleep(random() * (self.interval - elapsed))

    def get_process_values(self, kwargs):
        """
        Gets the processes, process from the kwargs if those values exist.
        Otherwise, returns the processes, process set in the config file.

        :param kwargs: Keyword args passed into the run_forever(), run_once()
                       methods. They have values specified on the command
                       line when the daemon is run.
        """
        if kwargs.get('processes') is not None:
            processes = int(kwargs['processes'])
        else:
            processes = self.processes
        if kwargs.get('process') is not None:
            process = int(kwargs['process'])
        else:
            process = self.process
        if process < 0:
            raise ValueError(
                'process must be an integer greater than or equal to 0')
        if processes < 0:
            raise ValueError(
                'processes must be an integer greater than or equal to 0')
        if processes and process >= processes:
            # process is a zero-based index, so it must be strictly smaller
            raise ValueError('process must be less than processes')
        return processes, process

    def delete_object(self, hidden_container, obj):
        start_time = time()
        try:
            account, container, obj_name = obj.split('/', 2)
            lifecycle = Lifecycle(account, container, obj_name,
                                  swift_client=self.swift)
            object_header = lifecycle.object.headers
            object_rule = lifecycle.get_object_rule_by_action('Expiration')
            last_modified = gmt_to_timestamp(object_header['Last-Modified'])
            validation_flg = lifecycle.object_lifecycle_validation()
            if (validation_flg == LIFECYCLE_OK) or \
                    (validation_flg == DISABLED_TRANSITION):
                times = calc_when_actions_do(object_rule, last_modified)
                actual_expire_time = int(times['Expiration'])
                if actual_expire_time == int(hidden_container):
                    self.delete_actual_object(obj)
                    if lifecycle.get_s3_storage_class() == 'GLACIER':
                        self.delete_glacier_object(obj)
                    self.report_objects += 1
                    self.logger.increment('objects')
                # dequeue the hidden entry whether or not it was still
                # scheduled for this bucket
                self.swift.delete_object(self.s3_expiring_objects_account,
                                         hidden_container, obj)
        except (Exception, Timeout) as err:
            self.logger.increment('errors')
            report_exception(self.logger,
                             _('Exception while deleting object %s %s %s') %
                             (hidden_container, obj, str(err)),
                             self.client)
        self.logger.timing_since('timing', start_time)
        self.report()

    def delete_glacier_object(self, obj):
        account, container, prefix = obj.split('/', 2)
        glacier_hidden_account = self.glacier_account_prefix + account
        objs = get_objects_by_prefix(glacier_hidden_account, container,
                                     prefix, swift_client=self.swift)
        glacier_obj = None
        for o in objs:
            name = \
                get_glacier_objname_from_hidden_object(o)
            if name == prefix:
                glacier_obj = o
                break
        glacier_archive_id = get_glacier_key_from_hidden_object(glacier_obj)
        self.glacier.delete_archive(glacier_archive_id)
        self.swift.delete_object(glacier_hidden_account, container,
                                 glacier_obj)

    def delete_actual_object(self, obj):
        """
        Deletes the end-user object indicated by the actual object name
        given '<account>/<container>/<object>'.

        :param obj: The name of the end-user object to delete:
                    '<account>/<container>/<object>'
        """
        path = '/v1/' + urllib.quote(obj.lstrip('/'))
        self.swift.make_request('DELETE', path, {}, (2, HTTP_NOT_FOUND))
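
# NOTE (illustrative sketch, helper names hypothetical): the hidden
# s3_expiring_objects account is laid out as one container per scheduled
# unix-time bucket, each holding '<account>/<container>/<object>' entries;
# run_once() walks the buckets in name order and stops at the first one
# still in the future. Composing and parsing such a queue entry:
def make_queue_entry(when, account, container, obj):
    """Return (hidden_container, hidden_obj) for an expiration queue entry."""
    return str(int(when)), '/'.join((account, container, obj))


def parse_queue_entry(hidden_container, hidden_obj):
    """Inverse of make_queue_entry()."""
    account, container, obj = hidden_obj.split('/', 2)
    return int(hidden_container), account, container, obj
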
class ObjectTransitor(Daemon):
    def __init__(self, conf):
        super(ObjectTransitor, self).__init__(conf)
        self.conf = conf
        self.logger = get_logger(conf, log_route='s3-object-transitor')
        self.logger.set_statsd_prefix('s3-object-transitor')
        self.interval = int(conf.get('interval') or 300)
        self.s3_tr_objects_account = \
            (conf.get('auto_create_account_prefix') or '.') + \
            (conf.get('expiring_objects_account_name') or
             's3_transitioning_objects')
        conf_path = conf.get('__file__') or \
            '/etc/swift/s3-object-transitor.conf'
        request_tries = int(conf.get('request_tries') or 3)
        self.swift = InternalClient(conf_path, 'Swift Object Transitor',
                                    request_tries)
        self.report_interval = int(conf.get('report_interval') or 300)
        self.report_first_time = self.report_last_time = time()
        self.report_objects = 0
        self.recon_cache_path = conf.get('recon_cache_path',
                                         '/var/cache/swift')
        self.rcache = join(self.recon_cache_path, 'object.recon')
        self.concurrency = int(conf.get('concurrency', 1))
        if self.concurrency < 1:
            raise ValueError("concurrency must be set to at least 1")
        self.processes = int(self.conf.get('processes', 0))
        self.process = int(self.conf.get('process', 0))
        self.client = Client(self.conf.get('sentry_sdn', ''))

    def report(self, final=False):
        """
        Emits a log line report of the progress so far, or the final
        progress if final=True.

        :param final: Set to True for the last report once the transition
                      pass has completed.
        """
        if final:
            elapsed = time() - self.report_first_time
            self.logger.info(_('Pass completed in %ds; %d objects '
                               'transitioned') %
                             (elapsed, self.report_objects))
            dump_recon_cache({'object_transition_pass': elapsed,
                              'transitioned_last_pass': self.report_objects},
                             self.rcache, self.logger)
        elif time() - self.report_last_time >= self.report_interval:
            elapsed = time() - self.report_first_time
            self.logger.info(_('Pass so far %ds; %d objects transitioned') %
                             (elapsed, self.report_objects))
            self.report_last_time = time()

    def run_once(self, *args, **kwargs):
        """
        Executes a single pass, looking for objects to transition.

        :param args: Extra args to fulfill the Daemon interface; this daemon
                     has no additional args.
        :param kwargs: Extra keyword args to fulfill the Daemon interface;
                       this daemon accepts processes and process keyword
                       args. These will override the values from the config
                       file if provided.
        """
        processes, process = self.get_process_values(kwargs)
        pool = GreenPool(self.concurrency)
        containers_to_delete = []
        self.report_first_time = self.report_last_time = time()
        self.report_objects = 0
        try:
            self.logger.debug(_('Run begin'))
            containers, objects = \
                self.swift.get_account_info(self.s3_tr_objects_account)
            self.logger.info(_('Pass beginning; %s possible containers; %s '
                               'possible objects') % (containers, objects))
            for c in self.swift.iter_containers(self.s3_tr_objects_account):
                container = c['name']
                timestamp = int(container)
                if timestamp > int(time()):
                    break
                containers_to_delete.append(container)
                for o in self.swift.iter_objects(self.s3_tr_objects_account,
                                                 container):
                    obj = o['name'].encode('utf8')
                    if processes > 0:
                        obj_process = int(
                            hashlib.md5('%s/%s' % (container, obj)).
                            hexdigest(), 16)
                        if obj_process % processes != process:
                            continue
                    pool.spawn_n(self.transition_object, container, obj)
            pool.waitall()
            for container in containers_to_delete:
                try:
                    self.swift.delete_container(self.s3_tr_objects_account,
                                                container,
                                                acceptable_statuses=(2, 4))
                except (Exception, Timeout) as err:
                    report_exception(self.logger,
                                     _('Exception while deleting '
                                       'container %s %s') %
                                     (container, str(err)),
                                     self.client)
            self.logger.debug(_('Run end'))
            self.report(final=True)
        except (Exception, Timeout):
            report_exception(self.logger, _('Unhandled exception'),
                             self.client)

    def run_forever(self, *args, **kwargs):
        """
        Executes passes forever, looking for objects to transition.

        :param args: Extra args to fulfill the Daemon interface; this daemon
                     has no additional args.
        :param kwargs: Extra keyword args to fulfill the Daemon interface;
                       this daemon has no additional keyword args.
        """
        sleep(random() * self.interval)
        while True:
            begin = time()
            try:
                self.run_once(*args, **kwargs)
            except (Exception, Timeout):
                report_exception(self.logger, _('Unhandled exception'),
                                 self.client)
            elapsed = time() - begin
            if elapsed < self.interval:
                sleep(random() * (self.interval - elapsed))

    def get_process_values(self, kwargs):
        """
        Gets the processes, process from the kwargs if those values exist.
        Otherwise, returns the processes, process set in the config file.

        :param kwargs: Keyword args passed into the run_forever(), run_once()
                       methods. They have values specified on the command
                       line when the daemon is run.
        """
        if kwargs.get('processes') is not None:
            processes = int(kwargs['processes'])
        else:
            processes = self.processes
        if kwargs.get('process') is not None:
            process = int(kwargs['process'])
        else:
            process = self.process
        if process < 0:
            raise ValueError(
                'process must be an integer greater than or equal to 0')
        if processes < 0:
            raise ValueError(
                'processes must be an integer greater than or equal to 0')
        if processes and process >= processes:
            # process is a zero-based index, so it must be strictly smaller
            raise ValueError('process must be less than processes')
        return processes, process

    def transition_object(self, container, obj):
        start_time = time()
        try:
            obj_account, obj_container, obj_object = obj.split('/', 2)
            lifecycle = Lifecycle(obj_account, obj_container, obj_object,
                                  swift_client=self.swift)
            if is_success(lifecycle.object.status):
                object_header = lifecycle.object.headers
                object_rule = lifecycle.get_object_rule_by_action(
                    'Transition')
                last_modified = object_header['Last-Modified']
                last_modified = gmt_to_timestamp(last_modified)
                validation_flg = lifecycle.object_lifecycle_validation()
                if (validation_flg == LIFECYCLE_OK) or \
                        (validation_flg == DISABLED_EXPIRATION):
                    times = calc_when_actions_do(object_rule, last_modified)
                    actual_expire_time = int(times['Transition'])
                    if actual_expire_time == int(container):
                        self.request_transition(obj)
                    # dequeue the hidden entry whether or not it was still
                    # scheduled for this bucket
                    self.swift.delete_object(self.s3_tr_objects_account,
                                             container, obj)
        except (Exception, Timeout) as err:
            self.logger.increment('errors')
            report_exception(self.logger,
                             _('Exception while transitioning object '
                               '%s %s %s') % (container, obj, str(err)),
                             self.client)
        self.logger.timing_since('timing', start_time)
        self.report()

    def request_transition(self, actual_obj):
        path = '/v1/' + urllib.quote(actual_obj.lstrip('/'))
        headers = {GLACIER_FLAG_META: True,
                   'X-S3-Object-Transition': True}
        resp = self.swift.make_request('POST', path, headers, (2, 5))
        if resp.status_int == 500:
            raise Exception(resp.body)
        self.report_objects += 1
        self.logger.increment('objects')
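
# NOTE (illustrative sketch, helper name hypothetical): delete_object() in
# the expirer and transition_object() above both re-derive the action time
# from the object's current lifecycle rule and only act when it still equals
# the queue bucket the entry was found in; entries whose schedule has since
# changed are dequeued without action. The guard reduces to:
def entry_still_current(times, action, hidden_container):
    """True when a queued lifecycle entry still matches its schedule.

    :param times: mapping of action name -> scheduled unix time, as
                  returned by calc_when_actions_do()
    :param action: 'Expiration' or 'Transition'
    :param hidden_container: name of the hidden queue container, which is
                             the originally scheduled unix time
    """
    return int(times[action]) == int(hidden_container)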