def _autocontainer_loop(self, account, marker=None, limit=None,
                        concurrency=1, **kwargs):
    from functools import partial
    container_marker = self.flatns_manager(marker) if marker else None
    count = 0
    kwargs['pool_manager'] = get_pool_manager(
        pool_maxsize=concurrency * 2)
    # Start to list contents at 'marker' inside the last visited container
    if container_marker:
        for element in depaginate(
                self.app.client_manager.storage.object_list,
                listing_key=lambda x: x['objects'],
                marker_key=lambda x: x.get('next_marker'),
                truncated_key=lambda x: x['truncated'],
                account=account, container=container_marker,
                marker=marker, **kwargs):
            count += 1
            yield element
            if limit and count >= limit:
                return
    pool = GreenPool(concurrency)
    for object_list in pool.imap(
            partial(self._list_autocontainer_objects,
                    account=account, **kwargs),
            depaginate(self.app.client_manager.storage.container_list,
                       item_key=lambda x: x[0],
                       marker_key=lambda x: x[-1][0],
                       account=account,
                       marker=container_marker)):
        for element in object_list:
            count += 1
            yield element
            if limit and count >= limit:
                return
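# Minimal consumption sketch (not part of the original code): 'lister'
# stands for an instance of the command class that defines
# _autocontainer_loop, and the account name is a placeholder. Each
# yielded element comes from the 'objects' key of an object_list()
# response, so it is a dict carrying at least a 'name' entry.
for obj in lister._autocontainer_loop('AUTH_demo', limit=100,
                                      concurrency=4):
    print(obj['name'])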
def __init__(self, namespace, concurrency=50, error_file=None,
             rebuild_file=None, check_xattr=True, limit_listings=0,
             request_attempts=1, logger=None, verbose=False,
             check_hash=False, **_kwargs):
    self.pool = GreenPool(concurrency)
    self.error_file = error_file
    self.check_xattr = bool(check_xattr)
    self.check_hash = bool(check_hash)
    self.logger = logger or get_logger(
        {'namespace': namespace}, name='integrity', verbose=verbose)
    # Optimisation for when we are only checking one object
    # or one container.
    # 0 -> do not limit
    # 1 -> limit account listings (list of containers)
    # 2 -> limit container listings (list of objects)
    self.limit_listings = limit_listings
    if self.error_file:
        outfile = open(self.error_file, 'a')
        self.error_writer = csv.writer(outfile, delimiter=' ')
    self.rebuild_file = rebuild_file
    if self.rebuild_file:
        self.fd = open(self.rebuild_file, 'a')
        self.rebuild_writer = csv.writer(self.fd, delimiter='|')
    self.api = ObjectStorageApi(namespace,
                                logger=self.logger,
                                max_retries=request_attempts - 1,
                                request_attempts=request_attempts)
    self.rdir_client = RdirClient({"namespace": namespace},
                                  logger=self.logger)
    self.accounts_checked = 0
    self.containers_checked = 0
    self.objects_checked = 0
    self.chunks_checked = 0
    self.account_not_found = 0
    self.container_not_found = 0
    self.object_not_found = 0
    self.chunk_not_found = 0
    self.account_exceptions = 0
    self.container_exceptions = 0
    self.object_exceptions = 0
    self.chunk_exceptions = 0
    self.list_cache = {}
    self.running = {}
    self.running_lock = Semaphore(1)
    self.result_queue = Queue(concurrency)
    self.run_time = 0
def run(self, *args, **kwargs):
    try:
        self.logger.info('conscience agent: starting')
        pool = GreenPool(len(self.watchers))
        for watcher in self.watchers:
            pool.spawn(watcher.start)
        self.running = True
        while self.running:
            sleep(1)
            # Iterate over a copy: failed watchers are removed from
            # the original list while looping.
            for w in list(self.watchers):
                if w.failed:
                    self.watchers.remove(w)
                    self.logger.warn('restart watcher "%s"', w.name)
                    new_w = ServiceWatcher(self.conf, w.service)
                    self.watchers.append(new_w)
                    pool.spawn(new_w.start)
    except Exception as e:
        self.logger.error('ERROR in main loop %s', e)
        raise e
    finally:
        self.logger.warn('conscience agent: stopping')
        self.running = False
        self.stop_watchers()
def mover_pass(self, **kwargs):
    start_time = report_time = time.time()
    total_errors = 0
    mover_time = 0
    pool = GreenPool(self.concurrency)
    paths = paths_gen(self.volume)
    for path in paths:
        loop_time = time.time()
        now = time.time()
        if now - self.last_usage_check >= self.usage_check_interval:
            free_ratio = statfs(self.volume)
            usage = (1 - float(free_ratio)) * 100
            if usage <= self.usage_target:
                self.logger.info(
                    'current usage %.2f%%: target reached (%.2f%%)',
                    usage, self.usage_target)
                break
            self.last_usage_check = now
        # Spawn a chunk move task.
        # The call will block if no green thread is available.
        pool.spawn_n(self.safe_chunk_move, path)
        self.chunks_run_time = ratelimit(self.chunks_run_time,
                                         self.max_chunks_per_second)
        self.total_chunks_processed += 1
        now = time.time()
        if now - self.last_reported >= self.report_interval:
            self.logger.info(
                '%(start_time)s '
                '%(passes)d '
                '%(errors)d '
                '%(c_rate).2f '
                '%(b_rate).2f '
                '%(total).2f '
                '%(mover_time).2f '
                '%(mover_rate).2f' % {
                    'start_time': time.ctime(report_time),
                    'passes': self.passes,
                    'errors': self.errors,
                    'c_rate': self.passes / (now - report_time),
                    'b_rate': self.bytes_processed / (now - report_time),
                    'total': (now - start_time),
                    'mover_time': mover_time,
                    'mover_rate': mover_time / (now - start_time)
                })
            report_time = now
            total_errors += self.errors
            self.passes = 0
            self.bytes_processed = 0
            self.last_reported = now
        mover_time += (now - loop_time)
        if self.limit != 0 and self.total_chunks_processed >= self.limit:
            break
    pool.waitall()
    elapsed = (time.time() - start_time) or 0.000001
    self.logger.info(
        '%(elapsed).02f '
        '%(errors)d '
        '%(chunk_rate).2f '
        '%(bytes_rate).2f '
        '%(mover_time).2f '
        '%(mover_rate).2f' % {
            'elapsed': elapsed,
            'errors': total_errors + self.errors,
            'chunk_rate': self.total_chunks_processed / elapsed,
            'bytes_rate': self.total_bytes_processed / elapsed,
            'mover_time': mover_time,
            'mover_rate': mover_time / elapsed
        })
class Checker(object): def __init__(self, namespace, concurrency=50, error_file=None, rebuild_file=None, check_xattr=True, limit_listings=0, request_attempts=1, logger=None, verbose=False, check_hash=False, min_time_in_error=0.0, required_confirmations=0, beanstalkd_addr=None, beanstalkd_tube=BlobRebuilder.DEFAULT_BEANSTALKD_WORKER_TUBE, cache_size=2**24, **_kwargs): self.pool = GreenPool(concurrency) self.error_file = error_file self.error_sender = None self.check_xattr = bool(check_xattr) self.check_hash = bool(check_hash) self.logger = logger or get_logger( {'namespace': namespace}, name='integrity', verbose=verbose) # Optimisation for when we are only checking one object # or one container. # 0 -> do not limit # 1 -> limit account listings (list of containers) # 2 -> limit container listings (list of objects) self.limit_listings = limit_listings if self.error_file: outfile = open(self.error_file, 'a') self.error_writer = csv.writer(outfile, delimiter=' ') self.rebuild_file = rebuild_file if self.rebuild_file: self.fd = open(self.rebuild_file, 'a') self.rebuild_writer = csv.writer(self.fd, delimiter='|') if beanstalkd_addr: self.error_sender = BeanstalkdSender(beanstalkd_addr, beanstalkd_tube, self.logger) self.api = ObjectStorageApi(namespace, logger=self.logger, max_retries=request_attempts - 1, request_attempts=request_attempts) self.rdir_client = RdirClient({"namespace": namespace}, logger=self.logger) self.accounts_checked = 0 self.containers_checked = 0 self.objects_checked = 0 self.chunks_checked = 0 self.account_not_found = 0 self.container_not_found = 0 self.object_not_found = 0 self.chunk_not_found = 0 self.account_exceptions = 0 self.container_exceptions = 0 self.object_exceptions = 0 self.chunk_exceptions = 0 self.list_cache = CacheDict(cache_size) self.running_tasks = {} self.running_lock = Semaphore(1) self.result_queue = LightQueue(concurrency) self.running = True self.run_time = 0 # Set of targets which must be checked again, to confirm # or deny the issues reported by previous passes. self.delayed_targets = dict() # Minimum time in error and number of confirmations of the error # before triggering a reconstruction action. self.min_time_in_error = min_time_in_error self.required_confirmations = required_confirmations def reset_stats(self): self.accounts_checked = 0 self.containers_checked = 0 self.objects_checked = 0 self.chunks_checked = 0 self.account_not_found = 0 self.container_not_found = 0 self.object_not_found = 0 self.chunk_not_found = 0 self.account_exceptions = 0 self.container_exceptions = 0 self.object_exceptions = 0 self.chunk_exceptions = 0 def _spawn(self, func, target, *args, **kwargs): """ Spawn a task on the internal GreenPool. Discards the task if the pool is no more running. """ if self.running: return self.pool.spawn(func, target, *args, **kwargs) self.logger.info("Discarding %s", target) return None def _spawn_n(self, func, target, *args, **kwargs): """ Spawn a task on the internal GreenPool, do not wait for the result. Discards the task if the pool is no more running. """ if self.running: return self.pool.spawn_n(func, target, *args, **kwargs) self.logger.info("Discarding %s", target) return None def complete_target_from_chunk_metadata(self, target, xattr_meta): """ Complete a Target object from metadata found in chunk's extended attributes. In case the "fullpath" is not available, try to read legacy metadata, and maybe ask meta1 to resolve the CID into account and container names. 
""" # pylint: disable=unbalanced-tuple-unpacking try: acct, ct, path, vers, content_id = \ decode_fullpath(xattr_meta['full_path']) target.account = acct target.container = ct target.obj = path target.content_id = content_id target.version = vers except KeyError: # No fullpath header, try legacy headers if 'content_path' in xattr_meta: target.obj = xattr_meta['content_path'] if 'content_id' in xattr_meta: target.content_id = xattr_meta['content_id'] if 'content_version' in xattr_meta: target.version = xattr_meta['content_version'] cid = xattr_meta.get('container_id') if cid: try: md = self.api.directory.show(cid=cid) acct = md.get('account') ct = md.get('name') if acct: target.account = acct if ct: target.container = ct except Exception as err: self.logger.warn( "Failed to resolve CID %s into account " "and container names: %s", cid, err) def recover_and_complete_object_meta(self, target, chunk): _, rawx_service, chunk_id = chunk.rsplit('/', 2) # 1. Fetch chunk list from rdir (could be cached). # Unfortunately we cannot seek for a chunk ID. entries = [ x for x in self.rdir_client.chunk_fetch(rawx_service, limit=-1) if x[2] == chunk_id ] if not entries: self.logger.warn('Chunk %s not found in rdir' % chunk_id) return elif len(entries) > 1: self.logger.info('Chunk %s appears in %d objects', chunk_id, len(entries)) # 2. Find content and container IDs target.cid, target.content_id = entries[0][0:2] meta = self.api.object_get_properties(None, None, None, cid=target.cid, content=target.content_id) target.obj = meta['name'] target.version = meta['version'] target.account, target.container = self.api.resolve_cid(target.cid) def send_result(self, target, errors=None, irreparable=False): """ Put an item in the result queue. """ # TODO(FVE): send to an external queue. target.append_result(ItemResult(errors, irreparable)) self.result_queue.put(target) def send_chunk_job(self, target, irreparable=False): """ Send a "content broken" event, to trigger the reconstruction of the chunk. """ item = (self.api.namespace, target.cid, target.content_id, target.chunk) ev_dict = BlobRebuilder.task_event_from_item(item) if irreparable: ev_dict['data']['irreparable'] = irreparable job = json.dumps(ev_dict) self.error_sender.send_job(job) self.error_sender.job_done() # Don't expect any response def write_error(self, target, irreparable=False): if not self.error_file: return error = list() if irreparable: error.append(IRREPARABLE_PREFIX) error.append(target.account) if target.container: error.append(target.container) if target.obj: error.append(target.obj) if target.chunk: error.append(target.chunk) self.error_writer.writerow(error) def write_rebuilder_input(self, target, irreparable=False): error = list() if irreparable: error.append(IRREPARABLE_PREFIX) error.append(target.cid) # FIXME(FVE): ensure we always resolve content_id, # or pass object version along with object name. error.append(target.content_id or target.obj) error.append(target.chunk) self.rebuild_writer.writerow(error) def write_chunk_error(self, target, chunk=None, irreparable=False): if chunk is not None: target = target.copy() target.chunk = chunk self.write_error(target, irreparable=irreparable) if self.rebuild_file: self.write_rebuilder_input(target, irreparable=irreparable) if self.error_sender: self.send_chunk_job(target, irreparable=irreparable) def _check_chunk_xattr(self, target, obj_meta, xattr_meta): """ Check coherency of chunk extended attributes with object metadata. 
:returns: a list of errors """ errors = list() # Composed position -> erasure coding attr_prefix = 'meta' if '.' in obj_meta['pos'] else '' attr_key = attr_prefix + 'chunk_size' if str(obj_meta['size']) != xattr_meta.get(attr_key): errors.append( "'%s' xattr (%s) differs from size in meta2 (%s)" % (attr_key, xattr_meta.get(attr_key), obj_meta['size'])) attr_key = attr_prefix + 'chunk_hash' if obj_meta['hash'] != xattr_meta.get(attr_key): errors.append( "'%s' xattr (%s) differs from hash in meta2 (%s)" % (attr_key, xattr_meta.get(attr_key), obj_meta['hash'])) return errors def _check_chunk(self, target): """ Execute various checks on a chunk: - does it appear in object's chunk list? - is it reachable? - are its extended attributes coherent? :returns: the list of errors encountered, and the chunk's owner object metadata. """ chunk = target.chunk errors = list() obj_meta = None xattr_meta = None cached = self._get_cached_or_lock(chunk) if cached is not None: return cached + (True, ) self.logger.debug('Checking chunk "%s"', target) try: xattr_meta = self.api.blob_client.chunk_head( chunk, xattr=self.check_xattr, check_hash=self.check_hash) except exc.NotFound as err: self.chunk_not_found += 1 errors.append('Not found: %s' % (err, )) except exc.FaultyChunk as err: self.chunk_exceptions += 1 errors.append('Faulty: %r' % (err, )) except Exception as err: self.chunk_exceptions += 1 errors.append('Check failed: %s' % (err, )) if not target.obj: if xattr_meta: self.complete_target_from_chunk_metadata(target, xattr_meta) else: self.recover_and_complete_object_meta(target, chunk) if target.obj: obj_listing, obj_meta = self.check_obj(target.copy_object()) if chunk not in obj_listing: errors.append('Missing from object listing') db_meta = dict() else: db_meta = obj_listing[chunk] if db_meta and xattr_meta and self.check_xattr: errors.extend( self._check_chunk_xattr(target, db_meta, xattr_meta)) self.list_cache[chunk] = errors, obj_meta self._unlock(chunk) # Do not send errors directly, let the caller do it. # Indeed, it may want to check if the chunks can be repaired or not. self.chunks_checked += 1 return errors, obj_meta, False def check_chunk(self, target): errors, _obj_meta, from_cache = self._check_chunk(target) # If the result comes from the cache, we already reported it. if not from_cache: self.send_result(target, errors, target.irreparable) return errors def _check_metachunk(self, target, stg_met, pos, chunks, recurse=0): """ Check that a metachunk has the right number of chunks. 
        :returns: the list of errors
        """
        required = stg_met.expected_chunks
        errors = list()
        chunk_results = list()

        if len(chunks) < required:
            missing_chunks = required - len(chunks)
            if stg_met.ec:
                subs = {x['num'] for x in chunks}
                for sub in range(required):
                    if sub not in subs:
                        chkt = target.copy()
                        chkt.chunk = '%d.%d' % (pos, sub)
                        err = "Missing chunk at position %s" % chkt.chunk
                        chunk_results.append((chkt, [err], False))
                        errors.append(err)
            else:
                for _ in range(missing_chunks):
                    chkt = target.copy()
                    # Plain replication: positions have no subposition.
                    chkt.chunk = str(pos)
                    err = "Missing chunk at position %d" % pos
                    chunk_results.append((chkt, [err], False))
                    errors.append(err)

        if recurse > 0:
            for chunk in chunks:
                tcopy = target.copy()
                tcopy.chunk = chunk['url']
                chunk_errors, _, from_cache = self._check_chunk(tcopy)
                chunk_results.append((tcopy, chunk_errors, from_cache))
                if chunk_errors:
                    errors.append("Unusable chunk %s at position %s" % (
                        chunk['url'], chunk['pos']))

        irreparable = required - len(errors) < stg_met.min_chunks_to_read
        if irreparable:
            errors.append(
                "Unavailable metachunk at position %s "
                "(%d/%d chunks available, %d/%d required)" % (
                    pos, required - len(errors),
                    stg_met.expected_chunks,
                    stg_met.min_chunks_to_read,
                    stg_met.expected_chunks))
        for tgt, errs, from_cache in chunk_results:
            # If the result comes from the cache, we already reported it.
            if not from_cache:
                self.send_result(tgt, errs, irreparable)
        # Since the "metachunk" is not an official item type,
        # this method does not report errors itself. Errors will
        # be reported as object errors.
        return errors

    def _check_obj_policy(self, target, obj_meta, chunks, recurse=0):
        """
        Check that the list of chunks of an object matches
        the object's storage policy.

        :returns: the list of errors encountered
        """
        stg_met = STORAGE_METHODS.load(obj_meta['chunk_method'])
        chunks_by_pos = _sort_chunks(chunks, stg_met.ec)
        tasks = list()
        for pos, pchunks in iteritems(chunks_by_pos):
            tasks.append((pos, self._spawn(self._check_metachunk,
                                           target.copy(),
                                           stg_met, pos, pchunks,
                                           recurse=recurse)))
        errors = list()
        for pos, task in tasks:
            if not task and not self.running:
                errors.append("Pos %d skipped: checker is exiting" % pos)
                continue
            try:
                errors.extend(task.wait())
            except Exception as err:
                errors.append("Check failed: pos %d: %s" % (pos, err))
        return errors

    def check_obj_versions(self, target, versions, recurse=0):
        """
        Run checks of all versions of the targeted object in parallel.
        """
        tasks = list()
        for ov in versions:
            tcopy = target.copy_object()
            tcopy.content_id = ov['id']
            tcopy.version = str(ov['version'])
            tasks.append((tcopy.version,
                          self._spawn(self.check_obj, tcopy,
                                      recurse=recurse)))
        errors = list()
        for version, task in tasks:
            if not task and not self.running:
                errors.append("Version %s skipped: checker is exiting" %
                              version)
                continue
            try:
                task.wait()
            except Exception as err:
                errors.append("Check failed: version %s: %s" % (
                    version, err))
        if errors:
            # Send a result with the target without version to tell
            # we were not able to check all versions of the object.
            self.send_result(target, errors)

    def _load_obj_meta(self, target, errors):
        """
        Load object metadata and chunks.

        :param target: which object to check.
        :param errors: list of errors that will be appended in case
            any error occurs.
        :returns: a tuple with object metadata and a list of chunks.
""" try: return self.api.object_locate(target.account, target.container, target.obj, version=target.version, properties=False) except exc.NoSuchObject as err: self.object_not_found += 1 errors.append('Not found: %s' % (err, )) except Exception as err: self.object_exceptions += 1 errors.append('Check failed: %s' % (err, )) return None, [] def _get_cached_or_lock(self, lock_key): # If something is running, wait for it with self.running_lock: event = self.running_tasks.get(lock_key) if event: event.wait() event = None # Maybe get a cached result if lock_key in self.list_cache: return self.list_cache[lock_key] # No cached result, try to compute the thing ourselves while True: with self.running_lock: # Another check while locked if lock_key in self.list_cache: return self.list_cache[lock_key] # Still nothing cached event = self.running_tasks.get(lock_key) if event is None: self.running_tasks[lock_key] = Event() return None event.wait() def _unlock(self, lock_key): with self.running_lock: event = self.running_tasks[lock_key] del self.running_tasks[lock_key] event.send(True) def check_obj(self, target, recurse=0): """ Check one object version. If no version is specified, all versions of the object will be checked. :returns: the result of the check of the most recent version, or the one that is explicitly targeted. """ account = target.account container = target.container obj = target.obj vers = target.version # can be None cached = self._get_cached_or_lock((account, container, obj, vers)) if cached is not None: return cached self.logger.info('Checking object "%s"', target) container_listing, _ = self.check_container(target.copy_container()) errors = list() if obj not in container_listing: errors.append('Missing from container listing') # checksum = None else: versions = container_listing[obj] if vers is None: if target.content_id is None: # No version specified, check all versions self.check_obj_versions(target.copy_object(), versions, recurse=recurse) # Now return the cached result of the most recent version target.content_id = versions[0]['id'] target.version = str(versions[0]['version']) res = self.check_obj(target, recurse=0) self._unlock((account, container, obj, vers)) return res else: for ov in versions: if ov['id'] == target.content_id: vers = str(ov['version']) target.version = vers break else: errors.append('Missing from container listing') # TODO check checksum match # checksum = container_listing[obj]['hash'] pass meta, chunks = self._load_obj_meta(target, errors) chunk_listing = {c['url']: c for c in chunks} if meta: if target.content_id is None: target.content_id = meta['id'] if target.version is None: target.version = str(meta['version']) self.list_cache[(account, container, obj, vers)] = \ (chunk_listing, meta) self.objects_checked += 1 self._unlock((account, container, obj, vers)) # Skip the check if we could not locate the object if meta: errors.extend( self._check_obj_policy(target, meta, chunks, recurse=recurse)) self.send_result(target, errors) return chunk_listing, meta def check_container(self, target, recurse=0): account = target.account container = target.container cached = self._get_cached_or_lock((account, container)) if cached is not None: return cached self.logger.info('Checking container "%s"', target) account_listing = self.check_account(target.copy_account()) errors = list() if container not in account_listing: errors.append('Missing from account listing') marker = None results = [] ct_meta = dict() extra_args = dict() if self.limit_listings > 1 and target.obj: # 
When we are explicitly checking one object, start the listing # where this object is supposed to be. Do not use a limit, # but an end marker, in order to fetch all versions of the object. extra_args['prefix'] = target.obj extra_args['end_marker'] = target.obj + '\x00' # HACK while True: try: resp = self.api.object_list(account, container, marker=marker, versions=True, **extra_args) except exc.NoSuchContainer as err: self.container_not_found += 1 errors.append('Not found: %s' % (err, )) break except Exception as err: self.container_exceptions += 1 errors.append('Check failed: %s' % (err, )) break truncated = resp.get('truncated', False) if truncated: marker = resp['next_marker'] if resp['objects']: # safeguard, probably useless if not marker: marker = resp['objects'][-1]['name'] results.extend(resp['objects']) if not truncated or self.limit_listings > 1: break else: ct_meta = resp ct_meta.pop('objects') break container_listing = dict() # Save all object versions, with the most recent first for obj in results: container_listing.setdefault(obj['name'], list()).append(obj) for versions in container_listing.values(): versions.sort(key=lambda o: o['version'], reverse=True) if self.limit_listings <= 1: # We just listed the whole container, keep the result in a cache self.containers_checked += 1 self.list_cache[(account, container)] = container_listing, ct_meta self._unlock((account, container)) if recurse > 0: for obj_vers in container_listing.values(): for obj in obj_vers: tcopy = target.copy_object() tcopy.obj = obj['name'] tcopy.content_id = obj['id'] tcopy.version = str(obj['version']) self._spawn_n(self.check_obj, tcopy, recurse - 1) self.send_result(target, errors) return container_listing, ct_meta def check_account(self, target, recurse=0): account = target.account cached = self._get_cached_or_lock(account) if cached is not None: return cached self.logger.info('Checking account "%s"', target) errors = list() marker = None results = [] extra_args = dict() if self.limit_listings > 0 and target.container: # When we are explicitly checking one container, start the listing # where this container is supposed to be, and list only one # container. 
extra_args['prefix'] = target.container extra_args['limit'] = 1 while True: try: resp = self.api.container_list(account, marker=marker, **extra_args) except Exception as err: self.account_exceptions += 1 errors.append('Check failed: %s' % (err, )) break if resp: marker = resp[-1][0] results.extend(resp) if self.limit_listings > 0: break else: break containers = dict() for container in results: # Name, number of objects, number of bytes containers[container[0]] = (container[1], container[2]) if self.limit_listings <= 0: # We just listed the whole account, keep the result in a cache self.accounts_checked += 1 self.list_cache[account] = containers self._unlock(account) if recurse > 0: for container in containers: tcopy = target.copy_account() tcopy.container = container self._spawn_n(self.check_container, tcopy, recurse - 1) self.send_result(target, errors) return containers def check(self, target, recurse=0): if target.type == 'chunk': self._spawn_n(self.check_chunk, target) elif target.type == 'object': self._spawn_n(self.check_obj, target, recurse) elif target.type == 'container': self._spawn_n(self.check_container, target, recurse) else: self._spawn_n(self.check_account, target, recurse) def check_all_accounts(self, recurse=0): all_accounts = self.api.account_list() for acct in all_accounts: self.check(Target(acct), recurse=recurse) def fetch_results(self, rate_limiter=None): while self.running and not self.result_queue.empty(): res = self.result_queue.get(True) yield res # Rate limiting is done on the result queue for now. # Someday we could implement a submission queue instead of # letting each worker submit tasks to the pool, and do # the rate limiting on this queue. if rate_limiter is not None: self.run_time = rate_limiter(self.run_time) def merge_with_delayed_target(self, target): """ Merge the specified target with a delayed one. :returns: the delayed target, if there is one, with an error log including the errors of the new target. Return the new target otherwise. """ tkey = repr(target) prev_target = self.delayed_targets.get(tkey, target) if prev_target is not target: errors = dict(prev_target.error_log) errors.update(target.error_log) prev_target.error_log = sorted(errors.items()) return prev_target def log_result(self, target): """ Log a check result, if it shows errors. Dispatch the errors to the appropriate destinations (log files, queues, etc.). """ # The result may come from a new target, or from an old target # we checked another time, or both. target = self.merge_with_delayed_target(target) if target.has_errors: time_in_error, confirmations = target.time_in_error() if (time_in_error < self.min_time_in_error or confirmations < self.required_confirmations): self.logger.info("Delaying check for %s, %d/%d confirmations", target, confirmations, self.required_confirmations) self.delayed_targets[repr(target)] = target else: if target.type == 'chunk': self.logger.info( "Writing error for %s, %d/%d confirmations", target, confirmations, self.required_confirmations) self.write_chunk_error(target, irreparable=target.irreparable) else: self.write_error(target, irreparable=target.irreparable) self.delayed_targets.pop(repr(target), None) self.logger.warn( '%s:%s\n%s', target, ' irreparable' if target.irreparable else '', target.latest_error_result().errors_to_str(err_format=' %s')) def run(self, rate_limiter=None): """ Fetch results and write logs until all jobs have finished. :returns: a generator yielding check results. 
""" while self.running and (self.pool.running() + self.pool.waiting()): for result in self.fetch_results(rate_limiter): self.log_result(result) yield result sleep(0.1) if self.running: self.pool.waitall() # No rate limiting for result in self.fetch_results(): self.log_result(result) yield result self.list_cache = CacheDict(self.list_cache.size) def stop(self): self.logger.info("Stopping") self.running = False def report(self): success = True def _report_stat(name, stat): print("{0:18}: {1}".format(name, stat)) print() print('Report') _report_stat("Accounts checked", self.accounts_checked) if self.account_not_found: success = False _report_stat("Missing accounts", self.account_not_found) if self.account_exceptions: success = False _report_stat("Exceptions", self.account_exceptions) print() _report_stat("Containers checked", self.containers_checked) if self.container_not_found: success = False _report_stat("Missing containers", self.container_not_found) if self.container_exceptions: success = False _report_stat("Exceptions", self.container_exceptions) print() _report_stat("Objects checked", self.objects_checked) if self.object_not_found: success = False _report_stat("Missing objects", self.object_not_found) if self.object_exceptions: success = False _report_stat("Exceptions", self.object_exceptions) print() _report_stat("Chunks checked", self.chunks_checked) if self.chunk_not_found: success = False _report_stat("Missing chunks", self.chunk_not_found) if self.chunk_exceptions: success = False _report_stat("Exceptions", self.chunk_exceptions) return success
class Checker(object): def __init__(self, namespace, concurrency=50, error_file=None, rebuild_file=None, full=True, limit_listings=0, request_attempts=1, logger=None, verbose=False, integrity=False): self.pool = GreenPool(concurrency) self.error_file = error_file self.full = bool(full) self.integrity = bool(integrity) # Optimisation for when we are only checking one object # or one container. # 0 -> do not limit # 1 -> limit account listings (list of containers) # 2 -> limit container listings (list of objects) self.limit_listings = limit_listings if self.error_file: outfile = open(self.error_file, 'a') self.error_writer = csv.writer(outfile, delimiter=' ') self.rebuild_file = rebuild_file if self.rebuild_file: fd = open(self.rebuild_file, 'a') self.rebuild_writer = csv.writer(fd, delimiter='|') self.logger = logger or get_logger( {'namespace': namespace}, name='integrity', verbose=verbose) self.api = ObjectStorageApi(namespace, logger=self.logger, max_retries=request_attempts - 1, request_attempts=request_attempts) self.accounts_checked = 0 self.containers_checked = 0 self.objects_checked = 0 self.chunks_checked = 0 self.account_not_found = 0 self.container_not_found = 0 self.object_not_found = 0 self.chunk_not_found = 0 self.account_exceptions = 0 self.container_exceptions = 0 self.object_exceptions = 0 self.chunk_exceptions = 0 self.list_cache = {} self.running = {} self.result_queue = Queue() def complete_target_from_chunk_metadata(self, target, xattr_meta): """ Complete a Target object from metadata found in chunk's extended attributes. In case the "fullpath" is not available, try to read legacy metadata, and maybe ask meta1 to resolve the CID into account and container names. """ # pylint: disable=unbalanced-tuple-unpacking try: acct, ct, path, vers, content_id = \ decode_fullpath(xattr_meta['full_path']) target.account = acct target.container = ct target.obj = path target.content_id = content_id target.version = vers except KeyError: # No fullpath header, try legacy headers if 'content_path' in xattr_meta: target.obj = xattr_meta['content_path'] if 'content_id' in xattr_meta: target.content_id = xattr_meta['content_id'] if 'content_version' in xattr_meta: target.version = xattr_meta['content_version'] cid = xattr_meta.get('container_id') if cid: try: md = self.api.directory.show(cid=cid) acct = md.get('account') ct = md.get('name') if acct: target.account = acct if ct: target.container = ct except Exception as err: self.logger.warn( "Failed to resolve CID %s into account " "and container names: %s", cid, err) def send_result(self, target, errors=None): """ Put an item in the result queue. """ # TODO(FVE): send to an external queue. 
self.result_queue.put(ItemResult(target, errors)) def write_error(self, target, irreparable=False): if not self.error_file: return error = list() if irreparable: error.append('#IRREPARABLE') error.append(target.account) if target.container: error.append(target.container) if target.obj: error.append(target.obj) if target.chunk: error.append(target.chunk) self.error_writer.writerow(error) def write_rebuilder_input(self, target, irreparable=False): # FIXME(FVE): cid can be computed from account and container names ct_meta = self.list_cache[(target.account, target.container)][1] try: cid = ct_meta['system']['sys.name'].split('.', 1)[0] except KeyError: cid = ct_meta['properties']['sys.name'].split('.', 1)[0] error = list() if irreparable: error.append('#IRREPARABLE') error.append(cid) # FIXME(FVE): ensure we always resolve content_id, # or pass object version along with object name. error.append(target.content_id or target.obj) error.append(target.chunk) self.rebuild_writer.writerow(error) def write_chunk_error(self, target, chunk=None, irreparable=False): if chunk is not None: target = target.copy() target.chunk = chunk self.write_error(target, irreparable=irreparable) if self.rebuild_file: self.write_rebuilder_input(target, irreparable=irreparable) def _check_chunk_xattr(self, target, obj_meta, xattr_meta): """ Check coherency of chunk extended attributes with object metadata. :returns: a list of errors """ errors = list() # Composed position -> erasure coding attr_prefix = 'meta' if '.' in obj_meta['pos'] else '' attr_key = attr_prefix + 'chunk_size' if str(obj_meta['size']) != xattr_meta.get(attr_key): errors.append( "'%s' xattr (%s) differs from size in meta2 (%s)" % (attr_key, xattr_meta.get(attr_key), obj_meta['size'])) attr_key = attr_prefix + 'chunk_hash' if obj_meta['hash'] != xattr_meta.get(attr_key): errors.append( "'%s' xattr (%s) differs from hash in meta2 (%s)" % (attr_key, xattr_meta.get(attr_key), obj_meta['hash'])) return errors def _check_chunk(self, target): """ Execute various checks on a chunk: - does it appear in object's chunk list? - is it reachable? - are its extended attributes coherent? :returns: the list of errors encountered, and the chunk's owner object metadata. """ chunk = target.chunk errors = list() obj_meta = None xattr_meta = None try: xattr_meta = self.api.blob_client.chunk_head( chunk, xattr=self.full, check_hash=self.integrity) except exc.NotFound as err: self.chunk_not_found += 1 errors.append('Not found: %s' % (err, )) except exc.FaultyChunk as err: self.chunk_exceptions += 1 errors.append('Faulty: %r' % (err, )) except Exception as err: self.chunk_exceptions += 1 errors.append('Check failed: %s' % (err, )) if not target.obj and xattr_meta: self.complete_target_from_chunk_metadata(target, xattr_meta) if target.obj: obj_listing, obj_meta = self.check_obj(target.copy_object()) if chunk not in obj_listing: errors.append('Missing from object listing') db_meta = dict() else: db_meta = obj_listing[chunk] if db_meta and xattr_meta and self.full: errors.extend( self._check_chunk_xattr(target, db_meta, xattr_meta)) self.send_result(target, errors) self.chunks_checked += 1 return errors, obj_meta def check_chunk(self, target): errors, _obj_meta = self._check_chunk(target) return errors def _check_metachunk(self, target, stg_met, pos, chunks, recurse=0): """ Check that a metachunk has the right number of chunks. 
:returns: the list of errors """ required = stg_met.expected_chunks errors = list() if len(chunks) < required: missing_chunks = required - len(chunks) if stg_met.ec: subs = {x['num'] for x in chunks} for sub in range(required): if sub not in subs: errors.append("Missing chunk at position %d.%d" % (pos, sub)) else: for _ in range(missing_chunks): errors.append("Missing chunk at position %d" % pos) if recurse > 0: for chunk in chunks: tcopy = target.copy() tcopy.chunk = chunk['url'] chunk_errors, _ = self._check_chunk(tcopy) if chunk_errors: # The errors have already been reported by _check_chunk, # but we must count this chunk among the unusable chunks # of the current metachunk. errors.append("Unusable chunk %s at position %s" % (chunk['url'], chunk['pos'])) irreparable = required - len(errors) < stg_met.min_chunks_to_read if irreparable: errors.append( "Unavailable metachunk at position %s (%d/%d chunks)" % (pos, required - len(errors), stg_met.expected_chunks)) # Since the "metachunk" is not an official item type, # this method does not report errors itself. Errors will # be reported as object errors. return errors def _check_obj_policy(self, target, obj_meta, chunks, recurse=0): """ Check that the list of chunks of an object matches the object's storage policy. :returns: the list of errors encountered """ stg_met = STORAGE_METHODS.load(obj_meta['chunk_method']) chunks_by_pos = _sort_chunks(chunks, stg_met.ec) tasks = list() for pos, chunks in chunks_by_pos.iteritems(): tasks.append((pos, self.pool.spawn(self._check_metachunk, target.copy(), stg_met, pos, chunks, recurse=recurse))) errors = list() for pos, task in tasks: try: errors.extend(task.wait()) except Exception as err: errors.append("Check failed: pos %d: %s" % (pos, err)) return errors def check_obj_versions(self, target, versions, recurse=0): """ Run checks of all versions of the targeted object in parallel. """ tasks = list() for ov in versions: tcopy = target.copy_object() tcopy.content_id = ov['id'] tcopy.version = str(ov['version']) tasks.append((tcopy.version, self.pool.spawn(self.check_obj, tcopy, recurse=recurse))) errors = list() for version, task in tasks: try: task.wait() except Exception as err: errors.append("Check failed: version %s: %s" % (version, err)) if errors: # Send a result with the target without version to tell # we were not able to check all versions of the object. self.send_result(target, errors) def _load_obj_meta(self, target, errors): """ Load object metadata and chunks. :param target: which object to check. :param errors: list of errors that will be appended in case any error occurs. :returns: a tuple with object metadata and a list of chunks. """ try: return self.api.object_locate(target.account, target.container, target.obj, version=target.version, properties=False) except exc.NoSuchObject as err: self.object_not_found += 1 errors.append('Not found: %s' % (err, )) except Exception as err: self.object_exceptions += 1 errors.append('Check failed: %s' % (err, )) return None, [] def check_obj(self, target, recurse=0): """ Check one object version. If no version is specified, all versions of the object will be checked. :returns: the result of the check of the most recent version, or the one that is explicitly targeted. 
""" account = target.account container = target.container obj = target.obj vers = target.version # can be None if (account, container, obj, vers) in self.running: self.running[(account, container, obj, vers)].wait() if (account, container, obj, vers) in self.list_cache: return self.list_cache[(account, container, obj, vers)] self.running[(account, container, obj, vers)] = Event() self.logger.info('Checking object "%s"', target) container_listing, _ = self.check_container(target.copy_container()) errors = list() if obj not in container_listing: errors.append('Missing from container listing') # checksum = None else: versions = container_listing[obj] if vers is None: if target.content_id is None: # No version specified, check all versions self.check_obj_versions(target.copy_object(), versions, recurse=recurse) # Now return the cached result of the most recent version target.content_id = versions[0]['id'] target.version = str(versions[0]['version']) res = self.check_obj(target, recurse=0) self.running[(account, container, obj, vers)].send(True) del self.running[(account, container, obj, vers)] return res else: for ov in versions: if ov['id'] == target.content_id: vers = str(ov['version']) target.version = vers break else: errors.append('Missing from container listing') # TODO check checksum match # checksum = container_listing[obj]['hash'] pass meta, chunks = self._load_obj_meta(target, errors) chunk_listing = {c['url']: c for c in chunks} if meta: self.list_cache[(account, container, obj, vers)] = \ (chunk_listing, meta) self.objects_checked += 1 self.running[(account, container, obj, vers)].send(True) del self.running[(account, container, obj, vers)] # Skip the check if we could not locate the object if meta: errors.extend( self._check_obj_policy(target, meta, chunks, recurse=recurse)) self.send_result(target, errors) return chunk_listing, meta def check_container(self, target, recurse=0): account = target.account container = target.container if (account, container) in self.running: self.running[(account, container)].wait() if (account, container) in self.list_cache: return self.list_cache[(account, container)] self.running[(account, container)] = Event() self.logger.info('Checking container "%s"', target) account_listing = self.check_account(target.copy_account()) errors = list() if container not in account_listing: errors.append('Missing from account listing') marker = None results = [] ct_meta = dict() extra_args = dict() if self.limit_listings > 1 and target.obj: # When we are explicitly checking one object, start the listing # where this object is supposed to be. Do not use a limit, # but an end marker, in order to fetch all versions of the object. 
extra_args['prefix'] = target.obj extra_args['end_marker'] = target.obj + '\x00' # HACK while True: try: resp = self.api.object_list(account, container, marker=marker, versions=True, **extra_args) except exc.NoSuchContainer as err: self.container_not_found += 1 errors.append('Not found: %s' % (err, )) break except Exception as err: self.container_exceptions += 1 errors.append('Check failed: %s' % (err, )) break if resp.get('truncated', False): marker = resp['next_marker'] if resp['objects']: # safeguard, probably useless if not marker: marker = resp['objects'][-1]['name'] results.extend(resp['objects']) if self.limit_listings > 1: break else: ct_meta = resp ct_meta.pop('objects') break container_listing = dict() # Save all object versions, with the most recent first for obj in results: container_listing.setdefault(obj['name'], list()).append(obj) for versions in container_listing.values(): versions.sort(key=lambda o: o['version'], reverse=True) if self.limit_listings <= 1: # We just listed the whole container, keep the result in a cache self.containers_checked += 1 self.list_cache[(account, container)] = container_listing, ct_meta self.running[(account, container)].send(True) del self.running[(account, container)] if recurse > 0: for obj_vers in container_listing.values(): for obj in obj_vers: tcopy = target.copy_object() tcopy.obj = obj['name'] tcopy.content_id = obj['id'] tcopy.version = str(obj['version']) self.pool.spawn_n(self.check_obj, tcopy, recurse - 1) self.send_result(target, errors) return container_listing, ct_meta def check_account(self, target, recurse=0): account = target.account if account in self.running: self.running[account].wait() if account in self.list_cache: return self.list_cache[account] self.running[account] = Event() self.logger.info('Checking account "%s"', target) errors = list() marker = None results = [] extra_args = dict() if self.limit_listings > 0 and target.container: # When we are explicitly checking one container, start the listing # where this container is supposed to be, and list only one # container. 
extra_args['prefix'] = target.container extra_args['limit'] = 1 while True: try: resp = self.api.container_list(account, marker=marker, **extra_args) except Exception as err: self.account_exceptions += 1 errors.append('Check failed: %s' % (err, )) break if resp: marker = resp[-1][0] results.extend(resp) if self.limit_listings > 0: break else: break containers = dict() for container in results: # Name, number of objects, number of bytes containers[container[0]] = (container[1], container[2]) if self.limit_listings <= 0: # We just listed the whole account, keep the result in a cache self.accounts_checked += 1 self.list_cache[account] = containers self.running[account].send(True) del self.running[account] if recurse > 0: for container in containers: tcopy = target.copy_account() tcopy.container = container self.pool.spawn_n(self.check_container, tcopy, recurse - 1) self.send_result(target, errors) return containers def check(self, target, recurse=0): if target.type == 'chunk': self.pool.spawn_n(self.check_chunk, target) elif target.type == 'object': self.pool.spawn_n(self.check_obj, target, recurse) elif target.type == 'container': self.pool.spawn_n(self.check_container, target, recurse) else: self.pool.spawn_n(self.check_account, target, recurse) def fetch_results(self): while not self.result_queue.empty(): res = self.result_queue.get(True) yield res def log_result(self, result): if result.errors: if result.target.type == 'chunk': # FIXME(FVE): check error criticity # and set the irreparable flag. self.write_chunk_error(result.target) else: self.write_error(result.target) self.logger.warn('%s:\n%s', result.target, result.errors_to_str(err_format=' %s')) def run(self): """ Fetch results and write logs until all jobs have finished. :returns: a generator yielding check results. """ while self.pool.running() + self.pool.waiting(): for result in self.fetch_results(): self.log_result(result) yield result sleep(0.1) self.pool.waitall() for result in self.fetch_results(): self.log_result(result) yield result def report(self): success = True def _report_stat(name, stat): print("{0:18}: {1}".format(name, stat)) print() print('Report') _report_stat("Accounts checked", self.accounts_checked) if self.account_not_found: success = False _report_stat("Missing accounts", self.account_not_found) if self.account_exceptions: success = False _report_stat("Exceptions", self.account_exceptions) print() _report_stat("Containers checked", self.containers_checked) if self.container_not_found: success = False _report_stat("Missing containers", self.container_not_found) if self.container_exceptions: success = False _report_stat("Exceptions", self.container_exceptions) print() _report_stat("Objects checked", self.objects_checked) if self.object_not_found: success = False _report_stat("Missing objects", self.object_not_found) if self.object_exceptions: success = False _report_stat("Exceptions", self.object_exceptions) print() _report_stat("Chunks checked", self.chunks_checked) if self.chunk_not_found: success = False _report_stat("Missing chunks", self.chunk_not_found) if self.chunk_exceptions: success = False _report_stat("Exceptions", self.chunk_exceptions) return success
class Checker(object): def __init__(self, namespace, concurrency=50, error_file=None, rebuild_file=None, full=True, limit_listings=0, request_attempts=1): self.pool = GreenPool(concurrency) self.error_file = error_file self.full = bool(full) # Optimisation for when we are only checking one object # or one container. # 0 -> do not limit # 1 -> limit account listings (list of containers) # 2 -> limit container listings (list of objects) self.limit_listings = limit_listings if self.error_file: f = open(self.error_file, 'a') self.error_writer = csv.writer(f, delimiter=' ') self.rebuild_file = rebuild_file if self.rebuild_file: fd = open(self.rebuild_file, 'a') self.rebuild_writer = csv.writer(fd, delimiter='|') conf = {'namespace': namespace} self.account_client = AccountClient( conf, max_retries=request_attempts - 1) self.container_client = ContainerClient( conf, max_retries=request_attempts - 1, request_attempts=request_attempts) self.blob_client = BlobClient(conf=conf) self.accounts_checked = 0 self.containers_checked = 0 self.objects_checked = 0 self.chunks_checked = 0 self.account_not_found = 0 self.container_not_found = 0 self.object_not_found = 0 self.chunk_not_found = 0 self.account_exceptions = 0 self.container_exceptions = 0 self.object_exceptions = 0 self.chunk_exceptions = 0 self.list_cache = {} self.running = {} def write_error(self, target): error = [target.account] if target.container: error.append(target.container) if target.obj: error.append(target.obj) if target.chunk: error.append(target.chunk) self.error_writer.writerow(error) def write_rebuilder_input(self, target, obj_meta, ct_meta): try: cid = ct_meta['system']['sys.name'].split('.', 1)[0] except KeyError: cid = ct_meta['properties']['sys.name'].split('.', 1)[0] self.rebuild_writer.writerow((cid, obj_meta['id'], target.chunk)) def write_chunk_error(self, target, obj_meta, chunk=None): if chunk is not None: target = target.copy() target.chunk = chunk if self.error_file: self.write_error(target) if self.rebuild_file: self.write_rebuilder_input( target, obj_meta, self.list_cache[(target.account, target.container)][1]) def _check_chunk_xattr(self, target, obj_meta, xattr_meta): error = False # Composed position -> erasure coding attr_prefix = 'meta' if '.' 
in obj_meta['pos'] else '' attr_key = attr_prefix + 'chunk_size' if str(obj_meta['size']) != xattr_meta.get(attr_key): print(" Chunk %s '%s' xattr (%s) " "differs from size in meta2 (%s)" % (target, attr_key, xattr_meta.get(attr_key), obj_meta['size'])) error = True attr_key = attr_prefix + 'chunk_hash' if obj_meta['hash'] != xattr_meta.get(attr_key): print(" Chunk %s '%s' xattr (%s) " "differs from hash in meta2 (%s)" % (target, attr_key, xattr_meta.get(attr_key), obj_meta['hash'])) error = True return error def check_chunk(self, target): chunk = target.chunk obj_listing, obj_meta = self.check_obj(target) error = False if chunk not in obj_listing: print(' Chunk %s missing from object listing' % target) error = True db_meta = dict() else: db_meta = obj_listing[chunk] try: xattr_meta = self.blob_client.chunk_head(chunk, xattr=self.full) except exc.NotFound as e: self.chunk_not_found += 1 error = True print(' Not found chunk "%s": %s' % (target, str(e))) except Exception as e: self.chunk_exceptions += 1 error = True print(' Exception chunk "%s": %s' % (target, str(e))) else: if db_meta and self.full: error = self._check_chunk_xattr(target, db_meta, xattr_meta) if error: self.write_chunk_error(target, obj_meta) self.chunks_checked += 1 def check_obj_policy(self, target, obj_meta, chunks): """ Check that the list of chunks of an object matches the object's storage policy. """ stg_met = STORAGE_METHODS.load(obj_meta['chunk_method']) chunks_by_pos = _sort_chunks(chunks, stg_met.ec) if stg_met.ec: required = stg_met.ec_nb_data + stg_met.ec_nb_parity else: required = stg_met.nb_copy for pos, clist in chunks_by_pos.iteritems(): if len(clist) < required: print(' Missing %d chunks at position %s of %s' % ( required - len(clist), pos, target)) if stg_met.ec: subs = {x['num'] for x in clist} for sub in range(required): if sub not in subs: self.write_chunk_error(target, obj_meta, '%d.%d' % (pos, sub)) else: self.write_chunk_error(target, obj_meta, str(pos)) def check_obj(self, target, recurse=False): account = target.account container = target.container obj = target.obj if (account, container, obj) in self.running: self.running[(account, container, obj)].wait() if (account, container, obj) in self.list_cache: return self.list_cache[(account, container, obj)] self.running[(account, container, obj)] = Event() print('Checking object "%s"' % target) container_listing, ct_meta = self.check_container(target) error = False if obj not in container_listing: print(' Object %s missing from container listing' % target) error = True # checksum = None else: # TODO check checksum match # checksum = container_listing[obj]['hash'] pass results = [] meta = dict() try: meta, results = self.container_client.content_locate( account=account, reference=container, path=obj, properties=False) except exc.NotFound as e: self.object_not_found += 1 error = True print(' Not found object "%s": %s' % (target, str(e))) except Exception as e: self.object_exceptions += 1 error = True print(' Exception object "%s": %s' % (target, str(e))) chunk_listing = dict() for chunk in results: chunk_listing[chunk['url']] = chunk # Skip the check if we could not locate the object if meta: self.check_obj_policy(target.copy(), meta, results) self.list_cache[(account, container, obj)] = (chunk_listing, meta) self.objects_checked += 1 self.running[(account, container, obj)].send(True) del self.running[(account, container, obj)] if recurse: for chunk in chunk_listing: t = target.copy() t.chunk = chunk self.pool.spawn_n(self.check_chunk, t) if error and 
self.error_file: self.write_error(target) return chunk_listing, meta def check_container(self, target, recurse=False): account = target.account container = target.container if (account, container) in self.running: self.running[(account, container)].wait() if (account, container) in self.list_cache: return self.list_cache[(account, container)] self.running[(account, container)] = Event() print('Checking container "%s"' % target) account_listing = self.check_account(target) error = False if container not in account_listing: error = True print(' Container %s missing from account listing' % target) marker = None results = [] ct_meta = dict() extra_args = dict() if self.limit_listings > 1 and target.obj: # When we are explicitly checking one object, start the listing # where this object is supposed to be, and list only one object. extra_args['prefix'] = target.obj extra_args['limit'] = 1 while True: try: _, resp = self.container_client.content_list( account=account, reference=container, marker=marker, **extra_args) except exc.NotFound as e: self.container_not_found += 1 error = True print(' Not found container "%s": %s' % (target, str(e))) break except Exception as e: self.container_exceptions += 1 error = True print(' Exception container "%s": %s' % (target, str(e))) break if resp['objects']: marker = resp['objects'][-1]['name'] results.extend(resp['objects']) if self.limit_listings > 1: break else: ct_meta = resp ct_meta.pop('objects') break container_listing = dict() for obj in results: container_listing[obj['name']] = obj if self.limit_listings <= 1: # We just listed the whole container, keep the result in a cache self.containers_checked += 1 self.list_cache[(account, container)] = container_listing, ct_meta self.running[(account, container)].send(True) del self.running[(account, container)] if recurse: for obj in container_listing: t = target.copy() t.obj = obj self.pool.spawn_n(self.check_obj, t, True) if error and self.error_file: self.write_error(target) return container_listing, ct_meta def check_account(self, target, recurse=False): account = target.account if account in self.running: self.running[account].wait() if account in self.list_cache: return self.list_cache[account] self.running[account] = Event() print('Checking account "%s"' % target) error = False marker = None results = [] extra_args = dict() if self.limit_listings > 0 and target.container: # When we are explicitly checking one container, start the listing # where this container is supposed to be, and list only one # container. 
extra_args['prefix'] = target.container extra_args['limit'] = 1 while True: try: resp = self.account_client.container_list( account, marker=marker, **extra_args) except Exception as e: self.account_exceptions += 1 error = True print(' Exception account "%s": %s' % (target, str(e))) break if resp['listing']: marker = resp['listing'][-1][0] results.extend(resp['listing']) if self.limit_listings > 0: break else: break containers = dict() for e in results: containers[e[0]] = (e[1], e[2]) if self.limit_listings <= 0: # We just listed the whole account, keep the result in a cache self.accounts_checked += 1 self.list_cache[account] = containers self.running[account].send(True) del self.running[account] if recurse: for container in containers: t = target.copy() t.container = container self.pool.spawn_n(self.check_container, t, True) if error and self.error_file: self.write_error(target) return containers def check(self, target): if target.chunk and target.obj and target.container: self.pool.spawn_n(self.check_chunk, target) elif target.obj and target.container: self.pool.spawn_n(self.check_obj, target, True) elif target.container: self.pool.spawn_n(self.check_container, target, True) else: self.pool.spawn_n(self.check_account, target, True) def wait(self): self.pool.waitall() def report(self): success = True def _report_stat(name, stat): print("{0:18}: {1}".format(name, stat)) print() print('Report') _report_stat("Accounts checked", self.accounts_checked) if self.account_not_found: success = False _report_stat("Missing accounts", self.account_not_found) if self.account_exceptions: success = False _report_stat("Exceptions", self.account_exceptions) print() _report_stat("Containers checked", self.containers_checked) if self.container_not_found: success = False _report_stat("Missing containers", self.container_not_found) if self.container_exceptions: success = False _report_stat("Exceptions", self.container_exceptions) print() _report_stat("Objects checked", self.objects_checked) if self.object_not_found: success = False _report_stat("Missing objects", self.object_not_found) if self.object_exceptions: success = False _report_stat("Exceptions", self.object_exceptions) print() _report_stat("Chunks checked", self.chunks_checked) if self.chunk_not_found: success = False _report_stat("Missing chunks", self.chunk_not_found) if self.chunk_exceptions: success = False _report_stat("Exceptions", self.chunk_exceptions) return success
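# Minimal usage sketch for this older Checker variant (not part of the
# original module): it has no result queue, so callers simply wait for
# the pool and then print the report. Names are placeholders, and the
# Target-like object only needs the account/container/obj/chunk
# attributes read by check().
if __name__ == '__main__':
    checker = Checker('OPENIO', error_file='/tmp/integrity_errors.csv')
    target = Target('AUTH_demo')
    target.container = 'my_container'
    checker.check(target)   # recurses from container to objects to chunks
    checker.wait()           # block until all spawned checks finish
    if not checker.report():
        print('Some checks failed')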