def test_non_dir_contents(self):
    """Plain files at drive/partition/suffix level must be skipped.

    audit_location_generator should yield nothing when every level of
    the tree contains only stray regular files instead of hash dirs.
    """
    with temptree([]) as tmpdir:
        data_dir = os.path.join(tmpdir, "drive", "data")
        os.makedirs(data_dir)
        # stray file where a partition directory is expected
        with open(os.path.join(data_dir, "partition1"), "w"):
            pass
        part_dir = os.path.join(data_dir, "partition2")
        os.makedirs(part_dir)
        # stray file where a suffix directory is expected
        with open(os.path.join(part_dir, "suffix1"), "w"):
            pass
        suffix_dir = os.path.join(part_dir, "suffix2")
        os.makedirs(suffix_dir)
        # stray file where a hash directory is expected
        with open(os.path.join(suffix_dir, "hash1"), "w"):
            pass
        found = ondisk.audit_location_generator(
            tmpdir, "data", mount_check=False)
        self.assertEqual(list(found), [])
def _one_audit_pass(self, reported):
    """Audit every container DB on this node's devices once.

    Logs and dumps recon stats at most once an hour, resetting the
    pass/fail counters after each report.

    :param reported: timestamp of the last stats report
    :returns: timestamp of the most recent stats report
    """
    locations = audit_location_generator(self.devices,
                                         container_server.DATADIR, '.db',
                                         mount_check=self.mount_check,
                                         logger=self.logger)
    for path, device, partition in locations:
        self.container_audit(path)
        if time.time() - reported >= 3600:  # once an hour
            self.logger.info(
                _('Since %(time)s: Container audits: %(pass)s passed '
                  'audit, %(fail)s failed audit'),
                {'time': time.ctime(reported),
                 'pass': self.container_passes,
                 'fail': self.container_failures})
            dump_recon_cache(
                {'container_audits_since': reported,
                 'container_audits_passed': self.container_passes,
                 'container_audits_failed': self.container_failures},
                self.rcache, self.logger)
            reported = time.time()
            self.container_passes = 0
            self.container_failures = 0
        # rate-limit the walk to max_containers_per_second
        self.containers_running_time = ratelimit_sleep(
            self.containers_running_time, self.max_containers_per_second)
    return reported
def _one_audit_pass(self, reported):
    """Run a single audit sweep over all container databases.

    :param reported: time of the last hourly stats report
    :returns: time of the last hourly stats report (possibly updated)
    """
    for path, device, partition in audit_location_generator(
            self.devices, container_server.DATADIR, '.db',
            mount_check=self.mount_check, logger=self.logger):
        self.container_audit(path)
        if time.time() - reported >= 3600:  # once an hour
            log_args = {'time': time.ctime(reported),
                        'pass': self.container_passes,
                        'fail': self.container_failures}
            self.logger.info(
                _('Since %(time)s: Container audits: %(pass)s passed '
                  'audit, %(fail)s failed audit'), log_args)
            recon_stats = {
                'container_audits_since': reported,
                'container_audits_passed': self.container_passes,
                'container_audits_failed': self.container_failures}
            dump_recon_cache(recon_stats, self.rcache, self.logger)
            reported = time.time()
            # start a fresh reporting interval
            self.container_passes = 0
            self.container_failures = 0
        # throttle to max_containers_per_second
        self.containers_running_time = ratelimit_sleep(
            self.containers_running_time, self.max_containers_per_second)
    return reported
def run_once(self):
    """Perform one full container sync sweep, then report."""
    self.logger.info(_('Begin container sync "once" mode'))
    start = time()
    sync_targets = audit_location_generator(
        self.devices, container_server.DATADIR, '.db',
        mount_check=self.mount_check, logger=self.logger)
    for path, device, partition in sync_targets:
        self.container_sync(path)
        if time() - self.reported >= 3600:  # once an hour
            self.report()
    # final report for whatever remains since the last hourly one
    self.report()
    elapsed = time() - start
    self.logger.info(
        _('Container sync "once" mode completed: %.02fs'), elapsed)
def run_forever(self):
    """Run container sync sweeps in a loop until the process is killed."""
    # random initial delay staggers start-up across nodes
    sleep(random() * self.interval)
    while True:
        start = time()
        for path, device, partition in audit_location_generator(
                self.devices, container_server.DATADIR, '.db',
                mount_check=self.mount_check, logger=self.logger):
            self.container_sync(path)
            if time() - self.reported >= 3600:  # once an hour
                self.report()
        elapsed = time() - start
        # pad short sweeps out to the configured interval
        if elapsed < self.interval:
            sleep(self.interval - elapsed)
def run_once(self):
    """Execute a single container sync scan and log its duration."""
    self.logger.info(_('Begin container sync "once" mode'))
    begin = time()
    locations = audit_location_generator(self.devices,
                                         container_server.DATADIR, '.db',
                                         mount_check=self.mount_check,
                                         logger=self.logger)
    for path, device, partition in locations:
        self.container_sync(path)
        if time() - self.reported >= 3600:  # once an hour
            self.report()
    # one last report so the tail of the scan is accounted for
    self.report()
    self.logger.info(_('Container sync "once" mode completed: %.02fs'),
                     time() - begin)
def audit_all_objects(self, mode='once'):
    """
    Audit every .data file found under this node's object devices.

    Logs a progress line and dumps recon stats every ``self.log_time``
    seconds, then logs a run summary when the walk completes.

    :param mode: run-mode label ('once' or 'forever'); used only in
                 log messages
    """
    self.logger.info(_('Begin object audit "%s" mode (%s)') %
                     (mode, self.auditor_type))
    begin = reported = time.time()
    # run-wide totals; the self.* counters are per reporting interval
    self.total_bytes_processed = 0
    self.total_files_processed = 0
    total_quarantines = 0
    total_errors = 0
    time_auditing = 0
    all_locs = audit_location_generator(self.devices,
                                        object_server.DATADIR, '.data',
                                        mount_check=self.mount_check,
                                        logger=self.logger)
    for path, device, partition in all_locs:
        loop_time = time.time()
        self.failsafe_object_audit(path, device, partition)
        self.logger.timing_since('timing', loop_time)
        # throttle the walk to max_files_per_second
        self.files_running_time = ratelimit_sleep(
            self.files_running_time, self.max_files_per_second)
        self.total_files_processed += 1
        now = time.time()
        if now - reported >= self.log_time:
            self.logger.info(_(
                'Object audit (%(type)s). '
                'Since %(start_time)s: Locally: %(passes)d passed, '
                '%(quars)d quarantined, %(errors)d errors '
                'files/sec: %(frate).2f , bytes/sec: %(brate).2f, '
                'Total time: %(total).2f, Auditing time: %(audit).2f, '
                'Rate: %(audit_rate).2f') % {
                    'type': self.auditor_type,
                    'start_time': time.ctime(reported),
                    'passes': self.passes,
                    'quars': self.quarantines,
                    'errors': self.errors,
                    'frate': self.passes / (now - reported),
                    'brate': self.bytes_processed / (now - reported),
                    'total': (now - begin),
                    'audit': time_auditing,
                    'audit_rate': time_auditing / (now - begin)})
            dump_recon_cache(
                {'object_auditor_stats_%s' % self.auditor_type: {
                    'errors': self.errors,
                    'passes': self.passes,
                    'quarantined': self.quarantines,
                    'bytes_processed': self.bytes_processed,
                    'start_time': reported,
                    'audit_time': time_auditing}},
                self.rcache, self.logger)
            # roll interval counters into run totals, then reset them
            reported = now
            total_quarantines += self.quarantines
            total_errors += self.errors
            self.passes = 0
            self.quarantines = 0
            self.errors = 0
            self.bytes_processed = 0
        time_auditing += (now - loop_time)
    # Avoid divide by zero during very short runs
    elapsed = (time.time() - begin) or 0.000001
    self.logger.info(_(
        'Object audit (%(type)s) "%(mode)s" mode '
        'completed: %(elapsed).02fs. Total quarantined: %(quars)d, '
        'Total errors: %(errors)d, Total files/sec: %(frate).2f , '
        'Total bytes/sec: %(brate).2f, Auditing time: %(audit).2f, '
        'Rate: %(audit_rate).2f') % {
            'type': self.auditor_type,
            'mode': mode,
            'elapsed': elapsed,
            'quars': total_quarantines,
            'errors': total_errors,
            'frate': self.total_files_processed / elapsed,
            'brate': self.total_bytes_processed / elapsed,
            'audit': time_auditing,
            'audit_rate': time_auditing / elapsed})
    if self.stats_sizes:
        self.logger.info(
            _('Object audit stats: %s') % json.dumps(self.stats_buckets))
def audit_all_objects(self, mode='once'):
    """
    Walk all .data files on this node's object devices and audit each.

    A progress line is logged (and recon stats dumped) once every
    ``self.log_time`` seconds; a final summary is logged at the end.

    :param mode: label used only in log output ('once' or 'forever')
    """
    self.logger.info(
        _('Begin object audit "%s" mode (%s)') % (mode, self.auditor_type))
    begin = reported = time.time()
    # totals for the whole run; self.* counters cover one report interval
    self.total_bytes_processed = 0
    self.total_files_processed = 0
    total_quarantines = 0
    total_errors = 0
    time_auditing = 0
    all_locs = audit_location_generator(self.devices,
                                        object_server.DATADIR, '.data',
                                        mount_check=self.mount_check,
                                        logger=self.logger)
    for path, device, partition in all_locs:
        loop_time = time.time()
        self.failsafe_object_audit(path, device, partition)
        self.logger.timing_since('timing', loop_time)
        # rate-limit to max_files_per_second
        self.files_running_time = ratelimit_sleep(
            self.files_running_time, self.max_files_per_second)
        self.total_files_processed += 1
        now = time.time()
        if now - reported >= self.log_time:
            self.logger.info(
                _('Object audit (%(type)s). '
                  'Since %(start_time)s: Locally: %(passes)d passed, '
                  '%(quars)d quarantined, %(errors)d errors '
                  'files/sec: %(frate).2f , bytes/sec: %(brate).2f, '
                  'Total time: %(total).2f, Auditing time: %(audit).2f, '
                  'Rate: %(audit_rate).2f') % {
                      'type': self.auditor_type,
                      'start_time': time.ctime(reported),
                      'passes': self.passes,
                      'quars': self.quarantines,
                      'errors': self.errors,
                      'frate': self.passes / (now - reported),
                      'brate': self.bytes_processed / (now - reported),
                      'total': (now - begin),
                      'audit': time_auditing,
                      'audit_rate': time_auditing / (now - begin)
                  })
            dump_recon_cache(
                {
                    'object_auditor_stats_%s' % self.auditor_type: {
                        'errors': self.errors,
                        'passes': self.passes,
                        'quarantined': self.quarantines,
                        'bytes_processed': self.bytes_processed,
                        'start_time': reported,
                        'audit_time': time_auditing
                    }
                }, self.rcache, self.logger)
            # fold the interval counters into the run totals and reset
            reported = now
            total_quarantines += self.quarantines
            total_errors += self.errors
            self.passes = 0
            self.quarantines = 0
            self.errors = 0
            self.bytes_processed = 0
        time_auditing += (now - loop_time)
    # Avoid divide by zero during very short runs
    elapsed = (time.time() - begin) or 0.000001
    self.logger.info(
        _('Object audit (%(type)s) "%(mode)s" mode '
          'completed: %(elapsed).02fs. Total quarantined: %(quars)d, '
          'Total errors: %(errors)d, Total files/sec: %(frate).2f , '
          'Total bytes/sec: %(brate).2f, Auditing time: %(audit).2f, '
          'Rate: %(audit_rate).2f') % {
              'type': self.auditor_type,
              'mode': mode,
              'elapsed': elapsed,
              'quars': total_quarantines,
              'errors': total_errors,
              'frate': self.total_files_processed / elapsed,
              'brate': self.total_bytes_processed / elapsed,
              'audit': time_auditing,
              'audit_rate': time_auditing / elapsed
          })
    if self.stats_sizes:
        self.logger.info(
            _('Object audit stats: %s') % json.dumps(self.stats_buckets))
def audit_all_objects(self, mode='once'):
    """
    Audit every object .data file reachable from this node's devices.

    Interval stats are logged and dumped to the recon cache every
    ``self.log_time`` seconds; a summary line is logged when done.

    :param mode: label used in log messages only ('once' or 'forever')
    """
    self.logger.info(_('Begin object audit "%s" mode (%s)') %
                     (mode, self.auditor_type))
    start = interval_start = time.time()
    self.total_bytes_processed = 0
    self.total_files_processed = 0
    run_quarantines = 0
    run_errors = 0
    time_auditing = 0
    locations = audit_location_generator(
        self.devices, object_server.DATADIR, '.data',
        mount_check=self.mount_check, logger=self.logger)
    for path, device, partition in locations:
        item_start = time.time()
        self.failsafe_object_audit(path, device, partition)
        self.logger.timing_since('timing', item_start)
        # keep the sweep under max_files_per_second
        self.files_running_time = ratelimit_sleep(
            self.files_running_time, self.max_files_per_second)
        self.total_files_processed += 1
        checkpoint = time.time()
        if checkpoint - interval_start >= self.log_time:
            since = checkpoint - interval_start
            self.logger.info(_(
                'Object audit (%(type)s). '
                'Since %(start_time)s: Locally: %(passes)d passed, '
                '%(quars)d quarantined, %(errors)d errors '
                'files/sec: %(frate).2f , bytes/sec: %(brate).2f, '
                'Total time: %(total).2f, Auditing time: %(audit).2f, '
                'Rate: %(audit_rate).2f') % {
                    'type': self.auditor_type,
                    'start_time': time.ctime(interval_start),
                    'passes': self.passes,
                    'quars': self.quarantines,
                    'errors': self.errors,
                    'frate': self.passes / since,
                    'brate': self.bytes_processed / since,
                    'total': (checkpoint - start),
                    'audit': time_auditing,
                    'audit_rate': time_auditing / (checkpoint - start)})
            dump_recon_cache(
                {'object_auditor_stats_%s' % self.auditor_type: {
                    'errors': self.errors,
                    'passes': self.passes,
                    'quarantined': self.quarantines,
                    'bytes_processed': self.bytes_processed,
                    'start_time': interval_start,
                    'audit_time': time_auditing}},
                self.rcache, self.logger)
            # accumulate interval counters into run totals, then reset
            run_quarantines += self.quarantines
            run_errors += self.errors
            interval_start = checkpoint
            self.passes = 0
            self.quarantines = 0
            self.errors = 0
            self.bytes_processed = 0
        time_auditing += checkpoint - item_start
    # Avoid divide by zero during very short runs
    elapsed = (time.time() - start) or 0.000001
    self.logger.info(_(
        'Object audit (%(type)s) "%(mode)s" mode '
        'completed: %(elapsed).02fs. Total quarantined: %(quars)d, '
        'Total errors: %(errors)d, Total files/sec: %(frate).2f , '
        'Total bytes/sec: %(brate).2f, Auditing time: %(audit).2f, '
        'Rate: %(audit_rate).2f') % {
            'type': self.auditor_type,
            'mode': mode,
            'elapsed': elapsed,
            'quars': run_quarantines,
            'errors': run_errors,
            'frate': self.total_files_processed / elapsed,
            'brate': self.total_bytes_processed / elapsed,
            'audit': time_auditing,
            'audit_rate': time_auditing / elapsed})
    if self.stats_sizes:
        self.logger.info(_('Object audit stats: %s') %
                         json.dumps(self.stats_buckets))