def __init__(self, conf, logger=None, **kwargs):
    """
    Build a chunk converter bound to a single volume.

    :param conf: configuration dictionary; must contain a 'volume' key.
    :param logger: optional logger, created from *conf* when omitted.
    :param kwargs: extra keyword arguments forwarded to the container client.
    :raises ConfigurationException: when no volume is configured.
    """
    self.conf = conf
    self.logger = logger or get_logger(conf)

    # The volume is mandatory: refuse to start without one.
    vol = conf.get('volume')
    if not vol:
        raise ConfigurationException('No volume specified for converter')
    self.volume = vol
    self.namespace, self.volume_id = check_volume(self.volume)

    # Caches mapping container IDs to names and object names to content IDs.
    self.name_by_cid = CacheDict()
    self.content_id_by_name = CacheDict()

    # Clients talking to the rest of the platform.
    self.container_client = ContainerClient(conf, **kwargs)
    self.content_factory = ContentFactory(conf, self.container_client,
                                          logger=self.logger)
    self._rdir = None  # lazily created; we may never need it

    # Counters and reporting state.
    self.errors = 0
    self.passes = 0
    self.total_chunks_processed = 0
    self.start_time = 0
    self.last_reported = 0
    self.report_interval = int_value(conf.get('report_interval'), 3600)

    # Rate limiting.
    self.chunks_run_time = 0
    self.max_chunks_per_second = int_value(
        conf.get('chunks_per_second'), 30)

    # Backup of converted chunks (unless explicitly disabled).
    self.no_backup = true_value(conf.get('no_backup', False))
    self.backup_dir = conf.get('backup_dir') or tempfile.gettempdir()
    self.backup_name = 'backup_%s_%f' \
        % (self.volume_id, time.time())

    # Dry-run mode: analyse but do not modify anything.
    self.dry_run = true_value(conf.get('dry_run', False))
def init(self):
    """
    Finish the filter initialisation: read tunables from the
    configuration and set up the account client and local cache.
    """
    super(ReplicateFilter, self).init()
    self.account = self.app_env['account_client']

    get = self.conf.get
    # How long cache entries stay valid, and how many are kept.
    self.cache_duration = float_value(get('cache_duration'),
                                      CACHE_DURATION)
    self.cache_size = int_value(get('cache_size'), CACHE_SIZE)
    self.cache = CacheDict(self.cache_size)
    # Whether to ask the account service if replication is enabled.
    self.check_account = boolean_value(
        get('check_replication_enabled'), False)
    # Network timeouts (seconds).
    self.connection_timeout = float_value(get('connection_timeout'),
                                          CONNECTION_TIMEOUT)
    self.read_timeout = float_value(get('read_timeout'), READ_TIMEOUT)
def run(self, rate_limiter=None):
    """
    Fetch results and write logs until all jobs have finished.

    :returns: a generator yielding check results.
    """
    # Drain results while workers are still active (or queued).
    while self.running and (self.pool.running() + self.pool.waiting()):
        for res in self.fetch_results(rate_limiter):
            self.log_result(res)
            yield res
        sleep(0.1)
    if self.running:
        # All tasks submitted; wait for stragglers, then drain the
        # remaining results without any rate limiting.
        self.pool.waitall()
        for res in self.fetch_results():
            self.log_result(res)
            yield res
    # Drop the listing cache, keeping only its configured capacity.
    self.list_cache = CacheDict(self.list_cache.size)
def __init__(self, namespace, concurrency=50,
             error_file=None, rebuild_file=None, check_xattr=True,
             limit_listings=0, request_attempts=1,
             logger=None, verbose=False, check_hash=False,
             min_time_in_error=0.0, required_confirmations=0,
             beanstalkd_addr=None,
             beanstalkd_tube=BlobRebuilder.DEFAULT_BEANSTALKD_WORKER_TUBE,
             cache_size=2**24, **_kwargs):
    """
    Build an integrity checker for the given namespace.

    :param namespace: name of the namespace to check.
    :param concurrency: maximum number of concurrent green threads.
    :param error_file: optional path of a file where errors are appended.
    :param rebuild_file: optional path of a file listing chunks to rebuild.
    :param check_xattr: also verify chunk extended attributes.
    :param limit_listings: 0 = no limit, 1 = limit account listings,
        2 = limit container listings (optimisation for single targets).
    :param request_attempts: number of attempts for each API request.
    :param check_hash: also verify chunk checksums.
    :param min_time_in_error: minimum time (seconds) a target must stay
        in error before a reconstruction action is triggered.
    :param required_confirmations: number of confirmations of an error
        required before triggering a reconstruction action.
    :param beanstalkd_addr: optional address of a beanstalkd service to
        send rebuild orders to.
    :param cache_size: maximum number of entries in the listing cache.
    """
    self.pool = GreenPool(concurrency)
    self.error_file = error_file
    self.error_sender = None
    self.check_xattr = bool(check_xattr)
    self.check_hash = bool(check_hash)
    self.logger = logger or get_logger(
        {'namespace': namespace}, name='integrity', verbose=verbose)
    # Optimisation for when we are only checking one object
    # or one container.
    # 0 -> do not limit
    # 1 -> limit account listings (list of containers)
    # 2 -> limit container listings (list of objects)
    self.limit_listings = limit_listings
    # Keep the file handles as attributes so they can be closed later
    # (the error file handle used to leak into an unreachable local).
    # newline='' is the documented way to open files for csv writers,
    # and avoids doubled line endings on platforms translating '\n'.
    self.error_fd = None
    if self.error_file:
        self.error_fd = open(self.error_file, 'a', newline='')
        self.error_writer = csv.writer(self.error_fd, delimiter=' ')
    self.rebuild_file = rebuild_file
    self.fd = None
    if self.rebuild_file:
        self.fd = open(self.rebuild_file, 'a', newline='')
        self.rebuild_writer = csv.writer(self.fd, delimiter='|')
    if beanstalkd_addr:
        self.error_sender = BeanstalkdSender(beanstalkd_addr,
                                             beanstalkd_tube, self.logger)
    self.api = ObjectStorageApi(namespace, logger=self.logger,
                                max_retries=request_attempts - 1,
                                request_attempts=request_attempts)
    self.rdir_client = RdirClient({"namespace": namespace},
                                  logger=self.logger)
    # Counters for the final report.
    self.accounts_checked = 0
    self.containers_checked = 0
    self.objects_checked = 0
    self.chunks_checked = 0
    self.account_not_found = 0
    self.container_not_found = 0
    self.object_not_found = 0
    self.chunk_not_found = 0
    self.account_exceptions = 0
    self.container_exceptions = 0
    self.object_exceptions = 0
    self.chunk_exceptions = 0
    self.list_cache = CacheDict(cache_size)
    self.running_tasks = {}
    self.running_lock = Semaphore(1)
    self.result_queue = LightQueue(concurrency)
    self.running = True
    self.run_time = 0
    # Set of targets which must be checked again, to confirm
    # or deny the issues reported by previous passes.
    self.delayed_targets = dict()
    # Minimum time in error and number of confirmations of the error
    # before triggering a reconstruction action.
    self.min_time_in_error = min_time_in_error
    self.required_confirmations = required_confirmations
def __init__(self, conf, logger=None):
    """
    Keep the configuration, a logger, the replies received from
    beanstalkd, and a small cache of tasks.

    :param conf: configuration dictionary.
    :param logger: optional logger, created from *conf* when omitted.
    """
    self.conf = conf
    self.logger = logger if logger else get_logger(self.conf)
    # Replies received from beanstalkd, indexed per task.
    self.beanstalkd_replies = dict()
    # Bounded cache of recently seen tasks.
    self.tasks = CacheDict(size=10)