def __init__(self, conf, logger=None, **kwargs):
    """Set up the chunk converter for a single rawx volume.

    :param conf: configuration dictionary, must contain a 'volume' key.
    :param logger: optional logger; one is built from conf when omitted.
    :raises ConfigurationException: when no volume is configured.
    """
    self.conf = conf
    self.logger = logger or get_logger(conf)
    vol = conf.get('volume')
    if not vol:
        raise ConfigurationException('No volume specified for converter')
    self.volume = vol
    self.namespace, self.volume_id = check_volume(self.volume)
    # Caches: container ID -> name, and content name -> content ID.
    self.name_by_cid = CacheDict()
    self.content_id_by_name = CacheDict()
    # Clients used to resolve and rewrite chunk metadata.
    self.container_client = ContainerClient(conf, **kwargs)
    self.content_factory = ContentFactory(
        conf, self.container_client, logger=self.logger)
    # Statistics and reporting state.
    self.errors = 0
    self.passes = 0
    self.total_chunks_processed = 0
    self.start_time = 0
    self.last_reported = 0
    self.report_interval = int_value(conf.get('report_interval'), 3600)
    # Rate limiting.
    self.chunks_run_time = 0
    self.max_chunks_per_second = int_value(
        conf.get('chunks_per_second'), 30)
    # Backup settings (backups enabled unless 'no_backup' is truthy).
    self.no_backup = true_value(conf.get('no_backup', False))
    self.backup_dir = conf.get('backup_dir') or tempfile.gettempdir()
    self.backup_name = 'backup_%s_%f' % (self.volume_id, time.time())
    # When dry_run is set, nothing is actually rewritten.
    self.dry_run = true_value(conf.get('dry_run', False))
def __init__(self, conf, logger, volume):
    """Set up the chunk mover worker for one rawx volume.

    :param conf: configuration dictionary.
    :param logger: logger instance; one is built from conf when falsy.
    :param volume: path to the rawx volume to drain.
    """
    self.conf = conf
    self.logger = logger or get_logger(conf)
    self.volume = volume
    self.namespace, self.address = check_volume(self.volume)
    self.running = False
    # Counters and reporting state.
    self.run_time = 0
    self.passes = 0
    self.errors = 0
    self.last_reported = 0
    self.last_usage_check = 0
    self.chunks_run_time = 0
    self.bytes_running_time = 0
    self.bytes_processed = 0
    self.total_bytes_processed = 0
    self.total_chunks_processed = 0
    # Tunables.
    self.concurrency = int_value(conf.get('concurrency'), 10)
    self.usage_target = int_value(conf.get('usage_target'), 0)
    self.usage_check_interval = int_value(
        conf.get('usage_check_interval'), 60)
    self.report_interval = int_value(conf.get('report_interval'), 3600)
    self.max_chunks_per_second = int_value(
        conf.get('chunks_per_second'), 30)
    self.limit = int_value(conf.get('limit'), 0)
    self.allow_links = true_value(conf.get('allow_links', True))
    # Clients.
    self.blob_client = BlobClient(conf)
    self.container_client = ContainerClient(conf, logger=self.logger)
    self.content_factory = ContentFactory(
        conf, container_client=self.container_client,
        blob_client=self.blob_client)
    # Rawx services that must not receive moved chunks.
    self.excluded_rawx = [
        svc for svc in conf.get('excluded_rawx', '').split(',') if svc]
    self.fake_excluded_chunks = self._generate_fake_excluded_chunks()
def __init__(self, conf, logger, volume):
    """Set up a chunk worker bound to one rawx volume.

    :param conf: configuration dictionary.
    :param logger: logger instance; one is built from conf when falsy.
    :param volume: path to the rawx volume to process.
    """
    self.conf = conf
    self.logger = logger or get_logger(conf)
    self.volume = volume
    self.namespace, self.address = check_volume(self.volume)
    # Counters and reporting state.
    self.run_time = 0
    self.passes = 0
    self.errors = 0
    self.last_reported = 0
    self.last_usage_check = 0
    self.chunks_run_time = 0
    self.bytes_running_time = 0
    self.bytes_processed = 0
    self.total_bytes_processed = 0
    self.total_chunks_processed = 0
    # Tunables.
    self.usage_target = int_value(conf.get('usage_target'), 0)
    self.usage_check_interval = int_value(
        conf.get('usage_check_interval'), 3600)
    self.report_interval = int_value(conf.get('report_interval'), 3600)
    self.max_chunks_per_second = int_value(
        conf.get('chunks_per_second'), 30)
    self.max_bytes_per_second = int_value(
        conf.get('bytes_per_second'), 10000000)
    self.limit = int_value(conf.get('limit'), 0)
    self.allow_links = true_value(conf.get('allow_links', True))
    # Clients.
    self.blob_client = BlobClient(conf)
    self.container_client = ContainerClient(conf, logger=self.logger)
    self.content_factory = ContentFactory(conf)
def __init__(self, conf, **kwargs):
    """Indexer daemon for one rawx volume, with optional chunk conversion.

    :param conf: configuration dictionary, must contain a 'volume' key.
    :raises exc.ConfigurationException: when no volume is configured.
    """
    super(BlobIndexer, self).__init__(conf)
    self.logger = get_logger(conf)
    vol = conf.get('volume')
    if not vol:
        raise exc.ConfigurationException('No volume specified for indexer')
    self.volume = vol
    # Counters and reporting state.
    self.passes = 0
    self.errors = 0
    self.successes = 0
    self.last_reported = 0
    self.total_since_last_reported = 0
    self.chunks_run_time = 0
    # Tunables.
    self.interval = int_value(conf.get('interval'), 300)
    self.report_interval = int_value(conf.get('report_interval'), 3600)
    self.max_chunks_per_second = int_value(
        conf.get('chunks_per_second'), 30)
    # One HTTP pool manager shared by the rdir client and the
    # optional converter.
    pool_manager = get_pool_manager(pool_connections=10)
    self.index_client = RdirClient(
        conf, logger=self.logger, pool_manager=pool_manager)
    self.namespace, self.volume_id = check_volume(self.volume)
    self.convert_chunks = true_value(conf.get('convert_chunks'))
    if self.convert_chunks:
        converter_conf = self.conf.copy()
        # Conversions triggered from the indexer keep no backup copy.
        converter_conf['no_backup'] = True
        self.converter = BlobConverter(
            converter_conf, logger=self.logger, pool_manager=pool_manager)
    else:
        self.converter = None
def __init__(self, conf, logger, volume, container_ids):
    """Set up a worker replaying chunk beans of a volume into meta2.

    :param conf: configuration dict, requires 'namespace' and 'action'.
    :param logger: logger instance.
    :param volume: path to the rawx volume to process.
    :param container_ids: optional iterable of container IDs to restrict
        the pass to (normalized to upper case).
    :raises ValueError: on namespace mismatch or unknown action.
    """
    self.conf = conf
    self.logger = logger
    self.volume = volume
    self.volume_ns, self.volume_id = check_volume(self.volume)
    self.container_ids = container_ids or list()
    self.container_ids = [
        container_id.upper() for container_id in self.container_ids]
    self.namespace = self.conf['namespace']
    if self.namespace != self.volume_ns:
        # BUG FIX: the message was passed logging-style extra arguments,
        # which ValueError does not interpolate; format it explicitly.
        raise ValueError(
            'Namespace (%s) mismatch with volume namespace (%s)'
            % (self.namespace, self.volume_ns))
    # action
    self.action_name = self.conf['action'].lower()
    if self.action_name == 'insert':
        self.action = self._insert_bean
    elif self.action_name == 'update':
        self.action = self._update_bean
    elif self.action_name == 'check':
        self.action = self._check_bean
    else:
        # BUG FIX: same non-interpolated message issue as above.
        raise ValueError('Unknown action (%s)' % self.action_name)
    # speed
    self.chunks_run_time = 0
    self.max_chunks_per_second = int_value(
        self.conf.get('chunks_per_second'),
        self.DEFAULT_CHUNKS_PER_SECOND)
    # counters, one entry per bean type
    self.chunks_processed = 0
    self.chunk_errors = 0
    self.beans_processed = dict()
    self.bean_successes = dict()
    self.bean_already_exists = dict()
    self.bean_orphans = dict()
    self.bean_errors = dict()
    for bean_type in self.BEAN_TYPES:
        self.beans_processed[bean_type] = 0
        self.bean_successes[bean_type] = 0
        self.bean_already_exists[bean_type] = 0
        self.bean_orphans[bean_type] = 0
        self.bean_errors[bean_type] = 0
    # report
    self.start_time = 0
    self.last_report = 0
    self.report_interval = int_value(
        conf.get('report_interval'), self.DEFAULT_REPORT_INTERVAL)
    self.client = ContainerClient(
        {'namespace': self.namespace}, logger=self.logger)
    self.ctime = int(time.time())
def __init__(self, conf, logger, volume):
    """Index worker bound to a single rawx volume.

    :param conf: configuration dictionary.
    :param logger: logger instance.
    :param volume: path to the rawx volume to index.
    """
    self.conf = conf
    self.logger = logger
    self.volume = volume
    # Counters and reporting state.
    self.passes = 0
    self.errors = 0
    self.last_reported = 0
    self.chunks_run_time = 0
    self.total_chunks_processed = 0
    # Tunables.
    self.report_interval = int_value(conf.get('report_interval'), 3600)
    self.max_chunks_per_second = int_value(
        conf.get('chunks_per_second'), 30)
    self.index_client = RdirClient(conf)
    self.namespace, self.volume_id = check_volume(self.volume)
def index_pass(self):
    """Walk every chunk file of the volume and update the rdir index.

    Logs a progress line every ``report_interval`` seconds and a
    summary line when the pass completes.
    """
    self.namespace, self.volume_id = check_volume(self.volume)
    start_time = report_time = time.time()
    total_errors = 0
    for path in paths_gen(self.volume):
        self.safe_update_index(path)
        # Throttle to at most max_chunks_per_second.
        self.chunks_run_time = ratelimit(
            self.chunks_run_time, self.max_chunks_per_second)
        self.total_chunks_processed += 1
        now = time.time()
        if now - self.last_reported < self.report_interval:
            continue
        self.logger.info(
            '%(start_time)s '
            '%(passes)d '
            '%(errors)d '
            '%(c_rate).2f '
            '%(total).2f ' % {
                'start_time': time.ctime(report_time),
                'passes': self.passes,
                'errors': self.errors,
                'c_rate': self.passes / (now - report_time),
                'total': (now - start_time)})
        # Fold the interval counters into the totals and reset them.
        report_time = now
        total_errors += self.errors
        self.passes = 0
        self.errors = 0
        self.last_reported = now
    elapsed = (time.time() - start_time) or 0.000001
    self.logger.info(
        '%(elapsed).02f '
        '%(errors)d '
        '%(chunk_rate).2f ' % {
            'elapsed': elapsed,
            'errors': total_errors + self.errors,
            'chunk_rate': self.total_chunks_processed / elapsed})
def __init__(self, conf, **kwargs):
    """Indexer daemon for one rawx volume.

    :param conf: configuration dictionary, must contain a 'volume' key.
    :raises exc.ConfigurationException: when no volume is configured.
    """
    super(BlobIndexer, self).__init__(conf)
    self.logger = get_logger(conf)
    vol = conf.get('volume')
    if not vol:
        raise exc.ConfigurationException('No volume specified for indexer')
    self.volume = vol
    # Counters and reporting state.
    self.passes = 0
    self.errors = 0
    self.successes = 0
    self.last_reported = 0
    self.chunks_run_time = 0
    # Tunables.
    self.interval = int_value(conf.get('interval'), 300)
    self.report_interval = int_value(conf.get('report_interval'), 3600)
    self.max_chunks_per_second = int_value(
        conf.get('chunks_per_second'), 30)
    self.index_client = RdirClient(conf, logger=self.logger)
    self.namespace, self.volume_id = check_volume(self.volume)
def __init__(self, conf, logger, volume):
    """Registrator worker bound to a single rawx volume.

    :param conf: configuration dict, requires a 'namespace' key.
    :param logger: logger instance.
    :param volume: path to the rawx volume to process.
    """
    self.conf = conf
    self.logger = logger
    self.volume = volume
    self.namespace = self.conf["namespace"]
    self.volume_ns, self.volume_id = check_volume(self.volume)
    self.client = ContainerClient(
        {"namespace": self.namespace}, logger=self.logger)
    # NOTE(review): unlike other workers, this value is not run through
    # int_value(), so it stays a string when set in the configuration
    # file — confirm callers handle that.
    self.report_interval = conf.get(
        "report_period", default_report_interval)
    # Dispatch table selecting the per-chunk handler.
    dispatch = {
        'update': BlobRegistratorWorker._update_chunk,
        'insert': BlobRegistratorWorker._insert_chunk,
        'check': BlobRegistratorWorker._check_chunk,
    }
    self.action = dispatch[conf.get("action", "check")]
def __init__(self, conf, **kwargs):
    """Indexer daemon for one rawx volume.

    :param conf: configuration dictionary, must contain a 'volume' key.
    :raises exc.ConfigurationException: when no volume is configured.
    """
    super(BlobIndexer, self).__init__(conf)
    self.logger = get_logger(conf)
    vol = conf.get('volume')
    if not vol:
        raise exc.ConfigurationException('No volume specified for indexer')
    self.volume = vol
    # Counters and reporting state.
    self.passes = 0
    self.errors = 0
    self.last_reported = 0
    self.chunks_run_time = 0
    self.total_chunks_processed = 0
    # Tunables.
    self.interval = int_value(conf.get('interval'), 300)
    self.report_interval = int_value(conf.get('report_interval'), 3600)
    self.max_chunks_per_second = int_value(
        conf.get('chunks_per_second'), 30)
    self.index_client = RdirClient(conf)
    self.namespace, self.volume_id = check_volume(self.volume)
def audit_pass(self):
    """Audit every chunk file of the volume.

    Walks all chunk paths, audits each one, logs a progress report
    every ``report_interval`` seconds and a final summary line.
    """
    self.namespace, self.address = check_volume(self.volume)
    start_time = report_time = time.time()
    total_errors = 0
    total_corrupted = 0
    total_orphans = 0
    total_faulty = 0
    audit_time = 0
    paths = paths_gen(self.volume)
    for path in paths:
        loop_time = time.time()
        self.safe_chunk_audit(path)
        # Throttle to at most max_chunks_per_second.
        self.chunks_run_time = ratelimit(
            self.chunks_run_time, self.max_chunks_per_second
        )
        self.total_chunks_processed += 1
        now = time.time()
        if now - self.last_reported >= self.report_interval:
            self.logger.info(
                '%(start_time)s '
                '%(passes)d '
                '%(corrupted)d '
                '%(faulty)d '
                '%(orphans)d '
                '%(errors)d '
                '%(c_rate).2f '
                '%(b_rate).2f '
                '%(total).2f '
                # BUG FIX: a space was missing here, fusing the
                # audit_time and audit_rate figures in the log line.
                '%(audit_time).2f '
                '%(audit_rate).2f' % {
                    'start_time': time.ctime(report_time),
                    'passes': self.passes,
                    'corrupted': self.corrupted_chunks,
                    'faulty': self.faulty_chunks,
                    'orphans': self.orphan_chunks,
                    'errors': self.errors,
                    'c_rate': self.passes / (now - report_time),
                    'b_rate': self.bytes_processed / (now - report_time),
                    'total': (now - start_time),
                    'audit_time': audit_time,
                    'audit_rate': audit_time / (now - start_time)
                }
            )
            # Fold the interval counters into the totals and reset them.
            report_time = now
            total_corrupted += self.corrupted_chunks
            total_orphans += self.orphan_chunks
            total_faulty += self.faulty_chunks
            total_errors += self.errors
            self.passes = 0
            self.corrupted_chunks = 0
            self.orphan_chunks = 0
            self.faulty_chunks = 0
            self.errors = 0
            self.bytes_processed = 0
            self.last_reported = now
        audit_time += (now - loop_time)
    elapsed = (time.time() - start_time) or 0.000001
    self.logger.info(
        '%(elapsed).02f '
        '%(corrupted)d '
        '%(faulty)d '
        '%(orphans)d '
        '%(errors)d '
        '%(chunk_rate).2f '
        '%(bytes_rate).2f '
        '%(audit_time).2f '
        '%(audit_rate).2f' % {
            'elapsed': elapsed,
            'corrupted': total_corrupted + self.corrupted_chunks,
            'faulty': total_faulty + self.faulty_chunks,
            'orphans': total_orphans + self.orphan_chunks,
            'errors': total_errors + self.errors,
            'chunk_rate': self.total_chunks_processed / elapsed,
            'bytes_rate': self.total_bytes_processed / elapsed,
            'audit_time': audit_time,
            'audit_rate': audit_time / elapsed
        }
    )
def audit_pass(self):
    """Audit every chunk file of the volume.

    Walks all chunk paths, audits each one, logs a progress report
    every ``report_interval`` seconds and a final summary line.
    """
    self.namespace, self.address = check_volume(self.volume)
    start_time = report_time = time.time()
    total_errors = 0
    total_corrupted = 0
    total_orphans = 0
    total_faulty = 0
    audit_time = 0
    paths = paths_gen(self.volume)
    for path in paths:
        loop_time = time.time()
        self.safe_chunk_audit(path)
        # Throttle to at most max_chunks_per_second.
        self.chunks_run_time = ratelimit(self.chunks_run_time,
                                         self.max_chunks_per_second)
        self.total_chunks_processed += 1
        now = time.time()
        if now - self.last_reported >= self.report_interval:
            self.logger.info(
                "%(start_time)s "
                "%(passes)d "
                "%(corrupted)d "
                "%(faulty)d "
                "%(orphans)d "
                "%(errors)d "
                "%(c_rate).2f "
                "%(b_rate).2f "
                "%(total).2f "
                # BUG FIX: a space was missing here, fusing the
                # audit_time and audit_rate figures in the log line.
                "%(audit_time).2f "
                "%(audit_rate).2f" % {
                    "start_time": time.ctime(report_time),
                    "passes": self.passes,
                    "corrupted": self.corrupted_chunks,
                    "faulty": self.faulty_chunks,
                    "orphans": self.orphan_chunks,
                    "errors": self.errors,
                    "c_rate": self.passes / (now - report_time),
                    "b_rate": self.bytes_processed / (now - report_time),
                    "total": (now - start_time),
                    "audit_time": audit_time,
                    "audit_rate": audit_time / (now - start_time),
                }
            )
            # Fold the interval counters into the totals and reset them.
            report_time = now
            total_corrupted += self.corrupted_chunks
            total_orphans += self.orphan_chunks
            total_faulty += self.faulty_chunks
            total_errors += self.errors
            self.passes = 0
            self.corrupted_chunks = 0
            self.orphan_chunks = 0
            self.faulty_chunks = 0
            self.errors = 0
            self.bytes_processed = 0
            self.last_reported = now
        audit_time += now - loop_time
    elapsed = (time.time() - start_time) or 0.000001
    self.logger.info(
        "%(elapsed).02f "
        "%(corrupted)d "
        "%(faulty)d "
        "%(orphans)d "
        "%(errors)d "
        "%(chunk_rate).2f "
        "%(bytes_rate).2f "
        "%(audit_time).2f "
        "%(audit_rate).2f" % {
            "elapsed": elapsed,
            "corrupted": total_corrupted + self.corrupted_chunks,
            "faulty": total_faulty + self.faulty_chunks,
            "orphans": total_orphans + self.orphan_chunks,
            "errors": total_errors + self.errors,
            "chunk_rate": self.total_chunks_processed / elapsed,
            "bytes_rate": self.total_bytes_processed / elapsed,
            "audit_time": audit_time,
            "audit_rate": audit_time / elapsed,
        }
    )
def audit_pass(self):
    """Audit every chunk file of the volume.

    Walks all chunk paths, audits each one, logs a progress report
    every ``report_interval`` seconds and a final summary line.
    """
    self.namespace, self.address = check_volume(self.volume)
    start_time = report_time = time.time()
    total_errors = 0
    total_corrupted = 0
    total_orphans = 0
    total_faulty = 0
    audit_time = 0
    paths = paths_gen(self.volume)
    for path in paths:
        loop_time = time.time()
        self.safe_chunk_audit(path)
        # Throttle to at most max_chunks_per_second.
        self.chunks_run_time = ratelimit(self.chunks_run_time,
                                         self.max_chunks_per_second)
        self.total_chunks_processed += 1
        now = time.time()
        if now - self.last_reported >= self.report_interval:
            self.logger.info(
                '%(start_time)s '
                '%(passes)d '
                '%(corrupted)d '
                '%(faulty)d '
                '%(orphans)d '
                '%(errors)d '
                '%(c_rate).2f '
                '%(b_rate).2f '
                '%(total).2f '
                # BUG FIX: a space was missing here, fusing the
                # audit_time and audit_rate figures in the log line.
                '%(audit_time).2f '
                '%(audit_rate).2f' % {
                    'start_time': time.ctime(report_time),
                    'passes': self.passes,
                    'corrupted': self.corrupted_chunks,
                    'faulty': self.faulty_chunks,
                    'orphans': self.orphan_chunks,
                    'errors': self.errors,
                    'c_rate': self.passes / (now - report_time),
                    'b_rate': self.bytes_processed / (now - report_time),
                    'total': (now - start_time),
                    'audit_time': audit_time,
                    'audit_rate': audit_time / (now - start_time)})
            # Fold the interval counters into the totals and reset them.
            report_time = now
            total_corrupted += self.corrupted_chunks
            total_orphans += self.orphan_chunks
            total_faulty += self.faulty_chunks
            total_errors += self.errors
            self.passes = 0
            self.corrupted_chunks = 0
            self.orphan_chunks = 0
            self.faulty_chunks = 0
            self.errors = 0
            self.bytes_processed = 0
            self.last_reported = now
        audit_time += (now - loop_time)
    elapsed = (time.time() - start_time) or 0.000001
    self.logger.info(
        '%(elapsed).02f '
        '%(corrupted)d '
        '%(faulty)d '
        '%(orphans)d '
        '%(errors)d '
        '%(chunk_rate).2f '
        '%(bytes_rate).2f '
        '%(audit_time).2f '
        '%(audit_rate).2f' % {
            'elapsed': elapsed,
            'corrupted': total_corrupted + self.corrupted_chunks,
            'faulty': total_faulty + self.faulty_chunks,
            'orphans': total_orphans + self.orphan_chunks,
            'errors': total_errors + self.errors,
            'chunk_rate': self.total_chunks_processed / elapsed,
            'bytes_rate': self.total_bytes_processed / elapsed,
            'audit_time': audit_time,
            'audit_rate': audit_time / elapsed})
def mover_pass(self):
    """Move chunks off this volume until the usage target is reached.

    Re-checks the volume usage every ``usage_check_interval`` seconds
    and stops once it drops to ``usage_target``; logs a progress
    report every ``report_interval`` seconds and a final summary.
    """
    self.namespace, self.address = check_volume(self.volume)
    start_time = report_time = time.time()
    total_errors = 0
    mover_time = 0
    paths = paths_gen(self.volume)
    for path in paths:
        loop_time = time.time()

        now = time.time()
        if now - self.last_usage_check >= self.usage_check_interval:
            used, total = statfs(self.volume)
            usage = (float(used) / total) * 100
            if usage <= self.usage_target:
                self.logger.info(
                    'current usage %.2f%%: target reached (%.2f%%)',
                    usage, self.usage_target)
                # NOTE(review): last_usage_check is only refreshed when
                # the target is reached, so statfs() runs on every
                # iteration afterwards — confirm this is intended.
                self.last_usage_check = now
                break
        self.safe_chunk_move(path)
        # Throttle to at most max_chunks_per_second.
        self.chunks_run_time = ratelimit(self.chunks_run_time,
                                         self.max_chunks_per_second)
        self.total_chunks_processed += 1
        now = time.time()
        if now - self.last_reported >= self.report_interval:
            self.logger.info(
                '%(start_time)s '
                '%(passes)d '
                '%(errors)d '
                '%(c_rate).2f '
                '%(b_rate).2f '
                '%(total).2f '
                # BUG FIX: a space was missing here, fusing the
                # mover_time and mover_rate figures in the log line.
                '%(mover_time).2f '
                '%(mover_rate).2f' % {
                    'start_time': time.ctime(report_time),
                    'passes': self.passes,
                    'errors': self.errors,
                    'c_rate': self.passes / (now - report_time),
                    'b_rate': self.bytes_processed / (now - report_time),
                    'total': (now - start_time),
                    'mover_time': mover_time,
                    'mover_rate': mover_time / (now - start_time)})
            report_time = now
            total_errors += self.errors
            self.passes = 0
            # BUG FIX: reset the per-interval error counter; without
            # this, errors already folded into total_errors were
            # counted again at each report and in the final summary.
            self.errors = 0
            self.bytes_processed = 0
            self.last_reported = now
        mover_time += (now - loop_time)
    elapsed = (time.time() - start_time) or 0.000001
    self.logger.info(
        '%(elapsed).02f '
        '%(errors)d '
        '%(chunk_rate).2f '
        '%(bytes_rate).2f '
        '%(mover_time).2f '
        '%(mover_rate).2f' % {
            'elapsed': elapsed,
            'errors': total_errors + self.errors,
            'chunk_rate': self.total_chunks_processed / elapsed,
            'bytes_rate': self.total_bytes_processed / elapsed,
            'mover_time': mover_time,
            'mover_rate': mover_time / elapsed})
def mover_pass(self):
    """Move chunks off this volume until the usage target is reached.

    Re-checks the volume usage every ``usage_check_interval`` seconds
    and stops once it drops to ``usage_target``; logs a progress
    report every ``report_interval`` seconds and a final summary.
    """
    self.namespace, self.address = check_volume(self.volume)
    start_time = report_time = time.time()
    total_errors = 0
    mover_time = 0
    paths = paths_gen(self.volume)
    for path in paths:
        loop_time = time.time()

        now = time.time()
        if now - self.last_usage_check >= self.usage_check_interval:
            used, total = statfs(self.volume)
            usage = (float(used) / total) * 100
            if usage <= self.usage_target:
                self.logger.info(
                    'current usage %.2f%%: target reached (%.2f%%)',
                    usage, self.usage_target)
                # NOTE(review): last_usage_check is only refreshed when
                # the target is reached, so statfs() runs on every
                # iteration afterwards — confirm this is intended.
                self.last_usage_check = now
                break
        self.safe_chunk_move(path)
        # Throttle to at most max_chunks_per_second.
        self.chunks_run_time = ratelimit(
            self.chunks_run_time, self.max_chunks_per_second
        )
        self.total_chunks_processed += 1
        now = time.time()
        if now - self.last_reported >= self.report_interval:
            self.logger.info(
                '%(start_time)s '
                '%(passes)d '
                '%(errors)d '
                '%(c_rate).2f '
                '%(b_rate).2f '
                '%(total).2f '
                # BUG FIX: a space was missing here, fusing the
                # mover_time and mover_rate figures in the log line.
                '%(mover_time).2f '
                '%(mover_rate).2f' % {
                    'start_time': time.ctime(report_time),
                    'passes': self.passes,
                    'errors': self.errors,
                    'c_rate': self.passes / (now - report_time),
                    'b_rate': self.bytes_processed / (now - report_time),
                    'total': (now - start_time),
                    'mover_time': mover_time,
                    'mover_rate': mover_time / (now - start_time)
                }
            )
            report_time = now
            total_errors += self.errors
            self.passes = 0
            # BUG FIX: reset the per-interval error counter; without
            # this, errors already folded into total_errors were
            # counted again at each report and in the final summary.
            self.errors = 0
            self.bytes_processed = 0
            self.last_reported = now
        mover_time += (now - loop_time)
    elapsed = (time.time() - start_time) or 0.000001
    self.logger.info(
        '%(elapsed).02f '
        '%(errors)d '
        '%(chunk_rate).2f '
        '%(bytes_rate).2f '
        '%(mover_time).2f '
        '%(mover_rate).2f' % {
            'elapsed': elapsed,
            'errors': total_errors + self.errors,
            'chunk_rate': self.total_chunks_processed / elapsed,
            'bytes_rate': self.total_bytes_processed / elapsed,
            'mover_time': mover_time,
            'mover_rate': mover_time / elapsed
        }
    )
def mover_pass(self):
    """Move chunks off this volume until the usage target is reached.

    Re-checks the volume usage every ``usage_check_interval`` seconds
    and stops once it drops to ``usage_target``; logs a progress
    report every ``report_interval`` seconds and a final summary.
    """
    self.namespace, self.address = check_volume(self.volume)
    start_time = report_time = time.time()
    total_errors = 0
    mover_time = 0
    paths = paths_gen(self.volume)
    for path in paths:
        loop_time = time.time()

        now = time.time()
        if now - self.last_usage_check >= self.usage_check_interval:
            used, total = statfs(self.volume)
            usage = (float(used) / total) * 100
            if usage <= self.usage_target:
                self.logger.info("current usage %.2f%%: target reached (%.2f%%)",
                                 usage, self.usage_target)
                # NOTE(review): last_usage_check is only refreshed when
                # the target is reached, so statfs() runs on every
                # iteration afterwards — confirm this is intended.
                self.last_usage_check = now
                break
        self.safe_chunk_move(path)
        # Throttle to at most max_chunks_per_second.
        self.chunks_run_time = ratelimit(self.chunks_run_time,
                                         self.max_chunks_per_second)
        self.total_chunks_processed += 1
        now = time.time()
        if now - self.last_reported >= self.report_interval:
            self.logger.info(
                "%(start_time)s "
                "%(passes)d "
                "%(errors)d "
                "%(c_rate).2f "
                "%(b_rate).2f "
                "%(total).2f "
                # BUG FIX: a space was missing here, fusing the
                # mover_time and mover_rate figures in the log line.
                "%(mover_time).2f "
                "%(mover_rate).2f" % {
                    "start_time": time.ctime(report_time),
                    "passes": self.passes,
                    "errors": self.errors,
                    "c_rate": self.passes / (now - report_time),
                    "b_rate": self.bytes_processed / (now - report_time),
                    "total": (now - start_time),
                    "mover_time": mover_time,
                    "mover_rate": mover_time / (now - start_time),
                }
            )
            report_time = now
            total_errors += self.errors
            self.passes = 0
            # BUG FIX: reset the per-interval error counter; without
            # this, errors already folded into total_errors were
            # counted again at each report and in the final summary.
            self.errors = 0
            self.bytes_processed = 0
            self.last_reported = now
        mover_time += now - loop_time
    elapsed = (time.time() - start_time) or 0.000001
    self.logger.info(
        "%(elapsed).02f "
        "%(errors)d "
        "%(chunk_rate).2f "
        "%(bytes_rate).2f "
        "%(mover_time).2f "
        "%(mover_rate).2f" % {
            "elapsed": elapsed,
            "errors": total_errors + self.errors,
            "chunk_rate": self.total_chunks_processed / elapsed,
            "bytes_rate": self.total_bytes_processed / elapsed,
            "mover_time": mover_time,
            "mover_rate": mover_time / elapsed,
        }
    )