def test_get_logger(self):
    """Check that get_logger() honors the 'log_level' configuration key."""
    sio = StringIO()
    # Attach a stream handler so everything logged under the 'test'
    # name is captured in `sio`.
    capture = logging.getLogger('test')
    capture.addHandler(logging.StreamHandler(sio))

    # Default configuration: WARNING-level messages go through...
    logger = get_logger(None, 'test')
    # warn() is deprecated since Python 3.3; use warning() instead.
    logger.warning('msg1')
    self.assertEqual(sio.getvalue(), 'msg1\n')
    # ...but DEBUG-level messages are filtered out.
    logger.debug('msg2')
    self.assertEqual(sio.getvalue(), 'msg1\n')

    # With an explicit DEBUG log level, debug messages are emitted.
    conf = {'log_level': 'DEBUG'}
    logger = get_logger(conf, 'test')
    logger.debug('msg3')
    self.assertEqual(sio.getvalue(), 'msg1\nmsg3\n')
def __init__(self, conf, logger=None):
    """Wire the xcute HTTP endpoints and the backend they act upon."""
    self.conf = conf
    self.logger = logger or get_logger(self.conf)
    self.backend = XcuteBackend(self.conf, logger=self.logger)

    # Job and lock management endpoints, all mounted under /v1.0/xcute.
    xcute_rules = [
        Rule('/job/list', endpoint='job_list', methods=['GET']),
        Rule('/job/create', endpoint='job_create', methods=['POST']),
        Rule('/job/show', endpoint='job_show', methods=['GET']),
        Rule('/job/pause', endpoint='job_pause', methods=['POST']),
        Rule('/job/resume', endpoint='job_resume', methods=['POST']),
        Rule('/job/delete', endpoint='job_delete', methods=['DELETE']),
        Rule('/lock/list', endpoint='lock_list', methods=['GET']),
        Rule('/lock/show', endpoint='lock_show', methods=['GET']),
    ]
    url_map = Map([
        Rule('/status', endpoint='status'),
        Submount('/v1.0/xcute', xcute_rules),
    ])
    super(XcuteServer, self).__init__(url_map, logger)
def __init__(self, conf, beanstalkd_addr=None, logger=None):
    """
    Initialize the worker: progress counters, report timers and an
    optional beanstalkd listener used as task input.

    :param conf: service configuration, must contain 'namespace'
    :type conf: `dict`
    :param beanstalkd_addr: address of a beanstalkd service to listen to
        (no listener is created when None)
    :param logger: optional logger (one is created from conf otherwise)
    """
    self.conf = conf
    self.logger = logger or get_logger(self.conf)
    self.namespace = conf['namespace']
    # Set to False as soon as an error occurs
    self.success = True
    # counters
    self.items_processed = 0
    self.total_items_processed = 0
    self.errors = 0
    self.total_errors = 0
    self.total_expected_items = None
    # report
    self.start_time = 0
    self.last_report = 0
    self.report_interval = int_value(self.conf.get('report_interval'),
                                     self.DEFAULT_REPORT_INTERVAL)
    # dispatcher
    self.dispatcher = None
    # input
    self.beanstalkd = None
    if beanstalkd_addr:
        self.beanstalkd = BeanstalkdListener(
            beanstalkd_addr,
            self.conf.get('beanstalkd_worker_tube')
            or self.DEFAULT_BEANSTALKD_WORKER_TUBE,
            self.logger)
def __init__(self, conf, logger, volume, input_file=None, **kwargs):
    """
    Initialize the tool: graceful-exit signal handlers, thread-safe
    counters and report timers.

    :param conf: service configuration, must contain 'namespace'
    :param logger: logger to use (one is created from conf when falsy)
    :param volume: path of the volume to process
    :param input_file: optional file listing the items to process
    """
    # pylint: disable=no-member
    self.conf = conf
    self.logger = logger or get_logger(conf)
    self.namespace = conf['namespace']
    self.volume = volume
    self.input_file = input_file
    self.concurrency = int_value(conf.get('concurrency'),
                                 self.DEFAULT_CONCURRENCY)
    # Set to False as soon as an error occurs
    self.success = True
    # exit gracefully
    self.running = True
    signal.signal(signal.SIGINT, self.exit_gracefully)
    signal.signal(signal.SIGTERM, self.exit_gracefully)
    # counters (updated from several threads, hence the lock)
    self.lock_counters = threading.Lock()
    self.items_processed = 0
    self.errors = 0
    self.total_items_processed = 0
    self.total_errors = 0
    # report
    self.lock_report = threading.Lock()
    self.start_time = 0
    self.last_report = 0
    self.report_interval = int_value(conf.get('report_interval'),
                                     self.DEFAULT_REPORT_INTERVAL)
def __init__(self, conf, endpoint=None, proxy_endpoint=None,
             refresh_delay=3600.0, logger=None, **kwargs):
    """
    Initialize a client for the xcute service.

    :param conf: dictionary with at least the namespace name
    :type conf: `dict`
    :param endpoint: URL of an xcute service
    :param proxy_endpoint: URL of the proxy
    :param refresh_delay: time between refreshes of the xcute
        service endpoint (if not provided at instantiation)
    :type refresh_delay: `float` seconds
    """
    super(XcuteClient, self).__init__(
        endpoint=endpoint, service_type='xcute-service', **kwargs)
    self.conf = conf
    self.logger = logger or get_logger(self.conf)
    self.conscience = ConscienceClient(conf, endpoint=proxy_endpoint,
                                       logger=self.logger, **kwargs)
    # A negative delay disables the refresh
    # (when the endpoint was given explicitly).
    self._refresh_delay = refresh_delay if not self.endpoint else -1.0
    self._last_refresh = 0.0
def __init__(self, api, account, container, logger=None):
    """Keep the context needed to process one container's rules."""
    self.api = api
    self.account = account
    self.container = container
    if not logger:
        logger = get_logger(None, name=str(self.__class__))
    self.logger = logger
    self._rules = dict()
    self.src_xml = None
def __init__(self, conf, logger=None):
    """
    Connect to Redis using the 'redis_'-prefixed configuration keys,
    then register every Lua script of this backend.
    """
    self.conf = conf
    self.logger = logger or get_logger(self.conf)
    # Strip the 'redis_' prefix and forward the rest to the Redis client.
    redis_conf = {key[6:]: val
                  for key, val in self.conf.items()
                  if key.startswith('redis_')}
    super(XcuteBackend, self).__init__(**redis_conf)
    # Register each lua_<name> class attribute as a script_<name>
    # attribute, in the same order as before.
    script_names = (
        'create', 'run_next', 'free', 'fail', 'request_pause', 'resume',
        'update_config', 'update_tasks_sent', 'abort_tasks_sent',
        'update_tasks_processed', 'incr_total', 'total_tasks_done',
        'delete')
    for name in script_names:
        script = self.register_script(getattr(self, 'lua_' + name))
        setattr(self, 'script_' + name, script)
def __init__(self, app, conf, **kwargs):
    """Create the filter's logger and the account client it relies on."""
    self.logger = get_logger(conf)
    super(AccountUpdateFilter, self).__init__(
        app, conf, logger=self.logger, **kwargs)
    self.account = AccountClient(conf, logger=self.logger)
def __init__(self, conf, logger, volume):
    """
    Initialize the worker for one rawx volume: counters, rate limits
    and the clients used to move chunks.

    :param conf: service configuration
    :param logger: logger to use (one is created from conf when falsy)
    :param volume: path of the rawx volume to process
    """
    self.conf = conf
    self.logger = logger or get_logger(conf)
    self.volume = volume
    self.namespace, self.address = check_volume(self.volume)
    self.running = False
    self.run_time = 0
    self.passes = 0
    self.errors = 0
    self.last_reported = 0
    self.last_usage_check = 0
    self.chunks_run_time = 0
    self.bytes_running_time = 0
    self.bytes_processed = 0
    self.total_bytes_processed = 0
    self.total_chunks_processed = 0
    self.concurrency = int_value(conf.get('concurrency'), 10)
    # assumes usage_target is a disk-usage percentage — TODO confirm
    self.usage_target = int_value(conf.get('usage_target'), 0)
    self.usage_check_interval = int_value(conf.get('usage_check_interval'),
                                          60)
    self.report_interval = int_value(conf.get('report_interval'), 3600)
    self.max_chunks_per_second = int_value(conf.get('chunks_per_second'),
                                           30)
    self.limit = int_value(conf.get('limit'), 0)
    self.allow_links = true_value(conf.get('allow_links', True))
    self.blob_client = BlobClient(conf)
    self.container_client = ContainerClient(conf, logger=self.logger)
    self.content_factory = ContentFactory(
        conf, container_client=self.container_client,
        blob_client=self.blob_client)
    # Comma-separated list of rawx services to exclude (empty entries
    # are filtered out)
    self.excluded_rawx = \
        [rawx for rawx in conf.get('excluded_rawx', '').split(',') if rawx]
    self.fake_excluded_chunks = self._generate_fake_excluded_chunks()
def __init__(self, api, account, container, logger=None):
    """Hold the context needed to process one container's versions."""
    self.api = api
    self.account = account
    self.container = container
    if not logger:
        logger = get_logger(None, name=str(self.__class__))
    self.logger = logger
    self.rules = list()
    self.processed_versions = None
def __init__(self, conf):
    """Validate the configuration, then load and watch the services."""
    validate_service_conf(conf)
    self.conf = conf
    self.running = True
    self.logger = get_logger(conf)
    self.load_services()
    self.init_watchers(conf['services'])
def __init__(self, conf, logger, volume, input_file=None,
             try_chunk_delete=False, beanstalkd_addr=None):
    """
    Initialize the rebuilder worker for one rawx volume.

    :param conf: service configuration
    :param logger: logger to use (one is created from conf when falsy)
    :param volume: path of the rawx volume to process
    :param input_file: optional file listing the chunks to rebuild
    :param try_chunk_delete: flag stored for later use (name suggests
        deletion of the source chunk after rebuild — confirm in caller)
    :param beanstalkd_addr: optional address of a beanstalkd service
    """
    self.conf = conf
    self.logger = logger or get_logger(conf)
    self.volume = volume
    self.run_time = 0
    self.passes = 0
    self.errors = 0
    self.last_reported = 0
    self.chunks_run_time = 0
    self.bytes_running_time = 0
    self.bytes_processed = 0
    self.total_bytes_processed = 0
    self.total_chunks_processed = 0
    self.dry_run = true_value(conf.get('dry_run', False))
    self.report_interval = int_value(conf.get('report_interval'), 3600)
    self.max_chunks_per_second = int_value(conf.get('chunks_per_second'),
                                           30)
    self.max_bytes_per_second = int_value(conf.get('bytes_per_second'),
                                          10000000)
    self.rdir_fetch_limit = int_value(conf.get('rdir_fetch_limit'), 100)
    self.allow_same_rawx = true_value(conf.get('allow_same_rawx'))
    self.input_file = input_file
    self.rdir_client = RdirClient(conf, logger=self.logger)
    self.content_factory = ContentFactory(conf)
    self.try_chunk_delete = try_chunk_delete
    self.beanstalkd_addr = beanstalkd_addr
    self.beanstalkd_tube = conf.get('beanstalkd_tube', 'rebuild')
    # Lazily connected elsewhere
    self.beanstalk = None
def __init__(self, conf, **kwargs):
    """
    Initialize the blob indexer: counters, rate limits, rdir client
    and (optionally) a chunk converter.

    :param conf: service configuration, must contain 'volume'
    :raises exc.ConfigurationException: when no volume is specified
    """
    super(BlobIndexer, self).__init__(conf)
    self.logger = get_logger(conf)
    volume = conf.get('volume')
    if not volume:
        raise exc.ConfigurationException('No volume specified for indexer')
    self.volume = volume
    self.passes = 0
    self.errors = 0
    self.successes = 0
    self.last_reported = 0
    self.total_since_last_reported = 0
    self.chunks_run_time = 0
    self.interval = int_value(
        conf.get('interval'), 300)
    self.report_interval = int_value(
        conf.get('report_interval'), 3600)
    self.max_chunks_per_second = int_value(
        conf.get('chunks_per_second'), 30)
    # Share one HTTP connection pool between the rdir client
    # and the converter
    pm = get_pool_manager(pool_connections=10)
    self.index_client = RdirClient(conf, logger=self.logger,
                                   pool_manager=pm)
    self.namespace, self.volume_id = check_volume(self.volume)
    self.convert_chunks = true_value(conf.get('convert_chunks'))
    if self.convert_chunks:
        # NOTE(review): self.conf is presumably set by the parent
        # class' __init__ — confirm.
        converter_conf = self.conf.copy()
        # Do not keep backup copies of converted chunks
        converter_conf['no_backup'] = True
        self.converter = BlobConverter(converter_conf, logger=self.logger,
                                       pool_manager=pm)
    else:
        self.converter = None
def __init__(self, conf, pool_manager=None):
    """Set up the cache, the HTTP pool and the conscience client."""
    self._cache = dict()
    self.conf = conf
    if not pool_manager:
        pool_manager = get_pool_manager()
    self.pool_manager = pool_manager
    self._client = ConscienceClient(conf=self.conf,
                                    pool_manager=self.pool_manager)
    self.logger = get_logger(conf)
def __init__(self, conf, container_id, metadata, chunks,
             storage_method, account, container_name, blob_client=None,
             container_client=None, logger=None):
    """
    Initialize a content description from its metadata.

    :param conf: service configuration
    :param container_id: ID of the container holding the content
    :param metadata: content metadata `dict`, must contain 'id', 'name',
        'length', 'version', 'hash' and 'chunk_method'
    :param chunks: chunk descriptions (wrapped in a ChunksHelper)
    :param storage_method: storage method of the content
    :param account: account name
    :param container_name: container name
    :param blob_client: optional blob client (created when None)
    :param container_client: optional container client (created when None)
    :param logger: optional logger (created from conf when None)
    """
    self.conf = conf
    self.container_id = container_id
    self.metadata = metadata
    self.chunks = ChunksHelper(chunks)
    self.storage_method = storage_method
    self.logger = logger or get_logger(self.conf)
    self.blob_client = (blob_client or BlobClient(conf))
    self.container_client = (container_client
                             or ContainerClient(self.conf,
                                                logger=self.logger))

    # FIXME: all these may be properties
    self.content_id = self.metadata["id"]
    self.path = self.metadata["name"]
    self.length = int(self.metadata["length"])
    self.version = self.metadata["version"]
    self.checksum = self.metadata["hash"]
    self.chunk_method = self.metadata["chunk_method"]
    self.account = account
    self.container_name = container_name
    if 'full_path' in self.metadata:
        self.full_path = metadata['full_path']
    else:
        # Rebuild the full path from its components when it is missing
        self.full_path = encode_fullpath(
            self.account, self.container_name, self.path, self.version,
            self.content_id)
def __init__(self, conf, **kwargs):
    """Build the directory and rdir clients from the configuration."""
    self.conf = conf
    self.ns = conf['namespace']
    self.logger = get_logger(conf)
    self.directory = DirectoryClient(conf, logger=self.logger, **kwargs)
    self.rdir = RdirClient(conf, logger=self.logger, **kwargs)
    # Conscience client, created lazily
    self._cs = None
def __init__(self, namespace, logger=None, **kwargs):
    """
    Initialize the object storage API.

    :param namespace: name of the namespace to interact with
    :type namespace: `str`
    :keyword connection_timeout: connection timeout towards rawx services
    :type connection_timeout: `float` seconds
    :keyword read_timeout: timeout for rawx responses and data reads from
        the caller (when uploading)
    :type read_timeout: `float` seconds
    :keyword write_timeout: timeout for rawx write requests
    :type write_timeout: `float` seconds
    :keyword pool_manager: a pooled connection manager that will be used
        for all HTTP based APIs (except rawx)
    :type pool_manager: `urllib3.PoolManager`
    """
    self.namespace = namespace
    conf = {"namespace": self.namespace}
    self.logger = logger or get_logger(conf)
    # Keep only the timeout-related keyword arguments
    self.timeouts = {tok: float_value(tov, None)
                     for tok, tov in kwargs.items()
                     if tok in self.__class__.TIMEOUT_KEYS}

    # Local imports, presumably to avoid import cycles — confirm
    from oio.account.client import AccountClient
    from oio.container.client import ContainerClient
    from oio.directory.client import DirectoryClient
    self.directory = DirectoryClient(conf, logger=self.logger, **kwargs)
    self.container = ContainerClient(conf, logger=self.logger, **kwargs)

    # In AccountClient, "endpoint" is the account service, not the proxy
    acct_kwargs = kwargs.copy()
    acct_kwargs["proxy_endpoint"] = acct_kwargs.pop("endpoint", None)
    self.account = AccountClient(conf, logger=self.logger, **acct_kwargs)
def __init__(self, conf, logger, volume):
    """
    Initialize the worker for one rawx volume: counters, rate limits
    and the clients used to process chunks.

    :param conf: service configuration
    :param logger: logger to use (one is created from conf when falsy)
    :param volume: path of the rawx volume to process
    """
    self.conf = conf
    self.logger = logger or get_logger(conf)
    self.volume = volume
    self.run_time = 0
    self.passes = 0
    self.errors = 0
    self.last_reported = 0
    self.last_usage_check = 0
    self.chunks_run_time = 0
    self.bytes_running_time = 0
    self.bytes_processed = 0
    self.total_bytes_processed = 0
    self.total_chunks_processed = 0
    # assumes usage_target is a disk-usage percentage — TODO confirm
    self.usage_target = int_value(conf.get('usage_target'), 0)
    self.usage_check_interval = int_value(conf.get('usage_check_interval'),
                                          3600)
    self.report_interval = int_value(conf.get('report_interval'), 3600)
    self.max_chunks_per_second = int_value(conf.get('chunks_per_second'),
                                           30)
    self.max_bytes_per_second = int_value(conf.get('bytes_per_second'),
                                          10000000)
    # Pass the configuration to the blob client, for consistency with
    # the other workers (it used to be created without it).
    self.blob_client = BlobClient(conf)
    self.container_client = ContainerClient(conf, logger=self.logger)
    self.content_factory = ContentFactory(conf)
def __init__(self, conf, endpoint=None, proxy_endpoint=None,
             refresh_delay=3600.0, logger=None, **kwargs):
    """
    Initialize a client for the account service.

    :param conf: dictionary with at least the namespace name
    :type conf: `dict`
    :param endpoint: URL of an account service
    :param proxy_endpoint: URL of the proxy
    :param refresh_delay: time between refreshes of the account
        service endpoint (if not provided at instantiation)
    :type refresh_delay: `float` seconds
    """
    super(AccountClient, self).__init__(
        endpoint=endpoint, service_type='account-service', **kwargs)
    self.logger = logger or get_logger(conf)
    self.cs = ConscienceClient(conf, endpoint=proxy_endpoint,
                               logger=self.logger, **kwargs)
    # Keep only the timeout-related keyword arguments
    self._global_kwargs = {tok: float_value(tov, None)
                           for tok, tov in kwargs.items()
                           if tok in TIMEOUT_KEYS}
    # A negative delay disables the refresh
    # (when the endpoint was given explicitly).
    self._refresh_delay = refresh_delay if not self.endpoint else -1.0
    self._last_refresh = 0.0
def __init__(self, conf, backend, logger=None):
    """Register the account service's HTTP routes."""
    self.conf = conf
    self.backend = backend
    self.logger = logger or get_logger(conf)
    # (path, endpoint, methods) triples; endpoint names mirror the
    # path after '/v1.0/' with '/' replaced by '_'.
    routes = [
        ('/v1.0/account/create', 'account_create', ['PUT']),
        ('/v1.0/account/delete', 'account_delete', ['POST']),
        ('/v1.0/account/list', 'account_list', ['GET']),
        # FIXME(adu) only PUT
        ('/v1.0/account/update', 'account_update', ['PUT', 'POST']),
        ('/v1.0/account/show', 'account_show', ['GET']),
        ('/v1.0/account/containers', 'account_containers', ['GET']),
        ('/v1.0/account/refresh', 'account_refresh', ['POST']),
        ('/v1.0/account/flush', 'account_flush', ['POST']),
        # FIXME(adu) only PUT
        ('/v1.0/account/container/update',
         'account_container_update', ['PUT', 'POST']),
        # FIXME(adu) only PUT
        ('/v1.0/account/container/reset',
         'account_container_reset', ['PUT', 'POST']),
    ]
    rules = [Rule('/status', endpoint='status')]
    rules.extend(Rule(path, endpoint=name, methods=methods)
                 for path, name, methods in routes)
    self.url_map = Map(rules)
    super(Account, self).__init__(self.url_map, self.logger)
def __init__(self, conf, **kwargs):
    """Check that a volume is configured, then set up the auditor."""
    super(BlobAuditor, self).__init__(conf)
    self.logger = get_logger(conf)
    self.volume = conf.get('volume')
    if not self.volume:
        raise exc.ConfigurationException('No volume specified for auditor')
def __init__(self, conf, logger=None, **kwargs):
    """
    Initialize the blob converter for one rawx volume.

    :param conf: service configuration, must contain 'volume'
    :param logger: optional logger (created from conf when None)
    :raises ConfigurationException: when no volume is specified
    """
    self.conf = conf
    self.logger = logger or get_logger(conf)
    volume = conf.get('volume')
    if not volume:
        raise ConfigurationException('No volume specified for converter')
    self.volume = volume
    self.namespace, self.volume_id = check_volume(self.volume)
    # cache
    self.name_by_cid = CacheDict()
    self.content_id_by_name = CacheDict()
    # client
    self.container_client = ContainerClient(conf, **kwargs)
    self.content_factory = ContentFactory(conf, self.container_client,
                                          logger=self.logger)
    # stats/logs
    self.errors = 0
    self.passes = 0
    self.total_chunks_processed = 0
    self.start_time = 0
    self.last_reported = 0
    self.report_interval = int_value(conf.get('report_interval'), 3600)
    # speed
    self.chunks_run_time = 0
    self.max_chunks_per_second = int_value(conf.get('chunks_per_second'),
                                           30)
    # backup
    self.no_backup = true_value(conf.get('no_backup', False))
    self.backup_dir = conf.get('backup_dir') or tempfile.gettempdir()
    # Backup name unique per volume and per run
    self.backup_name = 'backup_%s_%f' \
        % (self.volume_id, time.time())
    # dry run
    self.dry_run = true_value(conf.get('dry_run', False))
def __init__(self, namespace, concurrency=50,
             error_file=None, rebuild_file=None, check_xattr=True,
             limit_listings=0, request_attempts=1,
             logger=None, verbose=False, check_hash=False, **_kwargs):
    """
    Initialize the integrity checker.

    :param namespace: name of the namespace to check
    :param concurrency: size of the greenthread pool
    :param error_file: optional path of a file where errors are appended
    :param rebuild_file: optional path of a file where chunks to rebuild
        are appended
    :param check_xattr: also check chunk extended attributes
    :param limit_listings: see inline comment below
    :param request_attempts: number of attempts per request
    :param logger: optional logger (a default one is created otherwise)
    :param verbose: make the default logger verbose
    :param check_hash: also check chunk data hashes
    """
    self.pool = GreenPool(concurrency)
    self.error_file = error_file
    self.check_xattr = bool(check_xattr)
    self.check_hash = bool(check_hash)
    self.logger = logger or get_logger(
        {'namespace': namespace}, name='integrity', verbose=verbose)
    # Optimisation for when we are only checking one object
    # or one container.
    # 0 -> do not limit
    # 1 -> limit account listings (list of containers)
    # 2 -> limit container listings (list of objects)
    self.limit_listings = limit_listings
    if self.error_file:
        # NOTE(review): handle kept open for the whole run and never
        # explicitly closed — confirm this is intentional.
        outfile = open(self.error_file, 'a')
        self.error_writer = csv.writer(outfile, delimiter=' ')
    self.rebuild_file = rebuild_file
    if self.rebuild_file:
        self.fd = open(self.rebuild_file, 'a')
        self.rebuild_writer = csv.writer(self.fd, delimiter='|')
    # max_retries is the number of retries after the first attempt
    self.api = ObjectStorageApi(namespace, logger=self.logger,
                                max_retries=request_attempts - 1,
                                request_attempts=request_attempts)
    self.rdir_client = RdirClient({"namespace": namespace},
                                  logger=self.logger)
    # counters
    self.accounts_checked = 0
    self.containers_checked = 0
    self.objects_checked = 0
    self.chunks_checked = 0
    self.account_not_found = 0
    self.container_not_found = 0
    self.object_not_found = 0
    self.chunk_not_found = 0
    self.account_exceptions = 0
    self.container_exceptions = 0
    self.object_exceptions = 0
    self.chunk_exceptions = 0
    # bookkeeping for concurrent checks
    self.list_cache = {}
    self.running = {}
    self.running_lock = Semaphore(1)
    self.result_queue = Queue(concurrency)
    self.run_time = 0
def setUp(self):
    """
    Create a container, then upload a chunk directly to a rawx
    service, so the auditor has something to audit.
    """
    super(TestBlobAuditorFunctional, self).setUp()
    self.namespace = self.conf['namespace']
    self.account = self.conf['account']
    self.test_dir = self.conf['sds_path']
    rawx_num, rawx_path, rawx_addr = self.get_service_url('rawx')
    self.rawx = 'http://' + rawx_addr
    self.h = hashlib.new('md5')
    conf = {"namespace": self.namespace}
    self.auditor = BlobAuditorWorker(conf, get_logger(None), None)
    self.container_c = ContainerClient(conf)
    self.blob_c = BlobClient()
    # Create a container with a random name
    self.ref = random_str(8)
    self.container_c.container_create(self.account, self.ref)
    self.url_rand = random_id(64)
    # Random payload and its md5 checksum
    self.data = random_str(1280)
    self.h.update(self.data)
    self.hash_rand = self.h.hexdigest().lower()
    self.content = TestContent(
        random_str(6), len(self.data), self.url_rand, 1)
    self.content.id_container = cid_from_name(
        self.account, self.ref).upper()
    self.chunk = TestChunk(self.content.size, self.url_rand, 0,
                           self.hash_rand)
    self.chunk_url = "%s/%s" % (self.rawx, self.chunk.id_chunk)
    self.chunk_proxy = {"hash": self.chunk.md5, "pos": "0",
                        "size": self.chunk.size,
                        "url": self.chunk_url}
    chunk_meta = {'content_path': self.content.path,
                  'container_id': self.content.id_container,
                  'chunk_method': 'plain/nb_copy=3',
                  'policy': 'TESTPOLICY',
                  'id': '0000',
                  'version': 1,
                  'chunk_id': self.chunk.id_chunk,
                  'chunk_pos': self.chunk.pos,
                  'chunk_hash': self.chunk.md5,
                  'full_path': ['%s/%s/%s' % (self.account, self.ref,
                                              self.content.path)],
                  'oio_version': OIO_VERSION}
    # Upload the chunk, then compute its expected path on disk
    # (assumes a single local rawx named '<ns>-rawx-1' — TODO confirm)
    self.blob_c.chunk_put(self.chunk_url, chunk_meta, self.data)
    self.chunk_path = self.test_dir + '/data/' + self.namespace + \
        '-rawx-1/' + self.chunk.id_chunk[0:3] + "/" + self.chunk.id_chunk
    self.bad_container_id = '0'*64
def init_request_processor(conf_file, app_name, app_factory, *args, **kwargs):
    """
    Load the configuration, then build the application and its logger.

    :returns: an (app, conf, logger, app_name) tuple
    """
    conf = read_conf(conf_file, app_name)
    try:
        # An explicitly provided logger takes precedence
        logger = kwargs.pop('logger')
    except KeyError:
        logger = get_logger(conf, app_name,
                            verbose=kwargs.pop('verbose', False))
    app = app_factory(conf)
    return (app, conf, logger, app_name)
def __init__(self, conf):
    """Spawn one indexing worker per configured meta2 volume."""
    super(Meta2Indexer, self).__init__(conf=conf)
    self.logger = get_logger(conf)
    volume_list = conf.get("volume_list")
    if not volume_list:
        raise exc.OioException("No meta2 volumes provided to index !")
    self.volumes = [x.strip() for x in volume_list.split(',')]
    self.pool = ContextPool(len(self.volumes))
    self.volume_workers = [Meta2IndexingWorker(x, conf)
                           for x in self.volumes]
def __init__(self, conf, request_prefix="", no_ns_in_url=False,
             endpoint=None, request_attempts=REQUEST_ATTEMPTS,
             logger=None, **kwargs):
    """
    Initialize a client for the proxy service.

    :param request_prefix: text to insert in between endpoint
        and requested URL
    :type request_prefix: `str`
    :param no_ns_in_url: do not insert namespace name between endpoint
        and `request_prefix`
    :type no_ns_in_url: `bool`
    :param request_attempts: number of attempts for the request in case
        of error 503 (defaults to 1)

    :raise oio.common.exceptions.ServiceBusy: if all attempts fail
    """
    assert request_attempts > 0
    validate_service_conf(conf)
    self.ns = conf.get('namespace')
    self.conf = conf
    self.logger = logger or get_logger(conf)

    # Look for an endpoint in the application configuration
    if not endpoint:
        endpoint = self.conf.get('proxyd_url', None)
    # Look for an endpoint in the namespace configuration
    if not endpoint:
        ns_conf = load_namespace_conf(self.ns)
        endpoint = ns_conf.get('proxy')

    # Historically, the endpoint did not contain any scheme
    self.proxy_scheme = 'http'
    split_endpoint = endpoint.split('://', 1)
    if len(split_endpoint) > 1:
        self.proxy_scheme = split_endpoint[0]
    self.proxy_netloc = split_endpoint[-1]

    # Final endpoint shape: scheme://netloc/v3.0[/ns][/prefix]
    ep_parts = list()
    ep_parts.append(self.proxy_scheme + ':/')
    ep_parts.append(self.proxy_netloc)
    ep_parts.append("v3.0")
    if not no_ns_in_url:
        ep_parts.append(self.ns)
    if request_prefix:
        ep_parts.append(request_prefix.lstrip('/'))

    self._request_attempts = request_attempts
    super(ProxyClient, self).__init__(
        endpoint='/'.join(ep_parts), service_type='proxy', **kwargs)
def __init__(self, cfg):
    """Create separate syslog-backed error and access loggers."""
    super(ServiceLogger, self).__init__(cfg)
    prefix = cfg.syslog_prefix or ''
    address = cfg.syslog_addr or '/dev/log'
    # Same prefix and address; only the syslog facility differs.
    common = {'syslog_prefix': prefix, 'log_address': address}
    self.error_log = get_logger(
        dict(common, log_facility='LOG_LOCAL0'), 'log')
    self.access_log = get_logger(
        dict(common, log_facility='LOG_LOCAL1'), 'access')
def __init__(self, conf, **kwargs):
    """Check that a volume is configured and cap the global sleep time."""
    super(BlobMover, self).__init__(conf)
    self.logger = get_logger(conf)
    self.volume = conf.get('volume')
    if not self.volume:
        raise exc.ConfigurationException('No volume specified for mover')
    # Never sleep longer than the report interval
    global SLEEP_TIME
    report_interval = int(conf.get('report_interval', 3600))
    if SLEEP_TIME > report_interval:
        SLEEP_TIME = report_interval
def __init__(self, conf, rdir_client=None, **kwargs):
    """Build the directory and rdir clients from the configuration."""
    self.conf = conf
    self.ns = conf['namespace']
    self.logger = get_logger(conf)
    self.directory = DirectoryClient(conf, logger=self.logger, **kwargs)
    # Reuse the provided rdir client, or create a fresh one
    self.rdir = rdir_client or RdirClient(conf, logger=self.logger,
                                          **kwargs)
    self._cs = None
    self._pool_options = None