def __init__(self, cfg):
    self.cfg = cfg
    prefix = cfg.syslog_prefix if cfg.syslog_prefix else ""
    address = cfg.syslog_addr if cfg.syslog_addr else "/dev/log"
    error_conf = {"syslog_prefix": prefix,
                  "log_facility": "LOG_LOCAL1",
                  "log_address": address}
    access_conf = {"syslog_prefix": prefix,
                   "log_facility": "LOG_LOCAL0",
                   "log_address": address}
    self.error_log = get_logger(error_conf, "rdir.error")
    self.access_log = get_logger(access_conf, "rdir.access")
def test_get_logger(self):
    sio = StringIO()
    logger = logging.getLogger('test')
    logger.addHandler(logging.StreamHandler(sio))
    logger = get_logger(None, 'test')
    logger.warning('msg1')
    self.assertEqual(sio.getvalue(), 'msg1\n')
    # At the default level, debug() output is filtered out.
    logger.debug('msg2')
    self.assertEqual(sio.getvalue(), 'msg1\n')
    conf = {'log_level': 'DEBUG'}
    logger = get_logger(conf, 'test')
    logger.debug('msg3')
    self.assertEqual(sio.getvalue(), 'msg1\nmsg3\n')
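# A minimal sketch of driving get_logger through its conf dict, matching the
# behaviour asserted by the test above. The import path is an assumption for
# illustration; it has moved between versions (oio.common.logger in newer
# trees).
from oio.common.utils import get_logger

logger = get_logger(None, 'demo')  # default level: debug() output is dropped
logger.warning('visible')
debug_logger = get_logger({'log_level': 'DEBUG'}, 'demo')
debug_logger.debug('now visible')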
def __init__(self, conf, **kwargs):
    super(BlobMover, self).__init__(conf)
    self.logger = get_logger(conf)
    volume = conf.get('volume')
    if not volume:
        raise exc.ConfigurationException('No volume specified for mover')
    self.volume = volume
def __init__(self, conf, **kwargs):
    super(BlobAuditor, self).__init__(conf)
    self.logger = get_logger(conf)
    volume = conf.get('volume')
    if not volume:
        raise exc.ConfigurationException('No volume specified for auditor')
    self.volume = volume
def __init__(self, conf, service, **kwargs):
    self.conf = conf
    self.running = False
    for k in ['host', 'port', 'type']:
        if k not in service:
            raise Exception(
                'Missing field "%s" in service configuration' % k)
    self.name = '%s|%s' % (service['host'], service['port'])
    self.check_interval = float_value(conf.get('check_interval'), 1)
    self.service = service
    self.rise = int_value(conf.get('rise'), 1)
    self.fall = int_value(conf.get('fall'), 1)
    self.logger = get_logger(self.conf)
    self.cs = ConscienceClient(self.conf)
    self.client = Client(self.conf)
    self.last_status = False
    self.failed = False
    self.service_definition = {
        'ns': self.conf['namespace'],
        'type': self.service['type'],
        'addr': '%s:%s' % (self.service['host'], self.service['port']),
        'score': 0,
        'tags': {}}
    if self.service.get('location', None):
        self.service_definition['tags']['tag.loc'] = \
            self.service['location']
    self.service_checks = list()
    self.service_stats = list()
    self.init_checkers(service)
    self.init_stats(service)
def __init__(self, conf, logger, volume):
    self.conf = conf
    self.logger = logger or get_logger(conf)
    self.volume = volume
    self.run_time = 0
    self.passes = 0
    self.errors = 0
    self.last_reported = 0
    self.last_usage_check = 0
    self.chunks_run_time = 0
    self.bytes_running_time = 0
    self.bytes_processed = 0
    self.total_bytes_processed = 0
    self.total_chunks_processed = 0
    self.usage_target = int_value(conf.get('usage_target'), 0)
    self.usage_check_interval = int_value(
        conf.get('usage_check_interval'), 3600)
    self.report_interval = int_value(conf.get('report_interval'), 3600)
    self.max_chunks_per_second = int_value(
        conf.get('chunks_per_second'), 30)
    self.max_bytes_per_second = int_value(
        conf.get('bytes_per_second'), 10000000)
    self.blob_client = BlobClient()
    self.container_client = ContainerClient(conf)
    self.content_factory = ContentFactory(conf)
def __init__(self, conf, endpoint=None, proxy_endpoint=None,
             refresh_delay=3600.0, logger=None, **kwargs):
    """
    Initialize a client for the account service.

    :param conf: dictionary with at least the namespace name
    :type conf: `dict`
    :param endpoint: URL of an account service
    :param proxy_endpoint: URL of the proxy
    :param refresh_delay: time between refreshes of the account
        service endpoint (if not provided at instantiation)
    :type refresh_delay: `float` seconds
    """
    super(AccountClient, self).__init__(endpoint=endpoint, **kwargs)
    self.logger = logger or get_logger(conf)
    self.cs = ConscienceClient(conf, endpoint=proxy_endpoint,
                               logger=self.logger, **kwargs)
    self._refresh_delay = refresh_delay if not self.endpoint else -1.0
    self._last_refresh = 0.0
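# Hypothetical instantiation of the AccountClient above; the module path and
# the namespace value are assumptions for illustration.
from oio.account.client import AccountClient

account = AccountClient({'namespace': 'OPENIO'})
# With no explicit endpoint, the account service address is resolved through
# conscience and refreshed at most every refresh_delay (3600 s) thereafter;
# passing endpoint=... pins it (the refresh delay becomes -1.0, i.e. never).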
def __init__(self, conf, **kwargs):
    super(BlobMover, self).__init__(conf)
    self.logger = get_logger(conf)
    volume = conf.get("volume")
    if not volume:
        raise exc.ConfigurationException("No volume specified for mover")
    self.volume = volume
def __init__(self, conf, logger, volume, input_file=None,
             try_chunk_delete=False, beanstalkd_addr=None):
    self.conf = conf
    self.logger = logger or get_logger(conf)
    self.volume = volume
    self.run_time = 0
    self.passes = 0
    self.errors = 0
    self.last_reported = 0
    self.chunks_run_time = 0
    self.bytes_running_time = 0
    self.bytes_processed = 0
    self.total_bytes_processed = 0
    self.total_chunks_processed = 0
    self.dry_run = true_value(conf.get('dry_run', False))
    self.report_interval = int_value(conf.get('report_interval'), 3600)
    self.max_chunks_per_second = int_value(conf.get('chunks_per_second'), 30)
    self.max_bytes_per_second = int_value(
        conf.get('bytes_per_second'), 10000000)
    self.rdir_fetch_limit = int_value(conf.get('rdir_fetch_limit'), 100)
    self.allow_same_rawx = true_value(conf.get('allow_same_rawx'))
    self.input_file = input_file
    self.rdir_client = RdirClient(conf, logger=self.logger)
    self.content_factory = ContentFactory(conf)
    self.try_chunk_delete = try_chunk_delete
    self.beanstalkd_addr = beanstalkd_addr
    self.beanstalkd_tube = conf.get('beanstalkd_tube', 'rebuild')
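# Illustrative conf for the rebuilder-style worker above, spelling out the
# tunables it reads; the values shown are the defaults from the snippet, and
# the 'namespace' key is an assumption (required by the sub-clients).
conf = {
    'namespace': 'OPENIO',
    'dry_run': False,
    'report_interval': 3600,        # seconds between progress reports
    'chunks_per_second': 30,        # rate limits enforced by the run loop
    'bytes_per_second': 10000000,
    'rdir_fetch_limit': 100,        # entries fetched per rdir request
    'beanstalkd_tube': 'rebuild',
}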
def __init__(self, conf, service, **kwargs):
    self.conf = conf
    for k in ['host', 'port', 'type']:
        if k not in service:
            raise Exception('Missing field "%s" in service configuration' % k)
    self.name = '%s|%s' % (service['host'], service['port'])
    self.check_interval = float_value(conf.get('check_interval'), 1)
    self.service = service
    self.rise = int_value(conf.get('rise'), 1)
    self.fall = int_value(conf.get('fall'), 1)
    self.logger = get_logger(self.conf)
    self.cs = ConscienceClient(self.conf)
    self.init_checkers(service)
    self.last_status = False
    self.failed = False
    self.service_definition = {
        'ns': self.conf['namespace'],
        'type': self.service['type'],
        'addr': '%s:%s' % (self.service['host'], self.service['port']),
        'score': 0,
        'tags': {}}
def __init__(self, conf, **kwargs):
    self.conf = conf
    self.ns = conf['namespace']
    self.logger = get_logger(conf)
    self.directory = DirectoryClient(conf, logger=self.logger, **kwargs)
    self.rdir = RdirClient(conf, logger=self.logger, **kwargs)
    self._cs = None
def __init__(self, conf, logger, volume):
    self.conf = conf
    self.logger = logger or get_logger(conf)
    self.volume = volume
    self.run_time = 0
    self.passes = 0
    self.errors = 0
    self.last_reported = 0
    self.chunks_run_time = 0
    self.bytes_running_time = 0
    self.bytes_processed = 0
    self.total_bytes_processed = 0
    self.total_chunks_processed = 0
    self.dry_run = true_value(conf.get('dry_run', False))
    self.report_interval = int_value(conf.get('report_interval'), 3600)
    self.max_chunks_per_second = int_value(
        conf.get('chunks_per_second'), 30)
    self.max_bytes_per_second = int_value(
        conf.get('bytes_per_second'), 10000000)
    self.rdir_fetch_limit = int_value(conf.get('rdir_fetch_limit'), 100)
    self.rdir_client = RdirClient(conf)
    self.content_factory = ContentFactory(conf)
def __init__(self, conf, logger, volume):
    self.conf = conf
    self.logger = logger or get_logger(conf)
    self.volume = volume
    self.run_time = 0
    self.passes = 0
    self.errors = 0
    self.last_reported = 0
    self.last_usage_check = 0
    self.chunks_run_time = 0
    self.bytes_running_time = 0
    self.bytes_processed = 0
    self.total_bytes_processed = 0
    self.total_chunks_processed = 0
    self.usage_target = int_value(conf.get('usage_target'), 0)
    self.usage_check_interval = int_value(
        conf.get('usage_check_interval'), 3600)
    self.report_interval = int_value(conf.get('report_interval'), 3600)
    self.max_chunks_per_second = int_value(
        conf.get('chunks_per_second'), 30)
    self.max_bytes_per_second = int_value(
        conf.get('bytes_per_second'), 10000000)
    self.blob_client = BlobClient()
    self.container_client = ContainerClient(conf)
def setUp(self): super(TestBlobAuditorFunctional, self).setUp() self.namespace = self.conf['namespace'] self.account = self.conf['account'] self.test_dir = self.conf['sds_path'] rawx_num, rawx_path, rawx_addr = self.get_service_url('rawx') self.rawx = 'http://' + rawx_addr self.h = hashlib.new('md5') conf = {"namespace": self.namespace} self.auditor = BlobAuditorWorker(conf, get_logger(None), None) self.container_c = ContainerClient(conf) self.blob_c = BlobClient() self.ref = random_str(8) self.container_c.container_create(self.account, self.ref) self.url_rand = random_id(64) self.data = random_str(1280) self.h.update(self.data) self.hash_rand = self.h.hexdigest().lower() self.content = TestContent(random_str(6), len(self.data), self.url_rand, 1) self.content.id_container = cid_from_name(self.account, self.ref).upper() self.chunk = TestChunk(self.content.size, self.url_rand, 0, self.hash_rand) self.chunk_url = "%s/%s" % (self.rawx, self.chunk.id_chunk) self.chunk_proxy = { "hash": self.chunk.md5, "pos": "0", "size": self.chunk.size, "url": self.chunk_url } chunk_meta = { 'content_path': self.content.path, 'container_id': self.content.id_container, 'chunk_method': 'plain/nb_copy=3', 'policy': 'TESTPOLICY', 'id': '0000', 'version': 1, 'chunk_id': self.chunk.id_chunk, 'chunk_pos': self.chunk.pos, 'chunk_hash': self.chunk.md5, } self.blob_c.chunk_put(self.chunk_url, chunk_meta, self.data) self.chunk_path = self.test_dir + '/data/' + self.namespace + \ '-rawx-1/' + self.chunk.id_chunk[0:3] + "/" + self.chunk.id_chunk self.bad_container_id = '0' * 64
def setUp(self):
    super(TestBlobAuditorFunctional, self).setUp()
    self.namespace = self.conf['namespace']
    self.account = self.conf['account']
    self.test_dir = self.conf['sds_path']
    self.chars = (string.ascii_lowercase + string.ascii_uppercase +
                  string.digits)
    self.chars_id = string.digits + 'ABCDEF'
    self.rawx = 'http://' + self.conf["rawx"][0]['addr']
    self.h = hashlib.new('md5')
    conf = {"namespace": self.namespace}
    self.auditor = BlobAuditorWorker(conf, get_logger(None), None)
    self.container_c = ContainerClient(conf)
    self.blob_c = BlobClient()
    self.ref = rand_generator(self.chars, 8)
    self.container_c.container_create(self.account, self.ref)
    self.url_rand = rand_generator(self.chars_id, 64)
    self.data = rand_generator(self.chars, 1280)
    self.h.update(self.data)
    self.hash_rand = self.h.hexdigest().lower()
    self.content = TestContent(
        rand_generator(self.chars, 6), len(self.data), self.url_rand, 1)
    self.content.id_container = cid_from_name(
        self.account, self.ref).upper()
    self.chunk = TestChunk(self.content.size, self.url_rand, 0,
                           self.hash_rand)
    self.chunk_url = "%s/%s" % (self.rawx, self.chunk.id_chunk)
    self.chunk_proxy = {"hash": self.chunk.md5, "pos": "0",
                        "size": self.chunk.size, "url": self.chunk_url}
    chunk_meta = {'content_size': self.content.size,
                  'content_chunksnb': self.content.nb_chunks,
                  'content_path': self.content.path,
                  'content_cid': self.content.id_container,
                  'content_mimetype': 'application/octet-stream',
                  'content_chunkmethod': 'bytes',
                  'content_policy': 'TESTPOLICY',
                  'content_id': '0000',
                  'content_version': 1,
                  'chunk_id': self.chunk.id_chunk,
                  'chunk_pos': self.chunk.pos}
    self.blob_c.chunk_put(self.chunk_url, chunk_meta, self.data)
    self.chunk_path = (self.test_dir + '/data/NS-rawx-1/' +
                       self.chunk.id_chunk[0:2] + "/" +
                       self.chunk.id_chunk)
    self.bad_container_id = '0' * 64
def setUp(self): super(TestBlobAuditorFunctional, self).setUp() self.namespace = self.conf["namespace"] self.account = self.conf["account"] self.test_dir = self.conf["sds_path"] rawx_num, rawx_path, rawx_addr = self.get_service_url("rawx") self.rawx = "http://" + rawx_addr self.h = hashlib.new("md5") conf = {"namespace": self.namespace} self.auditor = BlobAuditorWorker(conf, get_logger(None), None) self.container_c = ContainerClient(conf) self.blob_c = BlobClient() self.ref = random_str(8) self.container_c.container_create(self.account, self.ref) self.url_rand = random_id(64) self.data = random_str(1280) self.h.update(self.data) self.hash_rand = self.h.hexdigest().lower() self.content = TestContent(random_str(6), len(self.data), self.url_rand, 1) self.content.id_container = cid_from_name(self.account, self.ref).upper() self.chunk = TestChunk(self.content.size, self.url_rand, 0, self.hash_rand) self.chunk_url = "%s/%s" % (self.rawx, self.chunk.id_chunk) self.chunk_proxy = {"hash": self.chunk.md5, "pos": "0", "size": self.chunk.size, "url": self.chunk_url} chunk_meta = { "content_path": self.content.path, "container_id": self.content.id_container, "chunk_method": "plain/nb_copy=3", "policy": "TESTPOLICY", "id": "0000", "version": 1, "chunk_id": self.chunk.id_chunk, "chunk_pos": self.chunk.pos, "chunk_hash": self.chunk.md5, } self.blob_c.chunk_put(self.chunk_url, chunk_meta, self.data) self.chunk_path = ( self.test_dir + "/data/" + self.namespace + "-rawx-1/" + self.chunk.id_chunk[0:3] + "/" + self.chunk.id_chunk ) self.bad_container_id = "0" * 64
def init_request_processor(conf_file, app_name, app_factory, *args, **kwargs):
    conf = read_conf(conf_file, app_name)
    if 'logger' in kwargs:
        logger = kwargs.pop('logger')
    else:
        logger = get_logger(
            conf, app_name, verbose=kwargs.pop('verbose', False))
    app = app_factory(conf)
    return (app, conf, logger, app_name)
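# Sketch of the contract implied by init_request_processor: app_factory takes
# the parsed conf and returns the application object. All names and the conf
# path here are illustrative, not part of the API.
class EchoApp(object):
    def __init__(self, conf):
        self.conf = conf

app, conf, logger, name = init_request_processor(
    '/etc/oio/echo.conf', 'echo', EchoApp)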
def __init__(self, conf):
    validate_service_conf(conf)
    self.conf = conf
    self.logger = get_logger(conf)
    self.running = False
    self.retry_interval = int_value(conf.get('retry_interval'), 30)
    self.last_retry = 0
    self.init_zmq()
    self.init_queue()
    self.init_workers()
def __init__(self, conf, **kwargs):
    super(BlobMover, self).__init__(conf)
    self.logger = get_logger(conf)
    volume = conf.get('volume')
    if not volume:
        raise exc.ConfigurationException('No volume specified for mover')
    self.volume = volume
    global SLEEP_TIME
    if SLEEP_TIME > int(conf.get('report_interval', 3600)):
        SLEEP_TIME = int(conf.get('report_interval', 3600))
def __init__(self, cfg):
    self.cfg = cfg
    prefix = cfg.syslog_prefix if cfg.syslog_prefix else ''
    address = cfg.syslog_addr if cfg.syslog_addr else '/dev/log'
    error_conf = {
        'syslog_prefix': prefix,
        'log_facility': 'LOG_LOCAL0',
        'log_address': address
    }
    access_conf = {
        'syslog_prefix': prefix,
        'log_facility': 'LOG_LOCAL1',
        'log_address': address
    }
    self.error_log = get_logger(error_conf, 'rdir.error')
    self.access_log = get_logger(access_conf, 'rdir.access')
def __init__(self, cfg):
    self.cfg = cfg
    prefix = cfg.syslog_prefix if cfg.syslog_prefix else ''
    address = cfg.syslog_addr if cfg.syslog_addr else '/dev/log'
    error_conf = {
        'syslog_prefix': prefix,
        'log_facility': 'LOG_LOCAL1',
        'log_address': address
    }
    access_conf = {
        'syslog_prefix': prefix,
        'log_facility': 'LOG_LOCAL0',
        'log_address': address
    }
    self.error_log = get_logger(error_conf, 'account.error')
    self.access_log = get_logger(access_conf, 'account.access')
def __init__(self, conf, **kwargs): super(Client, self).__init__() validate_service_conf(conf) self.ns = conf.get("namespace") ns_conf = load_namespace_conf(self.ns) self.conf = conf self.ns_conf = ns_conf self.logger = get_logger(conf) self.session = requests.Session() self.endpoint = "http://%s" % ns_conf.get("proxy")
def __init__(self, conf, session=None, **kwargs):
    super(Client, self).__init__()
    validate_service_conf(conf)
    self.ns = conf.get('namespace')
    ns_conf = load_namespace_conf(self.ns)
    self.conf = conf
    self.ns_conf = ns_conf
    self.logger = get_logger(conf)
    self.session = session or requests.Session()
    self.endpoint = 'http://%s' % ns_conf.get('proxy')
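# The session parameter above lets several clients share one connection pool;
# a sketch assuming Client is importable as defined above and that the
# namespace value exists.
import requests

session = requests.Session()
client_a = Client({'namespace': 'OPENIO'}, session=session)
client_b = Client({'namespace': 'OPENIO'}, session=session)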
def __init__(self, cfg):
    super(ServiceLogger, self).__init__(cfg)
    prefix = cfg.syslog_prefix if cfg.syslog_prefix else ''
    address = cfg.syslog_addr if cfg.syslog_addr else '/dev/log'
    error_conf = {
        'syslog_prefix': prefix,
        'log_facility': 'LOG_LOCAL0',
        'log_address': address
    }
    access_conf = {
        'syslog_prefix': prefix,
        'log_facility': 'LOG_LOCAL1',
        'log_address': address
    }
    self.error_log = get_logger(error_conf, 'log')
    self.access_log = get_logger(access_conf, 'access')
def __init__(self, conf_file, worker_class, **kwargs):
    section_name = 'event-agent'
    self.conf = read_conf(conf_file, section_name)
    self.logger = get_logger(self.conf, verbose=kwargs.pop('verbose', False))
    redirect_stdio(self.logger)
    drop_privileges(self.conf.get('user', 'openio'))
    self.num_workers = int_value(self.conf.get('workers'), CPU_COUNT)
    self.worker_class = worker_class
    self.workers = {}
    self.sig_queue = []
def __init__(self, conf_file, worker_class, **kwargs):
    section_name = 'event-agent'
    self.conf = read_conf(conf_file, section_name)
    self.logger = get_logger(
        self.conf, verbose=kwargs.pop('verbose', False))
    redirect_stdio(self.logger)
    drop_privileges(self.conf.get('user', 'openio'))
    self.num_workers = int_value(self.conf.get('workers'), CPU_COUNT)
    self.worker_class = worker_class
    self.workers = {}
    self.sig_queue = []
def run_daemon(klass, conf_file, section_name=None, **kwargs):
    if section_name is None:
        section_name = sub(r'([a-z])([A-Z])', r'\1-\2',
                           klass.__name__).lower()
    conf = read_conf(conf_file, section_name,
                     use_yaml=kwargs.pop('use_yaml', False))
    logger = get_logger(conf, section_name,
                        verbose=kwargs.pop('verbose', False))
    try:
        klass(conf).start(**kwargs)
    except KeyboardInterrupt:
        logger.info('User interrupt')
    logger.info('Daemon exited')
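# run_daemon derives the config section from the class name when section_name
# is omitted, camel case becoming dash-separated; a self-contained check of
# that regex:
from re import sub

assert sub(r'([a-z])([A-Z])', r'\1-\2', 'BlobIndexer').lower() == 'blob-indexer'
# so run_daemon(BlobIndexer, conf_file) reads the [blob-indexer] section.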
def __init__(self, conf):
    validate_service_conf(conf)
    self.conf = conf
    self.logger = get_logger(conf)
    self.running = False
    self.retries_run_time = 0
    self.max_retries_per_second = int_value(
        conf.get('retries_per_second'), 30)
    self.batch_size = int_value(conf.get('batch_size'), 500)
    self.init_zmq()
    self.init_queue()
    self.init_workers()
def setUp(self):
    super(TestBlobAuditorFunctional, self).setUp()
    self.namespace = self.conf['namespace']
    self.account = self.conf['account']
    self.test_dir = self.conf['sds_path']
    rawx_num, rawx_path, rawx_addr = self.get_service_url('rawx')
    self.rawx = 'http://' + rawx_addr
    self.h = hashlib.new('md5')
    conf = {"namespace": self.namespace}
    self.auditor = BlobAuditorWorker(conf, get_logger(None), None)
    self.container_c = ContainerClient(conf)
    self.blob_c = BlobClient()
    self.ref = random_str(8)
    self.container_c.container_create(self.account, self.ref)
    self.url_rand = random_id(64)
    self.data = random_str(1280)
    self.h.update(self.data)
    self.hash_rand = self.h.hexdigest().lower()
    self.content = TestContent(
        random_str(6), len(self.data), self.url_rand, 1)
    self.content.id_container = cid_from_name(
        self.account, self.ref).upper()
    self.chunk = TestChunk(self.content.size, self.url_rand, 0,
                           self.hash_rand)
    self.chunk_url = "%s/%s" % (self.rawx, self.chunk.id_chunk)
    self.chunk_proxy = {"hash": self.chunk.md5, "pos": "0",
                        "size": self.chunk.size, "url": self.chunk_url}
    chunk_meta = {'content_path': self.content.path,
                  'container_id': self.content.id_container,
                  'content_chunkmethod': 'plain/nb_copy=3',
                  'content_policy': 'TESTPOLICY',
                  'content_id': '0000',
                  'content_version': 1,
                  'chunk_id': self.chunk.id_chunk,
                  'chunk_pos': self.chunk.pos}
    self.blob_c.chunk_put(self.chunk_url, chunk_meta, self.data)
    self.chunk_path = (self.test_dir + '/data/' + self.namespace +
                       '-rawx-1/' + self.chunk.id_chunk[0:3] + "/" +
                       self.chunk.id_chunk)
    self.bad_container_id = '0' * 64
def __init__(self, conf, pool_manager=None, request_prefix="",
             no_ns_in_url=False, endpoint=None,
             request_attempts=REQUEST_ATTEMPTS, logger=None, **kwargs):
    """
    :param pool_manager: an optional pool manager that will be reused
    :type pool_manager: `urllib3.PoolManager`
    :param request_prefix: text to insert in between endpoint and
        requested URL
    :type request_prefix: `str`
    :param no_ns_in_url: do not insert namespace name between endpoint
        and `request_prefix`
    :type no_ns_in_url: `bool`
    :param request_attempts: number of attempts for the request in case
        of error 503
    :raise oio.common.exceptions.ServiceBusy: if all attempts fail
    """
    assert request_attempts > 0
    validate_service_conf(conf)
    self.ns = conf.get('namespace')
    self.conf = conf
    self.logger = logger or get_logger(conf)

    ep_parts = list()
    if endpoint:
        # str.lstrip() strips a set of characters, not a prefix;
        # slice the scheme off instead.
        self.proxy_netloc = endpoint[len("http://"):]
    else:
        ns_conf = load_namespace_conf(self.ns)
        self.proxy_netloc = ns_conf.get('proxy')
    ep_parts.append("http:/")
    ep_parts.append(self.proxy_netloc)
    ep_parts.append("v3.0")
    if not no_ns_in_url:
        ep_parts.append(self.ns)
    if request_prefix:
        ep_parts.append(request_prefix.lstrip('/'))
    self._request_attempts = request_attempts
    super(ProxyClient, self).__init__(endpoint='/'.join(ep_parts), **kwargs)
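# The endpoint assembled above is '/'.join(ep_parts); since the first part is
# "http:/", the join restores the double slash. A self-contained check with
# illustrative values:
ep_parts = ["http:/", "127.0.0.1:6000", "v3.0", "OPENIO", "conscience"]
assert '/'.join(ep_parts) == "http://127.0.0.1:6000/v3.0/OPENIO/conscience"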
def __init__(self, conf, name, context, **kwargs): self.conf = conf self.name = name verbose = kwargs.pop("verbose", False) self.logger = get_logger(self.conf, verbose=verbose) self.init_zmq(context) self.cs = ConscienceClient(self.conf) self.rdir = RdirClient(self.conf) self._acct_addr = None self.acct_update = 0 self.acct_refresh_interval = int_value(conf.get("acct_refresh_interval"), 60) self.acct_update = true_value(conf.get("acct_update", True)) self.rdir_update = true_value(conf.get("rdir_update", True)) self.session = requests.Session() self.failed = False
def __init__(self, conf, **kwargs): super(StorageTierer, self).__init__(conf) self.logger = get_logger(conf) if not conf.get(CONF_ACCOUNT): raise exc.ConfigurationException( "No account specified for storage tiering " "(token '%s'" % CONF_ACCOUNT) if not conf.get(CONF_OUTDATED_THRESHOLD): raise exc.ConfigurationException( "No date specified for storage tiering " "(token '%s'" % CONF_OUTDATED_THRESHOLD) if not conf.get(CONF_NEW_POLICY): raise exc.ConfigurationException( "No new policy specified for storage tiering " "(token '%s'" % CONF_NEW_POLICY)
def __init__(self, conf, backend, logger=None):
    self.conf = conf
    self.backend = backend
    self.logger = logger or get_logger(conf)
    self.url_map = Map([
        Rule('/status', endpoint='status'),
        Rule('/v1.0/account/create', endpoint='account_create'),
        Rule('/v1.0/account/delete', endpoint='account_delete'),
        Rule('/v1.0/account/update', endpoint='account_update'),
        Rule('/v1.0/account/show', endpoint='account_show'),
        Rule('/v1.0/account/containers', endpoint='account_containers'),
        Rule('/v1.0/account/container/update',
             endpoint='account_container_update')
    ])
def __init__(self, conf, backend, logger=None): self.conf = conf self.backend = backend self.logger = logger or get_logger(conf) self.url_map = Map( [ Rule("/status", endpoint="status"), Rule("/v1.0/account/create", endpoint="account_create"), Rule("/v1.0/account/delete", endpoint="account_delete"), Rule("/v1.0/account/update", endpoint="account_update"), Rule("/v1.0/account/show", endpoint="account_show"), Rule("/v1.0/account/containers", endpoint="account_containers"), Rule("/v1.0/account/container/update", endpoint="account_container_update"), ] )
def __init__(self, conf, name, context, **kwargs):
    self.conf = conf
    self.name = name
    verbose = kwargs.pop('verbose', False)
    self.logger = get_logger(self.conf, verbose=verbose)
    self.init_zmq(context)
    self.cs = ConscienceClient(self.conf)
    self.rdir = RdirClient(self.conf)
    self._acct_addr = None
    self.acct_update = 0
    self.acct_refresh_interval = int_value(
        conf.get('acct_refresh_interval'), 60)
    self.acct_update = true_value(conf.get('acct_update', True))
    self.rdir_update = true_value(conf.get('rdir_update', True))
    self.session = requests.Session()
    self.failed = False
def __init__(self, conf, service, **kwargs): self.conf = conf self.running = False for k in ['host', 'port', 'type']: if k not in service: raise Exception('Missing field "%s" in service configuration' % k) self.name = '%s|%s|%s' % \ (service['type'], service['host'], service['port']) self.service = service self.rise = int_value(self._load_item_config('rise'), 1) self.fall = int_value(self._load_item_config('fall'), 1) self.check_interval = float_value( self._load_item_config('check_interval'), 1) self.deregister_on_exit = true_value( self._load_item_config('deregister_on_exit', False)) self.logger = get_logger(self.conf) self.session = requests.Session() self.cs = ConscienceClient(self.conf, session=self.session) # FIXME: explain that self.client = ProxyClient(self.conf, session=self.session, no_ns_in_url=True) self.last_status = False self.failed = False self.service_definition = { 'ns': self.conf['namespace'], 'type': self.service['type'], 'addr': '%s:%s' % (self.service['host'], self.service['port']), 'score': 0, 'tags': {} } if self.service.get('location', None): self.service_definition['tags']['tag.loc'] = \ self.service['location'] if self.service.get('slots', None): self.service_definition['tags']['tag.slots'] = \ ','.join(self.service['slots']) self.service_checks = list() self.service_stats = list() self.init_checkers(service) self.init_stats(service)
def __init__(self, conf, **kwargs): super(StorageTierer, self).__init__(conf) self.logger = get_logger(conf) if not conf.get(CONF_ACCOUNT): raise exc.ConfigurationException( "No account specified for storage tiering " "(token '%s'" % CONF_ACCOUNT) if not conf.get(CONF_OUTDATED_THRESHOLD): raise exc.ConfigurationException( "No threshold specified for storage tiering " "(token '%s'" % CONF_OUTDATED_THRESHOLD) if not conf.get(CONF_NEW_POLICY): raise exc.ConfigurationException( "No new policy specified for storage tiering " "(token '%s'" % CONF_NEW_POLICY) if conf.get('syslog_prefix'): print "Logging to syslog, with prefix '%(syslog_prefix)s'" % conf
def __init__(self, conf, container_id, metadata, chunks, storage_method):
    self.conf = conf
    self.container_id = container_id
    self.metadata = metadata
    self.chunks = ChunksHelper(chunks)
    self.storage_method = storage_method
    self.logger = get_logger(self.conf)
    self.cs_client = ConscienceClient(conf)
    self.blob_client = BlobClient()
    self.container_client = ContainerClient(self.conf)
    self.content_id = self.metadata["id"]
    self.stgpol = self.metadata["policy"]
    self.path = self.metadata["name"]
    self.length = int(self.metadata["length"])
    self.version = self.metadata["version"]
    self.checksum = self.metadata["hash"]
    self.mime_type = self.metadata["mime_type"]
    self.chunk_method = self.metadata["chunk_method"]
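# Shape of the metadata dict consumed above; note this variant reads
# underscore keys ("mime_type", "chunk_method") while the older variant
# further down reads "mime-type" and "chunk-method". Values are illustrative,
# reusing ones seen in the test fixtures above.
metadata = {
    'id': '0000',
    'policy': 'TESTPOLICY',
    'name': 'my-object',
    'length': 1280,
    'version': 1,
    'hash': 'd41d8cd98f00b204e9800998ecf8427e',
    'mime_type': 'application/octet-stream',
    'chunk_method': 'plain/nb_copy=3',
}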
def __init__(self, conf, **kwargs):
    super(BlobIndexer, self).__init__(conf)
    self.logger = get_logger(conf)
    volume = conf.get('volume')
    if not volume:
        raise exc.ConfigurationException('No volume specified for indexer')
    self.volume = volume
    self.passes = 0
    self.errors = 0
    self.successes = 0
    self.last_reported = 0
    self.chunks_run_time = 0
    self.interval = int_value(conf.get('interval'), 300)
    self.report_interval = int_value(conf.get('report_interval'), 3600)
    self.max_chunks_per_second = int_value(conf.get('chunks_per_second'), 30)
    self.index_client = RdirClient(conf, logger=self.logger)
    self.namespace, self.volume_id = check_volume(self.volume)
def __init__(self, conf, backend, logger=None):
    self.conf = conf
    self.logger = logger or get_logger(conf)
    self.backend = backend
    self.ns = conf['namespace']
    self.url_map = Map([
        Rule('/status', endpoint='status'),
        Rule('/v1/rdir/admin/show', endpoint='rdir_admin_show'),
        Rule('/v1/rdir/admin/unlock', endpoint='rdir_admin_unlock'),
        Rule('/v1/rdir/admin/lock', endpoint='rdir_admin_lock'),
        Rule('/v1/rdir/create', endpoint='rdir_create'),
        Rule('/v1/rdir/push', endpoint='rdir_push'),
        Rule('/v1/rdir/delete', endpoint='rdir_delete'),
        Rule('/v1/rdir/fetch', endpoint='rdir_fetch'),
        Rule('/v1/rdir/status', endpoint='rdir_status'),
        Rule('/v1/rdir/admin/clear', endpoint='rdir_admin_clear'),
        Rule('/v1/rdir/admin/incident', endpoint='rdir_admin_incident'),
    ])
def __init__(self, conf):
    if conf:
        self.conf = read_conf(conf['key_file'],
                              section_name="admin-server")
    else:
        self.conf = {}
    self.logger = get_logger(self.conf, name="ContainerBackup")
    self.proxy = ObjectStorageApi(self.conf.get("namespace", NS),
                                  logger=self.logger)
    self.url_map = Map([
        Rule('/v1.0/container/dump', endpoint='dump'),
        Rule('/v1.0/container/restore', endpoint='restore'),
    ])
    self.REDIS_TIMEOUT = self.conf.get("redis_cache_timeout",
                                       self.REDIS_TIMEOUT)
    super(ContainerBackup, self).__init__(self.conf)
    WerkzeugApp.__init__(self, self.url_map, self.logger)
def __init__(self, conf, container_id, metadata, chunks, stgpol_args):
    self.conf = conf
    self.container_id = container_id
    self.metadata = metadata
    self.chunks = ChunksHelper(chunks)
    self.stgpol_args = stgpol_args
    self.logger = get_logger(self.conf)
    self.cs_client = ConscienceClient(conf)
    self.container_client = ContainerClient(self.conf)
    self.blob_client = BlobClient()
    self.session = requests.Session()
    self.content_id = self.metadata["id"]
    self.stgpol_name = self.metadata["policy"]
    self.path = self.metadata["name"]
    self.length = int(self.metadata["length"])
    self.version = self.metadata["version"]
    self.hash = self.metadata["hash"]
    self.mime_type = self.metadata["mime-type"]
    self.chunk_method = self.metadata["chunk-method"]
def __init__(self, conf, **kwargs):
    super(BlobIndexer, self).__init__(conf)
    self.logger = get_logger(conf)
    volume = conf.get('volume')
    if not volume:
        raise exc.ConfigurationException('No volume specified for indexer')
    self.volume = volume
    self.passes = 0
    self.errors = 0
    self.last_reported = 0
    self.chunks_run_time = 0
    self.total_chunks_processed = 0
    self.interval = int_value(conf.get('interval'), 300)
    self.report_interval = int_value(conf.get('report_interval'), 3600)
    self.max_chunks_per_second = int_value(
        conf.get('chunks_per_second'), 30)
    self.index_client = RdirClient(conf)
    self.namespace, self.volume_id = check_volume(self.volume)
def __init__(self, conf, backend, logger=None):
    self.conf = conf
    self.backend = backend
    self.logger = logger or get_logger(conf)
    self.url_map = Map([
        Rule('/status', endpoint='status'),
        Rule('/v1.0/account/create', endpoint='account_create'),
        Rule('/v1.0/account/delete', endpoint='account_delete'),
        Rule('/v1.0/account/list', endpoint='account_list'),
        Rule('/v1.0/account/update', endpoint='account_update'),
        Rule('/v1.0/account/show', endpoint='account_show'),
        Rule('/v1.0/account/containers', endpoint='account_containers'),
        Rule('/v1.0/account/refresh', endpoint='account_refresh'),
        Rule('/v1.0/account/flush', endpoint='account_flush'),
        Rule('/v1.0/account/container/update',
             endpoint='account_container_update'),
        Rule('/v1.0/account/container/reset',
             endpoint='account_container_reset')
    ])
    super(Account, self).__init__(self.url_map, self.logger)
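# Self-contained sketch of how a werkzeug Map such as the one above resolves
# a request path to an endpoint name; dispatching that name to a handler
# method is left to the WerkzeugApp-style base class.
from werkzeug.routing import Map, Rule

url_map = Map([Rule('/v1.0/account/show', endpoint='account_show')])
endpoint, args = url_map.bind('localhost').match('/v1.0/account/show')
assert endpoint == 'account_show'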
def __init__(self, conf, logger, volume):
    self.conf = conf
    self.logger = logger or get_logger(conf)
    self.volume = volume
    self.run_time = 0
    self.passes = 0
    self.errors = 0
    self.last_reported = 0
    self.chunks_run_time = 0
    self.bytes_running_time = 0
    self.bytes_processed = 0
    self.total_bytes_processed = 0
    self.total_chunks_processed = 0
    self.dry_run = true_value(conf.get('dry_run', False))
    self.report_interval = int_value(conf.get('report_interval'), 3600)
    self.max_chunks_per_second = int_value(conf.get('chunks_per_second'), 30)
    self.max_bytes_per_second = int_value(
        conf.get('bytes_per_second'), 10000000)
    self.rdir_fetch_limit = int_value(conf.get('rdir_fetch_limit'), 100)
    self.rdir_client = RdirClient(conf)
    self.content_factory = ContentFactory(conf)
def __init__(self, conf, session=None, request_prefix="", no_ns_in_url=False, endpoint=None, **kwargs): """ :param session: an optional session that will be reused :type session: `requests.Session` :param request_prefix: text to insert in between endpoint and requested URL :type request_prefix: `str` :param no_ns_in_url: do not insert namespace name between endpoint and `request_prefix` :type no_ns_in_url: `bool` """ validate_service_conf(conf) self.ns = conf.get('namespace') self.conf = conf self.logger = get_logger(conf) ep_parts = list() if endpoint: self.proxy_netloc = endpoint[7:] # skip "http://" ep_parts.append(endpoint) else: ns_conf = load_namespace_conf(self.ns) self.proxy_netloc = ns_conf.get('proxy') ep_parts.append("http:/") ep_parts.append(self.proxy_netloc) ep_parts.append("v3.0") if not no_ns_in_url: ep_parts.append(self.ns) if request_prefix: ep_parts.append(request_prefix.lstrip('/')) super(ProxyClient, self).__init__(endpoint='/'.join(ep_parts), **kwargs)
def __init__(self, conf):
    self.conf = conf
    self.logger = get_logger(conf)
def __init__(self, conf):
    self.conf = conf
    self.logger = get_logger(conf)
    self.container_client = ContainerClient(conf)
def __init__(self, conf):
    self.db_path = conf.get('db_path')
    self.dbs = {}
    self.logger = get_logger(conf)
    if not os.path.exists(self.db_path):
        os.makedirs(self.db_path)
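# The exists()/makedirs() pair above is racy if two processes initialize the
# same db_path concurrently; a sketch of a race-free equivalent, tolerant of
# the directory already existing (works on Python 2 and 3):
import errno
import os

def makedirs_safe(path):
    try:
        os.makedirs(path)
    except OSError as err:
        if err.errno != errno.EEXIST:
            raise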
def __init__(self, conf):
    validate_service_conf(conf)
    self.conf = conf
    self.logger = get_logger(conf)
    self.load_services()
    self.init_watchers(self.conf['services'])