def __init__(self, conf, **kwargs):
    """Set up the directory and rdir clients from *conf*.

    :param conf: configuration dict, must contain a 'namespace' key
    """
    self.conf = conf
    self.ns = conf['namespace']
    self.logger = get_logger(conf)
    self.directory = DirectoryClient(conf, logger=self.logger, **kwargs)
    self.rdir = RdirClient(conf, logger=self.logger, **kwargs)
    # Conscience client, instantiated lazily elsewhere
    self._cs = None
def make_client(instance):
    """Build a :class:`DirectoryClient` bound to *instance*'s namespace
    and directory endpoint.
    """
    from oio.directory.client import DirectoryClient
    return DirectoryClient(
        {"namespace": instance.namespace},
        endpoint=instance.get_endpoint('directory'))
def __init__(self, namespace, **kwargs):
    """
    Initialize the object storage API.

    :param namespace: name of the namespace to interact with
    :type namespace: `str`
    :keyword connection_timeout: connection timeout towards rawx services
    :type connection_timeout: `float` seconds
    :keyword read_timeout: timeout for rawx responses and data reads from
        the caller (when uploading)
    :type read_timeout: `float` seconds
    :keyword write_timeout: timeout for rawx write requests
    :type write_timeout: `float` seconds
    """
    self.namespace = namespace
    # Normalize the optional timeouts to floats (or None when absent)
    self.connection_timeout = utils.float_value(
        kwargs.get("connection_timeout"), None)
    self.read_timeout = utils.float_value(
        kwargs.get("read_timeout"), None)
    self.write_timeout = utils.float_value(
        kwargs.get("write_timeout"), None)
    # FIXME: share session between all the clients
    self.directory = DirectoryClient({"namespace": namespace}, **kwargs)
    self.account = AccountClient({"namespace": namespace}, **kwargs)
    self.container = ContainerClient({"namespace": namespace}, **kwargs)
def __init__(self, namespace, logger=None, **kwargs):
    """
    Initialize the object storage API.

    :param namespace: name of the namespace to interact with
    :type namespace: `str`
    :keyword connection_timeout: connection timeout towards rawx services
    :type connection_timeout: `float` seconds
    :keyword read_timeout: timeout for rawx responses and data reads from
        the caller (when uploading)
    :type read_timeout: `float` seconds
    :keyword write_timeout: timeout for rawx write requests
    :type write_timeout: `float` seconds
    :keyword pool_manager: a pooled connection manager that will be used
        for all HTTP based APIs (except rawx)
    :type pool_manager: `urllib3.PoolManager`
    """
    self.namespace = namespace
    conf = {"namespace": self.namespace}
    self.logger = logger or get_logger(conf)
    # Keep only the recognized timeout keywords, normalized to floats
    self.timeouts = {key: float_value(val, None)
                     for key, val in kwargs.items()
                     if key in self.__class__.TIMEOUT_KEYS}

    from oio.account.client import AccountClient
    from oio.container.client import ContainerClient
    from oio.directory.client import DirectoryClient
    self.directory = DirectoryClient(conf, logger=self.logger, **kwargs)
    self.container = ContainerClient(conf, logger=self.logger, **kwargs)
    # In AccountClient, "endpoint" is the account service, not the proxy
    account_kwargs = kwargs.copy()
    account_kwargs["proxy_endpoint"] = account_kwargs.pop("endpoint", None)
    self.account = AccountClient(conf, logger=self.logger, **account_kwargs)
def __init__(self, conf, directory_client=None, **kwargs):
    """Rdir client, optionally reusing an existing directory client.

    :param conf: configuration dict, must contain a 'namespace' key
    :param directory_client: optional pre-built DirectoryClient; a new
        one is created from *conf* when not provided
    """
    super(RdirClient, self).__init__(service_type='rdir', **kwargs)
    self.conf = conf
    self.ns = conf['namespace']
    self.directory = directory_client or DirectoryClient(conf, **kwargs)
    # Cache of resolved rdir service addresses
    self._addr_cache = {}
    # Conscience client, instantiated lazily elsewhere
    self._cs = None
def setUp(self):
    """Prepare clients and a random set of container names."""
    super(TestMeta2Indexing, self).setUp()
    self.rdir_client = RdirClient(self.conf)
    self.directory_client = DirectoryClient(self.conf)
    self.container_client = ContainerClient(self.conf)
    # Between 1 and 10 containers with random 14-char names
    self.containers = [random_str(14) for _ in range(randint(1, 10))]
    self.containers_svcs = {}
    self.event_agent_name = 'event-agent-1'
def __init__(self, conf, rdir_client=None, **kwargs):
    """Set up directory and rdir clients from *conf*.

    :param conf: configuration dict, must contain a 'namespace' key
    :param rdir_client: optional pre-built RdirClient; a new one is
        created from *conf* when not provided
    """
    self.conf = conf
    self.ns = conf['namespace']
    self.logger = get_logger(conf)
    self.directory = DirectoryClient(conf, logger=self.logger, **kwargs)
    self.rdir = rdir_client or RdirClient(conf, logger=self.logger, **kwargs)
    # Conscience client and pool options, filled in lazily elsewhere
    self._cs = None
    self._pool_options = None
def __init__(self, namespace, **kwargs):
    """Meta2 checker: targets the proxy's content route for *namespace*.

    :param namespace: name of the namespace to check
    """
    # "http:/" + "/"-join yields "http://<proxy>/v3.0/<ns>/content"
    proxy = load_namespace_conf(namespace).get('proxy')
    endpoint = "/".join(("http:/", proxy, "v3.0", namespace, "content"))
    super(CheckMeta2, self).__init__(
        namespace, "meta2", endpoint=endpoint, **kwargs)
    self.account = AccountClient({"namespace": self.ns})
    self.container = ContainerClient({"namespace": self.ns})
    self.directory = DirectoryClient({"namespace": self.ns})
    # Random reference name used by the check
    self.reference = random_buffer('0123456789ABCDEF', 64)
def __init__(self, volume_path, conf, pool_manager=None):
    """
    Initializes an Indexing worker for indexing meta2 databases.

    Possible values of conf relating to this worker are:
    - interval: (int) in sec time between two full scans. Default: half
      an hour.
    - report_interval: (int) in sec, time between two reports:
      Default: 300
    - scanned_per_second: (int) maximum number of indexed databases /s.
    - try_removing_faulty_indexes: In the event where we encounter a
      database that's not supposed to be handled by this volume, attempt
      to remove it from this volume rdir index if it exists.
      WARNING: The decision is based off of a proxy response, that could
      be affected by cache inconsistencies for example, use at your own
      risk. Default: False

    :param volume_path: The volume path to be indexed
    :param conf: The configuration to be passed to the needed services
    :param pool_manager: A connection pool manager. If none is given, a
        new one with a default size of 10 will be created.
    """
    self.logger = get_logger(conf)
    self._stop = False
    self.volume = volume_path

    # Counters and timestamps used for progress reporting
    self.success_nb = 0
    self.failed_nb = 0
    self.full_scan_nb = 0
    self.last_report_time = 0
    self.last_scan_time = 0
    self.last_index_time = 0
    self.start_time = 0
    self.indexed_since_last_report = 0

    # Tunables, with their documented defaults
    self.scans_interval = int_value(conf.get('interval'), 1800)
    self.report_interval = int_value(conf.get('report_interval'), 300)
    self.max_indexed_per_second = int_value(
        conf.get('scanned_per_second'), 3000)

    self.namespace, self.volume_id = check_volume_for_service_type(
        self.volume, "meta2")
    self.attempt_bad_index_removal = boolean_value(
        conf.get('try_removing_faulty_indexes', False))

    if not pool_manager:
        pool_manager = get_pool_manager(pool_connections=10)
    self.index_client = RdirClient(
        conf, logger=self.logger, pool_manager=pool_manager)
    self.dir_client = DirectoryClient(
        conf, logger=self.logger, pool_manager=pool_manager)
def run(args):
    """Scan all account entries and, for containers whose meta2 reference
    is gone from the directory, post a container-deletion event to the
    Account service.

    :param args: parsed CLI arguments; uses ``prefix``, ``verbose`` and
        ``dry_run``.
    """
    pool = get_pool_manager()
    v = vars(args)
    dirclient = DirectoryClient(v)
    backend = AccountBackend(v)
    for entry, _, _, partial in full_list(backend, prefix=args.prefix):
        if partial:
            if args.verbose:
                print(":%s: partial, skip" % entry)
            continue
        try:
            dirclient.show(account=ACCOUNT, reference=entry)
            if args.verbose:
                print("%s: OK" % entry)
            continue
        except NotFound:
            # Reference missing from the directory: handle below
            pass
        except Exception as exc:
            print("Exception not managed for %s: %s" % (entry, str(exc)))
            continue
        print("%s: meta2 not found" % entry)
        if args.dry_run:
            continue
        data = {"dtime": time(), "name": entry}
        # post event to Account service
        res = pool.request(
            'POST',
            HOST + '/v1.0/account/container/update?id=%s' % ACCOUNT,
            headers={'Content-Type': 'application/json'},
            body=json.dumps(data))
        # BUG FIX: urllib3 responses expose ``status`` (an int), not
        # ``status_int`` (which raised AttributeError here), and the
        # status-class check needs floor division: with ``/`` the result
        # is a float (e.g. 500 / 100 == 5.0), and any non-multiple of
        # 100 such as 201 would compare unequal to 2 and be misreported.
        if res.status // 100 != 2:
            print(res.status)
def __init__(self, conf, **kwargs):
    """Rdir client wired with its own directory client.

    :param conf: configuration dict forwarded to both clients
    """
    super(RdirClient, self).__init__(conf, **kwargs)
    # Used to resolve which rdir services hold a volume's index
    self.directory_client = DirectoryClient(conf, **kwargs)
def directory(self):
    """Return the directory client, creating it on first access."""
    client = self._directory
    if not client:
        client = DirectoryClient(self.conf)
        self._directory = client
    return client
def setUp(self):
    """Build a DirectoryClient pointed at the test proxy endpoint."""
    super(TestDirectoryAPI, self).setUp()
    self.api = DirectoryClient(
        {'namespace': self.ns}, endpoint=self.uri)
def __init__(self, conf, **kwargs):
    """Rdir client with a directory client and an address cache.

    :param conf: configuration dict, must contain a 'namespace' key
    """
    super(RdirClient, self).__init__(**kwargs)
    self.ns = conf['namespace']
    self.directory = DirectoryClient(conf, **kwargs)
    # Cache of resolved rdir service addresses
    self._addr_cache = {}
def __init__(self, conf, **kwargs):
    """Rdir client honoring the 'autocreate' configuration flag.

    :param conf: configuration dict; 'autocreate' defaults to True
    """
    super(RdirClient, self).__init__(conf, **kwargs)
    # Whether missing rdir databases should be created on the fly
    self.autocreate = true_value(conf.get('autocreate', True))
    self.directory_client = DirectoryClient(conf)
def reference(self):
    """Return the directory client, creating it on first access."""
    client = self._reference
    if not client:
        client = DirectoryClient(self.conf)
        self._reference = client
    return client
def __init__(self, conf, **kwargs):
    """Rdir client with a directory client and an address cache.

    :param conf: configuration dict forwarded to both clients
    """
    super(RdirClient, self).__init__(conf, **kwargs)
    self.directory = DirectoryClient(conf, **kwargs)
    # Cache of resolved rdir service addresses
    self._addr_cache = {}
def make_client(instance):
    """Build a :class:`DirectoryClient` bound to *instance*'s namespace
    and directory endpoint.
    """
    conf = {"namespace": instance.namespace}
    endpoint = instance.get_endpoint('directory')
    return DirectoryClient(conf, endpoint=endpoint)