def setUp(self):
    super(TestMeta2Database, self).setUp()
    self.api = ObjectStorageApi(self.ns)
    self.account = "test_meta2_database"
    self.reference = "meta2_database_" + random_str(4)
    self.meta2_database = Meta2Database(self.conf)
    self.service_type = 'meta2'
def setUp(self):
    super(TestObjectStorageApiPerformance, self).setUp()
    self.api = ObjectStorageApi(self.ns, endpoint=self.uri,
                                source_address=('127.0.0.8', 0))
    self.created = list()
    self.containers = set()
def setUp(self):
    super(TestBlobMover, self).setUp()
    self.container = random_str(16)
    self.cid = cid_from_name(self.account, self.container)
    self.path = random_str(16)
    self.api = ObjectStorageApi(self.ns)
    self.blob_client = BlobClient(self.conf)

    self.api.container_create(self.account, self.container)
    _, chunks = self.api.container.content_prepare(
        self.account, self.container, self.path, size=1)
    services = self.conscience.all_services('rawx')
    if len(chunks) >= len([s for s in services if s['score'] > 0]):
        self.skipTest("need at least %d rawx to run" % (len(chunks) + 1))

    self.rawx_volumes = dict()
    for rawx in services:
        tags = rawx['tags']
        service_id = tags.get('tag.service_id', None)
        if service_id is None:
            service_id = rawx['addr']
        volume = tags.get('tag.vol', None)
        self.rawx_volumes[service_id] = volume

    self.api.object_create(self.account, self.container,
                           obj_name=self.path, data="chunk")
    meta, self.chunks = self.api.object_locate(
        self.account, self.container, self.path)
    self.version = meta['version']
    self.content_id = meta['id']
    self.chunk_method = meta['chunk_method']
def setUp(self):
    super(TestContentVersioning, self).setUp()
    self.api = ObjectStorageApi(self.conf['namespace'])
    self.container = random_str(8)
    system = {'sys.m2.policy.version': '3'}
    self.wait_for_score(('meta2', ))
    self.api.container_create(self.account, self.container, system=system)
def setUp(self):
    super(TestBlobConverter, self).setUp()
    self.container = random_str(16)
    self.path = random_str(16)
    self.api = ObjectStorageApi(self.ns)

    self.api.container_create(self.account, self.container)
    _, chunks = self.api.container.content_prepare(
        self.account, self.container, self.path, size=1)
    services = self.conscience.all_services('rawx')

    self.rawx_volumes = dict()
    for rawx in services:
        tags = rawx['tags']
        service_id = tags.get('tag.service_id', None)
        if service_id is None:
            service_id = rawx['addr']
        volume = tags.get('tag.vol', None)
        self.rawx_volumes[service_id] = volume

    self.api.object_create(
        self.account, self.container, obj_name=self.path, data="chunk")
    meta, self.chunks = self.api.object_locate(
        self.account, self.container, self.path)
    self.version = meta['version']
    self.content_id = meta['id']
    self.container_id = cid_from_name(self.account, self.container)
def setUp(self):
    super(TestPerfectibleContent, self).setUp()
    self.api = ObjectStorageApi(self.ns, endpoint=self.uri,
                                pool_manager=self.http_pool)
    # Ensure the tube is not clogged
    self.beanstalkd.drain_tube(DEFAULT_IMPROVER_TUBE, timeout=0.2)
def worker_container():
    proxy = ObjectStorageApi(NS)
    while True:
        try:
            name = QUEUE.get(timeout=TIMEOUT)
        except eventlet.queue.Empty:
            break
        while True:
            if VERBOSE:
                print("Deleting", name)
            try:
                proxy.container_delete(ACCOUNT, name)
                COUNTERS.add(1, 0)
                break
            except Exception as ex:
                if "Election failed" in str(ex):
                    # wait for the default election delay, then retry
                    ELECTIONS.add(1, 0)
                    time.sleep(20)
                    continue
                print("Container %s: %s" % (name, str(ex)), file=sys.stderr)
                break
        QUEUE.task_done()
def test_object_create_patch_kwargs(self):
    """
    Check that the patch_kwargs decorator does its job on object_create.
    """
    kwargs = {x: 'test' for x in ObjectStorageApi.EXTRA_KEYWORDS}
    # Pass kwargs to class constructor
    api = ObjectStorageApi('NS', endpoint=self.fake_endpoint,
                           dummy_keyword='dummy_value', **kwargs)
    self.assertNotIn('dummy_keyword', api._global_kwargs)
    for k, v in kwargs.items():
        self.assertIn(k, api._global_kwargs)
        self.assertEqual(v, api._global_kwargs[k])

    # Verify that kwargs are forwarded to method call
    api._object_create = Mock()
    api.object_create_ext(self.account, self.container,
                          data='data', obj_name='dummy')
    api._object_create.assert_called_with(
        self.account, self.container, 'dummy', ANY, ANY,
        append=ANY, headers=ANY, key_file=ANY, policy=ANY,
        properties=ANY, reqid=ANY, **kwargs)
def worker_objects():
    proxy = ObjectStorageApi(NS)
    while True:
        try:
            name = QUEUE.get(timeout=TIMEOUT)
        except eventlet.queue.Empty:
            if VERBOSE:
                print("Leaving worker")
            break
        while True:
            try:
                items = proxy.object_list(ACCOUNT, name)
                objs = [_item['name'] for _item in items['objects']]
                size = sum([_item['size'] for _item in items['objects']])
                if len(objs) == 0:
                    break
                if VERBOSE:
                    print("Deleting", len(objs), "objects")
                proxy.object_delete_many(ACCOUNT, name, objs=objs)
                COUNTERS.add(len(objs), size)
                break
            except Exception as ex:
                if "Election failed" in str(ex):
                    # wait for the default election delay, then retry
                    ELECTIONS.add(1, 0)
                    time.sleep(20)
                    continue
                print("Objs %s: %s" % (name, str(ex)), file=sys.stderr)
                break
        QUEUE.task_done()
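# worker_container and worker_objects above duplicate the same retry logic:
# any exception whose message contains "Election failed" is counted and
# retried after the default election delay. A hedged refactoring sketch
# follows; the helper name is illustrative (not part of the original code)
# and it reuses the ELECTIONS counter and the 20-second delay from the
# workers above. Exceptions other than election failures are re-raised for
# the caller to handle.
def retry_on_election_failure(func, *args, **kwargs):
    # Call func, retrying for as long as the election has not settled.
    while True:
        try:
            return func(*args, **kwargs)
        except Exception as ex:
            if "Election failed" not in str(ex):
                raise
            ELECTIONS.add(1, 0)
            time.sleep(20)

# Example: retry_on_election_failure(proxy.container_delete, ACCOUNT, name)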
def __init__(self, namespace, concurrency=50, error_file=None,
             rebuild_file=None, check_xattr=True, limit_listings=0,
             request_attempts=1, logger=None, verbose=False,
             check_hash=False, **_kwargs):
    self.pool = GreenPool(concurrency)
    self.error_file = error_file
    self.check_xattr = bool(check_xattr)
    self.check_hash = bool(check_hash)
    self.logger = logger or get_logger(
        {'namespace': namespace}, name='integrity', verbose=verbose)
    # Optimisation for when we are only checking one object
    # or one container.
    # 0 -> do not limit
    # 1 -> limit account listings (list of containers)
    # 2 -> limit container listings (list of objects)
    self.limit_listings = limit_listings
    if self.error_file:
        outfile = open(self.error_file, 'a')
        self.error_writer = csv.writer(outfile, delimiter=' ')
    self.rebuild_file = rebuild_file
    if self.rebuild_file:
        self.fd = open(self.rebuild_file, 'a')
        self.rebuild_writer = csv.writer(self.fd, delimiter='|')
    self.api = ObjectStorageApi(namespace, logger=self.logger,
                                max_retries=request_attempts - 1,
                                request_attempts=request_attempts)
    self.rdir_client = RdirClient({"namespace": namespace},
                                  logger=self.logger)

    self.accounts_checked = 0
    self.containers_checked = 0
    self.objects_checked = 0
    self.chunks_checked = 0
    self.account_not_found = 0
    self.container_not_found = 0
    self.object_not_found = 0
    self.chunk_not_found = 0
    self.account_exceptions = 0
    self.container_exceptions = 0
    self.object_exceptions = 0
    self.chunk_exceptions = 0

    self.list_cache = {}
    self.running = {}
    self.running_lock = Semaphore(1)
    self.result_queue = Queue(concurrency)
    self.run_time = 0
def setUp(self):
    super(TestContainerReplication, self).setUp()
    if int(self.conf.get('container_replicas', 1)) < 3:
        self.skipTest('Container replication must be enabled')
    self.api = ObjectStorageApi(self.ns, pool_manager=self.http_pool)
    self.must_restart_meta2 = False
    self.wait_for_score(('meta2', ))
    self._apply_conf_on_all('meta2', self.__class__.down_cache_opts)
def setUp(self):
    super(BaseServiceIdTest, self).setUp()
    if not self.conf['with_service_id']:
        self.skipTest("Service ID not enabled")
    self._cnt = random_str(10)
    self.http = urllib3.PoolManager()
    self.api = ObjectStorageApi(self.ns)
def setUp(self):
    super(TestPerfectibleContent, self).setUp()
    self.api = ObjectStorageApi(self.ns, endpoint=self.uri,
                                pool_manager=self.http_pool)
    self.cs = ConscienceClient(self.conf, pool_manager=self.http_pool)
    self.event = EventClient(self.conf)
    self.locked_svc = list()
    # Ensure the tube is not clogged
    self.event.beanstalk.drain_tube(DEFAULT_IMPROVER_TUBE)
def setUp(self):
    super(TestMeta2EventsEmission, self).setUp()
    if not self.conf.get('webhook', ''):
        self.skipTest('webhook is required')
    self.acct = 'AccountWebhook%f' % time.time()
    self.cnt_name = 'TestWebhookEvents%f' % time.time()
    self.obj_name = 'obj%f' % time.time()
    self.storage_api = ObjectStorageApi(self.ns)
    self.pool = get_pool_manager()
    self._clean()
def test_iter_container_list(self):
    worker = StorageTiererWorker(self.gridconf, Mock())
    api = ObjectStorageApi(self.namespace)
    actual = [x[0] for x in api.container_list(self.test_account)]
    if len(actual) < 3:
        print("Slow event propagation!")
        # account events have not yet propagated
        time.sleep(3.0)
        actual = [x[0] for x in api.container_list(self.test_account)]
    gen = worker._list_containers()
    self.assertListEqual(list(gen), actual)
def setUp(self):
    super(TestContainerDownload, self).setUp()
    # FIXME: should we use direct API from BaseTestCase
    # or still container.client?
    self.conn = ObjectStorageApi(self.ns)
    self._streaming = 'http://' + self.get_service_url('container')[2]
    self._cnt = random_container()
    self._uri = self.make_uri('dump')
    self._data = {}
    self.conn.container_create(self.account, self._cnt)
    self.raw = ""
    self._slo = []
def tier_content(self, config, stats, control):
    def _set(lock_, field, value):
        lock_.acquire()
        field.value = value
        lock_.release()

    def _add(lock_, field, value):
        lock_.acquire()
        field.value += value
        lock_.release()

    lock = control.get('lock')
    try:
        src = config.get('src')
        del config['src']
        self.client.lock_score(dict(type="rawx", addr=src))
        api = ObjectStorageApi(config["namespace"])
        rdir_client = RdirClient({'namespace': config["namespace"]})
        self.log.info("Starting tierer on %s with policy %s"
                      % (src, config["policy"]))

        # Parse the policy mapping: current storage policy -> target policy
        policies = dict()
        for part in config["policy"].split(','):
            policies[part.split(':')[0]] = part.split(':')[1]
            self.log.info("Parsed policy: " + part.split(':')[0]
                          + " " + part.split(':')[1])

        for marker in config["markers"]:
            req = dict(
                start_after=marker,
                limit=1000,
            )
            _, resp_body = rdir_client._rdir_request(
                src, 'POST', 'fetch', json=req)
            for (key, value) in resp_body:
                _, _, chunk = key.split('|')
                res = requests.head("http://" + src + "/" + chunk)
                policy = res.headers.get(
                    "x-oio-chunk-meta-content-storage-policy", "")
                if policy not in policies.keys():
                    _add(lock, stats.get("skip"), 1)
                    continue
                path = res.headers.get("x-oio-chunk-meta-full-path", "///")
                path_parts = path.split('/')
                if len(path_parts) < 3:
                    _add(lock, stats.get("skip"), 1)
                    continue
                try:
                    api.object_change_policy(
                        unquote(path_parts[0]), unquote(path_parts[1]),
                        unquote(path_parts[2]), policies[policy])
                    _add(lock, stats.get("success"), 1)
                except Exception as e:
                    self.log.info("Operation failed %s: %s (%s)"
                                  % (path, format_exc(e), policies[policy]))
                    _add(lock, stats.get("fail"), 1)
    except Exception as e:
        self.log.error("Tierer failed with %s" % format_exc(e))
        _set(lock, control.get('status'), 2)
    _set(lock, control.get('end'), int(time.time()))
def setUp(self):
    super(TestMeta2EventsEmission, self).setUp()
    self.container_name = 'TestEventsEmission%f' % time.time()
    self.container_id = cid_from_name(self.account, self.container_name)
    self.container_client = ContainerClient(self.conf)
    self.storage_api = ObjectStorageApi(self.conf['namespace'])
    self.beanstalkd0.drain_tube('oio-preserved')
def make_client(instance):
    client = ObjectStorageApi(
        endpoint=instance.get_endpoint('storage'),
        namespace=instance.namespace,
        admin_mode=instance.admin_mode
    )
    return client
def make_client(instance):
    client = ObjectStorageApi(
        endpoint=instance.get_endpoint('storage'),
        namespace=instance.namespace,
        admin_mode=instance.admin_mode,
        perfdata=instance.cli_conf().get('perfdata')
    )
    return client
def setUp(self):
    super(TestContentRebuildFilter, self).setUp()
    self.namespace = self.conf['namespace']
    self.gridconf = {"namespace": self.namespace}
    self.container = "TestContentRebuildFilter%f" % time.time()
    self.ref = self.container
    self.container_client = ContainerClient(self.conf)
    self.container_client.container_create(self.account, self.container)
    syst = self.container_client.container_get_properties(
        self.account, self.container)['system']
    self.container_id = syst['sys.name'].split('.', 1)[0]
    self.object_storage_api = ObjectStorageApi(namespace=self.namespace)
    queue_addr = choice(self.conf['services']['beanstalkd'])['addr']
    self.queue_url = 'beanstalk://' + queue_addr
    self.conf['queue_url'] = self.queue_url
    self.conf['tube'] = DEFAULT_REBUILDER_TUBE
    self.notify_filter = NotifyFilter(app=_App, conf=self.conf)
def main():
    args = options()
    global ACCOUNT, PROXY
    ACCOUNT = args.account
    PROXY = ObjectStorageApi("OPENIO")

    args.path = args.path.rstrip('/')
    if '/' in args.path:
        bucket, path = args.path.split('/', 1)
    else:
        bucket = args.path
        path = ""

    items = [container_hierarchy(bucket, path)]
    files = 0
    size = 0

    """
    # slow method
    while items:
        new_files, new_size, new_items = get_list(items.pop())
        items += new_items
        files += new_files
        size += new_size
    print("found %d files, %s bytes" % (files, size))
    """

    SUM = {}
    # fast method
    files = 0
    size = 0
    _bucket = container_hierarchy(bucket, path)
    for entry in full_list(prefix=container_hierarchy(bucket, path)):
        name, _files, _size, _ = entry
        if name != _bucket and not name.startswith(_bucket + '%2F'):
            continue
        size += _size
        files += _files
        items = name.split('%2F')
        while items:
            _name = '/'.join(items)
            if not _name.startswith(args.path):
                break
            if _name in SUM:
                SUM[_name] += _size
            else:
                SUM[_name] = _size
            items.pop()

    view = [(v, k) for k, v in SUM.items()]
    view.sort()
    for v, k in view:
        print("%s %s" % (show(v, args.human), k))
    print("found %d files, %s bytes" % (files, size))
def make_client(instance):
    from oio.api.object_storage import ObjectStorageApi
    admin_mode = instance.get_admin_mode()
    endpoint = instance.get_endpoint('storage')
    client = ObjectStorageApi(
        endpoint=endpoint,
        namespace=instance.namespace,
        admin_mode=admin_mode
    )
    return client
def setUp(self):
    super(TestMeta2EventsEmission, self).setUp()
    self.container_name = 'TestEventsEmission%f' % time.time()
    self.container_id = cid_from_name(self.account, self.container_name)
    self.container_client = ContainerClient(self.conf)
    self.storage_api = ObjectStorageApi(self.conf['namespace'])
    self.event_agent_name = 'event-agent-1'

    self.bt_connections = []
    self._bt_make_connections(self.conf['services']['beanstalkd'])
def storage(self):
    """
    Get an instance of ObjectStorageApi.
    """
    if self._storage is None:
        from oio.api.object_storage import ObjectStorageApi
        self._storage = ObjectStorageApi(self.namespace,
                                         endpoint=self.get_endpoint(),
                                         pool_manager=self.pool_manager)
    return self._storage
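# A minimal usage sketch of the client created by the accessor above,
# assuming a namespace named "OPENIO"; the account, container and object
# names are illustrative. It only relies on calls that appear elsewhere in
# this listing (container_create, object_create, object_locate).
def example_usage():
    from oio.api.object_storage import ObjectStorageApi

    api = ObjectStorageApi("OPENIO")
    api.container_create("my_account", "my_container")
    api.object_create("my_account", "my_container",
                      obj_name="hello.txt", data="hello")
    meta, chunks = api.object_locate("my_account", "my_container",
                                     "hello.txt")
    print(meta['version'], meta['id'], len(chunks))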
def __init__(self, conf):
    if conf:
        self.conf = read_conf(conf['key_file'],
                              section_name="admin-server")
    else:
        self.conf = {}
    self.logger = get_logger(self.conf, name="ContainerBackup")
    self.proxy = ObjectStorageApi(self.conf.get("namespace", NS),
                                  logger=self.logger)
    self.url_map = Map([
        Rule('/v1.0/container/dump', endpoint='dump'),
        Rule('/v1.0/container/restore', endpoint='restore'),
    ])
    self.REDIS_TIMEOUT = self.conf.get("redis_cache_timeout",
                                       self.REDIS_TIMEOUT)
    super(ContainerBackup, self).__init__(self.conf)
    WerkzeugApp.__init__(self, self.url_map, self.logger)
def setUp(self):
    super(TestContentRebuildFilter, self).setUp()
    self.namespace = self.conf['namespace']
    self.gridconf = {"namespace": self.namespace}
    self.container = "TestContentRebuildFilter%f" % time.time()
    self.ref = self.container
    self.container_client = ContainerClient(self.conf)
    self.container_client.container_create(self.account, self.container)
    syst = self.container_client.container_get_properties(
        self.account, self.container)['system']
    self.container_id = syst['sys.name'].split('.', 1)[0]
    self.object_storage_api = ObjectStorageApi(namespace=self.namespace)
    self.stgpol = "SINGLE"
    self.conf['tube'] = 'rebuild'
    self.conf['queue_url'] = 'beanstalk://127.0.0.1:11300'
    self.notify_filter = NotifyFilter(app=_App, conf=self.conf)
    queue_url = self.conf.get('queue_url', 'tcp://127.0.0.1:11300')
    self.tube = self.conf.get('tube', 'rebuild')
    self.beanstalk = Beanstalk.from_url(queue_url)
    self.beanstalk.use(self.tube)
def __init__(self, conf):
    if conf:
        self.conf = read_conf(conf['key_file'],
                              section_name="admin-server")
    else:
        self.conf = {}
    self.logger = get_logger(self.conf, name="ContainerBackup")
    self.proxy = ObjectStorageApi(self.conf.get("namespace", NS),
                                  logger=self.logger)
    self.url_map = Map([
        Rule('/v1.0/container/dump', endpoint='dump'),
        Rule('/v1.0/container/restore', endpoint='restore'),
    ])
    self.REDIS_TIMEOUT = self.conf.get("redis_cache_timeout",
                                       self.REDIS_TIMEOUT)
    redis_conf = {k[6:]: v for k, v in self.conf.items()
                  if k.startswith("redis_")}
    redis_host = redis_conf.pop('host', None)
    if redis_host:
        parsed = urlparse('http://' + redis_host)
        if parsed.port is None:
            redis_host = '%s:%s' % (redis_host,
                                    redis_conf.pop('port', '6379'))
    redis_sentinel_hosts = redis_conf.pop(
        'sentinel_hosts',
        # TODO(adu): Delete when it will no longer be used
        self.conf.get('sentinel_hosts'))
    redis_sentinel_name = redis_conf.pop(
        'sentinel_name',
        # TODO(adu): Delete when it will no longer be used
        self.conf.get('sentinel_master_name'))
    RedisConnection.__init__(self, host=redis_host,
                             sentinel_hosts=redis_sentinel_hosts,
                             sentinel_name=redis_sentinel_name,
                             **redis_conf)
    WerkzeugApp.__init__(self, self.url_map, self.logger)
class TestObjectStorageApiPerformance(BaseTestCase):
    def setUp(self):
        super(TestObjectStorageApiPerformance, self).setUp()
        self.api = ObjectStorageApi(self.ns, endpoint=self.uri)
        self.created = list()

    def tearDown(self):
        super(TestObjectStorageApiPerformance, self).tearDown()
        containers = set()
        for ct, name in self.created:
            try:
                self.api.object_delete(self.account, ct, name)
                containers.add(ct)
            except Exception:
                logging.exception("Failed to delete %s/%s/%s//%s",
                                  self.ns, self.account, ct, name)
        for ct in containers:
            try:
                self.api.container_delete(self.account, ct)
            except Exception:
                logging.exception('Failed to delete %s/%s/%s',
                                  self.ns, self.account, ct)

    def test_object_create_32_md5_checksum(self):
        container = self.__class__.__name__ + random_str(8)
        for i in range(32):
            obj = "obj-%03d" % i
            self.api.object_create(self.account, container,
                                   obj_name=obj, data=obj,
                                   chunk_checksum_algo='md5')
            self.created.append((container, obj))

    def test_object_create_32_no_checksum(self):
        container = self.__class__.__name__ + random_str(8)
        for i in range(32):
            obj = "obj-%03d" % i
            self.api.object_create(self.account, container,
                                   obj_name=obj, data=obj,
                                   chunk_checksum_algo=None)
            self.created.append((container, obj))
def main():
    args = options()
    global ACCOUNT, PROXY
    ACCOUNT = args.account
    PROXY = ObjectStorageApi("OPENIO")

    args.path = args.path.rstrip('/')
    if '/' in args.path:
        bucket, path = args.path.split('/', 1)
    else:
        bucket = args.path
        path = ""

    containers = []
    _bucket = container_hierarchy(bucket, path)
    # we don't use placeholders, we use the path as a listing prefix
    for entry in full_list(prefix=container_hierarchy(bucket, path)):
        name, _files, _size, _ = entry
        if name != _bucket and not name.startswith(_bucket + '%2F'):
            continue
        if _files:
            items = PROXY.object_list(ACCOUNT, name)
            objs = [_item['name'] for _item in items['objects']]
            PROXY.object_delete_many(ACCOUNT, name, objs=objs)
            print("Deleting", len(objs), "objects")
        containers.append(name)

    print("We have to delete", len(containers), "containers")
    for container in containers:
        print("Deleting", container)
        PROXY.container_delete(ACCOUNT, container)