class TestObjectStorageApiPerformance(BaseTestCase):
    """Micro-benchmarks for basic object operations through ObjectStorageApi."""

    def setUp(self):
        super(TestObjectStorageApiPerformance, self).setUp()
        # Bind outgoing connections to a dedicated source address so this
        # test's traffic can be distinguished from other clients'.
        self.api = ObjectStorageApi(self.ns, endpoint=self.uri,
                                    source_address=('127.0.0.8', 0))
        # (container, object) pairs to delete on teardown.
        self.created = list()
        # Containers to delete on teardown (after their objects).
        self.containers = set()

    def tearDown(self):
        super(TestObjectStorageApiPerformance, self).tearDown()
        for ct, name in self.created:
            # Fix: register the container for cleanup *before* attempting
            # the object deletion; the previous code only registered it on
            # success, leaking the container when object_delete failed.
            self.containers.add(ct)
            try:
                self.api.object_delete(self.account, ct, name)
            except Exception:
                logging.exception("Failed to delete %s/%s/%s//%s",
                                  self.ns, self.account, ct, name)
        for ct in self.containers:
            try:
                self.api.container_delete(self.account, ct)
            except Exception:
                logging.exception('Failed to delete %s/%s/%s',
                                  self.ns, self.account, ct)

    def test_object_create_32_md5_checksum(self):
        """Create 32 small objects with MD5 as the chunk checksum algo."""
        container = self.__class__.__name__ + random_str(8)
        for i in range(32):
            obj = "obj-%03d" % i
            self.api.object_create(self.account, container,
                                   obj_name=obj, data=obj,
                                   chunk_checksum_algo='md5')
            self.created.append((container, obj))

    def test_object_create_32_no_checksum(self):
        """Create 32 small objects with chunk checksums disabled."""
        container = self.__class__.__name__ + random_str(8)
        for i in range(32):
            obj = "obj-%03d" % i
            self.api.object_create(self.account, container,
                                   obj_name=obj, data=obj,
                                   chunk_checksum_algo=None)
            self.created.append((container, obj))

    def test_object_list_empty_container(self):
        """
        Ensure object listing of an empty container takes less than 35ms.
        """
        container = self.__class__.__name__ + random_str(8)
        self.api.container_create(self.account, container)
        self.containers.add(container)
        for _ in range(8):
            start = monotonic_time()
            self.api.object_list(self.account, container)
            duration = monotonic_time() - start
            logging.info("Object list took %.6fs", duration)
            self.assertLess(duration, 0.035)
class ObjectStorageApiTestBase(BaseTestCase):
    """Common fixture for object-storage API tests.

    Provides an ObjectStorageApi client plus small helpers around
    container and property management. Objects recorded in
    ``self.created`` are deleted during teardown.
    """

    def setUp(self):
        super(ObjectStorageApiTestBase, self).setUp()
        self.api = ObjectStorageApi(self.ns, endpoint=self.uri)
        self.created = []

    def tearDown(self):
        super(ObjectStorageApiTestBase, self).tearDown()
        for container, obj in self.created:
            try:
                self.api.object_delete(self.account, container, obj)
            except Exception:
                logging.exception("Failed to delete %s/%s/%s//%s",
                                  self.ns, self.account, container, obj)

    def _create(self, name, metadata=None):
        """Create a container, optionally with initial properties."""
        return self.api.container_create(self.account, name,
                                         properties=metadata)

    def _delete(self, name):
        """Delete a container."""
        self.api.container_delete(self.account, name)

    def _clean(self, name, clear=False):
        """Delete a container, optionally clearing its properties first."""
        if clear:
            # must clean properties before
            self.api.container_del_properties(self.account, name, [])
        self._delete(name)

    def _get_properties(self, name, properties=None):
        """Fetch properties of a container."""
        return self.api.container_get_properties(
            self.account, name, properties=properties)

    def _set_properties(self, name, properties=None):
        """Set properties on a container."""
        return self.api.container_set_properties(
            self.account, name, properties=properties)

    def _upload_empty(self, container, *objs, **kwargs):
        """Upload empty objects to `container`"""
        for obj in objs:
            self.api.object_create(self.account, container,
                                   obj_name=obj, data="", **kwargs)
            self.created.append((container, obj))
class TestObjectStorageApiPerformance(BaseTestCase):
    """Timing-oriented tests for object creation through ObjectStorageApi."""

    def setUp(self):
        super(TestObjectStorageApiPerformance, self).setUp()
        self.api = ObjectStorageApi(self.ns, endpoint=self.uri)
        self.created = []

    def tearDown(self):
        super(TestObjectStorageApiPerformance, self).tearDown()
        emptied = set()
        for container, obj in self.created:
            try:
                self.api.object_delete(self.account, container, obj)
            except Exception:
                logging.exception("Failed to delete %s/%s/%s//%s",
                                  self.ns, self.account, container, obj)
            else:
                # Only containers whose objects were actually removed
                # are candidates for deletion.
                emptied.add(container)
        for container in emptied:
            try:
                self.api.container_delete(self.account, container)
            except Exception:
                logging.exception('Failed to delete %s/%s/%s',
                                  self.ns, self.account, container)

    def _create_32_objects(self, checksum_algo):
        # Shared body of the two creation benchmarks below.
        container = self.__class__.__name__ + random_str(8)
        for num in range(32):
            obj = "obj-%03d" % num
            self.api.object_create(self.account, container, obj_name=obj,
                                   data=obj,
                                   chunk_checksum_algo=checksum_algo)
            self.created.append((container, obj))

    def test_object_create_32_md5_checksum(self):
        """Create 32 small objects, checksumming chunks with MD5."""
        self._create_32_objects('md5')

    def test_object_create_32_no_checksum(self):
        """Create 32 small objects without chunk checksums."""
        self._create_32_objects(None)
class TestObjectStorageApiPerfdata(BaseTestCase):
    """Check that API operations fill the caller-provided perfdata dict."""

    def setUp(self):
        super(TestObjectStorageApiPerfdata, self).setUp()
        self.api = ObjectStorageApi(self.ns, endpoint=self.uri)
        # (container, object) pairs to delete on teardown.
        self.created = list()

    def tearDown(self):
        super(TestObjectStorageApiPerfdata, self).tearDown()
        for ct, name in self.created:
            try:
                self.api.object_delete(self.account, ct, name)
            except Exception:
                logging.exception("Failed to delete %s/%s/%s//%s",
                                  self.ns, self.account, ct, name)

    def test_object_create_perfdata(self):
        """object_create and object_delete must report timing keys."""
        perfdata = dict()
        container = random_str(8)
        obj = random_str(8)
        self.api.object_create(self.account, container, obj_name=obj,
                               data=obj, perfdata=perfdata)
        # Fix: register the object so tearDown cleans it up if an
        # assertion below fails before the explicit delete.
        self.created.append((container, obj))
        self.assertIn('resolve', perfdata)
        self.assertIn('meta2', perfdata)
        self.assertIn('rawx', perfdata)
        perfdata.clear()
        self.api.object_delete(self.account, container, obj,
                               perfdata=perfdata)
        self.created.remove((container, obj))
        self.assertIn('resolve', perfdata)
        self.assertIn('meta2', perfdata)

    def test_object_fetch_perfdata(self):
        """object_fetch must report 'ttfb' once the stream is consumed."""
        perfdata = dict()
        container = random_str(8)
        obj = random_str(8)
        self.api.object_create(self.account, container, obj_name=obj,
                               data=obj)
        # Fix: register the object so a failing assertion does not leak it.
        self.created.append((container, obj))
        _, stream = self.api.object_fetch(self.account, container, obj,
                                          perfdata=perfdata)
        self.assertIn('resolve', perfdata)
        self.assertIn('meta2', perfdata)
        # Time-to-first-byte appears only after the stream is read.
        self.assertNotIn('ttfb', perfdata)
        buf = ''.join(stream)
        self.assertEqual(obj, buf)
        self.assertIn('ttfb', perfdata)
        self.api.object_delete(self.account, container, obj)
        self.created.remove((container, obj))
class TestBlobConverter(BaseTestCase):
    """Tests for the BlobConverter, which upgrades chunk extended
    attributes from the legacy per-field xattr layout to the fullpath
    layout, and recovers missing attributes when possible."""

    def setUp(self):
        super(TestBlobConverter, self).setUp()
        self.container = random_str(16)
        self.path = random_str(16)
        self.api = ObjectStorageApi(self.ns)
        self.api.container_create(self.account, self.container)
        _, chunks = self.api.container.content_prepare(
            self.account, self.container, self.path, size=1)
        # Map each rawx service id (or address when no service id is set)
        # to its local volume path, so chunk URLs can be turned into
        # filesystem paths.
        services = self.conscience.all_services('rawx')
        self.rawx_volumes = dict()
        for rawx in services:
            tags = rawx['tags']
            service_id = tags.get('tag.service_id', None)
            if service_id is None:
                service_id = rawx['addr']
            volume = tags.get('tag.vol', None)
            self.rawx_volumes[service_id] = volume
        self.api.object_create(
            self.account, self.container, obj_name=self.path, data="chunk")
        meta, self.chunks = self.api.object_locate(
            self.account, self.container, self.path)
        self.version = meta['version']
        self.content_id = meta['id']
        self.container_id = cid_from_name(self.account, self.container)

    def tearDown(self):
        # Best-effort cleanup: some tests delete the object themselves.
        try:
            self.api.object_delete(self.account, self.container, self.path)
        except Exception:
            pass
        super(TestBlobConverter, self).tearDown()

    def _chunk_path(self, chunk):
        """Return the filesystem path of a chunk from its URL."""
        url = chunk['url']
        chunk_id = url.split('/', 3)[3]
        volume = self.rawx_volumes[self._chunk_volume_id(chunk)]
        # Chunks are stored under a directory named after the first
        # 3 characters of the chunk id.
        return volume + '/' + chunk_id[:3] + '/' + chunk_id

    def _chunk_volume_id(self, chunk):
        """Extract the rawx service id (netloc) from a chunk URL."""
        return chunk['url'].split('/', 3)[2]

    def _deindex_chunk(self, chunk):
        """Remove a chunk's entry from the rdir index."""
        rdir = RdirClient(self.conf, pool_manager=self.conscience.pool_manager)
        url = chunk['url']
        volume_id = url.split('/', 3)[2]
        chunk_id = url.split('/', 3)[3]
        rdir.chunk_delete(volume_id, self.container_id,
                          self.content_id, chunk_id)

    def _convert_and_check(self, chunk_volume, chunk_path,
                           chunk_id_info, expected_raw_meta=None,
                           expected_errors=0):
        """Run the converter on one chunk file and verify the resulting
        extended attributes for every (chunk_id -> expected fullpath
        components) entry of `chunk_id_info`.
        """
        # NOTE(review): this aliases (does not copy) self.conf, so the
        # 'volume' key leaks into self.conf for subsequent calls; the
        # test_recover_* tests below use dict(self.conf) instead — confirm
        # whether the mutation here is intentional.
        conf = self.conf
        conf['volume'] = self.rawx_volumes[chunk_volume]
        converter = BlobConverter(conf, logger=self.logger)
        converter.safe_convert_chunk(chunk_path)
        self.assertEqual(1, converter.total_chunks_processed)
        self.assertEqual(1, converter.passes)
        self.assertEqual(expected_errors, converter.errors)
        checker = Checker(self.ns)
        for chunk_id, info in chunk_id_info.items():
            account, container, path, version, content_id = info
            fullpath = encode_fullpath(
                account, container, path, version, content_id)
            cid = cid_from_name(account, container)
            meta, raw_meta = read_chunk_metadata(chunk_path, chunk_id)
            # Parsed metadata must match the expected fullpath components.
            self.assertEqual(meta.get('chunk_id'), chunk_id)
            self.assertEqual(meta.get('container_id'), cid)
            self.assertEqual(meta.get('content_path'), path)
            self.assertEqual(meta.get('content_version'), version)
            self.assertEqual(meta.get('content_id'), content_id)
            self.assertEqual(meta.get('full_path'), fullpath)
            # Cross-check chunk integrity with the consistency checker.
            checker.check(Target(
                account, container=container, obj=path,
                chunk='http://' + converter.volume_id + '/' + chunk_id))
            for _ in checker.run():
                pass
            self.assertTrue(checker.report())
            if expected_raw_meta:
                # Caller provided the exact raw xattrs to expect
                # (e.g. when old attributes are deliberately kept).
                self.assertDictEqual(expected_raw_meta, raw_meta)
                continue
            # Default expectation: legacy per-field xattrs are gone and
            # only the fullpath xattr remains.
            self.assertNotIn(chunk_xattr_keys['chunk_id'], raw_meta)
            self.assertNotIn(chunk_xattr_keys['container_id'], raw_meta)
            self.assertNotIn(chunk_xattr_keys['content_path'], raw_meta)
            self.assertNotIn(chunk_xattr_keys['content_version'], raw_meta)
            self.assertNotIn(chunk_xattr_keys['content_id'], raw_meta)
            self.assertIn(CHUNK_XATTR_CONTENT_FULLPATH_PREFIX + chunk_id,
                          raw_meta)
            for k in raw_meta.keys():
                if k.startswith('oio:'):
                    self.fail('old fullpath always existing')
            self.assertEqual(raw_meta[chunk_xattr_keys['oio_version']],
                             OIO_VERSION)

    def _test_converter_single_chunk(self, chunk, expected_errors=0):
        """Convert one chunk and check it against this test's object."""
        chunk_volume = chunk['url'].split('/')[2]
        chunk_id = chunk['url'].split('/')[3]
        chunk_path = self._chunk_path(chunk)
        self._convert_and_check(
            chunk_volume, chunk_path,
            {chunk_id: (self.account, self.container, self.path,
                        self.version, self.content_id)},
            expected_errors=expected_errors)

    def test_converter(self):
        """Convert a chunk that already has up-to-date attributes."""
        chunk = random.choice(self.chunks)
        self._test_converter_single_chunk(chunk)

    def test_converter_old_chunk(self):
        """Convert a chunk carrying only legacy per-field xattrs."""
        for c in self.chunks:
            convert_to_old_chunk(
                self._chunk_path(c), self.account, self.container, self.path,
                self.version, self.content_id)
        chunk = random.choice(self.chunks)
        chunk_volume = chunk['url'].split('/')[2]
        chunk_id = chunk['url'].split('/')[3]
        chunk_path = self._chunk_path(chunk)
        self._convert_and_check(
            chunk_volume, chunk_path,
            {chunk_id: (self.account, self.container, self.path,
                        self.version, self.content_id)})

    def test_converter_old_chunk_with_wrong_path(self):
        """The converter must fix a wrong content path in legacy xattrs
        (the meta2 database is the reference)."""
        for c in self.chunks:
            convert_to_old_chunk(
                self._chunk_path(c), self.account, self.container,
                self.path + '+', self.version, self.content_id)
        chunk = random.choice(self.chunks)
        chunk_volume = chunk['url'].split('/')[2]
        chunk_id = chunk['url'].split('/')[3]
        chunk_path = self._chunk_path(chunk)
        self._convert_and_check(
            chunk_volume, chunk_path,
            {chunk_id: (self.account, self.container, self.path,
                        self.version, self.content_id)})

    def test_converter_old_chunk_with_wrong_content_id(self):
        """The converter must fix a wrong content id in legacy xattrs."""
        for c in self.chunks:
            convert_to_old_chunk(
                self._chunk_path(c), self.account, self.container, self.path,
                self.version, '0123456789ABCDEF0123456789ABCDEF')
        chunk = random.choice(self.chunks)
        chunk_volume = chunk['url'].split('/')[2]
        chunk_id = chunk['url'].split('/')[3]
        chunk_path = self._chunk_path(chunk)
        self._convert_and_check(
            chunk_volume, chunk_path,
            {chunk_id: (self.account, self.container, self.path,
                        self.version, self.content_id)})

    def test_converter_old_chunk_with_old_fullpath(self):
        """Convert a chunk carrying the old-style ('oio:') fullpath."""
        for c in self.chunks:
            convert_to_old_chunk(
                self._chunk_path(c), self.account, self.container, self.path,
                self.version, self.content_id, add_old_fullpath=True)
        chunk = random.choice(self.chunks)
        chunk_volume = chunk['url'].split('/')[2]
        chunk_id = chunk['url'].split('/')[3]
        chunk_path = self._chunk_path(chunk)
        self._convert_and_check(
            chunk_volume, chunk_path,
            {chunk_id: (self.account, self.container, self.path,
                        self.version, self.content_id)})

    def test_converter_old_chunk_with_old_fullpath_and_wrong_path(self):
        """Old fullpath is right, legacy content-path xattr is wrong."""
        for c in self.chunks:
            convert_to_old_chunk(
                self._chunk_path(c), self.account, self.container, self.path,
                self.version, self.content_id, add_old_fullpath=True)
            convert_to_old_chunk(
                self._chunk_path(c), self.account, self.container,
                self.path + '+', self.version, self.content_id)
        chunk = random.choice(self.chunks)
        chunk_volume = chunk['url'].split('/')[2]
        chunk_id = chunk['url'].split('/')[3]
        chunk_path = self._chunk_path(chunk)
        self._convert_and_check(
            chunk_volume, chunk_path,
            {chunk_id: (self.account, self.container, self.path,
                        self.version, self.content_id)})

    def test_converter_old_chunk_with_wrong_fullpath(self):
        """Old fullpath is wrong (bad version and content id), legacy
        per-field xattrs are right."""
        for c in self.chunks:
            convert_to_old_chunk(
                self._chunk_path(c), self.account, self.container, self.path,
                'None', '0123456789ABCDEF0123456789ABCDEF',
                add_old_fullpath=True)
            convert_to_old_chunk(
                self._chunk_path(c), self.account, self.container, self.path,
                self.version, self.content_id)
        chunk = random.choice(self.chunks)
        chunk_volume = chunk['url'].split('/')[2]
        chunk_id = chunk['url'].split('/')[3]
        chunk_path = self._chunk_path(chunk)
        self._convert_and_check(
            chunk_volume, chunk_path,
            {chunk_id: (self.account, self.container, self.path,
                        self.version, self.content_id)})

    def test_converter_linked_chunk(self):
        """Chunks shared by a link must get one fullpath per content."""
        self.api.object_link(
            self.account, self.container, self.path,
            self.account, self.container, self.path + '.link')
        linked_meta, linked_chunks = self.api.object_locate(
            self.account, self.container, self.path + '.link')
        self.assertNotEqual(self.content_id, linked_meta['id'])
        chunk = random.choice(self.chunks)
        chunk_volume = chunk['url'].split('/')[2]
        chunk_id = chunk['url'].split('/')[3]
        chunk_path = self._chunk_path(chunk)
        # Find the link's chunk hosted on the same volume as `chunk`.
        for c in linked_chunks:
            if chunk_volume == c['url'].split('/')[2]:
                linked_chunk_id2 = c['url'].split('/')[3]
                break
        linked_chunk = random.choice(linked_chunks)
        linked_chunk_volume = linked_chunk['url'].split('/')[2]
        linked_chunk_id = linked_chunk['url'].split('/')[3]
        linked_chunk_path = self._chunk_path(linked_chunk)
        # Find the original's chunk hosted on the same volume as the link's.
        for c in self.chunks:
            if linked_chunk_volume == c['url'].split('/')[2]:
                chunk_id2 = c['url'].split('/')[3]
                break
        self._convert_and_check(
            chunk_volume, chunk_path,
            {chunk_id: (self.account, self.container, self.path,
                        self.version, self.content_id),
             linked_chunk_id2: (self.account, self.container,
                                self.path + '.link', linked_meta['version'],
                                linked_meta['id'])})
        self._convert_and_check(
            linked_chunk_volume, linked_chunk_path,
            {chunk_id2: (self.account, self.container, self.path,
                         self.version, self.content_id),
             linked_chunk_id: (self.account, self.container,
                               self.path + '.link', linked_meta['version'],
                               linked_meta['id'])})

    def test_converter_old_linked_chunk(self):
        """Same as test_converter_linked_chunk, but starting from legacy
        xattrs (link side carries a bogus old fullpath)."""
        self.api.object_link(
            self.account, self.container, self.path,
            self.account, self.container, self.path + '.link')
        linked_meta, linked_chunks = self.api.object_locate(
            self.account, self.container, self.path + '.link')
        self.assertNotEqual(self.content_id, linked_meta['id'])
        for c in linked_chunks:
            convert_to_old_chunk(
                self._chunk_path(c), self.account, self.container,
                self.path + '.link', 'None',
                '0123456789ABCDEF0123456789ABCDEF', add_old_fullpath=True)
        for c in self.chunks:
            convert_to_old_chunk(
                self._chunk_path(c), self.account, self.container, self.path,
                self.version, self.content_id)
        chunk = random.choice(self.chunks)
        chunk_volume = chunk['url'].split('/')[2]
        chunk_id = chunk['url'].split('/')[3]
        chunk_path = self._chunk_path(chunk)
        for c in linked_chunks:
            if chunk_volume == c['url'].split('/')[2]:
                linked_chunk_id2 = c['url'].split('/')[3]
                break
        linked_chunk = random.choice(linked_chunks)
        linked_chunk_volume = linked_chunk['url'].split('/')[2]
        linked_chunk_id = linked_chunk['url'].split('/')[3]
        linked_chunk_path = self._chunk_path(linked_chunk)
        for c in self.chunks:
            if linked_chunk_volume == c['url'].split('/')[2]:
                chunk_id2 = c['url'].split('/')[3]
                break
        self._convert_and_check(
            chunk_volume, chunk_path,
            {chunk_id: (self.account, self.container, self.path,
                        self.version, self.content_id),
             linked_chunk_id2: (self.account, self.container,
                                self.path + '.link', linked_meta['version'],
                                linked_meta['id'])})
        self._convert_and_check(
            linked_chunk_volume, linked_chunk_path,
            {chunk_id2: (self.account, self.container, self.path,
                         self.version, self.content_id),
             linked_chunk_id: (self.account, self.container,
                               self.path + '.link', linked_meta['version'],
                               linked_meta['id'])})

    def test_converter_old_chunk_with_link_on_same_object(self):
        """Link an object onto itself: the old xattrs cannot be removed
        (one conversion error expected, raw metadata left untouched
        except for the oio version)."""
        for c in self.chunks:
            convert_to_old_chunk(
                self._chunk_path(c), self.account, self.container, self.path,
                self.version, self.content_id)
        self.api.object_link(
            self.account, self.container, self.path,
            self.account, self.container, self.path)
        linked_meta, linked_chunks = self.api.object_locate(
            self.account, self.container, self.path)
        self.assertNotEqual(self.content_id, linked_meta['id'])
        linked_chunk = random.choice(linked_chunks)
        linked_chunk_volume = linked_chunk['url'].split('/')[2]
        linked_chunk_id = linked_chunk['url'].split('/')[3]
        linked_chunk_path = self._chunk_path(linked_chunk)
        # old xattr not removed
        _, expected_raw_meta = read_chunk_metadata(linked_chunk_path,
                                                   linked_chunk_id)
        expected_raw_meta[chunk_xattr_keys['oio_version']] = OIO_VERSION
        self._convert_and_check(
            linked_chunk_volume, linked_chunk_path,
            {linked_chunk_id: (self.account, self.container, self.path,
                               linked_meta['version'], linked_meta['id'])},
            expected_raw_meta=expected_raw_meta, expected_errors=1)

    def test_converter_old_linked_chunk_with_link_on_same_object(self):
        """Legacy-xattr chunks, with a link then re-linked onto itself."""
        self.api.object_link(
            self.account, self.container, self.path,
            self.account, self.container, self.path + '.link')
        linked_meta, linked_chunks = self.api.object_locate(
            self.account, self.container, self.path + '.link')
        self.assertNotEqual(self.content_id, linked_meta['id'])
        for c in linked_chunks:
            convert_to_old_chunk(
                self._chunk_path(c), self.account, self.container,
                self.path + '.link', 'None',
                '0123456789ABCDEF0123456789ABCDEF', add_old_fullpath=True)
        for c in self.chunks:
            convert_to_old_chunk(
                self._chunk_path(c), self.account, self.container, self.path,
                self.version, self.content_id, add_old_fullpath=True)
        # Re-link the link onto itself, generating a new content id.
        self.api.object_link(
            self.account, self.container, self.path + '.link',
            self.account, self.container, self.path + '.link')
        linked_meta, linked_chunks = self.api.object_locate(
            self.account, self.container, self.path + '.link')
        self.assertNotEqual(self.content_id, linked_meta['id'])
        chunk = random.choice(self.chunks)
        chunk_volume = chunk['url'].split('/')[2]
        chunk_id = chunk['url'].split('/')[3]
        chunk_path = self._chunk_path(chunk)
        for c in linked_chunks:
            if chunk_volume == c['url'].split('/')[2]:
                linked_chunk_id2 = c['url'].split('/')[3]
                break
        linked_chunk = random.choice(linked_chunks)
        linked_chunk_volume = linked_chunk['url'].split('/')[2]
        linked_chunk_id = linked_chunk['url'].split('/')[3]
        linked_chunk_path = self._chunk_path(linked_chunk)
        for c in self.chunks:
            if linked_chunk_volume == c['url'].split('/')[2]:
                chunk_id2 = c['url'].split('/')[3]
                break
        self._convert_and_check(
            chunk_volume, chunk_path,
            {chunk_id: (self.account, self.container, self.path,
                        self.version, self.content_id),
             linked_chunk_id2: (self.account, self.container,
                                self.path + '.link', linked_meta['version'],
                                linked_meta['id'])})
        self._convert_and_check(
            linked_chunk_volume, linked_chunk_path,
            {chunk_id2: (self.account, self.container, self.path,
                         self.version, self.content_id),
             linked_chunk_id: (self.account, self.container,
                               self.path + '.link', linked_meta['version'],
                               linked_meta['id'])})

    def test_converter_with_versioning(self):
        """Each version of an object keeps its own fullpath."""
        self.api.container_set_properties(
            self.account, self.container,
            system={'sys.m2.policy.version': '2'})
        self.api.object_create(
            self.account, self.container, obj_name=self.path, data='version')
        versioned_meta, versioned_chunks = self.api.object_locate(
            self.account, self.container, self.path)
        self.assertNotEqual(self.content_id, versioned_meta['id'])
        chunk = random.choice(self.chunks)
        chunk_volume = chunk['url'].split('/')[2]
        chunk_id = chunk['url'].split('/')[3]
        chunk_path = self._chunk_path(chunk)
        versioned_chunk = random.choice(versioned_chunks)
        versioned_chunk_volume = versioned_chunk['url'].split('/')[2]
        versioned_chunk_id = versioned_chunk['url'].split('/')[3]
        versioned_chunk_path = self._chunk_path(versioned_chunk)
        self._convert_and_check(
            chunk_volume, chunk_path,
            {chunk_id: (self.account, self.container, self.path,
                        self.version, self.content_id)})
        self._convert_and_check(
            versioned_chunk_volume, versioned_chunk_path,
            {versioned_chunk_id: (self.account, self.container, self.path,
                                  versioned_meta['version'],
                                  versioned_meta['id'])})

    def test_converter_old_chunk_with_versioning(self):
        """Same as test_converter_with_versioning, starting from legacy
        xattrs on both versions."""
        for c in self.chunks:
            convert_to_old_chunk(
                self._chunk_path(c), self.account, self.container, self.path,
                self.version, self.content_id)
        self.api.container_set_properties(
            self.account, self.container,
            system={'sys.m2.policy.version': '2'})
        self.api.object_create(
            self.account, self.container, obj_name=self.path, data='version')
        versioned_meta, versioned_chunks = self.api.object_locate(
            self.account, self.container, self.path)
        self.assertNotEqual(self.content_id, versioned_meta['id'])
        for c in versioned_chunks:
            convert_to_old_chunk(
                self._chunk_path(c), self.account, self.container, self.path,
                versioned_meta['version'], versioned_meta['id'])
        chunk = random.choice(self.chunks)
        chunk_volume = chunk['url'].split('/')[2]
        chunk_id = chunk['url'].split('/')[3]
        chunk_path = self._chunk_path(chunk)
        versioned_chunk = random.choice(versioned_chunks)
        versioned_chunk_volume = versioned_chunk['url'].split('/')[2]
        versioned_chunk_id = versioned_chunk['url'].split('/')[3]
        versioned_chunk_path = self._chunk_path(versioned_chunk)
        self._convert_and_check(
            chunk_volume, chunk_path,
            {chunk_id: (self.account, self.container, self.path,
                        self.version, self.content_id)})
        self._convert_and_check(
            versioned_chunk_volume, versioned_chunk_path,
            {versioned_chunk_id: (self.account, self.container, self.path,
                                  versioned_meta['version'],
                                  versioned_meta['id'])})

    def test_converter_file_not_found(self):
        """
        Test what happens when the BlobConverter encounters a chunk
        whose file has been removed: one error, and no attempt at
        fullpath recovery.
        """
        victim = random.choice(self.chunks)
        path = self._chunk_path(victim)
        chunk_volume = victim['url'].split('/')[2]
        os.remove(path)
        with patch('oio.blob.converter.BlobConverter.recover_chunk_fullpath') \
                as recover:
            self._convert_and_check(chunk_volume, path, {},
                                    expected_errors=1)
            recover.assert_not_called()

    def test_recover_missing_old_fullpath(self):
        """Legacy chunk without the old-style fullpath xattr."""
        for c in self.chunks:
            convert_to_old_chunk(
                self._chunk_path(c), self.account, self.container, self.path,
                self.version, self.content_id)
        victim = random.choice(self.chunks)
        self._test_converter_single_chunk(victim)

    def test_recover_missing_content_path(self):
        """Legacy chunk with old fullpath but no content-path xattr."""
        for c in self.chunks:
            convert_to_old_chunk(
                self._chunk_path(c), self.account, self.container, self.path,
                self.version, self.content_id, add_old_fullpath=True)
        victim = random.choice(self.chunks)
        path = self._chunk_path(victim)
        remove_xattr(path, chunk_xattr_keys['content_path'])
        self._test_converter_single_chunk(victim)

    def test_recover_missing_old_fullpath_and_content_path(self):
        """Legacy chunk missing both old fullpath and content path."""
        for c in self.chunks:
            convert_to_old_chunk(
                self._chunk_path(c), self.account, self.container, self.path,
                self.version, self.content_id)
        victim = random.choice(self.chunks)
        path = self._chunk_path(victim)
        remove_xattr(path, chunk_xattr_keys['content_path'])
        self._test_converter_single_chunk(victim)

    def test_recover_missing_fullpath(self):
        """
        Test what happens when the BlobConverter encounters a chunk
        with neither a fullpath extended attribute, nor any of the legacy
        attributes.
        """
        victim = random.choice(self.chunks)
        path = self._chunk_path(victim)
        remove_fullpath_xattr(path)
        self._test_converter_single_chunk(victim)

    def test_recover_missing_fullpath_not_indexed(self):
        """
        Test what happens when the BlobConverter encounters a chunk
        with neither a fullpath extended attribute, nor any of the legacy
        attributes, and the chunk does not appear in rdir.
        """
        victim = random.choice(self.chunks)
        path = self._chunk_path(victim)
        remove_fullpath_xattr(path)
        self._deindex_chunk(victim)
        # Copy the conf: recovery needs the 'volume' key but the shared
        # conf must not be polluted.
        conf = dict(self.conf)
        conf['volume'] = self.rawx_volumes[self._chunk_volume_id(victim)]
        converter = BlobConverter(conf)
        self.assertRaises(KeyError, converter.recover_chunk_fullpath, path)

    def test_recover_missing_fullpath_orphan_chunk(self):
        """
        Test what happens when the BlobConverter encounters a chunk
        with neither a fullpath extended attribute, nor any of the legacy
        attributes, and the chunk does not appear in object description.
        """
        victim = random.choice(self.chunks)
        path = self._chunk_path(victim)
        remove_fullpath_xattr(path)
        # Remove the chunk bean from the meta2 database so the chunk
        # becomes an orphan.
        cbean = {
            'content': self.content_id,
            'hash': victim['hash'],
            'id': victim['url'],
            'size': victim['size'],
            'pos': victim['pos'],
            'type': 'chunk'
        }
        self.api.container.container_raw_delete(
            self.account, self.container, data=[cbean])
        conf = dict(self.conf)
        conf['volume'] = self.rawx_volumes[self._chunk_volume_id(victim)]
        converter = BlobConverter(conf)
        self.assertRaises(OrphanChunk,
                          converter.recover_chunk_fullpath, path)
class TestBlobAuditor(BaseTestCase):
    """Tests for the blob auditor worker, which verifies the consistency
    of chunk files against their metadata and the meta2 database."""

    def setUp(self):
        super(TestBlobAuditor, self).setUp()
        self.container = random_str(16)
        self.cid = cid_from_name(self.account, self.container)
        self.path = random_str(16)
        self.api = ObjectStorageApi(self.ns)
        self.blob_client = BlobClient(self.conf)
        self.api.container_create(self.account, self.container)
        _, chunks = self.api.container.content_prepare(
            self.account, self.container, self.path, 1)
        # Map each rawx service id (or address when no service id is set)
        # to its local volume path.
        services = self.conscience.all_services('rawx')
        self.rawx_volumes = dict()
        for rawx in services:
            tags = rawx['tags']
            service_id = tags.get('tag.service_id', None)
            if service_id is None:
                service_id = rawx['addr']
            volume = tags.get('tag.vol', None)
            self.rawx_volumes[service_id] = volume
        self.api.object_create(
            self.account, self.container, obj_name=self.path, data="chunk")
        meta, self.chunks = self.api.object_locate(
            self.account, self.container, self.path)
        self.version = meta['version']
        self.content_id = meta['id']

    def _chunk_path(self, chunk):
        """Return the filesystem path of a chunk from its URL."""
        url = chunk['url']
        volume_id = url.split('/', 3)[2]
        chunk_id = url.split('/', 3)[3]
        volume = self.rawx_volumes[volume_id]
        # Chunks live under a directory named after the first
        # 3 characters of the chunk id.
        return volume + '/' + chunk_id[:3] + '/' + chunk_id

    def test_audit(self):
        """Audit a healthy chunk: no exception expected."""
        chunk = random.choice(self.chunks)
        chunk_volume = chunk['url'].split('/')[2]
        chunk_id = chunk['url'].split('/')[3]
        auditor = BlobAuditorWorker(self.conf, None,
                                    self.rawx_volumes[chunk_volume])
        auditor.chunk_audit(self._chunk_path(chunk), chunk_id)

    def test_audit_old_chunk(self):
        """Audit a chunk carrying only legacy per-field xattrs."""
        for c in self.chunks:
            convert_to_old_chunk(
                self._chunk_path(c), self.account, self.container, self.path,
                self.version, self.content_id)
        chunk = random.choice(self.chunks)
        chunk_volume = chunk['url'].split('/')[2]
        chunk_id = chunk['url'].split('/')[3]
        auditor = BlobAuditorWorker(self.conf, None,
                                    self.rawx_volumes[chunk_volume])
        auditor.chunk_audit(self._chunk_path(chunk), chunk_id)

    def test_audit_linked_chunk(self):
        """Audit chunks of an object and of a link to it; after deleting
        the original object, the linked chunk must still pass but a stray
        copy of the original chunk must be reported as orphan."""
        self.api.object_link(
            self.account, self.container, self.path,
            self.account, self.container, self.path + '.link')
        chunk = random.choice(self.chunks)
        chunk_volume = chunk['url'].split('/')[2]
        chunk_id = chunk['url'].split('/')[3]
        chunk_path = self._chunk_path(chunk)
        auditor = BlobAuditorWorker(self.conf, None,
                                    self.rawx_volumes[chunk_volume])
        auditor.chunk_audit(chunk_path, chunk_id)
        linked_meta, linked_chunks = self.api.object_locate(
            self.account, self.container, self.path + '.link')
        self.assertNotEqual(self.content_id, linked_meta['id'])
        linked_chunk = random.choice(linked_chunks)
        linked_chunk_id = linked_chunk['url'].split('/')[3]
        linked_chunk_path = self._chunk_path(linked_chunk)
        auditor.chunk_audit(linked_chunk_path, linked_chunk_id)
        auditor.chunk_audit(chunk_path, chunk_id)
        # A raw file copy of a chunk still audits fine while the
        # original object exists...
        copy_chunk(chunk_path, chunk_path + '.copy')
        auditor.chunk_audit(chunk_path + '.copy', chunk_id)
        self.api.object_delete(
            self.account, self.container, self.path)
        # The link keeps its chunk alive.
        auditor.chunk_audit(linked_chunk_path, linked_chunk_id)
        # ...but becomes an orphan once the object is deleted.
        self.assertRaises(OrphanChunk, auditor.chunk_audit,
                          chunk_path + '.copy', chunk_id)

    def test_audit_with_versioning(self):
        """Audit chunks of two versions of the same object; after
        deleting the first version, its stray chunk copy must be
        reported as orphan while the newer version still passes."""
        self.api.container_set_properties(
            self.account, self.container,
            system={'sys.m2.policy.version': '2'})
        self.api.object_create(
            self.account, self.container, obj_name=self.path, data="version")
        chunk = random.choice(self.chunks)
        chunk_volume = chunk['url'].split('/')[2]
        chunk_id = chunk['url'].split('/')[3]
        chunk_path = self._chunk_path(chunk)
        auditor = BlobAuditorWorker(self.conf, None,
                                    self.rawx_volumes[chunk_volume])
        auditor.chunk_audit(chunk_path, chunk_id)
        versioned_meta, versioned_chunks = self.api.object_locate(
            self.account, self.container, self.path)
        self.assertNotEqual(self.content_id, versioned_meta['id'])
        versioned_chunk = random.choice(versioned_chunks)
        versioned_chunk_volume = versioned_chunk['url'].split('/')[2]
        versioned_chunk_id = versioned_chunk['url'].split('/')[3]
        versioned_chunk_path = self._chunk_path(versioned_chunk)
        versioned_auditor = BlobAuditorWorker(
            self.conf, None, self.rawx_volumes[versioned_chunk_volume])
        versioned_auditor.chunk_audit(versioned_chunk_path,
                                      versioned_chunk_id)
        auditor.chunk_audit(chunk_path, chunk_id)
        copy_chunk(chunk_path, chunk_path + '.copy')
        auditor.chunk_audit(chunk_path + '.copy', chunk_id)
        # Delete only the first version.
        self.api.object_delete(
            self.account, self.container, self.path, version=self.version)
        versioned_auditor.chunk_audit(versioned_chunk_path,
                                      versioned_chunk_id)
        self.assertRaises(OrphanChunk, auditor.chunk_audit,
                          chunk_path + '.copy', chunk_id)
class TestObjectStorageApiPerfdata(BaseTestCase):
    """Check that API operations fill the caller-provided perfdata dict
    (nested layout: per-service sub-dictionaries)."""

    def setUp(self):
        super(TestObjectStorageApiPerfdata, self).setUp()
        self.api = ObjectStorageApi(self.ns, endpoint=self.uri)
        # (container, object) pairs to delete on teardown.
        self.created = list()

    def tearDown(self):
        super(TestObjectStorageApiPerfdata, self).tearDown()
        for ct, name in self.created:
            try:
                self.api.object_delete(self.account, ct, name)
            except Exception:
                logging.exception("Failed to delete %s/%s/%s//%s",
                                  self.ns, self.account, ct, name)

    def test_object_create_perfdata(self):
        """object_create and object_delete must report proxy and rawx
        timings, with one rawx entry per chunk."""
        perfdata = dict()
        container = random_str(8)
        obj = random_str(8)
        self.api.object_create(self.account, container, obj_name=obj,
                               data=obj, perfdata=perfdata)
        # Fix: register the object so tearDown cleans it up if an
        # assertion below fails before the explicit delete.
        self.created.append((container, obj))
        meta, chunks = self.api.object_locate(self.account, container, obj)
        self.assertIn('proxy', perfdata)
        self.assertIn('resolve', perfdata['proxy'])
        self.assertIn('meta2', perfdata['proxy'])
        self.assertIn('overall', perfdata['proxy'])
        self.assertIn('rawx', perfdata)
        if meta['policy'] == 'EC':
            # Erasure coding adds its own timing entry.
            self.assertIn('ec', perfdata['rawx'])
        for chunk in chunks:
            self.assertIn(chunk['url'], perfdata['rawx'])
        self.assertIn('overall', perfdata['rawx'])
        perfdata.clear()
        self.api.object_delete(self.account, container, obj,
                               perfdata=perfdata)
        self.created.remove((container, obj))
        self.assertIn('proxy', perfdata)
        self.assertIn('resolve', perfdata['proxy'])
        self.assertIn('meta2', perfdata['proxy'])
        self.assertIn('overall', perfdata['proxy'])

    def test_object_fetch_perfdata(self):
        """object_fetch must report 'ttfb'/'ttlb' only after the stream
        has been consumed, and rawx timings for the chunks read."""
        perfdata = dict()
        container = random_str(8)
        obj = random_str(8)
        self.api.object_create(self.account, container, obj_name=obj,
                               data=obj)
        # Fix: register the object so a failing assertion does not leak it.
        self.created.append((container, obj))
        meta, chunks = self.api.object_locate(self.account, container, obj)
        stg_method = STORAGE_METHODS.load(meta['chunk_method'])
        _, stream = self.api.object_fetch(self.account, container, obj,
                                          perfdata=perfdata)
        self.assertIn('proxy', perfdata)
        self.assertIn('resolve', perfdata['proxy'])
        self.assertIn('meta2', perfdata['proxy'])
        self.assertIn('overall', perfdata['proxy'])
        # Time-to-first/last-byte appear only after the stream is read.
        self.assertNotIn('ttfb', perfdata)
        self.assertNotIn('ttlb', perfdata)
        buf = b''.join(stream)
        # Fix: the stream yields bytes while obj is a str; compare bytes
        # with bytes (the original assertEqual(obj, buf) always fails on
        # Python 3).
        self.assertEqual(obj.encode('utf-8'), buf)
        self.assertIn('rawx', perfdata)
        if stg_method.ec:
            self.assertIn('ec', perfdata['rawx'])
        # Only a subset of chunks may be read, but at least the minimum
        # required by the storage method.
        nb_chunks_to_read = 0
        for chunk in chunks:
            if chunk['url'] in perfdata['rawx']:
                nb_chunks_to_read += 1
        self.assertLessEqual(stg_method.min_chunks_to_read,
                             nb_chunks_to_read)
        self.assertIn('overall', perfdata['rawx'])
        self.assertIn('ttfb', perfdata)
        self.assertIn('ttlb', perfdata)
        self.api.object_delete(self.account, container, obj)
        self.created.remove((container, obj))
class TestContainerDownload(BaseTestCase):
    """
    Test the container streaming service: dump a container as a tar
    archive, and restore a tar archive into a container (full,
    multipart, concurrent and broken-connection restores).
    """

    def setUp(self):
        super(TestContainerDownload, self).setUp()
        # FIXME: should we use direct API from BaseTestCase
        # or still container.client ?
        self.conn = ObjectStorageApi(self.ns)
        self._streaming = 'http://' + self.get_service_url('container')[2]
        self._cnt = random_container()
        self._uri = self.make_uri('dump')
        # Map object name -> {'data': ..., 'meta': ..., 'mime': ...},
        # the expected content of the container.
        self._data = {}
        self.conn.container_create(self.account, self._cnt)
        self.raw = ""
        self._slo = []

    def make_uri(self, action, account=None, container=None):
        """Build a dump/restore URI for the streaming service."""
        account = account or self.account
        container = container or self._cnt
        return '%s/v1.0/container/%s?acct=%s&ref=%s' % (
            self._streaming, action, account, container)

    def tearDown(self):
        for name in self._data:
            self.conn.object_delete(self.account, self._cnt, name)
        self.conn.container_delete(self.account, self._cnt)
        super(TestContainerDownload, self).tearDown()

    def _create_data(self, name=gen_names, metadata=None, size=513,
                     append=False):
        """
        Fill the test container with 5 objects of increasing size,
        optionally created in two halves (append=True) and optionally
        decorated with 10 random properties each.
        """
        for idx, _name in itertools.islice(name(), 5):
            mime = random.choice(MIMETYPE)
            if append and size > 0:
                # Create the object in two parts to exercise append.
                data = gen_data(size / 2 * idx)
                entry = {'data': data, 'meta': None, 'mime': mime}
                self.conn.object_create(self.account, self._cnt,
                                        obj_name=_name, data=data,
                                        mime_type=mime)
                data = gen_data(size / 2 * idx)
                self.conn.object_create(self.account, self._cnt,
                                        obj_name=_name, data=data,
                                        mime_type=mime, append=True)
                entry['data'] += data
            else:
                data = gen_data(size * idx)
                entry = {'data': data, 'meta': None, 'mime': mime}
                self.conn.object_create(self.account, self._cnt,
                                        obj_name=_name, data=data,
                                        mime_type=mime)
            if metadata:
                entry['meta'] = {}
                for _ in xrange(10):
                    key, val = metadata()
                    entry['meta'][key] = val
                self.conn.object_update(self.account, self._cnt, _name,
                                        entry['meta'])
            self._data[_name] = entry

    def _create_s3_slo(self, name=gen_names, metadata=None):
        # create a fake S3 bucket with a SLO object
        chunksize = 10000
        parts = 5
        res = []
        full_data = ""
        self.conn.container_create(self.account, self._cnt + '+segments')
        _name = "toto"
        etag = rand_str(50)
        part_number = 1
        # 5 full-size segments plus one trailing partial segment.
        for size in [chunksize] * parts + [444]:
            data = gen_data(size)
            res.append({
                'bytes': size,
                'content_type': 'application/octect-stream',
                'hash': md5(data).hexdigest().upper(),
                'last_modified': '2017-06-21T12:42:47.000000',
                'name': '/%s+segments/%s/%s/%d' % (self._cnt, _name, etag,
                                                   part_number)
            })
            self.conn.object_create(self.account, "%s+segments" % self._cnt,
                                    obj_name='%s/%s/%d' % (_name, etag,
                                                           part_number),
                                    data=data)
            full_data += data
            part_number += 1
        self._data[_name] = {
            'data': full_data,
            'meta': {
                'x-static-large-object': 'true',
                'x-object-sysmeta-slo-etag': etag,
                'x-object-sysmeta-slo-size': str(len(full_data))
            }
        }
        self._slo.append(_name)
        # The manifest object itself holds the JSON segment list.
        data = json.dumps(res)
        self.conn.object_create(self.account, self._cnt, obj_name=_name,
                                data=data)
        self.conn.object_update(self.account, self._cnt, _name,
                                self._data[_name]['meta'])

    def _check_tar(self, data):
        """Check that `data` is a tar containing exactly self._data."""
        raw = BytesIO(data)
        tar = tarfile.open(fileobj=raw, ignore_zeros=True)
        info = self._data.keys()
        for entry in tar.getnames():
            if entry == CONTAINER_MANIFEST:
                # skip special entry
                continue
            self.assertIn(entry, info)
            tmp = tar.extractfile(entry)
            self.assertEqual(self._data[entry]['data'], tmp.read())
            info.remove(entry)
        # Every expected object must have appeared in the archive.
        self.assertEqual(info, [])
        return tar

    def _check_container(self, cnt):
        """Check that container `cnt` holds exactly self._data."""
        ret = self.conn.object_list(account=self.account, container=cnt)
        names = self._data.keys()
        for obj in ret['objects']:
            name = obj['name']
            self.assertIn(name, self._data)
            self.assertEqual(obj['size'], len(self._data[name]['data']))
            _, data = self.conn.object_fetch(self.account, cnt, name)
            raw = "".join(data)
            self.assertEqual(
                md5(raw).hexdigest(),
                md5(self._data[name]['data']).hexdigest())
            meta = self.conn.object_get_properties(self.account, cnt, name)
            self.assertEqual(meta['properties'], self._data[name]['meta'])
            names.remove(name)
        self.assertEqual(len(names), 0)

    def _simple_download(self, name=gen_names, metadata=None, size=513,
                         append=False):
        """Create data, dump the container and verify the archive."""
        self._create_data(name=name, metadata=metadata, size=size,
                          append=append)
        ret = requests.get(self._uri)
        self.assertGreater(len(ret.content), 0)
        self.assertEqual(ret.status_code, 200)
        self.raw = ret.content
        return self._check_tar(ret.content)

    def _check_metadata(self, tar):
        """Check object properties appear as PAX SCHILY.xattr headers."""
        for entry in tar.getnames():
            if entry == CONTAINER_MANIFEST:
                # skip special entry
                continue
            headers = tar.getmember(entry).pax_headers
            keys = headers.keys()[:]
            for key, val in self._data[entry]['meta'].items():
                key = u"SCHILY.xattr.user." + key.decode('utf-8')
                self.assertIn(key, headers)
                self.assertEqual(val.decode('utf-8'), headers[key])
                keys.remove(key)
            # self.assertEqual(self._data[entry]['mime'], headers['mime_type'])
            keys.remove('mime_type')
            # self.assertEqual(keys, [])

    def test_missing_container(self):
        ret = requests.get(self._streaming + '/' + random_container("ms-"))
        self.assertEqual(ret.status_code, 404)

    def test_invalid_url(self):
        ret = requests.get(self._streaming)
        self.assertEqual(ret.status_code, 404)
        ret = requests.head(self._streaming + '/' + random_container('inv')
                            + '/' + random_container('inv'))
        self.assertEqual(ret.status_code, 404)

    def test_download_empty_container(self):
        # Dumping an empty container yields 204 No Content.
        ret = requests.get(self._uri)
        self.assertEqual(ret.status_code, 204)

    def test_simple_download(self):
        self._simple_download()

    def test_check_head(self):
        # HEAD must advertise the same size as GET actually returns.
        self._create_data()
        get = requests.get(self._uri)
        head = requests.head(self._uri)
        self.assertEqual(get.headers['content-length'],
                         head.headers['content-length'])

    def test_download_per_range(self):
        """Download the dump in 512-byte ranges and compare to full GET."""
        self._create_data()
        org = requests.get(self._uri)
        data = []
        for idx in xrange(0, int(org.headers['content-length']), 512):
            ret = requests.get(
                self._uri,
                headers={'Range': 'bytes=%d-%d' % (idx, idx + 511)})
            self.assertEqual(ret.status_code, 206)
            self.assertEqual(len(ret.content), 512)
            self.assertEqual(ret.content, org.content[idx:idx + 512])
            data.append(ret.content)
        data = "".join(data)
        self.assertGreater(len(data), 0)
        self.assertEqual(md5(data).hexdigest(), md5(org.content).hexdigest())

    def test_invalid_range(self):
        self._create_data()
        # Unaligned, inverted or out-of-bounds ranges must be rejected.
        ranges = ((-512, 511), (512, 0), (1, 3), (98888, 99999))
        for start, end in ranges:
            ret = requests.get(
                self._uri,
                headers={'Range': 'bytes=%d-%d' % (start, end)})
            self.assertEqual(
                ret.status_code, 416,
                "Invalid error code for range %d-%d" % (start, end))
        # Multi-range requests are not supported.
        ret = requests.get(self._uri,
                           headers={'Range': 'bytes=0-511, 512-1023'})
        self.assertEqual(ret.status_code, 416)

    def test_file_metadata(self):
        tar = self._simple_download(metadata=gen_metadata)
        self._check_metadata(tar)

    def test_container_metadata(self):
        """Container properties must be dumped as a JSON archive member."""
        key, val = gen_metadata()
        ret = self.conn.container_update(self.account, self._cnt, {key: val})
        ret = self.conn.container_show(self.account, self._cnt)
        ret = requests.get(self._uri)
        self.assertEqual(ret.status_code, 200)
        raw = BytesIO(ret.content)
        tar = tarfile.open(fileobj=raw, ignore_zeros=True)
        self.assertIn(CONTAINER_PROPERTIES, tar.getnames())
        data = json.load(tar.extractfile(CONTAINER_PROPERTIES))
        self.assertIn(key, data)
        self.assertEqual(val, data[key])

    def test_charset_file(self):
        self._simple_download(name=gen_charset_names)

    @unittest.skip("wip")
    def test_byte_metadata(self):
        tar = self._simple_download(metadata=gen_byte_metadata)
        self._check_metadata(tar)

    def test_charset_metadata(self):
        tar = self._simple_download(metadata=gen_charset_metadata)
        self._check_metadata(tar)

    @attr('s3')
    def test_s3_simple_download(self):
        self._create_s3_slo()
        ret = requests.get(self._uri)
        self.assertGreater(len(ret.content), 0)
        self.assertEqual(ret.status_code, 200)
        self.raw = ret.content
        raw = BytesIO(ret.content)
        tar = tarfile.open(fileobj=raw, ignore_zeros=True)
        info = self._data.keys()
        for entry in tar.getnames():
            if entry == CONTAINER_MANIFEST:
                # skip special entry
                continue
            self.assertIn(entry, info)
            tmp = tar.extractfile(entry)
            self.assertEqual(self._data[entry]['data'], tmp.read())
            info.remove(entry)
        self.assertEqual(len(info), 0)
        return tar

    @attr('s3')
    def test_s3_range_download(self):
        self._create_s3_slo()
        org = requests.get(self._uri)
        self.assertEqual(org.status_code, 200)
        data = []
        for idx in xrange(0, int(org.headers['content-length']), 512):
            ret = requests.get(
                self._uri,
                headers={'Range': 'bytes=%d-%d' % (idx, idx + 511)})
            self.assertEqual(ret.status_code, 206)
            self.assertEqual(len(ret.content), 512)
            self.assertEqual(ret.content, org.content[idx:idx + 512])
            data.append(ret.content)
        data = "".join(data)
        self.assertGreater(len(data), 0)
        self.assertEqual(md5(data).hexdigest(), md5(org.content).hexdigest())

    @attr('s3')
    def test_s3_check_slo_metadata_download(self):
        """SLO bookkeeping properties must not survive a restore."""
        self._create_s3_slo()
        org = requests.get(self.make_uri('dump'))
        self.assertEqual(org.status_code, 200)
        cnt = rand_str(20)
        res = requests.put(self.make_uri('restore', container=cnt),
                           data=org.content)
        # BUGFIX: the original asserted `org.status_code` a second time,
        # leaving the restore response unchecked. A complete single-shot
        # restore returns 201 (see test_simple_restore).
        self.assertEqual(res.status_code, 201)
        res = self.conn.object_get_properties(self.account, cnt,
                                              self._slo[0])
        props = res['properties']
        self.assertNotIn('x-static-large-object', props)
        self.assertNotIn('x-object-sysmeta-slo-size', props)
        self.assertNotIn('x-object-sysmeta-slo-etag', props)

    @attr('simple')
    def test_simple_restore(self):
        self._create_data(metadata=gen_metadata)
        org = requests.get(self.make_uri('dump'))
        cnt = rand_str(20)
        res = requests.put(self.make_uri('restore', container=cnt),
                           data=org.content)
        self.assertEqual(res.status_code, 201)
        self._check_container(cnt)

    @attr('restore')
    def test_multipart_restore(self):
        """Restore a dump by successive ranged PUT requests."""
        self._create_data(metadata=gen_metadata, size=1025 * 1024)
        org = requests.get(self.make_uri('dump'))
        cnt = rand_str(20)
        size = 1014 * 1024
        parts = [
            org.content[x:x + size]
            for x in xrange(0, len(org.content), size)
        ]
        uri = self.make_uri('restore', container=cnt)
        start = 0
        for part in parts:
            hdrs = {'Range': 'bytes=%d-%d' % (start, start + len(part) - 1)}
            res = requests.put(uri, data=part, headers=hdrs)
            start += len(part)
            # 206 for intermediate parts, 201 when the restore completes.
            self.assertIn(res.status_code, [201, 206])
        self._check_container(cnt)

    @attr('restore')
    def test_multipart_invalid_restore(self):
        """Re-sending an already-consumed range must be rejected (422)."""
        self._create_data(metadata=gen_metadata, size=1025 * 1024)
        org = requests.get(self.make_uri('dump'))
        cnt = rand_str(20)
        uri = self.make_uri('restore', container=cnt)
        size = 1014 * 1024
        parts = [
            org.content[x:x + size]
            for x in xrange(0, len(org.content), size)
        ]
        start = 0
        for part in parts:
            hdrs = {'Range': 'bytes=%d-%d' % (start, start + len(part) - 1)}
            res = requests.put(uri, data=part, headers=hdrs)
            self.assertIn(res.status_code, [201, 206])
            start += len(part)
            # only unfinished restoration expose X-Consumed-Size
            if res.status_code == 206:
                res = requests.head(uri)
                self.assertEqual(int(res.headers['X-Consumed-Size']), start)
            # Replaying the same range must fail.
            inv = requests.put(uri, data=part, headers=hdrs)
            self.assertEqual(inv.status_code, 422)
            if res.status_code == 206:
                res = requests.head(uri)
                self.assertEqual(int(res.headers['X-Consumed-Size']), start)
        # Starting a brand new restore with a non-zero offset must fail.
        uri = self.make_uri('restore', container=rand_str(20))
        hdrs = {'Range': 'bytes=%d-%d' % (size, size + len(parts[1]) - 1)}
        res = requests.put(uri, data=part, headers=hdrs)
        self.assertEqual(res.status_code, 422)
        self._check_container(cnt)

    @attr('concurrency')
    def test_multipart_concurrency(self):
        """Two concurrent uploads of the same range: one must get 422."""
        self._create_data(metadata=gen_metadata, size=1025 * 1024)
        org = requests.get(self.make_uri('dump'))
        cnt = rand_str(20)
        uri = self.make_uri('restore', container=cnt)
        size = divmod(len(org.content) / 3, 512)[0] * 512
        parts = [
            org.content[x:x + size]
            for x in xrange(0, len(org.content), size)
        ]
        start = 0

        class StreamWithContentLength(Thread):
            """Thread to send data with delays to restore API"""
            def __init__(self, data, headers):
                self._count = 0
                self._data = data
                self._hdrs = headers
                super(StreamWithContentLength, self).__init__()

            def __len__(self):
                return len(self._data)

            def read(self, *args):
                # Drip-feed the payload to keep the upload in progress.
                if self._count < len(self._data):
                    time.sleep(0.5)
                    data = self._data[self._count:self._count + size / 3]
                    self._count += len(data)
                    return data
                return ""

            def run(self):
                self._ret = requests.put(uri, data=self, headers=self._hdrs)

        for idx, part in enumerate(parts):
            hdrs = {'Range': 'bytes=%d-%d' % (start, start + len(part) - 1)}
            if idx == 0:
                res = requests.put(uri, data=part, headers=hdrs)
                self.assertIn(res.status_code, [201, 206])
            else:
                # launch Thread and simulate slow bandwidth
                thr = StreamWithContentLength(part, hdrs)
                thr.start()
                # send data on same range
                time.sleep(0.5)
                res = requests.put(uri, data=part, headers=hdrs)
                self.assertEqual(res.status_code, 422)
                thr.join()
                self.assertIn(thr._ret.status_code, [201, 206])
            start += len(part)
        self._check_container(cnt)

    @attr('disconnected')
    def test_broken_connectivity(self):
        """Restore must be resumable after mid-upload connection breaks."""
        self._create_data(metadata=gen_metadata, size=1025 * 1024)
        org = requests.get(self.make_uri('dump'))
        cnt = rand_str(20)

        class FakeStream(object):
            """Send data and simulate a connectivity issue"""
            def __init__(self, data, size):
                self._count = 0
                self._data = data
                self._size = size

            def __len__(self):
                return len(self._data)

            def read(self, *args):
                if self._count < self._size:
                    data = self._data[self._count:self._count + size / 3]
                    self._count += len(data)
                    return data
                if self._count == len(self._data):
                    return ""
                # Abort mid-transfer when _size < len(_data).
                raise Exception("break connection")

        def wait_lock():
            """When the lock is gone, return current consumed size"""
            nb = 0
            while True:
                time.sleep(0.1)
                req = requests.head(uri)
                if (req.status_code == 200 and req.headers.get(
                        'X-Upload-In-Progress', '1') == '0'):
                    print("Tried before lock free", nb)
                    print("Got consumed-size", req.headers['X-Consumed-Size'])
                    return int(req.headers['X-Consumed-Size'])
                nb += 1
                self.assertLess(nb, 10)

        uri = self.make_uri('restore', container=cnt)
        block = 1000 * 512
        start = 0
        cut = False
        while True:
            if start:
                # Resume from the offset the server actually consumed.
                start = wait_lock()
            stop = min(len(org.content), start + block)
            hdrs = {'Range': 'bytes=%d-%d' % (start, stop - 1)}
            size = stop - start
            if cut:
                # Every other attempt, break the connection half-way.
                size = block / 2
            cut = not cut
            try:
                ret = requests.put(uri, headers=hdrs,
                                   data=FakeStream(org.content[start:stop],
                                                   size))
            except Exception:
                pass
            else:
                self.assertIn(
                    ret.status_code, (201, 206),
                    "Unexpected %d HTTP response: %s" % (ret.status_code,
                                                         ret.content))
                start += size
                if ret.status_code == 201:
                    break
        result = requests.get(self.make_uri('dump', container=cnt))
        self._check_tar(result.content)

    @attr('rawtar')
    def test_rawtar(self):
        """Create a normal tar archive and restore it"""
        raw = BytesIO()
        # Renamed from `tarfile` to avoid shadowing the tarfile module.
        archive = TarFile(mode='w', fileobj=raw)
        testdata = rand_str(20) * 5000
        inf = TarInfo("simpletar")
        fileraw = BytesIO()
        fileraw.write(testdata)
        inf.size = len(testdata)
        fileraw.seek(0)
        archive.addfile(inf, fileobj=fileraw)
        archive.close()
        raw.seek(0)
        data = raw.read()
        cnt = rand_str(20)
        ret = requests.put(self.make_uri("restore", container=cnt), data=data)
        self.assertEqual(ret.status_code, 201)
        meta, stream = self.conn.object_fetch(self.account, cnt, "simpletar")
        self.assertEqual(
            md5("".join(stream)).hexdigest(), md5(testdata).hexdigest())

    @attr('invalid')
    def test_checksums(self):
        """Check restore operation with invalid tar"""
        tar = self._simple_download(append=True)
        manifest = json.load(tar.extractfile(CONTAINER_MANIFEST),
                             object_pairs_hook=OrderedDict)
        # => add random bytes inside each file (either header and data)
        for entry in manifest:
            if entry['name'] == CONTAINER_MANIFEST:
                # CONTAINER_MANIFEST does not have checksum at this time
                continue
            inv = self.raw
            # Test with tar entry
            # checksum tar doesn't work very well with SCHILY attributes
            # so only apply changes on regular block entry
            idx = entry['start_block'] * BLOCKSIZE \
                + random.randint(0, BLOCKSIZE)
            # + random.randint(0, entry['hdr_blocks'] * BLOCKSIZE)
            while self.raw[idx] == inv[idx]:
                inv = inv[:idx] + chr(random.randint(0, 255)) + inv[idx + 1:]
            cnt = rand_str(20)
            res = requests.put(self.make_uri('restore', container=cnt),
                               data=inv)
            self.assertEqual(res.status_code, 400)
            # skip empty file
            if entry['size'] == 0:
                continue
            # Test with data blocks
            inv = self.raw
            idx = (entry['start_block'] + entry['hdr_blocks']) * BLOCKSIZE \
                + random.randint(0, entry['size'] - 1)
            while self.raw[idx] == inv[idx]:
                inv = inv[:idx] + chr(random.randint(0, 255)) + inv[idx + 1:]
            cnt = rand_str(20)
            res = requests.put(self.make_uri('restore', container=cnt),
                               data=inv)
            self.assertEqual(res.status_code, 400)
class TestContentVersioning(BaseTestCase): def setUp(self): super(TestContentVersioning, self).setUp() self.api = ObjectStorageApi(self.conf['namespace']) self.container = random_str(8) system = {'sys.m2.policy.version': '3'} self.wait_for_score(('meta2', )) self.api.container_create(self.account, self.container, system=system) def test_versioning_enabled(self): props = self.api.container_get_properties(self.account, self.container) self.assertEqual('3', props['system']['sys.m2.policy.version']) def test_list_versions(self): self.api.object_create(self.account, self.container, obj_name="versioned", data="content0") self.api.object_create(self.account, self.container, obj_name="versioned", data="content1") listing = self.api.object_list(self.account, self.container, versions=True) objects = listing['objects'] self.assertEqual(2, len(objects)) self.assertNotEqual(objects[0]['version'], objects[1]['version']) def test_container_purge(self): # many contents for i in range(0, 4): self.api.object_create(self.account, self.container, obj_name="versioned", data="content") listing = self.api.object_list(self.account, self.container, versions=True) objects = listing['objects'] self.assertEqual(4, len(objects)) oldest_version = min(objects, key=lambda x: x['version']) # use the maxvers of the container configuration self.api.container_purge(self.account, self.container) listing = self.api.object_list(self.account, self.container, versions=True) objects = listing['objects'] self.assertEqual(3, len(objects)) self.assertNotIn(oldest_version, [x['version'] for x in objects]) oldest_version = min(objects, key=lambda x: x['version']) # use the maxvers of the request self.api.container_purge(self.account, self.container, maxvers=1) listing = self.api.object_list(self.account, self.container, versions=True) objects = listing['objects'] self.assertEqual(1, len(objects)) self.assertNotIn(oldest_version, [x['version'] for x in objects]) def test_content_purge(self): # many contents for 
i in range(0, 4): self.api.object_create(self.account, self.container, obj_name="versioned", data="content") listing = self.api.object_list(self.account, self.container, versions=True) objects = listing['objects'] self.assertEqual(4, len(objects)) oldest_version = min(objects, key=lambda x: x['version']) # use the maxvers of the container configuration self.api.container.content_purge(self.account, self.container, "versioned") listing = self.api.object_list(self.account, self.container, versions=True) objects = listing['objects'] self.assertEqual(3, len(objects)) self.assertNotIn(oldest_version, [x['version'] for x in objects]) oldest_version = min(objects, key=lambda x: x['version']) # use the maxvers of the request self.api.container.content_purge(self.account, self.container, "versioned", maxvers=1) listing = self.api.object_list(self.account, self.container, versions=True) objects = listing['objects'] self.assertEqual(1, len(objects)) self.assertNotIn(oldest_version, [x['version'] for x in objects]) # other contents for i in range(0, 4): self.api.object_create(self.account, self.container, obj_name="versioned2", data="content" + str(i)) listing = self.api.object_list(self.account, self.container, versions=True) objects = listing['objects'] self.assertEqual(5, len(objects)) # use the maxvers of the container configuration self.api.container.content_purge(self.account, self.container, "versioned") listing = self.api.object_list(self.account, self.container, versions=True) objects = listing['objects'] self.assertEqual(5, len(objects)) def test_delete_exceeding_version(self): def check_num_objects_and_get_oldest_version(expected_objects, expected_deleted_aliases, oldest_version): listing = self.api.object_list(self.account, self.container, versions=True) objects = listing['objects'] nb_objects = 0 nb_deleted = 0 new_oldest_version = 0 for obj in objects: if obj['deleted']: nb_deleted += 1 else: nb_objects += 1 if new_oldest_version == 0 \ or new_oldest_version > 
obj['version']: new_oldest_version = obj['version'] self.assertEqual(expected_objects, nb_objects) self.assertEqual(expected_deleted_aliases, nb_deleted) if oldest_version is not None: self.assertLess(oldest_version, new_oldest_version) return new_oldest_version system = {'sys.m2.policy.version.delete_exceeding': '1'} self.api.container_set_properties(self.account, self.container, system=system) self.api.object_create(self.account, self.container, obj_name="versioned", data="content0") oldest_version = check_num_objects_and_get_oldest_version(1, 0, None) self.api.object_create(self.account, self.container, obj_name="versioned", data="content1") self.assertEqual(oldest_version, check_num_objects_and_get_oldest_version(2, 0, None)) self.api.object_create(self.account, self.container, obj_name="versioned", data="content2") self.assertEqual(oldest_version, check_num_objects_and_get_oldest_version(3, 0, None)) self.api.object_create(self.account, self.container, obj_name="versioned", data="content3") oldest_version = check_num_objects_and_get_oldest_version( 3, 0, oldest_version) self.api.object_delete(self.account, self.container, "versioned") self.assertEqual(oldest_version, check_num_objects_and_get_oldest_version(3, 1, None)) self.api.object_create(self.account, self.container, obj_name="versioned", data="content4") oldest_version = check_num_objects_and_get_oldest_version( 3, 1, oldest_version) self.api.object_create(self.account, self.container, obj_name="versioned", data="content5") oldest_version = check_num_objects_and_get_oldest_version( 3, 1, oldest_version) self.api.object_create(self.account, self.container, obj_name="versioned", data="content6") # FIXME(adu) The deleted alias should be deleted at the same time oldest_version = check_num_objects_and_get_oldest_version( 3, 1, oldest_version) self.api.object_create(self.account, self.container, obj_name="versioned", data="content7") oldest_version = check_num_objects_and_get_oldest_version( 3, 1, 
oldest_version) def test_change_flag_delete_exceeding_versions(self): def check_num_objects(expected): listing = self.api.object_list(self.account, self.container, versions=True) objects = listing['objects'] self.assertEqual(expected, len(objects)) for i in range(5): self.api.object_create(self.account, self.container, obj_name="versioned", data="content" + str(i)) check_num_objects(5) system = {'sys.m2.policy.version.delete_exceeding': '1'} self.api.container_set_properties(self.account, self.container, system=system) self.api.object_create(self.account, self.container, obj_name="versioned", data="content5") check_num_objects(3) for i in range(6, 10): self.api.object_create(self.account, self.container, obj_name="versioned", data="content" + str(i)) check_num_objects(3) system['sys.m2.policy.version.delete_exceeding'] = '0' self.api.container_set_properties(self.account, self.container, system=system) self.api.object_create(self.account, self.container, obj_name="versioned", data="content11") check_num_objects(4) def test_purge_objects_with_delete_marker(self): def check_num_objects(expected): listing = self.api.object_list(self.account, self.container, versions=True) objects = listing['objects'] self.assertEqual(expected, len(objects)) for i in range(5): self.api.object_create(self.account, self.container, obj_name="versioned", data="content" + str(i)) check_num_objects(5) self.api.object_delete(self.account, self.container, "versioned") self.assertRaises(NoSuchObject, self.api.object_locate, self.account, self.container, "versioned") check_num_objects(6) self.api.container.content_purge(self.account, self.container, "versioned") self.assertRaises(NoSuchObject, self.api.object_locate, self.account, self.container, "versioned") check_num_objects(4) system = {'sys.m2.keep_deleted_delay': '1'} self.api.container_set_properties(self.account, self.container, system=system) time.sleep(2) self.api.container.content_purge(self.account, self.container, "versioned") 
check_num_objects(0) def test_list_objects(self): resp = self.api.object_list(self.account, self.container) self.assertEqual(0, len(list(resp['objects']))) self.assertFalse(resp.get('truncated')) def _check_objects(expected_objects, objects): self.assertEqual(len(expected_objects), len(objects)) for i in range(len(expected_objects)): self.assertEqual(expected_objects[i]['name'], objects[i]['name']) self.assertEqual(int(expected_objects[i]['version']), int(objects[i]['version'])) self.assertEqual(true_value(expected_objects[i]['deleted']), true_value(objects[i]['deleted'])) all_versions = dict() def _create_object(obj_name, all_versions): self.api.object_create(self.account, self.container, obj_name=obj_name, data="test") versions = all_versions.get(obj_name, list()) versions.append( self.api.object_show(self.account, self.container, obj_name)) all_versions[obj_name] = versions def _delete_object(obj_name, all_versions): self.api.object_delete(self.account, self.container, obj_name) versions = all_versions.get(obj_name, list()) versions.append( self.api.object_show(self.account, self.container, obj_name)) all_versions[obj_name] = versions def _get_current_objects(all_versions): current_objects = list() obj_names = sorted(all_versions.keys()) for obj_name in obj_names: obj = all_versions[obj_name][-1] if not true_value(obj['deleted']): current_objects.append(obj) return current_objects def _get_object_versions(all_versions): object_versions = list() obj_names = sorted(all_versions.keys()) for obj_name in obj_names: versions = all_versions[obj_name] versions.reverse() object_versions += versions versions.reverse() return object_versions # 0 object expected_current_objects = _get_current_objects(all_versions) expected_object_versions = _get_object_versions(all_versions) resp = self.api.object_list(self.account, self.container, limit=3) _check_objects(expected_current_objects, list(resp['objects'])) self.assertFalse(resp.get('truncated')) resp = 
self.api.object_list(self.account, self.container, limit=2) _check_objects(expected_current_objects, list(resp['objects'])) self.assertFalse(resp.get('truncated')) resp = self.api.object_list(self.account, self.container, limit=1) _check_objects(expected_current_objects, list(resp['objects'])) self.assertFalse(resp.get('truncated')) resp = self.api.object_list(self.account, self.container, versions=True) _check_objects(expected_object_versions, list(resp['objects'])) self.assertFalse(resp.get('truncated')) resp = self.api.object_list(self.account, self.container, versions=True, limit=3) _check_objects(expected_object_versions, list(resp['objects'])) self.assertFalse(resp.get('truncated')) # 3 objects with 1 version for i in range(3): _create_object("versioned" + str(i), all_versions) expected_current_objects = _get_current_objects(all_versions) expected_object_versions = _get_object_versions(all_versions) resp = self.api.object_list(self.account, self.container) _check_objects(expected_current_objects, list(resp['objects'])) self.assertFalse(resp.get('truncated')) resp = self.api.object_list(self.account, self.container, limit=3) _check_objects(expected_current_objects, list(resp['objects'])) self.assertFalse(resp.get('truncated')) resp = self.api.object_list(self.account, self.container, limit=2) _check_objects(expected_current_objects[:2], list(resp['objects'])) self.assertTrue(resp.get('truncated')) self.assertEqual('versioned1', resp['next_marker']) resp = self.api.object_list(self.account, self.container, limit=1) _check_objects(expected_current_objects[:1], list(resp['objects'])) self.assertTrue(resp.get('truncated')) self.assertEqual('versioned0', resp['next_marker']) resp = self.api.object_list(self.account, self.container, versions=True) _check_objects(expected_object_versions, list(resp['objects'])) self.assertFalse(resp.get('truncated')) resp = self.api.object_list(self.account, self.container, versions=True, limit=3) 
_check_objects(expected_object_versions[:3], list(resp['objects'])) self.assertFalse(resp.get('truncated')) resp = self.api.object_list(self.account, self.container, marker='versioned0') _check_objects(expected_current_objects[1:], list(resp['objects'])) self.assertFalse(resp.get('truncated')) resp = self.api.object_list(self.account, self.container, marker='versioned0', limit=1) _check_objects(expected_current_objects[1:2], list(resp['objects'])) self.assertTrue(resp.get('truncated')) self.assertEqual('versioned1', resp['next_marker']) resp = self.api.object_list(self.account, self.container, marker='versioned0', versions=True) _check_objects(expected_object_versions[1:], list(resp['objects'])) self.assertFalse(resp.get('truncated')) resp = self.api.object_list(self.account, self.container, marker='versioned0', versions=True, limit=3) _check_objects(expected_object_versions[1:], list(resp['objects'])) self.assertFalse(resp.get('truncated')) # 3 objects with 2 versions for i in range(3): _create_object("versioned" + str(i), all_versions) expected_current_objects = _get_current_objects(all_versions) expected_object_versions = _get_object_versions(all_versions) resp = self.api.object_list(self.account, self.container) _check_objects(expected_current_objects, list(resp['objects'])) self.assertFalse(resp.get('truncated')) resp = self.api.object_list(self.account, self.container, limit=3) _check_objects(expected_current_objects, list(resp['objects'])) self.assertFalse(resp.get('truncated')) resp = self.api.object_list(self.account, self.container, limit=2) _check_objects(expected_current_objects[:2], list(resp['objects'])) self.assertTrue(resp.get('truncated')) self.assertEqual('versioned1', resp['next_marker']) resp = self.api.object_list(self.account, self.container, limit=1) _check_objects(expected_current_objects[:1], list(resp['objects'])) self.assertTrue(resp.get('truncated')) self.assertEqual('versioned0', resp['next_marker']) resp = 
self.api.object_list(self.account, self.container, versions=True) _check_objects(expected_object_versions, list(resp['objects'])) self.assertFalse(resp.get('truncated')) resp = self.api.object_list(self.account, self.container, versions=True, limit=3) _check_objects(expected_object_versions[:3], list(resp['objects'])) self.assertTrue(resp.get('truncated')) self.assertEqual('versioned1', resp['next_marker']) resp = self.api.object_list(self.account, self.container, marker='versioned0') _check_objects(expected_current_objects[1:], list(resp['objects'])) self.assertFalse(resp.get('truncated')) resp = self.api.object_list(self.account, self.container, marker='versioned0', limit=1) _check_objects(expected_current_objects[1:2], list(resp['objects'])) self.assertTrue(resp.get('truncated')) self.assertEqual('versioned1', resp['next_marker']) resp = self.api.object_list(self.account, self.container, marker='versioned0', versions=True) _check_objects(expected_object_versions[2:], list(resp['objects'])) self.assertFalse(resp.get('truncated')) resp = self.api.object_list(self.account, self.container, marker='versioned0', versions=True, limit=3) _check_objects(expected_object_versions[2:5], list(resp['objects'])) self.assertTrue(resp.get('truncated')) self.assertEqual('versioned2', resp['next_marker']) # 3 objects with 2 versions and 1 object with delete marker _delete_object("versioned1", all_versions) expected_current_objects = _get_current_objects(all_versions) expected_object_versions = _get_object_versions(all_versions) resp = self.api.object_list(self.account, self.container) _check_objects(expected_current_objects, list(resp['objects'])) self.assertFalse(resp.get('truncated')) resp = self.api.object_list(self.account, self.container, limit=3) _check_objects(expected_current_objects, list(resp['objects'])) self.assertFalse(resp.get('truncated')) resp = self.api.object_list(self.account, self.container, limit=2) _check_objects(expected_current_objects, 
list(resp['objects'])) self.assertFalse(resp.get('truncated')) resp = self.api.object_list(self.account, self.container, limit=1) _check_objects(expected_current_objects[:1], list(resp['objects'])) self.assertTrue(resp.get('truncated')) self.assertEqual('versioned0', resp['next_marker']) resp = self.api.object_list(self.account, self.container, versions=True) _check_objects(expected_object_versions, list(resp['objects'])) self.assertFalse(resp.get('truncated')) resp = self.api.object_list(self.account, self.container, versions=True, limit=3) _check_objects(expected_object_versions[:3], list(resp['objects'])) self.assertTrue(resp.get('truncated')) self.assertEqual('versioned1', resp['next_marker']) resp = self.api.object_list(self.account, self.container, marker='versioned0') _check_objects(expected_current_objects[1:], list(resp['objects'])) self.assertFalse(resp.get('truncated')) resp = self.api.object_list(self.account, self.container, marker='versioned0', limit=1) _check_objects(expected_current_objects[1:], list(resp['objects'])) self.assertFalse(resp.get('truncated')) resp = self.api.object_list(self.account, self.container, marker='versioned0', versions=True) _check_objects(expected_object_versions[2:], list(resp['objects'])) self.assertFalse(resp.get('truncated')) resp = self.api.object_list(self.account, self.container, marker='versioned0', versions=True, limit=3) _check_objects(expected_object_versions[2:5], list(resp['objects'])) self.assertTrue(resp.get('truncated')) self.assertEqual('versioned1', resp['next_marker']) # 3 objects with 2 versions and 2 objects with delete marker _delete_object("versioned0", all_versions) expected_current_objects = _get_current_objects(all_versions) expected_object_versions = _get_object_versions(all_versions) resp = self.api.object_list(self.account, self.container) _check_objects(expected_current_objects, list(resp['objects'])) self.assertFalse(resp.get('truncated')) resp = self.api.object_list(self.account, 
self.container, limit=3) _check_objects(expected_current_objects, list(resp['objects'])) self.assertFalse(resp.get('truncated')) resp = self.api.object_list(self.account, self.container, limit=2) _check_objects(expected_current_objects, list(resp['objects'])) self.assertFalse(resp.get('truncated')) resp = self.api.object_list(self.account, self.container, limit=1) _check_objects(expected_current_objects, list(resp['objects'])) self.assertFalse(resp.get('truncated')) resp = self.api.object_list(self.account, self.container, versions=True) _check_objects(expected_object_versions, list(resp['objects'])) self.assertFalse(resp.get('truncated')) resp = self.api.object_list(self.account, self.container, versions=True, limit=3) _check_objects(expected_object_versions[:3], list(resp['objects'])) self.assertTrue(resp.get('truncated')) self.assertEqual('versioned0', resp['next_marker']) resp = self.api.object_list(self.account, self.container, marker='versioned0') _check_objects(expected_current_objects, list(resp['objects'])) self.assertFalse(resp.get('truncated')) resp = self.api.object_list(self.account, self.container, marker='versioned0', limit=1) _check_objects(expected_current_objects, list(resp['objects'])) self.assertFalse(resp.get('truncated')) resp = self.api.object_list(self.account, self.container, marker='versioned0', versions=True) _check_objects(expected_object_versions[3:], list(resp['objects'])) self.assertFalse(resp.get('truncated')) resp = self.api.object_list(self.account, self.container, marker='versioned0', versions=True, limit=3) _check_objects(expected_object_versions[3:6], list(resp['objects'])) self.assertTrue(resp.get('truncated')) self.assertEqual('versioned1', resp['next_marker']) # 3 objects with 2 versions and 3 objects with delete marker _delete_object("versioned2", all_versions) expected_current_objects = _get_current_objects(all_versions) expected_object_versions = _get_object_versions(all_versions) resp = self.api.object_list(self.account, 
self.container) _check_objects(expected_current_objects, list(resp['objects'])) self.assertFalse(resp.get('truncated')) resp = self.api.object_list(self.account, self.container, limit=3) _check_objects(expected_current_objects, list(resp['objects'])) self.assertFalse(resp.get('truncated')) resp = self.api.object_list(self.account, self.container, limit=2) _check_objects(expected_current_objects, list(resp['objects'])) self.assertFalse(resp.get('truncated')) resp = self.api.object_list(self.account, self.container, limit=1) _check_objects(expected_current_objects, list(resp['objects'])) self.assertFalse(resp.get('truncated')) resp = self.api.object_list(self.account, self.container, versions=True) _check_objects(expected_object_versions, list(resp['objects'])) self.assertFalse(resp.get('truncated')) resp = self.api.object_list(self.account, self.container, versions=True, limit=3) _check_objects(expected_object_versions[:3], list(resp['objects'])) self.assertTrue(resp.get('truncated')) self.assertEqual('versioned0', resp['next_marker']) resp = self.api.object_list(self.account, self.container, marker='versioned0') _check_objects(expected_current_objects, list(resp['objects'])) self.assertFalse(resp.get('truncated')) resp = self.api.object_list(self.account, self.container, marker='versioned0', limit=1) _check_objects(expected_current_objects, list(resp['objects'])) self.assertFalse(resp.get('truncated')) resp = self.api.object_list(self.account, self.container, marker='versioned0', versions=True) _check_objects(expected_object_versions[3:], list(resp['objects'])) self.assertFalse(resp.get('truncated')) resp = self.api.object_list(self.account, self.container, marker='versioned0', versions=True, limit=3) _check_objects(expected_object_versions[3:6], list(resp['objects'])) self.assertTrue(resp.get('truncated')) self.assertEqual('versioned1', resp['next_marker']) # 3 objects with 2 versions and 3 objects with delete marker # (1 current version and 2 non current 
versions) _create_object("versioned0", all_versions) expected_current_objects = _get_current_objects(all_versions) expected_object_versions = _get_object_versions(all_versions) resp = self.api.object_list(self.account, self.container) _check_objects(expected_current_objects, list(resp['objects'])) self.assertFalse(resp.get('truncated')) resp = self.api.object_list(self.account, self.container, limit=3) _check_objects(expected_current_objects, list(resp['objects'])) self.assertFalse(resp.get('truncated')) resp = self.api.object_list(self.account, self.container, limit=2) _check_objects(expected_current_objects, list(resp['objects'])) self.assertFalse(resp.get('truncated')) resp = self.api.object_list(self.account, self.container, limit=1) _check_objects(expected_current_objects, list(resp['objects'])) self.assertFalse(resp.get('truncated')) resp = self.api.object_list(self.account, self.container, versions=True) _check_objects(expected_object_versions, list(resp['objects'])) self.assertFalse(resp.get('truncated')) resp = self.api.object_list(self.account, self.container, versions=True, limit=3) _check_objects(expected_object_versions[:3], list(resp['objects'])) self.assertTrue(resp.get('truncated')) self.assertEqual('versioned0', resp['next_marker']) resp = self.api.object_list(self.account, self.container, marker='versioned0') _check_objects(expected_current_objects[1:], list(resp['objects'])) self.assertFalse(resp.get('truncated')) resp = self.api.object_list(self.account, self.container, marker='versioned0', limit=1) _check_objects(expected_current_objects[1:], list(resp['objects'])) self.assertFalse(resp.get('truncated')) resp = self.api.object_list(self.account, self.container, marker='versioned0', versions=True) _check_objects(expected_object_versions[4:], list(resp['objects'])) self.assertFalse(resp.get('truncated')) resp = self.api.object_list(self.account, self.container, marker='versioned0', versions=True, limit=3) 
_check_objects(expected_object_versions[4:7], list(resp['objects'])) self.assertTrue(resp.get('truncated')) self.assertEqual('versioned1', resp['next_marker'])
class TestObjectStorageAPI(ObjectStorageApiTestBase):
    """
    Functional tests for ObjectStorageApi: container and object CRUD,
    properties, ranged fetch, append, truncate, refresh and snapshot.

    Inherits the api/created fixtures and the container helpers
    (_create, _delete, _clean, _get_properties, _set_properties) from
    ObjectStorageApiTestBase instead of duplicating them.
    """

    def test_container_show(self):
        # container_show on unknown container
        name = random_str(32)
        self.assertRaises(exc.NoSuchContainer, self.api.container_show,
                          self.account, name)
        self._create(name)
        # container_show on existing container
        res = self.api.container_show(self.account, name)
        self.assertIsNot(res['properties'], None)
        self._delete(name)
        # container_show on deleted container
        self.assertRaises(exc.NoSuchContainer, self.api.container_show,
                          self.account, name)

    def test_container_create(self):
        name = random_str(32)
        res = self._create(name)
        self.assertEqual(res, True)
        # second create
        res = self._create(name)
        self.assertEqual(res, False)
        # clean
        self._delete(name)

    def test_create_properties(self):
        name = random_str(32)
        metadata = {
            random_str(32): random_str(32),
            random_str(32): random_str(32),
        }
        res = self._create(name, metadata)
        self.assertEqual(res, True)
        data = self._get_properties(name)
        self.assertEqual(data['properties'], metadata)
        # clean
        self._clean(name, True)

    def test_container_delete(self):
        name = random_str(32)
        # container_delete on unknown container
        self.assertRaises(exc.NoSuchContainer, self.api.container_delete,
                          self.account, name)
        res = self._create(name)
        self.assertEqual(res, True)
        # container_delete on existing container
        self._delete(name)
        # verify deleted
        self.assertRaises(exc.NoSuchContainer, self.api.container_show,
                          self.account, name)
        # second delete
        self.assertRaises(exc.NoSuchContainer, self.api.container_delete,
                          self.account, name)
        # verify deleted
        self.assertRaises(exc.NoSuchContainer, self.api.container_show,
                          self.account, name)

    def test_container_get_properties(self):
        name = random_str(32)
        # container_get_properties on unknown container
        self.assertRaises(exc.NoSuchContainer,
                          self.api.container_get_properties,
                          self.account, name)
        res = self._create(name)
        self.assertEqual(res, True)
        # container_get_properties on existing container
        data = self.api.container_get_properties(self.account, name)
        self.assertEqual(data['properties'], {})
        self.assertIsNot(data['system'], None)
        self.assertIn("sys.user.name", data['system'])
        # container_get_properties
        metadata = {
            random_str(32): random_str(32),
            random_str(32): random_str(32),
        }
        self._set_properties(name, metadata)
        data = self.api.container_get_properties(self.account, name)
        self.assertEqual(data['properties'], metadata)
        # clean
        self._clean(name, True)
        # container_get_properties on deleted container
        self.assertRaises(exc.NoSuchContainer,
                          self.api.container_get_properties,
                          self.account, name)

    def test_container_get_properties_filtered(self):
        self.skipTest("Server side properties filtering not implemented")
        name = random_str(32)
        res = self._create(name)
        self.assertEqual(res, True)
        # container_get_properties on existing container
        data = self.api.container_get_properties(self.account, name)
        self.assertEqual(data['properties'], {})
        # container_get_properties
        metadata = {
            random_str(32): random_str(32),
            random_str(32): random_str(32),
        }
        self._set_properties(name, metadata)
        # container_get_properties specify key
        # (list(...) instead of .keys().pop(0): works on both py2 and py3,
        # where dict_keys has no pop())
        key = list(metadata.keys())[0]
        data = self.api.container_get_properties(self.account, name, [key])
        self.assertEqual({key: metadata[key]}, data['properties'])
        # clean
        self._clean(name, True)

    def test_container_set_properties(self):
        name = random_str(32)
        metadata = {
            random_str(32): random_str(32),
            random_str(32): random_str(32),
        }
        # container_set_properties on unknown container
        self.assertRaises(exc.NoSuchContainer,
                          self.api.container_set_properties,
                          self.account, name, metadata)
        res = self._create(name)
        self.assertEqual(res, True)
        # container_set_properties on existing container
        self.api.container_set_properties(self.account, name, metadata)
        data = self._get_properties(name)
        self.assertEqual(data['properties'], metadata)
        # container_set_properties
        key = random_str(32)
        value = random_str(32)
        metadata2 = {key: value}
        self._set_properties(name, metadata2)
        metadata.update(metadata2)
        data = self._get_properties(name)
        self.assertEqual(data['properties'], metadata)
        # container_set_properties overwrite key
        # (list(...) instead of .keys().pop(0): py2/py3 compatible)
        key = list(metadata.keys())[0]
        value = random_str(32)
        metadata3 = {key: value}
        metadata.update(metadata3)
        self.api.container_set_properties(self.account, name, metadata3)
        data = self._get_properties(name)
        self.assertEqual(data['properties'], metadata)
        # clean
        self._clean(name, True)
        # container_set_properties on deleted container
        self.assertRaises(exc.NoSuchContainer,
                          self.api.container_set_properties,
                          self.account, name, metadata)

    def test_del_properties(self):
        name = random_str(32)
        metadata = {
            random_str(32): random_str(32),
            random_str(32): random_str(32),
        }
        # container_del_properties on unknown container
        self.assertRaises(exc.NoSuchContainer,
                          self.api.container_del_properties,
                          self.account, name, [])
        res = self._create(name, metadata)
        self.assertEqual(res, True)
        # (list(...)[-1] instead of .keys().pop(): py2/py3 compatible)
        key = list(metadata.keys())[-1]
        del metadata[key]
        # container_del_properties on existing container
        self.api.container_del_properties(self.account, name, [key])
        data = self._get_properties(name)
        self.assertNotIn(key, data['properties'])
        key = random_str(32)
        # We do not check if a property exists before deleting it
        # self.assertRaises(
        #     exc.NoSuchContainer, self.api.container_del_properties,
        #     self.account, name, [key])
        self.api.container_del_properties(self.account, name, [key])
        data = self._get_properties(name)
        self.assertEqual(data['properties'], metadata)
        # clean
        self._clean(name, True)
        # container_del_properties on deleted container
        self.assertRaises(exc.NoSuchContainer,
                          self.api.container_del_properties,
                          self.account, name, list(metadata.keys()))

    def test_object_create_mime_type(self):
        name = random_str(32)
        self.api.object_create(self.account, name, data="data", obj_name=name,
                               mime_type='text/custom')
        meta, _ = self.api.object_locate(self.account, name, name)
        self.assertEqual(meta['mime_type'], 'text/custom')

    def _upload_data(self, name):
        """Upload 12 chunks worth of random data as one object.

        :returns: (chunks sorted by metachunk position, the uploaded data)
        """
        chunksize = int(self.conf["chunk_size"])
        size = int(chunksize * 12)
        data = random_data(int(size))
        self.api.object_create(self.account, name, obj_name=name, data=data)
        self.created.append((name, name))
        _, chunks = self.api.object_locate(self.account, name, name)
        logging.debug("Chunks: %s", chunks)
        return sort_chunks(chunks, False), data

    def _fetch_range(self, name, range_):
        """Fetch one range or a sequence of ranges, return the joined data."""
        if not isinstance(range_[0], tuple):
            ranges = (range_, )
        else:
            ranges = range_
        stream = self.api.object_fetch(self.account, name, name,
                                       ranges=ranges)[1]
        # join instead of += in a loop (avoids quadratic concatenation)
        return "".join(stream)

    def test_object_fetch_range_start(self):
        """From 0 to somewhere"""
        name = random_str(16)
        _, data = self._upload_data(name)
        end = 666
        fdata = self._fetch_range(name, (0, end))
        self.assertEqual(len(fdata), end + 1)
        self.assertEqual(fdata, data[0:end + 1])

    def test_object_fetch_range_end(self):
        """From somewhere to end"""
        name = random_str(16)
        chunks, data = self._upload_data(name)
        start = 666
        last = max(chunks.keys())
        end = chunks[last][0]['offset'] + chunks[last][0]['size']
        fdata = self._fetch_range(name, (start, end))
        self.assertEqual(len(fdata), len(data) - start)
        self.assertEqual(fdata, data[start:])

    def test_object_fetch_range_metachunk_start(self):
        """From the start of the second metachunk to somewhere"""
        name = random_str(16)
        chunks, data = self._upload_data(name)
        start = chunks[1][0]['offset']
        end = start + 666
        fdata = self._fetch_range(name, (start, end))
        self.assertEqual(len(fdata), end - start + 1)
        self.assertEqual(fdata, data[start:end + 1])

    def test_object_fetch_range_metachunk_end(self):
        """From somewhere to end of the first metachunk"""
        name = random_str(16)
        chunks, data = self._upload_data(name)
        start = 666
        end = chunks[0][0]['size'] - 1
        fdata = self._fetch_range(name, (start, end))
        self.assertEqual(len(fdata), end - start + 1)
        self.assertEqual(fdata, data[start:end + 1])

    def test_object_fetch_range_2_metachunks(self):
        """
        From somewhere in the first metachunk
        to somewhere in the second metachunk
        """
        name = random_str(16)
        chunks, data = self._upload_data(name)
        start = 666
        end = start + chunks[0][0]['size'] - 1
        fdata = self._fetch_range(name, (start, end))
        self.assertEqual(len(fdata), end - start + 1)
        self.assertEqual(fdata, data[start:end + 1])

    def test_object_fetch_several_ranges(self):
        """
        Download several ranges at once.
        """
        name = random_str(16)
        chunks, data = self._upload_data(name)
        start = 666
        end = start + chunks[0][0]['size'] - 1
        fdata = self._fetch_range(name, ((start, end), (end + 1, end + 2)))
        self.assertEqual(len(fdata), end - start + 3)
        self.assertEqual(fdata, data[start:end + 3])

        # Notice that we download some bytes from the second metachunk
        # before some from the first.
        fdata = self._fetch_range(
            name,
            ((chunks[0][0]['size'], chunks[0][0]['size'] + 2),
             (0, 1), (1, 2), (4, 6)))
        self.assertEqual(len(fdata), 10)
        self.assertEqual(
            fdata,
            data[chunks[0][0]['size']:chunks[0][0]['size'] + 3] +
            data[0:2] + data[1:3] + data[4:7])

    def test_object_create_then_append(self):
        """Create an object then append data"""
        name = random_str(16)
        self.api.object_create(self.account, name, data="1" * 128,
                               obj_name=name)
        _, size, _ = self.api.object_create(self.account, name,
                                            data="2" * 128, obj_name=name,
                                            append=True)
        self.assertEqual(size, 128)
        _, data = self.api.object_fetch(self.account, name, name)
        data = "".join(data)
        self.assertEqual(len(data), 256)
        self.assertEqual(data, "1" * 128 + "2" * 128)

    def test_object_create_from_append(self):
        """Create an object with append operation"""
        name = random_str(16)
        self.api.container_create(self.account, name)
        self.api.object_create(self.account, name, data="1" * 128,
                               obj_name=name, append=True)
        _, data = self.api.object_fetch(self.account, name, name)
        data = "".join(data)
        self.assertEqual(len(data), 128)
        self.assertEqual(data, "1" * 128)

    def test_container_object_create_from_append(self):
        """Try to create container and object with append operation"""
        name = random_str(16)
        _chunks, size, checksum = self.api.object_create(
            self.account, name, data="1" * 128, obj_name=name, append=True)
        self.assertEqual(size, 128)
        meta = self.api.object_get_properties(self.account, name, name)
        self.assertEqual(meta.get('hash', "").lower(), checksum.lower())

    def test_container_refresh(self):
        account = random_str(32)
        # container_refresh on unknown container
        name = random_str(32)
        self.assertRaises(exc.NoSuchContainer, self.api.container_refresh,
                          account, name)
        self.api.container_create(account, name)
        time.sleep(0.5)  # ensure container event have been processed
        # container_refresh on existing container
        self.api.container_refresh(account, name)
        time.sleep(0.5)  # ensure container event have been processed
        res = self.api.container_list(account, prefix=name)
        name_container, nb_objects, nb_bytes, _ = res[0]
        self.assertEqual(name_container, name)
        self.assertEqual(nb_objects, 0)
        self.assertEqual(nb_bytes, 0)
        self.api.object_create(account, name, data="data", obj_name=name)
        time.sleep(0.5)  # ensure container event have been processed
        # container_refresh on existing container with data
        self.api.container_refresh(account, name)
        time.sleep(0.5)  # ensure container event have been processed
        res = self.api.container_list(account, prefix=name)
        name_container, nb_objects, nb_bytes, _ = res[0]
        self.assertEqual(name_container, name)
        self.assertEqual(nb_objects, 1)
        self.assertEqual(nb_bytes, 4)
        self.api.object_delete(account, name, name)
        time.sleep(0.5)  # ensure container event have been processed
        self.api.container_delete(account, name)
        time.sleep(0.5)  # ensure container event have been processed
        # container_refresh on deleted container
        self.assertRaises(exc.NoSuchContainer, self.api.container_refresh,
                          account, name)
        self.api.account_delete(account)

    def test_container_refresh_user_not_found(self):
        name = random_str(32)
        self.api.account.container_update(name, name, {"mtime": time.time()})
        self.api.container_refresh(name, name)
        containers = self.api.container_list(name)
        self.assertEqual(len(containers), 0)
        self.api.account_delete(name)

    def test_account_refresh(self):
        # account_refresh on unknown account
        account = random_str(32)
        self.assertRaises(exc.NoSuchAccount, self.api.account_refresh,
                          account)
        # account_refresh on existing account
        self.api.account_create(account)
        self.api.account_refresh(account)
        time.sleep(0.5)  # ensure container event have been processed
        res = self.api.account_show(account)
        self.assertEqual(res["bytes"], 0)
        self.assertEqual(res["objects"], 0)
        self.assertEqual(res["containers"], 0)
        name = random_str(32)
        self.api.object_create(account, name, data="data", obj_name=name)
        time.sleep(0.5)  # ensure container event have been processed
        self.api.account_refresh(account)
        time.sleep(0.5)  # ensure container event have been processed
        res = self.api.account_show(account)
        self.assertEqual(res["bytes"], 4)
        self.assertEqual(res["objects"], 1)
        self.assertEqual(res["containers"], 1)
        self.api.object_delete(account, name, name)
        time.sleep(0.5)  # ensure container event have been processed
        self.api.container_delete(account, name)
        time.sleep(0.5)  # ensure container event have been processed
        self.api.account_delete(account)
        # account_refresh on deleted account
        self.assertRaises(exc.NoSuchAccount, self.api.account_refresh,
                          account)

    def test_all_accounts_refresh(self):
        # clear accounts
        accounts = self.api.account_list()
        for account in accounts:
            try:
                self.api.account_flush(account)
                self.api.account_delete(account)
            except exc.NoSuchAccount:
                # account remove in the meantime
                pass
        # all_accounts_refresh with 0 account
        self.api.all_accounts_refresh()
        # all_accounts_refresh with 2 account
        account1 = random_str(32)
        self.api.account_create(account1)
        account2 = random_str(32)
        self.api.account_create(account2)
        self.api.all_accounts_refresh()
        res = self.api.account_show(account1)
        self.assertEqual(res["bytes"], 0)
        self.assertEqual(res["objects"], 0)
        self.assertEqual(res["containers"], 0)
        res = self.api.account_show(account2)
        self.assertEqual(res["bytes"], 0)
        self.assertEqual(res["objects"], 0)
        self.assertEqual(res["containers"], 0)
        self.api.account_delete(account1)
        self.api.account_delete(account2)

    def test_account_flush(self):
        # account_flush on unknown account
        account = random_str(32)
        self.assertRaises(exc.NoSuchAccount, self.api.account_flush, account)
        # account_flush on existing account
        name1 = random_str(32)
        self.api.container_create(account, name1)
        name2 = random_str(32)
        self.api.container_create(account, name2)
        time.sleep(0.5)  # ensure container event have been processed
        self.api.account_flush(account)
        containers = self.api.container_list(account)
        self.assertEqual(len(containers), 0)
        res = self.api.account_show(account)
        self.assertEqual(res["bytes"], 0)
        self.assertEqual(res["objects"], 0)
        self.assertEqual(res["containers"], 0)
        self.api.container_delete(account, name1)
        self.api.container_delete(account, name2)
        time.sleep(0.5)  # ensure container event have been processed
        self.api.account_delete(account)
        # account_flush on deleted account
        self.assertRaises(exc.NoSuchAccount, self.api.account_flush, account)

    def test_object_create_then_truncate(self):
        """Create an object then truncate data"""
        name = random_str(16)
        self.api.object_create(self.account, name, data="1" * 128,
                               obj_name=name)
        self.api.object_truncate(self.account, name, name, size=64)
        _, data = self.api.object_fetch(self.account, name, name)
        data = "".join(data)
        self.assertEqual(len(data), 64)
        self.assertEqual(data, "1" * 64)

    def test_object_create_append_then_truncate(self):
        """Create an object, append data then truncate on chunk boundary"""
        name = random_str(16)
        self.api.object_create(self.account, name, data="1" * 128,
                               obj_name=name)
        _, size, _ = self.api.object_create(self.account, name,
                                            data="2" * 128, obj_name=name,
                                            append=True)
        self.assertEqual(size, 128)
        self.api.object_truncate(self.account, name, name, size=128)
        _, data = self.api.object_fetch(self.account, name, name)
        data = "".join(data)
        self.assertEqual(len(data), 128)
        self.assertEqual(data, "1" * 128)
        self.api.object_truncate(self.account, name, name, size=128)

    def test_object_create_then_invalid_truncate(self):
        """Create an object, append data then try to truncate outside
        object range"""
        name = random_str(16)
        self.api.object_create(self.account, name, data="1" * 128,
                               obj_name=name)
        self.assertRaises(exc.OioException, self.api.object_truncate,
                          self.account, name, name, size=-1)
        self.assertRaises(exc.OioException, self.api.object_truncate,
                          self.account, name, name, size=129)

    def test_container_snapshot(self):
        name = random_str(16)
        self.api.container_create(self.account, name)
        test_object = "test_object"
        self.api.object_create(self.account, name, data="0" * 128,
                               obj_name=test_object)
        # Snapshot cannot have same name and same account
        self.assertRaises(exc.ClientException, self.api.container_snapshot,
                          self.account, name, self.account, name)
        snapshot_name = random_str(16)
        self.assertNotEqual(snapshot_name, name)
        # Non existing snapshot should work
        self.api.container_snapshot(self.account, name, self.account,
                                    snapshot_name)
        # Already taken snapshot name should failed
        self.assertRaises(exc.ClientException, self.api.container_snapshot,
                          self.account, name, self.account, snapshot_name)
        # Check Container Frozen so create should failed
        self.assertRaises(exc.ServiceBusy, self.api.object_create,
                          self.account, snapshot_name, data="1" * 128,
                          obj_name="should_not_be_created")
        # fullpath is set on every chunk
        chunk_list = self.api.object_locate(self.account, name,
                                            test_object)[1]
        # check that every chunk is different from the target
        snapshot_list = self.api.object_locate(self.account, snapshot_name,
                                               test_object)[1]
        for c, t in zip(chunk_list, snapshot_list):
            self.assertNotEqual(c['url'], t['url'])
        # check target can be used
        self.api.object_create(self.account, name, data="0" * 128,
                               obj_name="should_be_created")
        # Create and send copy of a object
        url_list = [c['url'] for c in chunk_list]
        copy_list = self.api._generate_copy(url_list)
        # every chunks should have the fullpath
        fullpath = self.api._generate_fullpath(self.account, snapshot_name,
                                               'copy', 12456)
        self.api._send_copy(url_list, copy_list, fullpath[0])
        # check that every copy exists
        pool_manager = get_pool_manager()
        for c in copy_list:
            r = pool_manager.request('HEAD', c)
            self.assertEqual(r.status, 200)
            self.assertIn(
                fullpath[0],
                r.headers["X-oio-chunk-meta-full-path"].split(','))
        # Snapshot on non existing container should failed
        self.assertRaises(exc.NoSuchContainer, self.api.container_snapshot,
                          random_str(16), random_str(16), random_str(16),
                          random_str(16))
        # Snapshot need to have a account
        self.assertRaises(exc.ClientException, self.api.container_snapshot,
                          self.account, name, None, random_str(16))
        # Snapshot need to have a name
        self.assertRaises(exc.ClientException, self.api.container_snapshot,
                          self.account, name, random_str(16), None)
class TestMeta2EventsEmission(BaseTestCase):
    """
    Check that meta2 object events are emitted to the configured webhook
    (a test HTTP server listening on 127.0.0.1:9081).
    """

    def setUp(self):
        super(TestMeta2EventsEmission, self).setUp()
        if not self.conf.get('webhook', ''):
            self.skipTest('webhook is required')
        # Time-based names so successive runs do not collide
        self.acct = 'AccountWebhook%f' % time.time()
        self.cnt_name = 'TestWebhookEvents%f' % time.time()
        self.obj_name = 'obj%f' % time.time()
        self.storage_api = ObjectStorageApi(self.ns)
        self.pool = get_pool_manager()
        self._clean()

    def _get(self, success=True, timeout=2, event_id=None):
        """Poll the webhook server for the event about our test object.

        :param success: if True, wait for a 200 response (event received);
            if False, wait for a 404 (event purged/absent).
        :param timeout: maximum polling time, in seconds.
        :param event_id: if set, only accept events newer than this ID.
        :returns: (HTTP response, decoded event) on success,
            (HTTP response, None) when waiting for a 404.
        """
        path = '%s/%s/%s' % (self.acct, self.cnt_name, self.obj_name)
        start = time.time()
        while time.time() - start < timeout:
            res = self.pool.request('GET', 'http://127.0.0.1:9081/' + path)
            if success and res.status == 200:
                obj = json.loads(res.data)
                if not event_id or event_id < obj['eventId']:
                    return res, obj
            if not success and res.status == 404:
                return res, None
            time.sleep(0.1)
        # The previous code did `assert ("...")`, which asserts on a
        # non-empty string and thus never fails: make the timeout an
        # actual test failure instead of an implicit `return None`.
        self.fail("Timeout waiting webhook event")

    def _clean(self):
        """Ask the webhook server to forget all recorded events."""
        self.pool.request('POST', 'http://127.0.0.1:9081/PURGE')

    def _add(self, data, properties=None):
        """Create the test object, then wait for its creation event."""
        self.storage_api.object_create(self.acct, self.cnt_name, data=data,
                                       obj_name=self.obj_name,
                                       properties=properties)
        ret, data = self._get()
        self.assertEqual(ret.status, 200)
        return ret, data

    def _remove(self):
        """Delete the test object, then wait for the event to disappear."""
        self.storage_api.object_delete(self.acct, self.cnt_name,
                                       self.obj_name)
        ret, data = self._get(success=False)
        self.assertEqual(ret.status, 404)
        return ret, data

    def test_content_add(self):
        content = "XXX"
        ret, data = self._add(content)
        self.assertEqual(data['data']['account'], self.acct)
        self.assertEqual(data['data']['container'], self.cnt_name)
        self.assertEqual(data['data']['name'], self.obj_name)
        self.assertEqual(data['data']['size'], len(content))

    def test_content_add_with_metadata(self):
        properties = {'key1': 'val1'}
        ret, data = self._add(data="XXX", properties=properties)
        self.assertEqual(data['data']['account'], self.acct)
        self.assertEqual(data['data']['container'], self.cnt_name)
        self.assertEqual(data['data']['name'], self.obj_name)
        self.assertEqual(data['data']['metadata'], properties)

    def test_content_update_metadata(self):
        properties = {'key1': 'val1'}
        ret, data = self._add(data="XXX", properties=properties)
        self.assertEqual(data['data']['metadata'], properties)
        properties = {'key1': 'NEWVAL'}
        self.storage_api.object_set_properties(self.acct, self.cnt_name,
                                               self.obj_name, properties)
        # A new event (greater eventId) must be emitted for the update
        event_id = data['eventId']
        res, data = self._get(event_id=event_id)
        self.assertEqual(res.status, 200)
        self.assertGreater(data['eventId'], event_id)
        self.assertEqual(data['data']['metadata'], properties)

    def test_content_remove(self):
        self._add("XX")
        self._remove()