def __init__(self, tool, queue_workers, queue_reply):
    super(ContentRepairerWorker, self).__init__(
        tool, queue_workers, queue_reply)
    self.chunk_operator = ChunkOperator(self.conf, logger=self.logger)
    self.blob_client = BlobClient(self.conf)
    self.container_client = ContainerClient(self.conf, logger=self.logger)
def init(self):
    self.handlers = {
        "plain": self._handle_rawx,
        "ec": self._handle_rawx,
        "backblaze": self._handle_b2,
    }
    self.blob_client = BlobClient(self.conf)
def __init__(self, namespace, concurrency=50, error_file=None):
    self.pool = GreenPool(concurrency)
    self.error_file = error_file
    if self.error_file:
        f = open(self.error_file, 'a')
        self.error_writer = csv.writer(f, delimiter=' ')
    conf = {'namespace': namespace}
    self.account_client = AccountClient(conf)
    self.container_client = ContainerClient(conf)
    self.blob_client = BlobClient()
    self.accounts_checked = 0
    self.containers_checked = 0
    self.objects_checked = 0
    self.chunks_checked = 0
    self.account_not_found = 0
    self.container_not_found = 0
    self.object_not_found = 0
    self.chunk_not_found = 0
    self.account_exceptions = 0
    self.container_exceptions = 0
    self.object_exceptions = 0
    self.chunk_exceptions = 0
    self.list_cache = {}
    self.running = {}
def setUp(self):
    super(TestBlobMover, self).setUp()
    self.container = random_str(16)
    self.cid = cid_from_name(self.account, self.container)
    self.path = random_str(16)
    self.api = ObjectStorageApi(self.ns)
    self.blob_client = BlobClient(self.conf)
    self.api.container_create(self.account, self.container)
    _, chunks = self.api.container.content_prepare(
        self.account, self.container, self.path, size=1)
    services = self.conscience.all_services('rawx')
    if len(chunks) >= len([s for s in services if s['score'] > 0]):
        self.skipTest("need at least %d rawx to run" % (len(chunks) + 1))
    self.rawx_volumes = dict()
    for rawx in services:
        tags = rawx['tags']
        service_id = tags.get('tag.service_id', None)
        if service_id is None:
            service_id = rawx['addr']
        volume = tags.get('tag.vol', None)
        self.rawx_volumes[service_id] = volume
    self.api.object_create(self.account, self.container,
                           obj_name=self.path, data="chunk")
    meta, self.chunks = self.api.object_locate(
        self.account, self.container, self.path)
    self.version = meta['version']
    self.content_id = meta['id']
    self.chunk_method = meta['chunk_method']
def _send_copy(self, targets, copies, fullpath):
    headers = {"x-oio-chunk-meta-full-path": fullpath}
    if not hasattr(self, "blob_client"):
        from oio.blob.client import BlobClient
        self.blob_client = BlobClient()
    for t, c in zip(targets, copies):
        self.blob_client.chunk_link(t, c, headers=headers).status
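A minimal usage sketch for _send_copy() above: every URL and identifier below is a hypothetical placeholder, and encode_fullpath is assumed to be the same helper the Content constructor later in this collection uses to build the x-oio-chunk-meta-full-path value (its module path is an assumption).

# Hedged usage sketch for _send_copy(); URLs and IDs are placeholders.
from oio.common.fullpath import encode_fullpath  # assumed module path

fullpath = encode_fullpath('ACCT', 'cntr', 'obj', 1537455290000000,
                           '0123456789ABCDEF')
targets = ['http://127.0.0.1:6201/0123ABC']  # chunks to link from
copies = ['http://127.0.0.1:6202/456DEF0']   # link names to create
mover._send_copy(targets, copies, fullpath)  # 'mover' owns _send_copy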
def setUp(self):
    super(TestPlainContent, self).setUp()
    if len(self.conf['services']['rawx']) < 4:
        self.skipTest("Plain tests need at least 4 rawx to run")
    self.namespace = self.conf['namespace']
    self.account = self.conf['account']
    self.chunk_size = self.conf['chunk_size']
    self.gridconf = {"namespace": self.namespace}
    self.content_factory = ContentFactory(self.gridconf,
                                          logger=self.logger)
    self.container_client = ContainerClient(self.gridconf,
                                            logger=self.logger)
    self.blob_client = BlobClient(self.conf, logger=self.logger)
    self.container_name = "TestPlainContent-%f" % time.time()
    self.container_client.container_create(account=self.account,
                                           reference=self.container_name)
    self.container_id = cid_from_name(self.account,
                                      self.container_name).upper()
    self.content = "%s-%s" % (self.__class__.__name__, random_str(4))
    self.stgpol = "SINGLE"
    self.stgpol_twocopies = "TWOCOPIES"
    self.stgpol_threecopies = "THREECOPIES"
def setUp(self):
    super(TestBlobAuditorFunctional, self).setUp()
    self.namespace = self.conf['namespace']
    self.account = self.conf['account']
    self.test_dir = self.conf['sds_path']
    rawx_num, rawx_path, rawx_addr = self.get_service_url('rawx')
    self.rawx = 'http://' + rawx_addr
    self.h = hashlib.new('md5')
    conf = {"namespace": self.namespace}
    self.auditor = BlobAuditorWorker(conf, get_logger(None), None)
    self.container_c = ContainerClient(conf)
    self.blob_c = BlobClient()
    self.ref = random_str(8)
    self.container_c.container_create(self.account, self.ref)
    self.url_rand = random_id(64)
    self.data = random_str(1280)
    self.h.update(self.data)
    self.hash_rand = self.h.hexdigest().lower()
    self.content = TestContent(random_str(6), len(self.data),
                               self.url_rand, 1)
    self.content.id_container = cid_from_name(self.account,
                                              self.ref).upper()
    self.chunk = TestChunk(self.content.size, self.url_rand, 0,
                           self.hash_rand)
    self.chunk_url = "%s/%s" % (self.rawx, self.chunk.id_chunk)
    self.chunk_proxy = {"hash": self.chunk.md5, "pos": "0",
                        "size": self.chunk.size, "url": self.chunk_url}
    chunk_meta = {
        'content_path': self.content.path,
        'container_id': self.content.id_container,
        'chunk_method': 'plain/nb_copy=3',
        'policy': 'TESTPOLICY',
        'id': '0000',
        'version': 1,
        'chunk_id': self.chunk.id_chunk,
        'chunk_pos': self.chunk.pos,
        'chunk_hash': self.chunk.md5,
    }
    self.blob_c.chunk_put(self.chunk_url, chunk_meta, self.data)
    self.chunk_path = (self.test_dir + '/data/' + self.namespace +
                       '-rawx-1/' + self.chunk.id_chunk[0:3] + "/" +
                       self.chunk.id_chunk)
    self.bad_container_id = '0' * 64
def __init__(self, *args, **kwargs):
    super(ContentReaperFilter, self).__init__(*args, **kwargs)
    self.handlers = {
        "plain": self._handle_rawx,
        "ec": self._handle_rawx,
        "backblaze": self._handle_b2,
    }
    self.blob_client = BlobClient()
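These handler maps are keyed by storage type. A hedged dispatch sketch follows: the 'plain/nb_copy=3' chunk-method format appears in other snippets in this collection, but this dispatch helper and its signature are illustrative assumptions, not the filter's real code.

# Illustrative only: not the filter's real dispatch code.
def dispatch(handlers, chunk_method, chunks):
    # 'plain/nb_copy=3' -> 'plain'; 'ec/k=6,m=3' -> 'ec'
    storage_type = chunk_method.split('/', 1)[0]
    handler = handlers.get(storage_type)
    if handler is None:
        raise ValueError("no handler for %r" % storage_type)
    return handler(chunks)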
def init(self):
    self.handlers = {
        "plain": self._handle_rawx,
        "ec": self._handle_rawx,
        "backblaze": self._handle_b2,
    }
    kwargs = {k: v for k, v in self.conf.items()
              if k in URLLIB3_POOLMANAGER_KWARGS}
    self.blob_client = BlobClient(self.conf, logger=self.logger, **kwargs)
    self.chunk_concurrency = int(self.conf.get('concurrency', 3))
    self.chunk_timeout = float(self.conf.get('timeout', 5.0))
def setUp(self):
    super(TestContentFactory, self).setUp()
    self.namespace = self.conf['namespace']
    self.chunk_size = self.conf['chunk_size']
    self.gridconf = {"namespace": self.namespace}
    self.content_factory = ContentFactory(self.gridconf)
    self.container_name = "TestContentFactory%f" % time.time()
    self.blob_client = BlobClient()
    self.container_client = ContainerClient(self.gridconf)
    self.container_client.container_create(acct=self.account,
                                           ref=self.container_name)
    self.container_id = cid_from_name(self.account,
                                      self.container_name).upper()
def setUp(self):
    super(TestRebuilderCrawler, self).setUp()
    self.namespace = self.conf['namespace']
    self.account = self.conf['account']
    self.gridconf = {"namespace": self.namespace}
    self.container_client = ContainerClient(self.gridconf)
    self.blob_client = BlobClient()
    self.container_name = "TestRebuilderCrawler%d" % int(time.time())
    self.container_client.container_create(acct=self.account,
                                           ref=self.container_name)
def __init__(self, namespace, concurrency=50, error_file=None,
             rebuild_file=None, full=True, limit_listings=0,
             request_attempts=1):
    self.pool = GreenPool(concurrency)
    self.error_file = error_file
    self.full = bool(full)
    # Optimisation for when we are only checking one object
    # or one container.
    # 0 -> do not limit
    # 1 -> limit account listings (list of containers)
    # 2 -> limit container listings (list of objects)
    self.limit_listings = limit_listings
    if self.error_file:
        f = open(self.error_file, 'a')
        self.error_writer = csv.writer(f, delimiter=' ')
    self.rebuild_file = rebuild_file
    if self.rebuild_file:
        fd = open(self.rebuild_file, 'a')
        self.rebuild_writer = csv.writer(fd, delimiter='|')
    conf = {'namespace': namespace}
    self.account_client = AccountClient(
        conf, max_retries=request_attempts - 1)
    self.container_client = ContainerClient(
        conf, max_retries=request_attempts - 1,
        request_attempts=request_attempts)
    self.blob_client = BlobClient(conf=conf)
    self.accounts_checked = 0
    self.containers_checked = 0
    self.objects_checked = 0
    self.chunks_checked = 0
    self.account_not_found = 0
    self.container_not_found = 0
    self.object_not_found = 0
    self.chunk_not_found = 0
    self.account_exceptions = 0
    self.container_exceptions = 0
    self.object_exceptions = 0
    self.chunk_exceptions = 0
    self.list_cache = {}
    self.running = {}
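A hedged usage sketch for the checker __init__ above. 'Checker' is a stand-in name for the owning class (the snippet does not show it), and the namespace and file paths are placeholders.

# Hedged sketch; 'Checker' and all values are placeholders.
checker = Checker('OPENIO', concurrency=10,
                  error_file='/tmp/check_errors.csv',
                  rebuild_file='/tmp/chunks_to_rebuild.csv',
                  limit_listings=2,      # only listing one container
                  request_attempts=3)    # retry flaky meta2 requests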
def __init__(self, conf, logger, volume):
    self.conf = conf
    self.logger = logger or get_logger(conf)
    self.volume = volume
    self.run_time = 0
    self.passes = 0
    self.errors = 0
    self.last_reported = 0
    self.chunks_run_time = 0
    self.bytes_running_time = 0
    self.bytes_processed = 0
    self.total_bytes_processed = 0
    self.total_chunks_processed = 0
    self.dry_run = true_value(conf.get('dry_run', False))
    self.report_interval = int_value(conf.get('report_interval'), 3600)
    self.max_chunks_per_second = int_value(
        conf.get('chunks_per_second'), 30)
    self.max_bytes_per_second = int_value(
        conf.get('bytes_per_second'), 10000000)
    self.rdir_fetch_limit = int_value(conf.get('rdir_fetch_limit'), 100)
    self.blob_client = BlobClient()
    self.container_client = ContainerClient(conf)
    self.rdir_client = RdirClient(conf)
def __init__(self, conf, logger, volume):
    self.conf = conf
    self.logger = logger or get_logger(conf)
    self.volume = volume
    self.namespace, self.address = check_volume(self.volume)
    self.running = False
    self.run_time = 0
    self.passes = 0
    self.errors = 0
    self.last_reported = 0
    self.last_usage_check = 0
    self.chunks_run_time = 0
    self.bytes_running_time = 0
    self.bytes_processed = 0
    self.total_bytes_processed = 0
    self.total_chunks_processed = 0
    self.concurrency = int_value(conf.get('concurrency'), 10)
    self.usage_target = int_value(conf.get('usage_target'), 0)
    self.usage_check_interval = int_value(
        conf.get('usage_check_interval'), 60)
    self.report_interval = int_value(conf.get('report_interval'), 3600)
    self.max_chunks_per_second = int_value(
        conf.get('chunks_per_second'), 30)
    self.limit = int_value(conf.get('limit'), 0)
    self.allow_links = true_value(conf.get('allow_links', True))
    self.blob_client = BlobClient(conf)
    self.container_client = ContainerClient(conf, logger=self.logger)
    self.content_factory = ContentFactory(
        conf, container_client=self.container_client,
        blob_client=self.blob_client)
    self.excluded_rawx = \
        [rawx for rawx in conf.get('excluded_rawx', '').split(',') if rawx]
    self.fake_excluded_chunks = self._generate_fake_excluded_chunks()
def __init__(self, conf, logger, volume):
    self.conf = conf
    self.logger = logger or get_logger(conf)
    self.volume = volume
    self.run_time = 0
    self.passes = 0
    self.errors = 0
    self.last_reported = 0
    self.last_usage_check = 0
    self.chunks_run_time = 0
    self.bytes_running_time = 0
    self.bytes_processed = 0
    self.total_bytes_processed = 0
    self.total_chunks_processed = 0
    self.usage_target = int_value(conf.get('usage_target'), 0)
    self.usage_check_interval = int_value(
        conf.get('usage_check_interval'), 3600)
    self.report_interval = int_value(conf.get('report_interval'), 3600)
    self.max_chunks_per_second = int_value(
        conf.get('chunks_per_second'), 30)
    self.max_bytes_per_second = int_value(
        conf.get('bytes_per_second'), 10000000)
    self.blob_client = BlobClient()
    self.container_client = ContainerClient(conf)
    self.content_factory = ContentFactory(conf)
def __init__(self, conf, logger, volume):
    self.conf = conf
    self.logger = logger or get_logger(conf)
    self.volume = volume
    self.run_time = 0
    self.passes = 0
    self.errors = 0
    self.last_reported = 0
    self.last_usage_check = 0
    self.chunks_run_time = 0
    self.bytes_running_time = 0
    self.bytes_processed = 0
    self.total_bytes_processed = 0
    self.total_chunks_processed = 0
    self.usage_target = int_value(conf.get('usage_target'), 0)
    self.usage_check_interval = int_value(
        conf.get('usage_check_interval'), 3600)
    self.report_interval = int_value(conf.get('report_interval'), 3600)
    self.max_chunks_per_second = int_value(
        conf.get('chunks_per_second'), 30)
    self.max_bytes_per_second = int_value(
        conf.get('bytes_per_second'), 10000000)
    self.blob_client = BlobClient()
    self.container_client = ContainerClient(conf)
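The chunks_run_time / bytes_running_time counters in these workers are pacing tokens: each pass feeds them through a ratelimit helper together with the max_*_per_second caps (the rebuilder pass later in this collection does exactly that). A self-contained sketch of the pattern, with a simplified stand-in for the real helper:

import time

def ratelimit(run_time, max_rate):
    # Simplified stand-in for the ratelimit() helper these workers use;
    # sleeps so the caller stays under max_rate iterations per second.
    min_interval = 1.0 / max_rate
    elapsed = time.time() - run_time
    if elapsed < min_interval:
        time.sleep(min_interval - elapsed)
    return time.time()

chunks_run_time = 0
for _ in range(100):
    chunks_run_time = ratelimit(chunks_run_time, 30)  # cap: 30 chunks/s
    # ... process one chunk ...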
def setUp(self):
    super(TestBlobAuditor, self).setUp()
    self.container = random_str(16)
    self.cid = cid_from_name(self.account, self.container)
    self.path = random_str(16)
    self.api = ObjectStorageApi(self.ns)
    self.blob_client = BlobClient(self.conf)
    self.api.container_create(self.account, self.container)
    _, chunks = self.api.container.content_prepare(
        self.account, self.container, self.path, 1)
    services = self.conscience.all_services('rawx')
    self.rawx_volumes = dict()
    for rawx in services:
        tags = rawx['tags']
        service_id = tags.get('tag.service_id', None)
        if service_id is None:
            service_id = rawx['addr']
        volume = tags.get('tag.vol', None)
        self.rawx_volumes[service_id] = volume
    self.api.object_create(
        self.account, self.container, obj_name=self.path, data="chunk")
    meta, self.chunks = self.api.object_locate(
        self.account, self.container, self.path)
    self.version = meta['version']
    self.content_id = meta['id']
def __init__(self, conf, container_id, metadata, chunks, storage_method,
             account, container_name, blob_client=None,
             container_client=None, logger=None):
    self.conf = conf
    self.container_id = container_id
    self.metadata = metadata
    self.chunks = ChunksHelper(chunks)
    self.storage_method = storage_method
    self.logger = logger or get_logger(self.conf)
    self.blob_client = (blob_client or BlobClient(conf))
    self.container_client = (container_client
                             or ContainerClient(self.conf,
                                                logger=self.logger))
    # FIXME: all these may be properties
    self.content_id = self.metadata["id"]
    self.path = self.metadata["name"]
    self.length = int(self.metadata["length"])
    self.version = self.metadata["version"]
    self.checksum = self.metadata["hash"]
    self.chunk_method = self.metadata["chunk_method"]
    self.account = account
    self.container_name = container_name
    if 'full_path' in self.metadata:
        self.full_path = metadata['full_path']
    else:
        self.full_path = encode_fullpath(
            self.account, self.container_name, self.path, self.version,
            self.content_id)
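For reference, a sketch of the fullpath layout: a '/'-joined, quote_plus-escaped tuple, consistent with the five arguments passed to encode_fullpath above and with the legacy four-part construction in a later Content variant in this collection. The exact escaping rules of encode_fullpath are an assumption; values are placeholders.

# Sketch of the fullpath layout; Python 2 urllib, as in these snippets.
from urllib import quote_plus

account, container, path = 'ACCT', 'cntr', 'my object'
version, content_id = 1537455290000000, '0123456789ABCDEF'
full_path = '/'.join(quote_plus(str(p)) for p in
                     (account, container, path, version, content_id))
# -> 'ACCT/cntr/my+object/1537455290000000/0123456789ABCDEF'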
def setUp(self):
    super(TestBlobAuditorFunctional, self).setUp()
    self.namespace = self.conf['namespace']
    self.account = self.conf['account']
    self.test_dir = self.conf['sds_path']
    self.chars = (string.ascii_lowercase + string.ascii_uppercase +
                  string.digits)
    self.chars_id = string.digits + 'ABCDEF'
    self.rawx = 'http://' + self.conf["rawx"][0]['addr']
    self.h = hashlib.new('md5')
    conf = {"namespace": self.namespace}
    self.auditor = BlobAuditorWorker(conf, get_logger(None), None)
    self.container_c = ContainerClient(conf)
    self.blob_c = BlobClient()
    self.ref = rand_generator(self.chars, 8)
    self.container_c.container_create(self.account, self.ref)
    self.url_rand = rand_generator(self.chars_id, 64)
    self.data = rand_generator(self.chars, 1280)
    self.h.update(self.data)
    self.hash_rand = self.h.hexdigest().lower()
    self.content = TestContent(
        rand_generator(self.chars, 6), len(self.data), self.url_rand, 1)
    self.content.id_container = cid_from_name(
        self.account, self.ref).upper()
    self.chunk = TestChunk(self.content.size, self.url_rand, 0,
                           self.hash_rand)
    self.chunk_url = "%s/%s" % (self.rawx, self.chunk.id_chunk)
    self.chunk_proxy = {"hash": self.chunk.md5, "pos": "0",
                        "size": self.chunk.size, "url": self.chunk_url}
    chunk_meta = {'content_size': self.content.size,
                  'content_chunksnb': self.content.nb_chunks,
                  'content_path': self.content.path,
                  'content_cid': self.content.id_container,
                  'content_mimetype': 'application/octet-stream',
                  'content_chunkmethod': 'bytes',
                  'content_policy': 'TESTPOLICY',
                  'content_id': '0000',
                  'content_version': 1,
                  'chunk_id': self.chunk.id_chunk,
                  'chunk_pos': self.chunk.pos}
    self.blob_c.chunk_put(self.chunk_url, chunk_meta, self.data)
    self.chunk_path = (self.test_dir + '/data/NS-rawx-1/' +
                       self.chunk.id_chunk[0:2] + "/" +
                       self.chunk.id_chunk)
    self.bad_container_id = '0' * 64
def setUp(self):
    super(TestBlobAuditorFunctional, self).setUp()
    self.namespace = self.conf["namespace"]
    self.account = self.conf["account"]
    self.test_dir = self.conf["sds_path"]
    rawx_num, rawx_path, rawx_addr = self.get_service_url("rawx")
    self.rawx = "http://" + rawx_addr
    self.h = hashlib.new("md5")
    conf = {"namespace": self.namespace}
    self.auditor = BlobAuditorWorker(conf, get_logger(None), None)
    self.container_c = ContainerClient(conf)
    self.blob_c = BlobClient()
    self.ref = random_str(8)
    self.container_c.container_create(self.account, self.ref)
    self.url_rand = random_id(64)
    self.data = random_str(1280)
    self.h.update(self.data)
    self.hash_rand = self.h.hexdigest().lower()
    self.content = TestContent(random_str(6), len(self.data),
                               self.url_rand, 1)
    self.content.id_container = cid_from_name(self.account,
                                              self.ref).upper()
    self.chunk = TestChunk(self.content.size, self.url_rand, 0,
                           self.hash_rand)
    self.chunk_url = "%s/%s" % (self.rawx, self.chunk.id_chunk)
    self.chunk_proxy = {"hash": self.chunk.md5, "pos": "0",
                        "size": self.chunk.size, "url": self.chunk_url}
    chunk_meta = {
        "content_path": self.content.path,
        "container_id": self.content.id_container,
        "chunk_method": "plain/nb_copy=3",
        "policy": "TESTPOLICY",
        "id": "0000",
        "version": 1,
        "chunk_id": self.chunk.id_chunk,
        "chunk_pos": self.chunk.pos,
        "chunk_hash": self.chunk.md5,
    }
    self.blob_c.chunk_put(self.chunk_url, chunk_meta, self.data)
    self.chunk_path = (
        self.test_dir + "/data/" + self.namespace + "-rawx-1/"
        + self.chunk.id_chunk[0:3] + "/" + self.chunk.id_chunk
    )
    self.bad_container_id = "0" * 64
def __init__(self, conf, container_id, metadata, chunks, storage_method):
    self.conf = conf
    self.container_id = container_id
    self.metadata = metadata
    self.chunks = ChunksHelper(chunks)
    self.storage_method = storage_method
    self.logger = get_logger(self.conf)
    self.cs_client = ConscienceClient(conf)
    self.blob_client = BlobClient()
    self.container_client = ContainerClient(self.conf)
    self.content_id = self.metadata["id"]
    self.stgpol = self.metadata["policy"]
    self.path = self.metadata["name"]
    self.length = int(self.metadata["length"])
    self.version = self.metadata["version"]
    self.checksum = self.metadata["hash"]
    self.mime_type = self.metadata["mime_type"]
    self.chunk_method = self.metadata["chunk_method"]
def __init__(self, conf, job_params, logger=None):
    super(RawxDecommissionTask, self).__init__(conf, job_params,
                                               logger=logger)
    self.service_id = job_params['service_id']
    self.rawx_timeout = job_params['rawx_timeout']
    self.min_chunk_size = job_params['min_chunk_size']
    self.max_chunk_size = job_params['max_chunk_size']
    self.excluded_rawx = job_params['excluded_rawx']
    self.blob_client = BlobClient(self.conf, logger=self.logger)
    self.content_factory = ContentFactory(self.conf)
    self.conscience_client = ConscienceClient(self.conf,
                                              logger=self.logger)
    self.fake_excluded_chunks = self._generate_fake_excluded_chunks(
        self.excluded_rawx)
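The keys read from job_params above imply a shape like the following. A hedged sketch with placeholder values; the exact type each key expects (seconds, bytes, list of service addresses) is an assumption inferred from the names.

# Hedged sketch of job_params for RawxDecommissionTask; placeholders.
job_params = {
    'service_id': '127.0.0.1:6201',       # rawx being decommissioned
    'rawx_timeout': 60.0,                 # assumed: seconds
    'min_chunk_size': 0,                  # assumed: bytes, 0 = no bound
    'max_chunk_size': 0,
    'excluded_rawx': ['127.0.0.1:6202'],  # assumed: service addresses
}
task = RawxDecommissionTask({'namespace': 'OPENIO'}, job_params)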
def setUp(self):
    super(TestContentFactory, self).setUp()
    self.namespace = self.conf['namespace']
    self.chunk_size = self.conf['chunk_size']
    self.gridconf = {"namespace": self.namespace}
    self.content_factory = ContentFactory(self.gridconf)
    self.container_name = "TestContentFactory%f" % time.time()
    self.blob_client = BlobClient(conf=self.conf)
    self.container_client = ContainerClient(self.gridconf)
    self.container_client.container_create(account=self.account,
                                           reference=self.container_name)
    self.container_id = cid_from_name(self.account,
                                      self.container_name).upper()
    self.stgpol = "SINGLE"
    self.stgpol_twocopies = "TWOCOPIES"
    self.stgpol_threecopies = "THREECOPIES"
    self.stgpol_ec = "EC"
def setUp(self):
    super(TestDupContent, self).setUp()
    if len(self.conf['rawx']) < 3:
        self.skipTest("Not enough rawx. "
                      "Dup tests need at least 3 rawx to run")
    self.namespace = self.conf['namespace']
    self.account = self.conf['account']
    self.chunk_size = self.conf['chunk_size']
    self.gridconf = {"namespace": self.namespace}
    self.content_factory = ContentFactory(self.gridconf)
    self.container_client = ContainerClient(self.gridconf)
    self.blob_client = BlobClient()
    self.container_name = "TestDupContent%f" % time.time()
    self.container_client.container_create(acct=self.account,
                                           ref=self.container_name)
    self.container_id = cid_from_name(self.account,
                                      self.container_name).upper()
def __init__(self, conf, container_id, metadata, chunks, stgpol_args):
    self.conf = conf
    self.container_id = container_id
    self.metadata = metadata
    self.chunks = ChunksHelper(chunks)
    self.stgpol_args = stgpol_args
    self.logger = get_logger(self.conf)
    self.cs_client = ConscienceClient(conf)
    self.container_client = ContainerClient(self.conf)
    self.blob_client = BlobClient()
    self.session = requests.Session()
    self.content_id = self.metadata["id"]
    self.stgpol_name = self.metadata["policy"]
    self.path = self.metadata["name"]
    self.length = int(self.metadata["length"])
    self.version = self.metadata["version"]
    self.hash = self.metadata["hash"]
    self.mime_type = self.metadata["mime-type"]
    self.chunk_method = self.metadata["chunk-method"]
def __init__(self, conf, container_client=None, blob_client=None,
             logger=None, **kwargs):
    self.conf = conf
    self.logger = logger or get_logger(conf)
    self.container_client = container_client or \
        ContainerClient(conf, logger=self.logger, **kwargs)
    self.blob_client = blob_client or BlobClient(conf, **kwargs)
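This constructor accepts pre-built clients, so several components can share one ContainerClient/BlobClient pair (and their connection pools), as the mover snippet earlier does when it passes both into ContentFactory. A hedged sketch; the namespace is a placeholder and the module paths in the imports are assumptions.

# Hedged sketch: sharing clients between components.
from oio.blob.client import BlobClient
from oio.container.client import ContainerClient      # assumed path
from oio.content.factory import ContentFactory        # assumed path
from oio.common.logger import get_logger              # assumed path

conf = {'namespace': 'OPENIO'}
logger = get_logger(conf)
container_client = ContainerClient(conf, logger=logger)
blob_client = BlobClient(conf)
factory = ContentFactory(conf, container_client=container_client,
                         blob_client=blob_client, logger=logger)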
def setUp(self):
    super(TestBlobIndexer, self).setUp()
    self.rdir_client = RdirClient(self.conf)
    self.blob_client = BlobClient(self.conf)
    _, self.rawx_path, rawx_addr, _ = self.get_service_url('rawx')
    services = self.conscience.all_services('rawx')
    self.rawx_id = None
    for rawx in services:
        if rawx_addr == rawx['addr']:
            self.rawx_id = rawx['tags'].get('tag.service_id', None)
    if self.rawx_id is None:
        self.rawx_id = rawx_addr
    conf = self.conf.copy()
    conf['volume'] = self.rawx_path
    self.blob_indexer = BlobIndexer(conf)
    # clear rawx/rdir
    chunk_files = paths_gen(self.rawx_path)
    for chunk_file in chunk_files:
        os.remove(chunk_file)
    self.rdir_client.admin_clear(self.rawx_id, clear_all=True)
def setUp(self):
    super(TestBlobAuditorFunctional, self).setUp()
    self.namespace = self.conf['namespace']
    self.account = self.conf['account']
    self.test_dir = self.conf['sds_path']
    rawx_num, rawx_path, rawx_addr = self.get_service_url('rawx')
    self.rawx = 'http://' + rawx_addr
    self.h = hashlib.new('md5')
    conf = {"namespace": self.namespace}
    self.auditor = BlobAuditorWorker(conf, get_logger(None), None)
    self.container_c = ContainerClient(conf)
    self.blob_c = BlobClient()
    self.ref = random_str(8)
    self.container_c.container_create(self.account, self.ref)
    self.url_rand = random_id(64)
    self.data = random_str(1280)
    self.h.update(self.data)
    self.hash_rand = self.h.hexdigest().lower()
    self.content = TestContent(
        random_str(6), len(self.data), self.url_rand, 1)
    self.content.id_container = cid_from_name(
        self.account, self.ref).upper()
    self.chunk = TestChunk(self.content.size, self.url_rand, 0,
                           self.hash_rand)
    self.chunk_url = "%s/%s" % (self.rawx, self.chunk.id_chunk)
    self.chunk_proxy = {"hash": self.chunk.md5, "pos": "0",
                        "size": self.chunk.size, "url": self.chunk_url}
    chunk_meta = {'content_path': self.content.path,
                  'container_id': self.content.id_container,
                  'content_chunkmethod': 'plain/nb_copy=3',
                  'content_policy': 'TESTPOLICY',
                  'content_id': '0000',
                  'content_version': 1,
                  'chunk_id': self.chunk.id_chunk,
                  'chunk_pos': self.chunk.pos}
    self.blob_c.chunk_put(self.chunk_url, chunk_meta, self.data)
    self.chunk_path = (self.test_dir + '/data/' + self.namespace +
                       '-rawx-1/' + self.chunk.id_chunk[0:3] + "/" +
                       self.chunk.id_chunk)
    self.bad_container_id = '0' * 64
def setUp(self):
    super(TestBlobAuditorFunctional, self).setUp()
    self.namespace = self.conf['namespace']
    self.account = self.conf['account']
    self.ref = random_str(8)
    _, rawx_loc, rawx_addr, rawx_uuid = self.get_service_url('rawx')
    self.rawx_id = 'http://' + (rawx_uuid if rawx_uuid else rawx_addr)
    self.auditor = BlobAuditorWorker(self.conf, get_logger(None), None)
    self.container_client = ContainerClient(self.conf)
    self.blob_client = BlobClient(conf=self.conf)
    self.container_client.container_create(self.account, self.ref)
    self.content = TestContent(self.account, self.ref)
    self.chunk = TestChunk(self.content.cid, self.content.path,
                           self.content.version, self.storage_policy,
                           self.rawx_id, rawx_loc, self.content.size,
                           self.content.hash)
    chunk_meta = {
        'container_id': self.content.cid,
        'content_path': self.content.path,
        'version': self.content.version,
        'id': self.content.id,
        'full_path': self.content.fullpath,
        'chunk_method': 'plain/nb_copy=3',
        'policy': self.storage_policy,
        'chunk_id': self.chunk.id,
        'chunk_pos': self.chunk.pos,
        'chunk_hash': self.chunk.metachunk_hash,
        'chunk_size': self.chunk.metachunk_size,
        'metachunk_hash': self.chunk.metachunk_hash,
        'metachunk_size': self.chunk.metachunk_size,
        'oio_version': OIO_VERSION
    }
    self.blob_client.chunk_put(self.chunk.url, chunk_meta,
                               self.content.data)
def __init__(self, conf, container_id, metadata, chunks, storage_method,
             account, container_name, container_client=None):
    self.conf = conf
    self.container_id = container_id
    self.metadata = metadata
    self.chunks = ChunksHelper(chunks)
    self.storage_method = storage_method
    self.logger = get_logger(self.conf)
    self.blob_client = BlobClient()
    self.container_client = (container_client
                             or ContainerClient(self.conf,
                                                logger=self.logger))
    # FIXME: all these may be properties
    self.content_id = self.metadata["id"]
    self.path = self.metadata["name"]
    self.length = int(self.metadata["length"])
    self.version = self.metadata["version"]
    self.checksum = self.metadata["hash"]
    self.chunk_method = self.metadata["chunk_method"]
    self.account = account
    self.container_name = container_name
    if 'full_path' in self.metadata:
        self.full_path = metadata['full_path']
    else:
        self.full_path = [
            '{0}/{1}/{2}/{3}'.format(quote_plus(self.account),
                                     quote_plus(self.container_name),
                                     quote_plus(self.path),
                                     self.version)
        ]
def setUp(self):
    super(TestECContent, self).setUp()
    if len(self.conf['services']['rawx']) < 12:
        self.skipTest("Not enough rawx. "
                      "EC tests need at least 12 rawx to run")
    self.namespace = self.conf['namespace']
    self.account = self.conf['account']
    self.chunk_size = self.conf['chunk_size']
    self.gridconf = {"namespace": self.namespace}
    self.content_factory = ContentFactory(self.gridconf)
    self.container_client = ContainerClient(self.gridconf)
    self.blob_client = BlobClient(self.conf)
    self.container_name = "TestECContent%f" % time.time()
    self.container_client.container_create(account=self.account,
                                           reference=self.container_name)
    self.container_id = cid_from_name(self.account,
                                      self.container_name).upper()
    self.content = "%s-%s" % (self.__class__.__name__, random_str(4))
    self.stgpol = "EC"
    self.size = 1024 * 1024 + 320
    self.k = 6
    self.m = 3
def setUp(self):
    with mock.patch('oio.container.client.gen_headers',
                    gen_headers_mock):
        super(TestFilters, self).setUp()
        self.account = self.conf['account']
        self.namespace = self.conf['namespace']
        self.chunk_size = self.conf['chunk_size']
        self.gridconf = {'namespace': self.namespace}
        self.content_factory = ContentFactory(self.gridconf)
        self.container_name = 'TestFilter%f' % time.time()
        self.blob_client = BlobClient()
        self.container_client = ContainerClient(self.gridconf)
        self.container_client.container_create(acct=self.account,
                                               ref=self.container_name)
        self.container_id = cid_from_name(self.account,
                                          self.container_name).upper()
        self.stgpol = "SINGLE"
def setUp(self):
    super(TestContentFactory, self).setUp()
    self.namespace = self.conf['namespace']
    self.chunk_size = self.conf['chunk_size']
    self.gridconf = {"namespace": self.namespace}
    self.content_factory = ContentFactory(self.gridconf)
    self.container_name = "TestContentFactory%f" % time.time()
    self.blob_client = BlobClient()
    self.container_client = ContainerClient(self.gridconf)
    self.container_client.container_create(acct=self.account,
                                           ref=self.container_name)
    self.container_id = cid_from_name(self.account,
                                      self.container_name).upper()
    self.stgpol = "SINGLE"
    self.stgpol_twocopies = "TWOCOPIES"
    self.stgpol_threecopies = "THREECOPIES"
    self.stgpol_ec = "EC"
def setUp(self):
    super(TestPlainContent, self).setUp()
    if len(self.conf['services']['rawx']) < 4:
        self.skipTest("Plain tests need at least 4 rawx to run")
    self.namespace = self.conf['namespace']
    self.account = self.conf['account']
    self.chunk_size = self.conf['chunk_size']
    self.gridconf = {"namespace": self.namespace}
    self.content_factory = ContentFactory(self.gridconf)
    self.container_client = ContainerClient(self.gridconf)
    self.blob_client = BlobClient()
    self.container_name = "TestPlainContent-%f" % time.time()
    self.container_client.container_create(acct=self.account,
                                           ref=self.container_name)
    self.container_id = cid_from_name(self.account,
                                      self.container_name).upper()
    self.content = random_str(64)
    self.stgpol = "SINGLE"
    self.stgpol_twocopies = "TWOCOPIES"
    self.stgpol_threecopies = "THREECOPIES"
def setUp(self):
    super(TestECContent, self).setUp()
    if len(self.conf['services']['rawx']) < 12:
        self.skipTest("Not enough rawx. "
                      "EC tests need at least 12 rawx to run")
    self.namespace = self.conf['namespace']
    self.account = self.conf['account']
    self.chunk_size = self.conf['chunk_size']
    self.gridconf = {"namespace": self.namespace}
    self.content_factory = ContentFactory(self.gridconf)
    self.container_client = ContainerClient(self.gridconf)
    self.blob_client = BlobClient()
    self.container_name = "TestECContent%f" % time.time()
    self.container_client.container_create(acct=self.account,
                                           ref=self.container_name)
    self.container_id = cid_from_name(self.account,
                                      self.container_name).upper()
    self.content = random_str(64)
    self.stgpol = "EC"
    self.size = 1024 * 1024 + 320
    self.k = 6
    self.m = 3
class BlobRebuilderWorker(object):
    def __init__(self, conf, logger, volume):
        self.conf = conf
        self.logger = logger or get_logger(conf)
        self.volume = volume
        self.run_time = 0
        self.passes = 0
        self.errors = 0
        self.last_reported = 0
        self.chunks_run_time = 0
        self.bytes_running_time = 0
        self.bytes_processed = 0
        self.total_bytes_processed = 0
        self.total_chunks_processed = 0
        self.dry_run = true_value(conf.get('dry_run', False))
        self.report_interval = int_value(conf.get('report_interval'), 3600)
        self.max_chunks_per_second = int_value(
            conf.get('chunks_per_second'), 30)
        self.max_bytes_per_second = int_value(
            conf.get('bytes_per_second'), 10000000)
        self.rdir_fetch_limit = int_value(conf.get('rdir_fetch_limit'), 100)
        self.blob_client = BlobClient()
        self.container_client = ContainerClient(conf)
        self.rdir_client = RdirClient(conf)

    def rebuilder_pass_with_lock(self):
        self.rdir_client.admin_lock(self.volume,
                                    "rebuilder on %s" % gethostname())
        try:
            self.rebuilder_pass()
        finally:
            self.rdir_client.admin_unlock(self.volume)

    def rebuilder_pass(self):
        start_time = report_time = time.time()
        total_errors = 0
        rebuilder_time = 0
        chunks = self.rdir_client.chunk_fetch(
            self.volume, limit=self.rdir_fetch_limit, rebuild=True)
        for container_id, content_id, chunk_id, data in chunks:
            loop_time = time.time()
            if self.dry_run:
                self.dryrun_chunk_rebuild(container_id, content_id,
                                          chunk_id)
            else:
                self.safe_chunk_rebuild(container_id, content_id, chunk_id)
            self.chunks_run_time = ratelimit(self.chunks_run_time,
                                             self.max_chunks_per_second)
            self.total_chunks_processed += 1
            now = time.time()
            if now - self.last_reported >= self.report_interval:
                self.logger.info(
                    '%(start_time)s '
                    '%(passes)d '
                    '%(errors)d '
                    '%(c_rate).2f '
                    '%(b_rate).2f '
                    '%(total).2f '
                    '%(rebuilder_time).2f '
                    '%(rebuilder_rate).2f' % {
                        'start_time': time.ctime(report_time),
                        'passes': self.passes,
                        'errors': self.errors,
                        'c_rate': self.passes / (now - report_time),
                        'b_rate': self.bytes_processed /
                        (now - report_time),
                        'total': (now - start_time),
                        'rebuilder_time': rebuilder_time,
                        'rebuilder_rate':
                        rebuilder_time / (now - start_time)
                    })
                report_time = now
                total_errors += self.errors
                self.passes = 0
                self.bytes_processed = 0
                self.last_reported = now
            rebuilder_time += (now - loop_time)
        elapsed = (time.time() - start_time) or 0.000001
        self.logger.info(
            '%(elapsed).02f '
            '%(errors)d '
            '%(chunk_rate).2f '
            '%(bytes_rate).2f '
            '%(rebuilder_time).2f '
            '%(rebuilder_rate).2f' % {
                'elapsed': elapsed,
                'errors': total_errors + self.errors,
                'chunk_rate': self.total_chunks_processed / elapsed,
                'bytes_rate': self.total_bytes_processed / elapsed,
                'rebuilder_time': rebuilder_time,
                'rebuilder_rate': rebuilder_time / elapsed
            })

    def dryrun_chunk_rebuild(self, container_id, content_id, chunk_id):
        self.logger.info("[dryrun] Rebuilding "
                         "container %s, content %s, chunk %s" %
                         (container_id, content_id, chunk_id))
        self.passes += 1

    def safe_chunk_rebuild(self, container_id, content_id, chunk_id):
        self.logger.info('Rebuilding (container %s, content %s, chunk %s)'
                         % (container_id, content_id, chunk_id))
        try:
            self.chunk_rebuild(container_id, content_id, chunk_id)
        except Exception as e:
            self.errors += 1
            self.logger.error('ERROR while rebuilding chunk %s|%s|%s: %s',
                              container_id, content_id, chunk_id, e)
        self.passes += 1

    def _meta2_get_chunks_at_pos(self, container_id, content_id, chunk_id):
        current_chunk_url = 'http://%s/%s' % (self.volume, chunk_id)
        try:
            data = self.container_client.content_show(
                cid=container_id, content=content_id)
        except exc.NotFound:
            raise exc.OrphanChunk('Content not found')
        current_chunk = None
        for c in data:
            if c['url'] == current_chunk_url:
                current_chunk = c
                break
        if not current_chunk:
            raise exc.OrphanChunk('Chunk not found in content')
        duplicate_chunks = []
        for c in data:
            if c['pos'] == current_chunk['pos'] \
                    and c['url'] != current_chunk['url']:
                duplicate_chunks.append(c)
        if len(duplicate_chunks) == 0:
            raise exc.UnrecoverableContent('No copy of missing chunk')
        return current_chunk, duplicate_chunks

    def _meta2_get_spare_chunk(self, container_id, content_id, notin,
                               broken):
        spare_data = {'notin': notin, 'broken': [broken], 'size': 0}
        try:
            spare_resp = self.container_client.content_spare(
                cid=container_id, content=content_id, data=spare_data)
        except ClientException as e:
            raise exc.SpareChunkException('No spare chunk (%s)' %
                                          e.message)
        return spare_resp['chunks'][0]

    def _meta2_replace_chunk(self, container_id, content_id,
                             current_chunk, new_chunk):
        old = [{'type': 'chunk',
                'id': current_chunk['url'],
                'hash': current_chunk['hash'],
                'size': current_chunk['size'],
                'pos': current_chunk['pos'],
                'content': content_id}]
        new = [{'type': 'chunk',
                'id': new_chunk['id'],
                'hash': current_chunk['hash'],
                'size': current_chunk['size'],
                'pos': current_chunk['pos'],
                'content': content_id}]
        update_data = {'old': old, 'new': new}
        self.container_client.container_raw_update(
            cid=container_id, data=update_data)

    # TODO rain support
    def chunk_rebuild(self, container_id, content_id, chunk_id):
        current_chunk, duplicate_chunks = self._meta2_get_chunks_at_pos(
            container_id, content_id, chunk_id)
        spare_chunk = self._meta2_get_spare_chunk(
            container_id, content_id, duplicate_chunks, current_chunk)
        uploaded = False
        for src in duplicate_chunks:
            try:
                self.blob_client.chunk_copy(src['url'], spare_chunk['id'])
                self.logger.debug('copy chunk from %s to %s',
                                  src['url'], spare_chunk['id'])
                uploaded = True
                break
            except Exception as e:
                self.logger.debug('Failed to copy chunk from %s to %s: %s',
                                  src['url'], spare_chunk['id'], type(e))
        if not uploaded:
            raise exc.UnrecoverableContent('No copy available '
                                           'of missing chunk')
        self._meta2_replace_chunk(container_id, content_id,
                                  current_chunk, spare_chunk)
        self.rdir_client.chunk_push(self.volume, container_id, content_id,
                                    chunk_id, rtime=int(time.time()))
        self.bytes_processed += current_chunk['size']
        self.total_bytes_processed += current_chunk['size']
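A hedged usage sketch for BlobRebuilderWorker as defined above; the conf values and the volume address are placeholders.

# Placeholders throughout; dry_run avoids touching real chunks.
conf = {'namespace': 'OPENIO', 'dry_run': True, 'rdir_fetch_limit': 50}
worker = BlobRebuilderWorker(conf, None, '127.0.0.1:6201')
worker.rebuilder_pass_with_lock()  # lock volume in rdir, rebuild, unlock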
class TestRainContent(BaseTestCase):
    def setUp(self):
        super(TestRainContent, self).setUp()
        if len(self.conf['rawx']) < 12:
            self.skipTest("Not enough rawx. "
                          "Rain tests need at least 12 rawx to run")
        self.namespace = self.conf['namespace']
        self.account = self.conf['account']
        self.chunk_size = self.conf['chunk_size']
        self.gridconf = {"namespace": self.namespace}
        self.content_factory = ContentFactory(self.gridconf)
        self.container_client = ContainerClient(self.gridconf)
        self.blob_client = BlobClient()
        self.container_name = "TestRainContent%f" % time.time()
        self.container_client.container_create(acct=self.account,
                                               ref=self.container_name)
        self.container_id = cid_from_name(self.account,
                                          self.container_name).upper()

    def tearDown(self):
        super(TestRainContent, self).tearDown()

    def _test_upload(self, data_size):
        data = random_data(data_size)
        content = self.content_factory.new(self.container_id, "titi",
                                           len(data), "RAIN")
        k = 6
        m = 2
        self.assertEqual(type(content), RainContent)
        content.upload(StringIO.StringIO(data))
        meta, chunks = self.container_client.content_show(
            cid=self.container_id, content=content.content_id)
        chunks = ChunksHelper(chunks)
        self.assertEqual(meta['hash'], md5_data(data))
        self.assertEqual(meta['length'], str(len(data)))
        self.assertEqual(meta['policy'], "RAIN")
        self.assertEqual(meta['name'], "titi")
        metachunk_nb = int(math.ceil(float(len(data)) / self.chunk_size))
        if metachunk_nb == 0:
            metachunk_nb = 1  # special case for empty content
        nb_chunks_min = metachunk_nb * (1 + m)
        nb_chunks_max = metachunk_nb * (k + m)
        self.assertGreaterEqual(len(chunks), nb_chunks_min)
        self.assertLessEqual(len(chunks), nb_chunks_max)
        for metapos in range(metachunk_nb):
            chunks_at_pos = content.chunks.filter(metapos=metapos)
            data_chunks_at_pos = chunks_at_pos.filter(is_parity=False)
            parity_chunks_at_pos = chunks_at_pos.filter(is_parity=True)
            self.assertGreaterEqual(len(data_chunks_at_pos), 1)
            self.assertLessEqual(len(data_chunks_at_pos), k)
            self.assertEqual(len(parity_chunks_at_pos), m)
            for chunk in chunks_at_pos:
                meta, stream = self.blob_client.chunk_get(chunk.url)
                self.assertEqual(md5_stream(stream), chunk.hash)
                self.assertEqual(meta['content_size'], str(len(data)))
                self.assertEqual(meta['content_path'], "titi")
                self.assertEqual(meta['content_cid'], self.container_id)
                self.assertEqual(meta['content_id'], content.content_id)
                self.assertEqual(meta['chunk_id'], chunk.id)
                self.assertEqual(meta['chunk_pos'], chunk.pos)
                self.assertEqual(meta['chunk_hash'], chunk.hash)
            data_begin = metapos * self.chunk_size
            data_end = metapos * self.chunk_size + self.chunk_size
            target_metachunk_hash = md5_data(data[data_begin:data_end])
            metachunk_hash = hashlib.md5()
            for chunk in data_chunks_at_pos:
                meta, stream = self.blob_client.chunk_get(chunk.url)
                for d in stream:
                    metachunk_hash.update(d)
            self.assertEqual(metachunk_hash.hexdigest().upper(),
                             target_metachunk_hash)

    def test_upload_0_byte(self):
        self._test_upload(0)

    def test_upload_1_byte(self):
        self._test_upload(1)

    def test_upload_chunksize_bytes(self):
        self._test_upload(self.chunk_size)

    def test_upload_chunksize_plus_1_bytes(self):
        self._test_upload(self.chunk_size + 1)

    def test_chunks_cleanup_when_upload_failed(self):
        data = random_data(2 * self.chunk_size)
        content = self.content_factory.new(self.container_id, "titi",
                                           len(data), "RAIN")
        self.assertEqual(type(content), RainContent)
        # set bad url for position 1
        for chunk in content.chunks.filter(pos="1.p0"):
            chunk.url = "http://127.0.0.1:9/DEADBEEF"
        self.assertRaises(Exception, content.upload,
                          StringIO.StringIO(data))
        for chunk in content.chunks.exclude(pos="1.p0"):
            self.assertRaises(NotFound, self.blob_client.chunk_head,
                              chunk.url)

    def _test_rebuild(self, data_size, broken_pos_list):
        data = os.urandom(data_size)
        old_content = self.content_factory.new(self.container_id, "titi",
                                               len(data), "RAIN")
        self.assertEqual(type(old_content), RainContent)
        old_content.upload(StringIO.StringIO(data))
        # get the new structure of the uploaded content
        uploaded_content = self.content_factory.get(
            self.container_id, old_content.content_id)
        old_info = {}
        for pos in broken_pos_list:
            old_info[pos] = {}
            c = uploaded_content.chunks.filter(pos=pos)[0]
            old_info[pos]["url"] = c.url
            old_info[pos]["id"] = c.id
            old_info[pos]["hash"] = c.hash
            chunk_id_to_rebuild = c.id
            meta, stream = self.blob_client.chunk_get(c.url)
            old_info[pos]["dl_meta"] = meta
            old_info[pos]["dl_hash"] = md5_stream(stream)
            # delete the chunk
            self.blob_client.chunk_delete(c.url)
        # rebuild the broken chunks
        uploaded_content.rebuild_chunk(chunk_id_to_rebuild)
        # get the new structure of the content
        rebuilt_content = self.content_factory.get(
            self.container_id, uploaded_content.content_id)
        self.assertEqual(type(rebuilt_content), RainContent)
        for pos in broken_pos_list:
            c = rebuilt_content.chunks.filter(pos=pos)[0]
            rebuilt_meta, rebuilt_stream = self.blob_client.chunk_get(
                c.url)
            self.assertEqual(rebuilt_meta["chunk_id"], c.id)
            self.assertEqual(md5_stream(rebuilt_stream),
                             old_info[pos]["dl_hash"])
            self.assertEqual(c.hash, old_info[pos]["hash"])
            self.assertThat(c.url, NotEquals(old_info[pos]["url"]))
            del old_info[pos]["dl_meta"]["chunk_id"]
            del rebuilt_meta["chunk_id"]
            self.assertEqual(rebuilt_meta, old_info[pos]["dl_meta"])

    def test_content_0_byte_rebuild_pos_0_0(self):
        self._test_rebuild(0, ["0.0"])

    def test_content_0_byte_rebuild_pos_0_0_and_0_p0(self):
        self._test_rebuild(0, ["0.0", "0.p0"])

    def test_content_1_byte_rebuild_pos_0_0(self):
        self._test_rebuild(1, ["0.0"])

    def test_content_1_byte_rebuild_pos_0_p0(self):
        self._test_rebuild(1, ["0.p0"])

    def test_content_1_byte_rebuild_pos_0_0_and_0_p0(self):
        self._test_rebuild(1, ["0.0", "0.p0"])

    def test_content_chunksize_bytes_rebuild_pos_0_0(self):
        self._test_rebuild(self.conf["chunk_size"], ["0.0"])

    def test_content_chunksize_bytes_rebuild_pos_0_0_and_0_1(self):
        self._test_rebuild(self.conf["chunk_size"], ["0.0", "0.1"])

    def test_content_chunksize_bytes_rebuild_pos_0_0_and_0_p0(self):
        self._test_rebuild(self.conf["chunk_size"], ["0.0", "0.p0"])

    def test_content_chunksize_bytes_rebuild_pos_0_p0_and_0_p1(self):
        self._test_rebuild(self.conf["chunk_size"], ["0.p0", "0.p1"])

    def test_content_chunksize_bytes_rebuild_more_than_k_chunk(self):
        self.assertRaises(UnrecoverableContent, self._test_rebuild,
                          self.conf["chunk_size"], ["0.0", "0.1", "0.2"])

    def _new_content(self, data, broken_pos_list=[]):
        old_content = self.content_factory.new(self.container_id, "titi",
                                               len(data), "RAIN")
        self.assertEqual(type(old_content), RainContent)
        old_content.upload(StringIO.StringIO(data))
        for pos in broken_pos_list:
            c = old_content.chunks.filter(pos=pos)[0]
            self.blob_client.chunk_delete(c.url)
        # get the new structure of the uploaded content
        return self.content_factory.get(self.container_id,
                                        old_content.content_id)

    def test_orphan_chunk(self):
        content = self._new_content(random_data(10))
        self.assertRaises(OrphanChunk, content.rebuild_chunk, "uNkNoWnId")

    def test_rebuild_on_the_fly(self):
        data = random_data(self.conf["chunk_size"])
        content = self._new_content(data, ["0.0", "0.p0"])
        stream = content.rebuild_metachunk("0", on_the_fly=True)
        dl_data = "".join(stream)
        self.assertEqual(dl_data, data)
        del_chunk_0_0 = content.chunks.filter(pos="0.0")[0]
        del_chunk_0_p0 = content.chunks.filter(pos="0.p0")[0]
        self.assertRaises(NotFound, self.blob_client.chunk_get,
                          del_chunk_0_0.url)
        self.assertRaises(NotFound, self.blob_client.chunk_get,
                          del_chunk_0_p0.url)

    def _test_download(self, data_size, broken_pos_list):
        data = random_data(data_size)
        content = self._new_content(data, broken_pos_list)
        downloaded_data = "".join(content.download())
        self.assertEqual(downloaded_data, data)
        for pos in broken_pos_list:
            c = content.chunks.filter(pos=pos)[0]
            self.assertRaises(NotFound, self.blob_client.chunk_delete,
                              c.url)

    def test_download_content_0_byte_without_broken_chunks(self):
        self._test_download(0, [])

    def test_download_content_1_byte_without_broken_chunks(self):
        self._test_download(1, [])

    def test_download_content_chunksize_bytes_without_broken_chunks(self):
        self._test_download(self.conf["chunk_size"], [])

    def test_download_content_chunksize_plus_1_without_broken_chunks(self):
        self._test_download(self.conf["chunk_size"] + 1, [])

    def test_download_content_0_byte_with_broken_0_0_and_0_p0(self):
        self._test_download(0, ["0.0", "0.p0"])

    def test_download_content_1_byte_with_broken_0_0_and_0_p0(self):
        self._test_download(1, ["0.0", "0.p0"])

    def test_download_content_2xchunksize_with_broken_0_2_and_1_0(self):
        self._test_download(2 * self.conf["chunk_size"], ["0.2", "1.0"])

    def test_download_content_chunksize_bytes_with_3_broken_chunks(self):
        data = random_data(self.conf["chunk_size"])
        content = self._new_content(data, ["0.0", "0.1", "0.2"])
        gen = content.download()
        self.assertRaises(UnrecoverableContent, gen.next)

    def test_download_interrupt_close(self):
        data = random_data(self.conf["chunk_size"])
        content = self._new_content(data, ["0.p0"])
        download_iter = content.download()
        dl_data = ""
        for buf in download_iter:
            dl_data += buf
        self.assertEqual(len(dl_data), len(data))
        self.assertEqual(dl_data, data)
        download_iter.close()
class TestBlobAuditorFunctional(BaseTestCase):
    def setUp(self):
        super(TestBlobAuditorFunctional, self).setUp()
        self.namespace = self.conf['namespace']
        self.account = self.conf['account']
        self.test_dir = self.conf['sds_path']
        rawx_num, rawx_path, rawx_addr = self.get_service_url('rawx')
        self.rawx = 'http://' + rawx_addr
        self.h = hashlib.new('md5')
        conf = {"namespace": self.namespace}
        self.auditor = BlobAuditorWorker(conf, get_logger(None), None)
        self.container_c = ContainerClient(conf)
        self.blob_c = BlobClient()
        self.ref = random_str(8)
        self.container_c.container_create(self.account, self.ref)
        self.url_rand = random_id(64)
        self.data = random_str(1280)
        self.h.update(self.data)
        self.hash_rand = self.h.hexdigest().lower()
        self.content = TestContent(
            random_str(6), len(self.data), self.url_rand, 1)
        self.content.id_container = cid_from_name(
            self.account, self.ref).upper()
        self.chunk = TestChunk(self.content.size, self.url_rand, 0,
                               self.hash_rand)
        self.chunk_url = "%s/%s" % (self.rawx, self.chunk.id_chunk)
        self.chunk_proxy = {"hash": self.chunk.md5, "pos": "0",
                            "size": self.chunk.size,
                            "url": self.chunk_url}
        chunk_meta = {'content_path': self.content.path,
                      'container_id': self.content.id_container,
                      'content_chunkmethod': 'plain/nb_copy=3',
                      'content_policy': 'TESTPOLICY',
                      'content_id': '0000',
                      'content_version': 1,
                      'chunk_id': self.chunk.id_chunk,
                      'chunk_pos': self.chunk.pos}
        self.blob_c.chunk_put(self.chunk_url, chunk_meta, self.data)
        self.chunk_path = (self.test_dir + '/data/' + self.namespace +
                           '-rawx-1/' + self.chunk.id_chunk[0:3] + "/" +
                           self.chunk.id_chunk)
        self.bad_container_id = '0' * 64

    def tearDown(self):
        super(TestBlobAuditorFunctional, self).tearDown()
        try:
            self.container_c.content_delete(
                self.account, self.ref, self.content.path)
        except Exception:
            pass
        try:
            self.container_c.container_destroy(self.account, self.ref)
        except Exception:
            pass
        try:
            os.remove(self.chunk_path)
        except Exception:
            pass

    def init_content(self):
        self.container_c.content_create(
            self.account, self.ref, self.content.path, self.chunk.size,
            self.hash_rand, data=[self.chunk_proxy])

    def test_chunk_audit(self):
        self.init_content()
        self.auditor.chunk_audit(self.chunk_path)

    def test_content_deleted(self):
        self.assertRaises(exc.OrphanChunk, self.auditor.chunk_audit,
                          self.chunk_path)

    def test_container_deleted(self):
        self.container_c.container_destroy(self.account, self.ref)
        self.assertRaises(exc.OrphanChunk, self.auditor.chunk_audit,
                          self.chunk_path)

    def test_chunk_corrupted(self):
        self.init_content()
        with open(self.chunk_path, "w") as f:
            f.write(random_str(1280))
        self.assertRaises(exc.CorruptedChunk, self.auditor.chunk_audit,
                          self.chunk_path)

    def test_chunk_bad_size(self):
        self.init_content()
        with open(self.chunk_path, "w") as f:
            f.write(random_str(320))
        self.assertRaises(exc.FaultyChunk, self.auditor.chunk_audit,
                          self.chunk_path)

    def test_xattr_bad_chunk_size(self):
        self.init_content()
        xattr.setxattr(
            self.chunk_path, 'user.' + chunk_xattr_keys['chunk_size'],
            '-1')
        self.assertRaises(exc.FaultyChunk, self.auditor.chunk_audit,
                          self.chunk_path)

    def test_xattr_bad_chunk_hash(self):
        self.init_content()
        xattr.setxattr(
            self.chunk_path, 'user.' + chunk_xattr_keys['chunk_hash'],
            'WRONG_HASH')
        self.assertRaises(exc.CorruptedChunk, self.auditor.chunk_audit,
                          self.chunk_path)

    def test_xattr_bad_content_path(self):
        self.init_content()
        xattr.setxattr(
            self.chunk_path, 'user.' + chunk_xattr_keys['content_path'],
            'WRONG_PATH')
        self.assertRaises(exc.OrphanChunk, self.auditor.chunk_audit,
                          self.chunk_path)

    def test_xattr_bad_chunk_id(self):
        self.init_content()
        xattr.setxattr(
            self.chunk_path, 'user.' + chunk_xattr_keys['chunk_id'],
            'WRONG_ID')
        self.assertRaises(exc.OrphanChunk, self.auditor.chunk_audit,
                          self.chunk_path)

    def test_xattr_bad_content_container(self):
        self.init_content()
        xattr.setxattr(
            self.chunk_path, 'user.' + chunk_xattr_keys['container_id'],
            self.bad_container_id)
        self.assertRaises(exc.OrphanChunk, self.auditor.chunk_audit,
                          self.chunk_path)

    def test_xattr_bad_chunk_position(self):
        self.init_content()
        xattr.setxattr(self.chunk_path, 'user.grid.chunk.position', '42')
        xattr.setxattr(
            self.chunk_path, 'user.' + chunk_xattr_keys['chunk_pos'],
            '42')
        self.assertRaises(exc.FaultyChunk, self.auditor.chunk_audit,
                          self.chunk_path)

    def test_chunk_bad_hash(self):
        self.h.update(self.data)
        self.hash_rand = self.h.hexdigest().lower()
        self.chunk.md5 = self.hash_rand
        self.chunk_proxy['hash'] = self.chunk.md5
        self.init_content()
        self.assertRaises(exc.FaultyChunk, self.auditor.chunk_audit,
                          self.chunk_path)

    def test_chunk_bad_length(self):
        self.chunk.size = 320
        self.chunk_proxy['size'] = self.chunk.size
        self.init_content()
        self.assertRaises(exc.FaultyChunk, self.auditor.chunk_audit,
                          self.chunk_path)

    def test_chunk_bad_chunk_size(self):
        self.chunk.size = 320
        self.chunk_proxy['size'] = self.chunk.size
        self.init_content()
        self.assertRaises(exc.FaultyChunk, self.auditor.chunk_audit,
                          self.chunk_path)

    def test_chunk_bad_url(self):
        self.chunk_proxy['url'] = '%s/WRONG_ID' % self.rawx
        self.init_content()
        self.assertRaises(exc.OrphanChunk, self.auditor.chunk_audit,
                          self.chunk_path)
class TestBlobAuditorFunctional(BaseTestCase):
    def setUp(self):
        super(TestBlobAuditorFunctional, self).setUp()
        self.namespace = self.conf['namespace']
        self.account = self.conf['account']
        self.test_dir = self.conf['sds_path']
        self.chars = (string.ascii_lowercase + string.ascii_uppercase +
                      string.digits)
        self.chars_id = string.digits + 'ABCDEF'
        self.rawx = 'http://' + self.conf["rawx"][0]['addr']
        self.h = hashlib.new('md5')
        conf = {"namespace": self.namespace}
        self.auditor = BlobAuditorWorker(conf, get_logger(None), None)
        self.container_c = ContainerClient(conf)
        self.blob_c = BlobClient()
        self.ref = rand_generator(self.chars, 8)
        self.container_c.container_create(self.account, self.ref)
        self.url_rand = rand_generator(self.chars_id, 64)
        self.data = rand_generator(self.chars, 1280)
        self.h.update(self.data)
        self.hash_rand = self.h.hexdigest().lower()
        self.content = TestContent(
            rand_generator(self.chars, 6), len(self.data),
            self.url_rand, 1)
        self.content.id_container = cid_from_name(
            self.account, self.ref).upper()
        self.chunk = TestChunk(self.content.size, self.url_rand, 0,
                               self.hash_rand)
        self.chunk_url = "%s/%s" % (self.rawx, self.chunk.id_chunk)
        self.chunk_proxy = {"hash": self.chunk.md5, "pos": "0",
                            "size": self.chunk.size,
                            "url": self.chunk_url}
        chunk_meta = {'content_size': self.content.size,
                      'content_chunksnb': self.content.nb_chunks,
                      'content_path': self.content.path,
                      'content_cid': self.content.id_container,
                      'content_id': '0000',
                      'content_version': 1,
                      'chunk_id': self.chunk.id_chunk,
                      'chunk_pos': self.chunk.pos}
        self.blob_c.chunk_put(self.chunk_url, chunk_meta, self.data)
        self.chunk_path = (self.test_dir + '/data/NS-rawx-1/' +
                           self.chunk.id_chunk[0:2] + "/" +
                           self.chunk.id_chunk)
        self.bad_container_id = '0' * 64

    def tearDown(self):
        super(TestBlobAuditorFunctional, self).tearDown()
        try:
            self.container_c.content_delete(
                self.account, self.ref, self.content.path)
        except Exception:
            pass
        try:
            self.container_c.container_destroy(self.account, self.ref)
        except Exception:
            pass
        try:
            os.remove(self.chunk_path)
        except Exception:
            pass

    def init_content(self):
        self.container_c.content_create(
            self.account, self.ref, self.content.path, self.chunk.size,
            self.hash_rand, data=[self.chunk_proxy])

    def test_chunk_audit(self):
        self.init_content()
        self.auditor.chunk_audit(self.chunk_path)

    def test_content_deleted(self):
        self.assertRaises(exc.OrphanChunk, self.auditor.chunk_audit,
                          self.chunk_path)

    def test_container_deleted(self):
        self.container_c.container_destroy(self.account, self.ref)
        self.assertRaises(exc.OrphanChunk, self.auditor.chunk_audit,
                          self.chunk_path)

    def test_chunk_corrupted(self):
        self.init_content()
        with open(self.chunk_path, "w") as f:
            f.write(rand_generator(self.chars, 1280))
        self.assertRaises(exc.CorruptedChunk, self.auditor.chunk_audit,
                          self.chunk_path)

    def test_chunk_bad_size(self):
        self.init_content()
        with open(self.chunk_path, "w") as f:
            f.write(rand_generator(self.chars, 320))
        self.assertRaises(exc.FaultyChunk, self.auditor.chunk_audit,
                          self.chunk_path)

    def test_xattr_bad_content_nbchunk(self):
        self.init_content()
        xattr.setxattr(self.chunk_path, 'user.grid.content.nbchunk',
                       '42')
        self.assertRaises(exc.FaultyChunk, self.auditor.chunk_audit,
                          self.chunk_path)

    def test_xattr_bad_chunk_size(self):
        self.init_content()
        xattr.setxattr(self.chunk_path, 'user.grid.chunk.size', '-1')
        self.assertRaises(exc.FaultyChunk, self.auditor.chunk_audit,
                          self.chunk_path)

    def test_xattr_bad_chunk_hash(self):
        self.init_content()
        xattr.setxattr(self.chunk_path, 'user.grid.chunk.hash',
                       'WRONG_HASH')
        self.assertRaises(exc.CorruptedChunk, self.auditor.chunk_audit,
                          self.chunk_path)

    def test_xattr_bad_content_size(self):
        self.init_content()
        xattr.setxattr(self.chunk_path, 'user.grid.content.size', '-1')
        self.assertRaises(exc.FaultyChunk, self.auditor.chunk_audit,
                          self.chunk_path)

    def test_xattr_bad_content_path(self):
        self.init_content()
        xattr.setxattr(self.chunk_path, 'user.grid.content.path',
                       'WRONG_PATH')
        self.assertRaises(exc.OrphanChunk, self.auditor.chunk_audit,
                          self.chunk_path)

    def test_xattr_bad_chunk_id(self):
        self.init_content()
        xattr.setxattr(self.chunk_path, 'user.grid.chunk.id', 'WRONG_ID')
        self.assertRaises(exc.OrphanChunk, self.auditor.chunk_audit,
                          self.chunk_path)

    def test_xattr_bad_content_container(self):
        self.init_content()
        xattr.setxattr(self.chunk_path, 'user.grid.content.container',
                       self.bad_container_id)
        self.assertRaises(exc.OrphanChunk, self.auditor.chunk_audit,
                          self.chunk_path)

    def test_xattr_bad_chunk_position(self):
        self.init_content()
        xattr.setxattr(self.chunk_path, 'user.grid.chunk.position', '42')
        self.assertRaises(exc.FaultyChunk, self.auditor.chunk_audit,
                          self.chunk_path)

    def test_chunk_bad_hash(self):
        self.h.update(self.data)
        self.hash_rand = self.h.hexdigest().lower()
        self.chunk.md5 = self.hash_rand
        self.chunk_proxy['hash'] = self.chunk.md5
        self.init_content()
        self.assertRaises(exc.FaultyChunk, self.auditor.chunk_audit,
                          self.chunk_path)

    def test_chunk_bad_length(self):
        self.chunk.size = 320
        self.chunk_proxy['size'] = self.chunk.size
        self.init_content()
        self.assertRaises(exc.FaultyChunk, self.auditor.chunk_audit,
                          self.chunk_path)

    def test_chunk_bad_chunk_size(self):
        self.chunk.size = 320
        self.chunk_proxy['size'] = self.chunk.size
        self.init_content()
        self.assertRaises(exc.FaultyChunk, self.auditor.chunk_audit,
                          self.chunk_path)

    def test_chunk_bad_url(self):
        self.chunk_proxy['url'] = '%s/WRONG_ID' % self.rawx
        self.init_content()
        self.assertRaises(exc.OrphanChunk, self.auditor.chunk_audit,
                          self.chunk_path)

    def test_content_bad_path(self):
        self.content.path = 'BAD_PATH'
        self.init_content()
        self.assertRaises(exc.OrphanChunk, self.auditor.chunk_audit,
                          self.chunk_path)
class Content(object):
    def __init__(self, conf, container_id, metadata, chunks, stgpol_args):
        self.conf = conf
        self.container_id = container_id
        self.metadata = metadata
        self.chunks = ChunksHelper(chunks)
        self.stgpol_args = stgpol_args
        self.logger = get_logger(self.conf)
        self.cs_client = ConscienceClient(conf)
        self.container_client = ContainerClient(self.conf)
        self.blob_client = BlobClient()
        self.session = requests.Session()
        self.content_id = self.metadata["id"]
        self.stgpol_name = self.metadata["policy"]
        self.path = self.metadata["name"]
        self.length = int(self.metadata["length"])
        self.version = self.metadata["version"]
        self.hash = self.metadata["hash"]
        self.mime_type = self.metadata["mime-type"]
        self.chunk_method = self.metadata["chunk-method"]

    def _meta2_get_spare_chunk(self, chunks_notin, chunks_broken):
        spare_data = {
            "notin": ChunksHelper(chunks_notin, False).raw(),
            "broken": ChunksHelper(chunks_broken, False).raw()
        }
        try:
            spare_resp = self.container_client.content_spare(
                cid=self.container_id, content=self.content_id,
                data=spare_data, stgpol=self.stgpol_name)
        except ClientException as e:
            raise exc.SpareChunkException("No spare chunk (%s)" %
                                          e.message)
        url_list = []
        for c in spare_resp["chunks"]:
            url_list.append(c["id"])
        return url_list

    def _meta2_update_spare_chunk(self, current_chunk, new_url):
        old = [{'type': 'chunk',
                'id': current_chunk.url,
                'hash': current_chunk.hash,
                'size': current_chunk.size,
                'pos': current_chunk.pos,
                'content': self.content_id}]
        new = [{'type': 'chunk',
                'id': new_url,
                'hash': current_chunk.hash,
                'size': current_chunk.size,
                'pos': current_chunk.pos,
                'content': self.content_id}]
        update_data = {'old': old, 'new': new}
        self.container_client.container_raw_update(
            cid=self.container_id, data=update_data)

    def _meta2_create_object(self):
        self.container_client.content_create(
            cid=self.container_id, path=self.path,
            content_id=self.content_id, stgpol=self.stgpol_name,
            size=self.length, checksum=self.hash, version=self.version,
            chunk_method=self.chunk_method, mime_type=self.mime_type,
            data=self.chunks.raw())

    def rebuild_chunk(self, chunk_id):
        raise NotImplementedError()

    def upload(self, stream):
        try:
            self._upload(stream)
        except:
            # Keep the stack trace
            exc_info = sys.exc_info()
            for chunk in self.chunks:
                try:
                    self.blob_client.chunk_delete(chunk.url)
                except:
                    self.logger.warn("Failed to delete %s", chunk.url)
            # Raise with the original stack trace
            raise exc_info[0], exc_info[1], exc_info[2]

    def _upload(self, stream):
        raise NotImplementedError()

    def download(self):
        raise NotImplementedError()

    def delete(self):
        self.container_client.content_delete(cid=self.container_id,
                                             path=self.path)

    def move_chunk(self, chunk_id):
        current_chunk = self.chunks.filter(id=chunk_id).one()
        if current_chunk is None:
            raise OrphanChunk("Chunk not found in content")
        other_chunks = self.chunks.filter(
            metapos=current_chunk.metapos).exclude(id=chunk_id).all()
        spare_urls = self._meta2_get_spare_chunk(other_chunks,
                                                 [current_chunk])
        self.logger.debug("copy chunk from %s to %s",
                          current_chunk.url, spare_urls[0])
        self.blob_client.chunk_copy(current_chunk.url, spare_urls[0])
        self._meta2_update_spare_chunk(current_chunk, spare_urls[0])
        try:
            self.blob_client.chunk_delete(current_chunk.url)
        except:
            self.logger.warn("Failed to delete chunk %s" %
                             current_chunk.url)
        current_chunk.url = spare_urls[0]
        return current_chunk.raw()
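A hedged sketch of driving move_chunk() from outside: content_factory is assumed to be a ContentFactory as built in earlier snippets, and both identifiers below are placeholders.

# Hedged usage sketch; container id and chunk id are placeholders.
content = content_factory.get('B90AE1E778C4CB4A7B78A7A1A7ADB311',
                              '0123456789ABCDEF')
raw = content.move_chunk('AABBCCDD00112233')  # copy, update meta2, delete
print(raw['id'])  # URL of the spare chunk now holding the data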
class TestDupContent(BaseTestCase):
    def setUp(self):
        super(TestDupContent, self).setUp()

        if len(self.conf['rawx']) < 3:
            self.skipTest("Not enough rawx. "
                          "Dup tests needs more than 2 rawx to run")

        self.namespace = self.conf['namespace']
        self.account = self.conf['account']
        self.chunk_size = self.conf['chunk_size']
        self.gridconf = {"namespace": self.namespace}
        self.content_factory = ContentFactory(self.gridconf)
        self.container_client = ContainerClient(self.gridconf)
        self.blob_client = BlobClient()
        self.container_name = "TestDupContent%f" % time.time()
        self.container_client.container_create(acct=self.account,
                                               ref=self.container_name)
        self.container_id = cid_from_name(self.account,
                                          self.container_name).upper()

    def tearDown(self):
        super(TestDupContent, self).tearDown()

    def _test_upload(self, stgpol, data_size):
        data = random_data(data_size)
        content = self.content_factory.new(self.container_id, "titi",
                                           len(data), stgpol)
        self.assertEqual(type(content), DupContent)

        content.upload(StringIO.StringIO(data))

        meta, chunks = self.container_client.content_show(
            cid=self.container_id, content=content.content_id)
        chunks = ChunksHelper(chunks)
        self.assertEqual(meta['hash'], md5_data(data))
        self.assertEqual(meta['length'], str(len(data)))
        self.assertEqual(meta['policy'], stgpol)
        self.assertEqual(meta['name'], "titi")

        metachunk_nb = int(math.ceil(float(len(data)) / self.chunk_size))
        if metachunk_nb == 0:
            metachunk_nb = 1  # special case for empty content

        if stgpol == "THREECOPIES":
            nb_copy = 3
        elif stgpol == "TWOCOPIES":
            nb_copy = 2
        elif stgpol == "SINGLE":
            nb_copy = 1

        self.assertEqual(len(chunks), metachunk_nb * nb_copy)

        for pos in range(metachunk_nb):
            chunks_at_pos = chunks.filter(pos=pos)
            self.assertEqual(len(chunks_at_pos), nb_copy)

            data_begin = pos * self.chunk_size
            data_end = pos * self.chunk_size + self.chunk_size
            chunk_hash = md5_data(data[data_begin:data_end])

            for chunk in chunks_at_pos:
                meta, stream = self.blob_client.chunk_get(chunk.url)
                self.assertEqual(md5_stream(stream), chunk_hash)
                self.assertEqual(meta['content_size'], str(len(data)))
                self.assertEqual(meta['content_path'], "titi")
                self.assertEqual(meta['content_cid'], self.container_id)
                self.assertEqual(meta['content_id'], content.content_id)
                self.assertEqual(meta['chunk_id'], chunk.id)
                self.assertEqual(meta['chunk_pos'], str(pos))
                self.assertEqual(meta['chunk_hash'], chunk_hash)

    def test_twocopies_upload_0_byte(self):
        self._test_upload("TWOCOPIES", 0)

    def test_twocopies_upload_1_byte(self):
        self._test_upload("TWOCOPIES", 1)

    def test_twocopies_upload_chunksize_bytes(self):
        self._test_upload("TWOCOPIES", self.chunk_size)

    def test_twocopies_upload_chunksize_plus_1_bytes(self):
        self._test_upload("TWOCOPIES", self.chunk_size + 1)

    def test_single_upload_0_byte(self):
        self._test_upload("SINGLE", 0)

    def test_single_upload_chunksize_plus_1_bytes(self):
        self._test_upload("SINGLE", self.chunk_size + 1)

    def test_chunks_cleanup_when_upload_failed(self):
        data = random_data(2 * self.chunk_size)
        content = self.content_factory.new(self.container_id, "titi",
                                           len(data), "TWOCOPIES")
        self.assertEqual(type(content), DupContent)

        # set bad url for position 1
        for chunk in content.chunks.filter(pos=1):
            chunk.url = "http://127.0.0.1:9/DEADBEEF"

        self.assertRaises(Exception, content.upload,
                          StringIO.StringIO(data))
        for chunk in content.chunks.exclude(pos=1):
            self.assertRaises(NotFound, self.blob_client.chunk_head,
                              chunk.url)

    def _new_content(self, stgpol, data, broken_pos_list=[]):
        old_content = self.content_factory.new(self.container_id, "titi",
                                               len(data), stgpol)
        self.assertEqual(type(old_content), DupContent)

        old_content.upload(StringIO.StringIO(data))

        broken_chunks_info = {}
        for pos, idx in broken_pos_list:
            c = old_content.chunks.filter(pos=pos)[idx]
            meta, stream = self.blob_client.chunk_get(c.url)
            if pos not in broken_chunks_info:
                broken_chunks_info[pos] = {}
            broken_chunks_info[pos][idx] = {
                "url": c.url,
                "id": c.id,
                "hash": c.hash,
                "dl_meta": meta,
                "dl_hash": md5_stream(stream)
            }
            self.blob_client.chunk_delete(c.url)

        # get the new structure of the uploaded content
        return (self.content_factory.get(
            self.container_id, old_content.content_id), broken_chunks_info)

    def _test_rebuild(self, stgpol, data_size, broken_pos_list,
                      full_rebuild_pos):
        data = random_data(data_size)
        content, broken_chunks_info = self._new_content(stgpol, data,
                                                        broken_pos_list)

        rebuild_pos, rebuild_idx = full_rebuild_pos
        rebuild_chunk_info = broken_chunks_info[rebuild_pos][rebuild_idx]
        content.rebuild_chunk(rebuild_chunk_info["id"])

        # get the new structure of the content
        rebuilt_content = self.content_factory.get(self.container_id,
                                                   content.content_id)
        self.assertEqual(type(rebuilt_content), DupContent)

        # find the rebuilt chunk
        for c in rebuilt_content.chunks.filter(pos=rebuild_pos):
            if len(content.chunks.filter(id=c.id)) > 0:
                # not the rebuilt chunk
                # if this chunk is broken, it must not have been rebuilt
                for b_c_i in broken_chunks_info[rebuild_pos].values():
                    if c.id == b_c_i["id"]:
                        with ExpectedException(NotFound):
                            _, _ = self.blob_client.chunk_get(c.url)
                continue
            meta, stream = self.blob_client.chunk_get(c.url)
            self.assertEqual(meta["chunk_id"], c.id)
            self.assertEqual(md5_stream(stream),
                             rebuild_chunk_info["dl_hash"])
            self.assertEqual(c.hash, rebuild_chunk_info["hash"])
            self.assertThat(c.url, NotEquals(rebuild_chunk_info["url"]))
            del meta["chunk_id"]
            del rebuild_chunk_info["dl_meta"]["chunk_id"]
            self.assertEqual(meta, rebuild_chunk_info["dl_meta"])

    def test_2copies_content_0_byte_1broken_rebuild_pos_0_idx_0(self):
        self._test_rebuild("TWOCOPIES", 0, [(0, 0)], (0, 0))

    def test_2copies_content_1_byte_1broken_rebuild_pos_0_idx_1(self):
        self._test_rebuild("TWOCOPIES", 1, [(0, 1)], (0, 1))

    def test_3copies_content_chunksize_bytes_2broken_rebuild_pos_0_idx_1(self):
        if len(self.conf['rawx']) <= 3:
            self.skipTest("Need more than 3 rawx")
        self._test_rebuild("THREECOPIES", self.chunk_size,
                           [(0, 0), (0, 1)], (0, 1))

    def test_3copies_content_2xchksize_bytes_2broken_rebuild_pos_1_idx_2(self):
        if len(self.conf['rawx']) <= 3:
            self.skipTest("Need more than 3 rawx")
        self._test_rebuild("THREECOPIES", 2 * self.chunk_size,
                           [(1, 0), (1, 2)], (1, 2))

    def test_2copies_content_0_byte_2broken_rebuild_pos_0_idx_0(self):
        with ExpectedException(UnrecoverableContent):
            self._test_rebuild("TWOCOPIES", 0, [(0, 0), (0, 1)], (0, 0))

    def _test_download(self, stgpol, data_size, broken_pos_list):
        data = random_data(data_size)
        content, _ = self._new_content(stgpol, data, broken_pos_list)

        downloaded_data = "".join(content.download())

        self.assertEqual(downloaded_data, data)

        for pos, idx in broken_pos_list:
            # check nothing has been rebuilt
            c = content.chunks.filter(pos=pos)[0]
            self.assertRaises(NotFound, self.blob_client.chunk_delete, c.url)

    def test_twocopies_download_content_0_byte_without_broken_chunks(self):
        self._test_download("TWOCOPIES", 0, [])

    def test_twocopies_download_content_0_byte_with_broken_0_0(self):
        self._test_download("TWOCOPIES", 0, [(0, 0)])

    def test_twocopies_download_content_1_byte_without_broken_chunks(self):
        self._test_download("TWOCOPIES", 1, [])

    def test_twocopies_download_content_1_byte_with_broken_0_0(self):
        self._test_download("TWOCOPIES", 1, [(0, 0)])

    def test_twocopies_download_chunksize_bytes_without_broken_chunks(self):
        self._test_download("TWOCOPIES", self.chunk_size, [])

    def test_twocopies_download_2xchuksize_bytes_with_broken_0_0_and_1_0(self):
        self._test_download("TWOCOPIES", self.chunk_size * 2,
                            [(0, 0), (1, 0)])

    def test_twocopies_download_content_chunksize_bytes_2_broken_chunks(self):
        data = random_data(self.chunk_size)
        content, _ = self._new_content("TWOCOPIES", data, [(0, 0), (0, 1)])
        gen = content.download()
        self.assertRaises(UnrecoverableContent, gen.next)

    def test_single_download_content_1_byte_without_broken_chunks(self):
        self._test_download("SINGLE", 1, [])

    def test_single_download_chunksize_bytes_plus_1_without_broken_chunk(self):
        self._test_download("SINGLE", self.chunk_size * 2, [])

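# The upload checks above rest on a simple invariant for duplication
# policies: the chunk listing holds nb_copy chunks per metachunk. A hedged
# helper spelling out that arithmetic (the helper name is illustrative):
def _expected_chunk_count(data_size, chunk_size, nb_copy):
    import math
    metachunk_nb = int(math.ceil(float(data_size) / chunk_size))
    if metachunk_nb == 0:
        metachunk_nb = 1  # empty contents still get one (empty) metachunk
    return metachunk_nb * nb_copy
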
class Checker(object):
    def __init__(self, namespace, concurrency=50,
                 error_file=None, rebuild_file=None, full=True):
        self.pool = GreenPool(concurrency)
        self.error_file = error_file
        self.full = bool(full)
        if self.error_file:
            f = open(self.error_file, 'a')
            self.error_writer = csv.writer(f, delimiter=' ')
        self.rebuild_file = rebuild_file
        if self.rebuild_file:
            fd = open(self.rebuild_file, 'a')
            self.rebuild_writer = csv.writer(fd, delimiter='|')

        conf = {'namespace': namespace}
        self.account_client = AccountClient(conf)
        self.container_client = ContainerClient(conf)
        self.blob_client = BlobClient()

        self.accounts_checked = 0
        self.containers_checked = 0
        self.objects_checked = 0
        self.chunks_checked = 0
        self.account_not_found = 0
        self.container_not_found = 0
        self.object_not_found = 0
        self.chunk_not_found = 0
        self.account_exceptions = 0
        self.container_exceptions = 0
        self.object_exceptions = 0
        self.chunk_exceptions = 0

        self.list_cache = {}
        self.running = {}

    def write_error(self, target):
        error = [target.account]
        if target.container:
            error.append(target.container)
        if target.obj:
            error.append(target.obj)
        if target.chunk:
            error.append(target.chunk)
        self.error_writer.writerow(error)

    def write_rebuilder_input(self, target, obj_meta, ct_meta):
        try:
            cid = ct_meta['system']['sys.name'].split('.', 1)[0]
        except KeyError:
            cid = ct_meta['properties']['sys.name'].split('.', 1)[0]
        self.rebuild_writer.writerow((cid, obj_meta['id'], target.chunk))

    def write_chunk_error(self, target, obj_meta, chunk=None):
        if chunk is not None:
            target = target.copy()
            target.chunk = chunk
        if self.error_file:
            self.write_error(target)
        if self.rebuild_file:
            self.write_rebuilder_input(
                target, obj_meta,
                self.list_cache[(target.account, target.container)][1])

    def _check_chunk_xattr(self, target, obj_meta, xattr_meta):
        error = False
        # Composed position -> erasure coding
        attr_prefix = 'meta' if '.' in obj_meta['pos'] else ''

        attr_key = attr_prefix + 'chunk_size'
        if str(obj_meta['size']) != xattr_meta.get(attr_key):
            print(" Chunk %s '%s' xattr (%s) "
                  "differs from size in meta2 (%s)" %
                  (target, attr_key, xattr_meta.get(attr_key),
                   obj_meta['size']))
            error = True

        attr_key = attr_prefix + 'chunk_hash'
        if obj_meta['hash'] != xattr_meta.get(attr_key):
            print(" Chunk %s '%s' xattr (%s) "
                  "differs from hash in meta2 (%s)" %
                  (target, attr_key, xattr_meta.get(attr_key),
                   obj_meta['hash']))
            error = True
        return error

    def check_chunk(self, target):
        chunk = target.chunk

        obj_listing, obj_meta = self.check_obj(target)
        error = False
        if chunk not in obj_listing:
            print(' Chunk %s missing from object listing' % target)
            error = True
            db_meta = dict()
        else:
            db_meta = obj_listing[chunk]

        try:
            xattr_meta = self.blob_client.chunk_head(chunk, xattr=self.full)
        except exc.NotFound as e:
            self.chunk_not_found += 1
            error = True
            print(' Not found chunk "%s": %s' % (target, str(e)))
        except Exception as e:
            self.chunk_exceptions += 1
            error = True
            print(' Exception chunk "%s": %s' % (target, str(e)))
        else:
            if db_meta and self.full:
                error = self._check_chunk_xattr(target, db_meta, xattr_meta)

        if error:
            self.write_chunk_error(target, obj_meta)

        self.chunks_checked += 1

    def check_obj_policy(self, target, obj_meta, chunks):
        """
        Check that the list of chunks of an object matches
        the object's storage policy.
        """
        stg_met = STORAGE_METHODS.load(obj_meta['chunk_method'])
        chunks_by_pos = _sort_chunks(chunks, stg_met.ec)
        if stg_met.ec:
            required = stg_met.ec_nb_data + stg_met.ec_nb_parity
        else:
            required = stg_met.nb_copy
        for pos, clist in chunks_by_pos.iteritems():
            if len(clist) < required:
                print(' Missing %d chunks at position %s of %s' % (
                    required - len(clist), pos, target))
                if stg_met.ec:
                    subs = {x['num'] for x in clist}
                    for sub in range(required):
                        if sub not in subs:
                            self.write_chunk_error(target, obj_meta,
                                                   '%d.%d' % (pos, sub))
                else:
                    self.write_chunk_error(target, obj_meta, str(pos))

    def check_obj(self, target, recurse=False):
        account = target.account
        container = target.container
        obj = target.obj

        if (account, container, obj) in self.running:
            self.running[(account, container, obj)].wait()
        if (account, container, obj) in self.list_cache:
            return self.list_cache[(account, container, obj)]
        self.running[(account, container, obj)] = Event()
        print('Checking object "%s"' % target)
        container_listing, ct_meta = self.check_container(target)
        error = False
        if obj not in container_listing:
            print(' Object %s missing from container listing' % target)
            error = True
            # checksum = None
        else:
            # TODO check checksum match
            # checksum = container_listing[obj]['hash']
            pass

        results = []
        meta = dict()
        try:
            meta, results = self.container_client.content_locate(
                account=account, reference=container, path=obj)
        except exc.NotFound as e:
            self.object_not_found += 1
            error = True
            print(' Not found object "%s": %s' % (target, str(e)))
        except Exception as e:
            self.object_exceptions += 1
            error = True
            print(' Exception object "%s": %s' % (target, str(e)))

        chunk_listing = dict()
        for chunk in results:
            chunk_listing[chunk['url']] = chunk
        self.check_obj_policy(target.copy(), meta, results)
        self.objects_checked += 1

        self.list_cache[(account, container, obj)] = (chunk_listing, meta)
        self.running[(account, container, obj)].send(True)
        del self.running[(account, container, obj)]

        if recurse:
            for chunk in chunk_listing:
                t = target.copy()
                t.chunk = chunk
                self.pool.spawn_n(self.check_chunk, t)
        if error and self.error_file:
            self.write_error(target)
        return chunk_listing, meta

    def check_container(self, target, recurse=False):
        account = target.account
        container = target.container

        if (account, container) in self.running:
            self.running[(account, container)].wait()
        if (account, container) in self.list_cache:
            return self.list_cache[(account, container)]
        self.running[(account, container)] = Event()
        print('Checking container "%s"' % target)
        account_listing = self.check_account(target)
        error = False
        if container not in account_listing:
            error = True
            print(' Container %s missing from account listing' % target)

        marker = None
        results = []
        ct_meta = dict()
        while True:
            try:
                _, resp = self.container_client.content_list(
                    account=account, reference=container, marker=marker)
            except exc.NotFound as e:
                self.container_not_found += 1
                error = True
                print(' Not found container "%s": %s' % (target, str(e)))
                break
            except Exception as e:
                self.container_exceptions += 1
                error = True
                print(' Exception container "%s": %s' % (target, str(e)))
                break

            if resp['objects']:
                marker = resp['objects'][-1]['name']
                results.extend(resp['objects'])
            else:
                ct_meta = resp
                ct_meta.pop('objects')
                break

        container_listing = dict()
        for obj in results:
            container_listing[obj['name']] = obj
        self.containers_checked += 1

        self.list_cache[(account, container)] = container_listing, ct_meta
        self.running[(account, container)].send(True)
        del self.running[(account, container)]

        if recurse:
            for obj in container_listing:
                t = target.copy()
                t.obj = obj
                self.pool.spawn_n(self.check_obj, t, True)
        if error and self.error_file:
            self.write_error(target)
        return container_listing, ct_meta

    def check_account(self, target, recurse=False):
        account = target.account

        if account in self.running:
            self.running[account].wait()
        if account in self.list_cache:
            return self.list_cache[account]
        self.running[account] = Event()
        print('Checking account "%s"' % target)
        error = False
        marker = None
        results = []
        while True:
            try:
                resp = self.account_client.container_list(
                    account, marker=marker)
            except Exception as e:
                self.account_exceptions += 1
                error = True
                print(' Exception account "%s": %s' % (target, str(e)))
                break
            if resp['listing']:
                marker = resp['listing'][-1][0]
            else:
                break
            results.extend(resp['listing'])

        containers = dict()
        for e in results:
            containers[e[0]] = (e[1], e[2])

        self.list_cache[account] = containers
        self.running[account].send(True)
        del self.running[account]
        self.accounts_checked += 1

        if recurse:
            for container in containers:
                t = target.copy()
                t.container = container
                self.pool.spawn_n(self.check_container, t, True)
        if error and self.error_file:
            self.write_error(target)
        return containers

    def check(self, target):
        if target.chunk and target.obj and target.container:
            self.pool.spawn_n(self.check_chunk, target)
        elif target.obj and target.container:
            self.pool.spawn_n(self.check_obj, target, True)
        elif target.container:
            self.pool.spawn_n(self.check_container, target, True)
        else:
            self.pool.spawn_n(self.check_account, target, True)

    def wait(self):
        self.pool.waitall()

    def report(self):
        def _report_stat(name, stat):
            print("{0:18}: {1}".format(name, stat))

        print()
        print('Report')
        _report_stat("Accounts checked", self.accounts_checked)
        if self.account_not_found:
            _report_stat("Missing accounts", self.account_not_found)
        if self.account_exceptions:
            _report_stat("Exceptions", self.account_exceptions)
        print()
        _report_stat("Containers checked", self.containers_checked)
        if self.container_not_found:
            _report_stat("Missing containers", self.container_not_found)
        if self.container_exceptions:
            _report_stat("Exceptions", self.container_exceptions)
        print()
        _report_stat("Objects checked", self.objects_checked)
        if self.object_not_found:
            _report_stat("Missing objects", self.object_not_found)
        if self.object_exceptions:
            _report_stat("Exceptions", self.object_exceptions)
        print()
        _report_stat("Chunks checked", self.chunks_checked)
        if self.chunk_not_found:
            _report_stat("Missing chunks", self.chunk_not_found)
        if self.chunk_exceptions:
            _report_stat("Exceptions", self.chunk_exceptions)

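# Hedged sketch of driving the Checker above through a full recursive scan.
# _TargetSketch is a stand-in exposing only the attributes Checker reads
# (account/container/obj/chunk and copy()); it is not the project's own
# target type, and the helper names are illustrative.
class _TargetSketch(object):
    def __init__(self, account, container=None, obj=None, chunk=None):
        self.account = account
        self.container = container
        self.obj = obj
        self.chunk = chunk

    def copy(self):
        return _TargetSketch(self.account, self.container,
                             self.obj, self.chunk)


def _check_account_sketch(namespace, account):
    checker = Checker(namespace, concurrency=10)
    # no container/obj/chunk set: recurses account -> container -> obj -> chunk
    checker.check(_TargetSketch(account))
    checker.wait()
    checker.report()
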
class BlobMoverWorker(object):
    def __init__(self, conf, logger, volume):
        self.conf = conf
        self.logger = logger or get_logger(conf)
        self.volume = volume
        self.run_time = 0
        self.passes = 0
        self.errors = 0
        self.last_reported = 0
        self.last_usage_check = 0
        self.chunks_run_time = 0
        self.bytes_running_time = 0
        self.bytes_processed = 0
        self.total_bytes_processed = 0
        self.total_chunks_processed = 0
        self.usage_target = int_value(
            conf.get('usage_target'), 0)
        self.usage_check_interval = int_value(
            conf.get('usage_check_interval'), 3600)
        self.report_interval = int_value(
            conf.get('report_interval'), 3600)
        self.max_chunks_per_second = int_value(
            conf.get('chunks_per_second'), 30)
        self.max_bytes_per_second = int_value(
            conf.get('bytes_per_second'), 10000000)
        self.blob_client = BlobClient()
        self.container_client = ContainerClient(conf)

    def mover_pass(self):
        self.namespace, self.address = check_volume(self.volume)

        start_time = report_time = time.time()

        total_errors = 0
        mover_time = 0

        paths = paths_gen(self.volume)

        for path in paths:
            loop_time = time.time()

            now = time.time()
            if now - self.last_usage_check >= self.usage_check_interval:
                used, total = statfs(self.volume)
                usage = (float(used) / total) * 100
                if usage <= self.usage_target:
                    self.logger.info(
                        'current usage %.2f%%: target reached (%.2f%%)',
                        usage, self.usage_target)
                    self.last_usage_check = now
                    break

            self.safe_chunk_move(path)
            self.chunks_run_time = ratelimit(
                self.chunks_run_time,
                self.max_chunks_per_second
            )
            self.total_chunks_processed += 1
            now = time.time()

            if now - self.last_reported >= self.report_interval:
                self.logger.info(
                    '%(start_time)s '
                    '%(passes)d '
                    '%(errors)d '
                    '%(c_rate).2f '
                    '%(b_rate).2f '
                    '%(total).2f '
                    '%(mover_time).2f '
                    '%(mover_rate).2f' % {
                        'start_time': time.ctime(report_time),
                        'passes': self.passes,
                        'errors': self.errors,
                        'c_rate': self.passes / (now - report_time),
                        'b_rate': self.bytes_processed / (now - report_time),
                        'total': (now - start_time),
                        'mover_time': mover_time,
                        'mover_rate': mover_time / (now - start_time)
                    }
                )
                report_time = now
                total_errors += self.errors
                self.passes = 0
                self.bytes_processed = 0
                self.last_reported = now
            mover_time += (now - loop_time)

        elapsed = (time.time() - start_time) or 0.000001
        self.logger.info(
            '%(elapsed).02f '
            '%(errors)d '
            '%(chunk_rate).2f '
            '%(bytes_rate).2f '
            '%(mover_time).2f '
            '%(mover_rate).2f' % {
                'elapsed': elapsed,
                'errors': total_errors + self.errors,
                'chunk_rate': self.total_chunks_processed / elapsed,
                'bytes_rate': self.total_bytes_processed / elapsed,
                'mover_time': mover_time,
                'mover_rate': mover_time / elapsed
            }
        )

    def safe_chunk_move(self, path):
        try:
            self.chunk_move(path)
        except Exception as e:
            self.errors += 1
            self.logger.error('ERROR while moving chunk %s: %s', path, e)
        self.passes += 1

    def load_chunk_metadata(self, path):
        with open(path) as f:
            return read_chunk_metadata(f)

    def chunk_move(self, path):
        meta = self.load_chunk_metadata(path)
        content_cid = meta['content_cid']
        content_path = meta['content_path']

        chunk_url = 'http://%s/%s' % (self.address, meta['chunk_id'])

        try:
            data = self.container_client.content_show(
                cid=content_cid, path=content_path)
        except exc.NotFound:
            raise exc.OrphanChunk('Content not found')
        current_chunk = None
        notin = []
        for c in data:
            if c['pos'] == meta['chunk_pos']:
                notin.append(c)
        for c in notin:
            if c['url'] == chunk_url:
                current_chunk = c
                notin.remove(c)
        if not current_chunk:
            raise exc.OrphanChunk('Chunk not found in content')
        spare_data = {'notin': notin, 'broken': [current_chunk], 'size': 0}
        spare_resp = self.container_client.content_spare(
            cid=content_cid, path=content_path, data=spare_data)

        new_chunk = spare_resp['chunks'][0]
        self.blob_client.chunk_copy(
            current_chunk['url'], new_chunk['id'])

        old = [{'type': 'chunk',
                'id': current_chunk['url'],
                'hash': meta['chunk_hash'],
                'size': int(meta['chunk_size'])}]
        new = [{'type': 'chunk',
                'id': new_chunk['id'],
                'hash': meta['chunk_hash'],
                'size': int(meta['chunk_size'])}]
        update_data = {'old': old, 'new': new}

        self.container_client.container_raw_update(
            cid=content_cid, data=update_data)

        self.blob_client.chunk_delete(current_chunk['url'])

        self.logger.info(
            'moved chunk %s to %s', current_chunk['url'], new_chunk['id'])

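# Hedged sketch of running a single mover pass over one rawx volume; the
# conf values, volume path and helper name are placeholders.
def _mover_pass_sketch(volume_path):
    conf = {'usage_target': 50}  # stop once volume usage drops to 50%
    worker = BlobMoverWorker(conf, get_logger(conf), volume_path)
    worker.mover_pass()
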
class TestECContent(BaseTestCase):
    def setUp(self):
        super(TestECContent, self).setUp()

        if len(self.conf['services']['rawx']) < 12:
            self.skipTest("Not enough rawx. "
                          "EC tests needs at least 12 rawx to run")

        self.namespace = self.conf['namespace']
        self.account = self.conf['account']
        self.chunk_size = self.conf['chunk_size']
        self.gridconf = {"namespace": self.namespace}
        self.content_factory = ContentFactory(self.gridconf)
        self.container_client = ContainerClient(self.gridconf)
        self.blob_client = BlobClient()
        self.container_name = "TestECContent%f" % time.time()
        self.container_client.container_create(acct=self.account,
                                               ref=self.container_name)
        self.container_id = cid_from_name(self.account,
                                          self.container_name).upper()
        self.content = random_str(64)
        self.stgpol = "EC"
        self.size = 1024 * 1024 + 320
        self.k = 6
        self.m = 3

    def tearDown(self):
        super(TestECContent, self).tearDown()

    def random_chunks(self, nb):
        indexes = random.sample(xrange(self.k + self.m), nb)
        return ["0.%s" % i for i in indexes]

    def _test_create(self, data_size):
        # generate random test data
        data = random_data(data_size)
        # using factory create new EC content
        content = self.content_factory.new(
            self.container_id, self.content, len(data), self.stgpol)
        # verify the factory gave us an ECContent
        self.assertEqual(type(content), ECContent)

        # perform the content creation
        content.create(StringIO(data))

        meta, chunks = self.container_client.content_show(
            cid=self.container_id, content=content.content_id)

        # verify metadata
        chunks = ChunksHelper(chunks)
        self.assertEqual(meta['hash'], md5_data(data))
        self.assertEqual(meta['length'], str(len(data)))
        self.assertEqual(meta['policy'], self.stgpol)
        self.assertEqual(meta['name'], self.content)

        metachunk_nb = int(math.ceil(float(len(data)) / self.chunk_size)) \
            if len(data) != 0 else 1

        # verify each metachunk
        for metapos in range(metachunk_nb):
            chunks_at_pos = content.chunks.filter(metapos=metapos)
            for chunk in chunks_at_pos:
                meta, stream = self.blob_client.chunk_get(chunk.url)
                self.assertEqual(meta['metachunk_size'], str(chunk.size))
                self.assertEqual(meta['metachunk_hash'], chunk.checksum)
                self.assertEqual(meta['content_path'], self.content)
                self.assertEqual(meta['container_id'], self.container_id)
                self.assertEqual(meta['content_id'], content.content_id)
                self.assertEqual(meta['chunk_id'], chunk.id)
                self.assertEqual(meta['chunk_pos'], chunk.pos)
                self.assertEqual(meta['chunk_hash'], md5_stream(stream))

    def test_create_0_byte(self):
        self._test_create(0)

    def test_create_1_byte(self):
        self._test_create(1)

    def test_create(self):
        self._test_create(DAT_LEGIT_SIZE)

    def _test_rebuild(self, data_size, broken_pos_list):
        # generate test data
        data = os.urandom(data_size)
        # create initial content
        old_content = self.content_factory.new(
            self.container_id, self.content, len(data), self.stgpol)
        # verify factory work as intended
        self.assertEqual(type(old_content), ECContent)

        # perform initial content creation
        old_content.create(StringIO(data))

        uploaded_content = self.content_factory.get(self.container_id,
                                                    old_content.content_id)

        # break the content
        old_info = {}
        for pos in broken_pos_list:
            old_info[pos] = {}
            c = uploaded_content.chunks.filter(pos=pos)[0]
            old_info[pos]["url"] = c.url
            old_info[pos]["id"] = c.id
            old_info[pos]["hash"] = c.checksum
            chunk_id_to_rebuild = c.id
            meta, stream = self.blob_client.chunk_get(c.url)
            old_info[pos]["dl_meta"] = meta
            old_info[pos]["dl_hash"] = md5_stream(stream)
            # delete the chunk
            self.blob_client.chunk_delete(c.url)

        # rebuild the broken chunks
        uploaded_content.rebuild_chunk(chunk_id_to_rebuild)

        rebuilt_content = self.content_factory.get(self.container_id,
                                                   uploaded_content.content_id)
        # sanity check
        self.assertEqual(type(rebuilt_content), ECContent)

        # verify rebuild result
        for pos in broken_pos_list:
            c = rebuilt_content.chunks.filter(pos=pos)[0]
            rebuilt_meta, rebuilt_stream = self.blob_client.chunk_get(c.url)
            self.assertEqual(rebuilt_meta["chunk_id"], c.id)
            self.assertEqual(md5_stream(rebuilt_stream),
                             old_info[pos]["dl_hash"])
            self.assertEqual(c.checksum, old_info[pos]["hash"])
            self.assertNotEqual(c.url, old_info[pos]["url"])
            del old_info[pos]["dl_meta"]["chunk_id"]
            del rebuilt_meta["chunk_id"]
            self.assertEqual(rebuilt_meta, old_info[pos]["dl_meta"])

    def test_content_0_byte_rebuild(self):
        self._test_rebuild(0, self.random_chunks(1))

    def test_content_0_byte_rebuild_advanced(self):
        self._test_rebuild(0, self.random_chunks(3))

    def test_content_1_byte_rebuild(self):
        self._test_rebuild(1, self.random_chunks(1))

    def test_content_1_byte_rebuild_advanced(self):
        self._test_rebuild(1, self.random_chunks(3))

    def test_content_rebuild(self):
        self._test_rebuild(DAT_LEGIT_SIZE, self.random_chunks(1))

    def test_content_rebuild_advanced(self):
        self._test_rebuild(DAT_LEGIT_SIZE, self.random_chunks(3))

    def test_content_rebuild_unrecoverable(self):
        self.assertRaises(
            UnrecoverableContent, self._test_rebuild, DAT_LEGIT_SIZE,
            self.random_chunks(4))

    def _new_content(self, data, broken_pos_list=[]):
        old_content = self.content_factory.new(
            self.container_id, self.content, len(data), self.stgpol)
        self.assertEqual(type(old_content), ECContent)

        old_content.create(StringIO(data))

        # break content
        for pos in broken_pos_list:
            c = old_content.chunks.filter(pos=pos)[0]
            self.blob_client.chunk_delete(c.url)

        # get the new structure of the uploaded content
        return self.content_factory.get(self.container_id,
                                        old_content.content_id)

    def test_orphan_chunk(self):
        content = self._new_content(random_data(10))

        self.assertRaises(OrphanChunk, content.rebuild_chunk, "invalid")

    def _test_fetch(self, data_size, broken_pos_list=None):
        broken_pos_list = broken_pos_list or []
        test_data = random_data(data_size)
        content = self._new_content(test_data, broken_pos_list)

        data = "".join(content.fetch())

        self.assertEqual(len(data), len(test_data))
        self.assertEqual(md5_data(data), md5_data(test_data))

        # verify that chunks are broken
        for pos in broken_pos_list:
            chunk = content.chunks.filter(pos=pos)[0]
            self.assertRaises(
                NotFound, self.blob_client.chunk_delete, chunk.url)

    def test_fetch_content_0_byte(self):
        self._test_fetch(0)

    def test_fetch_content_1_byte(self):
        self._test_fetch(1)

    def test_fetch_content(self):
        self._test_fetch(DAT_LEGIT_SIZE)

    def test_fetch_content_0_byte_broken(self):
        self._test_fetch(0, self.random_chunks(3))

    def test_fetch_content_1_byte_broken(self):
        self._test_fetch(1, self.random_chunks(3))

    def test_fetch_content_broken(self):
        self._test_fetch(DAT_LEGIT_SIZE, self.random_chunks(3))

    def test_fetch_content_unrecoverable(self):
        broken_chunks = self.random_chunks(4)
        self.assertRaises(
            OioException, self._test_fetch, DAT_LEGIT_SIZE, broken_chunks)

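# The EC tests above address chunks by composed position
# "<metapos>.<subpos>", with k data + m parity fragments per metachunk.
# A hedged helper (illustrative name) listing every sub-position of one
# metachunk under that convention:
def _ec_positions_sketch(metapos, k, m):
    return ["%d.%d" % (metapos, sub) for sub in range(k + m)]
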
class TestContentFactory(BaseTestCase):
    def setUp(self):
        super(TestContentFactory, self).setUp()
        self.namespace = self.conf['namespace']
        self.chunk_size = self.conf['chunk_size']
        self.gridconf = {"namespace": self.namespace}
        self.content_factory = ContentFactory(self.gridconf)
        self.container_name = "TestContentFactory%f" % time.time()
        self.blob_client = BlobClient()
        self.container_client = ContainerClient(self.gridconf)
        self.container_client.container_create(acct=self.account,
                                               ref=self.container_name)
        self.container_id = cid_from_name(self.account,
                                          self.container_name).upper()

    def tearDown(self):
        super(TestContentFactory, self).tearDown()

    def test_extract_datasec(self):
        self.content_factory.ns_info = {
            "data_security": {
                "DUPONETWO": "DUP:distance=1|nb_copy=2",
                "RAIN": "RAIN:k=6|m=2|algo=liber8tion"
            },
            "storage_policy": {
                "RAIN": "NONE:RAIN:NONE",
                "SINGLE": "NONE:NONE:NONE",
                "TWOCOPIES": "NONE:DUPONETWO:NONE"
            }
        }

        ds_type, ds_args = self.content_factory._extract_datasec("RAIN")
        self.assertEqual(ds_type, "RAIN")
        self.assertEqual(ds_args, {
            "k": "6",
            "m": "2",
            "algo": "liber8tion"
        })

        ds_type, ds_args = self.content_factory._extract_datasec("SINGLE")
        self.assertEqual(ds_type, "DUP")
        self.assertEqual(ds_args, {
            "nb_copy": "1",
            "distance": "0"
        })

        ds_type, ds_args = self.content_factory._extract_datasec("TWOCOPIES")
        self.assertEqual(ds_type, "DUP")
        self.assertEqual(ds_args, {
            "nb_copy": "2",
            "distance": "1"
        })

        self.assertRaises(InconsistentContent,
                          self.content_factory._extract_datasec,
                          "UnKnOwN")

    def test_get_rain(self):
        meta = {
            "chunk-method": "plain/rain?algo=liber8tion&k=6&m=2",
            "ctime": "1450176946",
            "deleted": "False",
            "hash": "E952A419957A6E405BFC53EC65483F73",
            "hash-method": "md5",
            "id": "3FA2C4A1ED2605005335A276890EC458",
            "length": "658",
            "mime-type": "application/octet-stream",
            "name": "tox.ini",
            "policy": "RAIN",
            "version": "1450176946676289"
        }
        chunks = [
            {
                "url": "http://127.0.0.1:6012/A0A0",
                "pos": "0.p0", "size": 512,
                "hash": "E7D4E4AD460971CA2E3141F2102308D4"},
            {
                "url": "http://127.0.0.1:6010/A01",
                "pos": "0.1", "size": 146,
                "hash": "760AB5DA7C51A3654F1CA622687CD6C3"},
            {
                "url": "http://127.0.0.1:6011/A00",
                "pos": "0.0", "size": 512,
                "hash": "B1D08B86B8CAA90A2092CCA0DF9201DB"},
            {
                "url": "http://127.0.0.1:6013/A0A1",
                "pos": "0.p1", "size": 512,
                "hash": "DA9D7F72AEEA5791565724424CE45C16"}
        ]
        self.content_factory.container_client.content_show = Mock(
            return_value=(meta, chunks))
        c = self.content_factory.get("xxx_container_id", "xxx_content_id")
        self.assertEqual(type(c), RainContent)
        self.assertEqual(c.content_id, "3FA2C4A1ED2605005335A276890EC458")
        self.assertEqual(c.length, 658)
        self.assertEqual(c.path, "tox.ini")
        self.assertEqual(c.version, "1450176946676289")
        self.assertEqual(c.algo, "liber8tion")
        self.assertEqual(c.k, 6)
        self.assertEqual(c.m, 2)
        self.assertEqual(len(c.chunks), 4)
        self.assertEqual(c.chunks[0].raw(), chunks[2])
        self.assertEqual(c.chunks[1].raw(), chunks[1])
        self.assertEqual(c.chunks[2].raw(), chunks[0])
        self.assertEqual(c.chunks[3].raw(), chunks[3])

    def test_get_dup(self):
        meta = {
            "chunk-method": "plain/bytes",
            "ctime": "1450176946",
            "deleted": "False",
            "hash": "E952A419957A6E405BFC53EC65483F73",
            "hash-method": "md5",
            "id": "3FA2C4A1ED2605005335A276890EC458",
            "length": "658",
            "mime-type": "application/octet-stream",
            "name": "tox.ini",
            "policy": "TWOCOPIES",
            "version": "1450176946676289"
        }
        chunks = [
            {
                "url": "http://127.0.0.1:6010/A0",
                "pos": "0", "size": 658,
                "hash": "E952A419957A6E405BFC53EC65483F73"},
            {
                "url": "http://127.0.0.1:6011/A1",
                "pos": "0", "size": 658,
                "hash": "E952A419957A6E405BFC53EC65483F73"}
        ]
        self.content_factory.container_client.content_show = Mock(
            return_value=(meta, chunks))
        c = self.content_factory.get("xxx_container_id", "xxx_content_id")
        self.assertEqual(type(c), DupContent)
        self.assertEqual(c.content_id, "3FA2C4A1ED2605005335A276890EC458")
        self.assertEqual(c.length, 658)
        self.assertEqual(c.path, "tox.ini")
        self.assertEqual(c.version, "1450176946676289")
        self.assertEqual(c.nb_copy, 2)
        self.assertEqual(c.distance, 1)
        self.assertEqual(len(c.chunks), 2)
        self.assertEqual(c.chunks[0].raw(), chunks[0])
        self.assertEqual(c.chunks[1].raw(), chunks[1])

    def test_get_unknown_content(self):
        self.assertRaises(ContentNotFound, self.content_factory.get,
                          self.container_id, "1234")

    def test_new_rain(self):
        meta = {
            "chunk-method": "plain/rain?algo=liber8tion&k=6&m=2",
            "ctime": "1450341162",
            "deleted": "False",
            "hash": "",
            "hash-method": "md5",
            "id": "F4B1C8DD132705007DE8B43D0709DAA2",
            "length": "1000",
            "mime-type": "application/octet-stream",
            "name": "titi",
            "policy": "RAIN",
            "version": "1450341162332663"
        }
        chunks = [
            {
                "url": "http://127.0.0.1:6010/0_p1",
                "pos": "0.p1", "size": 1048576,
                "hash": "00000000000000000000000000000000"},
            {
                "url": "http://127.0.0.1:6011/0_p0",
                "pos": "0.p0", "size": 1048576,
                "hash": "00000000000000000000000000000000"},
            {
                "url": "http://127.0.0.1:6016/0_1",
                "pos": "0.1", "size": 1048576,
                "hash": "00000000000000000000000000000000"},
            {
                "url": "http://127.0.0.1:6017/0_0",
                "pos": "0.0", "size": 1048576,
                "hash": "00000000000000000000000000000000"}
        ]
        self.content_factory.container_client.content_prepare = Mock(
            return_value=(meta, chunks))
        c = self.content_factory.new("xxx_container_id", "titi",
                                     1000, "RAIN")
        self.assertEqual(type(c), RainContent)
        self.assertEqual(c.content_id, "F4B1C8DD132705007DE8B43D0709DAA2")
        self.assertEqual(c.length, 1000)
        self.assertEqual(c.path, "titi")
        self.assertEqual(c.version, "1450341162332663")
        self.assertEqual(c.algo, "liber8tion")
        self.assertEqual(c.k, 6)
        self.assertEqual(c.m, 2)
        self.assertEqual(len(c.chunks), 4)
        self.assertEqual(c.chunks[0].raw(), chunks[3])
        self.assertEqual(c.chunks[1].raw(), chunks[2])
        self.assertEqual(c.chunks[2].raw(), chunks[1])
        self.assertEqual(c.chunks[3].raw(), chunks[0])

    def _new_content(self, stgpol, data, path="titi"):
        old_content = self.content_factory.new(self.container_id, path,
                                               len(data), stgpol)

        old_content.upload(StringIO.StringIO(data))
        return self.content_factory.get(self.container_id,
                                        old_content.content_id)

    def _test_change_policy(self, data_size, old_policy, new_policy):
        if (old_policy == "RAIN" or new_policy == "RAIN") \
                and len(self.conf['rawx']) < 8:
            self.skipTest("RAIN: Need at least 8 rawx to run")

        data = random_data(data_size)
        obj_type = {
            "SINGLE": DupContent,
            "TWOCOPIES": DupContent,
            "THREECOPIES": DupContent,
            "RAIN": RainContent
        }
        old_content = self._new_content(old_policy, data)
        self.assertEqual(type(old_content), obj_type[old_policy])

        changed_content = self.content_factory.change_policy(
            old_content.container_id, old_content.content_id, new_policy)

        self.assertRaises(NotFound, self.container_client.content_show,
                          self.account,
                          cid=old_content.container_id,
                          content=old_content.content_id)

        new_content = self.content_factory.get(self.container_id,
                                               changed_content.content_id)
        self.assertEqual(type(new_content), obj_type[new_policy])

        downloaded_data = "".join(new_content.download())

        self.assertEqual(downloaded_data, data)

    def test_change_content_0_byte_policy_single_to_rain(self):
        self._test_change_policy(0, "SINGLE", "RAIN")

    def test_change_content_0_byte_policy_rain_to_twocopies(self):
        self._test_change_policy(0, "RAIN", "TWOCOPIES")

    def test_change_content_1_byte_policy_single_to_rain(self):
        self._test_change_policy(1, "SINGLE", "RAIN")

    def test_change_content_chunksize_bytes_policy_twocopies_to_rain(self):
        self._test_change_policy(self.chunk_size, "TWOCOPIES", "RAIN")

    def test_change_content_2xchunksize_bytes_policy_threecopies_to_rain(self):
        self._test_change_policy(self.chunk_size * 2, "THREECOPIES", "RAIN")

    def test_change_content_1_byte_policy_rain_to_threecopies(self):
        self._test_change_policy(1, "RAIN", "THREECOPIES")

    def test_change_content_chunksize_bytes_policy_rain_to_twocopies(self):
        self._test_change_policy(self.chunk_size, "RAIN", "TWOCOPIES")

    def test_change_content_2xchunksize_bytes_policy_rain_to_single(self):
        self._test_change_policy(self.chunk_size * 2, "RAIN", "SINGLE")

    def test_change_content_0_byte_policy_twocopies_to_threecopies(self):
        self._test_change_policy(0, "TWOCOPIES", "THREECOPIES")

    def test_change_content_chunksize_bytes_policy_single_to_twocopies(self):
        self._test_change_policy(self.chunk_size, "SINGLE", "TWOCOPIES")

    def test_change_content_2xchunksize_bytes_policy_3copies_to_single(self):
        self._test_change_policy(self.chunk_size * 2, "THREECOPIES",
                                 "SINGLE")

    def test_change_content_with_same_policy(self):
        data = random_data(10)
        old_content = self._new_content("TWOCOPIES", data)
        changed_content = self.content_factory.change_policy(
            old_content.container_id, old_content.content_id, "TWOCOPIES")
        self.assertEqual(old_content.content_id, changed_content.content_id)

    def test_change_policy_unknown_content(self):
        self.assertRaises(ContentNotFound,
                          self.content_factory.change_policy,
                          self.container_id, "1234", "SINGLE")

    def test_change_policy_unknown_storage_policy(self):
        data = random_data(10)
        old_content = self._new_content("TWOCOPIES", data)
        self.assertRaises(ClientException,
                          self.content_factory.change_policy,
                          self.container_id, old_content.content_id,
                          "UnKnOwN")

    def _test_move_chunk(self, policy):
        data = random_data(self.chunk_size)
        content = self._new_content(policy, data)
        chunk_id = content.chunks.filter(metapos=0)[0].id
        chunk_url = content.chunks.filter(metapos=0)[0].url
        chunk_meta, chunk_stream = self.blob_client.chunk_get(chunk_url)
        chunk_hash = md5_stream(chunk_stream)
        new_chunk = content.move_chunk(chunk_id)

        content_updated = self.content_factory.get(self.container_id,
                                                   content.content_id)

        hosts = []
        for c in content_updated.chunks.filter(metapos=0):
            self.assertThat(hosts, Not(Contains(c.host)))
            self.assertNotEquals(c.id, chunk_id)
            hosts.append(c.host)

        new_chunk_meta, new_chunk_stream = self.blob_client.chunk_get(
            new_chunk["url"])
        new_chunk_hash = md5_stream(new_chunk_stream)

        self.assertEqual(new_chunk_hash, chunk_hash)

        del chunk_meta["chunk_id"]
        del new_chunk_meta["chunk_id"]
        self.assertEqual(new_chunk_meta, chunk_meta)

    def test_single_move_chunk(self):
        self._test_move_chunk("SINGLE")

    def test_twocopies_move_chunk(self):
        self._test_move_chunk("TWOCOPIES")

    def test_rain_move_chunk(self):
        if len(self.conf['rawx']) < 9:
            self.skipTest("Need more than 8 rawx")
        self._test_move_chunk("RAIN")

    def test_move_chunk_not_in_content(self):
        data = random_data(self.chunk_size)
        content = self._new_content("TWOCOPIES", data)
        with ExpectedException(OrphanChunk):
            content.move_chunk("1234")

    def test_strange_paths(self):
        strange_paths = [
            "Annual report.txt",
            "foo+bar=foobar.txt",
            "100%_bug_free.c",
            "forward/slash/allowed",
            "I\\put\\backslashes\\and$dollar$signs$in$file$names",
            "Je suis tombé sur la tête, mais ça va bien.",
            "%s%f%u%d%%",
            "carriage\rreturn",
            "line\nfeed",
            "ta\tbu\tla\ttion",
            "controlchars",
        ]
        answers = dict()
        for cname in strange_paths:
            content = self._new_content("SINGLE", "nobody cares", cname)
            answers[cname] = content

        listing = self.container_client.container_list(self.account,
                                                       self.container_name)
        obj_set = {k["name"].encode("utf8", "ignore")
                   for k in listing["objects"]}
        try:
            # Ensure the saved path is the one we gave the object
            for cname in answers:
                self.assertEqual(cname, answers[cname].path)
            # Ensure all objects appear in listing
            for cname in strange_paths:
                self.assertIn(cname, obj_set)
        finally:
            # Cleanup
            for cname in answers:
                try:
                    answers[cname].delete()
                except:
                    pass

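# Hedged sketch of the change_policy flow tested above: the factory creates
# a new content under the target policy, copies the data, then drops the old
# content (hence content_show on the old id raises NotFound afterwards).
# The helper name is illustrative.
def _change_policy_sketch(factory, container_id, content_id, new_policy):
    changed = factory.change_policy(container_id, content_id, new_policy)
    # a new content id, unless the policy was already the requested one
    return changed.content_id
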
class TestRebuilderCrawler(BaseTestCase):
    def setUp(self):
        super(TestRebuilderCrawler, self).setUp()

        self.namespace = self.conf['namespace']
        self.account = self.conf['account']

        self.gridconf = {"namespace": self.namespace}
        self.container_client = ContainerClient(self.gridconf)
        self.blob_client = BlobClient()

        self.container_name = "TestRebuilderCrawler%d" % int(time.time())
        self.container_client.container_create(acct=self.account,
                                               ref=self.container_name)

    def _push_content(self, content):
        for c in content.chunks:
            self.blob_client.chunk_put(c.url, c.get_create_xattr(), c.data)

        self.container_client.content_create(acct=content.account,
                                             ref=content.container_name,
                                             path=content.content_name,
                                             size=content.size,
                                             checksum=content.hash,
                                             content_id=content.content_id,
                                             stgpol=content.stgpol,
                                             data=content.get_create_meta2())

    def tearDown(self):
        super(TestRebuilderCrawler, self).tearDown()

    def test_rebuild_chunk(self):
        # push a new content
        content = TestContent(self.conf, self.account,
                              self.container_name, "mycontent", "TWOCOPIES")
        data = "azerty"
        content.add_chunk(data, pos='0', rawx=0)
        content.add_chunk(data, pos='0', rawx=1)

        self._push_content(content)

        # rebuild the first rawx
        rebuilder = BlobRebuilderWorker(self.gridconf, None,
                                        self.conf['rawx'][0]['addr'])

        rebuilder.chunk_rebuild(content.container_id, content.content_id,
                                content.chunks[0].id)

        # check meta2 information
        _, res = self.container_client.content_show(
            acct=content.account, ref=content.container_name,
            content=content.content_id)

        new_chunk_info = None
        for c in res:
            if (c['url'] != content.chunks[0].url and
                    c['url'] != content.chunks[1].url):
                new_chunk_info = c

        new_chunk_id = new_chunk_info['url'].split('/')[-1]

        self.assertEqual(new_chunk_info['hash'], content.chunks[0].hash)
        self.assertEqual(new_chunk_info['pos'], content.chunks[0].pos)
        self.assertEqual(new_chunk_info['size'], content.chunks[0].size)

        # check chunk information
        meta, stream = self.blob_client.chunk_get(new_chunk_info['url'])

        self.assertEqual(meta['content_size'], str(content.chunks[0].size))
        self.assertEqual(meta['content_path'], content.content_name)
        self.assertEqual(meta['content_cid'], content.container_id)
        self.assertEqual(meta['content_id'], content.content_id)
        self.assertEqual(meta['chunk_id'], new_chunk_id)
        self.assertEqual(meta['chunk_pos'], content.chunks[0].pos)
        self.assertEqual(meta['content_version'], content.version)
        self.assertEqual(meta['chunk_hash'], content.chunks[0].hash)

        self.assertEqual(stream.next(), content.chunks[0].data)

        # check rtime flag in rdir
        rdir_client = RdirClient(self.gridconf)
        res = rdir_client.chunk_fetch(self.conf['rawx'][0]['addr'])
        key = (content.container_id, content.content_id,
               content.chunks[0].id)
        for i_container, i_content, i_chunk, i_value in res:
            if (i_container, i_content, i_chunk) == key:
                check_value = i_value

        self.assertIsNotNone(check_value.get('rtime'))

    @unittest.skipIf(len(get_config()['rawx']) != 3,
                     "The number of rawx must be 3")
    def test_rebuild_no_spare(self):
        # push a new content
        content = TestContent(self.conf, self.account,
                              self.container_name, "mycontent",
                              "THREECOPIES")
        data = "azerty"
        content.add_chunk(data, pos='0', rawx=0)
        content.add_chunk(data, pos='0', rawx=1)
        content.add_chunk(data, pos='0', rawx=2)

        self._push_content(content)

        # rebuild the first rawx
        rebuilder = BlobRebuilderWorker(self.gridconf, None,
                                        self.conf['rawx'][0]['addr'])

        self.assertRaises(SpareChunkException, rebuilder.chunk_rebuild,
                          content.container_id, content.content_id,
                          content.chunks[0].id)

    def test_rebuild_upload_failed(self):
        # push a new content
        content = TestContent(self.conf, self.account,
                              self.container_name, "mycontent", "TWOCOPIES")
        data = "azerty"
        content.add_chunk(data, pos='0', rawx=0)
        content.add_chunk(data, pos='0', rawx=1)

        self._push_content(content)

        # rebuild the first rawx
        rebuilder = BlobRebuilderWorker(self.gridconf, None,
                                        self.conf['rawx'][0]['addr'])

        # Force upload to raise an exception
        with patch('oio.content.content.BlobClient') as MockClass:
            instance = MockClass.return_value
            instance.chunk_copy.side_effect = Exception("xx")
            self.assertRaises(UnrecoverableContent, rebuilder.chunk_rebuild,
                              content.container_id, content.content_id,
                              content.chunks[0].id)

    def test_rebuild_nonexistent_chunk(self):
        rebuilder = BlobRebuilderWorker(self.gridconf, None,
                                        self.conf['rawx'][0]['addr'])

        # try to rebuild a nonexistent chunk
        self.assertRaises(OrphanChunk, rebuilder.chunk_rebuild,
                          64 * '0', 32 * '0', 64 * '0')

    def test_rebuild_orphan_chunk(self):
        # push a new content
        content = TestContent(self.conf, self.account,
                              self.container_name, "mycontent", "TWOCOPIES")
        data = "azerty"
        content.add_chunk(data, pos='0', rawx=0)
        content.add_chunk(data, pos='0', rawx=1)

        self._push_content(content)

        # rebuild the first rawx
        rebuilder = BlobRebuilderWorker(self.gridconf, None,
                                        self.conf['rawx'][0]['addr'])

        # try to rebuild a nonexistent chunk
        self.assertRaises(OrphanChunk, rebuilder.chunk_rebuild,
                          content.container_id, content.content_id,
                          64 * '0')

    def test_rebuild_with_no_copy(self):
        # push a new content
        content = TestContent(self.conf, self.account,
                              self.container_name, "mycontent", "SINGLE")
        data = "azerty"
        content.add_chunk(data, pos='0', rawx=0)

        self._push_content(content)

        # rebuild the first rawx
        rebuilder = BlobRebuilderWorker(self.gridconf, None,
                                        self.conf['rawx'][0]['addr'])

        # try to rebuild chunk without copy
        self.assertRaises(UnrecoverableContent, rebuilder.chunk_rebuild,
                          content.container_id, content.content_id,
                          content.chunks[0].id)

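# Hedged sketch of the rebuild entry point exercised above: one worker bound
# to the rawx to repair, asked to rebuild a single chunk (helper name is
# illustrative).
def _rebuild_chunk_sketch(gridconf, rawx_addr, container_id, content_id,
                          chunk_id):
    rebuilder = BlobRebuilderWorker(gridconf, None, rawx_addr)
    # raises OrphanChunk, SpareChunkException or UnrecoverableContent
    # depending on the failure mode, as the tests above assert
    rebuilder.chunk_rebuild(container_id, content_id, chunk_id)
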
class TestBlobIndexer(BaseTestCase):
    def setUp(self):
        super(TestBlobIndexer, self).setUp()
        self.rdir_client = RdirClient(self.conf)
        self.blob_client = BlobClient(self.conf)
        _, self.rawx_path, rawx_addr, _ = \
            self.get_service_url('rawx')
        services = self.conscience.all_services('rawx')
        self.rawx_id = None
        for rawx in services:
            if rawx_addr == rawx['addr']:
                self.rawx_id = rawx['tags'].get('tag.service_id', None)
        if self.rawx_id is None:
            self.rawx_id = rawx_addr
        conf = self.conf.copy()
        conf['volume'] = self.rawx_path
        self.blob_indexer = BlobIndexer(conf)
        # clear rawx/rdir
        chunk_files = paths_gen(self.rawx_path)
        for chunk_file in chunk_files:
            os.remove(chunk_file)
        self.rdir_client.admin_clear(self.rawx_id, clear_all=True)

    def _put_chunk(self):
        account = random_str(16)
        container = random_str(16)
        cid = cid_from_name(account, container)
        content_path = random_str(16)
        content_version = 1234567890
        content_id = random_id(32)
        fullpath = encode_fullpath(account, container, content_path,
                                   content_version, content_id)
        chunk_id = random_chunk_id()
        data = random_buffer(string.printable, 100)
        meta = {
            'full_path': fullpath,
            'container_id': cid,
            'content_path': content_path,
            'version': content_version,
            'id': content_id,
            'chunk_method': 'ec/algo=liberasurecode_rs_vand,k=6,m=3',
            'policy': 'TESTPOLICY',
            'chunk_hash': md5(data).hexdigest().upper(),
            'oio_version': OIO_VERSION,
            'chunk_pos': 0,
            'metachunk_hash': md5().hexdigest(),
            'metachunk_size': 1024
        }
        self.blob_client.chunk_put(
            'http://' + self.rawx_id + '/' + chunk_id, meta, data)
        sleep(1)  # ensure chunk events have been processed
        return account, container, cid, content_path, content_version, \
            content_id, chunk_id

    def _delete_chunk(self, chunk_id):
        self.blob_client.chunk_delete(
            'http://' + self.rawx_id + '/' + chunk_id)
        sleep(1)  # ensure chunk events have been processed

    def _link_chunk(self, target_chunk_id):
        account = random_str(16)
        container = random_str(16)
        cid = cid_from_name(account, container)
        content_path = random_str(16)
        content_version = 1234567890
        content_id = random_id(32)
        fullpath = encode_fullpath(account, container, content_path,
                                   content_version, content_id)
        _, link = self.blob_client.chunk_link(
            'http://' + self.rawx_id + '/' + target_chunk_id, None, fullpath)
        chunk_id = link.split('/')[-1]
        sleep(1)  # ensure chunk events have been processed
        return account, container, cid, content_path, content_version, \
            content_id, chunk_id

    def _chunk_path(self, chunk_id):
        return self.rawx_path + '/' + chunk_id[:3] + '/' + chunk_id

    def test_blob_indexer(self):
        _, _, expected_cid, _, _, expected_content_id, expected_chunk_id = \
            self._put_chunk()

        chunks = list(self.rdir_client.chunk_fetch(self.rawx_id))
        self.assertEqual(1, len(chunks))
        cid, content_id, chunk_id, _ = chunks[0]
        self.assertEqual(expected_cid, cid)
        self.assertEqual(expected_content_id, content_id)
        self.assertEqual(expected_chunk_id, chunk_id)

        self.rdir_client.admin_clear(self.rawx_id, clear_all=True)
        self.blob_indexer.index_pass()
        self.assertEqual(1, self.blob_indexer.successes)
        self.assertEqual(0, self.blob_indexer.errors)

        chunks = self.rdir_client.chunk_fetch(self.rawx_id)
        chunks = list(chunks)
        self.assertEqual(1, len(chunks))
        cid, content_id, chunk_id, _ = chunks[0]
        self.assertEqual(expected_cid, cid)
        self.assertEqual(expected_content_id, content_id)
        self.assertEqual(expected_chunk_id, chunk_id)

        self._delete_chunk(expected_chunk_id)
        chunks = self.rdir_client.chunk_fetch(self.rawx_id)
        chunks = list(chunks)
        self.assertEqual(0, len(chunks))

    def test_blob_indexer_with_old_chunk(self):
        expected_account, expected_container, expected_cid, \
            expected_content_path, expected_content_version, \
            expected_content_id, expected_chunk_id = self._put_chunk()

        chunks = list(self.rdir_client.chunk_fetch(self.rawx_id))
        self.assertEqual(1, len(chunks))
        cid, content_id, chunk_id, _ = chunks[0]
        self.assertEqual(expected_cid, cid)
        self.assertEqual(expected_content_id, content_id)
        self.assertEqual(expected_chunk_id, chunk_id)

        convert_to_old_chunk(
            self._chunk_path(chunk_id), expected_account, expected_container,
            expected_content_path, expected_content_version,
            expected_content_id)

        self.rdir_client.admin_clear(self.rawx_id, clear_all=True)
        self.blob_indexer.index_pass()
        self.assertEqual(1, self.blob_indexer.successes)
        self.assertEqual(0, self.blob_indexer.errors)

        chunks = self.rdir_client.chunk_fetch(self.rawx_id)
        chunks = list(chunks)
        self.assertEqual(1, len(chunks))
        cid, content_id, chunk_id, _ = chunks[0]
        self.assertEqual(expected_cid, cid)
        self.assertEqual(expected_content_id, content_id)
        self.assertEqual(expected_chunk_id, chunk_id)

        self._delete_chunk(expected_chunk_id)
        chunks = self.rdir_client.chunk_fetch(self.rawx_id)
        chunks = list(chunks)
        self.assertEqual(0, len(chunks))

    def test_blob_indexer_with_linked_chunk(self):
        _, _, expected_cid, _, _, expected_content_id, expected_chunk_id = \
            self._put_chunk()

        chunks = self.rdir_client.chunk_fetch(self.rawx_id)
        chunks = list(chunks)
        self.assertEqual(1, len(chunks))
        cid, content_id, chunk_id, _ = chunks[0]
        self.assertEqual(expected_cid, cid)
        self.assertEqual(expected_content_id, content_id)
        self.assertEqual(expected_chunk_id, chunk_id)

        self.rdir_client.admin_clear(self.rawx_id, clear_all=True)
        self.blob_indexer.index_pass()
        self.assertEqual(1, self.blob_indexer.successes)
        self.assertEqual(0, self.blob_indexer.errors)

        chunks = self.rdir_client.chunk_fetch(self.rawx_id)
        chunks = list(chunks)
        self.assertEqual(1, len(chunks))
        cid, content_id, chunk_id, _ = chunks[0]
        self.assertEqual(expected_cid, cid)
        self.assertEqual(expected_content_id, content_id)
        self.assertEqual(expected_chunk_id, chunk_id)

        _, _, linked_cid, _, _, linked_content_id, linked_chunk_id = \
            self._link_chunk(expected_chunk_id)

        self.rdir_client.admin_clear(self.rawx_id, clear_all=True)
        self.blob_indexer.index_pass()
        self.assertEqual(2, self.blob_indexer.successes)
        self.assertEqual(0, self.blob_indexer.errors)

        chunks = self.rdir_client.chunk_fetch(self.rawx_id)
        chunks = list(chunks)
        self.assertEqual(2, len(chunks))
        self.assertNotEqual(chunks[0][2], chunks[1][2])
        for chunk in chunks:
            cid, content_id, chunk_id, _ = chunk
            if chunk_id == expected_chunk_id:
                self.assertEqual(expected_cid, cid)
                self.assertEqual(expected_content_id, content_id)
            else:
                self.assertEqual(linked_cid, cid)
                self.assertEqual(linked_content_id, content_id)
                self.assertEqual(linked_chunk_id, chunk_id)

        self._delete_chunk(expected_chunk_id)
        chunks = self.rdir_client.chunk_fetch(self.rawx_id)
        chunks = list(chunks)
        self.assertEqual(1, len(chunks))
        cid, content_id, chunk_id, _ = chunks[0]
        self.assertEqual(linked_cid, cid)
        self.assertEqual(linked_content_id, content_id)
        self.assertEqual(linked_chunk_id, chunk_id)

        self._delete_chunk(linked_chunk_id)
        chunks = self.rdir_client.chunk_fetch(self.rawx_id)
        chunks = list(chunks)
        self.assertEqual(0, len(chunks))

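# Hedged sketch of the indexer round-trip verified above: wipe the rdir
# entries for a rawx, re-run an index pass over its volume, then read the
# entries back (helper name is illustrative).
def _reindex_sketch(conf, rawx_id, rawx_path, rdir_client):
    rdir_client.admin_clear(rawx_id, clear_all=True)
    indexer_conf = conf.copy()
    indexer_conf['volume'] = rawx_path
    BlobIndexer(indexer_conf).index_pass()
    return list(rdir_client.chunk_fetch(rawx_id))
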
class Content(object):
    def __init__(self, conf, container_id, metadata, chunks, stgpol_args):
        self.conf = conf
        self.container_id = container_id
        self.metadata = metadata
        self.chunks = ChunksHelper(chunks)
        self.stgpol_args = stgpol_args
        self.logger = get_logger(self.conf)
        self.cs_client = ConscienceClient(conf)
        self.container_client = ContainerClient(self.conf)
        self.blob_client = BlobClient()
        self.session = requests.Session()
        self.content_id = self.metadata["id"]
        self.stgpol_name = self.metadata["policy"]
        self.path = self.metadata["name"]
        self.length = int(self.metadata["length"])
        self.version = self.metadata["version"]
        self.hash = self.metadata["hash"]
        self.mime_type = self.metadata["mime-type"]
        self.chunk_method = self.metadata["chunk-method"]

    def _meta2_get_spare_chunk(self, chunks_notin, chunks_broken):
        spare_data = {
            "notin": ChunksHelper(chunks_notin, False).raw(),
            "broken": ChunksHelper(chunks_broken, False).raw()
        }
        try:
            spare_resp = self.container_client.content_spare(
                cid=self.container_id, content=self.content_id,
                data=spare_data, stgpol=self.stgpol_name)
        except ClientException as e:
            raise exc.SpareChunkException("No spare chunk (%s)" % e.message)

        url_list = []
        for c in spare_resp["chunks"]:
            url_list.append(c["id"])

        return url_list

    def _meta2_update_spare_chunk(self, current_chunk, new_url):
        old = [{'type': 'chunk',
                'id': current_chunk.url,
                'hash': current_chunk.hash,
                'size': current_chunk.size,
                'pos': current_chunk.pos,
                'content': self.content_id}]
        new = [{'type': 'chunk',
                'id': new_url,
                'hash': current_chunk.hash,
                'size': current_chunk.size,
                'pos': current_chunk.pos,
                'content': self.content_id}]
        update_data = {'old': old, 'new': new}
        self.container_client.container_raw_update(
            cid=self.container_id, data=update_data)

    def _meta2_create_object(self):
        self.container_client.content_create(cid=self.container_id,
                                             path=self.path,
                                             content_id=self.content_id,
                                             stgpol=self.stgpol_name,
                                             size=self.length,
                                             checksum=self.hash,
                                             version=self.version,
                                             chunk_method=self.chunk_method,
                                             mime_type=self.mime_type,
                                             data=self.chunks.raw())

    def rebuild_chunk(self, chunk_id):
        raise NotImplementedError()

    def upload(self, stream):
        try:
            self._upload(stream)
        except Exception as e:
            for chunk in self.chunks:
                try:
                    self.blob_client.chunk_delete(chunk.url)
                except:
                    pass
            raise e

    def _upload(self, stream):
        raise NotImplementedError()

    def download(self):
        raise NotImplementedError()

class TestPlainContent(BaseTestCase):
    def setUp(self):
        super(TestPlainContent, self).setUp()

        if len(self.conf['services']['rawx']) < 4:
            self.skipTest("Plain tests need more than 3 rawx to run")

        self.namespace = self.conf['namespace']
        self.account = self.conf['account']
        self.chunk_size = self.conf['chunk_size']
        self.gridconf = {"namespace": self.namespace}
        self.content_factory = ContentFactory(self.gridconf)
        self.container_client = ContainerClient(self.gridconf)
        self.blob_client = BlobClient(self.conf)
        self.container_name = "TestPlainContent-%f" % time.time()
        self.container_client.container_create(
            account=self.account, reference=self.container_name)
        self.container_id = cid_from_name(
            self.account, self.container_name).upper()
        self.content = random_str(64)
        self.stgpol = "SINGLE"
        self.stgpol_twocopies = "TWOCOPIES"
        self.stgpol_threecopies = "THREECOPIES"

    def _test_create(self, stgpol, data_size):
        data = random_data(data_size)
        content = self.content_factory.new(
            self.container_id, self.content, len(data), stgpol)
        content.create(BytesIO(data))

        meta, chunks = self.container_client.content_locate(
            cid=self.container_id, content=content.content_id)
        self.assertEqual(meta['hash'], md5_data(data))
        self.assertEqual(meta['length'], str(len(data)))
        self.assertEqual(meta['policy'], stgpol)
        self.assertEqual(meta['name'], self.content)

        metachunk_nb = int(math.ceil(float(len(data)) / self.chunk_size))
        if metachunk_nb == 0:
            metachunk_nb = 1  # special case for empty content

        chunks = ChunksHelper(chunks)

        # TODO NO NO NO
        if stgpol == self.stgpol_threecopies:
            nb_copy = 3
        elif stgpol == self.stgpol_twocopies:
            nb_copy = 2
        elif stgpol == self.stgpol:
            nb_copy = 1

        self.assertEqual(len(chunks), metachunk_nb * nb_copy)

        for pos in range(metachunk_nb):
            chunks_at_pos = chunks.filter(pos=pos)
            self.assertEqual(len(chunks_at_pos), nb_copy)

            data_begin = pos * self.chunk_size
            data_end = pos * self.chunk_size + self.chunk_size
            chunk_hash = md5_data(data[data_begin:data_end])

            for chunk in chunks_at_pos:
                meta, stream = self.blob_client.chunk_get(chunk.url)
                self.assertEqual(md5_stream(stream), chunk_hash)
                self.assertEqual(meta['content_path'], self.content)
                self.assertEqual(meta['container_id'], self.container_id)
                self.assertEqual(meta['content_id'], content.content_id)
                self.assertEqual(meta['chunk_id'], chunk.id)
                self.assertEqual(meta['chunk_pos'], str(pos))
                # Check that chunk data matches chunk hash from xattr
                self.assertEqual(meta['chunk_hash'], chunk_hash)
                # Check that chunk data matches chunk hash from database
                self.assertEqual(chunk.checksum, chunk_hash)
                full_path = encode_fullpath(
                    self.account, self.container_name, self.content,
                    meta['content_version'], meta['content_id'])
                self.assertEqual(meta['full_path'], full_path)
                self.assertEqual(meta['oio_version'], '4.2')

    def test_twocopies_create_0_byte(self):
        self._test_create(self.stgpol_twocopies, 0)

    def test_twocopies_create_1_byte(self):
        self._test_create(self.stgpol_twocopies, 1)

    def test_twocopies_create_chunksize_bytes(self):
        self._test_create(self.stgpol_twocopies, self.chunk_size)

    def test_twocopies_create_chunksize_plus_1_bytes(self):
        self._test_create(self.stgpol_twocopies, self.chunk_size + 1)

    def test_twocopies_create_6294503_bytes(self):
        self._test_create(self.stgpol_twocopies, 6294503)

    def test_single_create_0_byte(self):
        self._test_create(self.stgpol, 0)

    def test_single_create_chunksize_plus_1_bytes(self):
        self._test_create(self.stgpol, self.chunk_size + 1)

    def _new_content(self, stgpol, data, broken_pos_list=None):
        # Avoid the shared-mutable-default-argument pitfall
        if broken_pos_list is None:
            broken_pos_list = []
        old_content = self.content_factory.new(
            self.container_id, self.content, len(data), stgpol)
        old_content.create(BytesIO(data))

        broken_chunks_info = {}
        for pos, idx in broken_pos_list:
            c = old_content.chunks.filter(pos=pos)[idx]
            meta, stream = self.blob_client.chunk_get(c.url)
            if pos not in broken_chunks_info:
                broken_chunks_info[pos] = {}
            broken_chunks_info[pos][idx] = {
                "url": c.url,
                "id": c.id,
                "hash": c.checksum,
                "dl_meta": meta,
                "dl_hash": md5_stream(stream)
            }
            self.blob_client.chunk_delete(c.url)

        # get the new structure of the uploaded content
        return (self.content_factory.get(
            self.container_id, old_content.content_id), broken_chunks_info)

    def _rebuild_and_check(self, content, broken_chunks_info,
                           full_rebuild_pos, allow_frozen_container=False):
        rebuild_pos, rebuild_idx = full_rebuild_pos
        rebuild_chunk_info = broken_chunks_info[rebuild_pos][rebuild_idx]
        content.rebuild_chunk(
            rebuild_chunk_info["id"],
            allow_frozen_container=allow_frozen_container)

        # get the new structure of the content
        rebuilt_content = self.content_factory.get(
            self.container_id, content.content_id)

        # find the rebuilt chunk
        for c in rebuilt_content.chunks.filter(pos=rebuild_pos):
            if len(content.chunks.filter(id=c.id)) > 0:
                # not the rebuilt chunk:
                # if this chunk is broken, it must not have been rebuilt
                for b_c_i in broken_chunks_info[rebuild_pos].values():
                    if c.id == b_c_i["id"]:
                        with ExpectedException(NotFound):
                            _, _ = self.blob_client.chunk_get(c.url)
                continue
            meta, stream = self.blob_client.chunk_get(c.url)
            self.assertEqual(meta["chunk_id"], c.id)
            self.assertEqual(md5_stream(stream),
                             rebuild_chunk_info["dl_hash"])
            self.assertEqual(c.checksum, rebuild_chunk_info["hash"])
            self.assertThat(c.url, NotEquals(rebuild_chunk_info["url"]))
            del meta["chunk_id"]
            del rebuild_chunk_info["dl_meta"]["chunk_id"]
            self.assertEqual(meta, rebuild_chunk_info["dl_meta"])

    def _test_rebuild(self, stgpol, data_size, broken_pos_list,
                      full_rebuild_pos):
        data = random_data(data_size)
        content, broken_chunks_info = self._new_content(
            stgpol, data, broken_pos_list)
        self._rebuild_and_check(content, broken_chunks_info,
                                full_rebuild_pos)

    def test_2copies_content_0_byte_1broken_rebuild_pos_0_idx_0(self):
        self._test_rebuild(self.stgpol_twocopies, 0, [(0, 0)], (0, 0))

    def test_2copies_content_1_byte_1broken_rebuild_pos_0_idx_1(self):
        self._test_rebuild(self.stgpol_twocopies, 1, [(0, 1)], (0, 1))

    def test_3copies_content_chunksize_bytes_2broken_rebuild_pos_0_idx_1(self):
        if len(self.conf['services']['rawx']) <= 3:
            self.skipTest("Need more than 3 rawx")
        self._test_rebuild(self.stgpol_threecopies, self.chunk_size,
                           [(0, 0), (0, 1)], (0, 1))

    def test_3copies_content_2xchksize_bytes_2broken_rebuild_pos_1_idx_2(self):
        self._test_rebuild(self.stgpol_threecopies, 2 * self.chunk_size,
                           [(1, 0), (1, 2)], (1, 2))

    def test_2copies_content_0_byte_2broken_rebuild_pos_0_idx_0(self):
        with ExpectedException(UnrecoverableContent):
            self._test_rebuild(
                self.stgpol_twocopies, 0, [(0, 0), (0, 1)], (0, 0))

    def test_rebuild_chunk_in_frozen_container(self):
        data = random_data(self.chunk_size)
        content, broken_chunks_info = self._new_content(
            self.stgpol_twocopies, data, [(0, 0)])
        system = dict()
        system['sys.status'] = str(OIO_DB_FROZEN)
        self.container_client.container_set_properties(
            self.account, self.container_name, None, system=system)

        try:
            full_rebuild_pos = (0, 0)
            rebuild_pos, rebuild_idx = full_rebuild_pos
            rebuild_chunk_info = broken_chunks_info[rebuild_pos][rebuild_idx]
            self.assertRaises(ServiceBusy, content.rebuild_chunk,
                              rebuild_chunk_info["id"])
        finally:
            system['sys.status'] = str(OIO_DB_ENABLED)
            self.container_client.container_set_properties(
                self.account, self.container_name, None, system=system)

        self._rebuild_and_check(content, broken_chunks_info,
                                full_rebuild_pos,
                                allow_frozen_container=True)

    def _test_fetch(self, stgpol, data_size, broken_pos_list):
        data = random_data(data_size)
        content, _ = self._new_content(stgpol, data, broken_pos_list)

        fetched_data = "".join(content.fetch())
        self.assertEqual(fetched_data, data)

        for pos, idx in broken_pos_list:
            # check nothing has been rebuilt
            c = content.chunks.filter(pos=pos)[0]
            self.assertRaises(NotFound,
                              self.blob_client.chunk_delete, c.url)

    def test_twocopies_fetch_content_0_byte_without_broken_chunks(self):
        self._test_fetch(self.stgpol_twocopies, 0, [])

    def test_twocopies_fetch_content_0_byte_with_broken_0_0(self):
        self._test_fetch(self.stgpol_twocopies, 0, [(0, 0)])

    def test_twocopies_fetch_content_1_byte_without_broken_chunks(self):
        self._test_fetch(self.stgpol_twocopies, 1, [])

    def test_twocopies_fetch_content_1_byte_with_broken_0_0(self):
        self._test_fetch(self.stgpol_twocopies, 1, [(0, 0)])

    def test_twocopies_fetch_chunksize_bytes_without_broken_chunks(self):
        self._test_fetch(self.stgpol_twocopies, self.chunk_size, [])

    def test_twocopies_fetch_2xchuksize_bytes_with_broken_0_0_and_1_0(self):
        self._test_fetch(
            self.stgpol_twocopies, self.chunk_size * 2, [(0, 0), (1, 0)])

    def test_twocopies_fetch_content_chunksize_bytes_2_broken_chunks(self):
        data = random_data(self.chunk_size)
        content, _ = self._new_content(
            self.stgpol_twocopies, data, [(0, 0), (0, 1)])
        gen = content.fetch()
        self.assertRaises(UnrecoverableContent, gen.next)

    def test_single_fetch_content_1_byte_without_broken_chunks(self):
        self._test_fetch(self.stgpol, 1, [])

    def test_single_fetch_chunksize_bytes_plus_1_without_broken_chunk(self):
        self._test_fetch(self.stgpol, self.chunk_size * 2, [])
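# The chunk count asserted by _test_create() follows directly from the
# layout: a content of `size` bytes is cut into ceil(size / chunk_size)
# metachunks (at least one, even when empty), and each metachunk is
# replicated `nb_copy` times by the storage policy. A small
# self-contained illustration; the helper name is ours, not part of
# the code under test:
import math


def expected_chunk_count(size, chunk_size, nb_copy):
    metachunk_nb = int(math.ceil(float(size) / chunk_size)) or 1
    return metachunk_nb * nb_copy


assert expected_chunk_count(0, 1048576, 2) == 2        # empty content
assert expected_chunk_count(1048576, 1048576, 2) == 2  # exactly one metachunk
assert expected_chunk_count(1048577, 1048576, 3) == 6  # spills into a second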
class TestBlobAuditorFunctional(BaseTestCase):
    def setUp(self):
        super(TestBlobAuditorFunctional, self).setUp()
        self.namespace = self.conf["namespace"]
        self.account = self.conf["account"]
        self.test_dir = self.conf["sds_path"]

        rawx_num, rawx_path, rawx_addr = self.get_service_url("rawx")
        self.rawx = "http://" + rawx_addr

        self.h = hashlib.new("md5")

        conf = {"namespace": self.namespace}
        self.auditor = BlobAuditorWorker(conf, get_logger(None), None)
        self.container_c = ContainerClient(conf)
        self.blob_c = BlobClient()

        self.ref = random_str(8)
        self.container_c.container_create(self.account, self.ref)

        self.url_rand = random_id(64)
        self.data = random_str(1280)
        self.h.update(self.data)
        self.hash_rand = self.h.hexdigest().lower()

        self.content = TestContent(
            random_str(6), len(self.data), self.url_rand, 1)
        self.content.id_container = cid_from_name(
            self.account, self.ref).upper()
        self.chunk = TestChunk(
            self.content.size, self.url_rand, 0, self.hash_rand)

        self.chunk_url = "%s/%s" % (self.rawx, self.chunk.id_chunk)
        self.chunk_proxy = {
            "hash": self.chunk.md5,
            "pos": "0",
            "size": self.chunk.size,
            "url": self.chunk_url,
        }
        chunk_meta = {
            "content_path": self.content.path,
            "container_id": self.content.id_container,
            "chunk_method": "plain/nb_copy=3",
            "policy": "TESTPOLICY",
            "id": "0000",
            "version": 1,
            "chunk_id": self.chunk.id_chunk,
            "chunk_pos": self.chunk.pos,
            "chunk_hash": self.chunk.md5,
        }
        self.blob_c.chunk_put(self.chunk_url, chunk_meta, self.data)

        self.chunk_path = (
            self.test_dir + "/data/" + self.namespace + "-rawx-1/"
            + self.chunk.id_chunk[0:3] + "/" + self.chunk.id_chunk)
        self.bad_container_id = "0" * 64

    def tearDown(self):
        super(TestBlobAuditorFunctional, self).tearDown()
        # Best-effort cleanup of the object, the container and the
        # chunk file created by setUp()
        try:
            self.container_c.content_delete(
                self.account, self.ref, self.content.path)
        except Exception:
            pass
        try:
            self.container_c.container_destroy(self.account, self.ref)
        except Exception:
            pass
        try:
            os.remove(self.chunk_path)
        except Exception:
            pass

    def init_content(self):
        self.container_c.content_create(
            self.account, self.ref, self.content.path, self.chunk.size,
            self.hash_rand, data=[self.chunk_proxy])

    def test_chunk_audit(self):
        self.init_content()
        self.auditor.chunk_audit(self.chunk_path)

    def test_content_deleted(self):
        self.assertRaises(exc.OrphanChunk,
                          self.auditor.chunk_audit, self.chunk_path)

    def test_container_deleted(self):
        self.container_c.container_destroy(self.account, self.ref)
        self.assertRaises(exc.OrphanChunk,
                          self.auditor.chunk_audit, self.chunk_path)

    def test_chunk_corrupted(self):
        self.init_content()
        with open(self.chunk_path, "w") as f:
            f.write(random_str(1280))
        self.assertRaises(exc.CorruptedChunk,
                          self.auditor.chunk_audit, self.chunk_path)

    def test_chunk_bad_size(self):
        self.init_content()
        with open(self.chunk_path, "w") as f:
            f.write(random_str(320))
        self.assertRaises(exc.FaultyChunk,
                          self.auditor.chunk_audit, self.chunk_path)

    def test_xattr_bad_chunk_size(self):
        self.init_content()
        xattr.setxattr(self.chunk_path,
                       "user." + chunk_xattr_keys["chunk_size"], "-1")
        self.assertRaises(exc.FaultyChunk,
                          self.auditor.chunk_audit, self.chunk_path)

    def test_xattr_bad_chunk_hash(self):
        self.init_content()
        xattr.setxattr(self.chunk_path,
                       "user." + chunk_xattr_keys["chunk_hash"],
                       "WRONG_HASH")
        self.assertRaises(exc.CorruptedChunk,
                          self.auditor.chunk_audit, self.chunk_path)

    def test_xattr_bad_content_path(self):
        self.init_content()
        xattr.setxattr(self.chunk_path,
                       "user." + chunk_xattr_keys["content_path"],
                       "WRONG_PATH")
        self.assertRaises(exc.OrphanChunk,
                          self.auditor.chunk_audit, self.chunk_path)

    def test_xattr_bad_chunk_id(self):
        self.init_content()
        xattr.setxattr(self.chunk_path,
                       "user." + chunk_xattr_keys["chunk_id"], "WRONG_ID")
        self.assertRaises(exc.OrphanChunk,
                          self.auditor.chunk_audit, self.chunk_path)

    def test_xattr_bad_content_container(self):
        self.init_content()
        xattr.setxattr(self.chunk_path,
                       "user." + chunk_xattr_keys["container_id"],
                       self.bad_container_id)
        self.assertRaises(exc.OrphanChunk,
                          self.auditor.chunk_audit, self.chunk_path)

    def test_xattr_bad_chunk_position(self):
        self.init_content()
        xattr.setxattr(self.chunk_path, "user.grid.chunk.position", "42")
        xattr.setxattr(self.chunk_path,
                       "user." + chunk_xattr_keys["chunk_pos"], "42")
        self.assertRaises(exc.FaultyChunk,
                          self.auditor.chunk_audit, self.chunk_path)

    def test_chunk_bad_hash(self):
        # Hash the payload a second time: the checksum registered in
        # meta2 no longer matches the data actually stored on disk
        self.h.update(self.data)
        self.hash_rand = self.h.hexdigest().lower()
        self.chunk.md5 = self.hash_rand
        self.chunk_proxy["hash"] = self.chunk.md5
        self.init_content()
        self.assertRaises(exc.FaultyChunk,
                          self.auditor.chunk_audit, self.chunk_path)

    def test_chunk_bad_length(self):
        self.chunk.size = 320
        self.chunk_proxy["size"] = self.chunk.size
        self.init_content()
        self.assertRaises(exc.FaultyChunk,
                          self.auditor.chunk_audit, self.chunk_path)

    def test_chunk_bad_chunk_size(self):
        self.chunk.size = 320
        self.chunk_proxy["size"] = self.chunk.size
        self.init_content()
        self.assertRaises(exc.FaultyChunk,
                          self.auditor.chunk_audit, self.chunk_path)

    def test_chunk_bad_url(self):
        self.chunk_proxy["url"] = "%s/WRONG_ID" % self.rawx
        self.init_content()
        self.assertRaises(exc.OrphanChunk,
                          self.auditor.chunk_audit, self.chunk_path)
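# All the xattr-corruption tests above share one recipe: rewrite a
# single "user.<key>" extended attribute of the chunk file, then check
# which error class the auditor raises (OrphanChunk when the chunk no
# longer matches any registered object, FaultyChunk or CorruptedChunk
# when the file contradicts its own metadata). A hedged sketch of how
# these cases could be table-driven inside the class above; the helper
# name is a suggestion, not part of the test suite:
    def _corrupt_xattr_and_audit(self, key, value, expected_exc):
        self.init_content()
        xattr.setxattr(self.chunk_path,
                       "user." + chunk_xattr_keys[key], value)
        self.assertRaises(expected_exc,
                          self.auditor.chunk_audit, self.chunk_path)

# e.g. self._corrupt_xattr_and_audit("chunk_id", "WRONG_ID",
#                                    exc.OrphanChunk)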