def test_write_quorum_success(self):
    """A write reaching exactly the quorum of 201s must succeed."""
    checksum = self.checksum()
    source = empty_stream()
    size = CHUNK_SIZE
    meta_chunk = self.meta_chunk()
    quorum_size = self.storage_method.quorum
    # The first `quorum_size` backends succeed, the remaining ones fail.
    statuses = [201] * quorum_size
    statuses.extend([500] * (len(meta_chunk) - quorum_size))
    with set_http_connect(*statuses):
        handler = ReplicatedChunkWriteHandler(
            self.sysmeta, meta_chunk, checksum, self.storage_method)
        bytes_transferred, checksum, chunks = handler.stream(source, size)
        self.assertEqual(len(chunks), len(meta_chunk) - 1)
        for idx in range(quorum_size):
            self.assertEqual(chunks[idx].get('error'), None)
        # NOTE(JFS): starting at branch 3.x, only the chunks that
        # succeeded are saved, so the failed ones carry no 'error'
        # entries to assert on here.
        self.assertEqual(bytes_transferred, 0)
        self.assertEqual(checksum, EMPTY_CHECKSUM)
def test_write_transfer(self):
    """Streamed data must reach every replica complete and intact."""
    checksum = self.checksum()
    test_data = ('1234' * 1024)[:-10]
    size = len(test_data)
    meta_chunk = self.meta_chunk()
    nb = len(meta_chunk)
    source = StringIO(test_data)
    put_reqs = defaultdict(lambda: {'parts': []})

    def cb_body(conn_id, part):
        # Collect every chunked-encoding part sent on each connection.
        put_reqs[conn_id]['parts'].append(part)

    with set_http_connect(*([201] * nb), cb_body=cb_body):
        handler = ReplicatedChunkWriteHandler(
            self.sysmeta, meta_chunk, checksum, self.storage_method)
        bytes_transferred, checksum, chunks = handler.stream(source, size)

        final_checksum = self.checksum(test_data).hexdigest()
        self.assertEqual(len(test_data), bytes_transferred)
        self.assertEqual(final_checksum, checksum)
        bodies = []
        for conn_id, info in put_reqs.items():
            body, trailers = decode_chunked_body(''.join(info['parts']))
            # TODO check trailers?
            bodies.append(body)
        self.assertEqual(len(bodies), nb)
        for body in bodies:
            self.assertEqual(len(test_data), len(body))
            self.assertEqual(
                self.checksum(body).hexdigest(), final_checksum)
def write_repli_meta_chunk(self, source, size, storage_method, sysmeta,
                           meta_chunk):
    """Write one replicated meta-chunk and acknowledge with "OK".

    :param source: readable object providing the data to store
    :param size: number of bytes to read from `source`
    :param storage_method: replication storage method driving the write
    :param sysmeta: system metadata forwarded to the write handler
    :param meta_chunk: list of chunk descriptors (one per replica)
    :returns: a ``Response`` with body "OK"
    """
    meta_checksum = md5()
    # BUG FIX: `storage_method` was accepted but never forwarded to the
    # handler, although every other construction site in this file
    # passes it as the fourth positional argument.
    handler = ReplicatedChunkWriteHandler(
        sysmeta, meta_chunk, meta_checksum, storage_method)
    bytes_transferred, checksum, chunks = handler.stream(source, size)
    return Response("OK")
def test_write_transfer(self):
    """Every replica must receive a full, checksum-matching copy."""
    checksum = self.checksum()
    test_data = ('1234' * 1024)[:-10]
    size = len(test_data)
    meta_chunk = self.meta_chunk()
    nb = len(meta_chunk)
    resps = [201] * nb
    source = StringIO(test_data)
    put_reqs = defaultdict(lambda: {'parts': []})

    def cb_body(conn_id, part):
        # Keep the raw chunked-encoding parts, keyed by connection.
        put_reqs[conn_id]['parts'].append(part)

    with set_http_connect(*resps, cb_body=cb_body):
        handler = ReplicatedChunkWriteHandler(
            self.sysmeta, meta_chunk, checksum, self.storage_method)
        bytes_transferred, checksum, chunks = handler.stream(source, size)

        final_checksum = self.checksum(test_data).hexdigest()
        self.assertEqual(len(test_data), bytes_transferred)
        self.assertEqual(final_checksum, checksum)
        bodies = []
        for conn_id, info in put_reqs.items():
            body, trailers = decode_chunked_body(''.join(info['parts']))
            # TODO check trailers?
            bodies.append(body)
        self.assertEqual(len(bodies), nb)
        for body in bodies:
            self.assertEqual(len(test_data), len(body))
            self.assertEqual(
                self.checksum(body).hexdigest(), final_checksum)
def test_write_timeout(self):
    """A single timed-out replica must not fail the whole write."""
    checksum = self.checksum()
    source = empty_stream()
    size = CHUNK_SIZE
    meta_chunk = self.meta_chunk()
    # All backends succeed except the last one, which times out.
    resps = [201] * (len(meta_chunk) - 1) + [Timeout(1.0)]
    with set_http_connect(*resps):
        handler = ReplicatedChunkWriteHandler(
            self.sysmeta, meta_chunk, checksum, self.storage_method)
        bytes_transferred, checksum, chunks = handler.stream(source, size)
        self.assertEqual(len(chunks), len(meta_chunk) - 1)
        for pos in range(len(meta_chunk) - 1):
            self.assertEqual(chunks[pos].get('error'), None)
        # NOTE(JFS): starting at branch 3.x, only the chunks that
        # succeeded are saved, so the timed-out chunk is simply absent.
        self.assertEqual(bytes_transferred, 0)
        self.assertEqual(checksum, EMPTY_CHECKSUM)
def test_write_simple(self):
    """An empty-stream write with all replicas OK must succeed."""
    checksum = self.checksum()
    source = empty_stream()
    meta_chunk = self.meta_chunk()
    size = CHUNK_SIZE
    with set_http_connect(*([201] * len(meta_chunk))):
        handler = ReplicatedChunkWriteHandler(
            self.sysmeta, meta_chunk, checksum, self.storage_method)
        bytes_transferred, checksum, chunks = handler.stream(source, size)
        self.assertEqual(len(chunks), len(meta_chunk))
        self.assertEqual(bytes_transferred, 0)
        self.assertEqual(checksum, EMPTY_CHECKSUM)
def chunk_put(self, url, meta, data, **kwargs):
    """Upload one chunk replica to `url` using the replicated writer."""
    if not hasattr(data, 'read'):
        data = utils.GeneratorReader(data)
    chunk = {'url': url, 'pos': meta['chunk_pos']}
    # FIXME: ugly
    chunk_method = meta.get('chunk_method',
                            meta.get('content_chunkmethod'))
    storage_method = STORAGE_METHODS.load(chunk_method)
    if storage_method.ec:
        checksum = meta['metachunk_hash']
    else:
        checksum = meta['chunk_hash']
    writer = ReplicatedChunkWriteHandler(
        meta, [chunk], FakeChecksum(checksum), storage_method, quorum=1)
    writer.stream(data, None)
def test_write_simple(self):
    """Writing an empty stream succeeds when every replica answers 201."""
    checksum = self.checksum()
    source = empty_stream()
    meta_chunk = self.meta_chunk()
    size = CHUNK_SIZE
    resps = [201] * len(meta_chunk)
    with set_http_connect(*resps):
        handler = ReplicatedChunkWriteHandler(
            self.sysmeta, meta_chunk, checksum, self.storage_method)
        transferred, checksum, chunks = handler.stream(source, size)
        self.assertEqual(len(chunks), len(meta_chunk))
        self.assertEqual(transferred, 0)
        self.assertEqual(checksum, EMPTY_CHECKSUM)
def chunk_put(self, url, meta, data, **kwargs):
    """Store one chunk replica at `url` via the replicated write path."""
    if not hasattr(data, 'read'):
        data = utils.GeneratorIO(data)
    chunk = {'url': url, 'pos': meta['chunk_pos']}
    # FIXME: ugly
    chunk_method = meta.get('chunk_method',
                            meta.get('content_chunkmethod'))
    storage_method = STORAGE_METHODS.load(chunk_method)
    checksum_key = 'metachunk_hash' if storage_method.ec else 'chunk_hash'
    checksum = meta[checksum_key]
    writer = ReplicatedChunkWriteHandler(
        meta, [chunk], FakeChecksum(checksum), storage_method, quorum=1)
    writer.stream(data, None)
def test_write_partial_exception(self):
    """One failing replica is recorded but does not abort the write."""
    checksum = self.checksum()
    source = empty_stream()
    size = CHUNK_SIZE
    meta_chunk = self.meta_chunk()
    # All backends succeed except the last, which raises an exception.
    resps = [201] * (len(meta_chunk) - 1) + [Exception("failure")]
    with set_http_connect(*resps):
        handler = ReplicatedChunkWriteHandler(
            self.sysmeta, meta_chunk, checksum, self.storage_method)
        bytes_transferred, checksum, chunks = handler.stream(source, size)
        self.assertEqual(len(chunks), len(meta_chunk))
        for pos in range(len(meta_chunk) - 1):
            self.assertEqual(chunks[pos].get('error'), None)
        self.assertEqual(
            chunks[len(meta_chunk) - 1].get('error'), 'failure')
        self.assertEqual(bytes_transferred, 0)
        self.assertEqual(checksum, EMPTY_CHECKSUM)
def test_write_exception(self):
    """When every replica fails, the stream must raise OioException."""
    checksum = self.checksum()
    source = empty_stream()
    meta_chunk = self.meta_chunk()
    size = CHUNK_SIZE
    with set_http_connect(*([500] * len(meta_chunk))):
        handler = ReplicatedChunkWriteHandler(
            self.sysmeta, meta_chunk, checksum, self.storage_method)
        self.assertRaises(exc.OioException, handler.stream, source, size)
def test_write_partial_exception(self):
    """A single raising backend yields an 'error' entry, not a failure."""
    checksum = self.checksum()
    source = empty_stream()
    size = CHUNK_SIZE
    meta_chunk = self.meta_chunk()
    resps = [201] * (len(meta_chunk) - 1)
    resps.append(Exception("failure"))
    with set_http_connect(*resps):
        handler = ReplicatedChunkWriteHandler(
            self.sysmeta, meta_chunk, checksum, self.storage_method)
        transferred, checksum, chunks = handler.stream(source, size)
        self.assertEqual(len(chunks), len(meta_chunk))
        last = len(meta_chunk) - 1
        for i in range(last):
            self.assertEqual(chunks[i].get('error'), None)
        self.assertEqual(chunks[last].get('error'), 'failure')
        self.assertEqual(transferred, 0)
        self.assertEqual(checksum, EMPTY_CHECKSUM)
def test_write_quorum_error(self):
    """Failing exactly quorum-many replicas must abort the write."""
    checksum = self.checksum()
    source = empty_stream()
    size = CHUNK_SIZE
    meta_chunk = self.meta_chunk()
    quorum_size = self.storage_method.quorum
    # Enough 500s to make the quorum unreachable; the rest succeed.
    statuses = [500] * quorum_size
    statuses.extend([201] * (len(meta_chunk) - quorum_size))
    with set_http_connect(*statuses):
        handler = ReplicatedChunkWriteHandler(
            self.sysmeta, meta_chunk, checksum, self.storage_method)
        self.assertRaises(exc.OioException, handler.stream, source, size)
def test_write_quorum_success(self):
    """Reaching the write quorum is enough for the stream to succeed.

    The first `quorum_size` replicas answer 201; the rest answer 500
    and must be reported with an 'error' entry.
    """
    checksum = self.checksum()
    source = empty_stream()
    size = CHUNK_SIZE
    meta_chunk = self.meta_chunk()
    quorum_size = self.storage_method.quorum
    resps = [201] * quorum_size
    resps += [500] * (len(meta_chunk) - quorum_size)
    with set_http_connect(*resps):
        handler = ReplicatedChunkWriteHandler(
            self.sysmeta, meta_chunk, checksum, self.storage_method)
        bytes_transferred, checksum, chunks = handler.stream(source, size)
        self.assertEqual(len(chunks), len(meta_chunk))
        for i in range(quorum_size):
            self.assertEqual(chunks[i].get('error'), None)
        # FIX: use range() instead of Python-2-only xrange(), matching
        # the other tests in this file and keeping Python 3 compatible.
        for i in range(quorum_size, len(meta_chunk)):
            self.assertEqual(chunks[i].get('error'), 'HTTP 500')
        self.assertEqual(bytes_transferred, 0)
        self.assertEqual(checksum, EMPTY_CHECKSUM)
def test_write_timeout_source(self):
    """A timeout raised by the data source must propagate to the caller."""
    class TimeoutReader(object):
        # Minimal file-like object whose read() always times out.
        def read(self, size):
            raise Timeout(1.0)

    checksum = self.checksum()
    source = TimeoutReader()
    size = CHUNK_SIZE
    meta_chunk = self.meta_chunk()
    with set_http_connect(*([201] * len(meta_chunk))):
        handler = ReplicatedChunkWriteHandler(
            self.sysmeta, meta_chunk, checksum, self.storage_method)
        self.assertRaises(Timeout, handler.stream, source, size)
def test_write_exception_source(self):
    """An exception raised by the data source must propagate."""
    class FailingReader(object):
        # Minimal file-like object whose read() always raises.
        def read(self, size):
            raise Exception('failure')

    checksum = self.checksum()
    source = FailingReader()
    size = CHUNK_SIZE
    meta_chunk = self.meta_chunk()
    with set_http_connect(*([201] * len(meta_chunk))):
        handler = ReplicatedChunkWriteHandler(
            self.sysmeta, meta_chunk, checksum, self.storage_method)
        # TODO specialize exception
        self.assertRaises(Exception, handler.stream, source, size)