def test_duplicated_blob_error_on_put(self):
    self.manager._encrypt_and_upload = Mock(return_value=None)
    content, existing_id = "Blob content", uuid4().hex
    doc1 = BlobDoc(BytesIO(content), existing_id)
    yield self.manager.put(doc1, len(content))
    doc2 = BlobDoc(BytesIO(content), existing_id)
    # reset the mock so we can assert that no new upload attempt is made
    # for the duplicated blob id
    self.manager._encrypt_and_upload.reset_mock()
    with pytest.raises(BlobAlreadyExistsError):
        yield self.manager.put(doc2, len(content))
    self.assertFalse(self.manager._encrypt_and_upload.called)
def test_local_list_blobs(self):
    self.manager._encrypt_and_upload = Mock(return_value=None)
    msg, blob_id1, blob_id2 = "1337", uuid4().hex, uuid4().hex
    doc = BlobDoc(BytesIO(msg), blob_id1)
    yield self.manager.put(doc, size=len(msg))
    doc2 = BlobDoc(BytesIO(msg), blob_id2)
    yield self.manager.put(doc2, size=len(msg))
    blobs_list = yield self.manager.local_list()
    self.assertEquals(set([blob_id1, blob_id2]), set(blobs_list))
def test_refresh_deletions_from_server(self):
    manager = BlobManager(self.tempdir, self.uri, self.secret,
                          self.secret, uuid4().hex)
    self.addCleanup(manager.close)
    blob_id, content = 'delete_me', 'content'
    blob_id2 = 'dont_delete_me'
    doc1 = BlobDoc(BytesIO(content), blob_id)
    doc2 = BlobDoc(BytesIO(content), blob_id2)
    yield manager.put(doc1, len(content))
    yield manager.put(doc2, len(content))
    yield manager._delete_from_remote(blob_id)  # remote only deletion
    self.assertTrue((yield manager.local.exists(blob_id)))
    yield manager.sync()
    self.assertFalse((yield manager.local.exists(blob_id)))
    self.assertTrue((yield manager.local.exists(blob_id2)))
def test_send_missing_sends_with_priority(self):
    # pretend we have some pending uploads
    _send = self.manager._send
    self.manager._send = Mock(return_value=None)
    content = "vegan cake"
    length = len(content)
    priorities = [
        ('low', Priority.LOW),
        ('high', Priority.HIGH),
        ('medium', Priority.MEDIUM),
        ('urgent', Priority.URGENT),
    ]
    deferreds = []
    for blob_id, priority in priorities:
        doc = BlobDoc(BytesIO(content), blob_id)
        d = self.manager.put(doc, length, priority=priority)
        deferreds.append(d)
    yield defer.gatherResults(deferreds)
    # make sure upload "succeeds" so sending works
    self.manager._send = _send
    self.manager._encrypt_and_upload = Mock(return_value=None)
    # this is the operation we are interested in
    self.manager.concurrent_transfers_limit = 1
    yield self.manager.send_missing()
    # retrieve the order in which blob transfers were made;
    # each entry of mock_calls is (name, args, kwargs), so c[1][0] is
    # the first positional argument of the call, i.e. the blob_id
    calls = self.manager._encrypt_and_upload.mock_calls
    order = map(lambda c: c[1][0], calls)
    self.assertEqual(['urgent', 'high', 'medium', 'low'], order)
def test_put_stores_on_local_db(self):
    self.manager._encrypt_and_upload = Mock(return_value=None)
    msg, blob_id = "Hey Joe", uuid4().hex
    doc = BlobDoc(BytesIO(msg), blob_id=blob_id)
    yield self.manager.put(doc, size=len(msg))
    result = yield self.manager.local.get(blob_id)
    self.assertEquals(result.getvalue(), msg)
    self.assertTrue(self.manager._encrypt_and_upload.called)
def deliver_using_blobs(client, fd):
    # put
    blob_id = uuid.uuid4().hex
    doc = BlobDoc(fd, blob_id=blob_id)
    size = sys.getsizeof(fd)
    yield client.blobmanager.put(doc, size, namespace='MX')
    # and flag
    flags = [Flags.PENDING]
    yield client.blobmanager.set_flags(blob_id, flags, namespace='MX')
def _put(blob_id, payload):
    logger.info(":: Starting full put: %s" % blob_id)
    manager = _manager()
    size = os.path.getsize(payload)
    with open(payload) as fd:
        doc = BlobDoc(fd, blob_id)
        result = yield manager.put(doc, size=size)
    logger.info(":: Result of put: %s" % str(result))
    logger.info(":: Finished full put: %s" % blob_id)
def upload_blobs(client, amount, data):
    deferreds = []
    for i in xrange(amount):
        fd = BytesIO(data)
        doc = BlobDoc(fd, blob_id=uuid.uuid4().hex)
        size = sys.getsizeof(fd)
        d = client.blobmanager.put(doc, size, namespace='payload')
        deferreds.append(d)
    yield gatherResults(deferreds)
def test_put_sets_priority(self):
    upload_failure = defer.fail(Exception())
    self.manager._encrypt_and_upload = Mock(return_value=upload_failure)
    content, blob_id = "Blob content", uuid4().hex
    doc1 = BlobDoc(BytesIO(content), blob_id)
    with pytest.raises(Exception):
        yield self.manager.put(doc1, len(content), priority='urgent')
    priority = yield self.manager._get_priority(blob_id)
    self.assertEqual(Priority.URGENT, priority)
def test_put_local_only_doesnt_send_to_server(self):
    self.manager._encrypt_and_upload = Mock(return_value=None)
    msg, blob_id = "Hey Joe", uuid4().hex
    doc = BlobDoc(BytesIO(msg), blob_id=blob_id)
    yield self.manager.put(doc, size=len(msg), local_only=True)
    result = yield self.manager.local.get(blob_id)
    status, _ = yield self.manager.local.get_sync_status(blob_id)
    self.assertEquals(result.getvalue(), msg)
    self.assertEquals(status, SyncStatus.LOCAL_ONLY)
    self.assertFalse(self.manager._encrypt_and_upload.called)
def test_online_delete_marks_as_synced(self):
    self.manager._encrypt_and_upload = Mock(return_value=None)
    self.manager._delete_from_remote = Mock(return_value=None)
    content, blob_id = "Blob content", uuid4().hex
    doc1 = BlobDoc(BytesIO(content), blob_id)
    yield self.manager.put(doc1, len(content))
    yield self.manager.delete(blob_id)
    sync_progress = yield self.manager.sync_progress
    expected = {'SYNCED': 1}
    self.assertEquals(expected, sync_progress)
def test_local_sync_status_pending_upload(self):
    upload_failure = defer.fail(Exception())
    self.manager._encrypt_and_upload = Mock(return_value=upload_failure)
    content, blob_id = "Blob content", uuid4().hex
    doc1 = BlobDoc(BytesIO(content), blob_id)
    with pytest.raises(Exception):
        yield self.manager.put(doc1, len(content))
    pending_upload = SyncStatus.PENDING_UPLOAD
    local_list = yield self.manager.local_list_status(pending_upload)
    self.assertIn(blob_id, local_list)
def test_delete_from_local_and_remote(self):
    self.manager._encrypt_and_upload = Mock(return_value=None)
    self.manager._delete_from_remote = Mock(return_value=None)
    content, blob_id = "Blob content", uuid4().hex
    doc1 = BlobDoc(BytesIO(content), blob_id)
    yield self.manager.put(doc1, len(content))
    yield self.manager.delete(blob_id)
    local_list = yield self.manager.local_list()
    self.assertEquals(0, len(local_list))
    params = {'namespace': ''}
    self.manager._delete_from_remote.assert_called_with(blob_id, **params)
def test_offline_delete_marks_as_pending_delete(self):
    deletion_failure = defer.fail(Exception())
    self.manager._encrypt_and_upload = Mock(return_value=None)
    self.manager._delete_from_remote = Mock(return_value=deletion_failure)
    content, blob_id = "Blob content", uuid4().hex
    doc1 = BlobDoc(BytesIO(content), blob_id)
    yield self.manager.put(doc1, len(content))
    with pytest.raises(Exception):
        yield self.manager.delete(blob_id)
    sync_progress = yield self.manager.sync_progress
    expected = {'PENDING_DELETE': 1}
    self.assertEquals(expected, sync_progress)
def test_send_missing(self):
    fd, missing_id = BytesIO('test'), uuid4().hex
    self.manager._encrypt_and_upload = Mock(return_value=None)
    # the server reports no blobs, so the one stored locally is "missing"
    self.manager.remote_list = Mock(return_value=[])
    doc1 = BlobDoc(fd, missing_id)
    yield self.manager.put(doc1, 4)
    yield self.manager.send_missing()
    call_list = self.manager._encrypt_and_upload.call_args_list
    self.assertEquals(1, len(call_list))
    call_blob_id, call_fd = call_list[0][0]
    self.assertEquals(missing_id, call_blob_id)
    self.assertEquals('test', call_fd.getvalue())
def test_put_then_get_using_real_file_descriptor(self):
    self.manager._encrypt_and_upload = Mock(return_value=None)
    self.manager._download_and_decrypt = Mock(return_value=None)
    msg, blob_id = "Fuuuuull cycleee! \o/", uuid4().hex
    tmpfile = os.tmpfile()
    tmpfile.write(msg)
    tmpfile.seek(0)
    doc = BlobDoc(tmpfile, blob_id)
    yield self.manager.put(doc, size=len(msg))
    result = yield self.manager.get(doc.blob_id)
    self.assertEquals(result.getvalue(), msg)
    self.assertTrue(self.manager._encrypt_and_upload.called)
    self.assertFalse(self.manager._download_and_decrypt.called)
def test_get_range(self):
    user_id = uuid4().hex
    manager = BlobManager(self.tempdir, self.uri, self.secret,
                          self.secret, user_id)
    self.addCleanup(manager.close)
    blob_id, content = 'blob_id', '0123456789'
    doc = BlobDoc(BytesIO(content), blob_id)
    yield manager.put(doc, len(content))
    uri = urljoin(self.uri, '%s/%s' % (user_id, blob_id))
    res = yield _get(uri, headers={'Range': 'bytes=10-20'})
    text = yield res.text()
    self.assertTrue(res.headers.hasHeader('content-range'))
    content_range = res.headers.getRawHeaders('content-range').pop()
    self.assertIsNotNone(re.match('^bytes 10-20/[0-9]+$', content_range))
    self.assertEqual(10, len(text))
def load_up_downloads(client, amount, data):
    # delete blobs from server
    ids = yield client.blobmanager.remote_list(namespace='payload')
    deferreds = []
    for blob_id in ids:
        d = client.blobmanager.delete(blob_id, namespace='payload')
        deferreds.append(d)
    yield gatherResults(deferreds)

    # deliver some incoming blobs
    deferreds = []
    for i in xrange(amount):
        fd = BytesIO(data)
        doc = BlobDoc(fd, blob_id=uuid.uuid4().hex)
        size = sys.getsizeof(fd)
        d = client.blobmanager.put(doc, size, namespace='payload')
        deferreds.append(d)
    yield gatherResults(deferreds)
def process_one_incoming_blob(client, item):
    fd = yield semaphore.run(client.blobmanager.get, item, namespace='MX')

    # create metadata docs
    deferreds = []
    for name, data in PARTS.items():
        d = client.create_doc({name: data})
        deferreds.append(d)

    # put the incoming blob as it would be done after mail processing
    doc = BlobDoc(fd, blob_id=uuid.uuid4().hex)
    size = sys.getsizeof(fd)
    d = semaphore.run(client.blobmanager.put, doc, size, namespace='payload')
    deferreds.append(d)
    yield gatherResults(deferreds)

    # delete incoming blob
    yield semaphore.run(client.blobmanager.delete, item, namespace='MX')
def test_put_stores_on_local_db_with_namespace(self):
    self.manager._encrypt_and_upload = Mock(return_value=None)
    self.manager._download_and_decrypt = Mock(return_value=None)
    msg, blob_id = "Hey Joe", uuid4().hex
    doc = BlobDoc(BytesIO(msg), blob_id=blob_id)
    yield self.manager.put(doc, size=len(msg), namespace='custom')
    self.assertTrue(self.manager._encrypt_and_upload.called)
    arg1, arg2 = self.manager._encrypt_and_upload.call_args[0]
    kwargs = self.manager._encrypt_and_upload.call_args[1]
    self.assertEquals(arg1, blob_id)
    self.assertTrue(isinstance(arg2, BytesIO))
    self.assertEquals(kwargs, {'namespace': 'custom'})
    result = yield self.manager.local.get(blob_id)
    self.assertEquals(result, None)
    result = yield self.manager.local.get(blob_id, namespace='custom')
    self.assertEquals(result.getvalue(), msg)
def test_get_range_not_satisfiable(self):
    # put a blob in place
    user_id = uuid4().hex
    manager = BlobManager(self.tempdir, self.uri, self.secret,
                          self.secret, user_id)
    self.addCleanup(manager.close)
    blob_id, content = uuid4().hex, 'content'
    doc = BlobDoc(BytesIO(content), blob_id)
    yield manager.put(doc, len(content))
    # and check possible parsing errors
    uri = urljoin(self.uri, '%s/%s' % (user_id, blob_id))
    ranges = [
        'bytes',
        'bytes=',
        'bytes=1',
        'bytes=blah-100',
        'potatoes=10-100',
        'blah',
    ]
    for range in ranges:
        res = yield _get(uri, headers={'Range': range})
        self.assertEqual(416, res.code)
        content_range = res.headers.getRawHeaders('content-range').pop()
        self.assertIsNotNone(re.match(r'^bytes \*/[0-9]+$', content_range))