async def test_iter_gridfs(self):
        """GridFS cursor iteration yields exactly one result per stored file."""
        gfs = AsyncIOMotorGridFS(self.db)

        async def cleanup():
            # delete_many({}) empties both GridFS collections.
            # Collection.remove() is deprecated in PyMongo 3 and removed in
            # PyMongo 4; this also matches the other test_iter_gridfs here.
            await self.db.fs.files.delete_many({})
            await self.db.fs.chunks.delete_many({})

        await cleanup()

        # Empty iterator: a query with no matches must yield nothing.
        async for _ in gfs.find({'_id': 1}):
            self.fail()

        data = b'data'

        for n_files in 1, 2, 10:
            for i in range(n_files):
                await gfs.put(data, filename='filename')

            # Force extra batches to test iteration: batch_size(3) makes the
            # cursor issue additional getMores for the larger file counts.
            j = 0
            async for _ in gfs.find({'filename': 'filename'}).batch_size(3):
                j += 1

            self.assertEqual(j, n_files)
            await cleanup()
# Example #2
    def test_put_unacknowledged(self):
        """put() through an unacknowledged (w=0) client must be rejected."""
        cx = self.asyncio_client(w=0)
        gridfs = AsyncIOMotorGridFS(cx.motor_test)

        # Unacknowledged writes can't confirm the file was stored.
        with self.assertRaises(ConfigurationError):
            yield from gridfs.put(b"hello")

        cx.close()
# Example #3
    def test_gridfs_secondary(self):
        """Writing to GridFS through a secondary-only client raises an error."""
        # Connect to the primary for setup work.
        host, port = test.env.primary
        primary_cx = self.asyncio_client(host, port)
        if test.env.auth:
            yield from primary_cx.admin.authenticate(db_user, db_password)

        sec_host, sec_port = test.env.secondaries[0]
        secondary_cx = self.asyncio_client(
            sec_host,
            sec_port,
            read_preference=ReadPreference.SECONDARY)

        if test.env.auth:
            yield from secondary_cx.admin.authenticate(db_user, db_password)

        # Start from a clean GridFS namespace.
        yield from primary_cx.motor_test.drop_collection("fs.files")
        yield from primary_cx.motor_test.drop_collection("fs.chunks")

        # Should detect it's connected to secondary and not attempt to
        # create index
        fs = AsyncIOMotorGridFS(secondary_cx.motor_test)

        # This won't detect secondary, raises error
        with self.assertRaises(AutoReconnect):
            yield from fs.put(b'foo')
# Example #4
    def test_gridfs_replica_set(self):
        """Round-trip a file through GridFS on a replica-set client."""
        rs_client = self.asyncio_rsc(
            w=test.env.w,
            wtimeout=5000,
            read_preference=ReadPreference.SECONDARY)

        fs = AsyncIOMotorGridFS(rs_client.motor_test)
        file_id = yield from fs.put(b'foo')
        grid_out = yield from fs.get(file_id)
        fetched = yield from grid_out.read()
        self.assertEqual(b'foo', fetched)
    def test_stream_to_handler(self):
        """stream_to_handler writes exactly the file's length for each size."""
        fs = AsyncIOMotorGridFS(self.db)

        # Cover empty, one-byte, small, and multi-chunk files.
        for size in (0, 1, 100, 100 * 1000):
            file_id = yield from fs.put(b'a' * size)
            grid_out = yield from fs.get(file_id)
            handler = test.MockRequestHandler()
            yield from grid_out.stream_to_handler(handler)
            self.assertEqual(size, handler.n_written)
            yield from fs.delete(file_id)
# Example #6
 async def test_stream_to_handler(self):
     """Stream a 1000-byte gridfile into a mock request handler."""
     # Sort of Tornado-specific, but it does work with asyncio.
     fs = AsyncIOMotorGridFS(self.db)
     n_bytes = 1000
     # Remove any leftover file with _id=1, then store a fresh one.
     await fs.delete(1)
     self.assertEqual(1, await fs.put(b'a' * n_bytes, _id=1))
     grid_out = await fs.get(1)
     handler = test.MockRequestHandler()
     await grid_out.stream_to_handler(handler)
     self.assertEqual(n_bytes, handler.n_written)
     await fs.delete(1)
# Example #7
async def put_gridfile():
    """Gzip some sample text into a temp file and store it in GridFS."""
    with tempfile.TemporaryFile() as tmp:
        # Compress ten identical lines into the temporary file.
        with gzip.GzipFile(mode='wb', fileobj=tmp) as gz:
            for _ in range(10):
                gz.write(b'Nonesuch nonsense\n')

        gfs = AsyncIOMotorGridFS(client.my_database)
        tmp.seek(0)  # Rewind so put() reads from the start of the file.
        await gfs.put(tmp,
                      filename='my_file',
                      content_type='text',
                      metadata={'compressed': True})
# Example #8
    def test_stream_to_handler(self):
        """stream_to_handler writes exactly the file's length for each size."""
        # TODO: Sort of Tornado-specific, but it does work with asyncio.
        class CountingHandler(object):
            # Minimal request-handler stand-in that just counts bytes.
            def __init__(self):
                self.n_written = 0

            def write(self, data):
                self.n_written += len(data)

            def flush(self):
                pass

        fs = AsyncIOMotorGridFS(self.db)

        # Cover empty, one-byte, small, and multi-chunk files.
        for size in (0, 1, 100, 100 * 1000):
            file_id = yield from fs.put(b'a' * size)
            grid_out = yield from fs.get(file_id)
            handler = CountingHandler()
            yield from grid_out.stream_to_handler(handler)
            self.assertEqual(size, handler.n_written)
            yield from fs.delete(file_id)
# Example #9
    async def test_iter_gridfs(self):
        """Iterate GridFS cursors and a gridfile's chunks."""
        gfs = AsyncIOMotorGridFS(self.db)

        async def cleanup():
            # Empty both GridFS collections so each pass starts fresh.
            await self.db.fs.files.delete_many({})
            await self.db.fs.chunks.delete_many({})

        await cleanup()

        # A cursor over an empty result set must yield nothing.
        async for _ in gfs.find({'_id': 1}):
            self.fail()

        data = b'data'

        for expected in 1, 2, 10:
            for _ in range(expected):
                await gfs.put(data, filename='filename')

            # batch_size(3) forces extra getMore batches during iteration.
            seen = 0
            async for _ in gfs.find({'filename': 'filename'}).batch_size(3):
                seen += 1

            self.assertEqual(seen, expected)
            await cleanup()

        # chunk_size=1 stores each byte of data as its own chunk.
        async with await gfs.new_file(_id=1, chunk_size=1) as f:
            await f.write(data)

        gout = await gfs.find_one({'_id': 1})
        chunks = [chunk async for chunk in gout]

        self.assertEqual(len(chunks), len(data))
        self.assertEqual(b''.join(chunks), data)
# Example #10
 def setUp(self):
     # Run the base fixture, reset test state on the event loop, then
     # create the GridFS handle used by the tests in this class.
     super().setUp()
     self.loop.run_until_complete(self._reset())
     self.fs = AsyncIOMotorGridFS(self.db)
# Example #11
 def setUp(self):
     # Run the base fixture, then create the GridFS handle used by the
     # tests in this class.
     super().setUp()
     self.fs = AsyncIOMotorGridFS(self.db)
# Example #12
    def test_put_unacknowledged(self):
        """Constructing GridFS on an unacknowledged (w=0) database must fail."""
        cx = self.asyncio_client(w=0)

        # The error is raised at construction time, before any put().
        with self.assertRaises(ConfigurationError):
            AsyncIOMotorGridFS(cx.motor_test)

        cx.close()