    async def test_multi_chunk(self, blob):
        stream = streams.StringStream(blob)

        cutoff_stream_one = streams.CutoffStream(stream, 10)
        data_one = await cutoff_stream_one.read()
        assert len(data_one) == 10
        assert data_one == blob[0:10]

        cutoff_stream_two = streams.CutoffStream(stream, 10)
        data_two = await cutoff_stream_two.read()
        assert len(data_two) == 10
        assert data_two == blob[10:20]

        remainder = await stream.read()
        assert len(remainder) == 30
        assert remainder == blob[20:50]
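The test above pins down the contract that the provider code below relies on: a CutoffStream wraps another stream, serves at most cutoff bytes (in one read or several), and leaves the remainder readable on the wrapped stream. A minimal sketch of a wrapper with that behavior, using illustrative attribute names and read signature rather than the real CutoffStream implementation:

class CutoffStreamSketch:
    """Illustrative only: serve at most ``cutoff`` bytes of a wrapped async stream."""

    def __init__(self, stream, cutoff):  # omitting ``cutoff`` raises TypeError, as in test_no_cutoff_exception
        self._stream = stream        # assumed attribute names, for illustration only
        self._remaining = cutoff     # bytes this wrapper is still allowed to serve

    async def read(self, n=-1):
        if self._remaining <= 0:
            return b''
        # clamp the request so the wrapper never reads past its cutoff
        ask = self._remaining if n < 0 else min(n, self._remaining)
        data = await self._stream.read(ask)
        self._remaining -= len(data)
        return data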
Example #2
    async def _upload_part(self, stream, path, session_upload_id, chunk_number,
                           chunk_size):
        """Uploads a single part/chunk of the given stream to S3.

        :param int chunk_number: sequence number of chunk. 1-indexed.
        """

        cutoff_stream = streams.CutoffStream(stream, cutoff=chunk_size)

        headers = {'Content-Length': str(chunk_size)}
        params = {
            'partNumber': str(chunk_number),
            'uploadId': session_upload_id,
        }
        upload_url = functools.partial(
            self.bucket.new_key(path.path).generate_url,
            settings.TEMP_URL_SECS,
            'PUT',
            query_parameters=params,
            headers=headers,
        )
        resp = await self.make_request(
            'PUT',
            upload_url,
            data=cutoff_stream,
            skip_auto_headers={'CONTENT-TYPE'},
            headers=headers,
            params=params,
            expects=(
                200,
                201,
            ),
            throws=exceptions.UploadError,
        )
        await resp.release()
        return resp.headers
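The docstring notes that parts are 1-indexed, and each call wraps the same parent stream in a fresh CutoffStream, so consecutive calls naturally consume consecutive slices of the file. A hedged sketch of a driver loop that could sit above this method; the _upload_parts name, the CHUNK_SIZE constant, and returning the raw response headers are assumptions for illustration, not this provider's actual code:

    async def _upload_parts(self, stream, path, session_upload_id):
        """Hypothetical driver: upload the stream to S3 one part at a time."""
        part_metadata = []
        # ceiling division without importing math; CHUNK_SIZE is an assumed class constant
        chunk_count = (stream.size + self.CHUNK_SIZE - 1) // self.CHUNK_SIZE
        for chunk_number in range(1, chunk_count + 1):  # parts are 1-indexed
            # the final part may be shorter than CHUNK_SIZE
            chunk_size = min(self.CHUNK_SIZE,
                             stream.size - (chunk_number - 1) * self.CHUNK_SIZE)
            headers = await self._upload_part(stream, path, session_upload_id,
                                              chunk_number, chunk_size)
            part_metadata.append(headers)
        return part_metadata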
Example #3
    async def _upload_part(self, stream: streams.BaseStream, chunk_size: int,
                           upload_args: dict) -> None:
        """Upload one part/chunk of the given stream to Dropbox

        "Append more data to an upload session. When the parameter close is set, this call will
        close the session. A single request should not upload more than 150 MB. ..."

        API Docs: https://www.dropbox.com/developers/documentation/http/documentation#files-upload_session-append
        """

        cutoff_stream = streams.CutoffStream(stream, cutoff=chunk_size)

        resp = await self.make_request(
            'POST',
            self._build_content_url('files', 'upload_session', 'append_v2'),
            headers={
                # ``Content-Length`` is required for ``asyncio`` to use inner chunked stream read
                'Content-Length': str(chunk_size),
                'Content-Type': 'application/octet-stream',
                'Dropbox-API-Arg': json.dumps(upload_args),
            },
            data=cutoff_stream,
            expects=(200, ),
            throws=core_exceptions.UploadError)

        await resp.release()
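The Dropbox-API-Arg header carries upload_args as JSON. Per the linked append_v2 docs, each append identifies the session and the byte offset reached so far, and may set close on the final part. A minimal sketch of building those arguments; the helper name and exact layout are assumptions based on the quoted documentation, not this provider's code:

    def _build_append_args(self, session_id: str, offset: int, is_last: bool) -> dict:
        """Hypothetical helper: arguments for one append_v2 call."""
        return {
            'cursor': {
                'session_id': session_id,   # returned when the upload session was started
                'offset': offset,           # bytes already uploaded in this session
            },
            'close': is_last,               # per the docs, closes the session on the final part
        }

A dict like this would then be passed as upload_args to _upload_part above for each successive chunk.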
Example #4
    async def _upload_part(self, stream: streams.BaseStream, part_id: str,
                           part_size: int, start_offset: int,
                           session_id: str) -> dict:
        """Upload one part/chunk of the given stream to Box.

        Box requires that the sha of the part be sent along in the headers of the request.  To do
        this WB must write the stream segment to disk before uploading.  The part sha is calculated
        as the tempfile is written.

        API Docs: https://developer.box.com/reference#upload-part
        """

        cutoff_stream = streams.CutoffStream(stream, cutoff=part_size)
        part_hasher_name = 'part-{}-sha1'.format(part_id)
        stream.add_writer(part_hasher_name,
                          streams.HashStreamWriter(hashlib.sha1))

        f = tempfile.TemporaryFile()
        chunk = await cutoff_stream.read(self.TEMP_CHUNK_SIZE)
        while chunk:
            f.write(chunk)
            chunk = await cutoff_stream.read(self.TEMP_CHUNK_SIZE)
        file_stream = streams.FileStreamReader(f)

        part_sha = stream.writers[part_hasher_name].digest
        part_sha_b64 = base64.standard_b64encode(part_sha).decode()
        stream.remove_writer(part_hasher_name)

        byte_range = self._build_range_header(
            (start_offset, start_offset + part_size - 1))
        content_range = str(byte_range).replace('=', ' ') + '/{}'.format(
            stream.size)

        async with self.request(
                'PUT',
                self._build_upload_url('files', 'upload_sessions', session_id),
                headers={
                    # ``Content-Length`` is required for ``asyncio`` to use inner chunked stream read
                    'Content-Length': str(part_size),
                    'Content-Range': content_range,
                    'Content-Type': 'application/octet-stream',
                    'Digest': 'sha={}'.format(part_sha_b64),
                },
                data=file_stream,
                expects=(201, 200),
                throws=exceptions.UploadError,
        ) as resp:
            data = await resp.json()

        f.close()
        return data['part']
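Two header values in the request above are easy to get wrong: Digest is the base64 encoding of the raw SHA-1 bytes of just this part, and Content-Range positions the part within the whole file (the provider derives it from _build_range_header and stream.size). A small standalone illustration with made-up sizes:

import base64
import hashlib

part_bytes = b'x' * 8                      # stand-in for the bytes of one part
part_sha = hashlib.sha1(part_bytes).digest()
digest_header = 'sha={}'.format(base64.standard_b64encode(part_sha).decode())

start_offset, part_size, total_size = 0, 8, 50
content_range = 'bytes {}-{}/{}'.format(start_offset,
                                        start_offset + part_size - 1,
                                        total_size)
# content_range == 'bytes 0-7/50'; both values mirror what _upload_part sends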
    async def test_subchunk(self, blob):
        stream = streams.StringStream(blob)
        cutoff_stream = streams.CutoffStream(stream, 20)

        subchunk_one = await cutoff_stream.read(7)
        assert len(subchunk_one) == 7
        assert subchunk_one == blob[0:7]

        subchunk_two = await cutoff_stream.read(7)
        assert len(subchunk_two) == 7
        assert subchunk_two == blob[7:14]

        subchunk_three = await cutoff_stream.read(7)
        assert len(subchunk_three) == 6
        assert subchunk_three == blob[14:20]

        subchunk_four = await cutoff_stream.read(7)
        assert len(subchunk_four) == 0
        assert subchunk_four == b''

        remainder = await stream.read()
        assert len(remainder) == 30
        assert remainder == blob[20:50]
    def test_no_cutoff_exception(self, blob):
        stream = streams.StringStream(blob)
        with pytest.raises(TypeError):
            streams.CutoffStream(stream)

    async def test_one_chunk(self, blob):
        stream = streams.StringStream(blob)
        cutoff_stream = streams.CutoffStream(stream, len(blob))
        data = await cutoff_stream.read()
        assert len(data) == len(blob)
        assert data == blob