from io import BytesIO

from werkzeug.datastructures import FileStorage


def test_Transfer_preprocess(transf):
    @transf.preprocessor
    def to_lower(filehandle, meta):
        # replace the stream with a lowercased copy of its contents
        filehandle.stream = BytesIO(filehandle.stream.read().lower())
        return filehandle

    source = FileStorage(stream=BytesIO(b'Hello World'))
    transf._preprocess(source, {})
    source.seek(0)
    assert source.read() == b'hello world'
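
# A minimal sketch of the Transfer plumbing the test above assumes
# (hypothetical, not necessarily the library's actual implementation):
# `preprocessor` registers a callable and `_preprocess` threads the
# filehandle through each registered callable in order.
class Transfer:
    def __init__(self):
        self._preprocessors = []

    def preprocessor(self, func):
        # register func and return it unchanged so it remains usable directly
        self._preprocessors.append(func)
        return func

    def _preprocess(self, filehandle, meta):
        for func in self._preprocessors:
            filehandle = func(filehandle, meta)
        return filehandle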
import os
from typing import Iterator, Optional, Tuple
from uuid import uuid4

from werkzeug.datastructures import FileStorage
from werkzeug.utils import secure_filename


def upload_stream(self, stream: FileStorage, upload_dir: Optional[str] = None,
                  chunk_size: int = 5 * 1024 * 1024,
                  progress: bool = False) -> Iterator[Tuple[int, str]]:
    """Upload a werkzeug FileStorage to S3 as a multipart upload.

    Yields ``(parts_uploaded, public_url)`` tuples: once per part when
    ``progress`` is True, and once more after the upload completes.
    ``chunk_size`` defaults to 5 MiB, the S3 minimum part size.
    """
    if not upload_dir:
        upload_dir = uuid4().hex
    # keep the original extension, but randomize the object name
    ext = os.path.splitext(secure_filename(stream.filename))[1]
    s3_file_path = os.path.join(upload_dir, uuid4().hex + ext)
    obj = self.resource.Object(self.bucket, s3_file_path)
    mp = obj.initiate_multipart_upload(ACL='public-read')  # or 'bucket-owner-full-control'

    parts_etag = {}
    chunk_idx = 1  # S3 part numbers are 1-indexed
    while True:
        chunk = stream.read(chunk_size)
        if not chunk:
            break
        part = mp.Part(chunk_idx)
        result = part.upload(Body=chunk)
        parts_etag[chunk_idx] = result['ETag']
        chunk_idx += 1
        if progress:
            yield chunk_idx - 1, self._endpoint(s3_file_path)

    # S3 needs the ETag of every part to finalize the upload
    mp.complete(MultipartUpload={
        'Parts': [{'ETag': parts_etag[idx], 'PartNumber': idx}
                  for idx in range(1, chunk_idx)]
    })
    yield chunk_idx - 1, self._endpoint(s3_file_path)
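
# Usage sketch (hypothetical names): `storage` stands in for whatever
# instance defines upload_stream. A 12 MiB payload splits into three
# parts at the default 5 MiB chunk size, so with progress=True this
# yields after each part and once more after completion.
from io import BytesIO
from werkzeug.datastructures import FileStorage

source = FileStorage(stream=BytesIO(b'x' * 12 * 1024 * 1024),
                     filename='payload.bin')
for parts_done, url in storage.upload_stream(source, progress=True):
    print(f'uploaded {parts_done} part(s) -> {url}')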