def close(self):
    if not self._closed:
        self._closed = True
        # Wait for in-flight part uploads, then complete the multipart upload.
        # S3 requires the parts list to be ordered by PartNumber.
        self._collect_parts(wait=True)
        self.parts.sort(key=lambda item: item['PartNumber'])
        aws.client("s3").complete_multipart_upload(
            Bucket=self.bucket_name,
            Key=self.key,
            MultipartUpload=dict(Parts=self.parts),
            UploadId=self.mpu,
        )
def __init__(self, bucket_name: str, key: str):
    self.bucket_name = bucket_name
    self.key = key
    self.mpu = aws.client("s3").create_multipart_upload(Bucket=bucket_name, Key=key)['UploadId']
    self.parts: List[Dict[str, Union[str, int]]] = list()
    self._closed = False
    self._part_uploads = concurrency.async_set()
def _put_part(self, part: Part) -> Dict[str, Union[str, int]]:
    # S3 part numbers are 1-based; internal part numbering starts at 0.
    aws_part_number = part.number + 1
    resp = aws.client("s3").upload_part(
        Body=part.data,
        Bucket=self.bucket_name,
        Key=self.key,
        PartNumber=aws_part_number,
        UploadId=self.mpu,
    )
    return dict(ETag=resp['ETag'], PartNumber=aws_part_number)
def _put_part_copy(self, part_number: int, src_blob: S3Blob) -> Dict[str, Union[str, int]]:
    # S3 part numbers are 1-based; internal part numbering starts at 0.
    aws_part_number = part_number + 1
    size = src_blob.size()
    chunk_size = get_s3_multipart_chunk_size(size)
    # CopySourceRange is an inclusive byte range; clamp the final part to the object size.
    start_bytes = part_number * chunk_size
    end_bytes = start_bytes + chunk_size - 1
    if end_bytes >= size:
        end_bytes = size - 1
    resp = aws.client("s3").upload_part_copy(
        Bucket=self.bucket_name,
        Key=self.key,
        PartNumber=aws_part_number,
        CopySource=dict(Bucket=src_blob.bucket_name, Key=src_blob.key),
        CopySourceRange=f"bytes={start_bytes}-{end_bytes}",
        UploadId=self.mpu,
    )
    return dict(ETag=resp['CopyPartResult']['ETag'], PartNumber=aws_part_number)
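# Worked example of the CopySourceRange arithmetic above (illustrative numbers only;
# the real chunk size comes from get_s3_multipart_chunk_size):
#   size = 130 MiB (136314880 bytes), chunk_size = 64 MiB (67108864 bytes)
#   part_number 0 -> "bytes=0-67108863"
#   part_number 1 -> "bytes=67108864-134217727"
#   part_number 2 -> "bytes=134217728-136314879"   (end clamped to size - 1)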
def get_tags(self) -> Dict[str, str]:
    tagset = aws.client("s3").get_object_tagging(Bucket=self.bucket_name, Key=self.key)
    return {tag['Key']: tag['Value'] for tag in tagset['TagSet']}
def put_tags(self, tags: Dict[str, str]):
    aws_tags = [dict(Key=k, Value=v) for k, v in tags.items()]
    aws.client("s3").put_object_tagging(Bucket=self.bucket_name,
                                        Key=self.key,
                                        Tagging=dict(TagSet=aws_tags))
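# Usage sketch, assuming these methods live on a multipart-writer class; the class name
# "S3MultipartWriter" and the way parts are submitted are assumptions for illustration,
# not confirmed by this excerpt:
#
#   writer = S3MultipartWriter("my-bucket", "path/to/key")  # create_multipart_upload
#   ...                                                     # parts uploaded via _put_part / _put_part_copy
#   writer.close()                                          # sorts parts, complete_multipart_upload
#   writer.put_tags({"owner": "data-team"})                 # tag the finished object
#   assert writer.get_tags()["owner"] == "data-team"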