def get_mpu(bucket, id, key_name):
    """Build a MultiPartUpload handle for an already-initiated upload.

    Args:
        bucket: A boto Bucket object the upload belongs to.
        id: The multipart upload ID string. (Shadows the builtin ``id``;
            name kept for backward compatibility with existing callers.)
        key_name: The key name of the object being uploaded.

    Returns:
        A MultiPartUpload object, or None if construction fails.
    """
    try:
        mpu = MultiPartUpload(bucket)
        mpu.id = id
        mpu.key_name = key_name
        return mpu
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt and
        # SystemExit are no longer silently swallowed; the best-effort
        # "return None on failure" contract is preserved.
        return None
def mp_from_ids(mp_id, filepath, bucket):
    """Get the multipart upload from the bucket and multipart IDs.

    This allows us to reconstitute a connection to the upload
    from within multiprocessing functions.

    Args:
        mp_id: The multipart upload ID string.
        filepath: The key name of the object being uploaded.
        bucket: A boto Bucket object the upload belongs to.

    Returns:
        A MultiPartUpload handle with its key_name and id set.
    """
    # Removed commented-out legacy connection/bucket-lookup code; the
    # caller now supplies the bucket directly.
    mp = MultiPartUpload(bucket)
    mp.key_name = filepath
    mp.id = mp_id
    return mp
def get_request_from_state(upload_id, upload_state, bucket):
    """Fetch or create a MultiPartUpload object for an upload ID."""
    if upload_state['status'] != UploadStates.NEW:
        # Upload already in flight: reconstitute a handle from saved state.
        request = MultiPartUpload(bucket=bucket)
        request.id = upload_state['id']
        request.key_name = upload_state['object']
        return request

    # Brand-new upload: start it with the backing store and persist the
    # transition to IN_PROGRESS along with the assigned upload ID.
    request = bucket.initiate_multipart_upload(upload_state['object'])
    update_upload_state(upload_id, {
        'status': UploadStates.IN_PROGRESS,
        'object': upload_state['object'],
        'id': request.id,
    })
    return request
def initiate_multipart_upload(self, key_name, headers=None,
                              reduced_redundancy=False, metadata=None):
    """Start a multipart upload operation.

    :type key_name: string
    :param key_name: The name the key will have in the bucket once the
        multipart upload completes.

    :type headers: dict
    :param headers: Additional HTTP headers to send and store with the
        resulting key in S3.

    :type reduced_redundancy: boolean
    :param reduced_redundancy: The storage class of a multipart upload is
        fixed at initiation time, not per part — set this flag here if the
        resulting key should use REDUCED_REDUNDANCY storage.

    :type metadata: dict
    :param metadata: Metadata to set on the key that results from the
        multipart upload.
    """
    headers = {} if headers is None else headers
    if reduced_redundancy:
        rr_header = self.connection.provider.storage_class_header
        if rr_header:
            headers[rr_header] = 'REDUCED_REDUNDANCY'
        # TODO: what if the provider doesn't support reduced redundancy?
        # (see boto.s3.key.Key.set_contents_from_file)
    metadata = {} if metadata is None else metadata
    headers = boto.utils.merge_meta(headers, metadata,
                                    self.connection.provider)
    response = self.connection.make_request('POST', self.name, key_name,
                                            query_args='uploads',
                                            headers=headers)
    body = response.read()
    boto.log.debug(body)
    if response.status != 200:
        raise self.connection.provider.storage_response_error(
            response.status, response.reason, body)
    # Parse the InitiateMultipartUploadResult XML into an upload handle.
    result = MultiPartUpload(self)
    xml.sax.parseString(body, handler.XmlHandler(result, self))
    return result
def get_request_from_state(upload_id, upload_state, bucket):
    """Fetch or create a MultiPartUpload object for an upload ID.

    Args:
        upload_id: A string specifying the upload ID.
        upload_state: A dictionary containing upload state.
        bucket: A boto Bucket object.
    """
    key = upload_state['object']
    if upload_state['status'] == UploadStates.NEW:
        # First touch: initiate the upload and record its assigned ID.
        request = bucket.initiate_multipart_upload(key)
        upsert_upload_state(
            upload_id,
            {'status': UploadStates.IN_PROGRESS,
             'object': key,
             'id': request.id})
    else:
        # Rebuild a handle to an upload that was started previously.
        request = MultiPartUpload(bucket=bucket)
        request.id = upload_state['id']
        request.key_name = key
    return request
def initiate_multipart_upload(self, key_name, headers=None):
    """Start a multipart upload for *key_name* in this bucket.

    Issues a POST with the ``uploads`` query arg and parses the XML
    response into a MultiPartUpload handle.

    :type key_name: string
    :param key_name: The key the completed upload will produce.

    :type headers: dict
    :param headers: Optional additional HTTP headers for the request.
    """
    response = self.connection.make_request(
        'POST', self.name, key_name, query_args='uploads', headers=headers)
    body = response.read()
    boto.log.debug(body)
    if response.status == 200:
        upload = MultiPartUpload(self)
        xml.sax.parseString(body, handler.XmlHandler(upload, self))
        return upload
    raise self.connection.provider.storage_response_error(
        response.status, response.reason, body)
def mp_from_ids(mp_id: str, mp_keyname: str, bucket) -> MultiPartUpload:
    """Reconstitute a MultiPartUpload handle from its identifying pieces.

    Args:
        mp_id: The multipart upload ID string.
        mp_keyname: The key name of the object being uploaded.
        bucket: The boto Bucket object that owns the upload. (Previously
            annotated as ``str``, which contradicts the sibling helpers
            that pass a Bucket object into ``MultiPartUpload(bucket)`` —
            annotation dropped rather than mislead.)

    Returns:
        A MultiPartUpload with its key_name and id populated.
    """
    mp = MultiPartUpload(bucket)
    mp.key_name = mp_keyname
    mp.id = mp_id
    return mp