def _handle_upload(self, body):
    """Upload ``body`` to S3 as a single object and verify its integrity.

    The destination bucket and key are parsed from ``self.dest``.  After
    the PUT completes, the quoted ETag returned by S3 is unwrapped and
    checked against the MD5 of the body.
    """
    bucket, key = find_bucket_key(self.dest)
    put_args = {
        'Bucket': bucket,
        'Key': key,
        'Body': body,
    }
    self._handle_object_params(put_args)
    response = self.client.put_object(**put_args)
    # The ETag header value is wrapped in double quotes; strip them.
    etag = response['ETag'][1:-1]
    # Rewind so the integrity check can re-read the stream from the start.
    body.seek(0)
    check_etag(etag, body)
def _handle_upload(self, body):
    """Perform a single-object PutObject for ``body`` and verify the ETag.

    The destination bucket/key pair is derived from ``self.dest``; extra
    request parameters are filled in by ``_handle_object_params``.
    """
    bucket, key = find_bucket_key(self.dest)
    request = {
        'endpoint': self.endpoint,
        'bucket': bucket,
        'key': key,
        'body': body,
    }
    self._handle_object_params(request)
    response_data, http = operate(self.service, 'PutObject', request)
    # Strip the surrounding double quotes from the returned ETag.
    etag = response_data['ETag'][1:-1]
    # Rewind the stream so check_etag can hash it from the beginning.
    body.seek(0)
    check_etag(etag, body)
def __call__(self):
    """Worker entry point: upload one part of a multipart upload.

    Pulls a ``(filename, upload_id, part_number, part_size)`` tuple from
    ``part_queue``, uploads that part via ``UploadPart``, and pushes the
    resulting ``{'ETag', 'PartNumber'}`` dict onto ``dest_queue`` for the
    eventual CompleteMultipartUpload.  On a connection error or MD5
    mismatch the part is re-queued and a copy of this task is resubmitted
    so the part is retried.  Returns silently if the queue is empty.
    """
    try:
        part_info = self.part_queue.get(True, QUEUE_TIMEOUT_GET)
        with self.counter_lock:
            self.part_counter.count += 1
        try:
            filename = part_info[0]
            upload_id = part_info[1]
            part_number = part_info[2]
            part_size = part_info[3]
            body = self.read_part(filename, part_number, part_size)
            bucket, key = find_bucket_key(filename.dest)
            # Python 2.6 cannot stream a bytearray request body; fall
            # back to StringIO there.
            if sys.version_info[:2] == (2, 6):
                stream_body = StringIO(body)
            else:
                stream_body = bytearray(body)
            params = {'endpoint': self.endpoint, 'bucket': bucket,
                      'key': key, 'part_number': str(part_number),
                      'upload_id': upload_id, 'body': stream_body}
            response_data, http = operate(self.service, 'UploadPart',
                                          params)
            etag = retrieve_http_etag(http)
            check_etag(etag, body)
            parts = {'ETag': etag, 'PartNumber': part_number}
            self.dest_queue.put((part_number, parts))
            print_str = print_operation(filename, 0)
            print_result = {'result': print_str}
            total = int(math.ceil(filename.size / float(part_size)))
            part_str = {'total': total}
            print_result['part'] = part_str
            self.printQueue.put(print_result)
        except requests.ConnectionError as e:
            connect_error = str(e)
            LOGGER.debug("%s part upload failure: %s" %
                         (part_info[0].src, connect_error))
            # Requeue the part and schedule a fresh copy of this task
            # so the upload is retried.
            self.part_queue.put(part_info)
            self.executer.submit(copy.copy(self))
        except MD5Error:
            # BUGFIX: the adjacent string literals previously
            # concatenated to "Datawas corrupted" (missing space).
            LOGGER.debug("%s part upload failure: Data "
                         "was corrupted" % part_info[0].src)
            self.part_queue.put(part_info)
            self.executer.submit(copy.copy(self))
        except Exception as e:
            # Best-effort worker thread: log and fall through so the
            # queue bookkeeping below still runs.
            LOGGER.debug('%s' % str(e))
        self.part_queue.task_done()
        with self.counter_lock:
            self.part_counter.count -= 1
    except Queue.Empty:
        pass
def upload(self):
    """
    Redirects the file to the multipart upload function if the file is
    large.  If it is small enough, it puts the file as an object in s3.
    """
    with open(self.src, 'rb') as stream:
        bucket, key = find_bucket_key(self.dest)
        request = {
            'endpoint': self.endpoint,
            'bucket': bucket,
            'key': key,
            'body': stream,
        }
        self._handle_object_params(request)
        response_data, http = operate(self.service, 'PutObject', request)
        # The ETag comes back wrapped in double quotes; unwrap it.
        etag = response_data['ETag'][1:-1]
        # Rewind so the integrity check re-reads the file from the start.
        stream.seek(0)
        check_etag(etag, stream)
def upload(self):
    """
    Redirects the file to the multipart upload function if the file is
    large.  If it is small enough, it puts the file as an object in s3.
    """
    contents = read_file(self.src)
    bucket, key = find_bucket_key(self.dest)
    # Python 2.6 cannot stream a bytearray request body; use StringIO there.
    if sys.version_info[:2] == (2, 6):
        payload = StringIO(contents)
    else:
        payload = bytearray(contents)
    params = {'endpoint': self.endpoint, 'bucket': bucket, 'key': key}
    # Zero-length files are uploaded with no body parameter at all.
    if contents:
        params['body'] = payload
    self._handle_object_params(params)
    response_data, http = operate(self.service, 'PutObject', params)
    etag = retrieve_http_etag(http)
    check_etag(etag, contents)
def save_file(filename, response_data, last_update):
    """
    This writes to the file upon downloading.  It reads the data in the
    response.  Makes a new directory if needed and then writes the
    data to the file.  It also modifies the last modified time to that
    of the S3 object.

    :param filename: local path to write the object's contents to.
    :param response_data: GetObject response dict with 'Body' (readable
        stream) and 'ETag' (quoted MD5) entries.
    :param last_update: datetime of the S3 object's last modification;
        applied to the local file's atime/mtime.
    """
    data = response_data['Body'].read()
    # The ETag header value is wrapped in double quotes; strip them.
    etag = response_data['ETag'][1:-1]
    # Verify integrity before touching the filesystem.
    check_etag(etag, data)
    d = os.path.dirname(filename)
    try:
        # Guard on ``d``: dirname('') would make makedirs raise
        # pointlessly for a bare filename in the current directory.
        if d and not os.path.exists(d):
            os.makedirs(d)
    except OSError:
        # Narrowed from ``except Exception``: only filesystem errors are
        # expected here (e.g. another worker created the directory
        # between the exists() check and makedirs()).  Any real problem
        # surfaces in the open() below.
        pass
    with open(filename, 'wb') as out_file:
        out_file.write(data)
    # Match the local file's timestamps to the S3 object's.
    last_update_tuple = last_update.timetuple()
    mod_timestamp = time.mktime(last_update_tuple)
    os.utime(filename, (int(mod_timestamp), int(mod_timestamp)))
def upload(self):
    """
    Redirects the file to the multipart upload function if the file is
    large.  If it is small enough, it puts the file as an object in s3.
    """
    if self.is_multi:
        self.multi_upload()
        return
    contents = read_file(self.src)
    bucket, key = find_bucket_key(self.dest)
    # Python 2.6 cannot stream a bytearray request body; use StringIO there.
    if sys.version_info[:2] == (2, 6):
        payload = StringIO(contents)
    else:
        payload = bytearray(contents)
    params = {'endpoint': self.endpoint, 'bucket': bucket, 'key': key}
    # Zero-length files are uploaded with no body parameter at all.
    if contents:
        params['body'] = payload
    if self.parameters['acl']:
        params['acl'] = self.parameters['acl'][0]
    response_data, http = operate(self.service, 'PutObject', params)
    etag = retrieve_http_etag(http)
    check_etag(etag, contents)
def upload(self):
    """
    Redirects the file to the multipart upload function if the file is
    large.  If it is small enough, it puts the file as an object in s3.
    """
    if self.is_multi:
        self.multi_upload()
        return
    contents = read_file(self.src)
    bucket, key = find_bucket_key(self.dest)
    # Python 2.6 cannot stream a bytearray request body; use StringIO there.
    if sys.version_info[:2] == (2, 6):
        payload = StringIO(contents)
    else:
        payload = bytearray(contents)
    params = {'endpoint': self.endpoint, 'bucket': bucket, 'key': key}
    # Zero-length files are uploaded with no body parameter at all.
    if contents:
        params['body'] = payload
    if self.parameters['acl']:
        params['acl'] = self.parameters['acl'][0]
    if self.parameters['guess_mime_type']:
        self._inject_content_type(params, self.src)
    response_data, http = operate(self.service, 'PutObject', params)
    etag = retrieve_http_etag(http)
    check_etag(etag, contents)