def run(self): for part in self.part_msg_list: part_number = str(part[0]) if len(part) == 5: bucket = self.bucket object = self.object partsize = part[3] offset = part[4] retry_times = self.retry_times while True: try: if self.uploaded_part_map.has_key(part_number): md5 = part[2] if self.uploaded_part_map[part_number].replace( '"', "").upper() == md5.upper(): break if retry_times <= 0: break res = self.oss.upload_part_from_file_given_pos( bucket, object, self.file_path, offset, partsize, self.upload_id, part_number) if res.status != 200: retry_times = retry_times - 1 time.sleep(1) else: etag = res.getheader("etag") if etag: self.uploaded_part_map[part_number] = etag break except: retry_times = retry_times - 1 time.sleep(1) else: print "not expected part for multiupload", part pass
def run(self): for part in self.part_msg_list: if len(part) == 5: bucket = self.bucket file_name = part[1] if isinstance(file_name, unicode): filename = file_name.encode('utf-8') object_name = file_name retry_times = self.retry_times is_skip = False while True: try: if retry_times <= 0: break res = self.oss.head_object(bucket, object_name) if res.status == 200: header_map = convert_header2map(res.getheaders()) etag = safe_get_element("etag", header_map) md5 = part[2] if etag.replace('"', "").upper() == md5.upper(): is_skip = True break except: retry_times = retry_times - 1 time.sleep(1) if is_skip: continue partsize = part[3] offset = part[4] retry_times = self.retry_times while True: try: if retry_times <= 0: break res = self.oss.put_object_from_file_given_pos( bucket, object_name, self.file_path, offset, partsize) if res.status != 200: print "upload ", file_name, "failed!", " ret is:", res.status print "headers", res.getheaders() retry_times = retry_times - 1 time.sleep(1) else: break except: retry_times = retry_times - 1 time.sleep(1) else: print "ERROR! part", part, " is not as expected!"
def run(self): for part in self.part_msg_list: if len(part) == 5: bucket = self.bucket file_name = part[1] if isinstance(file_name, unicode): filename = file_name.encode('utf-8') object_name = file_name retry_times = self.retry_times is_skip = False while True: try: if retry_times <= 0: break res = self.oss.head_object(bucket, object_name) if res.status == 200: header_map = convert_header2map(res.getheaders()) etag = safe_get_element("etag", header_map) md5 = part[2] if etag.replace('"', "").upper() == md5.upper(): is_skip = True break except: retry_times = retry_times - 1 time.sleep(1) if is_skip: continue partsize = part[3] offset = part[4] retry_times = self.retry_times while True: try: if retry_times <= 0: break res = self.oss.put_object_from_file_given_pos(bucket, object_name, self.file_path, offset, partsize) if res.status != 200: print "upload ", file_name, "failed!"," ret is:", res.status print "headers", res.getheaders() retry_times = retry_times - 1 time.sleep(1) else: break except: retry_times = retry_times - 1 time.sleep(1) else: print "ERROR! part", part , " is not as expected!"
def multi_upload_file2(oss, bucket, object, filename, upload_id, thread_num=10, max_part_num=10000, retry_times=5, headers=None, params=None): if not upload_id: print "empty upload_id" return False filename = convert_utf8(filename) part_msg_list = [] part_msg_list = split_large_file(filename, object, max_part_num) queue = Queue.Queue(0) uploaded_part_map = {} part_msg_xml = create_part_xml(part_msg_list) each_part_retry_times = 1 for i in range(retry_times): tmp_uploaded_part_map = get_part_map(oss, bucket, object, upload_id) if tmp_uploaded_part_map: for k, v in tmp_uploaded_part_map.items(): uploaded_part_map[k] = v thread_pool = [] for part in part_msg_list: if len(part) == 5: part_number = str(part[0]) md5 = part[2] is_need_upload = True if uploaded_part_map.has_key(part_number): md5 = part[2] if uploaded_part_map[part_number].replace('"', "").upper() == md5.upper(): is_need_upload = False continue if is_need_upload: queue.put((upload_part, oss, bucket, object, upload_id, filename, part)) else: print "not expected part", part for i in xrange(thread_num): current = UploadPartWorker2(each_part_retry_times, queue) thread_pool.append(current) current.start() queue.join() for item in thread_pool: item.join() res = oss.complete_upload(bucket, object, upload_id, part_msg_xml, headers, params) if res.status == 200: return res raise Exception("-3, after retry %s, failed, multi upload file failed! upload_id:%s" % (retry_times, upload_id))
def run(self):
    """Upload each part of a multipart upload, logging progress via 'log'.

    Each part tuple has 5 fields: (part_number, name, md5, partsize, offset).
    Parts whose MD5 matches self.uploaded_part_map are skipped; failed
    attempts are retried up to self.retry_times with a 1s pause.
    """
    for part in self.part_msg_list:
        part_number = str(part[0])
        if len(part) != 5:
            log.error("ERROR! part %s is not as expected!" % part)
            continue
        bucket = self.bucket
        object_name = self.object  # renamed: 'object' shadowed the builtin
        # Skip parts already uploaded with an identical MD5.
        if part_number in self.uploaded_part_map:
            md5 = part[2]
            if self.uploaded_part_map[part_number].replace('"', "").upper() == md5.upper():
                continue
        partsize = part[3]
        offset = part[4]
        retry_times = self.retry_times
        while True:
            try:
                if retry_times <= 0:
                    break
                res = self.oss.upload_part_from_file_given_pos(
                    bucket, object_name, self.file_path, offset, partsize,
                    self.upload_id, part_number)
                if res.status != 200:
                    log.warn(
                        "Upload %s/%s from %s, failed! ret is:%s." %
                        (bucket, object_name, self.file_path, res.status))
                    log.warn("headers:%s" % res.getheaders())
                    retry_times = retry_times - 1
                    time.sleep(1)
                else:
                    log.info(
                        "Upload %s/%s from %s, OK! ret is:%s." %
                        (bucket, object_name, self.file_path, res.status))
                    break
            except Exception:
                # Was a bare 'except:'; narrowing keeps SystemExit and
                # KeyboardInterrupt from being swallowed by the retry loop.
                retry_times = retry_times - 1
                time.sleep(1)
def run(self): for part in self.part_msg_list: if len(part) == 5: bucket = self.bucket file_name = part[1] object_name = file_name res = self.oss.head_object(bucket, object_name) if res.status == 200: header_map = convert_header2map(res.getheaders()) etag = safe_get_element("etag", header_map) md5 = part[2] if etag.replace('"', "").upper() == md5.upper(): continue partsize = part[3] offset = part[4] res = self.oss.put_object_from_file_given_pos(bucket, object_name, self.file_path, offset, partsize) if res.status != 200: print "upload ", file_name, "failed!"," ret is:", res.status print "headers", res.getheaders() else: print "ERROR! part", part , " is not as expected!"
def run(self): for part in self.part_msg_list: part_number = str(part[0]) if len(part) == 5: bucket = self.bucket object = self.object if self.uploaded_part_map.has_key(part_number): md5 = part[2] if self.uploaded_part_map[part_number].replace( '"', "").upper() == md5.upper(): continue partsize = part[3] offset = part[4] retry_times = 5 while True: try: if retry_times <= 0: break l.acquire() res = self.oss.upload_part_from_file_given_pos( bucket, object, self.file_path, offset, partsize, self.upload_id, part_number) l.release() if res.status != 200: print "upload ", bucket, object, self.file_path, "failed!", " ret is:", res.status print "headers", res.getheaders() retry_times = retry_times - 1 time.sleep(1) else: break except: retry_times = retry_times - 1 time.sleep(1) else: print "ERROR! part", part, " is not as expected!"
def run(self): for part in self.part_msg_list: part_number = str(part[0]) if len(part) == 5: bucket = self.bucket object = self.object partsize = part[3] offset = part[4] retry_times = self.retry_times while True: try: if self.uploaded_part_map.has_key(part_number): md5 = part[2] if self.uploaded_part_map[part_number].replace('"', "").upper() == md5.upper(): break if retry_times <= 0: break res = self.oss.upload_part_from_file_given_pos(bucket, object, self.file_path, offset, partsize, self.upload_id, part_number) if res.status != 200: retry_times = retry_times - 1 time.sleep(1) else: etag = res.getheader("etag") if etag: self.uploaded_part_map[part_number] = etag break except: retry_times = retry_times - 1 time.sleep(1) else: print "not expected part for multiupload", part pass
def run(self):
    """Upload each part of a multipart upload, logging progress via 'log'.

    Each part tuple has 5 fields: (part_number, name, md5, partsize, offset).
    Parts whose MD5 matches self.uploaded_part_map are skipped; failed
    attempts are retried up to self.retry_times with a 1s pause.
    """
    for part in self.part_msg_list:
        part_number = str(part[0])
        if len(part) != 5:
            log.error("ERROR! part %s is not as expected!" % part)
            continue
        bucket = self.bucket
        object_name = self.object  # renamed: 'object' shadowed the builtin
        # Skip parts already uploaded with an identical MD5.
        if part_number in self.uploaded_part_map:
            md5 = part[2]
            if self.uploaded_part_map[part_number].replace('"', "").upper() == md5.upper():
                continue
        partsize = part[3]
        offset = part[4]
        retry_times = self.retry_times
        while True:
            try:
                if retry_times <= 0:
                    break
                res = self.oss.upload_part_from_file_given_pos(
                    bucket, object_name, self.file_path, offset, partsize,
                    self.upload_id, part_number)
                if res.status != 200:
                    log.warn("Upload %s/%s from %s, failed! ret is:%s." %
                             (bucket, object_name, self.file_path, res.status))
                    log.warn("headers:%s" % res.getheaders())
                    retry_times = retry_times - 1
                    time.sleep(1)
                else:
                    log.info("Upload %s/%s from %s, OK! ret is:%s." %
                             (bucket, object_name, self.file_path, res.status))
                    break
            except Exception:
                # Was a bare 'except:'; narrowing keeps SystemExit and
                # KeyboardInterrupt from being swallowed by the retry loop.
                retry_times = retry_times - 1
                time.sleep(1)
def run(self): for part in self.part_msg_list: part_number = str(part[0]) if len(part) == 5: bucket = self.bucket object = self.object if self.uploaded_part_map.has_key(part_number): md5 = part[2] if self.uploaded_part_map[part_number].replace('"', "").upper() == md5.upper(): continue partsize = part[3] offset = part[4] retry_times = 5 while True: try: if retry_times <= 0: break l.acquire() res = self.oss.upload_part_from_file_given_pos(bucket, object, self.file_path, offset, partsize, self.upload_id, part_number) l.release() if res.status != 200: print "upload ", bucket, object, self.file_path, "failed!"," ret is:", res.status print "headers", res.getheaders() retry_times = retry_times - 1 time.sleep(1) else: break except: retry_times = retry_times - 1 time.sleep(1) else: print "ERROR! part", part , " is not as expected!"