Example #1
File: key.py Project: fffonion/jss_sdk
 def multi_upload_fake(self, headers=None, local_file_path=None, part_size=commonconstants.DEFAULT_PART_SIZE):
     # stress helper: uploads randomly generated parts; local_file_path is
     # accepted for signature compatibility but never read
     import sys
     bucket_name_check(self.bucket.name)
     object_name_check(self.name)
     pre_upload = self.init_multi_upload()
     m = md5.new()
     file_size = sys.maxint * sys.maxint  # pretend the source is effectively unbounded
     num_part = file_size / part_size
     if file_size % part_size != 0:
         num_part = num_part + 1
     i = 0
     while True:
         i += 1
         retry_times = 0
         # part_size / 1024 random bytes repeated 1024 times yields part_size
         # bytes per part without paying for a fully random buffer
         data_content = os.urandom(part_size / 1024) * 1024
         m.update(data_content)
         print('part%d (%dM)' % (i, i * part_size / 1024 / 1024))
         while retry_times < commonconstants.DEFAULT_RETRY_COUNT:
             # part numbers are 1-based and i has already been incremented, so pass i
             response = self.upload_part(headers, data_content, i, pre_upload['UploadId'])
             if response.status / 100 == 2:
                 break
             else:
                 retry_times = retry_times + 1
                 time.sleep(1)
         if retry_times >= commonconstants.DEFAULT_RETRY_COUNT:
             raise Exception("-2, after retry %s, failed, multi upload part failed!" % retry_times)
     result = self.get_uploaded_parts(None, pre_upload['UploadId'])
     part_submit_json = self._generate_part_json(result)
     data = self.complete_multi_upload(None, pre_upload['UploadId'], json.dumps(part_submit_json))
     return data, m.hexdigest()
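A minimal driving sketch for this stress helper might look like the following. The `JssClient` constructor arguments match those used in the threaded examples below; `get_bucket` and `get_key` are hypothetical accessors standing in for however the SDK actually hands out a `Key` object.

    import jss.connection

    client = jss.connection.JssClient(access_key='AK', secret_key='SK',
                                      host='storage.example.com', port=80)
    # get_bucket/get_key are hypothetical; substitute the SDK's real way of
    # obtaining a Key bound to a bucket and an object name
    key = client.get_bucket('my-bucket').get_key('stress-object')
    # streams random parts until the server rejects one, then raises
    data, digest = key.multi_upload_fake()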
Example #2
File: key.py Project: fffonion/jss_sdk
 def multi_upload_fp_with_size(self, headers=None, fp=None, size=None, part_size=commonconstants.DEFAULT_PART_SIZE):
     bucket_name_check(self.bucket.name)
     object_name_check(self.name)
     m = md5.new()
     pre_upload = self.init_multi_upload()
     num_part = size / part_size
     if size % part_size != 0:
         num_part = num_part + 1
     for i in range(num_part):
         retry_times = 0
         data_content = fp.read(part_size)
         m.update(data_content)
         while retry_times < commonconstants.DEFAULT_RETRY_COUNT:
             # part numbers are 1-based
             response = self.upload_part(headers, data_content, i + 1, pre_upload['UploadId'])
             if response.status / 100 == 2:
                 break
             else:
                 retry_times = retry_times + 1
                 time.sleep(1)  # back off before retrying, as the sibling variants do
         if retry_times >= commonconstants.DEFAULT_RETRY_COUNT:
             raise Exception("-2, after retry %s, failed, multi upload part failed!" % retry_times)
     result = self.get_uploaded_parts(None, pre_upload['UploadId'])
     part_submit_json = self._generate_part_json(result)
     data = self.complete_multi_upload(None, pre_upload['UploadId'], json.dumps(part_submit_json))
     fp.close()
     return data, m.hexdigest()
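A usage sketch, reusing the hypothetical `key` object from the sketch under Example #1. Note that the method closes the file object itself, so the caller should not.

    import os

    path = '/tmp/archive.bin'  # hypothetical local file
    data, digest = key.multi_upload_fp_with_size(fp=open(path, 'rb'),
                                                 size=os.path.getsize(path))
    print('uploaded; local md5 %s' % digest)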
Example #3
File: key.py Project: fffonion/jss_sdk
 def upload_flow(self, headers=None, data=None, compute_MD5=True):
     bucket_name_check(self.bucket.name)
     object_name_check(self.name)
     if not headers:
         headers = {}
     headers['Content-Length'] = len(data)
     if compute_MD5:
         m = md5.new()
         m.update(data)
         headers['Content-MD5'] = m.hexdigest()
     headers['Content-Type'] = commonconstants.DEFAULT_CONTENT_TYPE
     self.bucket.jss_client.open_connection_to_put('PUT', self.bucket.name, self.name, headers)
     # send the in-memory buffer in DEFAULT_SEND_SIZE chunks
     offset = 0
     total_size = len(data)
     while offset < total_size:
         read_bytes = data[offset:offset + commonconstants.DEFAULT_SEND_SIZE]
         offset += commonconstants.DEFAULT_SEND_SIZE
         self.bucket.jss_client.send(read_bytes)
     response = self.bucket.jss_client.pool.getresponse()
     if response.status / 100 > 2:
         error_handler(response)
     else:
         return response
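Since `upload_flow` takes the whole payload in memory, a sketch is a one-liner; `key` is the hypothetical object from the Example #1 sketch.

    payload = 'hello, jss'  # an in-memory byte string
    response = key.upload_flow(data=payload)                     # with Content-MD5
    response = key.upload_flow(data=payload, compute_MD5=False)  # skip the digest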
Example #4
File: key.py Project: fffonion/jss_sdk
 def multi_upload(self, headers=None, local_file_path=None, part_size=commonconstants.DEFAULT_PART_SIZE, callback=None, prt=None):
     if not prt:
         prt = lambda *args, **kwargs: None
     bucket_name_check(self.bucket.name)
     object_name_check(self.name)
     pre_upload = self.init_multi_upload()
     fp = open(local_file_path, 'rb')
     m = md5.new()
     file_size = os.path.getsize(local_file_path)
     num_part = file_size / part_size
     if file_size % part_size != 0:
         num_part = num_part + 1

     check_part_trytime = commonconstants.DEFAULT_RETRY_COUNT
     leak_parts = [i for i in range(1, num_part + 1)]  # initially every part is outstanding
     while check_part_trytime > 0:
         for i in range(num_part):
             retry_times = 0
             if len(leak_parts) == num_part or (i + 1) in leak_parts:  # first pass, or re-sending a missing part
                 fp.seek(i * part_size)  # position at the start of part i + 1
                 data_content = fp.read(part_size)
                 if len(leak_parts) == num_part:  # only accumulate the MD5 on the first pass
                     m.update(data_content)
             else:
                 continue
             while retry_times < commonconstants.DEFAULT_RETRY_COUNT:
                 response = self.upload_part(headers, data_content, i + 1, pre_upload['UploadId'])
                 if response.status / 100 == 2:
                     break
                 else:
                     retry_times = retry_times + 1
                     time.sleep(1)
             if callback:
                 callback(i, num_part, part_size)
             if retry_times >= commonconstants.DEFAULT_RETRY_COUNT:
                 raise Exception("-2, after retry %s, failed, multi upload part failed!" % retry_times)
         result = self.get_uploaded_parts(None, pre_upload['UploadId'])
         if len(result['Part']) == num_part:  # every part is on the server
             break
         all_parts_finished = [int(p['PartNumber']) for p in result['Part']]
         leak_parts = [i for i in range(1, num_part + 1) if i not in all_parts_finished]
         prt('%d parts leaked:%s' % (len(leak_parts), ','.join(map(str, leak_parts))))

         check_part_trytime -= 1
         time.sleep(3)

     part_submit_json = self._generate_part_json(result)
     data = self.complete_multi_upload(None, pre_upload['UploadId'], json.dumps(part_submit_json))
     fp.close()

     return data, m.hexdigest()
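The `callback` hook receives the 0-based part index after each part is handled, which makes a simple progress printer easy to attach; a sketch, again assuming the hypothetical `key` object from Example #1:

    def progress(part_index, num_part, part_size):
        # part_index is 0-based, so part_index + 1 is the 1-based part number
        print('part %d of %d handled' % (part_index + 1, num_part))

    data, digest = key.multi_upload(local_file_path='/tmp/big.iso',
                                    callback=progress)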
Example #5
File: key.py Project: fffonion/jss_sdk
 def upload(self, headers=None, local_file_path=None, compute_MD5=True):
     """
     :type headers: dict
     :param headers: additional headers to pass along with the request to JSS
     :type local_file_path: string
     :param local_file_path: the path of the file you want to upload
     :type compute_MD5: bool
     :param compute_MD5: whether to send a Content-MD5 header computed from the file
     """
     bucket_name_check(self.bucket.name)
     object_name_check(self.name)
     fq = open(local_file_path, 'rb')
     fq.seek(0, os.SEEK_END)  # measure the file to set Content-Length
     filesize = fq.tell()
     fq.seek(0)
     suffix = local_file_path.split('.')[-1]
     if not headers:
         headers = {}
     headers['Content-Length'] = filesize
     if compute_MD5:
         fp = open(local_file_path, 'rb')  # binary mode so the digest matches the bytes sent
         md5_value = file_MD5(fp)
         headers['Content-MD5'] = md5_value
         fp.close()
     # pick a Content-Type from the file extension, falling back to the default
     if '.' + suffix in self.bucket.jss_client.map:
         headers['Content-Type'] = self.bucket.jss_client.map['.' + suffix]
     else:
         headers['Content-Type'] = commonconstants.DEFAULT_CONTENT_TYPE
     self.bucket.jss_client.open_connection_to_put('PUT', self.bucket.name, self.name, headers)
     fq.seek(0)
     l = fq.read(commonconstants.DEFAULT_SEND_SIZE)
     while len(l) > 0:
         self.bucket.jss_client.send(l)
         l = fq.read(commonconstants.DEFAULT_SEND_SIZE)
     fq.close()  # close on both the success and the error path
     response = self.bucket.jss_client.pool.getresponse()
     if response.status / 100 > 2:
         error_handler(response)
     else:
         return response
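A sketch of the common path, assuming the hypothetical `key` object from Example #1; disabling `compute_MD5` saves the extra read pass over the file:

    response = key.upload(local_file_path='/tmp/report.pdf')
    # skip the digest pass when the integrity header is not needed
    response = key.upload(local_file_path='/tmp/report.pdf', compute_MD5=False)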
Example #6
File: key.py Project: fffonion/jss_sdk
 def multi_thread_upload(self, headers=None, local_file_path=None, part_size=commonconstants.DEFAULT_PART_SIZE,
                          num_thread=10, max_part=10000, timeout=None):
     bucket_name_check(self.bucket.name)
     object_name_check(self.name)
     pre_upload = self.init_multi_upload()
     # slice the file into part-sized tasks shared by all worker threads
     task_queue = generate_all_task_slices(file_name=local_file_path, part_size=part_size, max_part=max_part)
     thread_list = []
     for i in range(num_thread):
         # each worker gets its own client so the threads do not share a connection
         jss_client = jss.connection.JssClient(access_key=self.bucket.jss_client.access_key, secret_key=self.bucket.jss_client.secret_key,
                      host=self.bucket.jss_client.host, port=self.bucket.jss_client.port, timeout=timeout)
         thread_curr = Thread_Upload_Part(jss_client=jss_client, bucket_name=self.bucket.name, object_name=self.name, upload_id=pre_upload['UploadId'], all_task_slices=task_queue)
         thread_list.append(thread_curr)
         thread_curr.start()
     for item in thread_list:
         item.join()
     result = self.get_uploaded_parts(None, pre_upload['UploadId'])
     part_submit_json = self._generate_part_json(result)
     data = self.complete_multi_upload(None, pre_upload['UploadId'], json.dumps(part_submit_json))
     return data
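A sketch for the threaded variant, assuming the hypothetical `key` object from Example #1; each worker opens its own connection, so `num_thread` trades sockets for throughput:

    data = key.multi_thread_upload(local_file_path='/tmp/big.iso',
                                   num_thread=4, timeout=30)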
Example #7
 def create_bucket(self, bucket_name, headers=None):
     """
     creates a new bucket

     :type bucket_name: string
     :param bucket_name: the name of the new bucket

     :type headers: dict
     :param headers: additional headers to pass along with the request to JSS
     """
     bucket_name_check(bucket_name)
     if not headers:
         headers = {}
     headers['Content-Length'] = 0  # always required, even when no headers were supplied
     response = self.make_request('PUT', bucket_name=bucket_name, headers=headers)
     if response.status / 100 > 2:
         error_handler(response)
     else:
         return response
Example #8
File: key.py Project: fffonion/jss_sdk
 def multi_thread_upload(self, headers=None, local_file_path=None, part_size=commonconstants.DEFAULT_PART_SIZE,
                          num_thread=10, max_part=10000, timeout=None, prt=None):
     if not prt:
         prt = lambda *args, **kwargs: None
     bucket_name_check(self.bucket.name)
     object_name_check(self.name)
     pre_upload = self.init_multi_upload()
     check_part_trytime = commonconstants.DEFAULT_RETRY_COUNT
     file_size = os.stat(local_file_path).st_size
     num_part = file_size / part_size
     if file_size % part_size != 0:
         num_part = num_part + 1
     leak_parts = [i for i in range(1, num_part + 1)]  # initially every part is outstanding
     while check_part_trytime > 0:
         task_queue = generate_all_task_slices(file_name=local_file_path, part_size=part_size, max_part=max_part, leak_parts=leak_parts)
         thread_list = []
         for i in range(num_thread):
             # each worker gets its own client so the threads do not share a connection
             jss_client = jss.connection.JssClient(access_key=self.bucket.jss_client.access_key, secret_key=self.bucket.jss_client.secret_key,
                          host=self.bucket.jss_client.host, port=self.bucket.jss_client.port, timeout=timeout)
             thread_curr = Thread_Upload_Part(jss_client=jss_client, bucket_name=self.bucket.name, object_name=self.name, upload_id=pre_upload['UploadId'], all_task_slices=task_queue)
             thread_list.append(thread_curr)
             thread_curr.start()
         for item in thread_list:
             item.join()
         result = self.get_uploaded_parts(None, pre_upload['UploadId'])
         prt(result['Part'])
         if len(result['Part']) == num_part:  # every part is on the server
             break
         all_parts_finished = [int(p['PartNumber']) for p in result['Part']]
         leak_parts = [i for i in range(1, num_part + 1) if i not in all_parts_finished]
         if len(leak_parts) == 0:
             break
         prt('%d parts leaked:%s' % (len(leak_parts), ','.join(map(str, leak_parts))))

         check_part_trytime -= 1
         time.sleep(3)
     part_submit_json = self._generate_part_json(result)
     data = self.complete_multi_upload(None, pre_upload['UploadId'], json.dumps(part_submit_json))
     return data
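This revision re-sends parts the server reports missing; a sketch passing a `prt` hook to watch that happen, assuming the hypothetical `key` object from Example #1:

    def report(*args):
        print(args)  # crude logger for the prt diagnostics hook

    data = key.multi_thread_upload(local_file_path='/tmp/big.iso',
                                   num_thread=8, prt=report)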
Example #9
File: key.py Project: fffonion/jss_sdk
 def upload_fp(self, headers=None, fp=None):
     bucket_name_check(self.bucket.name)
     object_name_check(self.name)
     fp.seek(0, os.SEEK_END)  # measure the stream to set Content-Length
     filesize = fp.tell()
     fp.seek(0)
     if not headers:
         headers = {}
     headers['Content-Length'] = filesize
     headers['Content-Type'] = commonconstants.DEFAULT_CONTENT_TYPE
     self.bucket.jss_client.open_connection_to_put('PUT', self.bucket.name, self.name, headers)
     l = fp.read(commonconstants.DEFAULT_SEND_SIZE)
     while len(l) > 0:
         self.bucket.jss_client.send(l)
         l = fp.read(commonconstants.DEFAULT_SEND_SIZE)
     response = self.bucket.jss_client.pool.getresponse()
     if response.status / 100 > 2:
         error_handler(response)
     else:
         return response
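Because `upload_fp` only needs a seekable file-like object, an in-memory buffer works as well as an open file; a sketch with the hypothetical `key` object from Example #1:

    from io import BytesIO

    buf = BytesIO(b'generated on the fly')  # any seekable file-like object
    response = key.upload_fp(fp=buf)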