Exemplo n.º 1
0
        def _resumable_upload():
            """Run one resumable-upload attempt of the disk image.

            Returns:
                bool: True when oss2.resumable_upload completed; False when
                the server no longer knows the upload id (NoSuchUpload), in
                which case local upload state is cleaned up first.

            Raises:
                RuntimeError: when the local disk file does not exist.
            """
            # Random 6-digit suffix keeps the object name unique per attempt.
            self.uploaded_disk_name = 'bakery-' + os.path.basename(self.disk_to_upload) + '-' + \
                                      ''.join(random.choices(string.digits, k=6))
            AlibabaDisk.iter += 1
            LOGGER.info('Upload iteration number %d', AlibabaDisk.iter)
            LOGGER.info('Uploading %s as %s', self.disk_to_upload,
                        self.uploaded_disk_name)
            start_time = time.time()
            # NOTE(review): fixed 1s pause before every attempt — presumably
            # simple throttling between retries; confirm.
            time.sleep(1)
            result = False
            try:
                # Checkpoint records live in the working dir so an interrupted
                # upload can be resumed.
                resumable_store = oss2.resumable.ResumableStore(
                    root=self.working_dir)
                oss2.resumable_upload(self.bucket,
                                      self.uploaded_disk_name,
                                      self.disk_to_upload,
                                      store=resumable_store,
                                      num_threads=number_of_threads)
                result = True
            except FileNotFoundError as exc:
                LOGGER.exception(exc)
                raise RuntimeError('Could not find file to upload: {}'.format(
                    self.disk_to_upload))
            except oss2.exceptions.NoSuchUpload as exc:
                # Upload id vanished server-side; log and fall through with
                # result=False so the cleanup below runs.
                LOGGER.error('Upload failed. UploadId: %s',
                             exc.details['UploadId'])
                LOGGER.exception(exc)

            LOGGER.info('Iteration %d of upload took %d seconds',
                        AlibabaDisk.iter,
                        time.time() - start_time)
            if not result:
                self.upload_cleanup()
            return result
    def test_progress(self):
        """Progress callback must be monotonic and cover the whole file."""
        tracker = {'previous': -1, 'ncalled': 0}

        def progress_callback(bytes_consumed, total_bytes):
            # Progress is monotonically increasing and never exceeds the total.
            self.assertTrue(bytes_consumed <= total_bytes)
            self.assertTrue(bytes_consumed > tracker['previous'])
            tracker['previous'] = bytes_consumed
            tracker['ncalled'] += 1

        key = random_string(16)
        content = random_bytes(5 * 100 * 1024 + 100)
        pathname = self._prepare_temp_file(content)

        part_size = 100 * 1024
        # Multipart path: the threshold is below the content length.
        oss2.resumable_upload(self.bucket, key, pathname,
                              multipart_threshold=200 * 1024,
                              part_size=part_size,
                              progress_callback=progress_callback,
                              num_threads=1)
        self.assertEqual(tracker['previous'], len(content))
        self.assertEqual(tracker['ncalled'],
                         oss2.utils.how_many(len(content), part_size) + 1)

        # put_object path: the threshold is above the content length.
        tracker = {'previous': -1, 'ncalled': 0}
        oss2.resumable_upload(self.bucket, key, pathname,
                              multipart_threshold=len(content) + 100,
                              progress_callback=progress_callback)
        self.assertEqual(tracker['previous'], len(content))

        self.bucket.delete_object(key)
Exemplo n.º 3
0
 def upload_files(self, objname, localfile):
     """Resumable-upload *localfile* as *objname*, keeping checkpoints
     under the app's configured upload directory."""
     checkpoint_store = oss2.ResumableStore(
         root=current_app.config['UPLOADED_FILE_DEST'])
     oss2.resumable_upload(self.Bucket, objname, localfile,
                           store=checkpoint_store)
Exemplo n.º 4
0
    def __test_interrupt(self, content_size, failed_part_number,
                         expected_unfinished=0,
                         modify_record_func=None):
        """Interrupt a multipart upload at a chosen part, then resume it.

        A patched Bucket.upload_part raises RuntimeError on
        *failed_part_number*, aborting the first resumable_upload.  An
        optional *modify_record_func* may then tamper with the on-disk
        checkpoint record before the second, unpatched upload resumes.
        Finally the number of still-unfinished multipart uploads for the
        key is asserted against *expected_unfinished*.
        """
        orig_upload_part = oss2.Bucket.upload_part

        def upload_part(self, key, upload_id, part_number, data):
            # Fail only on the designated part; delegate everything else.
            if part_number == failed_part_number:
                raise RuntimeError
            else:
                return orig_upload_part(self, key, upload_id, part_number, data)

        key = 'resume-' + random_string(32)
        content = random_bytes(content_size)

        pathname = self._prepare_temp_file(content)

        # First attempt: must blow up on the injected failure.
        with patch.object(oss2.Bucket, 'upload_part', side_effect=upload_part, autospec=True) as mock_upload_part:
            self.assertRaises(RuntimeError, oss2.resumable_upload, self.bucket, key, pathname,
                              multipart_threshold=0,
                              part_size=100 * 1024)

        if modify_record_func:
            modify_record_func(oss2.resumable.make_upload_store(), self.bucket.bucket_name, key, pathname)

        # Second attempt resumes from the checkpoint and should succeed.
        oss2.resumable_upload(self.bucket, key, pathname, multipart_threshold=0, part_size=100 * 1024)

        self.assertEqual(len(list(oss2.ObjectUploadIterator(self.bucket, key))), expected_unfinished)
Exemplo n.º 5
0
    def resumableUpload(self, path):
        """Resumable upload of a local file to OSS.

        Files smaller than 1 MiB are sent as a single part; larger files
        are split into roughly ten parts.  Up to 10 attempts are made on
        request errors before giving up.

        Fixes over the original: os.path.getsize was called three times
        for the same value (now stat'ed once); path.rsplit(os.sep, 1)[1]
        raised IndexError for a bare filename (os.path.basename is safe);
        the redundant success flag/retry counter is replaced by a plain
        bounded loop.

        Args:
            path (str): absolute path of the file to upload.

        Returns:
            bool: True on success, False when every retry failed.
        """
        size = os.path.getsize(path)  # stat once instead of three times
        part_size = size if size < 1024 * 1024 else size // 10
        for _ in range(10):
            try:
                oss2.resumable_upload(
                    self.bucket,
                    os.path.basename(path),  # safe even without a separator
                    path,
                    progress_callback=self.percentage,
                    # checkpoint dir: /tmp on Linux, project dir elsewhere
                    store=oss2.ResumableStore(root='/tmp' if checkOS() ==
                                              'linux' else config.BASE_DIR),
                    multipart_threshold=1024 * 1024,
                    part_size=part_size,
                    num_threads=4)
                return True
            except oss2.exceptions.RequestError:
                log.warn('上传失败,即将进行重试')
                time.sleep(2)
        return False
Exemplo n.º 6
0
  def uploadData(self, localPath, uploadName):
    # Fetch a temporary STS credential for *uploadName* from the credential
    # service, then resumable-upload *localPath* to the server-designated key.
    # (Python 2 code: print statements.)
    print 'uploading data'
    req = request.Request(host=self.__host, protocol=constant.HTTP, url='/credential/upload/data/' + uploadName, method="GET",
                          time_out=3000)
    a, b, rr = self.cli.execute(req)

    # The body must be JSON; dump the raw response and bail out otherwise.
    try:
      res = json.loads(rr)
    except:
      print a, b, rr
      return

    if res['success'] != True:
      print res
      return

    token = res['credential']

    # Temporary STS credentials returned by the service.
    auth = oss2.StsAuth(token['AccessKeyId'], token['AccessKeySecret'], token['SecurityToken'])

    bucket = oss2.Bucket(auth, self.__endpoint, self.__bucket)

    # res['path'] is the server-chosen object key.
    oss2.resumable_upload(bucket, res['path'], localPath)

    print 'upload complete'
Exemplo n.º 7
0
def uploadFiles(bucket):
    """Upload every file listed in FLAGS.files to OSS.

    Missing files are reported and skipped; existing ones are uploaded
    under FLAGS.prefix with a resumable upload and per-file timing.

    Fixes: manual rfind('/') slicing replaced by the portable
    os.path.basename; ungrammatical "is not exists" message corrected.
    """
    start_time = time.time()
    for tmp_file in FLAGS.files:
        if not os.path.exists(tmp_file):
            print("File {0} does not exist!".format(tmp_file))
            continue
        print("Will upload {0} to the oss!".format(tmp_file))
        tmp_time = time.time()
        # cut the file name (portable, handles names with no slash)
        filename = os.path.basename(tmp_file)
        ossFilename = os.path.join(FLAGS.prefix, filename)
        oss2.resumable_upload(bucket,
                              ossFilename,
                              tmp_file,
                              progress_callback=percentage)

        print("\nFile {0} -> {1} uploads finished, cost {2} Sec.".format(
            tmp_file, ossFilename,
            time.time() - tmp_time))

    print("All upload tasks have finished!")
    print("Cost {0} Sec.".format(time.time() - start_time))
def vodeo_update(video_url, video_full_path):
    """Upload a local video file to OSS object *video_url* with retries.

    (The name 'vodeo_update' looks like a typo for 'video_update' but is
    kept — callers may reference it.)  Retries up to setting.RETRY_TIMES
    with exponential backoff (setting.WRONG_SLEEP ** attempt); the cached
    global bucket handle is dropped and rebuilt after any failure.

    Returns:
        bool: True once the upload succeeds, False when retries run out.
    """
    retry_time = 1
    result = False
    myLog.info(u"文件:%s开始上传,oss对象:%s" % (video_full_path, video_url))
    global bucket_video
    while retry_time <= setting.RETRY_TIMES:
        # Lazily (re)create the bucket handle; it is reset to None on failure.
        if not bucket_video:
            bucket_video = get_bucket_video(video_url)
        try:
            resumable_upload(bucket_video,
                             video_url,
                             video_full_path,
                             store=ResumableStore(root=setting.UPLOAD_CACHE),
                             multipart_threshold=setting.PART_SIZE,
                             part_size=setting.PART_SIZE,
                             num_threads=4)
            myLog.info(u"文件:%s上传成功,oss对象:%s" % (video_full_path, video_url))
            result = True
            break
        except Exception as e:
            # Force the bucket handle to be rebuilt on the next attempt.
            bucket_video = None
            sleep_seconds = setting.WRONG_SLEEP**retry_time
            myLog.warning(u"文件:%s上传oss:%s失败,%s后重试" %
                          (video_full_path, video_url, sleep_seconds))
            myLog.error(e)
            time.sleep(sleep_seconds)
            retry_time += 1
    return result
Exemplo n.º 9
0
    def test_progress(self):
        """Progress reports grow monotonically up to the full file length."""
        progress = {'previous': -1}

        def progress_callback(bytes_consumed, total_bytes):
            self.assertTrue(bytes_consumed <= total_bytes)
            self.assertTrue(bytes_consumed > progress['previous'])
            progress['previous'] = bytes_consumed

        key = random_string(16)
        content = random_bytes(5 * 100 * 1024 + 100)
        pathname = self._prepare_temp_file(content)

        # Multipart path.
        oss2.resumable_upload(self.bucket, key, pathname,
                              multipart_threshold=200 * 1024,
                              part_size=100 * 1024,
                              progress_callback=progress_callback)
        self.assertEqual(progress['previous'], len(content))

        # Single put_object path.
        progress = {'previous': -1}
        oss2.resumable_upload(self.bucket, key, pathname,
                              multipart_threshold=len(content) + 100,
                              progress_callback=progress_callback)
        self.assertEqual(progress['previous'], len(content))

        self.bucket.delete_object(key)
Exemplo n.º 10
0
    def test_progress(self):
        """Progress accounting works for plain and crypto buckets alike."""
        bucket = random.choice(
            [self.bucket, self.rsa_crypto_bucket, self.kms_crypto_bucket])
        seen = {'previous': -1, 'ncalled': 0}

        def progress_callback(bytes_consumed, total_bytes):
            self.assertTrue(bytes_consumed <= total_bytes)
            self.assertTrue(bytes_consumed > seen['previous'])
            seen['previous'] = bytes_consumed
            seen['ncalled'] += 1

        key = random_string(16)
        content = random_bytes(5 * 100 * 1024 + 100)
        pathname = self._prepare_temp_file(content)

        part_size = 100 * 1024
        oss2.resumable_upload(bucket, key, pathname,
                              multipart_threshold=200 * 1024,
                              part_size=part_size,
                              progress_callback=progress_callback,
                              num_threads=1)
        self.assertEqual(seen['previous'], len(content))
        # Callback count matches the assertion: one per part plus one more.
        self.assertEqual(seen['ncalled'],
                         oss2.utils.how_many(len(content), part_size) + 1)

        seen = {'previous': -1, 'ncalled': 0}
        oss2.resumable_upload(bucket, key, pathname,
                              multipart_threshold=len(content) + 100,
                              progress_callback=progress_callback)
        self.assertEqual(seen['previous'], len(content))

        bucket.delete_object(key)
Exemplo n.º 11
0
    def test_upload_sequenial(self):
        """Only a sequential multipart upload yields a Content-MD5 header."""
        endpoint = "http://oss-cn-shanghai.aliyuncs.com"
        auth = oss2.Auth(OSS_ID, OSS_SECRET)
        bucket_name = OSS_BUCKET + "-test-upload-sequential"
        bucket = oss2.Bucket(auth, endpoint, bucket_name)
        bucket.create_bucket()

        key = random_string(16)
        pathname = self._prepare_temp_file(random_bytes(5 * 100 * 1024))

        # Ordinary multipart upload: no Content-MD5 on the stored object.
        oss2.resumable_upload(bucket, key, pathname,
                              multipart_threshold=200 * 1024,
                              part_size=None)
        self.assertIsNone(
            bucket.get_object(key).resp.headers.get('Content-MD5'))

        # With the 'sequential' param the object carries Content-MD5.
        oss2.resumable_upload(bucket, key, pathname,
                              multipart_threshold=200 * 1024,
                              part_size=None,
                              params={'sequential': ''})
        self.assertIsNotNone(
            bucket.get_object(key).resp.headers.get('Content-MD5'))

        bucket.delete_object(key)
        bucket.delete_bucket()
Exemplo n.º 12
0
 def put_obj(self,
             key,
             src,
             use_resume=True,
             part_size=(20 * 1024 * 1024),
             num_threads=4):
     """
     Put a local file to OSS.

     :param key: destination object key in the bucket
     :param src: path of the local source file
     :param use_resume: when True use a resumable (multipart) upload with
         checkpoints under /tmp; otherwise a plain put_object_from_file
     :param part_size: multipart part size in bytes (default 20 MiB)
     :param num_threads: concurrent upload threads
     """
     # use resume
     try:
         if use_resume:
             oss2.resumable_upload(self.bucket,
                                   key,
                                   src,
                                   store=oss2.ResumableStore(root='/tmp'),
                                   multipart_threshold=100 * 1024,
                                   part_size=part_size,
                                   num_threads=num_threads)
         else:
             self.bucket.put_object_from_file(key, src)
     except Exception as ex:
         # Python 2 style: broad catch, error printed and swallowed.
         print ex.message
Exemplo n.º 13
0
    def _do(self, job):
        """Execute one sync job: PUSH uploads the file, REMOVE deletes the key.

        PUSH sends the file with its MD5 both as Content-MD5 (server-side
        integrity check) and as custom metadata; an InvalidDigest response
        from OSS is converted into a JobError with an explanatory message.
        """
        config = Config()
        if job.action == _Job.PUSH:
            # OSS expects Content-MD5 as base64 of the raw digest bytes,
            # while job.md5 is a hex string.
            encode_md5 = base64.b64encode(bytearray.fromhex(job.md5)).decode()
            headers = {
                "Content-MD5": encode_md5,
                snapshot.AliOssSnapshot.meta_md5: job.md5
            }
            try:
                oss2.resumable_upload(
                    self.target_snapshot.bucket,
                    job.target,
                    job.src,
                    headers=headers,
                    store=oss2.ResumableStore(root=config.cache_dir),
                    multipart_threshold=config.multipart_threshold,
                    part_size=config.multipart_threshold,
                    num_threads=config.num_threads)

            except oss2.exceptions.InvalidDigest:
                # Server-side MD5 check failed — surface as a job failure.
                job.info = "md5 mismatch"
                raise JobError

        elif job.action == _Job.REMOVE:
            self.target_snapshot.bucket.delete_object(job.target)
Exemplo n.º 14
0
    def __test_resume(self, content_size, uploaded_parts, expected_unfinished=0):
        """Pre-upload some parts, rebuild the checkpoint, then resume.

        *uploaded_parts* lists 1-based part numbers that are uploaded
        manually first; a checkpoint record is fabricated via
        _rebuild_record so that resumable_upload only transfers the
        remaining parts.  The final object content and the number of
        unfinished multipart uploads are then verified.
        """
        part_size = 100 * 1024
        # Ceiling division: total number of parts.
        num_parts = (content_size + part_size - 1) // part_size

        key = 'resume-' + random_string(32)
        content = random_bytes(content_size)

        pathname = self._prepare_temp_file(content)

        upload_id = self.bucket.init_multipart_upload(key).upload_id

        for part_number in uploaded_parts:
            start = (part_number -1) * part_size
            # The last part may be shorter than part_size.
            if part_number == num_parts:
                end = content_size
            else:
                end = start + part_size

            self.bucket.upload_part(key, upload_id, part_number, content[start:end])

        oss2.resumable._rebuild_record(pathname, oss2.resumable.make_upload_store(), self.bucket, key, upload_id, part_size)
        oss2.resumable_upload(self.bucket, key, pathname, multipart_threshold=0, part_size=100 * 1024)

        result = self.bucket.get_object(key)
        self.assertEqual(content, result.read())

        self.assertEqual(len(list(oss2.ObjectUploadIterator(self.bucket, key))), expected_unfinished)

        self.bucket.delete_object(key)
Exemplo n.º 15
0
 def uploadFile(self, path, localPath):
     """Resumable-upload *localPath* to OSS object key *path*.

     Bug fix: the original passed oss2.ResumableDownloadStore, which is
     the checkpoint store for *downloads*; uploads must use
     oss2.ResumableStore, otherwise resume records are written in the
     wrong place/format and interrupted uploads cannot resume.
     """
     oss2.resumable_upload(self._bucket,
                           path,
                           localPath,
                           store=oss2.ResumableStore(root='./tmp'),
                           multipart_threshold=20 * 1024 * 1024,
                           part_size=10 * 1024 * 1024,
                           num_threads=3)
Exemplo n.º 16
0
 def multi_upload_obj(self, remote_file, local_file):
     """Multipart (resumable) upload with checkpoints kept under /tmp."""
     checkpoint_store = oss2.ResumableStore(root='/tmp')  # checkpoint directory
     oss2.resumable_upload(
         self._bucket,
         remote_file,
         local_file,
         store=checkpoint_store,
         multipart_threshold=100 * 1024,  # multipart kicks in above this size
         part_size=100 * 1024,            # per-part size
         num_threads=4)                   # concurrent upload threads
    def test_resumable_upload(self):
        """Resumable upload against a requester-pays bucket.

        Without the x-oss-request-payer header the payer bucket must reject
        both the small (put_object) and big (multipart) paths with
        AccessDenied; with the header both succeed, and the big upload also
        carries object tagging supplied via headers.
        """
        small_object = 'requestpayment-test-resumable-upload-small-object'
        big_object = 'requestpayment-test-resumable-upload-big-object'

        # Create tmp file smaller than multipart_threshold
        file_name = self._prepare_temp_file_with_size(150 * 1024)

        # Resumable upload of a small object without payer setting should fail.
        self.assertRaises(oss2.exceptions.AccessDenied, oss2.resumable_upload, self.payer_bucket, small_object, file_name, 
                        multipart_threshold=(200*1024), num_threads=2, part_size=(100*1024))

        # Resumable upload of a small object with payer setting should succeed.
        headers = dict()
        headers[OSS_REQUEST_PAYER] = "requester"
        result = oss2.resumable_upload(self.payer_bucket, small_object, file_name, 
                        multipart_threshold=(200*1024), num_threads=2, part_size=(100*1024), headers=headers)
        self.assertEqual(result.status, 200)
        self.bucket.delete_object(small_object)

        # Start big file test
        # Create big file bigger than multipart_threshold
        file_name = self._prepare_temp_file_with_size(11 *1024 * 1024)

        # Resumable upload of a big object without payer setting should fail.
        self.assertRaises(oss2.exceptions.AccessDenied, oss2.resumable_upload, self.payer_bucket, big_object, file_name, 
                        multipart_threshold=(200*1024), num_threads=2, part_size=(100*1024))

        # Resumable upload of a big object with payer and tagging should succeed.
        key1 = 'key1'
        value1 = 'value2'  # NOTE(review): likely a typo for 'value1', but the assertions below are self-consistent

        key2 = 'key2'
        value2 = 'value2'

        # Tagging is passed as a URL-style query string in a header.
        tag_str = key1 + '=' + value1
        tag_str += '&' + key2 + '=' + value2

        headers = dict()
        headers[OSS_REQUEST_PAYER] = "requester"
        headers[OSS_OBJECT_TAGGING] = tag_str
        result = oss2.resumable_upload(self.payer_bucket, big_object, file_name, 
                    multipart_threshold=(200*1024), num_threads=2, part_size=(100*1024), headers=headers)
        self.assertEqual(result.status, 200)

        # Check object size
        head_info = self.bucket.head_object(big_object)
        total_size = head_info.content_length
        self.assertEqual(total_size, (11 * 1024 * 1024))

        # Check tagging
        result = self.bucket.get_object_tagging(big_object)
        self.assertEqual(2, result.tag_set.len())
        tagging_rule = result.tag_set.tagging_rule
        self.assertEqual(value1, tagging_rule[key1])
        self.assertEqual(value2, tagging_rule[key2])

        self.bucket.delete_object(big_object)
Exemplo n.º 18
0
def uploadFile(file):
    """Resumable upload of *file* to Aliyun OSS with a progress bar."""
    fileRemote = os.path.split(file)[1]
    print("=====上传 %s 到阿里云 =====" % fileRemote)
    oss2.resumable_upload(
        bucket,
        fileRemote,
        file,
        multipart_threshold=200 * 1024,
        part_size=100 * 1024,
        num_threads=4,
        progress_callback=percentage)
Exemplo n.º 19
0
    def __test_resume(self,
                      content_size,
                      uploaded_parts,
                      expected_unfinished=0):
        """Resume-from-checkpoint test for plain and crypto buckets.

        Manually uploads the parts in *uploaded_parts* (passing the crypto
        upload context when a CryptoBucket was drawn), fabricates the
        checkpoint record via _rebuild_record, resumes the upload, then
        verifies the object content and the count of unfinished uploads.
        """
        bucket = random.choice(
            [self.bucket, self.rsa_crypto_bucket, self.kms_crypto_bucket])
        part_size = 100 * 1024
        # Ceiling division: total number of parts.
        num_parts = (content_size + part_size - 1) // part_size

        key = 'resume-' + random_string(32)
        content = random_bytes(content_size)
        encryption_flag = isinstance(bucket, oss2.CryptoBucket)

        context = None
        pathname = self._prepare_temp_file(content)
        if encryption_flag:
            # Crypto buckets share one crypto context across part uploads.
            context = models.MultipartUploadCryptoContext(
                content_size, part_size)
            upload_id = bucket.init_multipart_upload(
                key, upload_context=context).upload_id
        else:
            upload_id = bucket.init_multipart_upload(key).upload_id

        for part_number in uploaded_parts:
            start = (part_number - 1) * part_size
            # The last part may be shorter than part_size.
            if part_number == num_parts:
                end = content_size
            else:
                end = start + part_size

            if encryption_flag:
                bucket.upload_part(key,
                                   upload_id,
                                   part_number,
                                   content[start:end],
                                   upload_context=context)
            else:
                bucket.upload_part(key, upload_id, part_number,
                                   content[start:end])

        self._rebuild_record(pathname, oss2.resumable.make_upload_store(),
                             bucket, key, upload_id, part_size, context)
        oss2.resumable_upload(bucket,
                              key,
                              pathname,
                              multipart_threshold=0,
                              part_size=100 * 1024)

        result = bucket.get_object(key)
        self.assertEqual(content, result.read())

        self.assertEqual(
            len(list(oss2.ObjectUploadIterator(self.bucket, key))),
            expected_unfinished)

        bucket.delete_object(key)
Exemplo n.º 20
0
def upload(host, user, password, port, my_conf):
    """Run an innodb backup of the given MySQL server and upload the
    resulting tarball to OSS, keyed by a timestamp."""
    abc = lib_innodb_backup.backup(host, user, password, port, my_conf)

    # SECURITY(review): the access key id/secret are hard-coded below —
    # anyone who can read this source gets the credentials.  Move them to
    # configuration/environment and rotate the key.
    auth = oss2.Auth('LTAIQFvh36NyxuxI', 'M6Au2n69pz7mZFVrICKaQaFKxTv2do')
    # service = oss2.Service(auth, 'oss-cn-beijing.aliyuncs.com')
    # print([b.name for b in oss2.BucketIterator(service)])
    # NOTE(review): "%Y%d%m_%H%M" puts the day before the month — possibly
    # a typo for %Y%m%d; confirm before changing, existing objects use it.
    ticks = time.strftime("%Y%d%m_%H%M")
    bucket = oss2.Bucket(auth, 'oss-cn-beijing.aliyuncs.com',
                         'tplinuxmysqlbackup')
    oss2.resumable_upload(bucket, ticks + '.tar.gz', "%s.tar.gz" % abc)
 def uploadFile(self):
     """Resumable upload of self.file as self.objectname, printing progress."""
     try:
         oss2.resumable_upload(self.ossBucket,
                               self.objectname,
                               self.file,
                               multipart_threshold=100 * 1024,
                               num_threads=4,
                               progress_callback=self.percentage)
     except Exception as err:
         # Best effort: report and swallow any failure.
         print(err)
Exemplo n.º 22
0
def upload_war(warfilepath):
    """Upload a WAR file to OSS and return its public HTTPS URL."""
    bucket = oss2.Bucket(oss2.Auth(accessKeyId, accessKeySecret),
                         ossendpoint, ossbucketname)
    basename = os.path.basename(warfilepath)
    oss2.resumable_upload(bucket, basename, warfilepath)

    # Same URL shape as before: https://<bucket>.<endpoint>/<basename>
    return "https://" + ossbucketname + "." + ossendpoint + "/" + basename
Exemplo n.º 23
0
def uploadFile(file):
    """Resumable upload with a progress display."""
    fileRemote = os.path.split(file)[1]
    print("=====上传 %s 到阿里云 =====" % fileRemote)
    # Progress-bar resumable upload.
    oss2.resumable_upload(bucket, fileRemote, file,
                          multipart_threshold=200 * 1024,
                          part_size=100 * 1024, num_threads=4,
                          progress_callback=percentage)
Exemplo n.º 24
0
	def upload(self, file):
		"""Upload every path listed in *file* (one per line) to OSS.

		Each line is a path under /data1/jenkins_dir/; the remainder after
		that prefix becomes the OSS object key.

		Fix: the original mixed tabs and spaces in its indentation, which
		is a TabError under Python 3 — indentation is normalized here
		(behavior unchanged).
		"""
		with open(file, 'r+') as f:
			for line in f.readlines():
				local_path = line.strip('\n')
				# Key = path relative to the Jenkins workspace prefix.
				remote_file = local_path.split('/data1/jenkins_dir/')
				oss2.resumable_upload(bucket, remote_file[1], local_path,
				                      store=oss2.ResumableStore(root='/tmp'),
				                      multipart_threshold=100 * 1024,
				                      part_size=100 * 1024,
				                      num_threads=4)
Exemplo n.º 25
0
    def test_concurrency(self):
        """An 8-thread multipart upload must reproduce the content exactly."""
        bucket = random.choice(
            [self.bucket, self.rsa_crypto_bucket, self.kms_crypto_bucket])
        key = random_string(16)
        content = random_bytes(64 * 100 * 1024)
        pathname = self._prepare_temp_file(content)

        oss2.resumable_upload(bucket, key, pathname,
                              multipart_threshold=200 * 1024,
                              part_size=100 * 1024,
                              num_threads=8)

        result = bucket.get_object(key)
        self.assertEqual(content, result.read())
        self.assertEqual(result.headers['x-oss-object-type'], 'Multipart')
Exemplo n.º 26
0
 def up(img_path_, remote_img_loc_, img_):
     """Upload the image unless the remote object already exists."""
     if bucket.object_exists(remote_img_loc_):  # skip existing remote objects
         print(img_ + " already exists, ignore it.")
         return
     oss2.resumable_upload(bucket, remote_img_loc_, img_path_,
                           multipart_threshold=200 * 1024,
                           part_size=100 * 1024,
                           num_threads=3,
                           progress_callback=percentage)
     print(', ' + img_ + ' is successfully uploaded.')
Exemplo n.º 27
0
def upload_to_oss(path):
    """Upload *path* to the 'kolibri' bucket using ~/.ossutilconfig credentials."""
    cfg = configparser.ConfigParser()
    cfg.read(os.path.expanduser("~/.ossutilconfig"))
    creds = cfg["Credentials"]

    oss_auth = oss2.Auth(creds["accessKeyID"], creds["accessKeySecret"])
    bucket = oss2.Bucket(oss_auth, creds["endpoint"], "kolibri")
    # Object key is the file's basename.
    oss2.resumable_upload(bucket, os.path.split(path)[1], path)
Exemplo n.º 28
0
 def resumable(self, cloud_file, file_to_upload):
     # Resumable upload of *file_to_upload* to key *cloud_file*, timing the
     # transfer.  (Python 2 code: print statements.)
     print "开始断点续传%s" % (file_to_upload)
     startTime = time.time()
     bucket = self.__connect_oss()
     # Checkpoints go under /tmp; size thresholds come from instance config.
     oss2.resumable_upload(bucket, cloud_file, file_to_upload,
                           store=oss2.ResumableStore(root='/tmp'),
                           multipart_threshold=self.__file_critical_size,
                           part_size=self.__file_chunk_size,
                           num_threads=10)
     endTime = time.time()
     spendTime = endTime - startTime
     print "Upload file spend %f second." % (spendTime)
Exemplo n.º 29
0
    def test_upload_small(self):
        """Below the multipart threshold the upload stores a 'Normal' object."""
        key = random_string(16)
        content = random_bytes(100)
        pathname = self._prepare_temp_file(content)

        oss2.resumable_upload(self.bucket, key, pathname)

        result = self.bucket.get_object(key)
        self.assertEqual(content, result.read())
        # A small file is stored as a plain (non-multipart) object.
        self.assertEqual(result.headers['x-oss-object-type'], 'Normal')

        self.bucket.delete_object(key)
    def test_upload_small(self):
        """Small-content resumable upload round-trips as a 'Normal' object."""
        object_key = random_string(16)
        payload = random_bytes(100)
        local_path = self._prepare_temp_file(payload)

        oss2.resumable_upload(self.bucket, object_key, local_path)

        fetched = self.bucket.get_object(object_key)
        self.assertEqual(payload, fetched.read())
        self.assertEqual(fetched.headers['x-oss-object-type'], 'Normal')

        self.bucket.delete_object(object_key)
Exemplo n.º 31
0
def upload_to_oss(local_path, endpoint, remote_path):
    """Mirror a file, or a whole directory tree, from *local_path* to *remote_path*."""
    bucket = get_bucket(endpoint)
    local_path = os.path.abspath(local_path)

    if not os.path.isdir(local_path):
        print("Uploading {0}...".format(local_path))
        oss2.resumable_upload(bucket, remote_path, local_path)
        return

    for top, _dirs, files in os.walk(local_path):
        for name in files:
            local_file = os.path.join(top, name)
            # Rebase the local prefix onto the remote prefix to get the key.
            remote_file = local_file.replace(local_path, remote_path)
            print("Uploading {0}...".format(local_file))
            oss2.resumable_upload(bucket, remote_file, local_file)
    def test_upload_large(self):
        """part_size=None lets oss2 pick the part size for a multipart upload."""
        key = random_string(16)
        content = random_bytes(5 * 100 * 1024)
        pathname = self._prepare_temp_file(content)

        oss2.resumable_upload(self.bucket, key, pathname,
                              multipart_threshold=200 * 1024,
                              part_size=None)

        result = self.bucket.get_object(key)
        self.assertEqual(content, result.read())
        self.assertEqual(result.headers['x-oss-object-type'], 'Multipart')

        self.bucket.delete_object(key)
Exemplo n.º 33
0
    def test_concurrency(self):
        """Concurrent (8-thread) multipart upload preserves the content."""
        object_key = random_string(16)
        payload = random_bytes(64 * 100 * 1024)
        local_path = self._prepare_temp_file(payload)

        oss2.resumable_upload(self.bucket, object_key, local_path,
                              multipart_threshold=200 * 1024,
                              part_size=100 * 1024,
                              num_threads=8)

        fetched = self.bucket.get_object(object_key)
        self.assertEqual(payload, fetched.read())
        self.assertEqual(fetched.headers['x-oss-object-type'], 'Multipart')
Exemplo n.º 34
0
    def _put(self, source_path, remote_filename):
        """Upload *source_path* to the bucket under the backend's key prefix.

        The multipart thread count defaults to 5 and can be raised via the
        ``alicloud_multipart_upload_threads`` attribute of the ``globals``
        configuration module.
        """
        key_name = self.key_prefix + remote_filename

        # Default multipart parallel threads number is 5.
        # Bug fix: the original tested
        # vars().has_key('globals.alicloud_multipart_upload_threads') — a
        # dotted key that can never be present in vars() (and dict.has_key
        # is Python-2-only), so the setting was always ignored.  Probe the
        # attribute on the globals module instead.
        multipart_parallel = 5
        configured = getattr(globals, 'alicloud_multipart_upload_threads', None)
        if configured is not None and configured >= 5:
            multipart_parallel = configured

        import oss2
        oss2.resumable_upload(self.bucket,
                              key_name,
                              source_path.name,
                              num_threads=multipart_parallel)
Exemplo n.º 35
0
 def upload(img_path_, remote_img_ref, img_full_filename):
     """Resumable upload, skipped when the remote object already exists."""
     if bucket.object_exists(remote_img_ref):  # check the remote side first
         print(img_full_filename + " already exists, ignore it.")
         return
     oss2.resumable_upload(bucket, remote_img_ref, img_path_,
                           multipart_threshold=200 * 1024,
                           part_size=100 * 1024,
                           num_threads=3,
                           progress_callback=percentage)
     print(', ' + img_full_filename + ' is successfully uploaded.')
def upload_oss(path, name):
    """Upload a local file to OSS, optionally under an explicit key name.

    :param path: local file path (may contain non-ASCII characters)
    :param name: object key on OSS (known as 'key' in oss2); defaults to
        the file's basename when falsy
    :return: the oss2.resumable_upload result object
    :raises OSError: when *path* does not exist
    """
    # KNOWN ISSUE: oss2 has a bug in oss2.resumable_upload() which not support non-ascii character

    path = to_unicode_or_bust(path)  # support support non-ascii character, such as path name in Chinese Language

    bucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name)

    if not os.path.exists(path):  # need <type 'unicode'>
        raise OSError("cannot access '%s': No such file or directory" % to_str_or_bust(path))
    else:
        logger.info("file \'%s\' selected." % path)

    if name:
        # Directory/folder concept in OSS:
        # https://help.aliyun.com/knowledge_detail/39527.html
        key = name
    else:
        key = os.path.basename(path)

    # Convert back to byte strings before handing off to oss2.
    path = to_str_or_bust(path)
    key = to_str_or_bust(key)

    logger.info("ready for uploading file to oss")
    result = oss2.resumable_upload(bucket, key, path, multipart_threshold=10 * 1024 * 1024)

    # Fetch the object once to log the request id / status of the upload.
    oss_obj = bucket.get_object(key)
    logger.info(" ".join((oss_obj.request_id, str(oss_obj.status))))
    logger.info("file \'%s\' uploaded." % path)
    return result
Exemplo n.º 37
0
    def test_upload_large(self):
        """resumable_upload returns a result carrying etag and request_id."""
        key = random_string(16)
        content = random_bytes(5 * 100 * 1024)
        pathname = self._prepare_temp_file(content)

        upload_result = oss2.resumable_upload(self.bucket, key, pathname,
                                              multipart_threshold=200 * 1024,
                                              part_size=None)
        self.assertTrue(upload_result is not None)
        self.assertTrue(upload_result.etag is not None)
        self.assertTrue(upload_result.request_id is not None)

        fetched = self.bucket.get_object(key)
        self.assertEqual(content, fetched.read())
        self.assertEqual(fetched.headers['x-oss-object-type'], 'Multipart')

        self.bucket.delete_object(key)
# Upload the parts (demo: three parts, each with the same content buffer).
for i in range(3):
    result = bucket.upload_part(key, upload_id, i+1, content, progress_callback=percentage)
    parts.append(oss2.models.PartInfo(i+1, result.etag))

# Complete the upload and trigger the callback.
result = bucket.complete_multipart_upload(key, upload_id, parts)

"""
断点续传上传
"""
# Resumable upload with a progress bar.
pathname = _prepare_temp_file(content)
oss2.resumable_upload(bucket, key, pathname, 
                      multipart_threshold=200*1024,
                      part_size=100*1024,
                      num_threads=3,
                      progress_callback=percentage)

"""
文件下载
"""
# Download with a progress bar.
result = bucket.get_object(key, progress_callback=percentage)
content_got = b''
for chunk in result:
    content_got += chunk
assert content == content_got
"""
范围下载
def resumable_upload(bucket, object_key, local_file_path):
    """Thin wrapper: resumable upload with 4 threads and progress display.

    *local_file_path* is a file path string.
    """
    oss2.resumable_upload(bucket, object_key, local_file_path,
                          num_threads=4,
                          progress_callback=percentage)
Exemplo n.º 40
0
from __future__ import print_function
import os, sys
import oss2
#
# 百分比显示回调函数
#
def percentage(consumed_bytes, total_bytes):
    """Progress callback: print the upload progress as a percentage.

    Does nothing when total_bytes is falsy (size unknown).  The leading
    carriage return keeps the output on one line.

    Bug fix: the original passed end=filePath, which appended the global
    file path after every update and raised NameError whenever filePath
    was not defined; end='' is the intended behavior.
    """
    if total_bytes:
        rate = int(100 * (float(consumed_bytes) / float(total_bytes)))
        print('\r{0}% '.format(rate), end='')
        sys.stdout.flush()

# The script expects 5 positional arguments.
if ( len(sys.argv) > 5 ):
    AccessKeyId     = sys.argv[1]
    AccessKeySecret = sys.argv[2]
    Endpoint        = sys.argv[3] 
    Bucket          = sys.argv[4]
    filePath = sys.argv[5]
    fileName = filePath.split("/")[-1]

else:
    # Wrong usage: show an example invocation and stop.
    print("Example: %s AccessKeyId AccessKeySecret Endpoint Bucket /data/backup.zip" % sys.argv[0])
    exit()

# Authenticate with OSS and start the upload.
auth = oss2.Auth(AccessKeyId , AccessKeySecret)
bucket = oss2.Bucket(auth,  Endpoint, Bucket)
oss2.resumable_upload(bucket, fileName, filePath, progress_callback=percentage)
print('\rUpload %s to OSS Success!' % filePath)
Exemplo n.º 41
0
# Bucket handle built from credentials/endpoint defined earlier in the script.
bucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name)


def random_string(n):
    """Return a random string of *n* lowercase ASCII letters."""
    alphabet = string.ascii_lowercase
    return ''.join(random.choice(alphabet) for _ in range(n))

# Generate a local file for testing; the content is of type bytes.
filename = random_string(32) + '.txt'
content = oss2.to_bytes(random_string(1024 * 1024))

with open(filename, 'wb') as fileobj:
    fileobj.write(content)

# Resumable upload, case 1: the file is small (below
# oss2.defaults.multipart_threshold), so oss2.Bucket.put_object is what
# actually runs under the hood.
oss2.resumable_upload(bucket, 'remote-normal.txt', filename)

# Resumable upload, case 2: for demonstration we pass multipart_threshold
# explicitly to force a multipart upload.
oss2.resumable_upload(bucket, 'remote-multipart.txt', filename, multipart_threshold=100 * 1024)


# The multipart API can also be called directly.
# First use the helper to pick a part size; we prefer 128KB parts.
total_size = os.path.getsize(filename)
part_size = oss2.determine_part_size(total_size, preferred_size=128 * 1024)

# Initialize the multipart upload and get the Upload ID, which every
# following call needs.
key = 'remote-multipart2.txt'
upload_id = bucket.init_multipart_upload(key).upload_id

# Upload the parts one by one