Esempio n. 1
0
    def resumableUpload(self, path):
        """Resumable (checkpointed) upload of a local file to OSS.

        Retries up to 10 times on network errors, sleeping 2s between
        attempts.

        Args:
            path (str): absolute path of the file to upload; its basename
                becomes the OSS object key.

        Returns:
            bool: True when the upload succeeded, False once all retries
            are exhausted.
        """
        # Stat the file once; the original called os.path.getsize() three
        # times for the same path.
        file_size = os.path.getsize(path)
        # Small files upload as a single part; larger files in ~10 parts.
        part_size = file_size if file_size < 1024 * 1024 else file_size // 10
        retry = 10
        while retry > 0:
            retry -= 1
            try:
                oss2.resumable_upload(
                    self.bucket,
                    path.rsplit(os.sep, 1)[1],  # object key = basename
                    path,
                    progress_callback=self.percentage,
                    # checkpoint dir: /tmp on linux, project dir otherwise
                    store=oss2.ResumableStore(root='/tmp' if checkOS() ==
                                              'linux' else config.BASE_DIR),
                    multipart_threshold=1024 * 1024,
                    part_size=part_size,
                    num_threads=4)
                # Success: return immediately (original kept a redundant
                # `success` flag that was unreachable after the return).
                return True
            except oss2.exceptions.RequestError:
                log.warn('上传失败,即将进行重试')
                time.sleep(2)
        return False
Esempio n. 2
0
    def _do(self, job):
        """Execute one sync job: resumable upload for PUSH, delete for REMOVE."""
        cfg = Config()
        if job.action == _Job.PUSH:
            # OSS expects Content-MD5 as base64 of the raw digest bytes,
            # while the snapshot metadata keeps the hex form.
            digest_b64 = base64.b64encode(bytearray.fromhex(job.md5)).decode()
            try:
                oss2.resumable_upload(
                    self.target_snapshot.bucket,
                    job.target,
                    job.src,
                    headers={
                        "Content-MD5": digest_b64,
                        snapshot.AliOssSnapshot.meta_md5: job.md5,
                    },
                    store=oss2.ResumableStore(root=cfg.cache_dir),
                    multipart_threshold=cfg.multipart_threshold,
                    part_size=cfg.multipart_threshold,
                    num_threads=cfg.num_threads)
            except oss2.exceptions.InvalidDigest:
                # Server-side MD5 verification failed.
                job.info = "md5 mismatch"
                raise JobError

        elif job.action == _Job.REMOVE:
            self.target_snapshot.bucket.delete_object(job.target)
Esempio n. 3
0
 def put_obj(self,
             key,
             src,
             use_resume=True,
             part_size=(20 * 1024 * 1024),
             num_threads=4):
     """Upload a local file to OSS.

     :param key: object key in the bucket
     :param src: local file path
     :param use_resume: when True, use a resumable (checkpointed) upload
     :param part_size: multipart part size in bytes
     :param num_threads: number of concurrent upload threads
     """
     try:
         if use_resume:
             oss2.resumable_upload(self.bucket,
                                   key,
                                   src,
                                   store=oss2.ResumableStore(root='/tmp'),
                                   multipart_threshold=100 * 1024,
                                   part_size=part_size,
                                   num_threads=num_threads)
         else:
             self.bucket.put_object_from_file(key, src)
     except Exception as ex:
         # BUG FIX: `print ex.message` was py2-only syntax and
         # Exception.message is deprecated/removed; print the exception
         # itself (valid on both py2 and py3).
         print(ex)
Esempio n. 4
0
 def _push(self, fi):
     """Upload one file object *fi* to OSS with a resumable upload, then
     verify the HTTP status and CRC before recording it as uploaded.

     Raises a generic Exception (after logging) on any failure.
     """
     try:
         # BUG FIX: copy the shared header template. The original bound
         # `headers` directly to ALI.META and then update()d it, mutating
         # the module-level constant on every call.
         headers = dict(ALI.META)
         headers.update({"Content-Length": str(fi.size)})  # for cdn check
         headers.update({"Content-MD5": fi.md5})  # for cdn check
         headers.update({"x-oss-meta-etag": fi.etag})
         ret = oss2.resumable_upload(self.bucket_mgr,
                                     fi.rname,
                                     fi.rpath,
                                     store=oss2.ResumableStore(root='/tmp'),
                                     multipart_threshold=10 * 1024 * 1024,
                                     part_size=10 * 1024 * 1024,
                                     num_threads=4,
                                     headers=headers)
         if ret.status != 200:
             log.error("ALI:_push: {file} error".format(file=fi.rpath))
             raise Exception(
                 "ALI:_push: {file} error".format(file=fi.rpath))
         if ret.crc != fi.crc:  # local integrity check
             log.error(
                 "ALI:_push check: {file} error".format(file=fi.rpath))
             raise Exception(
                 "ALI:_push check: {file} error".format(file=fi.rpath))
         log.info("{fi} distributed".format(fi=fi))
         self.upload_files.add(fi.rname)
     except Exception as e:
         log.error("ALI:_push error: {error}".format(error=str(e)))
         raise Exception(
             "Distribute_ALI_push_error :{error}".format(error=str(e)))
Esempio n. 5
0
 def upload_files(self, objname, localfile):
     """Resumable upload of *localfile* to OSS under the key *objname*."""
     # Checkpoint files live under the app's configured upload directory.
     checkpoint_store = oss2.ResumableStore(
         root=current_app.config['UPLOADED_FILE_DEST'])
     oss2.resumable_upload(self.Bucket, objname, localfile,
                           store=checkpoint_store)
Esempio n. 6
0
    def upload_fonts(self, file_path):
        """Upload a local font file to every configured font bucket,
        logging success or failure per bucket."""
        file_name = file_path.split('/')[-1]

        for bucket in self.get_font_bucket():
            # Region suffix of the bucket location selects the configured
            # font storage directory for that region.
            info = bucket.get_bucket_info()
            region = info.location.split('-')[-1]
            fonts_dir = config.ali_fonts_bucket[region]['font_dir']

            result = oss2.resumable_upload(
                bucket,
                os.path.join(fonts_dir, file_name),
                file_path,
                store=oss2.ResumableStore(root='./tmp_files/uploads'),
                multipart_threshold=100 * 1024,
                part_size=100 * 1024,
                progress_callback=self.percentage,
                num_threads=4)
            # Terminate the progress line printed by the callback.
            print('', end='\n')
            ok = result.status == 200 or result.resp.status == "OK"
            if ok:
                print('Font %s upload to bucket %s successed' % (file_name, bucket.bucket_name))
                self.logger.info('Font %s upload to bucket %s successed' % (file_name, bucket.bucket_name))
            else:
                print('Font %s upload to bucket %s failed' % (file_name, bucket.bucket_name))
                self.logger.error('Font %s upload to bucket %s failed' % (file_name, bucket.bucket_name))
Esempio n. 7
0
 def multi_upload_obj(self, remote_file, local_file):
     """Upload *local_file* to the bucket as *remote_file*, resuming from
     a checkpoint if one exists."""
     checkpoint = oss2.ResumableStore(root='/tmp')  # checkpoint directory
     oss2.resumable_upload(
         self._bucket,
         remote_file,
         local_file,
         store=checkpoint,
         multipart_threshold=100 * 1024,  # use multipart above this size
         part_size=100 * 1024,            # size of each part
         num_threads=4)                   # concurrent upload threads
Esempio n. 8
0
	def upload(self, file):
		"""Upload every file listed (one path per line) in *file* to OSS.

		Each line is a local path under /data1/jenkins_dir/; the object key
		is the path relative to that directory.
		"""
		# BUG FIX: the original body mixed tabs and spaces (a TabError on
		# py3) and materialized the whole file with readlines(); iterate
		# the file object directly instead.
		with open(file, 'r+') as f:
			for line in f:
				local_path = line.strip('\n')
				# Key = path relative to the jenkins dir.
				remote_parts = local_path.split('/data1/jenkins_dir/')
				oss2.resumable_upload(bucket, remote_parts[1], local_path,
				                      store=oss2.ResumableStore(root='/tmp'),
				                      multipart_threshold=100*1024,
				                      part_size=100*1024,
				                      num_threads=4)
    def test_resumable_store_dir(self):
        """Constructing a ResumableStore must create its directory under root."""
        root = "./"
        store_dir = "test-resumable-store-dir"
        path = root + store_dir

        # The directory must not pre-exist; building the store creates it.
        self.assertFalse(os.path.exists(path))
        oss2.ResumableStore(root, store_dir)
        self.assertTrue(os.path.isdir(path))
        self.assertTrue(os.path.exists(path))

        # Clean up the directory created by the store.
        os.rmdir(path)
Esempio n. 10
0
    def resumable_upload_from_local(self,localfile=None,ossfile=None):
        '''Upload *localfile* to OSS under self.root_name/*ossfile*.

        Returns the public URL (self.domain/<key>) on HTTP 200, or an
        empty string otherwise.
        '''
        # No shared import block is visible in this file, so import locally.
        import posixpath

        # BUG FIX: OSS object keys and URLs always use '/' separators;
        # os.path.join would insert '\\' on Windows.
        ossfile = posixpath.join(self.root_name, ossfile)

        result = oss2.resumable_upload(self.bucket, ossfile, localfile,
            store=oss2.ResumableStore(root='/tmp'),
            multipart_threshold=100*1024,
            part_size=100*1024,
            num_threads=4,
            progress_callback=self.progress_callback
        )

        if result.status == 200:
            url = posixpath.join(self.domain, ossfile)
        else:
            url = ''

        return url
Esempio n. 11
0
    def resume_upload(self,
                      remote_file_path,
                      local_file_path,
                      multipart_threshold=100 * 1024,
                      part_size=100 * 1024,
                      num_threads=5,
                      progress_callback=None):
        """Resumable upload of a local file to OSS with ACL/Content-Type headers.

        :param remote_file_path: object key in the bucket
        :param local_file_path: local file path
        :param multipart_threshold: size above which multipart upload kicks in
        :param part_size: multipart part size in bytes
        :param num_threads: concurrent upload threads
        :param progress_callback: optional callable(consumed, total)
        :raises AliyunOSSException: when the local file is missing or empty
        """
        name = local_file_path.split('/')[-1]
        content_type = mimetypes.guess_type(
            name)[0] or 'application/x-octet-stream'
        # BUG FIX: the original read the entire file into memory only to
        # measure its length, and a missing file crashed with a bare
        # FileNotFoundError at open() before the "not exists" guard ran.
        # Stat the file instead; an empty file still raises as before.
        if (not os.path.isfile(local_file_path)
                or os.path.getsize(local_file_path) == 0):
            raise AliyunOSSException(
                '`local_file_path` not exists error, local_file_path: {}'.
                format(local_file_path))
        content_len = str(os.path.getsize(local_file_path))
        headers = {
            'x-oss-acl': self.acl,
            'Content-Type': content_type,
            'Content-Length': content_len,
        }

        return oss2.resumable_upload(self.bucket,
                                     remote_file_path,
                                     local_file_path,
                                     store=oss2.ResumableStore(root='/tmp'),
                                     headers=headers,
                                     multipart_threshold=multipart_threshold,
                                     part_size=part_size,
                                     num_threads=num_threads,
                                     progress_callback=progress_callback)
Esempio n. 12
0
    print('Backup App success!')

bucket = oss2.Bucket(auth, endpoint, bucketName)  # bucket handle (original comment was mojibake)


def percentage(consumed_bytes, total_bytes):
    """Progress callback: rewrite a single console line with the upload %."""
    if not total_bytes:
        # Unknown total: nothing sensible to report.
        return
    rate = int(100 * (float(consumed_bytes) / float(total_bytes)))
    print('\rAre uploading:{0}%'.format(rate), end='')
    sys.stdout.flush()


# NOTE(review): the inline comments below were mojibake (mis-encoded text);
# replaced with English where the intent is clear from the code.
#currentTime = time.strftime('%Y-%m-%d_%H:%M_timestamp:%s',time.localtime(time.time()))
#fileName = currentTime+'.sql'
#fileName = 'redmine_bk'+fileName
#fileName = 'sql/'+fileName
# Object key: store the app file under the 'application/' prefix.
ossAppFilePath = 'application/' + appFileName
print(ossAppFilePath)

oss2.resumable_upload(
    bucket,
    ossAppFilePath,
    appFilePath,
    store=oss2.ResumableStore(root='/tmp'),
    multipart_threshold=100 * 1024,
    part_size=100 * 1024,
    num_threads=4,
    headers={"Content-Type": "application/octet-stream; charset=utf-8"},
    progress_callback=percentage)  # resumable upload, printing progress via percentage()
Esempio n. 13
0
def upload(file, name, range, description, proxy):
    """Create (or resume) a dataset upload to OSS.

    Args:
        file: path to a .zip or .tar.gz archive.
        name: dataset name; defaults to the archive's file name.
        range: dataset visibility/range, passed through to the API.
        description: dataset description.
        proxy: when falsy, ambient HTTP(S) proxy settings are cleared.
    """
    if not proxy:
        # Clear proxy env vars so the OSS SDK connects directly.
        os.environ['HTTP_PROXY'] = ''
        os.environ['HTTPS_PROXY'] = ''
        os.environ['http_proxy'] = ''
        os.environ['https_proxy'] = ''
    if not (file.endswith(".zip") or file.endswith(".tar.gz")):
        sys.exit('Error: uploading file should be one of .zip or .tar.gz type')
    filepath = Path(file)
    # BUG FIX: check existence before stat(); the original called stat()
    # first, so a missing file crashed with FileNotFoundError instead of
    # printing the friendly error below.
    if not filepath.exists():
        sys.exit('Error: file not exists')
    total_size = filepath.stat().st_size
    name = name or filepath.name
    # Checkpoint identity derives from file name + size so re-running the
    # same upload resumes instead of starting over.
    sha1 = hashlib.sha1()
    sha1.update((f"{filepath.name}-{total_size}").encode())
    digest = sha1.hexdigest()
    dataset_file = Path.home() / '.featurize' / f"file_upload_checkpoint_{digest}" / "dataset.json"
    dataset_file.parent.mkdir(parents=True, exist_ok=True)
    try:
        dataset = json.loads(dataset_file.read_text())
    except (FileNotFoundError, json.JSONDecodeError):
        # No (valid) checkpoint: register a fresh dataset and persist it.
        res = client.dataset.create(name, range, description)
        dataset = {
            'id': res['id'],
            'dataset_center': res['dataset_center'],
            'uploader_id': res['uploader_id'],
            'consumed_bytes': 0
        }
        dataset_file.write_text(json.dumps(dataset))
    credential = client.oss_credential.get()
    auth = oss2.StsAuth(
        credential['AccessKeyId'],
        credential['AccessKeySecret'],
        credential['SecurityToken']
    )
    bucket = oss2.Bucket(auth, 'http://oss-cn-beijing.aliyuncs.com', dataset['dataset_center']['bucket'])

    def progress_callback(consumed_bytes, total_bytes):
        # Advance the bar to the absolute position and persist progress so
        # an interrupted upload can resume later.
        pbar.update(consumed_bytes - pbar.n)
        dataset['consumed_bytes'] = consumed_bytes
        dataset_file.write_text(json.dumps(dataset))

    pbar = tqdm(total=total_size, unit='B', unit_scale=True)
    path = f"{dataset['uploader_id']}_{dataset['id']}/{filepath.name}"
    result = oss2.resumable_upload(
        bucket,
        path,
        filepath.resolve().as_posix(),
        store=oss2.ResumableStore(root='/tmp'),
        multipart_threshold=8 * 1024 * 1024,
        part_size=1024 * 1024 * 1,
        num_threads=1,
        progress_callback=progress_callback
    )

    if result.status != 200:
        sys.exit(f"Error: upload respond with code {result.status}")

    client.dataset.update(
        dataset_id=dataset['id'],
        uploaded=True,
        domain=f"{dataset['dataset_center']['bucket']}.oss-cn-beijing.aliyuncs.com",
        path=path,
        size=total_size,
        filename=filepath.name
    )

    # Upload complete: drop the checkpoint directory.
    shutil.rmtree(dataset_file.parent)
Esempio n. 14
0
    def encrypt_and_upload_files(self,
                                 local_file_name,
                                 remote_object_name,
                                 storage_class='Standard',
                                 file_sha256=None,
                                 cache_control='no-store',
                                 compare_sha256_before_uploading=False):
        """Upload a file with KMS server-side encryption, retrying on network errors.

        Args:
            local_file_name (str): local file path
            remote_object_name (str): remote object key
            storage_class (str, optional): Standard, IA, Archive or ColdArchive
            file_sha256 (str, optional): computed from the local file when omitted
            cache_control (str, optional): Cache-Control header value
            compare_sha256_before_uploading (bool, optional): skip the upload
                when the remote object already carries the same sha256

        Returns:
            int: 200 on success (or when the upload was skipped).
        """
        if not file_sha256:
            file_sha256 = calculate_local_file_sha256(local_file_name)
        retry_count = 0
        if compare_sha256_before_uploading:
            try:
                remote_object_sha256 = self.get_remote_file_headers(
                    remote_object_name)['x-oss-meta-sha256']
            except Exception:
                # BUG FIX: narrowed from a bare `except:` so that
                # KeyboardInterrupt/SystemExit are no longer swallowed.
                # NOTE(review): any failure here (including a missing remote
                # object) falls back to the local sha256, which makes the
                # comparison below succeed and SKIPS the upload — confirm
                # this is the intended best-effort behavior.
                remote_object_sha256 = file_sha256
            if remote_object_sha256 == file_sha256:
                logger.info("[encrypt_and_upload_files]sha256相同,跳过%s文件的上传" %
                            local_file_name)
                return 200
        while True:
            try:

                retry_count += 1
                oss2.resumable_upload(
                    self.__bucket,
                    remote_object_name,
                    local_file_name,
                    store=oss2.ResumableStore(root=config.temp_dir),
                    multipart_threshold=1024 * 1024 * 50,
                    part_size=1024 * 1024 * 50,
                    num_threads=4,
                    headers={
                        "content-length":
                        str(os.path.getsize(local_file_name)),
                        "Cache-Control": cache_control,
                        "x-oss-server-side-encryption": "KMS",
                        "x-oss-storage-class": storage_class,
                        "x-oss-meta-sha256": file_sha256
                    })
                break
            except (oss2.exceptions.ClientError, oss2.exceptions.RequestError,
                    ConnectionResetError) as err:
                if retry_count < config.Max_Retries:
                    logger.error(
                        "[encrypt_and_upload_files] error, retrying time %d" %
                        retry_count)
                    logger.error(err)
                else:
                    logger.exception("[encrypt_and_upload_files] Error")
                    raise oss2.exceptions.RequestError
                # Quadratic backoff between attempts.
                sleep(square(retry_count) * 10)
                # Block until network connectivity is restored.
                while subprocess.run(self.__ping_cmd,
                                     capture_output=True).returncode != 0:
                    logger.error("无法连接网络,10秒后重试")
                    sleep(10)
        return 200
Esempio n. 15
0
 def upload_files(self, objname, localfile):
     """Resumable upload of *localfile* to the bucket as *objname*."""
     # Checkpoint files are kept in a fixed local directory.
     checkpoint = oss2.ResumableStore(root='/Users/root1/webdev/fileload')
     oss2.resumable_upload(self.Bucket, objname, localfile, store=checkpoint)
Esempio n. 16
0
    def _download_package(self, pkg, dest):
        """Stream pkg.url into *dest* in 512-byte chunks (prints 'downloading')."""
        print('downloading {0}'.format(pkg.file_name))
        r = requests.get(pkg.url, stream=True)
        # BUG FIX: the original opened the file without `with`, leaking the
        # handle if a chunk write raised.
        with open(dest, 'wb') as f:
            for chunk in r.iter_content(chunk_size=512):
                if chunk:
                    f.write(chunk)

    def _push_package(self, bucket, pkg, src):
        """Resumable-upload *src* as pkg.file_name, make it public-read,
        then delete the local copy."""
        print('pushing {0}'.format(pkg.file_name))
        oss2.resumable_upload(bucket, pkg.file_name, src,
                              store=oss2.ResumableStore(root='/tmp'),
                              multipart_threshold=100 * 1024,
                              part_size=100 * 1024,
                              num_threads=4,
                              progress_callback=percentage)
        bucket.put_object_acl(pkg.file_name, oss2.OBJECT_ACL_PUBLIC_READ)
        os.remove(src)

    def push_to_oss(self, package_name=None):
        """Sync packages to OSS.

        With *package_name*: download and push just that package, then exit
        the process. Without it: push every package missing from the bucket,
        downloading it first when no local copy exists.
        """
        pkg_list = self.fetch_package_list()
        pkg_dir = os.path.join(self.base_path, 'packages')
        if not os.path.exists(pkg_dir):
            os.mkdir(pkg_dir)

        print('\n\nSyncing packages to OSS')
        oss_auth = oss2.Auth(self.access_key_id, self.access_key_secret)
        bucket = oss2.Bucket(oss_auth, self.endpoint, self.bucket)
        for pkg in pkg_list:
            if package_name:
                if package_name == pkg.file_name:
                    dest = os.path.join(pkg_dir, pkg.file_name)
                    self._download_package(pkg, dest)
                    print('downloaded {0}'.format(pkg.file_name))

                    # upload to oss (simple put, not resumable, as before)
                    sleep(10)
                    print('pushing {0}'.format(pkg.file_name))
                    with open(dest, 'rb') as fileobj:
                        bucket.put_object(pkg.file_name, fileobj)
                    bucket.put_object_acl(pkg.file_name, oss2.OBJECT_ACL_PUBLIC_READ)
                    sys.exit(0)
                continue
            if not bucket.object_exists(pkg.file_name):
                print('remote: {0} is not exist'.format(pkg))
                local_path = os.path.join(pkg_dir, pkg.file_name)
                # check local package
                if os.path.exists(local_path):
                    print('local: {0} is exist'.format(pkg))
                    self._push_package(bucket, pkg, local_path)
                elif pkg.platform == 'linux_x86-x86':
                    # No artifact published for this platform; skip silently.
                    pass
                else:
                    print('local: {0} is not exist'.format(pkg))
                    self._download_package(pkg, local_path)

                    # upload to oss
                    sleep(2)
                    self._push_package(bucket, pkg, local_path)
            else:
                print('remote: {0} is exist'.format(pkg))