Ejemplo n.º 1
0
 def test_postufiletowrongbucket(self):
     """Posting a small file to a nonexistent bucket should be rejected with HTTP 400."""
     self.postfile_handler.set_keys(public_key, private_key)
     # the target bucket does not exist, so the service should refuse the upload
     logger.info('\nstart post small file to wrong bucket')
     _, resp = self.postfile_handler.postfile(
         wrong_bucket, post_small_key, small_local_file)
     logger.error(resp.error)
     assert resp.status_code == 400
Ejemplo n.º 2
0
 def test_postufilewithwrongkey(self):
     """Posting with an invalid api key pair should be rejected with HTTP 403."""
     # install a deliberately broken key pair on the handler
     self.postfile_handler.set_keys(wrong_public_key, wrong_private_key)
     logger.info('\nstart post small file to public bucket with wrong api keys pair')
     # the request itself is well formed, so only authentication can fail here
     _, resp = self.postfile_handler.postfile(
         public_bucket, post_small_key, small_local_file)
     logger.error(resp.error)
     assert resp.status_code == 403
Ejemplo n.º 3
0
 def test_postufiletowrongbucket(self):
     """A post to a bucket that does not exist must come back as HTTP 400."""
     self.postfile_handler.set_keys(public_key, private_key)
     logger.info('\nstart post small file to wrong bucket')
     # an invalid bucket name should make the service reject the request
     _ret, resp = self.postfile_handler.postfile(wrong_bucket, post_small_key, small_local_file)
     logger.error(resp.error)
     assert resp.status_code == 400
Ejemplo n.º 4
0
 def test_postufilewithwrongkey(self):
     """Uploading with a bad api key pair must be rejected with HTTP 403."""
     # authenticate with an intentionally invalid key pair
     self.postfile_handler.set_keys(wrong_public_key, wrong_private_key)
     logger.info(
         '\nstart post small file to public bucket with wrong api keys pair'
     )
     _ret, resp = self.postfile_handler.postfile(public_bucket, post_small_key, small_local_file)
     logger.error(resp.error)
     assert resp.status_code == 403
Ejemplo n.º 5
0
    def test_putufilewithwrongkey(self):
        """PUT uploads signed with a bad api key pair must fail with HTTP 403."""
        self.putufile_handler.set_keys(wrong_public_key, wrong_private_key)

        # public bucket: authentication failure expected
        logger.info('\nput small file to public bucket with wrong api keys pair')
        _, resp = self.putufile_handler.putfile(
            public_bucket, put_small_key, small_local_file)
        assert resp.status_code == 403
        logger.info(resp.error)

        # private bucket: same key pair, same expected rejection
        logger.info('\nput small file to private bucket with wrong api keys pair')
        _, resp = self.putufile_handler.putfile(
            private_bucket, put_small_key, small_local_file)
        logger.error('status_code:{0}'.format(resp.status_code))
        assert resp.status_code == 403
        logger.info(resp.error)
Ejemplo n.º 6
0
 def test_postufile(self):
     """Post small and big files to the public and private buckets; each must return 200."""
     self.postfile_handler.set_keys(public_key, private_key)
     # (bucket, key, local path, log banner) for every upload combination
     cases = [
         (public_bucket, post_small_key, small_local_file,
          '\nstart post small file to public bucket'),
         (public_bucket, post_big_key, big_local_file,
          '\nstart post big file to public bucket'),
         (private_bucket, post_small_key, small_local_file,
          '\nstart post small file to private bucket'),
         (private_bucket, post_big_key, big_local_file,
          '\nstart post big file to private bucket'),
     ]
     for bucket, key, local_file, banner in cases:
         logger.info(banner)
         _, resp = self.postfile_handler.postfile(bucket, key, local_file)
         logger.error(resp.error)
         assert resp.status_code == 200
Ejemplo n.º 7
0
    def test_putufilewithwrongkey(self):
        """A PUT signed with a wrong api key pair is rejected with 403 on every bucket."""
        self.putufile_handler.set_keys(wrong_public_key, wrong_private_key)

        # attempt against the public bucket first
        logger.info(
            '\nput small file to public bucket with wrong api keys pair')
        _ret, resp = self.putufile_handler.putfile(public_bucket,
                                                   put_small_key,
                                                   small_local_file)
        assert resp.status_code == 403
        logger.info(resp.error)

        # then the private bucket, logging the status code for diagnosis
        logger.info(
            '\nput small file to private bucket with wrong api keys pair')
        _ret, resp = self.putufile_handler.putfile(private_bucket,
                                                   put_small_key,
                                                   small_local_file)
        logger.error('status_code:{0}'.format(resp.status_code))
        assert resp.status_code == 403
        logger.info(resp.error)
Ejemplo n.º 8
0
def _getfilelist(url, header, param):
    """
    Fetch the file list from the UFile service.

    @param url: string, URL of the file-list request
    @param header: dict of str->str HTTP request headers,
        e.g. {'User-Agent': 'Google Chrome'}
    @param param: dict of query parameters sent with the request
    @return ret: None when the HTTP status code is not one of [200, 204, 206];
        otherwise a dict parsed from the server's JSON reply (an empty dict
        when the server returned no JSON)
    @return ResponseInfo: detailed response info: the UCloud UFile server
        reply or the network exception encountered
    """
    timeout = config.get_default('connection_timeout')
    try:
        resp = requests.get(url, headers=header, params=param, timeout=timeout)
    except requests.RequestException as e:
        # network-level failure: no HTTP response available
        logger.error('send request error:{0}'.format(e))
        return None, ResponseInfo(None, e)
    return __return_wraper(resp)
Ejemplo n.º 9
0
 def test_postufile(self):
     """Post small/big files to the public and private buckets; each must return 200."""
     self.postfile_handler.set_keys(public_key, private_key)

     def post_and_verify(bucket, key, local_file, banner):
         # post one file and require the upload to succeed
         logger.info(banner)
         _, resp = self.postfile_handler.postfile(bucket, key, local_file)
         logger.error(resp.error)
         assert resp.status_code == 200

     post_and_verify(public_bucket, post_small_key, small_local_file,
                     '\nstart post small file to public bucket')
     post_and_verify(public_bucket, post_big_key, big_local_file,
                     '\nstart post big file to public bucket')
     post_and_verify(private_bucket, post_small_key, small_local_file,
                     '\nstart post small file to private bucket')
     post_and_verify(private_bucket, post_big_key, big_local_file,
                     '\nstart post big file to private bucket')
Ejemplo n.º 10
0
    def uploadstream(self, bucket, key, stream, retrycount=3, retryinterval=5, mime_type=None, header=None):
        """
        Multipart-upload a binary data stream to a UFile bucket.

        @param bucket: bucket name
        @param key: name the uploaded data will have inside the bucket
        @param stream: file-like object or binary data stream to upload
        @param mime_type: MIME type of the uploaded data
        @param retrycount: integer, retry attempts per failed part
        @param retryinterval: integer, seconds to sleep between retries of the same part
        @param header: dict of str->str HTTP request headers, e.g. {'User-Agent': 'Google Chrome'}
        @return ret: None when the HTTP status code is one of [200, 204, 206];
            otherwise a dict parsed from the server's JSON reply (possibly empty)
        @return ResponseInfo: detailed response info: the UCloud UFile server
            reply or the network exception encountered
        """
        self.__bucket = bucket
        self.__key = key
        # reset per-upload state so a handler instance can be reused
        self.etaglist = []
        self.uploadid = None
        self.blocksize = None
        self.__header = header
        self.__stream = stream
        self.pausepartnumber = 0
        if self.__header is None:
            self.__header = dict()
        else:
            _check_dict(self.__header)
        if 'User-Agent' not in self.__header:
            self.__header['User-Agent'] = config.get_default('user_agent')

        # initial sharding request: obtain the upload id and server-chosen block size
        ret, resp = self.__initialsharding()
        if resp.ok():
            self.uploadid = ret.get('UploadId')
            self.blocksize = ret.get('BlkSize')
            logger.info('multipart upload id: {0}'.format(self.uploadid))
        else:
            # fixed typo in log message ('multipar' -> 'multipart')
            logger.error('multipart upload init failed. error message: {0}'.format(resp.error))
            return ret, resp

        # determine the MIME type: sniff the stream when the caller gave none
        if mime_type is None:
            if hasattr(self.__stream, 'seek') and hasattr(self.__stream, 'read'):
                self.__mimetype = s(Mimetype.from_buffer(self.__stream.read(1024)))
                self.__stream.seek(0, os.SEEK_SET)
            else:
                # fixed typo: the standard fallback type is 'application/octet-stream'
                self.__mimetype = 'application/octet-stream'
        else:
            self.__mimetype = mime_type

        self.__header['Content-Type'] = self.__mimetype
        authorization = self.authorization('put', self.__bucket, self.__key, self.__header)
        self.__header['Authorization'] = authorization

        # upload the stream part by part, retrying each failed part
        for data in _file_iter(self.__stream, self.blocksize):
            url = shardingupload_url(self.__bucket, self.__key, self.uploadid, self.pausepartnumber)
            ret = None
            resp = None
            for index in range(retrycount):
                logger.info('try {0} time sharding upload sharding {1}'.format(index + 1, self.pausepartnumber))
                logger.info('sharding url:{0}'.format(url))
                ret, resp = _shardingupload(url, data, self.__header)
                if not resp.ok():
                    logger.error('failed {0} time when upload sharding {1}.error message: {2}, uploadid: {3}'.format(index + 1, self.pausepartnumber, resp.error, self.uploadid))
                    if index < retrycount - 1:
                        time.sleep(retryinterval)
                else:
                    break
            if not resp.ok():
                # give up on this part; pausepartnumber marks where a resume should restart
                logger.error('upload sharding {0} failed. uploadid: {1}'.format(self.pausepartnumber, self.uploadid))
                return ret, resp
            logger.info('upload sharding {0} succeed.etag:{1}, uploadid: {2}'.format(self.pausepartnumber, resp.etag, self.uploadid))
            self.pausepartnumber += 1
            self.etaglist.append(resp.etag)
        logger.info('start finish sharding request.')
        ret, resp = self.__finishupload()
        if not resp.ok():
            logger.error('multipart upload failed. uploadid:{0}, pausepartnumber: {1}, key: {2} FAIL!!!'.format(self.uploadid, self.pausepartnumber, self.__key))
        else:
            # fixed typo in log message ('mulitpart' -> 'multipart')
            logger.info('multipart upload succeed. uploadid: {0}, key: {1} SUCCEED!!!'.format(self.uploadid, self.__key))
        return ret, resp
Ejemplo n.º 11
0
    def resumeuploadstream(self, retrycount=3, retryinterval=5, bucket=None, key=None, uploadid=None, blocksize=None, etaglist=None, stream=None, pausepartnumber=None, mime_type=None, header=None):
        """
        Resume a multipart stream upload from the first failed part.

        Can be called right after a failed uploadstream() to continue with the
        state kept on the handler, or standalone by supplying every parameter.

        @param retrycount: integer, retry attempts per failed part
        @param retryinterval: integer, seconds to sleep between retries of the same part
        @param bucket: string, bucket name
        @param key: string, name of the file or data inside the bucket
        @param uploadid: string, the uploadid obtained when the sharding was initialized
        @param blocksize: integer, part size in bytes
        @param etaglist: list of etags of the parts already uploaded successfully
        @param pausepartnumber: integer, number of the first failed part (numbering starts at 0)
        @param stream: file-like object or binary data stream holding the data to re-upload
        @param mime_type: string, MIME type of the uploaded data
        @param header: dict of str->str HTTP request headers, e.g. {'User-Agent': 'Google Chrome'}
        @return ret: None when the HTTP status code is one of [200, 204, 206];
            otherwise a dict parsed from the server's JSON reply (possibly empty)
        @return ResponseInfo: detailed response info: the UCloud UFile server
            reply or the network exception encountered
        """

        # caller-supplied values override whatever state the handler kept
        if bucket:
            self.__bucket = bucket
        if key:
            self.__key = key
        if uploadid:
            self.uploadid = uploadid
        if blocksize:
            self.blocksize = blocksize
        if stream:
            self.__stream = stream
        if etaglist:
            self.etaglist = etaglist
        # compare against None explicitly: part numbering starts at 0, and a
        # plain truthiness test would silently ignore an explicit 0
        if pausepartnumber is not None:
            self.pausepartnumber = pausepartnumber
        if header:
            self.__header = header
        if mime_type is not None:
            self.__mimetype = mime_type
        elif self.__mimetype is None:
            # NOTE(review): self.__mimetype must already exist here; calling
            # this standalone without a prior uploadstream() and without
            # mime_type raises AttributeError -- confirm intended usage.
            # fixed typo: the standard fallback type is 'application/octet-stream'
            self.__mimetype = 'application/octet-stream'

        if self.__header is None:
            self.__header = dict()
        else:
            _check_dict(self.__header)
        if 'User-Agent' not in self.__header:
            self.__header['User-Agent'] = config.get_default('user_agent')

        # initial sharding request (only when no uploadid was carried over)
        if self.uploadid is None:
            ret, resp = self.__initialsharding()
            if resp.ok():
                self.uploadid = ret.get('UploadId')
                self.blocksize = ret.get('BlkSize')
                logger.info('multipart upload id: {0}'.format(self.uploadid))
            else:
                # fixed typo in log message ('multipar' -> 'multipart')
                logger.error('multipart upload init failed. error message: {0}'.format(resp.error))
                return ret, resp

        self.__header['Content-Type'] = self.__mimetype
        authorization = self.authorization('put', self.__bucket, self.__key, self.__header)
        self.__header['Authorization'] = authorization

        # re-upload from the first failed part, retrying each failed part
        for data in _file_iter(self.__stream, self.blocksize):
            url = shardingupload_url(self.__bucket, self.__key, self.uploadid, self.pausepartnumber)
            ret = None
            resp = None
            for index in range(retrycount):
                # fixed format string: second placeholder was {0} so the part
                # number was never printed; also 'sharing' -> 'sharding'
                logger.info('retry {0} time sharding upload sharding {1}'.format(index + 1, self.pausepartnumber))
                logger.info('sharding url:{0}'.format(url))
                ret, resp = _shardingupload(url, data, self.__header)
                if not resp.ok():
                    logger.error('failed {0} time when retry upload sharding {1},error message: {2}, uploadid: {3}'.format(index + 1, self.pausepartnumber, resp.error, self.uploadid))
                    if index < retrycount - 1:
                        time.sleep(retryinterval)
                else:
                    break
            if not resp.ok():
                # give up on this part; pausepartnumber marks where to resume next time
                logger.error('retry upload sharding {0} failed, uploadid: {1}'.format(self.pausepartnumber, self.uploadid))
                return ret, resp
            logger.info('retry upload sharding {0} succeed. etag: {1}, uploadid: {2}'.format(self.pausepartnumber, resp.etag, self.uploadid))
            self.pausepartnumber += 1
            self.etaglist.append(resp.etag)
        # finish sharding upload
        logger.info('start finish upload request')
        ret, resp = self.__finishupload()
        if not resp.ok():
            logger.error('multipart upload failed. uploadid:{0}, pausepartnumber: {1}, key: {2} FAIL!!!'.format(self.uploadid, self.pausepartnumber, self.__key))
        else:
            # fixed typo in log message ('mulitpart' -> 'multipart')
            logger.info('multipart upload succeed. uploadid: {0}, key: {1} SUCCEED!!!'.format(self.uploadid, self.__key))
        return ret, resp
Ejemplo n.º 12
0
    def uploadstream(self,
                     bucket,
                     key,
                     stream,
                     retrycount=3,
                     retryinterval=5,
                     mime_type=None,
                     header=None):
        """
        Multipart-upload a binary data stream to a UFile bucket.

        @param bucket: bucket name
        @param key: name the uploaded data will have inside the bucket
        @param stream: file-like object or binary data stream to upload
        @param mime_type: MIME type of the uploaded data
        @param retrycount: integer, retry attempts per failed part
        @param retryinterval: integer, seconds to sleep between retries of
            the same part
        @param header: dict of str->str HTTP request headers,
            e.g. {'User-Agent': 'Google Chrome'}
        @return ret: None when the HTTP status code is one of [200, 204, 206];
            otherwise a dict parsed from the server's JSON reply (possibly empty)
        @return ResponseInfo: detailed response info: the UCloud UFile server
            reply or the network exception encountered
        """
        self.__bucket = bucket
        self.__key = key
        # reset per-upload state so a handler instance can be reused
        self.etaglist = []
        self.uploadid = None
        self.blocksize = None
        self.__header = header
        self.__stream = stream
        self.pausepartnumber = 0
        if self.__header is None:
            self.__header = dict()
        else:
            _check_dict(self.__header)
        if 'User-Agent' not in self.__header:
            self.__header['User-Agent'] = config.get_default('user_agent')

        # initial sharding request: obtain upload id and server block size
        ret, resp = self.__initialsharding()
        if resp.ok():
            self.uploadid = ret.get('UploadId')
            self.blocksize = ret.get('BlkSize')
            logger.info('multipart upload id: {0}'.format(self.uploadid))
        else:
            # fixed typo in log message ('multipar' -> 'multipart')
            logger.error(
                'multipart upload init failed. error message: {0}'.format(
                    resp.error))
            return ret, resp

        # determine the MIME type: sniff the stream when the caller gave none
        if mime_type is None:
            if hasattr(self.__stream, 'seek') and hasattr(
                    self.__stream, 'read'):
                self.__mimetype = s(
                    Mimetype.from_buffer(self.__stream.read(1024)))
                self.__stream.seek(0, os.SEEK_SET)
            else:
                # fixed typo: standard fallback is 'application/octet-stream'
                self.__mimetype = 'application/octet-stream'
        else:
            self.__mimetype = mime_type

        self.__header['Content-Type'] = self.__mimetype
        authorization = self.authorization('put', self.__bucket, self.__key,
                                           self.__header)
        self.__header['Authorization'] = authorization

        # upload the stream part by part, retrying each failed part
        for data in _file_iter(self.__stream, self.blocksize):
            url = shardingupload_url(self.__bucket, self.__key, self.uploadid,
                                     self.pausepartnumber)
            ret = None
            resp = None
            for index in range(retrycount):
                logger.info('try {0} time sharding upload sharding {1}'.format(
                    index + 1, self.pausepartnumber))
                logger.info('sharding url:{0}'.format(url))
                ret, resp = _shardingupload(url, data, self.__header)
                if not resp.ok():
                    logger.error(
                        'failed {0} time when upload sharding {1}.error message: {2}, uploadid: {3}'
                        .format(index + 1, self.pausepartnumber, resp.error,
                                self.uploadid))
                    if index < retrycount - 1:
                        time.sleep(retryinterval)
                else:
                    break
            if not resp.ok():
                # give up on this part; pausepartnumber marks where to resume
                logger.error(
                    'upload sharding {0} failed. uploadid: {1}'.format(
                        self.pausepartnumber, self.uploadid))
                return ret, resp
            logger.info(
                'upload sharding {0} succeed.etag:{1}, uploadid: {2}'.format(
                    self.pausepartnumber, resp.etag, self.uploadid))
            self.pausepartnumber += 1
            self.etaglist.append(resp.etag)
        logger.info('start finish sharding request.')
        ret, resp = self.__finishupload()
        if not resp.ok():
            logger.error(
                'multipart upload failed. uploadid:{0}, pausepartnumber: {1}, key: {2} FAIL!!!'
                .format(self.uploadid, self.pausepartnumber, self.__key))
        else:
            # fixed typo in log message ('mulitpart' -> 'multipart')
            logger.info(
                'multipart upload succeed. uploadid: {0}, key: {1} SUCCEED!!!'.
                format(self.uploadid, self.__key))
        return ret, resp
Ejemplo n.º 13
0
    def resumeuploadstream(self,
                           retrycount=3,
                           retryinterval=5,
                           bucket=None,
                           key=None,
                           uploadid=None,
                           blocksize=None,
                           etaglist=None,
                           stream=None,
                           pausepartnumber=None,
                           mime_type=None,
                           header=None):
        """
        Resume a multipart stream upload from the first failed part.

        Can be called right after a failed uploadstream() to continue with the
        state kept on the handler, or standalone by supplying every parameter.

        @param retrycount: integer, retry attempts per failed part
        @param retryinterval: integer, seconds to sleep between retries of
            the same part
        @param bucket: string, bucket name
        @param key: string, name of the file or data inside the bucket
        @param uploadid: string, the uploadid obtained when the sharding was
            initialized
        @param blocksize: integer, part size in bytes
        @param etaglist: list of etags of the parts already uploaded
            successfully
        @param pausepartnumber: integer, number of the first failed part
            (numbering starts at 0)
        @param stream: file-like object or binary data stream holding the
            data to re-upload
        @param mime_type: string, MIME type of the uploaded data
        @param header: dict of str->str HTTP request headers,
            e.g. {'User-Agent': 'Google Chrome'}
        @return ret: None when the HTTP status code is one of [200, 204, 206];
            otherwise a dict parsed from the server's JSON reply (possibly empty)
        @return ResponseInfo: detailed response info: the UCloud UFile server
            reply or the network exception encountered
        """

        # caller-supplied values override whatever state the handler kept
        if bucket:
            self.__bucket = bucket
        if key:
            self.__key = key
        if uploadid:
            self.uploadid = uploadid
        if blocksize:
            self.blocksize = blocksize
        if stream:
            self.__stream = stream
        if etaglist:
            self.etaglist = etaglist
        # compare against None explicitly: part numbering starts at 0, and a
        # plain truthiness test would silently ignore an explicit 0
        if pausepartnumber is not None:
            self.pausepartnumber = pausepartnumber
        if header:
            self.__header = header
        if mime_type is not None:
            self.__mimetype = mime_type
        elif self.__mimetype is None:
            # NOTE(review): self.__mimetype must already exist here; calling
            # this standalone without a prior uploadstream() and without
            # mime_type raises AttributeError -- confirm intended usage.
            # fixed typo: standard fallback is 'application/octet-stream'
            self.__mimetype = 'application/octet-stream'

        if self.__header is None:
            self.__header = dict()
        else:
            _check_dict(self.__header)
        if 'User-Agent' not in self.__header:
            self.__header['User-Agent'] = config.get_default('user_agent')

        # initial sharding request (only when no uploadid was carried over)
        if self.uploadid is None:
            ret, resp = self.__initialsharding()
            if resp.ok():
                self.uploadid = ret.get('UploadId')
                self.blocksize = ret.get('BlkSize')
                logger.info('multipart upload id: {0}'.format(self.uploadid))
            else:
                # fixed typo in log message ('multipar' -> 'multipart')
                logger.error(
                    'multipart upload init failed. error message: {0}'.format(
                        resp.error))
                return ret, resp

        self.__header['Content-Type'] = self.__mimetype
        authorization = self.authorization('put', self.__bucket, self.__key,
                                           self.__header)
        self.__header['Authorization'] = authorization

        # re-upload from the first failed part, retrying each failed part
        for data in _file_iter(self.__stream, self.blocksize):
            url = shardingupload_url(self.__bucket, self.__key, self.uploadid,
                                     self.pausepartnumber)
            ret = None
            resp = None
            for index in range(retrycount):
                # fixed format string: second placeholder was {0} so the part
                # number was never printed; also 'sharing' -> 'sharding'
                logger.info(
                    'retry {0} time sharding upload sharding {1}'.format(
                        index + 1, self.pausepartnumber))
                logger.info('sharding url:{0}'.format(url))
                ret, resp = _shardingupload(url, data, self.__header)
                if not resp.ok():
                    logger.error(
                        'failed {0} time when retry upload sharding {1},error message: {2}, uploadid: {3}'
                        .format(index + 1, self.pausepartnumber, resp.error,
                                self.uploadid))
                    if index < retrycount - 1:
                        time.sleep(retryinterval)
                else:
                    break
            if not resp.ok():
                # give up on this part; pausepartnumber marks where to resume
                logger.error(
                    'retry upload sharding {0} failed, uploadid: {1}'.format(
                        self.pausepartnumber, self.uploadid))
                return ret, resp
            logger.info(
                'retry upload sharding {0} succeed. etag: {1}, uploadid: {2}'.
                format(self.pausepartnumber, resp.etag, self.uploadid))
            self.pausepartnumber += 1
            self.etaglist.append(resp.etag)
        # finish sharding upload
        logger.info('start finish upload request')
        ret, resp = self.__finishupload()
        if not resp.ok():
            logger.error(
                'multipart upload failed. uploadid:{0}, pausepartnumber: {1}, key: {2} FAIL!!!'
                .format(self.uploadid, self.pausepartnumber, self.__key))
        else:
            # fixed typo in log message ('mulitpart' -> 'multipart')
            logger.info(
                'multipart upload succeed. uploadid: {0}, key: {1} SUCCEED!!!'.
                format(self.uploadid, self.__key))
        return ret, resp