def _form_put(up_token, key, data, params, mime_type, crc, progress_handler=None):
    fields = {}
    if params:
        for k, v in params.items():
            fields[k] = str(v)
    if crc:
        fields['crc32'] = crc
    if key is not None:
        fields['key'] = key
    fields['token'] = up_token

    url = 'http://' + config.get_default('default_up_host') + '/'
    name = key if key else 'filename'

    r, info = http._post_file(url, data=fields, files={'file': (name, data, mime_type)})
    if r is None and info.need_retry():
        if info.connect_failed:
            url = 'http://' + config.get_default('default_up_host_backup') + '/'
        if hasattr(data, 'read') is False:
            pass
        elif hasattr(data, 'seek') and (not hasattr(data, 'seekable') or data.seekable()):
            data.seek(0)
        else:
            return r, info
        r, info = http._post_file(url, data=fields, files={'file': (name, data, mime_type)})

    return r, info
def _init():
    global _session
    _session = requests.Session()
    adapter = requests.adapters.HTTPAdapter(
        pool_connections=config.get_default('connection_pool'),
        pool_maxsize=config.get_default('connection_pool'),
        max_retries=config.get_default('connection_retries'))
    _session.mount('http://', adapter)
def _init():
    session = requests.Session()
    adapter = requests.adapters.HTTPAdapter(
        pool_connections=config.get_default("connection_pool"),
        pool_maxsize=config.get_default("connection_pool"),
        max_retries=config.get_default("connection_retries"),
    )
    session.mount("http://", adapter)

    global _session
    _session = session
def _put(up_token, key, data, params, mime_type, crc32, is_file=False):
    fields = {}
    if params:
        for k, v in params.items():
            fields[k] = str(v)
    if crc32:
        fields['crc32'] = crc32
    if key is not None:
        fields['key'] = key
    fields['token'] = up_token

    url = 'http://' + config.get_default('default_up_host') + '/'
    name = key if key else 'filename'

    r = None
    exception = None
    headers = {'User-Agent': config.USER_AGENT}
    try:
        r = _post(url, data=fields, files={'file': (name, data, mime_type)}, headers=headers)
    except Exception as e:
        exception = e
    finally:
        retry = _need_retry(r, exception)

    if retry:
        url = 'http://' + config.UPBACKUP_HOST + '/'
        if is_file:
            data.seek(0)
        try:
            r = _post(url, data=fields, files={'file': (name, data, mime_type)}, headers=headers)
        except Exception as e:
            raise QiniuClientException(str(e))

    return _ret(r)
def make_block(self, block, block_size):
    crc = crc32(block)
    url = self.block_url(config.get_default('default_up_host'), block_size)
    r = None
    exception = None
    try:
        r = self.post(url, block)
    except Exception as e:
        exception = e
    finally:
        retry = _need_retry(r, exception)

    if retry:
        url = self.block_url(config.UPBACKUP_HOST, block_size)
        try:
            r = self.post(url, block)
        except Exception as e:
            raise QiniuClientException(str(e))

    ret = _ret(r)
    if ret['crc32'] != crc:
        raise QiniuServiceException(
            r.status_code, 'unmatch crc checksum', r.headers['X-Reqid'])
    return ret
def _get_with_qiniu_mac(url, params, auth):
    try:
        r = requests.get(
            url,
            params=params,
            auth=qiniu.auth.QiniuMacRequestsAuth(auth) if auth is not None else None,
            timeout=config.get_default('connection_timeout'),
            headers=_headers)
    except Exception as e:
        return None, ResponseInfo(None, e)
    return __return_wrapper(r)
def _get(url, params, auth):
    try:
        r = requests.get(
            url,
            params=params,
            auth=RequestsAuth(auth),
            timeout=config.get_default('connection_timeout'),
            headers=_headers)
    except Exception as e:
        return None, ResponseInfo(None, e)
    return __return_wrapper(r)
def _post(url, data, files, auth):
    if _session is None:
        _init()
    try:
        r = _session.post(
            url, data=data, files=files, auth=auth,
            headers=_headers,
            timeout=config.get_default('connection_timeout'))
    except Exception as e:
        return None, ResponseInfo(None, e)
    return __return_wrapper(r)
def _post_with_qiniu_mac(url, data, auth):
    qn_auth = qiniu.auth.QiniuMacRequestsAuth(auth) if auth is not None else None
    timeout = config.get_default('connection_timeout')
    try:
        r = requests.post(url, json=data, auth=qn_auth, timeout=timeout, headers=_headers)
    except Exception as e:
        return None, ResponseInfo(None, e)
    return __return_wrapper(r)
def upload(self):
    """Upload operation."""
    self.blockStatus = []
    host = config.get_default('default_up_host')
    for block in _file_iter(self.input_stream, config._BLOCK_SIZE):
        length = len(block)
        crc = crc32(block)
        ret, info = self.make_block(block, length, host)
        if ret is None and not info.need_retry:
            return ret, info
        if info.connect_failed:
            host = config.get_default('default_up_host_backup')
        if info.need_retry or crc != ret['crc32']:
            ret, info = self.make_block(block, length, host)
            if ret is None or crc != ret['crc32']:
                return ret, info
        self.blockStatus.append(ret)
        if callable(self.progress_handler):
            self.progress_handler(((len(self.blockStatus) - 1) * config._BLOCK_SIZE) + length, self.size)
    return self.make_file(host)
def _get(url, params, auth):
    if _session is None:
        _init()
    try:
        r = _session.get(
            url,
            params=params,
            auth=qiniu.auth.RequestsAuth(auth) if auth is not None else None,
            timeout=config.get_default('connection_timeout'),
            headers=_headers)
    except Exception as e:
        return None, ResponseInfo(None, e)
    return __return_wrapper(r)
def set_bucketImagesource(self, bucket_name, resourceDomain, resourceHost=None):
    """
    Set the image (mirror) source of a bucket

    https://developer.qiniu.com/kodo/api/3966/bucket-image-source

    Args:
        bucket_name:    bucket name
        resourceDomain: domain of the mirror source
        resourceHost:   optional Host header sent to the mirror source
    """
    resourceDomain = urlsafe_base64_encode(resourceDomain)
    if resourceHost is None or resourceHost == "":
        url = "{0}/image/{1}/from/{2}".format(
            config.get_default("default_uc_host"),
            bucket_name,
            resourceDomain)
    else:
        resourceHost = urlsafe_base64_encode(resourceHost)
        url = "{0}/image/{1}/from/{2}/host/{3}".format(
            config.get_default("default_uc_host"),
            bucket_name,
            resourceDomain,
            resourceHost)
    data = None
    return http._post_with_qiniu_mac(url, data, self.auth)
def _form_put(up_token, key, data, params, mime_type, crc, hostscache_dir=None, progress_handler=None,
              file_name=None, modify_time=None, keep_last_modified=False):
    fields = {}
    if params:
        for k, v in params.items():
            fields[k] = str(v)
    if crc:
        fields['crc32'] = crc
    if key is not None:
        fields['key'] = key
    fields['token'] = up_token

    if config.get_default('default_zone').up_host:
        url = config.get_default('default_zone').up_host
    else:
        url = config.get_default('default_zone').get_up_host_by_token(up_token, hostscache_dir)
    # name = key if key else file_name

    fname = file_name
    if not fname or not fname.strip():
        fname = 'file_name'

    # last modify time
    if modify_time and keep_last_modified:
        fields['x-qn-meta-!Last-Modified'] = rfc_from_timestamp(modify_time)

    r, info = http._post_file(url, data=fields, files={'file': (fname, data, mime_type)})
    if r is None and info.need_retry():
        if info.connect_failed:
            if config.get_default('default_zone').up_host_backup:
                url = config.get_default('default_zone').up_host_backup
            else:
                url = config.get_default('default_zone').get_up_host_backup_by_token(up_token, hostscache_dir)
        if hasattr(data, 'read') is False:
            pass
        elif hasattr(data, 'seek') and (not hasattr(data, 'seekable') or data.seekable()):
            data.seek(0)
        else:
            return r, info
        r, info = http._post_file(url, data=fields, files={'file': (fname, data, mime_type)})

    return r, info
def bucket_domain(self, bucket_name):
    """
    Get the list of domains bound to a bucket

    Args:
        bucket_name: bucket name
    """
    options = {
        'tbl': bucket_name,
    }
    url = "{0}/v6/domain/list?tbl={1}".format(
        config.get_default("default_api_host"), bucket_name)
    return self.__get(url, options)
def _post(url, data, files, auth, headers=None):
    if _session is None:
        _init()
    try:
        post_headers = _headers.copy()
        if headers is not None:
            for k, v in headers.items():
                post_headers.update({k: v})
        r = _session.post(
            url, data=data, files=files, auth=auth,
            headers=post_headers,
            timeout=config.get_default('connection_timeout'))
    except Exception as e:
        return None, ResponseInfo(None, e)
    return __return_wrapper(r)
def _put(url, data, files, auth, headers=None):
    if _session is None:
        _init()
    try:
        post_headers = _headers.copy()
        if headers is not None:
            for k, v in headers.items():
                post_headers.update({k: v})
        r = _session.put(
            url, data=data, files=files, auth=auth,
            headers=post_headers,
            timeout=config.get_default('connection_timeout'))
    except Exception as e:
        return None, ResponseInfo(None, e)
    return __return_wrapper(r)
def upload(self):
    """Upload operation."""
    self.blockStatus = []
    host = config.get_default('default_zone').get_up_host_by_token(self.up_token)
    offset = self.recovery_from_record()
    for block in _file_iter(self.input_stream, config._BLOCK_SIZE, offset):
        length = len(block)
        crc = crc32(block)
        ret, info = self.make_block(block, length, host)
        if ret is None and not info.need_retry():
            return ret, info
        if info.connect_failed():
            host = config.get_default('default_zone').get_up_host_backup_by_token(self.up_token)
        if info.need_retry() or crc != ret['crc32']:
            ret, info = self.make_block(block, length, host)
            if ret is None or crc != ret['crc32']:
                return ret, info
        self.blockStatus.append(ret)
        offset += length
        self.record_upload_progress(offset)
        if callable(self.progress_handler):
            self.progress_handler(((len(self.blockStatus) - 1) * config._BLOCK_SIZE) + length, self.size)
    return self.make_file(host)
def _delete_with_qiniu_mac_and_headers(url, params, auth, headers):
    try:
        post_headers = _headers.copy()
        if headers is not None:
            for k, v in headers.items():
                post_headers.update({k: v})
        r = requests.delete(
            url,
            params=params,
            auth=qiniu.auth.QiniuMacRequestsAuth(auth) if auth is not None else None,
            timeout=config.get_default('connection_timeout'),
            headers=post_headers)
    except Exception as e:
        return None, ResponseInfo(None, e)
    return __return_wrapper(r)
def pfop(auth, bucket, key, fops, pipeline=None, notify_url=None):
    ops = '|'.join(fops)
    data = {'bucket': bucket, 'key': key, 'fops': ops}
    if pipeline:
        data['pipeline'] = pipeline
    if notify_url:
        data['notifyURL'] = notify_url
    headers = {'User-Agent': config.USER_AGENT}
    url = 'http://{0}/pfop'.format(config.API_HOST)
    r = requests.post(
        url, data=data, auth=RequestsAuth(auth),
        timeout=config.get_default('connection_timeout'),
        headers=headers)
    return _ret(r)
def list(self, bucket, prefix=None, marker=None, limit=None, delimiter=None):
    """Prefix listing:

    1. For the first request, pass marker = None.
    2. Regardless of the err value, always check whether ret.get('items') has content first.
    3. When there is no more data, err returns EOF and marker returns None (but do not rely on
       this alone to decide that listing has finished).
    See the specification at http://developer.qiniu.com/docs/v6/api/reference/rs/list.html

    Args:
        bucket:    bucket name
        prefix:    key prefix to list
        marker:    marker returned by the previous listing
        limit:     maximum number of entries per request
        delimiter: directory delimiter

    Returns:
        a dict such as {"hash": "<Hash string>", "key": "<Key string>"}
        a ResponseInfo object
        an EOF flag
    """
    options = {
        'bucket': bucket,
    }
    if marker is not None:
        options['marker'] = marker
    if limit is not None:
        options['limit'] = limit
    if prefix is not None:
        options['prefix'] = prefix
    if delimiter is not None:
        options['delimiter'] = delimiter

    url = 'http://{0}/list'.format(config.get_default('default_rsf_host'))
    ret, info = self.__get(url, options)

    eof = False
    if ret and not ret.get('marker'):
        eof = True

    return ret, eof, info
def list(self, bucket, prefix=None, marker=None, limit=None, delimiter=None):
    """Prefix listing:

    1. For the first request, pass marker = None.
    2. Regardless of the err value, always check whether ret.get('items') has content first.
    3. When there is no more data, err returns EOF and marker returns None (but do not rely on
       this alone to decide that listing has finished).
    See the specification at http://developer.qiniu.com/docs/v6/api/reference/rs/list.html

    Args:
        bucket:    bucket name
        prefix:    key prefix to list
        marker:    marker returned by the previous listing
        limit:     maximum number of entries per request
        delimiter: directory delimiter

    Returns:
        a dict such as {"hash": "<Hash string>", "key": "<Key string>"}
        a ResponseInfo object
        an EOF flag
    """
    options = {
        'bucket': bucket,
    }
    if marker is not None:
        options['marker'] = marker
    if limit is not None:
        options['limit'] = limit
    if prefix is not None:
        options['prefix'] = prefix
    if delimiter is not None:
        options['delimiter'] = delimiter

    url = '{0}/list'.format(config.get_default('default_rsf_host'))
    ret, info = self.__get(url, options)

    eof = False
    if ret and not ret.get('marker'):
        eof = True

    return ret, eof, info
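# Usage sketch for the listing method above: page through a bucket by feeding the marker
# back until eof. This assumes the qiniu package exposes Auth and BucketManager as in this
# SDK; the credentials and bucket name are placeholders.
from qiniu import Auth, BucketManager

def list_all_keys(access_key, secret_key, bucket_name, prefix=None):
    q = Auth(access_key, secret_key)
    bucket = BucketManager(q)
    marker = None
    keys = []
    while True:
        ret, eof, info = bucket.list(bucket_name, prefix=prefix, marker=marker, limit=1000)
        # Always inspect ret.get('items') first, regardless of the error value.
        if ret is None:
            break
        keys.extend(item['key'] for item in ret.get('items', []))
        if eof:
            break
        marker = ret.get('marker')
    return keys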
def _post_with_qiniu_mac(url, data, auth, headers=None):
    post_headers = _headers.copy()
    if headers is not None:
        for k, v in headers.items():
            post_headers.update({k: v})
    access_key = auth.get_access_key()
    secret_key = auth.get_secret_key()
    qn_auth = qiniu.auth.QiniuMacRequestsAuth(
        qiniu.auth.QiniuMacAuth(access_key, secret_key)) if auth is not None else None
    timeout = config.get_default('connection_timeout')
    try:
        r = requests.post(url, json=data, auth=qn_auth, timeout=timeout, headers=post_headers)
    except Exception as e:
        return None, ResponseInfo(None, e)
    return __return_wrapper(r)
def make_file(self):
    url = self.make_file_url(config.get_default('default_up_host'))
    body = ','.join([status['ctx'] for status in self.blockStatus])
    r = None
    exception = None
    try:
        r = self.post(url, body)
    except Exception as e:
        exception = e
    finally:
        retry = _need_retry(r, exception)

    if retry:
        url = self.make_file_url(config.UPBACKUP_HOST)
        try:
            r = self.post(url, body)
        except Exception as e:
            raise QiniuClientException(str(e))

    return _ret(r)
def _form_put(up_token, key, data, params, mime_type, crc, is_file=False, progress_handler=None):
    fields = {}
    if params:
        for k, v in params.items():
            fields[k] = str(v)
    if crc:
        fields['crc32'] = crc
    if key is not None:
        fields['key'] = key
    fields['token'] = up_token

    url = 'http://' + config.get_default('default_up_host') + '/'
    name = key if key else 'filename'

    r, info = http._post_file(url, data=fields, files={'file': (name, data, mime_type)})
    if r is None and info.need_retry():
        if info.connect_failed:
            url = 'http://' + config.UPBACKUP_HOST + '/'
        if is_file:
            data.seek(0)
        r, info = http._post_file(url, data=fields, files={'file': (name, data, mime_type)})

    return r, info
def batch(self, operations):
    """Batch operation:

    Perform multiple resource-management operations in a single request, see the specification at
    http://developer.qiniu.com/docs/v6/api/reference/rs/batch.html

    Args:
        operations: array of resource-management operations

    Returns:
        a dict such as:
            [
                { "code": <HttpCode int>, "data": <Data> },
                { "code": <HttpCode int> },
                { "code": <HttpCode int> },
                { "code": <HttpCode int> },
                { "code": <HttpCode int>, "data": { "error": "<ErrorMessage string>" } },
                ...
            ]
        a ResponseInfo object
    """
    url = '{0}/batch'.format(config.get_default('default_rs_host'))
    return self.__post(url, dict(op=operations))
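# Usage sketch for batch(): stat several keys in one request. This assumes the
# build_batch_stat helper shipped with this SDK; bucket and key names are placeholders.
from qiniu import Auth, BucketManager, build_batch_stat

q = Auth('<access_key>', '<secret_key>')
bucket = BucketManager(q)
ops = build_batch_stat('my-bucket', ['a.txt', 'b.txt', 'c.txt'])
ret, info = bucket.batch(ops)
if ret is not None:
    for item in ret:
        # Each entry carries its own HTTP code and, on success, a data payload.
        print(item['code'], item.get('data'))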
def execute(self, key, fops, force=None):
    """Trigger persistent processing:

    Args:
        key:   source file to process
        fops:  detailed processing operations, see
               http://developer.qiniu.com/docs/v6/api/reference/fop/
        force: whether to force re-execution of the persistent processing

    Returns:
        a dict with the persistentId of the processing job, such as {"persistentId": 5476bedf7823de4068253bae};
        a ResponseInfo object
    """
    ops = ";".join(fops)
    data = {"bucket": self.bucket, "key": key, "fops": ops}
    if self.pipeline:
        data["pipeline"] = self.pipeline
    if self.notify_url:
        data["notifyURL"] = self.notify_url
    if force == 1:
        data["force"] = 1
    url = "http://{0}/pfop".format(config.get_default("default_api_host"))
    return http._post_with_auth(url, data, self.auth)
def execute(self, key, fops, force=None):
    """Trigger persistent processing:

    Args:
        key:   source file to process
        fops:  detailed processing operations, see
               http://developer.qiniu.com/docs/v6/api/reference/fop/
        force: whether to force re-execution of the persistent processing

    Returns:
        a dict with the persistentId of the processing job, such as {"persistentId": 5476bedf7823de4068253bae};
        a ResponseInfo object
    """
    ops = ';'.join(fops)
    data = {'bucket': self.bucket, 'key': key, 'fops': ops}
    if self.pipeline:
        data['pipeline'] = self.pipeline
    if self.notify_url:
        data['notifyURL'] = self.notify_url
    if force == 1:
        data['force'] = 1
    url = '{0}/pfop'.format(config.get_default('default_api_host'))
    return http._post_with_auth(url, data, self.auth)
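# Usage sketch for execute(): trigger an avthumb transcode through the PersistentFop
# wrapper in this SDK. The pipeline name, bucket, key, and fop string are placeholders;
# the saveas target is encoded as described in the fop specification linked above.
from qiniu import Auth, PersistentFop, urlsafe_base64_encode

q = Auth('<access_key>', '<secret_key>')
pfop = PersistentFop(q, 'my-bucket', pipeline='my-pipeline', notify_url=None)
saveas = urlsafe_base64_encode('my-bucket:video_480p.mp4')
fops = ['avthumb/mp4/s/640x480|saveas/' + saveas]
ret, info = pfop.execute('video.mp4', fops, force=1)
print(ret)  # e.g. {"persistentId": "..."}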
def __rs_do(self, operation, *args):
    return self.__server_do(config.get_default('default_rs_host'), operation, *args)
def __init__(self, auth, zone=None):
    self.auth = auth
    if zone is None:
        self.zone = config.get_default('default_zone')
    else:
        self.zone = zone
def _post(url, data=None, files=None, headers=None):
    if _session is None:
        _init()
    return _session.post(
        url, data=data, files=files, headers=headers,
        timeout=config.get_default('connection_timeout'))
def upload(self):
    """Upload operation."""
    self.blockStatus = []
    self.recovery_index = 1
    self.expiredAt = None
    self.uploadId = None
    self.get_bucket()
    host = self.get_up_host()
    if self.version == 'v1':
        offset = self.recovery_from_record()
        self.part_size = config._BLOCK_SIZE
    elif self.version == 'v2':
        offset, self.uploadId, self.expiredAt = self.recovery_from_record()
        if offset > 0 and self.blockStatus != [] and self.uploadId is not None \
                and self.expiredAt is not None:
            self.recovery_index = self.blockStatus[-1]['partNumber'] + 1
        else:
            self.recovery_index = 1
            init_url = self.block_url_v2(host, self.bucket_name)
            self.uploadId, self.expiredAt = self.init_upload_task(init_url)
    else:
        raise ValueError("version must choose v1 or v2 !")

    for index, block in enumerate(_file_iter(self.input_stream, self.part_size, offset)):
        length = len(block)
        if self.version == 'v1':
            crc = crc32(block)
            ret, info = self.make_block(block, length, host)
        elif self.version == 'v2':
            index_ = index + self.recovery_index
            url = self.block_url_v2(host, self.bucket_name) + '/%s/%d' % (self.uploadId, index_)
            ret, info = self.make_block_v2(block, url)
        if ret is None and not info.need_retry():
            return ret, info
        if info.connect_failed():
            if config.get_default('default_zone').up_host_backup:
                host = config.get_default('default_zone').up_host_backup
            else:
                host = config.get_default('default_zone').get_up_host_backup_by_token(
                    self.up_token, self.hostscache_dir)
        if self.version == 'v1':
            if info.need_retry() or crc != ret['crc32']:
                ret, info = self.make_block(block, length, host)
                if ret is None or crc != ret['crc32']:
                    return ret, info
        elif self.version == 'v2':
            if info.need_retry():
                url = self.block_url_v2(host, self.bucket_name) + '/%s/%d' % (self.uploadId, index + 1)
                ret, info = self.make_block_v2(block, url)
                if ret is None:
                    return ret, info
            del ret['md5']
            ret['partNumber'] = index_

        self.blockStatus.append(ret)
        offset += length
        self.record_upload_progress(offset)
        if callable(self.progress_handler):
            self.progress_handler(((len(self.blockStatus) - 1) * self.part_size) + len(block), self.size)

    if self.version == 'v1':
        return self.make_file(host)
    elif self.version == 'v2':
        make_file_url = self.block_url_v2(host, self.bucket_name) + '/%s' % self.uploadId
        return self.make_file_v2(self.blockStatus, make_file_url, self.file_name,
                                 self.mime_type, self.params)
def put_file(
        up_token, key, file_path, params=None,
        mime_type='application/octet-stream', check_crc=False,
        progress_handler=None, upload_progress_recorder=None,
        keep_last_modified=False, hostscache_dir=None,
        part_size=None, version=None, bucket_name=None,
        raise_exception=False,
):
    """Upload a file to Qiniu

    Args:
        up_token:                 upload token
        key:                      key to store the file under
        file_path:                path of the file to upload
        params:                   custom variables, see
                                  http://developer.qiniu.com/docs/v6/api/overview/up/response/vars.html#xvar
        mime_type:                mimeType of the uploaded data
        check_crc:                whether to verify crc32 (deprecated)
        progress_handler:         upload progress callback
        upload_progress_recorder: records upload progress, used for resumable uploads
        hostscache_dir:           directory where the host-request cache file is saved
        version:                  multipart upload version, currently v1/v2, default v1
        part_size:                required for multipart upload v2; default 4MB, allowed range 1 MB - 1 GB
        bucket_name:              bucket name, used by multipart upload v2
        raise_exception:          verify key and hash after the upload and raise if they do not match

    Returns:
        a dict such as {"hash": "<Hash string>", "key": "<Key string>"}
        a ResponseInfo object
    """
    ret = {}
    size = os.stat(file_path).st_size
    with open(file_path, 'rb') as input_stream:
        file_name = os.path.basename(file_path)
        modify_time = int(os.path.getmtime(file_path))
        if size > config.get_default('default_upload_threshold'):
            ret, info = put_stream(
                up_token, key, input_stream, file_name, size, hostscache_dir,
                params, mime_type, progress_handler,
                upload_progress_recorder=upload_progress_recorder,
                modify_time=modify_time, keep_last_modified=keep_last_modified,
                part_size=part_size, version=version, bucket_name=bucket_name)
        else:
            crc = file_crc32(file_path)
            ret, info = _form_put(
                up_token, key, input_stream, params, mime_type, crc,
                hostscache_dir, progress_handler, file_name,
                modify_time=modify_time, keep_last_modified=keep_last_modified)
    if raise_exception is True:
        if (ret["key"] != key) or (ret["hash"] != etag(file_path)):
            raise UploadException("data validation failed: key or hash mismatch")
    return ret, info
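# Usage sketch for put_file(): files below the upload threshold go through the form
# upload path, larger ones through put_stream. Assumes Auth, put_file, and etag as
# exposed by this SDK; credentials, bucket, key, and local path are placeholders.
from qiniu import Auth, put_file, etag

q = Auth('<access_key>', '<secret_key>')
token = q.upload_token('my-bucket', 'backup/data.bin', 3600)
ret, info = put_file(token, 'backup/data.bin', '/tmp/data.bin',
                     version='v2', part_size=4 * 1024 * 1024,
                     bucket_name='my-bucket')
# Verify the server-side result against the local file, mirroring raise_exception above.
assert ret is not None and ret['key'] == 'backup/data.bin'
assert ret['hash'] == etag('/tmp/data.bin')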