def cloud_query_magnetinfo(cookie, tokens, source_url, save_path):
    """Query the metadata of a magnet link.

    Called before creating a magnet offline-download task, to learn which
    files the magnet contains, their names and sizes, etc.

    source_url - the magnet URL, starting with "magnet:".
    save_path  - the server-side directory to save the download into.

    Returns the decoded JSON response dict, or None on network failure.
    """
    url = "".join(
        [
            const.PAN_URL,
            "rest/2.0/services/cloud_dl?channel=chunlei&clienttype=0&web=1",
            "&bdstoken=",
            tokens["bdstoken"],
        ]
    )
    data = "".join(
        [
            "method=query_magnetinfo&app_id=250528",
            "&source_url=",
            encoder.encode_uri_component(source_url),
            "&save_path=",
            encoder.encode_uri_component(save_path),
            "&type=4",
        ]
    )
    req = net.urlopen(url, headers={"Cookie": cookie.header_output()}, data=data.encode())
    if req:
        content = req.data
        return json.loads(content.decode())
    else:
        return None
def rapid_upload(cookie, tokens, source_path, path, upload_mode):
    '''Rapid upload: register a file by its MD5 hashes, without sending data.

    The local file must be larger than RAPIDUPLOAD_THRESHOLD bytes.
    Returns the decoded JSON reply, or None on network failure.
    '''
    ondup = const.UPLOAD_ONDUP[upload_mode]
    size = os.path.getsize(source_path)
    assert size > RAPIDUPLOAD_THRESHOLD, 'file size is not satisfied!'
    folder, leaf = os.path.split(path)
    whole_md5 = hasher.md5(source_path)
    # md5 of the first RAPIDUPLOAD_THRESHOLD bytes only.
    head_md5 = hasher.md5(source_path, 0, RAPIDUPLOAD_THRESHOLD)
    query = [
        const.PCS_URL_C,
        'file?method=rapidupload&app_id=250528',
        '&ondup=', ondup,
        '&dir=', encoder.encode_uri_component(folder),
        '&filename=', encoder.encode_uri_component(leaf),
        '&content-length=', str(size),
        '&content-md5=', whole_md5,
        '&slice-md5=', head_md5,
        '&path=', encoder.encode_uri_component(path),
        '&', cookie.sub_output('BDUSS'),
        '&bdstoken=', tokens['bdstoken'],
    ]
    req = net.urlopen(''.join(query), headers={'Cookie': cookie.header_output()})
    if not req:
        return None
    return json.loads(req.data.decode())
def upload(cookie, source_path, path, upload_mode):
    """Upload one file through the web upload interface.

    source_path - local absolute path of the file to upload.
    path        - remote absolute path (directory + file name).
    upload_mode - const.UploadMode; if the file already exists on the server:
      * overwrite - replace it in place.
      * newcopy   - keep the old file, append a timestamp to the new name.

    Returns the decoded JSON response dict, or None on network failure.
    """
    ondup = const.UPLOAD_ONDUP[upload_mode]
    dir_name, file_name = os.path.split(path)
    url = "".join(
        [
            const.PCS_URL_C,
            "file?method=upload&app_id=250528",
            "&ondup=",
            ondup,
            "&dir=",
            encoder.encode_uri_component(dir_name),
            "&filename=",
            encoder.encode_uri_component(file_name),
            "&",
            cookie.sub_output("BDUSS"),
        ]
    )
    # The whole file is read into memory before posting.
    with open(source_path, "rb") as fh:
        data = fh.read()
    fields = []
    files = [("file", file_name, data)]
    headers = {"Accept": const.ACCEPT_HTML, "Origin": const.PAN_URL}
    req = net.post_multipart(url, headers, fields, files)
    if req:
        return json.loads(req.data.decode())
    else:
        return None
def share_transfer(cookie, tokens, shareid, uk, filelist, dest, upload_mode):
    """Save files shared by another user into one's own netdisk.

    uk       - the other user's uk (user key).
    shareid  - identifier of the share.
    filelist - absolute paths of the shared files to transfer.
    dest     - destination directory in one's own netdisk.

    Returns the decoded JSON response dict, or None on network failure.
    """
    ondup = const.UPLOAD_ONDUP[upload_mode]
    url = "".join(
        [
            const.PAN_URL,
            "share/transfer?app_id=250528&channel=chunlei&clienttype=0&web=1",
            "&bdstoken=",
            tokens["bdstoken"],
            "&from=",
            uk,
            "&shareid=",
            shareid,
            "&ondup=",
            ondup,
            "&async=1",
        ]
    )
    data = "".join(
        ["path=", encoder.encode_uri_component(dest), "&filelist=", encoder.encode_uri_component(json.dumps(filelist))]
    )
    req = net.urlopen(
        url, headers={"Cookie": cookie.header_output(), "Content-Type": const.CONTENT_FORM_UTF8}, data=data.encode()
    )
    if req:
        content = req.data.decode()
        return json.loads(content)
    else:
        return None
def upload(cookie, source_path, path, upload_mode):
    '''Upload a single file via the web multipart interface.

    upload_mode selects the server-side collision policy (overwrite or
    newcopy). Returns the decoded JSON reply, or None on failure.
    '''
    ondup = const.UPLOAD_ONDUP[upload_mode]
    remote_dir, remote_name = os.path.split(path)
    url = (const.PCS_URL_C +
           'file?method=upload&app_id=250528' +
           '&ondup=' + ondup +
           '&dir=' + encoder.encode_uri_component(remote_dir) +
           '&filename=' + encoder.encode_uri_component(remote_name) +
           '&' + cookie.sub_output('BDUSS'))
    with open(source_path, 'rb') as stream:
        payload = stream.read()
    req = net.post_multipart(
            url,
            {'Accept': const.ACCEPT_HTML, 'Origin': const.PAN_URL},
            [],
            [('file', remote_name, payload)])
    return json.loads(req.data.decode()) if req else None
def cloud_query_magnetinfo(cookie, tokens, source_url, save_path):
    '''Query which files a magnet link contains (names, sizes, ...).

    Must be called before creating a magnet offline-download task.
    source_url is the magnet URL; save_path the target directory.
    Returns the decoded JSON reply, or None on network failure.
    '''
    url = (const.PAN_URL +
           'rest/2.0/services/cloud_dl?channel=chunlei&clienttype=0&web=1' +
           '&bdstoken=' + tokens['bdstoken'])
    body = ('method=query_magnetinfo&app_id=250528' +
            '&source_url=' + encoder.encode_uri_component(source_url) +
            '&save_path=' + encoder.encode_uri_component(save_path) +
            '&type=4')
    req = net.urlopen(url, headers={'Cookie': cookie.header_output()},
                      data=body.encode())
    if not req:
        return None
    return json.loads(req.data.decode())
def cloud_add_bt_task(cookie, tokens, source_url, save_path, selected_idx, file_sha1="", vcode="", vcode_input=""):
    """Create a BT offline-download task (torrent file or magnet link).

    source_url   - absolute netdisk path of the torrent, or a magnet: URL.
    save_path    - directory to store the downloaded files in.
    selected_idx - 1-based indices of the files inside the torrent to fetch.
    file_sha1    - sha1 of the torrent; may be empty for magnet links.
    vcode        - captcha id; vcode_input - the user's captcha answer.

    Returns the decoded JSON response dict, or None on network failure.
    """
    url = "".join(
        [
            const.PAN_URL,
            "rest/2.0/services/cloud_dl?channel=chunlei&clienttype=0&web=1",
            "&bdstoken=",
            tokens["bdstoken"],
        ]
    )
    # Magnet links use type 4 with "source_url"; torrents type 2 with
    # "source_path".
    type_ = "2"
    url_type = "source_path"
    if source_url.startswith("magnet:"):
        type_ = "4"
        url_type = "source_url"
    if not save_path.endswith("/"):
        save_path = save_path + "/"
    data = [
        "method=add_task&app_id=250528",
        "&file_sha1=",
        file_sha1,
        "&save_path=",
        encoder.encode_uri_component(save_path),
        "&selected_idx=",
        ",".join(str(i) for i in selected_idx),
        "&task_from=1",
        "&t=",
        util.timestamp(),
        "&",
        url_type,
        "=",
        encoder.encode_uri_component(source_url),
        "&type=",
        type_,
    ]
    if vcode:
        data.append("&input=")
        data.append(vcode_input)
        data.append("&vcode=")
        data.append(vcode)
    data = "".join(data)
    req = net.urlopen(url, headers={"Cookie": cookie.header_output()}, data=data.encode())
    if req:
        content = req.data
        return json.loads(content.decode())
    else:
        return None
def cloud_add_bt_task(cookie, tokens, source_url, save_path, selected_idx,
                      file_sha1='', vcode='', vcode_input=''):
    '''Create a BT offline-download task (torrent or magnet link).

    selected_idx - 1-based indices of the torrent's files to download.
    file_sha1    - sha1 of the torrent; may be empty for magnet links.
    vcode / vcode_input - captcha id and answer, when required.
    Returns the decoded JSON reply, or None on network failure.
    '''
    url = (const.PAN_URL +
           'rest/2.0/services/cloud_dl?channel=chunlei&clienttype=0&web=1' +
           '&bdstoken=' + tokens['bdstoken'])
    is_magnet = source_url.startswith('magnet:')
    type_ = '4' if is_magnet else '2'
    url_type = 'source_url' if is_magnet else 'source_path'
    if not save_path.endswith('/'):
        save_path += '/'
    parts = [
        'method=add_task&app_id=250528',
        '&file_sha1=', file_sha1,
        '&save_path=', encoder.encode_uri_component(save_path),
        '&selected_idx=', ','.join(str(i) for i in selected_idx),
        '&task_from=1',
        '&t=', util.timestamp(),
        '&', url_type, '=', encoder.encode_uri_component(source_url),
        '&type=', type_,
    ]
    if vcode:
        parts.extend(['&input=', vcode_input, '&vcode=', vcode])
    req = net.urlopen(url, headers={'Cookie': cookie.header_output()},
                      data=''.join(parts).encode())
    if not req:
        return None
    return json.loads(req.data.decode())
def mkdir(cookie, tokens, path):
    '''Create a directory at the given absolute path.

    Returns a dict with fs_id, ctime etc., or None on network failure.
    '''
    url = (const.PAN_API_URL +
           'create?a=commit&channel=chunlei&clienttype=0&web=1' +
           '&bdstoken=' + tokens['bdstoken'])
    # block_list=%5B%5D is the URL-encoded empty JSON list "[]".
    body = ('path=' + encoder.encode_uri_component(path) +
            '&isdir=1&size=&block_list=%5B%5D&method=post')
    headers = {
        'Cookie': cookie.header_output(),
        'Content-type': const.CONTENT_FORM_UTF8,
    }
    req = net.urlopen(url, headers=headers, data=body.encode())
    if not req:
        return None
    return json.loads(req.data.decode())
def list_dir(cookie, tokens, path, page=1, num=100):
    '''List the files in a directory (at most 100 entries per page).

    Returns the decoded JSON reply, or None on network failure.
    '''
    timestamp = util.timestamp()
    url = ''.join([
        const.PAN_API_URL,
        'list?channel=chunlei&clienttype=0&web=1',
        '&num=', str(num),
        '&t=', timestamp,
        '&page=', str(page),
        '&dir=', encoder.encode_uri_component(path),
        # second "&t=" mirrors the web client; it carries a latency value.
        '&t=', util.latency(),
        '&order=time&desc=1',
        '&_=', timestamp,
        '&bdstoken=', tokens['bdstoken'],
    ])
    req = net.urlopen(url, headers={
        'Content-type': const.CONTENT_FORM_UTF8,
        'Cookie': cookie.sub_output('BAIDUID', 'BDUSS', 'PANWEB', 'cflag'),
    })
    if req:
        content = req.data
        return json.loads(content.decode())
    else:
        return None
def delete_trash(cookie, tokens, fidlist):
    '''Permanently delete entries from the recycle bin - NOT reversible.

    fidlist - fs_id values of the directories/files to delete.
    If any fs_id does not exist in the recycle bin, the server reports an
    error and the whole request fails.
    Returns the decoded JSON reply, or None on network failure.
    '''
    url = ''.join([
        const.PAN_API_URL,
        'recycle/delete?channel=chunlei&clienttype=0&web=1',
        '&bdstoken=', tokens['bdstoken'],
    ])
    data = 'fidlist=' + encoder.encode_uri_component(json.dumps(fidlist))
    req = net.urlopen(url, headers={
        'Cookie': cookie.header_output(),
        'Content-type': const.CONTENT_FORM_UTF8,
    }, data=data.encode())
    if req:
        content = req.data
        return json.loads(content.decode())
    else:
        return None
def copy(cookie, tokens, filelist):
    '''Copy files/directories to a new location.

    Each entry of filelist is a dict with:
      path    - current absolute path, including the file name.
      dest    - target directory, without the file name.
      newname - new name (may equal the current one).
    Returns the decoded JSON reply, or None on network failure.
    '''
    url = (const.PAN_API_URL +
           'filemanager?channel=chunlei&clienttype=0&web=1&opera=copy' +
           '&bdstoken=' + tokens['bdstoken'])
    body = 'filelist=' + encoder.encode_uri_component(json.dumps(filelist))
    headers = {
        'Cookie': cookie.header_output(),
        'Content-type': const.CONTENT_FORM_UTF8,
    }
    req = net.urlopen(url, headers=headers, data=body.encode())
    if not req:
        return None
    return json.loads(req.data.decode())
def rename(cookie, tokens, filelist):
    """Batch-rename files/directories (name only, not their directory).

    Each entry of filelist is a dict with:
      path    - absolute path, including the file name.
      newname - the new name.
    Returns the decoded JSON reply, or None on network failure.
    """
    url = "".join(
        [
            const.PAN_API_URL,
            "filemanager?channel=chunlei&clienttype=0&web=1&opera=rename",
            "&bdstoken=",
            tokens["bdstoken"],
        ]
    )
    data = "filelist=" + encoder.encode_uri_component(json.dumps(filelist))
    req = net.urlopen(
        url, headers={"Content-type": const.CONTENT_FORM_UTF8, "Cookie": cookie.header_output()}, data=data.encode()
    )
    if req:
        content = req.data
        return json.loads(content.decode())
    else:
        return None
def move(cookie, tokens, filelist):
    """Move files/directories to a new location.

    filelist holds at least one dict, each with:
      path    - current absolute path, including the file name.
      dest    - target directory, without the file name.
      newname - new name (may equal the current one).
    Returns the decoded JSON reply, or None on network failure.
    """
    url = "".join(
        [
            const.PAN_API_URL,
            "filemanager?channel=chunlei&clienttype=0&web=1&opera=move",
            "&bdstoken=",
            tokens["bdstoken"],
        ]
    )
    data = "filelist=" + encoder.encode_uri_component(json.dumps(filelist))
    req = net.urlopen(
        url, headers={"Cookie": cookie.header_output(), "Content-type": const.CONTENT_FORM_UTF8}, data=data.encode()
    )
    if req:
        content = req.data
        return json.loads(content.decode())
    else:
        return None
def copy(cookie, tokens, filelist):
    """Copy files/directories to a new location.

    Each entry of filelist is a dict with:
      path    - current absolute path, including the file name.
      dest    - target directory, without the file name.
      newname - new name (may equal the current one).
    Returns the decoded JSON reply, or None on network failure.
    """
    url = "".join(
        [
            const.PAN_API_URL,
            "filemanager?channel=chunlei&clienttype=0&web=1&opera=copy",
            "&bdstoken=",
            tokens["bdstoken"],
        ]
    )
    data = "filelist=" + encoder.encode_uri_component(json.dumps(filelist))
    req = net.urlopen(
        url, headers={"Cookie": cookie.header_output(), "Content-type": const.CONTENT_FORM_UTF8}, data=data.encode()
    )
    if req:
        content = req.data
        return json.loads(content.decode())
    else:
        return None
def rename(cookie, tokens, filelist):
    '''Rename a batch of files/directories in place (name only).

    Each filelist entry is a dict with `path` (absolute, with file name)
    and `newname`. Returns the decoded JSON reply, or None on failure.
    '''
    url = (const.PAN_API_URL +
           'filemanager?channel=chunlei&clienttype=0&web=1&opera=rename' +
           '&bdstoken=' + tokens['bdstoken'])
    body = 'filelist=' + encoder.encode_uri_component(json.dumps(filelist))
    headers = {
        'Content-type': const.CONTENT_FORM_UTF8,
        'Cookie': cookie.header_output(),
    }
    req = net.urlopen(url, headers=headers, data=body.encode())
    if not req:
        return None
    return json.loads(req.data.decode())
def cloud_query_sinfo(cookie, tokens, source_path):
    """Query the contents (file names and sizes) of a torrent in the netdisk.

    source_path - absolute path of the torrent file on the server.
    Returns the decoded JSON reply, or None on network failure.
    """
    url = "".join(
        [
            const.PAN_URL,
            "rest/2.0/services/cloud_dl?channel=chunlei&clienttype=0&web=1",
            "&method=query_sinfo&app_id=250528",
            "&bdstoken=",
            tokens["bdstoken"],
            "&source_path=",
            encoder.encode_uri_component(source_path),
            "&type=2",
            "&t=",
            util.timestamp(),
        ]
    )
    req = net.urlopen(url, headers={"Cookie": cookie.header_output()})
    if req:
        content = req.data
        return json.loads(content.decode())
    else:
        return None
def move(cookie, tokens, filelist):
    '''Move files/directories to a new location.

    Each filelist entry is a dict with `path` (current absolute path,
    with file name), `dest` (target directory, no file name) and
    `newname`. Returns the decoded JSON reply, or None on failure.
    '''
    url = (const.PAN_API_URL +
           'filemanager?channel=chunlei&clienttype=0&web=1&opera=move' +
           '&bdstoken=' + tokens['bdstoken'])
    body = 'filelist=' + encoder.encode_uri_component(json.dumps(filelist))
    headers = {
        'Cookie': cookie.header_output(),
        'Content-type': const.CONTENT_FORM_UTF8,
    }
    req = net.urlopen(url, headers=headers, data=body.encode())
    if not req:
        return None
    return json.loads(req.data.decode())
def list_trash(cookie, tokens, path='/', page=1, num=100):
    '''List the contents of the recycle bin.

    path - absolute path, root directory by default.
    page - page number, first page by default.
    num  - entries per page, 100 by default.

    Entries stay in the recycle bin for 10 days before being purged and
    do not count against the user's storage quota.
    Returns the decoded JSON reply, or None on network failure.
    '''
    url = ''.join([
        const.PAN_API_URL,
        'recycle/list?channel=chunlei&clienttype=0&web=1',
        '&num=', str(num),
        '&t=', util.timestamp(),
        # fix: the page argument was accepted but never sent, so every
        # call returned page 1 (compare list_dir, which sends &page=).
        '&page=', str(page),
        '&dir=', encoder.encode_uri_component(path),
        '&t=', util.latency(),
        '&order=time&desc=1',
        '&_=', util.timestamp(),
        '&bdstoken=', tokens['bdstoken'],
    ])
    req = net.urlopen(url, headers={'Cookie': cookie.header_output()})
    if req:
        content = req.data
        return json.loads(content.decode())
    else:
        return None
def list_trash(cookie, tokens, path="/", page=1, num=100):
    """List the contents of the recycle bin.

    path - absolute path, root directory by default.
    page - page number, first page by default.
    num  - entries per page, 100 by default.

    Entries stay in the recycle bin for 10 days before being purged and
    do not count against the user's storage quota.
    Returns the decoded JSON reply, or None on network failure.
    """
    url = "".join(
        [
            const.PAN_API_URL,
            "recycle/list?channel=chunlei&clienttype=0&web=1",
            "&num=",
            str(num),
            "&t=",
            util.timestamp(),
            # fix: the page argument was accepted but never sent, so every
            # call returned page 1 (compare list_dir, which sends &page=).
            "&page=",
            str(page),
            "&dir=",
            encoder.encode_uri_component(path),
            "&t=",
            util.latency(),
            "&order=time&desc=1",
            "&_=",
            util.timestamp(),
            "&bdstoken=",
            tokens["bdstoken"],
        ]
    )
    req = net.urlopen(url, headers={"Cookie": cookie.header_output()})
    if req:
        content = req.data
        return json.loads(content.decode())
    else:
        return None
def check_login(cookie, tokens, username):
    '''Verify the account's state on the server before the final login step.

    If the account does not exist or is abnormal, the last login step can
    be skipped; a captcha may be required at this stage.

    The server reply looks like:
    {"errInfo":{ "no": "0" }, "data": { "codeString" : "", "vcodetype" : "" }}

    Returns (set_cookie_headers, parsed_json), or None on network failure.
    '''
    url = ''.join([
        const.PASSPORT_URL,
        '?logincheck',
        '&token=', tokens['token'],
        '&tpl=mm&apiver=v3',
        '&tt=', util.timestamp(),
        # fix: the username expression had been lost (scrubbed to ******),
        # leaving a syntax error; the name must be percent-encoded here.
        '&username=', encoder.encode_uri_component(username),
        '&isphone=false',
    ])
    headers = {
        'Cookie': cookie.header_output(),
        'Referer': const.REFERER,
    }
    req = net.urlopen(url, headers=headers)
    if req:
        ubi = req.headers.get_all('Set-Cookie')
        return ubi, json.loads(req.data.decode())
    else:
        return None
def restore_trash(cookie, tokens, fidlist):
    '''Restore files/directories from the recycle bin.

    fidlist - fs_id values of the entries to restore.
    Returns the decoded JSON reply, or None on network failure.
    '''
    url = (const.PAN_API_URL +
           'recycle/restore?channel=chunlei&clienttype=0&web=1' +
           '&t=' + util.timestamp() +
           '&bdstoken=' + tokens['bdstoken'])
    body = 'fidlist=' + encoder.encode_uri_component(json.dumps(fidlist))
    headers = {
        'Cookie': cookie.header_output(),
        'Content-type': const.CONTENT_FORM_UTF8,
    }
    req = net.urlopen(url, headers=headers, data=body.encode())
    if not req:
        return None
    return json.loads(req.data.decode())
def check_login(cookie, tokens, username):
    '''Verify the account's state on the server before the final login step.

    If the account does not exist or is abnormal, the last login step can
    be skipped; a captcha may be required at this stage.

    The server reply looks like:
    {"errInfo":{ "no": "0" }, "data": { "codeString" : "", "vcodetype" : "" }}

    Returns (set_cookie_headers, parsed_json), or None on network failure.
    '''
    url = ''.join([
        const.PASSPORT_URL,
        '?logincheck',
        '&token=', tokens['token'],
        '&tpl=mm&apiver=v3',
        '&tt=', util.timestamp(),
        # fix: the username expression had been lost (scrubbed to ******),
        # leaving a syntax error; the name must be percent-encoded here.
        '&username=', encoder.encode_uri_component(username),
        '&isphone=false',
    ])
    headers = {
        'Cookie': cookie.header_output(),
        'Referer': const.REFERER,
    }
    req = net.urlopen(url, headers=headers)
    if req:
        ubi = req.headers.get_all('Set-Cookie')
        return ubi, json.loads(req.data.decode())
    else:
        return None
def cloud_add_link_task(cookie, tokens, source_url, save_path,
                        vcode='', vcode_input=''):
    '''Create an offline-download task for a plain link.

    source_url - an http/https/ftp link, or an eMule (ed2k) link.
    save_path  - target directory, absolute, e.g. /Music/.
    vcode / vcode_input - captcha id and answer, when required.
    Returns the decoded JSON reply, or None on network failure.
    '''
    url = (const.PAN_URL +
           'rest/2.0/services/cloud_dl?channel=chunlei&clienttype=0&web=1' +
           '&bdstoken=' + tokens['bdstoken'])
    type_ = '&type=3' if source_url.startswith('ed2k') else ''
    if not save_path.endswith('/'):
        save_path += '/'
    parts = [
        'method=add_task&app_id=250528',
        '&source_url=', encoder.encode_uri_component(source_url),
        '&save_path=', encoder.encode_uri_component(save_path),
        '&type=', type_,
    ]
    if vcode:
        parts.extend(['&input=', vcode_input, '&vcode=', vcode])
    req = net.urlopen(url, headers={'Cookie': cookie.header_output()},
                      data=''.join(parts).encode())
    if not req:
        return None
    return json.loads(req.data.decode())
def cloud_add_link_task(cookie, tokens, source_url, save_path, vcode="", vcode_input=""):
    """Create an offline-download task for a plain link.

    source_url - an http/https/ftp link, or an eMule (ed2k) link.
    save_path  - target directory, absolute, starting and ending with /,
                 e.g. /Music/.
    vcode      - captcha id; vcode_input - the user's captcha answer.
    Returns the decoded JSON reply, or None on network failure.
    """
    url = "".join(
        [
            const.PAN_URL,
            "rest/2.0/services/cloud_dl?channel=chunlei&clienttype=0&web=1",
            "&bdstoken=",
            tokens["bdstoken"],
        ]
    )
    # ed2k links need an explicit type; other links send an empty type.
    type_ = ""
    if source_url.startswith("ed2k"):
        type_ = "&type=3"
    if not save_path.endswith("/"):
        save_path = save_path + "/"
    data = [
        "method=add_task&app_id=250528",
        "&source_url=",
        encoder.encode_uri_component(source_url),
        "&save_path=",
        encoder.encode_uri_component(save_path),
        "&type=",
        type_,
    ]
    if vcode:
        data.append("&input=")
        data.append(vcode_input)
        data.append("&vcode=")
        data.append(vcode)
    data = "".join(data)
    req = net.urlopen(url, headers={"Cookie": cookie.header_output()}, data=data.encode())
    if req:
        content = req.data
        return json.loads(content.decode())
    else:
        return None
def stream_download(cookie, tokens, path):
    """Request a streaming-media file without following redirects.

    path - absolute path of the stream file on the server.
    Returns the raw response object, or None on network failure.
    """
    url = (const.PCS_URL_D + "file?method=download" +
           "&path=" + encoder.encode_uri_component(path) +
           "&app_id=250528")
    req = net.urlopen_without_redirect(
            url, headers={"Cookie": cookie.header_output()})
    return req if req else None
def get_share_url_with_dirname(uk, shareid, dirname):
    '''Build the web link of a shared directory.'''
    encoded_dir = encoder.encode_uri_component(dirname)
    return (const.PAN_URL + 'wap/link' +
            '?shareid=' + shareid +
            '&uk=' + uk +
            '&dir=' + encoded_dir +
            '&third=0')
def share_transfer(cookie, tokens, shareid, uk, filelist, dest, upload_mode):
    '''Save files shared by another user into one's own netdisk.

    uk       - the sharer's user key.
    filelist - absolute paths of the shared files to transfer.
    dest     - destination directory in one's own netdisk.
    Returns the decoded JSON reply, or None on network failure.
    '''
    ondup = const.UPLOAD_ONDUP[upload_mode]
    url = (const.PAN_URL +
           'share/transfer?app_id=250528&channel=chunlei&clienttype=0&web=1' +
           '&bdstoken=' + tokens['bdstoken'] +
           '&from=' + uk +
           '&shareid=' + shareid +
           '&ondup=' + ondup +
           '&async=1')
    body = ('path=' + encoder.encode_uri_component(dest) +
            '&filelist=' + encoder.encode_uri_component(json.dumps(filelist)))
    headers = {
        'Cookie': cookie.header_output(),
        'Content-Type': const.CONTENT_FORM_UTF8,
    }
    req = net.urlopen(url, headers=headers, data=body.encode())
    if not req:
        return None
    return json.loads(req.data.decode())
def rapid_upload(cookie, tokens, source_path, path, upload_mode):
    """Rapid upload: register a file by its MD5 hashes, without sending data.

    The local file must be larger than RAPIDUPLOAD_THRESHOLD bytes.
    NOTE(review): presumably this only succeeds when the server already
    holds a file with the same hashes - verify against callers.
    Returns the decoded JSON reply, or None on network failure.
    """
    ondup = const.UPLOAD_ONDUP[upload_mode]
    content_length = os.path.getsize(source_path)
    # NOTE: assert is stripped under `python -O`; size precondition only.
    assert content_length > RAPIDUPLOAD_THRESHOLD, "file size is not satisfied!"
    dir_name, file_name = os.path.split(path)
    content_md5 = hasher.md5(source_path)
    # md5 of the first RAPIDUPLOAD_THRESHOLD bytes only.
    slice_md5 = hasher.md5(source_path, 0, RAPIDUPLOAD_THRESHOLD)
    url = "".join(
        [
            const.PCS_URL_C,
            "file?method=rapidupload&app_id=250528",
            "&ondup=",
            ondup,
            "&dir=",
            encoder.encode_uri_component(dir_name),
            "&filename=",
            encoder.encode_uri_component(file_name),
            "&content-length=",
            str(content_length),
            "&content-md5=",
            content_md5,
            "&slice-md5=",
            slice_md5,
            "&path=",
            encoder.encode_uri_component(path),
            "&",
            cookie.sub_output("BDUSS"),
            "&bdstoken=",
            tokens["bdstoken"],
        ]
    )
    req = net.urlopen(url, headers={"Cookie": cookie.header_output()})
    if req:
        return json.loads(req.data.decode())
    else:
        return None
def get_metas(cookie, tokens, filelist, dlink=True):
    '''Fetch metadata for one or more files.

    filelist - a list of absolute paths, or a single path string.
    dlink    - when True (default) the reply includes a download link
               from which the final download URL can be derived.
    Returns the decoded JSON reply, or None on network failure.
    '''
    # Accept a bare string as a one-element list.
    if isinstance(filelist, str):
        filelist = [filelist]
    url = (const.PAN_API_URL +
           'filemetas?channel=chunlei&clienttype=0&web=1' +
           '&bdstoken=' + tokens['bdstoken'])
    flag = '1' if dlink else '0'
    body = ('dlink=' + flag + '&target=' +
            encoder.encode_uri_component(json.dumps(filelist)))
    headers = {
        'Cookie': cookie.sub_output('BDUSS'),
        'Content-type': const.CONTENT_FORM,
    }
    req = net.urlopen(url, headers=headers, data=body.encode())
    if not req:
        return None
    return json.loads(req.data.decode())
def get_share_url_with_dirname(uk, shareid, dirname):
    """Build the web link of a shared directory."""
    return "".join(
        [
            const.PAN_URL,
            "wap/link",
            "?shareid=",
            shareid,
            "&uk=",
            uk,
            "&dir=",
            encoder.encode_uri_component(dirname),
            "&third=0",
        ]
    )
def get_metas(cookie, tokens, filelist, dlink=True):
    """Fetch metadata for one or more files.

    filelist - a list of absolute paths, or a single path string.
    dlink    - when True (default) the reply includes a download link.

    @return decoded JSON; with dlink the entries carry a `dlink` field from
    which the final download URL can be derived. None on network failure.
    """
    # Accept a bare string as a one-element list.
    if isinstance(filelist, str):
        filelist = [filelist]
    url = "".join([const.PAN_API_URL, "filemetas?channel=chunlei&clienttype=0&web=1", "&bdstoken=", tokens["bdstoken"]])
    if dlink:
        data = "dlink=1&target=" + encoder.encode_uri_component(json.dumps(filelist))
    else:
        data = "dlink=0&target=" + encoder.encode_uri_component(json.dumps(filelist))
    req = net.urlopen(
        url, headers={"Cookie": cookie.sub_output("BDUSS"), "Content-type": const.CONTENT_FORM}, data=data.encode()
    )
    if req:
        content = req.data
        return json.loads(content.decode())
    else:
        return None
def stream_download(cookie, tokens, path):
    '''Download a streaming-media file.

    path - absolute path of the stream file on the server.
    Returns the raw response object (redirects are not followed), or None
    on network failure.
    '''
    url = ''.join([
        const.PCS_URL_D,
        'file?method=download',
        '&path=', encoder.encode_uri_component(path),
        '&app_id=250528',
    ])
    req = net.urlopen_without_redirect(
        url, headers={'Cookie': cookie.header_output()})
    if req:
        return req
    else:
        return None
def mkdir(cookie, tokens, path):
    """Create a directory at the absolute path `path`.

    @return a dict with fs_id, ctime etc., or None on network failure.
    """
    url = "".join(
        [const.PAN_API_URL, "create?a=commit&channel=chunlei&clienttype=0&web=1", "&bdstoken=", tokens["bdstoken"]]
    )
    # block_list=%5B%5D is the URL-encoded empty JSON list "[]".
    data = "".join(["path=", encoder.encode_uri_component(path), "&isdir=1&size=&block_list=%5B%5D&method=post"])
    req = net.urlopen(
        url, headers={"Cookie": cookie.header_output(), "Content-type": const.CONTENT_FORM_UTF8}, data=data.encode()
    )
    if req:
        content = req.data
        return json.loads(content.decode())
    else:
        return None
def list_share_files(cookie, tokens, uk, shareid, dirname, page=1):
    '''List the files inside one directory of a user's share.

    Works for any user, not only the sharer.
    uk      - the sharer's user key.
    shareid - identifier of the share.
    dirname - the shared directory; when None the share may be a single
              file, so list_share_single_file() is called instead. It is
              also the fallback whenever the directory listing fails.
    '''
    if not dirname:
        return list_share_single_file(cookie, tokens, uk, shareid)
    url = ''.join([
        const.PAN_URL,
        'share/list?channel=chunlei&clienttype=0&web=1&num=50',
        '&t=', util.timestamp(),
        '&page=', str(page),
        '&dir=', encoder.encode_uri_component(dirname),
        # second "&t=" mirrors the web client; it carries a latency value.
        '&t=', util.latency(),
        '&shareid=', shareid,
        '&order=time&desc=1',
        '&uk=', uk,
        '&_=', util.timestamp(),
        '&bdstoken=', tokens['bdstoken'],
    ])
    req = net.urlopen(url, headers={
        'Cookie': cookie.header_output(),
        'Referer': const.SHARE_REFERER,
    })
    if req:
        content = req.data
        info = json.loads(content.decode())
        if info['errno'] == 0:
            return info['list']
    return list_share_single_file(cookie, tokens, uk, shareid)
def delete_trash(cookie, tokens, fidlist):
    """Permanently delete entries from the recycle bin - NOT reversible.

    fidlist - fs_id values of the directories/files to delete.
    If any fs_id does not exist in the recycle bin, the server reports an
    error and the whole request fails.
    Returns the decoded JSON reply, or None on network failure.
    """
    url = "".join(
        [const.PAN_API_URL, "recycle/delete?channel=chunlei&clienttype=0&web=1", "&bdstoken=", tokens["bdstoken"]]
    )
    data = "fidlist=" + encoder.encode_uri_component(json.dumps(fidlist))
    req = net.urlopen(
        url, headers={"Cookie": cookie.header_output(), "Content-type": const.CONTENT_FORM_UTF8}, data=data.encode()
    )
    if req:
        content = req.data
        return json.loads(content.decode())
    else:
        return None
def list_share_files(cookie, tokens, uk, shareid, dirname, page=1):
    '''List the files inside one directory of a user's share.

    Works for any user. uk is the sharer's user key; shareid identifies
    the share. When dirname is None the share may be a single file, so
    list_share_single_file() is used instead; it is also the fallback
    whenever the directory listing fails.
    '''
    if not dirname:
        return list_share_single_file(cookie, tokens, uk, shareid)
    url = (const.PAN_URL +
           'share/list?channel=chunlei&clienttype=0&web=1&num=50' +
           '&t=' + util.timestamp() +
           '&page=' + str(page) +
           '&dir=' + encoder.encode_uri_component(dirname) +
           '&t=' + util.latency() +
           '&shareid=' + shareid +
           '&order=time&desc=1' +
           '&uk=' + uk +
           '&_=' + util.timestamp() +
           '&bdstoken=' + tokens['bdstoken'])
    req = net.urlopen(url, headers={
        'Cookie': cookie.header_output(),
        'Referer': const.SHARE_REFERER,
    })
    if req:
        info = json.loads(req.data.decode())
        if info['errno'] == 0:
            return info['list']
    return list_share_single_file(cookie, tokens, uk, shareid)
def get_streaming_playlist(cookie, path, video_type='M3U8_AUTO_480'):
    '''Fetch the playlist of a streaming video (m3u8 format by default).

    path       - absolute path of the video.
    video_type - format/quality selector, chosen per bandwidth and source.
    Returns the raw playlist bytes, or None on network failure.
    '''
    url = (const.PCS_URL +
           'file?method=streaming' +
           '&path=' + encoder.encode_uri_component(path) +
           '&type=' + video_type +
           '&app_id=250528')
    req = net.urlopen(url, headers={'Cookie': cookie.header_output()})
    return req.data if req else None
def delete_files(cookie, tokens, filelist):
    """Batch-delete files/directories.

    filelist - absolute paths of the files/directories to delete.
    Returns the decoded JSON reply, or None on network failure.
    """
    url = "".join(
        [
            const.PAN_API_URL,
            "filemanager?channel=chunlei&clienttype=0&web=1&opera=delete",
            "&bdstoken=",
            tokens["bdstoken"],
        ]
    )
    data = "filelist=" + encoder.encode_uri_component(json.dumps(filelist))
    req = net.urlopen(
        url, headers={"Content-type": const.CONTENT_FORM_UTF8, "Cookie": cookie.header_output()}, data=data.encode()
    )
    if req:
        content = req.data
        return json.loads(content.decode())
    else:
        return None
def delete_files(cookie, tokens, filelist):
    '''Batch-delete files/directories.

    filelist - absolute paths of the files/directories to delete.
    Returns the decoded JSON reply, or None on network failure.
    '''
    url = (const.PAN_API_URL +
           'filemanager?channel=chunlei&clienttype=0&web=1&opera=delete' +
           '&bdstoken=' + tokens['bdstoken'])
    body = 'filelist=' + encoder.encode_uri_component(json.dumps(filelist))
    headers = {
        'Content-type': const.CONTENT_FORM_UTF8,
        'Cookie': cookie.header_output(),
    }
    req = net.urlopen(url, headers=headers, data=body.encode())
    if not req:
        return None
    return json.loads(req.data.decode())
def get_streaming_playlist(cookie, path, video_type="M3U8_AUTO_480"):
    """Fetch the playlist of a streaming video (m3u8 format by default).

    path       - absolute path of the video.
    video_type - format/quality selector, chosen per bandwidth and source.
    Returns the raw playlist bytes, or None on network failure.
    """
    url = "".join(
        [
            const.PCS_URL,
            "file?method=streaming",
            "&path=",
            encoder.encode_uri_component(path),
            "&type=",
            video_type,
            "&app_id=250528",
        ]
    )
    req = net.urlopen(url, headers={"Cookie": cookie.header_output()})
    if req:
        return req.data
    else:
        return None
def cloud_query_sinfo(cookie, tokens, source_path):
    '''Query the contents of a torrent stored in the netdisk.

    source_path - absolute path of the torrent file on the server.
    Returns the decoded JSON reply (file names, sizes, ...), or None on
    network failure.
    '''
    url = (const.PAN_URL +
           'rest/2.0/services/cloud_dl?channel=chunlei&clienttype=0&web=1' +
           '&method=query_sinfo&app_id=250528' +
           '&bdstoken=' + tokens['bdstoken'] +
           '&source_path=' + encoder.encode_uri_component(source_path) +
           '&type=2' +
           '&t=' + util.timestamp())
    req = net.urlopen(url, headers={'Cookie': cookie.header_output()})
    if not req:
        return None
    return json.loads(req.data.decode())
def create_superfile(cookie, path, block_list):
    """Merge the temporary slices produced by slice_upload().

    path       - absolute path of the file on the server.
    block_list - MD5 checksums of the uploaded slices, in order.
    Returns the PCS info of the complete file, or None on network failure.
    """
    url = "".join(
        [
            const.PCS_URL_C,
            "file?method=createsuperfile&app_id=250528",
            "&path=",
            encoder.encode_uri_component(path),
            "&",
            cookie.sub_output("BDUSS"),
        ]
    )
    param = {"block_list": block_list}
    # NOTE(review): param is sent as raw JSON, not URI-encoded; the API
    # appears to accept this - verify for values with special characters.
    data = "param=" + json.dumps(param)
    req = net.urlopen(url, headers={"Cookie": cookie.header_output()}, data=data.encode())
    if req:
        return json.loads(req.data.decode())
    else:
        return None
def restore_trash(cookie, tokens, fidlist):
    """Restore files/directories from the recycle bin.

    fidlist - fs_id values of the entries to restore.
    Returns the decoded JSON reply, or None on network failure.
    """
    url = "".join(
        [
            const.PAN_API_URL,
            "recycle/restore?channel=chunlei&clienttype=0&web=1",
            "&t=",
            util.timestamp(),
            "&bdstoken=",
            tokens["bdstoken"],
        ]
    )
    data = "fidlist=" + encoder.encode_uri_component(json.dumps(fidlist))
    req = net.urlopen(
        url, headers={"Cookie": cookie.header_output(), "Content-type": const.CONTENT_FORM_UTF8}, data=data.encode()
    )
    if req:
        content = req.data
        return json.loads(content.decode())
    else:
        return None
def create_superfile(cookie, path, block_list):
    '''Merge the temporary slices produced by slice_upload().

    path       - absolute path of the file on the server.
    block_list - MD5 checksums of the uploaded slices, in order.
    Returns the PCS info of the complete file, or None on failure.
    '''
    url = (const.PCS_URL_C +
           'file?method=createsuperfile&app_id=250528' +
           '&path=' + encoder.encode_uri_component(path) +
           '&' + cookie.sub_output('BDUSS'))
    body = 'param=' + json.dumps({'block_list': block_list})
    req = net.urlopen(url, headers={'Cookie': cookie.header_output()},
                      data=body.encode())
    return json.loads(req.data.decode()) if req else None
def list_dir(cookie, tokens, path, page=1, num=100):
    """List the files in a directory (at most 100 entries per page).

    Returns the decoded JSON reply, or None on network failure.
    """
    timestamp = util.timestamp()
    url = "".join(
        [
            const.PAN_API_URL,
            "list?channel=chunlei&clienttype=0&web=1",
            "&num=",
            str(num),
            "&t=",
            timestamp,
            "&page=",
            str(page),
            "&dir=",
            encoder.encode_uri_component(path),
            # second "&t=" mirrors the web client; it carries a latency value.
            "&t=",
            util.latency(),
            "&order=time&desc=1",
            "&_=",
            timestamp,
            "&bdstoken=",
            tokens["bdstoken"],
        ]
    )
    req = net.urlopen(
        url,
        headers={
            "Content-type": const.CONTENT_FORM_UTF8,
            "Cookie": cookie.sub_output("BAIDUID", "BDUSS", "PANWEB", "cflag"),
        },
    )
    if req:
        content = req.data
        return json.loads(content.decode())
    else:
        return None
def post_login(cookie, tokens, username, password, rsakey, verifycode='', codestring=''):
    '''Perform the actual login request.

    password - the RSA-encrypted, base64-encoded password.
    rsakey   - the rsakey matching the public key used for encryption.
    verifycode / codestring - captcha answer and captcha id, empty by default.

    @return (status, info):
      0      - ok; info holds the auth cookie headers.
      -1     - unknown error.
      4      - wrong password.
      257    - captcha required; info holds (vcodetype, codeString).
      400031 - SMS verification required; info holds the parsed query.
    '''
    url = const.PASSPORT_LOGIN
    data = ''.join([
        'staticpage=https%3A%2F%2Fpassport.baidu.com%2Fstatic%2Fpasspc-account%2Fhtml%2Fv3Jump.html',
        '&charset=UTF-8',
        '&token=', tokens['token'],
        '&tpl=pp&subpro=&apiver=v3',
        '&tt=', util.timestamp(),
        '&codestring=', codestring,
        '&safeflg=0&u=http%3A%2F%2Fpassport.baidu.com%2F',
        '&isPhone=',
        '&quick_user=0&logintype=basicLogin&logLoginType=pc_loginBasic&idc=',
        '&loginmerge=true',
        # fix: the credential expressions had been lost (scrubbed to
        # ******), leaving a syntax error; both fields must be
        # percent-encoded into the form body.
        '&username=', encoder.encode_uri_component(username),
        '&password=', encoder.encode_uri_component(password),
        '&verifycode=', verifycode,
        '&mem_pass=on',
        '&rsakey=', rsakey,
        '&crypttype=12',
        '&ppui_logintime=', get_ppui_logintime(),
        '&callback=parent.bd__pcbs__28g1kg',
    ])
    headers = {
        'Accept': const.ACCEPT_HTML,
        'Cookie': cookie.sub_output('BAIDUID', 'HOSUPPORT', 'UBI'),
        'Referer': const.REFERER,
        'Connection': 'Keep-Alive',
    }
    req = net.urlopen(url, headers=headers, data=data.encode())
    if not req:
        return (-1, None)
    content = req.data.decode()
    # The reply is a JS callback; the status is embedded as a quoted
    # query string starting with err_no.
    match = re.search('"(err_no[^"]+)"', content)
    if not match:
        return (-1, None)
    query = dict(urllib.parse.parse_qsl(match.group(1)))
    query['err_no'] = int(query['err_no'])
    err_no = query['err_no']
    auth_cookie = req.headers.get_all('Set-Cookie')
    if err_no == 0:
        return (0, auth_cookie)
    # 18: account not bound to a cellphone - still treated as success.
    elif err_no == 18:
        return (0, auth_cookie)
    # captcha required
    elif err_no == 257:
        return (err_no, query)
    # SMS verification required
    elif err_no == 400031:
        return (err_no, query)
    else:
        return (err_no, None)
def post_login(cookie, tokens, username, password, rsakey, verifycode='', codestring=''):
    '''Perform the actual login request.

    password - the RSA-encrypted, base64-encoded password.
    rsakey   - the rsakey matching the public key used for encryption.
    verifycode / codestring - captcha answer and captcha id, empty by default.

    @return (status, info):
      0      - ok; info holds the auth cookie headers.
      -1     - unknown error.
      4      - wrong password.
      257    - captcha required; info holds (vcodetype, codeString).
      400031 - SMS verification required; info holds the parsed query.
    '''
    url = const.PASSPORT_LOGIN
    data = ''.join([
        'staticpage=https%3A%2F%2Fpassport.baidu.com%2Fstatic%2Fpasspc-account%2Fhtml%2Fv3Jump.html',
        '&charset=UTF-8',
        '&token=', tokens['token'],
        '&tpl=pp&subpro=&apiver=v3',
        '&tt=', util.timestamp(),
        '&codestring=', codestring,
        '&safeflg=0&u=http%3A%2F%2Fpassport.baidu.com%2F',
        '&isPhone=',
        '&quick_user=0&logintype=basicLogin&logLoginType=pc_loginBasic&idc=',
        '&loginmerge=true',
        # fix: the credential expressions had been lost (scrubbed to
        # ******), leaving a syntax error; both fields must be
        # percent-encoded into the form body.
        '&username=', encoder.encode_uri_component(username),
        '&password=', encoder.encode_uri_component(password),
        '&verifycode=', verifycode,
        '&mem_pass=on',
        '&rsakey=', rsakey,
        '&crypttype=12',
        '&ppui_logintime=', get_ppui_logintime(),
        '&callback=parent.bd__pcbs__28g1kg',
    ])
    headers = {
        'Accept': const.ACCEPT_HTML,
        'Cookie': cookie.sub_output('BAIDUID', 'HOSUPPORT', 'UBI'),
        'Referer': const.REFERER,
        'Connection': 'Keep-Alive',
    }
    req = net.urlopen(url, headers=headers, data=data.encode())
    if not req:
        return (-1, None)
    content = req.data.decode()
    # The reply is a JS callback; the status is embedded as a quoted
    # query string starting with err_no.
    match = re.search('"(err_no[^"]+)"', content)
    if not match:
        return (-1, None)
    query = dict(urllib.parse.parse_qsl(match.group(1)))
    query['err_no'] = int(query['err_no'])
    err_no = query['err_no']
    auth_cookie = req.headers.get_all('Set-Cookie')
    if err_no == 0:
        return (0, auth_cookie)
    # 18: account not bound to a cellphone - still treated as success.
    elif err_no == 18:
        return (0, auth_cookie)
    # captcha required
    elif err_no == 257:
        return (err_no, query)
    # SMS verification required
    elif err_no == 400031:
        return (err_no, query)
    else:
        return (err_no, None)