Beispiel #1
0
 def get(self, token):
     """Resolve a short share token to its stored Microsoft URL.

     Aborts with 500 when the database is unreachable and with 404 when
     the token has no matching Share_URL row.
     """
     token = rec_token(token)
     try:
         S = Share_URL.query.get(token)
     except Exception:  # narrowed from a bare except (kept broad: any DB failure)
         abort_msg(500, '无法访问数据库...')
         return
     if not S:
         abort_msg(404, '此分享链接不存在或已删除!')
         return
     return {'status': 200, 'data': {'msurl': S.msurl}}
Beispiel #2
0
 def get(self, url):
     """Normalize a user-supplied OneDrive share link and dispatch it.

     Re-attaches the original query string, forces an https:// scheme,
     then routes the link to deal_ofb (*.sharepoint.com, OneDrive for
     Business) or deal_ofp (1drv.ms, personal OneDrive) by host.
     """
     url = url + '?from=ODSpeedUP'
     for arg, value in request.args.items():
         url += "&%s=%s" % (arg, value)
     token = get_token_from_cache(app_config.SCOPE)
     if not token:
         abort_msg(500, '未能查询到您的token,请退出登录后重新登录...')
         return
     if len(url) <= 15:
         abort_msg(400, '输入的链接过短')
         return
     # Force https; also accept scheme-less input.
     if url.startswith('http://'):
         # count=1: only the leading scheme, not any 'http://' later in the URL
         url = url.replace('http://', 'https://', 1)
     elif not url.startswith('https://'):
         url = 'https://' + url
     netloc = urlparse(url).netloc  # parse once instead of twice
     if netloc.endswith('sharepoint.com'):
         return deal_ofb(url)
     if netloc == "1drv.ms":
         return deal_ofp(url)
Beispiel #3
0
 def get(self, host_header, dirver, item):
     """Resolve a SharePoint drive item to an accelerated download URL.

     host_header/dirver/item come from the folder listing (see the regex
     there); the FedAuth cookie stored in the session by deal_ofb
     authorizes the API call.
     """
     token = get_token_from_cache(app_config.SCOPE)
     if not token:
         abort_msg(500, '未能查询到您的token,请退出登录后重新登录...')
         return
     # May be None when deal_ofb was never visited; the API call then fails below.
     FedAuth = session.get('FedAuth')
     cookies = {'FedAuth': FedAuth}
     h = {
         'User-Agent':
         'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36 Edg/83.0.478.58'
     }
     r_url = "https://%s-my.sharepoint.com/_api/v2.0/drives/%s/items/%s?version=Published" % (
         host_header, dirver, item)
     # Guarded like the sibling handlers: network/JSON failures previously
     # surfaced as unhandled exceptions.
     try:
         graph_data = requests.get(r_url, cookies=cookies, headers=h).json()
     except Exception:
         abort_msg(500, '请求微软服务器失败...')
         return
     if '@content.downloadUrl' not in graph_data:
         abort_msg(500, '文件目录请求失败...')
         return
     return {
         'status': 200,
         'url': build_speedup_link(graph_data['@content.downloadUrl'])
     }
Beispiel #4
0
 def get(self, cid, id, authkey):
     """List one folder of a personal OneDrive share (cid/id/authkey triple).

     Query args: ps = page size (default 30), si = start index (default 0).
     Returns the folder's children plus startIndex/totalCount paging info.
     """
     ps = int(request.args.get('ps', 30))
     si = int(request.args.get('si', 0))
     url = app_config.OFP_Dir_URL % (ps, si, authkey, id, cid)
     h = {'appid': '1141147648', 'accept': 'application/json'}
     try:
         graph_data = requests.get(url, headers=h).json()
     except Exception:  # narrowed from a bare except
         abort_msg(500, '无法访问这个分享链接,请等待修复~~')
         return
     # The original tested 'items' twice (once without returning); one
     # guarded check suffices.  Debug print(url) removed.
     if 'items' not in graph_data:
         abort_msg(500, '无法获取这个分享链接的信息,可能是链接授权过期了,返回重新进入试试?')
         return
     if 'folder' not in graph_data['items'][0]:
         abort_msg(500, '无法获取链接内的文件')
         return
     folder = graph_data['items'][0]['folder']
     result = list()
     for i in folder['children']:
         if 'folder' in i:
             r = {
                 'is_dir': True,
                 'cid': i['ownerCid'],
                 'id': i['id'],
                 'authkey': authkey,
                 'name': i['name'],
             }
         else:
             # Drop everything after the last '/' of the download link so
             # the accelerated link can be built from the base.
             d_url = i['urls']['download']
             cut = d_url.rfind('/')
             r = {
                 'is_dir': False,
                 'down_url': build_speedup_link(d_url[:cut]),
                 'name': i['name'] + i['extension'],
                 'size': i['size']
             }
         r['CreationDate'] = i['displayCreationDate']
         r['ModifiedDate'] = i['displayModifiedDate']
         result.append(r)
     return {
         'status': 200,
         'data': result,
         'startIndex': folder['startIndex'],
         'totalCount': folder['totalCount']
     }
Beispiel #5
0
 def __init__(self):
     """Gate the resource: abort with 403 unless a user id is in the session."""
     logged_in = 'uid' in session
     if not logged_in:
         abort_msg(403, '您未登录,请先登录!')
Beispiel #6
0
def deal_ofp(url):
    """Resolve a personal OneDrive (1drv.ms) share link.

    For a single-file share, returns the accelerated download URL plus
    the file name and size.  For a folder share, scrapes the landing
    page for the cid/id/authkey triple the listing endpoint needs.
    """
    if not urlparse(url).netloc == "1drv.ms":
        abort_msg(403, '输入的链接非OneDrive分享链接')
        return
    s = requests.session()
    try:
        r = s.get(url, allow_redirects=False)
    except Exception:  # narrowed from a bare except
        abort_msg(500, '无法访问这个分享链接,请等待修复~~')
        return
    try:
        url = r.headers['Location']
    except KeyError:
        abort_msg(500, '您提交的链接可能不是分享链接或链接不可游客访问或者服务器错误')
        return  # the original fell through here and reused the stale url
    # File shares redirect /redir? to a direct download location.
    down_url = url.replace('/redir?', '/download?')
    try:
        r_data = s.get(down_url, allow_redirects=False)
    except Exception:
        abort_msg(500, '服务器无法连接到微软,请等待修复')
        return
    if 'Location' in r_data.headers:
        try:
            down_url = r_data.headers['Location']
            slash = down_url.rfind('/')
            filename = down_url[slash + 1:]
            filename = unquote(filename[:filename.rfind('?')], 'utf-8')
            down_url = down_url[:slash]
            r = s.head(down_url)
            return {
                'status': 200,
                'is_dir': False,
                'OFB': False,
                'data': {
                    'url': build_speedup_link(down_url),
                    'size': r.headers['Content-Length'],
                    'name': filename
                }
            }
        except Exception:
            abort_msg(500, '解析下载链接时出错')
            return
    # Folder share: fetch the landing page and pull cid/id/authkey out of
    # its <noscript> meta-refresh target.
    try:
        r_data = s.get(url, allow_redirects=False)
    except Exception:
        abort_msg(500, '服务器访问链接出错,请等待修复~')
        return
    try:
        soup = BeautifulSoup(r_data.text, 'lxml')
        full_url = str(soup.find('noscript').find('meta').get('content'))
        # [6:] presumably strips a "0;url=" meta-refresh prefix — TODO confirm
        f = furl(full_url[6:])
        cid = f.args['cid']
        id = f.args['id']
        authkey = f.args['authkey']
    except Exception:
        abort_msg(500, '链接分析错误,请等待修复~~')
        return
    return {
        'status': 200,
        'is_dir': True,
        'OFB': False,
        'data': {
            'cid': cid,
            'id': id,
            'authkey': authkey
        }
    }
Beispiel #7
0
 def get(self, host_head, dir):
     """List a OneDrive-for-Business folder via RenderListDataAsStream.

     host_head encodes the site path with '!' in place of '/' (see
     deal_ofb); dir is the folder path below the document library.
     Requires the FedAuth cookie captured earlier by deal_ofb.
     """
     host_head_org = host_head
     token = get_token_from_cache(app_config.SCOPE)
     if not token:
         abort_msg(500, '未能查询到您的token,请退出登录后重新登录...')
         return
     try:
         host_head = str(host_head).replace('!', '/')
         full = '/' + str(dir)
         # First three path segments identify the list root for @a1.
         first = '/'.join(full.split('/')[0:4])
     except Exception:  # narrowed from a bare except
         abort_msg(500, '读取链接时出错')
         return
     try:
         FedAuth = session['FedAuth']
     except KeyError:
         abort_msg(403, '您没有访问此链接的权限!')
         return
     cookies = {'FedAuth': FedAuth}
     if len(request.args) == 0:
         r_url = "https://%s/_api/web/GetListUsingPath(DecodedUrl=@a1)/RenderListDataAsStream?@a1='%s'&RootFolder=%s" % (
             host_head, first, full)
     else:
         # Paging requests forward the server-issued query args verbatim.
         r_url = "https://%s/_api/web/GetListUsingPath(DecodedUrl=@a1)/RenderListDataAsStream?@a1='%s'" % (
             host_head, first)
         for arg, value in request.args.items():
             r_url += "&%s=%s" % (arg, value)
     h = {
         'accept': 'application/json;odata=verbose',
         'content-type': 'application/json;odata=verbose',
     }
     d = '{"parameters":{"__metadata":{"type":"SP.RenderListDataParameters"},"RenderOptions":1185543,"AllowMultipleValueFilterForTaxonomyFields":true,"AddRequiredFields":true}}'
     try:
         graph_data = requests.post(r_url,
                                    data=d,
                                    headers=h,
                                    cookies=cookies).json()
     except Exception:
         abort_msg(500, '请求微软服务器失败...')
         return
     if 'ListData' not in graph_data:
         abort_msg(500, '文件目录请求失败...')
         return
     if 'Row' not in graph_data['ListData']:
         abort_msg(500, '文件目录请求失败')
         return
     # Metacharacters escaped (raw string): the original's unescaped '?'
     # acted as a quantifier, so group(3) captured a trailing literal '?'
     # into the item id.  Hoisted out of the loop.
     reg = r"https://(.*)-my\.sharepoint\.com:443/_api/v2\.0/drives/(.*)/items/(.*)\?version=Published"
     result = list()
     for i in graph_data['ListData']['Row']:
         if i['FSObjType'] == '1':
             f = {
                 'is_dir': True,
                 'host_head': host_head_org,
                 'dir': dir + '/' + i['FileLeafRef'],
                 'name': i['FileLeafRef'],
                 'child_num': i['ItemChildCount'],
                 'lastModifiedDateTime': i['Modified']
             }
         else:
             matchObj = re.match(reg, i['.spItemUrl'])
             f = {
                 'is_dir': False,
                 'host_header': matchObj.group(1),
                 'dirver': matchObj.group(2),
                 'item': matchObj.group(3),
                 'name': i['FileLeafRef'],
                 'size': i['FileSizeDisplay'],
                 'lastModifiedDateTime': i['Modified']
             }
         result.append(f)
     r_data = {'status': 200, 'data': result}
     r_data['NextHref'] = graph_data['ListData'].get('NextHref', False)
     r_data['PrevHref'] = graph_data['ListData'].get('PrevHref', False)
     return r_data
Beispiel #8
0
def deal_ofb(url):
    """Resolve a OneDrive-for-Business (*.sharepoint.com) share link.

    A file share yields the accelerated direct-download URL with name and
    size.  A folder share stores the FedAuth cookie in the session and
    returns the host_head/dir pair the listing endpoint expects.
    """
    if not urlparse(url).netloc.endswith('sharepoint.com'):
        abort_msg(403, '输入的链接非OneDrive分享链接')
        return
    down_url = url + '&download=1'
    s = requests.session()
    try:
        r = s.get(down_url, allow_redirects=False)
    except Exception:  # narrowed from a bare except
        abort_msg(500, '无法请求这个资源,请等待修复')
        return
    if 'Location' in r.headers:
        true_url = r.headers['Location']
        try:
            if true_url.startswith('/'):
                # Relative redirect => single file; rebuild an absolute URL.
                true_url = 'https://' + urlparse(url).netloc + true_url
                filename = true_url[true_url.rfind('/') +
                                    1:true_url.rfind('?')]
                r = s.head(true_url)
                return {
                    'status': 200,
                    'is_dir': False,
                    'OFB': True,
                    'data': {
                        'url': build_speedup_link(true_url),
                        'size': r.headers['Content-Length'],
                        'name': filename
                    }
                }
        except Exception:
            abort_msg(500, '分析文件下载链接时出错')
            return
    # Folder share: follow the plain link once to learn the real location.
    try:
        r = s.get(url, allow_redirects=False)
    except Exception:
        abort_msg(500, '无法请求这个资源,请等待修复')
        return
    try:
        t_url = r.headers['Location']
    except KeyError:
        abort_msg(500, '您输入的可能非文件夹分享或这个链接不可游客访问或服务器内部错误')
        return
    try:
        # Segments 2..4 of the redirect target form the site host/path.
        url_list = t_url.split('/')
        host_head = '/'.join(url_list[2:5])
        f = furl(t_url)
        full_dir = f.args['id'].strip('/')
        # Persist the auth cookie for the later per-folder/per-item calls.
        session['FedAuth'] = r.cookies.get_dict()['FedAuth']
    except Exception:
        abort_msg(500, '分析链接时出错')
        return
    return {
        'status': 200,
        'is_dir': True,
        'OFB': True,
        'data': {
            'host_head': host_head.replace('/', '!'),
            'dir': full_dir
        }
    }