def book_detail(book_id) -> dict:
    """Fetch and parse the library OPAC's MARC detail page for one book.

    Args:
        book_id: the library's internal record id (``nTmpKzh``).

    Returns:
        dict with ``id``, ``available_books`` (raw availability string from
        the second request) plus whichever MARC fields were present:
        ``ISBN``, ``cover_url``, ``name``, ``author``, ``translator``,
        ``press``, ``year``, ``introduction``. ``cover_url`` falls back to
        a generic placeholder image when no ISBN subfield was found.
    """
    # NOTE(review): this reads 'vpn_cookie' while check_proxy() stores
    # 'vpn_cookies' -- confirm which global key is authoritative.
    headers = {'Cookie': global_values.get_value('vpn_cookie')}

    url = ("http://222-31-39-3-8080-p.vpn.nuc1941.top:8118"
           "//pft/showmarc/table.asp?nTmpKzh=%s" % book_id)
    content = session.get(url, headers=headers).content
    soups = bs4.BeautifulSoup(content, "html.parser")
    details = soups.find(id="tabs-2").find_all("tr")

    # Second request: availability / holdings summary for this record.
    url = ("http://222-31-39-3-8080-p.vpn.nuc1941.top:8118"
           "//pft/wxjs/BK_getKJFBS.asp")
    content = session.post(url, data={"nkzh": book_id},
                           headers=headers).content.decode()

    detail_dict = {"id": book_id, "available_books": content}

    # MARC field tag -> {subfield code -> output key}. Replaces four
    # near-identical if/elif chains in the original.
    subfield_map = {
        "010": {"a": "ISBN"},
        "200": {"a": "name", "f": "author", "g": "translator"},
        "210": {"c": "press", "d": "year", "g": "translator"},
        "330": {"a": "introduction"},
    }
    for row in details:
        tds = row.find_all("td")
        tag = tds[0].text.strip()  # hoisted: computed once per row
        mapping = subfield_map.get(tag)
        if not mapping:
            continue
        # Subfields are '@'-separated; the first character of each part
        # is the subfield code, the rest is the value.
        for part in tds[2].text.strip().split("@"):
            if not part:
                continue
            key = mapping.get(part[0])
            if key:
                detail_dict[key] = part[1:]
                if tag == "010" and part[0] == "a":
                    # The ISBN doubles as the Douban cover lookup key.
                    detail_dict["cover_url"] = douban_book_cover(part[1:])

    if "cover_url" not in detail_dict:
        detail_dict["cover_url"] = (
            "https://img1.doubanio.com/f/shire/"
            "5522dd1f5b742d1e1394a17f44d590646b63871d/pics/book"
            "-default-lpic.gif")
    return detail_dict
def handle_library_search_by_isbn(isbn: str):
    """Search the library catalogue by ISBN; returns one page of results."""
    page = request.args.get('page', '1')
    if len(isbn) not in (10, 13):
        custom_abort(-6, '无效的 ISBN 编号')
    # The OPAC indexes hyphenated ISBNs, so insert hyphens at the group
    # boundaries the catalogue expects before querying.
    if len(isbn) == 10:
        groups = (isbn[:1], isbn[1:5], isbn[5:9], isbn[9:])
    else:
        groups = (isbn[:3], isbn[3:4], isbn[4:8], isbn[8:12], isbn[12:])
    isbn = '-'.join(groups)

    url = ('http://222-31-39-3-8080-p.vpn.nuc1941.top:8118'
           '//pft/wxjs/bk_s_Q_fillpage.asp?q=标准编号=[[%s*]]'
           '&nmaxcount=&nSetPageSize=10&orderby=&Research=1&page=%s&opt=1'
           % (quote(isbn), page))
    html = session.get(url, headers={
        'Cookie': global_values.get_value('vpn_cookie')
    }).content.decode('utf-8')

    book_ids = re.findall(r"ShowItem\('([0-9]*)'\)", html)
    total_match = re.search('共([0-9]*)条记录', html)
    if not total_match:
        custom_abort(-6, '无结果')

    # Fetch every result's detail page concurrently.
    pool = Pool(10)
    book_list = pool.map(book_detail, book_ids)
    return {
        'code': 0,
        'data': {
            'records': total_match.group(1),
            'page': page,
            'recordsPerPage': len(book_list),
            'list': book_list,
        },
    }
def handle_library_search_by_name(keyword: str):
    """Search the library catalogue by title (or another field via ?type=)."""
    search_field = request.args.get('type', '正题名')
    page = request.args.get('page', '1')

    url = ('http://222-31-39-3-8080-p.vpn.nuc1941.top:8118'
           '//pft/wxjs/bk_s_Q_fillpage.asp?q=%s=[[*%s*]]'
           '&nmaxcount=&nSetPageSize=10&orderby=&Research=1&page=%s&opt=1'
           % (quote(search_field), quote(keyword), page))
    html = session.get(url, headers={
        'Cookie': global_values.get_value('vpn_cookie')
    }).content.decode('utf-8')

    book_ids = re.findall(r"ShowItem\('([0-9]*)'\)", html)
    total_match = re.search('共([0-9]*)条记录', html)
    if not total_match:
        custom_abort(-6, '无结果')

    # Fetch every result's detail page concurrently.
    pool = Pool(10)
    book_list = pool.map(book_detail, book_ids)
    return {
        'code': 0,
        'data': {
            'records': total_match.group(1),
            'page': page,
            'recordsPerPage': len(book_list),
            'list': book_list,
        },
    }
def check_proxy():
    """Poll the VPN proxy's health endpoint and cache the result.

    Sets the global ``proxy_status_ok`` flag; on the first successful
    check it also fetches and stores the VPN cookies.
    """
    # NOTE(review): this stores 'vpn_cookies' while the request helpers
    # read 'vpn_cookie' -- confirm which global key is authoritative.
    try:
        if requests.get(proxy_status_url).content.decode() == "ok":
            global_values.set_value("proxy_status_ok", True)
            vpn_cookies = requests.get(get_cookies_url).content.decode()
            # Only log/store on first acquisition to avoid log spam.
            if not global_values.get_value("vpn_cookies"):
                logging.info("已获取 VPN cookies:%s" % vpn_cookies)
                global_values.set_value("vpn_cookies", vpn_cookies)
        else:
            global_values.set_value("proxy_status_ok", False)
            logging.warning("代理离线")
    except Exception:
        # Was a bare `except:` that silently marked the proxy OK. Keep the
        # optimistic fallback (a failing *checker* does not prove the proxy
        # is down -- TODO confirm this is intended), but narrow the clause
        # and log the failure instead of hiding it.
        logging.exception("代理状态检查失败")
        global_values.set_value("proxy_status_ok", True)
def get_book_available_detail(book_id: str):
    """Scrape the per-copy holdings table (barcode/location/status) for a book."""
    url = ("http://222-31-39-3-8080-p.vpn.nuc1941.top:8118"
           "//pft/showmarc/showbookitems.asp?nTmpKzh=%s" % book_id)
    html = session.get(url, headers={
        'Cookie': global_values.get_value('vpn_cookie')
    }).content.decode("utf-8")
    rows = bs4.BeautifulSoup(html, "html.parser").find_all("tr")

    def _clean(cell):
        # Collapse all internal whitespace scraped out of a table cell.
        return "".join(cell.text.split())

    detail_items = []
    for row in rows[1:]:  # first <tr> is the table header
        cells = row.find_all("td")
        detail_items.append({
            "number": _clean(cells[1]),
            "barcode": _clean(cells[2]),
            "location": _clean(cells[3]),
            "status": _clean(cells[4]),
        })
    return {'code': 0, 'data': detail_items}
def check_auth():
    """Request gate run before every endpoint: auth, rate-limit, cache.

    Returns a Response to short-circuit the request (rejection, guest
    data, or a cache hit), or None (implicitly) to let the real handler
    run. Original was a single collapsed line; nesting of the guest-data
    branch is reconstructed -- TODO confirm against upstream history.
    """
    # Default envelope values; code == 0 means "let the request through".
    message = "OK"
    error = ""
    code = 0
    data = ""
    # Bare root: redirect away from the API entirely.
    if request.path == '/':
        return redirect("https://dreace.top")
    # Endpoints administratively disabled.
    if request.path[1:] in stopped_list:
        logging.warning("未开放查询")
        message = "未开放查询"
        code = -4
        resp = Response(json.dumps({
            "message": message,
            "error": error,
            "code": code,
            "data": data
        }), mimetype='application/json')
        return resp
    name = request.args.get('name', "")
    args = dict(request.args)
    # MessagePush endpoints skip signature / timestamp / rate checks.
    if request.url.find("MessagePush") == -1:
        # Signature check over the full query args.
        if not check_sign(args):
            logging.warning("身份认证失败")
            message = "身份认证失败"
            error = "身份认证失败"
            code = -2
        # Reject missing or stale timestamps (3e5 ms = 5 minute window,
        # compared against epoch milliseconds).
        if "ts" not in args.keys() or int(args["ts"]) + 3e5 < int(
                time.time() * 1000):
            logging.warning("拒绝本次请求")
            message = "拒绝本次请求"
            error = "拒绝本次请求"
            code = -2
        # if name and time.localtime(time.time())[3] < 7:
        #     logging.warning("非服务时间", )
        #     message = "非服务时间"
        #     error = "非服务时间"
        #     code = -1
        # Per-key rate limiting for endpoints not on the no-limit list.
        elif request.path[1:] not in no_limit_url and not check_request_limit(
                request.args["key"]):
            logging.warning("拒绝了 %s 的请求", request.args["key"])
            message = "操作过频繁"
            error = "操作过频繁"
            code = -5
        # Guest accounts get canned responses instead of live data.
        if name == "guest" and request.path[1:] in guest_data.keys():
            data = guest_data[request.path[1:]]
    # Endpoints needing the school VPN fail fast when the proxy is down.
    if request.path[1:] in need_proxy_url and not global_values.get_value(
            "proxy_status_ok"):
        code = -7
        message = "服务器网络故障"
        logging.warning("服务器网络故障")
    # Any rejection code, or preloaded guest data, short-circuits here.
    if code != 0 or len(data) > 1:
        resp = Response(json.dumps({
            "message": message,
            "error": error,
            "code": code,
            "data": data
        }), mimetype='application/json')
        return resp
    # Cache lookup: key is md5 of path + normalized query string.
    url = request.path + "?" + get_cache_key(dict(request.args))
    url_md5 = hashlib.md5(url.encode()).hexdigest()
    res_b = redis_cache.get(url_md5)
    if res_b:
        res = json.loads(res_b)
        res["cached"] = 1  # mark the payload as served from cache
        resp = Response(json.dumps(res), mimetype='application/json')
        logging.info("命中缓存 %s", unquote(url))
        return resp
def decorated_function(*args, **kwargs) -> dict:
    """Decorator wrapper (closure over ``f``, defined in the enclosing
    scope outside this view): aborts the request when the cached proxy
    status says the school network is unreachable, otherwise delegates
    to the wrapped handler unchanged."""
    if not global_values.get_value("proxy_status_ok"):
        custom_abort(-2, '无法连接学校网络')
    return f(*args, **kwargs)