# Shared imports assumed by the snippets below. Project-specific names
# (eams_base_url, zhhq_base_url, req_header, BaseException, HPULib, app,
# encode_base64) come from the original project and are not defined here.
import base64
import json
import os
import re
import subprocess
from urllib import parse

import img2pdf
import pypandoc
import requests
from bs4 import BeautifulSoup
from flask import jsonify
from PIL import Image


def eams_stu_info(session):

    # Refresh the session
    s = requests.session()
    if session: s.cookies.update(json.loads(session))  # restore saved cookies

    # Student detail page
    std_detail_url = f"{eams_base_url}/eams/stdDetail.action"

    detail_act = s.post(std_detail_url, headers = req_header)
    # print(detail_act.text)
    print(detail_act.history, detail_act.status_code)
    if len(detail_act.history) > 0:
        raise BaseException.APIException(msg='教务管理系统登录超时,请重新登录', error_code=10003)
 
    detail_soup = BeautifulSoup(detail_act.text, 'html.parser')
    item_title = list(detail_soup.find_all('td', class_='title'))

    stu_info = dict()
    for keyTag in item_title:
        if keyTag and keyTag.get_text().strip():
            key = keyTag.get_text().strip() #.replace(':','')
            valTag = keyTag.find_next_sibling('td', class_=None)
            stu_info[key] = valTag.get_text() if valTag else ""

    return stu_info
def eams_freeroom_buildings(session):
    # Refresh the session
    s = requests.session()
    if session: s.cookies.update(json.loads(session))  # restore saved cookies

    # Free classroom query page
    freeroom_url = f"{eams_base_url}/eams/classroom/apply/free.action"

    page = s.get(freeroom_url, headers=req_header)

    if "账号密码登录" in page.text:
        raise BaseException.APIException(msg='教务管理系统登录超时,请重新登录', error_code=10003)

    psoup = BeautifulSoup(page.text, "html.parser")
    building_select = psoup.find("select", id="building")
    options = building_select.find_all("option") if building_select else []
    # print(options)

    data = []
    for o in options:
        if o.get("value") == "":
            continue
        # print("%s %s"%(o.get("value"), o.get("title")))
        d = {}
        d["value"] = o.get("value")
        d["title"] = o.get("title")
        data.append(d)

    # print(json.dumps(data, ensure_ascii=False))
    return jsonify({
        'msg': 'success',
        'data': data
    })
def lib_book_detail(recordId, ip, jsonify_res=True):

    header = {
        'Host': 'mfindhpu.libsp.com',
        'Connection': 'keep-alive',
        'Accept': 'application/json, text/plain, */*',
        'mappingPath': '',
        'groupCode': 'undefined',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.96 Safari/537.36 Edg/88.0.705.50',
        'Sec-Fetch-Site': 'same-origin',
        'Sec-Fetch-Mode': 'cors',
        'Sec-Fetch-Dest': 'empty',
        'Referer': 'https://mfindhpu.libsp.com/',
        'Accept-Encoding': 'gzip, deflate, br',
        'Accept-Language': 'zh-CN,zh;q=0.9'
    }
    header['X-Forwarded-For'] = ip if ip else '47.103.223.186'

    s = requests.session()

    res = s.get(f'https://mfindhpu.libsp.com/find/searchResultDetail/getDetail?recordId={recordId}', headers=header, verify=False)

    res = json.loads(res.text)

    if not res['success']:
        raise BaseException.APIException(msg=res['message'], error_code=res['errCode'])

    data = {
        'msg': res['message'],
        'data': res['data']
    }
    # print(res.text)
    return jsonify(data) if jsonify_res else data
def lib_session(ip='47.103.223.186'):

    s = requests.session()

    code_url = 'https://mfindhpu.libsp.com/oga/verifycode/img?'

    header = {
        'Host': 'mfindhpu.libsp.com',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.83 Safari/537.36',
        'Accept-Encoding': 'gzip, deflate',
        'Accept': '*/*',
        'Connection': 'keep-alive',
        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,zh-TW;q=0.7'
    }

    # s.get('https://mfindhpu.libsp.com/#/login', verify=False)
    code_res = s.get(code_url, headers=header, verify=False)
    res = json.loads(code_res.text.replace('\\n',''))
    if not res['success']:
        raise BaseException.APIException(msg=res['message'], error_code=res['errCode'])

    # print(code_res.text)
    res['session'] = parse.quote(json.dumps(s.cookies.get_dict(), ensure_ascii=False))
    res['captchaHtml'] = f"<img src='{res['data']['verifyCode']}'></img>"

    return jsonify(res)
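

# A hedged wiring sketch, not part of the original project: lib_session() issues the
# captcha image and a URL-quoted cookie blob, the user reads the image, and lib_login()
# is then called with the same (unquoted) session. The exact location of codeKey inside
# res['data'] is an assumption here.
def lib_login_flow_sketch(username, password, read_captcha, ip=None):
    """read_captcha turns the verifyCode data-URL into the text the user typed."""
    pre = lib_session(ip).get_json()                       # needs a Flask app context
    captcha_text = read_captcha(pre['data']['verifyCode'])
    code_key = pre['data'].get('codeKey', '')              # hypothetical field name
    return lib_login(parse.unquote(pre['session']), username, password,
                     captcha_text, code_key, ip)
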
def login_hpu_vpn(jwzh, jwmm, ip):

    s = requests.session()

    baseurl = "https://vpn.hpu.edu.cn/por/login_psw.csp?encrypt=0"
    mobile_header = {
        "User-Agent":
        "Mozilla/5.0 (iPhone; CPU iPhone OS 12_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.1.2 Mobile/15E148 Safari/604.1"
    }
    mobile_header['X-Forwarded-For'] = ip if ip else '47.103.223.186'

    formData = {"svpn_name": jwzh, "svpn_password": jwmm}

    s.post(baseurl,
           data=formData,
           headers=mobile_header,
           verify=False,
           allow_redirects=False)
    cookies = s.cookies.get_dict()

    if "ENABLE_RANDCODE" not in cookies:
        raise BaseException.APIException(msg='密码错误', error_code=30001)

    s.get(
        "https://vpn.hpu.edu.cn/web/1/http/0/218.196.240.155/swfweb/hpugg.aspx",
        headers=mobile_header,
        verify=False,
        allow_redirects=False)

    # print(json.dumps(s.cookies.get_dict(), ensure_ascii=False))
    session = json.dumps(s.cookies.get_dict(), ensure_ascii=False)

    return jsonify({'msg': 'success', 'session': parse.quote(session)})
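

# A small usage sketch (an assumption, not the original project's wiring): login_hpu_vpn
# returns the VPN cookies URL-quoted, while school_ann / school_tc_code / school_ann_download
# expect the raw JSON string, so the caller unquotes it first.
def vpn_session_sketch(jwzh, jwmm, ip=None):
    """Log in to the VPN and return the cookie JSON ready for the other helpers."""
    login_res = login_hpu_vpn(jwzh, jwmm, ip)              # Flask Response with {'session': ...}
    quoted = login_res.get_json()['session']               # needs a Flask app context
    return parse.unquote(quoted)                           # json.loads()-able cookie dict
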
def lib_book_cover(recordId, raw=False, ip=None):

    detail = lib_book_detail(recordId=recordId, ip=ip, jsonify_res=False)

    s = requests.session()

    # print(detail)
    bookname = detail['data']['bean2List'][0]['fieldVal'].split('/')[0]
    bookisbn = detail['data']['bean2List'][2]['fieldVal'].split('/')[0]

    res = s.get(f'https://mfindhpu.libsp.com/find/book/getDuxiuImageUrl?title={bookname}&isbn={bookisbn}', verify=False)

    res = json.loads(res.text)

    if not res['success']:
        raise BaseException.APIException(msg=res['message'], error_code=res['errCode'])

    if raw:
        img_content = s.get(res['data'], verify=False).content
        img_content_b64 = encode_base64(img_content)
        return "data:image/jpg;base64,%s"%img_content_b64

    data = {
        'msg': res['message'],
        'data': res['data']
    }
    # print(res.text)
    return jsonify(data)
def BookCoverRaw(recordId=None):
    if not recordId:
        raise BaseException.ParameterException('缺少参数: recordId')
    return HPULib.lib_book_cover(
        recordId,
        True
    )
def school_tc_code(session, ip):

    # Refresh the session
    s = requests.session()
    s.cookies.update(json.loads(session))

    mobile_header = {
        "User-Agent":
        "Mozilla/5.0 (iPhone; CPU iPhone OS 12_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.1.2 Mobile/15E148 Safari/604.1"
    }
    mobile_header['X-Forwarded-For'] = ip if ip else '47.103.223.186'

    tc_code_url = "https://vpn.hpu.edu.cn/web/0/http/2/218.196.240.21/tycsweb/dimage.aspx"
    code_img = s.get(tc_code_url, headers=mobile_header)

    # print(code_img.text)
    if code_img.status_code != 200:
        raise BaseException.APIException(msg='登录超时,请重新登录', error_code=30002)

    base64_data = base64.b64encode(code_img.content)
    b64 = base64_data.decode()

    # print("data:image/jpeg;base64,%s"%b64)
    return jsonify({
        'msg':
        'success',
        'captcha':
        "data:image/jpeg;base64,%s" % b64,
        'captchaHtml':
        f"<img src='data:image/jpeg;base64,{b64}'></img>"
    })
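

# A hedged sketch of the sports-test grade flow, not original code: school_tc_code
# fetches the captcha image for the current VPN session, the user reads it, then
# school_tc_grade posts the grade query with the typed text.
def tc_grade_flow_sketch(vpn_session_json, usernum, userpwd, read_captcha, ip=None):
    """read_captcha turns the captcha data-URL into the text the user typed."""
    captcha_resp = school_tc_code(vpn_session_json, ip)    # Flask Response, see above
    captcha_text = read_captcha(captcha_resp.get_json()['captcha'])
    return school_tc_grade(vpn_session_json, usernum, userpwd, captcha_text, ip)
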
def sectionFree(session, buildingId, dateBegin, dateEnd, section, mode):

    # Refresh the session
    s = requests.session()
    if session: s.cookies.update(json.loads(session))  # restore saved cookies

    result = s.post(f"{eams_base_url}/eams/classroom/apply/free!search.action",
        data={
            "classroom.building.id": buildingId,             # building id
            "cycleTime.cycleCount": "1",                     # cycle count
            "cycleTime.cycleType": "1",                      # cycle type (1: day, 2: week)
            "cycleTime.dateBegin": dateBegin,                # first free date
            "cycleTime.dateEnd": dateEnd,                    # last free date
            "roomApplyTimeType": "0",                        # free-time unit (0: class period, 1: clock time)
            "timeBegin": str(section).split("-")[0],         # first free period/time
            "timeEnd": str(section).split("-")[1],           # last free period/time
            "pageNo": "1",                                   # current page number
            "pageSize": "1000",                              # entries per page
            "orderBy": "classroom.name asc"                  # sort order (classroom name, ascending)
        }, headers=req_header)
    # print(result.text)
    if "账号密码登录" in result.text:
        raise BaseException.APIException(msg='教务管理系统登录超时,请重新登录', error_code=10003)
    if "返回前页" in result.text:
        errMsg = re.findall("<span>(.*)</span>", result.text)[0]
        raise BaseException.APIException(msg=errMsg, error_code=10003)

    rsoup = BeautifulSoup(result.text, "html.parser")
    dataTable = rsoup.find("tbody", id=re.compile("grid.*data"))
    # print(dataTable)

    data = []
    trs = dataTable.find_all("tr")
    for tr in trs:
        tds = tr.find_all("td")
        if not tds:
            return []
        d = {}
        d["num"] = tds[0].string.strip() if tds[0].string else ""
        # d["startSection"] = str(section).split("-")[0]
        # d["endSection"] = str(section).split("-")[1]
        d["name"] = tds[1].string.strip() if tds[1].string else ""
        if mode == '1':
            d["building"] = tds[2].string.strip() if tds[2].string else ""
            d["campus"] = tds[3].string.strip() if tds[3].string else ""
            d["type"] = tds[4].string.strip() if tds[4].string else ""
            d["seats"] = tds[5].string.strip() if tds[5].string else ""
        data.append(d)

    # print(json.dumps(data, ensure_ascii=False))
    return data
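

# Example usage sketch (assumption): pick a building id from eams_freeroom_buildings()
# and ask which rooms are free for class periods 1-2 on a single day. The date format
# ("YYYY-MM-DD") and the concrete values are illustrative only.
def freeroom_sketch(session):
    buildings = eams_freeroom_buildings(session).get_json()['data']   # [{'value', 'title'}, ...]
    building_id = buildings[0]['value']
    return sectionFree(session, building_id,
                       dateBegin="2021-06-01", dateEnd="2021-06-01",  # hypothetical dates
                       section="1-2", mode='1')
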
def lib_login(session, username, password, captcha, codeKey, ip):

    s = requests.session()
    s.cookies.update(json.loads(session))

    payload = {
        'username': username, 
        'password': password, 
        'verifyCode': captcha, 
        'mappingPath': '', 
        'nextpath': '/Home', 
        'codeKey': codeKey, 
        'uid': ''
    }

    header = {
        'Host': 'mfindhpu.libsp.com',
        'Connection': 'keep-alive',
        'Content-Length': '157',
        'Accept': 'application/json, text/plain, */*',
        'mappingPath': '',
        'groupCode': '200090',
        'null': 'null',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.96 Safari/537.36 Edg/88.0.705.50',
        'Content-Type': 'application/json;charset=UTF-8',
        'Origin': 'https://mfindhpu.libsp.com',
        'Sec-Fetch-Site': 'same-origin',
        'Sec-Fetch-Mode': 'cors',
        'Sec-Fetch-Dest': 'empty',
        'Referer': 'https://mfindhpu.libsp.com/',
        'Accept-Encoding': 'gzip, deflate, br',
        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6'

    }
    header['X-Forwarded-For'] = ip if ip else '47.103.223.186'

    res = s.post('https://mfindhpu.libsp.com/oga/login',
        data=json.dumps(payload),
        headers=header,
        verify=False
    )
    # print(res.text)
    
    res = json.loads(res.text)

    if not res['success']:
        raise BaseException.APIException(msg=res['message'], error_code=res['errCode'])

    # loan_page = s.get('https://mfindhpu.libsp.com/find/loanInfo/loanHistoryList', headers = header)
    # print(loan_page.text)

    session_d = s.cookies.get_dict()
    session_d.update(res['data'])

    return jsonify({
        'msg': 'success',
        'data': parse.quote(json.dumps(session_d, ensure_ascii=False))
    })
def lib_book_collection(recordId, ip):
    
    header = {
        'Host': 'mfindhpu.libsp.com',
        'Connection': 'keep-alive',
        'Content-Length': '71',
        'Accept': 'application/json, text/plain, */*',
        'mappingPath': '',
        'groupCode': 'undefined',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.96 Safari/537.36 Edg/88.0.705.50',
        'Content-Type': 'application/json;charset=UTF-8',
        'Origin': 'https://mfindhpu.libsp.com',
        'Sec-Fetch-Site': 'same-origin',
        'Sec-Fetch-Mode': 'cors',
        'Sec-Fetch-Dest': 'empty',
        'Referer': 'https://mfindhpu.libsp.com/',
        'Accept-Encoding': 'gzip, deflate, br',
        'Accept-Language': 'zh-CN,zh;q=0.9'
    }
    header['X-Forwarded-For'] = ip if ip else '47.103.223.186'

    s = requests.session()

    res = s.post('https://mfindhpu.libsp.com/find/physical/groupitems',
        data=json.dumps({
            'page': 1, 
            'rows': 20000, 
            'entrance': None, 
            'recordId': recordId, 
            'isUnify': True
        }),
        headers=header, 
        verify=False
    )

    res = json.loads(res.text)

    if not res['success']:
        raise BaseException.APIException(msg=res['message'], error_code=res['errCode'])

    data = {
        'msg': res['message'],
        'data': res['data']
    }
    # print(res.text)
    return jsonify(data)
def school_ann(session, page, ip):

    aurl = "https://vpn.hpu.edu.cn/web/1/http/0/218.196.240.155:80/swfweb/hpugg.aspx"

    header = dict(req_header)  # copy so the shared default headers are not mutated
    header['X-Forwarded-For'] = ip if ip else '47.103.223.186'

    # Refresh the session
    s = requests.session()
    s.cookies.update(json.loads(session))  # restore saved cookies

    # Request the announcement list page
    res = s.get(aurl + "?page=" + str(page), headers=header, verify=False)
    # print(res.text, res.status_code)
    if res.status_code != 200:
        raise BaseException.APIException(msg='登录超时,请重新登录', error_code=30002)

    res_soup = BeautifulSoup(res.text, "html.parser")
    maxpage = res_soup.find_all("span", {"id": "Label4"})[0].string
    annnum = res_soup.find_all("span", {"id": "Label1"})[0].string

    ahrefs = res_soup.find_all("a", {"href": re.compile("/web/.*?id=.*")})

    datalist = []
    for i in ahrefs:
        d = {}
        q = i.contents
        w = i.get("href")
        e = i.parent.previous_sibling.string.strip()
        d["id"] = re.findall("id=(.*)", w)[0]
        d["title"] = q[0]
        d["date"] = e
        d["isTop"] = 1 if len(q) > 1 else 0
        datalist.append(d)

    out = {}
    out["annNum"] = annnum
    out["hasNext"] = 1 if int(maxpage) > int(page) else 0
    out["data"] = datalist

    # print(json.dumps(out, ensure_ascii=False))
    return jsonify({'msg': 'success', 'data': out})
def lib_loan_list(session, ip):

    header = {
        'Host': 'mfindhpu.libsp.com',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.83 Safari/537.36',
        'Accept-Encoding': 'gzip, deflate',
        'Connection': 'keep-alive',
        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,zh-TW;q=0.7',
        'Accept': 'application/json, text/plain, */*',
        'Content-Type': 'application/json;charset=UTF-8'
    }
    
    s = requests.session()
    s_d = json.loads(session)
    s.cookies.update(s_d)

    header['jwtOpacAuth'] = s_d['jwt']
    header['X-Forwarded-For'] = ip if ip else '47.103.223.186'

    loan_data = s.post(
        'https://mfindhpu.libsp.com/find/loanInfo/loanHistoryList',
        data=json.dumps({
            'page': 1,
            'rows': 20000
        }),
        headers=header, 
        verify=False
    )
    res = json.loads(loan_data.text)

    if not res['success']:
        raise BaseException.APIException(msg=res['message'], error_code=res['errCode'])

    # print(loan_data.text)

    return jsonify({
        'msg': res['message'],
        'data': res['data']
    })
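

# Usage sketch (assumption, not original code): the session blob handed to lib_loan_list
# must be the one produced by lib_login, since that is where the jwt field is merged
# into the cookie dict that s_d['jwt'] reads back above.
def loan_list_sketch(login_response, ip=None):
    """login_response is the Flask Response returned by lib_login()."""
    session_quoted = login_response.get_json()['data']     # URL-quoted cookie+jwt JSON
    return lib_loan_list(parse.unquote(session_quoted), ip)
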
def school_ann_download(session, ann_id):

    # requests.urllib3.disable_warnings()

    def mkdirW(path):
        isExis = os.path.exists(path)
        if not isExis:
            os.makedirs(path)

    aid = ann_id

    aurl = "https://vpn.hpu.edu.cn/web/1/http/2/218.196.240.155/swfweb/hpugm.aspx?id=" + aid

    # Refresh the session
    s = requests.session()
    s.cookies.update(json.loads(session))  # restore saved cookies

    res = s.get(aurl, verify=False)

    if res.status_code != 200:
        raise BaseException.APIException(msg='获取公告内容失败', error_code=30003)

    fUrl = parse.unquote(res.url)
    fName = fUrl.split("/")[-1:][0]
    fType = fUrl.split(".")[-1:][0]

    rootdir = "./public/sources/anns/"
    tempdir = './public/sources/temp/'
    itemdir = rootdir + aid + "/"
    mkdirW(itemdir)

    file_base_url = "http://127.0.0.1/"

    if fType.lower() in ["doc", "docx", "xls", "xlsx", "ppt", "pptx", "pdf"]:
        with open(itemdir + fName, "wb") as f:
            f.write(res.content)
        return jsonify({
            'msg': 'success',
            'data': [file_base_url + i for i in os.listdir(itemdir)]
        })

    if fType.lower() in ["rtf"]:
        with open(itemdir + fName + ".doc", "wb") as f:
            f.write(res.content)
        return jsonify({
            'msg': 'success',
            'data': [file_base_url + i for i in os.listdir(itemdir)]
        })

    if fType.lower() in ["jpg", "jpeg", "gif", "png"]:
        tempfile = tempdir + fName
        # print(tempfile)
        with open(tempfile, "wb") as d:
            d.write(res.content)
        with open(itemdir + fName + ".pdf", "wb") as f:
            i = Image.open(tempfile)
            i = i.convert("RGB")
            i.save(tempfile)
            f.write(img2pdf.convert(tempfile))
            os.remove(tempfile)
        return jsonify({
            'msg': 'success',
            'data': [file_base_url + i for i in os.listdir(itemdir)]
        })

    if fType.lower() in ["html", "htm"] and res.status_code == 200:
        pypandoc.convert_text(res.text,
                              "docx",
                              "html",
                              outputfile=itemdir + "/公告内容.docx")
        return jsonify({
            'msg': 'success',
            'data': [file_base_url + i for i in os.listdir(itemdir)]
        })

    if fType.lower() in ["zip", "7z"]:
        # if fType.lower() in ["zip"]:
        tempfile = tempdir + aid + "." + fType
        with open(tempfile, "wb") as f:
            f.write(res.content)
        cmdw = "LANG='zh_CN.UTF-8' 7za e -aoa '{}' -o{}".format(
            tempfile, itemdir)
        # cmdw = "LANG='zh_CN.UTF-8' unzip -j '{}' -o -d {}".format(tempfile, rootdir+"/"+aid)
        # print(cmdw)
        subprocess.call(cmdw, shell=True)
        os.remove(tempfile)
        return jsonify({
            'msg': 'success',
            'data': [file_base_url + i for i in os.listdir(itemdir)]
        })

    if fType.lower() in ["rar"]:
        tempfile = tempdir + aid + "." + fType
        with open(tempfile, "wb") as f:
            f.write(res.content)
        cmdw = "LANG='zh_CN.UTF-8' unrar e -o+ '{}' {}".format(
            tempfile, itemdir)
        # print(cmdw)
        subprocess.call(cmdw, shell=True)
        os.remove(tempfile)
        return jsonify({
            'msg': 'success',
            'data': [file_base_url + i for i in os.listdir(itemdir)]
        })

    if fType.lower() in ["aspx"] and res.status_code == 200:
        # print(rootdir+aid+"/公告内容.docx")
        pypandoc.convert_text(res.text,
                              "docx",
                              "html",
                              outputfile=itemdir + "/公告内容.docx")
        return jsonify({
            'msg': 'success',
            'data': [file_base_url + i for i in os.listdir(itemdir)]
        })
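

# A hedged flow sketch, not original code: list announcements with school_ann, then fetch
# and convert one attachment by its id with school_ann_download. Note that the download
# helper shells out to 7za/unrar and converts html/aspx via pypandoc, so those tools must
# be installed. The page number and list index used here are placeholders.
def ann_download_sketch(vpn_session_json, ip=None):
    ann_page = school_ann(vpn_session_json, 1, ip).get_json()['data']
    first_id = ann_page['data'][0]['id']                   # ids scraped by school_ann
    return school_ann_download(vpn_session_json, first_id)
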
def school_tc_grade(session, usernum, userpwd, captcha, ip):

    # Refresh the session
    s = requests.session()
    s.cookies.update(json.loads(session))

    mobile_header = {
        "User-Agent":
        "Mozilla/5.0 (iPhone; CPU iPhone OS 12_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.1.2 Mobile/15E148 Safari/604.1"
    }
    mobile_header['X-Forwarded-For'] = ip if ip else '47.103.223.186'

    payload = {
        "__EVENTTARGET": "",
        "__EVENTARGUMENT": "",
        "__VIEWSTATE":
        "t+b8QtAqfEhdSJ6P4SJqTrs+1Q88aIYQ5jxugX1Kt0j3mrOmk7t28vDXibM3wPRDsT0M0Jx6kY4ABJtROHBT90DxOChms+SSjZbQNQsafhaUWzvap9ASodQWrmpRNSTbG7di5S0LdM19cz1Vh910o/bbqBsLXJc6KMa/06hzEsfTGQPfpA7pRJhIrnLt5js9zrk3jyUm/jHVc7KEGJojC/2Zp7nb3T+PaNj2iLYJ/C6VR+CzIp2tr6t0oC8cBZkuUFWSaACNdpaxclMx4bKJ7zaOU7wZ37/Us86YY8icSFrOpyqSyajBPycRJNG4EraVfdBW00pa1Yl/4dDrmumu0QPK9HBZ8eZhbUGqGoiobBbVYUzhufOz+oIEpJKf7kKMv8WyAqfbMAgOxJpQHgJkf7WbdNNwUzRvHyMeUNHNauderyDXDIegGshD/pQjn9ohhdckQvple8NQC5bv1IBlEajQmJv/tRD5bxOEeaS1jg6AbjtBWZjwAV9lPXW2C1tQt7m+sauoThJzyYoueNbQgpTcvWV7Mee9qb1Ik+ZjfI2i3ofbtGyQmjaVBTBjLukcISxS3d63UEkgqpx8iRtmLcXr/H8yRQj5wSXDto64gA+9iplE7MkAMaespRlGTYi7TCHWYfHhexZxfxYAn+s6SU2dch3GLjNcKgFXnabM6uPWzL4UcUC0rZILyN4M5QZf3emW/jp0tnMS+nZwtZpWar8Ly21nwmWKEve3ftgvB//+0kReaECDBAm/aXaKsn18HdLM4Es8PS5IZlcu+3vTiGzZ9RNvYKC0Fobgr1Dye+Wj+2spC2Ycf05UWT5C4RAqXz7DBI4Y5TPSABLjDkLarYBn+tZDGTaSn5JlQmlhyFbkJAClpJotq1lvvbv5YA+Be9Yt2o2edibPJxYPiw7oyQx70zYSNE3PlfVKZFSh7Ow33URWgj3JIQCAIjqLNd0/ScYafPUn4CFDzW99pqYgeeaull22TtkxF9hZBMVehMzl8OsvZb3XgrMErQZoyoVeY0qwID6niFCAX1COx/xBII94NvUoKEDP2iI74rlaQRxQyXFiLj1VNgzqtqIKfjblDA691lc3ugC8s6K/1FwT2TQ+WF5nkBXop3bNvIFCT/FXavwEbo5f+91vmsqXf5NZwQfR9s4man3657mmCtrvJFJSnB85CAE8cC8vPeSp3YfwXfXcs8FSluQtafzVIMZwIQEeXFtOKqcgaEhAI2Dfy98COaAXftyuuMkSYFSubTaQG2XWKeLfbe2m3ln2WQl7ftS9/i/iqaxNkZcDinX42qpyIIPdml9xm1zzltBk+nfZosJHIH2+2eQ1SRQDJ+Tmcbzbdhlr2WHQj1GRXHhhHoLr3aGMXOBlXeCwkcgge2EB11mV+c0kiu0Em77Msqdoz8H0THYGw6lONC/FpGuk4STXbbrVq3+kYU8vwCwhgYY/gxAW+jmugkAQNcSELiRe8GUF6YhNE7nb7ixQrrRQCb2DRmC+irFxoqP15cQKy3ECefHCLf93hw9XD56tBOf1nkjY48mxIgBbafQUMCIDuMlQAkpuLzJkmGLbGwcJckptO/XMLY4X4PDiDDieITHBPOra3rMwUgzi8IBiYuX+jzStLIU8KjbREyyz9z0fjP50KVHFepLmLSMy/Dkado8klapg36NijbKoWiQ5FS92wSJWkTsk2jX54ni3MVIsaJY4DkFJ+UFVD+TtXn8pMAe8MHN7cFC0SxXJKNHpYIPbBlHXFV8frGvIuY3N3VGt9e2kP+GR4/e9xJ/wOacylBZePLolV4nDNz3jikGkCAp1WSEGfCXVHJ2LUwrIW6VrbmINwwVb+oaNFiCW1aTikkDWEPxRqBjvGOUme9XKBNn2m0OA5Rf1vmTA4Fysg/BW/UOaSDlVOBZu9Mf8FbBAzD5rmgIuZsCGalfbPx4SwzSpvqhNnoTqb5ySTd1BF0bv0XLR1S7Evg47AGpuiJIjZEXnkiE4IZ0VElg5w9QjoPWnOyqyc/Owb+LIrcC+p9CKF/2LQohMKxIZ4/25Aa7oxqam1WGAXlod5NOE7euBpnE8JaDeAxxbe5/50FhbE8GiC4fz05a416kXbYHYTX710ubV7FNVHLbfk97Et6zknv4UGFhMEYsCY+2E5IDiUx4Yqk+HghVa3nNoRHx07axsXPNO8xo2wsw6bLxXJLme5k3kCRz35qawZiwx+BKEJ2dFS4eUfP4BAuUru4VYTQppRkPxriQBUji/ZJdANnqKuTw79QtZY59CDT3kZt4Jf8jxs3AVnc0YImB3t4VQKtkGGY/0z3pJ96xreBDBQYghkA20MgyCB4ks/H42UmISYfkMXSd9ETLUSitWa+O44pxF1Jf352RiDyppnPyA3/i6Lo17Iy9/4LAz0qH0E2E0AhsK+m4j44hovfjTXLZIIqeJySvZyRYtMf4F+yF1MuOuOWepkAQzOJsiA4FX2bETqsCINXp3hWp2RxnS+66kzFeQzRzzcfbYTXotB2fsIuCeZH3dYrTVY0J3ZNLdSdJ4JvDF2aNEZDrfwP7EEOlriWi0lAQkBH+Om6lXolc3uFOdqHA62pghjqjvB624ZXR3ogB/EIwDbjJqhYBK1Er0lNY3VPrE1WMEwbHiDUUo4kVhz6ePFJlxdRTN2uwqXjuvKqgevSjC4AX0nSeRMVKLcZasho2hL1sniufGgT3GJdT/4NH/uSM+XQU0B7QFHkoVVYxfVVvLXsqVvzCfanymAbsw34Wh5DIF0yIDde+MHLrB/JKX1NTzTqfRc9hKZ0A37Rs/8ymiVn+cc2Ea1kNWAXRBWQBBJFyL+81elMD3bGs0jBcuuglW8DI8EotR85FC+bM4CelZdCE1CkE8JUWb3qMiC6Lnpag34ff77VPG/QpXFA/gjJU75a/wxGq/wdd+rvPo8LFYQR0hMbV/w2fWDtZDi6p/wekAKvbUhoIJOeTeGoERw25ptdltARN5PP9SmHnJIaca4Xd95JyMuETB0A8TRgMYupq5KE88cDLvX0KiN9np1CMl78TCoeOX5qRJxCkPZNtWGNOCCo0bHN3gqz72ZHQ4j1voZIODXVlfngRYGtjZIIlQu3rCELsunOOU5NYl7S5Gmwowp1Mzpwrx2o0VyC7hB4OMBOtkLSWCUIrmkAhW772JHukWE8Kt1TMnWPOyKVZf3eKycanvTRDOZQZ12utm9shguEY+arXFmk6A8O6abNXvjJMwMiKDRPqLIXjFx9Ji/6AH/+WTNKHqR2SRLmeCKwpOG6/cBuGOb2gxFwPUz4xxxTIi9ix38SIGqGBDTxexIDkbcUkMDmJZeVPlyjRcD5v0qhG2p9k52PglG0kfKKwiXPt/s1OAcTLV9rHu8MslKwP3+sAr3VkNJcR++5wSc08+HEwUYvDSifFYhD8s9pkMwVsu6j01S/CvDU1I5P5TRjK3N9pNjNcPVCtSoUZJb6GCdi4oWHPCI/18rkSfqBPEGaSGMTLT1Dyqb5Z3OIfSSJB1F4ucsw1k0JkqC9p2GG775LosT7QG+/M6Bbml+92zAB
VuD7xYV9lsknh/K3XbYWOI+/PxS0EO92XUjT7SpnbIFytD7JgTmzLkc1NSYLLCXwE3RvlUrFGqCr+x1yJdWO9aY3McPceW5E9cFuWA/JH+f/upQLntLl5QiTD3F9IKqSGCr7N981K/KQl2KUZasop+w/JRDsQquRbbf6ShpG7R4hU20FrMtzxDuIuZnbsuvgNxor37J5N6Ar2xoGIGDWmvSNzoS7rMxveYSna9WCzTpjAiZRfX73j5H96EP0pe9AxASA2fQFIzE7X3/N7X11IMLczA+gOG6TrNuyEMLSpUJJgbgIki3Di6f1ioXPbkpW0vaDM4VaQ0Tv8CwRGFHdGW2V60iWbPKJQgmXnHK5Dtuh3m56Gb8zwjh8Ga1WhXpZy23VqlSdu+OZDuYdNrUNoNzrBavQqHS8xnFsOf0/oGR09OGippeq4PoJ6tRZk+vkuNsSThntrjxW1Mm6cXJgHE",
        "__VIEWSTATEGENERATOR": "C0FF12A6",
        "__EVENTVALIDATION":
        "d99tH4eWTqHuAmg6Cke3uYUqXKzohb8rAcjAWFo6pGFfCtf+PW7uhVyjcxrnpxYyUNsRApWPhpqtJZSyBalm6IwSq2Z96yWaVyuSOgRhAOVoTrvfppTvT/kMHheylaHVhJC/XT3tlmO/h+HGQ3euC0N+/vSx6wOk6Md5ng==",
        "TextBox1": usernum,
        "TextBox2": userpwd,
        "TextBox3": captcha,
        "RadioButtonList1": "2",
        "Button1": "登录"
    }

    tc_url = "https://vpn.hpu.edu.cn/web/1/http/1/218.196.240.21:80/tycsweb/index.aspx"
    tc_page = s.post(tc_url, payload, verify=False)

    # print(tc_page.text)
    if "运行时错误" in tc_page.text:
        raise BaseException.APIException(
            msg='校外访问关闭,请链接校园网访问http://218.196.240.21查询', error_code=30005)

    if "登录" in tc_page.text:
        raise BaseException.APIException(msg='密码错误', error_code=30006)

    tccj_url = "https://vpn.hpu.edu.cn/web/1/http/2/218.196.240.21/tycsweb/emyscore.aspx"
    tccj_p = s.get(tccj_url)

    tccj_soup = BeautifulSoup(tccj_p.text, "html.parser")
    tar_stunum = re.findall("学号:(.*)?</td>", tccj_p.text)[0].split()[0]
    nameurl = "https://vpn.hpu.edu.cn/web/1/http/2/218.196.240.21/root/fmleft1.aspx"
    tar_name = BeautifulSoup(
        s.get(nameurl, headers=mobile_header).text,
        "html.parser").find(id="Label1").string

    want_table = tccj_soup.find(class_="score-table")
    tds = BeautifulSoup(str(want_table), "html.parser").find_all("td")

    out = {}
    all_data = []
    # print(tds)
    box = []

    for td in tds:
        cell = td.string
        box.append(cell)
        if cell and "总分" in cell:    # "总分" (total score) marks the last cell of a row
            all_data.append(box)
            box = []

    out["name"] = tar_name
    out["stunum"] = tar_stunum
    out["data"] = all_data

    # print(json.dumps(out, ensure_ascii=False))
    return jsonify({'msg': 'success', 'data': out})
def login_zhhq(session, username, password, captcha, lt, execution, token, ip):

    loginUrl = f"{zhhq_base_url}/cas/login#/"

    s = requests.session()
    s.cookies.update(json.loads(session))

    u = username
    p = password

    header = dict(req_header)  # copy so the shared default headers are not mutated
    header['X-Forwarded-For'] = ip if ip else '47.103.223.186'

    formdata = {
        "username": u,
        "password": p,
        "captcha": captcha,
        "token": token,
        "_eventId": "submit",
        "lt": lt,
        "source": "cas",
        "execution": execution
    }

    dologin = s.post(loginUrl, data=formdata, headers=header)
    # print(dologin.text)
    if "密码错误" in dologin.text:
        raise BaseException.APIException(msg='密码错误', error_code=20001)

    if "login." in dologin.text:
        raise BaseException.APIException(msg='验证码错误', error_code=20002)

    burl = "http://zhhq.hpu.edu.cn/redirect/main/user/statistics"

    Tindex_page = s.get(burl, headers=header)
    redUrl = re.findall("top.location.href='(.*?)'", Tindex_page.text)[0]

    s.get(
        redUrl,
        headers=
        # header
        {
            # "Host": "zhhq.hpu.edu.cn",
            "Connection": "keep-alive",
            "Upgrade-Insecure-Requests": "1",
            "User-Agent":
            "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.102 Safari/537.36",
            "Accept":
            "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
            "Referer": "http://zhhq.hpu.edu.cn/redirect/main/user/statistics",
            "Accept-Encoding": "gzip, deflate",
            "Accept-Language": "zh-CN,zh;q=0.9"
        },
        verify=False)

    index_page = s.get(burl, headers=header)

    ip_soup = BeautifulSoup(index_page.text, "html.parser")
    # print(ip_soup)
    avatar = ip_soup.find(attrs={
        "class": "Head_portrait"
    }).findChildren()[0].get("src").strip()
    nickname = ip_soup.find(attrs={"class": "userName"}).string
    academy = ip_soup.find(
        class_="information_text").findChildren()[0].get("title")
    dorm = ip_soup.find(
        class_="information_text").findChildren()[1].get("title")

    d = {}
    d["avatar"] = avatar
    d["nickname"] = nickname
    d["academy"] = academy
    d["dorm"] = dorm

    scookie = s.cookies.get_dict()
    d["session"] = parse.quote(json.dumps(scookie, ensure_ascii=False))

    # print(json.dumps(d, ensure_ascii=False))
    return jsonify(d)
def framework_error(e):
    return BaseException.framework_error(e, app)
def lib_simple_search(keyword, page, ip):

    header = {
        'Host': 'mfindhpu.libsp.com',
        'Connection': 'keep-alive',
        'Content-Length': '522',
        'Accept': 'application/json, text/plain, */*',
        'mappingPath': '',
        'groupCode': '200090',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.96 Safari/537.36 Edg/88.0.705.50',
        'Content-Type': 'application/json;charset=UTF-8',
        'Origin': 'https://mfindhpu.libsp.com',
        'Sec-Fetch-Site': 'same-origin',
        'Sec-Fetch-Mode': 'cors',
        'Sec-Fetch-Dest': 'empty',
        'Referer': 'https://mfindhpu.libsp.com/',
        'Accept-Encoding': 'gzip, deflate, br',
        'Accept-Language': 'zh-CN,zh;q=0.9'
    }
    
    s = requests.session()

    header['X-Forwarded-For'] = ip if ip else '47.103.223.186'

    req_url = 'https://mfindhpu.libsp.com/find/unify/search'

    payload = {
        'docCode': [None], 
        'searchFieldContent': keyword, 
        'searchField': 'keyWord', 
        'matchMode': '2', 
        'resourceType': [], 
        'subject': [], 
        'discode1': [], 
        'publisher': [], 
        'locationId': [], 
        'collectionName': [], 
        'author': [], 
        'langCode': [], 
        'countryCode': [], 
        'publishBegin': None, 
        'publishEnd': None, 
        'coreInclude': [], 
        'ddType': [], 
        'verifyStatus': [], 
        'group': [], 
        'sortField': 'relevance', 
        'sortClause': 'asc', 
        'page': page, 
        'rows': 10, 
        'onlyOnShelf': None, 
        'campusId': [], 
        'curLocationId': [], 
        'eCollectionIds': [], 
        'kindNo': [], 
        'libCode': [], 
        'searchItems': None, 
        'searchFieldList': None
    }

    res = s.post(req_url, headers=header, data=json.dumps(payload), verify=False)

    res = json.loads(res.text)

    if not res['success']:
        raise BaseException.APIException(msg=res['message'], error_code=res['errCode'])

    # print(res.text)
    return jsonify({
        'msg': res['message'],
        'data': res['data']
    })
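

# End-to-end library sketch (assumption): search by keyword, take a recordId from the
# result set, then pull the bibliographic detail, holdings and cover for it. The field
# names used to reach recordId inside the search payload are guesses.
def lib_lookup_sketch(keyword, ip=None):
    hits = lib_simple_search(keyword, 1, ip).get_json()['data']
    record_id = hits['searchResult'][0]['recordId']        # hypothetical result field names
    return {
        'detail': lib_book_detail(record_id, ip, jsonify_res=False),
        'collection': lib_book_collection(record_id, ip).get_json(),
        'cover': lib_book_cover(record_id, raw=True, ip=ip),   # data:image base64 string
    }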