示例#1
0
def douYinVideo():
    """Poll the Douyin hot-video billboard and persist it once per day.

    Builds a list of {desc, desc_extr, url} dicts from the billboard API
    (url is a Baidu news search for the video title) and stores the JSON
    blob in the erp_hotpoint row whose `FROM` column is '视频榜'.
    Runs forever; on any error it prints the traceback and releases the
    database resources before returning.
    """
    conn = cur = None  # so cleanup is safe even if connecting fails
    try:
        # Douyin hot-video billboard endpoint.
        url = "https://www.iesdouyin.com/web/api/v2/hotsearch/billboard/aweme/"
        headers = {
            'User-Agent':
            'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3724.8 Safari/537.36'
        }
        conn = connect_mysql.test()
        cur = conn.cursor()
        # Parameterized statement: the old f-string interpolation broke as
        # soon as the JSON contained a single quote.
        sql = ("update erp_hotpoint set HOTPOINT=%s, updatetime=now() "
               "where `FROM`='视频榜';")
        while True:
            response = requests.get(url=url, headers=headers,
                                    verify=False).text
            result = json.loads(response)
            word_list = []
            for each in result['aweme_list']:
                desc = each['aweme_info']['desc']
                word_list.append({
                    'desc': desc,
                    'desc_extr': each['label'],
                    # Baidu news search for the video title.
                    "url": "https://www.baidu.com/s?rtt=1&bsst=1&cl=2&tn=news&word="
                           + urllib.parse.quote(desc.replace('"', ""))
                })
            # The connection idles for 24h between updates, which exceeds
            # MySQL's default wait_timeout; ping(True) reconnects if dropped.
            conn.ping(True)
            cur.execute(sql, (json.dumps(word_list, ensure_ascii=False),))
            conn.commit()
            time.sleep(86400)
    except Exception:
        traceback.print_exc()
        if cur is not None:
            cur.close()
        if conn is not None:
            conn.close()
示例#2
0
def baiDu():
    """Scrape three Baidu "top buzz" boards into erp_hotpoint every 2 min.

    Each board page is parsed for (title, trend value, link) rows; the
    resulting list is stored as JSON in the erp_hotpoint row whose `FROM`
    column matches the board's Chinese name.  Runs forever; on any error
    it prints the traceback and releases the database resources.
    """
    conn = cur = None  # so cleanup is safe even if connecting fails
    try:
        # Realtime / today / seven-day hot boards.
        rt_url = "http://top.baidu.com/buzz?b=1&c=513&fr=topbuzz_b341_c513"
        td_url = "http://top.baidu.com/buzz?b=341&c=513&fr=topbuzz_b1_c513"
        sd_url = "http://top.baidu.com/buzz?b=42&c=513&fr=topbuzz_b341_c513"
        conn = connect_mysql.test()
        cur = conn.cursor()
        hotPoint_dict = {'实时热点': rt_url, '今日热点': td_url, '七日热点': sd_url}
        headers = {
            'User-Agent':
            'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3724.8 Safari/537.36'
        }
        # Parameterized statement: the old f-string interpolation broke as
        # soon as the JSON contained a single quote.
        update_sql = ("update erp_hotpoint set HOTPOINT=%s,updatetime=now() "
                      "where `FROM`=%s;")

        while True:
            for board_name, board_url in hotPoint_dict.items():
                # Pages are GBK-encoded HTML.
                response = requests.get(url=board_url,
                                        headers=headers,
                                        verify=False).content.decode('gbk')
                soup = BeautifulSoup(response, "html.parser")
                table = soup.find('table', {'class': 'list-table'})
                card_list = []
                for tr in table.find_all('tr')[1:]:  # skip the header row
                    try:
                        anchor = tr.find('a', {'class': 'list-title'})
                        title = anchor.text.strip("\n")
                        link = anchor['href']
                    except (AttributeError, TypeError):
                        continue  # spacer rows carry no title link
                    # The trend value span carries one of three classes
                    # depending on direction (fall / rise / flat).
                    value = ''
                    for icon_cls in ('icon-fall', 'icon-rise', 'icon-fair'):
                        span = tr.find('span', {'class': icon_cls})
                        if span is not None:
                            value = span.text.strip("\n")
                            break
                    card_list.append({
                        'desc': title,
                        'desc_extr': value,
                        'url': link
                    })
                conn.ping(True)  # reconnect if MySQL dropped the idle link
                cur.execute(update_sql,
                            (json.dumps(card_list, ensure_ascii=False),
                             board_name))
                conn.commit()
            time.sleep(120)
    except Exception:
        traceback.print_exc()
        if cur is not None:
            cur.close()
        if conn is not None:
            conn.close()
def main():
    """Recompute growth statistics for flagged xhs_attention_user rows.

    For every USER_KEY whose FANS_UP_RAT flag is 1, walks its snapshot rows
    in ID order and stores, per row, the absolute change and the per-mille
    growth rate of FANS/LIKED/COLLECT/NOTES relative to the previous row.
    The first snapshot of each user gets all-zero statistics.
    """
    conn = connect_mysql.test()
    cur = conn.cursor()
    cur.execute(
        "select DISTINCT USER_KEY from xhs_attention_user where FANS_UP_RAT=1;")
    user_keys = [row[0] for row in cur.fetchall()]

    update_sql = (
        "update xhs_attention_user set FANS_UP_RAT=%s,LIKED_UP_RAT=%s,"
        "COLLECT_UP_RAT=%s,NOTES_UP_RAT=%s,FANS_UP_NUM=%s,LIKED_UP_NUM=%s,"
        "COLLECT_UP_NUM=%s,NOTES_UP_NUM=%s where ID=%s;")
    for user_key in user_keys:
        # Parameterized query — the original interpolated USER_KEY into the
        # SQL string directly.
        cur.execute(
            "select ID, USER_KEY,FANS,LIKED,COLLECT,NOTES "
            "from xhs_attention_user where USER_KEY=%s order by ID;",
            (user_key,))
        rows = cur.fetchall()
        data = []
        prev = None
        for row in rows:
            print(row[0], row[1])
            if prev is None:
                # First snapshot: nothing to diff against.
                ups = [0, 0, 0, 0]
                rats = [0, 0, 0, 0]
            else:
                # Columns 2..5 are FANS, LIKED, COLLECT, NOTES.
                ups = [row[c] - prev[c] for c in range(2, 6)]
                # Per-mille growth rate; 0 when the previous value is 0/NULL
                # (avoids division by zero).
                rats = [
                    up / prev[c] * 1000 if prev[c] else 0
                    for up, c in zip(ups, range(2, 6))
                ]
            # Order must match update_sql: 4 rates, 4 deltas, then the row ID.
            data.append(rats + ups + [row[0]])
            prev = row
        conn.ping(True)  # the loop can run long; reconnect if dropped
        cur.executemany(update_sql, data)
        conn.commit()
    cur.close()
    conn.close()
    def __init__(self):
        """Open source/target DB connections and prepare crawler state."""
        # Source ("test") database connection/cursor.
        self.conn_T = connect_mysql.test()
        self.cur_T = self.conn_T.cursor()

        # Target ERP database connection/cursor.
        self.conn_W = connect_mysql.w_shark_erp()
        self.cur_W = self.conn_W.cursor()
        # IDs of articles already present in the database (dedup cache).
        self.have_list = []
        # Per-user counters; exact key/value semantics not visible in this
        # chunk — TODO confirm against the methods that fill it.
        self.users_num = {}
        # Mobile Taobao request headers.  NOTE(review): the _m_h5_tk cookie
        # pair is a short-lived session token and presumably needs periodic
        # refreshing — confirm.
        self.headers = {
            "Connection":
            "keep-alive",
            "User-Agent":
            "Mozilla/5.0 (Linux; Android 5.1.1; xiaomi mix Build/LMY47I; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/52.0.2743.100 Safari/537.36 AliApp(TB/9.1.0) TTID/600000@taobao_android_9.1.0 WindVane/8.5.0 900X1600 UT4Aplus/0.2.16",
            "Cookie":
            "_m_h5_tk=36b5227cd1a1e340e4d56bcc93555f2f_1587526955005; _m_h5_tk_enc=7385708053b9b4519913b71659d347aa;"
        }
示例#5
0
    def __init__(self):
        """Open DB connections and set up Taobao headline crawl config."""
        # Source ("test") DB and target ERP DB.
        self.conn_T = connect_mysql.test()
        self.cur_T = self.conn_T.cursor()
        self.conn_W = connect_mysql.w_shark_erp()
        self.cur_W = self.conn_W.cursor()

        # Channel name -> Taobao headline column id (presumably the columnId=
        # query parameter, cf. columnId=1206 in the Referer below — TODO
        # confirm).  NOTE(review): the key "二次 " has a trailing space and
        # "没事" looks like a possible typo for "美食"; keys left untouched
        # because they are runtime lookup values.
        self.CLASS = {
            "新品": "1375",
            "首页": "1203",
            "新鲜": "1518",
            "评测": "1363",
            "园艺": "1379",
            "影视": "1516",
            "游戏": "1370",
            "二次 ": "1359",
            "垂钓": "1362",
            "数码": "1387",
            "优惠": "3626",
            "如何": "1378",
            "居家": "1377",
            "视频": "1340",
            "型男": "1361",
            "汽车": "1341",
            "摄影": "1360",
            "手机": "1513",
            "美妆": "1372",
            "萌宠": "1342",
            "旅行": "1514",
            "精选": "1204",
            "美搭": "1373",
            "运动": "1369",
            "没事": "1358",
            "母婴": "1364",
        }
        # Mobile Taobao request headers.  NOTE(review): the _m_h5_tk cookie
        # pair is a short-lived session token — likely needs refreshing.
        self.headers = {
            "Referer":
            "https://market.m.taobao.com/app/mtb/headline/pages/portal?spm=a215s.7406091.home_m_h_v5_toutiao_corner_1.3&utparam=%7B%22ranger_buckets_native%22%3A%22tsp2584_22605%22%7D&scm=1007.home_headline.headline.d&wh_weex=true&wx_navbar_hidden=true&_wx_statusbar_hidden=hidden_light_text&feedListFeeds=true&columnId=1206&pushFeedIds=209933620800,200253499132",
            "Connection":
            "keep-alive",
            "User-Agent":
            "Mozilla/5.0 (Linux; Android 5.1.1; xiaomi mix Build/LMY47I; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/52.0.2743.100 Safari/537.36 AliApp(TB/9.1.0) TTID/600000@taobao_android_9.1.0 WindVane/8.5.0 900X1600 UT4Aplus/0.2.16",
            "Cookie":
            "_m_h5_tk=d2fd278808f43520fbcbdc710af0923c_1589783019427;_m_h5_tk_enc=53dc2d73b37a50c68dbf4bf9acc83c02"
        }
        # IDs already crawled (dedup) and accumulated article text.
        self.have_list = []
        self.context = ''
示例#6
0
import random
import easygui as g

# Taobao "V task" creator-list crawler (fragment — the body of the
# try/while below continues past the end of this chunk).
# NOTE(review): `random` and `easygui` are not used in the visible part of
# this script; presumably used further down — confirm before removing.
headers = {
    "Connection":
    "keep-alive",
    "User-Agent":
    "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3724.8 Safari/537.36",
    "Referer":
    "https://v.taobao.com/v/content/video",
    "Cookie":
    "XSRF-TOKEN=9400fddf-8598-4fe7-9fa3-dc158ab982ff; _samesite_flag_=true; cookie2=174889f3da4383bb0b69df16e0d6d56c; t=d7e3737e54a72cd41a95478fff278ba5; _tb_token_=eee95e1d3df63; mt=ci=0_0; _tb_token_=undefined; cna=q04hFywZ7RICARsSAqAKxEjR; lgc=%5Cu70B8%5Cu4E86%5Cu8FD9%5Cu4E2A%5Cu987A%5Cu5B50; dnk=%5Cu70B8%5Cu4E86%5Cu8FD9%5Cu4E2A%5Cu987A%5Cu5B50; tracknick=%5Cu70B8%5Cu4E86%5Cu8FD9%5Cu4E2A%5Cu987A%5Cu5B50; v=0; _m_h5_tk=eb98b46edbf0bb2f43580408f3321695_1587384966546; _m_h5_tk_enc=be8a1a51bed44411dc19010cb31c3aba; sgcookie=EuO9eIaAj2jvmbxslXOrh; unb=2632081012; uc1=cookie16=VFC%2FuZ9az08KUQ56dCrZDlbNdA%3D%3D&cookie15=Vq8l%2BKCLz3%2F65A%3D%3D&existShop=false&pas=0&cookie21=U%2BGCWk%2F7p4mBoUyS4E9C&cookie14=UoTUPcii%2FmjRow%3D%3D; uc3=lg2=VFC%2FuZ9ayeYq2g%3D%3D&vt3=F8dBxGR3%2Fy6ovWHlpkc%3D&id2=UU6ifpqosHikVg%3D%3D&nk2=trmexKP205X9gFOH; csg=64ac8e0b; cookie17=UU6ifpqosHikVg%3D%3D; skt=b9005a62c4da2232; existShop=MTU4NzQzMTMwOA%3D%3D; uc4=id4=0%40U2xvKj8ULbTw1mnLTZg9YIuSQtuT&nk4=0%40tG90if0DnSIUfEG6Q6BlICjDd9hLfEg%3D; _cc_=UIHiLt3xSw%3D%3D; _l_g_=Ug%3D%3D; sg=%E5%AD%9023; _nk_=%5Cu70B8%5Cu4E86%5Cu8FD9%5Cu4E2A%5Cu987A%5Cu5B50; cookie1=BxeMfJ%2BF4RC08j%2BX%2BZGN7ugGUx4l6KH8Y%2F3xcgGclP0%3D; tfstk=cjfABAZxeuqD_C_2L1ek1Rf3XiF1a_r9Vqti6PMFPrtZuRlEtsv5t65MI-TsDQUR.; l=eB_BNnkVqZnX0ihUBO5ZS5TbU7_tNIRb8sPrfdJgmIHca69F_nSbdNQcLdZy8dtjgt5A8eKz8-EPGdeWSWz38xtjopgDAI4kCZv68e1..; isg=BCEhG87N225oGXS_a7TE0Z7VMO07zpXAvGxsuoP2uCjG6kC8yh2PkWdsSB7sIi34",
}
# Insert a new creator row / refresh an existing one (FANS + READS).
sql = """insert into v_video(USER_ID,NICK,PIC_URL,HOME_URL,FANS,`READS`,CLASS,UPDATE_TIME) values(%s,%s,%s,%s,%s,%s,%s,now());"""
update_sql = """update v_video set FANS=%s,`READS`=%s,UPDATE_TIME=now() where USER_ID=%s"""
conn = connect_mysql.test()
cur = conn.cursor()
# Collect USER_IDs already stored so they can be updated instead of inserted.
select_sql = """select USER_ID from v_video;"""
cur.execute(select_sql)
had_list = []
for each in cur.fetchall():
    had_list.append(each[0])

# Walk the first 25 pages of the creator listing for category 602.
for i in range(1, 26):
    print("第", i, "页")
    url = f"https://v.taobao.com/micromission/req/selectCreatorV3.do?cateType=602&currentPage={i}"
    # Retry loop; its except handler lies beyond the end of this chunk.
    while True:
        try:
            response = requests.get(url, headers=headers, verify=False).text
            data = json.loads(response)
            result = data['data']['result']
示例#7
0
def weiBo():
    """Scrape Weibo leaderboards into the erp_hotpoint table, forever.

    Two refresh cadences run inside one polling loop:
      * hot-search board + realtime-rising board: every 60 seconds;
      * ten category topic boards (movie, beauty, ...): every 24 hours.
    Each board becomes a JSON list of {desc, desc_extr, url} stored in the
    erp_hotpoint row whose `FROM` column matches the board's name.
    On any error the traceback is printed and DB resources are released.

    NOTE(review): the polling loop has no sleep, so it busy-spins on
    time.time() checks between refresh windows — confirm this is intended.
    """
    # Mobile-API container URL for the hot-search board.
    hotSearch_url = "https://m.weibo.cn/api/container/getIndex?containerid=106003type%3D25%26t%3D3%26disable_hot%3D1%26filter_type%3Drealtimehot&title=%E5%BE%AE%E5%8D%9A%E7%83%AD%E6%90%9C&extparam=pos%3D0_0%26mi_cid%3D100103%26cate%3D10103%26filter_type%3Drealtimehot%26c_type%3D30%26display_time%3D1572329575&luicode=10000011&lfid=231583"
    # Per-category topic leaderboard URLs.
    movie_url = "https://m.weibo.cn/api/container/getIndex?containerid=231648_-_1_-_100_-_%E7%94%B5%E5%BD%B1%E8%AF%9D%E9%A2%98%E6%A6%9C_-_2&luicode=10000011&lfid=231648_-_7_-_2&page_type=08 "
    ssMeizhuag_url = "https://m.weibo.cn/api/container/getIndex?containerid=231648_-_1_-_114_-_%E6%97%B6%E5%B0%9A%E7%BE%8E%E5%A6%86%E8%AF%9D%E9%A2%98%E6%A6%9C_-_2&luicode=10000011&lfid=231648_-_7_-_2&page_type=08"
    travel_url = "https://m.weibo.cn/api/container/getIndex?containerid=231648_-_1_-_93_-_%E6%97%85%E6%B8%B8%E8%AF%9D%E9%A2%98%E6%A6%9C_-_2&luicode=10000011&lfid=231648_-_7_-_2&page_type=08"
    foods_url = "https://m.weibo.cn/api/container/getIndex?containerid=231648_-_1_-_91_-_%E7%BE%8E%E9%A3%9F%E8%AF%9D%E9%A2%98%E6%A6%9C_-_2&luicode=10000011&lfid=231648_-_7_-_2&page_type=08"
    picture_url = "https://m.weibo.cn/api/container/getIndex?containerid=231648_-_1_-_123_-_%E7%BE%8E%E5%9B%BE%E8%AF%9D%E9%A2%98%E6%A6%9C_-_2&luicode=10000011&lfid=231648_-_7_-_2&page_type=08"
    car_url = "https://m.weibo.cn/api/container/getIndex?containerid=231648_-_1_-_117_-_%E6%B1%BD%E8%BD%A6%E8%AF%9D%E9%A2%98%E6%A6%9C_-_2&luicode=10000011&lfid=231648_-_7_-_2&page_type=08"
    cat_url = "https://m.weibo.cn/api/container/getIndex?containerid=231648_-_1_-_128_-_%E8%90%8C%E5%AE%A0%E8%AF%9D%E9%A2%98%E6%A6%9C_-_2&luicode=10000011&lfid=231648_-_7_-_2&page_type=08"
    child_url = "https://m.weibo.cn/api/container/getIndex?containerid=231648_-_1_-_116_-_%E8%82%B2%E5%84%BF%E8%AF%9D%E9%A2%98%E6%A6%9C_-_2&luicode=10000011&lfid=231648_-_7_-_2&page_type=08"
    digital_url = "https://m.weibo.cn/api/container/getIndex?containerid=231648_-_1_-_131_-_%E6%95%B0%E7%A0%81%E8%AF%9D%E9%A2%98%E6%A6%9C_-_2&luicode=10000011&lfid=231648_-_7_-_2&page_type=08"
    home_url = "https://m.weibo.cn/api/container/getIndex?containerid=231648_-_1_-_504_hot_-_%E5%AE%B6%E5%B1%85%E8%AF%9D%E9%A2%98%E6%A6%9C_-_2&luicode=10000011&lfid=231648_-_7_-_2&page_type=08"
    topic_url_list = [
        movie_url, ssMeizhuag_url, travel_url, foods_url, picture_url, car_url,
        cat_url, child_url, digital_url, home_url
    ]
    conn = cur = None  # so cleanup is safe even if connecting fails
    try:
        conn = connect_mysql.test()
        cur = conn.cursor()
        # Last-refresh timestamps; 0 forces both branches on the first pass.
        hotSearch_time = 0
        topic_time = 0
        # Maps the board name decoded from each topic URL to the `FROM`
        # column value in erp_hotpoint (currently an identity mapping).
        topic_dict = {
            '电影话题榜': '电影话题榜',
            '时尚美妆话题榜': '时尚美妆话题榜',
            '旅游话题榜': '旅游话题榜',
            '美食话题榜': '美食话题榜',
            '美图话题榜': '美图话题榜',
            '汽车话题榜': '汽车话题榜',
            '萌宠话题榜': '萌宠话题榜',
            '育儿话题榜': '育儿话题榜',
            '数码话题榜': '数码话题榜',
            '家居话题榜': '家居话题榜'
        }

        while True:
            # --- hot-search + realtime-rising boards: every 60 seconds ---
            if time.time() - hotSearch_time > 60:
                hotSearch_time = time.time()
                headers = {
                    'User-Agent':
                    'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3724.8 Safari/537.36'
                }
                hotSearch_response = requests.get(url=hotSearch_url,
                                                  headers=headers,
                                                  verify=False).text
                try:
                    hotSearch_result = json.loads(hotSearch_response)
                except ValueError:
                    # Weibo occasionally returns non-JSON (e.g. a rate-limit
                    # page); log and retry on the next spin.
                    traceback.print_exc()
                    continue
                # cards[0] is the hot-search list; entry 0 is a banner, skip.
                card_group = hotSearch_result['data']['cards'][0][
                    'card_group']
                card_list = []
                for card in card_group[1:]:
                    card_list.append({
                        'desc': card['desc'],
                        'desc_extr': card['desc_extr'],
                        "url": card["scheme"]
                    })
                # Parameterized: the old f-string broke when the JSON
                # contained single quotes.
                sql = ("update erp_hotpoint set HOTPOINT=%s, updatetime=now() "
                       "where `FROM`='热搜榜';")
                conn.ping(True)  # reconnect if MySQL dropped the idle link
                cur.execute(sql, (json.dumps(card_list, ensure_ascii=False),))
                conn.commit()
                # cards[1] is the realtime-rising list (no banner entry).
                card_group = hotSearch_result['data']['cards'][1][
                    'card_group']
                card_list = []
                for card in card_group:
                    card_list.append({
                        'desc': card['desc'],
                        'desc_extr': card['desc_extr'],
                        "url": card["scheme"]
                    })
                sql = ("update erp_hotpoint set HOTPOINT=%s,updatetime=now() "
                       "where `FROM`='实时上升热点';")
                cur.execute(sql, (json.dumps(card_list, ensure_ascii=False),))
                conn.commit()

            # --- category topic boards: every 24 hours ---
            if time.time() - topic_time > 86400:
                topic_time = time.time()
                # Augment the base headers (created in the 60 s branch, which
                # is guaranteed to have run first) for the HTML topic pages.
                headers['Cache-Control'] = "max-age=0"
                headers[
                    'Accept'] = 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3'
                headers['Upgrade-Insecure-Requests'] = '1'
                headers['Connection'] = 'keep-alive'
                headers[
                    'Cookie'] = "_T_WM=69451734627; MLOGIN=0; WEIBOCN_FROM=1110003030; M_WEIBOCN_PARAMS=luicode%3D10000011%26lfid%3D231648_-_7_-_2%26fid%3D231648_-_1_-_504_hot_-_%25E5%25AE%25B6%25E5%25B1%2585%25E8%25AF%259D%25E9%25A2%2598%25E6%25A6%259C_-_2%26uicode%3D10000011; XSRF-TOKEN=94520d"
                for url in topic_url_list:
                    # The board name is the %-encoded URL segment ending in
                    # 榜 (%9C is the final byte of its UTF-8 encoding).
                    topic_class_temp = re.findall('_%[^_]*%9C_', url)[0]
                    topic_class = urllib.parse.unquote(
                        topic_class_temp.strip('_').replace('"', "'"))
                    card_list = weiBo_topic(url, headers)
                    sql = ("update erp_hotpoint set HOTPOINT=%s,"
                           "updatetime=now() where `FROM`=%s;")
                    cur.execute(sql,
                                (json.dumps(card_list, ensure_ascii=False),
                                 topic_dict[topic_class]))
                    conn.commit()
    except Exception:
        traceback.print_exc()
        if cur is not None:
            cur.close()
        if conn is not None:
            conn.close()
示例#8
0
"""
添加好物类别
"""
import CralwerSet.connect_mysql as connect_mysql
import traceback

conn_T = connect_mysql.test()
cur_T = conn_T.cursor()
conn_W = connect_mysql.w_shark_erp()
cur_W = conn_W.cursor()
sql = "select ID, TITLE from yhh_hw where ID>14000;"
cur_T.execute(sql)

try:
    for item in cur_T.fetchall():
        sql = f"""SELECT t6.cat,t5.num FROM (select t4.MAIN_ID MAIN_ID,count(t4.MAIN_ID) num FROM (SELECT  t2.CLASSIFY_ID CLASSIFY_ID FROM (select URL_ID, CONTENT from crawler_commodity_module_description where match(CONTENT) against('{item[1].replace("'","’")}') limit 100) t1, cm_commodity t2 where t1.URL_ID=t2.URL_ID ) t3, class_id t4 where t3.CLASSIFY_ID = t4.ID GROUP BY t4.MAIN_ID) t5, class_id t6 WHERE t6.ID=t5.MAIN_ID ORDER BY t5.num desc LIMIT 1;"""
        cur_W.execute(sql)
        result = cur_W.fetchone()
        if not result:
            type = '类型不明'
        else:
            type = result[0][:-1]
        sql = f"""update yhh_hw set `TYPE`='{type}' where ID={item[0]} limit 1;"""
        cur_T.execute(sql)
        conn_T.commit()
        print(item[0], item[1], type)
except:
    traceback.print_exc()
cur_T.close()
conn_T.close()
cur_W.close()
示例#9
0
def get_DIY_essays():
    """Crawl DIY essays from drbl.daorc.com and store their text snippets.

    Pages through the data listing 100 rows at a time.  For each row the
    extraction strategy depends on CHANNELID_1 (single product, image+text
    post, outfit match, shopping list); extracted snippets are inserted
    into erp_original_essay as (ESSAY_ID, TEXT) pairs.  Rows with unknown
    channel types are skipped.  Stops after the first short page.
    """
    login()
    page = 0
    conn = connect_mysql.test()
    cur = conn.cursor()
    sql = """insert into erp_original_essay(ESSAY_ID,TEXT) values (%s,%s);"""

    def _clean(text):
        # Normalize double quotes and drop newlines before storage.
        return text.replace('"', "'").replace("\n", "")

    while True:
        page += 1
        url = f"https://drbl.daorc.com/data_queryDataListToJSON.action?fileTypeId=99&tableFlag=WJ&pageInfo=STATUS@@1@@2@_@AUTOSTATUS@@1,2,3,4,5,6,7,8,9,10@@7@_@STATUS@@1@@2@_@applications@@1524367772261@@2@_@&limit=100&page={page}"
        response = requests.get(url, headers=HEADER).text
        result = json.loads(response)
        for each in result['rows']:
            essay_id = each['ID']  # renamed from `id` (shadowed the builtin)
            channelid_1 = each['CHANNELID_1']

            if channelid_1 in ['7714', '1661']:  # single product
                essay_url = f"https://drbl.daorc.com/tbimagetxtcard_messageCheckView.action?id={essay_id}&fileTypeId=7714&temp={time.time()}"
                response = requests.get(essay_url, headers=HEADER).text
                # Capture the div body directly.  The original pattern used a
                # character class ([^(</div>)]*) that wrongly rejected any
                # text containing the letters d/i/v, parens, etc.
                raw_texts = re.findall(
                    r"<div style='margin-top: 15px'>(.*?)</div>", response,
                    re.S)
            elif channelid_1 == "1507":  # image + text post
                essay_url = f"https://drbl.daorc.com/data_updateData.action?tableFlag=WJ&id={essay_id}&fileTypeId={channelid_1}&temp={time.time()}"
                response = requests.get(essay_url, headers=HEADER).text
                # Capture groups replace the original str.strip() calls:
                # strip() removes a *character set*, not a prefix, so it also
                # ate leading/trailing letters of the real text.
                raw_texts = re.findall(r"&lt;p&gt;([^&]*)&lt;/p&gt;",
                                       response)
            elif channelid_1 in ["1656", "1649", "102"]:  # outfit match
                essay_url = f"https://drbl.daorc.com/data_updateData.action?id={essay_id}&tableFlag=WJ&fileTypeId={channelid_1}&temp={time.time()}"
                response = requests.get(essay_url, headers=HEADER).text
                raw_texts = re.findall(
                    r"name='RECOREASON' inputType='1' >([^<>]*)<", response)
            elif channelid_1 in ["1659"]:  # shopping list
                essay_url = f"https://drbl.daorc.com/data_updateData.action?id={essay_id}&tableFlag=WJ&fileTypeId={channelid_1}&temp={time.time()}"
                response = requests.get(essay_url, headers=HEADER).text
                raw_texts = re.findall(
                    r"name='REMARK' inputType='1' >([^<>]*)<", response)
            else:
                continue  # unknown channel type: nothing to store

            text_list = [(essay_id, _clean(text)) for text in raw_texts]
            for _, text in text_list:
                print(datetime.datetime.now(), text)
            cur.executemany(sql, text_list)
            conn.commit()
        # Fewer than a full page of rows means we reached the last page.
        if len(result['rows']) < 100:
            break