class ShunqiSpider:
    def __init__(self):
        self.start_url = 'https://b2b.11467.com/'
        self.headers = b"""Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9
Accept-Language: zh-CN,zh;q=0.9
Cache-Control: no-cache
Connection: keep-alive
Cookie: Hm_lvt_819e30d55b0d1cf6f2c4563aa3c36208=1616553403,1617870200; Hm_lpvt_819e30d55b0d1cf6f2c4563aa3c36208=1617870504; arp_scroll_position=400
Host: b2b.11467.com
Pragma: no-cache
Referer: https://www.11467.com/
sec-ch-ua: "Google Chrome";v="89", "Chromium";v="89", ";Not A Brand";v="99"
sec-ch-ua-mobile: ?0
Sec-Fetch-Dest: document
Sec-Fetch-Mode: navigate
Sec-Fetch-Site: same-site
Sec-Fetch-User: ?1
Upgrade-Insecure-Requests: 1
User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36"""
        self.f = FETCH()
        self.m = MongoDB('mongodb://localhost', 'cuiworkdb', "shunqiwang")
        self.r2 = Redisclient(2)
        self.r3 = Redisclient(3)
        self.area_name_list = []

    def Get_res(self, url, headers):
        # GET the page and return an etree object that supports xpath()
        # html = requests.get(url=url, headers=headers_raw_to_dict(headers))
        html = self.f.fetch(url=url, headers=headers_raw_to_dict(headers))
        res = etree.HTML(html.text)
        return res

    def get_area(self):
        res = self.Get_res(url=self.start_url, headers=self.headers)
        area_list = res.xpath(
            '//div[@class="box sidesubcat t5"]//div[@class="boxtitle"]//following-sibling::div[@class="boxcontent"]//dl[@class="listtxt"]//dd/a/@href'
        )
        area_name_list = res.xpath(
            '//div[@class="box sidesubcat t5"]//div[@class="boxtitle"]//following-sibling::div[@class="boxcontent"]//dl[@class="listtxt"]//dd/a/text()'
        )
        # hrefs are protocol-relative, e.g. "//www.11467.com/shenzhen/" -> "https://www.11467.com/shenzhen/"
        for i in range(len(area_list)):
            real_url = "https:" + area_list[i]
            area_name = area_name_list[i]
            self.r2.save_category_url(area_name, real_url)
            self.area_name_list.append(area_name)

    def get_sec_category(self):
        for i in self.area_name_list:
            url = self.r2.get_category_url(i)
            res = self.Get_res(url=url, headers=self.headers)
            sec_url_list = res.xpath(
                '//div[@id="il"]//div[@class="box huangyecity t5"]//div[@class="boxcontent"]//ul//li//dl//dt//a/@href'
            )
            for sec_url in sec_url_list:
                self.r2.save_page_url(i, sec_url)
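# Redisclient is imported from a helper module that is not part of this file.
# The sketch below is an assumption about the minimal interface the spiders
# rely on (save_category_url / get_category_url / save_page_url), with each
# instance bound to one Redis logical database; the real helper may differ.
import redis


class RedisclientSketch:
    """Hypothetical stand-in for the project's Redisclient helper."""

    def __init__(self, db):
        # one client per logical database, mirroring Redisclient(0..3)
        self.conn = redis.StrictRedis(host='localhost', port=6379, db=db)

    def save_category_url(self, name, url):
        # map a category/area name to its listing URL
        self.conn.hset('category_urls', name, url)

    def get_category_url(self, name):
        # callers pass the result straight to requests/FETCH, so decode to str
        val = self.conn.hget('category_urls', name)
        return val.decode() if val is not None else None

    def save_page_url(self, name, url):
        # queue a page URL under the given category/area key
        self.conn.lpush('page_urls:' + name, url)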
def __init__(self):
    self.start_url = 'http://www.98pz.com/t59c11s1/1.html'
    self.headers = {
        'User-Agent': 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 2.0.50727; SLCC2; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; InfoPath.3; .NET4.0C; Tablet PC 2.0; .NET4.0E)'
    }
    self.r0 = Redisclient(0)
    self.r1 = Redisclient(1)
    self.f = FETCH()
    self.m = MongoDB('mongodb://localhost', 'cuiworkdb', "98guakao_hz_qz")
def __init__(self):
    self.starturl = 'http://hangzhou.qd8.com.cn/'
    self.headers = {
        'User-Agent': 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 2.0.50727; SLCC2; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; InfoPath.3; .NET4.0C; Tablet PC 2.0; .NET4.0E)'
    }
    self.s = FETCH()
    self.m = MongoDB('mongodb://localhost', 'cuiworkdb', "78guakao")
    self.r0 = Redisclient(0)
    self.r1 = Redisclient(1)
    self.r2 = Redisclient(2)
    self.r3 = Redisclient(3)
    self.item_dict = {}
    self.db = MongoDB('mongodb://localhost', 'cuiworkdb', 'kd8')
def __init__(self):
    self.start_url = 'http://www.9gk.cc/zp/sichuan/'
    self.headers = {
        'User-Agent': 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 2.0.50727; SLCC2; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; InfoPath.3; .NET4.0C; Tablet PC 2.0; .NET4.0E)'
    }
    self.headers_fordata = {
        # ":authority": "www.cbi360.net",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
        "Accept-Encoding": "gzip,deflate,br",
        "Accept-Language": "zh-CN,zh;q=0.9",
        "Cache-Control": "no-cache",
        "Content-Type": "application/x-www-form-urlencoded;charset=UTF-8",
        "Cookie": "Hm_lvt_ccf8b732d64d55d0d8a73ec2bcd276ab=1612144130,1612399856,1612752316,1613704044; Hm_lpvt_ccf8b732d64d55d0d8a73ec2bcd276ab=1613704100",
        "Connection": "keep-alive",
        "Host": "www.9gk.cc",
        "pragma": "no-cache",
        "Referer": "http://www.9gk.cc/zp/p1700",
        "Upgrade-Insecure-Requests": "1",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36",
    }
    self.r0 = Redisclient(0)
    self.r1 = Redisclient(1)
    self.r2 = Redisclient(2)
    self.f = FETCH()
    self.m = MongoDB('mongodb://localhost', 'cuiworkdb', "9guakao_chengdu")
# # text_json_que = {
# #     "O56fzBVE": "5zVw9fXZ4Req4c3hc19IRHrtiKj1OGRe1rF_SXQZxf3GkE1lrN2sslJr6FAg7ndd51EUCiJ_S8EIkR4aIHz9UlSsjjFnD8w_i8lPmGXOxtBURPKmtu2QvRgEdDjrf0yLub4z90UzG3KJmObB5LgWXB6.VOsAzBi.KfVYS2Edf5LdoO3wID_VqAtObwp2pklg7zQp_OoFsu0LMIEQfdAIMzICr.AgXspltnf7FhCrvgxl_pFnjoKEpzZ2PyZ.FAsC9JUZrFJ1nPCoIQzqDdLSkPCKBKzZhcJgBDlW4Ma7.uyHh0IFq1oMbql1QFCXz6DSOEM378TaczqHAfN623H3Ifb0NGqaJIy9IXFWCCu8u7Y_lUHJ8F93QXkgtOWnk9Mfv"
# # }
# #
# # res = requests.post(url=text_url, headers=text_headers, data=pic_data, json=text_json_que)
# # print(res.text)

#### 2021/1/8  Crawl the image URL for each record
import requests
from time import sleep
import json
from Func.client import MongoDB
from Func.fetchJX import FETCH

s = FETCH()

# change the target database here
db = MongoDB('mongodb://localhost', 'cuiworkdb', "Shangbiao_GG-1731")

# change the URL here
url = "http://wsgg.sbj.cnipa.gov.cn:9080/tmann/annInfoView/imageView.html?O56fzBVE=5cRVmWcgP6gC.1ulRszzI2_aOlP1jkNH0mxnPtgkE73P.1rSAtlAU1rHW64aHQoXm471Fzq7QOzRfVJiLnarbCbBAjmRHPnmNTUqx.Bfa6RWoAiipN6HKjl5E3Nb6Jp_LaGu5Dr0x1V4f2AsDjRza2LmDcNsd62msQ6SzqM646fK0XNFf.KzqSrexNQiIbLTdcX2wDPfCad.6G6Y4Pq28hw_OMDoIYVwZSvwH.emWD5UAVTbKi.mblyWCBYJOMZx5OMbUMWr05.V6JtgmG.usyr3_8OtVx8yHqisK54faJIdqZ5ofaDE4r6mjkZiGtqZZ96H_kqpPDS1WOjZMSlQGqQal8YnoPPDasrJ5lPkWyphiagHypaYQfoBfWUc3idLO"

# change the Cookie here
pic_headers = {
    "Accept": "application/json, text/javascript, */*; q=0.01",
    "Accept-Encoding": "gzip,deflate",
    "Accept-Language": "zh-CN,zh;q=0.9",
    "Cache-Control": "no-cache",
    "Connection": "keep-alive",
    "Content-Length": "52",
    "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
    "Cookie":
class Gkspider:
    def __init__(self):
        self.starturl = 'http://www.80guakao.com/shengfen/hb/zhaopinxinxi/'
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 2.0.50727; SLCC2; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; InfoPath.3; .NET4.0C; Tablet PC 2.0; .NET4.0E)'
        }
        self.f = FETCH()
        self.m = MongoDB('mongodb://localhost', 'cuiworkdb', "80guakao_hb")
        self.r0 = Redisclient(0)
        self.r1 = Redisclient(1)
        self.r2 = Redisclient(2)
        self.r3 = Redisclient(3)
        self.category_name_list = []
        self.sec_category_dict = {}
        self.headers_forpage = {
            "Host": "www.80guakao.com",
            "Connection": "keep-alive",
            "Pragma": "no-cache",
            "Cache-Control": "no-cache",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36",
            "Accept": "*/*",
            "Referer": "http://www.80guakao.com/shengfen/hb/",
            "Accept-Encoding": "gzip,deflate",
            "Accept-Language": "zh-CN,zh;q=0.9",
            "Cookie": "",
        }

    def get_category(self):
        html = self.f.fetch(url=self.starturl, headers=self.headers, method='get')
        # html = requests.get(url=self.starturl, headers=self.headers)
        sleep(random.randint(0, 1))
        res = etree.HTML(html.text)
        # print(html.text)
        # category_url_list = res.xpath('//div[@class="content"]//div//a')
        # if len(category_url_list) > 19:
        #     category_url_list = res.xpath('//div[@class="inner"][1]//ul[1]//a')
        category_url_list = res.xpath(
            '//div[@class="categories"]//ul//li[1]//dd[1]//a')
        for i in category_url_list:
            category_name = i.xpath('./text()')[0]
            category_url = i.xpath('./@href')[0]
            category_url = category_url.replace('m.', 'www.')
            if category_name != "不限":  # skip the "不限" ("no limit") catch-all link
                self.r0.save_category_url(category_name, category_url)
                self.category_name_list.append(category_name)

    def get_sec_category(self):
        for category_name in self.category_name_list:
            url = self.r0.get_category_url(category_name)
            # html = self.f.fetch(url=url, headers=self.headers, method='get')
            html = requests.get(url=url, headers=self.headers_forpage)
            sleep(random.randint(0, 1))
            res = etree.HTML(html.text)
            sec_category_list = res.xpath('//div[@class="content"]//div//a')
            # sec_category_list = res.xpath('//div[@class="inner"][1]//ul//a')
            for i in sec_category_list:
                sec_category_name = i.xpath('./text()')[0]
                sec_category_url = i.xpath('./@href')[0]
                sec_category_url = sec_category_url.replace('m.', 'www.')
                if sec_category_name != '不限':
                    print(sec_category_name)
                    self.r1.save_one_dict(category_name, sec_category_name, sec_category_url)

    def get_all_page(self):
        for category in self.category_name_list:
            sec_category_list = self.r1.get_keys(category)
            for sec_category_name, url in sec_category_list.items():
                # html = self.f.fetch(url=url.decode(), headers=self.headers_forpage, method='get')
                html = requests.get(url=url.decode(), headers=self.headers_forpage)
                sleep(random.randint(0, 1))
                res = etree.HTML(html.text)
                self.r2.save_page_url(category + ":" + sec_category_name.decode(), url.decode())
                while True:
                    try:
                        # "下一页" is the "next page" link text
                        next_page = res.xpath(
                            '//div[@class="pagination2"]//a[contains(text(),"下一页")]/@href'
                        )[0]
                    except Exception:
                        break
                    if not next_page:
                        break
                    self.r2.save_page_url(category + ":" + sec_category_name.decode(), next_page)
                    html_next = self.f.fetch(url=next_page, headers=self.headers_forpage, method='get')
                    # html_next = requests.get(url=next_page, headers=self.headers_forpage)
                    sleep(random.randint(0, 1))
                    res = etree.HTML(html_next.text)

    def get_item_url(self):
        for category in self.category_name_list:
            sec_category_list = self.r1.get_keys(category)
            for sec_category_name in sec_category_list:
                while True:
                    try:
                        url = self.r2.get_page_url(category + ":" + sec_category_name.decode())
                        # html = self.f.fetch(url=url, headers=self.headers, method='get')
                        html = requests.get(url=url, headers=self.headers_forpage)
                        sleep(random.randint(1, 2))
                        res = etree.HTML(html.text)
                    except Exception as e:
                        print('error:', e)
                        break
                    # item_list = res.xpath('//li[@class="Tz"]//child::*/a/@href')
                    item_list = res.xpath(
                        '/html/body/div[7]/div[5]/div/div[6]/div[4]/div[3]/ul/div/span[1]/a/@href'
                    )
                    for item_url in item_list:
                        # if 'tel' not in item_url:
                        #     url = item_url.replace('m.', 'www.')
                        # URL of each listing; make relative links absolute
                        if 'http' not in item_url:
                            item_url = 'http://www.80guakao.com/' + item_url
                        self.r3.save_item_url(category + ':' + sec_category_name.decode(), item_url)

    def get_info(self):
        # print(res.xpath('//ul[@class="attr_info"]//li//span[@class="attrVal"]/text()')[0])            # company name
        # print(res.xpath('//ul[@class="attr_info bottom"]//li//span[@class="attrVal"]//a/text()')[0])  # phone
        # print(res.xpath('//ul[@class="attr_info bottom"]//li//span[@class="attrVal"]/text()')[0])     # contact name
        for category in self.category_name_list:
            sec_category_list = self.r1.get_keys(category)
            for sec_category_name in sec_category_list:
                while True:
                    try:
                        url = self.r3.get_item_url(category + ":" + sec_category_name.decode())
                        html = requests.get(url=url.decode(), headers=self.headers_forpage)
                        sleep(random.randint(0, 1))
                        if html.status_code != 200:
                            html = self.f.fetch(url=url.decode(), headers=self.headers_forpage, method='get')
                            sleep(random.randint(0, 1))
                        res = etree.HTML(html.text)
                    except Exception:
                        break
                    item = {}
                    # try:
                    #     company_name = res.xpath('//ul[@class="attr_info"]//li//span[@class="attrVal"][1]/text()')[0]
                    # except:
                    try:
                        company_name = res.xpath(
                            '//div[@class="zhaopiner"]//li//span[contains(text(),"公司名称")]/parent::li/text()'
                        )[0]
                    except Exception:
                        company_name = 'None'
                    # try:
                    #     contact_people = res.xpath('//ul[@class="attr_info bottom"]//li[2]//span[@class="attrVal"]/text()')[0]
                    #     contact_people = contact_people.replace(r'\xa0\xa0', '')
                    # except:
                    contact_people = res.xpath('//ul[@class="contacter"]//li//font/text()')[0]
                    # try:
                    #     perf_request = res.xpath('//div[@class="zhaopiner"]//li//span[contains(text(),"专业要求")]/parent::li/text()')[0]
                    # except:
                    #     perf_request = res.xpath('//ul[@class="attr_info"]//li//span[@class="attrVal"][11]//text()')[0]
                    # try:
                    #     phone = res.xpath('//ul[@class="attr_info"]//li//span[@class="attrVal"][11]//a/text()')[0]
                    #     if phone == []:
                    #         raise Exception
                    # except:
                    phone_url_re = res.xpath(
                        '//ul[@class="contacter"]//li[@class="qqbm"]/a/@onclick'
                    )[0]
                    par = re.compile("'.*?'")
                    phone_url = re.findall(par, phone_url_re)[1].replace("'", "")  # URL that returns the phone number
                    if isinstance(phone_url, str):
                        html = requests.get(url=phone_url, headers=self.headers_forpage)
                    else:
                        html = requests.get(url=phone_url.decode(), headers=self.headers_forpage)
                    sleep(random.randint(0, 1))
                    res = etree.HTML(html.text)
                    phone = res.xpath('//div[@class="number"]//span[@class="num"]/text()')[0]
                    # except:
                    #     phone = "None"
                    item['companyCity'] = '宜昌'        # Yichang
                    item['companyProvince'] = '湖北省'   # Hubei province
                    item['code'] = 'BUS_YT_ZZ'
                    item['name'] = '资质'               # "qualification"
                    item['busCode'] = ''
                    item['webUrl'] = '无'               # "none"
                    item['orgId'] = ''      # department ID, string
                    item['deptId'] = ''     # centre ID, string
                    item['centreId'] = ''
                    # item["first_category"] = category
                    # item["sec_category"] = sec_category_name.decode()
                    item["companyName"] = company_name
                    item["outName"] = contact_people
                    item["resourceRemark"] = category + ":" + sec_category_name.decode()
                    item["companyTel"] = phone.strip()
                    if len(contact_people) == 11:
                        item["companyTel"] = contact_people
                    item["ibossNum"] = None
                    item['isDir'] = 0
                    item['isShare'] = 0
                    item["_id"] = md5encryption(item["companyTel"])  # use the MD5 of the phone number as the Mongo _id
                    print(item)
                    self.m.mongo_add(item)

    def test(self):
        url = 'http://www.80guakao.com/shengfen/sc/gonglugongcheng/23988.html'
        html = requests.get(url=url, headers=self.headers_forpage)
        print(html.text)
        res = etree.HTML(html.text)
        # print(res.xpath('//div[@class="pagination2"]//a[contains(text(),"下一页")]/@href'))
        # print(res.xpath('//div[@class="content"]//div//a/text()'))
        # print(html.text)
        # print(res.xpath('/html/body/div[7]/div[5]/div/div[6]/div[4]/div[3]/ul/div/span/a/@href'))
        # print(res.xpath('//div[@class="zhaopiner"]//li//span[contains(text(),"公司名称")]/parent::li/text()')[0])  # company name
        # print(res.xpath('//div[@class="zhaopiner"]//li//span[contains(text(),"专业要求")]/parent::li/text()'))     # required specialty
        # print(res.xpath('//ul[@class="contacter"]//li//font/text()')[0])                                          # contact person
        phone_url_re = res.xpath(
            '//ul[@class="contacter"]//li[@class="qqbm"]/a/@onclick')[0]  # onclick attribute carrying the phone-number URL
        print(phone_url_re)
        par = re.compile("'.*?'")
        phone_url = re.findall(par, phone_url_re)[1].replace("'", "")  # URL that returns the phone number
        html = requests.get(url=phone_url, headers=self.headers_forpage)
        res = etree.HTML(html.text)
        phone = res.xpath('//div[@class="number"]//span[@class="num"]/text()')[0]
        print(phone)
        # Request URL: http://www.80guakao.com/box.php?part=seecontact_tel&id=54336&tel_base64=MTk5NTA0NTk5Mjc=
        # print(res.xpath('/html/body/div[7]/div[5]/div/div[6]/div[4]/div[3]/ul/div/span[1]/a/@href'))

    def run(self):
        self.get_category()
        self.get_sec_category()
        self.get_all_page()
        self.get_item_url()
        self.get_info()
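# Minimal entry-point sketch (not part of the original file): assuming the
# helpers above (FETCH, MongoDB, Redisclient) are importable and Redis/Mongo
# are running locally, the full Gkspider pipeline is driven through run().
if __name__ == '__main__':
    spider = Gkspider()
    spider.run()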
# 2021/1/11  Download the images, run OCR on them, and store the results in Mongo
import requests
from Func.client import MongoDB
from Func.fetchJX import FETCH
from PIL import Image as image_P
import pytesseract
import cv2
import openpyxl
import os
import sys

db = MongoDB('mongodb://localhost', 'cuiworkdb', "Shangbiao_GG-1731")
s = FETCH()

# Image-recognition / write-to-Excel module:
# - collect the images in the "下载" (download) folder in numeric order
# - crop each image into small blocks and run image-to-text conversion on them
# - post-process the recognised text to improve accuracy
# - convert the image data into text records and import them into Mongo

### Write the revocation and defence document types into Excel
# "撤销复审决定书" (decision on revocation review) - OCR variants as they appear in the source:
rec_list1 = [
    '发文菩暂菖= 撤锏复市决定书',
    '发文茎鲤= 撤销复市决定书',
    '发文莒鲤= 撒淌氯轲决定书',
]
# "关于撤销连续三年未使用商标的决定" (decision on revoking a trademark unused for three consecutive years):
rec_list2 = [
# 2021/1/11  Crawl the trademark text information and get the page numbers of the images
import requests
import json
from TOOLS.mongosave import MongoDB
from TOOLS.md5encode import md5encryption

# change the target database here
db = MongoDB('mongodb://localhost', 'cuiworkdb', "Shangbiao_GG-1731")

from Func.fetchJX import FETCH
s = FETCH()

# change the URL here
url = 'http://wsgg.sbj.cnipa.gov.cn:9080/tmann/annInfoView/annSearchDG.html?O56fzBVE=5kGuYaPrUkHH9Lq8YrTHefgypngRP23L4qgXUWGQn1TKi8Yd5igEworl0xfbe_QQgAs_cOt3plSW7uuYJrq7L1WIUpjbSV_Y8jwT7pDt3qBvN3dxHlaivZlvTyYxD3JctgtaJru5MJhZZxGydeS.3ZoIfni9CZxyKko2tQuVGHLbUbVBWout9qOnP1i6mGCnxEGiUea_nSP_3xljf3U6zkgZ.c5DKXAuQiGzZjKcCLOKPsuFP3CgjXwhbt5ESmD3jfvCNBjc.Mtyy4_D_bfDngudJ.DvhsEJGWicOi6eI9.5BhuIoL5WfOPkkmcebfPQXvuh0SBxzitoPdczDRmEvxbzY2c5irpolrybljU4ZbUVkv0X8Dz5Kv38UUvuDfGXI'

# change the Cookie here
headers = {
    "Accept": "application/json, text/javascript, */*; q=0.01",
    "Accept-Encoding": "gzip, deflate",
    "Accept-Language": "zh-CN,zh;q=0.9",
    "Cache-Control": "no-cache",
    "Connection": "keep-alive",
    "Content-Length": "283",
    "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
    "Cookie": "_gscu_1645064636=76464150vsisqf48; _gscu_2023327167=76464150m8szyi25; _trs_uv=k9wi5ba1_4030_8pj2; Hm_lvt_d7682ab43891c68a00de46e9ce5b76aa=1597140998; UM_distinctid=174ae765669480-09ef6ad0f222cb-4353761-1fa400-174ae76566aa07; goN9uW4i0iKzS=5db92.A0J2CMY23basgx2TZ.mTIJ7lkLr89FeTJ1C0aRMHE_2AokqW2_4RJ42AQplsUcWhHGBKqZ3JYJcp..cRA; __jsluid_h=b6457e19fe1b05edea1f19ada75c9f46; tmas_cookie=2272.7688.15400.0000; 018f9ebcc3834ce269=09b16dacaa2e3b985f2267dc76e5ae8f; JSESSIONID=0000mA5W99E1uXfd1qh0wgqzyqA:1bm112s99; goN9uW4i0iKzT=53cCT8DqzzR9qqqm67L0OCGfXkxa3Eg9kcZgg2BzmN4mJeGvNh.af42XRAU.5pBn6JEBVQW9X7_5Q0c0BLcubFHR3V2NtqqslXLY0Rg.3qvRoSOo.eXYEunrAawqXfJ4OYHTCLen_Z85LNWTB77aJOXfqtOqhlOUMzVD_5wlioEYc22WaLxHAvTwqbtDutolgF8kpTIldeoQJwo89qgNpe0ZOzZwHaaYC3qh7.7bucy3WpAnMVKFV_K_LPWPdL195mAzPq8uiBWY5CRMjmCfU88wyS.H5RFGSvrFx87nTLofgdhXnNMBq1vgUkTx5FYpDxvN5jaQg8eqCoedhokTjYW",
    "Host": "wsgg.sbj.cnipa.gov.cn:9080",
    "Origin": "http://wsgg.sbj.cnipa.gov.cn:9080",
    "Pragma": "no-cache",
    "Referer": "http://wsgg.sbj.cnipa.gov.cn:9080/tmann/annInfoView/annSearch.html",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36",
    "X-Requested-With": "XMLHttpRequest",