class ZdbPedaily_tzsj:
    def __init__(self):
        self.urls = ["https://zdb.pedaily.cn/inv/p{}/".format(i) for i in range(1, 770)]
        self.util = Util()
        self.headers = {
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3",
            "Accept-Encoding": "gzip, deflate, br",
            "Accept-Language": "zh-CN,zh;q=0.9",
            "Connection": "keep-alive",
            "Cookie": "__uid=1452122016; __fromtype=0; ARRAffinity=197ae5372184c64aeca47f780a2e053f3a50366e2bda392cd4bfa3b38e39a929; Hm_lvt_25919c38fb62b67cfb40d17ce3348508=1564455299,1564997145,1565057017,1565061687; BAIDU_SSP_lcr=https://www.baidu.com/link?url=mXXXmWT7-LUN6gg9o-kkJIw_k0SkPj9aL3XGvS6wRVmJjG_3dfydZul0mdFS1rSa&wd=&eqid=cf1c52fe000195ab000000065d48f231; __utma=23980325.1444638820.1563415171.1565057028.1565061688.26; __utmc=23980325; __utmz=23980325.1565061688.26.11.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; __utmt=1; Hm_lpvt_25919c38fb62b67cfb40d17ce3348508={}; __utmb=23980325.5.10.1565061688",
            "Host": "zdb.pedaily.cn",
            "Upgrade-Insecure-Requests": "1",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36"
        }

    def get_shareholder(self, id_code, detail_html):
        # Shareholder table on the detail page
        shareholder_info = detail_html.xpath("//table[@class=\"shareholder-info\"]/tbody/tr")
        if shareholder_info:
            for si in shareholder_info:
                shareholder_name = si.xpath("./td[1]/text()")[0]
                shareholder_type = si.xpath("./td[2]/text()")[0]
                if si.xpath("./td[3]/text()"):
                    shareholder_money = si.xpath("./td[3]/text()")[0]
                else:
                    shareholder_money = ""
                crawl_time = self.util.get_now_time()
                sql_sharholder = "insert into INV_EVT_SHH_INF(ID,SHH_INF,SHH_TYP,SSCR_CTRB_AMT,INPT_DT) values('%s', '%s', '%s', '%s','%s')" % (id_code, shareholder_name, shareholder_type, shareholder_money, crawl_time)
                self.util.insert2mysql("股东信息", sql_sharholder)

    def get_main_people(self, id_code, detail_html):
        # Key people listed on the detail page
        main_people = detail_html.xpath("//div[@class=\"business-people\"]/ul/li")
        if main_people:
            for p in main_people:
                mp_name = p.xpath("./h3/text()")[0]
                mp_position = p.xpath("./p/text()")[0]
                crawl_time = self.util.get_now_time()
                sql_main_people = "insert into INV_EVT_MAIN_PSN_INF(ID, MAIN_PPL_NM, MAIN_PPL_POS, INPT_DT) values('%s', '%s', '%s','%s')" % (id_code, mp_name, mp_position, crawl_time)
                self.util.insert2mysql("主要人物", sql_main_people)

    def get_detail_info(self, detail_url):
        self.headers["Cookie"] = self.headers["Cookie"].format(self.util.get_stamp())
        detail_res = self.util.get_req(url=detail_url, headers=self.headers)
        print(detail_res.status_code)
        if detail_res.status_code == 200:
            detail_html = self.util.get_xpath_obj(detail_res)
            # Basic company fields on the detail page
            company_name = detail_html.xpath("//h1/text()")[0]
            company_base = detail_html.xpath("//div[@class=\"box-fix-l\"]/div/ul/li[1]/text()")[0]
            company_reg_loc = detail_html.xpath("//div[@class=\"box-fix-l\"]/div/ul/li[2]/text()")[0]
            company_bound_date = detail_html.xpath("//div[@class=\"box-fix-l\"]/div/ul/li[3]/text()")[0]
            company_industry = detail_html.xpath("//div[@class=\"box-fix-l\"]/div/ul/li[4]/text()")[0]
            if detail_html.xpath("//div[@class=\"box-fix-l\"]/div/ul/li[@class=\"link\"]/a/text()"):
                company_site = detail_html.xpath("//div[@class=\"box-fix-l\"]/div/ul/li[@class=\"link\"]/a/text()")[0]
            else:
                company_site = ""
            # The company introduction sits in different nodes depending on the page template
            if detail_html.xpath('//div[@class="box-fix-l"]/p/text()'):
                company_intro = detail_html.xpath('//div[@class="box-fix-l"]/p/text()')[0]
            elif detail_html.xpath('//div[@class="box-fix-l"]/p/span/text()'):
                company_intro = detail_html.xpath('//div[@class="box-fix-l"]/p/span/text()')[0]
            elif detail_html.xpath('//div[@class="box-fix-l"]/pre/text()'):
                company_intro = detail_html.xpath('//div[@class="box-fix-l"]/pre/text()')[0]
            elif detail_html.xpath('//div[@class="box-fix-l"]/div/div/text()'):
                company_intro = detail_html.xpath('//div[@class="box-fix-l"]/div/div/text()')[0]
            elif detail_html.xpath('//div[@class="box-fix-l"]/div/text()'):
                company_intro = detail_html.xpath('//div[@class="box-fix-l"]/div/text()')[0]
            elif detail_html.xpath('//div[@id="cke_pastebin"]//text()'):
                company_intro = detail_html.xpath('//div[@id="cke_pastebin"]//text()')[0]
            elif detail_html.xpath('//div[@class="box-fix-l"]/ul/text()'):
                company_intro = detail_html.xpath('//div[@class="box-fix-l"]/ul/text()')[0]
            else:
                company_intro = ""
            # Business-registration block is only present for some companies
            if detail_html.xpath("//div[@id=\"business\"]"):
                legal_person = detail_html.xpath("//table[@class=\"base-info\"]/tr[1]/td[2]/text()")[0]
                founded_time = detail_html.xpath("//table[@class=\"base-info\"]/tr[1]/td[4]/text()")[0]
                registered_capital = detail_html.xpath("//table[@class=\"base-info\"]/tr[2]/td[2]/text()")[0]
                operational_authority = detail_html.xpath("//table[@class=\"base-info\"]/tr[2]/td[4]/text()")[0]
                registered_num = detail_html.xpath("//table[@class=\"base-info\"]/tr[3]/td[2]/text()")[0]
                approval_date = detail_html.xpath("//table[@class=\"base-info\"]/tr[3]/td[4]/text()")[0]
                organizational_code = detail_html.xpath("//table[@class=\"base-info\"]/tr[4]/td[2]/text()")[0]
                creditfcode = detail_html.xpath("//table[@class=\"base-info\"]/tr[4]/td[4]/text()")[0]
                identification_number = detail_html.xpath("//table[@class=\"base-info\"]/tr[5]/td[2]/text()")[0]
                registration_authority = detail_html.xpath("//table[@class=\"base-info\"]/tr[5]/td[4]/text()")[0]
                enterprise_type = detail_html.xpath("//table[@class=\"base-info\"]/tr[6]/td[2]/text()")[0]
            else:
                legal_person = ""
                founded_time = ""
                registered_capital = ""
                operational_authority = ""
                registered_num = ""
                approval_date = ""
                organizational_code = ""
                creditfcode = ""
                identification_number = ""
                registration_authority = ""
                enterprise_type = ""
            id_code = self.util.MD5(company_name + creditfcode)
            # Financing events linked from the detail page
            for rz_html in detail_html.xpath("//div[@class=\"list-invest\"]/ul/li"):
                if rz_html.xpath("./div[@class=\"view\"]/a/@href")[0].startswith("http"):
                    rz_url = rz_html.xpath("./div[@class=\"view\"]/a/@href")[0]  # financing event opens in a new page
                else:
                    rz_url = "https://zdb.pedaily.cn" + rz_html.xpath("./div[@class=\"view\"]/a/@href")[0]
                print(rz_url)
                rz_res = self.util.get_req(url=rz_url, headers=self.headers)
                if rz_res.status_code == 200:
                    rz_html = self.util.get_xpath_obj(rz_res.text)
                    # Investment-event fields
                    rz_title = rz_html.xpath("//h1/text()")[0]
                    rz_info = "".join(rz_html.xpath("//div[@class=\"info\"]/ul/li//text()"))
                    rz_intro = rz_html.xpath("//div[@id=\"desc\"]/p/text()")[0]
                    crawl_time = self.util.get_now_time()
                    sql_rzsj = """insert into INV_EVT_INF(ID,CMP_NM,ORG_TOT_DEPT,REG_PLC_PNT,CMP_SET_UP_TM,AFL_IDT,FORML_WEB,CMP_INTRO,LVRG_NM,LVRG_INF,LVGR_DTL,LGP_INF,SET_UP_TM,REG_CPT,OPR_RIT,REG_NBR,APRV_TM,ORG_ORG_CD_NBR,SOC_CRD_CD,TAX_PSN_RCG_NBR,REG_INSTT,ENTP_TYP,INPT_DT) values('%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s')""" % (id_code, company_name, company_base, company_reg_loc, company_bound_date, company_industry, company_site, company_intro, rz_title, rz_info, rz_intro, legal_person, founded_time, registered_capital, operational_authority, registered_num, approval_date, organizational_code, creditfcode, identification_number, registration_authority, enterprise_type, crawl_time)
                    self.util.insert2mysql("融资公司信息", sql_rzsj)
                    self.get_main_people(id_code, detail_html)
                    self.get_shareholder(id_code, detail_html)

    def get_items_list(self, res):
        html = self.util.get_xpath_obj(res)
        for li in html.xpath("//ul[@id=\"inv-list\"]/li"):
            time.sleep(2)
            # Detail-page link
            if li.xpath("./div[1]/a/@href"):
                detail_url = "https://zdb.pedaily.cn" + li.xpath("./div[1]/a/@href")[0]
            else:
                continue
            print(detail_url)
            self.get_detail_info(detail_url)

    def run(self):
        self.headers["Cookie"] = self.headers["Cookie"].format(self.util.get_stamp())
        for url in self.urls:
            print("列表页:" + url + "开始爬取")
            res = self.util.get_req(url=url, headers=self.headers)  # fetch one list page
            self.get_items_list(res)
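
# Minimal entry-point sketch (not part of the original source) for the investment-event
# crawler above, assuming the module imports time/re/pymysql and provides the Util helper
# (get_req, get_xpath_obj, insert2mysql, MD5, get_stamp, get_now_time) used by it.
if __name__ == "__main__":
    tzsj_spider = ZdbPedaily_tzsj()
    tzsj_spider.run()  # walks list pages p1..p769 and writes the INV_EVT_* tables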
class Qlm_zbbg:
    def __init__(self):
        self.base_url = "http://www.qianlima.com/zbbg/p{}"
        self.page = 200
        self.util = Util()
        self.headers = {
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3",
            "Accept-Encoding": "gzip, deflate",
            "Accept-Language": "zh-CN,zh;q=0.9",
            "Cache-Control": "max-age=0",
            "Connection": "keep-alive",
            "Cookie": "__jsluid_h=144847f002c5e67a5b7bf1888f49e19c; UM_distinctid=16c02c0e9b53d5-083f7603340745-e343166-144000-16c02c0e9b6403; gr_user_id=bfb0c075-bcf5-4e05-a943-8b3448f39a0d; Hm_lvt_0a38bdb0467f2ce847386f381ff6c0e8=1563432734; LXB_REFER=www.baidu.com; bridgeid=59454367; keywordUnit=40461; keywords=%E5%8D%83%E9%87%8C%E9%A9%AC%E6%8B%9B%E6%A0%87%E7%BD%91; CNZZDATA1277608403=172402465-1563412202-%7C1563498692; BAIDU_SSP_lcr=https://www.baidu.com/link?url=BUcmE5CDcuTFAv7tI05xeq_80sbO-X-vNsQ1yhUvF_DGdoPt-o7VQs8t7AYRpXBm&wd=&eqid=da58e9c4000e34dc000000065d312603; Hm_lvt_5dc1b78c0ab996bd6536c3a37f9ceda7=1563414294,1563432734,1563432760,1563502122; qlm_old=\"http://www.qianlima.com/zb/detail/20190719_139475196.html\"; Hm_lpvt_0a38bdb0467f2ce847386f381ff6c0e8=1563502180; qlm_username=15561585051; qlm_password=RCf8ujm8K3EfguKmBCouKpgCKK7uopgU; rem_login=1; qlmll_his=\",139475750,139491436,139497668,139475763,139475196,139264733,139264636,139269995,\"; seo_refUrl=\"http://www.directlyaccess.com\"; seo_curUrl=\"http://www.qianlima.com/common/cat.jsp\"; CNZZDATA1848524=cnzz_eid%3D430053542-1563409337-%26ntime%3D1563503598; fromWhereUrl=\"http://www.qianlima.com/zbbg/\"; seo_intime=\"2019-07-19 10:57:07\"; Hm_lpvt_5dc1b78c0ab996bd6536c3a37f9ceda7=1563506743",
            "Host": "www.qianlima.com",
            "Referer": "http://www.qianlima.com/",
            "Upgrade-Insecure-Requests": "1",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36",
        }

    def get_url_mysql(self):
        # Collect detail-page URLs from the list pages and store them with status 0 (not yet crawled)
        for i in range(200):
            url = self.base_url.format(i)
            res = self.util.get_req(url=url, headers=self.headers)
            html = self.util.get_xpath_obj(res)
            for dl in html.xpath("//div[@class=\"sevenday_list\"]/dl"):
                detail_url = dl.xpath("./dt/a/@href")[0].strip()
                sql = "insert into qlm_zbbg_url(url,status) values ('%s','0')" % detail_url
                self.util.insert2mysql(detail_url, sql)
        self.util.MySQL().close()

    def get_mess(self):
        # Crawl every URL still marked status 0, then flag it as done (status 1)
        conn = self.util.MySQL()
        cursor = conn.cursor()
        sql = "select url from qlm_zbbg_url where status=0;"
        cursor.execute(sql)
        for detail_url in cursor.fetchall():
            print(detail_url[0])
            detail_html = self.util.get_xpath_obj(self.util.get_req(url=detail_url[0], headers=self.headers).text)
            try:
                detail_title = detail_html.xpath("//h2/text()")[0]
                detail_location = "".join(detail_html.xpath("//span[@class=\"site\"]/a//text()"))
                detail_status = detail_html.xpath("//span[@class=\"zhuangtai\"]//text()")[0].replace("状态:", "")
                detail_date = detail_html.xpath("//span[@class=\"d2\"]/text()")[0]
                detail_content = re.findall(r'<div id="wen".*?</div>', self.util.get_req(url=detail_url[0], headers=self.headers).text, re.S)[0].replace("\"", "\\\"").replace("\'", "\\\'")
                record_id = self.util.MD5(detail_title + detail_location)
                crawl_time = self.util.get_now_time()
                sql = """insert into INVT_PUB_BID_MDF_INF(ID, TTL, ZON, STS, INVT_PUB_BID_CNTNT, ISU_TM, DTL_LINK, INPT_DT) values('%s','%s','%s','%s','%s','%s','%s','%s')""" % (record_id, detail_title, detail_location, detail_status, detail_content, detail_date, detail_url[0], crawl_time)
                up_sql = "update qlm_zbbg_url set status = 1 where url = '{}';".format(detail_url[0])
                self.util.insert2mysql(detail_title, sql, up_sql)
                conn.commit()
            except IndexError:
                print("详情页请求失败")
                # Wait a day, then restart the whole crawl from scratch
                time.sleep(86400)
                q = Qlm_zbbg()
                q.run()

    def run(self):
        self.get_url_mysql()
        self.get_mess()
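
# Hedged sketch of the status-flag workflow used by Qlm_zbbg above, written with a
# parameterized query instead of string formatting. It assumes (not confirmed by the
# source) that Util.MySQL() returns a pymysql connection; `mark_url_done` is a
# hypothetical helper name, not part of the original class.
def mark_url_done(conn, url):
    # Flip a crawled URL from status 0 to status 1 in qlm_zbbg_url
    with conn.cursor() as cur:
        cur.execute("update qlm_zbbg_url set status = 1 where url = %s;", (url,))
    conn.commit()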
class Jobui:
    def __init__(self):
        self.util = Util()
        self.url = "https://www.jobui.com/changecity/?from=http://www.jobui.com/cmp?keyword=&area=%E6%B7%B1%E5%9C%B3"
        self.headers = {
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3",
            "Accept-Encoding": "gzip, deflate, br",
            "Accept-Language": "zh-CN,zh;q=0.9",
            "Cache-Control": "no-cache",
            "Connection": "keep-alive",
            "Host": "www.jobui.com",
            "Pragma": "no-cache",
            "Referer": "https://www.jobui.com/cmp",
            "Cookie": "jobui_p=1565753151227_21067661; jobui_user_passport=yk15764787441006; jobui_area=%25E7%258F%25A0%25E6%25B5%25B7; Hm_lvt_8b3e2b14eff57d444737b5e71d065e72=1576719314,1576744537,1576805924,1577020459; Hm_lpvt_8b3e2b14eff57d444737b5e71d065e72=1577028389; TN_VisitCookie=344; TN_VisitNum=1",
            "Upgrade-Insecure-Requests": "1",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36"
        }

    def load(self):
        # Read the list of company codes that have already been scraped
        if os.path.exists("Scrapyed.txt"):
            with open("Scrapyed.txt", 'r', encoding="utf8") as f:
                return f.read()
        else:
            print("文件不存在!!!!")

    # Top-level entry for parsing
    def parse(self):
        req_area = self.util.get_req(url=self.url, headers=self.headers)
        res_html = self.util.get_xpath_obj(req_area.text)
        for dd in res_html.xpath("//dl[@class=\"j-change\"]/dd")[4:5]:  # iterate over the province rows (dd)
            for area in dd.xpath("./a"):  # iterate over the city links within the row
                every_url = "https:" + area.xpath("./@href")[0]  # request and process each city separately
                print(area.xpath("./text()")[0])
                print("每个城市的url: " + every_url)
                self.parse_area_page(self.util.get_req(url=every_url, headers=self.headers))

    # Parse a city/area page by enumerating every filter combination
    def parse_area_page(self, response):
        area_html = self.util.get_xpath_obj(response.text)
        tese = area_html.xpath("//div[@class=\"job-select-box\"]/ul/li[1]/div/div/a/text()")
        for a in ["其他行业", "贸易/进出口", "新能源", "广告", "互联网/电子商务", "教育/培训/院校", "电子技术/半导体/集成电路", "专业服务(咨询、人力资源、财会)", "建筑/建材/工程", "家居/室内设计/装潢", "房地产", "公关/市场推广/会展", "金融/投资/证券", "快速消费品(食品、饮料、化妆品)", "汽车及零配件", "家具/家电/玩具/礼品", "餐饮业", "外包服务", "计算机软件", "机械/设备/重工", "批发/零售", "中介服务", "外包服务", "酒店/旅游", "仪器仪表/工业自动化", "服装/纺织/皮革", "医疗/护理/卫生", "影视/媒体/艺术/文化传播", "制药/生物工程", "交通/运输/物流", "美容/保健", "环保", "原材料和加工", "通信/电信/网络设备", "石油/化工/矿产/地质", "娱乐/休闲/体育", "物业管理/商业中心", "印刷/包装/造纸", "农/林/牧/渔", "娱乐/休闲/体育", "电气/电力/水利", "医疗设备/器械", "保险", "学术/科研", "采掘业/冶炼", "计算机服务(系统、数据服务、维修)", "会计/审计", "生活服务", "计算机硬件", "其他"]:
            for b in ["民营公司", "国企", "合资", "上市公司", "创业公司", "外资", "事业单位", "外企代表处", "非营利机构", "其他性质"]:
                for c in ["50-99", "少于50", "100-499", "500-999", "1000-4999", "5000-9999", "10000以上"]:
                    for d in tese[1:]:
                        use_url = response.request.url + "&industry={}".format(self.util.url_encode(a)) + "&type={}".format(self.util.url_encode(b)) + "&worker={}".format(self.util.url_encode(c)) + "&impression={}".format(self.util.url_encode(d))
                        print(d)
                        print(use_url)
                        self.parse_list(use_url)
                        print("-" * 150)
                        time.sleep(0.5)
                    time.sleep(0.5)
                time.sleep(1)
            time.sleep(1.5)
        # Earlier approach, kept commented out in the source:
        # hangye = []
        # xingzhi = []
        # areacode = []
        # guimo = []
        # tese = []
        # for t in area_html.xpath("//div[@class=\"job-select-box\"]/ul/li"):
        #     if "其他行业" in t.xpath("./div/div/a/text()"):
        #         hangye = t.xpath("./div/div/a/text()")
        #     if "民营公司" in t.xpath("./div/div/a/text()"):
        #         xingzhi = t.xpath("./div/div/a/text()")  # company-nature list
        #     if [ac for ac in t.xpath("./div/div/a/@href")[1:] if "areaCode" in ac]:
        #         areacode = [re.findall(r'areaCode=(\d+)', ac)[0] for ac in t.xpath("./div/div/a/@href")[1:]]  # extract area codes
        #     if "50-99" in t.xpath("./div/div/a/text()"):
        #         guimo = t.xpath("./div/div/a/text()")  # company-size list
        #     print(1)
        # print("hangye: " + str(hangye))
        # print("xingzhi: " + str(xingzhi))
        # print("areacode: " + str(areacode))
        # print("guimo: " + str(guimo))
        # if areacode:
        #     for code in areacode:
        #         for a in hangye[1:]:
        #             for b in xingzhi[1:]:
        #                 print(code + " " + a + " " + b)
        #                 use_url = response.request.url + "&industry={}".format(self.util.url_encode(a)) + "&type={}".format(self.util.url_encode(b)) + "&areaCode={}".format(code)
        #                 print(use_url)
        #                 r = self.util.get_req(url=use_url, headers=self.headers)
        #                 print(self.util.get_xpath_obj(r.text).xpath("//div[@class=\"m-title-box\"]/div/span[@class=\"fr\"]/span/text()"))
        #                 if self.util.get_xpath_obj(r.text).xpath("//div[@class=\"m-title-box\"]/div/span[@class=\"fr\"]/span/text()"):
        #                     if int(self.util.get_xpath_obj(r.text).xpath("//div[@class=\"m-title-box\"]/div/span[@class=\"fr\"]/span/text()")[0].strip()) > 1000:
        #                         if self.util.get_xpath_obj(r.text).xpath("//div[@class=\"job-select-box\"]/ul/li[5]/div/div/a/text()"):
        #                             tese = self.util.get_xpath_obj(r.text).xpath("//div[@class=\"job-select-box\"]/ul/li[5]/div/div/a/text()")
        #                             if tese[1:]:
        #                                 for d in tese[1:]:
        #                                     use_url = use_url + "&impression={}".format(self.util.url_encode(d))
        #                                     print(d)
        #                                     print(use_url)
        #                                     self.parse_list(use_url)
        #                             else:
        #                                 print("企业特色暂无!!!!")
        #                     else:
        #                         if int(self.util.get_xpath_obj(r.text).xpath("//div[@class=\"m-title-box\"]/div/span[@class=\"fr\"]/span/text()")[0]) != 0:
        #                             self.parse_list(use_url)
        #                         else:
        #                             pass
        #                 else:
        #                     print("页面暂无数据!!!")
        #                 time.sleep(0.1)
        #             time.sleep(0.5)
        #         time.sleep(1)
        # else:
        #     print("该城市不存在区级!!")
        #     for a in hangye[1:]:
        #         for b in xingzhi[1:]:
        #             use_url = response.request.url + "&industry={}".format(self.util.url_encode(a)) + "&type={}".format(self.util.url_encode(b))
        #             print(use_url)
        #             r = self.util.get_req(url=use_url, headers=self.headers)
        #             print(self.util.get_xpath_obj(r.text).xpath("//div[@class=\"m-title-box\"]/div/span[@class=\"fr\"]/span/text()"))
        #             if self.util.get_xpath_obj(r.text).xpath("//div[@class=\"m-title-box\"]/div/span[@class=\"fr\"]/span/text()"):
        #                 if int(self.util.get_xpath_obj(r.text).xpath("//div[@class=\"m-title-box\"]/div/span[@class=\"fr\"]/span/text()")[0].strip()) > 1000:
        #                     if self.util.get_xpath_obj(r.text).xpath("//div[@class=\"job-select-box\"]/ul/li[5]/div/div/a/text()"):
        #                         tese = self.util.get_xpath_obj(r.text).xpath("//div[@class=\"job-select-box\"]/ul/li[5]/div/div/a/text()")
        #                         if tese[1:]:
        #                             for d in tese[1:]:
        #                                 use_url = use_url + "&impression={}".format(self.util.url_encode(d))
        #                                 print(d)
        #                                 print(use_url)
        #                                 self.parse_list(use_url)
        #                         else:
        #                             print("企业特色暂无!!!!")
        #                 else:
        #                     if int(self.util.get_xpath_obj(r.text).xpath("//div[@class=\"m-title-box\"]/div/span[@class=\"fr\"]/span/text()")[0]) != 0:
        #                         self.parse_list(use_url)
        #                     else:
        #                         pass
        #             else:
        #                 print("页面暂无数据!!!")
        #             time.sleep(0.1)
        #         time.sleep(0.5)
        #     time.sleep(1)

    # Parse a single list page
    def parse_list_page(self, line):
        for i in range(1, 51):
            print("第{}页开始抓取".format(i))
            page_url = line + "&n={}".format(i)
            rep = self.util.get_xpath_obj(self.util.get_req(url=page_url, headers=self.headers))
            if rep.xpath("//div[@class=\"c-company-list\"]"):  # extraction rules for this part unchanged -- 2019.12.16
                for item in rep.xpath("//div[@class=\"c-company-list\"]")[:-1]:
                    detail_url = item.xpath("./div[@class=\"company-content-box\"]/div/div[1]/a/@href")
                    if str.split(detail_url[0], "/")[-2] not in self.load():
                        if len(detail_url) > 0:
                            url = "https://www.jobui.com" + detail_url[0]
                            try:
                                self.handle_data(self.util.get_req(url=url, headers=self.headers))
                            except TimeoutError:
                                print("超时了!!!")
                            except Exception:
                                print("188 行出错了!!")
                                time.sleep(5)
                                self.handle_data(self.util.get_req(url=url, headers=self.headers))
                            time.sleep(1)
                    else:
                        # print("该数据已入库")
                        pass
                    time.sleep(0.1)
                if len(rep.xpath("//div[@class=\"c-company-list\"]")) <= 20:
                    return False
            else:
                print("该页无数据。。")
                return False
            print("第{}页抓取完毕!!".format(i))

    # Parse a list URL after the filter combination is applied
    def parse_list(self, line):
        data_count = self.util.get_xpath_obj(self.util.get_req(url=line, headers=self.headers).text).xpath("//div[@class=\"m-title-box\"]/div/span[@class=\"fr\"]/span/text()")[0].strip()
        print("数量总计: " + data_count)
        if data_count:
            if int(data_count) > 1000:
                guimo = ["少于50", "50-99", "100-499", "500-999", "1000-4999", "5000-9999", "10000以上"]
                for c in guimo:
                    print(c)
                    line = line + "&worker={}".format(self.util.url_encode(c))
                    print(line)
                    self.parse_list_page(line)
            else:
                self.parse_list_page(line)
        else:
            print("页面无数据!!!")

    # Parse company information
    def handle_data(self, res):
        print("-" * 100)
        print(res.request.url)
        # print(res.status_code)
        if res.status_code == 200:
            response = self.util.get_xpath_obj(res.text)
            if len(response.xpath("//div[@class=\"intro\"]//div[@class=\"company-info-item\"]")) == 3:  # unsure whether len() == 2 or other counts occur
                title = response.xpath("//h1/a/text()")[0].strip().replace("\u2022", "")
                if response.xpath("//div[@class=\"company-banner-segmetation\"]/p/text()"):
                    brief_intro = response.xpath("//div[@class=\"company-banner-segmetation\"]/p/text()")[0].strip()
                else:
                    brief_intro = ""
                xingzhi = "".join(response.xpath("//div[@class=\"company-nature\"]/text()")).strip()
                guimo = "".join(response.xpath("//div[@class=\"company-worker\"]/text()")).strip()
                hangye = ";".join([i.strip() for i in response.xpath("//div[@class=\"company-info-item\"][2]/span/a/text()")]).strip()
                # item_info["rongzi"] = response.xpath("//div[@id=\"navTab\"]/div/a[last()]/div[1]/text()")[0]
                quancheng = "".join([i for i in response.xpath("//div[@class=\"company-info-item\"][3]/text()") if len(i.strip()) > 1]).strip()
                try:
                    intro = "".join(response.xpath("//*[@id=\"textShowMore\"]/text()")).strip()
                except IndexError:
                    intro = ""
            else:
                title = ""
                brief_intro = ""
                xingzhi = ""
                guimo = ""
                hangye = ""
                quancheng = ""
                intro = ""
            id_code = self.util.MD5(quancheng)
            comp_code = str.split(res.request.url, "/")[-2]
            crawl_time = self.util.get_now_time()
            job_info = response.xpath("//div[@id=\"navTab\"]//a[2]/div[@class=\"banner-nav-slash\"]/text()")[0].strip()
            if job_info == "///":
                job_count = 0
            else:
                job_count = int(job_info.replace("个", "").strip())
            if job_count > 0:
                # jobui lists 15 postings per page
                if job_count % 15 == 0:
                    page = int(job_count / 15) + 1
                else:
                    page = int(job_count / 15) + 2
                for i in range(1, page):
                    job_url = res.request.url + "jobs/p{}/".format(i)
                    self.handle_jobs(self.util.get_req(url=job_url, headers=self.headers))
                    time.sleep(0.1)
            rz = response.xpath("//div[@id=\"navTab\"]/div/a[last()]/@href")[0]  # financing detail path, without domain
            if "financing" in rz:
                rongzi = response.xpath("//div[@id=\"navTab\"]/div/a[last()]/div[1]/text()")[0]
                self.handle_rz_info(self.util.get_req(url="https://www.jobui.com" + rz, headers=self.headers))
                time.sleep(0.1)
            else:
                rongzi = ""
            t = (id_code, title, brief_intro, xingzhi, guimo, hangye, rongzi, quancheng, pymysql.escape_string(intro), job_count, comp_code, crawl_time)
            self.util.insert2mysql("(企业信息)" + title, self.sql_info(t))
            with open("./Scrapyed.txt", 'a', encoding="utf8") as f:
                f.write(str.split(res.request.url, "/")[-2] + "\n")
        else:
            print(res.status_code)
            return False

    # Parse job postings
    def handle_jobs(self, res):
        print(res.request.url)
        response = self.util.get_xpath_obj(res.text)
        while True:
            try:
                for item_node in response.xpath("//div[@class=\"j-joblist\"]/div[@class=\"c-job-list\"]//div[@class=\"job-simple-content\"]"):
                    comp_code = str.split(res.request.url, "/")[-4]
                    crawl_time = self.util.get_now_time()
                    job_name = item_node.xpath("./div[1]/a/h3/text()")[0]
                    job_location = item_node.xpath("./div[2]/div/span[1]/text()")[0]
                    job_xueli = ""
                    job_year = ""
                    job_xingzhi = ""
                    job_money = ""
                    for p in item_node.xpath("./div[2]/div/span[2]/text()")[0].split(" | "):
                        if "在读" in p:
                            job_xueli = p
                        if p in ["初中以上", "中专以上", "高中以上", "大专以上", "本科以上", "硕士以上", "应届毕业生"]:
                            job_xueli = p
                            continue
                        if "年" in p:
                            job_year = p
                            continue
                        if p in ["全职", "实习"]:
                            job_xingzhi = p
                            continue
                        for m in ["万", "元", "K", "-", "k", "~"]:
                            if m in p:
                                job_money = p
                                break
                    id_code = self.util.MD5(comp_code + job_name + job_location)
                    t_job = (id_code, job_name, job_location, job_xueli, job_year, job_xingzhi, job_money, comp_code, crawl_time)
                    self.util.insert2mysql(job_name, self.sql_job(t_job))
                break
            except Exception as e:
                print(e)
                time.sleep(10)

    # Parse financing information
    def handle_rz_info(self, res):
        print("+" * 100)
        print(res.request.url)
        response = self.util.get_xpath_obj(res.text)
        # for rz_item in response.xpath("//div[@class=\"m-box\"]/div[2]"):
        for rz_item in response.xpath("//div[@class=\"m-box\"]/div[2]/div[@class=\"c-finace-list\"]"):
            try:
                rz_stage, money = str.split(rz_item.xpath("./div/div/h3/text()")[0], ",")
                rz_money = money.strip()
            except (IndexError, ValueError):
                rz_stage = rz_money = ""
            try:
                # Star-unpacking: the first element is the date, everything else is the investor part.
                rz_edate, *people = str.split(rz_item.xpath("./div/div/p[@class=\"finace-desc\"]/text()")[0], ",")
                rz_compy = ";".join(str.split(people[0], ",")).strip()
            except IndexError:
                rz_edate = rz_compy = ""
            id_code = self.util.MD5(response.xpath("//h1[@id=\"companyH1\"]/a/text()")[0] + rz_stage)
            comp_code = str.split(res.request.url, "/")[-3]
            crawl_time = self.util.get_now_time()
            t_rz = (id_code, rz_stage, rz_money, rz_edate, rz_compy, comp_code, crawl_time)
            self.util.insert2mysql(rz_stage, self.sql_rz(t_rz))

    def sql_info(self, tuple):
        sql_info = """insert into tmp_jobui_info_n(id, title, brief_intro, xingzhi, guimo, hangye, rongzi, quancheng, intro, job_count, comp_code, crawl_time) values('%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s')""" % tuple
        return sql_info

    def sql_job(self, tuple):
        sql_job = """insert into tmp_jobui_job_n(id, job_name, job_location, job_xueli, job_year, job_xingzhi, job_money, comp_code, crawl_time) values('%s','%s','%s','%s','%s','%s','%s','%s','%s')""" % tuple
        return sql_job

    def sql_rz(self, tuple):
        sql_rz = """insert into tmp_jobui_rz(id, rz_stage, rz_money, rz_edate, rz_compy, comp_code, crawl_time) values('%s','%s','%s','%s','%s','%s','%s')""" % tuple
        return sql_rz
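
# Standalone sketch (not a method of the original class) illustrating the pagination
# math used in handle_data above: jobui lists 15 jobs per page, and handle_data iterates
# range(1, page), so the computed bound must be one past the last page number.
def job_page_bound(job_count, per_page=15):
    full, rest = divmod(job_count, per_page)
    return full + 1 if rest == 0 else full + 2

assert job_page_bound(30) == 3  # range(1, 3) -> pages 1 and 2
assert job_page_bound(31) == 4  # range(1, 4) -> pages 1, 2 and 3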
class ZdbPedaily:
    def __init__(self):
        self.urls = ["https://zdb.pedaily.cn/enterprise/p{}/".format(i) for i in range(1, 770)]
        self.util = Util()
        self.headers = {
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3",
            "Accept-Encoding": "gzip, deflate, br",
            "Accept-Language": "zh-CN,zh;q=0.9",
            "Cache-Control": "max-age=0",
            "Connection": "keep-alive",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36",
            "Cookie": "__uid=1452122016; "
                      "__utmc=23980325; "
                      "ARRAffinity=197ae5372184c64aeca47f780a2e053f3a50366e2bda392cd4bfa3b38e39a929; "
                      "BAIDU_SSP_lcr=https://www.baidu.com/link?url=LHrB83UJlUcy6-MhfY_1I-IRwU723Vl0YUkuCsVJ5MlEYZUAvU2Mv5jTfYQ2ZC0u&wd=&eqid=b0d97bf1000ba11a000000065d3018e2; "
                      "Hm_lvt_25919c38fb62b67cfb40d17ce3348508=1563415171,1563433191,1563523111; "
                      "__utma=23980325.1444638820.1563415171.1563433192.1563523112.3; "
                      "__utmz=23980325.1563523112.3.3.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; "
                      "__fromtype=1; "
                      "accesstoken=PQZUMOXSH2; "
                      "Hm_lpvt_25919c38fb62b67cfb40d17ce3348508={}; "
                      "__utmb=23980325.10.10.1563523112",
            "Host": "zdb.pedaily.cn",
            "Referer": "https://zdb.pedaily.cn/",
            "Upgrade-Insecure-Requests": "1",
        }

    def get_shareholder(self, id_code, detail_html):
        # Shareholder table on the detail page
        shareholder_info = detail_html.xpath("//table[@class=\"shareholder-info\"]/tbody/tr")
        if shareholder_info:
            for si in shareholder_info:
                shareholder_name = si.xpath("./td[1]/text()")[0]
                shareholder_type = si.xpath("./td[2]/text()")[0]
                if si.xpath("./td[3]/text()"):
                    shareholder_money = si.xpath("./td[3]/text()")[0]
                else:
                    shareholder_money = ""
                crawl_time = self.util.get_now_time()
                sql_sharholder = "insert into INV_EVT_ENTP_SHH_INF(ID,SHH_INF,SHH_TYP,SSCR_CTRB_AMT,INPT_DT) values('%s', '%s', '%s', '%s','%s')" % (id_code, shareholder_name, shareholder_type, shareholder_money, crawl_time)
                self.util.insert2mysql("股东信息", sql_sharholder)

    def get_main_people(self, id_code, detail_html):
        # Key people listed on the detail page
        main_people = detail_html.xpath("//div[@class=\"business-people\"]/ul/li")
        if main_people:
            for p in main_people:
                mp_name = p.xpath("./h3/text()")[0]
                mp_position = p.xpath("./p/text()")[0]
                crawl_time = self.util.get_now_time()
                sql_main_people = "insert into INV_EVT_ENTP_MAIN_PSN_INF(ID,MAIN_PPL_NM,MAIN_PPL_POS,INPT_DT) values('%s', '%s', '%s','%s')" % (id_code, mp_name, mp_position, crawl_time)
                self.util.insert2mysql("主要人物", sql_main_people)

    def get_detail_info(self, detail_url):
        detail_res = self.util.get_req(url=detail_url, headers=self.headers)
        print(detail_res.status_code)
        if detail_res.status_code == 200:
            detail_html = self.util.get_xpath_obj(detail_res)
            # Basic company fields on the detail page
            company_name = detail_html.xpath("//h1/text()")[0]
            company_base = detail_html.xpath("//div[@class=\"box-fix-l\"]/div/ul/li[1]/text()")[0]
            company_reg_loc = detail_html.xpath("//div[@class=\"box-fix-l\"]/div/ul/li[2]/text()")[0]
            company_bound_date = detail_html.xpath("//div[@class=\"box-fix-l\"]/div/ul/li[3]/text()")[0]
            company_industry = detail_html.xpath("//div[@class=\"box-fix-l\"]/div/ul/li[4]/text()")[0]
            if detail_html.xpath("//div[@class=\"box-fix-l\"]/div/ul/li[@class=\"link\"]/a/text()"):
                company_site = detail_html.xpath("//div[@class=\"box-fix-l\"]/div/ul/li[@class=\"link\"]/a/text()")[0]
            else:
                company_site = ""
            # The company introduction sits in different nodes depending on the page template
            if detail_html.xpath('//div[@class="box-fix-l"]/p/text()'):
                company_intro = detail_html.xpath('//div[@class="box-fix-l"]/p/text()')[0]
            elif detail_html.xpath('//div[@class="box-fix-l"]/p/span/text()'):
                company_intro = detail_html.xpath('//div[@class="box-fix-l"]/p/span/text()')[0]
            elif detail_html.xpath('//div[@class="box-fix-l"]/pre/text()'):
                company_intro = detail_html.xpath('//div[@class="box-fix-l"]/pre/text()')[0]
            elif detail_html.xpath('//div[@class="box-fix-l"]/div/div/text()'):
                company_intro = detail_html.xpath('//div[@class="box-fix-l"]/div/div/text()')[0]
            elif detail_html.xpath('//div[@class="box-fix-l"]/div/text()'):
                company_intro = detail_html.xpath('//div[@class="box-fix-l"]/div/text()')[0]
            elif detail_html.xpath('//div[@id="cke_pastebin"]//text()'):
                company_intro = detail_html.xpath('//div[@id="cke_pastebin"]//text()')[0]
            elif detail_html.xpath('//div[@class="box-fix-l"]/ul/text()'):
                company_intro = detail_html.xpath('//div[@class="box-fix-l"]/ul/text()')[0]
            else:
                company_intro = ""
            # Business-registration block is only present for some companies
            if detail_html.xpath("//div[@id=\"business\"]"):
                legal_person = detail_html.xpath("//table[@class=\"base-info\"]/tr[1]/td[2]/text()")[0]
                founded_time = detail_html.xpath("//table[@class=\"base-info\"]/tr[1]/td[4]/text()")[0]
                registered_capital = detail_html.xpath("//table[@class=\"base-info\"]/tr[2]/td[2]/text()")[0]
                operational_authority = detail_html.xpath("//table[@class=\"base-info\"]/tr[2]/td[4]/text()")[0]
                registered_num = detail_html.xpath("//table[@class=\"base-info\"]/tr[3]/td[2]/text()")[0]
                approval_date = detail_html.xpath("//table[@class=\"base-info\"]/tr[3]/td[4]/text()")[0]
                organizational_code = detail_html.xpath("//table[@class=\"base-info\"]/tr[4]/td[2]/text()")[0]
                creditfcode = detail_html.xpath("//table[@class=\"base-info\"]/tr[4]/td[4]/text()")[0]
                identification_number = detail_html.xpath("//table[@class=\"base-info\"]/tr[5]/td[2]/text()")[0]
                registration_authority = detail_html.xpath("//table[@class=\"base-info\"]/tr[5]/td[4]/text()")[0]
                enterprise_type = detail_html.xpath("//table[@class=\"base-info\"]/tr[6]/td[2]/text()")[0]
            else:
                legal_person = ""
                founded_time = ""
                registered_capital = ""
                operational_authority = ""
                registered_num = ""
                approval_date = ""
                organizational_code = ""
                creditfcode = ""
                identification_number = ""
                registration_authority = ""
                enterprise_type = ""
            id_code = self.util.MD5(company_name + creditfcode)
            if detail_html.xpath("//*[@id=\"contact\"]"):
                contact = "".join(detail_html.xpath("//*[@id=\"contact\"]/p//text()")).replace("'", "").strip()
            else:
                contact = ""
            # Financing events linked from the detail page
            if detail_html.xpath("//div[@class=\"list-invest\"]/ul/li"):
                for rz_html in detail_html.xpath("//div[@class=\"list-invest\"]/ul/li"):
                    if rz_html.xpath("./div[@class=\"view\"]/a/@href")[0].startswith("http"):
                        rz_url = rz_html.xpath("./div[@class=\"view\"]/a/@href")[0]  # financing event opens in a new page
                    else:
                        rz_url = "https://zdb.pedaily.cn" + rz_html.xpath("./div[@class=\"view\"]/a/@href")[0]
                    print(rz_url)
                    self.headers["Cookie"] = self.headers["Cookie"].format(self.util.get_stamp())
                    rz_res = self.util.get_req(url=rz_url, headers=self.headers)
                    if rz_res.status_code == 200:
                        print("融资事件详情页请求成功")
                        rz_html = self.util.get_xpath_obj(rz_res.text)
                        # Investment-event fields
                        rz_title = rz_html.xpath("//h1/text()")[0]
                        rz_info = "".join(rz_html.xpath("//div[@class=\"info\"]/ul/li//text()"))
                        if rz_html.xpath("//div[@id=\"desc\"]/p/text()"):
                            rz_intro = rz_html.xpath("//div[@id=\"desc\"]/p/text()")[0]
                        else:
                            rz_intro = ""
                    else:
                        rz_title = ""
                        rz_info = ""
                        rz_intro = ""
                    crawl_time = self.util.get_now_time().replace("'", "")
                    sql_qyk = """insert into INV_EVT_ENTP_BAS_INF(ID,CMP_NM,ORG_TOT_DEPT,REG_PLC_PNT,CMP_SET_UP_TM,AFL_IDT,FORMAL_WEB,CMP_INTRO,LVRG_TTL,LVRG_INF,LVRG_INTRO,LGP_RPRS,SET_UP_TM,REG_CPT,OPR_RIT,REG_NBR,APRV_TM,ORG_ORG_CD_NBR,SOC_CRD_CD,TAX_PSN_RCG_NBR,REG_INSTT,ENTP_TYP,CTC_MTH,INPT_DT) values('%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s')""" % (id_code, company_name, company_base, company_reg_loc, company_bound_date, pymysql.escape_string(company_industry), company_site, company_intro, rz_title, rz_info, rz_intro, legal_person, founded_time, registered_capital, operational_authority, registered_num, approval_date, organizational_code, creditfcode, identification_number, registration_authority, enterprise_type, contact, crawl_time)
                    # print(sql_qyk)
                    self.util.insert2mysql("融资公司信息", sql_qyk)
                    self.get_main_people(id_code, detail_html)
                    self.get_shareholder(id_code, detail_html)

    def get_items_list(self, res):
        html = self.util.get_xpath_obj(res)
        for li in html.xpath("//ul[@id=\"enterprise-list\"]/li"):
            time.sleep(2)
            # Detail-page link
            if li.xpath("./div[1]/a/@href"):
                detail_url = "https://zdb.pedaily.cn" + li.xpath("./div[1]/a/@href")[0]
            else:
                continue
            print(detail_url)
            self.get_detail_info(detail_url)

    def run(self):
        self.headers["Cookie"] = self.headers["Cookie"].format(self.util.get_stamp())
        for url in self.urls:
            print("列表页:" + url + "开始爬取")
            res = self.util.get_req(url=url, headers=self.headers)  # fetch one list page
            self.get_items_list(res)
class JobuiProcess(object):
    def __init__(self):
        self.util = Util()
        self.url = "https://www.jobui.com/changecity/?from=http://www.jobui.com/cmp?keyword=&area=%E6%B7%B1%E5%9C%B3"
        self.headers = {
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3",
            "Accept-Encoding": "gzip, deflate, br",
            "Accept-Language": "zh-CN,zh;q=0.9",
            "Cache-Control": "no-cache",
            "Connection": "keep-alive",
            "Host": "www.jobui.com",
            "Pragma": "no-cache",
            "Referer": "https://www.jobui.com/cmp",
            "Cookie": "jobui_p=1565753151227_21067661; jobui_user_passport=yk15764787441006; jobui_area=%25E7%258F%25A0%25E6%25B5%25B7; Hm_lvt_8b3e2b14eff57d444737b5e71d065e72=1576719314,1576744537,1576805924,1577020459; Hm_lpvt_8b3e2b14eff57d444737b5e71d065e72=1577028389; TN_VisitCookie=344; TN_VisitNum=1",
            "Upgrade-Insecure-Requests": "1",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36"}
        self.sleep_time = 0.3
        # Queues shared by the worker processes
        self.url_queue = JoinableQueue()
        self.resp_queue = JoinableQueue()
        self.item_queue = JoinableQueue()
        # mongo config (credentials redacted in the source; the database and collection
        # names read by insert2mongoDB are placeholders here, not original values)
        self.mongo_host = "mongodb://*****:*****"
        self.mongo_client = "*****"
        self.mongo_db = "*****"

    # Top-level entry for parsing
    def parse(self):
        req_area = self.util.get_req(url=self.url, headers=self.headers)
        res_html = self.util.get_xpath_obj(req_area.text)
        for dd in res_html.xpath("//dl[@class=\"j-change\"]/dd")[-1:]:  # iterate over the province rows (dd)
            for area in dd.xpath("./a")[-1:]:  # iterate over the city links within the row
                every_url = "https:" + area.xpath("./@href")[0]  # request and process each city separately
                print(area.xpath("./text()")[0])
                # print("每个城市的url: " + every_url)
                self.parse_area_page(self.util.get_req(url=every_url, headers=self.headers))

    # Parse a city/area page, drilling down by industry, company type, size and feature
    def parse_area_page(self, response):
        area_html = self.util.get_xpath_obj(response.text)
        hangye = area_html.xpath("//div[@class=\"job-select-box\"]/ul/li[1]/div/div/a/text()")
        xingzhi = area_html.xpath("//div[@class=\"job-select-box\"]/ul/li[2]/div/div/a/text()")
        guimo = ["少于50", "50-99", "100-499", "500-999", "1000-4999", "5000-9999", "10000以上"]
        for a in hangye[1:]:
            for b in xingzhi[1:]:
                use_url = response.request.url + "&industry={}".format(self.util.url_encode(a)) + "&type={}".format(self.util.url_encode(b))
                r = self.util.get_req(url=use_url, headers=self.headers)
                # time.sleep(self.sleep_time)
                if self.util.get_xpath_obj(r.text).xpath("//div[@class=\"m-title-box\"]/div/span[@class=\"fr\"]//text()"):
                    data_count1 = self.util.get_xpath_obj(r.text).xpath("//div[@class=\"m-title-box\"]/div/span[@class=\"fr\"]//text()")[1].strip()
                    print("{}-{} 共有:{} 条数据".format(a, b, data_count1))
                    if int(data_count1) >= 1000:
                        for c in guimo:
                            use_url = use_url + "&worker={}".format(self.util.url_encode(c))
                            print(use_url)
                            r = self.util.get_req(url=use_url, headers=self.headers)
                            # time.sleep(self.sleep_time)
                            if self.util.get_xpath_obj(r.text).xpath("//div[@class=\"m-title-box\"]/div/span[@class=\"fr\"]//text()"):
                                data_count2 = self.util.get_xpath_obj(r.text).xpath("//div[@class=\"m-title-box\"]/div/span[@class=\"fr\"]//text()")[1].strip()
                                print("{}-{}-{} 共有:{} 条数据".format(a, b, c, data_count2))
                                if int(data_count2) >= 1000:
                                    tese = self.util.get_xpath_obj(r.text).xpath("//div[@class=\"job-select-box\"]/ul/li[last()]/div/div/a/text()")
                                    for d in tese[1:]:
                                        use_url = use_url + "&impression={}".format(self.util.url_encode(d))
                                        r = self.util.get_req(url=use_url, headers=self.headers)
                                        # time.sleep(self.sleep_time)
                                        if self.util.get_xpath_obj(r.text).xpath("//div[@class=\"m-title-box\"]/div/span[@class=\"fr\"]//text()"):
                                            data_count3 = self.util.get_xpath_obj(r.text).xpath("//div[@class=\"m-title-box\"]/div/span[@class=\"fr\"]//text()")[1].strip()
                                            if int(data_count3) > 1000:
                                                print("排列组合后数据大于一千, 具体数量: " + data_count3)
                                            else:
                                                print("{}-{}-{}-{} 共有:{} 条数据".format(a, b, c, d, data_count3))
                                                self.parse_list_page(use_url)
                                        else:
                                            self.parse_list_page(use_url)
                                else:
                                    self.parse_list_page(use_url)
                    else:
                        self.parse_list_page(use_url)

    # Parse a single list page and feed the detail URLs into the queue
    def parse_list_page(self, line):
        for i in range(1, 51):
            print("第{}页开始抓取".format(i))
            page_url = line + "&n={}".format(i)
            rep = self.util.get_xpath_obj(self.util.get_req(url=page_url, headers=self.headers))
            if rep.xpath("//div[@class=\"c-company-list\"]"):
                for item in rep.xpath("//div[@class=\"c-company-list\"]")[:-1]:
                    detail_url = item.xpath("./div[@class=\"company-content-box\"]/div/div[1]/a/@href")
                    self.url_queue.put("https://www.jobui.com" + detail_url[0])  # push the company detail URL onto the queue
                    # print("添加成功!!")
                if len(rep.xpath("//div[@class=\"c-company-list\"]")) <= 20:
                    return False
            else:
                return False

    # Consume company URLs from url_queue and build item dicts
    def handle_data(self):
        item = {}
        print("*" * 100)
        while True:
            try:
                time.sleep(self.sleep_time)
                url = self.url_queue.get()
                response = self.util.get_req(url=url, headers=self.headers)
                if response.status_code != 200:
                    self.url_queue.put(response.url)
            except Exception as e:
                raise e
            else:
                res_html = self.util.get_xpath_obj(response.text)
                if len(res_html.xpath("//div[@class=\"intro\"]//div[@class=\"company-info-item\"]")) == 3:  # unsure whether len() == 2 or other counts occur
                    item["title"] = res_html.xpath("//h1/a/text()")[0].strip().replace("\u2022", "")
                    if res_html.xpath("//div[@class=\"company-banner-segmetation\"]/p/text()"):
                        item["brief_intro"] = res_html.xpath("//div[@class=\"company-banner-segmetation\"]/p/text()")[0].strip()
                    else:
                        item["brief_intro"] = ""
                    item["xingzhi"] = "".join(res_html.xpath("//div[@class=\"company-nature\"]/text()")).strip()
                    item["guimo"] = "".join(res_html.xpath("//div[@class=\"company-worker\"]/text()")).strip()
                    item["hangye"] = ";".join([i.strip() for i in res_html.xpath("//div[@class=\"company-info-item\"][2]/span/a/text()")]).strip()
                    item["quancheng"] = "".join([i for i in res_html.xpath("//div[@class=\"company-info-item\"][3]/text()") if len(i.strip()) > 1]).strip().replace("...", "")
                    try:
                        item["intro"] = "".join(res_html.xpath("//*[@id=\"textShowMore\"]/text()")).strip()
                    except IndexError:
                        item["intro"] = ""
                else:
                    item["title"] = ""
                    item["brief_intro"] = ""
                    item["xingzhi"] = ""
                    item["guimo"] = ""
                    item["hangye"] = ""
                    item["quancheng"] = ""
                    item["intro"] = ""
                item["id_code"] = self.util.MD5(item["quancheng"])
                item["comp_code"] = str.split(response.request.url, "/")[-2]
                item["crawl_time"] = self.util.get_now_time()
                job_info = res_html.xpath("//div[@id=\"navTab\"]//a[2]/div[@class=\"banner-nav-slash\"]/text()")[0].strip()
                if job_info == "///":
                    job_count = 0
                else:
                    job_count = int(job_info.replace("个", "").strip())
                item["job_count"] = job_count
                if job_count > 0:
                    # jobui lists 15 postings per page
                    if job_count % 15 == 0:
                        page = int(item["job_count"] / 15) + 1
                    else:
                        page = int(item["job_count"] / 15) + 2
                    for i in range(1, page):
                        job_url = response.request.url + "jobs/p{}/".format(i)
                        self.handle_jobs(self.util.get_req(url=job_url, headers=self.headers))
                        time.sleep(0.1)
                rz = res_html.xpath("//div[@id=\"navTab\"]/div/a[last()]/@href")[0]  # financing detail path, without domain
                if "financing" in rz:
                    item["rongzi"] = res_html.xpath("//div[@id=\"navTab\"]/div/a[last()]/div[1]/text()")[0]
                    self.handle_rz_info(self.util.get_req(url="https://www.jobui.com" + rz, headers=self.headers))
                    time.sleep(0.1)
                else:
                    item["rongzi"] = ""
                self.item_queue.put(item)
                # self.util.insert2mysql("(企业信息)" + title, self.sql_info(t))
                with open("./Scrapyed.txt", 'a', encoding="utf8") as f:
                    f.write(str.split(response.request.url, "/")[-2] + "\n")
                self.url_queue.task_done()  # decrement the unfinished-task count

    def insert2mongoDB(self, item):
        myclient = pymongo.MongoClient(self.mongo_host)
        mydb = myclient[self.mongo_client]
        mycol = mydb[self.mongo_db]
        x = mycol.insert_one(item)

    def save_item(self):
        while True:
            item = self.item_queue.get()
            self.insert2mongoDB(item)
            self.item_queue.task_done()

    # Parse job postings
    def handle_jobs(self, res):
        # print(res.request.url)
        response = self.util.get_xpath_obj(res.text)
        while True:
            try:
                for item_node in response.xpath("//div[@class=\"j-joblist\"]/div[@class=\"c-job-list\"]//div[@class=\"job-simple-content\"]"):
                    comp_code = str.split(res.request.url, "/")[-4]
                    crawl_time = self.util.get_now_time()
                    job_name = item_node.xpath("./div[1]/a/h3/text()")[0]
                    job_location = item_node.xpath("./div[2]/div/span[1]/text()")[0]
                    job_xueli = ""
                    job_year = ""
                    job_xingzhi = ""
                    job_money = ""
                    for p in item_node.xpath("./div[2]/div/span[2]/text()")[0].split(" | "):
                        if "在读" in p:
                            job_xueli = p
                        if p in ["初中以上", "中专以上", "高中以上", "大专以上", "本科以上", "硕士以上", "应届毕业生"]:
                            job_xueli = p
                            continue
                        if "年" in p:
                            job_year = p
                            continue
                        if p in ["全职", "实习"]:
                            job_xingzhi = p
                            continue
                        for m in ["万", "元", "K", "-", "k", "~"]:
                            if m in p:
                                job_money = p
                                break
                    id_code = self.util.MD5(comp_code + job_name + job_location)
                    t_job = (id_code, job_name, job_location, job_xueli, job_year, job_xingzhi, job_money, comp_code, crawl_time)
                    self.util.insert2mysql(job_name, self.sql_job(t_job))
                break
            except Exception as e:
                print(e)
                time.sleep(10)

    # Parse financing information
    def handle_rz_info(self, res):
        print("+" * 100)
        # print(res.request.url)
        response = self.util.get_xpath_obj(res.text)
        # for rz_item in response.xpath("//div[@class=\"m-box\"]/div[2]"):
        for rz_item in response.xpath("//div[@class=\"m-box\"]/div[2]/div[@class=\"c-finace-list\"]"):
            try:
                rz_stage, money = str.split(rz_item.xpath("./div/div/h3/text()")[0], ",")
                rz_money = money.strip()
            except (IndexError, ValueError):
                rz_stage = rz_money = ""
            try:
                # Star-unpacking: the first element is the date, everything else is the investor part.
                rz_edate, *people = str.split(rz_item.xpath("./div/div/p[@class=\"finace-desc\"]/text()")[0], ",")
                rz_compy = ";".join(str.split(people[0], ",")).strip()
            except IndexError:
                rz_edate = rz_compy = ""
            id_code = self.util.MD5(response.xpath("//h1[@id=\"companyH1\"]/a/text()")[0] + rz_stage)
            comp_code = str.split(res.request.url, "/")[-3]
            crawl_time = self.util.get_now_time()
            t_rz = (id_code, rz_stage, rz_money, rz_edate, rz_compy, comp_code, crawl_time)
            self.util.insert2mysql(rz_stage, self.sql_rz(t_rz))

    def run(self):
        process_list = []
        # Build the url list
        for _ in range(100):
            t_parse_url_list = Process(target=self.parse)
            t_parse_url_list.daemon = True
            t_parse_url_list.start()
            t_parse_url_list.join()
        # Send requests and handle responses
        for i in range(5):
            ti_parse_url = Process(target=self.handle_data)
            process_list.append(ti_parse_url)
        for p in process_list:
            p.daemon = True  # run workers as daemons
            p.start()
        for q in [self.url_queue, self.resp_queue]:
            q.join()  # block the main process until the queues are drained

    def sql_info(self, tuple):
        sql_info = """insert into tmp_jobui_info_n(id, title, brief_intro, xingzhi, guimo, hangye, rongzi, quancheng, intro, job_count, comp_code, crawl_time) values('%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s')""" % tuple
        return sql_info

    def sql_job(self, tuple):
        sql_job = """insert into tmp_jobui_job_n(id, job_name, job_location, job_xueli, job_year, job_xingzhi, job_money, comp_code, crawl_time) values('%s','%s','%s','%s','%s','%s','%s','%s','%s')""" % tuple
        return sql_job

    def sql_rz(self, tuple):
        sql_rz = """insert into tmp_jobui_rz(id, rz_stage, rz_money, rz_edate, rz_compy, comp_code, crawl_time) values('%s','%s','%s','%s','%s','%s','%s')""" % tuple
        return sql_rz
class Jobui:
    def __init__(self):
        self.util = Util()
        self.url = "https://www.jobui.com/changecity/?from=http://www.jobui.com/cmp?keyword=&area=%E6%B7%B1%E5%9C%B3"
        self.headers = {
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3",
            "Accept-Encoding": "gzip, deflate, br",
            "Accept-Language": "zh-CN,zh;q=0.9",
            "Cache-Control": "no-cache",
            "Connection": "keep-alive",
            "Host": "www.jobui.com",
            "Pragma": "no-cache",
            "Referer": "https://www.jobui.com/cmp",
            "Cookie": "jobui_p=1565753151227_21067661; jobui_user_passport=yk15764787441006; jobui_area=%25E7%258F%25A0%25E6%25B5%25B7; Hm_lvt_8b3e2b14eff57d444737b5e71d065e72=1576719314,1576744537,1576805924,1577020459; Hm_lpvt_8b3e2b14eff57d444737b5e71d065e72=1577028389; TN_VisitCookie=344; TN_VisitNum=1",
            "Upgrade-Insecure-Requests": "1",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36"
        }
        self.sleep_time = 0.1
        self.data_num = 0

    def load(self):
        # Read the list of company codes that have already been scraped
        if os.path.exists("Scrapyed.txt"):
            with open("Scrapyed.txt", 'r', encoding="utf8") as f:
                return f.read()
        else:
            print("文件不存在!!!!")

    # Top-level entry for parsing
    def parse(self):
        req_area = self.util.get_req(url=self.url, headers=self.headers)
        res_html = self.util.get_xpath_obj(req_area.text)
        every_url = "https:" + res_html.xpath("//dl[@class=\"j-change\"]/dd[11]/a[1]/@href")[0]  # first city of the 11th province row
        self.data_num = 0
        print(res_html.xpath("//dl[@class=\"j-change\"]/dd[11]/a[1]//text()")[0])
        # print("每个城市的url: " + every_url)
        self.parse_area_page(self.util.get_req(url=every_url, headers=self.headers))
        print("此地区共抓取公司数量为:" + str(self.data_num))

    # Parse a city/area page, drilling down by industry, company type, size and feature
    def parse_area_page(self, response):
        area_html = self.util.get_xpath_obj(response.text)
        hangye = area_html.xpath("//div[@class=\"job-select-box\"]/ul/li[1]/div/div/a/text()")
        xingzhi = area_html.xpath("//div[@class=\"job-select-box\"]/ul/li[2]/div/div/a/text()")
        guimo = ["少于50", "50-99", "100-499", "500-999", "1000-4999", "5000-9999", "10000以上"]
        for a in hangye[1:]:
            for b in xingzhi[1:]:
                use_url = response.request.url + "&industry={}".format(self.util.url_encode(a)) + "&type={}".format(self.util.url_encode(b))
                # print(use_url)
                # e.g. https://www.jobui.com/cmp?area=哈尔滨&industry=新能源&worker=10000以上&type=民营公司
                r = self.util.get_req(url=use_url, headers=self.headers)
                time.sleep(self.sleep_time)
                if self.util.get_xpath_obj(r.text).xpath("//div[@class=\"m-title-box\"]/div/span[@class=\"fr\"]//text()"):
                    data_count1 = self.util.get_xpath_obj(r.text).xpath("//div[@class=\"m-title-box\"]/div/span[@class=\"fr\"]//text()")[1].strip()
                    print("{}-{} 共有:{} 条数据".format(a, b, data_count1))
                    if int(data_count1) >= 1000:
                        for c in guimo:
                            use_url = use_url + "&worker={}".format(self.util.url_encode(c))
                            print(use_url)
                            r = self.util.get_req(url=use_url, headers=self.headers)
                            time.sleep(self.sleep_time)
                            if self.util.get_xpath_obj(r.text).xpath("//div[@class=\"m-title-box\"]/div/span[@class=\"fr\"]//text()"):
                                data_count2 = self.util.get_xpath_obj(r.text).xpath("//div[@class=\"m-title-box\"]/div/span[@class=\"fr\"]//text()")[1].strip()
                                print("{}-{}-{} 共有:{} 条数据".format(a, b, c, data_count2))
                                if int(data_count2) >= 1000:
                                    tese = self.util.get_xpath_obj(r.text).xpath("//div[@class=\"job-select-box\"]/ul/li[last()]/div/div/a/text()")
                                    for d in tese[1:]:
                                        use_url = use_url + "&impression={}".format(self.util.url_encode(d))
                                        r = self.util.get_req(url=use_url, headers=self.headers)
                                        time.sleep(self.sleep_time)
                                        if self.util.get_xpath_obj(r.text).xpath("//div[@class=\"m-title-box\"]/div/span[@class=\"fr\"]//text()"):
                                            data_count3 = self.util.get_xpath_obj(r.text).xpath("//div[@class=\"m-title-box\"]/div/span[@class=\"fr\"]//text()")[1].strip()
                                            if int(data_count3) > 1000:
                                                print("排列组合后数据大于一千, 具体数量: " + data_count3)
                                            else:
                                                print("{}-{}-{}-{} 共有:{} 条数据".format(a, b, c, d, data_count3))
                                                self.parse_list_page(use_url)
                                        else:
                                            self.parse_list_page(use_url)
                                else:
                                    self.parse_list_page(use_url)
                    else:
                        self.parse_list_page(use_url)

    # Parse a single list page
    def parse_list_page(self, line):
        for i in range(1, 51):
            print("第{}页开始抓取".format(i))
            page_url = line + "&n={}".format(i)
            rep = self.util.get_xpath_obj(self.util.get_req(url=page_url, headers=self.headers))
            if rep.xpath("//div[@class=\"c-company-list\"]"):  # extraction rules for this part unchanged -- 2019.12.16
                for item in rep.xpath("//div[@class=\"c-company-list\"]")[:-1]:
                    detail_url = item.xpath("./div[@class=\"company-content-box\"]/div/div[1]/a/@href")
                    self.data_num += 1
                    if str.split(detail_url[0], "/")[-2] not in self.load():
                        if len(detail_url) > 0:
                            url = "https://www.jobui.com" + detail_url[0]
                            try:
                                self.handle_data(self.util.get_req(url=url, headers=self.headers))
                            except TimeoutError:
                                print("超时了!!!")
                            except Exception:
                                print("188 行出错了!!")
                                time.sleep(5)
                                self.handle_data(self.util.get_req(url=url, headers=self.headers))
                            time.sleep(1)
                    else:
                        # print("{} 该数据已入库".format(item.xpath("./div[@class=\"company-content-box\"]/div/div[1]/a/@title")[0].replace("怎么样", "")))
                        pass
                    time.sleep(0.1)
                if len(rep.xpath("//div[@class=\"c-company-list\"]")) <= 20:
                    return False
            else:
                print("该页无数据。。")
                return False
            print("第{}页抓取完毕!!".format(i))

    # Parse company information
    def handle_data(self, res):
        # print("-" * 100)
        # print(res.request.url)
        # print(res.status_code)
        if res.status_code == 200:
            response = self.util.get_xpath_obj(res.text)
            if len(response.xpath("//div[@class=\"intro\"]//div[@class=\"company-info-item\"]")) == 3:  # unsure whether len() == 2 or other counts occur
                title = response.xpath("//h1/a/text()")[0].strip().replace("\u2022", "")
                if response.xpath("//div[@class=\"company-banner-segmetation\"]/p/text()"):
                    brief_intro = response.xpath("//div[@class=\"company-banner-segmetation\"]/p/text()")[0].strip()
                else:
                    brief_intro = ""
                xingzhi = "".join(response.xpath("//div[@class=\"company-nature\"]/text()")).strip()
                guimo = "".join(response.xpath("//div[@class=\"company-worker\"]/text()")).strip()
                hangye = ";".join([i.strip() for i in response.xpath("//div[@class=\"company-info-item\"][2]/span/a/text()")]).strip()
                # item_info["rongzi"] = response.xpath("//div[@id=\"navTab\"]/div/a[last()]/div[1]/text()")[0]
                quancheng = "".join([i for i in response.xpath("//div[@class=\"company-info-item\"][3]/text()") if len(i.strip()) > 1]).strip().replace("...", "")
                try:
                    intro = "".join(response.xpath("//*[@id=\"textShowMore\"]/text()")).strip()
                except IndexError:
                    intro = ""
            else:
                title = ""
                brief_intro = ""
                xingzhi = ""
                guimo = ""
                hangye = ""
                quancheng = ""
                intro = ""
            id_code = self.util.MD5(quancheng)
            comp_code = str.split(res.request.url, "/")[-2]
            crawl_time = self.util.get_now_time()
            job_info = response.xpath("//div[@id=\"navTab\"]//a[2]/div[@class=\"banner-nav-slash\"]/text()")[0].strip()
            if job_info == "///":
                job_count = 0
            else:
                job_count = int(job_info.replace("个", "").strip())
            if job_count > 0:
                # jobui lists 15 postings per page
                if job_count % 15 == 0:
                    page = int(job_count / 15) + 1
                else:
                    page = int(job_count / 15) + 2
                for i in range(1, page):
                    job_url = res.request.url + "jobs/p{}/".format(i)
                    self.handle_jobs(self.util.get_req(url=job_url, headers=self.headers))
                    time.sleep(0.1)
            rz = response.xpath("//div[@id=\"navTab\"]/div/a[last()]/@href")[0]  # financing detail path, without domain
            if "financing" in rz:
                rongzi = response.xpath("//div[@id=\"navTab\"]/div/a[last()]/div[1]/text()")[0]
                self.handle_rz_info(self.util.get_req(url="https://www.jobui.com" + rz, headers=self.headers))
                time.sleep(0.1)
            else:
                rongzi = ""
            t = (id_code, title, brief_intro, xingzhi, guimo, hangye, rongzi, quancheng, pymysql.escape_string(intro), job_count, comp_code, crawl_time)
            self.util.insert2mysql("(企业信息)" + title, self.sql_info(t))
            with open("./Scrapyed.txt", 'a', encoding="utf8") as f:
                f.write(str.split(res.request.url, "/")[-2] + "\n")
        else:
            print(res.status_code)
            return False

    # Parse job postings
    def handle_jobs(self, res):
        # print(res.request.url)
        response = self.util.get_xpath_obj(res.text)
        while True:
            try:
                for item_node in response.xpath("//div[@class=\"j-joblist\"]/div[@class=\"c-job-list\"]//div[@class=\"job-simple-content\"]"):
                    comp_code = str.split(res.request.url, "/")[-4]
                    crawl_time = self.util.get_now_time()
                    job_name = item_node.xpath("./div[1]/a/h3/text()")[0]
                    job_location = item_node.xpath("./div[2]/div/span[1]/text()")[0]
                    job_xueli = ""
                    job_year = ""
                    job_xingzhi = ""
                    job_money = ""
                    for p in item_node.xpath("./div[2]/div/span[2]/text()")[0].split(" | "):
                        if "在读" in p:
                            job_xueli = p
                        if p in ["初中以上", "中专以上", "高中以上", "大专以上", "本科以上", "硕士以上", "应届毕业生"]:
                            job_xueli = p
                            continue
                        if "年" in p:
                            job_year = p
                            continue
                        if p in ["全职", "实习"]:
                            job_xingzhi = p
                            continue
                        for m in ["万", "元", "K", "-", "k", "~"]:
                            if m in p:
                                job_money = p
                                break
                    id_code = self.util.MD5(comp_code + job_name + job_location)
                    t_job = (id_code, job_name, job_location, job_xueli, job_year, job_xingzhi, job_money, comp_code, crawl_time)
                    self.util.insert2mysql(job_name, self.sql_job(t_job))
                break
            except Exception as e:
                print(e)
                time.sleep(10)

    # Parse financing information
    def handle_rz_info(self, res):
        print("+" * 100)
        # print(res.request.url)
        response = self.util.get_xpath_obj(res.text)
        # for rz_item in response.xpath("//div[@class=\"m-box\"]/div[2]"):
        for rz_item in response.xpath("//div[@class=\"m-box\"]/div[2]/div[@class=\"c-finace-list\"]"):
            try:
                rz_stage, money = str.split(rz_item.xpath("./div/div/h3/text()")[0], ",")
                rz_money = money.strip()
            except (IndexError, ValueError):
                rz_stage = rz_money = ""
            try:
                # Star-unpacking: the first element is the date, everything else is the investor part.
                rz_edate, *people = str.split(rz_item.xpath("./div/div/p[@class=\"finace-desc\"]/text()")[0], ",")
                rz_compy = ";".join(str.split(people[0], ",")).strip()
            except IndexError:
                rz_edate = rz_compy = ""
            id_code = self.util.MD5(response.xpath("//h1[@id=\"companyH1\"]/a/text()")[0] + rz_stage)
            comp_code = str.split(res.request.url, "/")[-3]
            crawl_time = self.util.get_now_time()
            t_rz = (id_code, rz_stage, rz_money, rz_edate, rz_compy, comp_code, crawl_time)
            self.util.insert2mysql(rz_stage, self.sql_rz(t_rz))

    def sql_info(self, tuple):
        sql_info = """insert into tmp_jobui_info_n(id, title, brief_intro, xingzhi, guimo, hangye, rongzi, quancheng, intro, job_count, comp_code, crawl_time) values('%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s')""" % tuple
        return sql_info

    def sql_job(self, tuple):
        sql_job = """insert into tmp_jobui_job_n(id, job_name, job_location, job_xueli, job_year, job_xingzhi, job_money, comp_code, crawl_time) values('%s','%s','%s','%s','%s','%s','%s','%s','%s')""" % tuple
        return sql_job

    def sql_rz(self, tuple):
        sql_rz = """insert into tmp_jobui_rz(id, rz_stage, rz_money, rz_edate, rz_compy, comp_code, crawl_time) values('%s','%s','%s','%s','%s','%s','%s')""" % tuple
        return sql_rz
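
# Hedged entry point (not part of the original source) for the single-process Jobui
# crawler above; Scrapyed.txt is the de-duplication file read by load() and appended
# to by handle_data(), and is assumed to exist alongside the script.
if __name__ == "__main__":
    jobui_spider = Jobui()
    jobui_spider.parse()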