Example #1
 def parse_detail(self, response):
     """
     parse detail page
     :param response:
     :return:
     """
     try:
         self.logger1.info('start to parse {}'.format(response.url))
         tr_list = response.xpath("//div[@class='fl ml20 mb10 mt10 f_yh']//table//tr")
         if tr_list:
             data_dict = {}
             for tr in tr_list:
                 title = clean_all_space(''.join(tr.xpath(".//th").xpath("string(.)").extract()).replace(":", "").replace(":", ""))
                 value = clean_all_space(''.join(tr.xpath(".//td").xpath("string(.)").extract()))
                 data_dict.update({title: value})
             item = self.result_item_assembler(response)
             item['_id'] = calc_str_md5(response.url)
             item['bbd_html'] = ''
             res_dict = self.convert_time(map_field(data_dict))
             if "xzcf" in self.name:
                 if "license_status" in res_dict.keys():
                     res_dict["punish_status"] = res_dict.pop("license_status", "")
             item['_parsed_data'] = res_dict
             yield item
             self.logger1.info('{} saved successfully'.format(response.url))
         else:
             self.logger1.info('retry {}'.format(response.url))
             yield Request(response.url, callback=self.parse_detail, errback=self.error_parse, dont_filter=True)
     except Exception as e:
         self.logger1.warning("Exception on save detail page {} {} {}".format(
             response.url, traceback.format_exc(), e))
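The helpers clean_all_space and calc_str_md5 are used throughout these examples but never defined in them. A minimal sketch of what they likely do, inferred from how they are called (the real project's implementations may differ; some call sites pass a list of extracted strings, so the sketch joins first):

import hashlib
import re


def clean_all_space(text):
    # Assumed behavior: drop every whitespace character, including the
    # full-width space (U+3000) common on Chinese government pages.
    if isinstance(text, (list, tuple)):
        text = ''.join(text)
    return re.sub(r'[\s\u3000]', '', text or '')


def calc_str_md5(s):
    # Assumed behavior: hex MD5 digest of a UTF-8 string; Example 1 uses it
    # to derive a stable _id from the page URL.
    return hashlib.md5(s.encode('utf-8')).hexdigest()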
Example #2
 def parse_detail(self, response):
     """
     parse the detail page.
     :param response:
     :return:
     """
     try:
         self.logger1.info("start to parse detail page: {}".format(response.url))
         all_lis = response.xpath('//div[@class="warp"]//li')
         if not all_lis:
             raise Exception('this page has changed! please check {}'.format(response.url))
         result_dict = {}
         for li in all_lis:
             key = re.sub(r':|:', r'', clean_all_space(''.join(li.xpath('./text()').extract())))
             value = xpath_extract_text_strip(li, './span')
             if '身份证' in key:
                 value = clean_all_space(value)
             if key.endswith(('期', '戳')):
                 # e.g. license decision date (许可决定日期), license expiry (许可截止期), punishment decision date (处罚决定日期), data update timestamp (数据时间更新戳)
                 _, value = convert_formal_date(value, need_time=True)
                 if len(value) == 16:
                     value += ':00'
                 elif len(value) == 10:
                     value += ' 00:00:00'
             result_dict[key] = value
         yield self.handle_result(response, result_dict)
         self.logger1.info("store data to database successfully!")
     except Exception:
         err_msg = traceback.format_exc()
         self.logger1.warning("failed to parse detail page, url {url} error:{err_msg}"
                              .format(url=response.url, err_msg=err_msg))
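Two more undefined helpers appear above: xpath_extract_text_strip and convert_formal_date. A hedged sketch consistent with the call sites (in Example 2 a 16-character result gets ':00' appended and a 10-character result gets ' 00:00:00', so convert_formal_date must return 'YYYY-MM-DD' or 'YYYY-MM-DD HH:MM' strings):

import re


def xpath_extract_text_strip(selector, xpath):
    # Assumed behavior: all text under the matched node, stripped.
    return ''.join(selector.xpath(xpath).xpath('string(.)').extract()).strip()


def convert_formal_date(raw, need_time=False):
    # Hypothetical sketch: normalize '2017年6月13日 9:05' or '2017/6/13' style
    # strings and return (ok, normalized). Callers append the missing seconds.
    nums = re.findall(r'\d+', raw or '')
    if len(nums) < 3:
        return False, raw
    out = '{}-{:0>2}-{:0>2}'.format(*nums[:3])            # 'YYYY-MM-DD', 10 chars
    if need_time and len(nums) >= 5:
        out += ' {:0>2}:{:0>2}'.format(nums[3], nums[4])  # 16 chars total
    return True, out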
Example #3
 def parse_detail(self, response):
     """
     parse detail page
     :param response:
     :return:
     """
     try:
         if "error" in response.url:
             return
         self.logger1.info('start to parse {}'.format(response.url))
         tr_list = response.xpath("//div[@class='display_con']//table//tr[position()>1]")
         if tr_list:
             data_dict = {}
             title_list = []
             value_list = []
             for tr in tr_list:
                 tds = tr.xpath(".//td")
                 title_list += [clean_all_space(''.join(td.xpath("string(.)").extract())) for td in tds[::2]]
                 value_list += [clean_all_space(''.join(td.xpath("string(.)").extract())) for td in tds[1::2]]
             data_dict.update(dict(zip(title_list, value_list)))
             item = self.result_item_assembler(response)
             item['_id'] = calc_str_md5(response.url)
             item['bbd_html'] = ''
             result_dict = map_field(data_dict)
             if "xzcf" in self.bbd_table:
                 result_dict["punish_status"] = result_dict.pop("license_status", "")
             item['_parsed_data'] = self.convert_time(result_dict)
             yield item
             self.logger1.info('{} saved successfully'.format(response.url))
         else:
             self.logger1.info('retry {}'.format(response.url))
             yield Request(response.url, callback=self.parse_detail, errback=self.error_parse, dont_filter=True)
     except Exception as e:
         self.logger1.warning("Exception on save detail page {} {} {}".format(
             response.url, traceback.format_exc(), e))
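Example 3 handles rows where several title/value cell pairs sit side by side in one <tr>; the slicing tds[::2] / tds[1::2] splits the alternating cells. A quick illustration with plain strings:

# Alternating cells in one row: title, value, title, value, ...
cells = ['许可编号', 'A-001', '许可状态', '有效']
titles, values = cells[::2], cells[1::2]
print(dict(zip(titles, values)))  # {'许可编号': 'A-001', '许可状态': '有效'}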
Example #4
 def convert_time(self, res_dict):
     """Normalize date-like values to 'YYYY-MM-DD 00:00:00' strings."""
     res = {}
     for title, value in res_dict.items():
         cleaned = clean_all_space(value)
         if "date" in title and len(cleaned) in (10, 11):
             # e.g. '2019年12月25日' or '2019/12/25' -> '2019-12-25'
             d_time = re.sub(r"[\u4e00-\u9fa5]|/|-", "-", cleaned).strip("-")
             y, m, d = time.strptime(d_time, "%Y-%m-%d")[0:3]
             res[title] = str(datetime.datetime(y, m, d))
         else:
             res[title] = value
     return res
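Tracing convert_time on a typical Chinese date shows why the strip('-') matters: the trailing 日 also becomes a dash.

import datetime
import re
import time

value = "2019年12月25日"                                         # 11 chars, passes the length check
d_time = re.sub(r"[\u4e00-\u9fa5]|/|-", "-", value).strip("-")  # '2019-12-25'
y, m, d = time.strptime(d_time, "%Y-%m-%d")[0:3]
print(str(datetime.datetime(y, m, d)))                          # '2019-12-25 00:00:00'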
Example #5
 def parse_detail(self, response):
     """
     parse the detail page.
     :param response:
     :return:
     """
     try:
         self.logger1.info("start to parse detail page: {}".format(
             response.url))
         all_lis = response.xpath('//div[@class="gs_detail"]/ul/li')
         if not all_lis:
             raise Exception(
                 'this page has changed! please check {}'.format(
                     response.url))
         result_dict = {}
         for li in all_lis:
             key = re.sub(
                 r':|:', r'',
                 clean_all_space(''.join(li.xpath('./text()').extract())))
             value = xpath_extract_text_strip(li, './label')
             if key.endswith('期'):
                 # e.g. license decision date (许可决定日期), license expiry (许可截止期), punishment decision date (处罚决定日期)
                 # sample value: 2017/6/13
                 if value:
                     year, month, day = re.findall(r'\d+', value)[:3]
                     value = '{}-{:0>2}-{:0>2} 00:00:00'.format(
                         year, month, day)
             result_dict[key] = value
         yield self.handle_result(response, result_dict)
         self.logger1.info("store data to database successfully!")
     except Exception:
         err_msg = traceback.format_exc()
         self.logger1.warning(
             "failed to parse detail page, url {url} error:{err_msg}".format(
                 url=response.url, err_msg=err_msg))
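The {:0>2} format spec in Example 5 left-pads single-digit months and days, so the sample date from the comment normalizes cleanly:

import re

year, month, day = re.findall(r'\d+', '2017/6/13')[:3]
print('{}-{:0>2}-{:0>2} 00:00:00'.format(year, month, day))  # 2017-06-13 00:00:00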
Example #6
 def parse(self, response):
     try:
         page_count = int(
             clean_all_space(''.join(
                 response.xpath(
                     ".//*[@id='pageForm']//input[contains(@name, 'ttPage')]//@value"
                 ).extract())))
         form_data = copy.deepcopy(self.form_data)
         form_data.update({"ttPage": "{}".format(page_count)})
         for page in range(1, page_count + 1):
             if page == 1:
                 yield Request(response.url,
                               callback=self.parse_link,
                               errback=self.error_parse,
                               dont_filter=True)
             else:
                 form_data.update({
                     "pageNum": "{}".format(page),
                     "prePage": "{}".format(page - 1),
                     "nextPage": "{}".format(page)
                 })
                 yield FormRequest(self.post_url,
                                   formdata=form_data,
                                   meta={"page": page},
                                   callback=self.parse_link,
                                   errback=self.error_parse,
                                   dont_filter=True)
     except Exception:
         err_msg = traceback.format_exc()
         self.logger1.warning(
             "Exception occurred on get the page counts[{url}], error:{err_msg}"
             .format(url=response.url, err_msg=err_msg))
Example #7
 def parse_detail(self, response):
     """
     parse detail page
     :param response:
     :return:
     """
     try:
         self.logger1.info('start to parse {}'.format(response.url))
         li_list = response.xpath("//div[@class='warp']//ul//li")
         data_dict = {}
         for li in li_list:
             data_str = clean_all_space(''.join(
                 li.xpath("string()").extract()).replace(":", ":"))
             title = data_str.split(":", 1)[0]
             value = ''.join(
                 li.xpath(".//span").xpath("string()").extract()).strip()
             data_dict.update({title: value})
         item = self.result_item_assembler(response)
         item['_id'] = calc_str_md5(response.url)
         item['bbd_html'] = ''
         item['_parsed_data'] = self.convert_time(map_field(data_dict))
         yield item
         self.logger1.info('{} saved successfully'.format(response.url))
     except Exception as e:
         self.logger1.warning(
             "Exception on save detail page {} {} {}".format(
                 response.url, traceback.format_exc(), e))
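map_field, called by most of these spiders, is also not shown. Judging from keys such as license_status and punish_status appearing after the call, it most likely maps the scraped Chinese titles onto an English schema. A hedged sketch with an illustrative, made-up mapping table:

# FIELD_MAP entries here are illustrative guesses, not the project's real table.
FIELD_MAP = {
    '许可状态': 'license_status',
    '处罚决定日期': 'punish_date',
    '行政相对人名称': 'company_name',
}


def map_field(data_dict):
    # Assumed behavior: rename known titles, pass unknown ones through.
    return {FIELD_MAP.get(k, k): v for k, v in data_dict.items()}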
Example #8
 def parse_item(self, response):
     try:
         self.logger1.info("start to parse detail page: {}".format(
             response.url))
         table = response.xpath('//table[@id="headContainer"]')
         if not table:
             raise Exception(
                 "the web page has changed in {}, please check".format(
                     response.url))
         tds_raw_text = [
             ''.join(td.xpath('.//text()').extract()).strip()
             for td in table.xpath('.//td[@colspan="2"]//td')
         ]
         part_result = {}
         for text in tds_raw_text:
             try:
                 key, value = re.split(r':|:', text, 1)
                 part_result[clean_all_space(key)] = value.strip()
             except Exception:
                 self.logger1.error(
                     "can't split it by ':': {}".format(text))
         part_result['案件名称'] = xpath_extract_text_strip(
             response, '//span[@id="lTitle"]')
         part_result['标题'] = part_result['案件名称']
         main_div = self.cleaner.clean_html(
             response.xpath('//div[@id="ContentRegion"]').extract()[0])
         slt = Selector(text=main_div)
         part_result['正文'] = xpath_extract_text_strip(
             slt, '//div[@id="ContentRegion"]')
         yield self.handle_result(response, part_result)
         self.logger1.info("store data to database successfully!")
     except Exception:
         err_msg = "fail to get detail info: {}".format(
             traceback.format_exc())
         self.logger1.error(err_msg)
Example #9
    def parse_list(self, response):
        """
        parse one page to yield each detail url
        :param response:
        :return:
        """
        try:
            which = response.meta['which']
            page = response.meta.get('page', "1")
            trs = response.xpath('//div[@class="xia3"]//tr')
            hrefs = [
                "".join(td.xpath('.//a/@href').extract())
                for td in trs.xpath('.//td[position()=1]')
            ]
            titles = [
                "".join(td.xpath('.//a/@title').extract())
                for td in trs.xpath('.//td[position()=1]')
            ]
            pubdates = [
                clean_all_space(td.xpath('string(.)').extract())
                for td in trs.xpath('.//td[last()]')
            ]
            hrefs_pubs_titles = zip(hrefs[0:-1], pubdates[0:-1],
                                    titles[0:-1])  # skip the last row
            for href, pubdate, title in hrefs_pubs_titles:
                url = response.urljoin(href)
                if "报告" in title or "公告" in title or "目录" in title:
                    self.logger.warning(
                        "{} page {} url {} is unrelated, wont crawl".format(
                            which, page, url))
                    continue
                yield Request(url=url,
                              callback=self.parse_detail,
                              errback=self.err_list,
                              meta={
                                  'which': which,
                                  'page': page,
                                  'public_date': pubdate,
                                  'title': title
                              })
                self.logger.info("yield {} page {} detail url {}".format(
                    which, page, url))

            # yield Request(  # bugfix test
            #     url="http://www.cbrc.gov.cn/chinese/home/docView/CFEDE60A320448CAA3FD920B1F7A1357.html",
            #     callback=self.parse_detail,
            #     errback=self.err_list,
            #     meta={'which': "银监局", 'page': 0, 'public_date': "2016-03-24", 'title': "宁波银监局行政处罚信息公开表"}
            # )
        except Exception as err:
            self.logger.error(
                "Exception occurred while parsing list page [{url}]: {error} trace: {tb}".format(
                    url=response.url, error=err, tb=traceback.format_exc()))
Example #10
    def parse_detail(self, response):
        """
        parse the detail page.
        :param response:
        :return:
        """
        try:
            self.logger1.info("start to parse detail page: {}".format(
                response.url))
            request_count = response.meta.get('request_count', 1)
            all_trs = response.xpath('//div[@class="gsxq"]//tr')
            assert all_trs, 'this page maybe changed! please check {}'.format(
                response.url)
            result_dict = {}
            for tr in all_trs:
                if not xpath_extract_text_no_spaces(tr):
                    continue
                key = re.sub(r':|:|\s', r'',
                             ''.join(tr.xpath('./td/b/text()').extract()))
                value = ''.join(tr.xpath('./td/text()').extract()).strip()
                if key.endswith(('期', '时间')):
                    # e.g. license decision date (许可决定日期), license expiry (许可截止期), update time (更新时间)
                    _, value = convert_formal_date(value, need_time=True)
                    if len(value) == 10:
                        value += ' 00:00:00'
                result_dict[key] = value

            # if no valid data was extracted, re-request this detail page, at most 10 times
            if not clean_all_space(''.join(result_dict.values())):
                if request_count < 10:
                    self.logger1.warning(
                        "the parsed data was empty, so request again!")
                    yield Request(response.url,
                                  callback=self.parse_detail,
                                  errback=self.error_parse,
                                  dont_filter=True,
                                  meta={'request_count': request_count + 1})
                else:
                    self.logger1.warning(
                        "this page has been requested more than 10 times, ignoring it!")
            else:
                yield self.handle_result(response, result_dict)
                self.logger1.info("store data to database successfully!")
        except Exception:
            err_msg = traceback.format_exc()
            self.logger1.warning(
                "failed to parse detail page, url {url} error:{err_msg}".format(
                    url=response.url, err_msg=err_msg))
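Example 10 adds two things on top of the earlier patterns: xpath_extract_text_no_spaces (undefined here; presumably the node's text with all whitespace removed, used to skip visually empty rows) and a bounded retry driven by a request_count meta key. A sketch of the helper under that assumption, reusing the clean_all_space sketch from Example 1's note:

def xpath_extract_text_no_spaces(selector):
    # Assumed behavior: concatenate all text under the node and strip every
    # space, so an empty string means the row carries no data.
    return clean_all_space(''.join(selector.xpath('.//text()').extract()))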
Example #11
 def parse_detail(self, response):
     """
     parse detail page
     :param response:
     :return:
     """
     try:
         self.logger1.info('start to parse {}'.format(response.url))
         tr_list = response.xpath(
             "//div[@class='fl ml20 mb10 mt10 f_yh']//table//tr")
         data_dict = {}
         for tr in tr_list:
             title = clean_all_space(''.join(
                 tr.xpath(".//th").xpath("string(.)").extract()).replace(
                     ":", "").replace(":", ""))
             value = clean_all_space(''.join(
                 tr.xpath(".//td").xpath("string(.)").extract()))
             data_dict.update({title: value})
         item = ParsedItem()
         self.common_item_assembler(response, item)
         item["_id"] = "{}_{}".format(response.url, uuid.uuid4())
         item['bbd_html'] = ''
         item['bbd_type'] = "credit_jx"
         item['rowkey'] = gen_rowkey(item, keys=('do_time', 'bbd_type'))
         res_dict = self.convert_time(map_field(data_dict))
         if "xzcf" in self.name:
             if "license_status" in res_dict.keys():
                 res_dict["punish_status"] = res_dict.pop(
                     "license_status", "")
         item['_parsed_data'] = res_dict
         yield item
         self.logger1.info('{} saved successfully'.format(response.url))
     except Exception as e:
         self.logger1.warning(
             "Exception on save detail page {} {} {}".format(
                 response.url, traceback.format_exc(), e))
Example #12
    def parse_detail(self, response):
        """
        parse the detail page.
        :param response:
        :return:
        """
        try:
            self.logger1.info("start to parse detail page: {}".format(
                response.url))
            request_count = response.meta.get('request_count', 1)
            keys = response.xpath('//p[@class="tab1-p-left"]/text()').extract()
            assert keys, "this page has changed, please check xpath: {}".format(
                response.url)
            keys = [re.sub(r':|:', '', key) for key in keys]
            values = response.xpath('//p[@class="tab1-p-right"]').xpath(
                'string()').extract()
            result_dict = dict(zip(keys, values))
            for key, value in result_dict.items():
                if key.endswith(('期', '戳')):
                    # e.g. license effective date (许可生效期), license expiry (许可截止期), publication date (公示日期)
                    _, value = convert_formal_date(value, need_time=True)
                    if len(value) == 10:
                        value += ' 00:00:00'
                    result_dict[key] = value

            # if no valid data was extracted, re-request this detail page, at most 10 times
            if not clean_all_space(''.join(result_dict.values())):
                if request_count < 10:
                    self.logger1.warning(
                        "the parsed data was empty, so request again!")
                    yield Request(response.url,
                                  callback=self.parse_detail,
                                  errback=self.err_parse_detail,
                                  dont_filter=True,
                                  meta={'request_count': request_count + 1})
                else:
                    self.logger1.warning(
                        "this page has been requested more than 10 times, ignoring it!")
            else:
                yield self.handle_result(response, result_dict)
                self.logger1.info("store data to database successfully!")
        except Exception:
            err_msg = traceback.format_exc()
            self.logger1.warning(
                "failed to parse detail page, url {url} error:{err_msg}".format(
                    url=response.url, err_msg=err_msg))
Example #13
    def parse_detail(self, response):
        """
        parse the detail page.
        :param response:
        :return:
        """
        try:
            self.logger1.info("start to parse detail page: {}".format(
                response.url))
            request_count = response.meta.get('request_count', 1)

            # the detail page comes in two formats
            if response.xpath('//td[@class="xzcf_jds"]'):
                result_dict = self.parse_data(response, 'td[@class="xzcf_mc"]')
                result_dict['正文'] = xpath_extract_text_strip(
                    response, '//td[@class="xzcf_jds"]')
            else:
                result_dict = self.parse_data(response, 'td[@class="xzcf_tb"]')
                result_dict['正文'] = result_dict.pop('行政处罚决定书(全文或摘要)',
                                                    '') or result_dict.pop(
                                                        '行政许可决定书(全文或摘要)', '')
            if not clean_all_space(''.join(result_dict.values())):
                if request_count < 10:
                    self.logger1.warning(
                        "the parsed data was empty, so request again!")
                    yield Request(response.url,
                                  callback=self.parse_detail,
                                  errback=self.err_parse_detail,
                                  dont_filter=True,
                                  meta={'request_count': request_count + 1})
                else:
                    self.logger1.warning(
                        "this page has been requested more than 10 times, ignoring it!")
            else:
                yield self.handle_result(response, result_dict)
                self.logger1.info("store data to database successfully!")
        except Exception:
            err_msg = traceback.format_exc()
            self.logger1.warning(
                "failed to parse detail page, url {url} error:{err_msg}".format(
                    url=response.url, err_msg=err_msg))
Example #14
 def parse_detail(self, response):
     """
     parse the detail page
     :param response:
     :return:
     """
     try:
         key = response.meta["key"]
         titles_tds = response.xpath("//table//tr//td[1]")
         values_tds = response.xpath("//table//tr//td[last()]")
         titles = [
             clean_all_space(td.xpath("string(.)").extract())
             for td in titles_tds
         ]
         values = [
             "".join(td.xpath("string(.)").extract()).strip()
             for td in values_tds
         ]
         if len(titles) != len(values):
             raise Exception(
                 "the length of titles and values are not equal, url {}".
                 format(response.url))
         tmp_dict = dict(zip(titles, values))
         res_dict = map_field(tmp_dict)
         if "xzcf" in self.name:
             if "license_status" in res_dict.keys():
                 res_dict["punish_status"] = res_dict.pop(
                     "license_status", "")
         item = ParsedItem()
         self.common_item_assembler(response, item)
         item["_id"] = "{}_{}".format(key, uuid.uuid4())
         item["bbd_html"] = ""
         item["_parsed_data"] = res_dict
         item["rowkey"] = gen_rowkey(item, keys=('do_time', 'bbd_type'))
         yield item
         self.logger1.info("one data {} save to mongodb".format(key))
     except Exception:
         err_msg = traceback.format_exc()
         self.logger1.error(
             "Exception on detail {url}, error:{err_msg}".format(
                 url=response.url, err_msg=err_msg))
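gen_rowkey appears in Examples 11, 14, and 15 with a keys=('do_time', 'bbd_type') argument; a plausible reading is that it joins the named item fields into a row key. A hedged sketch under that assumption:

def gen_rowkey(item, keys=('do_time', 'bbd_type')):
    # Assumed behavior: join the selected fields, e.g. '20190102_credit_jx'.
    # The real key layout (ordering, separators, hashing) is not shown here.
    return '_'.join(str(item.get(key, '')) for key in keys)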
Example #15
 def parse(self, source, *args, **kwargs):
     """
     parse logic
     :Keyword Arguments:
      self     --
      source   --
      *args    --
      **kwargs --
     :return: parsed dict
     """
     try:
         detail_html = clean_html(source.pop('bbd_html', ''))
         detail_url = source.get('bbd_url', '')
         self.logger.info('start to parse: {} {}'.format(self.parser_info, detail_url))
         response = Selector(text=detail_html)
         titles = [
             clean_all_space(re.sub(r':|:', r'', til.strip()))
             for til in response.xpath('//table//tr[position()>1]//th').
             xpath('string(.)').extract()
         ]
         values = [
             val.strip()
             for val in response.xpath('//table//tr[position()>1]//td').
             xpath('string(.)').extract()
         ]
         tmp_dict = dict(zip(titles, values))
         res_dict = map_field(tmp_dict)
         res_dict["bbd_seed"] = ""
         res_dict["_id"] = "{}".format(uuid.uuid4())
         res_dict["bbd_html"] = ""
         res_dict.update(source)
         res_dict.update(self.base_dict)
         res_dict["rowkey"] = gen_rowkey(res_dict)
         return res_dict
     except Exception as err:
         msg = '{} parse error! msg:{}'.format(self.parser_info,
                                               traceback.format_exc())
         self.logger.warning(msg)
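Unlike the spiders above, Example 15 is a post-hoc parser: it rebuilds a Selector from HTML that was stored earlier (bbd_html), so it can run outside Scrapy's crawl loop. A minimal standalone demonstration of that technique (the sample HTML is invented):

from scrapy.selector import Selector

html = ('<table><tr><th>表头</th></tr>'
        '<tr><th>许可编号:</th><td> A-001 </td></tr></table>')
sel = Selector(text=html)
titles = sel.xpath('//table//tr[position()>1]//th').xpath('string(.)').extract()
values = sel.xpath('//table//tr[position()>1]//td').xpath('string(.)').extract()
print(dict(zip([t.strip(':') for t in titles], [v.strip() for v in values])))
# {'许可编号': 'A-001'}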
Example #16
 def parse(self, response):
     try:
         page_str = clean_all_space("".join(
             response.xpath("//*[@class='pageinfo']").xpath(
                 "string(.)").extract()).strip())
         page_count = int(re.findall(r'共(\d+)页', page_str)[0])
         yield Request(response.url,
                       callback=self.parse_link,
                       errback=self.error_parse,
                       dont_filter=True)
         for page in range(2, page_count + 1):
             form_data = copy.deepcopy(self.form_data)
             form_data.update({"pageNo": "{}".format(page)})
             yield FormRequest(self.post_url,
                               formdata=form_data,
                               callback=self.parse_link,
                               errback=self.error_parse,
                               dont_filter=True)
     except Exception:
         err_msg = traceback.format_exc()
         self.logger1.warning(
             "Exception occurred on get the page counts[{url}], error:{err_msg}"
             .format(url=response.url, err_msg=err_msg))
Example #17
    def deal_tables(self, tables, result, response):
        """
        parse table
        :param tables:
        :param result:
        :param response:
        :return:
        """
        try:
            item = self.result_item_assembler(response)
            item['bbd_html'] = ''
            punish_code_num = len(
                re.findall(r'(行政处罚决定书文号|处罚决定书文号)', response.text))
            if response.meta['which'] != "银监会机关":  # no province for 银监会机关 (the CBRC head office)
                province_text = "".join(
                    response.xpath(
                        '//div[@id="docTitle"]/div[contains(., "文章来源")]/text()'
                    ).extract())
                province = re.search(r'文章来源.*?([\u4e00-\u9fa5]{2})',
                                     province_text).group(1)
            else:
                province = ""
            result.update({'province': province})
            if punish_code_num == 1 and len(tables) == 3:
                # handle the special case on "银监分局" (CBRC sub-bureau) pages
                trs = tables.xpath('.//tr')
                titles, values = [], []
                for tr in trs:
                    tds = tr.xpath('.//td')
                    if len(tds) == 1:
                        continue
                    # http://www.cbrc.gov.cn/chinese/home/docView/7E4321407D13435D99064AA007A538B7.html
                    titles.append(
                        clean_all_space(tds[-2].xpath('string(.)').extract()))
                    values.append("".join(
                        tds[-1].xpath('string(.)').extract()).strip())
                tmp_dict = dict(set(zip(titles, values)))
                tmp_dict.pop("", "")
                maped = map_field(tmp_dict)
                result.update(maped)
                item['_parsed_data'] = result
                yield item
                self.logger.info(
                    "parsed special table successfully, {} page {} url {} data {}".format(
                        response.meta['which'], response.meta['page'],
                        response.url, json.dumps(result)))
            else:
                for table in tables:
                    trs = table.xpath('.//tr')
                    td_firsts = [
                        clean_all_space(td.xpath('string(.)').extract())
                        for td in trs.xpath('.//td[position()=1]')
                        if len(trs.xpath('.//td')) > 1
                    ]
                    if "序号" in td_firsts:  #  one tr tag, one data
                        #  http://www.cbrc.gov.cn/chinese/home/docView/0C0C87BDA4C3431B925FF7BC2461FA55.html
                        titles = [clean_all_space(td.xpath('string(.)').extract()) \
                                  for td in trs[td_firsts.index("序号")].xpath('.//td')]
                        for tr in trs[td_firsts.index("序号") + 1:]:
                            values = [
                                "".join(
                                    td.xpath('string(.)').extract()).strip()
                                for td in tr.xpath('.//td')
                            ]
                            maped = map_field(dict(zip(titles, values)))
                            maped.pop("序号", "")
                            maped.pop("", "")
                            if re.search(r'[0-9]', maped['punish_date']):
                                punish_date = list(maped.pop('punish_date'))
                                new_punish_date = "{}-{}-{}".format(
                                    "".join(punish_date[:4]),
                                    "".join(punish_date[4:6]),
                                    "".join(punish_date[6:]))
                                maped['punish_date'] = new_punish_date
                            res_dict = copy.deepcopy(result)
                            res_dict.update(maped)
                            new_item = copy.deepcopy(item)
                            new_item['_id'] = "{}".format(uuid.uuid4())
                            new_item['_parsed_data'] = res_dict
                            yield new_item
                            self.logger.info(
                                "parsed one-row-per-record table successfully, {} page {} url {} data {}"
                                .format(response.meta['which'],
                                        response.meta['page'], response.url,
                                        json.dumps(res_dict)))
                    else:  # each <tr> holds one title: value pair
                        titles, values = [], []
                        for tr in trs:
                            tds = tr.xpath('.//td')
                            if len(tds) == 1:
                                continue
                            # http://www.cbrc.gov.cn/chinese/home/docView/D0952CF472AB443486104FB03E6FE862.html
                            titles.append(
                                clean_all_space(
                                    tds[-2].xpath('string(.)').extract()))
                            values.append("".join(
                                tds[-1].xpath('string(.)').extract()).strip())

                        if len(titles) == len(values):
                            tmp_dict = dict(zip(titles, values))
                        else:
                            raise Exception('titles and values length mismatch')
                        tmp_dict.pop("", "")
                        maped = map_field(tmp_dict)
                        if re.search(r'[0-9]',
                                     maped['punish_date']) and re.search(
                                         r'[年月日]', maped['punish_date']):
                            # new_punish_date = maped['punish_date'].replace('年', '-').replace('月', '-').replace('日', '')
                            new_punish_date = "{0}-{1:0>2}-{2:0>2}".format(
                                re.search(r'(\d{4}).*?年',
                                          maped['punish_date']).group(1),
                                re.search(r'年.*?(\d{1,2}).*?月',
                                          maped['punish_date']).group(1),
                                re.search(r'月.*?(\d{1,2}).*?日',
                                          maped['punish_date']).group(1),
                            )
                            maped['punish_date'] = new_punish_date
                        res_dict = copy.deepcopy(result)
                        res_dict.update(maped)
                        new_item = copy.deepcopy(item)
                        new_item['_id'] = "{}".format(uuid.uuid4())
                        new_item['_parsed_data'] = res_dict
                        yield new_item
                        self.logger.info(
                            "parsed table successfully, {} page {} url {} data {}".format(
                                response.meta['which'],
                                response.meta['page'], response.url,
                                json.dumps(res_dict)))
        except Exception:
            self.logger.error("error on deal_tables {} trace {}".format(
                response.url,
                traceback.format_exc(),
            ))
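The three regexes near the end of Example 17 rebuild a Chinese-formatted punishment date piecemeal; worth tracing on a concrete value, since the lazy quantifiers tolerate stray characters between each number and its unit:

import re

s = '2016年3月24日'
print('{0}-{1:0>2}-{2:0>2}'.format(
    re.search(r'(\d{4}).*?年', s).group(1),
    re.search(r'年.*?(\d{1,2}).*?月', s).group(1),
    re.search(r'月.*?(\d{1,2}).*?日', s).group(1)))  # 2016-03-24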
Example #18
 def parse_detail(self, response):
     try:
         page = response.meta['page']
         title_tds = response.xpath(
             '//div[contains(@class, "content")]/table//tr//td[@class="label"]'
         )
         sec_title_tds = response.xpath(
             '//div[contains(@class, "content")]/table//tr[@class="label"]//td'
         )
         value_tds = response.xpath(
             '//div[contains(@class, "content")]/table//tr//td[@class="value"]'
         )
         sec_value_tds = response.xpath(
             '//div[contains(@class, "content")]/table//tr[@class="value"]//td'
         )
         titles = [
             clean_all_space(td.xpath('string(.)').extract()).replace(
                 ":", "").replace(":", "") for td in title_tds
             if "相对人代码" not in ''.join(td.xpath('string(.)').extract())
         ]
         sec_titles = [
             clean_all_space(td.xpath('string(.)').extract())
             for td in sec_title_tds
         ]
         values = [
             clean_all_space(td.xpath('string(.)').extract())
             for td in value_tds
         ]
         sec_values = [
             clean_all_space(td.xpath('string(.)').extract())
             for td in sec_value_tds
         ]
         if len(titles) != len(values):
             raise Exception(
                 "the length of titles and values are not equal")
         if len(sec_titles) != len(sec_values):
             raise Exception(
                 "the length of sec_titles and sec_values are not equal")
         tmp_dict = dict(zip(titles, values))
         if not tmp_dict:
             self.logger1.error(
                 "url {} err html {} body {} status_code {}".format(
                     response.url, response.text, response.body,
                     response.status))
             return
         tmp_dict.update(dict(zip(sec_titles, sec_values)))
         res_dict = self.convert_time(map_field(tmp_dict))
         if "xzcf" in self.name:
             if "license_status" in res_dict.keys():
                 res_dict["punish_status"] = res_dict.pop(
                     "license_status", "")
         item = self.result_item_assembler(response)
         item["bbd_html"] = ""
         item["_id"] = "{}_{}".format(res_dict.get("company_name", ""),
                                      uuid.uuid4())
         item["_parsed_data"] = res_dict
         yield item
         self.logger1.info("{} page {} save data {} to mongodb".format(
             self.crawl_type, page, res_dict.get("company_name", "")))
     except Exception:
         err_msg = traceback.format_exc()
         self.logger1.error(
             "Exception on detail page {url}, error:{err_msg}".format(
                 url=response.url, err_msg=err_msg))