Example 1
    def parse(self, response):
        node_list = response.xpath("//tr[@class='ZjYhN018']")
        self.totlepage = int(response.xpath("//strong[3]/text()").extract()[0].encode(self.newEndcode))
        newbase_url = response.url[:response.url.rfind("/")] + '/'
        nowItem = 0
       for node in node_list:
           item = czScrapyItem()
            href = node.xpath("./td/a[2]/@href").extract()[0].replace("'", "")
           item["id"] = href.split('_')[1].split('.')[0]
           item["districtName"] = "余杭区"
           #print(href)

           url = newbase_url+href
           yield scrapy.Request(url, meta={'item': item}, callback=self.newparse)
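            # note: this request is yielded before the remaining fields are set;
            # meta={'item': item} passes a reference, and the download callback
            # only fires later, so newparse still sees the fully populated item.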
           item["noticePubDate"] = str(node.xpath("./td[3]/text()").extract()[0].encode(self.newEndcode), 'utf-8')
           #item["noticeTitle"] = self.new_item["noticeTitle"]
           self.newday = item["noticePubDate"]
           item["source"] = "杭州余杭政府门户网站"
           item["title"] = str(node.xpath("./td[2]/a[2]/@title").extract()[0].encode(self.newEndcode), 'utf-8')
           #print(node.xpath("./td[2]/a[2]/text()").extract()[0].encode(self.newEndcode).decode('utf-8'))
           item["typeName"] = "招标公告"
           item["url"] = url
           if self.nowpage == 0 and nowItem == 0:
               logging.info("发送email-------")
               send_email(receiver=[ '*****@*****.**', '*****@*****.**'],
               #send_email(receiver=['*****@*****.**'],
                          title=self.curr_time+'杭州余杭招标网站', cont='<h1>今日爬取地址{}\r\n<br>杭州余杭招标网站最新更新日期是{}</h1>'.format(response.url+"\r\n", self.newday))
            nowItem += 1
           yield item

        if self.nowpage < self.totlepage:
           logging.info("现在爬取第{}页内容".format(self.nowpage+1))
           self.nowpage += 1
           newurl = newbase_url+'index_'+ str(self.nowpage)+'.html'
           yield scrapy.Request(newurl, callback=self.parse)
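
All of the examples on this page lean on shared project scaffolding that is not shown here: a czScrapyItem with the fields assigned above, a send_email helper, and spider attributes such as newEndcode (an encoding name, evidently 'utf-8' given the str(bytes, 'utf-8') round-trips), base_url, nowpage, totlepage and curr_time. A minimal sketch, with every name inferred from usage rather than taken from the real project:

    import scrapy

    # Field names inferred from the assignments in the parse() methods on this page.
    class czScrapyItem(scrapy.Item):
        id = scrapy.Field()
        districtName = scrapy.Field()
        noticePubDate = scrapy.Field()
        source = scrapy.Field()
        title = scrapy.Field()
        typeName = scrapy.Field()
        url = scrapy.Field()

    # Placeholder for the project's mail helper; the signature is inferred from
    # the calls above (receiver list, subject title, HTML body).
    def send_email(receiver, title, cont):
        ...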
Example 2
    def parse(self, response):
        node_list = response.xpath("//div[@class='ewb-con-bd']/ul/li")
        # newbase_url = response.url[:response.url.rfind("/")] + '/'
        nowItem = 0
        # print(response.url[response.url.rfind('&Paging=')+1:] )
        page_now = int(response.url[response.url.rfind('/') +
                                    1:].split(".")[0])
        typename = ''
        for node in node_list:
            item = czScrapyItem()
            href = str(
                node.xpath("./div/a/@href").extract()[0].encode("utf-8"),
                'utf-8').replace("'", "")
            item["id"] = href[href.rfind('/') + 1:].split(".")[0]
            item["districtName"] = "平湖市"
            # print(href)

            url = self.base_url + href

            # print(url)
            item["noticePubDate"] = str(
                node.xpath("./span/text()").extract()[0].encode(
                    self.newEndcode), 'utf-8').replace('[',
                                                       '').replace(']', '')
            # item["noticeTitle"] = self.new_item["noticeTitle"]
            self.newday = item["noticePubDate"]

            item["source"] = "嘉兴平湖市"
            item["title"] = str(
                node.xpath("./div/a/@title").extract()[0].encode(
                    self.newEndcode), 'utf-8').strip()
            # print(node.xpath("./td[2]/a[2]/text()").extract()[0].encode(self.newEndcode).decode('utf-8'))

            item["typeName"] = "交易公告"

            self.typename = item["typeName"]
            item["url"] = url
            if page_now == 1 and nowItem == 0:
                logging.info(self.typename + "发送email-------")
                send_email(
                    receiver=['*****@*****.**', '*****@*****.**'],
                    # send_email(receiver=['*****@*****.**'],
                    title=self.curr_time + '嘉兴平湖市招标网站',
                    cont='<h1>今日爬取地址{}\r\n<br>嘉兴平湖市招标网站最新更新日期是{}</h1>'.format(
                        response.url + "\r\n", self.newday))
            nowItem += 1
            yield scrapy.Request(url,
                                 meta={'item': item},
                                 callback=self.newparse)

        if (response.xpath(
                "//li[@class='ewb-page-li ewb-page-hover'][2]/a/text()")):
            # page_now = 2
            page_now += 1
            logging.info(self.typename + "现在爬取第{}页内容".format(page_now))
            # self.nowpage += 1
            newurl = response.url[:response.url.rfind('/') +
                                  1] + str(page_now) + ".html"
            print(newurl)
            yield scrapy.Request(newurl, callback=self.parse)
Example 3
    def parse(self, response):
        #print(response.text)
        node_list = response.xpath(
            "//div[@id='4771635']/div[@class='default_pgContainer']/ul/li[position()<last()]"
        )

        newbase_url = response.url
        nowItem = 0
        typename = ''
        # print(node_list)
        for node in node_list:
            item = czScrapyItem()
            href = str(
                node.xpath("./h1/a/@href").extract()[0].encode(
                    self.newEndcode), self.newEndcode)
            # print(href)
            item["id"] = href.split('_')[2].split('.')[0]
            item["districtName"] = "舟山市"

            url = self.base_url + href.replace("'", "")
            #print(url)

            item["noticePubDate"] = str(
                node.xpath("./h3/text()").extract()[0].encode(self.newEndcode),
                'utf-8').strip().replace("(", "").replace(")", "")
            # item["noticeTitle"] = self.new_item["noticeTitle"]
            self.newday = item["noticePubDate"]
            item["source"] = "舟山市"
            #item["title"] = str(node.xpath("./a/text()").extract()[0].encode(self.newEndcode), 'utf-8')
            # print(node.xpath("./td[2]/a[2]/text()").extract()[0].encode(self.newEndcode).decode('utf-8'))
            item["typeName"] = "公告通知"
            typename = item["typeName"]
            item["url"] = url
            page_now = int(response.url.split('&')[1].split('=')[1])
            if (page_now == 1) and nowItem == 0:
                logging.info("发送email-------")
                send_email(
                    receiver=['*****@*****.**', '*****@*****.**'],
                    # send_email(receiver=['*****@*****.**'],
                    title=self.curr_time + '舟山市',
                    cont='<h1>今日爬取地址{}\r\n<br>舟山市最新更新日期是{}</h1>'.format(
                        response.url + "\r\n", self.newday))
            nowItem += 1

            yield scrapy.Request(url,
                                 meta={'item': item},
                                 callback=self.newparse)

        if response.xpath("//a[@class='default_pgBtn default_pgNext']/@href"):
            page = int(response.url.split('&')[1].split('=')[1])
            logging.info(typename + "现在爬取第{}页内容".format(page + 1))
            #print(str(self.nowpage)+'-----'+response.url)
            page += 1
            newurl = newbase_url[:newbase_url.index('&') +
                                 1] + 'pageNum=' + str(page)
            # print(newurl)
            yield scrapy.Request(newurl, callback=self.parse)
Example 4
    def parse(self, response):
        #print(response.text)
        node_list = response.xpath("//div[@id='4793621']/div/li")
        if (self.nowpage_1532234 == 1) and ('1532234' in response.url):
            self.totlepage_1532234 = int(
                response.xpath("//span[@class='default_pgTotalPage']/text()").extract()[0].encode(self.newEndcode))
        if (self.nowpage_1532235 == 1) and ('1532235' in response.url):
            self.totlepage_1532235 = int(
                response.xpath("//span[@class='default_pgTotalPage']/text()").extract()[0].encode(self.newEndcode))
        #self.totlepage = int(response.xpath("//span[@class='default_pgTotalPage']/text()").extract()[0].encode(self.newEndcode))
        newbase_url = response.url
        nowItem = 0
        for node in node_list:
            item = czScrapyItem()
            href = str(node.xpath("./a/@href").extract()[0].encode(self.newEndcode),self.newEndcode)
            item["id"] = href.split('_')[2].split('.')[0]
            item["districtName"] = "余杭区"
            #print(href)

            url = self.base_url + href.replace("'", "")
            #print(url)
            yield scrapy.Request(url, meta={'item': item}, callback=self.newparse)
            item["noticePubDate"] = str(node.xpath("./a/i/text()").extract()[0].encode(self.newEndcode), 'utf-8').replace('[', '').replace(']', '')
            # item["noticeTitle"] = self.new_item["noticeTitle"]
            self.newday = item["noticePubDate"]
            item["source"] = "杭州市余杭区人民政府"
            item["title"] = str(node.xpath("./a/span/text()").extract()[0].encode(self.newEndcode), 'utf-8')
            # print(node.xpath("./td[2]/a[2]/text()").extract()[0].encode(self.newEndcode).decode('utf-8'))
            if '1532234' in newbase_url:
                item["typeName"] = "招标公告"
            else:
                item["typeName"] = "中标公示"
            item["url"] = url
            if (self.nowpage_1532234 == 1 or self.nowpage_1532235 == 1) and nowItem == 0:
                logging.info("发送email-------")
                send_email(receiver=[ '*****@*****.**', '*****@*****.**'],
                           # send_email(receiver=['*****@*****.**'],
                           title=self.curr_time + '杭州余杭招标网站',
                           cont='<h1>今日爬取地址{}\r\n<br>杭州余杭招标网站最新更新日期是{}</h1>'.format(response.url + "\r\n", self.newday))
            nowItem += 1
            yield item

        if (self.nowpage_1532234 < self.totlepage_1532234) and ('1532234' in newbase_url):
            logging.info("招标公示现在爬取第{}页内容".format(self.nowpage_1532234 + 1))
            self.nowpage_1532234 += 1
            newurl = newbase_url[:newbase_url.index('&')+1] + 'pageNum=' + str(self.nowpage_1532234)
            #print(newurl)
            yield scrapy.Request(newurl, callback=self.parse)
        if (self.nowpage_1532235 < self.totlepage_1532235) and ('1532235' in newbase_url):
            logging.info("中标公示现在爬取第{}页内容".format(self.nowpage_1532235 + 1))
            self.nowpage_1532235 += 1
            newurl = newbase_url[:newbase_url.index('&')+1] + 'pageNum=' + str(self.nowpage_1532235)
            #print(newurl)
            yield scrapy.Request(newurl, callback=self.parse)
Example 5
    def parse(self, response):
        # print(response.text)
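        # the XML feed wraps each record in CDATA; strip the markers so a fresh
        # Selector can parse the inner markup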
        text = response.text.replace('<![CDATA[', '').replace(']]>', '')
        sel = scrapy.Selector(text=text)
        node_list = sel.xpath("//record/li")
        #node_list = response.xpath("//record/tr")

        newbase_url = response.url
        nowItem = 0
        typename = ''
        page_now = 1

        for node in node_list:
            item = czScrapyItem()
            href = str(
                node.xpath("./a/@href").extract()[0].encode(self.newEndcode),
                self.newEndcode)
            #print(href)
            item["id"] = href.split('_')[2].split('.')[0]
            item["districtName"] = "港区"

            url = self.new_url + href.replace("'", "")
            #print(url)

            item["noticePubDate"] = str(
                node.xpath("./span/text()").extract()[0].encode(
                    self.newEndcode), 'utf-8').strip()
            # item["noticeTitle"] = self.new_item["noticeTitle"]

            self.newday = item["noticePubDate"]
            item["source"] = "嘉兴港区"
            item["title"] = str(
                node.xpath("./a/@title").extract()[0].encode(self.newEndcode),
                'utf-8')
            # print(node.xpath("./td[2]/a[2]/text()").extract()[0].encode(self.newEndcode).decode('utf-8'))

            item["typeName"] = "招标-中标公告"
            typename = item["typeName"]
            item["url"] = url
            #page_now = int(response.url.split('&')[1].split('=')[1])
            if (page_now == 1) and nowItem == 0:
                logging.info("发送email-------")
                send_email(
                    receiver=['*****@*****.**', '*****@*****.**'],
                    # send_email(receiver=['*****@*****.**'],
                    title=self.curr_time + '嘉兴港区',
                    cont='<h1>今日爬取地址{}\r\n<br>嘉兴港区最新更新日期是{}</h1>'.format(
                        response.url + "\r\n", self.newday))
            nowItem += 1
            page_now += 1

            yield scrapy.Request(url,
                                 meta={'item': item},
                                 callback=self.newparse)
Example 6
    def parse(self, response):
        node_list = response.xpath("//div[@class='ListNews FloatL hidden']/ul/li")
        newbase_url = response.url[:response.url.rfind("/")] + '/'
        nowItem = 0
        page_len = len(response.url.split("_"))
        typename = ''
        for node in node_list:
            item = czScrapyItem()
            href = str(node.xpath("./a/@href").extract()[0].encode("utf-8"), 'utf-8').replace("'", "")
            item["id"] = href[href.rfind("/")+1:].split(".")[0]
            item["districtName"] = "德清县"
            #print(href)

            url = self.base_url + href

            #print(url)
            item["noticePubDate"] = str(node.xpath("./span/text()").extract()[0].encode(self.newEndcode), 'utf-8')
            # item["noticeTitle"] = self.new_item["noticeTitle"]
            self.newday = item["noticePubDate"]
            item["source"] = "湖州德清县"
            item["title"] = str(node.xpath("./a/text()").extract()[0].encode(self.newEndcode), 'utf-8').strip()
            # print(node.xpath("./td[2]/a[2]/text()").extract()[0].encode(self.newEndcode).decode('utf-8'))
            #if 'zbgg' in response.url :
               # item["typeName"] = "招标公告"
            #elif 'zbgs' in response.url :
            #    item["typeName"] = "中标公示"
            #else:
              #  item["typeName"] = "镇街道信息"
            item["typeName"] = "集体产权"
            self.typename = item["typeName"]
            item["url"] = url
            if page_len == 1 and nowItem == 0:
                logging.info("发送email-------")
                send_email(receiver=[ '*****@*****.**', '*****@*****.**'],
                           # send_email(receiver=['*****@*****.**'],
                           title=self.curr_time + '湖州德清县招标网站',
                           cont='<h1>今日爬取地址{}\r\n<br>湖州德清县招标网站最新更新日期是{}</h1>'.format(response.url + "\r\n", self.newday))
            nowItem += 1
            yield scrapy.Request(url, meta={'item': item}, callback=self.newparse)

        if not response.xpath("//div[@class='pgPanel clearfix']/div/a[3]/@disabled"):
            page_now = 2
            if page_len > 1:
                page_now = int(response.url.split("_")[1].split(".")[0]) + 1
            logging.info(self.typename + "现在爬取第{}页内容".format(page_now))
            #self.nowpage += 1
            newurl = newbase_url + 'index_' + str(page_now) + '.htm'
            print(newurl)
            yield scrapy.Request(newurl, callback=self.parse)
Example 7
    def parse(self, response):
        node_list = response.xpath("//div[@class='ListItem']")
        # newbase_url = response.url[:response.url.rfind("/")] + '/'
        nowItem = 0
        # print(response.url[response.url.rfind('&Paging=')+1:] )
        page_now = int(response.url.split("&")[1].split("=")[1])
        typename = ''
        #print(response.url)
        for node in node_list:
            item = czScrapyItem()
            href = str(
                node.xpath("./div[@class='NoWrapHidden ListItemTitle']/a/@href"
                           ).extract()[0].encode("utf-8"),
                'utf-8').replace("'", "")
            #print(href)
            if '=' not in href:
                continue
            item["id"] = href.split("=")[1]
            item["districtName"] = "路桥区"
            #print(href)

            url = href

            #print(url)
            item["noticePubDate"] = str(
                node.xpath("./div[@class='ListItemDate']/text()").extract()
                [0].encode(self.newEndcode),
                'utf-8').strip().replace('[', '').replace(']', '')
            # item["noticeTitle"] = self.new_item["noticeTitle"]
            self.newday = item["noticePubDate"]

            item["source"] = "台州路桥区"
            item["title"] = str(
                node.xpath(
                    "./div[@class='NoWrapHidden ListItemTitle']/a/@title").
                extract()[0].encode(self.newEndcode), 'utf-8').strip()
            # print(node.xpath("./td[2]/a[2]/text()").extract()[0].encode(self.newEndcode).decode('utf-8'))

            item["typeName"] = "投标公告"

            self.typename = item["typeName"]
            item["url"] = url
            if page_now == 1 and nowItem == 0:
                logging.info(self.typename + "发送email-------")
                send_email(
                    receiver=['*****@*****.**', '*****@*****.**'],
                    # send_email(receiver=['*****@*****.**'],
                    title=self.curr_time + '台州路桥区招标网站',
                    cont='<h1>今日爬取地址{}\r\n<br>台州路桥区招标网站最新更新日期是{}</h1>'.format(
                        response.url + "\r\n", self.newday))
            nowItem += 1
            # yield scrapy.Request(url, meta={'item': item}, callback=self.newparse)
            if 'InfoPub' in href:
                item["url"] = self.base_url + url
                yield scrapy.Request(self.base_url + url,
                                     meta={'item': item},
                                     callback=self.newparse)
            elif 'zjzfcg' in href:
                #item["id"] = href[href.index('=') + 1:]
                para = {
                    'noticeId': item["id"],
                    # 'url': 'http://notice.zcygov.cn/new/noticeDetail'
                    'url': 'noticeDetail'
                }
                url = 'http://manager.zjzfcg.gov.cn/cms/api/cors/remote/results?' + urlencode(
                    para)
                yield scrapy.Request(url,
                                     meta={'item': item},
                                     callback=self.newparse_zf)
            else:
                continue

        if not response.xpath(
                "//*[@id='AspNetPager1']/div[2]/a[11]/@disabled"):
            # page_now = 2
            page_now += 1
            logging.info(self.typename + "现在爬取第{}页内容".format(page_now))
            # self.nowpage += 1
            newurl = response.url[:response.url.rfind('&') +
                                  1] + "CurrentPageIndex=" + str(page_now)
            print(newurl)
            yield scrapy.Request(newurl, callback=self.parse)
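
In the zjzfcg branch above, urlencode simply turns the para dict into the query string of the cross-origin results endpoint. A quick illustration (the noticeId value is hypothetical):

    from urllib.parse import urlencode

    para = {'noticeId': '12345', 'url': 'noticeDetail'}  # hypothetical id
    print('http://manager.zjzfcg.gov.cn/cms/api/cors/remote/results?' + urlencode(para))
    # -> http://manager.zjzfcg.gov.cn/cms/api/cors/remote/results?noticeId=12345&url=noticeDetail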
Example 8
    def parse(self, response):
        #print(response.text)
        node_list = response.xpath(
            "//ul[@class='content_right_ul']/li[@class='tab_box']")
        #print(response.xpath("//table[@class='tb_title']/*/tr/td[2]/a[last()]/text()"))
        self.totlepage = int(
            response.xpath(
                "//table[@class='tb_title']/*/tr/td[2]/a[last()]/text()").
            extract()[0].encode(self.newEndcode))
        nowitem = 0
        for node in node_list:
            item = czScrapyItem()
            href = str(
                node.xpath("./a/@href").extract()[0].encode("utf-8"), 'utf-8')
            #print(href)
            #print(href.split('_'))
            if 'fuyang' in href:
                item["id"] = href.split('_')[2].split('.')[0]
                yield scrapy.Request(href,
                                     meta={'item': item},
                                     callback=self.newparse)
                #continue
            elif 'zjzfcg' in href:
                item["id"] = href[href.index('=') + 1:]
                para = {
                    'noticeId': item["id"],
                    # 'url': 'http://notice.zcygov.cn/new/noticeDetail'
                    'url': 'noticeDetail'
                }
                url = 'http://manager.zjzfcg.gov.cn/cms/api/cors/remote/results?' + urlencode(
                    para)
                yield scrapy.Request(url,
                                     meta={'item': item},
                                     callback=self.newparse_zf)
            else:
                continue
            item["districtName"] = "富阳区"

            item["noticePubDate"] = str(
                node.xpath("./a/span[2]/text()").extract()[0].encode(
                    self.newEndcode), 'utf-8')
            item["source"] = "杭州市富阳区人民政府门户网站"
            item["title"] = str(
                node.xpath("./a/span/span/@mc").extract()[0].encode(
                    self.newEndcode), 'utf-8')
            item["typeName"] = "公告公示"
            item["url"] = href
            self.newday = item["noticePubDate"]

            if self.nowpage == 1 and nowitem == 0:
                logging.info("发送email-------")
                send_email(
                    receiver=['*****@*****.**', '*****@*****.**'],
                    #send_email(receiver=['*****@*****.**'],
                    title=self.curr_time + '杭州市富阳区招标网站',
                    cont='<h1>今日爬取地址{}\r\n<br>杭州富阳区招标网站最新更新日期是{}</h1>'.format(
                        response.url + "\r\n", self.newday))
            nowitem += 1
            #print(item)
            yield item
        if self.nowpage <= self.totlepage:
            self.nowpage += 1
            headers = {
                'Cookie':
                'acw_tc=784e2c9415668935815022229e4756c7bb23a8158617ac49b5f06668370fa6; SERVERID=e146d554a29ee4143047c903abfbc3da|1566976932|1566976712'
            }
            logging.info("现在爬取第{}页内容".format(self.nowpage))
            yield scrapy.FormRequest(
                url=
                'http://www.fuyang.gov.cn//module/xxgk/search.jsp?area=&infotypeId=H001&vc_title=&vc_number=&vc_filenumber=',
                headers=headers,
                formdata={
                    # 'infotypeId': '0',  # a bool True is not accepted here (the requests module allows it)
                    'fbtime': '',  # an int 1 is not accepted here either (the requests module allows it)
                    'vc_all': '',
                    'vc_filenumber': '',
                    'vc_number': '',
                    'currpage': str(self.nowpage),
                    'vc_title': '',
                    'sortfield': ", compaltedate: 0",
                    'jdid': '2754',
                    'divid': 'div1416545',
                    'area': '',
                    'infotypeId': 'H001',
                    'texttype': '',
                    'sortfield': ',compaltedate:0'
            },  # formdata plays the role of data in the requests module; keys and values must both be strings
                callback=self.parse)
Example 9
    def parse(self, response):
        # print(response.text)
        typename = ''
        node_list = response.xpath(
            "//div[@class='default_pgContainer']/table/tbody/tr")
        #if (self.nowpage_146== 1) & ('1651779' in response.url) :
        #    self.totlepage_146 = int(
        #     response.xpath("//span[@class='default_pgTotalPage']/text()").extract()[0].encode(self.newEndcode))
        #if (self.nowpage_149== 1 )& ('1651780' in response.url ):
        #   self.totlepage_149 = int(
        #     response.xpath("//span[@class='default_pgTotalPage']/text()").extract()[0].encode(self.newEndcode))
        newbase_url = response.url
        nowItem = 0
        for node in node_list:
            #print(111)
            item = czScrapyItem()
            href = str(
                node.xpath("./td/div[2]/a/@href").extract()[0].encode(
                    self.newEndcode), self.newEndcode)
            item["id"] = href.split('_')[2].split('.')[0]
            item["districtName"] = "杭州市"
            # print(href)

            url = self.base_url + href.replace("'", "")
            #print(url)
            yield scrapy.Request(url,
                                 meta={'item': item},
                                 callback=self.newparse)
            item["noticePubDate"] = str(
                node.xpath("./td[2]/text()").extract()[0].encode(
                    self.newEndcode), 'utf-8').replace('[',
                                                       '').replace(']', '')
            # item["noticeTitle"] = self.new_item["noticeTitle"]
            self.newday = item["noticePubDate"]
            item["source"] = "杭州财政"
            item["title"] = str(
                node.xpath("./td/div[2]/a/@title").extract()[0].encode(
                    self.newEndcode), 'utf-8')
            # print(node.xpath("./td/div[2]/a/@title").extract()[0].encode(self.newEndcode).decode('utf-8'))
            if '1651779' in newbase_url:
                item["typeName"] = "招标公告"
            else:
                item["typeName"] = "中标公告"
            typename = item["typeName"]
            item["url"] = url
            page_now = int(response.url.split('&')[1].split('=')[1])
            if (page_now == 1) and nowItem == 0:
                logging.info("发送email-------")
                send_email(
                    receiver=['*****@*****.**', '*****@*****.**'],
                    # send_email(receiver=['*****@*****.**'],
                    title=self.curr_time + '杭州财政',
                    cont='<h1>今日爬取地址{}\r\n<br>杭州财政网站最新更新日期是{}</h1>'.format(
                        response.url + "\r\n", self.newday))
            nowItem += 1
            yield item

        if response.xpath("//a[@class='default_pgBtn default_pgNext']/@href"):
            page = int(response.url.split('&')[1].split('=')[1])
            logging.info(typename + "现在爬取第{}页内容".format(page + 1))
            # print(str(self.nowpage)+'-----'+response.url)
            page += 1
            newurl = newbase_url[:newbase_url.index('&') +
                                 1] + 'pageNum=' + str(page)
            # print(newurl)
            yield scrapy.Request(newurl, callback=self.parse)
Example 10
    def parse(self, response):
        # print(response.text)
        node_list = response.xpath(
            "//table[@id='MoreInfoList1_DataGrid1']//tr")
        # print(response.xpath("//table[@class='tb_title']/*/tr/td[2]/a[last()]/text()"))
        #print(node_list)
        nowitem = 0
        for node in node_list:
            item = czScrapyItem()
            href = str(
                node.xpath("./td[2]/a/@href").extract()[0].encode("utf-8"),
                'utf-8')
            # print(href)
            # print(href.split('_'))
            item["id"] = href.split('&')[0].split("=")[1]

            # continue

            item["districtName"] = "长兴县"

            item["noticePubDate"] = str(
                node.xpath("./td[3]/text()").extract()[0].encode(
                    self.newEndcode), 'utf-8').strip()
            item["source"] = "湖州长兴县人民政府门户网站"
            item["title"] = str(
                node.xpath("./td[2]/a/@title").extract()[0].encode(
                    self.newEndcode), 'utf-8')
            if "012002007001" in response.url:
                item["typeName"] = "交易公告"
            elif "012002007002" in response.url:
                item["typeName"] = "成交公示"
            elif "012002007003" in response.url:
                item["typeName"] = "部门交易"
            elif "012002007004" in response.url:
                item["typeName"] = "乡镇交易"
            self.typename = item["typeName"]
            item["url"] = self.base_url + href
            self.newday = item["noticePubDate"]

            if self.page_now == 1 and nowitem == 0:
                logging.info("发送email-------")
                send_email(
                    receiver=['*****@*****.**', '*****@*****.**'],
                    # send_email(receiver=['*****@*****.**'],
                    title=self.curr_time + '湖州长兴县招标网站',
                    cont='<h1>今日爬取地址{}\r\n<br>湖州长兴县招标网站最新更新日期是{}</h1>'.format(
                        response.url + "\r\n", self.newday))
            nowitem += 1
            # print(item)
            yield scrapy.Request(self.base_url + href,
                                 meta={'item': item},
                                 callback=self.newparse)
        if (response.xpath(
                "//img[@src='/cxweb/images/page/nextn.gif']/../@href")):

            if "012002007001" in response.url:
                self.nowpage_7001 = self.nowpage_7001 + 1
                self.page_now = self.nowpage_7001
            elif "012002007002" in response.url:
                self.nowpage_7002 = self.nowpage_7002 + 1
                self.page_now = self.nowpage_7002
            elif "012002007003" in response.url:
                self.nowpage_7003 = self.nowpage_7003 + 1
                self.page_now = self.nowpage_7002
            elif "012002007004" in response.url:
                self.nowpage_7004 = self.nowpage_7004 + 1
                self.page_now = self.nowpage_7002
            headers = {'Cookie': 'ASP.NET_SessionId=i333enz5lzkmhe45lud2x345'}
            print(response.url)
            yield scrapy.FormRequest(
                url=response.url,
                headers=headers,
                formdata={
                    # 'infotypeId': '0',  # a bool True is not accepted here (the requests module allows it)
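                    # NOTE: this __VIEWSTATE blob (like the Cookie above) was
                    # captured from one live session; ASP.NET regenerates it per
                    # page, so reading //input[@id='__VIEWSTATE']/@value from the
                    # listing response would be more robust than hard-coding it.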
                    '__VIEWSTATE':
                    'YdGW55/GkOwgkxQAutuNaGcuceaJnkvQ+Exx9jRVi8IOa5XSU+ewSj9dTEawybG23kV2IguhzzNuhiufyXImGlrKUZr0bstacHsxtgie3K4ll1Vf56qGG/gQvzV0Ulov1nuD0UyRdKfr7WPU5wv+62u85R9oU2jdxtqNx2QYGzTqkF9NdjxF5Z0xWO4UGVcU4Fpe6EyvvQ+fTAVXBO/zMCZhv/E2cagkfaa6wPjw/j/dHr09MEBoiMN3L5OF0ADogv9/33DGAVmoN6QiHZw+SWC8NQqsJt1dgd4brb5T9pNC4FPM+8u4o1dioLwVaVkhZO/CCiRzJz34L4EWsIbv4dE5C4rvFA9eHSUmZneoZCYI0NIWqQhR/V5JmA9/QVrlNqaXiHpIlyE2i4x3WSSHzynfInnWHFp/K0gafaXAZ5QXRQd+itiW8sM8ggs1JYnADkzl0O7TVf2pvfns/Cr0suj3lwrmawJd0vXYOZw9On7JT0fmcNbP5oVZHrwZxlXal0+k7TiQJSmUvKuGWTweUcJbdVNqycrUFyuZ79wtXkOdS2HdRHBJYfgymt6HaApHHvWKJ6wk03ysccSChXBIoDcdKHb//SQnn3pIq2a+OFDSbpU8Dtf1LYqIp+8zaxncXiGE+sBZXlM0h49Yr4xPcEWkzV/dpJwCaiKoiuDz0juQh0FyxJ3T03PLh0PQM4cpHJyl5tFyxesMr9K9ZicPHJe5BGrud7+pg4LLuZAdLP1Sr+Qkn/R5go7QBIQjFlb6Gjwa8ueXogS4lYhi0hbnvSagdvLUXubJzAui3PHP3fhgPMtogBfKwz17e3TORZFX61UCst5UIOyBbBbwePduXAa/PC7jq23qL4dF71NWKZTZcUZaht5psaO1XW6Jmq0hRCT6zLMyIV36hSjAdB9BtKdauleNyxhwcoQgTNu4EyZyual6DIfhv+JfmKSw3COMxLR91bbmixTJ5fZZQAi3iEan0c1Xfh3XTD6Ybybq6752aPD9Z6Exa3c9uAl7MA5ODJdzWX8tEQQgGxJENNtkbpXgcA6FkWq+HJ/8lVUTsdDiZr6C2NNLHnhw4zi6voMfYzbJGN1ZnR5h6jVMmi5yKH9beD9/htUgROn7purWT8Db63vYo72ySjVmukvw7knem3TSluGrgJxdZO+lSI89sH6iUzlRZVWaDCXKKMsJFL0nv/RraaIeZ9ug3iCP9+4weRWrPWj6QhBQDXxjjvrgU39a1vpM+qKu39gtIIDnQ8aeJFg0wWM4qxg/Q/92CMjb+6x42Ttj9wFq9arprx/UbVFhH3qOETLWPSp+MpycVwCuq/9dJNzqRIa126aBMXqtnWoKp3vbcgEMOCGP2yzBz6WkzYgf0WxdcQ0U5xteXV+s6UB9oRG5yK9NKMtZqmG0WwUbSmHgME0ctL5wd4O9bKGA24E1//LRPHEAFTdWLcTvcvqMAFJhTCNzDYoQW6OWp3kIFUeaJTOnRdY5uTsNe0WDIfhLQ1AYJ4CA36EVPX7nw+PqPTgTgoIrqKARUgLTTbssJK6Fjdi9FkcLFVgHX0xyeg+24d4gWkFwitkE/M1LRh+dolUf4g7rbNSGyPJUSonqGc29wRhDsehJ520vckgMVDU4k9uD+s5L6HSkKgdEga91Et4HDX7eTfyZhq/QYh9a12/0XVmJyav1uag7FxPm33y950ZWy0Mmwxab0eB+FfHYhNPDwweN8hzCLXrrpuTEFC/f++ODbVHt5UFoBfxmaEf0lx8rjw1W2SuOiANQiwgCeT9AU8KenzYZkIG5e9qqbJshaB7EsaWqqYpTzSTkgr36zXMpJ0yvlivtJTtzlUbqgQGI4NdqvhWzVVuLtj20iy9YZMaKcfwib8fpoNzOBj4ZdfdNodeddyI9DwqL1Gu+h4bmZiKFTLVmz3GPZMwSG9lWrSFK3HOAQpQ5g3pP25Wsxe/EXPAbbojP95ONT9JrXOKdQL8uIWtFFhty3VO47rHGZFOQCTJ4JGa6XXBJFlrD4u92+lPbwtFSuo/I4bgW+/yAt30Dl0010P7oYXtI3DZm4iDtJ+KNfjxrMpdJxEcAWoUsGHeZQoiDoQvgqx821zvolYl9p+mrLuH9NacWeSUK5p6en6OIhp28SfH/iU6M2Fmu+OP8/2TKGkmkzPHM5HNi9Kp2NgiiqLXg/9SqY7D8R42BoMm8Zm7qpPsHb6DCHYcBArDr6YBjX6F9f6pXnxQvzZXyGMlC1dBlZ9rSh0ijWs6VEJEX4MuwyY/BWbDVDenMrlthuAGhymf1vKY8h4I76XvrAm3aYRYGs7KYh9kd8G6CkwLov5nqBlkOoiKNEYhIdjvFdFtU1E4aAQviIpvwcjVq8ZsCxc20S9U4JPtumMfa17r61ateItwGGHnvr1lrzQMQRm7rOTVHfkq08ysMrmSTMxzfD5CIf1Gxq7Hy4yAeN50CvEl8F+ySIp1s7RJFmQeL//Qv2lJIWYOWMRTTUvX8jXg8EEOaviaY2Xxn2BAzDDcPRIg6PtUe89nLbJyfAlRdHpCfjrc603aJ7j9VSgb9hEuL3+erD0ht1RQUQU7VY30+vMoFeSNnPUpO6JKxj6XmIUf76CWGGBHW289lAImTJezE4ZC1rjRxqLMfeVysg1tOUgeU8MeYZwJBTNX8vHD7inSgvYwAJG2U/kB1M385nNgYOQ3+grquu6Ldw9vaQq7XmECv5egv8AWCdcmDS5bOZgN4KyNM4XWzOov/Iqpb7MHoekffzxHa41yYJE516zUuirr0axzvNHg03Sd788FbYKbXuYs1eCGbJQ8GMddtXWiKeFxooAK453MB8RjrWI8/S/j2I/FtG8t72h+Mb0QLpHT+Ed9/7ZM7/PoKv08sps9frHtzAWmegezYi6tPIc7/tWLHw6wpMkPLtI7zJTy2ioLII0+A83lBYA1mEdXcMgHXD1e4CI611fQXOVjbABfUbWpRroQD98p36ml3bTW2oFvr99e8ho4CUlyBQ2+wqcZmtPglVrkdlFnrUkoB//P02vqR70QdJ9ghE1KAmcLHGGZ4ZUQJGwP3Bfz9YEYg7KTIXNsgGQYNImnDx2xJLRe9oBdQq++Z2ruzpdP2Ml/BMnsseVoK+LLtAFJ7EcmbMy4yJnuQcsr0fN2NeXM5Gf8YzFGwGh0dvEbeZXnRK4bHCBqkhbDbNQoO+Zv/9cR1uLB1v/qK+BtMubJ8RVngqUenVCukYaTYl9HG//4rB14Pef8eidLpaLbsjNveFwEJJsRIRKx1B8ne91W+vUSLXrK7t7pGRjiVXIYLG0gqDWwsaTLTopd+KlpQsqjM6PnOaU4dJI8W2VcM+qiFeGGIyyVq1KLru+wc9DVd8SfyFOaNXZUavclQISgow1kR7X+ozrR3D39R4l1hrS3fX+TITM/iTeFwi8bbwx3I+d3tO18suzyQ/hbGlmTaJfbhdMks7ddwOYnxnMHrkqhej0iEwEp+laoL22dZMw8BS+7jFKUwmZm2dxrFONhnQr7Z5OJGP/ADoWvzyRFXA1XFXuhwKI'
                    '1JzFGci3YDjURVIkjC5L23JdiHAYCh6/yXXib67BJXsdlg0eGeXcTH2Z8xV5MaELDwO8+pLnHZh6DlvPNKeXXx0m9QU8nHvGu72G4r7gXFAZpWKBWXTL8mrc84+J6B0V6We6o8Dv1MJT1E1sZZuU9I440qPRyiNc9CHbbR60Gq5N8n4/JT9f2Gq2ia2c4hRkQ6Uj78zirRq7ThVwXiEZVVEj0j5pe2AA/sxqcuSxZl5WHmyjupFZlM9tG2MKGYXIdWhiFDiY8VKIPaH4009bDglzuedfxWmLaL51sYvHM8YYA0oKHB1KEJs94MfeyeLSg7pDF7t5zm9Jo582yVZBdqNvvW04Jez9x31W42atwydhCtR3JsYmU0P3DJAaC0dpUB76ka7gRv0oZzJb64kIgGvOHWsNYlFdXQIb4HSAyXURsKylFpCtD6pW+GTVzG5MAcerMCzN4QGirv91LFWjFl6xTA1mrinKP42N98i2qEiVt6dRVenZSyYKoRlX55sxJS3Yp5RFa6ACoScAJ4HWWWzXe/ZPTccnYHODf2DTZQGzezcWoWrkEx2fcpyIKAQLemNJQITjAT4H/qSbfwxFKjRTof/+lVAHCoWJm8wbygIFXu3na61y3PUSutuc0pADHIu66/quoDKH7Fl2MHKh7iMBY3vAr1zSQOWCYSyrJ9mPSTlmi0Paasiigkg/VkW31OueT2igb3sQl/AYAGtXoC6TuYxOJRzb0JWVydg7+572cLweBg4jy97rGrbUYYMjIkysy/E8Q+Cech8XTio592fiDnEdDGYfPs9bBXm20TnChPxo2cj1avfXb6izIokGsP79jk/KA+1PFBkGRMfyz3LHQPjBhQXR5MdClmAS+mSQdVaFth2m6zElHHQZcfASBiJ92l+jZqXm5OVAFIJOj4QvhmsnnH7cIgPLUv2naSYUdfTSOXsVSylHDuQwL974yaxTP0wQxp+pNd9KTJVYt7t61D4K/K8i9isFyu6lpx8RR5pnHJpgt6YN04YlXJRO2WjY1e8WkiadpE3jGoN0MOcQgHjecXZQM7/vyl6hAOowh8S2Iktnz2KIjRM5oOQIJCL4bBDbFcCEzJMmJ3/jDigJTmlw3U0wys2DnAHL2dLVqmghG/1m1hAY1FeZ0h5QrjfASZCLUQW+HzjwX6W4sKGEe7RGURJCex+gftEDuXWQAIebARj9bKlc4KW5Wdk+OH73OKYA/dALHN6xPV3D0Fyg99pm2xOwwJfHJ3jhuvNLuOVj5NLrumkrqqgrSFj/3oY+qyhR5D9r3C+gaHrAifiLoqSJk2p+GMOJy4dWKwFyDsyD16vy+iGXUxv3yOber0V3WgUITD/T5kjRQ8/inIjybXlyXxJycy7vYNFknGZr9l55QebVBNpQhm9kTro7cWjRCSU0Y3VtwScVPZztZsu2DL8g36DhhpAgTObs+rRsOdjC+caJ9HWWXp8LmHGTvKhb9f+l+4f0YS98zWEocH3631urRglySJSc4Whcoagf7UvB1krWgZV5KrD9fcjBp/tLjkvwpg5xbXYn+pRlfNYnlBKtAkI07/4N5Fts/AR2SwDrbScM9WULHrUPgv/sDPEAg2ra5CLabfkhZBydsFPmPqtMfVBaFgyJRdx+hg9W0Y8W/bD/G5KSiNxMLDNCWJWAxjvQfRd6zA/hgSNRDwNXZk4cknZPmdsPbCT4vBKwwViGVUSzUgA5vemVBXWbc7LOpRpAqUGoQtCiGCU+ZPcCj6oUvmuWG6gNh3WgIOfx1Lq1+hZRpDX6dPeFb7nmpqGeRglC2GnE7zsdL7Zkm6RnjaUK5yKFDiCAejSb6kkm2wTzHNScx9mTXn+bN2gqo2onzv4TV20NM6koDEFBSBGnURlAiodYzw2tGWKOkWW3y+nxgIY2iNZFfPYtXAtFj0xb0mbmFEgZiTWCSoHSpsAAiMA1qT36KGnoCAttnN60Q51GAe2cFkei0qG0Ac2OqWxllYg27D3TohTNbu/bOdWCmXxHkNeM5WQ2xA2oA36xDrtKVyCutWXWjVAi5HBTMIxeqL3A9cJP2e8tA3pAMm1pf75eWgH+FwlgS+KN6LGbPzdwUWvUK6Kjy0iuhL0M0VPOxrGyJCuBkIGc6BSAIZN5BYdnxfZ7jVJW3Ke6+8C7TZEnwg7x1BctDFxSu7xxBQTETsJCehK1tznzhinbQkxSUrXJhkK7ErvP+fPgwcSZqOZV3D/laiIPrHF9WIylbBLx6iwsUoNIP9HYLfZJAlgcN9SO4OCNcSQErJ+xQfQsSVIz1qKK+PJZRldm8g+viN0zAeOPs7YTpRNTjDpx50rzHLbLUH82bDIgBEf4OudXEUobKbKHN1eGQHII1p3O6OALgT+TcjobsLmL6zi3NcCuJjUcfIC0UyxYzqpzWGl7xdy/9VbL28ydtLef2bb3gifm7HJmFcmcPEgqaCWQmpMR6T9xeer+q0V25zbrmCyiVscg4cRpdYFqAU8lH92ACbnFBnpqWmqnofywlxSd329RLAW3L5DOfLOsh51J6ulWy0FI36RlMNsL6Vx0Jb4LcQiZXABgOBLt2ElfSNFIVSbuHQrEVyz9ENCYBI87Q3pXdjlDBpGxYzMFAEyVVRI1R18YWewz5WKmzO7fPttvzGx2OR0cprrXLximykcZGA/BmC24MsKDspA/ZeipH8NGg5sRwvVaMr6NXMmEwTChWvEYG3+0+pVo+x2aXKbkumRltrw/dmQuWH4Xhed3A7o+5yznn8FRaRs1MY1FIqISihIbpz2ny+LXoKy8+aUGxm9sRoNN/uh7gGXZbbHApxiUmkKXZy16j0vzClQDJgRzPAVC3B4fwTWCv2NuOpkedjI6FDcagfyRhDiJo4BE/bKTlfQjgUrtvdlS4BrsqJpxFelD4Xm8YTvECWsigybtJElJTdcuLIq94D1qaRdX7B8lO5PByx9ismz1fCw0fQUgbEeMlC9Bj3uVZevl12xp+Z/ckMt4u+JwM6sMzXmQB0kWtpN1Nt3A6BMZGTzJtGhXqRpdLihT90fsF4pzcJWYVmdhaFMtPOCuVaBMYJWiCOOXdKP6FrSkg1qwUFUVHlapQT6JudHhQfAVJhvpoc2ydvHdfZSlJXUqULLSTm10Qz4MMh+VrHaqOD/pN3SUjJxLAg332NDcYEn+IRzygFEbUTL6TPUCf5dz0ADsvwpntxVrUOst8lFkCGo9eIpnHQcMPb9hBiDD4RDBE3gOWDCDbxql3r2KWmjeVgBKTAC3J7mba6SsdqpIQE6fXsWzAgTChtbwXysDRkYovPmyiMB+ywcncvMm64LZOojyuzSVxrhRNo2nTCbtXcQkzUvqI7t5IIIEXOmDAuhmleAjmrliL+2hhTrm6Hj05wfRMHTNhgSBXCVKYzglDmY5ams6aYA+VzvKh8Dj66oJR5qyWUhoqpSJxUxY1ggYOdC2bxwJq7XAoJKWYnfl3TCHcWBcyGOY0dIm6W+3IJARia78EgoEb9p7aO13NZz1J68DbhbezI8E77Hc326WYuPexMonRkxtXG8CU7D5wyU7IyQ'
                    'qTeS2+Mxhaag9R4x/7rEt/Lmk6mB4sRnD26e0J6MnPRyF8Uzlkk8kgZgSNPgzv+mrnAnqVuKVsfYQvvNYjs9p7dpll3Qmd4FioPZxQwDm0CVI8EnzqAM1CbZLGLZVjGSfFwdcM6vgUwYwn1VA0YbXQUFcR1IThU39TXUaTlVPsvEr4+F7aF43vrYi2IDgD2ayMAWZZ8iyM+i9YnU7UYa6e4kCI4ricuCgo5tP2Hp8EFxRAs2xzRFZ4+Wy3/3gmCoXP61uica+CCLkszoEK42ZnZ1C8LEqto4FaRC3b4iR7lL6zT6WUMx4o1ndPwLn/cPpPuuVroB6/9DFgFVQ6vY2DSj3B2sTawUcZtqgLm1E4KM08ydfwVEZnKFBHhWSkrtjGZwTDMRXZVWeo3ddUShJpHJW6ixBFAkqsUh9LMP7WnqZgqRNKcz0S0WNO1bg+LE5sg+Jw/Nu/8yVQscO3ymcm2EERAnOCOC6BnBUsu9pcFWdnhnFIbIGjjvfx0dDvl22bCPuiPcZ57oBCztd5Q7AOJt1rY/LyPoqg/FvIO87Ar7Mu24Gl97y4gbFwCDbrsFxYd6Q4KWIPGAZNKVQ+0lxqZqBW77tBOpf2U36eLS018ZMNAs0BSWocUZjGCoEf+aSVhsoP8xzV+oNkqv7IjupatzZ88Y9ZhyvpjEvn4lpWp+XfPsXZk7XATwknEq0SAdSoJIekAR61BW3232vMRatGsX36n97ZMyedqJZooGllNMqdG2ptGGStPBxVPoOnLZf8eN32r8yX+bU6lfT0RuM4t4Dg9r3kMGq2O14DuJx7C36i67Vv/hyN4lAwcILgGv0YjFcKGYHDv3hMa+h+TSy7Ng3GrgO965Axt4WYdy80/lnyNBne551dvCRMqKxpPAtWRU/5Gb6Rmbge5JNRD1LHLug4an0RV4QgqAX4BJiPh8x3soHGVO/csrQqm1ip4ZP9EAtSLF37GuzbutvsJfY6QJuiD6OdJOIsHR8eLpST3FOhQKDcrLHyvWEeR0nGgwo7OGdcpNk0LfgGzsUSRpfwVvPHNURaWBQ0sUx1tC0CU8xSEQk7OpErbjblp+Cyc8qG41Z7VtJN9ffmaOnjMnVO+tG0GC/G817DyJtykPoakdSBXaNSzxtG0I63/qLDhRsnkeLbyoLjIJiQAX1/34ZM5V7AjGeqGuNhb/tE1LzG4zrPQVaA9x/x3zd+mwKtuyMnNVo6JhsZZl6CDpNgZFaq6KUz/8XG/igcTPmhKvKMAlhvu5uO8FB8J4yxKByB3jNGOsRIPkt9+pi89CVinh8Yiw306my/xzIBLqR4aYrZGdnsIhuRExG3tPtZhTlPg1BPKls0w5H6RaOrr48Q1lwUbru7e/lWrvulQCIQVp5lFLmS6UwQL9R2lQswjfzNmm0FWSs3XgeoKvm7GdUtzSHW86eDyAUXadmJvqBgiFn+hc76vHArKLQsCzXSw+m3dl5GWZoCZyFk7LuL5yZIV/uzK3QZx3gQTlCOA+Z22qJ4w6RVQq8O4lQuuMxWhd2uv0lkhJDEq0NnIeg/dKzGr3eVLMvLMXUBf9gqJ60bAizt+NgkSSAGDnskJI3l5VDUlyEMyp8tKZnEMVAluePb/F4P/JqGgi2Tn9JE5gCGAwT+WfBt/MWL/3H9MATRd+tRWp6bQXCpptR0n3rFjwdPleE04XfeirYpzyHiqmj35K/t+N1OE3/AB0171M/JQoeNMbx9mQowQqQ5yA3w5bSxV7tYXvM8pVsHb2DtP0OLR+VLEAY5fK9LNmJoegUifrj52SGCIyZEVQSbgO6+u5HWBH6NCWawT57GGOrq117BcCF76SF9ZYnKDhvM/Lw6VF1ue7WOOKjTqQaLyYE5FMCltBBjsaRS7Fpr0DEQ73QEYx1Q6c5g/F5jWRdYiWrwxnjTaMzTqkqWY+a9knIdTHLkOddxjcMzMClKUzY97OJNAd3AJDdeATBvqhITQ7W0QsAZZx+W1N18UGoWVYPiAf8Ry/0dqUtm9HeGRGvbV22782VzR6lBrF0qMcNiDIkS5btpdX3e+FcA+mGcxLWtvga+btdwbqU8x0NShyK1LIiW/ms20dpKwkMaEKdVow4LLuWp4/ZXFxtpS51RqPX+6nVzGYd5ZpFPx0wAAV18PkLBU8QFhdPdHGiM4DKgXjALEBi5UKYJttpGg5f7aL8AnwD4IroZrM7yBQGcSOZLjKJKxGFTXz+qAxGy4cEvu5ouOodAJw42XV9wZj60Au+cSdQVa0cV+jfdZoiSQk7jF+kDZ3M3beeYSCml7id3L17ph1u2UDgKziuFh/phoOKUzG8n9lY+Y2//ucfTABcAwHEPe3Xwf5wbIB1dmVvOQD9lpekCe03YWTJI08EJx5H9u0pUjz3AhCU1IXnZOjtjj6E5Nz3hZ5Mr7jsMFcgwfg3Jb49VetgUoNr0SE/Krv55Xekv3itxf9Y4qxsHGJttZ/m4K9qFUIBFjVOqgOfPd9Im2qS68fAcTnlVeHmT2veLTT8KcqfYrMmmsm4naLBHcZ2a48ZWYgHCW9sVLYb4G0ZCIvIp3BkbsRxxSCcTxvB3rxK37x9CLKcvMubC0CtG2PuD6zNM91+M/vWzY19wJbdtgbdKDkYZzg6AwBYSltPLRAEKE42HNsDF5nVnwj5Du5WwjV7mYUi9FdRPCxpKjgRftASkyF3LNHrb1tnRBsK0KEKYC+BdGJfsleOusDeIPPs9twju8zMsYSR69TI2/BBuZ2NiwILF31QemmIsGXWgCjSDtDdA71NqbYKARu0JTfZxsbHjM+DXdlBnTeNKXsdI6HodOvzKFiyyxcZ64QLBO+YymsCeKzH+8MquAukmorwJX6LmEvqqcBPTWddjIFOuxRIY4trBU1Mw/jXVbthIkTKtjn3pb+dV+KDO805+Wtr/uTmV5D7TrtJuKNewLKJ7irKFk3TovLnpH/QDMUvl7fryEHCwtRRo7jDKhq22XvBprPuagbkbGAnDw/l9VY3F3mmU3GNCxfApU+I+vTF/IiizrmlPBoG3SXqPcNtDYNILCnc5BtsVwpIF4FG0ctgf2Uoc17O7L9KR2/pfO/MdpB4YqVDymgqOSUivf+O1vm5nlqYyYmKJiRy6rx74m9fzU6EUpptiYLxwKAVzDYKd2LJQe050+2xuV0kihmIy3B95zoGxqW8CPoboijUI3UVJNdU1ZIEZODU74OQEssusj8HvzjuK4FD7LJw/uot9zN1Qvz7ifk2V431oqfqvPxt0i9bT2tmHbGyxKsU+bzhDKCol497JtXYpyXFojxv6mwMwHJDKTs0iByDlqMLcDZcm+KlDcfnmKCVFbtiKMoglY4B+4RZXKt4nSsCP9oUMkfi38aNQle4gjsEZv0KpaBD55xuy3+s/T8sLLB7R35C2cy6WUtlrAvb2J9095MVnIz36RWYNAwyuzVZ3wm/JSt6p0LTOESlk/0w7W3YJLzzkBS3Ctv8lMGM9xUQH53VErzds7Yttpc7Hjmc26q0scOU+Zhem4vpKNWrPHYc+1nSxvOnL8ZvsXHDEhZp5eWLqu'
                    'M623RLO+T4XDlm+jU/fQFk5hyCjG3MvlW7zEwIIvlZj/lRLl33etnxMGfNMUE3J5KLS4xMwbjW9TiWXFLBf+w5xHoIBKskk2NzquzQalUPAMqgLuBUyoDn3C2fG9kQE5AIuI/k3UTMaGc7qDKDBgn2Ix4h9PZ1br6k2vfwKmuOFZZQru+4zdVMeT+4PP7L5euZdJRIY1A/SYXMgca0+Rkw7TB/qO/qpi/C6g0Iji5AmMOXSSET2RiTi7i0hZ8tl0JRxrFwzRGUboLpErRJMg0hb4J9kvePQV9U/fOTK1iSqdcqIWLlL+kFSowUpx3iI2vz71HHWX57bmOtubcl3FUpvDiTJORMG7E6Dqrpf3WmDCeNPQHpjm+TrZGSEsCbPml51WO5LsS0Kt+zsXT2ukgLZVYeDEKvpxbEEgts77aBjiabyY4IGlW/7OhhMnifIPdm6uLsW6XBWvxk6VpeEfwcZ1F96MjOBVYpKXGGYa/O8c+K35Kc9qK6RUvnnaL55UutgdpixnpwmQuz0UkR4Lw8CYqSS9qWUDUneKhDi7sEhcXgIf5I6wVISo5HLk5qmRfdwggNiw7kv2vD/OEQ21hIFAWtrxC1glE/CYRnYcr3vLJoo63KNXUnUW3acWlkmGt58kKPWIhwfW/eHezmFKO1uYj0NrisbsPHrjxiOOraAtHOV6MCRzlVD2GIWga7KI2VBjxrpxYuRO1X7zZnh/TfxUr+tuFNHxben25kl8oOGZYuWoOyyEbb1aEA3JsLTJ/aOu825us6PdBVoxscfOrCGZQT+CEfwNOdmcKIG3BYxIbclP/NYz8rS0N8LyRdBJwC0ZfhZZ/AuSMFiWkSP2gxx580cCstXRc9ED4Df3REe7oazSLDEgivbaAH/pdhOUuHOAXvjclzhmJePPylBL1Li+2HZgypRW91fXQIzsfQv/7BdcmCdjcNdqQfrzD6PLV2xb5B56av31Cu8ylvUEE4FhaPNdZMb4lp5IIq5LL/fsSFE3kksQTeFY+h3rQ++c8Jfgsnq765YEnvZgsRVJ3aXGdENm2YVjwQsZFH+1e7bWWc8xDc53eMgyKpaLqJwcyVXPmS89354DoCTYtX/XvfQGxNjU8oG7EYeQkqX2ZLSvRu1h0eKJMP5Utrz39HwMhAGc2wRwbtQAT/bYbLH26L857ZvSJxbZ7e8n3haPeYApzX64uaL2oYYMSijEs3/NuTDq6YSXuYJsfKgTcEB25mTApJQQe0hoiYzYNf7c0c4UzpdFgy+/TT1XGGLJ+XAk/i8c85SeKWQZsXdepAtfnToCaFeYiFolzQhCKJ38ebDo/2i89SeOpRyhGU5IBcO7ukA72I1S7gF4iLysulbrWHZoRGgNla3ygRqnK52nma+U1165qelEjUrvOGMQ0pKHu9y7vYu2F6YsmXee2X7U18tahxHO6dRtJw5mHuPu3oZmDZsPnh6APsRXJhYx4szmYqDu98I5DyhhQA7gE3eU01MjFk5C8+pxac29+Lg5JdqB2+DMxAHiINGKeRJYGJ6DLFihdoxX+IGz1fuIsnlBQSfnKNT94HPgRU0/UK7nXruEoW73mnLOfBBrWkQ6J2TDmBiT1pgkjFqCk1j06XeyJh/a5PXAf3GzHxnN6FOsqfwqUhD5KEI2yp7axJIgcGEa1GrSwq85Ru3nKoqyx9mwOPJ+H7lDUwP/RMWuqhCvKVBXYkj6KO9fBPvzsjvksCtfQp0AVA0R5uE7zdIpzkgJnUNfpJVaeHNYDeushzskhnn9KktlYq83+a5+7NgfRtV5OWCv62l6JFjKIQ5/RuMlhGauf9tnUHgc1B7zCX++OaFFAFHZaWsqCcu0encYXe4ijlCgpQBoxvZ/PM17GzuxciBHevQ/cFpJ9JcPcNAzbmEbMPBefhytRKpkyCEW0Ievy1IWgPJk4wH35+6Nxah+1sbseRpSzB8QCu+DStu3DuqG9o3ot0ZZPx7xiVIWeFcM/q2he2ivuu+phh1DHgJN/tby24kFc+rfJR73BTINjeiixXzqwc9M2CJzDQy0FtrVXsxL4souFipNdqAISrm5JDntpYgMYLzEdmDJFFThRdw5HJQuDnxq63ojC5JtzZXuFNTzpJ+UE55YNF7Uwy5Y4vZGLO9I6swJxy1F+DOYuWtRFckMbKRPotjQk5m/WISUOx2m4/CdXD7io7E1RoiOQNK6165aTJRJwfe3+N3z8ZNvhDJwjtoGvRt/38stP90+9Um54sKCn80AQFsD3oCa3eoXhgkP3sS97dmYUM2E3IRNaTF9cFZYP56QqwYNfR02FTb7TSWZdK9C4lEGUNpGog465rmYIJZToyuDUOn9XX7juMbEVPx4/QAtB5wyu509ceUdyO73S7GgCr+gPQiWC2xV1QxFjhbWNCVkpyzYTeFelI+9p5g5dpFog/6z+qRmjIw6+7qvUYpefl/eo7B9H+0m3FFfn24blVyjrMS42ie1bYs2boMd9tVsu75RtTJAYECwmSB7AutlrNHeyA7aFjqeWEqaDihB2V5JPKmn3DFE3cUI1ZhuN+w5YKp1VpfVzyNotqZaiJN32osQiiFp8H+e9wX+C0yfFX2wbWns3KWM5jPq9u15cg/BTbAL9LY++SSwfUN3IFIASxLsNReBVGf7yWtdTo6UgnbFCKNLYOZI22r/xVZYruofMDLv0qFYeA2jrUAXk3XVCHjfbTTCGAQMlmsa4amaP6CHrrX0NkGSKS3xVlSBiBM88QOo7NhHRcxkTS86Y6pLjiKMzmQv3pvjv7Biue6SBYNzhQrq6m5bk2sWl3RXODDGZIon5gfSI/NaQQpS3nVOG49zPtjXVkN5MF/xZU8fMp0d3w2ecJ7zaBry7LDqMOWwTCuCp90L/CiGeCTZpETJqcv86U7ETR5pvnPspTBpbDQRLJJeXow8CLSDQRNyEil0ax9t2By5iGMN207XPAY+fI1wgO0IIqDyQkGiqbNeAtADEtRvr0G/tN2a5t8ruT1a1eQY+zKIiYBH4yJjY9wyyoqhtmdWQcP0OTMEPCTK01o6Hi6tnFuaIGsNh/U/B+YKoCkMy9HpoJCxrg1KO7NqxNuFiBW1knenXJbGF1d+0M0ci6togiVXDvE87kM3lc8xCjPE6jIOJRHdxQGcaF3+8OxdBwgCp1eAQsI3W021nfe3s3nt4CmPLbt8qMrFodNe+ODwUtHjazHvQS5hMS0Sq/3lBJYju+YMmBErwxOve+e2ukQZBbMDbLKVaVKy5Rqkx0eL/RN5iygrXLiwU6juvIdU09gC7kSeJm51d/sG8knQH7cpgApXZ43M/kBC/udvNQCZ7fbZBcXBnZBPP+lyPDmDVN8ff0xAxp6kiCxQ6MVzuDq/mHH/7anbQJbQOGEA39+WbPORRs2Bd/knanMXUXbM7/XbGMX/PieI8y2eFL88bQR+mgdNoiJ20YfX+85vUMmEg7HYmfILCt3OjJOz/RL6C8vcTMP8Zpcu8SiEXce86z3XCzvHGcoE+U3UgV9WhgdBnxGNAJ9je+1kwlvU05DYjlJM0LBxdJ2ypo4hvBE'
                    '9MiYNx1LeiKP4+kfKkPqpZ+PdQDl5iGiS5J5AMSKKIlJTE1j8kuowYNoTdETgpuFCQR9CpWXjJl5BB1va5PHrwMIsTxXO19Izpb7qpN3vwqoo53uOy9K1xXv4mEpGB2jiZRYk0Lr8eD/+uHRaPKa1xl35aOedO6nWSHheMvv7K8pIGYB7vgJ/cxJ68ljYHj6zXjgNK0YfO2xNtODsQ9Dr876kqPQPgvWi4ZXoUsr4FeigXOOagfzvSGg4cZQxhWrCH1Yr8SsqwKMj43Uq7sn+zBhEXWVhVN09oUevnqW07SvSef3oaVu1Wl4TKO59c2GAQBvJASSmZN2x8lZWolg5Kovp2OMTmE3C7gzFkr9Kzp2npw==',
                    # an int 1 is not accepted here either (the requests module allows it)
                    '__VIEWSTATEGENERATOR': '16AA444B',
                    '__EVENTTARGET': 'MoreInfoList1$Pager',
                    '__EVENTARGUMENT': str(self.page_now),
                    '__VIEWSTATEENCRYPTED': ''
                },  # formdata plays the role of data in the requests module; keys and values must both be strings
                callback=self.parse)

            logging.info(self.typename + "现在爬取第{}页内容".format(self.page_now))
Example 11
    def parse(self, response):
        #print(response.text)
        node_list = response.xpath("//ul[@class='ewb-notice-items']/li")
        # newbase_url = response.url[:response.url.rfind("/")] + '/'
        nowItem = 0
        # print(response.url[response.url.rfind('&Paging=')+1:] )
        page_td = response.url[response.url.rfind('/') + 1:].split(".")[0]
        if 'moreinfo' in page_td:
            page_now = 1
        else:
            page_now = int(page_td)
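        # on the first page, read the total page count from the last pager entry, minus one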
        if page_now == 1:
            self.total_page = int(''.join(
                response.xpath(
                    "//div[@id='page']/ul[@class='m-pagination-page']/li[last()]/a/text()"
                ).extract()).encode(self.newEndcode)) - 1
        print(self.total_page)
        #print(response.xpath("//div[@id='page']/ul[@class='m-pagination-page']/li[last()]"))
        typename = ''
        for node in node_list:
            item = czScrapyItem()

            href = str(
                node.xpath("./a/@href").extract()[0].encode("utf-8"),
                'utf-8').replace("'", "")
            item["id"] = href[href.rfind('/') + 1:].split(".")[0]
            item["districtName"] = "安吉县"
            # print(href)

            url = self.base_url + href

            # print(url)
            item["noticePubDate"] = str(
                node.xpath("./span/text()").extract()[0].encode(
                    self.newEndcode), 'utf-8').replace('[',
                                                       '').replace(']', '')
            # item["noticeTitle"] = self.new_item["noticeTitle"]
            self.newday = item["noticePubDate"]

            item["source"] = "湖州安吉县"
            item["title"] = str(
                node.xpath("./a/@title").extract()[0].encode(self.newEndcode),
                'utf-8').strip()
            # print(node.xpath("./td[2]/a[2]/text()").extract()[0].encode(self.newEndcode).decode('utf-8'))

            item["typeName"] = "政府采购"

            self.typename = item["typeName"]
            item["url"] = url
            if page_now == 1 and nowItem == 0:
                logging.info(self.typename + "发送email-------")
                send_email(
                    receiver=['*****@*****.**', '*****@*****.**'],
                    # send_email(receiver=['*****@*****.**'],
                    title=self.curr_time + '湖州安吉县招标网站',
                    cont='<h1>今日爬取地址{}\r\n<br>湖州安吉县招标网站最新更新日期是{}</h1>'.format(
                        response.url + "\r\n", self.newday))
            nowItem += 1
            yield scrapy.Request(url,
                                 meta={'item': item},
                                 callback=self.newparse)

        if page_now < self.total_page:
            # page_now = 2
            page_now += 1
            logging.info(self.typename + "现在爬取第{}页内容".format(page_now))
            # self.nowpage += 1
            newurl = response.url[:response.url.rfind('/') +
                                  1] + str(page_now) + ".html"
            print(newurl)
            yield scrapy.Request(newurl, callback=self.parse)
Example 12
    def parse(self, response):
        node_list = response.xpath("//div[@id='ggfl']/div/dl")
        # newbase_url = response.url[:response.url.rfind("/")] + '/'
        nowItem = 0
        # print(response.url[response.url.rfind('&Paging=')+1:] )
        page_now = int(response.url.split("&")[0].split("=")[1])
        typename = ''
        for node in node_list:
            item = czScrapyItem()
            href = str(
                node.xpath("./dt/a/@href").extract()[0].encode("utf-8"),
                'utf-8').replace("'", "")
            if "=" not in href:
                continue
            item["id"] = href.split("=")[1]
            item["districtName"] = "经济开发区"
            # print(href)

            url = self.base_url + href

            # print(url)
            item["noticePubDate"] = str(
                node.xpath("./dd/text()").extract()[0].encode(self.newEndcode),
                'utf-8').replace('[', '').replace(']', '')
            # item["noticeTitle"] = self.new_item["noticeTitle"]
            self.newday = item["noticePubDate"]

            item["source"] = "嘉兴经济开发区"
            item["title"] = str(
                node.xpath("./dt/a/text()").extract()[0].encode(
                    self.newEndcode), 'utf-8').strip()
            # print(node.xpath("./td[2]/a[2]/text()").extract()[0].encode(self.newEndcode).decode('utf-8'))

            item["typeName"] = "通知公告"

            self.typename = item["typeName"]
            item["url"] = url
            if page_now == 1 and nowItem == 0:
                logging.info(self.typename + "发送email-------")
                send_email(
                    receiver=['*****@*****.**', '*****@*****.**'],
                    # send_email(receiver=['*****@*****.**'],
                    title=self.curr_time + '嘉兴经济开发区招标网站',
                    cont='<h1>今日爬取地址{}\r\n<br>嘉兴经济开发区招标网站最新更新日期是{}</h1>'.
                    format(response.url + "\r\n", self.newday))
            nowItem += 1
            yield scrapy.Request(url,
                                 meta={'item': item},
                                 callback=self.newparse)

        if "disabled" not in (str(
                response.xpath("//div[@class='pagelist']/a[last()]/@class").
                extract_first().encode(self.newEndcode), 'utf-8')):
            # page_now = 2
            page_now += 1
            logging.info(self.typename + "现在爬取第{}页内容".format(page_now))
            # self.nowpage += 1
            newurl = response.url[:response.url.rfind('p=')] + "p=" + str(
                page_now) + "&t=18"
            print(newurl)
            yield scrapy.Request(newurl, callback=self.parse)
Example 13
    def parse(self, response):
        #print(response.text)
        node_list = response.xpath(
            "//div[@class='column border mt10']/div[2]/div[1]//*/tr")
        #print(node_list)
        nowItem = 0
        for node in node_list:
            item = czScrapyItem()
            href = str(
                node.xpath("./td/a/@href").extract()[0].encode(
                    self.newEndcode), self.newEndcode)
            item["id"] = href.split('&')[0].split('=')[1]
            item["districtName"] = "诸暨市"
            #print(href)

            url = self.base_url + href.replace("'", "")
            #print(url)
            yield scrapy.Request(url,
                                 meta={'item': item},
                                 callback=self.newparse)
            item["noticePubDate"] = str(
                node.xpath("./td[@align='right']/text()").extract()[0].encode(
                    self.newEndcode), 'utf-8').replace('[',
                                                       '').replace(']', '')
            # item["noticeTitle"] = self.new_item["noticeTitle"]
            self.newday = item["noticePubDate"]
            item["source"] = "诸暨市公共资源交易网"
            item["title"] = str(
                node.xpath("./td/a/@title").extract()[0].encode(
                    self.newEndcode), 'utf-8')
            # print(node.xpath("./td[2]/a[2]/text()").extract()[0].encode(self.newEndcode).decode('utf-8'))
            if '037001' in response.url:
                item["typeName"] = "要素公示"
            elif '037002' in response.url:
                item["typeName"] = "采购公告"
            elif '037003' in response.url:
                item["typeName"] = "成交公示"
            elif '037004' in response.url:
                item["typeName"] = "成交结果"
            else:
                item["typeName"] = "合同公告"
            item["url"] = url
            self.typename = item["typeName"]
            if int(response.url.split('=')[1]) == 1 and nowItem == 0:
                logging.info("发送email-------")
                send_email(
                    receiver=['*****@*****.**', '*****@*****.**'],
                    # send_email(receiver=['*****@*****.**'],
                    title=self.curr_time + '诸暨市公共资源交易网',
                    cont='<h1>今日爬取地址{}\r\n<br>诸暨市公共资源交易网最新更新日期是{}</h1>'.format(
                        response.url + "\r\n", self.newday))
            nowItem += 1
            yield item

        if response.xpath("//td[@class='pageout'][3]/@onclick"):
            page = int(response.url.split('=')[1])
            logging.info(self.typename + "现在爬取第{}页内容".format(str(page + 1)))
            #print(str(self.nowpage)+'-----'+response.url)
            page += 1
            newurl = response.url[:response.url.index('=') + 1] + str(page)
            # print(newurl)
            yield scrapy.Request(newurl, callback=self.parse)
Example 14
    def parse(self, response):
        node_list = response.xpath(
            "//div[@class='list_info']/div[@class='list_news']")
        # newbase_url = response.url[:response.url.rfind("/")] + '/'
        nowItem = 0
        # print(response.url[response.url.rfind('&Paging=')+1:] )
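        # the URL carries a record offset rather than a page number; at 20
        # records per page, offset // 20 + 1 is the current page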
        page_now = int(int(response.url.split("&")[1].split("=")[1]) / 20) + 1
        typename = ''
        for node in node_list:
            item = czScrapyItem()
            href = str(
                node.xpath("./div/a/@href").extract()[0].encode("utf-8"),
                'utf-8').replace("'", "")
            item["id"] = href.split("=")[1]
            item["districtName"] = "嘉善县"
            # print(href)

            url = self.base_url + href

            # print(url)
            item["noticePubDate"] = str(
                node.xpath("./div[2]/text()").extract()[0].encode(
                    self.newEndcode), 'utf-8').replace('[',
                                                       '').replace(']', '')
            # item["noticeTitle"] = self.new_item["noticeTitle"]
            self.newday = item["noticePubDate"]

            item["source"] = "嘉兴嘉善县"
            item["title"] = str(
                node.xpath("./div[1]/a/@title").extract()[0].encode(
                    self.newEndcode), 'utf-8').strip()
            # print(node.xpath("./td[2]/a[2]/text()").extract()[0].encode(self.newEndcode).decode('utf-8'))
            if "092BD84429CB0B8771B4EC7AADFCCC7F" in response.url:
                item["typeName"] = "政府采购最新公告"
            else:
                item["typeName"] = "其他公告资源公告"

            self.typename = item["typeName"]
            item["url"] = url
            if page_now == 1 and nowItem == 0:
                logging.info(self.typename + "发送email-------")
                send_email(
                    receiver=['*****@*****.**', '*****@*****.**'],
                    # send_email(receiver=['*****@*****.**'],
                    title=self.curr_time + '嘉兴嘉善县招标网站',
                    cont='<h1>今日爬取地址{}\r\n<br>嘉兴嘉善县招标网站最新更新日期是{}</h1>'.format(
                        response.url + "\r\n", self.newday))
            nowItem += 1
            yield scrapy.Request(url,
                                 meta={'item': item},
                                 callback=self.newparse)

        if not (response.xpath("//input[@name='nextPageBtn']/@disabled")):
            # page_now = 2
            page_now += 1
            logging.info(self.typename + "现在爬取第{}页内容".format(page_now))
            # self.nowpage += 1
            newurl = response.url[:response.url.rfind('&') +
                                  1] + "start=" + str((page_now - 1) * 20)
            print(newurl)
            yield scrapy.Request(newurl, callback=self.parse)