Code Example #1
    def step2bbs(self, params):
        Logger.getlogging().info("Dm5Commnets.STEP_2")
        # 将STEP_1中的docurl传下来
        docurl = params.customized['docurl']

        comments_count = int(self.r.parse(ur'(\d+)个回复', params.content)[0])
        # Incremental check
        cmtnum = URLStorage.getcmtnum(params.originalurl)
        if cmtnum >= comments_count:
            return
        URLStorage.setcmtnum(params.originalurl, comments_count)

        # Work out the total number of comment pages from the pager links
        pagenum = 0
        xparser = XPathUtility(params.content)
        if not xparser.xpath('//*[@class="inkk ma5"]'):
            Logger.getlogging().warning('{0}:30001'.format(params.originalurl))
            return
        pageList = xparser.xpath('//*[@id="search_fy"]/a/text()')
        if not pageList:
            pagenum = 1
        else:
            pagenum = int(pageList[-2])

        for page in range(1, pagenum + 1, 1):
            comment_url = Dm5Commnets.COMMENT_URL.format(docurl=docurl,
                                                         page=page)
            self.storeurl(comment_url, params.originalurl,
                          Dm5Commnets.STEP_3_BBS)
Code Example #2
 def step2bbs(self, params):
     Logger.getlogging().info("Ea3wcomments.STEP_2")
     commentinfo_url = params.customized['commentinfo_url'] + "&load=all"
     xparser = XPathUtility(params.content)
     comments_count = xparser.getnumber('//div[@class="at-comment"]/a/span')
     # Save the page comment count (skip if it has not grown)
     cmtnum = URLStorage.getcmtnum(params.originalurl)
     if cmtnum >= comments_count:
         return
     URLStorage.setcmtnum(params.originalurl, comments_count)
     self.storeurl(commentinfo_url, params.originalurl,
                   Ea3wcomments.STEP_3_BBS)
Code Example #3
    def step2(self, params):
        Logger.getlogging().info("Dm123NewsComments.STEP_2")
        classid = params.customized['classid']
        id = params.customized['id']
        xparser = XPathUtility(params.content)
        # Total comment count (when the comments do not fill one page, the value read here is 0)
        comments_count = xparser.getnumber('//div/a[1]/b')

        # comments_count == 0 covers two cases: no comments at all, or comments that do not fill a page
        if 0 == comments_count:
            commentsinfos = xparser.getcomments('//div[@class="rbvalueout"]')
            commentstimes = xparser.getcomments('//span[@class="rbtime"]')
            # Reassign comments_count from the parsed comment list
            comments_count = len(commentsinfos)
            if 0 == comments_count:
                return
            else:
                # Incremental check
                cmtnum = URLStorage.getcmtnum(params.originalurl)
                if cmtnum >= comments_count:
                    return
                URLStorage.setcmtnum(params.originalurl, comments_count)
                self.storeurl(params.originalurl, params.originalurl, Dm123NewsComments.STEP_3,
                              {'is_only_one_page': True, 'commentsinfos': commentsinfos,
                               'commentstimes': commentstimes})
        else:
            # Incremental check
            cmtnum = URLStorage.getcmtnum(params.originalurl)
            if cmtnum >= comments_count:
                return
            URLStorage.setcmtnum(params.originalurl, comments_count)
            # Number of comment pages
            page_count = int(math.ceil(float(comments_count) / self.page_size))
            for page in range(0, int(page_count), 1):
                comment_url = Dm123NewsComments.COMMENT_URL.format(page=page, classid=classid, id=id)
                self.storeurl(comment_url, params.originalurl, Dm123NewsComments.STEP_3,
                              {'is_only_one_page': False})
Code Example #4
 def step2bbs(self, params):
     Logger.getlogging().info("BaozouNewsComments.STEP_2")
     topic_id = params.customized['topic_id']
     commentsinfo = json.loads(params.content)
     comments_count = commentsinfo['total_count']
     # Save the page comment count (skip if it has not grown)
     cmtnum = URLStorage.getcmtnum(params.originalurl)
     if cmtnum >= comments_count:
         return
     URLStorage.setcmtnum(params.originalurl, comments_count)
     for index in range(1, int(commentsinfo['total_pages']) + 1, 1):
         commentinfo_url = BaozouNewsComments.COMMENT_URL.format(
             topic_id=topic_id, page=index)
         self.storeurl(commentinfo_url, params.originalurl,
                       BaozouNewsComments.STEP_3_BBS)
Code Example #5
    def step2(self, params):
        Logger.getlogging().info("ThirtysixKryptonComments.STEP_2")
        # cid passed down from STEP_1
        cid = params.customized['cid']

        jsoncontent = json.loads(params.content)
        comments_count = jsoncontent['data']['total_items']
        page_count = jsoncontent['data']['total_pages']

        # Incremental check
        cmtnum = URLStorage.getcmtnum(params.originalurl)
        if cmtnum >= comments_count:
            return
        URLStorage.setcmtnum(params.originalurl, comments_count)

        for page in range(1, page_count+1, 1):
            commentinfo_url = ThirtysixKryptonComments.COMMENT_URL.format(cid, self.page_size, page)
            self.storeurl(commentinfo_url, params.originalurl, ThirtysixKryptonComments.STEP_3)
Code Example #6
    def step2bbs(self, params):
        Logger.getlogging().info("Tmtpostcommnets.STEP_2")
        tid = params.customized['tid']
        commentsinfo = json.loads(params.content)
        comments_count = commentsinfo['cursor']['total']

        # Save the page comment count (skip if it has not grown)
        cmtnum = URLStorage.getcmtnum(params.originalurl)
        if cmtnum >= comments_count:
            return
        URLStorage.setcmtnum(params.originalurl, comments_count)
        for index in range(0,
                           int(math.ceil(float(comments_count) / self.limit)),
                           1):
            self.offset = index * self.limit
            commentinfo_url = Tmtpostcommnets.COMMENT_URL.format(
                tid=tid, limit=self.limit, offset=self.offset)
            self.storeurl(commentinfo_url, params.originalurl,
                          Tmtpostcommnets.STEP_3_BBS)
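
Most handlers above enumerate comment pages by page number, while Code Example #6 uses a limit/offset scheme; both derive the page count with math.ceil(total / size). Below is a small self-contained sketch of the two URL-building loops; the URL templates, page sizes and example.com endpoints are illustrative assumptions, not real APIs.

# Sketch of the two pagination schemes used in the examples: page-numbered
# URLs and limit/offset URLs. Templates below are illustrative placeholders.
import math


def page_urls(template, total, page_size):
    # One URL per page, pages numbered from 1
    pages = int(math.ceil(float(total) / page_size))
    return [template.format(page=p) for p in range(1, pages + 1)]


def offset_urls(template, total, limit):
    # One URL per offset step: offsets 0, limit, 2*limit, ...
    pages = int(math.ceil(float(total) / limit))
    return [template.format(limit=limit, offset=i * limit) for i in range(pages)]


print(page_urls('http://example.com/comments?p={page}', 23, 10))
# three page URLs: p=1, p=2, p=3
print(offset_urls('http://example.com/comments?limit={limit}&offset={offset}', 23, 10))
# three offset URLs: offset=0, 10, 20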
Code Example #7
    def step2(self, params):
        Logger.getlogging().info("Flash8Comments.STEP_2")
        # docurl passed down from STEP_1
        docurl = params.customized['docurl']
        xparser = XPathUtility(params.content)
        commentsinfo = xparser.getstring('//div[@class="page"]/span/font[1]')

        # Save the page comment count (skip if it has not grown)
        cmtnum = URLStorage.getcmtnum(params.originalurl)
        if cmtnum >= int(commentsinfo[0]):
            return
        URLStorage.setcmtnum(params.originalurl, int(commentsinfo[0]))

        # Total number of comment pages, read from the pager (at least 1)
        pagecount = xparser.getnumber('//*[@class="pg"]/label/span')
        if pagecount == 0:
            pagecount = pagecount + 1

        for page in range(1, pagecount + 1, 1):
            comment_url = Flash8Comments.COMMENT_URL.format(docurl=docurl,
                                                            page=page)
            self.storeurl(comment_url, params.originalurl,
                          Flash8Comments.STEP_3, {'page': page})
Code Example #8
    def step3bbs(self, params):
        Logger.getlogging().info("Chinavaluecomments.STEP_3")
        # Step3: fetch the URL set in Step2, get all comments and extract them
        params.content = params.content[1:len(params.content) - 1]
        commentsinfo = json.loads(params.content)
        comments_count = commentsinfo['RecordCount']
        # Incremental check
        cmtnum = URLStorage.getcmtnum(params.originalurl)
        if cmtnum >= comments_count:
            return
        URLStorage.setcmtnum(params.originalurl, comments_count)

        comments = []
        for index in range(0, len(commentsinfo['CommentObjs'])):
            # Extract the content and the timestamp
            cmti = CommentInfo()
            cmti.content = commentsinfo['CommentObjs'][index]['Content']
            tm = TimeUtility.getuniformtime(
                TimeUtility.getuniformtime(
                    commentsinfo['CommentObjs'][index]['AddTime'],
                    u'%Y-%m-%d %H:%M'))
            if URLStorage.storeupdatetime(params.originalurl, tm):
                comments.append(cmti)

        # Save the collected comments
        if len(comments) > 0:
            self.commentstorage.store(params.originalurl, comments)
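
The STEP_3 handlers in this and the following examples deduplicate by publish time: URLStorage.storeupdatetime is expected to return True only for comments newer than the last stored update time for the URL. Below is a minimal sketch of that contract under that assumption; LastTimeStore is a hypothetical in-memory stand-in, and timestamps are kept as 'YYYY-MM-DD HH:MM:SS' strings so lexicographic comparison matches chronological order.

# Sketch of the time-based dedup used by the STEP_3 handlers. LastTimeStore
# is a hypothetical stand-in for URLStorage.storeupdatetime; it keeps only
# comments whose timestamp is newer than the last one stored for the url.
class LastTimeStore(object):
    times = {}

    @classmethod
    def storeupdatetime(cls, url, tm):
        # Accept tm only if it is newer than anything stored so far
        if tm <= cls.times.get(url, ''):
            return False
        cls.times[url] = tm
        return True


url = 'http://example.com/post/1'
LastTimeStore.storeupdatetime(url, '2017-01-15 00:00:00')   # last crawl time
comments = [('old comment', '2017-01-01 10:00:00'),
            ('new comment', '2017-02-01 09:30:00')]
kept = [text for text, tm in comments if LastTimeStore.storeupdatetime(url, tm)]
print(kept)  # only the comment published after the last crawl survives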
Code Example #9
    def step2(self, params):
        """"""
        uniqid = params.customized['uniqid']
        domain = params.customized['domain']
        url = params.customized['url']
        jsondata = json.loads(params.content)
        comments_count = int(jsondata['show']['total_num'])
        # Check whether the comment count has grown: if not, return; otherwise store the new count
        cmtnum = URLStorage.getcmtnum(params.originalurl)
        if cmtnum >= int(comments_count):
            return
        URLStorage.setcmtnum(params.originalurl, int(comments_count))

        # 3. Build the URLs that fetch every comment page
        page_max = int(
            math.ceil(float(comments_count) / HuyaComments.DEFAULT_PAGE_SIZE))
        for page in range(1, page_max + 1, 1):
            #num = (page - 1)*HuyaComments.DEFAULT_PAGE_SIZE
            comments_url = HuyaComments.COMMENTS_URL.format(uniqid=uniqid,
                                                            domain=domain,
                                                            url=url,
                                                            page=page)
            self.storeurl(comments_url, params.originalurl,
                          HuyaComments.STEP_3)
Code Example #10
    def process(self, params):
        Logger.getlogging().info(params.url)
        try:
            if params.step == SeventeenKComments.STEP_1:
                #Step1: derive the URL of the first comment page from the original URL.
                #Logger.getlogging().info("proparam.step is None")
                # Extract the thread id (docurl) from the bbs.17k.com thread URL;
                # the (\d+) capture below is that id
                docurl = self.r.parse(
                    '^http://bbs\.17k\.com\/thread-(\d+)-\d+-1\.html',
                    params.originalurl)[0]
                #Logger.getlogging().debug(docurl)
                # The first comment page URL is http://bbs.17k.com/thread-{docurl}-1-1.html
                commentinfo_url = 'http://bbs.17k.com/thread-{docurl}-1-1.html'.format(
                    docurl=docurl)
                self.storeurl(commentinfo_url, params.originalurl,
                              SeventeenKComments.STEP_2, {'docurl': docurl})

            elif params.step == SeventeenKComments.STEP_2:
                # docurl passed down from STEP_1
                docurl = params.customized['docurl']
                # Step2: from the URL set in Step1, read the total comment count and build the URLs of the remaining comment pages.
                #Logger.getlogging().info("params.step == 2")
                # Read the total comment count from the page header
                xparser = XPathUtility(params.content)
                commentsinfo = xparser.getnumber(
                    '//*[@class="hm ptn"]/span[5]')

                #Logger.getlogging().debug(comments_count / self.page_size)
                #Logger.getlogging().debug(math.ceil(comments_count / self.page_size))

                # Save the page comment count (skip if it has not grown)
                cmtnum = URLStorage.getcmtnum(params.originalurl)
                if cmtnum >= int(commentsinfo):
                    return
                URLStorage.setcmtnum(params.originalurl, int(commentsinfo))

                # Read the total number of comment pages from the pager,
                # then loop over COMMENT_URL with page=1..pagecount to build and store every comment-page URL
                pagecount = xparser.getnumber('//*[@class="pg"]/label/span')

                for page in range(1, pagecount + 1, 1):
                    comment_url = SeventeenKComments.COMMENT_URL.format(
                        docurl=docurl, page=page)
                    self.storeurl(comment_url, params.originalurl,
                                  SeventeenKComments.STEP_3, {'page': page})

            elif params.step == SeventeenKComments.STEP_3:
                # Step3: fetch the URL set in Step2, get all comments and extract them
                #Logger.getlogging().info("params.step == 3")
                page = params.customized['page']
                xparser = XPathUtility(params.content)
                commentsinfo = xparser.getcomments(
                    '//*[contains(@id,"postmessage")]')
                commentstime = self.r.parse(ur'发表于 (\d+-\d+-\d+ \d+:\d+)</em>',
                                            params.content)
                comments = []

                #Get the comments
                # Set the actual starting index (the first entry on page 1 is skipped)
                if page == 1:
                    startIndex = 1
                else:
                    startIndex = 0
                for index in range(startIndex, len(commentstime), 1):
                    cmti = CommentInfo()
                    if URLStorage.storeupdatetime(params.originalurl,
                                                  commentstime[index] + ':00'):
                        # Keep only comments added since the last update (compared by time)
                        cmti.content = commentsinfo[index]
                        comments.append(cmti)
                # Save the collected comments
                if len(comments) > 0:
                    self.commentstorage.store(params.originalurl, comments)

            else:
                Logger.getlogging().error(
                    'proparam.step == {step}'.format(step=params.step))

        except Exception:
            traceback.print_exc()
Code Example #11
    def process(self, params):
        Logger.getlogging().info(params.url)
        try:
            if params.step == zhulangComments.STEP_1:
                #Step1: derive the comment-page URL parameters from the original URL.
                articleId = self.r.parse('http://www.zhulang.com/(\d+)/',
                                         params.originalurl)[0]

                # Build the comment URL (first page)
                comments_url = zhulangComments.COMMENT_URL % (articleId, 1)
                self.storeurl(comments_url, params.originalurl,
                              zhulangComments.STEP_2, {'articleId': articleId})

            elif params.step == zhulangComments.STEP_2:
                # Comment parameters passed down from STEP_1
                articleId = params.customized['articleId']

                # Get the total comment count
                comment_count = float(self.r.getid('total', params.content))
                if comment_count == 0:
                    return

                # Incremental check
                cmtnum = URLStorage.getcmtnum(params.originalurl)
                if cmtnum >= comment_count:
                    return
                URLStorage.setcmtnum(params.originalurl, comment_count)

                # Number of comment pages
                page_count = int(
                    math.ceil(comment_count / zhulangComments.PAGE_SIZE))

                # Build the URL list
                for page in range(0, page_count, 1):
                    url = zhulangComments.COMMENT_URL % (articleId, page)
                    self.storeurl(url, params.originalurl,
                                  zhulangComments.STEP_3)

            elif params.step == zhulangComments.STEP_3:
                # Step3: fetch the URL set in Step2, get all comments and extract them
                Logger.getlogging().info("params.step == 3")
                # Extract all comments
                comments = self.r.parse(r'<p class=\\"cmt-txt\\">(.+?)<\\/p>',
                                        params.content)

                # Extract all comment timestamps
                commenttimes = self.r.parse(
                    r'<span class=\\"cmt-time\\">(.+?)<\\/span>',
                    params.content)

                commentsInfo = []
                # Iterate over all comments
                for index in range(0, len(comments), 1):
                    # Extract the timestamp
                    publicTime = commenttimes[index]
                    if URLStorage.storeupdatetime(params.originalurl,
                                                  publicTime):
                        cmti = CommentInfo()
                        x = json.loads('{"comment":"%s"}' %
                                       comments[index].encode('utf8'))
                        cmti.content = (x['comment'])
                        commentsInfo.append(cmti)

                # Save the collected comments
                if len(commentsInfo) > 0:
                    self.commentstorage.store(params.originalurl, commentsInfo)
            else:
                Logger.getlogging().error(
                    'proparam.step == {step}'.format(step=params.step))
        except Exception:
            traceback.print_exc()
Code Example #12
    def process(self, params):
        Logger.getlogging().info(params.url)
        try:
            if params.step == Rain8Comments.STEP_1:
                #Step1: derive the comment-page URL parameters from the original URL.
                articleId = self.r.parse('http://\w+\.tadu\.com/\w+/(\d+).*', params.originalurl)[0]

                # Build the comment URL (first page)
                comments_url = Rain8Comments.COMMENT_URL.format(articleId=articleId, page=1)
                self.storeurl(comments_url, params.originalurl, Rain8Comments.STEP_2, {'articleId': articleId})

            elif params.step == Rain8Comments.STEP_2:
                # Comment parameters passed down from STEP_1
                articleId = params.customized['articleId']

                # Get the total comment count
                #comment_count = float(self.r.getid('total', params.content))
                xparser = XPathUtility(params.content)
                countstr = xparser.getstring('//h4')
                if not self.r.search(u'\d+', countstr):
                    return
                comment_count = int(self.r.parse(u'(\d+)', countstr)[1])
                if comment_count == 0:
                    return

                # Incremental check
                cmtnum = URLStorage.getcmtnum(params.originalurl)
                if cmtnum >= comment_count:
                    return
                URLStorage.setcmtnum(params.originalurl, comment_count)

                # Number of comment pages
                totalPage = int(math.ceil(float(comment_count) / TaDuComments.PAGE_SIZE))

                # Build the URL list
                for page in range(1, totalPage+1 , 1):
                    url = TaDuComments.COMMENT_URL.format(articleId = articleId,page = page)
                    self.storeurl(url, params.originalurl, TaDuComments.STEP_3)

            elif params.step == TaDuComments.STEP_3:
                # Step3: fetch the URL set in Step2, get all comments and extract them
                Logger.getlogging().info("params.step == 3")
                # Extract all comments
                xparser = XPathUtility(params.content)
                comments = xparser.getlist('//ul[@class="cmetlist bookreview-cmetlist"]/li/div/div[2]/p')

                # Extract all comment timestamps
                commenttimes = xparser.getlist('//ul[@class="cmetlist bookreview-cmetlist"]/li/div/div[2]/span')

                commentsInfo = []
                # Iterate over all comments
                for index in range(0, int(len(comments)), 1):
                    # Extract the timestamp
                    publicTime = commenttimes[index][3:]
                    cmti = CommentInfo()
                    tm = TimeUtility.getuniformtime(publicTime,'%Y-%m-%d %H:%M')
                    if URLStorage.storeupdatetime(params.originalurl, tm):
                        cmti.content = comments[index].strip()
                        commentsInfo.append(cmti)
                # Save the collected comments
                if len(commentsInfo) > 0:
                    self.commentstorage.store(params.originalurl, commentsInfo)
            else:
                Logger.getlogging().error('proparam.step == {step}'.format(step = params.step))
        except Exception:
            traceback.print_exc()
Code Example #13
    def process(self, params):
        Logger.getlogging().info(params.url)
        try:
            if params.step == HongXiuComments.STEP_1:
                #Step1: derive the comment-page URL parameters from the original URL.
                bookId = self.r.parse('http://\w+\.hongxiu\.com/\w+/(\d+).*',
                                      params.originalurl)[0]

                # Build the comment URL (first page)
                comments_url = HongXiuComments.COMMENT_URL.format(
                    bookId=bookId, page=1)
                self.storeurl(comments_url, params.originalurl,
                              HongXiuComments.STEP_2, {'bookId': bookId})
            elif params.step == HongXiuComments.STEP_2:
                # Comment parameters passed down from STEP_1
                bookId = params.customized['bookId']

                # Get the total comment count
                params.content = (params.content).encode('utf-8')
                comment_count = int(self.r.parse(
                    'strong id="htmlrecordcnt" class="total">(\d+)</strong>条',
                    params.content)[0])
                if comment_count == 0:
                    return

                # Incremental check
                cmtnum = URLStorage.getcmtnum(params.originalurl)
                if cmtnum >= comment_count:
                    return
                URLStorage.setcmtnum(params.originalurl, comment_count)

                # Number of comment pages
                totalPage = int(
                    math.ceil(
                        float(comment_count) / HongXiuComments.PAGE_SIZE))

                # Build the URL list
                for page in range(1, totalPage + 1, 1):
                    url = HongXiuComments.COMMENT_URL.format(bookId=bookId,
                                                             page=page)
                    self.storeurl(url, params.originalurl,
                                  HongXiuComments.STEP_3)

            elif params.step == HongXiuComments.STEP_3:
                # Step3: fetch the URL set in Step2, get all comments and extract them
                Logger.getlogging().info("params.step == 3")
                # Extract all comments
                soup = BeautifulSoup(params.content, 'html5lib')
                comments = soup.select('.inner')

                # Extract all comment timestamps
                commenttimes = soup.select('.postTime')

                commentsInfo = []
                # Iterate over all comments
                for index in range(0, int(len(comments)), 1):
                    # Extract the timestamp
                    publicTime = self.r.parse(
                        ur'(.*) 发表', commenttimes[index].get_text())[0]
                    tm = getuniformtime(publicTime)
                    if URLStorage.storeupdatetime(params.originalurl, tm):
                        cmti = CommentInfo()
                        cmti.content = self.r.parse(
                            ur'发表([\s\S]*)',
                            comments[index].get_text().strip())[0]
                        commentsInfo.append(cmti)
                # Save the collected comments
                if len(commentsInfo) > 0:
                    self.commentstorage.store(params.originalurl, commentsInfo)

            else:
                Logger.getlogging().error(
                    'proparam.step == {step}'.format(step=params.step))
        except Exception:
            traceback.print_exc()
Code Example #14
    def process(self, params):

        try:
            if params.step == BookComments.STEP_1:
                #Step1: derive the key from the original URL and build the first comment-page URL
                urlsplit = params.originalurl.split('/')
                if len(urlsplit[-1].strip()) > 0:
                    key = urlsplit[-1]
                else:
                    key = urlsplit[-2]

                field = params.customized['field']
                if field == 'manhua':
                    comments_url = self.MANHUA_COMMENTS_URL.format(key=key,
                                                                   pg=1)

                    hxpath = XPathUtility(params.content)
                    pubTime = hxpath.getstring(
                        '//*[@class="synopsises_font"]/li[2]/text()', ' ')
                    if pubTime:
                        pubTime = pubTime[0]
                        pubTime = re.findall('\d+/\d+/\d+', params.content)[0]
                        info = BaseInfoStorage.getbasicinfo(params.originalurl)
                        info.pubtime = pubTime
                        BaseInfoStorage.store(params.originalurl, info)

                elif field == 'book':
                    comments_url = self.BOOK_COMMENTS_URL.format(key=key, pg=1)
                else:
                    return
                self.storeurl(comments_url, params.originalurl, self.STEP_2, {
                    'key': key,
                    'field': field
                })

            elif params.step == BookComments.STEP_2:

                html = etree.HTML(params.content)
                comments_total_xpath = html.xpath(
                    '//*[@class="content_title"]/span/a')

                if comments_total_xpath:
                    comments_total_str = self.r.parse(
                        u'(\d+)',
                        comments_total_xpath[0].text.replace(',', ''))
                    if not comments_total_str:
                        return

                    comments_total = int(comments_total_str[0])
                    cmtnum = URLStorage.getcmtnum(params.originalurl)
                    if cmtnum >= comments_total:
                        return
                    URLStorage.setcmtnum(params.originalurl, comments_total)

                    # Extract the comments on the first page
                    self.geturlcomments(params)

                    if comments_total > self.limit:
                        page_max = int(
                            math.ceil(float(comments_total) / self.limit))

                        # Build the comment URLs for every page after the first
                        key = params.customized['key']
                        field = params.customized['field']
                        if field == 'manhua':
                            for page in range(2, page_max + 1, 1):
                                comments_url = self.MANHUA_COMMENTS_URL.format(
                                    key=key, pg=page)
                                self.storeurl(comments_url, params.originalurl,
                                              self.STEP_3)
                        elif field == 'book':
                            for page in range(2, page_max + 1, 1):
                                comments_url = self.BOOK_COMMENTS_URL.format(
                                    key=key, pg=page)
                                self.storeurl(comments_url, params.originalurl,
                                              self.STEP_3)
                        else:
                            return

            elif params.step == BookComments.STEP_3:
                # Extract the comments
                self.geturlcomments(params)

            else:
                pass
        except:
            Logger.printexception()
Code Example #15
    def process(self, params):

        try:
            if params.step == NewsComments.STEP_1:
                #Step1: derive the key from the original URL and build the first comment-page URL
                urlsplit = params.originalurl.split('/')
                field = params.customized['field']
                if field == 'news':
                    fnamesplit = urlsplit[-1].split('.')
                    key = fnamesplit[0]
                    comments_url = self.NEWS_COMMENTS_URL.format(key=key, pg=1)
                elif field == 'comic':
                    if len(urlsplit[-1].strip()) > 0:
                        key = urlsplit[-1]
                    else:
                        key = urlsplit[-2]
                    comments_url = self.COMIC_COMMENTS_UTL.format(key=key, pg=1)
                else:
                    return
                self.storeurl(comments_url, params.originalurl, self.STEP_2, {'key': key, 'field': field})

            elif params.step == NewsComments.STEP_2:

                html = etree.HTML(params.content)
                comments_total_xpath = html.xpath('//*[contains(@class,"li_more")]/strong[1]')

                if comments_total_xpath:
                    comments_total = int(comments_total_xpath[0].text)
                    cmtnum = URLStorage.getcmtnum(params.originalurl)
                    if cmtnum >= comments_total:
                        return
                    URLStorage.setcmtnum(params.originalurl, comments_total)

                    # Extract the comments on the first page
                    self.geturlcomments(params)

                    key = params.customized['key']
                    field = params.customized['field']
                    if field == 'news' and comments_total > self.NEWS_LIMIT:
                        page_max = int(math.ceil(float(comments_total)/self.NEWS_LIMIT))

                        # Build the comment URLs for every page after the first
                        for page in range(2, page_max+1, 1):
                            comments_url = self.NEWS_COMMENTS_URL.format(key=key, pg=page)
                            self.storeurl(comments_url, params.originalurl, self.STEP_3)
                    elif field == 'comic' and comments_total > self.COMMIC_LIMIT:
                        page_max = int(math.ceil(float(comments_total) / self.COMMIC_LIMIT))

                        # Build the comment URLs for every page after the first
                        for page in range(2, page_max + 1, 1):
                            comments_url = self.COMIC_COMMENTS_UTL.format(key=key, pg=page)
                            self.storeurl(comments_url, params.originalurl, self.STEP_3)
                    else:
                        return

            elif params.step == NewsComments.STEP_3:
                # Extract the comments
                self.geturlcomments(params)

            else:
                pass
        except:
            Logger.printexception()
Code Example #16
    def process(self, params):
        Logger.getlogging().info(params.url)
        try:
            if params.step == Xie17NewsComments.STEP_1:
                #Step1: derive the comment-page URL parameters from the original URL.
                articleId = self.r.parse('^http://xiaoshuo\.17xie\.com/book/(\d+)/', params.originalurl)[0]

                # Build the comment URL (first page)
                comments_url = Xie17NewsComments.COMMENT_URL % (articleId, 1)
                self.storeurl(comments_url, params.originalurl, Xie17NewsComments.STEP_2, {'articleId': articleId})

            elif params.step == Xie17NewsComments.STEP_2:
                # Comment parameters passed down from STEP_1
                articleId = params.customized['articleId']

                # Get the total comment count
                comment_count = float(self.r.parse(ur'共(\d+)人说过', params.content)[0])
                if comment_count == 0:
                    return

                # Incremental check
                cmtnum = URLStorage.getcmtnum(params.originalurl)
                if cmtnum >= comment_count:
                    return
                URLStorage.setcmtnum(params.originalurl, comment_count)

                # Number of comment pages
                page_count = int(math.ceil(comment_count / Xie17NewsComments.PAGE_SIZE))

                # Build the URL list
                for page in range(1, page_count + 1, 1):
                    url = Xie17NewsComments.COMMENT_URL % (articleId, page)
                    self.storeurl(url, params.originalurl, Xie17NewsComments.STEP_3)

            elif params.step == Xie17NewsComments.STEP_3:
                # Step3: fetch the URL set in Step2, get all comments and extract them
                Logger.getlogging().info("params.step == 3")
                xparser = XPathUtility(params.content)
                # Extract all comments
                comments = xparser.getcomments('/html/body/ul/li[2]/dl/dd')
                # Extract all comment timestamps
                commenttimes = xparser.xpath('/html/body/ul/li[2]/dl/dt/text()')

                commentsInfo = []
                # Iterate over all comments
                for index in range(0, int(len(commenttimes)), 1):
                    # Extract the timestamp
                    if self.r.search(ur'\d+年\d+月',commenttimes[index].strip()):
                        tm = TimeUtility.getuniformtime(str(commenttimes[index]).strip(), '%Y年%m月')
                    else:
                        tm = getuniformtime(commenttimes[index].strip())

                    if URLStorage.storeupdatetime(params.originalurl, tm):
                        cmti = CommentInfo()
                        comment = comments[index * 3] + comments[index * 3 + 1] + comments[index * 3 + 2]
                        cmti.content = comment
                        commentsInfo.append(cmti)

                # Save the collected comments
                if len(commentsInfo) > 0:
                    self.commentstorage.store(params.originalurl, commentsInfo)
            else: