示例#1
0
    def parse(self, response):
        """Yield a follow-up request for every article on the listing page.

        Each article's title/tags/summary are scraped here and forwarded to
        ``parse_dir_contents`` via the request ``meta``.
        """
        content_xpath = "//" + XpathUtil.xpath_for_class('media__content')
        for article in response.xpath(content_xpath):
            item = GuardianItem()

            def first_text(css_class, tail):
                # First matching text node under this article, defaulting to ""
                # and trimmed of surrounding spaces/newlines.
                nodes = article.xpath(
                    XpathUtil.xpath_for_class(css_class) + tail).extract()
                return StringUtil.get_first(nodes, "").strip(' \n')

            item['title'] = first_text('media__title', "/a/text()")
            item['tags'] = first_text('media__tag', "/text()")
            item['summary'] = first_text('media__summary', "/text()")

            # Relative article link, resolved against the current page URL.
            href_parts = article.xpath(
                XpathUtil.xpath_for_class("media__title") +
                "/a/@href").extract()
            article_url = ''.join(href_parts)

            yield scrapy.Request(response.urljoin(article_url),
                                 callback=self.parse_dir_contents,
                                 meta=item)
示例#2
0
    def parse(self, response):
        """Follow each listed article, attaching its scraped metadata.

        For every ``media__content`` node, builds a ``GuardianItem`` with
        title/tags/summary and requests the article page, passing the item
        through the request ``meta``.
        """
        articles = response.xpath("//" + XpathUtil.xpath_for_class("media__content"))
        for article in articles:
            item = GuardianItem()

            # (item field, css class, xpath tail) per scraped attribute.
            field_specs = (
                ("title", "media__title", "/a/text()"),
                ("tags", "media__tag", "/text()"),
                ("summary", "media__summary", "/text()"),
            )
            for field, css_class, tail in field_specs:
                extracted = article.xpath(
                    XpathUtil.xpath_for_class(css_class) + tail
                ).extract()
                item[field] = StringUtil.get_first(extracted, "").strip(" \n")

            # Join handles both a single href and an empty extraction result.
            article_url = "".join(
                article.xpath(XpathUtil.xpath_for_class("media__title") + "/a/@href").extract()
            )

            yield scrapy.Request(
                response.urljoin(article_url),
                callback=self.parse_dir_contents,
                meta=item,
            )
示例#3
0
    def parse_dir_contents(self, response):
        """Populate the item carried in request meta with the article page content."""
        # NOTE(review): response.meta also contains Scrapy's own bookkeeping
        # keys alongside the item fields set in parse() — confirm downstream
        # consumers of the yielded item expect that.
        item = response.meta

        header_nodes = response.xpath(
            "//" + XpathUtil.xpath_for_class("story-body__h1") + "/text()"
        ).extract()
        item["header"] = StringUtil.get_first(header_nodes, "").strip(" \n")

        item["url"] = response.url

        # Concatenate all paragraph text nodes of the story body.
        paragraphs = response.xpath(
            "//" + XpathUtil.xpath_for_class("story-body__inner") + "//p/text()"
        ).extract()
        item["body"] = " ".join(paragraphs).strip(" \n")

        yield item
示例#4
0
    def parse_dir_contents(self, response):
        """Fill in header, url and body on the item passed via request meta."""
        item = response.meta

        # Headline: first matching h1 text node, "" when absent, trimmed.
        header_xpath = "//" + XpathUtil.xpath_for_class("story-body__h1") + "/text()"
        header = StringUtil.get_first(response.xpath(header_xpath).extract(), "")
        header = header.strip(' \n')

        # Article body: all paragraph text nodes joined with single spaces.
        body_xpath = "//" + XpathUtil.xpath_for_class("story-body__inner") + "//p/text()"
        body = ' '.join(response.xpath(body_xpath).extract()).strip(' \n')

        item['header'] = header
        item['url'] = response.url
        item['body'] = body
        yield item
示例#5
0
    def test_get_first_should_return_second_argument_in_empty_list(self):
        """get_first falls back to the supplied default for an empty list."""
        empty_list = []

        actual = StringUtil.get_first(empty_list, "d")

        self.assertEqual(actual, "d")
示例#6
0
    def test_get_first_should_return_first_item_in_nonempty_list(self):
        """get_first ignores the default and returns the head of a non-empty list."""
        items = ["a", "b", "c"]

        actual = StringUtil.get_first(items, "d")

        self.assertEqual(actual, "a")