Example #1
    def make_book(self, _):
        # Build the list of chapter URLs and fetch every page concurrently
        urls = OrdersOfMagnitude.generate_links()
        print(urls)
        pages = web.download_async(urls)

        # Turn each downloaded page into a chapter, preserving order
        chapters = list(map(OrdersOfMagnitude.make_chapter, pages))

        return Book(self.TITLE, self.get_id(), "en-US", self.METADATA, chapters)
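
Every example on this page calls a helper named web.download_async, whose implementation is not part of these excerpts. A minimal sketch of what such a helper could look like, assuming a thread pool over requests that returns the response bodies in the same order as the input URLs:

from concurrent.futures import ThreadPoolExecutor

import requests


def download_async(urls, max_workers=8):
    """Fetch every URL concurrently and return the response bodies in input order."""
    def fetch(url):
        response = requests.get(url)
        response.raise_for_status()
        return response.content

    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        # executor.map yields results in the same order as the input URLs
        return list(executor.map(fetch, urls))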
Example #2
    def make_book(self, _):
        """
        Create an up-to-date copy of the book Citadel
        :return: a Book corresponding to what is published online
        """
        pages = web.download_async(Citadel.generate_links())
        chapters = [Citadel.make_chapter(page) for page in pages]
        return Book(TITLE, self.get_id(), 'en-US', METADATA, chapters)
Example #3
    def make_book(self, _):
        links = [d["link"] for d in data]
        titles = [d["title"] for d in data]

        pages = web.download_async(links)
        chapters = [self.make_chapter(title, page) for title, page in zip(titles, pages)]

        return Book(self.TITLE, self.get_id(), self.LANGUAGE, self.METADATA, chapters)
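
Example #3 iterates over a sequence named data that is defined elsewhere in the class; the excerpt only shows that each entry carries a "link" and a "title" key. A hypothetical shape, with placeholder URLs:

# Hypothetical contents of `data`; the real entries come from the scraped index page
data = [
    {"title": "Chapter 1", "link": "https://example.com/chapter-1"},
    {"title": "Chapter 2", "link": "https://example.com/chapter-2"},
]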
Example #4
    def make_book(self, _):
        # links and titles are prepared earlier in the class; only the
        # download and assembly steps are shown in this excerpt
        pages = web.download_async(links)
        chapters = [self.make_chapter(title, page) for title, page in zip(titles, pages)]

        # Embed the cover image stored alongside the scraper
        cover_location = Scraper.get_relative_path('covers/worm-cover.jpg')
        with open(cover_location, 'rb') as f:
            photo = f.read()

        return Book(self.TITLE, self.get_id(), self.LANGUAGE, self.METADATA, chapters, photo)
Example #5
    def make_book(self, url):
        links = FanfictionDotNet.generate_links(requests.get(url).content)
        pages = web.download_async(links)
        # The first chapter page also carries the story's title, id and author
        first_tree = html.fromstring(pages[0])
        title = FanfictionDotNet.get_title(first_tree)
        book_id = FanfictionDotNet.get_id(first_tree)
        meta = {'author': FanfictionDotNet.get_author(first_tree)}

        chapters = list(map(FanfictionDotNet.make_chapter, pages))
        return Book(title, book_id, 'en-US', meta, chapters)
Example #6
    def setUpClass(cls):
        cls.links = list(citadel.generate_links())
        cls.pages = web.download_async(cls.links)
        cls.trees = [html.fromstring(page) for page in cls.pages]
        cls.titles = [citadel.extract_title(tree) for tree in cls.trees]
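
Example #6 is the body of a setUpClass hook; to run under unittest it would normally be decorated with @classmethod inside a TestCase. A hedged sketch of the surrounding class, where the class name and the sample test are assumptions and only the four setUpClass lines come from the excerpt:

import unittest

from lxml import html

import citadel
import web


class CitadelScrapeTest(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        # Download once for the whole class to keep the network cost low
        cls.links = list(citadel.generate_links())
        cls.pages = web.download_async(cls.links)
        cls.trees = [html.fromstring(page) for page in cls.pages]
        cls.titles = [citadel.extract_title(tree) for tree in cls.trees]

    def test_every_page_has_a_title(self):
        # Hypothetical check: every downloaded page yielded a non-empty title
        for title in self.titles:
            self.assertTrue(title)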