예제 #1
0
    def test_suppressed_feed(self):
        """Verify that suppressed works show up in a paginated admin feed,
        one work per page, with correct navigation links.
        """

        # Create two suppressed works; with a page size of one, each
        # occupies its own page of the feed.
        suppressed_works = []
        for _ in range(2):
            work = self._work(with_open_access_download=True)
            work.license_pools[0].suppressed = True
            suppressed_works.append(work)

        pagination = Pagination(size=1)
        annotator = TestAnnotator()
        titles = [work.title for work in suppressed_works]

        def make_page(pagination):
            return AdminFeed.suppressed(
                _db=self._db, title="Hidden works", url=self._url,
                annotator=annotator, pagination=pagination)

        parsed = feedparser.parse(unicode(make_page(pagination)))
        eq_(1, len(parsed['entries']))
        first_title = parsed['entries'][0].title
        assert first_title in titles
        titles.remove(first_title)
        [remaining_title] = titles

        # Both 'start' and 'up' point at the top-level grouped feed.
        for rel in ('start', 'up'):
            [link] = self.links(parsed, rel)
            eq_(annotator.groups_url(None), link['href'])
            eq_(annotator.top_level_title(), link['title'])

        [next_link] = self.links(parsed, 'next')
        eq_(annotator.suppressed_url(pagination.next_page), next_link['href'])

        # The first page has no 'previous' link.
        eq_([], self.links(parsed, 'previous'))

        # The second page links back to the first and contains the
        # work that was not on page one.
        parsed = feedparser.parse(unicode(make_page(pagination.next_page)))
        [previous] = self.links(parsed, 'previous')
        eq_(annotator.suppressed_url(pagination), previous['href'])
        eq_(1, len(parsed['entries']))
        eq_(remaining_title, parsed['entries'][0]['title'])
예제 #2
0
 def featured_works(self, _db, facets=None):
     """Return the works to feature for this lane, as a list.

     Series membership is what qualifies a work, so quality and
     availability restrictions are deliberately relaxed.
     """
     library = self.get_library(_db)
     series_facets = FeaturedSeriesFacets(
         library,
         # A work in the right series is featured regardless of its
         # quality or current availability.
         collection=FeaturedSeriesFacets.COLLECTION_FULL,
         availability=FeaturedSeriesFacets.AVAILABLE_ALL,
         order=None,
     )
     if facets:
         # Carry over the caller's entry point, if any.
         series_facets.entrypoint = facets.entrypoint
     page = Pagination(size=library.featured_lane_size)
     return self.works(_db, facets=series_facets, pagination=page).all()
예제 #3
0
    def test_suppressed_feed(self):
        """A paginated feed of suppressed works shows each suppressed,
        non-superceded work exactly once, one per page.
        """

        work1 = self._work(with_open_access_download=True)
        work1.license_pools[0].suppressed = True

        work2 = self._work(with_open_access_download=True)
        work2.license_pools[0].suppressed = True

        # A suppressed pool that is also superceded must be excluded
        # from the feed.
        superceded_work = self._work(with_open_access_download=True)
        superceded_work.license_pools[0].suppressed = True
        superceded_work.license_pools[0].superceded = True

        pagination = Pagination(size=1)
        annotator = MockAnnotator(self._default_library)
        expected_titles = [work1.title, work2.title]

        def feed_page(page):
            return AdminFeed.suppressed(
                _db=self._db,
                title="Hidden works",
                url=self._url,
                annotator=annotator,
                pagination=page,
            )

        parsed = feedparser.parse(str(feed_page(pagination)))
        entries = parsed["entries"]
        assert len(entries) == 1
        assert entries[0].title in expected_titles
        expected_titles.remove(entries[0].title)
        [remaining_title] = expected_titles

        # Both 'start' and 'up' point at the top-level grouped feed.
        for rel in ("start", "up"):
            [link] = self.links(parsed, rel)
            assert link["href"] == annotator.groups_url(None)
            assert link["title"] == annotator.top_level_title()

        [next_link] = self.links(parsed, "next")
        assert next_link["href"] == annotator.suppressed_url(pagination.next_page)

        # The first page has no 'previous' link.
        assert self.links(parsed, "previous") == []

        # The second page links back to the first and holds the
        # remaining work.
        parsed = feedparser.parse(str(feed_page(pagination.next_page)))
        [previous] = self.links(parsed, "previous")
        assert previous["href"] == annotator.suppressed_url(pagination)
        assert len(parsed["entries"]) == 1
        assert parsed["entries"][0]["title"] == remaining_title

        # The third page is empty but still links back to the second.
        parsed = feedparser.parse(str(feed_page(pagination.next_page.next_page)))
        [previous] = self.links(parsed, "previous")
        assert previous["href"] == annotator.suppressed_url(pagination.next_page)
        assert len(parsed["entries"]) == 0
예제 #4
0
    def test_complaints_feed(self):
        """Test the ability to show a paginated feed of works with complaints.

        Only works with at least one complaint appear, one per page;
        works without complaints are excluded.
        """

        # Use a name that doesn't shadow the builtin `type`.
        complaint_types = iter(Complaint.VALID_TYPES)
        type1 = next(complaint_types)
        type2 = next(complaint_types)

        work1 = self._work(
            "fiction work with complaint",
            language="eng",
            fiction=True,
            with_open_access_download=True,
        )
        # The complaint objects themselves are never referenced again;
        # only their side effect of being attached to the license pools
        # matters, so the return values are not bound.
        self._complaint(
            work1.license_pools[0],
            type1,
            "work1 complaint1 source",
            "work1 complaint1 detail",
        )
        self._complaint(
            work1.license_pools[0],
            type1,
            "work1 complaint2 source",
            "work1 complaint2 detail",
        )
        self._complaint(
            work1.license_pools[0],
            type2,
            "work1 complaint3 source",
            "work1 complaint3 detail",
        )
        work2 = self._work(
            "nonfiction work with complaint",
            language="eng",
            fiction=False,
            with_open_access_download=True,
        )
        self._complaint(
            work2.license_pools[0],
            type2,
            "work2 complaint1 source",
            "work2 complaint1 detail",
        )
        # Works without complaints: these must NOT appear in the feed.
        self._work(
            "fiction work without complaint",
            language="eng",
            fiction=True,
            with_open_access_download=True,
        )
        self._work(
            "nonfiction work without complaint",
            language="eng",
            fiction=False,
            with_open_access_download=True,
        )

        facets = Facets.default(self._default_library)
        pagination = Pagination(size=1)
        annotator = MockAnnotator(self._default_library)

        def make_page(pagination):
            return AdminFeed.complaints(
                library=self._default_library,
                title="Complaints",
                url=self._url,
                annotator=annotator,
                pagination=pagination,
            )

        first_page = make_page(pagination)
        parsed = feedparser.parse(str(first_page))
        assert 1 == len(parsed["entries"])
        assert work1.title == parsed["entries"][0]["title"]
        # Verify that the entry has acquisition links.
        links = parsed["entries"][0]["links"]
        open_access_links = [
            l
            for l in links
            if l["rel"] == "http://opds-spec.org/acquisition/open-access"
        ]
        assert 1 == len(open_access_links)

        # Make sure the navigation links are in place: 'start' and 'up'
        # both point at the top-level grouped feed.
        [start] = self.links(parsed, "start")
        assert annotator.groups_url(None) == start["href"]
        assert annotator.top_level_title() == start["title"]

        [up] = self.links(parsed, "up")
        assert annotator.groups_url(None) == up["href"]
        assert annotator.top_level_title() == up["title"]

        [next_link] = self.links(parsed, "next")
        assert (
            annotator.complaints_url(facets, pagination.next_page) == next_link["href"]
        )

        # This was the first page, so no previous link.
        assert [] == self.links(parsed, "previous")

        # Now get the second page and make sure it has a 'previous' link.
        second_page = make_page(pagination.next_page)
        parsed = feedparser.parse(str(second_page))
        [previous] = self.links(parsed, "previous")
        assert annotator.complaints_url(facets, pagination) == previous["href"]
        assert 1 == len(parsed["entries"])
        assert work2.title == parsed["entries"][0]["title"]
예제 #5
0
    def test_complaints_feed(self):
        """Test the ability to show a paginated feed of works with complaints.

        Only works with at least one complaint appear, one per page;
        works without complaints are excluded.
        """

        # Use a name that doesn't shadow the builtin `type`.
        complaint_types = iter(Complaint.VALID_TYPES)
        type1 = next(complaint_types)
        type2 = next(complaint_types)

        work1 = self._work("fiction work with complaint",
                           language="eng",
                           fiction=True,
                           with_open_access_download=True)
        # The complaint objects themselves are never referenced again;
        # only their side effect of being attached to the license pools
        # matters, so the return values are not bound.
        self._complaint(work1.license_pools[0], type1,
                        "work1 complaint1 source",
                        "work1 complaint1 detail")
        self._complaint(work1.license_pools[0], type1,
                        "work1 complaint2 source",
                        "work1 complaint2 detail")
        self._complaint(work1.license_pools[0], type2,
                        "work1 complaint3 source",
                        "work1 complaint3 detail")
        work2 = self._work("nonfiction work with complaint",
                           language="eng",
                           fiction=False,
                           with_open_access_download=True)
        self._complaint(work2.license_pools[0], type2,
                        "work2 complaint1 source",
                        "work2 complaint1 detail")
        # Works without complaints: these must NOT appear in the feed.
        self._work("fiction work without complaint",
                   language="eng",
                   fiction=True,
                   with_open_access_download=True)
        self._work("nonfiction work without complaint",
                   language="eng",
                   fiction=False,
                   with_open_access_download=True)

        facets = Facets.default()
        pagination = Pagination(size=1)
        annotator = TestAnnotator()

        def make_page(pagination):
            return AdminFeed.complaints(_db=self._db,
                                        title="Complaints",
                                        url=self._url,
                                        annotator=annotator,
                                        pagination=pagination)

        first_page = make_page(pagination)
        parsed = feedparser.parse(unicode(first_page))
        eq_(1, len(parsed['entries']))
        eq_(work1.title, parsed['entries'][0]['title'])

        # Make sure the navigation links are in place: 'start' and 'up'
        # both point at the top-level grouped feed.
        [start] = self.links(parsed, 'start')
        eq_(annotator.groups_url(None), start['href'])
        eq_(annotator.top_level_title(), start['title'])

        [up] = self.links(parsed, 'up')
        eq_(annotator.groups_url(None), up['href'])
        eq_(annotator.top_level_title(), up['title'])

        [next_link] = self.links(parsed, 'next')
        eq_(annotator.complaints_url(facets, pagination.next_page),
            next_link['href'])

        # This was the first page, so no previous link.
        eq_([], self.links(parsed, 'previous'))

        # Now get the second page and make sure it has a 'previous' link.
        second_page = make_page(pagination.next_page)
        parsed = feedparser.parse(unicode(second_page))
        [previous] = self.links(parsed, 'previous')
        eq_(annotator.complaints_url(facets, pagination), previous['href'])
        eq_(1, len(parsed['entries']))
        eq_(work2.title, parsed['entries'][0]['title'])