Example #1
    def test_lifecycle_with_worklist(self):
        facets = Facets.default(self._default_library)
        pagination = Pagination.default()
        lane = WorkList()
        lane.initialize(self._default_library)

        # Fetch a cached feed from the database. It comes out updated.
        refresher = MockFeedGenerator()
        args = (self._db, lane, facets, pagination, refresher)
        feed = CachedFeed.fetch(*args, max_age=0, raw=True)
        assert "This is feed #1" == feed.content

        assert pagination.query_string == feed.pagination
        assert facets.query_string == feed.facets
        assert None == feed.lane_id
        assert lane.unique_key == feed.unique_key

        # Fetch it again, with a high max_age, and it's cached!
        feed = CachedFeed.fetch(*args, max_age=1000, raw=True)
        assert "This is feed #1" == feed.content

        # Fetch it with a low max_age, and it gets updated again.
        feed = CachedFeed.fetch(*args, max_age=0, raw=True)
        assert "This is feed #2" == feed.content

        # The special constant CACHE_FOREVER means it's always cached.
        feed = CachedFeed.fetch(*args, max_age=CachedFeed.CACHE_FOREVER, raw=True)
        assert "This is feed #2" == feed.content
Example #2
File: lanes.py Project: lhuabu/circulation
 def overview_facets(self, _db, facets):
     """Convert a generic FeaturedFacets to some other faceting object,
     suitable for showing an overview of this WorkList in a grouped
     feed.
     """
     # TODO: Since the purpose of the recommendation feed is to
     # suggest books that can be borrowed immediately, it would be
     # better to set availability=AVAILABLE_NOW. However, this feed
     # is cached for so long that we can't rely on the availability
     # information staying accurate. It would be especially bad if
     # people borrowed all of the recommendations that were
     # available at the time this feed was generated, and then
     # recommendations that were unavailable when the feed was
     # generated became available.
     #
     # For now, it's better to show all books and let people put
     # the unavailable ones on hold if they want.
     #
     # TODO: It would be better to order works in the same order
     # they come from the recommendation engine, since presumably
     # the best recommendations are in the front.
     return Facets.default(
         self.get_library(_db), collection=facets.COLLECTION_FULL,
         availability=facets.AVAILABLE_ALL, entrypoint=facets.entrypoint,
     )
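
A caller hands this WorkList the generic FeaturedFacets used for grouped feeds and gets back an ordinary Facets object. A hedged usage sketch; the FeaturedFacets constructor arguments shown here are assumptions, not taken from this file:

    # Hypothetical caller of overview_facets().
    featured = FeaturedFacets(0.5, entrypoint=None)  # assumed signature
    overview = worklist.overview_facets(_db, featured)
    # Whatever the recommendation engine would prefer, the overview now
    # selects the full collection and all availabilities.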
Example #3
    def complaints(cls, library, title, url, annotator, pagination=None):
        _db = Session.object_session(library)
        facets = Facets.default(library)
        pagination = pagination or Pagination.default()

        q = LicensePool.with_complaint(library)
        results = pagination.modify_database_query(_db, q).all()

        if len(results) > 0:
            (pools, counts) = list(zip(*results))
        else:
            pools = ()

        works = [pool.work for pool in pools]
        feed = cls(_db, title, url, works, annotator)

        # Render a 'start' link
        top_level_title = annotator.top_level_title()
        start_uri = annotator.groups_url(None)
        AdminFeed.add_link_to_feed(feed.feed,
                                   href=start_uri,
                                   rel="start",
                                   title=top_level_title)

        # Render an 'up' link, identical to the 'start' link, to mark this as a top-level feed
        AdminFeed.add_link_to_feed(feed.feed,
                                   href=start_uri,
                                   rel="up",
                                   title=top_level_title)

        if len(works) > 0:
            # There are works in this list. Add a 'next' link.
            AdminFeed.add_link_to_feed(
                feed.feed,
                rel="next",
                href=annotator.complaints_url(facets, pagination.next_page),
            )

        if pagination.offset > 0:
            AdminFeed.add_link_to_feed(
                feed.feed,
                rel="first",
                href=annotator.complaints_url(facets, pagination.first_page),
            )

        previous_page = pagination.previous_page
        if previous_page:
            AdminFeed.add_link_to_feed(
                feed.feed,
                rel="previous",
                href=annotator.complaints_url(facets, previous_page),
            )

        annotator.annotate_feed(feed)
        return str(feed)
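
The link-building block above follows a fixed pattern: a 'next' link whenever the current page has entries, plus 'first' and 'previous' links whenever we are past the start of the list. A hypothetical helper, not part of the project, that captures the same rules:

    def add_pagination_links(feed, annotator, facets, pagination, has_entries):
        # Hypothetical refactoring of the link logic in complaints() above.
        if has_entries:
            # More results may follow; point at the next page.
            AdminFeed.add_link_to_feed(
                feed.feed, rel="next",
                href=annotator.complaints_url(facets, pagination.next_page))
        if pagination.offset > 0:
            # Past the first page; offer a way back to the start.
            AdminFeed.add_link_to_feed(
                feed.feed, rel="first",
                href=annotator.complaints_url(facets, pagination.first_page))
        previous_page = pagination.previous_page
        if previous_page:
            AdminFeed.add_link_to_feed(
                feed.feed, rel="previous",
                href=annotator.complaints_url(facets, previous_page))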
Example #4
    def test_do_generate(self):
        # When it's time to generate a feed, AcquisitionFeed.page
        # is called with the right arguments.
        class MockAcquisitionFeed(object):
            called_with = None
            @classmethod
            def page(cls, **kwargs):
                cls.called_with = kwargs
                return "here's your feed"

        # Test our ability to generate a single feed.
        script = CacheFacetListsPerLane(self._db, testing=True, cmd_args=[])
        facets = Facets.default(self._default_library)
        pagination = Pagination.default()

        with script.app.test_request_context("/"):
            lane = self._lane()
            result = script.do_generate(
                lane, facets, pagination, feed_class=MockAcquisitionFeed
            )
            eq_("here's your feed", result)

            args = MockAcquisitionFeed.called_with
            eq_(self._db, args['_db'])
            eq_(lane, args['lane'])
            eq_(lane.display_name, args['title'])
            eq_(True, args['force_refresh'])

            # The Pagination object was passed into
            # MockAcquisitionFeed.page, and it was also used to make the
            # feed URL (see below).
            eq_(pagination, args['pagination'])

            # The Facets object was passed into
            # MockAcquisitionFeed.page, and it was also used to make
            # the feed URL and to create the feed annotator.
            eq_(facets, args['facets'])
            annotator = args['annotator']
            eq_(facets, annotator.facets)
            eq_(
                args['url'],
                annotator.feed_url(lane, facets=facets, pagination=pagination)
            )

            # Try again without mocking AcquisitionFeed to verify that
            # we get something that looks like an OPDS feed.
            result = script.do_generate(lane, facets, pagination)
            assert result.startswith('<feed')
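
do_generate itself is not shown, but the assertions fix its contract: it must call feed_class.page with the database session, the lane, the lane's display name as the title, force_refresh=True, the same facets and pagination objects, an annotator built around those facets, and a URL produced by that annotator. A sketch reconstructed from the assertions alone; how the script actually obtains its annotator is an assumption:

    def do_generate(self, lane, facets, pagination, feed_class=AcquisitionFeed):
        # Reconstructed from test_do_generate, not the project source.
        annotator = self.make_annotator(lane, facets)  # assumed helper
        url = annotator.feed_url(lane, facets=facets, pagination=pagination)
        return feed_class.page(
            _db=self._db, title=lane.display_name, url=url, lane=lane,
            annotator=annotator, facets=facets, pagination=pagination,
            force_refresh=True,
        )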
Example #5
    def complaints(cls, library, title, url, annotator, pagination=None):
        _db = Session.object_session(library)
        facets = Facets.default(library)
        pagination = pagination or Pagination.default()

        q = LicensePool.with_complaint(library)
        results = pagination.apply(q).all()

        if len(results) > 0:
            (pools, counts) = zip(*results)
        else:
            pools = ()

        works = [pool.work for pool in pools]
        feed = cls(_db, title, url, works, annotator)

        # Render a 'start' link
        top_level_title = annotator.top_level_title()
        start_uri = annotator.groups_url(None)
        AdminFeed.add_link_to_feed(feed.feed, href=start_uri, rel="start", title=top_level_title)

        # Render an 'up' link, identical to the 'start' link, to mark this as a top-level feed
        AdminFeed.add_link_to_feed(feed.feed, href=start_uri, rel="up", title=top_level_title)

        if len(works) > 0:
            # There are works in this list. Add a 'next' link.
            AdminFeed.add_link_to_feed(feed.feed, rel="next", href=annotator.complaints_url(facets, pagination.next_page))

        if pagination.offset > 0:
            AdminFeed.add_link_to_feed(feed.feed, rel="first", href=annotator.complaints_url(facets, pagination.first_page))

        previous_page = pagination.previous_page
        if previous_page:
            AdminFeed.add_link_to_feed(feed.feed, rel="previous", href=annotator.complaints_url(facets, previous_page))

        annotator.annotate_feed(feed)
        return str(feed)
Example #6
    def test_complaints_feed(self):
        """Test the ability to show a paginated feed of works with complaints."""

        type = iter(Complaint.VALID_TYPES)
        type1 = next(type)
        type2 = next(type)

        work1 = self._work(
            "fiction work with complaint",
            language="eng",
            fiction=True,
            with_open_access_download=True,
        )
        work1_complaint1 = self._complaint(
            work1.license_pools[0],
            type1,
            "work1 complaint1 source",
            "work1 complaint1 detail",
        )
        work1_complaint2 = self._complaint(
            work1.license_pools[0],
            type1,
            "work1 complaint2 source",
            "work1 complaint2 detail",
        )
        work1_complaint3 = self._complaint(
            work1.license_pools[0],
            type2,
            "work1 complaint3 source",
            "work1 complaint3 detail",
        )
        work2 = self._work(
            "nonfiction work with complaint",
            language="eng",
            fiction=False,
            with_open_access_download=True,
        )
        work2_complaint1 = self._complaint(
            work2.license_pools[0],
            type2,
            "work2 complaint1 source",
            "work2 complaint1 detail",
        )
        work3 = self._work(
            "fiction work without complaint",
            language="eng",
            fiction=True,
            with_open_access_download=True,
        )
        work4 = self._work(
            "nonfiction work without complaint",
            language="eng",
            fiction=False,
            with_open_access_download=True,
        )

        facets = Facets.default(self._default_library)
        pagination = Pagination(size=1)
        annotator = MockAnnotator(self._default_library)

        def make_page(pagination):
            return AdminFeed.complaints(
                library=self._default_library,
                title="Complaints",
                url=self._url,
                annotator=annotator,
                pagination=pagination,
            )

        first_page = make_page(pagination)
        parsed = feedparser.parse(str(first_page))
        assert 1 == len(parsed["entries"])
        assert work1.title == parsed["entries"][0]["title"]
        # Verify that the entry has acquisition links.
        links = parsed["entries"][0]["links"]
        open_access_links = [
            l
            for l in links
            if l["rel"] == "http://opds-spec.org/acquisition/open-access"
        ]
        assert 1 == len(open_access_links)

        # Make sure the links are in place.
        [start] = self.links(parsed, "start")
        assert annotator.groups_url(None) == start["href"]
        assert annotator.top_level_title() == start["title"]

        [up] = self.links(parsed, "up")
        assert annotator.groups_url(None) == up["href"]
        assert annotator.top_level_title() == up["title"]

        [next_link] = self.links(parsed, "next")
        assert (
            annotator.complaints_url(facets, pagination.next_page) == next_link["href"]
        )

        # This was the first page, so no previous link.
        assert [] == self.links(parsed, "previous")

        # Now get the second page and make sure it has a 'previous' link.
        second_page = make_page(pagination.next_page)
        parsed = feedparser.parse(str(second_page))
        [previous] = self.links(parsed, "previous")
        assert annotator.complaints_url(facets, pagination) == previous["href"]
        assert 1 == len(parsed["entries"])
        assert work2.title == parsed["entries"][0]["title"]
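
The self.links helper is not shown here. Its use above implies that it takes a feedparser result and a rel value and returns the matching feed-level links; a minimal version consistent with that usage:

    def links(self, parsed, rel):
        # Hypothetical helper: feed-level links with the given rel.
        return [link for link in parsed["feed"]["links"] if link["rel"] == rel]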
Example #7
    def test_complaints_feed(self):
        """Test the ability to show a paginated feed of works with complaints.
        """

        type = iter(Complaint.VALID_TYPES)
        type1 = next(type)
        type2 = next(type)

        work1 = self._work("fiction work with complaint",
                           language="eng",
                           fiction=True,
                           with_open_access_download=True)
        work1_complaint1 = self._complaint(work1.license_pools[0], type1,
                                           "work1 complaint1 source",
                                           "work1 complaint1 detail")
        work1_complaint2 = self._complaint(work1.license_pools[0], type1,
                                           "work1 complaint2 source",
                                           "work1 complaint2 detail")
        work1_complaint3 = self._complaint(work1.license_pools[0], type2,
                                           "work1 complaint3 source",
                                           "work1 complaint3 detail")
        work2 = self._work("nonfiction work with complaint",
                           language="eng",
                           fiction=False,
                           with_open_access_download=True)
        work2_complaint1 = self._complaint(work2.license_pools[0], type2,
                                           "work2 complaint1 source",
                                           "work2 complaint1 detail")
        work3 = self._work("fiction work without complaint",
                           language="eng",
                           fiction=True,
                           with_open_access_download=True)
        work4 = self._work("nonfiction work without complaint",
                           language="eng",
                           fiction=False,
                           with_open_access_download=True)

        facets = Facets.default()
        pagination = Pagination(size=1)
        annotator = TestAnnotator()

        def make_page(pagination):
            return AdminFeed.complaints(_db=self._db,
                                        title="Complaints",
                                        url=self._url,
                                        annotator=annotator,
                                        pagination=pagination)

        first_page = make_page(pagination)
        parsed = feedparser.parse(str(first_page))
        eq_(1, len(parsed['entries']))
        eq_(work1.title, parsed['entries'][0]['title'])

        # Make sure the links are in place.
        [start] = self.links(parsed, 'start')
        eq_(annotator.groups_url(None), start['href'])
        eq_(annotator.top_level_title(), start['title'])

        [up] = self.links(parsed, 'up')
        eq_(annotator.groups_url(None), up['href'])
        eq_(annotator.top_level_title(), up['title'])

        [next_link] = self.links(parsed, 'next')
        eq_(annotator.complaints_url(facets, pagination.next_page),
            next_link['href'])

        # This was the first page, so no previous link.
        eq_([], self.links(parsed, 'previous'))

        # Now get the second page and make sure it has a 'previous' link.
        second_page = make_page(pagination.next_page)
        parsed = feedparser.parse(str(second_page))
        [previous] = self.links(parsed, 'previous')
        eq_(annotator.complaints_url(facets, pagination), previous['href'])
        eq_(1, len(parsed['entries']))
        eq_(work2.title, parsed['entries'][0]['title'])
Example #8
    def test_response_format(self):
        # Verify that fetch() can be told to return an appropriate
        # OPDSFeedResponse object. This is the default behavior, since
        # it preserves some useful information that would otherwise be
        # lost.
        facets = Facets.default(self._default_library)
        pagination = Pagination.default()
        wl = WorkList()
        wl.initialize(self._default_library)

        def refresh():
            return "Here's a feed."

        private = object()
        r = CachedFeed.fetch(
            self._db, wl, facets, pagination, refresh, max_age=102, private=private
        )
        assert isinstance(r, OPDSFeedResponse)
        assert 200 == r.status_code
        assert OPDSFeed.ACQUISITION_FEED_TYPE == r.content_type
        assert 102 == r.max_age
        assert "Here's a feed." == str(r)

        # The extra argument `private`, not used by CachedFeed.fetch, was
        # passed on to the OPDSFeedResponse constructor.
        assert private == r.private

        # The CachedFeed was created; just not returned.
        cf = self._db.query(CachedFeed).one()
        assert "Here's a feed." == cf.content

        # Try it again as a cache hit.
        r = CachedFeed.fetch(
            self._db, wl, facets, pagination, refresh, max_age=102, private=private
        )
        assert isinstance(r, OPDSFeedResponse)
        assert 200 == r.status_code
        assert OPDSFeed.ACQUISITION_FEED_TYPE == r.content_type
        assert 102 == r.max_age
        assert "Here's a feed." == str(r)

        # If we tell CachedFeed to cache its feed 'forever', that only
        # applies to the _database_ cache. The client is told to cache
        # the feed for the default period.
        r = CachedFeed.fetch(
            self._db,
            wl,
            facets,
            pagination,
            refresh,
            max_age=CachedFeed.CACHE_FOREVER,
            private=private,
        )
        assert isinstance(r, OPDSFeedResponse)
        assert OPDSFeed.DEFAULT_MAX_AGE == r.max_age

        # If the Library associated with the WorkList used in the feed
        # has root lanes, `private` is always set to True, even if we
        # asked for the opposite.

        from core.model import Library

        Library._has_root_lane_cache[self._default_library.id] = True
        r = CachedFeed.fetch(self._db, wl, facets, pagination, refresh, private=False)
        assert isinstance(r, OPDSFeedResponse)
        assert True == r.private
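
The assertions imply a simple translation from the database cache policy to the HTTP one: an integer max_age is passed through to OPDSFeedResponse unchanged, while CACHE_FOREVER falls back to OPDSFeed.DEFAULT_MAX_AGE, since clients should not cache a feed forever. The rule as this test observes it, not the project's actual code:

    def client_max_age(fetch_max_age):
        # Inferred from test_response_format: CACHE_FOREVER governs only
        # the database cache, so the client gets the default expiry.
        if fetch_max_age == CachedFeed.CACHE_FOREVER:
            return OPDSFeed.DEFAULT_MAX_AGE
        return fetch_max_age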
Example #9
    def test_no_race_conditions(self):
        # Why do we look up a CachedFeed again after feed generation?
        # Well, let's see what happens if someone else messes around
        # with the CachedFeed object _while the refresher is running_.
        #
        # This is a race condition that happens in real life. Rather
        # than setting up a multi-threaded test, we can have the
        # refresher itself simulate a background modification by
        # messing around with the CachedFeed object we know will
        # eventually be returned.
        #
        # The most up-to-date feed always wins, so background
        # modifications will take effect only if they made the
        # CachedFeed look _newer_ than the foreground process does.
        facets = Facets.default(self._default_library)
        pagination = Pagination.default()
        wl = WorkList()
        wl.initialize(self._default_library)

        m = CachedFeed.fetch

        # In this case, two simulated threads try to create the same
        # CachedFeed at the same time. We end up with a single
        # CachedFeed containing the result of the last code that ran.
        def simultaneous_refresher():
            # This refresher method simulates another thread creating
            # a CachedFeed for this feed while this thread's
            # refresher is running.
            def other_thread_refresher():
                return "Another thread made a feed."

            m(self._db, wl, facets, pagination, other_thread_refresher, 0, raw=True)

            return "Then this thread made a feed."

        # This will call simultaneous_refresher(), which will call
        # CachedFeed.fetch() _again_, which will call
        # other_thread_refresher().
        result = m(
            self._db, wl, facets, pagination, simultaneous_refresher, 0, raw=True
        )

        # We ended up with a single CachedFeed containing the
        # latest information.
        assert [result] == self._db.query(CachedFeed).all()
        assert "Then this thread made a feed." == result.content

        # If two threads contend for an existing CachedFeed, the one that
        # sets CachedFeed.timestamp to the later value wins.
        #
        # Here, the other thread wins by setting .timestamp on the
        # existing CachedFeed to a date in the future.
        now = utc_now()
        tomorrow = now + datetime.timedelta(days=1)
        yesterday = now - datetime.timedelta(days=1)

        def tomorrow_vs_now():
            result.content = "Someone in the background set tomorrow's content."
            result.timestamp = tomorrow
            return "Today's content can't compete."

        tomorrow_result = m(
            self._db, wl, facets, pagination, tomorrow_vs_now, 0, raw=True
        )
        assert tomorrow_result == result
        assert (
            "Someone in the background set tomorrow's content."
            == tomorrow_result.content
        )
        assert tomorrow_result.timestamp == tomorrow

        # Here, the other thread sets .timestamp to a date in the past, and
        # it loses out to the (apparently) newer feed.
        def yesterday_vs_now():
            result.content = "Someone in the background set yesterday's content."
            result.timestamp = yesterday
            return "Today's content is fresher."

        now_result = m(self._db, wl, facets, pagination, yesterday_vs_now, 0, raw=True)

        # We got the same CachedFeed we've been getting this whole
        # time, but the outdated data set by the 'background thread'
        # has been fixed.
        assert result == now_result
        assert "Today's content is fresher." == result.content
        assert result.timestamp > yesterday

        # This shouldn't happen, but if the CachedFeed's timestamp or
        # content is *cleared out* in the background, between the
        # time the CachedFeed is fetched and the time the refresher
        # finishes, then we don't know what's going on and we don't
        # take chances. We create a whole new CachedFeed object for
        # the updated version of the feed.

        # First, try the situation where .timestamp is cleared out in
        # the background.
        def timestamp_cleared_in_background():
            result.content = "Someone else sets content and clears timestamp."
            result.timestamp = None

            return "Non-weird content."

        result2 = m(
            self._db,
            wl,
            facets,
            pagination,
            timestamp_cleared_in_background,
            0,
            raw=True,
        )
        now = utc_now()

        # result2 is a brand new CachedFeed.
        assert result2 != result
        assert "Non-weird content." == result2.content
        assert (now - result2.timestamp).total_seconds() < 2

        # We let the background process do whatever it wants to do
        # with the old one.
        assert "Someone else sets content and clears timestamp." == result.content
        assert None == result.timestamp

        # Next, test the situation where .content is cleared out.
        def content_cleared_in_background():
            result2.content = None
            result2.timestamp = tomorrow

            return "Non-weird content."

        result3 = m(
            self._db, wl, facets, pagination, content_cleared_in_background, 0, raw=True
        )
        now = utc_now()

        # Again, a brand new CachedFeed.
        assert result3 != result2
        assert result3 != result
        assert "Non-weird content." == result3.content
        assert (now - result3.timestamp).total_seconds() < 2

        # Again, we let the background process have the old one for
        # whatever weird thing it wants to do.
        assert None == result2.content
        assert tomorrow == result2.timestamp
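
Taken together, the scenarios above pin down the conflict rule fetch applies once the refresher returns: keep the stored row if its timestamp is newer than the refresh, overwrite it if older, and abandon it for a brand new row if its timestamp or content was cleared in the meantime. A hypothetical reconstruction of that rule from the test's observable behavior:

    def resolve_refresh(db, cached, new_content, refresh_time):
        # Reconstructed from test_no_race_conditions, not the project source.
        if cached.timestamp is None or cached.content is None:
            # The row was cleared out from under us; don't take chances.
            # Store the fresh feed in a brand new CachedFeed.
            replacement = CachedFeed(content=new_content, timestamp=refresh_time)
            db.add(replacement)
            return replacement
        if cached.timestamp > refresh_time:
            # A background update made the row look newer; it wins.
            return cached
        # Our refresh is the most recent; overwrite the row in place.
        cached.content = new_content
        cached.timestamp = refresh_time
        return cached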
Example #10
    def test_complaints_feed(self):
        """Test the ability to show a paginated feed of works with complaints.
        """

        type = iter(Complaint.VALID_TYPES)
        type1 = next(type)
        type2 = next(type)

        work1 = self._work(
            "fiction work with complaint",
            language="eng",
            fiction=True,
            with_open_access_download=True)
        work1_complaint1 = self._complaint(
            work1.license_pools[0],
            type1,
            "work1 complaint1 source",
            "work1 complaint1 detail")
        work1_complaint2 = self._complaint(
            work1.license_pools[0],
            type1,
            "work1 complaint2 source",
            "work1 complaint2 detail")
        work1_complaint3 = self._complaint(
            work1.license_pools[0],
            type2,
            "work1 complaint3 source",
            "work1 complaint3 detail")
        work2 = self._work(
            "nonfiction work with complaint",
            language="eng",
            fiction=False,
            with_open_access_download=True)
        work2_complaint1 = self._complaint(
            work2.license_pools[0],
            type2,
            "work2 complaint1 source",
            "work2 complaint1 detail")
        work3 = self._work(
            "fiction work without complaint",
            language="eng",
            fiction=True,
            with_open_access_download=True)
        work4 = self._work(
            "nonfiction work without complaint",
            language="eng",
            fiction=False,
            with_open_access_download=True)

        facets = Facets.default(self._default_library)
        pagination = Pagination(size=1)
        annotator = MockAnnotator(self._default_library)

        def make_page(pagination):
            return AdminFeed.complaints(
                library=self._default_library, title="Complaints",
                url=self._url, annotator=annotator,
                pagination=pagination
            )

        first_page = make_page(pagination)
        parsed = feedparser.parse(str(first_page))
        eq_(1, len(parsed['entries']))
        eq_(work1.title, parsed['entries'][0]['title'])
        # Verify that the entry has acquisition links.
        links = parsed['entries'][0]['links']
        open_access_links = [l for l in links if l['rel'] == "http://opds-spec.org/acquisition/open-access"]
        eq_(1, len(open_access_links))

        # Make sure the links are in place.
        [start] = self.links(parsed, 'start')
        eq_(annotator.groups_url(None), start['href'])
        eq_(annotator.top_level_title(), start['title'])

        [up] = self.links(parsed, 'up')
        eq_(annotator.groups_url(None), up['href'])
        eq_(annotator.top_level_title(), up['title'])

        [next_link] = self.links(parsed, 'next')
        eq_(annotator.complaints_url(facets, pagination.next_page), next_link['href'])

        # This was the first page, so no previous link.
        eq_([], self.links(parsed, 'previous'))

        # Now get the second page and make sure it has a 'previous' link.
        second_page = make_page(pagination.next_page)
        parsed = feedparser.parse(str(second_page))
        [previous] = self.links(parsed, 'previous')
        eq_(annotator.complaints_url(facets, pagination), previous['href'])
        eq_(1, len(parsed['entries']))
        eq_(work2.title, parsed['entries'][0]['title'])