def suppressed(cls, _db, title, url, annotator, pagination=None):
    pagination = pagination or Pagination.default()

    q = _db.query(LicensePool).filter(
        LicensePool.suppressed == True).order_by(
            LicensePool.id
        )
    pools = pagination.apply(q).all()

    works = [pool.work for pool in pools]
    feed = cls(_db, title, url, works, annotator)

    # Render a 'start' link
    top_level_title = annotator.top_level_title()
    start_uri = annotator.groups_url(None)
    feed.add_link(href=start_uri, rel="start", title=top_level_title)

    # Render an 'up' link, same as the 'start' link to indicate top-level feed
    feed.add_link(href=start_uri, rel="up", title=top_level_title)

    if len(works) > 0:
        # There are works in this list. Add a 'next' link.
        feed.add_link(
            rel="next", href=annotator.suppressed_url(pagination.next_page))

    if pagination.offset > 0:
        feed.add_link(
            rel="first", href=annotator.suppressed_url(pagination.first_page))

    previous_page = pagination.previous_page
    if previous_page:
        feed.add_link(
            rel="previous", href=annotator.suppressed_url(previous_page))

    annotator.annotate_feed(feed)
    return unicode(feed)
def test_lifecycle_with_worklist(self):
    facets = Facets.default(self._default_library)
    pagination = Pagination.default()
    lane = WorkList()
    lane.initialize(self._default_library)

    # Fetch a cached feed from the database. It comes out updated.
    refresher = MockFeedGenerator()
    args = (self._db, lane, facets, pagination, refresher)
    feed = CachedFeed.fetch(*args, max_age=0, raw=True)
    assert "This is feed #1" == feed.content

    assert pagination.query_string == feed.pagination
    assert facets.query_string == feed.facets
    assert None == feed.lane_id
    assert lane.unique_key == feed.unique_key

    # Fetch it again, with a high max_age, and it's cached!
    feed = CachedFeed.fetch(*args, max_age=1000, raw=True)
    assert "This is feed #1" == feed.content

    # Fetch it with a low max_age, and it gets updated again.
    feed = CachedFeed.fetch(*args, max_age=0, raw=True)
    assert "This is feed #2" == feed.content

    # The special constant CACHE_FOREVER means it's always cached.
    feed = CachedFeed.fetch(*args, max_age=CachedFeed.CACHE_FOREVER, raw=True)
    assert "This is feed #2" == feed.content
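# A minimal sketch (not the real CachedFeed.fetch) of the staleness rule the
# test above exercises: max_age=0 forces a refresh, a generous max_age serves
# the cached copy, and the CACHE_FOREVER sentinel never refreshes. All names
# here (CACHE_FOREVER, should_refresh, feed_age) are illustrative assumptions.
CACHE_FOREVER = object()

def should_refresh(feed_age, max_age):
    """Decide whether a cached feed is too stale to serve.

    :param feed_age: seconds since the cached feed was generated.
    :param max_age: maximum acceptable age in seconds, or CACHE_FOREVER.
    """
    if max_age is CACHE_FOREVER:
        return False  # Always serve the cached copy, however old.
    return feed_age > max_age

assert should_refresh(feed_age=5, max_age=0)             # feed #1 -> feed #2
assert not should_refresh(feed_age=5, max_age=1000)      # cache hit
assert not should_refresh(feed_age=10**9, max_age=CACHE_FOREVER)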
def test_pagination(self):
    script = CacheFacetListsPerLane(self._db, manager=object(), cmd_args=[])
    script.pages = 3
    lane = self._lane()
    p1, p2, p3 = script.pagination(lane)
    pagination = Pagination.default()
    eq_(pagination.query_string, p1.query_string)
    eq_(pagination.next_page.query_string, p2.query_string)
    eq_(pagination.next_page.next_page.query_string, p3.query_string)
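# A hypothetical, stripped-down Pagination showing the arithmetic the test
# relies on: next_page advances the offset by the page size, so consecutive
# pages differ only in their offsets. This is a sketch, not the project's
# Pagination class; the query-string format is assumed for illustration.
class SketchPagination(object):
    DEFAULT_SIZE = 50

    def __init__(self, offset=0, size=DEFAULT_SIZE):
        self.offset = offset
        self.size = size

    @classmethod
    def default(cls):
        return cls()

    @property
    def next_page(self):
        return SketchPagination(self.offset + self.size, self.size)

    @property
    def query_string(self):
        return "after=%d&size=%d" % (self.offset, self.size)

p1 = SketchPagination.default()
assert p1.query_string == "after=0&size=50"
assert p1.next_page.query_string == "after=50&size=50"
assert p1.next_page.next_page.query_string == "after=100&size=50"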
def complaints(cls, library, title, url, annotator, pagination=None):
    _db = Session.object_session(library)
    facets = Facets.default(library)
    pagination = pagination or Pagination.default()

    q = LicensePool.with_complaint(library)
    results = pagination.modify_database_query(_db, q).all()

    if len(results) > 0:
        (pools, counts) = list(zip(*results))
    else:
        pools = ()

    works = [pool.work for pool in pools]
    feed = cls(_db, title, url, works, annotator)

    # Render a 'start' link
    top_level_title = annotator.top_level_title()
    start_uri = annotator.groups_url(None)
    AdminFeed.add_link_to_feed(
        feed.feed, href=start_uri, rel="start", title=top_level_title
    )

    # Render an 'up' link, same as the 'start' link to indicate top-level feed
    AdminFeed.add_link_to_feed(
        feed.feed, href=start_uri, rel="up", title=top_level_title
    )

    if len(works) > 0:
        # There are works in this list. Add a 'next' link.
        AdminFeed.add_link_to_feed(
            feed.feed,
            rel="next",
            href=annotator.complaints_url(facets, pagination.next_page),
        )

    if pagination.offset > 0:
        AdminFeed.add_link_to_feed(
            feed.feed,
            rel="first",
            href=annotator.complaints_url(facets, pagination.first_page),
        )

    previous_page = pagination.previous_page
    if previous_page:
        AdminFeed.add_link_to_feed(
            feed.feed,
            rel="previous",
            href=annotator.complaints_url(facets, previous_page),
        )

    annotator.annotate_feed(feed)
    return str(feed)
def suppressed(cls, _db, title, url, annotator, pagination=None):
    pagination = pagination or Pagination.default()

    q = (
        _db.query(LicensePool)
        .filter(
            and_(
                LicensePool.suppressed == True,
                LicensePool.superceded == False,
            )
        )
        .order_by(LicensePool.id)
    )
    pools = pagination.modify_database_query(_db, q).all()

    works = [pool.work for pool in pools]
    feed = cls(_db, title, url, works, annotator)

    # Render a 'start' link
    top_level_title = annotator.top_level_title()
    start_uri = annotator.groups_url(None)
    AdminFeed.add_link_to_feed(
        feed.feed, href=start_uri, rel="start", title=top_level_title
    )

    # Render an 'up' link, same as the 'start' link to indicate top-level feed
    AdminFeed.add_link_to_feed(
        feed.feed, href=start_uri, rel="up", title=top_level_title
    )

    if len(works) > 0:
        # There are works in this list. Add a 'next' link.
        AdminFeed.add_link_to_feed(
            feed.feed,
            rel="next",
            href=annotator.suppressed_url(pagination.next_page),
        )

    if pagination.offset > 0:
        AdminFeed.add_link_to_feed(
            feed.feed,
            rel="first",
            href=annotator.suppressed_url(pagination.first_page),
        )

    previous_page = pagination.previous_page
    if previous_page:
        AdminFeed.add_link_to_feed(
            feed.feed,
            rel="previous",
            href=annotator.suppressed_url(previous_page),
        )

    annotator.annotate_feed(feed)
    return str(feed)
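# The two admin feeds above (complaints and suppressed) share one OPDS
# pagination pattern: 'start' and 'up' always point at the top level, 'next'
# appears only when the current page has entries, 'first' only past page one,
# and 'previous' only when one exists. A condensed sketch of that pattern;
# add_link and url_for are illustrative stand-ins, not the project's API.
def add_pagination_links(add_link, url_for, works, pagination):
    if len(works) > 0:
        # There are works on this page, so there may be a next page.
        add_link(rel="next", href=url_for(pagination.next_page))
    if pagination.offset > 0:
        add_link(rel="first", href=url_for(pagination.first_page))
    previous_page = pagination.previous_page
    if previous_page:
        add_link(rel="previous", href=url_for(previous_page))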
def test_suppressed_feed(self):
    """Test the ability to show a paginated feed of suppressed works."""
    work1 = self._work(with_open_access_download=True)
    work1.license_pools[0].suppressed = True

    work2 = self._work(with_open_access_download=True)
    work2.license_pools[0].suppressed = True

    pagination = Pagination(size=1)
    annotator = TestAnnotator()
    titles = [work1.title, work2.title]

    def make_page(pagination):
        return AdminFeed.suppressed(
            _db=self._db, title="Hidden works", url=self._url,
            annotator=annotator, pagination=pagination)

    first_page = make_page(pagination)
    parsed = feedparser.parse(unicode(first_page))
    eq_(1, len(parsed['entries']))
    assert parsed['entries'][0].title in titles
    titles.remove(parsed['entries'][0].title)
    [remaining_title] = titles

    # Make sure the links are in place.
    [start] = self.links(parsed, 'start')
    eq_(annotator.groups_url(None), start['href'])
    eq_(annotator.top_level_title(), start['title'])

    [up] = self.links(parsed, 'up')
    eq_(annotator.groups_url(None), up['href'])
    eq_(annotator.top_level_title(), up['title'])

    [next_link] = self.links(parsed, 'next')
    eq_(annotator.suppressed_url(pagination.next_page), next_link['href'])

    # This was the first page, so no previous link.
    eq_([], self.links(parsed, 'previous'))

    # Now get the second page and make sure it has a 'previous' link.
    second_page = make_page(pagination.next_page)
    parsed = feedparser.parse(unicode(second_page))
    [previous] = self.links(parsed, 'previous')
    eq_(annotator.suppressed_url(pagination), previous['href'])
    eq_(1, len(parsed['entries']))
    eq_(remaining_title, parsed['entries'][0]['title'])
def featured_works(self, _db, facets=None):
    library = self.get_library(_db)
    new_facets = FeaturedSeriesFacets(
        library,
        # If a work is in the right series we don't care about its
        # quality.
        collection=FeaturedSeriesFacets.COLLECTION_FULL,
        availability=FeaturedSeriesFacets.AVAILABLE_ALL,
        order=None
    )
    if facets:
        new_facets.entrypoint = facets.entrypoint
    pagination = Pagination(size=library.featured_lane_size)
    qu = self.works(_db, facets=new_facets, pagination=pagination)
    return qu.all()
def test_do_generate(self):
    # When it's time to generate a feed, AcquisitionFeed.page
    # is called with the right arguments.
    class MockAcquisitionFeed(object):
        called_with = None

        @classmethod
        def page(cls, **kwargs):
            cls.called_with = kwargs
            return "here's your feed"

    # Test our ability to generate a single feed.
    script = CacheFacetListsPerLane(self._db, testing=True, cmd_args=[])
    facets = Facets.default(self._default_library)
    pagination = Pagination.default()

    with script.app.test_request_context("/"):
        lane = self._lane()
        result = script.do_generate(
            lane, facets, pagination, feed_class=MockAcquisitionFeed
        )
        eq_("here's your feed", result)

        args = MockAcquisitionFeed.called_with
        eq_(self._db, args['_db'])
        eq_(lane, args['lane'])
        eq_(lane.display_name, args['title'])
        eq_(True, args['force_refresh'])

        # The Pagination object was passed into
        # MockAcquisitionFeed.page, and it was also used to make the
        # feed URL (see below).
        eq_(pagination, args['pagination'])

        # The Facets object was passed into
        # MockAcquisitionFeed.page, and it was also used to make
        # the feed URL and to create the feed annotator.
        eq_(facets, args['facets'])
        annotator = args['annotator']
        eq_(facets, annotator.facets)
        eq_(
            args['url'],
            annotator.feed_url(lane, facets=facets, pagination=pagination)
        )

        # Try again without mocking AcquisitionFeed to verify that
        # we get something that looks like an OPDS feed.
        result = script.do_generate(lane, facets, pagination)
        assert result.startswith('<feed')
def test_process_lane(self):
    # process_lane() calls do_generate() once for every
    # combination of items yielded by facets() and pagination().

    class MockFacets(object):
        def __init__(self, query):
            self.query = query

        @property
        def query_string(self):
            return self.query

    facets1 = MockFacets("facets1")
    facets2 = MockFacets("facets2")
    page1 = Pagination.default()
    page2 = page1.next_page

    class Mock(CacheRepresentationPerLane):
        generated = []

        def do_generate(self, lane, facets, pagination):
            value = (lane, facets, pagination)
            self.generated.append(value)
            return value

        def facets(self, lane):
            yield facets1
            yield facets2

        def pagination(self, lane):
            yield page1
            yield page2

    lane = self._lane()
    script = Mock(self._db, manager=object(), cmd_args=[])
    generated = script.process_lane(lane)
    eq_(generated, script.generated)

    c1, c2, c3, c4 = script.generated
    eq_((lane, facets1, page1), c1)
    eq_((lane, facets1, page2), c2)
    eq_((lane, facets2, page1), c3)
    eq_((lane, facets2, page2), c4)
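# The call order the test pins down is the cross product of facets() and
# pagination(), with facets varying slowest. A sketch of how process_lane
# could be expressed with itertools.product (an assumption about its
# implementation, shown only to make the expected ordering explicit):
import itertools

def sketch_process_lane(lane, facets_for, pagination_for, do_generate):
    return [
        do_generate(lane, facets, pagination)
        for facets, pagination in itertools.product(
            facets_for(lane), pagination_for(lane)
        )
    ]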
def do_generate(self, lane):
    feeds = []
    annotator = self.app.manager.annotator(lane)
    if isinstance(lane, Lane):
        languages = lane.language_key
        lane_name = None
    else:
        languages = None
        lane_name = None
    url = self.app.manager.cdn_url_for(
        "feed", languages=lane.languages, lane_name=lane_name
    )

    order_facets = Configuration.enabled_facets(
        Facets.ORDER_FACET_GROUP_NAME
    )
    availability = Configuration.default_facet(
        Facets.AVAILABILITY_FACET_GROUP_NAME
    )
    collection = Configuration.default_facet(
        Facets.COLLECTION_FACET_GROUP_NAME
    )

    for sort_order in order_facets:
        pagination = Pagination.default()
        facets = Facets(
            collection=collection, availability=availability,
            order=sort_order, order_ascending=True
        )
        title = lane.display_name
        for pagenum in (0, 2):
            feeds.append(
                AcquisitionFeed.page(
                    self._db, title, url, lane, annotator,
                    facets=facets, pagination=pagination,
                    force_refresh=True
                )
            )
            pagination = pagination.next_page
    return feeds
def do_generate(self, lane):
    feeds = []
    annotator = self.app.manager.annotator(lane)
    if isinstance(lane, Lane) and lane.parent:
        languages = lane.language_key
        lane_name = lane.name
    else:
        languages = None
        lane_name = None
    url = self.app.manager.cdn_url_for(
        "feed", languages=lane.languages, lane_name=lane_name)

    order_facets = Configuration.enabled_facets(
        Facets.ORDER_FACET_GROUP_NAME)
    availability = Configuration.default_facet(
        Facets.AVAILABILITY_FACET_GROUP_NAME)
    collection = Configuration.default_facet(
        Facets.COLLECTION_FACET_GROUP_NAME)

    for sort_order in self.orders:
        for availability in self.availabilities:
            for collection in self.collections:
                pagination = Pagination.default()
                facets = Facets(
                    collection=collection, availability=availability,
                    order=sort_order, order_ascending=True)
                title = lane.display_name
                for pagenum in range(0, self.pages):
                    yield AcquisitionFeed.page(
                        self._db, title, url, lane, annotator,
                        facets=facets, pagination=pagination,
                        force_refresh=True)
                    pagination = pagination.next_page
def complaints(cls, library, title, url, annotator, pagination=None):
    _db = Session.object_session(library)
    facets = Facets.default(library)
    pagination = pagination or Pagination.default()

    q = LicensePool.with_complaint(library)
    results = pagination.apply(q).all()

    if len(results) > 0:
        (pools, counts) = zip(*results)
    else:
        pools = ()

    works = [pool.work for pool in pools]
    feed = cls(_db, title, url, works, annotator)

    # Render a 'start' link
    top_level_title = annotator.top_level_title()
    start_uri = annotator.groups_url(None)
    AdminFeed.add_link_to_feed(
        feed.feed, href=start_uri, rel="start", title=top_level_title)

    # Render an 'up' link, same as the 'start' link to indicate top-level feed
    AdminFeed.add_link_to_feed(
        feed.feed, href=start_uri, rel="up", title=top_level_title)

    if len(works) > 0:
        # There are works in this list. Add a 'next' link.
        AdminFeed.add_link_to_feed(
            feed.feed, rel="next",
            href=annotator.complaints_url(facets, pagination.next_page))

    if pagination.offset > 0:
        AdminFeed.add_link_to_feed(
            feed.feed, rel="first",
            href=annotator.complaints_url(facets, pagination.first_page))

    previous_page = pagination.previous_page
    if previous_page:
        AdminFeed.add_link_to_feed(
            feed.feed, rel="previous",
            href=annotator.complaints_url(facets, previous_page))

    annotator.annotate_feed(feed)
    return unicode(feed)
def do_generate(self, lane):
    feeds = []
    annotator = self.app.manager.annotator(lane)
    if isinstance(lane, Lane):
        lane_id = lane.id
    else:
        # Presumably this is the top-level WorkList.
        lane_id = None
    library = lane.get_library(self._db)
    url = self.app.manager.cdn_url_for(
        "feed", lane_identifier=lane_id,
        library_short_name=library.short_name
    )

    default_order = library.default_facet(Facets.ORDER_FACET_GROUP_NAME)
    allowed_orders = library.enabled_facets(Facets.ORDER_FACET_GROUP_NAME)
    chosen_orders = self.orders or [default_order]

    default_availability = library.default_facet(
        Facets.AVAILABILITY_FACET_GROUP_NAME
    )
    allowed_availabilities = library.enabled_facets(
        Facets.AVAILABILITY_FACET_GROUP_NAME
    )
    chosen_availabilities = self.availabilities or [default_availability]

    default_collection = library.default_facet(
        Facets.COLLECTION_FACET_GROUP_NAME
    )
    allowed_collections = library.enabled_facets(
        Facets.COLLECTION_FACET_GROUP_NAME
    )
    chosen_collections = self.collections or [default_collection]

    for order in chosen_orders:
        if order not in allowed_orders:
            logging.warn("Ignoring unsupported ordering %s" % order)
            continue
        for availability in chosen_availabilities:
            if availability not in allowed_availabilities:
                logging.warn(
                    "Ignoring unsupported availability %s" % availability
                )
                continue
            for collection in chosen_collections:
                if collection not in allowed_collections:
                    logging.warn(
                        "Ignoring unsupported collection %s" % collection
                    )
                    continue
                pagination = Pagination.default()
                facets = Facets(
                    library=library, collection=collection,
                    availability=availability, order=order,
                    order_ascending=True
                )
                title = lane.display_name
                for pagenum in range(0, self.pages):
                    yield AcquisitionFeed.page(
                        self._db, title, url, lane, annotator,
                        facets=facets, pagination=pagination,
                        force_refresh=True
                    )
                    pagination = pagination.next_page
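# The triple loop above is a cross product of the chosen facet values,
# filtered against what the library has enabled. A compact sketch of that
# same filtering, with illustrative names (chosen/allowed are dicts mapping
# a facet group name to its values):
from itertools import product

def valid_facet_combinations(chosen, allowed):
    groups = sorted(chosen)
    for combo in product(*(chosen[g] for g in groups)):
        if all(value in allowed[g] for g, value in zip(groups, combo)):
            yield dict(zip(groups, combo))

# For example, two orders and one disallowed collection yield two combos:
chosen = {"order": ["title", "author"], "collection": ["full", "featured"]}
allowed = {"order": ["title", "author"], "collection": ["full"]}
assert len(list(valid_facet_combinations(chosen, allowed))) == 2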
def test_suppressed_feed(self):
    # Test the ability to show a paginated feed of suppressed works.

    work1 = self._work(with_open_access_download=True)
    work1.license_pools[0].suppressed = True

    work2 = self._work(with_open_access_download=True)
    work2.license_pools[0].suppressed = True

    # This work won't be included in the feed since its
    # suppressed pool is superceded.
    work3 = self._work(with_open_access_download=True)
    work3.license_pools[0].suppressed = True
    work3.license_pools[0].superceded = True

    pagination = Pagination(size=1)
    annotator = MockAnnotator(self._default_library)
    titles = [work1.title, work2.title]

    def make_page(pagination):
        return AdminFeed.suppressed(
            _db=self._db,
            title="Hidden works",
            url=self._url,
            annotator=annotator,
            pagination=pagination,
        )

    first_page = make_page(pagination)
    parsed = feedparser.parse(str(first_page))
    assert 1 == len(parsed["entries"])
    assert parsed["entries"][0].title in titles
    titles.remove(parsed["entries"][0].title)
    [remaining_title] = titles

    # Make sure the links are in place.
    [start] = self.links(parsed, "start")
    assert annotator.groups_url(None) == start["href"]
    assert annotator.top_level_title() == start["title"]

    [up] = self.links(parsed, "up")
    assert annotator.groups_url(None) == up["href"]
    assert annotator.top_level_title() == up["title"]

    [next_link] = self.links(parsed, "next")
    assert annotator.suppressed_url(pagination.next_page) == next_link["href"]

    # This was the first page, so no previous link.
    assert [] == self.links(parsed, "previous")

    # Now get the second page and make sure it has a 'previous' link.
    second_page = make_page(pagination.next_page)
    parsed = feedparser.parse(str(second_page))
    [previous] = self.links(parsed, "previous")
    assert annotator.suppressed_url(pagination) == previous["href"]
    assert 1 == len(parsed["entries"])
    assert remaining_title == parsed["entries"][0]["title"]

    # The third page is empty.
    third_page = make_page(pagination.next_page.next_page)
    parsed = feedparser.parse(str(third_page))
    [previous] = self.links(parsed, "previous")
    assert annotator.suppressed_url(pagination.next_page) == previous["href"]
    assert 0 == len(parsed["entries"])
def test_complaints_feed(self):
    """Test the ability to show a paginated feed of works with complaints."""
    type = iter(Complaint.VALID_TYPES)
    type1 = next(type)
    type2 = next(type)

    work1 = self._work(
        "fiction work with complaint",
        language="eng",
        fiction=True,
        with_open_access_download=True,
    )
    work1_complaint1 = self._complaint(
        work1.license_pools[0],
        type1,
        "work1 complaint1 source",
        "work1 complaint1 detail",
    )
    work1_complaint2 = self._complaint(
        work1.license_pools[0],
        type1,
        "work1 complaint2 source",
        "work1 complaint2 detail",
    )
    work1_complaint3 = self._complaint(
        work1.license_pools[0],
        type2,
        "work1 complaint3 source",
        "work1 complaint3 detail",
    )

    work2 = self._work(
        "nonfiction work with complaint",
        language="eng",
        fiction=False,
        with_open_access_download=True,
    )
    work2_complaint1 = self._complaint(
        work2.license_pools[0],
        type2,
        "work2 complaint1 source",
        "work2 complaint1 detail",
    )

    work3 = self._work(
        "fiction work without complaint",
        language="eng",
        fiction=True,
        with_open_access_download=True,
    )
    work4 = self._work(
        "nonfiction work without complaint",
        language="eng",
        fiction=False,
        with_open_access_download=True,
    )

    facets = Facets.default(self._default_library)
    pagination = Pagination(size=1)
    annotator = MockAnnotator(self._default_library)

    def make_page(pagination):
        return AdminFeed.complaints(
            library=self._default_library,
            title="Complaints",
            url=self._url,
            annotator=annotator,
            pagination=pagination,
        )

    first_page = make_page(pagination)
    parsed = feedparser.parse(str(first_page))
    assert 1 == len(parsed["entries"])
    assert work1.title == parsed["entries"][0]["title"]

    # Verify that the entry has acquisition links.
    links = parsed["entries"][0]["links"]
    open_access_links = [
        l
        for l in links
        if l["rel"] == "http://opds-spec.org/acquisition/open-access"
    ]
    assert 1 == len(open_access_links)

    # Make sure the links are in place.
    [start] = self.links(parsed, "start")
    assert annotator.groups_url(None) == start["href"]
    assert annotator.top_level_title() == start["title"]

    [up] = self.links(parsed, "up")
    assert annotator.groups_url(None) == up["href"]
    assert annotator.top_level_title() == up["title"]

    [next_link] = self.links(parsed, "next")
    assert (
        annotator.complaints_url(facets, pagination.next_page)
        == next_link["href"]
    )

    # This was the first page, so no previous link.
    assert [] == self.links(parsed, "previous")

    # Now get the second page and make sure it has a 'previous' link.
    second_page = make_page(pagination.next_page)
    parsed = feedparser.parse(str(second_page))
    [previous] = self.links(parsed, "previous")
    assert annotator.complaints_url(facets, pagination) == previous["href"]
    assert 1 == len(parsed["entries"])
    assert work2.title == parsed["entries"][0]["title"]
def test_complaints_feed(self):
    """Test the ability to show a paginated feed of works with complaints."""
    type = iter(Complaint.VALID_TYPES)
    type1 = next(type)
    type2 = next(type)

    work1 = self._work(
        "fiction work with complaint",
        language="eng", fiction=True,
        with_open_access_download=True)
    work1_complaint1 = self._complaint(
        work1.license_pools[0], type1,
        "work1 complaint1 source", "work1 complaint1 detail")
    work1_complaint2 = self._complaint(
        work1.license_pools[0], type1,
        "work1 complaint2 source", "work1 complaint2 detail")
    work1_complaint3 = self._complaint(
        work1.license_pools[0], type2,
        "work1 complaint3 source", "work1 complaint3 detail")
    work2 = self._work(
        "nonfiction work with complaint",
        language="eng", fiction=False,
        with_open_access_download=True)
    work2_complaint1 = self._complaint(
        work2.license_pools[0], type2,
        "work2 complaint1 source", "work2 complaint1 detail")
    work3 = self._work(
        "fiction work without complaint",
        language="eng", fiction=True,
        with_open_access_download=True)
    work4 = self._work(
        "nonfiction work without complaint",
        language="eng", fiction=False,
        with_open_access_download=True)

    facets = Facets.default()
    pagination = Pagination(size=1)
    annotator = TestAnnotator()

    def make_page(pagination):
        return AdminFeed.complaints(
            _db=self._db, title="Complaints", url=self._url,
            annotator=annotator, pagination=pagination)

    first_page = make_page(pagination)
    parsed = feedparser.parse(unicode(first_page))
    eq_(1, len(parsed['entries']))
    eq_(work1.title, parsed['entries'][0]['title'])

    # Make sure the links are in place.
    [start] = self.links(parsed, 'start')
    eq_(annotator.groups_url(None), start['href'])
    eq_(annotator.top_level_title(), start['title'])

    [up] = self.links(parsed, 'up')
    eq_(annotator.groups_url(None), up['href'])
    eq_(annotator.top_level_title(), up['title'])

    [next_link] = self.links(parsed, 'next')
    eq_(annotator.complaints_url(facets, pagination.next_page),
        next_link['href'])

    # This was the first page, so no previous link.
    eq_([], self.links(parsed, 'previous'))

    # Now get the second page and make sure it has a 'previous' link.
    second_page = make_page(pagination.next_page)
    parsed = feedparser.parse(unicode(second_page))
    [previous] = self.links(parsed, 'previous')
    eq_(annotator.complaints_url(facets, pagination), previous['href'])
    eq_(1, len(parsed['entries']))
    eq_(work2.title, parsed['entries'][0]['title'])
def test_no_race_conditions(self):
    # Why do we look up a CachedFeed again after feed generation?
    # Well, let's see what happens if someone else messes around
    # with the CachedFeed object _while the refresher is running_.
    #
    # This is a race condition that happens in real life. Rather
    # than setting up a multi-threaded test, we can have the
    # refresher itself simulate a background modification by
    # messing around with the CachedFeed object we know will
    # eventually be returned.
    #
    # The most up-to-date feed always wins, so background
    # modifications will take effect only if they made the
    # CachedFeed look _newer_ than the foreground process does.
    facets = Facets.default(self._default_library)
    pagination = Pagination.default()
    wl = WorkList()
    wl.initialize(self._default_library)

    m = CachedFeed.fetch

    # In this case, two simulated threads try to create the same
    # CachedFeed at the same time. We end up with a single
    # CachedFeed containing the result of the last code that ran.
    def simultaneous_refresher():
        # This refresher method simulates another thread creating
        # a CachedFeed for this feed while this thread's
        # refresher is running.
        def other_thread_refresher():
            return "Another thread made a feed."

        m(self._db, wl, facets, pagination, other_thread_refresher, 0, raw=True)

        return "Then this thread made a feed."

    # This will call simultaneous_refresher(), which will call
    # CachedFeed.fetch() _again_, which will call
    # other_thread_refresher().
    result = m(
        self._db, wl, facets, pagination, simultaneous_refresher, 0, raw=True
    )

    # We ended up with a single CachedFeed containing the
    # latest information.
    assert [result] == self._db.query(CachedFeed).all()
    assert "Then this thread made a feed." == result.content

    # If two threads contend for an existing CachedFeed, the one that
    # sets CachedFeed.timestamp to the later value wins.
    #
    # Here, the other thread wins by setting .timestamp on the
    # existing CachedFeed to a date in the future.
    now = utc_now()
    tomorrow = now + datetime.timedelta(days=1)
    yesterday = now - datetime.timedelta(days=1)

    def tomorrow_vs_now():
        result.content = "Someone in the background set tomorrow's content."
        result.timestamp = tomorrow
        return "Today's content can't compete."

    tomorrow_result = m(
        self._db, wl, facets, pagination, tomorrow_vs_now, 0, raw=True
    )
    assert tomorrow_result == result
    assert (
        "Someone in the background set tomorrow's content."
        == tomorrow_result.content
    )
    assert tomorrow_result.timestamp == tomorrow

    # Here, the other thread sets .timestamp to a date in the past, and
    # it loses out to the (apparently) newer feed.
    def yesterday_vs_now():
        result.content = "Someone in the background set yesterday's content."
        result.timestamp = yesterday
        return "Today's content is fresher."

    now_result = m(self._db, wl, facets, pagination, yesterday_vs_now, 0, raw=True)

    # We got the same CachedFeed we've been getting this whole
    # time, but the outdated data set by the 'background thread'
    # has been fixed.
    assert result == now_result
    assert "Today's content is fresher." == result.content
    assert result.timestamp > yesterday

    # This shouldn't happen, but if the CachedFeed's timestamp or
    # content are *cleared out* in the background, between the
    # time the CachedFeed is fetched and the time the refresher
    # finishes, then we don't know what's going on and we don't
    # take chances. We create a whole new CachedFeed object for
    # the updated version of the feed.

    # First, try the situation where .timestamp is cleared out in
    # the background.
    def timestamp_cleared_in_background():
        result.content = "Someone else sets content and clears timestamp."
        result.timestamp = None
        return "Non-weird content."

    result2 = m(
        self._db,
        wl,
        facets,
        pagination,
        timestamp_cleared_in_background,
        0,
        raw=True,
    )
    now = utc_now()

    # result2 is a brand new CachedFeed.
    assert result2 != result
    assert "Non-weird content." == result2.content
    assert (now - result2.timestamp).total_seconds() < 2

    # We let the background process do whatever it wants to do
    # with the old one.
    assert "Someone else sets content and clears timestamp." == result.content
    assert None == result.timestamp

    # Next, test the situation where .content is cleared out.
    def content_cleared_in_background():
        result2.content = None
        result2.timestamp = tomorrow
        return "Non-weird content."

    result3 = m(
        self._db, wl, facets, pagination, content_cleared_in_background, 0, raw=True
    )
    now = utc_now()

    # Again, a brand new CachedFeed.
    assert result3 != result2
    assert result3 != result
    assert "Non-weird content." == result3.content
    assert (now - result3.timestamp).total_seconds() < 2

    # Again, we let the background process have the old one for
    # whatever weird thing it wants to do.
    assert None == result2.content
    assert tomorrow == result2.timestamp
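# A sketch of the reconciliation policy the test above pins down: after the
# refresher runs, the CachedFeed row is read again, the newer timestamp wins,
# and a row whose timestamp or content was cleared in the background is
# abandoned in favor of a brand-new row. Illustrative only, not fetch() itself.
def reconcile(cached_timestamp, cached_content, fresh_timestamp, fresh_content):
    if cached_timestamp is None or cached_content is None:
        # Something weird happened in the background; don't take chances.
        return ("create_new_row", fresh_content)
    if cached_timestamp > fresh_timestamp:
        # The background writer looks newer; its content wins.
        return ("keep_existing", cached_content)
    # The foreground refresh is newest; overwrite the existing row.
    return ("update_existing", fresh_content)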
def test_response_format(self):
    # Verify that fetch() can be told to return an appropriate
    # OPDSFeedResponse object. This is the default behavior, since
    # it preserves some useful information that would otherwise be
    # lost.
    facets = Facets.default(self._default_library)
    pagination = Pagination.default()
    wl = WorkList()
    wl.initialize(self._default_library)

    def refresh():
        return "Here's a feed."

    private = object()
    r = CachedFeed.fetch(
        self._db, wl, facets, pagination, refresh, max_age=102, private=private
    )
    assert isinstance(r, OPDSFeedResponse)
    assert 200 == r.status_code
    assert OPDSFeed.ACQUISITION_FEED_TYPE == r.content_type
    assert 102 == r.max_age
    assert "Here's a feed." == str(r)

    # The extra argument `private`, not used by CachedFeed.fetch, was
    # passed on to the OPDSFeedResponse constructor.
    assert private == r.private

    # The CachedFeed was created; just not returned.
    cf = self._db.query(CachedFeed).one()
    assert "Here's a feed." == cf.content

    # Try it again as a cache hit.
    r = CachedFeed.fetch(
        self._db, wl, facets, pagination, refresh, max_age=102, private=private
    )
    assert isinstance(r, OPDSFeedResponse)
    assert 200 == r.status_code
    assert OPDSFeed.ACQUISITION_FEED_TYPE == r.content_type
    assert 102 == r.max_age
    assert "Here's a feed." == str(r)

    # If we tell CachedFeed to cache its feed 'forever', that only
    # applies to the _database_ cache. The client is told to cache
    # the feed for the default period.
    r = CachedFeed.fetch(
        self._db,
        wl,
        facets,
        pagination,
        refresh,
        max_age=CachedFeed.CACHE_FOREVER,
        private=private,
    )
    assert isinstance(r, OPDSFeedResponse)
    assert OPDSFeed.DEFAULT_MAX_AGE == r.max_age

    # If the Library associated with the WorkList used in the feed
    # has root lanes, `private` is always set to True, even if we
    # asked for the opposite.
    from core.model import Library

    Library._has_root_lane_cache[self._default_library.id] = True
    r = CachedFeed.fetch(self._db, wl, facets, pagination, refresh, private=False)
    assert isinstance(r, OPDSFeedResponse)
    assert True == r.private
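# A sketch of the client-facing rule in the test above: the database-side
# CACHE_FOREVER sentinel is never exposed as an HTTP max-age; clients get the
# feed's default instead. CACHE_FOREVER and DEFAULT_MAX_AGE are stand-ins
# here, not the real constants on CachedFeed and OPDSFeed.
CACHE_FOREVER = object()
DEFAULT_MAX_AGE = 600  # assumed value, for illustration only

def http_max_age(requested_max_age):
    if requested_max_age is CACHE_FOREVER:
        return DEFAULT_MAX_AGE
    return requested_max_age

assert http_max_age(102) == 102
assert http_max_age(CACHE_FOREVER) == DEFAULT_MAX_AGE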