def test_last_update_order_facet(self):
    """The default crawlable facets order works by most recent 'update',
    where an update is either Work.last_update_time or the time the work
    was added to a custom list, whichever is later.
    """
    facets = CrawlableFacets.default(self._default_library)
    w1 = self._work(with_license_pool=True)
    w2 = self._work(with_license_pool=True)
    now = datetime.datetime.utcnow()
    w1.last_update_time = now - datetime.timedelta(days=4)
    w2.last_update_time = now - datetime.timedelta(days=3)
    self.add_to_materialized_view([w1, w2])

    from core.model import MaterializedWorkWithGenre as work_model
    qu = self._db.query(work_model)
    qu = facets.apply(self._db, qu)
    # w2 is first because it was updated more recently.
    eq_([w2.id, w1.id], [mw.works_id for mw in qu])

    # Adding the works to a custom list makes the list-entry time count
    # as the update time. (Renamed from `list` to avoid shadowing the
    # builtin.)
    custom_list, ignore = self._customlist(num_entries=0)
    e2, ignore = custom_list.add_entry(w2)
    e1, ignore = custom_list.add_entry(w1)
    self._db.flush()
    SessionManager.refresh_materialized_views(self._db)
    qu = self._db.query(work_model)
    qu = facets.apply(self._db, qu)
    # w1 is first because it was added to the list more recently.
    eq_([w1.id, w2.id], [mw.works_id for mw in qu])
def test_childrens_series_with_same_name_as_adult_series(self):
    """SeriesLane filters strictly by audience, even when series for
    different audiences share the same name.
    """
    [children, ya, adult, adults_only] = self.sample_works_for_each_audience()

    # Give the children's, adult, and adults-only works the same series name.
    series_name = "Monkey Business"
    for work in [children, adult, adults_only]:
        work.presentation_edition.series = series_name
    self._db.commit()
    SessionManager.refresh_materialized_views(self._db)

    # SeriesLane only returns works that match a given audience.
    children_lane = SeriesLane(
        self._default_library, series_name,
        audiences=[Classifier.AUDIENCE_CHILDREN]
    )
    self.assert_works_queries(children_lane, [children])

    # It's strict about this, in an attempt to increase series accuracy.
    # A request for adult material only returns Adult material, not
    # Adults Only material.
    adult_lane = SeriesLane(
        self._default_library, series_name,
        audiences=[Classifier.AUDIENCE_ADULT]
    )
    self.assert_works_queries(adult_lane, [adult])

    adult_lane = SeriesLane(
        self._default_library, series_name,
        audiences=[Classifier.AUDIENCE_ADULTS_ONLY]
    )
    self.assert_works_queries(adult_lane, [adults_only])
def test_works_query_with_source_language(self):
    """A RecommendationLane only includes recommendations whose language
    matches the source work's language.
    """
    # Prepare a number of works with different languages.
    # TODO: Setting a data source name is necessary because
    # Gutenberg books get filtered out when children or ya
    # is one of the lane's audiences.
    eng = self._work(with_license_pool=True, language='eng',
                     data_source_name=DataSource.OVERDRIVE)
    fre = self._work(with_license_pool=True, language='fre',
                     data_source_name=DataSource.OVERDRIVE)
    spa = self._work(with_license_pool=True, language='spa',
                     data_source_name=DataSource.OVERDRIVE)
    SessionManager.refresh_materialized_views(self._db)

    # They're all returned as recommendations from NoveList Select.
    recommendations = [
        work.license_pools[0].identifier for work in [eng, fre, spa]
    ]

    # But only the work that matches the source work is included.
    mock_api = self.generate_mock_api()
    lane = RecommendationLane(
        self._default_library, self.work, '', novelist_api=mock_api
    )
    lane.recommendations = recommendations
    self.assert_works_queries(lane, [eng])

    # It doesn't matter the language.
    self.work.presentation_edition.language = 'fre'
    SessionManager.refresh_materialized_views(self._db)
    mock_api = self.generate_mock_api()
    lane = RecommendationLane(
        self._default_library, self.work, '', novelist_api=mock_api
    )
    lane.recommendations = recommendations
    self.assert_works_queries(lane, [fre])
def test_works_query_with_source_audience(self):
    """If the lane is created with a source audience, it filters the
    recommendations appropriately.
    """
    works = self.sample_works_for_each_audience()
    [children, ya, adult, adults_only] = works

    recommendations = [
        work.license_pools[0].identifier for work in works
    ]

    # Map each source-work audience to the recommendations it should
    # let through.
    expected = {
        Classifier.AUDIENCE_CHILDREN: [children],
        Classifier.AUDIENCE_YOUNG_ADULT: [children, ya],
        Classifier.AUDIENCE_ADULTS_ONLY: works
    }

    for audience, results in expected.items():
        self.work.audience = audience
        SessionManager.refresh_materialized_views(self._db)
        mock_api = self.generate_mock_api()
        lane = RecommendationLane(
            self._default_library, self.work, '', novelist_api=mock_api
        )
        lane.recommendations = recommendations
        self.assert_works_queries(lane, results)
def test_works_query_with_source_language(self):
    """Recommendations are filtered by the source work's language."""
    # Prepare a number of works with different languages.
    eng = self._work(with_license_pool=True, language='eng')
    fre = self._work(with_license_pool=True, language='fre')
    spa = self._work(with_license_pool=True, language='spa')
    SessionManager.refresh_materialized_views(self._db)

    # They're all returned as recommendations from NoveList Select.
    recommendations = [
        work.license_pools[0].identifier for work in [eng, fre, spa]
    ]

    # But only the work that matches the source work is included.
    mock_api = self.generate_mock_api()
    lane = RecommendationLane(self._db, self.lp, '', novelist_api=mock_api)
    lane.recommendations = recommendations
    self.assert_works_queries(lane, [eng])

    # It doesn't matter the language.
    self.lp.presentation_edition.language = 'fre'
    SessionManager.refresh_materialized_views(self._db)
    mock_api = self.generate_mock_api()
    lane = RecommendationLane(self._db, self.lp, '', novelist_api=mock_api)
    lane.recommendations = recommendations
    self.assert_works_queries(lane, [fre])
def test_works_query_accounts_for_source_audience(self):
    """ContributorLane restricts its works to the requested audiences."""
    works = self.sample_works_for_each_audience()
    [children, ya] = works[:2]

    # Give them all the same contributor.
    for work in works:
        work.presentation_edition.contributions[0].contributor = self.contributor
    SessionManager.refresh_materialized_views(self._db)

    # Only childrens works are available in a ContributorLane with a
    # Children audience source
    children_lane = ContributorLane(
        self._db, self._default_library, 'Lois Lane',
        audiences=[Classifier.AUDIENCE_CHILDREN]
    )
    self.assert_works_queries(children_lane, [children])

    # When more than one audience is requested, all are included.
    ya_lane = ContributorLane(
        self._db, self._default_library, 'Lois Lane',
        audiences=list(Classifier.AUDIENCES_JUVENILE)
    )
    self.assert_works_queries(ya_lane, [children, ya])
def initialize_database(autoinitialize=True):
    """Attach a scoped database session to the Flask app and configure
    logging and debug mode from stored configuration.

    :param autoinitialize: If True, initialize the database schema
        before creating the session.
    """
    db_url = Configuration.database_url()
    if autoinitialize:
        SessionManager.initialize(db_url)
    session_factory = SessionManager.sessionmaker(db_url)
    _db = flask_scoped_session(session_factory, app)
    app._db = _db

    Configuration.load(_db)
    testing = 'TESTING' in os.environ
    log_level = LogConfiguration.initialize(_db, testing=testing)
    # Respect an explicitly-set app.debug; otherwise derive it from the
    # configured log level.
    if app.debug is None:
        debug = log_level == 'DEBUG'
        app.debug = debug
    else:
        debug = app.debug
    app.config['DEBUG'] = debug
    _db.commit()

    app.log = logging.getLogger("Metadata web app")
    app.log.info("Application debug mode: %r", app.debug)
    for log_handler in logging.getLogger().handlers:
        app.log.info("Logs are going to %r", log_handler)

    # Register an error handler that logs exceptions through the
    # normal logging process and tries to turn them into Problem
    # Detail Documents.
    error_handler = ErrorHandler(app, app.config['DEBUG'])
    @app.errorhandler(Exception)
    def exception_handler(exception):
        return error_handler.handle(exception)
def test_search(self):
    """A search query returns a paginated OPDS feed with borrow,
    next, and previous links.
    """
    # Put two works into the search index
    self.english_1.update_external_index(self.manager.external_search)
    self.english_2.update_external_index(self.manager.external_search)

    # Update the materialized view to make sure the works show up.
    SessionManager.refresh_materialized_views(self._db)

    # Execute a search query designed to find the second one.
    with self.app.test_request_context("/?q=t&size=1&after=1"):
        response = self.manager.opds_feeds.search(None, None)
        feed = feedparser.parse(response.data)
        entries = feed['entries']
        eq_(1, len(entries))
        entry = entries[0]
        eq_(self.english_2.author, entry.author)

        assert 'links' in entry
        assert len(entry.links) > 0

        borrow_links = [
            link for link in entry.links
            if link.rel == 'http://opds-spec.org/acquisition/borrow'
        ]
        eq_(1, len(borrow_links))

        # Pagination links appear on the feed itself.
        feed_links = feed['feed']['links']
        next_links = [link for link in feed_links if link.rel == 'next']
        eq_(1, len(next_links))
        previous_links = [link for link in feed_links if link.rel == 'previous']
        eq_(1, len(previous_links))
def test_multipage_feed(self):
    """A size-limited feed carries pagination, facet, search,
    and shelf links.
    """
    self._work("fiction work", language="eng", fiction=True,
               with_open_access_download=True)
    SessionManager.refresh_materialized_views(self._db)
    with self.app.test_request_context("/?size=1"):
        response = self.manager.opds_feeds.feed('eng', 'Adult Fiction')
        feed = feedparser.parse(response.data)
        eq_(1, len(feed['entries']))

        links = feed['feed']['links']

        # The 'next' link preserves the page size and advances the offset.
        next_link = [x for x in links if x['rel'] == 'next'][0]['href']
        assert 'after=1' in next_link
        assert 'size=1' in next_link

        # Sort facets are advertised.
        facet_links = [
            x for x in links if x['rel'] == 'http://opds-spec.org/facet'
        ]
        assert any('order=title' in x['href'] for x in facet_links)
        assert any('order=author' in x['href'] for x in facet_links)

        search_link = [x for x in links if x['rel'] == 'search'][0]['href']
        assert search_link.endswith('/search/eng/Adult%20Fiction')

        shelf_link = [
            x for x in links if x['rel'] == 'http://opds-spec.org/shelf'
        ][0]['href']
        assert shelf_link.endswith('/loans/')
def test_groups(self):
    """The grouped feed features works per lane according to the
    configured featured-lane size.
    """
    with temp_config() as config:
        config[Configuration.POLICIES] = {
            Configuration.GROUPS_MAX_AGE_POLICY: 10,
            Configuration.MINIMUM_FEATURED_QUALITY: 0,
            Configuration.FEATURED_LANE_SIZE: 2,
        }
        for i in range(2):
            self._work("fiction work %i" % i, language="eng",
                       fiction=True, with_open_access_download=True)
            self._work("nonfiction work %i" % i, language="eng",
                       fiction=False, with_open_access_download=True)
        SessionManager.refresh_materialized_views(self._db)
        with self.app.test_request_context("/"):
            response = self.manager.opds_feeds.groups(None, None)
            feed = feedparser.parse(response.data)

            # Tally how many entries belong to each collection lane.
            counter = Counter(
                link['title']
                for entry in feed['entries']
                for link in entry.links
                if link['rel'] == 'collection'
            )
            eq_(2, counter['Nonfiction'])
            eq_(2, counter['Fiction'])
            eq_(1, counter['Other Languages'])
def test_complaints(self):
    """The admin complaints endpoint tallies a license pool's
    complaints by type.
    """
    # Renamed from `type` so the builtin isn't shadowed.
    type_iterator = iter(Complaint.VALID_TYPES)
    type1 = next(type_iterator)
    type2 = next(type_iterator)

    work = self._work(
        "fiction work with complaint",
        language="eng", fiction=True, with_open_access_download=True)
    # Two complaints of the first type, one of the second.
    complaint1 = self._complaint(
        work.license_pools[0], type1,
        "complaint1 source", "complaint1 detail")
    complaint2 = self._complaint(
        work.license_pools[0], type1,
        "complaint2 source", "complaint2 detail")
    complaint3 = self._complaint(
        work.license_pools[0], type2,
        "complaint3 source", "complaint3 detail")
    SessionManager.refresh_materialized_views(self._db)
    [lp] = work.license_pools

    with self.app.test_request_context("/"):
        response = self.manager.admin_work_controller.complaints(
            lp.data_source.name, lp.identifier.identifier)
        eq_(response['book']['data_source'], lp.data_source.name)
        eq_(response['book']['identifier'], lp.identifier.identifier)
        eq_(response['complaints'][type1], 2)
        eq_(response['complaints'][type2], 1)
def test_feed(self):
    """The lane feed contains the right works and advertises the
    configured site links.
    """
    SessionManager.refresh_materialized_views(self._db)
    with self.app.test_request_context("/"):
        with temp_config() as config:
            config['links'] = {
                "terms_of_service": "a",
                "privacy_policy": "b",
                "copyright": "c",
                "about": "d",
            }
            response = self.manager.opds_feeds.feed(
                'eng', 'Adult Fiction'
            )

            # Only the English adult-fiction work appears.
            assert self.english_1.title in response.data
            assert self.english_2.title not in response.data
            assert self.french_1.title not in response.data

            feed = feedparser.parse(response.data)
            by_rel = dict(
                (link['rel'], link['href'])
                for link in feed['feed']['links']
            )
            eq_("a", by_rel['terms-of-service'])
            eq_("b", by_rel['privacy-policy'])
            eq_("c", by_rel['copyright'])
            eq_("d", by_rel['about'])
def test_related_books(self):
    """The related-books endpoint 404s without related works, and
    otherwise returns a grouped feed combining series and
    recommendation sub-feeds.
    """
    # A book with no related books returns a ProblemDetail.
    with temp_config() as config:
        config['integrations'][Configuration.NOVELIST_INTEGRATION] = {}
        with self.app.test_request_context('/'):
            response = self.manager.work_controller.related(
                self.datasource, self.identifier.type,
                self.identifier.identifier
            )
            eq_(404, response.status_code)
            eq_("http://librarysimplified.org/terms/problem/unknown-lane",
                response.uri)

    # Prep book with a book in its series and a recommendation.
    self.lp.presentation_edition.series = "Around the World"
    self.french_1.presentation_edition.series = "Around the World"
    SessionManager.refresh_materialized_views(self._db)

    source = DataSource.lookup(self._db, self.datasource)
    metadata = Metadata(source)
    mock_api = MockNoveListAPI()
    metadata.recommendations = [self.english_2.license_pools[0].identifier]
    mock_api.setup(metadata)

    # A grouped feed is returned with both of these related books
    with self.app.test_request_context('/'):
        response = self.manager.work_controller.related(
            self.datasource, self.identifier.type,
            self.identifier.identifier, novelist_api=mock_api
        )
    eq_(200, response.status_code)
    feed = feedparser.parse(response.data)
    eq_(3, len(feed['entries']))

    # One book is in the recommendations feed.
    [e1] = [e for e in feed['entries']
            if e['title'] == self.english_2.title]
    [collection_link] = [link for link in e1['links']
                         if link['rel'] == 'collection']
    eq_("Recommended Books", collection_link['title'])
    work_url = "/works/%s/%s/%s/" % (
        self.datasource, self.identifier.type, self.identifier.identifier)
    expected = urllib.quote(work_url + 'recommendations')
    eq_(True, collection_link['href'].endswith(expected))

    # Two books are in the series feed: the original work and its
    # companion.
    # (Review note: this comment had lost its '#' across a line break
    # in the original, which made the file a syntax error.)
    [e2] = [e for e in feed['entries']
            if e['title'] == self.french_1.title]
    [collection_link] = [link for link in e2['links']
                         if link['rel'] == 'collection']
    eq_("Around the World", collection_link['title'])
    expected = urllib.quote(work_url + 'series')
    eq_(True, collection_link['href'].endswith(expected))

    [e3] = [e for e in feed['entries']
            if e['title'] == self.english_1.title]
    [collection_link] = [link for link in e3['links']
                         if link['rel'] == 'collection']
    eq_("Around the World", collection_link['title'])
    expected = urllib.quote(work_url + 'series')
    eq_(True, collection_link['href'].endswith(expected))
def test_preload(self):
    """The preload feed contains only works whose URNs are listed in
    the preloaded-content policy.
    """
    SessionManager.refresh_materialized_views(self._db)
    with temp_config() as config:
        urn = self.english_2.primary_edition.primary_identifier.urn
        config[Configuration.POLICIES][Configuration.PRELOADED_CONTENT] = [urn]
        with self.app.test_request_context("/"):
            response = self.manager.opds_feeds.preload()

        # Only the preloaded work shows up.
        assert self.english_1.title not in response.data
        assert self.english_2.title in response.data
        assert self.french_1.author not in response.data
def test_suppressed(self):
    """The admin suppressed feed lists only works whose license pools
    are suppressed.
    """
    suppressed_work = self._work(with_open_access_download=True)
    suppressed_work.license_pools[0].suppressed = True
    unsuppressed_work = self._work()
    SessionManager.refresh_materialized_views(self._db)
    with self.app.test_request_context("/"):
        response = self.manager.admin_feed_controller.suppressed()
        feed = feedparser.parse(response.data)
        feed_entries = feed['entries']
        eq_(1, len(feed_entries))
        eq_(suppressed_work.title, feed_entries[0]['title'])
def test_works_query(self):
    """A RecommendationLane is empty until recommendations exist."""
    # Prep an empty result.
    mock_api = self.generate_mock_api()

    # With an empty recommendation result, the lane is empty.
    lane = RecommendationLane(self._db, self.lp, '', novelist_api=mock_api)
    eq_(None, lane.works())
    eq_(None, lane.materialized_works())

    # Resulting recommendations are returned when available, though.
    result = self._work(with_license_pool=True)
    lane.recommendations = [result.license_pools[0].identifier]
    SessionManager.refresh_materialized_views(self._db)
    self.assert_works_queries(lane, [result])
def test_initialize_data_does_not_reset_timestamp(self):
    """Calling initialize_data() a second time leaves the
    'site configuration changed' Timestamp untouched.
    """
    # initialize_data() has already been called during test setup, so
    # the Timestamp exists.
    timestamp = get_one(
        self._db, Timestamp, collection=None,
        service=Configuration.SITE_CONFIGURATION_CHANGED,
    )
    finish_before = timestamp.finish
    SessionManager.initialize_data(self._db)
    assert finish_before == timestamp.finish
def initialize_database(autoinitialize=True):
    """Attach a scoped database session to the Flask app and set debug
    mode from the configured log level.

    :param autoinitialize: If True, initialize the database schema
        before creating the session.
    """
    testing = 'TESTING' in os.environ
    db_url = Configuration.database_url()
    if autoinitialize:
        SessionManager.initialize(db_url)
    session_factory = SessionManager.sessionmaker(db_url)
    _db = flask_scoped_session(session_factory, app)
    app._db = _db

    log_level = LogConfiguration.initialize(_db, testing=testing)
    debug = log_level == 'DEBUG'
    app.config['DEBUG'] = debug
    app.debug = debug
    _db.commit()
    # Lazy %-style args keep formatting out of the hot path; the
    # rendered message is identical.
    logging.getLogger().info("Application debug mode==%r", app.debug)
def test_works_query(self):
    """A RecommendationLane is empty until recommendations exist."""
    # Prep an empty result.
    mock_api = self.generate_mock_api()

    # With an empty recommendation result, the lane is empty.
    lane = RecommendationLane(
        self._default_library, self.work, '', novelist_api=mock_api
    )
    eq_(None, lane.works(self._db))

    # Resulting recommendations are returned when available, though.
    # TODO: Setting a data source name is necessary because Gutenberg
    # books get filtered out when children or ya is one of the lane's
    # audiences.
    result = self._work(
        with_license_pool=True, data_source_name=DataSource.OVERDRIVE
    )
    lane.recommendations = [result.license_pools[0].identifier]
    SessionManager.refresh_materialized_views(self._db)
    self.assert_works_queries(lane, [result])
def test_recommendations(self):
    """The recommendations endpoint renders NoveList results as an OPDS
    feed, caches it, and 404s when NoveList is unconfigured.
    """
    # Prep an empty recommendation.
    source = DataSource.lookup(self._db, self.datasource)
    metadata = Metadata(source)
    mock_api = MockNoveListAPI()
    mock_api.setup(metadata)
    SessionManager.refresh_materialized_views(self._db)
    with self.app.test_request_context('/'):
        response = self.manager.work_controller.recommendations(
            self.datasource, self.identifier.type,
            self.identifier.identifier, novelist_api=mock_api
        )
        # With no recommendations, the feed is empty but well-formed.
        eq_(200, response.status_code)
        feed = feedparser.parse(response.data)
        eq_('Recommended Books', feed['feed']['title'])
        eq_(0, len(feed['entries']))

    # Delete the cache and prep a recommendation result.
    # (The empty feed above was cached; without deleting it the second
    # request would serve the stale empty feed.)
    [cached_empty_feed] = self._db.query(CachedFeed).all()
    self._db.delete(cached_empty_feed)
    metadata.recommendations = [self.english_2.license_pools[0].identifier]
    mock_api.setup(metadata)
    SessionManager.refresh_materialized_views(self._db)
    with self.app.test_request_context('/'):
        response = self.manager.work_controller.recommendations(
            self.datasource, self.identifier.type,
            self.identifier.identifier, novelist_api=mock_api
        )
        # A feed is returned with the proper recommendation.
        eq_(200, response.status_code)
        feed = feedparser.parse(response.data)
        eq_('Recommended Books', feed['feed']['title'])
        eq_(1, len(feed['entries']))
        [entry] = feed['entries']
        eq_(self.english_2.title, entry['title'])
        eq_(self.english_2.author, entry['author'])

    # Without a NoveList integration configured, the endpoint is an
    # unknown lane.
    with temp_config() as config:
        with self.app.test_request_context('/'):
            config['integrations'][Configuration.NOVELIST_INTEGRATION] = {}
            response = self.manager.work_controller.recommendations(
                self.datasource, self.identifier.type,
                self.identifier.identifier
            )
            eq_(404, response.status_code)
            eq_("http://librarysimplified.org/terms/problem/unknown-lane",
                response.uri)
def test_resolve_complaints(self):
    """Resolving complaints: wrong type 404s, right type resolves all
    matching complaints, re-resolving conflicts with 409.
    """
    # Renamed from `type` so the builtin isn't shadowed.
    type_iterator = iter(Complaint.VALID_TYPES)
    type1 = next(type_iterator)
    type2 = next(type_iterator)

    work = self._work(
        "fiction work with complaint",
        language="eng", fiction=True, with_open_access_download=True)
    # Two unresolved complaints, both of type1.
    complaint1 = self._complaint(
        work.license_pools[0], type1,
        "complaint1 source", "complaint1 detail")
    complaint2 = self._complaint(
        work.license_pools[0], type1,
        "complaint2 source", "complaint2 detail")
    SessionManager.refresh_materialized_views(self._db)
    [lp] = work.license_pools

    # first attempt to resolve complaints of the wrong type
    with self.app.test_request_context("/"):
        flask.request.form = ImmutableMultiDict([("type", type2)])
        response = self.manager.admin_work_controller.resolve_complaints(
            lp.data_source.name, lp.identifier.type,
            lp.identifier.identifier)
        unresolved_complaints = [
            complaint for complaint in lp.complaints
            if complaint.resolved == None]
        eq_(response.status_code, 404)
        eq_(len(unresolved_complaints), 2)

    # then attempt to resolve complaints of the correct type
    with self.app.test_request_context("/"):
        flask.request.form = ImmutableMultiDict([("type", type1)])
        response = self.manager.admin_work_controller.resolve_complaints(
            lp.data_source.name, lp.identifier.type,
            lp.identifier.identifier)
        unresolved_complaints = [
            complaint for complaint in lp.complaints
            if complaint.resolved == None]
        eq_(response.status_code, 200)
        eq_(len(unresolved_complaints), 0)

    # then attempt to resolve the already-resolved complaints of the
    # correct type
    with self.app.test_request_context("/"):
        flask.request.form = ImmutableMultiDict([("type", type1)])
        response = self.manager.admin_work_controller.resolve_complaints(
            lp.data_source.name, lp.identifier.type,
            lp.identifier.identifier)
        eq_(response.status_code, 409)
def test_workers_are_created_with_sessions(self):
    """Every worker in a DatabasePool gets its own session bound to the
    factory's engine.
    """
    session_factory = SessionManager.sessionmaker(session=self._db)
    expected_bind = session_factory.kw["bind"]
    pool = DatabasePool(2, session_factory)
    try:
        for worker in pool.workers:
            assert worker._db
            assert expected_bind == worker._db.connection()
    finally:
        # Always shut the pool down, even on assertion failure.
        pool.join()
def test_classifications(self):
    """The admin classifications endpoint lists a work's classifications
    ordered by weight (descending), excluding none of them — subjects
    without a genre are still included.
    """
    e, pool = self._edition(with_license_pool=True)
    work = self._work(presentation_edition=e)
    identifier = work.presentation_edition.primary_identifier

    # Three subjects: two mapped to genres, one (subject3) with no genre.
    genres = self._db.query(Genre).all()
    subject1 = self._subject(type="type1", identifier="subject1")
    subject1.genre = genres[0]
    subject2 = self._subject(type="type2", identifier="subject2")
    subject2.genre = genres[1]
    subject3 = self._subject(type="type2", identifier="subject3")
    subject3.genre = None

    source = DataSource.lookup(self._db, DataSource.AXIS_360)
    # Distinct weights so the expected ordering below is unambiguous.
    classification1 = self._classification(
        identifier=identifier, subject=subject1,
        data_source=source, weight=1)
    classification2 = self._classification(
        identifier=identifier, subject=subject2,
        data_source=source, weight=3)
    classification3 = self._classification(
        identifier=identifier, subject=subject3,
        data_source=source, weight=2)

    SessionManager.refresh_materialized_views(self._db)
    [lp] = work.license_pools

    with self.app.test_request_context("/"):
        response = self.manager.admin_work_controller.classifications(
            lp.data_source.name, lp.identifier.type,
            lp.identifier.identifier)
        eq_(response['book']['data_source'], lp.data_source.name)
        eq_(response['book']['identifier_type'], lp.identifier.type)
        eq_(response['book']['identifier'], lp.identifier.identifier)

        # Ordered by descending weight: 3, 2, 1.
        expected_results = [classification2, classification3, classification1]
        eq_(len(response['classifications']), len(expected_results))
        for i, classification in enumerate(expected_results):
            subject = classification.subject
            source = classification.data_source
            eq_(response['classifications'][i]['name'], subject.identifier)
            eq_(response['classifications'][i]['type'], subject.type)
            eq_(response['classifications'][i]['source'], source.name)
            eq_(response['classifications'][i]['weight'], classification.weight)
def test_classifications(self):
    """Older variant of the classifications test: only genre-mapped
    subjects appear, ordered by descending weight.
    """
    e, pool = self._edition(with_license_pool=True)
    work = self._work(primary_edition=e)
    identifier = work.primary_edition.primary_identifier

    # Three subjects: two mapped to genres, one (subject3) with no
    # genre — the genreless one is expected to be excluded below.
    genres = self._db.query(Genre).all()
    subject1 = self._subject(type="type1", identifier="subject1")
    subject1.genre = genres[0]
    subject2 = self._subject(type="type2", identifier="subject2")
    subject2.genre = genres[1]
    subject3 = self._subject(type="type2", identifier="subject3")
    subject3.genre = None

    source = DataSource.lookup(self._db, DataSource.AXIS_360)
    classification1 = self._classification(
        identifier=identifier, subject=subject1,
        data_source=source, weight=1)
    classification2 = self._classification(
        identifier=identifier, subject=subject2,
        data_source=source, weight=2)
    classification3 = self._classification(
        identifier=identifier, subject=subject3,
        data_source=source, weight=1.5)

    SessionManager.refresh_materialized_views(self._db)
    [lp] = work.license_pools

    # NOTE(review): this comment looks copied from the
    # resolve-complaints test and doesn't describe this request.
    # first attempt to resolve complaints of the wrong type
    with self.app.test_request_context("/"):
        response = self.manager.admin_work_controller.classifications(
            lp.data_source.name, lp.identifier.identifier)
        eq_(response['book']['data_source'], lp.data_source.name)
        eq_(response['book']['identifier'], lp.identifier.identifier)

        # Only the two genre-mapped classifications appear, heaviest first.
        eq_(len(response['classifications']), 2)
        eq_(response['classifications'][0]['name'], subject2.identifier)
        eq_(response['classifications'][0]['type'], subject2.type)
        eq_(response['classifications'][0]['source'], source.name)
        eq_(response['classifications'][0]['weight'], classification2.weight)
        eq_(response['classifications'][1]['name'], subject1.identifier)
        eq_(response['classifications'][1]['type'], subject1.type)
        eq_(response['classifications'][1]['source'], source.name)
        eq_(response['classifications'][1]['weight'], classification1.weight)
def test_bibliographic_filter_clause(self):
    """A CrawlableCustomListBasedLane's filter clause restricts the
    query to works on its custom list.
    """
    w1 = self._work(with_license_pool=True)
    w2 = self._work(with_license_pool=True)

    # Only w2 is in the list. (Renamed from `list` to avoid shadowing
    # the builtin.)
    custom_list, ignore = self._customlist(num_entries=0)
    e2, ignore = custom_list.add_entry(w2)
    self.add_to_materialized_view([w1, w2])
    self._db.flush()
    SessionManager.refresh_materialized_views(self._db)

    lane = CrawlableCustomListBasedLane()
    lane.initialize(self._default_library, custom_list)

    from core.model import MaterializedWorkWithGenre as work_model
    qu = self._db.query(work_model)
    qu, clause = lane.bibliographic_filter_clause(self._db, qu)
    qu = qu.filter(clause)
    eq_([w2.id], [mw.works_id for mw in qu])
def test_works_query_accounts_for_source_audience(self):
    """ContributorLane restricts its works to the requested audiences."""
    works = self.sample_works_for_each_audience()
    [children, ya] = works[:2]

    # Give them all the same contributor.
    for work in works:
        work.presentation_edition.contributions[0].contributor = self.contributor
    self._db.commit()
    SessionManager.refresh_materialized_views(self._db)

    # Only childrens works are available in a ContributorLane with a
    # Children audience source
    children_lane = ContributorLane(
        self._default_library, 'Lois Lane',
        audiences=[Classifier.AUDIENCE_CHILDREN]
    )
    self.assert_works_queries(children_lane, [children])

    # When more than one audience is requested, all are included.
    ya_lane = ContributorLane(
        self._default_library, 'Lois Lane',
        audiences=list(Classifier.AUDIENCES_JUVENILE)
    )
    self.assert_works_queries(ya_lane, [children, ya])
def test_complaints(self):
    """The admin complaints feed lists one entry per work that has
    complaints.
    """
    # Renamed from `type` so the builtin isn't shadowed.
    type_iterator = iter(Complaint.VALID_TYPES)
    type1 = next(type_iterator)
    type2 = next(type_iterator)

    # work1 has two complaints, work2 has one; each work appears once.
    work1 = self._work(
        "fiction work with complaint 1",
        language="eng", fiction=True, with_open_access_download=True)
    complaint1 = self._complaint(
        work1.license_pools[0], type1,
        "complaint source 1", "complaint detail 1")
    complaint2 = self._complaint(
        work1.license_pools[0], type2,
        "complaint source 2", "complaint detail 2")
    work2 = self._work(
        "nonfiction work with complaint",
        language="eng", fiction=False, with_open_access_download=True)
    complaint3 = self._complaint(
        work2.license_pools[0], type1,
        "complaint source 3", "complaint detail 3")
    SessionManager.refresh_materialized_views(self._db)

    with self.app.test_request_context("/"):
        response = self.manager.admin_feed_controller.complaints()
        feed = feedparser.parse(response.data)
        entries = feed['entries']
        eq_(len(entries), 2)
def test_works_query(self):
    """SeriesLane ordering: title order without series positions,
    numeric order with them; language filtering applies.
    """
    # If there are no works with the series name, no works are returned.
    series_name = "Like As If Whatever Mysteries"
    lane = SeriesLane(self._default_library, series_name)
    self.assert_works_queries(lane, [])

    # Works in the series are returned as expected.
    w1 = self._work(with_license_pool=True)
    w1.presentation_edition.series = series_name
    self._db.commit()
    SessionManager.refresh_materialized_views(self._db)
    self.assert_works_queries(lane, [w1])

    # When there are two works without series_position, they're
    # returned in alphabetical order by title.
    w1.presentation_edition.title = "Zoology"
    w2 = self._work(with_license_pool=True)
    w2.presentation_edition.title = "Anthropology"
    w2.presentation_edition.series = series_name
    self._db.commit()
    SessionManager.refresh_materialized_views(self._db)
    self.assert_works_queries(lane, [w2, w1])

    # If a series_position is added, they're ordered in numerical order.
    w1.presentation_edition.series_position = 6
    w2.presentation_edition.series_position = 13
    self._db.commit()
    SessionManager.refresh_materialized_views(self._db)
    self.assert_works_queries(lane, [w1, w2])

    # If the lane is created with languages, works in other languages
    # aren't included.
    fre = self._work(with_license_pool=True, language='fre')
    spa = self._work(with_license_pool=True, language='spa')
    for work in [fre, spa]:
        work.presentation_edition.series = series_name
    self._db.commit()
    SessionManager.refresh_materialized_views(self._db)
    lane.languages = ['fre', 'spa']
    self.assert_works_queries(lane, [fre, spa])
def test_works_query(self):
    """ContributorLane matching: works are found by display name and,
    when a contributor_id is given, by VIAF-linked pseudonyms too.
    """
    # A work by someone else.
    w1 = self._work(with_license_pool=True)

    # A work by the contributor with the same name, without VIAF info.
    w2 = self._work(title="X is for Xylophone", with_license_pool=True)
    same_name = w2.presentation_edition.contributions[0].contributor
    same_name.display_name = 'Lois Lane'
    SessionManager.refresh_materialized_views(self._db)

    # The work with a matching name is found in the contributor lane.
    lane = ContributorLane(
        self._db, 'Lois Lane', contributor_id=self.contributor.id)
    self.assert_works_queries(lane, [w2])

    # And when we add some additional works, like:
    # A work by the contributor.
    w3 = self._work(title="A is for Apple", with_license_pool=True)
    w3.presentation_edition.add_contributor(
        self.contributor, [Contributor.PRIMARY_AUTHOR_ROLE])

    # A work by the contributor with VIAF info, writing with a pseudonym.
    w4 = self._work(title="D is for Dinosaur", with_license_pool=True)
    same_viaf, i = self._contributor('Lane, L', **dict(viaf='7'))
    w4.presentation_edition.add_contributor(
        same_viaf, [Contributor.EDITOR_ROLE])
    SessionManager.refresh_materialized_views(self._db)

    # Those works are also included in the lane, in alphabetical order.
    self.assert_works_queries(lane, [w3, w4, w2])

    # When the lane is created without a contributor_id, the query
    # only searches by name — so the pseudonymous w4 drops out.
    lane = ContributorLane(self._db, 'Lois Lane')
    self.assert_works_queries(lane, [w3, w2])

    # If the lane is created with languages, works in other languages
    # aren't included.
    fre = self._work(with_license_pool=True, language='fre')
    spa = self._work(with_license_pool=True, language='spa')
    for work in [fre, spa]:
        main_contribution = work.presentation_edition.contributions[0]
        main_contribution.contributor = self.contributor
    SessionManager.refresh_materialized_views(self._db)

    lane = ContributorLane(self._db, 'Lois Lane', languages=['eng'])
    self.assert_works_queries(lane, [w3, w2])

    lane.languages = ['fre', 'spa']
    self.assert_works_queries(lane, [fre, spa])
def test_works_query(self):
    """Exercise ContributorLane's works query: name matching,
    alphabetical ordering by title, and language filtering.
    """
    # A work by an unrelated author; it must never appear in the lane.
    unrelated = self._work(with_license_pool=True)

    # A work whose author merely shares the display name and carries
    # no VIAF information.
    name_match = self._work(title="X is for Xylophone", with_license_pool=True)
    namesake = name_match.presentation_edition.contributions[0].contributor
    namesake.display_name = 'Lois Lane'
    self._db.commit()
    SessionManager.refresh_materialized_views(self._db)

    # The name-matched work is picked up by the contributor lane.
    lane = ContributorLane(self._default_library, 'Lois Lane')
    self.assert_works_queries(lane, [name_match])

    # Add a work credited directly to the contributor...
    by_contributor = self._work(title="A is for Apple", with_license_pool=True)
    by_contributor.presentation_edition.add_contributor(
        self.contributor, [Contributor.PRIMARY_AUTHOR_ROLE])

    # ...and one credited under a pseudonym that has VIAF info.
    by_pseudonym = self._work(title="D is for Dinosaur", with_license_pool=True)
    same_viaf, i = self._contributor('Lane, L', **dict(viaf='7'))
    by_pseudonym.presentation_edition.add_contributor(
        same_viaf, [Contributor.EDITOR_ROLE])
    self._db.commit()
    SessionManager.refresh_materialized_views(self._db)

    # All three matching works appear, sorted alphabetically by title.
    self.assert_works_queries(
        lane, [by_contributor, by_pseudonym, name_match])

    # Works in languages outside the lane's language list are excluded.
    fre = self._work(with_license_pool=True, language='fre')
    spa = self._work(with_license_pool=True, language='spa')
    for foreign_work in (fre, spa):
        main_contribution = foreign_work.presentation_edition.contributions[0]
        main_contribution.contributor = self.contributor
    self._db.commit()
    SessionManager.refresh_materialized_views(self._db)

    lane = ContributorLane(
        self._default_library, 'Lois Lane', languages=['eng'])
    self.assert_works_queries(
        lane, [by_contributor, by_pseudonym, name_match])

    lane.languages = ['fre', 'spa']
    self.assert_works_queries(lane, [fre, spa])
    Flask,
    Response,
    redirect,
)
from flask_sqlalchemy_session import flask_scoped_session
from sqlalchemy.orm import sessionmaker

from config import Configuration
from core.model import SessionManager
from core.util import LanguageCodes
from flask.ext.babel import Babel

# Application bootstrap: create the Flask app, wire up the database
# session, localization, and routes, and configure debug mode.
app = Flask(__name__)
# Presence of the TESTING environment variable selects the test database.
testing = 'TESTING' in os.environ

db_url = Configuration.database_url(testing)
SessionManager.initialize(db_url)
session_factory = SessionManager.sessionmaker(db_url)
# One scoped database session per Flask request.
_db = flask_scoped_session(session_factory, app)
SessionManager.initialize_data(_db)

# Babel locale comes from the first configured localization language,
# converted from a 3-letter to a 2-letter code.
app.config['BABEL_DEFAULT_LOCALE'] = LanguageCodes.three_to_two[Configuration.localization_languages()[0]]
app.config['BABEL_TRANSLATION_DIRECTORIES'] = "../translations"
babel = Babel(app)

# Importing these modules registers their routes on `app` as a side effect.
import routes
if Configuration.get(Configuration.INCLUDE_ADMIN_INTERFACE):
    import admin.routes

# Mirror the configured logging level into Flask's debug flag.
debug = Configuration.logging_policy().get("level") == 'DEBUG'
logging.getLogger().info("Application debug mode==%r" % debug)
app.config['DEBUG'] = debug
def test_series(self):
    """Verify the work controller's series feed: 404 for a work with
    no series, then a feed whose entries are ordered by
    series_position when present, or by title otherwise.
    """
    # If the work doesn't have a series, a ProblemDetail is returned.
    with self.app.test_request_context('/'):
        response = self.manager.work_controller.series(
            self.datasource, self.identifier.type, self.identifier.identifier
        )
    eq_(404, response.status_code)
    eq_("http://librarysimplified.org/terms/problem/unknown-lane", response.uri)

    # If the work is in a series without other volumes, a feed is
    # returned containing only that work.
    self.lp.presentation_edition.series = "Like As If Whatever Mysteries"
    self.lp.presentation_edition.series_position = 8
    SessionManager.refresh_materialized_views(self._db)
    with self.app.test_request_context('/'):
        response = self.manager.work_controller.series(
            self.datasource, self.identifier.type, self.identifier.identifier
        )
    eq_(200, response.status_code)
    feed = feedparser.parse(response.data)
    eq_("Like As If Whatever Mysteries", feed['feed']['title'])
    [entry] = feed['entries']
    eq_(self.english_1.title, entry['title'])

    # Remove the cached feed so the next request regenerates it
    # instead of returning this one-entry version.
    [cached_empty_feed] = self._db.query(CachedFeed).all()
    self._db.delete(cached_empty_feed)

    # When other volumes present themselves, the feed has more entries.
    other_volume = self.english_2.license_pools[0].presentation_edition
    other_volume.series = "Like As If Whatever Mysteries"
    other_volume.series_position = 1
    SessionManager.refresh_materialized_views(self._db)
    with self.app.test_request_context('/'):
        response = self.manager.work_controller.series(
            self.datasource, self.identifier.type, self.identifier.identifier
        )
    eq_(200, response.status_code)
    feed = feedparser.parse(response.data)
    eq_(2, len(feed['entries']))
    [e1, e2] = feed['entries']
    # The entries are sorted according to their series_position.
    eq_(self.english_2.title, e1['title'])
    eq_(self.english_1.title, e2['title'])

    # Remove the cached two-entry feed before the final request.
    [cached_empty_feed] = self._db.query(CachedFeed).all()
    self._db.delete(cached_empty_feed)

    # Barring series_position, the entries are sorted according to their
    # titles.
    self.lp.presentation_edition.series_position = None
    other_volume.series_position = None
    # NOTE(review): unlike the earlier steps, there is no
    # refresh_materialized_views call here before the request —
    # presumably the feed query picks up the change anyway; confirm.
    with self.app.test_request_context('/'):
        response = self.manager.work_controller.series(
            self.datasource, self.identifier.type, self.identifier.identifier
        )
    eq_(200, response.status_code)
    feed = feedparser.parse(response.data)
    eq_(2, len(feed['entries']))
    [e1, e2] = feed['entries']
    eq_(self.english_1.title, e1['title'])
    eq_(self.english_2.title, e2['title'])
    ConfigurationSetting,
    SessionManager,
)
from core.log import LogConfiguration
from core.util import LanguageCodes
from flask.ext.babel import Babel

# Application bootstrap: create the Flask app, optionally initialize
# the database, and set up localization, routes, and logging.
app = Flask(__name__)
# Presence of the TESTING environment variable selects the test database.
testing = 'TESTING' in os.environ

db_url = Configuration.database_url(testing)
# Initialize a new database session unless we were told not to
# (e.g. because a script already initialized it).
autoinitialize = os.environ.get('AUTOINITIALIZE') != 'False'
if autoinitialize:
    SessionManager.initialize(db_url)
session_factory = SessionManager.sessionmaker(db_url)
# One scoped database session per Flask request.
_db = flask_scoped_session(session_factory, app)
if autoinitialize:
    SessionManager.initialize_data(_db)

# Babel locale comes from the first configured localization language,
# converted from a 3-letter to a 2-letter code.
app.config['BABEL_DEFAULT_LOCALE'] = LanguageCodes.three_to_two[
    Configuration.localization_languages()[0]]
app.config['BABEL_TRANSLATION_DIRECTORIES'] = "../translations"
babel = Babel(app)

# Importing these modules registers their routes on `app` as a side effect.
import routes
import admin.routes

# Configure logging from the database and derive Flask's debug flag.
log_level = LogConfiguration.initialize(_db, testing=testing)
debug = log_level == 'DEBUG'