def single_fulfillment_feed(cls, circulation, loan, fulfillment, test_mode=False):
    """Build an OPDS document describing one fulfilled loan.

    Returns a single-entry feed for the loan's Work, or an empty
    AcquisitionFeed when no Work can be located.
    """
    db = Session.object_session(loan)
    pool = loan.license_pool
    work = pool.work or pool.presentation_edition.work
    annotator = cls(
        circulation, None, loan.patron.library,
        active_loans_by_work={},
        active_holds_by_work={},
        active_fulfillments_by_work={work: fulfillment},
        test_mode=test_mode,
    )
    identifier = pool.identifier
    url = annotator.url_for(
        'loan_or_hold_detail',
        identifier_type=identifier.type,
        identifier=identifier.identifier,
        library_short_name=loan.patron.library.short_name,
        _external=True,
    )
    if not work:
        # No Work to describe -- return an empty feed.
        return AcquisitionFeed(
            db, "Active loan for unknown work", url, [], annotator)
    return AcquisitionFeed.single_entry(db, work, annotator)
def fulfill_link(self, license_pool, active_loan, delivery_mechanism):
    """Create a new fulfillment link.

    This link may include tags from the OPDS Extensions for DRM.

    :param license_pool: The LicensePool being fulfilled.
    :param active_loan: The patron's active Loan on that pool.
    :param delivery_mechanism: A DeliveryMechanism. A
        LicensePoolDeliveryMechanism is tolerated but logged.
    :return: An acquisition <link> tag, or None if the delivery
        mechanism has no representable format types.
    """
    if isinstance(delivery_mechanism, LicensePoolDeliveryMechanism):
        # Unwrap to the underlying DeliveryMechanism, but log so the
        # caller can be fixed.
        # Fix: logging.warn is a deprecated alias for logging.warning.
        logging.warning("LicensePoolDeliveryMechanism passed into fulfill_link instead of DeliveryMechanism!")
        delivery_mechanism = delivery_mechanism.delivery_mechanism
    format_types = AcquisitionFeed.format_types(delivery_mechanism)
    if not format_types:
        return None
    fulfill_url = self.url_for(
        "fulfill",
        license_pool_id=license_pool.id,
        mechanism_id=delivery_mechanism.id,
        library_short_name=self.library.short_name,
        _external=True
    )
    rel = OPDSFeed.ACQUISITION_REL
    link_tag = AcquisitionFeed.acquisition_link(
        rel=rel, href=fulfill_url, types=format_types
    )
    # Licensing terms for this loan.
    children = AcquisitionFeed.license_tags(license_pool, active_loan, None)
    link_tag.extend(children)
    # DRM device-registration tags, where applicable.
    children = self.drm_device_registration_tags(
        license_pool, active_loan, delivery_mechanism
    )
    link_tag.extend(children)
    return link_tag
def fulfill_link(self, data_source_name, identifier_identifier, license_pool, active_loan, delivery_mechanism):
    """Create a new fulfillment link.

    :param data_source_name: Name of the DataSource, used in the URL.
    :param identifier_identifier: The identifier string, used in the URL.
    :param license_pool: The LicensePool being fulfilled.
    :param active_loan: The patron's active Loan on that pool.
    :param delivery_mechanism: A DeliveryMechanism. A
        LicensePoolDeliveryMechanism is tolerated but logged.
    :return: An acquisition <link> tag, or None if the delivery
        mechanism has no representable format types.
    """
    if isinstance(delivery_mechanism, LicensePoolDeliveryMechanism):
        # Unwrap to the underlying DeliveryMechanism, but log so the
        # caller can be fixed.
        # Fix: logging.warn is a deprecated alias for logging.warning.
        logging.warning("LicensePoolDeliveryMechanism passed into fulfill_link instead of DeliveryMechanism!")
        delivery_mechanism = delivery_mechanism.delivery_mechanism
    format_types = AcquisitionFeed.format_types(delivery_mechanism)
    if not format_types:
        return None
    fulfill_url = self.url_for(
        "fulfill",
        data_source=data_source_name,
        identifier=identifier_identifier,
        mechanism_id=delivery_mechanism.id,
        _external=True
    )
    rel = OPDSFeed.ACQUISITION_REL
    link_tag = AcquisitionFeed.acquisition_link(
        rel=rel, href=fulfill_url, types=format_types
    )
    # Licensing terms for this loan.
    children = AcquisitionFeed.license_tags(license_pool, active_loan, None)
    link_tag.extend(children)
    return link_tag
def test_single_entry_no_active_license_pool(self):
    """single_entry() guesses the right LicensePool when none is given."""
    work = self._work(with_open_access_download=True)
    pool = work.license_pools[0]

    # Build an <entry> with the pool passed in explicitly...
    explicit = AcquisitionFeed.single_entry(
        self._db, work, self.annotator, pool)

    # ...and again with no pool, forcing single_entry to guess which
    # license pool we're talking about.
    guessed = AcquisitionFeed.single_entry(
        self._db, work, self.annotator, None)

    # The two entries come out identical.
    eq_(etree.tostring(explicit), etree.tostring(guessed))
def active_loans_for(cls, circulation, patron, test_mode=False):
    """Build an OPDS feed of the patron's active loans and holds."""
    db = Session.object_session(patron)

    # Index each loan and hold by its Work (skipping any without one).
    loans_by_work = {}
    for loan in patron.loans:
        work = loan.work
        if work:
            loans_by_work[work] = loan
    holds_by_work = {}
    for hold in patron.holds:
        work = hold.work
        if work:
            holds_by_work[work] = hold

    annotator = cls(circulation, None, patron, loans_by_work,
                    holds_by_work, test_mode=test_mode)
    url = annotator.url_for('active_loans', _external=True)
    works = patron.works_on_loan_or_on_hold()
    feed_obj = AcquisitionFeed(
        db, "Active loans and holds", url, works, annotator)
    annotator.annotate_feed(feed_obj, None)
    return feed_obj
def test_acquisition_feed_includes_license_information(self):
    """The feed carries opds:holds and opds:copies license tags."""
    work = self._work(with_open_access_download=True)
    pool = work.license_pools[0]
    # These numbers are impossible, but it doesn't matter for
    # purposes of this test.
    pool.open_access = False
    pool.licenses_owned = 100
    pool.licenses_available = 50
    pool.patrons_in_hold_queue = 25
    self._db.commit()
    works = self._db.query(Work)
    feed = AcquisitionFeed(
        self._db, "test", "url", works,
        CirculationManagerAnnotator(None, Fantasy, test_mode=True))
    u = unicode(feed)
    # Fix: regex patterns are now raw strings, so '\W' is a regex
    # escape rather than a (deprecated) string escape.
    holds_re = re.compile(r'<opds:holds\W+total="25"\W*/>', re.S)
    assert holds_re.search(u) is not None
    copies_re = re.compile(r'<opds:copies[^>]+available="50"', re.S)
    assert copies_re.search(u) is not None
    copies_re = re.compile(r'<opds:copies[^>]+total="100"', re.S)
    assert copies_re.search(u) is not None
def single_hold_feed(cls, circulation, hold, test_mode=False):
    """Build a single-entry OPDS feed describing one hold."""
    db = Session.object_session(hold)
    pool = hold.license_pool
    work = pool.work or pool.edition.work
    annotator = cls(
        circulation, None,
        active_loans_by_work={},
        active_holds_by_work={work: hold},
        test_mode=test_mode,
    )
    return AcquisitionFeed.single_entry(db, work, annotator)
def borrow_link(self, identifier, borrow_mechanism, fulfillment_mechanisms):
    """Build a borrow <link> for the given identifier.

    :raises UnfulfillableWork: if none of the fulfillment mechanisms
        can be expressed as indirect acquisitions.
    """
    if borrow_mechanism:
        # Following this link will both borrow the book and set its
        # delivery mechanism.
        mechanism_id = borrow_mechanism.delivery_mechanism.id
    else:
        # Following this link will borrow the book but leave its
        # delivery mechanism unset.
        mechanism_id = None
    borrow_url = self.url_for(
        "borrow",
        identifier_type=identifier.type,
        identifier=identifier.identifier,
        mechanism_id=mechanism_id,
        library_short_name=self.library.short_name,
        _external=True)
    link = AcquisitionFeed.link(
        rel=OPDSFeed.BORROW_REL, href=borrow_url, type=OPDSFeed.ENTRY_TYPE
    )

    # To the extent possible, describe every delivery mechanism that
    # will be available at the point of fulfillment as an
    # <opds:indirectAcquisition> tag on the link.
    indirect_acquisitions = []
    for lpdm in fulfillment_mechanisms:
        format_types = AcquisitionFeed.format_types(lpdm.delivery_mechanism)
        if format_types:
            indirect_acquisitions.append(
                AcquisitionFeed.indirect_acquisition(format_types)
            )
    if not indirect_acquisitions:
        # The work can't be fulfilled in any representable format.
        raise UnfulfillableWork()
    link.extend(indirect_acquisitions)
    return link
def test_single_entry_no_active_license_pool(self):
    """An entry built without a pool matches one built with it."""
    work = self._work(with_open_access_download=True)
    pool = work.license_pools[0]

    # Entry built with an explicit LicensePool.
    with_pool = AcquisitionFeed.single_entry(
        self._db, work, self.annotator, pool
    )
    # Entry built without one -- single_entry guesses the pool.
    without_pool = AcquisitionFeed.single_entry(
        self._db, work, self.annotator, None
    )
    # Both entries are identical.
    eq_(etree.tostring(with_pool), etree.tostring(without_pool))
def single_hold_feed(cls, circulation, hold, test_mode=False):
    """Build a single-entry OPDS feed describing one hold."""
    db = Session.object_session(hold)
    pool = hold.license_pool
    work = pool.work or pool.presentation_edition.work
    annotator = cls(
        circulation, None, hold.patron.library,
        active_loans_by_work={},
        active_holds_by_work={work: hold},
        test_mode=test_mode,
    )
    return AcquisitionFeed.single_entry(db, work, annotator)
def metadata_needed_for(self, collection_details):
    """Returns identifiers in the collection that could benefit from
    distributor metadata on the circulation manager.
    """
    client = authenticated_client_from_request(self._db)
    if isinstance(client, ProblemDetail):
        return client
    collection = collection_from_details(
        self._db, client, collection_details)

    resolver = IdentifierResolutionCoverageProvider
    unresolved_identifiers = collection.unresolved_catalog(
        self._db, resolver.DATA_SOURCE_NAME, resolver.OPERATION)

    # Omit identifiers that currently have metadata pending for
    # the IntegrationClientCoverImageCoverageProvider.
    data_source = DataSource.lookup(
        self._db, collection.name, autocreate=True)
    is_awaiting_metadata = self._db.query(
        CoverageRecord.id, CoverageRecord.identifier_id).filter(
            CoverageRecord.data_source_id == data_source.id,
            CoverageRecord.status == CoverageRecord.REGISTERED,
            CoverageRecord.operation == IntegrationClientCoverImageCoverageProvider.OPERATION,
        ).subquery()
    unresolved_identifiers = unresolved_identifiers.outerjoin(
        is_awaiting_metadata,
        Identifier.id == is_awaiting_metadata.c.identifier_id).filter(
            is_awaiting_metadata.c.id == None)

    # Emit one OPDS message per unresolved identifier on this page.
    pagination = load_pagination_from_request(default_size=25)
    feed_identifiers = pagination.apply(unresolved_identifiers).all()
    messages = [
        OPDSMessage(identifier.urn, HTTP_ACCEPTED, "Metadata needed.")
        for identifier in feed_identifiers
    ]

    title = "%s Metadata Requests for %s" % (collection.protocol, client.url)
    metadata_request_url = self.collection_feed_url(
        'metadata_needed_for', collection)
    request_feed = AcquisitionFeed(
        self._db, title, metadata_request_url, [], VerboseAnnotator,
        precomposed_entries=messages)
    self.add_pagination_links_to_feed(
        pagination, unresolved_identifiers, request_feed,
        'metadata_needed_for', collection)
    return feed_response(request_feed)
def get_parsed_feed(self, works, lane=None):
    """Build an AcquisitionFeed of `works` and return it parsed by
    feedparser. A default lane is created when none is supplied.
    """
    lane = lane or self._lane(display_name="Main Lane")
    annotator = CirculationManagerAnnotator(
        None, lane, self._default_library, test_mode=True)
    feed = AcquisitionFeed(self._db, "test", "url", works, annotator)
    return feedparser.parse(unicode(feed))
def test_acquisition_feed_includes_problem_reporting_link(self):
    """Each entry carries an 'issues' link pointing at /report."""
    work = self._work(with_open_access_download=True)
    self._db.commit()
    annotator = CirculationManagerAnnotator(None, Fantasy, test_mode=True)
    raw_feed = AcquisitionFeed(self._db, "test", "url", [work], annotator)
    parsed = feedparser.parse(unicode(raw_feed))
    [entry] = parsed['entries']
    [issues_link] = [
        link for link in entry['links'] if link['rel'] == 'issues'
    ]
    assert '/report' in issues_link['href']
def confirm_related_books_link():
    """Tests the presence of a /related_books link in a feed."""
    # NOTE(review): this is a closure -- `self` and `work` are free
    # variables supplied by the enclosing test.
    annotator = CirculationManagerAnnotator(None, Fantasy, test_mode=True)
    parsed = feedparser.parse(
        unicode(AcquisitionFeed(self._db, "test", "url", [work], annotator))
    )
    [entry] = parsed['entries']
    [recommendations_link] = [
        link for link in entry['links'] if link['rel'] == 'related'
    ]
    eq_(OPDSFeed.ACQUISITION_FEED_TYPE, recommendations_link['type'])
    assert '/related_books' in recommendations_link['href']
def remove_items(self, collection_details):
    """Removes identifiers from a Collection's catalog"""
    client = authenticated_client_from_request(self._db)
    if isinstance(client, ProblemDetail):
        return client
    collection = collection_from_details(
        self._db, client, collection_details)
    urns = request.args.getlist('urn')

    messages = []
    identifiers_by_urn, failures = Identifier.parse_urns(self._db, urns)
    for urn in failures:
        messages.append(
            OPDSMessage(urn, INVALID_URN.status_code, INVALID_URN.detail)
        )

    # Find the IDs of the subset of provided identifiers that are
    # in the catalog, so we know which ones to delete and give a
    # 200 message. Also get a SQLAlchemy clause that selects only
    # those IDs.
    matching_ids, identifier_match_clause = self._in_catalog_subset(
        collection, identifiers_by_urn)

    # Use that clause to delete all of the relevant catalog entries.
    delete_stmt = collections_identifiers.delete().where(
        identifier_match_clause)
    self._db.execute(delete_stmt)

    # IDs that matched get a 200 message; all others get a 404.
    for urn, identifier in identifiers_by_urn.items():
        if identifier.id in matching_ids:
            status = HTTP_OK
            description = "Successfully removed"
        else:
            status = HTTP_NOT_FOUND
            description = "Not in catalog"
        messages.append(OPDSMessage(urn, status, description))

    title = "%s Catalog Item Removal for %s" % (collection.protocol, client.url)
    url = self.collection_feed_url("remove", collection, urn=urns)
    removal_feed = AcquisitionFeed(
        self._db, title, url, [], VerboseAnnotator,
        precomposed_entries=messages)
    return feed_response(removal_feed)
def test_permalink(self):
    """The permalink controller serves a single OPDS entry."""
    with self.app.test_request_context("/"):
        response = self.manager.work_controller.permalink(
            self.datasource, self.identifier)
        annotator = CirculationManagerAnnotator(None, None)
        expect = etree.tostring(
            AcquisitionFeed.single_entry(
                self._db, self.english_1, annotator
            )
        )
        eq_(200, response.status_code)
        eq_(expect, response.data)
        eq_(OPDSFeed.ENTRY_TYPE, response.headers['Content-Type'])
def open_access_link(self, lpdm):
    """Build an open-access acquisition <link> for this delivery
    mechanism, marked as always available.
    """
    url = cdnify(lpdm.resource.url, Configuration.cdns())
    attrs = dict(rel=OPDSFeed.OPEN_ACCESS_REL, href=url)
    representation = lpdm.resource.representation
    if representation and representation.media_type:
        attrs['type'] = representation.media_type
    link_tag = AcquisitionFeed.link(**attrs)
    # Open-access content is always available.
    always_available = OPDSFeed.makeelement(
        "{%s}availability" % OPDSFeed.OPDS_NS, status="available"
    )
    link_tag.append(always_available)
    return link_tag
def borrow_link(self, data_source_name, identifier, borrow_mechanism, fulfillment_mechanisms):
    """Build a borrow <link> for the given identifier."""
    if borrow_mechanism:
        # Following this link will both borrow the book and set its
        # delivery mechanism.
        mechanism_id = borrow_mechanism.delivery_mechanism.id
    else:
        # Following this link will borrow the book but leave its
        # delivery mechanism unset.
        mechanism_id = None
    borrow_url = self.url_for(
        "borrow",
        data_source=data_source_name,
        identifier_type=identifier.type,
        identifier=identifier.identifier,
        mechanism_id=mechanism_id,
        _external=True)
    link = AcquisitionFeed.link(
        rel=OPDSFeed.BORROW_REL, href=borrow_url, type=OPDSFeed.ENTRY_TYPE
    )
    # To the extent possible, describe every delivery mechanism that
    # will be available at the point of fulfillment as an
    # <opds:indirectAcquisition> tag on the link.
    for lpdm in fulfillment_mechanisms:
        format_types = AcquisitionFeed.format_types(lpdm.delivery_mechanism)
        if format_types:
            link.append(AcquisitionFeed.indirect_acquisition(format_types))
    return link
def open_access_link(self, lpdm):
    """Build an open-access acquisition <link> for this delivery
    mechanism, marked as always available.
    """
    cdn_host = Configuration.cdn_host(Configuration.CDN_OPEN_ACCESS_CONTENT)
    url = cdnify(lpdm.resource.url, cdn_host)
    attrs = dict(rel=OPDSFeed.OPEN_ACCESS_REL, href=url)
    representation = lpdm.resource.representation
    if representation and representation.media_type:
        attrs['type'] = representation.media_type
    link_tag = AcquisitionFeed.link(**attrs)
    # Open-access content is always available.
    always_available = E._makeelement(
        "{%s}availability" % opds_ns, status="available"
    )
    link_tag.append(always_available)
    return link_tag
def single_loan_feed(cls, circulation, loan, test_mode=False):
    """Build a single-entry OPDS feed describing one loan.

    Returns an empty AcquisitionFeed when no Work can be located.
    """
    db = Session.object_session(loan)
    pool = loan.license_pool
    work = pool.work or pool.edition.work
    annotator = cls(
        circulation, None,
        active_loans_by_work={work: loan},
        active_holds_by_work={},
        test_mode=test_mode,
    )
    url = annotator.url_for(
        'loan_or_hold_detail',
        data_source=pool.data_source.name,
        identifier=pool.identifier.identifier,
        _external=True)
    if not work:
        # No Work to describe -- return an empty feed.
        return AcquisitionFeed(
            db, "Active loan for unknown work", url, [], annotator)
    return AcquisitionFeed.single_entry(db, work, annotator)
def add_items(self, collection_details):
    """Adds identifiers to a Collection's catalog"""
    client = authenticated_client_from_request(self._db)
    if isinstance(client, ProblemDetail):
        return client
    collection = collection_from_details(
        self._db, client, collection_details)
    urns = request.args.getlist('urn')

    messages = []
    identifiers_by_urn, failures = Identifier.parse_urns(self._db, urns)
    for urn in failures:
        messages.append(
            OPDSMessage(urn, INVALID_URN.status_code, INVALID_URN.detail)
        )

    # Find the subset of incoming identifiers that are already
    # in the catalog.
    already_in_catalog, ignore = self._in_catalog_subset(
        collection, identifiers_by_urn)

    # Everything else needs to be added to the catalog.
    needs_to_be_added = [
        identifier for identifier in identifiers_by_urn.values()
        if identifier.id not in already_in_catalog
    ]
    collection.catalog_identifiers(needs_to_be_added)

    for urn, identifier in identifiers_by_urn.items():
        if identifier.id in already_in_catalog:
            status = HTTP_OK
            description = "Already in catalog"
        else:
            status = HTTP_CREATED
            description = "Successfully added"
        messages.append(OPDSMessage(urn, status, description))

    title = "%s Catalog Item Additions for %s" % (collection.protocol, client.url)
    url = self.collection_feed_url('add', collection, urn=urns)
    addition_feed = AcquisitionFeed(
        self._db, title, url, [], VerboseAnnotator,
        precomposed_entries=messages)
    return feed_response(addition_feed)
def details(self, data_source, identifier_type, identifier):
    """Return an OPDS entry with detailed information for admins.

    This includes relevant links for editing the book.
    """
    pool = self.load_licensepool(data_source, identifier_type, identifier)
    if isinstance(pool, ProblemDetail):
        return pool
    annotator = AdminAnnotator(self.circulation)
    entry = AcquisitionFeed.single_entry(self._db, pool.work, annotator)
    return entry_response(entry)
def test_acquisition_feed_includes_annotations_link(self):
    """Each entry links to the annotation service for its identifier."""
    work = self._work(with_open_access_download=True)
    self._db.commit()
    annotator = CirculationManagerAnnotator(None, Fantasy, test_mode=True)
    parsed = feedparser.parse(
        unicode(AcquisitionFeed(self._db, "test", "url", [work], annotator))
    )
    [entry] = parsed['entries']
    [annotations_link] = [
        link for link in entry['links']
        if link['rel'] == 'http://www.w3.org/ns/oa#annotationservice'
    ]
    assert '/annotations' in annotations_link['href']
    identifier = work.license_pools[0].identifier
    assert identifier.identifier in annotations_link['href']
def add_items(self, collection_details):
    """Adds identifiers to a Collection's catalog"""
    client = authenticated_client_from_request(self._db)
    if isinstance(client, ProblemDetail):
        return client
    collection, ignore = Collection.from_metadata_identifier(
        self._db, collection_details)
    urns = request.args.getlist('urn')
    messages = []
    for urn in urns:
        identifier = None
        try:
            identifier, ignore = Identifier.parse_urn(self._db, urn)
        except Exception:
            # Deliberate best-effort: any parse failure is reported
            # as an invalid URN rather than aborting the batch.
            identifier = None
        if not identifier:
            message = OPDSMessage(
                urn, INVALID_URN.status_code, INVALID_URN.detail)
        else:
            status = HTTP_OK
            description = "Already in catalog"
            if identifier not in collection.catalog:
                collection.catalog_identifier(self._db, identifier)
                status = HTTP_CREATED
                description = "Successfully added"
            message = OPDSMessage(urn, status, description)
        messages.append(message)
    title = "%s Catalog Item Additions for %s" % (collection.protocol, client.url)
    url = cdn_url_for(
        "add", collection_metadata_identifier=collection.name, urn=urns)
    addition_feed = AcquisitionFeed(
        self._db, title, url, [], VerboseAnnotator,
        precomposed_entries=messages)
    return feed_response(addition_feed)
def do_generate(self, lane):
    """Generate (with force_refresh) the grouped acquisition feed
    for `lane` and return it.
    """
    annotator = self.app.manager.annotator(lane)
    title = lane.display_name
    if isinstance(lane, Lane):
        languages = lane.language_key
        lane_name = lane.name
    else:
        # Presumably a top-level WorkList with no language/name key.
        languages = None
        lane_name = None
    url = self.app.manager.cdn_url_for(
        "acquisition_groups", languages=languages, lane_name=lane_name
    )
    # Fix: removed the unused `feeds = []` local.
    return AcquisitionFeed.groups(
        self._db, title, url, lane, annotator, force_refresh=True
    )
def process_batch(self, batch):
    """Create an OPDS feed from a batch and upload it to the
    metadata client.
    """
    works = []
    results = []
    for identifier in batch:
        work = self.work(identifier)
        if isinstance(work, CoverageFailure):
            results.append(work)
        else:
            works.append(work)
            results.append(identifier)
    feed = AcquisitionFeed(
        self._db, "Metadata Upload Feed", "", works, None)
    self.upload_client.add_with_metadata(feed)
    # Coverage is granted for all identifiers as long as the upload
    # doesn't raise an exception.
    return results
def remove_items(self, collection_details):
    """Removes identifiers from a Collection's catalog"""
    client = authenticated_client_from_request(self._db)
    if isinstance(client, ProblemDetail):
        return client
    collection, ignore = Collection.from_metadata_identifier(
        self._db, collection_details)
    urns = request.args.getlist('urn')
    messages = []
    for urn in urns:
        identifier = None
        try:
            identifier, ignore = Identifier.parse_urn(self._db, urn)
        except Exception:
            # Deliberate best-effort: any parse failure is reported
            # as an invalid URN rather than aborting the batch.
            identifier = None
        if not identifier:
            message = OPDSMessage(
                urn, INVALID_URN.status_code, INVALID_URN.detail)
        elif identifier in collection.catalog:
            collection.catalog.remove(identifier)
            message = OPDSMessage(urn, HTTP_OK, "Successfully removed")
        else:
            message = OPDSMessage(urn, HTTP_NOT_FOUND, "Not in catalog")
        messages.append(message)
    title = "%s Catalog Item Removal for %s" % (collection.protocol, client.url)
    url = cdn_url_for(
        "remove", collection_metadata_identifier=collection.name, urn=urns)
    removal_feed = AcquisitionFeed(
        self._db, title, url, [], VerboseAnnotator,
        precomposed_entries=messages)
    return feed_response(removal_feed)
def do_generate(self, lane):
    """Yield a freshly generated (force_refresh) grouped feed for
    `lane`.
    """
    annotator = self.app.manager.annotator(lane)
    title = lane.display_name
    if isinstance(lane, Lane):
        lane_id = lane.id
    else:
        # Presumably this is the top-level WorkList.
        lane_id = None
    library = lane.get_library(self._db)
    url = self.app.manager.cdn_url_for(
        "acquisition_groups", lane_identifier=lane_id,
        library_short_name=library.short_name
    )
    # Fix: removed the unused `feeds = []` local.
    yield AcquisitionFeed.groups(
        self._db, title, url, lane, annotator, force_refresh=True
    )
def do_generate(self, lane):
    """Yield a freshly generated (force_refresh) grouped feed for
    `lane`.
    """
    annotator = self.app.manager.annotator(lane)
    title = lane.display_name
    if isinstance(lane, Lane) and lane.parent:
        languages = lane.language_key
        lane_name = lane.name
    else:
        # Top-level lanes/worklists have no language/name key.
        languages = None
        lane_name = None
    url = self.app.manager.cdn_url_for(
        "acquisition_groups", languages=languages, lane_name=lane_name)
    # Fix: removed the unused `feeds = []` local.
    yield AcquisitionFeed.groups(
        self._db, title, url, lane, annotator, force_refresh=True)
def details(self, identifier_type, identifier):
    """Return an OPDS entry with detailed information for admins.

    This includes relevant links for editing the book.
    """
    library = flask.request.library
    self.require_librarian(library)
    work = self.load_work(library, identifier_type, identifier)
    if isinstance(work, ProblemDetail):
        return work
    annotator = AdminAnnotator(self.circulation, library)
    entry = AcquisitionFeed.single_entry(self._db, work, annotator)
    # cache_for=0: these entries must update immediately in the admin
    # interface when an admin makes a change, so never cache them.
    return entry_response(entry, cache_for=0)
def details(self, identifier_type, identifier):
    """Return an OPDS entry with detailed information for admins.

    This includes relevant links for editing the book.

    :return: An OPDSEntryResponse
    """
    library = flask.request.library
    self.require_librarian(library)
    work = self.load_work(library, identifier_type, identifier)
    if isinstance(work, ProblemDetail):
        return work
    annotator = AdminAnnotator(self.circulation, library)
    # single_entry returns an OPDSEntryResponse that will not be
    # cached, which is perfect: the admin interface should update
    # immediately when an admin makes a change.
    return AcquisitionFeed.single_entry(self._db, work, annotator)
def do_generate(self, lane):
    """Build and return cached page feeds for `lane`: one feed per
    enabled sort order, for pages 0 and 2.
    """
    feeds = []
    annotator = self.app.manager.annotator(lane)
    # Fix: removed a dead branch that computed a `languages` local
    # which was never used (the URL below reads lane.languages
    # directly) and assigned lane_name = None on both paths.
    lane_name = None
    url = self.app.manager.cdn_url_for(
        "feed", languages=lane.languages, lane_name=lane_name
    )
    order_facets = Configuration.enabled_facets(
        Facets.ORDER_FACET_GROUP_NAME
    )
    availability = Configuration.default_facet(
        Facets.AVAILABILITY_FACET_GROUP_NAME
    )
    collection = Configuration.default_facet(
        Facets.COLLECTION_FACET_GROUP_NAME
    )
    for sort_order in order_facets:
        pagination = Pagination.default()
        facets = Facets(
            collection=collection, availability=availability,
            order=sort_order, order_ascending=True
        )
        title = lane.display_name
        for pagenum in (0, 2):
            feeds.append(
                AcquisitionFeed.page(
                    self._db, title, url, lane, annotator,
                    facets=facets, pagination=pagination,
                    force_refresh=True
                )
            )
            pagination = pagination.next_page
    return feeds
def test_acquisition_feed_includes_open_access_or_borrow_link(self):
    """Open-access and commercially licensed works both get borrow links."""
    open_access_work = self._work(with_open_access_download=True)
    licensed_work = self._work(with_open_access_download=True)
    licensed_work.license_pools[0].open_access = False
    licensed_work.license_pools[0].licenses_owned = 1
    self._db.commit()

    works = self._db.query(Work)
    feed = AcquisitionFeed(
        self._db, "test", "url", works,
        CirculationManagerAnnotator(None, Fantasy, test_mode=True))
    feed = feedparser.parse(unicode(feed))
    entries = sorted(feed['entries'], key=lambda x: int(x['title']))

    open_access_links, borrow_links = [x['links'] for x in entries]
    open_access_rels = [x['rel'] for x in open_access_links]
    assert OPDSFeed.BORROW_REL in open_access_rels

    borrow_rels = [x['rel'] for x in borrow_links]
    assert OPDSFeed.BORROW_REL in borrow_rels
def do_generate(self, lane):
    """Yield freshly generated page feeds for `lane`, one per page
    for every combination of sort order, availability, and
    collection facet configured on self.
    """
    annotator = self.app.manager.annotator(lane)
    # Fix: removed dead code -- an unused `feeds` list, an unused
    # `languages` local (the URL reads lane.languages directly), and
    # Configuration facet lookups whose results were immediately
    # shadowed by the loop variables below.
    if isinstance(lane, Lane) and lane.parent:
        lane_name = lane.name
    else:
        lane_name = None
    url = self.app.manager.cdn_url_for(
        "feed", languages=lane.languages, lane_name=lane_name)
    for sort_order in self.orders:
        for availability in self.availabilities:
            for collection in self.collections:
                pagination = Pagination.default()
                facets = Facets(
                    collection=collection, availability=availability,
                    order=sort_order, order_ascending=True)
                title = lane.display_name
                for pagenum in range(0, self.pages):
                    yield AcquisitionFeed.page(
                        self._db, title, url, lane, annotator,
                        facets=facets, pagination=pagination,
                        force_refresh=True)
                    pagination = pagination.next_page
def updates_feed(self):
    """Return an OPDS feed of works in the authenticated collection
    updated since the optional `last_update_time` request argument
    (ISO "%Y-%m-%dT%H:%M:%SZ"), with pagination links.
    """
    collection = self.authenticated_collection_from_request()
    if isinstance(collection, ProblemDetail):
        return collection
    last_update_time = request.args.get('last_update_time', None)
    if last_update_time:
        last_update_time = datetime.strptime(
            last_update_time, "%Y-%m-%dT%H:%M:%SZ")
    updated_works = collection.works_updated_since(
        self._db, last_update_time)
    pagination = load_pagination_from_request()
    works = pagination.apply(updated_works).all()
    title = "%s Updates" % collection.name

    def update_url(time=last_update_time, page=None):
        kw = dict(_external=True)
        if time:
            # Fix: previously this ignored the `time` parameter and
            # always used the closed-over last_update_time.
            kw.update({'last_update_time': time})
        if page:
            kw.update(page.items())
        return cdn_url_for("updates", **kw)

    update_feed = AcquisitionFeed(
        self._db, title, update_url(), works, VerboseAnnotator
    )
    # Fix: count() instead of len(.all()) -- don't load every updated
    # work just to measure the result set.
    if updated_works.count() > pagination.size + pagination.offset:
        update_feed.add_link_to_feed(
            update_feed.feed, rel="next",
            href=update_url(page=pagination.next_page)
        )
    if pagination.offset > 0:
        update_feed.add_link_to_feed(
            update_feed.feed, rel="first",
            href=update_url(page=pagination.first_page)
        )
    previous_page = pagination.previous_page
    if previous_page:
        update_feed.add_link_to_feed(
            update_feed.feed, rel="previous",
            href=update_url(page=previous_page)
        )
    return feed_response(update_feed)
def test_alternate_link_is_permalink(self):
    """The 'alternate' link on an entry is the work's permalink."""
    work = self._work(with_open_access_download=True)
    self._db.commit()
    works = self._db.query(Work)
    annotator = CirculationManagerAnnotator(None, Fantasy, test_mode=True)
    pool = annotator.active_licensepool_for(work)

    feed = feedparser.parse(
        unicode(AcquisitionFeed(self._db, "test", "url", works, annotator))
    )
    [entry] = feed['entries']
    eq_(entry['id'], pool.identifier.urn)

    [(alternate, type)] = [
        (x['href'], x['type']) for x in entry['links']
        if x['rel'] == 'alternate'
    ]
    permalink = annotator.permalink_for(work, pool, pool.identifier)
    eq_(alternate, permalink)
    eq_(OPDSFeed.ENTRY_TYPE, type)

    # Make sure we are using the 'permalink' controller -- we were using
    # 'work' and that was wrong.
    assert '/host/permalink' in permalink
def acquisition_links(self, active_license_pool, active_loan, active_hold, active_fulfillment, feed, identifier):
    """Generate a number of <link> tags that enumerate all acquisition
    methods: borrow, fulfill, open-access, and revoke links.
    """
    # Work out what the patron can do with this pool right now.
    can_borrow = False
    can_fulfill = False
    can_revoke = False
    can_hold = self.library.allow_holds
    if active_loan:
        can_fulfill = True
        can_revoke = True
    elif active_hold:
        # We display the borrow link even if the patron can't
        # borrow the book right this minute.
        can_borrow = True
        can_revoke = (
            not self.circulation or
            self.circulation.can_revoke_hold(
                active_license_pool, active_hold)
        )
    elif active_fulfillment:
        can_fulfill = True
        can_revoke = True
    else:
        # The patron has no existing relationship with this
        # work. Give them the opportunity to check out the work
        # or put it on hold.
        can_borrow = True

    # If there is something to be revoked for this book,
    # add a link to revoke it.
    revoke_links = []
    if can_revoke:
        revoke_url = self.url_for(
            'revoke_loan_or_hold',
            license_pool_id=active_license_pool.id,
            library_short_name=self.library.short_name,
            _external=True)
        revoke_attrs = dict(href=revoke_url, rel=OPDSFeed.REVOKE_LOAN_REL)
        revoke_links.append(OPDSFeed.makeelement("link", **revoke_attrs))

    # Add next-step information for every useful delivery mechanism.
    borrow_links = []
    api = None
    if self.circulation:
        api = self.circulation.api_for_license_pool(active_license_pool)
    if api:
        set_mechanism_at_borrow = (
            api.SET_DELIVERY_MECHANISM_AT == BaseCirculationAPI.BORROW_STEP)
    else:
        # This is most likely an open-access book. Just put one
        # borrow link and figure out the rest later.
        set_mechanism_at_borrow = False
    if can_borrow:
        # Borrowing a book gives you an OPDS entry that gives you
        # fulfillment links.
        if set_mechanism_at_borrow:
            # The ebook distributor requires that the delivery
            # mechanism be set at the point of checkout. This means
            # a separate borrow link for each mechanism.
            for mechanism in active_license_pool.delivery_mechanisms:
                borrow_links.append(
                    self.borrow_link(identifier, mechanism, [mechanism])
                )
        else:
            # The delivery mechanism can be set at the point of
            # fulfillment instead, so one borrow link carries
            # indirectAcquisition tags for every delivery mechanism.
            borrow_links.append(
                self.borrow_link(
                    identifier, None,
                    active_license_pool.delivery_mechanisms
                )
            )

        # Generate the licensing tags that tell you whether the book
        # is available, and attach them to each borrow link.
        license_tags = feed.license_tags(
            active_license_pool, active_loan, active_hold
        )
        for link in borrow_links:
            for tag in license_tags:
                link.append(tag)

    # Add links for fulfilling an active loan.
    fulfill_links = []
    if can_fulfill:
        if active_fulfillment:
            # We're making an entry for a specific fulfill link.
            type = active_fulfillment.content_type
            url = active_fulfillment.content_link
            rel = OPDSFeed.ACQUISITION_REL
            fulfill_links.append(
                AcquisitionFeed.acquisition_link(
                    rel=rel, href=url, types=[type])
            )
        elif active_loan.fulfillment:
            # The delivery mechanism for this loan has been set:
            # one link for the locked-in mechanism, plus links for
            # any streaming delivery mechanisms.
            for lpdm in active_license_pool.delivery_mechanisms:
                if lpdm is active_loan.fulfillment or lpdm.delivery_mechanism.is_streaming:
                    fulfill_links.append(
                        self.fulfill_link(
                            active_license_pool, active_loan,
                            lpdm.delivery_mechanism
                        )
                    )
        else:
            # The delivery mechanism for this loan has not been
            # set: one fulfill link per delivery mechanism.
            for lpdm in active_license_pool.delivery_mechanisms:
                fulfill_links.append(
                    self.fulfill_link(
                        active_license_pool, active_loan,
                        lpdm.delivery_mechanism
                    )
                )

    # If this is an open-access book, add an open-access link for
    # every delivery mechanism with an associated resource.
    open_access_links = []
    if active_license_pool.open_access:
        for lpdm in active_license_pool.delivery_mechanisms:
            if lpdm.resource:
                open_access_links.append(self.open_access_link(lpdm))

    return [
        x for x in
        borrow_links + fulfill_links + open_access_links + revoke_links
        if x is not None
    ]
def test_acquisition_feed_includes_related_books_link(self):
    """A feed entry gets a rel="related" link pointing at /related_books
    exactly when recommendations could exist for the work: NoveList is
    configured, or the work has a contributor, or the work is in a series.
    """
    work = self._work(with_license_pool=True, with_open_access_download=True)
    def confirm_related_books_link():
        """Tests the presence of a /related_books link in a feed."""
        feed = AcquisitionFeed(
            self._db, "test", "url", [work],
            CirculationManagerAnnotator(None, Fantasy, test_mode=True))
        # Serialize the feed and re-parse it so we can inspect the links
        # exactly as a client would see them.
        feed = feedparser.parse(unicode(feed))
        [entry] = feed['entries']
        # Destructuring into a one-element list also asserts there is
        # exactly one rel="related" link.
        [recommendations_link ] = [x for x in entry['links'] if x['rel'] == 'related']
        eq_(OPDSFeed.ACQUISITION_FEED_TYPE, recommendations_link['type'])
        assert '/related_books' in recommendations_link['href']

    # If there is a contributor, there's a related books link.
    with temp_config() as config:
        # Reset the cached configuration check so temp_config takes effect.
        NoveListAPI.IS_CONFIGURED = None
        config['integrations'][Configuration.NOVELIST_INTEGRATION] = {}
        confirm_related_books_link()

    # If there is no possibility of related works,
    # there's no related books link.
    with temp_config() as config:
        # Remove contributors.
        self._db.delete(
            work.license_pools[0].presentation_edition.contributions[0])
        self._db.commit()

        # Turn off NoveList.
        NoveListAPI.IS_CONFIGURED = None
        config['integrations'][Configuration.NOVELIST_INTEGRATION] = {}
        feed = AcquisitionFeed(
            self._db, "test", "url", [work],
            CirculationManagerAnnotator(None, Fantasy, test_mode=True))
        feed = feedparser.parse(unicode(feed))
        [entry] = feed['entries']
        # With no contributors, no series, and NoveList off, there must
        # be no rel="related" links at all.
        recommendations_links = [
            x for x in entry['links'] if x['rel'] == 'related'
        ]
        eq_([], recommendations_links)

    # If NoveList is configured (and thus recommendations are available),
    # there is a related books link.
    with temp_config() as config:
        NoveListAPI.IS_CONFIGURED = None
        config['integrations'][Configuration.NOVELIST_INTEGRATION] = {
            Configuration.NOVELIST_PROFILE: "library",
            Configuration.NOVELIST_PASSWORD: "******"
        }
        confirm_related_books_link()

    # If the book is in a series, there is a related books link.
    with temp_config() as config:
        NoveListAPI.IS_CONFIGURED = None
        config['integrations'][Configuration.NOVELIST_INTEGRATION] = {}
        work.license_pools[
            0].presentation_edition.series = "Serious Cereal Series"
        confirm_related_books_link()
def add_with_metadata(self, collection_details):
    """Adds identifiers with their metadata to a Collection's catalog.

    The request body is parsed as an OPDS feed. For each entry, the
    entry id (a URN) is resolved to an Identifier; new identifiers are
    added to the collection's catalog, and an Edition is created or
    updated from the entry's title, author, language and cover links.

    :param collection_details: passed to collection_from_details to
        locate the authenticated client's Collection.
    :return: a feed_response wrapping an AcquisitionFeed whose entries
        are one OPDSMessage per submitted URN, or a ProblemDetail if
        client authentication failed.
    """
    client = authenticated_client_from_request(self._db)
    if isinstance(client, ProblemDetail):
        return client
    collection = collection_from_details(
        self._db, client, collection_details)

    # Metadata submitted through this endpoint is attributed to a
    # DataSource named after the collection itself.
    data_source = DataSource.lookup(
        self._db, collection.name, autocreate=True)

    feed = feedparser.parse(request.data)
    entries = feed.get("entries", [])
    entries_by_urn = {entry.get('id'): entry for entry in entries}

    identifiers_by_urn, invalid_urns = Identifier.parse_urns(
        self._db, entries_by_urn.keys())

    # One failure message per URN that could not be parsed.
    # (The original initialized `messages` twice; once is enough.)
    messages = [
        OPDSMessage(urn, INVALID_URN.status_code, INVALID_URN.detail)
        for urn in invalid_urns
    ]

    # These values are identical for every entry, so build them once
    # here instead of once per identifier inside the loop.
    image_types = set([Hyperlink.IMAGE, Hyperlink.THUMBNAIL_IMAGE])
    presentation = PresentationCalculationPolicy(
        choose_edition=False,
        set_edition_metadata=False,
        classify=False,
        choose_summary=False,
        calculate_quality=False,
        choose_cover=False,
        regenerate_opds_entries=False,
    )
    replace = ReplacementPolicy(
        presentation_calculation_policy=presentation)

    for urn, identifier in identifiers_by_urn.items():
        entry = entries_by_urn[urn]
        status = HTTP_OK
        description = "Already in catalog"
        if identifier not in collection.catalog:
            collection.catalog_identifier(identifier)
            status = HTTP_CREATED
            description = "Successfully added"
        message = OPDSMessage(urn, status, description)

        # Get a cover if it exists.
        images = [
            l for l in entry.get("links", [])
            if l.get("rel") in image_types
        ]
        links = [
            LinkData(image.get("rel"), image.get("href"))
            for image in images
        ]

        # Create an edition to hold the title and author.
        # LicensePool.calculate_work refuses to create a Work when
        # there's no title, and if we have a title, author and language
        # we can attempt to look up the edition in OCLC.
        title = entry.get("title") or "Unknown Title"
        author = ContributorData(
            sort_name=(entry.get("author") or Edition.UNKNOWN_AUTHOR),
            roles=[Contributor.PRIMARY_AUTHOR_ROLE]
        )
        language = entry.get("dcterms_language")

        metadata = Metadata(
            data_source,
            primary_identifier=IdentifierData(
                identifier.type, identifier.identifier),
            title=title,
            language=language,
            contributors=[author],
            links=links,
        )
        edition, ignore = metadata.edition(self._db)
        metadata.apply(edition, collection, replace=replace)

        messages.append(message)

    title = "%s Catalog Item Additions for %s" % (
        collection.protocol, client.url)
    url = self.collection_feed_url("add_with_metadata", collection)
    addition_feed = AcquisitionFeed(
        self._db, title, url, [], VerboseAnnotator,
        precomposed_entries=messages)
    return feed_response(addition_feed)
def do_generate(self, lane):
    """Generate cached feed pages for every enabled facet combination
    of the given lane.

    Yields one AcquisitionFeed per (order, availability, collection,
    page) combination. Requested facet values that the library has not
    enabled are skipped with a warning.

    :param lane: a Lane, or the top-level WorkList.
    """
    annotator = self.app.manager.annotator(lane)
    if isinstance(lane, Lane):
        lane_id = lane.id
    else:
        # Presumably this is the top-level WorkList, which has no
        # database ID of its own.
        lane_id = None
    library = lane.get_library(self._db)
    url = self.app.manager.cdn_url_for(
        "feed", lane_identifier=lane_id,
        library_short_name=library.short_name
    )

    # For each facet group: use the explicitly requested values if any
    # were supplied, otherwise fall back to the library's default, and
    # never generate a feed for a value the library hasn't enabled.
    default_order = library.default_facet(Facets.ORDER_FACET_GROUP_NAME)
    allowed_orders = library.enabled_facets(Facets.ORDER_FACET_GROUP_NAME)
    chosen_orders = self.orders or [default_order]

    default_availability = library.default_facet(
        Facets.AVAILABILITY_FACET_GROUP_NAME
    )
    allowed_availabilities = library.enabled_facets(
        Facets.AVAILABILITY_FACET_GROUP_NAME
    )
    chosen_availabilities = self.availabilities or [default_availability]

    default_collection = library.default_facet(
        Facets.COLLECTION_FACET_GROUP_NAME
    )
    allowed_collections = library.enabled_facets(
        Facets.COLLECTION_FACET_GROUP_NAME
    )
    chosen_collections = self.collections or [default_collection]

    for order in chosen_orders:
        if order not in allowed_orders:
            # logging.warning: .warn() is a deprecated alias.
            logging.warning("Ignoring unsupported ordering %s" % order)
            continue
        for availability in chosen_availabilities:
            if availability not in allowed_availabilities:
                logging.warning(
                    "Ignoring unsupported availability %s" % availability)
                continue
            for collection in chosen_collections:
                if collection not in allowed_collections:
                    logging.warning(
                        "Ignoring unsupported collection %s" % collection)
                    continue
                pagination = Pagination.default()
                facets = Facets(
                    library=library, collection=collection,
                    availability=availability,
                    order=order, order_ascending=True
                )
                title = lane.display_name
                # Generate self.pages consecutive pages, advancing the
                # pagination cursor after each one.
                for _ in range(self.pages):
                    yield AcquisitionFeed.page(
                        self._db, title, url, lane, annotator,
                        facets=facets, pagination=pagination,
                        force_refresh=True
                    )
                    pagination = pagination.next_page