def collection_from_details(_db, client, collection_details):
    """Look up a Collection from a metadata identifier string.

    Only authenticated IntegrationClients may resolve collections.

    :param _db: Database session.
    :param client: The requesting client; must be an IntegrationClient.
    :param collection_details: Metadata identifier naming the Collection.
    :return: The Collection, or None if the client is missing/invalid or
        no collection details were provided.
    """
    # Guard: an unauthenticated or wrong-typed client gets nothing.
    if not (client and isinstance(client, IntegrationClient)):
        return None
    if not collection_details:
        return None

    # A DataSource may be sent for collections with the
    # ExternalIntegration.OPDS_IMPORT protocol.
    data_source_name = request.args.get('data_source')
    if data_source_name:
        data_source_name = urllib.unquote(data_source_name)

    collection, ignore = Collection.from_metadata_identifier(
        _db, collection_details, data_source=data_source_name)
    return collection
def collection_from_details(_db, client, collection_details):
    """Resolve a metadata identifier into a Collection.

    Requires an authenticated IntegrationClient; returns None for any
    other caller, and None when no collection details are given.
    """
    if client and isinstance(client, IntegrationClient) and collection_details:
        # A DataSource may be sent for collections with the
        # ExternalIntegration.OPDS_IMPORT protocol.
        source = request.args.get('data_source')
        if source:
            source = urllib.unquote(source)
        collection, ignore = Collection.from_metadata_identifier(
            _db, collection_details, data_source=source
        )
        return collection
    return None
def add_items(self, collection_details):
    """Adds identifiers to a Collection's catalog"""
    client = authenticated_client_from_request(self._db)
    if isinstance(client, ProblemDetail):
        return client

    collection, ignore = Collection.from_metadata_identifier(
        self._db, collection_details)

    urns = request.args.getlist('urn')
    messages = []
    for urn in urns:
        # Parse failures of any kind are reported as an invalid URN.
        try:
            identifier, ignore = Identifier.parse_urn(self._db, urn)
        except Exception as e:
            identifier = None

        if not identifier:
            messages.append(OPDSMessage(
                urn, INVALID_URN.status_code, INVALID_URN.detail))
            continue

        if identifier in collection.catalog:
            status, description = HTTP_OK, "Already in catalog"
        else:
            collection.catalog_identifier(self._db, identifier)
            status, description = HTTP_CREATED, "Successfully added"
        messages.append(OPDSMessage(urn, status, description))

    title = "%s Catalog Item Additions for %s" % (collection.protocol, client.url)
    url = cdn_url_for(
        "add", collection_metadata_identifier=collection.name, urn=urns)
    addition_feed = AcquisitionFeed(
        self._db, title, url, [], VerboseAnnotator,
        precomposed_entries=messages)
    return feed_response(addition_feed)
def remove_items(self, collection_details):
    """Removes identifiers from a Collection's catalog"""
    client = authenticated_client_from_request(self._db)
    if isinstance(client, ProblemDetail):
        return client

    collection, ignore = Collection.from_metadata_identifier(
        self._db, collection_details)

    urns = request.args.getlist('urn')
    messages = []
    for urn in urns:
        # Any parse error is treated as an invalid URN.
        try:
            identifier, ignore = Identifier.parse_urn(self._db, urn)
        except Exception as e:
            identifier = None

        if not identifier:
            messages.append(OPDSMessage(
                urn, INVALID_URN.status_code, INVALID_URN.detail))
            continue

        if identifier in collection.catalog:
            collection.catalog.remove(identifier)
            messages.append(
                OPDSMessage(urn, HTTP_OK, "Successfully removed"))
        else:
            messages.append(
                OPDSMessage(urn, HTTP_NOT_FOUND, "Not in catalog"))

    title = "%s Catalog Item Removal for %s" % (collection.protocol, client.url)
    url = cdn_url_for(
        "remove", collection_metadata_identifier=collection.name, urn=urns)
    removal_feed = AcquisitionFeed(
        self._db, title, url, [], VerboseAnnotator,
        precomposed_entries=messages)
    return feed_response(removal_feed)
def updates_feed(self, collection_details):
    """Return a paginated OPDS feed of works in the Collection's catalog
    that have been updated since the optional `last_update_time` request
    argument (format '%Y-%m-%dT%H:%M:%SZ').
    """
    client = authenticated_client_from_request(self._db)
    if isinstance(client, ProblemDetail):
        return client

    collection, ignore = Collection.from_metadata_identifier(
        self._db, collection_details)

    last_update_time = request.args.get('last_update_time', None)
    if last_update_time:
        last_update_time = datetime.strptime(
            last_update_time, "%Y-%m-%dT%H:%M:%SZ")
    updated_works = collection.works_updated_since(self._db, last_update_time)

    pagination = load_pagination_from_request()
    works = pagination.apply(updated_works).all()
    title = "%s Collection Updates for %s" % (collection.protocol, client.url)

    def update_url(time=last_update_time, page=None):
        # FIX: previously this closure ignored its `time` argument and
        # always read the enclosing `last_update_time` instead.
        kw = dict(
            _external=True,
            collection_metadata_identifier=collection_details)
        if time:
            kw.update({'last_update_time': time})
        if page:
            kw.update(page.items())
        return cdn_url_for("updates", **kw)

    # Works with a cached OPDS entry become precomposed entries and are
    # removed from the list that will be freshly annotated below.
    entries = []
    for work in works[:]:
        entry = work.verbose_opds_entry or work.simple_opds_entry
        entry = etree.fromstring(entry)
        if entry:
            entries.append(entry)
            works.remove(work)

    works = [(work.identifier, work) for work in works]
    update_feed = LookupAcquisitionFeed(
        self._db, title, update_url(), works, VerboseAnnotator,
        precomposed_entries=entries)

    # Pagination links: next / first / previous as applicable.
    if len(updated_works.all()) > pagination.size + pagination.offset:
        update_feed.add_link_to_feed(
            update_feed.feed, rel="next",
            href=update_url(page=pagination.next_page))
    if pagination.offset > 0:
        update_feed.add_link_to_feed(
            update_feed.feed, rel="first",
            href=update_url(page=pagination.first_page))
        previous_page = pagination.previous_page
        if previous_page:
            update_feed.add_link_to_feed(
                update_feed.feed, rel="previous",
                href=update_url(page=previous_page))

    return feed_response(update_feed)
class URNLookupController(CoreURNLookupController):
    """Resolve URNs into OPDS entries, registering identifiers that
    cannot yet be resolved and cataloging identifiers on behalf of
    authenticated IntegrationClients.
    """

    UNRESOLVABLE_IDENTIFIER = "I can't gather information about an identifier of this type."
    IDENTIFIER_REGISTERED = "You're the first one to ask about this identifier. I'll try to find out about it."
    WORKING_TO_RESOLVE_IDENTIFIER = "I'm working to locate a source for this identifier."
    SUCCESS_DID_NOT_RESULT_IN_PRESENTATION_READY_WORK = "Something's wrong. I have a record of covering this identifier but there's no presentation-ready work to show you."

    OPERATION = CoverageRecord.RESOLVE_IDENTIFIER_OPERATION
    NO_WORK_DONE_EXCEPTION = u'No work done yet'

    log = logging.getLogger("URN lookup controller")

    def presentation_ready_work_for(self, identifier):
        """Either return a presentation-ready work associated with the
        given `identifier`, or return None.
        """
        pools = identifier.licensed_through
        if not pools:
            return None
        # All LicensePools for a given Identifier have the same Work.
        work = pools[0].work
        if not work or not work.presentation_ready:
            return None
        return work

    def can_resolve_identifier(self, identifier):
        """A chance to determine whether resolution should proceed."""
        # We can resolve any ISBN and any Overdrive ID.
        #
        # We can resolve any Gutenberg ID by looking it up in the
        # open-access content server.
        #
        # We can attempt to resolve URIs by looking them up in the
        # open-access content server, though there's no guarantee
        # it will work.
        if identifier.type in (
            Identifier.ISBN, Identifier.OVERDRIVE_ID,
            Identifier.GUTENBERG_ID, Identifier.URI
        ):
            return True

        # We can resolve any identifier that's associated with a
        # presentation-ready work, since the resolution has already
        # been done--no need to speculate about how.
        work = self.presentation_ready_work_for(identifier)
        if work is None:
            return False
        return True

    def process_urn(self, urn, collection_details=None, **kwargs):
        """Turn a URN into a Work suitable for use in an OPDS feed."""
        try:
            identifier, is_new = Identifier.parse_urn(self._db, urn)
        except ValueError as e:
            # FIX: was `except ValueError, e:` -- Python-2-only syntax,
            # inconsistent with the `except ... as e` form used elsewhere
            # in this file. `as` is valid on Python 2.6+ and 3.
            identifier = None

        if not identifier:
            # Not a well-formed URN.
            return self.add_message(urn, 400, INVALID_URN.detail)

        if not self.can_resolve_identifier(identifier):
            return self.add_message(
                urn, HTTP_NOT_FOUND, self.UNRESOLVABLE_IDENTIFIER)

        # We are at least willing to try to resolve this Identifier.
        # If a Collection was provided by an authenticated
        # IntegrationClient, this Identifier is part of the Collection's
        # catalog.
        client = authenticated_client_from_request(self._db, required=False)
        if client and collection_details:
            collection, ignore = Collection.from_metadata_identifier(
                self._db, collection_details)
            collection.catalog_identifier(self._db, identifier)

        if (identifier.type == Identifier.ISBN and not identifier.work):
            # There's not always enough information about an ISBN to
            # create a full Work. If not, we scrape together the cover
            # and description information and force the entry.
            return self.make_opds_entry_from_metadata_lookups(identifier)

        # All other identifiers need to be associated with a
        # presentation-ready Work for the lookup to succeed. If there
        # isn't one, we need to register it as unresolved.
        work = self.presentation_ready_work_for(identifier)
        if work:
            # The work has been done.
            return self.add_work(identifier, work)

        # Work remains to be done.
        return self.register_identifier_as_unresolved(urn, identifier)
production_session, ) log = logging.getLogger(name="Metadata Wrangler configuration import") try: _db = production_session() # Get all of the OPDS_IMPORT collections. collections = Collection.by_protocol(_db, ExternalIntegration.OPDS_IMPORT) for collection in collections: opds_url = collection.external_account_id if not opds_url: decoded_collection, ignore = Collection.from_metadata_identifier( _db, collection.name ) opds_url = decoded_collection.external_account_id if not opds_url: # This shouldn't happen. log.warn( 'Could not find external_account_id for %r' % collection ) continue if opds_url and collection == decoded_collection: log.info( 'Added URL "%s" to collection %r', decoded_collection.external_account_id, decoded_collection ) else:
ExternalIntegration, production_session, ) log = logging.getLogger(name="Metadata Wrangler configuration import") try: _db = production_session() # Get all of the OPDS_IMPORT collections. collections = Collection.by_protocol(_db, ExternalIntegration.OPDS_IMPORT) for collection in collections: opds_url = collection.external_account_id if not opds_url: decoded_collection, ignore = Collection.from_metadata_identifier( _db, collection.name) opds_url = decoded_collection.external_account_id if not opds_url: # This shouldn't happen. log.warn('Could not find external_account_id for %r' % collection) continue if opds_url and collection == decoded_collection: log.info('Added URL "%s" to collection %r', decoded_collection.external_account_id, decoded_collection) else: # Somehow the collection has been duplicated. This shouldn't # happen, but if it does, we shouldn't update the collection's # data_source on faulty information.