def enki_library_id(self, library):
    """Find the Enki library ID for the given library."""
    session = Session.object_session(library)
    setting = ConfigurationSetting.for_library_and_externalintegration(
        session,
        self.ENKI_LIBRARY_ID_KEY,
        library,
        self.external_integration(session),
    )
    return setting.value
def add_web_client_urls(self, record, library, identifier, integration=None):
    """Add one 856 field to `record` for every web client URL
    configured for this library."""
    _db = Session.object_session(library)
    from api.registry import Registration

    urls = []
    # A URL configured directly on the MARC export integration comes first.
    if integration:
        configured = self.value(MARCExporter.WEB_CLIENT_URL, integration)
        if configured:
            urls.append(configured)

    # Then any web client URLs recorded during library registration.
    registration_settings = _db.query(
        ConfigurationSetting
    ).filter(
        ConfigurationSetting.key==Registration.LIBRARY_REGISTRATION_WEB_CLIENT,
        ConfigurationSetting.library_id==library.id
    )
    urls.extend(s.value for s in registration_settings if s.value)

    quoted_identifier = urllib.quote(
        identifier.type + "/" + identifier.identifier, safe='')
    for url in urls:
        record.add_field(
            Field(
                tag="856",
                indicators=["4", "0"],
                subfields=["u", url + "/book/" + quoted_identifier],
            )
        )
def from_config(cls, library):
    """Build a client from the library's stored NoveList credentials."""
    profile, password = cls.values(library)
    if not profile or not password:
        raise ValueError("No NoveList client configured.")
    return cls(Session.object_session(library), profile, password)
def enki_library_id(self, library):
    """Find the Enki library ID for the given library."""
    db = Session.object_session(library)
    integration = self.external_integration(db)
    return ConfigurationSetting.for_library_and_externalintegration(
        db, self.ENKI_LIBRARY_ID_KEY, library, integration
    ).value
def single_hold_feed(cls, circulation, hold, test_mode=False):
    """Build an OPDS feed containing the single entry for `hold`."""
    session = Session.object_session(hold)
    pool = hold.license_pool
    work = pool.work or pool.edition.work
    annotator = cls(
        circulation,
        None,
        active_loans_by_work={},
        active_holds_by_work={work: hold},
        test_mode=test_mode,
    )
    return AcquisitionFeed.single_entry(session, work, annotator)
def get_lcp_license(self, collection_name, license_id):
    """Returns an LCP license with the specified ID

    :param collection_name: Name of the collection
    :type collection_name: string

    :param license_id: License ID
    :type license_id: string

    :return: Flask response containing the LCP license with the specified ID
    :rtype: string
    """
    self._logger.info("Started fetching license # {0}".format(license_id))

    patron = self._get_patron()
    collection = self._get_lcp_collection(patron, collection_name)
    # Collection lookup can fail; pass the problem document through.
    if isinstance(collection, ProblemDetail):
        return collection

    api = self.circulation.api_for_collection.get(collection.id)
    server = self._lcp_server_factory.create(api)
    session = Session.object_session(patron)
    license_document = server.get_license(session, license_id, patron)

    self._logger.info("Finished fetching license # {0}: {1}".format(
        license_id, license_document))

    return flask.jsonify(license_document)
def update_loan(self, loan, status_doc=None):
    """Check a loan's status, and if it is no longer active, delete the
    loan and update its pool's availability.
    """
    _db = Session.object_session(loan)

    status_doc = status_doc or self.get_license_status_document(loan)
    status = status_doc.get("status")
    # get_license_status_document already validates the status, but a
    # document that arrived via a notification hasn't been checked yet.
    if status not in self.STATUS_VALUES:
        raise BadResponseException(
            "The License Status Document had an unknown status value.")

    inactive = [
        self.REVOKED_STATUS,
        self.RETURNED_STATUS,
        self.CANCELLED_STATUS,
        self.EXPIRED_STATUS,
    ]
    if status in inactive:
        # The loan is over: remove it and recompute availability. If
        # there are holds, the freed license is reserved for the next
        # patron in line.
        _db.delete(loan)
        self.update_hold_queue(loan.license_pool)
def patron_remote_identifier(self, patron):
    """Locate the identifier for the given Patron's account on the
    RBdigital side, creating a new RBdigital account if necessary.

    The identifier is cached in a persistent Credential object.

    :return: The remote identifier for this patron, taken from the
        corresponding Credential.
    """
    def refresher(credential):
        # Look the patron up on the remote side; if they have no
        # account yet, create one, then cache the result forever.
        identifier = self.patron_remote_identifier_lookup(patron)
        if not identifier:
            identifier = self.create_patron(patron)
        credential.credential = identifier
        credential.expires = None

    session = Session.object_session(patron)
    credential = Credential.lookup(
        session,
        DataSource.RB_DIGITAL,
        Credential.IDENTIFIER_FROM_REMOTE_SERVICE,
        patron,
        refresher_method=refresher,
        allow_persistent_token=True,
    )
    # An existing Credential row with no value still needs a refresh.
    if not credential.credential:
        refresher(credential)
    return credential.credential
def update_hold_queue(self, licensepool):
    # Update the pool and the next holds in the queue when a license is reserved.
    _db = Session.object_session(licensepool)

    # Count active loans (no end date, or an end date still in the
    # future) to determine how many licenses remain uncommitted.
    loans_count = _db.query(Loan).filter(
        Loan.license_pool_id == licensepool.id).filter(
        or_(Loan.end == None, Loan.end > datetime.datetime.utcnow())).count()
    remaining_licenses = licensepool.licenses_owned - loans_count

    # Fetch active holds, oldest first. A hold is active if it has no
    # end date, hasn't ended yet, or is still queued (position > 0).
    holds = _db.query(Hold).filter(
        Hold.license_pool_id == licensepool.id).filter(
        or_(
            Hold.end == None,
            Hold.end > datetime.datetime.utcnow(),
            Hold.position > 0,
        )).order_by(Hold.start).all()

    if len(holds) > remaining_licenses:
        # Demand exceeds supply: every free license is reserved for the
        # front of the hold queue, nothing is generally available.
        licensepool.licenses_available = 0
        licensepool.licenses_reserved = remaining_licenses
        licensepool.patrons_in_hold_queue = len(holds)
    else:
        # Every hold gets a reserved license; the rest stay available.
        licensepool.licenses_available = remaining_licenses - len(holds)
        licensepool.licenses_reserved = len(holds)
        licensepool.patrons_in_hold_queue = len(holds)
    # Notify the holds at the front of the queue that just moved from
    # "waiting" (position != 0) to "reserved".
    for hold in holds[:licensepool.licenses_reserved]:
        if hold.position != 0:
            # This hold just got a reserved license.
            self._update_hold_end_date(hold)
def place_hold(self, patron, pin, licensepool, notification_email_address):
    """Create a new hold."""
    session = Session.object_session(patron)

    # Refresh availability before deciding whether a hold makes sense.
    self.update_hold_queue(licensepool)

    if licensepool.licenses_available > 0:
        raise CurrentlyAvailable()

    # Create the local hold record.
    hold, is_new = get_one_or_create(
        session,
        Hold,
        license_pool=licensepool,
        patron=patron,
        create_method_kwargs=dict(start=datetime.datetime.utcnow()),
    )
    if not is_new:
        raise AlreadyOnHold()

    licensepool.patrons_in_hold_queue += 1
    self._update_hold_end_date(hold)

    return HoldInfo(
        licensepool.collection,
        licensepool.data_source.name,
        licensepool.identifier.type,
        licensepool.identifier.identifier,
        start_date=hold.start,
        end_date=hold.end,
        hold_position=hold.position,
    )
def active_loans_for(cls, circulation, patron, test_mode=False):
    """Build an OPDS feed of this patron's active loans and holds."""
    session = Session.object_session(patron)
    loans_by_work = {
        loan.work: loan for loan in patron.loans if loan.work
    }
    holds_by_work = {
        hold.work: hold for hold in patron.holds if hold.work
    }
    annotator = cls(circulation, None, patron,
                    loans_by_work, holds_by_work, test_mode=test_mode)
    url = annotator.url_for('active_loans', _external=True)
    works = patron.works_on_loan_or_on_hold()
    feed_obj = AcquisitionFeed(session, "Active loans and holds", url,
                               works, annotator)
    annotator.annotate_feed(feed_obj, None)
    return feed_obj
def fulfill(self, patron, pin, licensepool, internal_format):
    """Fulfill the patron's loan on this license pool, returning the
    license document's link as a FulfillmentInfo."""
    session = Session.object_session(patron)
    loan = session.query(Loan).filter(
        Loan.patron == patron,
        Loan.license_pool_id == licensepool.id,
    ).one()

    doc = self.get_license_status_document(loan)
    status = doc.get("status")

    if status not in [self.READY_STATUS, self.ACTIVE_STATUS]:
        # This loan isn't available for some reason. It's possible
        # the distributor revoked it or the patron already returned it
        # through the DRM system, and we didn't get a notification
        # from the distributor yet.
        self.update_loan(loan, doc)
        raise CannotFulfill()

    expires = datetime.datetime.strptime(
        doc.get("potential_rights", {}).get("end"), self.TIME_FORMAT)
    license_link = doc.get("links", {}).get("license", {})
    return FulfillmentInfo(
        licensepool.collection,
        licensepool.data_source.name,
        licensepool.identifier.type,
        licensepool.identifier.identifier,
        license_link.get("href"),
        license_link.get("type"),
        None,
        expires,
    )
def checkin(self, patron, pin, licensepool):
    """Return a loan early."""
    session = Session.object_session(patron)
    loan_query = session.query(Loan).filter(
        Loan.patron == patron).filter(
        Loan.license_pool_id == licensepool.id)
    if loan_query.count() < 1:
        raise NotCheckedOut()
    loan = loan_query.one()

    doc = self.get_license_status_document(loan)
    status = doc.get("status")

    inactive = (
        self.REVOKED_STATUS,
        self.RETURNED_STATUS,
        self.CANCELLED_STATUS,
        self.EXPIRED_STATUS,
    )
    if status in inactive:
        # This loan was already returned early or revoked by the distributor, or it expired.
        self.update_loan(loan, doc)
        raise NotCheckedOut()
    if status == self.ACTIVE_STATUS:
        # This loan has already been fulfilled, so it needs to be returned through the DRM system.
        # Do nothing.
        return

    return_url = doc.get("links", {}).get("return", {}).get("href")
    if not return_url:
        # The distributor didn't provide a link to return this loan.
        raise CannotReturn()
    # Hit the distributor's return link, then fetch the status document
    # again to make sure the return was successful; update_loan will
    # update pool availability and delete the local loan.
    self._get(return_url)
    self.update_loan(loan)
def __init__(self, library, integration, analytics=None):
    """Initializes a new instance of SAMLAuthenticationProvider class

    :param library: Patrons authenticated through this provider
        are associated with this Library. Don't store this object!
        It's associated with a scoped database session. Just pull
        normal Python objects out of it.
    :type library: Library

    :param integration: The ExternalIntegration that configures this
        AuthenticationProvider. Don't store this object! It's
        associated with a scoped database session. Just pull normal
        Python objects out of it.
    :type integration: ExternalIntegration
    """
    super(BaseSAMLAuthenticationProvider, self).__init__(library, integration, analytics)

    self._logger = logging.getLogger(__name__)

    # Helpers used to read this provider's SAML configuration out of
    # the integration's settings.
    self._configuration_storage = ConfigurationStorage(self)
    self._configuration_factory = SAMLConfigurationFactory(
        SAMLMetadataParser())
    self._authentication_manager_factory = SAMLAuthenticationManagerFactory(
    )

    db = Session.object_session(library)

    # Copy plain Python values out of the configuration now, so we do
    # not hold on to session-bound objects after __init__ returns.
    with self.get_configuration(db) as configuration:
        self._patron_id_use_name_id = ConfigurationMetadata.to_bool(
            configuration.patron_id_use_name_id)
        self._patron_id_attributes = configuration.patron_id_attributes
        self._patron_id_regular_expression = (
            configuration.patron_id_regular_expression)
def single_fulfillment_feed(cls, circulation, loan, fulfillment, test_mode=False):
    """Build a single-entry OPDS feed describing an active fulfillment."""
    session = Session.object_session(loan)
    pool = loan.license_pool
    work = pool.work or pool.presentation_edition.work
    annotator = cls(
        circulation, None, loan.patron.library,
        active_loans_by_work={},
        active_holds_by_work={},
        active_fulfillments_by_work={work: fulfillment},
        test_mode=test_mode,
    )
    identifier = pool.identifier
    url = annotator.url_for(
        'loan_or_hold_detail',
        identifier_type=identifier.type,
        identifier=identifier.identifier,
        library_short_name=loan.patron.library.short_name,
        _external=True,
    )
    if not work:
        return AcquisitionFeed(
            session, "Active loan for unknown work", url, [], annotator)
    return AcquisitionFeed.single_entry(session, work, annotator)
def single_hold_feed(cls, circulation, hold, test_mode=False):
    """Build the single-entry OPDS feed for `hold`."""
    db = Session.object_session(hold)
    pool = hold.license_pool
    work = pool.work or pool.presentation_edition.work
    annotator = cls(
        circulation, None, hold.patron.library,
        active_loans_by_work={},
        active_holds_by_work={work: hold},
        test_mode=test_mode,
    )
    return AcquisitionFeed.single_entry(db, work, annotator)
def license_pool(self):
    """Find the LicensePool model object corresponding to this object."""
    session = Session.object_session(self.collection)
    pool, is_new = LicensePool.for_foreign_id(
        session,
        self.data_source_name,
        self.identifier_type,
        self.identifier,
        collection=self.collection,
    )
    return pool
def from_config(cls, library):
    """Instantiate the API from the library's NoveList configuration."""
    profile, password = cls.values(library)
    if not (profile and password):
        raise CannotLoadConfiguration(
            "No NoveList integration configured for library (%s)." % library.short_name)
    session = Session.object_session(library)
    return cls(session, profile, password)
def __init__(self, _db, collection, api_class=BibliothecaAPI, **kwargs):
    # NOTE(review): the incoming _db argument is immediately replaced by
    # the session derived from `collection` — presumably the parameter
    # is kept only for signature compatibility with sibling monitors;
    # confirm before removing it.
    _db = Session.object_session(collection)
    super(BibliothecaCirculationSweep, self).__init__(_db, collection, **kwargs)
    # api_class may be an already-constructed BibliothecaAPI instance
    # (presumably for testing — confirm) rather than a class to call.
    if isinstance(api_class, BibliothecaAPI):
        self.api = api_class
    else:
        self.api = api_class(_db, collection)
    self.analytics = Analytics(_db)
def from_config(cls, library):
    """Build a client from the library's NoveList integration settings."""
    profile, password = cls.values(library)
    if not profile or not password:
        raise CannotLoadConfiguration(
            "No NoveList integration configured for library (%s)." % library.short_name
        )
    return cls(Session.object_session(library), profile, password)
def __init__(self, library, work, display_name=None, novelist_api=None, parent=None):
    """Create a lane of NoveList recommendations for `work`."""
    super(RecommendationLane, self).__init__(
        library, work, display_name=display_name,
    )
    session = Session.object_session(library)
    self.api = novelist_api or NoveListAPI.from_config(library)
    self.recommendations = self.fetch_recommendations(session)
    if parent:
        parent.children.append(self)
def complaints(cls, library, title, url, annotator, pagination=None):
    # Build one page of an OPDS feed listing license pools that have
    # complaints, with start/up/next/first/previous navigation links.
    _db = Session.object_session(library)
    facets = Facets.default(library)
    pagination = pagination or Pagination.default()

    q = LicensePool.with_complaint(library)
    results = pagination.modify_database_query(_db, q).all()

    # Each result row pairs a pool with its complaint count; the counts
    # themselves are not used here.
    if len(results) > 0:
        (pools, counts) = list(zip(*results))
    else:
        pools = ()

    works = [pool.work for pool in pools]
    feed = cls(_db, title, url, works, annotator)

    # Render a 'start' link
    top_level_title = annotator.top_level_title()
    start_uri = annotator.groups_url(None)
    AdminFeed.add_link_to_feed(feed.feed, href=start_uri, rel="start", title=top_level_title)

    # Render an 'up' link, same as the 'start' link to indicate top-level feed
    AdminFeed.add_link_to_feed(feed.feed, href=start_uri, rel="up", title=top_level_title)

    if len(works) > 0:
        # There are works in this list. Add a 'next' link.
        AdminFeed.add_link_to_feed(
            feed.feed,
            rel="next",
            href=annotator.complaints_url(facets, pagination.next_page),
        )

    if pagination.offset > 0:
        AdminFeed.add_link_to_feed(
            feed.feed,
            rel="first",
            href=annotator.complaints_url(facets, pagination.first_page),
        )

    previous_page = pagination.previous_page
    if previous_page:
        AdminFeed.add_link_to_feed(
            feed.feed,
            rel="previous",
            href=annotator.complaints_url(facets, previous_page),
        )

    annotator.annotate_feed(feed)
    return str(feed)
def __init__(self, integration, library=None):
    """Configure Google Analytics from an ExternalIntegration; requires
    a library with a tracking id configured."""
    _db = Session.object_session(integration)
    if not library:
        raise CannotLoadConfiguration("Google Analytics can't be configured without a library.")
    url_setting = ConfigurationSetting.for_externalintegration(
        ExternalIntegration.URL, integration)
    self.url = url_setting.value or self.DEFAULT_URL
    self.tracking_id = ConfigurationSetting.for_library_and_externalintegration(
        _db, self.TRACKING_ID, library, integration,
    ).value
    if not self.tracking_id:
        raise CannotLoadConfiguration(
            "Missing tracking id for library %s" % library.short_name)
def domains(self):
    """Map each configured domain (lowercased) to the libraries that
    use it."""
    result = defaultdict(list)
    if not self.integration:
        return result
    session = Session.object_session(self.integration)
    for library in self.integration.libraries:
        setting = ConfigurationSetting.for_library_and_externalintegration(
            session, self.DOMAINS, library, self.integration)
        if setting.json_value:
            for domain in setting.json_value:
                result[domain.lower()].append(library)
    return result
def _adobe_patron_identifier(self, patron):
    """Return a stable anonymized identifier for `patron`, minting a
    new UUID-backed credential on first use."""
    session = Session.object_session(patron)
    data_source = DataSource.lookup(session, DataSource.INTERNAL_PROCESSING)

    def refresh(credential):
        credential.credential = str(uuid.uuid1())

    return Credential.lookup(
        session,
        data_source,
        AuthdataUtility.ADOBE_ACCOUNT_ID_PATRON_IDENTIFIER,
        patron,
        refresher_method=refresh,
        allow_persistent_token=True,
    ).credential
def domains(self):
    """Return a mapping from lowercased domain to the list of libraries
    configured with that domain."""
    mapping = defaultdict(list)
    if self.integration:
        session = Session.object_session(self.integration)
        for library in self.integration.libraries:
            configured = ConfigurationSetting.for_library_and_externalintegration(
                session, self.DOMAINS, library, self.integration
            ).json_value
            if configured:
                for domain in configured:
                    mapping[domain.lower()].append(library)
    return mapping
def _count_holds_before(self, hold):
    """Count unexpired holds on the same pool that started before this one."""
    session = Session.object_session(hold)
    still_active = or_(
        Hold.end == None,
        Hold.end > datetime.datetime.utcnow(),
        Hold.position > 0,
    )
    return session.query(Hold).filter(
        Hold.license_pool_id == hold.license_pool_id,
        Hold.start < hold.start,
        still_active,
    ).count()
def __init__(self, collection, upload_client=None, **kwargs):
    """Set up a metadata wrangler upload client for this collection;
    an unauthenticated client is a configuration error."""
    session = Session.object_session(collection)
    if upload_client:
        self.upload_client = upload_client
    else:
        self.upload_client = MetadataWranglerOPDSLookup.from_config(
            session, collection=collection)
    super(MetadataUploadCoverageProvider, self).__init__(collection, **kwargs)
    if not self.upload_client.authenticated:
        raise CannotLoadConfiguration(
            "Authentication for the Library Simplified Metadata Wrangler "
            "is not set up. You can't upload metadata without authenticating."
        )
def checkout(self, patron, pin, licensepool, internal_format):
    """
    Associate an eBook or eAudio with a patron.

    :param patron: a Patron object for the patron who wants to check out the book.
    :param pin: The patron's password (not used).
    :param licensepool: The Identifier of the book to be checked out is
        attached to this licensepool.
    :param internal_format: Represents the patron's desired book format.  Ignored for now.

    :return LoanInfo on success, None on failure.
    """
    patron_oneclick_id = self.patron_remote_identifier(patron)
    (item_oneclick_id, item_media) = self.validate_item(licensepool)

    today = datetime.datetime.utcnow()
    library = patron.library

    if item_media == Edition.AUDIO_MEDIUM:
        # Audiobooks can have a per-library loan duration configured;
        # fall back to the standard default when none is set.
        key = Collection.AUDIOBOOK_LOAN_DURATION_KEY
        _db = Session.object_session(patron)
        days = (ConfigurationSetting.for_library_and_externalintegration(
            _db, key, library, self.collection.external_integration).int_value
            or Collection.STANDARD_DEFAULT_LOAN_PERIOD)
    else:
        days = self.collection.default_loan_period(library)

    resp_dict = self.circulate_item(patron_id=patron_oneclick_id,
                                    item_id=item_oneclick_id,
                                    return_item=False, days=days)

    # A missing response or one carrying an error code means the
    # checkout failed; signal that to the caller with None.
    if not resp_dict or ('error_code' in resp_dict):
        return None

    self.log.debug(
        "Patron %s/%s checked out item %s with transaction id %s.",
        patron.authorization_identifier, patron_oneclick_id,
        item_oneclick_id, resp_dict['transactionId'])

    expires = today + datetime.timedelta(days=days)
    loan = LoanInfo(
        self.collection,
        DataSource.RB_DIGITAL,
        identifier_type=licensepool.identifier.type,
        identifier=item_oneclick_id,
        start_date=today,
        end_date=expires,
        fulfillment_info=None,
    )
    return loan
def __init__(self, collection, lookup_client=None, **kwargs):
    """Set up a metadata wrangler lookup client for this collection.

    An unauthenticated client is tolerated; a warning is logged.
    """
    session = Session.object_session(collection)
    if not lookup_client:
        lookup_client = MetadataWranglerOPDSLookup.from_config(
            session, collection=collection)
    super(MetadataWranglerCoverageProvider, self).__init__(
        collection, lookup_client, **kwargs)
    if not self.lookup_client.authenticated:
        self.log.warn(
            "Authentication for the Library Simplified Metadata Wrangler "
            "is not set up. You can still use the metadata wrangler, but "
            "it will not know which collection you're asking about.")
def __init__(self, library, work, display_name=None, novelist_api=None):
    """Create a lane of works related to `work`, built from sublanes."""
    super(RelatedBooksLane, self).__init__(
        library, work, display_name=display_name,
    )
    session = Session.object_session(library)
    sublanes = self._get_sublanes(session, novelist_api)
    if not sublanes:
        raise ValueError(
            "No related books for %s by %s" % (self.work.title, self.work.author)
        )
    self.children = sublanes
def __init__(self, library, work, display_name=None, novelist_api=None):
    """Build a lane whose children are sublanes of books related to `work`."""
    super(RelatedBooksLane, self).__init__(
        library, work, display_name=display_name,
    )
    db = Session.object_session(library)
    children = self._get_sublanes(db, novelist_api)
    if not children:
        raise ValueError(
            "No related books for %s by %s" % (self.work.title, self.work.author)
        )
    self.children = children
def open_access_link(self, lpdm):
    """Build an OPDS acquisition link tag for an open-access delivery
    mechanism, annotated as always available.

    :param lpdm: A LicensePoolDeliveryMechanism whose resource URL is
        used as the link target.
    :return: The constructed link element.
    """
    # The original assigned Session.object_session(self.library) to a
    # local that was never used; the dead statement has been removed.
    url = cdnify(lpdm.resource.url)
    kw = dict(rel=OPDSFeed.OPEN_ACCESS_REL, href=url)
    rep = lpdm.resource.representation
    # Only add a type attribute when we actually know the media type.
    if rep and rep.media_type:
        kw['type'] = rep.media_type
    link_tag = AcquisitionFeed.link(**kw)
    # Open-access titles never run out of copies.
    always_available = OPDSFeed.makeelement(
        "{%s}availability" % OPDSFeed.OPDS_NS, status="available"
    )
    link_tag.append(always_available)
    return link_tag
def __init__(self, collection, batch_size=None, api_class=OneClickAPI,
             api_class_kwargs=None):
    """Monitor circulation events for a OneClick collection.

    :param collection: The Collection to monitor.
    :param batch_size: Number of items per batch; defaults to
        DEFAULT_BATCH_SIZE.
    :param api_class: Class used to build the OneClick API client.
    :param api_class_kwargs: Optional extra keyword arguments passed to
        `api_class`. Defaults to no extra arguments. (The original used
        a mutable default `{}`, the classic shared-mutable-default
        pitfall; None is the safe, backward-compatible spelling.)
    """
    _db = Session.object_session(collection)
    super(OneClickCirculationMonitor, self).__init__(_db, collection)
    self.batch_size = batch_size or self.DEFAULT_BATCH_SIZE

    self.api = api_class(self.collection, **(api_class_kwargs or {}))
    self.bibliographic_coverage_provider = (
        OneClickBibliographicCoverageProvider(
            collection=self.collection, api_class=self.api,
        )
    )
    self.analytics = Analytics(self._db)
def checkin(self, patron, pin, licensepool):
    """Delete the patron's loan for this licensepool, if any.

    Failures are deliberately swallowed: checking in a book the patron
    does not have is treated as a no-op.
    """
    _db = Session.object_session(patron)
    try:
        loan = get_one(
            _db, Loan,
            patron_id=patron.id,
            license_pool_id=licensepool.id,
        )
        _db.delete(loan)
    # The original used the Python-2-only `except Exception, e` syntax
    # and never used `e`; plain `except Exception:` is equivalent and
    # valid in both Python 2 and 3.
    except Exception:
        # The patron didn't have this book checked out.
        pass
def checkin(self, patron, pin, licensepool):
    """Delete the patron's loan for this licensepool, if any.

    A missing loan is not an error: the check-in simply does nothing.
    """
    _db = Session.object_session(patron)
    try:
        loan = get_one(
            _db, Loan,
            patron_id=patron.id,
            license_pool_id=licensepool.id,
        )
        _db.delete(loan)
    # Fixed: `except Exception, e` is Python-2-only syntax and bound a
    # name that was never used; `except Exception:` behaves identically.
    except Exception:
        # The patron didn't have this book checked out.
        pass
def patron_activity(self, patron, pin):
    """Return LoanInfo objects for this patron's loans in this collection."""
    session = Session.object_session(patron)
    loans = session.query(Loan).join(Loan.license_pool).filter(
        LicensePool.collection_id == self.collection_id).filter(
        Loan.patron == patron)
    activity = []
    for loan in loans:
        pool = loan.license_pool
        activity.append(LoanInfo(
            pool.collection,
            pool.data_source.name,
            pool.identifier.type,
            pool.identifier.identifier,
            loan.start,
            loan.end,
        ))
    return activity
def single_loan_feed(cls, circulation, loan, test_mode=False):
    """Build an OPDS feed containing the single entry for `loan`."""
    session = Session.object_session(loan)
    pool = loan.license_pool
    work = pool.work or pool.edition.work
    annotator = cls(
        circulation, None,
        active_loans_by_work={work: loan},
        active_holds_by_work={},
        test_mode=test_mode,
    )
    url = annotator.url_for(
        'loan_or_hold_detail',
        data_source=pool.data_source.name,
        identifier=pool.identifier.identifier,
        _external=True,
    )
    if not work:
        return AcquisitionFeed(
            session, "Active loan for unknown work", url, [], annotator)
    return AcquisitionFeed.single_entry(session, work, annotator)
def values(cls, library):
    """Return the (profile, password) pair from the library's NoveList
    integration, or (None, None) if no integration exists."""
    session = Session.object_session(library)
    integration = ExternalIntegration.lookup(
        session,
        ExternalIntegration.NOVELIST,
        ExternalIntegration.METADATA_GOAL,
        library=library,
    )
    if not integration:
        return (None, None)
    return (integration.username, integration.password)
def values(cls, library):
    """Look up the NoveList profile and password configured for `library`.

    Returns (None, None) when there is no NoveList integration.
    """
    db = Session.object_session(library)
    integration = ExternalIntegration.lookup(
        db,
        ExternalIntegration.NOVELIST,
        ExternalIntegration.METADATA_GOAL,
        library=library,
    )
    if integration:
        return (integration.username, integration.password)
    return (None, None)
def __init__(self, library, contributor_name, parent=None, languages=None, audiences=None):
    """A lane of works by a single named contributor."""
    if not contributor_name:
        raise ValueError("ContributorLane can't be created without contributor")
    self.contributor_name = contributor_name
    super(ContributorLane, self).initialize(
        library,
        display_name=contributor_name,
        audiences=audiences,
        languages=languages,
    )
    if parent:
        parent.children.append(self)
    session = Session.object_session(library)
    self.contributors = session.query(Contributor).filter(
        or_(*self.contributor_name_clauses)).all()
def patron_activity(self, patron, pin):
    """Convert this patron's loans in this collection to LoanInfo objects."""
    db = Session.object_session(patron)
    query = (
        db.query(Loan)
        .join(Loan.license_pool)
        .filter(LicensePool.collection_id == self.collection_id)
        .filter(Loan.patron == patron)
    )
    return [
        LoanInfo(
            loan.license_pool.collection,
            loan.license_pool.data_source.name,
            loan.license_pool.identifier.type,
            loan.license_pool.identifier.identifier,
            loan.start,
            loan.end,
        )
        for loan in query
    ]
def fulfill(self, patron, pin, licensepool, internal_format, **kwargs):
    """Retrieve a bearer token that can be used to download the book.

    :param kwargs: A container for arguments to fulfill()
       which are not relevant to this vendor.

    :return: a FulfillmentInfo object.
    """
    links = licensepool.identifier.links
    # Find the acquisition link with the right media type.
    for link in links:
        # NOTE(review): assumes every link has a resource with a
        # representation — a missing representation would raise
        # AttributeError here; confirm against how links are created.
        media_type = link.resource.representation.media_type
        if link.rel == Hyperlink.GENERIC_OPDS_ACQUISITION and media_type == internal_format:
            url = link.resource.representation.url

            # Obtain a Credential with the information from our
            # bearer token.
            _db = Session.object_session(licensepool)
            credential = self._get_token(_db)

            # Build a application/vnd.librarysimplified.bearer-token
            # document using information from the credential.
            now = datetime.datetime.utcnow()
            expiration = int((credential.expires - now).total_seconds())
            token_document = dict(
                token_type="Bearer",
                access_token=credential.credential,
                expires_in=expiration,
                location=url,
            )

            return FulfillmentInfo(
                licensepool.collection,
                licensepool.data_source.name,
                licensepool.identifier.type,
                licensepool.identifier.identifier,
                content_link=None,
                content_type=DeliveryMechanism.BEARER_TOKEN,
                content=json.dumps(token_document),
                content_expires=credential.expires,
            )

    # We couldn't find an acquisition link for this book.
    raise CannotFulfill()
def __init__(self, collection, lookup_client=None, **kwargs):
    """Since we are processing a specific collection, we must be able
    to get an _authenticated_ metadata wrangler lookup client for the
    collection.
    """
    session = Session.object_session(collection)
    if not lookup_client:
        lookup_client = MetadataWranglerOPDSLookup.from_config(
            session, collection=collection
        )
    super(BaseMetadataWranglerCoverageProvider, self).__init__(
        collection, lookup_client, **kwargs
    )
    if not self.lookup_client.authenticated:
        raise CannotLoadConfiguration(
            "Authentication for the Library Simplified Metadata Wrangler "
            "is not set up. Without this, there is no way to register "
            "your identifiers with the metadata wrangler."
        )
def cover_links(cls, work):
    """The content server sends out _all_ cover links for the work.

    For books covered by Gutenberg Illustrated, this can be over a
    hundred cover links.
    """
    session = Session.object_session(work)
    identifier_ids = work.all_identifier_ids()
    covers = Identifier.resources_for_identifier_ids(
        session, identifier_ids, Resource.IMAGE)
    thumbnails = [c.scaled_path for c in covers if c.scaled_path]
    full = [c.mirrored_path for c in covers if c.mirrored_path]
    return thumbnails, full
def check_storage_protocol(self, service):
    """For MARC Export integrations, check that the storage protocol
    corresponds to an existing storage integration.

    Returns a ProblemDetail on misconfiguration; returns None (falls
    through) when the configuration is acceptable or `service` is not
    a MARC Export integration.
    """
    if service.protocol == MARCExporter.NAME:
        storage_protocol = service.setting(MARCExporter.STORAGE_PROTOCOL).value
        _db = Session.object_session(service)
        # The chosen protocol must belong to a configured storage service.
        integration = ExternalIntegration.lookup(
            _db, storage_protocol, ExternalIntegration.STORAGE_GOAL)
        if not integration:
            return MISSING_SERVICE.detailed(_(
                "You set the storage protocol to %(protocol)s, but no storage service with that protocol is configured.",
                protocol=storage_protocol,
            ))
        if storage_protocol == ExternalIntegration.S3:
            # For S3, the storage service must also have a MARC file bucket.
            bucket = integration.setting(S3Uploader.MARC_BUCKET_KEY).value
            if not bucket:
                return MISSING_SERVICE.detailed(_(
                    "You set the storage protocol to %(protocol)s, but the storage service with that protocol does not have a MARC file bucket configured.",
                    protocol=storage_protocol,
                ))
def active_loans_for(cls, circulation, patron, test_mode=False):
    """Assemble the patron's active-loans-and-holds OPDS feed."""
    session = Session.object_session(patron)

    loans_by_work = {}
    for item in patron.loans:
        if item.work:
            loans_by_work[item.work] = item

    holds_by_work = {}
    for item in patron.holds:
        if item.work:
            holds_by_work[item.work] = item

    annotator = cls(
        circulation, None, patron, loans_by_work, holds_by_work,
        test_mode=test_mode
    )
    url = annotator.url_for('active_loans', _external=True)
    works = patron.works_on_loan_or_on_hold()
    feed_obj = AcquisitionFeed(
        session, "Active loans and holds", url, works, annotator)
    annotator.annotate_feed(feed_obj, None)
    return feed_obj
def complaints(cls, library, title, url, annotator, pagination=None):
    # Build one page of an OPDS feed listing license pools that have
    # complaints, with start/up/next/first/previous navigation links.
    _db = Session.object_session(library)
    facets = Facets.default(library)
    pagination = pagination or Pagination.default()

    q = LicensePool.with_complaint(library)
    results = pagination.apply(q).all()

    # Each result row pairs a pool with its complaint count; the counts
    # themselves are not used here.
    if len(results) > 0:
        (pools, counts) = zip(*results)
    else:
        pools = ()

    works = [pool.work for pool in pools]
    feed = cls(_db, title, url, works, annotator)

    # Render a 'start' link
    top_level_title = annotator.top_level_title()
    start_uri = annotator.groups_url(None)
    AdminFeed.add_link_to_feed(feed.feed, href=start_uri, rel="start", title=top_level_title)

    # Render an 'up' link, same as the 'start' link to indicate top-level feed
    AdminFeed.add_link_to_feed(feed.feed, href=start_uri, rel="up", title=top_level_title)

    if len(works) > 0:
        # There are works in this list. Add a 'next' link.
        AdminFeed.add_link_to_feed(feed.feed, rel="next", href=annotator.complaints_url(facets, pagination.next_page))

    if pagination.offset > 0:
        AdminFeed.add_link_to_feed(feed.feed, rel="first", href=annotator.complaints_url(facets, pagination.first_page))

    previous_page = pagination.previous_page
    if previous_page:
        AdminFeed.add_link_to_feed(feed.feed, rel="previous", href=annotator.complaints_url(facets, previous_page))

    annotator.annotate_feed(feed)
    return unicode(feed)
def value(self, key, integration):
    """Look up the per-library setting `key` for this integration."""
    session = Session.object_session(integration)
    setting = ConfigurationSetting.for_library_and_externalintegration(
        session, key, self.library, integration)
    return setting.value
def parse_book(cls, collection, g, uri, title):
    """Turn an RDF graph into a Edition for the given `uri` and `title`.

    :param collection: The Collection the resulting LicensePool belongs to.
    :param g: The RDF graph describing the book.
    :param uri: The Gutenberg URI identifying the book within `g`.
    :param title: The book's title as extracted from the graph.
    :return: A 3-tuple (edition, license_pool, is_new_edition).
    """
    # The Gutenberg ID is embedded in the URI.
    source_id = unicode(cls.ID_IN_URI.search(uri).groups()[0])
    primary_identifier = IdentifierData(
        Identifier.GUTENBERG_ID, source_id
    )

    # Split a subtitle out from the main title.
    title = unicode(title)
    subtitle = None
    for separator in "\r\n", "\n":
        if separator in title:
            parts = title.split(separator)
            title = parts[0]
            subtitle = "\n".join(parts[1:])
            break

    issued = cls._value(g, (uri, cls.dcterms.issued, None))
    issued = datetime.datetime.strptime(issued, cls.DATE_FORMAT).date()

    rights = cls._value(g, (uri, cls.dcterms.rights, None))
    if rights:
        rights = str(rights)
    else:
        rights = ''
    rights_uri = RightsStatus.rights_uri_from_string(rights)

    # As far as I can tell, Gutenberg descriptions are 100%
    # useless for our purposes. They should not be used, even if
    # no other description is available.

    publisher = cls._value(g, (uri, cls.dcterms.publisher, None))

    # Collect language codes, normalizing 2-letter codes to 3-letter.
    languages = []
    for ignore, ignore, language_uri in g.triples(
            (uri, cls.dcterms.language, None)):
        code = str(cls._value(g, (language_uri, cls.rdf.value, None)))
        code = LanguageCodes.two_to_three[code]
        if code:
            languages.append(code)

    # Prefer English if present; otherwise take the first language.
    if 'eng' in languages:
        language = 'eng'
    elif languages:
        language = languages[0]
    else:
        language = None

    contributors = []
    for ignore, ignore, author_uri in g.triples((uri, cls.dcterms.creator, None)):
        name = cls._value(g, (author_uri, cls.gutenberg.name, None))
        aliases = cls._values(g, (author_uri, cls.gutenberg.alias, None))
        contributors.append(ContributorData(
            sort_name=name,
            aliases=aliases,
            roles=[Contributor.AUTHOR_ROLE],
        ))

    subjects = []
    subject_links = cls._values(g, (uri, cls.dcterms.subject, None))
    for subject in subject_links:
        value = cls._value(g, (subject, cls.rdf.value, None))
        vocabulary = cls._value(g, (subject, cls.dcam.memberOf, None))
        vocabulary = Subject.by_uri[str(vocabulary)]
        subjects.append(SubjectData(vocabulary, value))

    medium = Edition.BOOK_MEDIUM

    # Turn the Gutenberg download links into Hyperlinks associated
    # with the new Edition. They will serve either as open access
    # downloads or cover images.
    download_links = cls._values(g, (uri, cls.dcterms.hasFormat, None))
    links = [LinkData(
        rel=Hyperlink.CANONICAL,
        href=str(uri),
    )]

    # Gutenberg won't allow us to use any of the download or image
    # links--we have to make our own from an rsynced mirror--but
    # we can look through the links to determine which medium to
    # assign to this book.
    formats = []
    for href in download_links:
        for format_uri in cls._values(
                g, (href, cls.dcterms['format'], None)):
            media_type = unicode(
                cls._value(g, (format_uri, cls.rdf.value, None)))
            if media_type.startswith('audio/'):
                medium = Edition.AUDIO_MEDIUM
                formats.append(FormatData(
                    content_type=Representation.MP3_MEDIA_TYPE,
                    drm_scheme=DeliveryMechanism.NO_DRM,
                ))
            elif media_type.startswith('video/'):
                medium = Edition.VIDEO_MEDIUM
            else:
                formats.append(FormatData(
                    content_type=Representation.EPUB_MEDIA_TYPE,
                    drm_scheme=DeliveryMechanism.NO_DRM,
                    rights_uri=rights_uri,
                ))

    _db = Session.object_session(collection)
    metadata = Metadata(
        data_source=DataSource.GUTENBERG,
        title=title,
        subtitle=subtitle,
        language=language,
        publisher=publisher,
        issued=issued,
        medium=medium,
        primary_identifier=primary_identifier,
        subjects=subjects,
        contributors=contributors,
        links=links,
    )
    edition, new = metadata.edition(_db)
    metadata.apply(edition, collection)

    # Ensure that an open-access LicensePool exists for this book.
    circulation_data = CirculationData(
        data_source=DataSource.GUTENBERG,
        primary_identifier=primary_identifier,
        formats=formats,
        default_rights_uri=rights_uri,
        links=links,
    )
    license_pool, new_license_pool = circulation_data.license_pool(
        _db, collection
    )
    replace = ReplacementPolicy(formats=True)
    circulation_data.apply(_db, collection, replace=replace)
    license_pool.calculate_work()
    return edition, license_pool, new