def single_hold_feed(cls, circulation, hold, test_mode=False):
    """Build an OPDS feed containing a single entry for `hold`'s work."""
    db = Session.object_session(hold)
    pool = hold.license_pool
    work = pool.work or pool.edition.work
    annotator = cls(
        circulation, None,
        active_loans_by_work={},
        active_holds_by_work={work: hold},
        test_mode=test_mode,
    )
    return AcquisitionFeed.single_entry(db, work, annotator)
def add_web_client_urls(self, record, library, identifier, integration=None):
    """Add an 856 field to `record` for each configured web client URL."""
    _db = Session.object_session(library)
    base_urls = []
    # A web client URL configured directly on the MARC export integration.
    if integration:
        exporter_url = self.value(MARCExporter.WEB_CLIENT_URL, integration)
        if exporter_url:
            base_urls.append(exporter_url)
    # Web client URLs recorded when the library registered with a registry.
    from api.registry import Registration
    registration_settings = _db.query(ConfigurationSetting).filter(
        ConfigurationSetting.key == Registration.LIBRARY_REGISTRATION_WEB_CLIENT,
        ConfigurationSetting.library_id == library.id,
    )
    base_urls.extend(s.value for s in registration_settings if s.value)
    quoted_identifier = urllib.quote(
        identifier.type + "/" + identifier.identifier, safe='')
    for base in base_urls:
        record.add_field(
            Field(
                tag="856",
                indicators=["4", "0"],
                subfields=["u", base + "/book/" + quoted_identifier],
            ))
def enki_library_id(self, library):
    """Find the Enki library ID for the given library."""
    _db = Session.object_session(library)
    setting = ConfigurationSetting.for_library_and_externalintegration(
        _db,
        self.ENKI_LIBRARY_ID_KEY,
        library,
        self.external_integration(_db),
    )
    return setting.value
def from_config(cls, library):
    """Instantiate an API client from the library's NoveList integration.

    :raises CannotLoadConfiguration: if no profile/password is configured.
    """
    profile, password = cls.values(library)
    if profile and password:
        _db = Session.object_session(library)
        return cls(_db, profile, password)
    raise CannotLoadConfiguration(
        "No NoveList integration configured for library (%s)." % library.short_name
    )
def __init__(self, library, work, display_name=None, novelist_api=None, parent=None):
    """Create a lane of NoveList recommendations related to `work`."""
    super(RecommendationLane, self).__init__(
        library, work, display_name=display_name,
    )
    session = Session.object_session(library)
    self.api = novelist_api or NoveListAPI.from_config(library)
    self.recommendations = self.fetch_recommendations(session)
    if parent:
        parent.children.append(self)
def domains(self):
    """Map each configured domain (lowercased) to the libraries using it."""
    domain_map = defaultdict(list)
    if not self.integration:
        return domain_map
    _db = Session.object_session(self.integration)
    for library in self.integration.libraries:
        setting = ConfigurationSetting.for_library_and_externalintegration(
            _db, self.DOMAINS, library, self.integration)
        configured = setting.json_value
        if not configured:
            continue
        for domain in configured:
            domain_map[domain.lower()].append(library)
    return domain_map
def __init__(self, integration, library=None):
    """Configure Google Analytics from an external integration.

    :raises CannotLoadConfiguration: if no library is supplied, or the
        library has no tracking id configured.
    """
    _db = Session.object_session(integration)
    if not library:
        raise CannotLoadConfiguration("Google Analytics can't be configured without a library.")
    url_setting = ConfigurationSetting.for_externalintegration(ExternalIntegration.URL, integration)
    self.url = url_setting.value or self.DEFAULT_URL
    tracking_setting = ConfigurationSetting.for_library_and_externalintegration(
        _db, self.TRACKING_ID, library, integration,
    )
    self.tracking_id = tracking_setting.value
    if not self.tracking_id:
        raise CannotLoadConfiguration("Missing tracking id for library %s" % library.short_name)
def __init__(self, library, work, display_name=None, novelist_api=None):
    """Create a lane of books related to `work`.

    :raises ValueError: if no related books can be found.
    """
    super(RelatedBooksLane, self).__init__(
        library, work, display_name=display_name,
    )
    session = Session.object_session(library)
    sublanes = self._get_sublanes(session, novelist_api)
    if not sublanes:
        raise ValueError(
            "No related books for %s by %s" % (self.work.title, self.work.author)
        )
    self.children = sublanes
def complaints(cls, library, title, url, annotator, pagination=None):
    """Build a paginated OPDS feed of works whose license pools have
    complaints filed against them.

    :param library: The Library whose complaints are listed.
    :param title: Feed title.
    :param url: URL of this feed.
    :param annotator: Provides navigation URLs and feed annotations.
    :param pagination: Optional Pagination; defaults to the standard page.
    :return: The feed serialized as a unicode string.
    """
    _db = Session.object_session(library)
    facets = Facets.default(library)
    pagination = pagination or Pagination.default()
    q = LicensePool.with_complaint(library)
    results = pagination.apply(q).all()
    if len(results) > 0:
        # Each result row is (LicensePool, complaint count).
        (pools, counts) = zip(*results)
    else:
        pools = ()
    works = [pool.work for pool in pools]
    feed = cls(_db, title, url, works, annotator)
    # Render a 'start' link
    top_level_title = annotator.top_level_title()
    start_uri = annotator.groups_url(None)
    AdminFeed.add_link_to_feed(feed.feed, href=start_uri, rel="start", title=top_level_title)
    # Render an 'up' link, same as the 'start' link to indicate top-level feed
    AdminFeed.add_link_to_feed(feed.feed, href=start_uri, rel="up", title=top_level_title)
    if len(works) > 0:
        # There are works in this list. Add a 'next' link.
        AdminFeed.add_link_to_feed(
            feed.feed, rel="next",
            href=annotator.complaints_url(facets, pagination.next_page))
    if pagination.offset > 0:
        AdminFeed.add_link_to_feed(
            feed.feed, rel="first",
            href=annotator.complaints_url(facets, pagination.first_page))
    previous_page = pagination.previous_page
    if previous_page:
        AdminFeed.add_link_to_feed(
            feed.feed, rel="previous",
            href=annotator.complaints_url(facets, previous_page))
    annotator.annotate_feed(feed)
    return unicode(feed)
def patron_activity(self, patron, pin):
    """Return a LoanInfo for each of this patron's loans in this collection."""
    _db = Session.object_session(patron)
    loans = (
        _db.query(Loan)
        .join(Loan.license_pool)
        .filter(LicensePool.collection_id == self.collection_id)
        .filter(Loan.patron == patron)
    )
    activity = []
    for loan in loans:
        pool = loan.license_pool
        activity.append(
            LoanInfo(
                pool.collection,
                pool.data_source.name,
                pool.identifier.type,
                pool.identifier.identifier,
                loan.start,
                loan.end,
            )
        )
    return activity
def __init__(self, collection, batch_size=None, api_class=OneClickAPI,
             api_class_kwargs=None):
    """Monitor circulation events for a OneClick collection.

    :param collection: The Collection to monitor.
    :param batch_size: Process events in batches of this size
        (defaults to DEFAULT_BATCH_SIZE).
    :param api_class: Class used to talk to the OneClick API.
    :param api_class_kwargs: Extra keyword arguments passed to `api_class`.
    """
    _db = Session.object_session(collection)
    super(OneClickCirculationMonitor, self).__init__(_db, collection)
    self.batch_size = batch_size or self.DEFAULT_BATCH_SIZE
    # `api_class_kwargs` previously defaulted to a shared mutable dict
    # ({}); default to None and build a fresh dict per call instead.
    self.api = api_class(self.collection, **(api_class_kwargs or {}))
    self.bibliographic_coverage_provider = OneClickBibliographicCoverageProvider(
        collection=self.collection,
        api_class=self.api,
    )
    self.analytics = Analytics(self._db)
def checkin(self, patron, pin, licensepool):
    """Delete the patron's loan for this licensepool, if one exists.

    Checkin is best-effort: if the patron didn't have this book checked
    out, do nothing. (Previously any exception -- including real database
    failures -- was silently swallowed by a bare `except Exception`;
    checking for a missing loan explicitly keeps the best-effort behavior
    without hiding unrelated errors.)
    """
    _db = Session.object_session(patron)
    loan = get_one(
        _db, Loan,
        patron_id=patron.id,
        license_pool_id=licensepool.id,
    )
    if loan is None:
        # The patron didn't have this book checked out.
        return
    _db.delete(loan)
def open_access_link(self, lpdm):
    """Build an OPDS <link> for an open-access delivery mechanism,
    annotated with an <opds:availability> tag marking it always available.

    :param lpdm: A LicensePoolDeliveryMechanism whose resource URL is
        CDN-ified and used as the link href.
    :return: The constructed link element.
    """
    # (Removed an unused `_db = Session.object_session(self.library)`
    # local -- nothing in this method touches the database.)
    url = cdnify(lpdm.resource.url)
    kw = dict(rel=OPDSFeed.OPEN_ACCESS_REL, href=url)
    rep = lpdm.resource.representation
    if rep and rep.media_type:
        kw['type'] = rep.media_type
    link_tag = AcquisitionFeed.link(**kw)
    always_available = OPDSFeed.makeelement(
        "{%s}availability" % OPDSFeed.OPDS_NS, status="available"
    )
    link_tag.append(always_available)
    return link_tag
def values(cls, library):
    """Return the (profile, password) configured for the library's
    NoveList integration, or (None, None) if none is configured."""
    _db = Session.object_session(library)
    integration = ExternalIntegration.lookup(
        _db,
        ExternalIntegration.NOVELIST,
        ExternalIntegration.METADATA_GOAL,
        library=library,
    )
    if not integration:
        return (None, None)
    return (integration.username, integration.password)
def single_loan_feed(cls, circulation, loan, test_mode=False):
    """Build an OPDS feed containing a single entry for `loan`'s work."""
    db = Session.object_session(loan)
    pool = loan.license_pool
    work = pool.work or pool.edition.work
    annotator = cls(
        circulation, None,
        active_loans_by_work={work: loan},
        active_holds_by_work={},
        test_mode=test_mode,
    )
    url = annotator.url_for(
        'loan_or_hold_detail',
        data_source=pool.data_source.name,
        identifier=pool.identifier.identifier,
        _external=True,
    )
    if not work:
        # The loan is real but we know nothing about the book.
        return AcquisitionFeed(
            db, "Active loan for unknown work", url, [], annotator)
    return AcquisitionFeed.single_entry(db, work, annotator)
def values(cls, library):
    """Look up the NoveList credentials for `library`.

    :return: A (profile, password) 2-tuple; (None, None) when the
        integration is not configured.
    """
    session = Session.object_session(library)
    integration = ExternalIntegration.lookup(
        session, ExternalIntegration.NOVELIST,
        ExternalIntegration.METADATA_GOAL, library=library)
    if not integration:
        return (None, None)
    profile, password = integration.username, integration.password
    return (profile, password)
def checkout(self, patron, pin, licensepool, internal_format):
    """
    Associate an eBook or eAudio with a patron.

    :param patron: a Patron object for the patron who wants to check out the book.
    :param pin: The patron's password (not used).
    :param licensepool: The Identifier of the book to be checked out is
        attached to this licensepool.
    :param internal_format: Represents the patron's desired book format.
        Ignored for now.

    :return LoanInfo on success, None on failure.
    """
    patron_oneclick_id = self.patron_remote_identifier(patron)
    (item_oneclick_id, item_media) = self.validate_item(licensepool)

    today = datetime.datetime.utcnow()
    library = patron.library

    if item_media == Edition.AUDIO_MEDIUM:
        # Audiobooks may have a per-library loan duration configured;
        # fall back to the standard default period.
        key = Collection.AUDIOBOOK_LOAN_DURATION_KEY
        _db = Session.object_session(patron)
        days = (
            ConfigurationSetting.for_library_and_externalintegration(
                _db, key, library, self.collection.external_integration
            ).int_value or Collection.STANDARD_DEFAULT_LOAN_PERIOD
        )
    else:
        days = self.collection.default_loan_period(library)

    resp_dict = self.circulate_item(
        patron_id=patron_oneclick_id, item_id=item_oneclick_id,
        return_item=False, days=days)

    # A missing response or one containing 'error_code' means the
    # checkout failed on the remote side.
    if not resp_dict or ('error_code' in resp_dict):
        return None

    self.log.debug(
        "Patron %s/%s checked out item %s with transaction id %s.",
        patron.authorization_identifier, patron_oneclick_id,
        item_oneclick_id, resp_dict['transactionId'])

    expires = today + datetime.timedelta(days=days)
    loan = LoanInfo(
        self.collection,
        DataSource.RB_DIGITAL,
        identifier_type=licensepool.identifier.type,
        identifier=item_oneclick_id,
        start_date=today,
        end_date=expires,
        fulfillment_info=None,
    )
    return loan
def __init__(self, library, work, display_name=None, novelist_api=None, parent=None):
    """Constructor.

    :raises: CannotLoadConfiguration if `novelist_api` is not provided
        and no Novelist integration is configured for this library.
    """
    super(RecommendationLane, self).__init__(
        library, work, display_name=display_name,
    )
    api = novelist_api or NoveListAPI.from_config(library)
    self.novelist_api = api
    if parent:
        parent.append_child(self)
    session = Session.object_session(library)
    self.recommendations = self.fetch_recommendations(session)
def _adobe_patron_identifier(self, patron):
    """Look up (creating if necessary) the UUID credential that
    identifies `patron` to the Adobe Vendor ID service."""
    _db = Session.object_session(patron)
    internal = DataSource.lookup(_db, DataSource.INTERNAL_PROCESSING)

    def refresh(credential):
        # A freshly generated UUID; carries no patron information.
        credential.credential = str(uuid.uuid1())

    credential = Credential.lookup(
        _db, internal,
        AuthdataUtility.ADOBE_ACCOUNT_ID_PATRON_IDENTIFIER,
        patron,
        refresher_method=refresh,
        allow_persistent_token=True,
    )
    return credential.credential
def __init__(self, collection, lookup_client=None, **kwargs):
    """Since we are processing a specific collection, we must be able
    to get an _authenticated_ metadata wrangler lookup client for the
    collection.
    """
    session = Session.object_session(collection)
    if not lookup_client:
        lookup_client = MetadataWranglerOPDSLookup.from_config(
            session, collection=collection)
    super(BaseMetadataWranglerCoverageProvider, self).__init__(
        collection, lookup_client, **kwargs)
    if not self.lookup_client.authenticated:
        raise CannotLoadConfiguration(
            "Authentication for the Library Simplified Metadata Wrangler "
            "is not set up. Without this, there is no way to register "
            "your identifiers with the metadata wrangler.")
def drm_device_registration_feed_tags(self, patron):
    """Return tags that provide information on DRM device deregistration
    independent of any particular loan. These tags will go under
    the <feed> tag. This allows us to deregister an Adobe ID,
    in preparation for logout, even if there is no active loan
    that requires one.
    """
    _db = Session.object_session(patron)
    scheme_attr = '{%s}scheme' % OPDSFeed.DRM_NS
    # Deep-copy so the per-loan originals are not mutated.
    tags = copy.deepcopy(self.adobe_id_tags(_db, patron))
    for tag in tags:
        tag.attrib[scheme_attr] = "http://librarysimplified.org/terms/drm/scheme/ACS"
    return tags
def get_license_status_document(self, loan):
    """Get the License Status Document for a loan.

    For a new loan, create a local loan with no external identifier and
    pass it in to this method. This will create the remote loan if one
    doesn't exist yet. The loan's internal database id will be used to
    receive notifications from the distributor when the loan's status
    changes.

    :raises BadResponseException: if the response is not valid JSON.
    """
    _db = Session.object_session(loan)

    if loan.external_identifier:
        # The remote loan already exists; query its status URL directly.
        url = loan.external_identifier
    else:
        id = loan.license_pool.identifier.identifier
        checkout_id = str(uuid.uuid1())
        default_loan_period = self.collection(_db).default_loan_period(
            loan.patron.library)
        expires = datetime.datetime.utcnow() + datetime.timedelta(
            days=default_loan_period)
        # The patron UUID is generated randomly on each loan, so the
        # distributor doesn't know when multiple loans come from the
        # same patron.
        patron_id = str(uuid.uuid1())
        notification_url = self._url_for(
            "odl_notify",
            library_short_name=loan.patron.library.short_name,
            loan_id=loan.id,
            _external=True,
        )
        params = dict(
            url=self.consolidated_loan_url,
            id=id,
            checkout_id=checkout_id,
            patron_id=patron_id,
            expires=(expires.isoformat() + 'Z'),
            notification_url=notification_url,
        )
        # BUG FIX: the query-string separator before notification_url had
        # been corrupted to a literal '\xac' character ("&not" read as the
        # HTML entity). Restore the intended "&notification_url=" parameter.
        url = "%(url)s?id=%(id)s&checkout_id=%(checkout_id)s&patron_id=%(patron_id)s&expires=%(expires)s&notification_url=%(notification_url)s" % params

    response = self._get(url)
    try:
        status_doc = json.loads(response.content)
    except ValueError:
        raise BadResponseException(
            url, "License Status Document was not valid JSON.")
    # NOTE(review): status_doc is parsed but never returned here --
    # confirm a trailing `return status_doc` isn't missing.
def __init__(self, library, contributor_name, parent=None, languages=None, audiences=None):
    """Create a lane of works by the named contributor.

    :raises ValueError: if `contributor_name` is empty.
    """
    if not contributor_name:
        raise ValueError("ContributorLane can't be created without contributor")
    self.contributor_name = contributor_name
    super(ContributorLane, self).initialize(
        library,
        display_name=contributor_name,
        audiences=audiences,
        languages=languages,
    )
    if parent:
        parent.children.append(self)
    session = Session.object_session(library)
    self.contributors = session.query(Contributor).filter(
        or_(*self.contributor_name_clauses)).all()
def update_formats(self, licensepool):
    """Update the format information for a single book."""
    info = self.metadata_lookup(licensepool.identifier)
    metadata = OverdriveRepresentationExtractor.book_info_to_metadata(
        info, include_bibliographic=False, include_formats=True)
    circulation_data = metadata.circulation
    # The identifier in the CirculationData needs to match the
    # identifier associated with the LicensePool -- otherwise a new
    # LicensePool will be created.
    circulation_data._primary_identifier.identifier = (
        licensepool.identifier.identifier
    )
    replacement_policy = ReplacementPolicy(formats=True)
    session = Session.object_session(licensepool)
    circulation_data.apply(session, licensepool.collection, replacement_policy)
def patron_activity(self, patron, pin):
    """Return a LoanInfo for each of this patron's loans in this collection."""
    session = Session.object_session(patron)
    loans = (
        session.query(Loan)
        .join(Loan.license_pool)
        .filter(LicensePool.collection_id == self.collection_id)
        .filter(Loan.patron == patron)
    )
    result = []
    for loan in loans:
        pool = loan.license_pool
        result.append(LoanInfo(
            pool.collection,
            pool.data_source.name,
            pool.identifier.type,
            pool.identifier.identifier,
            loan.start,
            loan.end,
        ))
    return result
def __init__(self, collection, api_class=EnkiAPI, **kwargs):
    """Constructor.

    :param collection: Provide bibliographic coverage to all
        Enki books in the given Collection.
    :param api_class: Instantiate this class with the given Collection,
        rather than instantiating EnkiAPI.
    """
    session = Session.object_session(collection)
    super(EnkiBibliographicCoverageProvider, self).__init__(
        collection, **kwargs)
    if isinstance(api_class, EnkiAPI):
        # We were given a specific EnkiAPI instance to use.
        self.api = api_class
    else:
        self.api = api_class(session, collection)
    self.parser = BibliographicParser()
def __init__(self, integration, library=None):
    """Set up Google Analytics tracking for one library.

    :raises CannotLoadConfiguration: when `library` is missing or has
        no tracking id configured.
    """
    _db = Session.object_session(integration)
    if not library:
        raise CannotLoadConfiguration(
            "Google Analytics can't be configured without a library.")
    url_setting = ConfigurationSetting.for_externalintegration(
        ExternalIntegration.URL, integration)
    # Fall back to the default endpoint when no URL is configured.
    self.url = url_setting.value or self.DEFAULT_URL
    self.tracking_id = ConfigurationSetting.for_library_and_externalintegration(
        _db, self.TRACKING_ID, library, integration,
    ).value
    if not self.tracking_id:
        raise CannotLoadConfiguration(
            "Missing tracking id for library %s" % library.short_name)
def fulfill(self, patron, pin, licensepool, internal_format, **kwargs):
    """Retrieve a bearer token that can be used to download the book.

    :param kwargs: A container for arguments to fulfill()
        which are not relevant to this vendor.

    :return: a FulfillmentInfo object.
    :raises CannotFulfill: if no acquisition link matches
        `internal_format`.
    """
    links = licensepool.identifier.links
    # Find the acquisition link with the right media type.
    for link in links:
        media_type = link.resource.representation.media_type
        if link.rel == Hyperlink.GENERIC_OPDS_ACQUISITION and media_type == internal_format:
            url = link.resource.representation.url

            # Obtain a Credential with the information from our
            # bearer token.
            _db = Session.object_session(licensepool)
            credential = self._get_token(_db)

            # Build a application/vnd.librarysimplified.bearer-token
            # document using information from the credential.
            now = datetime.datetime.utcnow()
            expiration = int((credential.expires - now).total_seconds())
            token_document = dict(
                token_type="Bearer",
                access_token=credential.credential,
                expires_in=expiration,
                location=url,
            )

            return FulfillmentInfo(
                licensepool.collection,
                licensepool.data_source.name,
                licensepool.identifier.type,
                licensepool.identifier.identifier,
                content_link=None,
                content_type=DeliveryMechanism.BEARER_TOKEN,
                content=json.dumps(token_document),
                content_expires=credential.expires,
            )
    # We couldn't find an acquisition link for this book.
    raise CannotFulfill()
def cover_links(cls, work):
    """The content server sends out _all_ cover links for the work.

    For books covered by Gutenberg Illustrated, this can be over a
    hundred cover links.
    """
    _db = Session.object_session(work)
    ids = work.all_identifier_ids()
    image_resources = Identifier.resources_for_identifier_ids(
        _db, ids, Resource.IMAGE)
    thumbnails = []
    full = []
    for cover in image_resources:
        mirrored = cover.mirrored_path
        if mirrored:
            full.append(mirrored)
        scaled = cover.scaled_path
        if scaled:
            thumbnails.append(scaled)
    return thumbnails, full
def single_loan_feed(cls, circulation, loan, test_mode=False):
    """Build an OPDS feed containing a single entry for `loan`'s work."""
    db = Session.object_session(loan)
    pool = loan.license_pool
    work = pool.work or pool.presentation_edition.work
    annotator = cls(
        circulation, None,
        active_loans_by_work={work: loan},
        active_holds_by_work={},
        test_mode=test_mode,
    )
    identifier = pool.identifier
    url = annotator.url_for(
        'loan_or_hold_detail',
        data_source=pool.data_source.name,
        identifier_type=identifier.type,
        identifier=identifier.identifier,
        _external=True,
    )
    if not work:
        # The loan is real but we know nothing about the book.
        return AcquisitionFeed(
            db, "Active loan for unknown work", url, [], annotator)
    return AcquisitionFeed.single_entry(db, work, annotator)
def _update_hold_position(self, hold):
    """Recompute `hold.position` from the current loan and hold counts.

    Position 0 means the hold is ready for checkout.
    """
    _db = Session.object_session(hold)
    pool = hold.license_pool
    now = datetime.datetime.utcnow()
    # Count loans that are still active (no end date, or not yet ended).
    loans_count = _db.query(Loan).filter(
        Loan.license_pool_id == pool.id,
    ).filter(
        or_(Loan.end == None, Loan.end > now)
    ).count()
    holds_count = self._count_holds_before(hold)
    remaining_licenses = pool.licenses_owned - loans_count
    if remaining_licenses > holds_count:
        # The hold is ready to check out.
        hold.position = 0
    else:
        # Add 1 since position 0 indicates the hold is ready.
        hold.position = holds_count + 1
def __init__(self, collection, lookup_client=None, **kwargs):
    """Since we are processing a specific collection, we must be able
    to get an _authenticated_ metadata wrangler lookup client for the
    collection.
    """
    _db = Session.object_session(collection)
    if not lookup_client:
        lookup_client = MetadataWranglerOPDSLookup.from_config(
            _db, collection=collection
        )
    super(BaseMetadataWranglerCoverageProvider, self).__init__(
        collection, lookup_client, **kwargs
    )
    if not self.lookup_client.authenticated:
        raise CannotLoadConfiguration(
            "Authentication for the Library Simplified Metadata Wrangler "
            "is not set up. Without this, there is no way to register "
            "your identifiers with the metadata wrangler."
        )
def _get_lcp_passphrase(self, patron):
    """Returns a patron's LCP passphrase

    :return: Patron's LCP passphrase
    :rtype: string
    """
    session = Session.object_session(patron)
    self._logger.info("Started fetching a patron's LCP passphrase")
    lcp_passphrase = self._credential_factory.get_patron_passphrase(
        session, patron)
    self._logger.info(
        "Finished fetching a patron's LCP passphrase: {0}".format(
            lcp_passphrase))
    return lcp_passphrase
def apply(self, loan, autocommit=True):
    """Set an appropriate LicensePoolDeliveryMechanism on the given
    `Loan`, creating a DeliveryMechanism if necessary.

    :param loan: A Loan object.
    :param autocommit: Set this to false if you are in the middle
        of a nested transaction.
    :return: A LicensePoolDeliveryMechanism if one could be set on the
        given Loan; None otherwise.
    """
    _db = Session.object_session(loan)

    # Create or update the DeliveryMechanism.
    delivery_mechanism, is_new = DeliveryMechanism.lookup(
        _db, self.content_type, self.drm_scheme
    )

    if (loan.fulfillment
        and loan.fulfillment.delivery_mechanism == delivery_mechanism):
        # The work has already been done. Do nothing.
        return

    # At this point we know we need to update the local delivery
    # mechanism.
    pool = loan.license_pool
    if not pool:
        # This shouldn't happen, but bail out if it does.
        return None

    # Look up the LicensePoolDeliveryMechanism for the way the
    # server says this book is available, creating the object if
    # necessary.
    #
    # We set autocommit=False because we're probably in the middle
    # of a nested transaction.
    lpdm = LicensePoolDeliveryMechanism.set(
        pool.data_source, pool.identifier,
        self.content_type, self.drm_scheme,
        self.rights_uri, self.resource,
        autocommit=autocommit
    )
    loan.fulfillment = lpdm
    return lpdm
def patron_activity(self, patron, pin):
    """Look up non-expired loans for this collection in the database.

    Also cleans up the patron's holds as a side effect: expired holds
    are deleted (updating each affected hold queue), and the end date
    and position of the remaining holds are refreshed.

    :return: A list of LoanInfo objects followed by HoldInfo objects.
    """
    _db = Session.object_session(patron)
    loans = _db.query(Loan).join(Loan.license_pool).filter(
        LicensePool.collection_id == self.collection_id).filter(
        Loan.patron == patron).filter(
        Loan.end >= datetime.datetime.utcnow())

    # Get the patron's holds. If there are any expired holds, delete them.
    # Update the end date and position for the remaining holds.
    holds = _db.query(Hold).join(Hold.license_pool).filter(
        LicensePool.collection_id == self.collection_id).filter(
        Hold.patron == patron)
    remaining_holds = []
    for hold in holds:
        if hold.end and hold.end < datetime.datetime.utcnow():
            # Deleting a hold may free a license for the next patron.
            _db.delete(hold)
            self.update_hold_queue(hold.license_pool)
        else:
            self._update_hold_end_date(hold)
            remaining_holds.append(hold)

    return [
        LoanInfo(
            loan.license_pool.collection,
            loan.license_pool.data_source.name,
            loan.license_pool.identifier.type,
            loan.license_pool.identifier.identifier,
            loan.start,
            loan.end,
            external_identifier=loan.external_identifier,
        ) for loan in loans
    ] + [
        HoldInfo(
            hold.license_pool.collection,
            hold.license_pool.data_source.name,
            hold.license_pool.identifier.type,
            hold.license_pool.identifier.identifier,
            start_date=hold.start,
            end_date=hold.end,
            hold_position=hold.position,
        ) for hold in remaining_holds
    ]
def drm_device_registration_tags(self, license_pool, active_loan, delivery_mechanism):
    """Construct OPDS Extensions for DRM tags that explain how to
    register a device with the DRM server that manages this loan.

    :param delivery_mechanism: A DeliveryMechanism
    :return: A list of tags; empty unless this is an active Adobe DRM loan.
    """
    if not active_loan or not delivery_mechanism:
        return []

    if delivery_mechanism.drm_scheme == DeliveryMechanism.ADOBE_DRM:
        # Get an identifier for the patron that will be registered
        # with the DRM server.
        # (Removed an unused `_db = Session.object_session(active_loan)`
        # local -- this branch never touched the database directly.)
        patron = active_loan.patron

        # Generate a <drm:licensor> tag that can feed into the
        # Vendor ID service.
        return self.adobe_id_tags(patron)
    return []
def add_web_client_urls(self, record, library, identifier, integration=None):
    """Add an 856 field to `record` for each configured web client,
    pointing at this identifier's work in that client."""
    _db = Session.object_session(library)
    base_urls = []
    # A web client URL configured directly on the MARC export integration.
    if integration:
        marc_setting = self.value(MARCExporter.WEB_CLIENT_URL, integration)
        if marc_setting:
            base_urls.append(marc_setting)
    # Web client URLs recorded during registration with a registry.
    from api.registration.registry import Registration
    registration_settings = _db.query(ConfigurationSetting).filter(
        ConfigurationSetting.key == Registration.LIBRARY_REGISTRATION_WEB_CLIENT,
        ConfigurationSetting.library_id == library.id,
    )
    base_urls.extend(s.value for s in registration_settings if s.value)
    qualified_identifier = urllib.parse.quote(
        identifier.type + "/" + identifier.identifier, safe="")
    for web_client_base_url in base_urls:
        link = "{}/{}/works/{}".format(
            self.base_url,
            library.short_name,
            qualified_identifier,
        )
        encoded_link = urllib.parse.quote(link, safe="")
        url = "{}/book/{}".format(web_client_base_url, encoded_link)
        record.add_field(
            Field(
                tag="856",
                indicators=["4", "0"],
                subfields=["u", url],
            ))
def fulfill(self, patron, pin, licensepool, internal_format):
    """Fulfill a loan by building a bearer-token document the client can
    use to download the book.

    :return: a FulfillmentInfo whose content is an
        application/vnd.librarysimplified.bearer-token document.
    :raises CannotFulfill: if no acquisition link matches
        `internal_format`.
    """
    # Download the book from the appropriate acquisition link and return its content.
    # TODO: Implement https://github.com/NYPL-Simplified/Simplified/wiki/BearerTokenPropagation#advertising-bearer-token-propagation
    # instead.
    links = licensepool.identifier.links
    # Find the acquisition link with the right media type.
    for link in links:
        media_type = link.resource.representation.media_type
        if link.rel == Hyperlink.GENERIC_OPDS_ACQUISITION and media_type == internal_format:
            url = link.resource.representation.url

            # Obtain a Credential with the information from our
            # bearer token.
            _db = Session.object_session(patron)
            credential = self._get_token(_db)

            # Build a application/vnd.librarysimplified.bearer-token
            # document using information from the credential.
            now = datetime.datetime.utcnow()
            expiration = int((credential.expires - now).total_seconds())
            token_document = dict(
                token_type="Bearer",
                access_token=credential.credential,
                expires_in=expiration,
                location=url,
            )

            return FulfillmentInfo(
                licensepool.collection,
                licensepool.data_source.name,
                licensepool.identifier.type,
                licensepool.identifier.identifier,
                content_link=None,
                content_type=DeliveryMechanism.BEARER_TOKEN,
                content=json.dumps(token_document),
                content_expires=credential.expires,
            )
    # We couldn't find an acquisition link for this book.
    raise CannotFulfill()
def check_storage_protocol(self, service):
    """For MARC Export integrations, check that the storage protocol
    corresponds to an existing storage integration.

    :return: A MISSING_SERVICE problem detail on misconfiguration;
        None (implicitly) when the configuration is valid or `service`
        is not a MARC Export integration.
    """
    if service.protocol == MARCExporter.NAME:
        storage_protocol = service.setting(MARCExporter.STORAGE_PROTOCOL).value
        _db = Session.object_session(service)
        integration = ExternalIntegration.lookup(
            _db, storage_protocol, ExternalIntegration.STORAGE_GOAL)
        if not integration:
            return MISSING_SERVICE.detailed(_(
                "You set the storage protocol to %(protocol)s, but no storage service with that protocol is configured.",
                protocol=storage_protocol,
            ))
        if storage_protocol == ExternalIntegration.S3:
            # For S3, the storage service must also have a MARC file bucket.
            bucket = integration.setting(S3Uploader.MARC_BUCKET_KEY).value
            if not bucket:
                return MISSING_SERVICE.detailed(_(
                    "You set the storage protocol to %(protocol)s, but the storage service with that protocol does not have a MARC file bucket configured.",
                    protocol=storage_protocol,
                ))
def _get_lcp_collection(self, patron, collection_name):
    """Returns an LCP collection for a specified library

    NOTE: We assume that there is only ONE LCP collection per library

    :param patron: Patron object
    :type patron: core.model.patron.Patron

    :param collection_name: Name of the collection
    :type collection_name: string

    :return: LCP collection
    :rtype: core.model.collection.Collection
    """
    session = Session.object_session(patron)
    lcp_collection, _ = Collection.by_name_and_protocol(
        session, collection_name, ExternalIntegration.LCP)
    # The collection must exist and belong to the patron's library.
    if lcp_collection and lcp_collection in patron.library.collections:
        return lcp_collection
    return MISSING_COLLECTION
def release_hold(self, patron, pin, licensepool):
    """Cancel a hold."""
    _db = Session.object_session(patron)
    hold = get_one(
        _db, Hold,
        license_pool_id=licensepool.id,
        patron=patron,
    )
    if not hold:
        raise NotOnHold()
    # Removing the hold may free up a license. If nobody else was
    # waiting, the book becomes generally available; otherwise the
    # license is reserved for the next patron in the queue, whose
    # hold must be updated.
    _db.delete(hold)
    self.update_hold_queue(licensepool)
    return True
def add_configuration_links(self, feed):
    """Add the library's configured links (terms, about, help, ...)
    to `feed`.

    :param feed: Either an OPDSFeed or a bare ElementTree element.
    """
    # (Removed an unused `_db = Session.object_session(self.library)`
    # local -- nothing here queries the database directly.)

    def _add_link(l):
        if isinstance(feed, OPDSFeed):
            feed.add_link_to_feed(feed.feed, **l)
        else:
            # This is an ElementTree object.
            link = OPDSFeed.link(**l)
            feed.append(link)

    for rel in self.CONFIGURATION_LINKS:
        setting = ConfigurationSetting.for_library(rel, self.library)
        if setting.value:
            d = dict(href=setting.value, type="text/html", rel=rel)
            _add_link(d)

    # Renamed the loop variable from `type` to avoid shadowing the builtin.
    for media_type, value in Configuration.help_uris(self.library):
        d = dict(href=value, rel="help")
        if media_type:
            d['type'] = media_type
        _add_link(d)
def active_loans_for(cls, circulation, patron, test_mode=False):
    """Build an OPDS feed of the patron's active loans and holds."""
    db = Session.object_session(patron)
    active_loans_by_work = {
        loan.work: loan for loan in patron.loans if loan.work
    }
    active_holds_by_work = {
        hold.work: hold for hold in patron.holds if hold.work
    }
    annotator = cls(
        circulation, None, patron,
        active_loans_by_work, active_holds_by_work,
        test_mode=test_mode,
    )
    url = annotator.url_for('active_loans', _external=True)
    works = patron.works_on_loan_or_on_hold()
    feed_obj = AcquisitionFeed(db, "Active loans and holds", url, works, annotator)
    annotator.annotate_feed(feed_obj, None)
    return feed_obj
def __init__(self, library, contributor_name, parent=None, languages=None, audiences=None):
    """Initialize a lane for works by a single contributor.

    :raises ValueError: when `contributor_name` is missing.
    """
    if not contributor_name:
        raise ValueError(
            "ContributorLane can't be created without contributor")
    self.contributor_name = contributor_name
    display_name = contributor_name
    super(ContributorLane, self).initialize(
        library,
        display_name=display_name,
        audiences=audiences,
        languages=languages,
    )
    if parent:
        parent.children.append(self)
    session = Session.object_session(library)
    query = session.query(Contributor).filter(
        or_(*self.contributor_name_clauses))
    self.contributors = query.all()
def active_loans_for(cls, circulation, patron, test_mode=False):
    """Build an OPDS feed of the patron's active loans and holds."""
    db = Session.object_session(patron)
    loans_map = {}
    for loan in patron.loans:
        work = loan.work
        if work:
            loans_map[work] = loan
    holds_map = {}
    for hold in patron.holds:
        work = hold.work
        if work:
            holds_map[work] = hold
    annotator = cls(
        circulation, None, patron.library, patron,
        loans_map, holds_map,
        test_mode=test_mode,
    )
    url = annotator.url_for(
        'active_loans',
        library_short_name=patron.library.short_name,
        _external=True,
    )
    works = patron.works_on_loan_or_on_hold()
    feed_obj = AcquisitionFeed(db, "Active loans and holds", url, works, annotator)
    annotator.annotate_feed(feed_obj, None)
    return feed_obj
def complaints(cls, library, title, url, annotator, pagination=None):
    """Build a paginated OPDS feed of works whose license pools have
    complaints filed against them.

    :param library: The Library whose complaints are listed.
    :param title: Feed title.
    :param url: URL of this feed.
    :param annotator: Provides navigation URLs and feed annotations.
    :param pagination: Optional Pagination; defaults to the standard page.
    :return: The feed serialized as a unicode string.
    """
    _db = Session.object_session(library)
    facets = Facets.default(library)
    pagination = pagination or Pagination.default()
    q = LicensePool.with_complaint(library)
    results = pagination.apply(q).all()
    if len(results) > 0:
        # Each result row is (LicensePool, complaint count).
        (pools, counts) = zip(*results)
    else:
        pools = ()
    works = [pool.work for pool in pools]
    feed = cls(_db, title, url, works, annotator)
    # Render a 'start' link
    top_level_title = annotator.top_level_title()
    start_uri = annotator.groups_url(None)
    AdminFeed.add_link_to_feed(feed.feed, href=start_uri, rel="start", title=top_level_title)
    # Render an 'up' link, same as the 'start' link to indicate top-level feed
    AdminFeed.add_link_to_feed(feed.feed, href=start_uri, rel="up", title=top_level_title)
    if len(works) > 0:
        # There are works in this list. Add a 'next' link.
        AdminFeed.add_link_to_feed(feed.feed, rel="next", href=annotator.complaints_url(facets, pagination.next_page))
    if pagination.offset > 0:
        AdminFeed.add_link_to_feed(feed.feed, rel="first", href=annotator.complaints_url(facets, pagination.first_page))
    previous_page = pagination.previous_page
    if previous_page:
        AdminFeed.add_link_to_feed(feed.feed, rel="previous", href=annotator.complaints_url(facets, previous_page))
    annotator.annotate_feed(feed)
    return unicode(feed)
def value(self, key, integration):
    """Look up the ConfigurationSetting value for `key`, scoped to this
    exporter's library and the given integration."""
    session = Session.object_session(integration)
    setting = ConfigurationSetting.for_library_and_externalintegration(
        session, key, self.library, integration)
    return setting.value
def parse_book(cls, collection, g, uri, title):
    """Turn an RDF graph into a Edition for the given `uri` and `title`.

    :param collection: The Collection the resulting LicensePool belongs to.
    :param g: An RDF graph describing one Gutenberg book (presumably an
        rdflib Graph -- it supports .triples() -- TODO confirm).
    :param uri: The RDF subject URI identifying the book.
    :param title: The book's title as found in the graph.
    :return: A 3-tuple (edition, license_pool, new) where `new` reflects
        whether the Edition was newly created.
    """
    # The Gutenberg ID is embedded in the URI.
    source_id = unicode(cls.ID_IN_URI.search(uri).groups()[0])
    primary_identifier = IdentifierData(
        Identifier.GUTENBERG_ID, source_id
    )

    # Split a subtitle out from the main title: everything after the
    # first newline (CRLF checked first) becomes the subtitle.
    title = unicode(title)
    subtitle = None
    for separator in "\r\n", "\n":
        if separator in title:
            parts = title.split(separator)
            title = parts[0]
            subtitle = "\n".join(parts[1:])
            break

    # Publication date, parsed with the class's expected date format.
    issued = cls._value(g, (uri, cls.dcterms.issued, None))
    issued = datetime.datetime.strptime(issued, cls.DATE_FORMAT).date()

    # Map the free-text rights statement to a rights URI; an absent
    # statement maps from the empty string.
    rights = cls._value(g, (uri, cls.dcterms.rights, None))
    if rights:
        rights = str(rights)
    else:
        rights = ''
    rights_uri = RightsStatus.rights_uri_from_string(rights)

    # As far as I can tell, Gutenberg descriptions are 100%
    # useless for our purposes. They should not be used, even if
    # no other description is available.

    publisher = cls._value(g, (uri, cls.dcterms.publisher, None))

    # Collect three-letter language codes; two-letter codes from the
    # graph are converted via LanguageCodes.two_to_three.
    languages = []
    for ignore, ignore, language_uri in g.triples(
            (uri, cls.dcterms.language, None)):
        code = str(cls._value(g, (language_uri, cls.rdf.value, None)))
        code = LanguageCodes.two_to_three[code]
        if code:
            languages.append(code)

    # Prefer English when present; otherwise take the first language.
    if 'eng' in languages:
        language = 'eng'
    elif languages:
        language = languages[0]
    else:
        language = None

    # Every dcterms:creator becomes an author contributor.
    contributors = []
    for ignore, ignore, author_uri in g.triples((uri, cls.dcterms.creator, None)):
        name = cls._value(g, (author_uri, cls.gutenberg.name, None))
        aliases = cls._values(g, (author_uri, cls.gutenberg.alias, None))
        contributors.append(ContributorData(
            sort_name=name,
            aliases=aliases,
            roles=[Contributor.AUTHOR_ROLE],
        ))

    # Classify under whatever subject vocabulary each dcam:memberOf
    # URI maps to in Subject.by_uri.
    subjects = []
    subject_links = cls._values(g, (uri, cls.dcterms.subject, None))
    for subject in subject_links:
        value = cls._value(g, (subject, cls.rdf.value, None))
        vocabulary = cls._value(g, (subject, cls.dcam.memberOf, None))
        vocabulary = Subject.by_uri[str(vocabulary)]
        subjects.append(SubjectData(vocabulary, value))

    # Default medium; may be overridden below based on media types.
    medium = Edition.BOOK_MEDIUM

    # Turn the Gutenberg download links into Hyperlinks associated
    # with the new Edition. They will serve either as open access
    # downloads or cover images.
    download_links = cls._values(g, (uri, cls.dcterms.hasFormat, None))
    links = [LinkData(
        rel=Hyperlink.CANONICAL,
        href=str(uri),
    )]

    # Gutenberg won't allow us to use any of the download or image
    # links--we have to make our own from an rsynced mirror--but
    # we can look through the links to determine which medium to
    # assign to this book.
    formats = []
    for href in download_links:
        for format_uri in cls._values(
                g, (href, cls.dcterms['format'], None)):
            media_type = unicode(
                cls._value(g, (format_uri, cls.rdf.value, None)))
            if media_type.startswith('audio/'):
                # Audio formats flip the medium and are delivered as MP3.
                medium = Edition.AUDIO_MEDIUM
                formats.append(FormatData(
                    content_type=Representation.MP3_MEDIA_TYPE,
                    drm_scheme=DeliveryMechanism.NO_DRM,
                ))
            elif media_type.startswith('video/'):
                # Video flips the medium but adds no delivery format.
                medium = Edition.VIDEO_MEDIUM
            else:
                # NOTE(review): every non-audio/non-video format URI adds
                # an EPUB FormatData, so a book with several such links
                # yields several identical entries -- presumably
                # deduplicated downstream; confirm.
                formats.append(FormatData(
                    content_type=Representation.EPUB_MEDIA_TYPE,
                    drm_scheme=DeliveryMechanism.NO_DRM,
                    rights_uri=rights_uri,
                ))

    # Assemble the bibliographic metadata and apply it to a (possibly
    # new) Edition in the collection's database session.
    _db = Session.object_session(collection)
    metadata = Metadata(
        data_source=DataSource.GUTENBERG,
        title=title,
        subtitle=subtitle,
        language=language,
        publisher=publisher,
        issued=issued,
        medium=medium,
        primary_identifier=primary_identifier,
        subjects=subjects,
        contributors=contributors,
        links=links,
    )
    edition, new = metadata.edition(_db)
    metadata.apply(edition, collection)

    # Ensure that an open-access LicensePool exists for this book.
    circulation_data = CirculationData(
        data_source=DataSource.GUTENBERG,
        primary_identifier=primary_identifier,
        formats=formats,
        default_rights_uri=rights_uri,
        links=links,
    )
    license_pool, new_license_pool = circulation_data.license_pool(
        _db, collection
    )
    # Replace any existing delivery formats with the ones found above,
    # then (re)calculate the associated Work.
    replace = ReplacementPolicy(formats=True)
    circulation_data.apply(_db, collection, replace=replace)
    license_pool.calculate_work()
    return edition, license_pool, new
import conf
from core import migration
import web

# Wire project configuration into web.py's global config.
web.config.debug = conf.get_web_config_debug()
web.config.debug_sql = conf.get_config_debug_sql()

# Run pending schema migrations before the application starts serving.
migration.setup_db()
webapp = web.application(conf.URLs, globals())

# Create the DB-backed session exactly once per process; web.config is a
# module-level singleton, so reloads reuse the session stored there.
if web.config.get('_session') is None:
    from core.db import dbutil
    from core.model import Session
    session = web.session.Session(webapp, web.session.DBStore(dbutil.get_dbconn(), Session.get_modelname()), initializer={})
    web.config._session = session
    web.config.session_parameters['timeout'] = conf.get_session_timeout()
else:
    session = web.config._session

def session_hook():
    # Expose the shared session on the per-request context.
    web.ctx.session = session

# Run session_hook at the start of every request.
webapp.add_processor(web.loadhook(session_hook))

def notfound():
    # Plain-text 404 body. NOTE(review): not registered here -- presumably
    # referenced via conf.URLs or web.py's notfound hook elsewhere; confirm.
    return "Not Found"

def main():
    # Start web.py's built-in server for this application.
    webapp.run()