Example #1
    def test_missing_coverage_from_with_cutoff_date(self):
        gutenberg = DataSource.lookup(self._db, DataSource.GUTENBERG)
        oclc = DataSource.lookup(self._db, DataSource.OCLC)
        web = DataSource.lookup(self._db, DataSource.WEB)

        # Here's an Edition with a coverage record from OCLC Classify.
        edition, ignore = Edition.for_foreign_id(
            self._db, gutenberg, Identifier.GUTENBERG_ID, "1")
        identifier = edition.primary_identifier
        coverage = self._coverage_record(edition, oclc)

        # The CoverageRecord knows when the coverage was provided.
        timestamp = coverage.timestamp

        # If we ask for Identifiers that are missing coverage records
        # as of that time, we see nothing.
        assert ([] == Identifier.missing_coverage_from(
            self._db, [identifier.type],
            oclc,
            count_as_missing_before=timestamp).all())

        # But if we give a time one second later, the Identifier is
        # missing coverage.
        assert [identifier] == Identifier.missing_coverage_from(
            self._db,
            [identifier.type],
            oclc,
            count_as_missing_before=timestamp + datetime.timedelta(seconds=1),
        ).all()
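
A minimal sketch of how the `count_as_missing_before` cutoff above might be used outside a test: treat OCLC coverage older than some window as stale and list those identifiers for re-processing. The session argument, the import paths, and the 30-day window are assumptions; the `missing_coverage_from` call mirrors the test.

# Sketch only. `_db` is an open database session; the import paths and the
# 30-day window are assumptions, not part of the original code.
import datetime

from core.model import DataSource, Identifier  # assumed import path
from core.util.datetime_helpers import utc_now  # assumed import path

def stale_oclc_coverage(_db, max_age=datetime.timedelta(days=30)):
    # List Gutenberg identifiers counted as missing OCLC coverage as of
    # `cutoff`, using the same query method exercised above.
    oclc = DataSource.lookup(_db, DataSource.OCLC)
    cutoff = utc_now() - max_age
    return Identifier.missing_coverage_from(
        _db, [Identifier.GUTENBERG_ID], oclc,
        count_as_missing_before=cutoff,
    ).all()
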
Example #2
    def test_lookup(self):
        key = DataSource.GUTENBERG

        gutenberg = DataSource.lookup(self._db, DataSource.GUTENBERG)
        assert key == gutenberg.name
        assert True == gutenberg.offers_licenses
        assert key == gutenberg.cache_key()

        # Object has been loaded into cache.
        assert (gutenberg,
                False) == DataSource.by_cache_key(self._db, key, None)

        # Now try creating a new data source.
        key = "New data source"
        new_source = DataSource.lookup(self._db,
                                       key,
                                       autocreate=True,
                                       offers_licenses=True)

        # A new data source has been created.
        assert key == new_source.name
        assert True == new_source.offers_licenses

        assert (new_source,
                False) == DataSource.by_cache_key(self._db, key, None)
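
A minimal sketch of the lookup-or-autocreate pattern this test exercises, for resolving a data source by name at runtime. The `_db` session and the import path are assumptions; the keyword argument matches the autocreate test later in these examples.

# Sketch only. `_db` is an open database session; the import path is assumed.
from core.model import DataSource  # assumed import path

def data_source_for(_db, name):
    # Well-known names (e.g. DataSource.GUTENBERG) resolve to existing rows;
    # unknown names are created on the fly.
    return DataSource.lookup(_db, name, autocreate=True)
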
Example #3
    def test_missing_coverage_from(self):
        gutenberg = DataSource.lookup(self._db, DataSource.GUTENBERG)
        oclc = DataSource.lookup(self._db, DataSource.OCLC)
        web = DataSource.lookup(self._db, DataSource.WEB)

        # Here are two Gutenberg records.
        g1, ignore = Edition.for_foreign_id(self._db, gutenberg,
                                            Identifier.GUTENBERG_ID, "1")

        g2, ignore = Edition.for_foreign_id(self._db, gutenberg,
                                            Identifier.GUTENBERG_ID, "2")

        # One of them has coverage from OCLC Classify
        c1 = self._coverage_record(g1, oclc)

        # The other has coverage from a specific operation on OCLC Classify
        c2 = self._coverage_record(g2, oclc, "some operation")

        # Here's a web record, just sitting there.
        w, ignore = Edition.for_foreign_id(self._db, web, Identifier.URI,
                                           "http://www.foo.com/")

        # If we run missing_coverage_from we pick up the Gutenberg
        # record with no generic OCLC coverage -- its coverage for a
        # specific operation doesn't count. We don't pick up the other
        # Gutenberg record (it has generic coverage), and we don't pick
        # up the web record.
        [in_gutenberg_but_not_in_oclc
         ] = Identifier.missing_coverage_from(self._db,
                                              [Identifier.GUTENBERG_ID],
                                              oclc).all()

        assert g2.primary_identifier == in_gutenberg_but_not_in_oclc

        # If we ask about a specific operation, we get the Gutenberg
        # record that has only generic OCLC coverage, not the one that
        # has coverage for that operation.

        [has_generic_coverage_only
         ] = Identifier.missing_coverage_from(self._db,
                                              [Identifier.GUTENBERG_ID], oclc,
                                              "some operation").all()
        assert g1.primary_identifier == has_generic_coverage_only

        # We don't put web sites into OCLC, so this will pick up the
        # web record (but not the Gutenberg record).
        [in_web_but_not_in_oclc
         ] = Identifier.missing_coverage_from(self._db, [Identifier.URI],
                                              oclc).all()
        assert w.primary_identifier == in_web_but_not_in_oclc

        # We don't use the web as a source of coverage, so this will
        # return both Gutenberg records (but not the web record).
        assert [g1.primary_identifier.id, g2.primary_identifier.id] == sorted([
            x.id for x in Identifier.missing_coverage_from(
                self._db, [Identifier.GUTENBERG_ID], web)
        ])
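
A minimal sketch of the sweep pattern `missing_coverage_from` enables: find identifiers that still need generic OCLC coverage, do some work for each, and record that coverage. The session, import path, and `process()` callback are assumptions, and `CoverageRecord.add_for` only appears with an Edition in these examples, so passing a bare Identifier is an assumption as well.

# Sketch only. `_db` and `process()` are assumptions; whether add_for()
# accepts a bare Identifier (later examples only pass an Edition) is an
# assumption too.
from core.model import CoverageRecord, DataSource, Identifier  # assumed path

def sweep_missing_oclc_coverage(_db, process):
    oclc = DataSource.lookup(_db, DataSource.OCLC)
    needs_work = Identifier.missing_coverage_from(
        _db, [Identifier.GUTENBERG_ID], oclc)
    for identifier in needs_work:
        process(identifier)                       # hypothetical per-identifier work
        CoverageRecord.add_for(identifier, oclc)  # assumed to accept an Identifier
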
Example #4
    def test_lookup_with_autocreate(self):
        name = "Brand new data source " + self._str
        new_source = DataSource.lookup(self._db, name, autocreate=True)
        assert name == new_source.name
        assert False == new_source.offers_licenses

        name = "New data source with licenses" + self._str
        new_source = DataSource.lookup(self._db,
                                       name,
                                       autocreate=True,
                                       offers_licenses=True)
        assert True == new_source.offers_licenses
    def test_unmirrored(self):

        ds = DataSource.lookup(self._db, DataSource.GUTENBERG)
        overdrive = DataSource.lookup(self._db, DataSource.OVERDRIVE)

        c1 = self._default_collection
        c1.data_source = ds

        # Here's an Identifier associated with a collection.
        work = self._work(with_license_pool=True, collection=c1)
        [pool] = work.license_pools
        i1 = pool.identifier

        # This is a random identifier not associated with the collection.
        i2 = self._identifier()

        def m():
            return Hyperlink.unmirrored(c1).all()

        # Identifier is not in the collection.
        not_in_collection, ignore = i2.add_link(Hyperlink.IMAGE, self._url, ds)
        assert [] == m()

        # Hyperlink rel is not mirrorable.
        wrong_type, ignore = i1.add_link("not mirrorable", self._url, ds,
                                         "text/plain")
        assert [] == m()

        # Hyperlink has no associated representation -- it needs to be
        # mirrored, which will create one!
        hyperlink, ignore = i1.add_link(Hyperlink.IMAGE, self._url, ds,
                                        "image/png")
        assert [hyperlink] == m()

        # Representation is already mirrored, so does not show up
        # in the unmirrored list.
        representation = hyperlink.resource.representation
        representation.set_as_mirrored(self._url)
        assert [] == m()

        # Representation exists in database but is not mirrored -- it needs
        # to be mirrored!
        representation.mirror_url = None
        assert [hyperlink] == m()

        # Hyperlink is associated with a data source other than the
        # data source of the collection. It ought to be mirrored, but
        # this collection isn't responsible for mirroring it.
        hyperlink.data_source = overdrive
        assert [] == m()
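
A minimal sketch of the mirroring job `Hyperlink.unmirrored` is built for: upload each unmirrored representation and record its public URL. The `upload()` callable and the URL it returns are hypothetical; `unmirrored()` and `set_as_mirrored()` are the calls exercised above.

# Sketch only. `upload()` and its returned public URL are hypothetical.
from core.model import Hyperlink  # assumed import path

def mirror_collection_images(collection, upload):
    for hyperlink in Hyperlink.unmirrored(collection):
        representation = hyperlink.resource.representation
        if representation is None:
            # As noted above, a hyperlink may not have a Representation yet;
            # fetching or creating one is outside this sketch.
            continue
        public_url = upload(representation)        # hypothetical uploader
        representation.set_as_mirrored(public_url)
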
    def test_isbns_updated_since(self):
        i1 = self._identifier(identifier_type=Identifier.ISBN, foreign_id=self._isbn)
        i2 = self._identifier(identifier_type=Identifier.ISBN, foreign_id=self._isbn)
        i3 = self._identifier(identifier_type=Identifier.ISBN, foreign_id=self._isbn)
        i4 = self._identifier(identifier_type=Identifier.ISBN, foreign_id=self._isbn)

        timestamp = utc_now()

        # An empty catalog returns nothing.
        assert [] == self.collection.isbns_updated_since(self._db, None).all()

        # Give the ISBNs some coverage.
        content_cafe = DataSource.lookup(self._db, DataSource.CONTENT_CAFE)
        for isbn in [i2, i3, i1]:
            self._coverage_record(isbn, content_cafe)

        # Give one ISBN more than one coverage record.
        oclc = DataSource.lookup(self._db, DataSource.OCLC)
        i1_oclc_record = self._coverage_record(i1, oclc)

        def assert_isbns(expected, result_query):
            results = [r[0] for r in result_query]
            assert expected == results

        # When no timestamp is given, all ISBNs in the catalog are returned,
        # in order of their CoverageRecord timestamp.
        self.collection.catalog_identifiers([i1, i2])
        updated_isbns = self.collection.isbns_updated_since(self._db, None).all()
        assert_isbns([i2, i1], updated_isbns)

        # That CoverageRecord timestamp is also returned.
        i1_timestamp = updated_isbns[1][1]
        assert isinstance(i1_timestamp, datetime.datetime)
        assert i1_oclc_record.timestamp == i1_timestamp

        # When a timestamp is passed, only ISBNs that have been updated
        # since then are returned.
        timestamp = utc_now()
        i1.coverage_records[0].timestamp = utc_now()
        updated_isbns = self.collection.isbns_updated_since(self._db, timestamp)
        assert_isbns([i1], updated_isbns)

        # Prepare an ISBN associated with a Work.
        work = self._work(with_license_pool=True)
        work.license_pools[0].identifier = i2
        i2.coverage_records[0].timestamp = utc_now()

        # ISBNs that have a Work will be ignored.
        updated_isbns = self.collection.isbns_updated_since(self._db, timestamp)
        assert_isbns([i1], updated_isbns)
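
A minimal sketch of consuming `isbns_updated_since` incrementally: each row is an (identifier, timestamp) pair, so the newest timestamp seen can serve as the cutoff for the next run. The cutoff bookkeeping and the `export()` callback are assumptions.

# Sketch only. `export()` and the cutoff bookkeeping are assumptions; the
# (identifier, timestamp) row shape matches the test above.
def export_updated_isbns(_db, collection, last_cutoff, export):
    newest = last_cutoff
    for row in collection.isbns_updated_since(_db, last_cutoff):
        identifier, updated_at = row[0], row[1]
        export(identifier)                   # hypothetical downstream step
        if newest is None or updated_at > newest:
            newest = updated_at
    return newest                            # use as the cutoff next time
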
    def test_quality_as_thumbnail_image(self):

        # Get some data sources ready, since a big part of image
        # quality comes from data source.
        gutenberg = DataSource.lookup(self._db, DataSource.GUTENBERG)
        gutenberg_cover_generator = DataSource.lookup(
            self._db, DataSource.GUTENBERG_COVER_GENERATOR)
        overdrive = DataSource.lookup(self._db, DataSource.OVERDRIVE)
        metadata_wrangler = DataSource.lookup(self._db,
                                              DataSource.METADATA_WRANGLER)

        # Here's a book with a thumbnail image.
        edition, pool = self._edition(with_license_pool=True)
        hyperlink, ignore = pool.add_link(Hyperlink.THUMBNAIL_IMAGE, self._url,
                                          overdrive)
        resource = hyperlink.resource

        # Without a representation, the thumbnail image is useless.
        assert 0 == resource.quality_as_thumbnail_image

        ideal_height = Identifier.IDEAL_IMAGE_HEIGHT
        ideal_width = Identifier.IDEAL_IMAGE_WIDTH

        cover = self.sample_cover_representation("tiny-image-cover.png")
        resource.representation = cover
        assert 1.0 == resource.quality_as_thumbnail_image

        # Changing the image aspect ratio affects the quality as per
        # thumbnail_size_quality_penalty.
        cover.image_height = ideal_height * 2
        cover.image_width = ideal_width
        assert 0.5 == resource.quality_as_thumbnail_image

        # Changing the data source also affects the quality. Gutenberg
        # covers are penalized heavily...
        cover.image_height = ideal_height
        cover.image_width = ideal_width
        resource.data_source = gutenberg
        assert 0.5 == resource.quality_as_thumbnail_image

        # The Gutenberg cover generator is penalized less heavily.
        resource.data_source = gutenberg_cover_generator
        assert 0.6 == resource.quality_as_thumbnail_image

        # The metadata wrangler actually gets a _bonus_, to encourage the
        # use of its covers over those provided by license sources.
        resource.data_source = metadata_wrangler
        assert 2 == resource.quality_as_thumbnail_image
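
A minimal sketch of using `quality_as_thumbnail_image` as a ranking key: pick the best candidate thumbnail, skipping resources whose score is zero (no usable representation). The `candidates` iterable is assumed.

# Sketch only. `candidates` is assumed to be an iterable of Resource objects
# attached via THUMBNAIL_IMAGE links.
def best_thumbnail(candidates):
    usable = [r for r in candidates if r.quality_as_thumbnail_image > 0]
    if not usable:
        return None
    return max(usable, key=lambda r: r.quality_as_thumbnail_image)
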
Example #8
    def from_dict(self, data):
        _db = self._db

        # Identify the source of the event.
        source_name = data["source"]
        source = DataSource.lookup(_db, source_name)

        # Identify which LicensePool the event is talking about.
        foreign_id = data["id"]
        identifier_type = source.primary_identifier_type
        collection = data["collection"]

        license_pool, was_new = LicensePool.for_foreign_id(
            _db, source, identifier_type, foreign_id, collection=collection)

        # Finally, gather some information about the event itself.
        type = data.get("type")
        start = self._get_datetime(data, "start")
        end = self._get_datetime(data, "end")
        old_value = self._get_int(data, "old_value")
        new_value = self._get_int(data, "new_value")
        delta = self._get_int(data, "delta")
        event, was_new = get_one_or_create(
            _db,
            CirculationEvent,
            license_pool=license_pool,
            type=type,
            start=start,
            create_method_kwargs=dict(old_value=old_value,
                                      new_value=new_value,
                                      delta=delta,
                                      end=end),
        )
        return event, was_new
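
An illustrative input for `from_dict` above, showing only the keys the method reads. The concrete values, the event-type string, and the date format are assumptions; `monitor` stands for whatever object defines `from_dict`.

from core.model import DataSource  # assumed import path

def example_event_dict(collection):
    # Illustrative only: the values, the event-type string, and the ISO date
    # format are assumptions; the keys are the ones from_dict() reads above.
    return {
        "source": DataSource.OVERDRIVE,      # name of the DataSource
        "id": "a-vendor-title-id",           # foreign ID for the LicensePool
        "collection": collection,            # the Collection the pool belongs to
        "type": "a circulation event type",  # assumed value
        "start": "2022-01-01T00:00:00+00:00",
        "end": "2022-01-01T00:05:00+00:00",
        "old_value": 3,
        "new_value": 2,
        "delta": -1,
    }

# Usage (hypothetical): event, was_new = monitor.from_dict(example_event_dict(collection))
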
    def setup_method(self):
        super(TestUniquenessConstraints, self).setup_method()
        self.data_source = DataSource.lookup(self._db, DataSource.OVERDRIVE)
        self.type = "a credential type"
        self.patron = self._patron()
        self.col1 = self._default_collection
        self.col2 = self._collection()
    def test_force_refresher_method(self):
        # Ensure that passing `force_refresh=True` triggers the
        # refresher method, even when none of the usual conditions
        # are satisfied.

        def refresher(self):
            raise Exception("Refresher method was called")

        # Create a persistent token and ensure that it's present
        data_source = DataSource.lookup(self._db, DataSource.ADOBE)
        patron = self._patron()
        token, is_new = Credential.persistent_token_create(
            self._db, data_source, "some random type", patron)
        assert data_source == token.data_source
        assert "some random type" == token.type
        assert patron == token.patron

        # We'll vary the `force_refresh` setting, but otherwise
        # use the same parameters for the next two calls to `lookup`.
        args = self._db, data_source, token.type, patron, refresher

        # This call should not run the refresher method.
        again_token = Credential.lookup(*args,
                                        allow_persistent_token=True,
                                        force_refresh=False)
        assert again_token == token

        # This call should run the refresher method.
        with pytest.raises(Exception) as excinfo:
            Credential.lookup(*args,
                              allow_persistent_token=True,
                              force_refresh=True)
        assert "Refresher method was called" in str(excinfo.value)
    def test_empty_token(self):
        # Test the behavior when a credential is empty.

        # First, create a token with an empty credential.
        data_source = DataSource.lookup(self._db, DataSource.ADOBE)
        token, is_new = Credential.persistent_token_create(
            self._db, data_source, "i am empty", None)
        token.credential = None

        # If allow_empty_token is true, the token is returned as-is
        # and the refresher method is not called.
        def refresher(self):
            raise Exception("Refresher method was called")

        args = (
            self._db,
            data_source,
            token.type,
            None,
            refresher,
        )
        again_token = Credential.lookup(*args,
                                        allow_persistent_token=True,
                                        allow_empty_token=True)
        assert again_token == token

        # If allow_empty_token is False, the refresher method is called.
        with pytest.raises(Exception) as excinfo:
            Credential.lookup(*args,
                              allow_persistent_token=True,
                              allow_empty_token=False)
        assert "Refresher method was called" in str(excinfo.value)
    def test_persistent_token(self):

        # Create a persistent token.
        data_source = DataSource.lookup(self._db, DataSource.ADOBE)
        patron = self._patron()
        token, is_new = Credential.persistent_token_create(
            self._db, data_source, "some random type", patron)
        assert data_source == token.data_source
        assert "some random type" == token.type
        assert patron == token.patron

        # Now try to look up the credential based solely on the UUID.
        new_token = Credential.lookup_by_token(
            self._db,
            data_source,
            token.type,
            token.credential,
            allow_persistent_token=True,
        )
        assert new_token == token
        credential = new_token.credential

        # We can keep calling lookup_by_token and getting the same
        # Credential object with the same .credential -- it doesn't
        # expire.
        again_token = Credential.lookup_by_token(
            self._db,
            data_source,
            token.type,
            token.credential,
            allow_persistent_token=True,
        )
        assert again_token == new_token
        assert again_token.credential == credential
    def test_unresolved_catalog(self):
        # A regular schmegular identifier: untouched, pure.
        pure_id = self._identifier()

        # A 'resolved' identifier that doesn't have a work yet.
        # (This isn't supposed to happen, but just in case.)
        source = DataSource.lookup(self._db, DataSource.GUTENBERG)
        operation = "test-thyself"
        resolved_id = self._identifier()
        self._coverage_record(
            resolved_id, source, operation=operation, status=CoverageRecord.SUCCESS
        )

        # An unresolved identifier--we tried to resolve it, but
        # it all fell apart.
        unresolved_id = self._identifier()
        self._coverage_record(
            unresolved_id,
            source,
            operation=operation,
            status=CoverageRecord.TRANSIENT_FAILURE,
        )

        # An identifier with a Work already.
        id_with_work = self._work().presentation_edition.primary_identifier

        self.collection.catalog_identifiers(
            [pure_id, resolved_id, unresolved_id, id_with_work]
        )

        result = self.collection.unresolved_catalog(self._db, source.name, operation)

        # Only the failing identifier is in the query.
        assert [unresolved_id] == result.all()
Example #14
    def test_find(self):
        source = DataSource.lookup(self._db, DataSource.NYT)
        # When there's no CustomList to find, nothing is returned.
        result = CustomList.find(self._db, "my-list", source)
        assert None == result

        custom_list = self._customlist(
            foreign_identifier="a-list", name="My List", num_entries=0
        )[0]
        # A CustomList can be found by its foreign_identifier.
        result = CustomList.find(self._db, "a-list", source)
        assert custom_list == result

        # Or its name.
        result = CustomList.find(self._db, "My List", source.name)
        assert custom_list == result

        # The list can also be found by name without a data source.
        result = CustomList.find(self._db, "My List")
        assert custom_list == result

        # By default, we only find lists with no associated Library.
        # If we look for a list from a library, there isn't one.
        result = CustomList.find(
            self._db, "My List", source, library=self._default_library
        )
        assert None == result

        # If we add the Library to the list, it's returned.
        custom_list.library = self._default_library
        result = CustomList.find(
            self._db, "My List", source, library=self._default_library
        )
        assert custom_list == result
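
A minimal find-or-create sketch combining `CustomList.find` above with `get_one_or_create`, which other examples in this section use to build lists directly. The import path and the default data source name are assumptions.

# Sketch only. The import path is an assumption; CustomList.find() and
# get_one_or_create() are used as in the surrounding examples.
from core.model import CustomList, DataSource, get_one_or_create  # assumed path

def find_or_create_list(_db, name, source_name=DataSource.LIBRARY_STAFF):
    source = DataSource.lookup(_db, source_name)
    existing = CustomList.find(_db, name, source)
    if existing is not None:
        return existing, False
    return get_one_or_create(_db, CustomList, name=name)
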
Example #15
    def test_add_for(self):
        source = DataSource.lookup(self._db, DataSource.OCLC)
        edition = self._edition()
        operation = "foo"
        record, is_new = CoverageRecord.add_for(edition, source, operation)
        assert True == is_new

        # If we call add_for again we get the same record back, but we
        # can modify the timestamp.
        a_week_ago = utc_now() - datetime.timedelta(days=7)
        record2, is_new = CoverageRecord.add_for(edition, source, operation, a_week_ago)
        assert record == record2
        assert False == is_new
        assert a_week_ago == record2.timestamp

        # If we don't specify an operation we get a totally different
        # record.
        record3, ignore = CoverageRecord.add_for(edition, source)
        assert record3 != record
        assert None == record3.operation
        seconds = (utc_now() - record3.timestamp).seconds
        assert seconds < 10

        # If we call lookup we get the same record.
        record4 = CoverageRecord.lookup(edition.primary_identifier, source)
        assert record3 == record4

        # We can change the status.
        record5, is_new = CoverageRecord.add_for(
            edition, source, operation, status=CoverageRecord.PERSISTENT_FAILURE
        )
        assert record5 == record
        assert CoverageRecord.PERSISTENT_FAILURE == record.status
Example #16
    def test_metadata_sources_for(self):
        content_cafe = DataSource.lookup(self._db, DataSource.CONTENT_CAFE)
        isbn_metadata_sources = DataSource.metadata_sources_for(
            self._db, Identifier.ISBN)

        assert 1 == len(isbn_metadata_sources)
        assert [content_cafe] == isbn_metadata_sources
Example #17
    def test_lookup(self):
        source = DataSource.lookup(self._db, DataSource.OCLC)
        edition = self._edition()
        operation = "foo"
        collection = self._default_collection
        record = self._coverage_record(
            edition, source, operation, collection=collection
        )

        # To find the CoverageRecord, edition, source, operation,
        # and collection must all match.
        result = CoverageRecord.lookup(
            edition, source, operation, collection=collection
        )
        assert record == result

        # You can substitute the Edition's primary identifier for the
        # Edition itself.
        lookup = CoverageRecord.lookup(
            edition.primary_identifier,
            source,
            operation,
            collection=self._default_collection,
        )
        assert lookup == record

        # Omit the collection, and you find nothing.
        result = CoverageRecord.lookup(edition, source, operation)
        assert None == result

        # Same for operation.
        result = CoverageRecord.lookup(edition, source, collection=collection)
        assert None == result

        result = CoverageRecord.lookup(
            edition, source, "other operation", collection=collection
        )
        assert None == result

        # Same for data source.
        other_source = DataSource.lookup(self._db, DataSource.OVERDRIVE)
        result = CoverageRecord.lookup(
            edition, other_source, operation, collection=collection
        )
        assert None == result
    def test_temporary_token(self):

        # Create a temporary token good for one hour.
        duration = datetime.timedelta(hours=1)
        data_source = DataSource.lookup(self._db, DataSource.ADOBE)
        patron = self._patron()
        now = utc_now()
        expect_expires = now + duration
        token, is_new = Credential.temporary_token_create(
            self._db, data_source, "some random type", patron, duration)
        assert data_source == token.data_source
        assert "some random type" == token.type
        assert patron == token.patron
        expires_difference = abs((token.expires - expect_expires).seconds)
        assert expires_difference < 2

        # Now try to look up the credential based solely on the UUID.
        new_token = Credential.lookup_by_token(self._db, data_source,
                                               token.type, token.credential)
        assert new_token == token

        # When we call lookup_and_expire_temporary_token, the token is automatically
        # expired and we cannot use it anymore.
        new_token = Credential.lookup_and_expire_temporary_token(
            self._db, data_source, token.type, token.credential)
        assert new_token == token
        assert new_token.expires < now

        new_token = Credential.lookup_by_token(self._db, data_source,
                                               token.type, token.credential)
        assert None == new_token

        new_token = Credential.lookup_and_expire_temporary_token(
            self._db, data_source, token.type, token.credential)
        assert None == new_token

        # A token with no expiration date is treated as expired...
        token.expires = None
        self._db.commit()
        no_expiration_token = Credential.lookup_by_token(
            self._db, data_source, token.type, token.credential)
        assert None == no_expiration_token

        # ...unless we specifically say we're looking for a persistent token.
        no_expiration_token = Credential.lookup_by_token(
            self._db,
            data_source,
            token.type,
            token.credential,
            allow_persistent_token=True,
        )
        assert token == no_expiration_token
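
A minimal sketch of the one-shot flow this test walks through: mint a short-lived token, hand its value out, then redeem it exactly once with `lookup_and_expire_temporary_token`. The session, token type string, and import path are assumptions.

# Sketch only. `_db`, the token type string, and the import path are assumptions.
import datetime

from core.model import Credential, DataSource  # assumed import path

def issue_and_redeem(_db, patron):
    adobe = DataSource.lookup(_db, DataSource.ADOBE)
    token, is_new = Credential.temporary_token_create(
        _db, adobe, "one-time login token", patron, datetime.timedelta(minutes=10))
    secret = token.credential  # the value handed to the remote party

    # Redeeming both looks the token up and expires it, so a second
    # attempt with the same value returns None.
    return Credential.lookup_and_expire_temporary_token(
        _db, adobe, "one-time login token", secret)
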
    def test_temporary_token_overwrites_old_token(self):
        duration = datetime.timedelta(hours=1)
        data_source = DataSource.lookup(self._db, DataSource.ADOBE)
        patron = self._patron()
        old_token, is_new = Credential.temporary_token_create(
            self._db, data_source, "some random type", patron, duration)
        assert True == is_new
        old_credential = old_token.credential

        # Creating a second temporary token overwrites the first.
        token, is_new = Credential.temporary_token_create(
            self._db, data_source, "some random type", patron, duration)
        assert False == is_new
        assert token.id == old_token.id
        assert old_credential != token.credential
Example #20
    def test_equivalent_identifiers(self):

        edition = self._edition()
        identifier = self._identifier()
        data_source = DataSource.lookup(self._db, DataSource.OCLC)

        identifier.equivalent_to(data_source, edition.primary_identifier, 0.6)

        policy = PresentationCalculationPolicy(
            equivalent_identifier_threshold=0.5)
        assert set([identifier, edition.primary_identifier
                    ]) == set(edition.equivalent_identifiers(policy=policy))

        policy.equivalent_identifier_threshold = 0.7
        assert set([edition.primary_identifier
                    ]) == set(edition.equivalent_identifiers(policy=policy))
    def test_by_datasource(self):
        """Collections can be found by their associated DataSource"""
        c1 = self._collection(data_source_name=DataSource.GUTENBERG)
        c2 = self._collection(data_source_name=DataSource.OVERDRIVE)

        # Using the DataSource name
        assert set([c1]) == set(
            Collection.by_datasource(self._db, DataSource.GUTENBERG).all()
        )

        # Using the DataSource itself
        overdrive = DataSource.lookup(self._db, DataSource.OVERDRIVE)
        assert set([c2]) == set(Collection.by_datasource(self._db, overdrive).all())

        # A collection marked for deletion is filtered out.
        c2.marked_for_deletion = True
        assert 0 == Collection.by_datasource(self._db, overdrive).count()
    def test_specify_value_of_temporary_token(self):
        """By default, a temporary token has a randomly generated value, but
        you can give a specific value to represent a temporary token you got
        from somewhere else.
        """
        patron = self._patron()
        duration = datetime.timedelta(hours=1)
        data_source = DataSource.lookup(self._db, DataSource.ADOBE)
        token, is_new = Credential.temporary_token_create(
            self._db,
            data_source,
            "some random type",
            patron,
            duration,
            "Some random value",
        )
        assert "Some random value" == token.credential
Example #23
    def test_set_summary(self):
        e, pool = self._edition(with_license_pool=True)
        work = self._work(presentation_edition=e)
        overdrive = DataSource.lookup(self._db, DataSource.OVERDRIVE)

        # Set the work's summary.
        l1, new = pool.add_link(Hyperlink.DESCRIPTION, None, overdrive,
                                "text/plain", "F")
        work.set_summary(l1.resource)

        assert l1.resource == work.summary
        assert "F" == work.summary_text

        # Remove the summary.
        work.set_summary(None)

        assert None == work.summary
        assert "" == work.summary_text
Example #24
    def test_set_work(self):

        # Start with a custom list with no entries
        list, ignore = self._customlist(num_entries=0)

        # Now create an entry with an edition but no license pool.
        edition = self._edition()

        entry, ignore = get_one_or_create(
            self._db,
            CustomListEntry,
            list_id=list.id,
            edition_id=edition.id,
        )

        assert edition == entry.edition
        assert None == entry.work

        # Here's another edition, with a license pool.
        other_edition, lp = self._edition(with_open_access_download=True)

        # And its identifier is equivalent to the entry's edition's identifier.
        data_source = DataSource.lookup(self._db, DataSource.OCLC)
        lp.identifier.equivalent_to(data_source, edition.primary_identifier, 1)

        # If we call set_work, it does nothing, because there is no work
        # associated with either edition.
        entry.set_work()

        # But if we associate a Work with the LicensePool and try again...
        work, ignore = lp.calculate_work()
        entry.set_work()
        assert work == other_edition.work

        # set_work() traces the line from the CustomListEntry to its
        # Edition to the equivalent Edition to its Work, and associates
        # that Work with the CustomListEntry.
        assert work == entry.work

        # Even though the CustomListEntry's edition is not directly
        # associated with the Work.
        assert None == edition.work
Example #25
    def test_identifier_to_remote_service(self):

        # Here's a patron.
        patron = self._patron()

        # Get identifiers to use when identifying that patron on two
        # different remote services.
        axis = DataSource.AXIS_360
        axis_identifier = patron.identifier_to_remote_service(axis)

        feedbooks = DataSource.lookup(self._db, DataSource.FEEDBOOKS)
        feedbooks_identifier = patron.identifier_to_remote_service(feedbooks)

        # The identifiers are different.
        assert axis_identifier != feedbooks_identifier

        # But they're both 36-character UUIDs.
        assert 36 == len(axis_identifier)
        assert 36 == len(feedbooks_identifier)

        # They're persistent.
        assert feedbooks_identifier == patron.identifier_to_remote_service(
            feedbooks)
        assert axis_identifier == patron.identifier_to_remote_service(axis)

        # You can customize the function used to generate the
        # identifier, in case the data source won't accept a UUID as a
        # patron identifier.
        def fake_generator():
            return "fake string"

        bib = DataSource.BIBLIOTHECA
        assert "fake string" == patron.identifier_to_remote_service(
            bib, fake_generator)

        # Once the identifier is created, specifying a different generator
        # does nothing.
        assert "fake string" == patron.identifier_to_remote_service(bib)
        assert axis_identifier == patron.identifier_to_remote_service(
            axis, fake_generator)
    def test_custom_lists(self):
        # A Collection can be associated with one or more CustomLists.
        list1, ignore = get_one_or_create(self._db, CustomList, name=self._str)
        list2, ignore = get_one_or_create(self._db, CustomList, name=self._str)
        self.collection.customlists = [list1, list2]
        assert 0 == len(list1.entries)
        assert 0 == len(list2.entries)

        # When a new pool is added to the collection and its presentation edition is
        # calculated for the first time, it's automatically added to the lists.
        work = self._work(collection=self.collection, with_license_pool=True)
        assert 1 == len(list1.entries)
        assert 1 == len(list2.entries)
        assert work == list1.entries[0].work
        assert work == list2.entries[0].work

        # Now remove it from one of the lists. If its presentation edition changes
        # again or its pool changes works, it's not added back.
        self._db.delete(list1.entries[0])
        self._db.commit()
        assert 0 == len(list1.entries)
        assert 1 == len(list2.entries)

        pool = work.license_pools[0]
        identifier = pool.identifier
        staff_data_source = DataSource.lookup(self._db, DataSource.LIBRARY_STAFF)
        staff_edition, ignore = Edition.for_foreign_id(
            self._db, staff_data_source, identifier.type, identifier.identifier
        )

        staff_edition.title = self._str
        work.calculate_presentation()
        assert 0 == len(list1.entries)
        assert 1 == len(list2.entries)

        new_work = self._work(collection=self.collection)
        pool.work = new_work
        assert 0 == len(list1.entries)
        assert 1 == len(list2.entries)
Example #27
    def test_for_foreign_id(self):
        """Verify we can get a data source's view of a foreign id."""
        data_source = DataSource.lookup(self._db, DataSource.GUTENBERG)
        id = "549"
        type = Identifier.GUTENBERG_ID

        record, was_new = Edition.for_foreign_id(self._db, data_source, type,
                                                 id)
        assert data_source == record.data_source
        identifier = record.primary_identifier
        assert id == identifier.identifier
        assert type == identifier.type
        assert True == was_new
        assert [identifier] == record.equivalent_identifiers()

        # We can get the same work record by providing only the name
        # of the data source.
        record, was_new = Edition.for_foreign_id(self._db,
                                                 DataSource.GUTENBERG, type,
                                                 id)
        assert data_source == record.data_source
        assert identifier == record.primary_identifier
        assert False == was_new
Example #28
    def test_author_contributors(self):
        data_source = DataSource.lookup(self._db, DataSource.GUTENBERG)
        id = self._str
        type = Identifier.GUTENBERG_ID

        edition, was_new = Edition.for_foreign_id(self._db, data_source, type,
                                                  id)

        # We've listed the same person as primary author and author.
        [alice], ignore = Contributor.lookup(self._db, "Adder, Alice")
        edition.add_contributor(
            alice, [Contributor.AUTHOR_ROLE, Contributor.PRIMARY_AUTHOR_ROLE])

        # We've listed a different person as illustrator.
        [bob], ignore = Contributor.lookup(self._db, "Bitshifter, Bob")
        edition.add_contributor(bob, [Contributor.ILLUSTRATOR_ROLE])

        # Both contributors show up in .contributors.
        assert set([alice, bob]) == edition.contributors

        # Only the author shows up in .author_contributors, and she
        # only shows up once.
        assert [alice] == edition.author_contributors
Example #29
    def test_missing_coverage_from_with_collection(self):
        gutenberg = DataSource.lookup(self._db, DataSource.GUTENBERG)
        identifier = self._identifier()
        collection1 = self._default_collection
        collection2 = self._collection()
        self._coverage_record(identifier, gutenberg, collection=collection1)

        # The Identifier has coverage in collection 1.
        assert ([] == Identifier.missing_coverage_from(
            self._db, [identifier.type], gutenberg,
            collection=collection1).all())

        # It is missing coverage in collection 2.
        assert [identifier] == Identifier.missing_coverage_from(
            self._db, [identifier.type], gutenberg,
            collection=collection2).all()

        # If no collection is specified, we look for a CoverageRecord
        # that also has no collection specified, and the Identifier is
        # not treated as covered.
        assert [identifier
                ] == Identifier.missing_coverage_from(self._db,
                                                      [identifier.type],
                                                      gutenberg).all()
    def test_collection_token(self):
        # Make sure we can have two tokens from the same data_source with
        # different collections.
        data_source = DataSource.lookup(self._db, DataSource.FEEDBOOKS)
        collection1 = self._collection("test collection 1")
        collection2 = self._collection("test collection 2")
        patron = self._patron()
        type = "super secret"

        # Create our credentials
        credential1 = Credential.lookup(self._db,
                                        data_source,
                                        type,
                                        patron,
                                        None,
                                        collection=collection1)
        credential2 = Credential.lookup(self._db,
                                        data_source,
                                        type,
                                        patron,
                                        None,
                                        collection=collection2)
        credential1.credential = "test1"
        credential2.credential = "test2"

        # Make sure the text matches what we expect
        assert ("test1" == Credential.lookup(
            self._db, data_source, type, patron, None,
            collection=collection1).credential)
        assert ("test2" == Credential.lookup(
            self._db, data_source, type, patron, None,
            collection=collection2).credential)

        # Make sure we don't get anything if we don't pass a collection
        assert (None == Credential.lookup(self._db, data_source, type, patron,
                                          None).credential)