def test_loading_paths_traversal(app, db, es_clear, subjects_service):
    """Verify fixture loading precedence between app_data/ and data/ paths."""
    base_dir = Path(__file__).parent
    loader = PrioritizedVocabulariesFixtures(
        system_identity,
        base_dir / "app_data",
        base_dir / "data",
        "vocabularies.yaml",
        delay=False,
    )
    loader.load()

    # app_data/vocabularies/resource_types.yaml only has image resource types
    with pytest.raises(PIDDoesNotExistError):
        vocabulary_service.read(
            ('resourcetypes', 'publication-annotationcollection'),
            system_identity)

    # languages are found
    language = vocabulary_service.read(('languages', 'aae'), system_identity)
    assert language.id == "aae"

    # Only subjects from app_data/ are loaded
    mesh_subject = subjects_service.read(
        "https://id.nlm.nih.gov/mesh/D000001", system_identity)
    assert mesh_subject.id == "https://id.nlm.nih.gov/mesh/D000001"

    # - subjects in extension but from already loaded scheme are not loaded
    with pytest.raises(PIDDoesNotExistError):
        subjects_service.read(
            "https://id.nlm.nih.gov/mesh/D000015", system_identity)

    # - subjects in extension from not already loaded scheme are loaded
    loc_subject = subjects_service.read(
        "https://id.loc.gov/authorities/subjects/sh85118623", system_identity)
    assert loc_subject.id == "https://id.loc.gov/authorities/subjects/sh85118623"
def get_programme_activity_from_record(
    identity: Identity,
    record: Record,
    programme_vocabulary: str = "geowptypes",
) -> Union[None, Dict]:
    """Retrieve the GEO Work Programme activity metadata associated with the record.

    Args:
        identity (flask_principal.Identity): User identity

        record (invenio_records.Record): Record API Object from where the
            GEO Work Programme activity must be extracted.

        programme_vocabulary (str): Vocabulary used to search the programme
            metadata.

    Returns:
        Union[None, Dict]: None or the GEO Work Programme metadata (as dict).
    """
    # Guard clause: no activity id on the record means nothing to look up.
    activity_id = py_.get(
        record, "metadata.geo_work_programme_activity.id", None)
    if not activity_id:
        return None

    vocabulary_item = vocabulary_service.read(
        identity, (programme_vocabulary, activity_id))
    return vocabulary_item.to_dict()
def test_loading_paths_traversal(app, db):
    """Check that app_data/ vocabularies take precedence over data/ ones."""
    here = Path(__file__).parent
    loader = PrioritizedVocabulariesFixtures(
        system_identity,
        here / "app_data",
        here / "data",
        "vocabularies.yaml",
        delay=False,
    )
    loader.load()

    # app_data/vocabularies/resource_types.yaml only has image resource types
    with pytest.raises(PIDDoesNotExistError):
        vocabulary_service.read(
            ('resourcetypes', 'publication-annotationcollection'),
            system_identity)

    # languages are found
    language = vocabulary_service.read(('languages', 'aae'), system_identity)
    assert language.id == "aae"

    # Only subjects A from app_data/ are loaded
    subject_a = vocabulary_service.read(
        ('subjects', 'A-D000008'), system_identity)
    assert subject_a.id == "A-D000008"
    with pytest.raises(PIDDoesNotExistError):
        vocabulary_service.read(('subjects', 'A-D000015'), system_identity)

    # subjects B from an extension are loaded
    subject_b = vocabulary_service.read(
        ('subjects', 'B-D000008'), system_identity)
    assert subject_b.id == "B-D000008"
def test_load_languages(app, db, es_clear):
    """Load the languages vocabulary from its data file and read one entry."""
    vocabulary_id = 'languages'
    entry = GenericVocabularyEntry(
        Path(__file__).parent / "data",
        vocabulary_id,
        {"pid-type": "lng", "data-file": "vocabularies/languages.yaml"},
    )
    entry.load(system_identity, delay=False)

    language = vocabulary_service.read((vocabulary_id, 'aae'), system_identity)
    assert language.id == "aae"
def test_loading_paths_traversal(app, db):
    """Check path-priority loading through the VocabulariesFixture API."""
    here = Path(__file__).parent
    fixture = VocabulariesFixture(
        system_identity,
        [here / "app_data", here / "data"],
        "vocabularies.yaml"
    )
    fixture.load()

    # app_data/vocabularies/resource_types.yaml only has image resource types
    with pytest.raises(PIDDoesNotExistError):
        vocabulary_service.read(
            ('resource_types', 'publication-annotationcollection'),
            system_identity
        )

    # languages are found
    language = vocabulary_service.read(('languages', 'aae'), system_identity)
    assert language.to_dict()["id"] == "aae"
def test_load_languages(app, db, vocabularies):
    """Create the languages vocabulary type, load its data file, read one entry."""
    vocabulary_id = 'languages'
    datafile = Path(__file__).parent / "data/vocabularies/languages.yaml"
    vocabularies.create_vocabulary_type(
        vocabulary_id,
        {"pid-type": "lng", "data-file": datafile},
    )
    vocabularies.load_datafile(vocabulary_id, datafile, delay=False)

    language = vocabulary_service.read((vocabulary_id, 'aae'), system_identity)
    assert language.id == "aae"
def test_load_languages(app, db, vocabularies):
    """Load the languages vocabulary synchronously and read one entry back."""
    datafile = Path(__file__).parent / "data/vocabularies/languages.yaml"
    vocabularies.load_vocabulary(
        'languages',
        {"pid-type": "lng", "data-file": datafile},
        delay=False
    )

    language = vocabulary_service.read(('languages', 'aae'), system_identity)
    assert language.to_dict()["id"] == "aae"
def test_load_resource_types(app, db, es_clear):
    """Load resource types and verify an entry along with one of its props."""
    vocabulary_id = 'resourcetypes'
    entry = GenericVocabularyEntry(
        Path(__file__).parent / "data",
        vocabulary_id,
        {"pid-type": "rsrct", "data-file": "vocabularies/resource_types.yaml"},
    )
    entry.load(system_identity, delay=False)

    record = vocabulary_service.read(
        (vocabulary_id, 'publication-annotationcollection'), system_identity)
    record_dict = record.to_dict()
    assert record_dict["id"] == "publication-annotationcollection"
    assert record_dict["props"]["datacite_general"] == "Collection"
def test_load_resource_types(app, db, vocabularies):
    """Create the resource types vocabulary, load its data file, verify props."""
    vocabulary_id = 'resourcetypes'
    datafile = Path(__file__).parent / "data/vocabularies/resource_types.yaml"
    vocabularies.create_vocabulary_type(
        vocabulary_id,
        {"pid-type": "rsrct", "data-file": datafile},
    )
    vocabularies.load_datafile(vocabulary_id, datafile, delay=False)

    record = vocabulary_service.read(
        (vocabulary_id, 'publication-annotationcollection'), system_identity)
    record_dict = record.to_dict()
    assert record_dict["id"] == "publication-annotationcollection"
    assert record_dict["props"]["datacite_general"] == "Collection"
def test_load_resource_types(app, db, vocabularies):
    """Load resource types synchronously and verify an entry and its props."""
    datafile = Path(__file__).parent / "data/vocabularies/resource_types.yaml"
    vocabularies.load_vocabulary(
        'resource_types',
        {"pid-type": "rsrct", "data-file": datafile},
        delay=False
    )

    record = vocabulary_service.read(
        ('resource_types', 'publication-annotationcollection'),
        system_identity
    )
    record_dict = record.to_dict()
    assert record_dict["id"] == "publication-annotationcollection"
    assert record_dict["props"]["datacite_general"] == "Collection"
def update_license(license_dict):
    """Update the stored vocabulary with the new icon.

    Best-effort: any failure is reported on the console and returned as an
    error-message string; success returns None.
    """
    try:
        echo(f"Updating license: {license_dict['id']}... ", nl=False)
        record = vocabulary_svc.read(
            system_identity, ("licenses", license_dict["id"]))._obj
        # NOTE: the service update method is deliberately bypassed to evade
        # validation errors — the migration guide tells users to completely
        # rebuild the search indices anyway. '$schema' is popped because it
        # might be outdated and is a constant field anyway.
        record["icon"] = license_dict["icon"]
        record.pop("$schema", None)
        record.commit()
        secho("OK", fg="green")
    except Exception as e:
        secho("Error", fg="red")
        return f"Error updating license '{license_dict['id']}': {e}"
    return None
def _read_resource_type(self, id_):
    """Retrieve resource type record using service."""
    result = vocabulary_service.read(('resource_types', id_), system_identity)
    return result._record
def _read_resource_type(self, id_):
    """Retrieve resource type record using service."""
    return vocabulary_service.read(
        system_identity, ("resourcetypes", id_))._record