def make_schema_collection_label(
    bundle_db: BundleDB,
    info: Citation_Information,
    collection_lidvid: str,
    bundle_lidvid: str,
    verify: bool,
    mod_date: str,
) -> bytes:
    """
    Create the label text for the schema collection having this LIDVID
    using the bundle database.  If verify is True, verify the label
    against its XML and Schematron schemas.  Raise an exception if
    either fails.
    """
    # TODO this is sloppy; is there a better way?
    products = bundle_db.get_schema_products()
    record_count = len(products)
    if record_count <= 0:
        raise ValueError(f"{collection_lidvid} has no schema products.")

    collection_lid = lidvid_to_lid(collection_lidvid)
    collection_vid = lidvid_to_vid(collection_lidvid)
    collection: Collection = bundle_db.get_collection(collection_lidvid)
    proposal_id = bundle_db.get_bundle(bundle_lidvid).proposal_id
    instruments = ",".join(bundle_db.get_instruments_of_the_bundle()).upper()
    title: NodeBuilder = make_schema_collection_title(
        {
            "instrument": instruments,
            "proposal_id": str(proposal_id),
        }
    )

    inventory_name = get_collection_inventory_name(bundle_db, collection_lidvid)
    try:
        label = (
            make_label(
                {
                    "collection_lid": collection_lid,
                    "collection_vid": collection_vid,
                    "record_count": record_count,
                    "title": title,
                    "mod_date": mod_date,
                    "proposal_id": str(proposal_id),
                    "Citation_Information": make_citation_information(info),
                    "inventory_name": inventory_name,
                    "Context_Area": combine_nodes_into_fragment([]),
                    "Reference_List": combine_nodes_into_fragment([]),
                    "collection_type": "Schema",
                }
            )
            .toxml()
            .encode()
        )
    except Exception as e:
        raise LabelError(collection_lidvid) from e

    return pretty_and_verify(label, verify)

def make_document_product_label(
    bundle_db: BundleDB,
    info: Citation_Information,
    document_product_lidvid: str,
    bundle_lidvid: str,
    verify: bool,
    publication_date: Optional[str] = None,
) -> bytes:
    """
    Create the label text for the document product in the bundle having
    this :class:`~pdart.pds4.lidvid` using the database connection.  If
    verify is True, verify the label against its XML and Schematron
    schemas.  Raise an exception if either fails.
    """
    bundle = bundle_db.get_bundle(bundle_lidvid)
    proposal_id = bundle.proposal_id
    investigation_lidvid = (
        f"urn:nasa:pds:context:investigation:individual.hst_{proposal_id:05}::1.0"
    )
    title = f"Summary of the observation plan for HST proposal {proposal_id}"

    product_lid = lidvid_to_lid(document_product_lidvid)
    product_vid = lidvid_to_vid(document_product_lidvid)
    publication_date = publication_date or date.today().isoformat()

    product_files: List[File] = bundle_db.get_product_files(document_product_lidvid)
    document_file_basenames = [file.basename for file in product_files]

    try:
        label = (
            make_label(
                {
                    "investigation_lidvid": investigation_lidvid,
                    "product_lid": product_lid,
                    "product_vid": product_vid,
                    "title": title,
                    "publication_date": publication_date,
                    "Citation_Information": make_doc_citation_information(info),
                    "Document_Edition": make_document_edition(
                        "0.0", document_file_basenames
                    ),
                }
            )
            .toxml()
            .encode()
        )
    except Exception as e:
        raise LabelError(document_product_lidvid) from e

    return pretty_and_verify(label, verify)

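# Illustrative usage sketch, not part of the original module: a minimal call
# to make_document_product_label, assuming an already-populated BundleDB and a
# parsed Citation_Information.  The LIDVIDs below are hypothetical
# placeholders; publication_date is omitted, so today's date is used.
def _example_document_product_label(
    bundle_db: BundleDB, info: Citation_Information
) -> bytes:
    return make_document_product_label(
        bundle_db,
        info,
        "urn:nasa:pds:hst_09059:document:phase2::1.0",  # hypothetical product LIDVID
        "urn:nasa:pds:hst_09059::1.0",  # hypothetical bundle LIDVID
        verify=True,
    )
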
def make_browse_product_label(
    bundle_db: BundleDB,
    browse_collection_lidvid: str,
    browse_product_lidvid: str,
    browse_file_basename: str,
    bundle_lidvid: str,
    verify: bool,
) -> bytes:
    """
    Create the label text for the browse product having the given LIDVID
    using the bundle database.  If verify is True, verify the label
    against its XML and Schematron schemas.  Raise an exception if
    either fails.
    """
    product: Product = bundle_db.get_product(browse_product_lidvid)
    if not isinstance(product, BrowseProduct):
        raise TypeError(f"{product} is not a BrowseProduct.")
    browse_product: BrowseProduct = product
    fits_product_lidvid = browse_product.fits_product_lidvid

    file: File = bundle_db.get_file(browse_file_basename, browse_product_lidvid)
    if not isinstance(file, BrowseFile):
        raise TypeError(f"{file} is not a BrowseFile.")
    browse_file: BrowseFile = file

    collection: Collection = bundle_db.get_collection(browse_collection_lidvid)
    if not isinstance(collection, OtherCollection):
        raise TypeError(f"{collection} is not an OtherCollection.")
    browse_collection: OtherCollection = collection

    bundle = bundle_db.get_bundle(bundle_lidvid)

    try:
        label = (
            make_label(
                {
                    "proposal_id": bundle.proposal_id,
                    "suffix": browse_collection.suffix,
                    "browse_lid": lidvid_to_lid(browse_product_lidvid),
                    "browse_vid": lidvid_to_vid(browse_product_lidvid),
                    "data_lidvid": fits_product_lidvid,
                    "browse_file_name": browse_file_basename,
                    "object_length": browse_file.byte_size,
                }
            )
            .toxml()
            .encode()
        )
    except Exception as e:
        raise LabelError(browse_product_lidvid, browse_file_basename) from e

    return pretty_and_verify(label, verify)

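# Illustrative usage sketch, not part of the original module: generating a
# browse product label, assuming the browse product, its BrowseFile, and the
# browse collection are already recorded in the BundleDB.  All LIDVIDs and the
# file basename are hypothetical placeholders.
def _example_browse_product_label(bundle_db: BundleDB) -> bytes:
    return make_browse_product_label(
        bundle_db,
        "urn:nasa:pds:hst_09059:browse_acs_raw::1.0",  # hypothetical browse collection
        "urn:nasa:pds:hst_09059:browse_acs_raw:j6gp01lzq_raw::1.0",  # hypothetical browse product
        "j6gp01lzq_raw.jpg",  # hypothetical browse file basename
        "urn:nasa:pds:hst_09059::1.0",  # hypothetical bundle LIDVID
        verify=True,
    )
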
def make_bundle_label(
    bundle_db: BundleDB,
    bundle_lidvid: str,
    info: Citation_Information,
    verify: bool,
    use_mod_date_for_testing: bool = False,
) -> bytes:
    """
    Create the label text for the bundle in the bundle database using
    the database connection.  If verify is True, verify the label
    against its XML and Schematron schemas.  Raise an exception if
    either fails.
    """
    bundle = bundle_db.get_bundle(bundle_lidvid)
    proposal_id = bundle.proposal_id

    def get_ref_type(collection: Collection) -> str:
        ref_type = switch_on_collection_subtype(
            collection,
            "bundle_has_context_collection",
            "bundle_has_document_collection",
            "bundle_has_schema_collection",
            "bundle_has_other_collection",
        )
        if ref_type == "bundle_has_other_collection":
            collection_type = cast(OtherCollection, collection).prefix
            ref_type = f"bundle_has_{collection_type}_collection"
        return ref_type

    reduced_collections = [
        make_bundle_entry_member(
            {
                "collection_lidvid": collection.lidvid,
                "ref_type": get_ref_type(collection),
            }
        )
        for collection in bundle_db.get_bundle_collections(bundle.lidvid)
    ]

    # Build the bundle title from parts of the Citation_Information description.
    title = (
        info.title
        + ", HST Cycle "
        + str(info.cycle)
        + " Program "
        + str(info.propno)
        + ", "
        + info.publication_year
        + "."
    )

    # Get the list of target identification nodes for the bundle.
    target_identifications = bundle_db.get_all_target_identification()
    target_identification_nodes: List[NodeBuilder] = (
        create_target_identification_nodes(
            bundle_db, target_identifications, "bundle"
        )
    )

    # Get the investigation node for the bundle.
    investigation_area_name = mk_Investigation_Area_name(proposal_id)
    investigation_area_lidvid = mk_Investigation_Area_lidvid(proposal_id)
    investigation_area_node = investigation_area(
        investigation_area_name, investigation_area_lidvid, "bundle"
    )

    # Get the min start_time and max stop_time.
    start_time, stop_time = bundle_db.get_roll_up_time_from_db()
    # Make sure the start/stop times exist in the db.
    if start_time is None:
        raise ValueError("Start time is not stored in FitsProduct table.")
    if stop_time is None:
        raise ValueError("Stop time is not stored in FitsProduct table.")
    start_stop_times = {
        "start_date_time": start_time,
        "stop_date_time": stop_time,
    }
    time_coordinates_node = get_time_coordinates(start_stop_times)

    # Dictionary used for the primary result summary.
    primary_result_dict: Dict[str, Any] = {}
    # Put a dummy value in processing level; it will be updated later.
    primary_result_dict["processing_level"] = "Raw"
    instruments_list = bundle_db.get_instruments_of_the_bundle()
    instruments = ", ".join(instruments_list).upper()
    p_title = (
        f"{instruments} observations obtained by the HST "
        + f"Observing Program {proposal_id}."
    )
    primary_result_dict["description"] = p_title

    # Get unique wavelength names for the roll-up in the bundle.
    wavelength_range = bundle_db.get_wavelength_range_from_db()
    primary_result_dict["wavelength_range"] = wavelength_range
    primary_result_summary_node = primary_result_summary(primary_result_dict)

    # Get the observing system nodes for the bundle.
    observing_system_nodes: List[NodeBuilder] = [
        observing_system(instrument) for instrument in instruments_list
    ]

    context_node: List[NodeBuilder] = [
        make_bundle_context_node(
            time_coordinates_node,
            primary_result_summary_node,
            investigation_area_node,
            observing_system_nodes,
            target_identification_nodes,
        )
    ]

    if not use_mod_date_for_testing:
        # Get the date when the label is created.
        mod_date = get_current_date()
    else:
        mod_date = MOD_DATE_FOR_TESTESING

    try:
        label = (
            make_label(
                {
                    "bundle_lid": lidvid_to_lid(bundle.lidvid),
                    "bundle_vid": lidvid_to_vid(bundle.lidvid),
                    "proposal_id": str(proposal_id),
                    "title": title,
                    "Citation_Information": make_citation_information(
                        info, is_for_bundle=True
                    ),
                    "mod_date": mod_date,
                    "Bundle_Member_Entries": combine_nodes_into_fragment(
                        reduced_collections
                    ),
                    "Context_Area": combine_nodes_into_fragment(context_node),
                    "Reference_List": make_document_reference_list(
                        instruments_list, "bundle"
                    ),
                }
            )
            .toxml()
            .encode()
        )
    except Exception as e:
        raise LabelError(bundle.lidvid) from e

    if label[:6] != b"<?xml ":
        raise ValueError("Bundle label is not XML.")
    return pretty_and_verify(label, verify)

def make_investigation_label(
    bundle_db: BundleDB,
    bundle_lidvid: str,
    info: Citation_Information,
    verify: bool,
    use_mod_date_for_testing: bool = False,
) -> bytes:
    """
    Create the label text for the context investigation having this
    LIDVID using the bundle database.  If verify is True, verify the
    label against its XML and Schematron schemas.  Raise an exception
    if either fails.
    """
    bundle = bundle_db.get_bundle(bundle_lidvid)
    proposal_id = bundle.proposal_id

    # Build the title from parts of the Citation_Information description.
    title = (
        info.title
        + ", HST Cycle "
        + str(info.cycle)
        + " Program "
        + str(info.propno)
        + ", "
        + info.publication_year
        + "."
    )

    investigation_lid = mk_Investigation_Area_lid(proposal_id)
    investigation_lidvid = mk_Investigation_Area_lidvid(proposal_id)

    # Get the min start_time and max stop_time.
    start_time, stop_time = bundle_db.get_roll_up_time_from_db()
    # Make sure the start/stop times exist in the db.
    if start_time is None:
        raise ValueError("Start time is not stored in FitsProduct table.")
    if stop_time is None:
        raise ValueError("Stop time is not stored in FitsProduct table.")
    start_date = date_time_to_date(start_time)
    stop_date = date_time_to_date(stop_time)

    # Build internal references to the context products of the investigation.
    context_products = bundle_db.get_reference_context_products(investigation_lidvid)
    internal_reference_nodes: List[NodeBuilder] = []
    for product in context_products:
        ref_lid = lidvid_to_lid(product.lidvid)
        ref_type = f"investigation_to_{product.ref_type}"
        ref_node = make_internal_ref(ref_lid, ref_type)
        internal_reference_nodes.append(ref_node)

    description = info.abstract_formatted(indent=8)  # type: ignore
    if len(description) != 0:
        description = "\n".join(description)
    else:
        description = " " * 8 + "None"
    description_nodes: List[NodeBuilder] = [make_description(description)]

    if not use_mod_date_for_testing:
        # Get the date when the label is created.
        mod_date = get_current_date()
    else:
        mod_date = MOD_DATE_FOR_TESTESING

    try:
        label = (
            make_label(
                {
                    "investigation_lid": investigation_lid,
                    "bundle_vid": lidvid_to_vid(bundle.lidvid),
                    "title": title,
                    "mod_date": mod_date,
                    "start_date": start_date,
                    "stop_date": stop_date,
                    "internal_reference": combine_nodes_into_fragment(
                        internal_reference_nodes
                    ),
                    "description": combine_nodes_into_fragment(description_nodes),
                }
            )
            .toxml()
            .encode()
        )
    except Exception as e:
        raise LabelError(investigation_lid) from e

    return pretty_and_verify(label, verify)

def make_other_collection_label(
    bundle_db: BundleDB,
    info: Citation_Information,
    collection_lidvid: str,
    bundle_lidvid: str,
    verify: bool,
    mod_date: str,
) -> bytes:
    """
    Create the label text for the document, browse, or data collection
    having this LIDVID using the bundle database.  If verify is True,
    verify the label against its XML and Schematron schemas.  Raise an
    exception if either fails.
    """
    # TODO this is sloppy; is there a better way?
    products = bundle_db.get_collection_products(collection_lidvid)
    record_count = len(products)
    if record_count <= 0:
        raise ValueError(f"{collection_lidvid} has no products.")

    collection_lid = lidvid_to_lid(collection_lidvid)
    collection_vid = lidvid_to_vid(collection_lidvid)
    collection: Collection = bundle_db.get_collection(collection_lidvid)
    proposal_id = bundle_db.get_bundle(bundle_lidvid).proposal_id
    instruments = ",".join(bundle_db.get_instruments_of_the_bundle()).upper()

    def make_ctxt_coll_title(_coll: Collection) -> NodeBuilder:
        return make_context_collection_title(
            {
                "instrument": instruments,
                "proposal_id": str(proposal_id),
            }
        )

    def make_doc_coll_title(_coll: Collection) -> NodeBuilder:
        return make_document_collection_title(
            {
                "instrument": instruments,
                "proposal_id": str(proposal_id),
            }
        )

    def make_sch_coll_title(_coll: Collection) -> NodeBuilder:
        return make_schema_collection_title(
            {
                "instrument": instruments,
                "proposal_id": str(proposal_id),
            }
        )

    def make_other_coll_title(coll: Collection) -> NodeBuilder:
        other_collection = cast(OtherCollection, coll)
        if other_collection.prefix == "browse":
            collection_title = (
                f"{other_collection.prefix.capitalize()} "
                + f"collection of {other_collection.instrument.upper()} "
                + f"observations obtained from HST Observing Program {proposal_id}."
            )
        else:
            # Get the data/misc collection title from the db.
            collection_title = str(other_collection.title)
        return make_other_collection_title({"collection_title": collection_title})

    title: NodeBuilder = switch_on_collection_subtype(
        collection,
        make_ctxt_coll_title,
        make_doc_coll_title,
        make_sch_coll_title,
        make_other_coll_title,
    )(collection)

    inventory_name = get_collection_inventory_name(bundle_db, collection_lidvid)

    # Assign the proper collection type for a Document, Browse, or Data
    # collection.  The Context_Area node and the Reference_List only exist in
    # the data collection label.
    context_node: List[NodeBuilder] = []
    reference_list_node: List[NodeBuilder] = []
    collection_type: str = ""
    type_name = type(collection).__name__
    if type_name == "DocumentCollection":
        collection_type = "Document"
        # For a document collection, we need to count the handbooks added to
        # the inventory csv even though we don't create labels for them.
        inst_list = bundle_db.get_instruments_of_the_bundle()
        record_count += 2 * len(inst_list)
    elif type_name == "OtherCollection":
        collection_type = cast(OtherCollection, collection).prefix.capitalize()
        suffix = cast(OtherCollection, collection).suffix
        instrument = cast(OtherCollection, collection).instrument
        # The roll-up (Context_Area node) only exists in the data collection.
        if collection_type == "Data":
            # Get the min start_time and max stop_time.
            start_time, stop_time = bundle_db.get_roll_up_time_from_db(suffix)
            # Make sure the start/stop times exist in the db.
            if start_time is None:
                raise ValueError("Start time is not stored in FitsProduct table.")
            if stop_time is None:
                raise ValueError("Stop time is not stored in FitsProduct table.")
            start_stop_times = {
                "start_date_time": start_time,
                "stop_date_time": stop_time,
            }
            time_coordinates_node = get_time_coordinates(start_stop_times)

            # Dictionary used for the primary result summary.
            primary_result_dict: Dict[str, Any] = {}
            # Check whether it's a raw or calibrated image; we will update
            # this later.
            processing_level = get_processing_level(
                suffix=suffix, instrument_id=instrument.upper()
            )
            primary_result_dict["processing_level"] = processing_level
            p_title = bundle_db.get_fits_product_collection_title(collection_lidvid)
            primary_result_dict["description"] = p_title

            # Get unique wavelength names for the roll-up in the data collection.
            wavelength_range = bundle_db.get_wavelength_range_from_db(suffix)
            primary_result_dict["wavelength_range"] = wavelength_range
            primary_result_summary_node = primary_result_summary(primary_result_dict)

            # Get the list of target identification nodes for the collection.
            target_identifications = bundle_db.get_all_target_identification()
            target_identification_nodes: List[NodeBuilder] = (
                create_target_identification_nodes(
                    bundle_db, target_identifications, "collection"
                )
            )

            # Get the investigation node for the collection.
            investigation_area_name = mk_Investigation_Area_name(proposal_id)
            investigation_area_lidvid = mk_Investigation_Area_lidvid(proposal_id)
            investigation_area_node = investigation_area(
                investigation_area_name, investigation_area_lidvid, "collection"
            )

            # Get the observing system node for the collection.
            observing_system_node = observing_system(instrument)

            context_node = [
                make_collection_context_node(
                    time_coordinates_node,
                    primary_result_summary_node,
                    investigation_area_node,
                    observing_system_node,
                    target_identification_nodes,
                )
            ]

            # The document reference list only exists in the data collection.
            reference_list_node = [
                make_document_reference_list([instrument], "collection")
            ]

    try:
        label = (
            make_label(
                {
                    "collection_lid": collection_lid,
                    "collection_vid": collection_vid,
                    "record_count": record_count,
                    "title": title,
                    "mod_date": mod_date,
                    "proposal_id": str(proposal_id),
                    "Citation_Information": make_citation_information(info),
                    "inventory_name": inventory_name,
                    "Context_Area": combine_nodes_into_fragment(context_node),
                    "collection_type": collection_type,
                    "Reference_List": combine_nodes_into_fragment(reference_list_node),
                }
            )
            .toxml()
            .encode()
        )
    except Exception as e:
        raise LabelError(collection_lidvid) from e

    return pretty_and_verify(label, verify)

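# Illustrative usage sketch, not part of the original module: labeling a data
# collection, assuming its products are already registered in the BundleDB.
# The LIDVIDs and the modification date are hypothetical placeholders; the
# Context_Area and Reference_List nodes are only populated because this is a
# data collection.
def _example_data_collection_label(
    bundle_db: BundleDB, info: Citation_Information
) -> bytes:
    return make_other_collection_label(
        bundle_db,
        info,
        "urn:nasa:pds:hst_09059:data_acs_raw::1.0",  # hypothetical collection LIDVID
        "urn:nasa:pds:hst_09059::1.0",  # hypothetical bundle LIDVID
        verify=True,
        mod_date="2021-01-01",  # hypothetical label modification date
    )
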
def make_fits_product_label(
    working_dir: str,
    bundle_db: BundleDB,
    collection_lidvid: str,
    product_lidvid: str,
    bundle_lidvid: str,
    file_basename: str,
    verify: bool,
    use_mod_date_for_testing: bool = False,
) -> bytes:
    """
    Create the label text for the FITS product having this LIDVID using
    the bundle database.  If verify is True, verify the label against
    its XML and Schematron schemas.  Raise an exception if either fails.
    """
    try:
        product = bundle_db.get_product(product_lidvid)
        collection = bundle_db.get_collection(collection_lidvid)
        if not isinstance(collection, OtherCollection):
            raise TypeError(f"{collection} is not an OtherCollection.")
        instrument = collection.instrument
        suffix = collection.suffix

        # If a label is created for testing purposes, to compare it with a
        # pre-made XML file, we use MOD_DATE_FOR_TESTESING as the
        # modification date.
        if not use_mod_date_for_testing:
            # Get the date when the label is created.
            mod_date = get_current_date()
        else:
            mod_date = MOD_DATE_FOR_TESTESING

        card_dicts = bundle_db.get_card_dictionaries(product_lidvid, file_basename)
        lookup = DictLookup(file_basename, card_dicts)
        siblings = _directory_siblings(working_dir, bundle_db, product_lidvid)
        hdu_lookups = _find_RAWish_lookups(
            bundle_db, product_lidvid, file_basename, siblings
        )
        shm_lookup = _find_SHMish_lookup(
            bundle_db, product_lidvid, file_basename, siblings
        )

        start_date_time, stop_date_time = get_start_stop_date_times(
            hdu_lookups, shm_lookup
        )
        exposure_duration = get_exposure_duration(hdu_lookups, shm_lookup)
        start_stop_times = {
            "start_date_time": start_date_time,
            "stop_date_time": stop_date_time,
            "exposure_duration": exposure_duration,
        }
        # Store start/stop times for each fits_product in the fits_products
        # table.  The min/max will be pulled out for the roll-up in the data
        # collection/bundle.
        bundle_db.update_fits_product_time(
            product_lidvid, start_date_time, stop_date_time
        )

        hst_parameters = get_hst_parameters(hdu_lookups, shm_lookup)
        bundle = bundle_db.get_bundle(bundle_lidvid)
        proposal_id = bundle.proposal_id

        investigation_area_name = mk_Investigation_Area_name(proposal_id)
        investigation_area_lidvid = mk_Investigation_Area_lidvid(proposal_id)
        bundle_db.create_context_product(investigation_area_lidvid, "investigation")
        bundle_db.create_context_product(instrument_host_lidvid(), "instrument_host")
        bundle_db.create_context_product(
            observing_system_lidvid(instrument), "instrument"
        )

        # Fetch target identifications from the db.
        target_id = shm_lookup["TARG_ID"]
        target_identifications = bundle_db.get_target_identifications_based_on_id(
            target_id
        )
        # At this stage, target identifications should be in the db.
        if len(target_identifications) == 0:
            raise ValueError("Target identification is not stored in db.")
        target_identification_nodes: List[NodeBuilder] = (
            create_target_identification_nodes(
                bundle_db, target_identifications, "data"
            )
        )

        # Get the wavelength range.
        instrument_id = get_instrument_id(hdu_lookups, shm_lookup)
        detector_ids = get_detector_ids(hdu_lookups, shm_lookup)
        filter_name = get_filter_name(hdu_lookups, shm_lookup)
        wavelength_range = wavelength_ranges(instrument_id, detector_ids, filter_name)
        bundle_db.update_wavelength_range(product_lidvid, wavelength_range)

        # Get the product and collection titles.
        channel_id = get_channel_id(hdu_lookups, shm_lookup)
        try:
            titles = get_titles_format(instrument_id, channel_id, suffix)
            product_title = titles[0] + "."
            product_title = product_title.format(
                I=instrument_id + "/" + channel_id, F=file_basename, P=proposal_id
            )
            collection_title = titles[1] + "."
            collection_title = collection_title.format(
                I=instrument_id + "/" + channel_id, F=file_basename, P=proposal_id
            )
            # Save the data/misc collection title to the OtherCollection table.
            bundle_db.update_fits_product_collection_title(
                collection_lidvid, collection_title
            )
        except KeyError:
            # If product_title doesn't exist in SUFFIX_TITLES, we use the
            # following text as the product_title.
            product_title = (
                f"{instrument_id} data file {file_basename} "
                + f"obtained by the HST Observing Program {proposal_id}."
            )

        # Dictionary used for the primary result summary.
        processing_level = get_processing_level(suffix, instrument_id, channel_id)
        primary_result_dict: Dict[str, Any] = {}
        primary_result_dict["processing_level"] = processing_level
        primary_result_dict["description"] = product_title
        primary_result_dict["wavelength_range"] = wavelength_range

        # Dictionary passed into the templates.  The same data dictionary is
        # used for both the data label template and the misc label template.
        data_dict = {
            "lid": lidvid_to_lid(product_lidvid),
            "vid": lidvid_to_vid(product_lidvid),
            "title": product_title,
            "mod_date": mod_date,
            "file_name": file_basename,
            "file_contents": get_file_contents(
                bundle_db, card_dicts, instrument, product_lidvid
            ),
            "Investigation_Area": investigation_area(
                investigation_area_name, investigation_area_lidvid, "data"
            ),
            "Observing_System": observing_system(instrument),
            "Time_Coordinates": get_time_coordinates(start_stop_times),
            "Target_Identification": combine_nodes_into_fragment(
                target_identification_nodes
            ),
            "HST": hst_parameters,
            "Primary_Result_Summary": primary_result_summary(primary_result_dict),
            "Reference_List": make_document_reference_list([instrument], "data"),
        }

        # Pass the data_dict to either the data label or the misc label based
        # on collection_type.
        collection_type = get_collection_type(suffix, instrument_id, channel_id)
        if collection_type == "data":
            label = make_data_label(data_dict).toxml().encode()
        elif collection_type == "miscellaneous":
            label = make_misc_label(data_dict).toxml().encode()
    except AssertionError:
        raise AssertionError(
            f"{product_lidvid} has no target identifications stored in DB."
        )
    except Exception as e:
        print(str(e))
        raise LabelError(
            product_lidvid, file_basename, (lookup, hdu_lookups[0], shm_lookup)
        ) from e

    return pretty_and_verify(label, verify)

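# Illustrative usage sketch, not part of the original module: generating the
# label for a single FITS product.  The working directory, LIDVIDs, and file
# basename are hypothetical placeholders; the FITS card dictionaries are
# assumed to already be stored in the BundleDB.
def _example_fits_product_label(bundle_db: BundleDB) -> bytes:
    return make_fits_product_label(
        "/tmp/hst_09059_working_dir",  # hypothetical working directory
        bundle_db,
        "urn:nasa:pds:hst_09059:data_acs_raw::1.0",  # hypothetical collection LIDVID
        "urn:nasa:pds:hst_09059:data_acs_raw:j6gp01lzq_raw::1.0",  # hypothetical product LIDVID
        "urn:nasa:pds:hst_09059::1.0",  # hypothetical bundle LIDVID
        "j6gp01lzq_raw.fits",  # hypothetical FITS file basename
        verify=True,
    )
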
def make_context_target_label(
    bundle_db: BundleDB,
    target: str,
    verify: bool,
    use_mod_date_for_testing: bool = False,
) -> bytes:
    """
    Create the label text for the context target having this LIDVID
    using the bundle database.  If verify is True, verify the label
    against its XML and Schematron schemas.  Raise an exception if
    either fails.
    """
    target_lid = f"urn:nasa:pds:context:target:{target}"
    target_lidvid = f"{target_lid}::1.0"
    target_identification = bundle_db.get_target_identification_based_on_lid(target_lid)
    bundle_db.create_context_product(
        get_target_lidvid([target_identification.type, target_identification.name]),
        "target",
    )

    # Initialize to an empty list so the label can still be built when the
    # target has no alternate designations.
    alias_nodes: List[NodeBuilder] = []
    alias = str(target_identification.alternate_designations)
    if len(alias) != 0:
        alias_list = alias.split("\n")
        alias_nodes = [make_alias(alias) for alias in alias_list]

    target_description = str(target_identification.description)
    if len(target_description) != 0:
        # Properly align multi-line text nodes with 8 spaces.
        target_description = " " * 8 + target_description
        target_description = target_description.replace("\n", "\n" + " " * 8)
    else:
        target_description = " " * 8 + "None"
    description_nodes: List[NodeBuilder] = [make_description(target_description)]

    if not use_mod_date_for_testing:
        # Get the date when the label is created.
        mod_date = get_current_date()
    else:
        mod_date = MOD_DATE_FOR_TESTESING

    try:
        label = (
            make_label(
                {
                    "target_lid": target_lid,
                    "target_vid": "1.0",
                    "title": target_identification.name,
                    "alias": combine_nodes_into_fragment(alias_nodes),
                    "name": target_identification.name,
                    "type": target_identification.type,
                    "description": combine_nodes_into_fragment(description_nodes),
                    "mod_date": mod_date,
                }
            )
            .toxml()
            .encode()
        )
    except Exception as e:
        raise LabelError(target_lidvid) from e

    return pretty_and_verify(label, verify)

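# Illustrative usage sketch, not part of the original module: generating a
# context target label.  The target argument is the tail of the target LID
# (everything after "urn:nasa:pds:context:target:"); the value shown is a
# hypothetical placeholder, and the target identification is assumed to be
# stored in the BundleDB already.
def _example_context_target_label(bundle_db: BundleDB) -> bytes:
    return make_context_target_label(
        bundle_db,
        "planet.mars",  # hypothetical target LID tail
        verify=True,
    )
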