def get_collection_inventory_name(bundle_db: BundleDB, collection_lidvid: str) -> str:
    # We have to jump through some hoops to apply
    # switch_on_collection_subtype().
    def get_context_collection_inventory_name(collection: Collection) -> str:
        return "collection_context.csv"

    def get_document_collection_inventory_name(collection: Collection) -> str:
        return "collection.csv"

    def get_schema_collection_inventory_name(collection: Collection) -> str:
        return "collection_schema.csv"

    def get_other_collection_inventory_name(collection: Collection) -> str:
        collection_obj = cast(OtherCollection, collection)
        prefix = collection_obj.prefix
        instrument = collection_obj.instrument
        suffix = collection_obj.suffix
        return f"collection_{prefix}_{instrument}_{suffix}.csv"

    collection: Collection = bundle_db.get_collection(collection_lidvid)
    return switch_on_collection_subtype(
        collection,
        get_context_collection_inventory_name,
        get_document_collection_inventory_name,
        get_schema_collection_inventory_name,
        get_other_collection_inventory_name,
    )(collection)
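

# Hedged usage sketch (not part of the pipeline): how the inventory name for a
# collection might be looked up.  The helper name and the LIDVID below are
# illustrative only; real values come from the bundle database.
def _example_inventory_name(bundle_db: BundleDB) -> str:
    # For an OtherCollection with prefix "data", instrument "acs", and suffix
    # "raw", this resolves to "collection_data_acs_raw.csv"; context, document,
    # and schema collections get fixed names.
    return get_collection_inventory_name(
        bundle_db, "urn:nasa:pds:hst_09059:data_acs_raw::1.0"
    )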
def make_collection_label(
    bundle_db: BundleDB,
    info: Citation_Information,
    collection_lidvid: str,
    bundle_lidvid: str,
    verify: bool,
    use_mod_date_for_testing: bool = False,
) -> bytes:
    """
    Create the label text for the collection having this LIDVID using
    the bundle database.  If verify is True, verify the label against
    its XML and Schematron schemas.  Raise an exception if either fails.
    """
    collection = bundle_db.get_collection(collection_lidvid)

    # If a label is created for testing purposes, to compare with pre-made XML,
    # we use MOD_DATE_FOR_TESTESING as the modification date; otherwise we use
    # the date when the label is created.
    if not use_mod_date_for_testing:
        mod_date = get_current_date()
    else:
        mod_date = MOD_DATE_FOR_TESTESING

    return switch_on_collection_subtype(
        collection,
        make_context_collection_label,
        make_other_collection_label,
        make_schema_collection_label,
        make_other_collection_label,
    )(bundle_db, info, collection_lidvid, bundle_lidvid, verify, mod_date)
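

# Hedged usage sketch (not part of the pipeline): writing a collection label to
# disk.  The output path and LIDVIDs are illustrative; `info` is assumed to be
# a Citation_Information built elsewhere.
def _example_write_collection_label(
    bundle_db: BundleDB, info: Citation_Information
) -> None:
    label = make_collection_label(
        bundle_db,
        info,
        "urn:nasa:pds:hst_09059:data_acs_raw::1.0",  # collection LIDVID
        "urn:nasa:pds:hst_09059::1.0",  # bundle LIDVID
        verify=True,  # validate against the XML and Schematron schemas
    )
    with open("collection_data_acs_raw.xml", "wb") as f:
        f.write(label)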
def make_schema_collection_label(
    bundle_db: BundleDB,
    info: Citation_Information,
    collection_lidvid: str,
    bundle_lidvid: str,
    verify: bool,
    mod_date: str,
) -> bytes:
    """
    Create the label text for the schema collection having this LIDVID
    using the bundle database.  If verify is True, verify the label
    against its XML and Schematron schemas.  Raise an exception if
    either fails.
    """
    # TODO: this is sloppy; is there a better way?
    products = bundle_db.get_schema_products()
    record_count = len(products)
    if record_count <= 0:
        raise ValueError(f"{collection_lidvid} has no schema products.")

    collection_lid = lidvid_to_lid(collection_lidvid)
    collection_vid = lidvid_to_vid(collection_lidvid)
    collection: Collection = bundle_db.get_collection(collection_lidvid)
    proposal_id = bundle_db.get_bundle(bundle_lidvid).proposal_id
    instruments = ",".join(bundle_db.get_instruments_of_the_bundle()).upper()
    title: NodeBuilder = make_schema_collection_title(
        {
            "instrument": instruments,
            "proposal_id": str(proposal_id),
        }
    )

    inventory_name = get_collection_inventory_name(bundle_db, collection_lidvid)
    try:
        label = (
            make_label(
                {
                    "collection_lid": collection_lid,
                    "collection_vid": collection_vid,
                    "record_count": record_count,
                    "title": title,
                    "mod_date": mod_date,
                    "proposal_id": str(proposal_id),
                    "Citation_Information": make_citation_information(info),
                    "inventory_name": inventory_name,
                    "Context_Area": combine_nodes_into_fragment([]),
                    "Reference_List": combine_nodes_into_fragment([]),
                    "collection_type": "Schema",
                }
            )
            .toxml()
            .encode()
        )
    except Exception as e:
        raise LabelError(collection_lidvid) from e

    return pretty_and_verify(label, verify)
def make_collection_inventory(bundle_db: BundleDB, collection_lidvid: str) -> bytes:
    """
    Create the inventory text for the collection having this LIDVID
    using the bundle database.
    """
    collection = bundle_db.get_collection(collection_lidvid)
    return switch_on_collection_subtype(
        collection,
        make_context_collection_inventory,
        make_document_collection_inventory,
        make_schema_collection_inventory,
        make_other_collection_inventory,
    )(bundle_db, collection_lidvid)
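

# Hedged usage sketch (not part of the pipeline): the inventory bytes are
# typically written next to the collection label, under the name resolved by
# get_collection_inventory_name().  The path and LIDVID are illustrative.
def _example_write_collection_inventory(bundle_db: BundleDB) -> None:
    collection_lidvid = "urn:nasa:pds:hst_09059:data_acs_raw::1.0"
    inventory = make_collection_inventory(bundle_db, collection_lidvid)
    inventory_name = get_collection_inventory_name(bundle_db, collection_lidvid)
    with open(inventory_name, "wb") as f:
        f.write(inventory)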
def make_browse_product_label(
    bundle_db: BundleDB,
    browse_collection_lidvid: str,
    browse_product_lidvid: str,
    browse_file_basename: str,
    bundle_lidvid: str,
    verify: bool,
) -> bytes:
    """
    Create the label text for the browse product having the given
    LIDVID using the bundle database.  If verify is True, verify the
    label against its XML and Schematron schemas.  Raise an exception
    if either fails.
    """
    product: Product = bundle_db.get_product(browse_product_lidvid)
    if not isinstance(product, BrowseProduct):
        raise TypeError(f"{product} is not a BrowseProduct.")
    browse_product: BrowseProduct = product
    fits_product_lidvid = browse_product.fits_product_lidvid

    file: File = bundle_db.get_file(browse_file_basename, browse_product_lidvid)
    if not isinstance(file, BrowseFile):
        raise TypeError(f"{file} is not a BrowseFile.")
    browse_file: BrowseFile = file

    collection: Collection = bundle_db.get_collection(browse_collection_lidvid)
    if not isinstance(collection, OtherCollection):
        raise TypeError(f"{collection} is not an OtherCollection.")
    browse_collection: OtherCollection = collection

    bundle = bundle_db.get_bundle(bundle_lidvid)

    try:
        label = (
            make_label(
                {
                    "proposal_id": bundle.proposal_id,
                    "suffix": browse_collection.suffix,
                    "browse_lid": lidvid_to_lid(browse_product_lidvid),
                    "browse_vid": lidvid_to_vid(browse_product_lidvid),
                    "data_lidvid": fits_product_lidvid,
                    "browse_file_name": browse_file_basename,
                    "object_length": browse_file.byte_size,
                }
            )
            .toxml()
            .encode()
        )
    except Exception as e:
        raise LabelError(browse_product_lidvid, browse_file_basename) from e

    return pretty_and_verify(label, verify)
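

# Hedged usage sketch (not part of the pipeline): building a browse product
# label.  All LIDVIDs and the file basename are illustrative; the browse
# product and its file must already exist in the bundle database.
def _example_write_browse_label(bundle_db: BundleDB) -> None:
    label = make_browse_product_label(
        bundle_db,
        "urn:nasa:pds:hst_09059:browse_acs_raw::1.0",  # browse collection LIDVID
        "urn:nasa:pds:hst_09059:browse_acs_raw:j6gp01mkq::1.0",  # browse product LIDVID
        "j6gp01mkq_raw.jpg",  # browse file basename
        "urn:nasa:pds:hst_09059::1.0",  # bundle LIDVID
        verify=True,
    )
    with open("j6gp01mkq_raw.xml", "wb") as f:
        f.write(label)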
def make_other_collection_label(
    bundle_db: BundleDB,
    info: Citation_Information,
    collection_lidvid: str,
    bundle_lidvid: str,
    verify: bool,
    mod_date: str,
) -> bytes:
    """
    Create the label text for the document, browse, or data collection
    having this LIDVID using the bundle database.  If verify is True,
    verify the label against its XML and Schematron schemas.  Raise an
    exception if either fails.
    """
    # TODO: this is sloppy; is there a better way?
    products = bundle_db.get_collection_products(collection_lidvid)
    record_count = len(products)
    if record_count <= 0:
        raise ValueError(f"{collection_lidvid} has no products.")

    collection_lid = lidvid_to_lid(collection_lidvid)
    collection_vid = lidvid_to_vid(collection_lidvid)
    collection: Collection = bundle_db.get_collection(collection_lidvid)
    proposal_id = bundle_db.get_bundle(bundle_lidvid).proposal_id
    instruments = ",".join(bundle_db.get_instruments_of_the_bundle()).upper()

    def make_ctxt_coll_title(_coll: Collection) -> NodeBuilder:
        return make_context_collection_title(
            {
                "instrument": instruments,
                "proposal_id": str(proposal_id),
            }
        )

    def make_doc_coll_title(_coll: Collection) -> NodeBuilder:
        return make_document_collection_title(
            {
                "instrument": instruments,
                "proposal_id": str(proposal_id),
            }
        )

    def make_sch_coll_title(_coll: Collection) -> NodeBuilder:
        return make_schema_collection_title(
            {
                "instrument": instruments,
                "proposal_id": str(proposal_id),
            }
        )

    def make_other_coll_title(coll: Collection) -> NodeBuilder:
        other_collection = cast(OtherCollection, coll)
        if other_collection.prefix == "browse":
            collection_title = (
                f"{other_collection.prefix.capitalize()} "
                + f"collection of {other_collection.instrument.upper()} "
                + f"observations obtained from HST Observing Program {proposal_id}."
            )
        else:
            # Get the data/misc collection title from the database.
            collection_title = str(other_collection.title)
        return make_other_collection_title({"collection_title": collection_title})

    title: NodeBuilder = switch_on_collection_subtype(
        collection,
        make_ctxt_coll_title,
        make_doc_coll_title,
        make_sch_coll_title,
        make_other_coll_title,
    )(collection)

    inventory_name = get_collection_inventory_name(bundle_db, collection_lidvid)

    # Properly assign the collection type for a Document, Browse, or Data
    # collection.  The Context_Area and Reference_List nodes only exist in a
    # data collection label.
    context_node: List[NodeBuilder] = []
    reference_list_node: List[NodeBuilder] = []
    collection_type: str = ""
    type_name = type(collection).__name__
    if type_name == "DocumentCollection":
        collection_type = "Document"
        # For a document collection, we need to count all handbooks in the csv,
        # even though we won't create labels for them.
        inst_list = bundle_db.get_instruments_of_the_bundle()
        record_count += 2 * len(inst_list)
    elif type_name == "OtherCollection":
        collection_type = cast(OtherCollection, collection).prefix.capitalize()
        suffix = cast(OtherCollection, collection).suffix
        instrument = cast(OtherCollection, collection).instrument
        # The roll-up (Context_Area node) only exists in a data collection.
        if collection_type == "Data":
            # Get min start_time and max stop_time.
            start_time, stop_time = bundle_db.get_roll_up_time_from_db(suffix)
            # Make sure the start/stop times exist in the database.
            if start_time is None:
                raise ValueError("Start time is not stored in FitsProduct table.")
            if stop_time is None:
                raise ValueError("Stop time is not stored in FitsProduct table.")
            start_stop_times = {
                "start_date_time": start_time,
                "stop_date_time": stop_time,
            }
            time_coordinates_node = get_time_coordinates(start_stop_times)

            # Dictionary used for the primary result summary.
            primary_result_dict: Dict[str, Any] = {}
            # Check if it's a raw or calibrated image; we will update this later.
            processing_level = get_processing_level(
                suffix=suffix, instrument_id=instrument.upper()
            )
            primary_result_dict["processing_level"] = processing_level
            p_title = bundle_db.get_fits_product_collection_title(collection_lidvid)
            primary_result_dict["description"] = p_title
            # Get unique wavelength names for the roll-up in the data collection.
            wavelength_range = bundle_db.get_wavelength_range_from_db(suffix)
            primary_result_dict["wavelength_range"] = wavelength_range
            primary_result_summary_node = primary_result_summary(primary_result_dict)

            # Get the list of target identification nodes for the collection.
            target_identifications = bundle_db.get_all_target_identification()
            target_identification_nodes: List[NodeBuilder] = []
            target_identification_nodes = create_target_identification_nodes(
                bundle_db, target_identifications, "collection"
            )

            # Get the investigation node for the collection.
            investigation_area_name = mk_Investigation_Area_name(proposal_id)
            investigation_area_lidvid = mk_Investigation_Area_lidvid(proposal_id)
            investigation_area_node = investigation_area(
                investigation_area_name, investigation_area_lidvid, "collection"
            )

            # Get the observing system node for the collection.
            observing_system_node = observing_system(instrument)

            context_node = [
                make_collection_context_node(
                    time_coordinates_node,
                    primary_result_summary_node,
                    investigation_area_node,
                    observing_system_node,
                    target_identification_nodes,
                )
            ]

            # The document Reference_List only exists in a data collection.
            reference_list_node = [
                make_document_reference_list([instrument], "collection")
            ]

    try:
        label = (
            make_label(
                {
                    "collection_lid": collection_lid,
                    "collection_vid": collection_vid,
                    "record_count": record_count,
                    "title": title,
                    "mod_date": mod_date,
                    "proposal_id": str(proposal_id),
                    "Citation_Information": make_citation_information(info),
                    "inventory_name": inventory_name,
                    "Context_Area": combine_nodes_into_fragment(context_node),
                    "collection_type": collection_type,
                    "Reference_List": combine_nodes_into_fragment(reference_list_node),
                }
            )
            .toxml()
            .encode()
        )
    except Exception as e:
        raise LabelError(collection_lidvid) from e

    return pretty_and_verify(label, verify)
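

# Hedged usage sketch (not part of the pipeline): calling
# make_other_collection_label() directly with an explicit modification date.
# In normal use it is reached through make_collection_label(), which computes
# mod_date itself; the LIDVIDs and the date format (assumed YYYY-MM-DD) are
# illustrative.
def _example_data_collection_label(
    bundle_db: BundleDB, info: Citation_Information
) -> bytes:
    # For a data collection, the label gains a Context_Area roll-up and a
    # Reference_List; document and browse collections omit both.
    return make_other_collection_label(
        bundle_db,
        info,
        "urn:nasa:pds:hst_09059:data_acs_raw::1.0",  # data collection LIDVID
        "urn:nasa:pds:hst_09059::1.0",  # bundle LIDVID
        verify=False,
        mod_date="2021-01-01",
    )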
def make_fits_product_label(
    working_dir: str,
    bundle_db: BundleDB,
    collection_lidvid: str,
    product_lidvid: str,
    bundle_lidvid: str,
    file_basename: str,
    verify: bool,
    use_mod_date_for_testing: bool = False,
) -> bytes:
    """
    Create the label text for the FITS product having this LIDVID using
    the bundle database.  If verify is True, verify the label against
    its XML and Schematron schemas.  Raise an exception if either fails.
    """
    try:
        product = bundle_db.get_product(product_lidvid)
        collection = bundle_db.get_collection(collection_lidvid)
        if not isinstance(collection, OtherCollection):
            raise TypeError(f"{collection} is not an OtherCollection.")
        instrument = collection.instrument
        suffix = collection.suffix

        # If a label is created for testing purposes, to compare with pre-made
        # XML, we use MOD_DATE_FOR_TESTESING as the modification date;
        # otherwise we use the date when the label is created.
        if not use_mod_date_for_testing:
            mod_date = get_current_date()
        else:
            mod_date = MOD_DATE_FOR_TESTESING

        card_dicts = bundle_db.get_card_dictionaries(product_lidvid, file_basename)
        lookup = DictLookup(file_basename, card_dicts)
        siblings = _directory_siblings(working_dir, bundle_db, product_lidvid)
        hdu_lookups = _find_RAWish_lookups(
            bundle_db, product_lidvid, file_basename, siblings
        )
        shm_lookup = _find_SHMish_lookup(
            bundle_db, product_lidvid, file_basename, siblings
        )

        start_date_time, stop_date_time = get_start_stop_date_times(
            hdu_lookups, shm_lookup
        )
        exposure_duration = get_exposure_duration(hdu_lookups, shm_lookup)
        start_stop_times = {
            "start_date_time": start_date_time,
            "stop_date_time": stop_date_time,
            "exposure_duration": exposure_duration,
        }
        # Store start/stop times for each fits_product in the fits_products
        # table.  The min/max will be pulled out for the roll-up in the data
        # collection/bundle.
        bundle_db.update_fits_product_time(
            product_lidvid, start_date_time, stop_date_time
        )

        hst_parameters = get_hst_parameters(hdu_lookups, shm_lookup)
        bundle = bundle_db.get_bundle(bundle_lidvid)
        proposal_id = bundle.proposal_id

        investigation_area_name = mk_Investigation_Area_name(proposal_id)
        investigation_area_lidvid = mk_Investigation_Area_lidvid(proposal_id)
        bundle_db.create_context_product(investigation_area_lidvid, "investigation")
        bundle_db.create_context_product(instrument_host_lidvid(), "instrument_host")
        bundle_db.create_context_product(
            observing_system_lidvid(instrument), "instrument"
        )

        # Fetch target identifications from the database.
        target_id = shm_lookup["TARG_ID"]
        target_identifications = bundle_db.get_target_identifications_based_on_id(
            target_id
        )
        # At this stage, target identifications should already be in the database.
        if len(target_identifications) == 0:
            raise ValueError("Target identification is not stored in db.")
        target_identification_nodes: List[NodeBuilder] = []
        target_identification_nodes = create_target_identification_nodes(
            bundle_db, target_identifications, "data"
        )

        # Get the wavelength range.
        instrument_id = get_instrument_id(hdu_lookups, shm_lookup)
        detector_ids = get_detector_ids(hdu_lookups, shm_lookup)
        filter_name = get_filter_name(hdu_lookups, shm_lookup)
        wavelength_range = wavelength_ranges(instrument_id, detector_ids, filter_name)
        bundle_db.update_wavelength_range(product_lidvid, wavelength_range)

        # Get the product and collection titles.
        channel_id = get_channel_id(hdu_lookups, shm_lookup)
        try:
            titles = get_titles_format(instrument_id, channel_id, suffix)
            product_title = titles[0] + "."
            product_title = product_title.format(
                I=instrument_id + "/" + channel_id, F=file_basename, P=proposal_id
            )
            collection_title = titles[1] + "."
            collection_title = collection_title.format(
                I=instrument_id + "/" + channel_id, F=file_basename, P=proposal_id
            )
            # Save the data/misc collection title to the OtherCollection table.
            bundle_db.update_fits_product_collection_title(
                collection_lidvid, collection_title
            )
        except KeyError:
            # If product_title doesn't exist in SUFFIX_TITLES, we use the
            # following text as the product_title.
            product_title = (
                f"{instrument_id} data file {file_basename} "
                + f"obtained by the HST Observing Program {proposal_id}."
            )

        # Dictionary used for the primary result summary.
        processing_level = get_processing_level(suffix, instrument_id, channel_id)
        primary_result_dict: Dict[str, Any] = {}
        primary_result_dict["processing_level"] = processing_level
        primary_result_dict["description"] = product_title
        primary_result_dict["wavelength_range"] = wavelength_range

        # Dictionary passed into the templates.  Use the same data dictionary
        # for either the data label template or the misc label template.
        data_dict = {
            "lid": lidvid_to_lid(product_lidvid),
            "vid": lidvid_to_vid(product_lidvid),
            "title": product_title,
            "mod_date": mod_date,
            "file_name": file_basename,
            "file_contents": get_file_contents(
                bundle_db, card_dicts, instrument, product_lidvid
            ),
            "Investigation_Area": investigation_area(
                investigation_area_name, investigation_area_lidvid, "data"
            ),
            "Observing_System": observing_system(instrument),
            "Time_Coordinates": get_time_coordinates(start_stop_times),
            "Target_Identification": combine_nodes_into_fragment(
                target_identification_nodes
            ),
            "HST": hst_parameters,
            "Primary_Result_Summary": primary_result_summary(primary_result_dict),
            "Reference_List": make_document_reference_list([instrument], "data"),
        }

        # Pass the data_dict to either the data label or the misc label based
        # on the collection_type.
        collection_type = get_collection_type(suffix, instrument_id, channel_id)
        if collection_type == "data":
            label = make_data_label(data_dict).toxml().encode()
        elif collection_type == "miscellaneous":
            label = make_misc_label(data_dict).toxml().encode()
    except AssertionError:
        raise AssertionError(
            f"{product_lidvid} has no target identifications stored in DB."
        )
    except Exception as e:
        print(str(e))
        raise LabelError(
            product_lidvid, file_basename, (lookup, hdu_lookups[0], shm_lookup)
        ) from e

    return pretty_and_verify(label, verify)
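

# Hedged usage sketch (not part of the pipeline): building the label for one
# FITS product.  The working directory, LIDVIDs, and file basename are
# illustrative; the product, its FITS cards, and its target identifications
# must already be populated in the bundle database.
def _example_write_fits_product_label(bundle_db: BundleDB) -> None:
    label = make_fits_product_label(
        "/tmp/hst_09059_working_dir",  # working_dir holding the downloaded files
        bundle_db,
        "urn:nasa:pds:hst_09059:data_acs_raw::1.0",  # collection LIDVID
        "urn:nasa:pds:hst_09059:data_acs_raw:j6gp01mkq::1.0",  # product LIDVID
        "urn:nasa:pds:hst_09059::1.0",  # bundle LIDVID
        "j6gp01mkq_raw.fits",  # file basename
        verify=True,
    )
    with open("j6gp01mkq_raw.xml", "wb") as f:
        f.write(label)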