def make_bundle_context_node(
    time_coordinates_node: NodeBuilder,
    primary_result_summary_node: NodeBuilder,
    investigation_area_node: NodeBuilder,
    observing_system_nodes: List[NodeBuilder],
    target_identification_nodes: List[NodeBuilder],
) -> NodeBuilder:
    """
    Assemble the ``<Context_Area />`` node of a bundle label from its
    pre-built child node builders.  The list-valued arguments are merged
    into fragments so the template can splice in a variable number of
    ``<Observing_System />`` and ``<Target_Identification />`` children.
    """
    template = interpret_template(
        """<Context_Area> <NODE name="Time_Coordinates" /> <NODE name="Primary_Result_Summary" /> <NODE name="Investigation_Area" /> <FRAGMENT name="Observing_System" /> <FRAGMENT name="Target_Identification" /> </Context_Area>"""
    )
    bindings = {
        "Time_Coordinates": time_coordinates_node,
        "Primary_Result_Summary": primary_result_summary_node,
        "Investigation_Area": investigation_area_node,
        "Observing_System": combine_nodes_into_fragment(observing_system_nodes),
        "Target_Identification": combine_nodes_into_fragment(
            target_identification_nodes
        ),
    }
    return template(bindings)
def make_schema_collection_label(
    bundle_db: BundleDB,
    info: Citation_Information,
    collection_lidvid: str,
    bundle_lidvid: str,
    verify: bool,
    mod_date: str,
) -> bytes:
    """
    Create the label text for the schema collection having this LIDVID
    using the bundle database.

    If verify is True, verify the label against its XML and Schematron
    schemas.  Raise an exception if either fails.
    """
    # TODO this is sloppy; is there a better way?
    schema_products = bundle_db.get_schema_products()
    record_count = len(schema_products)
    if record_count <= 0:
        raise ValueError(f"{collection_lidvid} has no schema products.")

    collection_lid = lidvid_to_lid(collection_lidvid)
    collection_vid = lidvid_to_vid(collection_lidvid)
    # Looked up but unused below; presumably kept for its lookup side
    # effect — TODO confirm it is safe to drop.
    collection: Collection = bundle_db.get_collection(collection_lidvid)
    proposal_id = bundle_db.get_bundle(bundle_lidvid).proposal_id
    instruments = ",".join(bundle_db.get_instruments_of_the_bundle()).upper()

    title: NodeBuilder = make_schema_collection_title(
        {
            "instrument": instruments,
            "proposal_id": str(proposal_id),
        }
    )
    inventory_name = get_collection_inventory_name(bundle_db, collection_lidvid)

    try:
        # Any failure while assembling or serializing the label is
        # reported as a LabelError carrying the collection LIDVID.
        label_params = {
            "collection_lid": collection_lid,
            "collection_vid": collection_vid,
            "record_count": record_count,
            "title": title,
            "mod_date": mod_date,
            "proposal_id": str(proposal_id),
            "Citation_Information": make_citation_information(info),
            "inventory_name": inventory_name,
            # Schema collections carry no context area or reference list.
            "Context_Area": combine_nodes_into_fragment([]),
            "Reference_List": combine_nodes_into_fragment([]),
            "collection_type": "Schema",
        }
        label = make_label(label_params).toxml().encode()
    except Exception as e:
        raise LabelError(collection_lidvid) from e

    return pretty_and_verify(label, verify)
def target_identification(
    target_name: str,
    target_type: str,
    alternate_designations: str,
    target_description: str,
    target_lid: str,
    reference_type: str,
) -> NodeBuilder:
    """
    Given the info of target identifications, return a function that
    takes a document and returns a filled-out
    ``<Target_Identification />`` XML node, used in product labels.
    """
    # An empty string must yield no designation nodes at all — note that
    # "".split("\n") would give [""], hence the explicit guard.
    if len(alternate_designations) != 0:
        designation_values = alternate_designations.split("\n")
    else:
        designation_values = []
    designation_nodes: List[NodeBuilder] = [
        _make_alternate_designation(value) for value in designation_values
    ]

    description_nodes: List[NodeBuilder] = []
    if len(target_description) != 0:
        # properly align multi line textnodes with 8 spaces
        pad = " " * 8
        padded_description = pad + target_description.replace("\n", "\n" + pad)
        description_nodes.append(make_description(padded_description))

    template = interpret_template(
        """<Target_Identification> <name><NODE name="name"/></name> <FRAGMENT name="alternate_designations"/> <type><NODE name="type"/></type> <FRAGMENT name="description"/> <Internal_Reference> <lid_reference><NODE name="target_lid"/></lid_reference> <reference_type><NODE name="reference_type"/></reference_type> </Internal_Reference> </Target_Identification>"""
    )
    return template(
        {
            "name": target_name,
            "type": target_type,
            "alternate_designations": combine_nodes_into_fragment(
                designation_nodes
            ),
            "description": combine_nodes_into_fragment(description_nodes),
            "target_lid": target_lid,
            "reference_type": reference_type,
        }
    )
def make_document_reference_list(
    instruments: list,
    ref: str,
) -> NodeBuilder:
    """
    Given a list of instruments, return a <Reference_List> node in the
    label.

    For each instrument, two internal references are emitted: one for
    the HST data handbook and one for the HST instrument handbook, both
    using the reference type ``f"{ref}_to_document"``.
    """
    # The reference type depends only on `ref`, not on the instrument,
    # so hoist it out of the loop (it was recomputed every iteration).
    ref_type = f"{ref}_to_document"
    internal_reference_nodes: List[NodeBuilder] = []
    for instrument in instruments:
        # Data handbook reference for this instrument.
        data_handbook_node = _make_doc_internal_ref(
            get_hst_data_hand_book_lid(instrument),
            ref_type,
            get_hst_data_hand_book_comment(instrument),
        )
        internal_reference_nodes.append(data_handbook_node)
        # Instrument handbook reference for this instrument.
        inst_handbook_node = _make_doc_internal_ref(
            get_hst_inst_hand_book_lid(instrument),
            ref_type,
            get_hst_inst_hand_book_comment(instrument),
        )
        internal_reference_nodes.append(inst_handbook_node)
    return reference_list(
        {
            "internal_reference": combine_nodes_into_fragment(
                internal_reference_nodes
            ),
        }
    )
def _mk_axis_arrays(card_dicts: List[Dict[str, Any]], hdu_index: int, axes: int) -> FragBuilder:
    """
    Build one ``<Axis_Array />`` node per axis (1-based, FITS style) of
    the given HDU and combine them into a single fragment.
    """
    hdu_cards = card_dicts[hdu_index]

    def build_axis(axis: int) -> NodeBuilder:
        # TODO Check the semantics of sequence_number
        return axis_array({
            "axis_name": AXIS_NAME_TABLE[axis],
            "elements": str(hdu_cards[f"NAXIS{axis}"]),
            "sequence_number": str(axis),
        })

    return combine_nodes_into_fragment(
        [build_axis(axis) for axis in range(1, axes + 1)]
    )
def primary_result_summary(result_dict: Dict[str, Any]) -> NodeBuilder:
    """
    Given an instrument, return an interpreted fragment template to
    create an ``<Primary_Result_Summary />`` XML element.

    ``result_dict`` is expected to carry "processing_level",
    "description", and "wavelength_range" entries.
    """
    wavelength_nodes: List[NodeBuilder] = [
        _make_wavelength_range(band)
        for band in result_dict["wavelength_range"]
    ]
    summary_params = {
        "processing_level": result_dict["processing_level"],
        "description": result_dict["description"],
        "wavelength_range": combine_nodes_into_fragment(wavelength_nodes),
    }
    return _primary_result_summary(summary_params)
def make_document_edition(edition_name: str, file_basenames: List[str]) -> NodeBuilder:
    """
    Build a document edition node for the given edition name and its
    constituent document files (language is fixed to English).
    """
    document_file_nodes: List[NodeBuilder] = []
    for basename in file_basenames:
        standard_id = _get_document_standard_id(basename)
        document_file_nodes.append(_make_document_file(basename, standard_id))
    return _make_document_edition({
        "edition_name": edition_name,
        "language": "English",
        "files": len(file_basenames),
        "document_files": combine_nodes_into_fragment(document_file_nodes),
    })
def make_bundle_label(
    bundle_db: BundleDB,
    bundle_lidvid: str,
    info: Citation_Information,
    verify: bool,
    use_mod_date_for_testing: bool = False,
) -> bytes:
    """
    Create the label text for the bundle in the bundle database using
    the database connection. If verify is True, verify the label against
    its XML and Schematron schemas. Raise an exception if either fails.
    """
    bundle = bundle_db.get_bundle(bundle_lidvid)
    proposal_id = bundle.proposal_id

    def get_ref_type(collection: Collection) -> str:
        # Map the collection subtype to its bundle-member reference
        # type; "other" collections derive the type from their prefix
        # (e.g. "bundle_has_browse_collection").
        ref_type = switch_on_collection_subtype(
            collection,
            "bundle_has_context_collection",
            "bundle_has_document_collection",
            "bundle_has_schema_collection",
            "bundle_has_other_collection",
        )
        if ref_type == "bundle_has_other_collection":
            collection_type = cast(OtherCollection, collection).prefix
            ref_type = f"bundle_has_{collection_type}_collection"
        return ref_type

    # One <Bundle_Member_Entry /> builder per member collection.
    reduced_collections = [
        make_bundle_entry_member({
            "collection_lidvid": collection.lidvid,
            "ref_type": get_ref_type(collection),
        })
        for collection in bundle_db.get_bundle_collections(bundle.lidvid)
    ]

    # Get the bundle title from part of CitationInformation description
    title = (info.title + ", HST Cycle " + str(info.cycle) + " Program "
             + str(info.propno) + ", " + info.publication_year + ".")

    # Get the list of target identifications nodes for the collection
    target_identifications = bundle_db.get_all_target_identification()
    target_identification_nodes: List[NodeBuilder] = []
    target_identification_nodes = create_target_identification_nodes(
        bundle_db, target_identifications, "bundle")

    # Get the investigation node for the collection
    investigation_area_name = mk_Investigation_Area_name(proposal_id)
    investigation_area_lidvid = mk_Investigation_Area_lidvid(proposal_id)
    investigation_area_node = investigation_area(investigation_area_name,
                                                 investigation_area_lidvid,
                                                 "bundle")

    # Get min start_time and max stop_time
    start_time, stop_time = bundle_db.get_roll_up_time_from_db()
    # Make sure start/stop time exists in db.
    if start_time is None:
        raise ValueError("Start time is not stored in FitsProduct table.")
    if stop_time is None:
        raise ValueError("Stop time is not stored in FitsProduct table.")
    start_stop_times = {
        "start_date_time": start_time,
        "stop_date_time": stop_time,
    }
    time_coordinates_node = get_time_coordinates(start_stop_times)

    # Dictionary used for primary result summary
    primary_result_dict: Dict[str, Any] = {}
    # Put dummy value in processing level, wait for update.
    primary_result_dict["processing_level"] = "Raw"
    instruments_list = bundle_db.get_instruments_of_the_bundle()
    instruments = ", ".join(instruments_list).upper()
    p_title = (f"{instruments} observations obtained by the HST " +
               f"Observing Program {proposal_id}.")
    primary_result_dict["description"] = p_title

    # Get unique wavelength names for roll-up in bundle
    wavelength_range = bundle_db.get_wavelength_range_from_db()
    primary_result_dict["wavelength_range"] = wavelength_range
    primary_result_summary_node = primary_result_summary(primary_result_dict)

    # Get the observing system node for the bundle
    observing_system_nodes: List[NodeBuilder] = [
        observing_system(instrument) for instrument in instruments_list
    ]

    # Single context node wrapping all of the above roll-up data.
    context_node: List[NodeBuilder] = []
    context_node = [
        make_bundle_context_node(
            time_coordinates_node,
            primary_result_summary_node,
            investigation_area_node,
            observing_system_nodes,
            target_identification_nodes,
        )
    ]

    if not use_mod_date_for_testing:
        # Get the date when the label is created
        mod_date = get_current_date()
    else:
        mod_date = MOD_DATE_FOR_TESTESING
    try:
        # Any failure during assembly/serialization is reported as a
        # LabelError carrying the bundle LIDVID.
        label = (make_label({
            "bundle_lid": lidvid_to_lid(bundle.lidvid),
            "bundle_vid": lidvid_to_vid(bundle.lidvid),
            "proposal_id": str(proposal_id),
            "title": title,
            "Citation_Information": make_citation_information(info,
                                                              is_for_bundle=True),
            "mod_date": mod_date,
            "Bundle_Member_Entries": combine_nodes_into_fragment(reduced_collections),
            "Context_Area": combine_nodes_into_fragment(context_node),
            "Reference_List": make_document_reference_list(instruments_list,
                                                           "bundle"),
        }).toxml().encode())
    except Exception as e:
        raise LabelError(bundle.lidvid) from e
    # Sanity check: serialized label must begin with an XML declaration.
    if label[:6] != b"<?xml ":
        raise ValueError("Bundle label is not XML.")
    return pretty_and_verify(label, verify)
def make_investigation_label(
    bundle_db: BundleDB,
    bundle_lidvid: str,
    info: Citation_Information,
    verify: bool,
    use_mod_date_for_testing: bool = False,
) -> bytes:
    """
    Create the label text for the context investigation having this
    LIDVID using the bundle database. If verify is True, verify the
    label against its XML and Schematron schemas. Raise an exception if
    either fails.
    """
    bundle = bundle_db.get_bundle(bundle_lidvid)
    proposal_id = bundle.proposal_id

    # Get the bundle title from part of CitationInformation description
    title = (
        info.title
        + ", HST Cycle "
        + str(info.cycle)
        + " Program "
        + str(info.propno)
        + ", "
        + info.publication_year
        + "."
    )

    investigation_lid = mk_Investigation_Area_lid(proposal_id)
    investigation_lidvid = mk_Investigation_Area_lidvid(proposal_id)

    # Get min start_time and max stop_time
    start_time, stop_time = bundle_db.get_roll_up_time_from_db()
    # Make sure start/stop time exists in db.
    if start_time is None:
        raise ValueError("Start time is not stored in FitsProduct table.")
    if stop_time is None:
        raise ValueError("Stop time is not stored in FitsProduct table.")
    start_date = date_time_to_date(start_time)
    stop_date = date_time_to_date(stop_time)

    # Build one internal reference per context product associated with
    # this investigation.
    context_products = bundle_db.get_reference_context_products(investigation_lidvid)
    internal_reference_nodes: List[NodeBuilder] = []
    for product in context_products:
        ref_lid = lidvid_to_lid(product.lidvid)
        ref_type = f"investigation_to_{product.ref_type}"
        ref_node = make_internal_ref(ref_lid, ref_type)
        internal_reference_nodes.append(ref_node)

    description = info.abstract_formatted(indent=8)  # type: ignore
    # NOTE(review): the join below only makes sense if
    # abstract_formatted() returns an iterable of lines; if it returned
    # a plain string, "\n".join(...) would interleave newlines between
    # characters — confirm upstream return type.
    if len(description) != 0:
        description = "\n".join(description)
    else:
        description = " " * 8 + "None"
    description_nodes: List[NodeBuilder] = [make_description(description)]

    if not use_mod_date_for_testing:
        # Get the date when the label is created
        mod_date = get_current_date()
    else:
        mod_date = MOD_DATE_FOR_TESTESING
    try:
        # Any failure during assembly/serialization is reported as a
        # LabelError carrying the investigation LID.
        label = (
            make_label(
                {
                    "investigation_lid": investigation_lid,
                    "bundle_vid": lidvid_to_vid(bundle.lidvid),
                    "title": title,
                    "mod_date": mod_date,
                    "start_date": start_date,
                    "stop_date": stop_date,
                    "internal_reference": combine_nodes_into_fragment(
                        internal_reference_nodes
                    ),
                    "description": combine_nodes_into_fragment(description_nodes),
                }
            )
            .toxml()
            .encode()
        )
    except Exception as e:
        raise LabelError(investigation_lid) from e
    return pretty_and_verify(label, verify)
def get_hdu_contents(hdu_index: int, hdrLoc: int, datLoc: int, datSpan: int) -> FragBuilder:
    """
    Return an XML fragment containing the needed ``<Header />`` and
    ``<Array />`` or ``<Array_2D_Image />`` elements for the FITS file's
    HDUs.
    """
    # NOTE(review): card_dicts, fits_product_lidvid and instrument are
    # free variables resolved from an enclosing scope not visible here
    # — this function appears to be defined inside another function.
    local_identifier = f"hdu_{hdu_index}"
    offset = str(hdrLoc)
    # Header occupies [hdrLoc, datLoc) in the file.
    object_length = str(datLoc - hdrLoc)
    description = "Global FITS Header" if hdu_index == 0 else "FITS Header"
    header = header_contents({
        "local_identifier": local_identifier,
        "offset": offset,
        "object_length": object_length,
        "description": description,
    })
    if datSpan:
        # The HDU has a data section: describe it per its NAXIS count.
        hdu_card_dict = card_dicts[hdu_index]
        bitpix = int(hdu_card_dict["BITPIX"])
        axes = int(hdu_card_dict["NAXIS"])
        data_type = BITPIX_TABLE[bitpix]
        elmt_arr = element_array({"data_type": data_type})
        if axes not in [1, 2, 3]:
            raise ValueError(
                f"NAXIS = {axes} in hdu #{hdu_index} in {fits_product_lidvid}."
            )
        if axes == 1:
            data = data_1d_contents({
                "offset": str(datLoc),
                "Element_Array": elmt_arr,
                "Axis_Arrays": _mk_axis_arrays(card_dicts, hdu_index, axes),
            })
            node_functions = [header, data]
        elif axes == 2:
            data = data_2d_contents({
                "offset": str(datLoc),
                "Element_Array": elmt_arr,
                "Axis_Arrays": _mk_axis_arrays(card_dicts, hdu_index, axes),
            })
            node_functions = [header, data]
        elif axes == 3:
            # "3-D" images from WFPC2 are really four separate 2-D
            # images. We document them as such. Well, four or
            # two. Well, four or two or maybe something else...
            # Aw, we'll say four or two for now.
            if instrument != "wfpc2":
                raise ValueError(f"NAXIS=3 and instrument={instrument}.")
            naxis3 = int(hdu_card_dict["NAXIS3"])
            if naxis3 not in [2, 4]:
                raise ValueError(f"NAXIS1={hdu_card_dict['NAXIS1']}, " +
                                 f"NAXIS2={hdu_card_dict['NAXIS2']}, " +
                                 f"NAXIS3={hdu_card_dict['NAXIS3']}.")
            # Layers must evenly partition the data span.
            if datSpan % naxis3 != 0:
                raise ValueError(f"datSpan={datSpan} & naxis3={naxis3}")
            layerOffset = datSpan // naxis3
            # Emit one 2-D array element per layer, each at its own
            # byte offset within the data section.
            node_functions = [header]
            for n in range(0, naxis3):
                data = data_2d_contents({
                    "offset": str(datLoc + n * layerOffset),
                    "Element_Array": elmt_arr,
                    "Axis_Arrays": _mk_axis_arrays(card_dicts, hdu_index, 2),
                })
                node_functions.append(data)
    else:
        # Header-only HDU (no data section).
        node_functions = [header]
    return combine_nodes_into_fragment(node_functions)
def make_other_collection_label(
    bundle_db: BundleDB,
    info: Citation_Information,
    collection_lidvid: str,
    bundle_lidvid: str,
    verify: bool,
    mod_date: str,
) -> bytes:
    """
    Create the label text for the document, browse, and data collection
    having this LIDVID using the bundle database. If verify is True,
    verify the label against its XML and Schematron schemas. Raise an
    exception if either fails.
    """
    # TODO this is sloppy; is there a better way?
    products = bundle_db.get_collection_products(collection_lidvid)
    record_count = len(products)
    if record_count <= 0:
        raise ValueError(f"{collection_lidvid} has no products.")

    collection_lid = lidvid_to_lid(collection_lidvid)
    collection_vid = lidvid_to_vid(collection_lidvid)
    collection: Collection = bundle_db.get_collection(collection_lidvid)
    proposal_id = bundle_db.get_bundle(bundle_lidvid).proposal_id
    instruments = ",".join(bundle_db.get_instruments_of_the_bundle()).upper()

    # Per-subtype title builders; exactly one is selected below via
    # switch_on_collection_subtype.
    def make_ctxt_coll_title(_coll: Collection) -> NodeBuilder:
        # Title for a context collection.
        return make_context_collection_title(
            {
                "instrument": instruments,
                "proposal_id": str(proposal_id),
            }
        )

    def make_doc_coll_title(_coll: Collection) -> NodeBuilder:
        # Title for a document collection.
        return make_document_collection_title(
            {
                "instrument": instruments,
                "proposal_id": str(proposal_id),
            }
        )

    def make_sch_coll_title(_coll: Collection) -> NodeBuilder:
        # Title for a schema collection.
        return make_schema_collection_title(
            {
                "instrument": instruments,
                "proposal_id": str(proposal_id),
            }
        )

    def make_other_coll_title(coll: Collection) -> NodeBuilder:
        # Title for browse/data/misc collections: browse titles are
        # synthesized here, the rest come from the database.
        other_collection = cast(OtherCollection, coll)
        if other_collection.prefix == "browse":
            collection_title = (
                f"{other_collection.prefix.capitalize()} " +
                f"collection of {other_collection.instrument.upper()} " +
                f"observations obtained from HST Observing Program {proposal_id}."
            )
        else:
            # Get the data/misc collection title from db.
            collection_title = str(other_collection.title)
        return make_other_collection_title({"collection_title": collection_title})

    title: NodeBuilder = switch_on_collection_subtype(
        collection,
        make_ctxt_coll_title,
        make_doc_coll_title,
        make_sch_coll_title,
        make_other_coll_title,
    )(collection)

    inventory_name = get_collection_inventory_name(bundle_db, collection_lidvid)

    # Properly assign collection type for Document, Browse, or Data collection.
    # Context node only exists in Data collection label.
    # Reference_List only exists in Data collection label.
    context_node: List[NodeBuilder] = []
    reference_list_node: List[NodeBuilder] = []
    collection_type: str = ""
    type_name = type(collection).__name__
    if type_name == "DocumentCollection":
        collection_type = "Document"
        # For document collection, we need to add all handbooks in the csv but
        # we won't create the label for it.
        inst_list = bundle_db.get_instruments_of_the_bundle()
        record_count += 2 * len(inst_list)
    elif type_name == "OtherCollection":
        collection_type = cast(OtherCollection, collection).prefix.capitalize()
        suffix = cast(OtherCollection, collection).suffix
        instrument = cast(OtherCollection, collection).instrument
        # Roll-up (Context node) only exists in data collection
        if collection_type == "Data":
            # Get min start_time and max stop_time
            start_time, stop_time = bundle_db.get_roll_up_time_from_db(suffix)
            # Make sure start/stop time exists in db.
            if start_time is None:
                raise ValueError("Start time is not stored in FitsProduct table.")
            if stop_time is None:
                raise ValueError("Stop time is not stored in FitsProduct table.")
            start_stop_times = {
                "start_date_time": start_time,
                "stop_date_time": stop_time,
            }
            time_coordinates_node = get_time_coordinates(start_stop_times)

            # Dictionary used for primary result summary
            primary_result_dict: Dict[str, Any] = {}
            # Check if it's raw or calibrated image, we will update this later
            processing_level = get_processing_level(
                suffix=suffix, instrument_id=instrument.upper()
            )
            primary_result_dict["processing_level"] = processing_level
            p_title = bundle_db.get_fits_product_collection_title(collection_lidvid)
            primary_result_dict["description"] = p_title

            # Get unique wavelength names for roll-up in data collection
            wavelength_range = bundle_db.get_wavelength_range_from_db(suffix)
            primary_result_dict["wavelength_range"] = wavelength_range
            primary_result_summary_node = primary_result_summary(primary_result_dict)

            # Get the list of target identifications nodes for the collection
            target_identifications = bundle_db.get_all_target_identification()
            target_identification_nodes: List[NodeBuilder] = []
            target_identification_nodes = create_target_identification_nodes(
                bundle_db, target_identifications, "collection"
            )

            # Get the investigation node for the collection
            investigation_area_name = mk_Investigation_Area_name(proposal_id)
            investigation_area_lidvid = mk_Investigation_Area_lidvid(proposal_id)
            investigation_area_node = investigation_area(
                investigation_area_name, investigation_area_lidvid, "collection"
            )
            # Get the observing system node for the collection
            observing_system_node = observing_system(instrument)
            context_node = [
                make_collection_context_node(
                    time_coordinates_node,
                    primary_result_summary_node,
                    investigation_area_node,
                    observing_system_node,
                    target_identification_nodes,
                )
            ]
            # document reference list only exists in data collection
            reference_list_node = [
                make_document_reference_list([instrument], "collection")
            ]
    try:
        # Any failure during assembly/serialization is reported as a
        # LabelError carrying the collection LIDVID.
        label = (
            make_label(
                {
                    "collection_lid": collection_lid,
                    "collection_vid": collection_vid,
                    "record_count": record_count,
                    "title": title,
                    "mod_date": mod_date,
                    "proposal_id": str(proposal_id),
                    "Citation_Information": make_citation_information(info),
                    "inventory_name": inventory_name,
                    "Context_Area": combine_nodes_into_fragment(context_node),
                    "collection_type": collection_type,
                    "Reference_List": combine_nodes_into_fragment(reference_list_node),
                }
            )
            .toxml()
            .encode()
        )
    except Exception as e:
        raise LabelError(collection_lidvid) from e
    return pretty_and_verify(label, verify)
def make_fits_product_label(
    working_dir: str,
    bundle_db: BundleDB,
    collection_lidvid: str,
    product_lidvid: str,
    bundle_lidvid: str,
    file_basename: str,
    verify: bool,
    use_mod_date_for_testing: bool = False,
) -> bytes:
    """
    Create the label text for the FITS product having this LIDVID using
    the bundle database, updating the product's roll-up fields
    (start/stop time, wavelength range, collection title) along the way.
    If verify is True, verify the label against its XML and Schematron
    schemas. Raise an exception if either fails.
    """
    try:
        # Looked up but unused below — presumably kept so a missing
        # product fails early; TODO confirm.
        product = bundle_db.get_product(product_lidvid)
        collection = bundle_db.get_collection(collection_lidvid)
        if not isinstance(collection, OtherCollection):
            raise TypeError(f"{collection} is not OtherCollection.")
        instrument = collection.instrument
        suffix = collection.suffix

        # If a label is created for testing purpose to compare with pre-made XML
        # we will use MOD_DATE_FOR_TESTESING as the modification date.
        if not use_mod_date_for_testing:
            # Get the date when the label is created
            mod_date = get_current_date()
        else:
            mod_date = MOD_DATE_FOR_TESTESING

        card_dicts = bundle_db.get_card_dictionaries(product_lidvid, file_basename)
        lookup = DictLookup(file_basename, card_dicts)
        siblings = _directory_siblings(working_dir, bundle_db, product_lidvid)
        hdu_lookups = _find_RAWish_lookups(bundle_db, product_lidvid,
                                           file_basename, siblings)
        shm_lookup = _find_SHMish_lookup(bundle_db, product_lidvid,
                                         file_basename, siblings)

        start_date_time, stop_date_time = get_start_stop_date_times(
            hdu_lookups, shm_lookup)
        exposure_duration = get_exposure_duration(hdu_lookups, shm_lookup)
        start_stop_times = {
            "start_date_time": start_date_time,
            "stop_date_time": stop_date_time,
            "exposure_duration": exposure_duration,
        }
        # Store start/stop time for each fits_product in fits_products table.
        # The min/max will be pulled out for roll-up in data collection/bundle.
        bundle_db.update_fits_product_time(product_lidvid, start_date_time,
                                           stop_date_time)
        hst_parameters = get_hst_parameters(hdu_lookups, shm_lookup)
        bundle = bundle_db.get_bundle(bundle_lidvid)
        proposal_id = bundle.proposal_id
        investigation_area_name = mk_Investigation_Area_name(proposal_id)
        investigation_area_lidvid = mk_Investigation_Area_lidvid(proposal_id)
        # Register the context products this label will reference.
        bundle_db.create_context_product(investigation_area_lidvid,
                                         "investigation")
        bundle_db.create_context_product(instrument_host_lidvid(),
                                         "instrument_host")
        bundle_db.create_context_product(observing_system_lidvid(instrument),
                                         "instrument")

        # Fetch target identifications from db
        target_id = shm_lookup["TARG_ID"]
        target_identifications = bundle_db.get_target_identifications_based_on_id(
            target_id)
        # At this stage, target identifications should be in the db
        if len(target_identifications) == 0:
            raise ValueError("Target identification is not stored in db.")
        target_identification_nodes: List[NodeBuilder] = []
        target_identification_nodes = create_target_identification_nodes(
            bundle_db, target_identifications, "data")

        # Get wavelength
        instrument_id = get_instrument_id(hdu_lookups, shm_lookup)
        detector_ids = get_detector_ids(hdu_lookups, shm_lookup)
        filter_name = get_filter_name(hdu_lookups, shm_lookup)
        wavelength_range = wavelength_ranges(instrument_id, detector_ids,
                                             filter_name)
        bundle_db.update_wavelength_range(product_lidvid, wavelength_range)

        # Get title
        channel_id = get_channel_id(hdu_lookups, shm_lookup)
        try:
            titles = get_titles_format(instrument_id, channel_id, suffix)
            product_title = titles[0] + "."
            product_title = product_title.format(I=instrument_id + "/" + channel_id,
                                                 F=file_basename,
                                                 P=proposal_id)
            collection_title = titles[1] + "."
            collection_title = collection_title.format(I=instrument_id + "/" + channel_id,
                                                       F=file_basename,
                                                       P=proposal_id)
            # save data/misc collection title to OtherCollection table
            bundle_db.update_fits_product_collection_title(
                collection_lidvid, collection_title)
        except KeyError:
            # If product_title doesn't exist in SUFFIX_TITLES, we use the
            # following text as the product_title.
            product_title = (
                f"{instrument_id} data file {file_basename} " +
                f"obtained by the HST Observing Program {proposal_id}.")

        # Dictionary used for primary result summary
        processing_level = get_processing_level(suffix, instrument_id, channel_id)
        primary_result_dict: Dict[str, Any] = {}
        primary_result_dict["processing_level"] = processing_level
        primary_result_dict["description"] = product_title
        primary_result_dict["wavelength_range"] = wavelength_range

        # Dictionary passed into templates. Use the same data dictionary for
        # either data label template or misc label template
        data_dict = {
            "lid": lidvid_to_lid(product_lidvid),
            "vid": lidvid_to_vid(product_lidvid),
            "title": product_title,
            "mod_date": mod_date,
            "file_name": file_basename,
            "file_contents": get_file_contents(bundle_db, card_dicts,
                                               instrument, product_lidvid),
            "Investigation_Area": investigation_area(investigation_area_name,
                                                     investigation_area_lidvid,
                                                     "data"),
            "Observing_System": observing_system(instrument),
            "Time_Coordinates": get_time_coordinates(start_stop_times),
            "Target_Identification": combine_nodes_into_fragment(target_identification_nodes),
            "HST": hst_parameters,
            "Primary_Result_Summary": primary_result_summary(primary_result_dict),
            "Reference_List": make_document_reference_list([instrument], "data"),
        }
        # Pass the data_dict to either data label or misc label based on
        # collection_type
        collection_type = get_collection_type(suffix, instrument_id, channel_id)
        # NOTE(review): if collection_type is neither "data" nor
        # "miscellaneous", `label` is never bound and the return below
        # raises UnboundLocalError — confirm get_collection_type can
        # only return these two values.
        if collection_type == "data":
            label = make_data_label(data_dict).toxml().encode()
        elif collection_type == "miscellaneous":
            label = make_misc_label(data_dict).toxml().encode()
    except AssertionError:
        # NOTE(review): re-raises with a fixed message, discarding the
        # original assertion's message/traceback detail.
        raise AssertionError(
            f"{product_lidvid} has no target identifications stored in DB.")
    except Exception as e:
        # NOTE(review): print() here is debug residue; consider logging.
        print(str(e))
        raise LabelError(product_lidvid, file_basename,
                         (lookup, hdu_lookups[0], shm_lookup)) from e
    return pretty_and_verify(label, verify)
def make_context_target_label(
    bundle_db: BundleDB,
    target: str,
    verify: bool,
    use_mod_date_for_testing: bool = False,
) -> bytes:
    """
    Create the label text for the context target having this LIDVID
    using the bundle database. If verify is True, verify the label
    against its XML and Schematron schemas. Raise an exception if either
    fails.
    """
    target_lid = f"urn:nasa:pds:context:target:{target}"
    target_lidvid = f"{target_lid}::1.0"
    # NOTE: this local shadows the module-level target_identification()
    # helper; renaming would touch every use below, so it is kept.
    target_identification = bundle_db.get_target_identification_based_on_lid(
        target_lid)
    bundle_db.create_context_product(
        get_target_lidvid(
            [target_identification.type, target_identification.name]),
        "target",
    )

    # Bug fix: alias_nodes was previously bound only inside the branch
    # below, so a target with no alternate designations hit an unbound
    # name at the make_label() call (surfacing as a LabelError).
    alias_nodes: List[NodeBuilder] = []
    alias = str(target_identification.alternate_designations)
    if len(alias) != 0:
        alias_nodes = [
            make_alias(alias_name) for alias_name in alias.split("\n")
        ]

    target_description = str(target_identification.description)
    if len(target_description) != 0:
        # properly align multi line textnodes with 8 spaces
        target_description = " " * 8 + target_description
        target_description = target_description.replace("\n", "\n" + " " * 8)
    else:
        target_description = " " * 8 + "None"
    description_nodes: List[NodeBuilder] = [
        make_description(target_description)
    ]

    if not use_mod_date_for_testing:
        # Get the date when the label is created
        mod_date = get_current_date()
    else:
        mod_date = MOD_DATE_FOR_TESTESING
    try:
        # Any failure during assembly/serialization is reported as a
        # LabelError carrying the target LIDVID.
        label = (make_label({
            "target_lid": target_lid,
            "target_vid": "1.0",
            "title": target_identification.name,
            "alias": combine_nodes_into_fragment(alias_nodes),
            "name": target_identification.name,
            "type": target_identification.type,
            "description": combine_nodes_into_fragment(description_nodes),
            "mod_date": mod_date,
        }).toxml().encode())
    except Exception as e:
        raise LabelError(target_lidvid) from e
    return pretty_and_verify(label, verify)
def _make_fragment(param_name: str, param_values: List[str],
                   node_builder: NodeBuilderTemplate) -> FragBuilder:
    """
    Apply ``node_builder`` once per value — binding each value to
    ``param_name`` — and combine the resulting nodes into one fragment.
    """
    nodes = []
    for value in param_values:
        nodes.append(node_builder({param_name: value}))
    return combine_nodes_into_fragment(nodes)