def getreferences(parameters, reftypes=None, context=None, ignore_cache=False,
                  observatory="jwst", fast=False):
    """Top-level CRDS best-references call.

    Determine, download/cache, and return the best reference files for a
    dataset described by `parameters`.

    Parameters
    ----------
    parameters : dict-like {str: str | int | float | bool}
        Best-reference matching parameters for this dataset.
    reftypes : list of str, optional
        Reference types to return.  None means all possible types.
    context : str, optional
        Pipeline context, i.e. the specific version of CRDS rules used for
        the match.  None means use the latest available context.
    ignore_cache : bool
        When True, download files from the server even if already cached.
    observatory : str
        Nominally 'jwst' or 'hst'.
    fast : bool
        When True, skip verbose output, parameter screening, implicit
        config update, and bad reference checking.

    Returns
    -------
    dict {reftype: cached_bestref_path}
        Mapping from each requested reference type to the local path of
        its cached best reference file.
    """
    context_used, recommendations = _initial_recommendations(
        "getreferences", parameters, reftypes, context, ignore_cache,
        observatory, fast)

    # Cache the recommended references.  Unlike dump_mappings, this works
    # without network access when the files are already cached locally.
    return api.cache_references(
        context_used, recommendations, ignore_cache=ignore_cache)
def getreferences(parameters, reftypes=None, context=None, ignore_cache=False,
                  observatory="jwst", fast=False):
    """Top-level CRDS best-references call.

    NOTE(review): this is a verbatim duplicate of the ``getreferences``
    definition earlier in this file; at import time this second definition
    silently shadows the first.  One of the two should be removed.

    Based on `parameters`, download/cache the corresponding best reference
    and mapping files and return a map from reference file types to local
    reference file locations.

    Parameters
    ----------
    parameters : dict-like {str: str | int | float | bool}
        Best-reference matching parameters for this dataset.
    reftypes : list of str, optional
        Reference types to return; None returns all possible types.
    context : str, optional
        Pipeline context (CRDS rules version); None uses the latest.
    ignore_cache : bool
        When True, re-download files even when already present locally.
    observatory : str
        Nominally 'jwst' or 'hst'.
    fast : bool
        When True, skip verbose output, parameter screening, implicit
        config update, and bad reference checking.

    Returns
    -------
    dict {reftype: cached_bestref_path}
    """
    selected_context, best_refs = _initial_recommendations(
        "getreferences", parameters, reftypes, context, ignore_cache,
        observatory, fast)

    # Attempt to cache the recommended references; unlike dump_mappings
    # this succeeds offline when the files are already cached.
    local_paths = api.cache_references(
        selected_context, best_refs, ignore_cache=ignore_cache)
    return local_paths
def test_crds_selectors_vs_datamodel(jail_environ, instrument):
    """For one instrument, check that every CRDS rmap parkey is present in the
    schema of the datamodel associated with each reference type.

    Integration test: talks to the CRDS server configured below and reads
    files from the CRDS cache (CRDS_PATH).  `jail_environ` is presumably a
    fixture isolating os.environ — TODO confirm against conftest.
    """
    os.environ["CRDS_SERVER_URL"] = 'https://jwst-crds-pub.stsci.edu'
    log.info(f"CRDS_PATH: {os.environ['CRDS_PATH']}")

    # Local imports keep the CRDS dependency out of module import time.
    import crds
    from crds.client.api import cache_references
    from crds.core.exceptions import IrrelevantReferenceTypeError

    # Resolve the latest JWST context and the instrument-specific mapping.
    context = crds.get_context_name('jwst')
    pmap = crds.get_cached_mapping(context)
    imap = pmap.get_imap(instrument)
    log.info(f"Beginning tests for {instrument}")

    # get the reftypes
    reftypes = imap.get_filekinds()
    # remove pars- files
    # (iterating the reversed copy so in-place remove() doesn't skip entries)
    _ = [
        reftypes.remove(name) for name in reftypes[::-1]
        if name.startswith('pars-')
    ]

    # iterate over reftypes for this instrument
    for reftype in reftypes:
        try:
            r = imap.get_rmap(reftype)
            # Matching parkeys for this rmap, minus those known not to
            # correspond to datamodel schema entries.
            parkeys = [
                p for p in list(flatten(list(r.parkey)))
                if p not in ignored_parkeys
            ]
            log.debug(f"Parkeys for {reftype}: {parkeys}")
            for f in r.reference_names():
                # Ensure filetype is kind to be loaded into datamodel
                if 'fits' in f or 'asdf' in f:
                    # Find datamodel appropriate for this reference file
                    # If reftype has multiple datamodels possible, do some guesswork
                    if reftype in ref_to_multiples_dict.keys():
                        model_map = ref_to_multiples_dict[reftype]
                        # Open the actual reference file to read the metadata
                        # that selects among the candidate datamodels.
                        with warnings.catch_warnings():
                            warnings.simplefilter('ignore', NoTypeWarning)
                            refs = cache_references(context, {reftype: f})
                            with dm.open(refs[reftype]) as model:
                                try:
                                    ref_exptype = model.meta.exposure.type
                                except AttributeError:
                                    # Not all reference files carry an
                                    # exposure type.
                                    ref_exptype = None
                                ref_instrument = model.meta.instrument.name
                                # Choose by exposure type first, then by
                                # instrument, else the catch-all entry.
                                if ref_exptype in model_map.keys():
                                    ref_model = model_map[ref_exptype]
                                elif ref_instrument in model_map.keys():
                                    ref_model = model_map[ref_instrument]
                                else:
                                    ref_model = model_map['other']
                    # Simple one to one translation of reftype to datamodel
                    else:
                        ref_model = ref_to_datamodel_dict[reftype]

                    log.debug(
                        f"Loading {reftype} reference for {instrument} as {ref_model}"
                    )
                    if ref_model is None:
                        # No datamodel mapped for this reftype: nothing to
                        # check, so stop looking at this rmap's files.
                        log.warning(
                            f"No datamodel found for {reftype}: skipping...")
                        break

                    # No need to actually load the reference file into the datamodel!
                    # An empty model instance is enough to search the schema.
                    with ref_model() as m:
                        for key in parkeys:
                            assert len(m.search_schema(key.lower())) > 0
                        # One suitable reference file per reftype suffices.
                        break
        except IrrelevantReferenceTypeError as e:
            # Reftype does not apply to this instrument mode; skip it.
            log.debug(e)
            pass