Example #1
 def _check_value(self, filename, value):
     """Raises ValueError if `value` is not valid."""
     if self._match_value(value):
         if self._values and log.get_verbose():
             self.verbose(filename, value, "is in", repr(self._values))
     else:
         raise ValueError("Value " + str(log.PP(value)) +
                          " is not one of " + str(log.PP(self._values)))
Example #2
def header_to_pipelines(header, context=None):
    """Given a dataset `header`,  extract the EXP_TYPE or META.EXPOSURE.TYPE keyword
    from it and use that to look up the pipelines required to process it.

    Return a list of pipeline .cfg names.
    """
    with log.augment_exception(
            "Failed determining exp_type, cal_ver from header",
            log.PP(header)):
        exp_type, cal_ver = _header_to_exptype_calver(header)
    config_manager = _get_config_manager(context, cal_ver)
    pipelines = _get_pipelines(exp_type, cal_ver, context)  # uncorrected
    if config_manager.pipeline_exceptions:  # correction based on extra non-EXP_TYPE params
        pipelines2 = []
        for cfg in pipelines:
            for param, exceptions in config_manager.pipeline_exceptions.items():
                exceptions = dict(exceptions)
                dont_replace = exceptions.pop("dont_replace")
                default_missing = exceptions.pop("default_missing")
                paramval = header.get(param.upper(), default_missing)
                if paramval not in dont_replace:
                    cfg = exceptions.get(cfg, cfg)
            pipelines2.append(cfg)
        pipelines = pipelines2
    log.verbose("Applicable pipelines for", srepr(exp_type), "are",
                srepr(pipelines))
    return pipelines
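
The exception-correction loop above is easiest to follow with toy data.  Below is a
minimal, runnable sketch of the same substitution logic; the parameter name, the
.cfg names, and the exception table are invented for illustration and are not the
real CRDS configuration.

# Hypothetical pipeline_exceptions table: one extra header parameter controls
# whether a .cfg is swapped for a replacement.
pipeline_exceptions = {
    "tsovisit": {
        "dont_replace": ["F", "UNDEFINED"],
        "default_missing": "UNDEFINED",
        "calwebb_image2.cfg": "calwebb_tso-image2.cfg",   # cfg -> replacement
    },
}
header = {"TSOVISIT": "T"}
pipelines = ["calwebb_detector1.cfg", "calwebb_image2.cfg"]

corrected = []
for cfg in pipelines:
    for param, exceptions in pipeline_exceptions.items():
        exceptions = dict(exceptions)
        dont_replace = exceptions.pop("dont_replace")
        default_missing = exceptions.pop("default_missing")
        paramval = header.get(param.upper(), default_missing)
        if paramval not in dont_replace:
            cfg = exceptions.get(cfg, cfg)
    corrected.append(cfg)

print(corrected)   # ['calwebb_detector1.cfg', 'calwebb_tso-image2.cfg']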
Example #3
    def upload_file(self, filepath):
        abs_url = self.abs_url("/upload/chunked/")
        response = self.session.get(abs_url)
        log.verbose("COOKIES:", log.PP(response.cookies))
        csrf_token = response.cookies["csrftoken"]
        file_size = os.stat(filepath).st_size
        filename = os.path.basename(filepath)

        if file_size < _UPLOAD_CHUNK_SIZE:
            files = {"files": (filename, open(filepath, "rb"))}
            data = {"csrfmiddlewaretoken": csrf_token}
            self.session.post(abs_url, files=files, data=data)
        else:
            with open(filepath, "rb") as f:
                start_byte = 0
                while True:
                    chunk = f.read(_UPLOAD_CHUNK_SIZE)
                    if len(chunk) == 0:
                        break

                    files = {"files": (filename, io.BytesIO(chunk))}
                    data = {"csrfmiddlewaretoken": csrf_token}
                    end_byte = start_byte + len(chunk) - 1
                    content_range = f"bytes {start_byte}-{end_byte}/{file_size}"
                    headers = {"Content-Range": content_range}
                    response = self.session.post(abs_url,
                                                 files=files,
                                                 data=data,
                                                 headers=headers)
                    csrf_token = response.cookies["csrftoken"]
                    start_byte = end_byte + 1
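
The Content-Range bookkeeping in the chunked branch can be checked in isolation.
A small self-contained sketch (the chunk size and file size below are arbitrary):

def chunk_ranges(file_size, chunk_size):
    """Yield the 'bytes start-end/total' strings the loop above would send."""
    start = 0
    while start < file_size:
        end = min(start + chunk_size, file_size) - 1
        yield f"bytes {start}-{end}/{file_size}"
        start = end + 1

print(list(chunk_ranges(10, 4)))
# ['bytes 0-3/10', 'bytes 4-7/10', 'bytes 8-9/10']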
Example #4
 def upload_file(self, relative_url, filepath):
     abs_url = self.abs_url(relative_url)
     response = self.session.get(abs_url)
     log.verbose("COOKIES:", log.PP(response.cookies))
     csrf_token = response.cookies['csrftoken']
     files = {"files": open(filepath, "rb")}
     data = {'csrfmiddlewaretoken': csrf_token}
     self.session.post(abs_url, files=files, data=data)
Example #5
 def get_ingested_files(self):
     """Return the server-side JSON info on the files already in the submitter's ingest directory."""
     log.verbose("Querying for existing files.")
     result = self.connection.get('/upload/list/').json()
     log.verbose("JSON info on existing ingested files:\n", log.PP(result))
     if "files" in result and isinstance(result["files"], list):
         return {info["name"]: info for info in result["files"]}
     return {info["name"]: info for info in result}
Example #6
 def get_extra_parameters(self):
     '''Return the form dictionary mapping form variables to value strings for
     new variables being added by the streamlining project.'''
     with open(self.args.redcat_parameters) as f:
         text = f.read()
         log.verbose("Raw YAML read:\n", text, verbosity=75)
         loaded = yaml.safe_load(text)
         log.verbose("ReDCaT parameters:\n", log.PP(loaded))
         return loaded
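
For reference, yaml.safe_load turns the raw text logged at verbosity 75 into the
mapping logged just after it.  A toy example with invented file contents (the real
ReDCaT parameter names are not shown here):

import yaml  # PyYAML

text = """
instrument: ACS
file_type: BIAS
history: initial delivery
"""
loaded = yaml.safe_load(text)
print(loaded)   # {'instrument': 'ACS', 'file_type': 'BIAS', 'history': 'initial delivery'}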
Example #7
def header_to_pipelines(header, context=None):
    """Given a dataset `header`,  extract the EXP_TYPE or META.EXPOSURE.TYPE keyword
    from it and use that to look up the pipelines required to process it.

    Return a list of pipeline .cfg names.
    """
    with log.augment_exception("Failed determining exp_type, cal_ver from header", log.PP(header)):
        exp_type, cal_ver = _header_to_exptype_calver(header)
    return get_pipelines(exp_type, cal_ver, context)
Example #8
 def log_affected(self, i, affected):
     """PLUGIN: Banner log and debug output for each context switch."""
     if log.get_verbose():
         print("#" * 100, file=sys.stderr)
         log.debug("History:", i, "Effects:\n", log.PP(affected))
     else:
         if not self.args.quiet:
             print("#" * 100, file=sys.stderr)
             print(affected.bestrefs_err_summary, file=sys.stderr)
Example #9
def get_fits_header_union(filepath, needed_keys=(), original_name=None, observatory=None, **keys):
    """Get the union of keywords from all header extensions of FITS
    file `filepath`.  In the case of collisions, keep the first value
    found as extensions are loaded in numerical order.

    DOES NOT verify FITS checksums.
    """
    file_obj = FitsFile(filepath)
    header = file_obj.get_header(needed_keys, checksum=False)
    log.verbose("Header of", repr(filepath), "=", log.PP(header), verbosity=90)
    return header
Example #10
 def __call__(self, *args, **kwargs):
     jsonrpc = self._call(*args, **kwargs)
     if jsonrpc["error"]:
         decoded = html.unescape(jsonrpc["error"]["message"])
         raise self.classify_exception(decoded)
     else:
         result = crds_decode(jsonrpc["result"])
         if isinstance(result, (str,int,float,bool)):
             log.verbose("RPC OK -->", repr(result))
         else:
             log.verbose("RPC OK", log.PP(result) if log.get_verbose() >= 75 else "")
         return result
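
The error branch above unescapes HTML entities the server may have applied to the
message before raising.  A standalone illustration using only the standard library;
the response dict is fabricated:

import html

jsonrpc = {"error": {"message": "CRDS exception: file &#39;foo.fits&#39; not found"},
           "result": None}
if jsonrpc["error"]:
    decoded = html.unescape(jsonrpc["error"]["message"])
    print(decoded)   # CRDS exception: file 'foo.fits' not found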
Example #11
def equivalence_dict_to_pairs(equivalent_keywords_dict):
    """Convert a dictionary mapping master keywords to equivalents to
    a list of keyword pairs that should be cross-strapped.
    """
    pairs = []
    log.verbose("Explicitly cross_strapped_keywords:",
                log.PP(equivalent_keywords_dict), verbosity=90)
    for master, slaves in equivalent_keywords_dict.items():
        for slave in slaves:
            if master != slave:
                pairs.append((master, slave))
                pairs.append((slave, master))
    return pairs
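
A worked example of the pairing (the equivalence dictionary below is invented):

equivalents = {"DETECTOR": ["META.INSTRUMENT.DETECTOR"], "FILTER": ["FILTER"]}

pairs = []
for master, slaves in equivalents.items():
    for slave in slaves:
        if master != slave:          # identical names produce no pair
            pairs.append((master, slave))
            pairs.append((slave, master))

print(pairs)
# [('DETECTOR', 'META.INSTRUMENT.DETECTOR'), ('META.INSTRUMENT.DETECTOR', 'DETECTOR')]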
Example #12
def get_validators(observatory, refpath, context=None):
    """Given `observatory` and a path to a reference file `refpath`,  load the
    corresponding validators that define the individual constraints that the
    reference should satisfy.
    """
    tpns = _get_reffile_tpninfos(observatory, refpath)
    checkers = [validator(x, context=context) for x in tpns]
    log.verbose("Validators for",
                repr(refpath),
                "(" + str(len(checkers)) + "):\n",
                log.PP(checkers),
                verbosity=65)
    return checkers
Example #13
def _get_fits_datamodel_pairs(header):
    """Return the (FITS, DM) and (DM, FITS) cross strap pairs associated with
    every keyword in `header`, as defined by the datamodels interface functions
    provided by the CRDS JWST schema module.
    """
    pairs = []
    from . import schema
    for key in header:
        with log.verbose_warning_on_exception("Failed cross strapping keyword", repr(key)):
            fitskey = schema.dm_to_fits(key) or key
            dmkey = schema.fits_to_dm(key) or key
            pairs.append((fitskey, dmkey))
            pairs.append((dmkey, fitskey))
    log.verbose("Cal code datamodels keyword equivalencies:\n", log.PP(pairs), verbosity=90)
    return pairs
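
The effect of the pairing can be shown with a stand-in for the schema lookups; the
FITS/DM mapping below is illustrative, not the real JWST schema:

# Stand-ins for schema.fits_to_dm / schema.dm_to_fits.
fits_to_dm_map = {"DETECTOR": "META.INSTRUMENT.DETECTOR"}
dm_to_fits_map = {v: k for k, v in fits_to_dm_map.items()}

pairs = []
for key in ["DETECTOR"]:
    fitskey = dm_to_fits_map.get(key) or key
    dmkey = fits_to_dm_map.get(key) or key
    pairs.append((fitskey, dmkey))
    pairs.append((dmkey, fitskey))

print(pairs)
# [('DETECTOR', 'META.INSTRUMENT.DETECTOR'), ('META.INSTRUMENT.DETECTOR', 'DETECTOR')]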
Example #14
 def test_0_recursive_modify_rmap(self): # , header, value, classes):
     # Load the test rmap from a string.   The top level selector must exist.
     # This is not a "realistic" test case.   It's a test of the recursive
     # insertion capabilities of all the Selector classes in one go.
     log.verbose("-"*60)
     r = rmap.ReferenceMapping.from_string(self.rmap_str, "./test.rmap", ignore_checksum=True)
     log.verbose("insert_header:", log.PP(self.insert_header))
     result = r.insert(self.insert_header, "foo.fits")
     result.write(self.result_filename)
     diffs = r.difference(result)
     log.verbose("diffs:", diffs)
     diffs = [diff for diff in diffs if "Selector" not in diff[-1]]
     assert len(diffs) == 1, "Fewer/more differences than expected: " + repr(diffs)
     log.verbose("recursive insert result rmap:")
     log.verbose(open(self.result_filename).read())
Example #15
def header_to_reftypes(header, context=None):
    """Given a dataset `header`,  extract the EXP_TYPE or META.EXPOSURE.TYPE keyword
    from it and use that to look up the reftypes required to process it.

    Return a list of reftype names.
    """
    with log.warn_on_exception("Failed determining reftypes for", log.PP(header)):
        exp_type, cal_ver = _header_to_exptype_calver(header)
        config_manager = _get_config_manager(context, cal_ver)
        pipelines = header_to_pipelines(header, context)
        reftypes = set()
        for cfg in pipelines:
            steps = config_manager.pipeline_cfgs_to_steps[cfg]
            for step in steps:
                reftypes |= set(config_manager.steps_to_reftypes[step])
        return sorted(list(reftypes))
    return []
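
The pipelines -> steps -> reftypes accumulation above reduces to a set union.  A
toy sketch with invented step and reftype mappings standing in for the real
config_manager tables:

pipeline_cfgs_to_steps = {"calwebb_detector1.cfg": ["dq_init", "saturation"]}
steps_to_reftypes = {"dq_init": ["mask"], "saturation": ["saturation"]}

reftypes = set()
for cfg in ["calwebb_detector1.cfg"]:
    for step in pipeline_cfgs_to_steps[cfg]:
        reftypes |= set(steps_to_reftypes[step])

print(sorted(reftypes))   # ['mask', 'saturation']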
Example #16
 def dump_header(self, context, returned_id, header):
     """Print out dataset `header` for `id` and `context` in either .json or
     multi-line formats.
     """
     pmap = crds.get_cached_mapping(context)
     if self.args.minimize_headers:
         header2 = pmap.minimize_header(header)
     else:
         header2 = dict(header)
     header2.pop("REFTYPE", None)
     header2["dataset_id"] = returned_id
     header2["CRDS_CTX"] = context
     if self.args.json:
         json_header = {returned_id: header}
         print(json.dumps(json_header))
     else:
         print("Dataset pars for", repr(returned_id), "with respect to",
               repr(context) + ":\n", log.PP(header2))
Example #17
def get_free_header(filepath,
                    needed_keys=(),
                    original_name=None,
                    observatory=None):
    """Return the complete unconditioned header dictionary of a reference file.

    DOES NOT hijack warnings.   DOES NOT verify checksums.

    `original_name` is used to determine the file type of web upload temporary files,
    which have no distinguishable extension.  `original_name` is the browser-side
    name of the file.

    get_free_header() is a cached function to prevent repeat file reads.  
    Although parameters are given default values,  for caching to work correctly
    even default parameters should be specified positionally.

    Since get_free_header() is cached,  loading file updates requires first
    clearing the function cache.
    """
    file_obj = file_factory(filepath, original_name, observatory)
    header = file_obj.get_header(needed_keys, checksum=False)
    log.verbose("Header of", repr(filepath), "=", log.PP(header), verbosity=90)
    return header
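
Because get_free_header() is cached on its argument tuple, callers should pass even
the defaulted parameters positionally so repeated lookups share one cache entry.  A
hedged usage sketch; the path and observatory value are placeholders:

# First call reads the file; a second call with identical positional arguments
# returns the cached header without re-reading the file.
header = get_free_header("./references/example_ref.fits", (), None, "jwst")
header_again = get_free_header("./references/example_ref.fits", (), None, "jwst")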
Example #18
def _pp_lines(obj):
    """Pretty print `obj` and return the resulting text as a list of lines."""
    return str(log.PP(obj)).splitlines()
Example #19
def reference_keys_to_dataset_keys(rmapping, header):
    """Given a header dictionary for a reference file, map the header back to keys
    relevant to datasets.  So for ACS biasfile the reference says BINAXIS1 but
    the dataset says NUMCOLS.  This would convert { "BINAXIS1": 1024 } to {
    "NUMCOLS" : 1024 }.

    In general,  rmap parkeys are matched against dataset values and are defined
    as dataset header keywords.   For refactoring though,  what's initially
    available are reference file keywords...  which need to be mapped into the
    terms rmaps know:  dataset keywords.
    """
    header = dict(header)

    # Basic common pattern translations
    translations = {
            "META.EXPOSURE.P_EXPTYPE" : "META.EXPOSURE.TYPE",
            "P_EXP_TY" : "META.EXPOSURE.TYPE",

            "META.INSTRUMENT.P_BAND" : "META.INSTRUMENT.BAND",
            "P_BAND" : "META.INSTRUMENT.BAND",

            "META.INSTRUMENT.P_DETECTOR"  : "META.INSTRUMENT.DETECTOR",
            "P_DETECT"  : "META.INSTRUMENT.DETECTOR",

            "META.INSTRUMENT.P_CHANNEL" : "META.INSTRUMENT.CHANNEL",
            "P_CHANNE" : "META.INSTRUMENT.CHANNEL",

            "META.INSTRUMENT.P_FILTER" : "META.INSTRUMENT.FILTER",
            "P_FILTER"  : "META.INSTRUMENT.FILTER",

            "META.INSTRUMENT.P_PUPIL"  : "META.INSTRUMENT.PUPIL",
            "P_PUPIL" : "META.INSTRUMENT.PUPIL",

            "META.INSTRUMENT.P_MODULE"  : "META.INSTRUMENT.MODULE",
            "P_MODULE" : "META.INSTRUMENT.MODULE",

            "META.SUBARRAY.P_SUBARRAY" : "META.SUBARRAY.NAME",
            "P_SUBARR" : "META.SUBARRAY.NAME",

            "META.INSTRUMENT.P_GRATING" : "META.INSTRUMENT.GRATING",
            "P_GRATIN" : "META.INSTRUMENT.GRATING",

            "META.EXPOSURE.PREADPATT" : "META.EXPOSURE.READPATT",
            "META.EXPOSURE.P_READPATT" : "META.EXPOSURE.READPATT",
            "P_READPA" : "META.EXPOSURE.READPATT",

            # vvvv Speculative,  not currently defined or required by CAL vvvvv
            "META.INSTRUMENT.PCORONAGRAPH" : "META.INSTRUMENT.CORONAGRAPH",
            "P_CORONM" : "META.INSTRUMENT.CORONAGRAPH",
        }

    # Rmap header reference_to_dataset field translations,  can override basic!
    try:
        translations.update(rmapping.reference_to_dataset)
    except AttributeError:
        pass

    log.verbose("reference_to_dataset translations:\n", log.PP(translations), verbosity=60)
    log.verbose("reference_to_dataset input header:\n", log.PP(header), verbosity=80)

    for key in header:
        # Match META.X.P_SOMETHING or P_SOMETH
        if (key.split(".")[-1].startswith("P_")) and key not in translations:
            log.warning("CRDS-pattern-like keyword", repr(key),
                        "w/o CRDS translation to corresponding dataset keyword.")
            log.info("Pattern-like keyword", repr(key),
                     "may be misspelled or missing its translation in CRDS.  Pattern will not be used.")
            log.info("The translation for", repr(key),
                     "can be defined in crds.jwst.locate or rmap header reference_to_dataset field.")
            log.info("If this is not a pattern keyword, adding a translation to 'not-a-pattern'",
                     "will suppress this warning.")

    # Add replacements for translations *if* the existing untranslated value
    # is poor and the translated value is better defined.   This is to do
    # translations w/o replacing valid/concrete DM values with something
    # like guessed values of "UNDEFINED" or "N/A".
    for rkey in sorted(translations):
        if rkey in header:
            dkey = translations[rkey]
            dval = header.get(translations[rkey], None)
            rval = header[rkey]
            if rval not in [None, "UNDEFINED"] and rval != dval:
                log.info("Setting", repr(dkey), "=", repr(dval),
                         "to value of", repr(rkey), "=", repr(rval))
                header[dkey] = rval

    header = abstract.cross_strap_header(header)

    # NOTE:  the hacks below happen after cross-strapping and pattern handling
    # so if the keywords are still undefined they're undefined.  They have to
    # be explicitly defined as UNDEFINED somehow since they're nearly universally
    # used in constraints as condition variables even if they're not used in rmaps.
    # Unlike the targets of constraints,  CRDS is nominally unaware of condition
    # variables so they need to be incidentally defined.  This currently doesn't
    # work out if the rmap doesn't use them.  Condition variables are eval'ed in
    # expressions.

    if "SUBARRAY" not in header:
        header["SUBARRAY"] = header["META.SUBARRAY.NAME"] = "UNDEFINED"

    if "EXP_TYPE" not in header:
        header["EXP_TYPE"] = header["META.EXPOSURE.TYPE"] = "UNDEFINED"

    if "USEAFTER" not in header and "META.USEAFTER" in header:
        header["USEAFTER"] = header["META.USEAFTER"]
    if "USEAFTER" not in header and "META.USEAFTER" in header:
        header["USEAFTER"] = header["META.USEAFTER"]

    # If USEAFTER is defined,  or we're configured to fake it...
    #   don't invent one if it's missing and we're not faking it.
    if "USEAFTER" in header or config.ALLOW_BAD_USEAFTER:

        # Identify this file as best as possible.
        filename = header.get("FILENAME", None) or rmapping.filename

        reformatted = timestamp.reformat_useafter(filename, header).split()
        header["DATE-OBS"] = header["META.OBSERVATION.DATE"] = reformatted[0]
        header["TIME-OBS"] = header["META.OBSERVATION.TIME"] = reformatted[1]

    log.verbose("reference_to_dataset output header:\n", log.PP(header), verbosity=80)

    return header
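
For one keyword, the replacement rule above promotes the or-barred P_ pattern value
onto the corresponding dataset keyword when the pattern value is better defined.  A
toy, runnable sketch of just that step (the values are illustrative):

translations = {"META.EXPOSURE.P_EXPTYPE": "META.EXPOSURE.TYPE"}
header = {"META.EXPOSURE.P_EXPTYPE": "MIR_IMAGE|MIR_LRS-FIXEDSLIT|",
          "META.EXPOSURE.TYPE": "MIR_IMAGE"}

for rkey in sorted(translations):
    if rkey in header:
        dkey = translations[rkey]
        dval = header.get(dkey, None)
        rval = header[rkey]
        if rval not in [None, "UNDEFINED"] and rval != dval:
            header[dkey] = rval           # pattern value wins

print(header["META.EXPOSURE.TYPE"])   # MIR_IMAGE|MIR_LRS-FIXEDSLIT|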
Example #20
def reference_keys_to_dataset_keys(rmapping, header):
    """Given a header dictionary for a reference file, map the header back to keys
    relevant to datasets.  So for ACS biasfile the reference says BINAXIS1 but
    the dataset says NUMCOLS.  This would convert { "BINAXIS1": 1024 } to {
    "NUMCOLS" : 1024 }.

    In general,  rmap parkeys are matched against dataset values and are defined
    as dataset header keywords.   For refactoring though,  what's initially
    available are reference file keywords...  which need to be mapped into the
    terms rmaps know:  dataset keywords.

    Another aspect of this translation is handling reference file "pattern"
    keywords which typically define or-barred sets of values rather than
    discrete values, any of which the reference is defined to support:
    e.g. 'DETECTOR1|DETECTOR2' vs. 'DETECTOR1'.  In this case, the reference
    file will define a pattern keyword used to define the match pattern in the
    rmap, while a dataset will define a discrete valued keyword which is
    matched on.  e.g. reference file keyword "META.EXPOSURE.P_EXPTYPE" is
    translated back to dataset keyword "META.EXPOSURE.TYPE".  Reference files
    can specify parameters in either form and the P_ pattern variant is given
    preference if both values are defined.  For CRDS purposes, only the P_
    version is checked and used since it will be used to replace the discrete
    valued keyword in the header which is certified and used to define the rmap
    updates.
    """
    header = dict(header)

    # Basic common pattern translations
    translations = {
            "META.EXPOSURE.P_EXPTYPE" : "META.EXPOSURE.TYPE",

            "META.INSTRUMENT.P_BAND" : "META.INSTRUMENT.BAND",

            "META.INSTRUMENT.P_DETECTOR"  : "META.INSTRUMENT.DETECTOR",

            "META.INSTRUMENT.P_CHANNEL" : "META.INSTRUMENT.CHANNEL",

            "META.INSTRUMENT.P_FILTER" : "META.INSTRUMENT.FILTER",

            "META.INSTRUMENT.P_MODULE"  : "META.INSTRUMENT.MODULE",

            "META.SUBARRAY.P_SUBARRAY" : "META.SUBARRAY.NAME",

            "META.INSTRUMENT.P_GRATING" : "META.INSTRUMENT.GRATING",

            "META.EXPOSURE.PREADPATT" : "META.EXPOSURE.READPATT",
            "META.EXPOSURE.P_READPATT" : "META.EXPOSURE.READPATT",

            # vvvv Speculative,  not currently defined or required by CAL vvvvv
            "META.INSTRUMENT.PCORONAGRAPH" : "META.INSTRUMENT.CORONAGRAPH",
        }

    # Rmap header reference_to_dataset field translations,  can override basic!
    try:
        translations.update(rmapping.reference_to_dataset)
    except AttributeError:
        pass

    log.verbose("reference_to_dataset translations:\n", log.PP(translations), verbosity=60)
    log.verbose("reference_to_dataset input header:\n", log.PP(header), verbosity=80)

    for key in header:
        # Match META.X.P_SOMETHING or P_SOMETH
        if (key.split(".")[-1].startswith("P_")) and key not in translations:
            log.warning("CRDS-pattern-like keyword", repr(key),
                        "w/o CRDS translation to corresponding dataset keyword.")
            log.info("Pattern-like keyword", repr(key),
                     "may be misspelled or missing its translation in CRDS.  Pattern will not be used.")
            log.info("The translation for", repr(key),
                     "can be defined in crds.roman.locate or rmap header reference_to_dataset field.")
            log.info("If this is not a pattern keyword, adding a translation to 'not-a-pattern'",
                     "will suppress this warning.")

    # Add replacements for translations *if* the existing untranslated value
    # is poor and the translated value is better defined.   This is to do
    # translations w/o replacing valid/concrete DM values with something
    # like guessed values of "UNDEFINED" or "N/A".
    for rkey in sorted(translations):
        if rkey in header:
            dkey = translations[rkey]
            dval = header.get(translations[rkey], None)
            rval = header[rkey]
            if rval not in [None, "UNDEFINED"] and rval != dval:
                log.info("Setting", repr(dkey), "=", repr(dval),
                         "to value of", repr(rkey), "=", repr(rval))
                header[dkey] = rval

    if "META.SUBARRAY.NAME" not in header:
        header["META.SUBARRAY.NAME"] = "UNDEFINED"

    if "META.EXPOSURE.TYPE" not in header:
        header["META.EXPOSURE.TYPE"] = "UNDEFINED"

    # If USEAFTER is defined,  or we're configured to fake it...
    #   don't invent one if it's missing and we're not faking it.
    if "META.USEAFTER" in header or config.ALLOW_BAD_USEAFTER:

        # Identify this file as best as possible.
        filename = header.get("META.FILENAME", None) or rmapping.filename

        reformatted = timestamp.reformat_useafter(filename, header).split()
        header["META.OBSERVATION.DATE"] = reformatted[0]
        header["META.OBSERVATION.TIME"] = reformatted[1]

    log.verbose("reference_to_dataset output header:\n", log.PP(header), verbosity=80)

    return header
Example #21
def reference_keys_to_dataset_keys(rmapping, header):
    """Given a header dictionary for a reference file, map the header back to keys
    relevant to datasets.  So for ACS biasfile the reference says BINAXIS1 but
    the dataset says NUMCOLS.  This would convert { "BINAXIS1": 1024 } to {
    "NUMCOLS" : 1024 }.

    In general,  rmap parkeys are matched against dataset values and are defined
    as dataset header keywords.   For refactoring though,  what's initially
    available are reference file keywords...  which need to be mapped into the
    terms rmaps know:  dataset keywords.

    Another aspect of this translation is handling reference file "pattern"
    keywords which typically define or-barred sets of values rather than
    discrete values, any of which the reference is defined to support:
    e.g. 'DETECTOR1|DETECTOR2' vs. 'DETECTOR1'.  In this case, the reference
    file will define a pattern keyword used to define the match pattern in the
    rmap, while a dataset will define a discrete valued keyword which is
    matched on.  e.g. reference file keyword "META.EXPOSURE.P_EXPTYPE" is
    translated back to dataset keyword "META.EXPOSURE.TYPE".  Reference files
    can specify parameters in either form and the P_ pattern variant is given
    preference if both values are defined.  For CRDS purposes, only the P_
    version is checked and used since it will be used to replace the discrete
    valued keyword in the header which is certified and used to define the rmap
    updates.

    Note: can't test unrecognized "P_" keywords because the logging appears to go to stderr, which doctests don't check.

    ==================================================
    Test adding a translation.

    >>> reference_keys_to_dataset_keys( \
    namedtuple('x', ['reference_to_dataset', 'filename'])({'MOUSE' : 'RAT'}, ''), \
    {"MOUSE" : "MICKEY", "RAT" : "MORTIMER"})
    {'MOUSE': 'MICKEY', 'RAT': 'MICKEY', 'ROMAN.META.SUBARRAY.NAME': 'UNDEFINED', 'ROMAN.META.EXPOSURE.TYPE': 'UNDEFINED'}

    ==================================================
    Test replacing translated values with untranslated values.

    >>> reference_keys_to_dataset_keys( \
    namedtuple('x', ['reference_to_dataset', 'filename'])({'MOUSE' : 'RAT'}, ''), \
    {"ROMAN.META.EXPOSURE.P_EXPTYPE" : None, \
    "ROMAN.META.INSTRUMENT.P_BAND" : "UNDEFINED", \
    "ROMAN.META.INSTRUMENT.P_DETECTOR"  : "RADAR", \
    "ROMAN.META.INSTRUMENT.P_CHANNEL" : None, \
    "ROMAN.META.INSTRUMENT.CHANNEL" : None, \
    "ROMAN.META.INSTRUMENT.P_FILTER" : "UNDEFINED", \
    "ROMAN.META.INSTRUMENT.FILTER" : None, \
    "ROMAN.META.INSTRUMENT.P_MODULE" : "LUNAR", \
    "ROMAN.META.INSTRUMENT.MODULE" : None, \
    "ROMAN.META.SUBARRAY.P_SUBARRAY" : None, \
    "ROMAN.META.SUBARRAY.NAME" : "YELLOW", \
    "ROMAN.META.INSTRUMENT.P_GRATING" : "UNDEFINED", \
    "ROMAN.META.INSTRUMENT.GRATING" : "MOZZARELLA", \
    "ROMAN.META.EXPOSURE.PREADPATT" : "CHECKERBOARD", \
    "ROMAN.META.EXPOSURE.READPATT" : "CHESSBOARD"})
    {'ROMAN.META.EXPOSURE.P_EXPTYPE': None, 'ROMAN.META.INSTRUMENT.P_BAND': 'UNDEFINED', 'ROMAN.META.INSTRUMENT.P_DETECTOR': 'RADAR', 'ROMAN.META.INSTRUMENT.P_CHANNEL': None, 'ROMAN.META.INSTRUMENT.CHANNEL': None, 'ROMAN.META.INSTRUMENT.P_FILTER': 'UNDEFINED', 'ROMAN.META.INSTRUMENT.FILTER': None, 'ROMAN.META.INSTRUMENT.P_MODULE': 'LUNAR', 'ROMAN.META.INSTRUMENT.MODULE': 'LUNAR', 'ROMAN.META.SUBARRAY.P_SUBARRAY': None, 'ROMAN.META.SUBARRAY.NAME': 'YELLOW', 'ROMAN.META.INSTRUMENT.P_GRATING': 'UNDEFINED', 'ROMAN.META.INSTRUMENT.GRATING': 'MOZZARELLA', 'ROMAN.META.EXPOSURE.PREADPATT': 'CHECKERBOARD', 'ROMAN.META.EXPOSURE.READPATT': 'CHECKERBOARD', 'ROMAN.META.INSTRUMENT.DETECTOR': 'RADAR', 'ROMAN.META.EXPOSURE.TYPE': 'UNDEFINED'}

    ==================================================
    Test setting missing subarray and exposure type values.

    >>> reference_keys_to_dataset_keys( \
    namedtuple('x', ['reference_to_dataset', 'filename'])({}, ''), \
    {})
    {'ROMAN.META.SUBARRAY.NAME': 'UNDEFINED', 'ROMAN.META.EXPOSURE.TYPE': 'UNDEFINED'}

    >>> reference_keys_to_dataset_keys( \
    namedtuple('x', ['reference_to_dataset', 'filename'])({}, ''), \
    {'ROMAN.META.SUBARRAY.NAME' : 'REDOCTOBER', \
    'ROMAN.META.EXPOSURE.TYPE' : 'NORTHFACE'})
    {'ROMAN.META.SUBARRAY.NAME': 'REDOCTOBER', 'ROMAN.META.EXPOSURE.TYPE': 'NORTHFACE'}

    ==================================================
    Test preserving existing subarray and exposure type values.

    >>> reference_keys_to_dataset_keys( \
    namedtuple('x', ['reference_to_dataset', 'filename'])({}, ''), \
    {'ROMAN.META.SUBARRAY.NAME' : 'REDOCTOBER', \
    'ROMAN.META.EXPOSURE.TYPE' : 'NORTHFACE'})
    {'ROMAN.META.SUBARRAY.NAME': 'REDOCTOBER', 'ROMAN.META.EXPOSURE.TYPE': 'NORTHFACE'}

    ==================================================
    Test preserving existing DATE/TIME if there is no USEAFTER value.

    >>> config.ALLOW_BAD_USEAFTER.reset()
    >>> reference_keys_to_dataset_keys( \
    namedtuple('x', ['reference_to_dataset', 'filename'])({}, 'secret_code_file.txt'), \
    {'ROMAN.META.OBSERVATION.DATE' : '1879-03-14', \
     'ROMAN.META.OBSERVATION.TIME' : '12:34:56'})
    {'ROMAN.META.OBSERVATION.DATE': '1879-03-14', 'ROMAN.META.OBSERVATION.TIME': '12:34:56', 'ROMAN.META.SUBARRAY.NAME': 'UNDEFINED', 'ROMAN.META.EXPOSURE.TYPE': 'UNDEFINED'}

    ==================================================
    Test setting DATE/TIME with no USEAFTER, but allowed "bad use after".

    >>> config.ALLOW_BAD_USEAFTER.reset()
    >>> config.ALLOW_BAD_USEAFTER.set("1")
    False
    >>> reference_keys_to_dataset_keys(namedtuple('x', ['reference_to_dataset', 'filename'])({}, 'secret_code_file.txt'), {})
    {'ROMAN.META.SUBARRAY.NAME': 'UNDEFINED', 'ROMAN.META.EXPOSURE.TYPE': 'UNDEFINED', 'ROMAN.META.OBSERVATION.DATE': '1900-01-01', 'ROMAN.META.OBSERVATION.TIME': '00:00:00'}

    ==================================================
    Test setting DATE/TIME from USEAFTER.

    >>> config.ALLOW_BAD_USEAFTER.reset()
    >>> config.ALLOW_BAD_USEAFTER.set("1")
    False
    >>> reference_keys_to_dataset_keys(namedtuple('x', ['reference_to_dataset', 'filename'])({}, 'secret_code_file.txt'), \
    {'ROMAN.META.USEAFTER' : '1770-12-01T01:23:45', \
     'ROMAN.META.OBSERVATION.DATE' : '1879-03-14', \
     'ROMAN.META.OBSERVATION.TIME' : '12:34:56'})
    {'ROMAN.META.USEAFTER': '1770-12-01T01:23:45', 'ROMAN.META.OBSERVATION.DATE': '1770-12-01', 'ROMAN.META.OBSERVATION.TIME': '01:23:45', 'ROMAN.META.SUBARRAY.NAME': 'UNDEFINED', 'ROMAN.META.EXPOSURE.TYPE': 'UNDEFINED'}

    ==================================================
    Test badly formatted USEAFTER.

    >>> config.ALLOW_BAD_USEAFTER.reset()
    >>> reference_keys_to_dataset_keys(namedtuple('x', ['reference_to_dataset', 'filename'])({}, 'secret_code_file.txt'), \
    {'ROMAN.META.USEAFTER' : 'bad user after', \
     'ROMAN.META.OBSERVATION.DATE' : '1879-03-14', \
     'ROMAN.META.OBSERVATION.TIME' : '12:34:56'})
    Traceback (most recent call last):
    ...
    crds.core.exceptions.InvalidUseAfterFormat: Bad USEAFTER time format = 'bad user after'
    """
    header = dict(header)

    # Basic common pattern translations
    translations = {
        "ROMAN.META.EXPOSURE.P_EXPTYPE": "ROMAN.META.EXPOSURE.TYPE",
        "ROMAN.META.INSTRUMENT.P_BAND": "ROMAN.META.INSTRUMENT.BAND",
        "ROMAN.META.INSTRUMENT.P_DETECTOR": "ROMAN.META.INSTRUMENT.DETECTOR",
        "ROMAN.META.INSTRUMENT.P_CHANNEL": "ROMAN.META.INSTRUMENT.CHANNEL",
        "ROMAN.META.INSTRUMENT.P_FILTER": "ROMAN.META.INSTRUMENT.FILTER",
        "ROMAN.META.INSTRUMENT.P_MODULE": "ROMAN.META.INSTRUMENT.MODULE",
        "ROMAN.META.SUBARRAY.P_SUBARRAY": "ROMAN.META.SUBARRAY.NAME",
        "ROMAN.META.INSTRUMENT.P_GRATING": "ROMAN.META.INSTRUMENT.GRATING",
        "ROMAN.META.EXPOSURE.PREADPATT": "ROMAN.META.EXPOSURE.READPATT",
        "ROMAN.META.EXPOSURE.P_READPATT": "ROMAN.META.EXPOSURE.READPATT",

        # vvvv Speculative,  not currently defined or required by CAL vvvvv
        "ROMAN.META.INSTRUMENT.PCORONAGRAPH":
        "ROMAN.META.INSTRUMENT.CORONAGRAPH",
    }

    # Rmap header reference_to_dataset field translations,  can override basic!
    try:
        translations.update(rmapping.reference_to_dataset)
    except AttributeError:
        pass

    log.verbose("reference_to_dataset translations:\n",
                log.PP(translations),
                verbosity=60)
    log.verbose("reference_to_dataset input header:\n",
                log.PP(header),
                verbosity=80)

    for key in header:
        # Match META.X.P_SOMETHING or P_SOMETH
        if (key.split(".")[-1].startswith("P_")) and key not in translations:
            log.warning(
                "CRDS-pattern-like keyword", repr(key),
                "w/o CRDS translation to corresponding dataset keyword.")
            log.info(
                "Pattern-like keyword", repr(key),
                "may be misspelled or missing its translation in CRDS.  Pattern will not be used."
            )
            log.info(
                "The translation for", repr(key),
                "can be defined in crds.roman.locate or rmap header reference_to_dataset field."
            )
            log.info(
                "If this is not a pattern keyword, adding a translation to 'not-a-pattern'",
                "will suppress this warning.")

    # Add replacements for translations *if* the existing untranslated value
    # is poor and the translated value is better defined.   This is to do
    # translations w/o replacing valid/concrete DM values with something
    # like guessed values of "UNDEFINED" or "N/A".
    for rkey in sorted(translations):
        if rkey in header:
            dkey = translations[rkey]
            dval = header.get(translations[rkey], None)
            rval = header[rkey]
            if rval not in [None, "UNDEFINED"] and rval != dval:
                log.info("Setting", repr(dkey), "=", repr(dval), "to value of",
                         repr(rkey), "=", repr(rval))
                header[dkey] = rval

    if "ROMAN.META.SUBARRAY.NAME" not in header:
        header["ROMAN.META.SUBARRAY.NAME"] = "UNDEFINED"

    if "ROMAN.META.EXPOSURE.TYPE" not in header:
        header["ROMAN.META.EXPOSURE.TYPE"] = "UNDEFINED"

    # If USEAFTER is defined,  or we're configured to fake it...
    #   don't invent one if it's missing and we're not faking it.
    if "ROMAN.META.USEAFTER" in header or config.ALLOW_BAD_USEAFTER:

        # Identify this file as best as possible.
        filename = header.get("ROMAN.META.FILENAME", None) or rmapping.filename

        reformatted = timestamp.reformat_useafter(filename, header).split()
        header["ROMAN.META.OBSERVATION.DATE"] = reformatted[0]
        header["ROMAN.META.OBSERVATION.TIME"] = reformatted[1]

    log.verbose("reference_to_dataset output header:\n",
                log.PP(header),
                verbosity=80)

    return header