Example #1
File: list.py  Project: jaytmiller/crds
 def check_exptypes(self):
     """Based on EXP_TYPEs defined by CAL schema and the specified instrument
     contexts, print out log info on missing or unexpected coverage.
     """
     for imap_name in self.contexts:
         i_loaded = crds.get_cached_mapping(imap_name)
         s_exp_types = self.locator.get_exptypes(i_loaded.instrument)
         for exp_type in s_exp_types:
             reftypes = self.locator.get_reftypes(exp_type)
             for filekind in i_loaded.selections:
                 ufilekind = (i_loaded.instrument.upper(), filekind.upper())
                 rmap_name = i_loaded.selections[filekind]
                 if rmap_name == 'N/A':
                     if filekind in reftypes:
                         log.verbose("Reftype rmap", repr(ufilekind), "is defined as N/A for", repr(exp_type))
                 else:
                     r_loaded = i_loaded.get_rmap(filekind)
                     r_exp_types = r_loaded.get_parkey_map().get("META.EXPOSURE.TYPE", None)
                     if r_exp_types is None:   # ???
                         log.verbose("Reftype", repr(ufilekind), "does not match using EXP_TYPE.")
                     elif exp_type in r_exp_types:
                         if filekind in reftypes:
                             log.verbose("Reftype", repr(ufilekind), "explicitly mentions", repr(exp_type))
                         else:
                             log.warning("Reftype", repr(ufilekind), "has unexpected coverage for", repr(exp_type))
                     elif "ANY" in r_exp_types or "N/A" in r_exp_types:
                         log.verbose("Reftype", repr(ufilekind), "is satisfied by ANY or N/A for", repr(exp_type))
                     elif filekind in reftypes:
                         log.info("Reftype", repr(ufilekind), "is missing coverage for", repr(exp_type))
                     else:
                         log.verbose("Reftype", repr(ufilekind), "has no expected coverage for", repr(exp_type))
Example #2
File: core.py  Project: oirlab/tmt-crds
    def check_header(self, filename, header):
        """Evalutate the header expression associated with this validator (as its sole value)
        with respect to the given `header`.  Read `header` from `filename` if `header` is None.
        """
        # super(KernelunityValidator, self).check_header(filename, header)
        array_name = self.complex_name
        all_data = header[array_name].DATA.transpose()
        images = int(np.product(all_data.shape[:-2]))
        images_shape = (images, ) + all_data.shape[-2:]
        images_data = np.reshape(all_data, images_shape)
        log.verbose("File=" + repr(os.path.basename(filename)), "Checking",
                    len(images_data), repr(array_name), "kernel(s) of size",
                    images_data[0].shape,
                    "for individual sums of 1+-1e-6.   Center pixels >= 1.")

        center_0 = images_data.shape[-2] // 2
        center_1 = images_data.shape[-1] // 2
        center_pixels = images_data[..., center_0, center_1]
        if not np.all(center_pixels >= 1.0):
            log.warning(
                "Possible bad IPC Kernel:  One or more kernel center pixel value(s) too small, should be >= 1.0"
            )
            # raise BadKernelCenterPixelTooSmall(
            #    "One or more kernel center pixel value(s) too small,  should be >= 1.0")

        for (i, image) in enumerate(images_data):
            if abs(image.sum() - 1.0) > 1.0e-6:
                raise BadKernelSumError("Kernel sum", image.sum(),
                                        "is not 1+-1e-6 for kernel #" + str(i),
                                        ":", repr(image))
Example #3
File: api.py  Project: sean-lockwood/crds
def get_crds_server():
    """Return the base URL for the CRDS JSON RPC server.
    """
    url = URL[:-len(URL_SUFFIX)]
    if not url.startswith("https://") and "localhost" not in url:
        log.warning("CRDS_SERVER_URL does not start with https://  ::", url)
    return url
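
The function just strips the JSON-RPC suffix off the module-level URL. A standalone sketch of that slicing; the URL and the URL_SUFFIX value are illustrative assumptions, not taken from the crds client source:

URL_SUFFIX = "/json/"
URL = "https://hst-crds.stsci.edu" + URL_SUFFIX
url = URL[:-len(URL_SUFFIX)]   # drop the suffix to recover the base URL
assert url == "https://hst-crds.stsci.edu"
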
Example #4
    def update_headers(self, headers2, only_ids=None):
        """Incorporate `headers2` updated values into `self.headers`.  Since `headers2` may be incomplete,
        do param-by-param update.   Nominally,  this is to add OPUS bestrefs corrections (definitive) to DADSOPS
        database bestrefs (fast but not definitive).
        """
        if only_ids is None:
            only_ids = headers2.keys()

        items = list(headers2.items())  # list() so bad datasets can be deleted while iterating
        for dataset_id, header in items:
            if isinstance(header, str):
                log.warning("Skipping bad dataset", dataset_id, ":", headers2[dataset_id])
                del headers2[dataset_id]

        # Munge for consistent case and value formatting regardless of source
        headers2 = {dataset_id:
                    {key.upper(): bestrefs_condition(val) for (key, val) in headers2[dataset_id].items()}
                    for dataset_id in headers2 if dataset_id in only_ids}

        # replace param-by-param,  not id-by-id, since headers2[id] may be partial
        for dataset_id in headers2:
            if dataset_id not in self.headers:
                log.verbose("Adding headers for", repr(dataset_id))
                self.headers[dataset_id] = {}
            else:
                log.verbose("Updating headers for", repr(dataset_id))
            header1, header2 = self.headers[dataset_id], headers2[dataset_id]
            for key in header2:
                if key not in header1 or header1[key] != header2[key]:
                    if key in header1:
                        log.verbose("Updating/correcting", repr(dataset_id), "key", repr(key),
                                    "from", repr(header1[key]), "to", repr(header2[key]))
                    else:
                        log.verbose("Adding", repr(dataset_id), "key", repr(key), "=", repr(header2[key]))
                    header1[key] = header2[key]
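
A toy sketch (invented ids and keys) of why the merge is param-by-param rather than id-by-id: a partial headers2 entry must not wipe out parameters already present in self.headers.

headers1 = {"ID1": {"DARKFILE": "x.fits", "FLATFILE": "y.fits"}}
headers2 = {"ID1": {"DARKFILE": "z.fits"}}          # partial correction

for dataset_id, header2 in headers2.items():
    header1 = headers1.setdefault(dataset_id, {})
    for key, value in header2.items():
        header1[key] = value                        # replace only supplied params

assert headers1["ID1"] == {"DARKFILE": "z.fits", "FLATFILE": "y.fits"}
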
Example #5
File: proxy.py  Project: jaytmiller/crds
    def _call(self, *args, **kwargs):
        """Core of RPC dispatch without error interpretation, logging, or return value decoding."""
        params = kwargs if len(kwargs) else args
        # if Any.kind(params) == Object and self.__version != '2.0':
        #   raise Exception('Unsupported arg type for JSON-RPC 1.0 '
        #                  '(the default version for this client, '
        #                  'pass version="2.0" to use keyword arguments)')
        jsonrpc_params = {"jsonrpc": self.__version,
                          "method": self.__service_name,
                          'params': params,
                          'id': message_id()
                         }
        
        parameters = json.dumps(jsonrpc_params)
        
        url = self._get_url(jsonrpc_params)
        
        if "serverless" in url or "server-less" in url:
            raise exceptions.ServiceError("Configured for server-less mode.  Skipping JSON RPC " + repr(self.__service_name))

        if log.get_verbose() <= 50:
            log.verbose("CRDS JSON RPC", self.__service_name, params if len(str(params)) <= 60 else "(...)", "-->")
        else:
            log.verbose("CRDS JSON RPC to", url, "parameters", params, "-->")
        
        response = apply_with_retries(self._call_service, parameters, url)

        try:
            rval = json.loads(response)
        except Exception as exc:
            log.warning("Invalid CRDS jsonrpc response:\n", response)
            raise
        
        return rval
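
For orientation, the serialized request built above has the standard JSON-RPC shape; the service name, params, and id below are illustrative, not taken from a real call:

import json

jsonrpc_params = {"jsonrpc": "1.0",
                  "method": "get_default_context",
                  "params": ["jwst-operational"],
                  "id": 1}
print(json.dumps(jsonrpc_params))
# {"jsonrpc": "1.0", "method": "get_default_context", "params": ["jwst-operational"], "id": 1}
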
Example #6
File: schema.py  Project: oirlab/tmt-crds
def _flat_to_tpns(flat=None, schema_name=None):
    """Convert flat representation of DM schema to list of all TpnInfo objects."""
    if flat is None:
        flat = _schema_to_flat(_load_schema(schema_name))
    tpns = []
    for key, value in flat.items():
        if key.endswith(".TYPE"):
            basekey = str(key[:-len(".TYPE")])
            legal_values = [
                str(val) for val in flat.get(basekey + ".ENUM", [])
            ]
            if legal_values:
                legal_values += ["ANY", "N/A"]
            legal_values = tuple(sorted(set(legal_values)))
            if isinstance(value, list):
                value = tuple(value)
            datatype = SCHEMA_TYPE_TO_TPN.get(value, None)
            if datatype is not None:
                tpn = TpnInfo(name=basekey.upper(),
                              keytype="H",
                              datatype=datatype[0],
                              presence=datatype[1],
                              values=legal_values)
                log.verbose("Adding tpn constraint from DM schema:",
                            repr(tpn),
                            verbosity=65)
                tpns.append(tpn)
            else:
                log.warning("No TPN form for", repr(key), repr(value))
    return sorted(tpns)
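
A standalone sketch of the flat-to-TpnInfo step above, using stand-ins for the crds internals; the TpnInfo fields mirror the keyword arguments in the code, while the SCHEMA_TYPE_TO_TPN entry and the flat keys are assumptions for illustration:

from collections import namedtuple

TpnInfo = namedtuple("TpnInfo", "name keytype datatype presence values")
SCHEMA_TYPE_TO_TPN = {"string": ("C", "O")}   # assumed mapping entry

flat = {"META.EXPOSURE.TYPE.TYPE": "string",
        "META.EXPOSURE.TYPE.ENUM": ["FGS_DARK", "FGS_FOCUS"]}

basekey = "META.EXPOSURE.TYPE"
legal_values = tuple(sorted(set(flat[basekey + ".ENUM"] + ["ANY", "N/A"])))
datatype = SCHEMA_TYPE_TO_TPN[flat[basekey + ".TYPE"]]
tpn = TpnInfo(basekey.upper(), "H", datatype[0], datatype[1], legal_values)
print(tpn)
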
Example #7
File: core.py  Project: oirlab/tmt-crds
    def check_header(self, filename, header):
        """Evalutate the header expression associated with this validator (as its sole value)
        with respect to the given `header`.

        Note that array-based checkers are not automatically loaded during a classic header
        fetch and expressions can involve operations on multiple keywords or arrays.
        """
        log.verbose("File=" + repr(os.path.basename(filename)), "Checking",
                    repr(self.name), "condition", str(self._expr))
        for keyword in expr_identifiers(self._expr):
            if header.get(keyword, "UNDEFINED") == "UNDEFINED":
                log.verbose_warning("Keyword or Array", repr(keyword),
                                    "is 'UNDEFINED'. Skipping ",
                                    repr(self._expr))
                return True  # fake satisfied
        try:
            satisfied = eval(self._expr_code, header, self._eval_namespace)
        except Exception as exc:
            raise RequiredConditionError("Failed checking constraint",
                                         repr(self._expr), ":", str(exc))
        if not satisfied:
            raise RequiredConditionError("Constraint", str(self._expr),
                                         "is not satisfied.")
        elif satisfied == "W":  # from warn_only() helper
            log.warning("Constraint", str(self._expr), "is not satisfied.")
            satisfied = True
        return satisfied
Example #8
File: core.py  Project: oirlab/tmt-crds
 def _check_value(self, filename, value):
     self.verbose(filename, value)
     #         try:
     #             timestamp.Jwstdate.get_datetime(value)
     #         except Exception:
     #             raise ValueError(log.format(
     #                 "Invalid JWST date", repr(value), "for", repr(self.name),
     #                 "format should be", repr("YYYY-MM-DDTHH:MM:SS")))
     try:
         timestamp.Jwstdate.get_datetime(value)
     except ValueError:
         try:
             timestamp.Anydate.get_datetime(value)
         except ValueError:
             try:
                 timestamp.Jwstdate.get_datetime(value.replace(" ", "T"))
             except ValueError:
                 timestamp.Jwstdate.get_datetime(
                     value)  # re-execute to replace exception raised
         log.warning(
             "Non-compliant date format",
             repr(value),
             "for",
             repr(self.name),
             "should be",
             repr("YYYY-MM-DDTHH:MM:SS"),
         )
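
The nested handlers implement a parse-with-fallbacks ladder that still warns on non-compliant forms. A simplified standalone version, using datetime.strptime as a stand-in for the crds timestamp helpers:

from datetime import datetime

def parse_jwst_date(value):
    """Return (datetime, compliant) -- tolerate a space separator but flag it."""
    try:
        return datetime.strptime(value, "%Y-%m-%dT%H:%M:%S"), True
    except ValueError:
        return datetime.strptime(value.replace(" ", "T"),
                                 "%Y-%m-%dT%H:%M:%S"), False

dt, compliant = parse_jwst_date("2024-01-01 12:00:00")
assert not compliant   # parsed, but would draw the warning above
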
Example #9
    def is_applicable(self, header):
        """Return True IFF this Validator is applicable based upon header and the
        presence field of the TpnInfo.   The presence field can contain an expression
        which is evaluated in the context of `header`.

        There are variations of "True" which can be returned.  Some checks are
        designated optional (O), warning (W), or as only applying to FULL (F)
        frame or true SUBARRAY (S) cases.  These cases return the presence
        character which as a non-zero length string also evaluates to True but
        carries extra information,  particularly "optional" or "warning".
        """
        SUBARRAY = header.get('SUBARRAY','UNDEFINED')
        if self._presence_condition_code:
            try:
                presence = eval(self._presence_condition_code, header, self._eval_namespace)
                log.verbose("Validator", self.info, "is",
                            "applicable." if presence else "not applicable.", verbosity=70)
                if not presence:
                    return False
            except Exception as exc:
                log.warning("Failed checking applicability of", repr(self.info),"skipping check : ", str(exc))
                return False
        else:
            presence = self.info.presence
        if presence in ["O","W"]:
            return presence
#            return header.get(self.name, False) != "UNDEFINED"
        elif presence == "F": # IF_FULL_FRAME
            return validator_helpers.is_full_frame(SUBARRAY)
        elif presence == "S": # IF_SUBARRAY        
            return validator_helpers.is_subarray(SUBARRAY)
        elif presence == "A":
            return validator_helpers.subarray_defined(header)
        else:    
            return True
Example #10
File: sync.py  Project: oirlab/tmt-crds
 def organize_references(self, new_mode):
     """Find all references in the CRDS cache and relink them to the paths which are implied by `new_mode`.
     This is used to reorganize existing file caches into new layouts,  e.g. flat --> by instrument.
     """
     old_refpaths = rmap.list_references("*", observatory=self.observatory, full_path=True)
     old_mode = config.get_crds_ref_subdir_mode(self.observatory)
     log.info("Reorganizing", len(old_refpaths), "references from", repr(old_mode), "to", repr(new_mode))
     config.set_crds_ref_subdir_mode(new_mode, observatory=self.observatory)
     new_mode = config.get_crds_ref_subdir_mode(self.observatory)  # did it really change?
     for refpath in old_refpaths:
         with log.error_on_exception("Failed relocating:", repr(refpath)):
             desired_loc = config.locate_file(os.path.basename(refpath), observatory=self.observatory)
             if desired_loc != refpath:
                 if os.path.exists(desired_loc):
                     if not self.args.organize_delete_junk:
                         log.warning("Link or directory already exists at", repr(desired_loc), "Skipping", repr(refpath))
                         continue
                     utils.remove(desired_loc, observatory=self.observatory)
                 if config.writable_cache_or_info("Skipping file relocation from", repr(refpath), "to", repr(desired_loc)):
                     log.info("Relocating", repr(refpath), "to", repr(desired_loc))
                     shutil.move(refpath, desired_loc)
             else:
                 if old_mode != new_mode:
                     log.verbose_warning("Keeping existing cached file", repr(desired_loc), "already in target mode", repr(new_mode))
                 else:
                     log.verbose_warning("No change in subdirectory mode", repr(old_mode), "skipping reorganization of", repr(refpath))
     if new_mode == "flat" and old_mode == "instrument":
         log.info("Reorganizing from 'instrument' to 'flat' cache,  removing instrument directories.")
         for instrument in self.locator.INSTRUMENTS:
             self.remove_dir(instrument)
Example #11
 def handle_missing(self, header):
     """This Validator's key is missing.   Either raise an exception or
     ignore it depending on whether this Validator's key is required.
     """
     presence = self.info.presence
     if self.conditionally_required:
         if header: 
             presence = self.is_applicable(header)
             if not presence:
                 log.verbose("Conditional constraint on", repr(self.name),
                             "is not required by", repr(self.info.presence), verbosity=70)
                 return "UNDEFINED"
         else:
             log.verbose("No header supplied to evaluate conditional constraint",
                         repr(self.name), "based on", repr(self.info.presence),
                         "  Skipping.")
             return "UNDEFINED"
     if presence in ["R","P",True]:
         raise MissingKeywordError("Missing required", self._keytype_descr, repr(self.name))
     elif presence in ["W"]:
         log.warning("Missing suggested", self._keytype_descr, repr(self.name))
     elif presence in ["O"]:
         log.verbose("Optional", self._keytype_descr, repr(self.name), " is missing.", verbosity=70)
     elif presence in ["S","F","A"]:
         log.verbose("Conditional SUBARRAY parameter is not defined.")
     else:
         raise TpnDefinitionError("Unexpected validator 'presence' value:",
                                  repr(self.info.presence))
     return "UNDEFINED"
Example #12
File: sync.py  Project: jaytmiller/crds
 def organize_references(self, new_mode):
     """Find all references in the CRDS cache and relink them to the paths which are implied by `new_mode`.   
     This is used to reorganize existing file caches into new layouts,  e.g. flat --> by instrument.
     """
     old_refpaths = rmap.list_references("*", observatory=self.observatory, full_path=True)
     old_mode = config.get_crds_ref_subdir_mode(self.observatory)
     log.info("Reorganizing", len(old_refpaths), "references from", repr(old_mode), "to", repr(new_mode))
     config.set_crds_ref_subdir_mode(new_mode, observatory=self.observatory)
     new_mode = config.get_crds_ref_subdir_mode(self.observatory)  # did it really change?
     for refpath in old_refpaths:
         with log.error_on_exception("Failed relocating:", repr(refpath)):
             desired_loc = rmap.locate_file(os.path.basename(refpath), observatory=self.observatory)
             if desired_loc != refpath:
                 if os.path.exists(desired_loc):
                     if not self.args.organize_delete_junk:
                         log.warning("Link or directory already exists at", repr(desired_loc), "Skipping", repr(refpath))
                         continue
                     utils.remove(desired_loc, observatory=self.observatory)
                 if config.writable_cache_or_info("Skipping file relocation from", repr(refpath), "to", repr(desired_loc)):
                     log.info("Relocating", repr(refpath), "to", repr(desired_loc))
                     shutil.move(refpath, desired_loc)
             else:
                 if old_mode != new_mode:
                     log.verbose_warning("Keeping existing cached file", repr(desired_loc), "already in target mode", repr(new_mode))
                 else:
                     log.verbose_warning("No change in subdirectory mode", repr(old_mode), "skipping reorganization of", repr(refpath))
     if new_mode == "flat" and old_mode == "instrument":
         log.info("Reorganizing from 'instrument' to 'flat' cache,  removing instrument directories.")
         for instrument in self.locator.INSTRUMENTS:
             self.remove_dir(instrument)
Example #13
File: proxy.py  Project: oirlab/tmt-crds
    def _call(self, *args, **kwargs):
        """Core of RPC dispatch without error interpretation, logging, or return value decoding."""
        params = kwargs if len(kwargs) else args
        jsonrpc_params = {"jsonrpc": self.__version,
                          "method": self.__service_name,
                          'params': params,
                          'id': message_id()
                         }

        parameters = json.dumps(jsonrpc_params)

        url = self._get_url(jsonrpc_params)

        if "serverless" in url or "server-less" in url:
            raise exceptions.ServiceError("Configured for server-less mode.  Skipping JSON RPC " + repr(self.__service_name))

        if log.get_verbose() <= 50:
            log.verbose("CRDS JSON RPC", self.__service_name, params if len(str(params)) <= 60 else "(...)", "-->")
        else:
            log.verbose("CRDS JSON RPC to", url, "parameters", params, "-->")

        response = apply_with_retries(self._call_service, parameters, url)

        try:
            rval = json.loads(response)
        except Exception as exc:
            log.warning("Invalid CRDS jsonrpc response:\n", response)
            raise

        return rval
Example #14
File: diff.py  Project: jaytmiller/crds
def difference(observatory, old_file, new_file, *args, **keys):
    """Difference different kinds of CRDS files (mappings, FITS references, etc.)
    named `old_file` and `new_file` against one another and print out the results 
    on stdout.
    
    Returns:
    
    0 no differences
    1 some differences
    2 errors in subshells
    
    """
    filetype = config.filetype(old_file)
    differs = {
        "mapping" : MappingDifferencer,
        "asdf" : AsdfDifferencer,
        "fits" : FitsDifferencer,
        "text" : TextDifferencer,
        "yaml" : TextDifferencer,
        "json" : JsonDifferencer,
        }
    differ_class = differs.get(filetype, None)
    if differ_class is None:
        log.warning("Cannot difference file of type", repr(filetype), ":", repr(old_file), repr(new_file))
        status = 2   #  arguably, this should be an error not a warning.  wary of changing now.
    else:
        differ = differ_class(observatory, old_file, new_file, *args, **keys)
        status = differ.difference()
    return status
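
A hypothetical invocation of the function above (mapping names invented; the crds.diff module path is an assumption), using the status codes from the docstring:

from crds.diff import difference

status = difference("hst", "hst_0001.pmap", "hst_0002.pmap")
print({0: "no differences", 1: "some differences", 2: "errors"}[status])
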
Example #15
File: diff.py  Project: jaytmiller/crds
    def main(self):
        """Perform the differencing."""
        self.args.files = [ self.args.old_file, self.args.new_file ]   # for defining self.observatory
        self.old_file = self.locate_file(self.args.old_file)
        self.new_file = self.locate_file(self.args.new_file)
        if self.args.brief:
            self.args.lowest_mapping_only = True
            self.args.remove_paths = True
            self.args.hide_boring_diffs = True
            self.args.include_header_diffs = True
        if self.args.sync_files:
            assert not (self.args.cache1 or self.args.cache2), \
                "--sync-files is not compatible with cache-to-cache differences."
            if self.args.print_all_new_files:
                serial_old = naming.newstyle_serial(self.old_file)
                serial_new = naming.newstyle_serial(self.new_file) + 1
                if None not in [serial_old, serial_new]:
                    errs = sync.SyncScript("crds.sync --range {0}:{1}".format(serial_old, serial_new))()
                    assert not errs, "Errors occurred while syncing all rules to CRDS cache."
                else:
                    log.warning("Cannot sync non-standard mapping names,  results may be incomplete.")
            else:
                self.sync_files([self.old_file, self.new_file])
        elif self.args.print_all_new_files:
            log.warning("--print-all-new-files requires a complete set of rules.  suggest --sync-files.")
            
        # self.args.files = [ self.old_file, self.new_file ]   # for defining self.observatory
    
        assert (self.args.cache1 and self.args.cache2) or (not self.args.cache1 and not self.args.cache2), \
            "Cache-to-cache comparison requires both --cache1 and --cache2;  otherwise neither for single cache comparison."

        if self.args.print_new_files:
            status = self.print_new_files()
        elif self.args.print_all_new_files:
            status = self.print_all_new_files()
        elif self.args.print_affected_instruments:
            status = self.print_affected_instruments()
        elif self.args.print_affected_types:
            status = self.print_affected_types()
        elif self.args.print_affected_modes:
            status = self.print_affected_modes()
        else:
            status = difference(self.observatory, self.old_file, self.new_file, 
                                primitive_diffs=self.args.primitive_diffs, 
                                check_diffs=self.args.check_diffs,
                                check_references=self.args.check_references,
                                mapping_text_diffs=self.args.mapping_text_diffs,
                                include_header_diffs=self.args.include_header_diffs,
                                hide_boring_diffs=self.args.hide_boring_diffs,
                                recurse_added_deleted=self.args.recurse_added_deleted,
                                lowest_mapping_only=self.args.lowest_mapping_only,
                                remove_paths=self.args.remove_paths,
                                squash_tuples=self.args.squash_tuples,
                                cache1=self.args.cache1,
                                cache2=self.args.cache2)
        if log.errors() or log.warnings():
            return 2
        else:
            return status
Example #16
    def handle_cancel(self, message):
        """Generic "cancel" handler reports on commanded cancellation of remote process
        and possibly why it was cancelled.  Then stops monitoring / exits.
        """
        log.warning(self.format_remote("Processing cancelled:", message.data))

        self.result = message.data["result"]

        return _STATUS_CANCELLED
Example #17
    def run(self, progress_callback=None):
        """
        Create stsynphot bandpass objects from the observation modes in the obsmodes file.
        Emits appropriate log messages and returns True if validations succeed,
        False if there's an error.
        """

        failed = 0
        with fits.open(self.obsmodes_file) as hdul:
            total_modes = len(hdul[-1].data)
            log.info(
                "Creating bandpass objects from {} observation modes".format(
                    total_modes))

            if self.processes > 1:
                with Pool(processes=self.processes) as pool:
                    for start_index in range(0, total_modes, self.batch_size):
                        end_index = start_index + self.batch_size
                        results = pool.starmap(
                            _test_stsynphot_mode,
                            [(self.synphot_root, m) for m in
                             hdul[-1].data["OBSMODE"][start_index:end_index]])
                        for i, (result, errors, warns) in enumerate(results):
                            if not result:
                                failed += 1
                            for warning in warns:
                                log.warning(warning)
                            for error in errors:
                                log.error(error)

                            if progress_callback:
                                progress_callback(start_index + i + 1,
                                                  total_modes)
            else:
                for i, obsmode in enumerate(hdul[-1].data["OBSMODE"]):
                    result, errors, warns = _test_stsynphot_mode(
                        self.synphot_root, obsmode)
                    if not result:
                        failed += 1
                    for warning in warns:
                        log.warning(warning)
                    for error in errors:
                        log.error(error)

                    if progress_callback:
                        progress_callback(i + 1, total_modes)

        if failed > 0:
            log.info("{} / {} observation modes failed".format(
                failed, total_modes))
        else:
            log.info("Congratulations, all observation modes succeeded!")

        return failed == 0
Example #18
File: api.py  Project: sean-lockwood/crds
def set_crds_server(url):
    """Configure the CRDS JSON services server to `url`,  
    e.g. 'http://localhost:8000'
    """
    if not url.startswith("https://") and "localhost" not in url:
        log.warning("CRDS_SERVER_URL does not start with https://  ::", url)
    if url.endswith("/"):
        url = url[:-1]
    global URL, S
    URL = url + URL_SUFFIX
    S = CheckingProxy(URL, version="1.0")
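
Typical usage, pairing this setter with the get_crds_server() accessor shown earlier; the URL is the public HST CRDS server, used here purely as an illustration:

set_crds_server("https://hst-crds.stsci.edu")
assert get_crds_server() == "https://hst-crds.stsci.edu"
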
Example #19
def load_all_type_constraints(observatory):
    """Load all the type constraint files from `observatory` package.

    There are constraints that apply to:

    ALL instruments and types
    ALL types of one instrument
    ALL instruments of one type
    One instrument and type

    Generally these should be thought of as designed for successive refinement,
    so all constraints are applied, but as their scope narrows they can become
    stricter.  Since increasing strictness and refinement require more knowledge,
    the development order of the constraints mirrored that.

    However, in the (revised) loading below, constraints are loaded by order of
    decreasing strictness; this makes it possible to define strict
    constants/replacements early in the loading process and to apply those
    to customize the more generalized constraints loaded later.
    """
    from crds.core import rmap, heavy_client
    pmap_name = heavy_client.load_server_info(observatory).operational_context
    pmap = rmap.get_cached_mapping(pmap_name)
    locator = utils.get_locator_module(observatory)
    for instr in pmap.selections:
        imap = pmap.get_imap(instr)
        for filekind in imap.selections:
            if imap.selections[filekind] == "N/A":
                continue
            try:
                suffix = locator.TYPES.filekind_to_suffix(instr, filekind)
            except Exception as exc:
                log.warning("Missing suffix coverage for",
                            repr((instr, filekind)), ":", exc)
            else:
                locator.get_all_tpninfos(instr, suffix, "tpn")      # With core schema,  one type loads all
                locator.get_all_tpninfos(instr, suffix, "ld_tpn")   # With core schema,  one type loads all
                locator.get_all_tpninfos("all", suffix, "tpn")      # With core schema,  one type loads all
                locator.get_all_tpninfos("all", suffix, "ld_tpn")   # With core schema,  one type loads all
        locator.get_all_tpninfos(instr, "all", "tpn")
        locator.get_all_tpninfos(instr, "all", "ld_tpn")
    locator.get_all_tpninfos("all", "all", "tpn")
    locator.get_all_tpninfos("all", "all", "ld_tpn")
Example #20
File: diff.py  Project: jaytmiller/crds
def mapping_check_references(mapping, derived_from):
    """Regardless of matching criteria,  do a simple check listing added or deleted
    references as appropriate.
    """
    mapping = rmap.asmapping(mapping, cached="readonly")
    derived_from = rmap.asmapping(derived_from, cached="readonly")
    old_refs = set(derived_from.reference_names())
    new_refs = set(mapping.reference_names())
    if old_refs - new_refs:
        log.warning("Deleted references for", repr(derived_from.filename), "and", repr(mapping.filename), "=",
                 list(old_refs - new_refs))
    if new_refs - old_refs:
        log.warning("Added references for", repr(derived_from.filename), "and", repr(mapping.filename), "=",
                 list(new_refs - old_refs))
Example #21
def hijacked_showwarning(message, category, filename, lineno, *args, **keys):
    """Map the warnings.showwarning plugin function parameters onto log.warning."""
    try:
        scat = str(category).split(".")[-1].split("'")[0]
    except Exception:
        scat = category
    try:
        sfile = str(filename).split(".egg")[-1].split("site-packages")[-1].replace("/",".").replace(".py", "")
        while sfile.startswith(("/",".")):
            sfile = sfile[1:]
    except Exception:
        sfile = filename
    message = str(message).replace("\n","")
    log.warning(scat, ":", sfile, ":", message)
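
The standard-library hook this plugin targets is warnings.showwarning; assigning the function there routes all Python warnings through log.warning:

import warnings

warnings.showwarning = hijacked_showwarning
warnings.warn("demo warning")   # now emitted via the CRDS log
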
Example #22
def verify_checksum(file_):
    """Verify checksums in `file_`."""
    log.info("Verifying checksum for", repr(file_))
    if config.is_reference(file_):
        data_file.verify_checksum(file_)
    elif rmap.is_mapping(file_):
        if config.CRDS_IGNORE_MAPPING_CHECKSUM.get():
            log.warning(
                "Mapping checksums are disabled by config.CRDS_IGNORE_MAPPING_CHECKSM."
            )
        rmap.load_mapping(file_)
    else:
        raise exceptions.CrdsError(
            "File", repr(file_),
            "does not appear to be a CRDS reference or mapping file.")
Example #23
    def _check_throughput_first_and_last(self, hdul):
        """
        Confirm that the first and last values of the THROUGHPUT and MJD# columns
        are zero.  Emit a warning only, since there are exceptions to this rule.
        """
        for column_name in hdul[-1].data.names:
            if column_name == "THROUGHPUT" or column_name.startswith("MJD#"):
                if not (hdul[-1].data[column_name][0] == 0.0):
                    log.warning("First value of column '{}' is not 0.0".format(
                        column_name))
                if not (hdul[-1].data[column_name][-1] == 0.0):
                    log.warning("Last value of column '{}' is not 0.0".format(
                        column_name))

        return True
Example #24
    def __init__(self, context, datasets, datasets_since):
        """"Contact the CRDS server and get headers for the list of `datasets` ids with respect to `context`."""
        super(DatasetHeaderGenerator, self).__init__(context, datasets, datasets_since)
        server = api.get_crds_server()
        log.info("Dumping dataset parameters from CRDS server at", repr(server), "for", repr(datasets))
        self.headers = api.get_dataset_headers_by_id(context, datasets)
        log.info("Dumped", len(self.headers), "of", len(datasets), "datasets from CRDS server at", repr(server))

        # every command line id should correspond to 1 or more headers
        for source in self.sources:
            if self.matching_two_part_id(source) not in self.headers.keys():
                log.warning("Dataset", repr(source), "isn't represented by downloaded parameters.")

        # Process according to downloaded 2-part ids,  not command line ids.
        self.sources = sorted(self.headers.keys())
Example #25
File: sync.py  Project: oirlab/tmt-crds
    def verify_file(self, file, info, bytes_so_far, total_bytes, nth_file, total_files):
        """Check one `file` against the provided CRDS database `info` dictionary."""
        path = config.locate_file(file, observatory=self.observatory)
        base = os.path.basename(file)
        n_bytes = int(info["size"])

        # Only output verification info for slow sha1sum checks by default
        log.verbose(
            api.file_progress(
                "Verifying", base, path, n_bytes, bytes_so_far, total_bytes, nth_file, total_files),
            verbosity=10 if self.args.check_sha1sum else 60)

        if not os.path.exists(path):
            if base not in self.bad_files:
                log.error("File", repr(base), "doesn't exist at", repr(path))
            return

        # Checks which force repairs should do if/else to avoid repeat repair
        size = os.stat(path).st_size
        if int(info["size"]) != size:
            self.error_and_repair(path, "File", repr(base), "length mismatch LOCAL size=" + srepr(size),
                                  "CRDS size=" + srepr(info["size"]))
        elif self.args.check_sha1sum or config.is_mapping(base):
            log.verbose("Computing checksum for", repr(base), "of size", repr(size), verbosity=60)
            sha1sum = utils.checksum(path)
            if info["sha1sum"] == "none":
                log.warning("CRDS doesn't know the checksum for", repr(base))
            elif info["sha1sum"] != sha1sum:
                self.error_and_repair(path, "File", repr(base), "checksum mismatch CRDS=" + repr(info["sha1sum"]),
                                      "LOCAL=" + repr(sha1sum))

        if info["state"] not in ["archived", "operational"]:
            log.warning("File", repr(base), "has an unusual CRDS file state", repr(info["state"]))

        if info["rejected"] != "false":
            log.verbose_warning("File", repr(base), "has been explicitly rejected.", verbosity=60)
            if self.args.purge_rejected:
                self.remove_files([path], "file")
            return

        if info["blacklisted"] != "false":
            log.verbose_warning("File", repr(base), "has been blacklisted or is dependent on a blacklisted file.",
                                verbosity=60)
            if self.args.purge_blacklisted:
                self.remove_files([path], "file")
            return
        return
Example #26
File: wfc3.py  Project: jaytmiller/crds
def wfc3_darkfile_filter(kmap_orig):
    """Filter to customize DARKFILE for hst_gentools/gen_rmap.py.
    
    Removes dead SUBTYPE='' darkfiles.   Driven by CDBS reffile_ops database defects.
    """
    darkfile_match_keys = ('DETECTOR', 'CCDAMP', 'BINAXIS1', 'BINAXIS2', 'CCDGAIN', 'SAMP_SEQ', 'SUBTYPE')
    kmap = copy.deepcopy(kmap_orig)
    for match in kmap_orig:
        header = dict(list(zip(darkfile_match_keys, match)))
        if header["SUBTYPE"] == '':
            header["SUBTYPE"] = "N/A"
            new_match = tuple(header[key] for key in darkfile_match_keys)
            for filemap in kmap[match]:
                log.warning("Re-mapping match with SUBTYPE='' to SUBTYPE='N/A' for", filemap)
            kmap[new_match] = kmap[match]
            del kmap[match]
    return kmap, []
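
A hypothetical call (match tuple and filename invented) showing the SUBTYPE='' to 'N/A' rewrite:

kmap = {("UVIS", "A", "1", "1", "1.5", "NONE", ""): ["u_dark1.fits"]}
fixed, _ = wfc3_darkfile_filter(kmap)
assert ("UVIS", "A", "1", "1", "1.5", "NONE", "N/A") in fixed
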
Example #27
File: sync.py  Project: jaytmiller/crds
    def verify_file(self, file, info, bytes_so_far, total_bytes, nth_file, total_files):
        """Check one `file` against the provided CRDS database `info` dictionary."""
        path = rmap.locate_file(file, observatory=self.observatory)
        base = os.path.basename(file)
        n_bytes = int(info["size"])
        
        # Only output verification info for slow sha1sum checks by default
        log.verbose(
            api.file_progress(
                "Verifying", base, path, n_bytes, bytes_so_far, total_bytes, nth_file, total_files),
            verbosity=10 if self.args.check_sha1sum else 60)
        
        if not os.path.exists(path):
            log.error("File", repr(base), "doesn't exist at", repr(path))
            return

        # Checks which force repairs should do if/else to avoid repeat repair
        size = os.stat(path).st_size
        if int(info["size"]) != size:
            self.error_and_repair(path, "File", repr(base), "length mismatch LOCAL size=" + srepr(size), 
                                  "CRDS size=" + srepr(info["size"]))
        elif self.args.check_sha1sum or config.is_mapping(base):
            log.verbose("Computing checksum for", repr(base), "of size", repr(size), verbosity=60)
            sha1sum = utils.checksum(path)
            if info["sha1sum"] == "none":
                log.warning("CRDS doesn't know the checksum for", repr(base))
            elif info["sha1sum"] != sha1sum:
                self.error_and_repair(path, "File", repr(base), "checksum mismatch CRDS=" + repr(info["sha1sum"]), 
                                      "LOCAL=" + repr(sha1sum))

        if info["state"] not in ["archived", "operational"]:
            log.warning("File", repr(base), "has an unusual CRDS file state", repr(info["state"]))

        if info["rejected"] != "false":
            log.verbose_warning("File", repr(base), "has been explicitly rejected.", verbosity=60)
            if self.args.purge_rejected:
                self.remove_files([path], "files")
            return

        if info["blacklisted"] != "false":
            log.verbose_warning("File", repr(base), "has been blacklisted or is dependent on a blacklisted file.",
                                verbosity=60)
            if self.args.purge_blacklisted:
                self.remove_files([path], "files")
            return
        return
Example #28
File: sync.py  Project: jaytmiller/crds
    def update_context(self):
        """Update the CRDS operational context in the cache.  Handle pipeline-specific
        targeted features of (a) verifying a context switch as actually recorded in
        the local CRDS cache and (b) echoing/pushing the pipeline context back up to the
        CRDS server for tracking using an id/authorization key.

        If errors occurred during the sync and --force_config_update is not set, the cache
        config and context update is skipped.
        """
        if not log.errors() or self.args.force_config_update:
            if self.args.verify_context_change:
                old_context = heavy_client.load_server_info(self.observatory).operational_context
            heavy_client.update_config_info(self.observatory)
            if self.args.verify_context_change:
                self.verify_context_change(old_context)
            if self.args.push_context:
                self.push_context()
        else:
            log.warning("Errors occurred during sync,  skipping CRDS cache config and context update.")
Example #29
File: tables.py  Project: jaytmiller/crds
def tables(filename):
    """Return [ SimpleTable(filename, segment), ... ] for each table segment in filename.
    
    This function is self-cached.    Clear the cache using clear_cache().
    """
    if filename.endswith(".fits"):
        tables = []
        with data_file.fits_open(filename) as hdus:
            for i,hdu in enumerate(hdus):
                classname = hdu.__class__.__name__.upper()
                if "TABLEHDU" in classname:
                    tables.append(SimpleTable(filename, i))
                    if classname == "TABLEHDU":
                        log.warning("ASCII Table detected in HDU#", str(i) +
                                    ".  Particularly for HST, verify that it should not be a BIN Table HDU.")
        return tables
    else:
        return [ SimpleTable(filename, segment=1) ]
Example #41
File: core.py Project: oirlab/tmt-crds
    def check_value(self, filename, value):
        if value in [None,
                     "UNDEFINED"]:  # missing optional or excluded keyword
            return True
        value = self.condition(value)

        try:
            satisfied = eval(self._expr_code, {"VALUE": value},
                             self._eval_namespace)
        except Exception as exc:
            raise RequiredConditionError("Failed checking constraint",
                                         repr(self._expr), ":", str(exc))

        if not satisfied:
            raise RequiredConditionError("Constraint", str(self._expr),
                                         "is not satisfied.")
        elif satisfied == "W":  # from warn_only() helper
            log.warning("Constraint", str(self._expr), "is not satisfied.")
            satisfied = True

        return satisfied
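The "W" branch refers to the warn_only() helper mentioned in the comment above; a minimal sketch of the idea (the real helper lives in the validator's expression-evaluation namespace):

def warn_only(condition):
    """Sketch: demote a failed constraint to a warning by returning the
    truthy sentinel "W" instead of False."""
    return True if condition else "W"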
Example #42
def tables(filename):
    """Return [ SimpleTable(filename, segment), ... ] for each table segment in filename.
    
    This function is self-cached.  Clear the cache using clear_cache().
    """
    if filename.endswith(".fits"):
        tables = []
        with data_file.fits_open(filename) as hdus:
            for i, hdu in enumerate(hdus):
                classname = hdu.__class__.__name__.upper()
                if "TABLEHDU" in classname:
                    tables.append(SimpleTable(filename, i))
                    if classname == "TABLEHDU":
                        log.warning(
                            "ASCII Table detected in HDU#",
                            str(i) +
                            ".  Particularly for HST, verify that it should not be a BIN Table HDU."
                        )
        return tables
    else:
        return [SimpleTable(filename, segment=1)]
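A hedged usage sketch (the file name is illustrative; SimpleTable is the class constructed above):

# One SimpleTable per table HDU for FITS, or a single segment otherwise.
for segment_table in tables("hst_acs_dark_0250.fits"):
    print(segment_table)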
Example #43
File: core.py Project: oirlab/tmt-crds
    def is_applicable(self, header):
        """Return True IFF this Validator is applicable based upon header and the
        presence field of the TpnInfo.   The presence field can contain an expression
        which is evaluated in the context of `header`.

        There are variations of "True" which can be returned.  Some checks are
        designated optional (O), warning (W), or as only applying to FULL (F)
        frame or true SUBARRAY (S) cases.  These cases return the presence
        character which as a non-zero length string also evaluates to True but
        carries extra information,  particularly "optional" or "warning".
        """
        SUBARRAY = header.get('SUBARRAY', 'UNDEFINED')
        if self._presence_condition_code:
            try:
                presence = eval(self._presence_condition_code, header,
                                self._eval_namespace)
                log.verbose("Validator",
                            self.info,
                            "is",
                            "applicable." if presence else "not applicable.",
                            verbosity=70)
                if not presence:
                    return False
            except Exception as exc:
                log.warning("Failed checking applicability of",
                            repr(self.info), "skipping check:", str(exc))
                return False
        else:
            presence = self.info.presence
        if presence in ["O", "W"]:
            return presence
#            return header.get(self.name, False) != "UNDEFINED"
        elif presence == "F":  # IF_FULL_FRAME
            return validator_helpers.is_full_frame(SUBARRAY)
        elif presence == "S":  # IF_SUBARRAY
            return validator_helpers.is_subarray(SUBARRAY)
        elif presence == "A":
            return validator_helpers.subarray_defined(header)
        else:
            return True
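For reference, the presence codes handled above and in handle_missing() below, collected from the branches of both methods:

# "R", "P"  -> required; a missing keyword raises MissingKeywordError
# "W"       -> warn when the keyword is missing or the check fails
# "O"       -> optional; absence is only logged at verbose level
# "F"       -> check applies to full-frame exposures only
# "S"       -> check applies to true subarrays only
# "A"       -> check applies when the SUBARRAY geometry is defined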
Example #44
File: core.py Project: oirlab/tmt-crds
    def handle_missing(self, header):
        """This Validator's key is missing.  Either raise an exception or
        ignore it depending on whether this Validator's key is required.
        """
        presence = self.info.presence
        if self.conditionally_required:
            if header:
                presence = self.is_applicable(header)
                if not presence:
                    log.verbose("Conditional constraint on", repr(self.name),
                                "is not required by", repr(self.info.presence),
                                verbosity=70)
                    return "UNDEFINED"
            else:
                log.verbose("No header supplied to evaluate conditional constraint",
                            repr(self.name), "based on", repr(self.info.presence),
                            "Skipping.")
                return "UNDEFINED"
        if presence in ["R", "P", True]:
            raise MissingKeywordError("Missing required", self._keytype_descr,
                                      repr(self.name))
        elif presence in ["W"]:
            log.warning("Missing suggested", self._keytype_descr, repr(self.name))
        elif presence in ["O"]:
            log.verbose("Optional", self._keytype_descr, repr(self.name),
                        "is missing.", verbosity=70)
        elif presence in ["S", "F", "A"]:
            log.verbose("Conditional SUBARRAY parameter is not defined.")
        else:
            raise TpnDefinitionError("Unexpected validator 'presence' value:",
                                     repr(self.info.presence))
        return "UNDEFINED"
Example #45
def _check_component_filename(context, reftype, suffix, filename, header):
    compname = header.get("COMPNAME", "").lower()
    if not compname:
        log.error("Header missing COMPNAME, unable to certify filename")
        return False

    pattern = re.compile(r".*_([0-9]{3})_" + suffix + r"\.fits")

    match = pattern.match(os.path.basename(filename))
    if not match:
        log.error("Invalid filename: must end in _###_{}.fits".format(suffix))
        return False

    if context:
        new_version = int(match.group(1))

        original_filename = _get_synphot_filename(context, reftype, compname)
        if original_filename:
            match = pattern.match(os.path.basename(original_filename))
            if not match:
                log.warning(
                    "Previous filename '{}' does not end in _###_{}.fits, skipping version check"
                    .format(original_filename, suffix))
                return True

            original_version = int(match.group(1))

            if new_version <= original_version:
                log.error(
                    "New filename version ({:03d}) must exceed previous version ({:03d})"
                    .format(new_version, original_version))
                return False
        else:
            log.warning(
                "No previous file exists for COMPNAME '{}', skipping version check"
                .format(compname))

    return True
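A hedged sketch of a call (all values are illustrative; passing context=None skips the previous-version comparison):

ok = _check_component_filename(
    None,                            # context: None skips the version lookup
    "thermal",                       # reftype (illustrative)
    "th",                            # suffix expected in the file name
    "wfc3_ir_f098m_002_th.fits",     # candidate name, version 002
    {"COMPNAME": "wfc3_ir_f098m"},   # minimal stand-in for the FITS header
)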
Example #46
    def _call(self, *args, **kwargs):
        """Core of RPC dispatch without error interpretation, logging, or return value decoding."""
        params = kwargs if len(kwargs) else args
        jsonrpc_params = {
            "jsonrpc": self.__version,
            "method": self.__service_name,
            "params": params,
            "id": message_id(),
        }

        parameters = json.dumps(jsonrpc_params)

        url = self._get_url(jsonrpc_params)

        if "serverless" in url or "server-less" in url:
            raise exceptions.ServiceError(
                "Configured for server-less mode.  Skipping JSON RPC " +
                repr(self.__service_name))

        if log.get_verbose() <= 50:
            log.verbose("CRDS JSON RPC", self.__service_name,
                        params if len(str(params)) <= 60 else "(...)", "-->")
        else:
            log.verbose("CRDS JSON RPC to", url, "parameters", params, "-->")

        response = apply_with_retries(self._call_service, parameters, url)

        try:
            rval = json.loads(response)
        except Exception:
            log.warning("Invalid CRDS jsonrpc response:\n", response)
            raise

        return rval
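For reference, a sketch of the payload this builds on the wire (the service name and id are illustrative; message_id() is assumed to return a unique request id):

example_payload = {
    "jsonrpc": "1.0",                 # self.__version
    "method": "get_default_context",  # self.__service_name (illustrative)
    "params": ["jwst"],               # positional args become the params array
    "id": 1,
}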
Example #47
File: schema.py Project: jaytmiller/crds
def _flat_to_tpns(flat=None, schema_name=None):
    """Convert flat representation of DM schema to list of all TpnInfo objects."""
    if flat is None:
        flat = _schema_to_flat(_load_schema(schema_name))
    tpns = []
    for key, value in flat.items():
        if key.endswith(".TYPE"):
            basekey = str(key[:-len(".TYPE")])
            legal_values = [str(val) for val in flat.get(basekey + ".ENUM", [])]
            if legal_values:
                legal_values += ["ANY", "N/A"]
            legal_values = tuple(sorted(set(legal_values)))
            if isinstance(value, list):
                value = tuple(value)
            datatype = SCHEMA_TYPE_TO_TPN.get(value)
            if datatype is not None:
                tpn = TpnInfo(name=basekey.upper(), keytype="H", datatype=datatype[0],
                              presence=datatype[1], values=legal_values)
                log.verbose("Adding tpn constraint from DM schema:", repr(tpn), verbosity=65)
                tpns.append(tpn)
            else:
                log.warning("No TPN form for", repr(key), repr(value))
    return sorted(tpns)
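A hedged sketch of the flat-schema input this consumes and the constraint it yields (assuming SCHEMA_TYPE_TO_TPN maps "string" to a header datatype):

flat = {
    "META.INSTRUMENT.NAME.TYPE": "string",
    "META.INSTRUMENT.NAME.ENUM": ["MIRI", "NIRCAM"],
}
# _flat_to_tpns(flat) would emit one TpnInfo for META.INSTRUMENT.NAME with
# values ("ANY", "MIRI", "N/A", "NIRCAM") -- the ENUM plus ANY/N/A wildcards.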
Example #48
    def check_header(self, filename, header):
        """Evalutate the header expression associated with this validator (as its sole value)
        with respect to the given `header`.

        Note that array-based checkers are not automatically loaded during a classic header
        fetch and expressions can involve operations on multiple keywords or arrays.
        """
        log.verbose("File=" + repr(os.path.basename(filename)), "Checking",
                    repr(self.name), "condition", str(self._expr))
        for keyword in expr_identifiers(self._expr):
            if header.get(keyword, "UNDEFINED") == "UNDEFINED":
                log.verbose_warning("Keyword or Array", repr(keyword), 
                                    "is 'UNDEFINED'. Skipping ", repr(self._expr))
                return True   # fake satisfied     
        try:
            satisfied = eval(self._expr_code, header, self._eval_namespace)
        except Exception as exc:
            raise RequiredConditionError("Failed checking constraint", repr(self._expr), ":", str(exc))
        if not satisfied:
            raise RequiredConditionError("Constraint", str(self._expr), "is not satisfied.")
        elif satisfied == "W":  # from warn_only() helper
            log.warning("Constraint", str(self._expr), "is not satisfied.")
            satisfied = True
        return satisfied
Example #49
    def handle_warning(self, message):
        """Generic "warning" handler issues a warning from the remote process and
        continues monitoring.
        """
        log.warning(self.format_remote(message.data))
        return False
Example #50
File: diff.py Project: jaytmiller/crds
def mapping_check_diffs_core(diffs):
    """Perform the core difference checks on difference tuples `diffs`."""
    categorized = sorted([ (diff_action(d), d) for d in diffs ])
    for action, msg in categorized:
        if "header" in action:
            log.verbose("In", _diff_tail(msg)[:-1], msg[-1])
        elif action == "add":
            log.verbose("In", _diff_tail(msg)[:-1], msg[-1])
        elif "rule" in action:
            log.warning("Rule change at", _diff_tail(msg)[:-1], msg[-1])
        elif action == "replace":
            old_val, new_val = diff_replace_old_new(msg)
            if old_val and new_val:
                old_val, new_val = [x for x in diff_replace_old_new(msg)]
                if naming.newer(new_val, old_val):
                    log.verbose("In", _diff_tail(msg)[:-1], msg[-1])
                else:
                    log.warning("Reversion at", _diff_tail(msg)[:-1], msg[-1])
            else:
                log.warning("Unusual replacement", _diff_tail(msg)[:-1], msg[-1])
        elif action == "delete":
            log.warning("Deletion at", _diff_tail(msg)[:-1], msg[-1])
        elif action == "parkey_difference":
            log.warning("Different lookup parameters", _diff_tail(msg)[:-1], msg[-1])
        elif action == "class_difference":
            log.warning("Different classes at", _diff_tail(msg)[:-1], msg[-1])
        else:
            raise ValueError("Unexpected difference action:", action)
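A hedged sketch of the difference tuples this consumes (the nesting mirrors crds.diff output, but the exact contents here are illustrative):

diffs = [
    (("hst.pmap", "hst_0001.pmap"),
     ("hst_acs.imap", "hst_acs_0001.imap"),
     "replaced 'hst_acs_darkfile_0001.rmap' with 'hst_acs_darkfile_0002.rmap'"),
]
mapping_check_diffs_core(diffs)   # one verbose or warning line per diff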
Example #51
def warn_filekind_once(filekind):
    """Warn that `filekind` has no apparent JWST cal code data models schema support."""
    log.warning("No apparent JWST cal code data models schema support for", log.srepr(filekind))
Example #52
    def check_group(self, _filename, _header):
        """Probably related to pre-FITS HST GEIS files; not implemented."""
        log.warning("Group keys are not currently supported by CRDS.")
Example #53
File: locate.py Project: jaytmiller/crds
def check_naming_consistency(checked_instrument=None, exhaustive_mapping_check=False):
    """Dev function to compare the properties returned by name decomposition
    to the properties determined by file contents and make sure they're the same.
    Also checks rmap membership.

    >> from crds.tests import test_config
    >> old_config = test_config.setup()
    >> check_naming_consistency("acs")
    >> check_naming_consistency("cos")
    >> check_naming_consistency("nicmos")
    >> check_naming_consistency("stis")
    >> check_naming_consistency("wfc3")
    >> check_naming_consistency("wfpc2")
    >> test_config.cleanup(old_config)
    """
    from crds import certify

    for ref in rmap.list_references("*", observatory="hst", full_path=True):
        with log.error_on_exception("Failed processing:", repr(ref)):

            _path, _observ, instrument, filekind, _serial, _ext = ref_properties_from_cdbs_path(ref)

            if checked_instrument is not None and instrument != checked_instrument:
                continue

            if data_file.is_geis_data(ref):
                if os.path.exists(data_file.get_conjugate(ref)):
                    continue
                else:
                    log.warning("No GEIS header for", repr(ref))

            log.verbose("Processing:", instrument, filekind, ref)
            
            _path2, _observ2, instrument2, filekind2, _serial2, _ext2 = ref_properties_from_header(ref)
            if instrument != instrument2:
                log.error("Inconsistent instruments", repr(instrument), "vs.", repr(instrument2), 
                          "for", repr(ref))
            if filekind != filekind2:
                log.error("Inconsistent filekinds", repr(filekind), "vs.", repr(filekind2), 
                          "for", repr(ref))

            for pmap_name in reversed(sorted(rmap.list_mappings("*.pmap", observatory="hst"))):

                r = certify.find_governing_rmap(pmap_name, ref)

                if not r:
                    continue

                if r.instrument != instrument:
                    log.error("Rmap instrument", repr(r.instrument),
                              "inconsistent with name derived instrument", repr(instrument),
                              "for", repr(ref), "in", repr(pmap_name))
                if r.filekind != filekind:
                    log.error("Rmap filekind", repr(r.filekind),
                              "inconsistent with name derived filekind", repr(filekind),
                              "for", repr(ref), "in", repr(pmap_name))
                if r.instrument != instrument2:
                    log.error("Rmap instrument", repr(r.instrument),
                              "inconsistent with content derived instrument", repr(instrument2),
                              "for", repr(ref), "in", repr(pmap_name))
                if r.filekind != filekind2:
                    log.error("Rmap filekind", repr(r.filekind),
                              "inconsistent with content derived filekind", repr(filekind2),
                              "for", repr(ref), "in", repr(pmap_name))
                
                if not exhaustive_mapping_check:
                    break

            else:
                # for/else: reached only when the loop above never breaks.
                log.error("Orphan reference", repr(ref), "not found under any context.")
Example #54
    def handle_unknown(self, message):
        """Handle unknown `message` types by issuing a warning and continuing monitoring."""
        log.warning(self.format_remote("Unknown message type", repr(message.type), "in", repr(message)))
        return False
Example #55
    def handle_cancel(self, message):
        """Generic "cancel" handler reports on commanded cancellation of the remote
        process and possibly why it was cancelled.  Then stops monitoring / exits.
        """
        log.warning(self.format_remote("Processing cancelled:", message.data))
        return message.data["result"]