def reduce(file_list,
           label,
           calib_files,
           recipe_name=None,
           save_to=None,
           user_pars=None):
    """
    Helper function used to prevent replication of code.

    Parameters
    ----------
    file_list : list
        List of files that will be reduced.
    label : str
        Label used in the log file name.
    calib_files : list
        List of calibration files properly formatted for DRAGONS Reduce().
    recipe_name : str, optional
        Name of the recipe used to reduce the data.
    save_to : str, optional
        If set, the output is stored as this type of calibration and its path
        is appended to the calibration list.
    user_pars : list, optional
        List of user parameters.

    Returns
    -------
    str : Output reduced file.
    list : An updated list of calibration files.
    """
    objgraph = pytest.importorskip("objgraph")

    logutils.get_logger().info("\n\n\n")
    logutils.config(file_name=f"test_image_{label}.log")
    r = Reduce()
    r.files = file_list
    r.ucals = normalize_ucals(r.files, calib_files)
    r.uparms = user_pars

    if recipe_name:
        r.recipename = recipe_name

    r.runr()
    output_file = r.output_filenames[0]

    if save_to:
        calib_files.append("{}:{}".format(
            save_to,
            os.path.join("calibrations", save_to, r.output_filenames[0])))
        [os.remove(f) for f in r.output_filenames]

    # check that we are not leaking objects
    assert len(objgraph.by_type('NDAstroData')) == 0

    return output_file, calib_files
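A minimal usage sketch for this helper, assuming DRAGONS is installed; the file lists, recipe name and calibration type below are illustrative assumptions, and the calibration list returned by one call is threaded into the next so later reductions can pick up earlier products.

# Sketch only: 'flat_files' and 'sci_files' are hypothetical lists of FITS paths.
calib_files = []
_, calib_files = reduce(flat_files, "flat", calib_files,
                        recipe_name="makeProcessedFlat",
                        save_to="processed_flat")
sci_out, calib_files = reduce(sci_files, "sci", calib_files,
                              user_pars=[("stackFrames:operation", "median")])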
Example #2
def get_illum_mask_filename(ad):
    """
    Gets the illumMask filename for an input science frame, using
    illumMask_dict in geminidr.gnirs.lookups.maskdb.py

    Returns
    -------
    str/None: Filename of the appropriate illumination mask
    """
    log = logutils.get_logger(__name__)
    key1 = ad.camera()
    filter = ad.filter_name(pretty=True)
    if filter in ['Y', 'J', 'H', 'K', 'H2', 'PAH']:
        key2 = 'Wings'
    elif filter in ['JPHOT', 'HPHOT', 'KPHOT']:
        key2 = 'NoWings'
    else:
        log.warning("Unrecognised filter, no illumination mask can "
                         "be found for {}".format(ad.filename))
        return None

    try:
        illum = path.join(maskdb.illumMask_dict[key1, key2])
    except KeyError:
        log.warning("No illumination mask found for {}".format(ad.filename))
        return None

    return illum if illum.startswith(path.sep) else \
        path.join(path.dirname(maskdb.__file__), 'BPM', illum)
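A short call sketch, assuming 'ad' is an already-opened GNIRS AstroData frame and astrodata is importable; a None return simply means no mask applies.

# Sketch only: 'ad' is a hypothetical, already-opened GNIRS frame.
import astrodata

illum_filename = get_illum_mask_filename(ad)
if illum_filename is not None:
    illum_ad = astrodata.open(illum_filename)   # load the mask for later application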
Example #3
    def __init__(self, adinputs, mode='sq', ucals=None, uparms=None, upload=None):
        self.streams          = {'main': adinputs}
        self.mode             = mode
        self.params           = {}
        self.log              = logutils.get_logger(__name__)
        self._upload          = upload
        self.user_params      = uparms if uparms else {}
        self.calurl_dict      = calurl_dict.calurl_dict
        self.timestamp_keys   = timestamp_keywords.timestamp_keys
        self.keyword_comments = keyword_comments.keyword_comments
        self.sx_dict          = sextractor_dict.sx_dict.copy()
        # Prepend paths to SExtractor input files now
        self.sx_dict.update({k:
                os.path.join(os.path.dirname(sextractor_dict.__file__), v)
                for k,v in self.sx_dict.items()})

        self.cachedict        = set_caches()
        self.calibrations     = Calibrations(calindfile, user_cals=ucals)
        self.stacks           = load_cache(stkindfile)

        # This lambda will return the name of the current caller.
        self.myself           = lambda: stack()[1][3]

        warnings.simplefilter('ignore', category=VerifyWarning)

        # Create a parallel process to which we can send shell commands.
        # Spawning a shell command makes a copy of its parent process in RAM
        # so we need this process to have a small memory footprint and hence
        # create it now. Garbage collect too, in case stuff has happened
        # previously.
        gc.collect()
        self.eti_subprocess = ETISubprocess()
        atexit.register(cleanup, self.eti_subprocess)
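An instantiation sketch, assuming this __init__ belongs to a DRAGONS primitives class (called MyPrimitives below purely for illustration) and that the inputs were opened with astrodata beforehand.

# Sketch only: the class name and file list are illustrative assumptions.
import astrodata

adinputs = [astrodata.open(f) for f in file_list]
p = MyPrimitives(adinputs, mode='sq', uparms={'someParameter': 1})
# p.streams['main'] now holds the inputs and p.myself() reports the caller's name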
Example #5
    def __init__(self, ad, mosaic_ad_function):
        """
        Parameters
        ----------
        ad: <AstroData>, Input Astrodata object

        mosaic_ad_function: <func>, A required user function returning a
            MosaicData and a MosaicGeometry object. This function acts as an
            interface to the particular 'ad', e.g., it knows which keywords
            represent the coordinate systems to use and whether they are binned,
            and which values in the geometry look-up table require binning.
            For the GMOS and GSAOI instruments, DRAGONS/mosaic provides
            'gemini_mosaic_function'.

        For help on this function please see its description in the mosaic.py
        module.

        """
        verr = "Nothing to mosaic. < 2 extensions found on file {}"
        self.ad = ad
        if len(ad) < 2:
            raise ValueError(verr.format(ad.filename))

        self.log = logutils.get_logger(__name__)
        mosaic_data, geometry = mosaic_ad_function(ad)  # Call geometry function.
        Mosaic.__init__(self, mosaic_data, geometry)
        self.jfactor = []               # Jacobians applied to interpolated pixels.
        self.calculate_jfactor()        # Fill the jfactor vector with the
                                        # jacobian of transformation matrix.
        self.mosaic_shape = None        # Shape of the mosaicked output frame.
        self.sx_dict = sextractor_dict.sx_dict.copy()
        # Prepend paths to SExtractor input files now
        self.sx_dict.update({k: join(dirname(sextractor_dict.__file__), v)
                             for k, v in self.sx_dict.items()})
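A usage sketch, assuming this is the MosaicAD constructor and that the Gemini-provided geometry function is importable from gempy (the import path and FITS file name below are assumptions).

# Sketch only: the import path and file name are assumptions.
import astrodata
from gempy.mosaic.gemMosaicFunction import gemini_mosaic_function

ad = astrodata.open("S20170101S0001.fits")
mosaic = MosaicAD(ad, mosaic_ad_function=gemini_mosaic_function)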
Example #6
 def setup_class(cls):
     """Run once at the beginning."""
     if os.path.exists(logfilename):
         os.remove(logfilename)
     log = logutils.get_logger(__name__)
     log.root.handlers = []
     logutils.config(mode='standard', file_name=logfilename)
Example #7
 def setup_class(cls):
     """Run once at the beginning."""
     if os.path.exists(logfilename):
         os.remove(logfilename)
     log = logutils.get_logger(__name__)
     log.root.handlers = []
     logutils.config(mode='standard', console_lvl='stdinfo',
                     file_name=logfilename)
Example #8
def _create_wcs_from_offsets(adinput, adref, center_of_rotation=None):
    """
    This function uses the POFFSET, QOFFSET, and PA header keywords to create
    a new WCS for an image. Its primary role is for GNIRS. For ease, it works
    out the (RA,DEC) of the centre of rotation in the reference image and
    determines where in the input image this is.

    Parameters
    ----------
    adinput: AstroData
        The input image whose WCS needs to be rewritten
    adref: AstroData
        The reference image with a trustworthy WCS
    center_of_rotation: 2-tuple
        Location of rotation center (x, y)
    """
    log = logutils.get_logger(__name__)
    if len(adinput) != len(adref):
        log.warning("Number of extensions in input files are different. "
                    "Cannot correct WCS.")
        return adinput

    log.stdinfo("Updating WCS of {} based on {}".format(adinput.filename,
                                                        adref.filename))
    try:
        xdiff = adref.detector_x_offset() - adinput.detector_x_offset()
        ydiff = adref.detector_y_offset() - adinput.detector_y_offset()
        pa1 = adref.phu['PA']
        pa2 = adinput.phu['PA']
    except (KeyError, TypeError):  # TypeError if offset is None
        log.warning("Cannot obtain necessary offsets from headers "
                    "so no change will be made")
        return adinput

    # We expect mosaicked inputs but there's no reason why this couldn't
    # work for all extensions in an image
    for extin, extref in zip(adinput, adref):
        # Will need to have some sort of LUT here eventually. But for now...
        if center_of_rotation is None:
            center_of_rotation = (630.0, 520.0) if 'GNIRS' in adref.tags \
                else tuple(0.5*(x-1) for x in extref.data.shape[::-1])

        wcsref = WCS(extref.hdr)
        ra0, dec0 = wcsref.all_pix2world(center_of_rotation[0],
                                         center_of_rotation[1], 1)
        extin.hdr['CRVAL1'] = float(ra0)
        extin.hdr['CRVAL2'] = float(dec0)
        extin.hdr['CRPIX1'] = center_of_rotation[0] - xdiff
        extin.hdr['CRPIX2'] = center_of_rotation[1] - ydiff
        cd = models.Rotation2D(angle=pa1-pa2)(*wcsref.wcs.cd)
        extin.hdr['CD1_1'] = cd[0][0]
        extin.hdr['CD1_2'] = cd[0][1]
        extin.hdr['CD2_1'] = cd[1][0]
        extin.hdr['CD2_2'] = cd[1][1]
    return adinput
def _create_wcs_from_offsets(adinput, adref, center_of_rotation=None):
    """
    This function uses the POFFSET, QOFFSET, and PA header keywords to create
    a new WCS for an image. Its primary role is for GNIRS. For ease, it works
    out the (RA,DEC) of the centre of rotation in the reference image and
    determines where in the input image this is.

    Parameters
    ----------
    adinput: AstroData
        The input image whose WCS needs to be rewritten
    adref: AstroData
        The reference image with a trustworthy WCS
    center_of_rotation: 2-tuple
        Location of rotation center (x, y)
    """
    log = logutils.get_logger(__name__)
    if len(adinput) != len(adref):
        log.warning("Number of extensions in input files are different. "
                    "Cannot correct WCS.")
        return adinput

    log.stdinfo("Updating WCS of {} based on {}".format(adinput.filename,
                                                        adref.filename))
    try:
        xdiff = adref.detector_x_offset() - adinput.detector_x_offset()
        ydiff = adref.detector_y_offset() - adinput.detector_y_offset()
        pa1 = adref.phu['PA']
        pa2 = adinput.phu['PA']
    except (KeyError, TypeError):  # TypeError if offset is None
        log.warning("Cannot obtain necessary offsets from headers "
                    "so no change will be made")
        return adinput

    # We expect mosaicked inputs but there's no reason why this couldn't
    # work for all extensions in an image
    for extin, extref in zip(adinput, adref):
        # Will need to have some sort of LUT here eventually. But for now...
        if center_of_rotation is None:
            center_of_rotation = (630.0, 520.0) if 'GNIRS' in adref.tags \
                else tuple(0.5*x for x in extref.data.shape[::-1])

        wcsref = WCS(extref.hdr)
        ra0, dec0 = wcsref.all_pix2world(center_of_rotation[0],
                                         center_of_rotation[1], 1)
        extin.hdr['CRVAL1'] = float(ra0)
        extin.hdr['CRVAL2'] = float(dec0)
        extin.hdr['CRPIX1'] = center_of_rotation[0] - xdiff
        extin.hdr['CRPIX2'] = center_of_rotation[1] - ydiff
        cd = models.Rotation2D(angle=pa1-pa2)(*wcsref.wcs.cd)
        extin.hdr['CD1_1'] = cd[0][0]
        extin.hdr['CD1_2'] = cd[0][1]
        extin.hdr['CD2_1'] = cd[1][0]
        extin.hdr['CD2_2'] = cd[1][1]
    return adinput
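A hedged call sketch: given a reference frame with a trustworthy WCS and a dithered input frame (both assumed to be already-mosaicked AstroData objects), the function rewrites the input's WCS in place and returns it.

# Sketch only: 'ad_sci' and 'ad_ref' are hypothetical, already-mosaicked frames.
ad_sci = _create_wcs_from_offsets(ad_sci, ad_ref)
# A rotation centre can also be forced explicitly, e.g. for GNIRS:
ad_sci = _create_wcs_from_offsets(ad_sci, ad_ref, center_of_rotation=(630.0, 520.0))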
Example #10
def log():

    if os.path.exists(logfilename):
        os.remove(logfilename)

    log = logutils.get_logger(__name__)
    log.root.handlers = []
    logutils.config(mode='standard', file_name=logfilename)

    yield log

    os.remove(logfilename)
def _build_area_keys(corners):
    log = logutils.get_logger(__name__)
    log.fullinfo("Setting AREA keywords to denote original data area.")
    log.fullinfo("AREATYPE = 'P4'     / Polygon with 4 vertices")
    area_keys = [("AREATYPE", "P4", "Polygon with 4 vertices")]
    for i in range(len(corners)):
        for axis in range(len(corners[i])):
            key_name = "AREA{}_{}".format(i + 1, axis + 1)
            key_value = corners[i][axis]
            key_comment = "Vertex {}, dimension {}".format(i + 1, axis + 1)
            area_keys.append((key_name, key_value, key_comment))
            log.fullinfo("{:8s} = {:7.2f}  / {}".format(
                key_name, key_value, key_comment))
    return area_keys
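A small input/output sketch, assuming the corners are given as vertex pairs of the original data area (the numbers are invented for illustration).

# Sketch only: corner values are invented.
corners = [(1.0, 1.0), (2048.0, 1.0), (2048.0, 4608.0), (1.0, 4608.0)]
area_keys = _build_area_keys(corners)
# area_keys[0] == ("AREATYPE", "P4", "Polygon with 4 vertices")
# area_keys[1] == ("AREA1_1", 1.0, "Vertex 1, dimension 1"), and so on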
Example #13
 def __init__(self,
              name=None,
              get_cal=True,
              store_cal=False,
              valid_caltypes=None,
              procmode=None,
              log=None):
     self._valid_caltypes = valid_caltypes or VALID_CALTYPES
     self.caldir = CALDIR
     self.name = name
     self.get_cal = get_cal
     self.store_cal = store_cal
     self.procmode = procmode
     self.nextdb = None
     self.log = log or logutils.get_logger(__name__)
def _composite_transformation_matrix(ad, out_wcs, keyword_comments):
    log = logutils.get_logger(__name__)
    img_wcs = WCS(ad[0].hdr)
    # get transformation matrix from composite of wcs's
    # matrix = in_sky2pix*out_pix2sky (converts output to input)
    xy_matrix = np.dot(np.linalg.inv(img_wcs.wcs.cd), out_wcs.wcs.cd)
    # switch x and y for compatibility with numpy ordering
    flip_xy = np.roll(np.eye(2), 2)
    matrix = np.dot(flip_xy, np.dot(xy_matrix, flip_xy))
    matrix_det = np.linalg.det(matrix)

    # offsets: shift origin of transformation to the reference
    # pixel by subtracting the transformation of the output
    # reference pixel and adding the input reference pixel
    # back in
    refcrpix = np.roll(out_wcs.wcs.crpix, 1)
    imgcrpix = np.roll(img_wcs.wcs.crpix, 1)
    offset = imgcrpix - np.dot(matrix, refcrpix)

    # then add in the shift of origin due to dithering offset.
    # This is the transform of the reference CRPIX position,
    # minus the original position
    trans_crpix = img_wcs.all_world2pix(
        out_wcs.all_pix2world([out_wcs.wcs.crpix], 1), 1)[0]
    trans_crpix = np.roll(trans_crpix, 1)
    offset = offset + trans_crpix - imgcrpix

    # Since the transformation really is into the reference
    # WCS coordinate system as near as possible, just set image
    # WCS equal to reference WCS
    log.fullinfo("Offsets: " + repr(np.roll(offset, 1)))
    log.fullinfo("Transformation matrix:\n" + repr(matrix))
    log.fullinfo("Updating WCS to match reference WCS")

    for ax in (1, 2):
        ad.hdr.set('CRPIX{}'.format(ax),
                   out_wcs.wcs.crpix[ax - 1],
                   comment=keyword_comments["CRPIX{}".format(ax)])
        ad.hdr.set('CRVAL{}'.format(ax),
                   out_wcs.wcs.crval[ax - 1],
                   comment=keyword_comments["CRVAL{}".format(ax)])
        for ax2 in (1, 2):
            ad.hdr.set('CD{}_{}'.format(ax, ax2),
                       out_wcs.wcs.cd[ax - 1, ax2 - 1],
                       comment=keyword_comments["CD{}_{}".format(ax, ax2)])

    return (matrix, matrix_det, img_wcs, offset)  # ad ?
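A quick aside, as a runnable sketch, on the flip matrix used above: np.roll(np.eye(2), 2) yields the axis-swap matrix, so 'matrix' operates on (y, x) vectors in numpy ordering rather than the (x, y) ordering of the WCS CD matrix.

import numpy as np

flip_xy = np.roll(np.eye(2), 2)               # [[0., 1.], [1., 0.]]
assert np.array_equal(flip_xy @ np.array([3.0, 7.0]), np.array([7.0, 3.0]))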
def _shifts_and_shapes(all_corners, ref_shape, naxis, interpolator, trim_data,
                       shifts):
    """
    all_corners are locations (y,x) of all 4 corners of the reference image in the
    pixel space of each image
    """
    log = logutils.get_logger(__name__)
    if trim_data:
        refoff = [0] * naxis
        out_shape = ref_shape
        log.fullinfo("Trimming data to size of reference image")
    else:
        log.fullinfo("Growing reference image to keep all data; "
                     "centering data, and updating WCS to account "
                     "for shift")

        # Otherwise, use the corners of the images to get the minimum
        # required output shape to hold all data
        out_shape = []
        refoff = []
        for axis in range(naxis):
            # get output shape from corner values
            cvals = [corner[axis] for ic in all_corners for corner in ic]
            out_shape.append(int(max(cvals) - min(cvals) + 1))
            refoff.append(-max(0, int(max(cvals) - ref_shape[axis] + 1)))

            # if just shifting, need to set centering shift
            # for reference image from offsets already calculated
            if interpolator is None:
                svals = [shift[axis] for shift in shifts]
                # include a 0 shift for the reference image
                # (in case it's already centered)
                svals.append(0.0)
                refoff.append(-int(max(svals)))

        out_shape = tuple(out_shape)
        log.fullinfo("New output shape: " + repr(out_shape))

        # if not shifting, get offset required to center reference image
        # from the size of the image
        #if interpolator:
        #    incen = [0.5*(axlen-1) for axlen in ref_shape]
        #    outcen = [0.5*(axlen-1) for axlen in out_shape]
        #    cenoff = np.rint(incen) - np.rint(outcen)
    return refoff, out_shape
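A worked sketch of the untrimmed branch, with invented (y, x) positions of the reference image's corners as seen by each input; the output shape is simply the bounding box of all corner values along each axis.

# Sketch only: corner coordinates are invented for illustration.
all_corners = [
    [(0, 0), (0, 99), (49, 0), (49, 99)],      # reference image in its own pixel space
    [(10, -5), (10, 94), (59, -5), (59, 94)],  # the same corners seen by a shifted image
]
refoff, out_shape = _shifts_and_shapes(all_corners, ref_shape=(50, 100), naxis=2,
                                       interpolator='linear', trim_data=False,
                                       shifts=None)
# out_shape == (60, 105) and refoff == [-10, 0]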
Example #17
    def __init__(self, adinputs, mode='sq', ucals=None, uparms=None, upload=None,
                 config_file=None):
        # This is a general config file so we should load it now. Some of its
        # information may be overridden by other parameters passed here.
        load_config(config_file)

        self.streams          = {'main': adinputs}
        self.mode             = mode
        self.params           = {}
        self.log              = logutils.get_logger(__name__)
        self._upload          = upload
        self.user_params      = dict(uparms) if uparms else {}
        self.timestamp_keys   = timestamp_keywords.timestamp_keys
        self.keyword_comments = keyword_comments.keyword_comments
        self.sx_dict          = sextractor_dict.sx_dict.copy()

        # Prepend paths to SExtractor input files now
        self.sx_dict.update({
            k: os.path.join(os.path.dirname(sextractor_dict.__file__), v)
            for k, v in self.sx_dict.items()
        })

        self.caldb            = init_calibration_databases(
            getattr(self, "inst_lookups", None), ucals=ucals, upload=upload,
            procmode=self.mode)
        self.cachedict        = set_caches()
        self.stacks           = load_cache(stkindfile)

        # This lambda will return the name of the current caller.
        self.myself = lambda: currentframe().f_back.f_code.co_name

        warnings.simplefilter('ignore', category=VerifyWarning)

        # Create a parallel process to which we can send shell commands.
        # Spawning a shell command makes a copy of its parent process in RAM
        # so we need this process to have a small memory footprint and hence
        # create it now. Garbage collect too, in case stuff has happened
        # previously.
        gc.collect()
        self.eti_subprocess = ETISubprocess()
        atexit.register(cleanup, self.eti_subprocess)

        # Instantiate a dormantViewer(). Only ds9 for now.
        self.viewer = dormantViewer(self, 'ds9')
Example #19
    def __init__(self,
                 adinputs,
                 mode='sq',
                 ucals=None,
                 uparms=None,
                 upload=None):
        self.streams = {'main': adinputs}
        self.mode = mode
        self.params = {}
        self.log = logutils.get_logger(__name__)
        self._upload = upload
        self.user_params = dict(uparms) if uparms else {}
        self.calurl_dict = calurl_dict.calurl_dict
        self.timestamp_keys = timestamp_keywords.timestamp_keys
        self.keyword_comments = keyword_comments.keyword_comments
        self.sx_dict = sextractor_dict.sx_dict.copy()

        # Prepend paths to SExtractor input files now
        self.sx_dict.update({
            k: os.path.join(os.path.dirname(sextractor_dict.__file__), v)
            for k, v in self.sx_dict.items()
        })

        self.cachedict = set_caches()
        self.calibrations = Calibrations(calindfile, user_cals=ucals)
        self.stacks = load_cache(stkindfile)

        # This lambda will return the name of the current caller.
        self.myself = lambda: stack()[1][3]

        warnings.simplefilter('ignore', category=VerifyWarning)

        # Create a parallel process to which we can send shell commands.
        # Spawning a shell command makes a copy of its parent process in RAM
        # so we need this process to have a small memory footprint and hence
        # create it now. Garbage collect too, in case stuff has happened
        # previously.
        gc.collect()
        self.eti_subprocess = ETISubprocess()
        atexit.register(cleanup, self.eti_subprocess)

        # Instantiate a dormantViewer(). Only ds9 for now.
        self.viewer = dormantViewer(self, 'ds9')
def _composite_from_ref_wcs(ad, out_wcs, keyword_comments):
    log = logutils.get_logger(__name__)
    img_wcs = WCS(ad[0].hdr)
    img_shape = ad[0].data.shape

    # recalculate shift from new reference wcs
    x1y1 = np.array([img_shape[1] / 2.0, img_shape[0]/2.0])
    x2y2 = img_wcs.all_world2pix(out_wcs.all_pix2world([x1y1], 1), 1)[0]
    shift = np.roll(np.rint(x2y2 - x1y1), 1)
    if np.any(shift > 0):
        log.warning("Shift was calculated to be > 0; interpolator=None "
                    "may not be appropriate for this data.")
        shift = np.where(shift > 0, 0, shift)

    # update PHU WCS keywords
    log.fullinfo("Offsets: " + repr(np.roll(shift, 1)))
    log.fullinfo("Updating WCS to track shift in data")
    ad.hdr.set("CRPIX1", img_wcs.wcs.crpix[0] - shift[1],
                         comment=keyword_comments["CRPIX1"])
    ad.hdr.set("CRPIX2", img_wcs.wcs.crpix[1] - shift[0],
                         comment=keyword_comments["CRPIX2"])
    return shift  # ad ?
Example #22
from recipe_system import __version__

from recipe_system.utils.errors import ModeError
from recipe_system.utils.errors import RecipeNotFound
from recipe_system.utils.errors import PrimitivesNotFound

from recipe_system.utils.reduce_utils import buildParser
from recipe_system.utils.reduce_utils import normalize_ucals
from recipe_system.utils.reduce_utils import set_btypes
from recipe_system.utils.rs_utilities import log_traceback

from recipe_system.mappers.recipeMapper import RecipeMapper
from recipe_system.mappers.primitiveMapper import PrimitiveMapper

# ------------------------------------------------------------------------------
log = logutils.get_logger(__name__)


# ------------------------------------------------------------------------------
class Reduce:
    """
    The Reduce class encapsulates the core processing to be done by reduce.
    __init__ may receive one (1) parameter, nominally an argparse Namespace
    instance. However, that exact type is not required; any passed object
    *must* simply present an interface equivalent to that of an
    <argparse.Namespace> instance, i.e. a duck type.

    The class provides one (1) public method, runr(), the only call needed to
    run reduce on the supplied argument set.

    Parameters
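A brief sketch of the duck-typed usage the docstring describes, leaning on the buildParser import shown above (the FITS file name is a placeholder).

# Sketch only: the input file name is a placeholder.
args = buildParser(__version__).parse_args([])
args.files = ["N20180101S0001.fits"]
reduce_job = Reduce(args)
reduce_job.runr()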
Example #23
def main(args):
    """
    'main' is called with a Namespace 'args' parameter, or an object that
    presents an equivalent interface.
    
    Eg.,
    
    Get "args' from the defined reduce parser:
    
    >>> args = buildParser(version).parse_args()
    >>> import reduce_alpha
    >>> reduce_alpha.main(args)
    
    In the above example, 'args' is

    -- argparse Namespace instance
    
    Use of the reduce_utils function buildParser will get the caller a fully
    defined reduce Namespace instance, values for which can then be adjusted
    as desired.
    
    Eg.,
    
    buildParser:
    -----------
    >>> args = buildParser(version).parse_args()
    >>> args.logfile
    'reduce.log'
    >>> args.files
    []
    >>> args.files.append('some_fits_file.fits')
    
    Once 'args' attributes have been appropriately set, the caller then simply
    calls main():
    
    >>> reduce_alpha.main(args)

    :parameter args: argparse Namespace object
    :type args: <Namespace>

    :return: exit code
    :rtype:  <int>

    """
    global log
    estat = 0
    log = logutils.get_logger(__name__)
    try:
        assert log.root.handlers
        log.root.handlers = []
        logutils.config(mode=args.logmode, file_name=args.logfile)
        log = logutils.get_logger(__name__)
        log.info("Logging configured for application: reduce")
        log.info(" ")
    except AssertionError:
        pass

    # Config local calibration manager with passed args object
    if localmanager_available:
        set_calservice(local_db_dir=args.local_db_dir)

    log.stdinfo("\n\t\t\t--- reduce v{} ---".format(rs_version))
    log.stdinfo("\nRunning on Python {}".format(sys.version.split()[0]))
    r_reduce = Reduce(args)
    try:
        r_reduce.runr()
    except KeyboardInterrupt:
        log.error("Caught KeyboardInterrupt (^C) signal")
        estat = signal.SIGINT
    except Exception as err:
        log.error("reduce caught an unhandled exception.")
        log.error("\nReduce instance aborted.")
        estat = signal.SIGABRT

    if estat != 0:
        log.stdinfo("\n\nreduce exit status: %d\n" % estat)
    else:
        pass
    return estat
Example #24
from pyraf import iraf
from gempy.utils import logutils
from gempy.eti_core.pyrafetiparam import PyrafETIParam, IrafStdout

log = logutils.get_logger(__name__)

class GemcombineParam(PyrafETIParam):
    """This class coordinates the ETI parameters as it pertains to the IRAF
    task gemcombine directly.
    """
    inputs = None
    params = None

    adinput = None
    key = None
    value = None
    def __init__(self, inputs=None, params=None, key=None, value=None):
        """
        :param rc: Used to store reduction information
        :type rc: ReductionContext

        :param key: A parameter name that is added as a dict key in prepare
        :type key: any

        :param value: A parameter value that is added as a dict value
                      in prepare
        :type value: any
        """
        log.debug("GemcombineParam __init__")
        PyrafETIParam.__init__(self, inputs, params)
        self.adinput = self.inputs
Example #25
def ad_compare(ad1, ad2):
    """
    Compares the tags, headers, and pixel values of two images

    Parameters
    ----------
    ad1: AstroData/other
        first file (AD or can be opened by astrodata.open())
    ad2: AstroData/other
        second file (AD or...)

    Returns
    -------
    bool: are the two AD instances basically the same?
    """
    log = logutils.get_logger(__name__)

    if not isinstance(ad1, astrodata.AstroData):
        ad1 = astrodata.open(ad1)
    if not isinstance(ad2, astrodata.AstroData):
        ad2 = astrodata.open(ad2)

    fname1 = ad1.filename
    fname2 = ad2.filename
    ok = True
    errorlist = []

    # If images have different lengths, give up now
    if len(ad1) != len(ad2):
        log.warning(
            'Files have different numbers of extensions: {} v {}'.format(
                len(ad1), len(ad2)))
        return False

    assert fname1 == fname2, ('Files have different filename attributes: '
                              '{} v {}'.format(fname1, fname2))

    # Check tags
    if ad1.tags == ad2.tags:
        log.stdinfo('TAGS match')
    else:
        log.warning('TAGS do not match:')
        log.warning('  {} (1): {}'.format(fname1, ad1.tags))
        log.warning('  {} (2): {}'.format(fname2, ad2.tags))
        errorlist.append("TAGS do not match! {} contains {}, "
                         "while {} contains {}".format(
                             fname1, list(set(ad1.tags) - set(ad2.tags)),
                             fname2, list(set(ad2.tags) - set(ad1.tags))))
        errorlist.append("TAGS (cont): TAGS have {} in common".format(
            list(set(ad1.tags).intersection(ad2.tags))))
        ok = False

    # Check header keywords in PHU and all extension HDUs
    log.stdinfo('Checking headers...')
    for i, (h1, h2) in enumerate(zip(ad1.header, ad2.header)):
        hstr = 'PHU' if i == 0 else 'HDU {}'.format(i)
        log.stdinfo('  Checking {}'.format(hstr))

        # Compare keyword lists
        s1 = set(h1.keys()) - {'HISTORY', 'COMMENT'}
        s2 = set(h2.keys()) - {'HISTORY', 'COMMENT'}
        if s1 != s2:
            log.warning('Header keyword mismatch...')
            if s1 - s2:
                log.warning('  {} (1) contains keywords {}'.format(
                    fname1, s1 - s2))
                errorlist.append("Header {} (1) contains keywords {}".format(
                    fname1, s1 - s2))
            if s2 - s1:
                log.warning('  {} (2) contains keywords {}'.format(
                    fname2, s2 - s1))
                errorlist.append("Header {} (2) contains keywords {}".format(
                    fname2, s2 - s1))
            ok = False

        # Compare values for meaningful keywords
        for kw in h1:
            # GEM-TLM is "time last modified"
            if kw not in timestamp_keys.values() and kw not in [
                    'GEM-TLM', 'HISTORY', 'COMMENT', ''
            ]:
                try:
                    v1, v2 = h1[kw], h2[kw]
                except KeyError:  # Missing keyword in AD2
                    continue
                if isinstance(v1, float):
                    if abs(v1 - v2) > 0.01:
                        log.warning('{} value mismatch: {} v {}'.format(
                            kw, v1, v2))
                        errorlist.append('{} value mismatch: {} v {}'.format(
                            kw, v1, v2))
                        ok = False
                else:
                    if v1 != v2:
                        log.warning('{} value mismatch: {} v {}'.format(
                            kw, v1, v2))
                        errorlist.append('{} value mismatch: {} v {}'.format(
                            kw, v1, v2))
                        ok = False

    # Check REFCAT status, just equal lengths
    attr1 = getattr(ad1, 'REFCAT', None)
    attr2 = getattr(ad2, 'REFCAT', None)
    if (attr1 is None) ^ (attr2 is None):
        log.warning('    Attribute mismatch for REFCAT: {} v {}'.format(
            attr1 is not None, attr2 is not None))
        errorlist.append('Attribute mismatch for REFCAT: {} v {}'.format(
            attr1 is not None, attr2 is not None))
        ok = False
    elif attr1 is not None and attr2 is not None:
        if len(attr1) != len(attr2):
            log.warning('    REFCAT lengths differ: {} v {}'.format(
                len(attr1), len(attr2)))
            errorlist.append('REFCAT lengths differ: {} v {}'.format(
                len(attr1), len(attr2)))

            ok = False

    # Extension by extension, check all the attributes
    log.stdinfo('Checking extensions...')
    for ext1, ext2 in zip(ad1, ad2):
        log.stdinfo('  Checking extver {}'.format(ext1.hdr['EXTVER']))
        for attr in ['data', 'mask', 'variance', 'OBJMASK', 'OBJCAT']:
            attr1 = getattr(ext1, attr, None)
            attr2 = getattr(ext2, attr, None)
            if (attr1 is None) ^ (attr2 is None):
                log.warning('    Attribute mismatch for {}: {} v {}'.format(
                    attr, attr1 is not None, attr2 is not None))
                errorlist.append("Attribute error for {}: {} v {}".format(
                    attr, attr1 is not None, attr2 is not None))
                ok = False
                continue
            if attr1 is not None and attr2 is not None:
                if attr == 'OBJCAT':
                    if len(attr1) != len(attr2):
                        log.warning(
                            '    OBJCAT lengths differ: {} v {}'.format(
                                len(attr1), len(attr2)))
                        errorlist.append(
                            "OBJCAT lengths differ: {} vs {}".format(
                                len(attr1), len(attr2)))
                        ok = False
                else:
                    # Pixel-data extensions
                    if attr1.dtype.name != attr2.dtype.name:
                        log.warning(
                            '    Datatype mismatch for {}: {} v {}'.format(
                                attr, attr1.dtype, attr2.dtype))
                        errorlist.append(
                            "Datatype differ for {}: {} vs {}".format(
                                attr, attr1.dtype, attr2.dtype))
                        ok = False
                    if attr1.shape != attr2.shape:
                        log.warning(
                            '    Shape mismatch for {}: {} v {}'.format(
                                attr, attr1.shape, attr2.shape))
                        errorlist.append(
                            "Shapes differ between {}: {} vs {}".format(
                                attr, attr1.shape, attr2.shape))
                        ok = False
                    else:
                        diff = attr1 - attr2
                        maxdiff = np.max(abs(diff))
                        # Let's assume int arrays should be identical, but
                        # allow tolerance for float arrays.
                        # TODO: Maybe compare data difference against variance?
                        if 'int' in attr1.dtype.name:
                            if maxdiff > 0:
                                log.warning('    {} int arrays not identical: '
                                            'max difference {}'.format(
                                                attr, maxdiff))
                                errorlist.append(
                                    "{} int arrays not identical!".format(
                                        attr))
                                ok = False
                        elif maxdiff > 0.1:
                            log.warning(
                                '    {} float arrays differ: max difference '
                                '{}'.format(attr, maxdiff))
                            ok = False
    if not ok:
        for i, e in enumerate(errorlist):
            print("%d) %s" % (i, e))
    return ok
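A minimal sketch of calling the comparator on two reduced products with the same base name (ad_compare asserts that the filename attributes match), e.g. in a regression test.

# Sketch only: the paths are placeholders; both files share the base name 'reduced.fits'.
assert ad_compare("new/reduced.fits", "reference/reduced.fits")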
    def display(self, pix, name=None, bufname=None, z1=None, z2=None,
                transform=None, bpm=None, zscale=False, contrast=0.25,
                scale=None, masks=None, mask_colors=None,
                offset=None, frame=None, quiet=False):

        """ Displays byte-scaled (UInt8) n to XIMTOOL device.
            This method uses the IIS protocol for displaying the data
            to the image display device, which requires the data to be
            byte-scaled.
            If input is not byte-scaled, it will perform scaling using
            set values/defaults.
        """
        log = logutils.get_logger(__name__)

        #Ensure that the input array 'pix' is a numpy array
        pix = np.array(pix)
        self.z1 = z1
        self.z2 = z2

        # If any of the display parameters are specified here, apply them
        # if z1 or z2 or transform or scale or offset or frame:
        # If zscale=True (like IRAF's display) selected, calculate z1 and z2 from
        # the data, and clear any transform specified in the call
        # Offset and scale are applied to the data and z1,z2,
        # so they have no effect on the display
        if zscale:
            if transform != None:
                if not quiet:
                    log.fullinfo("transform disallowed when zscale=True")
                transform = None
            if bpm is None:
                z1, z2 = nd.zscale.zscale(pix, contrast=contrast)
            else:
                goodpix = pix[bpm==0]
                # Ignore the mask unless a decent number of pixels are "good"
                if len(goodpix) >= 0.01 * np.multiply(*pix.shape):
                    sq_side = int(np.sqrt(len(goodpix)))
                    goodpix = goodpix[:sq_side**2].reshape(sq_side, sq_side)
                    z1, z2 = nd.zscale.zscale(goodpix, contrast=contrast)
                else:
                    z1, z2 = nd.zscale.zscale(pix, contrast=contrast)

        self.set(frame=frame, z1=z1, z2=z2,
                transform=transform, scale=scale, offset=offset)

        # Initialize the display device
        if not self.view._display or self.view.checkDisplay() is False:
            self.open()
        _d = self.view._display
        self.handle = _d.getHandle()

        # If no user specified values are provided, interrogate the array itself
        # for the full range of pixel values
        if self.z1 == None:
            self.z1 = np.minimum.reduce(np.ravel(pix))
        if self.z2 == None:
            self.z2 = np.maximum.reduce(np.ravel(pix))

        # If the user has not selected a specific buffer for the display,
        # select and set the frame buffer size based on input image size.
        if bufname == 'iraf':
            useiraf = True
            bufname = None
        else:
            useiraf = False

        if bufname != None:
            _d.setFBconfig(None,bufname=bufname)
        else:
            _ny,_nx = pix.shape
            _d.selectFB(_nx,_ny,reset=1,useiraf=useiraf)

        # Initialize the specified frame buffer
        _d.setFrame(self.frame)
        _d.eraseFrame()

        # Apply user specified scaling to image, returns original
        # if none are specified.
        bpix = self._transformImage(pix)

        # Recompute the pixel range of (possibly) transformed array
        _z1 = self._transformImage(self.z1)
        _z2 = self._transformImage(self.z2)

        # If there was a problem in the transformation, then restore the original
        # array as the one to be displayed, even though it may not be ideal.
        if _z1 == _z2:
            if not quiet:
                log.warning('Error encountered during transformation. '
                            'No transformation applied...')
            bpix = pix
            self.z1 = np.minimum.reduce(np.ravel(bpix))
            self.z2 = np.maximum.reduce(np.ravel(bpix))
            # Failsafe in case input image is flat:
            if self.z1 == self.z2:
                self.z1 -= 1.
                self.z2 += 1.
        else:
            # Reset z1/z2 values now so that image gets displayed with
            # correct range.  Also, when displaying transformed images
            # this allows the input pixel value to be displayed, rather
            # than the transformed pixel value.
            self.z1 = _z1
            self.z2 = _z2

        _wcsinfo = nd.displaydev.ImageWCS(bpix,z1=self.z1,z2=self.z2,name=name)
        if not quiet:
            log.fullinfo('Image displayed with z1: {} z2: {}'.format(self.z1,
                                                                     self.z2))

        bpix = self._fbclipImage(bpix,_d.fbwidth,_d.fbheight)

        # Change pixel value to specified color if desired
        if masks is not None:
            if not isinstance(masks,list):
                masks = [masks]
            if mask_colors is None:
                # Set to red as default
                mask_colors = [204]*len(masks)
            for i in range(len(masks)):
                if (masks[i][0].size>0 and masks[i][1].size>0):
                    bpix[masks[i]] = mask_colors[i]

        # Update the WCS to match the frame buffer being used.
        _d.syncWCS(_wcsinfo)

        # write out WCS to frame buffer, then erase buffer
        _d.writeWCS(_wcsinfo)

        # Now, send the trimmed image (section) to the display device
        _d.writeImage(bpix,_wcsinfo)
def _calculate_var(adinput, add_read_noise=False, add_poisson_noise=False):
    """
    Calculates the variance of each extension in the input AstroData
    object and updates the .variance attribute

    Parameters
    ----------
    adinput: AstroData
        AD instance to add variance planes to
    add_read_noise: bool
        add the read noise component?
    add_poisson_noise: bool
        add the Poisson noise component?
    """
    log = logutils.get_logger(__name__)
    gain_list = adinput.gain()
    read_noise_list = adinput.read_noise()
    var_dtype = np.float32

    in_adu = adinput.is_in_adu()
    for ext, gain, read_noise in zip(adinput, gain_list, read_noise_list):
        extver = ext.hdr['EXTVER']

        # Create a variance array with the read noise (or zero)
        if add_read_noise:
            if read_noise is None:
                log.warning('Read noise for {} extver {} = None. Setting '
                            'to zero'.format(adinput.filename, extver))
                read_noise = 0.0
            else:
                log.fullinfo('Read noise for {} extver {} = {} electrons'.
                         format(adinput.filename, extver, read_noise))
                log.fullinfo('Calculating the read noise component of the '
                             'variance in {}'.format('ADU' if in_adu else 'electrons'))
                if in_adu:
                    read_noise /= gain
            var_array = np.full(ext.data.shape, read_noise*read_noise)
        else:
            var_array = np.zeros(ext.data.shape)

        # Add the Poisson noise if desired
        if add_poisson_noise:
            poisson_array = (ext.data if ext.is_coadds_summed() else
                             ext.data / ext.coadds())
            if in_adu:
                poisson_array = poisson_array / gain
            log.fullinfo('Calculating the Poisson noise component of the '
                         'variance in {}'.format('ADU' if in_adu else 'electrons'))
            var_array += np.where(poisson_array > 0, poisson_array, 0)

        if ext.variance is not None:
            if add_read_noise and add_poisson_noise:
                raise ValueError("Cannot add read noise and Poisson noise"
                                 " components to variance as variance "
                                 "already exists")
            else:
                log.fullinfo("Combining the newly calculated variance "
                             "with the current variance extension {}:{}".
                             format(ext.filename, extver))
                var_array += ext.variance
        else:
            log.fullinfo("Adding variance to {}:{}".format(ext.filename,
                                                           extver))
        # Attach to the extension
        ext.variance = var_array.astype(var_dtype)
    return
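A hedged call sketch: the helper works in place, so a primitive that has just prepared raw data might do the following (the 'ad' object is an assumption, opened elsewhere and lacking variance planes).

# Sketch only: 'ad' is a hypothetical frame with no existing variance planes.
_calculate_var(ad, add_read_noise=True, add_poisson_noise=True)
assert all(ext.variance is not None for ext in ad)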
Example #28
def trace_lines(ext,
                axis,
                start=None,
                initial=None,
                cwidth=5,
                rwidth=None,
                nsum=10,
                step=10,
                initial_tolerance=1.0,
                max_shift=0.05,
                max_missed=5,
                func=NDStacker.mean,
                viewer=None):
    """
    This function traces features along one axis of a two-dimensional image.
    Initial peak locations are provided and then these are matched to peaks
    found a small distance away along the direction of tracing. In terms of
    its use to map the distortion from a 2D spectral image of an arc lamp,
    these lists of coordinates can then be used to determine a distortion map
    that will remove any curvature of lines of constant wavelength.

    For a horizontally-dispersed spectrum like GMOS, the reference y-coords
    will match the input y-coords, while the reference x-coords will all be
    equal to the initial x-coords of the peaks.

    Parameters
    ----------
    ext : single-sliced AD object
        The extension within which to trace features.

    axis : int (0 or 1)
        Axis along which to trace (0=y-direction, 1=x-direction).

    start : int/None
        Row/column to start trace (None => middle).

    initial : sequence
        Coordinates of peaks.

    cwidth : int
        Width of centroid box in pixels.

    rwidth : int/None
        width of Ricker filter to apply to each collapsed 1D slice

    nsum : int
        Number of rows/columns to combine at each step.

    step : int
        Step size along axis in pixels.

    initial_tolerance : float/None
        Maximum perpendicular shift (in pixels) between provided location and
        first calculation of peak.

    max_shift: float
        Maximum perpendicular shift (in pixels) from pixel to pixel.

    max_missed: int
        Maximum number of iterations without finding the line before the line
        is considered lost forever.

    func: callable
        function to use when collapsing to 1D. This takes the data, mask, and
        variance as arguments, and returns 1D versions of all three

    viewer: imexam viewer or None
        Viewer to draw lines on.

    Returns
    -------
    refcoords, incoords: 2xN arrays (x-first) of coordinates
    """
    log = logutils.get_logger(__name__)

    # Make life easier for the poor coder by transposing data if needed,
    # so that we're always tracing along columns
    if axis == 0:
        ext_data = ext.data
        ext_mask = None if ext.mask is None else ext.mask & DQ.not_signal
        direction = "row"
    else:
        ext_data = ext.data.T
        ext_mask = None if ext.mask is None else ext.mask.T & DQ.not_signal
        direction = "column"

    if start is None:
        start = ext_data.shape[0] // 2
        log.stdinfo(f"Starting trace at {direction} {start}")

    # Get accurate starting positions for all peaks
    halfwidth = cwidth // 2
    y1 = int(start - 0.5 * nsum + 0.5)
    data, mask, var = func(ext_data[y1:y1 + nsum],
                           mask=None if ext_mask is None else ext_mask[y1:y1 +
                                                                       nsum],
                           variance=None)

    if rwidth:
        data = signal.cwt(data, signal.ricker, widths=[rwidth])[0]

    # Get better peak positions if requested
    if initial_tolerance is None:
        initial_peaks = initial
    else:
        peaks = pinpoint_peaks(data, mask, initial)
        initial_peaks = []
        for peak in initial:
            j = np.argmin(abs(np.array(peaks) - peak))
            new_peak = peaks[j]
            if abs(new_peak - peak) <= initial_tolerance:
                initial_peaks.append(new_peak)
            else:
                log.debug(f"Cannot recenter peak at coordinate {peak}")

    # Allocate space for collapsed arrays of different sizes
    data = np.empty((max_missed, ext_data.shape[1]))
    mask = np.zeros_like(data, dtype=DQ.datatype)
    var = np.empty_like(data)

    coord_lists = [[] for peak in initial_peaks]
    for direction in (-1, 1):
        ypos = start
        last_coords = [[ypos, peak] for peak in initial_peaks]
        lookback = 0

        while True:
            ypos += direction * step
            lookback = min(lookback + 1, max_missed)
            # Reached the bottom or top?
            if ypos < 0.5 * nsum or ypos > ext_data.shape[0] - 0.5 * nsum:
                break

            # Make multiple arrays covering nsum to nsum*(largest_missed+1) rows
            y2 = int(ypos + 0.5 * nsum + 0.5)
            for i in range(lookback):
                slices = [
                    slice(y2 - j * step - nsum, y2 - j * step)
                    for j in range(i + 1)
                ]
                d, m, v = func(np.concatenate(list(ext_data[s]
                                                   for s in slices)),
                               mask=None if ext_mask is None else
                               np.concatenate(list(ext_mask[s]
                                                   for s in slices)),
                               variance=None)
                # Variance could plausibly be zero
                var[i] = np.where(v <= 0, np.inf, v)
                if rwidth:
                    data[i] = np.where(
                        d / np.sqrt(var[i]) > 0.5,
                        signal.cwt(d, signal.ricker, widths=[rwidth])[0], 0)
                else:
                    data[i] = np.where(d / np.sqrt(var[i]) > 0.5, d, 0)
                if m is not None:
                    mask[i] = m

            if any(mask[0] == 0):
                last_peaks = [c[1] for c in last_coords if not np.isnan(c[1])]
                peaks = pinpoint_peaks(data[0],
                                       mask[0],
                                       last_peaks,
                                       halfwidth=halfwidth)

                for i, (last_row, old_peak) in enumerate(last_coords):
                    if np.isnan(old_peak):
                        continue
                    # If we found no peaks at all, then continue through
                    # the loop but nothing will match
                    if peaks:
                        j = np.argmin(abs(np.array(peaks) - old_peak))
                        new_peak = peaks[j]
                    else:
                        new_peak = np.inf

                    # Is this close enough to the existing peak?
                    steps_missed = min(int(abs(ypos - last_row) / step),
                                       lookback)
                    for j in range(steps_missed):
                        tolerance = max_shift * (j + 1) * step
                        if abs(new_peak - old_peak) <= tolerance:
                            new_coord = [ypos - 0.5 * j * step, new_peak]
                            break
                        elif j + 1 < lookback:
                            # Investigate more heavily-binned profiles
                            try:
                                new_peak = pinpoint_peaks(
                                    data[j + 1],
                                    mask[j + 1], [old_peak],
                                    halfwidth=halfwidth)[0]
                            except IndexError:  # No peak there
                                new_peak = np.inf
                    else:
                        # We haven't found the continuation of this line.
                        # If it's gone for good, set the coord to NaN to avoid it
                        # picking up a different line if there's significant tilt
                        if steps_missed >= max_missed:
                            last_coords[i][1] = np.nan
                        continue

                    # Too close to the edge?
                    if (new_coord[1] < halfwidth or new_coord[1] >
                            ext_data.shape[1] - 0.5 * halfwidth):
                        last_coords[i][1] = np.nan
                        continue

                    if viewer:
                        kwargs = dict(
                            zip(('y1', 'x1'), last_coords[i]
                                if axis == 0 else reversed(last_coords[i])))
                        kwargs.update(
                            dict(
                                zip(('y2', 'x2'), new_coord
                                    if axis == 0 else reversed(new_coord))))
                        viewer.line(origin=0, **kwargs)

                    coord_lists[i].append(new_coord)
                    last_coords[i] = new_coord.copy()
            else:  # We don't bin across completely dead regions
                lookback = 0

            # Lost all lines!
            if all(np.isnan(c[1]) for c in last_coords):
                break

    # List of traced peak positions
    in_coords = np.array([c for coo in coord_lists for c in coo]).T
    # List of "reference" positions (i.e., the coordinate perpendicular to
    # the line remains constant at its initial value)
    ref_coords = np.array([(ypos, ref)
                           for coo, ref in zip(coord_lists, initial_peaks)
                           for (ypos, xpos) in coo]).T

    # Return the coordinate lists, in the form (x-coords, y-coords),
    # regardless of the dispersion axis
    return (ref_coords, in_coords) if axis == 1 else (ref_coords[::-1],
                                                      in_coords[::-1])
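

# A minimal illustrative sketch (not part of the function above): when
# `rwidth` is set, the tracing loop filters each collapsed profile with a
# Ricker ("Mexican hat") wavelet via scipy.signal.cwt before pinpointing
# peaks, suppressing noise and slowly-varying background while preserving
# peak positions.  Everything below is assumed/illustrative; scipy.signal.cwt
# and signal.ricker are available in older SciPy releases, as the code above
# assumes.
import numpy as np
from scipy import signal

rng = np.random.default_rng(0)
x = np.arange(200)
# Two Gaussian "arc lines" on a noisy background
profile = (np.exp(-0.5 * ((x - 60) / 3.) ** 2) +
           0.7 * np.exp(-0.5 * ((x - 140) / 3.) ** 2) +
           rng.normal(scale=0.05, size=x.size))

rwidth = 3  # wavelet width in pixels, assumed comparable to the line width
smoothed = signal.cwt(profile, signal.ricker, widths=[rwidth])[0]

# Peak locations survive the filtering, but the noise floor is reduced
print("raw argmax:", np.argmax(profile), "filtered argmax:", np.argmax(smoothed))
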
Exemple #29
0
def trace_lines(ext, axis, start=None, initial=None, width=5, nsum=10,
                step=1, initial_tolerance=1.0, max_shift=0.05, max_missed=10,
                func=NDStacker.mean, viewer=None):
    """
    This function traces features along one axis of a two-dimensional image.
    Initial peak locations are provided and then these are matched to peaks
    found a small distance away along the direction of tracing. In terms of
    its use to map the distortion from a 2D spectral image of an arc lamp,
    these lists of coordinates can then be used to determine a distortion map
    that will remove any curvature of lines of constant wavelength.

    For a horizontally-dispersed spectrum like GMOS, the reference y-coords
    will match the input y-coords, while the reference x-coords will all be
    equal to the initial x-coords of the peaks.

    Parameters
    ----------
    ext : single-sliced AD object
        The extension within which to trace features.

    axis : int (0 or 1)
        Axis along which to trace (0=y-direction, 1=x-direction).

    start : int/None
        Row/column to start trace (None => middle).

    initial : sequence
        Coordinates of peaks.

    width : int
        Width of centroid box in pixels.

    nsum : int
        Number of rows/columns to combine at each step.

    step : int
        Step size along axis in pixels.

    initial_tolerance : float
        Maximum perpendicular shift (in pixels) between provided location and
        first calculation of peak.

    max_shift: float
        Maximum perpendicular shift (in pixels) from pixel to pixel.

    max_missed: int
        Maximum number of consecutive steps without finding the line before
        the line is considered lost for good.

    func: callable
        Function to use when collapsing to 1D. This takes the data, mask, and
        variance as arguments.

    viewer: imexam viewer or None
        Viewer to draw lines on.

    Returns
    -------
    refcoords, incoords: 2xN arrays (x-first) of coordinates
    """
    log = logutils.get_logger(__name__)

    # We really don't care about non-linear/saturated pixels
    bad_bits = 65535 ^ (DQ.non_linear | DQ.saturated)

    halfwidth = int(0.5 * width)

    # Make life easier for the poor coder by transposing data if needed,
    # so that we're always tracing along columns
    if axis == 0:
        ext_data = ext.data
        ext_mask = None if ext.mask is None else ext.mask & bad_bits
        direction = "row"
    else:
        ext_data = ext.data.T
        ext_mask = None if ext.mask is None else ext.mask.T & bad_bits
        direction = "column"

    if start is None:
        start = int(0.5 * ext_data.shape[0])
        log.stdinfo("Starting trace at {} {}".format(direction, start))

    if initial is None:
        y1 = int(start - 0.5 * nsum + 0.5)
        data, mask, var = NDStacker.mean(ext_data[y1:y1 + nsum],
                                         mask=None if ext_mask is None else ext_mask[y1:y1 + nsum],
                                         variance=None)
        fwidth = estimate_peak_width(data.copy(), 10)
        widths = 0.42466 * fwidth * np.arange(0.8, 1.21, 0.05)  # TODO!
        initial, _ = find_peaks(data, widths, mask=mask,
                                variance=var, min_snr=5)
        log.fullinfo("Feature width: {:.2f} pixels; found {} lines".format(
            fwidth, len(initial)))

    coord_lists = [[] for peak in initial]
    for direction in (-1, 1):
        ypos = start
        last_coords = [[ypos, peak] for peak in initial]

        while True:
            y1 = int(ypos - 0.5 * nsum + 0.5)
            data, mask, var = func(ext_data[y1:y1 + nsum],
                                   mask=None if ext_mask is None else ext_mask[y1:y1 + nsum],
                                   variance=None)
            # Variance could plausibly be zero
            var = np.where(var <= 0, np.inf, var)
            clipped_data = np.where(data / np.sqrt(var) > 0.5, data, 0)
            last_peaks = [c[1] for c in last_coords if not np.isnan(c[1])]
            peaks = pinpoint_peaks(clipped_data, mask, last_peaks)
            # if ypos == start:
            #    print("Found {} peaks".format(len(peaks)))
            #    print(peaks)

            for i, (last_row, old_peak) in enumerate(last_coords):
                if np.isnan(old_peak):
                    continue
                # If we found no peaks at all, then continue through
                # the loop but nothing will match
                if peaks:
                    j = np.argmin(abs(np.array(peaks) - old_peak))
                    new_peak = peaks[j]
                else:
                    new_peak = np.inf

                # Is this close enough to the existing peak?
                tolerance = (initial_tolerance if ypos == start
                             else max_shift * abs(ypos - last_row))
                if (abs(new_peak - old_peak) > tolerance):
                    # If it's gone for good, set the coord to NaN to avoid it
                    # picking up a different line if there's significant tilt
                    if abs(ypos - last_row) > max_missed * step:
                        last_coords[i][1] = np.nan
                    continue

                # Too close to the edge?
                if (new_peak < halfwidth or
                        new_peak > ext_data.shape[1] - 0.5 * halfwidth):
                    last_coords[i][1] = np.nan
                    continue

                new_coord = [ypos, new_peak]
                if viewer:
                    kwargs = dict(zip(('y1', 'x1'),
                                      last_coords[i] if axis == 0
                                      else reversed(last_coords[i])))
                    kwargs.update(dict(zip(('y2', 'x2'),
                                           new_coord if axis == 0
                                           else reversed(new_coord))))
                    viewer.line(origin=0, **kwargs)

                if not (ypos == start and direction > 0):
                    coord_lists[i].append(new_coord)
                last_coords[i] = new_coord.copy()

            ypos += direction * step
            # Reached the bottom or top?
            if ypos < 0.5 * nsum or ypos > ext_data.shape[0] - 0.5 * nsum:
                break

            # Lost all lines!
            if all(np.isnan(c[1]) for c in last_coords):
                break

    # List of traced peak positions
    in_coords = np.array([c for coo in coord_lists for c in coo]).T
    # List of "reference" positions (i.e., the coordinate perpendicular to
    # the line remains constant at its initial value)
    ref_coords = np.array([(ypos, ref)
                           for coo, ref in zip(coord_lists, initial)
                           for (ypos, xpos) in coo]).T

    # Return the coordinate lists, in the form (x-coords, y-coords),
    # regardless of the dispersion axis
    return (ref_coords, in_coords) if axis == 1 else (ref_coords[::-1],
                                                      in_coords[::-1])
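

# Hedged usage sketch (illustrative, not from the DRAGONS test suite): trace
# the arc lines of a single extension and fit a low-order 2D Chebyshev model
# mapping the traced positions back to straight lines of constant wavelength.
# The file name is hypothetical and the reduction context is assumed.
import astrodata
import gemini_instruments  # noqa: F401  (registers Gemini AstroData classes)
from astropy.modeling import models, fitting

ad = astrodata.open("N20XXXXXXS0001_arc.fits")  # hypothetical prepared arc frame
ext = ad[0]

# Both returned arrays are 2xN with the x-coordinates first
refcoords, incoords = trace_lines(ext, axis=0, nsum=10, step=4, max_missed=5)

# Fit x_ref = f(x_in, y_in); a simplified stand-in for the distortion fit
model_init = models.Chebyshev2D(x_degree=3, y_degree=3)
fitter = fitting.LinearLSQFitter()
distortion = fitter(model_init, incoords[0], incoords[1], refcoords[0])
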
def _create_wcs_from_offsets(adinput, adref, center_of_rotation=None):
    """
    This function uses the POFFSET, QOFFSET, and PA header keywords to create
    a transform between pixel coordinates. Its primary role is for GNIRS.
    For ease, it works out the (RA,DEC) of the centre of rotation in the
    reference image and determines where in the input image this is. The
    AstroData object's WCS is updated with a new WCS based on the WCS of the
    reference AD and the relative offsets/rotation from the headers.

    Parameters
    ----------
    adinput: AstroData
        The input image whose WCS needs to be rewritten
    adref: AstroData
        The reference image with a trustworthy WCS
    center_of_rotation: 2-tuple/None
        Location of the rotation center (x, y); if None, it is determined
        from the reference WCS (with a hard-coded default for GNIRS)
    """
    log = logutils.get_logger(__name__)
    if len(adinput) != len(adref):
        log.warning("The input and reference files have different numbers "
                    "of extensions. Cannot correct WCS.")
        return

    log.stdinfo(f"Updating WCS of {adinput.filename} from {adref.filename}")
    try:
        # Coerce to float to raise TypeError if a descriptor returns None
        xoff_in = float(adinput.detector_x_offset())
        yoff_in = float(adinput.detector_y_offset())
        xoff_ref = float(adref.detector_x_offset())
        yoff_ref = float(adref.detector_y_offset())
        pa1 = adref.phu['PA']
        pa2 = adinput.phu['PA']
    except (KeyError, TypeError):  # TypeError if offset is None
        log.warning("Cannot obtain necessary offsets from headers "
                    "so no change will be made")
        return

    if center_of_rotation is None:
        if 'GNIRS' in adref.tags:
            center_of_rotation = (629.0, 519.0)  # (x, y; 0-indexed)
        else:
            try:
                for m in adref[0].wcs.forward_transform:
                    if isinstance(m, models.RotateNative2Celestial):
                        ra, dec = m.lon.value, m.lat.value
                        center_of_rotation = adref[0].wcs.backward_transform(
                            ra, dec)
                        break
            except (AttributeError, IndexError, TypeError):
                if len(adref) == 1:
                    # Assume it's the center of the image
                    center_of_rotation = tuple(0.5 * (x - 1)
                                               for x in adref[0].shape[::-1])
                else:
                    log.warning("Cannot determine center of rotation so no "
                                "change will be made")
                    return

    try:
        t = ((models.Shift(-xoff_in - center_of_rotation[1])
              & models.Shift(-yoff_in - center_of_rotation[0]))
             | models.Rotation2D(pa1 - pa2) |
             (models.Shift(xoff_ref + center_of_rotation[1])
              & models.Shift(yoff_ref + center_of_rotation[0])))
    except TypeError:
        log.warning(
            "Problem creating offset transform so no change will be made")
        return
    adinput[0].wcs = deepcopy(adref[0].wcs)
    adinput[0].wcs.insert_transform(adinput[0].wcs.input_frame, t, after=True)
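

# Hedged usage sketch: rebuild the WCS of one GNIRS frame from another whose
# WCS is trusted, using only the POFFSET/QOFFSET/PA header values.  The file
# names are hypothetical; the function modifies `ad_sci` in place and returns
# nothing.
import astrodata
import gemini_instruments  # noqa: F401

ad_ref = astrodata.open("N20XXXXXXS0010.fits")   # frame with a trustworthy WCS
ad_sci = astrodata.open("N20XXXXXXS0011.fits")   # frame whose WCS will be rebuilt

_create_wcs_from_offsets(ad_sci, ad_ref)

# The gWCS object is callable: evaluate the new world coordinates of a pixel
print(ad_sci[0].wcs(0, 0))
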
Exemple #31
0
    def display(self,
                pix,
                name=None,
                bufname=None,
                z1=None,
                z2=None,
                transform=None,
                bpm=None,
                zscale=False,
                contrast=0.25,
                scale=None,
                masks=None,
                mask_colors=None,
                offset=None,
                frame=None,
                quiet=False):
        """ Displays a byte-scaled (uint8) image array on an XIMTOOL-compatible
            display device.
            This method uses the IIS protocol for sending the data to the
            image display device, which requires the data to be byte-scaled.
            If the input is not byte-scaled, the scaling is performed here
            using the supplied values or defaults.
        """
        log = logutils.get_logger(__name__)

        # Ensure that the input array 'pix' is a numpy array
        pix = np.array(pix)
        self.z1 = z1
        self.z2 = z2

        # If zscale=True is selected (like IRAF's display), calculate z1 and
        # z2 from the data and clear any transform specified in the call.
        # Offset and scale are applied to the data and to z1/z2, so they have
        # no effect on the display.
        if zscale:
            if transform is not None:
                if not quiet:
                    log.fullinfo("transform disallowed when zscale=True")
                transform = None
            if bpm is None:
                z1, z2 = nd.zscale.zscale(pix, contrast=contrast)
            else:
                goodpix = pix[bpm == 0]
                # Ignore the mask unless a decent number of pixels are "good"
                if len(goodpix) >= 0.01 * np.multiply(*pix.shape):
                    sq_side = int(np.sqrt(len(goodpix)))
                    goodpix = goodpix[:sq_side**2].reshape(sq_side, sq_side)
                    z1, z2 = nd.zscale.zscale(goodpix, contrast=contrast)
                else:
                    z1, z2 = nd.zscale.zscale(pix, contrast=contrast)

        self.set(frame=frame,
                 z1=z1,
                 z2=z2,
                 transform=transform,
                 scale=scale,
                 offset=offset)

        # Initialize the display device
        if not self.view._display or self.view.checkDisplay() is False:
            self.open()
        _d = self.view._display
        self.handle = _d.getHandle()

        # If no user specified values are provided, interrogate the array itself
        # for the full range of pixel values
        if self.z1 is None:
            self.z1 = np.minimum.reduce(np.ravel(pix))
        if self.z2 is None:
            self.z2 = np.maximum.reduce(np.ravel(pix))

        # If the user has not selected a specific buffer for the display,
        # select and set the frame buffer size based on input image size.
        if bufname == 'iraf':
            useiraf = True
            bufname = None
        else:
            useiraf = False

        if bufname is not None:
            _d.setFBconfig(None, bufname=bufname)
        else:
            _ny, _nx = pix.shape
            _d.selectFB(_nx, _ny, reset=1, useiraf=useiraf)

        # Initialize the specified frame buffer
        _d.setFrame(self.frame)
        _d.eraseFrame()

        # Apply any user-specified scaling to the image; the original is
        # returned if none is specified.
        bpix = self._transformImage(pix)

        # Recompute the pixel range of (possibly) transformed array
        _z1 = self._transformImage(self.z1)
        _z2 = self._transformImage(self.z2)

        # If there was a problem in the transformation, then restore the original
        # array as the one to be displayed, even though it may not be ideal.
        if _z1 == _z2:
            if not quiet:
                log.warning('Error encountered during transformation. '
                            'No transformation applied...')
            bpix = pix
            self.z1 = np.minimum.reduce(np.ravel(bpix))
            self.z2 = np.maximum.reduce(np.ravel(bpix))
            # Failsafe in case input image is flat:
            if self.z1 == self.z2:
                self.z1 -= 1.
                self.z2 += 1.
        else:
            # Reset z1/z2 values now so that image gets displayed with
            # correct range.  Also, when displaying transformed images
            # this allows the input pixel value to be displayed, rather
            # than the transformed pixel value.
            self.z1 = _z1
            self.z2 = _z2

        _wcsinfo = nd.displaydev.ImageWCS(bpix,
                                          z1=self.z1,
                                          z2=self.z2,
                                          name=name)
        if not quiet:
            log.fullinfo('Image displayed with z1: {} z2: {}'.format(
                self.z1, self.z2))

        bpix = self._fbclipImage(bpix, _d.fbwidth, _d.fbheight)

        # Change pixel value to specified color if desired
        if masks is not None:
            if not isinstance(masks, list):
                masks = [masks]
            if mask_colors is None:
                # Set to red as default
                mask_colors = [204] * len(masks)
            for i in range(len(masks)):
                if (masks[i][0].size > 0 and masks[i][1].size > 0):
                    bpix[masks[i]] = mask_colors[i]

        # Update the WCS to match the frame buffer being used.
        _d.syncWCS(_wcsinfo)

        # write out WCS to frame buffer, then erase buffer
        _d.writeWCS(_wcsinfo)

        # Now, send the trimmed image (section) to the display device
        _d.writeImage(bpix, _wcsinfo)
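

# Hedged usage sketch: display an array with zscale limits and paint masked
# pixels in red.  `displayer` stands for an instance of the (unshown) class
# that owns this display() method, and a running image display server (e.g.
# DS9 speaking the IIS/XIMTOOL protocol) is assumed.
import numpy as np

data = np.random.normal(loc=100., scale=5., size=(512, 512)).astype(np.float32)
bpm = np.zeros(data.shape, dtype=np.uint16)
bpm[100:110, 200:210] = 1                      # flag a small bad region

mask = np.where(bpm > 0)                       # (y, x) indices to recolour
displayer.display(data, name="example frame", frame=1,
                  zscale=True, contrast=0.25, bpm=bpm,
                  masks=[mask], mask_colors=[204])
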
Exemple #32
0
def remove_single_length_dimension(adinput):
    """
    If there is only one single length dimension in the pixel data, the
    remove_single_length_dimension function will remove the single length
    dimension. In addition, this function removes any keywords associated with
    that dimension. Used ONLY by the standardizeStructure primitive in
    primitives_F2.py.

    Parameters
    ----------
    adinput : AstroDataFits
        Input AD object

    Returns
    -------
    AstroData
        Modified object of the same subclass as the input
    """

    log = logutils.get_logger(__name__)

    for ext in adinput:
        # Ensure that there is only one single length dimension in the pixel
        # data
        if ext.data.shape.count(1) == 1:

            # Determine the position of the single length dimension in the
            # tuple of array dimensions output by ext.data.shape
            axis = np.where([length == 1 for length in ext.data.shape])[0][0]

            # numpy arrays use 0-based indexing and their axes are ordered
            # from slow to fast, whereas FITS axes are 1-based and ordered
            # from fast to slow. So the single-length dimension at position
            # `axis` in ext.data.shape corresponds to FITS dimension number
            # ext.data.ndim - axis.
            dimension = ext.data.ndim - axis

            # The np.squeeze method only removes a dimension from the array if
            # the dimension has a length equal to 1
            log.status("Removing dimension {} from {}".
                       format(dimension, adinput.filename))
            ext.operate(np.squeeze)

            # Set the NAXIS keyword appropriately now that a dimension has been
            # removed
            ext.hdr.set("NAXIS", ext.data.ndim)

            # This should be a log.debug call, but that doesn't appear to work
            # right now, so using log.fullinfo
            #log.fullinfo("Updated dimensions of {}[{},{}] = {}".format(adinput.filename,
            #                                                       ext.hdr['EXTNAME'],
            #                                                       ext.hdr['EXTVER'],
            #                                                       ext.data.shape))

            # Remove the keywords relating to the dimension that has been
            # removed (IRAF seems to add WCSDIM=3, CTYPE3='LINEAR  ', CD3_3=1.,
            # LTM1_1=1., LTM2_2=1., LTM3_3=1., WAXMAP01='1 0 2 0 0 0 ',
            # WAT0_001='system=image', WAT1_001='wtype=tan axtype=ra' and
            # WAT2_001= 'wtype=tan axtype=dec' when doing e.g., imcopy
            # f2.fits[*,*,1], so perhaps these should be removed as well?)
            #
            # old data don't have all keywords.  need to check first.
            keywords = "NAXIS{0},AXISLAB{0},CD{0}_{0}".format(dimension)
            for keyword in keywords.split(','):
                if keyword in ext.hdr:
                    ext.hdr.remove(keyword)
        else:
            log.warning("No dimension of length 1 in extension pixel data. "
                        "No changes will be made to {}.".format(adinput.filename))

    return adinput
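

# Hedged usage sketch: collapse an F2 frame whose pixel data arrive as a
# (1, 2048, 2048) cube into a plain 2D image.  The file name is hypothetical.
import astrodata
import gemini_instruments  # noqa: F401

ad = astrodata.open("S20XXXXXXS0001.fits")     # hypothetical raw FLAMINGOS-2 frame
print(ad[0].data.shape)                        # e.g. (1, 2048, 2048)

ad = remove_single_length_dimension(ad)
print(ad[0].data.shape)                        # (2048, 2048); NAXIS3-related keywords removed
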
Exemple #33
0
    def __init__(self, adinputs, **kwargs):
        self.log = logutils.get_logger(__name__)
        self.myself = lambda: stack()[1][3]
        self.adinputs = adinputs
Exemple #34
0
def ad_compare(ad1, ad2):
    """
    Compares the tags, headers, and pixel values of two images

    Parameters
    ----------
    ad1: AstroData/other
        first file (an AD instance, or anything astrodata.open() accepts)
    ad2: AstroData/other
        second file (likewise)

    Returns
    -------
    bool: are the two AD instances basically the same?
    """
    log = logutils.get_logger(__name__)

    if not isinstance(ad1, astrodata.AstroData):
        ad1 = astrodata.open(ad1)
    if not isinstance(ad2, astrodata.AstroData):
        ad2 = astrodata.open(ad2)

    fname1 = ad1.filename
    fname2 = ad2.filename
    ok = True
    errorlist = []

    # If images have different lengths, give up now
    if len(ad1) != len(ad2):
        log.warning('Files have different numbers of extensions: {} v {}'.
                      format(len(ad1), len(ad2)))
        return False

    assert fname1 == fname2, ('Files have different filename attributes: '
                              '{} v {}'.format(fname1, fname2))

    # Check tags
    if ad1.tags == ad2.tags:
        log.stdinfo('TAGS match')
    else:
        log.warning('TAGS do not match:')
        log.warning('  {} (1): {}'.format(fname1, ad1.tags))
        log.warning('  {} (2): {}'.format(fname2, ad2.tags))
        errorlist.append("TAGS do not match! {} contains {}, "
                         "while {} contains {}".format(fname1, list(set(ad1.tags) - set(ad2.tags)),
                                                       fname2, list(set(ad2.tags) - set(ad1.tags))))
        errorlist.append("TAGS (cont): TAGS have {} in common".format(list(set(ad1.tags).intersection(ad2.tags))))
        ok = False

    # Check header keywords in PHU and all extension HDUs
    log.stdinfo('Checking headers...')
    for i, (h1, h2) in enumerate(zip(ad1.header, ad2.header)):
        hstr = 'PHU' if i==0 else 'HDU {}'.format(i)
        log.stdinfo('  Checking {}'.format(hstr))

        # Compare keyword lists
        s1 = set(h1.keys()) - set(['HISTORY', 'COMMENT'])
        s2 = set(h2.keys()) - set(['HISTORY', 'COMMENT'])
        if s1 != s2:
            log.warning('Header keyword mismatch...')
            if s1-s2:
                log.warning('  {} (1) contains keywords {}'.
                            format(fname1, s1-s2))
                errorlist.append("Header {} (1) contains keywords {}".format(fname1, s1 - s2))
            if s2-s1:
                log.warning('  {} (2) contains keywords {}'.
                            format(fname2, s2-s1))
                errorlist.append("Header {} (2) contains keywords {}".format(fname2, s2 - s1))
            ok = False

        # Compare values for meaningful keywords
        for kw in h1:
            # GEM-TLM is "time last modified"
            if kw not in timestamp_keys.values() and kw not in [
                    'GEM-TLM', 'HISTORY', 'COMMENT', '']:
                try:
                    v1, v2 = h1[kw], h2[kw]
                except KeyError:  # Missing keyword in AD2
                    continue
                if isinstance(v1, float):
                    if abs(v1 - v2) > 0.01:
                        log.warning('{} value mismatch: {} v {}'.
                                    format(kw, v1, v2))
                        errorlist.append('{} value mismatch: {} v {}'.
                                    format(kw, v1, v2))
                        ok = False
                else:
                    if v1 != v2:
                        log.warning('{} value mismatch: {} v {}'.
                                    format(kw, v1, v2))
                        errorlist.append('{} value mismatch: {} v {}'.
                                    format(kw, v1, v2))
                        ok = False

    # Check REFCAT status, just equal lengths
    attr1 = getattr(ad1, 'REFCAT', None)
    attr2 = getattr(ad2, 'REFCAT', None)
    if (attr1 is None) ^ (attr2 is None):
        log.warning('    Attribute mismatch for REFCAT: {} v {}'.
                    format(attr1 is not None, attr2 is not None))
        errorlist.append('Attribute mismatch for REFCAT: {} v {}'.
                         format(attr1 is not None, attr2 is not None))
        ok = False
    elif attr1 is not None and attr2 is not None:
        if len(attr1) != len(attr2):
            log.warning('    REFCAT lengths differ: {} v {}'.
                        format(len(attr1), len(attr2)))
            errorlist.append('The REFCAT lengths differ: {} = {} vs. {} = {}'.
                             format(fname1, len(attr1), fname2, len(attr2)))

            ok = False

    # Extension by extension, check all the attributes
    log.stdinfo('Checking extensions...')
    for ext1, ext2 in zip(ad1, ad2):
        log.stdinfo('  Checking extver {}'.format(ext1.hdr['EXTVER']))
        for attr in ['data', 'mask', 'variance', 'OBJMASK', 'OBJCAT']:
            attr1 = getattr(ext1, attr, None)
            attr2 = getattr(ext2, attr, None)
            if (attr1 is None) ^ (attr2 is None):
                log.warning('    Attribute mismatch for {}: {} v {}'.
                            format(attr, attr1 is not None, attr2 is not None))
                errorlist.append("Attribute error for {}: {} v {}"
                                 .format(attr, attr1 is not None, attr2 is not None))
                ok = False
                continue
            if attr1 is not None and attr2 is not None:
                if attr == 'OBJCAT':
                    if len(attr1) != len(attr2):
                        log.warning('    OBJCAT lengths differ: {} v {}'.
                                    format(len(attr1), len(attr2)))
                        errorlist.append("OBJCAT lengths differ: {} vs {}".
                                         format(len(attr1), len(attr2)))
                        ok = False
                else:
                    # Pixel-data extensions
                    if attr1.dtype.name != attr2.dtype.name:
                        log.warning('    Datatype mismatch for {}: {} v {}'.
                                    format(attr, attr1.dtype, attr2.dtype))
                        errorlist.append("Datatype differ for {}: {} vs {}".
                                    format(attr, attr1.dtype, attr2.dtype))
                        ok = False
                    if attr1.shape != attr2.shape:
                        log.warning('    Shape mismatch for {}: {} v {}'.
                                    format(attr, attr1.shape, attr2.shape))
                        errorlist.append("Shapes differ for {}: {} vs {}".
                                         format(attr, attr1.shape, attr2.shape))
                        ok = False
                    else:
                        diff = attr1 - attr2
                        maxdiff = np.max(abs(diff))
                        # Let's assume int arrays should be identical, but
                        # allow tolerance for float arrays.
                        # TODO: Maybe compare data difference against variance?
                        if 'int' in attr1.dtype.name:
                            if maxdiff > 0:
                                log.warning('    {} int arrays not identical: '
                                    'max difference {}'.format(attr, maxdiff))
                                errorlist.append("{} int arrays not identical!".format(attr))
                                ok = False
                        elif maxdiff > 0.1:
                            log.warning('    {} float arrays differ: max difference '
                                        '{}'.format(attr, maxdiff))
                            errorlist.append('{} float arrays differ: max '
                                             'difference {}'.format(attr, maxdiff))
                            ok = False
    if not ok:
        for i,e in enumerate(errorlist):
            print("%d) %s" % (i, e))
    return ok
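

# Hedged usage sketch: ad_compare() as a pytest-style regression check.  The
# file names are hypothetical; the function logs and prints any mismatches
# and returns a single boolean.
def test_reduced_image_matches_reference():
    new = "N20XXXXXXS0001_stack.fits"           # freshly reduced output
    ref = "ref/N20XXXXXXS0001_stack.fits"       # stored reference product
    assert ad_compare(new, ref), "reduced product differs from reference"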