Example #1
def min_med(images,
            weight_images,
            readnoise_list,
            exptime_list,
            background_values,
            weight_masks=None,
            combine_grow=1,
            combine_nsigma1=4,
            combine_nsigma2=3,
            fillval=False):
    """ Create a median array, rejecting the highest pixel and
    computing the lowest valid pixel after mask application.

    .. note::
        In this version of the minmed algorithm we assume that the units of
        all input data are electrons.

    Parameters
    ----------
    images : list of numpy.ndarray
        List of input data to be combined.

    weight_images : list of numpy.ndarray
        List of input data weight images to be combined.

    readnoise_list : list
        List of readnoise values to use for the input images.

    exptime_list : list
        List of exposure times to use for the input images.

    background_values : list
        List of image background values to use for the input images.

    weight_masks : list of numpy.ndarray, None
        List of input data weight masks to use for pixel rejection.
        (Default: `None`)

    combine_grow : int
        Radius (pixels) for neighbor rejection. (Default: 1)

    combine_nsigma1 : float
        Significance for accepting the minimum instead of the median;
        applied to the entire image. (Default: 4)

    combine_nsigma2 : float
        Significance for accepting the minimum instead of the median;
        applied to pixels within the `combine_grow` radius of pixels
        flagged using `combine_nsigma1`. (Default: 3)

    fillval : bool
        When `True`, use the 'imedian'/'imean' combination types instead
        of 'median'/'mean'. (Default: `False`)

    Returns
    -------
    combined_array : numpy.ndarray
        Combined array.

    """
    # In this case we want to calculate two things:
    #   1) the median array, rejecting the highest pixel (thus running
    #      imcombine with nlow=0, nhigh=1, nkeep=1, using the masks)
    #   2) the lowest valid pixel after applying the masks (thus running
    #      imcombine with nlow=0, nhigh=3, nkeep=1, using the masks)
    #
    # We also calculate the sum of the weight files (to produce the total
    # effective exposure time for each pixel).
    #
    # The total effective background in the final image is calculated as
    # follows:
    #   - convert background for each input image to counts/s
    #     (divide by exptime)
    #   - multiply this value by the weight image, to obtain the effective
    #     background counts (in DN) for each pixel, for each image
    #   - Add these images together, to obtain the total effective background
    #     for the combined image.
    #
    # Once we've made these two files, then calculate the SNR based on the
    # median-pixel image, and compare with the minimum.
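    #
    # Illustrative example (hypothetical numbers): a pixel hit by cosmic rays
    # in two of three inputs, e.g. values (10, 480, 500) e-, still yields a
    # contaminated median (~245 e-) after nhigh=1 rejection; because the
    # minimum (10 e-) lies more than nsigma * rms below that median, the
    # minimum is selected for the output.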

    nimages = len(images)
    combtype_median = 'imedian' if fillval else 'median'
    images = np.asarray(images)
    weight_images = np.asarray(weight_images)

    if weight_masks is None or len(weight_masks) == 0:
        weight_masks = None
        mask_sum = np.zeros(images.shape[1:], dtype=np.int16)
        all_bad_idx = np.array([], dtype=int)
        all_bad_idy = np.array([], dtype=int)
    else:
        weight_masks = np.asarray(weight_masks, dtype=bool)
        mask_sum = np.sum(weight_masks, axis=0, dtype=np.int16)
        all_bad_idx, all_bad_idy = np.where(mask_sum == nimages)

    # Create a different median image based upon the number of images in the
    # input list.
    if nimages == 2:
        median_file = num_combine(
            images,
            masks=weight_masks,
            combination_type='imean' if fillval else 'mean',
            nlow=0,
            nhigh=0,
            lower=None,
            upper=None)

    else:
        # The value of NHIGH=1 will cause problems when there is only 1 valid
        # unmasked input image for that pixel due to a difference in behavior
        # between 'num_combine' and 'iraf.imcombine'.
        # This value may need to be adjusted on the fly based on the number of
        # inputs and the number of masked values/pixel.
        #
        median_file = num_combine(images,
                                  masks=weight_masks,
                                  combination_type=combtype_median,
                                  nlow=0,
                                  nhigh=1,
                                  lower=None,
                                  upper=None)

        # The following section of code will address the problem caused by
        # having a value of nhigh = 1.  This will behave in a way similar to
        # the way the IRAF task IMCOMBINE behaves.  In order to accomplish
        # this, the following procedure will be followed:
        # 1) The input masks will be summed.
        # 2) The science data will be summed.
        # 3) In the locations of the summed mask where the sum is 1 less than
        #    the total number of images, the value of that location in the
        #    summed science image will be used to replace the existing value
        #    in the existing median_file.
        #
        # This procedure is being used to prevent too much data from being
        # thrown out of the image. Take for example the case of 3 input images.
        # In two of the images the pixel locations have been masked out.
        # Now, if nhigh is applied there will be no value to use for that
        # position.  However, if this new procedure is used that value in
        # the resulting images will be the value that was rejected by the
        # nhigh rejection step.
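        #
        # Illustrative example (hypothetical): with nimages = 3 and a pixel
        # masked in two inputs, mask_sum == 2 == nimages - 1 there, so the
        # masked sci_sum below reduces to the single unmasked value, which is
        # copied into median_file instead of leaving that pixel empty.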

        # We need to make certain that "bad" pixels in the sci data are set to
        # 0. That way, when the sci images are summed, the value of the sum
        # will only come from the "good" pixels.
        if weight_masks is None:
            sci_sum = np.sum(images, axis=0)
            if nimages == 1:
                median_file = sci_sum

        else:
            sci_sum = np.sum(images * np.logical_not(weight_masks), axis=0)
            # Use the summed sci image values in locations where the mask_sum
            # indicates that there is only 1 good pixel to use. The value will
            # be used in the median_file image
            idx = np.where(mask_sum == (nimages - 1))
            median_file[idx] = sci_sum[idx]

    # Create the minimum image from the stack of input images.
    if weight_masks is not None:
        # make a copy of images to avoid side-effect of modifying input
        # argument:
        images = images.copy()
        images[weight_masks] = np.nan
        images[:, all_bad_idx, all_bad_idy] = 0
        minimum_file = np.nanmin(images, axis=0)
    else:
        minimum_file = np.amin(images, axis=0)

    # Scale the weight images by the background values and sum them to
    # create an image of the total effective background (in DN) per pixel
    # (the sum of all the background-scaled weight files):
    s = np.asarray(
        [bv / et for bv, et in zip(background_values, exptime_list)])
    bkgd_file = np.sum(weight_images * s[:, None, None], axis=0)
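
    # Illustrative example (hypothetical numbers): a background of 50 e- over
    # a 250 s exposure contributes s = 0.2 counts/s; scaled by a weight
    # (effective exposure time) of 200 s it adds 40 counts of effective
    # background at that pixel.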

    # Scale the weight mask images by the square of the readnoise values.
    # Create an image of the total readnoise**2 per pixel
    # (which is the sum of all the input readnoise values).
    if weight_masks is None:
        rdn2 = sum((r**2 for r in readnoise_list))
        readnoise_file = rdn2 * np.ones_like(images[0])

    else:
        readnoise_file = np.sum(np.logical_not(weight_masks) *
                                (np.asarray(readnoise_list)**2)[:, None, None],
                                axis=0)

    # Create an image of the total effective exposure time per pixel:
    # (which is simply the sum of all the drizzle output weight files)
    weight_file = np.sum(weight_images, axis=0)

    # Scale up both the median and minimum arrays by the total effective
    # exposure time per pixel.
    minimum_file_weighted = minimum_file * weight_file
    median_file_weighted = median_file * weight_file
    del weight_file

    # Calculate the 1-sigma r.m.s.:
    #   variance = median_electrons + bkgd_electrons + readnoise**2
    #   rms = sqrt(variance)
    #   This image has units of electrons.
    #
    # Clamp the variance to be non-negative so that np.sqrt below does not
    # produce NaNs for negative pixel values:
    rms_file2 = np.fmax(median_file_weighted + bkgd_file + readnoise_file,
                        np.zeros_like(median_file_weighted))
    rms_file = np.sqrt(rms_file2)
    del bkgd_file, readnoise_file

    # For the median array, calculate the n-sigma lower threshold to the array
    # and incorporate that into the pixel values.
    median_rms_file = median_file_weighted - rms_file * combine_nsigma1

    if combine_grow != 0:
        # Do a more sophisticated rejection: For all cases where the minimum
        # pixel will be accepted instead of the median, set a lower threshold
        # for that pixel and the ones around it (ie become less conservative
        # in rejecting the median). This is because in cases of
        # triple-incidence cosmic rays, quite often the low-lying outliers
        # of the CRs can influence the median for the initial relatively high
        # value of sigma, so a lower threshold must be used to make sure that
        # the minimum is selected.
        #
        # This is done as follows:
        # 1) make an image which is zero everywhere except where the minimum
        #    will be accepted
        # 2) box-car smooth this image, to make these regions grow.
        # 3) In the file "median_rms_file_electrons", replace these pixels
        #     by median - combine_nsigma2 * rms
        #
        # Then use this image in the final replacement, in the same way as for
        # the case where this option is not selected.
        minimum_flag_file = np.less(minimum_file_weighted,
                                    median_rms_file).astype(np.float64)

        # The box size value must be an integer. This is not a problem since
        # __combine_grow should always be an integer type. The combine_grow
        # column in the MDRIZTAB should also be an integer type.
        boxsize = int(2 * combine_grow + 1)
        boxshape = (boxsize, boxsize)
        minimum_grow_file = np.zeros_like(images[0])

        # The boxcar convolution would fail for two reasons:
        #   1) The kernel size for the boxcar is bigger than the actual image.
        #   2) The grow parameter was specified with a value < 0.  This would
        #      result in an illegal boxshape kernel. The dimensions of the
        #      kernel box *MUST* be integer and greater than zero.
        #
        #   In either case, raise an error with a meaningful explanation
        #   based upon the conditionals described above.
        if boxsize <= 0:
            errormsg1 = "############################################################\n"
            errormsg1 += "# The boxcar convolution in minmed has failed.  The 'grow' #\n"
            errormsg1 += "# parameter must be greater than or equal to zero. You     #\n"
            errormsg1 += "# specified an input value for the 'grow' parameter of:    #\n"
            errormsg1 += "        combine_grow: " + str(combine_grow) + '\n'
            errormsg1 += "############################################################\n"
            raise ValueError(errormsg1)

        if boxsize > images.shape[1]:
            errormsg2 = "############################################################\n"
            errormsg2 += "# The boxcar convolution in minmed has failed.  The 'grow' #\n"
            errormsg2 += "# parameter specified has resulted in a boxcar kernel that #\n"
            errormsg2 += "# has dimensions larger than the actual image.  You        #\n"
            errormsg2 += "# specified an input value for the 'grow' parameter of:    #\n"
            errormsg2 += "        combine_grow: " + str(combine_grow) + '\n'
            errormsg2 += "############################################################\n"
            print(images.shape[1:])
            raise ValueError(errormsg2)

        # Attempt the boxcar convolution using the boxshape based upon the user
        # input value of "grow"
        boxcar(minimum_flag_file,
               boxshape,
               output=minimum_grow_file,
               mode='constant',
               cval=0)
        del minimum_flag_file

        median_rms_file = np.where(
            np.equal(minimum_grow_file,
                     0), median_file_weighted - rms_file * combine_nsigma1,
            median_file_weighted - rms_file * combine_nsigma2)
        del rms_file, minimum_grow_file

    # Finally decide whether to use the minimum or the median (in counts/s),
    # based on whether the median is more than n-sigma above the minimum.
    combined_array = np.where(np.less(minimum_file_weighted, median_rms_file),
                              minimum_file, median_file)
    # Set fill regions to a pixel value of 0.
    combined_array[all_bad_idx, all_bad_idy] = 0

    return combined_array
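
A minimal usage sketch for the function above (hypothetical data; it assumes
numpy is imported as np and that the num_combine and boxcar helpers used by
min_med are importable, e.g. from the stsci.image and stsci.convolve
packages):

import numpy as np

# Three small synthetic exposures (electrons); a "cosmic ray" hits the same
# pixel in two of the three inputs, so the high-rejected median is still
# contaminated there and min_med should fall back to the minimum.
rng = np.random.default_rng(42)
images = [rng.normal(100.0, 5.0, (8, 8)).astype(np.float32) for _ in range(3)]
images[0][4, 4] += 5000.0
images[1][4, 4] += 4800.0

weights = [np.ones((8, 8), dtype=np.float32)] * 3  # unit weight maps

combined = min_med(images, weights,
                   readnoise_list=[5.0, 5.0, 5.0],
                   exptime_list=[300.0, 300.0, 300.0],
                   background_values=[10.0, 10.0, 10.0])

print(combined[4, 4])  # ~100 e-, the minimum, not the contaminated median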
Example #2
def _median(imageObjectList, paramDict):
    """Create a median image from the list of image Objects
       that has been given.
    """
    newmasks = paramDict['median_newmasks']
    comb_type = paramDict['combine_type'].lower()
    nlow = paramDict['combine_nlow']
    nhigh = paramDict['combine_nhigh']
    grow = paramDict['combine_grow'] if 'minmed' in comb_type else 0
    maskpt = paramDict['combine_maskpt']
    proc_units = paramDict['proc_unit']
    compress = paramDict['compress']
    bufsizeMB = paramDict['combine_bufsize']

    sigma = paramDict["combine_nsigma"]
    sigmaSplit = sigma.split()
    nsigma1 = float(sigmaSplit[0])
    nsigma2 = float(sigmaSplit[1])
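    # e.g. combine_nsigma = "4 3" yields nsigma1 = 4.0 and nsigma2 = 3.0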

    if paramDict['combine_lthresh'] is None:
        lthresh = None
    else:
        lthresh = float(paramDict['combine_lthresh'])

    if paramDict['combine_hthresh'] is None:
        hthresh = None
    else:
        hthresh = float(paramDict['combine_hthresh'])

    # the name of the output median file is defined in the output wcs object
    # and stored in the outputNames["outMedian"] entry of every imageObject
    medianfile = imageObjectList[0].outputNames["outMedian"]

    # Build combined array from single drizzled images.

    # Start by removing any previous products...
    if os.access(medianfile, os.F_OK):
        os.remove(medianfile)

    # Define lists for instrument-specific parameters; these are extracted
    # from the image objects and passed to the minmed routine.
    readnoiseList = []
    exposureTimeList = []
    backgroundValueList = []  # list of MDRIZSKY * platescale values
    singleDrizList = []  # these are the input images
    singleWeightList = []  # pointers to the data arrays
    wht_mean = []  # Compute the mean value of each wht image

    single_hdr = None
    virtual = None

    # for each image object
    for image in imageObjectList:
        if virtual is None:
            virtual = image.inmemory

        det_gain = image.getGain(1)
        img_exptime = image._image['sci', 1]._exptime
        native_units = image.native_units
        native_units_lc = native_units.lower()

        if proc_units.lower() == 'native':
            if native_units_lc not in [
                    'counts', 'electrons', 'counts/s', 'electrons/s'
            ]:
                raise ValueError(
                    "Unexpected native units: '{}'".format(native_units))

            if lthresh is not None:
                if native_units_lc.startswith('counts'):
                    lthresh *= det_gain
                if native_units_lc.endswith('/s'):
                    lthresh *= img_exptime

            if hthresh is not None:
                if native_units_lc.startswith('counts'):
                    hthresh *= det_gain
                if native_units_lc.endswith('/s'):
                    hthresh *= img_exptime

        singleDriz = image.getOutputName("outSingle")
        singleDriz_name = image.outputNames['outSingle']
        singleWeight = image.getOutputName("outSWeight")
        singleWeight_name = image.outputNames['outSWeight']

        # If compression was used, reference ext=1 as CompImageHDU only writes
        # out MEF files, not simple FITS.
        if compress:
            wcs_ext = '[1]'
            wcs_extnum = 1
        else:
            wcs_ext = '[0]'
            wcs_extnum = 0

        if not virtual:
            if isinstance(singleDriz, str):
                iter_singleDriz = singleDriz + wcs_ext
                iter_singleWeight = singleWeight + wcs_ext
            else:
                iter_singleDriz = singleDriz[wcs_extnum]
                iter_singleWeight = singleWeight[wcs_extnum]
        else:
            iter_singleDriz = singleDriz_name + wcs_ext
            iter_singleWeight = singleWeight_name + wcs_ext

        # read in WCS from first single drizzle image to use as WCS for
        # median image
        if single_hdr is None:
            if virtual:
                single_hdr = singleDriz[wcs_extnum].header
            else:
                single_hdr = fits.getheader(singleDriz_name,
                                            ext=wcs_extnum,
                                            memmap=False)

        single_image = iterfile.IterFitsFile(iter_singleDriz)
        if virtual:
            single_image.handle = singleDriz
            single_image.inmemory = True

        singleDrizList.append(single_image)  # add to an array for bookkeeping

        # If it exists, extract the corresponding weight images
        if (not virtual
                and os.access(singleWeight, os.F_OK)) or (virtual
                                                          and singleWeight):
            weight_file = iterfile.IterFitsFile(iter_singleWeight)
            if virtual:
                weight_file.handle = singleWeight
                weight_file.inmemory = True

            singleWeightList.append(weight_file)
            try:
                tmp_mean_value = ImageStats(weight_file.data,
                                            lower=1e-8,
                                            fields="mean",
                                            nclip=0).mean
            except ValueError:
                tmp_mean_value = 0.0
            wht_mean.append(tmp_mean_value * maskpt)

            # Extract instrument specific parameters and place in lists

            # If an image has zero exposure time we will
            # redefine that value as '1'.  Although this will cause inaccurate
            # scaling of the data to occur in the 'minmed' combination
            # algorithm, this is a necessary evil since it avoids divide by
            # zero exceptions.  It is more important that the divide by zero
            # exceptions not cause AstroDrizzle to crash in the pipeline than
            # it is to raise an exception for this obviously bad data even
            # though this is not the type of data you would wish to process
            # with AstroDrizzle.
            #
            # Get the exposure time from the InputImage object
            #
            # MRD 19-May-2011
            # Changed exposureTimeList to take exposure time from img_exptime
            # variable instead of the image._exptime attribute, since
            # image._exptime was just giving 1.
            #
            exposureTimeList.append(img_exptime)

            # Use only "commanded" chips to extract subtractedSky and rdnoise:
            rdnoise = 0.0
            nchips = 0
            bsky = None  # minimum sky across **used** chips

            for chip in image.returnAllChips(extname=image.scienceExt):
                # compute sky value as sky/pixel using the single_drz
                # pixel scale:
                if bsky is None or bsky > chip.subtractedSky:
                    bsky = chip.subtractedSky * chip._conversionFactor

                # Extract the readnoise value for the chip
                rdnoise += chip._rdnoise**2
                nchips += 1

            if bsky is None:
                bsky = 0.0

            if nchips > 0:
                rdnoise = math.sqrt(rdnoise / nchips)

            backgroundValueList.append(bsky)
            readnoiseList.append(rdnoise)

            print("reference sky value for image '{}' is {}".format(
                image._filename, backgroundValueList[-1]))
        #
        # END Loop over input image list
        #

    # Create an array for the median output image using the size of the first
    # image in the list, and store other useful image characteristics:
    single_driz_data = singleDrizList[0].data
    data_item_size = single_driz_data.itemsize
    single_data_dtype = single_driz_data.dtype
    imrows, imcols = single_driz_data.shape

    medianImageArray = np.zeros_like(single_driz_data)

    del single_driz_data

    if comb_type == "minmed" and not newmasks:
        # Issue a warning if minmed is being run with newmasks turned off.
        print('\nWARNING: Creating median image without the application of '
              'bad pixel masks!\n')

    # The overlap value needs to be set to 2*grow in order to
    # avoid edge effects when scrolling down the image, and to
    # ensure that the last section returned from the iterator
    # has enough rows to span the kernel used in the boxcar method
    # within minmed.
    overlap = 2 * grow
    buffsize = BUFSIZE if bufsizeMB is None else (BUFSIZE * bufsizeMB)
    section_nrows = min(imrows, int(buffsize / (imcols * data_item_size)))

    if section_nrows == 0:
        buffsize = imcols * data_item_size
        print("WARNING: Buffer size is too small to hold a single row.\n"
              "         Buffer size size will be increased to minimal "
              "required: {}MB".format(float(buffsize) / 1048576.0))
        section_nrows = 1

    if section_nrows < overlap + 1:
        new_grow = int((section_nrows - 1) / 2)
        if section_nrows == imrows:
            print("'grow' parameter is too large for actual image size. "
                  "Reducing 'grow' to {}".format(new_grow))
        else:
            print("'grow' parameter is too large for requested buffer size. "
                  "Reducing 'grow' to {}".format(new_grow))
        grow = new_grow
        overlap = 2 * grow

    nbr = section_nrows - overlap
    nsec = (imrows - overlap) // nbr
    if (imrows - overlap) % nbr > 0:
        nsec += 1
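
    # e.g. (hypothetical sizes): imrows = 100, section_nrows = 30, grow = 2
    # give overlap = 4, nbr = 26, and nsec = 4, since (100 - 4) // 26 == 3
    # with 18 leftover rows that still need one final, shorter section.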

    for k in range(nsec):
        e1 = k * nbr
        e2 = e1 + section_nrows
        u1 = grow
        u2 = u1 + nbr

        if k == 0:  # first section
            u1 = 0

        if k == nsec - 1:  # last section
            e2 = min(e2, imrows)
            e1 = min(e1, e2 - overlap - 1)
            u2 = e2 - e1

        imdrizSectionsList = np.empty((len(singleDrizList), e2 - e1, imcols),
                                      dtype=single_data_dtype)
        for i, w in enumerate(singleDrizList):
            imdrizSectionsList[i, :, :] = w[e1:e2]

        if singleWeightList:
            weightSectionsList = np.empty(
                (len(singleWeightList), e2 - e1, imcols),
                dtype=single_data_dtype)
            for i, w in enumerate(singleWeightList):
                weightSectionsList[i, :, :] = w[e1:e2]
        else:
            weightSectionsList = None

        weight_mask_list = None

        if newmasks and weightSectionsList is not None:
            # Build new masks from single drizzled images.
            # Generate new pixel mask file for median step.
            # This mask will be created from the single-drizzled
            # weight image for this image.

            # The mean of the weight array will be computed and all
            # pixels with values less than maskpt (default 0.7) times that
            # mean will be flagged as bad in this mask. This mask will then
            # be used when creating the median image.
            # 0 means good, 1 means bad here...
            weight_mask_list = np.less(
                weightSectionsList,
                np.asarray(wht_mean)[:, None, None]).astype(np.uint8)
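            # e.g. (hypothetical): a weight pixel of 0.5 in an image whose
            # mean weight is 1.0 falls below maskpt * mean = 0.7 and is
            # flagged as bad (1) in the mask.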

        if 'minmed' in comb_type:  # Do MINMED
            # set up use of 'imedian'/'imean' in minmed algorithm
            fillval = comb_type.startswith('i')

            # Create the combined array object using the minmed algorithm
            result = min_med(imdrizSectionsList,
                             weightSectionsList,
                             readnoiseList,
                             exposureTimeList,
                             backgroundValueList,
                             weight_masks=weight_mask_list,
                             combine_grow=grow,
                             combine_nsigma1=nsigma1,
                             combine_nsigma2=nsigma2,
                             fillval=fillval)

        else:  # DO NUMCOMBINE
            # Create the combined array object using the numcombine task
            result = numcombine.num_combine(imdrizSectionsList,
                                            masks=weight_mask_list,
                                            combination_type=comb_type,
                                            nlow=nlow,
                                            nhigh=nhigh,
                                            upper=hthresh,
                                            lower=lthresh)

        # Write out the processed image sections to the final output array:
        medianImageArray[e1 + u1:e1 + u2, :] = result[u1:u2, :]

    # Write out the combined image
    # use the header from the first single drizzled image in the list
    pf = _writeImage(medianImageArray, inputHeader=single_hdr)

    if virtual:
        mediandict = {}
        mediandict[medianfile] = pf
        for img in imageObjectList:
            img.saveVirtualOutputs(mediandict)
    else:
        try:
            print("Saving output median image to: '{}'".format(medianfile))
            pf.writeto(medianfile)
        except IOError:
            msg = "Problem writing file '{}'".format(medianfile)
            print(msg)
            raise IOError(msg)

    # Always close any files opened to produce median image; namely,
    # single drizzle images and singly-drizzled weight images
    #
    for img in singleDrizList:
        if not virtual:
            img.close()

    # Close all singly drizzled weight images used to create median image.
    for img in singleWeightList:
        if not virtual:
            img.close()
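
The trickiest part of _median is the section bookkeeping (e1, e2, u1, u2)
used to write overlapping buffer sections back into the output array. Below
is a small self-contained sketch (pure numpy, hypothetical sizes) that
replays just that arithmetic and checks that the rows written back tile the
image exactly once:

import numpy as np

imrows, section_nrows, grow = 100, 30, 2   # hypothetical sizes
overlap = 2 * grow
nbr = section_nrows - overlap
nsec = (imrows - overlap) // nbr
if (imrows - overlap) % nbr > 0:
    nsec += 1

covered = np.zeros(imrows, dtype=int)
for k in range(nsec):
    e1 = k * nbr                  # first row read for this section
    e2 = e1 + section_nrows       # one past the last row read
    u1, u2 = grow, grow + nbr     # rows of the section that are kept
    if k == 0:                    # first section keeps its leading rows
        u1 = 0
    if k == nsec - 1:             # last section is clamped to the image
        e2 = min(e2, imrows)
        e1 = min(e1, e2 - overlap - 1)
        u2 = e2 - e1
    covered[e1 + u1:e1 + u2] += 1

assert (covered == 1).all()       # every output row written exactly once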