Example #1
File: create_median.py  Project: rij/jwst
def do_median(drizzle_groups_sci, drizzle_groups_wht, **pars):
    # start by interpreting input parameters
    nlow = pars.get('nlow', 0)
    nhigh = pars.get('nhigh', 0)
    high_threshold = pars.get('hthresh', None)
    low_threshold = pars.get('lthresh', None)
    nsigma = pars.get('nsigma', '4 3')
    maskpt = pars.get('maskpt', 0.7)

    # Perform additional interpretation of some parameters
    sigmaSplit = nsigma.split()
    nsigma1 = float(sigmaSplit[0])
    nsigma2 = float(sigmaSplit[1])

    if high_threshold is not None and (high_threshold.strip() == ""
                                       or high_threshold < 0):
        high_threshold = None
    if low_threshold is not None and (low_threshold.strip() == ""
                                      or low_threshold < 0):
        low_threshold = None

    if high_threshold is not None: high_threshold = float(high_threshold)
    if low_threshold is not None: low_threshold = float(low_threshold)

    _weight_mask_list = []

    for weight_arr in drizzle_groups_wht:
        # Initialize an output mask array to zeros (0 = good pixel);
        # a fresh mask is built for each input weight image
        _weight_mask = np.zeros(weight_arr.shape, dtype=np.uint8)
        try:
            tmp_mean_value = ImageStats(weight_arr,
                                        lower=1e-8,
                                        fields="mean",
                                        nclip=0).mean
        except ValueError:
            tmp_mean_value = 0.0
        _wht_mean = tmp_mean_value * maskpt
        # 0 means good, 1 means bad here...
        np.putmask(_weight_mask, np.less(weight_arr, _wht_mean), 1)
        #_weight_mask.info()
        _weight_mask_list.append(_weight_mask)

    # Create the combined array object using the numcombine task
    result = numcombine.numCombine(drizzle_groups_sci,
                                   numarrayMaskList=_weight_mask_list,
                                   combinationType="median",
                                   nlow=nlow,
                                   nhigh=nhigh,
                                   upper=high_threshold,
                                   lower=low_threshold)
    median_array = result.combArrObj

    del _weight_mask_list

    return median_array
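
To make the masking step in do_median concrete, here is a self-contained numpy sketch of the same thresholding logic; the array values are invented, and the clipped ImageStats mean is replaced by a plain mean so the snippet runs without the stsci packages:

import numpy as np

weight_arr = np.array([[4.0, 0.1], [5.0, 6.0]], dtype=np.float32)
maskpt = 0.7

# Mean of the weight image; pixels below maskpt * mean are flagged bad (1).
wht_mean = weight_arr.mean() * maskpt          # 0.7 * 3.775 = 2.6425
weight_mask = np.zeros(weight_arr.shape, dtype=np.uint8)
np.putmask(weight_mask, np.less(weight_arr, wht_mean), 1)
# weight_mask -> [[0, 1], [0, 0]]: only the 0.1 pixel falls below the threshold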
Example #3
    def __init__(
        self,
        imageList,  # list of input data to be combined.
        weightImageList,  # list of input data weight images to be combined.
        readnoiseList,  # list of readnoise values to use for the input images.
        exposureTimeList,  # list of exposure times to use for the input images.
        backgroundValueList,  # list of image background values to use for the input images
        weightMaskList=None,  # list of input data weight masks to use for pixel rejection.
        combine_grow=1,  # Radius (pixels) for neighbor rejection
        combine_nsigma1=4,  # Significance for accepting minimum instead of median
        combine_nsigma2=3,  # Significance for accepting minimum instead of median
        fillval=False  # Turn on use of imedian/imean
    ):

        warnings.warn(
            "The 'minmed' class is deprecated and may be removed"
            " in a future version. Use 'min_med()' instead.",
            DeprecationWarning)

        # Define input variables
        self._imageList = imageList
        self._weightImageList = weightImageList
        self._weightMaskList = weightMaskList
        self._exposureTimeList = exposureTimeList
        self._readnoiseList = readnoiseList
        self._backgroundValueList = backgroundValueList
        self._numberOfImages = len(self._imageList)
        self._combine_grow = combine_grow
        self._combine_nsigma1 = combine_nsigma1
        self._combine_nsigma2 = combine_nsigma2

        if fillval:
            combtype_mean = 'imean'
            combtype_median = 'imedian'
        else:
            combtype_mean = 'mean'
            combtype_median = 'median'

        # Create a different median image based upon the number of images in the input list.
        median_file = np.zeros(self._imageList[0].shape,
                               dtype=self._imageList[0].dtype)
        if (self._numberOfImages == 2):
            tmp = numCombine(self._imageList,
                             numarrayMaskList=self._weightMaskList,
                             combinationType=combtype_mean,
                             nlow=0,
                             nhigh=0,
                             nkeep=1,
                             upper=None,
                             lower=None)
            median_file = tmp.combArrObj
        else:
            # The value of NHIGH=1 will cause problems when there is only 1 valid
            # unmasked input image for that pixel due to a difference in behavior
            # between 'numcombine' and 'iraf.imcombine'.
            # This value may need to be adjusted on the fly based on the number of
            # inputs and the number of masked values/pixel.
            #
            tmp = numCombine(self._imageList,
                             numarrayMaskList=self._weightMaskList,
                             combinationType=combtype_median,
                             nlow=0,
                             nhigh=1,
                             nkeep=1,
                             upper=None,
                             lower=None)
            median_file = tmp.combArrObj

            if self._weightMaskList in [None, []]:
                self._weightMaskList = [
                    np.zeros(self._imageList[0].shape,
                             dtype=self._imageList[0].dtype)
                ] * len(self._imageList)
            # The following section of code will address the problem caused by having
            # a value of nhigh = 1.  This will behave in a way similar to the way the
            # IRAF task IMCOMBINE behaves.  In order to accomplish this, the following
            # procedure will be followed:
            # 1) The input masks will be summed.
            # 2) The science data will be summed.
            # 3) In the locations of the summed mask where the sum is 1 less than
            #    the total number of images, the value of that location in the summed
            #    science image will be used to replace the existing value in the
            #    existing median_file.
            #
            # This procedure is being used to prevent too much data from being thrown
            # out of the image.  Take for example the case of 3 input images.  In two
            # of the images the pixel locations have been masked out.  Now, if nhigh
            # is applied there will be no value to use for that position.  However,
            # if this new procedure is used that value in the resulting images will
            # be the value that was rejected by the nhigh rejection step.
            #
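            # Numeric illustration (invented values; 3 inputs, one pixel):
            # masks = [1, 1, 0] give maskSum = 2 = numberOfImages - 1; sci
            # values = [50, 60, 7] with the two masked pixels zeroed give
            # sciSum = 7, and that 7 replaces the median at this pixel.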

            # We need to make certain that "bad" pixels in the sci data are set to 0.  That way,
            # when the sci images are summed, the value of the sum will only come from the "good"
            # pixels.
            tmpList = []
            for image in range(len(self._imageList)):
                tmp = np.where(self._weightMaskList[image] == 1, 0,
                               self._imageList[image])
                tmpList.append(tmp)

            # Sum the mask files
            maskSum = self._sumImages(self._weightMaskList)
            # Sum the science images
            sciSum = self._sumImages(tmpList)
            del (tmpList)
            # Use the summed sci image values in locations where the maskSum indicates
            # that there is only 1 good pixel to use.  The value will be used in the
            # median_file image
            median_file = np.where(maskSum == self._numberOfImages - 1, sciSum,
                                   median_file)

        if self._weightMaskList in [None, []]:
            self._weightMaskList = [
                np.zeros(self._imageList[0].shape,
                         dtype=self._imageList[0].dtype)
            ] * len(self._imageList)
        # Sum the weightMaskList elements
        maskSum = self._sumImages(self._weightMaskList)

        # Create the minimum image from the stack of input images.
        # Find the maximum pixel value for the image stack.
        maxValue = -1e+9
        for image in self._imageList:
            newMax = image.max()
            if (newMax > maxValue):
                maxValue = newMax

        # For each image, set pixels masked as "bad" to the "super-maximum" value.
        for image in range(len(self._imageList)):
            self._imageList[image] = np.where(self._weightMaskList[image] == 1,
                                              maxValue + 1,
                                              self._imageList[image])

        # Call numcombine throwing out the highest N - 1 pixels.
        tmp = numCombine(self._imageList,
                         numarrayMaskList=None,
                         combinationType=combtype_median,
                         nlow=0,
                         nhigh=self._numberOfImages - 1,
                         nkeep=1,
                         upper=None,
                         lower=None)
        minimum_file = tmp.combArrObj
        # Reset any pixel still at maxValue + 1 (i.e., masked in every input) to 0.
        minimum_file = np.where(maskSum == self._numberOfImages, 0,
                                minimum_file)

        # Scale each weight image by its background rate (background / exposure
        # time) and collect the results for summing into the total background image.
        backgroundFileList = []
        for image in range(len(self._weightImageList)):
            tmp = self._weightImageList[image] * (
                self._backgroundValueList[image] /
                (self._exposureTimeList[image]))
            backgroundFileList.append(tmp)

        # Create an image of the total effective background (in DN) per pixel:
        # (which is the sum of all the background-scaled weight files)
        #
        bkgd_file = self._sumImages(backgroundFileList)
        del (backgroundFileList)

        #
        # Scale the weight mask images by the square of the readnoise values
        #
        readnoiseFileList = []
        for image in range(len(self._weightMaskList)):
            tmp = (np.logical_not(self._weightMaskList[image]) *
                   (self._readnoiseList[image] * self._readnoiseList[image]))
            readnoiseFileList.append(tmp)

        # Create an image of the total readnoise**2 per pixel:
        # (which is the sum of all the input readnoise values)
        #
        readnoise_file = self._sumImages(readnoiseFileList)
        del (readnoiseFileList)

        # Create an image of the total effective exposure time per pixel:
        # (which is simply the sum of all the drizzle output weight files)
        #
        weight_file = self._sumImages(self._weightImageList)

        # Scale up both the median and minimum arrays by the total effective exposure time
        # per pixel.
        #
        minimum_file_weighted = minimum_file * weight_file
        median_file_weighted = median_file * weight_file
        del (weight_file)

        # Calculate the 1-sigma r.m.s.:
        #   variance = median_electrons + bkgd_electrons + readnoise**2
        #   rms = sqrt(variance)
        #   This image has units of electrons.
        #
        # Clip negative values to zero so that np.sqrt below does not produce NaNs.
        rms_file2 = np.fmax(median_file_weighted + bkgd_file + readnoise_file,
                            np.zeros_like(median_file_weighted))
        rms_file = np.sqrt(rms_file2)

        del bkgd_file
        del readnoise_file
        # For the median array, calculate the n-sigma lower threshold to the array
        # and incorporate that into the pixel values.
        #
        median_rms_file = median_file_weighted - (rms_file *
                                                  self._combine_nsigma1)

        if self._combine_grow != 0:
            #
            # Do a more sophisticated rejection: For all cases where the minimum pixel will
            # be accepted instead of the median, set a lower threshold for that pixel and the
            # ones around it (i.e., become less conservative in rejecting the median). This is
            # because in cases of triple-incidence cosmic rays, quite often the low-lying
            # outliers of the CRs can influence the median for the initial relatively high
            # value of sigma, so a lower threshold must be used to make sure that the minimum
            # is selected.
            #
            # This is done as follows:
            # 1) make an image which is zero everywhere except where the minimum will be accepted
            # 2) box-car smooth this image, to make these regions grow.
            # 3) In the array "median_rms2_file", replace these pixels
            #     by median - combine_nsigma2 * rms
            #
            # Then use this image in the final replacement, in the same way as for the
            # case where this option is not selected.

            minimum_flag_file = np.where(
                np.less(minimum_file_weighted, median_rms_file), 1, 0)

            # The box size value must be an integer.  This is not a problem since _combine_grow
            # should always be an integer type.  The combine_grow column in the MDRIZTAB should
            # also be an integer type.
            boxsize = int(2 * self._combine_grow + 1)
            boxshape = (boxsize, boxsize)
            minimum_grow_file = np.zeros(self._imageList[0].shape,
                                         dtype=self._imageList[0].dtype)

            # If the boxcar convolution has failed it is potentially for two reasons:
            #   1) The kernel size for the boxcar is bigger than the actual image.
            #   2) The grow parameter was specified with a value < 0.  This would result
            #      in an illegal boxshape kernel.  The dimensions of the kernel box *MUST*
            #      be integer and greater than zero.
            #
            #   If the boxcar convolution has failed, try to give a meaningful explanation
            #   as to why based upon the conditionals described above.

            if (boxsize <= 0):
                errormsg1 = "############################################################\n"
                errormsg1 += "# The boxcar convolution in minmed has failed.  The 'grow' #\n"
                errormsg1 += "# parameter must be greater than or equal to zero. You     #\n"
                errormsg1 += "# specified an input value for the 'grow' parameter of:    #\n"
                errormsg1 += "        combine_grow: " + str(
                    self._combine_grow) + '\n'
                errormsg1 += "############################################################\n"
                raise ValueError(errormsg1)
            if (boxsize > self._imageList[0].shape[0]):
                errormsg2 = "############################################################\n"
                errormsg2 += "# The boxcar convolution in minmed has failed.  The 'grow' #\n"
                errormsg2 += "# parameter specified has resulted in a boxcar kernel that #\n"
                errormsg2 += "# has dimensions larger than the actual image.  You        #\n"
                errormsg2 += "# specified an input value for the 'grow' parameter of:    #\n"
                errormsg2 += "        combine_grow: " + str(
                    self._combine_grow) + '\n'
                errormsg2 += "############################################################\n"
                print(self._imageList[0].shape)
                raise ValueError(errormsg2)

            # Attempt the boxcar convolution using the boxshape based upon the user input value of "grow"
            boxcar(minimum_flag_file,
                   boxshape,
                   output=minimum_grow_file,
                   mode='constant',
                   cval=0)

            del (minimum_flag_file)

            temp1 = (median_file_weighted - (rms_file * self._combine_nsigma1))
            temp2 = (median_file_weighted - (rms_file * self._combine_nsigma2))
            median_rms2_file = np.where(np.equal(minimum_grow_file, 0), temp1,
                                        temp2)
            del (temp1)
            del (temp2)
            del (rms_file)
            del (minimum_grow_file)

            # Finally decide whether to use the minimum or the median (in counts/s),
            # based on whether the median is more than nsigma1/nsigma2 sigma above the minimum.
            #
            self.combArrObj = np.where(
                np.less(minimum_file_weighted, median_rms2_file), minimum_file,
                median_file)

        else:
            # Finally decide whether to use the minimum or the median (in counts/s),
            # based on whether the median is more than nsigma1 sigma above the minimum.
            #
            self.combArrObj = np.where(
                np.less(minimum_file_weighted, median_rms_file), minimum_file,
                median_file)

        # Set fill regions to a pixel value of 0.
        self.combArrObj = np.where(maskSum == self._numberOfImages, 0,
                                   self.combArrObj)
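
The heart of minmed is that final np.where selection: take the per-pixel minimum wherever the weighted minimum falls below the weighted median minus n-sigma times the rms, and keep the median otherwise. A toy numpy sketch of just that rule, with invented values:

import numpy as np

median_img  = np.array([10.0, 10.0, 10.0])  # per-pixel median of the stack
minimum_img = np.array([9.5, 2.0, 10.5])    # per-pixel minimum of the stack
weight      = np.ones(3)                    # total effective exposure time
rms         = np.ones(3)                    # 1-sigma noise per pixel
nsigma1     = 4.0

median_rms = median_img * weight - nsigma1 * rms      # [6.0, 6.0, 6.0]
combined = np.where(minimum_img * weight < median_rms,
                    minimum_img, median_img)
# combined -> [10.0, 2.0, 10.0]: only the pixel whose minimum lies more
# than 4 sigma below the median is replaced by the minimum value.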
Example #4
File: dither.py  Project: sosey/hstaxe
    def run(self):
        """
        Run the median combine step

        The code was either directly stolen from the corresponding
        pydrizzle version or done after this version. Necessary
        adjustments to the slitless data were applied.
        """
        sci_data = []

        for one_image in self.input_data['sci_imgs']:
            if os.access(one_image, os.F_OK):
                in_fits = fits.open(one_image, 'readonly')
                sci_data.append(in_fits[0].data)
                in_fits.close()

        wht_data = []
        for one_image in self.input_data['wht_imgs']:
            if os.access(one_image, os.F_OK):
                in_fits = fits.open(one_image, 'readonly')
                wht_data.append(in_fits[0].data)
                in_fits.close()
            else:
                _log.info("{0:s} not found/created by drizzle"
                      "...skipping it.".format(one_image))

        if len(sci_data) != len(wht_data):
            _log.info("The number of single_sci images created by "
                  "drizzle does not match the number of single_wht"
                  " files created!")
            raise aXeError("drizzle error")

        weight_mask_list = []

        # added the except so that if the image area contains only
        # zeros then the zero value is returned, which is better for
        # later processing.
        # We don't understand why the original lower=1e-8 value was
        # supplied, unless it was for the case of spectra in the normal
        # field of view; see #1110
        for wht_arr in wht_data:
            try:
                tmp_mean_value = self.combine_maskpt * ImageStats(
                    wht_arr, lower=1e-8, lsig=None, usig=None,
                    fields="mean", nclip=0).mean
            except (ValueError, AttributeError):
                tmp_mean_value = 0.
                _log.info("tmp_mean_value set to 0 because no good "
                      "pixels found; {0:s}".format(self.ext_names["MEF"]))
            except Exception:
                tmp_mean_value = 0.
                _log.info("tmp_mean_value set to 0; possible uncaught "
                      "exception in dither.py; {0:s}"
                      .format(self.ext_names["MEF"]))

            weight_mask = np.zeros(wht_arr.shape, dtype=np.uint8)
            np.putmask(weight_mask, np.less(wht_arr, tmp_mean_value), 1)

            weight_mask_list.append(weight_mask)

        if len(sci_data) < 2:
            _log.info('\nNumber of images to flatten: %i!' % len(sci_data))
            _log.info('Set combine type to "minimum"!')
            self.combine_type = 'minimum'

        if (self.combine_type == "minmed"):
            # Create the combined array object using the minmed algorithm
            result = minmed(sci_data,  # list of input data to be combined.
                            wht_data,  # list of input data weight images to be combined.
                            self.input_data['rdn_vals'],  # list of readnoise values to use for the input images.
                            self.input_data['exp_vals'],  # list of exposure times to use for the input images.
                            self.input_data['sky_vals'],  # list of image background values to use for the input images
                            weightMaskList=weight_mask_list,  # list of input data weight masks to use for pixel rejection.
                            combine_grow=self.combine_grow,  # Radius (pixels) for neighbor rejection
                            combine_nsigma1=self.combine_nsigma1,  # Significance for accepting minimum instead of median
                            combine_nsigma2=self.combine_nsigma2  # Significance for accepting minimum instead of median
                            )
        else:
            # _log.info 'going to other', combine_type
            # Create the combined array object using the numcombine task
            result = numCombine(sci_data,
                                numarrayMaskList=weight_mask_list,
                                combinationType=self.combine_type,
                                nlow=self.combine_nlow,
                                nhigh=self.combine_nhigh,
                                upper=self.combine_hthresh,
                                lower=self.combine_lthresh
                                )

        # _log.info result.combArrObj
        hdu = fits.PrimaryHDU(result.combArrObj)
        hdulist = fits.HDUList([hdu])
        hdulist[0].header['EXPTIME'] = (self.input_data['exp_tot'],
                                        'total exposure time')
        hdulist.writeto(self.median_image)

        # delete the various arrays
        for one_item in sci_data:
            del one_item
        del sci_data
        for one_item in wht_data:
            del one_item
        del wht_data
        for one_item in weight_mask_list:
            del one_item
        del weight_mask_list
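
The write-out at the end of run() uses the standard astropy.io.fits pattern; here is a minimal standalone sketch of it, with the output array, file name, and exposure time invented for illustration:

from astropy.io import fits
import numpy as np

combined = np.zeros((10, 10), dtype=np.float32)  # stand-in for result.combArrObj
hdu = fits.PrimaryHDU(combined)
hdu.header['EXPTIME'] = (1200.0, 'total exposure time')
fits.HDUList([hdu]).writeto('median_test.fits', overwrite=True)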
Example #6
def _median(imageObjectList, paramDict):
    """Create a median image from the list of image Objects
       that has been given.
    """

    newmasks = paramDict['median_newmasks']
    comb_type = paramDict['combine_type']
    nlow = paramDict['combine_nlow']
    nhigh = paramDict['combine_nhigh']
    grow = paramDict['combine_grow']
    maskpt = paramDict['combine_maskpt']
    proc_units = paramDict['proc_unit']
    compress = paramDict['compress']
    bufsizeMb = paramDict['combine_bufsize']

    sigma = paramDict["combine_nsigma"]
    sigmaSplit = sigma.split()
    nsigma1 = float(sigmaSplit[0])
    nsigma2 = float(sigmaSplit[1])

    #print "Checking parameters:"
    #print comb_type,nlow,nhigh,grow,maskpt,nsigma1,nsigma2
    if paramDict['combine_lthresh'] is None:
        lthresh = None
    else:
        lthresh = float(paramDict['combine_lthresh'])
    if paramDict['combine_hthresh'] is None:
        hthresh = None
    else:
        hthresh = float(paramDict['combine_hthresh'])

    # the name of the output median file is defined in the output wcs object
    # and stored in the image.outputNames["outMedian"] dict of every imageObject
    medianfile = imageObjectList[0].outputNames["outMedian"]

    # Build the combined array from the single drizzled images.
    # Start by removing any previous products...
    if (os.access(medianfile, os.F_OK)):
        os.remove(medianfile)

    # Define lists for instrument-specific parameters; these values come from
    # the image objects and need to be passed to the minmed routine
    readnoiseList = []
    exposureTimeList = []
    backgroundValueList = []  # list of MDRIZSKY * platescale values
    singleDrizList = []  # these are the input images
    singleWeightList = []  # pointers to the data arrays
    #skylist=[] #the list of platescale values for the images
    _wht_mean = []  # holds maskpt * mean of each wht image

    _single_hdr = None
    virtual = None

    #for each image object
    for image in imageObjectList:
        if virtual is None:
            virtual = image.inmemory

        det_gain = image.getGain(1)
        img_exptime = image._image['sci', 1]._exptime
        native_units = image.native_units
        if lthresh is not None:
            if proc_units.lower() == 'native':
                if native_units.lower() == "counts":
                    lthresh = lthresh * det_gain
                elif native_units.lower() == "counts/s":
                    lthresh = lthresh * img_exptime
        if hthresh is not None:
            if proc_units.lower() == 'native':
                if native_units.lower() == "counts":
                    hthresh = hthresh * det_gain
                elif native_units.lower() == "counts/s":
                    hthresh = hthresh * img_exptime

        singleDriz = image.getOutputName("outSingle")
        singleDriz_name = image.outputNames['outSingle']
        singleWeight = image.getOutputName("outSWeight")
        singleWeight_name = image.outputNames['outSWeight']
        #singleDriz=image.outputNames["outSingle"] #all chips are drizzled to a single output image
        #singleWeight=image.outputNames["outSWeight"]

        # If compression was used, reference ext=1 as CompImageHDU only writes
        # out MEF files, not simple FITS.
        if compress:
            wcs_ext = '[1]'
            wcs_extnum = 1
        else:
            wcs_ext = '[0]'
            wcs_extnum = 0
        if not virtual:
            if isinstance(singleDriz, str):
                iter_singleDriz = singleDriz + wcs_ext
                iter_singleWeight = singleWeight + wcs_ext
            else:
                iter_singleDriz = singleDriz[wcs_extnum]
                iter_singleWeight = singleWeight[wcs_extnum]
        else:
            iter_singleDriz = singleDriz_name + wcs_ext
            iter_singleWeight = singleWeight_name + wcs_ext

        # read in WCS from first single drizzle image to use as WCS for median image
        if _single_hdr is None:
            if virtual:
                _single_hdr = singleDriz[wcs_extnum].header
            else:
                _single_hdr = fits.getheader(singleDriz_name, ext=wcs_extnum)

        _singleImage = iterfile.IterFitsFile(iter_singleDriz)
        if virtual:
            _singleImage.handle = singleDriz
            _singleImage.inmemory = True

        singleDrizList.append(_singleImage)  #add to an array for bookkeeping

        # If it exists, extract the corresponding weight images
        if ((not virtual and os.access(singleWeight, os.F_OK))
                or (virtual and singleWeight)):
            _weight_file = iterfile.IterFitsFile(iter_singleWeight)
            if virtual:
                _weight_file.handle = singleWeight
                _weight_file.inmemory = True

            singleWeightList.append(_weight_file)
            try:
                tmp_mean_value = ImageStats(_weight_file.data,
                                            lower=1e-8,
                                            fields="mean",
                                            nclip=0).mean
            except ValueError:
                tmp_mean_value = 0.0
            _wht_mean.append(tmp_mean_value * maskpt)

            # Extract instrument specific parameters and place in lists

            # If an image has zero exposure time we will
            # redefine that value as '1'.  Although this will cause inaccurate scaling
            # of the data to occur in the 'minmed' combination algorithm, this is a
            # necessary evil since it avoids divide by zero exceptions.  It is more
            # important that the divide by zero exceptions not cause Multidrizzle to
            # crash in the pipeline than it is to raise an exception for this obviously
            # bad data even though this is not the type of data you would wish to process
            # with Multidrizzle.
            #
            # Get the exposure time from the InputImage object
            #
            # MRD 19-May-2011
            # Changed exposureTimeList to take exposure time from img_exptime
            # variable instead of the image._exptime attribute, since
            # image._exptime was just giving 1.
            #
            exposureTimeList.append(img_exptime)

            # Use only "commanded" chips to extract subtractedSky and rdnoise:
            rdnoise = 0.0
            nchips = 0
            bsky = None  # minimum sky across **used** chips

            for chip in image.returnAllChips(extname=image.scienceExt):
                # compute sky value as sky/pixel using the single_drz pixel scale
                if bsky is None or bsky > chip.subtractedSky:
                    bsky = chip.subtractedSky

                # Extract the readnoise value for the chip
                rdnoise += (chip._rdnoise)**2
                nchips += 1

            if bsky is None:
                bsky = 0.0

            if nchips > 0:
                rdnoise = math.sqrt(rdnoise / nchips)

            backgroundValueList.append(bsky)
            readnoiseList.append(rdnoise)

            ## compute sky value as sky/pixel using the single_drz pixel scale
            #bsky = image._image[image.scienceExt,1].subtractedSky# * (image.outputValues['scale']**2)
            #backgroundValueList.append(bsky)

            ## Extract the readnoise value for the chip
            #sci_chip = image._image[image.scienceExt,1]
            #readnoiseList.append(sci_chip._rdnoise) #verify this is calculated correctly in the image object

            print("reference sky value for image ", image._filename, " is ",
                  backgroundValueList[-1])
        #
        # END Loop over input image list
        #

    # create an array for the median output image, use the size of the first image in the list
    medianImageArray = np.zeros(singleDrizList[0].shape,
                                dtype=singleDrizList[0].type())

    if (comb_type.lower() == "minmed") and not newmasks:
        # Issue a warning if minmed is being run with newmasks turned off.
        print(
            '\nWARNING: Creating median image without the application of bad pixel masks!\n'
        )

    # create the master list to be used by the image iterator
    masterList = []
    masterList.extend(singleDrizList)
    masterList.extend(singleWeightList)

    print('\n')

    # Specify the location of the drz image sections
    startDrz = 0
    endDrz = len(singleDrizList) + startDrz

    # Specify the location of the wht image sections
    startWht = len(singleDrizList) + startDrz
    endWht = startWht + len(singleWeightList)
    _weight_mask_list = None

    # Fire up the image iterator
    #
    # The overlap value needs to be set to 2*grow in order to
    # avoid edge effects when scrolling down the image, and to
    # ensure that the last section returned from the iterator
    # has enough rows to span the kernel used in the boxcar method
    # within minmed.
    _overlap = 2 * int(grow)

    #Start by computing the buffer size for the iterator
    _imgarr = masterList[0].data
    _bufsize = nimageiter.BUFSIZE
    if bufsizeMb is not None:
        _bufsize *= bufsizeMb
    _imgrows = _imgarr.shape[0]
    _nrows = nimageiter.computeBuffRows(_imgarr)
    #        _overlaprows = _nrows - (_overlap+1)
    #        _niter = int(_imgrows/_nrows)
    #        _niter = 1 + int( (_imgrows - _overlaprows)/_nrows)
    # computeNumberBuff returns a (niter, buffrows) tuple
    _niter, _nrows = nimageiter.computeNumberBuff(_imgrows, _nrows, _overlap)
    _lastrows = _imgrows - (_niter * (_nrows - _overlap))
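    # Worked example with invented numbers: _imgrows = 1000, _nrows = 300,
    # _overlap = 4 means each iteration advances by _nrows - _overlap = 296
    # rows, so the final section holds _imgrows - _niter * 296 rows; if that
    # is smaller than _overlap + 1, the buffer is enlarged below.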

    # check to see if this buffer size will leave enough rows for
    # the section returned on the last iteration
    if _lastrows < _overlap + 1:
        _delta_rows = (_overlap + 1 - _lastrows) // _niter
        if 0 <= _delta_rows < 1:
            _delta_rows = 1
        _bufsize += (_imgarr.shape[1] * _imgarr.itemsize) * _delta_rows

    if not virtual:
        masterList[0].close()
    del _imgarr

    for imageSectionsList, prange in nimageiter.FileIter(masterList,
                                                         overlap=_overlap,
                                                         bufsize=_bufsize):

        if newmasks:
            # Build new masks from the single drizzled images.
            _weight_mask_list = []
            listIndex = 0
            for _weight_arr in imageSectionsList[startWht:endWht]:
                # Initialize an output mask array to zeros (0 = good pixel);
                # a fresh mask is built for each weight image section
                _weight_mask = np.zeros(_weight_arr.shape, dtype=np.uint8)
                """ Generate new pixel mask file for median step.
                This mask will be created from the single-drizzled
                weight image for this image.

                The mean of the weight array will be computed and all
                pixels with values less than 0.7 of the mean will be flagged
                as bad in this mask.  This mask will then be used when
                creating the median image.
                """
                # Compute image statistics
                _mean = _wht_mean[listIndex]

                # 0 means good, 1 means bad here...
                np.putmask(_weight_mask, np.less(_weight_arr, _mean), 1)
                #_weight_mask.info()
                _weight_mask_list.append(_weight_mask)
                listIndex += 1

        # Do MINMED
        if ("minmed" in comb_type.lower()):
            if comb_type.lower()[0] == 'i':
                # set up use of 'imedian'/'imean' in minmed algorithm
                fillval = True
            else:
                fillval = False

            if (_weight_mask_list in [None, []]):
                _weight_mask_list = None

            # Create the combined array object using the minmed algorithm
            result = minmed(
                imageSectionsList[startDrz:endDrz],  # list of input data to be combined.
                imageSectionsList[startWht:endWht],  # list of input data weight images to be combined.
                readnoiseList,  # list of readnoise values to use for the input images.
                exposureTimeList,  # list of exposure times to use for the input images.
                backgroundValueList,  # list of image background values to use for the input images
                weightMaskList=_weight_mask_list,  # list of input data weight masks to use for pixel rejection.
                combine_grow=grow,  # Radius (pixels) for neighbor rejection
                combine_nsigma1=nsigma1,  # Significance for accepting minimum instead of median
                combine_nsigma2=nsigma2,  # Significance for accepting minimum instead of median
                fillval=fillval  # turn on use of imedian/imean
            )


            # medianOutput[prange[0]:prange[1],:] = result.out_file1
            # minOutput[prange[0]:prange[1],:] = result.out_file2

        # DO NUMCOMBINE
        else:
            # Create the combined array object using the numcombine task
            result = numcombine.numCombine(imageSectionsList[startDrz:endDrz],
                                           numarrayMaskList=_weight_mask_list,
                                           combinationType=comb_type.lower(),
                                           nlow=nlow,
                                           nhigh=nhigh,
                                           upper=hthresh,
                                           lower=lthresh)

        # We need to account for any specified overlap when writing out
        # the processed image sections to the final output array.
        if prange[1] != _imgrows:
            medianImageArray[prange[0]:prange[1] - _overlap, :] = \
                result.combArrObj[:-_overlap, :]
        else:
            medianImageArray[prange[0]:prange[1], :] = result.combArrObj

    del result
    del _weight_mask_list
    _weight_mask_list = None

    # Write out the combined image
    # use the header from the first single drizzled image in the list
    #header=fits.getheader(imageObjectList[0].outputNames["outSingle"])
    _pf = _writeImage(medianImageArray, inputHeader=_single_hdr)

    if virtual:
        mediandict = {}
        mediandict[medianfile] = _pf
        for img in imageObjectList:
            img.saveVirtualOutputs(mediandict)
    else:
        try:
            print("Saving output median image to: ", medianfile)
            _pf.writeto(medianfile)
        except IOError:
            msg = "Problem writing file: " + medianfile
            print(msg)
            raise IOError(msg)

    del _pf

    # Always close any files opened to produce median image; namely,
    # single drizzle images and singly-drizzled weight images
    #

    for img in singleDrizList:
        if not virtual:
            img.close()
    singleDrizList = []

    # Close all singly drizzled weight images used to create median image.
    for img in singleWeightList:
        if not virtual:
            img.close()
    singleWeightList = []

    # If new median masks were turned on, release those mask arrays
    if _weight_mask_list:
        for arr in _weight_mask_list:
            del arr
        _weight_mask_list = None

    del masterList
    del medianImageArray
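
The overlapped section iteration that _median performs with nimageiter.FileIter can be sketched with plain numpy, independent of the stsci iterator APIs; all names and sizes here are illustrative:

import numpy as np

img = np.arange(20, dtype=float).reshape(10, 2)   # stand-in full image
out = np.empty_like(img)                          # final output array
nrows, overlap = 5, 2                             # buffer rows, 2*grow overlap

start = 0
while start < img.shape[0]:
    stop = min(start + nrows, img.shape[0])
    section = img[start:stop]                     # process this section here
    if stop != img.shape[0]:
        # trim the overlap on write-back, as _median does
        out[start:stop - overlap] = section[:-overlap]
        start = stop - overlap                    # next section re-reads overlap
    else:
        out[start:stop] = section                 # last section written whole
        start = stop

assert np.array_equal(out, img)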
Example #7
def _median(imageObjectList, paramDict):
    """Create a median image from the list of image Objects
       that has been given.
    """

    newmasks = paramDict['median_newmasks']
    comb_type = paramDict['combine_type']
    nlow = paramDict['combine_nlow']
    nhigh = paramDict['combine_nhigh']
    grow = paramDict['combine_grow']
    maskpt = paramDict['combine_maskpt']
    proc_units = paramDict['proc_unit']
    compress = paramDict['compress']
    bufsizeMb = paramDict['combine_bufsize']

    sigma=paramDict["combine_nsigma"]
    sigmaSplit=sigma.split()
    nsigma1 = float(sigmaSplit[0])
    nsigma2 = float(sigmaSplit[1])

    #print "Checking parameters:"
    #print comb_type,nlow,nhigh,grow,maskpt,nsigma1,nsigma2
    if paramDict['combine_lthresh'] is None:
        lthresh = None
    else:
        lthresh = float(paramDict['combine_lthresh'])
    if paramDict['combine_hthresh'] is None:
        hthresh = None
    else:
        hthresh = float(paramDict['combine_hthresh'])

    #the name of the output median file is defined in the output wcs object
    #and stored in the image.outputNames["outMedian"] dict of every imageObject
    medianfile=imageObjectList[0].outputNames["outMedian"]


    """ Builds combined array from single drizzled images."""
    # Start by removing any previous products...
    if(os.access(medianfile,os.F_OK)):
        os.remove(medianfile)


    # Define lists for instrument-specific parameters; these values come from
    # the image objects and need to be passed to the minmed routine
    readnoiseList = []
    exposureTimeList = []
    backgroundValueList = [] # list of MDRIZSKY * platescale values
    singleDrizList=[] # these are the input images
    singleWeightList=[] # pointers to the data arrays
    #skylist=[] #the list of platescale values for the images
    _wht_mean = [] # holds maskpt * mean of each wht image

    _single_hdr = None
    virtual = None

    #for each image object
    for image in imageObjectList:
        if virtual is None:
            virtual = image.inmemory

        det_gain = image.getGain(1)
        img_exptime = image._image['sci',1]._exptime
        native_units = image.native_units
        if lthresh is not None:
            if proc_units.lower() == 'native':
                if native_units.lower() == "counts":
                    lthresh = lthresh * det_gain
                elif native_units.lower() == "counts/s":
                    lthresh = lthresh * img_exptime
        if hthresh is not None:
            if proc_units.lower() == 'native':
                if native_units.lower() == "counts":
                    hthresh = hthresh * det_gain
                elif native_units.lower() == "counts/s":
                    hthresh = hthresh * img_exptime

        singleDriz = image.getOutputName("outSingle")
        singleDriz_name = image.outputNames['outSingle']
        singleWeight = image.getOutputName("outSWeight")
        singleWeight_name = image.outputNames['outSWeight']
        #singleDriz=image.outputNames["outSingle"] #all chips are drizzled to a single output image
        #singleWeight=image.outputNames["outSWeight"]

        # If compression was used, reference ext=1 as CompImageHDU only writes
        # out MEF files, not simple FITS.
        if compress:
            wcs_ext = '[1]'
            wcs_extnum = 1
        else:
            wcs_ext = '[0]'
            wcs_extnum = 0
        if not virtual:
            if isinstance(singleDriz,str):
                iter_singleDriz = singleDriz + wcs_ext
                iter_singleWeight = singleWeight + wcs_ext
            else:
                iter_singleDriz = singleDriz[wcs_extnum]
                iter_singleWeight = singleWeight[wcs_extnum]
        else:
            iter_singleDriz = singleDriz_name + wcs_ext
            iter_singleWeight = singleWeight_name + wcs_ext
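        # e.g. with compress=False on a non-virtual run, a hypothetical
        # singleDriz of 'foo_single.fits' gives iter_singleDriz
        # 'foo_single.fits[0]' (ext [1] when compression wrote a MEF)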

        # read in WCS from first single drizzle image to use as WCS for median image
        if _single_hdr is None:
            if virtual:
                _single_hdr = singleDriz[wcs_extnum].header
            else:
                _single_hdr = fits.getheader(singleDriz_name,ext=wcs_extnum)

        _singleImage=iterfile.IterFitsFile(iter_singleDriz)
        if virtual:
            _singleImage.handle = singleDriz
            _singleImage.inmemory = True

        singleDrizList.append(_singleImage) #add to an array for bookkeeping

        # If it exists, extract the corresponding weight images
        if (not virtual and os.access(singleWeight,os.F_OK)) or (
                virtual and singleWeight):
            _weight_file=iterfile.IterFitsFile(iter_singleWeight)
            if virtual:
                _weight_file.handle = singleWeight
                _weight_file.inmemory = True

            singleWeightList.append(_weight_file)
            try:
                tmp_mean_value = ImageStats(_weight_file.data, lower=1e-8,
                    fields="mean", nclip=0).mean
            except ValueError:
                tmp_mean_value = 0.0
            _wht_mean.append(tmp_mean_value * maskpt)
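            # e.g. a weight image whose clipped mean is 10.0 combined with
            # the default maskpt of 0.7 stores 7.0 as its rejection threshold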

            # Extract instrument specific parameters and place in lists

            # If an image has zero exposure time we redefine that value as '1'.
            # Although this causes inaccurate scaling of the data in the
            # 'minmed' combination algorithm, it is a necessary evil since it
            # avoids divide-by-zero exceptions. It is more important that
            # divide-by-zero exceptions not crash Multidrizzle in the pipeline
            # than it is to raise an exception for this obviously bad data,
            # even though this is not the type of data you would wish to
            # process with Multidrizzle.
            #
            # Get the exposure time from the InputImage object
            #
            # MRD 19-May-2011
            # Changed exposureTimeList to take the exposure time from the
            # img_exptime variable instead of the image._exptime attribute,
            # since image._exptime was just giving 1.
            #
            exposureTimeList.append(img_exptime)

            # Use only "commanded" chips to extract subtractedSky and rdnoise:
            rdnoise = 0.0
            nchips  = 0
            bsky    = None # minimum sky across **used** chips

            for chip in image.returnAllChips(extname=image.scienceExt):
                # compute sky value as sky/pixel using the single_drz pixel scale
                if bsky is None or bsky > chip.subtractedSky:
                    bsky = chip.subtractedSky

                # Extract the readnoise value for the chip
                rdnoise += (chip._rdnoise)**2
                nchips  += 1

            if bsky is None:
                bsky = 0.0

            if nchips > 0:
                rdnoise = math.sqrt(rdnoise/nchips)
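            # (RMS over chips: e.g. chip readnoises of 5.0 and 12.0 give
            # sqrt((25.0 + 144.0) / 2) ~= 9.19)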

            backgroundValueList.append(bsky)
            readnoiseList.append(rdnoise)

            ## compute sky value as sky/pixel using the single_drz pixel scale
            #bsky = image._image[image.scienceExt,1].subtractedSky# * (image.outputValues['scale']**2)
            #backgroundValueList.append(bsky)

            ## Extract the readnoise value for the chip
            #sci_chip = image._image[image.scienceExt,1]
            #readnoiseList.append(sci_chip._rdnoise) #verify this is calculated correctly in the image object

            print("reference sky value for image ",image._filename," is ", backgroundValueList[-1])
        #
        # END Loop over input image list
        #

    # create an array for the median output image, use the size of the first image in the list
    medianImageArray = np.zeros(singleDrizList[0].shape,dtype=singleDrizList[0].type())

    if comb_type.lower() == "minmed" and not newmasks:
        # Issue a warning if minmed is being run with newmasks turned off.
        print('\nWARNING: Creating median image without the application of bad pixel masks!\n')

    # create the master list to be used by the image iterator
    masterList = []
    masterList.extend(singleDrizList)
    masterList.extend(singleWeightList)

    print('\n')

    # Specify the location of the drz image sections
    startDrz = 0
    endDrz = len(singleDrizList)+startDrz

    # Specify the location of the wht image sections
    startWht = len(singleDrizList)+startDrz
    endWht = startWht + len(singleWeightList)
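    # e.g. with 4 input images masterList holds 8 entries: indices 0:4 are
    # the drizzled science sections (startDrz:endDrz) and 4:8 the matching
    # weight sections (startWht:endWht)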
    _weight_mask_list = None

    # Fire up the image iterator
    #
    # The overlap value needs to be set to 2*grow in order to
    # avoid edge effects when scrolling down the image, and to
    # ensure that the last section returned from the iterator
    # has enough rows to span the kernel used in the boxcar method
    # within minmed.
    _overlap = 2*int(grow)
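    # e.g. grow=1 gives _overlap=2, so consecutive sections share two rows,
    # which should let the (2*grow+1)-row boxcar kernel in minmed straddle
    # section boundaries without edge effects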

    #Start by computing the buffer size for the iterator
    _imgarr = masterList[0].data
    _bufsize = nimageiter.BUFSIZE
    if bufsizeMb is not None:
        _bufsize *= bufsizeMb
    _imgrows = _imgarr.shape[0]
    _nrows = nimageiter.computeBuffRows(_imgarr)
    # _overlaprows = _nrows - (_overlap+1)
    # _niter = int(_imgrows/_nrows)
    # _niter = 1 + int( (_imgrows - _overlaprows)/_nrows)
    niter = nimageiter.computeNumberBuff(_imgrows, _nrows, _overlap)
    # computeNumberBuff actually returns a (niter, buffrows) tuple
    _niter = niter[0]
    _nrows = niter[1]
    _lastrows = _imgrows - (_niter*(_nrows-_overlap))
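    # Illustrative numbers only: if computeNumberBuff returned _niter=3 and
    # _nrows=1100 for a 4096-row image with _overlap=2, each iteration would
    # advance _nrows-_overlap=1098 rows, leaving _lastrows = 4096-3*1098 = 802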

    # check to see if this buffer size will leave enough rows for
    # the section returned on the last iteration
    if _lastrows < _overlap+1:
        _delta_rows = (_overlap+1 - _lastrows)//_niter
        if _delta_rows < 1 and _delta_rows >= 0:
            _delta_rows = 1
        _bufsize += (_imgarr.shape[1]*_imgarr.itemsize) * _delta_rows

    if not virtual:
        masterList[0].close()
    del _imgarr

    for imageSectionsList,prange in nimageiter.FileIter(masterList,overlap=_overlap,bufsize=_bufsize):

        if newmasks:
            """ Build new masks from single drizzled images. """
            _weight_mask_list = []
            listIndex = 0
            for _weight_arr in imageSectionsList[startWht:endWht]:
                # Initialize an output mask array to zeros (0 means good);
                # a fresh array is created for each weight-image section
                _weight_mask = np.zeros(_weight_arr.shape, dtype=np.uint8)

                """ Generate new pixel mask file for median step.
                This mask will be created from the single-drizzled
                weight image for this image.

                The mean of the weight array will be computed and all
                pixels with values less than 0.7 of the mean will be flagged
                as bad in this mask.  This mask will then be used when
                creating the median image.
                """
                # Look up the precomputed, maskpt-scaled mean for this weight image
                _mean = _wht_mean[listIndex]

                # 0 means good, 1 means bad here...
                np.putmask(_weight_mask, np.less(_weight_arr,_mean), 1)
                #_weight_mask.info()
                _weight_mask_list.append(_weight_mask)
                listIndex += 1
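            # e.g. a weight section holding [0., 3., 8., 9.] with a scaled
            # mean (_wht_mean) of 4.9 produces the mask [1, 1, 0, 0]: the two
            # low-weight pixels will be rejected from the median stack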

        # Do MINMED
        if ( "minmed" in comb_type.lower()):
            if comb_type.lower()[0] == 'i':
                # set up use of 'imedian'/'imean' in minmed algorithm
                fillval = True
            else:
                fillval = False

            if _weight_mask_list in [None, []]:
                _weight_mask_list = None

            # Create the combined array object using the minmed algorithm
            result = minmed(imageSectionsList[startDrz:endDrz],  # input data sections to be combined
                            imageSectionsList[startWht:endWht],  # matching input weight sections
                            readnoiseList,                       # readnoise values for the input images
                            exposureTimeList,                    # exposure times for the input images
                            backgroundValueList,                 # image background values for the input images
                            weightMaskList=_weight_mask_list,    # input weight masks for pixel rejection
                            combine_grow=grow,                   # radius (pixels) for neighbor rejection
                            combine_nsigma1=nsigma1,             # significance for accepting minimum instead of median
                            combine_nsigma2=nsigma2,             # significance for accepting minimum instead of median
                            fillval=fillval)                     # turn on use of imedian/imean
            # medianOutput[prange[0]:prange[1],:] = result.out_file1
            # minOutput[prange[0]:prange[1],:] = result.out_file2

        # DO NUMCOMBINE
        else:
            # Create the combined array object using the numcombine task
            result = numcombine.numCombine(imageSectionsList[startDrz:endDrz],
                                    numarrayMaskList=_weight_mask_list,
                                    combinationType=comb_type.lower(),
                                    nlow=nlow,
                                    nhigh=nhigh,
                                    upper=hthresh,
                                    lower=lthresh
                                )

        # We need to account for any specified overlap when writing out
        # the processed image sections to the final output array.
        if prange[1] != _imgrows:
            medianImageArray[prange[0]:prange[1]-_overlap,:] = result.combArrObj[:-_overlap,:]
        else:
            medianImageArray[prange[0]:prange[1],:] = result.combArrObj
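        # e.g. with _overlap=2 and prange=(0, 1100) on a 4096-row image only
        # rows 0:1098 of this section are committed; the final section
        # (where prange[1] == _imgrows) is written out in full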


    del result
    del _weight_mask_list
    _weight_mask_list = None

    # Write out the combined image
    # use the header from the first single drizzled image in the list
    #header=fits.getheader(imageObjectList[0].outputNames["outSingle"])
    _pf = _writeImage(medianImageArray, inputHeader=_single_hdr)

    if virtual:
        mediandict = {}
        mediandict[medianfile] = _pf
        for img in imageObjectList:
            img.saveVirtualOutputs(mediandict)
    else:
        try:
            print("Saving output median image to: ",medianfile)
            _pf.writeto(medianfile)
        except IOError:
            msg = "Problem writing file: "+medianfile
            print(msg)
            raise IOError(msg)

    del _pf

    # Always close any files opened to produce median image; namely,
    # single drizzle images and singly-drizzled weight images
    #

    for img in singleDrizList:
        if not virtual:
            img.close()
    singleDrizList = []

    # Close all singly drizzled weight images used to create median image.
    for img in singleWeightList:
        if not virtual:
            img.close()
    singleWeightList = []

    # If new median masks were turned on, free those mask arrays
    if _weight_mask_list:
        for arr in _weight_mask_list:
            del arr
        _weight_mask_list = None

    del masterList
    del medianImageArray
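
For context, a minimal usage sketch of _median follows. Every paramDict key mirrors a lookup made at the top of the function; the values chosen and the imageObjectList placeholder are illustrative assumptions, not a documented drizzlepac API.

# Minimal usage sketch (hypothetical values; the keys mirror the paramDict
# lookups at the top of _median, the values are assumptions)
paramDict = {
    'median_newmasks': True,    # build bad-pixel masks from weight images
    'combine_type': 'minmed',   # or 'median', 'imedian', 'imean', ...
    'combine_nlow': 0,
    'combine_nhigh': 1,
    'combine_grow': 1,
    'combine_maskpt': 0.7,      # reject pixels below 0.7 * mean weight
    'combine_nsigma': '4 3',    # parsed into nsigma1=4.0 and nsigma2=3.0
    'combine_lthresh': None,
    'combine_hthresh': None,
    'combine_bufsize': None,
    'proc_unit': 'native',
    'compress': False,
}

# imageObjectList would come from the pipeline's image-object setup;
# it is shown here purely as a placeholder.
_median(imageObjectList, paramDict)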