Example 1
def do_median(drizzle_groups_sci, drizzle_groups_wht, **pars):
    # start by interpreting input parameters
    nlow = pars.get('nlow', 0)
    nhigh = pars.get('nhigh', 0)
    high_threshold = pars.get('hthresh', None)
    low_threshold = pars.get('lthresh', None)
    nsigma = pars.get('nsigma', '4 3')
    maskpt = pars.get('maskpt', 0.7)

    # Perform additional interpretation of some parameters
    sigmaSplit = nsigma.split()
    nsigma1 = float(sigmaSplit[0])
    nsigma2 = float(sigmaSplit[1])

    # Thresholds may be passed in as strings; treat an empty string or a
    # negative value as "no threshold".
    if high_threshold is not None:
        if isinstance(high_threshold, str) and high_threshold.strip() == "":
            high_threshold = None
        elif float(high_threshold) < 0:
            high_threshold = None
    if low_threshold is not None:
        if isinstance(low_threshold, str) and low_threshold.strip() == "":
            low_threshold = None
        elif float(low_threshold) < 0:
            low_threshold = None

    if high_threshold is not None:
        high_threshold = float(high_threshold)
    if low_threshold is not None:
        low_threshold = float(low_threshold)

    _weight_mask_list = []

    for weight_arr in drizzle_groups_wht:
        # Initialize an output mask array to zeros;
        # a new mask array is built for each weight image
        _weight_mask = np.zeros(weight_arr.shape, dtype=np.uint8)
        try:
            tmp_mean_value = ImageStats(weight_arr,
                                        lower=1e-8,
                                        fields="mean",
                                        nclip=0).mean
        except ValueError:
            tmp_mean_value = 0.0
        _wht_mean = tmp_mean_value * maskpt
        # 0 means good, 1 means bad here...
        np.putmask(_weight_mask, np.less(weight_arr, _wht_mean), 1)
        _weight_mask_list.append(_weight_mask)

    # Create the combined array object using the numcombine task
    result = numcombine.numCombine(drizzle_groups_sci,
                                   numarrayMaskList=_weight_mask_list,
                                   combinationType="median",
                                   nlow=nlow,
                                   nhigh=nhigh,
                                   upper=high_threshold,
                                   lower=low_threshold)
    median_array = result.combArrObj

    del _weight_mask_list

    return median_array
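
A minimal usage sketch for do_median() above. It assumes the function and its stsci dependencies (stsci.imagestats, stsci.image) are importable; the array shapes and parameter values below are invented for illustration.

import numpy as np

# Hypothetical inputs: two drizzled science frames and their weight maps.
sci1 = np.random.normal(100.0, 5.0, size=(64, 64)).astype(np.float32)
sci2 = np.random.normal(100.0, 5.0, size=(64, 64)).astype(np.float32)
wht1 = np.ones((64, 64), dtype=np.float32)
wht2 = np.ones((64, 64), dtype=np.float32)

median = do_median([sci1, sci2], [wht1, wht2],
                   nlow=0, nhigh=1,   # reject the single highest value per pixel
                   maskpt=0.7,        # flag pixels below 70% of the mean weight
                   nsigma='4 3')      # parsed here but only used by the minmed path
print(median.shape)  # (64, 64)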
Example 2
def getcorr():
    imstat = ImageStats(image.science[i][BMask],
                        'midpt',
                        lower=lower,
                        upper=upper,
                        nclip=0)
    if imstat.npix != NPix:
        raise ValueError(f'imstat.npix ({imstat.npix}) != '
                         f'NPix ({NPix})')
    return imstat.midpt
Example 3
    def calc_sky(self, data):
        """ Computes statistics on data.

        Parameters
        ----------
        data : numpy.ndarray
            A numpy array of values for which the statistics are to be computed.

        Returns
        -------
        statistics : tuple
            A tuple of two values: (`skyvalue`, `npix`), where `skyvalue` is the
            statistic specified by the `skystat` parameter during the
            initialization of the `SkyStats` object and `npix` is the number of
            pixels used in computing the statistic reported in `skyvalue`.

        """
        imstat = ImageStats(image=data, fields=self._fields, **(self._kwargs))
        self.skyval = self._skystat(imstat)
        self.npix = imstat.npix
        return (self.skyval, self.npix)
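
A short usage sketch for calc_sky(). It assumes a SkyStats-style object exposing this method; the constructor call shown (skystat='median') is an assumption based on the docstring, not taken from this snippet.

import numpy as np

# Hypothetical usage: compute a median-based sky value on a synthetic frame.
data = np.random.normal(10.0, 2.0, size=(256, 256)).astype(np.float32)
stats = SkyStats(skystat='median')   # constructor arguments are assumed
skyval, npix = stats.calc_sky(data)
print(skyval, npix)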
Example 4
def _median(imageObjectList, paramDict):
    """Create a median image from the list of image Objects
       that has been given.
    """
    newmasks = paramDict['median_newmasks']
    comb_type = paramDict['combine_type'].lower()
    nlow = paramDict['combine_nlow']
    nhigh = paramDict['combine_nhigh']
    grow = paramDict['combine_grow'] if 'minmed' in comb_type else 0
    maskpt = paramDict['combine_maskpt']
    proc_units = paramDict['proc_unit']
    compress = paramDict['compress']
    bufsizeMB = paramDict['combine_bufsize']

    sigma = paramDict["combine_nsigma"]
    sigmaSplit = sigma.split()
    nsigma1 = float(sigmaSplit[0])
    nsigma2 = float(sigmaSplit[1])

    if paramDict['combine_lthresh'] is None:
        lthresh = None
    else:
        lthresh = float(paramDict['combine_lthresh'])

    if paramDict['combine_hthresh'] is None:
        hthresh = None
    else:
        hthresh = float(paramDict['combine_hthresh'])

    # The name of the output median file is defined in the output wcs object and
    # stored in the image.outputValues["outMedian"] dict of every imageObject
    medianfile = imageObjectList[0].outputNames["outMedian"]

    # Build combined array from single drizzled images.

    # Start by removing any previous products...
    if os.access(medianfile, os.F_OK):
        os.remove(medianfile)

    # Define lists for instrument-specific parameters; these should be in
    # the image objects and need to be passed to the minmed routine
    readnoiseList = []
    exposureTimeList = []
    backgroundValueList = []  # list of  MDRIZSKY *platescale values
    singleDrizList = []  # these are the input images
    singleWeightList = []  # pointers to the data arrays
    wht_mean = []  # Compute the mean value of each wht image

    single_hdr = None
    virtual = None

    # for each image object
    for image in imageObjectList:
        if virtual is None:
            virtual = image.inmemory

        det_gain = image.getGain(1)
        img_exptime = image._image['sci', 1]._exptime
        native_units = image.native_units
        native_units_lc = native_units.lower()

        if proc_units.lower() == 'native':
            if native_units_lc not in [
                    'counts', 'electrons', 'counts/s', 'electrons/s'
            ]:
                raise ValueError(
                    "Unexpected native units: '{}'".format(native_units))

            if lthresh is not None:
                if native_units_lc.startswith('counts'):
                    lthresh *= det_gain
                if native_units_lc.endswith('/s'):
                    lthresh *= img_exptime

            if hthresh is not None:
                if native_units_lc.startswith('counts'):
                    hthresh *= det_gain
                if native_units_lc.endswith('/s'):
                    hthresh *= img_exptime

        singleDriz = image.getOutputName("outSingle")
        singleDriz_name = image.outputNames['outSingle']
        singleWeight = image.getOutputName("outSWeight")
        singleWeight_name = image.outputNames['outSWeight']

        # If compression was used, reference ext=1 as CompImageHDU only writes
        # out MEF files, not simple FITS.
        if compress:
            wcs_ext = '[1]'
            wcs_extnum = 1
        else:
            wcs_ext = '[0]'
            wcs_extnum = 0

        if not virtual:
            if isinstance(singleDriz, str):
                iter_singleDriz = singleDriz + wcs_ext
                iter_singleWeight = singleWeight + wcs_ext
            else:
                iter_singleDriz = singleDriz[wcs_extnum]
                iter_singleWeight = singleWeight[wcs_extnum]
        else:
            iter_singleDriz = singleDriz_name + wcs_ext
            iter_singleWeight = singleWeight_name + wcs_ext

        # read in WCS from first single drizzle image to use as WCS for
        # median image
        if single_hdr is None:
            if virtual:
                single_hdr = singleDriz[wcs_extnum].header
            else:
                single_hdr = fits.getheader(singleDriz_name,
                                            ext=wcs_extnum,
                                            memmap=False)

        single_image = iterfile.IterFitsFile(iter_singleDriz)
        if virtual:
            single_image.handle = singleDriz
            single_image.inmemory = True

        singleDrizList.append(single_image)  # add to an array for bookkeeping

        # If it exists, extract the corresponding weight images
        if (not virtual
                and os.access(singleWeight, os.F_OK)) or (virtual
                                                          and singleWeight):
            weight_file = iterfile.IterFitsFile(iter_singleWeight)
            if virtual:
                weight_file.handle = singleWeight
                weight_file.inmemory = True

            singleWeightList.append(weight_file)
            try:
                tmp_mean_value = ImageStats(weight_file.data,
                                            lower=1e-8,
                                            fields="mean",
                                            nclip=0).mean
            except ValueError:
                tmp_mean_value = 0.0
            wht_mean.append(tmp_mean_value * maskpt)

            # Extract instrument specific parameters and place in lists

            # If an image has zero exposure time we will
            # redefine that value as '1'.  Although this will cause inaccurate
            # scaling of the data to occur in the 'minmed' combination
            # algorithm, this is a necessary evil since it avoids divide by
            # zero exceptions.  It is more important that the divide by zero
            # exceptions not cause AstroDrizzle to crash in the pipeline than
            # it is to raise an exception for this obviously bad data even
            # though this is not the type of data you would wish to process
            # with AstroDrizzle.
            #
            # Get the exposure time from the InputImage object
            #
            # MRD 19-May-2011
            # Changed exposureTimeList to take exposure time from img_exptime
            # variable instead of the image._exptime attribute, since
            # image._exptime was just giving 1.
            #
            exposureTimeList.append(img_exptime)

            # Use only "commanded" chips to extract subtractedSky and rdnoise:
            rdnoise = 0.0
            nchips = 0
            bsky = None  # minimum sky across **used** chips

            for chip in image.returnAllChips(extname=image.scienceExt):
                # compute sky value as sky/pixel using the single_drz
                # pixel scale:
                if bsky is None or bsky > chip.subtractedSky:
                    bsky = chip.subtractedSky * chip._conversionFactor

                # Extract the readnoise value for the chip
                rdnoise += chip._rdnoise**2
                nchips += 1

            if bsky is None:
                bsky = 0.0

            if nchips > 0:
                rdnoise = math.sqrt(rdnoise / nchips)

            backgroundValueList.append(bsky)
            readnoiseList.append(rdnoise)

            print("reference sky value for image '{}' is {}".format(
                image._filename, backgroundValueList[-1]))
        #
        # END Loop over input image list
        #

    # create an array for the median output image, use the size of the first
    # image in the list. Store other useful image characteristics:
    single_driz_data = singleDrizList[0].data
    data_item_size = single_driz_data.itemsize
    single_data_dtype = single_driz_data.dtype
    imrows, imcols = single_driz_data.shape

    medianImageArray = np.zeros_like(single_driz_data)

    del single_driz_data

    if comb_type == "minmed" and not newmasks:
        # Issue a warning if minmed is being run with newmasks turned off.
        print('\nWARNING: Creating median image without the application of '
              'bad pixel masks!\n')

    # The overlap value needs to be set to 2*grow in order to
    # avoid edge effects when scrolling down the image, and to
    # ensure that the last section returned from the iterator
    # has enough rows to span the kernel used in the boxcar method
    # within minmed.
    overlap = 2 * grow
    buffsize = BUFSIZE if bufsizeMB is None else (BUFSIZE * bufsizeMB)
    section_nrows = min(imrows, int(buffsize / (imcols * data_item_size)))

    if section_nrows == 0:
        buffsize = imcols * data_item_size
        print("WARNING: Buffer size is too small to hold a single row.\n"
              "         Buffer size size will be increased to minimal "
              "required: {}MB".format(float(buffsize) / 1048576.0))
        section_nrows = 1

    if section_nrows < overlap + 1:
        new_grow = int((section_nrows - 1) / 2)
        if section_nrows == imrows:
            print("'grow' parameter is too large for actual image size. "
                  "Reducing 'grow' to {}".format(new_grow))
        else:
            print("'grow' parameter is too large for requested buffer size. "
                  "Reducing 'grow' to {}".format(new_grow))
        grow = new_grow
        overlap = 2 * grow

    nbr = section_nrows - overlap
    nsec = (imrows - overlap) // nbr
    if (imrows - overlap) % nbr > 0:
        nsec += 1

    for k in range(nsec):
        e1 = k * nbr
        e2 = e1 + section_nrows
        u1 = grow
        u2 = u1 + nbr

        if k == 0:  # first section
            u1 = 0

        if k == nsec - 1:  # last section
            e2 = min(e2, imrows)
            e1 = min(e1, e2 - overlap - 1)
            u2 = e2 - e1

        imdrizSectionsList = np.empty((len(singleDrizList), e2 - e1, imcols),
                                      dtype=single_data_dtype)
        for i, w in enumerate(singleDrizList):
            imdrizSectionsList[i, :, :] = w[e1:e2]

        if singleWeightList:
            weightSectionsList = np.empty(
                (len(singleWeightList), e2 - e1, imcols),
                dtype=single_data_dtype)
            for i, w in enumerate(singleWeightList):
                weightSectionsList[i, :, :] = w[e1:e2]
        else:
            weightSectionsList = None

        weight_mask_list = None

        if newmasks and weightSectionsList is not None:
            # Build new masks from single drizzled images.
            # Generate new pixel mask file for median step.
            # This mask will be created from the single-drizzled
            # weight image for this image.

            # The mean of the weight array will be computed and all
            # pixels with values less than 0.7 of the mean will be flagged
            # as bad in this mask. This mask will then be used when
            # creating the median image.
            # 0 means good, 1 means bad here...
            weight_mask_list = np.less(
                weightSectionsList,
                np.asarray(wht_mean)[:, None, None]).astype(np.uint8)

        if 'minmed' in comb_type:  # Do MINMED
            # set up use of 'imedian'/'imean' in minmed algorithm
            fillval = comb_type.startswith('i')

            # Create the combined array object using the minmed algorithm
            result = min_med(imdrizSectionsList,
                             weightSectionsList,
                             readnoiseList,
                             exposureTimeList,
                             backgroundValueList,
                             weight_masks=weight_mask_list,
                             combine_grow=grow,
                             combine_nsigma1=nsigma1,
                             combine_nsigma2=nsigma2,
                             fillval=fillval)

        else:  # DO NUMCOMBINE
            # Create the combined array object using the numcombine task
            result = numcombine.num_combine(imdrizSectionsList,
                                            masks=weight_mask_list,
                                            combination_type=comb_type,
                                            nlow=nlow,
                                            nhigh=nhigh,
                                            upper=hthresh,
                                            lower=lthresh)

        # Write out the processed image sections to the final output array:
        medianImageArray[e1 + u1:e1 + u2, :] = result[u1:u2, :]

    # Write out the combined image
    # use the header from the first single drizzled image in the list
    pf = _writeImage(medianImageArray, inputHeader=single_hdr)

    if virtual:
        mediandict = {}
        mediandict[medianfile] = pf
        for img in imageObjectList:
            img.saveVirtualOutputs(mediandict)
    else:
        try:
            print("Saving output median image to: '{}'".format(medianfile))
            pf.writeto(medianfile)
        except IOError:
            msg = "Problem writing file '{}'".format(medianfile)
            print(msg)
            raise IOError(msg)

    # Always close any files opened to produce median image; namely,
    # single drizzle images and singly-drizzled weight images
    #
    for img in singleDrizList:
        if not virtual:
            img.close()

    # Close all singly drizzled weight images used to create median image.
    for img in singleWeightList:
        if not virtual:
            img.close()
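
The buffering logic above splits the image into overlapping row sections so that each section fits into a fixed-size buffer. The following standalone sketch reproduces that arithmetic with made-up numbers; BUFSIZE and the image geometry are assumptions, not values from the code above.

# Illustrative sectioning arithmetic (all values are invented).
BUFSIZE = 1024 * 1024      # assumed 1 MB buffer
imrows, imcols = 4096, 4096
data_item_size = 4         # bytes per float32 pixel
grow = 1
overlap = 2 * grow         # extra rows so minmed's boxcar kernel spans sections

section_nrows = min(imrows, int(BUFSIZE / (imcols * data_item_size)))  # 64 rows fit
nbr = section_nrows - overlap          # rows actually advanced per section
nsec = (imrows - overlap) // nbr
if (imrows - overlap) % nbr > 0:
    nsec += 1

print(section_nrows, nbr, nsec)        # 64 62 67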
Example 5
    def addMember(self, imagePtr=None):
        """
        Combines the input image with the static mask that
        has the same signature.

        Parameters
        ----------
        imagePtr : object
            An imageObject reference

        Notes
        -----
        The signature parameter consists of the tuple::

            (instrument/detector, (nx,ny), chip_id)

        The signature is defined in the image object for each chip

        """
        numchips = imagePtr._numchips
        log.info("Computing static mask:\n")

        chips = imagePtr.group
        if chips is None:
            chips = imagePtr.getExtensions()

        #for chip in range(1,numchips+1,1):
        for chip in chips:
            chipid = imagePtr.scienceExt + ',' + str(chip)
            chipimage = imagePtr.getData(chipid)
            signature = imagePtr[chipid].signature

            # If this is a new signature, create a new Static Mask file which is empty
            # only create a new mask if one doesn't already exist
            if ((signature not in self.masklist) or (len(self.masklist) == 0)):
                self.masklist[signature] = self._buildMaskArray(signature)
                maskname =  constructFilename(signature)
                self.masknames[signature] = maskname
            else:
                chip_sig = buildSignatureKey(signature)
                for s in self.masknames:
                    if chip_sig in self.masknames[s]:
                        maskname  = self.masknames[s]
                        break
            imagePtr[chipid].outputNames['staticMask'] = maskname

            stats = ImageStats(chipimage, nclip=3, fields='mode')
            mode = stats.mode
            rms = stats.stddev
            nbins = len(stats.histogram)
            del stats

            log.info('  mode = %9f;   rms = %7f;   static_sig = %0.2f' %
                     (mode, rms, self.static_sig))

            if nbins >= 2: # only combine data from new image if enough data to mask
                sky_rms_diff = mode - (self.static_sig*rms)
                np.bitwise_and(self.masklist[signature],
                               np.logical_not(np.less(chipimage, sky_rms_diff)),
                               self.masklist[signature])
            del chipimage
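
A toy illustration of the mask update above: pixels that fall below mode - static_sig * rms are ANDed out of the running static mask (1 = good, 0 = flagged). All values are invented, and the mask dtype is chosen only for this example.

import numpy as np

static_sig = 4.0
mode, rms = 100.0, 5.0
sky_rms_diff = mode - static_sig * rms           # 80.0

chipimage = np.array([[120.0, 60.0],
                      [95.0, 10.0]])
mask = np.ones(chipimage.shape, dtype=np.int16)  # running static mask, 1 = good

# Pixels below the threshold are flagged (set to 0) in place.
np.bitwise_and(mask, np.logical_not(np.less(chipimage, sky_rms_diff)), mask)
print(mask)  # [[1 0]
             #  [1 0]]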
Example 6
    def run(self):
        """
        Run the median combine step

        The code was either taken directly from the corresponding
        pydrizzle version or modeled on it. Necessary adjustments
        for the slitless data were applied.
        """
        sci_data = []

        for one_image in self.input_data['sci_imgs']:
            if os.access(one_image, os.F_OK):
                in_fits = fits.open(one_image, 'readonly')
                sci_data.append(in_fits[0].data)
                in_fits.close()

        wht_data = []
        for one_image in self.input_data['wht_imgs']:
            if os.access(one_image, os.F_OK):
                in_fits = fits.open(one_image, 'readonly')
                wht_data.append(in_fits[0].data)
                in_fits.close()
            else:
                _log.info("{0:s} not found/created by drizzle"
                      "...skipping it.".format(one_image))

        if len(sci_data) != len(wht_data):
            _log.info("The number of single_sci images created by "
                  "drizzle does not match the number of single_wht"
                  " files created!")
            raise aXeError("drizzle error")

        weight_mask_list = []

        # added the except so that if the image area contains only
        # zeros then the zero value is returned which is better for later
        # processing
        # we don't understand why the original lower=1e-8 value was
        # supplied, unless it was for the case of spectra in the normal
        # field of view; see #1110
        for wht_arr in wht_data:
            try:
                tmp_mean_value = self.combine_maskpt * ImageStats(
                    wht_arr, lower=1e-8, lsig=None, usig=None,
                    fields="mean", nclip=0).mean
            except (ValueError, AttributeError):
                tmp_mean_value = 0.
                _log.info("tmp_mean_value set to 0 because no good "
                      "pixels found; {0:s}".format(self.ext_names["MEF"]))
            except:
                tmp_mean_value = 0.
                _log.info("tmp_mean_value set to 0; possible uncaught "
                      "exception in dither.py; {0:s}"
                      .format(self.ext_names["MEF"]))

            weight_mask = np.zeros(wht_arr.shape, dtype=np.uint8)
            np.putmask(weight_mask, np.less(wht_arr, tmp_mean_value), 1)

            weight_mask_list.append(weight_mask)

        if len(sci_data) < 2:
            _log.info('\nNumber of images to flatten: %i!' % len(sci_data))
            _log.info('Set combine type to "minimum"!')
            self.combine_type = 'minimum'

        if self.combine_type == "minmed":
            # Create the combined array object using the minmed algorithm
            result = minmed(sci_data,   # input data to be combined
                            wht_data,   # input weight images to be combined
                            self.input_data['rdn_vals'],  # readnoise values for the input images
                            self.input_data['exp_vals'],  # exposure times for the input images
                            self.input_data['sky_vals'],  # background values for the input images
                            weightMaskList=weight_mask_list,  # input weight masks for pixel rejection
                            combine_grow=self.combine_grow,   # radius (pixels) for neighbor rejection
                            combine_nsigma1=self.combine_nsigma1,  # significance for accepting minimum instead of median
                            combine_nsigma2=self.combine_nsigma2)  # significance for accepting minimum instead of median
        else:
            # _log.info 'going to other', combine_type
            # Create the combined array object using the numcombine task
            result = numCombine(sci_data,
                                numarrayMaskList=weight_mask_list,
                                combinationType=self.combine_type,
                                nlow=self.combine_nlow,
                                nhigh=self.combine_nhigh,
                                upper=self.combine_hthresh,
                                lower=self.combine_lthresh
                                )

        # _log.info result.combArrObj
        hdu = fits.PrimaryHDU(result.combArrObj)
        hdulist = fits.HDUList([hdu])
        hdulist[0].header['EXPTIME'] = (self.input_data['exp_tot'],
                                        'total exposure time')
        hdulist.writeto(self.median_image)

        # delete the various arrays
        for one_item in sci_data:
            del one_item
        del sci_data
        for one_item in wht_data:
            del one_item
        del wht_data
        for one_item in weight_mask_list:
            del one_item
        del weight_mask_list
Example 7
def _median(imageObjectList, paramDict):
    """Create a median image from the list of image Objects
       that has been given.
    """

    newmasks = paramDict['median_newmasks']
    comb_type = paramDict['combine_type']
    nlow = paramDict['combine_nlow']
    nhigh = paramDict['combine_nhigh']
    grow = paramDict['combine_grow']
    maskpt = paramDict['combine_maskpt']
    proc_units = paramDict['proc_unit']
    compress = paramDict['compress']
    bufsizeMb = paramDict['combine_bufsize']

    sigma = paramDict["combine_nsigma"]
    sigmaSplit = sigma.split()
    nsigma1 = float(sigmaSplit[0])
    nsigma2 = float(sigmaSplit[1])

    #print "Checking parameters:"
    #print comb_type,nlow,nhigh,grow,maskpt,nsigma1,nsigma2
    if paramDict['combine_lthresh'] is None:
        lthresh = None
    else:
        lthresh = float(paramDict['combine_lthresh'])
    if paramDict['combine_hthresh'] is None:
        hthresh = None
    else:
        hthresh = float(paramDict['combine_hthresh'])

    # The name of the output median file is defined in the output wcs object
    # and stored in the image.outputValues["outMedian"] dict of every imageObject
    medianfile = imageObjectList[0].outputNames["outMedian"]
    """ Builds combined array from single drizzled images."""
    # Start by removing any previous products...
    if (os.access(medianfile, os.F_OK)):
        os.remove(medianfile)

    # Define lists for instrument-specific parameters; these should be in the
    # image objects and need to be passed to the minmed routine
    readnoiseList = []
    exposureTimeList = []
    backgroundValueList = []  #list of  MDRIZSKY *platescale values
    singleDrizList = []  #these are the input images
    singleWeightList = []  #pointers to the data arrays
    #skylist=[] #the list of platescale values for the images
    _wht_mean = []  # Compute the mean value of each wht image

    _single_hdr = None
    virtual = None

    #for each image object
    for image in imageObjectList:
        if virtual is None:
            virtual = image.inmemory

        det_gain = image.getGain(1)
        img_exptime = image._image['sci', 1]._exptime
        native_units = image.native_units
        if lthresh is not None:
            if proc_units.lower() == 'native':
                if native_units.lower().startswith("counts"):
                    lthresh = lthresh * det_gain
                if native_units.lower().endswith("/s"):
                    lthresh = lthresh * img_exptime
        if hthresh is not None:
            if proc_units.lower() == 'native':
                if native_units.lower().startswith("counts"):
                    hthresh = hthresh * det_gain
                if native_units.lower().endswith("/s"):
                    hthresh = hthresh * img_exptime

        singleDriz = image.getOutputName("outSingle")
        singleDriz_name = image.outputNames['outSingle']
        singleWeight = image.getOutputName("outSWeight")
        singleWeight_name = image.outputNames['outSWeight']
        #singleDriz=image.outputNames["outSingle"] #all chips are drizzled to a single output image
        #singleWeight=image.outputNames["outSWeight"]

        # If compression was used, reference ext=1 as CompImageHDU only writes
        # out MEF files, not simple FITS.
        if compress:
            wcs_ext = '[1]'
            wcs_extnum = 1
        else:
            wcs_ext = '[0]'
            wcs_extnum = 0
        if not virtual:
            if isinstance(singleDriz, str):
                iter_singleDriz = singleDriz + wcs_ext
                iter_singleWeight = singleWeight + wcs_ext
            else:
                iter_singleDriz = singleDriz[wcs_extnum]
                iter_singleWeight = singleWeight[wcs_extnum]
        else:
            iter_singleDriz = singleDriz_name + wcs_ext
            iter_singleWeight = singleWeight_name + wcs_ext

        # read in WCS from first single drizzle image to use as WCS for median image
        if _single_hdr is None:
            if virtual:
                _single_hdr = singleDriz[wcs_extnum].header
            else:
                _single_hdr = fits.getheader(singleDriz_name, ext=wcs_extnum)

        _singleImage = iterfile.IterFitsFile(iter_singleDriz)
        if virtual:
            _singleImage.handle = singleDriz
            _singleImage.inmemory = True

        singleDrizList.append(_singleImage)  #add to an array for bookkeeping

        # If it exists, extract the corresponding weight images
        if (not virtual
                and os.access(singleWeight, os.F_OK)) or (virtual
                                                          and singleWeight):
            _weight_file = iterfile.IterFitsFile(iter_singleWeight)
            if virtual:
                _weight_file.handle = singleWeight
                _weight_file.inmemory = True

            singleWeightList.append(_weight_file)
            try:
                tmp_mean_value = ImageStats(_weight_file.data,
                                            lower=1e-8,
                                            fields="mean",
                                            nclip=0).mean
            except ValueError:
                tmp_mean_value = 0.0
            _wht_mean.append(tmp_mean_value * maskpt)

            # Extract instrument specific parameters and place in lists

            # If an image has zero exposure time we will
            # redefine that value as '1'.  Although this will cause inaccurate scaling
            # of the data to occur in the 'minmed' combination algorithm, this is a
            # necessary evil since it avoids divide by zero exceptions.  It is more
            # important that the divide by zero exceptions not cause Multidrizzle to
            # crash in the pipeline than it is to raise an exception for this obviously
            # bad data even though this is not the type of data you would wish to process
            # with Multidrizzle.
            #
            # Get the exposure time from the InputImage object
            #
            # MRD 19-May-2011
            # Changed exposureTimeList to take exposure time from img_exptime
            # variable instead of the image._exptime attribute, since
            # image._exptime was just giving 1.
            #
            exposureTimeList.append(img_exptime)

            # Use only "commanded" chips to extract subtractedSky and rdnoise:
            rdnoise = 0.0
            nchips = 0
            bsky = None  # minimum sky across **used** chips

            for chip in image.returnAllChips(extname=image.scienceExt):
                # compute sky value as sky/pixel using the single_drz pixel scale
                if bsky is None or bsky > chip.subtractedSky:
                    bsky = chip.subtractedSky

                # Extract the readnoise value for the chip
                rdnoise += (chip._rdnoise)**2
                nchips += 1

            if bsky is None:
                bsky = 0.0

            if nchips > 0:
                rdnoise = math.sqrt(rdnoise / nchips)

            backgroundValueList.append(bsky)
            readnoiseList.append(rdnoise)

            ## compute sky value as sky/pixel using the single_drz pixel scale
            #bsky = image._image[image.scienceExt,1].subtractedSky# * (image.outputValues['scale']**2)
            #backgroundValueList.append(bsky)

            ## Extract the readnoise value for the chip
            #sci_chip = image._image[image.scienceExt,1]
            #readnoiseList.append(sci_chip._rdnoise) #verify this is calculated correctly in the image object

            print("reference sky value for image ", image._filename, " is ",
                  backgroundValueList[-1])
        #
        # END Loop over input image list
        #

    # create an array for the median output image, use the size of the first image in the list
    medianImageArray = np.zeros(singleDrizList[0].shape,
                                dtype=singleDrizList[0].type())

    if (comb_type.lower() == "minmed") and not newmasks:
        # Issue a warning if minmed is being run with newmasks turned off.
        print(
            '\nWARNING: Creating median image without the application of bad pixel masks!\n'
        )

    # create the master list to be used by the image iterator
    masterList = []
    masterList.extend(singleDrizList)
    masterList.extend(singleWeightList)

    print('\n')

    # Specify the location of the drz image sections
    startDrz = 0
    endDrz = len(singleDrizList) + startDrz

    # Specify the location of the wht image sections
    startWht = len(singleDrizList) + startDrz
    endWht = startWht + len(singleWeightList)
    _weight_mask_list = None

    # Fire up the image iterator
    #
    # The overlap value needs to be set to 2*grow in order to
    # avoid edge effects when scrolling down the image, and to
    # ensure that the last section returned from the iterator
    # has enough rows to span the kernel used in the boxcar method
    # within minmed.
    _overlap = 2 * int(grow)

    #Start by computing the buffer size for the iterator
    _imgarr = masterList[0].data
    _bufsize = nimageiter.BUFSIZE
    if bufsizeMb is not None:
        _bufsize *= bufsizeMb
    _imgrows = _imgarr.shape[0]
    _nrows = nimageiter.computeBuffRows(_imgarr)
    #        _overlaprows = _nrows - (_overlap+1)
    #        _niter = int(_imgrows/_nrows)
    #        _niter = 1 + int( (_imgrows - _overlaprows)/_nrows)
    niter = nimageiter.computeNumberBuff(_imgrows, _nrows, _overlap)
    #computeNumberBuff actually returns (niter,buffrows)
    _niter = niter[0]
    _nrows = niter[1]
    _lastrows = _imgrows - (_niter * (_nrows - _overlap))

    # check to see if this buffer size will leave enough rows for
    # the section returned on the last iteration
    if _lastrows < _overlap + 1:
        _delta_rows = (_overlap + 1 - _lastrows) // _niter
        if _delta_rows < 1 and _delta_rows >= 0: _delta_rows = 1
        _bufsize += (_imgarr.shape[1] * _imgarr.itemsize) * _delta_rows

    if not virtual:
        masterList[0].close()
    del _imgarr

    for imageSectionsList, prange in nimageiter.FileIter(masterList,
                                                         overlap=_overlap,
                                                         bufsize=_bufsize):

        if newmasks:
            """ Build new masks from single drizzled images. """
            _weight_mask_list = []
            listIndex = 0
            for _weight_arr in imageSectionsList[startWht:endWht]:
                # Initialize an output mask array to ones
                # This array will be reused for every output weight image
                _weight_mask = np.zeros(_weight_arr.shape, dtype=np.uint8)
                """ Generate new pixel mask file for median step.
                This mask will be created from the single-drizzled
                weight image for this image.

                The mean of the weight array will be computed and all
                pixels with values less than 0.7 of the mean will be flagged
                as bad in this mask.  This mask will then be used when
                creating the median image.
                """
                # Compute image statistics
                _mean = _wht_mean[listIndex]

                # 0 means good, 1 means bad here...
                np.putmask(_weight_mask, np.less(_weight_arr, _mean), 1)
                _weight_mask_list.append(_weight_mask)
                listIndex += 1

        # Do MINMED
        if ("minmed" in comb_type.lower()):
            if comb_type.lower()[0] == 'i':
                # set up use of 'imedian'/'imean' in minmed algorithm
                fillval = True
            else:
                fillval = False

            if (_weight_mask_list in [None, []]):
                _weight_mask_list = None

            # Create the combined array object using the minmed algorithm
            result = minmed(
                imageSectionsList[startDrz:endDrz],  # input data sections to be combined
                imageSectionsList[startWht:endWht],  # input weight image sections to be combined
                readnoiseList,        # readnoise values for the input images
                exposureTimeList,     # exposure times for the input images
                backgroundValueList,  # background values for the input images
                weightMaskList=_weight_mask_list,  # input weight masks for pixel rejection
                combine_grow=grow,                 # radius (pixels) for neighbor rejection
                combine_nsigma1=nsigma1,  # significance for accepting minimum instead of median
                combine_nsigma2=nsigma2,  # significance for accepting minimum instead of median
                fillval=fillval)          # turn on use of imedian/imean

            # medianOutput[prange[0]:prange[1],:] = result.out_file1
            # minOutput[prange[0]:prange[1],:] = result.out_file2

        else:  # DO NUMCOMBINE
            # Create the combined array object using the numcombine task
            result = numcombine.numCombine(imageSectionsList[startDrz:endDrz],
                                           numarrayMaskList=_weight_mask_list,
                                           combinationType=comb_type.lower(),
                                           nlow=nlow,
                                           nhigh=nhigh,
                                           upper=hthresh,
                                           lower=lthresh)

        # We need to account for any specified overlap when writing out
        # the processed image sections to the final output array.
        if prange[1] != _imgrows:
            medianImageArray[prange[0]:prange[1] -
                             _overlap, :] = result.combArrObj[:-_overlap, :]
        else:
            medianImageArray[prange[0]:prange[1], :] = result.combArrObj

    del result
    del _weight_mask_list
    _weight_mask_list = None

    # Write out the combined image
    # use the header from the first single drizzled image in the list
    #header=fits.getheader(imageObjectList[0].outputNames["outSingle"])
    _pf = _writeImage(medianImageArray, inputHeader=_single_hdr)

    if virtual:
        mediandict = {}
        mediandict[medianfile] = _pf
        for img in imageObjectList:
            img.saveVirtualOutputs(mediandict)
    else:
        try:
            print("Saving output median image to: ", medianfile)
            _pf.writeto(medianfile)
        except IOError:
            msg = "Problem writing file: " + medianfile
            print(msg)
            raise IOError(msg)

    del _pf

    # Always close any files opened to produce median image; namely,
    # single drizzle images and singly-drizzled weight images
    #

    for img in singleDrizList:
        if not virtual:
            img.close()
    singleDrizList = []

    # Close all singly drizzled weight images used to create median image.
    for img in singleWeightList:
        if not virtual:
            img.close()
    singleWeightList = []

    # If new median masks was turned on, close those files
    if _weight_mask_list:
        for arr in _weight_mask_list:
            del arr
        _weight_mask_list = None

    del masterList
    del medianImageArray
Example 8
def getcorr():
    imstat = ImageStats(image.science[i][BMask], 'midpt',
                        lower=lower, upper=upper, nclip=0)
    assert imstat.npix == NPix
    return imstat.midpt