Code example #1
    def __sub__(self, other):
        """
        Subtract a ScienceImage object from another
        Extras (e.g. ivar, masks) are included if they are present

        Args:
            other (ScienceImage):

        Returns:
            ScienceImage:

        """
        if not isinstance(other, ScienceImage):
            msgs.error("Misuse of the subtract method")
        # Images
        newimg = self.image - other.image

        # Mask time
        outmask_comb = (self.mask == 0) & (other.mask == 0)

        # Variance
        if self.ivar is not None:
            new_ivar = utils.inverse(
                utils.inverse(self.ivar, positive=True) +
                utils.inverse(other.ivar, positive=True))
            new_ivar[np.invert(outmask_comb)] = 0
        else:
            new_ivar = None

        # RN2
        if self.rn2img is not None:
            new_rn2 = self.rn2img + other.rn2img
        else:
            new_rn2 = None

        # Files
        new_files = self.files + other.files

        # Instantiate
        new_sciImg = ScienceImage.from_images(self.spectrograph,
                                              self.det,
                                              self.par,
                                              self.bpm,
                                              newimg,
                                              new_ivar,
                                              new_rn2,
                                              files=new_files)
        #TODO: KW properly handle adding the bits
        #embed(header='279 in sciImg')
        crmask_diff = new_sciImg.build_crmask()
        # crmask_eff assumes everything masked in the outmask_comb is a CR in the individual images
        new_sciImg.crmask = crmask_diff | np.invert(outmask_comb)
        # Note that the following uses the saturation and mincounts held in
        # self.spectrograph.detector[self.det-1]
        new_sciImg.build_mask()

        return new_sciImg
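
A note on the variance propagation in __sub__ above: for the difference of two independent images the variances add, so the inverse variances combine reciprocally, and pixels masked (ivar == 0) in either input stay masked. A minimal numpy sketch of just that arithmetic (standalone illustration, not the PypeIt implementation):

import numpy as np

def ivar_of_difference(ivar_a, ivar_b):
    # Var(a - b) = Var(a) + Var(b): convert ivar -> var, sum, convert back,
    # keeping pixels that are masked (ivar == 0) in either input masked.
    good = (ivar_a > 0) & (ivar_b > 0)
    out = np.zeros_like(ivar_a, dtype=float)
    out[good] = 1.0 / (1.0 / ivar_a[good] + 1.0 / ivar_b[good])
    return out

print(ivar_of_difference(np.array([4., 0., 1.]), np.array([4., 4., 1.])))
# -> [2.  0.  0.5]
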
Code example #2
File: pypeitimage.py Project: ninoc/PypeIt
    def sub(self, other, par):
        """
        Subtract one PypeItImage from another
        Extras (e.g. ivar, masks) are included if they are present

        Args:
            other (:class:`PypeItImage`):
            par (:class:`pypeit.par.pypeitpar.ProcessImagesPar`):
                Parameters that dictate the processing of the images.  See
                :class:`pypeit.par.pypeitpar.ProcessImagesPar` for the defaults
        Returns:
            PypeItImage:
        """
        if not isinstance(other, PypeItImage):
            msgs.error("Misuse of the subtract method")
        # Images
        newimg = self.image - other.image

        # Mask time
        outmask_comb = (self.fullmask == 0) & (other.fullmask == 0)

        # Variance
        if self.ivar is not None:
            new_ivar = utils.inverse(
                utils.inverse(self.ivar) + utils.inverse(other.ivar))
            new_ivar[np.invert(outmask_comb)] = 0
        else:
            new_ivar = None

        # RN2
        if self.rn2img is not None and other.rn2img is not None:
            new_rn2 = self.rn2img + other.rn2img
        else:
            new_rn2 = None

        # Instantiate
        new_sciImg = PypeItImage(image=newimg,
                                 ivar=new_ivar,
                                 bpm=self.bpm,
                                 rn2img=new_rn2,
                                 detector=self.detector)
        # Files
        new_sciImg.files = self.files + other.files

        #TODO: KW properly handle adding the bits
        #crmask_diff = new_sciImg.build_crmask(par) if par['mask_cr'] else np.zeros_like(other.image, dtype=bool)
        # crmask_eff assumes everything masked in the outmask_comb is a CR in the individual images
        # JFH changed to below because this was not respecting the desire not to mask_crs
        new_sciImg.crmask = (
            new_sciImg.build_crmask(par) | np.logical_not(outmask_comb)
        ) if par['mask_cr'] else np.logical_not(outmask_comb)
        #new_sciImg.crmask = crmask_diff | np.logical_not(outmask_comb)
        # Note that the following uses the saturation and mincounts held in self.detector
        new_sciImg.build_mask()

        return new_sciImg
Code example #3
File: test_utils.py Project: tbowers7/PypeIt
def test_calc_ivar():
    """ Run the parameter setup script
    """
    x = np.array([-1.0, -0.1, 0.0, 0.1, 1.0])
    res = utils.inverse(x)
    assert np.array_equal(res, np.array([0.0, 0.0, 0.0, 10.0, 1.0]))
    assert np.array_equal(utils.calc_ivar(res), np.array([0.0, 0.0, 0.0, 0.1, 1.0]))
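
The assertions above pin down the behavior of utils.inverse: a guarded reciprocal that returns 1/x for positive entries and 0 for zero or negative entries, which makes it its own inverse on the positive part of the array. A behavioral sketch inferred from the test (not the library source):

import numpy as np

def inverse_sketch(arr):
    # 1/arr where arr > 0; zero and negative entries map to 0
    arr = np.asarray(arr, dtype=float)
    return np.where(arr > 0, 1.0, 0.0) / np.where(arr > 0, arr, 1.0)

x = np.array([-1.0, -0.1, 0.0, 0.1, 1.0])
assert np.array_equal(inverse_sketch(x), [0.0, 0.0, 0.0, 10.0, 1.0])
# Applying it twice recovers the positive entries, matching calc_ivar above
assert np.array_equal(inverse_sketch(inverse_sketch(x)), [0.0, 0.0, 0.0, 0.1, 1.0])
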
Code example #4
    def build_ivar(self):
        """
        Generate the Inverse Variance frame

        Uses procimg.variance_frame

        Returns:
            np.ndarray: Copy of self.ivar

        """
        msgs.info(
            "Generating raw variance frame (from detected counts [flat fielded])"
        )
        # Convenience
        detector = self.spectrograph.detector[self.det - 1]
        # Generate
        rawvarframe = procimg.variance_frame(
            self.datasec_img,
            self.image,
            detector['gain'],
            detector['ronoise'],
            numamplifiers=detector['numamplifiers'],
            darkcurr=detector['darkcurr'],
            exptime=self.exptime,
            rnoise=self.rn2img)
        # Ivar
        self.ivar = utils.inverse(rawvarframe)
        # Return
        return self.ivar.copy()
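
For context on what procimg.variance_frame is modeling here, a toy per-pixel variance in electrons squared is shot noise plus dark current plus read noise squared; build_ivar then stores the guarded reciprocal. A simplified sketch assuming a single amplifier and gain-corrected counts (the function name, signature, and units below are illustrative, not the PypeIt API):

import numpy as np

def toy_variance_frame(counts_e, ronoise_e, darkcurr_e_per_s, exptime_s):
    # Poisson term (clipped at zero) + dark-current electrons + read noise^2
    return np.clip(counts_e, 0, None) + darkcurr_e_per_s * exptime_s + ronoise_e**2

var = toy_variance_frame(np.array([100., 0., 2500.]), ronoise_e=4.0,
                         darkcurr_e_per_s=0.01, exptime_s=600.)
ivar = np.where(var > 0, 1.0 / var, 0.0)   # as in build_ivar above
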
Code example #5
File: pypeitimage.py Project: ninoc/PypeIt
    def build_crmask(self, par, subtract_img=None):
        """
        Generate the CR mask frame

        Mainly a wrapper to :func:`pypeit.core.procimg.lacosmic`

        Args:
            par (:class:`pypeit.par.pypeitpar.ProcessImagesPar`):
                Parameters that dictate the processing of the images.  See
                :class:`pypeit.par.pypeitpar.ProcessImagesPar` for the
                defaults.
            subtract_img (`numpy.ndarray`_, optional):
                If provided, subtract this from the image prior to CR detection

        Returns:
            `numpy.ndarray`_: Copy of self.crmask (boolean)

        """
        var = utils.inverse(self.ivar)
        use_img = self.image if subtract_img is None else self.image - subtract_img
        # Run LA Cosmic to get the cosmic ray mask
        self.crmask = procimg.lacosmic(use_img,
                                       self.detector['saturation'],
                                       self.detector['nonlinear'],
                                       varframe=var,
                                       maxiter=par['lamaxiter'],
                                       grow=par['grow'],
                                       remove_compact_obj=par['rmcompact'],
                                       sigclip=par['sigclip'],
                                       sigfrac=par['sigfrac'],
                                       objlim=par['objlim'])
        # Return
        return self.crmask.copy()
Code example #6
def test_lacosmic():
    spec = load_spectrograph('keck_deimos')
    file = os.path.join(os.environ['PYPEIT_DEV'], 'RAW_DATA', 'keck_deimos',
                        '1200G_M_5500', 'd0315_45929.fits')
    par = ProcessImagesPar(use_biasimage=False,
                           use_pixelflat=False,
                           use_illumflat=False)
    img = RawImage(file, spec, 1)
    pimg = img.process(par)
    test_img = pimg.image[:500, :500]
    test_var = utils.inverse(pimg.ivar[:500, :500])
    crmask = procimg.lacosmic(test_img, varframe=test_var, maxiter=1)
    assert np.sum(crmask) == 1240, 'L.A.Cosmic changed'

    _crmask = procimg.lacosmic(test_img, varframe=test_var, maxiter=2)
    assert np.sum(_crmask) > np.sum(
        crmask), '2nd iteration should find more cosmics'

    _crmask = procimg.lacosmic(test_img,
                               saturation=6000.,
                               varframe=test_var,
                               maxiter=1)
    assert np.sum(_crmask) < np.sum(
        crmask), 'Should have flagged some pixels as saturated'

    __crmask = procimg.lacosmic(test_img,
                                saturation=np.full(test_img.shape, 6000.),
                                varframe=test_var,
                                maxiter=1)
    assert np.array_equal(__crmask, _crmask), 'Saturation array failed.'
Code example #7
File: onespec.py Project: tbowers7/PypeIt
    def sig(self):
        """ Return the 1-sigma array

        Returns:
            `numpy.ndarray`_: error array
        """
        return np.sqrt(utils.inverse(self.ivar))
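
One caveat worth making explicit: because utils.inverse maps ivar == 0 to 0 rather than to infinite variance, sig() returns 0 (not inf) on masked pixels, so downstream code must treat sigma == 0 as invalid rather than as a perfect measurement. A standalone numpy illustration of the same expression:

import numpy as np

ivar = np.array([4.0, 0.0, 0.25])
sig = np.sqrt(np.where(ivar > 0, 1.0, 0.0) / np.where(ivar > 0, ivar, 1.0))
print(sig)  # [0.5 0.  2. ] -- the masked pixel (ivar == 0) gets sigma 0
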
Code example #8
def dummy_spectrum(s2n=10., rstate=None, seed=1234, wave=None):
    """
    Parameters
    ----------
    s2n
    seed
    wave

    Returns
    -------
    spec : XSpectrum1D

    """
    if rstate is None:
        rstate=np.random.RandomState(seed)
    if wave is None:
        wave = np.linspace(4000., 5000., 2000)
    # Create
    flux = np.ones_like(wave)
    sig = np.ones_like(wave) / s2n
    ispec = XSpectrum1D.from_tuple((wave,flux,sig))
    # Noise and append
    spec = ispec.add_noise(rstate=rstate)
    flux, sig, mask = spec.data['flux'], spec.data['sig'], spec.data['flux'].mask
    ivar = utils.inverse(sig**2)
    return flux, ivar, mask
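
Hypothetical usage of dummy_spectrum above (reuses the numpy/utils imports from the snippet and assumes linetools is installed for XSpectrum1D; the check is approximate since the noise realization is random):

flux, ivar, mask = dummy_spectrum(s2n=20.)
sig = np.sqrt(utils.inverse(ivar))
# With a unit continuum, the typical S/N should land near the request
print(np.median(flux / sig))   # ~ 20
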
Code example #9
    def save(self, coaddfile, telluric=None, obj_model=None, overwrite=True):
        """
        Routine to save 1d coadds to a fits file. This replaces save.save_coadd1d_to_fits

        Args:
            coaddfile (str):
               File to output the coadded spectrum to.
            telluric (str):
               This is vestigial and should probably be removed.
            obj_model (str):
               This is vestigial and should probably be removed
            overwrite (bool):
               Overwrite existing file?

        """

        self.coaddfile = coaddfile
        ex_value = self.par['ex_value']
        # Estimate sigma from ivar
        sig = np.sqrt(utils.inverse(self.ivar_coadd))
        if (os.path.exists(self.coaddfile)) and (np.invert(overwrite)):
            hdulist = fits.open(self.coaddfile)
            msgs.info("Reading primary HDU from existing file: {:s}".format(self.coaddfile))
        else:
            msgs.info("Creating a new primary HDU.")
            prihdu = fits.PrimaryHDU()
            if self.header is None:
                msgs.warn('The primary header is none')
            else:
                prihdu.header = self.header
            hdulist = fits.HDUList([prihdu])

        wave_mask = self.wave_coadd > 1.0
        # Add Spectrum Table
        cols = []
        cols += [fits.Column(array=self.wave_coadd[wave_mask], name='{:}_WAVE'.format(ex_value), format='D')]
        cols += [fits.Column(array=self.flux_coadd[wave_mask], name='{:}_FLAM'.format(ex_value), format='D')]
        cols += [fits.Column(array=self.ivar_coadd[wave_mask], name='{:}_FLAM_IVAR'.format(ex_value), format='D')]
        cols += [fits.Column(array=sig[wave_mask], name='{:}_FLAM_SIG'.format(ex_value), format='D')]
        cols += [fits.Column(array=self.mask_coadd[wave_mask].astype(float), name='{:}_MASK'.format(ex_value), format='D')]
        if telluric is not None:
            cols += [fits.Column(array=telluric[wave_mask], name='TELLURIC', format='D')]
        if obj_model is not None:
            cols += [fits.Column(array=obj_model[wave_mask], name='OBJ_MODEL', format='D')]

        coldefs = fits.ColDefs(cols)
        tbhdu = fits.BinTableHDU.from_columns(coldefs)
        tbhdu.name = 'OBJ0001-SPEC0001-{:}'.format(ex_value.capitalize())
        hdulist.append(tbhdu)

        if (os.path.exists(self.coaddfile)) and (np.invert(overwrite)):
            hdulist.writeto(self.coaddfile, overwrite=True)
            msgs.info("Appending 1D spectra to existing file {:s}".format(self.coaddfile))
        else:
            hdulist.writeto(self.coaddfile, overwrite=overwrite)
            msgs.info("Wrote 1D spectra to {:s}".format(self.coaddfile))
Code example #10
File: sensfunc.py Project: tbowers7/PypeIt
    def sensfunc_weights(cls, sensfile, waves, debug=False, extrap_sens=True):
        """
        Get the weights based on the sensfunc

        Args:
            sensfile (str):
                the name of your fits format sensfile
            waves (ndarray): (nspec, norders, nexp) or (nspec, norders)
                wavelength grid for your output weights
            debug (bool): default=False
                show the weights QA

        Returns:
            ndarray: sensfunc weights evaluated on the input wavelength
            grid (waves)
        """
        sens = cls.from_file(sensfile)
        #    wave, zeropoint, meta_table, out_table, header_sens = sensfunc.SensFunc.load(sensfile)

        if waves.ndim == 2:
            nspec, norder = waves.shape
            nexp = 1
            waves_stack = np.reshape(waves, (nspec, norder, 1))
        elif waves.ndim == 3:
            nspec, norder, nexp = waves.shape
            waves_stack = waves
        else:
            msgs.error('Unrecognized dimensionality for waves')

        weights_stack = np.zeros_like(waves_stack)

        if norder != sens.zeropoint.shape[1]:
            msgs.error(
                'The number of orders in {:} does not agree with your data. Wrong sensfile?'
                .format(sensfile))

        for iord in range(norder):
            for iexp in range(nexp):
                sensfunc_iord = flux_calib.get_sensfunc_factor(
                    waves_stack[:, iord, iexp],
                    sens.wave[:, iord],
                    sens.zeropoint[:, iord],
                    1.0,
                    extrap_sens=extrap_sens)
                weights_stack[:, iord, iexp] = utils.inverse(sensfunc_iord)

        if debug:
            weights_qa(waves_stack,
                       weights_stack, (waves_stack > 1.0),
                       title='sensfunc_weights')

        if waves.ndim == 2:
            weights_stack = np.reshape(weights_stack, (nspec, norder))

        return weights_stack
Code example #11
    def to_xspec1d(self, **kwargs):
        """
        Push the data in :class:`SpecObj` into an XSpectrum1D object


        Returns:
            linetools.spectra.xspectrum1d.XSpectrum1D:  Spectrum object

        """
        wave, flux, ivar, _ = self.to_arrays(**kwargs)
        sig = np.sqrt(utils.inverse(ivar))
        # Create
        return xspectrum1d.XSpectrum1D.from_tuple((wave, flux, sig))
Code example #12
File: show_1dspec.py Project: YoemanLeung/PypeIt
def main(args):
    """ Runs the XSpecGui on an input file
    """

    try:
        sobjs = specobjs.SpecObjs.from_fitsfile(args.file)
    except:
        # place holder until coadd data model is sorted out
        wave, flux, flux_ivar, flux_mask, meta_spec, head = general_spec_reader(
            args.file)
        spec = XSpectrum1D.from_tuple(
            (wave * u.AA, flux, np.sqrt(utils.inverse(flux_ivar))),
            masking='none')
    else:
        # List only?
        if args.list:
            print("Showing object names for input file...")
            for ii in range(len(sobjs)):
                name = sobjs[ii].NAME
                print("EXT{:07d} = {}".format(ii + 1, name))
            return

        if args.obj is not None:
            exten = sobjs.name.index(args.obj)
            if exten < 0:
                msgs.error("Bad input object name: {:s}".format(args.obj))
        else:
            exten = args.exten - 1  # 1-index in FITS file

        # Check Extraction
        if args.extract == 'OPT':
            if 'OPT_WAVE' not in sobjs[exten]._data.keys():
                msgs.error(
                    "Spectrum not extracted with OPT.  Try --extract=BOX")

        spec = sobjs[exten].to_xspec1d(extraction=args.extract,
                                       fluxed=args.flux)

    # Setup
    app = QApplication(sys.argv)
    # Screen dimensions
    width = app.desktop().screenGeometry().width()
    scale = 2. * (width / 3200.)

    # Launch
    gui = XSpecGui(spec, screen_scale=scale)
    gui.show()
    app.exec_()
Code example #13
    def build_crmask(self, subtract_img=None):
        """
        Call to ImageMask.build_crmask which will
        generate the cosmic ray mask

        Args:
            subtract_img (np.ndarray, optional):
                Image to be subtracted off of self.image prior to CR evaluation

        Returns:
            np.ndarray: Boolean array of self.crmask

        """
        return super(ScienceImage, self).build_crmask(self.spectrograph, self.det,
                                                      self.par, self.image,
                                                      utils.inverse(self.ivar),
                                                      subtract_img=subtract_img).copy()
Code example #14
    def update_mask_cr(self, subtract_img=None):
        """
        Updates the CR mask values in self.mask
        through a call to ImageMask.build_crmask which
        generates a new CR mask and then a call to
        ImageMask.update_mask_cr() which updates self.mask

        Args:
            subtract_img (np.ndarray, optional):
                If provided, this is subtracted from self.image prior to
                CR masking
        """
        # Generate the CR mask (and save in self.crmask)
        super(ScienceImage, self).build_crmask(self.spectrograph, self.det,
                                               self.par, self.image,
                                               utils.inverse(self.ivar),
                                               subtract_img=subtract_img).copy()
        # Now update the mask
        super(ScienceImage, self).update_mask_cr(self.crmask)
Code example #15
    def build_ivar(self):
        """
        Generate the Inverse Variance frame

        Uses procimg.variance_frame

        Returns:
            `numpy.ndarray`_: Copy of self.ivar

        """
        # Generate
        rawvarframe = procimg.variance_frame(
            self.datasec_img,
            self.image,
            self.detector['gain'],
            self.detector['ronoise'],
            darkcurr=self.detector['darkcurr'],
            exptime=self.exptime,
            rnoise=self.rn2img)
        # Ivar
        self.ivar = utils.inverse(rawvarframe)
        # Return
        return self.ivar.copy()
Code example #16
File: chk_noise_2dspec.py Project: tbowers7/PypeIt
def get_flux_slit(spec2DObj, slitidx, pad=0):
    """
    Returns the flux and error of a specific slit.
    The flux is sky-subtracted, with the object model removed when present.

    Args:
        spec2DObj (:class:`~pypeit.spec2dobj.Spec2DObj`): 2D spectra object
        slitidx (int): Given slit/order
        pad (int, optional):  Ignore pixels within pad of edges.

    Returns:
        :obj:`tuple`: tuple of `numpy.ndarray`_ with flux and error of the 2D spectrum

    """
    slit_select = spec2DObj.slits.slit_img(pad=pad, slitidx=slitidx)

    flux = spec2DObj.sciimg - spec2DObj.skymodel
    if spec2DObj.objmodel is not None:
        flux -= spec2DObj.objmodel
    flux_slit = flux * (slit_select == spec2DObj.slits.spat_id[slitidx])
    # Error
    err_slit = np.sqrt(utils.inverse(spec2DObj.ivarmodel)) * (
        slit_select == spec2DObj.slits.spat_id[slitidx])
    return flux_slit, err_slit
Code example #17
    def run(self,
            process_steps,
            bias,
            pixel_flat=None,
            illum_flat=None,
            ignore_saturation=False,
            sigma_clip=True,
            bpm=None,
            sigrej=None,
            maxiters=5):
        """
        Generate a PypeItImage from a list of images

        Mainly a wrapper to coadd2d.weighted_combine()

        This may also generate the ivar, crmask, rn2img and mask

        Args:
            process_steps (list):
                List of processing steps to apply
            bias (np.ndarray or None):
                Bias image or instruction
            pixel_flat (np.ndarray, optional):
                Flat image
            illum_flat (np.ndarray, optional):
                Illumination image
            sigma_clip (bool, optional):
                Perform sigma clipping
            sigrej (int or float, optional): Rejection threshold for sigma clipping.
                 Code defaults to determining this automatically based on the number of images provided.
            maxiters (int, optional):
                Number of iterations for the clipping
            bpm (np.ndarray, optional):
                Bad pixel mask.  Held in ImageMask
            ignore_saturation (bool, optional):
                If True, turn off the saturation flag in the individual images before stacking
                This avoids having such values set to 0 which for certain images (e.g. flat calibrations)
                can have unintended consequences.

        Returns:
            :class:`pypeit.images.pypeitimage.PypeItImage`:

        """
        # Loop on the files
        nimages = len(self.files)
        for kk, ifile in enumerate(self.files):
            # Process a single image
            pypeitImage = self.process_one(ifile,
                                           process_steps,
                                           bias,
                                           pixel_flat=pixel_flat,
                                           illum_flat=illum_flat,
                                           bpm=bpm)
            # Are we all done?
            if len(self.files) == 1:
                return pypeitImage
            elif kk == 0:
                # Get ready
                shape = (nimages, pypeitImage.bpm.shape[0],
                         pypeitImage.bpm.shape[1])
                img_stack = np.zeros(shape)
                ivar_stack = np.zeros(shape)
                rn2img_stack = np.zeros(shape)
                crmask_stack = np.zeros(shape, dtype=bool)
                # Mask
                bitmask = maskimage.ImageBitMask()
                mask_stack = np.zeros(shape,
                                      bitmask.minimum_dtype(asuint=True))
            # Process
            img_stack[kk, :, :] = pypeitImage.image
            # Construct raw variance image and turn into inverse variance
            if pypeitImage.ivar is not None:
                ivar_stack[kk, :, :] = pypeitImage.ivar
            else:
                ivar_stack[kk, :, :] = 1.
            # Mask cosmic rays
            if pypeitImage.crmask is not None:
                crmask_stack[kk, :, :] = pypeitImage.crmask
            # Read noise squared image
            if pypeitImage.rn2img is not None:
                rn2img_stack[kk, :, :] = pypeitImage.rn2img
            # Final mask for this image
            # TODO This seems kludgy to me. Why not just pass ignore_saturation to process_one and ignore the saturation
            # when the mask is actually built, rather than untoggling the bit here
            if ignore_saturation:  # Important for calibrations as we don't want replacement by 0
                indx = pypeitImage.bitmask.flagged(pypeitImage.mask,
                                                   flag=['SATURATION'])
                pypeitImage.mask[indx] = pypeitImage.bitmask.turn_off(
                    pypeitImage.mask[indx], 'SATURATION')
            mask_stack[kk, :, :] = pypeitImage.mask

        # Coadd them
        weights = np.ones(nimages) / float(nimages)
        img_list = [img_stack]
        var_stack = utils.inverse(ivar_stack)
        var_list = [var_stack, rn2img_stack]
        img_list_out, var_list_out, outmask, nused = combine.weighted_combine(
            weights,
            img_list,
            var_list, (mask_stack == 0),
            sigma_clip=sigma_clip,
            sigma_clip_stack=img_stack,
            sigrej=sigrej,
            maxiters=maxiters)

        # Build the last one
        final_pypeitImage = pypeitimage.PypeItImage(
            img_list_out[0],
            ivar=utils.inverse(var_list_out[0]),
            bpm=pypeitImage.bpm,
            rn2img=var_list_out[1],
            crmask=np.invert(outmask),
            binning=pypeitImage.binning)
        nonlinear_counts = self.spectrograph.nonlinear_counts(
            self.det, apply_gain='apply_gain' in process_steps)
        final_pypeitImage.build_mask(
            final_pypeitImage.image,
            final_pypeitImage.ivar,
            saturation=
            nonlinear_counts,  #self.spectrograph.detector[self.det-1]['saturation'],
            mincounts=self.spectrograph.detector[self.det - 1]['mincounts'])
        # Return
        return final_pypeitImage
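
The stack in run() is combined with uniform weights, and weighted_combine propagates variances as the weighted sum sum_i w_i**2 * V_i before the result is converted back to inverse variance. A compact sketch of that propagation with masking and clipping omitted (synthetic arrays, not the PypeIt call signature):

import numpy as np

rng = np.random.default_rng(1)
img_stack = rng.normal(10., 1., (4, 8, 8))    # 4 images of shape (8, 8)
var_stack = np.ones_like(img_stack)           # unit variance per image
w = np.full(img_stack.shape[0], 1.0 / img_stack.shape[0])

comb_img = np.tensordot(w, img_stack, axes=1)      # weighted mean image
comb_var = np.tensordot(w**2, var_stack, axes=1)   # sum_i w_i^2 * V_i
comb_ivar = np.where(comb_var > 0, 1.0 / comb_var, 0.0)
print(comb_var[0, 0])   # 4 * (1/4)^2 = 0.25: variance drops as 1/n
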
Code example #18
File: extract.py Project: mcoughlin/PypeIt
def pca_trace(xinit_in,
              spec_min_max=None,
              predict=None,
              npca=None,
              pca_explained_var=99.0,
              coeff_npoly=None,
              coeff_weights=None,
              debug=True,
              order_vec=None,
              lower=3.0,
              upper=3.0,
              minv=None,
              maxv=None,
              maxrej=1,
              xinit_mean=None):
    """
    Use a PCA model to determine the best object (or slit edge) traces for echelle spectrographs.

    Args:
      xinit_in:  ndarray, (nspec, norders)
         Array of input traces that one wants to PCA model. For object finding this will be the traces for orders where
         an object was detected. If an object was not detected on some orders (see ech_objfind), the standard star
         (or order boundaries)  will be  assigned to these orders at the correct fractional slit position, and a joint PCA
         fit will be performed to the detected traces and the standard/slit traces.

    spec_min_max: float or int ndarray, (2, norders), default=None.
         This is a 2-d array which defines the minimum and maximum of each order in the
         spectral direction on the detector. This should only be used for echelle spectrographs for which the orders do not
         entirely cover the detector, and each order passed in for xinit_in is a succession of orders on the detector.
         The code will re-map the traces such that they all have the same length, compute the PCA, and then re-map the orders
         back. This improves performance for echelle spectrographs by removing the nonlinear shrinking of the orders so that
         the linear pca operation can better predict the traces. THIS IS AN EXPERIMENTAL FEATURE. INITIAL TESTS WITH
         XSHOOTER-NIR INDICATED THAT IT DID NOT IMPROVE PERFORMANCE AND SIMPLY LINEAR EXTRAPOLATION OF THE ORDERS INTO THE
         REGIONS THAT ARE NOT ILLUMINATED PERFORMED SIGNIFICANTLY BETTER. DO NOT USE UNTIL FURTHER TESTING IS PERFORMED. IT
         COULD HELP WITH OTHER MORE NONLINEAR SPECTROGRAPHS.
    predict: ndarray, bool (norders,), default = None
         Orders which have True are those that will be predicted by extrapolating the fit of the PCA coefficients for those
         orders which have False set in this array. The default is None, which means that the coefficients of all orders
         will be fit simultaneously and no extrapolation will be performed. For object finding, we use the standard star
         (or slit boundaries) as the input for orders for which a trace is not identified and fit the coefficients of all
         simultaneously. Thus no extrapolation is performed. For tracing slit boundaries it may be useful to perform
          extrapolations.
    npca: int, default = None
         number of PCA components to be kept. The maximum number of possible PCA components would be = norders, which is to say
         that no PCA compression would be performed. For the default of None, npca will be automatically determined by
         calculating the minimum number of components required to explain 99% (pca_explained_var) of the variance in the different orders.
    pca_explained_var: float, default = 99
         Amount of explained variance cut used to determine where to truncate the PCA, i.e. to determine npca.
    coeff_npoly: int, default = None
         Order of polynomial fits used for PCA coefficients fitting. The default is None, which means that coeff_npoly
         will be automatically determined by taking the number of orders into account. PCA components that explain
         less variance (and are thus much noisier) are fit with lower order.
    coeff_weights (np.ndarray): shape = (norders,), default=None
         If input these weights will be used for the polynomial fit to the PCA coefficients. Even if you are predicting
         orders and hence only fitting a subset of the orders != norders, the shape of coeff_weights must be norders.
         Just give the orders you don't plan to fit a weight of zero. This option is useful for fitting object
         traces since the weights can be set to (S/N)^2 of each order.
         TODO: Perhaps we should get rid of the predict option and simply allow the user to set the weights of the orders
         they want predicted to be zero. That would be more straightforward, but would require a rework of the code.

    debug: bool, default = True
         Show plots useful for debugging.

    Returns
    -------
    pca_fit:  ndarray, float (nspec, norders)
        Array with the same size as xinit, which contains the pca fitted orders.
    """

    nspec, norders = xinit_in.shape

    if order_vec is None:
        order_vec = np.arange(norders, dtype=float)

    if predict is None:
        predict = np.zeros(norders, dtype=bool)

    # use_order = True orders used to predict the predict = True bad orders
    use_order = np.invert(predict)
    ngood = np.sum(use_order)

    if ngood < 2:
        msgs.warn(
            'There are no good traces to PCA fit. There is probably a bug somewhere. Exiting and returning input traces.'
        )
        return xinit_in, {}, None, None

    if spec_min_max is not None:
        xinit = remap_orders(xinit_in, spec_min_max)
    else:
        xinit = xinit_in

    # Take out the mean position of each input trace
    if xinit_mean is None:
        xinit_mean = np.mean(xinit, axis=0)

    xpca = xinit - xinit_mean
    xpca_use = xpca[:, use_order].T
    pca_full = PCA()
    pca_full.fit(xpca_use)
    var = np.cumsum(
        np.round(pca_full.explained_variance_ratio_, decimals=6) * 100)
    npca_full = var.size
    if npca is None:
        if var[0] >= pca_explained_var:
            npca = 1
            msgs.info(
                'The first PCA component contains more than {:5.3f} of the information'
                .format(pca_explained_var))
        else:
            npca = int(
                np.ceil(
                    np.interp(pca_explained_var, var,
                              np.arange(npca_full) + 1)))
            msgs.info(
                'Truncated PCA to contain {:5.3f}'.format(pca_explained_var) +
                '% of the total variance. ' +
                'Number of components to keep is npca = {:d}'.format(npca))
    else:
        npca = int(npca)
        var_trunc = np.interp(float(npca), np.arange(npca_full) + 1.0, var)
        msgs.info('Truncated PCA with npca={:d} components contains {:5.3f}'.
                  format(npca, var_trunc) + '% of the total variance.')

    if npca_full < npca:
        msgs.warn(
            'Not enough good traces for a PCA fit of the requested dimensionality. The full (non-compressing) PCA has size: '
            'npca_full = {:d}'.format(npca_full) +
            ' is < npca = {:d}'.format(npca))
        msgs.warn(
            'Using the input trace for now. But you should lower npca <= npca_full'
        )
        return xinit_in, {}, None, None

    if coeff_npoly is None:
        coeff_npoly = int(
            np.fmin(np.fmax(np.floor(3.3 * ngood / norders), 1.0), 3.0))

    # Polynomial coefficient for PCA coefficients
    npoly_vec = np.zeros(npca, dtype=int)
    # Fit first pca dimension (with largest variance) with a higher order npoly depending on number of good orders.
    # Fit all higher dimensions (with lower variance) with a line
    # Cascade down and use lower order polynomial for PCA directions that contain less variance
    for ipoly in range(npca):
        npoly_vec[ipoly] = np.fmax(coeff_npoly - ipoly, 1)

    pca = PCA(n_components=npca)
    pca_coeffs_use = pca.fit_transform(xpca_use)
    pca_vectors = pca.components_

    pca_coeffs_new = np.zeros((norders, npca))
    fit_dict = {}
    # Now loop over the dimensionality of the compression and perform a polynomial fit to
    for idim in range(npca):
        # Only fit the use_order orders, then use this to predict the others
        xfit = order_vec[use_order]
        yfit = pca_coeffs_use[:, idim]
        ncoeff = npoly_vec[idim]
        # Apply a 10% relative error to each coefficient. This performs better than use_mad, since larger coefficients
        # will always be considered inliers, if the coefficients vary rapidly with order as they sometimes do.
        sigma = np.fmax(0.1 * np.abs(yfit), 0.1)
        invvar = utils.inverse(sigma**2)
        use_weights = coeff_weights[
            use_order] if coeff_weights is not None else None
        # TODO Note that we are doing a weighted fit using the coeff_weights, but the rejection is still done
        # using the ad-hoc invvar created in the line above. I cannot think of a better way.
        msk_new, poly_out = utils.robust_polyfit_djs(xfit,
                                                     yfit,
                                                     ncoeff,
                                                     invvar=invvar,
                                                     weights=use_weights,
                                                     function='polynomial',
                                                     maxiter=25,
                                                     lower=lower,
                                                     upper=upper,
                                                     maxrej=maxrej,
                                                     sticky=False,
                                                     use_mad=False,
                                                     minx=minv,
                                                     maxx=maxv)
        # ToDO robust_poly_fit needs to return minv and maxv as outputs for the fits to be usable downstream
        pca_coeffs_new[:, idim] = utils.func_val(poly_out, order_vec,
                                                 'polynomial')
        fit_dict[str(idim)] = {}
        fit_dict[str(idim)]['coeffs'] = poly_out
        fit_dict[str(idim)]['minv'] = minv
        fit_dict[str(idim)]['maxv'] = maxv
        if debug:
            # Evaluate the fit
            xvec = np.linspace(order_vec.min(), order_vec.max(), num=100)
            robust_mask_new = msk_new == 1
            plt.plot(xfit,
                     yfit,
                     'ko',
                     mfc='None',
                     markersize=8.0,
                     label='pca coeff')
            plt.plot(xfit[~robust_mask_new],
                     yfit[~robust_mask_new],
                     'r+',
                     markersize=20.0,
                     label='robust_polyfit_djs rejected')
            plt.plot(xvec,
                     utils.func_val(poly_out, xvec, 'polynomial'),
                     ls='-.',
                     color='steelblue',
                     label='Polynomial fit of order={:d}'.format(ncoeff))
            plt.xlabel('Order Number', fontsize=14)
            plt.ylabel('PCA Coefficient', fontsize=14)
            plt.title('PCA Fit for Dimension #{:d}/{:d}'.format(
                idim + 1, npca))
            plt.legend()
            plt.show()

    pca_model = np.outer(pca.mean_, np.ones(norders)) + (np.dot(
        pca_coeffs_new, pca_vectors)).T
    #   pca_model_mean = np.mean(pca_model,0)
    #   pca_fit = np.outer(np.ones(nspec), xinit_mean) + (pca_model - pca_model_mean)
    #   JFH which is correct?
    pca_fit = np.outer(np.ones(nspec), xinit_mean) + (pca_model)

    if spec_min_max is not None:
        pca_out = remap_orders(pca_fit, spec_min_max, inverse=True)
    else:
        pca_out = pca_fit

    return pca_out, fit_dict, pca.mean_, pca_vectors
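
Stripped of the remapping, masking, and robust-rejection machinery, the core of pca_trace is: subtract each trace's mean position, PCA-compress across orders, fit each PCA coefficient as a low-order polynomial in order number, and reconstruct. A minimal sketch on synthetic traces (numpy and scikit-learn only; all names and values below are illustrative):

import numpy as np
from sklearn.decomposition import PCA

rng = np.random.default_rng(0)
nspec, norders, npca, npoly = 512, 10, 2, 2

# Synthetic traces: smooth spatial trend per order plus a little noise
spec = np.linspace(0., 1., nspec)
orders = np.arange(norders, dtype=float)
traces = (50. * orders[None, :] + 5. * np.outer(spec**2, orders + 1.)
          + rng.normal(0., 0.05, (nspec, norders)))

# 1) remove the mean position of each trace; 2) PCA-compress across orders
mean_pos = traces.mean(axis=0)
pca = PCA(n_components=npca)
coeffs = pca.fit_transform((traces - mean_pos).T)        # (norders, npca)

# 3) fit each PCA coefficient as a low-order polynomial in order number
coeffs_fit = np.column_stack([np.polyval(np.polyfit(orders, coeffs[:, i], npoly), orders)
                              for i in range(npca)])

# 4) reconstruct smooth traces from the fitted coefficients
model = (pca.mean_[None, :] + coeffs_fit @ pca.components_).T + mean_pos
print(np.abs(model - traces).max())   # residual at the noise level
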
Code example #19
    def process(self,
                process_steps,
                pixel_flat=None,
                illum_flat=None,
                bias=None):
        """
        Process the image

        Note:  The processing step order is currently 'frozen' as is.
          We may choose to allow optional ordering

        Here are the allowed steps, in the order they will be applied:
            subtract_overscan -- Analyze the overscan region and subtract from the image
            trim -- Trim the image down to the data (i.e. remove the overscan)
            orient -- Orient the image in the PypeIt orientation (spec, spat) with blue to red going down to up
            subtract_bias -- Subtract a bias image
            apply_gain -- Convert to counts, amp by amp
            flatten -- Divide by the pixel flat and (if provided) the illumination flat
            extras -- Generate the RN2 and IVAR images
            crmask -- Generate a CR mask

        Args:
            process_steps (list):
                List of processing steps
            pixel_flat (np.ndarray, optional):
                Pixel flat image
            illum_flat (np.ndarray, optional):
                Illumination flat
            bias (np.ndarray, optional):
                Bias image

        Returns:
            :class:`pypeit.images.pypeitimage.PypeItImage`:

        """
        # For error checking
        steps_copy = process_steps.copy()
        # Get started
        # Standard order
        #   -- May need to allow for other order some day..
        if 'subtract_overscan' in process_steps:
            self.subtract_overscan()
            steps_copy.remove('subtract_overscan')
        if 'trim' in process_steps:
            self.trim()
            steps_copy.remove('trim')
        if 'orient' in process_steps:
            self.orient()
            steps_copy.remove('orient')
        if 'subtract_bias' in process_steps:  # Bias frame, if it exists, is trimmed and oriented
            self.subtract_bias(bias)
            steps_copy.remove('subtract_bias')
        if 'apply_gain' in process_steps:
            self.apply_gain()
            steps_copy.remove('apply_gain')
        # Flat field
        if 'flatten' in process_steps:
            self.flatten(pixel_flat, illum_flat=illum_flat, bpm=self.bpm)
            steps_copy.remove('flatten')

        # Fresh BPM
        bpm = self.spectrograph.bpm(self.filename,
                                    self.det,
                                    shape=self.image.shape)

        # Extras
        if 'extras' in process_steps:
            self.build_rn2img()
            self.build_ivar()
            steps_copy.remove('extras')

        # Generate a PypeItImage
        pypeitImage = pypeitimage.PypeItImage(self.image,
                                              binning=self.binning,
                                              ivar=self.ivar,
                                              rn2img=self.rn2img,
                                              bpm=bpm)
        # Mask(s)
        if 'crmask' in process_steps:
            if 'extras' in process_steps:
                var = utils.inverse(pypeitImage.ivar)
            else:
                var = np.ones_like(pypeitImage.image)
            #
            pypeitImage.build_crmask(self.spectrograph, self.det, self.par,
                                     pypeitImage.image, var)
            steps_copy.remove('crmask')
        nonlinear_counts = self.spectrograph.nonlinear_counts(
            self.det, apply_gain='apply_gain' in process_steps)
        pypeitImage.build_mask(
            pypeitImage.image,
            pypeitImage.ivar,
            saturation=
            nonlinear_counts,  #self.spectrograph.detector[self.det-1]['saturation'],
            mincounts=self.spectrograph.detector[self.det - 1]['mincounts'])
        # Error checking
        assert len(steps_copy) == 0

        # Return
        return pypeitImage
Code example #20
File: combineimage.py Project: tbowers7/PypeIt
    def run(self, bias=None, flatimages=None, ignore_saturation=False, sigma_clip=True,
            bpm=None, sigrej=None, maxiters=5, slits=None, dark=None, combine_method='mean',
            mosaic=False):
        r"""
        Process and combine all images.

        All processing is performed by the
        :class:`~pypeit.images.rawimage.RawImage` class; see 
        :func:`~pypeit.images.rawimage.RawImage.process`.

        If there is only one file (see :attr:`files`), this simply processes the
        file and returns the result.
        
        If there are multiple files, all the files are processed and the
        processed images are combined based on the ``combine_method``, where the
        options are:

            - 'mean': If ``sigma_clip`` is True, this is a sigma-clipped mean;
              otherwise, this is a simple average.  The combination is done
              using :func:`~pypeit.core.combine.weighted_combine`.

            - 'median': This is a simple masked median (using
              `numpy.ma.median`_).

        The errors in the image are also propagated through the stacking
        procedure; however, this isn't a simple propagation of the inverse
        variance arrays.  The image processing produces arrays with individual
        components used to construct the variance model for an individual frame.
        See :ref:`image_proc` and :func:`~pypeit.core.procimg.variance_model` for a
        description of these arrays.  Briefly, the relevant arrays are the
        readnoise variance (:math:`V_{\rm rn}`), the "processing" variance
        (:math:`V_{\rm proc}`), and the image scaling (i.e., the flat-field
        correction) (:math:`s`).  The variance calculation for the stacked image
        directly propagates the error in these.  For example, the propagated
        processing variance (modulo the masking) is:

        .. math::

            V_{\rm proc,stack} = \frac{\sum_i s_i^2 V_{{\rm proc},i}}{s_{\rm stack}^2}

        where :math:`s_{\rm stack}` is the combined image scaling array,
        combined in the same way as the image data are combined.  This ensures
        that the reconstruction of the uncertainty in the combined image
        calculated using :func:`~pypeit.core.procimg.variance_model` accurately
        includes, e.g., the processing uncertainty.

        The uncertainty in the combined image, however, recalculates the
        variance model, using the combined image (which should have less noise)
        to set the Poisson statistics.  The same parameters used when processing
        the individual frames are applied to the combined frame; see
        :func:`~pypeit.images.rawimage.RawImage.build_ivar`.  This calculation
        is then the equivalent of when the observed counts are replaced by the
        model object and sky counts during sky subtraction and spectral
        extraction.

        Bitmasks from individual frames in the stack are *not* propagated to the
        combined image, except to indicate when a pixel was masked for all
        images in the stack (cf., ``ignore_saturation``).  Additionally, the
        instrument-specific bad-pixel mask, see the
        :func:`~pypeit.spectrographs.spectrograph.Spectrograph.bpm` method for
        each instrument subclass, saturated-pixel mask, and other default mask
        bits (e.g., NaN and non-positive inverse variance values) are all
        propagated to the combined-image mask; see
        :func:`~pypeit.images.pypeitimage.PypeItImage.build_mask`.
        
        .. warning::

            All image processing of the data in :attr:`files` *must* result
            in images of the same shape.

        Args:
            bias (:class:`~pypeit.images.buildimage.BiasImage`, optional):
                Bias image for bias subtraction; passed directly to
                :func:`~pypeit.images.rawimage.RawImage.process` for all images.
            flatimages (:class:`~pypeit.flatfield.FlatImages`, optional):
                Flat-field images for flat fielding; passed directly to
                :func:`~pypeit.images.rawimage.RawImage.process` for all images.
            ignore_saturation (:obj:`bool`, optional):
                If True, turn off the saturation flag in the individual images
                before stacking.  This avoids having such values set to 0, which
                for certain images (e.g. flat calibrations) can have unintended
                consequences.
            sigma_clip (:obj:`bool`, optional):
                When ``combine_method='mean'``, sigma-clip the data;
                see :func:`~pypeit.core.combine.weighted_combine`.
            bpm (`numpy.ndarray`_, optional):
                Bad pixel mask; passed directly to
                :func:`~pypeit.images.rawimage.RawImage.process` for all images.
            sigrej (:obj:`float`, optional):
                When ``combine_method='mean'``, this sets the sigma-rejection
                thresholds used when sigma-clipping the image combination.
                Ignored if ``sigma_clip`` is False.  If None and ``sigma_clip``
                is True, the thresholds are determined automatically based on
                the number of images provided; see
                :func:`~pypeit.core.combine.weighted_combine`.
            maxiters (:obj:`int`, optional):
                When ``combine_method='mean'`` and sigma-clipping
                (``sigma_clip`` is True), this sets the maximum number of
                rejection iterations.  If None, rejection iterations continue
                until no more data are rejected; see
                :func:`~pypeit.core.combine.weighted_combine`.
            slits (:class:`~pypeit.slittrace.SlitTraceSet`, optional):
                Slit edge trace locations; passed directly to
                :func:`~pypeit.images.rawimage.RawImage.process` for all images.
            dark (:class:`~pypeit.images.buildimage.DarkImage`, optional):
                Dark-current image; passed directly to
                :func:`~pypeit.images.rawimage.RawImage.process` for all images.
            combine_method (:obj:`str`, optional):
                Method used to combine images.  Must be ``'mean'`` or
                ``'median'``; see above.
            mosaic (:obj:`bool`, optional):
                If multiple detectors are being processed, mosaic them into a
                single image.  See
                :func:`~pypeit.images.rawimage.RawImage.process`.

        Returns:
            :class:`~pypeit.images.pypeitimage.PypeItImage`: The combination of
            all the processed images.
        """
        # Check the input (i.e., bomb out *before* it does any processing)
        if self.nfiles == 0:
            msgs.error('Object contains no files to process!')
        if self.nfiles > 1 and combine_method not in ['mean', 'median']:
            msgs.error(f'Unknown image combination method, {combine_method}.  Must be '
                       '"mean" or "median".')

        # If not provided, generate the bpm for this spectrograph and detector.
        # Regardless of the file used, this must result in the same bpm, so we
        # just use the first one.
        # TODO: Why is this done here?  It's the same thing as what's done if
        # bpm is not passed to RawImage.process...
#        if bpm is None:
#            bpm = self.spectrograph.bpm(self.files[0], self.det)

        # Loop on the files
        for kk, ifile in enumerate(self.files):
            # Load raw image
            rawImage = rawimage.RawImage(ifile, self.spectrograph, self.det)
            # Process
            pypeitImage = rawImage.process(self.par, bias=bias, bpm=bpm, dark=dark,
                                           flatimages=flatimages, slits=slits, mosaic=mosaic)

            if self.nfiles == 1:
                # Only 1 file, so we're done
                pypeitImage.files = self.files
                return pypeitImage
            elif kk == 0:
                # Allocate arrays to collect data for each frame
                shape = (self.nfiles,) + pypeitImage.shape
                img_stack = np.zeros(shape, dtype=float)
                scl_stack = np.ones(shape, dtype=float)
                rn2img_stack = np.zeros(shape, dtype=float)
                basev_stack = np.zeros(shape, dtype=float)
                gpm_stack = np.zeros(shape, dtype=bool)
                lampstat = [None]*self.nfiles
                exptime = np.zeros(self.nfiles, dtype=float)

            # Save the lamp status
            lampstat[kk] = self.spectrograph.get_lamps_status(pypeitImage.rawheadlist)
            # Save the exposure time to check if it's consistent for all images.
            exptime[kk] = pypeitImage.exptime
            # Processed image
            img_stack[kk] = pypeitImage.image
            # Get the count scaling
            if pypeitImage.img_scale is not None:
                scl_stack[kk] = pypeitImage.img_scale
            # Read noise squared image
            if pypeitImage.rn2img is not None:
                rn2img_stack[kk] = pypeitImage.rn2img * scl_stack[kk]**2
            # Processing variance image
            if pypeitImage.base_var is not None:
                basev_stack[kk] = pypeitImage.base_var * scl_stack[kk]**2
            # Final mask for this image
            # TODO: This seems kludgy to me. Why not just pass ignore_saturation
            # to process_one and ignore the saturation when the mask is actually
            # built, rather than untoggling the bit here?
            if ignore_saturation:  # Important for calibrations as we don't want replacement by 0
                pypeitImage.update_mask('SATURATION', action='turn_off')
            # Get a simple boolean good-pixel mask for all the unmasked pixels
            gpm_stack[kk] = pypeitImage.select_flag(invert=True)

        # Check that the lamps being combined are all the same:
        if not lampstat[1:] == lampstat[:-1]:
            msgs.warn("The following files contain different lamp status")
            # Get the longest strings
            maxlen = max([len("Filename")]+[len(os.path.split(x)[1]) for x in self.files])
            maxlmp = max([len("Lamp status")]+[len(x) for x in lampstat])
            strout = "{0:" + str(maxlen) + "}  {1:s}"
            # Print the messages
            print(msgs.indent() + '-'*maxlen + "  " + '-'*maxlmp)
            print(msgs.indent() + strout.format("Filename", "Lamp status"))
            print(msgs.indent() + '-'*maxlen + "  " + '-'*maxlmp)
            for ff, file in enumerate(self.files):
                print(msgs.indent()
                      + strout.format(os.path.split(file)[1], " ".join(lampstat[ff].split("_"))))
            print(msgs.indent() + '-'*maxlen + "  " + '-'*maxlmp)

        # Do a similar check for exptime
        if np.any(np.absolute(np.diff(exptime)) > 0):
            # TODO: This should likely throw an error instead!
            msgs.warn('Exposure time is not consistent for all images being combined!  '
                      'Using the average.')
            comb_texp = np.mean(exptime)
        else:
            comb_texp = exptime[0]

        # Coadd them
        if combine_method == 'mean':
            weights = np.ones(self.nfiles, dtype=float)/self.nfiles
            img_list_out, var_list_out, gpm, nstack \
                    = combine.weighted_combine(weights,
                                               [img_stack, scl_stack],  # images to stack
                                               [rn2img_stack, basev_stack], # variances to stack
                                               gpm_stack, sigma_clip=sigma_clip,
                                               sigma_clip_stack=img_stack,  # clipping based on img
                                               sigrej=sigrej, maxiters=maxiters)
            comb_img, comb_scl = img_list_out
            comb_rn2, comb_basev = var_list_out
            comb_rn2[gpm] /= comb_scl[gpm]**2
            comb_basev[gpm] /= comb_scl[gpm]**2
        elif combine_method == 'median':
            bpm_stack = np.logical_not(gpm_stack)
            nstack = np.sum(gpm_stack, axis=0)
            gpm = nstack > 0
            comb_img = np.ma.median(np.ma.MaskedArray(img_stack, mask=bpm_stack),axis=0).filled(0.)
            # TODO: I'm not sure if this is right.  Maybe we should just take
            # the masked average scale instead?
            comb_scl = np.ma.median(np.ma.MaskedArray(scl_stack, mask=bpm_stack),axis=0).filled(0.)
            # First calculate the error in the sum.  The variance is set to 0
            # for pixels masked in all images.
            comb_rn2 = np.ma.sum(np.ma.MaskedArray(rn2img_stack, mask=bpm_stack),axis=0).filled(0.)
            comb_basev = np.ma.sum(np.ma.MaskedArray(basev_stack, mask=bpm_stack),axis=0).filled(0.)
            # Convert to standard error in the median (pi/2 factor relates standard variance
            # in mean (sum(variance_i)/n^2) to standard variance in median)
            comb_rn2[gpm] *= np.pi/2/nstack[gpm]**2/comb_scl[gpm]**2
            comb_basev[gpm] *= np.pi/2/nstack[gpm]**2/comb_scl[gpm]**2
        else:
            # NOTE: Given the check at the beginning of the function, the code
            # should *never* make it here.
            msgs.error("Bad choice for combine.  Allowed options are 'median', 'mean'.")

        # Recompute the inverse variance using the combined image
        comb_var = procimg.variance_model(comb_basev,
                                          counts=comb_img if self.par['shot_noise'] else None,
                                          count_scale=comb_scl,
                                          noise_floor=self.par['noise_floor'])

        # Build the combined image
        comb = pypeitimage.PypeItImage(image=comb_img, ivar=utils.inverse(comb_var), nimg=nstack,
                                       amp_img=pypeitImage.amp_img, det_img=pypeitImage.det_img,
                                       rn2img=comb_rn2, base_var=comb_basev, img_scale=comb_scl,
                                       bpm=np.logical_not(gpm).astype(np.uint8),
                                       # NOTE: The detector is needed here so
                                       # that we can get the dark current later.
                                       detector=pypeitImage.detector,
                                       PYP_SPEC=self.spectrograph.name,
                                       units='e-' if self.par['apply_gain'] else 'ADU',
                                       exptime=comb_texp, noise_floor=self.par['noise_floor'],
                                       shot_noise=self.par['shot_noise'])

        # Internals
        # TODO: Do we need these?
        comb.files = self.files
        comb.rawheadlist = pypeitImage.rawheadlist
        comb.process_steps = pypeitImage.process_steps

        # Build the base level mask
        comb.build_mask(saturation='default', mincounts='default')

        # Flag all pixels with no contributions from any of the stacked images.
        comb.update_mask('STCKMASK', indx=np.logical_not(gpm))

        # Return
        return comb
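
The pi/2 factor applied in the 'median' branch above is the asymptotic Gaussian penalty for using a median instead of a mean; a quick Monte-Carlo check (the ratio approaches pi/2 ~ 1.571 as the stack depth grows):

import numpy as np

rng = np.random.default_rng(42)
nstack, ntrials = 101, 100_000
x = rng.normal(0., 1., (ntrials, nstack))
ratio = np.median(x, axis=1).var() / x.mean(axis=1).var()
print(ratio)   # close to np.pi / 2 for Gaussian noise and a deep stack
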
Code example #21
    def proc_list(self,
                  ltype,
                  reject_cr=True,
                  sigma_clip=False,
                  sigrej=None,
                  maxiters=5):
        """
        Process a list of science images

        This includes stacking the images if there is more than 1

        Args:
            ltype (str): Type of images to process ('sci', 'bkg')
            reject_cr (bool, optional):
                If True, flag cosmic rays in the individual images
            sigma_clip (bool, optional):
                Perform sigma clipping when stacking
            sigrej (int or float, optional): Rejection threshold for sigma clipping.
                 Code defaults to determining this automatically based on the number of images provided.
            maxiters (int, optional):
                Maximum number of rejection iterations

        Returns:
            :class:`ScienceImage`: Processed (and, if more than one file, combined) science image

        """
        # Init
        if ltype == 'sci':
            files = self.file_list
        elif ltype == 'bkg':
            files = self.bg_file_list
        else:
            msgs.error("Bad ltype for proc_list")
        nimg = len(files)
        weights = np.ones(nimg) / float(nimg)

        # Load
        img_stack, ivar_stack, rn2img_stack, crmask_stack, mask_stack = self.build_stack(
            files, self.sci_bpm, reject_cr=reject_cr)

        # TODO: The bitmask is not being properly propagated here!
        if nimg > 1:
            img_list = [img_stack]
            var_stack = utils.inverse(ivar_stack, positive=True)
            var_list = [var_stack, rn2img_stack]
            img_list_out, var_list_out, outmask, nused = coadd2d.weighted_combine(
                weights,
                img_list,
                var_list, (mask_stack == 0),
                sigma_clip=sigma_clip,
                sigma_clip_stack=img_stack,
                sigrej=sigrej,
                maxiters=maxiters)
            '''
            img = img_list_out[0]
            ivar = utils.calc_ivar(var_list_out[0])
            rn2img = var_list_out[1]
            '''
            sciImage = scienceimage.ScienceImage.from_images(
                self.spectrograph,
                self.det,
                self.par['process'],
                self.sci_bpm,
                img_list_out[0],
                utils.inverse(var_list_out[0], positive=True),
                var_list_out[1],
                np.invert(outmask),
                files=files)
            sciImage.build_mask(saturation=self.saturation,
                                mincounts=self.mincounts)
            '''
            # assumes everything masked in the outmask is a CR in the individual images
            crmask = np.invert(outmask)
            # Create a mask for this combined image
            #processImage = processimage.ProcessImage(None, self.spectrograph, self.det, self.proc_par)
            #processImage.image = img
            #processImage.rawvarframe = var_list_out[0]
            #processImage.crmask = crmask
            #mask = procimg.build_mask(bpm=self.sci_bpm, saturation=self.saturation, mincounts=self.mincounts)
            mask = procimg.build_mask(self.bitmask, img, ivar, self.sci_bpm, crmask,
                                           saturation=self.saturation, mincounts=self.mincounts)
            '''
        else:
            mask = mask_stack[0, :, :]
            crmask = crmask_stack[0, :, :]
            img = img_stack[0, :, :]
            ivar = ivar_stack[0, :, :]
            rn2img = rn2img_stack[0, :, :]
            sciImage = scienceimage.ScienceImage.from_images(
                self.spectrograph,
                self.det,
                self.par['process'],
                self.sci_bpm,
                img,
                ivar,
                rn2img,
                crmask=crmask,
                mask=mask,
                files=files)

        return sciImage
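
For reference, the uniform-weight stack performed above averages the unmasked pixels at each location and propagates variance as Var(mean) = sum_i(var_i) / n^2. A minimal sketch with those assumed semantics (not the coadd2d.weighted_combine API, which additionally supports sigma clipping):

import numpy as np

def uniform_stack(img_stack, var_stack, gpm_stack):
    """Sketch of a uniform-weight, mask-aware stack (assumed semantics).

    img_stack, var_stack: (n, ny, nx) arrays; gpm_stack: True where good.
    """
    w = gpm_stack.astype(float)
    n = np.clip(w.sum(axis=0), 1.0, None)      # contributions per pixel
    img = (w * img_stack).sum(axis=0) / n      # mean of unmasked values
    var = (w * var_stack).sum(axis=0) / n**2   # Var(mean) = sum(var_i)/n^2
    outmask = w.sum(axis=0) > 0                # True where any input is good
    return img, var, outmask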
Code example #22
def pca_trace_object(trace_cen,
                     order=None,
                     trace_bpm=None,
                     min_length=0.6,
                     npca=None,
                     pca_explained_var=99.0,
                     reference_row=None,
                     coo=None,
                     minx=None,
                     maxx=None,
                     trace_wgt=None,
                     function='polynomial',
                     lower=3.0,
                     upper=3.0,
                     maxrej=1,
                     maxiter=25,
                     debug=False):
    r"""
    Decompose and reconstruct the provided traces using
    principal-component analysis.

    Args:
        trace_cen (`numpy.ndarray`_):
            A floating-point array with the spatial location of each
            trace. Shape is :math:`(N_{\rm spec}, N_{\rm
            trace})`.
        order (:obj:`int`, :obj:`list`, optional):
            The order of the polynomial used to fit each PCA
            coefficient as a function of trace position. If None,
            `order` is set to :math:`3.3 N_{\rm use}/N_{\rm trace}`,
            where :math:`N_{\rm use}` is the number of traces used to
            construct the PCA and :math:`N_{\rm trace}` is the number
            of total traces provided. If a single integer (including the
            case where it is determined automatically because the
            argument is None), the order for each PCA component (see
            `npca`) cascades from high to low order as follows::

                _order = np.clip(order - np.arange(npca), 1, None).astype(int)

        trace_bpm (`numpy.ndarray`_, optional):
            Bad-pixel mask for the trace data (True is bad; False is
            good). Must match the shape of `trace_cen`.
        min_length (:obj:`float`, optional):
            The good positions of a trace must cover at least this
            fraction of the spectral dimension for use in the PCA
            decomposition.
        npca (:obj:`int`, optional):
            The number of PCA components to keep. See
            :func:`pypeit.core.pca.pca_decomposition`.
        pca_explained_var (:obj:`float`, optional):
            The percentage (i.e., not the fraction) of the variance
            in the data accounted for by the PCA used to truncate the
            number of PCA coefficients to keep (see `npca`). Ignored
            if `npca` is provided directly. See
            :func:`pypeit.core.pca.pca_decomposition`.
        reference_row (:obj:`int`, optional):
            The row (spectral position) in `trace_cen` to use as the
            reference coordinate system for the PCA. If None, set to
            :math:`N_{\rm spec}/2` when no bad-pixel mask is provided,
            or to the spectral position that crosses the largest number
            of valid trace positions.
        coo (`numpy.ndarray`_, optional):
            Floating-point array with the reference coordinates to
            use for each trace. If None, coordinates are defined at
            the reference row of `trace_cen`. Shape must be
            :math:`(N_{\rm trace},)`.
        minx, maxx (:obj:`float`, optional):
            Minimum and maximum values used to rescale the
            independent axis data. If None, the minimum and maximum
            values of `coo` are used. See
            :func:`utils.robust_polyfit_djs`.
        trace_wgt (`numpy.ndarray`_, optional):
            Weights to apply to the PCA coefficient of each trace
            during the fit. Weights are independent of the PCA
            component. See `weights` parameter of
            :func:`pypeit.core.pca.fit_pca_coefficients`. Shape must
            be :math:`(N_{\rm trace},)`.
        function (:obj:`str`, optional):
            Type of function used to fit the data.
        lower (:obj:`float`, optional):
            Number of standard deviations used for rejecting data
            **below** the mean residual. If None, no rejection is
            performed. See :func:`utils.robust_polyfit_djs`.
        upper (:obj:`float`, optional):
            Number of standard deviations used for rejecting data
            **above** the mean residual. If None, no rejection is
            performed. See :func:`utils.robust_polyfit_djs`.
        maxrej (:obj:`int`, optional):
            Maximum number of points to reject during fit iterations.
            See :func:`utils.robust_polyfit_djs`.
        maxiter (:obj:`int`, optional):
            Maximum number of rejection iterations allowed. To force
            no rejection iterations, set to 0.
        debug (:obj:`bool`, optional):
            Show plots useful for debugging.
    """
    # Check the input
    if trace_bpm is None:
        use_trace = np.ones(trace_cen.shape[1], dtype=bool)
        _reference_row = trace_cen.shape[
            0] // 2 if reference_row is None else reference_row
    else:
        use_trace = np.sum(np.invert(trace_bpm),
                           axis=0) / trace_cen.shape[0] > min_length
        _reference_row = trace.most_common_trace_row(trace_bpm[:,use_trace]) \
                                if reference_row is None else reference_row
    _coo = None if coo is None else coo[use_trace]

    # Instantiate the PCA
    cenpca = TracePCA(trace_cen[:, use_trace],
                      npca=npca,
                      pca_explained_var=pca_explained_var,
                      reference_row=_reference_row,
                      coo=_coo)

    # Set the order of the function fit to the PCA coefficients:
    # Order is set to cascade down to lower order for components
    # that account for a smaller percentage of the variance.
    if order is None:
        # TODO: Where does this come from?
        order = int(
            np.clip(np.floor(3.3 * np.sum(use_trace) / trace_cen.shape[1]),
                    1.0, 3.0))
    _order = np.atleast_1d(order)
    if _order.size == 1:
        _order = np.clip(order - np.arange(cenpca.npca), 1, None).astype(int)
    if _order.size != cenpca.npca:
        msgs.error(
            'Number of polynomial orders does not match the number of PCA components.'
        )
    msgs.info('Order of function fit to each component: {0}'.format(_order))

    # Apply a 10% relative error to each coefficient. This performs better
    # than use_mad because it keeps large coefficients as inliers even when
    # the coefficients vary rapidly with order, as they sometimes do.

    # TODO: This inverse variance usage has performance issues and
    # tends to lead to rejection of coefficients that are near 0.
    # Instead of setting the floor to an absolute value 0.1, why not a
    # relative value like the mean or median of the coefficients? I.e.
    #    ivar = utils.inverse(numpy.square(np.fmax(0.1*np.absolute(cenpca.pca_coeffs),
    #                                              0.1*np.median(cenpca.pca_coeffs))))
    ivar = utils.inverse(
        np.square(np.fmax(0.1 * np.absolute(cenpca.pca_coeffs), 0.1)))

    # Set any additional weights for each trace
    weights = np.ones(np.sum(use_trace), dtype=float) \
                if trace_wgt is None else trace_wgt[use_trace]

    # TODO: This combination of ivar and weights is as it has been
    # previously. However, we recently changed the slit-edge tracing
    # code to fit the PCA coefficients with unity weights (the default
    # when passing weights=None to build_interpolator) and ivar=None.
    # The means the PCA coefficients are fit with uniform weighting and
    # the rejection is based on the median absolute deviation of the
    # data with respect to the fitted model.
    #
    # This current call below to build_interpolator will instead weight
    # the fit according to the weights set above, and it will reject
    # points based on the inverse variance set above. We need to check
    # that this makes sense!

    # Build the interpolator that allows prediction of new traces
    cenpca.build_interpolator(_order,
                              ivar=ivar,
                              weights=weights,
                              function=function,
                              lower=lower,
                              upper=upper,
                              maxrej=maxrej,
                              maxiter=maxiter,
                              minx=minx,
                              maxx=maxx,
                              debug=debug)

    # Return the traces predicted for all input traces
    return cenpca.predict(trace_cen[_reference_row, :] if coo is None else coo)
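
The core of the PCA step can be written in a few lines of plain numpy: subtract the reference-row positions, decompose the residuals with an SVD, and keep the leading components. A hedged sketch of that idea (pca_sketch is illustrative only; TracePCA additionally handles masking, coordinate rescaling, and the robust coefficient fits described above):

import numpy as np

def pca_sketch(trace_cen, npca=2):
    """Illustrative PCA decompose-and-reconstruct for (nspec, ntrace) traces."""
    ref = trace_cen[trace_cen.shape[0] // 2]   # reference-row coordinates
    resid = trace_cen - ref[None, :]           # residuals about the reference
    u, s, vt = np.linalg.svd(resid, full_matrices=False)
    basis = u[:, :npca] * s[:npca]             # (nspec, npca) basis vectors
    coeffs = vt[:npca]                         # (npca, ntrace) coefficients
    recon = ref[None, :] + basis @ coeffs      # reconstructed traces
    return recon, basis, coeffs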
Code example #23
def apply_sens_tell_specobjs(specobjs, sens_meta, sens_table, airmass, exptime, extinct_correct=True, tell_correct=False,
                            longitude=None, latitude=None, debug=False, show=False):
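    """
    Apply a sensitivity function, and optionally extinction and telluric
    corrections, to the extracted spectra in specobjs.

    The MASK, FLAM, FLAM_SIG, and FLAM_IVAR entries of each extraction are
    updated in place; nothing is returned.
    """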

    # TODO This function should operate on a single object
    func = sens_meta['FUNC'][0]
    polyorder_vec = sens_meta['POLYORDER_VEC'][0]
    nimgs = len(specobjs)

    if show:
        fig = plt.figure(figsize=(12, 8))
        xmin, xmax = [], []
        ymin, ymax = [], []

    for ispec in range(nimgs):
        # get the ECH_ORDER, ECH_ORDERINDX, WAVELENGTH from your science
        sobj_ispec = specobjs[ispec]
        ## TODO: Comment on the logic here. Hard to follow.
        try:
            ech_order, ech_orderindx, idx = sobj_ispec.ech_order, sobj_ispec.ech_orderindx, sobj_ispec.idx
            msgs.info('Applying sensfunc to Echelle data')
        except (AttributeError, KeyError):
            ech_orderindx = 0
            idx = sobj_ispec.idx
            msgs.info('Applying sensfunc to Longslit/Multislit data')

        for extract_type in ['boxcar', 'optimal']:
            extract = getattr(sobj_ispec, extract_type)

            if len(extract) == 0:
                continue
            msgs.info("Fluxing {:s} extraction for:".format(extract_type) + msgs.newline() + "{}".format(idx))
            wave = extract['WAVE'].value.copy()
            wave_mask = wave > 1.0
            counts = extract['COUNTS'].copy()
            counts_ivar = extract['COUNTS_IVAR'].copy()
            mask = extract['MASK'].copy()

            # get sensfunc from the sens_table
            coeff = sens_table[ech_orderindx]['OBJ_THETA'][0:polyorder_vec[ech_orderindx] + 2]
            wave_min = sens_table[ech_orderindx]['WAVE_MIN']
            wave_max = sens_table[ech_orderindx]['WAVE_MAX']
            sensfunc = np.zeros_like(wave)
            sensfunc[wave_mask] = np.exp(utils.func_val(coeff, wave[wave_mask], func,
                                             minx=wave_min, maxx=wave_max))

            # get telluric from the sens_table
            if tell_correct:
                msgs.work('Evaluate telluric!')
                telluric = None
            else:
                telluric = None

            flam, flam_ivar, outmask = apply_sens_tell_spec(wave, counts, counts_ivar, sensfunc, airmass, exptime,
                                                      mask=mask, extinct_correct=extinct_correct, telluric=telluric,
                                                      longitude=longitude, latitude=latitude, debug=debug)
            flam_sig = np.sqrt(utils.inverse(flam_ivar))
            # The following will be changed directly in the specobjs, so do not need to return anything.
            extract['MASK'] = outmask
            extract['FLAM'] = flam
            extract['FLAM_SIG'] = flam_sig
            extract['FLAM_IVAR'] = flam_ivar

            if show:
                xmin_ispec = wave[wave_mask].min()
                xmax_ispec = wave[wave_mask].max()
                xmin.append(xmin_ispec)
                xmax.append(xmax_ispec)
                ymin_ispec, ymax_ispec = coadd1d.get_ylim(flam, flam_ivar, outmask)
                ymin.append(ymin_ispec)
                ymax.append(ymax_ispec)

                med_width = (2.0 * np.ceil(0.1 / 10.0 * np.size(wave[outmask])) + 1).astype(int)
                flam_med, flam_ivar_med = coadd1d.median_filt_spec(flam, flam_ivar, outmask, med_width)
                if extract_type == 'boxcar':
                    plt.plot(wave[wave_mask], flam_med[wave_mask], color='black', drawstyle='steps-mid', zorder=1, alpha=0.8)
                    #plt.plot(wave[wave_mask], np.sqrt(utils.calc_ivar(flam_ivar_med[wave_mask])), zorder=2, color='m',
                    #         alpha=0.7, drawstyle='steps-mid', linestyle=':')
                else:
                    plt.plot(wave[wave_mask], flam_med[wave_mask], color='dodgerblue', drawstyle='steps-mid', zorder=1, alpha=0.8)
                    #plt.plot(wave[wave_mask], np.sqrt(utils.calc_ivar(flam_ivar_med[wave_mask])), zorder=2, color='red',
                    #         alpha=0.7, drawstyle='steps-mid', linestyle=':')
    if show:
        xmin_final, xmax_final = np.min(xmin), np.max(xmax)
        ymax_final = 1.3*np.median(ymax)
        ymin_final = -0.15*ymax_final
        plt.xlim([xmin_final, xmax_final])
        plt.ylim([ymin_final, ymax_final])
        plt.title('Blue is Optimal extraction and Black is Boxcar extraction',fontsize=16)
        plt.xlabel('Wavelength (Angstrom)')
        plt.ylabel('Flux')
        plt.show()
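
The fluxing arithmetic delegated to apply_sens_tell_spec (not shown here) is assumed to scale counts by the sensitivity function per unit exposure time, with the inverse variance divided by the square of that factor. A minimal sketch under that assumption (flux_spectrum is an illustrative name, not a PypeIt function):

import numpy as np

def flux_spectrum(counts, counts_ivar, sensfunc, exptime):
    """Sketch: F_lam = counts * S / t_exp; ivar scales by the inverse square."""
    factor = sensfunc / exptime
    good = factor > 0
    flam = counts * factor
    flam_ivar = np.zeros_like(counts_ivar)
    flam_ivar[good] = counts_ivar[good] / factor[good]**2
    return flam, flam_ivar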
Code example #24
File: mosaic.py  Project: tbowers7/PypeIt
def build_image_mosaic(imgs,
                       tforms,
                       ivar=None,
                       bpm=None,
                       mosaic_shape=None,
                       cval=0.,
                       order=0,
                       overlap='combine'):
    r"""
    Use the provided images and transformation matrices to construct an image
    mosaic.

    .. warning::

        Beware when using ``order > 0``!

        Bad-pixel masks are *always* mapped to the mosaic image using
        ``order=0`` (i.e., without interpolation).  However, masked pixels are
        not excluded from the input images during the transformation.  For
        higher order interpolations (``order > 0``), this means that the masked
        pixels can contribute to the interpolation for any given output pixel.
        Users should appropriately consider how these pixels will affect the
        mosaic pixels *before* calling this function.

        Similarly, error propagation from the input image to the mosaic image is
        only approximate when ``order > 0``.  Error propagation is performed
        simply by applying the coordinate transform to each variance image with
        the same order as used for the input image, and then combining those
        variances as necessary in overlap regions.

        Tests show that this approach is also not invertible.  That is,
        iteratively transforming the image back and forth between the native
        and mosaic frames leads to image drift.

    Args:
        imgs (:obj:`list`, `numpy.ndarray`_):
            List of `numpy.ndarray`_ images to include in the mosaic.  If arrays
            do not contain floating-point values, they will be cast as
            ``np.float64`` before passing them to
            `scipy.ndimage.affine_transform`_.  The shape of all the input images
            must be identical if ``mosaic_shape`` is None.
        tforms (:obj:`list`, `numpy.ndarray`_):
            List of `numpy.ndarray`_ objects with the transformation matrices
            necessary to convert between image and mosaic coordinates.  See
            :func:`pypeit.core.mosaic.build_image_mosaic_transform`.  The number
            of transforms must match the number of images.  If ``mosaic_shape``
            is None, the transforms are considered in a relative sense.  That
            is, the shape of the output mosaic is determined by applying these
            transforms to the bounding boxes of each image and then determining
            the shape needed to retain all pixels in the input images.  The
            transforms are then adjusted appropriately to map to this shape; see
            :func:`~pypeit.core.mosaic.prepare_mosaic`.  If ``mosaic_shape`` is
            *not* None, these transforms are expected to map directly to the
            output mosaic coordinates.
        ivar (:obj:`list`, `numpy.ndarray`_, optional):
            List of `numpy.ndarray`_ images with the inverse variance of the
            image data.  The number of inverse-variance images must match the
            number of images in the mosaic.  If None, inverse variance is
            returned as None.
        bpm (:obj:`list`, `numpy.ndarray`_, optional):
            List of boolean `numpy.ndarray`_ objects with the bad-pixel mask for
            each image in the mosaic.  The number of bad-pixel masks must match
            the number of images in the mosaic.  If None, all input pixels are
            considered valid.
        mosaic_shape (:obj:`tuple`, optional):
            Shape for the output image.  If None, the shape is determined by
            :func:`pypeit.core.mosaic.prepare_mosaic` and the shape of all the
            input images *must* be identical.
        cval (:obj:`float`, optional):
            The value used to fill empty pixels in the mosaic.
        order (:obj:`int`, optional):
            The order of the spline interpolation of each input image onto the
            mosaic grid.  This is passed directly to
            `scipy.ndimage.affine_transform`_.  The order has to be in the range
            0-5; ``order=0`` is nearest-grid-point interpolation, ``order=1``
            is linear.
        overlap (:obj:`str`, optional):
            Keyword that indicates how to handle pixels in the regions where
            multiple images overlap in the mosaic.  Options are:

                - ``'combine'``: Average the values of the pixels and, if the
                  inverse variance is provided, propagate the error.
                - ``'error'``: Raise an exception.  Largely provided for testing
                  under the expectation that *no* pixels should overlap in the
                  mosaic.

    Returns:
        :obj:`tuple`: Four objects are returned. The first three are
        `numpy.ndarray`_ objects with the mosaic image, its inverse variance
        (None if no inverse variance is provided), and an integer array with the
        number of input pixels in each output pixel.  The last contains the
        detailed transformation matrices applied to each image.  If
        ``mosaic_shape`` is provided, these are identical to the input
        ``tforms``; otherwise, these are the transforms adjusted from the
        relative frame to the absolute mosaic frame given its determined shape;
        see :func:`~pypeit.core.mosaic.prepare_mosaic`.
    """
    # Check the input
    nimg = len(imgs)
    if len(tforms) != nimg:
        msgs.error(
            'Number of image transformations does not match number of images to mosaic.'
        )
    if ivar is not None and len(ivar) != nimg:
        msgs.error(
            'If providing any, must provide inverse-variance for each image in the mosaic.'
        )
    if bpm is not None and len(bpm) != nimg:
        msgs.error(
            'If providing any, must provide bad-pixel masks for each image in the mosaic.'
        )
    if overlap not in ['combine', 'error']:
        msgs.error(
            f'Unknown value for overlap ({overlap}), must be "combine" or "error".'
        )

    if any([not np.issubdtype(img.dtype, np.floating) for img in imgs]):
        msgs.warn(
            'Images must be floating type, and will be recast before transforming.'
        )

    # Get the output shape, if necessary
    if mosaic_shape is None:
        shape = imgs[0].shape
        if not np.all([img.shape == shape for img in imgs]):
            msgs.error(
                'If output mosaic shape is not provided, all input images must have the '
                'same shape!')
        mosaic_shape, _tforms = prepare_mosaic(shape, tforms)
    else:
        _tforms = tforms

    msgs.info(
        f'Constructing image mosaic with {nimg} images and output shape {mosaic_shape}.'
    )

    if ivar is not None:
        var = [inverse(_ivar) for _ivar in ivar]

    mosaic_npix = np.zeros(mosaic_shape, dtype=int)
    mosaic_data = np.zeros(mosaic_shape, dtype=float)
    # NOTE: "mosaic_ivar" is actually the variance until it's inverted just
    # before output
    mosaic_ivar = None if ivar is None else np.zeros(mosaic_shape, dtype=float)

    # TODO: These loops can end up creating and destroying lots of big arrays.
    # Is there a way to make this faster?  If memory becomes an issue, try adding
    #
    #   del _tform_img
    #   gc.collect()
    #
    # at the end of each loop
    for i in range(nimg):
        _inv_tform = np.linalg.inv(_tforms[i])
        img = imgs[i] if np.issubdtype(imgs[i].dtype,
                                       np.floating) else imgs[i].astype(float)
        _tform_img = ndimage.affine_transform(img,
                                              _inv_tform,
                                              output_shape=mosaic_shape,
                                              cval=-1e20,
                                              order=order)
        filled = _tform_img > -1e20
        if bpm is not None:
            _tform_gpm = ndimage.affine_transform(np.logical_not(
                bpm[i]).astype(float),
                                                  _inv_tform,
                                                  output_shape=mosaic_shape,
                                                  cval=0.,
                                                  order=0)
            filled &= _tform_gpm > 0.
        mosaic_data[filled] += _tform_img[filled]
        mosaic_npix[filled] += 1
        if ivar is not None:
            # NOTE: "mosaic_ivar" is actually the variance until it's inverted
            # just before output
            _var = var[i] if np.issubdtype(
                var[i], np.floating) else var[i].astype(float)
            mosaic_ivar[filled] += ndimage.affine_transform(
                _var,
                _inv_tform,
                output_shape=mosaic_shape,
                cval=0.,
                order=order)[filled]

    # TODO: This test is crude.  Input and output pixel sizes should be
    # identical, but I don't know if the order=0 approach will ever lead to a
    # single mosaic pixel being filled by more than one input pixel.  If so,
    # `overlap='error'` will catch both those cases and when multiple input
    # images overlap.
    has_overlap = np.any(mosaic_npix > 1)
    if has_overlap and overlap == 'error':
        # Input images should not be allowed to overlap
        msgs.error(
            'Mosaic has pixels with contributions by more than one input image!'
        )

    filled = mosaic_npix > 0
    mosaic_data[np.logical_not(filled)] = cval
    # Average the overlapping pixels
    if has_overlap:
        mosaic_data[filled] /= mosaic_npix[filled]
    if mosaic_ivar is not None:
        # Propagate the error by averaging the variances
        if has_overlap:
            mosaic_ivar[filled] /= mosaic_npix[filled]
        # Revert to inverse variance
        mosaic_ivar = inverse(mosaic_ivar)

    return mosaic_data, mosaic_ivar, mosaic_npix, _tforms
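
A hypothetical usage sketch (the array shapes, values, and 80-pixel shift are invented): mosaic two same-sized images where the second is translated relative to the first. The exact (row, column) convention of the matrices is set by build_image_mosaic_transform, so treat the placement of the shift entry below as schematic:

import numpy as np
from pypeit.core.mosaic import build_image_mosaic

img1 = np.ones((100, 100))
img2 = 2 * np.ones((100, 100))
identity = np.eye(3)                 # first image defines the reference frame
shift = np.array([[1., 0.,  0.],
                  [0., 1., 80.],     # translate the second image by 80 pixels
                  [0., 0.,  1.]])
mosaic, mosaic_ivar, npix, tforms = build_image_mosaic(
    [img1, img2], [identity, shift],
    ivar=[np.ones_like(img1), np.ones_like(img2)])
# Overlapping pixels are averaged (overlap='combine' is the default).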
Code example #25
File: combineimage.py  Project: p-holguin/PypeIt
    def run(self, bias=None, flatimages=None, ignore_saturation=False, sigma_clip=True,
            bpm=None, sigrej=None, maxiters=5, slits=None, dark=None, combine_method='weightmean'):
        """
        Generate a PypeItImage from a list of images

        This may also generate the ivar, crmask, rn2img and mask

        Args:
            bias (:class:`pypeit.images.buildimage.BiasImage`, optional): Bias image
            flatimages (:class:`pypeit.flatfield.FlatImages`, optional):  For flat fielding
            dark (:class:`pypeit.images.buildimage.DarkImage`, optional): Dark image
            slits (:class:`pypeit.slittrace.SlitTraceSet`, optional): Slit object
            sigma_clip (bool, optional):
                Perform sigma clipping
            sigrej (int or float, optional): Rejection threshold for sigma clipping.
                 Code defaults to determining this automatically based on the number of images provided.
            maxiters (int, optional):
                Number of iterations for the clipping
            bpm (`numpy.ndarray`_, optional):
                Bad pixel mask.  Held in ImageMask
            ignore_saturation (:obj:`bool`, optional):
                If True, turn off the saturation flag in the individual images before stacking.
                This avoids having such values set to 0, which for certain images (e.g. flat
                calibrations) can have unintended consequences.
            combine_method (str):
                Method to combine images.
                Allowed options are 'weightmean', 'median'.

        Returns:
            :class:`pypeit.images.pypeitimage.PypeItImage`:

        """
        # Loop on the files
        nimages = len(self.files)
        lampstat = []
        for kk, ifile in enumerate(self.files):
            # Load raw image
            rawImage = rawimage.RawImage(ifile, self.spectrograph, self.det)
            # Process
            pypeitImage = rawImage.process(self.par, bias=bias, bpm=bpm, dark=dark,
                                           flatimages=flatimages, slits=slits)
            #embed(header='96 of combineimage')
            # Are we all done?
            if nimages == 1:
                return pypeitImage
            elif kk == 0:
                # Get ready
                shape = (nimages, pypeitImage.image.shape[0], pypeitImage.image.shape[1])
                img_stack = np.zeros(shape)
                ivar_stack= np.zeros(shape)
                rn2img_stack = np.zeros(shape)
                crmask_stack = np.zeros(shape, dtype=bool)
                # Mask
                bitmask = imagebitmask.ImageBitMask()
                mask_stack = np.zeros(shape, bitmask.minimum_dtype(asuint=True))
            # Grab the lamp status
            lampstat += [self.spectrograph.get_lamps_status(pypeitImage.rawheadlist)]
            # Process
            img_stack[kk,:,:] = pypeitImage.image
            # Construct raw variance image and turn into inverse variance
            if pypeitImage.ivar is not None:
                ivar_stack[kk, :, :] = pypeitImage.ivar
            else:
                ivar_stack[kk, :, :] = 1.
            # Mask cosmic rays
            if pypeitImage.crmask is not None:
                crmask_stack[kk, :, :] = pypeitImage.crmask
            # Read noise squared image
            if pypeitImage.rn2img is not None:
                rn2img_stack[kk, :, :] = pypeitImage.rn2img
            # Final mask for this image
            # TODO This seems kludgy to me. Why not just pass ignore_saturation to process_one and ignore the saturation
            # when the mask is actually built, rather than untoggling the bit here?
            if ignore_saturation:  # Important for calibrations as we don't want replacement by 0
                indx = pypeitImage.bitmask.flagged(pypeitImage.fullmask, flag=['SATURATION'])
                pypeitImage.fullmask[indx] = pypeitImage.bitmask.turn_off(
                    pypeitImage.fullmask[indx], 'SATURATION')
            mask_stack[kk, :, :] = pypeitImage.fullmask

        # Check that the lamps being combined are all the same:
        if lampstat[1:] != lampstat[:-1]:
            msgs.warn("The following files contain different lamp status")
            # Get the longest strings
            maxlen = max([len("Filename")]+[len(os.path.split(x)[1]) for x in self.files])
            maxlmp = max([len("Lamp status")]+[len(x) for x in lampstat])
            strout = "{0:" + str(maxlen) + "}  {1:s}"
            # Print the messages
            print(msgs.indent() + '-'*maxlen + "  " + '-'*maxlmp)
            print(msgs.indent() + strout.format("Filename", "Lamp status"))
            print(msgs.indent() + '-'*maxlen + "  " + '-'*maxlmp)
            for ff, file in enumerate(self.files):
                print(msgs.indent() + strout.format(os.path.split(file)[1], " ".join(lampstat[ff].split("_"))))
            print(msgs.indent() + '-'*maxlen + "  " + '-'*maxlmp)

        # Coadd them
        weights = np.ones(nimages)/float(nimages)
        img_list = [img_stack]
        var_stack = utils.inverse(ivar_stack)
        var_list = [var_stack, rn2img_stack]
        if combine_method == 'weightmean':
            img_list_out, var_list_out, gpm, nused = combine.weighted_combine(
                weights, img_list, var_list, (mask_stack == 0),
                sigma_clip=sigma_clip, sigma_clip_stack=img_stack, sigrej=sigrej, maxiters=maxiters)
        elif combine_method == 'median':
            img_list_out = [np.median(img_stack, axis=0)]
            var_list_out = [np.median(var_stack, axis=0)]
            var_list_out += [np.median(rn2img_stack, axis=0)]
            gpm = np.ones_like(img_list_out[0], dtype='bool')
        else:
            msgs.error("Bad choice for combine.  Allowed options are 'median', 'weightmean'.")

        # Build the last one
        final_pypeitImage = pypeitimage.PypeItImage(img_list_out[0],
                                                    ivar=utils.inverse(var_list_out[0]),
                                                    bpm=pypeitImage.bpm,
                                                    rn2img=var_list_out[1],
                                                    crmask=np.logical_not(gpm),
                                                    detector=pypeitImage.detector,
                                                    PYP_SPEC=pypeitImage.PYP_SPEC)
        # Internals
        final_pypeitImage.rawheadlist = pypeitImage.rawheadlist
        final_pypeitImage.process_steps = pypeitImage.process_steps

        nonlinear_counts = self.spectrograph.nonlinear_counts(pypeitImage.detector,
                                                              apply_gain=self.par['apply_gain'])
        final_pypeitImage.build_mask(saturation=nonlinear_counts)
        # Return
        return final_pypeitImage
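
These snippets lean on utils.inverse to convert between variance and inverse variance while keeping masked or empty pixels at zero. A minimal stand-in with the assumed behavior (nonpositive entries map to zero rather than inf or nan):

import numpy as np

def inverse(array):
    """Assumed behavior of pypeit.utils.inverse: 1/x where x > 0, else 0."""
    arr = np.asarray(array, dtype=float)
    out = np.zeros_like(arr)
    np.divide(1.0, arr, out=out, where=arr > 0)
    return out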
Code example #26
    def from_file_list(cls,
                       spectrograph,
                       det,
                       par,
                       bpm,
                       file_list,
                       bias,
                       pixel_flat,
                       illum_flat=None,
                       sigma_clip=False,
                       sigrej=None,
                       maxiters=5):
        """
        Instantiate from file list

        This will also generate the ivar, crmask, rn2img and mask

        Args:
            spectrograph (:class:`pypeit.spectrographs.spectrograph.Spectrograph`):
                Spectrograph used to take the data.
            det (:obj:`int`):
                The 1-indexed detector number to process.
            par (:class:`pypeit.par.pypeitpar.ProcessImagesPar`):
                Parameters that dictate the processing of the images.  See
                :class:`pypeit.par.pypeitpar.ProcessImagesPar` for the
                defaults.
            bpm (np.ndarray):
                Bad pixel mask.  Held in ImageMask
            file_list (list):
                List of files
            bias (np.ndarray or None):
                Bias image
            pixel_flat (np.ndarray):
                Flat image
            illum_flat (np.ndarray, optional):
                Illumination image
            sigma_clip (bool, optional):
                If True, sigma-clip the image stack when combining.
            sigrej (int or float, optional): Rejection threshold for sigma clipping.
                 Code defaults to determining this automatically based on the number of images provided.
            maxiters (int, optional):
                Maximum number of rejection iterations for the sigma clipping.

        Returns:
            ScienceImage:

        """
        # Single file?
        if len(file_list) == 1:
            return cls.from_single_file(spectrograph,
                                        det,
                                        par,
                                        bpm,
                                        file_list[0],
                                        bias,
                                        pixel_flat,
                                        illum_flat=illum_flat)

        # Continue with an actual list
        # Get it ready
        nimages = len(file_list)
        shape = (nimages, bpm.shape[0], bpm.shape[1])
        sciimg_stack = np.zeros(shape)
        sciivar_stack = np.zeros(shape)
        rn2img_stack = np.zeros(shape)
        crmask_stack = np.zeros(shape, dtype=bool)

        # Mask
        bitmask = maskimage.ImageBitMask()
        mask_stack = np.zeros(shape, bitmask.minimum_dtype(asuint=True))

        # Loop on the files
        for kk, ifile in enumerate(file_list):
            # Instantiate
            sciImage = ScienceImage.from_single_file(spectrograph,
                                                     det,
                                                     par,
                                                     bpm,
                                                     ifile,
                                                     bias,
                                                     pixel_flat,
                                                     illum_flat=illum_flat)
            # Process
            sciimg_stack[kk, :, :] = sciImage.image
            # Construct raw variance image and turn into inverse variance
            sciivar_stack[kk, :, :] = sciImage.ivar
            # Mask cosmic rays
            crmask_stack[kk, :, :] = sciImage.crmask
            # Build read noise squared image
            rn2img_stack[kk, :, :] = sciImage.build_rn2img()
            # Final mask for this image
            mask_stack[kk, :, :] = sciImage.mask

        # Coadd them
        weights = np.ones(nimages) / float(nimages)
        img_list = [sciimg_stack]
        var_stack = utils.inverse(sciivar_stack, positive=True)
        var_list = [var_stack, rn2img_stack]
        img_list_out, var_list_out, outmask, nused = coadd2d.weighted_combine(
            weights,
            img_list,
            var_list, (mask_stack == 0),
            sigma_clip=sigma_clip,
            sigma_clip_stack=sciimg_stack,
            sigrej=sigrej,
            maxiters=maxiters)

        # Build the last one
        slf = ScienceImage.from_images(spectrograph,
                                       det,
                                       par,
                                       bpm,
                                       img_list_out[0],
                                       utils.inverse(var_list_out[0],
                                                     positive=True),
                                       var_list_out[1],
                                       np.invert(outmask),
                                       files=file_list)
        slf.build_mask(
            saturation=slf.spectrograph.detector[slf.det - 1]['saturation'],
            mincounts=slf.spectrograph.detector[slf.det - 1]['mincounts'])
        # Return
        return slf