def ech_generate_sensfunc(stdframe, spectrograph=None, telluric=True, star_type=None,
                          star_mag=None, ra=None, dec=None, std_file=None,
                          BALM_MASK_WID=5., nresln=None, debug=False):

    if spectrograph is None:
        std_specobjs, std_header = ech_load_specobj(stdframe, order=0)
        spectrograph = std_header['INSTRUME']
        msgs.info('You are working on {:s}'.format(spectrograph))
    ext_final = fits.getheader(stdframe, -1)
    norder = ext_final['ORDER'] + 1

    sens_dicts = {}
    for iord in range(norder):
        std_specobjs, std_header = ech_load_specobj(stdframe, order=iord)
        std_idx = flux.find_standard(std_specobjs)
        std = std_specobjs[std_idx]
        wavemask = std.boxcar['WAVE']>1000.0*units.AA
        wave,counts,ivar = std.boxcar['WAVE'][wavemask],std.boxcar['COUNTS'][wavemask],std.boxcar['COUNTS_IVAR'][wavemask]
        sens_dict = flux.generate_sensfunc(wave,counts,ivar,std_header['AIRMASS'],std_header['EXPTIME'],
                                           spectrograph,star_type=star_type,star_mag=star_mag,
                                           telluric=telluric,ra=ra,dec=dec,BALM_MASK_WID=BALM_MASK_WID,
                                           nresln=nresln,std_file=std_file,debug=debug)
        sens_dict['ech_orderindx'] = iord
        sens_dicts[str(iord)] = sens_dict
    sens_dicts['norder'] = norder

    return sens_dicts
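A minimal usage sketch (assuming the module-level imports above are in place; the file name, star type, and magnitude are illustrative):

# Hedged usage sketch -- inputs are illustrative, not from a real reduction.
sens_dicts = ech_generate_sensfunc('spec1d_std.fits', telluric=True,
                                   star_type='A0', star_mag=8.5, nresln=20)
norder = sens_dicts['norder']
sens0 = sens_dicts['0']   # sensitivity-function dict for the first order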
    def load_master(self, filename, force=False):

        # Does the master file exist?
        if not os.path.isfile(filename):
            # msgs.warn("No Master frame found of type {:s}: {:s}".format(self.frametype, filename))
            msgs.warn("No Master frame found of {:s}".format(filename))
            if force:
                msgs.error("Crashing out because reduce-masters-force=True:" + msgs.newline() + filename)
            return None
        else:
            # msgs.info("Loading a pre-existing master calibration frame of type: {:}".format(self.frametype) + " from filename: {:}".format(filename))
            msgs.info("Loading a pre-existing master calibration frame of SENSFUNC from filename: {:}".format(filename))

            hdu = fits.open(filename)
            norder = hdu[0].header['NORDER']
            sens_dicts = {}
            for iord in range(norder):
                head = hdu[iord + 1].header
                tbl = hdu['SENSFUNC-ORDER{0:04}'.format(iord)].data
                sens_dict = {}
                sens_dict['wave'] = tbl['WAVE']
                sens_dict['sensfunc'] = tbl['SENSFUNC']
                for key in ['wave_min', 'wave_max', 'exptime', 'airmass', 'std_file', 'std_ra', 'std_dec',
                            'std_name', 'cal_file', 'ech_orderindx']:
                    try:
                        sens_dict[key] = head[key.upper()]
                    except KeyError:
                        pass
                sens_dicts[str(iord)] = sens_dict
            sens_dicts['norder'] = norder
            return sens_dicts
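For reference, a hedged sketch of the on-disk layout that load_master() reads (a primary header carrying NORDER plus one binary table per order named SENSFUNC-ORDERnnnn); this layout is inferred from the reader above, not taken from the actual PypeIt writer:

import numpy as np
from astropy.io import fits

def write_master_sensfunc_sketch(filename, sens_dicts):
    """Write sens_dicts in the layout load_master() expects (inferred sketch)."""
    norder = sens_dicts['norder']
    prihdu = fits.PrimaryHDU()
    prihdu.header['NORDER'] = norder
    hdus = [prihdu]
    for iord in range(norder):
        sd = sens_dicts[str(iord)]
        cols = [fits.Column(name='WAVE', format='D', array=np.asarray(sd['wave'])),
                fits.Column(name='SENSFUNC', format='D', array=np.asarray(sd['sensfunc']))]
        tbhdu = fits.BinTableHDU.from_columns(cols, name='SENSFUNC-ORDER{0:04}'.format(iord))
        # Scalar metadata goes into the extension header with upper-cased keys
        # (longer keys such as 'ech_orderindx' would need HIERARCH cards)
        for key in ['wave_min', 'wave_max', 'exptime', 'airmass']:
            if key in sd:
                tbhdu.header[key.upper()] = sd[key]
        hdus.append(tbhdu)
    fits.HDUList(hdus).writeto(filename, overwrite=True)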
Example #3
    def determine_bias_mode(self, prev_build=False):
        """
        Determine the bias mode to use in this reduction
          - None -- No bias subtraction
          - 'overscan' -- Overscan subtract
          - msbias -- Use a generated bias image

        Args:
            prev_build (bool, optional): Load the master frame if it exists
                and was built on this run.

        Returns:
            str, np.ndarray, or None: The value of :attr:`msbias`.

        """
        # How are we treating biases?
        # 1) No bias subtraction
        if self.par['useframe'].lower() == 'none':
            msgs.info("Will not perform bias/dark subtraction")
            self.msbias = None
        # 2) Use overscan
        elif self.par['useframe'] == 'overscan':
            self.msbias = 'overscan'
        # 3) User wants bias subtractions, use a Master biasframe?
        elif self.par['useframe'] in ['bias', 'dark']:
            # Load the MasterFrame if it exists and user requested one to load it
            self.msbias, _ = self.master(prev_build=prev_build)

        return self.msbias
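A short summary of the three modes, as comments (the dict keys shown stand in for the real parameter set):

# Illustrative 'useframe' settings and the resulting msbias values:
#   {'useframe': 'none'}     -> msbias = None         (no bias subtraction)
#   {'useframe': 'overscan'} -> msbias = 'overscan'   (overscan subtraction)
#   {'useframe': 'bias'}     -> msbias = <ndarray>    (master bias image, if built)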
def load_spec_order(fname,objid=None,order=None,extract='OPT',flux=True):
    """
    Loading single order spectrum from a PypeIt 1D specctrum fits file
    :param file:
    :param objid:
    :param order:
    :param extract:
    :param flux:
    :return:
    """
    if objid is None:
        # default to the first object; must be a string for the replace() below
        objid = 'OBJ0000'
    if order is None:
        msgs.error('Please specify which order you want to load')

    # read extension name into a list
    primary_header = fits.getheader(fname, 0)
    nspec = primary_header['NSPEC']
    extnames = [primary_header['EXT0001']] * nspec
    for kk in range(nspec):
        extnames[kk] = primary_header['EXT' + '{0:04}'.format(kk + 1)]
    extnameroot = extnames[0]

    # Figure out which extension is the required data
    ordername = '{0:04}'.format(order)
    extname = extnameroot.replace('OBJ0000', objid)
    extname = extname.replace('ORDER0000', 'ORDER' + ordername)
    try:
        exten = extnames.index(extname) + 1
        msgs.info("Loading extension {:s} of spectrum {:s}".format(extname, fname))
    except ValueError:
        msgs.error("Spectrum {:s} does not contain {:s} extension".format(fname, extname))

    spectrum = load.load_1dspec(fname, exten=exten, extract=extract, flux=flux)
    # Polish a bit -- Deal with NAN, inf, and *very* large values that will exceed
    #   the floating point precision of float32 for var which is sig**2 (i.e. 1e38)
    bad_flux = np.any([np.isnan(spectrum.flux), np.isinf(spectrum.flux),
                       np.abs(spectrum.flux) > 1e30,
                       spectrum.sig ** 2 > 1e10,
                       ], axis=0)
    # Sometimes Echelle spectra have zero wavelength
    bad_wave = spectrum.wavelength < 1000.0*units.AA
    bad_all = bad_flux | bad_wave
    ## trim bad part
    wave_out,flux_out,sig_out = spectrum.wavelength[~bad_all],spectrum.flux[~bad_all],spectrum.sig[~bad_all]
    spectrum_out = XSpectrum1D.from_tuple((wave_out,flux_out,sig_out), verbose=False)
    #if np.sum(bad_flux):
    #    msgs.warn("There are some bad flux values in this spectrum.  Will zero them out and mask them (not ideal)")
    #    spectrum.data['flux'][spectrum.select][bad_flux] = 0.
    #    spectrum.data['sig'][spectrum.select][bad_flux] = 0.

    return spectrum_out
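The extension-name construction above can be exercised in isolation (the root name below is a made-up instance of the OBJnnnn-ORDERnnnn pattern):

# Hypothetical extension root, following the pattern parsed above
extnameroot = 'OBJ0000-ORDER0000-DET01'
objid, order = 'OBJ0001', 3
extname = extnameroot.replace('OBJ0000', objid)
extname = extname.replace('ORDER0000', 'ORDER' + '{0:04}'.format(order))
print(extname)   # OBJ0001-ORDER0003-DET01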
def ech_load_spec(files,objid=None,order=None,extract='OPT',flux=True):
    """
    files: A list of file names
    objid:
    extract:
    flux:
    """

    nfiles = len(files)
    if objid is None:
        objid = ['OBJ0000'] * nfiles
    elif len(objid) == 1:
        objid = objid * nfiles
    elif len(objid) != nfiles:
        msgs.error('The length of objid should be either 1 or equal to the number of spectra files.')

    fname = files[0]
    ext_final = fits.getheader(fname, -1)
    norder = ext_final['ORDER'] + 1
    msgs.info('spectrum {:s} has {:d} orders'.format(fname, norder))
    if norder <= 1:
        msgs.error('The number of orders has to be greater than one for echelle. Longslit data?')

    # Load spectra
    spectra_list = []
    for ii, fname in enumerate(files):

        if order is None:
            msgs.info('Loading all orders into a giant spectrum')
            for iord in range(norder):
                spectrum = load_spec_order(fname,objid=objid[ii],order=iord,extract=extract,flux=flux)
                # Append
                spectra_list.append(spectrum)
        elif order >= norder:
            msgs.error('order number cannot be greater than the total number of orders')
        else:
            spectrum = load_spec_order(fname, objid=objid[ii], order=order, extract=extract, flux=flux)
            # Append
            spectra_list.append(spectrum)
    # Join into one XSpectrum1D object
    spectra = collate(spectra_list)
    # Return
    return spectra
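A minimal usage sketch (file and object names are illustrative):

# Hedged usage sketch -- load all orders of two science exposures
files = ['spec1d_sci1.fits', 'spec1d_sci2.fits']
spectra = ech_load_spec(files, objid=['OBJ0001'], extract='OPT', flux=True)
# spectra is a single collated XSpectrum1D holding norder spectra per input file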
def ech_flux_science(sci_specobjs,sens_dicts,sci_header,spectrograph=None):
    """
    Flux the internal list of sci_specobjs

    Wrapper to flux.apply_sensfunc()

    Returns
    -------

    """
    norder = sens_dicts['norder']
    if spectrograph is None:
        spectrograph = sci_header['INSTRUME']
        msgs.info('You are working on {:s}'.format(spectrograph))
    for iord in range(norder):
        sens_dict = sens_dicts[str(iord)]
        for sci_obj in sci_specobjs:
            if sci_obj.ech_orderindx == iord:
                flux.apply_sensfunc(sci_obj, sens_dict, sci_header['AIRMASS'],
                                      sci_header['EXPTIME'], spectrograph)
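A hedged end-to-end sketch tying the pieces together (file names illustrative; the science-side loading is elided because the loader call depends on context):

sens_dicts = ech_generate_sensfunc('spec1d_std.fits', star_type='A0', star_mag=8.5)
# sci_specobjs, sci_header = ...   # load all orders from the science spec1d file
# ech_flux_science(sci_specobjs, sens_dicts, sci_header)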
Example #7
def main(args):

    # List only?
    if args.list:
        hdu = io.fits_open(args.file)
        hdu.info()
        return

    # Load it up -- NOTE WE ALLOW *OLD* VERSIONS TO GO FORTH
    spec2DObj = spec2dobj.Spec2DObj.from_file(args.file,
                                              args.det,
                                              chk_version=False)

    # Setup for PypeIt imports
    msgs.reset(verbosity=2)

    # Init
    # TODO: get_dnum needs to be deprecated...
    sdet = get_dnum(args.det, prefix=False)

    # Grab the slit edges
    slits = spec2DObj.slits
    if spec2DObj.sci_spat_flexure is not None:
        msgs.info("Offseting slits by {}".format(spec2DObj.sci_spat_flexure))
    all_left, all_right, mask = slits.select_edges(
        flexure=spec2DObj.sci_spat_flexure)
    # TODO -- This may be too restrictive, i.e. ignore BADFLTCALIB??
    gpm = mask == 0
    left = all_left[:, gpm]
    right = all_right[:, gpm]
    slit_IDs = spec2DObj.slits.slitord_id[gpm]
    maskdef_id = spec2DObj.slits.maskdef_id[
        gpm] if spec2DObj.slits.maskdef_id is not None else None

    bitMask = ImageBitMask()

    # Object traces from spec1d file
    spec1d_file = args.file.replace('spec2d', 'spec1d')
    if args.file[-2:] == 'gz':
        spec1d_file = spec1d_file[:-3]
    if os.path.isfile(spec1d_file):
        sobjs = specobjs.SpecObjs.from_fitsfile(spec1d_file)
    else:
        sobjs = None
        msgs.warn('Could not find spec1d file: {:s}'.format(spec1d_file) +
                  msgs.newline() +
                  '                          No objects were extracted.')

    display.connect_to_ginga(raise_err=True, allow_new=True)

    # Now show each image to a separate channel

    # Show the bitmask?
    mask_in = None
    if args.showmask:
        viewer, ch = display.show_image(spec2DObj.bpmmask,
                                        chname="BPM",
                                        waveimg=spec2DObj.waveimg,
                                        clear=True)
        #bpm, crmask, satmask, minmask, offslitmask, nanmask, ivar0mask, ivarnanmask, extractmask \

    # SCIIMG
    image = spec2DObj.sciimg  # Processed science image
    mean, med, sigma = sigma_clipped_stats(image[spec2DObj.bpmmask == 0],
                                           sigma_lower=5.0,
                                           sigma_upper=5.0)
    cut_min = mean - 1.0 * sigma
    cut_max = mean + 4.0 * sigma
    chname_skysub = 'sciimg-det{:s}'.format(sdet)
    # Clear all channels at the beginning
    viewer, ch = display.show_image(image,
                                    chname=chname_skysub,
                                    waveimg=spec2DObj.waveimg,
                                    clear=True)

    if sobjs is not None:
        show_trace(sobjs, args.det, viewer, ch)
    display.show_slits(viewer,
                       ch,
                       left,
                       right,
                       slit_ids=slit_IDs,
                       maskdef_ids=maskdef_id)

    # SKYSUB
    if args.ignore_extract_mask:
        # TODO -- Is there a cleaner way to do this?
        gpm = (spec2DObj.bpmmask == 0) | (spec2DObj.bpmmask == 2**
                                          bitMask.bits['EXTRACT'])
    else:
        gpm = spec2DObj.bpmmask == 0

    image = (spec2DObj.sciimg - spec2DObj.skymodel) * gpm  # sky-subtracted image
    mean, med, sigma = sigma_clipped_stats(image[spec2DObj.bpmmask == 0],
                                           sigma_lower=5.0,
                                           sigma_upper=5.0)
    cut_min = mean - 1.0 * sigma
    cut_max = mean + 4.0 * sigma
    chname_skysub = 'skysub-det{:s}'.format(sdet)
    # Clear all channels at the beginning
    # TODO: JFH For some reason Ginga crashes when I try to put cuts in here.
    viewer, ch = display.show_image(
        image,
        chname=chname_skysub,
        waveimg=spec2DObj.waveimg,
        bitmask=bitMask,
        mask=mask_in)  #, cuts=(cut_min, cut_max),wcs_match=True)
    if not args.removetrace and sobjs is not None:
        show_trace(sobjs, args.det, viewer, ch)
    display.show_slits(viewer,
                       ch,
                       left,
                       right,
                       slit_ids=slit_IDs,
                       maskdef_ids=maskdef_id)

    # SKRESIDS
    chname_skyresids = 'sky_resid-det{:s}'.format(sdet)
    # sky residual map
    image = (spec2DObj.sciimg - spec2DObj.skymodel) * np.sqrt(spec2DObj.ivarmodel) * gpm
    viewer, ch = display.show_image(image,
                                    chname_skyresids,
                                    waveimg=spec2DObj.waveimg,
                                    cuts=(-5.0, 5.0),
                                    bitmask=bitMask,
                                    mask=mask_in)
    if not args.removetrace and sobjs is not None:
        show_trace(sobjs, args.det, viewer, ch)
    display.show_slits(viewer,
                       ch,
                       left,
                       right,
                       slit_ids=slit_IDs,
                       maskdef_ids=maskdef_id)

    # RESIDS
    chname_resids = 'resid-det{:s}'.format(sdet)
    # full model residual map
    image = (spec2DObj.sciimg - spec2DObj.skymodel - spec2DObj.objmodel
             ) * np.sqrt(spec2DObj.ivarmodel) * (spec2DObj.bpmmask == 0)
    viewer, ch = display.show_image(image,
                                    chname=chname_resids,
                                    waveimg=spec2DObj.waveimg,
                                    cuts=(-5.0, 5.0),
                                    bitmask=bitMask,
                                    mask=mask_in)
    if not args.removetrace and sobjs is not None:
        show_trace(sobjs, args.det, viewer, ch)
    display.show_slits(viewer,
                       ch,
                       left,
                       right,
                       slit_ids=slit_IDs,
                       maskdef_ids=maskdef_id)

    # After displaying all the images sync up the images with WCS_MATCH
    shell = viewer.shell()
    shell.start_global_plugin('WCSMatch')
    shell.call_global_plugin_method('WCSMatch', 'set_reference_channel',
                                    [chname_resids], {})

    if args.embed:
        embed()
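main() expects an argparse-style namespace; a hedged sketch of the attributes it reads (names taken from the accesses above, values illustrative):

from types import SimpleNamespace

args = SimpleNamespace(file='spec2d_example.fits', det=1, list=False,
                       showmask=False, ignore_extract_mask=False,
                       removetrace=False, embed=False)
# main(args)   # would open the sciimg/skysub/residual channels in ginga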
Example #8
    def run(self, doqa=True, debug=False, show=False):
        """
        Main driver for tracing arc lines

        Code flow:

            #. Extract an arc spectrum down the center of each slit/order
            #. Loop on slits/orders
                #. Trace and fit the arc lines (This is done twice, once
                   with trace_crude as the tracing crutch, then again
                   with a PCA model fit as the crutch).
                #. Repeat trace.
                #. 2D fit to the offset from slitcen
                #. Save

        Args:
            doqa (bool):
            debug (bool):
            show (bool):

        Returns:
            :class:`WaveTilts`:

        """
        # Extract the arc spectra for all slits
        self.arccen, self.arccen_bpm = self.extract_arcs()

        # TODO: Leave for now.  Used for debugging
        #        self.par['rm_continuum'] = True
        #        debug = True
        #        show = True

        # Subtract arc continuum
        _mstilt = self.mstilt.image.copy()
        if self.par['rm_continuum']:
            continuum = self.model_arc_continuum(debug=debug)
            _mstilt -= continuum
            if debug:
                # TODO: Put this into a function
                vmin, vmax = visualization.ZScaleInterval().get_limits(_mstilt)
                w, h = plt.figaspect(1)
                fig = plt.figure(figsize=(3 * w, h))
                ax = fig.add_axes([0.15 / 3, 0.1, 0.8 / 3, 0.8])
                ax.imshow(self.mstilt.image,
                          origin='lower',
                          interpolation='nearest',
                          aspect='auto',
                          vmin=vmin,
                          vmax=vmax)
                ax.set_title('MasterArc')
                ax = fig.add_axes([1.15 / 3, 0.1, 0.8 / 3, 0.8])
                ax.imshow(continuum,
                          origin='lower',
                          interpolation='nearest',
                          aspect='auto',
                          vmin=vmin,
                          vmax=vmax)
                ax.set_title('Continuum')
                ax = fig.add_axes([2.15 / 3, 0.1, 0.8 / 3, 0.8])
                ax.imshow(_mstilt,
                          origin='lower',
                          interpolation='nearest',
                          aspect='auto',
                          vmin=vmin,
                          vmax=vmax)
                ax.set_title('MasterArc - Continuum')
                plt.show()

        # Final tilts image
        self.final_tilts = np.zeros(self.shape_science, dtype=float)
        max_spat_dim = (np.asarray(self.par['spat_order']) + 1).max()
        max_spec_dim = (np.asarray(self.par['spec_order']) + 1).max()
        self.coeffs = np.zeros((max_spec_dim, max_spat_dim, self.slits.nslits))
        self.spat_order = np.zeros(self.slits.nslits, dtype=int)
        self.spec_order = np.zeros(self.slits.nslits, dtype=int)

        # TODO sort out show methods for debugging
        if show:
            viewer, ch = display.show_image(self.mstilt.image *
                                            (self.slitmask > -1),
                                            chname='tilts')

        # Loop on all slits
        for slit_idx, slit_spat in enumerate(self.slits.spat_id):
            if self.tilt_bpm[slit_idx]:
                continue
            #msgs.info('Computing tilts for slit {0}/{1}'.format(slit, self.slits.nslits-1))
            msgs.info('Computing tilts for slit {0}/{1}'.format(
                slit_idx, self.slits.nslits))
            # Identify lines for tracing tilts
            msgs.info('Finding lines for tilt analysis')
            self.lines_spec, self.lines_spat \
                    = self.find_lines(self.arccen[:,slit_idx], self.slitcen[:,slit_idx],
                                      slit_idx,
                                      bpm=self.arccen_bpm[:,slit_idx], debug=debug)

            if self.lines_spec is None:
                self.slits.mask[slit_idx] = self.slits.bitmask.turn_on(
                    self.slits.mask[slit_idx], 'BADTILTCALIB')
                continue

            thismask = self.slitmask == slit_spat

            # Performs the initial tracing of the line centroids as a
            # function of spatial position resulting in 1D traces for
            # each line.
            msgs.info('Trace the tilts')
            self.trace_dict = self.trace_tilts(_mstilt, self.lines_spec,
                                               self.lines_spat, thismask,
                                               self.slitcen[:, slit_idx])

            # TODO: Show the traces before running the 2D fit

            if show:
                display.show_tilts(viewer, ch, self.trace_dict)

            self.spat_order[slit_idx] = self._parse_param(
                self.par, 'spat_order', slit_idx)
            self.spec_order[slit_idx] = self._parse_param(
                self.par, 'spec_order', slit_idx)
            # 2D model of the tilts, includes construction of QA
            # NOTE: This also fills in self.all_fit_dict and self.all_trace_dict
            coeff_out = self.fit_tilts(self.trace_dict,
                                       thismask,
                                       self.slitcen[:, slit_idx],
                                       self.spat_order[slit_idx],
                                       self.spec_order[slit_idx],
                                       slit_idx,
                                       doqa=doqa,
                                       show_QA=show)
            self.coeffs[:self.spec_order[slit_idx] +
                        1, :self.spat_order[slit_idx] + 1,
                        slit_idx] = coeff_out

            # TODO: Need a way to assess the success of fit_tilts and
            # flag the slit if it fails

            # Tilts are created with the size of the original slitmask,
            # which corresponds to the same binning as the science
            # images, trace images, and pixelflats etc.
            self.tilts = tracewave.fit2tilts(self.slitmask_science.shape,
                                             coeff_out, self.par['func2d'])
            # Save to final image
            thismask_science = self.slitmask_science == slit_spat
            self.final_tilts[thismask_science] = self.tilts[thismask_science]

        if debug:
            # TODO: Add this to the show method?
            vmin, vmax = visualization.ZScaleInterval().get_limits(_mstilt)
            plt.imshow(_mstilt,
                       origin='lower',
                       interpolation='nearest',
                       aspect='auto',
                       vmin=vmin,
                       vmax=vmax)
            for slit_idx, slit_spat in enumerate(self.slits.spat_id):
                spat = self.all_trace_dict[slit_idx]['tilts_spat']
                spec = self.all_trace_dict[slit_idx]['tilts']
                spec_fit = self.all_trace_dict[slit_idx]['tilts_fit']
                in_fit = self.all_trace_dict[slit_idx]['tot_mask']
                not_fit = np.invert(in_fit) & (spec > 0)
                fit_rej = in_fit & np.invert(
                    self.all_trace_dict[slit_idx]['fit_mask'])
                fit_keep = in_fit & self.all_trace_dict[slit_idx]['fit_mask']
                plt.scatter(spat[not_fit],
                            spec[not_fit],
                            color='C1',
                            marker='.',
                            s=30,
                            lw=0)
                plt.scatter(spat[fit_rej],
                            spec[fit_rej],
                            color='C3',
                            marker='.',
                            s=30,
                            lw=0)
                plt.scatter(spat[fit_keep],
                            spec[fit_keep],
                            color='k',
                            marker='.',
                            s=30,
                            lw=0)
                with_fit = np.invert(np.all(np.invert(fit_keep), axis=0))
                for t in range(in_fit.shape[1]):
                    if not with_fit[t]:
                        continue
                    l, r = np.nonzero(in_fit[:, t])[0][[0, -1]]
                    plt.plot(spat[l:r + 1, t], spec_fit[l:r + 1, t], color='k')
            plt.show()

        # Record the Mask
        bpmtilts = np.zeros_like(self.slits.mask,
                                 dtype=self.slits.bitmask.minimum_dtype())
        for flag in ['BADTILTCALIB']:
            bpm = self.slits.bitmask.flagged(self.slits.mask, flag)
            if np.any(bpm):
                bpmtilts[bpm] = self.slits.bitmask.turn_on(bpmtilts[bpm], flag)

        # Build and return DataContainer
        tilts_dict = {
            'coeffs': self.coeffs,
            'func2d': self.par['func2d'],
            'nslit': self.slits.nslits,
            'spat_order': self.spat_order,
            'spec_order': self.spec_order,
            'spat_id': self.slits.spat_id,
            'bpmtilts': bpmtilts,
            'spat_flexure': self.spat_flexure,
            'PYP_SPEC': self.spectrograph.name
        }
        return WaveTilts(**tilts_dict)
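The per-slit coefficients above are zero-padded into one array sized by the largest orders; a small sketch of how a lower-order slit's coefficients sit inside it (dimensions are stand-ins):

import numpy as np

max_spec_dim, max_spat_dim, nslits = 6, 4, 3
coeffs = np.zeros((max_spec_dim, max_spat_dim, nslits))
coeff_out = np.arange(12.0).reshape(4, 3)   # stand-in fit: spec_order=3, spat_order=2
coeffs[:4, :3, 0] = coeff_out               # entries beyond this slit's orders stay zero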
Example #9
    def run(self,
            setup_only=False,
            calibration_check=False,
            use_header_id=False,
            sort_dir=None,
            write_bkg_pairs=False):
        """
        Once instantiated, this is the main method used to construct the
        object.
        
        The code flow is as follows::
            - Build the fitstbl from an input file_list (optional)
            - Type the files (bias, arc, etc.)
            - Match calibration files to the science files
            - Generate the setup_dict
                - Write group info to disk
                - Write calib info to disk (if main run)

        It is expected that a user will run this three times if they're
        being careful.  Once with `setup_only=True` to confirm the
        images are properly typed and grouped together for calibration.
        A second time with `calibration_check=True` to confirm the
        appropriate calibrations frames are available.  And a third time
        to do the actual setup before proceeding with the reductions.

        Args:
            setup_only (:obj:`bool`, optional):
                Only this setup will be performed.  PypeIt is expected to
                execute in a way that ends after this class is fully
                instantiated such that the user can inspect the results
                before proceeding.  This has the effect of providing
                more output describing the success of the setup and how
                to proceed, and provides warnings (instead of errors)
                for issues that may cause the reduction itself to fail.
            calibration_check (:obj:`bool`, optional):
                Only check that the calibration frames are appropriately
                set up and exist on disk.  PypeIt is expected to execute
                in a way that ends after this class is fully
                instantiated such that the user can inspect the results
                before proceeding. 
            use_header_id (:obj:`bool`, optional):
                Allow setup to use the frame types drawn from single,
                instrument-specific header keywords set to `idname` in
                the metadata table (:attr:`fitstbl`).
            sort_dir (:obj:`str`, optional):
                The directory to put the '.sorted' file.

        Returns:
            :class:`pypeit.par.pypeitpar.PypeItPar`,
            :class:`pypeit.spectrographs.spectrograph.Spectrograph`,
            :class:`astropy.table.Table`: Returns the attributes
            :attr:`par`, :attr:`spectrograph`, :attr:`fitstbl`
            If running with `setup_only` or
            `calibration_check`, these are all returned as `None`
            values.
        """
        # Kludge
        pypeit_file = '' if self.pypeit_file is None else self.pypeit_file

        # Build fitstbl
        if self.fitstbl is None:
            self.build_fitstbl(strict=not setup_only)  #, bkg_pairs=bkg_pairs)

        # File typing
        self.get_frame_types(flag_unknown=setup_only or calibration_check,
                             use_header_id=use_header_id)

        # Determine the configurations and assign each frame to the
        # specified configuration
        ignore_frames = ['bias', 'dark']
        cfgs = self.fitstbl.unique_configurations(ignore_frames=ignore_frames)
        self.fitstbl.set_configurations(cfgs, ignore_frames=ignore_frames)

        # Assign frames to calibration groups
        self.fitstbl.set_calibration_groups(global_frames=['bias', 'dark'])

        # Set default comb_id (only done if needed)
        self.fitstbl.set_defaults()

        # Assign science IDs based on the calibrations groups (to be
        # deprecated)
        #self.fitstbl.calib_to_science()
        self.fitstbl['failures'] = False  # TODO: placeholder

        #        if self.par['scienceimage'] is not None and self.par['scienceimage']['nodding']:
        #            self.match_ABBA()

        if setup_only:
            # Collate all matching files and write .sorted Table (on pypeit_setup only)
            sorted_file = self.spectrograph.spectrograph + '.sorted' \
                                if pypeit_file is None or len(pypeit_file) == 0 \
                                else pypeit_file.replace('.pypeit', '.sorted')
            if sort_dir is not None:
                sorted_file = os.path.join(sort_dir,
                                           os.path.split(sorted_file)[1])
            self.fitstbl.write_sorted(sorted_file,
                                      write_bkg_pairs=write_bkg_pairs)
            msgs.info("Wrote sorted file data to {:s}".format(sorted_file))

        else:
            # Write the calib file
            calib_file = self.spectrograph.spectrograph + '.calib' \
                                if pypeit_file is None or len(pypeit_file) == 0 \
                                else pypeit_file.replace('.pypeit', '.calib')
            if sort_dir is not None:
                calib_file = os.path.join(sort_dir,
                                          os.path.split(calib_file)[1])
            self.fitstbl.write_calib(calib_file)

        # Finish (depends on PypeIt run mode)
        # TODO: Do we need this functionality?
        if calibration_check:
            msgs.info("Inspect the .calib file: {:s}".format(calib_file))
            msgs.info(
                "*********************************************************")
            msgs.info("Calibration check complete and successful!")
            #            msgs.info("Set 'run calcheck False' to continue with data reduction")
            msgs.info(
                "*********************************************************")
            #TODO: Why should this not return the same as when setup_only is True

        if setup_only:
            for idx in np.where(self.fitstbl['failures'])[0]:
                msgs.warn(
                    "No Arc found: Skipping object {:s} with file {:s}".format(
                        self.fitstbl['target'][idx],
                        self.fitstbl['filename'][idx]))
            msgs.info("Setup is complete.")
            msgs.info("Inspect the .sorted file")
            return None, None, None

        return self.par, self.spectrograph, self.fitstbl
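A hedged sketch of the three-pass workflow described in the docstring (the construction of the setup object itself is elided):

# setup = PypeItSetup(...)                  # assumed constructor, not shown here
# setup.run(setup_only=True)                # 1) confirm typing/grouping; writes .sorted
# setup.run(calibration_check=True)         # 2) confirm calibration frames; writes .calib
# par, spectrograph, fitstbl = setup.run()  # 3) full setup before reduction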
Example #10
def slit_match(x_det,
               x_model,
               step=1,
               xlag_range=[-50, 50],
               sigrej=3,
               print_matches=False,
               edge=None):
    """
    Function that performs the slit-edge matching.

    This method uses :func:`discrete_correlate_match` to find the
    indices of x_model that match x_det.

    Taken from DEEP2/spec2d/pro/deimos_slit_match.pro

    Parameters
    ----------
    x_det: `numpy.ndarray`_
        1D array of slit edge spatial positions found from image.
    x_model: `numpy.ndarray`_
        1D array of slit edge spatial positions predicted by the
        optical model.
    step: :obj:`int`, optional
        Step size in pixels used to generate a list of possible
        offsets within the `xlag_range`.
    xlag_range: :obj:`list`, optional
        Range of offsets in pixels allowed between the slit
        positions predicted by the mask design and the traced
        slit positions.
    sigrej: :obj:`float`, optional
        Reject slit matches larger than this number of sigma in
        the match residuals.
    print_matches: :obj:`bool`, optional
        Print the result of the matching.
    edge: :obj:`str`, optional
        String that indicates which edges are being plotted,
        i.e., left or right. Ignored if ``print_matches`` is
        False.

    Returns
    -------
    ind: `numpy.ndarray`_
        1D array of indices for `x_model`, which defines the matches
        to `x_det`, i.e., `x_det` matches `x_model[ind]`
    dupl: `numpy.ndarray`_
        1D array of `bool` that flags which `ind` are duplicates.
    coeff: `numpy.ndarray`_
        pypeitFit coefficients of the fitted relation between `x_det`
        and `x_model[ind]`
    sigres: :obj:`float`
        RMS residual for the fitted relation between `x_det` and
        `x_model[ind]`

    """
    # Determine the indices of `x_model` that match `x_det`
    ind = discrete_correlate_match(x_det,
                                   np.ma.masked_equal(x_model, -1),
                                   step=step,
                                   xlag_range=xlag_range)

    # Define the weights for the fitting
    residual = (x_det - x_model[ind]) - np.median(x_det - x_model[ind])
    weights = np.zeros(residual.size, dtype=int)
    weights[np.abs(residual) < 100.] = 1
    if weights.sum() == 0:
        weights = np.ones(residual.size, dtype=int)
    # Fit between `x_det` and `x_model[ind]`
    pypeitFit = fitting.robust_fit(x_model[ind],
                                   x_det,
                                   1,
                                   maxiter=100,
                                   weights=weights,
                                   lower=3,
                                   upper=3)
    coeff = pypeitFit.fitc
    yfit = pypeitFit.eval(x_model[ind])

    # compute residuals
    res = yfit - x_det
    sigres = sigma_clipped_stats(res, sigma=sigrej)[2]  # RMS residuals
    # flag matches with residuals > `sigrej` times the RMS (fall back to a
    # fixed 5-pixel cut when there are too few matches to estimate the RMS)
    cut = 5 if res.size < 5 else sigrej * sigres
    out = np.abs(res) > cut

    # check for duplicate indices
    dupl = np.ones(ind.size, dtype=bool)
    # If there are duplicates of `ind`, for now we keep only the first one. We don't remove the others yet
    dupl[np.unique(ind, return_index=True)[1]] = False
    wdupl = np.where(dupl)[0]
    # Iterate over the duplicates flagged as bad
    if wdupl.size > 0:
        for i in range(wdupl.size):
            duplind = ind[wdupl[i]]
            # Where are the other duplicates of this `ind`?
            w = np.where(ind == duplind)[0]
            # set those to be bad (for the moment)
            dupl[w] = True
            # Among the duplicates of this particular `ind`, which one has the smallest residual?
            wdif = np.argmin(np.abs(res[w]))
            # The one with the smallest residuals, is then set to not bad
            dupl[w[wdif]] = False
        # Both duplicates and matches with high RMS are considered bad
        dupl = dupl | out
        if edge is not None:
            msgs.warn('{} duplicate match(es) for {} edges'.format(
                dupl[dupl == 1].size, edge))
        else:
            msgs.warn('{} duplicate match(es)'.format(dupl[dupl == 1].size))
        # I commented the 3 lines below because I don't really need to trim the duplicate matches. I just
        # propagate the flag.
        # good = dupl == 0
        # ind = ind[good]
        # x_det=x_det[good]
    if print_matches:
        if edge is not None:
            msgs.info('-----------------------------------------------')
            msgs.info('             {} slit edges               '.format(edge))
        msgs.info('-----------------------------------------------')
        msgs.info('Index      omodel_edge       spat_edge               ')
        msgs.info('-----------------------------------------------')
        for i in range(ind.size):
            msgs.info('{}  {}  {}'.format(ind[i], x_model[ind][i], x_det[i]))
        msgs.info('-----------------------------------------------')
    return ind, dupl, coeff, sigres
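The duplicate-resolution logic above can be exercised on its own with synthetic indices and residuals (plain numpy, no PypeIt dependency):

import numpy as np

# Synthetic match indices and residuals: model index 4 is matched twice.
ind = np.array([2, 4, 4, 7])
res = np.array([0.1, 0.5, 0.05, -0.2])

dupl = np.ones(ind.size, dtype=bool)
dupl[np.unique(ind, return_index=True)[1]] = False   # keep first occurrence of each index
for i in np.where(dupl)[0]:
    w = np.where(ind == ind[i])[0]
    dupl[w] = True                                   # flag the whole duplicate group...
    dupl[w[np.argmin(np.abs(res[w]))]] = False       # ...then keep the smallest residual
print(dupl)   # [False  True False False]: the 0.5-residual duplicate stays flagged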
Example #11
    def find_objects_pypeline(self,
                              image,
                              std=False,
                              std_trace=None,
                              maskslits=None,
                              manual_extract_dict=None,
                              show_peaks=False,
                              show_fits=False,
                              show_trace=False,
                              show=False,
                              debug=False):
        """
        Find objects in the slits. This is currently set up only for ARMS

        Wrapper to extract.objfind

        Parameters
        ----------
        image : ndarray
           Image in which to find objects

        Optional Parameters
        -------------------
        show_peaks : bool
          Generate QA showing peaks identified by object finding

        show_fits : bool
          Generate QA showing fits to traces

        show_trace : bool
          Generate QA showing traces identified. Requires an open ginga RC modules window

        Returns
        -------
        specobjs : Specobjs object
            Container holding Specobj objects
        nobj : int
            Number of objects identified
        skymask : ndarray
            Boolean image indicating which pixels are useful for global sky subtraction

        """
        self.maskslits = self.maskslits if maskslits is None else maskslits
        gdslits = np.where(np.invert(self.maskslits))[0]

        # create the output image for skymask
        skymask = np.zeros_like(image, dtype=bool)
        # Instantiate the specobjs container
        sobjs = specobjs.SpecObjs()

        # Loop on slits
        for slit in gdslits:
            qa_title = "Finding objects on slit # {:d}".format(slit)
            msgs.info(qa_title)
            thismask = (self.slitmask == slit)
            inmask = (self.sciImg.mask == 0) & thismask
            # Find objects
            specobj_dict = {
                'setup': self.setup,
                'slitid': slit,
                'orderindx': 999,
                'det': self.det,
                'objtype': self.objtype,
                'pypeline': self.pypeline
            }

            # TODO we need to add QA paths and QA hooks. QA should be
            # done through objfind where all the relevant information
            # is. This will be a png file(s) per slit.

            sig_thresh = 30.0 if std else self.redux_par['sig_thresh']
            #
            sobjs_slit, skymask[thismask] = \
                extract.objfind(image, thismask, self.tslits_dict['slit_left'][:,slit],
                                self.tslits_dict['slit_righ'][:,slit], inmask=inmask,
                                ncoeff=self.redux_par['trace_npoly'], std_trace=std_trace,
                                sig_thresh=sig_thresh, hand_extract_dict=manual_extract_dict,
                                specobj_dict=specobj_dict, show_peaks=show_peaks,
                                show_fits=show_fits, show_trace=show_trace,
                                trim_edg=self.redux_par['find_trim_edge'],
                                qa_title=qa_title, nperslit=self.redux_par['maxnumber'])
            sobjs.add_sobj(sobjs_slit)

        # Steps
        self.steps.append(inspect.stack()[0][3])
        if show:
            self.show('image',
                      image=image * (self.sciImg.mask == 0),
                      chname='objfind',
                      sobjs=sobjs,
                      slits=True)

        # Return
        return sobjs, len(sobjs), skymask
Example #12
    def __init__(self,
                 pypeit_file,
                 verbosity=2,
                 overwrite=True,
                 reuse_masters=False,
                 logname=None,
                 show=False,
                 redux_path=None,
                 calib_only=False):

        # Set up logging
        self.logname = logname
        self.verbosity = verbosity
        self.pypeit_file = pypeit_file

        self.msgs_reset()

        # Load
        cfg_lines, data_files, frametype, usrdata, setups, _ \
                = parse_pypeit_file(pypeit_file, runtime=True)
        self.calib_only = calib_only

        # Spectrograph
        cfg = ConfigObj(cfg_lines)
        spectrograph_name = cfg['rdx']['spectrograph']
        self.spectrograph = load_spectrograph(spectrograph_name)
        msgs.info('Loaded spectrograph {0}'.format(self.spectrograph.name))

        # --------------------------------------------------------------
        # Get the full set of PypeIt parameters
        #   - Grab a science or standard file for configuration specific parameters

        config_specific_file = None
        for idx, row in enumerate(usrdata):
            if ('science' in row['frametype']) or ('standard'
                                                   in row['frametype']):
                config_specific_file = data_files[idx]
        # search for arcs, trace if no scistd was there
        if config_specific_file is None:
            for idx, row in enumerate(usrdata):
                if ('arc' in row['frametype']) or ('trace'
                                                   in row['frametype']):
                    config_specific_file = data_files[idx]
        if config_specific_file is not None:
            msgs.info(
                'Setting configuration-specific parameters using {0}'.format(
                    os.path.split(config_specific_file)[1]))
        spectrograph_cfg_lines = self.spectrograph.config_specific_par(
            config_specific_file).to_config()

        #   - Build the full set, merging with any user-provided
        #     parameters
        self.par = PypeItPar.from_cfg_lines(cfg_lines=spectrograph_cfg_lines,
                                            merge_with=cfg_lines)
        msgs.info('Built full PypeIt parameter set.')

        # Check the output paths are ready
        if redux_path is not None:
            self.par['rdx']['redux_path'] = redux_path

        # TODO: Write the full parameter set here?
        # --------------------------------------------------------------

        # --------------------------------------------------------------
        # Build the meta data
        #   - Re-initialize based on the file data
        msgs.info('Compiling metadata')
        self.fitstbl = PypeItMetaData(self.spectrograph,
                                      self.par,
                                      files=data_files,
                                      usrdata=usrdata,
                                      strict=True)
        #   - Interpret automated or user-provided data from the PypeIt
        #   file
        self.fitstbl.finalize_usr_build(frametype, setups[0])

        # --------------------------------------------------------------
        #   - Write .calib file (For QA naming amongst other things)
        calib_file = pypeit_file.replace('.pypeit', '.calib')
        self.fitstbl.write_calib(calib_file)

        # Other Internals
        self.overwrite = overwrite

        # Currently the runtime argument determines the behavior for
        # reuse_masters.
        self.reuse_masters = reuse_masters
        self.show = show

        # Set paths
        self.calibrations_path = os.path.join(
            self.par['rdx']['redux_path'],
            self.par['calibrations']['master_dir'])

        # Check for calibrations
        if not self.calib_only:
            calibrations.check_for_calibs(
                self.par,
                self.fitstbl,
                raise_error=self.par['calibrations']['raise_chk_error'])

        # Report paths
        msgs.info('Setting reduction path to {0}'.format(
            self.par['rdx']['redux_path']))
        msgs.info('Master calibration data output to: {0}'.format(
            self.calibrations_path))
        msgs.info('Science data output to: {0}'.format(self.science_path))
        msgs.info('Quality assessment plots output to: {0}'.format(
            self.qa_path))
        # TODO: Is anything written to the qa dir or only to qa/PNGs?
        # Should we have separate calibration and science QA
        # directories?
        # An html file wrapping them all too

        # Init
        # TODO: I don't think this is ever used

        self.det = None

        self.tstart = None
        self.basename = None
        self.sciI = None
        self.obstime = None
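A hedged instantiation sketch (the .pypeit file name is illustrative; the reduction itself is driven by methods outside this excerpt):

# pypeit = PypeIt('shane_kast_blue_A.pypeit', verbosity=2,
#                 reuse_masters=True, show=False)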
Example #13
def lacosmic(det,
             sciframe,
             saturation,
             nonlinear,
             varframe=None,
             maxiter=1,
             grow=1.5,
             remove_compact_obj=True,
             sigclip=5.0,
             sigfrac=0.3,
             objlim=5.0):
    """
    Identify cosmic rays using the L.A.Cosmic algorithm
    U{http://www.astro.yale.edu/dokkum/lacosmic/}
    (article : U{http://arxiv.org/abs/astro-ph/0108003})
    This routine is mostly courtesy of Malte Tewes

    Args:
        det:
        sciframe:
        saturation:
        nonlinear:
        varframe:
        maxiter:
        grow:
        remove_compact_obj:
        sigclip:
        sigfrac:
        objlim:

    Returns:
        ndarray: mask of cosmic rays (0=no CR, 1=CR)

    """

    dnum = parse.get_dnum(det)

    msgs.info("Detecting cosmic rays with the L.A.Cosmic algorithm")
    #    msgs.work("Include these parameters in the settings files to be adjusted by the user")
    # Set the settings
    scicopy = sciframe.copy()
    crmask = np.zeros(sciframe.shape, dtype=bool)
    sigcliplow = sigclip * sigfrac

    # Determine if there are saturated pixels
    satpix = np.zeros_like(sciframe)
    #    satlev = settings_det['saturation']*settings_det['nonlinear']
    satlev = saturation * nonlinear
    wsat = np.where(sciframe >= satlev)
    if wsat[0].size == 0:
        satpix = None
    else:
        satpix[wsat] = 1.0
        satpix = satpix.astype(bool)

    # Define the kernels
    laplkernel = np.array([[0.0, -1.0, 0.0], [-1.0, 4.0, -1.0],
                           [0.0, -1.0, 0.0]])  # Laplacian kernel
    growkernel = np.ones((3, 3))
    for i in range(1, maxiter + 1):
        msgs.info("Convolving image with Laplacian kernel")
        # Subsample, convolve, clip negative values, and rebin to original size
        subsam = utils.subsample(scicopy)
        conved = signal.convolve2d(subsam,
                                   laplkernel,
                                   mode="same",
                                   boundary="symm")
        cliped = conved.clip(min=0.0)
        lplus = utils.rebin_evlist(cliped, np.array(cliped.shape) / 2.0)

        msgs.info("Creating noise model")
        # Build a custom noise map and compare this to the Laplacian
        m5 = ndimage.median_filter(scicopy, size=5, mode='mirror')
        if varframe is None:
            noise = np.sqrt(np.abs(m5))
        else:
            noise = np.sqrt(varframe)
        msgs.info("Calculating Laplacian signal to noise ratio")

        # Laplacian S/N
        # Note that the 2.0 is from the 2x2 subsampling
        s = lplus / (2.0 * noise)

        # Remove the large structures
        sp = s - ndimage.median_filter(s, size=5, mode='mirror')

        msgs.info("Selecting candidate cosmic rays")
        # Candidate cosmic rays (this will include HII regions)
        candidates = sp > sigclip
        nbcandidates = np.sum(candidates)

        msgs.info("{0:5d} candidate pixels".format(nbcandidates))

        # At this stage we use the saturated stars to mask the candidates, if available :
        if satpix is not None:
            msgs.info("Masking saturated pixels")
            candidates = np.logical_and(np.logical_not(satpix), candidates)
            nbcandidates = np.sum(candidates)

            msgs.info(
                "{0:5d} candidate pixels not part of saturated stars".format(
                    nbcandidates))

        msgs.info("Building fine structure image")

        # We build the fine structure image :
        m3 = ndimage.median_filter(scicopy, size=3, mode='mirror')
        m37 = ndimage.median_filter(m3, size=7, mode='mirror')
        f = m3 - m37
        f /= noise
        f = f.clip(min=0.01)

        msgs.info("Removing suspected compact bright objects")

        # Now we have our better selection of cosmics :

        if remove_compact_obj:
            cosmics = np.logical_and(candidates, sp / f > objlim)
        else:
            cosmics = candidates
        nbcosmics = np.sum(cosmics)

        msgs.info("{0:5d} remaining candidate pixels".format(nbcosmics))

        # What follows is a special treatment for neighbors, with more relaxed constraints.

        msgs.info("Finding neighboring pixels affected by cosmic rays")

        # We grow these cosmics a first time to determine the immediate neighborhood:
        growcosmics = signal.convolve2d(cosmics.astype(np.float32),
                                        growkernel,
                                        mode="same",
                                        boundary="symm").astype(bool)

        # From this grown set, we keep those that have sp > sigmalim
        # so obviously not requiring sp/f > objlim, otherwise it would be pointless
        growcosmics = np.logical_and(sp > sigclip, growcosmics)

        # Now we repeat this procedure, but lower the detection limit to sigmalimlow :

        finalsel = signal.convolve2d(growcosmics.astype(np.float32),
                                     growkernel,
                                     mode="same",
                                     boundary="symm").astype(bool)
        finalsel = np.logical_and(sp > sigcliplow, finalsel)

        # Unmask saturated pixels:
        if satpix is not None:
            msgs.info("Masking saturated stars")
            finalsel = np.logical_and(np.logical_not(satpix), finalsel)

        ncrp = np.sum(finalsel)

        msgs.info("{0:5d} pixels detected as cosmics".format(ncrp))

        # We find how many cosmics are not yet known :
        newmask = np.logical_and(np.logical_not(crmask), finalsel)
        nnew = np.sum(newmask)

        # We update the mask with the cosmics we have found :
        crmask = np.logical_or(crmask, finalsel)

        msgs.info(
            "Iteration {0:d} -- {1:d} pixels identified as cosmic rays ({2:d} new)"
            .format(i, ncrp, nnew))
        if ncrp == 0: break
    # Additional algorithms (not traditionally implemented by LA cosmic) to remove some false positives.
    msgs.work(
        "The following algorithm would be better on the rectified, tilts-corrected image"
    )
    filt = ndimage.sobel(sciframe, axis=1, mode='constant')
    filty = ndimage.sobel(filt / np.sqrt(np.abs(sciframe)),
                          axis=0,
                          mode='constant')
    filty[np.where(np.isnan(filty))] = 0.0

    sigimg = cr_screen(filty)

    sigsmth = ndimage.gaussian_filter(sigimg, 1.5)
    sigsmth[np.where(np.isnan(sigsmth))] = 0.0
    sigmask = np.zeros(sciframe.shape, dtype=bool)
    sigmask[np.where(sigsmth > sigclip)] = True
    crmask = np.logical_and(crmask, sigmask)
    msgs.info("Growing cosmic ray mask by 1 pixel")
    crmask = grow_masked(crmask.astype(float), grow, 1.0)

    return crmask.astype(bool)
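The core candidate-selection step (Laplacian convolution, noise model from a median filter, large-structure removal) can be demonstrated on a toy image with scipy alone; the image, noise level, and threshold are synthetic and the 2x2 subsampling is skipped:

import numpy as np
from scipy import signal, ndimage

rng = np.random.default_rng(0)
img = rng.normal(100.0, 1.0, (50, 50))       # quiet synthetic background
img[25, 25] += 500.0                         # inject a single hot "cosmic ray" pixel

laplkernel = np.array([[0., -1., 0.], [-1., 4., -1.], [0., -1., 0.]])
lap = signal.convolve2d(img, laplkernel, mode='same', boundary='symm').clip(min=0.0)
noise = np.sqrt(np.abs(ndimage.median_filter(img, size=5, mode='mirror')))
s = lap / noise                              # Laplacian S/N (no subsampling here)
sp = s - ndimage.median_filter(s, size=5, mode='mirror')   # remove large-scale structure
print(np.argwhere(sp > 5.0))                 # -> [[25 25]]: the injected pixel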
Example #14
def fit_tilts(trc_tilt_dict, thismask, slit_cen, spat_order=3, spec_order=4, maxdev=0.2,
              maxrej=None, maxiter=100, sigrej=3.0, pad_spec=30, pad_spat=5,
              func2d='legendre2d', doqa=True, master_key='test',
              slit=0, show_QA=False, out_dir=None, debug=False):
    """

    Parameters
    ----------
    trc_tilt_dict: dict
        Dictionary containing tilt info

    Optional Parameters
    -------------------
        spat_order, spec_order: int
            Polynomial orders of the 2D fit in the spatial and spectral directions.
        maxdev: float
            Maximum deviation, in units of the line FWHM, allowed before rejection.
        func2d: str
            Name of the 2D fitting function (e.g. 'legendre2d').
        doqa, show_QA, out_dir:
            Control generation and placement of the QA plots.

    Returns
    -------
    tilt_fit_dict: dict
        Dictionary describing the 2D fit (coefficients, orders, normalization).
    trc_tilt_dict_out: dict
        Copy of trc_tilt_dict with the evaluated 2D fit added as 'tilt_2dfit'.

    """

    nspec = trc_tilt_dict['nspec']
    nspat = trc_tilt_dict['nspat']
    fwhm = trc_tilt_dict['fwhm']
    maxdev_pix = maxdev*fwhm
    xnspecmin1 = float(nspec-1)
    xnspatmin1 = float(nspat-1)
    use_tilt = trc_tilt_dict['use_tilt']                 # mask for good/bad tilts, based on aggregate fit, frac good pixels
    nuse = np.sum(use_tilt)
    tilts = trc_tilt_dict['tilts']   # legendre polynomial fit
    #JFH Before we were fitting the fits. Now we fit the actual flux weighted centroided tilts.
    tilts_err = trc_tilt_dict['tilts_err']   # flux weighted centroidding error
    tilts_dspat = trc_tilt_dict['tilts_dspat'] # spatial offset from the central trace
    #tilts_spat = trc_tilt_dict['tilts_dspat'][:,use_tilt] # spatial offset from the central trace
    tilts_spec = trc_tilt_dict['tilts_spec'] # line spectral pixel position from legendre fit evaluated at slit center
    tilts_mask = trc_tilt_dict['tilts_mask'] # Reflects if trace is on the slit
    tilts_mad = trc_tilt_dict['tilts_mad']   # quantifies the aggregate error of this tilt

    use_mask = np.outer(np.ones(nspat,dtype=bool),use_tilt)
    tot_mask = tilts_mask & (tilts_err < 900) & use_mask
    fitxy = [spec_order, spat_order]

    # Fit the inverted model with a 2D polynomial
    msgs.info("Fitting tilts with a low order, 2D {:s}".format(func2d))

    adderr = 0.03
    tilts_sigma = ((tilts_mad < 100.0) & (tilts_mad > 0.0))*np.sqrt(np.abs(tilts_mad)**2 + adderr**2)

    fitmask, coeff2 = utils.robust_polyfit_djs(tilts_spec.flatten()/xnspecmin1, (tilts.flatten() - tilts_spec.flatten())/xnspecmin1,
                                               fitxy, x2=tilts_dspat.flatten()/xnspatmin1, inmask = tot_mask.flatten(),
                                               sigma=tilts_sigma.flatten()/xnspecmin1,
                                               function=func2d, maxiter=maxiter, lower=sigrej, upper=sigrej,
                                               maxdev=maxdev_pix/xnspecmin1,minx=-0.0, maxx=1.0, minx2=-1.0, maxx2=1.0,
                                               use_mad=False, sticky=False)
    fitmask = fitmask.reshape(tilts_dspat.shape)
    # Compute a rejection mask that we will use later. These are locations that were fit but rejected
    rej_mask = tot_mask & np.invert(fitmask)
    # Compute and store the 2d tilts fit
    delta_tilt_1 = xnspecmin1*utils.func_val(coeff2, tilts_spec[tilts_mask]/xnspecmin1, func2d, x2=tilts_dspat[tilts_mask]/xnspatmin1,
                                            minx=0.0, maxx=1.0, minx2=-1.0, maxx2=1.0)
    delta_tilt = np.zeros_like(tilts_dspat)
    tilts_2dfit = np.zeros_like(tilts_dspat)
    delta_tilt[tilts_mask] = delta_tilt_1
    tilts_2dfit[tilts_mask] = tilts_spec[tilts_mask] + delta_tilt[tilts_mask]
    # Add the 2d fit to the tracetilt dictionary
    trc_tilt_dict_out = copy.deepcopy(trc_tilt_dict)
    trc_tilt_dict_out['tilt_2dfit'] = tilts_2dfit

    # Report the residuals in pixels
    res_fit = tilts[fitmask] - tilts_2dfit[fitmask]
    rms_fit = np.std(res_fit)
    msgs.info("Residuals: 2D Legendre Fit")
    msgs.info("RMS (pixels): {}".format(rms_fit))
    msgs.info("RMS/FWHM: {}".format(rms_fit/fwhm))

    msgs.info('Inverting the fit to generate the tilts image')
    spec_vec = np.arange(nspec)
    spat_vec = np.arange(nspat)
    spat_img, spec_img = np.meshgrid(spat_vec, spec_vec)
    # We pad here to guarantee that arc lines whose tilts carry them off the image still get mapped onto it
    spec_vec_pad = np.arange(-pad_spec, nspec + pad_spec)
    spat_vec_pad = np.arange(-pad_spat, nspat + pad_spat)
    spat_img_pad, spec_img_pad = np.meshgrid(spat_vec_pad, spec_vec_pad)
    slit_cen_pad = (scipy.interpolate.interp1d(spec_vec, slit_cen, bounds_error=False, fill_value='extrapolate'))(spec_vec_pad)
    thismask_pad = np.zeros_like(spec_img_pad, dtype=bool)
    ind_spec, ind_spat = np.where(thismask)
    slit_cen_img_pad = np.outer(slit_cen_pad, np.ones(nspat + 2 * pad_spat))  # center of the slit replicated spatially
    # Normalized spatial offset image (from central trace)
    dspat_img_nrm = (spat_img_pad - slit_cen_img_pad) / xnspatmin1
    # normalized spec image
    spec_img_nrm = spec_img_pad / xnspecmin1
    # Embed the old thismask in the new larger padded thismask
    thismask_pad[ind_spec + pad_spec, ind_spat + pad_spat] = thismask[ind_spec, ind_spat]
    # Now grow the thismask_pad
    kernel = np.ones((2*pad_spec, 2*pad_spat)) / float(4 * pad_spec * pad_spat)
    thismask_grow = scipy.ndimage.convolve(thismask_pad.astype(float), kernel, mode='nearest') > 0.0
    # Evaluate the tilts on the padded image grid
    tiltpix = spec_img_pad[thismask_grow] + xnspecmin1 * utils.func_val(coeff2, spec_img_nrm[thismask_grow], func2d,
                                                                         x2=dspat_img_nrm[thismask_grow],
                                                                         minx=0.0, maxx=1.0, minx2=-1.0, maxx2=1.0)
    # Now do one last fit to invert the function above to obtain the final tilts model in normalized image coordinates
    inmask = np.isfinite(tiltpix)
    sigma = np.full_like(spec_img_pad, 10.0)
    # JFH What I find confusing is that this last fit was actually what Burles was doing on the raw tilts, so why was that failing?
    fitmask_tilts, coeff2_tilts = utils.robust_polyfit_djs(tiltpix/xnspecmin1, spec_img_pad[thismask_grow]/xnspecmin1,
                                                           fitxy, x2=spat_img_pad[thismask_grow]/xnspatmin1,
                                                           sigma=sigma[thismask_grow]/xnspecmin1,
                                                           upper=5.0, lower=5.0, maxdev=10.0/xnspecmin1,
                                                           inmask=inmask, function=func2d, maxiter=20,
                                                           minx=0.0, maxx=1.0, minx2=0.0, maxx2=1.0, use_mad=False)
    irej = np.invert(fitmask_tilts) & inmask
    msgs.info('Rejected {:d}/{:d} pixels in final inversion tilts image fit'.format(np.sum(irej),np.sum(inmask)))
    # normalized tilts image
    #tilts_img = utils.func_val(coeff2_tilts, spec_img/xnspecmin1, func2d, x2=spat_img/xnspatmin1,minx=0.0, maxx=1.0, minx2=0.0, maxx2=1.0)
    #tilts_img = np.fmax(np.fmin(tilts_img, 1.2),-0.2)

    tilt_fit_dict = dict(nspec = nspec, nspat = nspat, ngood_lines=np.sum(use_tilt), npix_fit = np.sum(tot_mask),
                         npix_rej = np.sum(np.invert(fitmask)), coeff2=coeff2_tilts, spec_order = spec_order, spat_order = spat_order,
                         minx = 0.0, maxx = 1.0, minx2 = 0.0, maxx2 = 1.0, func=func2d)

    # Now do some QA
    if doqa:
        plot_tilt_2d(tilts_dspat, tilts, tilts_2dfit, tot_mask, rej_mask, spat_order, spec_order, rms_fit, fwhm,
                     slit=slit, setup=master_key, show_QA=show_QA, out_dir=out_dir)
        plot_tilt_spat(tilts_dspat, tilts, tilts_2dfit, tilts_spec, tot_mask, rej_mask, spat_order, spec_order, rms_fit, fwhm,
                       slit=slit, setup=master_key, show_QA=show_QA, out_dir=out_dir)
        plot_tilt_spec(tilts_spec, tilts, tilts_2dfit, tot_mask, rej_mask, rms_fit, fwhm, slit=slit,
                       setup = master_key, show_QA=show_QA, out_dir=out_dir)

    return tilt_fit_dict, trc_tilt_dict_out
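
# A hedged sketch (the helper name `_demo_evaluate_tilts` is ours, not part of
# the pipeline): evaluate the fit stored in tilt_fit_dict into a normalized
# tilts image, mirroring the commented-out evaluation above. Only keys that
# fit_tilts stores in tilt_fit_dict are used.
def _demo_evaluate_tilts(tilt_fit_dict):
    import numpy as np
    nspec, nspat = tilt_fit_dict['nspec'], tilt_fit_dict['nspat']
    spat_img, spec_img = np.meshgrid(np.arange(nspat), np.arange(nspec))
    tilts_img = utils.func_val(tilt_fit_dict['coeff2'], spec_img/float(nspec - 1),
                               tilt_fit_dict['func'], x2=spat_img/float(nspat - 1),
                               minx=tilt_fit_dict['minx'], maxx=tilt_fit_dict['maxx'],
                               minx2=tilt_fit_dict['minx2'], maxx2=tilt_fit_dict['maxx2'])
    # Clip runaway extrapolation, as in the commented-out lines above
    return np.fmax(np.fmin(tilts_img, 1.2), -0.2)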
Example #15
def trace_tilts(arcimg, lines_spec, lines_spat, thismask, slit_cen, inmask=None, gauss=False, fwhm=4.0,spat_order=5, maxdev_tracefit=0.2,
                sigrej_trace=3.0, max_badpix_frac=0.20, tcrude_nave = 5,
                npca = 1, coeff_npoly_pca = 2, sigrej_pca = 2.0,debug_pca = False, show_tracefits=False):

    """
    Use a PCA model to determine the best object (or slit edge) traces for echelle spectrographs.

    Parameters
    ----------
    arcimg:  ndarray, float (nspec, nspat)
       Image of arc or sky that will be used for tracing tilts.
    lines_spec: ndarray, float (nlines,)
       Array containing arc line centroids along the center of the slit for each arc line that will be traced. This is
       in pixels in image coordinates.
    lines_spat: ndarray, float (nlines,)
       Array contianing the spatial position of the center of the slit along which the arc was extracted. This is is in
       pixels in image coordinates.
    thismask: ndarray, boolean (nspec, nsapt)
        Boolean mask image specifying the pixels which lie on the slit/order to search for objects on.
        The convention is: True = on the slit/order, False  = off the slit/order. This must be the same size as the arcimg.
    Optional Parameters
    -------------------
    inmask: float ndarray, default = None
        Input mask image.
    gauss: bool, default = False
        If true the code will trace the arc lines usign Gaussian weighted centroiding (trace_gweight) instead of the default,
        which is flux weighted centroiding (trace_fweight)
    fwhm: float
       Expected FWHM of the arc lines.
    spat_order: int, default = None
       Order of the legendre polynomial that will be fit to the tilts.
    maxdev_tracefit: float, default = 1.0
       Maximum absolute deviation for the arc tilt fits during iterative trace fitting expressed in units of the fwhm.
    sigrej_trace: float, default =  3.0
       From each line we compute a median absolute deviation of the trace from the polynomial fit. We then
       analyze the distribution of maximxum absolute deviations (MADs) for all the lines, and reject sigrej_trace outliers
       from that distribution.
    max_badpix_frac: float, default = 0.20
       Maximum fraction of total pixels that can be masked by the trace_gweight algorithm
       (because the residuals are too large) to still be usable for tilt fitting.
    tcrude_nave: int, default = 5
       Trace crude is used to determine the initial arc line tilts, which are then iteratively fit. Trace crude
       can optionally boxcar smooth the image (along the spatial direction of the image, i.e. roughly along the arc line tilts)
       to improve the tracing.
    npca: int, default = 1
       Tilts are initially traced and then a PCA is performed. The PCA is used to determine better crutches for a second
       round of improved tilt tracing. This parameter is the order of that PCA and determined how much the tilts behavior
       is being compressed. npca = 0 would be just using the mean tilt. This PCA is only an intermediate step to
       improve the crutches and is an attempt to make the tilt tracing that goes into the final fit more robust.
    coeff_npoly_pca: int, default = 1
       Order of polynomial fits used for PCA coefficients fitting for the PCA described above.
    sigrej_pca: float, default = 2.0
       Significance threhsold for rejection of outliers from fits to PCA coefficients for the PCA described above.
    show_tracefits: bool, default = False
       If true the fits will be shown to each arc line trace by iter_fitting.py


    Returns:
    --------
    """

    tcrude_maxerr = fwhm/4.0
    tcrude_maxshift = 3.0*fwhm/4.0
    tcrude_maxshift0 = fwhm

    trace_dict0 = trace_tilts_work(
        arcimg, lines_spec, lines_spat, thismask, slit_cen, inmask=inmask, gauss=gauss, tilts_guess=None, fwhm=fwhm, spat_order=spat_order,
        maxdev_tracefit=maxdev_tracefit,sigrej_trace=sigrej_trace, max_badpix_frac=max_badpix_frac,
        tcrude_maxerr=tcrude_maxerr, tcrude_maxshift=tcrude_maxshift,tcrude_maxshift0=tcrude_maxshift0,
        tcrude_nave=tcrude_nave, show_tracefits=show_tracefits)

    # TODO THE PCA may not be necessary. It appears to improve the results though for some instruments where the
    # tracing is problematic. We could consider making this optional to speed things up.
    debug_pca_fit = False
    if debug_pca_fit:
        # !!!! FOR TESTING ONLY!!!!  Evaluate the model fit to the tilts for all of our lines
        msgs.info('TESTING: Performing an initial fit before PCA.')
        # JFH Note spec_order is hard wired here as we don't pass it in
        tilt_fit_dict0 = fit_tilts(trace_dict0, spat_order=spat_order, spec_order=6, debug=True,
                                   maxdev=0.2, sigrej=3.0,doqa=True, setup='test', slit=0, show_QA=True)


    # Do a PCA fit, which rejects some outliers
    iuse = trace_dict0['use_tilt']
    nuse = np.sum(iuse)
    msgs.info('PCA modeling {:d} good tilts'.format(nuse))
    pca_fit, poly_fit_dict, pca_mean, pca_vectors = extract.pca_trace(
        trace_dict0['tilts_sub_fit'], predict=np.invert(iuse), npca=npca, coeff_npoly=coeff_npoly_pca,
        lower=sigrej_pca, upper=sigrej_pca, order_vec=lines_spec, xinit_mean=lines_spec,
        minv=0.0, maxv=float(trace_dict0['nsub'] - 1), debug=debug_pca)

    # Now trace again with the PCA predictions as the starting crutches
    trace_dict1 = trace_tilts_work(arcimg, lines_spec, lines_spat, thismask, slit_cen, inmask=inmask, gauss=gauss, tilts_guess=pca_fit,
                                      fwhm=fwhm, spat_order=spat_order, maxdev_tracefit=maxdev_tracefit,sigrej_trace=sigrej_trace,
                                      max_badpix_frac=max_badpix_frac,show_tracefits=show_tracefits)

    return trace_dict1
Example #16
def tilts_find_lines(arc_spec, slit_cen, tracethresh=10.0, sig_neigh=5.0, nfwhm_neigh=3.0,
                    only_these_lines=None, fwhm=4.0, nonlinear_counts=1e10, fit_frac_fwhm=1.25, cont_frac_fwhm=1.0,
                    max_frac_fwhm=2.0, cont_samp=30, niter_cont=3, debug_lines=False, debug_peaks=False):
    """
    I can't believe this method has no docs

    Args:
        arc_spec:
        slit_cen:
        tracethresh:
        sig_neigh:
        nfwhm_neigh:
        only_these_lines:
        fwhm:
        nonlinear_counts:
        fit_frac_fwhm:
        cont_frac_fwhm:
        max_frac_fwhm:
        cont_samp:
        niter_cont:
        debug_lines:
        debug_peaks:

    Returns:
        (np.ndarray, np.ndarray) or (None,None):

    """


    nspec = arc_spec.size
    spec_vec = np.arange(nspec)
    # Find peaks with a liberal detection threshold: sigdetect = min(sig_neigh, tracethresh)
    tampl_tot, tampl_cont_tot, tcent_tot, twid_tot, _, wgood, arc_cont_sub, nsig_tot = arc.detect_lines(
        arc_spec, sigdetect=np.min([sig_neigh,tracethresh]), fwhm=fwhm, fit_frac_fwhm=fit_frac_fwhm, cont_frac_fwhm=cont_frac_fwhm,
        max_frac_fwhm=max_frac_fwhm, cont_samp=cont_samp, niter_cont=niter_cont, nonlinear_counts=nonlinear_counts,
        debug=debug_peaks)
    # Good lines
    arcdet = tcent_tot[wgood]
    arc_ampl = tampl_cont_tot[wgood]
    nsig = nsig_tot[wgood]

    npix_neigh = nfwhm_neigh*fwhm
    # Determine the best lines to use to trace the tilts
    aduse = np.zeros(arcdet.size, dtype=bool)  # Which lines should be used to trace the tilts
    w = np.where(nsig >= tracethresh)
    aduse[w] = True
    # Remove lines that have a brighter neighbor within npix_neigh pixels.
    # TODO: replace this with a nearest-neighbor based approach, where
    # we identify groups and take the brightest line in a given group?
    nuse = np.sum(aduse)
    detuse = arcdet[aduse]
    idxuse = np.arange(arcdet.size)[aduse]
    olduse = aduse.copy()
    for s in range(nuse):
        w = np.where((np.abs(arcdet - detuse[s]) <= npix_neigh) & (np.abs(arcdet - detuse[s]) >= 1.0) & (nsig > sig_neigh))[0]
        for u in range(w.size):
            if nsig[w[u]] > nsig[olduse][s]:
                aduse[idxuse[s]] = False
                break
    # Restricted to ID lines? [introduced to avoid LRIS ghosts]
    if only_these_lines is not None:
        ids_pix = np.array(only_these_lines)
        idxuse = np.arange(arcdet.size)[aduse]
        for s in idxuse:
            if np.min(np.abs(arcdet[s] - ids_pix)) > 2.0:
                msgs.info("Ignoring line at spectral position={:6.1f} which was not identified".format(arcdet[s]))
                aduse[s] = False

    # Final spectral positions of arc lines we will trace
    lines_spec = arcdet[aduse]
    nlines = len(lines_spec)
    if nlines == 0:
        msgs.warn('No arc lines were deemed usable on this slit. Cannot compute tilts. Try lowering tracethresh.')
        msgs.warn('Or, more likely, this was a bad slit (which you might remove)')
        return None, None
    else:
        msgs.info('Modelling arc line tilts with {:d} arc lines'.format(nlines))


    if debug_lines:
        xrng = np.arange(nspec)
        plt.figure(figsize=(14, 6))
        plt.plot(xrng, arc_cont_sub, color='black', drawstyle='steps-mid', lw=1.0, label='arc')
        plt.plot(arcdet[~aduse], arc_ampl[~aduse], 'r+', markersize=6.0, label='bad for tilts')
        plt.plot(arcdet[aduse], arc_ampl[aduse], 'g+', markersize=6.0, label='good for tilts')
        if nonlinear_counts < 1e9:
            plt.hlines(nonlinear_counts, xrng.min(), xrng.max(), color='orange', linestyle='--', linewidth=2.0,
            label='nonlinear', zorder=10)
        plt.title('Good Lines = {:d}'.format(np.sum(aduse)) + ',  Bad Lines = {:d}'.format(np.sum(~aduse)))
        plt.ylim(arc_cont_sub.min(), 1.5 * arc_cont_sub.max())
        plt.legend()
        plt.show()

    # Spatial position of line, i.e. the central trace interpolated onto the spectral pixel of the line
    lines_spat = np.interp(lines_spec, spec_vec, slit_cen)

    return lines_spec, lines_spat
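
# A hedged usage sketch (the helper name `_demo_trace_slit_tilts` is ours): find
# usable arc lines on one slit and trace their tilts, assuming tilts_find_lines
# and trace_tilts above are importable from the same module. `arcimg`,
# `arc_spec`, `slit_cen`, and `thismask` are assumed to come from the
# calibrations; they are not constructed here.
def _demo_trace_slit_tilts(arcimg, arc_spec, slit_cen, thismask):
    lines_spec, lines_spat = tilts_find_lines(arc_spec, slit_cen, tracethresh=20.0, fwhm=4.0)
    if lines_spec is None:
        return None
    return trace_tilts(arcimg, lines_spec, lines_spat, thismask, slit_cen,
                       fwhm=4.0, spat_order=3)
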
def ech_coadd(files,objids=None,extract='OPT',flux=True,giantcoadd=False,
              wave_grid_method='velocity', niter=5,wave_grid_min=None, wave_grid_max=None,v_pix=None,
              scale_method='auto', do_offset=False, sigrej_final=3.,do_var_corr=False,
              qafile=None, outfile=None,do_cr=True, debug=False,**kwargs):
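    """
    Coadd echelle spectra from multiple exposures.

    If giantcoadd is True, all orders and exposures are coadded at once;
    otherwise each order is first coadded across exposures and the coadded
    orders are then merged. Most keywords are passed through to
    coadd.coadd_spectra.

    Parameters
    ----------
    files: list
        List of spec1d files to coadd.
    objids: list, optional
        Object IDs used to select the object in each file.
    extract: str, default = 'OPT'
        Extraction type used to build the table tags (e.g. 'OPT' or 'BOX').
    flux: bool, default = True
        If True, load the flux-calibrated (FLAM) spectra.
    giantcoadd: bool, default = False
        Coadd all orders and exposures at once.

    Returns
    -------
    spec1d:
        The coadded spectrum returned by coadd.coadd_spectra.
    """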

    nfile = len(files)
    if nfile <= 1:
        msgs.info('Only one spectrum exists; exiting coadding...')
        return

    fname = files[0]
    ext_final = fits.getheader(fname, -1)
    norder = ext_final['ORDER'] + 1
    msgs.info('spectrum {:s} has {:d} orders'.format(fname, norder))
    if norder <= 1:
        msgs.error('The number of orders has to be greater than one for echelle. Longslit data?')

    if giantcoadd:
        msgs.info('Coadding all orders and exposures at once')
        spectra = ech_load_spec(files, objid=objids,order=None, extract=extract, flux=flux)
        wave_grid = np.zeros((2,spectra.nspec))
        for i in range(spectra.nspec):
            wave_grid[0, i] = spectra[i].wvmin.value
            wave_grid[1, i] = spectra[i].wvmax.value
        ech_kwargs = {'echelle': True, 'wave_grid_min': np.min(wave_grid), 'wave_grid_max': np.max(wave_grid),
                      'v_pix': v_pix}
        kwargs.update(ech_kwargs)
        # Coadding
        spec1d = coadd.coadd_spectra(spectra, wave_grid_method=wave_grid_method, niter=niter,
                                          scale_method=scale_method, do_offset=do_offset, sigrej_final=sigrej_final,
                                          do_var_corr=do_var_corr, qafile=qafile, outfile=outfile,
                                          do_cr=do_cr, debug=debug,**kwargs)
    else:
        msgs.info('Coadding individual orders first and then merging orders')
        spectra_list = []
        # Keywords for Table
        rsp_kwargs = {}
        rsp_kwargs['wave_tag'] = '{:s}_WAVE'.format(extract)
        rsp_kwargs['flux_tag'] = '{:s}_FLAM'.format(extract)
        rsp_kwargs['sig_tag'] = '{:s}_FLAM_SIG'.format(extract)
        wave_grid = np.zeros((2,norder))
        for iord in range(norder):
            spectra = ech_load_spec(files, objid=objids, order=iord, extract=extract, flux=flux)
            ech_kwargs = {'echelle': False, 'wave_grid_min': spectra.wvmin.value, 'wave_grid_max': spectra.wvmax.value, 'v_pix': v_pix}
            wave_grid[0,iord] = spectra.wvmin.value
            wave_grid[1,iord] = spectra.wvmax.value
            kwargs.update(ech_kwargs)
            # Coadding the individual orders
            if qafile is not None:
                qafile_iord = qafile+'_%s'%str(iord)
            else:
                qafile_iord =  None
            spec1d_iord = coadd.coadd_spectra(spectra, wave_grid_method=wave_grid_method, niter=niter,
                                       scale_method=scale_method, do_offset=do_offset, sigrej_final=sigrej_final,
                                       do_var_corr=do_var_corr, qafile=qafile_iord, outfile=outfile,
                                       do_cr=do_cr, debug=debug, **kwargs)
            spectrum = spec_from_array(spec1d_iord.wavelength, spec1d_iord.flux, spec1d_iord.sig,**rsp_kwargs)
            spectra_list.append(spectrum)
        # Join into one XSpectrum1D object
        spectra_coadd = collate(spectra_list)
        kwargs['echelle'] = True
        kwargs['wave_grid_min'] = np.min(wave_grid)
        kwargs['wave_grid_max'] = np.max(wave_grid)
        # ToDo: Currently I'm not using the first order due to some problem. Need to add it back after fixing the problem.
        spec1d = coadd.coadd_spectra(spectra_coadd[1:], wave_grid_method=wave_grid_method, niter=niter,
                                          scale_method=scale_method, do_offset=do_offset, sigrej_final=sigrej_final,
                                          do_var_corr=do_var_corr, qafile=qafile, outfile=outfile,
                                          do_cr=do_cr, debug=debug, **kwargs)

    return spec1d
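
# A hedged usage sketch (the helper name `_demo_ech_coadd` is ours): coadd
# several echelle spec1d files order by order. The file names and object IDs
# below are placeholders, not real data products.
def _demo_ech_coadd():
    files = ['sci1_spec1d.fits', 'sci2_spec1d.fits', 'sci3_spec1d.fits']  # hypothetical
    objids = ['OBJ0001', 'OBJ0001', 'OBJ0001']  # hypothetical
    return ech_coadd(files, objids=objids, extract='OPT', flux=True,
                     giantcoadd=False, qafile='coadd_qa', outfile='coadd.fits')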
Example #18
    def write_to_fits(self,
                      subheader,
                      outfile,
                      overwrite=True,
                      update_det=None,
                      slitspatnum=None,
                      debug=False):
        """
        Write the set of SpecObj objects to one multi-extension FITS file

        Args:
            subheader (:obj:`dict`):
            outfile (str):
            overwrite (bool, optional):
            slitspatnum (:obj:`str` or :obj:`list`, optional):
                Restricted set of slits for reduction
            update_det (int or list, optional):
              If provided, do not clobber the existing file but only update
              the indicated detectors.  Useful for re-running on a subset of detectors

        """
        if os.path.isfile(outfile) and (not overwrite):
            msgs.warn("Outfile exists.  Set overwrite=True to clobber it")
            return

        # If the file exists and update_det (and slit_spat_num) is provided, use the existing header
        #   and load up all the other hdus so that we only over-write the ones
        #   we are updating
        if os.path.isfile(outfile) and (update_det is not None
                                        or slitspatnum is not None):
            _specobjs = SpecObjs.from_fitsfile(outfile)
            mask = np.ones(_specobjs.nobj, dtype=bool)
            # Update_det
            if update_det is not None:
                # Pop out those with this detector (and slit if slit_spat_num is provided)
                for det in np.atleast_1d(update_det):
                    mask[_specobjs.DET == det] = False
            elif slitspatnum is not None:  # slitspatnum
                dets, spat_ids = slittrace.parse_slitspatnum(slitspatnum)
                for det, spat_id in zip(dets, spat_ids):
                    mask[(_specobjs.DET == det)
                         & (_specobjs.SLITID == spat_id)] = False
            _specobjs = _specobjs[mask]
            # Add in the new
            for sobj in self.specobjs:
                _specobjs.add_sobj(sobj)
        else:
            _specobjs = self.specobjs

        # Build up the Header
        header = io.initialize_header(primary=True)
        for key in subheader.keys():
            header[key.upper()] = subheader[key]

        # Init
        prihdu = fits.PrimaryHDU()
        hdus = [prihdu]
        prihdu.header = header

        # Add class info
        prihdu.header['DMODCLS'] = (self.__class__.__name__, 'Datamodel class')
        prihdu.header['DMODVER'] = (self.version, 'Datamodel version')

        detector_hdus = {}
        nspec, ext = 0, 0
        # Loop on the SpecObj objects
        for sobj in _specobjs:
            if sobj is None:
                continue
            # HDUs
            if debug:
                import pdb
                pdb.set_trace()
            shdul = sobj.to_hdu()
            if len(shdul) == 2:  # Detector?
                detector_hdus[sobj['DET']] = shdul[1]
                shdu = [shdul[0]]
            elif len(shdul) == 1:  # No detector HDU
                shdu = shdul
            else:
                msgs.error("Should not get here...")
            # Check -- If sobj had only 1 array, the BinTableHDU test will fail
            assert len(shdu) == 1, 'Bad data model!!'
            assert isinstance(shdu[0],
                              fits.hdu.table.BinTableHDU), 'Bad data model (not a BinTableHDU)!!'
            #shdu[0].header['DMODCLS'] = (self.__class__.__name__, 'Datamodel class')
            #shdu[0].header['DMODVER'] = (self.version, 'Datamodel version')
            # Name
            shdu[0].name = sobj.NAME
            # Extension
            keywd = 'EXT{:04d}'.format(ext)
            prihdu.header[keywd] = sobj.NAME
            ext += 1
            nspec += 1
            # Append
            hdus += shdu

        # Deal with Detectors
        for key, item in detector_hdus.items():
            # TODO - Add EXT to the primary header for these??
            prefix = specobj.det_hdu_prefix(key)
            # Name
            if prefix not in item.name:  # In case we are re-loading
                item.name = specobj.det_hdu_prefix(key) + item.name
            # Append
            hdus += [item]

        # A few more for the header
        prihdu.header['NSPEC'] = nspec

        # Code versions
        io.initialize_header(hdr=prihdu.header)

        # Finish
        hdulist = fits.HDUList(hdus)
        if debug:
            import pdb
            pdb.set_trace()
        hdulist.writeto(outfile, overwrite=overwrite)
        msgs.info("Wrote 1D spectra to {:s}".format(outfile))
        return
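
    # A hedged usage sketch (illustration only). The subheader keys and the
    # filename below are placeholders, not a required schema:
    #   subheader = {'PYP_SPEC': 'keck_deimos', 'TARGET': 'J1234+5678'}
    #   sobjs.write_to_fits(subheader, 'spec1d_science.fits', overwrite=True)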
Example #19
    def get_rawimage(self, raw_file, det):
        """
        Read a raw DEIMOS data frame (one or more detectors).

        Data are unpacked from the multi-extension HDU.  Function is
        based on :func:`pypeit.spectrographs.keck_lris.read_lris`, which
        was based on the IDL procedure ``readmhdufits.pro``.

        Parameters
        ----------
        raw_file : str
            Filename
        det : int or None
            Detector number (1-indexed). If None, all detectors are read
            into a single mosaic image.

        Returns
        -------
        image : ndarray
            Combined image
        hdu : HDUList
            Opened HDU list for the raw file
        exptime : float
            Exposure time read from the header
        rawdatasec_img : ndarray
            Integer image flagging the amplifier data sections
        oscansec_img : ndarray
            Integer image flagging the overscan sections

        """
        # Check for file; allow for extra .gz, etc. suffix
        fil = glob.glob(raw_file + '*')
        if len(fil) != 1:
            msgs.error('Found {0} files matching {1}'.format(
                len(fil), raw_file + '*'))
        # Read
        try:
            msgs.info("Reading DEIMOS file: {:s}".format(fil[0]))
        except AttributeError:
            print("Reading DEIMOS file: {:s}".format(fil[0]))

        hdu = fits.open(fil[0])
        head0 = hdu[0].header

        # Get post, pre-pix values
        postpix = head0['POSTPIX']
        detlsize = head0['DETLSIZE']
        x0, x_npix, y0, y_npix = np.array(
            parse.load_sections(detlsize)).flatten()

        # Create final image
        if det is None:
            image = np.zeros((x_npix, y_npix + 4 * postpix))
            rawdatasec_img = np.zeros_like(image, dtype=int)
            oscansec_img = np.zeros_like(image, dtype=int)

        # get the x and y binning factors...
        binning = head0['BINNING']
        if binning != '1,1':
            msgs.error(
                "This binning for DEIMOS might not work.  But it might..")

        # DEIMOS detectors
        nchip = 8

        if det is None:
            chips = range(nchip)
        else:
            chips = [det - 1]  # Indexing starts at 0 here
        # Loop
        for tt in chips:
            data, oscan = deimos_read_1chip(hdu, tt + 1)

            # One detector??
            if det is not None:
                image = np.zeros(
                    (data.shape[0], data.shape[1] + oscan.shape[1]))
                rawdatasec_img = np.zeros_like(image, dtype=int)
                oscansec_img = np.zeros_like(image, dtype=int)

            # Indexing
            x1, x2, y1, y2, o_x1, o_x2, o_y1, o_y2 = indexing(tt,
                                                              postpix,
                                                              det=det)

            # Fill
            image[y1:y2, x1:x2] = data
            rawdatasec_img[y1:y2, x1:x2] = 1  # Amp
            image[o_y1:o_y2, o_x1:o_x2] = oscan
            oscansec_img[o_y1:o_y2, o_x1:o_x2] = 1  # Amp

        # Return
        exptime = hdu[self.meta['exptime']['ext']].header[self.meta['exptime']['card']]
        return image, hdu, exptime, rawdatasec_img, oscansec_img
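
    # A hedged usage sketch (illustration only). The filename is a placeholder
    # and `spec` stands in for a DEIMOS spectrograph instance:
    #   spec = load_spectrograph('keck_deimos')
    #   image, hdu, exptime, rawdatasec_img, oscansec_img = \
    #       spec.get_rawimage('DE.20170425.09554.fits', det=3)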
Example #20
    def get_tiltimg(self):
        """
        Load or generate the Tilt image

        Requirements:
          det, calib_ID, par

        Args:

        Returns:
            ndarray: :attr:`mstilt` image

        """
        # Check internals
        self._chk_set(['det', 'calib_ID', 'par'])

        # Prep
        tilt_rows = self.fitstbl.find_frames('tilt',
                                             calib_ID=self.calib_ID,
                                             index=True)
        if len(tilt_rows) == 0:
            msgs.error('Must identify tilt frames to construct tilt image.')
        self.tilt_files = self.fitstbl.frame_paths(tilt_rows)
        self.master_key_dict['tilt'] \
                = self.fitstbl.master_key(tilt_rows[0] if len(tilt_rows) > 0 else self.frame,
                                          det=self.det)

        if self._cached('tiltimg', self.master_key_dict['tilt']):
            # Previously calculated
            self.mstilt = self.calib_dict[
                self.master_key_dict['tilt']]['tiltimg']
            return self.mstilt

        # Instantiate with everything needed to generate the image (in case we do)
        self.tiltImage = tiltimage.TiltImage(
            self.spectrograph,
            files=self.tilt_files,
            det=self.det,
            msbias=self.msbias,
            par=self.par['tiltframe'],
            master_key=self.master_key_dict['tilt'],
            master_dir=self.master_dir,
            reuse_masters=self.reuse_masters)

        # Load the MasterFrame (if it exists and is desired)?
        self.mstilt = self.tiltImage.load()
        if self.mstilt is None:  # Otherwise build it
            msgs.info("Preparing a master {0:s} frame".format(
                self.tiltImage.frametype))
            self.mstilt = self.tiltImage.build_image(bias=self.msbias,
                                                     bpm=self.msbpm)
            # JFH Add a cr_masking option here. The image processing routines are not ready for it yet.

            # Save to Masters
            if self.save_masters:
                self.tiltImage.save()

        # Save & return
        self._update_cache('tilt', 'tiltimg', self.mstilt)
        # TODO in the future add in a tilt_inmask
        #self._update_cache('tilt', 'tilt_inmask', self.mstilt_inmask)
        return self.mstilt
#slit_ids = [trace_slits.get_slitid(flat.shape, tslits_dict['lcen'], tslits_dict['rcen'], ii)[0]
#            for ii in range(tslits_dict['lcen'].shape[1])]
#viewer, ch = ginga.show_image(flat, cuts = (1000.0,30000.0),clear=True)
#ginga.show_slits(viewer, ch, tslits_dict['lcen'], tslits_dict['rcen'], slit_ids)  # , args.det)

maskslits = np.zeros(tslits_dict['lcen'].shape[1], dtype=bool)
gdslits = np.where(~maskslits)[0]

pixelflat = np.ones_like(flatimg)
illumflat = np.ones_like(flatimg)
flat_model = np.zeros_like(flatimg)

debug = True
# Loop on slits
for slit in gdslits:
    msgs.info("Computing flat field image for slit: {:d}".format(slit + 1))
    slit_left = tslits_dict['lcen'][:, slit]
    slit_righ = tslits_dict['rcen'][:, slit]
    thismask = (tslits_dict['slitpix'] == slit + 1)
    inmask = None # in the future set this to the bpm
    pixelflat[thismask], illumflat[thismask], flat_model[thismask] = flat.fit_flat(flatimg, mstilts, thismask,
                                                                                   slit_left, slit_righ,inmask=inmask,
                                                                                   debug = debug)


ginga.show_image(pixelflat,cuts = (0.9,1.1),chname='pixelflat', wcs_match=True, clear=True)
ginga.show_image(illumflat,cuts = (0.9,1.1), chname='illumflat', wcs_match=True)
ginga.show_image(flatimg,chname='flat', wcs_match=True)
ginga.show_image(flat_model,chname='flat_model',wcs_match = True)
Example #22
    def get_flats(self):
        """
        Load or generate a normalized pixel flat and slit illumination
        flat.

        Requires :attr:`tslits_dict`, :attr:`tilts_dict`, :attr:`det`,
        :attr:`par`.

        Returns:
            `numpy.ndarray`_: Two arrays are returned, the normalized
            pixel flat image (:attr:`mspixelflat`) and the slit
            illumination flat (:attr:`msillumflat`).  If the user
            requested the field flattening be skipped
            (`FlatFieldPar['method'] == 'skip'`) or if the slit and tilt
            traces are not provided, the function returns two None
            objects instead.
        """
        # Check for existing data
        if not self._chk_objs(['msarc', 'msbpm', 'tslits_dict', 'wv_calib']):
            msgs.error("don't have all the objects")

        if self.par['flatfield']['method'] == 'skip':
            # User does not want to flat-field
            self.mspixelflat = None
            self.msillumflat = None
            msgs.warn(
                'Parameter calibrations.flatfield.method is set to skip. You are NOT '
                'flatfielding your data!!!')
            # TODO: Why does this not return unity arrays, like what's
            # done below?
            return self.mspixelflat, self.msillumflat

        # Slit and tilt traces are required to flat-field the data
        if not self._chk_objs(['tslits_dict', 'tilts_dict']):
            msgs.warn(
                'Flats were requested, but there are quantities missing necessary to '
                'create flats.  Proceeding without flat fielding....')
            # User cannot flat-field
            self.mspixelflat = None
            self.msillumflat = None
            # TODO: Why does this not return unity arrays, like what's
            # done below?
            return self.mspixelflat, self.msillumflat

        # Check internals
        self._chk_set(['det', 'calib_ID', 'par'])

        pixflat_rows = self.fitstbl.find_frames('pixelflat',
                                                calib_ID=self.calib_ID,
                                                index=True)
        # TODO: Why aren't these set to self
        #   KBW: They're kept in self.flatField.files
        pixflat_image_files = self.fitstbl.frame_paths(pixflat_rows)
        # Allow for user-supplied file (e.g. LRISb)
        self.master_key_dict['flat'] \
                = self.fitstbl.master_key(pixflat_rows[0] if len(pixflat_rows) > 0 else self.frame,
                                          det=self.det)

        # Return already generated data
        if self._cached('pixelflat', self.master_key_dict['flat']) \
                and self._cached('illumflat', self.master_key_dict['flat']):
            self.mspixelflat = self.calib_dict[
                self.master_key_dict['flat']]['pixelflat']
            self.msillumflat = self.calib_dict[
                self.master_key_dict['flat']]['illumflat']
            return self.mspixelflat, self.msillumflat

        # Instantiate
        # TODO: This should automatically attempt to load and instantiate
        # from a file if it exists.
        self.flatField = flatfield.FlatField(
            self.spectrograph,
            self.par['pixelflatframe'],
            files=pixflat_image_files,
            det=self.det,
            master_key=self.master_key_dict['flat'],
            master_dir=self.master_dir,
            reuse_masters=self.reuse_masters,
            flatpar=self.par['flatfield'],
            msbias=self.msbias,
            # TODO: msbpm is not passed?
            tslits_dict=self.tslits_dict,
            tilts_dict=self.tilts_dict)

        # --- Pixel flats

        # 1)  Try to load master files from disk (MasterFrame)?
        _, self.mspixelflat, self.msillumflat = self.flatField.load()

        # 2) Did the user specify a flat? If so load it in  (e.g. LRISb with pixel flat)?
        # TODO: We need to document this format for the user!
        if self.par['flatfield']['frame'] != 'pixelflat':
            # - Name is explicitly correct?
            if os.path.isfile(self.par['flatfield']['frame']):
                flat_file = self.par['flatfield']['frame']
            # - Is it in the master directory?
            elif os.path.isfile(
                    os.path.join(self.flatField.master_dir,
                                 self.par['flatfield']['frame'])):
                flat_file = os.path.join(self.flatField.master_dir,
                                         self.par['flatfield']['frame'])
            else:
                msgs.error(
                    'Could not find user-defined flatfield file: {0}'.format(
                        self.par['flatfield']['frame']))
            msgs.info('Using user-defined file: {0}'.format(flat_file))
            with fits.open(flat_file) as hdu:
                self.mspixelflat = hdu[self.det].data
            self.msillumflat = None

        # 3) If there is no master and no user-supplied flat, generate the flat
        if self.mspixelflat is None and len(pixflat_image_files) != 0:
            # Run
            self.mspixelflat, self.msillumflat = self.flatField.run(
                show=self.show, maskslits=self.tslits_dict['maskslits'])

            # If we tweaked the slits, update the tilts_dict and
            # tslits_dict to reflect new slit edges
            if self.par['flatfield']['tweak_slits']:
                msgs.info(
                    'Using slit boundary tweaks from IllumFlat and updated tilts image'
                )
                self.tslits_dict = self.flatField.tslits_dict
                self.tilts_dict = self.flatField.tilts_dict

            # Save to Masters
            if self.save_masters:
                self.flatField.save()

                # If we tweaked the slits update the master files for tilts and slits
                # TODO: These should be saved separately
                if self.par['flatfield']['tweak_slits']:
                    msgs.info(
                        'Updating MasterTrace and MasterTilts using tweaked slit boundaries'
                    )
                    self.edges.update_using_tslits_dict(
                        self.flatField.tslits_dict)
                    self.edges.save()
                    # Write the final_tilts using the new slit boundaries to the MasterTilts file
                    self.waveTilts.final_tilts = self.flatField.tilts_dict[
                        'tilts']
                    self.waveTilts.tilts_dict = self.flatField.tilts_dict
                    self.waveTilts.save()

        # 4) If either of the two flats are still None, use unity
        # everywhere and print out a warning
        # TODO: These will barf if self.tilts_dict['tilts'] isn't
        # defined.
        if self.mspixelflat is None:
            self.mspixelflat = np.ones_like(self.tilts_dict['tilts'])
            msgs.warn('You are not pixel flat fielding your data!!!')
        if self.msillumflat is None:
            self.msillumflat = np.ones_like(self.tilts_dict['tilts'])
            msgs.warn('You are not illumination flat fielding your data!')

        # Save & return
        self._update_cache('flat', ('pixelflat', 'illumflat'),
                           (self.mspixelflat, self.msillumflat))
        return self.mspixelflat, self.msillumflat
Example #23
    def global_skysub(self,
                      std=False,
                      skymask=None,
                      update_crmask=True,
                      maskslits=None,
                      show_fit=False,
                      show=False,
                      show_objs=False):
        """
        Perform global sky subtraction, slit by slit

        Wrapper to skysub.global_skysub

        Parameters
        ----------
        tslits_dict: dict
           Dictionary containing information on the slits traced for this image

        Optional Parameters
        -------------------
        bspline_spaceing: (float):
           Break-point spacing for bspline

        use_skymask: (bool, optional):
           Mask objects using self.skymask if object finding has been run
           (This requires they were found previously, i.e. that find_objects was already run)

        Returns:
            global_sky: (numpy.ndarray) image of the the global sky model
        """

        # Prep
        self.global_sky = np.zeros_like(self.sciImg.image)
        if (std and not self.redux_par['global_sky_std']):
            msgs.info('Skipping global sky-subtraction for standard star.')
            return self.global_sky

        if std:
            sigrej = 7.0
            update_crmask = False
        else:
            sigrej = 3.0

        self.maskslits = self.maskslits if maskslits is None else maskslits
        gdslits = np.where(np.invert(self.maskslits))[0]

        # Mask objects using the skymask? If skymask has been set by objfinding, and masking is requested, then do so
        skymask_now = skymask if (skymask is not None) else np.ones_like(
            self.sciImg.image, dtype=bool)
        # Loop on slits
        for slit in gdslits:
            msgs.info("Global sky subtraction for slit: {:d}".format(slit))
            thismask = (self.slitmask == slit)
            inmask = (self.sciImg.mask == 0) & thismask & skymask_now
            # Find sky
            self.global_sky[thismask] = skysub.global_skysub(
                self.sciImg.image,
                self.sciImg.ivar,
                self.tilts,
                thismask,
                self.tslits_dict['slit_left'][:, slit],
                self.tslits_dict['slit_righ'][:, slit],
                inmask=inmask,
                sigrej=sigrej,
                bsp=self.redux_par['bspline_spacing'],
                no_poly=self.redux_par['no_poly'],
                pos_mask=(not self.ir_redux),
                show_fit=show_fit)
            # Mask if something went wrong
            if np.sum(self.global_sky[thismask]) == 0.:
                self.maskslits[slit] = True

        if update_crmask:
            self.sciImg.update_mask_cr(subtract_img=self.global_sky)
            #self.crmask = procimg.lacosmic(self.det, self.sciimg-self.global_sky,
            #                               self.spectrograph.detector[self.det-1]['saturation'],
            #                               self.spectrograph.detector[self.det-1]['nonlinear'],
            #                               varframe=utils.calc_ivar(self.sciivar),
            #                               maxiter=self.proc_par['lamaxiter'],
            #                               grow=self.proc_par['grow'],
            #                               remove_compact_obj=self.proc_par['rmcompact'],
            #                               sigclip=self.proc_par['sigclip'],
            #                               sigfrac=self.proc_par['sigfrac'],
            #                               objlim=self.proc_par['objlim'])
            # Rebuild the mask with this new crmask
            #self.mask = procimg.update_mask_cr(self.sciImg.bitmask, self.mask, self.crmask)

        # Step
        self.steps.append(inspect.stack()[0][3])

        if show:
            sobjs_show = None if show_objs else self.sobjs_obj
            # Global skysub is the first step in a new extraction so clear the channels here
            self.show('global', slits=True, sobjs=sobjs_show, clear=False)

        # Return
        return self.global_sky
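
    # A hedged usage sketch (illustration only): global_skysub is typically run
    # after object finding has produced a sky mask. `redux` stands in for the
    # reduction object this method belongs to:
    #   global_sky = redux.global_skysub(skymask=skymask, show_fit=False)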
Example #24
    def get_wv_calib(self):
        """
        Load or generate the 1D wavelength calibrations

        Requirements:
          msarc, msbpm, tslits_dict
          det, par

        Returns:
            dict, ndarray: :attr:`wv_calib` calibration dict and the updated slit mask array
        """
        # Check for existing data
        if not self._chk_objs(['msarc', 'msbpm', 'tslits_dict']):
            msgs.error("don't have all the objects")

        # Check internals
        self._chk_set(['det', 'calib_ID', 'par'])
        if 'arc' not in self.master_key_dict.keys():
            msgs.error('Arc master key not set.  First run get_arc.')

        # Return existing data
        if self._cached('wavecalib', self.master_key_dict['arc']) \
                and self._cached('wvmask', self.master_key_dict['arc']):
            self.wv_calib = self.calib_dict[
                self.master_key_dict['arc']]['wavecalib']
            self.wv_maskslits = self.calib_dict[
                self.master_key_dict['arc']]['wvmask']
            self.tslits_dict['maskslits'] += self.wv_maskslits
            return self.wv_calib

        # No wavelength calibration requested
        if self.par['wavelengths']['reference'] == 'pixel':
            msgs.info("A wavelength calibration will not be performed")
            self.wv_calib = None
            self.wv_maskslits = np.zeros_like(self.maskslits, dtype=bool)
            self.tslits_dict['maskslits'] += self.wv_maskslits
            return self.wv_calib

        # Grab arc binning (may be different from science!)
        arc_rows = self.fitstbl.find_frames('arc',
                                            calib_ID=self.calib_ID,
                                            index=True)
        self.arc_files = self.fitstbl.frame_paths(arc_rows)
        binspec, binspat = parse.parse_binning(
            self.spectrograph.get_meta_value(self.arc_files[0], 'binning'))
        # Instantiate
        self.waveCalib = wavecalib.WaveCalib(
            self.msarc,
            self.tslits_dict,
            self.spectrograph,
            self.par['wavelengths'],
            binspectral=binspec,
            det=self.det,
            master_key=self.master_key_dict['arc'],
            master_dir=self.master_dir,
            reuse_masters=self.reuse_masters,
            qa_path=self.qa_path,
            msbpm=self.msbpm)
        # Load from disk (MasterFrame)?
        self.wv_calib = self.waveCalib.load()
        if self.wv_calib is None:
            self.wv_calib, _ = self.waveCalib.run(skip_QA=(not self.write_qa))
            # Save to Masters
            if self.save_masters:
                self.waveCalib.save()

        # Create the mask (needs to be done here in case wv_calib was loaded from Masters)
        # TODO: This should either be done here or save as part of the
        # master frame file.  As it is, if not loaded from the master
        # frame file, mask_maskslits is run twice, once in run above and
        # once here...
        self.wv_maskslits = self.waveCalib.make_maskslits(
            self.tslits_dict['slit_left'].shape[1])
        self.tslits_dict['maskslits'] += self.wv_maskslits

        # Save & return
        self._update_cache('arc', ('wavecalib', 'wvmask'),
                           (self.wv_calib, self.wv_maskslits))
        # Return
        return self.wv_calib
Example #25
    def local_skysub_extract(self,
                             waveimg,
                             global_sky,
                             sobjs,
                             spat_pix=None,
                             maskslits=None,
                             model_noise=True,
                             std=False,
                             show_profile=False,
                             show=False):
        """
        Perform local sky subtraction, profile fitting, and optimal extraction slit by slit

        Wrapper to skysub.local_skysub_extract

        Parameters
        ----------
        sobjs: object
           Specobjs object containing Specobj objects containing information about objects found.
        waveimg: ndarray, shape (nspec, nspat)
           Wavelength map

        Optional Parameters
        -------------------


        Returns:
            global_sky: (numpy.ndarray) image of the the global sky model
        """
        self.waveimg = waveimg
        self.global_sky = global_sky

        # get the good slits and assign self.maskslits
        self.maskslits = self.maskslits if maskslits is None else maskslits
        gdslits = np.where(np.invert(self.maskslits))[0]

        # Allocate the images that are needed
        # Initialize to mask in case no objects were found
        self.outmask = np.copy(self.sciImg.mask)
        # Initialize to input mask in case no objects were found
        self.extractmask = (self.sciImg.mask == 0)
        # Initialize to zero in case no objects were found
        self.objmodel = np.zeros_like(self.sciImg.image)
        # Set initially to global sky in case no objects were found
        self.skymodel = np.copy(self.global_sky)
        # Set initially to sciivar in case no objects were found.
        self.ivarmodel = np.copy(self.sciImg.ivar)

        # Could actually create a model anyway here, but probably
        # overkill since nothing is extracted

        self.sobjs = sobjs.copy()
        # Loop on slits
        for slit in gdslits:
            msgs.info(
                "Local sky subtraction and extraction for slit: {:d}".format(
                    slit))
            thisobj = (self.sobjs.slitid == slit
                       )  # indices of objects for this slit
            if np.any(thisobj):
                thismask = (self.slitmask == slit)  # pixels for this slit
                # True  = Good, False = Bad for inmask
                inmask = (self.sciImg.mask == 0) & thismask
                # Local sky subtraction and extraction
                self.skymodel[thismask], self.objmodel[thismask], self.ivarmodel[thismask], \
                    self.extractmask[thismask] = skysub.local_skysub_extract(
                    self.sciImg.image, self.sciImg.ivar, self.tilts, self.waveimg, self.global_sky, self.sciImg.rn2img,
                    thismask, self.tslits_dict['slit_left'][:,slit], self.tslits_dict['slit_righ'][:, slit],
                    self.sobjs[thisobj], spat_pix=spat_pix, model_full_slit=self.redux_par['model_full_slit'],
                    box_rad=self.redux_par['boxcar_radius']/self.spectrograph.detector[self.det-1]['platescale'],
                    sigrej=self.redux_par['sky_sigrej'],
                    model_noise=model_noise, std=std, bsp=self.redux_par['bspline_spacing'],
                    sn_gauss=self.redux_par['sn_gauss'], inmask=inmask, show_profile=show_profile)

        # Set the bit for pixels which were masked by the extraction.
        # For extractmask, True = Good, False = Bad
        iextract = (self.sciImg.mask == 0) & (self.extractmask == False)
        self.outmask[iextract] = self.sciImg.bitmask.turn_on(
            self.outmask[iextract], 'EXTRACT')

        # Step
        self.steps.append(inspect.stack()[0][3])

        if show:
            self.show('local', sobjs=self.sobjs, slits=True)
            self.show('resid', sobjs=self.sobjs, slits=True)

        # Return
        return self.skymodel, self.objmodel, self.ivarmodel, self.outmask, self.sobjs
Example #26
def iterative_fitting(spec, tcent, ifit, IDs, llist, disp,
                      match_toler = 2.0, func = 'legendre', n_first=2, sigrej_first=2.0,
                      n_final=4, sigrej_final=3.0, input_only=False,
                      weights=None, plot_fil=None, verbose=False):

    """ Routine for iteratively fitting wavelength solutions.

    Parameters
    ----------
    spec : ndarray, shape = (nspec,)
      arcline spectrum
    tcent : ndarray
      Centroids in pixels of lines identified in spec
    ifit : ndarray
      Indices of the lines that will be fit
    IDs: ndarray
      Wavelengths assigned to the lines indexed by ifit
    llist: dict
      Linelist dictionary
    disp: float
      dispersion

    Optional Parameters
    -------------------
    match_toler: float, default = 2.0
      Matching tolerance when searching for new lines. This is the difference, expressed in pixels, between the wavelength
      assigned to an arc line by an iteration of the wavelength solution and the closest wavelength in the line list.
    func: str, default = 'legendre'
      Name of function used for the wavelength solution
    n_first: int, default = 2
      Order of first guess to the wavelength solution.
    sigrej_first: float, default = 2.0
      Number of sigma for rejection for the first guess to the wavelength solution.
    n_final: int, default = 4
      Order of the final wavelength solution fit
    sigrej_final: float, default = 3.0
      Number of sigma for rejection for the final fit to the wavelength solution.
    input_only: bool
      If True, the routine will only perform a robust polyfit to the input IDs.
      If False, the routine will fit the input IDs, and then include additional
      lines in the linelist that are a satisfactory fit.
    weights: ndarray, optional
      Weights applied to each line in the fits (defaults to uniform weights).
    verbose : bool
      If True, print out more information.
    plot_fil: str, optional
      Filename for the arc-fit QA plot.

    Returns
    -------
    final_fit: :class:`pypeit.core.wavecal.wv_fitting.WaveFit`
    """

    #TODO JFH add error checking here to ensure that IDs and ifit have the same size!

    if weights is None:
        weights = np.ones(tcent.size)

    nspec = spec.size
    xnspecmin1 = float(nspec-1)
    # Setup for fitting
    sv_ifit = list(ifit)  # Keep the originals
    all_ids = -999.*np.ones(len(tcent))
    all_idsion = np.array(['UNKNWN']*len(tcent))
    all_ids[ifit] = IDs

    # Fit
    n_order = n_first
    flg_continue = True
    flg_penultimate = False
    fmin, fmax = 0.0, 1.0
    # Note the number of parameters is actually n_order and not n_order+1
    while flg_continue:
        if flg_penultimate:
            flg_continue = False
        # Fit with rejection
        xfit, yfit, wfit = tcent[ifit], all_ids[ifit], weights[ifit]
        maxiter = xfit.size - n_order - 2
        #
        if xfit.size == 0:
            msgs.warn("All points rejected !!")
            return None
        # Fit
        pypeitFit = fitting.robust_fit(xfit/xnspecmin1, yfit, n_order, function=func, maxiter=maxiter,
                                       lower=sigrej_first, upper=sigrej_first, maxrej=1, sticky=True,
                                       minx=fmin, maxx=fmax, weights=wfit)
        # Junk fit?
        if pypeitFit is None:
            msgs.warn("Bad fit!!")
            return None

        rms_ang = pypeitFit.calc_fit_rms(apply_mask=True)
        rms_pix = rms_ang/disp
        if verbose:
            msgs.info('n_order = {:d}'.format(n_order) + ': RMS = {:g}'.format(rms_pix))

        # Reject but keep originals (until final fit)
        ifit = list(ifit[pypeitFit.gpm == 1]) + sv_ifit
        if not input_only:
            # Find new points from the linelist (should we allow removal of the originals?)
            twave = pypeitFit.eval(tcent/xnspecmin1)#, func, minx=fmin, maxx=fmax)
            for ss, iwave in enumerate(twave):
                mn = np.min(np.abs(iwave-llist['wave']))
                if mn/disp < match_toler:
                    imn = np.argmin(np.abs(iwave-llist['wave']))
                    #if verbose:
                    #    print('Adding {:g} at {:g}'.format(llist['wave'][imn],tcent[ss]))
                    # Update and append
                    all_ids[ss] = llist['wave'][imn]
                    all_idsion[ss] = llist['ion'][imn]
                    ifit.append(ss)
        # Keep unique ones
        ifit = np.unique(np.array(ifit, dtype=int))
        # Increment order?
        if n_order < n_final:
            n_order += 1
        else:
            flg_penultimate = True

    # Final fit (originals can now be rejected)
    xfit, yfit, wfit = tcent[ifit], all_ids[ifit], weights[ifit]
    pypeitFit = fitting.robust_fit(xfit/xnspecmin1, yfit, n_order, function=func,
                                   lower=sigrej_final, upper=sigrej_final, maxrej=1, sticky=True,
                                   minx=fmin, maxx=fmax, weights=wfit)#, debug=True)
    irej = np.where(np.logical_not(pypeitFit.bool_gpm))[0]
    if len(irej) > 0:
        xrej = xfit[irej]
        yrej = yfit[irej]
        if verbose:
            for kk, imask in enumerate(irej):
                wave = pypeitFit.eval(xrej[kk]/xnspecmin1)#, func, minx=fmin, maxx=fmax)
                msgs.info('Rejecting arc line {:g}; {:g}'.format(yfit[imask], wave))
    else:
        xrej = []
        yrej = []

    ions = all_idsion[ifit]
    # Final RMS
    rms_ang = pypeitFit.calc_fit_rms(apply_mask=True)
    rms_pix = rms_ang/disp

    # Pack up fit
    spec_vec = np.arange(nspec)
    wave_soln = pypeitFit.eval(spec_vec/xnspecmin1)
    cen_wave = pypeitFit.eval(float(nspec)/2/xnspecmin1)
    cen_wave_min1 = pypeitFit.eval((float(nspec)/2 - 1.0)/xnspecmin1)
    cen_disp = cen_wave - cen_wave_min1

    # Ions bit
    ion_bits = np.zeros(len(ions), dtype=WaveFit.bitmask.minimum_dtype())
    for kk,ion in enumerate(ions):
        ion_bits[kk] = WaveFit.bitmask.turn_on(ion_bits[kk], ion.replace(' ', ''))

    # DataContainer time
    # spat_id is set to an arbitrary -1 here and is updated in wavecalib.py
    final_fit = WaveFit(-1, pypeitfit=pypeitFit, pixel_fit=xfit, wave_fit=yfit,
                        ion_bits=ion_bits, xnorm=xnspecmin1,
                        cen_wave=cen_wave, cen_disp=cen_disp,
                        spec=spec, wave_soln = wave_soln, sigrej=sigrej_final,
                        shift=0., tcent=tcent, rms=rms_pix)

    # QA
    if plot_fil is not None:
        autoid.arc_fit_qa(final_fit, plot_fil)
    # Return
    return final_fit
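
# A hedged usage sketch (the helper name `_demo_iterative_fitting` is ours) with
# fabricated, perfectly linear toy data; input_only=True restricts the routine
# to fitting the input IDs. Running it requires the fitting/WaveFit machinery
# imported by this module.
def _demo_iterative_fitting():
    import numpy as np
    from astropy.table import Table
    tcent = np.array([100., 500., 900., 1300., 1700.])  # toy line centroids (pixels)
    IDs = 4000.0 + 0.5*tcent  # toy wavelengths (Angstrom), exactly linear
    llist = Table({'wave': IDs, 'ion': ['ArI']*IDs.size})
    return iterative_fitting(np.zeros(2048), tcent, np.arange(tcent.size), IDs,
                             llist, disp=0.5, n_final=2, input_only=True)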
Example #27
def gen_pixloc(frame_shape, xgap=0, ygap=0, ysize=1., gen=True):
    """
    Generate an array of physical pixel coordinates

    Parameters
    ----------
    frame_shape : tuple
      Shape of the frame, (nspec, nspat)
    xgap : int (optional)
      Gap between pixels in the dispersion direction
    ygap : int (optional)
      Gap between pixels in the spatial direction
    ysize : float (optional)
      Size of a pixel in the spatial direction
    gen : bool, optional
      Only True is currently supported

    Returns
    -------
    locations : ndarray
      A 3D array containing the x center, y center, x width and y width of each pixel.
      The returned array has a shape:   frame_shape + (4,)
    """
    #dnum = settings.get_dnum(det)
    msgs.info("Deriving physical pixel locations on the detector")
    locations = np.zeros((frame_shape[0],frame_shape[1],4))
    if gen:
        msgs.info("Pixel gap in the dispersion direction = {0:4.3f}".format(xgap))
        msgs.info("Pixel size in the dispersion direction = {0:4.3f}".format(1.0))
        xs = np.arange(frame_shape[0]*1.0)*xgap
        xt = 0.5 + np.arange(frame_shape[0]*1.0) + xs
        msgs.info("Pixel gap in the spatial direction = {0:4.3f}".format(ygap))
        msgs.info("Pixel size in the spatial direction = {0:4.3f}".format(ysize))
        ys = np.arange(frame_shape[1])*ygap*ysize
        yt = ysize*(0.5 + np.arange(frame_shape[1]*1.0)) + ys
        xloc, yloc = np.meshgrid(xt, yt)
#		xwid, ywid = np.meshgrid(xs,ys)
        msgs.info("Saving pixel locations")
        locations[:,:,0] = xloc.T
        locations[:,:,1] = yloc.T
        locations[:,:,2] = 1.0
        locations[:,:,3] = ysize
    else:
        msgs.error("Have not yet included an algorithm to automatically generate pixel locations")
    return locations
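
# A hedged usage sketch (the helper name `_demo_gen_pixloc` is ours): pixel
# locations for an ungapped 2048x512 detector using the defaults.
def _demo_gen_pixloc():
    locations = gen_pixloc((2048, 512))
    # locations.shape == (2048, 512, 4): x center, y center, x width, y width
    return locations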
Example #28
    def main(pargs):

        from astropy.io import fits

        # Load the file
        config_lines, spec1dfiles = read_flexfile(pargs.flex_file)

        # Read in spectrograph from spec1dfile header
        header = fits.getheader(spec1dfiles[0])
        spectrograph = load_spectrograph(header['PYP_SPEC'])

        # Parameters
        spectrograph_def_par = spectrograph.default_pypeit_par()
        par = pypeitpar.PypeItPar.from_cfg_lines(
            cfg_lines=spectrograph_def_par.to_config(),
            merge_with=config_lines)

        # Loop over the spec1d files
        for filename in spec1dfiles:
            # Instantiate
            mdFlex = flexure.MultiSlitFlexure(s1dfile=filename)
            # Initialize
            msgs.info("Setup")
            mdFlex.init(spectrograph, par['flexure'])

            # INITIAL SKY LINE STUFF
            msgs.info("Measuring sky lines")
            mdFlex.measure_sky_lines()

            # FIT SURFACES
            msgs.info("Fitting the surface")
            mdFlex.fit_mask_surfaces()

            # Apply
            msgs.info("Applying flexure correction")
            mdFlex.update_fit()

            # REFIT FOR QA PLOTS
            msgs.info("Generate QA")
            mask = header['TARGET'].strip()
            fnames = header['FILENAME'].split('.')
            root = mask + '_' + fnames[2]
            mdFlex.qa_plots('./', root)

            # Write
            msgs.info("Write to disk")
            mdFlex.to_file(pargs.outroot + root + '.fits',
                           overwrite=pargs.clobber)

            # Apply??

        print("All done!!")
Example #29
    def apply_flux_calib(self,
                         sens_dict,
                         exptime,
                         telluric_correct=False,
                         extinct_correct=False,
                         airmass=None,
                         longitude=None,
                         latitude=None):
        """
        Apply a sensitivity function to our spectrum

        FLAM, FLAM_SIG, and FLAM_IVAR are generated

        Args:
            sens_dict (dict):
                Sensitivity function dict
            exptime (float):
                Exposure time in seconds
            telluric_correct (bool, optional):
                If True, apply the telluric correction stored in
                ``sens_dict`` (if present)
            extinct_correct (bool, optional):
                If True, apply an atmospheric extinction correction
            airmass (float, optional):
                Airmass of the observation; used for the extinction correction
            longitude (float, optional):
                Longitude of the observatory in degrees; used for the
                extinction correction
            latitude (float, optional):
                Latitude of the observatory in degrees; used for the
                extinction correction
        """
        # Loop on extraction modes
        for attr in ['BOX', 'OPT']:
            if attr + '_WAVE' not in self._data.keys():
                continue
            msgs.info("Fluxing {:s} extraction for:".format(attr) +
                      msgs.newline() + "{}".format(self))
            wave = self[attr + '_WAVE']
            wave_sens = sens_dict['wave']
            sensfunc = sens_dict['sensfunc'].copy()

            # Did the user request a telluric correction from the same file?
            if telluric_correct and 'telluric' in sens_dict.keys():
                # This assumes there is a separate telluric key in this dict.
                telluric = sens_dict['telluric']
                msgs.info('Applying telluric correction')
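                # Divide out the telluric model, guarding against division by
                # zero: pixels where telluric <= 1e-10 get sensfunc = 0 instead.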
                sensfunc = sensfunc * (telluric > 1e-10) / (telluric +
                                                            (telluric < 1e-10))

            sensfunc_obs = interpolate.interp1d(wave_sens,
                                                sensfunc,
                                                bounds_error=False,
                                                fill_value='extrapolate')(wave)
            if extinct_correct:
                if longitude is None or latitude is None:
                    msgs.error(
                        'You must specify longitude and latitude if we are extinction correcting'
                    )
                # Apply Extinction if optical bands
                msgs.info("Applying extinction correction")
                msgs.warn(
                    "Extinction correction applied only if the spectrum covers < 10000 Ang."
                )
                extinct = flux_calib.load_extinction_data(longitude, latitude)
                ext_corr = flux_calib.extinction_correction(
                    wave * units.AA, airmass, extinct)
                senstot = sensfunc_obs * ext_corr
            else:
                senstot = sensfunc_obs.copy()

            flam = self[attr + '_COUNTS'] * senstot / exptime
            flam_sig = (senstot / exptime) / (np.sqrt(
                self[attr + '_COUNTS_IVAR']))
            flam_ivar = self[attr + '_COUNTS_IVAR'] / (senstot / exptime)**2

            # Mask bad pixels
            msgs.info(" Masking bad pixels")
            msk = np.zeros_like(senstot).astype(bool)
            msk[senstot <= 0.] = True
            msk[self[attr + '_COUNTS_IVAR'] <= 0.] = True
            flam[msk] = 0.
            flam_sig[msk] = 0.
            flam_ivar[msk] = 0.

            # Finish
            self[attr + '_FLAM'] = flam
            self[attr + '_FLAM_SIG'] = flam_sig
            self[attr + '_FLAM_IVAR'] = flam_ivar
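For reference, the flux-calibration arithmetic above in standalone form (a toy sketch with invented numbers; the real method operates on the SpecObj data arrays):

import numpy as np

counts = np.array([100., 200., 50.])        # extracted counts (toy values)
counts_ivar = np.array([1e-2, 1e-2, 0.])    # inverse variance of the counts
senstot = np.array([2.0, 2.0, -1.0])        # total sensitivity (last pixel bad)
exptime = 600.0                             # exposure time in seconds

flam = counts * senstot / exptime
with np.errstate(divide='ignore'):
    flam_sig = (senstot / exptime) / np.sqrt(counts_ivar)
flam_ivar = counts_ivar / (senstot / exptime)**2

# Zero out pixels with a non-positive sensitivity or inverse variance
msk = (senstot <= 0.) | (counts_ivar <= 0.)
for arr in (flam, flam_sig, flam_ivar):
    arr[msk] = 0.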
Exemple #30
    def print_end_time(self):
        """
        Print the elapsed time
        """
        # Capture the end time and print it to user
        msgs.info(utils.get_time_string(time.time() - self.tstart))
Exemple #31
    def splice(self, wave):
        """
        Routine to splice together sensitivity functions into one global sensitivity function for spectrographs
        with multiple detectors extending across the wavelength direction.

        Parameters
        ----------
        wave: ndarray, shape (nspec, norddet)

        Returns
        -------
        wave_splice: ndarray, shape (nspec_splice,)
        sensfunc_splice: ndarray, shape (nspec_splice,)


        """

        msgs.info('Merging sensfunc for {:d} detectors {:}'.format(
            self.norderdet, self.par['multi_spec_det']))
        wave_splice_min = wave.min()
        wave_splice_max = wave.max()
        wave_splice, _, _ = coadd.get_wave_grid(wave,
                                                wave_method='linear',
                                                wave_grid_min=wave_splice_min,
                                                wave_grid_max=wave_splice_max,
                                                samp_fact=1.0)
        sensfunc_splice = np.zeros_like(wave_splice)
        for idet in range(self.norderdet):
            wave_min = self.out_table['WAVE_MIN'][idet]
            wave_max = self.out_table['WAVE_MAX'][idet]
            if idet == 0:
                # If this is the bluest detector, extend the mask down to wave_splice_min
                wave_mask_min = wave_splice_min
                wave_mask_max = wave_max
            elif idet == (self.norderdet - 1):
                # If this is the reddest detector, extend the mask up to wave_splice_max
                wave_mask_min = wave_min
                wave_mask_max = wave_splice_max
            else:
                wave_mask_min = wave_min
                wave_mask_max = wave_max
            splice_wave_mask = (wave_splice >= wave_mask_min) & (wave_splice <=
                                                                 wave_mask_max)
            sensfunc_splice[splice_wave_mask] = self.eval_sensfunc(
                wave_splice[splice_wave_mask], idet)

        # Interpolate over gaps
        zeros = sensfunc_splice == 0.
        if np.any(zeros):
            msgs.info(
                "Interpolating over gaps (and extrapolating with fill_value=0, if need be)"
            )
            interp_func = scipy.interpolate.interp1d(
                wave_splice[np.invert(zeros)],
                sensfunc_splice[np.invert(zeros)],
                kind='nearest',
                fill_value=0.,
                bounds_error=False)
            # Note: fill_value='extrapolate' with kind='nearest' fails in some
            # scipy versions (seen even on 1.4.1), hence the fixed fill_value
            zero_values = interp_func(wave_splice[zeros])
            sensfunc_splice[zeros] = zero_values

        self.steps.append(inspect.stack()[0][3])

        return wave_splice, sensfunc_splice
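A minimal, self-contained sketch of the gap-filling step above (nearest-neighbor interpolation over the zeroed pixels), with toy arrays standing in for the spliced grid:

import numpy as np
from scipy import interpolate

wave_splice = np.linspace(4000., 9000., 11)   # toy spliced wavelength grid (Ang)
sensfunc_splice = np.array([1., 1., 1., 0., 0., 2., 2., 0., 3., 3., 3.])

zeros = sensfunc_splice == 0.
interp_func = interpolate.interp1d(wave_splice[~zeros], sensfunc_splice[~zeros],
                                   kind='nearest', fill_value=0., bounds_error=False)
sensfunc_splice[zeros] = interp_func(wave_splice[zeros])
# The gaps now carry the nearest valid detector's sensitivity value.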
Exemple #32
    def calib_all(self, run=True):
        """
        Create calibrations for all setups

        This will not crash if not all of the standard set of files are not provided

        Args:
            run (bool, optional): If False, only print the calib names and do
            not actually run.  Only used with the pypeit_parse_calib_id script

        Returns:
            dict: A simple dict summarizing the calibration names
        """
        calib_dict = {}

        self.tstart = time.time()

        # Frame indices
        frame_indx = np.arange(len(self.fitstbl))
        for i in range(self.fitstbl.n_calib_groups):
            # 1-indexed calib number
            calib_grp = str(i + 1)
            # Find all the frames in this calibration group
            in_grp = self.fitstbl.find_calib_group(i)
            grp_frames = frame_indx[in_grp]

            # Find the detectors to reduce
            detectors = self.spectrograph.select_detectors(
                subset=self.par['rdx']['detnum'])
            calib_dict[calib_grp] = {}
            # Loop on Detectors
            for self.det in detectors:
                # Instantiate Calibrations class
                self.caliBrate = calibrations.Calibrations.get_instance(
                    self.fitstbl,
                    self.par['calibrations'],
                    self.spectrograph,
                    self.calibrations_path,
                    qadir=self.qa_path,
                    reuse_masters=self.reuse_masters,
                    show=self.show,
                    user_slits=slittrace.merge_user_slit(
                        self.par['rdx']['slitspatnum'],
                        self.par['rdx']['maskIDs']))
                # Do it
                # TODO: Why isn't set_config part of the Calibrations.__init__ method?
                self.caliBrate.set_config(grp_frames[0], self.det,
                                          self.par['calibrations'])

                # Allow skipping the run (e.g. parse_calib_id.py script)
                if run:
                    self.caliBrate.run_the_steps()

                key = self.caliBrate.master_key_dict['frame']
                calib_dict[calib_grp][key] = {}
                for step in self.caliBrate.steps:
                    if step in ['bpm', 'slits', 'wv_calib', 'tilts', 'flats']:
                        continue
                    elif step == 'tiltimg':  # Annoying kludge
                        step = 'tilt'
                    # Prep
                    raw_files, self.caliBrate.master_key_dict[
                        step] = self.caliBrate._prep_calibrations(step)
                    masterframe_name = masterframe.construct_file_name(
                        buildimage.frame_image_classes[step],
                        self.caliBrate.master_key_dict[step],
                        master_dir=self.caliBrate.master_dir)

                    # Add to dict
                    if len(raw_files) > 0:
                        calib_dict[calib_grp][key][step] = {}
                        calib_dict[calib_grp][key][step][
                            'master_key'] = self.caliBrate.master_key_dict[
                                step]
                        calib_dict[calib_grp][key][step][
                            'master_name'] = os.path.basename(masterframe_name)
                        calib_dict[calib_grp][key][step]['raw_files'] = [
                            os.path.basename(ifile) for ifile in raw_files
                        ]

        # Print the results
        print(json.dumps(calib_dict, sort_keys=True, indent=4))

        # Write
        msgs.info('Writing calib file')
        calib_file = self.pypeit_file.replace('.pypeit', '.calib_ids')
        ltu.savejson(calib_file, calib_dict, overwrite=True, easy_to_read=True)

        # Finish
        self.print_end_time()

        # Return
        return calib_dict
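The JSON printed above has the following shape (illustrative keys and file names only; actual master keys and names depend on the instrument, setup, and master key scheme):

calib_dict = {
    '1': {                                   # 1-indexed calibration group
        'A_1_DET01': {                       # master key of the group's frame
            'arc': {
                'master_key': 'A_1_DET01',
                'master_name': 'MasterArc_A_1_DET01.fits',
                'raw_files': ['b0001.fits'],
            },
            # ... one entry per calibration step that has raw files
        },
    },
}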
Exemple #33
def main(args):
    """ Executes 2d coadding
    """
    msgs.warn('PATH = ' + os.getcwd())
    # Load the file
    if args.file is not None:
        spectrograph, config_lines, spec2d_files = read_coadd2d_file(args.file)
        # Parameters
        # TODO: Shouldn't this reinstantiate the same parameters used in
        # the PypeIt run that extracted the objects?  Why are we not
        # just passing the pypeit file?
        # JFH: The reason is that the coadd2dfile may want different reduction parameters
        spectrograph_def_par = spectrograph.default_pypeit_par()
        parset = par.PypeItPar.from_cfg_lines(
            cfg_lines=spectrograph_def_par.to_config(),
            merge_with=config_lines)
    elif args.obj is not None:
        # TODO: We should probably be reading the pypeit file and using those parameters here rather than using the
        # default parset.
        # TODO: This needs to define the science path
        spec2d_files = glob.glob('./Science/spec2d_*' + args.obj + '*')
        head0 = fits.getheader(spec2d_files[0])
        spectrograph_name = head0['SPECTROG']
        spectrograph = load_spectrograph(spectrograph_name)
        parset = spectrograph.default_pypeit_par()
    else:
        msgs.error(
            'You must either input a coadd2d file with --file or an object name with --obj'
        )

    # If detector was passed as an argument override whatever was in the coadd2d_file
    if args.det is not None:
        msgs.info("Restricting reductions to detector={}".format(args.det))
        parset['rdx']['detnum'] = int(args.det)

    # Get headers (if possible) and base names
    spec1d_files = [
        f.replace('spec2d', 'spec1d') for f in spec2d_files
    ]
    head1d = None
    for spec1d_file in spec1d_files:
        if os.path.isfile(spec1d_file):
            head1d = fits.getheader(spec1d_file)
            break
    if head1d is None:
        msgs.warn("No 1D spectra so am generating a dummy header for output")
        head1d = io.initialize_header()

    head2d = fits.getheader(spec2d_files[0])
    if args.basename is None:
        filename = os.path.basename(spec2d_files[0])
        basename = filename.split('_')[2]
    else:
        basename = args.basename

    # Write the par to disk
    par_outfile = basename + '_coadd2d.par'
    print("Writing the parameters to {}".format(par_outfile))
    parset.to_config(par_outfile)

    # Now run the coadds

    skysub_mode = head2d['SKYSUB']
    ir_redux = 'DIFF' in skysub_mode

    # Print status message
    msgs_string = 'Reducing target {:s}'.format(basename) + msgs.newline()
    msgs_string += 'Performing coadd of frames reduced with {:s} imaging'.format(
        skysub_mode)
    msgs_string += msgs.newline(
    ) + 'Combining frames in 2d coadd:' + msgs.newline()
    for file in spec2d_files:
        msgs_string += '{0:s}'.format(os.path.basename(file)) + msgs.newline()
    msgs.info(msgs_string)

    # TODO: This needs to be added to the parameter list for rdx
    redux_path = os.getcwd()
    master_dirname = os.path.basename(head2d['PYPMFDIR']) + '_coadd'
    master_dir = os.path.join(redux_path, master_dirname)

    # Make the new Master dir
    if not os.path.isdir(master_dir):
        msgs.info(
            'Creating directory for Master output: {0}'.format(master_dir))
        os.makedirs(master_dir)

    # Instantiate the sci_dict
    sci_dict = OrderedDict()  # This needs to be ordered
    sci_dict['meta'] = {}
    sci_dict['meta']['vel_corr'] = 0.
    sci_dict['meta']['ir_redux'] = ir_redux

    # Find the detectors to reduce
    detectors = PypeIt.select_detectors(detnum=parset['rdx']['detnum'],
                                        ndet=spectrograph.ndet)
    if len(detectors) != spectrograph.ndet:
        msgs.warn('Not reducing detectors: {0}'.format(' '.join([
            str(d)
            for d in set(np.arange(spectrograph.ndet) + 1) - set(detectors)
        ])))

    # Loop on detectors
    for det in detectors:
        msgs.info("Working on detector {0}".format(det))
        sci_dict[det] = {}

        # Instantiate Coadd2d
        coadd = coadd2d.instantiate_me(spec2d_files,
                                       spectrograph,
                                       det=det,
                                       offsets=parset['coadd2d']['offsets'],
                                       weights=parset['coadd2d']['weights'],
                                       par=parset,
                                       ir_redux=ir_redux,
                                       debug_offsets=args.debug_offsets,
                                       debug=args.debug,
                                       samp_fact=args.samp_fact)

        # Coadd the slits
        coadd_dict_list = coadd.coadd(
            only_slits=None)  # TODO implement only_slits later
        # Create the pseudo images
        psuedo_dict = coadd.create_psuedo_image(coadd_dict_list)
        # Reduce
        msgs.info('Running the extraction')
        sci_dict[det]['sciimg'], sci_dict[det]['sciivar'], sci_dict[det]['skymodel'], sci_dict[det]['objmodel'], \
        sci_dict[det]['ivarmodel'], sci_dict[det]['outmask'], sci_dict[det]['specobjs'] = coadd.reduce(
            psuedo_dict, show = args.show, show_peaks = args.peaks)
        # Save pseudo-image master files
        coadd.save_masters(master_dir)

    # Make the new Science dir
    # TODO: This needs to be defined by the user
    scipath = os.path.join(redux_path, 'Science_coadd')
    if not os.path.isdir(scipath):
        msgs.info('Creating directory for Science output: {0}'.format(scipath))
        os.makedirs(scipath)

    # Save the results
    save.save_all(sci_dict, coadd.stack_dict['master_key_dict'], master_dir,
                  spectrograph, head1d, head2d, scipath,
                  basename)  #, binning=coadd.binning)
Exemple #34
    def reduce_all(self):
        """
        Main driver of the entire reduction

        Calibration and extraction via a series of calls to reduce_exposure()

        """
        # Validate the parameter set
        self.par.validate_keys(required=[
            'rdx', 'calibrations', 'scienceframe', 'reduce', 'flexure'
        ])
        self.tstart = time.time()

        # Find the standard frames
        is_standard = self.fitstbl.find_frames('standard')

        # Find the science frames
        is_science = self.fitstbl.find_frames('science')

        # Frame indices
        frame_indx = np.arange(len(self.fitstbl))

        # Standard Star(s) Loop
        # Iterate over each calibration group and reduce the standards
        for i in range(self.fitstbl.n_calib_groups):

            # Find all the frames in this calibration group
            in_grp = self.fitstbl.find_calib_group(i)

            # Find the indices of the standard frames in this calibration group:
            grp_standards = frame_indx[is_standard & in_grp]

            # Reduce all the standard frames, loop on unique comb_id
            u_combid_std = np.unique(self.fitstbl['comb_id'][grp_standards])
            for j, comb_id in enumerate(u_combid_std):
                frames = np.where(self.fitstbl['comb_id'] == comb_id)[0]
                bg_frames = np.where(self.fitstbl['bkg_id'] == comb_id)[0]
                if not self.outfile_exists(frames[0]) or self.overwrite:
                    # Build history to document what contributed to the reduced
                    # exposure
                    history = History(self.fitstbl.frame_paths(frames[0]))
                    history.add_reduce(i, self.fitstbl, frames, bg_frames)
                    std_spec2d, std_sobjs = self.reduce_exposure(
                        frames, bg_frames=bg_frames)

                    # TODO come up with sensible naming convention for save_exposure for combined files
                    self.save_exposure(frames[0], std_spec2d, std_sobjs,
                                       self.basename, history)
                else:
                    msgs.info(
                        'Output file: {:s} already exists'.format(
                            self.fitstbl.construct_basename(frames[0])) +
                        '. Set overwrite=True to recreate and overwrite.')

        # Science Frame(s) Loop
        # Iterate over each calibration group again and reduce the science frames
        for i in range(self.fitstbl.n_calib_groups):
            # Find all the frames in this calibration group
            in_grp = self.fitstbl.find_calib_group(i)

            # Find the indices of the science frames in this calibration group:
            grp_science = frame_indx[is_science & in_grp]
            # Associate standards (previously reduced above) for this setup
            std_outfile = self.get_std_outfile(frame_indx[is_standard])
            # Reduce all the science frames; keep the basenames of the science frames for use in flux calibration
            science_basename = [None] * len(grp_science)
            # Loop on unique comb_id
            u_combid = np.unique(self.fitstbl['comb_id'][grp_science])

            for j, comb_id in enumerate(u_combid):
                frames = np.where(self.fitstbl['comb_id'] == comb_id)[0]
                # Find all frames whose comb_id matches the current frame's bkg_id.
                bg_frames = np.where((self.fitstbl['comb_id'] ==
                                      self.fitstbl['bkg_id'][frames][0])
                                     & (self.fitstbl['comb_id'] >= 0))[0]
                # JFH changed the syntax below to that above, which allows frames to be used more than once
                # as a background image. The syntax below would require that we could somehow list multiple
                # numbers for the bkg_id which is impossible without a comma separated list
                #                bg_frames = np.where(self.fitstbl['bkg_id'] == comb_id)[0]
                if not self.outfile_exists(frames[0]) or self.overwrite:

                    # Build history to document what contributed to the reduced
                    # exposure
                    history = History(self.fitstbl.frame_paths(frames[0]))
                    history.add_reduce(i, self.fitstbl, frames, bg_frames)

                    # TODO -- Should we reset/regenerate self.slits.mask for a new exposure
                    sci_spec2d, sci_sobjs = self.reduce_exposure(
                        frames, bg_frames=bg_frames, std_outfile=std_outfile)
                    science_basename[j] = self.basename

                    # TODO come up with sensible naming convention for save_exposure for combined files
                    self.save_exposure(frames[0], sci_spec2d, sci_sobjs,
                                       self.basename, history)
                else:
                    msgs.warn(
                        'Output file: {:s} already exists'.format(
                            self.fitstbl.construct_basename(frames[0])) +
                        '. Set overwrite=True to recreate and overwrite.')

            msgs.info('Finished calibration group {0}'.format(i))

        # Finish
        self.print_end_time()
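The comb_id/bkg_id bookkeeping above can be illustrated with a toy example (invented values): frames sharing a comb_id are stacked, and bkg_id points at the comb_id of the stack used as their background.

import numpy as np

comb_id = np.array([1, 1, 2, 2])   # two exposures per on-source stack
bkg_id = np.array([2, 2, 1, 1])    # each stack uses the other as its background

frames = np.where(comb_id == 1)[0]                                        # -> [0, 1]
bg_frames = np.where((comb_id == bkg_id[frames][0]) & (comb_id >= 0))[0]  # -> [2, 3]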
Exemple #35
    def find_standard(self):
        """
        Identify the standard star from the list of all spectra in the specobjs

          Wrapper to flux.find_standard which simply takes the brightest

        Returns
        -------
        self.std : SpecObj
          Corresponds to the chosen spectrum
        """
        if self.par['std_obj_id'] is not None:
            _ = self._set_std_obj()
            return
        if self.multi_det is not None:
            sv_stds = []
            # Find the standard in each detector
            for det in self.multi_det:
                stds = [sobj for sobj in self.std_specobjs if sobj.det == det]
                if len(stds) == 0:
                    msgs.error("No standard star found on det={}".format(det))
                idx = flux.find_standard(stds)
                sv_stds.append(stds[idx])
                msgs.info("Using standard {} for det={}".format(
                    stds[idx], det))

            # Now splice
            msgs.info(
                "Splicing the standards -- The name will be for the first detector"
            )
            std_splice = sv_stds[0].copy()
            # Append
            for ostd in sv_stds[1:]:
                try:
                    std_splice.optimal['WAVE_GRID'] = np.append(
                        std_splice.optimal['WAVE_GRID'].value,
                        ostd.optimal['WAVE_GRID'].value) * units.AA
                except KeyError:
                    std_splice.optimal['WAVE'] = np.append(
                        std_splice.optimal['WAVE'].value,
                        ostd.optimal['WAVE'].value) * units.AA
                for key in ['COUNTS', 'COUNTS_IVAR']:
                    std_splice.optimal[key] = np.append(
                        std_splice.optimal[key], ostd.optimal[key])
            self.std = std_splice
        elif self.spectrograph.pypeline == 'Echelle':
            # Find brightest object in each order
            std_brightest = self.std_specobjs[flux.find_standard(
                self.std_specobjs)]
            std_objid = std_brightest['idx'].split('-')[0]
            self.std_idx = np.zeros(len(self.std_specobjs), dtype=bool)
            for ii in range(len(self.std_specobjs)):
                if std_objid in self.std_specobjs[ii]['idx']:
                    self.std_idx[ii] = True
            # Set internal
            self.std = self.std_specobjs[self.std_idx]
            # Step
            self.steps.append(inspect.stack()[0][3])
            # Return
            return self.std
        else:
            # Find brightest object in the exposures
            # Searches over all slits (over all detectors), and all objects
            self.std_idx = flux.find_standard(self.std_specobjs)
            # Set internal
            self.std = self.std_specobjs[self.std_idx]
            # Step
            self.steps.append(inspect.stack()[0][3])
            # Return
            return self.std
    def save_master(self, sens_dicts, outfile=None):
        """
        Write the sensitivity function dicts to a multi-extension FITS file

        Parameters
        ----------
        sens_dicts : dict
          Dict of per-order sensitivity function dicts, keyed by the (string)
          order index, plus a 'norder' entry
        outfile : str, optional
          Use this input instead of the 'proper' (or unattainable) MasterFrame name

        Returns
        -------

        """
        # Step
        self.steps.append(inspect.stack()[0][3])
        # Allow one to over-ride output name
        if outfile is None:
            outfile = self.ms_name

        # Add steps (use the dict passed in, which the body below serializes)
        sens_dicts['steps'] = self.steps

        norder = sens_dicts['norder']

        # Do it
        prihdu = fits.PrimaryHDU()
        hdus = [prihdu]

        for iord in range(norder):
            sens_dict_iord = sens_dicts[str(iord)]
            cols = []
            cols += [fits.Column(array=sens_dict_iord['wave'], name=str('WAVE'), format=sens_dict_iord['wave'].dtype)]
            cols += [
                fits.Column(array=sens_dict_iord['sensfunc'], name=str('SENSFUNC'),
                            format=sens_dict_iord['sensfunc'].dtype)]
            # Finish
            coldefs = fits.ColDefs(cols)
            tbhdu = fits.BinTableHDU.from_columns(coldefs)
            tbhdu.name = 'SENSFUNC-ORDER{0:04}'.format(iord)
            # Add critical keys from sens_dict to header
            for key in ['wave_min', 'wave_max', 'exptime', 'airmass', 'std_file', 'std_ra',
                        'std_dec', 'std_name', 'cal_file', 'ech_orderindx']:
                try:
                    tbhdu.header[key.upper()] = sens_dict_iord[key].value
                except AttributeError:
                    tbhdu.header[key.upper()] = sens_dict_iord[key]
                except KeyError:
                    pass  # Will not require all of these
            hdus += [tbhdu]

        # Add critical keys from sens_dict to primary header
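        # (sens_dict_iord here carries over from the last order of the loop above)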
        for key in ['exptime', 'airmass', 'std_file', 'std_ra',
                    'std_dec', 'std_name', 'cal_file']:
            try:
                prihdu.header[key.upper()] = sens_dict_iord[key].value
            except AttributeError:
                prihdu.header[key.upper()] = sens_dict_iord[key]
            except KeyError:
                pass  # Will not require all of these
        prihdu.header['NORDER'] = norder

        # Finish
        hdulist = fits.HDUList(hdus)
        hdulist.writeto(outfile, overwrite=True)

        # Finish
        msgs.info("Wrote sensfunc to MasterFrame: {:s}".format(outfile))
Exemple #37
    def reduce_exposure(self, frames, bg_frames=None, std_outfile=None):
        """
        Reduce a single exposure

        Args:
            frames (:obj:`list`):
                List of 0-indexed rows in :attr:`fitstbl` with the frames to
                reduce.
            bg_frames (:obj:`list`, optional):
                List of frame indices for the background.
            std_outfile (:obj:`str`, optional):
                File with a previously reduced standard spectrum from
                PypeIt.

        Returns:
            dict: The dictionary containing the primary outputs of
            extraction.

        """

        # if show is set, clear the ginga channels at the start of each new sci_ID
        if self.show:
            # TODO: Put this in a try/except block?
            display.clear_all(allow_new=True)

        has_bg = bg_frames is not None and len(bg_frames) > 0
        # Is this a background-subtraction reduction?
        if has_bg:
            self.bkg_redux = True
            # The default is to find_negative objects if the bg_frames are classified as "science", and to not find_negative
            # objects if the bg_frames are classified as "sky". This can be explicitly overridden if
            # par['reduce']['findobj']['find_negative'] is set to something other than the default of None.
            self.find_negative = (('science' in self.fitstbl['frametype'][bg_frames[0]]) |
                                  ('standard' in self.fitstbl['frametype'][bg_frames[0]]))\
                if self.par['reduce']['findobj']['find_negative'] is None else self.par['reduce']['findobj']['find_negative']
        else:
            self.bkg_redux = False
            self.find_negative = False

        # Container for all the Spec2DObj
        all_spec2d = spec2dobj.AllSpec2DObj()
        all_spec2d['meta']['bkg_redux'] = self.bkg_redux
        all_spec2d['meta']['find_negative'] = self.find_negative
        # TODO -- Should we reset/regenerate self.slits.mask for a new exposure

        # container for specobjs during first loop (objfind)
        all_specobjs_objfind = specobjs.SpecObjs()
        # container for specobjs during second loop (extraction)
        all_specobjs_extract = specobjs.SpecObjs()
        # list of global_sky obtained during objfind and used in extraction
        initial_sky_list = []
        # list of sciImg
        sciImg_list = []
        # List of detectors with successful calibration
        calibrated_det = []
        # list of successful MasterSlits calibrations to be used in the extraction loop
        calib_slits = []
        # List of objFind objects
        objFind_list = []

        # Print status message
        msgs_string = 'Reducing target {:s}'.format(
            self.fitstbl['target'][frames[0]]) + msgs.newline()
        # TODO: Print these when the frames are actually combined,
        # backgrounds are used, etc?
        msgs_string += 'Combining frames:' + msgs.newline()
        for iframe in frames:
            msgs_string += '{0:s}'.format(
                self.fitstbl['filename'][iframe]) + msgs.newline()
        msgs.info(msgs_string)
        if has_bg:
            bg_msgs_string = ''
            for iframe in bg_frames:
                bg_msgs_string += '{0:s}'.format(
                    self.fitstbl['filename'][iframe]) + msgs.newline()
            bg_msgs_string = msgs.newline(
            ) + 'Using background from frames:' + msgs.newline(
            ) + bg_msgs_string
            msgs.info(bg_msgs_string)

        # Find the detectors to reduce
        subset = self.par['rdx']['slitspatnum'] if self.par['rdx']['slitspatnum'] is not None \
                    else self.par['rdx']['detnum']
        detectors = self.spectrograph.select_detectors(subset=subset)
        msgs.info(f'Detectors to work on: {detectors}')

        # Loop on Detectors
        # TODO: Attempt to put in a multiprocessing call here?
        # objfind
        for self.det in detectors:
            msgs.info("Working on detector {0}".format(self.det))
            # run calibration
            self.caliBrate = self.calib_one(frames, self.det)
            if not self.caliBrate.success:
                msgs.warn(
                    f'Calibrations for detector {self.det} were unsuccessful!  The step '
                    f'that failed was {self.caliBrate.failed_step}.  Continuing by '
                    f'skipping this detector.')
                continue

            # we save only the detectors that had a successful calibration,
            # and we use only those in the extract loop below
            calibrated_det.append(self.det)
            # we also save the successful MasterSlits calibrations because they are used and modified
            # in the slitmask stuff in between the two loops
            calib_slits.append(self.caliBrate.slits)
            # global_sky, skymask and sciImg are needed in the extract loop
            initial_sky, sobjs_obj, sciImg, objFind = self.objfind_one(
                frames, self.det, bg_frames, std_outfile=std_outfile)
            if len(sobjs_obj) > 0:
                all_specobjs_objfind.add_sobj(sobjs_obj)
            initial_sky_list.append(initial_sky)
            sciImg_list.append(sciImg)
            objFind_list.append(objFind)

        # slitmask stuff
        if self.par['reduce']['slitmask']['assign_obj']:
            # get object positions from slitmask design and slitmask offsets for all the detectors
            spat_flexure = np.array([ss.spat_flexure for ss in sciImg_list])
            # Grab platescale with binning
            bin_spec, bin_spat = parse.parse_binning(self.binning)
            platescale = np.array(
                [ss.detector.platescale * bin_spat for ss in sciImg_list])
            # get the dither offset if available
            if self.par['reduce']['slitmask']['use_dither_offset']:
                dither = self.spectrograph.parse_dither_pattern(
                    [self.fitstbl.frame_paths(frames[0])])
                dither_off = dither[2][0] if dither is not None else None
            else:
                dither_off = None
            calib_slits = slittrace.get_maskdef_objpos_offset_alldets(
                all_specobjs_objfind,
                calib_slits,
                spat_flexure,
                platescale,
                self.par['calibrations']['slitedges']['det_buffer'],
                self.par['reduce']['slitmask'],
                dither_off=dither_off)
            # determine if slitmask offsets exist and compute an average offsets over all the detectors
            calib_slits = slittrace.average_maskdef_offset(
                calib_slits, platescale[0],
                self.spectrograph.list_detectors(
                    mosaic='MSC' in calib_slits[0].detname))
            # slitmask design matching and add undetected objects
            all_specobjs_objfind = slittrace.assign_addobjs_alldets(
                all_specobjs_objfind, calib_slits, spat_flexure, platescale,
                self.par['reduce']['slitmask'],
                self.par['reduce']['findobj']['find_fwhm'])

        # Extract
        for i, self.det in enumerate(calibrated_det):
            # re-run (i.e., load) calibrations
            self.caliBrate = self.calib_one(frames, self.det)
            self.caliBrate.slits = calib_slits[i]

            detname = sciImg_list[i].detector.name

            # TODO: pass back the background frame, pass in background
            # files as an argument. extract one takes a file list as an
            # argument and instantiates science within
            if all_specobjs_objfind.nobj > 0:
                all_specobjs_on_det = all_specobjs_objfind[
                    all_specobjs_objfind.DET == detname]
            else:
                all_specobjs_on_det = all_specobjs_objfind

            # Extract
            all_spec2d[detname], tmp_sobjs \
                    = self.extract_one(frames, self.det, sciImg_list[i], objFind_list[i],
                                       initial_sky_list[i], all_specobjs_on_det)
            # Hold em
            if tmp_sobjs.nobj > 0:
                all_specobjs_extract.add_sobj(tmp_sobjs)
            # JFH TODO write out the background frame?

            # TODO -- Save here?  Seems like we should.  Would probably need to use update_det=True

        # Return
        return all_spec2d, all_specobjs_extract
def fit2darc(all_wv,
             all_pix,
             t,
             nspec,
             nycoeff=3,
             nocoeff=5,
             sigmarjct=3.0,
             debug=True):
    """Routine to obtain the 2D wavelength solution for an
    echelle spectrograph. This is calculated from the y-centroid
    and the order number of identified arc lines. The fit is a 
    simple least-squares with one round of rejections.
    This is a direct porting of the XIDL code: x_fit2darc.pro

    Parameters
    ----------
    all_wv: np.array
     wavelength of the identified lines
    all_pix: np.array
      y-centroid position of the identified lines
    t: np.array
      order number of the identified lines
    nycoeff : np.int
      order of the fitting along the pixel direction for each order
    nocoeff : np.int
      order of the fitting in the order direction
    sigmarjct: np.float
      sigma level for the rejection
    debug: boolean
      Extra plots to check the status of the procedure

    Returns:
    -------
    """

    # To use the legendre polynomial pixels and orders
    # need to be normalized in the -1,+1 range
    # Normalize pixels
    mnx = 0 #np.min(all_pix)
    mxx = float(nspec - 1) #np.max(all_pix)
    nrmp = np.array([0.5 * (mnx + mxx), mxx - mnx])
    pix_nrm = 2. * (all_pix - nrmp[0])/nrmp[1]
    # Normalize orders
    mnx = np.min(t)
    mxx = np.max(t)
    nrmt = np.array([0.5 * (mnx + mxx), mxx - mnx])
    t_nrm = 2. * (t - nrmt[0])/nrmt[1]

    if debug:
        # set some plotting parameters
        prettyplot()
        plt.figure(figsize=(7,5))
        msgs.info("Plot identified lines")
        cm = plt.cm.get_cmap('RdYlBu_r')
        sc = plt.scatter(t_nrm, pix_nrm,
                         c=all_wv/10000., cmap=cm)
        cbar = plt.colorbar(sc)
        cbar.set_label(r'Wavelength [$\mu$m]', rotation=270,
                       labelpad=20)
        plt.xlabel(r'Normalized Orders')
        plt.ylabel(r'Normalized Pixels')
        plt.title(r'Location of the identified lines')
        plt.show()

    msgs.info("First iteration")
    # all lines have the same weight
    invvar = np.ones(len(all_wv), dtype=np.float64)
    all_wv_order = all_wv * t
    work2d = np.zeros((nycoeff*nocoeff, len(all_wv)), dtype=np.float64)
    worky = pydl.flegendre(pix_nrm, nycoeff)
    workt = pydl.flegendre(t_nrm, nocoeff)
    for i in range(nocoeff):
        for j in range(nycoeff):
            work2d[j*nocoeff+i,:] = worky[j,:] * workt[i,:]
    work2di = np.transpose(work2d * np.outer(np.ones(nocoeff*nycoeff,
                                             dtype=np.float64),
                                             invvar))
    alpha = work2d.dot(work2di)
    beta = all_wv_order.dot(work2di)
    res = np.linalg.solve(alpha,beta)
    wv_mod = res.dot(work2d)
    if debug:
        # set some plotting parameters
        prettyplot()
        plt.figure(figsize=(7,5))
        plt.axhline(y=np.average(wv_mod / t - all_wv),
                    color='r', linestyle='--')
        plt.axhline(y=+np.std(wv_mod / t - all_wv),
                    color='r', linestyle=':')
        plt.axhline(y=-np.std(wv_mod / t - all_wv),
                    color='r', linestyle=':')
        plt.scatter(all_wv/10000.,
                    wv_mod / t - all_wv,
                    marker="v")
        plt.text(np.min(all_wv/10000), np.average(wv_mod/t-all_wv),
                 r'Average={0:.1f}$\AA$'.format(np.average(wv_mod/t-all_wv)),
                 ha="left", va="bottom",
                 bbox=dict(boxstyle="square",
                           ec=(1., 0.5, 0.5),
                           fc=(1., 0.8, 0.8),
                           alpha=0.7,
                           ))
        plt.text(np.max(all_wv/10000), np.std(wv_mod/t-all_wv),
                 r'Sigma={0:.1f}$\AA$'.format(np.std(wv_mod/t-all_wv)),
                 ha="right", va="bottom",
                 bbox=dict(boxstyle="square",
                           ec=(1., 0.5, 0.5),
                           fc=(1., 0.8, 0.8),
                           alpha=0.7,
                           ))
        plt.title(r'Residuals after 1st iteration')
        plt.xlabel(r'Wavelength [$\mu$m]')
        plt.ylabel(r'Residuals [$\AA$]')
        plt.show()

    msgs.info("Second iteration")
    # Mask Values
    # msk = True means a bad value
    msk = sigma_clip(wv_mod-all_wv_order, sigma=sigmarjct, cenfunc=np.ma.mean).mask
    if np.any(msk):
        msgs.info("Rejecting: {} of {} lines.".format(len(msk[np.where(msk == True)]),len(msk)))
        invvar[msk] = 0.
        work2di = np.transpose(work2d * np.outer(np.ones(nocoeff*nycoeff,
                                                 dtype=np.float64),
                                                 invvar))
        alpha = work2d.dot(work2di)
        beta = all_wv_order.dot(work2di)
        res = np.linalg.solve(alpha,beta)
        wv_mod = res.dot(work2d)
        if debug:
            prettyplot()
            plt.figure(figsize=(7,5))
            plt.axhline(y=np.average(wv_mod[~msk] / t[~msk] - all_wv[~msk]),
                        color='r', linestyle='--')
            plt.axhline(y=+np.std(wv_mod[~msk] / t[~msk] - all_wv[~msk]),
                        color='r', linestyle=':')
            plt.axhline(y=-np.std(wv_mod[~msk] / t[~msk] - all_wv[~msk]),
                        color='r', linestyle=':')
            plt.scatter(all_wv[msk]/10000.,
                        wv_mod[msk] / t[msk] - all_wv[msk],
                        marker="v",
                        label=r'Rejected values')
            plt.scatter(all_wv[~msk]/10000.,
                        wv_mod[~msk] / t[~msk] - all_wv[~msk],
                        marker="v",
                        label=r'Good values')
            plt.text(np.min(all_wv/10000), np.average(wv_mod[~msk]/t[~msk]-all_wv[~msk]),
                     r'Average={0:.1f}$\AA$'.format(np.average(wv_mod[~msk]/t[~msk]-all_wv[~msk])),
                     ha="left", va="bottom",
                     bbox=dict(boxstyle="square",
                               ec=(1., 0.5, 0.5),
                               fc=(1., 0.8, 0.8),
                               alpha=0.7,
                               ))
            plt.text(np.max(all_wv/10000), np.std(wv_mod[~msk]/t[~msk]-all_wv[~msk]),
                     r'Sigma={0:.1f}$\AA$'.format(np.std(wv_mod[~msk]/t[~msk]-all_wv[~msk])),
                     ha="right", va="bottom",
                     bbox=dict(boxstyle="square",
                               ec=(1., 0.5, 0.5),
                               fc=(1., 0.8, 0.8),
                               alpha=0.7,
                               ))
            plt.legend()
            plt.title(r'Residuals after 2nd iteration')
            plt.xlabel(r'Wavelength [$\mu$m]')
            plt.ylabel(r'Residuals [$\AA$]')
            plt.show()
    else:
        msgs.info("No line rejected")

    # Check quality
    gd_wv = invvar > 0.
    resid = (wv_mod[gd_wv]-all_wv_order[gd_wv])
    fin_rms = np.sqrt(np.mean(resid**2))
    msgs.info("RMS: {0:.5f} Ang*Order#".format(fin_rms))

    # Plot QA

    # Full plot

    all_pix_qa = np.arange(np.min(all_pix),np.max(all_pix),1)
    pix_nrm_qa = 2. * (all_pix_qa - nrmp[0])/nrmp[1]
    worky_qa = pydl.flegendre(pix_nrm_qa, nycoeff)
    mn, mx = np.min(wv_mod/t), np.max(wv_mod/t)
    order = np.arange(np.min(t),np.max(t)+1,1)

    prettyplot()
    plt.figure(figsize=(7,5))
    plt.title(r'Arc 2D FIT, nx={0:.0f}, ny={1:.0f}, RMS={2:.5f} Ang*Order#'.format(nocoeff, nycoeff,fin_rms))
    plt.xlabel(r'Wavelength [$\AA$]')
    plt.ylabel(r'Row [pixel]')

    for ii in order:
        # define the color
        rr = (ii-np.max(order))/(np.min(order)-np.max(order))
        gg = 0.0
        bb = (ii-np.min(order))/(np.max(order)-np.min(order))
        tsub = np.ones(len(all_pix_qa), dtype=np.float64) * ii
        t_nrm_qa = 2. * (tsub - nrmt[0])/nrmt[1]
        work2d_qa = np.zeros((nycoeff*nocoeff, len(all_pix_qa)), dtype=np.float64)
        workt_qa = pydl.flegendre(t_nrm_qa, nocoeff)
        for i in range(nocoeff):
            for j in range(nycoeff):
                work2d_qa[j*nocoeff+i,:] = worky_qa[j,:] * workt_qa[i,:]
        wv_mod_qa = res.dot(work2d_qa)
        plt.plot(wv_mod_qa/ii, all_pix_qa,
                 color=(rr,gg,bb), linestyle='-')
        # Residuals
        resid_qa = (wv_mod[t == ii]-all_wv_order[t == ii])/t[t == ii]
        plt.scatter(wv_mod[t == ii]/t[t == ii]+100*resid_qa, all_pix[t == ii],
                    color=(rr,gg,bb))
    plt.text(mx,np.max(all_pix),
             r'residuals $\times$100',
             ha="right", va="top",)
    plt.show()

    # Individual plots

    prettyplot()


    nrow = 2
    ncol = int(np.ceil(len(order)/2.))
    fig, ax = plt.subplots(nrow,ncol,figsize=(4*ncol,4*nrow))
    for ii_row in np.arange(nrow):
        for ii_col in np.arange(ncol):
            if (ii_row*ncol)+ii_col < len(order):
                ii = order[(ii_row*ncol)+ii_col]
                rr = (ii-np.max(order))/(np.min(order)-np.max(order))
                gg = 0.0
                bb = (ii-np.min(order))/(np.max(order)-np.min(order))
                tsub = np.ones(len(all_pix_qa), dtype=np.float64) * ii
                t_nrm_qa = 2. * (tsub - nrmt[0])/nrmt[1]
                work2d_qa = np.zeros((nycoeff*nocoeff, len(all_pix_qa)), dtype=np.float64)
                workt_qa = pydl.flegendre(t_nrm_qa, nocoeff)
                for i in range(nocoeff):
                    for j in range(nycoeff):
                        work2d_qa[j*nocoeff+i,:] = worky_qa[j,:] * workt_qa[i,:]
                wv_mod_qa = res.dot(work2d_qa)
                ax[ii_row,ii_col].plot(all_pix_qa, wv_mod_qa/ii/10000.,
                               color=(rr,gg,bb), linestyle='-')
                ax[ii_row,ii_col].set_title('Order = {0:0.0f}'.format(ii))
                # Residuals
                resid_qa = (wv_mod[t == ii]-all_wv_order[t == ii])/t[t == ii]
                resid_qa_mask = (wv_mod[gd_wv][t[gd_wv] == ii]-all_wv_order[gd_wv][t[gd_wv] == ii])/t[gd_wv][t[gd_wv] == ii]
                rms_qa = np.sqrt(np.mean(resid_qa_mask**2))
                dwl=(wv_mod_qa[-1]-wv_mod_qa[0])/ii/(all_pix_qa[-1]-all_pix_qa[0])
                ax[ii_row,ii_col].scatter(all_pix[t == ii], wv_mod[t == ii]/t[t == ii]/10000.+100.*resid_qa/10000.,
                                          color=(rr,gg,bb))
                ax[ii_row,ii_col].text(0.9,0.9,
                                  r'RMS={0:.2f} Pixel'.format(rms_qa/np.abs(dwl)),
                                  ha="right", va="top",
                                  transform = ax[ii_row,ii_col].transAxes)
                ax[ii_row,ii_col].text(0.9,0.8,
                                  r'$\Delta\lambda$={0:.2f} $\AA$/Pixel'.format(np.abs(dwl)),
                                  ha="right", va="top",
                                  transform = ax[ii_row,ii_col].transAxes)
            else:
                ax[ii_row,ii_col].axis('off')

    fig.text(0.5, 0.04, r'Row [pixel]', ha='center', size='large')
    fig.text(0.04, 0.5, r'Wavelength [$\mu$m]', va='center',
             rotation='vertical', size='large')
    fig.suptitle(r'Arc 2D FIT, nx={0:.0f}, ny={1:.0f}, RMS={2:.5f} Ang*Order#, residuals $\times$100'.format(nocoeff, nycoeff,fin_rms))
    plt.show()
    return wv_mod
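The core of fit2darc is a separable 2D Legendre least squares. A minimal sketch of the first (unweighted) iteration, using numpy.polynomial in place of pydl.flegendre (an assumption: pydl returns the transposed basis) and omitting the rejection pass and plotting:

import numpy as np
from numpy.polynomial import legendre

def fit2d_sketch(all_wv, all_pix, t, nspec, nycoeff=3, nocoeff=5):
    # Normalize pixels and orders to [-1, 1] for the Legendre basis
    pix_nrm = 2. * (all_pix - 0.5 * (nspec - 1)) / float(nspec - 1)
    t_nrm = 2. * (t - 0.5 * (t.min() + t.max())) / float(t.max() - t.min())
    # Design matrix: one row per line, columns are products of the 1D bases
    worky = legendre.legvander(pix_nrm, nycoeff - 1)   # (nlines, nycoeff)
    workt = legendre.legvander(t_nrm, nocoeff - 1)     # (nlines, nocoeff)
    work2d = (worky[:, :, None] * workt[:, None, :]).reshape(len(all_wv), -1)
    # Least-squares fit to wavelength * order number
    res, *_ = np.linalg.lstsq(work2d, all_wv * t, rcond=None)
    return work2d @ res   # model of wavelength * order at each input line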
    def __init__(self, std_spec1d_file=None, sci_spec1d_file=None, sens_file=None,
                 std_specobjs=None, std_header=None, spectrograph=None,
                 telluric=False, setup=None, master_dir=None, mode=None,
                 star_type=None, star_mag=None, BALM_MASK_WID=5.0, nresln=None, debug=False):

        # Load standard files
        std_spectro = None
        self.std_spec1d_file = std_spec1d_file
        # Need to unwrap these (sometimes)..
        self.std_specobjs = std_specobjs
        self.std_header = std_header
        if self.std_spec1d_file is not None:
            self.std_specobjs, self.std_header = load.ech_load_specobj(self.std_spec1d_file)
            msgs.info('Loaded {0} spectra from the spec1d standard star file: {1}'.format(
                len(self.std_specobjs), self.std_spec1d_file))
            std_spectro = self.std_header['INSTRUME']

        try:
            self.std_ra = self.std_header['RA']
        except (KeyError, TypeError):
            self.std_ra = None
        try:
            self.std_dec = self.std_header['DEC']
        except (KeyError, TypeError):
            self.std_dec = None
        try:
            self.std_file = self.std_header['FILENAME']
        except (KeyError, TypeError):
            self.std_file = None

        # Load the science files
        sci_spectro = None
        self.sci_spec1d_file = sci_spec1d_file
        self.sci_specobjs = []
        self.sci_header = None
        if self.sci_spec1d_file is not None:
            self.sci_specobjs, self.sci_header = load.ech_load_specobj(self.sci_spec1d_file)
            msgs.info('Loaded {0} spectra from the spec1d science file: {1}'.format(
                len(self.sci_specobjs), self.sci_spec1d_file))
            sci_spectro = self.sci_header['INSTRUME']

        # Compare instruments if they exist
        if std_spectro is not None and sci_spectro is not None and std_spectro != sci_spectro:
            msgs.error('Standard spectra are not from the same instrument as the science!')

        # Instantiate the spectrograph
        _spectrograph = spectrograph
        if _spectrograph is None:
            _spectrograph = std_spectro
            if _spectrograph is not None:
                msgs.info("Spectrograph set to {0} from standard file".format(_spectrograph))
        if _spectrograph is None:
            _spectrograph = sci_spectro
            if _spectrograph is not None:
                msgs.info("Spectrograph set to {0} from science file".format(_spectrograph))
        self.spectrograph = load_spectrograph(_spectrograph)

        # MasterFrame
        masterframe.MasterFrame.__init__(self, self.frametype, setup,
                                         master_dir=master_dir, mode=mode)
        # Get the extinction data
        self.extinction_data = None
        if self.spectrograph is not None:
            self.extinction_data \
                = flux.load_extinction_data(self.spectrograph.telescope['longitude'],
                                            self.spectrograph.telescope['latitude'])
        elif self.sci_header is not None and 'LON-OBS' in self.sci_header.keys():
            self.extinction_data \
                = flux.load_extinction_data(self.sci_header['LON-OBS'],
                                            self.sci_header['LAT-OBS'])

        # Parameters
        self.sens_file = sens_file

        # Set telluric option
        self.telluric = telluric

        # Main outputs
        self.sens_dict = None if self.sens_file is None \
            else self.load_master(self.sens_file)

        # Attributes
        self.steps = []

        # Key Internals
        self.std = None  # Standard star spectrum (SpecObj object)
        self.std_idx = None  # Nested indices for the std_specobjs list that corresponds
        # to the star!
        # Echelle key
        self.star_type = star_type
        self.star_mag = star_mag
        self.BALM_MASK_WID = BALM_MASK_WID
        self.nresln = nresln
        self.debug = debug
Exemple #40
    def objfind_one(self, frames, det, bg_frames, std_outfile=None):
        """
        Reduce + Find Objects in a single exposure/detector pair

        sci_ID and det need to have been set internally prior to calling this method

        Parameters
        ----------
        frames : :obj:`list`
            List of frames to extract; stacked if more than one is provided
        det : :obj:`int`
            Detector number (1-indexed)
        bg_frames : :obj:`list`
            List of frames to use as the background. Can be empty.
        std_outfile : :obj:`str`, optional
            Filename for the standard star spec1d file. Passed directly to
            :func:`get_std_trace`.

        Returns
        -------
        global_sky : `numpy.ndarray`_
            Initial global sky model
        sobjs_obj : :class:`~pypeit.specobjs.SpecObjs`
            List of objects found
        sciImg : :class:`~pypeit.images.pypeitimage.PypeItImage`
            Science image
        objFind : :class:`~pypeit.find_objects.FindObjects`
            Object-finding object used in the reduction

        """
        # Grab some meta-data needed for the reduction from the fitstbl
        self.objtype, self.setup, self.obstime, self.basename, self.binning \
                = self.get_sci_metadata(frames[0], det)

        msgs.info("Object finding begins for {} on det={}".format(
            self.basename, det))

        # Is this a standard star?
        self.std_redux = 'standard' in self.objtype
        frame_par = self.par['calibrations'][
            'standardframe'] if self.std_redux else self.par['scienceframe']
        # Get the standard trace if need be

        if self.std_redux is False and std_outfile is not None:
            std_trace = specobjs.get_std_trace(
                self.spectrograph.get_det_name(det), std_outfile)
        else:
            std_trace = None

        # Build Science image
        sci_files = self.fitstbl.frame_paths(frames)
        sciImg = buildimage.buildimage_fromlist(
            self.spectrograph,
            det,
            frame_par,
            sci_files,
            bias=self.caliBrate.msbias,
            bpm=self.caliBrate.msbpm,
            dark=self.caliBrate.msdark,
            flatimages=self.caliBrate.flatimages,
            slits=self.caliBrate.slits,  # For flexure correction
            ignore_saturation=False)

        # Background Image?
        if len(bg_frames) > 0:
            bg_file_list = self.fitstbl.frame_paths(bg_frames)
            sciImg = sciImg.sub(
                buildimage.buildimage_fromlist(
                    self.spectrograph,
                    det,
                    frame_par,
                    bg_file_list,
                    bpm=self.caliBrate.msbpm,
                    bias=self.caliBrate.msbias,
                    dark=self.caliBrate.msdark,
                    flatimages=self.caliBrate.flatimages,
                    slits=self.caliBrate.slits,  # For flexure correction
                    ignore_saturation=False),
                frame_par['process'])

        # Deal with manual extraction
        row = self.fitstbl[frames[0]]
        manual_obj = ManualExtractionObj.by_fitstbl_input(
            row['filename'], row['manual'],
            self.spectrograph) if len(row['manual'].strip()) > 0 else None

        # Instantiate Reduce object
        # Required for pypeline specific object
        # At instantiation, the fullmask in self.sciImg is modified
        objFind = find_objects.FindObjects.get_instance(
            sciImg,
            self.spectrograph,
            self.par,
            self.caliBrate,
            self.objtype,
            bkg_redux=self.bkg_redux,
            manual=manual_obj,
            find_negative=self.find_negative,
            std_redux=self.std_redux,
            show=self.show,
            basename=self.basename)

        # Do it
        initial_sky, sobjs_obj = objFind.run(std_trace=std_trace,
                                             show_peaks=self.show)
        # Return
        return initial_sky, sobjs_obj, sciImg, objFind
def ech_objfind(image, ivar, ordermask, slit_left, slit_righ, inmask=None, plate_scale=0.2,
                npca=2, ncoeff=5, min_snr=0.0, nabove_min_snr=0, pca_percentile=20.0,
                snr_pca=3.0, box_radius=2.0, show_peaks=False, show_fits=False, show_trace=False):


    if inmask is None:
        inmask = (ordermask > 0)


    frameshape = image.shape
    nspec = frameshape[0]
    norders = slit_left.shape[1]

    if isinstance(plate_scale,(float, int)):
        plate_scale_ord = np.full(norders, plate_scale)  # 0.12 binned by 3 spatially for HIRES
    elif isinstance(plate_scale,(np.ndarray, list, tuple)):
        if len(plate_scale) == norders:
            plate_scale_ord = plate_scale
        elif len(plate_scale) == 1:
            plate_scale_ord = np.full(norders, plate_scale[0])
        else:
            msgs.error('Invalid size for plate_scale. It must either have one element or norders elements')
    else:
        msgs.error('Invalid type for plate scale')

    specmid = nspec // 2
    slit_width = slit_righ - slit_left
    spec_vec = np.arange(nspec)
    slit_spec_pos = nspec/2.0
    slit_spat_pos = np.zeros((norders, 2))
    for iord in range(norders):
        slit_spat_pos[iord, :] = (np.interp(slit_spec_pos, spec_vec, slit_left[:,iord]), np.interp(slit_spec_pos, spec_vec, slit_righ[:,iord]))

    # Loop over orders and find objects
    sobjs = specobjs.SpecObjs()
    # Initialize the sky and object masks that extract.objfind fills in per order
    skymask = np.zeros(frameshape, dtype=bool)
    objmask = np.zeros(frameshape, dtype=bool)
    # ToDo replace orderindx with the true order number here? Maybe not. Clean up slitid and orderindx!
    for iord  in range(norders):
        msgs.info('Finding objects on slit # {:d}'.format(iord + 1))
        thismask = ordermask == (iord + 1)
        inmask_iord = inmask & thismask
        specobj_dict = {'setup': 'HIRES', 'slitid': iord + 1, 'scidx': 0,'det': 1, 'objtype': 'science'}
        sobjs_slit, skymask[thismask], objmask[thismask], proc_list = \
            extract.objfind(image, thismask, slit_left[:,iord], slit_righ[:,iord], inmask=inmask_iord,show_peaks=show_peaks,
                            show_fits=show_fits, show_trace=False, specobj_dict = specobj_dict)#, sig_thresh = 3.0)
        # ToDo: make the specobjs __setitem__ work with expressions like spec[:].orderindx = iord
        for spec in sobjs_slit:
            spec.ech_orderindx = iord
        sobjs.add_sobj(sobjs_slit)


    nfound = len(sobjs)

    # Compute the FOF linking length based on the instrument plate scale and a matching length FOFSEP = 1.0"
    FOFSEP = 1.0 # separation for the FOF algorithm in arcseconds
    FOF_frac = FOFSEP/(np.median(slit_width)*np.median(plate_scale_ord))
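    # Worked example with illustrative numbers (an assumption, not measured values):
    # a median slit width of ~40 pixels at a plate scale of 0.12"/pix spans ~4.8",
    # so FOF_frac = 1.0/4.8 ~ 0.21, i.e. the linking length is ~21% of the slit
    # width in fractional-position units.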

    # Make the code also work when only one object is found in one order
    # Run the FOF. We use fake coordinates: dividing all angles by 1000 makes the
    # geometry effectively Euclidean, so the spherical grouping reduces to a 1-d
    # match in fractional position
    fracpos = sobjs.spat_fracpos
    ra_fake = fracpos/1000.0
    dec_fake = 0.0*fracpos
    if nfound>1:
        (ingroup, multgroup, firstgroup, nextgroup) = spheregroup(ra_fake, dec_fake, FOF_frac/1000.0)
        group = ingroup.copy()
        uni_group, uni_ind = np.unique(group, return_index=True)
        nobj = len(uni_group)
        msgs.info('FOF matching found {:d}'.format(nobj) + ' unique objects')
    elif nfound==1:
        group = np.zeros(1,dtype='int')
        uni_group, uni_ind = np.unique(group, return_index=True)
        nobj = len(group)
        msgs.warn('Only one object found; no FOF matching is needed')
    else:
        # Nothing was found on any order; return an empty container
        msgs.warn('No objects found')
        return specobjs.SpecObjs()

    gfrac = np.zeros(nfound)
    for jj in range(nobj):
        this_group = group == uni_group[jj]
        gfrac[this_group] = np.median(fracpos[this_group])

    uni_frac = gfrac[uni_ind]

    sobjs_align = sobjs.copy()
    # Now fill in the missing objects and their traces
    for iobj in range(nobj):
        for iord in range(norders):
            # Is there an object on this order that grouped into the current group in question?
            on_slit = (group == uni_group[iobj]) & (sobjs_align.ech_orderindx == iord)
            if not np.any(on_slit):
                # Add this to the sobjs_align, and assign required tags
                thisobj = specobjs.SpecObj(frameshape, slit_spat_pos[iord,:], slit_spec_pos, det = sobjs_align[0].det,
                                           setup = sobjs_align[0].setup, slitid = (iord + 1),
                                           scidx = sobjs_align[0].scidx, objtype=sobjs_align[0].objtype)
                thisobj.ech_orderindx = iord
                thisobj.spat_fracpos = uni_frac[iobj]
                thisobj.trace_spat = slit_left[:,iord] + slit_width[:,iord]*uni_frac[iobj] # new trace
                thisobj.trace_spec = spec_vec
                thisobj.spat_pixpos = thisobj.trace_spat[specmid]
                thisobj.set_idx()
                # Use this object's real detections to set the FWHM
                this_group = group == uni_group[iobj]
                # Assign the fwhm/maskwidth of the nearest detected order
                imin = np.argmin(np.abs(sobjs_align[this_group].ech_orderindx - iord))
                thisobj.fwhm = sobjs_align[this_group][imin].fwhm
                thisobj.maskwidth = sobjs_align[this_group][imin].maskwidth
                thisobj.ech_fracpos = uni_frac[iobj]
                thisobj.ech_group = uni_group[iobj]
                thisobj.ech_usepca = True
                sobjs_align.add_sobj(thisobj)
                group = np.append(group, uni_group[iobj])
                gfrac = np.append(gfrac, uni_frac[iobj])
            else:
                # ToDo fix specobjs to get rid of these crappy loops!
                for spec in sobjs_align[on_slit]:
                    spec.ech_fracpos = uni_frac[iobj]
                    spec.ech_group = uni_group[iobj]
                    spec.ech_usepca = False

    # Sort the objects in sobjs_align: group by group, and within each group by order index
    sobjs_sort = specobjs.SpecObjs()
    for iobj in range(nobj):
        this_group = group == uni_group[iobj]
        this_sobj = sobjs_align[this_group]
        sobjs_sort.add_sobj(this_sobj[np.argsort(this_sobj.ech_orderindx)])

    # Loop over the objects and perform a quick and dirty boxcar extraction to assess the S/N.
    varimg = utils.calc_ivar(ivar)  # invert the inverse variance to obtain a variance image
    flux_box = np.zeros((nspec, norders, nobj))
    ivar_box = np.zeros((nspec, norders, nobj))
    mask_box = np.zeros((nspec, norders, nobj))
    SNR_arr = np.zeros((norders, nobj))
    for iobj in range(nobj):
        for iord in range(norders):
            indx = (sobjs_sort.ech_group == uni_group[iobj]) & (sobjs_sort.ech_orderindx == iord)
            spec = sobjs_sort[indx]
            thismask = ordermask == (iord + 1)
            inmask_iord = inmask & thismask
            box_rad_pix = box_radius/plate_scale_ord[iord]
            flux_tmp  = extract.extract_boxcar(image*inmask_iord, spec.trace_spat,box_rad_pix, ycen = spec.trace_spec)
            var_tmp  = extract.extract_boxcar(varimg*inmask_iord, spec.trace_spat,box_rad_pix, ycen = spec.trace_spec)
            ivar_tmp = utils.calc_ivar(var_tmp)
            pixtot  = extract.extract_boxcar(ivar*0 + 1.0, spec.trace_spat,box_rad_pix, ycen = spec.trace_spec)
            mask_tmp = (extract.extract_boxcar(ivar*inmask_iord == 0.0, spec.trace_spat,box_rad_pix, ycen = spec.trace_spec) != pixtot)
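            # mask_tmp flags spectral pixels where the boxcar aperture contained at
            # least one unmasked pixel: the boxcar sum of the "masked" indicator
            # (ivar*inmask == 0) equals pixtot only when every pixel was masked.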
            flux_box[:,iord,iobj] = flux_tmp*mask_tmp
            ivar_box[:,iord,iobj] = np.fmax(ivar_tmp*mask_tmp,0.0)
            mask_box[:,iord,iobj] = mask_tmp
            (mean, med_sn, stddev) = sigma_clipped_stats(flux_box[mask_tmp,iord,iobj]*np.sqrt(ivar_box[mask_tmp,iord,iobj]),
                                                         sigma_lower=5.0,sigma_upper=5.0)
            SNR_arr[iord,iobj] = med_sn



    # Purge objects with low S/N that don't show up in enough orders
    keep_obj = np.zeros(nobj,dtype=bool)
    sobjs_trim = specobjs.SpecObjs()
    uni_group_trim = np.array([],dtype=int)
    uni_frac_trim =  np.array([],dtype=float)
    for iobj in range(nobj):
        if (np.sum(SNR_arr[:,iobj] > min_snr) >= nabove_min_snr):
            keep_obj[iobj] = True
            ikeep = sobjs_sort.ech_group == uni_group[iobj]
            sobjs_trim.add_sobj(sobjs_sort[ikeep])
            uni_group_trim = np.append(uni_group_trim, uni_group[iobj])
            uni_frac_trim = np.append(uni_frac_trim, uni_frac[iobj])
        else:
            msgs.info('Purging object #{:d}'.format(iobj) + ' which does not have S/N > {:5.2f}'.format(min_snr) +
                      ' on at least {:d} orders'.format(nabove_min_snr))

    nobj_trim = np.sum(keep_obj)
    if nobj_trim == 0:
        return specobjs.SpecObjs()

    SNR_arr_trim = SNR_arr[:,keep_obj]

    # Do a final loop over objects and make the final decision about which orders will be interpolated/extrapolated by the PCA
    for iobj in range(nobj_trim):
        SNR_now = SNR_arr_trim[:,iobj]
        indx = (sobjs_trim.ech_group == uni_group_trim[iobj])
        # PCA interp/extrap if:
        #      (SNR is below pca_percentile of the total SNRs) AND (SNR < snr_pca)
        #                                 OR
        #      (if this order was not originally traced by the object finding, see above)
        usepca = ((SNR_now < np.percentile(SNR_now, pca_percentile)) & (SNR_now < snr_pca)) | sobjs_trim[indx].ech_usepca
        # ToDo fix specobjs to get rid of these crappy loops!
        for iord, spec in enumerate(sobjs_trim[indx]):
            spec.ech_usepca = usepca[iord]
            if usepca[iord]:
                msgs.info('Using PCA to predict trace for object #{:d}'.format(iobj) + ' on order #{:d}'.format(iord))

    sobjs_final = sobjs_trim.copy()
    # Loop over the objects one by one and adjust/predict the traces
    npoly_cen = 3
    pca_fits = np.zeros((nspec, norders, nobj_trim))
    for iobj in range(nobj_trim):
        igroup = sobjs_final.ech_group == uni_group_trim[iobj]
        # PCA predict the masked orders which were not traced
        pca_fits[:,:,iobj] = pca_trace((sobjs_final[igroup].trace_spat).T, usepca = None, npca = npca, npoly_cen = npoly_cen)
        # usepca = sobjs_final[igroup].ech_usepca,
        # Perform iterative flux weighted centroiding using new PCA predictions
        xinit_fweight = pca_fits[:,:,iobj].copy()
        inmask_now = inmask & (ordermask > 0)
        xfit_fweight = extract.iter_tracefit(image, xinit_fweight, ncoeff, inmask = inmask_now, show_fits=show_fits)
        # Perform iterative Gaussian weighted centroiding
        xinit_gweight = xfit_fweight.copy()
        xfit_gweight = extract.iter_tracefit(image, xinit_gweight, ncoeff, inmask = inmask_now, gweight=True,show_fits=show_fits)
        # Assign the new traces
        for iord, spec in enumerate(sobjs_final[igroup]):
            spec.trace_spat = xfit_gweight[:,iord]
            spec.spat_pixpos = spec.trace_spat[specmid]


    # Set the IDs
    sobjs_final.set_idx()
    if show_trace:
        # Show the input image (assumed sky-subtracted) restricted to the orders
        viewer, ch = ginga.show_image(image*(ordermask > 0))
        for iobj in range(nobj_trim):
            for iord in range(norders):
                ginga.show_trace(viewer, ch, pca_fits[:,iord, iobj], str(uni_frac_trim[iobj]), color='yellow')

        for spec in sobjs_trim:
            color = 'green' if spec.ech_usepca else 'magenta'
            ginga.show_trace(viewer, ch, spec.trace_spat, spec.idx, color=color)

        #for spec in sobjs_final:
        #    color = 'red' if spec.ech_usepca else 'green'
        #    ginga.show_trace(viewer, ch, spec.trace_spat, spec.idx, color=color)

    return sobjs_final
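
# A minimal usage sketch for ech_objfind (illustrative only: the arrays below are
# synthetic placeholders standing in for products of a real echelle reduction, so
# the call exercises the code path rather than finding real objects).
import numpy as np

nspec, nspat, norders = 2048, 512, 3
image = np.random.normal(size=(nspec, nspat))        # sky-subtracted science frame
ivar = np.ones_like(image)                           # inverse variance image
ordermask = np.zeros((nspec, nspat), dtype=int)      # 0 = off-order, iord+1 on order iord
slit_left = np.zeros((nspec, norders))               # left order edges [pix]
slit_righ = np.zeros((nspec, norders))               # right order edges [pix]
for i in range(norders):
    slit_left[:, i] = 60.0 + 150.0*i
    slit_righ[:, i] = 140.0 + 150.0*i
    ordermask[:, 60 + 150*i:140 + 150*i] = i + 1

sobjs = ech_objfind(image, ivar, ordermask, slit_left, slit_righ,
                    plate_scale=0.12, min_snr=2.0, nabove_min_snr=2)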
Exemple #42
    def extract_one(self, frames, det, sciImg, objFind, initial_sky,
                    sobjs_obj):
        """
        Extract Objects in a single exposure/detector pair

        sci_ID and det need to have been set internally prior to calling this method

        Args:
            frames (:obj:`list`):
                List of frames to extract; stacked if more than one
                is provided
            det (:obj:`int`):
                Detector number (1-indexed)
            sciImg (:class:`PypeItImage`):
                Data container that holds a single image from a
                single detector and its related images (e.g. ivar, mask)
            objFind (:class:`~pypeit.find_objects.FindObjects`):
                Object finding object
            initial_sky (`numpy.ndarray`_):
                Initial global sky model
            sobjs_obj (:class:`pypeit.specobjs.SpecObjs`):
                List of objects found during `run_objfind`

        Returns:
            tuple: Returns a :class:`~pypeit.spec2dobj.Spec2DObj` containing
            the 2D outputs of the reduction (science image, model images,
            masks, and flexure/wavelength metadata) and a
            :class:`pypeit.specobjs.SpecObjs` object with the extracted
            spectra from this exposure/detector pair.
        """
        # Grab some meta-data needed for the reduction from the fitstbl
        self.objtype, self.setup, self.obstime, self.basename, self.binning \
                = self.get_sci_metadata(frames[0], det)
        # Is this a standard star?
        self.std_redux = 'standard' in self.objtype

        # Update the skymask
        skymask = objFind.create_skymask(sobjs_obj)
        # Update the global sky
        if 'standard' in self.fitstbl['frametype'][frames[0]] or \
                self.par['reduce']['findobj']['skip_final_global'] or \
                self.par['reduce']['skysub']['load_mask'] or \
                self.par['reduce']['skysub']['user_regions'] is not None:
            final_global_sky = initial_sky
        else:
            final_global_sky = objFind.global_skysub(previous_sky=initial_sky,
                                                     skymask=skymask,
                                                     show=self.show)
        scaleImg = objFind.scaleimg

        # Update slits.mask here, since global_skysub modifies reduce_bpm and we need to propagate it into the extraction
        flagged_slits = np.where(objFind.reduce_bpm)[0]
        if len(flagged_slits) > 0:
            self.caliBrate.slits.mask[flagged_slits] = \
                self.caliBrate.slits.bitmask.turn_on(self.caliBrate.slits.mask[flagged_slits], 'BADREDUCE')

        msgs.info("Extraction begins for {} on det={}".format(
            self.basename, det))

        # Instantiate the Extract object
        # Required for the pypeline-specific object
        # At instantiation, the fullmask in sciImg is modified
        # TODO: Are we repeating steps in the init for FindObjects and Extract??
        self.exTract = extraction.Extract.get_instance(
            sciImg,
            sobjs_obj,
            self.spectrograph,
            self.par,
            self.caliBrate,
            self.objtype,
            bkg_redux=self.bkg_redux,
            return_negative=self.par['reduce']['extraction']
            ['return_negative'],
            std_redux=self.std_redux,
            show=self.show,
            basename=self.basename)

        if not self.par['reduce']['extraction']['skip_extraction']:
            skymodel, objmodel, ivarmodel, outmask, sobjs, waveImg, \
                tilts = self.exTract.run(final_global_sky, ra=self.fitstbl["ra"][frames[0]],
                                         dec=self.fitstbl["dec"][frames[0]], obstime=self.obstime)
        else:
            # Although extraction is not performed, we still need to prepare some masks and the tilts
            self.exTract.prepare_extraction()
            # Since the extraction was not performed, fill the arrays with the best available information
            skymodel = final_global_sky
            objmodel = np.zeros_like(self.exTract.sciImg.image)
            ivarmodel = np.copy(self.exTract.sciImg.ivar)
            outmask = self.exTract.sciImg.fullmask
            waveImg = self.exTract.waveimg
            tilts = self.exTract.tilts
            sobjs = sobjs_obj

        # TODO -- Do this upstream
        # Tack on detector and wavelength RMS
        for sobj in sobjs:
            sobj.DETECTOR = sciImg.detector
            iwv = np.where(
                self.caliBrate.wv_calib.spat_ids == sobj.SLITID)[0][0]
            sobj.WAVE_RMS = self.caliBrate.wv_calib.wv_fits[iwv].rms

        # Construct table of spectral flexure
        spec_flex_table = Table()
        spec_flex_table['spat_id'] = self.caliBrate.slits.spat_id
        spec_flex_table['sci_spec_flexure'] = self.exTract.slitshift

        # pull out maskdef_designtab from caliBrate.slits
        maskdef_designtab = self.caliBrate.slits.maskdef_designtab
        slits = copy.deepcopy(self.caliBrate.slits)
        slits.maskdef_designtab = None

        # Construct the Spec2DObj
        spec2DObj = spec2dobj.Spec2DObj(
            sciimg=sciImg.image,
            ivarraw=sciImg.ivar,
            skymodel=skymodel,
            objmodel=objmodel,
            ivarmodel=ivarmodel,
            scaleimg=scaleImg,
            waveimg=waveImg,
            bpmmask=outmask,
            detector=sciImg.detector,
            sci_spat_flexure=sciImg.spat_flexure,
            sci_spec_flexure=spec_flex_table,
            vel_corr=self.exTract.vel_corr,
            vel_type=self.par['calibrations']['wavelengths']['refframe'],
            tilts=tilts,
            slits=slits,
            maskdef_designtab=maskdef_designtab)
        spec2DObj.process_steps = sciImg.process_steps

        # QA
        spec2DObj.gen_qa()

        # Return
        return spec2DObj, sobjs
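
# A minimal driver sketch tying the two stages together. Assumptions (not from the
# snippets above): `self` is a configured PypeIt-like instance with fitstbl and
# caliBrate set up, `frames`/`bg_frames` are row indices into its fitstbl, and the
# object-finding method whose tail appears earlier is named `objfind_one`; names
# and signatures may differ across PypeIt versions.
det = 1
initial_sky, sobjs_obj, sciImg, objFind = self.objfind_one(frames, det, bg_frames=bg_frames)
spec2DObj, sobjs = self.extract_one(frames, det, sciImg, objFind, initial_sky, sobjs_obj)
# The caller is then responsible for writing spec2DObj and sobjs to disk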