def load_master(self, filename, force=False):

        # Does the master file exist?
        if not os.path.isfile(filename):
            # msgs.warn("No Master frame found of type {:s}: {:s}".format(self.frametype, filename))
            msgs.warn("No Master frame found of {:s}".format(filename))
            if force:
                msgs.error("Crashing out because reduce-masters-force=True:" + msgs.newline() + filename)
            return None
        else:
            # msgs.info("Loading a pre-existing master calibration frame of type: {:}".format(self.frametype) + " from filename: {:}".format(filename))
            msgs.info("Loading a pre-existing master calibration frame of SENSFUNC from filename: {:}".format(filename))

            hdu = fits.open(filename)
            norder = hdu[0].header['NORDER']
            sens_dicts = {}
            for iord in range(norder):
                head = hdu[iord + 1].header
                tbl = hdu['SENSFUNC-ORDER{0:04}'.format(iord)].data
                sens_dict = {}
                sens_dict['wave'] = tbl['WAVE']
                sens_dict['sensfunc'] = tbl['SENSFUNC']
                for key in ['wave_min', 'wave_max', 'exptime', 'airmass', 'std_file', 'std_ra', 'std_dec',
                            'std_name', 'cal_file', 'ech_orderindx']:
                    try:
                        sens_dict[key] = head[key.upper()]
                    except KeyError:
                        # Not every card is guaranteed to be present
                        pass
                sens_dicts[str(iord)] = sens_dict
            sens_dicts['norder'] = norder
            return sens_dicts
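
# For reference, a hedged sketch of the inverse operation: writing a SENSFUNC
# master file that load_master() above can read back. The NORDER card, the
# SENSFUNC-ORDER{iord:04d} HDU names, and the WAVE/SENSFUNC columns mirror the
# reads above; the helper itself and its name are illustrative.
def write_sensfunc_master(sens_dicts, filename):
    import numpy as np
    from astropy.io import fits
    from astropy.table import Table

    norder = sens_dicts['norder']
    prihdu = fits.PrimaryHDU()
    prihdu.header['NORDER'] = norder
    hdulist = fits.HDUList([prihdu])
    for iord in range(norder):
        sens_dict = sens_dicts[str(iord)]
        tbl = Table([np.asarray(sens_dict['wave']), np.asarray(sens_dict['sensfunc'])],
                    names=('WAVE', 'SENSFUNC'))
        hdu = fits.table_to_hdu(tbl)
        hdu.name = 'SENSFUNC-ORDER{0:04d}'.format(iord)
        # Scalar metadata go in as header cards, as load_master() expects
        for key in ('wave_min', 'wave_max', 'exptime', 'airmass', 'std_name'):
            if key in sens_dict:
                hdu.header[key.upper()] = sens_dict[key]
        hdulist.append(hdu)
    hdulist.writeto(filename, overwrite=True)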
def write_science(sci_specobjs, sci_header, outfile):
    """
    Write the flux-calibrated science spectra

    Parameters
    ----------
    outfile : str

    Returns
    -------

    """
    if len(sci_specobjs) == 0:
        msgs.warn("No science spectra to write to disk!")
        return
    if 'VEL-TYPE' in sci_header.keys():
        helio_dict = dict(refframe=sci_header['VEL-TYPE'],
                          vel_correction=sci_header['VEL'])
    else:
        helio_dict = None
    telescope = None
    if 'LON-OBS' in sci_header.keys():
        telescope = TelescopePar(longitude=sci_header['LON-OBS'],
                                 latitude=sci_header['LAT-OBS'],
                                 elevation=sci_header['ALT-OBS'])
    # KLUDGE ME
    if isinstance(sci_specobjs, list):
        specObjs = specobjs.SpecObjs(sci_specobjs)
    elif isinstance(sci_specobjs, specobjs.SpecObjs):
        specObjs = sci_specobjs
    else:
        msgs.error("BAD INPUT")
    save.save_1d_spectra_fits(specObjs, sci_header,'ECHELLE', outfile,
                              helio_dict=helio_dict,
                              telescope=telescope, overwrite=True)
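
# Sketch of the optional header cards write_science() reacts to (card names as
# read above; the values are illustrative):
#   VEL-TYPE = 'heliocentric', VEL = 12.3   -> packed into helio_dict
#   LON-OBS / LAT-OBS / ALT-OBS             -> packed into a TelescopePar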
def rebin(a, newshape):
    '''Rebin an array to a new shape using slicing. This routine is taken from:
    https://scipy-cookbook.readthedocs.io/items/Rebinning.html
    '''

    if not len(a.shape) == len(newshape):
        msgs.error('Dimension of the input image does not match the dimension of the requested new shape')

    slices = [slice(0, old, float(old) / new) for old, new in zip(a.shape, newshape)]
    coordinates = np.mgrid[slices]
    indices = coordinates.astype('i')  # choose the biggest smaller integer index
    return a[tuple(indices)]
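
# A minimal usage sketch of rebin(); the demo helper is illustrative and
# assumes numpy has been imported as np, as elsewhere in this module.
def _rebin_demo():
    demo = np.arange(16).reshape(4, 4)
    # Nearest-lower-integer index sampling keeps rows/columns 0 and 2
    out = rebin(demo, (2, 2))
    assert np.array_equal(out, np.array([[0, 2], [8, 10]]))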
def load_spec_order(fname,objid=None,order=None,extract='OPT',flux=True):
    """
    Loading single order spectrum from a PypeIt 1D specctrum fits file
    :param file:
    :param objid:
    :param order:
    :param extract:
    :param flux:
    :return:
    """
    if objid is None:
        objid = 0
    if order is None:
        msgs.error('Please specify which order you want to load')

    # read extension name into a list
    primary_header = fits.getheader(fname, 0)
    nspec = primary_header['NSPEC']
    extnames = [primary_header['EXT{0:04d}'.format(kk + 1)] for kk in range(nspec)]
    extnameroot = extnames[0]

    # Figure out which extension is the required data
    ordername = '{0:04}'.format(order)
    extname = extnameroot.replace('OBJ0000', objid)
    extname = extname.replace('ORDER0000', 'ORDER' + ordername)
    try:
        exten = extnames.index(extname) + 1
        msgs.info("Loading extension {:s} of spectrum {:s}".format(extname, fname))
    except ValueError:
        msgs.error("Spectrum {:s} does not contain {:s} extension".format(fname, extname))

    spectrum = load.load_1dspec(fname, exten=exten, extract=extract, flux=flux)
    # Polish a bit -- Deal with NAN, inf, and *very* large values that will exceed
    #   the floating point precision of float32 for var which is sig**2 (i.e. 1e38)
    bad_flux = np.any([np.isnan(spectrum.flux), np.isinf(spectrum.flux),
                       np.abs(spectrum.flux) > 1e30,
                       spectrum.sig ** 2 > 1e10,
                       ], axis=0)
    # Sometimes Echelle spectra have zero wavelength
    bad_wave = spectrum.wavelength < 1000.0*units.AA
    bad_all = bad_flux | bad_wave
    # Trim out the bad pixels
    wave_out, flux_out, sig_out = spectrum.wavelength[~bad_all], spectrum.flux[~bad_all], spectrum.sig[~bad_all]
    spectrum_out = XSpectrum1D.from_tuple((wave_out,flux_out,sig_out), verbose=False)

    return spectrum_out
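
# The bad-pixel veto above, as a self-contained sketch: np.any over a stack of
# boolean tests flags a pixel if any single test fires. The helper and its
# values are illustrative.
def _badpix_demo():
    flux = np.array([1.0, np.nan, np.inf, 2.0, 1e31])
    sig = np.array([0.1, 0.1, 0.1, 1e6, 0.1])
    bad = np.any([np.isnan(flux), np.isinf(flux),
                  np.abs(flux) > 1e30, sig ** 2 > 1e10], axis=0)
    assert bad.tolist() == [False, True, True, True, True]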
def ech_load_spec(files,objid=None,order=None,extract='OPT',flux=True):
    """
    files: A list of file names
    objid:
    extract:
    flux:
    """

    nfiles = len(files)
    if objid is None:
        objid = ['OBJ0000'] * nfiles
    elif len(objid) == 1:
        objid = objid * nfiles
    elif len(objid) != nfiles:
        msgs.error('The length of objid should be either 1 or equal to the number of spectra files.')

    fname = files[0]
    ext_final = fits.getheader(fname, -1)
    norder = ext_final['ORDER'] + 1
    msgs.info('spectrum {:s} has {:d} orders'.format(fname, norder))
    if norder <= 1:
        msgs.error('The number of orders has to be greater than one for echelle data. Is this longslit data?')

    # Load spectra
    spectra_list = []
    for ii, fname in enumerate(files):

        if order is None:
            msgs.info('Loading all orders into a giant spectrum')
            for iord in range(norder):
                spectrum = load_spec_order(fname,objid=objid[ii],order=iord,extract=extract,flux=flux)
                # Append
                spectra_list.append(spectrum)
        elif order >= norder:
            msgs.error('Order number cannot be greater than the total number of orders')
        else:
            spectrum = load_spec_order(fname, objid=objid[ii], order=order, extract=extract, flux=flux)
            # Append
            spectra_list.append(spectrum)
    # Join into one XSpectrum1D object
    spectra = collate(spectra_list)
    # Return
    return spectra
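
# Hypothetical usage (file names illustrative): with order=None every order of
# every file is loaded and collate() stacks them into a single XSpectrum1D.
# spectra = ech_load_spec(['spec1d_b.fits', 'spec1d_r.fits'],
#                         objid=['OBJ0001'], extract='OPT', flux=True)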
def ech_objfind(image,
                ivar,
                ordermask,
                slit_left,
                slit_righ,
                inmask=None,
                plate_scale=0.2,
                npca=2,
                ncoeff=5,
                min_snr=0.0,
                nabove_min_snr=0,
                pca_percentile=20.0,
                snr_pca=3.0,
                box_radius=2.0,
                show_peaks=False,
                show_fits=False,
                show_trace=False):
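    """
    Find objects on an echelle frame order by order, link them across orders
    with a friends-of-friends match on fractional slit position, synthesize
    traces for orders where an object was missed, and flag low-S/N orders to
    have their traces predicted by a PCA.
    """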

    if inmask is None:
        inmask = (ordermask > 0)

    frameshape = image.shape
    nspec = frameshape[0]
    norders = slit_left.shape[1]

    if isinstance(plate_scale, (float, int)):
        plate_scale_ord = np.full(
            norders, plate_scale)  # 0.12 binned by 3 spatially for HIRES
    elif isinstance(plate_scale, (np.ndarray, list, tuple)):
        if len(plate_scale) == norders:
            plate_scale_ord = plate_scale
        elif len(plate_scale) == 1:
            plate_scale_ord = np.full(norders, plate_scale[0])
        else:
            msgs.error(
                'Invalid size for plate_scale. It must either have one element or norders elements'
            )
    else:
        msgs.error('Invalid type for plate scale')

    specmid = nspec // 2
    slit_width = slit_righ - slit_left
    spec_vec = np.arange(nspec)
    slit_spec_pos = nspec / 2.0
    slit_spat_pos = np.zeros((norders, 2))
    for iord in range(norders):
        slit_spat_pos[iord, :] = (np.interp(slit_spec_pos, spec_vec,
                                            slit_left[:, iord]),
                                  np.interp(slit_spec_pos, spec_vec,
                                            slit_righ[:, iord]))

    # Loop over orders and find objects
    sobjs = specobjs.SpecObjs()
    # Allocate the mask images that extract.objfind fills in below
    skymask = np.zeros_like(image, dtype=bool)
    objmask = np.zeros_like(image, dtype=bool)
    # ToDo replace orderindx with the true order number here? Maybe not. Clean up slitid and orderindx!
    for iord in range(norders):
        msgs.info('Finding objects on slit # {:d}'.format(iord + 1))
        thismask = ordermask == (iord + 1)
        inmask_iord = inmask & thismask
        specobj_dict = {
            'setup': 'HIRES',
            'slitid': iord + 1,
            'scidx': 0,
            'det': 1,
            'objtype': 'science'
        }
        sobjs_slit, skymask[thismask], objmask[thismask], proc_list = \
            extract.objfind(image, thismask, slit_left[:,iord], slit_righ[:,iord], inmask=inmask_iord,show_peaks=show_peaks,
                            show_fits=show_fits, show_trace=False, specobj_dict = specobj_dict)#, sig_thresh = 3.0)
        # ToDO make the specobjs _set_item_ work with expressions like this spec[:].orderindx = iord
        for spec in sobjs_slit:
            spec.ech_orderindx = iord
        sobjs.add_sobj(sobjs_slit)

    nfound = len(sobjs)

    # Compute the FOF linking length based on the instrument plate scale and a matching length FOFSEP = 1.0"
    FOFSEP = 1.0  # separation for the FOF algorithm in arcseconds
    FOF_frac = FOFSEP / (np.median(slit_width) * np.median(plate_scale_ord))
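    # e.g., a median slit width of 40 pixels at 0.2"/pixel (illustrative
    # numbers) gives FOF_frac = 1.0 / (40 * 0.2) = 0.125 of a slit width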

    # Feige: make the code also work when only one object is found in one order
    # Run the FOF. We use fake coordinates
    fracpos = sobjs.spat_fracpos
    ra_fake = fracpos / 1000.0  # Divide all angles by 1000 to make the geometry Euclidean
    dec_fake = 0.0 * fracpos
    if nfound > 1:
        (ingroup, multgroup, firstgroup,
         nextgroup) = spheregroup(ra_fake, dec_fake, FOF_frac / 1000.0)
        group = ingroup.copy()
        uni_group, uni_ind = np.unique(group, return_index=True)
        nobj = len(uni_group)
        msgs.info('FOF matching found {:d}'.format(nobj) + ' unique objects')
    elif nfound == 1:
        group = np.zeros(1, dtype='int')
        uni_group, uni_ind = np.unique(group, return_index=True)
        nobj = len(group)
        msgs.warn('Only one object found; no FOF matching is needed')

    gfrac = np.zeros(nfound)
    for jj in range(nobj):
        this_group = group == uni_group[jj]
        gfrac[this_group] = np.median(fracpos[this_group])

    uni_frac = gfrac[uni_ind]

    sobjs_align = sobjs.copy()
    # Now fill in the missing objects and their traces
    for iobj in range(nobj):
        for iord in range(norders):
            # Is there an object on this order that grouped into the current group in question?
            on_slit = (group == uni_group[iobj]) & (sobjs_align.ech_orderindx
                                                    == iord)
            if not np.any(on_slit):
                # Add this to the sobjs_align, and assign required tags
                thisobj = specobjs.SpecObj(frameshape,
                                           slit_spat_pos[iord, :],
                                           slit_spec_pos,
                                           det=sobjs_align[0].det,
                                           setup=sobjs_align[0].setup,
                                           slitid=(iord + 1),
                                           scidx=sobjs_align[0].scidx,
                                           objtype=sobjs_align[0].objtype)
                thisobj.ech_orderindx = iord
                thisobj.spat_fracpos = uni_frac[iobj]
                thisobj.trace_spat = slit_left[:,
                                               iord] + slit_width[:, iord] * uni_frac[
                                                   iobj]  # new trace
                thisobj.trace_spec = spec_vec
                thisobj.spat_pixpos = thisobj.trace_spat[specmid]
                thisobj.set_idx()
                # Use the real detections of this object for the FWHM
                this_group = group == uni_group[iobj]
                # Assign to the fwhm of the nearest detected order
                imin = np.argmin(
                    np.abs(sobjs_align[this_group].ech_orderindx - iord))
                thisobj.fwhm = sobjs_align[imin].fwhm
                thisobj.maskwidth = sobjs_align[imin].maskwidth
                thisobj.ech_fracpos = uni_frac[iobj]
                thisobj.ech_group = uni_group[iobj]
                thisobj.ech_usepca = True
                sobjs_align.add_sobj(thisobj)
                group = np.append(group, uni_group[iobj])
                gfrac = np.append(gfrac, uni_frac[iobj])
            else:
                # ToDo fix specobjs to get rid of these crappy loops!
                for spec in sobjs_align[on_slit]:
                    spec.ech_fracpos = uni_frac[iobj]
                    spec.ech_group = uni_group[iobj]
                    spec.ech_usepca = False

    # Some code to ensure that the objects are sorted in the sobjs_align by fractional position on the order and by order
    # respectively
    sobjs_sort = specobjs.SpecObjs()
    for iobj in range(nobj):
        this_group = group == uni_group[iobj]
        this_sobj = sobjs_align[this_group]
        sobjs_sort.add_sobj(this_sobj[np.argsort(this_sobj.ech_orderindx)])

    # Loop over the objects and perform a quick and dirty extraction to assess S/N.
    varimg = utils.calc_ivar(ivar)
    flux_box = np.zeros((nspec, norders, nobj))
    ivar_box = np.zeros((nspec, norders, nobj))
    mask_box = np.zeros((nspec, norders, nobj))
    SNR_arr = np.zeros((norders, nobj))
    for iobj in range(nobj):
        for iord in range(norders):
            indx = (sobjs_sort.ech_group
                    == uni_group[iobj]) & (sobjs_sort.ech_orderindx == iord)
            spec = sobjs_sort[indx]
            thismask = ordermask == (iord + 1)
            inmask_iord = inmask & thismask
            box_rad_pix = box_radius / plate_scale_ord[iord]
            flux_tmp = extract.extract_boxcar(image * inmask_iord,
                                              spec.trace_spat,
                                              box_rad_pix,
                                              ycen=spec.trace_spec)
            var_tmp = extract.extract_boxcar(varimg * inmask_iord,
                                             spec.trace_spat,
                                             box_rad_pix,
                                             ycen=spec.trace_spec)
            ivar_tmp = utils.calc_ivar(var_tmp)
            pixtot = extract.extract_boxcar(ivar * 0 + 1.0,
                                            spec.trace_spat,
                                            box_rad_pix,
                                            ycen=spec.trace_spec)
            mask_tmp = (extract.extract_boxcar(ivar * inmask_iord == 0.0,
                                               spec.trace_spat,
                                               box_rad_pix,
                                               ycen=spec.trace_spec) != pixtot)
            flux_box[:, iord, iobj] = flux_tmp * mask_tmp
            ivar_box[:, iord, iobj] = np.fmax(ivar_tmp * mask_tmp, 0.0)
            mask_box[:, iord, iobj] = mask_tmp
            (mean, med_sn, stddev) = sigma_clipped_stats(
                flux_box[mask_tmp, iord, iobj] *
                np.sqrt(ivar_box[mask_tmp, iord, iobj]),
                sigma_lower=5.0,
                sigma_upper=5.0)
            SNR_arr[iord, iobj] = med_sn

    # Purge objects with low SNR and that don't show up in enough orders
    keep_obj = np.zeros(nobj, dtype=bool)
    sobjs_trim = specobjs.SpecObjs()
    uni_group_trim = np.array([], dtype=int)
    uni_frac_trim = np.array([], dtype=float)
    for iobj in range(nobj):
        if (np.sum(SNR_arr[:, iobj] > min_snr) >= nabove_min_snr):
            keep_obj[iobj] = True
            ikeep = sobjs_sort.ech_group == uni_group[iobj]
            sobjs_trim.add_sobj(sobjs_sort[ikeep])
            uni_group_trim = np.append(uni_group_trim, uni_group[iobj])
            uni_frac_trim = np.append(uni_frac_trim, uni_frac[iobj])
        else:
            msgs.info(
                'Purging object #{:d}'.format(iobj) +
                ' which does not satisfy min_snr > {:5.2f}'.format(min_snr) +
                ' on at least nabove_min_snr >= {:d}'.format(nabove_min_snr) +
                ' orders')

    nobj_trim = np.sum(keep_obj)
    if nobj_trim == 0:
        return specobjs.SpecObjs()

    SNR_arr_trim = SNR_arr[:, keep_obj]

    # Do a final loop over objects and make the final decision about which orders will be interpolated/extrapolated by the PCA
    for iobj in range(nobj_trim):
        SNR_now = SNR_arr_trim[:, iobj]
        indx = (sobjs_trim.ech_group == uni_group_trim[iobj])
        # PCA interp/extrap if:
        #      (SNR is below pca_percentile of the total SNRs) AND (SNR < snr_pca)
        #                                 OR
        #      (if this order was not originally traced by the object finding, see above)
        usepca = ((SNR_now < np.percentile(SNR_now, pca_percentile)) &
                  (SNR_now < snr_pca)) | sobjs_trim[indx].ech_usepca
        # ToDo fix specobjs to get rid of these crappy loops!
        for iord, spec in enumerate(sobjs_trim[indx]):
            spec.ech_usepca = usepca[iord]
            if usepca[iord]:
                msgs.info('Using PCA to predict trace for object #{:d}'.format(
                    iobj) + ' on order #{:d}'.format(iord))

    sobjs_final = sobjs_trim.copy()
    # Loop over the objects one by one and adjust/predict the traces
    npoly_cen = 3
    pca_fits = np.zeros((nspec, norders, nobj_trim))
    for iobj in range(nobj_trim):
        igroup = sobjs_final.ech_group == uni_group_trim[iobj]
        # PCA predict the masked orders which were not traced
        pca_fits[:, :, iobj] = pca_trace((sobjs_final[igroup].trace_spat).T,
                                         usepca=None,
                                         npca=npca,
                                         npoly_cen=npoly_cen)
        # Perform iterative flux weighted centroiding using new PCA predictions
        xinit_fweight = pca_fits[:, :, iobj].copy()
        inmask_now = inmask & (ordermask > 0)
        xfit_fweight = extract.iter_tracefit(image,
                                             xinit_fweight,
                                             ncoeff,
                                             inmask=inmask_now,
                                             show_fits=show_fits)
        # Perform iterative Gaussian weighted centroiding
        xinit_gweight = xfit_fweight.copy()
        xfit_gweight = extract.iter_tracefit(image,
                                             xinit_gweight,
                                             ncoeff,
                                             inmask=inmask_now,
                                             gweight=True,
                                             show_fits=show_fits)
        # Assign the new traces
        for iord, spec in enumerate(sobjs_final[igroup]):
            spec.trace_spat = xfit_gweight[:, iord]
            spec.spat_pixpos = spec.trace_spat[specmid]

    # Set the IDs
    sobjs_final.set_idx()
    if show_trace:
        # NOTE: assumes `image` is the sky-subtracted frame to display
        viewer, ch = ginga.show_image(image * (ordermask > 0))
        for iobj in range(nobj_trim):
            for iord in range(norders):
                ginga.show_trace(viewer,
                                 ch,
                                 pca_fits[:, iord, iobj],
                                 str(uni_frac_trim[iobj]),
                                 color='yellow')

        for spec in sobjs_trim:
            color = 'green' if spec.ech_usepca else 'magenta'
            ginga.show_trace(viewer,
                             ch,
                             spec.trace_spat,
                             spec.idx,
                             color=color)


    return sobjs_final
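
# A hedged call sketch (shapes per the code above; all inputs illustrative):
#   image, ivar          -- (nspec, nspat) sky-subtracted frame and its inverse variance
#   ordermask            -- (nspec, nspat) int image: 0 off-order, iord + 1 on order iord
#   slit_left, slit_righ -- (nspec, norders) order-edge traces
# sobjs = ech_objfind(image, ivar, ordermask, slit_left, slit_righ,
#                     plate_scale=0.2, min_snr=2.0, nabove_min_snr=2)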
Example #7
def main(args):

    import os
    import numpy as np

    from pypeit import pypeit
    from pypeit import pypeitsetup
    from pypeit.core import framematch
    from pypeit import msgs

    spec = args.spectrograph

    # Config the run
    cfg_lines = ['[rdx]']
    cfg_lines += ['    spectrograph = {0}'.format(spec)]
    cfg_lines += [
        '    redux_path = {0}_A'.format(os.path.join(os.getcwd(), spec))
    ]
    if args.slit_spat is not None:
        msgs.info("--slit_spat provided.  Ignoring --det")
    else:
        cfg_lines += ['    detnum = {0}'.format(args.det)]
    # Restrict on slit
    if args.slit_spat is not None:
        cfg_lines += ['    slitspatnum = {0}'.format(args.slit_spat)]
    # Allow for bad headers
    if args.ignore_headers:
        cfg_lines += ['    ignore_bad_headers = True']
    cfg_lines += ['[scienceframe]']
    cfg_lines += ['    [[process]]']
    cfg_lines += ['        mask_cr = False']
    # Calibrations
    cfg_lines += ['[baseprocess]']
    cfg_lines += ['    use_biasimage = False']
    cfg_lines += ['[calibrations]']
    # Input pixel flat?
    if args.user_pixflat is not None:
        cfg_lines += ['    [[flatfield]]']
        cfg_lines += ['        pixelflat_file = {0}'.format(args.user_pixflat)]
    # Reduction restrictions
    cfg_lines += ['[reduce]']
    cfg_lines += ['    [[extraction]]']
    cfg_lines += ['         skip_optimal = True']
    # Set boxcar radius
    if args.box_radius is not None:
        cfg_lines += ['    boxcar_radius = {0}'.format(args.box_radius)]
    cfg_lines += ['    [[findobj]]']
    cfg_lines += ['         skip_second_find = True']

    # Data files
    data_files = [
        os.path.join(args.full_rawpath, args.arc),
        os.path.join(args.full_rawpath, args.flat),
        os.path.join(args.full_rawpath, args.science)
    ]

    # Setup
    ps = pypeitsetup.PypeItSetup(data_files,
                                 path='./',
                                 spectrograph_name=spec,
                                 cfg_lines=cfg_lines)
    ps.build_fitstbl()
    # TODO -- Get the type_bits from  'science'
    bm = framematch.FrameTypeBitMask()
    file_bits = np.zeros(3, dtype=bm.minimum_dtype())
    file_bits[0] = bm.turn_on(file_bits[0], ['arc', 'tilt'])
    file_bits[1] = bm.turn_on(
        file_bits[1], ['pixelflat', 'trace', 'illumflat']
        if args.user_pixflat is None else ['trace', 'illumflat'])
    file_bits[2] = bm.turn_on(file_bits[2], 'science')

    # PypeItSetup sorts according to MJD
    #   Deal with this
    asrt = []
    for ifile in data_files:
        bfile = os.path.basename(ifile)
        idx = ps.fitstbl['filename'].data.tolist().index(bfile)
        asrt.append(idx)
    asrt = np.array(asrt)

    # Set bits
    ps.fitstbl.set_frame_types(file_bits[asrt])
    ps.fitstbl.set_combination_groups()
    # Extras
    ps.fitstbl['setup'] = 'A'

    # Write
    ofiles = ps.fitstbl.write_pypeit(configs='A',
                                     write_bkg_pairs=True,
                                     cfg_lines=cfg_lines)
    if len(ofiles) > 1:
        msgs.error("Expected a single PypeIt file to be written, but got {0:d}".format(len(ofiles)))

    # Instantiate the main pipeline reduction object
    pypeIt = pypeit.PypeIt(ofiles[0],
                           verbosity=2,
                           reuse_masters=True,
                           overwrite=True,
                           logname='mos.log',
                           show=False)
    # Run
    pypeIt.reduce_all()
    msgs.info('Data reduction complete')
    # QA HTML
    msgs.info('Generating QA HTML')
    pypeIt.build_qa()

    return 0
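
# A hedged sketch of the argparse front end main() expects; the option names
# are inferred from the attributes used above, and the defaults are
# illustrative.
def parse_args():
    import argparse
    p = argparse.ArgumentParser(description='Quick multislit reduction')
    p.add_argument('spectrograph')
    p.add_argument('full_rawpath')
    p.add_argument('arc')
    p.add_argument('flat')
    p.add_argument('science')
    p.add_argument('--det', type=int, default=1)
    p.add_argument('--slit_spat', default=None)
    p.add_argument('--ignore_headers', default=False, action='store_true')
    p.add_argument('--user_pixflat', default=None)
    p.add_argument('--box_radius', type=float, default=None)
    return p.parse_args()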
Example #8
    def compound_meta(self, headarr, meta_key):
        """
        Methods to generate metadata requiring interpretation of the header
        data, instead of simply reading the value of a header card.

        Args:
            headarr (:obj:`list`):
                List of `astropy.io.fits.Header`_ objects.
            meta_key (:obj:`str`):
                Metadata keyword to construct.

        Returns:
            object: Metadata value read from the header(s).
        """
        # Populate the idname based on the header information of LUCI
        # This is an implicit way of pre-typing without adding too many
        # variables to the self.meta.
        if meta_key == 'idname':
            targetname = headarr[0].get('OBJECT')
            dispname = headarr[0].get('GRATNAME')
            calib_unit = headarr[0].get('CALIB')
            filter1 = headarr[0].get('FILTER1')
            filter2 = headarr[0].get('FILTER2')
            lamp1 = headarr[0].get('LAMP1')
            lamp2 = headarr[0].get('LAMP2')
            lamp3 = headarr[0].get('LAMP3')
            lamp4 = headarr[0].get('LAMP4')
            lamp5 = headarr[0].get('LAMP5')
            lamp6 = headarr[0].get('LAMP6')

            # object frame -> will be typed as science
            # This currently includes sky flats, science and standard images
            # We will guess standards using the beginning of their names.
            if ((dispname != 'Mirror') and (calib_unit == False)
                    and (lamp1 == False) and (lamp2 == False)
                    and (lamp3 == False) and (lamp4 == False)
                    and (lamp5 == False) and (lamp6 == False)):
                if (targetname[:3] == 'HIP' or targetname[:2] == 'HD'
                        or targetname[:5] == 'Feige'):
                    return 'standard'
                else:
                    return 'object'
            # flat frames -> will be typed as pixelflat, trace
            elif ((calib_unit == True)
                  and ((lamp4 == True) or (lamp5 == True) or (lamp6 == True))):
                return 'flat'
            # arcs -> will be typed as arc, tilt
            elif ((dispname != 'Mirror') and (calib_unit == True)
                  and ((lamp1 == True) or (lamp2 == True) or (lamp3 == True))):
                return 'arc'
            # pixelflat off -> will be typed as bias
            elif ((dispname != 'Mirror') and (calib_unit == True)
                  and (lamp1 == False) and (lamp2 == False)
                  and (lamp3 == False) and (lamp4 == False)
                  and (lamp5 == False) and (lamp6 == False)
                  and (filter1 != 'blind') and (filter2 != 'blind')):
                return 'flat_off'
            # darks -> will not be typed currently
            elif ((filter1 == 'blind') or (filter2 == 'blind')):
                return 'dark'
        msgs.error("Not ready for this compound meta")
Example #9
def read_gmos(raw_file, det=1):
    """
    Read the GMOS data file

    Parameters
    ----------
    raw_file : str
      Filename
    det : int, optional
      Detector number; Default = 1

    Returns
    -------
    array : ndarray
      Combined image
    header : FITS header
    sections : list
      List of datasec, oscansec sections
    """

    # Check for file; allow for extra .gz, etc. suffix
    fil = glob.glob(raw_file + '*')
    if len(fil) != 1:
        msgs.error("Found {:d} files matching {:s}".format(len(fil), raw_file + '*'))

    # Read
    msgs.info("Reading GMOS file: {:s}".format(fil[0]))
    hdu = fits.open(fil[0])
    head0 = hdu[0].header
    head1 = hdu[1].header

    # Number of amplifiers (could pull from DetectorPar but this avoids needing the spectrograph, e.g. view_fits)
    numamp = (len(hdu) - 1) // 3

    # Setup for datasec, oscansec
    dsec = []
    osec = []

    # get the x and y binning factors...
    binning = head1['CCDSUM']
    xbin, ybin = [int(ibin) for ibin in binning.split(' ')]

    # First read over the header info to determine the size of the output array...
    datasec = head1['DATASEC']
    x1, x2, y1, y2 = np.array(parse.load_sections(datasec,
                                                  fmt_iraf=False)).flatten()
    biassec = head1['BIASSEC']
    b1, b2, b3, b4 = np.array(parse.load_sections(biassec,
                                                  fmt_iraf=False)).flatten()
    nxb = b2 - b1 + 1

    # determine the output array size...
    nx = (x2 - x1 + 1) * numamp + nxb * numamp
    ny = y2 - y1 + 1

    # allocate output array...
    array = np.zeros((nx, ny))

    if numamp == 2:
        if det == 1:  # BLUEST DETECTOR
            order = range(6, 4, -1)
        elif det == 2:  # MIDDLE DETECTOR
            order = range(3, 5)
        elif det == 3:  # REDDEST DETECTOR
            order = range(1, 3)
    elif numamp == 4:
        if det == 1:  # BLUEST DETECTOR
            order = range(12, 8, -1)
        elif det == 2:  # MIDDLE DETECTOR
            order = range(8, 4, -1)
        elif det == 3:  # REDDEST DETECTOR
            order = range(4, 0, -1)
    else:
        msgs.error("Unexpected number of amplifiers: {:d}".format(numamp))

    # insert extensions into master image...
    for kk, jj in enumerate(order):

        # grab complete extension...
        data, overscan, datasec, biassec, x1, x2 = gemini_read_amp(hdu, jj)
        # insert components into output array...
        inx = data.shape[0]
        xs = inx * kk
        xe = xs + inx

        # insert data...
        # Data section
        section = '[{:d}:{:d},:]'.format(xs, xe)  # Eliminate lines
        dsec.append(section)
        array[xs:xe, :] = np.flipud(data)

        # insert overscan (postdata)...
        xs = nx - numamp * nxb + kk * nxb
        xe = xs + nxb
        osection = '[{:d}:{:d},:]'.format(xs, xe)  # TRANSPOSED FOR WHAT COMES
        osec.append(osection)
        array[xs:xe, :] = overscan

    # make sure BZERO is a valid integer for IRAF
    obzero = head1['BZERO']
    head0['BZERO'] = 32768 - obzero

    # Return, transposing array back to goofy Python indexing
    return array, head0, (dsec, osec)
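
# The amplifier ordering above, made concrete: for a 12-amplifier (numamp == 4)
# GMOS frame the extensions are read in this order per detector:
#   det 1 (bluest):  12, 11, 10, 9
#   det 2 (middle):  8, 7, 6, 5
#   det 3 (reddest): 4, 3, 2, 1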
Example #10
    def get_std(self, multi_spec_det=None):
        """
        Return the standard star from this Specobjs. For MultiSlit this
        will be a single specobj in SpecObjs container, for Echelle it
        will be the standard for all the orders.

        Args:
            multi_spec_det (list):
                If there are multiple detectors arranged in the spectral direction, return the sobjs for
                the standard on each detector.

        Returns:
            SpecObj or SpecObjs or None

        """
        # Is this MultiSlit or Echelle
        pypeline = (self.PYPELINE)[0]
        if 'MultiSlit' in pypeline or 'IFU' in pypeline:
            # Have to do a loop to extract the counts for all objects
            if self.OPT_COUNTS[0] is not None:
                SNR = np.median(self.OPT_COUNTS *
                                np.sqrt(self.OPT_COUNTS_IVAR),
                                axis=1)
            elif self.BOX_COUNTS[0] is not None:
                SNR = np.median(self.BOX_COUNTS *
                                np.sqrt(self.BOX_COUNTS_IVAR),
                                axis=1)
            else:
                return None
            # For multiple detectors grab the requested detectors
            if multi_spec_det is not None:
                sobjs_std = SpecObjs(header=self.header)
                # Now append the maximum S/N object on each detector
                for idet in multi_spec_det:
                    this_det = self.DET == idet
                    istd = SNR[this_det].argmax()
                    sobjs_std.add_sobj(self[this_det][istd])
            else:  # For normal multislit take the brightest object
                istd = SNR.argmax()
                # Return
                sobjs_std = SpecObjs(specobjs=[self[istd]], header=self.header)
            sobjs_std.header = self.header
            return sobjs_std
        elif 'Echelle' in pypeline:
            uni_objid = np.unique(
                self.ECH_FRACPOS)  # A little risky using floats
            uni_order = np.unique(self.ECH_ORDER)
            nobj = len(uni_objid)
            norders = len(uni_order)
            # Build up S/N
            SNR = np.zeros((norders, nobj))
            for iobj in range(nobj):
                for iord in range(norders):
                    ind = (self.ECH_FRACPOS == uni_objid[iobj]) & (
                        self.ECH_ORDER == uni_order[iord])
                    spec = self[ind]
                    # Grab SNR
                    if self.OPT_COUNTS[0] is not None:
                        SNR[iord,
                            iobj] = np.median(spec[0].OPT_COUNTS *
                                              np.sqrt(spec[0].OPT_COUNTS_IVAR))
                    elif self.BOX_COUNTS[0] is not None:
                        SNR[iord,
                            iobj] = np.median(spec[0].BOX_COUNTS *
                                              np.sqrt(spec[0].BOX_COUNTS_IVAR))
                    else:
                        return None
            # Maximize S/N
            SNR_all = np.sqrt(np.sum(SNR**2, axis=0))
            objid_std = uni_objid[SNR_all.argmax()]
            # Finish
            indx = self.ECH_FRACPOS == objid_std
            # Return
            sobjs_std = SpecObjs(specobjs=self[indx], header=self.header)
            sobjs_std.header = self.header
            return sobjs_std
        else:
            msgs.error('Unknown pypeline')
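
# The echelle branch above picks the standard by combining per-order S/N in
# quadrature; a numeric sketch with illustrative values:
#   SNR = np.array([[10., 2.], [8., 3.], [12., 1.]])   # (norders, nobj)
#   SNR_all = np.sqrt(np.sum(SNR**2, axis=0))          # ~ [17.55, 3.74]
#   objid_std = uni_objid[SNR_all.argmax()]            # object 0 is the standard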
Example #11
    def echelle_2dfit(self, wv_calib, debug=False, skip_QA=False):
        """
        Fit a two-dimensional wavelength solution for echelle data.

        Primarily a wrapper for :func:`pypeit.core.arc.fit2darc`,
        using data unpacked from the ``wv_calib`` dictionary.

        Args:
            wv_calib (:class:`pypeit.wavecalib.WaveCalib`):
                Wavelength calibration object
            debug (:obj:`bool`, optional):
                Show debugging info
            skip_QA (:obj:`bool`, optional):
                Flag to skip construction of the nominal QA plots.

        Returns:
            :class:`pypeit.fitting.PypeItFit`: object containing information from 2-d fit.
        """
        if self.spectrograph.pypeline != 'Echelle':
            msgs.error(
                'Cannot execute echelle_2dfit for a non-echelle spectrograph.')

        msgs.info('Fitting 2-d wavelength solution for echelle....')
        all_wave = np.array([], dtype=float)
        all_pixel = np.array([], dtype=float)
        all_order = np.array([], dtype=float)

        # Obtain a list of good slits
        ok_mask_idx = np.where(np.invert(self.wvc_bpm))[0]
        ok_mask_order = self.slits.slitord_id[ok_mask_idx]
        nspec = self.msarc.image.shape[0]
        # Loop
        for ii in range(wv_calib.nslits):
            iorder = self.slits.ech_order[ii]
            if iorder not in ok_mask_order:
                continue
            # Slurp
            mask_now = wv_calib.wv_fits[ii].pypeitfit.bool_gpm
            all_wave = np.append(all_wave,
                                 wv_calib.wv_fits[ii]['wave_fit'][mask_now])
            all_pixel = np.append(all_pixel,
                                  wv_calib.wv_fits[ii]['pixel_fit'][mask_now])
            all_order = np.append(
                all_order,
                np.full_like(wv_calib.wv_fits[ii]['pixel_fit'][mask_now],
                             float(iorder)))

        # Fit
        # THIS NEEDS TO BE DEVELOPED
        fit2d = arc.fit2darc(all_wave,
                             all_pixel,
                             all_order,
                             nspec,
                             nspec_coeff=self.par['ech_nspec_coeff'],
                             norder_coeff=self.par['ech_norder_coeff'],
                             sigrej=self.par['ech_sigrej'],
                             debug=debug)

        self.steps.append(inspect.stack()[0][3])

        # QA
        if not skip_QA:
            outfile_global = qa.set_qa_filename(self.master_key,
                                                'arc_fit2d_global_qa',
                                                out_dir=self.qa_path)
            arc.fit2darc_global_qa(fit2d, nspec, outfile=outfile_global)
            outfile_orders = qa.set_qa_filename(self.master_key,
                                                'arc_fit2d_orders_qa',
                                                out_dir=self.qa_path)
            arc.fit2darc_orders_qa(fit2d, nspec, outfile=outfile_orders)

        return fit2d
Example #12
def main(args):
    """
    Executes telluric correction.
    """

    # Determine the spectrograph
    header = fits.getheader(args.spec1dfile)
    spectrograph = load_spectrograph(header['PYP_SPEC'])
    spectrograph_def_par = spectrograph.default_pypeit_par()

    # If the .tell file was passed in, read it and overwrite the default parameters
    if args.tell_file is not None:
        cfg_lines = read_tellfile(args.tell_file)
        par = pypeitpar.PypeItPar.from_cfg_lines(
            cfg_lines=spectrograph_def_par.to_config(), merge_with=cfg_lines)
    else:
        par = spectrograph_def_par

    # If command-line args were provided, override the defaults. Note this also overrides the .tell file
    if args.objmodel is not None:
        par['tellfit']['objmodel'] = args.objmodel
    if args.pca_file is not None:
        par['tellfit']['pca_file'] = args.pca_file
    if args.redshift is not None:
        par['tellfit']['redshift'] = args.redshift

    if args.tell_grid is not None:
        par['tellfit']['tell_grid'] = args.tell_grid
    elif par['sensfunc']['IR']['telgridfile'] is not None:
        par['tellfit']['tell_grid'] = par['sensfunc']['IR']['telgridfile']
    else:
        msgs.warn('No telluric grid file given. Using {:}'.format(
            'TelFit_MaunaKea_3100_26100_R20000.fits'))
        par['tellfit']['tell_grid'] = resource_filename(
            'pypeit', '/data/telluric/TelFit_MaunaKea_3100_26100_R20000.fits')

    # Write the par to disk
    print("Writing the parameters to {}".format(args.par_outfile))
    par['tellfit'].to_config(args.par_outfile,
                             section_name='tellfit',
                             include_descr=False)

    # Parse the output filename
    outfile = (os.path.basename(args.spec1dfile)).replace(
        '.fits', '_tellcorr.fits')
    modelfile = (os.path.basename(args.spec1dfile)).replace(
        '.fits', '_tellmodel.fits')

    # Run the telluric fitting procedure.
    if par['tellfit']['objmodel'] == 'qso':
        # run telluric.qso_telluric to get the final results
        TelQSO = telluric.qso_telluric(
            args.spec1dfile,
            par['tellfit']['tell_grid'],
            par['tellfit']['pca_file'],
            par['tellfit']['redshift'],
            modelfile,
            outfile,
            npca=par['tellfit']['npca'],
            pca_lower=par['tellfit']['pca_lower'],
            pca_upper=par['tellfit']['pca_upper'],
            bounds_norm=par['tellfit']['bounds_norm'],
            tell_norm_thresh=par['tellfit']['tell_norm_thresh'],
            only_orders=par['tellfit']['only_orders'],
            bal_wv_min_max=par['tellfit']['bal_wv_min_max'],
            debug_init=args.debug,
            disp=args.debug,
            debug=args.debug,
            show=args.plot)
    elif par['tellfit']['objmodel'] == 'star':
        TelStar = telluric.star_telluric(
            args.spec1dfile,
            par['tellfit']['tell_grid'],
            modelfile,
            outfile,
            star_type=par['tellfit']['star_type'],
            star_mag=par['tellfit']['star_mag'],
            star_ra=par['tellfit']['star_ra'],
            star_dec=par['tellfit']['star_dec'],
            func=par['tellfit']['func'],
            model=par['tellfit']['model'],
            polyorder=par['tellfit']['polyorder'],
            only_orders=par['tellfit']['only_orders'],
            mask_abs_lines=par['tellfit']['mask_abs_lines'],
            delta_coeff_bounds=par['tellfit']['delta_coeff_bounds'],
            minmax_coeff_bounds=par['tellfit']['minmax_coeff_bounds'],
            debug_init=args.debug,
            disp=args.debug,
            debug=args.debug,
            show=args.plot)
    elif par['tellfit']['objmodel'] == 'poly':
        TelPoly = telluric.poly_telluric(
            args.spec1dfile,
            par['tellfit']['tell_grid'],
            modelfile,
            outfile,
            z_obj=par['tellfit']['redshift'],
            func=par['tellfit']['func'],
            model=par['tellfit']['model'],
            polyorder=par['tellfit']['polyorder'],
            fit_wv_min_max=par['tellfit']['fit_wv_min_max'],
            mask_lyman_a=par['tellfit']['mask_lyman_a'],
            delta_coeff_bounds=par['tellfit']['delta_coeff_bounds'],
            minmax_coeff_bounds=par['tellfit']['minmax_coeff_bounds'],
            only_orders=par['tellfit']['only_orders'],
            debug_init=args.debug,
            disp=args.debug,
            debug=args.debug,
            show=args.plot)
    else:
        msgs.error(
            "Object model is not supported yet. Please choose one of 'qso', 'star', 'poly'."
        )
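
# The output naming convention above, as a standalone sketch (input file name
# illustrative):
#   os.path.basename('spec1d_J1234+5678.fits').replace('.fits', '_tellcorr.fits')
#     -> 'spec1d_J1234+5678_tellcorr.fits'
# and likewise '_tellmodel.fits' for the best-fit telluric model.
Example #13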
def ech_load_specobj(fname, order=None):
    """ Load a spec1d file into a list of SpecObjExp objects
    Parameters
    ----------
    fname : str

    Returns
    -------
    specObjs : list of SpecObjExp
    head0
    """

    speckeys = [
        'WAVE', 'SKY', 'MASK', 'FLAM', 'FLAM_IVAR', 'FLAM_SIG', 'COUNTS_IVAR',
        'COUNTS'
    ]
    #
    specObjs = []
    hdulist = fits.open(fname)
    head0 = hdulist[0].header
    for hdu in hdulist:
        if hdu.name == 'PRIMARY':
            continue
        # Parse name
        idx = hdu.name
        objp = idx.split('-')
        if objp[-1][0:3] == 'DET':
            det = int(objp[-1][3:])
        else:
            det = int(objp[-1][1:])
        if objp[-2][:5] == 'ORDER':
            iord = int(objp[-2][5:])
        else:
            msgs.warn('Loading longslit data?')
            iord = int(-1)
        # If order is None, return all extensions; otherwise only return the
        # extension matching the requested order.
        if (order is not None) and (iord != order):
            continue
        # Load data
        spec = Table(hdu.data)
        shape = (len(spec), 1024)  # 2nd number is dummy
        # New and wrong
        try:
            specobj = specobjs.SpecObj(shape, None, None, idx=idx)
        except Exception:
            debugger.set_trace()
            msgs.error("BUG ME")
        # Add order number
        specobj.ech_orderindx = iord
        # ToDo: need to change this to the real order number?
        specobj.ech_order = iord
        # Add trace
        try:
            specobj.trace_spat = spec['TRACE']
        except KeyError:
            # KLUDGE!
            specobj.trace_spat = np.arange(len(spec['BOX_WAVE']))
        # Add spectrum
        if 'BOX_COUNTS' in spec.keys():
            for skey in speckeys:
                try:
                    specobj.boxcar[skey] = spec['BOX_{:s}'.format(skey)].data
                except KeyError:
                    pass
            # Add units on wave
            specobj.boxcar['WAVE'] = specobj.boxcar['WAVE'] * units.AA

        if 'OPT_COUNTS' in spec.keys():
            for skey in speckeys:
                try:
                    specobj.optimal[skey] = spec['OPT_{:s}'.format(skey)].data
                except KeyError:
                    pass
            # Add units on wave
            specobj.optimal['WAVE'] = specobj.optimal['WAVE'] * units.AA
        # Append
        specObjs.append(specobj)
    # Return
    return specObjs, head0
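
# The HDU-name parsing above, as a standalone sketch (the helper and the HDU
# name are illustrative):
def _parse_extname_demo():
    idx = 'OBJ0001-ORDER0003-DET01'
    objp = idx.split('-')
    det = int(objp[-1][3:]) if objp[-1][0:3] == 'DET' else int(objp[-1][1:])
    iord = int(objp[-2][5:]) if objp[-2][:5] == 'ORDER' else -1
    assert (det, iord) == (1, 3)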
Example #14
    def build_wv_calib(self, arccen, method, skip_QA=False):
        """
        Main routine to generate the wavelength solutions in a loop over slits
        Wrapper to arc.simple_calib or arc.calib_with_arclines

        self.maskslits is updated for slits that fail

        Args:
            method : str
              'simple' -- arc.simple_calib
              'arclines' -- arc.calib_with_arclines
              'holy-grail' -- wavecal.autoid.HolyGrail
              'reidentify' -- wavecal.auotid.ArchiveReid
              'full_template' -- wavecal.auotid.full_template
            skip_QA (bool, optional)

        Returns:
            dict:  self.wv_calib
        """
        # Obtain a list of good slits
        ok_mask = np.where(~self.maskslits)[0]

        # Obtain calibration for all slits
        if method == 'simple':
            # Should only run this on 1 slit
            lines = self.par['lamps']
            line_lists = waveio.load_line_lists(lines)

            # Assign to final_fit so the common self.wv_calib assignment below works
            final_fit = arc.simple_calib_driver(self.msarc, line_lists, arccen, ok_mask,
                                                nfitpix=self.par['nfitpix'],
                                                IDpixels=self.par['IDpixels'],
                                                IDwaves=self.par['IDwaves'])
        elif method == 'semi-brute':
            debugger.set_trace()  # THIS IS BROKEN
            final_fit = {}
            for slit in ok_mask:
                # HACKS BY JXP
                self.par['wv_cen'] = 8670.
                self.par['disp'] = 1.524
                # ToDO remove these hacks and use the parset in semi_brute
                best_dict, ifinal_fit = autoid.semi_brute(arccen[:, slit],
                                                          self.par['lamps'], self.par['wv_cen'],
                                                          self.par['disp'], match_toler=self.par['match_toler'],
                                                          func=self.par['func'], n_first=self.par['n_first'],
                                                          sigrej_first=self.par['sigrej_first'],
                                                          n_final=self.par['n_final'],
                                                          sigrej_final=self.par['sigrej_final'],
                                                          sigdetect=self.par['sigdetect'],
                                                          nonlinear_counts=self.nonlinear_counts)
                final_fit[str(slit)] = ifinal_fit.copy()
        elif method == 'basic':
            final_fit = {}
            for slit in ok_mask:
                status, ngd_match, match_idx, scores, ifinal_fit = \
                    autoid.basic(arccen[:, slit], self.par['lamps'], self.par['wv_cen'], self.par['disp'],
                                 nonlinear_counts = self.nonlinear_counts)
                final_fit[str(slit)] = ifinal_fit.copy()
                if status != 1:
                    self.maskslits[slit] = True
        elif method == 'holy-grail':
            # Sometimes works, sometimes fails
            arcfitter = autoid.HolyGrail(arccen, par=self.par, ok_mask=ok_mask)
            patt_dict, final_fit = arcfitter.get_results()
        elif method == 'reidentify':
            # Now preferred
            arcfitter = autoid.ArchiveReid(arccen, par=self.par, ok_mask=ok_mask)
            patt_dict, final_fit = arcfitter.get_results()
        elif method == 'full_template':
            # Now preferred
            if self.binspectral is None:
                msgs.error("You must specify binspectral for the full_template method!")
            final_fit = autoid.full_template(arccen, self.par, ok_mask, self.det,
                                             self.binspectral,
                                             nsnippet=self.par['nsnippet'])

        else:
            msgs.error('Unrecognized wavelength calibration method: {:}'.format(method))

        self.wv_calib = final_fit

        # Remake mask (*mainly for the QA that follows*)
        self.maskslits = self.make_maskslits(len(self.maskslits))
        ok_mask = np.where(~self.maskslits)[0]

        # QA
        if not skip_QA:
            for slit in ok_mask:
                outfile = qa.set_qa_filename(self.master_key, 'arc_fit_qa', slit=slit, out_dir=self.redux_path)
                autoid.arc_fit_qa(self.wv_calib[str(slit)], outfile = outfile)
        # Step
        self.steps.append(inspect.stack()[0][3])
        # Return
        return self.wv_calib
Example #15
    def get_rawimage(self, raw_file, det):
        """
        Load up the raw image and generate a few other bits and pieces
        that are key for image processing

        Args:
            raw_file (str):
            det (int):

        Returns:
            tuple:
                raw_img (np.ndarray) -- Raw image for this detector
                hdu (astropy.io.fits.HDUList)
                exptime (float)
                rawdatasec_img (np.ndarray)
                oscansec_img (np.ndarray)

        """
        # Check for file; allow for extra .gz, etc. suffix
        fil = glob.glob(raw_file + '*')
        if len(fil) != 1:
            msgs.error("Found {:d} files matching {:s}".format(len(fil), raw_file + '*'))

        # Read
        msgs.info("Reading LBT/MODS file: {:s}".format(fil[0]))
        hdu = fits.open(fil[0])
        head = hdu[0].header

        # TODO These parameters should probably be stored in the detector par

        # Number of amplifiers (could pull from DetectorPar but this avoids needing the spectrograph, e.g. view_fits)
        numamp = 4

        # get the x and y binning factors...
        xbin, ybin = head['CCDXBIN'], head['CCDYBIN']

        datasize = head['DETSIZE']  # Unbinned size of detector full array
        _, nx_full, _, ny_full = np.array(
            parse.load_sections(datasize, fmt_iraf=False)).flatten()

        # Determine the size of the output array...
        nx, ny = int(nx_full / xbin), int(ny_full / ybin)
        nbias1 = 48
        nbias2 = 8240

        # allocate output array...
        # Convert to float so that it can be processed by procimg.py
        array = hdu[0].data.T * 1.0
        rawdatasec_img = np.zeros_like(array, dtype=int)
        oscansec_img = np.zeros_like(array, dtype=int)

        ## Assign datasec and oscansec regions of the image
        # amp 1
        rawdatasec_img[int(nbias1 / xbin):int(nx / 2), :int(ny / 2)] = 1
        # Exclude the first pixel, since it always has problems
        oscansec_img[1:int(nbias1 / xbin), :int(ny / 2)] = 1

        # amp 2
        rawdatasec_img[int(nx / 2):int(nbias2 / xbin), :int(ny / 2)] = 2
        # Exclude the last pixel, since it always has problems
        oscansec_img[int(nbias2 / xbin):nx - 1, :int(ny / 2)] = 2

        # amp 3
        rawdatasec_img[int(nbias1 / xbin):int(nx / 2), int(ny / 2):] = 3
        # Exclude the first pixel, since it always has problems
        oscansec_img[1:int(nbias1 / xbin), int(ny / 2):] = 3

        # amp 4
        rawdatasec_img[int(nx / 2):int(nbias2 / xbin), int(ny / 2):] = 4
        # Exclude the last pixel, since it always has problems
        oscansec_img[int(nbias2 / xbin):nx - 1, int(ny / 2):] = 4

        # Need the exposure time
        exptime = hdu[self.meta['exptime']['ext']].header[self.meta['exptime']
                                                          ['card']]
        # Return, flipping the arrays so the overscan is oriented properly
        return np.flipud(array), hdu, exptime, np.flipud(rawdatasec_img), np.flipud(oscansec_img)
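
# Schematic of the amplifier map assigned above (rows = binned x, columns =
# binned y; boundaries per the slices in get_rawimage):
#   left half of columns (0..ny/2):   amp 1 rows nbias1/xbin..nx/2, amp 2 rows nx/2..nbias2/xbin
#   right half of columns (ny/2..ny): amp 3 same rows as amp 1,     amp 4 same rows as amp 2
#   overscan: rows 1..nbias1/xbin -> amps 1 and 3; rows nbias2/xbin..nx-1 -> amps 2 and 4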
def ech_objfind(image, ivar, ordermask, slit_left, slit_righ,inmask=None,plate_scale=0.2,npca=2,ncoeff = 5,min_snr=0.0,nabove_min_snr=0,
                pca_percentile=20.0,snr_pca=3.0,box_radius=2.0,show_peaks=False,show_fits=False,show_trace=False):


    if inmask is None:
        inmask = (ordermask > 0)


    frameshape = image.shape
    nspec = frameshape[0]
    norders = slit_left.shape[1]

    if isinstance(plate_scale,(float, int)):
        plate_scale_ord = np.full(norders, plate_scale)  # 0.12 binned by 3 spatially for HIRES
    elif isinstance(plate_scale,(np.ndarray, list, tuple)):
        if len(plate_scale) == norders:
            plate_scale_ord = plate_scale
        elif len(plate_scale) == 1:
            plate_scale_ord = np.full(norders, plate_scale[0])
        else:
            msgs.error('Invalid size for plate_scale. It must either have one element or norders elements')
    else:
        msgs.error('Invalid type for plate scale')

    specmid = nspec // 2
    slit_width = slit_righ - slit_left
    spec_vec = np.arange(nspec)
    slit_spec_pos = nspec/2.0
    slit_spat_pos = np.zeros((norders, 2))
    for iord in range(norders):
        slit_spat_pos[iord, :] = (np.interp(slit_spec_pos, spec_vec, slit_left[:,iord]), np.interp(slit_spec_pos, spec_vec, slit_righ[:,iord]))

    # Loop over orders and find objects
    sobjs = specobjs.SpecObjs()
    show_peaks=True
    show_fits=True
    # ToDo replace orderindx with the true order number here? Maybe not. Clean up slitid and orderindx!
    for iord  in range(norders):
        msgs.info('Finding objects on slit # {:d}'.format(iord + 1))
        thismask = ordermask == (iord + 1)
        inmask_iord = inmask & thismask
        specobj_dict = {'setup': 'HIRES', 'slitid': iord + 1, 'scidx': 0,'det': 1, 'objtype': 'science'}
        sobjs_slit, skymask[thismask], objmask[thismask], proc_list = \
            extract.objfind(image, thismask, slit_left[:,iord], slit_righ[:,iord], inmask=inmask_iord,show_peaks=show_peaks,
                            show_fits=show_fits, show_trace=False, specobj_dict = specobj_dict)#, sig_thresh = 3.0)
        # ToDO make the specobjs _set_item_ work with expressions like this spec[:].orderindx = iord
        for spec in sobjs_slit:
            spec.ech_orderindx = iord
        sobjs.add_sobj(sobjs_slit)


    nfound = len(sobjs)

    # Compute the FOF linking length based on the instrument place scale and matching length FOFSEP = 1.0"
    FOFSEP = 1.0 # separation of FOF algorithm in arcseconds
    FOF_frac = FOFSEP/(np.median(slit_width)*np.median(plate_scale_ord))

    # Feige: made the code also works for only one object found in one order
    # Run the FOF. We use fake coordinaes
    fracpos = sobjs.spat_fracpos
    ra_fake = fracpos/1000.0 # Divide all angles by 1000 to make geometry euclidian
    dec_fake = 0.0*fracpos
    if nfound>1:
        (ingroup, multgroup, firstgroup, nextgroup) = spheregroup(ra_fake, dec_fake, FOF_frac/1000.0)
        group = ingroup.copy()
        uni_group, uni_ind = np.unique(group, return_index=True)
        nobj = len(uni_group)
        msgs.info('FOF matching found {:d}'.format(nobj) + ' unique objects')
    elif nfound==1:
        group = np.zeros(1,dtype='int')
        uni_group, uni_ind = np.unique(group, return_index=True)
        nobj = len(group)
        msgs.warn('Only one object was found; no FOF matching is needed')
    else:
        # Nothing was detected on any order; return an empty container
        msgs.warn('No objects found on any order')
        return specobjs.SpecObjs()

    gfrac = np.zeros(nfound)
    for jj in range(nobj):
        this_group = group == uni_group[jj]
        gfrac[this_group] = np.median(fracpos[this_group])

    uni_frac = gfrac[uni_ind]

    sobjs_align = sobjs.copy()
    # Now fill in the missing objects and their traces
    for iobj in range(nobj):
        for iord in range(norders):
            # Is there an object on this order that grouped into the current group in question?
            on_slit = (group == uni_group[iobj]) & (sobjs_align.ech_orderindx == iord)
            if not np.any(on_slit):
                # Add this to the sobjs_align, and assign required tags
                thisobj = specobjs.SpecObj(frameshape, slit_spat_pos[iord,:], slit_spec_pos, det = sobjs_align[0].det,
                                           setup = sobjs_align[0].setup, slitid = (iord + 1),
                                           scidx = sobjs_align[0].scidx, objtype=sobjs_align[0].objtype)
                thisobj.ech_orderindx = iord
                thisobj.spat_fracpos = uni_frac[iobj]
                thisobj.trace_spat = slit_left[:,iord] + slit_width[:,iord]*uni_frac[iobj] # new trace
                thisobj.trace_spec = spec_vec
                thisobj.spat_pixpos = thisobj.trace_spat[specmid]
                thisobj.set_idx()
                # Use the real detections of this object for the FWHM
                this_group = group == uni_group[iobj]
                # Assign the fwhm of the nearest detected order
                imin = np.argmin(np.abs(sobjs_align[this_group].ech_orderindx - iord))
                thisobj.fwhm = sobjs_align[this_group][imin].fwhm
                thisobj.maskwidth = sobjs_align[this_group][imin].maskwidth
                thisobj.ech_fracpos = uni_frac[iobj]
                thisobj.ech_group = uni_group[iobj]
                thisobj.ech_usepca = True
                sobjs_align.add_sobj(thisobj)
                group = np.append(group, uni_group[iobj])
                gfrac = np.append(gfrac, uni_frac[iobj])
            else:
                # ToDo fix specobjs to get rid of these crappy loops!
                for spec in sobjs_align[on_slit]:
                    spec.ech_fracpos = uni_frac[iobj]
                    spec.ech_group = uni_group[iobj]
                    spec.ech_usepca = False

    # Sort sobjs_align: group the entries by object (fractional position on the order)
    # and, within each object, order them by echelle order
    sobjs_sort = specobjs.SpecObjs()
    for iobj in range(nobj):
        this_group = group == uni_group[iobj]
        this_sobj = sobjs_align[this_group]
        sobjs_sort.add_sobj(this_sobj[np.argsort(this_sobj.ech_orderindx)])

    # Loop over the objects and perform a quick and dirty extraction to assess S/N.
    varimg = utils.calc_ivar(ivar)
    flux_box = np.zeros((nspec, norders, nobj))
    ivar_box = np.zeros((nspec, norders, nobj))
    mask_box = np.zeros((nspec, norders, nobj))
    SNR_arr = np.zeros((norders, nobj))
    for iobj in range(nobj):
        for iord in range(norders):
            indx = (sobjs_sort.ech_group == uni_group[iobj]) & (sobjs_sort.ech_orderindx == iord)
            spec = sobjs_sort[indx]
            thismask = ordermask == (iord + 1)
            inmask_iord = inmask & thismask
            box_rad_pix = box_radius/plate_scale_ord[iord]
            flux_tmp  = extract.extract_boxcar(image*inmask_iord, spec.trace_spat,box_rad_pix, ycen = spec.trace_spec)
            var_tmp  = extract.extract_boxcar(varimg*inmask_iord, spec.trace_spat,box_rad_pix, ycen = spec.trace_spec)
            ivar_tmp = utils.calc_ivar(var_tmp)
            pixtot  = extract.extract_boxcar(ivar*0 + 1.0, spec.trace_spat,box_rad_pix, ycen = spec.trace_spec)
            mask_tmp = (extract.extract_boxcar(ivar*inmask_iord == 0.0, spec.trace_spat,box_rad_pix, ycen = spec.trace_spec) != pixtot)
            flux_box[:,iord,iobj] = flux_tmp*mask_tmp
            ivar_box[:,iord,iobj] = np.fmax(ivar_tmp*mask_tmp,0.0)
            mask_box[:,iord,iobj] = mask_tmp
            (mean, med_sn, stddev) = sigma_clipped_stats(flux_box[mask_tmp,iord,iobj]*np.sqrt(ivar_box[mask_tmp,iord,iobj]),
                                                         sigma_lower=5.0,sigma_upper=5.0)
            SNR_arr[iord,iobj] = med_sn



    # Purge objects with low SNR and that don't show up in enough orders
    keep_obj = np.zeros(nobj,dtype=bool)
    sobjs_trim = specobjs.SpecObjs()
    uni_group_trim = np.array([],dtype=int)
    uni_frac_trim =  np.array([],dtype=float)
    for iobj in range(nobj):
        if (np.sum(SNR_arr[:,iobj] > min_snr) >= nabove_min_snr):
            keep_obj[iobj] = True
            ikeep = sobjs_sort.ech_group == uni_group[iobj]
            sobjs_trim.add_sobj(sobjs_sort[ikeep])
            uni_group_trim = np.append(uni_group_trim, uni_group[iobj])
            uni_frac_trim = np.append(uni_frac_trim, uni_frac[iobj])
        else:
            msgs.info('Purging object #{:d}'.format(iobj) + ': it does not have SNR > {:5.2f}'.format(min_snr) +
                      ' on at least {:d} orders'.format(nabove_min_snr))

    nobj_trim = np.sum(keep_obj)
    if nobj_trim == 0:
        return specobjs.SpecObjs()

    SNR_arr_trim = SNR_arr[:,keep_obj]

    # Do a final loop over objects and make the final decision about which orders will be interpolated/extrapolated by the PCA
    for iobj in range(nobj_trim):
        SNR_now = SNR_arr_trim[:,iobj]
        indx = (sobjs_trim.ech_group == uni_group_trim[iobj])
        # PCA interp/extrap if:
        #      (SNR is below pca_percentile of the total SNRs) AND (SNR < snr_pca)
        #                                 OR
        #      (if this order was not originally traced by the object finding, see above)
        usepca = ((SNR_now < np.percentile(SNR_now, pca_percentile)) & (SNR_now < snr_pca)) | sobjs_trim[indx].ech_usepca
        # ToDo fix specobjs to get rid of these crappy loops!
        for iord, spec in enumerate(sobjs_trim[indx]):
            spec.ech_usepca = usepca[iord]
            if usepca[iord]:
                msgs.info('Using PCA to predict trace for object #{:d}'.format(iobj) + ' on order #{:d}'.format(iord))

    sobjs_final = sobjs_trim.copy()
    # Loop over the objects one by one and adjust/predict the traces
    npoly_cen = 3
    pca_fits = np.zeros((nspec, norders, nobj_trim))
    for iobj in range(nobj_trim):
        igroup = sobjs_final.ech_group == uni_group_trim[iobj]
        # PCA predict the masked orders which were not traced
        pca_fits[:,:,iobj] = pca_trace((sobjs_final[igroup].trace_spat).T, usepca = None, npca = npca, npoly_cen = npoly_cen)
        # usepca = sobjs_final[igroup].ech_usepca,
        # Perform iterative flux weighted centroiding using new PCA predictions
        xinit_fweight = pca_fits[:,:,iobj].copy()
        inmask_now = inmask & (ordermask > 0)
        xfit_fweight = extract.iter_tracefit(image, xinit_fweight, ncoeff, inmask = inmask_now, show_fits=show_fits)
        # Perform iterative Gaussian weighted centroiding
        xinit_gweight = xfit_fweight.copy()
        xfit_gweight = extract.iter_tracefit(image, xinit_gweight, ncoeff, inmask = inmask_now, gweight=True,show_fits=show_fits)
        # Assign the new traces
        for iord, spec in enumerate(sobjs_final[igroup]):
            spec.trace_spat = xfit_gweight[:,iord]
            spec.spat_pixpos = spec.trace_spat[specmid]


    # Set the IDs
    sobjs_final.set_idx()
    if show_trace:
        # Show the image within the orders (objminsky is not defined in this scope, so use the input image)
        viewer, ch = ginga.show_image(image*(ordermask > 0))
        for iobj in range(nobj_trim):
            for iord in range(norders):
                ginga.show_trace(viewer, ch, pca_fits[:,iord, iobj], str(uni_frac_trim[iobj]), color='yellow')

        for spec in sobjs_trim:
            color = 'green' if spec.ech_usepca else 'magenta'
            ginga.show_trace(viewer, ch, spec.trace_spat, spec.idx, color=color)

        #for spec in sobjs_final:
        #    color = 'red' if spec.ech_usepca else 'green'
        #    ginga.show_trace(viewer, ch, spec.trace_spat, spec.idx, color=color)

    return sobjs_final
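
# A self-contained sketch of the fractional-position grouping used above.
# The real code feeds fake coordinates (RA, DEC) = (fracpos/1000, 0) into
# pydl's spheregroup; here a simple 1-d friends-of-friends over hypothetical
# fractional positions illustrates the same idea.
import numpy as np

fracpos = np.array([0.31, 0.30, 0.32, 0.71, 0.70])  # detections across orders
FOF_frac = 0.02                                     # linking length, fractional units

isort = np.argsort(fracpos)
group = np.zeros(fracpos.size, dtype=int)
for prev, curr in zip(isort[:-1], isort[1:]):
    # Neighbors in sorted order closer than the linking length share a group
    linked = np.abs(fracpos[curr] - fracpos[prev]) < FOF_frac
    group[curr] = group[prev] if linked else group[prev] + 1
print(group)  # [0 0 0 1 1] -> two unique objects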
    def __init__(self, std_spec1d_file=None, sci_spec1d_file=None, sens_file=None,
                 std_specobjs=None, std_header=None, spectrograph=None,
                 telluric=False, setup=None, master_dir=None, mode=None,
                 star_type=None, star_mag=None, BALM_MASK_WID=5.0, nresln=None, debug=False):

        # Load standard files
        std_spectro = None
        self.std_spec1d_file = std_spec1d_file
        # Need to unwrap these (sometimes)..
        self.std_specobjs = std_specobjs
        self.std_header = std_header
        if self.std_spec1d_file is not None:
            self.std_specobjs, self.std_header = load.ech_load_specobj(self.std_spec1d_file)
            msgs.info('Loaded {0} spectra from the spec1d standard star file: {1}'.format(
                len(self.std_specobjs), self.std_spec1d_file))
            std_spectro = self.std_header['INSTRUME']

        try:
            self.std_ra = self.std_header['RA']
        except (KeyError, TypeError):
            self.std_ra = None
        try:
            self.std_dec = self.std_header['DEC']
        except (KeyError, TypeError):
            self.std_dec = None
        try:
            self.std_file = self.std_header['FILENAME']
        except (KeyError, TypeError):
            self.std_file = None

        # Load the science files
        sci_spectro = None
        self.sci_spec1d_file = sci_spec1d_file
        self.sci_specobjs = []
        self.sci_header = None
        if self.sci_spec1d_file is not None:
            self.sci_specobjs, self.sci_header = load.ech_load_specobj(self.sci_spec1d_file)
            msgs.info('Loaded {0} spectra from the spec1d science file: {1}'.format(
                len(self.sci_specobjs), self.sci_spec1d_file))
            sci_spectro = self.sci_header['INSTRUME']

        # Compare instruments if they exist
        if std_spectro is not None and sci_spectro is not None and std_spectro != sci_spectro:
            msgs.error('Standard spectra are not the same instrument as science!!')

        # Instantiate the spectrograph
        _spectrograph = spectrograph
        if _spectrograph is None:
            _spectrograph = std_spectro
            if _spectrograph is not None:
                msgs.info("Spectrograph set to {0} from standard file".format(_spectrograph))
        if _spectrograph is None:
            _spectrograph = sci_spectro
            if _spectrograph is not None:
                msgs.info("Spectrograph set to {0} from science file".format(_spectrograph))
        self.spectrograph = load_spectrograph(_spectrograph)

        # MasterFrame
        masterframe.MasterFrame.__init__(self, self.frametype, setup,
                                         master_dir=master_dir, mode=mode)
        # Get the extinction data
        self.extinction_data = None
        if self.spectrograph is not None:
            self.extinction_data \
                = flux.load_extinction_data(self.spectrograph.telescope['longitude'],
                                            self.spectrograph.telescope['latitude'])
        elif self.sci_header is not None and 'LON-OBS' in self.sci_header.keys():
            self.extinction_data \
                = flux.load_extinction_data(self.sci_header['LON-OBS'],
                                            self.sci_header['LAT-OBS'])

        # Parameters
        self.sens_file = sens_file

        # Set telluric option
        self.telluric = telluric

        # Main outputs
        self.sens_dict = None if self.sens_file is None \
            else self.load_master(self.sens_file)

        # Attributes
        self.steps = []

        # Key Internals
        self.std = None  # Standard star spectrum (SpecObj object)
        self.std_idx = None  # Nested indices for the std_specobjs list that correspond to the star
        # Echelle key
        self.star_type = star_type
        self.star_mag = star_mag
        self.BALM_MASK_WID = BALM_MASK_WID
        self.nresln = nresln
        self.debug = debug
    def compound_meta(self, headarr, meta_key):
        if meta_key == 'mjd':
            time = headarr[0]['DATE']
            ttime = Time(time, format='isot')
            return ttime.mjd
        msgs.error("Not ready for this compound meta")
Example #19
    def echelle_2dfit(self, wv_calib, debug=False, skip_QA=False):
        """
        Fit a two-dimensional wavelength solution for echelle data.

        Primarily a wrapper for :func:`pypeit.core.arc.fit2darc`,
        using data unpacked from the ``wv_calib`` dictionary.
        
        Args:
            wv_calib (:obj:`dict`):
                Wavelength calibration dictionary.  See ??
            debug (:obj:`bool`, optional):
                Show debugging info
            skip_QA (:obj:`bool`, optional):
                Flag to skip construction of the nominal QA plots.

        Returns:
            :obj:`dict`: Dictionary containing information from 2-d
            fit.
        """
        if self.spectrograph.pypeline != 'Echelle':
            msgs.error(
                'Cannot execute echelle_2dfit for a non-echelle spectrograph.')

        msgs.info('Fitting 2-d wavelength solution for echelle....')
        all_wave = np.array([], dtype=float)
        all_pixel = np.array([], dtype=float)
        all_order = np.array([], dtype=float)

        # Obtain a list of good slits
        ok_mask_idx = np.where(np.invert(self.wvc_bpm))[0]
        ok_mask_order = self.slits.slitord_id[ok_mask_idx]
        nspec = self.msarc.image.shape[0]
        for iorder in wv_calib.keys():  # Spatial based
            if int(iorder) not in ok_mask_order:
                continue
            #try:
            #    iorder, iindx = self.spectrograph.slit2order(self.spat_coo[self.slits.spatid_to_zero(int(islit))])
            #except:
            #    embed()
            mask_now = wv_calib[iorder]['mask']
            all_wave = np.append(all_wave,
                                 wv_calib[iorder]['wave_fit'][mask_now])
            all_pixel = np.append(all_pixel,
                                  wv_calib[iorder]['pixel_fit'][mask_now])
            all_order = np.append(
                all_order,
                np.full_like(wv_calib[iorder]['pixel_fit'][mask_now],
                             float(iorder)))

        # Fit
        fit2d_dict = arc.fit2darc(all_wave,
                                  all_pixel,
                                  all_order,
                                  nspec,
                                  nspec_coeff=self.par['ech_nspec_coeff'],
                                  norder_coeff=self.par['ech_norder_coeff'],
                                  sigrej=self.par['ech_sigrej'],
                                  debug=debug)

        self.steps.append(inspect.stack()[0][3])

        # QA
        if not skip_QA:
            outfile_global = qa.set_qa_filename(self.master_key,
                                                'arc_fit2d_global_qa',
                                                out_dir=self.qa_path)
            arc.fit2darc_global_qa(fit2d_dict, outfile=outfile_global)
            outfile_orders = qa.set_qa_filename(self.master_key,
                                                'arc_fit2d_orders_qa',
                                                out_dir=self.qa_path)
            arc.fit2darc_orders_qa(fit2d_dict, outfile=outfile_orders)

        return fit2d_dict
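
# Illustrative stand-in for the 2-d echelle solution: fit order*wavelength
# as a low-order polynomial in (pixel, order) by least squares, on synthetic
# data. The real work is done by pypeit.core.arc.fit2darc; note that
# build_waveimg further below divides the evaluated solution by the order
# number, matching this order*wavelength convention.
import numpy as np

rng = np.random.default_rng(0)
pixel = rng.uniform(0., 2048., 500)
order = rng.integers(30, 60, 500).astype(float)
wave = 1.0e6/order + 0.05*pixel                  # fake echelle-like dispersion

# Design matrix with terms up to second degree, including the cross term
A = np.column_stack([np.ones_like(pixel), pixel, order,
                     pixel**2, order**2, pixel*order])
coeffs, *_ = np.linalg.lstsq(A, wave*order, rcond=None)
model = (A @ coeffs)/order                       # back to wavelength
print('rms residual: {:.3g}'.format(np.std(wave - model)))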
Example #20
    def get_rawimage(self, raw_file, det):
        """
        Read a raw KCWI data frame

        NOTE: The amplifiers are arranged as follows:

        |   (0,ny)  --------- (nx,ny)
        |           | 3 | 4 |
        |           ---------
        |           | 1 | 2 |
        |     (0,0) --------- (nx, 0)

        Parameters
        ----------
        raw_file : str
            Filename
        det : int or None
            Detector number

        Returns
        -------
        array : ndarray
            Combined image
        hdu : HDUList
            Opened fits file.
        sections : list
            List of datasec, oscansec, ampsec sections. datasec,
            oscansec needs to be for an *unbinned* image as per
            standard convention
        """
        # Check for file; allow for extra .gz, etc. suffix
        fil = glob.glob(raw_file + '*')
        if len(fil) != 1:
            msgs.error("Found {:d} files matching {:s}".format(
                len(fil), raw_file))

        # Read
        msgs.info("Reading KCWI file: {:s}".format(fil[0]))
        hdu = fits.open(fil[0])
        detpar = self.get_detector_par(hdu, det if det is not None else 1)
        head0 = hdu[0].header
        raw_img = hdu[detpar['dataext']].data.astype(float)

        # Some properties of the image
        numamps = head0['NVIDINP']
        # Exposure time (used by ProcessRawImage)
        headarr = self.get_headarr(hdu)
        exptime = self.get_meta_value(headarr, 'exptime')

        # get the x and y binning factors...
        binning = head0['BINNING']
        xbin, ybin = [int(ibin) for ibin in binning.split(',')]
        binning_raw = binning

        # Always assume normal FITS header formatting
        one_indexed = True
        include_last = True
        for section in ['DSEC', 'BSEC']:

            # Initialize the image (0 means no amplifier)
            pix_img = np.zeros(raw_img.shape, dtype=int)
            for i in range(numamps):
                # Get the data section
                sec = head0[section + "{0:1d}".format(i + 1)]

                # Convert the data section from a string to a slice
                datasec = parse.sec2slice(sec,
                                          one_indexed=one_indexed,
                                          include_end=include_last,
                                          require_dim=2,
                                          binning=binning_raw)
                # Flip the datasec
                datasec = datasec[::-1]

                # Assign the amplifier
                pix_img[datasec] = i + 1

            # Finish
            if section == 'DSEC':
                rawdatasec_img = pix_img.copy()
            elif section == 'BSEC':
                oscansec_img = pix_img.copy()

        # Return
        return detpar, raw_img, hdu, exptime, rawdatasec_img, oscansec_img
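
# A deliberately simplified stand-in for parse.sec2slice: convert a 1-indexed,
# inclusive FITS section string into python slices. The real routine also
# handles binning, dimension checks, and the axis flip applied above
# (datasec[::-1]).
def sec_to_slices(sec):
    out = []
    for ax in sec.strip('[]').split(','):
        start, stop = (int(v) for v in ax.split(':'))
        out.append(slice(start - 1, stop))  # FITS counts from 1, ends inclusive
    return tuple(out)

print(sec_to_slices('[17:2064,1:512]'))     # (slice(16, 2064), slice(0, 512))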
Example #21
def build_waveimg(spectrograph, tilts, slits, wv_calib, spat_flexure=None):
    """
    Main algorithm to build the wavelength image.

    The wavelength image is only constructed for good slits based on
    ``slits.mask`` and selected using
    :func:`pypeit.slittrace.SlitTraceSetBitMask.exclude_for_reducing`.

    Args:
        spectrograph (:class:`~pypeit.spectrographs.spectrograph.Spectrograph`):
            Spectrograph object
        tilts (`numpy.ndarray`_):
            Image holding tilts
        slits (:class:`~pypeit.slittrace.SlitTraceSet`):
            Object holding the slit left and right edge traces.
        wv_calib (:obj:`dict`):
            Object holding the wavelength calibration
        spat_flexure (:obj:`float`, optional):
            Spatial offset to apply for flexure.

    Returns:
        `numpy.ndarray`_: The wavelength image.
    """
    # Setup
    #ok_slits = slits.mask == 0
    bpm = slits.mask.astype(bool)
    bpm &= np.invert(
        slits.bitmask.flagged(slits.mask,
                              flag=slits.bitmask.exclude_for_reducing))
    ok_slits = np.invert(bpm)
    #
    image = np.zeros_like(tilts)
    slitmask = slits.slit_img(flexure=spat_flexure,
                              exclude_flag=slits.bitmask.exclude_for_reducing)

    par = wv_calib['par']
    if slits.ech_order is None and par['echelle']:
        msgs.error('Echelle orders must be provided by the slits object!')

#    slit_spat_pos = slits.spatial_coordinates(flexure=spat_flexure)

    # If this is echelle, print a status message and do some error checking
    if par['echelle']:
        msgs.info('Evaluating 2-d wavelength solution for echelle....')
        if len(wv_calib['fit2d']['orders']) != np.sum(ok_slits):
            msgs.error(
                'wv_calib and ok_slits do not line up. Something is very wrong!'
            )

    # Unpack some 2-d fit parameters if this is echelle
#    for slit_spat in slits.spat_id[ok_slits]:
    for i in range(slits.nslits):
        if not ok_slits[i]:
            continue
        slit_spat = slits.spat_id[i]
        thismask = (slitmask == slit_spat)
        if not np.any(thismask):
            msgs.error("Something failed in wavelengths or masking..")
        if par['echelle']:
            #            # TODO: Put this in `SlitTraceSet`?
            #            order, indx = spectrograph.slit2order(slit_spat_pos[slits.spatid_to_zero(slit_spat)])
            # evaluate solution
            image[thismask] = utils.func_val(
                wv_calib['fit2d']['coeffs'],
                tilts[thismask],
                wv_calib['fit2d']['func2d'],
                #                                             x2=np.ones_like(tilts[thismask])*order,
                x2=np.full_like(tilts[thismask], slits.ech_order[i]),
                minx=wv_calib['fit2d']['min_spec'],
                maxx=wv_calib['fit2d']['max_spec'],
                minx2=wv_calib['fit2d']['min_order'],
                maxx2=wv_calib['fit2d']['max_order'])
            image[thismask] /= slits.ech_order[i]
        else:
            #iwv_calib = wv_calib[str(slit)]
            iwv_calib = wv_calib[str(slit_spat)]
            image[thismask] = utils.func_val(iwv_calib['fitc'],
                                             tilts[thismask],
                                             iwv_calib['function'],
                                             minx=iwv_calib['fmin'],
                                             maxx=iwv_calib['fmax'])
    # Return
    return image
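
# Schematic of the fill pattern above: evaluate a (here, fake linear)
# wavelength solution only on the pixels belonging to one slit, selected
# through the slit-mask image. Hypothetical numbers throughout.
import numpy as np

tilts = np.tile(np.linspace(0., 1., 100)[:, None], (1, 20))  # fake tilt image
slitmask = np.zeros_like(tilts, dtype=int)
slitmask[:, 5:15] = 42                                       # one slit, spat_id = 42

waveimg = np.zeros_like(tilts)
thismask = slitmask == 42
waveimg[thismask] = 4000. + 2000.*tilts[thismask]            # stand-in "solution"
print(waveimg[0, 10], waveimg[-1, 10])                       # 4000.0 6000.0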
Example #22
    def build_wv_calib(self, arccen, method, skip_QA=False):
        """
        Main routine to generate the wavelength solutions in a loop over slits
        Wrapper to arc.simple_calib or arc.calib_with_arclines

        self.maskslits is updated for slits that fail

        Args:
            method : str
              'simple' -- arc.simple_calib
              'arclines' -- arc.calib_with_arclines
              'holy-grail' -- wavecal.autoid.HolyGrail
              'reidentify' -- wavecal.autoid.ArchiveReid
              'identify' -- wavecal.identify.Identify
              'full_template' -- wavecal.autoid.full_template
            skip_QA (bool, optional)

        Returns:
            dict:  self.wv_calib
        """
        # Obtain a list of good slits
        ok_mask_idx = np.where(np.invert(self.wvc_bpm))[0]

        # Obtain calibration for all slits
        if method == 'simple':
            lines = self.par['lamps']
            line_lists = waveio.load_line_lists(lines)

            final_fit = arc.simple_calib_driver(
                line_lists,
                arccen,
                ok_mask_idx,
                n_final=self.par['n_final'],
                sigdetect=self.par['sigdetect'],
                IDpixels=self.par['IDpixels'],
                IDwaves=self.par['IDwaves'])
        elif method == 'holy-grail':
            # Sometimes works, sometimes fails
            arcfitter = autoid.HolyGrail(
                arccen,
                par=self.par,
                ok_mask=ok_mask_idx,
                nonlinear_counts=self.nonlinear_counts)
            patt_dict, final_fit = arcfitter.get_results()
        elif method == 'identify':
            final_fit = {}
            # Manually identify lines
            msgs.info("Initializing the wavelength calibration tool")
            embed(header='line 222 wavecalib.py')
            for slit_idx in ok_mask_idx:
                arcfitter = Identify.initialise(arccen,
                                                self.slits,
                                                slit=slit_idx,
                                                par=self.par)
                final_fit[str(slit_idx)] = arcfitter.get_results()
                arcfitter.store_solution(final_fit[str(slit_idx)],
                                         "",
                                         self.binspectral,
                                         specname=self.spectrograph.name,
                                         gratname="UNKNOWN",
                                         dispangl="UNKNOWN")
        elif method == 'reidentify':
            # Now preferred
            # Slit positions
            arcfitter = autoid.ArchiveReid(
                arccen,
                self.spectrograph,
                self.par,
                ok_mask=ok_mask_idx,
                #slit_spat_pos=self.spat_coo,
                orders=self.orders,
                nonlinear_counts=self.nonlinear_counts)
            patt_dict, final_fit = arcfitter.get_results()
        elif method == 'full_template':
            # Now preferred
            if self.binspectral is None:
                msgs.error(
                    "You must specify binspectral for the full_template method!"
                )
            final_fit = autoid.full_template(
                arccen,
                self.par,
                ok_mask_idx,
                self.det,
                self.binspectral,
                nonlinear_counts=self.nonlinear_counts,
                nsnippet=self.par['nsnippet'])
        else:
            msgs.error(
                'Unrecognized wavelength calibration method: {:}'.format(
                    method))

        # Build the DataContainer
        # Loop on WaveFit items
        tmp = []
        for idx in range(self.slits.nslits):
            item = final_fit.pop(str(idx))
            if item is None:  # Add an empty WaveFit
                tmp.append(wv_fitting.WaveFit(self.slits.spat_id[idx]))
            else:
                # This is for I/O naming
                item.spat_id = self.slits.spat_id[idx]
                tmp.append(item)
        self.wv_calib = WaveCalib(
            wv_fits=np.asarray(tmp),
            arc_spectra=arccen,
            nslits=self.slits.nslits,
            spat_ids=self.slits.spat_id,
            PYP_SPEC=self.spectrograph.name,
        )

        # Update mask
        self.update_wvmask()

        #TODO For generalized echelle (not hard-wired), assign the order numbers here, i.e. slits.ech_order

        # QA
        if not skip_QA:
            ok_mask_idx = np.where(np.invert(self.wvc_bpm))[0]
            for slit_idx in ok_mask_idx:
                outfile = qa.set_qa_filename(
                    self.master_key,
                    'arc_fit_qa',
                    slit=self.slits.slitord_id[slit_idx],
                    out_dir=self.qa_path)
                #
                autoid.arc_fit_qa(self.wv_calib.wv_fits[slit_idx],
                                  outfile=outfile)

        # Return
        self.steps.append(inspect.stack()[0][3])
        return self.wv_calib
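
# Toy version of the DataContainer assembly loop above: walk the slits in
# order, popping each fit and substituting a placeholder where the fit is
# missing (a plain dict and strings stand in for final_fit and WaveFit).
final_fit = {'0': 'fit-A', '1': None, '2': 'fit-C'}   # slit 1 failed
tmp = []
for idx in range(3):
    item = final_fit.pop(str(idx))
    tmp.append('EMPTY-{:d}'.format(idx) if item is None else item)
print(tmp)  # ['fit-A', 'EMPTY-1', 'fit-C']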
Example #23
    def main(args):

        # Parse the detector name
        try:
            det = int(args.det)
        except ValueError:
            detname = args.det
            detname = args.det
        else:
            detname = DetectorContainer.get_name(det)

        # Load em
        line_names, line_wav = list_of_spectral_lines()

        files = np.array(args.files)

        if args.z is not None:
            zs = np.array(args.z)

        # Loop on the files
        for i in range(files.size):
            # reinitialize lines wave
            line_wav_z = line_wav.copy()

            # Load 2D object
            file = files[i]
            # List only?
            if args.list:
                io.fits_open(file).info()
                continue
            spec2DObj = spec2dobj.Spec2DObj.from_file(file,
                                                      detname,
                                                      chk_version=False)

            # Deal with redshifts
            if args.z is not None:
                z = zs[i] if zs.size == files.size else zs[0]
                line_wav_z *= (1 + z)  #redshift linelist
            else:
                z = None

            # Save?
            folder = None
            if args.mode == 'save':
                folder = '{}_noisecheck'.format(file.split('.fits')[0])
                if not os.path.exists(folder): os.makedirs(folder)
            elif args.mode == 'print':
                # Generate a Table for pretty printing
                tbl = Table()
                tbl['Slit'] = spec2DObj.slits.slitord_id
                tbl['med_chis'] = spec2DObj.med_chis
                tbl['std_chis'] = spec2DObj.std_chis
                print('')
                print(tbl)
                print('-----------------------------------------------------')
                return

            # Find the slit of interest
            all_maskdef_ids = spec2DObj.slits.maskdef_id
            all_pypeit_ids = spec2DObj.slits.slitord_id
            if args.maskdef_id is not None and all_maskdef_ids is None:
                msgs.error(
                    'This spec2d does not have maskdef_id. Choose a pypeit_id instead.'
                )

            # Build the mask
            input_mask = spec2DObj.bpmmask == 0
            if args.wavemin is not None:
                input_mask *= spec2DObj.waveimg > args.wavemin
            if args.wavemax is not None:
                input_mask *= spec2DObj.waveimg < args.wavemax

            # Decide on slits to show
            show_slits = range(all_pypeit_ids.size)
            if args.pypeit_id is not None or args.maskdef_id is not None:
                if args.maskdef_id is not None and args.maskdef_id in all_maskdef_ids:
                    slitidx = np.where(
                        all_maskdef_ids == args.maskdef_id)[0][0]
                elif args.pypeit_id is not None and args.pypeit_id in all_pypeit_ids:
                    slitidx = np.where(all_pypeit_ids == args.pypeit_id)[0][0]
                show_slits = range(slitidx, slitidx + 1)

            # Loop over the selected slits (use a distinct index so we do not
            # clobber the file-loop variable `i` above)
            for islit in show_slits:
                pypeit_id = all_pypeit_ids[islit]
                if all_maskdef_ids is not None:
                    basename = '{}_{}_maskdefID{}_pypeitID{}'.format(
                        spec2DObj.head0['DECKER'], detname, all_maskdef_ids[islit],
                        pypeit_id)
                else:
                    basename = '{}_{}_pypeitID{}'.format(
                        spec2DObj.head0['DECKER'], detname, pypeit_id)

                # Chi
                chi_slit, _, _ = spec2DObj.calc_chi_slit(islit, pad=args.pad)

                if chi_slit is None:
                    continue

                # Cut down
                chi_select = chi_slit * input_mask
                if np.all(chi_select == 0):
                    msgs.warn(
                        f"All of the chi values are masked in slit {pypeit_id} of {basename}!"
                    )
                    continue

                # Flux to show
                # get flux and err from in this slit
                flux_slit, err_slit = get_flux_slit(spec2DObj, islit, pad=args.pad)
                # flux in the wavelength range
                flux_select = flux_slit * input_mask
                # Error in the wavelength range
                err_select = err_slit * input_mask

                # get edges of the slit to plot
                left, right, _ = spec2DObj.slits.select_edges()
                spat_start = int(left[:, islit].min())
                spat_end = int(right[:, islit].max())
                mid_spat = int((spat_end + spat_start) / 2.)

                # Wavelengths
                if spec2DObj.waveimg[input_mask].size == 0:
                    msgs.warn(
                        f"None of the wavelength values work in slit {pypeit_id} of {basename}!"
                    )
                    continue
                lbda_1darray = spec2DObj.waveimg[:, mid_spat]

                line_wav_plt = np.array([])
                line_names_plt = np.array([])
                if z is not None:
                    for iline in range(line_wav_z.shape[0]):
                        if lbda_1darray[lbda_1darray != 0].min() < line_wav_z[
                                iline] < lbda_1darray[lbda_1darray != 0].max():
                            line_wav_plt = np.append(
                                line_wav_plt,
                                lbda_1darray.searchsorted(line_wav_z[iline]))
                            line_names_plt = np.append(line_names_plt,
                                                       line_names[iline])

                plot(chi_slit[:, spat_start:spat_end],
                     chi_select,
                     flux_select,
                     err_select,
                     basename,
                     line_wav_plt,
                     line_names_plt,
                     lbda_1darray,
                     lbda_min=args.wavemin,
                     lbda_max=args.wavemax,
                     aspect_ratio=args.aspect_ratio)
                if args.mode == 'plot':
                    plt.show()
                if args.mode == 'save':
                    plt.savefig('{}/noisecheck_{}.png'.format(
                        folder, basename),
                                bbox_inches='tight',
                                dpi=400)
                plt.close()
Example #24
    def unpack_object(self, ret_flam=False, extract_type='OPT'):
        """
        Utility function to unpack the sobjs for one object and
        return various numpy arrays describing the spectrum and meta
        data. The user needs to already have trimmed the Specobjs to
        the relevant indices for the object.

        Args:
           ret_flam (:obj:`bool`, optional):
              If True return the FLAM, otherwise return COUNTS.
           extract_type (:obj:`str`, optional):
              Extraction to unpack: 'OPT' or 'BOX'.

        Returns:
            tuple: Returns the following where all numpy arrays
            returned have shape (nspec, norders) for Echelle data and
            (nspec,) for Multislit data.

                - wave (`numpy.ndarray`_): Wavelength grids
                - flux (`numpy.ndarray`_): Flambda or counts
                - flux_ivar (`numpy.ndarray`_): Inverse variance (of
                  Flambda or counts)
                - flux_gpm (`numpy.ndarray`_): Good pixel mask.
                  True=Good
                - meta_spec (dict): Dictionary containing meta data.
                  The keys are defined by
                  spectrograph.header_cards_from_spec()
                - header (`astropy.io.fits.Header`): header from
                  spec1d file
        """
        # Prep
        norddet = self.nobj
        flux_attr = 'FLAM' if ret_flam else 'COUNTS'
        flux_key = '{}_{}'.format(extract_type, flux_attr)
        wave_key = '{}_WAVE'.format(extract_type)
        # Test
        if getattr(self, flux_key)[0] is None:
            msgs.error(
                "Flux not available for {}. Try the other extraction type.".format(flux_key))
        #
        nspec = getattr(self, flux_key)[0].size
        # Allocate arrays and unpack spectrum
        wave = np.zeros((nspec, norddet))
        flux = np.zeros((nspec, norddet))
        flux_ivar = np.zeros((nspec, norddet))
        flux_gpm = np.zeros((nspec, norddet), dtype=bool)
        detector = np.zeros(norddet, dtype=int)
        ech_orders = np.zeros(norddet, dtype=int)

        # TODO make the extraction that is desired OPT vs BOX an optional input variable.
        for iorddet in range(norddet):
            wave[:, iorddet] = getattr(self, wave_key)[iorddet]
            flux_gpm[:, iorddet] = getattr(
                self, '{}_MASK'.format(extract_type))[iorddet]
            detector[iorddet] = self[iorddet].DET
            if self[0].PYPELINE == 'Echelle':
                ech_orders[iorddet] = self[iorddet].ECH_ORDER
            flux[:, iorddet] = getattr(self, flux_key)[iorddet]
            flux_ivar[:, iorddet] = getattr(self, flux_key +
                                            '_IVAR')[iorddet]  #OPT_FLAM_IVAR

        # Populate meta data
        spectrograph = load_spectrograph(self.header['PYP_SPEC'])

        meta_spec = spectrograph.parse_spec_header(self.header)
        # Add the pyp spec.
        # TODO JFH: Make this an attribute of the specobj by default.
        meta_spec['PYP_SPEC'] = self.header['PYP_SPEC']
        meta_spec['PYPELINE'] = self[0].PYPELINE
        meta_spec['DET'] = detector
        # Return
        if self[0].PYPELINE in ['MultiSlit', 'IFU'] and self.nobj == 1:
            meta_spec['ECH_ORDERS'] = None
            return wave.reshape(nspec), flux.reshape(nspec), flux_ivar.reshape(nspec), \
                   flux_gpm.reshape(nspec), meta_spec, self.header
        else:
            meta_spec['ECH_ORDERS'] = ech_orders
            return wave, flux, flux_ivar, flux_gpm, meta_spec, self.header
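
# Sketch of the shape convention documented above: per-order spectra stack
# into (nspec, norddet) columns, and the single-object multislit case is
# squeezed back to (nspec,). Fake arrays only.
import numpy as np

nspec, norddet = 4, 3
flux = np.zeros((nspec, norddet))
for iorddet in range(norddet):
    flux[:, iorddet] = np.full(nspec, 10.*iorddet)
print(flux.shape)                        # (4, 3) -- echelle case

flux_single = np.zeros((nspec, 1))       # multislit with a single object
print(flux_single.reshape(nspec).shape)  # (4,) -- squeezed as in the code above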
Example #25
def ech_coadd(files,objids=None,extract='OPT',flux=True,giantcoadd=False,orderscale='median',mergeorder=True,
              wave_grid_method='velocity', niter=5,wave_grid_min=None, wave_grid_max=None,v_pix=None,
              scale_method='auto', do_offset=False, sigrej_final=3.,do_var_corr=False,
              SN_MIN_MEDSCALE = 0.5, overlapfrac = 0.01, num_min_pixels=10,phot_scale_dicts=None,
              qafile=None, outfile=None,do_cr=True, debug=False,**kwargs):
    """
    routines for coadding spectra observed with echelle spectrograph.
    parameters:
        files (list): file names
        objids (str): objid
        extract (str): 'OPT' or 'BOX'
        flux (bool): fluxed or not
        giantcoadd (bool): coadding order by order or do it at once?
        wave_grid_method (str): default velocity
        niter (int): number of iteration for rejections
        wave_grid_min (float): min wavelength, None means it will find the min value from your spectra
        wave_grid_max (float): max wavelength, None means it will find the max value from your spectra
        v_pix (float): delta velocity, see coadd.py
        scale_method (str): see coadd.py
        do_offset (str): see coadd.py, not implemented yet.
        sigrej_final (float): see coadd.py
        do_var_corr (bool): see coadd.py, default False. It seems True will results in a large error
        SN_MIN_MEDSCALE (float): minimum SNR for scaling different orders
        overlapfrac (float): minimum overlap fraction for scaling different orders.
        qafile (str): name of qafile
        outfile (str): name of coadded spectrum
        do_cr (bool): remove cosmic rays?
        debug (bool): show debug plots?
        kwargs: see coadd.py
    returns:
        spec1d: coadded XSpectrum1D
    """

    nfile = len(files)
    if nfile <= 1:
        msgs.info('Only one spectrum; exiting coadding...')
        return

    fname = files[0]
    ext_final = fits.getheader(fname, -1)
    norder = ext_final['ECHORDER'] + 1
    msgs.info('spectrum {:s} has {:d} orders'.format(fname, norder))
    if norder <= 1:
        msgs.error('The number of orders has to be greater than one for echelle. Longslit data?')

    if giantcoadd:
        msgs.info('Coadding all orders and exposures at once')
        spectra = load.ech_load_spec(files, objid=objids,order=None, extract=extract, flux=flux)
        wave_grid = np.zeros((2,spectra.nspec))
        for i in range(spectra.nspec):
            wave_grid[0, i] = spectra[i].wvmin.value
            wave_grid[1, i] = spectra[i].wvmax.value
        ech_kwargs = {'echelle': True, 'wave_grid_min': np.min(wave_grid), 'wave_grid_max': np.max(wave_grid),
                      'v_pix': v_pix}
        kwargs.update(ech_kwargs)
        # Coadding
        spec1d = coadd.coadd_spectra(spectra, wave_grid_method=wave_grid_method, niter=niter,
                                          scale_method=scale_method, do_offset=do_offset, sigrej_final=sigrej_final,
                                          do_var_corr=do_var_corr, qafile=qafile, outfile=outfile,
                                          do_cr=do_cr, debug=debug,**kwargs)
    else:
        msgs.info('Coadding individual orders first and then merge order')
        spectra_list = []
        # Keywords for Table
        rsp_kwargs = {}
        rsp_kwargs['wave_tag'] = '{:s}_WAVE'.format(extract)
        rsp_kwargs['flux_tag'] = '{:s}_FLAM'.format(extract)
        rsp_kwargs['sig_tag'] = '{:s}_FLAM_SIG'.format(extract)
        #wave_grid = np.zeros((2,norder))
        for iord in range(norder):
            spectra = load.ech_load_spec(files, objid=objids, order=iord, extract=extract, flux=flux)
            ech_kwargs = {'echelle': False, 'wave_grid_min': spectra.wvmin.value, 'wave_grid_max': spectra.wvmax.value, 'v_pix': v_pix}
            #wave_grid[0,iord] = spectra.wvmin.value
            #wave_grid[1,iord] = spectra.wvmax.value
            kwargs.update(ech_kwargs)
            # Coadding the individual orders
            if qafile is not None:
                qafile_iord = qafile+'_%s'%str(iord)
            else:
                qafile_iord =  None
            spec1d_iord = coadd.coadd_spectra(spectra, wave_grid_method=wave_grid_method, niter=niter,
                                       scale_method=scale_method, do_offset=do_offset, sigrej_final=sigrej_final,
                                       do_var_corr=do_var_corr, qafile=qafile_iord, outfile=None,
                                       do_cr=do_cr, debug=debug, **kwargs)
            spectrum = spec_from_array(spec1d_iord.wavelength, spec1d_iord.flux, spec1d_iord.sig,**rsp_kwargs)
            spectra_list.append(spectrum)

        spectra_coadd = collate(spectra_list)

        # Rebin the spectra
        # ToDo: we should read in JFH's wavelength grid here.
        # Join into one XSpectrum1D object
        # Final wavelength array
        kwargs['wave_grid_min'] = np.min(spectra_coadd.data['wave'][spectra_coadd.data['wave'] > 0])
        kwargs['wave_grid_max'] = np.max(spectra_coadd.data['wave'][spectra_coadd.data['wave'] > 0])
        wave_final = coadd.new_wave_grid(spectra_coadd.data['wave'], wave_method=wave_grid_method, **kwargs)
        # The rebin function in linetools cannot work on collated spectra (i.e. filled 0).
        # Thus I have to rebin the spectra first and then collate again.
        spectra_list_new = []
        for i in range(spectra_coadd.nspec):
            speci = spectra_list[i].rebin(wave_final * units.AA, all=True, do_sig=True, grow_bad_sig=True,
                                          masking='none')
            spectra_list_new.append(speci)
        spectra_coadd_rebin = collate(spectra_list_new)

        ## Scale the individual orders relative to one another
        if orderscale == 'photometry':
            # Only tested on NIRES.
            if phot_scale_dicts is not None:
                spectra_coadd_rebin = order_phot_scale(spectra_coadd_rebin, phot_scale_dicts, debug=debug)
            else:
                msgs.warn('No photometric information is provided. Will use median scale.')
                orderscale = 'median'
        elif orderscale == 'median':
            #rmask = spectra_coadd_rebin.data['sig'].filled(0.) > 0.
            #sn2, weights = coadd.sn_weights(fluxes, sigs, rmask, wave)
            ## scaling different orders
            order_median_scale(spectra_coadd_rebin, nsig=sigrej_final, niter=niter, overlapfrac=overlapfrac,
                               num_min_pixels=num_min_pixels, SN_MIN_MEDSCALE=SN_MIN_MEDSCALE, debug=debug)
        else:
            msgs.warn('No scaling was performed between the different orders.')


        if mergeorder:
            fluxes, sigs, wave = coadd.unpack_spec(spectra_coadd_rebin, all_wave=False)
            ## Merging orders
            msgs.info('Merging different orders')
            ## ToDo: Joe claimed not to use pixel-dependent weighting.
            weights = 1.0 / sigs**2
            weights[~np.isfinite(weights)] = 0.0
            weight_combine = np.sum(weights, axis=0)
            weight_norm = weights / weight_combine
            weight_norm[np.isnan(weight_norm)] = 1.0
            flux_final = np.sum(fluxes * weight_norm, axis=0)
            sig_final = np.sqrt(np.sum((weight_norm * sigs) ** 2, axis=0))
            spec1d_final = spec_from_array(wave_final * units.AA,flux_final,sig_final,**rsp_kwargs)

            if outfile is not None:
                msgs.info('Saving the final calibrated spectrum as {:s}'.format(outfile))
                coadd.write_to_disk(spec1d_final, outfile)

            if (qafile is not None) or (debug):
                # plot and save qa
                plt.figure(figsize=(12, 6))
                ax1 = plt.axes([0.07, 0.13, 0.9, 0.4])
                ax2 = plt.axes([0.07, 0.55, 0.9, 0.4])
                plt.setp(ax2.get_xticklabels(), visible=False)

                medf = np.median(spec1d_final.flux)
                ylim = (np.sort([0. - 0.3 * medf, 5 * medf]))
                cmap = plt.get_cmap('RdYlBu_r')
                for idx in range(spectra_coadd_rebin.nspec):
                    spectra_coadd_rebin.select = idx
                    color = cmap(float(idx) / spectra_coadd_rebin.nspec)
                    ind_good = spectra_coadd_rebin.sig > 0
                    ax1.plot(spectra_coadd_rebin.wavelength[ind_good], spectra_coadd_rebin.flux[ind_good], color=color)

                if (np.max(spec1d_final.wavelength) > (9000.0 * units.AA)):
                    skytrans_file = resource_filename('pypeit', '/data/skisim/atm_transmission_secz1.5_1.6mm.dat')
                    skycat = np.genfromtxt(skytrans_file, dtype='float')
                    scale = 0.85 * ylim[1]
                    ax2.plot(skycat[:, 0] * 1e4, skycat[:, 1] * scale, 'm-', alpha=0.5)

                ax2.plot(spec1d_final.wavelength, spec1d_final.sig, ls='steps-', color='0.7')
                ax2.plot(spec1d_final.wavelength, spec1d_final.flux, ls='steps-', color='b')

                ax1.set_xlim([np.min(spec1d_final.wavelength.value), np.max(spec1d_final.wavelength.value)])
                ax2.set_xlim([np.min(spec1d_final.wavelength.value), np.max(spec1d_final.wavelength.value)])
                ax1.set_ylim(ylim)
                ax2.set_ylim(ylim)
                ax1.set_xlabel('Wavelength (Angstrom)')
                ax1.set_ylabel('Flux')
                ax2.set_ylabel('Flux')

                plt.tight_layout(pad=0.2, h_pad=0., w_pad=0.2)

                if len(qafile.split('.')) == 1:
                    msgs.info("No format given for the qafile; saving to PDF.")
                    qafile = qafile + '.pdf'
                if qafile:
                    plt.savefig(qafile)
                    msgs.info("Wrote coadd QA: {:s}".format(qafile))
                if debug:
                    plt.show()
                plt.close()

            ### Do NOT remove this part although it is deprecated.
            # We may need to go back to using this piece of code after fixing the coadd.coadd_spectra problem on the first order.
            #kwargs['echelle'] = True
            #kwargs['wave_grid_min'] = np.min(wave_grid)
            #kwargs['wave_grid_max'] = np.max(wave_grid)
            #spec1d_final = coadd.coadd_spectra(spectra_coadd_rebin, wave_grid_method=wave_grid_method, niter=niter,
            #                                  scale_method=scale_method, do_offset=do_offset, sigrej_final=sigrej_final,
            #                                  do_var_corr=do_var_corr, qafile=qafile, outfile=outfile,
            #                                  do_cr=do_cr, debug=debug, **kwargs)
            return spec1d_final
        else:
            msgs.warn('Skipped merging orders')
            if outfile is not None:
                for iord in range(len(spectra_list)):
                    outfile_iord = outfile.replace('.fits','_ORDER{:04d}.fits'.format(iord))
                    msgs.info('Saving the final calibrated spectrum of order {:d} as {:s}'.format(iord, outfile_iord))
                    spectra_list[iord].write_to_fits(outfile_iord)
            return spectra_list
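
# Self-contained illustration of the inverse-variance merge used above: two
# fake overlapping orders on a common grid, with sigma = 0 marking pixels an
# order does not cover (the all-zero-weight NaN guard is omitted here).
import numpy as np

fluxes = np.array([[1.0, 1.2, 0.0],
                   [0.9, 1.1, 1.0]])
sigs   = np.array([[0.1, 0.2, 0.0],
                   [0.2, 0.1, 0.1]])

with np.errstate(divide='ignore'):
    weights = 1.0/sigs**2
weights[~np.isfinite(weights)] = 0.0
weight_norm = weights/np.sum(weights, axis=0)
flux_final = np.sum(fluxes*weight_norm, axis=0)
sig_final = np.sqrt(np.sum((weight_norm*sigs)**2, axis=0))
print(flux_final)  # [0.98 1.12 1.  ]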
Example #26
    def write_to_fits(self,
                      subheader,
                      outfile,
                      overwrite=True,
                      update_det=None,
                      slitspatnum=None,
                      debug=False):
        """
        Write the set of SpecObj objects to one multi-extension FITS file

        Args:
            subheader (:obj:`dict`):
            outfile (str):
            overwrite (bool, optional):
            update_det (int or list, optional):
              If provided, do not clobber the existing file but only update
              the indicated detectors.  Useful for re-running on a subset of detectors
            slitspatnum (:obj:`str` or :obj:`list`, optional):
                Restricted set of slits for reduction

        """
        if os.path.isfile(outfile) and (not overwrite):
            msgs.warn("Outfile exists.  Set overwrite=True to clobber it")
            return

        # If the file exists and update_det (and slit_spat_num) is provided, use the existing header
        #   and load up all the other hdus so that we only over-write the ones
        #   we are updating
        if os.path.isfile(outfile) and (update_det is not None
                                        or slitspatnum is not None):
            _specobjs = SpecObjs.from_fitsfile(outfile)
            mask = np.ones(_specobjs.nobj, dtype=bool)
            # Update_det
            if update_det is not None:
                # Pop out those with this detector (and slit if slit_spat_num is provided)
                for det in np.atleast_1d(update_det):
                    mask[_specobjs.DET == det] = False
            elif slitspatnum is not None:  # slitspatnum
                dets, spat_ids = slittrace.parse_slitspatnum(slitspatnum)
                for det, spat_id in zip(dets, spat_ids):
                    mask[(_specobjs.DET == det)
                         & (_specobjs.SLITID == spat_id)] = False
            _specobjs = _specobjs[mask]
            # Add in the new
            for sobj in self.specobjs:
                _specobjs.add_sobj(sobj)
        else:
            _specobjs = self.specobjs

        # Build up the Header
        header = initialize_header(primary=True)
        for key in subheader.keys():
            header[key.upper()] = subheader[key]

        # Init
        prihdu = fits.PrimaryHDU()
        hdus = [prihdu]
        prihdu.header = header

        # Add class info
        prihdu.header['DMODCLS'] = (self.__class__.__name__, 'Datamodel class')
        prihdu.header['DMODVER'] = (self.version, 'Datamodel version')

        detector_hdus = {}
        nspec, ext = 0, 0
        # Loop on the SpecObj objects
        for sobj in _specobjs:
            if sobj is None:
                continue
            # HDUs
            if debug:
                import pdb
                pdb.set_trace()
            shdul = sobj.to_hdu()
            if len(shdul) == 2:  # Detector?
                detector_hdus[sobj['DET']] = shdul[1]
                shdu = [shdul[0]]
            elif len(shdul) == 1:  # Detector?
                shdu = shdul
            else:
                msgs.error("Should not get here...")
            # Check -- If sobj had only 1 array, the BinTableHDU test will fail
            assert len(shdu) == 1, 'Bad data model!!'
            assert isinstance(shdu[0],
                              fits.hdu.table.BinTableHDU), 'Bad data model2'
            #shdu[0].header['DMODCLS'] = (self.__class__.__name__, 'Datamodel class')
            #shdu[0].header['DMODVER'] = (self.version, 'Datamodel version')
            # Name
            shdu[0].name = sobj.NAME
            # Extension
            keywd = 'EXT{:04d}'.format(ext)
            prihdu.header[keywd] = sobj.NAME
            ext += 1
            nspec += 1
            # Append
            hdus += shdu

        # Deal with Detectors
        for key, item in detector_hdus.items():
            # TODO - Add EXT to the primary header for these??
            prefix = specobj.det_hdu_prefix(key)
            # Name
            if prefix not in item.name:  # In case we are re-loading
                item.name = specobj.det_hdu_prefix(key) + item.name
            # Append
            hdus += [item]

        # A few more for the header
        prihdu.header['NSPEC'] = nspec

        # Code versions
        initialize_header(hdr=prihdu.header)

        # Finish
        hdulist = fits.HDUList(hdus)
        if debug:
            import pdb
            pdb.set_trace()
        hdulist.writeto(outfile, overwrite=overwrite)
        msgs.info("Wrote 1D spectra to {:s}".format(outfile))
        return
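
# Minimal astropy sketch of the file layout produced above: a primary HDU
# carrying EXTxxxx name cards and one BinTableHDU per spectrum. The NAME
# strings are made-up placeholders for pypeit SpecObj names.
import numpy as np
from astropy.io import fits
from astropy.table import Table

prihdu = fits.PrimaryHDU()
hdus = [prihdu]
for ext, name in enumerate(['SPAT0100-SLIT0000-DET01', 'SPAT0300-SLIT0000-DET01']):
    shdu = fits.table_to_hdu(Table({'WAVE': np.linspace(4000., 5000., 10)}))
    shdu.name = name
    prihdu.header['EXT{:04d}'.format(ext)] = name
    hdus.append(shdu)
fits.HDUList(hdus).writeto('spec1d_sketch.fits', overwrite=True)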
Example #27
def subtract_overscan(rawframe, datasec_img, oscansec_img,
                      method='savgol', params=[5, 65]):
    """
    Subtract overscan

    Args:
        rawframe (:obj:`numpy.ndarray`):
            Frame from which to subtract overscan
        datasec_img (:obj:`numpy.ndarray`):
            An array the same shape as rawframe that identifies
            the pixels associated with the data on each amplifier.
            0 for not data, 1 for amplifier 1, 2 for amplifier 2, etc.
        oscansec_img (:obj:`numpy.ndarray`):
            An array the same shape as rawframe that identifies
            the pixels associated with the overscan region on each
            amplifier.
            0 for not data, 1 for amplifier 1, 2 for amplifier 2, etc.
        method (:obj:`str`, optional):
            The method used to fit the overscan region.  Options are
            polynomial, savgol, median.
        params (:obj:`list`, optional):
            Parameters for the overscan subtraction.  For
            method=polynomial, set params = order, number of pixels,
            number of repeats ; for method=savgol, set params = order,
            window size ; for method=median, params are ignored.

    Returns:
        :obj:`numpy.ndarray`: The input frame with the overscan region
        subtracted
    """
    # Copy the data so that the subtraction is not done in place
    no_overscan = rawframe.copy()

    # Amplifiers
    amps = np.unique(datasec_img[datasec_img > 0]).tolist()

    # Perform the overscan subtraction for each amplifier
    for amp in amps:
        # Pull out the overscan data
        overscan, _ = rect_slice_with_mask(rawframe, oscansec_img, amp)
        # Pull out the real data
        data, data_slice = rect_slice_with_mask(rawframe, datasec_img, amp)

        # Shape along at least one axis must match
        data_shape = data.shape
        if not np.any([dd == do for dd, do in zip(data_shape, overscan.shape)]):
            msgs.error('Overscan sections do not match amplifier sections for '
                       'amplifier {0}'.format(amp))
        compress_axis = 1 if data_shape[0] == overscan.shape[0] else 0

        # Fit/Model the overscan region
        osfit = np.median(overscan) if method.lower() == 'median' \
            else np.median(overscan, axis=compress_axis)
        if method.lower() == 'polynomial':
            # TODO: Use np.polynomial.polynomial.polyfit instead?
            c = np.polyfit(np.arange(osfit.size), osfit, params[0])
            ossub = np.polyval(c, np.arange(osfit.size))
        elif method.lower() == 'savgol':
            ossub = signal.savgol_filter(osfit, params[1], params[0])
        elif method.lower() == 'median':
            # Subtract scalar and continue
            no_overscan[data_slice] -= osfit
            continue
        else:
            raise ValueError('Unrecognized overscan subtraction method: {0}'.format(method))

        # Subtract along the appropriate axis
        no_overscan[tuple(data_slice)] -= (ossub[:, None] if compress_axis == 1 else ossub[None, :])

    return no_overscan
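
# A small synthetic check of subtract_overscan (assumes the module's
# rect_slice_with_mask helper is importable alongside it): one amplifier,
# data in columns 0-3, overscan in columns 4-5, constant bias level of 100.
import numpy as np

raw = np.full((6, 6), 100.0)
raw[:, :4] += 50.0                              # "signal" in the data section
datasec = np.zeros((6, 6), dtype=int)
datasec[:, :4] = 1
oscansec = np.zeros((6, 6), dtype=int)
oscansec[:, 4:] = 1

clean = subtract_overscan(raw, datasec, oscansec, method='median')
print(clean[0, :4])                             # expected: [50. 50. 50. 50.]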
Example #28
def read_deimos(raw_file, det=None):
    """
    Read a raw DEIMOS data frame (one or more detectors)
    Packed in a multi-extension HDU
    Based on pypeit.arlris.read_lris...
       Based on readmhdufits.pro

    Parameters
    ----------
    raw_file : str
      Filename

    Returns
    -------
    array : ndarray
      Combined image
    header : FITS header
    sections : tuple
      List of datasec, oscansec sections
    """

    # Check for file; allow for extra .gz, etc. suffix
    fil = glob.glob(raw_file + '*')
    if len(fil) != 1:
        msgs.error('Found {0} files matching {1}'.format(
            len(fil), raw_file + '*'))
    # Read
    try:
        msgs.info("Reading DEIMOS file: {:s}".format(fil[0]))
    except AttributeError:
        print("Reading DEIMOS file: {:s}".format(fil[0]))

    hdu = fits.open(fil[0])
    head0 = hdu[0].header

    # Get post, pre-pix values
    precol = head0['PRECOL']
    postpix = head0['POSTPIX']
    preline = head0['PRELINE']
    postline = head0['POSTLINE']
    detlsize = head0['DETLSIZE']
    x0, x_npix, y0, y_npix = np.array(parse.load_sections(detlsize)).flatten()

    # Create final image
    if det is None:
        image = np.zeros((x_npix, y_npix + 4 * postpix))

    # Setup for datasec, oscansec
    dsec = []
    osec = []

    # get the x and y binning factors...
    binning = head0['BINNING']
    if binning != '1,1':
        msgs.error("This binning for DEIMOS might not work.  But it might..")

    xbin, ybin = [int(ibin) for ibin in binning.split(',')]

    # DEIMOS detectors
    nchip = 8

    if det is None:
        chips = range(nchip)
    else:
        chips = [det - 1]  # Indexing starts at 0 here
    # Loop
    for tt in chips:
        data, oscan = deimos_read_1chip(hdu, tt + 1)

        # if n_elements(nobias) eq 0 then nobias = 0

        # One detector??
        if det is not None:
            image = np.zeros((data.shape[0], data.shape[1] + oscan.shape[1]))

        # Indexing
        x1, x2, y1, y2, o_x1, o_x2, o_y1, o_y2 = indexing(tt, postpix, det=det)

        # Fill
        image[y1:y2, x1:x2] = data
        image[o_y1:o_y2, o_x1:o_x2] = oscan

        # Sections
        idsec = '[{:d}:{:d},{:d}:{:d}]'.format(y1, y2, x1, x2)
        iosec = '[{:d}:{:d},{:d}:{:d}]'.format(o_y1, o_y2, o_x1, o_x2)
        dsec.append(idsec)
        osec.append(iosec)
    # Return
    return image, head0, (dsec, osec)
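# A hedged usage sketch for read_deimos; the filename below is a hypothetical
# placeholder.  With det=None all 8 chips are read and combined; det=3, for
# example, reads a single detector.
image, header, (dsec, osec) = read_deimos('DE.20170425.09554.fits', det=None)
print(image.shape, dsec[0], osec[0])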
def pattern_frequency(frame, axis=1):
    """
    Using the supplied 2D array, calculate the pattern frequency
    along the specified axis.

    Args:
        frame (:obj:`numpy.ndarray`):
            2D array to measure the pattern frequency
        axis (int, optional):
            The axis along which the pattern frequency is measured (0 or 1).

    Returns:
        :obj:`float`: The frequency of the sinusoidal pattern.
    """
    # For axis=0, transpose
    arr = frame.copy()
    if axis == 0:
        arr = frame.T
    elif axis != 1:
        msgs.error("frame must be a 2D image, and axis must be 0 or 1")

    # Subtract the DC offset from each row
    arr -= np.median(arr, axis=1)[:, np.newaxis]
    # Find significant deviations and ignore those rows
    mad = 1.4826*np.median(np.abs(arr))
    ww = np.where(arr > 10*mad)
    # Create a mask of these rows
    msk = np.sort(np.unique(ww[0]))

    # Compute the Fourier transform to obtain an estimate of the dominant frequency component
    amp = np.fft.rfft(arr, axis=1)
    idx = (np.arange(arr.shape[0]), np.argmax(np.abs(amp), axis=1))

    # Construct the variables of the sinusoidal waveform
    amps = (np.abs(amp))[idx] * (2.0 / arr.shape[1])
    phss = np.arctan2(amp.imag, amp.real)[idx]
    frqs = idx[1]

    # Use the above as initial guess parameters in the chi-squared minimisation
    cosfunc = lambda xarr, *p: p[0] * np.cos(2.0 * np.pi * p[1] * xarr + p[2])
    xdata = np.linspace(0.0, 1.0, arr.shape[1])
    # Calculate the amplitude distribution
    amp_dist = np.zeros(arr.shape[0])
    frq_dist = np.zeros(arr.shape[0])
    # Loop over all rows to obtain independent estimates that can be averaged
    for ii in range(arr.shape[0]):
        if ii in msk:
            continue
        try:
            popt, pcov = curve_fit(cosfunc, xdata, arr[ii, :], p0=[amps[ii], frqs[ii], phss[ii]],
                                   bounds=([-np.inf, frqs[ii]-1, -np.inf],
                                           [+np.inf, frqs[ii]+1, +np.inf]))
        except ValueError:
            msgs.warn("Input data invalid for pattern frequency fit of row {0:d}/{1:d}".format(ii+1, arr.shape[0]))
            continue
        except RuntimeError:
            msgs.warn("Pattern frequency fit failed for row {0:d}/{1:d}".format(ii+1, arr.shape[0]))
            continue
        amp_dist[ii] = popt[0]
        frq_dist[ii] = popt[1]
    ww = np.where(amp_dist > 0.0)
    use_amp = np.median(amp_dist[ww])
    use_frq = np.median(frq_dist[ww])
    # Calculate the frequency distribution with a prior on the amplitude
    frq_dist = np.zeros(arr.shape[0])
    for ii in range(arr.shape[0]):
        if ii in msk:
            continue
        try:
            popt, pcov = curve_fit(cosfunc, xdata, arr[ii, :], p0=[use_amp, use_frq, phss[ii]],
                                   bounds=([use_amp * 0.99999999, use_frq-1, -np.inf],
                                           [use_amp * 1.00000001, use_frq+1, +np.inf]))
        except ValueError:
            msgs.warn("Input data invalid for patern frequency fit of row {0:d}/{1:d}".format(ii+1, arr.shape[0]))
            continue
        except RuntimeError:
            msgs.warn("Pattern frequency fit failed for row {0:d}/{1:d}".format(ii+1, arr.shape[0]))
            continue
        frq_dist[ii] = popt[1]
    # Ignore masked values, and return the best estimate of the frequency
    ww = np.where(frq_dist > 0.0)
    medfrq = np.median(frq_dist[ww])
    return medfrq/(arr.shape[1]-1)
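# A minimal sketch exercising pattern_frequency on a synthetic frame with a
# known sinusoidal pattern.  The function returns the frequency per pixel, so
# a pattern with kfreq cycles across ncols pixels should come back as roughly
# kfreq/(ncols - 1); the values here are arbitrary test inputs.
import numpy as np

nrows, ncols, kfreq = 50, 500, 20.0
x = np.linspace(0.0, 1.0, ncols)
rng = np.random.default_rng(1)
frame = 2.0 * np.cos(2.0 * np.pi * kfreq * x)[None, :] + rng.normal(0.0, 0.1, (nrows, ncols))
freq = pattern_frequency(frame, axis=1)
print(freq, kfreq / (ncols - 1))  # the two values should nearly agree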
def main(args):
    """ Executes 2d coadding
    """
    msgs.warn('PATH = ' + os.getcwd())
    # Load the file
    if args.file is not None:
        spectrograph, config_lines, spec2d_files = read_coadd2d_file(args.file)
        # Parameters
        # TODO: Shouldn't this reinstantiate the same parameters used in
        # the PypeIt run that extracted the objects?  Why are we not
        # just passing the pypeit file?
        # JFH: The reason is that the coadd2dfile may want different reduction parameters
        spectrograph_def_par = spectrograph.default_pypeit_par()
        parset = par.PypeItPar.from_cfg_lines(cfg_lines=spectrograph_def_par.to_config(),
                                                 merge_with=config_lines)
    elif args.obj is not None:
        # TODO: We should probably be reading the pypeit file and using those parameters here rather than using the
        # default parset.
        # TODO: This needs to define the science path
        spec2d_files = glob.glob('./Science/spec2d_*' + args.obj + '*')
        head0 = fits.getheader(spec2d_files[0])
        spectrograph_name = head0['SPECTROG']
        spectrograph = load_spectrograph(spectrograph_name)
        parset = spectrograph.default_pypeit_par()
    else:
        msgs.error('You must either input a coadd2d file with --file or an object name with --obj')

    # Update with configuration specific parameters (which requires science file) and initialize spectrograph
    spectrograph_cfg_lines = spectrograph.config_specific_par(spec2d_files[0]).to_config()
    parset = par.PypeItPar.from_cfg_lines(cfg_lines=spectrograph_cfg_lines, merge_with=parset.to_config())

    # If detector was passed as an argument override whatever was in the coadd2d_file
    if args.det is not None:
        msgs.info("Restricting reductions to detector={}".format(args.det))
        parset['rdx']['detnum'] = int(args.det)

    # Get headers (if possible) and base names
    spec1d_files = [files.replace('spec2d', 'spec1d') for files in spec2d_files]
    head1d = None
    for spec1d_file in spec1d_files:
        if os.path.isfile(spec1d_file):
            head1d = fits.getheader(spec1d_file)
            break
    if head1d is None:
        msgs.warn("No 1D spectra so am generating a dummy header for output")
        head1d = io.initialize_header()

    head2d = fits.getheader(spec2d_files[0])
    if args.basename is None:
        filename = os.path.basename(spec2d_files[0])
        basename = filename.split('_')[2]
    else:
        basename = args.basename

    # Write the par to disk
    par_outfile = basename+'_coadd2d.par'
    print("Writing the parameters to {}".format(par_outfile))
    parset.to_config(par_outfile)

    # Now run the coadds

    skysub_mode = head2d['SKYSUB']
    ir_redux = 'DIFF' in skysub_mode

    # Print status message
    msgs_string = 'Reducing target {:s}'.format(basename) + msgs.newline()
    msgs_string += 'Performing coadd of frames reduced with {:s} imaging'.format(skysub_mode)
    msgs_string += msgs.newline() + 'Combining frames in 2d coadd:' + msgs.newline()
    for file in spec2d_files:
        msgs_string += '{0:s}'.format(os.path.basename(file)) + msgs.newline()
    msgs.info(msgs_string)

    # TODO: This needs to be added to the parameter list for rdx
    redux_path = os.getcwd()
    master_dirname = os.path.basename(head2d['PYPMFDIR']) + '_coadd'
    master_dir = os.path.join(redux_path, master_dirname)

    # Make the new Master dir
    if not os.path.isdir(master_dir):
        msgs.info('Creating directory for Master output: {0}'.format(master_dir))
        os.makedirs(master_dir)

    # Instantiate the sci_dict
    sci_dict = OrderedDict()  # This needs to be ordered
    sci_dict['meta'] = {}
    sci_dict['meta']['vel_corr'] = 0.
    sci_dict['meta']['ir_redux'] = ir_redux

    # Find the detectors to reduce
    detectors = PypeIt.select_detectors(detnum=parset['rdx']['detnum'], ndet=spectrograph.ndet)
    if len(detectors) != spectrograph.ndet:
        msgs.warn('Not reducing detectors: {0}'.format(
            ' '.join([str(d) for d in set(np.arange(spectrograph.ndet) + 1) - set(detectors)])))

    # Loop on detectors
    for det in detectors:
        msgs.info("Working on detector {0}".format(det))
        sci_dict[det] = {}

        # Instantiate Coadd2d
        coadd = coadd2d.CoAdd2D.get_instance(spec2d_files, spectrograph, parset, det=det,
                                             offsets=parset['coadd2d']['offsets'],
                                             weights=parset['coadd2d']['weights'],
                                             ir_redux=ir_redux,
                                             debug_offsets=args.debug_offsets, debug=args.debug,
                                             samp_fact=args.samp_fact)

        # Coadd the slits
        coadd_dict_list = coadd.coadd(only_slits=None) # TODO implement only_slits later
        # Create the pseudo images
        psuedo_dict = coadd.create_psuedo_image(coadd_dict_list)
        # Reduce
        msgs.info('Running the extraction')
        sci_dict[det]['sciimg'], sci_dict[det]['sciivar'], sci_dict[det]['skymodel'], sci_dict[det]['objmodel'], \
        sci_dict[det]['ivarmodel'], sci_dict[det]['outmask'], sci_dict[det]['specobjs'] = coadd.reduce(
            psuedo_dict, show = args.show, show_peaks = args.peaks)
        # Save pseudo image master files
        coadd.save_masters(master_dir)

    # Make the new Science dir
    # TODO: This needs to be defined by the user
    scipath = os.path.join(redux_path, 'Science_coadd')
    if not os.path.isdir(scipath):
        msgs.info('Creating directory for Science output: {0}'.format(scipath))
        os.makedirs(scipath)

    # Save the results
    save.save_all(sci_dict, coadd.stack_dict['master_key_dict'], master_dir, spectrograph, head1d,
                  head2d, scipath, basename)#, binning=coadd.binning)
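# A hedged sketch of calling main() directly; in practice this runs through
# PypeIt's command-line entry point.  Only attributes referenced above are
# set, and the coadd2d filename is a hypothetical placeholder.
import argparse

args = argparse.Namespace(file='J1217+3905.coadd2d', obj=None, det=None,
                          basename=None, show=False, peaks=False,
                          debug_offsets=False, debug=False, samp_fact=1.0)
main(args)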
def replace_columns(img, bad_cols, replace_with='mean', copy=False):
    """
    Replace bad image columns.

    Args:
        img (`numpy.ndarray`_):
            A 2D array with image values to replace.
        bad_cols (`numpy.ndarray`_):
            Boolean array selecting bad columns in `img`.  Must have the
            correct shape.
        replace_with (:obj:`str`, optional):
            Method to use for the replacements.  Can be 'mean' (see
            :func:`replace_column_mean`) or 'linear' (see
            :func:`replace_column_linear`).
        copy (:obj:`bool`, optional):
            Copy `img` to a new array before making any
            modifications.  Otherwise, `img` is modified in-place.

    Returns:
        `numpy.ndarray`_: The modified image, which is either a new
        array or points to the in-place modification of `img` according
        to the value of `copy`.
    """
    # Check
    if img.ndim != 2:
        msgs.error('Images must be 2D!')
    if bad_cols.size != img.shape[1]:
        msgs.error('Bad column array has incorrect length!')
    if np.all(bad_cols):
        msgs.error('All columns are bad!')

    _img = img.copy() if copy else img

    if np.sum(bad_cols) == 0:
        # No bad columns
        return _img

    # Find the starting/ending indices of adjacent bad columns
    borders = np.zeros(img.shape[1], dtype=int)
    borders[bad_cols] = 1
    borders = borders - np.roll(borders,1)
    if borders[0] == -1:
        borders[0] = 0

    # Get edge indices and deal with edge cases
    lindx = borders == 1
    ledges = np.where(lindx)[0] if np.any(lindx) else [0]
    rindx = borders == -1
    redges = np.where(rindx)[0] if np.any(rindx) else [img.shape[1]]
    if ledges[0] > redges[0]:
        ledges = np.append([0], ledges)
    if ledges[-1] > redges[-1]:
        redges = np.append(redges, [img.shape[1]])
    # If this is tripped, there's a coding error
    assert len(ledges) == len(redges), 'Problem in edge setup'

    # Replace the image values
    if replace_with == 'mean':
        for l,r in zip(ledges, redges):
            replace_column_mean(_img, l, r)
    elif replace_with == 'linear':
        for l,r in zip(ledges, redges):
            replace_column_linear(_img, l, r)
    else:
        msgs.error('Unknown replace_columns method.  Must be mean or linear.')
    return _img
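# A minimal sketch of replace_columns on a synthetic image with two runs of
# bad columns; copy=True leaves the input image untouched.
import numpy as np

img = np.tile(np.arange(10, dtype=float), (5, 1))
bad_cols = np.zeros(10, dtype=bool)
bad_cols[2:4] = True
bad_cols[7] = True
fixed = replace_columns(img, bad_cols, replace_with='linear', copy=True)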
def read_coaddfile(ifile):
    """
    Read a PypeIt .coadd1d file, akin to a standard PypeIt file

    The top is a config block that sets ParSet parameters
      The spectrograph is required

    Args:
        ifile (str):
          Name of the .coadd1d file

    Returns:
        cfg_lines (list):
          Config lines to modify ParSet values
        spec1dfiles (list):
          Contains spec1dfiles to be coadded
        objids (list):
          Object ids aligned with each of the spec1dfiles


    """
    # Read in the pypeit reduction file
    msgs.info('Loading the coadd1d file')
    lines = par.util._read_pypeit_file_lines(ifile)
    is_config = np.ones(len(lines), dtype=bool)


    # Parse the fluxing block
    spec1dfiles = []
    objids_in = []
    s, e = par.util._find_pypeit_block(lines, 'coadd1d')
    if s >= 0 and e < 0:
        msgs.error("Missing 'coadd1d end' in {0}".format(ifile))
    elif (s < 0) or (s==e):
        msgs.error("Missing coadd1d block in in {0}. Check the input format for the .coadd1d file".format(ifile))
    else:
        for ctr, line in enumerate(lines[s:e]):
            prs = line.split(' ')
            spec1dfiles.append(prs[0])
            if ctr == 0 and len(prs) != 2:
                msgs.error('Invalid format for .coadd1d file.' + msgs.newline() +
                           'You must specify a spec1dfile and an objid on the first line of the coadd1d block')
            if len(prs) > 1:
                objids_in.append(prs[1])
        is_config[s-1:e+1] = False

    # Check the sizes of the inputs
    nspec = len(spec1dfiles)
    if len(objids_in) == 1:
        objids = nspec*objids_in
    elif len(objids_in) == nspec:
        objids = objids_in
    else:
        msgs.error('Invalid format for .coadd1d file.' + msgs.newline() +
                   'You must specify a single objid on the first line of the coadd1d block,' + msgs.newline() +
                   'or specify an objid for every spec1dfile in the coadd1d block.' + msgs.newline() +
                   'Run pypeit_coadd_1dspec --help for information on the format')
    # Construct config to get spectrograph
    cfg_lines = list(lines[is_config])

    # Return
    return cfg_lines, spec1dfiles, objids
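# A hedged sketch of the .coadd1d layout that read_coaddfile expects; the
# spectrograph, filenames, and object IDs below are hypothetical placeholders.
#
#   [rdx]
#     spectrograph = shane_kast_blue
#   [coadd1d]
#     coaddfile = 'J1217+3905_coadd.fits'
#
#   coadd1d read
#     spec1d_b27-J1217p3905.fits SPAT0176-SLIT0000-DET01
#     spec1d_b28-J1217p3905.fits SPAT0175-SLIT0000-DET01
#   coadd1d end
#
cfg_lines, spec1dfiles, objids = read_coaddfile('J1217+3905.coadd1d')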
def ech_load_specobj(fname,order=None):

    """ Load a spec1d file into a list of SpecObjExp objects
    Parameters
    ----------
    fname : str
    order : int, optional
      Echelle order to load; if None, all orders are returned

    Returns
    -------
    specObjs : list of SpecObjExp
    head0
    """
    #if order is None:
    #    msgs.warn('You did not specify an order. Return specObjs with all orders.')
    #    specObjs, head0 = load.load_specobj(fname)
    #    return specObjs, head0

    speckeys = ['WAVE', 'SKY', 'MASK', 'FLAM', 'FLAM_IVAR', 'FLAM_SIG', 'COUNTS_IVAR', 'COUNTS']
    #
    specObjs = []
    hdulist = fits.open(fname)
    head0 = hdulist[0].header
    for hdu in hdulist:
        if hdu.name == 'PRIMARY':
            continue
        #elif hdu.name[8:17] != 'ORDER'+'{0:04}'.format(order):
        #    continue
        # Parse name
        idx = hdu.name
        objp = idx.split('-')
        if objp[-1][0:3] == 'DET':
            det = int(objp[-1][3:])
        else:
            det = int(objp[-1][1:])
        if objp[-2][:5] == 'ORDER':
            iord = int(objp[-2][5:])
        else:
            msgs.warn('Loading longslit data?')
            iord = int(-1)
        # If order is None, return all extensions;
        # if order is not None, skip extensions that do not match the requested order.
        if (order is not None) and (iord !=order):
            continue
        # Load data
        spec = Table(hdu.data)
        shape = (len(spec), 1024)  # 2nd number is dummy
        # New and wrong
        try:
            specobj = specobjs.SpecObj(shape, None, None, idx = idx)
        except:
            debugger.set_trace()
            msgs.error("BUG ME")
        # Add order number
        specobj.ech_orderindx = iord
        # ToDo: does this need to be changed to the real order number?
        specobj.ech_order = iord
        # Add trace
        try:
            specobj.trace_spat = spec['TRACE']
        except:
            # KLUDGE!
            specobj.trace_spat = np.arange(len(spec['BOX_WAVE']))
        # Add spectrum
        if 'BOX_COUNTS' in spec.keys():
            for skey in speckeys:
                try:
                    specobj.boxcar[skey] = spec['BOX_{:s}'.format(skey)].data
                except KeyError:
                    pass
            # Add units on wave
            specobj.boxcar['WAVE'] = specobj.boxcar['WAVE'] * units.AA

        if 'OPT_COUNTS' in spec.keys():
            for skey in speckeys:
                try:
                    specobj.optimal[skey] = spec['OPT_{:s}'.format(skey)].data
                except KeyError:
                    pass
            # Add units on wave
            specobj.optimal['WAVE'] = specobj.optimal['WAVE'] * units.AA
        # Append
        specObjs.append(specobj)
    # Return
    return specObjs, head0
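# A hedged usage sketch for ech_load_specobj; the spec1d filename is a
# hypothetical placeholder.  order=None returns every echelle order, while
# e.g. order=3 keeps only extensions whose name contains ORDER0003.
specObjs, head0 = ech_load_specobj('spec1d_J0100+2802_NIRES.fits', order=None)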
    def get_meta_value(self, inp, meta_key, required=False, ignore_bad_header=False, usr_row=None):
        """
        Return meta data from a given file (or its array of headers)

        Args:
            inp (str or list):
              Input filename or list of headers (headarr)
            meta_key (str or list of str):
              Meta key(s) for the value(s) to return
            required (bool, optional):
              Require the meta key to be returnable
            ignore_bad_header (bool, optional):
              Override required; not recommended
            usr_row (Row, optional):
              Provides user-supplied frametype (and other things not used)

        Returns:
            value: value or list of values

        """
        if isinstance(inp, str):
            headarr = self.get_headarr(inp)
        else:
            headarr = inp

        # Loop?
        if isinstance(meta_key, list):
            values = []
            for mdict in meta_key:
                values.append(self.get_meta_value(headarr, mdict, required=required))
            #
            return values

        # Are we prepared to provide this meta data?
        if meta_key not in self.meta.keys():
            if required:
                msgs.error("Need to allow for meta_key={} in your meta data".format(meta_key))
            else:
                msgs.warn("Requested meta data does not exist...")
                return None
        # Is this not derivable?  If so, use the default
        #   or search for it as a compound method
        value = None
        if self.meta[meta_key]['card'] is None:
            if 'default' in self.meta[meta_key].keys():
                value = self.meta[meta_key]['default']
            elif 'compound' in self.meta[meta_key].keys():
                value = self.compound_meta(headarr, meta_key)
            else:
                msgs.error("Failed to load spectrograph value for meta: {}".format(meta_key))
        else:
            # Grab from the header, if we can
            try:
                value = headarr[self.meta[meta_key]['ext']][self.meta[meta_key]['card']]
            except (KeyError, TypeError):
                value = None

        if value is None:
            # Was this required?
            if required:
                kerror = True
                if not ignore_bad_header:
                    # Is this meta required for this frame type (Spectrograph specific)
                    if ('required_ftypes' in self.meta[meta_key]) and (usr_row is not None):
                        kerror = False
                        # Is it required?
                        for ftype in usr_row['frametype'].split(','):
                            if ftype in self.meta[meta_key]['required_ftypes']:
                                kerror = True
                    # Bomb out?
                    if kerror:
                        msgs.error('Required meta "{:s}" did not load!  You may have a corrupt header'.format(meta_key))
                else:
                    msgs.warn("Required card {:s} missing from your header.  Proceeding with risk..".format(
                        self.meta[meta_key]['card']))
            return None

        # Deal with dtype (DO THIS HERE OR IN METADATA?  I'M TORN)
        if self.meta_data_model[meta_key]['dtype'] == str:
            value = str(value).strip()
        elif self.meta_data_model[meta_key]['dtype'] == int:
            value = int(value)
        elif self.meta_data_model[meta_key]['dtype'] == float:
            value = float(value)
        elif self.meta_data_model[meta_key]['dtype'] == tuple:
            assert isinstance(value, tuple)
        else:
            debugger.set_trace()
        # Return
        return value
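# A hedged usage sketch for get_meta_value on a Spectrograph instance,
# reusing the load_spectrograph helper seen elsewhere in this module; the
# filename is a hypothetical placeholder.
spectrograph = load_spectrograph('keck_deimos')
exptime = spectrograph.get_meta_value('DE.20170425.09554.fits', 'exptime', required=True)
airmass, decker = spectrograph.get_meta_value('DE.20170425.09554.fits', ['airmass', 'decker'])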
def flexure_obj_oldbuggyversion(specobjs, maskslits, method, sky_spectrum, sky_file=None, mxshft=None):
    """Correct wavelengths for flexure, object by object

    Parameters:
    ----------
    method : str
      'boxcar' -- Recommended
      'slitpix' --

    Returns:
    ----------
    flex_list: list
      list of dicts containing flexure results
        Aligned with specobjs
        Filled with a basically empty dict if the slit is skipped or there is no object

    """
    msgs.work("Consider doing 2 passes in flexure as in LowRedux")
    # Load Archive
#    skyspec_fil, arx_sky = flexure_archive(spectrograph=spectrograph, skyspec_fil=skyspec_fil)

    # Loop on objects
    flex_list = []

    gdslits = np.where(~maskslits)[0]
    for sl in range(len(specobjs)):
        # Reset
        flex_dict = dict(polyfit=[], shift=[], subpix=[], corr=[],
                         corr_cen=[], spec_file=sky_file, smooth=[],
                         arx_spec=[], sky_spec=[])
        if sl not in gdslits:
            flex_list.append(flex_dict.copy())
            continue
        msgs.info("Working on flexure in slit (if an object was detected): {:d}".format(sl))
        for specobj in specobjs[sl]:  # for convenience
            if specobj is None:
                continue

            # Using boxcar
            if method in ['boxcar', 'slitcen']:
                sky_wave = specobj.boxcar['WAVE'] #.to('AA').value
                sky_flux = specobj.boxcar['COUNTS_SKY']
            else:
                msgs.error("Not ready for this flexure method: {}".format(method))

            # Generate 1D spectrum for object
            obj_sky = xspectrum1d.XSpectrum1D.from_tuple((sky_wave, sky_flux))

            # Calculate the shift
            fdict = flex_shift(obj_sky, sky_spectrum, mxshft=mxshft)

            # Simple interpolation to apply
            npix = len(sky_wave)
            x = np.linspace(0., 1., npix)
            # Apply
            for attr in ['boxcar', 'optimal']:
                if not hasattr(specobj, attr):
                    continue
                if 'WAVE' in getattr(specobj, attr).keys():
                    msgs.info("Applying flexure correction to {0:s} extraction for object:".format(attr) +
                              msgs.newline() + "{0:s}".format(str(specobj)))
                    f = interpolate.interp1d(x, sky_wave, bounds_error=False, fill_value="extrapolate")
                    getattr(specobj, attr)['WAVE'] = f(x+fdict['shift']/(npix-1))*units.AA
            # Shift sky spec too
            cut_sky = fdict['sky_spec']
            x = np.linspace(0., 1., cut_sky.npix)
            f = interpolate.interp1d(x, cut_sky.wavelength.value, bounds_error=False, fill_value="extrapolate")
            twave = f(x + fdict['shift']/(cut_sky.npix-1))*units.AA
            new_sky = xspectrum1d.XSpectrum1D.from_tuple((twave, cut_sky.flux))

            # Update dict
            for key in ['polyfit', 'shift', 'subpix', 'corr', 'corr_cen', 'smooth', 'arx_spec']:
                flex_dict[key].append(fdict[key])
            flex_dict['sky_spec'].append(new_sky)
        flex_list.append(flex_dict.copy())
    return flex_list
    def get_rawimage(self, raw_file, det):
        """
        Read raw images and generate a few other bits and pieces
        that are key for image processing.

        Parameters
        ----------
        raw_file : :obj:`str`
            File to read
        det : :obj:`int`
            1-indexed detector to read

        Returns
        -------
        detector_par : :class:`pypeit.images.detector_container.DetectorContainer`
            Detector metadata parameters.
        raw_img : `numpy.ndarray`_
            Raw image for this detector.
        hdu : `astropy.io.fits.HDUList`_
            Opened fits file
        exptime : :obj:`float`
            Exposure time read from the file header
        rawdatasec_img : `numpy.ndarray`_
            Data (Science) section of the detector as provided by setting the
            (1-indexed) number of the amplifier used to read each detector
            pixel. Pixels unassociated with any amplifier are set to 0.
        oscansec_img : `numpy.ndarray`_
            Overscan section of the detector as provided by setting the
            (1-indexed) number of the amplifier used to read each detector
            pixel. Pixels unassociated with any amplifier are set to 0.
        """
        # Check for file; allow for extra .gz, etc. suffix
        fil = glob.glob(raw_file + '*')
        if len(fil) != 1:
            msgs.error("Found {:d} files matching {:s}".format(len(fil)))

        # Read FITS image
        msgs.info("Reading MMT Blue Channel file: {:s}".format(fil[0]))
        hdu = fits.open(fil[0])
        hdr = hdu[0].header

        # we're flipping FITS x/y to pypeit y/x here. pypeit wants blue on the
        # bottom, slit bottom on the right...
        rawdata = np.fliplr(hdu[0].data.astype(float).transpose())

        exptime = hdr['EXPTIME']

        # TODO Store these parameters in the DetectorPar.
        # Number of amplifiers
        detector_par = self.get_detector_par(hdu, det if det is not None else 1)
        numamp = detector_par['numamplifiers']

        # First read over the header info to determine the size of the output array...
        datasec = hdr['DATASEC']
        xdata1, xdata2, ydata1, ydata2 = np.array(
            parse.load_sections(datasec, fmt_iraf=False)).flatten()

        # Get the overscan section
        biassec = hdr['BIASSEC']
        xbias1, xbias2, ybias1, ybias2 = np.array(
            parse.load_sections(biassec, fmt_iraf=False)).flatten()

        # allocate output arrays and fill in with mask values
        rawdatasec_img = np.zeros_like(rawdata, dtype=int)
        oscansec_img = np.zeros_like(rawdata, dtype=int)

        # trim bad sections at beginning of data and bias sections
        rawdatasec_img[xdata1 + 2:xdata2, ydata1:ydata2 - 1] = 1
        oscansec_img[xbias1 + 2:xbias2, ybias1:ybias2 - 1] = 1

        return detector_par, rawdata, hdu, exptime, rawdatasec_img, oscansec_img
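# A hedged usage sketch for get_rawimage; the spectrograph name follows the
# PypeIt convention for MMT Blue Channel and the filename is a hypothetical
# placeholder.
spectrograph = load_spectrograph('mmt_bluechannel')
detector_par, raw_img, hdu, exptime, rawdatasec_img, oscansec_img = \
    spectrograph.get_rawimage('bluechan_0001.fits', 1)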
    def build_waveimg(self,
                      tilts,
                      slits,
                      spat_flexure=None,
                      spec_flexure=None):
        """
        Main algorithm to build the wavelength image

        Only applied to good slits, i.e. slits that are either unflagged or
        flagged only by conditions in the exclude_for_reducing list

        Args:
            tilts (`numpy.ndarray`_):
                Image holding tilts
            slits (:class:`pypeit.slittrace.SlitTraceSet`):
            spat_flexure (float, optional):
                Spatial flexure correction in pixels.
            spec_flexure (float, `numpy.ndarray`_, optional):
                Spectral flexure correction in pixels. If a float,
                the same spectral flexure correction will be applied
                to all slits. If a numpy array, the length of the
                array should be the same as the number of slits. The
                value of each element is the spectral shift in pixels
                to be applied to each slit.

        Returns:
            `numpy.ndarray`_: The wavelength image.
        """
        # Check spatial flexure type
        if spat_flexure is not None and not isinstance(spat_flexure, float):
            msgs.error("Spatial flexure must be None or float")
        # Check spectral flexure type
        if spec_flexure is None:
            spec_flex = np.zeros(slits.nslits)
        elif isinstance(spec_flexure, float):
            spec_flex = spec_flexure * np.ones(slits.nslits)
        elif isinstance(spec_flexure, np.ndarray):
            spec_flex = spec_flexure.copy()
            assert spec_flexure.size == slits.nslits
        spec_flex /= (slits.nspec - 1)

        # Setup
        #ok_slits = slits.mask == 0
        bpm = slits.mask.astype(bool)
        bpm &= np.logical_not(
            slits.bitmask.flagged(slits.mask,
                                  flag=slits.bitmask.exclude_for_reducing))
        ok_slits = np.logical_not(bpm)
        #
        image = np.zeros_like(tilts)
        slitmask = slits.slit_img(
            flexure=spat_flexure,
            exclude_flag=slits.bitmask.exclude_for_reducing)

        # If this is echelle print out a status message and do some error checking
        if self.par['echelle']:
            msgs.info('Evaluating 2-d wavelength solution for echelle....')
            # TODO UPDATE THIS!!
            #if len(wv_calib['fit2d']['orders']) != np.sum(ok_slits):
            #    msgs.error('wv_calib and ok_slits do not line up. Something is very wrong!')

        # Unpack some 2-d fit parameters if this is echelle
        for islit in np.where(ok_slits)[0]:
            slit_spat = slits.spat_id[islit]
            thismask = (slitmask == slit_spat)
            if not np.any(thismask):
                msgs.error("Something failed in wavelengths or masking..")
            if self.par['echelle']:
                # # TODO: Put this in `SlitTraceSet`?
                # evaluate solution --
                image[thismask] = self.wv_fit2d.eval(
                    tilts[thismask] + spec_flex[islit],
                    x2=np.full_like(tilts[thismask], slits.ech_order[islit]))
                image[thismask] /= slits.ech_order[islit]
            else:
                iwv_fits = self.wv_fits[islit]
                image[thismask] = iwv_fits.pypeitfit.eval(tilts[thismask] +
                                                          spec_flex[islit])
        # Return
        return image
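# A hedged sketch of calling build_waveimg; `wavecalib`, `tilts_img`, and
# `slits` are assumed to come from the corresponding PypeIt calibration
# steps and are not constructed here.
waveimg = wavecalib.build_waveimg(tilts_img, slits, spat_flexure=None,
                                  spec_flexure=np.zeros(slits.nslits))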
def flexure_obj(specobjs, maskslits, method, sky_file, mxshft=None):
    """Correct wavelengths for flexure, object by object

    Parameters:
    ----------
    method : str
      'boxcar' -- Recommended
      'slitpix' --
    sky_file: str

    Returns:
    ----------
    flex_list: list
      list of dicts containing flexure results
        Aligned with specobjs
        Filled with a basically empty dict if the slit is skipped or there is no object

    """
    sv_fdict = None
    msgs.work("Consider doing 2 passes in flexure as in LowRedux")
    # Load Archive
    sky_spectrum = load_sky_spectrum(sky_file)

    nslits = len(maskslits)
    gdslits = np.where(~maskslits)[0]

    # Loop on objects
    flex_list = []

    # Slit/objects to come back to
    return_later_sobjs = []

    # Loop over slits, and then over objects here
    for slit in range(nslits):
        msgs.info("Working on flexure in slit (if an object was detected): {:d}".format(slit))
        indx = specobjs.slitid == slit
        this_specobjs = specobjs[indx]
        # Reset
        flex_dict = dict(polyfit=[], shift=[], subpix=[], corr=[],
                         corr_cen=[], spec_file=sky_file, smooth=[],
                         arx_spec=[], sky_spec=[])
        # If no objects on this slit append an empty dictionary
        if slit not in gdslits:
            flex_list.append(flex_dict.copy())
            continue
        for ss, specobj in enumerate(this_specobjs):
            if specobj is None:
                continue
            msgs.info("Working on flexure for object # {:d}".format(specobj.objid) + "in slit # {:d}".format(specobj.slitid))
            # Using boxcar
            if method in ['boxcar', 'slitcen']:
                sky_wave = specobj.boxcar['WAVE'] #.to('AA').value
                sky_flux = specobj.boxcar['COUNTS_SKY']
            else:
                msgs.error("Not ready for this flexure method: {}".format(method))

            # Generate 1D spectrum for object
            obj_sky = xspectrum1d.XSpectrum1D.from_tuple((sky_wave, sky_flux))

            # Calculate the shift
            fdict = flex_shift(obj_sky, sky_spectrum, mxshft=mxshft)
            punt = False
            if fdict is None:
                msgs.warn("Flexure shift calculation failed for this spectrum.")
                if sv_fdict is not None:
                    msgs.warn("Will used saved estimate from a previous slit/object")
                    fdict = copy.deepcopy(sv_fdict)
                else:
                    # One does not exist yet
                    # Save it for later
                    return_later_sobjs.append([slit, ss])
                    punt = True
            else:
                sv_fdict = copy.deepcopy(fdict)

            # Punt?
            if punt:
                break

            # Interpolate
            new_sky = specobj.flexure_interp(sky_wave, fdict)
            # Update dict
            for key in ['polyfit', 'shift', 'subpix', 'corr', 'corr_cen', 'smooth', 'arx_spec']:
                flex_dict[key].append(fdict[key])
            flex_dict['sky_spec'].append(new_sky)

        flex_list.append(flex_dict.copy())

    # Do we need to go back? (handled once, after looping over all slits)
    for items in return_later_sobjs:
        if sv_fdict is None:
            msgs.info("No flexure corrections could be made")
            break
        # Setup
        slit, ss = items
        flex_dict = flex_list[slit]
        specobj = specobjs[ss]
        sky_wave = specobj.boxcar['WAVE'] #.to('AA').value
        # Copy me
        fdict = copy.deepcopy(sv_fdict)
        # Interpolate
        new_sky = specobj.flexure_interp(sky_wave, fdict)
        # Update dict
        for key in ['polyfit', 'shift', 'subpix', 'corr', 'corr_cen', 'smooth', 'arx_spec']:
            flex_dict[key].append(fdict[key])
        flex_dict['sky_spec'].append(new_sky)

    return flex_list
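# A hedged usage sketch for flexure_obj; `specobjs` and `maskslits` are
# assumed to come from an earlier reduction, and the archived sky file name
# is a hypothetical placeholder.
flex_list = flexure_obj(specobjs, maskslits, 'boxcar', 'paranal_sky.fits', mxshft=20)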
def ech_coadd(files, objids=None, extract='OPT', flux=True, giantcoadd=False,
              wave_grid_method='velocity', niter=5, wave_grid_min=None, wave_grid_max=None, v_pix=None,
              scale_method='auto', do_offset=False, sigrej_final=3., do_var_corr=False,
              qafile=None, outfile=None, do_cr=True, debug=False, **kwargs):
    """
    Coadd echelle spectra: either all orders and exposures at once
    (giantcoadd=True), or each order individually before merging the orders
    (giantcoadd=False).
    """
    nfile = len(files)
    if nfile <= 1:
        msgs.info('Only one spectrum exists; exiting coadding...')
        return

    fname = files[0]
    ext_final = fits.getheader(fname, -1)
    norder = ext_final['ORDER'] + 1
    msgs.info('spectrum {:s} has {:d} orders'.format(fname, norder))
    if norder <= 1:
        msgs.error('The number of orders has to be greater than one for echelle. Longslit data?')

    if giantcoadd:
        msgs.info('Coadding all orders and exposures at once')
        spectra = ech_load_spec(files, objid=objids,order=None, extract=extract, flux=flux)
        wave_grid = np.zeros((2,spectra.nspec))
        for i in range(spectra.nspec):
            wave_grid[0, i] = spectra[i].wvmin.value
            wave_grid[1, i] = spectra[i].wvmax.value
        ech_kwargs = {'echelle': True, 'wave_grid_min': np.min(wave_grid), 'wave_grid_max': np.max(wave_grid),
                      'v_pix': v_pix}
        kwargs.update(ech_kwargs)
        # Coadding
        spec1d = coadd.coadd_spectra(spectra, wave_grid_method=wave_grid_method, niter=niter,
                                          scale_method=scale_method, do_offset=do_offset, sigrej_final=sigrej_final,
                                          do_var_corr=do_var_corr, qafile=qafile, outfile=outfile,
                                          do_cr=do_cr, debug=debug,**kwargs)
    else:
        msgs.info('Coadding individual orders first and then merging orders')
        spectra_list = []
        # Keywords for Table
        rsp_kwargs = {}
        rsp_kwargs['wave_tag'] = '{:s}_WAVE'.format(extract)
        rsp_kwargs['flux_tag'] = '{:s}_FLAM'.format(extract)
        rsp_kwargs['sig_tag'] = '{:s}_FLAM_SIG'.format(extract)
        wave_grid = np.zeros((2,norder))
        for iord in range(norder):
            spectra = ech_load_spec(files, objid=objids, order=iord, extract=extract, flux=flux)
            ech_kwargs = {'echelle': False, 'wave_grid_min': spectra.wvmin.value, 'wave_grid_max': spectra.wvmax.value, 'v_pix': v_pix}
            wave_grid[0,iord] = spectra.wvmin.value
            wave_grid[1,iord] = spectra.wvmax.value
            kwargs.update(ech_kwargs)
            # Coadding the individual orders
            if qafile is not None:
                qafile_iord = qafile+'_%s'%str(iord)
            else:
                qafile_iord = None
            spec1d_iord = coadd.coadd_spectra(spectra, wave_grid_method=wave_grid_method, niter=niter,
                                       scale_method=scale_method, do_offset=do_offset, sigrej_final=sigrej_final,
                                       do_var_corr=do_var_corr, qafile=qafile_iord, outfile=outfile,
                                       do_cr=do_cr, debug=debug, **kwargs)
            spectrum = spec_from_array(spec1d_iord.wavelength, spec1d_iord.flux, spec1d_iord.sig,**rsp_kwargs)
            spectra_list.append(spectrum)
        # Join into one XSpectrum1D object
        spectra_coadd = collate(spectra_list)
        kwargs['echelle'] = True
        kwargs['wave_grid_min'] = np.min(wave_grid)
        kwargs['wave_grid_max'] = np.max(wave_grid)
        # ToDo: Currently the first order is not used due to a problem; add it back once the problem is fixed.
        spec1d = coadd.coadd_spectra(spectra_coadd[1:], wave_grid_method=wave_grid_method, niter=niter,
                                          scale_method=scale_method, do_offset=do_offset, sigrej_final=sigrej_final,
                                          do_var_corr=do_var_corr, qafile=qafile, outfile=outfile,
                                          do_cr=do_cr, debug=debug, **kwargs)

    return spec1d
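# A hedged usage sketch for ech_coadd; the filenames and object IDs are
# hypothetical placeholders for fluxed echelle spec1d outputs.
files = ['spec1d_J0100+2802_NIRES_exp1.fits',
         'spec1d_J0100+2802_NIRES_exp2.fits']
objids = ['OBJ0001', 'OBJ0001']
spec1d = ech_coadd(files, objids=objids, extract='OPT', flux=True,
                   giantcoadd=False, qafile='J0100+2802_coadd',
                   outfile='J0100+2802_coadd.fits')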