Code example #1
File: pypeit.py Project: catherinemanea/PypeIt
    def msgs_reset(self):
        """
        Reset the msgs object
        """

        # Reset the global logger
        msgs.reset(log=self.logname, verbosity=self.verbosity)
        msgs.pypeit_file = self.pypeit_file
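
The same reset can be performed directly on the global logger; a minimal sketch, using hypothetical log and .pypeit file names and the verbosity value used in the other examples on this page:

# Minimal sketch: reset the global PypeIt logger by hand.
# 'my_reduction.log' and 'my_reduction.pypeit' are hypothetical names.
from pypeit import msgs

msgs.reset(log='my_reduction.log', verbosity=2)
msgs.pypeit_file = 'my_reduction.pypeit'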
Code example #2
def main(args):

    import subprocess

    from astropy.io import fits

    from pypeit import msgs
    from pypeit.spectrographs import keck_lris
    from pypeit.spectrographs import keck_deimos
    from pypeit.spectrographs import gemini_gmos
    from pypeit import ginga

    # List only?
    if args.list:
        hdu = fits.open(args.file)
        print(hdu.info())
        return


    # Setup for PYPIT imports
    msgs.reset(verbosity=2)

    # RAW_LRIS??
    if 'keck_lris' in args.spectrograph:
        #
        if args.spectrograph == 'keck_lris_red_orig':
            gen_lris = keck_lris.KeckLRISROrigSpectrograph()
            img = gen_lris.get_rawimage(args.file, 1)[1]
        else:
            gen_lris = keck_lris.KeckLRISRSpectrograph()  # Using LRISr, but this will work for LRISb too
            img = gen_lris.get_rawimage(args.file,  None)[1]
    # RAW_DEIMOS??
    elif args.spectrograph == 'keck_deimos':
        #
        gen_deimos = keck_deimos.KeckDEIMOSSpectrograph()
        img = gen_deimos.get_rawimage(args.file, None)[1]
    # RAW_GEMINI??
    elif 'gemini_gmos' in args.spectrograph:
        # TODO this routine should show the whole mosaic if no detector number is passed in!
        # Need to figure out the number of amps
        gen_gmos = gemini_gmos.GeminiGMOSSpectrograph()
        img = gen_gmos.get_rawimage(args.file, args.det)[1]
    else:
        hdu = fits.open(args.file)
        img = hdu[args.exten].data
        # Write

    ginga.show_image(img)
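
The args object above is expected to come from the script's argument parser; a minimal sketch of the fields this main() actually reads (file, list, spectrograph, det, exten), built with a plain namespace instead of the real parser and a hypothetical file name:

# Hedged usage sketch: the field names are inferred from the body of main() above.
from types import SimpleNamespace

args = SimpleNamespace(
    file='raw_frame.fits',          # hypothetical raw FITS file
    list=False,                     # True would only print the HDU listing and return
    spectrograph='keck_lris_red',   # selects the reader branch above
    det=1,                          # detector number (used by the gemini_gmos branch)
    exten=0,                        # HDU extension for the generic fallback branch
)
main(args)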
Code example #3
File: view_fits.py Project: seib2/PypeIt
def main(args):

    import subprocess

    from astropy.io import fits

    from pypeit import msgs
    from pypeit.spectrographs import keck_lris
    from pypeit.spectrographs import keck_deimos
    from pypeit.spectrographs import gemini_gmos
    from pypeit import ginga

    # List only?
    if args.list:
        hdu = fits.open(args.file)
        print(hdu.info())
        return


    # Setup for PYPIT imports
    msgs.reset(verbosity=2)

    # RAW_LRIS??
    if args.raw_lris:
        # 
        img, _, _ = keck_lris.read_lris(args.file)
    # RAW_DEIMOS??
    elif args.raw_deimos:
        #
        img, _, _ = keck_deimos.read_deimos(args.file)
    # RAW_GEMINI??
    elif args.raw_gmos:
        # TODO this routine should show the whole mosaic if no detector number is passed in!
        # Need to figure out the number of amps
        img, _, _ = gemini_gmos.read_gmos(args.file, det=args.det)
    else:
        hdu = fits.open(args.file)
        img = hdu[args.exten].data
        # Write

    ginga.show_image(img)
Code example #4
File: arcid_plot.py Project: catherinemanea/PypeIt
def main(args):
    """
    Parameters
    ----------
    args

    Returns
    -------

    """
    import numpy as np

    try:
        from xastropy.xutils import xdebug as debugger
    except ImportError:
        import pdb as debugger

    from linetools.utils import loadjson

    from pypeit import arqa
    from pypeit import msgs
    msgs.reset(verbosity=2)

    # Read JSON
    fdict = loadjson(args.wave_soln)
    for key in fdict.keys():
        if isinstance(fdict[key], list):
            fdict[key] = np.array(fdict[key])

    # Generate QA
    arqa.arc_fit_qa(None,
                    fdict,
                    outfil=args.outfile,
                    ids_only=True,
                    title=args.title)
    print("Wrote {:s}".format(args.outfile))
Code example #5
File: coadd_datacube.py Project: ninoc/PypeIt
def coadd_cube(files, parset, overwrite=False):
    """ Main routine to coadd spec2D files into a 3D datacube

    Args:
        files (list):
            List of all spec2D files
        parset (:class:`pypeit.par.pypeitpar.PypeItPar`):
            An instance of the parameter set.
        overwrite (bool):
            Overwrite the output file, if it exists?
    """
    # Get the detector number
    det = 1 if parset is None else parset['rdx']['detnum']

    # Load the spectrograph
    spec2DObj = spec2dobj.Spec2DObj.from_file(files[0], det)
    specname = spec2DObj.head0['PYP_SPEC']
    spec = load_spectrograph(specname)

    # Grab the parset, if not provided
    if parset is None: parset = spec.default_pypeit_par()
    cubepar = parset['reduce']['cube']

    # Check the output file
    outfile = cubepar['output_filename'] if ".fits" in cubepar[
        'output_filename'] else cubepar['output_filename'] + ".fits"
    out_whitelight = outfile.replace(".fits", "_whitelight.fits")
    if os.path.exists(outfile) and not overwrite:
        msgs.error("Output filename already exists:" + msgs.newline() +
                   outfile)
    elif os.path.exists(
            out_whitelight) and cubepar['save_whitelight'] and not overwrite:
        msgs.error("Output filename already exists:" + msgs.newline() +
                   out_whitelight)
    # Check the reference cube and image exist, if requested
    ref_scale = None  # This will be used to correct relative scaling among the various input frames
    if cubepar['standard_cube'] is not None:
        if not os.path.exists(cubepar['standard_cube']):
            msgs.error("Standard cube does not exist:" + msgs.newline() +
                       cubepar['reference_cube'])
        cube = fits.open(cubepar['standard_cube'])
        ref_scale = cube['REFSCALE'].data
    if cubepar['reference_image'] is not None:
        if not os.path.exists(cubepar['reference_image']):
            msgs.error("Reference cube does not exist:" + msgs.newline() +
                       cubepar['reference_image'])
    if cubepar['flux_calibrate']:
        msgs.error("Flux calibration is not currently implemented" +
                   msgs.newline() + "Please set 'flux_calibrate = False'")

    # prep
    numfiles = len(files)
    combine = cubepar['combine']

    all_ra, all_dec, all_wave = np.array([]), np.array([]), np.array([])
    all_sci, all_ivar, all_idx, all_wghts = np.array([]), np.array(
        []), np.array([]), np.array([])
    all_wcs = []
    dspat = None if cubepar['spatial_delta'] is None else cubepar[
        'spatial_delta'] / 3600.0  # binning size on the sky (/3600 to convert to degrees)
    dwv = cubepar[
        'wave_delta']  # binning size in wavelength direction (in Angstroms)
    wave_ref = None
    whitelight_img = None  # This is the whitelight image based on all input spec2d frames
    weights = np.ones(numfiles)  # Weights to use when combining cubes
    for ff, fil in enumerate(files):
        # Load it up
        spec2DObj = spec2dobj.Spec2DObj.from_file(fil, det)
        detector = spec2DObj.detector

        # Setup for PypeIt imports
        msgs.reset(verbosity=2)

        if ref_scale is None:
            ref_scale = spec2DObj.scaleimg.copy()
        # Extract the information
        sciimg = (spec2DObj.sciimg - spec2DObj.skymodel) * (
            ref_scale / spec2DObj.scaleimg
        )  # Subtract sky and apply relative sky
        ivar = spec2DObj.ivarraw / (ref_scale / spec2DObj.scaleimg)**2
        waveimg = spec2DObj.waveimg
        bpmmask = spec2DObj.bpmmask

        # Grab the slit edges
        slits = spec2DObj.slits

        wave0 = waveimg[waveimg != 0.0].min()
        diff = waveimg[1:, :] - waveimg[:-1, :]
        dwv = float(np.median(diff[diff != 0.0]))
        msgs.info(
            "Using wavelength solution: wave0={0:.3f}, dispersion={1:.3f} Angstrom/pixel"
            .format(wave0, dwv))

        msgs.info("Constructing slit image")
        slitid_img_init = slits.slit_img(pad=0,
                                         initial=True,
                                         flexure=spec2DObj.sci_spat_flexure)
        onslit_gpm = (slitid_img_init > 0) & (bpmmask == 0)

        # Grab the WCS of this frame
        wcs = spec.get_wcs(spec2DObj.head0, slits, detector.platescale, wave0,
                           dwv)
        all_wcs.append(copy.deepcopy(wcs))

        # Find the largest spatial scale of all images being combined
        # TODO :: probably need to put this in the DetectorContainer
        pxscl = detector.platescale * parse.parse_binning(
            detector.binning)[1] / 3600.0  # This should be degrees/pixel
        slscl = spec.get_meta_value([spec2DObj.head0], 'slitwid')
        if dspat is None:
            dspat = max(pxscl, slscl)
        elif max(pxscl, slscl) > dspat:
            dspat = max(pxscl, slscl)

        # Generate an RA/DEC image
        msgs.info("Generating RA/DEC image")
        raimg, decimg, minmax = slits.get_radec_image(
            wcs, initial=True, flexure=spec2DObj.sci_spat_flexure)

        # Perform the DAR correction
        if wave_ref is None:
            wave_ref = 0.5 * (np.min(waveimg[onslit_gpm]) +
                              np.max(waveimg[onslit_gpm]))
        # Get DAR parameters
        raval = spec.get_meta_value([spec2DObj.head0], 'ra')
        decval = spec.get_meta_value([spec2DObj.head0], 'dec')
        obstime = spec.get_meta_value([spec2DObj.head0], 'obstime')
        pressure = spec.get_meta_value([spec2DObj.head0], 'pressure')
        temperature = spec.get_meta_value([spec2DObj.head0], 'temperature')
        rel_humidity = spec.get_meta_value([spec2DObj.head0], 'humidity')
        coord = SkyCoord(raval, decval, unit=(units.deg, units.deg))
        location = spec.location  # TODO :: spec.location should probably end up in the TelescopePar (spec.telescope.location)
        ra_corr, dec_corr = dc_utils.dar_correction(waveimg[onslit_gpm],
                                                    coord,
                                                    obstime,
                                                    location,
                                                    pressure,
                                                    temperature,
                                                    rel_humidity,
                                                    wave_ref=wave_ref)
        raimg[onslit_gpm] += ra_corr
        decimg[onslit_gpm] += dec_corr

        # Get copies of arrays to be saved
        wave_ext = waveimg[onslit_gpm].copy()
        flux_ext = sciimg[onslit_gpm].copy()
        ivar_ext = ivar[onslit_gpm].copy()

        # Perform extinction correction
        msgs.info("Applying extinction correction")
        longitude = spec.telescope['longitude']
        latitude = spec.telescope['latitude']
        airmass = spec2DObj.head0[spec.meta['airmass']['card']]
        extinct = load_extinction_data(longitude, latitude)
        # extinction_correction requires the wavelengths to be sorted
        wvsrt = np.argsort(wave_ext)
        ext_corr = extinction_correction(wave_ext[wvsrt] * units.AA, airmass,
                                         extinct)
        # Correct for extinction
        flux_sav = flux_ext[wvsrt] * ext_corr
        ivar_sav = ivar_ext[wvsrt] / ext_corr**2
        # sort back to the original ordering
        resrt = np.argsort(wvsrt)

        # Calculate the weights relative to the zeroth cube
        if ff != 0:
            weights[ff] = np.median(flux_sav[resrt] *
                                    np.sqrt(ivar_sav[resrt]))**2

        # Store the information
        numpix = raimg[onslit_gpm].size
        all_ra = np.append(all_ra, raimg[onslit_gpm].copy())
        all_dec = np.append(all_dec, decimg[onslit_gpm].copy())
        all_wave = np.append(all_wave, wave_ext.copy())
        all_sci = np.append(all_sci, flux_sav[resrt].copy())
        all_ivar = np.append(all_ivar, ivar_sav[resrt].copy())
        all_idx = np.append(all_idx, ff * np.ones(numpix))
        all_wghts = np.append(all_wghts, weights[ff] * np.ones(numpix))

    # Grab cos(dec) for convenience
    cosdec = np.cos(np.mean(all_dec) * np.pi / 180.0)

    # Register spatial offsets between all frames if several frames are being combined
    if combine:

        # Check if a reference whitelight image should be used to register the offsets
        if cubepar["reference_image"] is None:
            # Generate white light images
            whitelight_imgs, _, _ = dc_utils.make_whitelight(
                all_ra, all_dec, all_wave, all_sci, all_wghts, all_idx, dspat)
            # ref_idx will be the index of the cube with the highest S/N
            ref_idx = np.argmax(weights)
            reference_image = whitelight_imgs[:, :, ref_idx].copy()
            msgs.info(
                "Calculating spatial translation of each cube relative to cube #{0:d}"
                .format(ref_idx + 1))
        else:
            ref_idx = -1  # Don't use an index
            # Load reference information
            reference_image, whitelight_imgs, wlwcs = \
                dc_utils.make_whitelight_fromref(all_ra, all_dec, all_wave, all_sci, all_wghts, all_idx, dspat,
                                                 cubepar['reference_image'])
            msgs.info(
                "Calculating the spatial translation of each cube relative to user-defined 'reference_image'"
            )
        # Calculate the image offsets - check the reference is a zero shift
        ra_shift_ref, dec_shift_ref = calculate_image_offset(
            reference_image.copy(), reference_image.copy())
        for ff in range(numfiles):
            # Don't correlate the reference image with itself
            if ff == ref_idx:
                continue
            # Calculate the shift
            ra_shift, dec_shift = calculate_image_offset(
                whitelight_imgs[:, :, ff], reference_image.copy())
            # Convert to reference
            ra_shift -= ra_shift_ref
            dec_shift -= dec_shift_ref
            # Convert the pixel shift to a shift in degrees
            ra_shift *= dspat / cosdec
            dec_shift *= dspat
            msgs.info(
                "Spatial shift of cube #{0:d}: RA, DEC (arcsec) = {1:+0.3f}, {2:+0.3f}"
                .format(ff + 1, ra_shift * 3600.0, dec_shift * 3600.0))
            # Apply the shift
            all_ra[all_idx == ff] += ra_shift
            all_dec[all_idx == ff] += dec_shift

        # Generate a white light image of *all* data
        msgs.info("Generating global white light image")
        if cubepar["reference_image"] is None:
            whitelight_img, _, wlwcs = dc_utils.make_whitelight(
                all_ra, all_dec, all_wave, all_sci, all_wghts,
                np.zeros(all_ra.size), dspat)
        else:
            _, whitelight_img, wlwcs = \
                dc_utils.make_whitelight_fromref(all_ra, all_dec, all_wave, all_sci, all_wghts, np.zeros(all_ra.size),
                                                 dspat, cubepar['reference_image'])

        # Calculate the relative spectral weights of all pixels
        all_wghts = dc_utils.compute_weights(
            all_ra,
            all_dec,
            all_wave,
            all_sci,
            all_ivar,
            all_idx,
            whitelight_img[:, :, 0],
            dspat,
            dwv,
            relative_weights=cubepar['relative_weights'])
    # Check if a whitelight image should be saved
    if cubepar['save_whitelight']:
        # Check if the white light image still needs to be generated - if so, generate it now
        if whitelight_img is None:
            msgs.info("Generating global white light image")
            if cubepar["reference_image"] is None:
                whitelight_img, _, wlwcs = dc_utils.make_whitelight(
                    all_ra, all_dec, all_wave, all_sci, all_wghts,
                    np.zeros(all_ra.size), dspat)
            else:
                _, whitelight_img, wlwcs = \
                    dc_utils.make_whitelight_fromref(all_ra, all_dec, all_wave, all_sci, all_wghts,
                                                     np.zeros(all_ra.size),
                                                     dspat, cubepar['reference_image'])
        # Prepare and save the fits file
        msgs.info("Saving white light image as: {0:s}".format(out_whitelight))
        img_hdu = fits.PrimaryHDU(whitelight_img.T, header=wlwcs.to_header())
        img_hdu.writeto(out_whitelight, overwrite=overwrite)

    # Setup the cube ranges
    ra_min = cubepar['ra_min'] if cubepar['ra_min'] is not None else np.min(
        all_ra)
    ra_max = cubepar['ra_max'] if cubepar['ra_max'] is not None else np.max(
        all_ra)
    dec_min = cubepar['dec_min'] if cubepar['dec_min'] is not None else np.min(
        all_dec)
    dec_max = cubepar['dec_max'] if cubepar['dec_max'] is not None else np.max(
        all_dec)
    wav_min = cubepar['wave_min'] if cubepar[
        'wave_min'] is not None else np.min(all_wave)
    wav_max = cubepar['wave_max'] if cubepar[
        'wave_max'] is not None else np.max(all_wave)
    if cubepar['wave_delta'] is not None: dwv = cubepar['wave_delta']
    # Generate a master WCS to register all frames
    coord_min = [ra_min, dec_min, wav_min]
    coord_dlt = [dspat, dspat, dwv]
    masterwcs = dc_utils.generate_masterWCS(coord_min,
                                            coord_dlt,
                                            name=specname)
    msgs.info(msgs.newline() + "-" * 40 + msgs.newline() +
              "Parameters of the WCS:" + msgs.newline() +
              "RA   min, max = {0:f}, {1:f}".format(ra_min, ra_max) +
              msgs.newline() +
              "DEC  min, max = {0:f}, {1:f}".format(dec_min, dec_max) +
              msgs.newline() +
              "WAVE min, max = {0:f}, {1:f}".format(wav_min, wav_max) +
              msgs.newline() + "Spaxel size = {0:f}''".format(3600.0 * dspat) +
              msgs.newline() + "Wavelength step = {0:f} A".format(dwv) +
              msgs.newline() + "-" * 40)

    # Generate the output binning
    if combine:
        numra = int((ra_max - ra_min) * cosdec / dspat)
        numdec = int((dec_max - dec_min) / dspat)
        numwav = int((wav_max - wav_min) / dwv)
        xbins = np.arange(1 + numra) - 0.5
        ybins = np.arange(1 + numdec) - 0.5
        spec_bins = np.arange(1 + numwav) - 0.5
    else:
        slitlength = int(
            np.round(
                np.median(slits.get_slitlengths(initial=True, median=True))))
        numwav = int((np.max(waveimg) - wave0) / dwv)
        xbins, ybins, spec_bins = spec.get_datacube_bins(
            slitlength, minmax, numwav)

    # Make the cube
    msgs.info("Generating pixel coordinates")
    if combine:
        pix_coord = masterwcs.wcs_world2pix(all_ra, all_dec,
                                            all_wave * 1.0E-10, 0)
        hdr = masterwcs.to_header()
    else:
        pix_coord = wcs.wcs_world2pix(
            np.vstack((all_ra, all_dec, all_wave * 1.0E-10)).T, 0)
        hdr = wcs.to_header()

    # Find the NGP coordinates for all input pixels
    msgs.info("Generating data cube")
    bins = (xbins, ybins, spec_bins)
    datacube, edges = np.histogramdd(pix_coord,
                                     bins=bins,
                                     weights=all_sci * all_wghts)
    norm, edges = np.histogramdd(pix_coord, bins=bins, weights=all_wghts)
    norm_cube = (norm > 0) / (norm + (norm == 0))
    datacube *= norm_cube
    # Create the variance cube, including weights
    msgs.info("Generating variance cube")
    all_var = (all_ivar > 0) / (all_ivar + (all_ivar == 0))
    var_cube, edges = np.histogramdd(pix_coord,
                                     bins=bins,
                                     weights=all_var * all_wghts**2)
    var_cube *= norm_cube**2

    # Save the datacube
    debug = False
    if debug:
        datacube_resid, edges = np.histogramdd(pix_coord,
                                               bins=(xbins, ybins, spec_bins),
                                               weights=all_sci *
                                               np.sqrt(all_ivar))
        norm, edges = np.histogramdd(pix_coord, bins=(xbins, ybins, spec_bins))
        norm_cube = (norm > 0) / (norm + (norm == 0))
        outfile = "datacube_resid.fits"
        msgs.info("Saving datacube as: {0:s}".format(outfile))
        hdu = fits.PrimaryHDU((datacube_resid * norm_cube).T,
                              header=masterwcs.to_header())
        hdu.writeto(outfile, overwrite=overwrite)

    msgs.info("Saving datacube as: {0:s}".format(outfile))
    final_cube = dc_utils.DataCube(datacube.T,
                                   var_cube.T,
                                   specname,
                                   refscale=ref_scale,
                                   fluxed=cubepar['flux_calibrate'])
    final_cube.to_file(outfile, hdr=hdr, overwrite=overwrite)
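
A minimal sketch of calling coadd_cube() as defined above, assuming KCWI spec2d outputs; the file names, the 'keck_kcwi' spectrograph choice, and the output cube name are hypothetical, and the parameter set is built from the spectrograph defaults exactly as the function itself does when parset is None:

# Hedged usage sketch for coadd_cube(); file names are hypothetical.
from pypeit.spectrographs.util import load_spectrograph

files = ['spec2d_frame1.fits', 'spec2d_frame2.fits']
parset = load_spectrograph('keck_kcwi').default_pypeit_par()
parset['reduce']['cube']['output_filename'] = 'my_cube.fits'
coadd_cube(files, parset, overwrite=True)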
Code example #6
def main(args):

    import subprocess

    from astropy.io import fits

    from pypeit import msgs
    from pypeit.spectrographs import keck_lris
    from pypeit.spectrographs import keck_deimos
    from pypeit.spectrographs import gemini_gmos
    from pypeit.display import display
    from pypeit.spectrographs import mmt_binospec
    from pypeit.spectrographs import mmt_mmirs
    from pypeit.spectrographs import mmt_bluechannel
    from pypeit.spectrographs import util
    from pypeit import io

    # List only?
    if args.list:
        hdu = io.fits_open(args.file)
        print(hdu.info())
        return

    # Setup for PYPIT imports
    msgs.reset(verbosity=2)

    # RAW_LRIS??
    if 'keck_lris' in args.spectrograph:
        #
        if args.spectrograph == 'keck_lris_red_orig':
            gen_lris = keck_lris.KeckLRISROrigSpectrograph()
            img = gen_lris.get_rawimage(args.file, 1)[1]
        else:
            gen_lris = keck_lris.KeckLRISRSpectrograph(
            )  # Using LRISr, but this will work for LRISb too
            img = gen_lris.get_rawimage(args.file, None)[1]
    # RAW_DEIMOS??
    elif args.spectrograph == 'keck_deimos':
        #
        gen_deimos = keck_deimos.KeckDEIMOSSpectrograph()
        img = gen_deimos.get_rawimage(args.file, None)[1]
    # RAW_GEMINI??
    elif 'gemini_gmos' in args.spectrograph:
        # TODO this routine should show the whole mosaic if no detector number is passed in!
        # Need to figure out the number of amps
        spectrograph = util.load_spectrograph(args.spectrograph)
        img = spectrograph.get_rawimage(args.file, args.det)[1]
    # RAW_BinoSpec
    elif args.spectrograph == 'mmt_binospec':
        #
        gen_bino = mmt_binospec.MMTBINOSPECSpectrograph()
        img = gen_bino.get_rawimage(args.file, args.det)[1]
    # RAW_MMIRS
    elif args.spectrograph == 'mmt_mmirs':
        gen_mmirs = mmt_mmirs.MMTMMIRSSpectrograph()
        img = gen_mmirs.get_rawimage(args.file, args.det)[1]
    # RAW MMT blue channel
    elif args.spectrograph == 'mmt_bluechannel':
        gen_bluechan = mmt_bluechannel.MMTBlueChannelSpectrograph()
        img = gen_bluechan.get_rawimage(args.file, args.det)[1]
    else:
        hdu = io.fits_open(args.file)
        img = hdu[args.exten].data
        # Write

    display.show_image(img, chname=args.chname)
Code example #7
File: coadd_datacube.py Project: p-holguin/PypeIt
def coadd_cube(files, det=1, overwrite=False):
    """ Main routine to coadd spec2D files

    Args:
        files (list):
            List of all spec2D files
        det (int):
            detector
        overwrite (bool):
            Overwrite the output file, if it exists?
    """
    outfile = "datacube.fits"
    if os.path.exists(outfile) and not overwrite:
        msgs.error("Output filename already exists:" + msgs.newline() +
                   outfile)
    # prep
    numfiles = len(files)
    combine = True if numfiles > 1 else False

    all_ra, all_dec, all_wave = np.array([]), np.array([]), np.array([])
    all_sci, all_ivar, all_idx, all_wghts = np.array([]), np.array(
        []), np.array([]), np.array([])
    all_wcs = []
    dspat = None  # binning size on the sky (in arcsec)
    ref_scale = None  # This will be used to correct relative scaling among the various input frames
    wave_ref = None
    weights = np.ones(numfiles)  # Weights to use when combining cubes
    for ff, fil in enumerate(files):
        # Load it up
        spec2DObj = spec2dobj.Spec2DObj.from_file(fil, det)

        # Load the spectrograph
        specname = spec2DObj.head0['SPECTROG']
        spec = load_spectrograph(specname)
        detector = spec2DObj.detector

        # Setup for PypeIt imports
        msgs.reset(verbosity=2)

        if ref_scale is None:
            ref_scale = spec2DObj.scaleimg.copy()
        # Extract the information
        sciimg = (spec2DObj.sciimg - spec2DObj.skymodel) * (
            ref_scale / spec2DObj.scaleimg
        )  # Subtract sky and apply relative sky
        ivar = spec2DObj.ivarraw / (ref_scale / spec2DObj.scaleimg)**2
        waveimg = spec2DObj.waveimg
        bpmmask = spec2DObj.bpmmask

        # Grab the slit edges
        slits = spec2DObj.slits

        wave0 = waveimg[waveimg != 0.0].min()
        diff = waveimg[1:, :] - waveimg[:-1, :]
        dwv = float(np.median(diff[diff != 0.0]))
        msgs.info(
            "Using wavelength solution: wave0={0:.3f}, dispersion={1:.3f} Angstrom/pixel"
            .format(wave0, dwv))

        msgs.info("Constructing slit image")
        slitid_img_init = slits.slit_img(pad=0,
                                         initial=True,
                                         flexure=spec2DObj.sci_spat_flexure)
        onslit_gpm = (slitid_img_init > 0) & (bpmmask == 0)

        # Grab the WCS of this frame
        wcs = spec.get_wcs(spec2DObj.head0, slits, detector.platescale, wave0,
                           dwv)
        all_wcs.append(copy.deepcopy(wcs))

        # Find the largest spatial scale of all images being combined
        # TODO :: probably need to put this in the DetectorContainer
        pxscl = detector.platescale * parse.parse_binning(
            detector.binning)[1] / 3600.0  # This should be degrees/pixel
        slscl = spec.get_meta_value([spec2DObj.head0], 'slitwid')
        if dspat is None:
            dspat = max(pxscl, slscl)
        elif max(pxscl, slscl) > dspat:
            dspat = max(pxscl, slscl)

        # Generate an RA/DEC image
        msgs.info("Generating RA/DEC image")
        raimg, decimg, minmax = slits.get_radec_image(
            wcs, initial=True, flexure=spec2DObj.sci_spat_flexure)

        # Perform the DAR correction
        if wave_ref is None:
            wave_ref = 0.5 * (np.min(waveimg[onslit_gpm]) +
                              np.max(waveimg[onslit_gpm]))
        # Get DAR parameters
        raval = spec.get_meta_value([spec2DObj.head0], 'ra')
        decval = spec.get_meta_value([spec2DObj.head0], 'dec')
        obstime = spec.get_meta_value([spec2DObj.head0], 'obstime')
        pressure = spec.get_meta_value([spec2DObj.head0], 'pressure')
        temperature = spec.get_meta_value([spec2DObj.head0], 'temperature')
        rel_humidity = spec.get_meta_value([spec2DObj.head0], 'humidity')
        coord = SkyCoord(raval, decval, unit=(units.deg, units.deg))
        location = spec.location  # TODO :: spec.location should probably end up in the TelescopePar (spec.telescope.location)
        ra_corr, dec_corr = dc_utils.dar_correction(waveimg[onslit_gpm],
                                                    coord,
                                                    obstime,
                                                    location,
                                                    pressure,
                                                    temperature,
                                                    rel_humidity,
                                                    wave_ref=wave_ref)
        raimg[onslit_gpm] += ra_corr
        decimg[onslit_gpm] += dec_corr

        # Get copies of arrays to be saved
        wave_ext = waveimg[onslit_gpm].copy()
        flux_ext = sciimg[onslit_gpm].copy()
        ivar_ext = ivar[onslit_gpm].copy()

        # Perform extinction correction
        msgs.info("Applying extinction correction")
        longitude = spec.telescope['longitude']
        latitude = spec.telescope['latitude']
        airmass = spec2DObj.head0[spec.meta['airmass']['card']]
        extinct = load_extinction_data(longitude, latitude)
        # extinction_correction requires the wavelengths to be sorted
        wvsrt = np.argsort(wave_ext)
        ext_corr = extinction_correction(wave_ext[wvsrt] * units.AA, airmass,
                                         extinct)
        # Correct for extinction
        flux_sav = flux_ext[wvsrt] * ext_corr
        ivar_sav = ivar_ext[wvsrt] / ext_corr**2
        # sort back to the original ordering
        resrt = np.argsort(wvsrt)

        # Calculate the weights relative to the zeroth cube
        if ff != 0:
            weights[ff] = np.median(flux_sav[resrt] *
                                    np.sqrt(ivar_sav[resrt]))**2

        # Store the information
        numpix = raimg[onslit_gpm].size
        all_ra = np.append(all_ra, raimg[onslit_gpm].copy())
        all_dec = np.append(all_dec, decimg[onslit_gpm].copy())
        all_wave = np.append(all_wave, wave_ext.copy())
        all_sci = np.append(all_sci, flux_sav[resrt].copy())
        all_ivar = np.append(all_ivar, ivar_sav[resrt].copy())
        all_idx = np.append(all_idx, ff * np.ones(numpix))
        all_wghts = np.append(all_wghts, weights[ff] * np.ones(numpix))

    # Grab cos(dec) for convenience
    cosdec = np.cos(np.mean(all_dec) * np.pi / 180.0)

    # Register spatial offsets between all frames if several frames are being combined
    if combine:
        # Generate white light images
        whitelight_imgs, _ = dc_utils.make_whitelight(all_ra,
                                                      all_dec,
                                                      all_wave,
                                                      all_sci,
                                                      all_wghts,
                                                      all_idx,
                                                      dspat,
                                                      numfiles=numfiles)

        # ref_idx will be the index of the cube with the highest S/N
        ref_idx = np.argmax(weights)
        msgs.info(
            "Calculating the relative spatial translation of each cube (reference cube = {0:d})"
            .format(ref_idx + 1))
        # Calculate the image offsets - check the reference is a zero shift
        ra_shift_ref, dec_shift_ref = calculate_image_offset(
            whitelight_imgs[:, :, ref_idx], whitelight_imgs[:, :, ref_idx])
        for ff in range(numfiles):
            # Don't correlate the reference image with itself
            if ff == ref_idx:
                continue
            # Calculate the shift
            ra_shift, dec_shift = calculate_image_offset(
                whitelight_imgs[:, :, ff], whitelight_imgs[:, :, ref_idx])
            # Convert to reference
            ra_shift -= ra_shift_ref
            dec_shift -= dec_shift_ref
            # Convert the pixel shift to a shift in degrees
            ra_shift *= dspat / cosdec
            dec_shift *= dspat
            msgs.info(
                "Image shift of cube {0:d}: RA, DEC (arcsec) = {1:+0.3f}, {2:+0.3f}"
                .format(ff + 1, ra_shift * 3600.0, dec_shift * 3600.0))
            # Apply the shift
            all_ra[all_idx == ff] += ra_shift
            all_dec[all_idx == ff] += dec_shift

        # Calculate the relative spectral weights of all pixels
        all_wghts = dc_utils.compute_weights(all_ra,
                                             all_dec,
                                             all_wave,
                                             all_sci,
                                             all_ivar,
                                             all_wghts,
                                             all_idx,
                                             dspat,
                                             dwv,
                                             numfiles=numfiles)

    # Generate a master WCS to register all frames
    coord_min = [np.min(all_ra), np.min(all_dec), np.min(all_wave)]
    coord_dlt = [dspat, dspat, dwv]
    masterwcs = dc_utils.generate_masterWCS(coord_min, coord_dlt)

    # Generate the output binning
    if combine:
        numra = int((np.max(all_ra) - np.min(all_ra)) * cosdec / dspat)
        numdec = int((np.max(all_dec) - np.min(all_dec)) / dspat)
        numwav = int((np.max(all_wave) - np.min(all_wave)) / dwv)
        xbins = np.arange(1 + numra) - 0.5
        ybins = np.arange(1 + numdec) - 0.5
        spec_bins = np.arange(1 + numwav) - 0.5
    else:
        slitlength = int(
            np.round(
                np.median(slits.get_slitlengths(initial=True, median=True))))
        numwav = int((np.max(waveimg) - wave0) / dwv)
        xbins, ybins, spec_bins = spec.get_datacube_bins(
            slitlength, minmax, numwav)

    # Make the cube
    msgs.info("Generating pixel coordinates")
    if combine:
        pix_coord = masterwcs.wcs_world2pix(all_ra, all_dec,
                                            all_wave * 1.0E-10, 0)
        hdr = masterwcs.to_header()
    else:
        pix_coord = wcs.wcs_world2pix(
            np.vstack((all_ra, all_dec, all_wave * 1.0E-10)).T, 0)
        hdr = wcs.to_header()

    # Find the NGP coordinates for all input pixels
    msgs.info("Generating data cube")
    bins = (xbins, ybins, spec_bins)
    datacube, edges = np.histogramdd(pix_coord,
                                     bins=bins,
                                     weights=all_sci * all_wghts)
    norm, edges = np.histogramdd(pix_coord, bins=bins, weights=all_wghts)
    norm_cube = (norm > 0) / (norm + (norm == 0))
    datacube *= norm_cube
    # Create the variance cube, including weights
    msgs.info("Generating variance cube")
    all_var = (all_ivar > 0) / (all_ivar + (all_ivar == 0))
    var_cube, edges = np.histogramdd(pix_coord,
                                     bins=bins,
                                     weights=all_var * all_wghts**2)
    var_cube *= norm_cube**2

    # Save the datacube
    debug = False
    if debug:
        datacube_resid, edges = np.histogramdd(pix_coord,
                                               bins=(xbins, ybins, spec_bins),
                                               weights=all_sci *
                                               np.sqrt(all_ivar))
        norm, edges = np.histogramdd(pix_coord, bins=(xbins, ybins, spec_bins))
        norm_cube = (norm > 0) / (norm + (norm == 0))
        outfile = "datacube_resid.fits"
        msgs.info("Saving datacube as: {0:s}".format(outfile))
        hdu = fits.PrimaryHDU((datacube_resid * norm_cube).T,
                              header=masterwcs.to_header())
        hdu.writeto(outfile, overwrite=overwrite)

    msgs.info("Saving datacube as: {0:s}".format(outfile))
    primary_hdu = fits.PrimaryHDU(header=spec2DObj.head0)
    sci_hdu = fits.ImageHDU(datacube.T, name="scicube", header=hdr)
    var_hdu = fits.ImageHDU(var_cube.T, name="varcube", header=hdr)
    hdulist = fits.HDUList([primary_hdu, sci_hdu, var_hdu])
    hdulist.writeto(outfile, overwrite=overwrite)
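
This older variant takes a detector number instead of a parameter set and always writes datacube.fits; a minimal sketch of the call, with hypothetical file names:

# Hedged usage sketch for the older coadd_cube() signature.
files = ['spec2d_frame1.fits', 'spec2d_frame2.fits']
coadd_cube(files, det=1, overwrite=True)   # output is written to datacube.fits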
Code example #8
    def main(args):

        # List only?
        if args.list:
            io.fits_open(args.file).info()
            return

        # Parse the detector name
        try:
            det = int(args.det)
        except (TypeError, ValueError):
            detname = args.det
        else:
            detname = DetectorContainer.get_name(det)

        # Load it up -- NOTE WE ALLOW *OLD* VERSIONS TO GO FORTH
        spec2DObj = spec2dobj.Spec2DObj.from_file(args.file,
                                                  detname,
                                                  chk_version=False)
        # Use the appropriate class to get the "detector" number
        det = spec2DObj.detector.parse_name(detname)

        # Setup for PypeIt imports
        msgs.reset(verbosity=args.verbosity)

        # Find the set of channels to show
        if args.channels is not None:
            show_channels = [int(item) for item in args.channels.split(',')]
        else:
            show_channels = [0, 1, 2, 3]

        # Grab the slit edges
        slits = spec2DObj.slits
        if spec2DObj.sci_spat_flexure is not None:
            msgs.info("Offseting slits by {}".format(
                spec2DObj.sci_spat_flexure))
        all_left, all_right, mask = slits.select_edges(
            flexure=spec2DObj.sci_spat_flexure)
        # TODO -- This may be too restrictive, i.e. ignore BADFLTCALIB??
        gpm = mask == 0
        left = all_left[:, gpm]
        right = all_right[:, gpm]
        slid_IDs = spec2DObj.slits.slitord_id[gpm]
        maskdef_id = None if spec2DObj.slits.maskdef_id is None \
                            else spec2DObj.slits.maskdef_id[gpm]
        bitMask = ImageBitMask()

        # Object traces from spec1d file
        spec1d_file = args.file.replace('spec2d', 'spec1d')
        if args.file[-2:] == 'gz':
            spec1d_file = spec1d_file[:-3]
        if os.path.isfile(spec1d_file):
            sobjs = specobjs.SpecObjs.from_fitsfile(spec1d_file,
                                                    chk_version=False)
        else:
            sobjs = None
            msgs.warn('Could not find spec1d file: {:s}'.format(spec1d_file) +
                      msgs.newline() +
                      '                          No objects were extracted.')

        display.connect_to_ginga(raise_err=True, allow_new=True)

        # Now show each image to a separate channel

        # Show the bitmask?
        mask_in = None
        if args.showmask:
            viewer, ch_mask = display.show_image(spec2DObj.bpmmask,
                                                 chname="BPM",
                                                 waveimg=spec2DObj.waveimg,
                                                 clear=args.clear)

        channel_names = []
        # SCIIMG
        if 0 in show_channels:
            image = spec2DObj.sciimg  # Processed science image
            mean, med, sigma = sigma_clipped_stats(
                image[spec2DObj.bpmmask == 0],
                sigma_lower=5.0,
                sigma_upper=5.0)
            cut_min = mean - 1.0 * sigma
            cut_max = mean + 4.0 * sigma
            chname_sci = args.prefix + f'sciimg-{detname}'
            # Clear all channels at the beginning
            viewer, ch_sci = display.show_image(image,
                                                chname=chname_sci,
                                                waveimg=spec2DObj.waveimg,
                                                clear=args.clear,
                                                cuts=(cut_min, cut_max))

            if sobjs is not None:
                show_trace(sobjs, detname, viewer, ch_sci)
            display.show_slits(viewer,
                               ch_sci,
                               left,
                               right,
                               slit_ids=slid_IDs,
                               maskdef_ids=maskdef_id)
            channel_names.append(chname_sci)

        # SKYSUB
        if 1 in show_channels:
            if args.ignore_extract_mask:
                # TODO -- Is there a cleaner way to do this?
                gpm = (spec2DObj.bpmmask == 0) | (spec2DObj.bpmmask == 2**
                                                  bitMask.bits['EXTRACT'])
            else:
                gpm = spec2DObj.bpmmask == 0

            image = (spec2DObj.sciimg - spec2DObj.skymodel) * gpm
            mean, med, sigma = sigma_clipped_stats(
                image[spec2DObj.bpmmask == 0],
                sigma_lower=5.0,
                sigma_upper=5.0)
            cut_min = mean - 1.0 * sigma
            cut_max = mean + 4.0 * sigma
            chname_skysub = args.prefix + f'skysub-{detname}'
            viewer, ch_skysub = display.show_image(image,
                                                   chname=chname_skysub,
                                                   waveimg=spec2DObj.waveimg,
                                                   bitmask=bitMask,
                                                   mask=mask_in,
                                                   cuts=(cut_min, cut_max),
                                                   wcs_match=True)
            if not args.removetrace and sobjs is not None:
                show_trace(sobjs, detname, viewer, ch_skysub)
            display.show_slits(viewer,
                               ch_skysub,
                               left,
                               right,
                               slit_ids=slid_IDs,
                               maskdef_ids=maskdef_id)
            channel_names.append(chname_skysub)

        # TODO Place holder for putting in sensfunc
        #if args.sensfunc:
        #    # Load the sensitivity function
        #    wave_sens, sfunc, _, _, _ = sensfunc.SensFunc.load(sensfunc_masterframe_name)
        #    # Interpolate the sensitivity function onto the wavelength grid of the data. Since the image is rectified
        #    # this is trivial and we don't need to do a 2d interpolation
        #    sens_factor = flux_calib.get_sensfunc_factor(
        #        pseudo_dict['wave_mid'][:,islit], wave_sens, sfunc, fits.getheader(files[0])['TRUITIME'],
        #        extrap_sens=parset['fluxcalib']['extrap_sens'])
        #    # Compute the median sensitivity and set the sensitivity to zero at locations 100 times the median. This
        #    # prevents the 2d image from blowing up where the sens_factor explodes because there is no throughput
        #    sens_gpm = sens_factor < 100.0*np.median(sens_factor)
        #    sens_factor_masked = sens_factor*sens_gpm
        #    sens_factor_img = np.repeat(sens_factor_masked[:, np.newaxis], pseudo_dict['nspat'], axis=1)
        #    imgminsky = sens_factor_img*pseudo_dict['imgminsky']
        #    imgminsky_gpm = sens_gpm[:, np.newaxis] & pseudo_dict['inmask']
        #else:
        #    imgminsky= pseudo_dict['imgminsky']

        # SKRESIDS
        if 2 in show_channels:
            # this gpm block repeats the one from channel 1 so that showing
            # this channel on its own does not fail with gpm undefined
            if args.ignore_extract_mask:
                # TODO -- Is there a cleaner way to do this?
                gpm = (spec2DObj.bpmmask == 0) | (spec2DObj.bpmmask == 2**
                                                  bitMask.bits['EXTRACT'])
            else:
                gpm = spec2DObj.bpmmask == 0
            chname_skyresids = args.prefix + f'sky_resid-{detname}'
            image = (spec2DObj.sciimg - spec2DObj.skymodel) * np.sqrt(
                spec2DObj.ivarmodel) * gpm
            viewer, ch_sky_resids = display.show_image(
                image,
                chname_skyresids,
                waveimg=spec2DObj.waveimg,
                cuts=(-5.0, 5.0),
                bitmask=bitMask,
                mask=mask_in)
            if not args.removetrace and sobjs is not None:
                show_trace(sobjs, detname, viewer, ch_sky_resids)
            display.show_slits(viewer,
                               ch_sky_resids,
                               left,
                               right,
                               slit_ids=slid_IDs,
                               maskdef_ids=maskdef_id)
            channel_names.append(chname_skyresids)

        # RESIDS
        if 3 in show_channels:
            chname_resids = args.prefix + f'resid-{detname}'
            # full model residual map
            image = (spec2DObj.sciimg - spec2DObj.skymodel - spec2DObj.objmodel) \
                        * np.sqrt(spec2DObj.ivarmodel) * (spec2DObj.bpmmask == 0)
            viewer, ch_resids = display.show_image(image,
                                                   chname=chname_resids,
                                                   waveimg=spec2DObj.waveimg,
                                                   cuts=(-5.0, 5.0),
                                                   bitmask=bitMask,
                                                   mask=mask_in,
                                                   wcs_match=True)
            if not args.removetrace and sobjs is not None:
                show_trace(sobjs, detname, viewer, ch_resids)
            display.show_slits(viewer,
                               ch_resids,
                               left,
                               right,
                               slit_ids=slid_IDs,
                               maskdef_ids=maskdef_id)
            channel_names.append(chname_resids)

        # After displaying all the images sync up the images with WCS_MATCH
        shell = viewer.shell()
        shell.start_global_plugin('WCSMatch')
        shell.call_global_plugin_method('WCSMatch', 'set_reference_channel',
                                        [channel_names[-1]], {})

        if args.embed:
            embed()
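
A minimal sketch of the argument namespace this 2D-spectrum viewer expects, covering every field read in the body above and using a hypothetical spec2d file name:

# Hedged usage sketch; field names are taken from the body of main() above.
from types import SimpleNamespace

args = SimpleNamespace(
    file='spec2d_frame1.fits',   # hypothetical PypeIt spec2d output
    list=False, det='1', verbosity=1, channels='0,1',
    showmask=False, clear=True,
    ignore_extract_mask=False, removetrace=False,
    prefix='', embed=False,
)
main(args)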
Code example #9
File: view_fits.py Project: tbowers7/PypeIt
    def main(args):

        from pypeit import msgs
        from pypeit.display import display
        from pypeit.spectrographs import util
        from pypeit import io
        from pypeit.images import buildimage

        # List only?
        if args.list:
            hdu = io.fits_open(args.file)
            print(hdu.info())
            return

        # Setup for PYPIT imports
        msgs.reset(verbosity=2)

        if args.proc and args.exten is not None:
            msgs.error(
                'You cannot specify --proc and --exten, since --exten shows the raw image'
            )
#        if args.proc and args.det == 'mosaic':
#            msgs.error('You cannot specify --proc and --det mosaic, since --mosaic can only '
#                       'display the raw image mosaic')
        if args.exten is not None and args.det == 'mosaic':
            msgs.error(
                'You cannot specify --exten and --det mosaic, since --mosaic displays '
                'multiple extensions by definition')

        if args.exten is not None:
            hdu = io.fits_open(args.file)
            img = hdu[args.exten].data
            hdu.close()
        else:
            spectrograph = util.load_spectrograph(args.spectrograph)
            bad_read_message = 'Unable to construct image due to a read or image processing ' \
                               'error.  Use case interpreted from command-line inputs requires ' \
                               'a raw image, not an output image product from pypeit.  To show ' \
                               'a pypeit output image, specify the extension using --exten.  ' \
                               'Use --list to show the extension names.'
            if 'mosaic' in args.det:
                mosaic = True
                _det = spectrograph.default_mosaic
                if _det is None:
                    msgs.error(
                        f'{args.spectrograph} does not have a known mosaic')
            else:
                try:
                    _det = tuple(int(d) for d in args.det)
                except ValueError:
                    msgs.error('Could not convert detector input to integer.')
                mosaic = len(_det) > 1
                if not mosaic:
                    _det = _det[0]

            if args.proc:
                # Use the biasframe processing parameters because processing
                # these frames is independent of any other frames (ie., does not
                # perform bias subtraction or flat-fielding)
                par = spectrograph.default_pypeit_par(
                )['calibrations']['biasframe']
                try:
                    Img = buildimage.buildimage_fromlist(spectrograph,
                                                         _det,
                                                         par, [args.file],
                                                         mosaic=mosaic)
                except Exception as e:
                    msgs.error(
                        bad_read_message +
                        f'  Original exception -- {type(e).__name__}: {str(e)}'
                    )

                if args.bkg_file is not None:
                    try:
                        bkgImg = buildimage.buildimage_fromlist(
                            spectrograph,
                            _det,
                            par, [args.bkg_file],
                            mosaic=mosaic)
                    except Exception as e:
                        msgs.error(
                            bad_read_message +
                            f'  Original exception -- {type(e).__name__}: {str(e)}'
                        )
                    Img = Img.sub(bkgImg, par['process'])

                img = Img.image

            else:
                try:
                    img = spectrograph.get_rawimage(args.file, _det)[1]
                except Exception as e:
                    msgs.error(
                        bad_read_message +
                        f'  Original exception -- {type(e).__name__}: {str(e)}'
                    )

        display.connect_to_ginga(raise_err=True, allow_new=True)
        display.show_image(img, chname=args.chname)
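
A minimal sketch of the namespace this newest view_fits variant expects, covering every field read in the body above; the file name and spectrograph choice are hypothetical:

# Hedged usage sketch; field names are taken from the body of main() above.
from types import SimpleNamespace

args = SimpleNamespace(
    file='raw_frame.fits',   # hypothetical raw frame
    list=False, proc=True, exten=None,
    det=['1'],               # list of detector strings; 'mosaic' selects the default mosaic
    spectrograph='keck_deimos',
    bkg_file=None, chname='raw',
)
main(args)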