Example #1
def main(args):

    # Build the fits table since we currently need it for output. This should not be the case!
    A_files = [os.path.join(args.full_rawpath, file) for file in args.Afiles]
    B_files = [os.path.join(args.full_rawpath, file) for file in args.Bfiles]
    data_files = A_files + B_files
    ps = pypeitsetup.PypeItSetup(A_files,
                                 path='./',
                                 spectrograph_name='keck_mosfire')
    ps.build_fitstbl()
    fitstbl = ps.fitstbl

    # Read in the spectrograph and configure the parset
    spectrograph = load_spectrograph('keck_mosfire')
    spectrograph_def_par = spectrograph.default_pypeit_par()
    parset = par.PypeItPar.from_cfg_lines(
        cfg_lines=spectrograph_def_par.to_config(),
        merge_with=config_lines(args))
    science_path = os.path.join(parset['rdx']['redux_path'],
                                parset['rdx']['scidir'])

    # Calibration Master directory
    if args.master_dir is None:
        msgs.error(
            "You need to set the environment variable MOSFIRE_MASTERS to point at the Master Calibs"
        )
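    # A minimal sketch (an assumption about the argument parser, which is not
    # shown here) of how args.master_dir could be defaulted from that
    # environment variable before reaching this check:
    #
    #   master_dir = os.environ.get('MOSFIRE_MASTERS') if args.master_dir is None else args.master_dir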

    # Define some hard-wired master files here; these should later be parsed out of the directory instead (see the sketch below)
    slit_masterframe_name = os.path.join(args.master_dir,
                                         'MasterSlits_E_15_01.fits.gz')
    tilts_masterframe_name = os.path.join(args.master_dir,
                                          'MasterTilts_E_1_01.fits')
    wvcalib_masterframe_name = os.path.join(args.master_dir,
                                            'MasterWaveCalib_E_1_01.fits')
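    # A sketch of parsing these master frames out of the directory by pattern
    # instead of hard-wiring the filenames, assuming the pypeit.utils.find_single_file
    # helper used in the other quick-look examples below:
    #
    #   slit_masterframe_name = utils.find_single_file(
    #       os.path.join(args.master_dir, 'MasterSlits*'))
    #   tilts_masterframe_name = utils.find_single_file(
    #       os.path.join(args.master_dir, 'MasterTilts*'))
    #   wvcalib_masterframe_name = utils.find_single_file(
    #       os.path.join(args.master_dir, 'MasterWaveCalib*'))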
    # For now don't require a standard
    std_outfile = None
    #std_outfile = os.path.join('/Users/joe/Dropbox/PypeIt_Redux/MOSFIRE/Nov19/quicklook/Science/',
    #                           'spec1d_m191118_0064-GD71_MOSFIRE_2019Nov18T104704.507.fits')
    # TODO: make the get_std from pypeit a utility function or class method (see the sketch after this block)
    det = 1  # MOSFIRE has a single detector
    if std_outfile is not None:
        # Get the standard trace if need be
        sobjs = specobjs.SpecObjs.from_fitsfile(std_outfile)
        this_det = sobjs.DET == det
        if np.any(this_det):
            sobjs_det = sobjs[this_det]
            sobjs_std = sobjs_det.get_std()
            std_trace = None if sobjs_std is None else sobjs_std.TRACE_SPAT.flatten()
        else:
            std_trace = None
    else:
        std_trace = None
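    # A sketch of the utility asked for in the TODO above; the std-trace lookup
    # just above is repeated verbatim in the other quick-look scripts, so it
    # could be hoisted into a small helper (the name get_std_trace is
    # illustrative, not existing PypeIt API):
    #
    #   def get_std_trace(std_outfile, det):
    #       """Return the standard-star spatial trace on detector `det`, or None."""
    #       if std_outfile is None:
    #           return None
    #       sobjs = specobjs.SpecObjs.from_fitsfile(std_outfile)
    #       this_det = sobjs.DET == det
    #       if not np.any(this_det):
    #           return None
    #       sobjs_std = sobjs[this_det].get_std()
    #       return None if sobjs_std is None else sobjs_std.TRACE_SPAT.flatten()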

    # Read in the msbpm
    sdet = get_dnum(det, prefix=False)
    msbpm = spectrograph.bpm(A_files[0], det)
    # Read in the slits
    slits = slittrace.SlitTraceSet.from_file(slit_masterframe_name)
    # Reset the bitmask
    slits.mask = slits.mask_init.copy()
    # Read in the wv_calib
    wv_calib = wavecalib.WaveCalib.from_file(wvcalib_masterframe_name)
    wv_calib.is_synced(slits)
    slits.mask_wvcalib(wv_calib)
    # Read in the tilts
    tilts_obj = wavetilts.WaveTilts.from_file(tilts_masterframe_name)
    tilts_obj.is_synced(slits)
    slits.mask_wavetilts(tilts_obj)

    # Build Science image
    sciImg = buildimage.buildimage_fromlist(spectrograph,
                                            det,
                                            parset['scienceframe'],
                                            A_files,
                                            bpm=msbpm,
                                            slits=slits,
                                            ignore_saturation=False)

    # Background Image?
    sciImg = sciImg.sub(
        buildimage.buildimage_fromlist(spectrograph,
                                       det,
                                       parset['scienceframe'],
                                       B_files,
                                       bpm=msbpm,
                                       slits=slits,
                                       ignore_saturation=False),
        parset['scienceframe']['process'])
    # Build the Calibrate object
    caliBrate = calibrations.Calibrations(None, parset['calibrations'],
                                          spectrograph, None)
    caliBrate.slits = slits
    caliBrate.wavetilts = tilts_obj
    caliBrate.wv_calib = wv_calib

    # Instantiate Reduce object
    # Required for the pypeline-specific object
    # At instantiation, the fullmask in self.sciImg is modified
    redux = reduce.Reduce.get_instance(sciImg,
                                       spectrograph,
                                       parset,
                                       caliBrate,
                                       'science',
                                       ir_redux=True,
                                       show=args.show,
                                       det=det,
                                       std_outfile=std_outfile)

    manual_extract_dict = None
    skymodel, objmodel, ivarmodel, outmask, sobjs, waveImg, tilts = redux.run(
        std_trace=std_trace,
        return_negative=True,
        manual_extract_dict=manual_extract_dict,
        show_peaks=args.show)

    # TODO -- Do this upstream
    # Tack on detector
    for sobj in sobjs:
        sobj.DETECTOR = sciImg.detector

    # Construct the Spec2DObj with the positive image
    spec2DObj_A = spec2dobj.Spec2DObj(det=det,
                                      sciimg=sciImg.image,
                                      ivarraw=sciImg.ivar,
                                      skymodel=skymodel,
                                      objmodel=objmodel,
                                      ivarmodel=ivarmodel,
                                      waveimg=waveImg,
                                      bpmmask=outmask,
                                      detector=sciImg.detector,
                                      sci_spat_flexure=sciImg.spat_flexure,
                                      tilts=tilts,
                                      slits=copy.deepcopy(caliBrate.slits))
    spec2DObj_A.process_steps = sciImg.process_steps
    all_spec2d = spec2dobj.AllSpec2DObj()
    all_spec2d['meta']['ir_redux'] = True
    all_spec2d[det] = spec2DObj_A
    # Save image A but with all the objects extracted, i.e. positive and negative
    #outfile2d, outfile1d = save_exposure(fitstbl, 0, spectrograph, science_path, parset, caliBrate, all_spec2d, sobjs)

    # Construct the Spec2DObj with the negative image
    spec2DObj_B = spec2dobj.Spec2DObj(det=det,
                                      sciimg=-sciImg.image,
                                      ivarraw=sciImg.ivar,
                                      skymodel=-skymodel,
                                      objmodel=-objmodel,
                                      ivarmodel=ivarmodel,
                                      waveimg=waveImg,
                                      bpmmask=outmask,
                                      detector=sciImg.detector,
                                      sci_spat_flexure=sciImg.spat_flexure,
                                      tilts=tilts,
                                      slits=copy.deepcopy(caliBrate.slits))

    # Parse the offset information out of the headers. TODO: in the future get this from the fits table
    dither_pattern_A, dither_id_A, offset_arcsec_A = parse_dither_pattern(
        A_files, spectrograph.primary_hdrext)
    dither_pattern_B, dither_id_B, offset_arcsec_B = parse_dither_pattern(
        B_files, spectrograph.primary_hdrext)
    # Print out a report on the offsets
    msg_string = msgs.newline() + '****************************************************'
    msg_string += msgs.newline() + ' Summary of offsets for dither pattern:   {:s}'.format(dither_pattern_A[0])
    msg_string += msgs.newline() + '****************************************************'
    msg_string += msgs.newline() + 'Position     filename         arcsec    pixels    '
    msg_string += msgs.newline() + '----------------------------------------------------'
    for iexp, file in enumerate(A_files):
        msg_string += msgs.newline() + '    A    {:s}   {:6.2f}    {:6.2f}'.format(
            os.path.basename(file), offset_arcsec_A[iexp],
            offset_arcsec_A[iexp] / sciImg.detector.platescale)
    for iexp, file in enumerate(B_files):
        msg_string += msgs.newline() + '    B    {:s}   {:6.2f}    {:6.2f}'.format(
            os.path.basename(file), offset_arcsec_B[iexp],
            offset_arcsec_B[iexp] / sciImg.detector.platescale)
    msg_string += msgs.newline() + '****************************************************'
    msgs.info(msg_string)

    #offset_dith_pix = offset_dith_pix = offset_arcsec_A[0]/sciImg.detector.platescale
    offsets_dith_pix = (np.array([
        0.0, np.mean(offset_arcsec_B) - np.mean(offset_arcsec_A)
    ])) / sciImg.detector.platescale
    if args.offset is not None:
        offsets_pixels = np.array([0.0, args.offset])
        msgs.info('Using user-specified offsets instead: {:5.2f}'.format(args.offset))
    else:
        offsets_pixels = offsets_dith_pix

    spec2d_list = [spec2DObj_A, spec2DObj_B]
    # Instantiate Coadd2d
    coadd = coadd2d.CoAdd2D.get_instance(spec2d_list,
                                         spectrograph,
                                         parset,
                                         det=det,
                                         offsets=offsets_pixels,
                                         weights='uniform',
                                         ir_redux=True,
                                         debug=args.show,
                                         samp_fact=args.samp_fact)
    # Coadd the slits
    coadd_dict_list = coadd.coadd(
        only_slits=None, interp_dspat=False)  # TODO implement only_slits later
    # Create the pseudo images
    pseudo_dict = coadd.create_pseudo_image(coadd_dict_list)

    ##########################
    # Now display the images #
    ##########################
    display.display.connect_to_ginga(raise_err=True, allow_new=True)
    # Bug in ginga prevents me from using cuts here for some reason
    #mean, med, sigma = sigma_clipped_stats(pseudo_dict['imgminsky'][pseudo_dict['inmask']], sigma_lower=5.0,sigma_upper=5.0)
    #cut_min = mean - 4.0 * sigma
    #cut_max = mean + 4.0 * sigma
    chname_skysub = 'skysub-det{:s}'.format(sdet)
    # Clear all channels at the beginning
    # TODO: JFH For some reason Ginga crashes when I try to put cuts in here.
    viewer, ch = ginga.show_image(pseudo_dict['imgminsky'],
                                  chname=chname_skysub,
                                  waveimg=pseudo_dict['waveimg'],
                                  clear=True)  # cuts=(cut_min, cut_max),
    slit_left, slit_righ, _ = pseudo_dict['slits'].select_edges()
    slit_id = slits.slitord_id[0]
    ginga.show_slits(viewer, ch, slit_left, slit_righ, slit_ids=slit_id)

    # SKY RESIDS
    chname_skyresids = 'sky_resid-det{:s}'.format(sdet)
    image = pseudo_dict['imgminsky'] * np.sqrt(
        pseudo_dict['sciivar']) * pseudo_dict['inmask']  # sky residual map
    viewer, ch = ginga.show_image(
        image,
        chname_skyresids,
        waveimg=pseudo_dict['waveimg'],
        cuts=(-5.0, 5.0),
    )
    ginga.show_slits(viewer,
                     ch,
                     slit_left,
                     slit_righ,
                     slit_ids=slits.slitord_id[0])
    shell = viewer.shell()
    out = shell.start_global_plugin('WCSMatch')
    out = shell.call_global_plugin_method('WCSMatch', 'set_reference_channel',
                                          [chname_skyresids], {})

    if args.embed:
        embed()

    return 0
Example #2
    def main(args):

        tstart = time.time()
        # Parse the files and sort by MJD
        files = np.array([os.path.join(args.full_rawpath, file) for file in args.files])
        nfiles = len(files)

        # Read in the spectrograph and configure the parset
        spectrograph = load_spectrograph('vlt_fors2')
        #spectrograph_def_par = spectrograph.default_pypeit_par()
        spectrograph_cfg_lines = spectrograph.config_specific_par(files[0]).to_config()
        parset = par.PypeItPar.from_cfg_lines(cfg_lines=spectrograph_cfg_lines,
                                              merge_with=config_lines(args))
        science_path = os.path.join(parset['rdx']['redux_path'], parset['rdx']['scidir'])

        target = spectrograph.get_meta_value(files[0], 'target')
        mjds = np.zeros(nfiles)
        for ifile, file in enumerate(files):
            mjds[ifile] = spectrograph.get_meta_value(file, 'mjd', ignore_bad_header=True,
                                                      no_fussing=True)
        files = files[np.argsort(mjds)]

        # Calibration Master directory
        # TODO: hardwired for now
        master_dir = './'
        #master_dir = resource_filename('pypeit', 'data/QL_MASTERS') \
        #    if args.master_dir is None else args.master_dir
        if not os.path.isdir(master_dir):
            msgs.error(f'{master_dir} does not exist!  You must install the QL_MASTERS '
                       'directory; download the data from the PypeIt dev-suite Google Drive and '
                       'either define a QL_MASTERS environmental variable or use the '
                       'pypeit_install_ql_masters script.')

        # Define some hard-wired master files here to be later parsed out of the directory
        fors2_grism = spectrograph.get_meta_value(files[0], 'dispname')
        fors2_masters = os.path.join(master_dir, 'FORS2_MASTERS', fors2_grism)


        bias_masterframe_name = \
            utils.find_single_file(os.path.join(fors2_masters, "MasterBias*"))
        slit_masterframe_name \
            = utils.find_single_file(os.path.join(fors2_masters, "MasterSlits*"))
        tilts_masterframe_name \
            = utils.find_single_file(os.path.join(fors2_masters, "MasterTilts*"))
        wvcalib_masterframe_name \
            = utils.find_single_file(os.path.join(fors2_masters, 'MasterWaveCalib*'))
        std_spec1d_file = utils.find_single_file(os.path.join(fors2_masters, 'spec1d_*'))
        sensfunc_masterframe_name = utils.find_single_file(os.path.join(fors2_masters, 'sens_*'))

        # TODO: make and implement sensfunc
        if (bias_masterframe_name is None or not os.path.isfile(bias_masterframe_name)) or \
                (slit_masterframe_name is None or not os.path.isfile(slit_masterframe_name)) or \
                (tilts_masterframe_name is None or not os.path.isfile(tilts_masterframe_name)) or \
                (std_spec1d_file is None or not os.path.isfile(std_spec1d_file)):
            # or (sensfunc_masterframe_name is None or not os.path.isfile(sensfunc_masterframe_name)):
            msgs.error('Master frames not found.  Check that environment variable QL_MASTERS '
                       'points at the Master Calibs')

        # Get the detector parameters; we need the platescale
        det_container = spectrograph.get_detector_par(1, hdu=fits.open(files[0]))
        binspectral, binspatial = parse_binning(det_container['binning'])
        platescale = det_container['platescale']*binspatial
        # Parse the offset information out of the headers.
        _, _, offset_arcsec = spectrograph.parse_dither_pattern(files)

        # Print out a report on the offsets
        msg_string = msgs.newline()  + '*******************************************************'
        msg_string += msgs.newline() + ' Summary of offsets for target {:s}:                   '.format(target)
        msg_string += msgs.newline() + '*******************************************************'
        msg_string += msgs.newline() + '           filename                arcsec   pixels    '
        msg_string += msgs.newline() + '----------------------------------------------------'
        for iexp, file in enumerate(files):
            msg_string += msgs.newline() + '    {:s}    {:6.2f}    {:6.2f}'.format(
                os.path.basename(file), offset_arcsec[iexp], offset_arcsec[iexp] / platescale)
        msg_string += msgs.newline() + '********************************************************'
        msgs.info(msg_string)

        ## Read in the master frames that we need
        ##
        det = 1  # Currently only CHIP1 is supported
        if std_spec1d_file is not None:
            # Get the standard trace if need be
            sobjs = specobjs.SpecObjs.from_fitsfile(std_spec1d_file)
            this_det = sobjs.DET == det
            if np.any(this_det):
                sobjs_det = sobjs[this_det]
                sobjs_std = sobjs_det.get_std()
                std_trace = None if sobjs_std is None else sobjs_std.TRACE_SPAT.flatten()
            else:
                std_trace = None
        else:
            std_trace = None

        # Read in the bias
        msbias = buildimage.BiasImage.from_file(bias_masterframe_name)
        # Read in the msbpm
        sdet = get_dnum(det, prefix=False)
        msbpm = spectrograph.bpm(files[0], det)
        # Read in the slits
        slits = slittrace.SlitTraceSet.from_file(slit_masterframe_name)
        # Reset the bitmask
        slits.mask = slits.mask_init.copy()
        # Read in the wv_calib
        wv_calib = wavecalib.WaveCalib.from_file(wvcalib_masterframe_name)
        # wv_calib.is_synced(slits)
        slits.mask_wvcalib(wv_calib)
        # Read in the tilts
        tilts_obj = wavetilts.WaveTilts.from_file(tilts_masterframe_name)
        tilts_obj.is_synced(slits)
        slits.mask_wavetilts(tilts_obj)

        # Build the Calibrate object
        caliBrate = calibrations.Calibrations(None, parset['calibrations'], spectrograph, None)
        caliBrate.msbias = msbias
        caliBrate.msbpm = msbpm
        caliBrate.slits = slits
        caliBrate.wavetilts = tilts_obj
        caliBrate.wv_calib = wv_calib

        # Find the unique offsets. This is a bit of a kludge: offsets that round to the same value
        # (np.around, i.e. nearest arcsec) are treated as the same throw. Ideally a tolerance (e.g.
        # 0.1 arcsec) could be specified here, but that requires a version of unique that accepts a
        # tolerance (see the sketch below).
        uniq_offsets, uni_indx = np.unique(np.around(offset_arcsec), return_inverse=True)
        nuniq = uniq_offsets.size
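        # A sketch of the tolerance-aware grouping wished for above (illustrative
        # only; np.unique itself has no tolerance argument):
        #
        #   def unique_with_tolerance(values, tol=0.1):
        #       order = np.argsort(values)
        #       sorted_vals = values[order]
        #       # Start a new group wherever the gap to the previous value exceeds tol
        #       group_sorted = np.concatenate(([0], np.cumsum(np.diff(sorted_vals) > tol)))
        #       groups = np.empty_like(group_sorted)
        #       groups[order] = group_sorted
        #       uniq = np.array([values[groups == g].mean() for g in range(groups.max() + 1)])
        #       return uniq, groups
        #
        #   uniq_offsets, uni_indx = unique_with_tolerance(offset_arcsec, tol=0.1)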
        spec2d_list = []
        offset_ref = offset_arcsec[0]
        offsets_dith_pix = []
        # Generalize to multiple slits, doing one slit at a time?
        islit = 0

        # Loop over the unique offsets and create a spec2d for each, which are
        # then fed into coadd2d with the correct offsets

        # TODO Rework the logic here so that we can print out a unified report
        # on what was actually reduced.

        for iuniq in range(nuniq):
            indx = uni_indx == iuniq
            files_uni = files[indx]
            offsets = offset_arcsec[indx]
            msgs.info('Reducing images for offset = {:}'.format(offsets[0]))
            spec2DObj = run(files_uni, caliBrate, spectrograph, det, parset, show=args.show, std_trace=std_trace)
            spec2d_list += [spec2DObj]
            offsets_dith_pix += [np.mean(offsets)/platescale]

        offsets_dith_pix = np.array(offsets_dith_pix)

        if args.offset is not None:
            offsets_pixels = np.array([0.0, args.offset])
            msgs.info('Using user-specified offsets instead: {:5.2f}'.format(args.offset))
        else:
            offsets_pixels = offsets_dith_pix


        # Instantiate Coadd2d
        coadd = coadd2d.CoAdd2D.get_instance(spec2d_list, spectrograph, parset, det=det,
                                             offsets=offsets_pixels, weights='uniform',
                                             spec_samp_fact=args.spec_samp_fact,
                                             spat_samp_fact=args.spat_samp_fact,
                                             ir_redux=True, debug=args.show)
        # Coadd the slits
        # TODO implement only_slits later
        coadd_dict_list = coadd.coadd(only_slits=None, interp_dspat=False)
        # Create the pseudo images
        pseudo_dict = coadd.create_pseudo_image(coadd_dict_list)

        # Multiply in a sensitivity function to flux the 2d image
        if args.flux:
            # Load the sensitivity function
            #            wave_sens, sfunc, _, _, _ = sensfunc.SensFunc.load(sensfunc_masterframe_name)
            sens = sensfunc.SensFunc.from_file(sensfunc_masterframe_name)
            # Interpolate the sensitivity function onto the wavelength grid of
            # the data. Since the image is rectified this is trivial and we
            # don't need to do a 2d interpolation
            exptime = spectrograph.get_meta_value(files[0], 'exptime')
            sens_factor = flux_calib.get_sensfunc_factor(pseudo_dict['wave_mid'][:, islit],
                                                         sens.wave, sens.zeropoint, exptime,
                                                         extrap_sens=parset['fluxcalib']['extrap_sens'])

            # Compute the median sensitivity and set the sensitivity to zero at
            # locations 100 times the median. This prevents the 2d image from
            # blowing up where the sens_factor explodes because there is no
            # throughput
            sens_gpm = sens_factor < 100.0 * np.median(sens_factor)
            sens_factor_masked = sens_factor * sens_gpm
            sens_factor_img = np.repeat(sens_factor_masked[:, np.newaxis], pseudo_dict['nspat'],
                                        axis=1)
            imgminsky = sens_factor_img * pseudo_dict['imgminsky']
            imgminsky_gpm = sens_gpm[:, np.newaxis] & pseudo_dict['inmask']
        else:
            imgminsky = pseudo_dict['imgminsky']
            imgminsky_gpm = pseudo_dict['inmask']

        ##########################
        # Now display the images #
        ##########################
        if not args.no_gui:
            display.connect_to_ginga(raise_err=True, allow_new=True)

            # TODO: Bug in ginga prevents me from using cuts here for some
            # reason
            mean, med, sigma = sigma_clipped_stats(imgminsky[imgminsky_gpm], sigma_lower=3.0,
                                                   sigma_upper=3.0)
            chname_skysub = 'fluxed-skysub-det{:s}'.format(sdet) \
                if args.flux else 'skysub-det{:s}'.format(sdet)
            cuts_skysub = (med - 3.0 * sigma, med + 3.0 * sigma)
            cuts_resid = (-5.0, 5.0)
            # fits.writeto('/Users/joe/ginga_test.fits',imgminsky, overwrite=True)
            # fits.writeto('/Users/joe/ginga_mask.fits',imgminsky_gpm.astype(float), overwrite=True)
            # embed()

            # Clear all channels at the beginning
            # TODO: JFH For some reason Ginga crashes when I try to put cuts in here.
            viewer, ch_skysub = display.show_image(imgminsky, chname=chname_skysub,
                                                   waveimg=pseudo_dict['waveimg'], clear=True,
                                                   cuts=cuts_skysub)
            slit_left, slit_righ, _ = pseudo_dict['slits'].select_edges()
            slit_id = slits.slitord_id[0]
            display.show_slits(viewer, ch_skysub, slit_left, slit_righ, slit_ids=slit_id)

            # SKY RESIDS
            chname_skyresids = 'sky_resid-det{:s}'.format(sdet)
            # sky residual map
            image = pseudo_dict['imgminsky'] * np.sqrt(pseudo_dict['sciivar']) * pseudo_dict['inmask']
            viewer, ch_skyresids = display.show_image(image, chname_skyresids,
                                                      waveimg=pseudo_dict['waveimg'],
                                                      cuts=cuts_resid)

            display.show_slits(viewer, ch_skyresids, slit_left, slit_righ,
                               slit_ids=slits.slitord_id[0])
            shell = viewer.shell()
            out = shell.start_global_plugin('WCSMatch')
            out = shell.call_global_plugin_method('WCSMatch', 'set_reference_channel',
                                                  [chname_skysub], {})

        # TODO extract along a spatial position
        if args.writefits:
            head0 = fits.getheader(files[0])
            # TODO use meta tools for the object name in the future.
            outfile = target + '_specXspat_{:3.2f}X{:3.2f}.fits'.format(args.spec_samp_fact,
                                                                        args.spat_samp_fact)
            hdu = fits.PrimaryHDU(imgminsky, header=head0)
            hdu_resid = fits.ImageHDU(pseudo_dict['imgminsky'] \
                                      * np.sqrt(pseudo_dict['sciivar']) * pseudo_dict['inmask'])
            hdu_wave = fits.ImageHDU(pseudo_dict['waveimg'])
            hdul = fits.HDUList([hdu, hdu_resid, hdu_wave])
            msgs.info('Writing sky subtracted image to {:s}'.format(outfile))
            hdul.writeto(outfile, overwrite=True)

        msgs.info(utils.get_time_string(time.time()-tstart))


        if args.embed:
            embed()

        return 0
Example #3
    def main(args):

        tstart = time.time()

        # Read in the spectrograph and configure the parset
        spectrograph = load_spectrograph('keck_mosfire')
        spectrograph_def_par = spectrograph.default_pypeit_par()
        parset = par.PypeItPar.from_cfg_lines(
            cfg_lines=spectrograph_def_par.to_config(),
            merge_with=config_lines(args))
        science_path = os.path.join(parset['rdx']['redux_path'],
                                    parset['rdx']['scidir'])

        # Parse the files and sort by MJD
        files = np.array(
            [os.path.join(args.full_rawpath, file) for file in args.files])
        nfiles = len(files)
        target = spectrograph.get_meta_value(files[0], 'target')
        mjds = np.zeros(nfiles)
        for ifile, file in enumerate(files):
            mjds[ifile] = spectrograph.get_meta_value(file,
                                                      'mjd',
                                                      ignore_bad_header=True,
                                                      no_fussing=True)
        files = files[np.argsort(mjds)]

        # Calibration Master directory
        master_dir = os.path.join(data.Paths.data, 'QL_MASTERS') \
                        if args.master_dir is None else args.master_dir
        if not os.path.isdir(master_dir):
            msgs.error(
                f'{master_dir} does not exist!  You must install the QL_MASTERS '
                'directory; download the data from the PypeIt dev-suite Google Drive and '
                'either define a QL_MASTERS environmental variable or use the '
                'pypeit_install_ql_masters script.')

        # Define some hard-wired master files here to be later parsed out of the directory
        mosfire_filter = spectrograph.get_meta_value(files[0], 'filter1')
        mosfire_masters = os.path.join(master_dir, 'MOSFIRE_MASTERS',
                                       mosfire_filter)

        slit_masterframe_name \
                = utils.find_single_file(os.path.join(mosfire_masters, "MasterSlits*"))
        tilts_masterframe_name \
                = utils.find_single_file(os.path.join(mosfire_masters, "MasterTilts*"))
        wvcalib_masterframe_name \
                = utils.find_single_file(os.path.join(mosfire_masters, 'MasterWaveCalib*'))
        std_spec1d_file = utils.find_single_file(
            os.path.join(mosfire_masters, 'spec1d_*'))
        sensfunc_masterframe_name = utils.find_single_file(
            os.path.join(mosfire_masters, 'sens_*'))

        if (slit_masterframe_name is None or not os.path.isfile(slit_masterframe_name)) or  \
           (tilts_masterframe_name is None or not os.path.isfile(tilts_masterframe_name)) or \
           (sensfunc_masterframe_name is None or not os.path.isfile(sensfunc_masterframe_name)) or \
           (std_spec1d_file is None or not os.path.isfile(std_spec1d_file)):
            msgs.error(
                'Master frames not found.  Check that environment variable QL_MASTERS '
                'points at the Master Calibs')

        # Get detector (there's only one)
        det = 1  # MOSFIRE has a single detector
        detector = spectrograph.get_detector_par(det)
        detname = detector.name

        # We need the platescale
        platescale = detector['platescale']
        # Parse the offset information out of the headers. TODO: in the future
        # get this from the fits table
        dither_pattern, dither_id, offset_arcsec = spectrograph.parse_dither_pattern(
            files)
        if len(np.unique(dither_pattern)) > 1:
            msgs.error(
                'Script only supported for a single type of dither pattern.')
        A_files = files[dither_id == 'A']
        B_files = files[dither_id == 'B']
        nA = len(A_files)
        nB = len(B_files)

        # Print out a report on the offsets
        msg_string = msgs.newline() + '*******************************************************'
        msg_string += msgs.newline() + ' Summary of offsets for target {:s} with dither pattern:   {:s}'.format(
            target, dither_pattern[0])
        msg_string += msgs.newline() + '*******************************************************'
        msg_string += msgs.newline() + 'filename     Position         arcsec    pixels    '
        msg_string += msgs.newline() + '----------------------------------------------------'
        for iexp, file in enumerate(files):
            msg_string += msgs.newline() + '    {:s}    {:s}   {:6.2f}    {:6.2f}'.format(
                os.path.basename(file), dither_id[iexp], offset_arcsec[iexp],
                offset_arcsec[iexp] / platescale)
        msg_string += msgs.newline() + '********************************************************'
        msgs.info(msg_string)

        #offset_dith_pix = offset_dith_pix = offset_arcsec_A[0]/sciImg.detector.platescale

        ## Read in the master frames that we need
        ##
        if std_spec1d_file is not None:
            # Get the standard trace if need be
            sobjs = specobjs.SpecObjs.from_fitsfile(std_spec1d_file,
                                                    chk_version=False)
            this_det = sobjs.DET == detname
            if np.any(this_det):
                sobjs_det = sobjs[this_det]
                sobjs_std = sobjs_det.get_std()
                std_trace = None if sobjs_std is None else sobjs_std.TRACE_SPAT.flatten()
            else:
                std_trace = None
        else:
            std_trace = None

        # Read in the msbpm
        msbpm = spectrograph.bpm(A_files[0], det)
        # Read in the slits
        slits = slittrace.SlitTraceSet.from_file(slit_masterframe_name)
        # Reset the bitmask
        slits.mask = slits.mask_init.copy()
        # Read in the wv_calib
        wv_calib = wavecalib.WaveCalib.from_file(wvcalib_masterframe_name)
        #wv_calib.is_synced(slits)
        slits.mask_wvcalib(wv_calib)
        # Read in the tilts
        tilts_obj = wavetilts.WaveTilts.from_file(tilts_masterframe_name)
        tilts_obj.is_synced(slits)
        slits.mask_wavetilts(tilts_obj)

        # Build the Calibrate object
        caliBrate = calibrations.Calibrations(None, parset['calibrations'],
                                              spectrograph, None)
        caliBrate.det = det
        caliBrate.slits = slits
        caliBrate.msbpm = msbpm
        caliBrate.wavetilts = tilts_obj
        caliBrate.wv_calib = wv_calib
        caliBrate.binning = f'{slits.binspec},{slits.binspat}'

        # Find the unique throw absolute value, which defines each MASK_NOD sequence
        #uniq_offsets, _ = np.unique(offset_arcsec, return_inverse=True)

        spec2d_list = []
        offset_ref = offset_arcsec[0]
        offsets_dith_pix = []
        # Generalize to multiple slits, doing one slit at a time?
        islit = 0

        # Loop over the unique throws and create a spec2d_A and spec2D_B for
        # each, which are then fed into coadd2d with the correct offsets

        # TODO Rework the logic here so that we can print out a unified report
        # on what was actually reduced.

        uniq_throws, uni_indx = np.unique(np.abs(offset_arcsec),
                                          return_inverse=True)
        # uniq_throws = unique values of the dither throw
        # uni_indx = indices into the uniq_throws array needed to reconstruct the original array
        nuniq = uniq_throws.size
        for iuniq in range(nuniq):
            A_ind = (uni_indx == iuniq) & (dither_id == 'A')
            B_ind = (uni_indx == iuniq) & (dither_id == 'B')
            A_files_uni = files[A_ind]
            A_dither_id_uni = dither_id[A_ind]
            B_dither_id_uni = dither_id[B_ind]
            B_files_uni = files[B_ind]
            A_offset = offset_arcsec[A_ind]
            B_offset = offset_arcsec[B_ind]
            throw = uniq_throws[iuniq]
            msgs.info('Reducing A-B pairs for throw = {:}'.format(throw))
            if (len(A_files_uni) > 0) & (len(B_files_uni) > 0):
                spec2DObj_A, spec2DObj_B = reduce_IR(A_files_uni,
                                                     B_files_uni,
                                                     caliBrate,
                                                     spectrograph,
                                                     det,
                                                     parset,
                                                     show=args.show,
                                                     std_trace=std_trace)
                spec2d_list += [spec2DObj_A, spec2DObj_B]
                offsets_dith_pix += [
                    (np.mean(A_offset) - offset_ref) / platescale,
                    (np.mean(B_offset) - offset_ref) / platescale
                ]
            else:
                warn_string = 'Skipping files that do not have an A-B match with the same throw:'
                for iexp in range(len(A_files_uni)):
                    warn_string += msgs.newline() + '    {:s}    {:s}   {:6.2f}    {:6.2f}'.format(
                        os.path.basename(A_files_uni[iexp]), A_dither_id_uni[iexp],
                        A_offset[iexp], A_offset[iexp] / platescale)
                for iexp in range(len(B_files_uni)):
                    warn_string += msgs.newline() + '    {:s}    {:s}   {:6.2f}    {:6.2f}'.format(
                        os.path.basename(B_files_uni[iexp]), B_dither_id_uni[iexp],
                        B_offset[iexp], B_offset[iexp] / platescale)
                msgs.warn(warn_string)

        offsets_dith_pix = np.array(offsets_dith_pix)
        #else:
        #    msgs.error('Unrecognized mode')

        if args.offset is not None:
            offsets_pixels = np.array([0.0, args.offset])
            msgs.info('Using user-specified offsets instead: {:5.2f}'.format(args.offset))
        else:
            offsets_pixels = offsets_dith_pix

        # Instantiate Coadd2d
        coadd = coadd2d.CoAdd2D.get_instance(
            spec2d_list,
            spectrograph,
            parset,
            det=det,
            offsets=offsets_pixels,
            weights='uniform',
            spec_samp_fact=args.spec_samp_fact,
            spat_samp_fact=args.spat_samp_fact,
            bkg_redux=True,
            debug=args.show)
        # Coadd the slits
        # TODO implement only_slits later
        coadd_dict_list = coadd.coadd(only_slits=None, interp_dspat=False)
        # Create the pseudo images
        pseudo_dict = coadd.create_pseudo_image(coadd_dict_list)

        # Multiply in a sensitivity function to flux the 2d image
        if args.flux:
            # Load the sensitivity function
            #            wave_sens, sfunc, _, _, _ = sensfunc.SensFunc.load(sensfunc_masterframe_name)
            sens = sensfunc.SensFunc.from_file(sensfunc_masterframe_name)
            # Interpolate the sensitivity function onto the wavelength grid of
            # the data. Since the image is rectified this is trivial and we
            # don't need to do a 2d interpolation
            exptime = spectrograph.get_meta_value(files[0], 'exptime')
            sens_factor = flux_calib.get_sensfunc_factor(
                pseudo_dict['wave_mid'][:, islit],
                sens.wave.flatten(),
                sens.zeropoint.flatten(),
                exptime,
                extrap_sens=True)  #parset['fluxcalib']['extrap_sens'])

            # Compute the median sensitivity and set the sensitivity to zero at
            # locations 100 times the median. This prevents the 2d image from
            # blowing up where the sens_factor explodes because there is no
            # throughput
            sens_gpm = sens_factor < 100.0 * np.median(sens_factor)
            sens_factor_masked = sens_factor * sens_gpm
            sens_factor_img = np.repeat(sens_factor_masked[:, np.newaxis],
                                        pseudo_dict['nspat'],
                                        axis=1)
            imgminsky = sens_factor_img * pseudo_dict['imgminsky']
            imgminsky_gpm = sens_gpm[:, np.newaxis] & pseudo_dict['inmask']
        else:
            imgminsky = pseudo_dict['imgminsky']
            imgminsky_gpm = pseudo_dict['inmask']

        ##########################
        # Now display the images #
        ##########################
        if not args.no_gui:
            display.connect_to_ginga(raise_err=True, allow_new=True)

            # TODO: Bug in ginga prevents me from using cuts here for some
            # reason
            mean, med, sigma = sigma_clipped_stats(imgminsky[imgminsky_gpm],
                                                   sigma_lower=3.0,
                                                   sigma_upper=3.0)
            chname_skysub = f'fluxed-skysub-{detname.lower()}' \
                                if args.flux else f'skysub-{detname.lower()}'
            cuts_skysub = (med - 3.0 * sigma, med + 3.0 * sigma)
            cuts_resid = (-5.0, 5.0)
            #fits.writeto('/Users/joe/ginga_test.fits',imgminsky, overwrite=True)
            #fits.writeto('/Users/joe/ginga_mask.fits',imgminsky_gpm.astype(float), overwrite=True)
            #embed()

            # Clear all channels at the beginning
            # TODO: JFH For some reason Ginga crashes when I try to put cuts in here.
            viewer, ch_skysub = display.show_image(
                imgminsky,
                chname=chname_skysub,
                waveimg=pseudo_dict['waveimg'],
                clear=True,
                cuts=cuts_skysub)
            slit_left, slit_righ, _ = pseudo_dict['slits'].select_edges()
            slit_id = slits.slitord_id[0]
            display.show_slits(viewer,
                               ch_skysub,
                               slit_left,
                               slit_righ,
                               slit_ids=slit_id)

            # SKY RESIDS
            chname_skyresids = f'sky_resid-{detname.lower()}'
            # sky residual map
            image = pseudo_dict['imgminsky'] * np.sqrt(
                pseudo_dict['sciivar']) * pseudo_dict['inmask']
            viewer, ch_skyresids = display.show_image(
                image,
                chname_skyresids,
                waveimg=pseudo_dict['waveimg'],
                cuts=cuts_resid)

            display.show_slits(viewer,
                               ch_skyresids,
                               slit_left,
                               slit_righ,
                               slit_ids=slits.slitord_id[0])
            shell = viewer.shell()
            out = shell.start_global_plugin('WCSMatch')
            out = shell.call_global_plugin_method('WCSMatch',
                                                  'set_reference_channel',
                                                  [chname_skysub], {})

        # TODO extract along a spatial position
        if args.writefits:
            head0 = fits.getheader(files[0])
            # TODO use meta tools for the object name in the future.
            outfile = target + '_specXspat_{:3.2f}X{:3.2f}.fits'.format(
                args.spec_samp_fact, args.spat_samp_fact)
            hdu = fits.PrimaryHDU(imgminsky, header=head0)
            hdu_resid = fits.ImageHDU(pseudo_dict['imgminsky'] \
                            * np.sqrt(pseudo_dict['sciivar'])*pseudo_dict['inmask'])
            hdu_wave = fits.ImageHDU(pseudo_dict['waveimg'])
            hdul = fits.HDUList([hdu, hdu_resid, hdu_wave])
            msgs.info('Writing sky subtracted image to {:s}'.format(outfile))
            hdul.writeto(outfile, overwrite=True)

        msgs.info(utils.get_time_string(time.time() - tstart))

        if args.embed:
            embed()

        return 0
Example #4
    def reduce(self, pseudo_dict, show=None, show_peaks=None):
        """
        ..todo.. Please document me

        Args:
            pseudo_dict:
            show:
            show_peaks:

        Returns:

        """

        show = self.show if show is None else show
        show_peaks = self.show_peaks if show_peaks is None else show_peaks
        sciImage = pypeitimage.PypeItImage(image=pseudo_dict['imgminsky'],
                                           ivar=pseudo_dict['sciivar'],
                                           bpm=np.zeros_like(pseudo_dict['inmask'].astype(int)),  # Dummy bpm
                                           rn2img=np.zeros_like(pseudo_dict['inmask']).astype(float),  # Dummy rn2img
                                           crmask=np.invert(pseudo_dict['inmask'].astype(bool)))
        sciImage.detector = self.stack_dict['detectors'][0]
        #
        slitmask_pseudo = pseudo_dict['slits'].slit_img()
        sciImage.build_mask(slitmask=slitmask_pseudo)

        # Make changes to parset specific to 2d coadds
        parcopy = copy.deepcopy(self.par)
        parcopy['reduce']['findobj']['trace_npoly'] = 3        # Low order traces since we are rectified
        #parcopy['calibrations']['save_masters'] = False
        #parcopy['scienceimage']['find_extrap_npoly'] = 1  # Use low order for trace extrapolation

        # Build the Calibrate object
        caliBrate = calibrations.Calibrations(None, self.par['calibrations'], self.spectrograph, None)
        caliBrate.slits = pseudo_dict['slits']


        redux=reduce.Reduce.get_instance(sciImage, self.spectrograph, parcopy, caliBrate,
                                         'science_coadd2d', ir_redux=self.ir_redux, det=self.det, show=show)
        #redux=reduce.Reduce.get_instance(sciImage, self.spectrograph, parcopy, pseudo_dict['slits'],
        #                                 None, None, 'science_coadd2d', ir_redux=self.ir_redux, det=self.det, show=show)
        # Set the tilts and waveimg attributes from the pseudo_dict here; normally these are
        # generated dynamically from fits files, but that is not possible for coadds
        redux.tilts = pseudo_dict['tilts']
        redux.waveimg = pseudo_dict['waveimg']
        redux.binning = self.binning

        # Masking
        #  TODO: Treat the masking of the slits objects from every exposure;
        #   build an aggregate mask (if a slit is masked in one exposure, mask
        #   it in all) and propagate it into the slits object in the
        #   pseudo_dict (see the sketch below).
        slits = self.stack_dict['slits_list'][0]
        reduce_bpm = (slits.mask > 0) & (np.invert(slits.bitmask.flagged(
            slits.mask, flag=slits.bitmask.exclude_for_reducing)))
        redux.reduce_bpm = reduce_bpm
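        # A sketch of the aggregate masking described in the TODO above: OR the
        # slit bitmasks from every exposure so that a slit masked in any exposure
        # is masked in all of them (illustrative, not existing behavior):
        #
        #   aggregate_mask = np.zeros_like(self.stack_dict['slits_list'][0].mask)
        #   for _slits in self.stack_dict['slits_list']:
        #       aggregate_mask |= _slits.mask
        #   pseudo_dict['slits'].mask = aggregate_mask.copy()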

        if show:
            redux.show('image', image=pseudo_dict['imgminsky']*(sciImage.fullmask == 0), chname = 'imgminsky', slits=True, clear=True)

        # TODO:
        #  Object finding: this appears unavoidable for the moment, since we need
        #  to be able to call find_objects outside of reduce. The solution is
        #  probably to create a method in reduce that performs the modified 2d
        #  coadd reduce.
        sobjs_obj, nobj, skymask_init = redux.find_objects(
            sciImage.image, show_peaks=show_peaks,
            manual_extract_dict=self.par['reduce']['extraction']['manual'].dict_for_objfind())

        # Local sky-subtraction
        global_sky_pseudo = np.zeros_like(pseudo_dict['imgminsky']) # No global sky for co-adds since we go straight to local
        skymodel_pseudo, objmodel_pseudo, ivarmodel_pseudo, outmask_pseudo, sobjs = redux.local_skysub_extract(
            global_sky_pseudo, sobjs_obj, spat_pix=pseudo_dict['spat_img'], model_noise=False,
            show_profile=show, show=show)

        if self.ir_redux:
            sobjs.purge_neg()

        # TODO: Removed this, but I'm not sure that's what you want...
#        # Add the information about the fixed wavelength grid to the sobjs
#        for spec in sobjs:
#            idx = spec.slit_orderindx
#            # Fill
#            spec.BOX_WAVE_GRID_MASK, spec.OPT_WAVE_GRID_MASK = [pseudo_dict['wave_mask'][:,idx]]*2
#            spec.BOX_WAVE_GRID, spec.OPT_WAVE_GRID = [pseudo_dict['wave_mid'][:,idx]]*2
#            spec.BOX_WAVE_GRID_MIN, spec.OPT_WAVE_GRID_MIN = [pseudo_dict['wave_min'][:,idx]]*2
#            spec.BOX_WAVE_GRID_MAX, spec.OPT_WAVE_GRID_MAX = [pseudo_dict['wave_max'][:,idx]]*2

        # Add the rest to the pseudo_dict
        pseudo_dict['skymodel'] = skymodel_pseudo
        pseudo_dict['objmodel'] = objmodel_pseudo
        pseudo_dict['ivarmodel'] = ivarmodel_pseudo
        pseudo_dict['outmask'] = outmask_pseudo
        pseudo_dict['sobjs'] = sobjs
        self.pseudo_dict=pseudo_dict

        return pseudo_dict['imgminsky'], pseudo_dict['sciivar'], skymodel_pseudo, \
               objmodel_pseudo, ivarmodel_pseudo, outmask_pseudo, sobjs, sciImage.detector, pseudo_dict['slits'], \
               pseudo_dict['tilts'], pseudo_dict['waveimg']