def show_alignment(alignframe, align_traces=None, slits=None, clear=False):
    """
    Show one of the class internals

    Parameters
    ----------
    alignframe : `numpy.ndarray`_
        Image to be plotted (i.e. the master align frame)
    align_traces : list, optional
        The align traces
    slits : :class:`pypeit.slittrace.SlitTraceSet`, optional
        Properties of the slits, including traces.
    clear : bool, optional
        Clear the plotting window in ginga?
    """
    display.connect_to_ginga(raise_err=True, allow_new=True)
    ch_name = 'alignment'
    viewer, channel = display.show_image(alignframe, chname=ch_name, clear=clear,
                                         wcs_match=False)

    # Display the slit edges
    if slits is not None and viewer is not None:
        left, right, mask = slits.select_edges()
        display.show_slits(viewer, channel, left, right)

    # Display the alignment traces
    if align_traces is not None and viewer is not None:
        for bar in range(align_traces.shape[1]):
            for slt in range(align_traces.shape[2]):
                # Alternate the colors of the slits
                color = 'orange'
                if slt % 2 == 0:
                    color = 'magenta'
                # Display the trace
                display.show_trace(viewer, channel, align_traces[:, bar, slt], trc_name="",
                                   color=color)
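
# Illustrative usage sketch (not part of the original module).  The `msalign` frame and
# the MasterSlits filename below are hypothetical stand-ins for products of a prior
# calibration step:
#
#   slits = slittrace.SlitTraceSet.from_file('MasterSlits_A_1_01.fits.gz')
#   show_alignment(msalign.image, align_traces=None, slits=slits, clear=True)
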
def process(self, par, bpm=None, flatimages=None, bias=None, slits=None, debug=False,
            dark=None):
    """
    Process the image

    Note:  The processing step order is currently 'frozen' as is.  We may choose to allow
    optional ordering.

    Here are the allowed steps, in the order they will be applied:

        subtract_overscan -- Analyze the overscan region and subtract from the image
        trim -- Trim the image down to the data (i.e. remove the overscan)
        orient -- Orient the image in the PypeIt orientation (spec, spat) with blue to red
                  going down to up
        subtract_bias -- Subtract a bias image
        apply_gain -- Convert to counts, amp by amp
        flatten -- Divide by the pixel flat and (if provided) the illumination flat
        extras -- Generate the RN2 and IVAR images
        crmask -- Generate a CR mask

    Args:
        par (:class:`pypeit.par.pypeitpar.ProcessImagesPar`):
            Parameters that dictate the processing of the images.  See
            :class:`pypeit.par.pypeitpar.ProcessImagesPar` for the defaults.
        bpm (`numpy.ndarray`_, optional):
            Bad pixel mask.
        flatimages (:class:`pypeit.flatfield.FlatImages`, optional):
            Flat-field images used for flat fielding.
        bias (`numpy.ndarray`_, optional):
            Bias image
        slits (:class:`pypeit.slittrace.SlitTraceSet`, optional):
            Used to calculate spatial flexure between the image and the slits
        debug (bool, optional):
            Show debugging plots?
        dark (`numpy.ndarray`_, optional):
            Dark image

    Returns:
        :class:`pypeit.images.pypeitimage.PypeItImage`:
    """
    self.par = par
    self._bpm = bpm

    # Get started
    # Standard order
    #   -- May need to allow for other order some day..
    if par['use_pattern']:  # Note, this step *must* be done before use_overscan
        self.subtract_pattern()
    if par['use_overscan']:
        self.subtract_overscan()
    if par['trim']:
        self.trim()
    if par['orient']:
        self.orient()
    if par['use_biasimage']:
        # Bias frame, if it exists, is *not* trimmed nor oriented
        self.subtract_bias(bias)
    if par['use_darkimage']:
        # Dark frame, if it exists, is
        # TODO:: check: trimmed, oriented (and oscan/bias subtracted?)
        self.subtract_dark(dark)
    if par['apply_gain']:
        self.apply_gain()

    # This needs to come after trim, orient
    # Calculate flexure -- May not be used, but always calculated when slits are provided
    if slits is not None and self.par['spat_flexure_correct']:
        self.spat_flexure_shift = flexure.spat_flexure_shift(self.image, slits)

    # Generate the illumination flat, as needed
    illum_flat = None
    if self.par['use_illumflat']:
        if flatimages is None:
            msgs.error("Cannot illumflatten, no such image generated.  Add one or more "
                       "illumflat images to your PypeIt file!!")
        if slits is None:
            msgs.error("Need to provide slits to create illumination flat")
        illum_flat = flatimages.fit2illumflat(slits, flexure_shift=self.spat_flexure_shift)
        if debug:
            left, right = slits.select_edges(flexure=self.spat_flexure_shift)
            viewer, ch = display.show_image(illum_flat, chname='illum_flat')
            display.show_slits(viewer, ch, left, right)  # , slits.id)
            #
            orig_image = self.image.copy()
            viewer, ch = display.show_image(orig_image, chname='orig_image')
            display.show_slits(viewer, ch, left, right)  # , slits.id)

    # Apply the relative spectral illumination
    spec_illum = 1.0
    if self.par['use_specillum']:
        if flatimages is None or flatimages.get_spec_illum() is None:
            msgs.error("Spectral illumination correction desired but not generated/provided.")
        else:
            spec_illum = flatimages.get_spec_illum().copy()

    # Flat field -- We cannot do illumination flat without a pixel flat (yet)
    if self.par['use_pixelflat'] or self.par['use_illumflat']:
        if flatimages is None or flatimages.get_pixelflat() is None:
            msgs.error("Flat fielding desired but not generated/provided.")
        else:
            self.flatten(flatimages.get_pixelflat() * spec_illum, illum_flat=illum_flat,
                         bpm=self.bpm)

    # Fresh BPM
    bpm = self.spectrograph.bpm(self.filename, self.det, shape=self.image.shape)

    # Extras
    self.build_rn2img()
    self.build_ivar()

    # Generate a PypeItImage
    pypeitImage = pypeitimage.PypeItImage(self.image, ivar=self.ivar, rn2img=self.rn2img,
                                          bpm=bpm, detector=self.detector,
                                          spat_flexure=self.spat_flexure_shift,
                                          PYP_SPEC=self.spectrograph.spectrograph)
    pypeitImage.rawheadlist = self.headarr
    pypeitImage.process_steps = [key for key in self.steps.keys() if self.steps[key]]

    # Mask(s)
    if par['mask_cr']:
        pypeitImage.build_crmask(self.par)
    #
    nonlinear_counts = self.spectrograph.nonlinear_counts(self.detector,
                                                          apply_gain=self.par['apply_gain'])
    # Build
    pypeitImage.build_mask(saturation=nonlinear_counts)

    # Return
    return pypeitImage
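
# Illustrative sketch (assumption): `process` is a method of a raw-image container, so a
# typical call might look like the following, where `rawImage`, `msbpm`, `msbias`,
# `flatimages`, and `slits` are hypothetical calibration products built elsewhere:
#
#   pypeitImage = rawImage.process(par['scienceframe']['process'], bpm=msbpm,
#                                  flatimages=flatimages, bias=msbias, slits=slits)
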
def main(args):

    # List only?
    if args.list:
        hdu = fits.open(args.file)
        hdu.info()
        return

    # Load it up -- NOTE WE ALLOW *OLD* VERSIONS TO GO FORTH
    spec2DObj = spec2dobj.Spec2DObj.from_file(args.file, args.det, chk_version=False)

    # Setup for PypeIt imports
    msgs.reset(verbosity=2)

    # Init
    # TODO: get_dnum needs to be deprecated...
    sdet = get_dnum(args.det, prefix=False)

    # Grab the slit edges
    slits = spec2DObj.slits
    if spec2DObj.sci_spat_flexure is not None:
        msgs.info("Offsetting slits by {}".format(spec2DObj.sci_spat_flexure))
    all_left, all_right, mask = slits.select_edges(flexure=spec2DObj.sci_spat_flexure)
    # TODO -- This may be too restrictive, i.e. ignore BADFLTCALIB??
    gpm = mask == 0
    left = all_left[:, gpm]
    right = all_right[:, gpm]
    slid_IDs = spec2DObj.slits.slitord_id[gpm]

    bitMask = ImageBitMask()

    # Object traces from spec1d file
    spec1d_file = args.file.replace('spec2d', 'spec1d')
    if args.file[-2:] == 'gz':
        spec1d_file = spec1d_file[:-3]
    if os.path.isfile(spec1d_file):
        sobjs = specobjs.SpecObjs.from_fitsfile(spec1d_file)
    else:
        sobjs = None
        msgs.warn('Could not find spec1d file: {:s}'.format(spec1d_file) + msgs.newline()
                  + '             No objects were extracted.')

    display.connect_to_ginga(raise_err=True, allow_new=True)

    # Now show each image to a separate channel

    # Show the bitmask?
    mask_in = None
    if args.showmask:
        viewer, ch = display.show_image(spec2DObj.bpmmask, chname="BPM",
                                        waveimg=spec2DObj.waveimg, clear=True)
        #bpm, crmask, satmask, minmask, offslitmask, nanmask, ivar0mask, ivarnanmask, extractmask

    # SCIIMG
    image = spec2DObj.sciimg  # Processed science image
    mean, med, sigma = sigma_clipped_stats(image[spec2DObj.bpmmask == 0], sigma_lower=5.0,
                                           sigma_upper=5.0)
    cut_min = mean - 1.0 * sigma
    cut_max = mean + 4.0 * sigma
    chname_sci = 'sciimg-det{:s}'.format(sdet)
    # Clear all channels at the beginning
    viewer, ch = display.show_image(image, chname=chname_sci, waveimg=spec2DObj.waveimg,
                                    clear=True)

    if sobjs is not None:
        show_trace(sobjs, args.det, viewer, ch)
    display.show_slits(viewer, ch, left, right, slit_ids=slid_IDs)

    # SKYSUB
    if args.ignore_extract_mask:
        # TODO -- Is there a cleaner way to do this?
        gpm = (spec2DObj.bpmmask == 0) | (spec2DObj.bpmmask == 2**bitMask.bits['EXTRACT'])
    else:
        gpm = spec2DObj.bpmmask == 0

    # sky subtracted image
    image = (spec2DObj.sciimg - spec2DObj.skymodel) * gpm  #(spec2DObj.mask == 0)
    mean, med, sigma = sigma_clipped_stats(image[spec2DObj.bpmmask == 0], sigma_lower=5.0,
                                           sigma_upper=5.0)
    cut_min = mean - 1.0 * sigma
    cut_max = mean + 4.0 * sigma
    chname_skysub = 'skysub-det{:s}'.format(sdet)
    # Clear all channels at the beginning
    # TODO: JFH For some reason Ginga crashes when I try to put cuts in here.
    viewer, ch = display.show_image(image, chname=chname_skysub, waveimg=spec2DObj.waveimg,
                                    bitmask=bitMask, mask=mask_in)
                                    #, cuts=(cut_min, cut_max), wcs_match=True)
    if not args.removetrace and sobjs is not None:
        show_trace(sobjs, args.det, viewer, ch)
    display.show_slits(viewer, ch, left, right, slit_ids=slid_IDs)

    # SKY RESIDS
    chname_skyresids = 'sky_resid-det{:s}'.format(sdet)
    # sky residual map
    image = (spec2DObj.sciimg - spec2DObj.skymodel) * np.sqrt(spec2DObj.ivarmodel) * gpm
                                                                #(spec2DObj.bpmmask == 0)
    viewer, ch = display.show_image(image, chname_skyresids, waveimg=spec2DObj.waveimg,
                                    cuts=(-5.0, 5.0), bitmask=bitMask, mask=mask_in)
    if not args.removetrace and sobjs is not None:
        show_trace(sobjs, args.det, viewer, ch)
    display.show_slits(viewer, ch, left, right, slit_ids=slid_IDs)

    # RESIDS
    chname_resids = 'resid-det{:s}'.format(sdet)
    # full model residual map
    image = (spec2DObj.sciimg - spec2DObj.skymodel - spec2DObj.objmodel) \
                * np.sqrt(spec2DObj.ivarmodel) * (spec2DObj.bpmmask == 0)
    viewer, ch = display.show_image(image, chname=chname_resids, waveimg=spec2DObj.waveimg,
                                    cuts=(-5.0, 5.0), bitmask=bitMask, mask=mask_in)
    if not args.removetrace and sobjs is not None:
        show_trace(sobjs, args.det, viewer, ch)
    display.show_slits(viewer, ch, left, right, slit_ids=slid_IDs)

    # After displaying all the images, sync up the images with WCSMatch
    shell = viewer.shell()
    shell.start_global_plugin('WCSMatch')
    shell.call_global_plugin_method('WCSMatch', 'set_reference_channel', [chname_resids], {})

    if args.embed:
        embed()
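
# Illustrative command-line sketch (assumed invocation of this script through a
# show-2dspec entry point; the flag spellings mirror the argparse attributes used above
# and the filename is hypothetical):
#
#   pypeit_show_2dspec spec2d_science_target.fits --det 1 --showmask
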
def main(args):

    tstart = time.time()

    # Read in the spectrograph, config the parset
    spectrograph = load_spectrograph('keck_mosfire')
    spectrograph_def_par = spectrograph.default_pypeit_par()
    parset = par.PypeItPar.from_cfg_lines(cfg_lines=spectrograph_def_par.to_config(),
                                          merge_with=config_lines(args))
    science_path = os.path.join(parset['rdx']['redux_path'], parset['rdx']['scidir'])

    # Parse the files, sort by MJD
    files = np.array([os.path.join(args.full_rawpath, file) for file in args.files])
    nfiles = len(files)
    target = spectrograph.get_meta_value(files[0], 'target')
    mjds = np.zeros(nfiles)
    for ifile, file in enumerate(files):
        mjds[ifile] = spectrograph.get_meta_value(file, 'mjd', ignore_bad_header=True,
                                                  no_fussing=True)
    files = files[np.argsort(mjds)]

    # Calibration Master directory
    master_dir = os.path.join(data.Paths.data, 'QL_MASTERS') \
                    if args.master_dir is None else args.master_dir
    if not os.path.isdir(master_dir):
        msgs.error(f'{master_dir} does not exist!  You must install the QL_MASTERS '
                   'directory; download the data from the PypeIt dev-suite Google Drive and '
                   'either define a QL_MASTERS environmental variable or use the '
                   'pypeit_install_ql_masters script.')

    # Define some hard wired master files here to be later parsed out of the directory
    mosfire_filter = spectrograph.get_meta_value(files[0], 'filter1')
    mosfire_masters = os.path.join(master_dir, 'MOSFIRE_MASTERS', mosfire_filter)

    slit_masterframe_name \
            = utils.find_single_file(os.path.join(mosfire_masters, "MasterSlits*"))
    tilts_masterframe_name \
            = utils.find_single_file(os.path.join(mosfire_masters, "MasterTilts*"))
    wvcalib_masterframe_name \
            = utils.find_single_file(os.path.join(mosfire_masters, 'MasterWaveCalib*'))
    std_spec1d_file = utils.find_single_file(os.path.join(mosfire_masters, 'spec1d_*'))
    sensfunc_masterframe_name = utils.find_single_file(os.path.join(mosfire_masters, 'sens_*'))

    if (slit_masterframe_name is None or not os.path.isfile(slit_masterframe_name)) or \
            (tilts_masterframe_name is None or not os.path.isfile(tilts_masterframe_name)) or \
            (sensfunc_masterframe_name is None
                or not os.path.isfile(sensfunc_masterframe_name)) or \
            (std_spec1d_file is None or not os.path.isfile(std_spec1d_file)):
        msgs.error('Master frames not found.  Check that environment variable QL_MASTERS '
                   'points at the Master Calibs')

    # Get detector (there's only one)
    det = 1  # MOSFIRE has a single detector
    detector = spectrograph.get_detector_par(det)
    detname = detector.name

    # We need the platescale
    platescale = detector['platescale']

    # Parse the offset information out of the headers.
    # TODO: in the future get this out of fitstable
    dither_pattern, dither_id, offset_arcsec = spectrograph.parse_dither_pattern(files)
    if len(np.unique(dither_pattern)) > 1:
        msgs.error('Script only supported for a single type of dither pattern.')
    A_files = files[dither_id == 'A']
    B_files = files[dither_id == 'B']
    nA = len(A_files)
    nB = len(B_files)

    # Print out a report on the offsets
    msg_string = msgs.newline() + '*******************************************************'
    msg_string += msgs.newline() + ' Summary of offsets for target {:s} with dither ' \
                                   'pattern: {:s}'.format(target, dither_pattern[0])
    msg_string += msgs.newline() + '*******************************************************'
    msg_string += msgs.newline() + 'filename     Position         arcsec    pixels    '
    msg_string += msgs.newline() + '----------------------------------------------------'
    for iexp, file in enumerate(files):
        msg_string += msgs.newline() + '    {:s}    {:s}   {:6.2f}    {:6.2f}'.format(
            os.path.basename(file), dither_id[iexp], offset_arcsec[iexp],
            offset_arcsec[iexp] / platescale)
    msg_string += msgs.newline() + '********************************************************'
    msgs.info(msg_string)

    #offset_dith_pix = offset_dith_pix = offset_arcsec_A[0]/sciImg.detector.platescale

    ## Read in the master frames that we need
    ##
    if std_spec1d_file is not None:
        # Get the standard trace if need be
        sobjs = specobjs.SpecObjs.from_fitsfile(std_spec1d_file, chk_version=False)
        this_det = sobjs.DET == detname
        if np.any(this_det):
            sobjs_det = sobjs[this_det]
            sobjs_std = sobjs_det.get_std()
            std_trace = None if sobjs_std is None else sobjs_std.TRACE_SPAT.flatten()
        else:
            std_trace = None
    else:
        std_trace = None

    # Read in the msbpm
    msbpm = spectrograph.bpm(A_files[0], det)
    # Read in the slits
    slits = slittrace.SlitTraceSet.from_file(slit_masterframe_name)
    # Reset the bitmask
    slits.mask = slits.mask_init.copy()
    # Read in the wv_calib
    wv_calib = wavecalib.WaveCalib.from_file(wvcalib_masterframe_name)
    #wv_calib.is_synced(slits)
    slits.mask_wvcalib(wv_calib)
    # Read in the tilts
    tilts_obj = wavetilts.WaveTilts.from_file(tilts_masterframe_name)
    tilts_obj.is_synced(slits)
    slits.mask_wavetilts(tilts_obj)

    # Build the Calibrate object
    caliBrate = calibrations.Calibrations(None, parset['calibrations'], spectrograph, None)
    caliBrate.det = det
    caliBrate.slits = slits
    caliBrate.msbpm = msbpm
    caliBrate.wavetilts = tilts_obj
    caliBrate.wv_calib = wv_calib
    caliBrate.binning = f'{slits.binspec},{slits.binspat}'

    # Find the unique throw absolute value, which defines each MASK_NOD sequence
    #uniq_offsets, _ = np.unique(offset_arcsec, return_inverse=True)
    spec2d_list = []
    offset_ref = offset_arcsec[0]
    offsets_dith_pix = []
    # Generalize to multiple slits, doing one slit at a time?
    islit = 0

    # Loop over the unique throws and create a spec2d_A and spec2d_B for each, which are
    # then fed into coadd2d with the correct offsets

    # TODO: Rework the logic here so that we can print out a unified report on what was
    # actually reduced.

    uniq_throws, uni_indx = np.unique(np.abs(offset_arcsec), return_inverse=True)
    # uniq_throws = unique values of the dither throw
    # uni_indx = indices into the uniq_throws array needed to reconstruct the original array
    nuniq = uniq_throws.size
    for iuniq in range(nuniq):
        A_ind = (uni_indx == iuniq) & (dither_id == 'A')
        B_ind = (uni_indx == iuniq) & (dither_id == 'B')
        A_files_uni = files[A_ind]
        A_dither_id_uni = dither_id[A_ind]
        B_dither_id_uni = dither_id[B_ind]
        B_files_uni = files[B_ind]
        A_offset = offset_arcsec[A_ind]
        B_offset = offset_arcsec[B_ind]
        throw = np.abs(A_offset[0])
        msgs.info('Reducing A-B pairs for throw = {:}'.format(throw))
        if (len(A_files_uni) > 0) & (len(B_files_uni) > 0):
            spec2DObj_A, spec2DObj_B = reduce_IR(A_files_uni, B_files_uni, caliBrate,
                                                 spectrograph, det, parset, show=args.show,
                                                 std_trace=std_trace)
            spec2d_list += [spec2DObj_A, spec2DObj_B]
            offsets_dith_pix += [(np.mean(A_offset) - offset_ref) / platescale,
                                 (np.mean(B_offset) - offset_ref) / platescale]
        else:
            warn_string = 'Skipping files that do not have an A-B match with the same throw:'
            for iexp in range(len(A_files_uni)):
                warn_string += msgs.newline() + '    {:s}    {:s}   {:6.2f}    {:6.2f}'.format(
                    os.path.basename(A_files_uni[iexp]), A_dither_id_uni[iexp],
                    A_offset[iexp], A_offset[iexp] / platescale)
            for iexp in range(len(B_files_uni)):
                warn_string += msgs.newline() + '    {:s}    {:s}   {:6.2f}    {:6.2f}'.format(
                    os.path.basename(B_files_uni[iexp]), B_dither_id_uni[iexp],
                    B_offset[iexp], B_offset[iexp] / platescale)
            msgs.warn(warn_string)

    offsets_dith_pix = np.array(offsets_dith_pix)
    #else:
    #    msgs.error('Unrecognized mode')

    if args.offset is not None:
        offsets_pixels = np.array([0.0, args.offset])
        msgs.info('Using user specified offsets instead: {:5.2f}'.format(args.offset))
    else:
        offsets_pixels = offsets_dith_pix

    # Instantiate Coadd2d
    coadd = coadd2d.CoAdd2D.get_instance(spec2d_list, spectrograph, parset, det=det,
                                         offsets=offsets_pixels, weights='uniform',
                                         spec_samp_fact=args.spec_samp_fact,
                                         spat_samp_fact=args.spat_samp_fact,
                                         bkg_redux=True, debug=args.show)

    # Coadd the slits
    # TODO: implement only_slits later
    coadd_dict_list = coadd.coadd(only_slits=None, interp_dspat=False)
    # Create the pseudo images
    pseudo_dict = coadd.create_pseudo_image(coadd_dict_list)

    # Multiply in a sensitivity function to flux the 2d image
    if args.flux:
        # Load the sensitivity function
        #wave_sens, sfunc, _, _, _ = sensfunc.SensFunc.load(sensfunc_masterframe_name)
        sens = sensfunc.SensFunc.from_file(sensfunc_masterframe_name)

        # Interpolate the sensitivity function onto the wavelength grid of the data.
        # Since the image is rectified this is trivial and we don't need to do a 2d
        # interpolation
        exptime = spectrograph.get_meta_value(files[0], 'exptime')
        sens_factor = flux_calib.get_sensfunc_factor(pseudo_dict['wave_mid'][:, islit],
                                                     sens.wave.flatten(),
                                                     sens.zeropoint.flatten(), exptime,
                                                     extrap_sens=True)
                                                     #parset['fluxcalib']['extrap_sens'])

        # Compute the median sensitivity and set the sensitivity to zero at locations
        # 100 times the median.  This prevents the 2d image from blowing up where the
        # sens_factor explodes because there is no throughput
        sens_gpm = sens_factor < 100.0 * np.median(sens_factor)
        sens_factor_masked = sens_factor * sens_gpm
        sens_factor_img = np.repeat(sens_factor_masked[:, np.newaxis], pseudo_dict['nspat'],
                                    axis=1)
        imgminsky = sens_factor_img * pseudo_dict['imgminsky']
        imgminsky_gpm = sens_gpm[:, np.newaxis] & pseudo_dict['inmask']
    else:
        imgminsky = pseudo_dict['imgminsky']
        imgminsky_gpm = pseudo_dict['inmask']

    ##########################
    # Now display the images #
    ##########################
    if not args.no_gui:
        display.connect_to_ginga(raise_err=True, allow_new=True)

        # TODO: Bug in ginga prevents me from using cuts here for some reason
        mean, med, sigma = sigma_clipped_stats(imgminsky[imgminsky_gpm], sigma_lower=3.0,
                                               sigma_upper=3.0)
        chname_skysub = f'fluxed-skysub-{detname.lower()}' \
                            if args.flux else f'skysub-{detname.lower()}'
        cuts_skysub = (med - 3.0 * sigma, med + 3.0 * sigma)
        cuts_resid = (-5.0, 5.0)
        #fits.writeto('/Users/joe/ginga_test.fits', imgminsky, overwrite=True)
        #fits.writeto('/Users/joe/ginga_mask.fits', imgminsky_gpm.astype(float), overwrite=True)
        #embed()

        # Clear all channels at the beginning
        # TODO: JFH For some reason Ginga crashes when I try to put cuts in here.
        viewer, ch_skysub = display.show_image(imgminsky, chname=chname_skysub,
                                               waveimg=pseudo_dict['waveimg'], clear=True,
                                               cuts=cuts_skysub)
        slit_left, slit_righ, _ = pseudo_dict['slits'].select_edges()
        slit_id = slits.slitord_id[0]
        display.show_slits(viewer, ch_skysub, slit_left, slit_righ, slit_ids=slit_id)

        # SKY RESIDS
        chname_skyresids = f'sky_resid-{detname.lower()}'
        # sky residual map
        image = pseudo_dict['imgminsky'] * np.sqrt(pseudo_dict['sciivar']) \
                    * pseudo_dict['inmask']
        viewer, ch_skyresids = display.show_image(image, chname_skyresids,
                                                  waveimg=pseudo_dict['waveimg'],
                                                  cuts=cuts_resid)
        display.show_slits(viewer, ch_skyresids, slit_left, slit_righ,
                           slit_ids=slits.slitord_id[0])
        shell = viewer.shell()
        out = shell.start_global_plugin('WCSMatch')
        out = shell.call_global_plugin_method('WCSMatch', 'set_reference_channel',
                                              [chname_skysub], {})

    # TODO: extract along a spatial position
    if args.writefits:
        head0 = fits.getheader(files[0])
        # TODO: use meta tools for the object name in the future.
        outfile = target + '_specXspat_{:3.2f}X{:3.2f}.fits'.format(args.spec_samp_fact,
                                                                    args.spat_samp_fact)
        hdu = fits.PrimaryHDU(imgminsky, header=head0)
        hdu_resid = fits.ImageHDU(pseudo_dict['imgminsky']
                                  * np.sqrt(pseudo_dict['sciivar']) * pseudo_dict['inmask'])
        hdu_wave = fits.ImageHDU(pseudo_dict['waveimg'])
        hdul = fits.HDUList([hdu, hdu_resid, hdu_wave])
        msgs.info('Writing sky subtracted image to {:s}'.format(outfile))
        hdul.writeto(outfile, overwrite=True)

    msgs.info(utils.get_time_string(time.time() - tstart))

    if args.embed:
        embed()

    return 0
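
# Illustrative command-line sketch (assumed invocation through the Keck/MOSFIRE
# quick-look entry point; the raw path and file names are hypothetical):
#
#   pypeit_ql_keck_mosfire /path/to/raw m191014_0170.fits m191014_0171.fits --show
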
def main(args):

    tstart = time.time()

    # Parse the files, sort by MJD
    files = np.array([os.path.join(args.full_rawpath, file) for file in args.files])
    nfiles = len(files)

    # Read in the spectrograph, config the parset
    spectrograph = load_spectrograph('vlt_fors2')
    #spectrograph_def_par = spectrograph.default_pypeit_par()
    spectrograph_cfg_lines = spectrograph.config_specific_par(files[0]).to_config()
    parset = par.PypeItPar.from_cfg_lines(cfg_lines=spectrograph_cfg_lines,
                                          merge_with=config_lines(args))
    science_path = os.path.join(parset['rdx']['redux_path'], parset['rdx']['scidir'])

    target = spectrograph.get_meta_value(files[0], 'target')
    mjds = np.zeros(nfiles)
    for ifile, file in enumerate(files):
        mjds[ifile] = spectrograph.get_meta_value(file, 'mjd', ignore_bad_header=True,
                                                  no_fussing=True)
    files = files[np.argsort(mjds)]

    # Calibration Master directory
    # TODO: hardwired for now
    master_dir = './'
    #master_dir = resource_filename('pypeit', 'data/QL_MASTERS') \
    #                if args.master_dir is None else args.master_dir
    if not os.path.isdir(master_dir):
        msgs.error(f'{master_dir} does not exist!  You must install the QL_MASTERS '
                   'directory; download the data from the PypeIt dev-suite Google Drive and '
                   'either define a QL_MASTERS environmental variable or use the '
                   'pypeit_install_ql_masters script.')

    # Define some hard wired master files here to be later parsed out of the directory
    fors2_grism = spectrograph.get_meta_value(files[0], 'dispname')
    fors2_masters = os.path.join(master_dir, 'FORS2_MASTERS', fors2_grism)

    bias_masterframe_name \
            = utils.find_single_file(os.path.join(fors2_masters, "MasterBias*"))
    slit_masterframe_name \
            = utils.find_single_file(os.path.join(fors2_masters, "MasterSlits*"))
    tilts_masterframe_name \
            = utils.find_single_file(os.path.join(fors2_masters, "MasterTilts*"))
    wvcalib_masterframe_name \
            = utils.find_single_file(os.path.join(fors2_masters, 'MasterWaveCalib*'))
    std_spec1d_file = utils.find_single_file(os.path.join(fors2_masters, 'spec1d_*'))
    sensfunc_masterframe_name = utils.find_single_file(os.path.join(fors2_masters, 'sens_*'))
    # TODO: make and implement sensfunc

    if (bias_masterframe_name is None or not os.path.isfile(bias_masterframe_name)) or \
            (slit_masterframe_name is None or not os.path.isfile(slit_masterframe_name)) or \
            (tilts_masterframe_name is None or not os.path.isfile(tilts_masterframe_name)) or \
            (std_spec1d_file is None or not os.path.isfile(std_spec1d_file)):
        # or (sensfunc_masterframe_name is None
        #     or not os.path.isfile(sensfunc_masterframe_name)):
        msgs.error('Master frames not found.  Check that environment variable QL_MASTERS '
                   'points at the Master Calibs')

    # Get detector (there's only one)
    #det = 1  # MOSFIRE has a single detector
    #detector = spectrograph.get_detector_par(det)
    #detname = detector.name

    # We need the platescale
    det_container = spectrograph.get_detector_par(1, hdu=fits.open(files[0]))
    binspectral, binspatial = parse_binning(det_container['binning'])
    platescale = det_container['platescale'] * binspatial

    # Parse the offset information out of the headers.
    _, _, offset_arcsec = spectrograph.parse_dither_pattern(files)

    # Print out a report on the offsets
    msg_string = msgs.newline() + '*******************************************************'
    msg_string += msgs.newline() + ' Summary of offsets for target {:s}:'.format(target)
    msg_string += msgs.newline() + '*******************************************************'
    msg_string += msgs.newline() + '           filename                arcsec   pixels'
    msg_string += msgs.newline() + '----------------------------------------------------'
    for iexp, file in enumerate(files):
        msg_string += msgs.newline() + '    {:s}    {:6.2f}    {:6.2f}'.format(
            os.path.basename(file), offset_arcsec[iexp], offset_arcsec[iexp] / platescale)
    msg_string += msgs.newline() + '********************************************************'
    msgs.info(msg_string)

    ## Read in the master frames that we need
    ##
    det = 1  # Currently only CHIP1 is supported
    if std_spec1d_file is not None:
        # Get the standard trace if need be
        sobjs = specobjs.SpecObjs.from_fitsfile(std_spec1d_file)
        this_det = sobjs.DET == det
        if np.any(this_det):
            sobjs_det = sobjs[this_det]
            sobjs_std = sobjs_det.get_std()
            std_trace = None if sobjs_std is None else sobjs_std.TRACE_SPAT.flatten()
        else:
            std_trace = None
    else:
        std_trace = None

    # Read in the bias
    msbias = buildimage.BiasImage.from_file(bias_masterframe_name)
    # Read in the msbpm
    sdet = get_dnum(det, prefix=False)
    msbpm = spectrograph.bpm(files[0], det)
    # Read in the slits
    slits = slittrace.SlitTraceSet.from_file(slit_masterframe_name)
    # Reset the bitmask
    slits.mask = slits.mask_init.copy()
    # Read in the wv_calib
    wv_calib = wavecalib.WaveCalib.from_file(wvcalib_masterframe_name)
    #wv_calib.is_synced(slits)
    slits.mask_wvcalib(wv_calib)
    # Read in the tilts
    tilts_obj = wavetilts.WaveTilts.from_file(tilts_masterframe_name)
    tilts_obj.is_synced(slits)
    slits.mask_wavetilts(tilts_obj)

    # Build the Calibrate object
    caliBrate = calibrations.Calibrations(None, parset['calibrations'], spectrograph, None)
    caliBrate.msbias = msbias
    caliBrate.msbpm = msbpm
    caliBrate.slits = slits
    caliBrate.wavetilts = tilts_obj
    caliBrate.wv_calib = wv_calib

    # Find the unique offsets.  This is a bit of a kludge, i.e. we are considering offsets
    # within 0.1 arcsec of each other to be the same throw.  I would like to be able to
    # specify a tolerance here, but then I need a version of unique that accepts a
    # tolerance.
    uniq_offsets, uni_indx = np.unique(np.around(offset_arcsec), return_inverse=True)
    nuniq = uniq_offsets.size
    spec2d_list = []
    offset_ref = offset_arcsec[0]
    offsets_dith_pix = []
    # Generalize to multiple slits, doing one slit at a time?
    islit = 0

    # Loop over the unique throws and create a spec2d_A and spec2d_B for each, which are
    # then fed into coadd2d with the correct offsets

    # TODO: Rework the logic here so that we can print out a unified report on what was
    # actually reduced.

    for iuniq in range(nuniq):
        indx = uni_indx == iuniq
        files_uni = files[indx]
        offsets = offset_arcsec[indx]
        msgs.info('Reducing images for offset = {:}'.format(offsets[0]))
        spec2DObj = run(files_uni, caliBrate, spectrograph, det, parset, show=args.show,
                        std_trace=std_trace)
        spec2d_list += [spec2DObj]
        offsets_dith_pix += [np.mean(offsets) / platescale]

    offsets_dith_pix = np.array(offsets_dith_pix)

    if args.offset is not None:
        offsets_pixels = np.array([0.0, args.offset])
        msgs.info('Using user specified offsets instead: {:5.2f}'.format(args.offset))
    else:
        offsets_pixels = offsets_dith_pix

    # Instantiate Coadd2d
    coadd = coadd2d.CoAdd2D.get_instance(spec2d_list, spectrograph, parset, det=det,
                                         offsets=offsets_pixels, weights='uniform',
                                         spec_samp_fact=args.spec_samp_fact,
                                         spat_samp_fact=args.spat_samp_fact,
                                         ir_redux=True, debug=args.show)

    # Coadd the slits
    # TODO: implement only_slits later
    coadd_dict_list = coadd.coadd(only_slits=None, interp_dspat=False)
    # Create the pseudo images
    pseudo_dict = coadd.create_pseudo_image(coadd_dict_list)

    # Multiply in a sensitivity function to flux the 2d image
    if args.flux:
        # Load the sensitivity function
        #wave_sens, sfunc, _, _, _ = sensfunc.SensFunc.load(sensfunc_masterframe_name)
        sens = sensfunc.SensFunc.from_file(sensfunc_masterframe_name)

        # Interpolate the sensitivity function onto the wavelength grid of the data.
        # Since the image is rectified this is trivial and we don't need to do a 2d
        # interpolation
        exptime = spectrograph.get_meta_value(files[0], 'exptime')
        sens_factor = flux_calib.get_sensfunc_factor(
            pseudo_dict['wave_mid'][:, islit], sens.wave, sens.zeropoint, exptime,
            extrap_sens=parset['fluxcalib']['extrap_sens'])

        # Compute the median sensitivity and set the sensitivity to zero at locations
        # 100 times the median.  This prevents the 2d image from blowing up where the
        # sens_factor explodes because there is no throughput
        sens_gpm = sens_factor < 100.0 * np.median(sens_factor)
        sens_factor_masked = sens_factor * sens_gpm
        sens_factor_img = np.repeat(sens_factor_masked[:, np.newaxis], pseudo_dict['nspat'],
                                    axis=1)
        imgminsky = sens_factor_img * pseudo_dict['imgminsky']
        imgminsky_gpm = sens_gpm[:, np.newaxis] & pseudo_dict['inmask']
    else:
        imgminsky = pseudo_dict['imgminsky']
        imgminsky_gpm = pseudo_dict['inmask']

    ##########################
    # Now display the images #
    ##########################
    if not args.no_gui:
        display.connect_to_ginga(raise_err=True, allow_new=True)

        # TODO: Bug in ginga prevents me from using cuts here for some reason
        mean, med, sigma = sigma_clipped_stats(imgminsky[imgminsky_gpm], sigma_lower=3.0,
                                               sigma_upper=3.0)
        chname_skysub = 'fluxed-skysub-det{:s}'.format(sdet) \
                            if args.flux else 'skysub-det{:s}'.format(sdet)
        cuts_skysub = (med - 3.0 * sigma, med + 3.0 * sigma)
        cuts_resid = (-5.0, 5.0)
        #fits.writeto('/Users/joe/ginga_test.fits', imgminsky, overwrite=True)
        #fits.writeto('/Users/joe/ginga_mask.fits', imgminsky_gpm.astype(float), overwrite=True)
        #embed()

        # Clear all channels at the beginning
        # TODO: JFH For some reason Ginga crashes when I try to put cuts in here.
        viewer, ch_skysub = display.show_image(imgminsky, chname=chname_skysub,
                                               waveimg=pseudo_dict['waveimg'], clear=True,
                                               cuts=cuts_skysub)
        slit_left, slit_righ, _ = pseudo_dict['slits'].select_edges()
        slit_id = slits.slitord_id[0]
        display.show_slits(viewer, ch_skysub, slit_left, slit_righ, slit_ids=slit_id)

        # SKY RESIDS
        chname_skyresids = 'sky_resid-det{:s}'.format(sdet)
        # sky residual map
        image = pseudo_dict['imgminsky'] * np.sqrt(pseudo_dict['sciivar']) \
                    * pseudo_dict['inmask']
        viewer, ch_skyresids = display.show_image(image, chname_skyresids,
                                                  waveimg=pseudo_dict['waveimg'],
                                                  cuts=cuts_resid)
        display.show_slits(viewer, ch_skyresids, slit_left, slit_righ,
                           slit_ids=slits.slitord_id[0])
        shell = viewer.shell()
        out = shell.start_global_plugin('WCSMatch')
        out = shell.call_global_plugin_method('WCSMatch', 'set_reference_channel',
                                              [chname_skysub], {})

    # TODO: extract along a spatial position
    if args.writefits:
        head0 = fits.getheader(files[0])
        # TODO: use meta tools for the object name in the future.
        outfile = target + '_specXspat_{:3.2f}X{:3.2f}.fits'.format(args.spec_samp_fact,
                                                                    args.spat_samp_fact)
        hdu = fits.PrimaryHDU(imgminsky, header=head0)
        hdu_resid = fits.ImageHDU(pseudo_dict['imgminsky']
                                  * np.sqrt(pseudo_dict['sciivar']) * pseudo_dict['inmask'])
        hdu_wave = fits.ImageHDU(pseudo_dict['waveimg'])
        hdul = fits.HDUList([hdu, hdu_resid, hdu_wave])
        msgs.info('Writing sky subtracted image to {:s}'.format(outfile))
        hdul.writeto(outfile, overwrite=True)

    msgs.info(utils.get_time_string(time.time() - tstart))

    if args.embed:
        embed()

    return 0
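
# Illustrative command-line sketch (the FORS2 quick-look entry-point name and flags are
# assumptions based on the argparse attributes used above; file names are hypothetical):
#
#   pypeit_ql_vlt_fors2 /path/to/raw FORS2_science_0001.fits FORS2_science_0002.fits --writefits
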
def show(self, attr, image=None, showmask=False, sobjs=None, chname=None, slits=False,
         clear=False):
    """
    Show one of the internal images

    .. todo::
        Should probably put some of these in ProcessImages

    Parameters
    ----------
    attr : str
        Which image to show.  The options currently handled are:

            - ``'global'`` -- Science image with the global sky subtracted
            - ``'local'`` -- Science image with the local sky model subtracted
            - ``'sky_resid'`` -- Sky residual map (object included)
            - ``'resid'`` -- Full model residual map (sky and object subtracted)
            - ``'image'`` -- User-supplied input image
    image : ndarray, optional
        User supplied image to display (used with ``attr='image'``)
    showmask : bool, optional
        Overlay the image bitmask?
    sobjs : :class:`pypeit.specobjs.SpecObjs`, optional
        Show the traces of these objects
    chname : str, optional
        Name of the ginga channel to use
    slits : bool, optional
        Overlay the slit edges?
    clear : bool, optional
        Clear the plotting window in ginga?
    """
    if showmask:
        mask_in = self.sciImg.fullmask
        bitmask_in = self.sciImg.bitmask
    else:
        mask_in = None
        bitmask_in = None

    img_gpm = self.sciImg.select_flag(invert=True)
    detname = self.spectrograph.get_det_name(self.det)

    # TODO Do we still need this here?
    if attr == 'global' and all([a is not None for a in [self.sciImg.image, self.global_sky,
                                                         self.sciImg.fullmask]]):
        # global sky subtraction
        # sky subtracted image
        image = (self.sciImg.image - self.global_sky) * img_gpm.astype(float)
        mean, med, sigma = stats.sigma_clipped_stats(image[img_gpm], sigma_lower=5.0,
                                                     sigma_upper=5.0)
        cut_min = mean - 1.0 * sigma
        cut_max = mean + 4.0 * sigma
        ch_name = chname if chname is not None else f'global_sky_{detname}'
        viewer, ch = display.show_image(image, chname=ch_name, bitmask=bitmask_in,
                                        mask=mask_in, clear=clear, wcs_match=True)
                                        #, cuts=(cut_min, cut_max))
    elif attr == 'local' and all([a is not None for a in [self.sciImg.image, self.skymodel,
                                                          self.sciImg.fullmask]]):
        # local sky subtraction
        # sky subtracted image
        image = (self.sciImg.image - self.skymodel) * img_gpm.astype(float)
        mean, med, sigma = stats.sigma_clipped_stats(image[img_gpm], sigma_lower=5.0,
                                                     sigma_upper=5.0)
        cut_min = mean - 1.0 * sigma
        cut_max = mean + 4.0 * sigma
        ch_name = chname if chname is not None else f'local_sky_{detname}'
        viewer, ch = display.show_image(image, chname=ch_name, bitmask=bitmask_in,
                                        mask=mask_in, clear=clear, wcs_match=True)
                                        #, cuts=(cut_min, cut_max))
    elif attr == 'sky_resid' and all([a is not None for a in [self.sciImg.image,
                                                              self.skymodel, self.objmodel,
                                                              self.ivarmodel,
                                                              self.sciImg.fullmask]]):
        # sky residual map with the object included
        image = (self.sciImg.image - self.skymodel) * np.sqrt(self.ivarmodel)
        image *= img_gpm.astype(float)
        ch_name = chname if chname is not None else f'sky_resid_{detname}'
        viewer, ch = display.show_image(image, chname=ch_name, cuts=(-5.0, 5.0),
                                        bitmask=bitmask_in, mask=mask_in, clear=clear,
                                        wcs_match=True)
    elif attr == 'resid' and all([a is not None for a in [self.sciImg.image, self.skymodel,
                                                          self.objmodel, self.ivarmodel,
                                                          self.sciImg.fullmask]]):
        # full model residual map with the object model subtracted
        image = (self.sciImg.image - self.skymodel - self.objmodel) * np.sqrt(self.ivarmodel)
        image *= img_gpm.astype(float)
        ch_name = chname if chname is not None else f'resid_{detname}'
        viewer, ch = display.show_image(image, chname=ch_name, cuts=(-5.0, 5.0),
                                        bitmask=bitmask_in, mask=mask_in, clear=clear,
                                        wcs_match=True)
    elif attr == 'image':
        ch_name = chname if chname is not None else 'image'
        viewer, ch = display.show_image(image, chname=ch_name, clear=clear, wcs_match=True)
    else:
        msgs.warn("Not an option for show")

    if sobjs is not None:
        for spec in sobjs:
            color = 'magenta' if spec.hand_extract_flag else 'orange'
            display.show_trace(viewer, ch, spec.TRACE_SPAT, spec.NAME, color=color)

    if slits and self.slits_left is not None:
        display.show_slits(viewer, ch, self.slits_left, self.slits_right)
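
# Illustrative usage sketch (assumption): this method lives on a reduction object (here
# called `reduce_obj`) that already has a processed science image and models attached:
#
#   reduce_obj.show('sky_resid', sobjs=sobjs, slits=True, clear=True)
#   reduce_obj.show('resid', sobjs=sobjs, slits=True)
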
def main(args):

    # List only?
    if args.list:
        io.fits_open(args.file).info()
        return

    # Parse the detector name
    try:
        det = int(args.det)
    except ValueError:
        detname = args.det
    else:
        detname = DetectorContainer.get_name(det)

    # Load it up -- NOTE WE ALLOW *OLD* VERSIONS TO GO FORTH
    spec2DObj = spec2dobj.Spec2DObj.from_file(args.file, detname, chk_version=False)

    # Use the appropriate class to get the "detector" number
    det = spec2DObj.detector.parse_name(detname)

    # Setup for PypeIt imports
    msgs.reset(verbosity=args.verbosity)

    # Find the set of channels to show
    if args.channels is not None:
        show_channels = [int(item) for item in args.channels.split(',')]
    else:
        show_channels = [0, 1, 2, 3]

    # Grab the slit edges
    slits = spec2DObj.slits
    if spec2DObj.sci_spat_flexure is not None:
        msgs.info("Offsetting slits by {}".format(spec2DObj.sci_spat_flexure))
    all_left, all_right, mask = slits.select_edges(flexure=spec2DObj.sci_spat_flexure)
    # TODO -- This may be too restrictive, i.e. ignore BADFLTCALIB??
    gpm = mask == 0
    left = all_left[:, gpm]
    right = all_right[:, gpm]
    slid_IDs = spec2DObj.slits.slitord_id[gpm]
    maskdef_id = None if spec2DObj.slits.maskdef_id is None \
                    else spec2DObj.slits.maskdef_id[gpm]

    bitMask = ImageBitMask()

    # Object traces from spec1d file
    spec1d_file = args.file.replace('spec2d', 'spec1d')
    if args.file[-2:] == 'gz':
        spec1d_file = spec1d_file[:-3]
    if os.path.isfile(spec1d_file):
        sobjs = specobjs.SpecObjs.from_fitsfile(spec1d_file, chk_version=False)
    else:
        sobjs = None
        msgs.warn('Could not find spec1d file: {:s}'.format(spec1d_file) + msgs.newline()
                  + '             No objects were extracted.')

    display.connect_to_ginga(raise_err=True, allow_new=True)

    # Now show each image to a separate channel

    # Show the bitmask?
    mask_in = None
    if args.showmask:
        viewer, ch_mask = display.show_image(spec2DObj.bpmmask, chname="BPM",
                                             waveimg=spec2DObj.waveimg, clear=args.clear)

    channel_names = []

    # SCIIMG
    if 0 in show_channels:
        image = spec2DObj.sciimg  # Processed science image
        mean, med, sigma = sigma_clipped_stats(image[spec2DObj.bpmmask == 0],
                                               sigma_lower=5.0, sigma_upper=5.0)
        cut_min = mean - 1.0 * sigma
        cut_max = mean + 4.0 * sigma
        chname_sci = args.prefix + f'sciimg-{detname}'
        # Clear all channels at the beginning
        viewer, ch_sci = display.show_image(image, chname=chname_sci,
                                            waveimg=spec2DObj.waveimg, clear=args.clear,
                                            cuts=(cut_min, cut_max))
        if sobjs is not None:
            show_trace(sobjs, detname, viewer, ch_sci)
        display.show_slits(viewer, ch_sci, left, right, slit_ids=slid_IDs,
                           maskdef_ids=maskdef_id)
        channel_names.append(chname_sci)

    # SKYSUB
    if 1 in show_channels:
        if args.ignore_extract_mask:
            # TODO -- Is there a cleaner way to do this?
            gpm = (spec2DObj.bpmmask == 0) | (spec2DObj.bpmmask == 2**bitMask.bits['EXTRACT'])
        else:
            gpm = spec2DObj.bpmmask == 0

        image = (spec2DObj.sciimg - spec2DObj.skymodel) * gpm
        mean, med, sigma = sigma_clipped_stats(image[spec2DObj.bpmmask == 0],
                                               sigma_lower=5.0, sigma_upper=5.0)
        cut_min = mean - 1.0 * sigma
        cut_max = mean + 4.0 * sigma
        chname_skysub = args.prefix + f'skysub-{detname}'
        viewer, ch_skysub = display.show_image(image, chname=chname_skysub,
                                               waveimg=spec2DObj.waveimg, bitmask=bitMask,
                                               mask=mask_in, cuts=(cut_min, cut_max),
                                               wcs_match=True)
        if not args.removetrace and sobjs is not None:
            show_trace(sobjs, detname, viewer, ch_skysub)
        display.show_slits(viewer, ch_skysub, left, right, slit_ids=slid_IDs,
                           maskdef_ids=maskdef_id)
        channel_names.append(chname_skysub)

    # TODO: Place holder for putting in sensfunc
    #if args.sensfunc:
    #    # Load the sensitivity function
    #    wave_sens, sfunc, _, _, _ = sensfunc.SensFunc.load(sensfunc_masterframe_name)
    #    # Interpolate the sensitivity function onto the wavelength grid of the data.
    #    # Since the image is rectified this is trivial and we don't need to do a 2d
    #    # interpolation
    #    sens_factor = flux_calib.get_sensfunc_factor(
    #        pseudo_dict['wave_mid'][:, islit], wave_sens, sfunc,
    #        fits.getheader(files[0])['TRUITIME'],
    #        extrap_sens=parset['fluxcalib']['extrap_sens'])
    #    # Compute the median sensitivity and set the sensitivity to zero at locations 100
    #    # times the median.  This prevents the 2d image from blowing up where the
    #    # sens_factor explodes because there is no throughput
    #    sens_gpm = sens_factor < 100.0*np.median(sens_factor)
    #    sens_factor_masked = sens_factor*sens_gpm
    #    sens_factor_img = np.repeat(sens_factor_masked[:, np.newaxis],
    #                                pseudo_dict['nspat'], axis=1)
    #    imgminsky = sens_factor_img*pseudo_dict['imgminsky']
    #    imgminsky_gpm = sens_gpm[:, np.newaxis] & pseudo_dict['inmask']
    #else:
    #    imgminsky = pseudo_dict['imgminsky']

    # SKY RESIDS
    if 2 in show_channels:
        # The block below is repeated because showing this channel without channel 1
        # would otherwise crash (gpm would be undefined)
        if args.ignore_extract_mask:
            # TODO -- Is there a cleaner way to do this?
            gpm = (spec2DObj.bpmmask == 0) | (spec2DObj.bpmmask == 2**bitMask.bits['EXTRACT'])
        else:
            gpm = spec2DObj.bpmmask == 0
        chname_skyresids = args.prefix + f'sky_resid-{detname}'
        # sky residual map
        image = (spec2DObj.sciimg - spec2DObj.skymodel) * np.sqrt(spec2DObj.ivarmodel) * gpm
        viewer, ch_sky_resids = display.show_image(image, chname_skyresids,
                                                   waveimg=spec2DObj.waveimg,
                                                   cuts=(-5.0, 5.0), bitmask=bitMask,
                                                   mask=mask_in)
        if not args.removetrace and sobjs is not None:
            show_trace(sobjs, detname, viewer, ch_sky_resids)
        display.show_slits(viewer, ch_sky_resids, left, right, slit_ids=slid_IDs,
                           maskdef_ids=maskdef_id)
        channel_names.append(chname_skyresids)

    # RESIDS
    if 3 in show_channels:
        chname_resids = args.prefix + f'resid-{detname}'
        # full model residual map
        image = (spec2DObj.sciimg - spec2DObj.skymodel - spec2DObj.objmodel) \
                    * np.sqrt(spec2DObj.ivarmodel) * (spec2DObj.bpmmask == 0)
        viewer, ch_resids = display.show_image(image, chname=chname_resids,
                                               waveimg=spec2DObj.waveimg, cuts=(-5.0, 5.0),
                                               bitmask=bitMask, mask=mask_in, wcs_match=True)
        if not args.removetrace and sobjs is not None:
            show_trace(sobjs, detname, viewer, ch_resids)
        display.show_slits(viewer, ch_resids, left, right, slit_ids=slid_IDs,
                           maskdef_ids=maskdef_id)
        channel_names.append(chname_resids)

    # After displaying all the images, sync up the images with WCSMatch
    shell = viewer.shell()
    shell.start_global_plugin('WCSMatch')
    shell.call_global_plugin_method('WCSMatch', 'set_reference_channel',
                                    [channel_names[-1]], {})

    if args.embed:
        embed()
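
# Note on the residual channels above: the 'sky_resid' and 'resid' images are chi maps
# (data minus model, scaled by the model sigma), so a well-behaved reduction should
# scatter around zero with roughly unit variance, hence the fixed cuts of (-5.0, 5.0).
# A quick sanity check, sketched here with `spec2DObj` as loaded above:
#
#   chi = (spec2DObj.sciimg - spec2DObj.skymodel - spec2DObj.objmodel) \
#             * np.sqrt(spec2DObj.ivarmodel)
#   print(np.std(chi[spec2DObj.bpmmask == 0]))   # ~1 for a good model
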