def main(args):
    """ Executes telluric correction. """

    import os

    from astropy.io import fits

    from pypeit import msgs
    from pypeit import io
    from pypeit import data
    from pypeit.par import pypeitpar
    from pypeit.spectrographs.util import load_spectrograph
    from pypeit.core import telluric

    # Determine the spectrograph
    header = fits.getheader(args.spec1dfile)
    spectrograph = load_spectrograph(header['PYP_SPEC'])
    spectrograph_def_par = spectrograph.default_pypeit_par()

    # If the .tell file was passed in, read it and overwrite the default parameters
    par = spectrograph_def_par if args.tell_file is None else \
            pypeitpar.PypeItPar.from_cfg_lines(cfg_lines=spectrograph_def_par.to_config(),
                                               merge_with=io.read_tellfile(args.tell_file))

    # If args were provided, override the defaults.  Note this undoes the .tell file
    if args.objmodel is not None:
        par['telluric']['objmodel'] = args.objmodel
    if args.pca_file is not None:
        par['telluric']['pca_file'] = args.pca_file
    if args.redshift is not None:
        par['telluric']['redshift'] = args.redshift

    if args.tell_grid is not None:
        par['telluric']['telgridfile'] = args.tell_grid

    if par['telluric']['telgridfile'] is None:
        if par['sensfunc']['IR']['telgridfile'] is not None:
            par['telluric']['telgridfile'] = par['sensfunc']['IR']['telgridfile']
        else:
            par['telluric']['telgridfile'] = 'TelFit_MaunaKea_3100_26100_R20000.fits'
            msgs.warn(f"No telluric grid file given. Using {par['telluric']['telgridfile']}.")

    # Checks
    if par['telluric']['telgridfile'] is None:
        msgs.error('A file with the telluric grid must be provided.')
    elif not os.path.isfile(os.path.join(data.Paths.telgrid, par['telluric']['telgridfile'])):
        msgs.error(f"{par['telluric']['telgridfile']} does not exist. Check your installation.")

    # Write the par to disk
    # TODO: Make it optional to write this file?  Is the relevant metadata saved
    # to the main output file?
    msgs.info(f'Writing the telluric fitting parameters to {args.par_outfile}')
    par['telluric'].to_config(args.par_outfile, section_name='telluric', include_descr=False)

    # Parse the output filenames
    outfile = (os.path.basename(args.spec1dfile)).replace('.fits', '_tellcorr.fits')
    modelfile = (os.path.basename(args.spec1dfile)).replace('.fits', '_tellmodel.fits')
    msgs.info(f'Telluric-corrected spectrum will be saved to: {outfile}.')
    msgs.info(f'Best-fit telluric model will be saved to: {modelfile}.')

    # Run the telluric fitting procedure.
    if par['telluric']['objmodel'] == 'qso':
        # run telluric.qso_telluric to get the final results
        TelQSO = telluric.qso_telluric(args.spec1dfile, par['telluric']['telgridfile'],
                                       par['telluric']['pca_file'], par['telluric']['redshift'],
                                       modelfile, outfile,
                                       npca=par['telluric']['npca'],
                                       pca_lower=par['telluric']['pca_lower'],
                                       pca_upper=par['telluric']['pca_upper'],
                                       bounds_norm=par['telluric']['bounds_norm'],
                                       tell_norm_thresh=par['telluric']['tell_norm_thresh'],
                                       only_orders=par['telluric']['only_orders'],
                                       bal_wv_min_max=par['telluric']['bal_wv_min_max'],
                                       maxiter=par['telluric']['maxiter'],
                                       debug_init=args.debug, disp=args.debug,
                                       debug=args.debug, show=args.plot)
    elif par['telluric']['objmodel'] == 'star':
        TelStar = telluric.star_telluric(args.spec1dfile, par['telluric']['telgridfile'],
                                         modelfile, outfile,
                                         star_type=par['telluric']['star_type'],
                                         star_mag=par['telluric']['star_mag'],
                                         star_ra=par['telluric']['star_ra'],
                                         star_dec=par['telluric']['star_dec'],
                                         func=par['telluric']['func'],
                                         model=par['telluric']['model'],
                                         polyorder=par['telluric']['polyorder'],
                                         only_orders=par['telluric']['only_orders'],
                                         mask_abs_lines=par['telluric']['mask_abs_lines'],
                                         delta_coeff_bounds=par['telluric']['delta_coeff_bounds'],
                                         minmax_coeff_bounds=par['telluric']['minmax_coeff_bounds'],
                                         maxiter=par['telluric']['maxiter'],
                                         debug_init=args.debug, disp=args.debug,
                                         debug=args.debug, show=args.plot)
    elif par['telluric']['objmodel'] == 'poly':
        TelPoly = telluric.poly_telluric(args.spec1dfile, par['telluric']['telgridfile'],
                                         modelfile, outfile,
                                         z_obj=par['telluric']['redshift'],
                                         func=par['telluric']['func'],
                                         model=par['telluric']['model'],
                                         polyorder=par['telluric']['polyorder'],
                                         fit_wv_min_max=par['telluric']['fit_wv_min_max'],
                                         mask_lyman_a=par['telluric']['mask_lyman_a'],
                                         delta_coeff_bounds=par['telluric']['delta_coeff_bounds'],
                                         minmax_coeff_bounds=par['telluric']['minmax_coeff_bounds'],
                                         only_orders=par['telluric']['only_orders'],
                                         maxiter=par['telluric']['maxiter'],
                                         debug_init=args.debug, disp=args.debug,
                                         debug=args.debug, show=args.plot)
    else:
        msgs.error("Object model is not supported yet. Must be 'qso', 'star', or 'poly'.")
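# For reference, a minimal .tell file that io.read_tellfile() could parse and merge
# into the defaults above might look like the block below.  This is a sketch: the
# parameter names mirror the 'telluric' parameter set used in this script, but the
# values are hypothetical.
#
#   [telluric]
#       objmodel = qso
#       redshift = 6.0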
        newcoords_tr = newcoords.transpose(newcoords_dims)  # makes a view that affects newcoords
        newcoords_tr += ofs
        deltas = (np.asarray(old) - m1) / (newdims - m1)
        newcoords_tr *= deltas
        newcoords_tr -= ofs
        newa = ndimage.map_coordinates(a, newcoords)
        return newa
    else:
        print("Congrid error: Unrecognized interpolation type. Currently only 'neighbour', "
              "'nearest', 'linear', and 'spline' are supported.")
        return None


TSlits = TraceSlits(None, None)
masterfile = '/Users/joe/python/PypeIt-development-suite/REDUX_OUT_old/Keck_NIRES/NIRES/MF_keck_nires/MasterTrace_A_15_01'
tset_slits = TSlits.load_master(masterfile)
spectrograph = load_spectrograph('keck_nires')
slitmask_orig = spectrograph.slitmask(tset_slits)
slitmask_orig = slitmask_orig[1:, 1:]
#slitmask_new = (np.round(congrid(slitmask_orig.astype(np.float64), (2048, 2048),
#                                 method='neighbour'))).astype(slitmask_orig.dtype)
newshape = (2047*2, 1023)
slitmask_new = rebin(slitmask_orig, newshape)
slitmask_old = rebin(slitmask_new, slitmask_orig.shape)
# NOTE: np.integer is an abstract type and not a valid astype() target; use int
slitmask_new1 = ((np.round(resize(slitmask_orig.astype(int), newshape, preserve_range=True,
                                  order=0))).astype(int)).astype(slitmask_orig.dtype)
slitmask_old1 = ((np.round(resize(slitmask_new1.astype(int), slitmask_orig.shape,
                                  preserve_range=True, order=0))).astype(int)).astype(slitmask_orig.dtype)
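# The rebin() called above is not defined in this excerpt.  A minimal sketch that
# matches its call signature, assuming nearest-neighbour sampling onto an arbitrary
# new shape (the classic SciPy-cookbook approach):
import numpy as np

def rebin(a, newshape):
    """Rebin array `a` to `newshape` using nearest-neighbour sampling."""
    assert len(a.shape) == len(newshape)
    # Fractional coordinates of each output pixel in the input frame
    slices = [slice(0, old, float(old)/new) for old, new in zip(a.shape, newshape)]
    coordinates = np.mgrid[tuple(slices)]
    indices = coordinates.astype(int)  # truncate to the nearest input pixel
    return a[tuple(indices)]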
def load_coadd2d_stacks(spec2d_files, det):
    """
    Args:
        spec2d_files: list
           List of spec2d filenames
        det: int
           detector in question

    Returns:
        stack_dict: dict
           Dictionary containing all the images and keys required for performing 2d coadds.
    """
    # Get the detector string
    sdet = parse.get_dnum(det, prefix=False)

    # Get the master dir
    head0 = fits.getheader(spec2d_files[0])
    master_dir = os.path.basename(head0['PYPMFDIR'])
    redux_path = os.getcwd()
    master_path = os.path.join(redux_path, master_dir)

    # Grab the files
    head2d_list = []
    tracefiles = []
    waveimgfiles = []
    tiltfiles = []
    spec1d_files = []
    for f in spec2d_files:
        head = fits.getheader(f)
        trace_key = '{0}_{1:02d}'.format(head['TRACMKEY'], det)
        wave_key = '{0}_{1:02d}'.format(head['ARCMKEY'], det)
        head2d_list.append(head)
        spec1d_files.append(f.replace('spec2d', 'spec1d'))
        tracefiles.append(os.path.join(master_path,
                                       MasterFrame.construct_file_name('Trace', trace_key)))
        waveimgfiles.append(os.path.join(master_path,
                                         MasterFrame.construct_file_name('Wave', wave_key)))
        tiltfiles.append(os.path.join(master_path,
                                      MasterFrame.construct_file_name('Tilts', wave_key)))

    nfiles = len(spec2d_files)

    specobjs_list = []
    head1d_list = []
    # TODO: Sort this out with the correct detector extensions etc.
    # Read in the image stacks
    for ifile in range(nfiles):
        waveimg = WaveImage.load_from_file(waveimgfiles[ifile])
        tilts = WaveTilts.load_from_file(tiltfiles[ifile])
        hdu = fits.open(spec2d_files[ifile])
        # One detector, sky sub for now
        names = [hdu[i].name for i in range(len(hdu))]
        # Science image
        try:
            exten = names.index('DET{:s}-PROCESSED'.format(sdet))
        except ValueError:  # Backwards compatibility
            det_error_msg(exten, sdet)
        sciimg = hdu[exten].data
        # Sky model
        try:
            exten = names.index('DET{:s}-SKY'.format(sdet))
        except ValueError:  # Backwards compatibility
            det_error_msg(exten, sdet)
        skymodel = hdu[exten].data
        # Inverse variance model
        try:
            exten = names.index('DET{:s}-IVARMODEL'.format(sdet))
        except ValueError:  # Backwards compatibility
            det_error_msg(exten, sdet)
        sciivar = hdu[exten].data
        # Mask
        try:
            exten = names.index('DET{:s}-MASK'.format(sdet))
        except ValueError:  # Backwards compatibility
            det_error_msg(exten, sdet)
        mask = hdu[exten].data
        if ifile == 0:
            # The two shapes accommodate the possibility that waveimg and tilts
            # are binned differently
            shape_wave = (nfiles, waveimg.shape[0], waveimg.shape[1])
            shape_sci = (nfiles, sciimg.shape[0], sciimg.shape[1])
            waveimg_stack = np.zeros(shape_wave, dtype=float)
            tilts_stack = np.zeros(shape_wave, dtype=float)
            sciimg_stack = np.zeros(shape_sci, dtype=float)
            skymodel_stack = np.zeros(shape_sci, dtype=float)
            sciivar_stack = np.zeros(shape_sci, dtype=float)
            mask_stack = np.zeros(shape_sci, dtype=float)

        waveimg_stack[ifile, :, :] = waveimg
        tilts_stack[ifile, :, :] = tilts['tilts']
        sciimg_stack[ifile, :, :] = sciimg
        sciivar_stack[ifile, :, :] = sciivar
        mask_stack[ifile, :, :] = mask
        skymodel_stack[ifile, :, :] = skymodel

        sobjs, head = load.load_specobjs(spec1d_files[ifile])
        head1d_list.append(head)
        specobjs_list.append(sobjs)

    # Right now we assume there is a single tslits_dict for all images and read in the first one
    # TODO: this needs to become a tslits_dict for each file to accommodate slits
    # defined by flats taken on different nights
    tslits_dict, _ = TraceSlits.load_from_file(tracefiles[0])
    spectrograph = util.load_spectrograph(tslits_dict['spectrograph'])
    slitmask = pixels.tslits2mask(tslits_dict)
    slitmask_stack = np.einsum('i,jk->ijk', np.ones(nfiles), slitmask)

    # Fill the master key dict
    head2d = head2d_list[0]
    master_key_dict = {}
    master_key_dict['frame'] = head2d['FRAMMKEY'] + '_{:02d}'.format(det)
    master_key_dict['bpm'] = head2d['BPMMKEY'] + '_{:02d}'.format(det)
    master_key_dict['bias'] = head2d['BIASMKEY'] + '_{:02d}'.format(det)
    master_key_dict['arc'] = head2d['ARCMKEY'] + '_{:02d}'.format(det)
    master_key_dict['trace'] = head2d['TRACMKEY'] + '_{:02d}'.format(det)
    master_key_dict['flat'] = head2d['FLATMKEY'] + '_{:02d}'.format(det)

    stack_dict = dict(specobjs_list=specobjs_list, tslits_dict=tslits_dict,
                      slitmask_stack=slitmask_stack,
                      sciimg_stack=sciimg_stack, sciivar_stack=sciivar_stack,
                      skymodel_stack=skymodel_stack, mask_stack=mask_stack,
                      tilts_stack=tilts_stack, waveimg_stack=waveimg_stack,
                      head1d_list=head1d_list, head2d_list=head2d_list,
                      redux_path=redux_path, master_path=master_path, master_dir=master_dir,
                      master_key_dict=master_key_dict,
                      spectrograph=tslits_dict['spectrograph'])

    return stack_dict
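# A usage sketch for the loader above; the spec2d file names are hypothetical and
# would normally live in the Science/ directory of a completed PypeIt run.
spec2d_files = ['Science/spec2d_frame1.fits', 'Science/spec2d_frame2.fits']
stack_dict = load_coadd2d_stacks(spec2d_files, det=1)
sciimg_stack = stack_dict['sciimg_stack']      # (nfiles, nspec, nspat) science images
slitmask_stack = stack_dict['slitmask_stack']  # matching stack of slit-ID masks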
def test_assign_maskinfo_add_missing():
    instr_names = ['keck_deimos', 'keck_mosfire']
    for name in instr_names:
        # Spectrograph
        instrument = load_spectrograph(name)
        par = instrument.default_pypeit_par()
        # Working only on detector 3 (det=3 for DEIMOS; for MOSFIRE it does not
        # matter because there is only one detector)
        det = 3 if name == 'keck_deimos' else 1

        # Build the trace image
        traceImage = buildimage.buildimage_fromlist(instrument, det,
                                                    par['calibrations']['traceframe'],
                                                    flat_files(instr=name))
        # Load the configuration-specific parameters
        par = instrument.config_specific_par(traceImage.files[0])

        # Run the edge tracing
        edges = edgetrace.EdgeTraceSet(traceImage, instrument,
                                       par['calibrations']['slitedges'],
                                       auto=True, debug=False, show_stages=False,
                                       qa_path=None)
        slits = edges.get_slits()
        # Test that the maskfile is saved properly
        hdul = fits.open(slits.maskfile)
        det_par = instrument.get_detector_par(det, hdu=hdul)

        if name == 'keck_deimos':
            specobjs_file = os.path.join(os.getenv('PYPEIT_DEV'), 'Cooked', 'Science',
                                         'spec1d_DE.20100913.22358-CFHQS1_DEIMOS_20100913T061231.334.fits')
            sobjs = specobjs.SpecObjs.from_fitsfile(specobjs_file)
            # Correct values
            slitid = sobjs[sobjs.MASKDEF_OBJNAME == 'ero89'].SLITID[0]
            true_maskdef_objname = sobjs[sobjs.SLITID == slitid].MASKDEF_OBJNAME[0]
            true_ra = round(sobjs[sobjs.SLITID == slitid].RA[0], 6)
            true_dec = round(sobjs[sobjs.SLITID == slitid].DEC[0], 6)
            true_spat_pixpos = round(sobjs[sobjs.MASKDEF_OBJNAME == 'ero884'].SPAT_PIXPOS[0])
            true_spat_pixpos_2 = round(sobjs[sobjs.MASKDEF_OBJNAME == 'ero191'].SPAT_PIXPOS[0])
        elif name == 'keck_mosfire':
            specobjs_file = os.path.join(os.getenv('PYPEIT_DEV'), 'Cooked', 'Science',
                                         'spec1d_m191014_0170-2M2228_12_MOSFIRE_20191014T095212.598.fits')
            sobjs = specobjs.SpecObjs.from_fitsfile(specobjs_file)
            # Correct values
            slitid = sobjs[sobjs.MASKDEF_OBJNAME == '18'].SLITID[0]
            true_maskdef_objname = sobjs[sobjs.SLITID == slitid].MASKDEF_OBJNAME[0]
            true_ra = round(sobjs[sobjs.SLITID == slitid].RA[0], 6)
            true_dec = round(sobjs[sobjs.SLITID == slitid].DEC[0], 6)
            true_spat_pixpos = round(sobjs[sobjs.MASKDEF_OBJNAME == '7'].SPAT_PIXPOS[0])

        # Init at null and remove the forced extractions
        idx_remove = []
        for i, sobj in enumerate(sobjs):
            if sobj.MASKDEF_EXTRACT:
                idx_remove.append(i)
            else:
                sobj.MASKDEF_ID = None
                sobj.MASKDEF_OBJNAME = None
                sobj.RA = None
                sobj.DEC = None
                sobj.MASKDEF_EXTRACT = None
        sobjs.remove_sobj(idx_remove)

        # Get the dither offset if available
        if name == 'keck_deimos':
            dither_off = None
        elif name == 'keck_mosfire':
            dither_off = instrument.parse_dither_pattern(
                    [os.path.join(os.getenv('PYPEIT_DEV'), 'RAW_DATA', 'keck_mosfire',
                                  'J_multi', 'm191014_0170.fits')])[2][0]

        # Get object positions from the slitmask design and the slitmask offsets
        calib_slits = slittrace.get_maskdef_objpos_offset_alldets(
                sobjs, [slits], [None], [det_par['platescale']],
                par['calibrations']['slitedges']['det_buffer'],
                par['reduce']['slitmask'], dither_off=dither_off)
        # Determine if slitmask offsets exist and compute an average offset over
        # all the detectors
        calib_slits = slittrace.average_maskdef_offset(calib_slits, det_par['platescale'],
                                                       instrument.list_detectors())
        # Slitmask design matching and addition of undetected objects
        sobjs = slittrace.assign_addobjs_alldets(sobjs, calib_slits, [None],
                                                 [det_par['platescale']],
                                                 par['reduce']['slitmask'],
                                                 par['reduce']['findobj']['find_fwhm'])

        # Test
        if name == 'keck_deimos':
            # Check that the maskdef assignment is recovered
            assert sobjs[sobjs.SLITID == slitid].MASKDEF_OBJNAME[0] == true_maskdef_objname, \
                    'Wrong DEIMOS MASKDEF_OBJNAME'
            assert round(sobjs[sobjs.SLITID == slitid].RA[0], 6) == true_ra, \
                    'Wrong object DEIMOS RA'
            assert round(sobjs[sobjs.SLITID == slitid].DEC[0], 6) == true_dec, \
                    'Wrong object DEIMOS DEC'
            # Test that undetected objects are added at the correct location (the
            # correct location was verified by visual inspection)
            assert round(sobjs[sobjs.MASKDEF_OBJNAME == 'ero884'].SPAT_PIXPOS[0]) == true_spat_pixpos, \
                    'Wrong object (ero884) location on the DEIMOS slit'
            assert round(sobjs[sobjs.MASKDEF_OBJNAME == 'ero191'].SPAT_PIXPOS[0]) == true_spat_pixpos_2, \
                    'Wrong object (ero191) location on the DEIMOS slit'
        elif name == 'keck_mosfire':
            # Check that the maskdef assignment is recovered
            assert sobjs[sobjs.SLITID == slitid].MASKDEF_OBJNAME[0] == true_maskdef_objname, \
                    'Wrong MOSFIRE MASKDEF_OBJNAME'
            assert round(sobjs[sobjs.SLITID == slitid].RA[0], 6) == true_ra, \
                    'Wrong object MOSFIRE RA'
            assert round(sobjs[sobjs.SLITID == slitid].DEC[0], 6) == true_dec, \
                    'Wrong object MOSFIRE DEC'
            # Test that undetected objects are added at the correct location (the
            # correct location was verified by visual inspection)
            assert round(sobjs[sobjs.MASKDEF_OBJNAME == '7'].SPAT_PIXPOS[0]) == true_spat_pixpos, \
                    'Wrong object (7) location on the MOSFIRE slit'

        # Write sobjs
        sobjs.write_to_fits({}, data_path('tst_sobjs.fits'))
        os.remove(data_path('tst_sobjs.fits'))
def grab_img(specstr, rawfile, det=1):
    """Load a raw image for the named spectrograph, file, and detector."""
    spec = load_spectrograph(specstr)
    rawImage = RawImage(rawfile, spec, det)
    return rawImage
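# Usage sketch; the spectrograph name is a valid PypeIt identifier but the raw
# file path is hypothetical.
rawImage = grab_img('keck_deimos', 'RAW_DATA/keck_deimos/some_raw_frame.fits', det=1)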
def main(args):
    """ Executes telluric correction. """

    import os

    from pkg_resources import resource_filename
    from astropy.io import fits

    from pypeit import msgs
    from pypeit.par import pypeitpar
    from pypeit.spectrographs.util import load_spectrograph
    from pypeit.core import telluric

    # Determine the spectrograph
    header = fits.getheader(args.spec1dfile)
    spectrograph = load_spectrograph(header['PYP_SPEC'])
    spectrograph_def_par = spectrograph.default_pypeit_par()

    # If the .tell file was passed in, read it and overwrite the default
    # parameters.  read_tellfile() is assumed to be defined elsewhere in this script.
    if args.tell_file is not None:
        cfg_lines = read_tellfile(args.tell_file)
        par = pypeitpar.PypeItPar.from_cfg_lines(cfg_lines=spectrograph_def_par.to_config(),
                                                 merge_with=cfg_lines)
    else:
        par = spectrograph_def_par

    # If args were provided, override the defaults.  Note this undoes the .tell file
    if args.objmodel is not None:
        par['tellfit']['objmodel'] = args.objmodel
    if args.pca_file is not None:
        par['tellfit']['pca_file'] = args.pca_file
    if args.redshift is not None:
        par['tellfit']['redshift'] = args.redshift

    if args.tell_grid is not None:
        par['tellfit']['tell_grid'] = args.tell_grid
    elif par['sensfunc']['IR']['telgridfile'] is not None:
        par['tellfit']['tell_grid'] = par['sensfunc']['IR']['telgridfile']
    else:
        msgs.warn('No telluric grid file given. Using {:}'.format(
            'TelFit_MaunaKea_3100_26100_R20000.fits'))
        par['tellfit']['tell_grid'] \
                = resource_filename('pypeit',
                                    '/data/telluric/TelFit_MaunaKea_3100_26100_R20000.fits')

    # Write the par to disk
    print("Writing the parameters to {}".format(args.par_outfile))
    par['tellfit'].to_config(args.par_outfile, section_name='tellfit', include_descr=False)

    # Parse the output filenames
    outfile = (os.path.basename(args.spec1dfile)).replace('.fits', '_tellcorr.fits')
    modelfile = (os.path.basename(args.spec1dfile)).replace('.fits', '_tellmodel.fits')

    # Run the telluric fitting procedure.
    if par['tellfit']['objmodel'] == 'qso':
        # run telluric.qso_telluric to get the final results
        TelQSO = telluric.qso_telluric(args.spec1dfile, par['tellfit']['tell_grid'],
                                       par['tellfit']['pca_file'], par['tellfit']['redshift'],
                                       modelfile, outfile,
                                       npca=par['tellfit']['npca'],
                                       pca_lower=par['tellfit']['pca_lower'],
                                       pca_upper=par['tellfit']['pca_upper'],
                                       bounds_norm=par['tellfit']['bounds_norm'],
                                       tell_norm_thresh=par['tellfit']['tell_norm_thresh'],
                                       only_orders=par['tellfit']['only_orders'],
                                       bal_wv_min_max=par['tellfit']['bal_wv_min_max'],
                                       debug_init=args.debug, disp=args.debug,
                                       debug=args.debug, show=args.plot)
    elif par['tellfit']['objmodel'] == 'star':
        TelStar = telluric.star_telluric(args.spec1dfile, par['tellfit']['tell_grid'],
                                         modelfile, outfile,
                                         star_type=par['tellfit']['star_type'],
                                         star_mag=par['tellfit']['star_mag'],
                                         star_ra=par['tellfit']['star_ra'],
                                         star_dec=par['tellfit']['star_dec'],
                                         func=par['tellfit']['func'],
                                         model=par['tellfit']['model'],
                                         polyorder=par['tellfit']['polyorder'],
                                         only_orders=par['tellfit']['only_orders'],
                                         mask_abs_lines=par['tellfit']['mask_abs_lines'],
                                         delta_coeff_bounds=par['tellfit']['delta_coeff_bounds'],
                                         minmax_coeff_bounds=par['tellfit']['minmax_coeff_bounds'],
                                         debug_init=args.debug, disp=args.debug,
                                         debug=args.debug, show=args.plot)
    elif par['tellfit']['objmodel'] == 'poly':
        TelPoly = telluric.poly_telluric(args.spec1dfile, par['tellfit']['tell_grid'],
                                         modelfile, outfile,
                                         z_obj=par['tellfit']['redshift'],
                                         func=par['tellfit']['func'],
                                         model=par['tellfit']['model'],
                                         polyorder=par['tellfit']['polyorder'],
                                         fit_wv_min_max=par['tellfit']['fit_wv_min_max'],
                                         mask_lyman_a=par['tellfit']['mask_lyman_a'],
                                         delta_coeff_bounds=par['tellfit']['delta_coeff_bounds'],
                                         minmax_coeff_bounds=par['tellfit']['minmax_coeff_bounds'],
                                         only_orders=par['tellfit']['only_orders'],
                                         debug_init=args.debug, disp=args.debug,
                                         debug=args.debug, show=args.plot)
    else:
        msgs.error("Object model is not supported yet. Please choose one of 'qso', 'star', "
                   "'poly'.")
def main(args):
    """ Executes 2d coadding """
    msgs.warn('PATH =' + os.getcwd())
    # Load the file
    if args.file is not None:
        spectrograph, config_lines, spec2d_files = read_coadd2d_file(args.file)
        # Parameters
        # TODO: Shouldn't this reinstantiate the same parameters used in the
        # PypeIt run that extracted the objects?  Why are we not just passing the
        # pypeit file?
        # JFH: The reason is that the coadd2dfile may want different reduction parameters
        spectrograph_def_par = spectrograph.default_pypeit_par()
        parset = par.PypeItPar.from_cfg_lines(cfg_lines=spectrograph_def_par.to_config(),
                                              merge_with=config_lines)
    elif args.obj is not None:
        # TODO: We should probably be reading the pypeit file and using those
        # parameters here rather than using the default parset.
        # TODO: This needs to define the science path
        spec2d_files = glob.glob('./Science/spec2d_*' + args.obj + '*')
        head0 = fits.getheader(spec2d_files[0])
        spectrograph_name = head0['SPECTROG']
        spectrograph = load_spectrograph(spectrograph_name)
        parset = spectrograph.default_pypeit_par()
    else:
        msgs.error('You must either input a coadd2d file with --file or an object name with --obj')

    # Update with configuration-specific parameters (which requires a science
    # file) and initialize the spectrograph
    spectrograph_cfg_lines = spectrograph.config_specific_par(spec2d_files[0]).to_config()
    parset = par.PypeItPar.from_cfg_lines(cfg_lines=spectrograph_cfg_lines,
                                          merge_with=parset.to_config())

    # If a detector was passed as an argument, override whatever was in the coadd2d file
    if args.det is not None:
        msgs.info("Restricting reductions to detector={}".format(args.det))
        parset['rdx']['detnum'] = int(args.det)

    # Get headers (if possible) and base names
    spec1d_files = [files.replace('spec2d', 'spec1d') for files in spec2d_files]
    head1d = None
    for spec1d_file in spec1d_files:
        if os.path.isfile(spec1d_file):
            head1d = fits.getheader(spec1d_file)
            break
    if head1d is None:
        msgs.warn("No 1D spectra, so generating a dummy header for output")
        head1d = io.initialize_header()

    head2d = fits.getheader(spec2d_files[0])
    if args.basename is None:
        filename = os.path.basename(spec2d_files[0])
        basename = filename.split('_')[2]
    else:
        basename = args.basename

    # Write the par to disk
    par_outfile = basename + '_coadd2d.par'
    print("Writing the parameters to {}".format(par_outfile))
    parset.to_config(par_outfile)

    # Now run the coadds
    skysub_mode = head2d['SKYSUB']
    ir_redux = 'DIFF' in skysub_mode

    # Print status message
    msgs_string = 'Reducing target {:s}'.format(basename) + msgs.newline()
    msgs_string += 'Performing coadd of frames reduced with {:s} imaging'.format(skysub_mode)
    msgs_string += msgs.newline() + 'Combining frames in 2d coadd:' + msgs.newline()
    for file in spec2d_files:
        msgs_string += '{0:s}'.format(os.path.basename(file)) + msgs.newline()
    msgs.info(msgs_string)

    # TODO: This needs to be added to the parameter list for rdx
    redux_path = os.getcwd()
    master_dirname = os.path.basename(head2d['PYPMFDIR']) + '_coadd'
    master_dir = os.path.join(redux_path, master_dirname)

    # Make the new Master dir
    if not os.path.isdir(master_dir):
        msgs.info('Creating directory for Master output: {0}'.format(master_dir))
        os.makedirs(master_dir)

    # Instantiate the sci_dict
    sci_dict = OrderedDict()  # This needs to be ordered
    sci_dict['meta'] = {}
    sci_dict['meta']['vel_corr'] = 0.
    sci_dict['meta']['ir_redux'] = ir_redux

    # Find the detectors to reduce
    detectors = PypeIt.select_detectors(detnum=parset['rdx']['detnum'],
                                        ndet=spectrograph.ndet)
    if len(detectors) != spectrograph.ndet:
        msgs.warn('Not reducing detectors: {0}'.format(' '.join(
                [str(d) for d in set(np.arange(spectrograph.ndet) + 1) - set(detectors)])))

    # Loop on detectors
    for det in detectors:
        msgs.info("Working on detector {0}".format(det))
        sci_dict[det] = {}

        # Instantiate Coadd2d
        coadd = coadd2d.CoAdd2D.get_instance(spec2d_files, spectrograph, parset, det=det,
                                             offsets=parset['coadd2d']['offsets'],
                                             weights=parset['coadd2d']['weights'],
                                             ir_redux=ir_redux,
                                             debug_offsets=args.debug_offsets,
                                             debug=args.debug, samp_fact=args.samp_fact)

        # Coadd the slits
        coadd_dict_list = coadd.coadd(only_slits=None)  # TODO: implement only_slits later
        # Create the pseudo images
        psuedo_dict = coadd.create_psuedo_image(coadd_dict_list)
        # Reduce
        msgs.info('Running the extraction')
        sci_dict[det]['sciimg'], sci_dict[det]['sciivar'], sci_dict[det]['skymodel'], \
            sci_dict[det]['objmodel'], sci_dict[det]['ivarmodel'], sci_dict[det]['outmask'], \
            sci_dict[det]['specobjs'] = coadd.reduce(psuedo_dict, show=args.show,
                                                     show_peaks=args.peaks)
        # Save pseudo image master files
        coadd.save_masters(master_dir)

    # Make the new Science dir
    # TODO: This needs to be defined by the user
    scipath = os.path.join(redux_path, 'Science_coadd')
    if not os.path.isdir(scipath):
        msgs.info('Creating directory for Science output: {0}'.format(scipath))
        os.makedirs(scipath)

    # Save the results
    save.save_all(sci_dict, coadd.stack_dict['master_key_dict'], master_dir, spectrograph,
                  head1d, head2d, scipath, basename)  #, binning=coadd.binning)
def main(args, unit_test=False, path=''):
    """ Coadds 1d spectra from an input file

    path : str, optional
      Mainly for running the unit test
    """
    import glob

    import yaml
    from numpy import isnan

    from astropy.io import fits

    from pypeit import msgs
    from pypeit.core import coadd
    from pypeit import specobjs
    from pypeit.spectrographs import util

    # Load the input file
    with open(args.infile, 'r') as infile:
        coadd_dict = yaml.safe_load(infile)

    # Spectrograph
    spectrograph = util.load_spectrograph(coadd_dict.pop('spectrograph'))

    # Grab the object names in the spectra, allowing for wildcards
    filelist = coadd_dict.pop('filenames')
    files = []
    for ifl in filelist:
        if '*' in ifl:
            files += glob.glob(path + ifl)
        else:
            files += [path + ifl]

    # Load spectra
    if len(files) == 0:
        msgs.error("No files match your input list")
    else:
        msgs.info("Coadding {:d} data frames".format(len(files)))

    # Figure out whether the data are Echelle or Longslit
    header0 = fits.getheader(files[0], 0)
    pypeline = header0['PYPELINE']
    # Also need norder for Echelle data
    if pypeline == 'Echelle':
        ext_final = fits.getheader(files[0], -1)
        norder = ext_final['ECHORDER'] + 1

    fdict = {}
    for ifile in files:
        # Open the file
        hdulist = fits.open(ifile)
        # Grab the objects
        objects = [hdu.name for hdu in hdulist][1:]
        fdict[ifile] = objects

    # Global parameters?
    if 'global' in coadd_dict.keys():
        gparam = coadd_dict.pop('global')
    else:
        gparam = {}
    if args.debug:
        gparam['debug'] = True
    sv_gparam = gparam.copy()

    # Extraction
    if 'extract' in coadd_dict.keys():
        ex_value = coadd_dict.pop('extract')
    else:
        ex_value = 'OPT'
    msgs.info("Using {:s} extraction".format(ex_value))

    # Fluxed data?
    if 'flux' in coadd_dict.keys():
        flux_value = coadd_dict.pop('flux')
    else:
        flux_value = True

    # Loop on sources
    for key in coadd_dict.keys():
        # Re-init gparam
        gparam = sv_gparam.copy()
        iobj = coadd_dict[key]['object']
        # Check iobj input
        if isinstance(iobj, list):
            if len(iobj) != len(files):
                raise IOError("Input list of object names must have same length as files")
        #
        outfile = coadd_dict[key]['outfile']

        # Scale
        if 'scale' in coadd_dict[key]:
            scale_dict = coadd_dict[key]['scale']
        else:
            scale_dict = None

        # Generate local keywords
        try:
            local_kwargs = coadd_dict[key]['local']
        except KeyError:
            local_kwargs = {}
        else:
            for lkey in local_kwargs:
                gparam[lkey] = local_kwargs[lkey]

        if unit_test:
            return gparam, ex_value, flux_value, iobj, outfile, files, local_kwargs

        # Loop on spec1d files
        gdfiles = []
        extensions = []
        gdobj = []
        for fkey in fdict:
            # Input as str or list
            if not isinstance(iobj, list):
                # Simple single object
                use_obj = iobj
            else:
                ind = files.index(fkey)
                use_obj = iobj[ind]

            if pypeline == 'Echelle':
                gdfiles.append(fkey)
                gdobj += [use_obj]
            else:
                # Find object indices
                # FW: mtch_obj_to_objects returns None when there is no match and
                # raises TypeError: cannot unpack non-iterable NoneType object
                try:
                    mtch_obj, idx = specobjs.mtch_obj_to_objects(use_obj, fdict[fkey],
                                                                 **local_kwargs)
                except TypeError:
                    mtch_obj = None
                if mtch_obj is None:
                    msgs.info("No object {:s} in file {:s}".format(iobj, fkey))
                elif len(mtch_obj) == 1:
                    # Check if optimal extraction is present in all objects.
                    # If not, warn the user and set ex_value to 'box'.
                    hdulist = fits.open(fkey)
                    try:
                        # In case the optimal extraction array is a NaN array
                        if flux_value is True:
                            # If we have a fluxed spectrum, look for flam
                            obj_opt = hdulist[mtch_obj[0]].data['OPT_FLAM']
                        else:
                            # If not, look for counts
                            obj_opt = hdulist[mtch_obj[0]].data['OPT_COUNTS']
                        if any(isnan(obj_opt)):
                            msgs.warn("Object {:s} in file {:s} has a NaN array for optimal "
                                      "extraction. Boxcar will be used instead.".format(
                                          mtch_obj[0], fkey))
                            ex_value = 'box'
                    except KeyError:
                        # In case the array is absent altogether
                        msgs.warn("Object {:s} in file {:s} doesn't have an optimal "
                                  "extraction. Boxcar will be used instead.".format(
                                      mtch_obj[0], fkey))
                        try:
                            if flux_value is True:
                                # If we have a fluxed spectrum, look for flam
                                hdulist[mtch_obj[0]].data['BOX_FLAM']
                            else:
                                # If not, look for counts
                                hdulist[mtch_obj[0]].data['BOX_COUNTS']
                        except KeyError:
                            # In case the boxcar extraction is also absent
                            msgs.error("Object {:s} in file {:s} doesn't have a boxcar "
                                       "extraction either. Co-addition cannot be "
                                       "performed".format(mtch_obj[0], fkey))
                        ex_value = 'box'
                    gdfiles.append(fkey)
                    gdobj += mtch_obj
                    extensions.append(idx[0] + 1)
                else:
                    raise ValueError("Multiple matches to object {:s} in file {:s}".format(
                        iobj, fkey))

        # Load spectra
        if len(gdfiles) == 0:
            msgs.error("No files match your input criteria")

        # QA file name
        exten = outfile.split('.')[-1]  # Allow for hdf or fits or whatever
        qafile = outfile.replace(exten, 'pdf')

        if pypeline == 'Echelle':
            # Check whether the scale_dict has the right shape
            if 'orderscale' in gparam.keys():
                orderscale_value = gparam['orderscale']
            else:
                orderscale_value = 'median'

            if (scale_dict is not None) and (orderscale_value == 'photometry'):
                if len(scale_dict) != norder:
                    raise IOError("You need to specify the photometric information for "
                                  "every order.")

            spec1d = coadd.ech_coadd(gdfiles, objids=gdobj, extract=ex_value, flux=flux_value,
                                     phot_scale_dicts=scale_dict, outfile=outfile,
                                     qafile=qafile, **gparam)
        else:
            spectra = coadd.load_spec(gdfiles, iextensions=extensions, extract=ex_value,
                                      flux=flux_value)
            # Coadd!
            coadd.coadd_spectra(spectrograph, gdfiles, spectra, qafile=qafile, outfile=outfile,
                                flux_scale=scale_dict, **gparam)
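# For reference, a minimal input file this script could parse might look like the
# block below (a sketch inferred from the keys popped above; all values are
# hypothetical):
#
#   spectrograph: shane_kast_blue
#   filenames: ['Science/spec1d_b27*.fits', 'Science/spec1d_b28*.fits']
#   extract: 'OPT'
#   flux: True
#   a:
#       object: 'SomeObjectName'
#       outfile: 'target_coadd.fits'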
def __init__(self, pypeit_file, verbosity=2, overwrite=True, reuse_masters=False, logname=None,
             show=False, redux_path=None):

    # Load
    cfg_lines, data_files, frametype, usrdata, setups \
            = parse_pypeit_file(pypeit_file, runtime=True)
    self.pypeit_file = pypeit_file

    # Spectrograph
    cfg = ConfigObj(cfg_lines)
    spectrograph_name = cfg['rdx']['spectrograph']
    self.spectrograph = load_spectrograph(spectrograph_name, ifile=data_files[0])
    msgs.info('Loaded spectrograph {0}'.format(self.spectrograph.spectrograph))

    # --------------------------------------------------------------
    # Get the full set of PypeIt parameters
    #   - Grab a science or standard file for configuration-specific parameters
    scistd_file = None
    for idx, row in enumerate(usrdata):
        if ('science' in row['frametype']) or ('standard' in row['frametype']):
            scistd_file = data_files[idx]
            break
    #   - Configuration-specific parameters for the spectrograph
    if scistd_file is not None:
        msgs.info('Setting configuration-specific parameters using {0}'.format(
                  os.path.split(scistd_file)[1]))
    spectrograph_cfg_lines = self.spectrograph.config_specific_par(scistd_file).to_config()
    #   - Build the full set, merging with any user-provided parameters
    self.par = PypeItPar.from_cfg_lines(cfg_lines=spectrograph_cfg_lines, merge_with=cfg_lines)
    msgs.info('Built full PypeIt parameter set.')

    # Check the output paths are ready
    if redux_path is not None:
        self.par['rdx']['redux_path'] = redux_path

    # TODO: Write the full parameter set here?
    # --------------------------------------------------------------

    # --------------------------------------------------------------
    # Build the meta data
    #   - Re-initialize based on the file data
    msgs.info('Compiling metadata')
    self.fitstbl = PypeItMetaData(self.spectrograph, self.par, files=data_files,
                                  usrdata=usrdata, strict=True)
    #   - Interpret automated or user-provided data from the PypeIt file
    self.fitstbl.finalize_usr_build(frametype, setups[0])
    # --------------------------------------------------------------
    #   - Write the .calib file (for QA naming amongst other things)
    calib_file = pypeit_file.replace('.pypeit', '.calib')
    self.fitstbl.write_calib(calib_file)

    # Other internals
    self.logname = logname
    self.overwrite = overwrite

    # Currently the runtime argument determines the behavior for reuse_masters.
    self.reuse_masters = reuse_masters
    self.show = show

    # Set paths
    if self.par['calibrations']['caldir'] == 'default':
        self.calibrations_path = os.path.join(self.par['rdx']['redux_path'], 'Masters')
    else:
        self.calibrations_path = self.par['calibrations']['caldir']

    # Report paths
    msgs.info('Setting reduction path to {0}'.format(self.par['rdx']['redux_path']))
    msgs.info('Master calibration data output to: {0}'.format(self.calibrations_path))
    msgs.info('Science data output to: {0}'.format(self.science_path))
    msgs.info('Quality assessment plots output to: {0}'.format(self.qa_path))
    # TODO: Is anything written to the qa dir or only to qa/PNGs?  Should we
    # have separate calibration and science QA directories?

    # Instantiate the Calibrations class
    self.caliBrate \
            = calibrations.MultiSlitCalibrations(self.fitstbl, self.par['calibrations'],
                                                 self.spectrograph,
                                                 caldir=self.calibrations_path,
                                                 qadir=self.qa_path,
                                                 reuse_masters=self.reuse_masters,
                                                 show=self.show)

    # Init
    self.verbosity = verbosity
    # TODO: I don't think this is ever used
    self.frame = None
    self.det = None

    self.tstart = None
    self.basename = None
    self.sciI = None
    self.obstime = None
    lines += ['']
    lines += ['Instrument-Specific Default Configuration']
    lines += ['+++++++++++++++++++++++++++++++++++++++++']
    lines += ['']
    lines += textwrap.wrap('The following provides the changes to the global default parameters '
                           'provided above for each instrument. That is, if one were to include '
                           'these in the PypeIt file, you would be reproducing the effect of the '
                           '`default_pypeit_par` method specific to each derived '
                           ':class:`pypeit.spectrographs.spectrograph.Spectrograph` class.', 72)
    lines += ['']

    spectrographs = valid_spectrographs()
    for spec in spectrographs:
        s = load_spectrograph(spec)
        lines += [' '.join([s.telescope['name'], s.camera])]
        lines += ['-'*len(lines[-1])]
        lines += ['Alterations to the default parameters are::']
        lines += ['']
        sl = s.default_pypeit_par().to_config(include_descr=False, exclude_defaults=True)
        lines += ['  ' + l for l in sl]
        lines += ['']
    lines += ['']

    output_rst = os.path.join(pypeit_root, 'doc', 'pypeit_par.rst')
    with open(output_rst, 'w') as f:
        f.write('\n'.join(lines))

    # NOTE: time.clock() was removed in Python 3.8; use a monotonic timer instead
    print('Elapsed time: {0} seconds'.format(time.perf_counter() - t))
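# For each spectrograph, the loop above emits an RST block of roughly this shape
# (a sketch; the instrument name and parameter lines are illustrative only):
#
#   KECK LRISb
#   ----------
#   Alterations to the default parameters are::
#
#     [rdx]
#         spectrograph = keck_lris_blue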
def main(args):

    import os
    import sys

    from pypeit import masterframe
    from pypeit.spectrographs.util import load_spectrograph
    from pypeit.core.gui.identify import Identify
    from pypeit.core.wavecal import waveio
    from pypeit.wavecalib import WaveCalib
    from pypeit import slittrace
    from pypeit.images.buildimage import ArcImage

    # Load the MasterArc file
    if os.path.exists(args.arc_file):
        arcfil = args.arc_file
    else:
        # Fall back to the Masters directory, checking explicitly that the file
        # exists (string formatting alone cannot raise FileNotFoundError)
        arcfil = "Masters/{0:s}".format(args.arc_file)
        if not os.path.exists(arcfil):
            print("Could not find MasterArc file.")
            sys.exit()
    msarc = ArcImage.from_file(arcfil)

    mdir = msarc.head0['MSTRDIR']
    mkey = msarc.head0['MSTRKEY']

    # Load the spectrograph
    specname = msarc.head0['PYP_SPEC']
    spec = load_spectrograph(specname)
    par = spec.default_pypeit_par()['calibrations']['wavelengths']

    # Get the lamp list
    if args.lamps is None:
        lamplist = par['lamps']
        if lamplist is None:
            print("ERROR :: Cannot determine the lamps")
            sys.exit()
    else:
        lamplist = args.lamps.split(",")
    par['lamps'] = lamplist

    # Load the slits
    slits = slittrace.SlitTraceSet.from_file(args.slits_file)
    # Reset the mask
    slits.mask = slits.mask_init

    # Check if a solution exists
    solnname = os.path.join(mdir, masterframe.construct_file_name(WaveCalib, mkey))
    wv_calib = waveio.load_wavelength_calibration(solnname) \
                    if os.path.exists(solnname) and args.solution else None

    # Load the MasterFrame (if it exists and is desired)?
    wavecal = WaveCalib(msarc, slits, spec, par, binspectral=slits.binspec, det=args.det,
                        master_key=mkey, msbpm=msarc.fullmask)
    arccen, arc_maskslit = wavecal.extract_arcs(slitIDs=[args.slit])

    # Launch the identify window
    arcfitter = Identify.initialise(arccen, slits, slit=int(args.slit), par=par,
                                    wv_calib_all=wv_calib, wavelim=[args.wmin, args.wmax],
                                    nonlinear_counts=spec.nonlinear_counts(msarc.detector))
    final_fit = arcfitter.get_results()

    # Ask the user if they wish to store the result in PypeIt calibrations
    arcfitter.store_solution(final_fit, mdir, slits.binspec, rmstol=args.rmstol,
                             specname=specname)
def main(args):

    from pypeit.spectrographs.util import load_spectrograph
    from pypeit.pypeitsetup import PypeItSetup

    # Check that the input spectrograph is supported
    if args.spec not in available_spectrographs:
        raise ValueError('Instrument \'{0}\' unknown to PypeIt.\n'.format(args.spec)
                         + '\tOptions are: {0}\n'.format(', '.join(available_spectrographs))
                         + '\tSelect an available instrument or consult the documentation '
                         + 'on how to add a new instrument.')

    if args.keys:
        # Only print the metadata-to-header-card mapping
        load_spectrograph(args.spec).meta_key_map()
        return

    if args.bad_types not in ['keep', 'rm', 'only']:
        raise ValueError(f'{args.bad_types} is not a valid keyword for the --bad_types '
                         'argument.')

    # Generate the metadata table
    ps = PypeItSetup.from_file_root(args.root, args.spec, extension=args.extension)
    ps.run(setup_only=True, write_files=False, groupings=args.groupings,
           clean_config=args.bad_frames)

    # Check the file can be written (this is here because the spectrograph
    # needs to be defined first)
    _file = args.file
    if _file == 'default':
        _file = f'{ps.spectrograph.name}.obslog'
    if _file is not None:
        _odir, _file = os.path.split(_file)
        _file = os.path.join(args.output_path, _file)
        if not os.path.isdir(args.output_path):
            os.makedirs(args.output_path)
        if not args.interact and os.path.isfile(_file) and not args.overwrite:
            raise FileExistsError(f'{_file} already exists. Use -o to overwrite.')

    # Write/Print the data
    header = ['Auto-generated PypeIt Observing Log',
              '{0}'.format(time.strftime("%a %d %b %Y %H:%M:%S", time.localtime())),
              f'Root file string: {args.root}']
    if args.bad_types == 'keep':
        nrows = len(ps.fitstbl)
        indx = np.ones(nrows, dtype=bool)
    elif args.bad_types == 'rm':
        indx = ps.fitstbl['frametype'] != 'None'
    elif args.bad_types == 'only':
        indx = ps.fitstbl['frametype'] == 'None'
    else:
        raise ValueError('CODING ERROR: Should never get here.')
    fitstbl = ps.fitstbl.write(output='table' if args.interact else _file, rows=indx,
                               columns=args.columns, sort_col=args.sort,
                               overwrite=args.overwrite, header=header)

    if args.interact:
        embed()
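# A typical invocation of this script through its command-line entry point might
# look like the line below (a sketch; the raw-data path is hypothetical):
#
#   pypeit_obslog keck_deimos -r /path/to/RAW_DATA/keck_deimos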
def main(args):

    tstart = time.time()

    # Read in the spectrograph, config the parset
    spectrograph = load_spectrograph('keck_mosfire')
    spectrograph_def_par = spectrograph.default_pypeit_par()
    parset = par.PypeItPar.from_cfg_lines(cfg_lines=spectrograph_def_par.to_config(),
                                          merge_with=config_lines(args))
    science_path = os.path.join(parset['rdx']['redux_path'], parset['rdx']['scidir'])

    # Parse the files and sort by MJD
    files = np.array([os.path.join(args.full_rawpath, file) for file in args.files])
    nfiles = len(files)
    target = spectrograph.get_meta_value(files[0], 'target')
    mjds = np.zeros(nfiles)
    for ifile, file in enumerate(files):
        mjds[ifile] = spectrograph.get_meta_value(file, 'mjd', ignore_bad_header=True,
                                                  no_fussing=True)
    files = files[np.argsort(mjds)]

    # Calibration Master directory
    master_dir = os.path.join(data.Paths.data, 'QL_MASTERS') \
                    if args.master_dir is None else args.master_dir
    if not os.path.isdir(master_dir):
        msgs.error(f'{master_dir} does not exist!  You must install the QL_MASTERS '
                   'directory; download the data from the PypeIt dev-suite Google Drive and '
                   'either define a QL_MASTERS environmental variable or use the '
                   'pypeit_install_ql_masters script.')

    # Define some hard-wired master files here to be later parsed out of the directory
    mosfire_filter = spectrograph.get_meta_value(files[0], 'filter1')
    mosfire_masters = os.path.join(master_dir, 'MOSFIRE_MASTERS', mosfire_filter)

    slit_masterframe_name \
            = utils.find_single_file(os.path.join(mosfire_masters, 'MasterSlits*'))
    tilts_masterframe_name \
            = utils.find_single_file(os.path.join(mosfire_masters, 'MasterTilts*'))
    wvcalib_masterframe_name \
            = utils.find_single_file(os.path.join(mosfire_masters, 'MasterWaveCalib*'))
    std_spec1d_file = utils.find_single_file(os.path.join(mosfire_masters, 'spec1d_*'))
    sensfunc_masterframe_name = utils.find_single_file(os.path.join(mosfire_masters, 'sens_*'))

    if (slit_masterframe_name is None or not os.path.isfile(slit_masterframe_name)) or \
            (tilts_masterframe_name is None or not os.path.isfile(tilts_masterframe_name)) or \
            (sensfunc_masterframe_name is None
                or not os.path.isfile(sensfunc_masterframe_name)) or \
            (std_spec1d_file is None or not os.path.isfile(std_spec1d_file)):
        msgs.error('Master frames not found.  Check that the environment variable QL_MASTERS '
                   'points at the Master Calibs.')

    # Get the detector (there's only one)
    det = 1  # MOSFIRE has a single detector
    detector = spectrograph.get_detector_par(det)
    detname = detector.name

    # We need the platescale
    platescale = detector['platescale']

    # Parse the offset information out of the headers.
    # TODO: In the future get this out of the fitstable.
    dither_pattern, dither_id, offset_arcsec = spectrograph.parse_dither_pattern(files)
    if len(np.unique(dither_pattern)) > 1:
        msgs.error('Script only supported for a single type of dither pattern.')

    A_files = files[dither_id == 'A']
    B_files = files[dither_id == 'B']
    nA = len(A_files)
    nB = len(B_files)

    # Print out a report on the offsets
    msg_string = msgs.newline() + '*******************************************************'
    msg_string += msgs.newline() + ' Summary of offsets for target {:s} with dither pattern: {:s}'.format(target, dither_pattern[0])
    msg_string += msgs.newline() + '*******************************************************'
    msg_string += msgs.newline() + 'filename     Position         arcsec    pixels '
    msg_string += msgs.newline() + '----------------------------------------------------'
    for iexp, file in enumerate(files):
        msg_string += msgs.newline() + '    {:s}    {:s}   {:6.2f}    {:6.2f}'.format(
                os.path.basename(file), dither_id[iexp], offset_arcsec[iexp],
                offset_arcsec[iexp] / platescale)
    msg_string += msgs.newline() + '********************************************************'
    msgs.info(msg_string)

    #offset_dith_pix = offset_dith_pix = offset_arcsec_A[0]/sciImg.detector.platescale

    ## Read in the master frames that we need ##
    if std_spec1d_file is not None:
        # Get the standard trace if need be
        sobjs = specobjs.SpecObjs.from_fitsfile(std_spec1d_file, chk_version=False)
        this_det = sobjs.DET == detname
        if np.any(this_det):
            sobjs_det = sobjs[this_det]
            sobjs_std = sobjs_det.get_std()
            std_trace = None if sobjs_std is None else sobjs_std.TRACE_SPAT.flatten()
        else:
            std_trace = None
    else:
        std_trace = None

    # Read in the msbpm
    msbpm = spectrograph.bpm(A_files[0], det)
    # Read in the slits
    slits = slittrace.SlitTraceSet.from_file(slit_masterframe_name)
    # Reset the bitmask
    slits.mask = slits.mask_init.copy()
    # Read in the wv_calib
    wv_calib = wavecalib.WaveCalib.from_file(wvcalib_masterframe_name)
    #wv_calib.is_synced(slits)
    slits.mask_wvcalib(wv_calib)
    # Read in the tilts
    tilts_obj = wavetilts.WaveTilts.from_file(tilts_masterframe_name)
    tilts_obj.is_synced(slits)
    slits.mask_wavetilts(tilts_obj)

    # Build the Calibrate object
    caliBrate = calibrations.Calibrations(None, parset['calibrations'], spectrograph, None)
    caliBrate.det = det
    caliBrate.slits = slits
    caliBrate.msbpm = msbpm
    caliBrate.wavetilts = tilts_obj
    caliBrate.wv_calib = wv_calib
    caliBrate.binning = f'{slits.binspec},{slits.binspat}'

    # Find the unique throw absolute value, which defines each MASK_NOD sequence
    #uniq_offsets, _ = np.unique(offset_arcsec, return_inverse=True)
    spec2d_list = []
    offset_ref = offset_arcsec[0]
    offsets_dith_pix = []
    # TODO: Generalize to multiple slits, doing one slit at a time?
    islit = 0

    # Loop over the unique throws and create a spec2DObj_A and spec2DObj_B for
    # each, which are then fed into coadd2d with the correct offsets

    # TODO: Rework the logic here so that we can print out a unified report on
    # what was actually reduced.
    uniq_throws, uni_indx = np.unique(np.abs(offset_arcsec), return_inverse=True)
    # uniq_throws = unique values of the dither throw
    # uni_indx = indices into the uniq_throws array needed to reconstruct the original array
    nuniq = uniq_throws.size
    for iuniq in range(nuniq):
        A_ind = (uni_indx == iuniq) & (dither_id == 'A')
        B_ind = (uni_indx == iuniq) & (dither_id == 'B')
        A_files_uni = files[A_ind]
        A_dither_id_uni = dither_id[A_ind]
        B_dither_id_uni = dither_id[B_ind]
        B_files_uni = files[B_ind]
        A_offset = offset_arcsec[A_ind]
        B_offset = offset_arcsec[B_ind]
        throw = np.abs(A_offset[0])
        msgs.info('Reducing A-B pairs for throw = {:}'.format(throw))
        if (len(A_files_uni) > 0) & (len(B_files_uni) > 0):
            spec2DObj_A, spec2DObj_B = reduce_IR(A_files_uni, B_files_uni, caliBrate,
                                                 spectrograph, det, parset, show=args.show,
                                                 std_trace=std_trace)
            spec2d_list += [spec2DObj_A, spec2DObj_B]
            offsets_dith_pix += [(np.mean(A_offset) - offset_ref) / platescale,
                                 (np.mean(B_offset) - offset_ref) / platescale]
        else:
            # Build and report the list of files that were skipped
            msg_string = 'Skipping files that do not have an A-B match with the same throw:'
            for iexp in range(len(A_files_uni)):
                msg_string += msgs.newline() + '    {:s}    {:s}   {:6.2f}    {:6.2f}'.format(
                        os.path.basename(A_files_uni[iexp]), A_dither_id_uni[iexp],
                        A_offset[iexp], A_offset[iexp] / platescale)
            for iexp in range(len(B_files_uni)):
                msg_string += msgs.newline() + '    {:s}    {:s}   {:6.2f}    {:6.2f}'.format(
                        os.path.basename(B_files_uni[iexp]), B_dither_id_uni[iexp],
                        B_offset[iexp], B_offset[iexp] / platescale)
            msgs.warn(msg_string)

    offsets_dith_pix = np.array(offsets_dith_pix)
    #else:
    #    msgs.error('Unrecognized mode')

    if args.offset is not None:
        offsets_pixels = np.array([0.0, args.offset])
        msgs.info('Using user specified offsets instead: {:5.2f}'.format(args.offset))
    else:
        offsets_pixels = offsets_dith_pix

    # Instantiate Coadd2d
    coadd = coadd2d.CoAdd2D.get_instance(spec2d_list, spectrograph, parset, det=det,
                                         offsets=offsets_pixels, weights='uniform',
                                         spec_samp_fact=args.spec_samp_fact,
                                         spat_samp_fact=args.spat_samp_fact,
                                         bkg_redux=True, debug=args.show)
    # Coadd the slits
    # TODO: implement only_slits later
    coadd_dict_list = coadd.coadd(only_slits=None, interp_dspat=False)
    # Create the pseudo images
    pseudo_dict = coadd.create_pseudo_image(coadd_dict_list)

    # Multiply in a sensitivity function to flux the 2d image
    if args.flux:
        # Load the sensitivity function
        #wave_sens, sfunc, _, _, _ = sensfunc.SensFunc.load(sensfunc_masterframe_name)
        sens = sensfunc.SensFunc.from_file(sensfunc_masterframe_name)

        # Interpolate the sensitivity function onto the wavelength grid of the
        # data.  Since the image is rectified this is trivial and we don't need
        # to do a 2d interpolation.
        exptime = spectrograph.get_meta_value(files[0], 'exptime')
        sens_factor = flux_calib.get_sensfunc_factor(pseudo_dict['wave_mid'][:, islit],
                                                     sens.wave.flatten(),
                                                     sens.zeropoint.flatten(), exptime,
                                                     extrap_sens=True)
                                                     #parset['fluxcalib']['extrap_sens'])

        # Compute the median sensitivity and set the sensitivity to zero at
        # locations 100 times the median.  This prevents the 2d image from
        # blowing up where the sens_factor explodes because there is no
        # throughput.
        sens_gpm = sens_factor < 100.0 * np.median(sens_factor)
        sens_factor_masked = sens_factor * sens_gpm
        sens_factor_img = np.repeat(sens_factor_masked[:, np.newaxis], pseudo_dict['nspat'],
                                    axis=1)
        imgminsky = sens_factor_img * pseudo_dict['imgminsky']
        imgminsky_gpm = sens_gpm[:, np.newaxis] & pseudo_dict['inmask']
    else:
        imgminsky = pseudo_dict['imgminsky']
        imgminsky_gpm = pseudo_dict['inmask']

    ##########################
    # Now display the images #
    ##########################
    if not args.no_gui:
        display.connect_to_ginga(raise_err=True, allow_new=True)

        # TODO: Bug in ginga prevents me from using cuts here for some reason
        mean, med, sigma = sigma_clipped_stats(imgminsky[imgminsky_gpm], sigma_lower=3.0,
                                               sigma_upper=3.0)
        chname_skysub = f'fluxed-skysub-{detname.lower()}' \
                            if args.flux else f'skysub-{detname.lower()}'
        cuts_skysub = (med - 3.0 * sigma, med + 3.0 * sigma)
        cuts_resid = (-5.0, 5.0)
        #fits.writeto('/Users/joe/ginga_test.fits', imgminsky, overwrite=True)
        #fits.writeto('/Users/joe/ginga_mask.fits', imgminsky_gpm.astype(float), overwrite=True)
        #embed()

        # Clear all channels at the beginning
        # TODO: JFH For some reason Ginga crashes when I try to put cuts in here.
        viewer, ch_skysub = display.show_image(imgminsky, chname=chname_skysub,
                                               waveimg=pseudo_dict['waveimg'], clear=True,
                                               cuts=cuts_skysub)
        slit_left, slit_righ, _ = pseudo_dict['slits'].select_edges()
        slit_id = slits.slitord_id[0]
        display.show_slits(viewer, ch_skysub, slit_left, slit_righ, slit_ids=slit_id)

        # Sky residual map
        chname_skyresids = f'sky_resid-{detname.lower()}'
        image = pseudo_dict['imgminsky'] * np.sqrt(pseudo_dict['sciivar']) \
                    * pseudo_dict['inmask']
        viewer, ch_skyresids = display.show_image(image, chname_skyresids,
                                                  waveimg=pseudo_dict['waveimg'],
                                                  cuts=cuts_resid)
        display.show_slits(viewer, ch_skyresids, slit_left, slit_righ,
                           slit_ids=slits.slitord_id[0])

        # Sync the two channels in the viewer
        shell = viewer.shell()
        out = shell.start_global_plugin('WCSMatch')
        out = shell.call_global_plugin_method('WCSMatch', 'set_reference_channel',
                                              [chname_skysub], {})

    # TODO: extract along a spatial position
    if args.writefits:
        head0 = fits.getheader(files[0])
        # TODO: use meta tools for the object name in the future.
        outfile = target + '_specXspat_{:3.2f}X{:3.2f}.fits'.format(args.spec_samp_fact,
                                                                    args.spat_samp_fact)
        hdu = fits.PrimaryHDU(imgminsky, header=head0)
        hdu_resid = fits.ImageHDU(pseudo_dict['imgminsky'] \
                                  * np.sqrt(pseudo_dict['sciivar']) * pseudo_dict['inmask'])
        hdu_wave = fits.ImageHDU(pseudo_dict['waveimg'])
        hdul = fits.HDUList([hdu, hdu_resid, hdu_wave])
        msgs.info('Writing sky subtracted image to {:s}'.format(outfile))
        hdul.writeto(outfile, overwrite=True)

    msgs.info(utils.get_time_string(time.time() - tstart))

    if args.embed:
        embed()

    return 0
def main(args):

    tstart = time.time()

    # Parse the files and sort by MJD
    files = np.array([os.path.join(args.full_rawpath, file) for file in args.files])
    nfiles = len(files)

    # Read in the spectrograph, config the parset
    spectrograph = load_spectrograph('vlt_fors2')
    #spectrograph_def_par = spectrograph.default_pypeit_par()
    spectrograph_cfg_lines = spectrograph.config_specific_par(files[0]).to_config()
    parset = par.PypeItPar.from_cfg_lines(cfg_lines=spectrograph_cfg_lines,
                                          merge_with=config_lines(args))
    science_path = os.path.join(parset['rdx']['redux_path'], parset['rdx']['scidir'])

    target = spectrograph.get_meta_value(files[0], 'target')
    mjds = np.zeros(nfiles)
    for ifile, file in enumerate(files):
        mjds[ifile] = spectrograph.get_meta_value(file, 'mjd', ignore_bad_header=True,
                                                  no_fussing=True)
    files = files[np.argsort(mjds)]

    # Calibration Master directory
    # TODO: hardwired for now
    master_dir = './'
    #master_dir = resource_filename('pypeit', 'data/QL_MASTERS') \
    #                if args.master_dir is None else args.master_dir
    if not os.path.isdir(master_dir):
        msgs.error(f'{master_dir} does not exist!  You must install the QL_MASTERS '
                   'directory; download the data from the PypeIt dev-suite Google Drive and '
                   'either define a QL_MASTERS environmental variable or use the '
                   'pypeit_install_ql_masters script.')

    # Define some hard-wired master files here to be later parsed out of the directory
    fors2_grism = spectrograph.get_meta_value(files[0], 'dispname')
    fors2_masters = os.path.join(master_dir, 'FORS2_MASTERS', fors2_grism)

    bias_masterframe_name \
            = utils.find_single_file(os.path.join(fors2_masters, 'MasterBias*'))
    slit_masterframe_name \
            = utils.find_single_file(os.path.join(fors2_masters, 'MasterSlits*'))
    tilts_masterframe_name \
            = utils.find_single_file(os.path.join(fors2_masters, 'MasterTilts*'))
    wvcalib_masterframe_name \
            = utils.find_single_file(os.path.join(fors2_masters, 'MasterWaveCalib*'))
    std_spec1d_file = utils.find_single_file(os.path.join(fors2_masters, 'spec1d_*'))
    sensfunc_masterframe_name = utils.find_single_file(os.path.join(fors2_masters, 'sens_*'))

    # TODO: make and implement sensfunc
    if (bias_masterframe_name is None or not os.path.isfile(bias_masterframe_name)) or \
            (slit_masterframe_name is None or not os.path.isfile(slit_masterframe_name)) or \
            (tilts_masterframe_name is None or not os.path.isfile(tilts_masterframe_name)) or \
            (std_spec1d_file is None or not os.path.isfile(std_spec1d_file)):
            # or (sensfunc_masterframe_name is None
            #     or not os.path.isfile(sensfunc_masterframe_name)):
        msgs.error('Master frames not found.  Check that the environment variable QL_MASTERS '
                   'points at the Master Calibs.')

    # We need the platescale.  Get the detector parameters (there's only one
    # detector supported here)
    det_container = spectrograph.get_detector_par(1, hdu=fits.open(files[0]))
    binspectral, binspatial = parse_binning(det_container['binning'])
    platescale = det_container['platescale'] * binspatial

    # Parse the offset information out of the headers.
    _, _, offset_arcsec = spectrograph.parse_dither_pattern(files)

    # Print out a report on the offsets
    msg_string = msgs.newline() + '*******************************************************'
    msg_string += msgs.newline() + ' Summary of offsets for target {:s}:'.format(target)
    msg_string += msgs.newline() + '*******************************************************'
    msg_string += msgs.newline() + '    filename     arcsec    pixels '
    msg_string += msgs.newline() + '----------------------------------------------------'
    for iexp, file in enumerate(files):
        msg_string += msgs.newline() + '    {:s}    {:6.2f}    {:6.2f}'.format(
                os.path.basename(file), offset_arcsec[iexp], offset_arcsec[iexp] / platescale)
    msg_string += msgs.newline() + '********************************************************'
    msgs.info(msg_string)

    ## Read in the master frames that we need ##
    det = 1  # Currently only CHIP1 is supported
    if std_spec1d_file is not None:
        # Get the standard trace if need be
        sobjs = specobjs.SpecObjs.from_fitsfile(std_spec1d_file)
        this_det = sobjs.DET == det
        if np.any(this_det):
            sobjs_det = sobjs[this_det]
            sobjs_std = sobjs_det.get_std()
            std_trace = None if sobjs_std is None else sobjs_std.TRACE_SPAT.flatten()
        else:
            std_trace = None
    else:
        std_trace = None

    # Read in the bias
    msbias = buildimage.BiasImage.from_file(bias_masterframe_name)
    # Read in the msbpm
    sdet = get_dnum(det, prefix=False)
    msbpm = spectrograph.bpm(files[0], det)
    # Read in the slits
    slits = slittrace.SlitTraceSet.from_file(slit_masterframe_name)
    # Reset the bitmask
    slits.mask = slits.mask_init.copy()
    # Read in the wv_calib
    wv_calib = wavecalib.WaveCalib.from_file(wvcalib_masterframe_name)
    #wv_calib.is_synced(slits)
    slits.mask_wvcalib(wv_calib)
    # Read in the tilts
    tilts_obj = wavetilts.WaveTilts.from_file(tilts_masterframe_name)
    tilts_obj.is_synced(slits)
    slits.mask_wavetilts(tilts_obj)

    # Build the Calibrate object
    caliBrate = calibrations.Calibrations(None, parset['calibrations'], spectrograph, None)
    caliBrate.msbias = msbias
    caliBrate.msbpm = msbpm
    caliBrate.slits = slits
    caliBrate.wavetilts = tilts_obj
    caliBrate.wv_calib = wv_calib

    # Find the unique offsets.  This is a bit of a kludge: offsets are grouped by
    # rounding to the nearest arcsec.  I would like to be able to specify a
    # tolerance here, but that requires a version of np.unique that accepts one.
    uniq_offsets, uni_indx = np.unique(np.around(offset_arcsec), return_inverse=True)
    nuniq = uniq_offsets.size
    spec2d_list = []
    offset_ref = offset_arcsec[0]
    offsets_dith_pix = []
    # TODO: Generalize to multiple slits, doing one slit at a time?
    islit = 0

    # Loop over the unique throws and create a spec2d_A and spec2d_B for each,
    # which are then fed into coadd2d with the correct offsets

    # TODO: Rework the logic here so that we can print out a unified report on
    # what was actually reduced.
    for iuniq in range(nuniq):
        indx = uni_indx == iuniq
        files_uni = files[indx]
        offsets = offset_arcsec[indx]
        msgs.info('Reducing images for offset = {:}'.format(offsets[0]))
        spec2DObj = run(files_uni, caliBrate, spectrograph, det, parset, show=args.show,
                        std_trace=std_trace)
        spec2d_list += [spec2DObj]
        offsets_dith_pix += [np.mean(offsets) / platescale]

    offsets_dith_pix = np.array(offsets_dith_pix)

    if args.offset is not None:
        offsets_pixels = np.array([0.0, args.offset])
        msgs.info('Using user specified offsets instead: {:5.2f}'.format(args.offset))
    else:
        offsets_pixels = offsets_dith_pix

    # Instantiate Coadd2d
    coadd = coadd2d.CoAdd2D.get_instance(spec2d_list, spectrograph, parset, det=det,
                                         offsets=offsets_pixels, weights='uniform',
                                         spec_samp_fact=args.spec_samp_fact,
                                         spat_samp_fact=args.spat_samp_fact,
                                         ir_redux=True, debug=args.show)
    # Coadd the slits
    # TODO: implement only_slits later
    coadd_dict_list = coadd.coadd(only_slits=None, interp_dspat=False)
    # Create the pseudo images
    pseudo_dict = coadd.create_pseudo_image(coadd_dict_list)

    # Multiply in a sensitivity function to flux the 2d image
    if args.flux:
        # Load the sensitivity function
        #wave_sens, sfunc, _, _, _ = sensfunc.SensFunc.load(sensfunc_masterframe_name)
        sens = sensfunc.SensFunc.from_file(sensfunc_masterframe_name)

        # Interpolate the sensitivity function onto the wavelength grid of the
        # data.  Since the image is rectified this is trivial and we don't need
        # to do a 2d interpolation.
        exptime = spectrograph.get_meta_value(files[0], 'exptime')
        sens_factor = flux_calib.get_sensfunc_factor(
                pseudo_dict['wave_mid'][:, islit], sens.wave, sens.zeropoint, exptime,
                extrap_sens=parset['fluxcalib']['extrap_sens'])

        # Compute the median sensitivity and set the sensitivity to zero at
        # locations 100 times the median.  This prevents the 2d image from
        # blowing up where the sens_factor explodes because there is no
        # throughput.
        sens_gpm = sens_factor < 100.0 * np.median(sens_factor)
        sens_factor_masked = sens_factor * sens_gpm
        sens_factor_img = np.repeat(sens_factor_masked[:, np.newaxis], pseudo_dict['nspat'],
                                    axis=1)
        imgminsky = sens_factor_img * pseudo_dict['imgminsky']
        imgminsky_gpm = sens_gpm[:, np.newaxis] & pseudo_dict['inmask']
    else:
        imgminsky = pseudo_dict['imgminsky']
        imgminsky_gpm = pseudo_dict['inmask']

    ##########################
    # Now display the images #
    ##########################
    if not args.no_gui:
        display.connect_to_ginga(raise_err=True, allow_new=True)

        # TODO: Bug in ginga prevents me from using cuts here for some reason
        mean, med, sigma = sigma_clipped_stats(imgminsky[imgminsky_gpm], sigma_lower=3.0,
                                               sigma_upper=3.0)
        chname_skysub = 'fluxed-skysub-det{:s}'.format(sdet) \
                            if args.flux else 'skysub-det{:s}'.format(sdet)
        cuts_skysub = (med - 3.0 * sigma, med + 3.0 * sigma)
        cuts_resid = (-5.0, 5.0)
        #fits.writeto('/Users/joe/ginga_test.fits', imgminsky, overwrite=True)
        #fits.writeto('/Users/joe/ginga_mask.fits', imgminsky_gpm.astype(float), overwrite=True)
        #embed()

        # Clear all channels at the beginning
        # TODO: JFH For some reason Ginga crashes when I try to put cuts in here.
        viewer, ch_skysub = display.show_image(imgminsky, chname=chname_skysub,
                                               waveimg=pseudo_dict['waveimg'], clear=True,
                                               cuts=cuts_skysub)
        slit_left, slit_righ, _ = pseudo_dict['slits'].select_edges()
        slit_id = slits.slitord_id[0]
        display.show_slits(viewer, ch_skysub, slit_left, slit_righ, slit_ids=slit_id)

        # SKY RESIDS
        chname_skyresids = 'sky_resid-det{:s}'.format(sdet)
        # Sky residual map
        image = pseudo_dict['imgminsky'] * np.sqrt(pseudo_dict['sciivar']) * pseudo_dict['inmask']
        viewer, ch_skyresids = display.show_image(image, chname_skyresids,
                                                  waveimg=pseudo_dict['waveimg'], cuts=cuts_resid)
        display.show_slits(viewer, ch_skyresids, slit_left, slit_righ,
                           slit_ids=slits.slitord_id[0])
        shell = viewer.shell()
        out = shell.start_global_plugin('WCSMatch')
        out = shell.call_global_plugin_method('WCSMatch', 'set_reference_channel',
                                              [chname_skysub], {})

    # TODO: extract along a spatial position
    if args.writefits:
        head0 = fits.getheader(files[0])
        # TODO: use meta tools for the object name in the future.
        outfile = target + '_specXspat_{:3.2f}X{:3.2f}.fits'.format(args.spec_samp_fact,
                                                                    args.spat_samp_fact)
        hdu = fits.PrimaryHDU(imgminsky, header=head0)
        hdu_resid = fits.ImageHDU(pseudo_dict['imgminsky']
                                  * np.sqrt(pseudo_dict['sciivar']) * pseudo_dict['inmask'])
        hdu_wave = fits.ImageHDU(pseudo_dict['waveimg'])
        hdul = fits.HDUList([hdu, hdu_resid, hdu_wave])
        msgs.info('Writing sky-subtracted image to {:s}'.format(outfile))
        hdul.writeto(outfile, overwrite=True)

    msgs.info(utils.get_time_string(time.time() - tstart))
    if args.embed:
        embed()

    return 0
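# The TODO above asks for a tolerance-aware replacement for np.unique when
# grouping dither throws. A minimal sketch, not part of the pipeline (the
# helper name is illustrative): sort the offsets and start a new group
# wherever the gap between sorted neighbors exceeds the tolerance.
def unique_with_tolerance(values, tol=0.1):
    """Group values whose sorted neighbors differ by less than `tol` (arcsec).

    Returns the group means and, for each input value, its group index,
    mirroring np.unique(..., return_inverse=True).
    """
    import numpy as np  # local import keeps the sketch self-contained
    values = np.asarray(values, dtype=float)
    srt = np.argsort(values)
    # New group wherever the gap to the previous sorted value exceeds tol
    group_sorted = np.concatenate(([0], np.cumsum(np.diff(values[srt]) > tol)))
    inverse = np.empty(values.size, dtype=int)
    inverse[srt] = group_sorted
    means = np.array([values[inverse == i].mean() for i in range(group_sorted[-1] + 1)])
    return means, inverse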
def main(args):

    import subprocess

    from astropy.io import fits

    from pypeit import msgs
    from pypeit import io
    from pypeit.display import display
    from pypeit.spectrographs import keck_lris
    from pypeit.spectrographs import keck_deimos
    from pypeit.spectrographs import gemini_gmos
    from pypeit.spectrographs import mmt_binospec
    from pypeit.spectrographs import mmt_mmirs
    from pypeit.spectrographs import mmt_bluechannel
    from pypeit.spectrographs import util

    # List only?
    if args.list:
        hdu = io.fits_open(args.file)
        hdu.info()
        return

    # Setup for PypeIt imports
    msgs.reset(verbosity=2)

    # RAW_LRIS??
    if 'keck_lris' in args.spectrograph:
        if args.spectrograph == 'keck_lris_red_orig':
            gen_lris = keck_lris.KeckLRISROrigSpectrograph()
            img = gen_lris.get_rawimage(args.file, 1)[1]
        else:
            # Using LRISr, but this will work for LRISb too
            gen_lris = keck_lris.KeckLRISRSpectrograph()
            img = gen_lris.get_rawimage(args.file, None)[1]
    # RAW_DEIMOS??
    elif args.spectrograph == 'keck_deimos':
        gen_deimos = keck_deimos.KeckDEIMOSSpectrograph()
        img = gen_deimos.get_rawimage(args.file, None)[1]
    # RAW_GEMINI??
    elif 'gemini_gmos' in args.spectrograph:
        # TODO: this routine should show the whole mosaic if no detector number is passed in!
        # Need to figure out the number of amps
        spectrograph = util.load_spectrograph(args.spectrograph)
        img = spectrograph.get_rawimage(args.file, args.det)[1]
    # RAW_BinoSpec
    elif args.spectrograph == 'mmt_binospec':
        gen_bino = mmt_binospec.MMTBINOSPECSpectrograph()
        img = gen_bino.get_rawimage(args.file, args.det)[1]
    # RAW_MMIRS
    elif args.spectrograph == 'mmt_mmirs':
        gen_mmirs = mmt_mmirs.MMTMMIRSSpectrograph()
        img = gen_mmirs.get_rawimage(args.file, args.det)[1]
    # RAW MMT blue channel
    elif args.spectrograph == 'mmt_bluechannel':
        gen_bluechan = mmt_bluechannel.MMTBlueChannelSpectrograph()
        img = gen_bluechan.get_rawimage(args.file, args.det)[1]
    else:
        hdu = io.fits_open(args.file)
        img = hdu[args.exten].data

    # Display
    display.show_image(img, chname=args.chname)
def test_instantiate(fitstbl):
    par = pypeitpar.PypeItPar()
    spectrograph = load_spectrograph('shane_kast_blue')
    caliBrate = calibrations.MultiSlitCalibrations(fitstbl, par['calibrations'], spectrograph)
def main(args):

    import os
    import sys
    import astropy.io.fits as fits

    from pypeit import masterframe
    from pypeit.spectrographs.util import load_spectrograph
    from pypeit.core import parse
    from pypeit.core import gui
    from pypeit.core.wavecal import waveio, templates
    from pypeit.wavecalib import WaveCalib
    from pypeit import slittrace
    from pypeit.images.buildimage import ArcImage

    # Load the MasterArc file
    if os.path.exists(args.arc_file):
        arcfil = args.arc_file
    else:
        arcfil = "Masters/{0:s}".format(args.arc_file)
        if not os.path.exists(arcfil):
            print("Could not find MasterArc file.")
            sys.exit()
    msarc = ArcImage.from_file(arcfil)

    mdir = msarc.head0['MSTRDIR']
    mkey = msarc.head0['MSTRKEY']

    # Load the spectrograph
    specname = msarc.head0['PYP_SPEC']
    spec = load_spectrograph(specname)
    par = spec.default_pypeit_par()['calibrations']['wavelengths']

    # Get the lamp list
    if args.lamps == '':
        lamplist = par['lamps']
        if lamplist is None:
            print("ERROR :: Cannot determine the lamps")
            sys.exit()
    else:
        lamplist = args.lamps.split(",")
    par['lamps'] = lamplist

    # Load the slits
    slits = slittrace.SlitTraceSet.from_file(args.slits_file)
    # Reset the mask
    slits.mask = slits.mask_init.copy()

    # Check if a solution exists
    solnname = os.path.join(mdir, masterframe.construct_file_name(WaveCalib, mkey))
    wv_calib = waveio.load_wavelength_calibration(solnname) \
        if os.path.exists(solnname) else None

    # Load the MasterFrame (if it exists and is desired)?
    wavecal = WaveCalib(msarc, slits, spec, par, binspectral=slits.binspec, det=args.det,
                        master_key=mkey, msbpm=msarc.fullmask)
    arccen, arc_maskslit = wavecal.extract_arcs()

    # Launch the identify window
    arcfitter = gui.identify.initialise(arccen, slit=int(args.slit), par=par,
                                        wv_calib_all=wv_calib,
                                        wavelim=[args.wmin, args.wmax],
                                        nonlinear_counts=spec.nonlinear_counts(msarc.detector))
    final_fit = arcfitter.get_results()

    # Ask the user if they wish to store the result in the PypeIt archive
    if final_fit['rms'] < args.rmstol:
        ans = ''
        while ans != 'y' and ans != 'n':
            ans = input("Would you like to store this wavelength solution in the archive? (y/n): ")
        if ans == 'y':
            gratname = fits.getheader(msarc.head0['F1'])[spec.meta['dispname']['card']].replace("/", "_")
            dispangl = "UNKNOWN"
            templates.pypeit_identify_record(final_fit, slits.binspec, specname, gratname, dispangl)
            print("Your wavelength solution has been stored.")
            print("Please consider sending your solution to the PypeIt team!")
    else:
        print("Final fit RMS: {0:0.3f} is larger than the allowed tolerance: {1:0.3f}".format(
            final_fit['rms'], args.rmstol))
        print("Use the --rmstol option on the command line to allow a more flexible RMS tolerance.")
        ans = ''
        while ans != 'y' and ans != 'n':
            ans = input("Would you like to store the line IDs? (y/n): ")
        if ans == 'y':
            arcfitter.save_IDs()
def __init__(self, std_spec1d_file=None, sci_spec1d_file=None, sens_file=None, std_specobjs=None, std_header=None, spectrograph=None, telluric=False, setup=None, master_dir=None, mode=None, star_type=None, star_mag=None, BALM_MASK_WID=5.0, nresln=None, debug=False): # Load standard files std_spectro = None self.std_spec1d_file = std_spec1d_file # Need to unwrap these (sometimes).. self.std_specobjs = std_specobjs self.std_header = std_header if self.std_spec1d_file is not None: self.std_specobjs, self.std_header = load.ech_load_specobj(self.std_spec1d_file) msgs.info('Loaded {0} spectra from the spec1d standard star file: {1}'.format( len(self.std_specobjs), self.std_spec1d_file)) std_spectro = self.std_header['INSTRUME'] try: self.std_ra = self.std_header['RA'] except: self.std_ra = None try: self.std_dec = self.std_header['DEC'] except: self.std_dec = None try: self.std_file = self.std_header['FILENAME'] except: self.std_file = None # Load the science files sci_spectro = None self.sci_spec1d_file = sci_spec1d_file self.sci_specobjs = [] self.sci_header = None if self.sci_spec1d_file is not None: self.sci_specobjs, self.sci_header = load.ech_load_specobj(self.sci_spec1d_file) msgs.info('Loaded {0} spectra from the spec1d science file: {1}'.format( len(self.sci_specobjs), self.sci_spec1d_file)) sci_spectro = self.sci_header['INSTRUME'] # Compare instruments if they exist if std_spectro is not None and sci_spectro is not None and std_spectro != sci_spectro: msgs.error('Standard spectra are not the same instrument as science!!') # Instantiate the spectrograph _spectrograph = spectrograph if _spectrograph is None: _spectrograph = std_spectro if _spectrograph is not None: msgs.info("Spectrograph set to {0} from standard file".format(_spectrograph)) if _spectrograph is None: _spectrograph = sci_spectro if _spectrograph is not None: msgs.info("Spectrograph set to {0} from science file".format(_spectrograph)) self.spectrograph = load_spectrograph(_spectrograph) # MasterFrame masterframe.MasterFrame.__init__(self, self.frametype, setup, master_dir=master_dir, mode=mode) # Get the extinction data self.extinction_data = None if self.spectrograph is not None: self.extinction_data \ = flux.load_extinction_data(self.spectrograph.telescope['longitude'], self.spectrograph.telescope['latitude']) elif self.sci_header is not None and 'LON-OBS' in self.sci_header.keys(): self.extinction_data \ = flux.load_extinction_data(self.sci_header['LON-OBS'], self.sci_header['LAT-OBS']) # Once the spectrograph is instantiated, can also set the # extinction data # Parameters self.sens_file = sens_file # Set telluric option self.telluric = telluric # Main outputs self.sens_dict = None if self.sens_file is None \ else self.load_master(self.sens_file) # Attributes self.steps = [] # Key Internals self.std = None # Standard star spectrum (SpecObj object) self.std_idx = None # Nested indices for the std_specobjs list that corresponds # to the star! # Echelle key self.star_type = star_type self.star_mag = star_mag self.BALM_MASK_WID = BALM_MASK_WID self.nresln = nresln self.debug = debug
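    # Aside: the try/except blocks above that read RA/DEC/FILENAME from the
    # standard-star header could be collapsed using astropy's dict-like
    # Header.get(), which returns None when a card is absent. A hedged sketch
    # of the equivalent:
    #
    #     self.std_ra = self.std_header.get('RA')
    #     self.std_dec = self.std_header.get('DEC')
    #     self.std_file = self.std_header.get('FILENAME')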
    def unpack_object(self, ret_flam=False, extract_type='OPT'):
        """
        Utility function to unpack the sobjs for one object and return various
        numpy arrays describing the spectrum and meta data. The user needs to
        already have trimmed the SpecObjs to the relevant indices for the
        object.

        Args:
            ret_flam (:obj:`bool`, optional):
                If True return the FLAM, otherwise return COUNTS.
            extract_type (:obj:`str`, optional):
                Extraction type to unpack: 'OPT' (optimal) or 'BOX' (boxcar).

        Returns:
            tuple: Returns the following where all numpy arrays returned have
            shape (nspec, norders) for Echelle data and (nspec,) for Multislit
            data.

                - wave (`numpy.ndarray`_): Wavelength grids
                - flux (`numpy.ndarray`_): Flambda or counts
                - flux_ivar (`numpy.ndarray`_): Inverse variance (of Flambda or counts)
                - flux_gpm (`numpy.ndarray`_): Good pixel mask. True=Good
                - meta_spec (:obj:`dict`): Dictionary containing meta data. The keys
                  are defined by spectrograph.parse_spec_header()
                - header (`astropy.io.fits.Header`_): Header from the spec1d file
        """
        # Prep
        norddet = self.nobj
        flux_attr = 'FLAM' if ret_flam else 'COUNTS'
        flux_key = '{}_{}'.format(extract_type, flux_attr)
        wave_key = '{}_WAVE'.format(extract_type)
        # Test
        if getattr(self, flux_key)[0] is None:
            msgs.error("Flux not available for {}. Try the other extraction method.".format(flux_key))
        nspec = getattr(self, flux_key)[0].size

        # Allocate arrays and unpack spectrum
        wave = np.zeros((nspec, norddet))
        flux = np.zeros((nspec, norddet))
        flux_ivar = np.zeros((nspec, norddet))
        flux_gpm = np.zeros((nspec, norddet), dtype=bool)
        detector = np.zeros(norddet, dtype=int)
        ech_orders = np.zeros(norddet, dtype=int)

        # TODO: make the desired extraction (OPT vs. BOX) an optional input variable.
        for iorddet in range(norddet):
            wave[:, iorddet] = getattr(self, wave_key)[iorddet]
            flux_gpm[:, iorddet] = getattr(self, '{}_MASK'.format(extract_type))[iorddet]
            detector[iorddet] = self[iorddet].DET
            if self[0].PYPELINE == 'Echelle':
                ech_orders[iorddet] = self[iorddet].ECH_ORDER
            flux[:, iorddet] = getattr(self, flux_key)[iorddet]
            flux_ivar[:, iorddet] = getattr(self, flux_key + '_IVAR')[iorddet]  # e.g. OPT_FLAM_IVAR

        # Populate meta data
        spectrograph = load_spectrograph(self.header['PYP_SPEC'])
        meta_spec = spectrograph.parse_spec_header(self.header)
        # Add the pypeit spectrograph name.
        # TODO JFH: Make this an attribute of the specobj by default.
        meta_spec['PYP_SPEC'] = self.header['PYP_SPEC']
        meta_spec['PYPELINE'] = self[0].PYPELINE
        meta_spec['DET'] = detector

        # Return
        if self[0].PYPELINE in ['MultiSlit', 'IFU'] and self.nobj == 1:
            meta_spec['ECH_ORDERS'] = None
            return wave.reshape(nspec), flux.reshape(nspec), flux_ivar.reshape(nspec), \
                   flux_gpm.reshape(nspec), meta_spec, self.header
        else:
            meta_spec['ECH_ORDERS'] = ech_orders
            return wave, flux, flux_ivar, flux_gpm, meta_spec, self.header
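    # Hedged usage sketch for unpack_object (the file name and target selection
    # are illustrative): the method assumes the SpecObjs has already been
    # trimmed to a single object, e.g. by isolating the standard star.
    #
    #     from pypeit import specobjs
    #     sobjs = specobjs.SpecObjs.from_fitsfile('spec1d_example.fits')
    #     sobjs_std = sobjs.get_std()
    #     if sobjs_std is not None:
    #         wave, flux, ivar, gpm, meta_spec, header = \
    #             sobjs_std.unpack_object(ret_flam=False)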
def load_kast_blue_masters(aimg=False, edges=False, tilts=False, wvcalib=False, pixflat=False):
    """
    Load up the set of shane_kast_blue master frames.

    Order is Arc, edges, tilts_dict, wv_calib, pixflat.

    Args:
        aimg (bool, optional):
            Load the arc image
        edges (bool, optional):
            Load the slit edges
        tilts (bool, optional):
            Load the wavelength tilts
        wvcalib (bool, optional):
            Load the wavelength calibration
        pixflat (bool, optional):
            Load the pixel flat

    Returns:
        list: List of calibration items
    """
    spectrograph = load_spectrograph('shane_kast_blue')
    spectrograph.naxis = (2112, 350)  # Image shape with overscan

    master_dir = os.path.join(os.getenv('PYPEIT_DEV'), 'Cooked', 'Shane_Kast_blue')
    reuse_masters = True

    # Load up the Masters
    ret = []
    master_key = 'A_1_01'
    if aimg:
        AImg = arcimage.ArcImage(spectrograph, master_key=master_key,
                                 master_dir=master_dir, reuse_masters=reuse_masters)
        msarc = AImg.load()
        ret.append(msarc)
    if edges:
        trace_file = '{0}.gz'.format(
            os.path.join(master_dir, MasterFrame.construct_file_name('Edges', master_key)))
        ret.append(edgetrace.EdgeTraceSet.from_file(trace_file))
    if tilts:
        tilts_file = os.path.join(master_dir, MasterFrame.construct_file_name('Tilts', master_key))
        tilts_dict = wavetilts.WaveTilts.from_master_file(tilts_file).tilts_dict
        ret.append(tilts_dict)
    if wvcalib:
        calib_file = os.path.join(master_dir,
                                  MasterFrame.construct_file_name('WaveCalib', master_key,
                                                                  file_format='json'))
        wv_calib = waveio.load_wavelength_calibration(calib_file)
        ret.append(wv_calib)
    # Pixelflat
    if pixflat:
        calib_file = os.path.join(master_dir, MasterFrame.construct_file_name('Flat', master_key))
        flatField = flatfield.FlatField.from_master_file(calib_file)
        ret.append(flatField.mspixelflat)
    # Return
    return ret
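# Illustrative call (requires the PYPEIT_DEV 'Cooked' data): load only the
# slit edges and the wavelength solution. The unpacking order follows the
# docstring (Arc, edges, tilts, wv_calib, pixflat).
#
#     edges_obj, wv_calib = load_kast_blue_masters(edges=True, wvcalib=True)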
def test_gemini_gmos(): """ The DRAGONS output was constructed as follows: .. code-block:: python from astropy.io import fits import astrodata from geminidr.gmos.lookups import geometry_conf from geminidr.gemini.lookups.keyword_comments import keyword_comments from gempy.gemini import gemini_tools from gempy.library import transform # Read in the data ad = astrodata.open(file) # There must be a better way to do this, but this converts the type # to float ad = ad * 1. # Trim it gemini_tools.trim_to_data_section(adinput=ad, keyword_comments=keyword_comments) # Add the transformation information transform.add_mosaic_wcs(ad, geometry_conf) # And perform the transformations to create the mosaic image. mosaic_dragons = transform.resample_from_wcs(ad, 'mosaic', order=0)[0].data fits.HDUList([fits.PrimaryHDU(data=mosaic_dragons.astype(np.uint16))] ).writeto(dragons_file, overwrite=True) It's worth noting that this reproduces the DRAGONS result exactly *specifically for this case*, but that's not true for some of the other setups. E.g., for some reason, the red chip in the Gemini South data sets in the dev-suite is slightly off; the mosaic images are within ~1e-10 when the order is >0, but there's different behavior in the nearest-grid-point result leading to significant differences. We're using exactly the same transformations, so I don't understand why this should happen. """ dragons_file = data_path( 'GN_HAM_R400_885_N20190205S0035_dragons_mosaic.fits') file = os.path.join(os.environ['PYPEIT_DEV'], 'RAW_DATA', 'gemini_gmos', 'GN_HAM_R400_885', 'N20190205S0035.fits') # Load the spectrograph spec = load_spectrograph('gemini_gmos_north_ham') msc = (1, 2, 3) msc_par = spec.get_mosaic_par(msc, hdu=fits.open(file)) # Load the images and trim and orient them imgs = [None] * spec.ndet for i, det in enumerate(msc): imgs[i] = RawImage(file, spec, det) imgs[i].trim() imgs[i].orient() imgs[i] = imgs[i].image[0] mosaic_pypeit, mosaic_ivar, mosaic_npix, _ = mosaic.build_image_mosaic( imgs, msc_par.tform) _mosaic_pypeit = np.fliplr(mosaic_pypeit.T).astype(np.uint16) mosaic_dragons = fits.open(dragons_file)[0].data assert np.array_equal(mosaic_dragons, _mosaic_pypeit[1:, :-2]), 'Bad mosaic'
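# Note on the comparison above: np.fliplr(mosaic_pypeit.T) is a 90-degree
# clockwise rotation (equivalent to np.rot90(mosaic_pypeit, k=-1)), which maps
# the PypeIt mosaic orientation onto the DRAGONS one before the pixel-level
# assert; the [1:, :-2] slice appears to trim the one-row/two-column
# registration offset between the two mosaics.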
def dummy_fitstbl(nfile=10, spectro_name='shane_kast_blue', directory='', notype=False): """ Generate a dummy fitstbl for testing Parameters ---------- nfile : int, optional Number of files to mimic spectro_name : str, optional Name of spectrograph to mimic notype : bool (optional) If True, do not add image type info to the fitstbl Returns ------- fitstbl : PypeItMetaData """ fitsdict = {} fitsdict['index'] = np.arange(nfile) fitsdict['directory'] = [directory] * nfile fitsdict['filename'] = ['b{:03d}.fits.gz'.format(i) for i in range(nfile)] # TODO: The below will fail at 60 dates = ['2015-01-23T00:{:02d}:11.04'.format(i) for i in range(nfile)] ttime = time.Time(dates, format='isot') fitsdict['mjd'] = ttime.mjd fitsdict['target'] = ['Dummy'] * nfile fitsdict['ra'] = ['00:00:00'] * nfile fitsdict['dec'] = ['+00:00:00'] * nfile fitsdict['exptime'] = [300.] * nfile fitsdict['dispname'] = ['600/4310'] * nfile fitsdict['dichroic'] = ['560'] * nfile fitsdict["binning"] = ['1,1'] * nfile fitsdict["airmass"] = [1.0] * nfile if spectro_name == 'shane_kast_blue': fitsdict['numamplifiers'] = [1] * nfile # Lamps for i in range(1, 17): fitsdict['lampstat{:02d}'.format(i)] = ['off'] * nfile fitsdict['exptime'][0] = 0 # Bias fitsdict['lampstat06'][1] = 'on' # Arc fitsdict['exptime'][1] = 30 # Arc fitsdict['lampstat01'][2] = 'on' # Trace, pixel, slit flat fitsdict['lampstat01'][3] = 'on' # Trace, pixel, slit flat fitsdict['exptime'][2] = 30 # flat fitsdict['exptime'][3] = 30 # flat fitsdict['ra'][4] = '05:06:36.6' # Standard fitsdict['dec'][4] = '52:52:01.0' fitsdict['airmass'][4] = 1.2 fitsdict['ra'][5] = '07:06:23.45' # Random object fitsdict['dec'][5] = '+30:20:50.5' fitsdict['decker'] = ['0.5 arcsec'] * nfile # arrays for k in fitsdict.keys(): fitsdict[k] = np.array(fitsdict[k]) spectrograph = load_spectrograph(spectro_name) fitstbl = PypeItMetaData(spectrograph, spectrograph.default_pypeit_par(), data=fitsdict) fitstbl['instrume'] = spectro_name type_bits = np.zeros(len(fitstbl), dtype=fitstbl.type_bitmask.minimum_dtype()) # Image typing if not notype: if spectro_name == 'shane_kast_blue': #fitstbl['sci_ID'] = 1 # This links all the files to the science object type_bits[0] = fitstbl.type_bitmask.turn_on(type_bits[0], flag='bias') type_bits[1] = fitstbl.type_bitmask.turn_on(type_bits[1], flag='arc') type_bits[1] = fitstbl.type_bitmask.turn_on(type_bits[1], flag='tilt') type_bits[2:4] = fitstbl.type_bitmask.turn_on( type_bits[2:4], flag=['pixelflat', 'trace']) type_bits[4] = fitstbl.type_bitmask.turn_on(type_bits[4], flag='standard') type_bits[5:] = fitstbl.type_bitmask.turn_on(type_bits[5:], flag='science') fitstbl.set_frame_types(type_bits) # Calibration groups cfgs = fitstbl.unique_configurations( ignore_frames=['bias', 'dark']) fitstbl.set_configurations(cfgs) fitstbl.set_calibration_groups(global_frames=['bias', 'dark']) return fitstbl
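# Illustrative use of dummy_fitstbl in a test (a hedged sketch, assuming
# find_frames returns a boolean mask by default): generate the dummy table
# and check the frame typing set up above, where file 0 is typed as a bias
# and files 5-9 as science.
#
#     fitstbl = dummy_fitstbl(nfile=10)
#     assert fitstbl.find_frames('bias').sum() == 1
#     assert fitstbl.find_frames('science').sum() == 5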
def coadd_cube(files, parset, overwrite=False):
    """ Main routine to coadd spec2D files into a 3D datacube

    Args:
        files (list):
            List of all spec2D files
        parset (:class:`pypeit.par.core.PypeItPar`):
            An instance of the parameter set.
        overwrite (bool):
            Overwrite the output file, if it exists?
    """
    # Get the detector number
    det = 1 if parset is None else parset['rdx']['detnum']

    # Load the spectrograph
    spec2DObj = spec2dobj.Spec2DObj.from_file(files[0], det)
    specname = spec2DObj.head0['PYP_SPEC']
    spec = load_spectrograph(specname)

    # Grab the parset, if not provided
    if parset is None:
        parset = spec.default_pypeit_par()
    cubepar = parset['reduce']['cube']

    # Check the output file
    outfile = cubepar['output_filename'] if ".fits" in cubepar['output_filename'] \
        else cubepar['output_filename'] + ".fits"
    out_whitelight = outfile.replace(".fits", "_whitelight.fits")
    if os.path.exists(outfile) and not overwrite:
        msgs.error("Output filename already exists:" + msgs.newline() + outfile)
    elif os.path.exists(out_whitelight) and cubepar['save_whitelight'] and not overwrite:
        msgs.error("Output filename already exists:" + msgs.newline() + out_whitelight)
    # Check the reference cube and image exist, if requested
    ref_scale = None  # This will be used to correct relative scaling among the various input frames
    if cubepar['standard_cube'] is not None:
        if not os.path.exists(cubepar['standard_cube']):
            msgs.error("Standard cube does not exist:" + msgs.newline() + cubepar['standard_cube'])
        cube = fits.open(cubepar['standard_cube'])
        ref_scale = cube['REFSCALE'].data
    if cubepar['reference_image'] is not None:
        if not os.path.exists(cubepar['reference_image']):
            msgs.error("Reference image does not exist:" + msgs.newline()
                       + cubepar['reference_image'])
    if cubepar['flux_calibrate']:
        msgs.error("Flux calibration is not currently implemented" + msgs.newline()
                   + "Please set 'flux_calibrate = False'")

    # prep
    numfiles = len(files)
    combine = cubepar['combine']

    all_ra, all_dec, all_wave = np.array([]), np.array([]), np.array([])
    all_sci, all_ivar, all_idx, all_wghts = np.array([]), np.array([]), np.array([]), np.array([])
    all_wcs = []
    dspat = None if cubepar['spatial_delta'] is None \
        else cubepar['spatial_delta'] / 3600.0  # binning size on the sky (/3600 converts to degrees)
    dwv = cubepar['wave_delta']  # binning size in the wavelength direction (in Angstroms)
    wave_ref = None
    whitelight_img = None  # The whitelight image based on all input spec2d frames
    weights = np.ones(numfiles)  # Weights to use when combining cubes
    for ff, fil in enumerate(files):
        # Load it up
        spec2DObj = spec2dobj.Spec2DObj.from_file(fil, det)
        detector = spec2DObj.detector

        # Setup for PypeIt imports
        msgs.reset(verbosity=2)

        if ref_scale is None:
            ref_scale = spec2DObj.scaleimg.copy()
        # Extract the information
        sciimg = (spec2DObj.sciimg - spec2DObj.skymodel) \
            * (ref_scale / spec2DObj.scaleimg)  # Subtract sky and apply relative scaling
        ivar = spec2DObj.ivarraw / (ref_scale / spec2DObj.scaleimg)**2
        waveimg = spec2DObj.waveimg
        bpmmask = spec2DObj.bpmmask

        # Grab the slit edges
        slits = spec2DObj.slits

        wave0 = waveimg[waveimg != 0.0].min()
        diff = waveimg[1:, :] - waveimg[:-1, :]
        dwv = float(np.median(diff[diff != 0.0]))
        msgs.info("Using wavelength solution: wave0={0:.3f}, "
                  "dispersion={1:.3f} Angstrom/pixel".format(wave0, dwv))

        msgs.info("Constructing slit image")
        slitid_img_init = slits.slit_img(pad=0, initial=True, flexure=spec2DObj.sci_spat_flexure)
        onslit_gpm = (slitid_img_init > 0) & (bpmmask == 0)

        # Grab the WCS of this frame
        wcs = spec.get_wcs(spec2DObj.head0, slits,
detector.platescale, wave0, dwv) all_wcs.append(copy.deepcopy(wcs)) # Find the largest spatial scale of all images being combined # TODO :: probably need to put this in the DetectorContainer pxscl = detector.platescale * parse.parse_binning( detector.binning)[1] / 3600.0 # This should be degrees/pixel slscl = spec.get_meta_value([spec2DObj.head0], 'slitwid') if dspat is None: dspat = max(pxscl, slscl) elif max(pxscl, slscl) > dspat: dspat = max(pxscl, slscl) # Generate an RA/DEC image msgs.info("Generating RA/DEC image") raimg, decimg, minmax = slits.get_radec_image( wcs, initial=True, flexure=spec2DObj.sci_spat_flexure) # Perform the DAR correction if wave_ref is None: wave_ref = 0.5 * (np.min(waveimg[onslit_gpm]) + np.max(waveimg[onslit_gpm])) # Get DAR parameters raval = spec.get_meta_value([spec2DObj.head0], 'ra') decval = spec.get_meta_value([spec2DObj.head0], 'dec') obstime = spec.get_meta_value([spec2DObj.head0], 'obstime') pressure = spec.get_meta_value([spec2DObj.head0], 'pressure') temperature = spec.get_meta_value([spec2DObj.head0], 'temperature') rel_humidity = spec.get_meta_value([spec2DObj.head0], 'humidity') coord = SkyCoord(raval, decval, unit=(units.deg, units.deg)) location = spec.location # TODO :: spec.location should probably end up in the TelescopePar (spec.telescope.location) ra_corr, dec_corr = dc_utils.dar_correction(waveimg[onslit_gpm], coord, obstime, location, pressure, temperature, rel_humidity, wave_ref=wave_ref) raimg[onslit_gpm] += ra_corr decimg[onslit_gpm] += dec_corr # Get copies of arrays to be saved wave_ext = waveimg[onslit_gpm].copy() flux_ext = sciimg[onslit_gpm].copy() ivar_ext = ivar[onslit_gpm].copy() # Perform extinction correction msgs.info("Applying extinction correction") longitude = spec.telescope['longitude'] latitude = spec.telescope['latitude'] airmass = spec2DObj.head0[spec.meta['airmass']['card']] extinct = load_extinction_data(longitude, latitude) # extinction_correction requires the wavelength is sorted wvsrt = np.argsort(wave_ext) ext_corr = extinction_correction(wave_ext[wvsrt] * units.AA, airmass, extinct) # Correct for extinction flux_sav = flux_ext[wvsrt] * ext_corr ivar_sav = ivar_ext[wvsrt] / ext_corr**2 # sort back to the original ordering resrt = np.argsort(wvsrt) # Calculate the weights relative to the zeroth cube if ff != 0: weights[ff] = np.median(flux_sav[resrt] * np.sqrt(ivar_sav[resrt]))**2 # Store the information numpix = raimg[onslit_gpm].size all_ra = np.append(all_ra, raimg[onslit_gpm].copy()) all_dec = np.append(all_dec, decimg[onslit_gpm].copy()) all_wave = np.append(all_wave, wave_ext.copy()) all_sci = np.append(all_sci, flux_sav[resrt].copy()) all_ivar = np.append(all_ivar, ivar_sav[resrt].copy()) all_idx = np.append(all_idx, ff * np.ones(numpix)) all_wghts = np.append(all_wghts, weights[ff] * np.ones(numpix)) # Grab cos(dec) for convenience cosdec = np.cos(np.mean(all_dec) * np.pi / 180.0) # Register spatial offsets between all frames if several frames are being combined if combine: # Check if a reference whitelight image should be used to register the offsets if cubepar["reference_image"] is None: # Generate white light images whitelight_imgs, _, _ = dc_utils.make_whitelight( all_ra, all_dec, all_wave, all_sci, all_wghts, all_idx, dspat) # ref_idx will be the index of the cube with the highest S/N ref_idx = np.argmax(weights) reference_image = whitelight_imgs[:, :, ref_idx].copy() msgs.info( "Calculating spatial translation of each cube relative to cube #{0:d})" .format(ref_idx + 1)) else: ref_idx = -1 # Don't 
use an index # Load reference information reference_image, whitelight_imgs, wlwcs = \ dc_utils.make_whitelight_fromref(all_ra, all_dec, all_wave, all_sci, all_wghts, all_idx, dspat, cubepar['reference_image']) msgs.info( "Calculating the spatial translation of each cube relative to user-defined 'reference_image'" ) # Calculate the image offsets - check the reference is a zero shift ra_shift_ref, dec_shift_ref = calculate_image_offset( reference_image.copy(), reference_image.copy()) for ff in range(numfiles): # Don't correlate the reference image with itself if ff == ref_idx: continue # Calculate the shift ra_shift, dec_shift = calculate_image_offset( whitelight_imgs[:, :, ff], reference_image.copy()) # Convert to reference ra_shift -= ra_shift_ref dec_shift -= dec_shift_ref # Convert pixel shift to degress shift ra_shift *= dspat / cosdec dec_shift *= dspat msgs.info( "Spatial shift of cube #{0:d}: RA, DEC (arcsec) = {1:+0.3f}, {2:+0.3f}" .format(ff + 1, ra_shift * 3600.0, dec_shift * 3600.0)) # Apply the shift all_ra[all_idx == ff] += ra_shift all_dec[all_idx == ff] += dec_shift # Generate a white light image of *all* data msgs.info("Generating global white light image") if cubepar["reference_image"] is None: whitelight_img, _, wlwcs = dc_utils.make_whitelight( all_ra, all_dec, all_wave, all_sci, all_wghts, np.zeros(all_ra.size), dspat) else: _, whitelight_img, wlwcs = \ dc_utils.make_whitelight_fromref(all_ra, all_dec, all_wave, all_sci, all_wghts, np.zeros(all_ra.size), dspat, cubepar['reference_image']) # Calculate the relative spectral weights of all pixels all_wghts = dc_utils.compute_weights( all_ra, all_dec, all_wave, all_sci, all_ivar, all_idx, whitelight_img[:, :, 0], dspat, dwv, relative_weights=cubepar['relative_weights']) # Check if a whitelight image should be saved if cubepar['save_whitelight']: # Check if the white light image still needs to be generated - if so, generate it now if whitelight_img is None: msgs.info("Generating global white light image") if cubepar["reference_image"] is None: whitelight_img, _, wlwcs = dc_utils.make_whitelight( all_ra, all_dec, all_wave, all_sci, all_wghts, np.zeros(all_ra.size), dspat) else: _, whitelight_img, wlwcs = \ dc_utils.make_whitelight_fromref(all_ra, all_dec, all_wave, all_sci, all_wghts, np.zeros(all_ra.size), dspat, cubepar['reference_image']) # Prepare and save the fits file msgs.info("Saving white light image as: {0:s}".format(out_whitelight)) img_hdu = fits.PrimaryHDU(whitelight_img.T, header=wlwcs.to_header()) img_hdu.writeto(out_whitelight, overwrite=overwrite) # Setup the cube ranges ra_min = cubepar['ra_min'] if cubepar['ra_min'] is not None else np.min( all_ra) ra_max = cubepar['ra_max'] if cubepar['ra_max'] is not None else np.max( all_ra) dec_min = cubepar['dec_min'] if cubepar['dec_min'] is not None else np.min( all_dec) dec_max = cubepar['dec_max'] if cubepar['dec_max'] is not None else np.max( all_dec) wav_min = cubepar['wave_min'] if cubepar[ 'wave_min'] is not None else np.min(all_wave) wav_max = cubepar['wave_max'] if cubepar[ 'wave_max'] is not None else np.max(all_wave) if cubepar['wave_delta'] is not None: dwv = cubepar['wave_delta'] # Generate a master WCS to register all frames coord_min = [ra_min, dec_min, wav_min] coord_dlt = [dspat, dspat, dwv] masterwcs = dc_utils.generate_masterWCS(coord_min, coord_dlt, name=specname) msgs.info(msgs.newline() + "-" * 40 + msgs.newline() + "Parameters of the WCS:" + msgs.newline() + "RA min, max = {0:f}, {1:f}".format(ra_min, ra_max) + msgs.newline() + "DEC min, max = 
{0:f}, {1:f}".format(dec_min, dec_max) + msgs.newline() + "WAVE min, max = {0:f}, {1:f}".format(wav_min, wav_max) + msgs.newline() + "Spaxel size = {0:f}''".format(3600.0 * dspat) + msgs.newline() + "Wavelength step = {0:f} A".format(dwv) + msgs.newline() + "-" * 40) # Generate the output binning if combine: numra = int((ra_max - ra_min) * cosdec / dspat) numdec = int((dec_max - dec_min) / dspat) numwav = int((wav_max - wav_min) / dwv) xbins = np.arange(1 + numra) - 0.5 ybins = np.arange(1 + numdec) - 0.5 spec_bins = np.arange(1 + numwav) - 0.5 else: slitlength = int( np.round( np.median(slits.get_slitlengths(initial=True, median=True)))) numwav = int((np.max(waveimg) - wave0) / dwv) xbins, ybins, spec_bins = spec.get_datacube_bins( slitlength, minmax, numwav) # Make the cube msgs.info("Generating pixel coordinates") if combine: pix_coord = masterwcs.wcs_world2pix(all_ra, all_dec, all_wave * 1.0E-10, 0) hdr = masterwcs.to_header() else: pix_coord = wcs.wcs_world2pix( np.vstack((all_ra, all_dec, all_wave * 1.0E-10)).T, 0) hdr = wcs.to_header() # Find the NGP coordinates for all input pixels msgs.info("Generating data cube") bins = (xbins, ybins, spec_bins) datacube, edges = np.histogramdd(pix_coord, bins=bins, weights=all_sci * all_wghts) norm, edges = np.histogramdd(pix_coord, bins=bins, weights=all_wghts) norm_cube = (norm > 0) / (norm + (norm == 0)) datacube *= norm_cube # Create the variance cube, including weights msgs.info("Generating variance cube") all_var = (all_ivar > 0) / (all_ivar + (all_ivar == 0)) var_cube, edges = np.histogramdd(pix_coord, bins=bins, weights=all_var * all_wghts**2) var_cube *= norm_cube**2 # Save the datacube debug = False if debug: datacube_resid, edges = np.histogramdd(pix_coord, bins=(xbins, ybins, spec_bins), weights=all_sci * np.sqrt(all_ivar)) norm, edges = np.histogramdd(pix_coord, bins=(xbins, ybins, spec_bins)) norm_cube = (norm > 0) / (norm + (norm == 0)) outfile = "datacube_resid.fits" msgs.info("Saving datacube as: {0:s}".format(outfile)) hdu = fits.PrimaryHDU((datacube_resid * norm_cube).T, header=masterwcs.to_header()) hdu.writeto(outfile, overwrite=overwrite) msgs.info("Saving datacube as: {0:s}".format(outfile)) final_cube = dc_utils.DataCube(datacube.T, var_cube.T, specname, refscale=ref_scale, fluxed=cubepar['flux_calibrate']) final_cube.to_file(outfile, hdr=hdr, overwrite=overwrite)
def __init__(self, pypeit_file, verbosity=2, overwrite=True, reuse_masters=False, logname=None, show=False, redux_path=None): # Load cfg_lines, data_files, frametype, usrdata, setups = parse_pypeit_file(pypeit_file, runtime=True) self.pypeit_file = pypeit_file # Spectrograph cfg = ConfigObj(cfg_lines) spectrograph_name = cfg['rdx']['spectrograph'] self.spectrograph = load_spectrograph(spectrograph_name) # Par # Defaults spectrograph_def_par = self.spectrograph.default_pypeit_par() # Grab a science file for configuration specific parameters sci_file = None for idx, row in enumerate(usrdata): if 'science' in row['frametype']: sci_file = data_files[idx] break # Set spectrograph_cfg_lines = self.spectrograph.config_specific_par(spectrograph_def_par, sci_file).to_config() self.par = PypeItPar.from_cfg_lines(cfg_lines=spectrograph_cfg_lines, merge_with=cfg_lines) # Fitstbl self.fitstbl = PypeItMetaData(self.spectrograph, self.par, file_list=data_files, usrdata=usrdata, strict=True) # The following could be put in a prepare_to_run() method in PypeItMetaData if 'setup' not in self.fitstbl.keys(): self.fitstbl['setup'] = setups[0] self.fitstbl.get_frame_types(user=frametype) # This sets them using the user inputs self.fitstbl.set_defaults() # Only does something if values not set in PypeIt file self.fitstbl._set_calib_group_bits() self.fitstbl._check_calib_groups() # Write .calib file (For QA naming amongst other things) calib_file = pypeit_file.replace('.pypeit', '.calib') self.fitstbl.write_calib(calib_file) # Other Internals self.logname = logname self.overwrite = overwrite # Currently the runtime argument determines the behavior for reuse_masters. There is also a reuse_masters # parameter in the parset but it is currently ignored. self.reuse_masters=reuse_masters self.show = show # Make the output directories self.par['rdx']['redux_path'] = os.getcwd() if redux_path is None else redux_path msgs.info("Setting reduction path to {:s}".format(self.par['rdx']['redux_path'])) paths.make_dirs(self.spectrograph.spectrograph, self.par['calibrations']['caldir'], self.par['rdx']['scidir'], self.par['rdx']['qadir'], overwrite=self.overwrite, redux_path=self.par['rdx']['redux_path']) # Instantiate Calibrations class self.caliBrate \ = calibrations.MultiSlitCalibrations(self.fitstbl, self.par['calibrations'], self.spectrograph, redux_path=self.par['rdx']['redux_path'], reuse_masters=self.reuse_masters, save_masters=True, write_qa=True, show=self.show) # Init self.verbosity = verbosity # TODO: I don't think this ever used self.frame = None self.det = None self.tstart = None self.basename = None self.sciI = None self.obstime = None
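    # Hedged usage sketch (the .pypeit file name is illustrative): note that
    # the runtime reuse_masters argument, not the parset entry, currently
    # controls whether masters are reused.
    #
    #     rdx = PypeIt('keck_lris_blue_A.pypeit', reuse_masters=True, redux_path='.')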
def main(args): import time import os import numpy as np from pypeit.spectrographs.util import load_spectrograph from pypeit import traceimage, edgetrace, biasframe from pypeit.pypeit import PypeIt from pypeit.core import parse from IPython import embed if args.pypeit_file is not None: pypeit_file = args.pypeit_file if not os.path.isfile(pypeit_file): raise FileNotFoundError( 'File does not exist: {0}'.format(pypeit_file)) pypeit_file = os.path.abspath(pypeit_file) redux_path = os.path.abspath( os.path.split(pypeit_file)[0] if args.redux_path is None else args. redux_path) rdx = PypeIt(pypeit_file, redux_path=redux_path) # Save the spectrograph spec = rdx.spectrograph # Get the calibration group to use group = np.unique( rdx.fitstbl['calib'])[0] if args.group is None else args.group if group not in np.unique(rdx.fitstbl['calib']): raise ValueError( 'Not a valid calibration group: {0}'.format(group)) # Find the rows in the metadata table with trace frames in the # specified calibration group tbl_rows = rdx.fitstbl.find_frames('trace', calib_ID=int(group), index=True) # Master keyword master_key_base = '_'.join( rdx.fitstbl.master_key(tbl_rows[0]).split('_')[:2]) # Save the binning binning = rdx.fitstbl['binning'][tbl_rows[0]] # Save the full file paths files = rdx.fitstbl.frame_paths(tbl_rows) # Trace image processing parameters proc_par = rdx.caliBrate.par['traceframe'] # Slit tracing parameters trace_par = rdx.caliBrate.par['slitedges'] # Get the bias files, if requested bias_rows = rdx.fitstbl.find_frames('bias', calib_ID=int(group), index=True) bias_files = rdx.fitstbl.frame_paths(bias_rows) bias_par = rdx.caliBrate.par['biasframe'] # Set the QA path qa_path = rdx.qa_path else: spec = load_spectrograph(args.spectrograph) master_key_base = 'A_1' binning = '1,1' if args.binning is None else args.binning if not os.path.isfile(args.trace_file): raise FileNotFoundError('File does not exist: {0}'.format( args.trace_file)) files = [os.path.abspath(args.trace_file)] redux_path = os.path.abspath( os.path.split(files[0])[0] if args.redux_path is None else args. redux_path) par = spec.default_pypeit_par() proc_par = par['calibrations']['traceframe'] trace_par = par['calibrations']['slitedges'] bias_files = None bias_par = None # Set the QA path qa_path = os.path.join(os.path.abspath(os.path.split(files[0])[0]), 'QA') detectors = np.arange(spec.ndet) + 1 if args.detector is None else [ args.detector ] master_dir = os.path.join(redux_path, args.master_dir) for det in detectors: # Master keyword for output file name master_key = '{0}_{1}'.format(master_key_base, str(det).zfill(2)) # Get the bias frame if requested if bias_files is None: proc_par['process']['bias'] = 'skip' msbias = None else: biasFrame = biasframe.BiasFrame(spec, files=bias_files, det=det, par=bias_par, master_key=master_key, master_dir=master_dir) msbias = biasFrame.build_image() msbpm = spec.bpm(files[0], det) # Build the trace image traceImage = traceimage.TraceImage(spec, files=files, det=det, par=proc_par, bias=msbias) traceImage.build_image(bias=msbias, bpm=msbpm) # Trace the slit edges t = time.perf_counter() edges = edgetrace.EdgeTraceSet(spec, trace_par, master_key=master_key, master_dir=master_dir, img=traceImage, det=det, bpm=msbpm, auto=True, debug=args.debug, show_stages=args.show, qa_path=qa_path) print('Tracing for detector {0} finished in {1} s.'.format( det, time.perf_counter() - t)) edges.save() return 0
    lines += ['']
    lines += ['Instrument-Specific Default Configuration']
    lines += ['+++++++++++++++++++++++++++++++++++++++++']
    lines += ['']
    lines += textwrap.wrap('The following provides the changes to the global default parameters '
                           'provided above for each instrument. That is, if one were to include '
                           'these in the PypeIt file, you would be reproducing the effect of the '
                           '`default_pypeit_par` method specific to each derived '
                           ':class:`pypeit.spectrographs.spectrograph.Spectrograph` class.', 72)
    lines += ['']

    for spec in available_spectrographs:
        s = load_spectrograph(spec)
        lines += [' '.join([s.telescope['name'], s.camera, '(``{0}``)'.format(s.name)])]
        lines += ['-' * len(lines[-1])]
        lines += ['Alterations to the default parameters are::']
        lines += ['']
        sl = s.default_pypeit_par().to_config(include_descr=False, exclude_defaults=True)
        lines += ['    ' + l for l in sl]
        lines += ['']
    lines += ['']

    output_rst = os.path.join(pypeit_root, 'doc', 'pypeit_par.rst')
    with open(output_rst, 'w') as f:
        # The write-out was truncated in the source; the natural completion
        # (assumed) is to dump the accumulated lines:
        f.write('\n'.join(lines))
"""
Module to run tests on the ProcessImages class.
Requires files in the Development suite and the PYPEIT_DEV environment variable.
"""
import os

import pytest
import glob
import numpy as np

from pypeit.images import buildimage
from pypeit.tests.tstutils import dev_suite_required
from pypeit.par import pypeitpar
from pypeit.spectrographs.util import load_spectrograph
from pypeit.core import procimg

kast_blue = load_spectrograph('shane_kast_blue')


@pytest.fixture
@dev_suite_required
def deimos_flat_files():
    # Longslit in dets 3,7
    deimos_flat_files = [os.path.join(os.getenv('PYPEIT_DEV'), 'RAW_DATA', 'keck_deimos',
                                      '830G_L_8400', ifile)
                         for ifile in ['d0914_0014.fits.gz', 'd0914_0015.fits.gz']]
    assert len(deimos_flat_files) == 2
    return deimos_flat_files
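# For context: a dev_suite_required-style guard can be expressed with
# pytest.mark.skipif. This is a hedged sketch, not the actual tstutils
# implementation.
dev_suite_required_sketch = pytest.mark.skipif('PYPEIT_DEV' not in os.environ,
                                               reason='test requires the PypeIt dev suite')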
def main(args): # Build the fitstable since we currently need it for output. This should not be the case! A_files = [os.path.join(args.full_rawpath, file) for file in args.Afiles] B_files = [os.path.join(args.full_rawpath, file) for file in args.Bfiles] data_files = A_files + B_files ps = pypeitsetup.PypeItSetup(A_files, path='./', spectrograph_name='keck_mosfire') ps.build_fitstbl() fitstbl = ps.fitstbl # Read in the spectrograph, config the parset spectrograph = load_spectrograph('keck_mosfire') spectrograph_def_par = spectrograph.default_pypeit_par() parset = par.PypeItPar.from_cfg_lines( cfg_lines=spectrograph_def_par.to_config(), merge_with=config_lines(args)) science_path = os.path.join(parset['rdx']['redux_path'], parset['rdx']['scidir']) # Calibration Master directory if args.master_dir is None: msgs.error( "You need to set an Environmental variable MOSFIRE_MASTERS that points at the Master Calibs" ) # Define some hard wired master files here to be later parsed out of the directory slit_masterframe_name = os.path.join(args.master_dir, 'MasterSlits_E_15_01.fits.gz') tilts_masterframe_name = os.path.join(args.master_dir, 'MasterTilts_E_1_01.fits') wvcalib_masterframe_name = os.path.join(args.master_dir, 'MasterWaveCalib_E_1_01.fits') # For now don't require a standard std_outfile = None #std_outfile = os.path.join('/Users/joe/Dropbox/PypeIt_Redux/MOSFIRE/Nov19/quicklook/Science/', # 'spec1d_m191118_0064-GD71_MOSFIRE_2019Nov18T104704.507.fits') # make the get_std from pypeit a utility function or class method det = 1 # MOSFIRE has a single detector if std_outfile is not None: # Get the standard trace if need be sobjs = specobjs.SpecObjs.from_fitsfile(std_outfile) this_det = sobjs.DET == det if np.any(this_det): sobjs_det = sobjs[this_det] sobjs_std = sobjs_det.get_std() std_trace = None if sobjs_std is None else sobjs_std.TRACE_SPAT.flatten( ) else: std_trace = None else: std_trace = None # Read in the msbpm sdet = get_dnum(det, prefix=False) msbpm = spectrograph.bpm(A_files[0], det) # Read in the slits slits = slittrace.SlitTraceSet.from_file(slit_masterframe_name) # Reset the bitmask slits.mask = slits.mask_init.copy() # Read in the wv_calib wv_calib = wavecalib.WaveCalib.from_file(wvcalib_masterframe_name) wv_calib.is_synced(slits) slits.mask_wvcalib(wv_calib) # Read in the tilts tilts_obj = wavetilts.WaveTilts.from_file(tilts_masterframe_name) tilts_obj.is_synced(slits) slits.mask_wavetilts(tilts_obj) # Build Science image sciImg = buildimage.buildimage_fromlist(spectrograph, det, parset['scienceframe'], A_files, bpm=msbpm, slits=slits, ignore_saturation=False) # Background Image? 
sciImg = sciImg.sub( buildimage.buildimage_fromlist(spectrograph, det, parset['scienceframe'], B_files, bpm=msbpm, slits=slits, ignore_saturation=False), parset['scienceframe']['process']) # Build the Calibrate object caliBrate = calibrations.Calibrations(None, parset['calibrations'], spectrograph, None) caliBrate.slits = slits caliBrate.wavetilts = tilts_obj caliBrate.wv_calib = wv_calib # Instantiate Reduce object # Required for pypeline specific object # At instantiaton, the fullmask in self.sciImg is modified redux = reduce.Reduce.get_instance(sciImg, spectrograph, parset, caliBrate, 'science', ir_redux=True, show=args.show, det=det, std_outfile=std_outfile) manual_extract_dict = None skymodel, objmodel, ivarmodel, outmask, sobjs, waveImg, tilts = redux.run( std_trace=std_trace, return_negative=True, manual_extract_dict=manual_extract_dict, show_peaks=args.show) # TODO -- Do this upstream # Tack on detector for sobj in sobjs: sobj.DETECTOR = sciImg.detector # Construct the Spec2DObj with the positive image spec2DObj_A = spec2dobj.Spec2DObj(det=det, sciimg=sciImg.image, ivarraw=sciImg.ivar, skymodel=skymodel, objmodel=objmodel, ivarmodel=ivarmodel, waveimg=waveImg, bpmmask=outmask, detector=sciImg.detector, sci_spat_flexure=sciImg.spat_flexure, tilts=tilts, slits=copy.deepcopy(caliBrate.slits)) spec2DObj_A.process_steps = sciImg.process_steps all_spec2d = spec2dobj.AllSpec2DObj() all_spec2d['meta']['ir_redux'] = True all_spec2d[det] = spec2DObj_A # Save image A but with all the objects extracted, i.e. positive and negative #outfile2d, outfile1d = save_exposure(fitstbl, 0, spectrograph, science_path, parset, caliBrate, all_spec2d, sobjs) # Construct the Spec2DObj with the negative image spec2DObj_B = spec2dobj.Spec2DObj(det=det, sciimg=-sciImg.image, ivarraw=sciImg.ivar, skymodel=-skymodel, objmodel=-objmodel, ivarmodel=ivarmodel, waveimg=waveImg, bpmmask=outmask, detector=sciImg.detector, sci_spat_flexure=sciImg.spat_flexure, tilts=tilts, slits=copy.deepcopy(caliBrate.slits)) # Parse the offset information out of the headers. 
TODO in the future get this out of fitstable dither_pattern_A, dither_id_A, offset_arcsec_A = parse_dither_pattern( A_files, spectrograph.primary_hdrext) dither_pattern_B, dither_id_B, offset_arcsec_B = parse_dither_pattern( B_files, spectrograph.primary_hdrext) # Print out a report on the offsets msg_string = msgs.newline( ) + '****************************************************' msg_string += msgs.newline( ) + ' Summary of offsets for dither pattern: {:s}'.format( dither_pattern_A[0]) msg_string += msgs.newline( ) + '****************************************************' msg_string += msgs.newline( ) + 'Position filename arcsec pixels ' msg_string += msgs.newline( ) + '----------------------------------------------------' for iexp, file in enumerate(A_files): msg_string += msgs.newline( ) + ' A {:s} {:6.2f} {:6.2f}'.format( os.path.basename(file), offset_arcsec_A[iexp], offset_arcsec_A[iexp] / sciImg.detector.platescale) for iexp, file in enumerate(B_files): msg_string += msgs.newline( ) + ' B {:s} {:6.2f} {:6.2f}'.format( os.path.basename(file), offset_arcsec_B[iexp], offset_arcsec_B[iexp] / sciImg.detector.platescale) msg_string += msgs.newline( ) + '****************************************************' msgs.info(msg_string) #offset_dith_pix = offset_dith_pix = offset_arcsec_A[0]/sciImg.detector.platescale offsets_dith_pix = (np.array([ 0.0, np.mean(offset_arcsec_B) - np.mean(offset_arcsec_A) ])) / sciImg.detector.platescale if args.offset is not None: offsets_pixels = np.array([0.0, args.offset]) msgs.info('Using user specified offsets instead: {:5.2f}'.format( args.offset)) else: offsets_pixels = offsets_dith_pix spec2d_list = [spec2DObj_A, spec2DObj_B] # Instantiate Coadd2d coadd = coadd2d.CoAdd2D.get_instance(spec2d_list, spectrograph, parset, det=det, offsets=offsets_pixels, weights='uniform', ir_redux=True, debug=args.show, samp_fact=args.samp_fact) # Coadd the slits coadd_dict_list = coadd.coadd( only_slits=None, interp_dspat=False) # TODO implement only_slits later # Create the pseudo images pseudo_dict = coadd.create_pseudo_image(coadd_dict_list) ########################## # Now display the images # ########################## display.display.connect_to_ginga(raise_err=True, allow_new=True) # Bug in ginga prevents me from using cuts here for some reason #mean, med, sigma = sigma_clipped_stats(pseudo_dict['imgminsky'][pseudo_dict['inmask']], sigma_lower=5.0,sigma_upper=5.0) #cut_min = mean - 4.0 * sigma #cut_max = mean + 4.0 * sigma chname_skysub = 'skysub-det{:s}'.format(sdet) # Clear all channels at the beginning # TODO: JFH For some reason Ginga crashes when I try to put cuts in here. viewer, ch = ginga.show_image(pseudo_dict['imgminsky'], chname=chname_skysub, waveimg=pseudo_dict['waveimg'], clear=True) # cuts=(cut_min, cut_max), slit_left, slit_righ, _ = pseudo_dict['slits'].select_edges() slit_id = slits.slitord_id[0] ginga.show_slits(viewer, ch, slit_left, slit_righ, slit_ids=slit_id) # SKRESIDS chname_skyresids = 'sky_resid-det{:s}'.format(sdet) image = pseudo_dict['imgminsky'] * np.sqrt( pseudo_dict['sciivar']) * pseudo_dict['inmask'] # sky residual map viewer, ch = ginga.show_image( image, chname_skyresids, waveimg=pseudo_dict['waveimg'], cuts=(-5.0, 5.0), ) ginga.show_slits(viewer, ch, slit_left, slit_righ, slit_ids=slits.slitord_id[0]) shell = viewer.shell() out = shell.start_global_plugin('WCSMatch') out = shell.call_global_plugin_method('WCSMatch', 'set_reference_channel', [chname_skyresids], {}) if args.embed: embed() return 0
def extract_coadd2d(stack_dict, master_dir, samp_fact = 1.0,ir_redux=False, par=None, std=False, show=False, show_peaks=False): """ Main routine to run the extraction for 2d coadds. Algorithm steps are as follows: - Fill this in. This performs 2d coadd specific tasks, and then also performs some of the tasks analogous to the pypeit.extract_one method. Docs coming soon.... Args: stack_dict: master_dir: samp_fact: float sampling factor to make the wavelength grid finer or coarser. samp_fact > 1.0 oversamples (finer), samp_fact < 1.0 undersamples (coarser) ir_redux: par: show: show_peaks: Returns: """ # Find the objid of the brighest object, and the average snr across all orders nslits = stack_dict['tslits_dict']['slit_left'].shape[1] objid, snr_bar = get_brightest_obj(stack_dict['specobjs_list'], echelle=True) # TODO Print out a report here on the image stack, i.e. S/N of each image spectrograph = util.load_spectrograph(stack_dict['spectrograph']) par = spectrograph.default_pypeit_par() if par is None else par binning = np.array([stack_dict['tslits_dict']['binspectral'],stack_dict['tslits_dict']['binspatial']]) # Grab the wavelength grid that we will rectify onto wave_grid = spectrograph.wavegrid(binning=binning,samp_fact=samp_fact) wave_grid_mid = spectrograph.wavegrid(midpoint=True,binning=binning,samp_fact=samp_fact) coadd_list = [] nspec_vec = np.zeros(nslits,dtype=int) nspat_vec = np.zeros(nslits,dtype=int) # TODO: Generalize this to be a loop over detectors, such tha the # coadd_list is an ordered dict (perhaps) with all the slits on all # detectors for islit in range(nslits): msgs.info('Performing 2d coadd for slit: {:d}/{:d}'.format(islit,nslits-1)) # Determine the wavelength dependent optimal weights and grab the reference trace rms_sn, weights, trace_stack, wave_stack = optimal_weights(stack_dict['specobjs_list'], islit, objid) thismask_stack = stack_dict['slitmask_stack'] == islit # Perform the 2d coadd coadd_dict = coadd2d(trace_stack, stack_dict['sciimg_stack'], stack_dict['sciivar_stack'], stack_dict['skymodel_stack'], stack_dict['mask_stack'] == 0, stack_dict['tilts_stack'], stack_dict['waveimg_stack'], thismask_stack, weights=weights, wave_grid=wave_grid) coadd_list.append(coadd_dict) nspec_vec[islit]=coadd_dict['nspec'] nspat_vec[islit]=coadd_dict['nspat'] # Determine the size of the psuedo image nspat_pad = 10 nspec_psuedo = nspec_vec.max() nspat_psuedo = np.sum(nspat_vec) + (nslits + 1)*nspat_pad spec_vec_psuedo = np.arange(nspec_psuedo) shape_psuedo = (nspec_psuedo, nspat_psuedo) imgminsky_psuedo = np.zeros(shape_psuedo) sciivar_psuedo = np.zeros(shape_psuedo) waveimg_psuedo = np.zeros(shape_psuedo) tilts_psuedo = np.zeros(shape_psuedo) spat_psuedo = np.zeros(shape_psuedo) nused_psuedo = np.zeros(shape_psuedo, dtype=int) inmask_psuedo = np.zeros(shape_psuedo, dtype=bool) wave_mid = np.zeros((nspec_psuedo, nslits)) wave_mask = np.zeros((nspec_psuedo, nslits),dtype=bool) wave_min = np.zeros((nspec_psuedo, nslits)) wave_max = np.zeros((nspec_psuedo, nslits)) dspat_mid = np.zeros((nspat_psuedo, nslits)) spat_left = nspat_pad slit_left = np.zeros((nspec_psuedo, nslits)) slit_righ = np.zeros((nspec_psuedo, nslits)) spec_min1 = np.zeros(nslits) spec_max1 = np.zeros(nslits) for islit, coadd_dict in enumerate(coadd_list): spat_righ = spat_left + nspat_vec[islit] ispec = slice(0,nspec_vec[islit]) ispat = slice(spat_left,spat_righ) imgminsky_psuedo[ispec, ispat] = coadd_dict['imgminsky'] sciivar_psuedo[ispec, ispat] = coadd_dict['sciivar'] waveimg_psuedo[ispec, ispat] = 
coadd_dict['waveimg'] tilts_psuedo[ispec, ispat] = coadd_dict['tilts'] # spat_psuedo is the sub-pixel image position on the rebinned psuedo image inmask_psuedo[ispec, ispat] = coadd_dict['outmask'] image_temp = (coadd_dict['dspat'] - coadd_dict['dspat_mid'][0] + spat_left)*coadd_dict['outmask'] spat_psuedo[ispec, ispat] = image_temp nused_psuedo[ispec, ispat] = coadd_dict['nused'] wave_min[ispec, islit] = coadd_dict['wave_min'] wave_max[ispec, islit] = coadd_dict['wave_max'] wave_mid[ispec, islit] = coadd_dict['wave_mid'] wave_mask[ispec, islit] = True # Fill in the rest of the wave_mid with the corresponding points in the wave_grid wave_this = wave_mid[wave_mask[:,islit], islit] ind_upper = np.argmin(np.abs(wave_grid_mid - np.max(wave_this.max()))) + 1 if nspec_vec[islit] != nspec_psuedo: wave_mid[nspec_vec[islit]:, islit] = wave_grid_mid[ind_upper:ind_upper + (nspec_psuedo-nspec_vec[islit])] dspat_mid[ispat, islit] = coadd_dict['dspat_mid'] slit_left[:,islit] = np.full(nspec_psuedo, spat_left) slit_righ[:,islit] = np.full(nspec_psuedo, spat_righ) spec_max1[islit] = nspec_vec[islit]-1 spat_left = spat_righ + nspat_pad slitcen = (slit_left + slit_righ)/2.0 tslits_dict_psuedo = dict(slit_left=slit_left, slit_righ=slit_righ, slitcen=slitcen, nspec=nspec_psuedo, nspat=nspat_psuedo, pad=0, nslits = nslits, binspectral=1, binspatial=1, spectrograph=spectrograph.spectrograph, spec_min=spec_min1, spec_max=spec_max1) slitmask_psuedo = pixels.tslits2mask(tslits_dict_psuedo) # This is a kludge to deal with cases where bad wavelengths result in large regions where the slit is poorly sampled, # which wreaks havoc on the local sky-subtraction min_slit_frac = 0.70 spec_min = np.zeros(nslits) spec_max = np.zeros(nslits) for islit in range(nslits): slit_width = np.sum(inmask_psuedo*(slitmask_psuedo == islit),axis=1) slit_width_img = np.outer(slit_width, np.ones(nspat_psuedo)) med_slit_width = np.median(slit_width_img[slitmask_psuedo==islit]) nspec_eff = np.sum(slit_width > min_slit_frac*med_slit_width) nsmooth = int(np.fmax(np.ceil(nspec_eff*0.02),10)) slit_width_sm = scipy.ndimage.filters.median_filter(slit_width, size=nsmooth, mode='reflect') igood = (slit_width_sm > min_slit_frac*med_slit_width) spec_min[islit] = spec_vec_psuedo[igood].min() spec_max[islit] = spec_vec_psuedo[igood].max() bad_pix = (slit_width_img < min_slit_frac*med_slit_width) & (slitmask_psuedo == islit) inmask_psuedo[bad_pix] = False # Update with tslits_dict_psuedo tslits_dict_psuedo['spec_min'] = spec_min tslits_dict_psuedo['spec_max'] = spec_max slitmask_psuedo = pixels.tslits2mask(tslits_dict_psuedo) # Make a fake bitmask from the outmask. 
We are kludging the crmask to be the outmask_psuedo here, and setting the bpm to # be good everywhere mask = processimages.ProcessImages.build_mask(imgminsky_psuedo, sciivar_psuedo, np.invert(inmask_psuedo), np.zeros_like(inmask_psuedo), slitmask=slitmask_psuedo) redux = reduce.instantiate_me(spectrograph, tslits_dict_psuedo, mask, ir_redux=ir_redux, par=par, objtype = 'science', binning=binning) if show: redux.show('image', image=imgminsky_psuedo*(mask == 0), chname = 'imgminsky', slits=True, clear=True) # Object finding sobjs_obj, nobj, skymask_init = redux.find_objects(imgminsky_psuedo, sciivar_psuedo, ir_redux=ir_redux, std=std, show_peaks=show_peaks, show=show) # Local sky-subtraction global_sky_psuedo = np.zeros_like(imgminsky_psuedo) # No global sky for co-adds since we go straight to local rn2img_psuedo = global_sky_psuedo # No rn2img for co-adds since we go do not model noise skymodel_psuedo, objmodel_psuedo, ivarmodel_psuedo, outmask_psuedo, sobjs = \ redux.local_skysub_extract(imgminsky_psuedo, sciivar_psuedo, tilts_psuedo, waveimg_psuedo, global_sky_psuedo, rn2img_psuedo, sobjs_obj, spat_pix=spat_psuedo, std=std, model_noise=False, show_profile=show, show=show) if ir_redux: sobjs.purge_neg() # Add the information about the fixed wavelength grid to the sobjs for spec in sobjs: spec.boxcar['WAVE_GRID_MASK'] = wave_mask[:,spec.slitid] spec.boxcar['WAVE_GRID'] = wave_mid[:,spec.slitid] spec.boxcar['WAVE_GRID_MIN'] = wave_min[:,spec.slitid] spec.boxcar['WAVE_GRID_MAX'] = wave_max[:,spec.slitid] spec.optimal['WAVE_GRID_MASK'] = wave_mask[:,spec.slitid] spec.optimal['WAVE_GRID'] = wave_mid[:,spec.slitid] spec.optimal['WAVE_GRID_MIN'] = wave_min[:,spec.slitid] spec.optimal['WAVE_GRID_MAX'] = wave_max[:,spec.slitid] # TODO Implement flexure and heliocentric corrections on the single exposure 1d reductions and apply them to the # waveimage. Change the data model to accomodate a wavelength model for each image. # Using the same implementation as in core/pypeit # Write out the psuedo master files to disk master_key_dict = stack_dict['master_key_dict'] # TODO: These saving operations are a temporary kludge waveImage = WaveImage(None, None, None, None, None, master_key=master_key_dict['arc'], master_dir=master_dir) waveImage.save(mswave=waveimg_psuedo) traceSlits = TraceSlits(None, None, master_key=master_key_dict['trace'], master_dir=master_dir) traceSlits.save(tslits_dict=tslits_dict_psuedo) return imgminsky_psuedo, sciivar_psuedo, skymodel_psuedo, objmodel_psuedo, ivarmodel_psuedo, outmask_psuedo, sobjs
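# Standalone sketch of the side-by-side slit packing used to build the pseudo
# image above (sizes are illustrative): each rectified slit is placed at its
# running spatial offset, with nspat_pad pixels of padding between slits.
#
#     import numpy as np
#     nspat_pad = 10
#     slit_imgs = [np.ones((100, 30)), np.ones((80, 45))]
#     nspec = max(im.shape[0] for im in slit_imgs)
#     nspat = sum(im.shape[1] for im in slit_imgs) + (len(slit_imgs) + 1) * nspat_pad
#     pseudo = np.zeros((nspec, nspat))
#     spat_left = nspat_pad
#     for im in slit_imgs:
#         pseudo[:im.shape[0], spat_left:spat_left + im.shape[1]] = im
#         spat_left += im.shape[1] + nspat_pad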
def main(args): """ Executes 2d coadding """ msgs.warn('PATH =' + os.getcwd()) # Load the file if args.file is not None: spectrograph_name, config_lines, spec2d_files = io.read_spec2d_file( args.file, filetype="coadd2d") spectrograph = load_spectrograph(spectrograph_name) # Parameters # TODO: Shouldn't this reinstantiate the same parameters used in # the PypeIt run that extracted the objects? Why are we not # just passing the pypeit file? # JFH: The reason is that the coadd2dfile may want different reduction parameters spectrograph_def_par = spectrograph.default_pypeit_par() parset = par.PypeItPar.from_cfg_lines( cfg_lines=spectrograph_def_par.to_config(), merge_with=config_lines) elif args.obj is not None: # TODO: We should probably be reading the pypeit file and using those parameters here rather than using the # default parset. # TODO: This needs to define the science path spec2d_files = glob.glob('./Science/spec2d_*' + args.obj + '*') head0 = fits.getheader(spec2d_files[0]) spectrograph_name = head0['PYP_SPEC'] spectrograph = load_spectrograph(spectrograph_name) parset = spectrograph.default_pypeit_par() else: msgs.error( 'You must either input a coadd2d file with --file or an object name with --obj' ) # Update with configuration specific parameters (which requires science file) and initialize spectrograph spectrograph_cfg_lines = spectrograph.config_specific_par( spec2d_files[0]).to_config() parset = par.PypeItPar.from_cfg_lines(cfg_lines=spectrograph_cfg_lines, merge_with=parset.to_config()) # If detector was passed as an argument override whatever was in the coadd2d_file if args.det is not None: msgs.info("Restricting reductions to detector={}".format(args.det)) parset['rdx']['detnum'] = int(args.det) # Get headers (if possible) and base names spec1d_files = [ files.replace('spec2d', 'spec1d') for files in spec2d_files ] head1d = None for spec1d_file in spec1d_files: if os.path.isfile(spec1d_file): head1d = fits.getheader(spec1d_file) break if head1d is None: msgs.warn("No 1D spectra so am generating a dummy header for output") head1d = io.initialize_header() head2d = fits.getheader(spec2d_files[0]) if args.basename is None: filename = os.path.basename(spec2d_files[0]) basename = filename.split('_')[2] else: basename = args.basename # Write the par to disk par_outfile = basename + '_coadd2d.par' print("Writing the parameters to {}".format(par_outfile)) parset.to_config(par_outfile) # Now run the coadds skysub_mode = head2d['SKYSUB'] ir_redux = True if 'DIFF' in skysub_mode else False # Print status message msgs_string = 'Reducing target {:s}'.format(basename) + msgs.newline() msgs_string += 'Performing coadd of frames reduce with {:s} imaging'.format( skysub_mode) msgs_string += msgs.newline( ) + 'Combining frames in 2d coadd:' + msgs.newline() for file in spec2d_files: msgs_string += '{0:s}'.format(os.path.basename(file)) + msgs.newline() msgs.info(msgs_string) # TODO: This needs to be added to the parameter list for rdx redux_path = os.getcwd() master_dirname = os.path.basename(head2d['PYPMFDIR']) + '_coadd' master_dir = os.path.join(redux_path, master_dirname) # Make the new Master dir if not os.path.isdir(master_dir): msgs.info( 'Creating directory for Master output: {0}'.format(master_dir)) os.makedirs(master_dir) # Instantiate the sci_dict sci_dict = OrderedDict() # This needs to be ordered sci_dict['meta'] = {} sci_dict['meta']['vel_corr'] = 0. 
    sci_dict['meta']['ir_redux'] = ir_redux

    # Find the detectors to reduce
    detectors = PypeIt.select_detectors(detnum=parset['rdx']['detnum'],
                                        ndet=spectrograph.ndet)
    if len(detectors) != spectrograph.ndet:
        msgs.warn('Not reducing detectors: {0}'.format(
            ' '.join([str(d) for d in set(np.arange(spectrograph.ndet) + 1) - set(detectors)])))

    # Loop on detectors
    for det in detectors:
        msgs.info("Working on detector {0}".format(det))
        sci_dict[det] = {}

        # Instantiate Coadd2d
        coadd = coadd2d.CoAdd2D.get_instance(spec2d_files, spectrograph, parset, det=det,
                                             offsets=parset['coadd2d']['offsets'],
                                             weights=parset['coadd2d']['weights'],
                                             ir_redux=ir_redux,
                                             debug_offsets=args.debug_offsets,
                                             debug=args.debug, samp_fact=args.samp_fact)

        # Coadd the slits
        coadd_dict_list = coadd.coadd(only_slits=None)  # TODO: implement only_slits later
        # Create the pseudo images
        pseudo_dict = coadd.create_pseudo_image(coadd_dict_list)

        # Reduce
        msgs.info('Running the extraction')
        # TODO -- This should mirror what is in pypeit.extract_one
        # TODO -- JFH :: This ought to return a Spec2DObj and SpecObjs, which would be
        #   slurped into AllSpec2DObj and all_specobjs, as below.
        # TODO -- JFH -- Check that the slits we are using are correct
        sci_dict[det]['sciimg'], sci_dict[det]['sciivar'], sci_dict[det]['skymodel'], \
            sci_dict[det]['objmodel'], sci_dict[det]['ivarmodel'], sci_dict[det]['outmask'], \
            sci_dict[det]['specobjs'], sci_dict[det]['detector'], sci_dict[det]['slits'], \
            sci_dict[det]['tilts'], sci_dict[det]['waveimg'] \
                = coadd.reduce(pseudo_dict, show=args.show, show_peaks=args.peaks)

        # Save pseudo image master files
        #coadd.save_masters()

    # Make the new Science dir
    # TODO: This needs to be defined by the user
    scipath = os.path.join(redux_path, 'Science_coadd')
    if not os.path.isdir(scipath):
        msgs.info('Creating directory for Science output: {0}'.format(scipath))
        os.makedirs(scipath)

    # THE FOLLOWING MIMICS THE CODE IN pypeit.save_exposure()

    # TODO -- These lines should be above once reduce() passes back something sensible
    all_specobjs = specobjs.SpecObjs()
    for det in detectors:
        all_specobjs.add_sobj(sci_dict[det]['specobjs'])

    # Write
    outfile1d = os.path.join(scipath, 'spec1d_{:s}.fits'.format(basename))
    subheader = spectrograph.subheader_for_spec(head2d, head2d)
    all_specobjs.write_to_fits(subheader, outfile1d)

    # 2D spectra
    # TODO -- These lines should be above once reduce() passes back something sensible
    all_spec2d = spec2dobj.AllSpec2DObj()
    all_spec2d['meta']['ir_redux'] = ir_redux
    for det in detectors:
        all_spec2d[det] = spec2dobj.Spec2DObj(det=det,
                                              sciimg=sci_dict[det]['sciimg'],
                                              ivarraw=sci_dict[det]['sciivar'],
                                              skymodel=sci_dict[det]['skymodel'],
                                              objmodel=sci_dict[det]['objmodel'],
                                              ivarmodel=sci_dict[det]['ivarmodel'],
                                              scaleimg=np.array([1.0], dtype=float),
                                              bpmmask=sci_dict[det]['outmask'],
                                              detector=sci_dict[det]['detector'],
                                              slits=sci_dict[det]['slits'],
                                              waveimg=sci_dict[det]['waveimg'],
                                              tilts=sci_dict[det]['tilts'],
                                              sci_spat_flexure=None,
                                              sci_spec_flexure=None,
                                              vel_corr=None,
                                              vel_type=None)

    # Build the header
    outfile2d = os.path.join(scipath, 'spec2d_{:s}.fits'.format(basename))
    pri_hdr = all_spec2d.build_primary_hdr(head2d, spectrograph, subheader=subheader,
                                           # TODO -- JFH :: Decide if we need any of these
                                           redux_path=None, master_key_dict=None,
                                           master_dir=None)
    # Write
    all_spec2d.write_to_fits(outfile2d, pri_hdr=pri_hdr)
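# The parset built in main() above is layered in two merge steps: the coadd2d
# file is laid over the spectrograph defaults, and that result is then laid over
# the configuration-specific values. A minimal dict-based sketch of that
# precedence (illustrative only -- the keys below are stand-ins, and the real
# merging goes through par.PypeItPar.from_cfg_lines on config lines):
defaults = {'offsets': 'auto', 'weights': 'auto'}  # stand-in for default_pypeit_par()
coadd2d_file = {'offsets': [0.0, 5.5]}             # stand-in for the coadd2d file lines
config_specific = {'detnum': 1}                    # stand-in for config_specific_par()

step1 = {**defaults, **coadd2d_file}    # first merge: file values win over defaults
merged = {**config_specific, **step1}   # second merge: that result wins over config-specific
# merged == {'detnum': 1, 'offsets': [0.0, 5.5], 'weights': 'auto'}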
def unpack_object(self, ret_flam=False):
    """
    Utility function to unpack the sobjs for one object and return various numpy
    arrays describing the spectrum and meta data. The user needs to already have
    trimmed the SpecObjs to the relevant indices for the object.

    Args:
        ret_flam (:obj:`bool`, optional):
            If True, return the FLAM; otherwise return COUNTS.

    Returns:
        tuple: Returns the following, where all numpy arrays returned have shape
        (nspec, norders) for Echelle data and (nspec,) for Multislit data:

            - wave (`numpy.ndarray`_): Wavelength grids
            - flux (`numpy.ndarray`_): Flambda or counts
            - flux_ivar (`numpy.ndarray`_): Inverse variance (of Flambda or counts)
            - flux_gpm (`numpy.ndarray`_): Good pixel mask. True=Good
            - meta_spec (dict): Dictionary containing meta data. The keys are
              defined by spectrograph.header_cards_for_spec()
            - header (`astropy.io.fits.Header`_): Header from the spec1d file
    """
    # Read in the spec1d file
    norddet = self.nobj
    # TODO: Should nspec be an attribute of specobj?
    nspec = self[0].OPT_FLAM.size if ret_flam else self[0].OPT_COUNTS.size

    # Allocate arrays and unpack the spectrum
    wave = np.zeros((nspec, norddet))
    flux = np.zeros((nspec, norddet))
    flux_ivar = np.zeros((nspec, norddet))
    flux_gpm = np.zeros((nspec, norddet), dtype=bool)
    detector = np.zeros(norddet, dtype=int)
    ech_orders = np.zeros(norddet, dtype=int)

    # TODO: Make the desired extraction (OPT vs. BOX) an optional input variable.
    for iorddet in range(norddet):
        wave[:, iorddet] = self[iorddet].OPT_WAVE
        flux_gpm[:, iorddet] = self[iorddet].OPT_MASK
        detector[iorddet] = self[iorddet].DET
        if self[0].PYPELINE == 'Echelle':
            ech_orders[iorddet] = self[iorddet].ECH_ORDER
        if ret_flam:
            flux[:, iorddet] = self[iorddet].OPT_FLAM
            flux_ivar[:, iorddet] = self[iorddet].OPT_FLAM_IVAR
        else:
            flux[:, iorddet] = self[iorddet].OPT_COUNTS
            flux_ivar[:, iorddet] = self[iorddet].OPT_COUNTS_IVAR

    # Populate the meta data
    # TODO: Remove this hack. Is it needed? If PYP_SPEC is always written, it is not.
    try:
        spectrograph = load_spectrograph(self.header['PYP_SPEC'])
    except KeyError:
        # TODO JFH: This is a hack until a generic spectrograph is implemented.
        spectrograph = load_spectrograph('shane_kast_blue')

    meta_spec = {}
    core_keys = spectrograph.header_cards_for_spec()
    for key in core_keys:
        try:
            meta_spec[key.upper()] = self.header[key.upper()]
        except KeyError:
            msgs.warn('Core meta data is missing from the specobjs header')

    # Add the pyp spec.
    # TODO JFH: Make this an attribute of the specobj by default.
    meta_spec['PYP_SPEC'] = self.header['PYP_SPEC']
    meta_spec['PYPELINE'] = self[0].PYPELINE
    meta_spec['DET'] = detector

    if self[0].PYPELINE == 'MultiSlit' and self.nobj == 1:
        meta_spec['ECH_ORDERS'] = None
        return wave.reshape(nspec), flux.reshape(nspec), flux_ivar.reshape(nspec), \
            flux_gpm.reshape(nspec), meta_spec, self.header
    else:
        meta_spec['ECH_ORDERS'] = ech_orders
        return wave, flux, flux_ivar, flux_gpm, meta_spec, self.header
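# A hypothetical usage sketch for unpack_object (the file name and the object
# selection below are made up; from_fitsfile and the NAME attribute are assumed
# to behave as in recent PypeIt versions -- treat this as a sketch, not as
# canonical API usage):
from pypeit import specobjs

sobjs_all = specobjs.SpecObjs.from_fitsfile('Science_coadd/spec1d_example.fits')
# Trim to a single object first, as the docstring above requires
one_obj = sobjs_all[sobjs_all.NAME == sobjs_all.NAME[0]]
wave, flux, flux_ivar, flux_gpm, meta_spec, header = one_obj.unpack_object(ret_flam=False)
# MultiSlit data with one object comes back as 1D arrays of shape (nspec,);
# Echelle data comes back as 2D arrays of shape (nspec, norders).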
type = 'ESI'
devpath = os.getenv('PYPEIT_DEV')

if type == 'LRIS_red':
    det = 1
    sdet = parse.get_dnum(det, prefix=False)
    rawpath = devpath + '/RAW_DATA/Keck_LRIS_red/multi_400_8500_d560/'
    masterpath = devpath + '/REDUX_OUT/Keck_LRIS_red/multi_400_8500_d560/MF_keck_lris_red/'

    # Read in the msbias for bias subtraction
    biasfile = masterpath + 'MasterBias_A_' + sdet + '_aa.fits'
    msbias = fits.getdata(biasfile)

    # Read in and process the flat-field images (prepend rawpath to each file name)
    pixflat_image_files = np.core.defchararray.add(rawpath,
                                                   ['r170320_2057.fits',
                                                    'r170320_2058.fits',
                                                    'r170320_2059.fits']).tolist()
    spectro_name = 'keck_lris_red'
    spectrograph = load_spectrograph(spectrograph=spectro_name)
    par = spectrograph.default_pypeit_par()
    flatField = flatfield.FlatField(spectrograph, file_list=pixflat_image_files, det=det,
                                    par=par['calibrations']['pixelflatframe'], msbias=msbias)
    flatimg = flatField.build_pixflat()

    # Read in the tilts
    tiltsfile = masterpath + 'MasterTilts_A_' + sdet + '_aa.fits'
    mstilts = fits.getdata(tiltsfile)

    # Read in the tslits_dict
    traceslitsroot = masterpath + 'MasterTrace_A_' + sdet + '_aa'
    Tslits = traceslits.TraceSlits.from_master_files(traceslitsroot)
    tslits_dict = {}
    tslits_dict['lcen'] = Tslits.lcen
    tslits_dict['rcen'] = Tslits.rcen
    tslits_dict['slitpix'] = pixels.slit_pixels(tslits_dict['lcen'], tslits_dict['rcen'],
                                                flatimg.shape, Tslits.par['pad'])

elif type == 'ESI':