def test_compute_fiberflat(self):
    """Tests desi_compute_fiberflat --infile frame.fits --outfile fiberflat.fits"""
    self._write_frame(flavor='flat')
    self._write_fibermap()  # QA fig requires fibermapfile

    cmd = '{} {}/desi_compute_fiberflat --infile {} --outfile {} --qafile {} --qafig {}'.format(
        sys.executable, self.binDir, self.framefile, self.fiberflatfile,
        self.qa_calib_file, self.qafig)
    outputs = [self.fiberflatfile, self.qa_calib_file, self.qafig]
    inputs = [self.framefile, ]
    err = runcmd(cmd, inputs=inputs, outputs=outputs, clobber=True)
    self.assertEqual(err, 0)

    #- Confirm that the output file can be read as a fiberflat
    ff1 = io.read_fiberflat(self.fiberflatfile)

    #- Remove outputs and call again via function instead of system call
    self._remove_files(outputs)
    args = desispec.scripts.fiberflat.parse(cmd.split()[2:])
    err = runcmd(desispec.scripts.fiberflat.main, args=[args, ],
                 inputs=inputs, outputs=outputs, clobber=True)

    #- Confirm that the output file can be read as a fiberflat
    ff2 = io.read_fiberflat(self.fiberflatfile)

    self.assertTrue(np.all(ff1.fiberflat == ff2.fiberflat))
    self.assertTrue(np.all(ff1.ivar == ff2.ivar))
    self.assertTrue(np.all(ff1.mask == ff2.mask))
    self.assertTrue(np.all(ff1.meanspec == ff2.meanspec))
    self.assertTrue(np.all(ff1.wave == ff2.wave))
    self.assertTrue(np.all(ff1.fibers == ff2.fibers))
def main(args):
    log = get_logger()
    log.info("starting at {}".format(time.asctime()))

    inputs = []
    for filename in args.infile:
        inflat = read_fiberflat(filename)
        if args.program is not None:
            if args.program != inflat.header["PROGRAM"]:
                log.info("skip {}".format(filename))
                continue
        inputs.append(inflat)  # reuse the fiberflat already read above instead of re-reading the file

    fiberflat = average_fiberflat(inputs)
    write_fiberflat(args.outfile, fiberflat)
    log.info("successfully wrote %s" % args.outfile)
def main():
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--infile', type=str, default=None, required=True, help='path of DESI exposure frame fits file')
    parser.add_argument('--fibermap', type=str, default=None, required=True, help='path of DESI exposure fibermap fits file')
    parser.add_argument('--fiberflat', type=str, default=None, required=True, help='path of DESI fiberflat fits file')
    parser.add_argument('--outfile', type=str, default=None, required=True, help='path of DESI sky fits file')
    args = parser.parse_args()

    log = get_logger()
    log.info("starting")

    # read exposure to load data and get range of spectra
    frame = read_frame(args.infile)
    specmin = frame.header["SPECMIN"]
    specmax = frame.header["SPECMAX"]

    # read fibermap to locate sky fibers
    fibermap = read_fibermap(args.fibermap)
    selection = np.where((fibermap["OBJTYPE"] == "SKY")
                         & (fibermap["FIBER"] >= specmin)
                         & (fibermap["FIBER"] <= specmax))[0]
    if selection.size == 0:
        log.error("no sky fiber in fibermap %s" % args.fibermap)
        sys.exit(12)

    # read fiberflat
    fiberflat = read_fiberflat(args.fiberflat)

    # apply fiberflat to sky fibers
    apply_fiberflat(frame, fiberflat)

    # compute sky model
    skymodel = compute_sky(frame, fibermap)

    # write result
    write_sky(args.outfile, skymodel, frame.header)
    log.info("successfully wrote %s" % args.outfile)
def main(args) : log=get_logger() log.info("read frame") # read frame frame = read_frame(args.infile) log.info("apply fiberflat") # read fiberflat fiberflat = read_fiberflat(args.fiberflat) # apply fiberflat apply_fiberflat(frame, fiberflat) log.info("subtract sky") # read sky skymodel=read_sky(args.sky) # subtract sky subtract_sky(frame, skymodel) log.info("compute flux calibration") # read models model_flux,model_wave,model_fibers=read_stdstar_models(args.models) # check that the model_fibers are actually standard stars fibermap = frame.fibermap model_fibers = model_fibers%500 if np.any(fibermap['OBJTYPE'][model_fibers] != 'STD'): for i in model_fibers: log.error("inconsistency with spectrum %d, OBJTYPE='%s' in fibermap"%(i,fibermap["OBJTYPE"][i])) sys.exit(12) fluxcalib = compute_flux_calibration(frame, model_wave, model_flux) # QA if (args.qafile is not None): log.info("performing fluxcalib QA") # Load qaframe = load_qa_frame(args.qafile, frame, flavor=frame.meta['FLAVOR']) # Run #import pdb; pdb.set_trace() qaframe.run_qa('FLUXCALIB', (frame, fluxcalib)) # Write if args.qafile is not None: write_qa_frame(args.qafile, qaframe) log.info("successfully wrote {:s}".format(args.qafile)) # Figure(s) if args.qafig is not None: qa_plots.frame_fluxcalib(args.qafig, qaframe, frame, fluxcalib) # write result write_flux_calibration(args.outfile, fluxcalib, header=frame.meta) log.info("successfully wrote %s"%args.outfile)
def main(args):
    log = get_logger()
    log.info("starting at {}".format(time.asctime()))

    inputs = []
    for filename in args.infile:
        inputs.append(read_fiberflat(filename))

    fiberflat = average_fiberflat(inputs)
    write_fiberflat(args.outfile, fiberflat)
    log.info("successfully wrote %s" % args.outfile)
def main():
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--infile', type=str, default=None, required=True, help='path of DESI exposure frame fits file')
    parser.add_argument('--fiberflat', type=str, default=None, help='path of DESI fiberflat fits file')
    parser.add_argument('--sky', type=str, default=None, help='path of DESI sky fits file')
    parser.add_argument('--calib', type=str, default=None, help='path of DESI calibration fits file')
    parser.add_argument('--outfile', type=str, default=None, required=True, help='path of output (calibrated) DESI frame fits file')
    # add calibration here when exists
    args = parser.parse_args()
    log = get_logger()

    if (args.fiberflat is None) and (args.sky is None) and (args.calib is None):
        log.critical('no --fiberflat, --sky, or --calib; nothing to do ?!?')
        sys.exit(12)

    frame = read_frame(args.infile)

    if args.fiberflat is not None:
        log.info("apply fiberflat")
        # read fiberflat
        fiberflat = read_fiberflat(args.fiberflat)
        # apply fiberflat
        apply_fiberflat(frame, fiberflat)

    if args.sky is not None:
        log.info("subtract sky")
        # read sky
        skymodel = read_sky(args.sky)
        # subtract sky
        subtract_sky(frame, skymodel)

    if args.calib is not None:
        log.info("calibrate")
        # read calibration
        fluxcalib = read_flux_calibration(args.calib)
        # apply calibration
        apply_flux_calibration(frame, fluxcalib)

    # save output
    write_frame(args.outfile, frame)
    log.info("successfully wrote %s" % args.outfile)
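# --- Illustrative sketch (not part of desispec): what "apply fiberflat" amounts to ---
# The helper below is a hypothetical, simplified stand-in for apply_fiberflat(),
# assuming the usual flat-field arithmetic flux -> flux/flat with the inverse
# variance rescaled accordingly; the real desispec routine also propagates the
# flat's own uncertainty and mask bits.
import numpy as np

def apply_fiberflat_sketch(flux, ivar, fiberflat):
    """Divide per-fiber spectra by a fiber flat (simplified, zero-safe)."""
    good = fiberflat > 0
    out_flux = np.where(good, flux / np.where(good, fiberflat, 1.0), 0.0)
    out_ivar = np.where(good, ivar * fiberflat**2, 0.0)  # var(flux/f) = var(flux)/f^2
    return out_flux, out_ivar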
def main(args):
    log = get_logger()

    frame = read_frame(args.infile, skip_resolution=True)
    fibermap = read_fibermap(args.infile)
    fiberflat = read_fiberflat(args.fiberflat)
    skymodel = read_sky(args.sky)
    fluxcalib = read_flux_calibration(args.calib)

    cam = args.infile.split('/')[-1].split('-')[1]
    band = cam[0]
    bands = [band]

    # Indices of sky fibers.
    sky_indx = np.where(fibermap['OBJTYPE'] == 'SKY')[0]

    rd_var, sky_var = calc_var(bands, args.nea, args.psf, frame, fluxcalib,
                               fiberflat, skymodel, components=True)
    var = calc_var(bands, args.nea, args.psf, frame, fluxcalib, fiberflat,
                   skymodel, components=False)

    nsky = 4
    fig, axes = plt.subplots(1, nsky, figsize=(5 * nsky, 5))

    for i in range(nsky):
        def calc_alphavar(alpha):
            return alpha * rd_var[sky_indx, :] + sky_var[sky_indx, :]

        def alpha_fit(alpha):
            _var = calc_alphavar(alpha)
            ivar = 1. / _var
            X2 = (frame.ivar[sky_indx, :] - ivar)**2.
            return np.sum(X2)

        res = minimize(alpha_fit, x0=[1.])
        alpha = res.x[0]

        indx = sky_indx[i]

        axes[i].plot(skymodel.wave, median_filter(frame.ivar[indx, :], 10),
                     lw=0.4, label='Sky frame IVAR', alpha=0.4)
        axes[i].plot(skymodel.wave, 1. / rd_var[indx, :], lw=0.4,
                     label='Model rd. IVAR', alpha=0.4)
        # axes[i].plot(skymodel.wave, 1./sky_var[indx,:], lw=0.4, label='Model Sky IVAR', alpha=0.4)
        # axes[i].plot(skymodel.wave, 1./var[indx,:], lw=0.4, label=r'Model IVAR', alpha=0.4)
        axes[i].plot(skymodel.wave, median_filter(1. / calc_alphavar(alpha)[i, :], 10),
                     lw=0.4, label=r'$\alpha$ Model IVAR', alpha=0.4)
        axes[i].set_title(r'Fiber {:d} ($\alpha$ = {:.6f})'.format(indx, alpha))
        axes[i].set_xlabel(r'Wavelength [$\AA$]')
        axes[i].set_yscale('log')
        axes[i].set_ylim(bottom=5.e-4, top=3.e-2)
        axes[i].legend(frameon=False, loc=2)

    axes[0].set_ylabel('e/A')

    plt.show()
def test_compute_fiberflat(self):
    """Tests desi_compute_fiberflat.py --infile frame.fits --outfile fiberflat.fits"""
    self._write_frame()

    #- run the command and confirm error code = 0
    cmd = '{} {}/desi_compute_fiberflat.py --infile {} --outfile {}'.format(
        sys.executable, self.binDir, self.framefile, self.fiberflatfile)
    # self.assertTrue(os.path.exists(os.path.join(self.binDir,'desi_compute_fiberflat.py')))
    err = runcmd(cmd, [self.framefile, ], [self.fiberflatfile, ], clobber=True)
    self.assertEqual(err, 0)

    #- Confirm that the output file can be read as a fiberflat
    ff = io.read_fiberflat(self.fiberflatfile)
def run(self, indir):
    '''TODO: document'''
    log = desiutil.log.get_logger()

    results = list()

    infiles = glob.glob(os.path.join(indir, 'qframe-*.fits'))
    if len(infiles) == 0:
        log.error("no qframe in {}".format(indir))
        return None

    for filename in infiles:
        qframe = read_qframe(filename)
        night = int(qframe.meta['NIGHT'])
        expid = int(qframe.meta['EXPID'])
        cam = qframe.meta['CAMERA'][0].upper()
        spectro = int(qframe.meta['CAMERA'][1])

        try:
            cfinder = CalibFinder([qframe.meta])
        except:
            log.error("failed to find calib for qframe {}".format(filename))
            continue

        if not cfinder.haskey("FIBERFLAT"):
            log.warning("no known fiberflat for qframe {}".format(filename))
            continue

        fflat = read_fiberflat(cfinder.findfile("FIBERFLAT"))
        tmp = np.median(fflat.fiberflat, axis=1)
        reference_fflat = tmp / np.median(tmp)

        tmp = np.median(qframe.flux, axis=1)
        this_fflat = tmp / np.median(tmp)

        for f, fiber in enumerate(qframe.fibermap["FIBER"]):
            results.append(collections.OrderedDict(
                NIGHT=night, EXPID=expid, SPECTRO=spectro, CAM=cam,
                FIBER=fiber, FIBERFLAT=this_fflat[f],
                REF_FIBERFLAT=reference_fflat[f]))

    if len(results) == 0:
        return None

    return Table(results, names=results[0].keys())
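# --- Illustrative sketch (not part of the QA class above): the per-fiber normalization it uses ---
# Both the reference fiberflat and the observed qframe flux are reduced to one number per
# fiber (median over wavelength) and then normalized by the median over fibers, so the two
# columns FIBERFLAT and REF_FIBERFLAT can be compared directly.
import numpy as np

def per_fiber_relative_throughput(values_2d):
    """values_2d has shape (nfiber, nwave); returns one normalized value per fiber."""
    per_fiber = np.median(values_2d, axis=1)   # median over wavelength
    return per_fiber / np.median(per_fiber)    # normalize by the median over fibers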
def main(args) : log=get_logger() log.info("starting") # read exposure to load data and get range of spectra frame = read_frame(args.infile) specmin, specmax = np.min(frame.fibers), np.max(frame.fibers) if args.cosmics_nsig>0 : # Reject cosmics reject_cosmic_rays_1d(frame,args.cosmics_nsig) # read fiberflat fiberflat = read_fiberflat(args.fiberflat) # apply fiberflat to sky fibers apply_fiberflat(frame, fiberflat) # compute sky model skymodel = compute_sky(frame,add_variance=(not args.no_extra_variance),\ angular_variation_deg=args.angular_variation_deg,\ chromatic_variation_deg=args.chromatic_variation_deg,\ adjust_wavelength=args.adjust_wavelength,\ adjust_lsf=args.adjust_lsf) # QA if (args.qafile is not None) or (args.qafig is not None): log.info("performing skysub QA") # Load qaframe = load_qa_frame(args.qafile, frame_meta=frame.meta, flavor=frame.meta['FLAVOR']) # Run qaframe.run_qa('SKYSUB', (frame, skymodel)) # Write if args.qafile is not None: write_qa_frame(args.qafile, qaframe) log.info("successfully wrote {:s}".format(args.qafile)) # Figure(s) if args.qafig is not None: qa_plots.frame_skyres(args.qafig, frame, skymodel, qaframe) # record inputs frame.meta['IN_FRAME'] = shorten_filename(args.infile) frame.meta['FIBERFLT'] = shorten_filename(args.fiberflat) # write result write_sky(args.outfile, skymodel, frame.meta) log.info("successfully wrote %s"%args.outfile)
def main(args): log = get_logger() log.info("starting") # read exposure to load data and get range of spectra frame = read_frame(args.infile) specmin, specmax = np.min(frame.fibers), np.max(frame.fibers) # read fiberflat fiberflat = read_fiberflat(args.fiberflat) # apply fiberflat to sky fibers apply_fiberflat(frame, fiberflat) # compute sky model skymodel = compute_sky(frame) # QA if (args.qafile is not None) or (args.qafig is not None): log.info("performing skysub QA") # Load qaframe = load_qa_frame(args.qafile, frame, flavor=frame.meta['FLAVOR']) # Run qaframe.run_qa('SKYSUB', (frame, skymodel)) # Write if args.qafile is not None: write_qa_frame(args.qafile, qaframe) log.info("successfully wrote {:s}".format(args.qafile)) # Figure(s) if args.qafig is not None: qa_plots.frame_skyres(args.qafig, frame, skymodel, qaframe) # write result write_sky(args.outfile, skymodel, frame.meta) log.info("successfully wrote %s" % args.outfile)
def main(args):
    log = get_logger()

    if (args.fiberflat is None) and (args.sky is None) and (args.calib is None):
        log.critical('no --fiberflat, --sky, or --calib; nothing to do ?!?')
        sys.exit(12)

    frame = read_frame(args.infile)

    if args.fiberflat is not None:
        log.info("apply fiberflat")
        # read fiberflat
        fiberflat = read_fiberflat(args.fiberflat)
        # apply fiberflat
        apply_fiberflat(frame, fiberflat)

    if args.sky is not None:
        log.info("subtract sky")
        # read sky
        skymodel = read_sky(args.sky)
        # subtract sky
        subtract_sky(frame, skymodel)

    if args.calib is not None:
        log.info("calibrate")
        # read calibration
        fluxcalib = read_flux_calibration(args.calib)
        # apply calibration
        apply_flux_calibration(frame, fluxcalib)

    # save output
    write_frame(args.outfile, frame, units='1e-17 erg/(s cm2 A)')
    log.info("successfully wrote %s" % args.outfile)
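# --- Illustrative sketch (not part of desispec): what "subtract sky" amounts to ---
# A hypothetical, simplified version of subtract_sky(): the sky model flux is
# subtracted fiber by fiber and the two variances are assumed to add; the real
# routine also handles masks and the resolution matrix.
import numpy as np

def subtract_sky_sketch(flux, ivar, skyflux, sky_ivar):
    out_flux = flux - skyflux
    good = (ivar > 0) & (sky_ivar > 0)
    out_ivar = np.zeros_like(flux)
    out_ivar[good] = 1.0 / (1.0 / ivar[good] + 1.0 / sky_ivar[good])  # variances add
    return out_flux, out_ivar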
def main():
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--infile', type=str, default=None, required=True, help='path of DESI exposure frame fits file')
    parser.add_argument('--fibermap', type=str, default=None, required=True, help='path of DESI exposure fibermap fits file')
    parser.add_argument('--fiberflat', type=str, default=None, required=True, help='path of DESI fiberflat fits file')
    parser.add_argument('--sky', type=str, default=None, required=True, help='path of DESI sky fits file')
    parser.add_argument('--models', type=str, default=None, required=True, help='path of spectro-photometric stellar spectra fits file')
    parser.add_argument('--outfile', type=str, default=None, required=True, help='path of DESI flux calibration fits file')
    args = parser.parse_args()

    log = get_logger()

    log.info("read frame")
    # read frame
    frame = read_frame(args.infile)

    log.info("apply fiberflat")
    # read fiberflat
    fiberflat = read_fiberflat(args.fiberflat)
    # apply fiberflat
    apply_fiberflat(frame, fiberflat)

    log.info("subtract sky")
    # read sky
    skymodel = read_sky(args.sky)
    # subtract sky
    subtract_sky(frame, skymodel)

    log.info("compute flux calibration")
    # read models
    model_flux, model_wave, model_fibers = read_stdstar_models(args.models)

    # select fibers
    SPECMIN = frame.header["SPECMIN"]
    SPECMAX = frame.header["SPECMAX"]
    selec = np.where((model_fibers >= SPECMIN) & (model_fibers <= SPECMAX))[0]
    if selec.size == 0:
        log.error("no stellar models for this spectro")
        sys.exit(12)
    fibers = model_fibers[selec] - frame.header["SPECMIN"]
    log.info("star fibers= %s" % str(fibers))

    table = read_fibermap(args.fibermap)
    bad = np.where(table["OBJTYPE"][fibers] != "STD")[0]
    if bad.size > 0:
        for fiber in fibers[bad]:
            log.error("inconsistency with fiber %d, OBJTYPE='%s' in fibermap" % (fiber, table["OBJTYPE"][fiber]))
        sys.exit(12)

    fluxcalib = compute_flux_calibration(frame, fibers, model_wave, model_flux)

    # write result
    write_flux_calibration(args.outfile, fluxcalib, header=frame.header)
    log.info("successfully wrote %s" % args.outfile)
def main(args=None): if args is None: args = parse() elif isinstance(args, (list, tuple)): args = parse(args) t0 = time.time() log = get_logger() # guess if it is a preprocessed or a raw image hdulist = fits.open(args.image) is_input_preprocessed = ("IMAGE" in hdulist) & ("IVAR" in hdulist) primary_header = hdulist[0].header hdulist.close() if is_input_preprocessed: image = read_image(args.image) else: if args.camera is None: print( "ERROR: Need to specify camera to open a raw fits image (with all cameras in different fits HDUs)" ) print( "Try adding the option '--camera xx', with xx in {brz}{0-9}, like r7, or type 'desi_qproc --help' for more options" ) sys.exit(12) image = read_raw(args.image, args.camera, fill_header=[ 1, ]) if args.auto: log.debug("AUTOMATIC MODE") try: night = image.meta['NIGHT'] if not 'EXPID' in image.meta: if 'EXPNUM' in image.meta: log.warning('using EXPNUM {} for EXPID'.format( image.meta['EXPNUM'])) image.meta['EXPID'] = image.meta['EXPNUM'] expid = image.meta['EXPID'] except KeyError as e: log.error( "Need at least NIGHT and EXPID (or EXPNUM) to run in auto mode. Retry without the --auto option." ) log.error(str(e)) sys.exit(12) indir = os.path.dirname(args.image) if args.fibermap is None: filename = '{}/fibermap-{:08d}.fits'.format(indir, expid) if os.path.isfile(filename): log.debug("auto-mode: found a fibermap, {}, using it!".format( filename)) args.fibermap = filename if args.output_preproc is None: if not is_input_preprocessed: args.output_preproc = '{}/preproc-{}-{:08d}.fits'.format( args.auto_output_dir, args.camera.lower(), expid) log.debug("auto-mode: will write preproc in " + args.output_preproc) else: log.debug( "auto-mode: will not write preproc because input is a preprocessed image" ) if args.auto_output_dir != '.': if not os.path.isdir(args.auto_output_dir): log.debug("auto-mode: creating directory " + args.auto_output_dir) os.makedirs(args.auto_output_dir) if args.output_preproc is not None: write_image(args.output_preproc, image) cfinder = None if args.psf is None: if cfinder is None: cfinder = CalibFinder([image.meta, primary_header]) args.psf = cfinder.findfile("PSF") log.info(" Using PSF {}".format(args.psf)) tset = read_xytraceset(args.psf) # add fibermap if args.fibermap: if os.path.isfile(args.fibermap): fibermap = read_fibermap(args.fibermap) else: log.error("no fibermap file {}".format(args.fibermap)) fibermap = None else: fibermap = None if "OBSTYPE" in image.meta: obstype = image.meta["OBSTYPE"].upper() image.meta["OBSTYPE"] = obstype # make sure it's upper case qframe = None else: log.warning("No OBSTYPE keyword, trying to guess ...") qframe = qproc_boxcar_extraction(tset, image, width=args.width, fibermap=fibermap) obstype = check_qframe_flavor( qframe, input_flavor=image.meta["FLAVOR"]).upper() image.meta["OBSTYPE"] = obstype log.info("OBSTYPE = '{}'".format(obstype)) if args.auto: # now set the things to do if obstype == "SKY" or obstype == "TWILIGHT" or obstype == "SCIENCE": args.shift_psf = True args.output_psf = '{}/psf-{}-{:08d}.fits'.format( args.auto_output_dir, args.camera, expid) args.output_rawframe = '{}/qframe-{}-{:08d}.fits'.format( args.auto_output_dir, args.camera, expid) args.apply_fiberflat = True args.skysub = True args.output_skyframe = '{}/qsky-{}-{:08d}.fits'.format( args.auto_output_dir, args.camera, expid) args.fluxcalib = True args.outframe = '{}/qcframe-{}-{:08d}.fits'.format( args.auto_output_dir, args.camera, expid) elif obstype == "ARC" or obstype == "TESTARC": args.shift_psf = True args.output_psf = 
'{}/psf-{}-{:08d}.fits'.format( args.auto_output_dir, args.camera, expid) args.output_rawframe = '{}/qframe-{}-{:08d}.fits'.format( args.auto_output_dir, args.camera, expid) args.compute_lsf_sigma = True elif obstype == "FLAT" or obstype == "TESTFLAT": args.shift_psf = True args.output_psf = '{}/psf-{}-{:08d}.fits'.format( args.auto_output_dir, args.camera, expid) args.output_rawframe = '{}/qframe-{}-{:08d}.fits'.format( args.auto_output_dir, args.camera, expid) args.compute_fiberflat = '{}/qfiberflat-{}-{:08d}.fits'.format( args.auto_output_dir, args.camera, expid) if args.shift_psf: # using the trace shift script if args.auto: options = option_list({ "psf": args.psf, "image": "dummy", "outpsf": "dummy", "continuum": ((obstype == "FLAT") | (obstype == "TESTFLAT")), "sky": ((obstype == "SCIENCE") | (obstype == "SKY")) }) else: options = option_list({ "psf": args.psf, "image": "dummy", "outpsf": "dummy" }) tmp_args = trace_shifts_script.parse(options=options) tset = trace_shifts_script.fit_trace_shifts(image=image, args=tmp_args) qframe = qproc_boxcar_extraction(tset, image, width=args.width, fibermap=fibermap) if tset.meta is not None: # add traceshift info in the qframe, this will be saved in the qframe header if qframe.meta is None: qframe.meta = dict() for k in tset.meta.keys(): qframe.meta[k] = tset.meta[k] if args.output_rawframe is not None: write_qframe(args.output_rawframe, qframe) log.info("wrote raw extracted frame in {}".format( args.output_rawframe)) if args.compute_lsf_sigma: tset = process_arc(qframe, tset, linelist=None, npoly=2, nbins=2) if args.output_psf is not None: for k in qframe.meta: if k not in tset.meta: tset.meta[k] = qframe.meta[k] write_xytraceset(args.output_psf, tset) if args.compute_fiberflat is not None: fiberflat = qproc_compute_fiberflat(qframe) #write_qframe(args.compute_fiberflat,qflat) write_fiberflat(args.compute_fiberflat, fiberflat, header=qframe.meta) log.info("wrote fiberflat in {}".format(args.compute_fiberflat)) if args.apply_fiberflat or args.input_fiberflat: if args.input_fiberflat is None: if cfinder is None: cfinder = CalibFinder([image.meta, primary_header]) try: args.input_fiberflat = cfinder.findfile("FIBERFLAT") except KeyError as e: log.error("no FIBERFLAT for this spectro config") sys.exit(12) log.info("applying fiber flat {}".format(args.input_fiberflat)) flat = read_fiberflat(args.input_fiberflat) qproc_apply_fiberflat(qframe, flat) if args.skysub: log.info("sky subtraction") if args.output_skyframe is not None: skyflux = qproc_sky_subtraction(qframe, return_skymodel=True) sqframe = QFrame(qframe.wave, skyflux, np.ones(skyflux.shape)) write_qframe(args.output_skyframe, sqframe) log.info("wrote sky model in {}".format(args.output_skyframe)) else: qproc_sky_subtraction(qframe) if args.fluxcalib: if cfinder is None: cfinder = CalibFinder([image.meta, primary_header]) # check for flux calib if cfinder.haskey("FLUXCALIB"): fluxcalib_filename = cfinder.findfile("FLUXCALIB") fluxcalib = read_average_flux_calibration(fluxcalib_filename) log.info("read average calib in {}".format(fluxcalib_filename)) seeing = qframe.meta["SEEING"] airmass = qframe.meta["AIRMASS"] exptime = qframe.meta["EXPTIME"] exposure_calib = fluxcalib.value(seeing=seeing, airmass=airmass) for q in range(qframe.nspec): fiber_calib = np.interp(qframe.wave[q], fluxcalib.wave, exposure_calib) * exptime inv_calib = (fiber_calib > 0) / (fiber_calib + (fiber_calib == 0)) qframe.flux[q] *= inv_calib qframe.ivar[q] *= fiber_calib**2 * (fiber_calib > 0) # add keyword in header 
giving the calibration factor applied at a reference wavelength band = qframe.meta["CAMERA"].upper()[0] if band == "B": refwave = 4500 elif band == "R": refwave = 6500 else: refwave = 8500 calvalue = np.interp(refwave, fluxcalib.wave, exposure_calib) * exptime qframe.meta["CALWAVE"] = refwave qframe.meta["CALVALUE"] = calvalue else: log.error( "Cannot calibrate fluxes because no FLUXCALIB keywork in calibration files" ) fibers = parse_fibers(args.fibers) if fibers is None: fibers = qframe.flux.shape[0] else: ii = np.arange(qframe.fibers.size)[np.in1d(qframe.fibers, fibers)] if ii.size == 0: log.error("no such fibers in frame,") log.error("fibers are in range [{}:{}]".format( qframe.fibers[0], qframe.fibers[-1] + 1)) sys.exit(12) qframe = qframe[ii] if args.outframe is not None: write_qframe(args.outframe, qframe) log.info("wrote {}".format(args.outframe)) t1 = time.time() log.info("all done in {:3.1f} sec".format(t1 - t0)) if args.plot: log.info("plotting {} spectra".format(qframe.wave.shape[0])) import matplotlib.pyplot as plt fig = plt.figure() for i in range(qframe.wave.shape[0]): j = (qframe.ivar[i] > 0) plt.plot(qframe.wave[i, j], qframe.flux[i, j]) plt.grid() plt.xlabel("wavelength") plt.ylabel("flux") plt.show()
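# --- Illustrative note (not part of desi_qproc): the zero-safe inversion idiom used above ---
# The flux calibration step computes inv_calib = (fiber_calib > 0) / (fiber_calib + (fiber_calib == 0)):
# where the calibration vector is positive this is simply 1/calib, and where it is zero the
# denominator becomes 1 while the numerator is 0, so the result is 0 instead of a division error.
import numpy as np

calib = np.array([0.0, 2.0, 4.0])
inv_calib = (calib > 0) / (calib + (calib == 0))
assert np.allclose(inv_calib, [0.0, 0.5, 0.25])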
def main(args): log = get_logger() if (args.night is None or args.arm is None) and args.prefix is None: log.error( "ERROR in arguments, need night and arm or prefix for output file names" ) return log = get_logger() log.info("starting at {}".format(time.asctime())) inputs = [] for filename in args.infile: inputs.append(read_fiberflat(filename)) program = [] camera = [] expid = [] for fflat in inputs: program.append(fflat.header["PROGRAM"]) camera.append(fflat.header["CAMERA"]) expid.append(fflat.header["EXPID"]) program = np.array(program) camera = np.array(camera) expid = np.array(expid) ucam = np.unique(camera) log.debug("cameras: {}".format(ucam)) if args.average_per_program: uprog = np.unique(program) log.info("programs: {}".format(uprog)) fiberflat_per_program_and_camera = [] for p in uprog: if p.find("CALIB DESI-CALIB-00 to 03") >= 0: log.warning("ignore program {}".format(p)) continue log.debug( "make sure we have the same list of exposures per camera, for each program" ) common_expid = None for c in ucam: expid_per_program_and_camera = expid[(program == p) & (camera == c)] print("expids with camera={} for program={} : {}".format( c, p, expid_per_program_and_camera)) if common_expid is None: common_expid = expid_per_program_and_camera else: common_expid = np.intersect1d( common_expid, expid_per_program_and_camera) print("expids with all cameras for program={} : {}".format( p, common_expid)) for c in ucam: fflat_to_average = [] for e in common_expid: ii = np.where((program == p) & (camera == c) & (expid == e))[0] for i in ii: fflat_to_average.append(inputs[i]) log.info("averaging {} {} ({} files)".format( p, c, len(fflat_to_average))) fiberflat_per_program_and_camera.append( average_fiberflat(fflat_to_average)) inputs = fiberflat_per_program_and_camera else: log.debug( "make sure we have the same list of exposures per camera, for each program" ) common_expid = None for c in ucam: expid_per_camera = expid[(camera == c)] print("expids with camera={} : {}".format(c, expid_per_camera)) if common_expid is None: common_expid = expid_per_camera else: common_expid = np.intersect1d(common_expid, expid_per_camera) print("expids with all cameras : {}".format(common_expid)) fflat_to_average = [] for e in common_expid: ii = np.where((expid == e))[0] for i in ii: fflat_to_average.append(inputs[i]) inputs = fflat_to_average fiberflats = autocalib_fiberflat(inputs) for spectro in fiberflats.keys(): if args.prefix: ofilename = "{}{}-autocal.fits".format(args.prefix, spectro) else: camera = "{}{}".format(args.arm, spectro) ofilename = findfile('fiberflatnight', args.night, 0, camera) write_fiberflat(ofilename, fiberflats[spectro]) log.info("successfully wrote %s" % ofilename)
def main(args) : """ finds the best models of all standard stars in the frame and normlize the model flux. Output is written to a file and will be called for calibration. """ log = get_logger() log.info("mag delta %s = %f (for the pre-selection of stellar models)"%(args.color,args.delta_color)) frames={} flats={} skies={} spectrograph=None starfibers=None starindices=None fibermap=None # READ DATA ############################################ for filename in args.frames : log.info("reading %s"%filename) frame=io.read_frame(filename) header=fits.getheader(filename, 0) frame_fibermap = frame.fibermap frame_starindices = np.where(isStdStar(frame_fibermap['DESI_TARGET']))[0] #- Confirm that all fluxes have entries but trust targeting bits #- to get basic magnitude range correct keep = np.ones(len(frame_starindices), dtype=bool) for colname in ['FLUX_G', 'FLUX_R', 'FLUX_Z']: #- and W1 and W2? keep &= frame_fibermap[colname][frame_starindices] > 10**((22.5-30)/2.5) keep &= frame_fibermap[colname][frame_starindices] < 10**((22.5-0)/2.5) frame_starindices = frame_starindices[keep] camera=safe_read_key(header,"CAMERA").strip().lower() if spectrograph is None : spectrograph = frame.spectrograph fibermap = frame_fibermap starindices=frame_starindices starfibers=fibermap["FIBER"][starindices] elif spectrograph != frame.spectrograph : log.error("incompatible spectrographs %d != %d"%(spectrograph,frame.spectrograph)) raise ValueError("incompatible spectrographs %d != %d"%(spectrograph,frame.spectrograph)) elif starindices.size != frame_starindices.size or np.sum(starindices!=frame_starindices)>0 : log.error("incompatible fibermap") raise ValueError("incompatible fibermap") if not camera in frames : frames[camera]=[] frames[camera].append(frame) for filename in args.skymodels : log.info("reading %s"%filename) sky=io.read_sky(filename) header=fits.getheader(filename, 0) camera=safe_read_key(header,"CAMERA").strip().lower() if not camera in skies : skies[camera]=[] skies[camera].append(sky) for filename in args.fiberflats : log.info("reading %s"%filename) header=fits.getheader(filename, 0) flat=io.read_fiberflat(filename) camera=safe_read_key(header,"CAMERA").strip().lower() # NEED TO ADD MORE CHECKS if camera in flats: log.warning("cannot handle several flats of same camera (%s), will use only the first one"%camera) #raise ValueError("cannot handle several flats of same camera (%s)"%camera) else : flats[camera]=flat if starindices.size == 0 : log.error("no STD star found in fibermap") raise ValueError("no STD star found in fibermap") log.info("found %d STD stars"%starindices.size) log.warning("Not using flux errors for Standard Star fits!") # DIVIDE FLAT AND SUBTRACT SKY , TRIM DATA ############################################ for cam in frames : if not cam in skies: log.warning("Missing sky for %s"%cam) frames.pop(cam) continue if not cam in flats: log.warning("Missing flat for %s"%cam) frames.pop(cam) continue flat=flats[cam] for frame,sky in zip(frames[cam],skies[cam]) : frame.flux = frame.flux[starindices] frame.ivar = frame.ivar[starindices] frame.ivar *= (frame.mask[starindices] == 0) frame.ivar *= (sky.ivar[starindices] != 0) frame.ivar *= (sky.mask[starindices] == 0) frame.ivar *= (flat.ivar[starindices] != 0) frame.ivar *= (flat.mask[starindices] == 0) frame.flux *= ( frame.ivar > 0) # just for clean plots for star in range(frame.flux.shape[0]) : ok=np.where((frame.ivar[star]>0)&(flat.fiberflat[star]!=0))[0] if ok.size > 0 : frame.flux[star] = frame.flux[star]/flat.fiberflat[star] - sky.flux[star] 
frame.resolution_data = frame.resolution_data[starindices] nstars = starindices.size fibermap = Table(fibermap[starindices]) # READ MODELS ############################################ log.info("reading star models in %s"%args.starmodels) stdwave,stdflux,templateid,teff,logg,feh=io.read_stdstar_templates(args.starmodels) # COMPUTE MAGS OF MODELS FOR EACH STD STAR MAG ############################################ #- Support older fibermaps if 'PHOTSYS' not in fibermap.colnames: log.warning('Old fibermap format; using defaults for missing columns') log.warning(" PHOTSYS = 'S'") log.warning(" MW_TRANSMISSION_G/R/Z = 1.0") log.warning(" EBV = 0.0") fibermap['PHOTSYS'] = 'S' fibermap['MW_TRANSMISSION_G'] = 1.0 fibermap['MW_TRANSMISSION_R'] = 1.0 fibermap['MW_TRANSMISSION_Z'] = 1.0 fibermap['EBV'] = 0.0 model_filters = dict() if 'S' in fibermap['PHOTSYS']: for filter_name in ['DECAM_G', 'DECAM_R', 'DECAM_Z']: model_filters[filter_name] = load_filter(filter_name) if 'N' in fibermap['PHOTSYS']: for filter_name in ['BASS_G', 'BASS_R', 'MZLS_Z']: model_filters[filter_name] = load_filter(filter_name) if len(model_filters) == 0: raise ValueError("No filters loaded; neither 'N' nor 'S' in PHOTSYS?") log.info("computing model mags for %s"%sorted(model_filters.keys())) model_mags = dict() fluxunits = 1e-17 * units.erg / units.s / units.cm**2 / units.Angstrom for filter_name, filter_response in model_filters.items(): model_mags[filter_name] = filter_response.get_ab_magnitude(stdflux*fluxunits,stdwave) log.info("done computing model mags") # LOOP ON STARS TO FIND BEST MODEL ############################################ linear_coefficients=np.zeros((nstars,stdflux.shape[0])) chi2dof=np.zeros((nstars)) redshift=np.zeros((nstars)) normflux=[] star_mags = dict() star_unextincted_mags = dict() for band in ['G', 'R', 'Z']: star_mags[band] = 22.5 - 2.5 * np.log10(fibermap['FLUX_'+band]) star_unextincted_mags[band] = 22.5 - 2.5 * np.log10(fibermap['FLUX_'+band] / fibermap['MW_TRANSMISSION_'+band]) star_colors = dict() star_colors['G-R'] = star_mags['G'] - star_mags['R'] star_colors['R-Z'] = star_mags['R'] - star_mags['Z'] star_unextincted_colors = dict() star_unextincted_colors['G-R'] = star_unextincted_mags['G'] - star_unextincted_mags['R'] star_unextincted_colors['R-Z'] = star_unextincted_mags['R'] - star_unextincted_mags['Z'] fitted_model_colors = np.zeros(nstars) for star in range(nstars) : log.info("finding best model for observed star #%d"%star) # np.array of wave,flux,ivar,resol wave = {} flux = {} ivar = {} resolution_data = {} for camera in frames : for i,frame in enumerate(frames[camera]) : identifier="%s-%d"%(camera,i) wave[identifier]=frame.wave flux[identifier]=frame.flux[star] ivar[identifier]=frame.ivar[star] resolution_data[identifier]=frame.resolution_data[star] # preselect models based on magnitudes if fibermap['PHOTSYS'][star] == 'N': if args.color == 'G-R': model_colors = model_mags['BASS_G'] - model_mags['BASS_R'] elif args.color == 'R-Z': model_colors = model_mags['BASS_R'] - model_mags['MZLS_Z'] else: raise ValueError('Unknown color {}'.format(args.color)) else: if args.color == 'G-R': model_colors = model_mags['DECAM_G'] - model_mags['DECAM_R'] elif args.color == 'R-Z': model_colors = model_mags['DECAM_R'] - model_mags['DECAM_Z'] else: raise ValueError('Unknown color {}'.format(args.color)) color_diff = model_colors - star_unextincted_colors[args.color][star] selection = np.abs(color_diff) < args.delta_color # smallest cube in parameter space including this selection (needed for interpolation) 
new_selection = (teff>=np.min(teff[selection]))&(teff<=np.max(teff[selection])) new_selection &= (logg>=np.min(logg[selection]))&(logg<=np.max(logg[selection])) new_selection &= (feh>=np.min(feh[selection]))&(feh<=np.max(feh[selection])) selection = np.where(new_selection)[0] log.info("star#%d fiber #%d, %s = %f, number of pre-selected models = %d/%d"%( star, starfibers[star], args.color, star_unextincted_colors[args.color][star], selection.size, stdflux.shape[0])) # Match unextincted standard stars to data coefficients, redshift[star], chi2dof[star] = match_templates( wave, flux, ivar, resolution_data, stdwave, stdflux[selection], teff[selection], logg[selection], feh[selection], ncpu=args.ncpu, z_max=args.z_max, z_res=args.z_res, template_error=args.template_error ) linear_coefficients[star,selection] = coefficients log.info('Star Fiber: {0}; TEFF: {1}; LOGG: {2}; FEH: {3}; Redshift: {4}; Chisq/dof: {5}'.format( starfibers[star], np.inner(teff,linear_coefficients[star]), np.inner(logg,linear_coefficients[star]), np.inner(feh,linear_coefficients[star]), redshift[star], chi2dof[star]) ) # Apply redshift to original spectrum at full resolution model=np.zeros(stdwave.size) redshifted_stdwave = stdwave*(1+redshift[star]) for i,c in enumerate(linear_coefficients[star]) : if c != 0 : model += c*np.interp(stdwave,redshifted_stdwave,stdflux[i]) # Apply dust extinction to the model model *= dust_transmission(stdwave, fibermap['EBV'][star]) # Compute final color of dust-extincted model if fibermap['PHOTSYS'][star] == 'N': if args.color == 'G-R': model_mag1 = model_filters['BASS_G'].get_ab_magnitude(model*fluxunits, stdwave) model_mag2 = model_filters['BASS_R'].get_ab_magnitude(model*fluxunits, stdwave) model_magr = model_mag2 elif args.color == 'R-Z': model_mag1 = model_filters['BASS_R'].get_ab_magnitude(model*fluxunits, stdwave) model_mag2 = model_filters['MZLS_Z'].get_ab_magnitude(model*fluxunits, stdwave) model_magr = model_mag1 else: raise ValueError('Unknown color {}'.format(args.color)) else: if args.color == 'G-R': model_mag1 = model_filters['DECAM_G'].get_ab_magnitude(model*fluxunits, stdwave) model_mag2 = model_filters['DECAM_R'].get_ab_magnitude(model*fluxunits, stdwave) model_magr = model_mag2 elif args.color == 'R-Z': model_mag1 = model_filters['DECAM_R'].get_ab_magnitude(model*fluxunits, stdwave) model_mag2 = model_filters['DECAM_Z'].get_ab_magnitude(model*fluxunits, stdwave) model_magr = model_mag1 else: raise ValueError('Unknown color {}'.format(args.color)) fitted_model_colors[star] = model_mag1 - model_mag2 #- TODO: move this back into normalize_templates, at the cost of #- recalculating a model magnitude? # Normalize the best model using reported magnitude scalefac=10**((model_magr - star_mags['R'][star])/2.5) log.info('scaling R mag {} to {} using scale {}'.format(model_magr, star_mags['R'][star], scalefac)) normflux.append(model*scalefac) # Now write the normalized flux for all best models to a file normflux=np.array(normflux) data={} data['LOGG']=linear_coefficients.dot(logg) data['TEFF']= linear_coefficients.dot(teff) data['FEH']= linear_coefficients.dot(feh) data['CHI2DOF']=chi2dof data['REDSHIFT']=redshift data['COEFF']=linear_coefficients data['DATA_%s'%args.color]=star_colors[args.color] data['MODEL_%s'%args.color]=fitted_model_colors io.write_stdstar_models(args.outfile,normflux,stdwave,starfibers,data)
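# --- Illustrative sketch (not part of the fit above): the flux-to-magnitude conversion it assumes ---
# The fibermap FLUX_G/R/Z columns are converted to AB magnitudes with 22.5 - 2.5*log10(flux),
# as in the code above; dividing by MW_TRANSMISSION_X before the conversion gives the
# unextincted magnitude used to pre-select stellar models by color.
import numpy as np

def ab_magnitude(flux_nanomaggies, mw_transmission=1.0):
    return 22.5 - 2.5 * np.log10(flux_nanomaggies / mw_transmission)

g_minus_r = ab_magnitude(10.0) - ab_magnitude(25.0)   # example fluxes, ~0.995 mag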
def main(args): """ finds the best models of all standard stars in the frame and normlize the model flux. Output is written to a file and will be called for calibration. """ log = get_logger() log.info("mag delta %s = %f (for the pre-selection of stellar models)" % (args.color, args.delta_color)) frames = {} flats = {} skies = {} spectrograph = None starfibers = None starindices = None fibermap = None # READ DATA ############################################ for filename in args.frames: log.info("reading %s" % filename) frame = io.read_frame(filename) header = fits.getheader(filename, 0) frame_fibermap = frame.fibermap frame_starindices = np.where(isStdStar(frame_fibermap))[0] #- Confirm that all fluxes have entries but trust targeting bits #- to get basic magnitude range correct keep = np.ones(len(frame_starindices), dtype=bool) for colname in ['FLUX_G', 'FLUX_R', 'FLUX_Z']: #- and W1 and W2? keep &= frame_fibermap[colname][frame_starindices] > 10**( (22.5 - 30) / 2.5) keep &= frame_fibermap[colname][frame_starindices] < 10**( (22.5 - 0) / 2.5) frame_starindices = frame_starindices[keep] camera = safe_read_key(header, "CAMERA").strip().lower() if spectrograph is None: spectrograph = frame.spectrograph fibermap = frame_fibermap starindices = frame_starindices starfibers = fibermap["FIBER"][starindices] elif spectrograph != frame.spectrograph: log.error("incompatible spectrographs %d != %d" % (spectrograph, frame.spectrograph)) raise ValueError("incompatible spectrographs %d != %d" % (spectrograph, frame.spectrograph)) elif starindices.size != frame_starindices.size or np.sum( starindices != frame_starindices) > 0: log.error("incompatible fibermap") raise ValueError("incompatible fibermap") if not camera in frames: frames[camera] = [] frames[camera].append(frame) for filename in args.skymodels: log.info("reading %s" % filename) sky = io.read_sky(filename) header = fits.getheader(filename, 0) camera = safe_read_key(header, "CAMERA").strip().lower() if not camera in skies: skies[camera] = [] skies[camera].append(sky) for filename in args.fiberflats: log.info("reading %s" % filename) header = fits.getheader(filename, 0) flat = io.read_fiberflat(filename) camera = safe_read_key(header, "CAMERA").strip().lower() # NEED TO ADD MORE CHECKS if camera in flats: log.warning( "cannot handle several flats of same camera (%s), will use only the first one" % camera) #raise ValueError("cannot handle several flats of same camera (%s)"%camera) else: flats[camera] = flat if starindices.size == 0: log.error("no STD star found in fibermap") raise ValueError("no STD star found in fibermap") log.info("found %d STD stars" % starindices.size) log.warning("Not using flux errors for Standard Star fits!") # DIVIDE FLAT AND SUBTRACT SKY , TRIM DATA ############################################ for cam in frames: if not cam in skies: log.warning("Missing sky for %s" % cam) frames.pop(cam) continue if not cam in flats: log.warning("Missing flat for %s" % cam) frames.pop(cam) continue flat = flats[cam] for frame, sky in zip(frames[cam], skies[cam]): frame.flux = frame.flux[starindices] frame.ivar = frame.ivar[starindices] frame.ivar *= (frame.mask[starindices] == 0) frame.ivar *= (sky.ivar[starindices] != 0) frame.ivar *= (sky.mask[starindices] == 0) frame.ivar *= (flat.ivar[starindices] != 0) frame.ivar *= (flat.mask[starindices] == 0) frame.flux *= (frame.ivar > 0) # just for clean plots for star in range(frame.flux.shape[0]): ok = np.where((frame.ivar[star] > 0) & (flat.fiberflat[star] != 0))[0] if ok.size > 0: 
frame.flux[star] = frame.flux[star] / flat.fiberflat[ star] - sky.flux[star] frame.resolution_data = frame.resolution_data[starindices] nstars = starindices.size fibermap = Table(fibermap[starindices]) # READ MODELS ############################################ log.info("reading star models in %s" % args.starmodels) stdwave, stdflux, templateid, teff, logg, feh = io.read_stdstar_templates( args.starmodels) # COMPUTE MAGS OF MODELS FOR EACH STD STAR MAG ############################################ #- Support older fibermaps if 'PHOTSYS' not in fibermap.colnames: log.warning('Old fibermap format; using defaults for missing columns') log.warning(" PHOTSYS = 'S'") log.warning(" MW_TRANSMISSION_G/R/Z = 1.0") log.warning(" EBV = 0.0") fibermap['PHOTSYS'] = 'S' fibermap['MW_TRANSMISSION_G'] = 1.0 fibermap['MW_TRANSMISSION_R'] = 1.0 fibermap['MW_TRANSMISSION_Z'] = 1.0 fibermap['EBV'] = 0.0 model_filters = dict() if 'S' in fibermap['PHOTSYS']: for filter_name in ['DECAM_G', 'DECAM_R', 'DECAM_Z']: model_filters[filter_name] = load_filter(filter_name) if 'N' in fibermap['PHOTSYS']: for filter_name in ['BASS_G', 'BASS_R', 'MZLS_Z']: model_filters[filter_name] = load_filter(filter_name) if len(model_filters) == 0: raise ValueError("No filters loaded; neither 'N' nor 'S' in PHOTSYS?") log.info("computing model mags for %s" % sorted(model_filters.keys())) model_mags = dict() fluxunits = 1e-17 * units.erg / units.s / units.cm**2 / units.Angstrom for filter_name, filter_response in model_filters.items(): model_mags[filter_name] = filter_response.get_ab_magnitude( stdflux * fluxunits, stdwave) log.info("done computing model mags") # LOOP ON STARS TO FIND BEST MODEL ############################################ linear_coefficients = np.zeros((nstars, stdflux.shape[0])) chi2dof = np.zeros((nstars)) redshift = np.zeros((nstars)) normflux = [] star_mags = dict() star_unextincted_mags = dict() for band in ['G', 'R', 'Z']: star_mags[band] = 22.5 - 2.5 * np.log10(fibermap['FLUX_' + band]) star_unextincted_mags[band] = 22.5 - 2.5 * np.log10( fibermap['FLUX_' + band] / fibermap['MW_TRANSMISSION_' + band]) star_colors = dict() star_colors['G-R'] = star_mags['G'] - star_mags['R'] star_colors['R-Z'] = star_mags['R'] - star_mags['Z'] star_unextincted_colors = dict() star_unextincted_colors[ 'G-R'] = star_unextincted_mags['G'] - star_unextincted_mags['R'] star_unextincted_colors[ 'R-Z'] = star_unextincted_mags['R'] - star_unextincted_mags['Z'] fitted_model_colors = np.zeros(nstars) for star in range(nstars): log.info("finding best model for observed star #%d" % star) # np.array of wave,flux,ivar,resol wave = {} flux = {} ivar = {} resolution_data = {} for camera in frames: for i, frame in enumerate(frames[camera]): identifier = "%s-%d" % (camera, i) wave[identifier] = frame.wave flux[identifier] = frame.flux[star] ivar[identifier] = frame.ivar[star] resolution_data[identifier] = frame.resolution_data[star] # preselect models based on magnitudes if fibermap['PHOTSYS'][star] == 'N': if args.color == 'G-R': model_colors = model_mags['BASS_G'] - model_mags['BASS_R'] elif args.color == 'R-Z': model_colors = model_mags['BASS_R'] - model_mags['MZLS_Z'] else: raise ValueError('Unknown color {}'.format(args.color)) else: if args.color == 'G-R': model_colors = model_mags['DECAM_G'] - model_mags['DECAM_R'] elif args.color == 'R-Z': model_colors = model_mags['DECAM_R'] - model_mags['DECAM_Z'] else: raise ValueError('Unknown color {}'.format(args.color)) color_diff = model_colors - star_unextincted_colors[args.color][star] selection = 
np.abs(color_diff) < args.delta_color # smallest cube in parameter space including this selection (needed for interpolation) new_selection = (teff >= np.min(teff[selection])) & (teff <= np.max( teff[selection])) new_selection &= (logg >= np.min(logg[selection])) & (logg <= np.max( logg[selection])) new_selection &= (feh >= np.min(feh[selection])) & (feh <= np.max( feh[selection])) selection = np.where(new_selection)[0] log.info( "star#%d fiber #%d, %s = %f, number of pre-selected models = %d/%d" % (star, starfibers[star], args.color, star_unextincted_colors[args.color][star], selection.size, stdflux.shape[0])) # Match unextincted standard stars to data coefficients, redshift[star], chi2dof[star] = match_templates( wave, flux, ivar, resolution_data, stdwave, stdflux[selection], teff[selection], logg[selection], feh[selection], ncpu=args.ncpu, z_max=args.z_max, z_res=args.z_res, template_error=args.template_error) linear_coefficients[star, selection] = coefficients log.info( 'Star Fiber: {0}; TEFF: {1}; LOGG: {2}; FEH: {3}; Redshift: {4}; Chisq/dof: {5}' .format(starfibers[star], np.inner(teff, linear_coefficients[star]), np.inner(logg, linear_coefficients[star]), np.inner(feh, linear_coefficients[star]), redshift[star], chi2dof[star])) # Apply redshift to original spectrum at full resolution model = np.zeros(stdwave.size) redshifted_stdwave = stdwave * (1 + redshift[star]) for i, c in enumerate(linear_coefficients[star]): if c != 0: model += c * np.interp(stdwave, redshifted_stdwave, stdflux[i]) # Apply dust extinction to the model model *= dust_transmission(stdwave, fibermap['EBV'][star]) # Compute final color of dust-extincted model if fibermap['PHOTSYS'][star] == 'N': if args.color == 'G-R': model_mag1 = model_filters['BASS_G'].get_ab_magnitude( model * fluxunits, stdwave) model_mag2 = model_filters['BASS_R'].get_ab_magnitude( model * fluxunits, stdwave) model_magr = model_mag2 elif args.color == 'R-Z': model_mag1 = model_filters['BASS_R'].get_ab_magnitude( model * fluxunits, stdwave) model_mag2 = model_filters['MZLS_Z'].get_ab_magnitude( model * fluxunits, stdwave) model_magr = model_mag1 else: raise ValueError('Unknown color {}'.format(args.color)) else: if args.color == 'G-R': model_mag1 = model_filters['DECAM_G'].get_ab_magnitude( model * fluxunits, stdwave) model_mag2 = model_filters['DECAM_R'].get_ab_magnitude( model * fluxunits, stdwave) model_magr = model_mag2 elif args.color == 'R-Z': model_mag1 = model_filters['DECAM_R'].get_ab_magnitude( model * fluxunits, stdwave) model_mag2 = model_filters['DECAM_Z'].get_ab_magnitude( model * fluxunits, stdwave) model_magr = model_mag1 else: raise ValueError('Unknown color {}'.format(args.color)) fitted_model_colors[star] = model_mag1 - model_mag2 #- TODO: move this back into normalize_templates, at the cost of #- recalculating a model magnitude? 
# Normalize the best model using reported magnitude scalefac = 10**((model_magr - star_mags['R'][star]) / 2.5) log.info('scaling R mag {} to {} using scale {}'.format( model_magr, star_mags['R'][star], scalefac)) normflux.append(model * scalefac) # Now write the normalized flux for all best models to a file normflux = np.array(normflux) data = {} data['LOGG'] = linear_coefficients.dot(logg) data['TEFF'] = linear_coefficients.dot(teff) data['FEH'] = linear_coefficients.dot(feh) data['CHI2DOF'] = chi2dof data['REDSHIFT'] = redshift data['COEFF'] = linear_coefficients data['DATA_%s' % args.color] = star_colors[args.color] data['MODEL_%s' % args.color] = fitted_model_colors io.write_stdstar_models(args.outfile, normflux, stdwave, starfibers, data)
def preproc(rawimage, header, primary_header, bias=True, dark=True, pixflat=True, mask=True, bkgsub=False, nocosmic=False, cosmics_nsig=6, cosmics_cfudge=3., cosmics_c2fudge=0.5, ccd_calibration_filename=None, nocrosstalk=False, nogain=False, overscan_per_row=False, use_overscan_row=False, use_savgol=None, nodarktrail=False, remove_scattered_light=False, psf_filename=None, bias_img=None, model_variance=False): ''' preprocess image using metadata in header image = ((rawimage-bias-overscan)*gain)/pixflat Args: rawimage : 2D numpy array directly from raw data file header : dict-like metadata, e.g. from FITS header, with keywords CAMERA, BIASSECx, DATASECx, CCDSECx where x = A, B, C, D for each of the 4 amplifiers (also supports old naming convention 1, 2, 3, 4). primary_header: dict-like metadata fit keywords EXPTIME, DOSVER DATE-OBS is also required if bias, pixflat, or mask=True Optional bias, pixflat, and mask can each be: False: don't apply that step True: use default calibration data for that night ndarray: use that array filename (str or unicode): read HDU 0 and use that Optional overscan features: overscan_per_row : bool, Subtract the overscan_col values row by row from the data. use_overscan_row : bool, Subtract off the overscan_row from the data (default: False). Requires ORSEC in the Header use_savgol : bool, Specify whether to use Savitsky-Golay filter for the overscan. (default: False). Requires use_overscan_row=True to have any effect. Optional variance model if model_variance=True Optional background subtraction with median filtering if bkgsub=True Optional disabling of cosmic ray rejection if nocosmic=True Optional disabling of dark trail correction if nodarktrail=True Optional bias image (testing only) may be provided by bias_img= Optional tuning of cosmic ray rejection parameters: cosmics_nsig: number of sigma above background required cosmics_cfudge: number of sigma inconsistent with PSF required cosmics_c2fudge: fudge factor applied to PSF Optional fit and subtraction of scattered light Returns Image object with member variables: pix : 2D preprocessed image in units of electrons per pixel ivar : 2D inverse variance of image mask : 2D mask of image (0=good) readnoise : 2D per-pixel readnoise of image meta : metadata dictionary TODO: define what keywords are included preprocessing includes the following steps: - bias image subtraction - overscan subtraction (from BIASSEC* keyword defined regions) - readnoise estimation (from BIASSEC* keyword defined regions) - gain correction (from GAIN* keywords) - pixel flat correction - cosmic ray masking - propagation of input known bad pixel mask - inverse variance estimation Notes: The bias image is subtracted before any other calculation to remove any non-uniformities in the overscan regions prior to calculating overscan levels and readnoise. The readnoise is an image not just one number per amp, because the pixflat image also affects the interpreted readnoise. The inverse variance is estimated from the readnoise and the image itself, and thus is biased. 
''' log = get_logger() header = header.copy() depend.setdep(header, 'DESI_SPECTRO_CALIB', os.getenv('DESI_SPECTRO_CALIB')) for key in ['DESI_SPECTRO_REDUX', 'SPECPROD']: if key in os.environ: depend.setdep(header, key, os.environ[key]) cfinder = None if ccd_calibration_filename is not False: cfinder = CalibFinder([header, primary_header], yaml_file=ccd_calibration_filename) #- TODO: Check for required keywords first #- Subtract bias image camera = header['CAMERA'].lower() #- convert rawimage to float64 : this is the output format of read_image rawimage = rawimage.astype(np.float64) # Savgol if cfinder and cfinder.haskey("USE_ORSEC"): use_overscan_row = cfinder.value("USE_ORSEC") if cfinder and cfinder.haskey("SAVGOL"): use_savgol = cfinder.value("SAVGOL") # Set bias image, as desired if bias_img is None: bias = get_calibration_image(cfinder, "BIAS", bias, header) else: bias = bias_img #- Check if this file uses amp names 1,2,3,4 (old) or A,B,C,D (new) amp_ids = get_amp_ids(header) #- Double check that we have the necessary keywords missing_keywords = list() for prefix in ['CCDSEC', 'BIASSEC']: for amp in amp_ids: key = prefix + amp if not key in header: log.error('No {} keyword in header'.format(key)) missing_keywords.append(key) if len(missing_keywords) > 0: raise KeyError("Missing keywords {}".format( ' '.join(missing_keywords))) #- Output arrays ny = 0 nx = 0 for amp in amp_ids: yy, xx = parse_sec_keyword(header['CCDSEC%s' % amp]) ny = max(ny, yy.stop) nx = max(nx, xx.stop) image = np.zeros((ny, nx)) readnoise = np.zeros_like(image) #- Load dark if cfinder and cfinder.haskey("DARK") and (dark is not False): #- Exposure time if cfinder and cfinder.haskey("EXPTIMEKEY"): exptime_key = cfinder.value("EXPTIMEKEY") log.info("Using exposure time keyword %s for dark normalization" % exptime_key) else: exptime_key = "EXPTIME" exptime = primary_header[exptime_key] log.info( "Use exptime = {} sec to compute the dark current".format(exptime)) dark_filename = cfinder.findfile("DARK") depend.setdep(header, 'CCD_CALIB_DARK', shorten_filename(dark_filename)) log.info(f'Using DARK model from {dark_filename}') # dark is multipled by exptime, or we use the non-linear dark model in the routine dark = read_dark(filename=dark_filename, exptime=exptime) if dark.shape == image.shape: log.info("dark is trimmed") trimmed_dark_in_electrons = dark dark_is_trimmed = True elif dark.shape == rawimage.shape: log.info("dark is not trimmed") trimmed_dark_in_electrons = np.zeros_like(image) dark_is_trimmed = False else: message = "incompatible dark shape={} when raw shape={} and preproc shape={}".format( dark.shape, rawimage.shape, image.shape) log.error(message) raise ValueError(message) else: dark = False if bias is not False: #- it's an array if bias.shape == rawimage.shape: log.info("subtracting bias") rawimage = rawimage - bias else: raise ValueError('shape mismatch bias {} != rawimage {}'.format( bias.shape, rawimage.shape)) #- Load mask mask = get_calibration_image(cfinder, "MASK", mask, header) if mask is False: mask = np.zeros(image.shape, dtype=np.int32) else: if mask.shape != image.shape: raise ValueError('shape mismatch mask {} != image {}'.format( mask.shape, image.shape)) for amp in amp_ids: # Grab the sections ov_col = parse_sec_keyword(header['BIASSEC' + amp]) if 'ORSEC' + amp in header.keys(): ov_row = parse_sec_keyword(header['ORSEC' + amp]) elif use_overscan_row: log.error('No ORSEC{} keyword; not using overscan_row'.format(amp)) use_overscan_row = False if nogain: gain = 1. 
else: #- Initial teststand data may be missing GAIN* keywords; don't crash if 'GAIN' + amp in header: gain = header['GAIN' + amp] #- gain = electrons / ADU else: if cfinder and cfinder.haskey('GAIN' + amp): gain = float(cfinder.value('GAIN' + amp)) log.info('Using GAIN{}={} from calibration data'.format( amp, gain)) else: gain = 1.0 log.warning( 'Missing keyword GAIN{} in header and nothing in calib data; using {}' .format(amp, gain)) #- Record what gain value was actually used header['GAIN' + amp] = gain #- Add saturation level if 'SATURLEV' + amp in header: saturlev_adu = header['SATURLEV' + amp] # in ADU else: if cfinder and cfinder.haskey('SATURLEV' + amp): saturlev_adu = float(cfinder.value('SATURLEV' + amp)) log.info('Using SATURLEV{}={} from calibration data'.format( amp, saturlev_adu)) else: saturlev_adu = 2**16 - 1 # 65535 is the max value in the images log.warning( 'Missing keyword SATURLEV{} in header and nothing in calib data; using {} ADU' .format(amp, saturlev_adu)) header['SATULEV' + amp] = (saturlev_adu, "saturation or non lin. level, in ADU, inc. bias") # Generate the overscan images raw_overscan_col = rawimage[ov_col].copy() if use_overscan_row: raw_overscan_row = rawimage[ov_row].copy() overscan_row = np.zeros_like(raw_overscan_row) # Remove overscan_col from overscan_row raw_overscan_squared = rawimage[ov_row[0], ov_col[1]].copy() for row in range(raw_overscan_row.shape[0]): o, r = _overscan(raw_overscan_squared[row]) overscan_row[row] = raw_overscan_row[row] - o # Now remove the overscan_col nrows = raw_overscan_col.shape[0] log.info("nrows in overscan=%d" % nrows) overscan_col = np.zeros(nrows) rdnoise = np.zeros(nrows) if (cfinder and cfinder.haskey('OVERSCAN' + amp) and cfinder.value("OVERSCAN" + amp).upper() == "PER_ROW") or overscan_per_row: log.info( "Subtracting overscan per row for amplifier %s of camera %s" % (amp, camera)) for j in range(nrows): if np.isnan(np.sum(overscan_col[j])): log.warning( "NaN values in row %d of overscan of amplifier %s of camera %s" % (j, amp, camera)) continue o, r = _overscan(raw_overscan_col[j]) #log.info("%d %f %f"%(j,o,r)) overscan_col[j] = o rdnoise[j] = r else: log.info( "Subtracting average overscan for amplifier %s of camera %s" % (amp, camera)) o, r = _overscan(raw_overscan_col) overscan_col += o rdnoise += r if bias is not False: jj = parse_sec_keyword(header['DATASEC' + amp]) o, biasnoise = _overscan(bias[jj]) new_rdnoise = np.sqrt(rdnoise**2 + biasnoise**2) log.info( "Master bias noise for AMP %s = %4.3f ADU, rdnoise %4.3f -> %4.3f ADU" % (amp, biasnoise, np.mean(rdnoise), np.mean(new_rdnoise))) rdnoise = new_rdnoise rdnoise *= gain median_rdnoise = np.median(rdnoise) median_overscan = np.median(overscan_col) log.info("Median rdnoise and overscan= %f %f" % (median_rdnoise, median_overscan)) kk = parse_sec_keyword(header['CCDSEC' + amp]) for j in range(nrows): readnoise[kk][j] = rdnoise[j] header['OVERSCN' + amp] = (median_overscan, 'ADUs (gain not applied)') if gain != 1: rdnoise_message = 'electrons (gain is applied)' gain_message = 'e/ADU (gain applied to image)' else: rdnoise_message = 'ADUs (gain not applied)' gain_message = 'gain not applied to image' header['OBSRDN' + amp] = (median_rdnoise, rdnoise_message) header['GAIN' + amp] = (gain, gain_message) #- Warn/error if measured readnoise is very different from expected if exists if 'RDNOISE' + amp in header: expected_readnoise = header['RDNOISE' + amp] if median_rdnoise < 0.5 * expected_readnoise: log.error( 'Amp {} measured readnoise {:.2f} < 0.5 * expected 
readnoise {:.2f}' .format(amp, median_rdnoise, expected_readnoise)) elif median_rdnoise < 0.9 * expected_readnoise: log.warning( 'Amp {} measured readnoise {:.2f} < 0.9 * expected readnoise {:.2f}' .format(amp, median_rdnoise, expected_readnoise)) elif median_rdnoise > 2.0 * expected_readnoise: log.error( 'Amp {} measured readnoise {:.2f} > 2 * expected readnoise {:.2f}' .format(amp, median_rdnoise, expected_readnoise)) elif median_rdnoise > 1.2 * expected_readnoise: log.warning( 'Amp {} measured readnoise {:.2f} > 1.2 * expected readnoise {:.2f}' .format(amp, median_rdnoise, expected_readnoise)) #else: # log.warning('Expected readnoise keyword {} missing'.format('RDNOISE'+amp)) log.info("Measured readnoise for AMP %s = %f" % (amp, median_rdnoise)) #- subtract overscan from data region and apply gain jj = parse_sec_keyword(header['DATASEC' + amp]) data = rawimage[jj].copy() # Subtract columns for k in range(nrows): data[k] -= overscan_col[k] saturlev_elec = gain * (saturlev_adu - np.mean(overscan_col)) header['SATUELE' + amp] = (saturlev_elec, "saturation or non lin. level, in electrons") # And now the rows if use_overscan_row: # Savgol? if use_savgol: log.info("Using savgol") collapse_oscan_row = np.zeros(overscan_row.shape[1]) for col in range(overscan_row.shape[1]): o, _ = _overscan(overscan_row[:, col]) collapse_oscan_row[col] = o oscan_row = _savgol_clipped(collapse_oscan_row, niter=0) oimg_row = np.outer(np.ones(data.shape[0]), oscan_row) data -= oimg_row else: o, r = _overscan(overscan_row) data -= o #- apply saturlev (defined in ADU), prior to multiplication by gain saturated = (rawimage[jj] >= saturlev_adu) mask[kk][saturated] |= ccdmask.SATURATED #- ADC to electrons image[kk] = data * gain if dark is not False: if not dark_is_trimmed: trimmed_dark_in_electrons[kk] = dark[jj] * gain if not nocrosstalk: #- apply cross-talk # the ccd looks like : # C D # A B # for cross talk, we need a symmetric 4x4 flip_matrix # of coordinates ABCD giving flip of both axis # when computing crosstalk of # A B C D # # A AA AB AC AD # B BA BB BC BD # C CA CB CC CD # D DA DB DC BB # orientation_matrix_defines change of orientation # fip_axis_0 = np.array([[1, 1, -1, -1], [1, 1, -1, -1], [-1, -1, 1, 1], [-1, -1, 1, 1]]) fip_axis_1 = np.array([[1, -1, 1, -1], [-1, 1, -1, 1], [1, -1, 1, -1], [-1, 1, -1, 1]]) for a1 in range(len(amp_ids)): amp1 = amp_ids[a1] ii1 = parse_sec_keyword(header['CCDSEC' + amp1]) a1flux = image[ii1] #a1mask=mask[ii1] for a2 in range(len(amp_ids)): if a1 == a2: continue amp2 = amp_ids[a2] if cfinder is None: continue if not cfinder.haskey("CROSSTALK%s%s" % (amp1, amp2)): continue crosstalk = cfinder.value("CROSSTALK%s%s" % (amp1, amp2)) if crosstalk == 0.: continue log.info("Correct for crosstalk=%f from AMP %s into %s" % (crosstalk, amp1, amp2)) a12flux = crosstalk * a1flux.copy() #a12mask=a1mask.copy() if fip_axis_0[a1, a2] == -1: a12flux = a12flux[::-1] #a12mask=a12mask[::-1] if fip_axis_1[a1, a2] == -1: a12flux = a12flux[:, ::-1] #a12mask=a12mask[:,::-1] ii2 = parse_sec_keyword(header['CCDSEC' + amp2]) image[ii2] -= a12flux # mask[ii2] |= a12mask (not sure we really need to propagate the mask) #- Poisson noise variance (prior to dark subtraction and prior to pixel flat field) #- This is biasing, but that's what we have for now poisson_var = image.clip(0) #- subtract dark after multiplication by gain if dark is not False: log.info("subtracting dark") image -= trimmed_dark_in_electrons # measure its noise new_readnoise = np.zeros(readnoise.shape) for amp in amp_ids: kk = 
parse_sec_keyword(header['CCDSEC' + amp]) o, darknoise = _overscan(trimmed_dark_in_electrons[kk]) new_readnoise[kk] = np.sqrt(readnoise[kk]**2 + darknoise**2) log.info( "Master dark noise for AMP %s = %4.3f elec, rdnoise %4.3f -> %4.3f elec" % (amp, darknoise, np.mean( readnoise[kk]), np.mean(new_readnoise[kk]))) readnoise = new_readnoise #- Correct for dark trails if any if not nodarktrail and cfinder is not None: for amp in amp_ids: if cfinder.haskey("DARKTRAILAMP%s" % amp): amplitude = cfinder.value("DARKTRAILAMP%s" % amp) width = cfinder.value("DARKTRAILWIDTH%s" % amp) ii = _parse_sec_keyword(header["CCDSEC" + amp]) log.info( "Removing dark trails for amplifier %s with width=%3.1f and amplitude=%5.4f" % (amp, width, amplitude)) correct_dark_trail(image, ii, left=((amp == "B") | (amp == "D")), width=width, amplitude=amplitude) #- Divide by pixflat image pixflat = get_calibration_image(cfinder, "PIXFLAT", pixflat, header) if pixflat is not False: if pixflat.shape != image.shape: raise ValueError('shape mismatch pixflat {} != image {}'.format( pixflat.shape, image.shape)) almost_zero = 0.001 if np.all(pixflat > almost_zero): image /= pixflat readnoise /= pixflat poisson_var /= pixflat**2 else: good = (pixflat > almost_zero) image[good] /= pixflat[good] readnoise[good] /= pixflat[good] poisson_var[good] /= pixflat[good]**2 mask[~good] |= ccdmask.PIXFLATZERO lowpixflat = (0 < pixflat) & (pixflat < 0.1) if np.any(lowpixflat): mask[lowpixflat] |= ccdmask.PIXFLATLOW #- Inverse variance, estimated directly from the data (BEWARE: biased!) var = poisson_var + readnoise**2 ivar = np.zeros(var.shape) ivar[var > 0] = 1.0 / var[var > 0] #- Ridiculously high readnoise is bad mask[readnoise > 100] |= ccdmask.BADREADNOISE if bkgsub: bkg = _background(image, header) image -= bkg img = Image(image, ivar=ivar, mask=mask, meta=header, readnoise=readnoise, camera=camera) #- update img.mask to mask cosmic rays if not nocosmic: cosmics.reject_cosmic_rays(img, nsig=cosmics_nsig, cfudge=cosmics_cfudge, c2fudge=cosmics_c2fudge) mask = img.mask xyset = None if model_variance: psf = None if psf_filename is None: psf_filename = cfinder.findfile("PSF") depend.setdep(header, 'CCD_CALIB_PSF', shorten_filename(psf_filename)) xyset = read_xytraceset(psf_filename) fiberflat = None with_spectral_smoothing = True with_sky_model = True if with_sky_model: log.debug("Will use a sky model to model the spectra") fiberflat_filename = cfinder.findfile("FIBERFLAT") depend.setdep(header, 'CCD_CALIB_FIBERFLAT', shorten_filename(fiberflat_filename)) if fiberflat_filename is not None: fiberflat = read_fiberflat(fiberflat_filename) log.info("compute an image model after dark correction and pixel flat") nsig = 5. mimage = compute_image_model( img, xyset, fiberflat=fiberflat, with_spectral_smoothing=with_spectral_smoothing, with_sky_model=with_sky_model, spectral_smoothing_nsig=nsig, psf=psf) # here we bring back original image for large outliers # this allows to have a correct ivar for cosmic rays and bright sources eps = 0.1 out = (((ivar > 0) * (image - mimage)**2 / (1. / (ivar + (ivar == 0)) + (0.1 * mimage)**2)) > nsig**2) # out &= (image>mimage) # could request this to be conservative on the variance ... 
but this could cause other issues mimage[out] = image[out] log.info("use image model to compute variance") if bkgsub: mimage += bkg if pixflat is not False: # undo pixflat mimage *= pixflat if dark is not False: mimage += dark poisson_var = mimage.clip(0) if pixflat is not False: if np.all(pixflat > almost_zero): poisson_var /= pixflat**2 else: poisson_var[good] /= pixflat[good]**2 var = poisson_var + readnoise**2 ivar[var > 0] = 1.0 / var[var > 0] # regenerate img object img = Image(image, ivar=ivar, mask=mask, meta=header, readnoise=readnoise, camera=camera) if remove_scattered_light: if xyset is None: if psf_filename is None: psf_filename = cfinder.findfile("PSF") depend.setdep(header, 'SCATTERED_LIGHT_PSF', shorten_filename(psf_filename)) xyset = read_xytraceset(psf_filename) img.pix -= model_scattered_light(img, xyset) #- Extend header with primary header keywords too addkeys(img.meta, primary_header) return img
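#- The variance bookkeeping above is easy to lose inside the amplifier loop. The
#- following is a minimal illustrative sketch (not the desispec implementation;
#- function and variable names are hypothetical): the Poisson term comes from a
#- clipped estimate of the signal (the data itself, or a model image when
#- model_variance is set), is added in quadrature to the per-pixel readnoise,
#- and is only inverted where the total variance is positive.
import numpy as np

def _sketch_estimate_ivar(signal_electrons, readnoise_electrons):
    """Illustrative inverse-variance estimate; all inputs in electrons."""
    poisson_var = signal_electrons.clip(0)        # Poisson term; biased when taken from the data itself
    var = poisson_var + readnoise_electrons**2    # total per-pixel variance
    ivar = np.zeros_like(var)
    ivar[var > 0] = 1.0 / var[var > 0]            # leave ivar=0 where the variance is undefined
    return ivar
#- Using a model image for the Poisson term (the model_variance branch above) keeps
#- the ivar sensible on cosmic rays and bright sources, at the cost of depending on
#- the quality of the image model.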
def main(args): log = get_logger() if (args.fiberflat is None) and (args.sky is None) and (args.calib is None): log.critical('no --fiberflat, --sky, or --calib; nothing to do ?!?') sys.exit(12) frame = read_frame(args.infile) #- Raw scores already added in extraction, but just in case they weren't #- it is harmless to rerun to make sure we have them. compute_and_append_frame_scores(frame, suffix="RAW") if args.cosmics_nsig > 0 and args.sky == None: # Reject cosmics (otherwise do it after sky subtraction) log.info("cosmics ray 1D rejection") reject_cosmic_rays_1d(frame, args.cosmics_nsig) if args.fiberflat != None: log.info("apply fiberflat") # read fiberflat fiberflat = read_fiberflat(args.fiberflat) # apply fiberflat to all fibers apply_fiberflat(frame, fiberflat) compute_and_append_frame_scores(frame, suffix="FFLAT") if args.sky != None: # read sky skymodel = read_sky(args.sky) if args.cosmics_nsig > 0: # first subtract sky without throughput correction subtract_sky(frame, skymodel, throughput_correction=False) # then find cosmics log.info("cosmics ray 1D rejection after sky subtraction") reject_cosmic_rays_1d(frame, args.cosmics_nsig) if args.sky_throughput_correction: # and (re-)subtract sky, but just the correction term subtract_sky(frame, skymodel, throughput_correction=True, default_throughput_correction=0.) else: # subtract sky subtract_sky(frame, skymodel, throughput_correction=args.sky_throughput_correction) compute_and_append_frame_scores(frame, suffix="SKYSUB") if args.calib != None: log.info("calibrate") # read calibration fluxcalib = read_flux_calibration(args.calib) # apply calibration apply_flux_calibration(frame, fluxcalib) compute_and_append_frame_scores(frame, suffix="CALIB") # save output write_frame(args.outfile, frame, units='1e-17 erg/(s cm2 Angstrom)') log.info("successfully wrote %s" % args.outfile)
#exp='00054444'
#night='20191115'
#exp='00028364'
#f0=data_dir+'/'+night+'/'+exp+'/fiberflat-'+camera0+'-'+exp+'.fits'
#f1=data_dir+'/'+night+'/'+exp+'/fiberflat-'+camera1+'-'+exp+'.fits'
#f2=data_dir+'/'+night+'/'+exp+'/fiberflat-'+camera2+'-'+exp+'.fits'
#f3=data_dir+'/'+night+'/'+exp+'/fiberflat-'+camera3+'-'+exp+'.fits'
#f4=data_dir+'/'+night+'/'+exp+'/fiberflat-'+camera4+'-'+exp+'.fits'
#f5=data_dir+'/'+night+'/'+exp+'/fiberflat-'+camera5+'-'+exp+'.fits'
#f6=data_dir+'/'+night+'/'+exp+'/fiberflat-'+camera6+'-'+exp+'.fits'
#f7=data_dir+'/'+night+'/'+exp+'/fiberflat-'+camera7+'-'+exp+'.fits'
#f8=data_dir+'/'+night+'/'+exp+'/fiberflat-'+camera8+'-'+exp+'.fits'
#f9=data_dir+'/'+night+'/'+exp+'/fiberflat-'+camera9+'-'+exp+'.fits'

#- Read each per-camera fiberflat, skipping files that are missing or unreadable
try:
    d0 = read_fiberflat(f0)
except Exception:
    pass
try:
    d1 = read_fiberflat(f1)
except Exception:
    pass
try:
    d2 = read_fiberflat(f2)
except Exception:
    pass
try:
    d3 = read_fiberflat(f3)
except Exception:
    pass
try:
    d4 = read_fiberflat(f4)
except Exception:
    pass
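#- If the downstream code does not rely on the individual d0..d9 names, the same
#- reads collapse to a loop. This is only a sketch; it assumes the data_dir, night,
#- exp and camera0..camera9 variables referenced in the commented-out paths above
#- are defined earlier in this script.
#
# flats = {}
# for cam in (camera0, camera1, camera2, camera3, camera4,
#             camera5, camera6, camera7, camera8, camera9):
#     path = data_dir + '/' + night + '/' + exp + '/fiberflat-' + cam + '-' + exp + '.fits'
#     try:
#         flats[cam] = read_fiberflat(path)
#     except Exception:
#         pass  # missing or unreadable file: skip this camera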
def main() : """ finds the best models of all standard stars in the frame and normlize the model flux. Output is written to a file and will be called for calibration. """ parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('--fiberflatexpid', type = int, help = 'fiberflat exposure ID') parser.add_argument('--fibermap', type = str, help = 'path of fibermap file') parser.add_argument('--models', type = str, help = 'path of spectro-photometric stellar spectra fits') parser.add_argument('--spectrograph', type = int, default = 0, help = 'spectrograph number, can go 0-9') parser.add_argument('--outfile', type = str, help = 'output file for normalized stdstar model flux') args = parser.parse_args() log = get_logger() # Call necessary environment variables. No need if add argument to give full file path. if 'DESI_SPECTRO_REDUX' not in os.environ: raise RuntimeError('Set environment DESI_SPECTRO_REDUX. It is needed to read the needed datafiles') DESI_SPECTRO_REDUX=os.environ['DESI_SPECTRO_REDUX'] PRODNAME=os.environ['PRODNAME'] if 'DESISIM' not in os.environ: raise RuntimeError('Set environment DESISIM. It will be neede to read the filter transmission files for calibration') DESISIM=os.environ['DESISIM'] # to read the filter transmission files if args.fibermap is None or args.models is None or \ args.spectrograph is None or args.outfile is None or \ args.fiberflatexpid is None: log.critical('Missing a required argument') parser.print_help() sys.exit(12) # read Standard Stars from the fibermap file # returns the Fiber id, filter names and mags for the standard stars fiber_tbdata,fiber_header=io.read_fibermap(args.fibermap, header=True) #- Trim to just fibers on this spectrograph ii = (500*args.spectrograph <= fiber_tbdata["FIBER"]) ii &= (fiber_tbdata["FIBER"] < 500*(args.spectrograph+1)) fiber_tbdata = fiber_tbdata[ii] #- Get info for the standard stars refStarIdx=np.where(fiber_tbdata["OBJTYPE"]=="STD") refFibers=fiber_tbdata["FIBER"][refStarIdx] refFilters=fiber_tbdata["FILTER"][refStarIdx] refMags=fiber_tbdata["MAG"] fibers={"FIBER":refFibers,"FILTER":refFilters,"MAG":refMags} NIGHT=fiber_header['NIGHT'] EXPID=fiber_header['EXPID'] filters=fibers["FILTER"] if 'DESISIM' not in os.environ: raise RuntimeError('Set environment DESISIM. 
Can not find filter response files') basepath=DESISIM+"/data/" #now load all the skyfiles, framefiles, fiberflatfiles etc # all three channels files are simultaneously treated for model fitting skyfile={} framefile={} fiberflatfile={} for i in ["b","r","z"]: camera = i+str(args.spectrograph) skyfile[i] = io.findfile('sky', NIGHT, EXPID, camera) framefile[i] = io.findfile('frame', NIGHT, EXPID, camera) fiberflatfile[i] = io.findfile('fiberflat', NIGHT, args.fiberflatexpid, camera) #Read Frames, Flats and Sky files frameFlux={} frameIvar={} frameWave={} frameResolution={} framehdr={} fiberFlat={} ivarFlat={} maskFlat={} meanspecFlat={} waveFlat={} headerFlat={} sky={} skyivar={} skymask={} skywave={} skyhdr={} for i in ["b","r","z"]: #arg=(night,expid,'%s%s'%(i,spectrograph)) #- minimal code change for refactored I/O, while not taking advantage of simplified structure frame = io.read_frame(framefile[i]) frameFlux[i] = frame.flux frameIvar[i] = frame.ivar frameWave[i] = frame.wave frameResolution[i] = frame.resolution_data framehdr[i] = frame.header ff = io.read_fiberflat(fiberflatfile[i]) fiberFlat[i] = ff.fiberflat ivarFlat[i] = ff.ivar maskFlat[i] = ff.mask meanspecFlat[i] = ff.meanspec waveFlat[i] = ff.wave headerFlat[i] = ff.header skymodel = io.read_sky(skyfile[i]) sky[i] = skymodel.flux skyivar[i] = skymodel.ivar skymask[i] = skymodel.mask skywave[i] = skymodel.wave skyhdr[i] = skymodel.header # Convolve Sky with Detector Resolution, so as to subtract from data. Convolve for all 500 specs. Subtracting sky this way should be equivalent to sky_subtract convolvedsky={"b":sky["b"], "r":sky["r"], "z":sky["z"]} # Read the standard Star data and divide by flat and subtract sky stars=[] ivars=[] for i in fibers["FIBER"]: #flat and sky should have same wavelength binning as data, otherwise should be rebinned. stars.append((i,{"b":[frameFlux["b"][i]/fiberFlat["b"][i]-convolvedsky["b"][i],frameWave["b"]], "r":[frameFlux["r"][i]/fiberFlat["r"][i]-convolvedsky["r"][i],frameWave["r"]], "z":[frameFlux["z"][i]/fiberFlat["z"][i]-convolvedsky["z"][i],frameWave]},fibers["MAG"][i])) ivars.append((i,{"b":[frameIvar["b"][i]],"r":[frameIvar["r"][i,:]],"z":[frameIvar["z"][i,:]]})) stdwave,stdflux,templateid=io.read_stdstar_templates(args.models) #- Trim standard star wavelengths to just the range we need minwave = min([min(w) for w in frameWave.values()]) maxwave = max([max(w) for w in frameWave.values()]) ii = (minwave-10 < stdwave) & (stdwave < maxwave+10) stdwave = stdwave[ii] stdflux = stdflux[:, ii] log.info('Number of Standard Stars in this frame: {0:d}'.format(len(stars))) if len(stars) == 0: log.critical("No standard stars! Exiting") sys.exit(1) # Now for each star, find the best model and normalize. normflux=[] bestModelIndex=np.arange(len(stars)) templateID=np.arange(len(stars)) chi2dof=np.zeros(len(stars)) #- TODO: don't use 'l' as a variable name. 
Can look like a '1' for k,l in enumerate(stars): log.info("checking best model for star {0}".format(l[0])) starindex=l[0] mags=l[2] filters=fibers["FILTER"][k] rflux=stars[k][1]["r"][0] bflux=stars[k][1]["b"][0] zflux=stars[k][1]["z"][0] flux={"b":bflux,"r":rflux,"z":zflux} #print ivars rivar=ivars[k][1]["r"][0] bivar=ivars[k][1]["b"][0] zivar=ivars[k][1]["z"][0] ivar={"b":bivar,"r":rivar,"z":zivar} resol_star={"r":frameResolution["r"][l[0]],"b":frameResolution["b"][l[0]],"z":frameResolution["z"][l[0]]} # Now find the best Model bestModelIndex[k],bestmodelWave,bestModelFlux,chi2dof[k]=match_templates(frameWave,flux,ivar,resol_star,stdwave,stdflux) log.info('Star Fiber: {0}; Best Model Fiber: {1}; TemplateID: {2}; Chisq/dof: {3}'.format(l[0],bestModelIndex[k],templateid[bestModelIndex[k]],chi2dof[k])) # Normalize the best model using reported magnitude modelwave,normalizedflux=normalize_templates(stdwave,stdflux[bestModelIndex[k]],mags,filters,basepath) normflux.append(normalizedflux) # Now write the normalized flux for all best models to a file normflux=np.array(normflux) stdfibers=fibers["FIBER"] data={} data['BESTMODEL']=bestModelIndex data['CHI2DOF']=chi2dof data['TEMPLATEID']=templateid[bestModelIndex] norm_model_file=args.outfile io.write_stdstar_model(norm_model_file,normflux,stdwave,stdfibers,data)
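#- match_templates is used above as a black box. Conceptually, for each star it
#- compares the observed spectra against every candidate model convolved with the
#- per-fiber resolution and keeps the model with the smallest chi2 after fitting a
#- free amplitude. A minimal sketch of that inner amplitude fit (hypothetical
#- helper, not the desispec routine; the resolution convolution is omitted):
import numpy as np

def _sketch_best_amplitude_chi2(flux, ivar, model):
    """Fit a single scale a minimizing sum(ivar*(flux - a*model)**2); return (a, chi2/dof)."""
    a = np.sum(ivar * flux * model) / np.sum(ivar * model**2)
    chi2 = np.sum(ivar * (flux - a * model)**2)
    dof = np.sum(ivar > 0) - 1
    return a, chi2 / max(dof, 1)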
def main(args): """ finds the best models of all standard stars in the frame and normlize the model flux. Output is written to a file and will be called for calibration. """ log = get_logger() log.info("mag delta %s = %f (for the pre-selection of stellar models)" % (args.color, args.delta_color)) frames = {} flats = {} skies = {} spectrograph = None starfibers = None starindices = None fibermap = None # READ DATA ############################################ for filename in args.frames: log.info("reading %s" % filename) frame = io.read_frame(filename) header = fits.getheader(filename, 0) frame_fibermap = frame.fibermap frame_starindices = np.where(frame_fibermap["OBJTYPE"] == "STD")[0] camera = safe_read_key(header, "CAMERA").strip().lower() if spectrograph is None: spectrograph = frame.spectrograph fibermap = frame_fibermap starindices = frame_starindices starfibers = fibermap["FIBER"][starindices] elif spectrograph != frame.spectrograph: log.error("incompatible spectrographs %d != %d" % (spectrograph, frame.spectrograph)) raise ValueError("incompatible spectrographs %d != %d" % (spectrograph, frame.spectrograph)) elif starindices.size != frame_starindices.size or np.sum( starindices != frame_starindices) > 0: log.error("incompatible fibermap") raise ValueError("incompatible fibermap") if frames.has_key(camera): log.error( "cannot handle for now several frame of same camera (%s)" % camera) raise ValueError( "cannot handle for now several frame of same camera (%s)" % camera) frames[camera] = frame for filename in args.skymodels: log.info("reading %s" % filename) sky = io.read_sky(filename) header = fits.getheader(filename, 0) camera = safe_read_key(header, "CAMERA").strip().lower() # NEED TO ADD MORE CHECKS if skies.has_key(camera): log.error("cannot handle several skymodels of same camera (%s)" % camera) raise ValueError( "cannot handle several skymodels of same camera (%s)" % camera) skies[camera] = sky for filename in args.fiberflats: log.info("reading %s" % filename) header = fits.getheader(filename, 0) flat = io.read_fiberflat(filename) camera = safe_read_key(header, "CAMERA").strip().lower() # NEED TO ADD MORE CHECKS if flats.has_key(camera): log.error("cannot handle several flats of same camera (%s)" % camera) raise ValueError( "cannot handle several flats of same camera (%s)" % camera) flats[camera] = flat if starindices.size == 0: log.error("no STD star found in fibermap") raise ValueError("no STD star found in fibermap") log.info("found %d STD stars" % starindices.size) imaging_filters = fibermap["FILTER"][starindices] imaging_mags = fibermap["MAG"][starindices] log.warning( "NO MAG ERRORS IN FIBERMAP, I AM IGNORING MEASUREMENT ERRORS !!") log.warning( "NO EXTINCTION VALUES IN FIBERMAP, I AM IGNORING THIS FOR NOW !!") # DIVIDE FLAT AND SUBTRACT SKY , TRIM DATA ############################################ for cam in frames: if not skies.has_key(cam): log.warning("Missing sky for %s" % cam) frames.pop(cam) continue if not flats.has_key(cam): log.warning("Missing flat for %s" % cam) frames.pop(cam) continue frames[cam].flux = frames[cam].flux[starindices] frames[cam].ivar = frames[cam].ivar[starindices] frames[cam].ivar *= (frames[cam].mask[starindices] == 0) frames[cam].ivar *= (skies[cam].ivar[starindices] != 0) frames[cam].ivar *= (skies[cam].mask[starindices] == 0) frames[cam].ivar *= (flats[cam].ivar[starindices] != 0) frames[cam].ivar *= (flats[cam].mask[starindices] == 0) frames[cam].flux *= (frames[cam].ivar > 0) # just for clean plots for star in 
range(frames[cam].flux.shape[0]): ok = np.where((frames[cam].ivar[star] > 0) & (flats[cam].fiberflat[star] != 0))[0] if ok.size > 0: frames[cam].flux[star] = frames[cam].flux[star] / flats[ cam].fiberflat[star] - skies[cam].flux[star] nstars = starindices.size starindices = None # we don't need this anymore # READ MODELS ############################################ log.info("reading star models in %s" % args.starmodels) stdwave, stdflux, templateid, teff, logg, feh = io.read_stdstar_templates( args.starmodels) # COMPUTE MAGS OF MODELS FOR EACH STD STAR MAG ############################################ model_filters = [] for tmp in np.unique(imaging_filters): if len(tmp) > 0: # can be one empty entry model_filters.append(tmp) log.info("computing model mags %s" % model_filters) model_mags = np.zeros((stdflux.shape[0], len(model_filters))) fluxunits = 1e-17 * units.erg / units.s / units.cm**2 / units.Angstrom for index in range(len(model_filters)): filter_response = load_filter(model_filters[index]) for m in range(stdflux.shape[0]): model_mags[m, index] = filter_response.get_ab_magnitude( stdflux[m] * fluxunits, stdwave) log.info("done computing model mags") # LOOP ON STARS TO FIND BEST MODEL ############################################ bestModelIndex = np.arange(nstars) templateID = np.arange(nstars) chi2dof = np.zeros((nstars)) redshift = np.zeros((nstars)) normflux = [] for star in range(nstars): log.info("finding best model for observed star #%d" % star) # np.array of wave,flux,ivar,resol wave = {} flux = {} ivar = {} resolution_data = {} for camera in frames: band = camera[0] wave[band] = frames[camera].wave flux[band] = frames[camera].flux[star] ivar[band] = frames[camera].ivar[star] resolution_data[band] = frames[camera].resolution_data[star] # preselec models based on magnitudes # compute star color index1, index2 = get_color_filter_indices(imaging_filters[star], args.color) if index1 < 0 or index2 < 0: log.error("cannot compute '%s' color from %s" % (color_name, filters)) filter1 = imaging_filters[star][index1] filter2 = imaging_filters[star][index2] star_color = imaging_mags[star][index1] - imaging_mags[star][index2] # compute models color model_index1 = -1 model_index2 = -1 for i, fname in enumerate(model_filters): if fname == filter1: model_index1 = i elif fname == filter2: model_index2 = i if model_index1 < 0 or model_index2 < 0: log.error("cannot compute '%s' model color from %s" % (color_name, filters)) model_colors = model_mags[:, model_index1] - model_mags[:, model_index2] # selection selection = np.where( np.abs(model_colors - star_color) < args.delta_color)[0] log.info( "star#%d fiber #%d, %s = %s-%s = %f, number of pre-selected models = %d/%d" % (star, starfibers[star], args.color, filter1, filter2, star_color, selection.size, stdflux.shape[0])) index_in_selection, redshift[star], chi2dof[star] = match_templates( wave, flux, ivar, resolution_data, stdwave, stdflux[selection], teff[selection], logg[selection], feh[selection], ncpu=args.ncpu, z_max=args.z_max, z_res=args.z_res) bestModelIndex[star] = selection[index_in_selection] log.info( 'Star Fiber: {0}; TemplateID: {1}; Redshift: {2}; Chisq/dof: {3}'. 
format(starfibers[star], bestModelIndex[star], redshift[star], chi2dof[star])) # Apply redshift to original spectrum at full resolution tmp = np.interp(stdwave, stdwave / (1 + redshift[star]), stdflux[bestModelIndex[star]]) # Normalize the best model using reported magnitude normalizedflux = normalize_templates(stdwave, tmp, imaging_mags[star], imaging_filters[star]) normflux.append(normalizedflux) # Now write the normalized flux for all best models to a file normflux = np.array(normflux) data = {} data['BESTMODEL'] = bestModelIndex data['TEMPLATEID'] = bestModelIndex # IS THAT IT? data['CHI2DOF'] = chi2dof data['REDSHIFT'] = redshift norm_model_file = args.outfile io.write_stdstar_models(args.outfile, normflux, stdwave, starfibers, data)
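#- normalize_templates rescales the redshifted best-fit model so that its synthetic
#- magnitude matches the star's imaging magnitude. For AB magnitudes that scale is a
#- pure flux ratio; a minimal sketch (illustrative helper, not the desispec function):
def _sketch_normalize_to_mag(model_flux, model_mag, target_mag):
    """Scale model_flux so its synthetic AB magnitude equals target_mag."""
    scale = 10.0 ** (-0.4 * (target_mag - model_mag))
    return model_flux * scale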
def main(args): log = get_logger() cmd = [ 'desi_compute_fluxcalibration', ] for key, value in args.__dict__.items(): if value is not None: cmd += ['--' + key, str(value)] cmd = ' '.join(cmd) log.info(cmd) log.info("read frame") # read frame frame = read_frame(args.infile) # Set fibermask flagged spectra to have 0 flux and variance frame = get_fiberbitmasked_frame(frame, bitmask='flux', ivar_framemask=True) log.info("apply fiberflat") # read fiberflat fiberflat = read_fiberflat(args.fiberflat) # apply fiberflat apply_fiberflat(frame, fiberflat) log.info("subtract sky") # read sky skymodel = read_sky(args.sky) # subtract sky subtract_sky(frame, skymodel) log.info("compute flux calibration") # read models model_flux, model_wave, model_fibers, model_metadata = read_stdstar_models( args.models) ok = np.ones(len(model_metadata), dtype=bool) if args.chi2cut > 0: log.info("apply cut CHI2DOF<{}".format(args.chi2cut)) good = (model_metadata["CHI2DOF"] < args.chi2cut) bad = ~good ok &= good if np.any(bad): log.info(" discard {} stars with CHI2DOF= {}".format( np.sum(bad), list(model_metadata["CHI2DOF"][bad]))) legacy_filters = ('G-R', 'R-Z') gaia_filters = ('GAIA-BP-RP', 'GAIA-G-RP') model_column_list = model_metadata.columns.names if args.color is None: if 'MODEL_G-R' in model_column_list: color = 'G-R' elif 'MODEL_GAIA-BP-RP' in model_column_list: log.info('Using Gaia filters') color = 'GAIA-BP-RP' else: log.error( "Can't find either G-R or BP-RP color in the model file.") sys.exit(15) else: if args.color not in legacy_filters and args.color not in gaia_filters: log.error( 'Color name {} is not allowed, must be one of {} {}'.format( args.color, legacy_filters, gaia_filters)) sys.exit(14) color = args.color if color not in model_column_list: # This should't happen log.error( 'The color {} was not computed in the models'.format(color)) sys.exit(16) if args.delta_color_cut > 0: log.info("apply cut |delta color|<{}".format(args.delta_color_cut)) good = (np.abs(model_metadata["MODEL_" + color] - model_metadata["DATA_" + color]) < args.delta_color_cut) bad = ok & (~good) ok &= good if np.any(bad): vals = model_metadata["MODEL_" + color][bad] - model_metadata["DATA_" + color][bad] log.info(" discard {} stars with dcolor= {}".format( np.sum(bad), list(vals))) if args.min_color is not None: log.info("apply cut DATA_{}>{}".format(color, args.min_color)) good = (model_metadata["DATA_{}".format(color)] > args.min_color) bad = ok & (~good) ok &= good if np.any(bad): vals = model_metadata["DATA_{}".format(color)][bad] log.info(" discard {} stars with {}= {}".format( np.sum(bad), color, list(vals))) if args.chi2cut_nsig > 0: # automatically reject stars that ar chi2 outliers mchi2 = np.median(model_metadata["CHI2DOF"]) rmschi2 = np.std(model_metadata["CHI2DOF"]) maxchi2 = mchi2 + args.chi2cut_nsig * rmschi2 log.info("apply cut CHI2DOF<{} based on chi2cut_nsig={}".format( maxchi2, args.chi2cut_nsig)) good = (model_metadata["CHI2DOF"] <= maxchi2) bad = ok & (~good) ok &= good if np.any(bad): log.info(" discard {} stars with CHI2DOF={}".format( np.sum(bad), list(model_metadata["CHI2DOF"][bad]))) ok = np.where(ok)[0] if ok.size == 0: log.error("selection cuts discarded all stars") sys.exit(12) nstars = model_flux.shape[0] nbad = nstars - ok.size if nbad > 0: log.warning("discarding %d star(s) out of %d because of cuts" % (nbad, nstars)) model_flux = model_flux[ok] model_fibers = model_fibers[ok] model_metadata = model_metadata[:][ok] # check that the model_fibers are actually standard stars fibermap = frame.fibermap 
## check whether star fibers from args.models are consistent with fibers from fibermap ## if not print the OBJTYPE from fibermap for the fibers numbers in args.models and exit fibermap_std_indices = np.where(isStdStar(fibermap))[0] if np.any(~np.in1d(model_fibers % 500, fibermap_std_indices)): target_colnames, target_masks, survey = main_cmx_or_sv(fibermap) colname = target_colnames[0] for i in model_fibers % 500: log.error( "inconsistency with spectrum {}, OBJTYPE={}, {}={} in fibermap" .format(i, fibermap["OBJTYPE"][i], colname, fibermap[colname][i])) sys.exit(12) # Make sure the fibers of interest aren't entirely masked. if np.sum( np.sum(frame.ivar[model_fibers % 500, :] == 0, axis=1) == frame.nwave) == len(model_fibers): log.warning('All standard-star spectra are masked!') return fluxcalib = compute_flux_calibration( frame, model_wave, model_flux, model_fibers % 500, highest_throughput_nstars=args.highest_throughput, exposure_seeing_fwhm=args.seeing_fwhm) # QA if (args.qafile is not None): from desispec.io import write_qa_frame from desispec.io.qa import load_qa_frame from desispec.qa import qa_plots log.info("performing fluxcalib QA") # Load qaframe = load_qa_frame(args.qafile, frame_meta=frame.meta, flavor=frame.meta['FLAVOR']) # Run #import pdb; pdb.set_trace() qaframe.run_qa('FLUXCALIB', (frame, fluxcalib)) # Write if args.qafile is not None: write_qa_frame(args.qafile, qaframe) log.info("successfully wrote {:s}".format(args.qafile)) # Figure(s) if args.qafig is not None: qa_plots.frame_fluxcalib(args.qafig, qaframe, frame, fluxcalib) # record inputs frame.meta['IN_FRAME'] = shorten_filename(args.infile) frame.meta['IN_SKY'] = shorten_filename(args.sky) frame.meta['FIBERFLT'] = shorten_filename(args.fiberflat) frame.meta['STDMODEL'] = shorten_filename(args.models) # write result write_flux_calibration(args.outfile, fluxcalib, header=frame.meta) log.info("successfully wrote %s" % args.outfile)
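#- The successive CHI2DOF, color and chi2-outlier cuts above all share the same
#- bookkeeping: a running boolean mask `ok` is narrowed cut by cut, and only the
#- stars newly rejected by each cut are reported. A compressed sketch of that
#- pattern (hypothetical helper):
import numpy as np

def _sketch_apply_cut(ok, good, label, values, log):
    """Narrow the running selection `ok` by `good`, logging only newly-rejected entries."""
    newly_bad = ok & (~good)
    if np.any(newly_bad):
        log.info(" discard {} stars with {}= {}".format(
            np.sum(newly_bad), label, list(values[newly_bad])))
    return ok & good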
def main(args): log = get_logger() log.info("read frame") # read frame frame = read_frame(args.infile) log.info("apply fiberflat") # read fiberflat fiberflat = read_fiberflat(args.fiberflat) # apply fiberflat apply_fiberflat(frame, fiberflat) log.info("subtract sky") # read sky skymodel = read_sky(args.sky) # subtract sky subtract_sky(frame, skymodel) log.info("compute flux calibration") # read models model_flux, model_wave, model_fibers, model_metadata = read_stdstar_models( args.models) if args.chi2cut > 0: ok = np.where(model_metadata["CHI2DOF"] < args.chi2cut)[0] if ok.size == 0: log.error("chi2cut has discarded all stars") sys.exit(12) nstars = model_flux.shape[0] nbad = nstars - ok.size if nbad > 0: log.warning("discarding %d star(s) out of %d because of chi2cut" % (nbad, nstars)) model_flux = model_flux[ok] model_fibers = model_fibers[ok] model_metadata = model_metadata[:][ok] if args.delta_color_cut > 0: ok = np.where( np.abs(model_metadata["MODEL_G-R"] - model_metadata["DATA_G-R"]) < args.delta_color_cut)[0] nstars = model_flux.shape[0] nbad = nstars - ok.size if nbad > 0: log.warning( "discarding %d star(s) out of %d because |delta_color|>%f" % (nbad, nstars, args.delta_color_cut)) model_flux = model_flux[ok] model_fibers = model_fibers[ok] model_metadata = model_metadata[:][ok] # automatically reject stars that ar chi2 outliers if args.chi2cut_nsig > 0: mchi2 = np.median(model_metadata["CHI2DOF"]) rmschi2 = np.std(model_metadata["CHI2DOF"]) maxchi2 = mchi2 + args.chi2cut_nsig * rmschi2 ok = np.where(model_metadata["CHI2DOF"] <= maxchi2)[0] nstars = model_flux.shape[0] nbad = nstars - ok.size if nbad > 0: log.warning( "discarding %d star(s) out of %d because reduced chi2 outliers (at %d sigma, giving rchi2<%f )" % (nbad, nstars, args.chi2cut_nsig, maxchi2)) model_flux = model_flux[ok] model_fibers = model_fibers[ok] model_metadata = model_metadata[:][ok] # check that the model_fibers are actually standard stars fibermap = frame.fibermap ## check whether star fibers from args.models are consistent with fibers from fibermap ## if not print the OBJTYPE from fibermap for the fibers numbers in args.models and exit w = np.where(fibermap["OBJTYPE"][model_fibers % 500] != 'STD')[0] if len(w) > 0: for i in model_fibers % 500: log.error( "inconsistency with spectrum %d, OBJTYPE='%s' in fibermap" % (i, fibermap["OBJTYPE"][i])) sys.exit(12) fluxcalib = compute_flux_calibration(frame, model_wave, model_flux, model_fibers % 500) # QA if (args.qafile is not None): log.info("performing fluxcalib QA") # Load qaframe = load_qa_frame(args.qafile, frame, flavor=frame.meta['FLAVOR']) # Run #import pdb; pdb.set_trace() qaframe.run_qa('FLUXCALIB', (frame, fluxcalib)) # Write if args.qafile is not None: write_qa_frame(args.qafile, qaframe) log.info("successfully wrote {:s}".format(args.qafile)) # Figure(s) if args.qafig is not None: qa_plots.frame_fluxcalib(args.qafig, qaframe, frame, fluxcalib) # write result write_flux_calibration(args.outfile, fluxcalib, header=frame.meta) log.info("successfully wrote %s" % args.outfile)
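#- Both flux-calibration mains index the fibermap with model_fibers % 500 because
#- fibers are numbered 0-4999 across the ten spectrographs while a single frame only
#- holds 500 rows: the modulo gives the row within this frame, the integer division
#- gives the spectrograph. A small illustration (values are made up):
# >>> fibers = np.array([1503, 1504, 1750])   # global fiber numbers
# >>> fibers % 500                            # row indices within this frame
# array([  3,   4, 250])
# >>> fibers // 500                           # spectrograph number
# array([3, 3, 3])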
def main(args): log = get_logger() if (args.fiberflat is None) and (args.sky is None) and (args.calib is None): log.critical('no --fiberflat, --sky, or --calib; nothing to do ?!?') sys.exit(12) frame = read_frame(args.infile) #- Raw scores already added in extraction, but just in case they weren't #- it is harmless to rerun to make sure we have them. compute_and_append_frame_scores(frame,suffix="RAW") if args.cosmics_nsig>0 and args.sky==None : # Reject cosmics (otherwise do it after sky subtraction) log.info("cosmics ray 1D rejection") reject_cosmic_rays_1d(frame,args.cosmics_nsig) if args.fiberflat!=None : log.info("apply fiberflat") # read fiberflat fiberflat = read_fiberflat(args.fiberflat) # apply fiberflat to all fibers apply_fiberflat(frame, fiberflat) compute_and_append_frame_scores(frame,suffix="FFLAT") if args.sky!=None : # read sky skymodel=read_sky(args.sky) if args.cosmics_nsig>0 : # use a copy the frame (not elegant but robust) copied_frame = copy.deepcopy(frame) # first subtract sky without throughput correction subtract_sky(copied_frame, skymodel, apply_throughput_correction = False) # then find cosmics log.info("cosmics ray 1D rejection after sky subtraction") reject_cosmic_rays_1d(copied_frame,args.cosmics_nsig) # copy mask frame.mask = copied_frame.mask # and (re-)subtract sky, but just the correction term subtract_sky(frame, skymodel, apply_throughput_correction = (not args.no_sky_throughput_correction) ) else : # subtract sky subtract_sky(frame, skymodel, apply_throughput_correction = (not args.no_sky_throughput_correction) ) compute_and_append_frame_scores(frame,suffix="SKYSUB") if args.calib!=None : log.info("calibrate") # read calibration fluxcalib=read_flux_calibration(args.calib) # apply calibration apply_flux_calibration(frame, fluxcalib) # Ensure that ivars are set to 0 for all values if any designated # fibermask bit is set. Also flips a bits for each frame.mask value using specmask.BADFIBER frame = get_fiberbitmasked_frame(frame,bitmask="flux",ivar_framemask=True) compute_and_append_frame_scores(frame,suffix="CALIB") # save output write_frame(args.outfile, frame, units='10**-17 erg/(s cm2 Angstrom)') log.info("successfully wrote %s"%args.outfile)
def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--infile', type=str, default=None, required=True,
                        help='path of DESI exposure frame fits file')
    parser.add_argument('--fiberflat', type=str, default=None,
                        help='path of DESI fiberflat fits file')
    parser.add_argument('--sky', type=str, default=None,
                        help='path of DESI sky fits file')
    parser.add_argument('--calib', type=str, default=None,
                        help='path of DESI flux calibration fits file')
    parser.add_argument('--outfile', type=str, default=None, required=True,
                        help='path of output (calibrated) DESI frame fits file')

    args = parser.parse_args()
    log = get_logger()

    if (args.fiberflat is None) and (args.sky is None) and (args.calib is None):
        log.critical('no --fiberflat, --sky, or --calib; nothing to do ?!?')
        sys.exit(12)

    frame = read_frame(args.infile)

    if args.fiberflat is not None:
        log.info("apply fiberflat")
        # read fiberflat
        fiberflat = read_fiberflat(args.fiberflat)
        # apply fiberflat to all fibers
        apply_fiberflat(frame, fiberflat)

    if args.sky is not None:
        log.info("subtract sky")
        # read sky
        skymodel = read_sky(args.sky)
        # subtract sky
        subtract_sky(frame, skymodel)

    if args.calib is not None:
        log.info("calibrate")
        # read calibration
        fluxcalib = read_flux_calibration(args.calib)
        # apply calibration
        apply_flux_calibration(frame, fluxcalib)

    # save output
    write_frame(args.outfile, frame)
    log.info("successfully wrote %s" % args.outfile)
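#- A typical invocation of the command-line entry point above applies all three
#- corrections in one pass. The script name and file names are illustrative
#- (desi_process_exposure is assumed to be the installed entry point for this main):
#
#   desi_process_exposure --infile frame-b0-00012345.fits \
#       --fiberflat fiberflat-b0-00012300.fits \
#       --sky sky-b0-00012345.fits \
#       --calib fluxcalib-b0-00012345.fits \
#       --outfile cframe-b0-00012345.fits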
def main(args): """ finds the best models of all standard stars in the frame and normlize the model flux. Output is written to a file and will be called for calibration. """ log = get_logger() log.info("mag delta %s = %f (for the pre-selection of stellar models)" % (args.color, args.delta_color)) frames = {} flats = {} skies = {} spectrograph = None starfibers = None starindices = None fibermap = None # READ DATA ############################################ for filename in args.frames: log.info("reading %s" % filename) frame = io.read_frame(filename) header = fits.getheader(filename, 0) frame_fibermap = frame.fibermap frame_starindices = np.where(frame_fibermap["OBJTYPE"] == "STD")[0] # check magnitude are well defined or discard stars tmp = [] for i in frame_starindices: mags = frame_fibermap["MAG"][i] ok = np.sum((mags > 0) & (mags < 30)) if np.sum((mags > 0) & (mags < 30)) == mags.size: tmp.append(i) frame_starindices = np.array(tmp).astype(int) camera = safe_read_key(header, "CAMERA").strip().lower() if spectrograph is None: spectrograph = frame.spectrograph fibermap = frame_fibermap starindices = frame_starindices starfibers = fibermap["FIBER"][starindices] elif spectrograph != frame.spectrograph: log.error("incompatible spectrographs %d != %d" % (spectrograph, frame.spectrograph)) raise ValueError("incompatible spectrographs %d != %d" % (spectrograph, frame.spectrograph)) elif starindices.size != frame_starindices.size or np.sum( starindices != frame_starindices) > 0: log.error("incompatible fibermap") raise ValueError("incompatible fibermap") if not camera in frames: frames[camera] = [] frames[camera].append(frame) for filename in args.skymodels: log.info("reading %s" % filename) sky = io.read_sky(filename) header = fits.getheader(filename, 0) camera = safe_read_key(header, "CAMERA").strip().lower() if not camera in skies: skies[camera] = [] skies[camera].append(sky) for filename in args.fiberflats: log.info("reading %s" % filename) header = fits.getheader(filename, 0) flat = io.read_fiberflat(filename) camera = safe_read_key(header, "CAMERA").strip().lower() # NEED TO ADD MORE CHECKS if camera in flats: log.warning( "cannot handle several flats of same camera (%s), will use only the first one" % camera) #raise ValueError("cannot handle several flats of same camera (%s)"%camera) else: flats[camera] = flat if starindices.size == 0: log.error("no STD star found in fibermap") raise ValueError("no STD star found in fibermap") log.info("found %d STD stars" % starindices.size) imaging_filters = fibermap["FILTER"][starindices] imaging_mags = fibermap["MAG"][starindices] log.warning( "NO MAG ERRORS IN FIBERMAP, I AM IGNORING MEASUREMENT ERRORS !!") ebv = np.zeros(starindices.size) if "SFD_EBV" in fibermap.columns.names: log.info("Using 'SFD_EBV' from fibermap") ebv = fibermap["SFD_EBV"][starindices] else: log.warning("NO EXTINCTION VALUES IN FIBERMAP!!") # DIVIDE FLAT AND SUBTRACT SKY , TRIM DATA ############################################ for cam in frames: if not cam in skies: log.warning("Missing sky for %s" % cam) frames.pop(cam) continue if not cam in flats: log.warning("Missing flat for %s" % cam) frames.pop(cam) continue flat = flats[cam] for frame, sky in zip(frames[cam], skies[cam]): frame.flux = frame.flux[starindices] frame.ivar = frame.ivar[starindices] frame.ivar *= (frame.mask[starindices] == 0) frame.ivar *= (sky.ivar[starindices] != 0) frame.ivar *= (sky.mask[starindices] == 0) frame.ivar *= (flat.ivar[starindices] != 0) frame.ivar *= (flat.mask[starindices] == 0) 
frame.flux *= (frame.ivar > 0) # just for clean plots for star in range(frame.flux.shape[0]): ok = np.where((frame.ivar[star] > 0) & (flat.fiberflat[star] != 0))[0] if ok.size > 0: frame.flux[star] = frame.flux[star] / flat.fiberflat[ star] - sky.flux[star] frame.resolution_data = frame.resolution_data[starindices] nstars = starindices.size starindices = None # we don't need this anymore # READ MODELS ############################################ log.info("reading star models in %s" % args.starmodels) stdwave, stdflux, templateid, teff, logg, feh = io.read_stdstar_templates( args.starmodels) # COMPUTE MAGS OF MODELS FOR EACH STD STAR MAG ############################################ model_filters = [] for tmp in np.unique(imaging_filters): if len(tmp) > 0: # can be one empty entry model_filters.append(tmp) log.info("computing model mags %s" % model_filters) model_mags = np.zeros((stdflux.shape[0], len(model_filters))) fluxunits = 1e-17 * units.erg / units.s / units.cm**2 / units.Angstrom for index in range(len(model_filters)): if model_filters[index].startswith('WISE'): log.warning('not computing stdstar {} mags'.format( model_filters[index])) continue filter_response = load_filter(model_filters[index]) for m in range(stdflux.shape[0]): model_mags[m, index] = filter_response.get_ab_magnitude( stdflux[m] * fluxunits, stdwave) log.info("done computing model mags") mean_extinction_delta_mags = None mean_ebv = np.mean(ebv) if mean_ebv > 0: log.info( "Compute a mean delta_color from average E(B-V) = %3.2f based on canonial model star" % mean_ebv) # compute a mean delta_color from mean_ebv based on canonial model star ####################################################################### # will then use this color offset in the model pre-selection # find canonical f-type model: Teff=6000, logg=4, Fe/H=-1.5 canonical_model = np.argmin((teff - 6000.0)**2 + (logg - 4.0)**2 + (feh + 1.5)**2) canonical_model_mags_without_extinction = model_mags[canonical_model] canonical_model_mags_with_extinction = np.zeros( canonical_model_mags_without_extinction.shape) canonical_model_reddened_flux = stdflux[ canonical_model] * dust_transmission(stdwave, mean_ebv) for index in range(len(model_filters)): if model_filters[index].startswith('WISE'): log.warning('not computing stdstar {} mags'.format( model_filters[index])) continue filter_response = load_filter(model_filters[index]) canonical_model_mags_with_extinction[ index] = filter_response.get_ab_magnitude( canonical_model_reddened_flux * fluxunits, stdwave) mean_extinction_delta_mags = canonical_model_mags_with_extinction - canonical_model_mags_without_extinction # LOOP ON STARS TO FIND BEST MODEL ############################################ linear_coefficients = np.zeros((nstars, stdflux.shape[0])) chi2dof = np.zeros((nstars)) redshift = np.zeros((nstars)) normflux = [] star_colors_array = np.zeros((nstars)) model_colors_array = np.zeros((nstars)) for star in range(nstars): log.info("finding best model for observed star #%d" % star) # np.array of wave,flux,ivar,resol wave = {} flux = {} ivar = {} resolution_data = {} for camera in frames: for i, frame in enumerate(frames[camera]): identifier = "%s-%d" % (camera, i) wave[identifier] = frame.wave flux[identifier] = frame.flux[star] ivar[identifier] = frame.ivar[star] resolution_data[identifier] = frame.resolution_data[star] # preselec models based on magnitudes # compute star color index1, index2 = get_color_filter_indices(imaging_filters[star], args.color) if index1 < 0 or index2 < 0: log.error("cannot compute 
'%s' color from %s" % (color_name, filters)) filter1 = imaging_filters[star][index1] filter2 = imaging_filters[star][index2] star_color = imaging_mags[star][index1] - imaging_mags[star][index2] star_colors_array[star] = star_color # compute models color model_index1 = -1 model_index2 = -1 for i, fname in enumerate(model_filters): if fname == filter1: model_index1 = i elif fname == filter2: model_index2 = i if model_index1 < 0 or model_index2 < 0: log.error("cannot compute '%s' model color from %s" % (color_name, filters)) model_colors = model_mags[:, model_index1] - model_mags[:, model_index2] # apply extinction here # use the colors derived from the cannonical model with the mean ebv of the stars # and simply apply a scaling factor based on the ebv of this star # this is sufficiently precise for the broad model pre-selection we are doing here # the exact reddening of the star to each pre-selected model is # apply afterwards if mean_extinction_delta_mags is not None and mean_ebv != 0: delta_color = (mean_extinction_delta_mags[model_index1] - mean_extinction_delta_mags[model_index2] ) * ebv[star] / mean_ebv model_colors += delta_color log.info( "Apply a %s-%s color offset = %4.3f to the models for star with E(B-V)=%4.3f" % (model_filters[model_index1], model_filters[model_index2], delta_color, ebv[star])) # selection selection = np.abs(model_colors - star_color) < args.delta_color # smallest cube in parameter space including this selection (needed for interpolation) new_selection = (teff >= np.min(teff[selection])) & (teff <= np.max( teff[selection])) new_selection &= (logg >= np.min(logg[selection])) & (logg <= np.max( logg[selection])) new_selection &= (feh >= np.min(feh[selection])) & (feh <= np.max( feh[selection])) selection = np.where(new_selection)[0] log.info( "star#%d fiber #%d, %s = %s-%s = %f, number of pre-selected models = %d/%d" % (star, starfibers[star], args.color, filter1, filter2, star_color, selection.size, stdflux.shape[0])) # apply extinction to selected_models dust_transmission_of_this_star = dust_transmission(stdwave, ebv[star]) selected_reddened_stdflux = stdflux[ selection] * dust_transmission_of_this_star coefficients, redshift[star], chi2dof[star] = match_templates( wave, flux, ivar, resolution_data, stdwave, selected_reddened_stdflux, teff[selection], logg[selection], feh[selection], ncpu=args.ncpu, z_max=args.z_max, z_res=args.z_res, template_error=args.template_error) linear_coefficients[star, selection] = coefficients log.info( 'Star Fiber: {0}; TEFF: {1}; LOGG: {2}; FEH: {3}; Redshift: {4}; Chisq/dof: {5}' .format(starfibers[star], np.inner(teff, linear_coefficients[star]), np.inner(logg, linear_coefficients[star]), np.inner(feh, linear_coefficients[star]), redshift[star], chi2dof[star])) # Apply redshift to original spectrum at full resolution model = np.zeros(stdwave.size) for i, c in enumerate(linear_coefficients[star]): if c != 0: model += c * np.interp(stdwave, stdwave * (1 + redshift[star]), stdflux[i]) # Apply dust extinction model *= dust_transmission_of_this_star # Compute final model color mag1 = load_filter(model_filters[model_index1]).get_ab_magnitude( model * fluxunits, stdwave) mag2 = load_filter(model_filters[model_index2]).get_ab_magnitude( model * fluxunits, stdwave) model_colors_array[star] = mag1 - mag2 # Normalize the best model using reported magnitude normalizedflux = normalize_templates(stdwave, model, imaging_mags[star], imaging_filters[star]) normflux.append(normalizedflux) # Now write the normalized flux for all best models to a file 
normflux = np.array(normflux) data = {} data['LOGG'] = linear_coefficients.dot(logg) data['TEFF'] = linear_coefficients.dot(teff) data['FEH'] = linear_coefficients.dot(feh) data['CHI2DOF'] = chi2dof data['REDSHIFT'] = redshift data['COEFF'] = linear_coefficients data['DATA_%s' % args.color] = star_colors_array data['MODEL_%s' % args.color] = model_colors_array norm_model_file = args.outfile io.write_stdstar_models(args.outfile, normflux, stdwave, starfibers, data)
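#- The DATA_<color> and MODEL_<color> columns written above are exactly what the
#- flux-calibration step later uses for its |delta color| cut. A sketch of
#- reproducing that cut from the stored metadata (the 0.2 mag threshold is
#- illustrative; model_metadata is assumed to be the metadata table returned by
#- io.read_stdstar_models):
import numpy as np

def _sketch_delta_color_mask(model_metadata, color='G-R', delta_color_cut=0.2):
    """True for stars whose best-fit model color matches their imaging color."""
    dcolor = model_metadata['MODEL_' + color] - model_metadata['DATA_' + color]
    return np.abs(dcolor) < delta_color_cut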
def main(args): log = get_logger() # precompute convolution kernels kernels = compute_crosstalk_kernels() A = None B = None out_wave = None dfiber = np.array([-2, -1, 1, 2]) #dfiber=np.array([-1,1]) npar = dfiber.size with_cst = True # to marginalize over residual background (should not change much) if with_cst: npar += 1 # one measurement per fiber bundle nfiber_per_bundle = 25 nbundles = 500 // nfiber_per_bundle xtalks = [] previous_psf_filename = None previous_fiberflat_filename = None for filename in args.infile: # read a frame and fiber the sky fibers frame = read_frame(filename) if out_wave is None: dwave = (frame.wave[-1] - frame.wave[0]) / 40 out_wave = np.linspace(frame.wave[0] + dwave / 2, frame.wave[-1] - dwave / 2, 40) # find fiberflat if "FIBERFLT" in frame.meta.keys(): flatname = frame.meta["FIBERFLT"] if flatname.find("SPCALIB") >= 0: flatname = flatname.replace( "SPCALIB", os.environ["DESI_SPECTRO_CALIB"] + "/") if flatname.find("SPECPROD") >= 0: # this one is harder :-( dirname = os.path.dirname( os.path.dirname(os.path.dirname( os.path.dirname(filename)))) flatname = flatname.replace("SPECPROD", dirname + "/") else: flatname = findcalibfile([ frame.meta, ], "FIBERFLAT") if flatname is not None: if previous_fiberflat_filename is not None and previous_fiberflat_filename == flatname: log.info("Using same fiberflat") else: if not os.path.isfile(flatname): log.error("Cannot open fiberflat file {}".format(flatname)) raise IOError( "Cannot open fiberflat file {}".format(flatname)) log.info("Using fiberflat {}".format(flatname)) fiberflat = read_fiberflat(flatname) medflat = np.median(fiberflat.fiberflat, axis=1) previous_fiberflat_filename = flatname else: medflat = None log.warning("No fiberflat") skyfibers = np.where((frame.fibermap["OBJTYPE"] == "SKY") & (frame.fibermap["FIBERSTATUS"] == 0))[0] log.info("{} sky fibers in {}".format(skyfibers.size, filename)) frame.ivar *= ( (frame.mask == 0) | (frame.mask == specmask.BADFIBER) ) # ignore BADFIBER which is a statement on the positioning # also open trace set to determine the shift # to apply to adjacent spectra psf_filename = findcalibfile([ frame.meta, ], "PSF") # only reread if necessary if previous_psf_filename is None or previous_psf_filename != psf_filename: tset = read_xytraceset(psf_filename) previous_psf_filename = psf_filename # will use this y central_y = tset.npix_y // 2 mwave = np.mean(frame.wave) if A is None: A = np.zeros((nbundles, npar, npar, out_wave.size)) B = np.zeros((nbundles, npar, out_wave.size)) fA = np.zeros((npar, npar, out_wave.size)) fB = np.zeros((npar, out_wave.size)) ninput = np.zeros((nbundles, dfiber.size)) for skyfiber in skyfibers: cflux = np.zeros((dfiber.size, out_wave.size)) skyfiberbundle = skyfiber // nfiber_per_bundle nbad = np.sum(frame.ivar[skyfiber] == 0) if nbad > 200: if nbad < 2000: log.warning( "ignore skyfiber {} from {} with {} masked pixel". format(skyfiber, filename, nbad)) continue skyfiber_central_wave = tset.wave_vs_y(skyfiber, central_y) should_consider = False must_exclude = False fA *= 0. fB *= 0. use_median_filter = False # not needed median_filter_width = 30 skyfiberflux, skyfiberivar = resample_flux(out_wave, frame.wave, frame.flux[skyfiber], frame.ivar[skyfiber]) if medflat is not None: skyfiberflux *= medflat[ skyfiber] # apply relative transmission of fiber, i.e. 
undo the fiberflat correction if use_median_filter: good = (skyfiberivar > 0) skyfiberflux = np.interp(out_wave, out_wave[good], skyfiberflux[good]) skyfiberflux = scipy.ndimage.filters.median_filter( skyfiberflux, median_filter_width, mode='constant') for i, df in enumerate(dfiber): otherfiber = df + skyfiber if otherfiber < 0: continue if otherfiber >= frame.nspec: continue if otherfiber // nfiber_per_bundle != skyfiberbundle: continue # not same bundle snr = np.sqrt(frame.ivar[otherfiber]) * frame.flux[otherfiber] medsnr = np.median(snr) if medsnr > 2: # need good SNR to model cross talk should_consider = True # in which case we need all of the contaminants to the sky fiber ... nbad = np.sum(snr == 0) if nbad > 200: if nbad < 2000: log.warning( "ignore fiber {} from {} with {} masked pixel". format(otherfiber, filename, nbad)) must_exclude = True # because 1 bad fiber break if np.any(snr > 1000.): log.error( "signal to noise is suspiciously too high in fiber {} from {}" .format(otherfiber, filename)) must_exclude = True # because 1 bad fiber break # interpolate over masked pixels or low snr pixels and shift medivar = np.median(frame.ivar[otherfiber]) good = (frame.ivar[otherfiber] > 0.01 * medivar ) # interpolate over brigh sky lines # account for change of wavelength for same y coordinate otherfiber_central_wave = tset.wave_vs_y(otherfiber, central_y) flux = np.interp( frame.wave + (otherfiber_central_wave - skyfiber_central_wave), frame.wave[good], frame.flux[otherfiber][good]) if medflat is not None: flux *= medflat[ otherfiber] # apply relative transmission of fiber, i.e. undo the fiberflat correction if use_median_filter: flux = scipy.ndimage.filters.median_filter( flux, median_filter_width, mode='constant') kern = kernels[np.abs(df)] tmp = fftconvolve(flux, kern, mode="same") cflux[i] = resample_flux(out_wave, frame.wave, tmp) fB[i] = skyfiberivar * cflux[i] * skyfiberflux for j in range(i + 1): fA[i, j] = skyfiberivar * cflux[i] * cflux[j] if should_consider and (not must_exclude): scflux = np.sum(cflux, axis=0) mscflux = np.sum(skyfiberivar * scflux) / np.sum(skyfiberivar) if mscflux < 100: continue if with_cst: i = dfiber.size fA[i, i] = skyfiberivar # constant term fB[i] = skyfiberivar * skyfiberflux for j in range(i): fA[i, j] = skyfiberivar * cflux[j] # just stack all wavelength to get 1 number for this fiber scflux = np.sum(cflux[np.abs(dfiber) == 1], axis=0) a = np.sum(skyfiberivar * scflux**2) b = np.sum(skyfiberivar * scflux * skyfiberflux) xtalk = b / a err = 1. 
/ np.sqrt(a) msky = np.sum( skyfiberivar * skyfiberflux) / np.sum(skyfiberivar) ra = frame.fibermap["TARGET_RA"][skyfiber] dec = frame.fibermap["TARGET_DEC"][skyfiber] if np.abs(xtalk) > 0.02 and np.abs(xtalk) / err > 5: log.warning( "discard skyfiber = {}, xtalk = {:4.3f} +- {:4.3f}, ra = {:5.4f} , dec = {:5.4f}, sky fiber flux= {:4.3f}, cont= {:4.3f}" .format(skyfiber, xtalk, err, ra, dec, msky, mscflux)) continue if err < 0.01 / 5.: xtalks.append(xtalk) for i in range(dfiber.size): ninput[skyfiberbundle, i] += int(np.sum(fB[i]) != 0) # to monitor B[skyfiberbundle] += fB A[skyfiberbundle] += fA for bundle in range(nbundles): for i in range(npar): for j in range(i): A[bundle, j, i] = A[bundle, i, j] # now solve crosstalk = np.zeros((nbundles, dfiber.size, out_wave.size)) crosstalk_ivar = np.zeros((nbundles, dfiber.size, out_wave.size)) for bundle in range(nbundles): for j in range(out_wave.size): try: Ai = np.linalg.inv(A[bundle, :, :, j]) if with_cst: crosstalk[bundle, :, j] = Ai.dot( B[bundle, :, j])[:-1] # last coefficient is constant crosstalk_ivar[bundle, :, j] = 1. / np.diag(Ai)[:-1] else: crosstalk[bundle, :, j] = Ai.dot(B[bundle, :, j]) crosstalk_ivar[bundle, :, j] = 1. / np.diag(Ai) except np.linalg.LinAlgError as e: pass table = Table() table["WAVELENGTH"] = out_wave for bundle in range(nbundles): for i, df in enumerate(dfiber): key = "CROSSTALK-B{:02d}-F{:+d}".format(bundle, df) table[key] = crosstalk[bundle, i] key = "CROSSTALKIVAR-B{:02d}-F{:+d}".format(bundle, df) table[key] = crosstalk_ivar[bundle, i] key = "NINPUT-B{:02d}-F{:+d}".format(bundle, df) table[key] = np.repeat(ninput[bundle, i], out_wave.size) table.write(args.outfile, overwrite=True) log.info("wrote {}".format(args.outfile)) log.info("number of sky fibers used per bundle:") for bundle in range(nbundles): log.info("bundle {}: {}".format(bundle, ninput[bundle])) if args.plot: for bundle in range(nbundles): for i, df in enumerate(dfiber): err = 1. / np.sqrt(crosstalk_ivar[bundle, i] + (crosstalk_ivar[bundle, i] == 0)) plt.errorbar(wave, crosstalk[bundle, i], err, fmt="o-", label="bundle = {:02d} dfiber = {:+d}".format( bundle, df)) plt.grid() plt.legend() plt.show()
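#- The crosstalk coefficients are obtained wavelength bin by wavelength bin from the
#- accumulated normal equations A.x = B; bins where A is singular (no usable sky
#- fibers in that bundle) are simply left at zero. A condensed sketch of that
#- per-bundle solve (hypothetical helper, constant term handling omitted):
import numpy as np

def _sketch_solve_normal_equations(A, B):
    """Solve A[:,:,j] x = B[:,j] for each wavelength bin j.

    A : (npar, npar, nwave) accumulated normal matrix for one bundle
    B : (npar, nwave) accumulated right-hand side
    Returns (x, xivar), both (npar, nwave), left at zero where A is singular.
    """
    npar, _, nwave = A.shape
    x = np.zeros((npar, nwave))
    xivar = np.zeros((npar, nwave))
    for j in range(nwave):
        try:
            Ai = np.linalg.inv(A[:, :, j])
            x[:, j] = Ai.dot(B[:, j])
            xivar[:, j] = 1.0 / np.diag(Ai)   # inverse of the parameter variances
        except np.linalg.LinAlgError:
            pass                               # singular system: leave zeros
    return x, xivar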
def main(args, comm=None): """ finds the best models of all standard stars in the frame and normlize the model flux. Output is written to a file and will be called for calibration. """ log = get_logger() log.info("mag delta %s = %f (for the pre-selection of stellar models)" % (args.color, args.delta_color)) if args.mpi or comm is not None: from mpi4py import MPI if comm is None: comm = MPI.COMM_WORLD size = comm.Get_size() rank = comm.Get_rank() if rank == 0: log.info('mpi parallelizing with {} ranks'.format(size)) else: comm = None rank = 0 size = 1 # disable multiprocess by forcing ncpu = 1 when using MPI if comm is not None: ncpu = 1 if rank == 0: log.info('disabling multiprocess (forcing ncpu = 1)') else: ncpu = args.ncpu if ncpu > 1: if rank == 0: log.info( 'multiprocess parallelizing with {} processes'.format(ncpu)) if args.ignore_gpu and desispec.fluxcalibration.use_gpu: # Opt-out of GPU usage desispec.fluxcalibration.use_gpu = False if rank == 0: log.info('ignoring GPU') elif desispec.fluxcalibration.use_gpu: # Nothing to do here, GPU is used by default if available if rank == 0: log.info('using GPU') else: if rank == 0: log.info('GPU not available') std_targetids = None if args.std_targetids is not None: std_targetids = args.std_targetids # READ DATA ############################################ # First loop through and group by exposure and spectrograph frames_by_expid = {} rows = list() for filename in args.frames: log.info("reading %s" % filename) frame = io.read_frame(filename) night = safe_read_key(frame.meta, "NIGHT") expid = safe_read_key(frame.meta, "EXPID") camera = safe_read_key(frame.meta, "CAMERA").strip().lower() rows.append((night, expid, camera)) spec = camera[1] uniq_key = (expid, spec) if uniq_key in frames_by_expid.keys(): frames_by_expid[uniq_key][camera] = frame else: frames_by_expid[uniq_key] = {camera: frame} input_frames_table = Table(rows=rows, names=('NIGHT', 'EXPID', 'TILEID')) frames = {} flats = {} skies = {} spectrograph = None starfibers = None starindices = None fibermap = None # For each unique expid,spec pair, get the logical OR of the FIBERSTATUS for all # cameras and then proceed with extracting the frame information # once we modify the fibermap FIBERSTATUS for (expid, spec), camdict in frames_by_expid.items(): fiberstatus = None for frame in camdict.values(): if fiberstatus is None: fiberstatus = frame.fibermap['FIBERSTATUS'].data.copy() else: fiberstatus |= frame.fibermap['FIBERSTATUS'] for camera, frame in camdict.items(): frame.fibermap['FIBERSTATUS'] |= fiberstatus # Set fibermask flagged spectra to have 0 flux and variance frame = get_fiberbitmasked_frame(frame, bitmask='stdstars', ivar_framemask=True) frame_fibermap = frame.fibermap if std_targetids is None: frame_starindices = np.where(isStdStar(frame_fibermap))[0] else: frame_starindices = np.nonzero( np.isin(frame_fibermap['TARGETID'], std_targetids))[0] #- Confirm that all fluxes have entries but trust targeting bits #- to get basic magnitude range correct keep_legacy = np.ones(len(frame_starindices), dtype=bool) for colname in ['FLUX_G', 'FLUX_R', 'FLUX_Z']: #- and W1 and W2? keep_legacy &= frame_fibermap[colname][ frame_starindices] > 10**((22.5 - 30) / 2.5) keep_legacy &= frame_fibermap[colname][ frame_starindices] < 10**((22.5 - 0) / 2.5) keep_gaia = np.ones(len(frame_starindices), dtype=bool) for colname in ['G', 'BP', 'RP']: #- and W1 and W2? 
keep_gaia &= frame_fibermap[ 'GAIA_PHOT_' + colname + '_MEAN_MAG'][frame_starindices] > 10 keep_gaia &= frame_fibermap[ 'GAIA_PHOT_' + colname + '_MEAN_MAG'][frame_starindices] < 20 n_legacy_std = keep_legacy.sum() n_gaia_std = keep_gaia.sum() keep = keep_legacy | keep_gaia # accept both types of standards for the time being # keep the indices for gaia/legacy subsets gaia_indices = keep_gaia[keep] legacy_indices = keep_legacy[keep] frame_starindices = frame_starindices[keep] if spectrograph is None: spectrograph = frame.spectrograph fibermap = frame_fibermap starindices = frame_starindices starfibers = fibermap["FIBER"][starindices] elif spectrograph != frame.spectrograph: log.error("incompatible spectrographs {} != {}".format( spectrograph, frame.spectrograph)) raise ValueError("incompatible spectrographs {} != {}".format( spectrograph, frame.spectrograph)) elif starindices.size != frame_starindices.size or np.sum( starindices != frame_starindices) > 0: log.error("incompatible fibermap") raise ValueError("incompatible fibermap") if not camera in frames: frames[camera] = [] frames[camera].append(frame) # possibly cleanup memory del frames_by_expid for filename in args.skymodels: log.info("reading %s" % filename) sky = io.read_sky(filename) camera = safe_read_key(sky.header, "CAMERA").strip().lower() if not camera in skies: skies[camera] = [] skies[camera].append(sky) for filename in args.fiberflats: log.info("reading %s" % filename) flat = io.read_fiberflat(filename) camera = safe_read_key(flat.header, "CAMERA").strip().lower() # NEED TO ADD MORE CHECKS if camera in flats: log.warning( "cannot handle several flats of same camera (%s), will use only the first one" % camera) #raise ValueError("cannot handle several flats of same camera (%s)"%camera) else: flats[camera] = flat # if color is not specified we decide on the fly color = args.color if color is not None: if color[:4] == 'GAIA': legacy_color = False gaia_color = True else: legacy_color = True gaia_color = False if n_legacy_std == 0 and legacy_color: raise Exception( 'Specified Legacy survey color, but no legacy standards') if n_gaia_std == 0 and gaia_color: raise Exception('Specified gaia color, but no gaia stds') if starindices.size == 0: log.error("no STD star found in fibermap") raise ValueError("no STD star found in fibermap") log.info("found %d STD stars" % starindices.size) if n_legacy_std == 0: gaia_std = True if color is None: color = 'GAIA-BP-RP' else: gaia_std = False if color is None: color = 'G-R' if n_gaia_std > 0: log.info('Gaia standards found but not used') if gaia_std: # The name of the reference filter to which we normalize the flux ref_mag_name = 'GAIA-G' color_band1, color_band2 = ['GAIA-' + _ for _ in color[5:].split('-')] log.info( "Using Gaia standards with color {} and normalizing to {}".format( color, ref_mag_name)) # select appropriate subset of standards starindices = starindices[gaia_indices] starfibers = starfibers[gaia_indices] else: ref_mag_name = 'R' color_band1, color_band2 = color.split('-') log.info("Using Legacy standards with color {} and normalizing to {}". 
format(color, ref_mag_name)) # select appropriate subset of standards starindices = starindices[legacy_indices] starfibers = starfibers[legacy_indices] # excessive check but just in case if not color in ['G-R', 'R-Z', 'GAIA-BP-RP', 'GAIA-G-RP']: raise ValueError('Unknown color {}'.format(color)) # log.warning("Not using flux errors for Standard Star fits!") # DIVIDE FLAT AND SUBTRACT SKY , TRIM DATA ############################################ # since poping dict, we need to copy keys to iterate over to avoid # RuntimeError due to changing dict frame_cams = list(frames.keys()) for cam in frame_cams: if not cam in skies: log.warning("Missing sky for %s" % cam) frames.pop(cam) continue if not cam in flats: log.warning("Missing flat for %s" % cam) frames.pop(cam) continue flat = flats[cam] for frame, sky in zip(frames[cam], skies[cam]): frame.flux = frame.flux[starindices] frame.ivar = frame.ivar[starindices] frame.ivar *= (frame.mask[starindices] == 0) frame.ivar *= (sky.ivar[starindices] != 0) frame.ivar *= (sky.mask[starindices] == 0) frame.ivar *= (flat.ivar[starindices] != 0) frame.ivar *= (flat.mask[starindices] == 0) frame.flux *= (frame.ivar > 0) # just for clean plots for star in range(frame.flux.shape[0]): ok = np.where((frame.ivar[star] > 0) & (flat.fiberflat[star] != 0))[0] if ok.size > 0: frame.flux[star] = frame.flux[star] / flat.fiberflat[ star] - sky.flux[star] frame.resolution_data = frame.resolution_data[starindices] nframes = len(frames[cam]) if nframes > 1: # optimal weights for the coaddition = ivar*throughput, not directly ivar, # we estimate the relative throughput with median fluxes at this stage medflux = np.zeros(nframes) for i, frame in enumerate(frames[cam]): if np.sum(frame.ivar > 0) == 0: log.error( "ivar=0 for all std star spectra in frame {}-{:08d}". format(cam, frame.meta["EXPID"])) else: medflux[i] = np.median(frame.flux[frame.ivar > 0]) log.debug("medflux = {}".format(medflux)) medflux *= (medflux > 0) if np.sum(medflux > 0) == 0: log.error( "mean median flux = 0, for all stars in fibers {}".format( list(frames[cam][0].fibermap["FIBER"][starindices]))) sys.exit(12) mmedflux = np.mean(medflux[medflux > 0]) weights = medflux / mmedflux log.info("coadding {} exposures in cam {}, w={}".format( nframes, cam, weights)) sw = np.zeros(frames[cam][0].flux.shape) swf = np.zeros(frames[cam][0].flux.shape) swr = np.zeros(frames[cam][0].resolution_data.shape) for i, frame in enumerate(frames[cam]): sw += weights[i] * frame.ivar swf += weights[i] * frame.ivar * frame.flux swr += weights[i] * frame.ivar[:, None, :] * frame.resolution_data coadded_frame = frames[cam][0] coadded_frame.ivar = sw coadded_frame.flux = swf / (sw + (sw == 0)) coadded_frame.resolution_data = swr / ((sw + (sw == 0))[:, None, :]) frames[cam] = [coadded_frame] # CHECK S/N ############################################ # for each band in 'brz', record quadratic sum of median S/N across wavelength snr = dict() for band in ['b', 'r', 'z']: snr[band] = np.zeros(starindices.size) for cam in frames: band = cam[0].lower() for frame in frames[cam]: msnr = np.median(frame.flux * np.sqrt(frame.ivar) / np.sqrt(np.gradient(frame.wave)), axis=1) # median SNR per sqrt(A.) msnr *= (msnr > 0) snr[band] = np.sqrt(snr[band]**2 + msnr**2) log.info("SNR(B) = {}".format(snr['b'])) ############################### max_number_of_stars = 50 min_blue_snr = 4. 
############################### indices = np.argsort(snr['b'])[::-1][:max_number_of_stars] validstars = np.where(snr['b'][indices] > min_blue_snr)[0] #- TODO: later we filter on models based upon color, thus throwing #- away very blue stars for which we don't have good models. log.info("Number of stars with median stacked blue S/N > {} /sqrt(A) = {}". format(min_blue_snr, validstars.size)) if validstars.size == 0: log.error("No valid star") sys.exit(12) validstars = indices[validstars] for band in ['b', 'r', 'z']: snr[band] = snr[band][validstars] log.info("BLUE SNR of selected stars={}".format(snr['b'])) for cam in frames: for frame in frames[cam]: frame.flux = frame.flux[validstars] frame.ivar = frame.ivar[validstars] frame.resolution_data = frame.resolution_data[validstars] starindices = starindices[validstars] starfibers = starfibers[validstars] nstars = starindices.size fibermap = Table(fibermap[starindices]) # MASK OUT THROUGHPUT DIP REGION ############################################ mask_throughput_dip_region = True if mask_throughput_dip_region: wmin = 4300. wmax = 4500. log.warning( "Masking out the wavelength region [{},{}]A in the standard star fit" .format(wmin, wmax)) for cam in frames: for frame in frames[cam]: ii = np.where((frame.wave >= wmin) & (frame.wave <= wmax))[0] if ii.size > 0: frame.ivar[:, ii] = 0 # READ MODELS ############################################ log.info("reading star models in %s" % args.starmodels) stdwave, stdflux, templateid, teff, logg, feh = io.read_stdstar_templates( args.starmodels) # COMPUTE MAGS OF MODELS FOR EACH STD STAR MAG ############################################ #- Support older fibermaps if 'PHOTSYS' not in fibermap.colnames: log.warning('Old fibermap format; using defaults for missing columns') log.warning(" PHOTSYS = 'S'") log.warning(" EBV = 0.0") fibermap['PHOTSYS'] = 'S' fibermap['EBV'] = 0.0 if not np.in1d(np.unique(fibermap['PHOTSYS']), ['', 'N', 'S', 'G']).all(): log.error('Unknown PHOTSYS found') raise Exception('Unknown PHOTSYS found') # Fetching Filter curves model_filters = dict() for band in ["G", "R", "Z"]: for photsys in np.unique(fibermap['PHOTSYS']): if photsys in ['N', 'S']: model_filters[band + photsys] = load_legacy_survey_filter( band=band, photsys=photsys) if len(model_filters) == 0: log.info('No Legacy survey photometry identified in fibermap') # I will always load gaia data even if we are fitting LS standards only for band in ["G", "BP", "RP"]: model_filters["GAIA-" + band] = load_gaia_filter(band=band, dr=2) # Compute model mags on rank 0 and bcast result to other ranks # This sidesteps an OOM event on Cori Haswell with "-c 2" model_mags = None if rank == 0: log.info("computing model mags for %s" % sorted(model_filters.keys())) model_mags = dict() for filter_name in model_filters.keys(): model_mags[filter_name] = get_magnitude(stdwave, stdflux, model_filters, filter_name) log.info("done computing model mags") if comm is not None: model_mags = comm.bcast(model_mags, root=0) # LOOP ON STARS TO FIND BEST MODEL ############################################ star_mags = dict() star_unextincted_mags = dict() if gaia_std and (fibermap['EBV'] == 0).all(): log.info("Using E(B-V) from SFD rather than FIBERMAP") # when doing gaia standards, on old tiles the # EBV is not set so we fetch from SFD (in original SFD scaling) ebv = SFDMap(scaling=1).ebv( acoo.SkyCoord(ra=fibermap['TARGET_RA'] * units.deg, dec=fibermap['TARGET_DEC'] * units.deg)) else: ebv = fibermap['EBV'] photometric_systems = np.unique(fibermap['PHOTSYS']) if 
not gaia_std: for band in ['G', 'R', 'Z']: star_mags[band] = 22.5 - 2.5 * np.log10(fibermap['FLUX_' + band]) star_unextincted_mags[band] = np.zeros(star_mags[band].shape) for photsys in photometric_systems: r_band = extinction_total_to_selective_ratio( band, photsys) # dimensionless # r_band = a_band / E(B-V) # E(B-V) is a difference of magnitudes (dimensionless) # a_band = -2.5*log10(effective dust transmission) , dimensionless # effective dust transmission = # integral( SED(lambda) * filter_transmission(lambda,band) * dust_transmission(lambda,E(B-V)) dlamdba) # / integral( SED(lambda) * filter_transmission(lambda,band) dlamdba) selection = (fibermap['PHOTSYS'] == photsys) a_band = r_band * ebv[selection] # dimensionless star_unextincted_mags[band][selection] = 22.5 - 2.5 * np.log10( fibermap['FLUX_' + band][selection]) - a_band for band in ['G', 'BP', 'RP']: star_mags['GAIA-' + band] = fibermap['GAIA_PHOT_' + band + '_MEAN_MAG'] for band, extval in gaia_extinction(star_mags['GAIA-G'], star_mags['GAIA-BP'], star_mags['GAIA-RP'], ebv).items(): star_unextincted_mags['GAIA-' + band] = star_mags['GAIA-' + band] - extval star_colors = dict() star_unextincted_colors = dict() # compute the colors and define the unextincted colors # the unextincted colors are filled later if not gaia_std: for c1, c2 in ['GR', 'RZ']: star_colors[c1 + '-' + c2] = star_mags[c1] - star_mags[c2] star_unextincted_colors[c1 + '-' + c2] = (star_unextincted_mags[c1] - star_unextincted_mags[c2]) for c1, c2 in [('BP', 'RP'), ('G', 'RP')]: star_colors['GAIA-' + c1 + '-' + c2] = (star_mags['GAIA-' + c1] - star_mags['GAIA-' + c2]) star_unextincted_colors['GAIA-' + c1 + '-' + c2] = (star_unextincted_mags['GAIA-' + c1] - star_unextincted_mags['GAIA-' + c2]) linear_coefficients = np.zeros((nstars, stdflux.shape[0])) chi2dof = np.zeros((nstars)) redshift = np.zeros((nstars)) normflux = np.zeros((nstars, stdwave.size)) fitted_model_colors = np.zeros(nstars) local_comm, head_comm = None, None if comm is not None: # All ranks in local_comm work on the same stars local_comm = comm.Split(rank % nstars, rank) # The color 1 in head_comm contains all ranks that are have rank 0 in local_comm head_comm = comm.Split(rank < nstars, rank) for star in range(rank % nstars, nstars, size): log.info("rank %d: finding best model for observed star #%d" % (rank, star)) # np.array of wave,flux,ivar,resol wave = {} flux = {} ivar = {} resolution_data = {} for camera in frames: for i, frame in enumerate(frames[camera]): identifier = "%s-%d" % (camera, i) wave[identifier] = frame.wave flux[identifier] = frame.flux[star] ivar[identifier] = frame.ivar[star] resolution_data[identifier] = frame.resolution_data[star] # preselect models based on magnitudes photsys = fibermap['PHOTSYS'][star] if gaia_std: model_colors = model_mags[color_band1] - model_mags[color_band2] else: model_colors = model_mags[color_band1 + photsys] - model_mags[color_band2 + photsys] color_diff = model_colors - star_unextincted_colors[color][star] selection = np.abs(color_diff) < args.delta_color if np.sum(selection) == 0: log.warning("no model in the selected color range for this star") continue # smallest cube in parameter space including this selection (needed for interpolation) new_selection = (teff >= np.min(teff[selection])) & (teff <= np.max( teff[selection])) new_selection &= (logg >= np.min(logg[selection])) & (logg <= np.max( logg[selection])) new_selection &= (feh >= np.min(feh[selection])) & (feh <= np.max( feh[selection])) selection = np.where(new_selection)[0] log.info( 
"star#%d fiber #%d, %s = %f, number of pre-selected models = %d/%d" % (star, starfibers[star], color, star_unextincted_colors[color][star], selection.size, stdflux.shape[0])) # Match unextincted standard stars to data match_templates_result = match_templates( wave, flux, ivar, resolution_data, stdwave, stdflux[selection], teff[selection], logg[selection], feh[selection], ncpu=ncpu, z_max=args.z_max, z_res=args.z_res, template_error=args.template_error, comm=local_comm) # Only local rank 0 can perform the remaining work if local_comm is not None and local_comm.Get_rank() != 0: continue coefficients, redshift[star], chi2dof[star] = match_templates_result linear_coefficients[star, selection] = coefficients log.info( 'Star Fiber: {}; TEFF: {:.3f}; LOGG: {:.3f}; FEH: {:.3f}; Redshift: {:g}; Chisq/dof: {:.3f}' .format(starfibers[star], np.inner(teff, linear_coefficients[star]), np.inner(logg, linear_coefficients[star]), np.inner(feh, linear_coefficients[star]), redshift[star], chi2dof[star])) # Apply redshift to original spectrum at full resolution model = np.zeros(stdwave.size) redshifted_stdwave = stdwave * (1 + redshift[star]) for i, c in enumerate(linear_coefficients[star]): if c != 0: model += c * np.interp(stdwave, redshifted_stdwave, stdflux[i]) # Apply dust extinction to the model log.info("Applying MW dust extinction to star {} with EBV = {}".format( star, ebv[star])) model *= dust_transmission(stdwave, ebv[star]) # Compute final color of dust-extincted model photsys = fibermap['PHOTSYS'][star] if not gaia_std: model_mag1, model_mag2 = [ get_magnitude(stdwave, model, model_filters, _ + photsys) for _ in [color_band1, color_band2] ] else: model_mag1, model_mag2 = [ get_magnitude(stdwave, model, model_filters, _) for _ in [color_band1, color_band2] ] if color_band1 == ref_mag_name: model_magr = model_mag1 elif color_band2 == ref_mag_name: model_magr = model_mag2 else: # if the reference magnitude is not among colours # I'm fetching it separately. This will happen when # colour is BP-RP and ref magnitude is G if gaia_std: model_magr = get_magnitude(stdwave, model, model_filters, ref_mag_name) else: model_magr = get_magnitude(stdwave, model, model_filters, ref_mag_name + photsys) fitted_model_colors[star] = model_mag1 - model_mag2 #- TODO: move this back into normalize_templates, at the cost of #- recalculating a model magnitude? cur_refmag = star_mags[ref_mag_name][star] # Normalize the best model using reported magnitude scalefac = 10**((model_magr - cur_refmag) / 2.5) log.info('scaling {} mag {:.3f} to {:.3f} using scale {}'.format( ref_mag_name, model_magr, cur_refmag, scalefac)) normflux[star] = model * scalefac if head_comm is not None and rank < nstars: # head_comm color is 1 linear_coefficients = head_comm.reduce(linear_coefficients, op=MPI.SUM, root=0) redshift = head_comm.reduce(redshift, op=MPI.SUM, root=0) chi2dof = head_comm.reduce(chi2dof, op=MPI.SUM, root=0) fitted_model_colors = head_comm.reduce(fitted_model_colors, op=MPI.SUM, root=0) normflux = head_comm.reduce(normflux, op=MPI.SUM, root=0) # Check at least one star was fit. The check is peformed on rank 0 and # the result is bcast to other ranks so that all ranks exit together if # the check fails. 
atleastonestarfit = False if rank == 0: fitted_stars = np.where(chi2dof != 0)[0] atleastonestarfit = fitted_stars.size > 0 if comm is not None: atleastonestarfit = comm.bcast(atleastonestarfit, root=0) if not atleastonestarfit: log.error("No star has been fit.") sys.exit(12) # Now write the normalized flux for all best models to a file if rank == 0: # get the fibermap from any input frame for the standard stars fibermap = Table(frame.fibermap) keep = np.isin(fibermap['FIBER'], starfibers[fitted_stars]) fibermap = fibermap[keep] # drop fibermap columns specific to exposures instead of targets for col in [ 'DELTA_X', 'DELTA_Y', 'EXPTIME', 'NUM_ITER', 'FIBER_RA', 'FIBER_DEC', 'FIBER_X', 'FIBER_Y' ]: if col in fibermap.colnames: fibermap.remove_column(col) data = {} data['LOGG'] = linear_coefficients[fitted_stars, :].dot(logg) data['TEFF'] = linear_coefficients[fitted_stars, :].dot(teff) data['FEH'] = linear_coefficients[fitted_stars, :].dot(feh) data['CHI2DOF'] = chi2dof[fitted_stars] data['REDSHIFT'] = redshift[fitted_stars] data['COEFF'] = linear_coefficients[fitted_stars, :] data['DATA_%s' % color] = star_colors[color][fitted_stars] data['MODEL_%s' % color] = fitted_model_colors[fitted_stars] data['BLUE_SNR'] = snr['b'][fitted_stars] data['RED_SNR'] = snr['r'][fitted_stars] data['NIR_SNR'] = snr['z'][fitted_stars] io.write_stdstar_models(args.outfile, normflux, stdwave, starfibers[fitted_stars], data, fibermap, input_frames_table)
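# Illustrative sketch (not part of desispec): the model normalization above rescales
# the best-fit template so that its synthetic magnitude in the reference band matches
# the observed magnitude,
#     scalefac = 10**((model_mag - observed_mag) / 2.5),
# i.e. a template that is 1 mag too faint gets multiplied by 10**0.4 ~ 2.5. Minimal
# numpy version, with made-up magnitudes:
import numpy as np

def _normalize_to_reference_mag(model_flux, model_mag, observed_mag):
    """Rescale model_flux so its AB magnitude equals observed_mag."""
    scalefac = 10 ** ((model_mag - observed_mag) / 2.5)
    return model_flux * scalefac

# example: a template 0.75 mag fainter than the star is boosted by ~2x
_example_normflux = _normalize_to_reference_mag(np.ones(10), model_mag=18.75, observed_mag=18.0)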
def main(args):
    log = get_logger()

    log.info("read frame")
    # read frame
    frame = read_frame(args.infile)

    log.info("apply fiberflat")
    # read fiberflat
    fiberflat = read_fiberflat(args.fiberflat)

    # apply fiberflat
    apply_fiberflat(frame, fiberflat)

    log.info("subtract sky")
    # read sky
    skymodel = read_sky(args.sky)

    # subtract sky
    subtract_sky(frame, skymodel)

    log.info("compute flux calibration")

    # read models
    model_flux, model_wave, model_fibers = read_stdstar_models(args.models)
    model_tuple = model_flux, model_wave, model_fibers

    # check that the model_fibers are actually standard stars
    fibermap = frame.fibermap
    model_fibers = model_fibers % 500
    if np.any(fibermap['OBJTYPE'][model_fibers] != 'STD'):
        for i in model_fibers:
            log.error(
                "inconsistency with spectrum %d, OBJTYPE='%s' in fibermap"
                % (i, fibermap["OBJTYPE"][i]))
        sys.exit(12)

    #fluxcalib, indiv_stars = compute_flux_calibration(frame, model_wave, model_flux)
    fluxcalib = compute_flux_calibration(frame, model_wave, model_flux)

    # QA
    if (args.qafile is not None):
        log.info("performing fluxcalib QA")
        # Load
        qaframe = load_qa_frame(args.qafile, frame, flavor=frame.meta['FLAVOR'])
        # Run
        qaframe.run_qa('FLUXCALIB', (frame, fluxcalib, model_tuple))  #, indiv_stars))
        # Write
        if args.qafile is not None:
            write_qa_frame(args.qafile, qaframe)
            log.info("successfully wrote {:s}".format(args.qafile))
        # Figure(s)
        if args.qafig is not None:
            qa_plots.frame_fluxcalib(args.qafig, qaframe, frame, fluxcalib,
                                     model_tuple)

    # write result
    write_flux_calibration(args.outfile, fluxcalib, header=frame.meta)

    log.info("successfully wrote %s" % args.outfile)
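# Illustrative sketch (not part of desispec): conceptually, the flux calibration
# vector is the ratio between observed (fiberflatted, sky-subtracted) counts and the
# physical model flux of the standard stars,
#     calib(wave) ~ counts(wave) / model_flux(wave).
# The real compute_flux_calibration performs an inverse-variance weighted fit with
# resolution convolution and outlier rejection; the toy below only shows the
# weighted-ratio idea on arrays of shape (nstars, nwave), which are assumptions.
import numpy as np

def _toy_flux_calibration(counts, ivar, model_flux):
    """Inverse-variance weighted estimate of counts/model per wavelength."""
    ratio = np.zeros_like(counts)
    good = model_flux > 0
    ratio[good] = counts[good] / model_flux[good]
    # ivar of the ratio counts/model is ivar * model_flux**2, used as the weight
    weights = ivar * model_flux ** 2
    sw = np.sum(weights, axis=0)
    calib = np.sum(weights * ratio, axis=0) / (sw + (sw == 0))
    return calib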
def main(args): """ finds the best models of all standard stars in the frame and normlize the model flux. Output is written to a file and will be called for calibration. """ log = get_logger() log.info("mag delta %s = %f (for the pre-selection of stellar models)" % (args.color, args.delta_color)) log.info('multiprocess parallelizing with {} processes'.format(args.ncpu)) # READ DATA ############################################ # First loop through and group by exposure and spectrograph frames_by_expid = {} for filename in args.frames: log.info("reading %s" % filename) frame = io.read_frame(filename) expid = safe_read_key(frame.meta, "EXPID") camera = safe_read_key(frame.meta, "CAMERA").strip().lower() spec = camera[1] uniq_key = (expid, spec) if uniq_key in frames_by_expid.keys(): frames_by_expid[uniq_key][camera] = frame else: frames_by_expid[uniq_key] = {camera: frame} frames = {} flats = {} skies = {} spectrograph = None starfibers = None starindices = None fibermap = None # For each unique expid,spec pair, get the logical OR of the FIBERSTATUS for all # cameras and then proceed with extracting the frame information # once we modify the fibermap FIBERSTATUS for (expid, spec), camdict in frames_by_expid.items(): fiberstatus = None for frame in camdict.values(): if fiberstatus is None: fiberstatus = frame.fibermap['FIBERSTATUS'].data.copy() else: fiberstatus |= frame.fibermap['FIBERSTATUS'] for camera, frame in camdict.items(): frame.fibermap['FIBERSTATUS'] |= fiberstatus # Set fibermask flagged spectra to have 0 flux and variance frame = get_fiberbitmasked_frame(frame, bitmask='stdstars', ivar_framemask=True) frame_fibermap = frame.fibermap frame_starindices = np.where(isStdStar(frame_fibermap))[0] #- Confirm that all fluxes have entries but trust targeting bits #- to get basic magnitude range correct keep = np.ones(len(frame_starindices), dtype=bool) for colname in ['FLUX_G', 'FLUX_R', 'FLUX_Z']: #- and W1 and W2? 
keep &= frame_fibermap[colname][frame_starindices] > 10**( (22.5 - 30) / 2.5) keep &= frame_fibermap[colname][frame_starindices] < 10**( (22.5 - 0) / 2.5) frame_starindices = frame_starindices[keep] if spectrograph is None: spectrograph = frame.spectrograph fibermap = frame_fibermap starindices = frame_starindices starfibers = fibermap["FIBER"][starindices] elif spectrograph != frame.spectrograph: log.error("incompatible spectrographs %d != %d" % (spectrograph, frame.spectrograph)) raise ValueError("incompatible spectrographs %d != %d" % (spectrograph, frame.spectrograph)) elif starindices.size != frame_starindices.size or np.sum( starindices != frame_starindices) > 0: log.error("incompatible fibermap") raise ValueError("incompatible fibermap") if not camera in frames: frames[camera] = [] frames[camera].append(frame) # possibly cleanup memory del frames_by_expid for filename in args.skymodels: log.info("reading %s" % filename) sky = io.read_sky(filename) camera = safe_read_key(sky.header, "CAMERA").strip().lower() if not camera in skies: skies[camera] = [] skies[camera].append(sky) for filename in args.fiberflats: log.info("reading %s" % filename) flat = io.read_fiberflat(filename) camera = safe_read_key(flat.header, "CAMERA").strip().lower() # NEED TO ADD MORE CHECKS if camera in flats: log.warning( "cannot handle several flats of same camera (%s), will use only the first one" % camera) #raise ValueError("cannot handle several flats of same camera (%s)"%camera) else: flats[camera] = flat if starindices.size == 0: log.error("no STD star found in fibermap") raise ValueError("no STD star found in fibermap") log.info("found %d STD stars" % starindices.size) log.warning("Not using flux errors for Standard Star fits!") # DIVIDE FLAT AND SUBTRACT SKY , TRIM DATA ############################################ # since poping dict, we need to copy keys to iterate over to avoid # RuntimeError due to changing dict frame_cams = list(frames.keys()) for cam in frame_cams: if not cam in skies: log.warning("Missing sky for %s" % cam) frames.pop(cam) continue if not cam in flats: log.warning("Missing flat for %s" % cam) frames.pop(cam) continue flat = flats[cam] for frame, sky in zip(frames[cam], skies[cam]): frame.flux = frame.flux[starindices] frame.ivar = frame.ivar[starindices] frame.ivar *= (frame.mask[starindices] == 0) frame.ivar *= (sky.ivar[starindices] != 0) frame.ivar *= (sky.mask[starindices] == 0) frame.ivar *= (flat.ivar[starindices] != 0) frame.ivar *= (flat.mask[starindices] == 0) frame.flux *= (frame.ivar > 0) # just for clean plots for star in range(frame.flux.shape[0]): ok = np.where((frame.ivar[star] > 0) & (flat.fiberflat[star] != 0))[0] if ok.size > 0: frame.flux[star] = frame.flux[star] / flat.fiberflat[ star] - sky.flux[star] frame.resolution_data = frame.resolution_data[starindices] # CHECK S/N ############################################ # for each band in 'brz', record quadratic sum of median S/N across wavelength snr = dict() for band in ['b', 'r', 'z']: snr[band] = np.zeros(starindices.size) for cam in frames: band = cam[0].lower() for frame in frames[cam]: msnr = np.median(frame.flux * np.sqrt(frame.ivar) / np.sqrt(np.gradient(frame.wave)), axis=1) # median SNR per sqrt(A.) msnr *= (msnr > 0) snr[band] = np.sqrt(snr[band]**2 + msnr**2) log.info("SNR(B) = {}".format(snr['b'])) ############################### max_number_of_stars = 50 min_blue_snr = 4. 
############################### indices = np.argsort(snr['b'])[::-1][:max_number_of_stars] validstars = np.where(snr['b'][indices] > min_blue_snr)[0] #- TODO: later we filter on models based upon color, thus throwing #- away very blue stars for which we don't have good models. log.info("Number of stars with median stacked blue S/N > {} /sqrt(A) = {}". format(min_blue_snr, validstars.size)) if validstars.size == 0: log.error("No valid star") sys.exit(12) validstars = indices[validstars] for band in ['b', 'r', 'z']: snr[band] = snr[band][validstars] log.info("BLUE SNR of selected stars={}".format(snr['b'])) for cam in frames: for frame in frames[cam]: frame.flux = frame.flux[validstars] frame.ivar = frame.ivar[validstars] frame.resolution_data = frame.resolution_data[validstars] starindices = starindices[validstars] starfibers = starfibers[validstars] nstars = starindices.size fibermap = Table(fibermap[starindices]) # MASK OUT THROUGHPUT DIP REGION ############################################ mask_throughput_dip_region = True if mask_throughput_dip_region: wmin = 4300. wmax = 4500. log.warning( "Masking out the wavelength region [{},{}]A in the standard star fit" .format(wmin, wmax)) for cam in frames: for frame in frames[cam]: ii = np.where((frame.wave >= wmin) & (frame.wave <= wmax))[0] if ii.size > 0: frame.ivar[:, ii] = 0 # READ MODELS ############################################ log.info("reading star models in %s" % args.starmodels) stdwave, stdflux, templateid, teff, logg, feh = io.read_stdstar_templates( args.starmodels) # COMPUTE MAGS OF MODELS FOR EACH STD STAR MAG ############################################ #- Support older fibermaps if 'PHOTSYS' not in fibermap.colnames: log.warning('Old fibermap format; using defaults for missing columns') log.warning(" PHOTSYS = 'S'") log.warning(" MW_TRANSMISSION_G/R/Z = 1.0") log.warning(" EBV = 0.0") fibermap['PHOTSYS'] = 'S' fibermap['MW_TRANSMISSION_G'] = 1.0 fibermap['MW_TRANSMISSION_R'] = 1.0 fibermap['MW_TRANSMISSION_Z'] = 1.0 fibermap['EBV'] = 0.0 model_filters = dict() for band in ["G", "R", "Z"]: for photsys in np.unique(fibermap['PHOTSYS']): model_filters[band + photsys] = load_legacy_survey_filter( band=band, photsys=photsys) log.info("computing model mags for %s" % sorted(model_filters.keys())) model_mags = dict() fluxunits = 1e-17 * units.erg / units.s / units.cm**2 / units.Angstrom for filter_name, filter_response in model_filters.items(): model_mags[filter_name] = filter_response.get_ab_magnitude( stdflux * fluxunits, stdwave) log.info("done computing model mags") # LOOP ON STARS TO FIND BEST MODEL ############################################ linear_coefficients = np.zeros((nstars, stdflux.shape[0])) chi2dof = np.zeros((nstars)) redshift = np.zeros((nstars)) normflux = [] star_mags = dict() star_unextincted_mags = dict() for band in ['G', 'R', 'Z']: star_mags[band] = 22.5 - 2.5 * np.log10(fibermap['FLUX_' + band]) star_unextincted_mags[band] = 22.5 - 2.5 * np.log10( fibermap['FLUX_' + band] / fibermap['MW_TRANSMISSION_' + band]) star_colors = dict() star_colors['G-R'] = star_mags['G'] - star_mags['R'] star_colors['R-Z'] = star_mags['R'] - star_mags['Z'] star_unextincted_colors = dict() star_unextincted_colors[ 'G-R'] = star_unextincted_mags['G'] - star_unextincted_mags['R'] star_unextincted_colors[ 'R-Z'] = star_unextincted_mags['R'] - star_unextincted_mags['Z'] fitted_model_colors = np.zeros(nstars) for star in range(nstars): log.info("finding best model for observed star #%d" % star) # np.array of wave,flux,ivar,resol wave = 
{} flux = {} ivar = {} resolution_data = {} for camera in frames: for i, frame in enumerate(frames[camera]): identifier = "%s-%d" % (camera, i) wave[identifier] = frame.wave flux[identifier] = frame.flux[star] ivar[identifier] = frame.ivar[star] resolution_data[identifier] = frame.resolution_data[star] # preselect models based on magnitudes photsys = fibermap['PHOTSYS'][star] if not args.color in ['G-R', 'R-Z']: raise ValueError('Unknown color {}'.format(args.color)) bands = args.color.split("-") model_colors = model_mags[bands[0] + photsys] - model_mags[bands[1] + photsys] color_diff = model_colors - star_unextincted_colors[args.color][star] selection = np.abs(color_diff) < args.delta_color if np.sum(selection) == 0: log.warning("no model in the selected color range for this star") continue # smallest cube in parameter space including this selection (needed for interpolation) new_selection = (teff >= np.min(teff[selection])) & (teff <= np.max( teff[selection])) new_selection &= (logg >= np.min(logg[selection])) & (logg <= np.max( logg[selection])) new_selection &= (feh >= np.min(feh[selection])) & (feh <= np.max( feh[selection])) selection = np.where(new_selection)[0] log.info( "star#%d fiber #%d, %s = %f, number of pre-selected models = %d/%d" % (star, starfibers[star], args.color, star_unextincted_colors[args.color][star], selection.size, stdflux.shape[0])) # Match unextincted standard stars to data coefficients, redshift[star], chi2dof[star] = match_templates( wave, flux, ivar, resolution_data, stdwave, stdflux[selection], teff[selection], logg[selection], feh[selection], ncpu=args.ncpu, z_max=args.z_max, z_res=args.z_res, template_error=args.template_error) linear_coefficients[star, selection] = coefficients log.info( 'Star Fiber: {}; TEFF: {:.3f}; LOGG: {:.3f}; FEH: {:.3f}; Redshift: {:g}; Chisq/dof: {:.3f}' .format(starfibers[star], np.inner(teff, linear_coefficients[star]), np.inner(logg, linear_coefficients[star]), np.inner(feh, linear_coefficients[star]), redshift[star], chi2dof[star])) # Apply redshift to original spectrum at full resolution model = np.zeros(stdwave.size) redshifted_stdwave = stdwave * (1 + redshift[star]) for i, c in enumerate(linear_coefficients[star]): if c != 0: model += c * np.interp(stdwave, redshifted_stdwave, stdflux[i]) # Apply dust extinction to the model log.info("Applying MW dust extinction to star {} with EBV = {}".format( star, fibermap['EBV'][star])) model *= dust_transmission(stdwave, fibermap['EBV'][star]) # Compute final color of dust-extincted model photsys = fibermap['PHOTSYS'][star] if not args.color in ['G-R', 'R-Z']: raise ValueError('Unknown color {}'.format(args.color)) bands = args.color.split("-") model_mag1 = model_filters[bands[0] + photsys].get_ab_magnitude( model * fluxunits, stdwave) model_mag2 = model_filters[bands[1] + photsys].get_ab_magnitude( model * fluxunits, stdwave) fitted_model_colors[star] = model_mag1 - model_mag2 if bands[0] == "R": model_magr = model_mag1 elif bands[1] == "R": model_magr = model_mag2 #- TODO: move this back into normalize_templates, at the cost of #- recalculating a model magnitude? 
# Normalize the best model using reported magnitude scalefac = 10**((model_magr - star_mags['R'][star]) / 2.5) log.info('scaling R mag {:.3f} to {:.3f} using scale {}'.format( model_magr, star_mags['R'][star], scalefac)) normflux.append(model * scalefac) # Now write the normalized flux for all best models to a file normflux = np.array(normflux) fitted_stars = np.where(chi2dof != 0)[0] if fitted_stars.size == 0: log.error("No star has been fit.") sys.exit(12) data = {} data['LOGG'] = linear_coefficients[fitted_stars, :].dot(logg) data['TEFF'] = linear_coefficients[fitted_stars, :].dot(teff) data['FEH'] = linear_coefficients[fitted_stars, :].dot(feh) data['CHI2DOF'] = chi2dof[fitted_stars] data['REDSHIFT'] = redshift[fitted_stars] data['COEFF'] = linear_coefficients[fitted_stars, :] data['DATA_%s' % args.color] = star_colors[args.color][fitted_stars] data['MODEL_%s' % args.color] = fitted_model_colors[fitted_stars] data['BLUE_SNR'] = snr['b'][fitted_stars] data['RED_SNR'] = snr['r'][fitted_stars] data['NIR_SNR'] = snr['z'][fitted_stars] io.write_stdstar_models(args.outfile, normflux, stdwave, starfibers[fitted_stars], data)
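# Illustrative sketch (not part of desispec): the magnitude bookkeeping above converts
# Legacy Survey fluxes (nanomaggies) to AB magnitudes with m = 22.5 - 2.5*log10(flux),
# removes Galactic extinction by dividing the flux by MW_TRANSMISSION before taking
# the log, and preselects templates whose synthetic color lies within delta_color of
# the star's unextincted color. The argument names below echo the fibermap columns
# used above; the data are made up.
import numpy as np

def _preselect_templates(flux_g, flux_r, mw_trans_g, mw_trans_r,
                         model_g, model_r, delta_color=0.2):
    """Return boolean mask of templates within delta_color of the star's g-r."""
    star_gr = (22.5 - 2.5 * np.log10(flux_g / mw_trans_g)) \
            - (22.5 - 2.5 * np.log10(flux_r / mw_trans_r))
    model_gr = model_g - model_r
    return np.abs(model_gr - star_gr) < delta_color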
def main(args) :
    log = get_logger()

    cmd = ['desi_compute_fluxcalibration',]
    for key, value in args.__dict__.items():
        if value is not None:
            cmd += ['--'+key, str(value)]
    cmd = ' '.join(cmd)
    log.info(cmd)

    log.info("read frame")
    # read frame
    frame = read_frame(args.infile)

    log.info("apply fiberflat")
    # read fiberflat
    fiberflat = read_fiberflat(args.fiberflat)

    # apply fiberflat
    apply_fiberflat(frame, fiberflat)

    log.info("subtract sky")
    # read sky
    skymodel = read_sky(args.sky)

    # subtract sky
    subtract_sky(frame, skymodel)

    log.info("compute flux calibration")

    # read models
    model_flux, model_wave, model_fibers, model_metadata = read_stdstar_models(args.models)

    if args.chi2cut > 0 :
        ok = np.where(model_metadata["CHI2DOF"] < args.chi2cut)[0]
        if ok.size == 0 :
            log.error("chi2cut has discarded all stars")
            sys.exit(12)
        nstars = model_flux.shape[0]
        nbad = nstars - ok.size
        if nbad > 0 :
            log.warning("discarding %d star(s) out of %d because of chi2cut"%(nbad, nstars))
            model_flux = model_flux[ok]
            model_fibers = model_fibers[ok]
            model_metadata = model_metadata[:][ok]

    if args.delta_color_cut > 0 :
        ok = np.where(np.abs(model_metadata["MODEL_G-R"] - model_metadata["DATA_G-R"]) < args.delta_color_cut)[0]
        nstars = model_flux.shape[0]
        nbad = nstars - ok.size
        if nbad > 0 :
            log.warning("discarding %d star(s) out of %d because |delta_color|>%f"%(nbad, nstars, args.delta_color_cut))
            model_flux = model_flux[ok]
            model_fibers = model_fibers[ok]
            model_metadata = model_metadata[:][ok]

    # automatically reject stars that are chi2 outliers
    if args.chi2cut_nsig > 0 :
        mchi2 = np.median(model_metadata["CHI2DOF"])
        rmschi2 = np.std(model_metadata["CHI2DOF"])
        maxchi2 = mchi2 + args.chi2cut_nsig*rmschi2
        ok = np.where(model_metadata["CHI2DOF"] <= maxchi2)[0]
        nstars = model_flux.shape[0]
        nbad = nstars - ok.size
        if nbad > 0 :
            log.warning("discarding %d star(s) out of %d because reduced chi2 outliers (at %d sigma, giving rchi2<%f )"%(nbad, nstars, args.chi2cut_nsig, maxchi2))
            model_flux = model_flux[ok]
            model_fibers = model_fibers[ok]
            model_metadata = model_metadata[:][ok]

    # check that the model_fibers are actually standard stars
    fibermap = frame.fibermap

    ## check whether star fibers from args.models are consistent with fibers from fibermap
    ## if not, print the OBJTYPE from fibermap for the fiber numbers in args.models and exit
    fibermap_std_indices = np.where(isStdStar(fibermap['DESI_TARGET']))[0]
    if np.any(~np.in1d(model_fibers%500, fibermap_std_indices)):
        for i in model_fibers%500:
            log.error("inconsistency with spectrum {}, OBJTYPE='{}', DESI_TARGET={} in fibermap".format(
                i, fibermap["OBJTYPE"][i], fibermap["DESI_TARGET"][i]))
        sys.exit(12)

    fluxcalib = compute_flux_calibration(frame, model_wave, model_flux, model_fibers%500)

    # QA
    if (args.qafile is not None):
        log.info("performing fluxcalib QA")
        # Load
        qaframe = load_qa_frame(args.qafile, frame, flavor=frame.meta['FLAVOR'])
        # Run
        #import pdb; pdb.set_trace()
        qaframe.run_qa('FLUXCALIB', (frame, fluxcalib))
        # Write
        if args.qafile is not None:
            write_qa_frame(args.qafile, qaframe)
            log.info("successfully wrote {:s}".format(args.qafile))
        # Figure(s)
        if args.qafig is not None:
            qa_plots.frame_fluxcalib(args.qafig, qaframe, frame, fluxcalib)

    # write result
    write_flux_calibration(args.outfile, fluxcalib, header=frame.meta)

    log.info("successfully wrote %s"%args.outfile)
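# Illustrative sketch (not part of desispec): the chi2cut_nsig selection above keeps
# stars whose reduced chi2 is below median + nsig * std of the sample. Standalone
# numpy version of that cut, applied to a bare chi2 array:
import numpy as np

def _chi2_outlier_mask(chi2dof, nsig=3.0):
    """True for stars kept by the median + nsig*std reduced-chi2 cut."""
    maxchi2 = np.median(chi2dof) + nsig * np.std(chi2dof)
    return chi2dof <= maxchi2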
def main(args): log = get_logger() cmd = [ 'desi_compute_fluxcalibration', ] for key, value in args.__dict__.items(): if value is not None: cmd += ['--' + key, str(value)] cmd = ' '.join(cmd) log.info(cmd) log.info("read frame") # read frame frame = read_frame(args.infile) # Set fibermask flagged spectra to have 0 flux and variance frame = get_fiberbitmasked_frame(frame, bitmask='flux', ivar_framemask=True) log.info("apply fiberflat") # read fiberflat fiberflat = read_fiberflat(args.fiberflat) # apply fiberflat apply_fiberflat(frame, fiberflat) log.info("subtract sky") # read sky skymodel = read_sky(args.sky) # subtract sky subtract_sky(frame, skymodel) log.info("compute flux calibration") # read models model_flux, model_wave, model_fibers, model_metadata = read_stdstar_models( args.models) ok = np.ones(len(model_metadata), dtype=bool) if args.chi2cut > 0: log.info("Apply cut CHI2DOF<{}".format(args.chi2cut)) ok &= (model_metadata["CHI2DOF"] < args.chi2cut) if args.delta_color_cut > 0: log.info("Apply cut |delta color|<{}".format(args.delta_color_cut)) ok &= (np.abs(model_metadata["MODEL_G-R"] - model_metadata["DATA_G-R"]) < args.delta_color_cut) if args.min_color is not None: log.info("Apply cut DATA_G-R>{}".format(args.min_color)) ok &= (model_metadata["DATA_G-R"] > args.min_color) if args.chi2cut_nsig > 0: # automatically reject stars that ar chi2 outliers mchi2 = np.median(model_metadata["CHI2DOF"]) rmschi2 = np.std(model_metadata["CHI2DOF"]) maxchi2 = mchi2 + args.chi2cut_nsig * rmschi2 log.info("Apply cut CHI2DOF<{} based on chi2cut_nsig={}".format( maxchi2, args.chi2cut_nsig)) ok &= (model_metadata["CHI2DOF"] <= maxchi2) ok = np.where(ok)[0] if ok.size == 0: log.error("cuts discarded all stars") sys.exit(12) nstars = model_flux.shape[0] nbad = nstars - ok.size if nbad > 0: log.warning("discarding %d star(s) out of %d because of cuts" % (nbad, nstars)) model_flux = model_flux[ok] model_fibers = model_fibers[ok] model_metadata = model_metadata[:][ok] # check that the model_fibers are actually standard stars fibermap = frame.fibermap ## check whether star fibers from args.models are consistent with fibers from fibermap ## if not print the OBJTYPE from fibermap for the fibers numbers in args.models and exit fibermap_std_indices = np.where(isStdStar(fibermap))[0] if np.any(~np.in1d(model_fibers % 500, fibermap_std_indices)): target_colnames, target_masks, survey = main_cmx_or_sv(fibermap) colname = target_colnames[0] for i in model_fibers % 500: log.error( "inconsistency with spectrum {}, OBJTYPE={}, {}={} in fibermap" .format(i, fibermap["OBJTYPE"][i], colname, fibermap[colname][i])) sys.exit(12) # Make sure the fibers of interest aren't entirely masked. 
if np.sum( np.sum(frame.ivar[model_fibers % 500, :] == 0, axis=1) == frame.nwave) == len(model_fibers): log.warning('All standard-star spectra are masked!') return fluxcalib = compute_flux_calibration( frame, model_wave, model_flux, model_fibers % 500, highest_throughput_nstars=args.highest_throughput) # QA if (args.qafile is not None): log.info("performing fluxcalib QA") # Load qaframe = load_qa_frame(args.qafile, frame_meta=frame.meta, flavor=frame.meta['FLAVOR']) # Run #import pdb; pdb.set_trace() qaframe.run_qa('FLUXCALIB', (frame, fluxcalib)) # Write if args.qafile is not None: write_qa_frame(args.qafile, qaframe) log.info("successfully wrote {:s}".format(args.qafile)) # Figure(s) if args.qafig is not None: qa_plots.frame_fluxcalib(args.qafig, qaframe, frame, fluxcalib) # write result write_flux_calibration(args.outfile, fluxcalib, header=frame.meta) log.info("successfully wrote %s" % args.outfile)
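# Illustrative sketch (not part of desispec): the guard above returns early when every
# selected standard-star spectrum has ivar == 0 at all wavelengths. The same test
# written on a bare (nspec, nwave) ivar array:
import numpy as np

def _all_spectra_fully_masked(ivar):
    """True if every row of ivar is zero everywhere."""
    nwave = ivar.shape[1]
    fully_masked_per_spectrum = np.sum(ivar == 0, axis=1) == nwave
    return np.all(fully_masked_per_spectrum)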
def main(args):
    log = get_logger()

    if (args.fiberflat is None) and (args.sky is None) and (args.calib is None):
        log.critical('no --fiberflat, --sky, or --calib; nothing to do ?!?')
        sys.exit(12)

    if (not args.no_tsnr) and (args.calib is None):
        log.critical('need --fiberflat --sky and --calib to compute template SNR')
        sys.exit(12)

    frame = read_frame(args.infile)

    if not args.no_tsnr:
        # tsnr alpha calc. requires an uncalibrated, non-sky-subtracted frame.
        uncalibrated_frame = copy.deepcopy(frame)

    #- Raw scores already added in extraction, but just in case they weren't
    #- it is harmless to rerun to make sure we have them.
    compute_and_append_frame_scores(frame, suffix="RAW")

    if args.cosmics_nsig > 0 and args.sky == None:
        # Reject cosmics (otherwise do it after sky subtraction)
        log.info("cosmics ray 1D rejection")
        reject_cosmic_rays_1d(frame, args.cosmics_nsig)

    if args.fiberflat != None:
        log.info("apply fiberflat")
        # read fiberflat
        fiberflat = read_fiberflat(args.fiberflat)
        # apply fiberflat to all fibers
        apply_fiberflat(frame, fiberflat)
        compute_and_append_frame_scores(frame, suffix="FFLAT")
    else:
        fiberflat = None

    if args.no_xtalk:
        zero_ivar = (not args.no_zero_ivar)
    else:
        zero_ivar = False

    if args.sky != None:
        # read sky
        skymodel = read_sky(args.sky)

        if args.cosmics_nsig > 0:
            # use a copy of the frame (not elegant but robust)
            copied_frame = copy.deepcopy(frame)

            # first subtract sky without throughput correction
            subtract_sky(copied_frame, skymodel,
                         apply_throughput_correction=False,
                         zero_ivar=zero_ivar)

            # then find cosmics
            log.info("cosmics ray 1D rejection after sky subtraction")
            reject_cosmic_rays_1d(copied_frame, args.cosmics_nsig)

            # copy mask
            frame.mask = copied_frame.mask

            # and (re-)subtract sky, but just the correction term
            subtract_sky(frame, skymodel,
                         apply_throughput_correction=(not args.no_sky_throughput_correction),
                         zero_ivar=zero_ivar)
        else:
            # subtract sky
            subtract_sky(frame, skymodel,
                         apply_throughput_correction=(not args.no_sky_throughput_correction),
                         zero_ivar=zero_ivar)

        compute_and_append_frame_scores(frame, suffix="SKYSUB")

    if not args.no_xtalk:
        log.info("fiber crosstalk correction")
        correct_fiber_crosstalk(frame, fiberflat)

        if not args.no_zero_ivar:
            frame.ivar *= (frame.mask == 0)

    if args.calib != None:
        log.info("calibrate")
        # read calibration
        fluxcalib = read_flux_calibration(args.calib)
        # apply calibration
        apply_flux_calibration(frame, fluxcalib)

        # Ensure that ivars are set to 0 for all values if any designated
        # fibermask bit is set. Also flips a bit for each frame.mask value using specmask.BADFIBER
        frame = get_fiberbitmasked_frame(frame, bitmask="flux",
                                         ivar_framemask=(not args.no_zero_ivar))
        compute_and_append_frame_scores(frame, suffix="CALIB")

    if not args.no_tsnr:
        log.info("calculating tsnr")
        results, alpha = calc_tsnr2(uncalibrated_frame,
                                    fiberflat=fiberflat,
                                    skymodel=skymodel,
                                    fluxcalib=fluxcalib,
                                    alpha_only=args.alpha_only)

        frame.meta['TSNRALPH'] = alpha

        comments = {k: "from calc_frame_tsnr" for k in results.keys()}
        append_frame_scores(frame, results, comments, overwrite=True)

    # record inputs
    frame.meta['IN_FRAME'] = shorten_filename(args.infile)
    frame.meta['FIBERFLT'] = shorten_filename(args.fiberflat)
    frame.meta['IN_SKY'] = shorten_filename(args.sky)
    frame.meta['IN_CALIB'] = shorten_filename(args.calib)

    # save output
    write_frame(args.outfile, frame, units='10**-17 erg/(s cm2 Angstrom)')

    log.info("successfully wrote %s" % args.outfile)
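# Illustrative sketch (not part of desispec): applying a calibration vector divides the
# counts by calib(wave) and multiplies the inverse variance by calib(wave)**2, since
# var(flux/c) = var(flux)/c**2. The real apply_flux_calibration also propagates the
# calibration uncertainty and handles masked pixels; this toy only shows the core
# scaling on plain numpy arrays of assumed shapes (nspec, nwave) and (nwave,).
import numpy as np

def _toy_apply_calibration(flux, ivar, calib):
    """Return calibrated flux and ivar where calib > 0, zeros elsewhere."""
    good = calib > 0
    cal_flux = np.zeros_like(flux)
    cal_ivar = np.zeros_like(ivar)
    cal_flux[:, good] = flux[:, good] / calib[good]
    cal_ivar[:, good] = ivar[:, good] * calib[good] ** 2
    return cal_flux, cal_ivar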