def KLIP_Extraction(dataset, PSF_cube, posn, numthreads):
    """Forward-model and extract a companion spectrum with KLIP-FM.

    Parameters
    ----------
    dataset : pyklip Data object (uses .input, .wvs, .fmout)
    PSF_cube : instrumental PSF model passed to es.ExtractSpec
    posn : tuple of (separation [pixels], position angle [degrees])
    numthreads : number of threads passed to fm.klip_dataset

    Returns
    -------
    (exspect, fm_matrix) as produced by es.invert_spect_fmodel; both are
    also saved as .npy files under data_dir + "pyklip/".

    NOTE(review): relies on module-level globals (stamp_size, sections,
    numbasis, maxnumbasis, movement, instrument, planet_name, data_dir)
    -- confirm they are defined before calling.
    """
    planet_sep, planet_pa = posn

    # Wedge subsections centered on the planet PA, each 4*stamp_size degrees
    # wide, converted to radians.
    subsections = [[(planet_pa + (4.0 * i * stamp_size) - 2.0 * stamp_size) / 180. * np.pi,
                    (planet_pa + (4.0 * i * stamp_size) + 2.0 * stamp_size) / 180. * np.pi]
                   for i in range(sections)]

    # No spectral template: all wavelengths weighted equally.
    spectra_template = None  # np.tile(np.array(exspect[i]), N_cubes)

    ###### The forward model class ######
    fm_class = es.ExtractSpec(dataset.input.shape,
                              numbasis,
                              planet_sep,
                              planet_pa,
                              PSF_cube,
                              np.unique(dataset.wvs),
                              stamp_size=stamp_size,
                              datatype='double')

    ###### Now run KLIP! ######
    # Single annulus bracketing the planet separation by 1.5 stamp sizes.
    fm.klip_dataset(dataset, fm_class,
                    fileprefix=instrument + "_" + planet_name + "_fmspect",
                    annuli=[[planet_sep - 1.5 * stamp_size, planet_sep + 1.5 * stamp_size]],
                    subsections=subsections,
                    movement=movement,
                    #flux_overlap = 0.1,
                    numbasis=numbasis,
                    maxnumbasis=maxnumbasis,
                    numthreads=numthreads,
                    spectrum=spectra_template,
                    #time_collapse = 'weighted-mean',
                    save_klipped=True,
                    highpass=True,
                    calibrate_flux=True,
                    outputdir=data_dir + "pyklip/")
    #klipped = dataset.fmout[:,:,-1,:]

    # If you want to scale your spectrum by a calibration factor:
    units = "natural"
    scaling_factor = 1.0
    exspect, fm_matrix = es.invert_spect_fmodel(dataset.fmout, dataset,
                                                units=units,
                                                scaling_factor=scaling_factor,
                                                method="leastsq")

    np.save(data_dir + "pyklip/exspect", exspect)
    # BUG FIX: fm_matrix was previously written to the "exspect" file,
    # overwriting the spectrum saved just above.
    np.save(data_dir + "pyklip/fm_matrix", fm_matrix)
    return exspect, fm_matrix
def run_KLIP(self):
    """Perform the KLIP-FM reduction on this object's dataset.

    Feeds the stored fm_class and all configured reduction parameters to
    pyklip's fm.klip_dataset. This is the last step before the MCMC.
    """
    klip_kwargs = dict(mode="ADI",
                       outputdir=self.outputdir,
                       fileprefix=self.prefix,
                       numbasis=self.numbasis,
                       annuli=self.annulus_bounds,
                       subsections=self.subsections,
                       padding=self.padding,
                       movement=self.movement,
                       numthreads=self.cores,
                       highpass=self.hpf,
                       corr_smooth=1)
    fm.klip_dataset(self.dataset, self.fm_class, **klip_kwargs)
    print('Done constructing forward model! You are ready to MCMC.')
def get_astrometry(dataset, PSF_cube, guesssep, guesspa, guessflux, data_dir, planet_name):
    """Fit companion astrometry via pyklip KLIP-FM + MCMC forward modeling.

    Parameters
    ----------
    dataset : pyklip Data object (uses .dn_per_contrast, .psfs, .wvs, .input)
    PSF_cube : unused here -- kept for interface compatibility with callers
    guesssep : initial guess separation [pixels]
    guesspa : initial guess position angle [degrees]
    guessflux : initial guess contrast
    data_dir : root directory for outputs (a "pyklip/" subdir is created)
    planet_name : label used in output file names

    Returns
    -------
    (fit.sep.bestfit, fit.PA.bestfit) -- best-fit separation and PA.

    Raises
    ------
    ValueError if the module-level `instrument` is neither GPI nor SPHERE.
    """
    if not os.path.isdir(data_dir + "pyklip"):
        os.makedirs(data_dir + "pyklip", exist_ok=True)

    #### Astrometry Prep ###
    # BUG FIX: the guesssep/guesspa/guessflux arguments were previously
    # clobbered by hard-coded values, defeating the function's interface.
    # The caller-supplied guesses are now honored.
    dn_per_contrast = dataset.dn_per_contrast  # factor to scale PSF to star PSF
    numbasis = [5, 10]

    # initialize the FM Planet PSF class
    fm_class = fmpsf.FMPlanetPSF(dataset.input.shape, numbasis, guesssep,
                                 guesspa, guessflux, dataset.psfs,
                                 np.unique(dataset.wvs), dn_per_contrast,
                                 star_spt='A0V', spectrallib=None)

    # Astrometry KLIP
    # PSF subtraction parameters -- change these to suit your data!
    outputdir = data_dir + "pyklip/"  # where to write the output files
    prefix = instrument + "_" + planet_name + "_fmpsf"  # output file prefix
    # one annulus centered on the planet, one for covariance
    annulus_bounds = [[guesssep - 15, guesssep + 15], [60, 75]]
    subsections = 10  # break each annulus into 10 azimuthal sectors
    padding = 0  # we are not padding our zones
    movement = 4  # conservative exclusion criterion of 4 pixels

    # run KLIP-FM
    fm.klip_dataset(dataset, fm_class, outputdir=outputdir, fileprefix=prefix,
                    numbasis=numbasis, annuli=annulus_bounds,
                    subsections=subsections, padding=padding, movement=movement)

    ### FIT ASTROMETRY ###
    # read in outputs
    output_prefix = os.path.join(outputdir, prefix)
    # NOTE(review): test code elsewhere reads "-fmpsf-KLmodes-all.fits";
    # confirm which file name your pyklip version writes here.
    fm_hdu = fits.open(output_prefix + "-model-KLmodes-all.fits")
    data_hdu = fits.open(output_prefix + "-klipped-KLmodes-all.fits")

    # get FM frame; slice index 1 selects the second KL-mode cut (numbasis[1])
    fm_frame = fm_hdu[1].data[1]
    fm_centx = fm_hdu[1].header['PSFCENTX']
    fm_centy = fm_hdu[1].header['PSFCENTY']

    # get data_stamp frame, same KL-mode slice
    data_frame = data_hdu[1].data[1]
    data_centx = data_hdu[1].header["PSFCENTX"]
    data_centy = data_hdu[1].header["PSFCENTY"]

    # refine initial guesses from the forward-model header
    guesssep = fm_hdu[0].header['FM_SEP']
    guesspa = fm_hdu[0].header['FM_PA']

    # create FM Astrometry object that does MCMC fitting
    fit = fitpsf.FMAstrometry(guesssep, guesspa, 13, method="mcmc")
    # alternatively, could use maximum likelihood fitting
    # fit = fitpsf.FMAstrometry(guesssep, guesspa, 13, method="maxl")

    # generate FM stamp
    # padding should be greater than 0 so we don't run into interpolation problems
    fit.generate_fm_stamp(fm_frame, [fm_centx, fm_centy], padding=5)

    # generate data_stamp stamp
    # dr=4 means we use a 4-pixel-wide annulus to sample the noise for each pixel
    # exclusion_radius excludes all pixels less than that distance from the
    # estimated location of the planet
    fit.generate_data_stamp(data_frame, [data_centx, data_centy], dr=4,
                            exclusion_radius=10)

    # set kernel, no read noise
    corr_len_guess = 3.
    corr_len_label = r"$l$"
    fit.set_kernel("matern32", [corr_len_guess], [corr_len_label])

    # set bounds
    x_range = 8  # pixels
    y_range = 15  # pixels
    flux_range = 2.  # flux can vary by an order of magnitude
    corr_len_range = 3.  # between 0.3 and 30
    fit.set_bounds(x_range, y_range, flux_range, [corr_len_range])

    # run MCMC fit
    fit.fit_astrometry(nwalkers=100, nburn=500, nsteps=2200, numthreads=3)
    plot_astrometry(fit, data_dir, planet_name)

    # instrument-specific plate scale [mas/pix] and its uncertainty
    if "gpi" in instrument.lower():
        platescale = GPI.GPIData.lenslet_scale * 1000
        plate_err = 0.007
    elif "sphere" in instrument.lower():
        platescale = dataset.platescale * 1000
        plate_err = 0.02
    else:
        # BUG FIX: previously fell through with platescale undefined (and a
        # dangling `parang_err =` statement was a SyntaxError).
        raise ValueError("Unknown instrument: " + instrument)

    # Outputs and Error Propagation
    fit.propogate_errs(star_center_err=0.05, platescale=platescale,
                       platescale_err=plate_err, pa_offset=-0.1,
                       pa_uncertainty=0.13)
    write_astrometry(fit, data_dir, planet_name)

    return (fit.sep.bestfit, fit.PA.bestfit)
# PSF subtraction parameters # You should change these to be suited to your data! outputdir = output # where to write the output files prefix = pre # fileprefix for the output files annulus_bounds = [annulus] # one annulus centered on the planet subsections = 1 # we are not breaking up the annulus padding = 0 # we are not padding our zones movement = move # run KLIP-FM import pyklip.fm as fm fm.klip_dataset(dataset, fm_class, mode="ADI", outputdir=outputdir, fileprefix=prefix, numbasis=numbasis, annuli=annulus_bounds, subsections=subsections, padding=padding, movement=movement) else: # run William's script which asks for inputs # check if they have a ghost/psf nearby ghostpath = input( 'Enter the path to your instrumental psf (enter \'none\' to generate one): ' ) if ghostpath == 'none': cubepath = input('enter path to your MagAO image cube: ')
# NOTE(review): excerpt -- these first lines are the tail of a DiskFM(...)
# constructor call whose opening parenthesis is outside this view.
numbasis, dataset, model_convolved,
basis_filename=os.path.join(dir_test, "test_results/" + fileprefix + "_KLbasis.h5"),
save_basis=True,  # write the KL basis so it can be reloaded below
aligned_center=aligned_center,
)
# Run KLIP-FM once; this measures the KL basis through the disk FM object
# and writes the reduction products under test_results/.
fm.klip_dataset(
    dataset,
    diskobj,
    outputdir=dir_test + "test_results/",
    fileprefix=fileprefix,
    annuli=annuli,
    subsections=subsections,
    numbasis=numbasis,
    maxnumbasis=maxnumbasis,
    mode="ADI",
    aligned_center=aligned_center,
    highpass=False,
    mute_progression=True,
)
# Re-create the DiskFM object, this time loading the KL basis saved above
# instead of recomputing it.
diskobj = DiskFM(dataset.input.shape,
                 numbasis,
                 dataset,
                 model_convolved,
                 basis_filename=os.path.join(
                     dir_test, "test_results/" + fileprefix + "_KLbasis.h5"),
                 load_from_basis=True)
# NOTE(review): excerpt -- these first lines are the tail of a DiskFM(...)
# constructor call whose opening parenthesis is outside this view; the final
# DiskFM(...) call below is also cut off at the end of the excerpt.
model_multi_spectra_initial,
basis_filename=os.path.join(
    resultdir, file_prefix_all + '_klip-basis.h5'),
save_basis=True,  # write the KL basis to disk for fast reloading
aligned_center=[140, 140])

# measure the KL basis and save it
maxnumbasis = dataset.input.shape[0]
fm.klip_dataset(dataset,
                diskobj,
                numbasis=KLMODE,
                maxnumbasis=maxnumbasis,
                annuli=1,
                subsections=1,
                mode='ADI',
                outputdir=resultdir,
                fileprefix=file_prefix_all,
                aligned_center=[140, 140],
                mute_progression=True,
                highpass=False,
                minrot=3,
                calibrate_flux=True,
                numthreads=1)
enablePrint()

# We load the KL basis. This step is fairly long. Once loaded the variable
# diskobj can be passed without having to reload the KL basis.
blockPrint()  # suppress console output during the (verbose) basis load
diskobj = DiskFM(dataset.input.shape, KLMODE, dataset,
def gen_fm(dataset, pars, numbasis=20, mv=2.0, stamp=10, numthreads=4,
           maxnumbasis=100, spectra_template=None, manual_psfs=None,
           aligned_center=None):
    """Generate a KLIP forward model for companion spectral extraction.

    inputs:
        - dataset - pyklip Data object (uses .input, .exthdrs, .wvs, and
          .psfs/.spot_flux unless manual_psfs is given).
        - pars - tuple of planet position (sep (pixels), pa (deg)).
        - numbasis - can be a list or a single number
        - mv - klip movement (pixels)
        - stamp - size of box around companion for FM
        - numthreads (default=4)
        - maxnumbasis - maximum number of KL basis vectors (default=100)
        - spectra_template - Can provide a template, default is None
        - manual_psfs - If dataset does not have attribute "psfs" will look
          for manual input of psf model.
        - aligned_center - pass to klip_dataset

    returns:
        dataset.fmout - the forward-model output array.

    raises:
        AttributeError if neither dataset.psfs nor manual_psfs is available.
    """
    movement = mv
    stamp_size = stamp
    # Wavelength slices per cube = total frames / number of cubes.
    N_frames = len(dataset.input)
    N_cubes = len(dataset.exthdrs)
    nl = N_frames // N_cubes

    print("====================================")
    print("planet separation, pa: {0}".format(pars))
    print("numbasis: {0}".format(numbasis))
    print("movement: {0}".format(mv))
    print("====================================")
    print("Generating forward model...")

    planet_sep, planet_pa = pars

    # If 'dataset' does not already have psf model, check if manual_psfs not None.
    if hasattr(dataset, "psfs"):
        print("Using dataset PSF model.")
        # Normalize each wavelength slice by the mean satellite-spot flux.
        radial_psfs = dataset.psfs / \
            (np.mean(dataset.spot_flux.reshape([dataset.spot_flux.shape[0] // nl, nl]),
                     axis=0)[:, None, None])
    elif manual_psfs is not None:
        radial_psfs = manual_psfs
    else:
        raise AttributeError("dataset has no psfs attribute. \n"+\
            "Either run dataset.generate_psfs before gen_fm or"+\
            "provide psf models in keyword manual_psfs. \n"+\
            "examples/FM_spectral_extraction_tutorial.py for example.")

    # The forward model class
    fm_class = ExtractSpec(dataset.input.shape,
                           numbasis,
                           planet_sep,
                           planet_pa,
                           radial_psfs,
                           np.unique(dataset.wvs),
                           stamp_size=stamp_size)

    # Now run KLIP! One annulus and one wedge subsection centered on the planet.
    fm.klip_dataset(dataset, fm_class,
                    fileprefix="fmspect",
                    annuli=[[planet_sep - stamp, planet_sep + stamp]],
                    subsections=[[(planet_pa - stamp) / 180. * np.pi,
                                  (planet_pa + stamp) / 180. * np.pi]],
                    movement=movement,
                    numbasis=numbasis,
                    maxnumbasis=maxnumbasis,
                    numthreads=numthreads,
                    spectrum=spectra_template,
                    save_klipped=False,
                    highpass=True,
                    aligned_center=aligned_center)
    return dataset.fmout
def do_fm_pyklip(modfm, dataset, new_model):
    """Forward-model a disk model through a KLIP basis with pyklip's DiskFM.

    NOTE(review): the `modfm` and `dataset` arguments are immediately
    rebuilt/overwritten below, and several names (mctype, s_ident, which,
    path_data, load_from_basis, save_basis, basis_fn, numthreads, mod_I,
    radii_data) must exist at module level -- confirm before reuse.

    Returns the FM'd images of `new_model` averaged over the exposure axis,
    one frame per KL mode.
    """
    import pyklip.instruments.GPI as GPI
    from pyklip import fm
    from pyklip.fmlib import diskfm

    # Define KLIP parameters used for data.
    # Settings are for a9s1mv1_medcollapse.
    ann = 9
    subs = 1
    mvmt = 1
    minrot = None
    highpass = False
    sufx = '_%s_%s_%slk' % (mctype, s_ident, which)
    kl = 1  # KL mode whose FM image is selected below as mod_I_fm

    # NOTE that a9s1mv1_medcollapse used subset of images: 70-99 inclusive.
    fl = [
        path_data +
        'S20160228S%04d_spdc_distorcorr_phot_4p_hpNone_Jy_arcsec-2.fits' % ii
        for ii in range(70, 114)
    ]
    dataset = GPI.GPIData(fl, highpass=highpass, meas_satspot_flux=False)
    # Manually decreasing inner working angle to improve inner KLIP.
    dataset.IWA = 10  # [pix]
    dataset.OWA = 135
    # Manually set plate scale to best known value.
    dataset.lenslet_scale = 0.014166  # [arcsec/pix] best as of 6-2016

    numbasis = np.array([1, 2, 3, 10, 20, 50])
    maxnumbasis = 50
    star = np.array([140, 140])  #np.mean(dataset.centers, axis=0)

    collapse_spec = True
    # If desired, collapse the spec cube as sum of wavelength channels.
    if collapse_spec and dataset.prihdrs[0]['DISPERSR'] != 'WOLLASTON':
        input_collapsed = []
        # Sum each spec cube along wavelength axis to collapse channels.
        for fn in fl:
            # input_collapsed.append(numpy.nanmedian(fits.getdata(fn), axis=0))
            input_collapsed.append(np.sum(fits.getdata(fn), axis=0))
        input_collapsed = np.array(input_collapsed)
        dataset.input = input_collapsed

        # Average centers of all wavelength slices and store as new centers.
        # 37 is the number of GPI spectral channels per cube.
        centers_collapsed = []
        sl = 0
        while sl < dataset.centers.shape[0]:
            centers_collapsed.append(
                np.mean(dataset.centers[sl:sl + 37], axis=0))
            sl += 37
        centers_collapsed = np.array(centers_collapsed)
        dataset.centers = centers_collapsed

        # Reduce dataset info from 37 slices to 1 slice.
        dataset.PAs = dataset.PAs[list(range(0, len(dataset.PAs), 37))]
        dataset.filenums = dataset.filenums[list(
            range(0, len(dataset.filenums), 37))]
        dataset.filenames = dataset.filenames[list(
            range(0, len(dataset.filenames), 37))]
        dataset.wcs = dataset.wcs[list(range(0, len(dataset.wcs), 37))]
        # Lie to pyklip about wavelengths.
        dataset.wvs = np.ones(input_collapsed.shape[0])

    # Create object from diskfm.DiskFM class.
    print("\nInitializing DiskFM object...")
    modfm = diskfm.DiskFM(dataset.input.shape,
                          np.array(numbasis),
                          dataset,
                          mod_I,
                          load_from_basis=load_from_basis,
                          save_basis=save_basis,
                          annuli=ann,
                          subsections=subs,
                          OWA=dataset.OWA,
                          basis_filename=basis_fn,
                          numthreads=numthreads)
    # TEMP!!!
    modfm.maxnumbasis = maxnumbasis
    # modfm.numthreads = numthreads

    # Build the output file prefix from the KLIP settings.
    # NOTE(review): fname stays undefined if both mvmt and minrot are None.
    if mvmt is not None:
        fname = 'hd35841_pyklipfm_a%ds%dmv%d_hp%.1f_k%d-%d' % (
            ann, subs, mvmt, highpass, numbasis[0], numbasis[-1]) + sufx
    elif minrot is not None:
        fname = 'hd35841_pyklipfm_a%ds%dmr%d_hp%.1f_k%d-%d' % (
            ann, subs, minrot, highpass, numbasis[0], numbasis[-1]) + sufx

    if load_from_basis:
        # # Set model's aligned center property (do usual swap of y,x).
        # modfm.aligned_center = mod_cen_aligned[::-1]
        # Use loaded basis vectors to FM the original disk model
        # (get images grouped by KL mode).
        fmsub_mod_imgs = modfm.fm_parallelized()
        # # Save the fm output FITS to disk.
        # modfm.save_fmout(dataset, fmsub_mod_imgs, path[:-1], fname, numbasis, '', False, None)
        # Take mean across the FM'd images for each KL mode.
        fmsub_mod = np.mean(fmsub_mod_imgs, axis=1)
        # Mask interior to the IWA (pyklip includes r=IWA pixels in first annulus).
        fmsub_mod[:, radii_data < dataset.IWA] = np.nan
        mod_I_fm = fmsub_mod[np.where(numbasis == kl)[0][0]]
    else:
        # FIX ME!!! FM without saved bases is likely broken.
        # pyklip FM the model dataset (similar yet distinct function from
        # pyklip.klip_dataset). This writes the self-subtracted model and the
        # klip'd data to disk but does not output any arguments.
        if ann == 1:
            padding = 0
        else:
            padding = 3
        print(
            "KLIP FM without a saved basis set is NOT FUNCTIONAL! Will probably fail."
        )
        fmout = fm.klip_dataset(
            dataset,
            modfm,
            mode='ADI',
            outputdir=path_data,
            fileprefix=fname,
            annuli=ann,
            subsections=subs,
            OWA=dataset.OWA,
            N_pix_sector=None,
            movement=mvmt,
            minrot=minrot,
            numbasis=np.array(numbasis),
            maxnumbasis=maxnumbasis,
            numthreads=numthreads,
            calibrate_flux=False,
            aligned_center=star[::-1],  #aligned_center=mod_cen_aligned[::-1]
            spectrum=None,
            highpass=highpass,
            save_klipped=False,
            padding=padding,
            mute_progression=False)

    # Update the model image in modfm object to a new model.
    modfm.update_disk(new_model)
    # # Load the KL basis info from log file instead of slowly recalculating.
    # modfm.load_basis_files(modfm.basis_filename)
    # FM the new disk model.
    fmsub_mod_imgs = modfm.fm_parallelized()
    # # Save the fm output FITS to disk.
    # modfm.save_fmout(dataset, fmsub_mod_imgs, path[:-1], fname, numbasis, '', False, None)
    # Take mean across the FM'd images for each KL mode.
    fmsub_mod = np.nanmean(fmsub_mod_imgs, axis=1)
    # # Mask interior to the IWA (pyklip includes r=IWA pixels in first annulus).
    # fmsub_mod[:, radii < dataset.IWA] = numpy.nan
    return fmsub_mod
def test_fmastrometry():
    """ Tests FM astrometry using MCMC + GP Regression """
    # time it
    t1 = time.time()

    # # open up already generated FM and data_stamp
    # fm_hdu = fits.open("/home/jwang/GPI/betapic/fm_models/final_altpsf/pyklip-131118-h-k100m4-dIWA8-nohp-klipfm-KL7cube.fits")
    # data_hdu = fits.open("/home/jwang/GPI/betapic/klipped/final_altpsf/pyklip-131118-h-k100m4-dIWA8-nohp-onezone-KL7cube.fits")

    ########### generate FM ############
    # grab the files
    filelist = glob.glob(testdir + os.path.join("data", "S20131210*distorcorr.fits"))
    filelist.sort()
    # hopefully there is still 3 filelists
    assert (len(filelist) == 3)

    # only read in one spectral channel (keep slices 7 and 33 of the 37)
    skipslices = [i for i in range(37) if i != 7 and i != 33]
    # read in data
    dataset = GPI.GPIData(filelist, highpass=9, skipslices=skipslices)
    numwvs = np.size(np.unique(dataset.wvs))
    assert (numwvs == 2)

    # save old centers for later
    oldcenters = np.copy(dataset.centers)

    # generate PSF, normalized by mean satellite-spot flux per wavelength
    dataset.generate_psfs(boxrad=25 // 2)
    dataset.psfs /= (np.mean(dataset.spot_flux.reshape(
        [dataset.spot_flux.shape[0] // numwvs, numwvs]),
                             axis=0)[:, None, None])

    # read in model spectrum
    model_file = os.path.join(testdir, "..", "pyklip", "spectra", "cloudy",
                              "t1600g100f2.flx")
    spec_dat = np.loadtxt(model_file)
    spec_wvs = spec_dat[1]
    spec_f = spec_dat[3]
    spec_interp = sinterp.interp1d(spec_wvs, spec_f, kind='nearest')
    inputspec = spec_interp(np.unique(dataset.wvs))

    # setup FM guesses
    numbasis = np.array([1, 7, 100])
    guesssep = 0.4267 / GPI.GPIData.lenslet_scale  # arcsec -> pixels
    guesspa = 212.15  # degrees
    guessflux = 5e-5  # contrast
    print(guesssep, guesspa)
    fm_class = fmpsf.FMPlanetPSF(dataset.input.shape, numbasis, guesssep,
                                 guesspa, guessflux, dataset.psfs,
                                 np.unique(dataset.wvs),
                                 dataset.dn_per_contrast,
                                 star_spt='A6',
                                 spectrallib=[inputspec])
    # run KLIP-FM
    prefix = "betpic-131210-j-fmpsf"
    fm.klip_dataset(dataset,
                    fm_class,
                    outputdir=testdir,
                    fileprefix=prefix,
                    numbasis=numbasis,
                    annuli=[[guesssep - 15, guesssep + 15]],
                    subsections=1,
                    padding=0,
                    movement=2)

    # before we do anything else, check that dataset.centers remains unchanged
    assert (dataset.centers[0][0] == oldcenters[0][0])

    # read in outputs
    output_prefix = os.path.join(testdir, prefix)
    fm_hdu = fits.open(output_prefix + "-fmpsf-KLmodes-all.fits")
    data_hdu = fits.open(output_prefix + "-klipped-KLmodes-all.fits")

    # get FM frame (average over KL-mode cuts)
    fm_frame = np.nanmean(fm_hdu[1].data, axis=0)
    fm_centx = fm_hdu[1].header['PSFCENTX']
    fm_centy = fm_hdu[1].header['PSFCENTY']

    # get data_stamp frame
    data_frame = np.nanmean(data_hdu[1].data, axis=0)
    data_centx = data_hdu[1].header["PSFCENTX"]
    data_centy = data_hdu[1].header["PSFCENTY"]

    # get initial guesses from the forward-model header
    guesssep = fm_hdu[0].header['FM_SEP']
    guesspa = fm_hdu[0].header['FM_PA']

    # create FM Astrometry object
    fma = fitpsf.FMAstrometry(guesssep, guesspa, 9)
    # generate FM stamp
    fma.generate_fm_stamp(fm_frame, [fm_centx, fm_centy], padding=5)
    # generate data_stamp stamp
    fma.generate_data_stamp(data_frame, [data_centx, data_centy], dr=6)

    # set kernel, with read noise
    fma.set_kernel("matern32", [3.], [r"$l$"], True, 0.05)
    # set bounds
    fma.set_bounds(1.5, 1.5, 1, [1.], 1)
    print(fma.guess_RA_offset, fma.guess_Dec_offset)
    print(fma.bounds)

    # test likelihood function directly (hyperparameters in log space)
    mod_bounds = np.copy(fma.bounds)
    mod_bounds[2:] = np.log(mod_bounds[2:])
    print(mod_bounds)
    lnpos = fitpsf.lnprob((-16, -25.7, np.log(0.8), np.log(3.3), np.log(0.05)),
                          fma, mod_bounds, fma.covar, readnoise=True)
    print(lnpos, np.nanmean(data_frame), np.nanmean(fm_frame),
          np.nanmean(fma.data_stamp), np.nanmean(fma.fm_stamp))
    assert lnpos > -np.inf

    # run MCMC fit
    fma.fit_astrometry(nburn=150, nsteps=25, nwalkers=50, numthreads=1)
    print("{0} seconds to run".format(time.time() - t1))

    fma.propogate_errs(star_center_err=0.05,
                       platescale=GPI.GPIData.lenslet_scale * 1000,
                       platescale_err=0.007,
                       pa_offset=-0.1,
                       pa_uncertainty=0.13)

    # regression check against known beta Pic b offsets [mas]
    assert (np.abs(fma.RA_offset.bestfit - -227.2) < 5.)
    assert (np.abs(fma.Dec_offset.bestfit - -361.1) < 5.)

    fma.best_fit_and_residuals()
    plt.savefig("tests/bka2.png")