def initialize():
    """Initialise the MKID device parameters (responsivity map, bad/hot pixels,
    spectral resolution, phase background) and pickle them to iop.device_params."""
    # dp = device_params()
    dprint(dp.hot_pix)
    dp.response_map = array_response(plot=False)
    if mp.pix_yield == 1:
        mp.bad_pix = False
    if mp.bad_pix:
        dp.response_map = create_bad_pix(dp.response_map)
        # dp.response_map = create_hot_pix(dp.response_map)
    if mp.hot_pix:
        dp.hot_locs = create_hot_pix(mp)
        dp.hot_per_step = int(np.round(ap.exposure_time * mp.hot_bright))
    # dp.response_map = create_bad_pix_center(dp.response_map)
    # quicklook_im(dp.response_map)
    dp.Rs = assign_spectral_res(plot=False)
    dp.sigs = get_R_hyper(dp.Rs, plot=False)
    dprint(dp.sigs.shape)
    # get_phase_distortions(plot=True)
    if mp.phase_background:
        dp.basesDeg = assign_phase_background(plot=False)
    else:
        dp.basesDeg = np.zeros((mp.array_size))
    with open(iop.device_params, 'wb') as handle:
        pickle.dump(dp, handle, protocol=pickle.HIGHEST_PROTOCOL)
    return dp
def RDI_SDI(simple_hypercube_1, simple_hypercube_2, psf_template):
    dprint('RDI_SDI')
    wsamples = np.linspace(tp.band[0], tp.band[1], tp.w_bins)
    scale_list = tp.band[0] / wsamples
    simple_hypercube_1 = np.transpose(simple_hypercube_1, (1, 0, 2, 3))
    simple_hypercube_2 = np.transpose(simple_hypercube_2, (1, 0, 2, 3))
    psf_template = np.resize(psf_template, (tp.w_bins, psf_template.shape[0], psf_template.shape[1]))
    algo_dict = {'thresh': 0,
                 'full_target_cube': simple_hypercube_1,
                 'cube_ref': simple_hypercube_2,
                 'thru': True,
                 'scale_list': scale_list}
    angle_list = np.zeros((simple_hypercube_1.shape[1]))
    # method_out = eval_method(simple_hypercube_1[:], Analysis.stats.RDI_SDI_4_VIP, angle_list, algo_dict, psf_template=psf_template)
    dprint((simple_hypercube_1.shape[1], angle_list.shape[0]))
    method_out = eval_method(simple_hypercube_1[:], Analysis.stats.SDI_RDI_4_VIP, angle_list, algo_dict,
                             psf_template=psf_template)
    return method_out
def SDI_RDI_DSI_4_VIP(cube, angle_list, verbose, **kwargs):
    from vip_hci import pca
    wsamples = np.linspace(tp.band[0], tp.band[1], tp.w_bins)
    scale_list = tp.band[0] / wsamples
    # cube = np.mean(cube, axis=1)/ap.exposure_time
    # SDI_tar = pca.pca(np.mean(cube, axis=1)/ap.exposure_time, angle_list=np.zeros((cube.shape[0])),
    #                   scale_list=scale_list, mask_center_px=None)
    # SDI_ref = pca.pca(np.mean(kwargs['cube_ref'], axis=1)/ap.exposure_time, angle_list=np.zeros((cube.shape[0])),
    #                   scale_list=scale_list, mask_center_px=None)
    cube = np.transpose(cube, (1, 0, 2, 3))
    kwargs['cube_ref'] = np.transpose(kwargs['cube_ref'], (1, 0, 2, 3))
    SDI_tar = phot.SDI_each_exposure(cube, binning=25)
    SDI_tar = np.resize(SDI_tar, (SDI_tar.shape[0], 1, SDI_tar.shape[1], SDI_tar.shape[2]))
    SDI_ref = phot.SDI_each_exposure(kwargs['cube_ref'], binning=25)
    SDI_ref = np.resize(SDI_ref, (SDI_ref.shape[0], 1, SDI_ref.shape[1], SDI_ref.shape[2]))
    # quicklook_im(SDI)
    dprint(SDI_ref.shape)
    diff_cube = SDI_tar - SDI_ref
    LCcube = np.transpose(diff_cube, (2, 3, 0, 1))
    Lmap = get_Dmap(LCcube, binning=2, plot=False)
    return Lmap
def get_LmapBB(LCmap, threshold=0, binning=100, plot=False, verb_output=False):
    """Build a map counting, per pixel, how many binned exposures exceed the
    threshold in every wavelength slice."""
    dprint(LCmap.shape)
    # mask = phot.aperture(tp.grid_size/2, tp.grid_size/2, 9)
    # mask = mask == 0  # flip the zeros and ones
    # add_rc = np.ones((mp.array_size[0], mp.array_size[1]))  # add row col of ones to mask
    # add_rc[:-1, :-1] = mask
    # mask = add_rc
    # mask = np.transpose(np.resize(mask, (8, ap.numframes, 129, 129)))
    # LCmap *= mask
    Lmap = np.zeros((LCmap.shape[0], LCmap.shape[1]))
    nexps = LCmap.shape[2]
    intervals = list(range(0, nexps + 1, binning))
    dprint(intervals)
    Dmaps = []
    for ie in range(nexps // binning):  # integer division for Python 3
        exp = np.sum(LCmap[:, :, intervals[ie]:intervals[ie + 1], :], axis=2)
        lightmask = np.all(exp > threshold, axis=2)  # e.g. np.abs(exp) > 2 or 5e-6
        Lmap += lightmask
        # if plot and ie % 100 == 0:
        #     plt.hist(exp.flatten(), bins=np.linspace(-10, 10, 100))
        #     plt.show()
        #     quicklook_im(lightmask, logAmp=True, vmin=1)
        #     loop_frames(np.transpose(exp, (2, 0, 1)), logAmp=True)
        #     quicklook_im(Lmap, logAmp=True, vmin=1)
    # quicklook_im(Lmap, logAmp=True, vmin=1)
    return Lmap
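# Illustrative sketch (toy shapes and threshold, hypothetical helper name): the accumulation
# pattern used by get_LmapBB() above. Exposures are summed `binning` at a time, and each
# pixel counts how many binned exposures stay above the threshold in every wavelength slice.
import numpy as np

def _demo_lightmask_accumulation(LCmap, threshold=0, binning=2):
    Lmap = np.zeros(LCmap.shape[:2])
    nexps = LCmap.shape[2]
    intervals = list(range(0, nexps + 1, binning))
    for ie in range(nexps // binning):
        exp = np.sum(LCmap[:, :, intervals[ie]:intervals[ie + 1], :], axis=2)
        Lmap += np.all(exp > threshold, axis=2)
    return Lmap
# e.g. _demo_lightmask_accumulation(np.random.poisson(1.0, (8, 8, 10, 3)))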
def eval_method(cube, algo, angle_list, algo_dict, psf_template):
    """Run VIP's contrast_curve with the given post-processing algorithm and return
    the throughput/noise/sensitivity metrics plus the processed frame."""
    dprint((cube.shape, len(angle_list), tp.platescale / 1000, psf_template.shape))
    # wsamples = np.linspace(tp.band[0], tp.band[1], tp.w_bins)
    # scale_list = tp.band[0]/wsamples
    # scale_list = 1./scale_list[::-1]
    # dprint(scale_list)
    # fwhms = np.round(lod*scale_list)
    # dprint((fwhms, type(fwhms)))
    fulloutput = phot.contrcurve.contrast_curve(cube=cube,
                                                angle_list=angle_list,
                                                psf_template=psf_template,
                                                fwhm=lod,
                                                pxscale=tp.platescale / 1000,
                                                starphot=star_phot,
                                                algo=algo,
                                                # wedge=(60, 30),
                                                nbranch=1,
                                                verbose=True,
                                                debug=False,
                                                plot=False,
                                                theta=theta,
                                                full_output=True,
                                                fc_snr=5,
                                                **algo_dict)
    plt.show()
    metrics = [fulloutput[0]['throughput'],
               fulloutput[0]['noise'],
               fulloutput[0]['sensitivity (Student)'],
               fulloutput[0]['distance']]
    metrics = np.array(metrics)
    return metrics, fulloutput[3]
def create_hot_pix(mp):
    """Choose mp.hot_pix random pixel locations to act as hot pixels; return [x, y] index arrays."""
    amount = mp.hot_pix
    dprint(amount)
    bad_ind = np.array(random.sample(list(range(mp.array_size[0] * mp.array_size[1])), amount))
    bad_y = bad_ind // mp.array_size[1]
    bad_x = bad_ind % mp.array_size[1]
    return [bad_x, bad_y]
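# Illustrative sketch (toy numbers, hypothetical helper name): how create_hot_pix() and
# create_bad_pix() map a flat detector index onto (x, y) pixel coordinates.
import numpy as np

def _demo_flat_index_to_xy(flat_indices, array_size=(4, 5)):
    """Convert flat indices in [0, rows*cols) to (x, y) = (column within row, row)."""
    flat = np.asarray(flat_indices)
    ys = flat // array_size[1]   # row index
    xs = flat % array_size[1]    # column within the row
    return xs, ys
# e.g. _demo_flat_index_to_xy([0, 7, 13, 19]) -> (array([0, 2, 3, 4]), array([0, 1, 2, 3]))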
def eff_int(simple_hypercube_1, psf_template):
    dprint('effint')
    algo_dict = {'full_target_cube': simple_hypercube_1}
    angle_list = np.zeros((len(simple_hypercube_1)))
    method_out = eval_method(simple_hypercube_1[:, 0], Analysis.stats.effint_4_VIP, angle_list, algo_dict,
                             psf_template=psf_template)
    return method_out
def DSI_4_VIP(cube, angle_list, verbose, **kwargs):
    dprint(cube.shape)
    LCmap = np.transpose(cube)
    dprint(LCmap.shape)
    Lmap = get_Dmap(LCmap, kwargs['thresh'])
    # quicklook_im(Lmap)
    normed_Lmap = Lmap  # /kwargs['DSI_starphot']
    Lmap = np.transpose(Lmap)
    # quicklook_im(Lmap)
    return normed_Lmap
def RDI(simple_hypercube_1, simple_hypercube_2, psf_template):
    dprint('RDI')
    algo_dict = {'cube_ref': simple_hypercube_2[:, 0]}
    angle_list = np.zeros((len(simple_hypercube_1)))
    method_out = eval_method(simple_hypercube_1[:, 0], Analysis.stats.RDI_4_VIP, angle_list, algo_dict,
                             psf_template=psf_template)
    return method_out
def add_atmos(wfo, f_lens, w, atmos_map, correction=False):
    # print 'Including Atmospheric Aberations'
    # rms_error = 5e-9  # 500.e-9  # RMS wavefront error in meters
    # c_freq = 5e9  # correlation frequency (cycles/meter)
    # high_power = 8.  # high frequency falloff (r^-high_power)
    # print atmos_map, 'here'
    samp = proper.prop_get_sampling(wfo) * tp.band[0] * 1e-9 / w
    # samp = 0.125  # from caos ATM GUI: ((r0 sampling [px/r0]) /r0)*0.1
    dprint(atmos_map)
    try:
        # rawImageIO.scale_image(atmos_map, 1e6)
        obj_map = proper.prop_errormap(wfo, atmos_map, MULTIPLY=1.0, WAVEFRONT=True, MAP="obj_map",
                                       SAMPLING=tp.samp)  # FILE='telescope_objtest.fits'
        # quicklook_im(obj_map, logAmp=False)
    except IOError:
        print('*** Using exception hack for name rounding error ***')
        i = 0
        up = True
        indx = float(atmos_map[-19:-11])
        while not os.path.isfile(atmos_map):
            # Step the timestamp embedded in the filename up/down until a file is found
            # atmos_map = atmos_map[:-12] + str(i) + atmos_map[-11:]
            # print indx, indx + i, '%1.6f' % (indx + i)
            atmos_map = atmos_map[:-19] + '%1.6f' % (indx + i) + atmos_map[-11:]
            print(atmos_map)
            if up:
                i += 1e-6
            else:
                i -= 1e-6
            if i >= 50e-6:
                i = 0
                up = False
            elif i <= -50e-6:
                print('No file found')
                exit()
        # rawImageIO.scale_image(atmos_map, 1e-6)
        obj_map = proper.prop_errormap(wfo, atmos_map, MULTIPLY=1.0, WAVEFRONT=True, MAP="obj_map",
                                       SAMPLING=tp.samp)
        # quicklook_im(obj_map, logAmp=False)
    return obj_map
def SDI(simple_hypercube_1, psf_template):
    dprint('SDI')
    wsamples = np.linspace(tp.band[0], tp.band[1], tp.w_bins)
    scale_list = tp.band[0] / wsamples
    algo_dict = {'full_target_cube': simple_hypercube_1, 'scale_list': scale_list, 'thru': True}
    simple_hypercube_1 = np.transpose(simple_hypercube_1, (1, 0, 2, 3))
    angle_list = np.zeros((simple_hypercube_1.shape[1]))
    psf_template = np.resize(psf_template, (tp.w_bins, psf_template.shape[0], psf_template.shape[1]))
    method_out = eval_method(simple_hypercube_1, Analysis.stats.SDI_4_VIP, angle_list, algo_dict,
                             psf_template=psf_template)
    return method_out
def SDI_each_exposure(hypercube, binning=10):
    """Apply SDI to groups of `binning` consecutive frames and return the time series of residual frames."""
    shape = hypercube.shape
    timecube = np.zeros_like(hypercube[::binning, 0])
    dprint(timecube.shape)
    dprint(hypercube.shape)
    idx = np.arange(0, len(hypercube), binning)
    for i in range(len(idx) - 1):
        timecube[i] = do_SDI(np.mean(hypercube[idx[i]:idx[i + 1]], axis=0), plot=False)
    # for t in range(shape[0])[:1]:
    #     timecube[t] = do_SDI(hypercube[t], plot=True)
    # loop_frames(timecube)
    return timecube
def do_SDI(datacube, plot=False):
    """Rescale each spectral slice and subtract the stellar speckle pattern with PCA
    (spectral differential imaging)."""
    wsamples = np.linspace(tp.band[0], tp.band[1], tp.w_bins)
    scale_list = tp.band[0] / wsamples
    # print scale_list
    from vip_hci import pca
    dprint((datacube.shape, scale_list.shape))
    fr_pca1 = pca.pca(datacube, angle_list=np.zeros((len(scale_list))), scale_list=scale_list,
                      mask_center_px=None)
    # fr_pca1 = fr_pca1[:, :-4]
    if plot:
        quicklook_im(fr_pca1)
    # dprint(fr_pca1.shape)
    return fr_pca1
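# Illustrative sketch (assumed band and bin count, hypothetical helper name, not the
# simulation's actual values): how the per-wavelength rescaling factors used by do_SDI()
# above are formed. Speckles scale radially with wavelength, so each spectral slice is
# rescaled by its factor before the PCA subtraction.
import numpy as np

def _demo_sdi_scale_list(band=(800.0, 1500.0), w_bins=8):
    """Return the reference-over-sample wavelength ratios, as in do_SDI()."""
    wsamples = np.linspace(band[0], band[1], w_bins)
    return band[0] / wsamples   # runs from 1.0 down to band[0]/band[1]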
def SDI_4_VIP(cube, angle_list, verbose, **kwargs):
    dprint(cube.shape)
    wsamples = np.linspace(tp.band[0], tp.band[1], tp.w_bins)
    scale_list = tp.band[0] / wsamples
    cube = np.mean(cube, axis=1) / ap.exposure_time
    # SDI = phot.SDI_each_exposure(cube, binning=len(cube))[0]
    print(len(angle_list))
    from vip_hci import pca
    SDI = pca.pca(cube, angle_list=np.zeros((len(cube))), scale_list=scale_list, mask_center_px=None)
    # quicklook_im(SDI)
    return SDI
def RDI_SDI_4_VIP(cube, angle_list, verbose, **kwargs):
    # if kwargs['thru']:
    diff_cube = cube - kwargs['cube_ref']  # [:, -2]
    # else:
    #     quicklook_im(np.mean(cube[:,-2], axis=0))
    #     quicklook_im(np.mean(kwargs['cube_ref'][:,-2], axis=0))
    #     quicklook_im(np.mean(diff_cube[:,-2], axis=0))
    #     diff_cube = kwargs['full_target_cube'] - kwargs['cube_ref']
    diff_cube = np.transpose(diff_cube, (1, 0, 2, 3))
    time_cube = phot.SDI_each_exposure(diff_cube, binning=25)
    dprint(np.mean(time_cube[:], axis=0).shape)
    # quicklook_im(np.mean(time_cube[:], axis=0))
    return np.mean(time_cube[:], axis=0)
def RDI_DSI(simple_hypercube_1, simple_hypercube_2, psf_template):
    dprint('RDI_DSI_SDI')
    # wsamples = np.linspace(tp.band[0], tp.band[1], tp.w_bins)
    # scale_list = tp.band[0] / wsamples
    # simple_hypercube_1 = np.transpose(simple_hypercube_1, (1, 0, 2, 3))
    # simple_hypercube_2 = np.transpose(simple_hypercube_2, (1, 0, 2, 3))
    # psf_template = np.resize(psf_template, (tp.w_bins, psf_template.shape[0], psf_template.shape[1]))
    algo_dict = {'thresh': 0, 'cube_ref': simple_hypercube_2[:, 0]}
    # angle_list = np.zeros((simple_hypercube_1.shape[1]))
    angle_list = np.zeros((len(simple_hypercube_1)))
    method_out = eval_method(simple_hypercube_1[:, 0], Analysis.stats.RDI_DSI_4_VIP, angle_list, algo_dict,
                             psf_template=psf_template)
    return method_out
def assign_spectral_res(plot=False):
    """Assign each pixel a spectral resolution (at 800 nm), drawn from a distribution
    centred on mp.R_mean with spread mp.R_sig."""
    print('Assigning each pixel a spectral resolution (at 800nm)')
    dist = Distribution(gaussian(0.5, 0.25, np.linspace(-0.2, 1.2, mp.res_elements)), interpolation=True)
    dprint(mp.R_mean)
    Rs = (dist(mp.array_size[0] * mp.array_size[1])[0] / float(mp.res_elements) - 0.5) * mp.R_sig + mp.R_mean
    if plot:
        plt.xlabel('R')
        plt.ylabel('#')
        plt.hist(Rs)
        plt.show()
    Rs = np.reshape(Rs, mp.array_size)
    # plt.imshow(Rs)
    # plt.show()
    return Rs
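# Illustrative sketch (assumed R_mean/R_sig values, hypothetical helper name): the linear
# mapping used by assign_spectral_res() above, taking unit-interval samples to per-pixel
# spectral resolutions centred on R_mean with spread R_sig. Uniform samples stand in for
# draws from the Gaussian Distribution object used in the real code.
import numpy as np

def _demo_assign_R(n_pix=10, R_mean=50.0, R_sig=5.0):
    u = np.random.uniform(0.0, 1.0, n_pix)   # stand-in for dist(n)[0] / res_elements
    return (u - 0.5) * R_sig + R_mean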
def RDI_4_VIP(cube, angle_list, verbose, **kwargs):
    dprint(cube.shape)
    # if kwargs['thru']:
    #     diff_cube = cube - kwargs['cube_ref'][:, -2]
    #     dprint(np.mean(diff_cube[:], axis=0).shape)
    #     return np.mean(diff_cube[:], axis=0)
    # else:
    diff_cube = cube - np.mean(kwargs['cube_ref'], axis=0)
    dprint(diff_cube.shape)
    # quicklook_im(np.mean(diff_cube, axis=0), annos=['MKIDs'], title=r' $I_L / I^{*}$', mark_star=True)
    # dprint(np.mean(diff_cube[:, 0], axis=0).shape)
    return np.mean(diff_cube[:], axis=0)
def star_throughput():
    ap.companion = False
    thru_tot = np.zeros((3, 4))
    for p in range(2, 3):
        # for p in range(0, 1):
        if p == 0:
            tp.aber_params['CPA'] = False
            tp.aber_params['NCPA'] = False
            tp.use_atmos = False
            tp.use_ao = False
        if p == 1:
            tp.aber_params['CPA'] = True
            tp.aber_params['NCPA'] = False
            tp.use_atmos = True
            tp.use_ao = True
        if p == 2:
            tp.aber_params['CPA'] = True
            tp.aber_params['NCPA'] = True
            tp.use_atmos = True
            tp.use_ao = True
        # tp.band[0] = 860
        tp.occulter_type = 'None'
        ref = gpd.run()[0, 0]
        # ref_phot = phot.aper_phot(ref, 0, lods[0])
        ref_tot = np.sum(ref)
        # mask = np.int_(phot.aperture(76, 52, 6))
        # print ref_phot
        # datacube = []
        # tp.use_ao = False
        rad_int = np.zeros((4, 64))
        for i in range(1, 4):  # range(4):
            tp.occulter_type = occulter_types[i]  # 'Gaussian'
            occult = gpd.run()[0, 0]
            # datacube[i]/ref_phot
            # phot.coron_4_VIP(datacube[i], ref)
            for r in range(64):
                print(i, r, occult.shape, r + 1)
                rad_int[i, r] = phot.aper_phot(occult, r * 1, (r + 1) * 1)
            # plt.plot(rad_int[i])
            # plt.show()
            occult_tot = np.sum(occult)
            dprint(occult_tot / ref_tot)
            thru_tot[p, i] = occult_tot / ref_tot
    # datacube = np.array(datacube)
    # throughput = np.zeros((3))
    return thru_tot
def take_exposure(hypercube):
    """Sum consecutive frames of the hypercube so each output slice spans one exposure_time."""
    dprint(np.sum(hypercube))
    dprint((ap.exposure_time, cp.frame_time))
    factor = ap.exposure_time / cp.frame_time
    num_exp = int(ap.numframes / factor)
    # print factor, num_exp
    downsample_cube = np.zeros((num_exp, hypercube.shape[1], hypercube.shape[2], hypercube.shape[3]))
    # print ap.numframes, factor, num_exp
    for i in range(num_exp):
        downsample_cube[i] = np.sum(hypercube[int(i * factor):int((i + 1) * factor)], axis=0)  # /float(factor)
    return downsample_cube
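# Illustrative sketch (made-up timing values, hypothetical helper name): the binning
# arithmetic behind take_exposure(). exposure_time, frame_time and numframes normally
# come from the ap/cp parameter objects.
import numpy as np

def _demo_take_exposure(frames, frame_time=0.001, exposure_time=0.01):
    """Sum groups of consecutive frames so each output covers one exposure_time."""
    factor = exposure_time / frame_time          # frames summed per exposure
    num_exp = int(len(frames) / factor)          # number of output exposures
    return np.array([frames[int(i * factor):int((i + 1) * factor)].sum(axis=0)
                     for i in range(num_exp)])
# e.g. _demo_take_exposure(np.random.poisson(1.0, (100, 2, 4, 4))).shape -> (10, 2, 4, 4)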
def effint_4_VIP(cube, angle_list, verbose, **kwargs):
    dprint(cube.shape)
    # if kwargs['thru']:
    #     diff_cube = cube
    print(cube.shape)
    # quicklook_im(np.mean(cube[:], axis=0), annos=['MKIDs'], title=r' $I_L / I^{*}$', mark_star=True)
    return np.mean(cube[:], axis=0)
    # Unreachable alternative kept for reference:
    # else:
    #     cube = kwargs['full_target_cube']
    #     dprint(np.mean(cube[:, 0], axis=0).shape)
    #     # quicklook_im(np.mean(cube[:, -2], axis=0), annos=['MKIDs'], title=r' $I_L / I^{*}$', mark_star=True)
    #     return np.mean(cube[:, 0], axis=0)
def readfield(path, filename):
    """Read the real and imaginary FITS planes of a saved field and return the complex array."""
    try:
        data_r, hdr = getdata(path + filename + '_r.fits', header=True)
        data_i = getdata(path + filename + '_i.fits')
    except IOError:
        dprint('FileNotFoundError. Waiting...')
        import time
        time.sleep(10)
        data_r, hdr = getdata(path + filename + '_r.fits', header=True)
        data_i = getdata(path + filename + '_i.fits')
    field = np.array(data_r, dtype=complex)
    field.imag = data_i
    return field
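# Illustrative sketch (in-memory arrays instead of the '_r.fits'/'_i.fits' files,
# hypothetical helper name): the complex-field reconstruction readfield() above performs.
import numpy as np

def _demo_complex_field(data_r, data_i):
    field = np.array(data_r, dtype=complex)   # real part
    field.imag = data_i                       # attach imaginary part in place
    return field
# e.g. _demo_complex_field(np.ones((4, 4)), 0.5 * np.ones((4, 4)))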
def RDI_SDI_DSI_4_VIP(cube, angle_list, verbose, **kwargs):
    # if kwargs['thru']:
    diff_cube = cube - kwargs['cube_ref']  # [:, -2]
    # else:
    #     quicklook_im(np.mean(cube[:,-2], axis=0))
    #     quicklook_im(np.mean(kwargs['cube_ref'][:,-2], axis=0))
    #     quicklook_im(np.mean(diff_cube[:,-2], axis=0))
    #     diff_cube = kwargs['full_target_cube'] - kwargs['cube_ref']
    diff_cube = np.transpose(diff_cube, (1, 0, 2, 3))
    time_cube = phot.SDI_each_exposure(diff_cube, binning=25)
    time_cube = np.resize(time_cube, (time_cube.shape[0], 1, time_cube.shape[1], time_cube.shape[2]))
    # dprint(np.mean(time_cube[:], axis=0).shape)
    # quicklook_im(np.mean(time_cube[:], axis=0))
    dprint(time_cube.shape)
    LCcube = np.transpose(time_cube, (2, 3, 0, 1))
    Lmap = get_Dmap(LCcube, binning=2, plot=False)
    return Lmap
def get_integ_hypercube(plot=False):
    """Load the integrated hypercube from disk if it exists; otherwise generate it,
    apply detector effects, and save it."""
    import Detector.get_photon_data as gpd
    print(os.path.isfile(iop.hyperFile), iop.hyperFile)
    print(ap.numframes)
    if os.path.isfile(iop.hyperFile):
        dprint(iop.hyperFile[-3:])
        if iop.hyperFile[-3:] == '.h5':  # try:
            hypercube = open_hypercube_hdf5(HyperCubeFile=iop.hyperFile)
        else:  # except:
            hypercube = open_hypercube(HyperCubeFile=iop.hyperFile)
        dprint(hypercube.shape)
        dprint(np.sum(hypercube))
    else:
        # hypercube = gpd.run()
        hypercube = gpd.take_obs_data()
        dprint(np.sum(hypercube))
        if plot:
            loop_frames(hypercube[:, 0])
            loop_frames(hypercube[0])
        print('finished run')
        print(np.shape(hypercube))
        if plot:
            view_datacube(hypercube[0], logAmp=True)
        if tp.detector == 'H2RG':
            hypercube = H2RG.scale_to_luminos(hypercube)
            if plot:
                view_datacube(hypercube[0], logAmp=True)
        # hypercube = take_exposure(hypercube)
        # if tp.detector == 'H2RG':
        if tp.detector == 'H2RG' and hp.use_readnoise:
            hypercube = H2RG.add_readnoise(hypercube, hp.readnoise)
            # if plot: view_datacube(hypercube[0], logAmp=True)
        if plot:
            view_datacube(hypercube[0], logAmp=True)
        # save_hypercube(hypercube, HyperCubeFile=iop.hyperFile)
        dprint(iop.hyperFile)
        save_hypercube_hdf5(hypercube, HyperCubeFile=iop.hyperFile)
    # print np.shape(hypercube)
    if plot:
        loop_frames(hypercube[:, 0])
        loop_frames(hypercube[0])
    return hypercube
def create_bad_pix(responsivities, plot=False):
    """Zero the responsivity of a random subset of pixels set by mp.pix_yield."""
    x = np.arange(mp.array_size[0])
    y = np.arange(mp.array_size[1])
    amount = int(mp.array_size[0] * mp.array_size[1] * (1. - mp.pix_yield))
    bad_ind = np.array(random.sample(list(range(mp.array_size[0] * mp.array_size[1])), amount))
    dprint((len(bad_ind), mp.array_size[0] * mp.array_size[1], mp.pix_yield, amount))
    # bad_y = random.sample(y, amount)
    bad_y = bad_ind // mp.array_size[1]
    bad_x = bad_ind % mp.array_size[1]
    print(responsivities.shape)
    responsivities[bad_x, bad_y] = 0
    if plot:
        plt.imshow(responsivities)
        plt.show()
    return responsivities
def RDSI_4_VIP(cube, angle_list, verbose, **kwargs):
    dprint(cube.shape)
    # diff_cube = cube - kwargs['cube_ref']
    if kwargs['thru']:
        diff_cube = cube - kwargs['cube_ref'][:, -2]
        LCcube = np.transpose(diff_cube, (1, 2, 0))
    else:
        diff_cube = kwargs['full_target_cube'] - kwargs['cube_ref']
        LCcube = np.transpose(diff_cube, (2, 3, 0, 1))
    dprint(diff_cube.shape)
    print(LCcube.shape)
    # quicklook_im(LCcube[:, :, 0])
    LCcube = np.resize(LCcube, (LCcube.shape[0], LCcube.shape[1], LCcube.shape[2], 1))
    print(LCcube.shape)
    algo_dict = {'thresh': 0}
    Lmap = get_Dmap(LCcube, algo_dict['thresh'], binning=2)
    quicklook_im(Lmap, annos=['MKIDs'], title=r' $I_L / I^{*}$', mark_star=True)
    dprint(LCcube.shape)
    # Lmap = get_Dmap(LCcube, kwargs['thresh'])
    # quicklook_im(Lmap)
    # normed_Lmap = Lmap  # /kwargs['DSI_starphot']
    # Lmap = np.transpose(Lmap)
    # quicklook_im(Lmap)
    return Lmap
def eval_method(cube, algo, angle_list, algo_dict):
    """Run VIP's contrast_curve over the full wedge and return throughput/noise/sensitivity metrics.
    psf_template, lod, star_phot and theta are taken from the enclosing module scope."""
    dprint(star_phot)
    fulloutput = phot.contrcurve.contrast_curve(cube=cube,
                                                angle_list=angle_list,
                                                psf_template=psf_template,
                                                fwhm=lod,
                                                pxscale=tp.platescale / 1000,
                                                starphot=star_phot,
                                                algo=algo,
                                                wedge=(0, 360),
                                                debug=False,
                                                plot=False,
                                                theta=theta,
                                                full_output=True,
                                                fc_snr=10,
                                                **algo_dict)
    plt.show()
    metrics = [fulloutput[0]['throughput'],
               fulloutput[0]['noise'],
               fulloutput[0]['sensitivity (Student)']]
    metrics = np.array(metrics)
    return metrics, fulloutput[3]
def quick_processing(simple_hypercube_1, simple_hypercube_2, plot=True):
    diff_cube = simple_hypercube_1 - simple_hypercube_2
    if plot:
        quicklook_im(np.mean(simple_hypercube_1[:, -2], axis=0))
        quicklook_im(np.mean(simple_hypercube_2[:, -2], axis=0))
        quicklook_im(np.mean(diff_cube[:, -2], axis=0))
    Lmaps = np.zeros((diff_cube.shape[1], diff_cube.shape[2], diff_cube.shape[3]))
    rmaps = np.zeros_like(Lmaps)
    for iw in range(diff_cube.shape[1]):
        dprint((diff_cube.shape, iw))
        LCcube = np.transpose(diff_cube[:, iw:iw + 1], (2, 3, 0, 1))
        rmaps[iw] = Analysis.stats.get_skew(LCcube)  # , xinspect=range(233,236), yinspect=range(233,236), inspect=True
        # quicklook_im(rmaps[iw], logAmp=True)
        Lmaps[iw] = Analysis.stats.get_Dmap(LCcube, binning=10, plot=False, threshold=0.01)
    if plot:
        loop_frames(rmaps)
        loop_frames(Lmaps)
    SDI = Analysis.phot.do_SDI(rmaps * Lmaps)
    if plot:
        quicklook_im(SDI)
    return [np.mean(simple_hypercube_1[:, 0], axis=0), np.mean(diff_cube[:, 0], axis=0), Lmaps[0], SDI]
def RDI_DSI_4_VIP(cube, angle_list, verbose, **kwargs):
    from vip_hci import pca
    diff_cube = cube - kwargs['cube_ref']
    # loop_frames(cube[:, 1])
    # loop_frames(cube[0, :])
    # quicklook_im(np.mean(cube[:, -2], axis=0))
    # quicklook_im(np.mean(simple_hypercube_2[:, -2], axis=0))
    # quicklook_im(np.mean(diff_cube[:, -2], axis=0))
    # Lmaps = np.zeros((diff_cube.shape[0], diff_cube.shape[2], diff_cube.shape[3]))
    # for iw in range(diff_cube.shape[0]):
    dprint(diff_cube.shape)
    diff_cube = np.resize(diff_cube, (diff_cube.shape[0], 1, diff_cube.shape[1], diff_cube.shape[2]))
    dprint(diff_cube.shape)
    LCcube = np.transpose(diff_cube, (2, 3, 0, 1))
    dprint(LCcube.shape)
    Lmap = get_Dmap(LCcube, binning=1, plot=False)
    # Lmap = get_skew(LCcube)
    # quicklook_im(Lmap, logAmp=True)
    # loop_frames(Lmaps)
    # angle_list = np.zeros((len(Lmaps)))
    # SDI = phot.do_SDI(Lmaps)
    # quicklook_im(SDI)
    return Lmap
def run_system(empty_lamda, grid_size, PASSVALUE):  # 'dm_disp':0
    """Propagate the wavefront(s) through the telescope/atmosphere/AO/coronagraph train for one
    timestep and return the resulting spectral datacube and its sampling."""
    passpara = PASSVALUE['params']
    ap.__dict__ = passpara[0].__dict__
    tp.__dict__ = passpara[1].__dict__
    iop.__dict__ = passpara[2].__dict__
    sp.__dict__ = passpara[3].__dict__

    # print 'propagating frame:', PASSVALUE['iter']
    wsamples = np.linspace(tp.band[0], tp.band[1], tp.nwsamp) / 1e9
    datacube = []

    # One wavefront per wavelength sample and per source (primary star plus optional companions)
    if ap.companion:
        wf_array = np.empty((len(wsamples), 1 + len(ap.contrast)), dtype=object)
    else:
        wf_array = np.empty((len(wsamples), 1), dtype=object)
    beam_ratios = np.zeros_like((wsamples))
    for iw, w in enumerate(wsamples):
        # Define the wavefront
        beam_ratios[iw] = tp.beam_ratio * tp.band[0] / w * 1e-9
        wfp = proper.prop_begin(tp.diam, w, tp.grid_size, beam_ratios[iw])
        wfs = [wfp]
        names = ['primary']
        if ap.companion:
            for id in range(len(ap.contrast)):
                wfc = proper.prop_begin(tp.diam, w, tp.grid_size, beam_ratios[iw])
                wfs.append(wfc)
                names.append('companion_%i' % id)
        for io, (iwf, wf) in enumerate(zip(names, wfs)):
            wf_array[iw, io] = wf

    iter_func(wf_array, proper.prop_circular_aperture, **{'radius': tp.diam / 2})

    if tp.use_atmos:
        tdm.add_atmos(wf_array, *(tp.f_lens, w, PASSVALUE['atmos_map']))
    # quicklook_wf(wf_array[0, 0])
    wf_array = tdm.abs_zeros(wf_array)

    if tp.rot_rate:
        iter_func(wf_array, tdm.rotate_atmos, *(PASSVALUE['atmos_map']))

    if tp.use_spiders:
        iter_func(wf_array, tdm.add_spiders, tp.diam)
        wf_array = tdm.abs_zeros(wf_array)
        if sp.get_ints: get_intensity(wf_array, sp, phase=True)

    wf_array = tdm.abs_zeros(wf_array)
    # if tp.use_hex:
    #     tdm.add_hex(wf)

    iter_func(wf_array, proper.prop_define_entrance)  # normalizes the intensity

    if wf_array.shape[1] >= 1:
        tdm.offset_companion(wf_array[:, 1:], PASSVALUE['atmos_map'])
    # tdm.offset_companion(wf_array, PASSVALUE['atmos_map'])

    # if tp.use_apod:
    #     tdm.do_apod(wf, tp.grid_size, tp.beam_ratio, tp.apod_gaus)
    # iter_func(wf_array, proper.prop_propagate, tp.f_lens)

    if tp.aber_params['CPA']:
        tdm.add_aber(wf_array, tp.f_lens, tp.aber_params, tp.aber_vals, PASSVALUE['iter'], Loc='CPA')
        iter_func(wf_array, proper.prop_circular_aperture, **{'radius': tp.diam / 2})
        iter_func(wf_array, tdm.add_spiders, tp.diam, legs=False)
        wf_array = tdm.abs_zeros(wf_array)
        if sp.get_ints: get_intensity(wf_array, sp, phase=True)

    if tp.quick_ao:
        r0 = float(PASSVALUE['atmos_map'][-10:-5])
        tdm.flat_outside(wf_array)
        CPA_maps = tdm.quick_wfs(wf_array[:, 0], PASSVALUE['iter'], r0=r0)  # , obj_map, tp.wfs_scale
        if tp.use_ao:
            tdm.quick_ao(wf_array, iwf, tp.f_lens, beam_ratios, PASSVALUE['iter'], CPA_maps)
            wf_array = tdm.abs_zeros(wf_array)
            if sp.get_ints: get_intensity(wf_array, sp, phase=True)
    else:
        print('This needs to be updated to the parallel implementation')
        exit()
        # if tp.use_ao:
        #     tdm.adaptive_optics(wf, iwf, iw, tp.f_lens, beam_ratio, PASSVALUE['iter'])
        # if iwf == 'primary':  # and PASSVALUE['iter'] == 0:
        #     r0 = float(PASSVALUE['atmos_map'][-10:-5])
        #     tdm.wfs_measurement(wf, PASSVALUE['iter'], iw, r0=r0)  # , obj_map, tp.wfs_scale

    # if tp.active_modulate:
    #     tdm.modulate(wf, w, PASSVALUE['iter'])

    if tp.aber_params['NCPA']:
        tdm.add_aber(wf_array, tp.f_lens, tp.aber_params, tp.aber_vals, PASSVALUE['iter'], Loc='NCPA')
        iter_func(wf_array, proper.prop_circular_aperture, **{'radius': tp.diam / 2})
        iter_func(wf_array, tdm.add_spiders, tp.diam, legs=False)
        wf_array = tdm.abs_zeros(wf_array)
        if sp.get_ints: get_intensity(wf_array, sp, phase=True)

    if tp.use_zern_ab:
        iter_func(wf_array, tdm.add_zern_ab, tp.f_lens)

    # Spiders are introduced upstream for now since the phase unwrapping seems to ignore them
    # and hence so does the DM. See
    # http://scikit-image.org/docs/dev/auto_examples/filters/plot_phase_unwrap.html for the masking argument.
    # if tp.use_spiders:
    #     iter_func(wf_array, tdm.add_spiders, tp.diam)

    if tp.use_apod:
        from coronagraph import apodization
        iter_func(wf_array, apodization, True)

    iter_func(wf_array, tdm.prop_mid_optics, tp.f_lens)

    # if tp.satelite_speck and iwf == 'primary':
    #     tdm.add_speckles(wf)
    # if tp.active_null and iwf == 'primary':
    #     FPWFS.active_null(wf, PASSVALUE['iter'], w)
    # if iwf == 'primary' and iop.saveIQ:
    #     save_pix_IQ(wf)

    if sp.get_ints: get_intensity(wf_array, sp, phase=False)

    iter_func(wf_array, coronagraph, *(tp.f_lens, tp.occulter_type, tp.occult_loc, tp.diam))
    # if 'None' not in tp.occulter_type:  # kludge for now until a more sophisticated coronagraph is installed
    #     for iw in range(len(wf_array)):
    #         wf_array[iw, 0].wfarr *= 0.1

    if sp.get_ints: get_intensity(wf_array, sp, phase=False)

    dprint(proper.prop_get_sampling_arcsec(wf_array[0, 0]))

    shape = wf_array.shape
    # comp_scaling = 10*np.arange(1, shape[0]+1)/shape[0]
    for iw in range(shape[0]):
        wframes = np.zeros((tp.grid_size, tp.grid_size))
        for io in range(shape[1]):
            (wframe, sampling) = proper.prop_end(wf_array[iw, io])
            # if ap.companion:
            #     from scipy.ndimage.interpolation import shift
            #     companion = shift(wframe, shift=np.array(ap.comp_loc[::-1]) - np.array([tp.grid_size/2, tp.grid_size/2])) * ap.contrast
            #     wframe = (wframe + companion)
            # if max(mp.array_size) < tp.grid_size:
            #     # Photons seeded outside the array cannot have pixel phase uncertainty applied to them.
            #     # Instead make both grids match in size
            #     wframe = rawImageIO.resize_image(wframe, newsize=(max(mp.array_size), max(mp.array_size)))
            # if io > 0:
            #     wframe *= comp_scaling[iw]
            wframes += wframe
        # if sp.show_wframe:
        #     quicklook_im(wframes, logAmp=True, show=True)
        datacube.append(wframes)

    datacube = np.array(datacube)
    # if tp.pix_shift:
    #     datacube = np.roll(np.roll(datacube, tp.pix_shift[0], 1), tp.pix_shift[1], 2)
    datacube = np.abs(datacube)

    # Interpolate the sparsely sampled wavelengths up to the full number of spectral bins
    if tp.interp_sample and tp.nwsamp > 1 and tp.nwsamp < tp.w_bins:
        wave_samps = np.linspace(0, 1, tp.nwsamp)
        f_out = interp1d(wave_samps, datacube, axis=0)
        new_heights = np.linspace(0, 1, tp.w_bins)
        datacube = f_out(new_heights)

    # datacube = np.transpose(np.transpose(datacube) / np.sum(datacube, axis=(1, 2))) / float(tp.nwsamp)

    return (datacube, sampling)