def calculate_xspec_file(cube1_file, cube2_file, bins, weight1_file=None,
                         weight2_file=None, truncate=False, window="blackman",
                         return_3d=False, unitless=True):

    cube1 = algebra.make_vect(algebra.load(cube1_file))
    cube2 = algebra.make_vect(algebra.load(cube2_file))

    if weight1_file is None:
        weight1 = algebra.ones_like(cube1)
    else:
        weight1 = algebra.make_vect(algebra.load(weight1_file))

    if weight2_file is None:
        weight2 = algebra.ones_like(cube2)
    else:
        weight2 = algebra.make_vect(algebra.load(weight2_file))

    print cube1.shape, cube2.shape, weight1.shape, weight2.shape

    return calculate_xspec(cube1, cube2, weight1, weight2,
                           bins=bins, window=window, unitless=unitless,
                           truncate=truncate, return_3d=return_3d)
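
# A minimal usage sketch for calculate_xspec_file. The file paths and bin
# choices below are hypothetical placeholders; the only assumption is that the
# inputs are algebra-style .npy cubes with 'freq', 'ra', 'dec' axes.
import numpy as np

# log-spaced k bin edges (illustrative choice, not a fixed convention)
kbins = np.logspace(np.log10(0.01), np.log10(1.0), 21)

ps_result = calculate_xspec_file('maps/cleaned_map_A.npy',   # hypothetical path
                                 'maps/cleaned_map_B.npy',   # hypothetical path
                                 kbins,
                                 weight1_file='maps/weight_A.npy',
                                 weight2_file='maps/weight_B.npy',
                                 window='blackman', unitless=True)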
def timestream2map_GBT(vis_one, vis_mask, time, ra, dec, map_tmp,
                       n_poly=1, interpolation='linear'):

    vis_mask = (vis_mask.copy()).astype('bool')

    vis_one = np.array(vis_one)
    vis_one[vis_mask] = 0.

    cov_inv_block = np.zeros(map_tmp.shape * 2)

    polys = ortho_poly(time, n_poly, ~vis_mask, 0)
    amps = np.sum(polys * vis_one[None, :], -1)
    vis_fit = np.sum(amps[:, None] * polys, 0)
    vis_one -= vis_fit

    _good  = ( ra  < max(map_tmp.get_axis('ra') ))
    _good *= ( ra  > min(map_tmp.get_axis('ra') ))
    _good *= ( dec < max(map_tmp.get_axis('dec')))
    _good *= ( dec > min(map_tmp.get_axis('dec')))
    _good *= ~vis_mask
    if np.sum(_good) < 5:
        print 'bad block < 5'
        return al.zeros_like(map_tmp), cov_inv_block

    ra       = ra[_good]
    dec      = dec[_good]
    vis_one  = vis_one[_good]
    vis_mask = vis_mask[_good]
    time     = time[_good]

    P = Pointing(('ra', 'dec'), (ra, dec), map_tmp, interpolation)

    _vars = sp.sum(vis_one ** 2.)
    _cont = sp.sum(~vis_mask)
    if _cont != 0:
        _vars /= _cont
    else:
        _vars = T_infinity ** 2.
    if _vars < T_small ** 2:
        print "vars too small"
        _vars = T_small ** 2
    #thermal_noise = np.var(vis_one)
    thermal_noise = _vars

    vis_one = al.make_vect(vis_one[None, :], axis_names=['freq', 'time'])
    N = Noise(vis_one, time)
    N.add_thermal(thermal_noise)
    if n_poly == 1:
        N.deweight_time_mean(T_huge ** 2.)
    elif n_poly == 2:
        N.deweight_time_slope(T_huge ** 2.)
    N.finalize(frequency_correlations=False, preserve_matrices=False)
    vis_weighted = N.weight_time_stream(vis_one)

    dirty_map = P.apply_to_time_axis(vis_weighted)[0, ...]
    P.noise_channel_to_map(N, 0, cov_inv_block)

    return dirty_map, cov_inv_block
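
# The ortho_poly / amps / vis_fit block above projects the lowest-order
# polynomials (mean, slope, ...) out of each time stream before gridding.
# A numpy-only sketch of the same idea, assuming a 1-D time stream `vis`
# with boolean mask `bad` (ortho_poly itself is a pipeline helper and is
# not reproduced here):
import numpy as np

def remove_low_order(vis, bad, n_poly=1):
    """Fit and subtract a degree (n_poly - 1) polynomial using only good samples."""
    t = np.arange(vis.size, dtype=float)
    good = ~bad
    coeff = np.polyfit(t[good], vis[good], deg=n_poly - 1)
    cleaned = vis - np.polyval(coeff, t)
    cleaned[bad] = 0.
    return cleaned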
def init_task_list(self):
    '''
    init task list [A1, A2, A3, ... An]

    A1 x A1
    A2 x A2
    ...
    An x An
    '''
    if self.mode_list is None:
        self.mode_list = self.params['mode_list']

    with h5.File(self.input_files[0], 'r') as f:
        map_tmp = al.make_vect(al.load_h5(f, 'clean_map'))
        self.map_info = map_tmp.info

    task_list = []
    for ii in range(self.input_files_num):
        input_file_name_ii = self.input_files[ii].split('/')[-1]
        input_file_name_ii = input_file_name_ii.replace('.h5', '')
        input_file_name_jj = input_file_name_ii
        if self.params['svd_key'] is not None:
            input_file_name_ii = self.params['svd_key'][0]
            input_file_name_jj = self.params['svd_key'][1]
        tind_l = (ii, )
        tind_r = (ii, )
        tind_o = [input_file_name_ii, input_file_name_jj]
        task_list.append([tind_l, tind_r, tind_o])

        for kk in self.mode_list:
            self.create_dataset(ii, 'cleaned_%02dmode/' % kk + input_file_name_ii,
                                dset_shp=map_tmp.shape, dset_info=map_tmp.info)
            if input_file_name_jj != input_file_name_ii:
                self.create_dataset(ii, 'cleaned_%02dmode/' % kk + input_file_name_jj,
                                    dset_shp=map_tmp.shape, dset_info=map_tmp.info)
            #print 'cleaned_%02dmode/Combined'%kk
            self.create_dataset(ii, 'cleaned_%02dmode/Combined' % kk,
                                dset_shp=map_tmp.shape, dset_info=map_tmp.info)

        self.create_dataset(ii, 'weight', dset_shp=map_tmp.shape,
                            dset_info=map_tmp.info)
        self.create_dataset(ii, 'mask', dset_shp=map_tmp.shape[:1])
        self.df_out[ii]['mode_list'] = self.mode_list

    self.task_list = task_list
    self.dset_shp = map_tmp.shape
def theory_power_spectrum(map_tmp, bin_centers, unitless=True, cross=False):
    r"""simple caller to output a power spectrum"""

    with h5.File(map_tmp) as hf:
        zspace_cube = algebra.make_vect(algebra.load_h5(hf, 'clean_map'))
    simobj = Corr21cm.like_kiyo_map(zspace_cube)
    pwrspec_input = simobj.get_pwrspec(bin_centers, cross)
    if unitless:
        pwrspec_input *= bin_centers ** 3. / 2. / np.pi / np.pi

    return pwrspec_input
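
# The `unitless` branch above converts P(k) to the dimensionless variance per
# logarithmic k interval, Delta^2(k) = k^3 P(k) / (2 pi^2).  A standalone
# sketch of that conversion (the numbers are purely illustrative):
import numpy as np

k = np.logspace(-2, 0, 20)                   # [h/Mpc]
pk = 1.e3 * k ** -2.                         # a made-up P(k) in (h^-1 Mpc)^3
delta2 = k ** 3 * pk / (2. * np.pi ** 2)     # dimensionless power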
def load_2dtr_from3d(ps_path, ps_name, ps_ref, kbin_x_edges=None, kbin_y_edges=None):

    with h5.File(ps_path + ps_ref, 'r') as f:
        ps3d_ref = al.make_vect(al.load_h5(f, 'ps3d'))

    with h5.File(ps_path + ps_name, 'r') as f:
        ps3d = al.make_vect(al.load_h5(f, 'ps3d'))
        #ps3d[np.abs(ps3d)<1.e-20] = np.inf
        #ps3d = ps3d * ps3d_ref.copy()

        if kbin_x_edges is None:
            x = f['kbin_x_edges'][:]
        else:
            x = kbin_x_edges

        if kbin_y_edges is None:
            y = f['kbin_y_edges'][:]
        else:
            y = kbin_y_edges

    # find the k_perp by not including k_nu in the distance
    k_perp = binning.radius_array(ps3d, zero_axes=[0])
    # find the k_para by not including k_RA,Dec in the distance
    k_para = binning.radius_array(ps3d, zero_axes=[1, 2])

    ps2d     = binning.bin_an_array_2d(ps3d,     k_perp, k_para, x, y)[1]
    ps2d_ref = binning.bin_an_array_2d(ps3d_ref, k_perp, k_para, x, y)[1]

    ps2d_ref[ps2d_ref == 0] = np.inf
    ps2d /= ps2d_ref
    ps2d = ps2d ** 0.5
    ps2d[ps2d == 0] = np.inf
    #ps2d = np.ma.masked_equal(ps2d, 0)
    ps2d = 1. / ps2d

    return ps2d, x, y
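
# binning.radius_array and binning.bin_an_array_2d are pipeline helpers; the
# sketch below shows the underlying idea with plain numpy, assuming an
# fftshifted 3-D power cube with axes (freq, ra, dec) and known pixel
# spacings in comoving units.
import numpy as np

def k_perp_k_para(shape, d_freq, d_ra, d_dec):
    """Return |k_perp| and |k_para| for every cell of an fftshifted 3-D cube."""
    k_nu  = 2. * np.pi * np.fft.fftshift(np.fft.fftfreq(shape[0], d=d_freq))
    k_ra  = 2. * np.pi * np.fft.fftshift(np.fft.fftfreq(shape[1], d=d_ra))
    k_dec = 2. * np.pi * np.fft.fftshift(np.fft.fftfreq(shape[2], d=d_dec))
    k_para = np.abs(k_nu)[:, None, None] * np.ones(shape)
    k_perp = np.sqrt(k_ra[None, :, None] ** 2
                     + k_dec[None, None, :] ** 2) * np.ones(shape)
    return k_perp, k_para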
def load_maps_npy(dm_path, dm_file):

    #with h5.File(dm_path+dm_file, 'r') as f:
    #print f.keys()
    imap = al.make_vect(al.load(dm_path + dm_file))

    freq = imap.get_axis('freq')
    #print freq[1] - freq[0]
    #print freq[0], freq[-1]
    ra  = imap.get_axis('ra')
    dec = imap.get_axis('dec')
    ra_edges  = imap.get_axis_edges('ra')
    dec_edges = imap.get_axis_edges('dec')
    #print imap.get_axis('freq')

    return imap, ra, dec, ra_edges, dec_edges, freq
def setup(self):

    params = self.params

    self.n_ra, self.n_dec = params['map_shape']
    self.map_shp = (self.n_ra, self.n_dec)
    self.spacing = params['pixel_spacing']
    self.dec_spacing = self.spacing
    # Negative sign because RA increases from right to left.
    self.ra_spacing = -self.spacing / sp.cos(params['field_centre'][1] * sp.pi / 180.)

    axis_names = ('ra', 'dec')
    map_tmp = np.zeros(self.map_shp, dtype=__dtype__)
    map_tmp = al.make_vect(map_tmp, axis_names=axis_names)
    map_tmp.set_axis_info('ra',  params['field_centre'][0], self.ra_spacing)
    map_tmp.set_axis_info('dec', params['field_centre'][1], self.dec_spacing)
    self.map_tmp = map_tmp
def init_task_list(self):

    with h5.File(self.input_files[0], 'r') as f:
        map_tmp = al.make_vect(al.load_h5(f, 'delta'))
        self.map_info = map_tmp.info

    xps_num = self.input_files_num

    task_list = []
    for ii in range(xps_num):
        tind_l = (ii, )
        tind_r = (ii, )
        tind_o = (ii, )
        task_list.append([tind_l, tind_r, tind_o])

    self.task_list = task_list
    self.dset_shp = (xps_num, )
def read_input(self):

    for input_file in self.input_files:
        print input_file
        self.open(input_file)

    self.map_tmp = al.make_vect(al.load_h5(self.df_in[0], 'dirty_map'))
    self.map_shp = self.map_tmp.shape

    for output_file in self.output_files:
        output_file = output_path(output_file,
                                  relative=not output_file.startswith('/'))
        self.allocate_output(output_file, 'w')
        self.create_dataset_like(-1, 'clean_map', self.map_tmp)
        self.create_dataset_like(-1, 'noise_diag', self.map_tmp)

    return 1
def make_optical_sim(self):

    if self.params['selection'] is None:
        print 'optical sim need selection function, pass'
        return
    else:
        with h5py.File(self.params['selection'], 'r') as f:
            _sel = al.load_h5(f, 'separable')
            _axis_names = _sel.info['axes']
            _sel = al.make_vect(_sel, axis_names=_axis_names)
            _sel_ra  = _sel.get_axis('ra')
            _sel_dec = _sel.get_axis('dec')
            _sel = np.ma.masked_equal(_sel, 0)

            # the simulated cube may have different shape than the
            # original shape of selection function, we 2d interpolate
            # to the correct shape
            _sel_mean = np.ma.mean(_sel, axis=0)
            _sel_freq = np.ma.sum(_sel, axis=(1, 2))
            _sel_freq /= np.ma.sum(_sel_freq)
            #_cut = np.percentile(_sel_mean[~_sel_mean.mask], 60)
            #_sel_mean[_sel_mean>_cut] = _cut
            _sel_2dintp = interp2d(_sel_dec, _sel_ra, _sel_mean,
                                   bounds_error=False, fill_value=0)
            _ra  = self.map_tmp.get_axis('ra')
            _dec = self.map_tmp.get_axis('dec')
            _ra_s = np.argsort(_ra)
            _sel = _sel_2dintp(_dec, _ra[_ra_s])[_ra_s, ...]
            #_sel = _sel * al.ones_like(self.map_tmp)
            _sel = _sel * _sel_freq[:, None, None]

    if not hasattr(self, 'sim_map_delta'):
        self.make_delta_sim()

    poisson_vect = np.vectorize(np.random.poisson)
    mean_num_gal = (self.sim_map_delta + 1.) * _sel

    self.sim_map_optsim = poisson_vect(mean_num_gal)
    self.sim_map_optsim = mean_num_gal

    _sel[_sel == 0] = np.inf
    self.sim_map_optsim = self.sim_map_optsim / _sel - 1.
    _sel[_sel == np.inf] = 0.

    self.sel = _sel
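
# The optical mock above draws galaxy counts from a Poisson distribution with
# mean (1 + delta) * selection, then converts the counts back to an
# overdensity.  A compact numpy-only sketch of that step; `delta` and `sel`
# here are made-up arrays, not the pipeline's, and np.random.poisson accepts
# an array-valued mean directly.
import numpy as np

delta = np.random.normal(0., 0.1, size=(4, 8, 8))       # toy density contrast
sel = np.full(delta.shape, 5.0)                          # toy selection (mean counts)

counts = np.random.poisson(np.clip(1. + delta, 0., None) * sel)   # mock galaxy counts
delta_g = counts / sel - 1.                                        # recovered overdensity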
def init_task_list(self):

    with h5.File(self.input_files[0], 'r') as f:
        map_tmp = al.make_vect(al.load_h5(f, 'clean_map'))
        ant_n, pol_n = map_tmp.shape[:2]
        self.map_info = map_tmp.info

    task_list = []
    for ii in range(self.input_files_num):
        for jj in range(ant_n):
            for kk in range(pol_n):
                tind_l = (ii, jj, kk)
                tind_r = tind_l
                tind_o = tind_l
                task_list.append([tind_l, tind_r, tind_o])

    self.task_list = task_list
    self.dset_shp = (self.input_files_num, ant_n, pol_n)
def test_with_random(unitless=True):
    """Test the power spectral estimator using a random noise cube"""

    delta = 1.33333
    cube1 = algebra.make_vect(np.random.normal(0, 1, size=(257, 124, 68)))

    info = {'axes': ["freq", "ra", "dec"], 'type': 'vect',
            'freq_delta': delta / 3.78, 'freq_centre': 0.,
            'ra_delta': delta / 1.63, 'ra_centre': 0.,
            'dec_delta': delta, 'dec_centre': 0.}

    cube1.info = info
    cube2 = copy.deepcopy(cube1)

    weight1 = algebra.ones_like(cube1)
    weight2 = algebra.ones_like(cube2)

    bin_left, bin_center, bin_right, counts_histo, binavg = \
        calculate_xspec(cube1, cube2, weight1, weight2, window="blackman",
                        truncate=False, nbins=40, unitless=unitless,
                        logbins=True)

    if unitless:
        pwrspec_input = bin_center ** 3. / 2. / math.pi / math.pi
    else:
        pwrspec_input = np.ones_like(bin_center)

    volume = 1.
    for axis_name in cube1.axes:
        axis_vector = cube1.get_axis(axis_name)
        volume *= abs(axis_vector[1] - axis_vector[0])

    pwrspec_input *= volume

    for specdata in zip(bin_left, bin_center, bin_right,
                        counts_histo, binavg, pwrspec_input):
        print ("%10.15g " * 6) % specdata
def init_task_list(self):

    with h5.File(self.input_files[0], 'r') as f:
        map_tmp = al.make_vect(al.load_h5(f, self.params['map_key'][0]))
        ant_n, pol_n = map_tmp.shape[:2]
        self.map_info = map_tmp.info

    xps_num = self.input_files_num / 2

    task_list = []
    for ii in range(xps_num):
        #for jj in range(ant_n):
        #    for kk in range(pol_n):
        tind_l = (ii, )
        tind_r = (ii + xps_num, )
        tind_o = tind_l
        task_list.append([tind_l, tind_r, tind_o])

    self.task_list = task_list
    self.dset_shp = (xps_num, )
def read_input(self):

    for input_file in self.input_files:
        if mpiutil.rank0:
            logger.info('%s' % input_file)
        self.open(input_file)

    map_tmp = al.load_h5(self.df_in[0], 'dirty_map')
    self.map_tmp = al.make_vect(map_tmp, axis_names=map_tmp.info['axes'])
    self.map_shp = self.map_tmp.shape

    for output_file in self.output_files:
        output_file = output_path(output_file,
                                  relative=not output_file.startswith('/'))
        self.allocate_output(output_file, 'w')
        self.create_dataset_like(-1, 'clean_map',  self.map_tmp)
        self.create_dataset_like(-1, 'noise_diag', self.map_tmp)
        self.create_dataset_like(-1, 'dirty_map',  self.map_tmp)

    return 1
def load_maps(dm_path, dm_file, name='clean_map'):

    with h5.File(dm_path + dm_file, 'r') as f:
        print f.keys()
        imap = al.load_h5(f, name)
        imap = al.make_vect(imap, axis_names=imap.info['axes'])
        #imap = al.make_vect(al.load_h5(f, name))

        freq = imap.get_axis('freq')
        #print freq[1] - freq[0]
        #print freq[0], freq[-1]
        ra  = imap.get_axis('ra')
        dec = imap.get_axis('dec')
        ra_edges  = imap.get_axis_edges('ra')
        dec_edges = imap.get_axis_edges('dec')
        #print imap.get_axis('freq')

        try:
            mask = f['mask'][:]
        except KeyError:
            mask = None

    return imap, ra, dec, ra_edges, dec_edges, freq, mask
def setup(self): super(SurveySimToMap, self).setup() params = self.params freq = params['freq'] self.n_freq = freq.shape[0] self.freq_spacing = freq[1] - freq[0] self.n_ra, self.n_dec = params['map_shape'] self.map_shp = (self.n_freq, self.n_ra, self.n_dec) self.spacing = params['pixel_spacing'] self.dec_spacing = self.spacing self.ra_spacing = self.spacing / sp.cos( params['field_centre'][1] * sp.pi / 180.) axis_names = ('freq', 'ra', 'dec') map_tmp = np.zeros(self.map_shp, dtype=__dtype__) map_tmp = al.make_vect(map_tmp, axis_names=axis_names) map_tmp.set_axis_info('freq', freq[self.n_freq // 2], self.freq_spacing) map_tmp.set_axis_info('ra', params['field_centre'][0], self.ra_spacing) map_tmp.set_axis_info('dec', params['field_centre'][1], self.dec_spacing) self.map_tmp = map_tmp mock_n = self.mock_n #for ii in range(mock_n): for ii in self.HI_mock_ids: output_file = 'sim_mock%03d_%s_%s_%s_%s.h5' % ( ii, self.params['prefix'], self.params['survey_mode'], self.params['HI_scenario'], self.params['HI_model_type']) output_file = output_path(output_file, relative=True) self.allocate_output(output_file, 'w') self.create_dataset_like(-1, 'dirty_map', map_tmp) self.create_dataset_like(-1, 'clean_map', map_tmp) self.create_dataset_like(-1, 'count_map', map_tmp)
def iterpstasks(self, input): refinement = self.params['refinement'] task_list = self.task_list for task_ind in mpiutil.mpirange(len(task_list)): tind_l, tind_r, tind_o = task_list[task_ind] tind_l = tuple(tind_l) tind_r = tuple(tind_r) tind_o = tuple(tind_o) msg = ("RANK %03d est. ps.(" + "%03d,"*len(tind_l) + ") x ("\ + "%03d,"*len(tind_r) + ")")%((mpiutil.rank, ) + tind_l + tind_r) logger.info(msg) cube = [] cube_w = [] tind_list = [tind_l, tind_r] for i in range(2): tind = tind_list[i] map_key = self.params['map_key'][i] input_map = input[tind[0]][map_key][tind[1:] + (slice(None), )] input_map_mask = ~np.isfinite(input_map) if (map_key is not 'delta') and (len(self.params['freq_mask']) != 0): # ignore freqency mask for optical data logger.info('apply freq_mask') input_map_mask[self.params['freq_mask'], ...] = True #input_map_mask += input_map == 0. input_map[input_map_mask] = 0. if self.params['prewhite']: input_map_mask = input_map == 0. _mean = np.ma.mean(np.ma.masked_equal(input_map, 0), axis=(1, 2)) input_map -= _mean[:, None, None] input_map[input_map_mask] = 0. input_map = al.make_vect(input_map, axis_names = ['freq', 'ra', 'dec']) for key in input_map.info['axes']: input_map.set_axis_info(key, self.map_info[key+'_centre'], self.map_info[key+'_delta']) weight_key = self.params['weight_key'][i] if weight_key is not None: weight = input[tind[0]][weight_key][tind[1:] + (slice(None), )] weight[input_map_mask] = 0. if weight_key == 'noise_diag': weight = fgrm.make_noise_factorizable(weight) if weight_key == 'separable': logger.debug('apply FKP weight') weight = weight / (1. + weight * self.params['FKP']) weight = al.make_vect(weight, axis_names = ['freq', 'ra', 'dec']) weight.info = input_map.info if not self.params['cube_input'][i]: c, c_info = physical_grid(input_map, refinement=refinement,order=0) else: logger.debug('cube input') c = input_map c_info = None cube.append(c) if weight_key is not None: if not self.params['cube_input'][i]: cw, cw_info = physical_grid(weight, refinement=refinement,order=0) else: cw = weight cw_info = None #cw[c==0] = 0. cube_w.append(cw) del weight else: cw = al.ones_like(c) cw[c==0] = 0. cube_w.append(cw) del c, c_info, cw, input_map if tind_l == tind_r: cube.append(cube[0]) cube_w.append(cube_w[0]) break yield tind_o, cube, cube_w
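
# The 'separable' branch of iterpstasks applies an FKP-style weight,
# w = n / (1 + n * P_fkp), to the optical selection function before the power
# spectrum estimate.  A standalone numpy sketch of that weighting; the
# selection array and the P_fkp value are illustrative, not taken from the
# pipeline configuration.
import numpy as np

sel = np.random.uniform(0., 10., size=(4, 8, 8))   # toy selection / number density
P_fkp = 1.e3                                        # assumed FKP power, arbitrary here
w_fkp = sel / (1. + sel * P_fkp)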
def setup(self): self.refinement = self.params['refinement'] self.scenario = self.params['scenario'] map_pad = self.params['map_pad'] if self.params['map_tmp'] is None: freq = self.params['freq'] * 1.e6 freq_d = freq[1] - freq[0] freq_n = freq.shape[0] freq_c = freq[freq_n // 2] field_centre = self.params['field_centre'] spacing = self.params['pixel_spacing'] dec_spacing = spacing ra_spacing = -spacing / np.cos(field_centre[1] * np.pi / 180.) axis_names = ['freq', 'ra', 'dec'] map_shp = [x + map_pad for x in self.params['map_shape']] map_tmp = np.zeros([ freq_n, ] + map_shp) map_tmp = al.make_vect(map_tmp, axis_names=axis_names) map_tmp.set_axis_info('freq', freq_c, freq_d) map_tmp.set_axis_info('ra', field_centre[0], ra_spacing) map_tmp.set_axis_info('dec', field_centre[1], dec_spacing) self.map_tmp = map_tmp else: pad_shp = ((0, 0), (map_pad, map_pad), (map_pad, map_pad)) with h5py.File(self.params['map_tmp'], 'r') as f: _map_tmp = al.load_h5(f, self.params['map_tmp_key']) _axis_names = _map_tmp.info['axes'] _info = _map_tmp.info _map_tmp = np.pad(_map_tmp, pad_shp, 'constant') _map_tmp = al.make_vect(_map_tmp, axis_names=_axis_names) _map_tmp.info.update(_info) _weight = al.load_h5(f, self.params['map_tmp_weight']) _weight = np.pad(_weight, pad_shp, 'constant') _weight = al.make_vect(_weight, axis_names=_axis_names) #self.map_tmp = al.zeros_like(_map_tmp) self.map_tmp = _map_tmp self.weight = _weight # here we use 300 h km/s from WiggleZ for streaming dispersion self.streaming_dispersion = 300. * 0.72 self.map_pad = map_pad #self.beam_data = np.array([1., 1., 1.]) #self.beam_freq = np.array([900, 1100, 1400]) #* 1.e6 if self.params['beam_file'] is not None: _bd = np.loadtxt(self.params['beam_file']) self.beam_freq = _bd[:, 0] * 1.e6 self.beam_data = _bd[:, 1] else: fwhm1400 = 0.9 self.beam_freq = np.linspace(800., 1600., 500).astype('float') self.beam_data = 1.2 * fwhm1400 * 1400. / self.beam_freq self.beam_freq *= 1.e6 random.seed(3936650408) seeds = random.random_integers(100000000, 1000000000, mpiutil.size) self.seed = seeds[mpiutil.rank] print "RANK: %02d with random seed [%d]" % (mpiutil.rank, self.seed) random.seed(self.seed) self.outfiles = self.params['outfiles'] self.outfiles_split = self.params['outfiles_split'] self.open_outputfiles() self.iter_list = mpiutil.mpirange(self.params['mock_n']) self.iter = 0 self.iter_num = len(self.iter_list)
def realize_simulation(self): """do basic handling to call Richard's simulation code this produces self.sim_map and self.sim_map_phys """ if self.scenario == "nostr": print "running dd+vv and no streaming case" #simobj = corr21cm.Corr21cm.like_kiyo_map(self.map_tmp) simobj = self.corr.like_kiyo_map(self.map_tmp) maps = simobj.get_kiyo_field_physical(refinement=self.refinement) else: if self.scenario == "str": print "running dd+vv and streaming simulation" #simobj = corr21cm.Corr21cm.like_kiyo_map(self.map_tmp, simobj = self.corr.like_kiyo_map( self.map_tmp, sigma_v=self.streaming_dispersion) maps = simobj.get_kiyo_field_physical( refinement=self.refinement) if self.scenario == "ideal": print "running dd-only and no mean simulation" #simobj = corr21cm.Corr21cm.like_kiyo_map(self.map_tmp) simobj = self.corr.like_kiyo_map(self.map_tmp) maps = simobj.get_kiyo_field_physical( refinement=self.refinement, density_only=True, no_mean=True, no_evolution=True) self.simobj = simobj self.kk_input = np.logspace(-2, 0, 200) self.pk_input = simobj.get_pwrspec(self.kk_input) (gbtsim, gbtphys, physdim) = maps # process the physical-space map self.sim_map_phys = al.make_vect(gbtphys, axis_names=('freq', 'ra', 'dec')) pshp = self.sim_map_phys.shape # define the axes of the physical map; several alternatives are commented info = {} info['axes'] = ('freq', 'ra', 'dec') info['type'] = 'vect' info['freq_delta'] = abs(physdim[0] - physdim[1]) / float(pshp[0] - 1) info['freq_centre'] = physdim[0] + info['freq_delta'] * float( pshp[0] // 2) # 'freq_centre': abs(physdim[0] + physdim[1]) / 2., info['ra_delta'] = abs(physdim[2]) / float(pshp[1] - 1) #info['ra_centre'] = info['ra_delta'] * float(pshp[1] // 2) # 'ra_centre': abs(physdim[2]) / 2., info['ra_centre'] = 0. info['dec_delta'] = abs(physdim[3]) / float(pshp[2] - 1) #info['dec_centre'] = info['dec_delta'] * float(pshp[2] // 2) # 'dec_centre': abs(physdim[3]) / 2., info['dec_centre'] = 0. self.sim_map_phys.info = info # process the map in observation coordinates self.sim_map = al.make_vect(gbtsim, axis_names=('freq', 'ra', 'dec')) self.sim_map.copy_axis_info(self.map_tmp) self.sim_map_raw = self.sim_map
def process(self, input): task_list = self.task_list for task_ind in mpiutil.mpirange(len(task_list)): tind_l, tind_r, tind_o = task_list[task_ind] tind_l = tuple(tind_l) tind_r = tuple(tind_r) tind_o = tind_o print ("RANK %03d fgrm.\n(" + "%03d,"*len(tind_l) + ") x ("\ + "%03d,"*len(tind_r) + ")\n")%((mpiutil.rank, ) + tind_l + tind_r) tind_list = [tind_l, tind_r] maps = [] weights = [] freq_good = np.ones(self.dset_shp[0]).astype('bool') if len(self.params['freq_mask']) != 0: freq_good[self.params['freq_mask']] = False for i in range(2): tind = tind_list[i] map_key = self.params['map_key'] #'clean_map' input_map = al.load_h5(input[tind[0]], map_key) input_map = al.make_vect(input_map, axis_names=['freq', 'ra', 'dec']) maps.append(input_map) weight_key = self.params['weight_key'] #'noise_diag' if weight_key is not None: weight = al.load_h5(input[tind[0]], weight_key) if weight_key is 'noise_diag': weight_prior = self.params['weight_prior'] logger.info('using wp %e' % weight_prior) weight = make_noise_factorizable(weight, weight_prior) else: weight = np.ones_like(input_map) weight[input_map == 0] = 0. weight = al.make_vect(weight, axis_names=['freq', 'ra', 'dec']) weight.info = input_map.info try: freq_good *= ~(input[tind[0]]['mask'][:]).astype('bool') except KeyError: logger.info('mask doesn\' exist') pass weights.append(weight) maps[0][~freq_good] = 0. maps[1][~freq_good] = 0. weights[0][~freq_good] = 0. weights[1][~freq_good] = 0. if self.params['conv_factor'] != 0: maps, weights = degrade_resolution( maps, weights, conv_factor=self.params['conv_factor'], mode='constant', beam_file=self.params['beam_file'], fwhm1400=self.params['fwhm1400']) else: logger.info('common reso. conv. ignored') if self.params['add_map'] is not None: _maps = self.params['add_map'] _map_A_path, _map_A_name = os.path.split( os.path.splitext(_maps[0])[0]) _map_B_path, _map_B_name = os.path.split( os.path.splitext(_maps[1])[0]) logger.info('add real map pair (%s %s)' % (_map_A_name, _map_B_name)) with h5.File(os.path.join(_map_A_path, _map_A_name + '.h5'), 'r') as f: maps[0][:] += al.load_h5(f, 'cleaned_00mode/%s' % _map_B_name) with h5.File(os.path.join(_map_B_path, _map_B_name + '.h5'), 'r') as f: maps[1][:] += al.load_h5(f, 'cleaned_00mode/%s' % _map_A_name) svd_info = self.svd_info if svd_info is None: freq_cov, counts = find_modes.freq_covariance( maps[0], maps[1], weights[0], weights[1], freq_good, freq_good) svd_info = find_modes.get_freq_svd_modes( freq_cov, np.sum(freq_good)) mode_list = self.mode_list mode_list_ed = copy.deepcopy(mode_list) mode_list_st = copy.deepcopy(mode_list) mode_list_st[1:] = mode_list_st[:-1] dset_key = tind_o[0] + '_sigvalu' self.df_out[tind_l[0]][dset_key] = svd_info[0] dset_key = tind_o[0] + '_sigvect' self.df_out[tind_l[0]][dset_key] = svd_info[1] self.df_out[tind_l[0]]['weight'][:] = weights[0] self.df_out[tind_l[0]]['mask'][:] = (~freq_good).astype('int') if tind_o[1] != tind_o[0]: dset_key = tind_o[1] + '_sigvalu' self.df_out[tind_r[0]][dset_key] = svd_info[0] dset_key = tind_o[1] + '_sigvect' self.df_out[tind_r[0]][dset_key] = svd_info[2] self.df_out[tind_r[0]]['weight'][:] = weights[1] self.df_out[tind_r[0]]['mask'][:] = (~freq_good).astype('int') for (n_modes_st, n_modes_ed) in zip(mode_list_st, mode_list_ed): svd_modes = svd_info[1][n_modes_st:n_modes_ed] group_name = 'cleaned_%02dmode/' % n_modes_ed maps[0], amp = find_modes.subtract_frequency_modes( maps[0], svd_modes, weights[0], freq_good) dset_key = group_name + tind_o[0] self.df_out[tind_l[0]][dset_key][:] = 
copy.deepcopy(maps[0]) if tind_o[0] != tind_o[1]: svd_modes = svd_info[2][n_modes_st:n_modes_ed] maps[1], amp = find_modes.subtract_frequency_modes( maps[1], svd_modes, weights[1], freq_good) dset_key = group_name + tind_o[1] self.df_out[tind_r[0]][dset_key][:] = copy.deepcopy( maps[1]) # for the case of auto with different svd svd modes if 'Combined' in self.df_out[tind_r[0]][group_name].keys(): dset_key = group_name + 'Combined' _map = maps[0].copy() * weights[0].copy()\ + maps[1].copy() * weights[1].copy() _wet = weights[0].copy() + weights[1].copy() _wet[_wet == 0] = np.inf _map /= _wet self.df_out[tind_r[0]][dset_key][:] = copy.deepcopy(_map) if self.params['output_combined'] is not None: self.combine_results() for ii in range(self.input_files_num): input[ii].close()
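
# The foreground cleaning in `process` above rests on an SVD of the
# frequency-frequency covariance between two maps, followed by subtraction of
# the leading modes along the frequency axis.  A minimal numpy sketch of that
# idea, assuming two (n_freq, n_pix) data matrices with matching pixels
# (find_modes.freq_covariance / get_freq_svd_modes / subtract_frequency_modes
# are pipeline helpers and also carry weights, which are omitted here):
import numpy as np

def subtract_svd_modes(map_a, map_b, n_modes):
    """Remove the first n_modes frequency modes of the cross covariance."""
    cov = np.dot(map_a, map_b.T) / map_a.shape[1]      # (n_freq, n_freq)
    u, s, vh = np.linalg.svd(cov)
    for k in range(n_modes):
        mode_a = u[:, k]
        mode_b = vh[k, :]
        amp_a = np.dot(mode_a, map_a)                  # per-pixel projection
        amp_b = np.dot(mode_b, map_b)
        map_a = map_a - mode_a[:, None] * amp_a[None, :]
        map_b = map_b - mode_b[:, None] * amp_b[None, :]
    return map_a, map_b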
def init_ps_datasets(self, ts): ts.main_data_name = self.params['data_sets'] n_time, n_freq, n_pol, n_bl = ts.main_data.shape tblock_len = self.params['tblock_len'] freq = ts['freq'] freq_c = freq[n_freq//2] freq_d = freq[1] - freq[0] field_centre = self.params['field_centre'] self.pol = ts['pol'][:] self.bl = ts['blorder'][:] # for now, we assume no frequency corr, and thermal noise only. ra_spacing = self.ra_spacing dec_spacing = self.dec_spacing axis_names = ('bl', 'pol', 'freq', 'ra', 'dec') dirty_map_tmp = np.zeros((n_bl, n_pol, n_freq) + self.map_shp) dirty_map_tmp = al.make_vect(dirty_map_tmp, axis_names=axis_names) dirty_map_tmp.set_axis_info('bl', np.arange(n_bl)[n_bl//2], 1) dirty_map_tmp.set_axis_info('pol', np.arange(n_pol)[n_pol//2], 1) dirty_map_tmp.set_axis_info('freq', freq_c, freq_d) dirty_map_tmp.set_axis_info('ra', field_centre[0], self.ra_spacing) dirty_map_tmp.set_axis_info('dec', field_centre[1], self.dec_spacing) self.map_axis_names = axis_names #self.dirty_map = dirty_map_tmp self.create_dataset_like('dirty_map', dirty_map_tmp) self.create_dataset_like('clean_map', dirty_map_tmp) self.create_dataset_like('noise_diag', dirty_map_tmp) self.df['mask'] = np.zeros([n_bl, n_pol, n_freq]) #self.mask = np.zeros([n_bl, n_pol, n_freq]) if self.params['diag_cov']: axis_names = ('bl', 'pol', 'freq', 'ra', 'dec') cov_tmp = np.zeros((n_bl, n_pol, n_freq) + self.map_shp) else: axis_names = ('bl', 'pol', 'freq', 'ra', 'dec', 'ra', 'dec') cov_tmp = np.zeros((n_bl, n_pol, n_freq) + self.map_shp + self.map_shp) cov_tmp = al.make_vect(cov_tmp, axis_names=axis_names) cov_tmp.set_axis_info('bl', np.arange(n_bl)[n_bl//2], 1) cov_tmp.set_axis_info('pol', np.arange(n_pol)[n_pol//2], 1) cov_tmp.set_axis_info('freq', freq_c, freq_d) cov_tmp.set_axis_info('ra', field_centre[0], self.ra_spacing) cov_tmp.set_axis_info('dec', field_centre[1], self.dec_spacing) #self.cov = cov_tmp self.create_dataset_like('cov_inv', cov_tmp) self.df['pol'] = self.pol self.df['bl'] = self.bl #func = ts.freq_pol_and_bl_data_operate func = ts.freq_data_operate return func
def physical_grid_lf(input_array, refinement=1, pad=2, order=0, feedback=1, mode='constant'): r"""Project from freq, ra, dec into physical coordinates Parameters ---------- input_array: np.ndarray The freq, ra, dec map Returns ------- cube: np.ndarray The cube projected back into physical coordinates """ if not hasattr(pad, '__iter__'): pad = [pad, pad, pad] pad = np.array(pad) freq_axis = input_array.get_axis('freq') #/ 1.e6 ra_axis = input_array.get_axis('ra') dec_axis = input_array.get_axis('dec') freq_axis = np.pad(freq_axis, 1, mode='edge') ra_axis = np.pad(ra_axis, 1, mode='edge') dec_axis = np.pad(dec_axis, 1, mode='edge') freq_axis[0] -= input_array.info['freq_delta'] freq_axis[-1] += input_array.info['freq_delta'] ra_axis[0] -= input_array.info['ra_delta'] ra_axis[-1] += input_array.info['ra_delta'] dec_axis[0] -= input_array.info['dec_delta'] dec_axis[-1] += input_array.info['dec_delta'] input_array = np.pad(input_array, 1, mode='constant') _dec, _ra = np.meshgrid(dec_axis, ra_axis) _ra, _dec = centering_to_fieldcenter(_ra, _dec) # convert the freq, ra and dec axis to physical distance z_axis = __nu21__ / freq_axis - 1.0 d_axis = (cosmology.comoving_transverse_distance(z_axis) * cosmology.h).value c_axis = (cosmology.comoving_distance(z_axis) * cosmology.h).value d_axis = d_axis[:, None, None] c_axis = c_axis[:, None, None] _ra = _ra[None, :, :] _dec = _dec[None, :, :] xx = d_axis * np.cos(np.deg2rad(_ra)) * np.cos(np.deg2rad(_dec)) yy = d_axis * np.sin(np.deg2rad(_ra)) * np.cos(np.deg2rad(_dec)) zz = c_axis * np.sin(np.deg2rad(_dec)) xx = xx.flatten()[:, None] yy = yy.flatten()[:, None] zz = zz.flatten()[:, None] dd = input_array.flatten()[:, None] coord = np.concatenate([xx, yy, zz], axis=1) #input_array_f = NearestNDInterpolator(coord, input_array.flatten()) #input_array_f = Rbf(xx, yy, zz, input_array.flatten()[:, None], function='linear') (numz, numx, numy) = input_array.shape c1, c2 = zz.min(), zz.max() c_center = 0.5 * (c1 + c2) phys_dim = np.array([c2 - c1, xx.max() - xx.min(), yy.max() - yy.min()]) n = np.array([numz, numx, numy]) # Enlarge cube size by `pad` in each dimension, so raytraced cube # sits exactly within the gridded points. phys_dim = phys_dim * (n + pad).astype(float) / n.astype(float) c1 = c_center - (c_center - c1) * (n[0] + pad[0]) / float(n[0]) c2 = c_center + (c2 - c_center) * (n[0] + pad[0]) / float(n[0]) n = n + pad # now multiply by scaling for a finer sub-grid n = (refinement * n).astype('int') if feedback > 0: msg = "converting from obs. to physical coord\n"\ "refinement=%s, pad=(%s, %s, %s)\n "\ "(%d, %d, %d)->(%f to %f) x %f x %f\n "\ "(%d, %d, %d) (h^-1 cMpc)^3\n" % \ ((refinement, ) + tuple(pad) + ( numz, numx, numy, c1, c2, phys_dim[1], phys_dim[2], n[0], n[1], n[2])) msg += "dx = %f, dy = %f, dz = %f" % ( abs(phys_dim[1]) / float(n[1] - 1), abs(phys_dim[2]) / float(n[2] - 1), abs(c2 - c1) / float(n[0] - 1)) logger.debug(msg) print msg # this is wasteful in memory, but numpy can be pickled phys_map = algebra.make_vect(np.zeros(n), axis_names=('freq', 'ra', 'dec')) # TODO: should this be more sophisticated? N-1 or N? 
info = {} info['axes'] = ('freq', 'ra', 'dec') info['type'] = 'vect' info['freq_delta'] = abs(c2 - c1) / float(n[0] - 1) info['freq_centre'] = c1 + info['freq_delta'] * float(n[0] // 2) info['ra_delta'] = abs(phys_dim[1]) / float(n[1] - 1) info['ra_centre'] = 0.5 * (xx.max() + xx.min()) info['dec_delta'] = abs(phys_dim[2]) / float(n[2] - 1) info['dec_centre'] = 0.5 * (yy.max() + yy.min()) phys_map.info = info # same as np.linspace(c1, c2, n[0], endpoint=True) radius_axis = phys_map.get_axis("freq") x_axis = phys_map.get_axis("ra") y_axis = phys_map.get_axis("dec") _yy, _xx = np.meshgrid(y_axis, x_axis) #dd_f = NearestNDInterpolator(coord, dd) _pp = 0 for i in range(radius_axis.shape[0]): if int(10 * i / float(radius_axis.shape[0])) > _pp: print '.', _pp = int(10 * i / float(radius_axis.shape[0])) #print '%3d '%i, _zz = radius_axis[i] * np.ones(_yy.shape) _sel = zz[:, 0] < radius_axis[i] + 1 * info['freq_delta'] _sel *= zz[:, 0] > radius_axis[i] - 1 * info['freq_delta'] if np.any(_sel): #dd_f = Rbf(xx[_sel], yy[_sel], zz[_sel], dd[_sel], function='linear') dd_f = NearestNDInterpolator(coord[_sel], dd[_sel]) phys_map[i] = dd_f(_xx, _yy, _zz)[:, :, 0] #phys_map[i] = dd_f(_xx, _yy, _zz)[:, :, 0] print '. Done' #phys_map_npy = algebra.make_vect(phys_map_npy, axis_names=('freq', 'ra', 'dec')) #phys_map_npy.info = info return phys_map, info
def physical_grid(input_array, refinement=1, pad=2, order=0, feedback=1, mode='constant'): r"""Project from freq, ra, dec into physical coordinates Parameters ---------- input_array: np.ndarray The freq, ra, dec map Returns ------- cube: np.ndarray The cube projected back into physical coordinates """ if not hasattr(pad, '__iter__'): pad = [pad, pad, pad] pad = np.array(pad) freq_axis = input_array.get_axis('freq') #/ 1.e6 ra_axis = input_array.get_axis('ra') dec_axis = input_array.get_axis('dec') nu_lower, nu_upper = freq_axis.min(), freq_axis.max() ra_fact = sp.cos(sp.pi * input_array.info['dec_centre'] / 180.0) thetax, thetay = np.ptp(ra_axis), np.ptp(dec_axis) thetax *= ra_fact (numz, numx, numy) = input_array.shape z1 = __nu21__ / nu_upper - 1.0 z2 = __nu21__ / nu_lower - 1.0 d1 = (cosmology.comoving_transverse_distance(z1) * cosmology.h).value d2 = (cosmology.comoving_transverse_distance(z2) * cosmology.h).value c1 = (cosmology.comoving_distance(z1) * cosmology.h).value c2 = (cosmology.comoving_distance(z2) * cosmology.h).value c1 = np.sqrt(c1**2. - (((0.5 * thetax * u.deg).to(u.rad)).value * d1)**2) c_center = (c1 + c2) / 2. # Make cube pixelisation finer, such that angular cube will # have sufficient resolution on the closest face. phys_dim = np.array([ c2 - c1, ((thetax * u.deg).to(u.rad)).value * d2, ((thetay * u.deg).to(u.rad)).value * d2 ]) # Note that the ratio of deltas in Ra, Dec in degrees may # be different than the Ra, Dec in physical coordinates due to # rounding onto this grid #n = np.array([numz, int(d2 / d1 * numx), int(d2 / d1 * numy)]) n = np.array([numz, numx, numy]) # Enlarge cube size by `pad` in each dimension, so raytraced cube # sits exactly within the gridded points. phys_dim = phys_dim * (n + pad).astype(float) / n.astype(float) c1 = c_center - (c_center - c1) * (n[0] + pad[0]) / float(n[0]) c2 = c_center + (c2 - c_center) * (n[0] + pad[0]) / float(n[0]) n = n + pad # now multiply by scaling for a finer sub-grid n = (refinement * n).astype('int') if feedback > 0: msg = "converting from obs. to physical coord\n"\ "refinement=%s, pad=(%s, %s, %s)\n "\ "(%d, %d, %d)->(%f to %f) x %f x %f\n "\ "(%d, %d, %d) (h^-1 cMpc)^3\n" % \ ((refinement, ) + tuple(pad) + ( numz, numx, numy, c1, c2, phys_dim[1], phys_dim[2], n[0], n[1], n[2])) msg += "dx = %f, dy = %f, dz = %f" % ( abs(phys_dim[1]) / float(n[1] - 1), abs(phys_dim[2]) / float(n[2] - 1), abs(c2 - c1) / float(n[0] - 1)) logger.debug(msg) print msg # this is wasteful in memory, but numpy can be pickled phys_map_npy = np.zeros(n) phys_map = algebra.make_vect(phys_map_npy, axis_names=('freq', 'ra', 'dec')) #mask = np.ones_like(phys_map) mask = np.ones_like(phys_map_npy) # TODO: should this be more sophisticated? N-1 or N? info = {} info['axes'] = ('freq', 'ra', 'dec') info['type'] = 'vect' #info = {'freq_delta': abs(phys_dim[0])/float(n[0]), # 'freq_centre': abs(c2+c1)/2., info['freq_delta'] = abs(c2 - c1) / float(n[0] - 1) info['freq_centre'] = c1 + info['freq_delta'] * float(n[0] // 2) info['ra_delta'] = abs(phys_dim[1]) / float(n[1] - 1) #info['ra_centre'] = info['ra_delta'] * float(n[1] // 2) info['ra_centre'] = 0. info['dec_delta'] = abs(phys_dim[2]) / float(n[2] - 1) #info['dec_centre'] = info['dec_delta'] * float(n[2] // 2) info['dec_centre'] = 0. phys_map.info = info #print info # same as np.linspace(c1, c2, n[0], endpoint=True) radius_axis = phys_map.get_axis("freq") x_axis = phys_map.get_axis("ra") y_axis = phys_map.get_axis("dec") # Construct an array of the redshifts on each slice of the cube. 
#comoving_inv = cosmo.inverse_approx(cosmology.comoving_distance, z1 * 0.9, z2 * 1.1) #za = comoving_inv(radius_axis) # redshifts on the constant-D spacing _xp = np.linspace(z1 * 0.9, z2 * 1.1, 500) _fp = (cosmology.comoving_distance(_xp) * cosmology.h).value #comoving_inv = interp1d(_fp, _xp) #za = comoving_inv(radius_axis) # redshifts on the constant-D spacing za = np.interp(radius_axis, _fp, _xp) nua = __nu21__ / (1. + za) gridy, gridx = np.meshgrid(y_axis, x_axis) interpol_grid = np.zeros((3, n[1], n[2])) for i in range(n[0]): # nua[0] = nu_upper, nua[1] = nu_lower #print nua[i], freq_axis[0], freq_axis[-1], (nua[i] - freq_axis[0]) / \ # (freq_axis[-1] - freq_axis[0]) * numz #_radius_axis = np.sqrt(radius_axis[i]**2 + gridy**2 + gridx**2) #_radius_axis = np.sqrt(radius_axis[i]**2 + gridx**2) #za = np.interp(_radius_axis, _fp, _xp) #nua = __nu21__ / (1. + za) interpol_grid[0, :, :] = (nua[i] - freq_axis[0]) / \ (freq_axis[-1] - freq_axis[0]) * numz proper_z = cosmology.comoving_transverse_distance(za[i]) * cosmology.h proper_z = proper_z.value angscale = ((proper_z * u.deg).to(u.rad)).value interpol_grid[1, :, :] = gridx / angscale / thetax * numx + numx / 2 interpol_grid[2, :, :] = gridy / angscale / thetay * numy + numy / 2 phys_map_npy[i, :, :] = sp.ndimage.map_coordinates(input_array, interpol_grid, order=order, mode=mode) interpol_grid[1, :, :] = np.logical_or(interpol_grid[1, :, :] >= numx, interpol_grid[1, :, :] < 0) interpol_grid[2, :, :] = np.logical_or(interpol_grid[2, :, :] >= numy, interpol_grid[2, :, :] < 0) mask = np.logical_not( np.logical_or(interpol_grid[1, :, :], interpol_grid[2, :, :])) phys_map_npy *= mask phys_map_npy = algebra.make_vect(phys_map_npy, axis_names=('freq', 'ra', 'dec')) phys_map_npy.info = info return phys_map_npy, info
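
# Both physical_grid routines convert 21 cm frequencies to redshift and then
# to comoving distances through an astropy-style cosmology object.  The
# essential mapping as a standalone sketch; Planck15 is an illustrative
# choice, the pipeline's `cosmology` object may use different parameters.
import numpy as np
from astropy.cosmology import Planck15 as cosmo

nu21 = 1420.405751786                      # MHz, rest frequency of the 21 cm line
freq = np.linspace(700., 900., 64)         # MHz, toy band
z = nu21 / freq - 1.
d_comoving   = cosmo.comoving_distance(z).value * cosmo.h            # h^-1 Mpc
d_transverse = cosmo.comoving_transverse_distance(z).value * cosmo.h # h^-1 Mpc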
def cross_power_est_highmem(arr1, arr2, weight1, weight2, window="blackman", nonorm=False): """Calculate the cross-power spectrum of a two nD fields. The arrays must be identical and have the same length (physically and in pixel number) along each axis. Same goal as above without the emphasis on saving memory. This is the "tried and true" legacy function. """ if window: window_function = fftutil.window_nd(arr1.shape, name=window) weight1 *= window_function weight2 *= window_function warr1 = arr1 * weight1 warr2 = arr2 * weight2 ndim = arr1.ndim fft_arr1 = np.fft.fftshift(np.fft.fftn(warr1)) fft_arr2 = np.fft.fftshift(np.fft.fftn(warr2)) xspec = fft_arr1 * fft_arr2.conj() xspec = xspec.real # correct for the weighting product_weight = weight1 * weight2 xspec /= np.sum(product_weight) # make the axes k_axes = tuple(["k_" + axis_name for axis_name in arr1.axes]) xspec_arr = algebra.make_vect(xspec, axis_names=k_axes) info = {'axes': k_axes, 'type': 'vect'} width = np.zeros(ndim) for axis_index in range(ndim): n_axis = arr1.shape[axis_index] axis_name = arr1.axes[axis_index] axis_vector = arr1.get_axis(axis_name) delta_axis = abs(axis_vector[1] - axis_vector[0]) width[axis_index] = delta_axis k_axis = np.fft.fftshift(np.fft.fftfreq(n_axis, d=delta_axis)) k_axis *= 2. * math.pi delta_k_axis = abs(k_axis[1] - k_axis[0]) k_name = k_axes[axis_index] info[k_name + "_delta"] = delta_k_axis info[k_name + "_centre"] = 0. #print k_axis #print k_name, n_axis, delta_axis xspec_arr.info = info #print xspec_arr.get_axis("k_dec") if not nonorm: xspec_arr *= width.prod() return xspec_arr
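
# A condensed numpy sketch of the weighted FFT cross power computed above:
# apodize, FFT both weighted fields, multiply one by the conjugate of the
# other, and normalize by the sum of the weight product.  The axis
# bookkeeping and the algebra vector wrapper are omitted.
import numpy as np

def simple_cross_power(arr1, arr2, w1, w2):
    fft1 = np.fft.fftshift(np.fft.fftn(arr1 * w1))
    fft2 = np.fft.fftshift(np.fft.fftn(arr2 * w2))
    xspec = (fft1 * fft2.conj()).real
    return xspec / np.sum(w1 * w2)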
def setup(self): ant_file = self.params['ant_file'] #ant_dat = np.genfromtxt(ant_file, # dtype=[('name', 'S4'), ('X', 'f8'), ('Y', 'f8'), ('Z', 'f8')]) ant_dat = pd.read_fwf(ant_file, header=None, names=['name', 'X', 'Y', 'Z', 'px', 'py']) self.ants = np.array(ant_dat['name'], dtype='str') ants_pos = [ np.array(ant_dat['X'])[:, None], np.array(ant_dat['Y'])[:, None], np.array(ant_dat['Z'])[:, None] ] self.ants_pos = np.concatenate(ants_pos, axis=1) freq = self.params['freq'] dfreq = freq[1] - freq[0] freq_n = freq.shape[0] self.SM = globals()[self.params['survey_mode']]( self.params['schedule_file']) #self.SM.generate_altaz(startalt, startaz, starttime, obs_len, obs_speed, obs_int) self.SM.generate_altaz() self.SM.radec_list([ant_dat['px'], ant_dat['py']]) #starttime = Time(self.params['starttime']) #startalt, startaz = self.params['startpointing'] #startalt *= u.deg #startaz *= u.deg #obs_speed = self.params['obs_speed'] obs_int = self.SM.obs_int #self.params['obs_int'] self.obs_int = obs_int samplerate = ((1. / obs_int).to(u.Hz)).value #obs_tot = self.SM.obs_tot # self.params['obs_tot'] #obs_len = int((obs_tot / obs_int).decompose().value) #self.block_time = self.SM.sche['block_time'] #self.params['block_time'] self.block_time = np.array(self.SM.sche['block_time']) #self.block_len = int((block_time / obs_int).decompose().value) block_num = self.block_time.shape[0] _obs_int = (obs_int.to(u.second)).value self._RMS = self.params['T_rec'] / np.sqrt(_obs_int * dfreq * 1.e6) if self.params['fg_syn_model'] is not None: self.syn_model = hp.read_map(self.params['fg_syn_model'], range(freq.shape[0])) self.syn_model = self.syn_model.T if self.params['HI_model'] is not None: with h5py.File(self.params['HI_model'], 'r') as fhi: #_HI_model = al.make_vect( _HI_model = al.load_h5(fhi, self.params['HI_model_type']) logger.info('HI bias %3.2f' % self.params['HI_bias']) _HI_model *= self.params['HI_bias'] if self.params['HI_mock_ids'] is not None: self.HI_mock_ids = list(self.params['HI_mock_ids']) _HI_model = _HI_model[self.HI_mock_ids] else: self.HI_mock_ids = range(_HI_model.shape[0]) self.mock_n = _HI_model.shape[0] self.HI_model = al.make_vect(_HI_model) else: self.mock_n = self.params['mock_n'] if self.params['fnoise']: self.FN = fnoise.FNoise(dtime=obs_int.value, dfreq=dfreq, alpha=self.params['alpha'], f0=self.params['f0'], beta=self.params['beta']) self.get_blorder() #self.iter_list = mpiutil.mpirange(0, obs_len, self.block_len) self.iter_list = mpiutil.mpirange(0, block_num) self.iter = 0 self.iter_num = len(self.iter_list)
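
# The per-sample noise level in the setup above follows the radiometer
# equation as written there, sigma = T_rec / sqrt(dt * dnu).  A standalone
# sketch with illustrative numbers (the pipeline reads T_rec and the sampling
# from its parameter file):
import numpy as np

T_rec = 25.          # K, assumed receiver temperature
dt = 1.0             # s, integration time per sample
dnu = 1.e6           # Hz, channel width
rms = T_rec / np.sqrt(dt * dnu)                       # K per sample per channel
noise = np.random.normal(0., rms, size=(1024, 64))    # toy (time, freq) noise block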
def show_map(map_path, map_type, indx=(), figsize=(10, 4), xlim=None, ylim=None, logscale=False, vmin=None, vmax=None, sigma=2., inv=False, mK=True, title='', c_label=None, factorize=False, nvss_path=None, smoothing=False, opt=False, print_info=False, submean=False): ext = os.path.splitext(map_path)[-1] if ext == '.h5': with h5.File(map_path, 'r') as f: keys = tuple(f.keys()) imap = al.load_h5(f, map_type) if print_info: logger.info(('%s ' * len(keys)) % keys) print imap.info try: mask = f['mask'][:].astype('bool') except KeyError: mask = None elif ext == '.npy': imap = al.load(map_path) mask = None else: raise IOError('%s not exists' % map_path) imap = al.make_vect(imap, axis_names=imap.info['axes']) freq = imap.get_axis('freq') ra = imap.get_axis('ra') dec = imap.get_axis('dec') ra_edges = imap.get_axis_edges('ra') dec_edges = imap.get_axis_edges('dec') if map_type == 'noise_diag' and factorize: imap = fgrm.make_noise_factorizable(imap) #imap[np.abs(imap) < imap.max() * 1.e-4] = 0. imap = np.ma.masked_equal(imap, 0) imap = np.ma.masked_invalid(imap) if mask is not None: imap[mask] = np.ma.masked imap = imap[indx] freq = freq[indx[-1]] if isinstance(indx[-1], slice): freq = (freq[0], freq[-1]) #print imap.shape imap = np.ma.mean(imap, axis=0) else: freq = (freq, ) if not opt: if mK: if map_type == 'noise_diag': imap = imap * 1.e6 unit = r'$[\rm mK]^2$' else: imap = imap * 1.e3 unit = r'$[\rm mK]$' else: if map_type == 'noise_diag': unit = r'$[\rm K]^2$' else: unit = r'$[\rm K]$' else: unit = r'$\delta N$' if c_label is None: c_label = unit if inv: imap[imap == 0] = np.inf imap = 1. / imap if xlim is None: xlim = [ra_edges.min(), ra_edges.max()] if ylim is None: ylim = [dec_edges.min(), dec_edges.max()] #imap -= np.ma.mean(imap) if smoothing: _sig = 3. / (8. * np.log(2.))**0.5 / 1. imap = gf(imap, _sig) if submean: imap -= np.ma.mean(imap) if logscale: imap = np.ma.masked_less(imap, 0) if vmin is None: vmin = np.ma.min(imap) if vmax is None: vmax = np.ma.max(imap) norm = mpl.colors.LogNorm(vmin=vmin, vmax=vmax) else: if sigma is not None: if vmin is None: vmin = np.ma.mean(imap) - sigma * np.ma.std(imap) if vmax is None: vmax = np.ma.mean(imap) + sigma * np.ma.std(imap) else: if vmin is None: vmin = np.ma.min(imap) if vmax is None: vmax = np.ma.max(imap) #if vmax is None: vmax = np.ma.median(imap) norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax) fig = plt.figure(figsize=figsize) l = 0.08 * 10. / figsize[0] b = 0.08 * 4. / figsize[1] w = 1 - 0.20 * 10. / figsize[0] h = 1 - 0.10 * 4. / figsize[1] ax = fig.add_axes([l, b, w, h]) l = 1 - 0.11 * 10. / figsize[0] b = 0.20 * 4 / figsize[1] w = 1 - 0.10 * 10 / figsize[0] - l h = 1 - 0.34 * 4 / figsize[1] cax = fig.add_axes([l, b, w, h]) ax.set_aspect('equal') #imap = np.sum(imap, axis=1) #imap = np.array(imap) cm = ax.pcolormesh(ra_edges, dec_edges, imap.T, norm=norm) if len(freq) == 1: ax.set_title(title + r'${\rm Frequency}\, %7.3f\,{\rm MHz}$' % freq) else: ax.set_title( title + r'${\rm Frequency}\, %7.3f\,{\rm MHz}$ - $%7.3f\,{\rm MHz}$' % freq) ax.set_xlim(xlim) ax.set_ylim(ylim) ax.set_xlabel(r'${\rm RA}\,[^\circ]$') ax.set_ylabel(r'${\rm Dec}\,[^\circ]$') nvss_range = [ [ra_edges.min(), ra_edges.max(), dec_edges.min(), dec_edges.max()], ] if nvss_path is not None: nvss_cat = plot_waterfall.get_nvss_radec(nvss_path, nvss_range) nvss_sel = nvss_cat['FLUX_20_CM'] > 10. 
nvss_ra = nvss_cat['RA'][nvss_sel] nvss_dec = nvss_cat['DEC'][nvss_sel] ax.plot(nvss_ra, nvss_dec, 'ko', mec='k', mfc='none', ms=8, mew=1.5) _sel = nvss_cat['FLUX_20_CM'] > 100. _id = nvss_cat['NAME'][_sel] _ra = nvss_cat['RA'][_sel] _dec = nvss_cat['DEC'][_sel] _flx = nvss_cat['FLUX_20_CM'][_sel] for i in range(np.sum(_sel)): ra_idx = np.digitize(_ra[i], ra_edges) - 1 dec_idx = np.digitize(_dec[i], dec_edges) - 1 ax.plot(ra[ra_idx], dec[dec_idx], 'wx', ms=10, mew=2) _c = SkyCoord(_ra[i] * u.deg, _dec[i] * u.deg) print '%s [RA,Dec]:'%_id[i] \ + '[%7.4fd'%_c.ra.deg \ + '(%dh%dm%6.4f) '%_c.ra.hms\ + ': %7.4fd], FLUX %7.4f Jy'%(_c.dec.deg, _flx[i]/1000.) if not logscale: ticks = list(np.linspace(vmin, vmax, 5)) ticks_label = [] for x in ticks: ticks_label.append(r"$%5.2f$" % x) fig.colorbar(cm, ax=ax, cax=cax, ticks=ticks) cax.set_yticklabels(ticks_label) else: fig.colorbar(cm, ax=ax, cax=cax) cax.minorticks_off() if c_label is None: c_label = r'$T\,$' + unit cax.set_ylabel(c_label) return xlim, ylim, (vmin, vmax), fig
def cross_power_est(arr1, arr2, weight1, weight2, window="blackman", nonorm=True): """Calculate the cross-power spectrum of a two nD fields. The arrays must be identical and have the same length (physically and in pixel number) along each axis. inputs are clobbered to save memory """ info, k_axes, width = make_k_axes(arr1) #weight1[weight2==0] = 0. #weight2[weight1==0] = 0. if window: # along all axes window_function = fftutil.window_nd(arr1.shape, name=window) #window_function_0 = fftutil.window_nd(arr1.shape[:1], name='hamming') #window_function_1 = fftutil.window_nd(arr1.shape[1:], name=window) #window_function = window_function_0[:, None, None] * window_function_1[None, :, :] # apodize along frequency only #window_func = getattr(np, window) #window_function = window_func(arr1.shape[0]) #window_function = window_function[:, None, None] weight1 = weight1 * window_function weight2 = weight2 * window_function del window_function msk1 = arr1 == 0 msk2 = arr2 == 0 arr1 = arr1 * weight1 arr2 = arr2 * weight2 arr1 = arr1 - np.mean(arr1, axis=(1, 2))[:, None, None] arr2 = arr2 - np.mean(arr2, axis=(1, 2))[:, None, None] arr1[msk1] = 0. arr2[msk2] = 0. # correct for the weighting fisher_diagonal = np.sum(weight1 * weight2) fft_arr1 = np.fft.fftshift(np.fft.fftn(arr1)) fft_arr2 = np.fft.fftshift(np.fft.fftn(arr2)) fft_arr1 *= fft_arr2.conj() xspec = fft_arr1.real del fft_arr1, fft_arr2 gc.collect() xspec /= fisher_diagonal # make the axes xspec_arr = algebra.make_vect(xspec, axis_names=k_axes) xspec_arr.info = info #print xspec_arr.get_axis("k_dec") #print np.median(xspec_arr) if not nonorm: xspec_arr *= width.prod() else: logger.debug('width prod %f' % width.prod()) #print np.median(xspec_arr) return xspec_arr
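
# `make_k_axes` (used by cross_power_est above) sets up the conjugate k axis
# for each map axis and records its spacing in the info dict.  A sketch of
# that relationship for one axis, assuming n pixels of comoving width dx:
import numpy as np

n, dx = 128, 0.5                                        # illustrative values
k = 2. * np.pi * np.fft.fftshift(np.fft.fftfreq(n, d=dx))
dk = k[1] - k[0]                                        # the stored k_delta
# the fftshifted axis is symmetric about zero, so the stored k_centre is 0.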