def get_nlkk_single(estimator, modlmap, tnoise, enoise, bnoise, tmask, pmask,
                    pols=['TT', 'TE', 'EE', 'EB', 'TB']):
    import symlens as s
    feed_dict = get_feed(lcltt, lclte, lclee, lclbb, tnoise, enoise, bnoise)
    alpha_estimator = estimator
    beta_estimator = estimator
    npols = len(pols)
    masks = {'T': tmask, 'E': pmask, 'B': pmask}
    bin_edges = np.arange(40, 2500, 40)
    binner = stats.bin2D(modlmap, bin_edges)
    cents = binner.centers
    nls = np.zeros((npols, npols, cents.size))
    Als = []
    for i in range(npols):
        a, b = pols[i]
        Als.append(
            s.A_l(shape, wcs, feed_dict, alpha_estimator, pols[i],
                  xmask=masks[a], ymask=masks[b]))
    for i in range(npols):
        for j in range(i, npols):
            print(pols[i], 'x', pols[j])
            alpha_XY = pols[i]
            beta_XY = pols[j]
            a, b = alpha_XY
            c, d = beta_XY
            if i == j:
                xmask = masks[a]
                ymask = masks[b]
            else:
                xmask = masks['T']
                ymask = masks['T']
            nl = s.N_l_cross(shape, wcs, feed_dict,
                             alpha_estimator, alpha_XY,
                             beta_estimator, beta_XY,
                             xmask=xmask, ymask=ymask,
                             Aalpha=Als[i], Abeta=Als[j],
                             field_names_alpha=None, field_names_beta=None)
            cents, nl1d = binner.bin(nl)
            nls[i, j] = nl1d.copy()
            if i != j:
                nls[j, i] = nl1d.copy()
    ncoadd = get_mv(nls)
    return cents, nls, ncoadd
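# --- Hedged sketch (added; `get_mv` is not defined in this snippet) ---
# The minimum-variance combination of per-estimator lensing noise curves is usually
# N_MV(L) = 1 / sum_ij [N^-1(L)]_ij, inverting the npols x npols matrix bin by bin.
# This is an illustrative implementation under that assumption, not necessarily the
# original get_mv.
def get_mv_sketch(nls):
    import numpy as np
    npols, _, nbins = nls.shape
    ncoadd = np.zeros(nbins)
    for k in range(nbins):
        cinv = np.linalg.inv(nls[:, :, k])   # inverse of the N_L matrix in this bin
        ncoadd[k] = 1. / cinv.sum()          # MV noise from the sum of inverse entries
    return ncoadd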
def signal_average(cov, bin_edges=None, bin_width=40, kind=3, lmin=None, dlspace=True, return_bins=False, **kwargs):
    """
    dcov = cov * ellfact
    bin dcov in annuli
    interpolate back on to ell
    cov = dcov / ellfact
    where ellfact = ell**2 if dlspace else 1
    """
    modlmap = cov.modlmap()
    assert np.all(np.isfinite(cov))
    dcov = cov*modlmap**2. if dlspace else cov.copy()
    if lmin is None:
        minell = maps.minimum_ell(dcov.shape, dcov.wcs)
    else:
        minell = modlmap[modlmap <= lmin].max()
    if bin_edges is None:
        bin_edges = np.append([2], np.arange(minell, modlmap.max(), bin_width))
    binner = stats.bin2D(modlmap, bin_edges)
    cents, c1d = binner.bin(dcov)
    outcov = enmap.enmap(maps.interp(cents, c1d, kind=kind, fill_value=c1d[-1], **kwargs)(modlmap), dcov.wcs)
    with np.errstate(invalid='ignore'):
        outcov = outcov / modlmap**2. if dlspace else outcov
    outcov[modlmap < 2] = 0
    assert np.all(np.isfinite(outcov))
    if return_bins:
        return cents, c1d, outcov
    else:
        return outcov
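# --- Hedged usage sketch (added): smooth a toy 2D power spectrum with signal_average.
# Assumes pixell/orphics are importable as in the rest of this file; the geometry and
# the flat input power below are made up for illustration.
def _signal_average_demo():
    import numpy as np
    from pixell import enmap
    shape, wcs = enmap.geometry(pos=(0, 0), shape=(256, 256), res=np.deg2rad(2.0/60.))
    p2d = enmap.enmap(np.ones(shape), wcs)          # toy: flat 2D power
    smooth = signal_average(p2d, bin_width=80)      # bin in annuli, interpolate back onto 2D ell
    return smooth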
def process(kouts, name="default", ellmax=None, y=False, y_ellmin=400):
    ellmax = lmax if ellmax is None else ellmax
    ksilc = enmap.zeros((Ny, Nx), wcs, dtype=np.complex128).reshape(-1)
    ksilc[modlmap.reshape(-1) < lmax] = np.nan_to_num(kouts.copy())
    ksilc = enmap.enmap(ksilc.reshape((Ny, Nx)), wcs)
    ksilc[modlmap > ellmax] = 0
    if y:
        ksilc[modlmap < y_ellmin] = 0
    msilc = np.nan_to_num(
        fft.ifft(ksilc, axes=[-2, -1], normalize=True).real * bmask)
    enmap.write_map(proot + "outmap_%s.fits" % name, enmap.enmap(msilc, wcs))
    p2d = fc.f2power(ksilc, ksilc)
    bin_edges = np.arange(100, 3000, 40)
    binner = stats.bin2D(modlmap, bin_edges)
    cents, p1d = binner.bin(p2d)
    try:
        # io.plot_img(np.log10(np.fft.fftshift(p2d)),proot+"ksilc_%s.png" % name,aspect='auto')
        # io.plot_img(msilc,proot+"msilc_%s.png" % name)
        # io.plot_img(msilc,proot+"lmsilc_%s.png" % name,lim=300)
        tmask = maps.mask_kspace(shape, wcs, lmin=300, lmax=5000 if not(y) else 1500)
        fmap = maps.filter_map(msilc, tmask) * bmask
        io.plot_img(fmap, proot + "hmsilc_%s.png" % name, high_res=True)
    except:
        pass
    return cents, p1d
def get_nlkk_mixed(modlmap, stnoise, ctnoise, xnoise, enoise, bnoise, tmask, pmask, ipols):
    estimator = "hdv"
    import symlens as s
    feed_dict = get_feed_cross(lcltt, lclte, lclee, lclbb,
                               stnoise, ctnoise, xnoise, enoise, bnoise)
    alpha_estimator = estimator
    beta_estimator = estimator
    pols = []
    for pol in ipols:
        if pol == 'TT':
            pols.append('S_T_C_T')
            pols.append('C_T_S_T')
        else:
            x, y = pol
            pols.append('S_%s_S_%s' % (x, y))

    def get_xy(ipol):
        sp = ipol.split('_')
        return ''.join([sp[1], sp[3]])

    npols = len(pols)
    masks = {'T': tmask, 'E': pmask, 'B': pmask}
    bin_edges = np.arange(40, 2500, 40)
    binner = stats.bin2D(modlmap, bin_edges)
    cents = binner.centers
    nls = np.zeros((npols, npols, cents.size))
    for i in range(npols):
        for j in range(i, npols):
            print(pols[i], 'x', pols[j])
            alpha_XY = get_xy(pols[i])
            beta_XY = get_xy(pols[j])
            fa, a, fb, b = pols[i].split('_')
            fc, c, fd, d = pols[j].split('_')
            if i == j:
                xmask = masks[a]
                ymask = masks[b]
            else:
                xmask = masks['T']
                ymask = masks['T']
            fnames1 = [fa, fb]
            fnames2 = [fc, fd]
            nl = s.N_l_cross(shape, wcs, feed_dict,
                             estimator, alpha_XY, estimator, beta_XY,
                             xmask=xmask, ymask=ymask,
                             field_names_alpha=fnames1, field_names_beta=fnames2)
            cents, nl1d = binner.bin(nl)
            nls[i, j] = nl1d.copy()
            if i != j:
                nls[j, i] = nl1d.copy()
    ncoadd = get_mv(nls)
    return cents, nls, ncoadd, pols
def getspec(f, lmin=50, lmax=4000, deltal=20):
    p2d = enmap.read_map(f)
    shape, wcs = p2d.shape, p2d.wcs
    bin_edges = np.arange(lmin, lmax, deltal)
    modlmap = enmap.modlmap(shape, wcs)
    binner = stats.bin2D(modlmap, bin_edges)
    cents, p1d = binner.bin(p2d)
    return cents, p1d
def fts(result, lmin, lmax, deltal):
    shape, wcs, delta, modlmap = list(result.values())
    fc = omaps.FourierCalc(shape, wcs)
    p2d, kgal, _ = fc.power2d(delta)
    bin_edges = np.arange(lmin, lmax, deltal)
    binner = stats.bin2D(modlmap, bin_edges)
    cents, p1d = binner.bin(p2d)
    result = OrderedDict([('cents', cents), ('p1d', p1d)])
    return result
def init_geometry(ishape, iwcs):
    modlmap = enmap.modlmap(ishape, iwcs)
    bin_edges = np.arange(args.kellmin, args.kellmax, args.dell)
    binner = stats.bin2D(modlmap, bin_edges)
    if args.beam < 1e-5:
        kbeam = None
    else:
        kbeam = maps.gauss_beam(modlmap, args.beam)
    lmax = modlmap.max()
    ells = np.arange(2, lmax, 1)
    wnoise_TT = ells*0. + (args.noise*(np.pi/180./60.))**2.
    wnoise_PP = 2.*wnoise_TT
    nT = modlmap*0. + (args.noise*(np.pi/180./60.))**2.
    nP = 2.*nT
    ncomp = 3 if pol else 1
    ps = np.zeros((ncomp, ncomp, ells.size))
    ps[0, 0] = wnoise_TT
    if pol:
        ps[1, 1] = wnoise_PP
        ps[2, 2] = wnoise_PP
    oshape = (3,)+ishape if pol else ishape
    if not(args.flat) and args.noise_pad > 1.e-5:
        # Pad noise sim geometry
        pad_width_deg = args.noise_pad
        pad_width = pad_width_deg * np.pi/180.
        res = maps.resolution(oshape[-2:], iwcs)
        pad_pixels = int(pad_width/res)
        template = enmap.zeros(oshape, iwcs)
        btemplate = enmap.pad(template, pad_pixels)
        bshape, bwcs = btemplate.shape, btemplate.wcs
        del template
        del btemplate
        ngen = maps.MapGen(bshape, bwcs, ps)
    else:
        ngen = maps.MapGen(oshape, iwcs, ps)
    tmask = maps.mask_kspace(ishape, iwcs, lmin=args.tellmin, lmax=args.tellmax)
    pmask = maps.mask_kspace(ishape, iwcs, lmin=args.pellmin, lmax=args.pellmax)
    kmask = maps.mask_kspace(ishape, iwcs, lmin=args.kellmin, lmax=args.kellmax)
    qest = lensing.qest(ishape, iwcs, theory, noise2d=nT, beam2d=kbeam, kmask=tmask,
                        noise2d_P=nP, kmask_P=pmask, kmask_K=kmask, pol=pol,
                        grad_cut=None, unlensed_equals_lensed=True)
    taper, w2 = maps.get_taper_deg(ishape, iwcs, taper_width_degrees=args.taper_width,
                                   pad_width_degrees=args.pad_width)
    fc = maps.FourierCalc(oshape, iwcs, iau=args.iau)
    purifier = maps.Purify(ishape, iwcs, taper) if args.purify else None
    return qest, ngen, kbeam, binner, taper, fc, purifier
def fit_noise_1d(npower, lmin=300, lmax=10000, wnoise_annulus=500, bin_annulus=20,
                 lknee_guess=3000, alpha_guess=-4,
                 lknee_min=0, lknee_max=9000, alpha_min=-5, alpha_max=1,
                 allow_low_wnoise=False):
    """Obtain a white noise + lknee + alpha fit to a 2D noise power spectrum.
    The white noise part is inferred from the mean of lmax-wnoise_annulus < ells < lmax.

    npower is the 2d noise power.
    """
    fbin_edges = np.arange(lmin, lmax, bin_annulus)
    modlmap = npower.modlmap()
    fbinner = stats.bin2D(modlmap, fbin_edges)
    cents, dn1d = fbinner.bin(npower)
    w2 = dn1d[np.logical_and(cents >= (lmax-wnoise_annulus), cents < lmax)].mean()
    try:
        # print(w2)
        assert w2 > 0
        # pl = io.Plotter('Dell')
        # pl.add(cents,dn1d)
        # pl.add(cents,cents*0+w2)
        # pl.done(os.environ['WORK']+"/nonpos_white_works.png")
    except:
        print("White noise level not positive")
        print(w2)
        if not(allow_low_wnoise):
            pl = io.Plotter('Dell')
            pl.add(cents, dn1d)
            pl.done(os.environ['WORK']+"/nonpos_white.png")
            raise
        else:
            w2 = np.abs(w2)
            print("Setting to ", w2)
    wnoise = np.sqrt(w2)*180.*60./np.pi
    ntemplatefunc = lambda x, lknee, alpha: fbinner.bin(rednoise(modlmap, wnoise, lknee=lknee, alpha=alpha))[1]
    # ntemplatefunc = lambda x,lknee,alpha: rednoise(x,wnoise,lknee=lknee,alpha=alpha)
    # FIXME: This switch needs testing !!!!
    res, _ = curve_fit(ntemplatefunc, cents, dn1d, p0=[lknee_guess, alpha_guess],
                       bounds=([lknee_min, alpha_min], [lknee_max, alpha_max]))
    lknee_fit, alpha_fit = res
    # print(lknee_fit,alpha_fit,wnoise)
    # pl = io.Plotter(xyscale='linlog',xlabel='l',ylabel='D',scalefn=lambda x: x**2./2./np.pi)
    # pl.add(cents,dn1d)
    # pl.add(cents,cents*0+w2)
    # pl.add(cents,rednoise(cents,wnoise,lknee=lknee_fit,alpha=alpha_fit),ls="--")
    # pl.add(cents,rednoise(cents,wnoise,lknee=lknee_guess,alpha=alpha_guess),ls="-.")
    # pl._ax.set_ylim(1e-1,1e4)
    # pl.done(os.environ['WORK']+"/fitnoise_pre.png")
    # sys.exit()
    return wnoise, lknee_fit, alpha_fit
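# --- Hedged sketch (added for clarity; `rednoise` is not defined in this snippet) ---
# The fit above assumes the usual atmospheric "1/f" noise model, roughly
# N(ell) = w^2 * [1 + (ell/lknee)^alpha], with w the white-noise level in uK-arcmin
# converted to uK^2-steradian. The exact form used by the original code may differ.
def rednoise_sketch(ells, rms_noise_uk_arcmin, lknee=3000., alpha=-4.):
    import numpy as np
    w = rms_noise_uk_arcmin * (np.pi/180./60.)                    # uK-arcmin -> uK-radian
    wnoise = ells*0. + w**2.                                      # flat white-noise floor
    atm = (np.maximum(ells, 1.)/lknee)**alpha if lknee > 1e-3 else 0.  # rising low-ell part
    return (1. + atm) * wnoise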
#### NORM FROM FLAT-SKY CODE FOR NOW
bin_edges = np.linspace(2, lmax, 40)
with bench.show("flat sky AL"):
    ls, nlkks, theory, qest = lensing.lensing_noise(ells, ntt, nee, nbb,
                                                    ellmin_t, ellmin_t, ellmin_t,
                                                    ellmax_t, ellmax_t, ellmax_t,
                                                    bin_edges,
                                                    camb_theory_file_root=None,
                                                    estimators=['TT'],
                                                    delens=False,
                                                    theory=theory,
                                                    dimensionless=False,
                                                    unlensed_equals_lensed=True,
                                                    grad_cut=None, width_deg=25., px_res_arcmin=res)
binner = stats.bin2D(qest.N.modLMap, bin_edges)
cents, albinned = binner.bin(qest.AL['TT'])
Al = maps.interp(cents, albinned)(ells)
Nl = maps.interp(ls, nlkks['TT'])(ells)

lpls, lpal = np.loadtxt("data/nls_2000.txt", unpack=True)
pl = io.Plotter(yscale='log', xscale='log')
pl.add(ells, theory.gCl('kk', ells), lw=3, color='k')
pl.add(ells, Nl, ls="--")
pl.add(lpls, lpal * (lpls * (lpls + 1.))**2. / 4., ls="-.")
# pl._ax.set_ylim(1e-10,1e-6)
pl.done(io.dout_dir + "fullsky_qe_result_al.png")

dh_nls = np.nan_to_num(lpal * (lpls * (lpls + 1.))**2. / 4.)
dh_als = np.nan_to_num(dh_nls * 2. / lpls / (lpls + 1))
Al = dh_als
bin_edges = np.arange(100, 8000, 40)
pfunc = lambda x, y: np.real(x*y.conj())
ffunc = lambda x: enmap.fft(x, normalize='phys')
Ncrop = 400
navg = lambda x, delta: covtools.noise_block_average(x, nsplits=1, delta_ell=delta,
                                                     radial_fit=False, lmax=None,
                                                     wnoise_annulus=None, lmin=40,
                                                     bin_annulus=None, fill_lmax=None,
                                                     log=False)
loc = "/home/r/rbond/sigurdkn/project/actpol/maps/mr3f_20190328/transfun/release/"
mask = sints.get_act_mr3_crosslinked_mask('deep56')
binner = stats.bin2D(mask.modlmap(), bin_edges)
bin = lambda x: binner.bin(x)
isim1 = enmap.extract(enmap.read_map(loc+'../sims/deep56_00.fits'), mask.shape, mask.wcs) * mask
isim2 = enmap.extract(enmap.read_map(loc+'../sims/deep56_01.fits'), mask.shape, mask.wcs) * mask
tmap1 = enmap.extract(enmap.read_map(loc+'s15_deep56_pa2_f150_nohwp_night_sim00_3pass_4way_coadd_transmap.fits'), mask.shape, mask.wcs) * mask
tmap2 = enmap.extract(enmap.read_map(loc+'s15_deep56_pa2_f150_nohwp_night_sim01_3pass_4way_coadd_transmap.fits'), mask.shape, mask.wcs) * mask
lmap = mask.lmap()
lymap, lxmap = lmap
# def model(x,width,amplitude,sigma):
#     mmap = 1-amplitude * np.exp(-lymap**2./2./sigma**2.)
#     mmap[lxmap>width/2.] = 1
#     mmap[lxmap<-width/2.] = 1
#     return mmap
def get_flat_power(map1, map2=None):
    map2 = map1 if map2 is None else map2
    power2d, _, _ = fc.power2d(emap=map1, emap2=map2)
    binner = stats.bin2D(map1.modlmap(), bin_edges)
    return binner.bin(power2d)
def calculate_yy(bin_edges, arrays, region, version, cov_versions, beam_version,
                 effective_freq, overwrite, maxval, unsanitized_beam=False, do_weights=False,
                 pa1_shift=None, pa2_shift=None,
                 pa3_150_shift=None, pa3_090_shift=None,
                 no_act_color_correction=False, ccor_exp=-1,
                 sim_splits=None, unblind=False, all_analytic=False, beta_samples=None):
    """
    We calculate the yy power spectrum as follows.
    We restrict the Fourier modes in our analysis to those within bin_edges.
    This way we don't carry irrelevant pixels and thus speed up the ability to MC.
    We accept two covariance versions in cov_versions, which correspond to
    [act_covariance_from_split_0, act_covariance_from_split_1, other_covs].
    Thus the ACT auto covariances are pre-calculated.
    """
    arrays = arrays.split(',')
    narrays = len(arrays)
    if sim_splits is not None:
        assert not(unblind)

    def warn():
        print("WARNING: no bandpass file found. Assuming array ", dm.c['id'],
              " has no response to CMB, tSZ and CIB.")

    aspecs = tutils.ASpecs().get_specs
    bandpasses = not(effective_freq)
    savedir = tutils.get_save_path(version, region)
    assert len(cov_versions) == 3
    covdirs = [tutils.get_save_path(cov_versions[i], region) for i in range(3)]
    for covdir in covdirs:
        assert os.path.exists(covdir)
    if not(overwrite):
        assert not(os.path.exists(savedir)), \
            "This version already exists on disk. Please use a different version identifier."
    try:
        os.makedirs(savedir)
    except:
        if overwrite:
            pass
        else:
            raise

    mask = enmap.read_map(covdir+"tilec_mask.fits")
    from scipy.ndimage.filters import gaussian_filter as smooth
    pm = enmap.read_map("/scratch/r/rbond/msyriac/data/planck/data/pr2/COM_Mask_Lensing_2048_R2.00_car_deep56_interp_order0.fits")
    wcs = pm.wcs
    mask = enmap.enmap(smooth(pm, sigma=10), wcs) * mask
    shape, wcs = mask.shape, mask.wcs
    Ny, Nx = shape
    modlmap = enmap.modlmap(shape, wcs)
    omodlmap = modlmap.copy()
    ells = np.arange(0, modlmap.max())
    minell = maps.minimum_ell(shape, wcs)
    sel = np.where(np.logical_and(modlmap >= bin_edges[0]-minell, modlmap <= bin_edges[-1]+minell))
    modlmap = modlmap[sel]

    bps = []
    lbeams = []
    kbeams = []
    shifts = []
    cfreqs = []
    lmins = []
    lmaxs = []
    names = []
    for i, qid in enumerate(arrays):
        dm = sints.models[sints.arrays(qid, 'data_model')](region=mask, calibrated=True)
        if dm.name == 'act_mr3':
            season, array1, array2 = sints.arrays(qid, 'season'), sints.arrays(qid, 'array'), sints.arrays(qid, 'freq')
            array = '_'.join([array1, array2])
        elif dm.name == 'planck_hybrid':
            season, patch, array = None, None, sints.arrays(qid, 'freq')
        else:
            raise ValueError
        lmin, lmax, hybrid, radial, friend, cfreq, fgroup, wrfit = aspecs(qid)
        lmins.append(lmin)
        lmaxs.append(lmax)
        names.append(qid)
        cfreqs.append(cfreq)
        if bandpasses:
            try:
                fname = dm.get_bandpass_file_name(array)
                bps.append("data/"+fname)
                if (pa1_shift is not None) and 'PA1' in fname:
                    shifts.append(pa1_shift)
                elif (pa2_shift is not None) and 'PA2' in fname:
                    shifts.append(pa2_shift)
                elif (pa3_150_shift is not None) and ('PA3' in fname) and ('150' in fname):
                    shifts.append(pa3_150_shift)
                elif (pa3_090_shift is not None) and ('PA3' in fname) and ('090' in fname):
                    shifts.append(pa3_090_shift)
                else:
                    shifts.append(0)
            except:
                warn()
                bps.append(None)
        else:
            try:
                bps.append(cfreq)
            except:
                warn()
                bps.append(None)
        kbeam = tutils.get_kbeam(qid, modlmap, sanitize=not(unsanitized_beam), version=beam_version, planck_pixwin=True)
        if dm.name == 'act_mr3':
            lbeam = tutils.get_kbeam(qid, ells, sanitize=not(unsanitized_beam), version=beam_version, planck_pixwin=False)  # note no pixwin but doesnt matter since no ccorr for planck
        elif dm.name == 'planck_hybrid':
            lbeam = None
        else:
            raise ValueError
        lbeams.append(lbeam)
        kbeams.append(kbeam.copy())

    # Make responses
    responses = {}

    def _get_response(comp, param_override=None):
        if bandpasses:
            if no_act_color_correction:
                r = tfg.get_mix_bandpassed(bps, comp, bandpass_shifts=shifts,
                                           param_dict_override=param_override)
            else:
                r = tfg.get_mix_bandpassed(bps, comp, bandpass_shifts=shifts,
                                           ccor_cen_nus=cfreqs, ccor_beams=lbeams,
                                           ccor_exps=[ccor_exp]*narrays,
                                           param_dict_override=param_override)
        else:
            r = tfg.get_mix(bps, comp, param_dict_override=param_override)
        return r

    for comp in ['tSZ', 'CMB', 'CIB']:
        responses[comp] = _get_response(comp, None)

    from tilec.utils import is_planck
    ilcgens = []
    okcoadds = []
    for splitnum in range(2):
        covdir = covdirs[splitnum]
        kcoadds = []
        for i, qid in enumerate(arrays):
            lmin = lmins[i]
            lmax = lmaxs[i]
            if is_planck(qid):
                dm = sints.models[sints.arrays(qid, 'data_model')](region=mask, calibrated=True)
                _, kcoadd, _ = kspace.process(dm, region, qid, mask,
                                              skip_splits=True,
                                              splits_fname=sim_splits[i] if sim_splits is not None else None,
                                              inpaint=False, fn_beam=None,
                                              plot_inpaint_path=None,
                                              split_set=splitnum)
            else:
                kcoadd_name = covdir + "kcoadd_%s.npy" % qid
                kcoadd = enmap.enmap(np.load(kcoadd_name), wcs)
            kmask = maps.mask_kspace(shape, wcs, lmin=lmin, lmax=lmax)
            dtype = kcoadd.dtype
            kcoadds.append((kcoadd.copy()*kmask)[sel])
        kcoadds = enmap.enmap(np.stack(kcoadds), wcs)
        okcoadds.append(kcoadds.copy())

        # Read Covmat
        ctheory = ilc.CTheory(modlmap)
        nells = kcoadds[0].size
        cov = np.zeros((narrays, narrays, nells))
        for aindex1 in range(narrays):
            for aindex2 in range(aindex1, narrays):
                qid1 = names[aindex1]
                qid2 = names[aindex2]
                if is_planck(names[aindex1]) or is_planck(names[aindex2]) or all_analytic:
                    lmin, lmax, hybrid, radial, friend, f1, fgroup, wrfit = aspecs(qid1)
                    lmin, lmax, hybrid, radial, friend, f2, fgroup, wrfit = aspecs(qid2)
                    # If both are Planck and same array, get white noise from last bin
                    icov = ctheory.get_theory_cls(f1, f2, a_cmb=1, a_gal=0.8)*kbeams[aindex1]*kbeams[aindex2]
                    if aindex1 == aindex2:
                        pcov = enmap.enmap(np.load(covdirs[2]+"tilec_hybrid_covariance_%s_%s.npy" % (names[aindex1], names[aindex2])), wcs)
                        pbin_edges = np.append(np.arange(500, 3000, 200), [3000, 4000, 5000, 5800])
                        pbinner = stats.bin2D(omodlmap, pbin_edges)
                        w = pbinner.bin(pcov)[1][-1]
                        icov = icov + w
                else:
                    icov = np.load(covdir+"tilec_hybrid_covariance_%s_%s.npy" % (names[aindex1], names[aindex2]))[sel]
                if aindex1 == aindex2:
                    icov[modlmap < lmins[aindex1]] = maxval
                    icov[modlmap > lmaxs[aindex1]] = maxval
                cov[aindex1, aindex2] = icov
                cov[aindex2, aindex1] = icov
        assert np.all(np.isfinite(cov))

        ilcgen = ilc.HILC(modlmap, np.stack(kbeams), cov=cov, responses=responses, invert=True)
        ilcgens.append(ilcgen)

    solutions = ['tSZ', 'tSZ-CMB', 'tSZ-CIB']
    ypowers = {}
    w2 = np.mean(mask**2.)
    binner = stats.bin2D(modlmap, bin_edges)
    np.random.seed(100)
    blinding = np.random.uniform(0.8, 1.2) if not(unblind) else 1

    def _get_ypow(sname, dname, dresponse=None, dcmb=False):
        if dresponse is not None:
            assert dname is not None
            for splitnum in range(2):
                ilcgens[splitnum].add_response(dname, dresponse)
        ykmaps = []
        for splitnum in range(2):
            if dcmb:
                assert dname is not None
                ykmap = ilcgens[splitnum].multi_constrained_map(okcoadds[splitnum], sname, [dname, "CMB"])
            else:
                if dname is None:
                    ykmap = ilcgens[splitnum].standard_map(okcoadds[splitnum], sname)
                else:
                    ykmap = ilcgens[splitnum].constrained_map(okcoadds[splitnum], sname, dname)
            ykmaps.append(ykmap.copy())
        ypower = (ykmaps[0]*ykmaps[1].conj()).real / w2
        return binner.bin(ypower)[1] * blinding

    # The usual solutions
    for solution in solutions:
        sols = solution.split('-')
        if len(sols) == 2:
            sname = sols[0]
            dname = sols[1]
        elif len(sols) == 1:
            sname = sols[0]
            dname = None
        else:
            raise ValueError
        ypowers[solution] = _get_ypow(sname, dname, dresponse=None)

    # The CIB SED samples
    if beta_samples is not None:
        y_bsamples = []
        y_bsamples_cmb = []
        for beta in beta_samples:
            pdict = tfg.default_dict.copy()
            pdict['beta_CIB'] = beta
            response = _get_response("CIB", param_override=pdict)
            y_bsamples.append(_get_ypow("tSZ", "iCIB", dresponse=response, dcmb=False))
            y_bsamples_cmb.append(_get_ypow("tSZ", "iCIB", dresponse=response, dcmb=True))
    else:
        y_bsamples = None
        y_bsamples_cmb = None

    return binner.centers, ypowers, y_bsamples, y_bsamples_cmb
def build_and_save_ilc(arrays, region, version, cov_version, beam_version,
                       solutions, beams, chunk_size,
                       effective_freq, overwrite, maxval, unsanitized_beam=False, do_weights=False,
                       pa1_shift=None, pa2_shift=None,
                       pa3_150_shift=None, pa3_090_shift=None,
                       no_act_color_correction=False, ccor_exp=-1,
                       isotropize=False, isotropize_width=20):

    print("Chunk size is ", chunk_size*64./8./1024./1024./1024., " GB.")

    def warn():
        print("WARNING: no bandpass file found. Assuming array ", dm.c['id'],
              " has no response to CMB, tSZ and CIB.")

    aspecs = tutils.ASpecs().get_specs
    bandpasses = not(effective_freq)
    savedir = tutils.get_save_path(version, region)
    covdir = tutils.get_save_path(cov_version, region)
    assert os.path.exists(covdir)
    if not(overwrite):
        assert not(os.path.exists(savedir)), \
            "This version already exists on disk. Please use a different version identifier."
    try:
        os.makedirs(savedir)
    except:
        if overwrite:
            pass
        else:
            raise

    mask = enmap.read_map(covdir+"tilec_mask.fits")
    shape, wcs = mask.shape, mask.wcs
    Ny, Nx = shape
    modlmap = enmap.modlmap(shape, wcs)
    arrays = arrays.split(',')
    narrays = len(arrays)
    kcoadds = []
    kbeams = []
    bps = []
    names = []
    lmins = []
    lmaxs = []
    shifts = []
    cfreqs = []
    lbeams = []
    ells = np.arange(0, modlmap.max())
    for i, qid in enumerate(arrays):
        dm = sints.models[sints.arrays(qid, 'data_model')](region=mask, calibrated=True)
        lmin, lmax, hybrid, radial, friend, cfreq, fgroup, wrfit = aspecs(qid)
        cfreqs.append(cfreq)
        lmins.append(lmin)
        lmaxs.append(lmax)
        names.append(qid)
        if dm.name == 'act_mr3':
            season, array1, array2 = sints.arrays(qid, 'season'), sints.arrays(qid, 'array'), sints.arrays(qid, 'freq')
            array = '_'.join([array1, array2])
        elif dm.name == 'planck_hybrid':
            season, patch, array = None, None, sints.arrays(qid, 'freq')
        else:
            raise ValueError
        kcoadd_name = covdir + "kcoadd_%s.npy" % qid
        kmask = maps.mask_kspace(shape, wcs, lmin=lmin, lmax=lmax)
        kcoadd = enmap.enmap(np.load(kcoadd_name), wcs)
        dtype = kcoadd.dtype
        kcoadds.append(kcoadd.copy()*kmask)
        kbeam = tutils.get_kbeam(qid, modlmap, sanitize=not(unsanitized_beam), version=beam_version, planck_pixwin=True)
        if dm.name == 'act_mr3':
            lbeam = tutils.get_kbeam(qid, ells, sanitize=not(unsanitized_beam), version=beam_version, planck_pixwin=False)  # note no pixwin but doesnt matter since no ccorr for planck
        elif dm.name == 'planck_hybrid':
            lbeam = None
        else:
            raise ValueError
        lbeams.append(lbeam)
        kbeams.append(kbeam.copy())
        if bandpasses:
            try:
                fname = dm.get_bandpass_file_name(array)
                bps.append("data/"+fname)
                if (pa1_shift is not None) and 'PA1' in fname:
                    shifts.append(pa1_shift)
                elif (pa2_shift is not None) and 'PA2' in fname:
                    shifts.append(pa2_shift)
                elif (pa3_150_shift is not None) and ('PA3' in fname) and ('150' in fname):
                    shifts.append(pa3_150_shift)
                elif (pa3_090_shift is not None) and ('PA3' in fname) and ('090' in fname):
                    shifts.append(pa3_090_shift)
                else:
                    shifts.append(0)
            except:
                warn()
                bps.append(None)
        else:
            try:
                bps.append(cfreq)
            except:
                warn()
                bps.append(None)

    kcoadds = enmap.enmap(np.stack(kcoadds), wcs)

    # Read Covmat
    cov = maps.SymMat(narrays, shape[-2:])
    for aindex1 in range(narrays):
        for aindex2 in range(aindex1, narrays):
            icov = enmap.enmap(np.load(covdir+"tilec_hybrid_covariance_%s_%s.npy" % (names[aindex1], names[aindex2])), wcs)
            if isotropize:
                bin_edges = np.append([0.], np.arange(min(lmins), modlmap.max(), isotropize_width))
                binner = stats.bin2D(modlmap, bin_edges)
                ls, c1d = binner.bin(icov)
                icov = maps.interp(ls, c1d)(modlmap)
            if aindex1 == aindex2:
                icov[modlmap < lmins[aindex1]] = maxval
                icov[modlmap > lmaxs[aindex1]] = maxval
            cov[aindex1, aindex2] = icov
    cov.data = enmap.enmap(cov.data, wcs, copy=False)
    covfunc = lambda sel: cov.to_array(sel, flatten=True)
    assert cov.data.shape[0] == ((narrays*(narrays+1))/2)  # FIXME: generalize
    assert np.all(np.isfinite(cov.data))

    # Make responses
    responses = {}
    for comp in ['tSZ', 'CMB', 'CIB']:
        if bandpasses:
            if no_act_color_correction:
                responses[comp] = tfg.get_mix_bandpassed(bps, comp, bandpass_shifts=shifts)
            else:
                responses[comp] = tfg.get_mix_bandpassed(bps, comp, bandpass_shifts=shifts,
                                                         ccor_cen_nus=cfreqs, ccor_beams=lbeams,
                                                         ccor_exps=[ccor_exp]*narrays)
        else:
            responses[comp] = tfg.get_mix(bps, comp)

    ilcgen = ilc.chunked_ilc(modlmap, np.stack(kbeams), covfunc, chunk_size, responses=responses, invert=True)

    # Initialize containers
    solutions = solutions.split(',')
    data = {}
    kcoadds = kcoadds.reshape((narrays, Ny*Nx))
    for solution in solutions:
        data[solution] = {}
        comps = solution.split('-')
        data[solution]['comps'] = comps
        if len(comps) <= 2:
            data[solution]['noise'] = enmap.zeros((Ny*Nx), wcs)
        if len(comps) == 2:
            data[solution]['cnoise'] = enmap.zeros((Ny*Nx), wcs)
        data[solution]['kmap'] = enmap.zeros((Ny*Nx), wcs, dtype=dtype)  # FIXME: reduce dtype?
        if do_weights and len(comps) <= 2:
            for qid in arrays:
                data[solution]['weight_%s' % qid] = enmap.zeros((Ny*Nx), wcs)

    for chunknum, (hilc, selchunk) in enumerate(ilcgen):
        print("ILC on chunk ", chunknum+1, " / ", int(modlmap.size/chunk_size)+1, " ...")
        for solution in solutions:
            comps = data[solution]['comps']
            if len(comps) == 1:  # GENERALIZE
                data[solution]['noise'][selchunk] = hilc.standard_noise(comps[0])
                if do_weights:
                    weight = hilc.standard_weight(comps[0])
                data[solution]['kmap'][selchunk] = hilc.standard_map(kcoadds[..., selchunk], comps[0])
            elif len(comps) == 2:
                data[solution]['noise'][selchunk] = hilc.constrained_noise(comps[0], comps[1])
                data[solution]['cnoise'][selchunk] = hilc.cross_noise(comps[0], comps[1])
                ret = hilc.constrained_map(kcoadds[..., selchunk], comps[0], comps[1], return_weight=do_weights)
                if do_weights:
                    data[solution]['kmap'][selchunk], weight = ret
                else:
                    data[solution]['kmap'][selchunk] = ret
            elif len(comps) > 2:
                data[solution]['kmap'][selchunk] = np.nan_to_num(
                    hilc.multi_constrained_map(kcoadds[..., selchunk], comps[0], *comps[1:]))
            if len(comps) <= 2 and do_weights:
                for qind, qid in enumerate(arrays):
                    data[solution]['weight_%s' % qid][selchunk] = weight[qind]

    del ilcgen, cov

    # Reshape into maps
    name_map = {'CMB': 'cmb', 'tSZ': 'comptony', 'CIB': 'cib'}
    beams = beams.split(',')
    for solution, beam in zip(solutions, beams):
        comps = "tilec_single_tile_"+region+"_"
        comps = comps + name_map[data[solution]['comps'][0]]+"_"
        if len(data[solution]['comps']) > 1:
            comps = comps + "deprojects_" + '_'.join([name_map[x] for x in data[solution]['comps'][1:]]) + "_"
        comps = comps + version

        if do_weights and len(data[solution]['comps']) <= 2:
            for qind, qid in enumerate(arrays):
                enmap.write_map("%s/%s_%s_weight.fits" % (savedir, comps, qid),
                                enmap.enmap(data[solution]['weight_%s' % qid].reshape((Ny, Nx)), wcs))

        try:
            noise = enmap.enmap(data[solution]['noise'].reshape((Ny, Nx)), wcs)
            enmap.write_map("%s/%s_noise.fits" % (savedir, comps), noise)
        except:
            pass
        try:
            cnoise = enmap.enmap(data[solution]['cnoise'].reshape((Ny, Nx)), wcs)
            enmap.write_map("%s/%s_cross_noise.fits" % (savedir, comps), cnoise)
        except:
            pass

        ells = np.arange(0, modlmap.max(), 1)
        try:
            fbeam = float(beam)
            kbeam = maps.gauss_beam(modlmap, fbeam)
            lbeam = maps.gauss_beam(ells, fbeam)
        except:
            qid = beam
            bfunc = lambda x: tutils.get_kbeam(qid, x, version=beam_version, sanitize=not(unsanitized_beam), planck_pixwin=False)
            kbeam = bfunc(modlmap)
            lbeam = bfunc(ells)

        kmap = enmap.enmap(data[solution]['kmap'].reshape((Ny, Nx)), wcs)
        smap = enmap.ifft(kbeam*kmap, normalize='phys').real
        enmap.write_map("%s/%s.fits" % (savedir, comps), smap)
        io.save_cols("%s/%s_beam.txt" % (savedir, comps), (ells, lbeam), header="ell beam")

    enmap.write_map(savedir+"/tilec_mask.fits", mask)
def noise_block_average(n2d, nsplits, delta_ell, lmin=300, lmax=8000, wnoise_annulus=500, bin_annulus=20,
                        lknee_guess=3000, alpha_guess=-4, nparams=None,
                        verbose=False, radial_fit=True, fill_lmax=None, fill_lmax_width=100, log=True,
                        isotropic_low_ell=True, allow_low_wnoise=False):
    """Find the empirical mean noise binned in blocks of delta_ell x delta_ell.
    Preserves noise anisotropy. Most arguments are for the radial fitting part.
    A radial fit is divided out before downsampling (by default by FFT) and then
    multiplied back with the radial fit. Watch for ringing in the final output.

    n2d -- the [...,Ny,Nx] 2d noise power to smooth
    nsplits -- the number of splits from which the 2d noise power was estimated. This needs
    to be known if log is True, in which case the power is log-transformed before smoothing,
    which changes the statistics of the samples and hence needs a pre-determined correction
    based on the distribution of the original sample.
    log -- whether to log transform before smoothing. Should only be used if the power is
    positive (so should not be used e.g. if this is for the cross-noise of two components)
    delta_ell -- the block width in ell units for the smoothing. The smoothing effectively
    gets done in blocks of delta_ell x delta_ell.
    radial_fit -- if True, divides out a fit to the 1d power
    lmin -- lmin for the radial fit
    lmax -- lmax for the radial fit (adjust based on resolution of map)
    wnoise_annulus -- width of annulus in ell within which to estimate high ell white noise
    (adjust based on resolution)
    bin_annulus -- width of 1d bins (IMPORTANT: adjust based on map size)
    lknee_guess -- guess lknee for fit
    alpha_guess -- guess alpha for fit
    nparams -- optionally pass in a radial fit's parameters
    verbose -- print more
    fill_lmax -- fill power outside this lmax with the mean of the annulus between fill_lmax
    and fill_lmax_width
    fill_lmax_width -- see above
    isotropic_low_ell -- fill below lmin with an isotropic fit to the 1d power
    allow_low_wnoise -- allow white noise level to be negative (for debugging)
    """
    assert np.all(np.isfinite(n2d))
    if log:
        assert np.all(n2d > 0), "You can't log smooth a PS with negative or zero power. Use log=False for these."
    shape, wcs = n2d.shape, n2d.wcs
    modlmap = n2d.modlmap()
    minell = maps.minimum_ell(shape, wcs)
    Ny, Nx = shape[-2:]
    if radial_fit:
        with bench.show("radial fit"):
            if nparams is None:
                if verbose:
                    print("Radial fitting...")
                nparams = fit_noise_1d(n2d, lmin=lmin, lmax=lmax, wnoise_annulus=wnoise_annulus,
                                       bin_annulus=bin_annulus, lknee_guess=lknee_guess,
                                       alpha_guess=alpha_guess, allow_low_wnoise=allow_low_wnoise)
            wfit, lfit, afit = nparams
            nfitted = rednoise(modlmap, wfit, lfit, afit)
    else:
        nparams = None
        nfitted = n2d*0 + 1
    nfitted = np.maximum(nfitted, np.max(n2d)*1e-14)
    nflat = enmap.enmap(n2d/nfitted, wcs)  # flattened 2d noise power
    fval = nflat[np.logical_and(modlmap > 2, modlmap < 2*minell)].mean()
    nflat[modlmap < 2] = fval
    if fill_lmax is not None:
        fill_avg = nflat[np.logical_and(modlmap > (fill_lmax-fill_lmax_width), modlmap <= fill_lmax)].mean()
        nflat[modlmap > fill_lmax] = fill_avg
    if verbose:
        print("Resampling...")
    assert np.all(np.isfinite(nflat))
    with bench.show("smooth ps grid"):
        ndown = smooth_ps_grid(nflat, res=delta_ell, alpha=4, log=log, ndof=2*(nsplits-1))
    # pshow(nflat)
    # pshow(ndown)
    outcov = ndown*nfitted
    outcov[modlmap < minell] = 0
    if fill_lmax is not None:
        outcov[modlmap > fill_lmax] = 0
    assert np.all(np.isfinite(outcov))

    if isotropic_low_ell:
        with bench.show("isotropic low ell"):
            if radial_fit:
                ifunc = lambda ells, ell0, A, shell: (A*np.exp(-ell0/ells) + shell)
            sel = np.logical_and(modlmap <= lmin, modlmap >= 2)
            ibin_edges = np.arange(minell, (lmin*2)+2*minell, 2*minell)
            ibinner = stats.bin2D(modlmap, ibin_edges)
            cents, inls = ibinner.bin(nflat)
            ys = inls
            xs = cents
            if radial_fit:
                res, _ = curve_fit(ifunc, xs, ys, p0=[20, 1, 0],
                                   bounds=([2, 0., -np.inf], [lmin*2, np.inf, np.inf]))
                outcov[sel] = ifunc(modlmap[sel], res[0], res[1], res[2])*nfitted[sel]
            else:
                deg = 5
                res = np.polyfit(np.log(xs), np.log(ys*xs**2.), deg=deg)
                assert res.size == (deg+1)
                fitfunc = lambda x: sum([res[deg-p]*(x**p) for p in range(0, deg+1)[::-1]])
                outcov[sel] = (np.exp(fitfunc(np.log(modlmap[sel])))/modlmap[sel]**2.)*nfitted[sel]
            outcov[modlmap < 2] = 0

            # fbin_edges = np.arange(minell,lmax,bin_annulus)
            # fbinner = stats.bin2D(modlmap,fbin_edges)
            # cents, n1d = fbinner.bin(nflat)
            # pl = io.Plotter(xyscale='loglog',xlabel='l',ylabel='D',scalefn=lambda x: x**2./2./np.pi)
            # ells = np.arange(minell,2*lmin,1)
            # if radial_fit:
            #     pl.add(ells,ifunc(ells,res[0],res[1],res[2]))
            # else:
            #     pl.add(xs,ys,ls="--")
            #     pl.add(ells,np.exp(fitfunc(np.log(ells)))/ells**2.)
            # pl.add(cents,n1d)
            # pl.vline(x=100)
            # pl.vline(x=200)
            # pl.vline(x=300)
            # pl.vline(x=500)
            # t = "000"
            # pl._ax.set_xlim(10,3000)
            # pl.done(os.environ['WORK']+"/iso_fitnoise2_%s.png" % t)

    # fbin_edges = np.arange(minell,lmax,bin_annulus)
    # fbinner = stats.bin2D(modlmap,fbin_edges)
    # cents, n1d = fbinner.bin(n2d)
    # cents,dn1d = fbinner.bin(outcov)
    # # cents,dn1d2 = fbinner.bin(nfitted)
    # pl = io.Plotter(xyscale='linlog',xlabel='l',ylabel='D',scalefn=lambda x: x**2./2./np.pi)
    # pl.add(cents,n1d)
    # pl.add(cents,dn1d,ls="--")
    # pl.vline(x=100)
    # pl.vline(x=200)
    # pl.vline(x=300)
    # pl.vline(x=500)
    # # pl.add(cents,dn1d2,ls="-.")
    # t = "000"
    # pl._ax.set_ylim(1e1,1e5)
    # pl.done(os.environ['WORK']+"/fitnoise2_%s.png" % t)
    # sys.exit()

    return outcov, nfitted, nparams
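# --- Hedged usage note (added) ---
# Typical call, assuming `n2d` is a per-split 2D noise power estimated from `nsplits`
# splits of a map (names here are illustrative):
# n2d_smooth, n2d_fit, fit_params = noise_block_average(n2d, nsplits=4, delta_ell=400,
#                                                       radial_fit=True, lmax=8000)
# With log=True the power must be strictly positive; use log=False (and typically
# radial_fit=False) for cross-noise spectra that can go negative.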
# Theory
theory_file_root = "../alhazen/data/Aug6_highAcc_CDM"
cc = counts.ClusterCosmology(skipCls=True)
theory = cosmology.loadTheorySpectraFromCAMB(theory_file_root, unlensedEqualsLensed=False,
                                             useTotal=False, TCMB=2.7255e6, lpad=9000,
                                             get_dimensionless=False)

# Geometry
shape, wcs = maps.rect_geometry(width_arcmin=args.arc, px_res_arcmin=args.pix, pol=False)
modlmap = enmap.modlmap(shape, wcs)
modrmap = enmap.modrmap(shape, wcs)

# Binning
bin_edges = np.arange(0., 20.0, args.pix*2)
binner = stats.bin2D(modrmap*60.*180./np.pi, bin_edges)

# Noise model
noise_uK_rad = args.noise*np.pi/180./60.
normfact = np.sqrt(np.prod(enmap.pixsize(shape, wcs)))
kbeam = maps.gauss_beam(args.beam, modlmap)

# Simulate
lmax = int(modlmap.max()+1)
ells = np.arange(0, lmax, 1)
ps = theory.uCl('TT', ells).reshape((1, 1, lmax))
ps_noise = np.array([(noise_uK_rad)**2.]*ells.size).reshape((1, 1, ells.size))
mg = maps.MapGen(shape, wcs, ps)
ng = maps.MapGen(shape, wcs, ps_noise)
kamp_true = args.Amp
""" njobs = len(qpairs) comm,rank,my_tasks = mpi.distribute(njobs) mask = sints.get_act_mr3_crosslinked_mask(args.region, version=args.mask_version, kind='binary_apod') shape,wcs = mask.shape,mask.wcs modlmap = mask.modlmap() aspecs = tutils.ASpecs().get_specs region = args.region fbeam = lambda qname,x: tutils.get_kbeam(qname,x,sanitize=not(args.unsanitized_beam),planck_pixwin=True) nbin_edges = np.arange(20,8000,100) nbinner = stats.bin2D(modlmap,nbin_edges) ncents = nbinner.centers #cbin_edges = np.arange(20,8000,20) cbin_edges = np.arange(20,8000,40) cbinner = stats.bin2D(modlmap,cbin_edges) fells = np.arange(lmax) for task in my_tasks: qids = qpairs[task] qid1,qid2 = qids ncents,n1d = np.loadtxt("%sn1d_%s_%s.txt" % (spath,qid1,qid2),unpack=True) ncents,n1d1 = np.loadtxt("%sn1d_%s_%s.txt" % (spath,qid1,qid1),unpack=True) ncents,n1d2 = np.loadtxt("%sn1d_%s_%s.txt" % (spath,qid2,qid2),unpack=True) ccents,s1d = np.loadtxt("%ss1d_%s_%s.txt" % (spath,qid1,qid2),unpack=True) fbeam1 = lambda x: tutils.get_kbeam(qid1,x,sanitize=not(args.unsanitized_beam),planck_pixwin=True)
opath = "/scratch/r/rbond/msyriac/data/act/omar/"
tpath = os.environ['WORK'] + "/data/depot/tilec/map_v1.0.0_rc_joint_%s/" % region

"""
We compare masks and make sure they are identical
"""
omask = enmap.read_map("%smask_s14&15_%s.fits" % (opath, region))
tmask = enmap.read_map("%stilec_mask.fits" % tpath)
assert np.all(np.isclose(omask, tmask))

modlmap = omask.modlmap()
bin_edges = np.arange(400, 8000, 200)
binner = stats.bin2D(modlmap, bin_edges)
Nplot = 300
kbeam = tutils.get_kbeam("d56_05", modlmap, sanitize=False)
w2 = np.mean(omask**2.)

ls, bells = np.loadtxt(
    "%stilec_single_tile_%s_cmb_map_v1.0.0_rc_joint_beam.txt" % (tpath, region),
    unpack=True)
tkbeam = maps.interp(ls, bells)(modlmap)
ls, bells = np.loadtxt(
    "%stilec_single_tile_%s_cmb_deprojects_comptony_map_v1.0.0_rc_joint_beam.txt" % (tpath, region),
    unpack=True)
tkbeam_nosz = maps.interp(ls, bells)(modlmap)
parray_dat = aio.patch_array_from_config(Config, expf_name, shape_dat, wcs_dat, dimensionless=True)
parray_sim = aio.patch_array_from_config(Config, expf_name, shape_sim, wcs_sim, dimensionless=True)

lxmap_dat, lymap_dat, modlmap_dat, angmap_dat, lx_dat, ly_dat = fmaps.get_ft_attributes_enmap(shape_dat, wcs_dat)
lxmap_sim, lymap_sim, modlmap_sim, angmap_sim, lx_sim, ly_sim = fmaps.get_ft_attributes_enmap(shape_sim, wcs_sim)

lbin_edges = np.arange(kellmin, kellmax, 300)
lbinner_dat = stats.bin2D(modlmap_dat, lbin_edges)
lbinner_sim = stats.bin2D(modlmap_sim, lbin_edges)

# === COSMOLOGY ===
theory, cc, lmax = aio.theory_from_config(Config, cosmology_section)
parray_dat.add_theory(None, theory, lmax)

template_dat = fmaps.simple_flipper_template_from_enmap(shape_dat, wcs_dat)
nT = parray_dat.nT
nP = parray_dat.nP
if rank == 0:
    io.quickPlot2d(nT, out_dir + "nt.png")
kbeam_dat = parray_dat.lbeam
kbeampass = kbeam_dat
if rank == 0:
    io.quickPlot2d(kbeampass, out_dir + "kbeam.png")
fMaskCMB_T = fmaps.fourierMask(lx_dat, ly_dat, modlmap_dat,
# Set up SZ frequency dependence
def gnu(nu_ghz, tcmb=2.7255):
    nu = 1e9*np.asarray(nu_ghz)
    hplanck = 6.62607e-34
    kboltzmann = 1.38065e-23
    x = hplanck*nu/kboltzmann/tcmb
    coth = np.cosh(x/2.)/np.sinh(x/2.)
    return x*coth - 4.

yresponses = gnu(freqs)
cresponses = yresponses*0 + 1.

fc = maps.FourierCalc(shape[-2:], wcs)
s = stats.Stats()
bin_edges = np.arange(300, 5000, 80)
binner = stats.bin2D(agen.modlmap, bin_edges)
for i in range(nsims):
    cmb, y, observed = agen.get_maps()
    kmaps = []
    for j in range(len(freqs)):
        _, kmap, _ = fc.power2d(observed[j])
        km = np.nan_to_num(kmap/agen.kbeams[j])
        km[agen.modlmap > ellmaxes[j]] = 0
        km[agen.modlmap < ellmins[j]] = 0
        kmaps.append(km.copy())
    kmaps = np.stack(kmaps)
    sc = maps.silc(kmaps, cinv, cresponses)
    sy = maps.silc(kmaps, cinv, yresponses)
    cc = maps.cilc(kmaps, cinv, cresponses, yresponses)
    cy = maps.cilc(kmaps, cinv, yresponses, cresponses)
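# --- Hedged worked example (added; values are approximate) ---
# The non-relativistic tSZ spectral function f(x) = x*coth(x/2) - 4 evaluated by gnu()
# is negative (a decrement) below the ~217 GHz null and positive (an increment) above it:
#   gnu(90.)  ~ -1.60
#   gnu(150.) ~ -0.95
#   gnu(217.) ~  0.0
#   gnu(353.) ~ +2.2
# The CMB response is flat (unity) in these thermodynamic units, which is why
# cresponses above is just an array of ones.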
def make_sim(self, seed):
    with bench.show("Lensing operation...") if self.rank == 0 else ignore():
        full, kappa = lensing.rand_map(
            self.fshape, self.fwcs, self.ps,
            lmax=self.lmax, maplmax=self.lmax, seed=seed,
            verbose=True if self.rank == 0 else False,
            dtype=self.dtype, output="lk")
    alms = curvedsky.map2alm(full, lmax=self.lmax)
    ps_data = hp.alm2cl(alms.astype(np.complex128))
    del alms
    self.mpibox.add_to_stats("fullsky_ps", ps_data)
    south = full.submap(self.pos_south)
    equator = full.submap(self.pos_eq)
    ksouth = kappa.submap(self.pos_south)
    kequator = kappa.submap(self.pos_eq)
    del full
    del kappa
    if self.count == 0:
        self.shape['s'], self.wcs['s'] = south.shape, south.wcs
        self.shape['e'], self.wcs['e'] = equator.shape, equator.wcs
        for m in ['s', 'e']:
            self.taper[m], self.w2[m] = fmaps.get_taper(self.shape[m],
                                                        taper_percent=18.0,
                                                        pad_percent=4.0,
                                                        weight=None)
            self.w4[m] = np.mean(self.taper[m]**4.)
            self.w3[m] = np.mean(self.taper[m]**3.)
        self.rotator = fmaps.MapRotatorEquator(
            self.shape['s'], self.wcs['s'], self.wdeg, self.hdeg,
            width_multiplier=0.6, height_multiplier=1.2,
            downsample=True,
            verbose=True if self.rank == 0 else False,
            pix_target_override_arcmin=self.pix_intermediate)
        self.taper['r'] = self.rotator.rotate(self.taper['s'])
        self.w2['r'] = np.mean(self.taper['r']**2.)
        self.w4['r'] = np.mean(self.taper['r']**4.)
        self.w3['r'] = np.mean(self.taper['r']**3.)
        self.shape['r'], self.wcs['r'] = self.rotator.shape_final, self.rotator.wcs_final
        self.fc = {}
        self.binner = {}
        self.modlmap = {}
        for m in ['s', 'e', 'r']:
            self.fc[m] = fmaps.FourierCalc(self.shape[m], self.wcs[m])
            self.modlmap[m] = enmap.modlmap(self.shape[m], self.wcs[m])
            self.binner[m] = bin2D(self.modlmap[m], self.bin_edges)
        self.cents = self.binner['s'].centers
        self._init_qests()
    self.count += 1
    south *= self.taper['s']
    equator *= self.taper['e']
    ksouth *= self.taper['s']
    kequator *= self.taper['e']
    return south, equator, ksouth, kequator
from soapack import interfaces as sints
from actsims import noise

froot = "/scratch/r/rbond/msyriac/data/scratch/tilec/test_lfi_v2_00_0000_deep56/"
kroot = "/scratch/r/rbond/msyriac/data/depot/tilec/test_lfi_v2_00_0000_deep56/"
droot = "/scratch/r/rbond/msyriac/data/depot/tilec/test_lfi_data_deep56/"
lroot = "/scratch/r/rbond/msyriac/data/depot/tilec/test_lfi_v3_00_0000_deep56/"

mask = sints.get_act_mr3_crosslinked_mask("deep56")
shape, wcs = mask.shape, mask.wcs
w2 = np.mean(mask**2.)

qids = 'p01,p02,p03'.split(',')
bin_edges = np.arange(20, 6000, 40)
modlmap = mask.modlmap()
binner = stats.bin2D(modlmap, bin_edges)

arraynames = {'p01': '030', 'p02': '044', 'p03': '070'}
dm = sints.PlanckHybrid(region=mask)
for qid in qids:
    split = enmap.read_map("%ssplit_%s.fits" % (froot, qid))
    # io.hplot(enmap.downgrade(split,4),"split_%s" % qid)
    arrayname = arraynames[qid]
    wts = dm.get_splits_ivar(arrayname)[0, :, 0, ...]
    coadd, _ = noise.get_coadd(split[:, 0, ...], wts, axis=0) * mask
def do(ymap, cmap, dmap, mask, ras, decs, wt):
    combined = list(zip(ras, decs))
    random.shuffle(combined)
    ras[:], decs[:] = zip(*combined)
    Nrand = 400
    njobs = len(ras)
    comm, rank, my_tasks = mpi.distribute(njobs)
    print("Rank %d starting" % rank)
    s = stats.Stats(comm)
    i = 0
    for task in my_tasks:
        ra = ras[task]
        dec = decs[task]
        mcut, ycut, ccut, dcut, weight = get_cuts(mask, ymap, cmap, dmap, wt, ra, dec, arcmin, pix)
        if mcut is None:
            continue
        if i == 0:
            modrmap = np.rad2deg(ycut.modrmap()) * 60.
            bin_edges = np.arange(0., 15., 1.0)
            binner = stats.bin2D(modrmap, bin_edges)
            rras, rdecs = catalogs.random_catalog(ymap.shape, ymap.wcs, Nrand, edge_avoid_deg=4.)
            nrej = 0
            for rra, rdec in zip(rras, rdecs):
                rmcut, rycut, rccut, rdcut, rweight = get_cuts(
                    mask, ymap, cmap, dmap, wt, rra, rdec, arcmin, pix)
                if rmcut is None:
                    nrej = nrej + 1
                    continue
                cents, ry1d = binner.bin(rycut)
                cents, rc1d = binner.bin(rccut)
                cents, rd1d = binner.bin(rdcut)
                s.add_to_stats("rc1d", rc1d * 1e6)
                s.add_to_stats("ry1d", ry1d * 1e6)
                s.add_to_stats("rd1d", rd1d * 1e6)
            if rank == 0:
                print(Nrand - nrej, " accepted")
        cents, y1d = binner.bin(ycut)
        cents, c1d = binner.bin(ccut)
        cents, d1d = binner.bin(dcut)
        s.add_to_stats("c1d", c1d * 1e6)
        s.add_to_stats("y1d", y1d * 1e6)
        s.add_to_stats("d1d", d1d * 1e6)
        s.add_to_stack("cstack", ccut * 1e6 * weight)
        s.add_to_stack("dstack", dcut * 1e6 * weight)
        s.add_to_stack("ystack", ycut * 1e6 * weight)
        s.add_to_stats("sum", (weight,))
        i = i + 1
        if i % 10 == 0 and rank == 0:
            print(i)
    print("Rank %d done " % rank)
    s.get_stats()
    s.get_stacks()
    if rank == 0:
        N = s.vectors['sum'].sum()
        ystack = s.stacks['ystack'] * N
        cstack = s.stacks['cstack'] * N
        dstack = s.stacks['dstack'] * N
        y1ds = s.vectors['y1d']
        c1ds = s.vectors['c1d']
        d1ds = s.vectors['d1d']
        ry1d = s.stats['ry1d']['mean']
        rc1d = s.stats['rc1d']['mean']
        rd1d = s.stats['rd1d']['mean']
        _, nwcs = enmap.geometry(pos=(0, 0), shape=ystack.shape, res=np.deg2rad(0.5/60.))
        return (rank,
                enmap.enmap(ystack, nwcs), enmap.enmap(cstack, nwcs), enmap.enmap(dstack, nwcs),
                N, cents, y1ds, c1ds, d1ds, ry1d, rc1d, rd1d)
    else:
        return rank, None, None, None, None, None, None, None, None, None, None, None