### Cosmo
cosmo = constants.cosmo(args.fid_Om)

### Read deltas
dels, ndels, zmin_pix, zmax_pix = io.read_deltas(args.in_dir,
                                                 args.nside,
                                                 xcf.lambda_abs,
                                                 args.z_evol_del,
                                                 args.z_ref,
                                                 cosmo=cosmo,
                                                 nspec=args.nspec)
for p, delsp in dels.items():
    for d in delsp:
        d.fname = 'D1'
        for k in ['co', 'de', 'order', 'iv', 'diff', 'm_SNR', 'm_reso',
                  'm_z', 'dll']:
            setattr(d, k, None)
xcf.npix = len(dels)
xcf.dels = dels
xcf.ndels = ndels
sys.stderr.write("\n")
print("done, npix = {}, ndels = {}".format(xcf.npix, xcf.ndels))
sys.stderr.write("\n")

### Find the redshift range
if args.z_min_obj is None:
    dmin_pix = cosmo.r_comoving(zmin_pix)
    dmin_obj = max(0., dmin_pix + xcf.rp_min)
    args.z_min_obj = cosmo.r_2_z(dmin_obj)
    sys.stderr.write("\r z_min_obj = {}\r".format(args.z_min_obj))
if args.z_max_obj is None:
    dmax_pix = cosmo.r_comoving(zmax_pix)
    dmax_obj = max(0., dmax_pix + xcf.rp_max)
    args.z_max_obj = cosmo.r_2_z(dmax_obj)
    sys.stderr.write("\r z_max_obj = {}\r".format(args.z_max_obj))

### Read objects
def metal_dmat(pix, abs_igm1="LYA", abs_igm2="SiIII(1207)"):

    dm = sp.zeros(np * nt * ntm * npm)
    wdm = sp.zeros(np * nt)
    rpeff = sp.zeros(ntm * npm)
    rteff = sp.zeros(ntm * npm)
    zeff = sp.zeros(ntm * npm)
    weff = sp.zeros(ntm * npm)

    npairs = 0
    npairs_used = 0
    for p in pix:
        for d1 in data[p]:
            print("\rcomputing metal dmat {} {}: {}%".format(
                abs_igm1, abs_igm2,
                round(counter.value * 100. / ndata, 3)),
                end="")
            with lock:
                counter.value += 1

            # randomly reject a fraction rej of the neighbouring forests
            r = sp.random.rand(len(d1.dneighs))
            w = r > rej
            npairs += len(d1.dneighs)
            npairs_used += w.sum()

            for d2 in sp.array(d1.dneighs)[w]:
                r1 = d1.r_comov
                rdm1 = d1.rdm_comov
                # redshift of each pixel if the absorption were due to abs_igm1
                z1_abs1 = 10**d1.ll / constants.absorber_IGM[abs_igm1] - 1
                r1_abs1 = cosmo.r_comoving(z1_abs1)
                rdm1_abs1 = cosmo.dm(z1_abs1)
                w1 = d1.we

                wzcut = z1_abs1 < d1.zqso
                r1 = r1[wzcut]
                rdm1 = rdm1[wzcut]
                w1 = w1[wzcut]
                r1_abs1 = r1_abs1[wzcut]
                rdm1_abs1 = rdm1_abs1[wzcut]
                z1_abs1 = z1_abs1[wzcut]

                same_half_plate = (d1.plate == d2.plate) and \
                    ((d1.fid <= 500 and d2.fid <= 500) or
                     (d1.fid > 500 and d2.fid > 500))
                ang = d1 ^ d2
                r2 = d2.r_comov
                rdm2 = d2.rdm_comov
                z2_abs2 = 10**d2.ll / constants.absorber_IGM[abs_igm2] - 1
                r2_abs2 = cosmo.r_comoving(z2_abs2)
                rdm2_abs2 = cosmo.dm(z2_abs2)
                w2 = d2.we

                wzcut = z2_abs2 < d2.zqso
                r2 = r2[wzcut]
                rdm2 = rdm2[wzcut]
                w2 = w2[wzcut]
                r2_abs2 = r2_abs2[wzcut]
                rdm2_abs2 = rdm2_abs2[wzcut]
                z2_abs2 = z2_abs2[wzcut]

                # bins of the measured correlation (A)
                rp = (r1[:, None] - r2) * sp.cos(ang / 2)
                if not x_correlation:
                    rp = abs(rp)
                rt = (rdm1[:, None] + rdm2) * sp.sin(ang / 2)
                w12 = w1[:, None] * w2

                bp = sp.floor(
                    (rp - rp_min) / (rp_max - rp_min) * np).astype(int)
                bt = (rt / rt_max * nt).astype(int)
                if remove_same_half_plate_close_pairs and same_half_plate:
                    wp = abs(rp) < (rp_max - rp_min) / np
                    w12[wp] = 0.
                bA = bt + nt * bp
                wA = (bp < np) & (bt < nt) & (bp >= 0)
                c = sp.bincount(bA[wA], weights=w12[wA])
                wdm[:len(c)] += c

                # bins of the model (metal) correlation (B),
                # weighted by the redshift-evolution factor
                rp_abs1_abs2 = (r1_abs1[:, None] - r2_abs2) * sp.cos(ang / 2)
                if not x_correlation:
                    rp_abs1_abs2 = abs(rp_abs1_abs2)
                rt_abs1_abs2 = (rdm1_abs1[:, None] + rdm2_abs2) * sp.sin(ang / 2)
                zwe12 = (1 + z1_abs1[:, None])**(alpha_abs[abs_igm1] - 1) * \
                    (1 + z2_abs2)**(alpha_abs[abs_igm2] - 1) / \
                    (1 + zref)**(alpha_abs[abs_igm1] + alpha_abs[abs_igm2] - 2)

                bp_abs1_abs2 = sp.floor((rp_abs1_abs2 - rp_min) /
                                        (rp_max - rp_min) * npm).astype(int)
                bt_abs1_abs2 = (rt_abs1_abs2 / rt_max * ntm).astype(int)
                bBma = bt_abs1_abs2 + ntm * bp_abs1_abs2
                wBma = (bp_abs1_abs2 < npm) & (bt_abs1_abs2 < ntm) & \
                    (bp_abs1_abs2 >= 0)
                wAB = wA & wBma
                c = sp.bincount(bBma[wAB] + npm * ntm * bA[wAB],
                                weights=w12[wAB] * zwe12[wAB])
                dm[:len(c)] += c
                c = sp.bincount(bBma[wAB],
                                weights=rp_abs1_abs2[wAB] * w12[wAB] * zwe12[wAB])
                rpeff[:len(c)] += c
                c = sp.bincount(bBma[wAB],
                                weights=rt_abs1_abs2[wAB] * w12[wAB] * zwe12[wAB])
                rteff[:len(c)] += c
                c = sp.bincount(bBma[wAB],
                                weights=(z1_abs1[:, None] + z2_abs2)[wAB] / 2 *
                                w12[wAB] * zwe12[wAB])
                zeff[:len(c)] += c
                c = sp.bincount(bBma[wAB], weights=w12[wAB] * zwe12[wAB])
                weff[:len(c)] += c

                # symmetric term: abs_igm2 in forest 1, abs_igm1 in forest 2
                if ((not x_correlation) and (abs_igm1 != abs_igm2)) or \
                        (x_correlation and (lambda_abs == lambda_abs2)):
                    r1 = d1.r_comov
                    rdm1 = d1.rdm_comov
                    w1 = d1.we
                    z1_abs2 = 10**d1.ll / constants.absorber_IGM[abs_igm2] - 1
                    r1_abs2 = cosmo.r_comoving(z1_abs2)
                    rdm1_abs2 = cosmo.dm(z1_abs2)

                    wzcut = z1_abs2 < d1.zqso
                    r1 = r1[wzcut]
                    rdm1 = rdm1[wzcut]
                    w1 = w1[wzcut]
                    z1_abs2 = z1_abs2[wzcut]
                    r1_abs2 = r1_abs2[wzcut]
                    rdm1_abs2 = rdm1_abs2[wzcut]

                    r2 = d2.r_comov
                    rdm2 = d2.rdm_comov
                    w2 = d2.we
                    z2_abs1 = 10**d2.ll / constants.absorber_IGM[abs_igm1] - 1
                    r2_abs1 = cosmo.r_comoving(z2_abs1)
                    rdm2_abs1 = cosmo.dm(z2_abs1)

                    wzcut = z2_abs1 < d2.zqso
                    r2 = r2[wzcut]
                    rdm2 = rdm2[wzcut]
                    w2 = w2[wzcut]
                    z2_abs1 = z2_abs1[wzcut]
                    r2_abs1 = r2_abs1[wzcut]
                    rdm2_abs1 = rdm2_abs1[wzcut]

                    rp = (r1[:, None] - r2) * sp.cos(ang / 2)
                    if not x_correlation:
                        rp = abs(rp)
                    rt = (rdm1[:, None] + rdm2) * sp.sin(ang / 2)
                    w12 = w1[:, None] * w2

                    bp = sp.floor(
                        (rp - rp_min) / (rp_max - rp_min) * np).astype(int)
                    bt = (rt / rt_max * nt).astype(int)
                    if remove_same_half_plate_close_pairs and same_half_plate:
                        wp = abs(rp) < (rp_max - rp_min) / np
                        w12[wp] = 0.
                    bA = bt + nt * bp
                    wA = (bp < np) & (bt < nt) & (bp >= 0)
                    c = sp.bincount(bA[wA], weights=w12[wA])
                    wdm[:len(c)] += c

                    rp_abs2_abs1 = (r1_abs2[:, None] - r2_abs1) * sp.cos(ang / 2)
                    if not x_correlation:
                        rp_abs2_abs1 = abs(rp_abs2_abs1)
                    rt_abs2_abs1 = (rdm1_abs2[:, None] + rdm2_abs1) * sp.sin(ang / 2)
                    zwe21 = (1 + z1_abs2[:, None])**(alpha_abs[abs_igm2] - 1) * \
                        (1 + z2_abs1)**(alpha_abs[abs_igm1] - 1) / \
                        (1 + zref)**(alpha_abs[abs_igm1] + alpha_abs[abs_igm2] - 2)

                    bp_abs2_abs1 = sp.floor((rp_abs2_abs1 - rp_min) /
                                            (rp_max - rp_min) * npm).astype(int)
                    bt_abs2_abs1 = (rt_abs2_abs1 / rt_max * ntm).astype(int)
                    bBam = bt_abs2_abs1 + ntm * bp_abs2_abs1
                    wBam = (bp_abs2_abs1 < npm) & (bt_abs2_abs1 < ntm) & \
                        (bp_abs2_abs1 >= 0)
                    wAB = wA & wBam

                    c = sp.bincount(bBam[wAB],
                                    weights=rp_abs2_abs1[wAB] * w12[wAB] * zwe21[wAB])
                    rpeff[:len(c)] += c
                    c = sp.bincount(bBam[wAB],
                                    weights=rt_abs2_abs1[wAB] * w12[wAB] * zwe21[wAB])
                    rteff[:len(c)] += c
                    c = sp.bincount(bBam[wAB],
                                    weights=(z1_abs2[:, None] + z2_abs1)[wAB] / 2 *
                                    w12[wAB] * zwe21[wAB])
                    zeff[:len(c)] += c
                    c = sp.bincount(bBam[wAB], weights=w12[wAB] * zwe21[wAB])
                    weff[:len(c)] += c

                    c = sp.bincount(bBam[wAB] + npm * ntm * bA[wAB],
                                    weights=w12[wAB] * zwe21[wAB])
                    dm[:len(c)] += c

            setattr(d1, "dneighs", None)

    return wdm, dm.reshape(np * nt, npm * ntm), rpeff, rteff, zeff, weff, \
        npairs, npairs_used
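# The accumulation in metal_dmat relies on flattening pairs of 2D
# (r_parallel, r_transverse) bins into 1D indices and summing weights with
# sp.bincount.  Below is a minimal, self-contained numpy sketch of that
# indexing trick.  It is illustrative only: bin_pairs_sketch, its default
# ranges and bin counts are made up for the example, and the module is
# imported under the hypothetical alias np_lib to avoid clashing with the
# bin count called np in the scripts above.
import numpy as np_lib

def bin_pairs_sketch(rp, rt, w, rp_min=0., rp_max=200., rt_max=200.,
                     n_par=50, n_trans=50):
    """Return flattened bin indices, a validity mask and the binned weights."""
    bp = np_lib.floor((rp - rp_min) / (rp_max - rp_min) * n_par).astype(int)
    bt = (rt / rt_max * n_trans).astype(int)
    bins = bt + n_trans * bp                       # flattened (bp, bt) index
    valid = (bp >= 0) & (bp < n_par) & (bt < n_trans)
    counts = np_lib.bincount(bins[valid], weights=w[valid],
                             minlength=n_par * n_trans)
    return bins, valid, counts
# In metal_dmat the same trick is applied twice, once for the measured bins
# (bA) and once for the model bins (bBma), and the distortion matrix is filled
# by bincounting the combined index bBma + npm * ntm * bA before reshaping to
# (np * nt, npm * ntm).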
def wickT(pix):
    T1 = sp.zeros((np * nt, np * nt))
    T2 = sp.zeros((np * nt, np * nt))
    T3 = sp.zeros((np * nt, np * nt))
    T4 = sp.zeros((np * nt, np * nt))
    T5 = sp.zeros((np * nt, np * nt))
    T6 = sp.zeros((np * nt, np * nt))
    wAll = sp.zeros(np * nt)
    nb = sp.zeros(np * nt, dtype=sp.int64)
    npairs = 0
    npairs_used = 0

    for ipix in pix:
        r = sp.random.rand(len(data[ipix]))
        w = r > rej
        npairs += len(data[ipix])
        npairs_used += w.sum()
        if w.sum() == 0:
            continue

        for d1 in [td for ti, td in enumerate(data[ipix]) if w[ti]]:
            print("\rcomputing xi: {}%".format(
                round(counter.value * 100. / ndata / (1. - rej), 3)),
                end="")
            with lock:
                counter.value += 1
            if len(d1.dneighs) == 0:
                continue

            v1 = v1d[d1.fname](d1.ll)
            w1 = d1.we
            c1d_1 = (w1 * w1[:, None]) * c1d[d1.fname](
                abs(d1.ll - d1.ll[:, None])) * sp.sqrt(v1 * v1[:, None])
            r1 = d1.r_comov
            z1 = d1.z

            for i2, d2 in enumerate(d1.dneighs):
                ang12 = d1 ^ d2

                v2 = v1d[d2.fname](d2.ll)
                w2 = d2.we
                c1d_2 = (w2 * w2[:, None]) * c1d[d2.fname](
                    abs(d2.ll - d2.ll[:, None])) * sp.sqrt(v2 * v2[:, None])
                r2 = d2.r_comov
                z2 = d2.z

                fill_wickT123(r1, r2, ang12, w1, d2.we, z1, z2, c1d_1, c1d_2,
                              wAll, nb, T1, T2, T3)

                if max_diagram <= 3:
                    continue

                ### d3 and d2 have the same 'fname'
                for d3 in d1.dneighs[:i2]:
                    ang13 = d1 ^ d3
                    ang23 = d2 ^ d3

                    v3 = v1d[d3.fname](d3.ll)
                    w3 = d3.we
                    c1d_3 = (w3 * w3[:, None]) * c1d[d3.fname](
                        abs(d3.ll - d3.ll[:, None])) * sp.sqrt(v3 * v3[:, None])
                    r3 = d3.r_comov
                    z3 = d3.z

                    fill_wickT45(r1, r2, r3, ang12, ang13, ang23, w1, w2, w3,
                                 z1, z2, z3, c1d_1, c1d_2, c1d_3, d1.fname,
                                 d2.fname, d3.fname, T4, T5)

                ### TODO: handle the case where there are two different catalogs
                ### d3 and d1 have the same 'fname'

    return wAll, nb, npairs, npairs_used, T1, T2, T3, T4, T5, T6
def wickT(pix):
    """Compute the Wick covariance matrix for the object-pixel cross-correlation

    Args:
        pix (lst): list of HEALpix pixels

    Returns:
        (tuple): results of the Wick computation
    """
    T1 = sp.zeros((np * nt, np * nt))
    T2 = sp.zeros((np * nt, np * nt))
    T3 = sp.zeros((np * nt, np * nt))
    T4 = sp.zeros((np * nt, np * nt))
    T5 = sp.zeros((np * nt, np * nt))
    T6 = sp.zeros((np * nt, np * nt))
    wAll = sp.zeros(np * nt)
    nb = sp.zeros(np * nt, dtype=sp.int64)
    npairs = 0
    npairs_used = 0

    for ipix in pix:
        npairs += len(dels[ipix])
        r = sp.random.rand(len(dels[ipix]))
        w = r > rej
        npairs_used += w.sum()
        if w.sum() == 0:
            continue

        for d1 in [td for ti, td in enumerate(dels[ipix]) if w[ti]]:
            print("\rcomputing xi: {}%".format(
                round(counter.value * 100. / ndels / (1. - rej), 3)),
                end="")
            with lock:
                counter.value += 1
            if d1.qneighs.size == 0:
                continue

            v1 = v1d[d1.fname](d1.ll)
            w1 = d1.we
            c1d_1 = (w1 * w1[:, None]) * c1d[d1.fname](
                abs(d1.ll - d1.ll[:, None])) * sp.sqrt(v1 * v1[:, None])
            r1 = d1.r_comov
            z1 = d1.z

            neighs = d1.qneighs
            ang12 = d1 ^ neighs
            r2 = sp.array([q2.r_comov for q2 in neighs])
            z2 = sp.array([q2.zqso for q2 in neighs])
            w2 = sp.array([q2.we for q2 in neighs])

            fill_wickT1234(ang12, r1, r2, z1, z2, w1, w2, c1d_1, wAll, nb,
                           T1, T2, T3, T4)

            ### Higher order diagrams
            if (cfWick is None) or (max_diagram <= 4):
                continue
            thid2 = sp.array([q2.thid for q2 in neighs])

            for d3 in sp.array(d1.dneighs):
                if d3.qneighs.size == 0:
                    continue

                ang13 = d1 ^ d3

                r3 = d3.r_comov
                w3 = d3.we

                neighs = d3.qneighs
                ang34 = d3 ^ neighs
                r4 = sp.array([q4.r_comov for q4 in neighs])
                w4 = sp.array([q4.we for q4 in neighs])
                thid4 = sp.array([q4.thid for q4 in neighs])

                if max_diagram == 5:
                    w = sp.in1d(d1.qneighs, d3.qneighs)
                    if w.sum() == 0:
                        continue
                    t_ang12 = ang12[w]
                    t_r2 = r2[w]
                    t_w2 = w2[w]
                    t_thid2 = thid2[w]

                    w = sp.in1d(d3.qneighs, d1.qneighs)
                    if w.sum() == 0:
                        continue
                    ang34 = ang34[w]
                    r4 = r4[w]
                    w4 = w4[w]
                    thid4 = thid4[w]

                fill_wickT56(t_ang12, ang34, ang13, r1, t_r2, r3, r4, w1,
                             t_w2, w3, w4, t_thid2, thid4, T5, T6)

    return wAll, nb, npairs, npairs_used, T1, T2, T3, T4, T5, T6
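# A minimal sketch of how a driver might split the HEALPix pixels into chunks,
# run wickT over each chunk in parallel, and co-add the per-chunk outputs into
# a single matrix.  The chunking, the Pool setup, the function name
# run_wick_sketch and the final normalisation are assumptions made for
# illustration, not the exact driver code of the scripts above; wickT also
# relies on the module-level globals (dels, counter, lock, ...) being set up
# before the pool is forked.
from multiprocessing import Pool

def run_wick_sketch(all_pix, nproc=4, nchunks=32):
    chunks = [all_pix[i::nchunks] for i in range(nchunks)]
    with Pool(processes=nproc) as pool:
        results = pool.map(wickT, chunks)

    # weights, pair counts and the six Wick terms simply add over chunks
    wAll = sum(r[0] for r in results)
    nb = sum(r[1] for r in results)
    npairs = sum(r[2] for r in results)
    npairs_used = sum(r[3] for r in results)
    T = sum(sum(r[4:]) for r in results)   # T1 + ... + T6, summed over chunks

    # normalise by the summed weights, guarding against empty bins
    w = wAll > 0.
    T[w, :] /= wAll[w][:, None]
    T[:, w] /= wAll[w][None, :]
    return T, nb, npairs, npairs_used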
cf.n1d = int((cf.lmax - cf.lmin) / cf.dll + 1)
cf.x_correlation = False

cf.lambda_abs = constants.absorber_IGM[args.lambda_abs]
if args.lambda_abs2:
    cf.lambda_abs2 = constants.absorber_IGM[args.lambda_abs2]
else:
    cf.lambda_abs2 = constants.absorber_IGM[args.lambda_abs]

### Read data 1
data, ndata, zmin_pix, zmax_pix = io.read_deltas(args.in_dir, cf.nside,
    cf.lambda_abs, args.z_evol, args.z_ref, cosmo=None, nspec=args.nspec,
    no_project=args.no_project)
cf.npix = len(data)
cf.data = data
cf.ndata = ndata
print("")
print("done, npix = {}\n".format(cf.npix))

### Read data 2
if args.in_dir2:
    cf.x_correlation = True
    data2, ndata2, zmin_pix2, zmax_pix2 = io.read_deltas(args.in_dir2,
        cf.nside, cf.lambda_abs2, args.z_evol2, args.z_ref, cosmo=None,
        nspec=args.nspec, no_project=args.no_project)
    cf.data2 = data2
    cf.ndata2 = ndata2
    print("")
    print("done, npix = {}\n".format(len(data2)))
elif cf.lambda_abs != cf.lambda_abs2:
    cf.x_correlation = True
    data2, ndata2, zmin_pix2, zmax_pix2 = io.read_deltas(args.in_dir,
        cf.nside, cf.lambda_abs2, args.z_evol2, args.z_ref, cosmo=None,
        nspec=args.nspec, no_project=args.no_project)
    cf.data2 = data2
    cf.ndata2 = ndata2
cosmo = constants.cosmo(args.fid_Om)

### Read deltas
dels, ndels, zmin_pix, zmax_pix = io.read_deltas(
    args.in_dir,
    args.nside,
    constants.absorber_IGM[args.lambda_abs],
    args.z_evol_del,
    args.z_ref,
    cosmo=cosmo,
    nspec=args.nspec,
    no_project=args.no_project)
xcf.npix = len(dels)
xcf.dels = dels
xcf.ndels = ndels
print("")
print("done, npix = {}".format(xcf.npix))

### Remove <delta> vs. lambda_obs
if not args.no_remove_mean_lambda_obs:
    forest.dll = None
    for p in xcf.dels:
        for d in xcf.dels[p]:
            dll = sp.asarray([d.ll[ii] - d.ll[ii - 1]
                              for ii in range(1, d.ll.size)]).min()
            if forest.dll is None:
                forest.dll = dll
            else:
                forest.dll = min(dll, forest.dll)
    forest.lmin = sp.log10(
def metal_dmat(pix, abs_igm="SiII(1526)"):

    dm = sp.zeros(np * nt * ntm * npm)
    wdm = sp.zeros(np * nt)
    rpeff = sp.zeros(ntm * npm)
    rteff = sp.zeros(ntm * npm)
    zeff = sp.zeros(ntm * npm)
    weff = sp.zeros(ntm * npm)

    npairs = 0
    npairs_used = 0
    for p in pix:
        for d in dels[p]:
            with lock:
                print("\rcomputing metal dmat {}: {}%".format(
                    abs_igm, round(counter.value * 100. / ndels, 3)),
                    end="")
                counter.value += 1

            r = sp.random.rand(len(d.qneighs))
            w = r > rej
            npairs += len(d.qneighs)
            npairs_used += w.sum()

            rd = d.r_comov
            rdm = d.rdm_comov
            wd = d.we
            zd_abs = 10**d.ll / constants.absorber_IGM[abs_igm] - 1
            rd_abs = cosmo.r_comoving(zd_abs)
            rdm_abs = cosmo.dm(zd_abs)

            wzcut = zd_abs < d.zqso
            rd = rd[wzcut]
            rdm = rdm[wzcut]
            wd = wd[wzcut]
            zd_abs = zd_abs[wzcut]
            rd_abs = rd_abs[wzcut]
            rdm_abs = rdm_abs[wzcut]
            if rd.size == 0:
                continue

            for q in sp.array(d.qneighs)[w]:
                ang = d ^ q
                rq = q.r_comov
                rqm = q.rdm_comov
                wq = q.we
                zq = q.zqso

                rp = (rd - rq) * sp.cos(ang / 2)
                rt = (rdm + rqm) * sp.sin(ang / 2)
                wdq = wd * wq
                wA = (rp > rp_min) & (rp < rp_max) & (rt < rt_max)
                bp = ((rp - rp_min) / (rp_max - rp_min) * np).astype(int)
                bt = (rt / rt_max * nt).astype(int)
                bA = bt + nt * bp
                c = sp.bincount(bA[wA], weights=wdq[wA])
                wdm[:len(c)] += c

                rp_abs = (rd_abs - rq) * sp.cos(ang / 2)
                rt_abs = (rdm_abs + rqm) * sp.sin(ang / 2)

                bp_abs = ((rp_abs - rp_min) / (rp_max - rp_min) * npm).astype(int)
                bt_abs = (rt_abs / rt_max * ntm).astype(int)
                bBma = bt_abs + ntm * bp_abs
                wBma = (rp_abs > rp_min) & (rp_abs < rp_max) & (rt_abs < rt_max)
                wAB = wA & wBma

                c = sp.bincount(bBma[wAB] + npm * ntm * bA[wAB], weights=wdq[wAB])
                dm[:len(c)] += c
                c = sp.bincount(bBma[wAB], weights=rp_abs[wAB] * wdq[wAB])
                rpeff[:len(c)] += c
                c = sp.bincount(bBma[wAB], weights=rt_abs[wAB] * wdq[wAB])
                rteff[:len(c)] += c
                c = sp.bincount(bBma[wAB], weights=(zd_abs + zq)[wAB] / 2 * wdq[wAB])
                zeff[:len(c)] += c
                c = sp.bincount(bBma[wAB], weights=wdq[wAB])
                weff[:len(c)] += c

            setattr(d, "qneighs", None)

    return wdm, dm.reshape(np * nt, npm * ntm), rpeff, rteff, zeff, weff, \
        npairs, npairs_used
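# Both metal_dmat functions re-interpret each forest pixel's observed
# wavelength as if the absorption were due to the metal transition instead of
# Lya: z_abs = 10**ll / lambda_rest - 1, keeping only pixels with z_abs below
# the quasar redshift.  A tiny illustrative sketch of that mapping follows;
# absorber_redshift_sketch is a hypothetical helper, and the 1526.7 A
# rest-frame wavelength is approximate (picca keeps the exact values in
# constants.absorber_IGM).
import numpy

def absorber_redshift_sketch(ll, zqso, lambda_rest=1526.7):
    """ll is log10 of the observed wavelength in Angstrom."""
    z_abs = 10**numpy.asarray(ll) / lambda_rest - 1
    keep = z_abs < zqso
    return z_abs[keep], keep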
### Read data 1
data, ndata, zmin_pix, zmax_pix = io.read_deltas(args.in_dir,
                                                 cf.nside,
                                                 cf.lambda_abs,
                                                 cf.alpha,
                                                 cf.zref,
                                                 cosmo,
                                                 nspec=args.nspec,
                                                 no_project=args.no_project)
cf.npix = len(data)
cf.data = data
cf.ndata = ndata
cf.angmax = utils.compute_ang_max(cosmo, cf.rt_max, zmin_pix)
print("")
print("done, npix = {}".format(cf.npix))

### Read data 2
if args.in_dir2 or args.lambda_abs2:
    if args.lambda_abs2 or args.unfold_cf:
        cf.x_correlation = True
    cf.alpha2 = args.z_evol2
    if args.in_dir2 is None:
        args.in_dir2 = args.in_dir
    if args.lambda_abs2:
        cf.lambda_abs2 = constants.absorber_IGM[args.lambda_abs2]
    else:
        cf.lambda_abs2 = cf.lambda_abs

    data2, ndata2, zmin_pix2, zmax_pix2 = io.read_deltas(
    '--cov',
    type=str,
    default=None,
    required=False,
    help='Path to a covariance matrix file (if not provided it will be '
    'calculated by subsampling or from Poisson statistics)')

args = parser.parse_args()

### Auto or cross correlation?
if (args.DD_file is None and args.xDD_file is None) or (
        args.DD_file is not None and args.xDD_file is not None) or (
        args.cov is not None and args.get_cov_from_poisson):
    print('ERROR: No data files, or both auto and cross data files, '
          'or two different methods for the covariance')
    sys.exit()
elif args.DD_file is not None:
    corr = 'AUTO'
    lst_file = {'DD': args.DD_file,
                'RR': args.RR_file,
                'DR': args.DR_file,
                'RD': args.RD_file}
elif args.xDD_file is not None:
    # TODO: Test if picca_co.py and export_co.py work for cross
    corr = 'CROSS'
    lst_file = {'xDD': args.xDD_file,
parser.add_argument('--error-on-mean',
                    action='store_true',
                    default=False,
                    help='Divide the covariance by the number of realizations')

parser.add_argument('--do-not-smooth-cov',
                    action='store_true',
                    default=False,
                    help='Do not smooth the covariance matrix')

parser.add_argument('--out',
                    type=str,
                    default=None,
                    required=True,
                    help='Output file name')

args = parser.parse_args()

###
head = {'RPMIN': None, 'RPMAX': None, 'RTMAX': None, 'NP': None, 'NT': None}
dic = {'RP': [], 'RT': [], 'Z': [], 'NB': [], 'DA': []}

for i, f in enumerate(args.data):
    print('INFO: file {}: {} over {} files'.format(i, f, len(args.data)))
    h = fitsio.FITS(f)

    for k in head.keys():
        if head[k] is None:
            head[k] = h[1].read_header()[k]
        else:
            assert head[k] == h[1].read_header()[k]

    for k in [el for el in dic.keys() if el != 'DA']:
        dic[k] += [h[1][k][:]]

    if h[1].read_header()['EXTNAME'].strip() == 'ATTRI':
        da = np.array(h['COR']['DA'][:])
        we = np.array(h['COR']['WE'][:])
        da = (da * we).sum(axis=0)
for f in args.data:
    if not (os.path.isfile(f.replace('cf', 'dmat')) or args.no_dmat):
        continue
    h = fitsio.FITS(f)
    we_aux = h[2]["WE"][:]
    wet_aux = we_aux.sum(axis=0)
    rp += h[1]['RP'][:] * wet_aux
    rt += h[1]['RT'][:] * wet_aux
    z += h[1]['Z'][:] * wet_aux
    nb += h[1]['NB'][:]
    wet += wet_aux

    hid = h[2]['HEALPID'][:]
    for i, p in enumerate(hid):
        print("\rcoadding healpix {} in file {}".format(p, f), end="")
        if p in da:
            da[p] += h[2]["DA"][:][i] * we_aux[i]
            we[p] += we_aux[i, :]
        else:
            da[p] = h[2]["DA"][:][i] * we_aux[i]
            we[p] = we_aux[i]
    h.close()

    if not args.no_dmat:
        h = fitsio.FITS(f.replace('cf', 'dmat'))
        dm += h[1]['DM'][:] * wet_aux[:, None]
        if 'dmrp' in locals():
            ## TODO: get the weights
            dmrp += h[2]['RP'][:]
            dmrt += h[2]['RT'][:]
                    help='Maximum number of spectra to read')

parser.add_argument(
    '--unfold-cf',
    action='store_true',
    required=False,
    help='rp can be positive or negative depending on the relative position '
    'between absorber1 and absorber2')

args = parser.parse_args()

if args.nproc is None:
    args.nproc = cpu_count() // 2

print("nproc", args.nproc)

cf.rp_max = args.rp_max
cf.rp_min = args.rp_min
cf.rt_max = args.rt_max
cf.z_cut_max = args.z_cut_max
cf.z_cut_min = args.z_cut_min
# npb = number of parallel bins (to avoid collision with numpy np)
cf.npb = args.np
cf.ntb = args.nt
cf.npm = args.np * args.coef_binning_model
cf.ntm = args.nt * args.coef_binning_model
cf.nside = args.nside
cf.zref = args.z_ref
cf.alpha = args.z_evol
cf.rej = args.rej
rp_max = head['RPMAX']
h.close()

if args.remove_shuffled_correlation is not None:
    th = fitsio.FITS(args.remove_shuffled_correlation)
    da_s = th['COR']['DA'][:]
    we_s = th['COR']['WE'][:]
    da_s = (da_s * we_s).sum(axis=1)
    we_s = we_s.sum(axis=1)
    w = we_s > 0.
    da_s[w] /= we_s[w]
    th.close()
    da -= da_s[:, None]

if args.cov is not None:
    print('INFO: The covariance-matrix will be read from file: {}'.format(args.cov))
    hh = fitsio.FITS(args.cov)
    co = hh[1]['CO'][:]
    hh.close()
elif args.cor is not None:
    print('INFO: The correlation-matrix will be read from file: {}'.format(args.cor))
    hh = fitsio.FITS(args.cor)
    cor = hh[1]['CO'][:]
    hh.close()
    if (cor.min() < -1.) | (cor.min() > 1.) | (cor.max() < -1.) | \
            (cor.max() > 1.) | sp.any(np.diag(cor) != 1.):
        print('WARNING: The correlation-matrix has some incorrect values')
    tvar = np.diagonal(cor)
    cor = cor / np.sqrt(tvar * tvar[:, None])
    co = cov(da, we)
    var = np.diagonal(co)
    co = cor * np.sqrt(var * var[:, None])
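# The call co = cov(da, we) above estimates the covariance by weighted
# subsampling: da and we are (n_subsamples, n_bins) arrays, one row per
# HEALPix region.  A minimal numpy sketch of such an estimator is given
# below; it is written from the definition of a weighted subsample
# covariance and is only an approximation of what picca's own cov() does,
# and the name subsample_cov_sketch is hypothetical.
import numpy

def subsample_cov_sketch(da, we):
    """Weighted covariance of the mean correlation over subsamples (rows)."""
    swe = we.sum(axis=0)
    mean = (da * we).sum(axis=0)
    w = swe > 0.
    mean[w] /= swe[w]
    # weighted residuals per subsample
    res = we * (da - mean)
    co = res.T.dot(res)
    norm = swe * swe[:, None]
    w2d = norm > 0.
    co[w2d] /= norm[w2d]
    return co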
def var_lss(data, eta_lim=(0.5, 1.5), vlss_lim=(0., 0.3)):
    nlss = 20
    eta = sp.zeros(nlss)
    vlss = sp.zeros(nlss)
    fudge = sp.zeros(nlss)
    err_eta = sp.zeros(nlss)
    err_vlss = sp.zeros(nlss)
    err_fudge = sp.zeros(nlss)
    nb_pixels = sp.zeros(nlss)
    ll = forest.lmin + (sp.arange(nlss) + .5) * (forest.lmax -
                                                 forest.lmin) / nlss

    nwe = 100
    vpmin = sp.log10(1e-5)
    vpmax = sp.log10(2.)
    var = 10**(vpmin + (sp.arange(nwe) + .5) * (vpmax - vpmin) / nwe)

    var_del = sp.zeros(nlss * nwe)
    mdel = sp.zeros(nlss * nwe)
    var2_del = sp.zeros(nlss * nwe)
    count = sp.zeros(nlss * nwe)
    nqso = sp.zeros(nlss * nwe)

    for p in sorted(list(data.keys())):
        for d in data[p]:
            var_pipe = 1 / d.iv / d.co**2
            w = (sp.log10(var_pipe) > vpmin) & (sp.log10(var_pipe) < vpmax)

            bll = ((d.ll - forest.lmin) / (forest.lmax - forest.lmin) *
                   nlss).astype(int)
            bwe = sp.floor((sp.log10(var_pipe) - vpmin) / (vpmax - vpmin) *
                           nwe).astype(int)

            bll = bll[w]
            bwe = bwe[w]

            de = (d.fl / d.co - 1)
            de = de[w]

            bins = bwe + nwe * bll

            c = sp.bincount(bins, weights=de)
            mdel[:len(c)] += c
            c = sp.bincount(bins, weights=de**2)
            var_del[:len(c)] += c
            c = sp.bincount(bins, weights=de**4)
            var2_del[:len(c)] += c
            c = sp.bincount(bins)
            count[:len(c)] += c
            nqso[sp.unique(bins)] += 1

    w = count > 0
    var_del[w] /= count[w]
    mdel[w] /= count[w]
    var_del -= mdel**2
    var2_del[w] /= count[w]
    var2_del -= var_del**2
    var2_del[w] /= count[w]

    bin_chi2 = sp.zeros(nlss)
    fudge_ref = 1e-7
    for i in range(nlss):

        def chi2(eta, vlss, fudge):
            v = var_del[i * nwe:(i + 1) * nwe] - variance(
                var, eta, vlss, fudge * fudge_ref)
            dv2 = var2_del[i * nwe:(i + 1) * nwe]
            w = nqso[i * nwe:(i + 1) * nwe] > 100
            return sp.sum(v[w]**2 / dv2[w])

        mig = iminuit.Minuit(chi2,
                             forced_parameters=("eta", "vlss", "fudge"),
                             eta=1.,
                             vlss=0.1,
                             fudge=1.,
                             error_eta=0.05,
                             error_vlss=0.05,
                             error_fudge=0.05,
                             errordef=1.,
                             print_level=0,
                             limit_eta=eta_lim,
                             limit_vlss=vlss_lim,
                             limit_fudge=(0, None))
        mig.migrad()

        if mig.migrad_ok():
            mig.hesse()
            eta[i] = mig.values["eta"]
            vlss[i] = mig.values["vlss"]
            fudge[i] = mig.values["fudge"] * fudge_ref
            err_eta[i] = mig.errors["eta"]
            err_vlss[i] = mig.errors["vlss"]
            err_fudge[i] = mig.errors["fudge"] * fudge_ref
        else:
            eta[i] = 1.
            vlss[i] = 0.1
            fudge[i] = 1. * fudge_ref
            err_eta[i] = 0.
            err_vlss[i] = 0.
            err_fudge[i] = 0.
        nb_pixels[i] = count[i * nwe:(i + 1) * nwe].sum()
        bin_chi2[i] = mig.fval
        print(eta[i], vlss[i], fudge[i], mig.fval, nb_pixels[i], err_eta[i],
              err_vlss[i], err_fudge[i])

    return ll, eta, vlss, fudge, nb_pixels, var, var_del.reshape(nlss, -1), \
        var2_del.reshape(nlss, -1), count.reshape(nlss, -1), \
        nqso.reshape(nlss, -1), bin_chi2, err_eta, err_vlss, err_fudge
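# The chi2 above compares the measured delta variance in each pipeline-variance
# bin to a model variance(var, eta, vlss, fudge).  In picca this model is of
# the form eta * var_pipe + sigma_lss^2 + fudge / var_pipe, where the fudge
# term absorbs residual noise at very small pipeline variance.  The sketch
# below states that form explicitly; its exact name and signature in the
# surrounding module are assumed here, not taken from the source.
def variance_sketch(var_pipe, eta, vlss, fudge):
    """Model of the total delta variance as a function of pipeline variance."""
    return eta * var_pipe + vlss + fudge / var_pipe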
xcf.rt_max = args.rt_max
xcf.np = args.np
xcf.nt = args.nt
xcf.nside = args.nside
xcf.lambda_abs = constants.absorber_IGM[args.lambda_abs]

cosmo = constants.cosmo(args.fid_Om)

### Read deltas
dels, ndels, zmin_pix, zmax_pix = io.read_deltas(args.in_dir, args.nside,
    xcf.lambda_abs, args.z_evol_del, args.z_ref, cosmo=cosmo,
    nspec=args.nspec, no_project=args.no_project, from_image=args.from_image)
xcf.npix = len(dels)
xcf.dels = dels
xcf.ndels = ndels
print("")
print("done, npix = {}\n".format(xcf.npix))

### Remove <delta> vs. lambda_obs
if not args.no_remove_mean_lambda_obs:
    forest.dll = None
    for p in xcf.dels:
        for d in xcf.dels[p]:
            dll = sp.asarray([d.ll[ii] - d.ll[ii - 1]
                              for ii in range(1, d.ll.size)]).min()
            if forest.dll is None:
                forest.dll = dll
            else:
                forest.dll = min(dll, forest.dll)
    forest.lmin = sp.log10((zmin_pix + 1.) * xcf.lambda_abs) - forest.dll / 2.
    forest.lmax = sp.log10((zmax_pix + 1.) * xcf.lambda_abs) + forest.dll / 2.
    ll, st, wst = prep_del.stack(xcf.dels, delta=True)
parser.add_argument('--nside',
                    type=int,
                    default=16,
                    required=False,
                    help='Healpix nside')

parser.add_argument('--nproc',
                    type=int,
                    default=None,
                    required=False,
                    help='Number of processors')

parser.add_argument('--nspec',
                    type=int,
                    default=None,
                    required=False,
                    help='Maximum number of spectra to read')

args = parser.parse_args()

if args.nproc is None:
    args.nproc = cpu_count() // 2

print("nproc", args.nproc)

cf.rp_max = args.rp_max
cf.rt_max = args.rt_max
cf.rp_min = args.rp_min
cf.z_cut_max = args.z_cut_max
cf.z_cut_min = args.z_cut_min
# npb = number of parallel bins (to avoid collision with numpy np)
cf.npb = args.np
cf.ntb = args.nt
cf.nside = args.nside
cf.zref = args.z_ref
cf.alpha = args.z_evol
cf.alpha2 = args.z_evol
cf.lambda_abs = constants.absorber_IGM[args.lambda_abs]
cf.rej = args.rej
parser.add_argument('--nproc',
                    type=int,
                    default=None,
                    required=False,
                    help='Number of processors')

parser.add_argument('--nspec',
                    type=int,
                    default=None,
                    required=False,
                    help='Maximum number of spectra to read')

parser.add_argument('--unfold-cf',
                    action='store_true',
                    required=False,
                    help='rp can be positive or negative depending on the '
                    'relative position between absorber1 and absorber2')

args = parser.parse_args()

if args.nproc is None:
    args.nproc = cpu_count() // 2

print("nproc", args.nproc)

cf.rp_max = args.rp_max
cf.rt_max = args.rt_max
cf.rp_min = args.rp_min
cf.z_cut_max = args.z_cut_max
cf.z_cut_min = args.z_cut_min
# npb = number of parallel bins (to avoid collision with numpy np)
cf.npb = args.np * args.coef_binning_model
cf.ntb = args.nt * args.coef_binning_model
cf.npm = args.np * args.coef_binning_model
cf.ntm = args.nt * args.coef_binning_model
cf.nside = args.nside
cf.zref = args.z_ref
cf.alpha = args.z_evol
                    default=None,
                    required=False,
                    help='Number of processors')

parser.add_argument('--nspec',
                    type=int,
                    default=None,
                    required=False,
                    help='Maximum number of spectra to read')

args = parser.parse_args()

if args.nproc is None:
    args.nproc = cpu_count() // 2

print("nproc", args.nproc)

cf.rp_max = args.rp_max
cf.rt_max = args.rt_max
cf.rp_min = args.rp_min
cf.z_cut_max = args.z_cut_max
cf.z_cut_min = args.z_cut_min
cf.np = args.np
cf.nt = args.nt
cf.nside = args.nside
cf.zref = args.z_ref
cf.alpha = args.z_evol
cf.alpha2 = args.z_evol
cf.lambda_abs = constants.absorber_IGM[args.lambda_abs]
cf.rej = args.rej
cf.max_diagram = args.max_diagram