def get_cross_pairwise_ksz(df1, df2, params):
    '''Wrapper for calling pairwise_cross_ksz using a cross_dataset object.

    params: params object from paramTools.py
    '''
    dT1 = df1.dT.values
    dT2 = df2.dT.values
    # This requires Dc and z values to be the same in both catalogs,
    # so both are taken from df1.
    z = df1.z.values
    Dc = df1.Dc.values

    # Pick the z-averaging implementation once, then apply it to both maps.
    smooth = get_tzav_fast if params.GET_TZAV_FAST else get_tzav
    tzav1 = smooth(dT1, z, params.SIGMA_Z)
    tzav2 = smooth(dT2, z, params.SIGMA_Z)

    # Sky positions shared by both datasets.
    ra_deg = df1.ra.values
    dec_deg = df1.dec.values

    assert params.UNEVEN_BINS  # only uneven bins supported for this
    bin_edges = params.BIN_EDGES

    if params.DO_VARIANCE_WEIGHTED:
        assert False  # not implemented

    r_sep, p_uk = cross_pairwise_ksz_uneven_bins(Dc, ra_deg, dec_deg,
                                                 tzav1, tzav2,
                                                 dT1, dT2, bin_edges)
    return r_sep, p_uk
def test_get_pairwise_ksz():
    '''The high-level get_pairwise_ksz API must match the low-level
    pairwise_ksz / pairwise_ksz_uneven_bins entry points.'''
    testPath = '/'.join((catalogTools.__file__).split('/')[:-2]) + '/tests/'
    paramFilePath = os.path.join(testPath, 'data_toTestAPI/params.ini')
    params = paramTools.params(paramFilePath)
    df = produceFakeCatalog()

    # Result through the API wrapper.
    rsep, p_uk = pairwiser.get_pairwise_ksz(df, params, multithreading=False)

    # Same computation driven by hand.
    tzav = pairwiser.get_tzav(df.dT.values, df.z.values, params.SIGMA_Z)
    if params.UNEVEN_BINS:
        rsep0, p_uk0 = pairwiser.pairwise_ksz_uneven_bins(
            df.Dc.values, df.ra.values, df.dec.values,
            tzav, df.dT.values,
            params.BIN_EDGES,
            multithreading=False)
    else:
        rsep0, p_uk0 = pairwiser.pairwise_ksz(
            df.Dc.values, df.ra.values, df.dec.values,
            tzav, df.dT.values,
            params.BIN_SIZE_MPC, params.N_BINS,
            multithreading=False)

    rsep_diff_sq = np.sum((rsep - rsep0)**2)
    p_uk_diff_sq = np.sum((p_uk - p_uk0)**2)
    assert rsep_diff_sq < 1e-10
    assert p_uk_diff_sq < 1e-10
def test_get_tzav_fast():
    '''get_tzav_fast must agree with the exact get_tzav to within 1e-8.

    FIX: seed the RNG so the random catalog — and hence the tolerance
    check — is deterministic instead of flaking on unlucky draws.
    '''
    np.random.seed(0)
    N_gals = 15000
    sigma_z = 0.01
    z = np.random.uniform(size=N_gals)
    dT = np.random.normal(size=N_gals)
    tzav = pairwiser.get_tzav(dT, z, sigma_z)
    tzav_fast = pairwiser.get_tzav_fast(dT, z, sigma_z)
    # Sum of squared residuals between the exact and fast implementations.
    chisq = np.sum((tzav - tzav_fast)**2)
    assert chisq < 1e-8
def test_get_tzav():
    '''Compare pairwiser.get_tzav against a plain-NumPy reference.

    The reference computes, for each z_j, the Gaussian-in-redshift
    weighted average of dT:  T(z_j) = sum_i dT_i w_ij / sum_i w_ij,
    with w_ij = exp(-(z_j - z_i)^2 / (2 sigma_z^2)).

    FIXES: seed the RNG so the test is deterministic; compute the
    Gaussian weights once per iteration instead of twice.
    '''
    np.random.seed(0)
    size = 1000
    dTs = np.random.uniform(low=0, high=100, size=size)
    zs = np.random.uniform(low=0, high=100, size=size)
    dTs = dTs + zs
    sigma_z = 10
    numer = np.empty_like(dTs)
    denom = np.empty_like(dTs)
    for j in range(size):
        # Hoist the shared Gaussian weight vector (was evaluated twice).
        w = np.exp(-(zs[j] - zs)**2 / (2 * sigma_z**2))
        numer[j] = np.sum(dTs * w)
        denom[j] = np.sum(w)
    Tz_numpy = numer / denom
    Tz_numba = pairwiser.get_tzav(dTs, zs, sigma_z)
    diff_sq = (Tz_numpy - Tz_numba)**2
    error = diff_sq.sum()
    assert error < 1e-10
def test_varianceWeighted():
    '''Tests variance_weighted_pairwise_ksz and
    variance_weighted_pairwise_one_row.

    With unit divisors (div = 1 for every galaxy) the variance-weighted
    estimator should reduce to the plain pairwise kSZ estimator, so the
    two p_uk curves must agree.

    BUG FIX: the tolerance was written as 1.0e10, which passes for any
    result and made the assertion vacuous; it is now 1.0e-10, matching
    the tolerances used in test_get_pairwise_ksz.
    '''
    testPath = '/'.join((catalogTools.__file__).split('/')[:-2]) + '/tests/'
    testParamFileFullPath = os.path.join(testPath, 'data_toTestAPI/params.ini')
    params = paramTools.params(testParamFileFullPath)
    df = produceFakeCatalog()
    rsep, p_uk = pairwiser.get_pairwise_ksz(df, params, multithreading=False)
    tzav = pairwiser.get_tzav(df.dT.values, df.z.values, params.SIGMA_Z)
    div = np.ones(len(tzav))  # unit weights -> no effective weighting
    rsep0, p_uk0 = pairwiser.variance_weighted_pairwise_ksz(
        df.Dc.values, df.ra.values,  # noqa
        df.dec.values, tzav, df.dT.values, div,
        params.BIN_SIZE_MPC, params.N_BINS,
        multithreading=False)
    chisq = np.sum((p_uk - p_uk0)**2)
    assert chisq < 1.0e-10
'''Reads a preprocessed catalog and computes tzav for it.

Writes a file with the resulting numpy array.

Written by: P. Gallardo.
'''
import numpy as np  # noqa
from iskay import catalogTools  # noqa
from iskay import pairwiser  # noqa

# Look in the parent directory for the aperture-photometry results.
df = catalogTools.preProcessedCat(directory='../ApPhotoResults').df

# Width of the Gaussian redshift window used for the dT average.
sigma_z = 0.01
dT = df.dT.values
z = df.z.values
tzav = pairwiser.get_tzav(dT, z, sigma_z)

# Saved positionally, so the array lands under the default key 'arr_0'
# in tzav_allCat.npz.
np.savez('tzav_allCat', tzav)