Example 1
    def write(self):
        """
        Write lens and source n(z)s to fits file for tomographic and non-tomographic cases.
        """

        nz_source = twopoint.NumberDensity(
            NOFZ_NAMES[0], self.binlow, self.z, self.binhigh,
            [self.nofz[i, :] for i in range(self.tomobins)])

        nz_source.ngal = self.neff
        nz_source.sigma_e = self.sigma_e
        nz_source.area = self.area
        kernels = [nz_source]

        if 'colour_bins' in self.params:
            filename = self.output_path("nz_source_txt").replace(
                "nofz/", "nofz_%s/" % self.samples[0])
        else:
            filename = self.output_path("nz_source_txt")
        np.savetxt(filename, np.vstack((self.binlow, self.nofz)).T)

        if self.params['lensfile'] != 'None':
            nz_lens = twopoint.NumberDensity(
                NOFZ_NAMES[1], self.binlow, self.z, self.binhigh,
                [self.lens_nofz[i, :] for i in range(self.lens_tomobins)])
            nz_lens.ngal = self.lens_neff
            nz_lens.area = self.area
            kernels.append(nz_lens)
            np.savetxt(self.output_path("nz_lens_txt"),
                       np.vstack((self.binlow, self.lens_nofz)).T)

        data = twopoint.TwoPointFile([], kernels, None, None)
        data.to_fits(self.output_path("2pt"), clobber=True)

        self.write_metadata()
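
Once written, the file can be loaded back for inspection. A minimal read-back sketch, assuming the DES twopoint package's TwoPointFile.from_fits reader; the path is hypothetical (substitute self.output_path("2pt") from above):

import twopoint

# Hypothetical path; covmat_name=None is an assumption for files
# written without a covariance extension.
data = twopoint.TwoPointFile.from_fits("2pt.fits", covmat_name=None)
for kernel in data.kernels:
    # Each kernel is a twopoint.NumberDensity, as built in write() above.
    print(kernel.name, kernel.ngal)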
Example 2
def execute(block, config):
    real_space = config['real_space']
    fourier_space = not real_space
    if real_space:
        angle_sample = config['theta']
    else:
        angle_sample = config['ell']

    filename = config['filename']
    shear_nz = config['shear_nz']
    position_nz = config['position_nz']
    clobber = config['clobber']

    make_covariance = config['make_covariance']
    if make_covariance:
        number_density_shear_bin = config['number_density_shear_bin']
        number_density_lss_bin = config['number_density_lss_bin']
        sigma_e_bin = config['sigma_e_bin']
        survey_area = config['survey_area']

    print("Saving two-point data to {}".format(filename))

    spectra = []

    need_source_nz = False
    need_lens_nz = False
    if fourier_space and block.has_section(names.shear_cl):
        name = "shear_cl"
        types = (twopoint.Types.galaxy_shear_emode_fourier,
                 twopoint.Types.galaxy_shear_emode_fourier)
        kernels = (shear_nz, shear_nz)
        s = spectrum_measurement_from_block(block, name, name, types, kernels,
                                            angle_sample, real_space)
        print(" - saving shear_cl")
        need_source_nz = True
        spectra.append(s)

    if fourier_space and block.has_section("galaxy_shear_cl"):
        name = "galaxy_shear_cl"
        types = (
            twopoint.Types.galaxy_position_fourier,
            twopoint.Types.galaxy_shear_emode_fourier,
        )
        kernels = (position_nz, shear_nz)
        s = spectrum_measurement_from_block(block, name, name, types, kernels,
                                            angle_sample, real_space)
        print(" - saving galaxy_shear_cl")
        need_source_nz = True
        need_lens_nz = True
        spectra.append(s)

    if fourier_space and block.has_section("galaxy_cl"):
        name = "galaxy_cl"
        types = (twopoint.Types.galaxy_position_fourier,
                 twopoint.Types.galaxy_position_fourier)
        kernels = (position_nz, position_nz)
        s = spectrum_measurement_from_block(block, name, name, types, kernels,
                                            angle_sample, real_space)
        print(" - saving galaxy_cl")
        need_lens_nz = True
        spectra.append(s)

    if real_space and block.has_section("shear_xi"):
        types = (twopoint.Types.galaxy_shear_plus_real,
                 twopoint.Types.galaxy_shear_plus_real)
        kernels = (shear_nz, shear_nz)
        s = spectrum_measurement_from_block(block, "shear_xi", "xip", types,
                                            kernels, angle_sample, real_space)
        print(" - saving xi_plus")
        need_source_nz = True
        spectra.append(s)

        name = "shear_xi"
        types = (twopoint.Types.galaxy_shear_minus_real,
                 twopoint.Types.galaxy_shear_minus_real)
        kernels = (shear_nz, shear_nz)
        s = spectrum_measurement_from_block(block, "shear_xi", "xim", types,
                                            kernels, angle_sample, real_space)
        need_source_nz = True
        print(" - saving xi_minus")
        spectra.append(s)

    if real_space and block.has_section("galaxy_shear_xi"):
        types = (twopoint.Types.galaxy_position_real,
                 twopoint.Types.galaxy_shear_plus_real)
        kernels = (position_nz, shear_nz)
        s = spectrum_measurement_from_block(block, "galaxy_shear_xi", "gammat",
                                            types, kernels, angle_sample,
                                            real_space)
        print(" - saving gammat")
        need_source_nz = True
        need_lens_nz = True
        spectra.append(s)

    if real_space and block.has_section("galaxy_xi"):
        types = (twopoint.Types.galaxy_position_real,
                 twopoint.Types.galaxy_position_real)
        kernels = (position_nz, position_nz)
        s = spectrum_measurement_from_block(block, "galaxy_xi", "wtheta",
                                            types, kernels, angle_sample,
                                            real_space)
        print(" - saving wtheta")
        need_lens_nz = True
        spectra.append(s)

    if make_covariance:
        covmat_info = covmat_from_block(block, spectra, survey_area,
                                        number_density_shear_bin,
                                        number_density_lss_bin, sigma_e_bin)
    else:
        covmat_info = None

    if not spectra:
        raise ValueError("Sorry - I couldn't find any spectra to save.")

    kernels = []
    if need_source_nz:

        kernels.append(
            nz_from_block(block, shear_nz, config['prefix_nz_section']))
    if need_lens_nz and (shear_nz != position_nz):
        kernels.append(
            nz_from_block(block, position_nz, config['prefix_nz_section']))

    windows = []

    data = twopoint.TwoPointFile(spectra, kernels, windows, covmat_info)

    # Apply cuts
    scale_cuts = config['scale_cuts']
    bin_cuts = config['bin_cuts']
    if scale_cuts or bin_cuts:
        data.mask_scales(scale_cuts, bin_cuts)

    data.to_fits(filename, clobber=clobber)

    return 0
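
For reference, here is a sketch of the config dictionary this execute function expects. Every key below is read somewhere in the function above, but the values themselves are hypothetical:

import numpy as np

config = {
    'real_space': True,                  # if False, 'ell' is read instead of 'theta'
    'theta': np.logspace(0.5, 2.5, 20),  # hypothetical angle sampling (units assumed)
    'filename': '2pt_output.fits',
    'shear_nz': 'nz_source',             # illustrative n(z) section names
    'position_nz': 'nz_lens',
    'clobber': True,
    'make_covariance': False,            # if True, number_density_shear_bin,
                                         # number_density_lss_bin, sigma_e_bin
                                         # and survey_area are also required
    'prefix_nz_section': True,           # passed through to nz_from_block (type assumed)
    'scale_cuts': [],
    'bin_cuts': [],
}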
Example 3
def run_treecorr(config_file, tomo_file, cat_file, output_file):
    config = treecorr.read_config(config_file)

    tomo_data = fitsio.read(tomo_file)
    shear_data = fitsio.read(cat_file)
    tomo_header = fitsio.read_header(tomo_file, 1)
    nbin = tomo_header.get("NBIN")

    gg = treecorr.GGCorrelation(config)
    theta = []
    xip = []
    xim = []
    bin1 = []
    bin2 = []
    angbin = []

    print("Not doing the proper ngmix weighting in this test!")

    for b1 in range(nbin):
        w1 = np.where(tomo_data['BIN'] == b1 + 1)
        d1 = shear_data[w1]
        cat1 = treecorr.Catalog(g1=d1['E_1'],
                                g2=d1['E_2'],
                                w=d1['W'],
                                ra=d1['RA'],
                                dec=d1['DEC'],
                                ra_units='deg',
                                dec_units='deg')
        for b2 in range(nbin):
            if b2 < b1:
                continue

            w2 = np.where(tomo_data['BIN'] == b2 + 1)
            d2 = shear_data[w2]
            cat2 = treecorr.Catalog(g1=d2['E_1'],
                                    g2=d2['E_2'],
                                    w=d2['W'],
                                    ra=d2['RA'],
                                    dec=d2['DEC'],
                                    ra_units='deg',
                                    dec_units='deg')
            gg.process(cat1, cat2)
            ntheta = len(gg.meanr)
            theta.append(gg.meanr)
            xip.append(gg.xip)
            xim.append(gg.xim)
            bin1.append(np.repeat(b1, ntheta))
            bin2.append(np.repeat(b2, ntheta))
            angbin.append(np.arange(ntheta, dtype=int))

    theta = np.concatenate(theta)
    xip = np.concatenate(xip)
    xim = np.concatenate(xim)
    bin1 = np.concatenate(bin1)
    bin2 = np.concatenate(bin2)
    angbin = np.concatenate(angbin)

    tp = twopoint.Types.galaxy_shear_plus_real
    tm = twopoint.Types.galaxy_shear_minus_real

    XIP = twopoint.SpectrumMeasurement("xip", (bin1, bin2), (tp, tp),
                                       ("source", "source"),
                                       "SAMPLE",
                                       angbin,
                                       xip,
                                       angle=theta,
                                       angle_unit="arcmin")
    XIM = twopoint.SpectrumMeasurement("xim", (bin1, bin2), (tm, tm),
                                       ("source", "source"),
                                       "SAMPLE",
                                       angbin,
                                       xim,
                                       angle=theta,
                                       angle_unit="arcmin")

    output = twopoint.TwoPointFile([XIP, XIM], [], [], None)
    output.to_fits(output_file, clobber=True)
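
A minimal driver for this function; the four paths are hypothetical:

run_treecorr('treecorr_config.yaml',  # any format treecorr.read_config accepts
             'tomo_catalog.fits',
             'shear_catalog.fits',
             'xipm_output.fits')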
Example 4
def execute(block, config):

    real_space = config['real_space']
    fourier_space = not real_space

    filename = config['filename']
    #shear_nz = config['shear_nz']
    #position_nz = config['position_nz']
    overwrite = config['overwrite']

    make_covariance = config['make_covariance']

    print("Saving two-point data to {}".format(filename))

    theory_spec_list = []
    cl_theory_spec_list = []
    cl_to_xi_type_list = []
    spec_meas_list = []
    kernels = []
    no_kernel_found = []

    #Loop through spectrum_sections, generating a SpectrumMeasurement
    #for each.
    #If we're generating the covariance for real space spectra, also 
    #generate a TheorySpectrum for the corresponding Cl.
    print("Generating twopoint file with the following spectra:")
    print(config['spectrum_sections'])
    for i_spec in range( len(config["spectrum_sections"]) ):
        spectrum_section = config["spectrum_sections"][i_spec]
        output_extension = config["output_extensions"][i_spec]

        #Read in sample information from block
        sample_a, sample_b = ( block[spectrum_section, "sample_a"], 
                               block[spectrum_section, "sample_b"] )
        kernel_name_a, kernel_name_b = "nz_"+sample_a, "nz_"+sample_b
        
        #Get kernels
        if (kernel_name_a not in [k.name for k in kernels]) and (kernel_name_a not in no_kernel_found):
            if block.has_section(kernel_name_a):
                kernels.append(twopoint.NumberDensity.from_block(block, kernel_name_a))
            else:
                no_kernel_found.append(kernel_name_a)
        if (kernel_name_b not in [k.name for k in kernels]) and (kernel_name_b not in no_kernel_found):
            if block.has_section(kernel_name_b):
                kernels.append(twopoint.NumberDensity.from_block(block, kernel_name_b))
            else:
                no_kernel_found.append(kernel_name_b)

        if len(no_kernel_found)>0:
            print("No kernel found for kernel names:", no_kernel_found)
            print("This might not be a problem e.g. for CMB lensing.")

        theory_spec = TheorySpectrum.from_block( block, spectrum_section )
        theory_spec_list.append(theory_spec)

        #get angle_units
        if config["angle_units"] is not None:
            angle_units = config['angle_units'].name
        else:
            angle_units = None
        spec_meas_list.append( theory_spec.get_spectrum_measurement( config['angle_mids_userunits'], 
            (kernel_name_a, kernel_name_b), output_extension, angle_lims = config['angle_lims_userunits'], 
            angle_units=angle_units ) )
        
        if make_covariance:
            if real_space:
                #In this case we also need the corresponding Cl spectra to generate the covariance
                cl_section = config["cl_sections"][i_spec]
                cl_spec = TheorySpectrum.from_block( block, cl_section )
                cl_theory_spec_list.append( cl_spec )
                #Check cls have the same bin pairings as their corresponding real-space spectra
                try:
                    assert cl_spec.bin_pairs == theory_spec_list[i_spec].bin_pairs
                except AssertionError:
                    print("cl and xi specs have different bin_pairs:")
                    print("sections were %s and %s" % (cl_section, spectrum_section))
                    print("cl bin pairs:", cl_spec.bin_pairs)
                    print("xi bin pairs:", theory_spec_list[i_spec].bin_pairs)
                    raise

    if not real_space:
        cl_theory_spec_list = theory_spec_list

    if make_covariance:
        #First we need to get the ClCov
        #For the covariance matrix, we may need to read in more Cls - e.g. if we want 
        #a covariance for Cl_ab and Cl_cd, we require Cl_ad, Cl_ac, Cl_bc and Cl_bd for 
        #the covariance calculation.
        #So the following checks the types of Cl_ab and Cl_cd, and from that infers
        #the required spectra.
        types = []
        for spec in cl_theory_spec_list:
            type_i, type_j = spec.types
            if type_i not in types:
                types.append(type_i)
            if type_j not in types:
                types.append(type_j)
        cl_specs = []
        for (i,type_1) in enumerate(types):
            for (j,type_2) in enumerate(types[i:]):
                print("Getting cls for cov:")
                print("type_1:", type_1)
                print("type_2:", type_2)
                #Get the cl section name for these types from the type_table
                #(easier access to the type_table info would be welcome here)
                try:
                    cl_section = type_table[(type_1.name, type_2.name)][0]
                except KeyError:
                    cl_section = type_table[(type_2.name, type_1.name)][0]
                assert cl_section not in [s.name for s in cl_specs]
                cl_spec = TheorySpectrum.from_block( block, cl_section )
                #Add noise if necessary
                if (cl_spec.types[0] == cl_spec.types[1]):
                    if cl_spec.types[0].name == "galaxy_shear_emode_fourier":
                        noise = ([ (s**2 / 2 / n) for (s,n) in 
                                 zip(config['sigma_e'],config['number_density_shear_rad2']) ])
                    elif cl_spec.types[0].name == "galaxy_position_fourier":
                        noise = [ 1./n for n in config['number_density_lss_rad2'] ]
                    else:
                        raise ValueError("Cannot generate noise for spectrum %s" % cl_section)
                    cl_spec.set_noise(noise)

                cl_specs.append( cl_spec )

        cl_cov = ClCov(cl_specs, fsky=config['fsky'])

        if real_space:

            #If requested, apply bin cuts now - this will speed up the covariance calculation
            #Need to apply cuts to real space spectra and cls
            for (name, b1, b2) in config['bin_cuts']:
                print("cutting %d,%d from %s"%(b1,b2,name))
                spec_index = config['output_extensions'].index(name)
                spec_meas_list[spec_index].cut_bin_pair( (b1,b2), complain=True )
                cl_theory_spec_list[spec_index].cut_bin_pair( (b1,b2) )

            cov_blocks, covmat, xi_starts, xi_lengths = real_space_cov( cl_cov, 
                cl_theory_spec_list, config['cl_to_xi_types'], 
                config['ell_max'], config['angle_lims'], 
                upsample=config['upsample_cov'], 
                high_l_filter = config['high_l_filter'] )
            covmat_info = twopoint.CovarianceMatrixInfo( 'COVMAT', [s.name for s in spec_meas_list], 
                                                         xi_lengths, covmat )

        else:
            covmat, cl_lengths = cl_cov.get_binned_cl_cov(config['angle_lims'])
            assert covmat.shape[0] == sum([len(s.value) for s in spec_meas_list])
            covmat_info = twopoint.CovarianceMatrixInfo( 'COVMAT', [s.name for s in spec_meas_list],
                                                         [len(s.value) for s in spec_meas_list], covmat )
    else:
        covmat_info = None

    if not spec_meas_list:
        raise ValueError("Sorry - I couldn't find any spectra to save.")

    windows = []

    data = twopoint.TwoPointFile(spec_meas_list, kernels, windows, covmat_info)

    # Apply cuts
    scale_cuts = config['scale_cuts']
    bin_cuts = config['bin_cuts']
    if scale_cuts or bin_cuts:
        data.mask_scales(scale_cuts, bin_cuts)

    data.to_fits(filename, overwrite=overwrite)

    return 0
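
The shot-noise terms above expect number densities per steradian (the 'number_density_shear_rad2' and 'number_density_lss_rad2' config entries). A small conversion sketch from the more common per-arcmin^2 units; the conversion factor is exact, the input densities are hypothetical:

import numpy as np

n_eff_arcmin2 = [1.5, 1.5, 1.5, 1.5]        # hypothetical per-bin densities
arcmin2_per_sr = (60. * 180. / np.pi)**2    # ~1.18e7 arcmin^2 per steradian
number_density_shear_rad2 = [n * arcmin2_per_sr for n in n_eff_arcmin2]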
Example 5
def main():

    args = parse_args()
    if args.q_cat is not None:
        assert args.q_coeff_cat is not None

    psf_data = read_psf_data(args)

    #if requested, apply gold footprint mask and/or gold badregions mask
    use = np.ones(len(psf_data), dtype=bool)
    if args.gold_fp_mask:
        fp = hu.Map("ring", hp.read_map(args.gold_fp_mask))
        fp_vals = fp.get_mapval(psf_data['ra'], psf_data['dec'])
        use[fp_vals < 1] = False
        print(float(use.sum()) / len(use))
    if args.gold_br_mask:
        br = hu.Map("ring", hp.read_map(args.gold_br_mask))
        br_vals = br.get_mapval(psf_data['ra'], psf_data['dec'])
        use[br_vals > 0] = False
        print(float(use.sum()) / len(use))
    if use.sum() != len(use):
        print('gold masks leave fraction %f of stars' % (float(use.sum()) /
                                                         len(use)))
    psf_data = psf_data[use]

    theta_min, theta_max, nbins = 0.25, 250., args.nbins
    bin_slop = args.bin_slop
    sep_units = 'arcmin'
    gg = treecorr.GGCorrelation(nbins=nbins,
                                min_sep=theta_min,
                                max_sep=theta_max,
                                sep_units=sep_units,
                                verbose=1,
                                bin_slop=bin_slop)

    if args.z_bin_lims:
        num_z_bins = len(args.z_bin_lims) - 1
    else:
        num_z_bins = 1

    shape_colnames = homog_colnames

    #read data
    if args.pipeline == 'metacal':
        shape_data, shape_mask_0, shape_masks_sel, common_mask, read_rows_union, area_deg = get_mcal_cat(
            args.shape_cat,
            args.cut_dict,
            mcal_cols=[
                'e1', 'e2', 'psf_e1', 'psf_e2', 'R11', 'R22', 'ra', 'dec'
            ],
            test_nrows=args.test_nrows,
            add_Rmean_col=True)

        if args.z_bin_lims:
            #supplying z_bin_lims implies you want to do some 'tomographic' redshift bin splitting, so read in some redshift info too
            z_arrays = get_mcal_photoz(read_rows=read_rows_union)
            #pylab.hist(z_arrays[0],bins=20)
            #pylab.show()
            #first one is the one used for the unsheared binning (and therefore the measurement), so add this to the shape_data array
            shape_data = nmbot.add_field(shape_data, [("mean_z", float)],
                                         [z_arrays[0]])

        #now make a McalCat and apply correction
        shape_cat = corrtools.McalCat(shape_data,
                                      shape_mask_0,
                                      shape_masks_sel,
                                      quantities=[('e1', 'e2'),
                                                  ('psf_e1', 'psf_e2')])
        print(len(shape_cat.arr_data))
        if args.z_bin_lims:
            print(shape_cat.arr_data['mean_z'].min(),
                  shape_cat.arr_data['mean_z'].max())
            shape_cat.redshift_split(zcol='mean_z', z_bin_lims=args.z_bin_lims)
            shape_cat.sheared_redshift_split(z_arrs_sheared=z_arrays[1:])
            shape_cat.apply_mcal_correction()
            print(shape_cat.bin_stats)

    else:
        assert args.pipeline == "im3shape"
        shape_data, read_rows, area_deg = get_im3_cat(
            args.shape_cat,
            args.cut_dict,
            im3_cols=IM3_COLS_DEFAULT + ['psf_e1', 'psf_e2'],
            test_nrows=None,
            apply_c=True)
        print(shape_data.dtype.names)
        if args.z_bin_lims:
            z_array = get_photoz(read_rows=read_rows)
            shape_data = nmbot.add_field(shape_data, [("mean_z", float)],
                                         [z_array])
        shape_cat = corrtools.GalCat(shape_data,
                                     quantities=[('e1', 'e2'),
                                                 ('psf_e1', 'psf_e2')])
        if args.z_bin_lims:
            shape_cat.redshift_split(zcol='mean_z', z_bin_lims=args.z_bin_lims)
            shape_cat.apply_m_correction()

        print(shape_cat.bin_stats)
        weight_col = 'weight'

    if args.q_cat:
        q_data = fitsio.read(args.q_cat, rows=read_rows_union)[shape_mask_0]
        if args.z_bin_lims:
            q_data = q_data[
                (shape_data[shape_mask_0]["mean_z"] > args.z_bin_lims[0]) *
                (shape_data["mean_z"][shape_mask_0] < args.z_bin_lims[-1])]
        print "len(q_data), len(shape_cat.arr_data)", len(q_data), len(
            shape_cat.arr_data)
        #shape_data,q_data=shape_data[shape_mask_0],q_data[shape_mask_0]
        #shape_data = nmbot.add_field(shape_data, [('de1',float),('de2',float)], [q_data['de1'],q_data['de2']])
        use = np.ones(len(q_data), dtype='bool')
        use[np.isnan(q_data['e1'])] = False
        use[q_data['de1'] < -1.] = False
        use[q_data['de2'] < -1.] = False
        print('fraction %f has bad q data, will use mean qs for these objects' %
              (1 - float(use.sum()) / len(use)))
        mean_q1 = q_data['de1'][use].mean()
        mean_q2 = q_data['de2'][use].mean()
        q_data['de1'][~use] = mean_q1
        q_data['de2'][~use] = mean_q2
        #now compute corrections
        with open(args.q_coeff_cat, 'r') as f:
            coeff_lines = f.readlines()
        for l in coeff_lines:
            if l[0] == '#':
                continue
            l_entries = (l.strip()).split()
            zbin, x, y, alpha = int(
                l_entries[0]), l_entries[1], l_entries[2], float(l_entries[3])
            if (x == 'de1' and y == 'e1'):
                shape_cat.arr_data['e1'][shape_cat.zbin_masks[
                    zbin]] -= alpha * q_data['de1'][shape_cat.zbin_masks[zbin]]
            elif (x == 'de2' and y == 'e2'):
                shape_cat.arr_data['e2'][shape_cat.zbin_masks[
                    zbin]] -= alpha * q_data['de2'][shape_cat.zbin_masks[zbin]]
            else:
                continue
            print('zbin, x, y, alpha', zbin, x, y, alpha)

    #sh_cat,im3_psf_cat = make_shape_treecorr_cats(shape_data, shape_colnames)
    if not os.path.isdir(args.outdir):
        os.makedirs(args.outdir)
    #recalculate post-correction bin stats and save
    shape_cat.get_bin_stats()
    with open(pj(args.outdir, "bin_stats_shape.pkl"), "wb") as f:
        pickle.dump(shape_cat.bin_stats, f)

    print(psf_data.dtype.names)
    psf_corr_names = ['P', 'p', 'q']
    #psf_quantities = [(args.psf_model_prefix+'_e1', args.psf_model_prefix+'_e2'),('de1','de2')]
    psf_quantities = [('e1', 'e2'),
                      (args.psf_model_prefix + '_e1',
                       args.psf_model_prefix + '_e2'), ('de1', 'de2')]
    if args.dTpT:
        psf_data = nmbot.add_field(
            psf_data, [('dse_1', float), ('dse_2', float)], [
                psf_data['dsize'] * psf_data['e1'] / psf_data['size'],
                psf_data['dsize'] * psf_data['e2'] / psf_data['size']
            ])
        psf_quantities.append(('dse_1', 'dse_2'))
        psf_corr_names.append('dse')

    elif args.dTp:
        psf_data = nmbot.add_field(psf_data,
                                   [('dse_1', float), ('dse_2', float)], [
                                       psf_data['dsize'] * psf_data['e1'],
                                       psf_data['dsize'] * psf_data['e2']
                                   ])
        psf_quantities.append(('dse_1', 'dse_2'))
        psf_corr_names.append('dse')

    psf_cat = corrtools.GalCat(psf_data,
                               x_col='ra',
                               y_col='dec',
                               quantities=psf_quantities,
                               w_col=args.star_weight_col)
    psf_cat.redshift_split()
    with open(pj(args.outdir, "bin_stats_psf.pkl"), "wb") as f:
        pickle.dump(psf_cat.bin_stats, f)

    sh = (shape_colnames['e1'], shape_colnames['e2'])
    sh_psf = (shape_colnames['psf_e1'], shape_colnames['psf_e2'])
    sh_quantities = [sh, sh_psf]

    cross_corrs = []
    cross_specs = []
    psf_auto_specs = []
    varxi_arr = []
    #first do shear cross psf(gal)
    epg = corrtools.CorrMeasurement(shape_cat,
                                    sh,
                                    q2=sh_psf,
                                    XX=gg,
                                    sub_mean=True)
    spec_epg, varxis = epg.get_spec(['epg'] * 2,
                                    'NZ_DUMMY',
                                    kernel_name2='NZ_DUMMY',
                                    ret_varxi_arrs=True)
    cross_corrs.append(epg)
    cross_specs.append(spec_epg[0])
    varxi_arr += list(varxis[0].copy())
    #and psf(gal) auto
    #do gp auto
    pg = corrtools.CorrMeasurement(shape_cat, sh_psf, XX=gg, sub_mean=True)
    spec_pg, varxis = pg.get_spec(['pgpg'] * 2,
                                  'NZ_DUMMY',
                                  ret_varxi_arrs=True)
    varxi_arr += list(varxis[0].copy())
    psf_auto_specs.append(spec_pg[0])

    for i in range(len(psf_quantities)):
        q2_i, name_i = psf_quantities[i], psf_corr_names[i]
        # e x p
        ep = corrtools.CorrMeasurement(shape_cat,
                                       sh,
                                       gal_cat2=psf_cat,
                                       q2=q2_i,
                                       XX=gg,
                                       sub_mean=True)
        sp, varxis = ep.get_spec(['e' + name_i] * 2,
                                 'NZ_DUMMY',
                                 kernel_name2='NZ_DUMMY',
                                 ret_varxi_arrs=True)
        cross_corrs.append(ep)
        cross_specs.append(sp[0])  #just use xip
        varxi_arr += list(varxis[0].copy())
        #do p(gal) x p
        pgp = corrtools.CorrMeasurement(shape_cat,
                                        sh_psf,
                                        gal_cat2=psf_cat,
                                        q2=q2_i,
                                        XX=gg,
                                        sub_mean=True)
        spec_pgp, varxis = pgp.get_spec(['pg' + name_i] * 2,
                                        'NZ_DUMMY',
                                        kernel_name2='NZ_DUMMY',
                                        ret_varxi_arrs=True)
        psf_auto_specs.append(spec_pgp[0])
        varxi_arr += list(varxis[0].copy())
        #sp = corrtools.SpectrumMeasurement.from_galcats(['e'+name_i]*2, shape_cat, sh, 'NZ_DUMMY', gal_cat2=psf_cat, q2=q2_i,
        #                                                XX=gg, kernel_name2='NZ_DUMMY')
        for j in range(i, len(psf_quantities)):
            print(name_i, psf_corr_names[j])
            if i == j:
                pp = corrtools.CorrMeasurement(psf_cat,
                                               q2_i,
                                               XX=gg,
                                               sub_mean=True)
                sp, varxis = pp.get_spec([name_i + name_i] * 2,
                                         'NZ_DUMMY',
                                         ret_varxi_arrs=True)
                #pp = corrtools.SpectrumMeasurement.from_galcats([name_i+name_i]*2, psf_cat, q2_i, 'NZ_DUMMY', XX=gg)
            else:
                q2_j, name_j = psf_quantities[j], psf_corr_names[j]
                pp = corrtools.CorrMeasurement(psf_cat,
                                               q2_i,
                                               XX=gg,
                                               q2=q2_j,
                                               sub_mean=True)
                sp, varxis = pp.get_spec([name_i + name_j] * 2,
                                         'NZ_DUMMY',
                                         ret_varxi_arrs=True)
                #pp = corrtools.SpectrumMeasurement.from_galcats([name_i+name_j]*2, psf_cat, q2_i, 'NZ_DUMMY', XX=gg, q2=q2_j, kernel_name2='NZ_DUMMY')
            psf_auto_specs.append(sp[0])
            varxi_arr += list(varxis[0].copy())

    print('varxi_arr', varxi_arr)
    print('cross_specs', cross_specs)
    print('psf_auto_specs', psf_auto_specs)

    #make shape noise covariance
    shape_noise_cov = np.diag(np.array(varxi_arr))
    shape_noise_cov_info = twopoint.CovarianceMatrixInfo(
        "COV_SHAPENOISE",
        [s.name for s in cross_specs] + [s.name for s in psf_auto_specs],
        [len(s.value)
         for s in cross_specs] + [len(s.value)
                                  for s in psf_auto_specs], shape_noise_cov)
    #No covariance computation, just save measurement
    t = twopoint.TwoPointFile(cross_specs + psf_auto_specs,
                              [twopoint.dummy_kernel("NZ_DUMMY")], None,
                              shape_noise_cov_info)
    t.to_fits(pj(args.outdir, 'corrs_covsn.fits'), clobber=True)
Example 6
    np.save(
        'w_dict_bs{bin_slop}_{label}.npy'.format(bin_slop=bin_slop,
                                                 label=label),
        w_dict,
    )
    np.save(
        'corr_dictbs{bin_slop}_{label}.npy'.format(bin_slop=bin_slop,
                                                   label=label),
        corr_dict,
    )

    angle_edges = np.logspace(np.log10(thetamin * 60.),
                              np.log10(thetamax * 60.), 21)
    w_dict['angle_min'] = angle_edges[:-1]
    w_dict['angle_max'] = angle_edges[1:]

    spectrum = lsssys.corrdict_2_spectrumtype(
        w_dict,
        autoonly=True,
        name='wtheta',
        kernel1='nz_lens',
        kernel2='nz_lens',
    )

    tp = twopoint.TwoPointFile([spectrum],
                               kernels=None,
                               windows={},
                               covmat_info=None)
    tp.to_fits(
        'wtheta_redmagic_y3_data_bs{bin_slop}_{label}_UNBLIND.fits'.format(
            bin_slop=bin_slop, label=label))
Example 7
import numpy as np
import twopoint

filename_list = [
    'wz_sim_sample2_specz_hist.txt',
    'wz_sim_sample4_specz_hist.txt',
    'wz_sim_sample6_specz_hist.txt',
]

outfile = 'wz_sim_sample246_maglim_specz_hist.fits'

nzs = []
for ifile, filename in enumerate(filename_list):
    zmid1, nz = np.loadtxt(filename)
    if ifile == 0:
        zmid = zmid1
    assert (zmid == zmid1).all()

    nzs.append(nz)

dz = zmid[1] - zmid[0]
zlow = zmid - dz / 2.
zhigh = zmid + dz / 2.

k = twopoint.NumberDensity('nz_lens', zlow, zmid, zhigh, nzs)
tp = twopoint.TwoPointFile([], [k], [], None)
tp.to_fits(outfile)
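
As a quick sanity check, the kernel-only file can be read back; a sketch assuming the same twopoint package (covmat_name=None because no covariance was written, an assumption about this reader's keyword):

data = twopoint.TwoPointFile.from_fits(outfile, covmat_name=None)
print([kernel.name for kernel in data.kernels])   # expect ['nz_lens']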