Example #1
    def compute(self, psf, stars, logger=None):
        """
        :param psf:         A PSF Object
        :param stars:       A list of Star instances.
        :param logger:      A logger object for logging debug info. [default: None]
        """
        import treecorr

        # get the shapes
        if logger:
            logger.warning("Calculating rho statistics for %d stars", len(stars))
        positions, shapes_truth, shapes_model = self.measureShapes(psf, stars, logger=logger)

        # Only use stars for which hsm was successful
        flag_truth = shapes_truth[:, 6]
        flag_model = shapes_model[:, 6]
        mask = (flag_truth == 0) & (flag_model == 0)

        # define terms for the catalogs
        u = positions[mask, 0]
        v = positions[mask, 1]
        T = shapes_truth[mask, 3]
        g1 = shapes_truth[mask, 4]
        g2 = shapes_truth[mask, 5]
        dT = T - shapes_model[mask, 3]
        dg1 = g1 - shapes_model[mask, 4]
        dg2 = g2 - shapes_model[mask, 5]

        # make the treecorr catalogs
        if logger:
            logger.info("Creating Treecorr Catalogs")

        cat_g = treecorr.Catalog(x=u, y=v, x_units='arcsec', y_units='arcsec',
                                 g1=g1, g2=g2)
        cat_dg = treecorr.Catalog(x=u, y=v, x_units='arcsec', y_units='arcsec',
                                  g1=dg1, g2=dg2)
        cat_gdTT = treecorr.Catalog(x=u, y=v, x_units='arcsec', y_units='arcsec',
                                    g1=g1 * dT / T, g2=g2 * dT / T)

        # setup and run the correlations
        if logger:
            logger.info("Processing rho PSF statistics")

        # save the rho objects
        self.rho1 = treecorr.GGCorrelation(self.tckwargs)
        self.rho1.process(cat_dg)
        self.rho2 = treecorr.GGCorrelation(self.tckwargs)
        self.rho2.process(cat_g, cat_dg)
        self.rho3 = treecorr.GGCorrelation(self.tckwargs)
        self.rho3.process(cat_gdTT)
        self.rho4 = treecorr.GGCorrelation(self.tckwargs)
        self.rho4.process(cat_dg, cat_gdTT)
        self.rho5 = treecorr.GGCorrelation(self.tckwargs)
        self.rho5.process(cat_g, cat_gdTT)
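This example assumes self.tckwargs holds the TreeCorr binning configuration. A minimal, hypothetical sketch of such a config and of the rho1 pattern on synthetic residual shears (all names and values below are illustrative, not from the original):

import numpy as np
import treecorr

# Hypothetical binning config; any kwargs accepted by treecorr.GGCorrelation work here.
tckwargs = dict(min_sep=0.5, max_sep=300., sep_units='arcmin', bin_size=0.2)

# Synthetic residual shears standing in for the (dg1, dg2) from measureShapes().
rng = np.random.default_rng(42)
u, v = rng.uniform(0., 3600., size=(2, 1000))     # positions in arcsec
dg1, dg2 = 0.01 * rng.standard_normal((2, 1000))  # shear residuals

cat_dg = treecorr.Catalog(x=u, y=v, x_units='arcsec', y_units='arcsec', g1=dg1, g2=dg2)
rho1 = treecorr.GGCorrelation(tckwargs)
rho1.process(cat_dg)
print(np.exp(rho1.meanlogr), rho1.xip)            # bin centers and xi+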
Example #2
def get_corr(res, weights):

    import treecorr
    import warnings
    import numpy as np

    # config and args are assumed to be defined at module level.
    nbins = config['treecorr_n_bins']
    min_sep = config['treecorr_min_sep']
    max_sep = config['treecorr_max_sep']
    sep_units = 'arcmin'

    shape_cat = treecorr.Catalog(g1=res['e1'],
                                 g2=res['e2'],
                                 ra=res['ra'],
                                 dec=res['de'],
                                 ra_units='deg',
                                 dec_units='deg',
                                 w=weights)
    gg = treecorr.GGCorrelation(nbins=nbins,
                                min_sep=min_sep,
                                max_sep=max_sep,
                                sep_units=sep_units,
                                verbose=2)
    gg.process(shape_cat)

    responsivity_cat = treecorr.Catalog(g1=res['m1'],
                                        g2=res['m2'],
                                        ra=res['ra'],
                                        dec=res['de'],
                                        ra_units='deg',
                                        dec_units='deg',
                                        w=weights)
    mm = treecorr.GGCorrelation(nbins=nbins,
                                min_sep=min_sep,
                                max_sep=max_sep,
                                sep_units=sep_units,
                                verbose=2)
    mm.process(responsivity_cat)

    if args.use_responsivity:
        xip = gg.xip / mm.xip * 2.  # The real part of xi+
        xim = gg.xim / mm.xim * 2.  # The real part of xi-
        warnings.warn('using responsivity')
    else:
        xip = gg.xip  # The real part of xi+
        xim = gg.xim  # The real part of xi-

    logr = gg.logr  # The nominal center of each bin
    meanlogr = gg.meanlogr  # The mean <log(r)> within the bins
    varxi = gg.varxi  # The variance of each xi+ or xi- value taking into account shape noise only
    stdxi = np.sqrt(varxi)

    return logr, xip, xim, stdxi
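For context, a minimal sketch of the module-level config and args objects this function assumes (both hypothetical):

from types import SimpleNamespace

config = {'treecorr_n_bins': 20, 'treecorr_min_sep': 1.0, 'treecorr_max_sep': 100.0}
args = SimpleNamespace(use_responsivity=False)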
Example #3
def run_treecorr(x, y, g1, g2, min_sep, max_sep, nbins):
    """Run treecorr on GalSim shear grid routine"""

    assert x.shape == y.shape
    assert x.shape == g1.shape
    assert x.shape == g2.shape

    x_col = pyfits.Column(name='x', format='1D', array=x.flatten())
    y_col = pyfits.Column(name='y', format='1D', array=y.flatten())
    g1_col = pyfits.Column(name='g1', format='1D', array=g1.flatten())
    g2_col = pyfits.Column(name='g2', format='1D', array=g2.flatten())
    cols = pyfits.ColDefs([x_col, y_col, g1_col, g2_col])
    table = pyfits.new_table(cols)
    phdu = pyfits.PrimaryHDU()
    hdus = pyfits.HDUList([phdu, table])
    hdus.writeto('temp.fits', clobber=True)

    # Define the treecorr catalog object.
    cat = treecorr.Catalog('temp.fits',
                           x_units='degrees',
                           y_units='degrees',
                           x_col='x',
                           y_col='y',
                           g1_col='g1',
                           g2_col='g2')
    gg = treecorr.GGCorrelation(min_sep=min_sep,
                                max_sep=max_sep,
                                nbins=nbins,
                                sep_units='degrees',
                                bin_slop=0.2)
    gg.process(cat)
    os.remove('temp.fits')

    return {'log_r': gg.logr, 'xipm': np.hstack((gg.xip, gg.xim))}
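The pyfits.new_table and clobber calls above come from the long-deprecated PyFITS API; a sketch of the same temp-file step using modern astropy.io.fits (an equivalent rewrite, not from the original):

from astropy.io import fits

def write_temp_fits(x, y, g1, g2, fname='temp.fits'):
    # Binary table with one row per grid point, matching the columns treecorr reads.
    cols = fits.ColDefs([
        fits.Column(name='x', format='1D', array=x.flatten()),
        fits.Column(name='y', format='1D', array=y.flatten()),
        fits.Column(name='g1', format='1D', array=g1.flatten()),
        fits.Column(name='g2', format='1D', array=g2.flatten()),
    ])
    hdus = fits.HDUList([fits.PrimaryHDU(), fits.BinTableHDU.from_columns(cols)])
    hdus.writeto(fname, overwrite=True)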
Example #4
    def calc_shear_shear(self, i, j, verbose, num_threads):

        m1, m2, mask = self.get_m(i)
        if self.params['has_sheared']:
            cat_i = treecorr.Catalog(g1=self.shape['e1'][mask] / m1[mask], g2=self.shape['e2'][mask] / m2[mask],
                                     w=self.weight[mask], ra=self.shape['ra'][mask], dec=self.shape['dec'][mask],
                                     ra_units='deg', dec_units='deg')
        else:
            cat_i = treecorr.Catalog(g1=self.shape['e1'][mask], g2=self.shape['e2'][mask],
                                     w=self.weight[mask], ra=self.shape['ra'][mask], dec=self.shape['dec'][mask],
                                     ra_units='deg', dec_units='deg')
            biascat_i = treecorr.Catalog(k=np.sqrt(self.shape['m1'][mask] * self.shape['m2'][mask]),
                                         w=self.weight[mask], ra=self.shape['ra'][mask], dec=self.shape['dec'][mask],
                                         ra_units='deg', dec_units='deg')

        m1, m2, mask = self.get_m(j)
        if self.params['has_sheared']:
            cat_j = treecorr.Catalog(g1=self.shape['e1'][mask] / m1[mask], g2=self.shape['e2'][mask] / m2[mask],
                                     w=self.weight[mask], ra=self.shape['ra'][mask], dec=self.shape['dec'][mask],
                                     ra_units='deg', dec_units='deg')
        else:
            cat_j = treecorr.Catalog(g1=self.shape['e1'][mask], g2=self.shape['e2'][mask],
                                     w=self.weight[mask], ra=self.shape['ra'][mask], dec=self.shape['dec'][mask],
                                     ra_units='deg', dec_units='deg')
            biascat_j = treecorr.Catalog(k=np.sqrt(self.shape['m1'][mask] * self.shape['m2'][mask]),
                                         w=self.weight[mask], ra=self.shape['ra'][mask], dec=self.shape['dec'][mask],
                                         ra_units='deg', dec_units='deg')

        gg = treecorr.GGCorrelation(nbins=self.params['tbins'], min_sep=self.params['tbounds'][0],
                                    max_sep=self.params['tbounds'][1], sep_units='arcmin',
                                    bin_slop=self.params['slop'], verbose=verbose, num_threads=num_threads)
        gg.process(cat_i, cat_j)
        if self.params['has_sheared']:
            norm = 1.
        else:
            kk = treecorr.KKCorrelation(nbins=self.params['tbins'], min_sep=self.params['tbounds'][0],
                                        max_sep=self.params['tbounds'][1], sep_units='arcmin',
                                        bin_slop=self.params['slop'], verbose=verbose, num_threads=num_threads)
            kk.process(biascat_i, biascat_j)
            norm = kk.xi

        theta = np.exp(gg.meanlogr)
        xip = gg.xip / norm
        xim = gg.xim / norm
        xiperr = ximerr = np.sqrt(gg.varxi) / norm

        return theta, xip, xim, xiperr, ximerr
Example #5
    def calculate_shear_shear(self, data, i, j):
        import treecorr

        cat_i = self.get_shear_catalog(data, i)
        n_i = cat_i.nobj

        if i == j:
            cat_j = cat_i
            n_j = n_i
        else:
            cat_j = self.get_shear_catalog(data, j)
            n_j = cat_j.nobj

        print(
            f"Rank {self.rank} calculating shear-shear bin pair ({i},{j}): {n_i} x {n_j} objects"
        )

        gg = treecorr.GGCorrelation(self.config)
        gg.process(cat_i, cat_j)

        theta = np.exp(gg.meanlogr)
        xip = gg.xip
        xim = gg.xim
        xiperr = np.sqrt(gg.varxip)
        ximerr = np.sqrt(gg.varxim)

        return theta, xip, xim, xiperr, ximerr, gg.npairs, gg.weight
Example #6
def shear_shear_corr(pos1,pos2,shear1,shear2,k1=None,k2=None,w1=None,w2=None,same_zshell=False,same_cell=False,unique_encounter=False,num_threads=0):

	nbins = 6
	min_sep = 0.05  # degrees = 3 arcmin
	max_sep = 3.0  # degrees = 180 arcmin
	bin_size = (max_sep - min_sep) / nbins  # rough linear estimate; treecorr's bins are logarithmic
	bin_slop = 0.05 / bin_size  # was 0.1; the DES 2pt_pipeline used bin_slop 0.01: https://github.com/des-science/2pt_pipeline/blob/master/pipeline/twopt_pipeline.yaml
	# num_threads = 5  # None or 0
	logger = None

	if same_zshell and same_cell: # auto
		ra, dec = pos1 # either 1 or 2 works
		g1, g2 = shear1
		k = k1
		w = w1
		cat = treecorr.Catalog(g1=g1, g2=g2, k=k, ra=ra, dec=dec, w=w, ra_units='degrees', dec_units='degrees')
	elif same_zshell:  # split the workload evenly between the two encounters (so no core sits idle)
		ra1, dec1 = np.array_split(pos1[0], 2), np.array_split(pos1[1], 2) # split in half
		ra2, dec2 = np.array_split(pos2[0], 2), np.array_split(pos2[1], 2)
		g1_1st, g2_1st = np.array_split(shear1[0], 2), np.array_split(shear1[1], 2)
		g1_2nd, g2_2nd = np.array_split(shear2[0], 2), np.array_split(shear2[1], 2)
		k1 = np.array_split(k1, 2) if (k1 is not None) else [None,None]
		k2 = np.array_split(k2, 2) if (k2 is not None) else [None,None]
		w1 = np.array_split(w1, 2) if (w1 is not None) else [None,None]
		w2 = np.array_split(w2, 2) if (w2 is not None) else [None,None]
		cat1 = [treecorr.Catalog(g1=g1_1st[h], g2=g2_1st[h], k=k1[h], ra=ra1[h], dec=dec1[h], w=w1[h], ra_units='degrees', dec_units='degrees') for h in [0,1]]
		cat2 = [treecorr.Catalog(g1=g1_2nd[h], g2=g2_2nd[h], k=k2[h], ra=ra2[h], dec=dec2[h], w=w2[h], ra_units='degrees', dec_units='degrees') for h in [0,1]]
	else:
		ra1, dec1 = pos1
		ra2, dec2 = pos2
		g1_1st, g2_1st = shear1
		g1_2nd, g2_2nd = shear2
		cat1 = treecorr.Catalog(g1=g1_1st, g2=g2_1st, k=k1, ra=ra1, dec=dec1, w=w1, ra_units='degrees', dec_units='degrees')
		cat2 = treecorr.Catalog(g1=g1_2nd, g2=g2_2nd, k=k2, ra=ra2, dec=dec2, w=w2, ra_units='degrees', dec_units='degrees')

	gg = treecorr.GGCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, bin_slop=bin_slop, sep_units='degrees', logger=logger)
	
	if same_zshell and same_cell:
		gg.process_auto(cat, num_threads=num_threads)
	elif same_zshell:  # split the workload evenly between the two encounters (so no core sits idle)
		if unique_encounter:  # these two counts should not be doubled up, since they are the same in both directions
			gg.process_cross(cat1[0], cat2[0], num_threads=num_threads)
			gg.process_cross(cat1[1], cat2[1], num_threads=num_threads)
		else:  # in the other encounter cat1 and cat2 are swapped, but that does not matter here
			gg.process_cross(cat2[0], cat1[1], num_threads=num_threads)
			gg.process_cross(cat2[1], cat1[0], num_threads=num_threads)
	else:
		gg.process_cross(cat1, cat2, num_threads=num_threads)

	if same_zshell and same_cell:
		varg1 = treecorr.calculateVarG(cat)
		varg2 = varg1
	elif same_cell:
		varg1 = treecorr.calculateVarG(cat1)
		varg2 = treecorr.calculateVarG(cat2)
	else:
		varg1 = np.nan
		varg2 = np.nan

	return gg, varg1, varg2
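Since process_auto/process_cross only accumulate the weighted pair sums, the caller still has to finish the calculation; a minimal sketch of that step under the same variable names (hypothetical driver code, not from the original):

import numpy as np

# gg, varg1, varg2 as returned by shear_shear_corr(...)
gg.finalize(varg1, varg2)    # divide the accumulated sums by the total weight; sets xip, xim, ...
theta = np.exp(gg.meanlogr)  # bin centers are only meaningful after finalize()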
Example #7
def run_treecorr(x, y, g1, g2):
    """Helper routine to take outputs of GalSim shear grid routine, and run treecorr on it."""
    import pyfits
    import os
    import treecorr
    # Use fits binary table for faster I/O.
    assert x.shape == y.shape
    assert x.shape == g1.shape
    assert x.shape == g2.shape
    x_col = pyfits.Column(name='x', format='1D', array=x.flatten())
    y_col = pyfits.Column(name='y', format='1D', array=y.flatten())
    g1_col = pyfits.Column(name='g1', format='1D', array=g1.flatten())
    g2_col = pyfits.Column(name='g2', format='1D', array=g2.flatten())
    cols = pyfits.ColDefs([x_col, y_col, g1_col, g2_col])
    table = pyfits.new_table(cols)
    phdu = pyfits.PrimaryHDU()
    hdus = pyfits.HDUList([phdu, table])
    hdus.writeto('temp.fits', clobber=True)
    # Define the treecorr catalog object.
    cat = treecorr.Catalog('temp.fits',
                           x_units='degrees',
                           y_units='degrees',
                           x_col='x',
                           y_col='y',
                           g1_col='g1',
                           g2_col='g2')
    # Define the corrfunc object.  (min_sep and max_sep are assumed to be module-level here.)
    gg = treecorr.GGCorrelation(min_sep=min_sep,
                                max_sep=max_sep,
                                bin_size=0.1,
                                sep_units='degrees')
    # Actually calculate the correlation function.
    gg.process(cat)
    os.remove('temp.fits')
    return gg
Example #8
def measure_rho(ra, dec, e1, e2, s, m_e1, m_e2, m_s, max_sep, tag=None):
    """Compute the rho statistics
    """
    import treecorr

    de1 = e1 - m_e1
    de2 = e2 - m_e2
    dt = (s**2 - m_s**2) / s**2

    ecat = treecorr.Catalog(ra=ra,
                            dec=dec,
                            ra_units='deg',
                            dec_units='deg',
                            g1=e1,
                            g2=e2)
    decat = treecorr.Catalog(ra=ra,
                             dec=dec,
                             ra_units='deg',
                             dec_units='deg',
                             g1=de1,
                             g2=de2)
    dtcat = treecorr.Catalog(ra=ra,
                             dec=dec,
                             ra_units='deg',
                             dec_units='deg',
                             k=dt,
                             g1=dt * e1,
                             g2=dt * e2)

    ecat.name = 'ecat'
    decat.name = 'decat'
    dtcat.name = 'dtcat'
    if tag is not None:
        for cat in [ecat, decat, dtcat]:
            cat.name = tag + ":" + cat.name

    min_sep = 0.5
    bin_size = 0.5
    bin_slop = 0.1

    results = []
    for (cat1, cat2) in [(decat, decat), (ecat, decat), (dtcat, dtcat),
                         (decat, dtcat), (ecat, dtcat)]:
        print('Doing correlation of %s vs %s' % (cat1.name, cat2.name))

        rho = treecorr.GGCorrelation(min_sep=min_sep,
                                     max_sep=max_sep,
                                     sep_units='arcmin',
                                     bin_size=bin_size,
                                     bin_slop=bin_slop,
                                     verbose=2)
        if cat1 is cat2:
            rho.process(cat1)
        else:
            rho.process(cat1, cat2)
        results.append(rho)

    return results
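For reference, the five catalog pairs correlated above correspond, in order, to the Rowe statistics rho1 = &lt;de de&gt;, rho2 = &lt;e de&gt;, rho3 = &lt;(dt e)(dt e)&gt;, rho4 = &lt;de (dt e)&gt; and rho5 = &lt;e (dt e)&gt;, the same ordering used in Example #1.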
Example #9
File: shear_test.py  Project: j-dr/descqa
    def jackknife(self, catalog_data, xip, xim):
        " computing jack-knife covariance matrix using K-means clustering"
        #k-means clustering to define areas
        #NOTE: This is somewhat deprecated, the jack-knifing takes too much effort to find appropriately accurate covariance matrices.
        # If you want to use this, do a quick convergence check and some timing tests on small N_clust values (~5 to start) first.
        # note also that this is comparing against the (low) variance in the catalog which might not be a great comparison -no shape noise
        N_clust = self.N_clust
        nn = np.stack((catalog_data[self.ra], catalog_data[self.dec]), axis=1)
        _, labs, _ = k_means(
            n_clusters=N_clust, random_state=0, X=nn, n_jobs=-1)  # check random state, n_jobs is in debugging mode
        print("computing jack-knife errors")
        time_jack = time.time()
        # jack-knife code
        xip_jack = []
        xim_jack = []
        gg = treecorr.GGCorrelation(
            nbins=self.nbins,
            min_sep=self.min_sep,
            max_sep=self.max_sep,
            sep_units='arcmin',
            bin_slop=self.bin_slop,
            verbose=True)
        for i in range(N_clust):
            ##### shear computation excluding each jack-knife region
            cat_s = treecorr.Catalog(
                ra=catalog_data[self.ra][labs != i],
                dec=catalog_data[self.dec][labs != i],
                g1=catalog_data[self.e1][labs != i] - np.mean(catalog_data[self.e1][labs != i]),
                g2=-(catalog_data[self.e2][labs != i] - np.mean(catalog_data[self.e2][labs != i])),
                ra_units='deg',
                dec_units='deg')
            gg.process(cat_s)

            xip_jack.append(gg.xip)
            xim_jack.append(gg.xim)
            ## debugging outputs
            print("xip_jack")
            print(i)
            print(gg.xip)
            print("time = " + str(time.time() - time_jack))


        ### assign covariance matrix - the triple loop is poor python style, but compared to the time taken for the rest of the test it doesn't really matter
        cp_xip = np.zeros((self.nbins, self.nbins))
        # Jackknife prefactor corrected to the standard (N-1)/N, matching the version of this function in Example #11.
        for i in range(self.nbins):
            for j in range(self.nbins):
                for k in range(N_clust):
                    cp_xip[i][j] += (N_clust - 1.) / N_clust * (xip[i] - xip_jack[k][i] * 1.e6) * (
                        xip[j] - xip_jack[k][j] * 1.e6)

        cp_xim = np.zeros((self.nbins, self.nbins))
        for i in range(self.nbins):
            for j in range(self.nbins):
                for k in range(N_clust):
                    cp_xim[i][j] += (N_clust - 1.) / N_clust * (xim[i] - xim_jack[k][i] * 1.e6) * (
                        xim[j] - xim_jack[k][j] * 1.e6)
        return cp_xip, cp_xim
Example #10
    def compute_statistic(self, i, ra, dec, q1, q2):
        n = len(ra)
        print(f"Computing Rowe statistic rho_{i} from {n} objects")
        import treecorr
        corr = treecorr.GGCorrelation(self.config)
        cat1 = treecorr.Catalog(ra=ra, dec=dec, g1=q1[0], g2=q1[1], ra_units='deg', dec_units='deg')
        cat2 = treecorr.Catalog(ra=ra, dec=dec, g1=q2[0], g2=q2[1], ra_units='deg', dec_units='deg')
        corr.process(cat1, cat2)
        return corr.meanr, corr.xip, corr.varxip**0.5
Example #11
    def jackknife(self, catalog_data, xip, xim):
        " computing jack-knife covariance matrix using K-means clustering"
        #k-means clustering to define areas
        N_clust = self.N_clust
        nn = np.stack((catalog_data[self.ra], catalog_data[self.dec]), axis=1)
        _, labs, _ = k_means(
            n_clusters=N_clust, random_state=0, X=nn,
            n_jobs=-1)  # check random state, n_jobs is in debugging mode
        print("computing jack-knife errors")
        time_jack = time.time()
        # jack-knife code
        xip_jack = []
        xim_jack = []
        gg = treecorr.GGCorrelation(nbins=self.nbins,
                                    min_sep=self.min_sep,
                                    max_sep=self.max_sep,
                                    sep_units='arcmin',
                                    bin_slop=self.bin_slop,
                                    verbose=True)
        for i in range(N_clust):
            ##### shear computation excluding each jack-knife region
            cat_s = treecorr.Catalog(
                ra=catalog_data[self.ra][labs != i],
                dec=catalog_data[self.dec][labs != i],
                g1=catalog_data[self.e1][labs != i] -
                np.mean(catalog_data[self.e1][labs != i]),
                g2=-(catalog_data[self.e2][labs != i] -
                     np.mean(catalog_data[self.e2][labs != i])),
                ra_units='deg',
                dec_units='deg')
            gg.process(cat_s)

            xip_jack.append(gg.xip)
            xim_jack.append(gg.xim)
            ## debugging outputs
            print("xip_jack")
            print(i)
            print(gg.xip)
            print("time = " + str(time.time() - time_jack))

        ### assign covariance matrix - the triple loop is poor python style, but compared to the time taken for the rest of the test it doesn't really matter
        cp_xip = np.zeros((self.nbins, self.nbins))
        for i in range(self.nbins):
            for j in range(self.nbins):
                for k in range(N_clust):
                    cp_xip[i][j] += (N_clust - 1.) / N_clust * (
                        xip[i] - xip_jack[k][i] * 1.e6) * (
                            xip[j] - xip_jack[k][j] * 1.e6)

        cp_xim = np.zeros((self.nbins, self.nbins))
        for i in range(self.nbins):
            for j in range(self.nbins):
                for k in range(N_clust):
                    cp_xim[i][j] += (N_clust - 1.) / N_clust * (
                        xim[i] - xim_jack[k][i] * 1.e6) * (
                            xim[j] - xim_jack[k][j] * 1.e6)
        return cp_xip, cp_xim
Example #12
def compute_SinglePair_GGCorrelation(cat_i, bin_i, bin_j, config):
    # input_dir, output_dir and ncpus are assumed to be module-level here.
    if bin_i == bin_j:
        gg = treecorr.GGCorrelation(config)
        gg.process(cat_i, num_threads=ncpus)
    else:
        cat_j_name = input_dir + 'src-cat_z' + str(bin_j + 1) + '.fits'
        cat_j = treecorr.Catalog(cat_j_name, config)

        gg = treecorr.GGCorrelation(config)
        gg.process(cat_i, cat_j, num_threads=ncpus)

    data_name = output_dir + 'gg_data/gg_z' + str(bin_i + 1) + str(bin_j + 1) + '.dat'
    gg.write(data_name)
    gg.clear()

    return
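Both treecorr.Catalog(file_name, config) and treecorr.GGCorrelation(config) accept a single shared config dict, as the examples here do. A minimal sketch of what such a dict might contain for this FITS-based workflow (illustrative values only):

config = {
    # Catalog options: which FITS columns to read, and their units
    'ra_col': 'RA', 'dec_col': 'DEC',
    'g1_col': 'E1', 'g2_col': 'E2',
    'ra_units': 'deg', 'dec_units': 'deg',
    # Correlation options: binning
    'min_sep': 2.5, 'max_sep': 250., 'nbins': 20, 'sep_units': 'arcmin',
}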
Example #13
def test_shuffle():
    # Check that the code is insensitive to shuffling the input data vectors.

    # Might as well use the same function as above, although I reduce L a bit.
    ngal = 10000
    gamma0 = 0.05
    r0 = 10.
    L = 5. * r0
    numpy.random.seed(8675309)
    x = (numpy.random.random_sample(ngal) - 0.5) * L
    y = (numpy.random.random_sample(ngal) - 0.5) * L
    r2 = (x**2 + y**2) / r0**2
    g1 = -gamma0 * numpy.exp(-r2 / 2.) * (x**2 - y**2) / r0**2
    g2 = -gamma0 * numpy.exp(-r2 / 2.) * (2. * x * y) / r0**2

    cat_u = treecorr.Catalog(x=x, y=y, g1=g1, g2=g2)
    gg_u = treecorr.GGCorrelation(bin_size=0.1,
                                  min_sep=1.,
                                  max_sep=30.,
                                  verbose=1)
    gg_u.process(cat_u)

    # Put these in a single 2d array so we can easily use numpy.random.shuffle
    data = numpy.array([x, y, g1, g2]).T
    print('data = ', data)
    numpy.random.shuffle(data)

    cat_s = treecorr.Catalog(x=data[:, 0],
                             y=data[:, 1],
                             g1=data[:, 2],
                             g2=data[:, 3])
    gg_s = treecorr.GGCorrelation(bin_size=0.1,
                                  min_sep=1.,
                                  max_sep=30.,
                                  verbose=1)
    gg_s.process(cat_s)

    print('gg_u.xip = ', gg_u.xip)
    print('gg_s.xip = ', gg_s.xip)
    print('ratio = ', gg_u.xip / gg_s.xip)
    print('diff = ', gg_u.xip - gg_s.xip)
    print('max diff = ', max(abs(gg_u.xip - gg_s.xip)))
    assert max(abs(gg_u.xip - gg_s.xip)) < 1.e-14
Example #14
def compute_SingleMock_SinglePair_GGCorrelation(cat_i, bin_i, bin_j, config):

    if bin_i == bin_j:
        gg = treecorr.GGCorrelation(config)
        gg.process(cat_i, num_threads=ncpus)

    else:
        cat_j_name = '/home/hcamacho/src_s1_z' + str(bin_j + 1) + '_c1.fits'
        cat_j = treecorr.Catalog(cat_j_name, config)

        gg = treecorr.GGCorrelation(config)
        gg.process(cat_i, cat_j, num_threads=ncpus)

    data_name = '/home/anderson/3x2pt/gg_data/gg_s1_z' + str(bin_i + 1) + str(
        bin_j + 1) + '_c1.dat'
    gg.write(data_name)
    gg.clear()

    return
Example #15
def compute_SingleMock_SinglePair_GGCorrelation(cat_i, bin_i, bin_j, config):

    if bin_i == bin_j:
        gg = treecorr.GGCorrelation(config)
        gg.process(cat_i, num_threads=ncpus)

    else:
        cat_j_name = input_dir + 'src-cat_z' + str(bin_j + 1) + '.fits'
        cat_j = treecorr.Catalog(cat_j_name, config)

        gg = treecorr.GGCorrelation(config)
        gg.process(cat_i, cat_j, num_threads=ncpus)

    data_name = '/home/anderson/3x2pt/gg_data/gg_z' + str(bin_i + 1) + str(bin_j + 1) + '.dat'
    gg.write(data_name)
    gg.clear()

    return
    '''
    process_cross: process a single pair of catalogs, accumulating the cross-correlation.

    This accumulates the weighted sums into the bins, but does not finalize
    the calculation by dividing by the total weight at the end.  After calling
    this function as often as desired, the finalize() command will finish the
    calculation.

    Sketch of the intended usage:

        src_cats = [treecorr.Catalog(names, config) for names in src_names]
        gg = treecorr.GGCorrelation(config)
        varg = treecorr.calculateVarG(src_cats)

        for cat1, cat2 in zip(src_cats, src_cats):
            gg.process_cross(cat1, cat2)
        gg.finalize(varg, varg)
        gg.write(data_name)
    '''
Example #16
def correlation_function_ellipticity(ra, dec, e1_res, e2_res,
                                     nbins=20, min_sep=0.25, max_sep=20,
                                     sep_units='arcmin', verbose=False):
    """Compute shear-shear correlation function from ra, dec, g1, g2.

    Default parameters for nbins, min_sep, max_sep chosen to cover
       an appropriate range to calculate TE1 (<=1 arcmin) and TE2 (>=5 arcmin).
    Parameters
    ----------
    ra : numpy.array
        Right ascension of points [radians]
    dec : numpy.array
        Declination of points [radians]
    e1_res : numpy.array
        Residual ellipticity 1st component
    e2_res : numpy.array
        Residual ellipticity 2nd component
    nbins : int, optional
        Number of bins over which to analyze the two-point correlation
    min_sep : float, optional
        Minimum separation over which to analyze the two-point correlation
    max_sep : float, optional
        Maximum separation over which to analyze the two-point correlation
    sep_units : str, optional
        Specify the units of min_sep and max_sep
    verbose : bool
        Request verbose output from `treecorr`.
        verbose=True will use verbose=2 for `treecorr.GGCorrelation`.

    Returns
    -------
    r, xip, xip_err : each a np.array(dtype=float)
        - The bin centers, two-point correlation, and uncertainty.
    """
    # Translate to 'verbose_level' here to refer to the integer levels in TreeCorr
    # While 'verbose' is more generically what is being passed around
    #   for verbosity within 'validate_drp'
    if verbose:
        verbose_level = 2
    else:
        verbose_level = 0

    catTree = treecorr.Catalog(ra=ra, dec=dec, g1=e1_res, g2=e2_res,
                               dec_units='radian', ra_units='radian')
    gg = treecorr.GGCorrelation(nbins=nbins, min_sep=min_sep, max_sep=max_sep,
                                sep_units=sep_units,
                                verbose=verbose_level)
    gg.process(catTree)
    r = np.exp(gg.meanlogr) * u.arcmin
    xip = gg.xip * u.Unit('')
    xip_err = np.sqrt(gg.varxi) * u.Unit('')

    return (r, xip, xip_err)
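A quick usage sketch with synthetic data (values illustrative; numpy, astropy.units as u, and treecorr are assumed to be imported at module level, as the function requires):

import numpy as np

rng = np.random.default_rng(0)
n = 2000
ra = np.deg2rad(rng.uniform(150.0, 151.0, n))  # radians, per the docstring
dec = np.deg2rad(rng.uniform(2.0, 3.0, n))
e1_res = 0.01 * rng.standard_normal(n)         # residual ellipticity components
e2_res = 0.01 * rng.standard_normal(n)

r, xip, xip_err = correlation_function_ellipticity(ra, dec, e1_res, e2_res, verbose=True)
print(r, xip, xip_err)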
Example #17
def run_parallel():

    t0 = time.time()
    print(rank, socket.gethostname(), flush=True)
    fname = file_name.replace('.fits', '_%d.fits' % rank)
    log_file = 'parallel_%d.log' % rank

    # All processes make the full cat with these patches.
    # Note: this doesn't actually read anything from disk yet.
    cat = treecorr.Catalog(fname,
                           ra_col=ra_col,
                           dec_col=dec_col,
                           ra_units=ra_units,
                           dec_units=dec_units,
                           g1_col=g1_col,
                           g2_col=g2_col,
                           flag_col=flag_col,
                           verbose=1,
                           log_file=log_file,
                           patch_centers=patch_file)
    t1 = time.time()
    print('Made cat', t1 - t0, flush=True)

    # Everyone needs to make their own Correlation object.
    gg = treecorr.GGCorrelation(bin_size=bin_size,
                                min_sep=min_sep,
                                max_sep=max_sep,
                                sep_units='arcmin',
                                bin_slop=bin_slop,
                                verbose=1,
                                log_file=log_file)

    cat.load()
    t2 = time.time()
    print(rank, 'Loaded', t2 - t1, flush=True)

    cat.get_patches()
    t3 = time.time()
    print(rank, 'Made patches', t3 - t2, flush=True)

    # To use the multiple process, just pass comm to the process command.
    gg.process(cat, comm=comm, low_mem=low_mem)
    t4 = time.time()
    print(rank, 'Processed', t4 - t3, flush=True)
    comm.Barrier()
    t5 = time.time()
    print(rank, 'Barrier', t5 - t4, flush=True)
    print(rank, 'Done with parallel computation', t5 - t0, flush=True)

    # rank 0 has the completed result.
    if rank == 0:
        print('xip = ', gg.xip, flush=True)
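This function relies on mpi4py-style globals (comm, rank) plus module-level catalog settings; a minimal sketch of that scaffolding (all names hypothetical apart from the mpi4py calls):

from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()

# Module-level settings the function reads:
file_name = 'cat.fits'
ra_col, dec_col, g1_col, g2_col, flag_col = 'ra', 'dec', 'g1', 'g2', 'flag'
ra_units = dec_units = 'deg'
patch_file = 'patch_centers.txt'  # e.g. written earlier with cat.write_patch_centers(...)
bin_size, min_sep, max_sep, bin_slop = 0.1, 1., 100., 0.1
low_mem = False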
Example #18
def get_xi(map, window_norm, mask=None, Sim_jk=None):
    self = Sim_jk
    maps = {'galaxy': map[0]}
    maps['shear'] = {0: map[1], 1: map[2]}
    if mask is None:
        mask = {}
        mask['galaxy'] = maps['galaxy'] == hp.UNSEEN
        mask['shear'] = maps['shear'][0] == hp.UNSEEN
    tree_cat_args = get_treecorr_cat_args(maps, masks=mask, nside=Sim_jk.nside)
    tree_cat = {}
    tree_cat['galaxy'] = treecorr.Catalog(w=maps['galaxy'][~mask['galaxy']],
                                          **tree_cat_args['galaxy'])
    tree_cat['shear'] = treecorr.Catalog(g1=maps['shear'][0][~mask['shear']],
                                         g2=maps['shear'][1][~mask['shear']],
                                         **tree_cat_args['shear'])
    del mask
    ndim = 3  # FIXME: unused; self.ndim is what is actually read below
    xi = np.zeros(self.n_th_bins * (self.ndim + 1))
    th_i = 0
    tree_corrs = {}
    n_th_bins = self.n_th_bins
    for corr in self.kappa_class.corrs:  #note that in treecorr npairs includes pairs with 0 weights. That affects this calc
        if corr == corr_ggl:
            tree_corrs[corr] = treecorr.NGCorrelation(**corr_config)
            tree_corrs[corr].process(tree_cat['galaxy'], tree_cat['shear'])
            xi[th_i:th_i + n_th_bins] = (tree_corrs[corr].xi * tree_corrs[corr].weight
                                         / window_norm[corr]['weight'] * -1)  # sign convention
            th_i += self.n_th_bins
        if corr == corr_ll:
            tree_corrs[corr] = treecorr.GGCorrelation(**corr_config)
            tree_corrs[corr].process(tree_cat['shear'])
            xi[th_i:th_i + n_th_bins] = tree_corrs[corr].xip  # *tree_corrs[corr].npairs/window_norm[corr]['weight']
            th_i += n_th_bins
            xi[th_i:th_i + n_th_bins] = tree_corrs[corr].xim  # *tree_corrs[corr].npairs/window_norm[corr]['weight']
            th_i += n_th_bins
        if corr == corr_gg:
            tree_corrs[corr] = treecorr.NNCorrelation(**corr_config)
            tree_corrs[corr].process(tree_cat['galaxy'])
            xi[th_i:th_i + n_th_bins] = tree_corrs[corr].weight / tree_corrs[corr].npairs  # window_norm[corr]['weight']
            # xi[th_i:th_i+n_th_bins] = tree_corrs[corr].weight/window_norm[corr]
            th_i += n_th_bins


    # del tree_cat, tree_corrs
    # gc.collect()
    return xi
Example #19
def main():
    import treecorr
    import numpy as np
    import matplotlib
    matplotlib.use('Agg')
    import matplotlib.pyplot as plt
      
    ax = np.array([0, 1, 2])
    ay = np.array([0, 0, 0])
    a1 = np.array([-1, -2, 3])
    a2 = np.array([-4, 5, 6])
    acat = treecorr.Catalog(x=ax, y=ay, g1=a1, g2=a2)

    bx = np.array([0, 1.1, 2.1])
    by = np.array([0, 0, 0])
    b1 = np.array([-7, 8, 9])
    b2 = np.array([-10, 11, -12])
    bcat = treecorr.Catalog(x=bx, y=by, g1=b1, g2=b2)

    
    gg = treecorr.GGCorrelation(min_sep=0.1, max_sep=3, bin_size=0.1)
    gg.process(acat, bcat)
    
    var = np.array((2*gg.varxi).tolist())
    npairs = np.array(gg.npairs.tolist())
    meanr = np.array(gg.meanlogr.tolist())  # NB: this is <log(r)>, not <r>; gg.meanr would give <r>
    xip_real = np.array(gg.xip.tolist())
    xip_im = np.array(gg.xip_im.tolist())
    xim_real = np.array(gg.xim.tolist())
    xim_im =  np.array(gg.xim_im.tolist())

    print(meanr)
    print(xip_real)
    #print(xip_im)
    #print(xim_real)
    #print(xim_im)
    
    plt.clf()
    pretty_rho(meanr, npairs, sig=np.zeros(len(meanr)),  legend=None, lfontsize=24, color='black', marker='o', ylabel='Npairs')
    #plt.xlim( [0.01,10.] )
    fname =  '/home/dfa/sobreira/alsina/basic_tools/examples/npairs.pdf'
    plt.savefig(fname)
    
    plt.clf()
    pretty_rho(meanr, xip_real, sig=np.zeros(len(meanr)) , legend='xip_real', lfontsize=24, color='black', marker='o')
    #pretty_rho(meanr, xip_real, sig=np.sqrt(var) , legend='xip_real', lfontsize=24, color='black', marker='o')
    plt.xlim( [0.01,5.] )
    fname =  '/home/dfa/sobreira/alsina/basic_tools/examples/xip_real.pdf'
    plt.savefig(fname)

Example #20
def compute(options):
	print('Shape data : %s' % options['output'])
	data = fi.FITS(options['2pt']['shapes'])[-1].read()

	splitflag = options['2pt']['split']

	if splitflag is not None:
		name = options['2pt']['split']
		print('Dividing catalogue by %s' % name)
		mask = (data[name] >= options['2pt']['split_val'])
		print('%3.3f percent split' % (data[name][mask].size * 100. / data[name].size))
	else:
		print('No catalogue split required.')
		mask = np.ones(data.size).astype(bool)

	print('Setting up correlations')
	cat1 = treecorr.Catalog(x=data['x'][mask], y=data['y'][mask], z=data['z'][mask], a=data['a1'][mask], b=data['a2'][mask], c=data['a3'][mask])
	c1c1 = treecorr.GGCorrelation(min_sep=options['2pt']['rmin'], max_sep=options['2pt']['rmax'], nbins=options['2pt']['nbin'])

	if splitflag:
		cat2 = treecorr.Catalog(x=data['x'][np.invert(mask)], y=data['y'][np.invert(mask)], z=data['z'][np.invert(mask)], a=data['a1'][np.invert(mask)], b=data['a2'][np.invert(mask)], c=data['a3'][np.invert(mask)])
		c2c2 = treecorr.GGCorrelation(min_sep=options['2pt']['rmin'], max_sep=options['2pt']['rmax'], nbins=options['2pt']['nbin'])
		c1c2 = treecorr.GGCorrelation(min_sep=options['2pt']['rmin'], max_sep=options['2pt']['rmax'], nbins=options['2pt']['nbin'])
		suffix = '_splitby%s'%options['2pt']['split']
	else:
		suffix=''

	print('Computing correlation functions.')
	c1c1.process(cat1, cat1)
	util.export_treecorr_output('%s/xigg_corr_11%s.txt' % (options['2pt']['savedir'], suffix), c1c1)

	if splitflag:
		c2c2.process(cat2,cat2)
		util.export_treecorr_output('%s/xigg_corr_22%s.txt'%(options['2pt']['savedir'], suffix), c2c2)
		c1c2.process(cat1,cat2)
		util.export_treecorr_output('%s/xigg_corr_12%s.txt'%(options['2pt']['savedir'], suffix), c1c2)

	print('Done')
Example #21
def run_rho(data, results, band):
    print('run_rho for ', band)
    print('len(data) = ', len(data))
    dt = (data['T_data'] - data['T_model']) / data['T_data']
    ecat = treecorr.Catalog(ra=data['ra'],
                            dec=data['dec'],
                            ra_units='deg',
                            dec_units='deg',
                            g1=data['g1_data'],
                            g2=data['g2_data'])
    qcat = treecorr.Catalog(ra=data['ra'],
                            dec=data['dec'],
                            ra_units='deg',
                            dec_units='deg',
                            g1=data['g1_data'] - data['g1_model'],
                            g2=data['g2_data'] - data['g2_model'])
    wcat = treecorr.Catalog(ra=data['ra'],
                            dec=data['dec'],
                            ra_units='deg',
                            dec_units='deg',
                            g1=data['g1_data'] * dt,
                            g2=data['g2_data'] * dt)
    ecat.name = 'ecat'
    qcat.name = 'qcat'
    wcat.name = 'wcat'

    bin_config = dict(
        sep_units='arcmin',
        bin_slop=0.1,
        min_sep=0.5,
        max_sep=250.,
        bin_size=0.2,
    )
    pairs = [(qcat, qcat), (ecat, qcat), (wcat, wcat), (qcat, wcat),
             (ecat, wcat), (ecat, ecat)]
    print('data has %s stars' % ecat.ntot)
    for (cat1, cat2) in pairs:
        print('Doing correlation of %s band %s vs %s' %
              (band, cat1.name, cat2.name))

        rho = treecorr.GGCorrelation(bin_config, verbose=2)

        if cat1 is cat2:
            rho.process(cat1)
        else:
            rho.process(cat1, cat2)
        print('mean xi+ = ', rho.xip.mean())
        print('mean xi- = ', rho.xim.mean())
        results[(band, cat1.name, cat2.name)] = rho
Example #22
def CorrFunc(cat1, cat2, 
                outpath, nbins, mins, maxs, units, bin_slop, nthr):
    """
    Function for correlation calculation (auto & cross)
    """

    gg = treecorr.GGCorrelation(nbins=nbins, min_sep=mins, max_sep=maxs, 
                                sep_units=units, bin_slop=bin_slop)

    if cat2 is None:
        # auto-correlation
        gg.process(cat1, num_threads=nthr)
    else:
        # cross-correlation
        gg.process(cat1, cat2, num_threads=nthr)
    gg.write(outpath)
    print("TreeCorr results saved in", outpath)
Example #23
File: mock.py  Project: shivamp1495/destest
    def loop_2pt_noweight(catname, ii):

        t0 = time.time()

        cnt = 0
        for j in range(36):
            for i in range(8):
                for zbin in range(4):
                    for k in range(3):
                        if cnt > 225:
                            continue
                        if (cnt + 1) % 5 != ii:
                            continue
                        print(j, i, k, time.time() - t0)
                        out = methods.rotate_mock_rescale_nsigma(zbin + 1,
                                                                 i + 1,
                                                                 j + 1,
                                                                 wfile=None)
                        cat = treecorr.Catalog(g1=out['e1'],
                                               g2=out['e2'],
                                               w=out['w'],
                                               ra=out['ra'],
                                               dec=out['dec'],
                                               ra_units='deg',
                                               dec_units='deg')
                        gg = treecorr.GGCorrelation(nbins=20,
                                                    min_sep=2.5,
                                                    max_sep=250.,
                                                    sep_units='arcmin',
                                                    bin_slop=0.2,
                                                    verbose=0)
                        gg.process(cat)

                        d = {
                            'theta': np.exp(gg.meanlogr),
                            'xip': gg.xip,
                            'xim': gg.xim,
                            'err': np.sqrt(gg.varxi)
                        }

                        save_obj(
                            d, 'text/flask_GG_' + catname + '_noweight_' +
                            str(zbin) + '_' + str(cnt) + '.cpickle')
                cnt += 1

        return
Example #24
def run_serial():
    from test_helper import profile
    t0 = time.time()
    fname = file_name.replace('.fits', '_0.fits')
    log_file = 'serial.log'

    cat = treecorr.Catalog(fname,
                           ra_col=ra_col,
                           dec_col=dec_col,
                           ra_units=ra_units,
                           dec_units=dec_units,
                           g1_col=g1_col,
                           g2_col=g2_col,
                           flag_col=flag_col,
                           verbose=1,
                           log_file=log_file,
                           patch_centers=patch_file)
    t1 = time.time()
    print('Made cat', t1 - t0)

    gg = treecorr.GGCorrelation(bin_size=bin_size,
                                min_sep=min_sep,
                                max_sep=max_sep,
                                sep_units='arcmin',
                                bin_slop=bin_slop,
                                verbose=1,
                                log_file=log_file)

    # These next two steps don't need to be done separately.  They will automatically
    # happen when calling process.  But separating them out makes it easier to profile.
    with profile():
        cat.load()
    t2 = time.time()
    print('Loaded', t2 - t1)

    with profile():
        cat.get_patches()
    t3 = time.time()
    print('Made patches', t3 - t2)

    with profile():
        gg.process(cat, low_mem=low_mem)
    t4 = time.time()
    print('Processed', t4 - t3)
    print('Done with non-parallel computation', t4 - t0)
    print('xip = ', gg.xip, flush=True)
Example #25
def get_Maps(cat, theta_min, theta_max, ntheta, bin_slop):

    # Map_threads is assumed to be defined at module level.
    config = {
        'min_sep': theta_min,
        'max_sep': theta_max,
        'nbins': ntheta,
        'bin_slop': bin_slop,
        'sep_units': 'arcmin',
        'verbose': 1,
        'num_threads': Map_threads,
    }

    gg = treecorr.GGCorrelation(**config)
    gg.process(cat)

    theta = gg.meanr
    Mapsq, Mapsq_im, Mxsq, Mxsq_im, varMapsq = gg.calculateMapSq()

    return (theta, Mapsq, Mapsq_im, Mxsq, Mxsq_im, varMapsq)
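calculateMapSq converts the binned xi+/xi- into aperture-mass statistics. A brief usage sketch of the function above (illustrative values; Map_threads is the module-level thread count it expects):

import treecorr

Map_threads = 4  # module-level global read inside get_Maps

cat = treecorr.Catalog('shear.fits', ra_col='ra', dec_col='dec',
                       g1_col='g1', g2_col='g2', ra_units='deg', dec_units='deg')
theta, Mapsq, Mapsq_im, Mxsq, Mxsq_im, varMapsq = get_Maps(cat, 1., 100., 20, 0.1)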
Example #26
    def compute_shear_shear(self, i, j, cat1, cat2):
        maski = (cat1.mask) & (self.p1 == i)
        maskj = (cat2.mask) & (self.p2 == j)

        # Initialise the catalogues
        namei_1, namei_2 = colnames[self.corrtype[0]]
        cat_i = treecorr.Catalog(g1=cat1.cols[namei_1][maski],
                                 g2=cat1.cols[namei_2][maski],
                                 ra=cat1.cols['ra'][maski],
                                 dec=cat1.cols['dec'][maski],
                                 ra_units='deg',
                                 dec_units='deg')

        namej_1, namej_2 = colnames[self.corrtype[1]]
        cat_j = treecorr.Catalog(g1=cat2.cols[namej_1][maskj],
                                 g2=cat2.cols[namej_2][maskj],
                                 ra=cat2.cols['ra'][maskj],
                                 dec=cat2.cols['dec'][maskj],
                                 ra_units='deg',
                                 dec_units='deg')

        # Set up the correlation.
        # Note that we're using the binning configuration from the first of
        # the two config files; it might be good to check and warn if the two
        # files specify different 2pt binning parameters.
        gg = treecorr.GGCorrelation(nbins=cat1.info['tbins'],
                                    min_sep=cat1.info['tmin'],
                                    max_sep=cat1.info['tmax'],
                                    sep_units='arcmin',
                                    bin_slop=0.1,
                                    verbose=True,
                                    num_threads=1)

        # And process it
        gg.process(cat_i, cat_j)

        theta = np.exp(gg.meanlogr)
        xip = gg.xip
        xim = gg.xim
        xiperr = ximerr = np.sqrt(gg.varxi)

        return theta, xip, xim, xiperr, ximerr
Example #27
    def GGCorrelation(self):
        """
        Calculates the 2D shear-shear correlation function using the Catalog's ra, dec.

        Returns tuple (logr, meanlogr, xip, xim, xivar)
        """
        catS = treecorr.Catalog(ra=self.catalog["ra"], dec=self.catalog["dec"],
                                ra_units="radians", dec_units="radians",
                                g1=self.catalog["g1"], g2=self.catalog["g2"])
        dd = treecorr.GGCorrelation(min_sep=self.min_sep, bin_size=self.bin_size,
                                    max_sep=self.max_sep, sep_units='arcmin')

        dd.process(catS)
        logr = dd.logr
        meanlogr = dd.meanlogr  # was dd.logr, which duplicated the nominal bin centers
        xip = dd.xip
        xim = dd.xim
        xivar = dd.varxi
        return (logr, meanlogr, xip, xim, xivar)
Example #28
	def get_2pt(self, corr1, corr2, nbins=12, error_type="bootstrap", xmin=1, xmax=300, units="arcmin"):
		correlation_lookup = {"psf":("mean_psf_e%d_sky", self.res), "gal":("e%d", self.res)}

		if hasattr(self,"truth"): correlation_lookup["int"] = ("intrinsic_e%d", self.truth)

		c1, data1 = correlation_lookup[corr1]
		c2, data2 = correlation_lookup[corr2]
		print "Will correlate columns %s and %s."%(c1,c2), 

		cat1 = tc.Catalog(ra=self.res["ra"]*60, dec=self.res["dec"]*60, ra_units=units, dec_units=units, g1=data1[c1%1], g2=data1[c1%2])
		cat2 = tc.Catalog(ra=self.res["ra"]*60, dec=self.res["dec"]*60, ra_units=units, dec_units=units, g1=data2[c2%1], g2=data2[c2%2])

		gg = tc.GGCorrelation(nbins=nbins, min_sep=xmin, max_sep=xmax, sep_units=units)

		gg.process(cat1,cat2)

		setattr(gg, "theta", np.exp(gg.logr))

		print "stored"
		self.corr[(corr1,corr2)]=gg
Example #29
    def shearshear(self, cat1, cat2=None):
        '''calculate shear-shear correlation '''
        ggkwargs = {
            'min_sep': 3,
            'max_sep': 90,
            'nbins': 12,
            'sep_units': 'arcmin'
        }
        gg = treecorr.GGCorrelation(**ggkwargs)
        tree_cat1 = self.io.df_to_corr(cat1, shears=True)
        if cat2 is not None:
            tree_cat2 = self.io.df_to_corr(cat2, shears=True)
            gg.process(tree_cat1, tree_cat2)
        else:
            gg.process(tree_cat1)
        r = np.exp(gg.meanlogr)
        xip = gg.xip
        xim = gg.xim
        sig = np.sqrt(gg.varxip)

        return {'xip': xip, 'xim': xim, 'r': r, 'sig': sig}
Example #30
File: xi_de.py  Project: seccolf/DESWL
def compute_xi(ra, dec, e1, e2, w, s):

    # Apply the mean sensitivity in each case
    #means = numpy.sum(w*s) / numpy.sum(w)
    #e1c = e1 / means
    #e2c = e2 / means

    # Build a TreeCorr catalog
    cat = treecorr.Catalog(ra=ra,
                           dec=dec,
                           g1=e1,
                           g2=e2,
                           k=s,
                           w=w,
                           ra_units='deg',
                           dec_units='deg')

    # Compute the correlation function
    gg = treecorr.GGCorrelation(bin_size=0.05,
                                min_sep=0.1,
                                max_sep=500,
                                sep_units='arcmin',
                                output_dots=True,
                                verbose=2)
    ss = treecorr.KKCorrelation(bin_size=0.05,
                                min_sep=0.1,
                                max_sep=500,
                                sep_units='arcmin',
                                output_dots=True,
                                verbose=2)

    print('Start corr2')
    gg.process(cat)
    ss.process(cat)

    gg.xip /= ss.xi
    gg.xim /= ss.xi
    gg.varxi /= ss.xi**2

    return gg
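Note on the normalization at the end: dividing the GG outputs by the KK correlation of the sensitivities applies the sensitivity correction pair-by-pair, an alternative to dividing e1/e2 by a single mean sensitivity as sketched in the commented-out block at the top of the function.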