Example #1
def test_subvol_gmf(Mr):
    """
    """
    thr = -1.0 * np.float(Mr)
    model = PrebuiltHodModelFactory("zheng07", threshold=thr, halocat="multidark", redshift=0.0)
    model.new_haloprop_func_dict = {"sim_subvol": util.mk_id_column}

    datsubvol = lambda x: util.mask_func(x, 0)
    model.populate_mock(simname="multidark", masking_function=datsubvol, enforce_PBC=False)

    # compute group richness
    # These are wrong because they assume periodicity
    # rich = richness(model.mock.compute_fof_group_ids())
    # gmf = GMF(rich)  #GMF
    # print gmf
    # print GMF(rich , counts = True)

    galaxy_sample = model.mock.galaxy_table
    x = galaxy_sample["x"]
    y = galaxy_sample["y"]
    z = galaxy_sample["z"]
    vz = galaxy_sample["vz"]

    pos = three_dim_pos_bundle(model.mock.galaxy_table, "x", "y", "z", velocity=vz, velocity_distortion_dimension="z")

    b_para, b_perp = 0.2, 0.2
    groups = FoFGroups(pos, b_perp, b_para, period=None, Lbox=200, num_threads="max")

    gids = groups.group_ids
    rich = richness(gids)
    gmf = GMF(rich)

    print(gmf)
    print(GMF(rich, counts=True))
    return None
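The `richness` and `GMF` helpers used above are not shown in these examples. Judging from how they are called here, and from the group-multiplicity histogram computed explicitly in Example #7, a minimal sketch could look like the following; this is an assumption about their behaviour, not the project's actual implementation (the default bins and the Lbox=200 subvolume size are taken from Example #3):

import numpy as np

def richness(group_ids):
    # Hypothetical helper: richness of each FoF group, i.e. how many
    # galaxies share every unique group id.
    _, counts = np.unique(group_ids, return_counts=True)
    return counts

def GMF(rich, counts=False, bins=None, Lbox=200.):
    # Hypothetical helper: group multiplicity function. Returns raw group
    # counts per richness bin if counts=True, otherwise the comoving
    # number density of groups per bin.
    if bins is None:
        bins = np.array([1, 2., 3., 4., 5., 6., 7, 9, 11, 14, 17, 20])
    hist = np.histogram(rich, bins)[0]
    return hist if counts else hist / Lbox**3.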
Example #2
def main():
     
    # Entire MultiDark Volume (Analytic xi) 
    cov = np.loadtxt("../data/wpxicov_dr72_bright0_mr21.0_z0.159_nj400")
    print(cov.shape)
    model = PrebuiltHodModelFactory('zheng07', threshold=-21)
    print(model.param_dict)
    model.param_dict['logM0'] =  12.59
    model.param_dict['sigma_logM'] =  0.49
    model.param_dict['logMmin'] =  12.78
    model.param_dict['alpha'] =  1.14
    model.param_dict['logM1'] =  13.99
    #, 'sigma_logM': 0.39, 'logMmin': 12.79, 'alpha': 1.15, 'logM1': 13.94}

    halocat = CachedHaloCatalog(simname = 'bolplanck', redshift = 0, halo_finder = 'rockstar')
    model.populate_mock(halocat, enforce_PBC = True)
    pos = three_dim_pos_bundle(model.mock.galaxy_table, 'x', 'y', 'z')

    tstart = time.time()
    t0 = tstart
    pos = pos.astype(np.float32)
    x, y, z = pos[:,0] , pos[:,1] , pos[:,2]
    t1 = time.time()
    print("Done reading the data - time taken = {0:10.1f} seconds"
          .format(t1 - t0))
    print("Beginning Correlation functions calculations")
    boxsize = 250
    nthreads = 4
    pimax = 40.0
    binfile = path.join(path.dirname(path.abspath(__file__)),
                        "../", "bin")
    autocorr = 1
    numbins_to_print = 12
    
    print("\nRunning 2-D projected correlation function wp(rp)")
    results_wp = _countpairs.countpairs_wp(boxsize, pimax, nthreads,
                                           binfile, x, y, z)
    print("\n#            ******    wp: first {0} bins  *******         "
          .format(numbins_to_print))
    print("#      rmin        rmax       rpavg        wp       npairs")
    print("##########################################################")
    for ibin in range(numbins_to_print):
        items = results_wp[ibin]
        print("{0:12.4f} {1:12.4f} {2:10.4f} {3:10.1f} {4:10d}"
              .format(items[0], items[1], items[2], items[3], items[4]))
    print("-----------------------------------------------------------")
    
    data_wp = np.loadtxt("../data/wpxi_dr72_bright0_mr21.0_z0.159_nj400")[:,1]
    print(data_wp.shape)
    data_wp_error = np.sqrt(np.diag(cov)[:12])
    print(data_wp_error.shape) 
    rbins = np.loadtxt(binfile)
    rs = np.mean(rbins , axis = 1)
    plt.figure(figsize=(10,10))
    plt.errorbar(rs , data_wp , data_wp_error , fmt=".k" , capsize = 2)
    plt.plot(rs , np.array(results_wp)[:,3])
    plt.loglog()
    plt.savefig("wp.pdf")
Example #3
def test_GMFbinning(Mr):
    """ Tests for the GMF binning scheme in order to make it sensible. 
    """
    gids_saved = "gids_saved.p"
    if not os.path.isfile(gids_saved):
        thr = -1.0 * np.float(Mr)
        model = PrebuiltHodModelFactory("zheng07", threshold=thr, halocat="multidark", redshift=0.0)
        model.new_haloprop_func_dict = {"sim_subvol": util.mk_id_column}

        datsubvol = lambda x: util.mask_func(x, 0)
        model.populate_mock(simname="multidark", masking_function=datsubvol, enforce_PBC=False)

        galaxy_sample = model.mock.galaxy_table
        x = galaxy_sample["x"]
        y = galaxy_sample["y"]
        z = galaxy_sample["z"]
        vz = galaxy_sample["vz"]

        pos = three_dim_pos_bundle(
            model.mock.galaxy_table, "x", "y", "z", velocity=vz, velocity_distortion_dimension="z"
        )

        b_para, b_perp = 0.2, 0.2
        groups = FoFGroups(pos, b_perp, b_para, period=None, Lbox=200, num_threads="max")

        pickle.dump(groups, open(gids_saved, "wb"))
    else:
        groups = pickle.load(open(gids_saved, "rb"))

    gids = groups.group_ids
    rich = richness(gids)
    # print "rich=" , rich
    # rbins = np.logspace(np.log10(3.0), np.log10(20), 10)  # unused: overridden by the explicit bins below
    rbins = np.array([1, 2.0, 3.0, 4.0, 5.0, 6.0, 7, 9, 11, 14, 17, 20])
    gmf = GMF(rich, counts=False, bins=rbins)
    gmf_counts = GMF(rich, counts=True, bins=rbins)

    print(rbins)
    print(gmf)
    print(gmf_counts)
    fig = plt.figure(1)
    sub = fig.add_subplot(111)

    sub.plot(0.5 * (rbins[:-1] + rbins[1:]), gmf)

    sub.set_xscale("log")
    sub.set_yscale("log")

    plt.show()
    # print gmf
    # print GMF(rich , counts = True)
    return None
Example #4
def test_subvol_gmf(Mr):
    '''
    '''
    thr = -1. * np.float(Mr)
    model = PrebuiltHodModelFactory('zheng07',
                                    threshold=thr,
                                    halocat='multidark',
                                    redshift=0.)
    model.new_haloprop_func_dict = {'sim_subvol': util.mk_id_column}

    datsubvol = lambda x: util.mask_func(x, 0)
    model.populate_mock(simname='multidark',
                        masking_function=datsubvol,
                        enforce_PBC=False)

    #compute group richness
    # These are wrong because they assume periodicity
    #rich = richness(model.mock.compute_fof_group_ids())
    #gmf = GMF(rich)  #GMF
    #print gmf
    #print GMF(rich , counts = True)

    galaxy_sample = model.mock.galaxy_table
    x = galaxy_sample['x']
    y = galaxy_sample['y']
    z = galaxy_sample['z']
    vz = galaxy_sample['vz']

    pos = three_dim_pos_bundle(model.mock.galaxy_table,
                               'x',
                               'y',
                               'z',
                               velocity=vz,
                               velocity_distortion_dimension="z")

    b_para, b_perp = 0.2, 0.2
    groups = FoFGroups(pos,
                       b_perp,
                       b_para,
                       period=None,
                       Lbox=200,
                       num_threads='max')

    gids = groups.group_ids
    rich = richness(gids)
    gmf = GMF(rich)

    print(gmf)
    print(GMF(rich, counts=True))
    return None
Example #5
def model(theta, Mstar, prior_range = None):

    '''
    Given theta (HOD parameters) and a stellar mass threshold Mstar,
    compute wp(rp).
    Note: it is not clear that prior_range is actually needed here.
    '''

    model = PrebuiltHodModelFactory('hearin15', threshold = Mstar, 
                                     redshift = 0.0 ,
                                     central_assembias_strength = 0.,
                                     satellite_assembias_strength = 0.)

    model.param_dict['alphasat'] = theta[0]#1.0
    model.param_dict['bcut'] = theta[1]#1.47, 
    model.param_dict['betacut'] = theta[2]#0.13
    model.param_dict['betasat'] =  theta[3]#0.859
    model.param_dict['bsat'] = theta[4]#0.62
    model.param_dict['mean_occupation_centrals_assembias_param1'] = theta[5] # 0
    model.param_dict['mean_occupation_satellites_assembias_param1'] = theta[6] #.75
    model.param_dict['scatter_model_param1'] = theta[7]#0.2
    model.param_dict['smhm_beta_0'] = theta[8]# 0.43 
    model.param_dict['smhm_beta_a'] = theta[9]#0.18 
    model.param_dict['smhm_delta_0'] = theta[10]#0.56
    model.param_dict['smhm_delta_a'] = theta[11]#0.18, 
    model.param_dict['smhm_gamma_0']= theta[12]#1.54 
    model.param_dict['smhm_gamma_a'] = theta[13]#2.52, 
    model.param_dict['smhm_m0_0'] = theta[14]# 10.72, 
    model.param_dict['smhm_m0_a'] = theta[15]# 0.59 
    model.param_dict['smhm_m1_0'] = theta[16]# 12.35, 
    model.param_dict['smhm_m1_a'] = theta[17]#0.3
 
    
    model.populate_mock(simname = 'bolshoi', redshift = 0, halo_finder = 'rockstar')
    #x = model.mock.galaxy_table['x']
    #y = model.mock.galaxy_table['y']
    #z = model.mock.galaxy_table['z']
    #all_positions = return_xyz_formatted_array(x, y, z)    
    all_positions = three_dim_pos_bundle(model.mock.galaxy_table, 'x', 'y', 'z')
    
    wp_all = wp(all_positions, rp_bins, pi_bins,
                period=model.mock.Lbox, estimator = 'Landy-Szalay', num_threads = 1)
    #wp = proj_clustering(pos , rbins , pbins , cellsize = [rmax, rmax, rmax])
    print(1)
    return rp_bin_centers, wp_all
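A minimal usage sketch for the `model` function above, assuming (as the function itself does) that `rp_bins`, `pi_bins`, and `rp_bin_centers` are defined at module level. The parameter vector packs the 18 values in the same order as the `param_dict` assignments, and the commented fiducial values give a natural starting point:

import numpy as np

# Hypothetical fiducial theta, in the order assigned inside model():
# alphasat, bcut, betacut, betasat, bsat, central assembias, satellite
# assembias, scatter, then the ten smhm_* parameters.
theta_fid = np.array([1.0, 1.47, 0.13, 0.859, 0.62, 0.0, 0.75, 0.2,
                      0.43, 0.18, 0.56, 0.18, 1.54, 2.52, 10.72, 0.59,
                      12.35, 0.3])

rp_centers, wp_model = model(theta_fid, Mstar=10.5)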
Example #6
def build_xi_nbar_gmf(Mr=21):
    '''
    Build "data" xi, nbar, GMF values and write to file
    '''
    model = PrebuiltHodModelFactory('zheng07', threshold = -1.0*np.float(Mr))
    model.populate_mock(halocat = halocat , enforce_PBC = False) # populate mock realization
    pos = three_dim_pos_bundle(model.mock.galaxy_table, 'x', 'y', 'z')

    # write xi
    rbins = hardcoded_xi_bins()
    rmax = rbins.max()
    approx_cell1_size = [rmax, rmax, rmax]
    approx_cell2_size = approx_cell1_size
    approx_cellran_size = [rmax, rmax, rmax]
    period = np.array([Lbox , Lbox , Lbox]) 
    data_xir = tpcf(
            sample1, rbins, sample2 = sample2, 
            randoms=randoms, period = period, 
            max_sample_size=int(1e4), estimator='Landy-Szalay', 
            approx_cell1_size=approx_cell1_size, 
            approx_cellran_size=approx_cellran_size, 
            RR_precomputed = RR, 
            NR_precomputed = NR1) 
    output_file = ''.join([util.dat_dir(), 'xir.Mr', str(Mr), '.dat'])
    np.savetxt(output_file, data_xir)

    # write nbar values
    nbar = model.mock.number_density
    output_file = ''.join([util.dat_dir(), 'nbar.Mr', str(Mr), '.dat'])
    np.savetxt(output_file, [nbar])

    # write GMF
    rich = richness(model.mock.compute_fof_group_ids())
    gmf = GMF(rich)  # GMF
    output_file = ''.join([util.dat_dir(), 'gmf.Mr', str(Mr), '.dat'])
    np.savetxt(output_file, gmf)

    return None
Example #7
def build_nbar_xi_gmf_cov(Mr=21):
    ''' Build covariance matrix for the full nbar, xi, gmf data vector
    using realisations of galaxy mocks for "data" HOD parameters in the 
    halos from the multidark simulation. Covariance matrices for different sets of observables
    can be extracted from the full covariance matrix by slicing through 
    the indices. 

    '''
    nbars = []
    xir = []
    gmfs = []

    thr = -1. * np.float(Mr)
    model = PrebuiltHodModelFactory('zheng07', threshold=thr)
    halocat = CachedHaloCatalog(simname='multidark',
                                redshift=0,
                                halo_finder='rockstar')
    #some settings for tpcf calculations
    rbins = hardcoded_xi_bins()

    for i in range(1, 125):
        print('mock#', i)

        # populate the mock subvolume
        model.populate_mock(halocat)
        # returning the positions of galaxies
        pos = three_dim_pos_bundle(model.mock.galaxy_table, 'x', 'y', 'z')
        # calculate nbar
        nbars.append(len(pos) / 1000**3.)
        # translate the positions of randoms to the new subbox
        #calculate xi(r)
        xi = tpcf(pos,
                  rbins,
                  period=model.mock.Lbox,
                  max_sample_size=int(2e5),
                  estimator='Landy-Szalay')
        xir.append(xi)
        # calculate gmf
        nbar = len(pos) / 1000**3.
        b_normal = 0.75
        b = b_normal * (nbar)**(-1. / 3)
        groups = pyfof.friends_of_friends(pos, b)
        w = np.array([len(x) for x in groups])
        gbins = gmf_bins()
        gmf = np.histogram(w, gbins)[0] / (1000.**3.)
        gmfs.append(gmf)  # GMF
    # save nbar variance
    nbar_var = np.var(nbars, axis=0, ddof=1)
    nbar_file = ''.join(
        [util.multidat_dir(), 'abc_nbar_var.Mr',
         str(Mr), '.dat'])
    np.savetxt(nbar_file, [nbar_var])

    # write full covariance matrix of various combinations of the data
    # and invert for the likelihood evaluations

    # --- covariance for all three ---
    fulldatarr = np.hstack(
        (np.array(nbars).reshape(len(nbars),
                                 1), np.array(xir), np.array(gmfs)))
    fullcov = np.cov(fulldatarr.T)
    fullcorr = np.corrcoef(fulldatarr.T)
    # and save the covariance matrix
    nopoisson_file = ''.join([
        util.multidat_dir(), 'abc_nbar_xi_gmf_cov.no_poisson.Mr',
        str(Mr), '.dat'
    ])
    np.savetxt(nopoisson_file, fullcov)

    # and a correlation matrix
    full_corr_file = ''.join(
        [util.multidat_dir(), 'abc_nbar_xi_gmf_corr.Mr',
         str(Mr), '.dat'])
    np.savetxt(full_corr_file, fullcorr)

    return None
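The docstring of Example #7 notes that covariances for subsets of the observables can be read off the full matrix by slicing. Since the data vector is stacked as [nbar, xi(r) bins, gmf bins], a minimal slicing sketch follows; the filename and bin counts here are assumptions and must match whatever the function actually wrote:

import numpy as np

cov_file = "abc_nbar_xi_gmf_cov.no_poisson.Mr21.dat"   # assumed filename
n_xi, n_gmf = 14, 11                                   # assumed numbers of xi(r) and GMF bins

fullcov = np.loadtxt(cov_file)               # shape (1 + n_xi + n_gmf, 1 + n_xi + n_gmf)
var_nbar = fullcov[0, 0]                               # nbar variance
cov_xi = fullcov[1:1 + n_xi, 1:1 + n_xi]               # xi-only block
cov_gmf = fullcov[1 + n_xi:, 1 + n_xi:]                # gmf-only block
cov_nbar_xi = fullcov[:1 + n_xi, :1 + n_xi]            # joint nbar + xi block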
Example #8
                                     np.log10(20.), 15)])
rmax = xi_bin.max()

# Get those randoms
num_randoms = 50 * 800000
xran = np.random.uniform(0, 1000, num_randoms)
yran = np.random.uniform(0, 1000, num_randoms)
zran = np.random.uniform(0, 1000, num_randoms)
full_randoms = np.vstack((xran, yran, zran)).T

# Get the full box mock
model = PrebuiltHodModelFactory('zheng07', threshold=-21)
halocat = CachedHaloCatalog(simname='multidark', redshift=0,
                            halo_finder='rockstar')
model.populate_mock(halocat, enforce_PBC=False)
pos = three_dim_pos_bundle(model.mock.galaxy_table, 'x', 'y', 'z')

# Get full tpcf
print "getting full vol tpcf..."
xi_full_pc = tpcf(pos, xi_bin,
                  randoms=full_randoms,
                  do_auto=True, do_cross=False,
                  max_sample_size=int(pos.shape[0]),
                  estimator='Natural',
                  approx_cell1_size=[rmax, rmax, rmax],
                  approx_cellran_size=[rmax, rmax, rmax])
print "done"

Nsub = 8

# Now set up for subvol boxes
Example #9
def main():
     
    cov = np.loadtxt("../../data/wpxicov_dr72_bright0_mr21.0_z0.159_nj400")
    f_MD = (1. + 71.74*10**6. / (1000.)**3.)
    f_bol = (1. + 71.74*10.**6. / (250.)**3.)
      
    print("covariance correction factor=" , f_bol/f_MD)
    cov = cov*f_bol/f_MD
   
    model.param_dict['logM0'] =  12.59
    model.param_dict['sigma_logM'] =  0.49
    model.param_dict['logMmin'] =  12.78
    model.param_dict['alpha'] =  1.14
    model.param_dict['logM1'] =  13.99

    model.param_dict['mean_occupation_satellites_assembias_param1'] = 0.0
    halocat = CachedHaloCatalog(simname = 'bolplanck', redshift = 0, halo_finder = 'rockstar')
    model.populate_mock(halocat, enforce_PBC = True)
    pos = three_dim_pos_bundle(model.mock.galaxy_table, 'x', 'y', 'z')
  
    print("modelnumber density=" , len(pos)/halocat.Lbox**3.)
    print("data number density=" , 1.16*10**-3.) 
    x = model.mock.galaxy_table['x']
    y = model.mock.galaxy_table['y']
    z = model.mock.galaxy_table['z']
    vz = model.mock.galaxy_table['vz']

    # applying RSD
    pos = return_xyz_formatted_array(x, y, z, velocity = vz, velocity_distortion_dimension = 'z')
    # enforcing PBC
    pos =  enforce_periodicity_of_box(pos, halocat.Lbox)

    tstart = time.time()
    t0 = tstart
    pos = pos.astype(np.float32)
    x, y, z = pos[:,0] , pos[:,1] , pos[:,2]
    t1 = time.time()
    print("Done reading the data - time taken = {0:10.1f} seconds"
          .format(t1 - t0))
    print("Beginning Correlation functions calculations")
    boxsize = halocat.Lbox
    nthreads = 4
    pimax = 40.0
    binfile = path.join(path.dirname(path.abspath(__file__)),
                        "../../", "bin")
    autocorr = 1
    numbins_to_print = 12
    
    print("\nRunning 2-D projected correlation function wp(rp)")
    results_wp = _countpairs.countpairs_wp(boxsize, pimax, nthreads,
                                           binfile, x, y, z)
    print("\n#            ******    wp: first {0} bins  *******         "
          .format(numbins_to_print))
    print("#      rmin        rmax       rpavg        wp       npairs")
    print("##########################################################")
    for ibin in range(numbins_to_print):
        items = results_wp[ibin]
        print("{0:12.4f} {1:12.4f} {2:10.4f} {3:10.1f} {4:10d}"
              .format(items[0], items[1], items[2], items[3], items[4]))
    print("-----------------------------------------------------------")
    
    data_wp = np.loadtxt("../../data/wpxi_dr72_bright0_mr21.0_z0.159_nj400")[:,1]
    print(data_wp.shape)
    data_wp_error = np.sqrt(np.diag(cov)[:12])
    print(data_wp_error.shape) 
    rbins = np.loadtxt(binfile)
    rs = np.mean(rbins , axis = 1)
    plt.figure(figsize=(10,10))
    plt.errorbar(rs , data_wp , data_wp_error , fmt=".k" , capsize = 2)
    plt.plot(rs , np.array(results_wp)[:,3])
    plt.loglog()
    plt.savefig("wp.pdf")
Example #10
def Subvolume_FullvolumeCut(N_sub, ratio=False):
    ''' Test the 2PCF estimates from MultiDark subvolume versus the 
    2PCF for the entire MultiDark volume WITHOUT periodic boundary conditions
    and actual pair counts, CUT into subvolumes of the same size *AFTER*
    populate mock 


    Parameters
    ----------
    N_sub : (int)
        Number of subvolumes to sample

    '''
    prettyplot()
    pretty_colors = prettycolors()

    pickle_file = ''.join([
        '/export/bbq2/hahn/ccppabc/dump/', 'xi_subvolume_fullvolume_cut_test',
        '.Nsub',
        str(N_sub), '.p'
    ])

    fig = plt.figure(1)
    sub = fig.add_subplot(111)

    xi_bin = xi_binedges()

    # Entire MultiDark Volume (No Periodic Boundary Conditions)
    model = PrebuiltHodModelFactory('zheng07', threshold=-21)
    halocat = CachedHaloCatalog(simname='multidark',
                                redshift=0,
                                halo_finder='rockstar')

    sub_RR = data_RR(box='md_sub')
    sub_randoms = data_random(box='md_sub')
    sub_NR = len(sub_randoms)

    rmax = xi_bin.max()
    full_approx_cell1_size = [rmax, rmax, rmax]
    full_approx_cellran_size = [rmax, rmax, rmax]

    model.populate_mock(halocat, enforce_PBC=False)
    subvol_id = util.mk_id_column(table=model.mock.galaxy_table)
    full_pos = three_dim_pos_bundle(model.mock.galaxy_table, 'x', 'y', 'z')

    # Full Volume
    if os.path.isfile(pickle_file):
        data_dump = pickle.load(open(pickle_file, 'rb'))
        full_xi = data_dump['full_xi']
    else:
        model = PrebuiltHodModelFactory('zheng07', threshold=-21)
        halocat = CachedHaloCatalog(simname='multidark',
                                    redshift=0,
                                    halo_finder='rockstar')

        full_randoms = data_random(box='md_all')
        full_RR = data_RR(box='md_all')
        full_NR = len(full_randoms)

        rmax = xi_bin.max()
        full_approx_cell1_size = [rmax, rmax, rmax]
        full_approx_cellran_size = [rmax, rmax, rmax]

        model.populate_mock(halocat, enforce_PBC=False)
        full_pos = three_dim_pos_bundle(model.mock.galaxy_table, 'x', 'y', 'z')

        full_xi = tpcf(full_pos,
                       xi_bin,
                       randoms=full_randoms,
                       period=None,
                       do_auto=True,
                       do_cross=False,
                       num_threads=5,
                       max_sample_size=int(full_pos.shape[0]),
                       estimator='Natural',
                       approx_cell1_size=full_approx_cell1_size,
                       approx_cellran_size=full_approx_cellran_size,
                       RR_precomputed=full_RR,
                       NR_precomputed=full_NR)
        data_dump = {}
        data_dump['full_xi'] = full_xi

    if not ratio:
        sub.plot(0.5 * (xi_bin[:-1] + xi_bin[1:]),
                 full_xi,
                 lw=2,
                 ls='-',
                 c='k',
                 label=r'Full Volume')

    if os.path.isfile(pickle_file):
        fullcut_xi_list = data_dump['fullcut_xi']['fullcut_xi_list']
        fullcut_xi_avg = data_dump['fullcut_xi']['fullcut_xi_avg']
    else:
        data_dump['fullcut_xi'] = {}
        fullcut_xi_list = []
        fullcut_xi_tot = np.zeros(len(xi_bin) - 1)
        for id in np.unique(subvol_id)[:N_sub]:
            print('Subvolume ', id)
            in_cut = np.where(subvol_id == id)

            fullcut_pos = full_pos[in_cut]

            fullcut_xi = tpcf(fullcut_pos,
                              xi_bin,
                              randoms=sub_randoms,
                              period=None,
                              do_auto=True,
                              do_cross=False,
                              num_threads=5,
                              max_sample_size=int(fullcut_pos.shape[0]),
                              estimator='Natural',
                              approx_cell1_size=full_approx_cell1_size,
                              approx_cellran_size=full_approx_cellran_size,
                              RR_precomputed=sub_RR,
                              NR_precomputed=sub_NR)

            fullcut_xi_list.append(fullcut_xi)
            fullcut_xi_tot += fullcut_xi

        fullcut_xi_avg = fullcut_xi_tot / np.float(N_sub)
        data_dump['fullcut_xi']['fullcut_xi_list'] = fullcut_xi_list
        data_dump['fullcut_xi']['fullcut_xi_avg'] = fullcut_xi_avg

    if not ratio:
        sub.plot(0.5 * (xi_bin[:-1] + xi_bin[1:]),
                 fullcut_xi_avg,
                 lw=2,
                 ls='-',
                 c='k',
                 label=r'Full Volume Cut Average')
    else:
        sub.plot(0.5 * (xi_bin[:-1] + xi_bin[1:]),
                 fullcut_xi_avg / full_xi,
                 lw=2,
                 ls='-',
                 c='k',
                 label=r'Full Volume Cut Average')

    if not os.path.isfile(pickle_file):
        # MultiDark SubVolume (precomputed RR pairs)
        sub_model = PrebuiltHodModelFactory('zheng07', threshold=-21)
        sub_model.new_haloprop_func_dict = {'sim_subvol': util.mk_id_column}
        sub_halocat = CachedHaloCatalog(simname='multidark',
                                        redshift=0,
                                        halo_finder='rockstar')
        sub_RR = data_RR(box='md_sub')
        sub_randoms = data_random(box='md_sub')
        sub_NR = len(sub_randoms)

        sub_xis_list = []
        sub_xis = np.zeros(len(full_xi))

        for ii in range(1, N_sub):
            print('Subvolume ', ii)
            # randomly sample one of the subvolumes
            rint = ii  #np.random.randint(1, 125)
            simsubvol = lambda x: util.mask_func(x, rint)
            sub_model.populate_mock(sub_halocat,
                                    masking_function=simsubvol,
                                    enforce_PBC=False)

            pos = three_dim_pos_bundle(sub_model.mock.galaxy_table, 'x', 'y',
                                       'z')

            xi, yi, zi = util.random_shifter(rint)
            temp_randoms = sub_randoms.copy()
            temp_randoms[:, 0] += xi
            temp_randoms[:, 1] += yi
            temp_randoms[:, 2] += zi

            rmax = xi_bin.max()
            sub_approx_cell1_size = [rmax, rmax, rmax]
            sub_approx_cellran_size = [rmax, rmax, rmax]

            sub_xi = tpcf(pos,
                          xi_bin,
                          randoms=temp_randoms,
                          period=None,
                          do_auto=True,
                          do_cross=False,
                          num_threads=5,
                          max_sample_size=int(pos.shape[0]),
                          estimator='Natural',
                          approx_cell1_size=sub_approx_cell1_size,
                          approx_cellran_size=sub_approx_cellran_size,
                          RR_precomputed=sub_RR,
                          NR_precomputed=sub_NR)

            label = None
            if ii == N_sub - 1:
                label = 'Subvolumes'

            sub_xis += sub_xi
            sub_xis_list.append(sub_xi)

        sub_xi_avg = sub_xis / np.float(N_sub)

        data_dump['Natural'] = {}
        data_dump['Natural']['sub_xi_avg'] = sub_xi_avg
        data_dump['Natural']['sub_xis_list'] = sub_xis_list
    else:
        sub_xis_list = data_dump['Natural']['sub_xis_list']
        sub_xi_avg = data_dump['Natural']['sub_xi_avg']

    if not os.path.isfile(pickle_file):
        pickle.dump(data_dump, open(pickle_file, 'wb'))

    if not ratio:
        sub.plot(0.5 * (xi_bin[:-1] + xi_bin[1:]),
                 sub_xi_avg,
                 lw=2,
                 ls='--',
                 c=pretty_colors[3],
                 label='Subvolume')
    else:
        sub.plot(0.5 * (xi_bin[:-1] + xi_bin[1:]),
                 sub_xi_avg / full_xi,
                 lw=2,
                 ls='--',
                 c=pretty_colors[3],
                 label='Subvolume')

    sub.set_xlim([0.1, 50.])
    sub.set_xlabel('r', fontsize=30)
    sub.set_xscale('log')

    if not ratio:
        sub.set_ylabel(r"$\xi \mathtt{(r)}$", fontsize=25)
        sub.set_yscale('log')
    else:
        sub.set_ylabel(r"$\overline{\xi^\mathtt{sub}}/\xi^\mathtt{all}$",
                       fontsize=25)

    sub.legend(loc='lower left')

    if ratio:
        fig_file = ''.join([
            util.fig_dir(), 'test_xi_subvolume_fullvolume_cut.Nsub',
            str(N_sub), '.ratio.png'
        ])
    else:
        fig_file = ''.join([
            util.fig_dir(), 'test_xi_subvolume_fullvolume_cut.Nsub',
            str(N_sub), '.png'
        ])
    fig.savefig(fig_file, bbox_inches='tight', dpi=100)
    plt.close()
    return None
Example #11
def test_precomputed_rr(Nr, Mr = 21):
    '''
    Mr = Luminosity threshold
    Nr = Number of randoms
    '''

    rbins = np.logspace(-1, 1.25, 15)
    rmax = rbins.max()
    rbin_centers = (rbins[1:] + rbins[0:-1])/2.    

    halocat = CachedHaloCatalog(simname = 'bolshoi', redshift = 0)
    model = PrebuiltHodModelFactory("zheng07")
    model.populate_mock(halocat = halocat, enforce_PBC = False)
    data = three_dim_pos_bundle(model.mock.galaxy_table, 'x', 'y', 'z')
    print(data.shape)
    L = halocat.Lbox

    xmin , ymin , zmin = 0., 0., 0.
    xmax , ymax , zmax = L, L, L
   
    
    num_randoms = Nr
    xran = np.random.uniform(xmin, xmax, num_randoms)
    yran = np.random.uniform(ymin, ymax, num_randoms)
    zran = np.random.uniform(zmin, zmax, num_randoms)
    randoms = np.vstack((xran, yran, zran)).T
    

    verbose = False
    num_threads = cpu_count()

    period = None
    approx_cell1_size = [rmax, rmax, rmax]
    approx_cell2_size = approx_cell1_size
    approx_cellran_size = [rmax, rmax, rmax]

    normal_result = tpcf(
            data, rbins, data, 
            randoms=randoms, period = period, 
            max_sample_size=int(1e4), estimator='Landy-Szalay', 
            approx_cell1_size=approx_cell1_size, 
            approx_cellran_size=approx_cellran_size)


    #count data pairs
    DD = npairs(
            data, data, rbins, period,
            verbose, num_threads,
            approx_cell1_size, approx_cell2_size)
    DD = np.diff(DD)
    #count random pairs
    RR = npairs(
            randoms, randoms, rbins, period,
            verbose, num_threads,
            approx_cellran_size, approx_cellran_size)
    RR = np.diff(RR)
    #count data random pairs 
    DR = npairs(
            data, randoms, rbins, period,
            verbose, num_threads,
            approx_cell1_size, approx_cell2_size)
    DR = np.diff(DR)

    print "DD=", DD
    print "DR=", DR
    print "RR=", RR        
    
    ND = len(data)
    NR = len(randoms)

    factor1 = ND*ND/(NR*NR)
    factor2 = ND*NR/(NR*NR)

    mult = lambda x,y: x*y
    xi_LS = mult(1.0/factor1,DD/RR) - mult(1.0/factor2,2.0*DR/RR) + 1.0

    print "xi=" , xi_LS
    print "normal=" , normal_result

    result_with_RR_precomputed = tpcf(
            data, rbins, data, 
            randoms=randoms, period = period, 
            max_sample_size=int(1e5), estimator='Landy-Szalay', 
            approx_cell1_size=approx_cell1_size, 
            approx_cellran_size=approx_cellran_size, 
            RR_precomputed = RR, 
            NR_precomputed = NR)
    
    print "xi_pre=" , result_with_RR_precomputed
Example #12
from halotools.empirical_models import PrebuiltHodModelFactory

# The "fiducial" stellar mass threshold is 10**10.5
model = PrebuiltHodModelFactory('hearin15', threshold = 10.5, redshift = 0., 
                        central_assembias_strength = 1, 
                        satellite_assembias_strength = 1)
model.param_dict['scatter_model_param1'] = 0.4 # This is the "fiducial" scatter used throughout the paper

baseline_model = PrebuiltHodModelFactory('leauthaud11', threshold = 10.5, redshift = 0.)
baseline_model.param_dict['scatter_model_param1'] = 0.4

#base_model

baseline_model.populate_mock()
pos = three_dim_pos_bundle(baseline_model.mock.galaxy_table, 'x', 'y', 'z')
wp_base = proj_clustering(pos , rbins , pbins , cellsize = [rmax, rmax, rmax])


#central assembly bias only

model.param_dict['mean_occupation_satellites_assembias_param1'] = 0
model.param_dict['mean_occupation_centrals_assembias_param1'] = 1
model.populate_mock()
pos = three_dim_pos_bundle(model.mock.galaxy_table, 'x', 'y', 'z')
wp_cen_only = proj_clustering(pos , rbins , pbins , cellsize = [rmax, rmax, rmax])

# Satellite assembias only

model.param_dict['mean_occupation_satellites_assembias_param1'] = 1
model.param_dict['mean_occupation_centrals_assembias_param1'] = 0
Example #13
def main():
   

    ###############################Multi-Dark########################### 
    
    model = PrebuiltHodModelFactory('zheng07', threshold=-21)
    halocat = CachedHaloCatalog(simname = 'multidark', redshift = 0, halo_finder = 'rockstar')
    t0 = time.time()
    model.populate_mock(halocat, enforce_PBC = False)
    print(time.time() - t0)
    t0 = time.time()
    model.populate_mock(halocat, enforce_PBC = False)
    print(time.time() - t0)
    pos = three_dim_pos_bundle(model.mock.galaxy_table, 'x', 'y', 'z')
    nthreads = 1

    binfile = path.join(path.dirname(path.abspath(__file__)),
                        "/home/mj/Corrfunc/xi_theory/tests/", "bins")
    autocorr = 1
    pos = pos.astype(np.float32)
   
    x , y , z = pos[:,0] , pos[:,1] , pos[:,2]
    DD = _countpairs.countpairs(autocorr, nthreads, binfile, x, y, z,
                                x , y , z)
    ND = len(pos)
    NR = 50*800000
    DD = np.array(DD)[:,3]
    
    
    num_randoms = 50 * 800000
    xran = np.random.uniform(0, 1000, num_randoms)
    yran = np.random.uniform(0, 1000, num_randoms)
    zran = np.random.uniform(0, 1000, num_randoms)
    randoms = np.vstack((xran, yran, zran)).T 
    
    randoms = randoms.astype(np.float32)

    xran = randoms[:,0]
    yran = randoms[:,1]
    zran = randoms[:,2]

     
    results_RR = _countpairs.countpairs(autocorr, nthreads, binfile, xran, yran, zran,
                                        xran, yran, zran)
    
    RR = np.array(results_RR)[:,3]
    factor1 = 1.*ND*ND/(NR*NR)
    mult = lambda x,y: x*y
    xi_MD = mult(1.0/factor1 , DD/RR) - 1.

    print(xi_MD)
   

    ###############################Subvolume of Multi-Dark########################### 
     
    num_srandoms = 50 * 8000
    xran = np.random.uniform(0, 200, num_srandoms)
    yran = np.random.uniform(0, 200, num_srandoms)
    zran = np.random.uniform(0, 200, num_srandoms)
    randoms = np.vstack((xran, yran, zran)).T 
    
    randoms = randoms.astype(np.float32)

    xran = randoms[:,0]
    yran = randoms[:,1]
    zran = randoms[:,2]

    results_RR = _countpairs.countpairs(autocorr, nthreads, binfile, xran, yran, zran,
                                        xran, yran, zran)
    RR_sub = np.array(results_RR)[:,3] 
    
    
    import util    
    sub_model = PrebuiltHodModelFactory('zheng07' , threshold = -21)
    sub_model.new_haloprop_func_dict = {'sim_subvol': util.mk_id_column}
    sub_halocat = CachedHaloCatalog(simname='multidark', redshift=0, halo_finder='rockstar')     
    xi_subs = [] 
    for i in range(10):
      simsubvol = lambda x: util.mask_func(x, i)
      sub_model.populate_mock(sub_halocat, masking_function=simsubvol, enforce_PBC=False)
      sub_pos = three_dim_pos_bundle(sub_model.mock.galaxy_table, 'x', 'y', 'z')
      nthreads = 1
      binfile = path.join(path.dirname(path.abspath(__file__)),
                        "/home/mj/Corrfunc/xi_theory/tests/", "bins")
      autocorr = 1
      sub_pos = sub_pos.astype(np.float32)
      x , y , z = sub_pos[:,0] , sub_pos[:,1] , sub_pos[:,2]
      DD_sub = _countpairs.countpairs(autocorr, nthreads, binfile, x, y, z,
                                        x, y, z)
    
      ND_sub = len(sub_pos)
      NR_sub = 50 * 8000
      DD_sub = np.array(DD_sub)[:,3]
      factor1 = 1.*ND_sub*ND_sub/(NR_sub*NR_sub)
      mult = lambda x,y: x*y
      xi_n = mult(1.0/factor1 , DD_sub/RR_sub) - 1.
      xi_subs.append(xi_n)
    
    xi_subs = np.array(xi_subs)
    np.savetxt("xi_subs.dat" , xi_subs)
     
    import matplotlib.pyplot as plt
    from ChangTools.plotting import prettyplot
    from ChangTools.plotting import prettycolors 

        
    binfile = path.join(path.dirname(path.abspath(__file__)),
                        "/home/mj/Corrfunc/xi_theory/tests/", "bins")
    rbins =  np.loadtxt(binfile)
    rbins_centers = np.mean(rbins , axis = 1)
    
    xi_subs = np.loadtxt("xi_subs.dat")

    fig = plt.figure(figsize=(10,10))
    ax = fig.add_subplot(111)
    for i in range(10):

        ax.semilogx(rbins_centers , xi_subs[i,:] / xi_MD , alpha = 0.2)
        plt.xlabel(r"$r$" , fontsize = 20)
        plt.ylabel(r"$\xi_{\rm subvolume}(r) / \xi_{\rm MD}(r)$" , fontsize = 20) 
        plt.savefig("xi_ratios.pdf")
Example #14
def Subvolume_FullvolumeCut(N_sub, ratio=False): 
    ''' Test the 2PCF estimates from MultiDark subvolume versus the 
    2PCF for the entire MultiDark volume WITHOUT periodic boundary conditions
    and actual pair counts, CUT into subvolumes of the same size *AFTER*
    populate mock 


    Parameters
    ----------
    N_sub : (int)
        Number of subvolumes to sample

    '''
    prettyplot()
    pretty_colors = prettycolors()

    pickle_file = ''.join([
        '/export/bbq2/hahn/ccppabc/dump/', 
        'xi_subvolume_fullvolume_cut_test', 
        '.Nsub', str(N_sub), 
        '.p'])
    
    fig = plt.figure(1)
    sub = fig.add_subplot(111)

    xi_bin = xi_binedges() 
    
    # Entire MultiDark Volume (No Periodic Boundary Conditions) 
    model = PrebuiltHodModelFactory('zheng07', threshold=-21)
    halocat = CachedHaloCatalog(simname = 'multidark', redshift = 0, halo_finder = 'rockstar')

    sub_RR = data_RR(box='md_sub')
    sub_randoms = data_random(box='md_sub')
    sub_NR = len(sub_randoms)

    rmax = xi_bin.max()
    full_approx_cell1_size = [rmax , rmax , rmax]
    full_approx_cellran_size = [rmax , rmax , rmax]

    model.populate_mock(halocat, enforce_PBC=False)
    subvol_id = util.mk_id_column(table=model.mock.galaxy_table)
    full_pos = three_dim_pos_bundle(model.mock.galaxy_table, 'x', 'y', 'z')
    
    # Full Volume 
    if os.path.isfile(pickle_file):
        data_dump = pickle.load(open(pickle_file, 'rb'))
        full_xi = data_dump['full_xi']
    else: 
        model = PrebuiltHodModelFactory('zheng07', threshold=-21)
        halocat = CachedHaloCatalog(simname = 'multidark', redshift = 0, halo_finder = 'rockstar')

        full_randoms = data_random(box='md_all')
        full_RR = data_RR(box='md_all')
        full_NR = len(full_randoms)

        rmax = xi_bin.max()
        full_approx_cell1_size = [rmax , rmax , rmax]
        full_approx_cellran_size = [rmax , rmax , rmax]

        model.populate_mock(halocat, enforce_PBC=False)
        full_pos = three_dim_pos_bundle(model.mock.galaxy_table, 'x', 'y', 'z')
        
        full_xi = tpcf(
                full_pos, xi_bin, 
                randoms=full_randoms, period=None, 
                do_auto=True,
                do_cross=False, 
                num_threads=5, 
                max_sample_size=int(full_pos.shape[0]), estimator='Natural', 
                approx_cell1_size=full_approx_cell1_size, 
                approx_cellran_size=full_approx_cellran_size,
                RR_precomputed = full_RR,
                NR_precomputed = full_NR)
        data_dump = {} 
        data_dump['full_xi'] = full_xi
    
    if not ratio:  
        sub.plot(0.5*(xi_bin[:-1]+xi_bin[1:]), full_xi, 
                lw=2, ls='-', c='k', label=r'Full Volume') 

    if os.path.isfile(pickle_file):
        fullcut_xi_list = data_dump['fullcut_xi']['fullcut_xi_list']
        fullcut_xi_avg =  data_dump['fullcut_xi']['fullcut_xi_avg']
    else: 
        data_dump['fullcut_xi'] = {}
        fullcut_xi_list = [] 
        fullcut_xi_tot = np.zeros(len(xi_bin)-1)
        for id in np.unique(subvol_id)[:N_sub]: 
            print('Subvolume ', id)
            in_cut = np.where(subvol_id == id) 

            fullcut_pos = full_pos[in_cut]

            fullcut_xi = tpcf(
                    fullcut_pos, xi_bin, 
                    randoms=sub_randoms, period=None, 
                    do_auto=True,
                    do_cross=False, 
                    num_threads=5, 
                    max_sample_size=int(fullcut_pos.shape[0]), estimator='Natural', 
                    approx_cell1_size=full_approx_cell1_size, 
                    approx_cellran_size=full_approx_cellran_size,
                    RR_precomputed=sub_RR,
                    NR_precomputed=sub_NR)

            fullcut_xi_list.append(fullcut_xi)
            fullcut_xi_tot += fullcut_xi
        
        fullcut_xi_avg = fullcut_xi_tot / np.float(N_sub)
        data_dump['fullcut_xi']['fullcut_xi_list']= fullcut_xi_list
        data_dump['fullcut_xi']['fullcut_xi_avg']= fullcut_xi_avg

    if not ratio:  
        sub.plot(0.5*(xi_bin[:-1]+xi_bin[1:]), fullcut_xi_avg, 
                lw=2, ls='-', c='k', label=r'Full Volume Cut Average') 
    else: 
        sub.plot(0.5*(xi_bin[:-1]+xi_bin[1:]), fullcut_xi_avg/full_xi, 
                lw=2, ls='-', c='k', label=r'Full Volume Cut Average') 
    
    if not os.path.isfile(pickle_file):
        # MultiDark SubVolume (precomputed RR pairs) 
        sub_model = PrebuiltHodModelFactory('zheng07', threshold=-21)
        sub_model.new_haloprop_func_dict = {'sim_subvol': util.mk_id_column}
        sub_halocat = CachedHaloCatalog(simname = 'multidark', redshift = 0, halo_finder = 'rockstar')
        sub_RR = data_RR(box='md_sub')
        sub_randoms = data_random(box='md_sub')
        sub_NR = len(sub_randoms)
    
        
        sub_xis_list = [] 
        sub_xis = np.zeros(len(full_xi)) 

        for ii in range(1,N_sub): 
            print('Subvolume ', ii)
            # randomly sample one of the subvolumes
            rint = ii #np.random.randint(1, 125)
            simsubvol = lambda x: util.mask_func(x, rint)
            sub_model.populate_mock(sub_halocat, masking_function=simsubvol, enforce_PBC=False)
               
            pos = three_dim_pos_bundle(sub_model.mock.galaxy_table, 'x', 'y', 'z')

            xi, yi , zi = util.random_shifter(rint)
            temp_randoms = sub_randoms.copy()
            temp_randoms[:,0] += xi
            temp_randoms[:,1] += yi
            temp_randoms[:,2] += zi
            
            rmax = xi_bin.max()
            sub_approx_cell1_size = [rmax , rmax , rmax]
            sub_approx_cellran_size = [rmax , rmax , rmax]

            sub_xi = tpcf(
                    pos, xi_bin,  
                    randoms=temp_randoms, 
                    period = None, 
                    do_auto=True,
                    do_cross=False, 
                    num_threads=5, 
                    max_sample_size=int(pos.shape[0]), 
                    estimator='Natural', 
                    approx_cell1_size = sub_approx_cell1_size, 
                    approx_cellran_size = sub_approx_cellran_size,
                    RR_precomputed=sub_RR,
                    NR_precomputed=sub_NR)

            label = None 
            if ii == N_sub - 1: 
                label = 'Subvolumes'
            
            sub_xis += sub_xi
            sub_xis_list.append(sub_xi)

        sub_xi_avg = sub_xis/np.float(N_sub)

        data_dump['Natural'] = {} 
        data_dump['Natural']['sub_xi_avg'] = sub_xi_avg
        data_dump['Natural']['sub_xis_list'] = sub_xis_list 
    else: 
        sub_xis_list = data_dump['Natural']['sub_xis_list']
        sub_xi_avg = data_dump['Natural']['sub_xi_avg'] 

    if not os.path.isfile(pickle_file): 
        pickle.dump(data_dump, open(pickle_file, 'wb')) 

    if not ratio: 
        sub.plot(0.5*(xi_bin[:-1]+xi_bin[1:]), sub_xi_avg, 
                lw=2, ls='--', c=pretty_colors[3], label='Subvolume')
    else: 
        sub.plot(0.5*(xi_bin[:-1]+xi_bin[1:]), sub_xi_avg/full_xi, 
                lw=2, ls='--', c=pretty_colors[3], label='Subvolume')

    sub.set_xlim([0.1, 50.])
    sub.set_xlabel('r', fontsize=30)
    sub.set_xscale('log')
    
    if not ratio: 
        sub.set_ylabel(r"$\xi \mathtt{(r)}$", fontsize=25)
        sub.set_yscale('log')
    else: 
        sub.set_ylabel(r"$\overline{\xi^\mathtt{sub}}/\xi^\mathtt{all}$", fontsize=25)

    sub.legend(loc='lower left')
    
    if ratio: 
        fig_file = ''.join([util.fig_dir(), 'test_xi_subvolume_fullvolume_cut.Nsub', str(N_sub), '.ratio.png'])
    else:
        fig_file = ''.join([util.fig_dir(), 'test_xi_subvolume_fullvolume_cut.Nsub', str(N_sub), '.png'])
    fig.savefig(fig_file, bbox_inches='tight', dpi=100)
    plt.close()
    return None 
Example #15
def build_MCMC_cov_nbar_xi_gmf(Mr=21, b_normal=0.25):
    ''' Build covariance matrix used in MCMC for the full nbar, xi, gmf data vector
    using realisations of galaxy mocks for "data" HOD parameters in the 
    halos from the other subvolumes (subvolume 1 to subvolume 125) of
    the simulation. Covariance matrices for different sets of observables
    can be extracted from the full covariance matrix by slicing through 
    the indices. 

    '''
    nbars = []
    xir = []
    gmfs = []

    thr = -1. * np.float(Mr)
    model = PrebuiltHodModelFactory('zheng07', threshold=thr)
    halocat = CachedHaloCatalog(simname='multidark',
                                redshift=0,
                                halo_finder='rockstar')
    ###model.new_haloprop_func_dict = {'sim_subvol': util.mk_id_column}

    #some settings for tpcf calculations
    rbins = xi_binedges()
    rmax = rbins.max()
    approx_cell1_size = [rmax, rmax, rmax]
    approx_cellran_size = [rmax, rmax, rmax]

    #load randoms and RRs

    randoms = data_random(box='md_sub')
    RR = data_RR(box='md_sub')
    NR = len(randoms)

    for i in range(1, 125):
        print('mock#', i)

        # populate the mock subvolume
        ###mocksubvol = lambda x: util.mask_func(x, i)
        ###model.populate_mock(halocat,
        ###                    masking_function=mocksubvol,
        ###                    enforce_PBC=False)
        model.populate_mock(halocat)
        # returning the positions of galaxies in the entire volume
        pos = three_dim_pos_bundle(model.mock.galaxy_table, 'x', 'y', 'z')
        # masking out the galaxies outside the subvolume i
        pos = util.mask_galaxy_table(pos, i)
        # calculate nbar
        print "shape of pos", pos.shape
        nbars.append(len(pos) / 200**3.)
        # translate the positions of randoms to the new subbox
        xi0, yi0, zi0 = util.random_shifter(i)
        temp_randoms = randoms.copy()
        temp_randoms[:, 0] += xi0
        temp_randoms[:, 1] += yi0
        temp_randoms[:, 2] += zi0
        #calculate xi(r)
        xi = tpcf(pos,
                  rbins,
                  pos,
                  randoms=temp_randoms,
                  period=None,
                  max_sample_size=int(3e5),
                  estimator='Natural',
                  approx_cell1_size=approx_cell1_size,
                  approx_cellran_size=approx_cellran_size,
                  RR_precomputed=RR,
                  NR_precomputed=NR)
        xir.append(xi)
        # calculate gmf

        nbar = len(pos) / 200**3.
        b = b_normal * (nbar)**(-1. / 3)
        groups = pyfof.friends_of_friends(pos, b)
        w = np.array([len(x) for x in groups])
        gbins = gmf_bins()
        gmf = np.histogram(w, gbins)[0] / 200.**3.
        gmfs.append(gmf)

    # save nbar variance
    nbar_var = np.var(nbars, axis=0, ddof=1)
    nbar_file = ''.join([util.obvs_dir(), 'nbar_var.Mr', str(Mr), '.dat'])
    np.savetxt(nbar_file, [nbar_var])

    # write full covariance matrix of various combinations of the data
    # and invert for the likelihood evaluations

    # --- covariance for all three ---
    fulldatarr = np.hstack(
        (np.array(nbars).reshape(len(nbars),
                                 1), np.array(xir), np.array(gmfs)))
    fullcov = np.cov(fulldatarr.T)
    fullcorr = np.corrcoef(fulldatarr.T)

    # and save the covariance matrix
    nopoisson_file = ''.join([
        util.obvs_dir(), 'MCMC.nbar_xi_gmf_cov', '.no_poisson', '.Mr',
        str(Mr), '.bnorm',
        str(round(b_normal, 2)), '.dat'
    ])
    np.savetxt(nopoisson_file, fullcov)
    return None
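As in Example #7, the FoF grouping here uses a linking length tied to the mean inter-galaxy separation, b = b_normal \cdot \bar{n}^{-1/3}, so `b_normal` (0.25 by default here, 0.75 in Example #7) expresses the linking length as a fraction of the mean separation rather than as an absolute distance.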
Example #16
    def _sum_stat(self, theta, prior_range=None, observables=['nbar', 'gmf']):
        '''
        Given theta, sum_stat calculates the observables from our forward model

        Parameters
        ----------
        theta : array of HOD parameters [logM0, ln(sigma_logM), logMmin, alpha, logM1]
        prior_range : If specified, checks to make sure that theta is within the prior range.
        '''
        self.model.param_dict['logM0'] = theta[0]
        self.model.param_dict['sigma_logM'] = np.exp(theta[1])
        self.model.param_dict['logMmin'] = theta[2]
        self.model.param_dict['alpha'] = theta[3]
        self.model.param_dict['logM1'] = theta[4]

        rbins = xi_binedges()
        rmax = rbins.max()
        approx_cell1_size = [rmax, rmax, rmax]
        approx_cellran_size = [rmax, rmax, rmax]

        if prior_range is None:

            self.model.populate_mock(self.halocat, enforce_PBC=False)
            pos = three_dim_pos_bundle(self.model.mock.galaxy_table, 'x', 'y',
                                       'z')
            obvs = []

            for obv in observables:
                if obv == 'nbar':
                    obvs.append(len(pos) /
                                1000.**3.)  # nbar of the galaxy catalog
                elif obv == 'gmf':
                    nbar = len(pos) / 1000**3.
                    b = self.b_normal * (nbar)**(-1. / 3)
                    groups = pyfof.friends_of_friends(pos, b)
                    w = np.array([len(x) for x in groups])
                    gbins = data_gmf_bins()
                    gmf = np.histogram(w, gbins)[0] / (1000.**3.)
                    obvs.append(gmf)
                elif obv == 'xi':
                    greek_xi = tpcf(pos,
                                    rbins,
                                    pos,
                                    randoms=randoms,
                                    period=None,
                                    max_sample_size=int(3e5),
                                    estimator='Natural',
                                    approx_cell1_size=approx_cell1_size,
                                    approx_cellran_size=approx_cellran_size,
                                    RR_precomputed=self.RR,
                                    NR_precomputed=self.NR)
                    obvs.append(greek_xi)
                else:
                    raise NotImplementedError(
                        'Only nbar, 2pcf, and gmf are implemented so far')

            return obvs

        else:
            if np.all((prior_range[:, 0] < theta)
                      & (theta < prior_range[:, 1])):
                # if all theta_i is within prior range ...
                try:

                    self.model.populate_mock(self.halocat)
                    pos = three_dim_pos_bundle(self.model.mock.galaxy_table,
                                               'x', 'y', 'z')
                    obvs = []
                    for obv in observables:
                        if obv == 'nbar':
                            obvs.append(len(pos) /
                                        1000**3.)  # nbar of the galaxy catalog
                        elif obv == 'gmf':
                            nbar = len(pos) / 1000**3.
                            b = self.b_normal * (nbar)**(-1. / 3)
                            groups = pyfof.friends_of_friends(pos, b)
                            w = np.array([len(x) for x in groups])
                            gbins = data_gmf_bins()
                            gmf = np.histogram(w, gbins)[0] / (1000.**3.)
                            obvs.append(gmf)
                        elif obv == 'xi':
                            greek_xi = tpcf(
                                pos,
                                rbins,
                                pos,
                                randoms=randoms,
                                period=None,
                                max_sample_size=int(3e5),
                                estimator='Natural',
                                approx_cell1_size=approx_cell1_size,
                                approx_cellran_size=approx_cellran_size,
                                RR_precomputed=self.RR,
                                NR_precomputed=self.NR)
                            obvs.append(greek_xi)
                        else:
                            raise NotImplementedError(
                                'Only nbar, tpcf, and gmf are implemented so far'
                            )

                    return obvs

                except ValueError:

                    obvs = []
                    for obv in observables:
                        if obv == 'nbar':
                            obvs.append(10.)
                        elif obv == 'gmf':
                            bins = data_gmf_bins()
                            obvs.append(np.ones_like(bins)[:-1] * 1000.)
                        elif obv == 'xi':
                            obvs.append(np.zeros(len(xi_binedges()[:-1])))
                    return obvs
            else:
                obvs = []
                for obv in observables:
                    if obv == 'nbar':
                        obvs.append(10.)
                    elif obv == 'gmf':
                        bins = data_gmf_bins()
                        obvs.append(np.ones_like(bins)[:-1] * 1000.)
                    elif obv == 'xi':
                        obvs.append(np.zeros(len(xi_binedges()[:-1])))
                return obvs
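A minimal sketch of calling `_sum_stat` on an instance (here called `hod`, which is hypothetical): note that the second entry of theta is ln(sigma_logM), because the method exponentiates it before writing it into `param_dict`.

import numpy as np

# logM0, ln(sigma_logM), logMmin, alpha, logM1 (values are the zheng07
# fiducials quoted in Example #2; purely illustrative)
theta = np.array([12.59, np.log(0.49), 12.78, 1.14, 13.99])

nbar_model, gmf_model = hod._sum_stat(theta, observables=['nbar', 'gmf'])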
Example #17
    def _sum_stat(self, theta, prior_range=None, observables=['nbar', 'gmf']):
        '''
        Given theta, sum_stat calculates the observables from our forward model

        Parameters
        ----------
        theta : array of HOD parameters [logM0, ln(sigma_logM), logMmin, alpha, logM1]
        prior_range : If specified, checks to make sure that theta is within the prior range.
        '''
        self.model.param_dict['logM0'] = theta[0]
        self.model.param_dict['sigma_logM'] = np.exp(theta[1])
        self.model.param_dict['logMmin'] = theta[2]
        self.model.param_dict['alpha'] = theta[3]
        self.model.param_dict['logM1'] = theta[4]

        rbins = xi_binedges()
        rmax = rbins.max()
        approx_cell1_size = [rmax , rmax , rmax]
        approx_cellran_size = [rmax , rmax , rmax]

        if prior_range is None:
            
            self.model.populate_mock(self.halocat)
            pos =three_dim_pos_bundle(self.model.mock.galaxy_table, 'x', 'y', 'z')
            obvs = []

            for obv in observables:
                if obv == 'nbar':
                    obvs.append(len(pos) / 1000.**3.)       # nbar of the galaxy catalog
                elif obv == 'gmf':
                    nbar = len(pos) / 1000**3.
                    b = self.b_normal * (nbar)**(-1./3)
                    groups = pyfof.friends_of_friends(pos, b)
                    w = np.array([len(x) for x in groups])
                    gbins = data_gmf_bins()
                    gmf = np.histogram(w, gbins)[0] / (1000.**3.)
                    obvs.append(gmf)   
                elif obv == 'xi':
                    greek_xi = tpcf(
                            pos, rbins,  
                            period=self.model.mock.Lbox, 
                            max_sample_size=int(3e5), estimator='Natural', 
                            approx_cell1_size=approx_cell1_size)
                    obvs.append(greek_xi)
                else:
                    raise NotImplementedError('Only nbar, 2pcf, and gmf are implemented so far')

            return obvs

        else:
            if np.all((prior_range[:,0] < theta) & (theta < prior_range[:,1])):
                # if all theta_i is within prior range ...
                try:


                    self.model.populate_mock(self.halocat) 
                    pos=three_dim_pos_bundle(self.model.mock.galaxy_table, 'x', 'y', 'z')
                    obvs = []
                    for obv in observables:
                        if obv == 'nbar':
                            obvs.append(len(pos) / 1000**3.)       # nbar of the galaxy catalog
                        elif obv == 'gmf':
                            nbar = len(pos) / 1000**3.
                            b = self.b_normal * (nbar)**(-1./3)
                            groups = pyfof.friends_of_friends(pos, b)
                            w = np.array([len(x) for x in groups])
                            gbins = data_gmf_bins()
                            gmf = np.histogram(w, gbins)[0] / (1000.**3.)
                            obvs.append(gmf)
                        elif obv == 'xi':
                            greek_xi = tpcf(
                                    pos, rbins, period=self.model.mock.Lbox, 
                                    max_sample_size=int(3e5), estimator='Natural', 
                                    approx_cell1_size=approx_cell1_size)
                            obvs.append(greek_xi)
                        else:
                            raise NotImplementedError('Only nbar, tpcf, and gmf are implemented so far')

                    return obvs

                except ValueError:

                    obvs = []
                    for obv in observables:
                        if obv == 'nbar':
                            obvs.append(10.)
                        elif obv == 'gmf':
                            bins = data_gmf_bins()
                            obvs.append(np.ones_like(bins)[:-1]*1000.)
                        elif obv == 'xi':
                            obvs.append(np.zeros(len(xi_binedges()[:-1])))
                    return obvs
            else:
                obvs = []
                for obv in observables:
                    if obv == 'nbar':
                        obvs.append(10.)
                    elif obv == 'gmf':
                        bins = data_gmf_bins()
                        obvs.append(np.ones_like(bins)[:-1]*1000.)
                    elif obv == 'xi':
                        obvs.append(np.zeros(len(xi_binedges()[:-1])))
                return obvs
Example #18
    def _sum_stat(self, theta, prior_range=None, observables=['nbar', 'gmf']):
        '''
        Given theta, sum_stat calculates the observables from our forward model

        Parameters
        ----------
        theta : array of HOD parameters [logM0, ln(sigma_logM), logMmin, alpha, logM1]
        prior_range : If specified, checks to make sure that theta is within the prior range.
        '''
        self.model.param_dict['logM0'] = theta[0]
        self.model.param_dict['sigma_logM'] = np.exp(theta[1])
        self.model.param_dict['logMmin'] = theta[2]
        self.model.param_dict['alpha'] = theta[3]
        self.model.param_dict['logM1'] = theta[4]

        rbins = xi_binedges()
        rmax = rbins.max()
        period = None
        approx_cell1_size = [rmax , rmax , rmax]
        approx_cellran_size = [rmax , rmax , rmax]

        if prior_range is None:
            rint = np.random.randint(1, 125)
            ####simsubvol = lambda x: util.mask_func(x, rint)
            ####self.model.populate_mock(self.halocat,
            ####                masking_function=simsubvol,
            ####                enforce_PBC=False)
            self.model.populate_mock(self.halocat)
                        
            pos = three_dim_pos_bundle(self.model.mock.galaxy_table, 'x', 'y', 'z')
            pos = util.mask_galaxy_table(pos, rint)

            xi , yi , zi = util.random_shifter(rint)
            temp_randoms = self.randoms.copy()
            temp_randoms[:,0] += xi
            temp_randoms[:,1] += yi
            temp_randoms[:,2] += zi

            obvs = []
            for obv in observables:
                if obv == 'nbar':
                    obvs.append(len(pos) / 200**3.)       # nbar of the galaxy catalog
                elif obv == 'gmf':
                    # compute group richness
                    nbar = len(pos) / 200**3.
                    b = self.b_normal * (nbar)**(-1./3)
                    groups = pyfof.friends_of_friends(pos, b)
                    w = np.array([len(x) for x in groups])
                    gbins = data_gmf_bins()
                    gmf = np.histogram(w, gbins)[0] / (200.**3.)
                    obvs.append(gmf)
                elif obv == 'xi':
                    greek_xi = tpcf(
                        pos, rbins, pos, 
                        randoms=temp_randoms, period = period, 
                        max_sample_size=int(1e5), estimator='Natural', 
                        approx_cell1_size=approx_cell1_size, 
                        approx_cellran_size=approx_cellran_size,
                        RR_precomputed = self.RR,
	                NR_precomputed = self.NR)

                    obvs.append(greek_xi)
                else:
                    raise NotImplementedError('Only nbar, 2pcf, and gmf are implemented so far')

            return obvs

        else:
            if np.all((prior_range[:,0] < theta) & (theta < prior_range[:,1])):
                # if all theta_i is within prior range ...
                try:
                    rint = np.random.randint(1, 125)
                    simsubvol = lambda x: util.mask_func(x, rint)
                    self.model.populate_mock(self.halocat,
                                masking_function=simsubvol,
                                enforce_PBC=False)
           
                    pos = three_dim_pos_bundle(self.model.mock.galaxy_table, 'x', 'y', 'z')
                    # imposing mask on the galaxy table
                    pos = util.mask_galaxy_table(pos, rint)
                    xi, yi, zi = util.random_shifter(rint)
                    temp_randoms = self.randoms.copy()
                    temp_randoms[:,0] += xi
                    temp_randoms[:,1] += yi
                    temp_randoms[:,2] += zi
                    obvs = []

                    for obv in observables:
                        if obv == 'nbar':
                            obvs.append(len(pos) / 200**3.)       # nbar of the galaxy catalog
                        elif obv == 'gmf':
                            nbar = len(pos) / 200**3.
                            b = self.b_normal * (nbar)**(-1./3)
                            groups = pyfof.friends_of_friends(pos, b)
                            w = np.array([len(x) for x in groups])
                            gbins = data_gmf_bins()
                            gmf = np.histogram(w, gbins)[0] / (200.**3.)
                            obvs.append(gmf)
                        elif obv == 'xi':
                            greek_xi = tpcf(
                                pos, rbins, pos,
                                randoms=temp_randoms, period=period,
                                max_sample_size=int(1e5), estimator='Natural',
                                approx_cell1_size=approx_cell1_size,
                                approx_cellran_size=approx_cellran_size,
                                RR_precomputed=self.RR,
                                NR_precomputed=self.NR)

                            obvs.append(greek_xi)
                        else:
                            raise NotImplementedError('Only nbar, tpcf, and gmf are implemented so far')

                    return obvs

                except ValueError:

                    obvs = []
                    for obv in observables:
                        if obv == 'nbar':
                            obvs.append(10.)
                        elif obv == 'gmf':
                            bins = data_gmf_bins()
                            obvs.append(np.ones_like(bins)[:-1]*1000.)
                        elif obv == 'xi':
                            obvs.append(np.zeros(len(xi_binedges()[:-1])))
                    return obvs
            else:
                obvs = []
                for obv in observables:
                    if obv == 'nbar':
                        obvs.append(10.)
                    elif obv == 'gmf':
                        bins = data_gmf_bins()
                        obvs.append(np.ones_like(bins)[:-1]*1000.)
                    elif obv == 'xi':
                        obvs.append(np.zeros(len(xi_binedges()[:-1])))
                return obvs
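# Usage sketch (hypothetical): assuming a wrapper object `hodsim` that exposes this
# method together with self.model, self.halocat, self.randoms, self.RR, self.NR and
# self.b_normal, the call pattern would be roughly
#     obvs = hodsim._sum_stat(theta, prior_range=prior_range, observables=['nbar', 'gmf'])
# which returns one summary statistic per requested observable, in order.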
Esempio n. 19
0
def test_GMFbinning(Mr):
    ''' Tests for the GMF binning scheme in order to make it sensible. 
    '''
    gids_saved = 'gids_saved.p'
    if not os.path.isfile(gids_saved):
        thr = -1. * np.float(Mr)
        model = PrebuiltHodModelFactory('zheng07',
                                        threshold=thr,
                                        halocat='multidark',
                                        redshift=0.)
        model.new_haloprop_func_dict = {'sim_subvol': util.mk_id_column}

        datsubvol = lambda x: util.mask_func(x, 0)
        model.populate_mock(simname='multidark',
                            masking_function=datsubvol,
                            enforce_PBC=False)

        galaxy_sample = model.mock.galaxy_table
        x = galaxy_sample['x']
        y = galaxy_sample['y']
        z = galaxy_sample['z']
        vz = galaxy_sample['vz']

        pos = three_dim_pos_bundle(model.mock.galaxy_table,
                                   'x',
                                   'y',
                                   'z',
                                   velocity=vz,
                                   velocity_distortion_dimension="z")

        b_para, b_perp = 0.2, 0.2
        groups = FoFGroups(pos,
                           b_perp,
                           b_para,
                           period=None,
                           Lbox=200,
                           num_threads='max')

        pickle.dump(groups, open(gids_saved, 'wb'))
    else:
        groups = pickle.load(open(gids_saved, 'rb'))

    gids = groups.group_ids
    rich = richness(gids)
    #print "rich=" , rich
    # rbins = np.logspace(np.log10(3.), np.log10(20), 10)  # (unused; overridden by the binning below)
    rbins = np.array([1, 2., 3., 4., 5., 6., 7, 9, 11, 14, 17, 20])
    gmf = GMF(rich, counts=False, bins=rbins)
    gmf_counts = GMF(rich, counts=True, bins=rbins)

    print rbins
    print gmf
    print gmf_counts
    fig = plt.figure(1)
    sub = fig.add_subplot(111)

    sub.plot(0.5 * (rbins[:-1] + rbins[1:]), gmf)

    sub.set_xscale('log')
    sub.set_yscale('log')

    plt.show()
    #print gmf
    #print GMF(rich , counts = True)
    return None
Esempio n. 20
0
def richness(group_id):
    '''
    Calculate the richness of a group given group_ids of galaxies. Uses astropy.table module
    '''
    gals = Table()
    gals['groupid'] = group_id
    gals['dummy'] = 1
    grouped_table = gals.group_by('groupid')
    grp_richness = grouped_table['dummy'].groups.aggregate(np.sum)

    return grp_richness
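# example: richness(np.array([0, 0, 1, 2, 2, 2])) returns the group sizes [2, 1, 3],
# one entry per unique group id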
model = PrebuiltHodModelFactory('zheng07', threshold=-21)
halocat = CachedHaloCatalog(simname = 'multidark', redshift = 0, halo_finder = 'rockstar')

model.populate_mock(halocat)
pos = three_dim_pos_bundle(model.mock.galaxy_table, 'x', 'y', 'z')

#groups = FoFGroups(pos, 0.75, 0.75, Lbox = model.mock.Lbox, num_threads=1)
#gids = groups.group_ids

#print richness(gids)
import time

a = time.time()

groups = pyfof.friends_of_friends(pos, 0.75*(len(pos)/1000**3.)**(-1./3))

w = np.array([len(x) for x in groups])

bins = np.array([2.,3.,4.,5.,6.,7,9,11,14,17,20])
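
# (sketch, not in the original snippet) finish the timing test by binning the
# richness distribution into a GMF; the 1000**3. volume matches the linking-length
# normalisation used above for the full MultiDark box
gmf = np.histogram(w, bins)[0] / 1000.**3.
print "GMF =", gmf
print "pyfof + richness took", time.time() - a, "seconds"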
Esempio n. 21
0
def Subvolume_Analytic(N_sub, ratio=False): 
    ''' Test the 2PCF estimates from MultiDark subvolume versus the 
    analytic 2PCF for the entire MultiDark volume

    Parameters
    ----------
    N_sub : (int)
        Number of subvolumes to sample

    '''
    prettyplot()
    pretty_colors = prettycolors()

    pickle_file = ''.join([
        '/export/bbq2/hahn/ccppabc/dump/', 
        'xi_subvolume_test', 
        '.Nsub', str(N_sub), 
        '.p'])
    
    fig = plt.figure(1)
    sub = fig.add_subplot(111)

    xi_bin = xi_binedges() 
    
    if os.path.isfile(pickle_file):
        data_dump = pickle.load(open(pickle_file, 'rb'))
        full_xi = data_dump['full_xi']
    else: 
        # Entire MultiDark Volume (Analytic xi) 
        model = PrebuiltHodModelFactory('zheng07', threshold=-21)
        halocat = CachedHaloCatalog(simname = 'multidark', redshift = 0, halo_finder = 'rockstar')
        
        model.populate_mock(halocat)
        pos = three_dim_pos_bundle(model.mock.galaxy_table, 'x', 'y', 'z')
        
        # while the estimator is set to Landy-Szalay, I highly suspect it
        # actually falls back to the Natural estimator, since DR pairs cannot be
        # calculated from analytic randoms
        full_xi = tpcf(pos, xi_bin, period=model.mock.Lbox, max_sample_size=int(2e5), estimator='Landy-Szalay', num_threads=1)
        data_dump = {} 
        data_dump['full_xi'] = full_xi

    if not ratio:  
        sub.plot(0.5*(xi_bin[:-1]+xi_bin[1:]), full_xi, lw=2, ls='-', c='k', label=r'Analytic $\xi$ Entire Volume') 
    
    if not os.path.isfile(pickle_file):
        # MultiDark SubVolume (precomputed RR pairs) 
        sub_model = PrebuiltHodModelFactory('zheng07', threshold=-21)
        sub_model.new_haloprop_func_dict = {'sim_subvol': util.mk_id_column}
        sub_halocat = CachedHaloCatalog(simname = 'multidark', redshift = 0, halo_finder = 'rockstar')
        RR = data_RR()
        randoms = data_random()
        NR = len(randoms)
    
    for method in ['Landy-Szalay', 'Natural']:
        
        if method == 'Landy-Szalay': 
            iii = 3
        elif method == 'Natural': 
            iii = 5
        
        if not os.path.isfile(pickle_file): 
            sub_xis_list = [] 
            sub_xis = np.zeros(len(full_xi)) 

            for ii in range(1,N_sub+1): 
                # randomly sample one of the subvolumes
                rint = ii #np.random.randint(1, 125)
                simsubvol = lambda x: util.mask_func(x, rint)
                sub_model.populate_mock(sub_halocat, masking_function=simsubvol, enforce_PBC=False)
                   
                pos = three_dim_pos_bundle(sub_model.mock.galaxy_table, 'x', 'y', 'z')

                xi, yi , zi = util.random_shifter(rint)
                temp_randoms = randoms.copy()
                temp_randoms[:,0] += xi
                temp_randoms[:,1] += yi
                temp_randoms[:,2] += zi
                
                rmax = xi_bin.max()
                approx_cell1_size = [rmax , rmax , rmax]
                approx_cellran_size = [rmax , rmax , rmax]

                sub_xi = tpcf(
                        pos, xi_bin, pos, 
                        randoms=temp_randoms, 
                        period = None, 
                        max_sample_size=int(1e5), 
                        estimator=method, 
                        approx_cell1_size = approx_cell1_size, 
                        approx_cellran_size = approx_cellran_size,
                        RR_precomputed=RR,
                        NR_precomputed=NR)
                label = None 
                if ii == N_sub - 1: 
                    label = 'Subvolumes'
                
                #if not ratio: 
                #    sub.plot(0.5*(xi_bin[:-1]+xi_bin[1:]), sub_xi, lw=0.5, ls='--', c=pretty_colors[iii])
                sub_xis += sub_xi
                sub_xis_list.append(sub_xi)

            sub_xi_avg = sub_xis/np.float(N_sub)

            data_dump[method] = {} 
            data_dump[method]['sub_xi_avg'] = sub_xi_avg
            data_dump[method]['sub_xis_list'] = sub_xis_list 
        else: 
            sub_xis_list = data_dump[method]['sub_xis_list']
            sub_xi_avg = data_dump[method]['sub_xi_avg'] 

        if not ratio: 
            sub.plot(0.5*(xi_bin[:-1]+xi_bin[1:]), sub_xi_avg, 
                    lw=2, ls='--', c=pretty_colors[iii], label='Subvolume '+method)
        else: 
            sub.plot(0.5*(xi_bin[:-1]+xi_bin[1:]), sub_xi_avg/full_xi, 
                    lw=2, ls='--', c=pretty_colors[iii], label='Subvolume '+method)
    
    if not os.path.isfile(pickle_file): 
        pickle.dump(data_dump, open(pickle_file, 'wb')) 

    sub.set_xlim([0.1, 50.])
    sub.set_xlabel('r', fontsize=30)
    sub.set_xscale('log')
    
    if not ratio: 
        sub.set_ylabel(r"$\xi \mathtt{(r)}$", fontsize=25)
        sub.set_yscale('log')
    else: 
        sub.set_ylabel(r"$\overline{\xi^\mathtt{sub}}/\xi^\mathtt{all}$", fontsize=25)

    sub.legend(loc='lower left')
    
    if ratio: 
        fig_file = ''.join([util.fig_dir(), 'test_xi_subvolume_analytic.Nsub', str(N_sub), '.ratio.png'])
    else:
        fig_file = ''.join([util.fig_dir(), 'test_xi_subvolume_analytic.Nsub', str(N_sub), '.png'])
    fig.savefig(fig_file, bbox_inches='tight', dpi=100)
    plt.close()
    return None 
Esempio n. 22
0
def build_nbar_xi_gmf(Mr=21, b_normal=0.25):
    ''' Build data vector [nbar, xi, gmf] and save to file 
    This data vector is built from the zeroth slice of the multidark
    The other slices will be used for building the covariance matrix.

    Parameters
    ----------
    Mr : (int) 
        Absolute magnitude cut off M_r. Default M_r = -21.

    b_normal : (float) 
        FoF linking length in units of the mean inter-galaxy separation
    '''
    thr = -1. * np.float(Mr)
    model = PrebuiltHodModelFactory('zheng07', threshold=thr)
    halocat = CachedHaloCatalog(simname = 'multidark', redshift = 0, halo_finder = 'rockstar')
    ####model.new_haloprop_func_dict = {'sim_subvol': util.mk_id_column}

    ####datsubvol = lambda x: util.mask_func(x, 0)
    ####model.populate_mock(halocat, masking_function=datsubvol, enforce_PBC=False)
    model.populate_mock(halocat)
    
    #all the things necessary for tpcf calculation
    pos = three_dim_pos_bundle(model.mock.galaxy_table, 'x', 'y', 'z')
    #masking the galaxies outside the subvolume 0
    pos = util.mask_galaxy_table(pos , 0)
    rbins = xi_binedges()
    rmax = rbins.max()
    approx_cell1_size = [rmax , rmax , rmax]
    approx_cellran_size = [rmax , rmax , rmax]

    #compute number density    
    nbar = len(pos) / 200**3.
    
    # load MD subvolume randoms and RRs
    randoms = data_random(box='md_sub')
    RR = data_RR(box='md_sub')
    NR = len(randoms)
 
    # compute tpcf with the Natural estimator
    data_xir = tpcf(
                 pos, rbins, pos, 
                 randoms=randoms, period=None, 
                 max_sample_size=int(2e5), estimator='Natural', 
                 approx_cell1_size=approx_cell1_size, 
                 approx_cellran_size=approx_cellran_size, 
                 RR_precomputed=RR, 
                 NR_precomputed=NR)

    fullvec = np.append(nbar, data_xir)
    
    #compute gmf
    b = b_normal * (nbar)**(-1./3) 
    groups = pyfof.friends_of_friends(pos , b)
    w = np.array([len(x) for x in groups])
    gbins = gmf_bins()
    gmf = np.histogram(w , gbins)[0] / (200.**3.)
    fullvec = np.append(fullvec, gmf)
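    # resulting data vector layout: [nbar, xi(r) per r-bin, gmf per richness bin];
    # the covariance builders further below stack mock realisations in the same order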

    output_file = data_file(Mr=Mr, b_normal=b_normal)
    np.savetxt(output_file, fullvec)
    return None
Esempio n. 23
0
def build_nbar_xi_gmf(Mr=21, b_normal=0.25):
    ''' Build data vector [nbar, xi, gmf] and save to file 
    This data vector is built from the zeroth slice of the multidark
    The other slices will be used for building the covariance matrix.

    Parameters
    ----------
    Mr : (int) 
        Absolute magnitude cut off M_r. Default M_r = -21.

    b_normal : (float) 
        FoF linking length in units of the mean inter-galaxy separation
    '''
    thr = -1. * np.float(Mr)
    model = PrebuiltHodModelFactory('zheng07', threshold=thr)
    halocat = CachedHaloCatalog(simname='multidark',
                                redshift=0,
                                halo_finder='rockstar')
    ####model.new_haloprop_func_dict = {'sim_subvol': util.mk_id_column}

    ####datsubvol = lambda x: util.mask_func(x, 0)
    ####model.populate_mock(halocat, masking_function=datsubvol, enforce_PBC=False)
    model.populate_mock(halocat)

    #all the things necessary for tpcf calculation
    pos = three_dim_pos_bundle(model.mock.galaxy_table, 'x', 'y', 'z')
    #masking the galaxies outside the subvolume 0
    pos = util.mask_galaxy_table(pos, 0)
    rbins = xi_binedges()
    rmax = rbins.max()
    approx_cell1_size = [rmax, rmax, rmax]
    approx_cellran_size = [rmax, rmax, rmax]

    #compute number density
    nbar = len(pos) / 200**3.

    # load MD subvolume randoms and RRs
    randoms = data_random(box='md_sub')
    RR = data_RR(box='md_sub')
    NR = len(randoms)

    # compute tpcf with the Natural estimator
    data_xir = tpcf(pos,
                    rbins,
                    pos,
                    randoms=randoms,
                    period=None,
                    max_sample_size=int(2e5),
                    estimator='Natural',
                    approx_cell1_size=approx_cell1_size,
                    approx_cellran_size=approx_cellran_size,
                    RR_precomputed=RR,
                    NR_precomputed=NR)

    fullvec = np.append(nbar, data_xir)

    #compute gmf
    b = b_normal * (nbar)**(-1. / 3)
    groups = pyfof.friends_of_friends(pos, b)
    w = np.array([len(x) for x in groups])
    gbins = gmf_bins()
    gmf = np.histogram(w, gbins)[0] / (200.**3.)
    fullvec = np.append(fullvec, gmf)

    output_file = data_file(Mr=Mr, b_normal=b_normal)
    np.savetxt(output_file, fullvec)
    return None
Esempio n. 24
0
def build_MCMC_cov_nbar_xi_gmf(Mr=21, b_normal=0.25):
    ''' Build covariance matrix used in MCMC for the full nbar, xi, gmf data vector
    using realisations of galaxy mocks for "data" HOD parameters in the 
    halos from the other subvolumes (subvolumes 1 through 124) of
    the simulation. Covariance matrices for different sets of observables
    can be extracted from the full covariance matrix by slicing through 
    the indices. 

    '''
    nbars = []
    xir = []
    gmfs = []

    thr = -1. * np.float(Mr)
    model = PrebuiltHodModelFactory('zheng07', threshold=thr)
    halocat = CachedHaloCatalog(simname = 'multidark', redshift = 0, halo_finder = 'rockstar')
    ###model.new_haloprop_func_dict = {'sim_subvol': util.mk_id_column}
    
    #some settings for tpcf calculations
    rbins = xi_binedges()
    rmax = rbins.max()
    approx_cell1_size = [rmax , rmax , rmax]
    approx_cellran_size = [rmax , rmax , rmax]
    
    #load randoms and RRs
    
    randoms = data_random(box='md_sub')
    RR = data_RR(box='md_sub')
    NR = len(randoms)

    for i in xrange(1,125):
        print 'mock#', i

        # populate the mock subvolume
        ###mocksubvol = lambda x: util.mask_func(x, i)
        ###model.populate_mock(halocat,
        ###                    masking_function=mocksubvol,
        ###                    enforce_PBC=False)
        model.populate_mock(halocat)
        # returning the positions of galaxies in the entire volume
        pos = three_dim_pos_bundle(model.mock.galaxy_table, 'x', 'y', 'z')
        # masking out the galaxies outside the subvolume i
        pos = util.mask_galaxy_table(pos , i)
        # calculate nbar
        print "shape of pos" , pos.shape 
        nbars.append(len(pos) / 200**3.)
        # translate the positions of randoms to the new subbox
        xi0 , yi0 , zi0 = util.random_shifter(i)
        temp_randoms = randoms.copy()
        temp_randoms[:,0] += xi0
        temp_randoms[:,1] += yi0
        temp_randoms[:,2] += zi0
        #calculate xi(r)        
        xi = tpcf(
            pos, rbins, pos,
            randoms=temp_randoms, period=None,
            max_sample_size=int(3e5), estimator='Natural',
            approx_cell1_size=approx_cell1_size,
            approx_cellran_size=approx_cellran_size,
            RR_precomputed=RR,
            NR_precomputed=NR)
        xir.append(xi)
        # calculate gmf

        nbar = len(pos) / 200**3.
        b = b_normal * (nbar)**(-1./3) 
        groups = pyfof.friends_of_friends(pos , b)
        w = np.array([len(x) for x in groups])
        gbins = gmf_bins()
        gmf = np.histogram(w, gbins)[0] / 200.**3.
        gmfs.append(gmf)

    # save nbar variance
    nbar_var = np.var(nbars, axis=0, ddof=1)
    nbar_file = ''.join([util.obvs_dir(), 'nbar_var.Mr', str(Mr), '.dat'])
    np.savetxt(nbar_file, [nbar_var])

    # write full covariance matrix of various combinations of the data
    # and invert for the likelihood evaluations

    # --- covariance for all three ---
    fulldatarr = np.hstack((np.array(nbars).reshape(len(nbars), 1),
                            np.array(xir),
                            np.array(gmfs)))
    fullcov = np.cov(fulldatarr.T)
    fullcorr = np.corrcoef(fulldatarr.T)

    # and save the covariance matrix
    nopoisson_file = ''.join([util.obvs_dir(),
        'MCMC.nbar_xi_gmf_cov', '.no_poisson', '.Mr', str(Mr), '.bnorm', str(round(b_normal,2)), '.dat'])
    np.savetxt(nopoisson_file, fullcov)
    return None
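
# A minimal slicing sketch (hypothetical helper, not part of the original code):
# since the saved data vector is ordered [nbar | xi | gmf], matching the np.hstack
# above, sub-covariances can be read off by index.
def slice_nbar_xi_gmf_cov(fullcov, n_xi, n_gmf):
    ''' Extract sub-blocks of the full [nbar, xi, gmf] covariance matrix. '''
    cov_nbar_xi = fullcov[:1 + n_xi, :1 + n_xi]          # [nbar, xi] block
    cov_gmf = fullcov[1 + n_xi:1 + n_xi + n_gmf,
                      1 + n_xi:1 + n_xi + n_gmf]         # [gmf] block
    return cov_nbar_xi, cov_gmf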
Esempio n. 25
0
def build_ABC_cov_nbar_xi_gmf(Mr=21, b_normal=0.25):
    ''' Build covariance matrix used in ABC for the full nbar, xi, gmf data vector
    using realisations of galaxy mocks for "data" HOD parameters in the 
    halos from the multidark simulation. Covariance matrices for different sets of observables
    can be extracted from the full covariance matrix by slicing through 
    the indices. 

    Notes 
    -----
    * This covariance matrix is the covariance matrix calculated from the *entire* multidark 
        box. So this does _not_ account for the sample variance, which the MCMC covariance does. 
    '''
    nbars, xir, gmfs = [], [], []

    thr = -1. * np.float(Mr)
    model = PrebuiltHodModelFactory('zheng07', threshold=thr)
    halocat = CachedHaloCatalog(simname='multidark',
                                redshift=0,
                                halo_finder='rockstar')
    rbins = xi_binedges()  # some setting for tpcf calculations

    rmax = rbins.max()
    approx_cell1_size = [rmax, rmax, rmax]
    approx_cellran_size = [rmax, rmax, rmax]

    # load randoms and RRs for the ENTIRE MultiDark volume
    ###randoms = data_random(box='md_all')
    ###RR = data_RR(box='md_all')
    ###NR = len(randoms)

    for i in xrange(1, 125):
        print 'mock#', i
        # populate the mock subvolume
        model.populate_mock(halocat)
        # returning the positions of galaxies
        pos = three_dim_pos_bundle(model.mock.galaxy_table, 'x', 'y', 'z')

        # calculate nbar
        nbars.append(len(pos) / 1000**3.)

        # calculate xi(r) for the ENTIRE MultiDark volume
        # using the natural estimator DD/RR - 1
        xi = tpcf(pos,
                  rbins,
                  period=model.mock.Lbox,
                  max_sample_size=int(3e5),
                  estimator='Natural',
                  approx_cell1_size=approx_cell1_size)
        xir.append(xi)

        # calculate gmf
        nbar = len(pos) / 1000**3.
        b = b_normal * (nbar)**(-1. / 3)
        groups = pyfof.friends_of_friends(pos, b)
        w = np.array([len(x) for x in groups])
        gbins = gmf_bins()
        gmf = np.histogram(w, gbins)[0] / (1000.**3.)
        gmfs.append(gmf)  # GMF

    # save nbar variance
    nbar_var = np.var(nbars, axis=0, ddof=1)
    nbar_file = ''.join([util.obvs_dir(), 'abc_nbar_var.Mr', str(Mr), '.dat'])
    np.savetxt(nbar_file, [nbar_var])

    # write full covariance matrix of various combinations of the data
    # and invert for the likelihood evaluations

    # --- covariance for all three ---
    fulldatarr = np.hstack(
        (np.array(nbars).reshape(len(nbars),
                                 1), np.array(xir), np.array(gmfs)))
    fullcov = np.cov(fulldatarr.T)
    fullcorr = np.corrcoef(fulldatarr.T)
    # and save the covariance matrix
    nopoisson_file = ''.join([
        util.obvs_dir(), 'ABC.nbar_xi_gmf_cov', '.no_poisson', '.Mr',
        str(Mr), '.bnorm',
        str(round(b_normal, 2)), '.dat'
    ])
    np.savetxt(nopoisson_file, fullcov)
    return None
Esempio n. 26
0
def build_ABC_cov_nbar_xi_gmf(Mr=21, b_normal=0.25):
    ''' Build covariance matrix used in ABC for the full nbar, xi, gmf data vector
    using realisations of galaxy mocks for "data" HOD parameters in the 
    halos from the multidark simulation. Covariance matrices for different sets of observables
    can be extracted from the full covariance matrix by slicing through 
    the indices. 

    Notes 
    -----
    * This covariance matrix is the covariance matrix calculated from the *entire* multidark 
        box. So this does _not_ account for the sample variance, which the MCMC covariance does. 
    '''
    nbars, xir, gmfs = [], [], [] 

    thr = -1. * np.float(Mr)
    model = PrebuiltHodModelFactory('zheng07', threshold=thr)
    halocat = CachedHaloCatalog(simname = 'multidark', redshift = 0, halo_finder = 'rockstar')
    rbins = xi_binedges()  # some setting for tpcf calculations

    rmax = rbins.max()
    approx_cell1_size = [rmax , rmax , rmax]
    approx_cellran_size = [rmax , rmax , rmax]
    
    # load randoms and RRs for the ENTIRE MultiDark volume 
    ###randoms = data_random(box='md_all')
    ###RR = data_RR(box='md_all')
    ###NR = len(randoms)

    for i in xrange(1,125):
        print 'mock#', i
        # populate the mock subvolume
        model.populate_mock(halocat)
        # returning the positions of galaxies
        pos = three_dim_pos_bundle(model.mock.galaxy_table, 'x', 'y', 'z')

        # calculate nbar
        nbars.append(len(pos) / 1000**3.)

        # calculate xi(r) for the ENTIRE MultiDark volume 
        # using the natural estimator DD/RR - 1
        xi = tpcf(
                pos, rbins, period=model.mock.Lbox, 
                max_sample_size=int(3e5), estimator='Natural', 
                approx_cell1_size=approx_cell1_size)
        xir.append(xi)

        # calculate gmf
        nbar = len(pos) / 1000**3.
        b = b_normal * (nbar)**(-1./3) 
        groups = pyfof.friends_of_friends(pos , b)
        w = np.array([len(x) for x in groups])
        gbins = gmf_bins()
        gmf = np.histogram(w , gbins)[0] / (1000.**3.)
        gmfs.append(gmf)  # GMF

    # save nbar variance
    nbar_var = np.var(nbars, axis=0, ddof=1)
    nbar_file = ''.join([util.obvs_dir(), 'abc_nbar_var.Mr', str(Mr), '.dat'])
    np.savetxt(nbar_file, [nbar_var])

    # write full covariance matrix of various combinations of the data
    # and invert for the likelihood evaluations

    # --- covariance for all three ---
    fulldatarr = np.hstack((np.array(nbars).reshape(len(nbars), 1),
                            np.array(xir),
                            np.array(gmfs)))
    fullcov = np.cov(fulldatarr.T)
    fullcorr = np.corrcoef(fulldatarr.T)
    # and save the covariance matrix
    nopoisson_file = ''.join([util.obvs_dir(),
        'ABC.nbar_xi_gmf_cov', '.no_poisson', '.Mr', str(Mr), '.bnorm', str(round(b_normal, 2)), '.dat'])
    np.savetxt(nopoisson_file, fullcov)
    return None
Esempio n. 27
0
def main():
     
    #cov = np.loadtxt("../../data/wpxicov_dr72_bright0_mr21.0_z0.159_nj400")
    #f_MD = (1. + 71.74*10**6. / (1000.)**3.)
    #f_bol = (1. + 71.74*10.**6. / (250.)**3.)
      
    #print("covariance correction factor=" , f_bol/f_MD)
    #cov = cov*f_bol/f_MD


    gmf_cat = np.loadtxt("gmf20.dat")
    data_gmf = gmf_cat[:,2]
    data_gmf_err = (gmf_cat[:,3]**2. + gmf_cat[:,4]**2.)**.5
    data_gmf_bin = np.hstack([gmf_cat[:,0],gmf_cat[-1,1]])
    print(data_gmf_bin) 
    # NOTE: `model` is not constructed in this snippet; it is assumed to be an
    # assembly-biased 'zheng07'-style HOD model defined at module level, since the
    # param_dict below includes 'mean_occupation_centrals_assembias_param1'.
    model.param_dict['logM0'] =  11.16
    model.param_dict['sigma_logM'] =  0.61
    model.param_dict['logMmin'] =  12.10	
    model.param_dict['alpha'] =  1.08
    model.param_dict['logM1'] =  13.33
    model.param_dict['mean_occupation_centrals_assembias_param1'] = -1.
    
    halocat = CachedHaloCatalog(simname = 'bolplanck', redshift = 0, halo_finder = 'rockstar')
    model.populate_mock(halocat)
    pos = three_dim_pos_bundle(model.mock.galaxy_table, 'x', 'y', 'z')
  
    print("modelnumber density=" , len(pos)/halocat.Lbox**3.)
    print("data number density=" , 1.16*10**-3.) 

    x = model.mock.galaxy_table['x']
    y = model.mock.galaxy_table['y']
    z = model.mock.galaxy_table['z']
    vz = model.mock.galaxy_table['vz']

    # applying RSD
    pos = return_xyz_formatted_array(x, y, z, velocity = vz, velocity_distortion_dimension = 'z')
    # enforcing PBC
    #pos =  enforce_periodicity_of_box(pos, halocat.Lbox)

    from halotools.mock_observables import FoFGroups

    # FoF linking lengths perpendicular / parallel to the line of sight, in units
    # of the mean galaxy separation (halotools FoFGroups convention)
    bperp = 0.14
    bpar = 0.75
    Lbox = np.array([halocat.Lbox,halocat.Lbox,halocat.Lbox])
    period = Lbox
    groups = FoFGroups(pos, b_perp=bperp, b_para=bpar, Lbox = Lbox , period=Lbox) 
  
    #model.mock.galaxy_table['fof_group_id'] = groups.group_ids
    #model.mock.galaxy_table.sort(['fof_group_id'])
    #from halotools.utils import group_member_generator


    #grouping_key = 'fof_group_id'
    #requested_columns = []

    #group_gen = group_member_generator(model.mock.galaxy_table, grouping_key, requested_columns)

    #group_richness = np.zeros(len(model.mock.galaxy_table), dtype=int)
    #for first, last, member_props in group_gen:
    #    group_richness[first:last] = last-first
    
    group_ids = groups.group_ids
    group_richness = richness(group_ids)        
    gmf = np.histogram(np.array(group_richness) , data_gmf_bin)[0] / halocat.Lbox**3.
   
    # chi^2 of the mock GMF against the data, excluding the last bin; printed per degree of freedom
    chi = np.sum(((gmf - data_gmf)**2. / data_gmf_err**2.)[:-1])
    print(chi/(len(gmf)-1)) 

    bin_centers = 0.5 * (data_gmf_bin[1:] + data_gmf_bin[:-1])

    plt.figure(figsize=(10,10))
    plt.errorbar(bin_centers , data_gmf , data_gmf_err , fmt=".k" , capsize = 2)
    plt.plot(bin_centers , gmf)
    plt.yscale("log")
    plt.xlim([0,60])
    plt.savefig("gmf.pdf")
Esempio n. 28
0
def build_nbar_xi_gmf_cov(Mr=21):
    ''' Build covariance matrix for the full nbar, xi, gmf data vector
    using realisations of galaxy mocks for "data" HOD parameters in the 
    halos from the multidark simulation. Covariance matrices for different sets of observables
    can be extracted from the full covariance matrix by slicing through 
    the indices. 

    '''
    nbars = []
    xir = []
    gmfs = []

    thr = -1. * np.float(Mr)
    model = PrebuiltHodModelFactory('zheng07', threshold=thr)
    halocat = CachedHaloCatalog(simname = 'multidark', redshift = 0, halo_finder = 'rockstar')
    #some settings for tpcf calculations
    rbins = hardcoded_xi_bins()

    for i in xrange(1,125):
        print 'mock#', i

        # populate the mock subvolume
        model.populate_mock(halocat)
        # returning the positions of galaxies
        pos = three_dim_pos_bundle(model.mock.galaxy_table, 'x', 'y', 'z')
        # calculate nbar
        nbars.append(len(pos) / 1000**3.)
        # calculate xi(r) with periodic boundary conditions (no randoms needed)
        xi = tpcf(pos, rbins, period = model.mock.Lbox, 
                  max_sample_size=int(2e5), estimator='Landy-Szalay')
        xir.append(xi)
        # calculate gmf
        nbar = len(pos) / 1000**3.
        b_normal = 0.75
        b = b_normal * (nbar)**(-1./3) 
        groups = pyfof.friends_of_friends(pos , b)
        w = np.array([len(x) for x in groups])
        gbins = gmf_bins()
        gmf = np.histogram(w , gbins)[0] / (1000.**3.)
        gmfs.append(gmf)  # GMF
    # save nbar variance
    nbar_var = np.var(nbars, axis=0, ddof=1)
    nbar_file = ''.join([util.multidat_dir(), 'abc_nbar_var.Mr', str(Mr), '.dat'])
    np.savetxt(nbar_file, [nbar_var])


    # write full covariance matrix of various combinations of the data
    # and invert for the likelihood evaluations

    # --- covariance for all three ---
    fulldatarr = np.hstack((np.array(nbars).reshape(len(nbars), 1),
                            np.array(xir),
                            np.array(gmfs)))
    fullcov = np.cov(fulldatarr.T)
    fullcorr = np.corrcoef(fulldatarr.T)
    # and save the covariance matrix
    nopoisson_file = ''.join([util.multidat_dir(), 'abc_nbar_xi_gmf_cov.no_poisson.Mr', str(Mr), '.dat'])
    np.savetxt(nopoisson_file, fullcov)

    # and a correlation matrix
    full_corr_file = ''.join([util.multidat_dir(), 'abc_nbar_xi_gmf_corr.Mr', str(Mr), '.dat'])
    np.savetxt(full_corr_file, fullcorr)

    return None
Esempio n. 29
0
def test_precomputed_rr(Nr, Mr=21):
    '''
    Mr = luminosity threshold
    Nr = number of randoms
    '''

    rbins = np.logspace(-1, 1.25, 15)
    rmax = rbins.max()
    rbin_centers = (rbins[1:] + rbins[0:-1]) / 2.

    halocat = CachedHaloCatalog(simname='bolshoi', redshift=0)
    model = PrebuiltHodModelFactory("zheng07")
    model.populate_mock(halocat=halocat, enforce_PBC=False)
    data = three_dim_pos_bundle(model.mock.galaxy_table, 'x', 'y', 'z')
    print data.shape
    L = halocat.Lbox

    xmin, ymin, zmin = 0., 0., 0.
    xmax, ymax, zmax = L, L, L

    num_randoms = Nr
    xran = np.random.uniform(xmin, xmax, num_randoms)
    yran = np.random.uniform(ymin, ymax, num_randoms)
    zran = np.random.uniform(zmin, zmax, num_randoms)
    randoms = np.vstack((xran, yran, zran)).T

    verbose = False
    num_threads = cpu_count()

    period = None
    approx_cell1_size = [rmax, rmax, rmax]
    approx_cell2_size = approx_cell1_size
    approx_cellran_size = [rmax, rmax, rmax]

    normal_result = tpcf(data,
                         rbins,
                         data,
                         randoms=randoms,
                         period=period,
                         max_sample_size=int(1e4),
                         estimator='Landy-Szalay',
                         approx_cell1_size=approx_cell1_size,
                         approx_cellran_size=approx_cellran_size)

    #count data pairs
    DD = npairs(data, data, rbins, period, verbose, num_threads,
                approx_cell1_size, approx_cell2_size)
    DD = np.diff(DD)
    #count random pairs
    RR = npairs(randoms, randoms, rbins, period, verbose, num_threads,
                approx_cellran_size, approx_cellran_size)
    RR = np.diff(RR)
    #count data random pairs
    DR = npairs(data, randoms, rbins, period, verbose, num_threads,
                approx_cell1_size, approx_cell2_size)
    DR = np.diff(DR)

    print "DD=", DD
    print "DR=", DR
    print "RR=", RR

    ND = len(data)
    NR = len(randoms)

    # cast to float to avoid Python 2 integer division
    factor1 = ND * ND / float(NR * NR)
    factor2 = ND * NR / float(NR * NR)

    # Landy-Szalay estimator: xi = DD/(factor1*RR) - 2*DR/(factor2*RR) + 1
    # (the 1.0* guards against integer division of the raw pair counts)
    xi_LS = (1.0 * DD) / (factor1 * RR) - (2.0 * DR) / (factor2 * RR) + 1.0

    print "xi=", xi_LS
    print "normal=", normal_result

    result_with_RR_precomputed = tpcf(data,
                                      rbins,
                                      data,
                                      randoms=randoms,
                                      period=period,
                                      max_sample_size=int(1e5),
                                      estimator='Landy-Szalay',
                                      approx_cell1_size=approx_cell1_size,
                                      approx_cellran_size=approx_cellran_size,
                                      RR_precomputed=RR,
                                      NR_precomputed=NR)

    print "xi_pre=", result_with_RR_precomputed
Esempio n. 30
0
def Subvolume_Analytic(N_sub, ratio=False):
    ''' Test the 2PCF estimates from MultiDark subvolume versus the 
    analytic 2PCF for the entire MultiDark volume

    Parameters
    ----------
    N_sub : (int)
        Number of subvolumes to sample

    '''
    prettyplot()
    pretty_colors = prettycolors()

    pickle_file = ''.join([
        '/export/bbq2/hahn/ccppabc/dump/', 'xi_subvolume_test', '.Nsub',
        str(N_sub), '.p'
    ])

    fig = plt.figure(1)
    sub = fig.add_subplot(111)

    xi_bin = xi_binedges()

    if os.path.isfile(pickle_file):
        data_dump = pickle.load(open(pickle_file, 'rb'))
        full_xi = data_dump['full_xi']
    else:
        # Entire MultiDark Volume (Analytic xi)
        model = PrebuiltHodModelFactory('zheng07', threshold=-21)
        halocat = CachedHaloCatalog(simname='multidark',
                                    redshift=0,
                                    halo_finder='rockstar')

        model.populate_mock(halocat)
        pos = three_dim_pos_bundle(model.mock.galaxy_table, 'x', 'y', 'z')

        # while the estimator is set to Landy-Szalay, I highly suspect it
        # actually falls back to the Natural estimator, since DR pairs cannot be
        # calculated from analytic randoms
        full_xi = tpcf(pos,
                       xi_bin,
                       period=model.mock.Lbox,
                       max_sample_size=int(2e5),
                       estimator='Landy-Szalay',
                       num_threads=1)
        data_dump = {}
        data_dump['full_xi'] = full_xi

    if not ratio:
        sub.plot(0.5 * (xi_bin[:-1] + xi_bin[1:]),
                 full_xi,
                 lw=2,
                 ls='-',
                 c='k',
                 label=r'Analytic $\xi$ Entire Volume')

    if not os.path.isfile(pickle_file):
        # MultiDark SubVolume (precomputed RR pairs)
        sub_model = PrebuiltHodModelFactory('zheng07', threshold=-21)
        sub_model.new_haloprop_func_dict = {'sim_subvol': util.mk_id_column}
        sub_halocat = CachedHaloCatalog(simname='multidark',
                                        redshift=0,
                                        halo_finder='rockstar')
        RR = data_RR()
        randoms = data_random()
        NR = len(randoms)

    for method in ['Landy-Szalay', 'Natural']:

        if method == 'Landy-Szalay':
            iii = 3
        elif method == 'Natural':
            iii = 5

        if not os.path.isfile(pickle_file):
            sub_xis_list = []
            sub_xis = np.zeros(len(full_xi))

            for ii in range(1, N_sub + 1):
                # randomly sample one of the subvolumes
                rint = ii  #np.random.randint(1, 125)
                simsubvol = lambda x: util.mask_func(x, rint)
                sub_model.populate_mock(sub_halocat,
                                        masking_function=simsubvol,
                                        enforce_PBC=False)

                pos = three_dim_pos_bundle(sub_model.mock.galaxy_table, 'x',
                                           'y', 'z')

                xi, yi, zi = util.random_shifter(rint)
                temp_randoms = randoms.copy()
                temp_randoms[:, 0] += xi
                temp_randoms[:, 1] += yi
                temp_randoms[:, 2] += zi

                rmax = xi_bin.max()
                approx_cell1_size = [rmax, rmax, rmax]
                approx_cellran_size = [rmax, rmax, rmax]

                sub_xi = tpcf(pos,
                              xi_bin,
                              pos,
                              randoms=temp_randoms,
                              period=None,
                              max_sample_size=int(1e5),
                              estimator=method,
                              approx_cell1_size=approx_cell1_size,
                              approx_cellran_size=approx_cellran_size,
                              RR_precomputed=RR,
                              NR_precomputed=NR)
                label = None
                if ii == N_sub - 1:
                    label = 'Subvolumes'

                #if not ratio:
                #    sub.plot(0.5*(xi_bin[:-1]+xi_bin[1:]), sub_xi, lw=0.5, ls='--', c=pretty_colors[iii])
                sub_xis += sub_xi
                sub_xis_list.append(sub_xi)

            sub_xi_avg = sub_xis / np.float(N_sub)

            data_dump[method] = {}
            data_dump[method]['sub_xi_avg'] = sub_xi_avg
            data_dump[method]['sub_xis_list'] = sub_xis_list
        else:
            sub_xis_list = data_dump[method]['sub_xis_list']
            sub_xi_avg = data_dump[method]['sub_xi_avg']

        if not ratio:
            sub.plot(0.5 * (xi_bin[:-1] + xi_bin[1:]),
                     sub_xi_avg,
                     lw=2,
                     ls='--',
                     c=pretty_colors[iii],
                     label='Subvolume ' + method)
        else:
            sub.plot(0.5 * (xi_bin[:-1] + xi_bin[1:]),
                     sub_xi_avg / full_xi,
                     lw=2,
                     ls='--',
                     c=pretty_colors[iii],
                     label='Subvolume ' + method)

    if not os.path.isfile(pickle_file):
        pickle.dump(data_dump, open(pickle_file, 'wb'))

    sub.set_xlim([0.1, 50.])
    sub.set_xlabel('r', fontsize=30)
    sub.set_xscale('log')

    if not ratio:
        sub.set_ylabel(r"$\xi \mathtt{(r)}$", fontsize=25)
        sub.set_yscale('log')
    else:
        sub.set_ylabel(r"$\overline{\xi^\mathtt{sub}}/\xi^\mathtt{all}$",
                       fontsize=25)

    sub.legend(loc='lower left')

    if ratio:
        fig_file = ''.join([
            util.fig_dir(), 'test_xi_subvolume_analytic.Nsub',
            str(N_sub), '.ratio.png'
        ])
    else:
        fig_file = ''.join([
            util.fig_dir(), 'test_xi_subvolume_analytic.Nsub',
            str(N_sub), '.png'
        ])
    fig.savefig(fig_file, bbox_inches='tight', dpi=100)
    plt.close()
    return None